diff --git a/.openpublishing.publish.config.json b/.openpublishing.publish.config.json index 67ab47fc058a9..bb257ddab952b 100644 --- a/.openpublishing.publish.config.json +++ b/.openpublishing.publish.config.json @@ -32,6 +32,18 @@ "need_preview_pull_request": true, "contribution_branch_mappings": {}, "dependent_repositories": [ + { + "path_to_root": "azure-dev-docs-pr", + "url": "https://github.com/MicrosoftDocs/azure-dev-docs-pr", + "branch": "main", + "branch_mapping": {} + }, + { + "path_to_root": "terraform_samples", + "url": "https://github.com/Azure/terraform", + "branch": "master", + "branch_mapping": {} + }, { "path_to_root": "quickstart-templates", "url": "https://github.com/Azure/azure-quickstart-templates", @@ -79,7 +91,7 @@ "url": "https://github.com/Azure-Samples/msdocs-storage-bind-function-service", "branch": "main", "branch_mapping": {} - }, + }, { "path_to_root": "azure_cli_scripts", "url": "https://github.com/Azure-Samples/azure-cli-samples", @@ -889,8 +901,19 @@ "url": "https://github.com/Azure-Samples/azure-sql-binding-func-dotnet-todo", "branch": "docs-snippets", "branch_mapping": {} + }, + { + "path_to_root": "ms-identity-node", + "url": "https://github.com/Azure-Samples/ms-identity-node", + "branch": "main", + "branch_mapping": {} + }, + { + "path_to_root": "ms-identity-javascript-nodejs-desktop", + "url": "https://github.com/Azure-Samples/ms-identity-javascript-nodejs-desktop", + "branch": "main", + "branch_mapping": {} } - ], "branch_target_mapping": { "live": ["Publish", "PDF"], @@ -922,6 +945,7 @@ ".openpublishing.redirection.azure-monitor.json", ".openpublishing.redirection.azure-percept.json", ".openpublishing.redirection.azure-productivity.json", + ".openpublishing.redirection.azure-australia.json", "articles/azure-fluid-relay/.openpublishing.redirection.fluid-relay.json", "articles/azure-netapp-files/.openpublishing.redirection.azure-netapp-files.json", "articles/azure-relay/.openpublishing.redirection.relay.json", @@ -954,6 +978,7 @@ "articles/container-apps/.openpublishing.redirection.container-apps.json", "articles/spring-cloud/.openpublishing.redirection.spring-cloud.json", "articles/load-testing/.openpublishing.redirection.azure-load-testing.json", - "articles/azure-video-indexer/.openpublishing.redirection.azure-video-indexer.json" + "articles/azure-video-indexer/.openpublishing.redirection.azure-video-indexer.json", + "articles/machine-learning/.openpublishing.redirection.machine-learning.json" ] } diff --git a/.openpublishing.redirection.active-directory.json b/.openpublishing.redirection.active-directory.json index 911d4b9033f7f..77588b96b0af9 100644 --- a/.openpublishing.redirection.active-directory.json +++ b/.openpublishing.redirection.active-directory.json @@ -25,6 +25,11 @@ "redirect_url": "/azure/active-directory/authentication/concept-certificate-based-authentication", "redirect_document_id": false }, + { + "source_path_from_root": "/articles/active-directory/saas-apps/bridgelineunbound-tutorial.md", + "redirect_url": "/azure/active-directory/saas-apps/tutorial-list", + "redirect_document_id": false + }, { "source_path_from_root": "/articles/active-directory/manage-apps/common-scenarios.md", "redirect_url": "/azure/active-directory/manage-apps/what-is-application-management", diff --git a/.openpublishing.redirection.azure-australia.json b/.openpublishing.redirection.azure-australia.json new file mode 100644 index 0000000000000..682bf44172d1e --- /dev/null +++ b/.openpublishing.redirection.azure-australia.json @@ -0,0 +1,84 @@ +{ + 
"redirections": [ + { + "source_path_from_root": "/articles/azure-australia/australia-overview.md", + "redirect_url": "https://azure.microsoft.com/en-us/global-infrastructure/geographies/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-australia/azure-key-vault.md", + "redirect_url": "https://azure.microsoft.com/en-us/global-infrastructure/geographies/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-australia/azure-policy.md", + "redirect_url": "https://azure.microsoft.com/en-us/global-infrastructure/geographies/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-australia/gateway-egress-traffic.md", + "redirect_url": "https://azure.microsoft.com/en-us/global-infrastructure/geographies/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-australia/gateway-ingress-traffic.md", + "redirect_url": "https://azure.microsoft.com/en-us/global-infrastructure/geographies/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-australia/gateway-log-audit-visibility.md", + "redirect_url": "https://azure.microsoft.com/en-us/global-infrastructure/geographies/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-australia/gateway-secure-remote-administration.md", + "redirect_url": "https://azure.microsoft.com/en-us/global-infrastructure/geographies/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-australia/identity-federation.md", + "redirect_url": "https://azure.microsoft.com/en-us/global-infrastructure/geographies/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-australia/index.yml", + "redirect_url": "https://azure.microsoft.com/en-us/global-infrastructure/geographies/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-australia/recovery-backup.md", + "redirect_url": "https://azure.microsoft.com/en-us/global-infrastructure/geographies/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-australia/reference-library.md", + "redirect_url": "https://azure.microsoft.com/en-us/global-infrastructure/geographies/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-australia/role-privileged.md", + "redirect_url": "https://azure.microsoft.com/en-us/global-infrastructure/geographies/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-australia/secure-your-data.md", + "redirect_url": "https://azure.microsoft.com/en-us/global-infrastructure/geographies/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-australia/security-explained.md", + "redirect_url": "https://azure.microsoft.com/en-us/global-infrastructure/geographies/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-australia/system-monitor.md", + "redirect_url": "https://azure.microsoft.com/en-us/global-infrastructure/geographies/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-australia/vpn-gateway.md", + "redirect_url": "https://azure.microsoft.com/en-us/global-infrastructure/geographies/", + "redirect_document_id": false + } + ] +} diff --git a/.openpublishing.redirection.azure-monitor.json b/.openpublishing.redirection.azure-monitor.json index a157227d979f8..6cac837f651b8 100644 --- 
a/.openpublishing.redirection.azure-monitor.json +++ b/.openpublishing.redirection.azure-monitor.json @@ -1,5 +1,65 @@ { "redirections": [ + { + "source_path_from_root": "/articles/azure-monitor/app/profiler.md", + "redirect_url": "/azure/azure-monitor/profiler/profiler", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/app/profiler-aspnetcore-linux.md", + "redirect_url": "/azure/azure-monitor/profiler/profiler-aspnetcore-linux", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/app/profiler-azure-functions.md", + "redirect_url": "/azure/azure-monitor/profiler/profiler-azure-functions", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/app/profiler-bring-your-own-storage.md", + "redirect_url": "/azure/azure-monitor/profiler/profiler-bring-your-own-storage", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/app/profiler-cloudservice.md", + "redirect_url": "/azure/azure-monitor/profiler/profiler-cloudservice", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/app/profiler-containers.md", + "redirect_url": "/azure/azure-monitor/profiler/profiler-containers", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/app/profiler-overview.md", + "redirect_url": "/azure/azure-monitor/profiler/profiler-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/app/profiler-servicefabric.md", + "redirect_url": "/azure/azure-monitor/profiler/profiler-servicefabric", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/app/profiler-settings.md", + "redirect_url": "/azure/azure-monitor/profiler/profiler-settings", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/app/profiler-trackrequests.md", + "redirect_url": "/azure/azure-monitor/profiler/profiler-trackrequests", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/app/profiler-troubleshooting.md", + "redirect_url": "/azure/azure-monitor/profiler/profiler-troubleshooting", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/app/profiler-vm.md", + "redirect_url": "/azure/azure-monitor/profiler/profiler-vm", + "redirect_document_id": false + }, { "source_path_from_root": "/articles/azure-monitor/app/app-insights-dashboards.md", "redirect_url": "/azure/azure-monitor/app/overview-dashboard", @@ -65,6 +125,11 @@ "redirect_url": "/azure/azure-monitor/logs/cost-logs", "redirect_document_id": false }, + { + "source_path_from_root": "/articles/azure-monitor/logs/design-logs-deployment.md", + "redirect_url": "/azure/azure-monitor/logs/workspace-design", + "redirect_document_id": false + }, { "source_path_from_root": "/articles/azure-monitor/app/apm-tables.md", "redirect_url": "/azure/azure-monitor/app/convert-classic-resource#workspace-based-resource-changes", @@ -77,13 +142,89 @@ }, { "source_path_from_root": "/articles/azure-monitor/alerts/itsmc-service-manager-script.md", - "redirect_url": "/azure/azure-monitor/alerts/itsmc-connections.md", + "redirect_url": "/azure/azure-monitor/alerts/itsmc-connections", "redirect_document_id": false }, { "source_path_from_root": "/articles/azure-monitor/alerts/itsmc-connections-scsm.md" , - "redirect_url": "/azure/azure-monitor/alerts/itsmc-connections.md", + 
"redirect_url": "/azure/azure-monitor/alerts/itsmc-connections", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/containers/container-insights-azure-redhat-setup.md" , + "redirect_url": "/azure/azure-monitor/containers/container-insights-transition-hybrid", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/containers/container-insights-azure-redhat4-setup.md" , + "redirect_url": "/azure/azure-monitor/containers/container-insights-transition-hybrid", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/alerts/alerts-metric-overview.md" , + "redirect_url": "/azure/azure-monitor/alerts/alerts-types", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/alerts/alerts-managing-alert-instances.md" , + "redirect_url": "/azure/azure-monitor/alerts/alerts-page.md", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/alerts/alerts-managing-alert-states.md" , + "redirect_url": "/azure/azure-monitor/alerts/alerts-page", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/alerts/alerts-unified-log.md" , + "redirect_url": "/azure/azure-monitor/alerts/alerts-types", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/alerts/activity-log-alerts.md" , + "redirect_url": "/azure/azure-monitor/alerts/alerts-types", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/alerts/alerts-smartgroups-overview.md" , + "redirect_url": "/azure/azure-monitor/alerts/alerts-types", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/alerts/alerts-managing-smart-groups.md" , + "redirect_url": "/azure/azure-monitor/alerts/alerts-types", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/insights/data-explorer.md" , + "redirect_url": "/azure/data-explorer/data-explorer-insights", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/vm/vminsights-ga-release-faq.md" , + "redirect_url": "/azure/azure-monitor/faq#vm-insights", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/vm/vminsights-log-search.md" , + "redirect_url": "/azure/azure-monitor/alerts/vminsights-log-query", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/cli-samples.md" , + "redirect_url": "/cli/azure/azure-cli-reference-for-monitor", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/powershell-samples.md" , + "redirect_url": "/powershell/module/az.monitor", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/insights/azure-cli-application-insights-component.md" , + "redirect_url": "/cli/azure/monitor/app-insights", "redirect_document_id": false } ] -} \ No newline at end of file +} + diff --git a/.openpublishing.redirection.defender-for-cloud.json b/.openpublishing.redirection.defender-for-cloud.json index 61af983b70346..8967979a552c3 100644 --- a/.openpublishing.redirection.defender-for-cloud.json +++ b/.openpublishing.redirection.defender-for-cloud.json @@ -15,6 +15,11 @@ "redirect_url": "/azure/defender-for-cloud/policy-reference", "redirect_document_id": false }, + { + "source_path_from_root": 
"/articles/security-center/security-center-identity-access.md", + "redirect_url": "/azure/defender-for-cloud/multi-factor-authentication-enforcement", + "redirect_document_id": false + }, { "source_path_from_root": "/articles/security-center/security-center-policy-definitions.md", "redirect_url": "/azure/defender-for-cloud/policy-reference", diff --git a/.openpublishing.redirection.json b/.openpublishing.redirection.json index 2db91ac3cc69a..c157e60791b21 100644 --- a/.openpublishing.redirection.json +++ b/.openpublishing.redirection.json @@ -170,221 +170,7 @@ "redirect_url": "/previous-versions/azure/virtual-machines/linux/login-using-aad", "redirect_document_id": false }, - { - "source_path": "articles/machine-learning/classic/ai-gallery-control-personal-data-dsr.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/ai-gallery-control-personal-data-dsr", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/algorithm-parameters-optimize.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/algorithm-parameters-optimize", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/azure-ml-netsharp-reference-guide.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/azure-ml-netsharp-reference-guide", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/consume-web-services.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/consume-web-services", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/consuming-from-excel.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/consuming-from-excel", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/create-endpoint.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/create-endpoint", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/create-experiment.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/create-experiment", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/create-models-and-endpoints-with-powershell.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/create-models-and-endpoints-with-powershell", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/create-workspace.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/create-workspace", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/custom-r-modules.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/custom-r-modules", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/deploy-a-machine-learning-web-service.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/deploy-a-machine-learning-web-service", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/deploy-consume-web-service-guide.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/deploy-consume-web-service-guide", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/evaluate-model-performance.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/evaluate-model-performance", - 
"redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/excel-add-in-for-web-services.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/excel-add-in-for-web-services", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/execute-python-scripts.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/execute-python-scripts", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/export-delete-personal-data-dsr.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/export-delete-personal-data-dsr", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/gallery-how-to-use-contribute-publish.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/gallery-how-to-use-contribute-publish", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/import-data.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/import-data", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/interpret-model-results.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/interpret-model-results", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/manage-experiment-iterations.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/manage-experiment-iterations", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/manage-new-webservice.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/manage-new-webservice", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/manage-web-service-endpoints-using-api-management.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/manage-web-service-endpoints-using-api-management", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/manage-workspace.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/manage-workspace", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/model-progression-experiment-to-web-service.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/model-progression-experiment-to-web-service", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/powershell-module.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/powershell-module", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/r-get-started.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/r-get-started", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/retired-data-science-for-beginners-videos.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/retired-data-science-for-beginners-videos", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/retrain-classic-web-service.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/retrain-classic-web-service", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/retrain-machine-learning-model.md", - "redirect_url": 
"/previous-versions/azure/machine-learning/classic/retrain-machine-learning-model", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/sample-experiments.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/sample-experiments", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/studio-classic-overview.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/studio-classic-overview", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/support-aml-studio.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/support-aml-studio", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/tutorial-part1-credit-risk.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/tutorial-part1-credit-risk", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/tutorial-part2-credit-risk-train.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/tutorial-part2-credit-risk-train", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/tutorial-part3-credit-risk-deploy.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/tutorial-part3-credit-risk-deploy", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/use-data-from-an-on-premises-sql-server.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/use-data-from-an-on-premises-sql-server", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/use-sample-datasets.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/use-sample-datasets", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/version-control.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/version-control", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/web-service-error-codes.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/web-service-error-codes", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/web-service-parameters.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/web-service-parameters", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/web-services-logging.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/web-services-logging", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/web-services-that-use-import-export-modules.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/web-services-that-use-import-export-modules", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/classic/index.yml", - "redirect_url": "/previous-versions/azure/machine-learning/classic/index", - "redirect_document_id": false - }, + { "source_path": "articles/storage/blobs/storage-c-plus-plus-how-to-use-blobs.md", "redirect_url": "/azure/storage/blobs/quickstart-blobs-c-plus-plus", @@ -2718,6 +2504,11 @@ "redirect_url": "/azure/aks/load-balancer-standard", "redirect_document_id": false }, + { + "source_path_from_root": "/articles/aks/keda.md", + "redirect_url": "/azure/aks/keda-about", + 
"redirect_document_id": false + }, { "source_path_from_root": "/articles/analysis-services/analysis-services-create-model-portal.md", "redirect_url": "/azure/analysis-services/analysis-services-overview", @@ -2748,131 +2539,7 @@ "redirect_url": "/azure/analysis-services/analysis-services-overview", "redirect_document_id": false }, - { - "source_path_from_root": "/articles/machine-learning/reference-yaml-deployment-managed-batch.md", - "redirect_url": "/azure/machine-learning/reference-yaml-deployment-batch.md", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-kubernetes-instance-type.md", - "redirect_url": "/azure/machine-learning/how-to-attach-kubernetes-anywhere", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-attach-arc-kubernetes.md", - "redirect_url": "/azure/machine-learning/how-to-attach-kubernetes-anywhere", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/reference-yaml-endpoint-managed-batch.md", - "redirect_url": "/azure/machine-learning/reference-yaml-endpoint-batch.md", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/reference-yaml-compute.md", - "redirect_url": "/azure/machine-learning/reference-yaml-compute-aml.md", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/reference-yaml-deployment-k8s-online.md", - "redirect_url": "/azure/machine-learning/reference-yaml-overview.md", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/reference-yaml-endpoint-k8s-online.md", - "redirect_url": "/azure/machine-learning/reference-yaml-overview.md", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/tutorial-train-models-with-aml.md", - "redirect_url": "/azure/machine-learning/tutorial-train-deploy-notebook", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/tutorial-deploy-models-with-aml.md", - "redirect_url": "/azure/machine-learning/tutorial-train-deploy-notebook", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-create-labeling-projects.md", - "redirect_url": "/azure/machine-learning/how-to-create-image-labeling-projects", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/overview-what-is-azure-ml.md", - "redirect_url": "/azure/machine-learning/overview-what-is-azure-machine-learning", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-troubleshoot-managed-online-endpoints.md", - "redirect_url": "/azure/machine-learning/how-to-troubleshoot-online-endpoints", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/classic/migrate-overview.md", - "redirect_url": "/azure/machine-learning/migrate-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/classic/migrate-register-dataset.md", - "redirect_url": "/azure/machine-learning/migrate-register-dataset", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/classic/migrate-rebuild-experiment.md", - "redirect_url": "/azure/machine-learning/migrate-rebuild-experiment", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/machine-learning/classic/migrate-rebuild-web-service.md", - "redirect_url": "/azure/machine-learning/migrate-rebuild-web-service", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/classic/migrate-rebuild-integrate-with-client-app.md", - "redirect_url": "/azure/machine-learning/migrate-rebuild-integrate-with-client-app", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/classic/migrate-execute-r-script.md", - "redirect_url": "/azure/machine-learning/migrate-execute-r-script", - "redirect_document_id": true - }, - { - "source_path": "articles/machine-learning/how-to-search-cross-workspace.md", - "redirect_url": "/azure/machine-learning/how-to-manage-workspace", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/resource-known-issues.md", - "redirect_url": "/azure/machine-learning/how-to-configure-auto-train#troubleshooting", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/how-to-compute-cluster-instance-os-upgrade.md", - "redirect_url": "/azure/machine-learning/concept-vulnerability-management", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/how-to-deploy-custom-docker-image.md", - "redirect_url": "/azure/machine-learning/how-to-deploy-custom-container", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/how-to-deploy-continuously-deploy.md", - "redirect_url": "/azure/machine-learning/how-to-safely-rollout-managed-endpoints", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/tutorial-deploy-managed-endpoints-using-system-managed-identity.md", - "redirect_url": "/azure/machine-learning/how-to-access-resources-from-endpoints-managed-identities", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-auto-train-remote.md", - "redirect_url": "/azure/machine-learning/concept-automated-ml#local-remote", - "redirect_document_id": false - }, + { "source_path_from_root": "/articles/analysis-services/tutorials/aas-lesson-13-deploy.md", "redirect_url": "/azure/analysis-services/analysis-services-overview", @@ -5468,16 +5135,7 @@ "redirect_url": "/azure/architecture/cloud-adoption-guide/subscription-governance-examples", "redirect_document_id": false }, - { - "source_path_from_root": "/articles/machine-learning/service/overview-more-machine-learning.md", - "redirect_url": "/azure/architecture/data-guide/technology-choices/data-science-and-machine-learning", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/r-developers-guide.md", - "redirect_url": "/azure/architecture/data-guide/technology-choices/r-developers-guide", - "redirect_document_id": false - }, + { "source_path_from_root": "/articles/guidance/guidance-elasticsearch.md", "redirect_url": "/azure/architecture/elasticsearch", @@ -17833,6 +17491,26 @@ "redirect_url": "/azure/iot-dps/quick-setup-auto-provision-cli", "redirect_document_id": false }, + { + "source_path_from_root": "/articles/iot-dps/tutorial-net-provision-device-to-hub.md", + "redirect_url": "/azure/iot-dps/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/iot-dps/tutorial-provision-device-to-hub.md", + "redirect_url": "/azure/iot-dps/quick-create-simulated-device-symm-key", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/iot-dps/tutorial-set-up-cloud.md", 
+ "redirect_url": "/azure/iot-dps/quick-create-simulated-device-symm-key", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/iot-dps/tutorial-set-up-device.md", + "redirect_url": "/azure/iot-dps/quick-create-simulated-device-symm-key", + "redirect_document_id": false + }, { "source_path_from_root": "/articles/security/fundamentals/iot-overview.md", "redirect_url": "/azure/iot-fundamentals/iot-security-architecture", @@ -19299,438 +18977,8 @@ "redirect_document_id": false }, { - "source_path_from_root": "/articles/machine-learning/service/index.yml", - "redirect_url": "/azure/machine-learning/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-algorithm-cheat-sheet.md", - "redirect_url": "/azure/machine-learning/algorithm-cheat-sheet", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/algorithm-cheat-sheet.md", - "redirect_url": "/azure/machine-learning/algorithm-cheat-sheet", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/import-from-azure-blob-storage.md", - "redirect_url": "/azure/machine-learning/algorithm-module-reference/module-reference", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/import-from-azure-sql-database.md", - "redirect_url": "/azure/machine-learning/algorithm-module-reference/module-reference", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/import-from-web-url-via-http.md", - "redirect_url": "/azure/machine-learning/algorithm-module-reference/module-reference", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-ui-sample-classification-predict-credit-risk-basic.md", - "redirect_url": "/azure/machine-learning/azure/machine-learning/samples-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/azure-machine-learning-release-notes.md", - "redirect_url": "/azure/machine-learning/azure-machine-learning-release-notes", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/azure-ml-customer-churn-scenario.md", - "redirect_url": "/azure/machine-learning/classic/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/debug-models.md", - "redirect_url": "/azure/machine-learning/classic/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/extend-your-experiment-with-r.md", - "redirect_url": "/azure/machine-learning/classic/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/text-analytics-module-tutorial.md", - "redirect_url": "/azure/machine-learning/classic/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/troubleshooting-creating-ml-workspace.md", - "redirect_url": "/azure/machine-learning/classic/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/use-case-excel-studio.md", - "redirect_url": "/azure/machine-learning/classic/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/what-is-machine-learning.md", - "redirect_url": 
"/azure/machine-learning/classic/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/ai-gallery-control-personal-data-dsr.md", - "redirect_url": "/azure/machine-learning/classic/ai-gallery-control-personal-data-dsr", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/algorithm-parameters-optimize.md", - "redirect_url": "/azure/machine-learning/classic/algorithm-parameters-optimize", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/azure-ml-netsharp-reference-guide.md", - "redirect_url": "/azure/machine-learning/classic/azure-ml-netsharp-reference-guide", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/consume-web-services.md", - "redirect_url": "/azure/machine-learning/classic/consume-web-services", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/consume-web-service-with-web-app-template.md", - "redirect_url": "/azure/machine-learning/classic/consume-web-services", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/consuming-from-excel.md", - "redirect_url": "/azure/machine-learning/classic/consuming-from-excel", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/create-endpoint.md", - "redirect_url": "/azure/machine-learning/classic/create-endpoint", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/create-experiment.md", - "redirect_url": "/azure/machine-learning/classic/create-experiment", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/create-models-and-endpoints-with-powershell.md", - "redirect_url": "/azure/machine-learning/classic/create-models-and-endpoints-with-powershell", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/create-workspace.md", - "redirect_url": "/azure/machine-learning/classic/create-workspace", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/custom-r-modules.md", - "redirect_url": "/azure/machine-learning/classic/custom-r-modules", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/classic/data-science-for-beginners-ask-a-question-you-can-answer-with-data.md", - "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/classic/data-science-for-beginners-copy-other-peoples-work-to-do-data-science.md", - "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/classic/data-science-for-beginners-is-your-data-ready-for-data-science.md", - "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/classic/data-science-for-beginners-predict-an-answer-with-a-simple-model.md", - "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", - "redirect_document_id": false - }, - { - "source_path_from_root": 
"/articles/machine-learning/classic/data-science-for-beginners-the-5-questions-data-science-answers.md", - "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/data-science-for-beginners-ask-a-question-you-can-answer-with-data.md", - "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/data-science-for-beginners-copy-other-peoples-work-to-do-data-science.md", - "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/data-science-for-beginners-is-your-data-ready-for-data-science.md", - "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/data-science-for-beginners-predict-an-answer-with-a-simple-model.md", - "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/data-science-for-beginners-the-5-questions-data-science-answers.md", - "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/convert-training-experiment-to-scoring-experiment.md", - "redirect_url": "/azure/machine-learning/classic/deploy-a-machine-learning-web-service", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/deploy-a-machine-learning-web-service.md", - "redirect_url": "/azure/machine-learning/classic/deploy-a-machine-learning-web-service", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/deploy-consume-web-service-guide.md", - "redirect_url": "/azure/machine-learning/classic/deploy-consume-web-service-guide", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/deploy-with-resource-manager-template.md", - "redirect_url": "/azure/machine-learning/classic/deploy-with-resource-manager-template", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/evaluate-model-performance.md", - "redirect_url": "/azure/machine-learning/classic/evaluate-model-performance", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/excel-add-in-for-web-services.md", - "redirect_url": "/azure/machine-learning/classic/excel-add-in-for-web-services", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/execute-python-scripts.md", - "redirect_url": "/azure/machine-learning/classic/execute-python-scripts", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/export-delete-personal-data-dsr.md", - "redirect_url": "/azure/machine-learning/classic/export-delete-personal-data-dsr", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-monitor-view-training-logs.md", - "redirect_url": 
"/azure/machine-learning/how-to-log-view-metrics", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-manage-runs.md", - "redirect_url": "/azure/machine-learning/how-to-track-monitor-analyze-runs", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-track-experiments.md", - "redirect_url": "/azure/machine-learning/how-to-log-view-metrics", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-faq.md", - "redirect_url": "/azure/machine-learning/classic/faq", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/gallery-how-to-use-contribute-publish.md", - "redirect_url": "/azure/machine-learning/classic/gallery-how-to-use-contribute-publish", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/import-data.md", - "redirect_url": "/azure/machine-learning/classic/import-data", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/index.yml", - "redirect_url": "/azure/machine-learning/classic/index", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/interpret-model-results.md", - "redirect_url": "/azure/machine-learning/classic/interpret-model-results", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/manage-experiment-iterations.md", - "redirect_url": "/azure/machine-learning/classic/manage-experiment-iterations", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/manage-new-webservice.md", - "redirect_url": "/azure/machine-learning/classic/manage-new-webservice", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/manage-web-service-endpoints-using-api-management.md", - "redirect_url": "/azure/machine-learning/classic/manage-web-service-endpoints-using-api-management", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/manage-workspace.md", - "redirect_url": "/azure/machine-learning/classic/manage-workspace", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/model-progression-experiment-to-web-service.md", - "redirect_url": "/azure/machine-learning/classic/model-progression-experiment-to-web-service", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/powershell-module.md", - "redirect_url": "/azure/machine-learning/classic/powershell-module", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/retrain-classic-web-service.md", - "redirect_url": "/azure/machine-learning/classic/retrain-classic-web-service", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/troubleshooting-retraining-models.md", - "redirect_url": "/azure/machine-learning/classic/retrain-classic-web-service", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/retrain-existing-resource-manager-based-web-service.md", - "redirect_url": "/azure/machine-learning/classic/retrain-machine-learning-model", - "redirect_document_id": false - }, - { - "source_path_from_root": 
"/articles/machine-learning/studio/retrain-machine-learning-model.md", - "redirect_url": "/azure/machine-learning/classic/retrain-machine-learning-model", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/retrain-models-programmatically.md", - "redirect_url": "/azure/machine-learning/classic/retrain-machine-learning-model", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/retrain-new-web-service-using-powershell.md", - "redirect_url": "/azure/machine-learning/classic/retrain-machine-learning-model", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/r-get-started.md", - "redirect_url": "/azure/machine-learning/classic/r-get-started", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/sample-experiments.md", - "redirect_url": "/azure/machine-learning/classic/sample-experiments", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/studio-classic-overview.md", - "redirect_url": "/azure/machine-learning/classic/studio-classic-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/support-aml-studio.md", - "redirect_url": "/azure/machine-learning/classic/support-aml-studio", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/tutorial-part1-credit-risk.md", - "redirect_url": "/azure/machine-learning/classic/tutorial-part1-credit-risk", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/walkthrough-1-create-ml-workspace.md", - "redirect_url": "/azure/machine-learning/classic/tutorial-part1-credit-risk", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/walkthrough-2-upload-data.md", - "redirect_url": "/azure/machine-learning/classic/tutorial-part1-credit-risk", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/walkthrough-3-create-new-experiment.md", - "redirect_url": "/azure/machine-learning/classic/tutorial-part1-credit-risk", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/walkthrough-develop-predictive-solution.md", - "redirect_url": "/azure/machine-learning/classic/tutorial-part1-credit-risk", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/tutorial-part2-credit-risk-train.md", - "redirect_url": "/azure/machine-learning/classic/tutorial-part2-credit-risk-train", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/walkthrough-4-train-and-evaluate-models.md", - "redirect_url": "/azure/machine-learning/classic/tutorial-part2-credit-risk-train", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/tutorial-part3-credit-risk-deploy.md", - "redirect_url": "/azure/machine-learning/classic/tutorial-part3-credit-risk-deploy", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/walkthrough-5-publish-web-service.md", - "redirect_url": "/azure/machine-learning/classic/tutorial-part3-credit-risk-deploy", - "redirect_document_id": false - }, - { - "source_path_from_root": 
"/articles/machine-learning/studio/walkthrough-6-access-web-service.md", - "redirect_url": "/azure/machine-learning/classic/tutorial-part3-credit-risk-deploy", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/linear-regression-in-azure.md", - "redirect_url": "/azure/machine-learning/classic/use-case-excel-studio", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/use-data-from-an-on-premises-sql-server.md", - "redirect_url": "/azure/machine-learning/classic/use-data-from-an-on-premises-sql-server", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/use-sample-datasets.md", - "redirect_url": "/azure/machine-learning/classic/use-sample-datasets", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/version-control.md", - "redirect_url": "/azure/machine-learning/classic/version-control", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/web-service-error-codes.md", - "redirect_url": "/azure/machine-learning/classic/web-service-error-codes", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/web-service-parameters.md", - "redirect_url": "/azure/machine-learning/classic/web-service-parameters", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/web-services-logging.md", - "redirect_url": "/azure/machine-learning/classic/web-services-logging", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/web-services-that-use-import-export-modules.md", - "redirect_url": "/azure/machine-learning/classic/web-services-that-use-import-export-modules", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/faq.md", - "redirect_url": "/azure/machine-learning/classic/what-is-ml-studio", + "source_path_from_root": "/articles/logic-apps/logic-apps-scenario-error-and-exception-handling.md", + "redirect_url": "/azure/logic-apps/logic-apps-exception-handling", "redirect_document_id": false }, { @@ -19773,1502 +19021,50 @@ "redirect_url": "/azure/machine-learning/classic-module-reference/text-analytics", "redirect_document_id": false }, + { - "source_path_from_root": "/articles/machine-learning/how-to-define-task-type.md", - "redirect_url": "/azure/machine-learning/concept-automated-ml", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/concept-automated-ml.md", - "redirect_url": "/azure/machine-learning/concept-automated-ml", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-automated-ml.md", - "redirect_url": "/azure/machine-learning/concept-automated-ml", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/concept-azure-machine-learning-architecture.md", - "redirect_url": "/azure/machine-learning/concept-azure-machine-learning-architecture", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/concept-compute-instance.md", - "redirect_url": "/azure/machine-learning/concept-compute-instance", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/concept-compute-target.md", - "redirect_url": 
"/azure/machine-learning/concept-compute-target", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/concept-data.md", - "redirect_url": "/azure/machine-learning/concept-data", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/concept-deep-learning-vs-machine-learning.md", - "redirect_url": "/azure/machine-learning/concept-deep-learning-vs-machine-learning", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/concept-designer.md", - "redirect_url": "/azure/machine-learning/concept-designer", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/ui-concept-visual-interface.md", - "redirect_url": "/azure/machine-learning/concept-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/concept-enterprise-security.md", - "redirect_url": "/azure/machine-learning/concept-enterprise-security", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/concept-event-grid-integration.md", - "redirect_url": "/azure/machine-learning/concept-event-grid-integration", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/event-grid/cli-samples.md", - "redirect_url": "/azure/event-grid/scripts/event-grid-cli-subscribe-custom-topic", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/event-grid/scripts/event-grid-cli-azure-subscription.md", - "redirect_url": "/azure/event-grid/scripts/event-grid-cli-subscribe-custom-topic", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/event-grid/scripts/event-grid-cli-create-custom-topic.md", - "redirect_url": "/azure/event-grid/scripts/event-grid-cli-subscribe-custom-topic", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/event-grid/scripts/event-grid-cli-resource-group-filter.md", - "redirect_url": "/azure/event-grid/scripts/event-grid-cli-subscribe-custom-topic", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/event-grid/scripts/event-grid-cli-blob.md", - "redirect_url": "/azure/event-grid/scripts/event-grid-cli-subscribe-custom-topic", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/event-grid/scripts/event-grid-cli-resource-group.md", - "redirect_url": "/azure/event-grid/scripts/event-grid-cli-subscribe-custom-topic", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/concept-ml-pipelines.md", - "redirect_url": "/azure/machine-learning/concept-ml-pipelines", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/concept-model-management-and-deployment.md", - "redirect_url": "/azure/machine-learning/concept-model-management-and-deployment", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/concept-onnx.md", - "redirect_url": "/azure/machine-learning/concept-onnx", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-build-deploy-onnx.md", - "redirect_url": "/azure/machine-learning/concept-onnx", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/concept-train-machine-learning-model.md", - "redirect_url": 
"/azure/machine-learning/concept-train-machine-learning-model", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/concept-train-model-git-integration.md", - "redirect_url": "/azure/machine-learning/concept-train-model-git-integration", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/concept-workspace.md", - "redirect_url": "/azure/machine-learning/concept-workspace", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/concept-editions.md", - "redirect_url": "/azure/machine-learning/concept-workspace#wheres-enterprise", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/dsvm-data-platforms.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/dsvm-tools-data-platforms", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/dsvm-ml-data-science-tools.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/dsvm-tools-data-science", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/dsvm-deep-learning-ai-frameworks.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/dsvm-tools-deep-learning-frameworks", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/dsvm-languages.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/dsvm-tools-languages", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/dsvm-tools-explore-and-visualize.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/dsvm-tools-productivity", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/provision-deep-learning-dsvm.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/dsvm-ubuntu-intro", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-dsvm-ubuntu-intro.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/dsvm-ubuntu-intro", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-linux-dsvm-intro.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/linux-dsvm-intro", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-linux-dsvm-walkthrough.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/linux-dsvm-walkthrough", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/use-deep-learning-dsvm.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/linux-dsvm-walkthrough#deep-learning", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/deep-learning-dsvm-overview.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/overview", - "redirect_document_id": false - }, - { - "source_path_from_root": 
"/articles/machine-learning/data-science-virtual-machine/setup-sql-server-virtual-machine.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/setup-virtual-machine.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-setup-sql-server-virtual-machine.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-setup-virtual-machine.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-virtual-machine-overview.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/tutorial-1st-experiment-sdk-setup.md", - "redirect_url": "/azure/machine-learning/quickstart-create-resources", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/tutorial-1st-experiment-sdk-setup-local.md", - "redirect_url": "/azure/machine-learning/quickstart-create-resources", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/team-data-science-process/virtual-machines.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-provision-vm.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/provision-vm", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/geo-ai-dsvm-overview.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/release-notes", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/linux-dsvm-intro.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/release-notes", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/provision-geo-ai-dsvm.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/release-notes", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/reference-centos-vm.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/release-notes", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/reference-deprecation.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/release-notes", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/use-geo-ai-dsvm.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/release-notes", - "redirect_document_id": false - }, - { - "source_path_from_root": 
"/articles/machine-learning/data-science-virtual-machine/dsvm-tools-overview.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/tools-included", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/reference-windows-vm.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/tools-included", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-vm-do-ten-things.md", - "redirect_url": "/azure/machine-learning/data-science-virtual-machine/vm-do-ten-things", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-access-data.md", - "redirect_url": "/azure/machine-learning/how-to-access-data", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-assign-roles.md", - "redirect_url": "/azure/machine-learning/how-to-assign-roles", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-create-attach-compute-sdk.md", - "redirect_url": "/azure/machine-learning/how-to-attach-compute-targets", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-auto-train-forecast.md", - "redirect_url": "/azure/machine-learning/how-to-auto-train-forecast", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-auto-train-remote.md", - "redirect_url": "/azure/machine-learning/how-to-auto-train-remote", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-change-storage-access-key.md", - "redirect_url": "/azure/machine-learning/how-to-change-storage-access-key", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-configure-auto-train.md", - "redirect_url": "/azure/machine-learning/how-to-configure-auto-train", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-choose-a-dev-environment.md", - "redirect_url": "/azure/machine-learning/how-to-configure-environment", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-configure-environment.md", - "redirect_url": "/azure/machine-learning/how-to-configure-environment", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/quickstart-create-workspace-with-python.md", - "redirect_url": "/azure/machine-learning/how-to-configure-environment#local", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/quickstart-run-local-notebook.md", - "redirect_url": "/azure/machine-learning/how-to-configure-environment#local", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-consume-web-service.md", - "redirect_url": "/azure/machine-learning/how-to-consume-web-service", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-create-labeling-projects.md", - "redirect_url": "/azure/machine-learning/how-to-create-labeling-projects", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-create-your-first-pipeline.md", - "redirect_url": 
"/azure/machine-learning/how-to-create-machine-learning-pipelines", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-create-register-datasets.md", - "redirect_url": "/azure/machine-learning/how-to-create-register-datasets", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-transform-data.md", - "redirect_url": "/azure/machine-learning/how-to-create-register-datasets", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-write-data.md", - "redirect_url": "/azure/machine-learning/how-to-create-register-datasets", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-create-workspace-template.md", - "redirect_url": "/azure/machine-learning/how-to-create-workspace-template", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/tutorial-resource-manager-workspace.md", - "redirect_url": "/azure/machine-learning/how-to-create-workspace-template", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-create-your-first-pipeline.md", - "redirect_url": "/azure/machine-learning/how-to-create-your-first-pipeline", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-debug-batch-predictions.md", - "redirect_url": "/azure/machine-learning/how-to-debug-batch-predictions", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-debug-batch-predictions.md", - "redirect_url": "/azure/machine-learning/how-to-debug-parallel-run-step", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/concept-pipeline-practices-tips.md", - "redirect_url": "/azure/machine-learning/how-to-debug-pipelines", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-debug-pipelines.md", - "redirect_url": "/azure/machine-learning/how-to-debug-pipelines", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-define-task-type.md", - "redirect_url": "/azure/machine-learning/how-to-define-task-type", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-and-where.md", - "redirect_url": "/azure/machine-learning/how-to-deploy-and-where", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-to-aci.md", - "redirect_url": "/azure/machine-learning/how-to-deploy-and-where", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-to-aks.md", - "redirect_url": "/azure/machine-learning/how-to-deploy-and-where", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-to-iot.md", - "redirect_url": "/azure/machine-learning/how-to-deploy-and-where", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/publish-a-machine-learning-web-service.md", - "redirect_url": "/azure/machine-learning/how-to-deploy-and-where", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-deploy-app-service.md", - "redirect_url": 
"/azure/machine-learning/how-to-deploy-managed-online-endpoints", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-azure-container-instance.md", - "redirect_url": "/azure/machine-learning/how-to-deploy-azure-container-instance", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-azure-kubernetes-service.md", - "redirect_url": "/azure/machine-learning/how-to-deploy-azure-kubernetes-service", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-custom-docker-image.md", - "redirect_url": "/azure/machine-learning/how-to-deploy-custom-docker-image", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/tutorial-labeling.md", - "redirect_url": "/azure/machine-learning/how-to-create-image-labeling-projects", - "redirect_document_id": "false" - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-existing-model.md", - "redirect_url": "/azure/machine-learning/how-to-deploy-and-where", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-deploy-existing-model.md", - "redirect_url": "/azure/machine-learning/how-to-deploy-and-where", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-homomorphic-encryption-seal.md", - "redirect_url": "/azure/machine-learning/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/concept-accelerate-with-fpgas.md", - "redirect_url": "/azure/machine-learning/how-to-deploy-fpga-web-service", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-fpga-web-service.md", - "redirect_url": "/azure/machine-learning/how-to-deploy-fpga-web-service", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-deploy-functions.md", - "redirect_url": "/azure/machine-learning/how-to-deploy-managed-online-endpoints", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-deploy-no-code-deployment.md", - "redirect_url": "/azure/machine-learning/how-to-deploy-mlflow-models-online-endpoints", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/concept-accelerate-inferencing-with-gpus.md", - "redirect_url": "/azure/machine-learning/how-to-deploy-inferencing-gpus", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-inferencing-gpus.md", - "redirect_url": "/azure/machine-learning/how-to-deploy-inferencing-gpus", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-local-container-notebook-vm.md", - "redirect_url": "/azure/machine-learning/how-to-deploy-local-container-notebook-vm", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-designer-sample-classification-churn.md", - "redirect_url": "/azure/machine-learning/how-to-designer-sample-classification-churn", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-designer-sample-classification-predict-income.md", - "redirect_url": 
"/azure/machine-learning/how-to-designer-sample-classification-predict-income", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-enable-app-insights.md", - "redirect_url": "/azure/machine-learning/how-to-enable-app-insights", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-monitor-data-drift.md", - "redirect_url": "/azure/machine-learning/how-to-enable-data-collection", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-enable-data-collection.md", - "redirect_url": "/azure/machine-learning/how-to-enable-data-collection", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-enable-logging.md", - "redirect_url": "/azure/machine-learning/how-to-enable-logging", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-enable-virtual-network.md", - "redirect_url": "/azure/machine-learning/how-to-enable-virtual-network", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-export-delete-data.md", - "redirect_url": "/azure/machine-learning/how-to-export-delete-data", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-label-images.md", - "redirect_url": "/azure/machine-learning/how-to-label-images", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-debug-pipelines-application-insights.md", - "redirect_url": "/azure/machine-learning/how-to-log-pipelines-application-insights", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-machine-learning-interpretability.md", - "redirect_url": "/azure/machine-learning/how-to-machine-learning-interpretability", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/machine-learning-interpretability-explainability.md", - "redirect_url": "/azure/machine-learning/how-to-machine-learning-interpretability", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-machine-learning-interpretability-aml.md", - "redirect_url": "/azure/machine-learning/how-to-machine-learning-interpretability-aml", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-machine-learning-interpretability-automl.md", - "redirect_url": "/azure/machine-learning/how-to-machine-learning-interpretability-automl", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-manage-quotas.md", - "redirect_url": "/azure/machine-learning/how-to-manage-quotas", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-manage-runs.md", - "redirect_url": "/azure/machine-learning/how-to-manage-runs", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-manage-workspace.md", - "redirect_url": "/azure/machine-learning/how-to-manage-workspace", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/setup-create-workspace.md", - "redirect_url": "/azure/machine-learning/how-to-manage-workspace", - "redirect_document_id": false - }, - { - 
"source_path_from_root": "/articles/machine-learning/service/how-to-manage-workspace-cli.md", - "redirect_url": "/azure/machine-learning/how-to-manage-workspace-cli", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/concept-data-drift.md", - "redirect_url": "/azure/machine-learning/how-to-monitor-data-drift", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-monitor-data-drift.md", - "redirect_url": "/azure/machine-learning/how-to-monitor-data-drift", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-monitor-datasets.md", - "redirect_url": "/azure/machine-learning/how-to-monitor-datasets", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-monitor-tensorboard.md", - "redirect_url": "/azure/machine-learning/how-to-monitor-tensorboard", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-enable-virtual-network.md", - "redirect_url": "/azure/machine-learning/how-to-network-security-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-retrain-designer.md", - "redirect_url": "/azure/machine-learning/how-to-retrain-designer", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-run-batch-predictions.md", - "redirect_url": "/azure/machine-learning/how-to-run-batch-predictions", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-run-batch-predictions-designer.md", - "redirect_url": "/azure/machine-learning/how-to-run-batch-predictions-designer", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-save-write-experiment-files.md", - "redirect_url": "/azure/machine-learning/how-to-save-write-experiment-files", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-schedule-pipelines.md", - "redirect_url": "/azure/machine-learning/how-to-schedule-pipelines", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-secure-web-service.md", - "redirect_url": "/azure/machine-learning/how-to-secure-web-service", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-algorithm-choice.md", - "redirect_url": "/azure/machine-learning/how-to-select-algorithms", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/algorithm-choice.md", - "redirect_url": "/azure/machine-learning/how-to-select-algorithms", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/studio/basics-infographic-with-algorithm-examples.md", - "redirect_url": "/azure/machine-learning/how-to-select-algorithms", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-setup-authentication.md", - "redirect_url": "/azure/machine-learning/how-to-setup-authentication", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-train-ml-models.md", - "redirect_url": "/azure/machine-learning/how-to-set-up-training-targets", - "redirect_document_id": false - }, - { - 
"source_path_from_root": "/articles/machine-learning/service/how-to-set-up-training-targets.md", - "redirect_url": "/azure/machine-learning/how-to-set-up-training-targets", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-enable-logging.md", - "redirect_url": "/azure/machine-learning/how-to-track-experiments", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-track-experiments.md", - "redirect_url": "/azure/machine-learning/how-to-track-experiments", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-train-chainer.md", - "redirect_url": "/azure/machine-learning/how-to-train-chainer", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-train-keras.md", - "redirect_url": "/azure/machine-learning/how-to-train-keras", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-train-chainer.md", - "redirect_url": "/azure/machine-learning/how-to-train-ml-models", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-train-ml-models.md", - "redirect_url": "/azure/machine-learning/how-to-train-ml-models", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-train-pytorch.md", - "redirect_url": "/azure/machine-learning/how-to-train-pytorch", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-train-scikit-learn.md", - "redirect_url": "/azure/machine-learning/how-to-train-scikit-learn", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-train-tensorflow.md", - "redirect_url": "/azure/machine-learning/how-to-train-tensorflow", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-train-with-datasets.md", - "redirect_url": "/azure/machine-learning/how-to-train-with-datasets", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-schedule-pipelines.md", - "redirect_url": "/azure/machine-learning/how-to-trigger-published-pipeline", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-troubleshoot-deployment.md", - "redirect_url": "/azure/machine-learning/how-to-troubleshoot-deployment", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-tune-hyperparameters.md", - "redirect_url": "/azure/machine-learning/how-to-tune-hyperparameters", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-understand-accuracy-metrics.md", - "redirect_url": "/azure/machine-learning/how-to-understand-automated-ml", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-understand-automated-ml.md", - "redirect_url": "/azure/machine-learning/how-to-understand-automated-ml", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-create-portal-experiments.md", - "redirect_url": "/azure/machine-learning/how-to-use-automated-ml-for-ml-models", - "redirect_document_id": false - }, - { - "source_path_from_root": 
"/articles/machine-learning/service/how-to-use-environments.md", - "redirect_url": "/azure/machine-learning/how-to-use-environments", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/concept-event-grid-integration.md", - "redirect_url": "/azure/machine-learning/how-to-use-event-grid", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-use-event-grid.md", - "redirect_url": "/azure/machine-learning/how-to-use-event-grid", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-use-mlflow.md", - "redirect_url": "/azure/machine-learning/how-to-use-mlflow", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-run-batch-predictions.md", - "redirect_url": "/azure/machine-learning/how-to-use-parallel-run-step", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-use-secrets-in-runs.md", - "redirect_url": "/azure/machine-learning/how-to-use-secrets-in-runs", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-version-track-datasets.md", - "redirect_url": "/azure/machine-learning/how-to-version-track-datasets", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/services.md", - "redirect_url": "/azure/machine-learning/index", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/monitor-azure-machine-learning.md", - "redirect_url": "/azure/machine-learning/monitor-azure-machine-learning", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/monitor-resource-reference.md", - "redirect_url": "/azure/machine-learning/monitor-resource-reference", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-build-deploy-image-classification-models.md", - "redirect_url": "/azure/machine-learning/overview-what-happened-to-workbench", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/concept-azure-machine-learning-architecture.md", - "redirect_url": "/azure/machine-learning/concept-azure-machine-learning-v2", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-build-deploy-image-similarity-models.md", - "redirect_url": "/azure/machine-learning/overview-what-happened-to-workbench", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-build-deploy-object-detection-models.md", - "redirect_url": "/azure/machine-learning/overview-what-happened-to-workbench", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-build-deploy-text-classification-models.md", - "redirect_url": "/azure/machine-learning/overview-what-happened-to-workbench", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-improve-accuracy-for-computer-vision-models.md", - "redirect_url": "/azure/machine-learning/overview-what-happened-to-workbench", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-migrate.md", - "redirect_url": "/azure/machine-learning/overview-what-happened-to-workbench", - 
"redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/overview-what-happened-to-workbench.md", - "redirect_url": "/azure/machine-learning/overview-what-happened-to-workbench", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/overview-what-is-machine-learning.md", - "redirect_url": "/azure/machine-learning/overview-what-is-machine-learning", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/compare-azure-ml-to-studio-classic.md", - "redirect_url": "/azure/machine-learning/overview-what-is-machine-learning-studio#ml-studio-classic-vs-azure-machine-learning-studio", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/what-is-ml-studio.md", - "redirect_url": "/azure/machine-learning/overview-what-is-machine-learning-studio#ml-studio-classic-vs-azure-machine-learning-studio", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/quickstart-get-started-with-cli.md", - "redirect_url": "/azure/machine-learning/reference-azure-machine-learning-cli", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/reference-azure-machine-learning-cli.md", - "redirect_url": "/azure/machine-learning/reference-azure-machine-learning-cli", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/resource-known-issues.md", - "redirect_url": "/azure/machine-learning/how-to-debug-visual-studio-code", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/designer-sample-datasets.md", - "redirect_url": "/azure/machine-learning/sample-designer-datasets", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-designer-sample-classification-churn.md", - "redirect_url": "/azure/machine-learning/samples-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-designer-sample-classification-credit-risk-cost-sensitive.md", - "redirect_url": "/azure/machine-learning/samples-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-designer-sample-classification-flight-delay.md", - "redirect_url": "/azure/machine-learning/samples-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-designer-sample-classification-predict-income.md", - "redirect_url": "/azure/machine-learning/samples-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-designer-sample-regression-automobile-price-basic.md", - "redirect_url": "/azure/machine-learning/samples-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-designer-sample-regression-automobile-price-compare-algorithms.md", - "redirect_url": "/azure/machine-learning/samples-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-designer-sample-text-classification.md", - "redirect_url": "/azure/machine-learning/samples-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-ui-sample-classification-predict-flight-delay.md", - "redirect_url": 
"/azure/machine-learning/samples-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-ui-sample-regression-predict-automobile-price-basic.md", - "redirect_url": "/azure/machine-learning/samples-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/sample-designer-datasets.md", - "redirect_url": "/azure/machine-learning/samples-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-designer-sample-classification-credit-risk-cost-sensitive.md", - "redirect_url": "/azure/machine-learning/samples-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-designer-sample-classification-flight-delay.md", - "redirect_url": "/azure/machine-learning/samples-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-designer-sample-regression-automobile-price-basic.md", - "redirect_url": "/azure/machine-learning/samples-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-designer-sample-regression-automobile-price-compare-algorithms.md", - "redirect_url": "/azure/machine-learning/samples-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-designer-sample-text-classification.md", - "redirect_url": "/azure/machine-learning/samples-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-ui-sample-classification-predict-churn.md", - "redirect_url": "/azure/machine-learning/samples-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-ui-sample-classification-predict-credit-risk-cost-sensitive.md", - "redirect_url": "/azure/machine-learning/samples-designer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-ui-sample-classification-predict-flight-delay.md", - "redirect_url": "/azure/machine-learning/samples-designer", + "source_path_from_root": "/articles/event-grid/cli-samples.md", + "redirect_url": "/azure/event-grid/scripts/event-grid-cli-subscribe-custom-topic", "redirect_document_id": true }, { - "source_path_from_root": "/articles/machine-learning/service/how-to-ui-sample-regression-predict-automobile-price-basic.md", - "redirect_url": "/azure/machine-learning/samples-designer", + "source_path_from_root": "/articles/event-grid/scripts/event-grid-cli-azure-subscription.md", + "redirect_url": "/azure/event-grid/scripts/event-grid-cli-subscribe-custom-topic", "redirect_document_id": false }, { - "source_path_from_root": "/articles/machine-learning/service/how-to-ui-sample-regression-predict-automobile-price-compare-algorithms.md", - "redirect_url": "/azure/machine-learning/samples-designer", + "source_path_from_root": "/articles/event-grid/scripts/event-grid-cli-create-custom-topic.md", + "redirect_url": "/azure/event-grid/scripts/event-grid-cli-subscribe-custom-topic", "redirect_document_id": false }, { - "source_path_from_root": "/articles/machine-learning/service/how-to-ui-sample-text-classification.md", - "redirect_url": "/azure/machine-learning/samples-designer", + "source_path_from_root": "/articles/event-grid/scripts/event-grid-cli-resource-group-filter.md", + 
"redirect_url": "/azure/event-grid/scripts/event-grid-cli-subscribe-custom-topic", "redirect_document_id": false }, { - "source_path_from_root": "/articles/machine-learning/service/samples-notebooks.md", - "redirect_url": "/azure/machine-learning/samples-notebooks", - "redirect_document_id": true + "source_path_from_root": "/articles/event-grid/scripts/event-grid-cli-blob.md", + "redirect_url": "/azure/event-grid/scripts/event-grid-cli-subscribe-custom-topic", + "redirect_document_id": false }, { - "source_path_from_root": "/articles/machine-learning/concept-managed-endpoints.md", - "redirect_url": "/azure/machine-learning/concept-endpoints", + "source_path_from_root": "/articles/event-grid/scripts/event-grid-cli-resource-group.md", + "redirect_url": "/azure/event-grid/scripts/event-grid-cli-subscribe-custom-topic", "redirect_document_id": false }, + { "source_path_from_root": "/articles/notebooks/use-machine-learning-services-jupyter-notebooks.md", "redirect_url": "/azure/machine-learning/samples-notebooks", "redirect_document_id": false }, - { - "source_path_from_root": "/articles/machine-learning/team-data-science-process/project-execution.md", - "redirect_url": "/azure/architecture/data-science-process/agile-development", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-apps-anomaly-detection-api.md", - "redirect_url": "/azure/architecture/data-science-process/apps-anomaly-detection-api", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-automated-data-pipeline-cheat-sheet.md", - "redirect_url": "/azure/architecture/data-science-process/automated-data-pipeline-cheat-sheet", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/cortana-analytics-architecture-predictive-maintenance.md", - "redirect_url": "/azure/architecture/data-science-process/cortana-analytics-architecture-predictive-maintenance", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/cortana-analytics-playbook-predictive-maintenance.md", - "redirect_url": "/azure/architecture/data-science-process/cortana-analytics-playbook-predictive-maintenance", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/cortana-analytics-playbook-vehicle-telemetry.md", - "redirect_url": "/azure/architecture/data-science-process/cortana-analytics-playbook-vehicle-telemetry", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/cortana-analytics-playbook-vehicle-telemetry-deep-dive.md", - "redirect_url": "/azure/architecture/data-science-process/cortana-analytics-playbook-vehicle-telemetry-deep-dive", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/cortana-analytics-playbook-vehicle-telemetry-powerbi.md", - "redirect_url": "/azure/architecture/data-science-process/cortana-analytics-playbook-vehicle-telemetry-powerbi", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/cortana-analytics-technical-guide-predictive-maintenance.md", - "redirect_url": "/azure/architecture/data-science-process/cortana-analytics-technical-guide-predictive-maintenance", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-create-features.md", - "redirect_url": 
"/azure/architecture/data-science-process/create-features", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-create-features-hive.md", - "redirect_url": "/azure/architecture/data-science-process/create-features-hive", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-create-features-sql-server.md", - "redirect_url": "/azure/architecture/data-science-process/create-features-sql-server", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-process-data-blob.md", - "redirect_url": "/azure/architecture/data-science-process/data-blob", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-process-data-lake-walkthrough.md", - "redirect_url": "/azure/architecture/data-science-process/data-lake-walkthrough", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-environment-setup.md", - "redirect_url": "/azure/architecture/data-science-process/environment-setup", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-explore-data.md", - "redirect_url": "/azure/architecture/data-science-process/explore-data", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-create-features-blob.md", - "redirect_url": "/azure/architecture/data-science-process/explore-data-blob", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-explore-data-blob.md", - "redirect_url": "/azure/architecture/data-science-process/explore-data-blob", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/team-data-science-process/create-features-blob.md", - "redirect_url": "/azure/architecture/data-science-process/explore-data-blob", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-explore-data-hive-tables.md", - "redirect_url": "/azure/architecture/data-science-process/explore-data-hive-tables", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-explore-data-sql-server.md", - "redirect_url": "/azure/architecture/data-science-process/explore-data-sql-server", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-process-hive-criteo-walkthrough.md", - "redirect_url": "/azure/architecture/data-science-process/hive-criteo-walkthrough", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-process-hive-walkthrough.md", - "redirect_url": "/azure/architecture/data-science-process/hive-walkthrough", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-ingest-data.md", - "redirect_url": "/azure/architecture/data-science-process/ingest-data", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-process-lifecycle.md", - "redirect_url": "/azure/architecture/data-science-process/lifecycle", - 
"redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-move-azure-blob.md", - "redirect_url": "/azure/architecture/data-science-process/move-azure-blob", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-move-data-to-azure-blob-using-azcopy.md", - "redirect_url": "/azure/architecture/data-science-process/move-data-to-azure-blob-using-azcopy", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-move-data-to-azure-blob-using-azure-storage-explorer.md", - "redirect_url": "/azure/architecture/data-science-process/move-data-to-azure-blob-using-azure-storage-explorer", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-move-data-to-azure-blob-using-python.md", - "redirect_url": "/azure/architecture/data-science-process/move-data-to-azure-blob-using-python", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-move-data-to-azure-blob-using-ssis.md", - "redirect_url": "/azure/architecture/data-science-process/move-data-to-azure-blob-using-ssis", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-move-hive-tables.md", - "redirect_url": "/azure/architecture/data-science-process/move-hive-tables", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-move-sql-azure.md", - "redirect_url": "/azure/architecture/data-science-process/move-sql-azure", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-move-sql-azure-adf.md", - "redirect_url": "/azure/architecture/data-science-process/move-sql-azure-adf", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-move-sql-server-virtual-machine.md", - "redirect_url": "/azure/architecture/data-science-process/move-sql-server-virtual-machine", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-process-overview.md", - "redirect_url": "/azure/architecture/data-science-process/overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-parallel-load-sql-partitioned-tables.md", - "redirect_url": "/azure/architecture/data-science-process/parallel-load-sql-partitioned-tables", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-plan-sample-scenarios.md", - "redirect_url": "/azure/architecture/data-science-process/plan-sample-scenarios", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-plan-your-environment.md", - "redirect_url": "/azure/architecture/data-science-process/plan-your-environment", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/team-data-science-process/cortana-analytics-architecture-predictive-maintenance.md", - "redirect_url": "/azure/architecture/data-science-process/predictive-maintenance-architecture", - "redirect_document_id": false - }, - { - "source_path_from_root": 
"/articles/machine-learning/team-data-science-process/cortana-analytics-playbook-predictive-maintenance.md", - "redirect_url": "/azure/architecture/data-science-process/predictive-maintenance-playbook", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/team-data-science-process/cortana-analytics-technical-guide-predictive-maintenance.md", - "redirect_url": "/azure/architecture/data-science-process/predictive-maintenance-technical-guide", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-prepare-data.md", - "redirect_url": "/azure/architecture/data-science-process/prepare-data", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-python-data-access.md", - "redirect_url": "/azure/architecture/data-science-process/python-data-access", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-sample-data.md", - "redirect_url": "/azure/architecture/data-science-process/sample-data", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-sample-data-blob.md", - "redirect_url": "/azure/architecture/data-science-process/sample-data-blob", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-sample-data-hive.md", - "redirect_url": "/azure/architecture/data-science-process/sample-data-hive", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-sample-data-sql-server.md", - "redirect_url": "/azure/architecture/data-science-process/sample-data-sql-server", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-process-scala-walkthrough.md", - "redirect_url": "/azure/architecture/data-science-process/scala-walkthrough", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-select-features.md", - "redirect_url": "/azure/architecture/data-science-process/select-features", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-spark-advanced-data-exploration-modeling.md", - "redirect_url": "/azure/architecture/data-science-process/spark-advanced-data-exploration-modeling", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-spark-data-exploration-modeling.md", - "redirect_url": "/azure/architecture/data-science-process/spark-data-exploration-modeling", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-spark-model-consumption.md", - "redirect_url": "/azure/architecture/data-science-process/spark-model-consumption", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-spark-overview.md", - "redirect_url": "/azure/architecture/data-science-process/spark-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-process-sqldw-walkthrough.md", - "redirect_url": "/azure/architecture/data-science-process/sqldw-walkthrough", - 
"redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-process-sql-server-virtual-machine.md", - "redirect_url": "/azure/architecture/data-science-process/sql-server-virtual-machine", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-process-sql-walkthrough.md", - "redirect_url": "/azure/architecture/data-science-process/sql-walkthrough", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-process-walkthroughs.md", - "redirect_url": "/azure/architecture/data-science-process/walkthroughs", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/team-data-science-process/isic-image-classification.md", - "redirect_url": "/azure/architecture/data-science-process/walkthroughs", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/team-data-science-process/predict-twitter-sentiment.md", - "redirect_url": "/azure/architecture/data-science-process/walkthroughs", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/team-data-science-process/predict-twitter-sentiment-amltextpackage.md", - "redirect_url": "/azure/architecture/data-science-process/walkthroughs", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/team-data-science-process/walkthroughs-aml-with-tdsp.md", - "redirect_url": "/azure/architecture/data-science-process/walkthroughs", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-process-walkthroughs-azure-data-lake.md", - "redirect_url": "/azure/architecture/data-science-process/walkthroughs-azure-data-lake", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-process-walkthroughs-hdinsight-hadoop.md", - "redirect_url": "/azure/architecture/data-science-process/walkthroughs-hdinsight-hadoop", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-process-walkthroughs-spark.md", - "redirect_url": "/azure/architecture/data-science-process/walkthroughs-spark", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-process-walkthroughs-sql-data-warehouse.md", - "redirect_url": "/azure/architecture/data-science-process/walkthroughs-sql-data-warehouse", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/data-science-process-walkthroughs-sql-server.md", - "redirect_url": "/azure/architecture/data-science-process/walkthroughs-sql-server", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/quickstart-get-started.md", - "redirect_url": "/azure/machine-learning/tutorial-1st-experiment-sdk-setup", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/quickstart-run-cloud-notebook.md", - "redirect_url": "/azure/machine-learning/tutorial-1st-experiment-sdk-setup", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/tutorial-1st-experiment-sdk-setup.md", - "redirect_url": "/azure/machine-learning/tutorial-1st-experiment-sdk-setup", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/machine-learning/service/tutorial-1st-experiment-sdk-train.md", - "redirect_url": "/azure/machine-learning/tutorial-1st-experiment-sdk-train", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/tutorial-1st-r-experiment.md", - "redirect_url": "https://github.com/Azure/azureml-sdk-for-r", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/tutorial-1st-r-experiment.md", - "redirect_url": "https://github.com/Azure/azureml-sdk-for-r", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/tutorial-auto-train-models.md", - "redirect_url": "/azure/machine-learning/tutorial-auto-train-models", - "redirect_document_id": true - }, - - { - "source_path_from_root": "/articles/machine-learning/service/tutorial-data-prep.md", - "redirect_url": "/azure/machine-learning/tutorial-auto-train-models", - "redirect_document_id": false - }, + { "source_path_from_root": "/articles/open-datasets/tutorial-opendatasets-automl.md", "redirect_url": "/azure/machine-learning/tutorial-auto-train-models", "redirect_document_id": false }, - { - "source_path_from_root": "/articles/machine-learning/service/tutorial-deploy-models-with-aml.md", - "redirect_url": "/azure/machine-learning/tutorial-deploy-models-with-aml", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/tutorial-designer-automobile-price-deploy.md", - "redirect_url": "/azure/machine-learning/tutorial-designer-automobile-price-deploy", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/ui-tutorial-automobile-price-deploy.md", - "redirect_url": "/azure/machine-learning/tutorial-designer-automobile-price-deploy", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/tutorial-designer-automobile-price-train-score.md", - "redirect_url": "/azure/machine-learning/tutorial-designer-automobile-price-train-score", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/ui-tutorial-automobile-price-train-score.md", - "redirect_url": "/azure/machine-learning/tutorial-designer-automobile-price-train-score", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/tutorial-first-experiment-automated-ml.md", - "redirect_url": "/azure/machine-learning/tutorial-first-experiment-automated-ml", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-label-images.md", - "redirect_url": "/azure/machine-learning/how-to-label-data", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-use-parallel-run-step.md", - "redirect_url": "/azure/machine-learning/tutorial-pipeline-batch-scoring-classification", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/reference-azure-machine-learning-cli.md", - "redirect_url": "/azure/machine-learning/v1/reference-azure-machine-learning-cli", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/reference-pipeline-yaml.md", - "redirect_url": "/azure/machine-learning/v1/reference-pipeline-yaml", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-deploy-azure-container-instance.md", - "redirect_url": 
"/azure/machine-learning/v1/how-to-deploy-azure-container-instance", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-deploy-profile-model.md", - "redirect_url": "/azure/machine-learning/v1/how-to-deploy-profile-model", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-deploy-azure-kubernetes-service.md", - "redirect_url": "/azure/machine-learning/v1/how-to-deploy-azure-kubernetes-service", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-deploy-mlflow-models.md", - "redirect_url": "/azure/machine-learning/v1/how-to-deploy-mlflow-models", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-use-mlflow.md", - "redirect_url": "/azure/machine-learning/v1/how-to-use-mlflow", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/tutorial-pipeline-batch-scoring-classification.md", - "redirect_url": "/azure/machine-learning/tutorial-pipeline-batch-scoring-classification", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-vscode-tools.md", - "redirect_url": "/azure/machine-learning/tutorial-setup-vscode-extension", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-vscode-train-deploy.md", - "redirect_url": "/azure/machine-learning/tutorial-setup-vscode-extension", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/tutorial-train-deploy-model-cli.md", - "redirect_url": "/azure/machine-learning/how-to-train-cli", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/tutorial-train-deploy-model-cli.md", - "redirect_url": "/azure/machine-learning/how-to-train-cli", - "redirect_document_id": "false" - }, - { - "source_path_from_root": "/articles/machine-learning/service/tutorial-train-models-with-aml.md", - "redirect_url": "/azure/machine-learning/tutorial-train-models-with-aml", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/tutorial-pipeline-batch-scoring-classification.md", - "redirect_url": "/azure/machine-learning/tutorial-pipeline-python-sdk", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/how-to-attach-compute-targets.md", - "redirect_url": "/azure/machine-learning/v1/how-to-attach-compute-targets", - " redirect_document_id": true - }, + { "source_path_from_root": "/articles/cognitive-services/QnAMaker/reference-precise-answering.md", "redirect_url": "/azure/cognitive-services/language/custom-question-answering/concepts/precise-answering", @@ -21984,6 +19780,16 @@ "redirect_url": "/azure/scheduler/migrate-from-scheduler-to-logic-apps", "redirect_document_id": "" }, + { + "source_path_from_root": "/articles/search/cognitive-search-quickstart-ocr.md", + "redirect_url": "/azure/search/cognitive-search-quickstart-blob", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/search/knowledge-store-view-storage-explorer.md", + "redirect_url": "/azure/search/knowledge-store-create-portal#view-kstore", + "redirect_document_id": false + }, { "source_path_from_root": "/articles/search/cognitive-search-resources-documentation.md", "redirect_url": "/azure/search/cognitive-search-concept-intro", @@ -25149,11 +22955,7 @@ 
"redirect_url": "/azure/storage/blobs/storage-properties-metadata", "redirect_document_id": true }, - { - "source_path_from_root": "/articles/machine-learning/team-data-science-process/move-data-to-azure-blob-using-python.md", - "redirect_url": "/azure/storage/blobs/storage-python-how-to-use-blob-storage", - "redirect_document_id": false - }, + { "source_path_from_root": "/articles/storage/storage-python-how-to-use-blob-storage.md", "redirect_url": "/azure/storage/blobs/storage-python-how-to-use-blob-storage", @@ -25924,11 +23726,7 @@ "redirect_url": "/azure/storage/common/storage-use-azcopy-linux", "redirect_document_id": true }, - { - "source_path_from_root": "/articles/machine-learning/team-data-science-process/move-data-to-azure-blob-using-azcopy.md", - "redirect_url": "/azure/storage/common/storage-use-azcopy-v10", - "redirect_document_id": false - }, + { "source_path_from_root": "/articles/storage/common/storage-use-azcopy.md", "redirect_url": "/azure/storage/common/storage-use-azcopy-v10", @@ -27389,6 +25187,11 @@ "redirect_url": "/azure/web-application-firewall/afds/afds-overview", "redirect_document_id": false }, + { + "source_path_from_root": "/articles/web-application-firewall/waf-cdn-create-portal.md", + "redirect_url": "/azure/web-application-firewall/cdn/cdn-overview", + "redirect_document_id": false + }, { "source_path_from_root": "/articles/frontdoor/waf-faq.md", "redirect_url": "/azure/web-application-firewall/afds/waf-faq", @@ -31599,26 +29402,7 @@ "redirect_url": "/previous-versions/azure/virtual-network/virtual-networks-using-network-configuration-file", "redirect_document_id": false }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-explore-prepare-data.md", - "redirect_url": "/python/api/azureml-core/azureml.core.dataset.dataset", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-manage-dataset-definitions.md", - "redirect_url": "/python/api/azureml-core/azureml.core.dataset", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-create-dataset-snapshots.md", - "redirect_url": "/python/api/azureml-core/azureml.data.dataset_snapshot.datasetsnapshot", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/service/how-to-data-prep.md", - "redirect_url": "/python/api/overview/azure/dataprep/intro?view=azure-dataprep-py", - "redirect_document_id": false - }, + { "source_path_from_root": "/articles/python-how-to-install.md", "redirect_url": "/python/azure/python-sdk-azure-install", @@ -32264,11 +30048,7 @@ "redirect_url": "create-manage-projects", "redirect_document_id": false }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-the-cortana-analytics-process.md", - "redirect_url": "data-science-process-overview", - "redirect_document_id": false - }, + { "source_path_from_root": "/articles/migrate/how-to-tag-v-center.md", "redirect_url": "how-to-create-a-group", @@ -32409,26 +30189,7 @@ "redirect_url": "https://azure.microsoft.com/global-infrastructure/services/?products=active-directory-ds", "redirect_document_id": false }, - { - "source_path_from_root": "/articles/machine-learning/cortana-intelligence-appsource-evaluation-tool.md", - "redirect_url": "https://azure.microsoft.com/overview/ai-platform/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/cortana-intelligence-appsource-publishing-guide.md", - 
"redirect_url": "https://azure.microsoft.com/overview/ai-platform/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/team-data-science-process/cortana-intelligence-appsource-evaluation-tool.md", - "redirect_url": "https://azure.microsoft.com/overview/ai-platform/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/team-data-science-process/cortana-intelligence-appsource-publishing-guide.md", - "redirect_url": "https://azure.microsoft.com/overview/ai-platform/", - "redirect_document_id": false - }, + { "source_path_from_root": "/articles/virtual-machines/linux/containers.md", "redirect_url": "https://azure.microsoft.com/overview/containers/", @@ -32499,16 +30260,7 @@ "redirect_url": "https://azure.microsoft.com/resources/whitepapers/search/?term=security&type=WhitePaperResource", "redirect_document_id": false }, - { - "source_path_from_root": "/articles/machine-learning/studio/datamarket-deprecation.md", - "redirect_url": "https://azure.microsoft.com/services/cognitive-services/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/whats-new.md", - "redirect_url": "https://azure.microsoft.com/updates/?product=machine-learning-studio", - "redirect_document_id": false - }, + { "source_path_from_root": "/articles/security/fundamentals/channel-nine.md", "redirect_url": "/teamblog/channel9joinedmicrosoftlearn/", @@ -32619,86 +30371,7 @@ "redirect_url": "/azure/active-directory/fundamentals/support-help-options", "redirect_document_id": false }, - { - "source_path_from_root": "/articles/machine-learning/team-data-science-process/cortana-analytics-playbook-vehicle-telemetry.md", - "redirect_url": "https://gallery.azure.ai/browse?s=vehicle%20telemetry%20analytics", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/team-data-science-process/cortana-analytics-playbook-vehicle-telemetry-deep-dive.md", - "redirect_url": "https://gallery.azure.ai/browse?s=vehicle%20telemetry%20analytics", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/team-data-science-process/cortana-analytics-playbook-vehicle-telemetry-powerbi.md", - "redirect_url": "https://gallery.azure.ai/browse?s=vehicle%20telemetry%20analytics", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-marketplace-faq.md", - "redirect_url": "https://gallery.cortanaintelligence.com/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-arima.md", - "redirect_url": "https://gallery.cortanaintelligence.com/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-binary-classifier.md", - "redirect_url": "https://gallery.cortanaintelligence.com/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-binomial-distribution.md", - "redirect_url": "https://gallery.cortanaintelligence.com/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-cluster-model.md", - "redirect_url": "https://gallery.cortanaintelligence.com/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-difference-in-two-proportions.md", - "redirect_url": 
"https://gallery.cortanaintelligence.com/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-forecasting-exponential-smoothing.md", - "redirect_url": "https://gallery.cortanaintelligence.com/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-lexicon-based-sentiment-analysis.md", - "redirect_url": "https://gallery.cortanaintelligence.com/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-multivariate-linear-regression.md", - "redirect_url": "https://gallery.cortanaintelligence.com/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-normal-distribution.md", - "redirect_url": "https://gallery.cortanaintelligence.com/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-retail-demand-forecasting.md", - "redirect_url": "https://gallery.cortanaintelligence.com/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-survival-analysis.md", - "redirect_url": "https://gallery.cortanaintelligence.com/", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-web-service-examples.md", - "redirect_url": "https://gallery.cortanaintelligence.com/", - "redirect_document_id": false - }, + { "source_path_from_root": "/articles/container-registry/container-registry-headers.md", "redirect_url": "https://github.com/Azure/acr/blob/master/docs/http-headers.md", @@ -33604,16 +31277,7 @@ "redirect_url": "https://portal.azure.com/#create/WordPress.WordPress", "redirect_document_id": false }, - { - "source_path_from_root": "/articles/machine-learning/service/support-for-aml-services.md", - "redirect_url": "https://social.msdn.microsoft.com/Forums/home?forum=AzureMachineLearningService", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/studio/live-chat.md", - "redirect_url": "https://social.msdn.microsoft.com/Forums/home?forum=MachineLearning", - "redirect_document_id": false - }, + { "source_path_from_root": "/articles/billing-troubleshoot-azure-sign-up-issues.md", "redirect_url": "https://support.microsoft.com/help/4042959", @@ -34004,56 +31668,7 @@ "redirect_url": "hyper-v-vmm-test-failover", "redirect_document_id": false }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-apps-anomaly-detection.md", - "redirect_url": "machine-learning-apps-anomaly-detection-api", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-connect-to-azure-machine-learning-web-service.md", - "redirect_url": "machine-learning-consume-web-services", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-recommendation-api-documentation.md", - "redirect_url": "machine-learning-datamarket-deprecation", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-recommendation-api-faq.md", - "redirect_url": "machine-learning-datamarket-deprecation", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-recommendation-api-javascript-integration.md", - 
"redirect_url": "machine-learning-datamarket-deprecation", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-recommendation-api-quick-start-guide.md", - "redirect_url": "machine-learning-datamarket-deprecation", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-recommendation-api-sample-application.md", - "redirect_url": "machine-learning-datamarket-deprecation", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-feature-selection-and-engineering.md", - "redirect_url": "machine-learning-data-science-create-features", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-publish-web-service-to-azure-marketplace.md", - "redirect_url": "machine-learning-gallery-experiments", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-webservice-deploy-a-web-service.md", - "redirect_url": "machine-learning-publish-a-machine-learning-web-service", - "redirect_document_id": false - }, + { "source_path_from_root": "/articles/migrate/concepts-collector.md", "redirect_url": "migrate-appliance", @@ -34249,11 +31864,7 @@ "redirect_url": "tutorial-assess-vmware-azure-vm", "redirect_document_id": false }, - { - "source_path_from_root": "/articles/machine-learning/service/ui-quickstart-run-experiment.md", - "redirect_url": "tutorial-designer-automobile-price-train-score", - "redirect_document_id": false - }, + { "source_path_from_root": "/articles/migrate/tutorial-prepare-hyper-v.md", "redirect_url": "tutorial-discover-hyper-v", @@ -35364,11 +32975,7 @@ "redirect_url": "/azure/cognitive-services/cognitive-services-container-support", "redirect_document_id": false }, - { - "source_path_from_root": "/articles/machine-learning/machine-learning-apps-text-analytics.md", - "redirect_url": "/azure/cognitive-services/cognitive-services-text-analytics-quick-start", - "redirect_document_id": false - }, + { "source_path_from_root": "/articles/cognitive-services/Computer-vision/QuickStarts/curl-disk.md", "redirect_url": "/azure/cognitive-services/computer-vision", @@ -35499,6 +33106,21 @@ "redirect_url": "/azure/cognitive-services/Computer-vision/overview-image-analysis", "redirect_document_id": false }, + { + "source_path_from_root": "/articles/cognitive-services/Computer-vision/Vision-API-How-to-Topics/HowToCallVisionAPI.md", + "redirect_url": "/azure/cognitive-services/Computer-vision/how-to/call-analyze-image", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Computer-vision/Vision-API-How-to-Topics/HowtoAnalyzeVideo_Vision.md", + "redirect_url": "/azure/cognitive-services/Computer-vision/how-to/analyze-video", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Computer-vision/Vision-API-How-to-Topics/call-read-api.md", + "redirect_url": "/azure/cognitive-services/Computer-vision/how-to/call-read-api", + "redirect_document_id": true + }, { "source_path_from_root": "/articles/cognitive-services/Content-Moderator/Review-Tool-User-Guide/Upload-Images.md", "redirect_url": "/azure/cognitive-services/content-moderator", @@ -37226,7 +34848,12 @@ }, { "source_path_from_root": "/articles/cognitive-services/Speech-Service/how-to-customize-speech-models.md", - "redirect_url": 
"/azure/cognitive-services/speech-service/how-to-custom-speech-choose-model", + "redirect_url": "/azure/cognitive-services/speech-service/how-to-custom-speech-create-project", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/cognitive-services/Speech-Service/how-to-custom-speech-choose-model.md", + "redirect_url": "/azure/cognitive-services/speech-service/how-to-custom-speech-create-project", "redirect_document_id": false }, { @@ -37264,6 +34891,11 @@ "redirect_url": "/azure/cognitive-services/speech-service/how-to-develop-custom-commands-application", "redirect_document_id": false }, + { + "source_path_from_root": "/articles/cognitive-services/Speech-Service/how-to-custom-commands-integrate-remote-skills.md", + "redirect_url": "/azure/cognitive-services/speech-service/custom-commands", + "redirect_document_id": false + }, { "source_path_from_root": "/articles/cognitive-services/Speech/API-Reference-REST/BingVoiceOutput.md", "redirect_url": "/azure/cognitive-services/speech-service/how-to-migrate-from-bing-speech", @@ -37959,6 +35591,11 @@ "redirect_url": "/azure/cognitive-services/Speech-Service/custom-speech-overview", "redirect_document_id": false }, + { + "source_path_from_root": "/articles/cognitive-services/Speech-Service/how-to-specify-source-language.md", + "redirect_url": "/azure/cognitive-services/Speech-Service/how-to-recognize-speech", + "redirect_document_id": false + }, { "source_path_from_root": "/articles/cognitive-services/entitylinking/GettingStarted.md", "redirect_url": "/azure/cognitive-services/text-analytics", @@ -39889,6 +37526,111 @@ "redirect_url": "/azure/cognitive-services/form-recognizer/encrypt-data-at-rest", "redirect_document_id": false }, + { + "source_path_from_root": "/articles/cognitive-services/Face/Overview.md", + "redirect_url": "/azure/cognitive-services/computer-vision/overview-identity", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/ReleaseNotes.md", + "redirect_url": "/azure/cognitive-services/computer-vision/whats-new", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/build-enrollment-app.md", + "redirect_url": "/azure/cognitive-services/computer-vision/Tutorials/build-enrollment-app", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/encrypt-data-at-rest.md", + "redirect_url": "/azure/cognitive-services/computer-vision/identity-encrypt-data-at-rest", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/enrollment-overview.md", + "redirect_url": "/azure/cognitive-services/computer-vision/enrollment-overview", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/index.md", + "redirect_url": "/azure/cognitive-services/computer-vision/index-identity", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/APIReference.md", + "redirect_url": "/azure/cognitive-services/computer-vision/identity-api-reference", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/Face-API-How-to-Topics/HowtoAnalyzeVideo_face.md", + "redirect_url": "/azure/cognitive-services/computer-vision/how-to/identity-analyze-video", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/Face-API-How-to-Topics/HowtoDetectFacesinImage.md", + "redirect_url": 
"/azure/cognitive-services/computer-vision/how-to/identity-detect-faces", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/Face-API-How-to-Topics/find-similar faces.md", + "redirect_url": "/azure/cognitive-services/computer-vision/how-to/find-similar-faces", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-add-faces.md", + "redirect_url": "/azure/cognitive-services/computer-vision/how-to/add-faces", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-migrate-face-data.md", + "redirect_url": "/azure/cognitive-services/computer-vision/how-to/migrate-face-data", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-mitigate-latency.md", + "redirect_url": "/azure/cognitive-services/computer-vision/how-to/mitigate-latency", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-use-headpose.md", + "redirect_url": "/azure/cognitive-services/computer-vision/how-to/use-headpose", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-use-large-scale.md", + "redirect_url": "/azure/cognitive-services/computer-vision/how-to/use-large-scale", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/Face-API-How-to-Topics/specity-detection-model.md", + "redirect_url": "/azure/cognitive-services/computer-vision/how-to/specify-detection-model", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/Face-API-How-to-Topics/specify-recognition-model.md", + "redirect_url": "/azure/cognitive-services/computer-vision/how-to/specify-recognition-model", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/Face-API-How-to-Topics/use-persondirectory.md", + "redirect_url": "/azure/cognitive-services/computer-vision/how-to/use-persondirectory", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/QuickStarts/client-libraries.md", + "redirect_url": "/azure/cognitive-services/computer-vision/quickstarts-sdk/identity-client-library", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/concepts/face-detection.md", + "redirect_url": "/azure/cognitive-services/computer-vision/concept-face-detection", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/cognitive-services/Face/concepts/face-recognition.md", + "redirect_url": "/azure/cognitive-services/computer-vision/concept-face-recognition", + "redirect_document_id": true + }, { "source_path_from_root": "/articles/azure-monitor/learn/tutorial-response.md", "redirect_url": "/azure/azure-monitor/alerts/tutorial-response", @@ -41189,11 +38931,7 @@ "redirect_url": "/azure/cognitive-services/form-recognizer/label-tool", "redirect_document_id": false }, - { - "source_path_from_root": "/articles/machine-learning/tutorial-setup-vscode-extension.md", - "redirect_url": "/azure/machine-learning/how-to-setup-vs-code", - "redirect_document_id": false - }, + { "source_path_from_root": "/articles/communication-services/concepts/ui-framework/ui-sdk-features.md", "redirect_url": 
"/azure/communication-services/concepts/ui-library/ui-library-use-cases", @@ -41294,381 +39032,13 @@ "redirect_url": "/azure/dms/tutorial-mysql-azure-mysql-offline-portal", "redirect_document_id": false }, - { - "source_path": "articles/machine-learning/team-data-science-process/context/ml-context.yml", - "redirect_url": "/azure/architecture/data-science-process/context/ml-context", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/agile-development.md", - "redirect_url": "/azure/architecture/data-science-process/agile-development", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/apps-anomaly-detection-api.md", - "redirect_url": "/azure/architecture/data-science-process/apps-anomaly-detection-api", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/automated-data-pipeline-cheat-sheet.md", - "redirect_url": "/azure/architecture/data-science-process/automated-data-pipeline-cheat-sheet", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/ci-cd-flask.md", - "redirect_url": "/azure/architecture/data-science-process/ci-cd-flask", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/code-test.md", - "redirect_url": "/azure/architecture/data-science-process/code-test", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/collaborative-coding-with-git.md", - "redirect_url": "/azure/architecture/data-science-process/collaborative-coding-with-git", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/create-features-hive.md", - "redirect_url": "/azure/architecture/data-science-process/create-features-hive", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/create-features-sql-server.md", - "redirect_url": "/azure/architecture/data-science-process/create-features-sql-server", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/create-features.md", - "redirect_url": "/azure/architecture/data-science-process/create-features", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/data-blob.md", - "redirect_url": "/azure/architecture/data-science-process/data-blob", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/data-lake-walkthrough.md", - "redirect_url": "/azure/architecture/data-science-process/data-lake-walkthrough", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/deploy-models-in-production.md", - "redirect_url": "/azure/architecture/data-science-process/deploy-models-in-production", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/environment-setup.md", - "redirect_url": "/azure/architecture/data-science-process/environment-setup", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/execute-data-science-tasks.md", - "redirect_url": "/azure/architecture/data-science-process/execute-data-science-tasks", - "redirect_document_id": false - }, - { - "source_path": 
"articles/machine-learning/team-data-science-process/explore-data-blob.md", - "redirect_url": "/azure/architecture/data-science-process/explore-data-blob", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/explore-data-hive-tables.md", - "redirect_url": "/azure/architecture/data-science-process/explore-data-hive-tables", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/explore-data-sql-server.md", - "redirect_url": "/azure/architecture/data-science-process/explore-data-sql-server", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/explore-data.md", - "redirect_url": "/azure/architecture/data-science-process/explore-data", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/group-manager-tasks.md", - "redirect_url": "/azure/architecture/data-science-process/group-manager-tasks", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/hive-criteo-walkthrough.md", - "redirect_url": "/azure/architecture/data-science-process/hive-criteo-walkthrough", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/hive-walkthrough.md", - "redirect_url": "/azure/architecture/data-science-process/hive-walkthrough", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/ingest-data.md", - "redirect_url": "/azure/architecture/data-science-process/ingest-data", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/lifecycle-acceptance.md", - "redirect_url": "/azure/architecture/data-science-process/lifecycle-acceptance", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/lifecycle-business-understanding.md", - "redirect_url": "/azure/architecture/data-science-process/lifecycle-business-understanding", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/lifecycle-data.md", - "redirect_url": "/azure/architecture/data-science-process/lifecycle-data", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/lifecycle-deployment.md", - "redirect_url": "/azure/architecture/data-science-process/lifecycle-deployment", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/lifecycle-modeling.md", - "redirect_url": "/azure/architecture/data-science-process/lifecycle-modeling", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/lifecycle.md", - "redirect_url": "/azure/architecture/data-science-process/lifecycle", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/move-azure-blob.md", - "redirect_url": "/azure/architecture/data-science-process/move-azure-blob", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/move-data-to-azure-blob-using-azure-storage-explorer.md", - "redirect_url": "/azure/architecture/data-science-process/move-data-to-azure-blob-using-azure-storage-explorer", - "redirect_document_id": false - }, - { - "source_path": 
"articles/machine-learning/team-data-science-process/move-data-to-azure-blob-using-ssis.md", - "redirect_url": "/azure/architecture/data-science-process/move-data-to-azure-blob-using-ssis", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/move-hive-tables.md", - "redirect_url": "/azure/architecture/data-science-process/move-hive-tables", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/move-sql-azure-adf.md", - "redirect_url": "/azure/architecture/data-science-process/move-sql-azure-adf", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/move-sql-azure.md", - "redirect_url": "/azure/architecture/data-science-process/move-sql-azure", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/move-sql-server-virtual-machine.md", - "redirect_url": "/azure/architecture/data-science-process/move-sql-server-virtual-machine", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/overview.md", - "redirect_url": "/azure/architecture/data-science-process/overview", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/parallel-load-sql-partitioned-tables.md", - "redirect_url": "/azure/architecture/data-science-process/parallel-load-sql-partitioned-tables", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/plan-sample-scenarios.md", - "redirect_url": "/azure/architecture/data-science-process/plan-sample-scenarios", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/plan-your-environment.md", - "redirect_url": "/azure/architecture/data-science-process/plan-your-environment", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/platforms-and-tools.md", - "redirect_url": "/azure/architecture/data-science-process/platforms-and-tools", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/predictive-maintenance-architecture.md", - "redirect_url": "/azure/architecture/data-science-process/predictive-maintenance-architecture", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/predictive-maintenance-playbook.md", - "redirect_url": "/azure/architecture/data-science-process/predictive-maintenance-playbook", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/predictive-maintenance-technical-guide.md", - "redirect_url": "/azure/architecture/data-science-process/predictive-maintenance-technical-guide", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/prepare-data.md", - "redirect_url": "/azure/architecture/data-science-process/prepare-data", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/project-ic-tasks.md", - "redirect_url": "/azure/architecture/data-science-process/project-ic-tasks", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/project-lead-tasks.md", - "redirect_url": 
"/azure/architecture/data-science-process/project-lead-tasks", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/python-data-access.md", - "redirect_url": "/azure/architecture/data-science-process/python-data-access", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/roles-tasks.md", - "redirect_url": "/azure/architecture/data-science-process/roles-tasks", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/sample-data-blob.md", - "redirect_url": "/azure/architecture/data-science-process/sample-data-blob", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/sample-data-hive.md", - "redirect_url": "/azure/architecture/data-science-process/sample-data-hive", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/sample-data-sql-server.md", - "redirect_url": "/azure/architecture/data-science-process/sample-data-sql-server", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/sample-data.md", - "redirect_url": "/azure/architecture/data-science-process/sample-data", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/scala-walkthrough.md", - "redirect_url": "/azure/architecture/data-science-process/scala-walkthrough", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/select-features.md", - "redirect_url": "/azure/architecture/data-science-process/select-features", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/spark-advanced-data-exploration-modeling.md", - "redirect_url": "/azure/architecture/data-science-process/spark-advanced-data-exploration-modeling", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/spark-data-exploration-modeling.md", - "redirect_url": "/azure/architecture/data-science-process/spark-data-exploration-modeling", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/spark-model-consumption.md", - "redirect_url": "/azure/architecture/data-science-process/spark-model-consumption", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/spark-overview.md", - "redirect_url": "/azure/architecture/data-science-process/spark-overview", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/sql-server-virtual-machine.md", - "redirect_url": "/azure/architecture/data-science-process/sql-server-virtual-machine", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/sql-walkthrough.md", - "redirect_url": "/azure/architecture/data-science-process/sql-walkthrough", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/sqldw-walkthrough.md", - "redirect_url": "/azure/architecture/data-science-process/sqldw-walkthrough", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/team-data-science-process-for-data-scientists.md", - "redirect_url": 
"/azure/architecture/data-science-process/team-data-science-process-for-data-scientists", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/team-data-science-process-for-devops.md", - "redirect_url": "/azure/architecture/data-science-process/team-data-science-process-for-devops", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/team-data-science-process-project-templates.md", - "redirect_url": "/azure/architecture/data-science-process/team-data-science-process-project-templates", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/team-lead-tasks.md", - "redirect_url": "/azure/architecture/data-science-process/team-lead-tasks", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/track-progress.md", - "redirect_url": "/azure/architecture/data-science-process/track-progress", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/walkthroughs-azure-data-lake.md", - "redirect_url": "/azure/architecture/data-science-process/walkthroughs-azure-data-lake", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/walkthroughs-hdinsight-hadoop.md", - "redirect_url": "/azure/architecture/data-science-process/walkthroughs-hdinsight-hadoop", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/walkthroughs-spark.md", - "redirect_url": "/azure/architecture/data-science-process/walkthroughs-spark", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/walkthroughs-sql-data-warehouse.md", - "redirect_url": "/azure/architecture/data-science-process/walkthroughs-sql-data-warehouse", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/walkthroughs-sql-server.md", - "redirect_url": "/azure/architecture/data-science-process/walkthroughs-sql-server", - "redirect_document_id": false - }, - { - "source_path": "articles/machine-learning/team-data-science-process/walkthroughs.md", - "redirect_url": "/azure/architecture/data-science-process/walkthroughs", - "redirect_document_id": false - }, + { "source_path": "articles/load-balancer/load-balancer-monitor-log.md", "redirect_url": "/azure/load-balancer/monitor-load-balancer", "redirect_document_id": false }, - { - "source_path": "articles/machine-learning/team-data-science-process/index.yml", - "redirect_url": "/azure/architecture/data-science-process/overview", - "redirect_document_id": false - }, + { "source_path": "articles/sentinel/tutorial-investigate-cases.md", "redirect_url": "/azure/sentinel/investigate-cases", @@ -41774,411 +39144,7 @@ "redirect_url": "/azure/virtual-network/create-public-ip-prefix-portal", "redirect_document_id": true }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/add-columns.md", - "redirect_url": "/azure/machine-learning/component-reference/add-columns", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/add-rows.md", - "redirect_url": "/azure/machine-learning/component-reference/add-rows", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/machine-learning/algorithm-module-reference/apply-image-transformation.md", - "redirect_url": "/azure/machine-learning/component-reference/apply-image-transformation", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/apply-math-operation.md", - "redirect_url": "/azure/machine-learning/component-reference/apply-math-operation", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/apply-sql-transformation.md", - "redirect_url": "/azure/machine-learning/component-reference/apply-sql-transformation", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/apply-transformation.md", - "redirect_url": "/azure/machine-learning/component-reference/apply-transformation", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/assign-data-to-clusters.md", - "redirect_url": "/azure/machine-learning/component-reference/assign-data-to-clusters", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/boosted-decision-tree-regression.md", - "redirect_url": "/azure/machine-learning/component-reference/boosted-decision-tree-regression", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/clean-missing-data.md", - "redirect_url": "/azure/machine-learning/component-reference/clean-missing-data", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/clip-values.md", - "redirect_url": "/azure/machine-learning/component-reference/clip-values", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/convert-to-csv.md", - "redirect_url": "/azure/machine-learning/component-reference/convert-to-csv", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/convert-to-dataset.md", - "redirect_url": "/azure/machine-learning/component-reference/convert-to-dataset", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/convert-to-image-directory.md", - "redirect_url": "/azure/machine-learning/component-reference/convert-to-image-directory", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/convert-to-indicator-values.md", - "redirect_url": "/azure/machine-learning/component-reference/convert-to-indicator-values", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/convert-word-to-vector.md", - "redirect_url": "/azure/machine-learning/component-reference/convert-word-to-vector", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/create-python-model.md", - "redirect_url": "/azure/machine-learning/component-reference/create-python-model", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/cross-validate-model.md", - "redirect_url": "/azure/machine-learning/component-reference/cross-validate-model", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/machine-learning/algorithm-module-reference/decision-forest-regression.md", - "redirect_url": "/azure/machine-learning/component-reference/decision-forest-regression", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/densenet.md", - "redirect_url": "/azure/machine-learning/component-reference/densenet", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/designer-error-codes.md", - "redirect_url": "/azure/machine-learning/component-reference/designer-error-codes", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/edit-metadata.md", - "redirect_url": "/azure/machine-learning/component-reference/edit-metadata", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/enter-data-manually.md", - "redirect_url": "/azure/machine-learning/component-reference/enter-data-manually", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/evaluate-model.md", - "redirect_url": "/azure/machine-learning/component-reference/evaluate-model", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/evaluate-recommender.md", - "redirect_url": "/azure/machine-learning/component-reference/evaluate-recommender", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/execute-python-script.md", - "redirect_url": "/azure/machine-learning/component-reference/execute-python-script", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/execute-r-script.md", - "redirect_url": "/azure/machine-learning/component-reference/execute-r-script", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/export-data.md", - "redirect_url": "/azure/machine-learning/component-reference/export-data", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/extract-n-gram-features-from-text.md", - "redirect_url": "/azure/machine-learning/component-reference/extract-n-gram-features-from-text", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/fast-forest-quantile-regression.md", - "redirect_url": "/azure/machine-learning/component-reference/fast-forest-quantile-regression", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/feature-hashing.md", - "redirect_url": "/azure/machine-learning/component-reference/feature-hashing", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/filter-based-feature-selection.md", - "redirect_url": "/azure/machine-learning/component-reference/filter-based-feature-selection", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/graph-search-syntax.md", - "redirect_url": "/azure/machine-learning/component-reference/graph-search-syntax", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/machine-learning/algorithm-module-reference/group-data-into-bins.md", - "redirect_url": "/azure/machine-learning/component-reference/group-data-into-bins", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/import-data.md", - "redirect_url": "/azure/machine-learning/component-reference/import-data", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/init-image-transformation.md", - "redirect_url": "/azure/machine-learning/component-reference/init-image-transformation", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/join-data.md", - "redirect_url": "/azure/machine-learning/component-reference/join-data", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/k-means-clustering.md", - "redirect_url": "/azure/machine-learning/component-reference/k-means-clustering", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/latent-dirichlet-allocation.md", - "redirect_url": "/azure/machine-learning/component-reference/latent-dirichlet-allocation", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/linear-regression.md", - "redirect_url": "/azure/machine-learning/component-reference/linear-regression", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/module-reference.md", - "redirect_url": "/azure/machine-learning/component-reference/component-reference", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/multiclass-boosted-decision-tree.md", - "redirect_url": "/azure/machine-learning/component-reference/multiclass-boosted-decision-tree", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/multiclass-decision-forest.md", - "redirect_url": "/azure/machine-learning/component-reference/multiclass-decision-forest", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/multiclass-logistic-regression.md", - "redirect_url": "/azure/machine-learning/component-reference/multiclass-logistic-regression", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/multiclass-neural-network.md", - "redirect_url": "/azure/machine-learning/component-reference/multiclass-neural-network", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/neural-network-regression.md", - "redirect_url": "/azure/machine-learning/component-reference/neural-network-regression", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/normalize-data.md", - "redirect_url": "/azure/machine-learning/component-reference/normalize-data", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/one-vs-all-multiclass.md", - "redirect_url": "/azure/machine-learning/component-reference/one-vs-all-multiclass", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/machine-learning/algorithm-module-reference/one-vs-one-multiclass.md", - "redirect_url": "/azure/machine-learning/component-reference/one-vs-one-multiclass", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/partition-and-sample.md", - "redirect_url": "/azure/machine-learning/component-reference/partition-and-sample", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/pca-based-anomaly-detection.md", - "redirect_url": "/azure/machine-learning/component-reference/pca-based-anomaly-detection", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/permutation-feature-importance.md", - "redirect_url": "/azure/machine-learning/component-reference/permutation-feature-importance", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/poisson-regression.md", - "redirect_url": "/azure/machine-learning/component-reference/poisson-regression", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/preprocess-text.md", - "redirect_url": "/azure/machine-learning/component-reference/preprocess-text", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/remove-duplicate-rows.md", - "redirect_url": "/azure/machine-learning/component-reference/remove-duplicate-rows", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/resnet.md", - "redirect_url": "/azure/machine-learning/component-reference/resnet", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/score-image-model.md", - "redirect_url": "/azure/machine-learning/component-reference/score-image-model", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/score-model.md", - "redirect_url": "/azure/machine-learning/component-reference/score-model", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/score-svd-recommender.md", - "redirect_url": "/azure/machine-learning/component-reference/score-svd-recommender", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/score-vowpal-wabbit-model.md", - "redirect_url": "/azure/machine-learning/component-reference/score-vowpal-wabbit-model", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/score-wide-and-deep-recommender.md", - "redirect_url": "/azure/machine-learning/component-reference/score-wide-and-deep-recommender", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/select-columns-in-dataset.md", - "redirect_url": "/azure/machine-learning/component-reference/select-columns-in-dataset", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/select-columns-transform.md", - "redirect_url": "/azure/machine-learning/component-reference/select-columns-transform", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/machine-learning/algorithm-module-reference/smote.md", - "redirect_url": "/azure/machine-learning/component-reference/smote", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/split-data.md", - "redirect_url": "/azure/machine-learning/component-reference/split-data", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/split-image-directory.md", - "redirect_url": "/azure/machine-learning/component-reference/split-image-directory", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/summarize-data.md", - "redirect_url": "/azure/machine-learning/component-reference/summarize-data", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/train-anomaly-detection-model.md", - "redirect_url": "/azure/machine-learning/component-reference/train-anomaly-detection-model", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/train-clustering-model.md", - "redirect_url": "/azure/machine-learning/component-reference/train-clustering-model", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/train-model.md", - "redirect_url": "/azure/machine-learning/component-reference/train-model", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/train-pytorch-model.md", - "redirect_url": "/azure/machine-learning/component-reference/train-pytorch-model", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/train-svd-recommender.md", - "redirect_url": "/azure/machine-learning/component-reference/train-svd-recommender", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/train-vowpal-wabbit-model.md", - "redirect_url": "/azure/machine-learning/component-reference/train-vowpal-wabbit-model", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/train-wide-and-deep-recommender.md", - "redirect_url": "/azure/machine-learning/component-reference/train-wide-and-deep-recommender", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/tune-model-hyperparameters.md", - "redirect_url": "/azure/machine-learning/component-reference/tune-model-hyperparameters", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/two-class-averaged-perceptron.md", - "redirect_url": "/azure/machine-learning/component-reference/two-class-averaged-perceptron", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/two-class-boosted-decision-tree.md", - "redirect_url": "/azure/machine-learning/component-reference/two-class-boosted-decision-tree", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/two-class-decision-forest.md", - "redirect_url": "/azure/machine-learning/component-reference/two-class-decision-forest", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/machine-learning/algorithm-module-reference/two-class-logistic-regression.md", - "redirect_url": "/azure/machine-learning/component-reference/two-class-logistic-regression", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/two-class-neural-network.md", - "redirect_url": "/azure/machine-learning/component-reference/two-class-neural-network", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/two-class-support-vector-machine.md", - "redirect_url": "/azure/machine-learning/component-reference/two-class-support-vector-machine", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/web-service-input-output.md", - "redirect_url": "/azure/machine-learning/component-reference/web-service-input-output", - "redirect_document_id": true - }, + { "source_path_from_root": "/articles/partner-twilio-cloud-services-dotnet-phone-call-web-role.md", "redirect_url": "https://www.twilio.com/docs/usage/tutorials/serverless-webhooks-azure-functions-and-csharp", @@ -42239,21 +39205,7 @@ "redirect_url": "https://docs.sendgrid.com/for-developers/partners/microsoft-azure-2021#create-a-twilio-sendgrid-account", "redirect_document_id": false }, - { - "source_path_from_root": "/articles/machine-learning/reference-online-endpoint-yaml.md", - "redirect_url": "reference-yaml-endpoint-online", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/reference-yaml-job-component.md", - "redirect_url": "reference-yaml-job-command", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/machine-learning/reference-yaml-dataset.md", - "redirect_url": "reference-yaml-data", - "redirect_document_id": true - }, + { "source_path_from_root": "/articles/store-sendgrid-nodejs-how-to-send-email.md", "redirect_url": "https://docs.sendgrid.com/for-developers/partners/microsoft-azure-2021#create-a-twilio-sendgrid-account", @@ -42774,6 +39726,11 @@ "redirect_url": "/azure/aks/open-service-mesh-integrations", "redirect_document_id": false }, + { + "source_path_from_root": "/articles/aks/spark-job.md", + "redirect_url": "/azure/aks/integrations#open-source-and-third-party-integrations", + "redirect_document_id": false + }, { "source_path_from_root": "/articles/iot-dps/quick-create-device-symmetric-key-csharp.md", "redirect_url": "/azure/iot-dps/quick-create-simulated-device-symm-key", @@ -43049,11 +40006,7 @@ "redirect_url": "/learn/modules/translate-text-with-translator-service?toc=/azure/cognitive-services/translator/toc.json&bc=/azure/cognitive-services/translator/breadcrumb/toc.json", "redirect_document_id": false }, - { - "source_path": "articles/machine-learning/classic/deploy-with-resource-manager-template.md", - "redirect_url": "/previous-versions/azure/machine-learning/classic/deploy-with-resource-manager-template", - "redirect_document_id": false - }, + { "source_path_from_root": "/articles/governance/policy/how-to/guest-configuration-create-group-policy.md", "redirect_url": "/azure/governance/policy/how-to/guest-configuration-create", @@ -43136,92 +40089,92 @@ }, { "source_path_from_root": "/articles/cognitive-services/video-indexer/connect-to-azure.md", - "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/connect-to-azure", + "redirect_url": "/azure/azure-video-indexer/connect-to-azure", "redirect_document_id": false }, { 
"source_path_from_root": "/articles/cognitive-services/video-indexer/manage-account-connected-to-azure.md", - "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/manage-account-connected-to-azure", + "redirect_url": "/azure/azure-video-indexer/manage-account-connected-to-azure", "redirect_document_id": false }, { "source_path_from_root": "/articles/cognitive-services/video-indexer/migrate-from-v1-to-v2.md", - "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/", + "redirect_url": "/azure/azure-video-indexer/", "redirect_document_id": false }, { "source_path_from_root": "/articles/cognitive-services/video-indexer/upload-index-videos.md", - "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/upload-index-videos", + "redirect_url": "/azure/azure-video-indexer/upload-index-videos", "redirect_document_id": false }, { "source_path_from_root": "/articles/cognitive-services/video-indexer/video-indexer-concepts.md", - "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-overview", + "redirect_url": "/azure/azure-video-indexer/video-indexer-overview", "redirect_document_id": false }, { "source_path_from_root": "/articles/cognitive-services/video-indexer/video-indexer-create-new.md", - "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/", + "redirect_url": "/azure/azure-video-indexer/", "redirect_document_id": false }, { "source_path_from_root": "/articles/cognitive-services/video-indexer/video-indexer-embed-widgets.md", - "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-embed-widgets", + "redirect_url": "/azure/azure-video-indexer/video-indexer-embed-widgets", "redirect_document_id": false }, { "source_path_from_root": "/articles/cognitive-services/video-indexer/video-indexer-get-started.md", - "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-get-started", + "redirect_url": "/azure/azure-video-indexer/video-indexer-get-started", "redirect_document_id": false }, { "source_path_from_root": "/articles/cognitive-services/video-indexer/video-indexer-output-json-v2.md", - "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-output-json-v2", + "redirect_url": "/azure/azure-video-indexer/video-indexer-output-json-v2", "redirect_document_id": false }, { "source_path_from_root": "/articles/cognitive-services/Video/GetStarted.md", - "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-overview", + "redirect_url": "/azure/azure-video-indexer/video-indexer-overview", "redirect_document_id": false }, { "source_path_from_root": "/articles/cognitive-services/Video/Glossary.md", - "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-overview", + "redirect_url": "/azure/azure-video-indexer/video-indexer-overview", "redirect_document_id": false }, { "source_path_from_root": "/articles/cognitive-services/Video/Home.md", - "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-overview", + "redirect_url": "/azure/azure-video-indexer/video-indexer-overview", "redirect_document_id": false }, { "source_path_from_root": "/articles/cognitive-services/Video/How-To/HowtoAnalyzeVideo_Video.md", - "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-overview", + "redirect_url": "/azure/azure-video-indexer/video-indexer-overview", 
"redirect_document_id": false }, { "source_path_from_root": "/articles/cognitive-services/Video/How-To/HowtoCallVideoAPIs.md", - "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-overview", + "redirect_url": "/azure/azure-video-indexer/video-indexer-overview", "redirect_document_id": false }, { "source_path_from_root": "/articles/cognitive-services/video-indexer/video-indexer-overview.md", - "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-overview", + "redirect_url": "/azure/azure-video-indexer/video-indexer-overview", "redirect_document_id": false }, { "source_path_from_root": "/articles/cognitive-services/video-indexer/video-indexer-search.md", - "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-search", + "redirect_url": "/azure/azure-video-indexer/video-indexer-search", "redirect_document_id": false }, { "source_path_from_root": "/articles/cognitive-services/video-indexer/video-indexer-use-apis.md", - "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-use-apis", + "redirect_url": "/azure/azure-video-indexer/video-indexer-use-apis", "redirect_document_id": false }, { "source_path_from_root": "/articles/cognitive-services/video-indexer/video-indexer-view-edit.md", - "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-view-edit", + "redirect_url": "/azure/azure-video-indexer/video-indexer-view-edit", "redirect_document_id": false }, { @@ -43234,6 +40187,11 @@ "redirect_url": "/azure/cognitive-services/translator/custom-translator/key-terms", "redirect_document_id": false }, + { + "source_path_from_root": "/articles/applied-ai-services/form-recognizer/generate-sas-tokens.md", + "redirect_url": "/azure/applied-ai-services/form-recognizer/create-sas-tokens", + "redirect_document_id": false + }, { "source_path_from_root": "/articles/cognitive-services/language-service/text-summarization/how-to/call-api.md", "redirect_url": "/azure/cognitive-services/language-service/summarization/how-to/document-summarization", @@ -43253,6 +40211,26 @@ "source_path_from_root": "/articles/cognitive-services/language-service/text-summarization/quickstart.md", "redirect_url": "/azure/cognitive-services/language-service/summarization/quickstart", "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/virtual-network/nat-gateway/tutorial-create-nat-gateway-portal.md", + "redirect_url": "/azure/virtual-network/nat-gateway/quickstart-create-nat-gateway-portal", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/virtual-network/nat-gateway/tutorial-create-nat-gateway-powershell.md", + "redirect_url": "/azure/virtual-network/nat-gateway/quickstart-create-nat-gateway-powershell", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/virtual-network/nat-gateway/tutorial-create-nat-gateway-cli.md", + "redirect_url": "/azure/virtual-network/nat-gateway/quickstart-create-nat-gateway-cli", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/private-link/private-endpoint-static-ip-powershell.md", + "redirect_url": "/azure/private-link/create-private-endpoint-powershell", + "redirect_document_id": true } ] } diff --git a/.whatsnew/.application-management.json b/.whatsnew/.application-management.json index befe3203f7913..02cdda7720cf3 100644 --- a/.whatsnew/.application-management.json +++ b/.whatsnew/.application-management.json @@ -1,24 
+1,24 @@ { - "$schema": "https://whatsnewapi.azurewebsites.net/schema", - "docSetProductName": "Azure Active Directory application management", - "rootDirectory": "articles/active-directory/manage-apps/", - "docLinkSettings": { - "linkFormat": "relative", - "relativeLinkPrefix": "/azure/active-directory/manage-apps" - }, - "inclusionCriteria": { - "excludePullRequestTitles": true, - "minAdditionsToFile" : 10, - "maxFilesChanged": 50, - "labels": [ - "label:active-directory/svc", - "label:app-mgmt/subsvc" - ] - }, - "areas": [ - { - "name": ".", - "heading": "Azure Active Directory application management" - } - ] -} + "$schema": "https://whatsnewapi.azurewebsites.net/schema", + "docSetProductName": "Azure Active Directory application management", + "rootDirectory": "articles/active-directory/manage-apps/", + "docLinkSettings": { + "linkFormat": "relative", + "relativeLinkPrefix": "/azure/active-directory/manage-apps" + }, + "inclusionCriteria": { + "excludePullRequestTitles": true, + "minAdditionsToFile" : 10, + "maxFilesChanged": 50, + "labels": [ + "label:active-directory/svc", + "label:app-mgmt/subsvc" + ] + }, + "areas": [ + { + "name": ".", + "heading": "Azure Active Directory application management" + } + ] +} \ No newline at end of file diff --git a/articles/active-directory-b2c/azure-monitor.md b/articles/active-directory-b2c/azure-monitor.md index f8e8a63b664ca..2b2158144b1e2 100644 --- a/articles/active-directory-b2c/azure-monitor.md +++ b/articles/active-directory-b2c/azure-monitor.md @@ -11,12 +11,12 @@ ms.workload: identity ms.topic: how-to ms.author: kengaderdus ms.subservice: B2C -ms.date: 02/23/2022 +ms.date: 06/03/2022 --- # Monitor Azure AD B2C with Azure Monitor -Use Azure Monitor to route Azure Active Directory B2C (Azure AD B2C) sign-in and [auditing](view-audit-logs.md) logs to different monitoring solutions. You can retain the logs for long-term use or integrate with third-party security information and event management (SIEM) tools to gain insights into your environment. +Use Azure Monitor to route Azure Active Directory B2C (Azure AD B2C) sign in and [auditing](view-audit-logs.md) logs to different monitoring solutions. You can retain the logs for long-term use or integrate with third-party security information and event management (SIEM) tools to gain insights into your environment. You can route log events to: @@ -26,10 +26,10 @@ You can route log events to: ![Azure Monitor](./media/azure-monitor/azure-monitor-flow.png) +When you plan to transfer Azure AD B2C logs to different monitoring solutions, or repository, consider that Azure AD B2C logs contain personal data. When you process such data, ensure you use appropriate security measures on the personal data. It includes protection against unauthorized or unlawful processing, using appropriate technical or organizational measures. + In this article, you learn how to transfer the logs to an Azure Log Analytics workspace. Then you can create a dashboard or create alerts that are based on Azure AD B2C users' activities. -> [!IMPORTANT] -> When you plan to transfer Azure AD B2C logs to different monitoring solutions, or repository, consider the following. Azure AD B2C logs contain personal data. Such data should be processed in a manner that ensures appropriate security of the personal data, including protection against unauthorized or unlawful processing, using appropriate technical or organizational measures. Watch this video to learn how to configure monitoring for Azure AD B2C using Azure Monitor. 
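For orientation, routing the sign-in and audit logs to a Log Analytics workspace ultimately amounts to a diagnostic setting on the Azure AD B2C directory. The following is only a rough sketch of the kind of payload such a setting represents; the article configures it through the portal, the angle-bracket values are placeholders, and the exact shape generated by the portal may differ. The resource group name comes from the walkthrough.

```json
{
  "name": "<setting-name>",
  "properties": {
    "workspaceId": "/subscriptions/<subscription-id>/resourceGroups/azure-ad-b2c-monitor/providers/Microsoft.OperationalInsights/workspaces/<workspace-name>",
    "logs": [
      { "category": "AuditLogs", "enabled": true },
      { "category": "SignInLogs", "enabled": true }
    ]
  }
}
```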
@@ -37,7 +37,7 @@ Watch this video to learn how to configure monitoring for Azure AD B2C using Azu ## Deployment overview -Azure AD B2C leverages [Azure Active Directory monitoring](../active-directory/reports-monitoring/overview-monitoring.md). Because an Azure AD B2C tenant, unlike Azure AD tenants, can't have a subscription associated with it, we need to take some additional steps to enable the integration between Azure AD B2C and Log Analytics, which is where we'll send the logs. +Azure AD B2C uses [Azure Active Directory monitoring](../active-directory/reports-monitoring/overview-monitoring.md). Unlike Azure AD tenants, an Azure AD B2C tenant can't have a subscription associated with it. So, we need to take extra steps to enable the integration between Azure AD B2C and Log Analytics, which is where we'll send the logs. To enable _Diagnostic settings_ in Azure Active Directory within your Azure AD B2C tenant, you use [Azure Lighthouse](../lighthouse/overview.md) to [delegate a resource](../lighthouse/concepts/architecture.md), which allows your Azure AD B2C (the **Service Provider**) to manage an Azure AD (the **Customer**) resource. > [!TIP] @@ -45,15 +45,21 @@ To enable _Diagnostic settings_ in Azure Active Directory within your Azure AD B After you complete the steps in this article, you'll have created a new resource group (here called _azure-ad-b2c-monitor_) and have access to that same resource group that contains the [Log Analytics workspace](../azure-monitor/logs/quick-create-workspace.md) in your **Azure AD B2C** portal. You'll also be able to transfer the logs from Azure AD B2C to your Log Analytics workspace. -During this deployment, you'll authorize a user or group in your Azure AD B2C directory to configure the Log Analytics workspace instance within the tenant that contains your Azure subscription. To create the authorization, you deploy an [Azure Resource Manager](../azure-resource-manager/index.yml) template to the subscription containing the Log Analytics workspace. +During this deployment, you'll authorize a user or group in your Azure AD B2C directory to configure the Log Analytics workspace instance within the tenant that contains your Azure subscription. To create the authorization, you deploy an [Azure Resource Manager](../azure-resource-manager/index.yml) template to the subscription that contains the Log Analytics workspace. The following diagram depicts the components you'll configure in your Azure AD and Azure AD B2C tenants. ![Resource group projection](./media/azure-monitor/resource-group-projection.png) -During this deployment, you'll configure both your Azure AD B2C tenant and Azure AD tenant where the Log Analytics workspace will be hosted. The Azure AD B2C accounts used (such as your admin account) should be assigned the [Global Administrator](../active-directory/roles/permissions-reference.md#global-administrator) role on the Azure AD B2C tenant. The Azure AD account used to run the deployment must be assigned the [Owner](../role-based-access-control/built-in-roles.md#owner) role in the Azure AD subscription. It's also important to make sure you're signed in to the correct directory as you complete each step as described. +During this deployment, you'll configure your Azure AD B2C tenant where logs are generated. You'll also configure Azure AD tenant where the Log Analytics workspace will be hosted. 
The Azure AD B2C accounts used (such as your admin account) should be assigned the [Global Administrator](../active-directory/roles/permissions-reference.md#global-administrator) role on the Azure AD B2C tenant. The Azure AD account you'll use to run the deployment must be assigned the [Owner](../role-based-access-control/built-in-roles.md#owner) role in the Azure AD subscription. It's also important to make sure you're signed in to the correct directory as you complete each step as described. + +In summary, you'll use Azure Lighthouse to allow a user or group in your Azure AD B2C tenant to manage a resource group in a subscription associated with a different tenant (the Azure AD tenant). After this authorization is completed, the subscription and log analytics workspace can be selected as a target in the Diagnostic settings in Azure AD B2C. + +## Prerequisites + +- An Azure AD B2C account with the [Global Administrator](../active-directory/roles/permissions-reference.md#global-administrator) role on the Azure AD B2C tenant. -In summary, you will use Azure Lighthouse to allow a user or group in your Azure AD B2C tenant to manage a resource group in a subscription associated with a different tenant (the Azure AD tenant). After this authorization is completed, the subscription and log analytics workspace can be selected as a target in the Diagnostic settings in Azure AD B2C. + +- An Azure AD account with the [Owner](../role-based-access-control/built-in-roles.md#owner) role in the Azure AD subscription. See how to [Assign a user as an administrator of an Azure subscription](../role-based-access-control/role-assignments-portal-subscription-admin.md). ## 1. Create or choose resource group @@ -101,7 +107,7 @@ To make management easier, we recommend using Azure AD user _groups_ for each ro ### 3.3 Create an Azure Resource Manager template -To create the custom authorization and delegation in Azure Lighthouse, we use an Azure Resource Manager template that grants Azure AD B2C access to the Azure AD resource group you created earlier (for example, _azure-ad-b2c-monitor_). Deploy the template from the GitHub sample by using the **Deploy to Azure** button, which opens the Azure portal and lets you configure and deploy the template directly in the portal. For these steps, make sure you're signed in to your Azure AD tenant (not the Azure AD B2C tenant). +To create the custom authorization and delegation in Azure Lighthouse, we use an Azure Resource Manager template. This template grants Azure AD B2C access to the Azure AD resource group, which you created earlier, for example, _azure-ad-b2c-monitor_. Deploy the template from the GitHub sample by using the **Deploy to Azure** button, which opens the Azure portal and lets you configure and deploy the template directly in the portal. For these steps, make sure you're signed in to your Azure AD tenant (not the Azure AD B2C tenant). 1. Sign in to the [Azure portal](https://portal.azure.com). 1. Make sure you're using the directory that contains your Azure AD tenant. Select the **Directories + subscriptions** icon in the portal toolbar.
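For orientation, the values that the table in the next step describes might be supplied to the template in a parameters file shaped roughly like the following sketch. The parameter names (`mspOfferName`, `managedByTenantId`, `authorizations`, and so on) are assumptions based on the field names in that table, the angle-bracket values are placeholders, and the group display name is hypothetical; the sample template you deploy defines the authoritative names. The role definition ID shown is the built-in Contributor role referenced in the table.

```json
{
  "parameters": {
    "mspOfferName": { "value": "Azure AD B2C Monitoring" },
    "mspOfferDescription": { "value": "Enables Azure Monitor in Azure AD B2C" },
    "managedByTenantId": { "value": "<Azure-AD-B2C-tenant-ID>" },
    "authorizations": {
      "value": [
        {
          "principalId": "<object-ID-of-the-B2C-group-you-recorded-earlier>",
          "principalIdDisplayName": "Azure AD B2C monitoring group",
          "roleDefinitionId": "b24988ac-6180-42a0-ab88-20f7382dd24c"
        }
      ]
    }
  }
}
```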
@@ -116,7 +122,7 @@ To create the custom authorization and delegation in Azure Lighthouse, we use an | --------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | Subscription | Select the directory that contains the Azure subscription where the _azure-ad-b2c-monitor_ resource group was created. | | Region | Select the region where the resource will be deployed. | - | Msp Offer Name | A name describing this definition. For example, _Azure AD B2C Monitoring_. This is the name that will be displayed in Azure Lighthouse. The **MSP Offer Name** must be unique in your Azure AD. To monitor multiple Azure AD B2C tenants, use different names. | + | Msp Offer Name | A name describing this definition. For example, _Azure AD B2C Monitoring_. It's the name that will be displayed in Azure Lighthouse. The **MSP Offer Name** must be unique in your Azure AD. To monitor multiple Azure AD B2C tenants, use different names. | | Msp Offer Description | A brief description of your offer. For example, _Enables Azure Monitor in Azure AD B2C_. | | Managed By Tenant Id | The **Tenant ID** of your Azure AD B2C tenant (also known as the directory ID). | | Authorizations | Specify a JSON array of objects that include the Azure AD `principalId`, `principalIdDisplayName`, and Azure `roleDefinitionId`. The `principalId` is the **Object ID** of the B2C group or user that will have access to resources in this Azure subscription. For this walkthrough, specify the group's Object ID that you recorded earlier. For the `roleDefinitionId`, use the [built-in role](../role-based-access-control/built-in-roles.md) value for the _Contributor role_, `b24988ac-6180-42a0-ab88-20f7382dd24c`. | @@ -143,8 +149,7 @@ After you've deployed the template and waited a few minutes for the resource pro > [!NOTE] > On the **Portal settings | Directories + subscriptions** page, ensure that your Azure AD B2C and Azure AD tenants are selected under **Current + delegated directories**. -1. Sign out of the Azure portal if you're currently signed in (this allows your session credentials to be refreshed in the next step). -1. Sign in to the [Azure portal](https://portal.azure.com) with your **Azure AD B2C** administrative account. This account must be a member of the security group you specified in the [Delegate resource management](#3-delegate-resource-management) step. +1. Sign out of the [Azure portal](https://portal.azure.com) and sign back in with your **Azure AD B2C** administrative account. This account must be a member of the security group you specified in the [Delegate resource management](#3-delegate-resource-management) step. Signing out and signing back in allows your session credentials to be refreshed in the next step. 1. Select the **Directories + subscriptions** icon in the portal toolbar. 1. On the **Portal settings | Directories + subscriptions** page, in the **Directory name** list, find your Azure AD directory that contains the Azure subscription and the _azure-ad-b2c-monitor_ resource group you created, and then select **Switch**. 1.
Verify that you've selected the correct directory and your Azure subscription is listed and selected in the **Default subscription filter**. @@ -189,7 +194,7 @@ To configure monitoring settings for Azure AD B2C activity logs: > [!NOTE] > It can take up to 15 minutes after an event is emitted for it to [appear in a Log Analytics workspace](../azure-monitor/logs/data-ingestion-time.md). Also, learn more about [Active Directory reporting latencies](../active-directory/reports-monitoring/reference-reports-latencies.md), which can impact the staleness of data and play an important role in reporting. -If you see the error message "To set up Diagnostic settings to use Azure Monitor for your Azure AD B2C directory, you need to set up delegated resource management," make sure you sign in with a user who is a member of the [security group](#32-select-a-security-group) and [select your subscription](#4-select-your-subscription). +If you see the error message, _To set up Diagnostic settings to use Azure Monitor for your Azure AD B2C directory, you need to set up delegated resource management_, make sure you sign in with a user who is a member of the [security group](#32-select-a-security-group) and [select your subscription](#4-select-your-subscription). ## 6. Visualize your data @@ -197,7 +202,7 @@ Now you can configure your Log Analytics workspace to visualize your data and co ### 6.1 Create a Query -Log queries help you to fully leverage the value of the data collected in Azure Monitor Logs. A powerful query language allows you to join data from multiple tables, aggregate large sets of data, and perform complex operations with minimal code. Virtually any question can be answered and analysis performed as long as the supporting data has been collected, and you understand how to construct the right query. For more information, see [Get started with log queries in Azure Monitor](../azure-monitor/logs/get-started-queries.md). +Log queries help you to fully use the value of the data collected in Azure Monitor Logs. A powerful query language allows you to join data from multiple tables, aggregate large sets of data, and perform complex operations with minimal code. Virtually any question can be answered and analysis performed as long as the supporting data has been collected, and you understand how to construct the right query. For more information, see [Get started with log queries in Azure Monitor](../azure-monitor/logs/get-started-queries.md). 1. From **Log Analytics workspace**, select **Logs** 1. In the query editor, paste the following [Kusto Query Language](/azure/data-explorer/kusto/query/) query. This query shows policy usage by operation over the past x days. The default duration is set to 90 days (90d). Notice that the query is focused only on the operation where a token/code is issued by policy. @@ -274,9 +279,9 @@ The workbook will display reports in the form of a dashboard. ## Create alerts -Alerts are created by alert rules in Azure Monitor and can automatically run saved queries or custom log searches at regular intervals. You can create alerts based on specific performance metrics or when certain events are created, absence of an event, or a number of events are created within a particular time window. For example, alerts can be used to notify you when average number of sign-in exceeds a certain threshold. For more information, see [Create alerts](../azure-monitor/alerts/alerts-log.md). 
+Alerts are created by alert rules in Azure Monitor and can automatically run saved queries or custom log searches at regular intervals. You can create alerts based on specific performance metrics or when certain events occur. You can also create alerts on the absence of an event, or when a number of events occur within a particular time window. For example, alerts can be used to notify you when the average number of sign-ins exceeds a certain threshold. For more information, see [Create alerts](../azure-monitor/alerts/alerts-log.md). -Use the following instructions to create a new Azure Alert, which will send an [email notification](../azure-monitor/alerts/action-groups.md#configure-notifications) whenever there is a 25% drop in the **Total Requests** compare to previous period. Alert will run every 5 minutes and look for the drop in the last hour compared to the hour before that. The alerts are created using Kusto query language. +Use the following instructions to create a new Azure Alert, which will send an [email notification](../azure-monitor/alerts/action-groups.md#configure-notifications) whenever there's a 25% drop in the **Total Requests** compared to the previous period. The alert will run every 5 minutes and look for the drop in the last hour compared to the hour before it. The alerts are created using Kusto query language. 1. From **Log Analytics workspace**, select **Logs**. 1. Create a new **Kusto query** by using the query below. @@ -311,7 +316,7 @@ After the alert is created, go to **Log Analytics workspace** and select **Alert Azure Monitor and Service Health alerts use action groups to notify users that an alert has been triggered. You can include sending a voice call, SMS, email; or triggering various types of automated actions. Follow the guidance [Create and manage action groups in the Azure portal](../azure-monitor/alerts/action-groups.md) -Here is an example of an alert notification email. +Here's an example of an alert notification email. ![Email notification](./media/azure-monitor/alert-email-notification.png) diff --git a/articles/active-directory-b2c/configure-a-sample-node-web-app.md b/articles/active-directory-b2c/configure-a-sample-node-web-app.md index 4d27914af7cf9..8fb2e1bcbe2f6 100644 --- a/articles/active-directory-b2c/configure-a-sample-node-web-app.md +++ b/articles/active-directory-b2c/configure-a-sample-node-web-app.md @@ -8,7 +8,7 @@ manager: CelesteDG ms.service: active-directory ms.workload: identity ms.topic: how-to -ms.date: 03/31/2022 +ms.date: 06/08/2022 ms.author: kengaderdus ms.subservice: B2C --- @@ -113,7 +113,7 @@ Open your web app in a code editor such as Visual Studio Code. Under the project |Key |Value | |---------|---------| |`APP_CLIENT_ID`|The **Application (client) ID** for the web app you registered in [step 2.1](#step-2-register-a-web-application). | -|`APP_CLIENT_SECRET`|The client secret for the web app you created in [step 2.2](#step-22-create-a-web-app-client-secret) | +|`APP_CLIENT_SECRET`|The client secret value for the web app you created in [step 2.2](#step-22-create-a-web-app-client-secret) | |`SIGN_UP_SIGN_IN_POLICY_AUTHORITY`|The **Sign in and sign up** user flow authority such as `https://.b2clogin.com/.onmicrosoft.com/`. Replace `` with the name of your tenant and `` with the name of your Sign in and Sign up user flow such as `B2C_1_susi`. Learn how to [Get your tenant name](tenant-management.md#get-your-tenant-name).
| |`RESET_PASSWORD_POLICY_AUTHORITY`| The **Reset password** user flow authority such as `https://.b2clogin.com/.onmicrosoft.com/`. Replace `` with the name of your tenant and `` with the name of your Reset password user flow such as `B2C_1_reset_password_node_app`.| |`EDIT_PROFILE_POLICY_AUTHORITY`|The **Profile editing** user flow authority such as `https://.b2clogin.com/.onmicrosoft.com/`. Replace `` with the name of your tenant and `` with the name of your reset password user flow such as `B2C_1_edit_profile_node_app`. | diff --git a/articles/active-directory-b2c/configure-authentication-in-sample-node-web-app-with-api.md b/articles/active-directory-b2c/configure-authentication-in-sample-node-web-app-with-api.md index bd18d4f58771e..1a25ab0bcdc28 100644 --- a/articles/active-directory-b2c/configure-authentication-in-sample-node-web-app-with-api.md +++ b/articles/active-directory-b2c/configure-authentication-in-sample-node-web-app-with-api.md @@ -9,7 +9,7 @@ manager: CelesteDG ms.service: active-directory ms.workload: identity ms.topic: how-to -ms.date: 03/30/2022 +ms.date: 06/08/2022 ms.author: kengaderdus ms.subservice: B2C --- @@ -150,7 +150,7 @@ Open your web app in a code editor such as Visual Studio Code. Under the `call-p |Key |Value | |---------|---------| |`APP_CLIENT_ID`|The **Application (client) ID** for the web app you registered in [step 2.3](#step-23-register-the-web-app). | -|`APP_CLIENT_SECRET`|The client secret for the web app you created in [step 2.4](#step-24-create-a-client-secret) | +|`APP_CLIENT_SECRET`|The client secret value for the web app you created in [step 2.4](#step-24-create-a-client-secret) | |`SIGN_UP_SIGN_IN_POLICY_AUTHORITY`|The **Sign in and sign up** user flow authority for the user flow you created in [step 1](#step-1-configure-your-user-flow) such as `https://.b2clogin.com/.onmicrosoft.com/`. Replace `` with the name of your tenant and `` with the name of your Sign in and Sign up user flow such as `B2C_1_susi`. Learn how to [Get your tenant name](tenant-management.md#get-your-tenant-name). | |`AUTHORITY_DOMAIN`| The Azure AD B2C authority domain such as `https://.b2clogin.com`. Replace `` with the name of your tenant.| |`APP_REDIRECT_URI`| The application redirect URI where Azure AD B2C will return authentication responses (tokens). It matches the **Redirect URI** you set while registering your app in Azure portal. This URL need to be publicly accessible. Leave the value as is.| diff --git a/articles/active-directory-b2c/configure-authentication-sample-python-web-app.md b/articles/active-directory-b2c/configure-authentication-sample-python-web-app.md index 81b80e97f72ad..2ec9e76d8c1cc 100644 --- a/articles/active-directory-b2c/configure-authentication-sample-python-web-app.md +++ b/articles/active-directory-b2c/configure-authentication-sample-python-web-app.md @@ -7,7 +7,7 @@ manager: CelesteDG ms.service: active-directory ms.workload: identity ms.topic: reference -ms.date: 09/15/2021 +ms.date: 06/08/2022 ms.author: kengaderdus ms.subservice: B2C ms.custom: "b2c-support" @@ -98,7 +98,7 @@ Open the *app_config.py* file. This file contains information about your Azure A |---------|---------| |`b2c_tenant`| The first part of your Azure AD B2C [tenant name](tenant-management.md#get-your-tenant-name) (for example, `contoso`).| |`CLIENT_ID`| The web API application ID from [step 2.1](#step-21-register-the-app).| -|`CLIENT_SECRET`| The client secret you created in [step 2.2](#step-22-create-a-web-app-client-secret). 
To help increase security, consider storing it instead in an environment variable, as recommended in the comments. | +|`CLIENT_SECRET`| The client secret value you created in [step 2.2](#step-22-create-a-web-app-client-secret). To help increase security, consider storing it instead in an environment variable, as recommended in the comments. | |`*_user_flow`|The user flows or custom policy you created in [step 1](#step-1-configure-your-user-flow).| | | | diff --git a/articles/active-directory-b2c/identity-provider-adfs.md b/articles/active-directory-b2c/identity-provider-adfs.md index 85f7aea72adb8..9aabf5320e715 100644 --- a/articles/active-directory-b2c/identity-provider-adfs.md +++ b/articles/active-directory-b2c/identity-provider-adfs.md @@ -9,7 +9,7 @@ manager: CelesteDG ms.service: active-directory ms.workload: identity ms.topic: how-to -ms.date: 01/18/2022 +ms.date: 06/08/2022 ms.custom: project-no-code ms.author: kengaderdus ms.subservice: B2C @@ -88,7 +88,7 @@ In this step, configure the claims AD FS application returns to Azure AD B2C. 1. For **Client ID**, enter the application ID that you previously recorded. 1. For the **Scope**, enter the `openid`. -1. For **Response type**, select **id_token**, which makes the **Client secret** optional. Learn more about use of [Client ID and secret](identity-provider-generic-openid-connect.md#client-id-and-secret) when adding a generic OpenID Connect identity provider. +1. For **Response type**, select **id_token**. So, the **Client secret** value isn't needed. Learn more about use of [Client ID and secret](identity-provider-generic-openid-connect.md#client-id-and-secret) when adding a generic OpenID Connect identity provider. 1. (Optional) For the **Domain hint**, enter `contoso.com`. For more information, see [Set up direct sign-in using Azure Active Directory B2C](direct-signin.md#redirect-sign-in-to-a-social-provider). 1. Under **Identity provider claims mapping**, select the following claims: diff --git a/articles/active-directory-b2c/identity-provider-azure-ad-single-tenant.md b/articles/active-directory-b2c/identity-provider-azure-ad-single-tenant.md index 8ddaa46868d3a..17e2f9f18f6e2 100644 --- a/articles/active-directory-b2c/identity-provider-azure-ad-single-tenant.md +++ b/articles/active-directory-b2c/identity-provider-azure-ad-single-tenant.md @@ -9,7 +9,7 @@ manager: CelesteDG ms.service: active-directory ms.workload: identity ms.topic: how-to -ms.date: 09/16/2021 +ms.date: 06/08/2022 ms.author: kengaderdus ms.subservice: B2C ms.custom: fasttrack-edit, project-no-code @@ -100,7 +100,7 @@ If you want to get the `family_name` and `given_name` claims from Azure AD, you For example, `https://login.microsoftonline.com/contoso.onmicrosoft.com/v2.0/.well-known/openid-configuration`. If you use a custom domain, replace `contoso.com` with your custom domain in `https://login.microsoftonline.com/contoso.com/v2.0/.well-known/openid-configuration`. 1. For **Client ID**, enter the application ID that you previously recorded. -1. For **Client secret**, enter the client secret that you previously recorded. +1. For **Client secret**, enter the client secret value that you previously recorded. 1. For **Scope**, enter `openid profile`. 1. Leave the default values for **Response type**, and **Response mode**. 1. (Optional) For the **Domain hint**, enter `contoso.com`. For more information, see [Set up direct sign-in using Azure Active Directory B2C](direct-signin.md#redirect-sign-in-to-a-social-provider). 
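For context, the metadata URL in the step above returns a standard OpenID Connect discovery document. The following is an abbreviated, illustrative sketch of that kind of response for the `contoso.onmicrosoft.com` example; the real document contains many more fields, and the exact endpoint values depend on your tenant.

```json
{
  "issuer": "https://login.microsoftonline.com/<tenant-ID>/v2.0",
  "authorization_endpoint": "https://login.microsoftonline.com/contoso.onmicrosoft.com/oauth2/v2.0/authorize",
  "token_endpoint": "https://login.microsoftonline.com/contoso.onmicrosoft.com/oauth2/v2.0/token",
  "jwks_uri": "https://login.microsoftonline.com/contoso.onmicrosoft.com/discovery/v2.0/keys",
  "scopes_supported": ["openid", "profile", "email", "offline_access"],
  "response_types_supported": ["code", "id_token", "code id_token"]
}
```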
@@ -145,7 +145,7 @@ You need to store the application key that you created in your Azure AD B2C tena 1. Select **Policy keys** and then select **Add**. 1. For **Options**, choose `Manual`. 1. Enter a **Name** for the policy key. For example, `ContosoAppSecret`. The prefix `B2C_1A_` is added automatically to the name of your key when it's created, so its reference in the XML in following section is to *B2C_1A_ContosoAppSecret*. -1. In **Secret**, enter your client secret that you recorded earlier. +1. In **Secret**, enter your client secret value that you recorded earlier. 1. For **Key usage**, select `Signature`. 1. Select **Create**. diff --git a/articles/active-directory-b2c/media/partner-gallery/asignio-logo.png b/articles/active-directory-b2c/media/partner-gallery/asignio-logo.png index 2e750c2d521ea..02b085c532dff 100644 Binary files a/articles/active-directory-b2c/media/partner-gallery/asignio-logo.png and b/articles/active-directory-b2c/media/partner-gallery/asignio-logo.png differ diff --git a/articles/active-directory-b2c/media/partner-gallery/haventec-logo.png b/articles/active-directory-b2c/media/partner-gallery/haventec-logo.png index 898fc82c2cee2..6440a8000287a 100644 Binary files a/articles/active-directory-b2c/media/partner-gallery/haventec-logo.png and b/articles/active-directory-b2c/media/partner-gallery/haventec-logo.png differ diff --git a/articles/active-directory-b2c/partner-asignio.md b/articles/active-directory-b2c/partner-asignio.md index fc58fe226996a..31f0beaf64231 100644 --- a/articles/active-directory-b2c/partner-asignio.md +++ b/articles/active-directory-b2c/partner-asignio.md @@ -114,7 +114,7 @@ Follow the steps mentioned in [this tutorial](tutorial-register-applications.md? | Property | Value | |:--------|:-------------| |Name | Login with Asignio *(or a name of your choice)* - |Metadata URL | https://authorization.asignio.com/.well-known/openid-configuration| + |Metadata URL | `https://authorization.asignio.com/.well-known/openid-configuration`| | Client ID | enter the client ID that you previously generated in [step 1](#step-1-configure-an-application-with-asignio)| |Client Secret | enter the Client secret that you previously generated in [step 1](#step-1-configure-an-application-with-asignio)| | Scope | openid email profile | diff --git a/articles/active-directory-b2c/partner-gallery.md b/articles/active-directory-b2c/partner-gallery.md index 04f79ea04c78b..1332dbfb451c5 100644 --- a/articles/active-directory-b2c/partner-gallery.md +++ b/articles/active-directory-b2c/partner-gallery.md @@ -21,7 +21,7 @@ Our ISV partner network extends our solution capabilities to help you build seam To be considered into this sample documentation, submit your application request in the [Microsoft Application Network portal](https://microsoft.sharepoint.com/teams/apponboarding/Apps/SitePages/Default.aspx). For any additional questions, send an email to [SaaSApplicationIntegrations@service.microsoft.com](mailto:SaaSApplicationIntegrations@service.microsoft.com). >[!NOTE] ->The [Azure Active Directory B2C community site on GitHub](https://azure-ad-b2c.github.io/azureadb2ccommunity.io/) also provides sample custom policies from the community. +>The [Azure Active Directory B2C community site on GitHub](https://github.com/azure-ad-b2c/partner-integrations) also provides sample custom policies from the community. 
## Identity verification and proofing diff --git a/articles/active-directory-b2c/partner-n8identity.md b/articles/active-directory-b2c/partner-n8identity.md index dd97541591a5e..f11be929cf7e2 100644 --- a/articles/active-directory-b2c/partner-n8identity.md +++ b/articles/active-directory-b2c/partner-n8identity.md @@ -1,7 +1,7 @@ --- -title: Tutorial for configuring N8 Identity with Azure Active Directory B2C +title: Configure TheAccessHub Admin Tool by using Azure Active Directory B2C titleSuffix: Azure AD B2C -description: Tutorial for configuring TheAccessHub Admin Tool with Azure Active Directory B2C to address customer accounts migration and Customer Service Requests (CSR) administration. +description: In this tutorial, configure TheAccessHub Admin Tool by using Azure Active Directory B2C to address customer account migration and customer service request (CSR) administration. services: active-directory-b2c author: gargi-sinha manager: CelesteDG @@ -15,379 +15,391 @@ ms.author: gasinh ms.subservice: B2C --- -# Tutorial for configuring TheAccessHub Admin Tool with Azure Active Directory B2C +# Configure TheAccessHub Admin Tool by using Azure Active Directory B2C -In this sample tutorial, we provide guidance on how to integrate Azure Active Directory (AD) B2C with [TheAccessHub Admin Tool](https://n8id.com/products/theaccesshub-admintool/) from N8 Identity. This solution addresses customer accounts migration and Customer Service Requests (CSR) administration. +In this tutorial, we provide guidance on how to integrate Azure Active Directory B2C (Azure AD B2C) with [TheAccessHub Admin Tool](https://n8id.com/products/theaccesshub-admintool/) from N8 Identity. This solution addresses customer account migration and customer service request (CSR) administration. -This solution is suited for you, if you have one or more of the following needs: +This solution is suited for you if you have one or more of the following needs: -- You have an existing site and you want to migrate to Azure AD B2C. However, you're struggling with migration of your customer accounts including passwords +- You have an existing site and you want to migrate to Azure AD B2C. However, you're struggling with migration of your customer accounts, including passwords. -- You require a tool for your CSR to administer Azure AD B2C accounts. +- You need a tool for your CSR to administer Azure AD B2C accounts. - You have a requirement to use delegated administration for your CSRs. - You want to synchronize and merge your data from many repositories into Azure AD B2C. -## Pre-requisites +## Prerequisites To get started, you'll need: - An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). -- An [Azure AD B2C tenant](./tutorial-create-tenant.md). Tenant must be linked to your Azure subscription. +- An [Azure AD B2C tenant](./tutorial-create-tenant.md). The tenant must be linked to your Azure subscription. -- A TheAccessHub Admin Tool environment: Contact [N8 Identity](https://n8id.com/contact/) to provision a new environment. +- A TheAccessHub Admin Tool environment. Contact [N8 Identity](https://n8id.com/contact/) to provision a new environment. -- [Optional] Connection and credential information for any databases or Lightweight Directory Access Protocols (LDAPs) you want to migrate customer data from. +- (Optional:) Connection and credential information for any databases or Lightweight Directory Access Protocols (LDAPs) that you want to migrate customer data from. 
-- [Optional] Configured Azure AD B2C environment for using [custom policies](./tutorial-create-user-flows.md?pivots=b2c-custom-policy), if you wish to integrate TheAccessHub Admin Tool into your sign-up policy flow. +- (Optional:) A configured Azure AD B2C environment for using [custom policies](./tutorial-create-user-flows.md?pivots=b2c-custom-policy), if you want to integrate TheAccessHub Admin Tool into your sign-up policy flow. ## Scenario description -The TheAccessHub Admin Tool runs like any other application in Azure. It can run in either N8 Identity’s Azure subscription, or the customer’s subscription. The following architecture diagram shows the implementation. +The TheAccessHub Admin Tool runs like any other application in Azure. It can run in either N8 Identity's Azure subscription or the customer's subscription. The following architecture diagram shows the implementation. -![Image showing n8identity architecture diagram](./media/partner-n8identity/n8identity-architecture-diagram.png) +![Diagram of the n8identity architecture.](./media/partner-n8identity/n8identity-architecture-diagram.png) |Step | Description | |:-----| :-----------| -| 1. | User arrives at a login page. Users select sign-up to create a new account and enter information into the page. Azure AD B2C collects the user attributes. -| 2. | Azure AD B2C calls the TheAccessHub Admin Tool and passes on the user attributes +| 1. | Each user arrives at a login page. The user creates a new account and enters information on the page. Azure AD B2C collects the user attributes. +| 2. | Azure AD B2C calls the TheAccessHub Admin Tool and passes on the user attributes. | 3. | TheAccessHub Admin Tool checks your existing database for current user information. -| 4. | The user record is synced from the database to TheAccessHub Admin Tool. +| 4. | User records are synced from the database to TheAccessHub Admin Tool. | 5. | TheAccessHub Admin Tool shares the data with the delegated CSR/helpdesk admin. | 6. | TheAccessHub Admin Tool syncs the user records with Azure AD B2C. -| 7. |Based on the success/failure response from the TheAccessHub Admin Tool, Azure AD B2C sends a customized welcome email to the user. +| 7. |Based on the success/failure response from the TheAccessHub Admin Tool, Azure AD B2C sends a customized welcome email to users. -## Create a Global Admin in your Azure AD B2C tenant +## Create a Global Administrator in your Azure AD B2C tenant -The TheAccessHub Admin Tool requires permissions to act on behalf of a Global Administrator to read user information and conduct changes in your Azure AD B2C tenant. Changes to your regular administrators won; t impact TheAccessHub Admin Tool’s ability to interact with the tenant. +The TheAccessHub Admin Tool requires permissions to act on behalf of a Global Administrator to read user information and conduct changes in your Azure AD B2C tenant. Changes to your regular administrators won't affect TheAccessHub Admin Tool's ability to interact with the tenant. -To create a Global Administrator, follow these steps: +To create a Global Administrator: -1. In the Azure portal, sign into your Azure AD B2C tenant as an administrator. Navigate to **Azure Active Directory** > **Users** -2. Select **New User** -3. Choose **Create User** to create a regular directory user and not a customer -4. Complete the Identity information form +1. In the Azure portal, sign in to your Azure AD B2C tenant as an administrator. Go to **Azure Active Directory** > **Users**. +2. Select **New User**. +3. 
Choose **Create User** to create a regular directory user and not a customer. +4. Complete the identity information form: - a. Enter the username such as ‘theaccesshub’ + a. Enter the username, such as **theaccesshub**. - b. Enter the name such as ‘TheAccessHub Service Account’ + b. Enter the account name, such as **TheAccessHub Service Account**. -5. Select **Show Password** and copy the initial password for later use -6. Assign the Global Administrator role +5. Select **Show Password** and copy the initial password for later use. +6. Assign the Global Administrator role: - a. Select the user’s current roles **User** to change it + a. For **User**, select the user's current role to change it. - b. Check the record for Global Administrator + b. Select the **Global Administrator** record. - c. **Select** > **Create** + c. Select **Create**. ## Connect TheAccessHub Admin Tool with your Azure AD B2C tenant -TheAccessHub Admin Tool uses Microsoft’s Graph API to read and make changes to your directory. It acts as a Global Administrator in your tenant. Additional permission is needed by TheAccessHub Admin Tool, which you can grant from within the tool. +TheAccessHub Admin Tool uses the Microsoft Graph API to read and make changes to your directory. It acts as a Global Administrator in your tenant. TheAccessHub Admin Tool needs additional permission, which you can grant from within the tool. -To authorize TheAccessHub Admin Tool to access your directory, follow these steps: +To authorize TheAccessHub Admin Tool to access your directory: -1. Log into TheAccessHub Admin Tool using credentials provided to you by N8 Identity +1. Log in to TheAccessHub Admin Tool by using the credentials that N8 Identity has provided. -2. Navigate to **System Admin** > **Azure AD B2C Config** +2. Go to **System Admin** > **Azure AD B2C Config**. -3. Select **Authorize Connection** +3. Select **Authorize Connection**. -4. In the new window sign-in with your Global Administrator account. You may be asked to reset your password if you're signing in for the first time with the new service account. +4. In the new window, sign in with your Global Administrator account. You might be asked to reset your password if you're signing in for the first time with the new service account. 5. Follow the prompts and select **Accept** to grant TheAccessHub Admin Tool the requested permissions. -## Configure a new CSR user using your enterprise identity +## Configure a new CSR user by using your enterprise identity -Create a CSR/Helpdesk user who accesses TheAccessHub Admin Tool using their existing enterprise Azure Active Directory credentials. +Create a CSR/Helpdesk user who accesses TheAccessHub Admin Tool by using their existing enterprise Azure Active Directory credentials. -To configure CSR/Helpdesk user with Single Sign-on (SSO), the following steps are recommended: +To configure a CSR/Helpdesk user with single sign-on (SSO), we recommend the following steps: -1. Log into TheAccessHub Admin Tool using credentials provided by N8 Identity. +1. Log in to TheAccessHub Admin Tool by using the credentials that N8 Identity has provided. -2. Navigate to **Manager Tools** > **Manage Colleagues** +2. Go to **Manager Tools** > **Manage Colleagues**. -3. Select **Add Colleague** +3. Select **Add Colleague**. -4. Select **Colleague Type Azure Administrator** +4. For **Colleague Type**, select **Azure Administrator**. -5. Enter the colleague’s profile information +5. Enter the colleague's profile information: - a. 
Choosing a Home Organization will control who has permission to manage this user. + a. Choose a home organization to control who has permission to manage this user. - b. For Login ID/Azure AD User Name supply the User Principal Name from the user’s Azure Active Directory account. + b. For **Login ID/Azure AD User Name**, supply the user principal name from the user's Azure Active Directory account. - c. On the TheAccessHub Roles tab, check the managed role Helpdesk. It will allow the user to access the manage colleagues view. The user will still need to be placed into a group or be made an organization owner to act on customers. + c. On the **TheAccessHub Roles** tab, select the managed role **Helpdesk**. It will allow the user to access the **Manage Colleagues** view. The user will still need to be placed into a group or be made an organization owner to act on customers. 6. Select **Submit**. -## Configure a new CSR user using a new identity +## Configure a new CSR user by using a new identity -Create a CSR/Helpdesk user who will access TheAccessHub Admin Tool with a new local credential unique to TheAccessHub Admin Tool. This will be used mainly by organizations that don't use an Azure AD for their enterprise. +Create a CSR/Helpdesk user who will access TheAccessHub Admin Tool by using a new local credential that's unique to the tool. This user will be used mainly by organizations that don't use Azure Active Directory. -To [setup a CSR/Helpdesk](https://youtu.be/iOpOI2OpnLI) user without SSO, follow these steps: +To [set up a CSR/Helpdesk user](https://youtu.be/iOpOI2OpnLI) without SSO: -1. Log into TheAccessHub Admin Tool using credentials provided by N8 Identity +1. Log in to TheAccessHub Admin Tool by using the credentials that N8 Identity has provided. -2. Navigate to **Manager Tools** > **Manage Colleagues** +2. Go to **Manager Tools** > **Manage Colleagues**. -3. Select **Add Colleague** +3. Select **Add Colleague**. -4. Select **Colleague Type Local Administrator** +4. For **Colleague Type**, select **Local Administrator**. -5. Enter the colleague’s profile information +5. Enter the colleague's profile information: - a. Choosing a Home Organization will control who has permission to manage this user. + a. Choose a home organization to control who has permission to manage this user. - b. On the **TheAccessHub Roles** tab, select the managed role **Helpdesk**. It will allow the user to access the manage colleagues view. The user will still need to be placed into a group or be made an organization owner to act on customers. + b. On the **TheAccessHub Roles** tab, select the managed role **Helpdesk**. It will allow the user to access the **Manage Colleagues** view. The user will still need to be placed into a group or be made an organization owner to act on customers. -6. Copy the **Login ID/Email** and **One Time Password** attributes. Provide it to the new user. They'll use these credentials to log in to TheAccessHub Admin Tool. The user will be prompted to enter a new password on their first login. +6. Copy the **Login ID/Email** and **One Time Password** attributes. Provide them to the new user. The user will use these credentials to log in to TheAccessHub Admin Tool. The user will be prompted to enter a new password on first login. -7. Select **Submit** +7. Select **Submit**. ## Configure partitioned CSR administration -Permissions to manage customer and CSR/Helpdesk users in TheAccessHub Admin Tool are managed with the use of an organization hierarchy. 
All colleagues and customers have a home organization where they reside. Specific colleagues or groups of colleagues can be assigned as owners of organizations. Organization owners can manage (make changes to) colleagues and customers in organizations or suborganizations they own. To allow multiple colleagues to manage a set of users, a group can be created with many members. The group can then be assigned as an organization owner and all of the group’s members can manage colleagues and customers in the organization. +Permissions to manage customer and CSR/Helpdesk users in TheAccessHub Admin Tool are managed through an organization hierarchy. All colleagues and customers have a home organization where they reside. You can assign specific colleagues or groups of colleagues as owners of organizations. + +Organization owners can manage (make changes to) colleagues and customers in organizations or suborganizations that they own. To allow multiple colleagues to manage a set of users, you can create a group that has many members. You can then assign the group as an organization owner. All of the group's members can then manage colleagues and customers in the organization. ### Create a new group -1. Log into TheAccessHub Admin Tool using **credentials** provided to you by N8 Identity +1. Log in to TheAccessHub Admin Tool by using the credentials that N8 Identity has provided. -2. Navigate to **Organization > Manage Groups** +2. Go to **Organization > Manage Groups**. -3. Select > **Add Group** +3. Select **Add Group**. -4. Enter a **Group name**, **Group description**, and **Group owner** +4. Enter values for **Group name**, **Group description**, and **Group owner**. -5. Search for and check the boxes on the colleagues you want to be members of the group then select >**Add** +5. Search for and select the boxes on the colleagues you want to be members of the group, and then select **Add**. 6. At the bottom of the page, you can see all members of the group. -7. If needed members can be removed by selecting the **x** at the end of the row + If necessary, you can remove members by selecting the **x** at the end of the row. -8. Select **Submit** +7. Select **Submit**. ### Create a new organization -1. Log into TheAccessHub Admin Tool using credentials provided to you by N8 Identity +1. Log in to TheAccessHub Admin Tool by using the credentials that N8 Identity has provided. -2. Navigate to Organization > **Manage Organizations** +2. Go to **Organization** > **Manage Organizations**. -3. Select > **Add Organization** +3. Select **Add Organization**. -4. Supply an **Organization name**, **Organization owner**, and **Parent organization**. +4. Supply values for **Organization name**, **Organization owner**, and **Parent organization**: - a. The organization name is ideally a value that corresponds to your customer data. When loading colleague and customer data, if you supply the name of the organization in the load, the colleague can be automatically placed into the organization. + a. The organization name is ideally a value that corresponds to your customer data. When you're loading colleague and customer data, if you supply the name of the organization in the load, the colleague can be automatically placed into the organization. - b. The owner represents the person or group who will manage the customers and colleagues in this organization and any suborganization within. + b. 
The owner represents the person or group that will manage the customers and colleagues in this organization and any suborganization within it. - c. The parent organization indicates which other organization is inherently, also responsible for this organization. + c. The parent organization indicates which other organization is also responsible for this organization. 5. Select **Submit**. ### Modify the hierarchy via the tree view -1. Log into TheAccessHub Admin Tool using credentials provided to you by N8 Identity - -2. Navigate to **Manager Tools** > **Tree View** +1. Log in to TheAccessHub Admin Tool by using the credentials that N8 Identity has provided. -3. In this representation, you can visualize which colleagues and groups can manage which organizations. +2. Go to **Manager Tools** > **Tree View**. -4. The hierarchies can be modified by dragging organizations overtop organizations you want them to be parented by. +3. In this representation, you can visualize which colleagues and groups can manage which organizations. Modify the hierarchy by dragging organizations into parent organizations. 5. Select **Save** when you're finished altering the hierarchy. -## Customize welcome notification +## Customize the welcome notification -While you're using TheAccessHub to migrate users from a previous authentication solution into Azure AD B2C, you may want to customize the user welcome notification, which is sent to the user by TheAccessHub during migration. This message normally includes the link for the customer to set a new password in the Azure AD B2C directory. +While you're using TheAccessHub Admin Tool to migrate users from a previous authentication solution into Azure AD B2C, you might want to customize the user welcome notification. TheAccessHub Admin Tool sends this notification to users during migration. This message normally includes a link for users to set a new password in the Azure AD B2C directory. To customize the notification: -1. Log into TheAccessHub using credentials provided to you by N8 Identity +1. Log in to TheAccessHub Admin Tool by using the credentials that N8 Identity has provided. -2. Navigate to **System Admin** > **Notifications** +2. Go to **System Admin** > **Notifications**. -3. Select the **Create Colleague template** +3. Select the **Create Colleague** template. -4. Select **Edit** +4. Select **Edit**. -5. Alter the Message and Template fields as necessary. The Template field is HTML aware and can send HTML formatted notifications to customer emails. +5. Alter the **Message** and **Template** fields as necessary. The **Template** field is HTML aware and can send HTML-formatted email notifications to customers. -6. Select **Save** when finished. +6. Select **Save** when you're finished. ## Migrate data from external data sources to Azure AD B2C -Using TheAccessHub Admin Tool, you can import data from various databases, LDAPs, and CSV files and then push that data to your Azure AD B2C tenant. It's done by loading data into the Azure AD B2C user colleague type within TheAccessHub Admin Tool. If the source of data isn't Azure itself, the data will be placed into both TheAccessHub Admin Tool and Azure AD B2C. If the source of your external data isn't a simple .csv file on your machine, set up a data source before doing the data load. The below steps describe creating a data source and doing the data load. +By using TheAccessHub Admin Tool, you can import data from various databases, LDAPs, and .csv files and then push that data to your Azure AD B2C tenant. 
You migrate the data by loading it into the Azure AD B2C user colleague type within TheAccessHub Admin Tool. + +If the source of data isn't Azure itself, the data will be placed into both TheAccessHub Admin Tool and Azure AD B2C. If the source of your external data isn't a simple .csv file on your machine, set up a data source before doing the data load. The following steps describe creating a data source and loading the data. ### Configure a new data source -1. Log into TheAccessHub Admin Tool using credentials provided to you by N8 Identity +1. Log in to TheAccessHub Admin Tool by using the credentials that N8 Identity has provided. -2. Navigate to **System Admin** > **Data Sources** +2. Go to **System Admin** > **Data Sources**. -3. Select **Add Data Source** +3. Select **Add Data Source**. -4. Supply a **Name** and **Type** for this data source +4. Supply **Name** and **Type** values for this data source. -5. Fill in the form data, depending on your data source type: +5. Fill in the form data, depending on your data source type. - **For databases** + For databases: - a. **Type** – Database + a. For **Type**, enter **Database**. - b. **Database type** – Select a database from one of the supported database types. + b. For **Database type**, select a database from one of the supported database types. - c. **Connection URL** – Enter a well-formatted JDBC connection string. Such as: ``jdbc:postgresql://myhost.com:5432/databasename`` + c. For **Connection URL**, enter a well-formatted JDBC connection string, such as `jdbc:postgresql://myhost.com:5432/databasename`. - d. **Username** – Enter the username for accessing the database + d. For **Username**, enter the username for accessing the database. - e. **Password** – Enter the password for accessing the database + e. For **Password**, enter the password for accessing the database. - f. **Query** – Enter the SQL query to extract the customer details. Such as: ``SELECT * FROM mytable;`` + f. For **Query**, enter the SQL query to extract the customer details, such as `SELECT * FROM mytable;`. - g. Select **Test Connection**, you'll see a sample of your data to ensure the connection is working. + g. Select **Test Connection**. You'll see a sample of your data to ensure that the connection is working. - **For LDAPs** + For LDAPs: - a. **Type** – LDAP + a. For **Type**, enter **LDAP**. - b. **Host** – Enter the hostname or IP for machine in which the LDAP server is running. Such as: ``mysite.com`` + b. For **Host**, enter the host name or IP address for the machine in which the LDAP server is running, such as `mysite.com`. - c. **Port** – Enter the port number in which the LDAP server is listening. + c. For **Port**, enter the port number in which the LDAP server is listening. - d. **SSL** – Check the box if TheAccessHub Admin Tool should communicate to the LDAP securely using SSL. Using SSL is highly recommended. + d. For **SSL**, select the box if TheAccessHub Admin Tool should communicate to the LDAP securely by using SSL. We highly recommend using SSL. - e. **Login DN** – Enter the DN of the user account to log in and do the LDAP search + e. For **Login DN**, enter the distinguished name (DN) of the user account to log in and do the LDAP search. - f. **Password** – Enter the password for the user + f. For **Password**, enter the password for the user. - g. **Base DN** – Enter the DN at the top of the hierarchy in which you want to do the search + g. For **Base DN**, enter the DN at the top of the hierarchy in which you want to do the search. 
- h. **Filter** – Enter the LDAP filter string, which will obtain your customer records + h. For **Filter**, enter the LDAP filter string, which will obtain your customer records. - i. **Attributes** – Enter a comma-separated list of attributes from your customer records to pass to TheAccessHub Admin Tool + i. For **Attributes**, enter a comma-separated list of attributes from your customer records to pass to TheAccessHub Admin Tool. - j. Select the **Test Connection**, you'll see a sample of your data to ensure the connection is working. + j. Select the **Test Connection**. You'll see a sample of your data to ensure that the connection is working. - **For OneDrive** + For OneDrive: - a. **Type** – OneDrive for Business + a. For **Type**, select **OneDrive for Business**. - b. Select **Authorize Connection** + b. Select **Authorize Connection**. - c. A new window will prompt you to log in to **OneDrive**, login with a user with read access to your OneDrive account. TheAccessHub Admin Tool, will act for this user to read CSV load files. + c. A new window prompts you to sign in to OneDrive. Sign in with a user that has read access to your OneDrive account. TheAccessHub Admin Tool will act for this user to read .csv load files. d. Follow the prompts and select **Accept** to grant TheAccessHub Admin Tool the requested permissions. -6. Select **Save** when finished. +6. Select **Save** when you're finished. ### Synchronize data from your data source into Azure AD B2C -1. Log into TheAccessHub Admin Tool using credentials provided to you by N8 Identity +1. Log in to TheAccessHub Admin Tool by using the credentials that N8 Identity has provided. + +2. Go to **System Admin** > **Data Synchronization**. -2. Navigate to **System Admin** > **Data Synchronization** +3. Select **New Load**. -3. Select **New Load** +4. For **Colleague Type**, select **Azure AD B2C User**. -4. Select the **Colleague Type** Azure AD B2C User +5. Select **Source**. In the pop-up dialog, select your data source. If you created a OneDrive data source, also select the file. -5. Select **Source**, in the pop-up dialog, select your data source. If you created a OneDrive data source, also select the file. +6. If you don't want to create new customer accounts with this load, change the first policy (**IF colleague not found in TheAccessHub THEN**) to **Do Nothing**. -6. If you don’t want to create new customer accounts with this load, then change the first policy: **IF colleague not found in TheAccessHub THEN** to **Do Nothing** +7. If you don't want to update existing customer accounts with this load, change the second policy (**IF source and TheAccessHub data mismatch THEN**) to **Do Nothing**. -7. If you don’t want to update existing customer accounts with this load, then change the second policy **IF source and TheAccessHub data mismatch THEN** to **Do Nothing** +8. Select **Next**. -8. Select **Next** +9. In **Search-Mapping configuration**, you identify how to correlate load records with customers already loaded into TheAccessHub Admin Tool. -9. In the **Search-Mapping configuration**, we identify how to correlate load records with customers already loaded into TheAccessHub Admin Tool. Choose one or more identifying attributes in the source. Match the attributes with an attribute in TheAccessHub Admin Tool that holds the same values. If a match is found, then the existing record will be overridden; otherwise, a new customer will be created. You can sequence a number of these checks. 
For example, you could check email first, and then first and last name. + Choose one or more identifying attributes in the source. Match the attributes with an attribute in TheAccessHub Admin Tool that holds the same values. If a match is found, the existing record will be overridden. Otherwise, a new customer will be created. + + You can sequence a number of these checks. For example, you could check email first, and then check first and last name. -10. On the left-hand side menu, select **Data Mapping**. +10. On the left-side menu, select **Data Mapping**. -11. In the Data-Mapping configuration, assign which TheAccessHub Admin Tool attributes should be populated from your source attributes. No need to map all the attributes. Unmapped attributes will remain unchanged for existing customers. +11. In **Data-Mapping configuration**, assign the TheAccessHub Admin Tool attributes that should be populated from your source attributes. There's no need to map all the attributes. Unmapped attributes will remain unchanged for existing customers. -12. If you map to the attribute org_name with a value that is the name of an existing organization, then new customers created will be placed in that organization. +12. If you map to the attribute `org_name` with a value that is the name of an existing organization, newly created customers will be placed in that organization. -13. Select **Next** +13. Select **Next**. -14. A Daily/Weekly or Monthly schedule may be specified if this load should be reoccurring. Otherwise keep the default **Now**. +14. If you want this load to be recurring, specify a **Daily/Weekly** or **Monthly** schedule. Otherwise, keep the default of **Now**. -15. Select **Submit** +15. Select **Submit**. -16. If the **Now schedule** was selected, a new record will be added to the Data Synchronizations screen immediately. Once the validation phase has reached 100%, select the **new record** to see the expected outcome for the load. For scheduled loads, these records will only appear after the scheduled time. +16. If you selected the **Now** schedule, a new record will be added to the **Data Synchronizations** screen immediately. After the validation phase has reached 100 percent, select the new record to see the expected outcome for the load. For scheduled loads, these records will appear only after the scheduled time. -17. If there are no errors, select **Run** to commit the changes. Otherwise, select **Remove** from the **More** menu to remove the load. You can then correct the source data or load mappings and try again. Instead, if the number of errors is small, you can manually update the records and select **Update** on each record to make corrections. Finally, you can continue with any errors and resolve them later as **Support Interventions** in TheAccessHub Admin Tool. +17. If there are no errors, select **Run** to commit the changes. Otherwise, select **Remove** from the **More** menu to remove the load. You can then correct the source data or load mappings and try again. -18. When the **Data Synchronization** record becomes 100% on the load phase, all the changes resulting from the load will have been initiated. Customers should begin appearing or receiving changes in Azure AD B2C. + Instead, if the number of errors is small, you can manually update the records and select **Update** on each record to make corrections. Another option is to continue with any errors and resolve them later as **Support Interventions** in TheAccessHub Admin Tool. + +18. 
When the **Data Synchronization** record becomes 100 percent on the load phase, all the changes resulting from the load have been initiated. Customers should begin appearing or receiving changes in Azure AD B2C. ## Synchronize Azure AD B2C customer data -As a one-time or ongoing operation, TheAccessHub Admin Tool can synchronize all the customer information from Azure AD B2C into TheAccessHub Admin Tool. This ensures that CSR/Helpdesk administrators are seeing up-to-date customer information. +As a one-time or ongoing operation, TheAccessHub Admin Tool can synchronize all the customer information from Azure AD B2C into TheAccessHub Admin Tool. This operation ensures that CSR/Helpdesk administrators see up-to-date customer information. To synchronize data from Azure AD B2C into TheAccessHub Admin Tool: -1. Log into TheAccessHub Admin Tool using credentials provided to you by N8 Identity +1. Log in to TheAccessHub Admin Tool by using the credentials that N8 Identity has provided. -2. Navigate to **System Admin** > **Data Synchronization** +2. Go to **System Admin** > **Data Synchronization**. -3. Select **New Load** +3. Select **New Load**. -4. Select the **Colleague Type** Azure AD B2C User +4. For **Colleague Type**, select **Azure AD B2C User**. 5. For the **Options** step, leave the defaults. -6. Select **Next** +6. Select **Next**. + +7. For the **Data Mapping & Search** step, leave the defaults. Exception: if you map to the attribute `org_name` with a value that is the name of an existing organization, newly created customers will be placed in that organization. -7. For the **Data Mapping & Search** step, leave the defaults. Except if you map to the attribute **org_name** with a value that is the name of an existing organization, then new customers created will be placed in that organization. +8. Select **Next**. -8. Select **Next** +9. If you want this load to be recurring, specify a **Daily/Weekly** or **Monthly** schedule. Otherwise, keep the default of **Now**. We recommend syncing from Azure AD B2C on a regular basis. -9. A Daily/Weekly or Monthly schedule may be specified if this load should be reoccurring. Otherwise keep the default: **Now**. We recommend syncing from Azure AD B2C on a regular basis. +10. Select **Submit**. -10. Select **Submit** +11. If you selected the **Now** schedule, a new record will be added to the **Data Synchronizations** screen immediately. After the validation phase has reached 100 percent, select the new record to see the expected outcome for the load. For scheduled loads, these records will appear only after the scheduled time. -11. If the **Now** schedule was selected, a new record will be added to the Data Synchronizations screen immediately. Once the validation phase has reached 100%, select the new record to see the expected outcome for the load. For scheduled loads, these records will only appear after the scheduled time. +12. If there are no errors, select **Run** to commit the changes. Otherwise, select **Remove** from the **More** menu to remove the load. You can then correct the source data or load mappings and try again. -12. If there are no errors, select **Run** to commit the changes. Otherwise, select **Remove** from the More menu to remove the load. You can then correct the source data or load mappings and try again. Instead, if the number of errors is small, you can manually update the records and select **Update** on each record to make corrections. 
Finally, you can continue with any errors and resolve them later as Support Interventions in TheAccessHub Admin Tool. + Instead, if the number of errors is small, you can manually update the records and select **Update** on each record to make corrections. Another option is to continue with any errors and resolve them later as **Support Interventions** in TheAccessHub Admin Tool. -13. When the **Data Synchronization** record becomes 100% on the load phase, all the changes resulting from the load will have been initiated. +13. When the **Data Synchronization** record becomes 100 percent on the load phase, all the changes resulting from the load have been initiated. ## Configure Azure AD B2C policies -Occasionally syncing TheAccessHub Admin Tool is limited in its ability to keep its state up to date with Azure AD B2C. We can leverage TheAccessHub Admin Tool’s API and Azure AD B2C policies to inform TheAccessHub Admin Tool of changes as they happen. This solution requires technical knowledge of [Azure AD B2C custom policies](./user-flow-overview.md). In the next section, we'll give you an example policy steps and a secure certificate to notify TheAccessHub Admin Tool of new accounts in your Sign-Up custom policies. +Occasional syncing of TheAccessHub Admin Tool limits the tool's ability to keep its state up to date with Azure AD B2C. You can use TheAccessHub Admin Tool's API and Azure AD B2C policies to inform TheAccessHub Admin Tool of changes as they happen. This solution requires technical knowledge of [Azure AD B2C custom policies](./user-flow-overview.md). + +The following procedures give you example policy steps and a secure certificate to notify TheAccessHub Admin Tool of new accounts in your sign-up custom policies. -### Create a secure credential to invoke TheAccessHub Admin Tool’s API +### Create a secure credential to invoke TheAccessHub Admin Tool's API -1. Log into TheAccessHub Admin Tool using credentials provided to you by N8 Identity +1. Log in to TheAccessHub Admin Tool by using the credentials that N8 Identity has provided. -2. Navigate to **System Admin** > **Admin Tools** > **API Security** +2. Go to **System Admin** > **Admin Tools** > **API Security**. -3. Select **Generate** +3. Select **Generate**. -4. Copy the **Certificate Password** +4. Copy the **Certificate Password**. 5. Select **Download** to get the client certificate. -6. Follow this [tutorial](./secure-rest-api.md#https-client-certificate-authentication ) to add the client certificate into Azure AD B2C. +6. Follow [this tutorial](./secure-rest-api.md#https-client-certificate-authentication ) to add the client certificate into Azure AD B2C. ### Retrieve your custom policy examples -1. Log into TheAccessHub Admin Tool using credentials provided to you by N8 Identity +1. Log in to TheAccessHub Admin Tool by using the credentials that N8 Identity has provided. -2. Navigate to **System Admin** > **Admin Tools** > **Azure B2C Policies** +2. Go to **System Admin** > **Admin Tools** > **Azure B2C Policies**. -3. Supply your Azure AD B2C tenant domain and the two Identity Experience Framework IDs from your Identity Experience Framework configuration +3. Supply your Azure AD B2C tenant domain and the two Identity Experience Framework IDs from your Identity Experience Framework configuration. -4. Select **Save** +4. Select **Save**. -5. Select **Download** to get a zip file with basic policies that add customers into TheAccessHub Admin Tool as customers sign up. +5. 
Select **Download** to get a .zip file with basic policies that add customers into TheAccessHub Admin Tool as customers sign up. -6. Follow this [tutorial](./tutorial-create-user-flows.md?pivots=b2c-custom-policy) to get started with designing custom policies in Azure AD B2C. +6. Follow [this tutorial](./tutorial-create-user-flows.md?pivots=b2c-custom-policy) to get started with designing custom policies in Azure AD B2C. ## Next steps -For additional information, review the following articles: +For more information, review the following articles: - [Custom policies in Azure AD B2C](./custom-policy-overview.md) diff --git a/articles/active-directory-b2c/partner-xid.md b/articles/active-directory-b2c/partner-xid.md index 800a397d38116..625d3d1ad2139 100644 --- a/articles/active-directory-b2c/partner-xid.md +++ b/articles/active-directory-b2c/partner-xid.md @@ -41,27 +41,36 @@ The following architecture diagram shows the implementation. ![image shows the architecture diagram](./media/partner-xid/partner-xid-architecture-diagram.png) -| Step | Description | -|:--------|:--------| -| 1. |User opens Azure AD B2C's sign-in page and then signs in or signs up by entering their username. | -| 2. |Azure AD B2C redirects the user to xID authorize API endpoint using an OpenID Connect (OIDC) request. An OIDC endpoint is available containing information about the endpoints. xID Identity provider (IdP) redirects the user to the xID authorization sign-in page allowing the user to fill in or select their email address. | -| 3. |xID IdP sends the push notification to the user's mobile device. | -| 4. |The user opens the xID app, checks the request, then enters the PIN or authenticates with their biometrics. If PIN or biometrics is successfully verified, xID app activates the private key and creates an electronic signature. | -| 5. |xID app sends the signature to xID IdP for verification. | -| 6. |xID IdP shows a consent screen to the user, requesting authorization to give their personal information to the service they're signing in. | -| 7. |xID IdP returns the OAuth authorization code to Azure AD B2C. | -| 8. | Azure AD B2C sends a token request using the authorization code. | -| 9. |xID IdP checks the token request and, if still valid, returns the OAuth access token and the ID token containing the requested user's identifier and email address. | -| 10. |In addition, if the user's customer content is needed, Azure AD B2C calls the xID userdata API. | -| 11. |The xID userdata API returns the user's encrypted customer content. Users can decrypt it with their private key, which they create when requesting the xID client information. | -| 12. | User is either granted or denied access to the customer application based on the verification results. | +| Step | Description | +| :--- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 1. | User opens Azure AD B2C's sign-in page and then signs in or signs up by entering their username. | +| 2. | Azure AD B2C redirects the user to xID authorize API endpoint using an OpenID Connect (OIDC) request. An OIDC endpoint is available containing information about the endpoints. 
xID Identity provider (IdP) redirects the user to the xID authorization sign-in page allowing the user to fill in or select their email address. | +| 3. | xID IdP sends the push notification to the user's mobile device. | +| 4. | The user opens the xID app, checks the request, then enters the PIN or authenticates with their biometrics. If PIN or biometrics is successfully verified, xID app activates the private key and creates an electronic signature. | +| 5. | xID app sends the signature to xID IdP for verification. | +| 6. | xID IdP shows a consent screen to the user, requesting authorization to give their personal information to the service they're signing in. | +| 7. | xID IdP returns the OAuth authorization code to Azure AD B2C. | +| 8. | Azure AD B2C sends a token request using the authorization code. | +| 9. | xID IdP checks the token request and, if still valid, returns the OAuth access token and the ID token containing the requested user's identifier and email address. | +| 10. | In addition, if the user's customer content is needed, Azure AD B2C calls the xID userdata API. | +| 11. | The xID userdata API returns the user's encrypted customer content. Users can decrypt it with their private key, which they create when requesting the xID client information. | +| 12. | User is either granted or denied access to the customer application based on the verification results. | ## Onboard with xID Request API documents by filling out [the request form](https://xid.inc/contact-us). In the message field, indicate that you'd like to onboard with Azure AD B2C. Then, an xID sales representative will contact you. Follow the instructions provided in the xID API document and request an xID API client. xID tech team will send client information to you in 3-4 working days. +Supply redirect URI. This is the URI in your site to which the user is returned after a successful authentication. The URI that should be provided to xID for your Azure AD B2C follows the pattern - `https://.b2clogin.com/.onmicrosoft.com/oauth2/authresp`. -## Step 1: Create a xID policy key +## Step 1: Register a web application in Azure AD B2C + +Before your [applications](application-types.md) can interact with Azure AD B2C, they must be registered in a tenant that you manage. + +For testing purposes like this tutorial, you're registering `https://jwt.ms`, a Microsoft-owned web application that displays the decoded contents of a token (the contents of the token never leave your browser). + +Follow the steps mentioned in [this tutorial](tutorial-register-applications.md?tabs=app-reg-ga) to **register a web application** and **enable ID token implicit grant** for testing a user flow or custom policy. There's no need to create a Client Secret at this time. + +## Step 2: Create a xID policy key Store the client secret that you received from xID in your Azure AD B2C tenant. @@ -92,19 +101,19 @@ Store the client secret that you received from xID in your Azure AD B2C tenant. >[!NOTE] >In Azure AD B2C, [**custom policies**](./user-flow-overview.md) are designed primarily to address complex scenarios. -## Step 2: Configure xID as an Identity provider +## Step 3: Configure xID as an Identity provider To enable users to sign in using xID, you need to define xID as a claims provider that Azure AD B2C can communicate with through an endpoint. The endpoint provides a set of claims Azure AD B2C uses to verify that a specific user has authenticated using digital identity available on their device. Proving the user's identity. 
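+
+Before working through the steps, it can help to see the overall shape of an OpenID Connect claims provider for xID in outline. The following fragment is an illustrative sketch only, not the policy shipped with the starter pack: the metadata URL, scope, and key container name are placeholders, while the `X-ID-Oauth2` profile ID and `client_secret_basic` authentication method are taken from the snippets later in this article. Use the XML provided by the starter pack and by xID as the authoritative version.
+
+```xml
+<ClaimsProvider>
+  <Domain>x-id.io</Domain>
+  <DisplayName>xID</DisplayName>
+  <TechnicalProfiles>
+    <TechnicalProfile Id="X-ID-Oauth2">
+      <DisplayName>xID</DisplayName>
+      <Protocol Name="OpenIdConnect" />
+      <Metadata>
+        <!-- Placeholder values for illustration only; take the real values from the starter pack and from xID. -->
+        <Item Key="METADATA">https://oidc-uat.x-id.io/.well-known/openid-configuration</Item>
+        <Item Key="client_id">your-xid-client-id</Item>
+        <Item Key="response_types">code</Item>
+        <Item Key="scope">openid</Item>
+        <Item Key="response_mode">query</Item>
+        <Item Key="HttpBinding">POST</Item>
+        <Item Key="token_endpoint_auth_method">client_secret_basic</Item>
+      </Metadata>
+      <CryptographicKeys>
+        <!-- Hypothetical key container name; reference the policy key you created in step 2. -->
+        <Key Id="client_secret" StorageReferenceId="B2C_1A_XIDSecret" />
+      </CryptographicKeys>
+    </TechnicalProfile>
+  </TechnicalProfiles>
+</ClaimsProvider>
+```
+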
Use the following steps to add xID as a claims provider: -1. Get the custom policy starter packs from GitHub, then update the XML files in the SocialAndLocalAccounts starter pack with your Azure AD B2C tenant name: +1. Get the custom policy starter packs from GitHub, then update the XML files in the SocialAccounts starter pack with your Azure AD B2C tenant name: i. Download the [.zip file](https://github.com/Azure-Samples/active-directory-b2c-custom-policy-starterpack/archive/master.zip) or [clone the repository](https://github.com/Azure-Samples/active-directory-b2c-custom-policy-starterpack). - ii. In all of the files in the **LocalAccounts** directory, replace the string `yourtenant` with the name of your Azure AD B2C tenant. For example, if the name of your B2C tenant is `contoso`, all instances of `yourtenant.onmicrosoft.com` become `contoso.onmicrosoft.com`. + ii. In all of the files in the **SocialAccounts** directory, replace the string `yourtenant` with the name of your Azure AD B2C tenant. For example, if the name of your B2C tenant is `contoso`, all instances of `yourtenant.onmicrosoft.com` become `contoso.onmicrosoft.com`. -2. Open the `LocalAccounts/ TrustFrameworkExtensions.xml`. +2. Open the `SocialAccounts/TrustFrameworkExtensions.xml`. 3. Find the **ClaimsProviders** element. If it doesn't exist, add it under the root element. @@ -132,9 +141,10 @@ Use the following steps to add xID as a claims provider: true client_secret_basic https://oidc-uat.x-id.io/userinfo + https://oidc-uat.x-id.io/ - + @@ -142,7 +152,7 @@ Use the following steps to add xID as a claims provider: - + @@ -201,9 +211,9 @@ Use the following steps to add xID as a claims provider: 5. Save the changes. -## Step 3: Add a user journey +## Step 4: Add a user journey -At this point, you've set up the identity provider, but it's not yet available on any of the sign-in pages. If you have a custom user journey, continue to [step 4](#step-4-add-the-identity-provider-to-a-user-journey). Otherwise, create a duplicate of an existing template user journey as follows: +At this point, you've set up the identity provider, but it's not yet available on any of the sign-in pages. If you have a custom user journey, continue to [step 5](#step-5-add-the-identity-provider-to-a-user-journey). Otherwise, create a duplicate of an existing template user journey as follows: 1. Open the `TrustFrameworkBase.xml` file from the starter pack. @@ -215,19 +225,21 @@ At this point, you've set up the identity provider, but it's not yet available o 5. Rename the ID of the user journey. For example, `ID=CustomSignUpSignIn` -## Step 4: Add the identity provider to a user journey +## Step 5: Add the identity provider to a user journey Now that you have a user journey add the new identity provider to the user journey. 1. Find the orchestration step element that includes Type=`CombinedSignInAndSignUp`, or Type=`ClaimsProviderSelection` in the user journey. It's usually the first orchestration step. The **ClaimsProviderSelections** element contains a list of identity providers used for signing in. The order of the elements controls the order of the sign-in buttons presented to the user. Add a **ClaimsProviderSelection** XML element. Set the value of **TargetClaimsExchangeId** to a friendly name, such as `X-IDExchange`. -2. In the next orchestration step, add a **ClaimsExchange** element. Set the **Id** to the value of the target claims exchange ID to link the xID button to `X-ID-SignIn` action. 
Next, update the value of **TechnicalProfileReferenceId** to the ID of the technical profile you created earlier. +2. In the next orchestration step, add a **ClaimsExchange** element. Set the **Id** to the value of the target claims exchange ID to link the xID button to `X-IDExchange` action. Next, update the value of **TechnicalProfileReferenceId** to the ID of the technical profile you created earlier `X-ID-Oauth2`. - The following XML demonstrates the orchestration steps of a user journey with the identity provider: +3. Add a new Orchestration step to call xID UserInfo endpoint to return claims about the authenticated user `X-ID-Userdata`. + + The following XML demonstrates the orchestration steps of a user journey with xID identity provider: ```xml - + @@ -291,31 +303,119 @@ Now that you have a user journey add the new identity provider to the user journ ``` -## Step 5: Upload the custom policy +There are additional identity claims that xID supports and are referenced as part of the policy. Claims schema is the place where you declare these claims. ClaimsSchema element contains list of ClaimType elements. The ClaimType element contains the Id attribute, which is the claim name. -1. Sign in to the [Azure portal](https://portal.azure.com/#home). +1. Open the `TrustFrameworksExtension.xml` -2. Make sure you're using the directory that contains your Azure AD B2C tenant: +2. Find the `BuildingBlocks` element. - a. Select the **Directories + subscriptions** icon in the portal toolbar. +3. Add the following ClaimType element in the **ClaimsSchema** element of your `TrustFrameworksExtension.xml` policy - b. On the **Portal settings | Directories + subscriptions** page, find your Azure AD B2C directory in the **Directory name** list, and select **Switch**. - -3. In the [Azure portal](https://portal.azure.com/#home), search for and select **Azure AD B2C**. - -4. Under Policies, select **Identity Experience Framework**. - -5. Select **Upload Custom Policy**, and then upload the files in the **LocalAccounts** starter pack in the following order: the extension policy, for example, `TrustFrameworkExtensions.xml`, then the relying party policy, such as `SignUpSignIn.xml`. +```xml + + + + + sid + string + + + userdataid + string + + + xid_verified + boolean + + + email_verified + boolean + + + Identity Provider Access Token + string + Stores the access token of the identity provider. + + + last_name + string + + + first_name + string + + + previous_name + string + + + year + string + + + month + string + + + date + string + + + prefecture + string + + + city + string + + + address + string + + + sub_char_common_name + string + + + sub_char_previous_name + string + + + sub_char_address + string + + + verified_at + int + + + Gender + string + + + + The user's gender. + Your gender. + TextBox + + + correlation ID + string + + + + +``` ## Step 6: Configure the relying party policy The relying party policy, for example [SignUpSignIn.xml](https://github.com/Azure-Samples/active-directory-b2c-custom-policy-starterpack/blob/main/LocalAccounts/SignUpOrSignin.xml), specifies the user journey which Azure AD B2C will execute. First, find the **DefaultUserJourney** element within the relying party. Then, update the **ReferenceId** to match the user journey ID you added to the identity provider. 
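+
+At its simplest, the fragment you're editing looks like the following sketch (illustrative only; the full relying party file comes from the starter pack, and the elements omitted here are left unchanged):
+
+```xml
+<RelyingParty>
+  <DefaultUserJourney ReferenceId="CombinedSignInAndSignUp" />
+  <TechnicalProfile Id="PolicyProfile">
+    <DisplayName>PolicyProfile</DisplayName>
+    <Protocol Name="OpenIdConnect" />
+    <!-- Output claims and other elements from the starter pack are unchanged. -->
+  </TechnicalProfile>
+</RelyingParty>
+```
+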
-In the following example, for the `X-IDSignUpOrSignIn` user journey, the **ReferenceId** is set to `X-IDSignUpOrSignIn`: +In the following example, for the xID user journey, the **ReferenceId** is set to `CombinedSignInAndSignUp`: ```xml - + PolicyProfile @@ -350,8 +450,26 @@ In the following example, for the `X-IDSignUpOrSignIn` user journey, the **Refer ``` +## Step 7: Upload the custom policy + +1. Sign in to the [Azure portal](https://portal.azure.com/#home). + +2. Make sure you're using the directory that contains your Azure AD B2C tenant: + + a. Select the **Directories + subscriptions** icon in the portal toolbar. + + b. On the **Portal settings | Directories + subscriptions** page, find your Azure AD B2C directory in the **Directory name** list, and select **Switch**. + +3. In the [Azure portal](https://portal.azure.com/#home), search for and select **Azure AD B2C**. + +4. Under Policies, select **Identity Experience Framework**. + +5. Select **Upload Custom Policy**, and then upload the files in the following order: + 1. `TrustFrameworkBase.xml`, the base policy file + 2. `TrustFrameworkExtensions.xml`, the extension policy + 3. `SignUpSignIn.xml`, then the relying party policy -## Step 7: Test your custom policy +## Step 8: Test your custom policy 1. In your Azure AD B2C tenant, and under **Policies**, select **Identity Experience Framework**. diff --git a/articles/active-directory-b2c/quickstart-web-app-dotnet.md b/articles/active-directory-b2c/quickstart-web-app-dotnet.md index 3336f333b6f40..3e842f3a13a9b 100644 --- a/articles/active-directory-b2c/quickstart-web-app-dotnet.md +++ b/articles/active-directory-b2c/quickstart-web-app-dotnet.md @@ -85,7 +85,7 @@ Azure Active Directory B2C provides functionality to allow users to update their The ASP.NET web application includes an Azure AD access token in the request to the protected web API resource to perform operations on the user's to-do list items. -You've successfully used your Azure AD B2C user account to make an authorized call an Azure AD B2C protected web API. +You've successfully used your Azure AD B2C user account to make an authorized call to an Azure AD B2C protected web API. ## Next steps diff --git a/articles/active-directory-b2c/relyingparty.md b/articles/active-directory-b2c/relyingparty.md index f965f657fe650..f5e37a3327907 100644 --- a/articles/active-directory-b2c/relyingparty.md +++ b/articles/active-directory-b2c/relyingparty.md @@ -43,7 +43,7 @@ The following example shows a **RelyingParty** element in the *B2C_1A_signup_sig Rolling - 300 + 900 {OAUTH-KV:campaignId} diff --git a/articles/active-directory-b2c/restful-technical-profile.md b/articles/active-directory-b2c/restful-technical-profile.md index f6df2f3e18ca9..afc11c5d030fb 100644 --- a/articles/active-directory-b2c/restful-technical-profile.md +++ b/articles/active-directory-b2c/restful-technical-profile.md @@ -9,7 +9,7 @@ manager: CelesteDG ms.service: active-directory ms.workload: identity ms.topic: reference -ms.date: 05/03/2021 +ms.date: 06/08/2022 ms.author: kengaderdus ms.subservice: B2C --- @@ -223,7 +223,7 @@ If the type of authentication is set to `ApiKeyHeader`, the **CryptographicKeys* | The name of the HTTP header, such as `x-functions-key`, or `x-api-key`. | Yes | The key that is used to authenticate. | > [!NOTE] -> At this time, Azure AD B2C supports only one HTTP header for authentication. If your RESTful call requires multiple headers, such as a client ID and client secret, you will need to proxy the request in some manner. 
+> At this time, Azure AD B2C supports only one HTTP header for authentication. If your RESTful call requires multiple headers, such as a client ID and client secret value, you will need to proxy the request in some manner. ```xml diff --git a/articles/active-directory-b2c/saml-identity-provider-technical-profile.md b/articles/active-directory-b2c/saml-identity-provider-technical-profile.md index 02f41ed3246db..903daf029686e 100644 --- a/articles/active-directory-b2c/saml-identity-provider-technical-profile.md +++ b/articles/active-directory-b2c/saml-identity-provider-technical-profile.md @@ -176,7 +176,7 @@ The **CryptographicKeys** element contains the following attributes: | Attribute |Required | Description | | --------- | ----------- | ----------- | | SamlMessageSigning |Yes | The X509 certificate (RSA key set) to use to sign SAML messages. Azure AD B2C uses this key to sign the requests and send them to the identity provider. | -| SamlAssertionDecryption |No | The X509 certificate (RSA key set). A SAML identity provider uses the public portion of the certificate to encrypt the assertion of the SAML response. Azure AD B2C uses the private portion of the certificate to decrypt the assertion. | +| SamlAssertionDecryption |No* | The X509 certificate (RSA key set). A SAML identity provider uses the public portion of the certificate to encrypt the assertion of the SAML response. Azure AD B2C uses the private portion of the certificate to decrypt the assertion.

* Required if the external IDP encrypts SAML assertions.| | MetadataSigning |No | The X509 certificate (RSA key set) to use to sign SAML metadata. Azure AD B2C uses this key to sign the metadata. | ## Next steps diff --git a/articles/active-directory-b2c/secure-rest-api.md b/articles/active-directory-b2c/secure-rest-api.md index e91fa2c3bf02e..ca1a8aa87c150 100644 --- a/articles/active-directory-b2c/secure-rest-api.md +++ b/articles/active-directory-b2c/secure-rest-api.md @@ -9,7 +9,7 @@ manager: CelesteDG ms.service: active-directory ms.workload: identity ms.topic: how-to -ms.date: 04/05/2022 +ms.date: 06/08/2022 ms.author: kengaderdus ms.subservice: B2C zone_pivot_groups: b2c-policy-type @@ -303,7 +303,7 @@ For a client credentials flow, you need to create an application secret. The cli #### Create Azure AD B2C policy keys -You need to store the client ID and the client secret that you previously recorded in your Azure AD B2C tenant. +You need to store the client ID and the client secret value that you previously recorded in your Azure AD B2C tenant. 1. Sign in to the [Azure portal](https://portal.azure.com/). 1. Make sure you're using the directory that contains your Azure AD B2C tenant. Select the **Directories + subscriptions** icon in the portal toolbar. @@ -484,9 +484,22 @@ The following XML snippet is an example of a RESTful technical profile configure ``` -::: zone-end +Add the validation technical profile reference to the sign up technical profile, which calls the `REST-AcquireAccessToken`. This behavior means that Azure AD B2C moves on to create the account in the directory only after successful validation. + +For example: + ```XML + + .... + + .... + + ``` + + +::: zone-end + ## API key authentication ::: zone pivot="b2c-user-flow" diff --git a/articles/active-directory-b2c/string-transformations.md b/articles/active-directory-b2c/string-transformations.md index 430b002d028cb..8a578bf13488d 100644 --- a/articles/active-directory-b2c/string-transformations.md +++ b/articles/active-directory-b2c/string-transformations.md @@ -212,7 +212,7 @@ Determines whether a claim value is equal to the input parameter value. Check ou | InputClaim | inputClaim1 | string | The claim's type, which is to be compared. | | InputParameter | operator | string | Possible values: `EQUAL` or `NOT EQUAL`. | | InputParameter | compareTo | string | String comparison, one of the values: Ordinal, OrdinalIgnoreCase. | -| InputParameter | ignoreCase | boolean | Specifies whether this comparison should ignore the case of the strings being compared. | +| InputParameter | ignoreCase | string | Specifies whether this comparison should ignore the case of the strings being compared. | | OutputClaim | outputClaim | boolean | The claim that is produced after this claims transformation has been invoked. | ### Example of CompareClaimToValue @@ -227,7 +227,7 @@ Use this claims transformation to check if a claim is equal to a value you speci - + diff --git a/articles/active-directory-b2c/view-audit-logs.md b/articles/active-directory-b2c/view-audit-logs.md index 852384b0d72f7..0cd9c92d35f4a 100644 --- a/articles/active-directory-b2c/view-audit-logs.md +++ b/articles/active-directory-b2c/view-audit-logs.md @@ -9,7 +9,7 @@ manager: CelesteDG ms.service: active-directory ms.topic: how-to ms.workload: identity -ms.date: 02/20/2020 +ms.date: 06/08/2022 ms.author: kengaderdus ms.subservice: B2C ms.custom: fasttrack-edit @@ -114,7 +114,7 @@ You can try this script in the [Azure Cloud Shell](overview.md). 
Be sure to upda # Constants $ClientID = "your-client-application-id-here" # Insert your application's client ID, a GUID -$ClientSecret = "your-client-application-secret-here" # Insert your application's client secret +$ClientSecret = "your-client-application-secret-here" # Insert your application's client secret value $tenantdomain = "your-b2c-tenant.onmicrosoft.com" # Insert your Azure AD B2C tenant domain name $loginURL = "https://login.microsoftonline.com" diff --git a/articles/active-directory-b2c/whats-new-docs.md b/articles/active-directory-b2c/whats-new-docs.md index f39cea7f8c019..fa5ce2b39a44e 100644 --- a/articles/active-directory-b2c/whats-new-docs.md +++ b/articles/active-directory-b2c/whats-new-docs.md @@ -15,6 +15,17 @@ manager: CelesteDG Welcome to what's new in Azure Active Directory B2C documentation. This article lists new docs that have been added and those that have had significant updates in the last three months. To learn what's new with the B2C service, see [What's new in Azure Active Directory](../active-directory/fundamentals/whats-new.md). + +## May 2022 + +### Updated articles + +- [Set redirect URLs to b2clogin.com for Azure Active Directory B2C](b2clogin.md) +- [Enable custom domains for Azure Active Directory B2C](custom-domain.md) +- [Configure xID with Azure Active Directory B2C for passwordless authentication](partner-xid.md) +- [UserJourneys](userjourneys.md) +- [Secure your API used an API connector in Azure AD B2C](secure-rest-api.md) + ## April 2022 ### New articles diff --git a/articles/active-directory/app-provisioning/application-provisioning-config-problem-scim-compatibility.md b/articles/active-directory/app-provisioning/application-provisioning-config-problem-scim-compatibility.md index ab3a977ab598e..a1a0e2a7fabff 100644 --- a/articles/active-directory/app-provisioning/application-provisioning-config-problem-scim-compatibility.md +++ b/articles/active-directory/app-provisioning/application-provisioning-config-problem-scim-compatibility.md @@ -8,7 +8,7 @@ ms.service: active-directory ms.subservice: app-provisioning ms.workload: identity ms.topic: reference -ms.date: 08/25/2021 +ms.date: 05/25/2022 ms.author: kenwith ms.reviewer: arvinh --- @@ -27,10 +27,10 @@ The provisioning service uses the concept of a job to operate against an applica If you are using an application in the gallery, the job generally contains the name of the app (e.g. zoom snowFlake, dataBricks, etc.). You can skip this documentation when using a gallery application. This primarily applies for non-gallery applications with jobID SCIM or customAppSSO. ## SCIM 2.0 compliance issues and status -In the table below, any item marked as fixed means that the proper behavior can be found on the SCIM job. We have worked to ensure backwards compatibility for the changes we have made. However, we do not recommend implementing old behavior. We recommend using the new behavior for any new implementations and updating existing implementations. +In the table below, any item marked as fixed means that the proper behavior can be found on the SCIM job. We have worked to ensure backwards compatibility for the changes we have made. We recommend using the new behavior for any new implementations and updating existing implementations. Please note that the customappSSO behavior that was the default prior to December 2018 is not supported anymore. > [!NOTE] -> For the changes made in 2018, you can revert back to the customappsso behavior. 
For the changes made since 2018, you can use the URLs to revert back to the older behavior. We have worked to ensure backwards compatibility for the changes we have made by allowing you to revert back to the old jobID or by using a flag. However, as previously mentioned, we do not recommend implementing old behavior. We recommend using the new behavior for any new implementations and updating existing implementations. +> For the changes made in 2018, it is possible to revert back to the customappsso behavior. For the changes made since 2018, you can use the URLs to revert back to the older behavior. We have worked to ensure backwards compatibility for the changes we have made by allowing you to revert back to the old jobID or by using a flag. However, as previously mentioned, we do not recommend implementing old behavior as it is not supported anymore. We recommend using the new behavior for any new implementations and updating existing implementations. | **SCIM 2.0 compliance issue** | **Fixed?** | **Fix date** | **Backwards compatibility** | |---|---|---| diff --git a/articles/active-directory/app-provisioning/check-status-user-account-provisioning.md b/articles/active-directory/app-provisioning/check-status-user-account-provisioning.md index d54aaefdc7ee7..224bd013a5581 100644 --- a/articles/active-directory/app-provisioning/check-status-user-account-provisioning.md +++ b/articles/active-directory/app-provisioning/check-status-user-account-provisioning.md @@ -8,7 +8,7 @@ ms.service: active-directory ms.subservice: app-provisioning ms.workload: identity ms.topic: how-to -ms.date: 05/11/2021 +ms.date: 05/30/2022 ms.author: kenwith ms.reviewer: arvinh --- @@ -21,10 +21,15 @@ This article describes how to check the status of provisioning jobs after they h ## Overview -Provisioning connectors are set up and configured using the [Azure portal](https://portal.azure.com), by following the [provided documentation](../saas-apps/tutorial-list.md) for the supported application. Once configured and running, provisioning jobs can be reported on using one of two methods: +Provisioning connectors are set up and configured using the [Azure portal](https://portal.azure.com), by following the [provided documentation](../saas-apps/tutorial-list.md) for the supported application. Once configured and running, provisioning jobs can be reported on using the following methods: -* **Azure portal** - This article primarily describes retrieving report information from the [Azure portal](https://portal.azure.com), which provides both a provisioning summary report as well as detailed provisioning audit logs for a given application. -* **Audit API** - Azure Active Directory also provides an Audit API that enables programmatic retrieval of the detailed provisioning audit logs. See [Azure Active Directory audit API reference](/graph/api/resources/directoryaudit) for documentation specific to using this API. While this article does not specifically cover how to use the API, it does detail the types of provisioning events that are recorded in the audit log. +- The [Azure portal](https://portal.azure.com) + +- Streaming the provisioning logs into [Azure Monitor](../app-provisioning/application-provisioning-log-analytics.md). This method allows for extended data retention and building custom dashboards, alerts, and queries. + +- Querying the [Microsoft Graph API](/graph/api/resources/provisioningobjectsummary) for the provisioning logs. + +- Downloading the provisioning logs as a CSV or JSON file. 
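+
+For example, the Microsoft Graph option can be scripted. The following PowerShell snippet is a minimal sketch rather than an official sample: it assumes you already have an access token in `$token` that was issued to an app with the `AuditLog.Read.All` permission, and the properties selected may vary slightly by Graph version.
+
+```powershell
+# Query the most recent provisioning events through Microsoft Graph.
+$uri = "https://graph.microsoft.com/v1.0/auditLogs/provisioning?`$top=50"
+$response = Invoke-RestMethod -Uri $uri -Headers @{ Authorization = "Bearer $token" }
+
+# Each entry is a provisioningObjectSummary; list when it ran, the job, and the action taken.
+$response.value | Select-Object activityDateTime, jobId, provisioningAction
+```
+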
### Definitions @@ -35,7 +40,7 @@ This article uses the following terms, defined below: ## Getting provisioning reports from the Azure portal -To get provisioning report information for a given application, start by launching the [Azure portal](https://portal.azure.com) and **Azure Active Directory** > **Enterprise Apps** > **Provisioning logs (preview)** in the **Activity** section. You can also browse to the Enterprise Application for which provisioning is configured. For example, if you are provisioning users to LinkedIn Elevate, the navigation path to the application details is: +To get provisioning report information for a given application, start by launching the [Azure portal](https://portal.azure.com) and **Azure Active Directory** > **Enterprise Apps** > **Provisioning logs** in the **Activity** section. You can also browse to the Enterprise Application for which provisioning is configured. For example, if you are provisioning users to LinkedIn Elevate, the navigation path to the application details is: **Azure Active Directory > Enterprise Applications > All applications > LinkedIn Elevate** @@ -54,10 +59,10 @@ The **Current Status** should be the first place admins look to check on the ope  ![Summary report](./media/check-status-user-account-provisioning/provisioning-progress-bar-section.png) -## Provisioning logs (preview) +## Provisioning logs + +All activities performed by the provisioning service are recorded in the Azure AD [provisioning logs](../reports-monitoring/concept-provisioning-logs.md?context=azure/active-directory/manage-apps/context/manage-apps-context). You can access the provisioning logs in the Azure portal by selecting **Azure Active Directory** > **Enterprise Apps** > **Provisioning logs ** in the **Activity** section. You can search the provisioning data based on the name of the user or the identifier in either the source system or the target system. For details, see [Provisioning logs](../reports-monitoring/concept-provisioning-logs.md?context=azure/active-directory/manage-apps/context/manage-apps-context). -All activities performed by the provisioning service are recorded in the Azure AD [provisioning logs](../reports-monitoring/concept-provisioning-logs.md?context=azure/active-directory/manage-apps/context/manage-apps-context). You can access the provisioning logs in the Azure portal by selecting **Azure Active Directory** > **Enterprise Apps** > **Provisioning logs (preview)** in the **Activity** section. You can search the provisioning data based on the name of the user or the identifier in either the source system or the target system. For details, see [Provisioning logs (preview)](../reports-monitoring/concept-provisioning-logs.md?context=azure/active-directory/manage-apps/context/manage-apps-context). 
-Logged activity event types include: ## Troubleshooting @@ -68,4 +73,4 @@ For scenario-based guidance on how to troubleshoot automatic user provisioning, ## Additional Resources * [Managing user account provisioning for Enterprise Apps](configure-automatic-user-provisioning-portal.md) -* [What is application access and single sign-on with Azure Active Directory?](../manage-apps/what-is-single-sign-on.md) \ No newline at end of file +* [What is application access and single sign-on with Azure Active Directory?](../manage-apps/what-is-single-sign-on.md) diff --git a/articles/active-directory/app-provisioning/index.yml b/articles/active-directory/app-provisioning/index.yml index ee755366806b7..f58a38c05446a 100644 --- a/articles/active-directory/app-provisioning/index.yml +++ b/articles/active-directory/app-provisioning/index.yml @@ -64,9 +64,4 @@ landingContent: url: ../saas-apps/workday-inbound-tutorial.md?context=%2fazure%2factive-directory%2fapp-provisioning%2fcontext%2fapp-provisioning-context - text: SAP SuccessFactors provisioning url: ../saas-apps/sap-successfactors-inbound-provisioning-cloud-only-tutorial.md?context=%2fazure%2factive-directory%2fapp-provisioning%2fcontext%2fapp-provisioning-context - - title: New in docs - linkLists: - - linkListType: whats-new - links: - - text: What's new in docs - url: whats-new-docs.md \ No newline at end of file + diff --git a/articles/active-directory/app-provisioning/on-premises-application-provisioning-architecture.md b/articles/active-directory/app-provisioning/on-premises-application-provisioning-architecture.md index 27b2d2c2aa64d..f6580cdfaf66a 100644 --- a/articles/active-directory/app-provisioning/on-premises-application-provisioning-architecture.md +++ b/articles/active-directory/app-provisioning/on-premises-application-provisioning-architecture.md @@ -142,6 +142,14 @@ Microsoft provides direct support for the latest agent version and one version b ### Download link You can download the latest version of the agent using [this link](https://aka.ms/onpremprovisioningagent). +### 1.1.892.0 + +May 20th, 2022 - released for download + +#### Fixed issues + +- We added support for exporting changes to integer attributes, which benefits customers using the generic LDAP connector. + ### 1.1.846.0 April 11th, 2022 - released for download diff --git a/articles/active-directory/app-provisioning/plan-auto-user-provisioning.md b/articles/active-directory/app-provisioning/plan-auto-user-provisioning.md index ea9e26ef57606..92be6e9857896 100644 --- a/articles/active-directory/app-provisioning/plan-auto-user-provisioning.md +++ b/articles/active-directory/app-provisioning/plan-auto-user-provisioning.md @@ -237,7 +237,7 @@ It's common for a security review to be required as part of a deployment. If you If the automatic user provisioning implementation fails to work as desired in the production environment, the following rollback steps below can assist you in reverting to a previous known good state: -1. Review the [provisioning summary report](../app-provisioning/check-status-user-account-provisioning.md) and [provisioning logs](../app-provisioning/check-status-user-account-provisioning.md#provisioning-logs-preview) to determine what incorrect operations occurred on the affected users and/or groups. +1. Review the [provisioning logs](../app-provisioning/check-status-user-account-provisioning.md) to determine what incorrect operations occurred on the affected users and/or groups. 1. 
Use provisioning audit logs to determine the last known good state of the users and/or groups affected. Also review the source systems (Azure AD or AD). @@ -324,4 +324,4 @@ Refer to the following links to troubleshoot any issues that may turn up during * [Export or import your provisioning configuration by using Microsoft Graph API](../app-provisioning/export-import-provisioning-configuration.md) -* [Writing expressions for attribute mappings in Azure Active directory](../app-provisioning/functions-for-customizing-application-data.md) \ No newline at end of file +* [Writing expressions for attribute mappings in Azure Active directory](../app-provisioning/functions-for-customizing-application-data.md) diff --git a/articles/active-directory/app-provisioning/plan-cloud-hr-provision.md b/articles/active-directory/app-provisioning/plan-cloud-hr-provision.md index 3d32f5721e181..a3c3986ef7b12 100644 --- a/articles/active-directory/app-provisioning/plan-cloud-hr-provision.md +++ b/articles/active-directory/app-provisioning/plan-cloud-hr-provision.md @@ -463,7 +463,7 @@ It's common for a security review to be required as part of the deployment of a The cloud HR user provisioning implementation might fail to work as desired in the production environment. If so, the following rollback steps can assist you in reverting to a previous known good state. -1. Review the [provisioning summary report](../app-provisioning/check-status-user-account-provisioning.md#getting-provisioning-reports-from-the-azure-portal) and [provisioning logs](../app-provisioning/check-status-user-account-provisioning.md#provisioning-logs-preview) to determine what incorrect operations were performed on the affected users or groups. For more information on the provisioning summary report and logs, see [Manage cloud HR app user provisioning](#manage-your-configuration). +1. Review the [provisioning logs](../app-provisioning/check-status-user-account-provisioning.md#provisioning-logs) to determine what incorrect operations were performed on the affected users or groups. For more information on the provisioning summary report and logs, see [Manage cloud HR app user provisioning](#manage-your-configuration). 2. The last known good state of the users or groups affected can be determined through the provisioning audit logs or by reviewing the target systems (Azure AD or Active Directory). 3. Work with the app owner to update the users or groups affected directly in the app by using the last known good state values. diff --git a/articles/active-directory/app-provisioning/toc.yml b/articles/active-directory/app-provisioning/toc.yml index c02f878695ae1..5746f05dd9a3e 100644 --- a/articles/active-directory/app-provisioning/toc.yml +++ b/articles/active-directory/app-provisioning/toc.yml @@ -8,8 +8,6 @@ items: href: user-provisioning.md - name: What is HR-driven provisioning? href: what-is-hr-driven-provisioning.md - - name: What's new in docs? 
- href: whats-new-docs.md - name: Tutorials expanded: true items: diff --git a/articles/active-directory/app-provisioning/use-scim-to-provision-users-and-groups.md b/articles/active-directory/app-provisioning/use-scim-to-provision-users-and-groups.md index 1575c83baafd8..ce1d26ec4dc93 100644 --- a/articles/active-directory/app-provisioning/use-scim-to-provision-users-and-groups.md +++ b/articles/active-directory/app-provisioning/use-scim-to-provision-users-and-groups.md @@ -8,7 +8,7 @@ ms.service: active-directory ms.subservice: app-provisioning ms.workload: identity ms.topic: tutorial -ms.date: 04/13/2022 +ms.date: 05/25/2022 ms.author: kenwith ms.reviewer: arvinh --- @@ -1350,7 +1350,7 @@ The SCIM spec doesn't define a SCIM-specific scheme for authentication and autho |Username and password (not recommended or supported by Azure AD)|Easy to implement|Insecure - [Your Pa$$word doesn't matter](https://techcommunity.microsoft.com/t5/azure-active-directory-identity/your-pa-word-doesn-t-matter/ba-p/731984)|Not supported for new gallery or non-gallery apps.| |Long-lived bearer token|Long-lived tokens do not require a user to be present. They are easy for admins to use when setting up provisioning.|Long-lived tokens can be hard to share with an admin without using insecure methods such as email. |Supported for gallery and non-gallery apps. | |OAuth authorization code grant|Access tokens are much shorter-lived than passwords, and have an automated refresh mechanism that long-lived bearer tokens do not have. A real user must be present during initial authorization, adding a level of accountability. |Requires a user to be present. If the user leaves the organization, the token is invalid and authorization will need to be completed again.|Supported for gallery apps, but not non-gallery apps. However, you can provide an access token in the UI as the secret token for short term testing purposes. Support for OAuth code grant on non-gallery is in our backlog, in addition to support for configurable auth / token URLs on the gallery app.| -|OAuth client credentials grant|Access tokens are much shorter-lived than passwords, and have an automated refresh mechanism that long-lived bearer tokens do not have. Both the authorization code grant and the client credentials grant create the same type of access token, so moving between these methods is transparent to the API. Provisioning can be completely automated, and new tokens can be silently requested without user interaction. ||Not supported for gallery and non-gallery apps. Support is in our backlog.| +|OAuth client credentials grant|Access tokens are much shorter-lived than passwords, and have an automated refresh mechanism that long-lived bearer tokens do not have. Both the authorization code grant and the client credentials grant create the same type of access token, so moving between these methods is transparent to the API. Provisioning can be completely automated, and new tokens can be silently requested without user interaction. ||Supported for gallery apps, but not non-gallery apps. However, you can provide an access token in the UI as the secret token for short term testing purposes. Support for OAuth client credentials grant on non-gallery is in our backlog.| > [!NOTE] > It's not recommended to leave the token field blank in the Azure AD provisioning configuration custom app UI. The token generated is primarily available for testing purposes. 
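+
+To make the comparison above concrete, the following PowerShell sketch shows the two token styles from a client's point of view. It isn't taken from the provisioning service itself; the token endpoint, client ID, client secret, and SCIM base URL are placeholders for whatever your SCIM application uses.
+
+```powershell
+# Option 1: a long-lived bearer token is supplied out of band and used directly.
+$secretToken = "long-lived-token-from-the-app-admin"
+
+# Option 2: OAuth client credentials grant - exchange a client ID and secret for a short-lived access token.
+$tokenResponse = Invoke-RestMethod -Method Post -Uri "https://login.example.com/oauth2/token" -Body @{
+    grant_type    = "client_credentials"
+    client_id     = "your-client-id"
+    client_secret = "your-client-secret"
+}
+$secretToken = $tokenResponse.access_token
+
+# Either way, the SCIM request itself carries the token in the Authorization header.
+Invoke-RestMethod -Uri "https://scim.example.com/scim/v2/Users?filter=userName eq `"alice@contoso.com`"" `
+    -Headers @{ Authorization = "Bearer $secretToken" }
+```
+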
diff --git a/articles/active-directory/app-proxy/application-proxy-configure-complex-application.md b/articles/active-directory/app-proxy/application-proxy-configure-complex-application.md index 5d7ed9aa6d90d..da4efd9ff4c5c 100644 --- a/articles/active-directory/app-proxy/application-proxy-configure-complex-application.md +++ b/articles/active-directory/app-proxy/application-proxy-configure-complex-application.md @@ -50,7 +50,7 @@ This article provides you with the information you need to configure wildcard ap - Note - Regular application will always take precedence over a complex app (wildcard application). ## Pre-requisites -Before you get started with single sign-on for header-based authentication apps, make sure your environment is ready with the following settings and configurations: +Before you get started with Application Proxy Complex application scenario apps, make sure your environment is ready with the following settings and configurations: - You need to enable Application Proxy and install a connector that has line of site to your applications. See the tutorial [Add an on-premises application for remote access through Application Proxy](application-proxy-add-on-premises-application.md#add-an-on-premises-app-to-azure-ad) to learn how to prepare your on-premises environment, install and register a connector, and test the connector. diff --git a/articles/active-directory/app-proxy/index.yml b/articles/active-directory/app-proxy/index.yml index a6b6092507dcb..3cce2a03af54b 100644 --- a/articles/active-directory/app-proxy/index.yml +++ b/articles/active-directory/app-proxy/index.yml @@ -46,9 +46,4 @@ landingContent: url: ../manage-apps/v2-howto-app-gallery-listing.md - text: Understand the platform url: ../develop/v2-overview.md - - title: New in docs - linkLists: - - linkListType: whats-new - links: - - text: What's new in docs - url: whats-new-docs.md + diff --git a/articles/active-directory/app-proxy/toc.yml b/articles/active-directory/app-proxy/toc.yml index 175c497922717..e20591e0ecc3f 100644 --- a/articles/active-directory/app-proxy/toc.yml +++ b/articles/active-directory/app-proxy/toc.yml @@ -5,8 +5,6 @@ items: - name: What is Application Proxy? href: what-is-application-proxy.md - - name: What's new in docs? 
- href: whats-new-docs.md - name: Tutorials expanded: true items: @@ -178,4 +176,4 @@ - name: Stack Overflow href: https://stackoverflow.com/questions/tagged/azure-active-directory - name: Videos - href: https://azure.microsoft.com/documentation/videos/index/?services=active-directory \ No newline at end of file + href: https://azure.microsoft.com/documentation/videos/index/?services=active-directory diff --git a/articles/active-directory/authentication/TOC.yml b/articles/active-directory/authentication/TOC.yml index 3c40922d9eadc..68b31d074cd1b 100644 --- a/articles/active-directory/authentication/TOC.yml +++ b/articles/active-directory/authentication/TOC.yml @@ -287,7 +287,7 @@ - name: Azure PowerShell cmdlets href: /powershell/azure/ - name: Microsoft Graph REST API beta - href: /graph/api/resources/authenticationmethods-overview?view=graph-rest-beta + href: /graph/api/resources/authenticationmethods-overview - name: Service limits and restrictions href: ../enterprise-users/directory-service-limits-restrictions.md - name: FIDO2 compatibility diff --git a/articles/active-directory/authentication/concept-authentication-passwordless.md b/articles/active-directory/authentication/concept-authentication-passwordless.md index 7c84a9ab92c03..b52abb7a11265 100644 --- a/articles/active-directory/authentication/concept-authentication-passwordless.md +++ b/articles/active-directory/authentication/concept-authentication-passwordless.md @@ -128,6 +128,7 @@ The following providers offer FIDO2 security keys of different form factors that | Nymi | ![y] | ![n]| ![y]| ![n]| ![n] | https://www.nymi.com/nymi-band | | Octatco | ![y] | ![y]| ![n]| ![n]| ![n] | https://octatco.com/ | | OneSpan Inc. | ![n] | ![y]| ![n]| ![y]| ![n] | https://www.onespan.com/products/fido | +| Swissbit | ![n] | ![y]| ![y]| ![n]| ![n] | https://www.swissbit.com/en/products/ishield-fido2/ | | Thales Group | ![n] | ![y]| ![y]| ![n]| ![n] | https://cpl.thalesgroup.com/access-management/authenticators/fido-devices | | Thetis | ![y] | ![y]| ![y]| ![y]| ![n] | https://thetis.io/collections/fido2 | | Token2 Switzerland | ![y] | ![y]| ![y]| ![n]| ![n] | https://www.token2.swiss/shop/product/token2-t2f2-alu-fido2-u2f-and-totp-security-key | @@ -136,6 +137,7 @@ The following providers offer FIDO2 security keys of different form factors that | Yubico | ![y] | ![y]| ![y]| ![n]| ![y] | https://www.yubico.com/solutions/passwordless/ | + [y]: ./media/fido2-compatibility/yes.png [n]: ./media/fido2-compatibility/no.png diff --git a/articles/active-directory/authentication/concept-registration-mfa-sspr-combined.md b/articles/active-directory/authentication/concept-registration-mfa-sspr-combined.md index 79f05568ec44b..92c2dfb10a889 100644 --- a/articles/active-directory/authentication/concept-registration-mfa-sspr-combined.md +++ b/articles/active-directory/authentication/concept-registration-mfa-sspr-combined.md @@ -6,7 +6,7 @@ services: active-directory ms.service: active-directory ms.subservice: authentication ms.topic: conceptual -ms.date: 03/1/2022 +ms.date: 05/24/2022 ms.author: justinha author: justinha @@ -73,6 +73,9 @@ Users can set one of the following options as the default Multi-Factor Authentic - Phone call - Text message +>[!NOTE] +>Virtual phone numbers are not supported for Voice calls or SMS messages. + Third party authenticator apps do not provide push notification. As we continue to add more authentication methods to Azure AD, those methods become available in combined registration. 
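As an illustrative sketch of how the methods registered through combined registration surface to administrators, the following PowerShell lists one user's registered authentication methods through Microsoft Graph. The Graph permission, the endpoint, and the user principal name are assumptions for this example and are not part of the combined registration steps.

```powershell
# Sketch only: list a user's registered authentication methods with the Microsoft Graph PowerShell SDK.
# UserAuthenticationMethod.Read.All is the assumed permission; the UPN is a placeholder.
Connect-MgGraph -Scopes "UserAuthenticationMethod.Read.All"

$upn = "user@contoso.com"
$methods = Invoke-MgGraphRequest -Method GET -Uri "https://graph.microsoft.com/v1.0/users/$upn/authentication/methods"

# Each entry's @odata.type indicates the method type (Authenticator app, phone, FIDO2 key, and so on).
$methods.value | ForEach-Object { $_.'@odata.type' }
```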
## Combined registration modes diff --git a/articles/active-directory/authentication/fido2-compatibility.md b/articles/active-directory/authentication/fido2-compatibility.md index 55c1b360c95ce..43ba617bb1f4a 100644 --- a/articles/active-directory/authentication/fido2-compatibility.md +++ b/articles/active-directory/authentication/fido2-compatibility.md @@ -28,12 +28,12 @@ This table shows support for authenticating Azure Active Directory (Azure AD) an | | USB | NFC | BLE | USB | NFC | BLE | USB | NFC | BLE | USB | NFC | BLE | | **Windows** | ![Chrome supports USB on Windows for Azure AD accounts.][y] | ![Chrome supports NFC on Windows for Azure AD accounts.][y] | ![Chrome supports BLE on Windows for Azure AD accounts.][y] | ![Edge supports USB on Windows for Azure AD accounts.][y] | ![Edge supports NFC on Windows for Azure AD accounts.][y] | ![Edge supports BLE on Windows for Azure AD accounts.][y] | ![Firefox supports USB on Windows for Azure AD accounts.][y] | ![Firefox supports NFC on Windows for Azure AD accounts.][y] | ![Firefox supports BLE on Windows for Azure AD accounts.][y] | ![Safari supports USB on Windows for Azure AD accounts.][n] | ![Safari supports NFC on Windows for Azure AD accounts.][n] | ![Safari supports BLE on Windows for Azure AD accounts.][n] | | **macOS** | ![Chrome supports USB on macOS for Azure AD accounts.][y] | ![Chrome supports NFC on macOS for Azure AD accounts.][n] | ![Chrome supports BLE on macOS for Azure AD accounts.][n] | ![Edge supports USB on macOS for Azure AD accounts.][y] | ![Edge supports NFC on macOS for Azure AD accounts.][n] | ![Edge supports BLE on macOS for Azure AD accounts.][n] | ![Firefox supports USB on macOS for Azure AD accounts.][n] | ![Firefox supports NFC on macOS for Azure AD accounts.][n] | ![Firefox supports BLE on macOS for Azure AD accounts.][n] | ![Safari supports USB on macOS for Azure AD accounts.][n] | ![Safari supports NFC on macOS for Azure AD accounts.][n] | ![Safari supports BLE on macOS for Azure AD accounts.][n] | -| **ChromeOS** | ![Chrome supports USB on ChromeOS for Azure AD accounts.][y] | ![Chrome supports NFC on ChromeOS for Azure AD accounts.][n] | ![Chrome supports BLE on ChromeOS for Azure AD accounts.][n] | ![Edge supports USB on ChromeOS for Azure AD accounts.][n] | ![Edge supports NFC on ChromeOS for Azure AD accounts.][n] | ![Edge supports BLE on ChromeOS for Azure AD accounts.][n] | ![Firefox supports USB on ChromeOS for Azure AD accounts.][n] | ![Firefox supports NFC on ChromeOS for Azure AD accounts.][n] | ![Firefox supports BLE on ChromeOS for Azure AD accounts.][n] | ![Safari supports USB on ChromeOS for Azure AD accounts.][n] | ![Safari supports NFC on ChromeOS for Azure AD accounts.][n] | ![Safari supports BLE on ChromeOS for Azure AD accounts.][n] | +| **ChromeOS** | ![Chrome supports USB on ChromeOS for Azure AD accounts.][y]* | ![Chrome supports NFC on ChromeOS for Azure AD accounts.][n] | ![Chrome supports BLE on ChromeOS for Azure AD accounts.][n] | ![Edge supports USB on ChromeOS for Azure AD accounts.][n] | ![Edge supports NFC on ChromeOS for Azure AD accounts.][n] | ![Edge supports BLE on ChromeOS for Azure AD accounts.][n] | ![Firefox supports USB on ChromeOS for Azure AD accounts.][n] | ![Firefox supports NFC on ChromeOS for Azure AD accounts.][n] | ![Firefox supports BLE on ChromeOS for Azure AD accounts.][n] | ![Safari supports USB on ChromeOS for Azure AD accounts.][n] | ![Safari supports NFC on ChromeOS for Azure AD accounts.][n] | ![Safari supports BLE on ChromeOS for Azure AD 
accounts.][n] | | **Linux** | ![Chrome supports USB on Linux for Azure AD accounts.][y] | ![Chrome supports NFC on Linux for Azure AD accounts.][n] | ![Chrome supports BLE on Linux for Azure AD accounts.][n] | ![Edge supports USB on Linux for Azure AD accounts.][n] | ![Edge supports NFC on Linux for Azure AD accounts.][n] | ![Edge supports BLE on Linux for Azure AD accounts.][n] | ![Firefox supports USB on Linux for Azure AD accounts.][n] | ![Firefox supports NFC on Linux for Azure AD accounts.][n] | ![Firefox supports BLE on Linux for Azure AD accounts.][n] | ![Safari supports USB on Linux for Azure AD accounts.][n] | ![Safari supports NFC on Linux for Azure AD accounts.][n] | ![Safari supports BLE on Linux for Azure AD accounts.][n] | | **iOS** | ![Chrome supports USB on iOS for Azure AD accounts.][n] | ![Chrome supports NFC on iOS for Azure AD accounts.][n] | ![Chrome supports BLE on iOS for Azure AD accounts.][n] | ![Edge supports USB on iOS for Azure AD accounts.][n] | ![Edge supports NFC on Linux for Azure AD accounts.][n] | ![Edge supports BLE on Linux for Azure AD accounts.][n] | ![Firefox supports USB on Linux for Azure AD accounts.][n] | ![Firefox supports NFC on iOS for Azure AD accounts.][n] | ![Firefox supports BLE on iOS for Azure AD accounts.][n] | ![Safari supports USB on iOS for Azure AD accounts.][n] | ![Safari supports NFC on iOS for Azure AD accounts.][n] | ![Safari supports BLE on iOS for Azure AD accounts.][n] | | **Android** | ![Chrome supports USB on Android for Azure AD accounts.][n] | ![Chrome supports NFC on Android for Azure AD accounts.][n] | ![Chrome supports BLE on Android for Azure AD accounts.][n] | ![Edge supports USB on Android for Azure AD accounts.][n] | ![Edge supports NFC on Android for Azure AD accounts.][n] | ![Edge supports BLE on Android for Azure AD accounts.][n] | ![Firefox supports USB on Android for Azure AD accounts.][n] | ![Firefox supports NFC on Android for Azure AD accounts.][n] | ![Firefox supports BLE on Android for Azure AD accounts.][n] | ![Safari supports USB on Android for Azure AD accounts.][n] | ![Safari supports NFC on Android for Azure AD accounts.][n] | ![Safari supports BLE on Android for Azure AD accounts.][n] | - +*Key Registration is currently not supported with ChromeOS/Chrome Browser. ## Unsupported browsers diff --git a/articles/active-directory/authentication/how-to-mfa-number-match.md b/articles/active-directory/authentication/how-to-mfa-number-match.md index a01ffbe36d1dd..60bc32c970a5b 100644 --- a/articles/active-directory/authentication/how-to-mfa-number-match.md +++ b/articles/active-directory/authentication/how-to-mfa-number-match.md @@ -243,11 +243,10 @@ To enable number matching in the Azure AD portal, complete the following steps: ![Screenshot of enabling number match.](media/howto-authentication-passwordless-phone/enable-number-matching.png) >[!NOTE] ->[Least privilege role in Azure Active Directory - Multi-factor Authentication](https://docs.microsoft.com/azure/active-directory/roles/delegate-by-task#multi-factor-authentication) +>[Least privilege role in Azure Active Directory - Multi-factor Authentication](../roles/delegate-by-task.md#multi-factor-authentication) Number matching is not supported for Apple Watch notifications. Apple Watch need to use their phone to approve notifications when number matching is enabled. 
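If you prefer to script the portal steps above, the following sketch updates the Microsoft Authenticator configuration in the Authentication methods policy through the beta Microsoft Graph endpoint. Treat it as an assumption-laden example rather than the documented procedure: the beta endpoint, the `numberMatchingRequiredState` payload shape, and the group ID are illustrative.

```powershell
# Sketch only: enable number matching for a target group via the beta Graph Authentication methods policy.
# Policy.ReadWrite.AuthenticationMethod is the assumed permission; the group ID is a placeholder.
Connect-MgGraph -Scopes "Policy.ReadWrite.AuthenticationMethod"

$body = @{
    "@odata.type"   = "#microsoft.graph.microsoftAuthenticatorAuthenticationMethodConfiguration"
    featureSettings = @{
        numberMatchingRequiredState = @{
            state         = "enabled"
            includeTarget = @{
                targetType = "group"
                id         = "00000000-0000-0000-0000-000000000000"
            }
        }
    }
}

Invoke-MgGraphRequest -Method PATCH `
    -Uri "https://graph.microsoft.com/beta/policies/authenticationMethodsPolicy/authenticationMethodConfigurations/MicrosoftAuthenticator" `
    -Body ($body | ConvertTo-Json -Depth 10)
```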
## Next steps -[Authentication methods in Azure Active Directory - Microsoft Authenticator app](concept-authentication-authenticator-app.md) - +[Authentication methods in Azure Active Directory - Microsoft Authenticator app](concept-authentication-authenticator-app.md) \ No newline at end of file diff --git a/articles/active-directory/authentication/how-to-migrate-mfa-server-to-azure-mfa-with-federation.md b/articles/active-directory/authentication/how-to-migrate-mfa-server-to-azure-mfa-with-federation.md index 6c435910c295e..197bbaafbd097 100644 --- a/articles/active-directory/authentication/how-to-migrate-mfa-server-to-azure-mfa-with-federation.md +++ b/articles/active-directory/authentication/how-to-migrate-mfa-server-to-azure-mfa-with-federation.md @@ -175,7 +175,7 @@ This section covers final steps before migrating user phone numbers. ### Set federatedIdpMfaBehavior to enforceMfaByFederatedIdp -For federated domains, MFA may be enforced by Azure AD Conditional Access or by the on-premises federation provider. Each federated domain has a Microsoft Graph PowerShell security setting named **federatedIdpMfaBehavior**. You can set **federatedIdpMfaBehavior** to `enforceMfaByFederatedIdp` so Azure AD accepts MFA that's performed by the federated identity provider. If the federated identity provider didn't perform MFA, Azure AD redirects the request to the federated identity provider to perform MFA. For more information, see [federatedIdpMfaBehavior](/graph/api/resources/internaldomainfederation?view=graph-rest-beta#federatedidpmfabehavior-values). +For federated domains, MFA may be enforced by Azure AD Conditional Access or by the on-premises federation provider. Each federated domain has a Microsoft Graph PowerShell security setting named **federatedIdpMfaBehavior**. You can set **federatedIdpMfaBehavior** to `enforceMfaByFederatedIdp` so Azure AD accepts MFA that's performed by the federated identity provider. If the federated identity provider didn't perform MFA, Azure AD redirects the request to the federated identity provider to perform MFA. For more information, see [federatedIdpMfaBehavior](/graph/api/resources/internaldomainfederation?view=graph-rest-beta#federatedidpmfabehavior-values&preserve-view=true ). >[!NOTE] > The **federatedIdpMfaBehavior** setting is an evolved version of the **SupportsMfa** property of the [Set-MsolDomainFederationSettings MSOnline v1 PowerShell cmdlet](/powershell/module/msonline/set-msoldomainfederationsettings). diff --git a/articles/active-directory/authentication/howto-authentication-passwordless-security-key-on-premises.md b/articles/active-directory/authentication/howto-authentication-passwordless-security-key-on-premises.md index c9ac792e5ead8..948f391f98bf0 100644 --- a/articles/active-directory/authentication/howto-authentication-passwordless-security-key-on-premises.md +++ b/articles/active-directory/authentication/howto-authentication-passwordless-security-key-on-premises.md @@ -288,17 +288,10 @@ Make sure that enough DCs are patched to respond in time to service your resourc > [!NOTE] > The `/keylist` switch in the `nltest` command is available in client Windows 10 v2004 and later. -### What if I have a CloudTGT but it never gets exchange for a OnPremTGT when I am using Windows Hello for Business Cloud Trust? - -Make sure that the user you are signed in as, is a member of the groups of users that can use FIDO2 as an authentication method, or enable it for all users. 
- -> [!NOTE] -> Even if you are not explicitly using a security key to sign-in to your device, the underlying technology is dependent on the FIDO2 infrastructure requirements. - ### Do FIDO2 security keys work in a Windows login with RODC present in the hybrid environment? An FIDO2 Windows login looks for a writable DC to exchange the user TGT. As long as you have at least one writable DC per site, the login works fine. ## Next steps -[Learn more about passwordless authentication](concept-authentication-passwordless.md) \ No newline at end of file +[Learn more about passwordless authentication](concept-authentication-passwordless.md) diff --git a/articles/active-directory/authentication/howto-authentication-temporary-access-pass.md b/articles/active-directory/authentication/howto-authentication-temporary-access-pass.md index 32d47f58ff618..b01a8819962d8 100644 --- a/articles/active-directory/authentication/howto-authentication-temporary-access-pass.md +++ b/articles/active-directory/authentication/howto-authentication-temporary-access-pass.md @@ -1,21 +1,21 @@ --- title: Configure a Temporary Access Pass in Azure AD to register Passwordless authentication methods -description: Learn how to configure and enable users to to register Passwordless authentication methods by using a Temporary Access Pass +description: Learn how to configure and enable users to register Passwordless authentication methods by using a Temporary Access Pass services: active-directory ms.service: active-directory ms.subservice: authentication ms.topic: conceptual -ms.date: 10/22/2021 +ms.date: 05/24/2022 ms.author: justinha -author: inbarckMS +author: tilarso manager: karenhoran -ms.reviewer: inbarc +ms.reviewer: tilarso ms.collection: M365-identity-device-management --- -# Configure Temporary Access Pass in Azure AD to register Passwordless authentication methods (Preview) +# Configure Temporary Access Pass in Azure AD to register Passwordless authentication methods Passwordless authentication methods, such as FIDO2 and Passwordless Phone Sign-in through the Microsoft Authenticator app, enable users to sign in securely without a password. Users can bootstrap Passwordless methods in one of two ways: @@ -23,28 +23,30 @@ Users can bootstrap Passwordless methods in one of two ways: - Using existing Azure AD Multi-Factor Authentication methods - Using a Temporary Access Pass (TAP) -A Temporary Access Pass is a time-limited passcode issued by an admin that satisfies strong authentication requirements and can be used to onboard other authentication methods, including Passwordless ones. +A Temporary Access Pass is a time-limited passcode issued by an admin that satisfies strong authentication requirements and can be used to onboard other authentication methods, including Passwordless ones such as Microsoft Authenticator or even Windows Hello. A Temporary Access Pass also makes recovery easier when a user has lost or forgotten their strong authentication factor like a FIDO2 security key or Microsoft Authenticator app, but needs to sign in to register new strong authentication methods. This article shows you how to enable and use a Temporary Access Pass in Azure AD using the Azure portal. You can also perform these actions using the REST APIs. ->[!NOTE] ->Temporary Access Pass is currently in public preview. Some features might not be supported or have limited capabilities. 
- ## Enable the Temporary Access Pass policy A Temporary Access Pass policy defines settings, such as the lifetime of passes created in the tenant, or the users and groups who can use a Temporary Access Pass to sign-in. -Before anyone can sign in with a Temporary Access Pass, you need to enable the authentication method policy and choose which users and groups can sign in by using a Temporary Access Pass. +Before anyone can sign-in with a Temporary Access Pass, you need to enable Temporary Access Pass in the authentication method policy and choose which users and groups can sign in by using a Temporary Access Pass. Although you can create a Temporary Access Pass for any user, only those included in the policy can sign-in with it. Global administrator and Authentication Method Policy administrator role holders can update the Temporary Access Pass authentication method policy. To configure the Temporary Access Pass authentication method policy: -1. Sign in to the Azure portal as a Global admin and click **Azure Active Directory** > **Security** > **Authentication methods** > **Temporary Access Pass**. -1. Click **Yes** to enable the policy, select which users have the policy applied, and any **General** settings. +1. Sign in to the Azure portal as a Global admin or Authentication Policy admin and click **Azure Active Directory** > **Security** > **Authentication methods** > **Temporary Access Pass**. +![Screenshot of how to manage Temporary Access Pass within the authentication method policy experience.](./media/how-to-authentication-temporary-access-pass/policy.png) +1. Set Enable to **Yes** to enable the policy, select which users have the policy applied. +![Screenshot of how to enable the Temporary Access Pass authentication method policy.](./media/how-to-authentication-temporary-access-pass/policy-scope.png) +1. (Optional) Click **Configure** and modify the default Temporary Access Pass settings, such as setting maximum lifetime, or length. +![Screenshot of how to customize the settings for Temporary Access Pass.](./media/how-to-authentication-temporary-access-pass/policy-settings.png) +1. Click **Save** to apply the policy. + - ![Screenshot of how to enable the Temporary Access Pass authentication method policy](./media/how-to-authentication-temporary-access-pass/policy.png) The default value and the range of allowed values are described in the following table. @@ -52,7 +54,7 @@ To configure the Temporary Access Pass authentication method policy: | Setting | Default values | Allowed values | Comments | |---|---|---|---| | Minimum lifetime | 1 hour | 10 – 43200 Minutes (30 days) | Minimum number of minutes that the Temporary Access Pass is valid. | - | Maximum lifetime | 24 hours | 10 – 43200 Minutes (30 days) | Maximum number of minutes that the Temporary Access Pass is valid. | + | Maximum lifetime | 8 hours | 10 – 43200 Minutes (30 days) | Maximum number of minutes that the Temporary Access Pass is valid. | | Default lifetime | 1 hour | 10 – 43200 Minutes (30 days) | Default values can be override by the individual passes, within the minimum and maximum lifetime configured by the policy. | | One-time use | False | True / False | When the policy is set to false, passes in the tenant can be used either once or more than once during its validity (maximum lifetime). By enforcing one-time use in the Temporary Access Pass policy, all passes created in the tenant will be created as one-time use. | | Length | 8 | 8-48 characters | Defines the length of the passcode. 
| @@ -71,14 +73,14 @@ These roles can perform the following actions related to a Temporary Access Pass 1. Click **Azure Active Directory**, browse to Users, select a user, such as *Chris Green*, then choose **Authentication methods**. 1. If needed, select the option to **Try the new user authentication methods experience**. 1. Select the option to **Add authentication methods**. -1. Below **Choose method**, click **Temporary Access Pass (Preview)**. +1. Below **Choose method**, click **Temporary Access Pass**. 1. Define a custom activation time or duration and click **Add**. - ![Screenshot of how to create a Temporary Access Pass](./media/how-to-authentication-temporary-access-pass/create.png) + ![Screenshot of how to create a Temporary Access Pass.](./media/how-to-authentication-temporary-access-pass/create.png) 1. Once added, the details of the Temporary Access Pass are shown. Make a note of the actual Temporary Access Pass value. You provide this value to the user. You can't view this value after you click **Ok**. - ![Screenshot of Temporary Access Pass details](./media/how-to-authentication-temporary-access-pass/details.png) + ![Screenshot of Temporary Access Pass details.](./media/how-to-authentication-temporary-access-pass/details.png) The following commands show how to create and get a Temporary Access Pass by using PowerShell: @@ -86,34 +88,34 @@ The following commands show how to create and get a Temporary Access Pass by usi # Create a Temporary Access Pass for a user $properties = @{} $properties.isUsableOnce = $True -$properties.startDateTime = '2021-03-11 06:00:00' +$properties.startDateTime = '2022-05-23 06:00:00' $propertiesJSON = $properties | ConvertTo-Json New-MgUserAuthenticationTemporaryAccessPassMethod -UserId user2@contoso.com -BodyParameter $propertiesJSON Id CreatedDateTime IsUsable IsUsableOnce LifetimeInMinutes MethodUsabilityReason StartDateTime TemporaryAccessPass -- --------------- -------- ------------ ----------------- --------------------- ------------- ------------------- -c5dbd20a-8b8f-4791-a23f-488fcbde3b38 9/03/2021 11:19:17 PM False True 60 NotYetValid 11/03/2021 6:00:00 AM TAPRocks! +c5dbd20a-8b8f-4791-a23f-488fcbde3b38 5/22/2022 11:19:17 PM False True 60 NotYetValid 23/05/2022 6:00:00 AM TAPRocks! # Get a user's Temporary Access Pass Get-MgUserAuthenticationTemporaryAccessPassMethod -UserId user3@contoso.com Id CreatedDateTime IsUsable IsUsableOnce LifetimeInMinutes MethodUsabilityReason StartDateTime TemporaryAccessPass -- --------------- -------- ------------ ----------------- --------------------- ------------- ------------------- -c5dbd20a-8b8f-4791-a23f-488fcbde3b38 9/03/2021 11:19:17 PM False True 60 NotYetValid 11/03/2021 6:00:00 AM +c5dbd20a-8b8f-4791-a23f-488fcbde3b38 5/22/2022 11:19:17 PM False True 60 NotYetValid 23/05/2022 6:00:00 AM ``` ## Use a Temporary Access Pass -The most common use for a Temporary Access Pass is for a user to register authentication details during the first sign-in, without the need to complete additional security prompts. Authentication methods are registered at [https://aka.ms/mysecurityinfo](https://aka.ms/mysecurityinfo). Users can also update existing authentication methods here. +The most common use for a Temporary Access Pass is for a user to register authentication details during the first sign-in or device setup, without the need to complete additional security prompts. Authentication methods are registered at [https://aka.ms/mysecurityinfo](https://aka.ms/mysecurityinfo). 
Users can also update existing authentication methods here. 1. Open a web browser to [https://aka.ms/mysecurityinfo](https://aka.ms/mysecurityinfo). 1. Enter the UPN of the account you created the Temporary Access Pass for, such as *tapuser@contoso.com*. 1. If the user is included in the Temporary Access Pass policy, they will see a screen to enter their Temporary Access Pass. 1. Enter the Temporary Access Pass that was displayed in the Azure portal. - ![Screenshot of how to enter a Temporary Access Pass](./media/how-to-authentication-temporary-access-pass/enter.png) + ![Screenshot of how to enter a Temporary Access Pass.](./media/how-to-authentication-temporary-access-pass/enter.png) >[!NOTE] >For federated domains, a Temporary Access Pass is preferred over federation. A user with a Temporary Access Pass will complete the authentication in Azure AD and will not get redirected to the federated Identity Provider (IdP). @@ -122,12 +124,27 @@ The user is now signed in and can update or register a method such as FIDO2 secu Users who update their authentication methods due to losing their credentials or device should make sure they remove the old authentication methods. Users can also continue to sign-in by using their password; a TAP doesn’t replace a user’s password. + +### User management of Temporary Access Pass + +Users managing their security information at [https://aka.ms/mysecurityinfo](https://aka.ms/mysecurityinfo) will see an entry for the Temporary Access Pass. If a user does not have any other registered methods they will be presented a banner at the top of the screen requesting them to add a new sign-in method. Users can additionally view the TAP expiration time, and delete the TAP if no longer needed. + +![Screenshot of how users can manage a Temporary Access Pass in My Security Info.](./media/how-to-authentication-temporary-access-pass/tap-my-security-info.png) + +### Windows device setup +Users with a Temporary Access Pass can navigate the setup process on Windows 10 and 11 to perform device join operations and configure Windows Hello For Business. Temporary Access Pass usage for setting up Windows Hello for Business varies based on the devices joined state: +- During Azure AD Join setup, users can authenticate with a TAP (no password required) and setup Windows Hello for Business. +- On already Azure AD Joined devices, users must first authenticate with another method such as a password, smartcard or FIDO2 key, before using TAP to setup Windows Hello for Business. +- On Hybrid Azure AD Joined devices, users must first authenticate with another method such as a password, smartcard or FIDO2 key, before using TAP to setup Windows Hello for Business. + +![Screenshot of how to enter Temporary Access Pass when setting up Windows 10.](./media/how-to-authentication-temporary-access-pass/windows-10-tap.png) + ### Passwordless phone sign-in Users can also use their Temporary Access Pass to register for Passwordless phone sign-in directly from the Authenticator app. For more information, see [Add your work or school account to the Microsoft Authenticator app](https://support.microsoft.com/account-billing/add-your-work-or-school-account-to-the-microsoft-authenticator-app-43a73ab5-b4e8-446d-9e54-2a4cb8e4e93c). 
-![Screenshot of how to enter a Temporary Access Pass using work or school account](./media/how-to-authentication-temporary-access-pass/enter-work-school.png) +![Screenshot of how to enter a Temporary Access Pass using work or school account.](./media/how-to-authentication-temporary-access-pass/enter-work-school.png) ### Guest access @@ -144,7 +161,7 @@ Users need to reauthenticate with different authentication methods after the Tem Under the **Authentication methods** for a user, the **Detail** column shows when the Temporary Access Pass expired. You can delete an expired Temporary Access Pass using the following steps: 1. In the Azure AD portal, browse to **Users**, select a user, such as *Tap User*, then choose **Authentication methods**. -1. On the right-hand side of the **Temporary Access Pass (Preview)** authentication method shown in the list, select **Delete**. +1. On the right-hand side of the **Temporary Access Pass** authentication method shown in the list, select **Delete**. You can also use PowerShell: @@ -157,7 +174,7 @@ Remove-MgUserAuthenticationTemporaryAccessPassMethod -UserId user3@contoso.com - - A user can only have one Temporary Access Pass. The passcode can be used during the start and end time of the Temporary Access Pass. - If the user requires a new Temporary Access Pass: - - If the existing Temporary Access Pass is valid, the admin needs to delete the existing Temporary Access Pass and create a new pass for the user. + - If the existing Temporary Access Pass is valid, the admin can create a new Temporary Access Pass which will override the existing valid Temporary Access Pass. - If the existing Temporary Access Pass has expired, a new Temporary Access Pass will override the existing Temporary Access Pass. For more information about NIST standards for onboarding and recovery, see [NIST Special Publication 800-63A](https://pages.nist.gov/800-63-3/sp800-63a.html#sec4). @@ -167,10 +184,9 @@ For more information about NIST standards for onboarding and recovery, see [NIST Keep these limitations in mind: - When using a one-time Temporary Access Pass to register a Passwordless method such as FIDO2 or Phone sign-in, the user must complete the registration within 10 minutes of sign-in with the one-time Temporary Access Pass. This limitation does not apply to a Temporary Access Pass that can be used more than once. -- Temporary Access Pass is in public preview and currently not available in Azure for US Government. - Users in scope for Self Service Password Reset (SSPR) registration policy *or* [Identity Protection Multi-factor authentication registration policy](../identity-protection/howto-identity-protection-configure-mfa-policy.md) will be required to register authentication methods after they have signed in with a Temporary Access Pass. Users in scope for these policies will get redirected to the [Interrupt mode of the combined registration](concept-registration-mfa-sspr-combined.md#combined-registration-modes). This experience does not currently support FIDO2 and Phone Sign-in registration. -- A Temporary Access Pass cannot be used with the Network Policy Server (NPS) extension and Active Directory Federation Services (AD FS) adapter, or during Windows Setup/Out-of-Box-Experience (OOBE), Autopilot, or to deploy Windows Hello for Business. +- A Temporary Access Pass cannot be used with the Network Policy Server (NPS) extension and Active Directory Federation Services (AD FS) adapter. 
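Because a newly created pass overrides any existing valid pass, as noted above, issuing a replacement Temporary Access Pass is a single call. The following sketch reuses the cmdlet shown earlier in this article; the lifetime value and the user are placeholders.

```powershell
# Sketch only: issue a replacement Temporary Access Pass. No explicit delete of the old pass is required,
# because the new pass overrides the user's existing valid pass.
$properties = @{}
$properties.isUsableOnce = $False
$properties.lifetimeInMinutes = 60
$propertiesJSON = $properties | ConvertTo-Json

New-MgUserAuthenticationTemporaryAccessPassMethod -UserId user2@contoso.com -BodyParameter $propertiesJSON
```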
## Troubleshooting diff --git a/articles/active-directory/authentication/howto-mfa-adfs.md b/articles/active-directory/authentication/howto-mfa-adfs.md index 263338232bc7b..6216fecb45858 100644 --- a/articles/active-directory/authentication/howto-mfa-adfs.md +++ b/articles/active-directory/authentication/howto-mfa-adfs.md @@ -20,7 +20,7 @@ ms.collection: M365-identity-device-management If your organization is federated with Azure Active Directory, use Azure AD Multi-Factor Authentication or Active Directory Federation Services (AD FS) to secure resources that are accessed by Azure AD. Use the following procedures to secure Azure Active Directory resources with either Azure AD Multi-Factor Authentication or Active Directory Federation Services. >[!NOTE] ->Set the domain setting [federatedIdpMfaBehavior](/graph/api/resources/internaldomainfederation?view=graph-rest-beta#federatedidpmfabehavior-values) to `enforceMfaByFederatedIdp` (recommended) or **SupportsMFA** to `$True`. The **federatedIdpMfaBehavior** setting overrides **SupportsMFA** when both are set. +>Set the domain setting [federatedIdpMfaBehavior](/graph/api/resources/internaldomainfederation?view=graph-rest-beta#federatedidpmfabehavior-values&preserve-view=true) to `enforceMfaByFederatedIdp` (recommended) or **SupportsMFA** to `$True`. The **federatedIdpMfaBehavior** setting overrides **SupportsMFA** when both are set. ## Secure Azure AD resources using AD FS diff --git a/articles/active-directory/authentication/howto-mfa-getstarted.md b/articles/active-directory/authentication/howto-mfa-getstarted.md index 9b59b148021e4..1fcf5abed4a8d 100644 --- a/articles/active-directory/authentication/howto-mfa-getstarted.md +++ b/articles/active-directory/authentication/howto-mfa-getstarted.md @@ -4,7 +4,7 @@ description: Learn about deployment considerations and strategy for successful i ms.service: active-directory ms.subservice: authentication ms.topic: how-to -ms.date: 02/02/2022 +ms.date: 06/01/2022 ms.author: mtillman author: mtillman manager: martinco @@ -246,7 +246,7 @@ You can monitor authentication method registration and usage across your organiz The Azure AD sign in reports include authentication details for events when a user is prompted for MFA, and if any Conditional Access policies were in use. You can also use PowerShell for reporting on users registered for Azure AD Multi-Factor Authentication. -NPS extension and AD FS logs can be viewed from **Security** > **MFA** > **Activity report**. +NPS extension and AD FS logs can be viewed from **Security** > **MFA** > **Activity report**. Inclusion of this activity in the [Sign-in logs](../reports-monitoring/concept-sign-ins.md) is currently in Preview. For more information, and additional Azure AD Multi-Factor Authentication reports, see [Review Azure AD Multi-Factor Authentication events](howto-mfa-reporting.md#view-the-azure-ad-sign-ins-report). 
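As a minimal sketch of the PowerShell reporting mentioned above, assuming the Microsoft Graph PowerShell SDK, the `AuditLog.Read.All` permission, and the beta `userRegistrationDetails` report, you can list each user's MFA registration state:

```powershell
# Sketch only: report which users are registered for Azure AD Multi-Factor Authentication.
Connect-MgGraph -Scopes "AuditLog.Read.All"

$report = Invoke-MgGraphRequest -Method GET `
    -Uri "https://graph.microsoft.com/beta/reports/authenticationMethods/userRegistrationDetails"

# First page only; follow @odata.nextLink for large tenants.
$report.value | ForEach-Object {
    [pscustomobject]@{
        User              = $_.userPrincipalName
        MfaRegistered     = $_.isMfaRegistered
        MethodsRegistered = ($_.methodsRegistered -join ', ')
    }
}
```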
diff --git a/articles/active-directory/authentication/howto-mfa-nps-extension-advanced.md b/articles/active-directory/authentication/howto-mfa-nps-extension-advanced.md index db5b999e32abe..a90c31e1cabb2 100644 --- a/articles/active-directory/authentication/howto-mfa-nps-extension-advanced.md +++ b/articles/active-directory/authentication/howto-mfa-nps-extension-advanced.md @@ -6,7 +6,7 @@ services: multi-factor-authentication ms.service: active-directory ms.subservice: authentication ms.topic: how-to -ms.date: 07/11/2018 +ms.date: 06/01/2022 ms.author: justinha author: justinha @@ -23,13 +23,13 @@ The Network Policy Server (NPS) extension extends your cloud-based Azure AD Mult Since the NPS extension connects to both your on-premises and cloud directories, you might encounter an issue where your on-premises user principal names (UPNs) don't match the names in the cloud. To solve this problem, use alternate login IDs. -Within the NPS extension, you can designate an Active Directory attribute to be used in place of the UPN for Azure AD Multi-Factor Authentication. This enables you to protect your on-premises resources with two-step verification without modifying your on-premises UPNs. +Within the NPS extension, you can designate an Active Directory attribute to be used as the UPN for Azure AD Multi-Factor Authentication. This enables you to protect your on-premises resources with two-step verification without modifying your on-premises UPNs. To configure alternate login IDs, go to `HKLM\SOFTWARE\Microsoft\AzureMfa` and edit the following registry values: | Name | Type | Default value | Description | | ---- | ---- | ------------- | ----------- | -| LDAP_ALTERNATE_LOGINID_ATTRIBUTE | string | Empty | Designate the name of Active Directory attribute that you want to use instead of the UPN. This attribute is used as the AlternateLoginId attribute. If this registry value is set to a [valid Active Directory attribute](/windows/win32/adschema/attributes-all) (for example, mail or displayName), then the attribute's value is used in place of the user's UPN for authentication. If this registry value is empty or not configured, then AlternateLoginId is disabled and the user's UPN is used for authentication. | +| LDAP_ALTERNATE_LOGINID_ATTRIBUTE | string | Empty | Designate the name of Active Directory attribute that you want to use as the UPN. This attribute is used as the AlternateLoginId attribute. If this registry value is set to a [valid Active Directory attribute](/windows/win32/adschema/attributes-all) (for example, mail or displayName), then the attribute's value is used as the user's UPN for authentication. If this registry value is empty or not configured, then AlternateLoginId is disabled and the user's UPN is used for authentication. | | LDAP_FORCE_GLOBAL_CATALOG | boolean | False | Use this flag to force the use of Global Catalog for LDAP searches when looking up AlternateLoginId. Configure a domain controller as a Global Catalog, add the AlternateLoginId attribute to the Global Catalog, and then enable this flag.

If LDAP_LOOKUP_FORESTS is configured (not empty), **this flag is enforced as true**, regardless of the value of the registry setting. In this case, the NPS extension requires the Global Catalog to be configured with the AlternateLoginId attribute for each forest. | | LDAP_LOOKUP_FORESTS | string | Empty | Provide a semi-colon separated list of forests to search. For example, *contoso.com;foobar.com*. If this registry value is configured, the NPS extension iteratively searches all the forests in the order in which they were listed, and returns the first successful AlternateLoginId value. If this registry value is not configured, the AlternateLoginId lookup is confined to the current domain.| diff --git a/articles/active-directory/authentication/howto-mfa-userstates.md b/articles/active-directory/authentication/howto-mfa-userstates.md index 0962904c000ba..769564fc4f06f 100644 --- a/articles/active-directory/authentication/howto-mfa-userstates.md +++ b/articles/active-directory/authentication/howto-mfa-userstates.md @@ -6,7 +6,7 @@ services: multi-factor-authentication ms.service: active-directory ms.subservice: authentication ms.topic: how-to -ms.date: 07/22/2021 +ms.date: 06/01/2022 ms.author: justinha author: justinha @@ -54,9 +54,9 @@ All users start out *Disabled*. When you enroll users in per-user Azure AD Multi To view and manage user states, complete the following steps to access the Azure portal page: 1. Sign in to the [Azure portal](https://portal.azure.com) as a Global administrator. -1. Search for and select *Azure Active Directory*, then select **Users** > **All users**. -1. Select **Per-user MFA**. You may need to scroll to the right to see this menu option. Select the example screenshot below to see the full Azure portal window and menu location: - [![Select Multi-Factor Authentication from the Users window in Azure AD.](media/howto-mfa-userstates/selectmfa-cropped.png)](media/howto-mfa-userstates/selectmfa.png#lightbox) +1. Search for and select **Azure Active Directory**, then select **Users** > **All users**. +1. Select **Per-user MFA**. + :::image type="content" border="true" source="media/howto-mfa-userstates/selectmfa-cropped.png" alt-text="Screenshot of select Multi-Factor Authentication from the Users window in Azure AD."::: 1. A new page opens that displays the user state, as shown in the following example. 
![Screenshot that shows example user state information for Azure AD Multi-Factor Authentication](./media/howto-mfa-userstates/userstate1.png) diff --git a/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/create.png b/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/create.png index db06b8e02a33d..b218b02a47e38 100644 Binary files a/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/create.png and b/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/create.png differ diff --git a/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/details.png b/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/details.png index f7c720cf38fa1..2644e77d59a23 100644 Binary files a/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/details.png and b/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/details.png differ diff --git a/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/policy-scope.png b/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/policy-scope.png new file mode 100644 index 0000000000000..a250ec173590c Binary files /dev/null and b/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/policy-scope.png differ diff --git a/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/policy-settings.png b/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/policy-settings.png new file mode 100644 index 0000000000000..adc56e142f75c Binary files /dev/null and b/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/policy-settings.png differ diff --git a/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/policy.png b/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/policy.png index 352e4914693a7..245d83aced56a 100644 Binary files a/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/policy.png and b/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/policy.png differ diff --git a/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/tap-my-security-info.png b/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/tap-my-security-info.png new file mode 100644 index 0000000000000..48a4d0c770ef5 Binary files /dev/null and b/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/tap-my-security-info.png differ diff --git a/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/windows-10-tap.png b/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/windows-10-tap.png new file mode 100644 index 0000000000000..9b806e4ae3cfb Binary files /dev/null and b/articles/active-directory/authentication/media/how-to-authentication-temporary-access-pass/windows-10-tap.png differ diff --git a/articles/active-directory/authentication/media/howto-mfa-userstates/selectmfa-cropped.png 
b/articles/active-directory/authentication/media/howto-mfa-userstates/selectmfa-cropped.png index 2d445277d852f..48cdf7518fde8 100644 Binary files a/articles/active-directory/authentication/media/howto-mfa-userstates/selectmfa-cropped.png and b/articles/active-directory/authentication/media/howto-mfa-userstates/selectmfa-cropped.png differ diff --git a/articles/active-directory/authentication/tutorial-enable-cloud-sync-sspr-writeback.md b/articles/active-directory/authentication/tutorial-enable-cloud-sync-sspr-writeback.md index 589246e7cabd0..e6ee0f1b24256 100644 --- a/articles/active-directory/authentication/tutorial-enable-cloud-sync-sspr-writeback.md +++ b/articles/active-directory/authentication/tutorial-enable-cloud-sync-sspr-writeback.md @@ -5,7 +5,7 @@ services: active-directory ms.service: active-directory ms.subservice: authentication ms.topic: tutorial -ms.date: 10/25/2021 +ms.date: 05/31/2022 ms.author: justinha author: justinha ms.reviewer: tilarso @@ -58,7 +58,7 @@ With password writeback enabled in Azure AD Connect cloud sync, now verify, and To verify and enable password writeback in SSPR, complete the following steps: -1. Sign into the Azure portal using a global administrator account. +1. Sign into the Azure portal using a [Hybrid Identity Administrator](../roles/permissions-reference.md#hybrid-identity-administrator) account. 1. Navigate to Azure Active Directory, select **Password reset**, then choose **On-premises integration**. 1. Verify the Azure AD Connect cloud sync agent set up is complete. 1. Set **Write back passwords to your on-premises directory?** to **Yes**. @@ -72,12 +72,12 @@ To verify and enable password writeback in SSPR, complete the following steps: If you no longer want to use the SSPR password writeback functionality you have configured as part of this document, complete the following steps: -1. Sign into the Azure portal using a global administrator account. +1. Sign into the Azure portal using a [Hybrid Identity Administrator](../roles/permissions-reference.md#hybrid-identity-administrator) account. 1. Search for and select Azure Active Directory, select **Password reset**, then choose **On-premises integration**. 1. Set **Write back passwords to your on-premises directory?** to **No**. 1. Set **Allow users to unlock accounts without resetting their password?** to **No**. -From your Azure AD Connect cloud sync server, run `Set-AADCloudSyncPasswordWritebackConfiguration` using global administrator credentials to disable password writeback with Azure AD Connect cloud sync. +From your Azure AD Connect cloud sync server, run `Set-AADCloudSyncPasswordWritebackConfiguration` using Hybrid Identity Administrator credentials to disable password writeback with Azure AD Connect cloud sync. 
```powershell Import-Module ‘C:\\Program Files\\Microsoft Azure AD Connect Provisioning Agent\\Microsoft.CloudSync.Powershell.dll’ diff --git a/articles/active-directory/authentication/tutorial-enable-sspr-writeback.md b/articles/active-directory/authentication/tutorial-enable-sspr-writeback.md index fe68a51decb43..e0d4da0edc45d 100644 --- a/articles/active-directory/authentication/tutorial-enable-sspr-writeback.md +++ b/articles/active-directory/authentication/tutorial-enable-sspr-writeback.md @@ -6,7 +6,7 @@ services: active-directory ms.service: active-directory ms.subservice: authentication ms.topic: tutorial -ms.date: 11/11/2021 +ms.date: 05/31/2022 ms.author: justinha author: justinha @@ -14,6 +14,7 @@ ms.reviewer: tilarso ms.collection: M365-identity-device-management ms.custom: contperf-fy20q4 +adobe-target: true # Customer intent: As an Azure AD Administrator, I want to learn how to enable and use password writeback so that when end-users reset their password through a web browser their updated password is synchronized back to my on-premises AD environment. --- @@ -42,7 +43,7 @@ To complete this tutorial, you need the following resources and privileges: * A working Azure AD tenant with at least an Azure AD Premium P1 or trial license enabled. * If needed, [create one for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). * For more information, see [Licensing requirements for Azure AD SSPR](concept-sspr-licensing.md). -* An account with *global administrator* privileges. +* An account with [Hybrid Identity Administrator](../roles/permissions-reference.md#hybrid-identity-administrator). * Azure AD configured for self-service password reset. * If needed, [complete the previous tutorial to enable Azure AD SSPR](tutorial-enable-sspr.md). * An existing on-premises AD DS environment configured with a current version of Azure AD Connect. @@ -118,7 +119,7 @@ With password writeback enabled in Azure AD Connect, now configure Azure AD SSPR To enable password writeback in SSPR, complete the following steps: -1. Sign in to the [Azure portal](https://portal.azure.com) using a global administrator account. +1. Sign in to the [Azure portal](https://portal.azure.com) using a Hybrid Identity Administrator account. 1. Search for and select **Azure Active Directory**, select **Password reset**, then choose **On-premises integration**. 1. Set the option for **Write back passwords to your on-premises directory?** to *Yes*. 1. Set the option for **Allow users to unlock accounts without resetting their password?** to *Yes*. diff --git a/articles/active-directory/azuread-dev/about-microsoft-identity-platform.md b/articles/active-directory/azuread-dev/about-microsoft-identity-platform.md index 16a4c4404557b..08bb3d4ec3da3 100644 --- a/articles/active-directory/azuread-dev/about-microsoft-identity-platform.md +++ b/articles/active-directory/azuread-dev/about-microsoft-identity-platform.md @@ -1,5 +1,5 @@ --- -title: Evolution of Microsoft identity platform - Azure +title: Evolution of Microsoft identity platform description: Learn about Microsoft identity platform, an evolution of the Azure Active Directory (Azure AD) identity service and developer platform. 
services: active-directory author: rwike77 @@ -22,7 +22,7 @@ The [Microsoft identity platform](../develop/index.yml) is an evolution of the A Many developers have previously worked with the Azure AD v1.0 platform to authenticate work and school accounts (provisioned by Azure AD) by requesting tokens from the Azure AD v1.0 endpoint, using Azure AD Authentication Library (ADAL), Azure portal for application registration and configuration, and the Microsoft Graph API for programmatic application configuration. -With the unified Microsoft identity platform (v2.0), you can write code once and authenticate any Microsoft identity into your application. For several platforms, the fully supported open-source Microsoft Authentication Library (MSAL) is recommended for use against the identity platform endpoints. MSAL is simple to use, provides great single sign-on (SSO) experiences for your users, helps you achieve high reliability and performance, and is developed using Microsoft Secure Development Lifecycle (SDL). When calling APIs, you can configure your application to take advantage of incremental consent, which allows you to delay the request for consent for more invasive scopes until the application’s usage warrants this at runtime. MSAL also supports Azure Active Directory B2C, so your customers use their preferred social, enterprise, or local account identities to get single sign-on access to your applications and APIs. +With the unified Microsoft identity platform (v2.0), you can write code once and authenticate any Microsoft identity into your application. For several platforms, the fully supported open-source Microsoft Authentication Library (MSAL) is recommended for use against the identity platform endpoints. MSAL is simple to use, provides great single sign-on (SSO) experiences for your users, helps you achieve high reliability and performance, and is developed using Microsoft Secure Development Lifecycle (SDL). When calling APIs, you can configure your application to take advantage of incremental consent, which allows you to delay the request for consent for more invasive scopes until the application's usage warrants this at runtime. MSAL also supports Azure Active Directory B2C, so your customers use their preferred social, enterprise, or local account identities to get single sign-on access to your applications and APIs. With Microsoft identity platform, expand your reach to these kinds of users: @@ -42,9 +42,9 @@ The following diagram shows the Microsoft identity experience at a high level, i ### App registration experience -The Azure portal **[App registrations](https://go.microsoft.com/fwlink/?linkid=2083908)** experience is the one portal experience for managing all applications you’ve integrated with Microsoft identity platform. If you have been using the Application Registration Portal, start using the Azure portal app registration experience instead. +The Azure portal **[App registrations](https://go.microsoft.com/fwlink/?linkid=2083908)** experience is the one portal experience for managing all applications you've integrated with Microsoft identity platform. If you have been using the Application Registration Portal, start using the Azure portal app registration experience instead. -For integration with Azure AD B2C (when authenticating social or local identities), you’ll need to register your application in an Azure AD B2C tenant. This experience is also part of the Azure portal. 
+For integration with Azure AD B2C (when authenticating social or local identities), you'll need to register your application in an Azure AD B2C tenant. This experience is also part of the Azure portal. Use the [Application API](/graph/api/resources/application) to programmatically configure your applications integrated with Microsoft identity platform for authenticating any Microsoft identity. diff --git a/articles/active-directory/azuread-dev/active-directory-devhowto-adal-error-handling.md b/articles/active-directory/azuread-dev/active-directory-devhowto-adal-error-handling.md index 25123634e38b8..f4a6469e5326d 100644 --- a/articles/active-directory/azuread-dev/active-directory-devhowto-adal-error-handling.md +++ b/articles/active-directory/azuread-dev/active-directory-devhowto-adal-error-handling.md @@ -1,5 +1,5 @@ --- -title: ADAL client app error handling best practices | Azure +title: ADAL client app error handling best practices description: Provides error handling guidance and best practices for ADAL client applications. services: active-directory author: rwike77 diff --git a/articles/active-directory/azuread-dev/app-types.md b/articles/active-directory/azuread-dev/app-types.md index d03b33bcba088..5e5bab8f2c754 100644 --- a/articles/active-directory/azuread-dev/app-types.md +++ b/articles/active-directory/azuread-dev/app-types.md @@ -1,5 +1,5 @@ --- -title: Application types in v1.0 | Azure +title: Application types in v1.0 description: Describes the types of apps and scenarios supported by the Azure Active Directory v2.0 endpoint. services: active-directory author: rwike77 diff --git a/articles/active-directory/azuread-dev/azure-ad-endpoint-comparison.md b/articles/active-directory/azuread-dev/azure-ad-endpoint-comparison.md index bc60652609202..8566a38d78d65 100644 --- a/articles/active-directory/azuread-dev/azure-ad-endpoint-comparison.md +++ b/articles/active-directory/azuread-dev/azure-ad-endpoint-comparison.md @@ -1,5 +1,5 @@ --- -title: Why update to Microsoft identity platform (v2.0) | Azure +title: Why update to Microsoft identity platform (v2.0) description: Know the differences between the Microsoft identity platform (v2.0) endpoint and the Azure Active Directory (Azure AD) v1.0 endpoint, and learn the benefits of updating to v2.0. services: active-directory author: rwike77 diff --git a/articles/active-directory/azuread-dev/v1-authentication-scenarios.md b/articles/active-directory/azuread-dev/v1-authentication-scenarios.md index 60b3207dfb0bb..fc276ee116801 100644 --- a/articles/active-directory/azuread-dev/v1-authentication-scenarios.md +++ b/articles/active-directory/azuread-dev/v1-authentication-scenarios.md @@ -1,5 +1,5 @@ --- -title: Azure AD for developers (v1.0) | Azure +title: Azure AD for developers (v1.0) description: Learn authentication basics for Azure AD for developers (v1.0) such as the app model, API, provisioning, and the most common authentication scenarios. 
services: active-directory documentationcenter: dev-center-name diff --git a/articles/active-directory/azuread-dev/videos.md b/articles/active-directory/azuread-dev/videos.md index 8e708961e9d52..2394192756a00 100644 --- a/articles/active-directory/azuread-dev/videos.md +++ b/articles/active-directory/azuread-dev/videos.md @@ -1,11 +1,11 @@ --- -title: Azure ADAL to MSAL migration videos | Azure +title: Azure ADAL to MSAL migration videos description: Videos that help you migrate from the Azure Active Directory developer platform to the Microsoft identity platform services: active-directory author: mmacy manager: CelesteDG ms.service: active-directory -ms.subservice: develop +ms.subservice: azuread-dev ms.topic: conceptual ms.workload: identity ms.date: 02/12/2020 diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/TOC.yml b/articles/active-directory/cloud-infrastructure-entitlement-management/TOC.yml index 7dd76b378929f..d0678307f7aad 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/TOC.yml +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/TOC.yml @@ -1,161 +1,161 @@ - - name: CloudKnox Permissions Management + - name: Permissions Management href: index.yml - name: Overview expanded: true items: - - name: What's CloudKnox Permissions Management? - href: cloudknox-overview.md + - name: What's Permissions Management? + href: overview.md - name: How-to guides expanded: true items: - - name: Onboard CloudKnox on the Azure AD tenant + - name: Onboard Permissions Management on the Azure AD tenant expanded: true items: - - name: Enable CloudKnox in your organization - href: cloudknox-onboard-enable-tenant.md + - name: Enable Permissions Management in your organization + href: onboard-enable-tenant.md - name: Onboard an AWS account - href: cloudknox-onboard-aws.md + href: onboard-aws.md - name: Onboard an Azure subscription - href: cloudknox-onboard-azure.MD + href: onboard-azure.MD - name: Onboard a GCP project - href: cloudknox-onboard-gcp.md + href: onboard-gcp.md - name: Enable or disable the controller after onboarding is complete - href: cloudknox-onboard-enable-controller-after-onboarding.md + href: onboard-enable-controller-after-onboarding.md - name: Add an account/ subscription/ project after onboarding is complete - href: cloudknox-onboard-add-account-after-onboarding.md + href: onboard-add-account-after-onboarding.md - name: View risk metrics in your authorization system expanded: false items: - name: View key statistics and data about your authorization system - href: cloudknox-ui-dashboard.md + href: ui-dashboard.md - name: View data about the activity in your authorization system - href: cloudknox-product-dashboard.md + href: product-dashboard.md - name: Configure settings for data collection expanded: false items: - name: View and configure settings for data collection - href: cloudknox-product-data-sources.md + href: product-data-sources.md - name: Display an inventory of created resources and licenses - href: cloudknox-product-data-inventory.md + href: product-data-inventory.md - name: Manage organizational and personal information expanded: false items: - name: View personal and organization information - href: cloudknox-product-account-settings.md + href: product-account-settings.md - name: View information about identities, resources, and tasks expanded: false items: - name: View analytic information with the Analytics dashboard - href: cloudknox-usage-analytics-home.md + href: 
usage-analytics-home.md - name: View analytic information about users - href: cloudknox-usage-analytics-users.md + href: usage-analytics-users.md - name: View analytic information about groups - href: cloudknox-usage-analytics-groups.md + href: usage-analytics-groups.md - name: View analytic information about active resources - href: cloudknox-usage-analytics-active-resources.md + href: usage-analytics-active-resources.md - name: View analytic information about active tasks - href: cloudknox-usage-analytics-active-tasks.md + href: usage-analytics-active-tasks.md - name: View analytic information about access keys - href: cloudknox-usage-analytics-access-keys.md + href: usage-analytics-access-keys.md - name: View analytic information about serverless functions - href: cloudknox-usage-analytics-serverless-functions.md + href: usage-analytics-serverless-functions.md - name: Manage roles/policies and permission requests expanded: false items: - name: View roles/policies and requests for permission in the Remediation dashboard - href: cloudknox-ui-remediation.md + href: ui-remediation.md - name: View information about roles/policies - href: cloudknox-howto-view-role-policy.md + href: how-to-view-role-policy.md - name: View information about active and completed tasks - href: cloudknox-ui-tasks.md + href: ui-tasks.md - name: Create a role/policy - href: cloudknox-howto-create-role-policy.md + href: how-to-create-role-policy.md - name: Clone a role/policy - href: cloudknox-howto-clone-role-policy.md + href: how-to-clone-role-policy.md - name: Modify a role/policy - href: cloudknox-howto-modify-role-policy.md + href: how-to-modify-role-policy.md - name: Delete a role/policy - href: cloudknox-howto-delete-role-policy.md + href: how-to-delete-role-policy.md - name: Attach and detach policies for AWS identities - href: cloudknox-howto-attach-detach-permissions.md + href: how-to-attach-detach-permissions.md - name: Add and remove roles and tasks for Azure and GCP identities - href: cloudknox-howto-add-remove-role-task.md + href: how-to-add-remove-role-task.md - name: Revoke access to high-risk and unused tasks or assign read-only status - href: cloudknox-howto-revoke-task-readonly-status.md + href: how-to-revoke-task-readonly-status.md - name: Create or approve a request for permissions - href: cloudknox-howto-create-approve-privilege-request.md + href: how-to-create-approve-privilege-request.md - name: Manage users, roles, and their access levels expanded: false items: - name: Manage users and groups - href: cloudknox-ui-user-management.md + href: ui-user-management.md # - name: Define and manage users, roles, and access levels - # href: cloudknox-product-define-permission-levels.md + # href: product-define-permission-levels.md - name: Select group-based permissions settings - href: cloudknox-howto-create-group-based-permissions.md + href: how-to-create-group-based-permissions.md - name: Use queries to view information about user access expanded: false items: - name: Use queries to see how users access information - href: cloudknox-ui-audit-trail.md + href: ui-audit-trail.md - name: Create a custom query - href: cloudknox-howto-create-custom-queries.md + href: how-to-create-custom-queries.md - name: Generate an on-demand report from a query - href: cloudknox-howto-audit-trail-results.md + href: how-to-audit-trail-results.md - name: Filter and query user activity - href: cloudknox-product-audit-trail.md + href: product-audit-trail.md - name: Set activity alerts and triggers expanded: false items: - 
name: View information about activity triggers - href: cloudknox-ui-triggers.md + href: ui-triggers.md - name: Create and view activity alerts and alert triggers - href: cloudknox-howto-create-alert-trigger.md + href: how-to-create-alert-trigger.md - name: Create and view rule-based anomalies and anomaly triggers - href: cloudknox-product-rule-based-anomalies.md + href: product-rule-based-anomalies.md - name: Create and view statistical anomalies and anomaly triggers - href: cloudknox-product-statistical-anomalies.md + href: product-statistical-anomalies.md - name: Create and view permission analytics triggers - href: cloudknox-product-permission-analytics.md + href: product-permission-analytics.md - name: Manage rules for authorization systems expanded: false items: - name: View rules in the Autopilot dashboard - href: cloudknox-ui-autopilot.md + href: ui-autopilot.md - name: Create a rule - href: cloudknox-howto-create-rule.md + href: how-to-create-rule.md - name: Generate, view, and apply rule recommendations - href: cloudknox-howto-recommendations-rule.md + href: how-to-recommendations-rule.md - name: View notification settings for a rule - href: cloudknox-howto-notifications-rule.md + href: how-to-notifications-rule.md - name: Create and view reports expanded: false items: - name: View system reports in the Reports dashboard - href: cloudknox-product-reports.md + href: product-reports.md - name: View a list and description of system reports - href: cloudknox-all-reports.md + href: all-reports.md - name: Generate and view a system report - href: cloudknox-report-view-system-report.md + href: report-view-system-report.md - name: Create, view, and share a custom report - href: cloudknox-report-create-custom-report.md + href: report-create-custom-report.md - name: Generate and download the Permissions analytics report - href: cloudknox-product-permissions-analytics-reports.md + href: product-permissions-analytics-reports.md - name: Troubleshoot expanded: false items: - name: Troubleshoot issues - href: cloudknox-troubleshoot.md + href: troubleshoot.md - name: Training videos expanded: false items: - - name: Get started with CloudKnox training videos - href: cloudknox-training-videos.md + - name: Get started with Permissions Management training videos + href: training-videos.md - name: Reference expanded: false items: - name: FAQs - href: cloudknox-faqs.md + href: faqs.md - name: Glossary - href: cloudknox-multi-cloud-glossary.md + href: multi-cloud-glossary.md diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/all-reports.md b/articles/active-directory/cloud-infrastructure-entitlement-management/all-reports.md new file mode 100644 index 0000000000000..ac4b7ff73a519 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/all-reports.md @@ -0,0 +1,61 @@ +--- +title: View a list and description of all system reports available in Permissions Management reports +description: View a list and description of all system reports available in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: overview +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# View a list and description of system reports + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some of the information relates to a prerelease product that may be substantially modified before it's released. 
Microsoft makes no warranties, express or implied, with respect to the information provided here. + +Permissions Management has various types of system reports that capture specific sets of data. These reports allow management, auditors, and administrators to: + +- Make timely decisions. +- Analyze trends and system/user performance. +- Identify trends in data and high risk areas so that management can address issues more quickly and improve their efficiency. + +This article provides you with a list and description of the system reports available in Permissions Management. Depending on the report, you can download it in comma-separated values (**CSV**) format, portable document format (**PDF**), or Microsoft Excel Open XML Spreadsheet (**XLSX**) format. + +## Download a system report + +1. In the Permissions Management home page, select the **Reports** tab, and then select the **Systems Reports** subtab. +1. In the **Report Name** column, find the report you want, and then select the down arrow to the right of the report name to download the report. + + Or, from the ellipses **(...)** menu, select **Download**. + + The following message displays: **Successfully Started To Generate On Demand Report.** + + +## Summary of available system reports + +| Report name | Type of the report | File format | Description | Availability | Collated report? | +|----------------------------|-----------------------------------|--------------------------|---------------------------| ----------------------------|----------------------------------| +| Access Key Entitlements and Usage Report | Summary

<br>Detailed | CSV | This report displays: <br> - Access key age, last rotation date, and last usage date availability in the summary report. Use this report to decide when to rotate access keys. <br> - Granted task and Permissions creep index (PCI) score. This report provides supporting information when you want to take the action on the keys. | AWS<br>Azure<br>GCP | Yes |
+| All Permissions for Identity | Detailed | CSV | This report lists all the assigned permissions for the selected identities. | Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) | N/A |
+| Group Entitlements and Usage | Summary | CSV | This report tracks all group level entitlements and the permission assignment, PCI. The number of members is also listed as part of this report. | AWS, Azure, or GCP | Yes |
+| Identity Permissions | Summary | CSV | This report tracks any, or specific, task usage per **User**, **Group**, **Role**, or **App**. | AWS, Azure, or GCP | No |
+| NIST 800-53 | Detailed<br>Summary<br>Dashboard | CSV<br>PDF | **Dashboard**: This report helps track the overall progress of the NIST 800-53 benchmark. It lists the percentage passing, overall pass or fail of test control along with the breakup of L1/L2 per Auth system. <br>**Summary**: For each authorized system, this report lists the test control pass or fail per authorized system and the number of resources evaluated for each test control. <br>**Detailed**: This report helps auditors and administrators to track the resource level pass or fail per test control. | AWS, Azure, or GCP | Yes |
+| PCI DSS | Detailed<br>Summary<br>Dashboard | CSV | **Dashboard**: This report helps track the overall progress of the PCI-DSS benchmark. It lists the percentage passing, overall pass or fail of test control along with the breakup of L1/L2 per Auth system. <br>**Summary**: For each authorized system, this report lists the test control pass or fail per authorized system and the number of resources evaluated for each test control. <br>**Detailed**: This report helps auditors and administrators to track the resource level pass or fail per test control. | AWS, Azure, or GCP | Yes |
+| PCI History | Summary | CSV | This report helps track **Monthly PCI History** for each authorized system. It can be used to plot the trend of the PCI. | AWS, Azure, or GCP | Yes |
+| Permissions Analytics Report (PAR) | Summary | PDF | This report helps monitor the **Identity Privilege** related activity across the authorized systems. It captures any Identity permission change. <br>This report has the following main sections: **User Summary**, **Group Summary**, **Role Summary & Delete Task Summary**. <br>The **User Summary** lists the current granted permissions along with high-risk permissions and resources accessed in 1-day, 7-day, or 30-days durations. There are subsections for newly added or deleted users, users with PCI change, high-risk active/inactive users. <br>The **Group Summary** lists the administrator level groups with the current granted permissions along with high-risk permissions and resources accessed in 1-day, 7-day, or 30-day durations. There are subsections for newly added or deleted groups, groups with PCI change, High-risk active/inactive groups. <br>The **Role Summary** and the **Group Summary** list similar details. <br>The **Delete Task** summary section lists the number of times the **Delete Task** has been executed in the given period. | AWS, Azure, or GCP | No |
+| Permissions Analytics Report (PAR) | Detailed | CSV | This report lists the different key findings in the selected authorized systems. The key findings include **Super identities**, **Inactive identities**, **Over-provisioned active identities**, **Storage bucket hygiene**, **Access key age (AWS)**, and so on. <br>This report helps administrators to visualize the findings across the organization and make decisions. | AWS, Azure, or GCP | Yes |
+| Role/Policy Details | Summary | CSV | This report captures **Assigned/Unassigned** and **Custom/system policy with used/unused condition** for specific or all AWS accounts. <br>Similar data can be captured for Azure and GCP for assigned and unassigned roles. | AWS, Azure, or GCP | No |
+| User Entitlements and Usage | Detailed<br>Summary | CSV | This report provides a summary and details of **User entitlements and usage**. <br>**Data displayed on Usage Analytics** screen is downloaded as part of the **Summary** report. <br>

**Detailed permissions usage per User** is listed in the Detailed report. | AWS, Azure, or GCP | Yes | + + +## Next steps + +- For information on how to view system reports in the **Reports** dashboard, see [View system reports in the Reports dashboard](product-reports.md). +- For information about how to create, view, and share a system report, see [Create, view, and share a custom report](report-view-system-report.md). +- For information about how to create and view a custom report, see [Generate and view a custom report](report-create-custom-report.md). +- For information about how to create and view the Permissions analytics report, see [Generate and download the Permissions analytics report](product-permissions-analytics-reports.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-all-reports.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-all-reports.md deleted file mode 100644 index 716f9029be38d..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-all-reports.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: View a list and description of all system reports available in CloudKnox Permissions Management reports -description: View a list and description of all system reports available in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: overview -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# View a list and description of system reports - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -CloudKnox Permissions Management (CloudKnox) has various types of system reports that capture specific sets of data. These reports allow management, auditors, and administrators to: - -- Make timely decisions. -- Analyze trends and system/user performance. -- Identify trends in data and high risk areas so that management can address issues more quickly and improve their efficiency. - -This article provides you with a list and description of the system reports available in CloudKnox. Depending on the report, you can download it in comma-separated values (**CSV**) format, portable document format (**PDF**), or Microsoft Excel Open XML Spreadsheet (**XLSX**) format. - -## Download a system report - -1. In the CloudKnox home page, select the **Reports** tab, and then select the **Systems Reports** subtab. -1. In the **Report Name** column, find the report you want, and then select the down arrow to the right of the report name to download the report. - - Or, from the ellipses **(...)** menu, select **Download**. - - The following message displays: **Successfully Started To Generate On Demand Report.** - - -## Summary of available system reports - -| Report name | Type of the report | File format | Description | Availability | Collated report? | -|----------------------------|-----------------------------------|--------------------------|---------------------------| ----------------------------|----------------------------------| -| Access Key Entitlements and Usage Report | Summary

<br>Detailed | CSV | This report displays: <br> - Access key age, last rotation date, and last usage date availability in the summary report. Use this report to decide when to rotate access keys. <br> - Granted task and Permissions creep index (PCI) score. This report provides supporting information when you want to take the action on the keys. | AWS<br>Azure<br>GCP | Yes |
-| All Permissions for Identity | Detailed | CSV | This report lists all the assigned permissions for the selected identities. | Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) | N/A |
-| Group Entitlements and Usage | Summary | CSV | This report tracks all group level entitlements and the permission assignment, PCI. The number of members is also listed as part of this report. | AWS, Azure, or GCP | Yes |
-| Identity Permissions | Summary | CSV | This report tracks any, or specific, task usage per **User**, **Group**, **Role**, or **App**. | AWS, Azure, or GCP | No |
-| NIST 800-53 | Detailed<br>Summary<br>Dashboard | CSV<br>PDF | **Dashboard**: This report helps track the overall progress of the NIST 800-53 benchmark. It lists the percentage passing, overall pass or fail of test control along with the breakup of L1/L2 per Auth system. <br>**Summary**: For each authorized system, this report lists the test control pass or fail per authorized system and the number of resources evaluated for each test control. <br>**Detailed**: This report helps auditors and administrators to track the resource level pass or fail per test control. | AWS, Azure, or GCP | Yes |
-| PCI DSS | Detailed<br>Summary<br>Dashboard | CSV | **Dashboard**: This report helps track the overall progress of the PCI-DSS benchmark. It lists the percentage passing, overall pass or fail of test control along with the breakup of L1/L2 per Auth system. <br>**Summary**: For each authorized system, this report lists the test control pass or fail per authorized system and the number of resources evaluated for each test control. <br>**Detailed**: This report helps auditors and administrators to track the resource level pass or fail per test control. | AWS, Azure, or GCP | Yes |
-| PCI History | Summary | CSV | This report helps track **Monthly PCI History** for each authorized system. It can be used to plot the trend of the PCI. | AWS, Azure, or GCP | Yes |
-| Permissions Analytics Report (PAR) | Summary | PDF | This report helps monitor the **Identity Privilege** related activity across the authorized systems. It captures any Identity permission change. <br>This report has the following main sections: **User Summary**, **Group Summary**, **Role Summary & Delete Task Summary**. <br>The **User Summary** lists the current granted permissions along with high-risk permissions and resources accessed in 1-day, 7-day, or 30-days durations. There are subsections for newly added or deleted users, users with PCI change, high-risk active/inactive users. <br>The **Group Summary** lists the administrator level groups with the current granted permissions along with high-risk permissions and resources accessed in 1-day, 7-day, or 30-day durations. There are subsections for newly added or deleted groups, groups with PCI change, High-risk active/inactive groups. <br>The **Role Summary** and the **Group Summary** list similar details. <br>The **Delete Task** summary section lists the number of times the **Delete Task** has been executed in the given period. | AWS, Azure, or GCP | No |
-| Permissions Analytics Report (PAR) | Detailed | CSV | This report lists the different key findings in the selected authorized systems. The key findings include **Super identities**, **Inactive identities**, **Over-provisioned active identities**, **Storage bucket hygiene**, **Access key age (AWS)**, and so on. <br>This report helps administrators to visualize the findings across the organization and make decisions. | AWS, Azure, or GCP | Yes |
-| Role/Policy Details | Summary | CSV | This report captures **Assigned/Unassigned** and **Custom/system policy with used/unused condition** for specific or all AWS accounts. <br>Similar data can be captured for Azure and GCP for assigned and unassigned roles. | AWS, Azure, or GCP | No |
-| User Entitlements and Usage | Detailed<br>Summary | CSV | This report provides a summary and details of **User entitlements and usage**. <br>**Data displayed on Usage Analytics** screen is downloaded as part of the **Summary** report. <br>

**Detailed permissions usage per User** is listed in the Detailed report. | AWS, Azure, or GCP | Yes | - - -## Next steps - -- For information on how to view system reports in the **Reports** dashboard, see [View system reports in the Reports dashboard](cloudknox-product-reports.md). -- For information about how to create, view, and share a system report, see [Create, view, and share a custom report](cloudknox-report-view-system-report.md). -- For information about how to create and view a custom report, see [Generate and view a custom report](cloudknox-report-create-custom-report.md). -- For information about how to create and view the Permissions analytics report, see [Generate and download the Permissions analytics report](cloudknox-product-permissions-analytics-reports.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-faqs.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-faqs.md deleted file mode 100644 index b06e14cd767e1..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-faqs.md +++ /dev/null @@ -1,160 +0,0 @@ ---- -title: Frequently asked questions (FAQs) about CloudKnox Permissions Management -description: Frequently asked questions (FAQs) about CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: faq -ms.date: 04/20/2022 -ms.author: kenwith ---- - -# Frequently asked questions (FAQs) - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -> [!NOTE] -> The CloudKnox Permissions Management (CloudKnox) PREVIEW is currently not available for tenants hosted in the European Union (EU). - - -This article answers frequently asked questions (FAQs) about CloudKnox Permissions Management (CloudKnox). - -## What's CloudKnox Permissions Management? - -CloudKnox is a cloud infrastructure entitlement management (CIEM) solution that provides comprehensive visibility into permissions assigned to all identities. For example, over-privileged workload and user identities, actions, and resources across multi-cloud infrastructures in Microsoft Azure, Amazon Web Services (AWS), and Google Cloud Platform (GCP). CloudKnox detects, automatically right-sizes, and continuously monitors unused and excessive permissions. It deepens the Zero Trust security strategy by augmenting the least privilege access principle. - - -## What are the prerequisites to use CloudKnox? - -CloudKnox supports data collection from AWS, GCP, and/or Microsoft Azure. For data collection and analysis, customers are required to have an Azure Active Directory (Azure AD) account to use CloudKnox. - -## Can a customer use CloudKnox if they have other identities with access to their IaaS platform that aren’t yet in Azure AD (for example, if part of their business has Okta or AWS Identity & Access Management (IAM))? - -Yes, a customer can detect, mitigate, and monitor the risk of ‘backdoor’ accounts that are local to AWS IAM, GCP, or from other identity providers such as Okta or AWS IAM. - -## Where can customers access CloudKnox? - -Customers can access the CloudKnox interface with a link from the Azure AD extension in the Azure portal. 
- -## Can non-cloud customers use CloudKnox on-premises? - -No, CloudKnox is a hosted cloud offering. - -## Can non-Azure customers use CloudKnox? - -Yes, non-Azure customers can use our solution. CloudKnox is a multi-cloud solution so even customers who have no subscription to Azure can benefit from it. - -## Is CloudKnox available for tenants hosted in the European Union (EU)? - -No, the CloudKnox Permissions Management (CloudKnox) PREVIEW is currently not available for tenants hosted in the European Union (EU). - -## If I’m already using Azure AD Privileged Identity Management (PIM) for Azure, what value does CloudKnox provide? - -CloudKnox complements Azure AD PIM. Azure AD PIM provides just-in-time access for admin roles in Azure (as well as Microsoft Online Services and apps that use groups), while CloudKnox allows multi-cloud discovery, remediation, and monitoring of privileged access across Azure, AWS, and GCP. - -## What languages does CloudKnox support? - -CloudKnox currently supports English. - -## What public cloud infrastructures are supported by CloudKnox? - -CloudKnox currently supports the three major public clouds: Amazon Web Services (AWS), Google Cloud Platform (GCP), and Microsoft Azure. - -## Does CloudKnox support hybrid environments? - -CloudKnox currently doesn’t support hybrid environments. - -## What types of identities are supported by CloudKnox? - -CloudKnox supports user identities (for example, employees, customers, external partners) and workload identities (for example, virtual machines, containers, web apps, serverless functions). - - - -## Is CloudKnox available in Government Cloud? - -No, CloudKnox is currently not available in Government clouds. - -## Is CloudKnox available for sovereign clouds? - -No, CloudKnox is currently not available in sovereign Clouds. - -## How does CloudKnox collect insights about permissions usage? - -CloudKnox has a data collector that collects access permissions assigned to various identities, activity logs, and resources metadata. This gathers full visibility into permissions granted to all identities to access the resources and details on usage of granted permissions. - -## How does CloudKnox evaluate cloud permissions risk? - -CloudKnox offers granular visibility into all identities and their permissions granted versus used, across cloud infrastructures to uncover any action performed by any identity on any resource. This isn't limited to just user identities, but also workload identities such as virtual machines, access keys, containers, and scripts. The dashboard gives an overview of permission profile to locate the riskiest identities and resources. - -## What is the Permissions Creep Index? - -The Permissions Creep Index (PCI) is a quantitative measure of risk associated with an identity or role determined by comparing permissions granted versus permissions exercised. It allows users to instantly evaluate the level of risk associated with the number of unused or over-provisioned permissions across identities and resources. It measures how much damage identities can cause based on the permissions they have. - -## How can customers use CloudKnox to delete unused or excessive permissions? - -CloudKnox allows users to right-size excessive permissions and automate least privilege policy enforcement with just a few clicks. 
The solution continuously analyzes historical permission usage data for each identity and gives customers the ability to right-size permissions of that identity to only the permissions that are being used for day-to-day operations. All unused and other risky permissions can be automatically removed. - -## How can customers grant permissions on-demand with CloudKnox? - -For any break-glass or one-off scenarios where an identity needs to perform a specific set of actions on a set of specific resources, the identity can request those permissions on-demand for a limited period with a self-service workflow. Customers can either use the built-in workflow engine or their IT service management (ITSM) tool. The user experience is the same for any identity type, identity source (local, enterprise directory, or federated) and cloud. - -## What is the difference between permissions on-demand and just-in-time access? - -Just-in-time (JIT) access is a method used to enforce the principle of least privilege to ensure identities are given the minimum level of permissions to perform the task at hand. Permissions on-demand are a type of JIT access that allows the temporary elevation of permissions, enabling identities to access resources on a by-request, timed basis. - -## How can customers monitor permissions usage with CloudKnox? - -Customers only need to track the evolution of their Permission Creep Index to monitor permissions usage. They can do this in the “Analytics” tab in their CloudKnox dashboard where they can see how the PCI of each identity or resource is evolving over time. - -## Can customers generate permissions usage reports? - -Yes, CloudKnox has various types of system report available that capture specific data sets. These reports allow customers to: -- Make timely decisions. -- Analyze usage trends and system/user performance. -- Identify high-risk areas. - -For information about permissions usage reports, see [Generate and download the Permissions analytics report](cloudknox-product-permissions-analytics-reports.md). - -## Does CloudKnox integrate with third-party ITSM (Information Technology Security Management) tools? - -CloudKnox integrates with ServiceNow. - - -## How is CloudKnox being deployed? - -Customers with Global Admin role have first to onboard CloudKnox on their Azure AD tenant, and then onboard their AWS accounts, GCP projects, and Azure subscriptions. More details about onboarding can be found in our product documentation. - -## How long does it take to deploy CloudKnox? - -It depends on each customer and how many AWS accounts, GCP projects, and Azure subscriptions they have. - -## Once CloudKnox is deployed, how fast can I get permissions insights? - -Once fully onboarded with data collection set up, customers can access permissions usage insights within hours. Our machine-learning engine refreshes the Permission Creep Index every hour so that customers can start their risk assessment right away. - -## Is CloudKnox collecting and storing sensitive personal data? - -No, CloudKnox doesn’t have access to sensitive personal data. - -## Where can I find more information about CloudKnox? - -You can read our blog and visit our web page. You can also get in touch with your Microsoft point of contact to schedule a demo. 
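The Permissions Creep Index (PCI) covered in these FAQs compares permissions granted against permissions actually exercised. The sketch below is a toy creep-style ratio only: the permission names are invented and the unused/granted formula is an assumption for illustration, not CloudKnox's actual PCI model.

```python
def creep_score(granted, exercised):
    """Share of granted permissions never exercised, scaled to 0-100.
    Toy illustration only -- not the product's actual PCI formula."""
    if not granted:
        return 0.0
    unused = set(granted) - set(exercised)
    return 100 * len(unused) / len(granted)

# Invented example permissions.
granted = {"s3:GetObject", "s3:PutObject", "iam:PassRole", "ec2:TerminateInstances"}
exercised = {"s3:GetObject"}

print(creep_score(granted, exercised))  # 75.0 -> most granted permissions are unused
```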
- -## Resources - -- [Public Preview announcement blog](https://www.aka.ms/CloudKnox-Public-Preview-Blog) -- [CloudKnox Permissions Management web page](https://microsoft.com/security/business/identity-access-management/permissions-management) - - - -## Next steps - -- For an overview of CloudKnox, see [What's CloudKnox Permissions Management?](cloudknox-overview.md). -- For information on how to onboard CloudKnox in your organization, see [Enable CloudKnox in your organization](cloudknox-onboard-enable-tenant.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-add-remove-role-task.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-add-remove-role-task.md deleted file mode 100644 index c9f6dd44a3096..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-add-remove-role-task.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: Add and remove roles and tasks for groups, users, and service accounts for Microsoft Azure and Google Cloud Platform (GCP) identities in the Remediation dashboard in CloudKnox Permissions Management -description: How to attach and detach permissions for groups, users, and service accounts for Microsoft Azure and Google Cloud Platform (GCP) identities in the Remediation dashboard in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Add and remove roles and tasks for Microsoft Azure and Google Cloud Platform (GCP) identities - - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how you can add and remove roles and tasks for Microsoft Azure and Google Cloud Platform (GCP) identities using the **Remediation** dashboard. - -> [!NOTE] -> To view the **Remediation** tab, your must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. - -## View permissions - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. -1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. -1. From the **Authorization System** dropdown, select the accounts you want to access. -1. From the **Search For** dropdown, select **Group**, **User**, or **APP**. -1. To search for more parameters, you can make a selection from the **User States**, **Permission Creep Index**, and **Task Usage** dropdowns. -1. Select **Apply**. - CloudKnox displays a list of groups, users, and service accounts that match your criteria. -1. In **Enter a username**, enter or select a user. -1. In **Enter a Group Name**, enter or select a group, then select **Apply**. -1. Make a selection from the results list. - - The table displays the **Username** **Domain/Account**, **Source**, **Resource** and **Current Role**. - - -## Add a role - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. -1. 
From the **Authorization System Type** dropdown, select **Azure** or **GCP**. -1. From the **Authorization System** dropdown, select the accounts you want to access. -1. From the **Search For** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. -1. Make a selection from the results list. - -1. To attach a role, select **Add role**. -1. In the **Add Role** page, from the **Available Roles** list, select the plus sign **(+)** to move the role to the **Selected Roles** list. -1. When you have finished adding roles, select **Submit**. -1. When the following message displays: **Are you sure you want to change permission?**, select: - - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. - - **Execute** to change the permission. - - **Close** to cancel the action. - -## Remove a role - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. -1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. -1. From the **Authorization System** dropdown, select the accounts you want to access. -1. From the **Search For** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. -1. Make a selection from the results list. - -1. To remove a role, select **Remove Role**. -1. In the **Remove Role** page, from the **Available Roles** list, select the plus sign **(+)** to move the role to the **Selected Roles** list. -1. When you have finished selecting roles, select **Submit**. -1. When the following message displays: **Are you sure you want to change permission?**, select: - - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. - - **Execute** to change the permission. - - **Close** to cancel the action. - -## Add a task - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. -1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. -1. From the **Authorization System** dropdown, select the accounts you want to access. -1. From the **Search For** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. -1. Make a selection from the results list. - -1. To attach a role, select **Add Tasks**. -1. In the **Add Tasks** page, from the **Available Tasks** list, select the plus sign **(+)** to move the task to the **Selected Tasks** list. -1. When you have finished adding tasks, select **Submit**. -1. When the following message displays: **Are you sure you want to change permission?**, select: - - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. - - **Execute** to change the permission. - - **Close** to cancel the action. - -## Remove a task - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. -1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. -1. From the **Authorization System** dropdown, select the accounts you want to access. -1. From the **Search For** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. -1. Make a selection from the results list. - -1. To remove a task, select **Remove Tasks**. -1. In the **Remove Tasks** page, from the **Available Tasks** list, select the plus sign **(+)** to move the task to the **Selected Tasks** list. -1. 
When you have finished selecting tasks, select **Submit**. -1. When the following message displays: **Are you sure you want to change permission?**, select: - - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. - - **Execute** to change the permission. - - **Close** to cancel the action. - -## Next steps - - -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to create a role/policy, see [Create a role/policy](cloudknox-howto-create-role-policy.md). -- For information on how to clone a role/policy, see [Clone a role/policy](cloudknox-howto-clone-role-policy.md). -- For information on how to delete a role/policy, see [Delete a role/policy](cloudknox-howto-delete-role-policy.md). -- For information on how to modify a role/policy, see Modify a role/policy](cloudknox-howto-modify-role-policy.md). -- To view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md). -- For information on how to attach and detach permissions for Amazon Web Services (AWS) identities, see [Attach and detach policies for AWS identities](cloudknox-howto-attach-detach-permissions.md). -- For information on how to revoke high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](cloudknox-howto-revoke-task-readonly-status.md) -For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](cloudknox-howto-create-approve-privilege-request.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-attach-detach-permissions.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-attach-detach-permissions.md deleted file mode 100644 index 6054e4c1c99c6..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-attach-detach-permissions.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: Attach and detach permissions for users, roles, and groups for Amazon Web Services (AWS) identities in the Remediation dashboard in CloudKnox Permissions Management -description: How to attach and detach permissions for users, roles, and groups for Amazon Web Services (AWS) identities in the Remediation dashboard in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Attach and detach policies for Amazon Web Services (AWS) identities - - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how you can attach and detach permissions for users, roles, and groups for Amazon Web Services (AWS) identities using the **Remediation** dashboard. - -> [!NOTE] -> To view the **Remediation** tab, your must have **Viewer**, **Controller**, or **Administrator** permissions. 
To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. - -## View permissions - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. -1. From the **Authorization System Type** dropdown, select **AWS**. -1. From the **Authorization System** dropdown, select the accounts you want to access. -1. From the **Search For** dropdown, select **Group**, **User**, or **Role**. -1. To search for more parameters, you can make a selection from the **User States**, **Permission Creep Index**, and **Task Usage** dropdowns. -1. Select **Apply**. - CloudKnox displays a list of users, roles, or groups that match your criteria. -1. In **Enter a username**, enter or select a user. -1. In **Enter a group name**, enter or select a group, then select **Apply**. -1. Make a selection from the results list. - - The table displays the related **Username** **Domain/Account**, **Source** and **Policy Name**. - - -## Attach policies - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. -1. From the **Authorization System Type** dropdown, select **AWS**. -1. In **Enter a username**, enter or select a user. -1. In **Enter a Group Name**, enter or select a group, then select **Apply**. -1. Make a selection from the results list. -1. To attach a policy, select **Attach Policies**. -1. In the **Attach Policies** page, from the **Available policies** list, select the plus sign **(+)** to move the policy to the **Selected policies** list. -1. When you have finished adding policies, select **Submit**. -1. When the following message displays: **Are you sure you want to change permission?**, select: - - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. - - **Execute** to change the permission. - - **Close** to cancel the action. - -## Detach policies - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. -1. From the **Authorization System Type** dropdown, select **AWS**. -1. In **Enter a username**, enter or select a user. -1. In **Enter a Group Name**, enter or select a group, then select **Apply**. -1. Make a selection from the results list. -1. To remove a policy, select **Detach Policies**. -1. In the **Detach Policies** page, from the **Available policies** list, select the plus sign **(+)** to move the policy to the **Selected policies** list. -1. When you have finished selecting policies, select **Submit**. -1. When the following message displays: **Are you sure you want to change permission?**, select: - - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. - - **Execute** to change the permission. - - **Close** to cancel the action. - -## Next steps - - -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to create a role/policy, see [Create a role/policy](cloudknox-howto-create-role-policy.md). -- For information on how to clone a role/policy, see [Clone a role/policy](cloudknox-howto-clone-role-policy.md). -- For information on how to delete a role/policy, see [Delete a role/policy](cloudknox-howto-delete-role-policy.md). 
-- For information on how to modify a role/policy, see Modify a role/policy](cloudknox-howto-modify-role-policy.md). -- To view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md). -- For information on how to revoke high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](cloudknox-howto-revoke-task-readonly-status.md) -For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](cloudknox-howto-create-approve-privilege-request.md). - diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-audit-trail-results.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-audit-trail-results.md deleted file mode 100644 index 8b383ad66a584..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-audit-trail-results.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: Generate an on-demand report from a query in the Audit dashboard in CloudKnox Permissions Management -description: How to generate an on-demand report from a query in the **Audit** dashboard in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Generate an on-demand report from a query - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how you can generate an on-demand report from a query in the **Audit** dashboard in CloudKnox Permissions Management (CloudKnox). You can: - -- Run a report on-demand. -- Schedule and run a report as often as you want. -- Share a report with other members of your team and management. - -## Generate a custom report on-demand - -1. In the CloudKnox home page, select the **Audit** tab. - - CloudKnox displays the query options available to you. -1. In the **Audit** dashboard, select **Search** to run the query. -1. Select **Export**. - - CloudKnox generates the report and exports it in comma-separated values (**CSV**) format, portable document format (**PDF**), or Microsoft Excel Open XML Spreadsheet (**XLSX**) format. - - - - -## Next steps - -- For information on how to view how users access information, see [Use queries to see how users access information](cloudknox-ui-audit-trail.md). -- For information on how to filter and view user activity, see [Filter and query user activity](cloudknox-product-audit-trail.md). -- For information on how to create a query,see [Create a custom query](cloudknox-howto-create-custom-queries.md). 
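The on-demand export described above produces a plain CSV (or PDF/XLSX) file that can be post-processed with ordinary tooling. A minimal sketch, assuming a hypothetical `audit-export.csv` file and a `Username` column, neither of which is specified by the article:

```python
import csv
from collections import Counter

# "audit-export.csv" and the "Username" header are assumptions for illustration;
# use the file name and columns your own export actually contains.
with open("audit-export.csv", newline="") as f:
    rows = list(csv.DictReader(f))

by_user = Counter(row["Username"] for row in rows)
for user, count in by_user.most_common(5):
    print(f"{user}: {count} events")
```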
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-clone-role-policy.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-clone-role-policy.md deleted file mode 100644 index b922cd5fc9043..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-clone-role-policy.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Clone a role/policy in the Remediation dashboard in CloudKnox Permissions Management -description: How to clone a role/policy in the Just Enough Permissions (JEP) Controller. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Clone a role/policy in the Remediation dashboard - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how you can use the **Remediation** dashboard in CloudKnox Permissions Management (CloudKnox) to clone roles/policies for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. - -> [!NOTE] -> To view the **Remediation** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. - -> [!NOTE] -> Microsoft Azure uses the term *role* for what other Cloud providers call *policy*. CloudKnox automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. - -## Clone a role/policy - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Role/Policies** tab. -1. Select the role/policy you want to clone, and from the **Actions** column, select **Clone**. -1. **(AWS Only)** In the **Clone** box, the **Clone Resources** and **Clone Conditions** checkboxes are automatically selected. - Deselect the boxes if the resources and conditions are different from what is displayed. -1. Enter a name for each authorization system that was selected in the **Policy Name** boxes, and then select **Next**. - -1. If the data collector hasn't been given controller privileges, the following message displays: **Only online/controller-enabled authorization systems can be submitted for cloning.** - - To clone this role manually, download the script and JSON file. - -1. Select **Submit**. -1. Refresh the **Role/Policies** tab to see the role/policy you cloned. - -## Next steps - - -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to create a role/policy, see [Create a role/policy](cloudknox-howto-create-role-policy.md). -- For information on how to delete a role/policy, see [Delete a role/policy](cloudknox-howto-delete-role-policy.md). -- For information on how to modify a role/policy, see Modify a role/policy](cloudknox-howto-modify-role-policy.md). 
-- To view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md). -- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](cloudknox-howto-attach-detach-permissions.md). -- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](cloudknox-howto-revoke-task-readonly-status.md) -- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](cloudknox-howto-create-approve-privilege-request.md). -- For information on how to view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md) diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-alert-trigger.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-alert-trigger.md deleted file mode 100644 index fb94891542776..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-alert-trigger.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: Create and view activity alerts and alert triggers in CloudKnox Permissions Management -description: How to create and view activity alerts and alert triggers in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Create and view activity alerts and alert triggers - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how you can create and view activity alerts and alert triggers in CloudKnox Permissions Management (CloudKnox). - -## Create an activity alert trigger - -1. In the CloudKnox home page, select **Activity Triggers** (the bell icon). -1. In the **Activity** tab, select **Create Activity Trigger**. -1. In the **Alert Name** box, enter a name for your alert. -1. In **Authorization System Type**, select your authorization system: Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). -1. In **Authorization System**, select **Is** or **In**, and then select one or more accounts and folders. -1. From the **Select a Type** dropdown, select: **Access Key ID**, **Identity Tag Key**, **Identity Tag Key Value**, **Resource Name**, **Resource Tag Key**, **Resource Tag Key Value**, **Role Name**, **Role Session Name**, **State**, **Task Name**, or **Username**. -1. From the **Operator** dropdown, select an option: - - - **Is**/**Is Not**: Select in the value field to view a list of all available values. You can either select or enter the required value. - - **Contains**/**Not Contains**: Enter any text that the query parameter should or shouldn't contain, for example *CloudKnox*. - - **In**/**Not In**: Select in the value field to view list of all available values. Select the required multiple values. - -1. 
To add another parameter, select the plus sign **(+)**, then select an operator, and then enter a value. - - To remove a parameter, select the minus sign **(-)**. -1. To add another activity type, select **Add**, and then enter your parameters. -1. To save your alert, select **Save**. - - A message displays to confirm your activity trigger has been created. - - The **Triggers** table in the **Alert Triggers** subtab displays your alert trigger. - -## View an activity alert - -1. In the CloudKnox home page, select **Activity Triggers** (the bell icon). -1. In the **Activity** tab, select the **Alerts** subtab. -1. From the **Alert Name** dropdown, select an alert. -1. From the **Date** dropdown, select **Last 24 Hours**, **Last 2 Days**, **Last Week**, or **Custom Range**. - - If you select **Custom Range**, select date and time settings, and then select **Apply**. -1. To view the alert, select **Apply** - - The **Alerts** table displays information about your alert. - - - -## View activity alert triggers - -1. In the CloudKnox home page, select **Activity triggers** (the bell icon). -1. In the **Activity** tab, select the **Alert Triggers** subtab. -1. From the **Status** dropdown, select **All**, **Activated** or **Deactivated**, then select **Apply**. - - The **Triggers** table displays the following information: - - - **Alerts**: The name of the alert trigger. - - **# of users subscribed**: The number of users who have subscribed to a specific alert trigger. - - - Select a number in this column to view information about the user. - - - **Created By**: The email address of the user who created the alert trigger. - - **Modified By**: The email address of the user who last modified the alert trigger. - - **Last Updated**: The date and time the alert trigger was last updated. - - **Subscription**: A switch that displays if the alert is **On** or **Off**. - - - If the column displays **Off**, the current user isn't subscribed to that alert. Switch the toggle to **On** to subscribe to the alert. - - The user who creates an alert trigger is automatically subscribed to the alert, and will receive emails about the alert. - -1. To see only activated or only deactivated triggers, from the **Status** dropdown, select **Activated** or **Deactivated**, and then select **Apply**. - -1. To view other options available to you, select the ellipses (**...**), and then select from the available options. - - If the **Subscription** is **On**, the following options are available: - - - **Edit**: Enables you to modify alert parameters - - > [!NOTE] - > Only the user who created the alert can perform the following actions: edit the trigger screen, rename an alert, deactivate an alert, and delete an alert. Changes made by other users aren't saved. - - - **Duplicate**: Create a duplicate of the alert called "**Copy of XXX**". - - **Rename**: Enter the new name of the query, and then select **Save.** - - **Deactivate**: The alert will still be listed, but will no longer send emails to subscribed users. - - **Activate**: Activate the alert trigger and start sending emails to subscribed users. - - **Notification Settings**: View the **Email** of users who are subscribed to the alert trigger and their **User Status**. - - **Delete**: Delete the alert. - - If the **Subscription** is **Off**, the following options are available: - - **View**: View details of the alert trigger. - - **Notification Settings**: View the **Email** of users who are subscribed to the alert trigger and their **User Status**. 
- - **Duplicate**: Create a duplicate copy of the selected alert trigger. - - - - -## Next steps - -- For an overview on activity triggers, see [View information about activity triggers](cloudknox-ui-triggers.md). -- For information on rule-based anomalies and anomaly triggers, see [Create and view rule-based anomalies and anomaly triggers](cloudknox-product-rule-based-anomalies.md). -- For information on finding outliers in identity's behavior, see [Create and view statistical anomalies and anomaly triggers](cloudknox-product-statistical-anomalies.md). -- For information on permission analytics triggers, see [Create and view permission analytics triggers](cloudknox-product-permission-analytics.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-approve-privilege-request.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-approve-privilege-request.md deleted file mode 100644 index 9cbe190dbef37..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-approve-privilege-request.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: Create or approve a request for permissions in the Remediation dashboard in CloudKnox Permissions Management -description: How to create or approve a request for permissions in the Remediation dashboard. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Create or approve a request for permissions - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how to create or approve a request for permissions in the **Remediation** dashboard in CloudKnox Permissions Management (CloudKnox). You can create and approve requests for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. - -The **Remediation** dashboard has two privilege-on-demand (POD) workflows you can use: -- **New Request**: The workflow used by a user to create a request for permissions for a specified duration. -- **Approver**: The workflow used by an approver to review and approve or reject a user’s request for permissions. - - -> [!NOTE] -> To view the **Remediation** dashboard, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. - -## Create a request for permissions - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **My Requests** subtab. - - The **My Requests** subtab displays the following options: - - **Pending**: A list of requests you’ve made but haven't yet been reviewed. - - **Approved**: A list of requests that have been reviewed and approved by the approver. These requests have either already been activated or are in the process of being activated. - - **Processed**: A summary of the requests you’ve created that have been approved (**Done**), **Rejected**, and requests that have been **Canceled**. - -1. 
To create a request for permissions, select **New Request**. -1. In the **Roles/Tasks** page: - 1. From the **Authorization System Type** dropdown, select the authorization system type you want to access: **AWS**, **Azure** or **GCP**. - 1. From the **Authorization System** dropdown, select the accounts you want to access. - 1. From the **Identity** dropdown, select the identity on whose behalf you’re requesting access. - - - If the identity you select is a Security Assertions Markup Language (SAML) user, and since a SAML user accesses the system through assumption of a role, select the user’s role in **Role**. - - - If the identity you select is a local user, to select the policies you want: - 1. Select **Request Policy(s)**. - 1. In **Available Policies**, select the policies you want. - 1. To select a specific policy, select the plus sign, and then find and select the policy you want. - - The policies you’ve selected appear in the **Selected policies** box. - - - If the identity you select is a local user, to select the tasks you want: - 1. Select **Request Task(s)**. - 1. In **Available Tasks**, select the tasks you want. - 1. To select a specific task, select the plus sign, and then select the task you want. - - The tasks you’ve selected appear in the **Selected Tasks** box. - - If the user already has existing policies, they're displayed in **Existing Policies**. -1. Select **Next**. - -1. If you selected **AWS**, the **Scope** page appears. - - 1. In **Select Scope**, select: - - **All Resources** - - **Specific Resources**, and then select the resources you want. - - **No Resources** - 1. In **Request Conditions**: - 1. Select **JSON** to add a JSON block of code. - 1. Select **Done** to accept the code you’ve entered, or **Clear** to delete what you’ve entered and start again. - 1. In **Effect**, select **Allow** or **Deny.** - 1. Select **Next**. - -1. The **Confirmation** page appears. -1. In **Request Summary**, enter a summary for your request. -1. Optional: In **Note**, enter a note for the approver. -1. In **Schedule**, select when (how quickly) you want your request to be processed: - - **ASAP** - - **Once** - - In **Create Schedule**, select the **Frequency**, **Date**, **Time**, and **For** the required duration, then select **Schedule**. - - **Daily** - - **Weekly** - - **Monthly** -1. Select **Submit**. - - The following message appears: **Your Request Has Been Successfully Submitted.** - - The request you submitted is now listed in **Pending Requests**. - -## Approve or reject a request for permissions - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **My requests** subtab. -1. To view a list of requests that haven't yet been reviewed, select **Pending Requests**. -1. In the **Request Summary** list, select the ellipses **(…)** menu on the right of a request, and then select: - - - **Details** to view the details of the request. - - **Approve** to approve the request. - - **Reject** to reject the request. - -1. (Optional) add a note to the requestor, and then select **Confirm.** - - The **Approved** subtab displays a list of requests that have been reviewed and approved by the approver. These requests have either already been activated or are in the process of being activated. - The **Processed** subtab displays a summary of the requests that have been approved or rejected, and requests that have been canceled. 
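In the request workflow above, the **Request Conditions** step for AWS accepts a raw JSON block. The snippet below is only an illustrative sketch of the kind of condition block that step can hold, assuming standard AWS IAM condition syntax; the condition keys and the CIDR range are placeholder examples, not values produced by CloudKnox.

```json
{
  "Condition": {
    "IpAddress": {
      "aws:SourceIp": "203.0.113.0/24"
    },
    "Bool": {
      "aws:MultiFactorAuthPresent": "true"
    }
  }
}
```

A condition like this scopes the requested permissions so they apply only to calls that originate from the given network range and were made with multi-factor authentication.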
- - -## Next steps - - -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to create a role/policy, see [Create a role/policy](cloudknox-howto-create-role-policy.md). -- For information on how to clone a role/policy, see [Clone a role/policy](cloudknox-howto-clone-role-policy.md). -- For information on how to delete a role/policy, see [Delete a role/policy](cloudknox-howto-delete-role-policy.md). -- For information on how to modify a role/policy, see Modify a role/policy](cloudknox-howto-modify-role-policy.md). -- To view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md). -- For information on how to attach and detach permissions for Amazon Web Services (AWS) identities, see [Attach and detach policies for AWS identities](cloudknox-howto-attach-detach-permissions.md). -- For information on how to add and remove roles and tasks for Microsoft Azure and Google Cloud Platform (GCP) identities, see [Add and remove roles and tasks for Azure and GCP identities](cloudknox-howto-attach-detach-permissions.md). -- For information on how to revoke high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](cloudknox-howto-revoke-task-readonly-status.md) diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-custom-queries.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-custom-queries.md deleted file mode 100644 index 181f0988bfc96..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-custom-queries.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -title: Create a custom query in CloudKnox Permissions Management -description: How to create a custom query in the Audit dashboard in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Create a custom query - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how you can use the **Audit** dashboard in CloudKnox Permissions Management (CloudKnox) to create custom queries that you can modify, save, and run as often as you want. - -## Open the Audit dashboard - -- In the CloudKnox home page, select the **Audit** tab. - - CloudKnox displays the query options available to you. - -## Create a custom query - -1. In the **Audit** dashboard, in the **New Query** subtab, select **Authorization System Type**, and then select the authorization systems you want to search: Amazon Web Services (**AWS**), Microsoft **Azure**, Google Cloud Platform (**GCP**), or Platform (**Platform**). -1. Select the authorization systems you want to search from the **List** and **Folders** box, and then select **Apply**. - -1. In the **New Query** box, enter your query parameters, and then select **Add**. 
- For example, to query by a date, select **Date** in the first box. In the second and third boxes, select the down arrow, and then select one of the date-related options. - -1. To add parameters, select **Add**, select the down arrow in the first box to display a dropdown of available selections. Then select the parameter you want. -1. To add more parameters to the same query, select **Add** (the plus sign), and from the first box, select **And** or **Or**. - - Repeat this step for the second and third box to complete entering the parameters. -1. To change your query as you're creating it, select **Edit** (the pencil icon), and then change the query parameters. -1. To change the parameter options, select the down arrow in each box to display a dropdown of available selections. Then select the option you want. -1. To discard your selections, select **Reset Query** for the parameter you want to change, and then make your selections again. -1. When you’re ready to run your query, select **Search**. -1. To save the query, select **Save**. - - CloudKnox saves the query and adds it to the **Saved Queries** list. - -## Save the query under a new name - -1. In the **Audit** dashboard, select the ellipses menu **(…)** on the far right and select **Save As**. -2. Enter a new name for the query, and then select **Save**. - - CloudKnox saves the query under the new name. Both the new query and the original query display in the **Saved Queries** list. - -## View a saved query - -1. In the **Audit** dashboard, select the down arrow next to **Saved Queries**. - - A list of saved queries appears. -2. Select the query you want to open. -3. To open the query with the authorization systems you saved with the query, select **Load with the saved authorization systems**. -4. To open the query with the authorization systems you have currently selected (which may be different from the ones you originally saved), select **Load with the currently selected authorization systems**. -5. Select **Load Queries**. - - CloudKnox displays details of the query in the **Activity** table. Select a query to see its details: - - - The **Identity Details**. - - The **Domain** name. - - The **Resource Name** and **Resource Type**. - - The **Task Name**. - - The **Date**. - - The **IP Address**. - - The **Authorization System**. - -## View a raw events summary - -1. In the **Audit** dashboard, select **View** (the eye icon) to open the **Raw Events Summary** box. - - The **Raw Events Summary** box displays **Username or Role Session Name**, the **Task name**, and the script for your query. -1. Select **Copy** to copy the script. -1. Select **X** to close the **Raw events summary** box. - - -## Run a saved query - -1. In the **Audit** dashboard, select the query you want to run. - - CloudKnox displays the results of the query in the **Activity** table. - -## Delete a query - -1. In the **Audit** dashboard, load the query you want to delete. -2. Select **Delete**. - - CloudKnox deletes the query. Deleted queries don't display in the **Saved Queries** list. - -## Rename a query - -1. In the **Audit** dashboard, load the query you want to rename. -2. Select the ellipses menu **(…)** on the far right, and select **Rename**. -3. Enter a new name for the query, and then select **Save**. - - CloudKnox saves the query under the new name. Both the new query and the original query display in the **Saved Queries** list. - -## Duplicate a query - -1. In the **Audit** dashboard, load the query you want to duplicate. -2. 
Select the ellipses menu **(…)** on the far right, and then select **Duplicate**. - - CloudKnox creates a copy of the query. Both the copy of the query and the original query display in the **Saved Queries** list. - - You can rename the original or copy of the query, change it, and save it without changing the other query. - - - -## Next steps - -- For information on how to view how users access information, see [Use queries to see how users access information](cloudknox-ui-audit-trail.md). -- For information on how to filter and view user activity, see [Filter and query user activity](cloudknox-product-audit-trail.md). -- For information on how to generate an on-demand report from a query, see [Generate an on-demand report from a query](cloudknox-howto-audit-trail-results.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-group-based-permissions.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-group-based-permissions.md deleted file mode 100644 index 731a60ed97e41..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-group-based-permissions.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Select group-based permissions settings in CloudKnox Permissions Management with the User management dashboard -description: How to select group-based permissions settings in CloudKnox Permissions Management with the User management dashboard. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Select group-based permissions settings - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how you can create and manage group-based permissions in CloudKnox Permissions Management (CloudKnox) with the User management dashboard. - -> [!NOTE] -> The CloudKnox Administrator for all authorization systems can create new group-based permissions. - -## Select administrative permissions settings for a group - -1. To display the **User Management** dashboard, select **User** (your initials) in the upper right of the screen, and then select **User Management**. -1. Select the **Groups** tab, and then select **Create Permission** in the upper right of the table. -1. In the **Set Group Permission** box, begin typing the name of an **Azure Active Directory Security Group** in your tenant. - -1. Select the permission setting you want: - - **Admin for all Authorization System Types** provides **View**, **Control**, and **Approve** permissions for all authorization system types. - - **Admin for selected Authorization System Types** provides **View**, **Control**, and **Approve** permissions for selected authorization system types. - - **Custom** allows you to set **View**, **Control**, and **Approve** permissions for the authorization system types that you select. -1. Select **Next**. - -1. If you selected **Admin for all Authorization System Types** - - Select Identities for each Authorization System that you would like members of this group to Request on. - -1.
If you selected **Admin for selected Authorization System Types** - - Select **Viewer**, **Controller**, or **Approver** for the **Authorization System Types** you want. - - Select **Next** and then select Identities for each Authorization System that you would like members of this group to Request on. - -1. If you select **Custom**, select the **Authorization System Types** you want. - - Select **Viewer**, **Controller**, or **Approver** for the **Authorization Systems** you want. - - Select **Next** and then select Identities for each Authorization System that you would like members of this group to Request on. - -1. Select **Save**, The following message appears: **New Group Has been Created Successfully.** -1. To see the group you created in the **Groups** table, refresh the page. - -## Next steps - -- For information about how to manage user information, see [Manage users and groups with the User management dashboard](cloudknox-ui-user-management.md). -- For information about how to view information about active and completed tasks, see [View information about active and completed tasks](cloudknox-ui-tasks.md). -- For information about how to view personal and organization information, see [View personal and organization information](cloudknox-product-account-settings.md). - diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-role-policy.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-role-policy.md deleted file mode 100644 index 91218399c57d1..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-role-policy.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -title: Create a role/policy in the Remediation dashboard in CloudKnox Permissions Management -description: How to create a role/policy in the Remediation dashboard in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Create a role/policy in the Remediation dashboard - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how you can use the **Remediation** dashboard in CloudKnox Permissions Management (CloudKnox) to create roles/policies for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. - -> [!NOTE] -> To view the **Remediation** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. - -> [!NOTE] -> Microsoft Azure uses the term *role* for what other cloud providers call *policy*. CloudKnox automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. - -## Create a policy for AWS - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Role/Policies** tab. -1. Use the dropdown lists to select the **Authorization System Type** and **Authorization System**. -1. 
Select **Create Policy**. -1. On the **Details** page, the **Authorization System Type** and **Authorization System** are pre-populated from your previous settings. - - To change the settings, make a selection from the dropdown. -1. Under **How Would You Like To Create The Policy**, select the required option: - - - **Activity of User(s)**: Allows you to create a policy based on user activity. - - **Activity of Group(s)**: Allows you to create a policy based on the aggregated activity of all the users belonging to the group(s). - - **Activity of Resource(s)**: Allows you to create a policy based on the activity of a resource, for example, an EC2 instance. - - **Activity of Role**: Allows you to create a policy based on the aggregated activity of all the users that assumed the role. - - **Activity of Tag(s)**: Allows you to create a policy based on the aggregated activity of all the tags. - - **Activity of Lambda Function**: Allows you to create a new policy based on the Lambda function. - - **From Existing Policy**: Allows you to create a new policy based on an existing policy. - - **New Policy**: Allows you to create a new policy from scratch. -1. In **Tasks performed in last**, select the duration: **90 days**, **60 days**, **30 days**, **7 days**, or **1 day**. -1. Depending on your preference, select or deselect **Include Access Advisor data.** -1. In **Settings**, from the **Available** column, select the plus sign **(+)** to move the identity into the **Selected** column, and then select **Next**. - -1. On the **Tasks** page, from the **Available** column, select the plus sign **(+)** to move the task into the **Selected** column. - - To add a whole category, select a category. - - To add individual items from a category, select the down arrow on the left of the category name, and then select individual items. -1. In **Resources**, select **All Resources** or **Specific Resources**. - - If you select **Specific Resources**, a list of available resources appears. Find the resources you want to add, and then select **Add**. -1. In **Request Conditions**, select **JSON** . -1. In **Effect**, select **Allow** or **Deny**, and then select **Next**. -1. In **Policy name:**, enter a name for your policy. -1. To add another statement to your policy, select **Add Statement**, and then, from the list of **Statements**, select a statement. -1. Review your **Task**, **Resources**, **Request Conditions**, and **Effect** settings, and then select **Next**. - - -1. On the **Preview** page, review the script to confirm it's what you want. -1. If your controller isn't enabled, select **Download JSON** or **Download Script** to download the code and run it yourself. - - If your controller is enabled, skip this step. -1. Select **Split Policy**, and then select **Submit**. - - A message confirms that your policy has been submitted for creation - -1. The [**CloudKnox Tasks**](cloudknox-ui-tasks.md) pane appears on the right. - - The **Active** tab displays a list of the policies CloudKnox is currently processing. - - The **Completed** tab displays a list of the policies CloudKnox has completed. -1. Refresh the **Role/Policies** tab to see the policy you created. - - - -## Create a role for Azure - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Role/Policies** tab. -1. Use the dropdown lists to select the **Authorization System Type** and **Authorization System**. -1. Select **Create Role**. -1. 
On the **Details** page, the **Authorization System Type** and **Authorization System** are pre-populated from your previous settings. - - To change the settings, select the box and make a selection from the dropdown. -1. Under **How Would You Like To Create The Role?**, select the required option: - - - **Activity of User(s)**: Allows you to create a role based on user activity. - - **Activity of Group(s)**: Allows you to create a role based on the aggregated activity of all the users belonging to the group(s). - - **Activity of App(s)**: Allows you to create a role based on the aggregated activity of all apps. - - **From Existing Role**: Allows you to create a new role based on an existing role. - - **New Role**: Allows you to create a new role from scratch. - -1. In **Tasks performed in last**, select the duration: **90 days**, **60 days**, **30 days**, **7 days**, or **1 day**. -1. Depending on your preference: - - Select or deselect **Ignore Non-Microsoft Read Actions**. - - Select or deselect **Include Read-Only Tasks**. -1. In **Settings**, from the **Available** column, select the plus sign **(+)** to move the identity into the **Selected** column, and then select **Next**. - -1. On the **Tasks** page, in **Role name:**, enter a name for your role. -1. From the **Available** column, select the plus sign **(+)** to move the task into the **Selected** column. - - To add a whole category, select a category. - - To add individual items from a category, select the down arrow on the left of the category name, and then select individual items. -1. Select **Next**. - -1. On the **Preview** page, review: - - The list of selected **Actions** and **Not Actions**. - - The **JSON** or **Script** to confirm it's what you want. -1. If your controller isn't enabled, select **Download JSON** or **Download Script** to download the code and run it yourself. - - If your controller is enabled, skip this step. - -1. Select **Submit**. - - A message confirms that your role has been submitted for creation - -1. The [**CloudKnox Tasks**](cloudknox-ui-tasks.md) pane appears on the right. - - The **Active** tab displays a list of the policies CloudKnox is currently processing. - - The **Completed** tab displays a list of the policies CloudKnox has completed. -1. Refresh the **Role/Policies** tab to see the role you created. - -## Create a role for GCP - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Role/Policies** tab. -1. Use the dropdown lists to select the **Authorization System Type** and **Authorization System**. -1. Select **Create Role**. -1. On the **Details** page, the **Authorization System Type** and **Authorization System** are pre-populated from your previous settings. - - To change the settings, select the box and make a selection from the dropdown. -1. Under **How Would You Like To Create The Role?**, select the required option: - - - **Activity of User(s)**: Allows you to create a role based on user activity. - - **Activity of Group(s)**: Allows you to create a role based on the aggregated activity of all the users belonging to the group(s). - - **Activity of Service Account(s)**: Allows you to create a role based on the aggregated activity of all service accounts. - - **From Existing Role**: Allows you to create a new role based on an existing role. - - **New Role**: Allows you to create a new role from scratch. - -1. In **Tasks performed in last**, select the duration: **90 days**, **60 days**, **30 days**, **7 days**, or **1 day**. -1. 
If you selected **Activity Of Service Account(s)** in the previous step, select or deselect **Collect activity across all GCP Authorization Systems.** -1. From the **Available** column, select the plus sign **(+)** to move the identity into the **Selected** column, and then select **Next**. - - -1. On the **Tasks** page, in **Role name:**, enter a name for your role. -1. From the **Available** column, select the plus sign **(+)** to move the task into the **Selected** column. - - To add a whole category, select a category. - - To add individual items from a category, select the down arrow on the left of the category name, and then select individual items. -1. Select **Next**. - -1. On the **Preview** page, review: - - The list of selected **Actions**. - - The **YAML** or **Script** to confirm it's what you want. -1. If your controller isn't enabled, select **Download YAML** or **Download Script** to download the code and run it yourself. -1. Select **Submit**. - A message confirms that your role has been submitted for creation - -1. The [**CloudKnox Tasks**](cloudknox-ui-tasks.md) pane appears on the right. - - - The **Active** tab displays a list of the policies CloudKnox is currently processing. - - The **Completed** tab displays a list of the policies CloudKnox has completed. -1. Refresh the **Role/Policies** tab to see the role you created. - - -## Next steps - -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to modify a role/policy, see [Modify a role/policy](cloudknox-howto-modify-role-policy.md). -- For information on how to clone a role/policy, see [Clone a role/policy](cloudknox-howto-clone-role-policy.md). -- For information on how to delete a role/policy, see [Delete a role/policy](cloudknox-howto-delete-role-policy.md). -- To view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md). -- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](cloudknox-howto-attach-detach-permissions.md). -- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](cloudknox-howto-revoke-task-readonly-status.md) -- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](cloudknox-howto-create-approve-privilege-request.md). -- For information on how to view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md) diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-rule.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-rule.md deleted file mode 100644 index 38c00f0e645a2..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-rule.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: Create a rule in the Autopilot dashboard in CloudKnox Permissions Management -description: How to create a rule in the Autopilot dashboard in CloudKnox Permissions Management. 
-services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Create a rule in the Autopilot dashboard - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how to create a rule in the CloudKnox Permissions Management (CloudKnox) **Autopilot** dashboard. - -> [!NOTE] -> Only users with **Administrator** permissions can view and make changes on the Autopilot tab. If you don’t have these permissions, contact your system administrator. - -## Create a rule - -1. In the CloudKnox home page, select the **Autopilot** tab. -1. In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). -1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. -1. In the **Autopilot** dashboard, select **New Rule**. -1. In the **Rule Name** box, enter a name for your rule. -1. Select **AWS**, **Azure**, **GCP**, and then select **Next**. - -1. Select **Authorization Systems**, and then select **All** or the account names that you want. -1. From the **Folders** dropdown, select a folder, and then select **Apply**. - - To change your folder settings, select **Reset**. - - - The **Status** column displays if the authorization system is **Online** or **Offline**. - - The **Controller** column displays if the controller is **Enabled** or **Not Enabled**. - - -1. Select **Configure**, and then select the following parameters for your rule: - - - **Role Created On Is**: Select the duration in days. - - **Role Last Used On Is**: Select the duration in days when the role was last used. - - **Cross Account Role**: Select **True** or **False**. - -1. Select **Mode**, and then, if you want recommendations to be generated and applied manually, select **On-Demand**. -1. Select **Save**. - - The following information displays in the **Autopilot Rules** table: - - - **Rule Name**: The name of the rule. - - **State**: The status of the rule: idle (not being used) or active (being used). - - **Rule Type**: The type of rule being applied. - - **Mode**: Whether recommendations for the rule are generated and applied on-demand. - - **Last Generated**: The date and time the rule was last generated. - - **Created By**: The email address of the user who created the rule. - - **Last Modified On**: The date and time the rule was last modified. - - **Subscription**: Provides an **On** or **Off** switch that allows you to receive email notifications when recommendations have been generated, applied, or unapplied. - - - - -## Next steps - -- For more information about viewing rules, see [View roles in the Autopilot dashboard](cloudknox-ui-autopilot.md). -- For information about generating, viewing, and applying rule recommendations for rules, see [Generate, view, and apply rule recommendations for rules](cloudknox-howto-recommendations-rule.md). -- For information about notification settings for rules, see [View notification settings for a rule](cloudknox-howto-notifications-rule.md).
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-delete-role-policy.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-delete-role-policy.md deleted file mode 100644 index 5339d078bcdd7..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-delete-role-policy.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Delete a role/policy in the Remediation dashboard in CloudKnox Permissions Management -description: How to delete a role/policy in the Just Enough Permissions (JEP) Controller. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Delete a role/policy in the Remediation dashboard - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how you can use the **Remediation** dashboard in CloudKnox Permissions Management (CloudKnox) to delete roles/policies for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. - -> [!NOTE] -> To view the **Remediation** dashboard, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. - -> [!NOTE] -> Microsoft Azure uses the term *role* for what other Cloud providers call *policy*. CloudKnox automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. - -## Delete a role/policy - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Role/Policies** subtab. -1. Select the role/policy you want to delete, and from the **Actions** column, select **Delete**. - - You can only delete a role/policy if it isn't assigned to an identity. - - You can't delete system roles/policies. - -1. On the **Preview** page, review the role/policy information to make sure you want to delete it, and then select **Submit**. - -## Next steps - - -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to create a role/policy, see [Create a role/policy](cloudknox-howto-create-role-policy.md). -- For information on how to clone a role/policy, see [Clone a role/policy](cloudknox-howto-clone-role-policy.md). -- For information on how to modify a role/policy, see Modify a role/policy](cloudknox-howto-modify-role-policy.md). -- To view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md). -- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](cloudknox-howto-attach-detach-permissions.md). 
-- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](cloudknox-howto-revoke-task-readonly-status.md) -- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](cloudknox-howto-create-approve-privilege-request.md). -- For information on how to view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md) diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-modify-role-policy.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-modify-role-policy.md deleted file mode 100644 index b04e1e695c7f5..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-modify-role-policy.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Modify a role/policy in the Remediation dashboard in CloudKnox Permissions Management -description: How to modify a role/policy in the Remediation dashboard in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Modify a role/policy in the Remediation dashboard - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how you can use the **Remediation** dashboard in CloudKnox Permissions Management (CloudKnox) to modify roles/policies for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. - -> [!NOTE] -> To view the **Remediation** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. - -> [!NOTE] -> Microsoft Azure uses the term *role* for what other cloud providers call *policy*. CloudKnox automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. - -## Modify a role/policy - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Role/Policies** tab. -1. Select the role/policy you want to modify, and from the **Actions** column, select **Modify**. - - You can't modify **System** policies and roles. - -1. On the **Statements** page, make your changes to the **Tasks**, **Resources**, **Request conditions**, and **Effect** sections as required, and then select **Next**. - -1. Review the changes to the JSON or script on the **Preview** page, and then select **Submit**. - -## Next steps - -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to create a role/policy, see [Create a role/policy](cloudknox-howto-create-role-policy.md). 
-- For information on how to clone a role/policy, see [Clone a role/policy](cloudknox-howto-clone-role-policy.md). -- For information on how to delete a role/policy, see [Delete a role/policy](cloudknox-howto-delete-role-policy.md). -- To view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md). -- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](cloudknox-howto-attach-detach-permissions.md). -- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](cloudknox-howto-revoke-task-readonly-status.md) -- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](cloudknox-howto-create-approve-privilege-request.md). -- For information on how to view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md) diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-notifications-rule.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-notifications-rule.md deleted file mode 100644 index 54d9c277b0b4e..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-notifications-rule.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: View notification settings for a rule in the Autopilot dashboard in CloudKnox Permissions Management -description: How to view notification settings for a rule in the Autopilot dashboard in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# View notification settings for a rule in the Autopilot dashboard - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how to view notification settings for a rule in the CloudKnox Permissions Management (CloudKnox) **Autopilot** dashboard. - -> [!NOTE] -> Only users with **Administrator** permissions can view and make changes on the Autopilot tab. If you don’t have these permissions, contact your system administrator. - -## View notification settings for a rule - -1. In the CloudKnox home page, select the **Autopilot** tab. -1. In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). -1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. -1. In the **Autopilot** dashboard, select a rule. -1. In the far right of the row, select the ellipses **(...)** -1. To view notification settings for a rule, select **Notification Settings**. - - CloudKnox displays a list of subscribed users. These users are signed up to receive notifications for the selected rule. - -1. To close the **Notification Settings** box, select **Close**. 
- - -## Next steps - -- For more information about viewing rules, see [View roles in the Autopilot dashboard](cloudknox-ui-autopilot.md). -- For information about creating rules, see [Create a rule](cloudknox-howto-create-rule.md). -- For information about generating, viewing, and applying rule recommendations for rules, see [Generate, view, and apply rule recommendations for rules](cloudknox-howto-recommendations-rule.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-recommendations-rule.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-recommendations-rule.md deleted file mode 100644 index f73e725c3909c..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-recommendations-rule.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Generate, view, and apply rule recommendations in the Autopilot dashboard in CloudKnox Permissions Management -description: How to generate, view, and apply rule recommendations in the Autopilot dashboard in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Generate, view, and apply rule recommendations in the Autopilot dashboard - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how to generate and view rule recommendations in the CloudKnox Permissions Management (CloudKnox) **Autopilot** dashboard. - -> [!NOTE] -> Only users with **Administrator** permissions can view and make changes on the Autopilot tab. If you don’t have these permissions, contact your system administrator. - -## Generate rule recommendations - -1. In the CloudKnox home page, select the **Autopilot** tab. -1. In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). -1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. -1. In the **Autopilot** dashboard, select a rule. -1. In the far right of the row, select the ellipses **(...)**. -1. To generate recommendations for each user and the authorization system, select **Generate Recommendations**. - - Only the user who created the selected rule can generate a recommendation. -1. View your recommendations in the **Recommendations** subtab. -1. Select **Close** to close the **Recommendations** subtab. - -## View rule recommendations - -1. In the CloudKnox home page, select the **Autopilot** tab. -1. In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). -1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. -1. In the **Autopilot** dashboard, select a rule. -1. In the far right of the row, select the ellipses **(...)** - -1. 
To view recommendations for each user and the authorization system, select **View Recommendations**. - - CloudKnox displays the recommendations for each user and authorization system in the **Recommendations** subtab. - -1. Select **Close** to close the **Recommendations** subtab. - -## Apply rule recommendations - -1. In the CloudKnox home page, select the **Autopilot** tab. -1. In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). -1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. -1. In the **Autopilot** dashboard, select a rule. -1. In the far right of the row, select the ellipses **(...)** - -1. To view recommendations for each user and the authorization system, select **View Recommendations**. - - CloudKnox displays the recommendations for each user and authorization system in the **Recommendations** subtab. - -1. To apply a recommendation, select the **Apply Recommendations** subtab, and then select a recommendation. -1. Select **Close** to close the **Recommendations** subtab. - -## Unapply rule recommendations - -1. In the CloudKnox home page, select the **Autopilot** tab. -1. In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). -1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. -1. In the **Autopilot** dashboard, select a rule. -1. In the far right of the row, select the ellipses **(...)** - -1. To view recommendations for each user and the authorization system, select **View Recommendations**. - - CloudKnox displays the recommendations for each user and authorization system in the **Recommendations** subtab. - -1. To remove a recommendation, select the **Unapply Recommendations** subtab, and then select a recommendation. -1. Select **Close** to close the **Recommendations** subtab. - - -## Next steps - -- For more information about viewing rules, see [View roles in the Autopilot dashboard](cloudknox-ui-autopilot.md). -- For information about creating rules, see [Create a rule](cloudknox-howto-create-rule.md). -- For information about notification settings for rules, see [View notification settings for a rule](cloudknox-howto-notifications-rule.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-revoke-task-readonly-status.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-revoke-task-readonly-status.md deleted file mode 100644 index d2c5e51db065a..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-revoke-task-readonly-status.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: Revoke access to high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities in the Remediation dashboard in CloudKnox Permissions Management -description: How to revoke access to high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities in the Remediation dashboard in CloudKnox Permissions Management. 
-services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Revoke access to high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities - - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how you can revoke high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities using the **Remediation** dashboard. - -> [!NOTE] -> To view the **Remediation** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. - -## View an identity's permissions - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. -1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. -1. From the **Authorization System** dropdown, select the accounts you want to access. -1. From the **Search for** dropdown, select **Group**, **User**, or **APP/Service Account**. -1. To search for more parameters, you can make a selection from the **User States**, **Permission Creep Index**, and **Task Usage** dropdowns. -1. Select **Apply**. - - CloudKnox displays a list of groups, users, and service accounts that match your criteria. -1. In **Enter a username**, enter or select a user. -1. In **Enter a Group Name**, enter or select a group, then select **Apply**. -1. Make a selection from the results list. - - The table displays the **Username**, **Domain/Account**, **Source**, **Resource**, and **Current Role**. - - -## Revoke an identity's access to unused tasks - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. -1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. -1. From the **Authorization System** dropdown, select the accounts you want to access. -1. From the **Search for** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. -1. Make a selection from the results list. - -1. To revoke an identity's access to tasks they aren't using, select **Revoke Unused Tasks**. -1. When the following message displays: **Are you sure you want to change permission?**, select: - - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. - - **Execute** to change the permission. - - **Close** to cancel the action. - -## Revoke an identity's access to high-risk tasks - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. -1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. -1. From the **Authorization System** dropdown, select the accounts you want to access. -1. From the **Search For** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. -1. Make a selection from the results list. - -1. To revoke an identity's access to high-risk tasks, select **Revoke High-Risk Tasks**.
-1. When the following message displays: **Are you sure you want to change permission?**, select: - - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. - - **Execute** to change the permission. - - **Close** to cancel the action. - -## Revoke an identity's ability to delete tasks - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. -1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. -1. From the **Authorization System** dropdown, select the accounts you want to access. -1. From the **Search For** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. -1. Make a selection from the results list. - -1. To revoke an identity's ability to delete tasks, select **Revoke Delete Tasks**. -1. When the following message displays: **Are you sure you want to change permission?**, select: - - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. - - **Execute** to change the permission. - - **Close** to cancel the action. - -## Assign read-only status to an identity - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. -1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. -1. From the **Authorization System** dropdown, select the accounts you want to access. -1. From the **Search for** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. -1. Make a selection from the results list. - -1. To assign read-only status to an identity, select **Assign Read-Only Status**. -1. When the following message displays: **Are you sure you want to change permission?**, select: - - **Generate Script** to generate a script where you can manually add/remove the permissions you selected (an illustrative example of this kind of script appears after the next steps). - - **Execute** to change the permission. - - **Close** to cancel the action. - - -## Next steps - -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permissions in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to create a role/policy, see [Create a role/policy](cloudknox-howto-create-role-policy.md). -- For information on how to clone a role/policy, see [Clone a role/policy](cloudknox-howto-clone-role-policy.md). -- For information on how to delete a role/policy, see [Delete a role/policy](cloudknox-howto-delete-role-policy.md). -- For information on how to modify a role/policy, see [Modify a role/policy](cloudknox-howto-modify-role-policy.md). -- To view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md). -- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](cloudknox-howto-attach-detach-permissions.md). -- For information on how to add and remove roles and tasks for Azure and GCP identities, see [Add and remove roles and tasks for Azure and GCP identities](cloudknox-howto-attach-detach-permissions.md). -- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](cloudknox-howto-create-approve-privilege-request.md).
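The **Generate Script** option produces a script that is specific to the identity and authorization system you selected, so its exact contents vary. As an illustrative sketch only (not actual CloudKnox output), the kind of change such a script performs for an Azure identity that is being moved to read-only access can be expressed with the Azure CLI; the user, role names, subscription ID, and resource group below are placeholders:

```azurecli
# Hypothetical illustration only - the script CloudKnox generates is tailored to your environment.
# Placeholder values; replace with the identity and scope from your own subscription.
ASSIGNEE="user@contoso.com"
SCOPE="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg"

# Remove a broad, unused role assignment from the identity ...
az role assignment delete --assignee "$ASSIGNEE" --role "Contributor" --scope "$SCOPE"

# ... and grant the built-in read-only role in its place.
az role assignment create --assignee "$ASSIGNEE" --role "Reader" --scope "$SCOPE"
```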
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-view-role-policy.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-view-role-policy.md deleted file mode 100644 index a6574d3ae8d2a..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-view-role-policy.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: View information about roles/ policies in the Remediation dashboard in CloudKnox Permissions Management -description: How to view and filter information about roles/ policies in the Remediation dashboard in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# View information about roles/ policies in the Remediation dashboard - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -The **Remediation** dashboard in CloudKnox Permissions Management (CloudKnox) enables system administrators to view, adjust, and remediate excessive permissions based on a user's activity data. You can use the **Roles/Policies** subtab in the dashboard to view information about roles and policies in the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. - -> [!NOTE] -> To view the **Remediation dashboard** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. - -> [!NOTE] -> Microsoft Azure uses the term *role* for what other Cloud providers call *policy*. CloudKnox automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. - - -## View information about roles/policies - -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Role/Policies** subtab. - - The **Role/Policies list** displays a list of existing roles/policies and the following information about each role/policy - - **Role/Policy Name**: The name of the roles/policies available to you. - - **Role/Policy Type**: **Custom**, **System**, or **CloudKnox Only** - - **Actions**: The type of action you can perform on the role/policy, **Clone**, **Modify**, or **Delete** - - -1. To display details about the role/policy and view its assigned tasks and identities, select the arrow to the left of the role/policy name. - - The **Tasks** list appears, displaying: - - A list of **Tasks**. - - **For AWS:** - - The **Users**, **Groups**, and **Roles** the task is **Directly Assigned To**. - - The **Group Members** and **Role Identities** the task is **Indirectly Accessible By**. - - - **For Azure:** - - The **Users**, **Groups**, **Enterprise Applications** and **Managed Identities** the task is **Directly Assigned To**. - - The **Group Members** the task is **Indirectly Accessible By**. - - - **For GCP:** - - The **Users**, **Groups**, and **Service Accounts** the task is **Directly Assigned To**. 
- - The **Group Members** the task is **Indirectly Accessible By**. - -1. To close the role/policy details, select the arrow to the left of the role/policy name. - -## Export information about roles/policies - -- **Export CSV**: Select this option to export the displayed list of roles/policies as a comma-separated values (CSV) file. - - When the file is successfully exported, a message appears: **Exported Successfully.** - - - Check your email for a message from the CloudKnox Customer Success Team. This email contains a link to: - - The **Role Policy Details** report in CSV format. - - The **Reports** dashboard where you can configure how and when you can automatically receive reports. - - - - -## Filter information about roles/policies - -1. On the CloudKnox home page, select the **Remediation** dashboard, and then select the **Role/Policies** tab. -1. To filter the roles/policies, select from the following options: - - - **Authorization System Type**: Select **AWS**, **Azure**, or **GCP**. - - **Authorization System**: Select the accounts you want. - - **Role/Policy Type**: Select from the following options: - - - **All**: All managed roles/policies. - - **Custom**: A customer-managed role/policy. - - **System**: A cloud service provider-managed role/policy. - - **CloudKnox Only**: A role/policy created by CloudKnox. - - - **Role/Policy Status**: Select **All**, **Assigned**, or **Unassigned**. - - **Role/Policy Usage**: Select **All** or **Unused**. -1. Select **Apply**. - - To discard your changes, select **Reset Filter**. - - -## Next steps - -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to create a role/policy, see [Create a role/policy](cloudknox-howto-create-role-policy.md). -- For information on how to clone a role/policy, see [Clone a role/policy](cloudknox-howto-clone-role-policy.md). -- For information on how to delete a role/policy, see [Delete a role/policy](cloudknox-howto-delete-role-policy.md). -- For information on how to modify a role/policy, see Modify a role/policy](cloudknox-howto-modify-role-policy.md). -- For information on how to attach and detach permissions AWS identities, see [Attach and detach policies for AWS identities](cloudknox-howto-attach-detach-permissions.md). -- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](cloudknox-howto-revoke-task-readonly-status.md) -- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](cloudknox-howto-create-approve-privilege-request.md). 
-- For information on how to view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md) diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-integration-api.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-integration-api.md deleted file mode 100644 index 2bcae5561976b..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-integration-api.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Set and view configuration settings in CloudKnox Permissions Management -description: How to view the CloudKnox Permissions Management API integration settings and create service accounts and roles. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Set and view configuration settings - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This topic describes how to view configuration settings, create and delete a service account, and create a role in CloudKnox Permissions Management (CloudKnox). - -## View configuration settings - -The **Integrations** dashboard displays the authorization systems available to you. - -1. To display the **Integrations** dashboard, select **User** (your initials) in the upper right of the screen, and then select **Integrations.** - - The **Integrations** dashboard displays a tile for each available authorization system. - -1. Select an authorization system tile to view the following integration information: - - 1. To find out more about the CloudKnox API, select **CloudKnox API**, and then select documentation. - - - 1. To view information about service accounts, select **Integration**: - - **Email**: Lists the email address of the user who created the integration. - - **Created By**: Lists the first and last name of the user who created the integration. - - **Created On**: Lists the date and time the integration was created. - - **Recent Activity**: Lists the date and time the integration was last used, or notes if the integration was never used. - - **Service Account ID**: Lists the service account ID. - - **Access Key**: Lists the access key code. - - 1. To view settings information, select **Settings**: - - **Roles can create service account**: Lists the type of roles you can create. - - **Access Key Rotation Policy**: Lists notifications and actions you can set. - - **Access Key Usage Policy**: Lists notifications and actions you can set. - -## Create a service account - -1. On the **Integrations** dashboard, select **User**, and then select **Integrations.** -2. Click **Create Service Account**. The following information is pre-populated on the page: - - **API Endpoint** - - **Service Account ID** - - **Access Key** - - **Secret Key** - -3. To copy the codes, select the **Duplicate** icon next to the respective information. - - > [!NOTE] - > The codes are time sensitive and will regenerate after the box is closed. - -4. To regenerate the codes, at the bottom of the column, select **Regenerate**. - -## Delete a service account - -1. 
On the **Integrations** dashboard, select **User**, and then select **Integrations.** - -1. On the right of the email address, select **Delete Service Account**. - - On the **Validate OTP To Delete [Service Name] Integration** box, a message displays asking you to check your email for a code sent to the email address on file. - - If you don't receive the code, select **Resend OTP**. - -1. In the **Enter OTP** box, enter the code from the email. - -1. Click **Verify**. - -## Create a role - -1. On the **Integrations** dashboard, select **User**, and then select **Settings**. -2. Under **Roles can create service account**, select the role you want: - - **Super Admin** - - **Viewer** - - **Controller** - -3. In the **Access Key Rotation Policy** column, select options for the following: - - - **How often should the users rotate their access keys?**: Select **30 days**, **60 days**, **90 days**, or **Never**. - - **Notification**: Enter a whole number in the blank space within **Notify "X" days before the selected period**, or select **Don't Notify**. - - **Action (after the key rotation period ends)**: Select **Disable Action Key** or **No Action**. - -4. In the **Access Key Usage Policy** column, select options for the following: - - - **How often should the users go without using their access keys?**: Select **30 days**, **60 days**, **90 days**, or **Never**. - - **Notification**: Enter a whole number in the blank space within **Notify "X" days before the selected period**, or select **Don't Notify**. - - **Action (after the key rotation period ends)**: Select **Disable Action Key** or **No Action**. - -5. Click **Save**. - - - - - - \ No newline at end of file diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-multi-cloud-glossary.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-multi-cloud-glossary.md deleted file mode 100644 index c18ec28669bb1..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-multi-cloud-glossary.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: CloudKnox Permissions Management - The CloudKnox glossary -description: CloudKnox Permissions Management glossary -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: conceptual -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# The CloudKnox glossary - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This glossary provides a list of some of the commonly used cloud terms in CloudKnox Permissions Management (CloudKnox). These terms will help CloudKnox users navigate through cloud-specific terms and cloud-generic terms. - -## Commonly-used acronyms and terms - -| Term | Definition | -|-----------------------|-----------------------------------------------------| -| ACL | Access control list. A list of files or resources that contain information about which users or groups have permission to access those resources or modify those files. 
| ARN | Amazon Resource Name. A unique identifier that AWS assigns to each resource. | -| Authorization System | CIEM supports AWS accounts, Azure subscriptions, and GCP projects as authorization systems. | -| Authorization System Type | Any system that provides authorizations by assigning permissions to identities and resources. CIEM supports AWS, Azure, and GCP as authorization system types. | -| Cloud security | A form of cybersecurity that protects data stored online on cloud computing platforms from theft, leakage, and deletion. Includes firewalls, penetration testing, obfuscation, tokenization, virtual private networks (VPN), and avoiding public internet connections. | -| Cloud storage | A service model in which data is maintained, managed, and backed up remotely. Available to users over a network. | -| CIAM | Cloud Infrastructure Access Management | -| CIEM | Cloud Infrastructure Entitlement Management. The next generation of solutions for enforcing least privilege in the cloud. It addresses cloud-native security challenges of managing identity access management in cloud environments. | -| CIS | Cloud infrastructure security | -| CWP | Cloud Workload Protection. A workload-centric security solution that targets the unique protection requirements of workloads in modern enterprise environments. | -| CNAPP | Cloud-Native Application Protection. The convergence of cloud security posture management (CSPM), cloud workload protection (CWP), cloud infrastructure entitlement management (CIEM), and cloud applications security broker (CASB). An integrated security approach that covers the entire lifecycle of cloud-native applications. | -| CSPM | Cloud Security Posture Management. Addresses risks of compliance violations and misconfigurations in enterprise cloud environments. Also focuses on the resource level to identify deviations from best practice security settings for cloud governance and compliance. | -| CWPP | Cloud Workload Protection Platform | -| Data Collector | A virtual entity that stores the data collection configuration. | -| Delete task | A high-risk task that allows users to permanently delete a resource. | -| ED | Enterprise directory | -| Entitlement | An abstract attribute that represents different forms of user permissions in a range of infrastructure systems and business applications. | -| Entitlement management | Technology that grants, resolves, enforces, revokes, and administers fine-grained access entitlements (that is, authorizations, privileges, access rights, permissions and rules). Its purpose is to execute IT access policies to structured/unstructured data, devices, and services. It can be delivered by different technologies, and is often different across platforms, applications, network components, and devices. | -| High-risk task | A task in which a user can cause data leakage, service disruption, or service degradation. | -| Hybrid cloud | Sometimes called a cloud hybrid. A computing environment that combines an on-premises data center (a private cloud) with a public cloud. It allows data and applications to be shared between them. | -| Hybrid cloud storage | A private or public cloud used to store an organization's data. | -| ICM | Incident Case Management | -| IDS | Intrusion Detection Service | -| Identity analytics | Includes basic monitoring and remediation, dormant and orphan account detection and removal, and privileged account discovery.
| -| Identity lifecycle management | Maintain digital identities, their relationships with the organization, and their attributes during the entire process from creation to eventual archiving, using one or more identity life cycle patterns. | -| IGA | Identity governance and administration. Technology solutions that conduct identity management and access governance operations. IGA includes the tools, technologies, reports, and compliance activities required for identity lifecycle management. It includes every operation from account creation and termination to user provisioning, access certification, and enterprise password management. It looks at automated workflow and data from authoritative sources capabilities, self-service user provisioning, IT governance, and password management. | -| ITSM | Information Technology Security Management. Tools that enable IT operations organizations (infrastructure and operations managers), to better support the production environment. Facilitate the tasks and workflows associated with the management and delivery of quality IT services. | -| JEP | Just Enough Permissions | -| JIT | Just in Time access can be seen as a way to enforce the principle of least privilege to ensure users and non-human identities are given the minimum level of privileges. It also ensures that privileged activities are conducted in accordance with an organization’s Identity Access Management (IAM), IT Service Management (ITSM), and Privileged Access Management (PAM) policies, with its entitlements and workflows. JIT access strategy enables organizations to maintain a full audit trail of privileged activities so they can easily identify who or what gained access to which systems, what they did at what time, and for how long. | -| Least privilege | Ensures that users only gain access to the specific tools they need to complete a task. | -| Multi-tenant | A single instance of the software and its supporting infrastructure serves multiple customers. Each customer shares the software application and also shares a single database. | -| OIDC | OpenID Connect. An authentication protocol that verifies user identity when a user is trying to access a protected HTTPs end point. OIDC is an evolutionary development of ideas implemented earlier in OAuth. | -| PAM | Privileged access management. Tools that offer one or more of these features: discover, manage, and govern privileged accounts on multiple systems and applications; control access to privileged accounts, including shared and emergency access; randomize, manage, and vault credentials (password, keys, etc.) for administrative, service, and application accounts; single sign-on (SSO) for privileged access to prevent credentials from being revealed; control, filter, and orchestrate privileged commands, actions, and tasks; manage and broker credentials to applications, services, and devices to avoid exposure; and monitor, record, audit, and analyze privileged access, sessions, and actions. | -| PASM | Privileged accounts are protected by vaulting their credentials. Access to those accounts is then brokered for human users, services, and applications. Privileged session management (PSM) functions establish sessions with possible credential injection and full session recording. Passwords and other credentials for privileged accounts are actively managed and changed at definable intervals or upon the occurrence of specific events. 
PASM solutions may also provide application-to-application password management (AAPM) and zero-install remote privileged access features for IT staff and third parties that don't require a VPN. | -| PEDM | Specific privileges are granted on the managed system by host-based agents to logged-in users. PEDM tools provide host-based command control (filtering); application allow, deny, and isolate controls; and/or privilege elevation. The latter is in the form of allowing particular commands to be run with a higher level of privileges. PEDM tools execute on the actual operating system at the kernel or process level. Command control through protocol filtering is explicitly excluded from this definition because the point of control is less reliable. PEDM tools may also provide file integrity monitoring features. | -| Permission | Rights and privileges. Details given by users or network administrators that define access rights to files on a network. Access controls attached to a resource dictating which identities can access it and how. Privileges are attached to identities and are the ability to perform certain actions. An identity having the ability to perform an action on a resource. | -| POD | Permission on Demand. A type of JIT access that allows the temporary elevation of permissions, enabling identities to access resources on a by-request, timed basis. | -| Permissions creep index (PCI) | A number from 0 to 100 that represents the incurred risk of users with access to high-risk privileges. PCI is a function of users who have access to high-risk privileges but aren't actively using them. | -| Policy and role management | Maintain rules that govern automatic assignment and removal of access rights. Provides visibility of access rights for selection in access requests, approval processes, dependencies, and incompatibilities between access rights, and more. Roles are a common vehicle for policy management. | -| Privilege | The authority to make changes to a network or computer. Both people and accounts can have privileges, and both can have different levels of privilege. | -| Privileged account | A login credential to a server, firewall, or other administrative account. Often referred to as admin accounts. Comprised of the actual username and password; these two things together make up the account. A privileged account is allowed to do more things than a normal account. | -| Public Cloud | Computing services offered by third-party providers over the public Internet, making them available to anyone who wants to use or purchase them. They may be free or sold on-demand, allowing customers to pay only per usage for the CPU cycles, storage, or bandwidth they consume. | -| Resource | Any entity that uses compute capabilities can be accessed by users and services to perform actions. | -| Role | An IAM identity that has specific permissions. Instead of being uniquely associated with one person, a role is intended to be assumable by anyone who needs it. A role doesn't have standard long-term credentials such as a password or access keys associated with. | -| SCIM | System for Cross–domain Identity Management | -| SIEM | Security Information and Event Management. Technology that supports threat detection, compliance and security incident management through the collection and analysis (both near real time and historical) of security events, as well as a wide variety of other event and contextual data sources. 
The core capabilities are a broad scope of log event collection and management, the ability to analyze log events and other data across disparate sources, and operational capabilities (such as incident management, dashboards, and reporting). | -| SOAR | Security orchestration, automation and response (SOAR). Technologies that enable organizations to take inputs from various sources (mostly from security information and event management [SIEM] systems) and apply workflows aligned to processes and procedures. These workflows can be orchestrated via integrations with other technologies and automated to achieve the desired outcome and greater visibility. Other capabilities include case and incident management features; the ability to manage threat intelligence, dashboards and reporting; and analytics that can be applied across various functions. SOAR tools significantly enhance security operations activities like threat detection and response by providing machine-powered assistance to human analysts to improve the efficiency and consistency of people and processes. | -| Super user / Super identity | A powerful account used by IT system administrators that can be used to make configurations to a system or application, add or remove users, or delete data. | -| Tenant | A dedicated instance of the services and organization data stored within a specific default location. | -| UUID | Universally unique identifier. A 128-bit label used for information in computer systems. The term globally unique identifier (GUID) is also used.| -| Zero trust security | The three foundational principles: explicit verification, breach assumption, and least privileged access.| -| ZTNA | Zero trust network access. A product or service that creates an identity- and context-based, logical access boundary around an application or set of applications. The applications are hidden from discovery, and access is restricted via a trust broker to a set of named entities. The broker verifies the identity, context and policy adherence of the specified participants before allowing access and prohibits lateral movement elsewhere in the network. It removes application assets from public visibility and significantly reduces the surface area for attack.| - -## Next steps - -- For an overview of CloudKnox, see [What's CloudKnox Permissions Management?](cloudknox-overview.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-add-account-after-onboarding.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-add-account-after-onboarding.md deleted file mode 100644 index bceb2295d4593..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-add-account-after-onboarding.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Add an account/ subscription/ project to Microsoft CloudKnox Permissions Management after onboarding is complete -description: How to add an account/ subscription/ project to Microsoft CloudKnox Permissions Management after onboarding is complete. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Add an account/ subscription/ project after onboarding is complete - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. 
-> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how to add an Amazon Web Services (AWS) account, Microsoft Azure subscription, or Google Cloud Platform (GCP) project in Microsoft CloudKnox Permissions Management (CloudKnox) after you've completed the onboarding process. - -## Add an AWS account after onboarding is complete - -1. In the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data collectors** tab. -1. On the **Data collectors** dashboard, select **AWS**. -1. Select the ellipses **(...)** at the end of the row, and then select **Edit Configuration**. - - The **CloudKnox Onboarding - Summary** page displays. - -1. Go to **AWS Account IDs**, and then select **Edit** (the pencil icon). - - The **CloudKnox Onboarding - AWS Member Account Details** page displays. - -1. Go to **Enter Your AWS Account IDs**, and then select **Add** (the plus **+** sign). -1. Copy your account ID from AWS and paste it into the **Enter Account ID** box. - - The AWS account ID is automatically added to the script. - - If you want to add more account IDs, repeat steps 5 and 6 to add up to a total of 10 account IDs. - -1. Copy the script. -1. Go to AWS and start the Cloud Shell. -1. Create a new script for the new account and press the **Enter** key. -1. Paste the script you copied. -1. Locate the account line, delete the original account ID (the one that was previously added), and then run the script. -1. Return to CloudKnox, and the new account ID you added will be added to the list of account IDs displayed in the **CloudKnox Onboarding - Summary** page. -1. Select **Verify now & save**. - - When your changes are saved, the following message displays: **Successfully updated configuration.** - - -## Add an Azure subscription after onboarding is complete - -1. In the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data collectors** tab. -1. On the **Data collectors** dashboard, select **Azure**. -1. Select the ellipses **(...)** at the end of the row, and then select **Edit Configuration**. - - The **CloudKnox Onboarding - Summary** page displays. - -1. Go to **Azure subscription IDs**, and then select **Edit** (the pencil icon). -1. Go to **Enter your Azure Subscription IDs**, and then select **Add subscription** (the plus **+** sign). -1. Copy and paste your subscription ID from Azure and paste it into the subscription ID box. - - The subscription ID is automatically added to the subscriptions line in the script. - - If you want to add more subscription IDs, repeat steps 4 and 5 to add up to a total of 10 subscriptions. - -1. Copy the script. -1. Go to Azure and start the Cloud Shell. -1. Create a new script for the new subscription and press enter. -1. Paste the script you copied. -1. Locate the subscription line and delete the original subscription ID (the one that was previously added), and then run the script. -1. Return to CloudKnox, and the new subscription ID you added will be added to the list of subscription IDs displayed in the **CloudKnox Onboarding - Summary** page. -1. Select **Verify now & save**. - - When your changes are saved, the following message displays: **Successfully updated configuration.** - -## Add a GCP project after onboarding is complete - -1. 
In the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data collectors** tab. -1. On the **Data collectors** dashboard, select **GCP**. -1. Select the ellipses **(...)** at the end of the row, and then select **Edit Configuration**. - - The **CloudKnox Onboarding - Summary** page displays. - -1. Go to **GCP Project IDs**, and then select **Edit** (the pencil icon). -1. Go to **Enter your GCP Project IDs**, and then select **Add Project ID** (the plus **+** sign). -1. Copy your project ID from GCP and paste it into the **Project ID** box. - - The project ID is automatically added to the **Project ID** line in the script. - - If you want to add more project IDs, repeat steps 5 and 6 to add up to a total of 10 project IDs. - -1. Copy the script. -1. Go to GCP and start the Cloud Shell. -1. Create a new script for the new project ID and press **Enter**. -1. Paste the script you copied. -1. Locate the project ID line and delete the original project ID (the one that was previously added), and then run the script. -1. Return to CloudKnox, and the new project ID you added will be added to the list of project IDs displayed in the **CloudKnox Onboarding - Summary** page. -1. Select **Verify now & save**. - - When your changes are saved, the following message displays: **Successfully updated configuration.** - - - -## Next steps - -- For information on how to onboard an Amazon Web Services (AWS) account, see [Onboard an AWS account](cloudknox-onboard-aws.md). -- For information on how to onboard a Microsoft Azure subscription, see [Onboard a Microsoft Azure subscription](cloudknox-onboard-azure.md). -- For information on how to onboard a Google Cloud Platform (GCP) project, see [Onboard a GCP project](cloudknox-onboard-gcp.md). -- For information on how to enable or disable the controller after onboarding is complete, see [Enable or disable the controller](cloudknox-onboard-enable-controller-after-onboarding.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-aws.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-aws.md deleted file mode 100644 index 968f5dfb047ee..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-aws.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -title: Onboard an Amazon Web Services (AWS) account on CloudKnox Permissions Management -description: How to onboard an Amazon Web Services (AWS) account on CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 04/20/2022 -ms.author: kenwith ---- - -# Onboard an Amazon Web Services (AWS) account - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -> [!NOTE] -> The CloudKnox Permissions Management (CloudKnox) PREVIEW is currently not available for tenants hosted in the European Union (EU). - - -This article describes how to onboard an Amazon Web Services (AWS) account on CloudKnox Permissions Management (CloudKnox).
- -> [!NOTE] -> A *global administrator* or *super admin* (an admin for all authorization system types) can perform the tasks in this article after the global administrator has initially completed the steps provided in [Enable CloudKnox on your Azure Active Directory tenant](cloudknox-onboard-enable-tenant.md). - - -## View a training video on configuring and onboarding an AWS account - -To view a video on how to configure and onboard AWS accounts in CloudKnox, select [Configure and onboard AWS accounts](https://www.youtube.com/watch?v=R6K21wiWYmE). - -## Onboard an AWS account - -1. If the **Data Collectors** dashboard isn't displayed when CloudKnox launches: - - - In the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. - -1. On the **Data Collectors** dashboard, select **AWS**, and then select **Create Configuration**. - -### 1. Create an Azure AD OIDC App. - -1. On the **CloudKnox Onboarding - Azure AD OIDC App Creation** page, enter the **OIDC Azure app name**. - - This app is used to set up an OpenID Connect (OIDC) connection to your AWS account. OIDC is an interoperable authentication protocol based on the OAuth 2.0 family of specifications. The scripts generated on this page create the app of this specified name in your Azure AD tenant with the right configuration. - -1. To create the app registration, copy the script and run it in your Azure command-line app. - - > [!NOTE] - > 1. To confirm that the app was created, open **App registrations** in Azure and, on the **All applications** tab, locate your app. - > 1. Select the app name to open the **Expose an API** page. The **Application ID URI** displayed in the **Overview** page is the *audience value* used while making an OIDC connection with your AWS account. - -1. Return to CloudKnox, and in the **CloudKnox Onboarding - Azure AD OIDC App Creation**, select **Next**. - -### 2. Set up an AWS OIDC account. - -1. In the **CloudKnox Onboarding - AWS OIDC Account Setup** page, enter the **AWS OIDC account ID** where the OIDC provider is created. You can change the role name to your requirements. -1. Open another browser window and sign in to the AWS account where you want to create the OIDC provider. -1. Select **Launch Template**. This link takes you to the **AWS CloudFormation create stack** page. -1. Scroll to the bottom of the page, and in the **Capabilities** box, select **I acknowledge that AWS CloudFormation might create IAM resources with custom names**. Then select **Create Stack.** - - This AWS CloudFormation stack creates an OIDC Identity Provider (IdP) representing Azure AD STS and an AWS IAM role with a trust policy that allows external identities from Azure AD to assume it via the OIDC IdP. These entities are listed on the **Resources** page. - -1. Return to CloudKnox, and in the **CloudKnox Onboarding - AWS OIDC Account Setup** page, select **Next**. - -### 3. Set up an AWS master account. (Optional) - -1. If your organization has Service Control Policies (SCPs) that govern some or all of the member accounts, set up the master account connection in the **CloudKnox Onboarding - AWS Master Account Details** page. - - Setting up the master account connection allows CloudKnox to auto-detect and onboard any AWS member accounts that have the correct CloudKnox role. - - - In the **CloudKnox Onboarding - AWS Master Account Details** page, enter the **Master Account ID** and **Master Account Role**. - -1. 
Open another browser window and sign in to the AWS console for your master account. - -1. Return to CloudKnox, and in the **CloudKnox Onboarding - AWS Master Account Details** page, select **Launch Template**. - - The **AWS CloudFormation create stack** page opens, displaying the template. - -1. Review the information in the template, make changes, if necessary, then scroll to the bottom of the page. - -1. In the **Capabilities** box, select **I acknowledge that AWS CloudFormation might create IAM resources with custom names**. Then select **Create stack**. - - This AWS CloudFormation stack creates a role in the master account with the necessary permissions (policies) to collect SCPs and list all the accounts in your organization. - - A trust policy is set on this role to allow the OIDC role created in your AWS OIDC account to access it. These entities are listed in the **Resources** tab of your CloudFormation stack. - -1. Return to CloudKnox, and in **CloudKnox Onboarding - AWS Master Account Details**, select **Next**. - -### 4. Set up an AWS Central logging account. (Optional but recommended) - -1. If your organization has a central logging account where logs from some or all of your AWS account are stored, in the **CloudKnox Onboarding - AWS Central Logging Account Details** page, set up the logging account connection. - - In the **CloudKnox Onboarding - AWS Central Logging Account Details** page, enter the **Logging Account ID** and **Logging Account Role**. - -1. In another browser window, sign in to the AWS console for the AWS account you use for central logging. - -1. Return to CloudKnox, and in the **CloudKnox Onboarding - AWS Central Logging Account Details** page, select **Launch Template**. - - The **AWS CloudFormation create stack** page opens, displaying the template. - -1. Review the information in the template, make changes, if necessary, then scroll to the bottom of the page. - -1. In the **Capabilities** box, select **I acknowledge that AWS CloudFormation might create IAM resources with custom names**, and then select **Create stack**. - - This AWS CloudFormation stack creates a role in the logging account with the necessary permissions (policies) to read S3 buckets used for central logging. A trust policy is set on this role to allow the OIDC role created in your AWS OIDC account to access it. These entities are listed in the **Resources** tab of your CloudFormation stack. - -1. Return to CloudKnox, and in the **CloudKnox Onboarding - AWS Central Logging Account Details** page, select **Next**. - -### 5. Set up an AWS member account. - -1. In the **CloudKnox Onboarding - AWS Member Account Details** page, enter the **Member Account Role** and the **Member Account IDs**. - - You can enter up to 10 account IDs. Click the plus icon next to the text box to add more account IDs. - - > [!NOTE] - > Perform the next 6 steps for each account ID you add. - -1. Open another browser window and sign in to the AWS console for the member account. - -1. Return to the **CloudKnox Onboarding - AWS Member Account Details** page, select **Launch Template**. - - The **AWS CloudFormation create stack** page opens, displaying the template. - -1. In the **CloudTrailBucketName** page, enter a name. - - You can copy and paste the **CloudTrailBucketName** name from the **Trails** page in AWS. - - > [!NOTE] - > A *cloud bucket* collects all the activity in a single account that CloudKnox monitors. 
Enter the name of a cloud bucket here to provide CloudKnox with the access required to collect activity data. - -1. From the **Enable Controller** dropdown, select: - - - **True**, if you want the controller to provide CloudKnox with read and write access so that any remediation you want to do from the CloudKnox platform can be done automatically. - - **False**, if you want the controller to provide CloudKnox with read-only access. - -1. Scroll to the bottom of the page, and in the **Capabilities** box, select **I acknowledge that AWS CloudFormation might create IAM resources with custom names**. Then select **Create stack**. - - This AWS CloudFormation stack creates a collection role in the member account with necessary permissions (policies) for data collection. - - A trust policy is set on this role to allow the OIDC role created in your AWS OIDC account to access it. These entities are listed in the **Resources** tab of your CloudFormation stack. - -1. Return to CloudKnox, and in the **CloudKnox Onboarding - AWS Member Account Details** page, select **Next**. - - This step completes the sequence of required connections from Azure AD STS to the OIDC connection account and the AWS member account. - -### 6. Review and save. - -1. In **CloudKnox Onboarding – Summary**, review the information you’ve added, and then select **Verify Now & Save**. - - The following message appears: **Successfully created configuration.** - - On the **Data Collectors** dashboard, the **Recently Uploaded On** column displays **Collecting**. The **Recently Transformed On** column displays **Processing.** - - You have now completed onboarding AWS, and CloudKnox has started collecting and processing your data. - -### 7. View the data. - -1. To view the data, select the **Authorization Systems** tab. - - The **Status** column in the table displays **Collecting Data.** - - The data collection process may take some time, depending on the size of the account and how much data is available for collection. - - -## Next steps - -- For information on how to onboard a Microsoft Azure subscription, see [Onboard a Microsoft Azure subscription](cloudknox-onboard-azure.md). -- For information on how to onboard a Google Cloud Platform (GCP) project, see [Onboard a Google Cloud Platform (GCP) project](cloudknox-onboard-gcp.md). -- For information on how to enable or disable the controller after onboarding is complete, see [Enable or disable the controller](cloudknox-onboard-enable-controller-after-onboarding.md). -- For information on how to add an account/subscription/project after onboarding is complete, see [Add an account/subscription/project after onboarding is complete](cloudknox-onboard-add-account-after-onboarding.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-azure.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-azure.md deleted file mode 100644 index 939c093c9b3a5..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-azure.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: Onboard a Microsoft Azure subscription in CloudKnox Permissions Management -description: How to a Microsoft Azure subscription on CloudKnox Permissions Management. 
-services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 04/20/2022 -ms.author: kenwith ---- - -# Onboard a Microsoft Azure subscription - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -> [!NOTE] -> The CloudKnox Permissions Management (CloudKnox) PREVIEW is currently not available for tenants hosted in the European Union (EU). - -This article describes how to onboard a Microsoft Azure subscription or subscriptions on CloudKnox Permissions Management (CloudKnox). Onboarding a subscription creates a new authorization system to represent the Azure subscription in CloudKnox. - -> [!NOTE] -> A *global administrator* or *super admin* (an admin for all authorization system types) can perform the tasks in this article after the global administrator has initially completed the steps provided in [Enable CloudKnox on your Azure Active Directory tenant](cloudknox-onboard-enable-tenant.md). - -## Prerequisites - -To add CloudKnox to your Azure AD tenant: -- You must have an Azure AD user account and an Azure command-line interface (Azure CLI) on your system, or an Azure subscription. If you don't already have one, [create a free account](https://azure.microsoft.com/free/). -- You must have **Microsoft.Authorization/roleAssignments/write** permission at the subscription or management group scope to perform these tasks. If you don't have this permission, you can ask someone who has this permission to perform these tasks for you. - - -## View a training video on enabling CloudKnox in your Azure AD tenant - -To view a video on how to enable CloudKnox in your Azure AD tenant, select [Enable CloudKnox in your Azure AD tenant](https://www.youtube.com/watch?v=-fkfeZyevoo). - -## How to onboard an Azure subscription - -1. If the **Data Collectors** dashboard isn't displayed when CloudKnox launches: - - - In the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. - -1. On the **Data Collectors** dashboard, select **Azure**, and then select **Create Configuration**. - -### 1. Add Azure subscription details - -1. On the **CloudKnox Onboarding - Azure Subscription Details** page, enter the **Subscription IDs** that you want to onboard. - - > [!NOTE] - > To locate the Azure subscription IDs, open the **Subscriptions** page in Azure. - > You can enter up to 10 subscriptions IDs. Select the plus sign **(+)** icon next to the text box to enter more subscriptions. - -1. From the **Scope** dropdown, select **Subscription** or **Management Group**. The script box displays the role assignment script. - - > [!NOTE] - > Select **Subscription** if you want to assign permissions separately for each individual subscription. The generated script has to be executed once per subscription. - > Select **Management Group** if all of your subscriptions are under one management group. The generated script must be executed once for the management group. - -1. To give this role assignment to the service principal, copy the script to a file on your system where Azure CLI is installed and execute it. - - You can execute the script once for each subscription, or once for all the subscriptions in the management group. - -1. 
From the **Enable Controller** dropdown, select: - - - **True**, if you want the controller to provide CloudKnox with read and write access so that any remediation you want to do from the CloudKnox platform can be done automatically. - - **False**, if you want the controller to provide CloudKnox with read-only access. - -1. Return to **CloudKnox Onboarding - Azure Subscription Details** page and select **Next**. - -### 2. Review and save. - -- In **CloudKnox Onboarding – Summary** page, review the information you’ve added, and then select **Verify Now & Save**. - - The following message appears: **Successfully Created Configuration.** - - On the **Data Collectors** tab, the **Recently Uploaded On** column displays **Collecting**. The **Recently Transformed On** column displays **Processing.** - - You have now completed onboarding Azure, and CloudKnox has started collecting and processing your data. - -### 3. View the data. - -- To view the data, select the **Authorization Systems** tab. - - The **Status** column in the table displays **Collecting Data.** - - The data collection process will take some time, depending on the size of the account and how much data is available for collection. - - -## Next steps - -- For information on how to onboard an Amazon Web Services (AWS) account, see [Onboard an Amazon Web Services (AWS) account](cloudknox-onboard-aws.md). -- For information on how to onboard a Google Cloud Platform (GCP) project, see [Onboard a Google Cloud Platform (GCP) project](cloudknox-onboard-gcp.md). -- For information on how to enable or disable the controller after onboarding is complete, see [Enable or disable the controller](cloudknox-onboard-enable-controller-after-onboarding.md). -- For information on how to add an account/subscription/project after onboarding is complete, see [Add an account/subscription/project after onboarding is complete](cloudknox-onboard-add-account-after-onboarding.md). -- For an overview on CloudKnox, see [What's CloudKnox Permissions Management?](cloudknox-overview.md). -- For information on how to start viewing information about your authorization system in CloudKnox, see [View key statistics and data about your authorization system](cloudknox-ui-dashboard.md). \ No newline at end of file diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-enable-controller-after-onboarding.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-enable-controller-after-onboarding.md deleted file mode 100644 index 2e380779c657a..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-enable-controller-after-onboarding.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: Enable or disable the controller in Microsoft CloudKnox Permissions Management after onboarding is complete -description: How to enable or disable the controller in Microsoft CloudKnox Permissions Management after onboarding is complete. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Enable or disable the controller after onboarding is complete - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. 
Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how to enable or disable the controller in Microsoft Azure and Google Cloud Platform (GCP) after onboarding is complete. - -This article also describes how to enable the controller in Amazon Web Services (AWS) if you disabled it during onboarding. You can only enable the controller in AWS at this time; you can't disable it. - -## Enable the controller in AWS - -> [!NOTE] -> You can only enable the controller in AWS; you can't disable it at this time. - -1. Sign in to the AWS console of the member account in a separate browser window. -1. Go to the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. -1. On the **Data Collectors** dashboard, select **AWS**, and then select **Create Configuration**. -1. On the **CloudKnox Onboarding - AWS Member Account Details** page, select **Launch Template**. - - The **AWS CloudFormation create stack** page opens, displaying the template. -1. In the **CloudTrailBucketName** box, enter a name. - - You can copy and paste the **CloudTrailBucketName** name from the **Trails** page in AWS. - - > [!NOTE] - > A *cloud bucket* collects all the activity in a single account that CloudKnox monitors. Enter the name of a cloud bucket here to provide CloudKnox with the access required to collect activity data. - -1. In the **EnableController** box, from the drop-down list, select **True** to provide CloudKnox with read and write access so that any remediation you want to do from the CloudKnox platform can be done automatically. - -1. Scroll to the bottom of the page, and in the **Capabilities** box and select **I acknowledge that AWS CloudFormation might create IAM resources with custom names**. Then select **Create stack**. - - This AWS CloudFormation stack creates a collection role in the member account with necessary permissions (policies) for data collection. A trust policy is set on this role to allow the OIDC role created in your AWS OIDC account to access it. These entities are listed in the **Resources** tab of your CloudFormation stack. - -1. Return to CloudKnox, and on the CloudKnox **Onboarding - AWS Member Account Details** page, select **Next**. -1. On **CloudKnox Onboarding – Summary** page, review the information you’ve added, and then select **Verify Now & Save**. - - The following message appears: **Successfully created configuration.** - -## Enable or disable the controller in Azure - - -1. In Azure, open the **Access control (IAM)** page. -1. In the **Check access** section, in the **Find** box, enter **Cloud Infrastructure Entitlement Management**. - - The **Cloud Infrastructure Entitlement Management assignments** page appears, displaying the roles assigned to you. - - - If you have read-only permission, the **Role** column displays **Reader**. - - If you have administrative permission, the **Role** column displays **User Access Administrative**. - -1. To add the administrative role assignment, return to the **Access control (IAM)** page, and then select **Add role assignment**. -1. Add or remove the role assignment for Cloud Infrastructure Entitlement Management. - -1. Go to the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. -1. On the **Data Collectors** dashboard, select **Azure**, and then select **Create Configuration**. -1. 
On the **CloudKnox Onboarding - Azure Subscription Details** page, enter the **Subscription ID**, and then select **Next**. -1. On **CloudKnox Onboarding – Summary** page, review the controller permissions, and then select **Verify Now & Save**. - - The following message appears: **Successfully Created Configuration.** - - -## Enable or disable the controller in GCP - -1. Execute the **gcloud auth login**. -1. Follow the instructions displayed on the screen to authorize access to your Google account. -1. Execute the **sh mciem-workload-identity-pool.sh** to create the workload identity pool, provider, and service account. -1. Execute the **sh mciem-member-projects.sh** to give CloudKnox permissions to access each of the member projects. - - - If you want to manage permissions through CloudKnox, select **Y** to **Enable controller**. - - If you want to onboard your projects in read-only mode, select **N** to **Disable controller**. - -1. Optionally, execute **mciem-enable-gcp-api.sh** to enable all recommended GCP APIs. - -1. Go to the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. -1. On the **Data Collectors** dashboard, select **GCP**, and then select **Create Configuration**. -1. On the **CloudKnox Onboarding - Azure AD OIDC App Creation** page, select **Next**. -1. On the **CloudKnox Onboarding - GCP OIDC Account Details & IDP Access** page, enter the **OIDC Project Number** and **OIDC Project ID**, and then select **Next**. -1. On the **CloudKnox Onboarding - GCP Project IDs** page, enter the **Project IDs**, and then select **Next**. -1. On the **CloudKnox Onboarding – Summary** page, review the information you’ve added, and then select **Verify Now & Save**. - - The following message appears: **Successfully Created Configuration.** - -## Next steps - -- For information on how to onboard an Amazon Web Services (AWS) account, see [Onboard an AWS account](cloudknox-onboard-aws.md). -- For information on how to onboard a Microsoft Azure subscription, see [Onboard a Microsoft Azure subscription](cloudknox-onboard-azure.md). -- For information on how to onboard a Google Cloud Platform (GCP) project, see [Onboard a GCP project](cloudknox-onboard-gcp.md). -- For information on how to add an account/subscription/project after onboarding is complete, see [Add an account/subscription/project after onboarding is complete](cloudknox-onboard-add-account-after-onboarding.md). - diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-enable-tenant.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-enable-tenant.md deleted file mode 100644 index d8c80ae7996f4..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-enable-tenant.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Enable CloudKnox Permissions Management in your organization -description: How to enable CloudKnox Permissions Management in your organization. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 04/20/2022 -ms.author: kenwith ---- - -# Enable CloudKnox in your organization - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. 
Microsoft makes no warranties, express or implied, with respect to the information provided here. - - -> [!NOTE] -> The CloudKnox Permissions Management (CloudKnox) PREVIEW is currently not available for tenants hosted in the European Union (EU). - - - -This article describes how to enable CloudKnox Permissions Management (CloudKnox) in your organization. Once you've enabled CloudKnox, you can connect it to your Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) platforms. - -> [!NOTE] -> To complete this task, you must have *global administrator* permissions as a user in that tenant. You can't enable CloudKnox as a user from other tenant who has signed in via B2B or via Azure Lighthouse. - -## Prerequisites - -To enable CloudKnox in your organization: - -- You must have an Azure AD tenant. If you don't already have one, [create a free account](https://azure.microsoft.com/free/). -- You must be eligible for or have an active assignment to the global administrator role as a user in that tenant. - -> [!NOTE] -> During public preview, CloudKnox doesn't perform a license check. - -## View a training video on enabling CloudKnox - -- To view a video on how to enable CloudKnox in your Azure AD tenant, select [Enable CloudKnox in your Azure AD tenant](https://www.youtube.com/watch?v=-fkfeZyevoo). -- To view a video on how to configure and onboard AWS accounts in CloudKnox, select [Configure and onboard AWS accounts](https://www.youtube.com/watch?v=R6K21wiWYmE). -- To view a video on how to configure and onboard GCP accounts in CloudKnox, select [Configure and onboard GCP accounts](https://www.youtube.com/watch?app=desktop&v=W3epcOaec28). - - -## How to enable CloudKnox on your Azure AD tenant - -1. In your browser: - 1. Go to [Azure services](https://portal.azure.com) and use your credentials to sign in to [Azure Active Directory](https://ms.portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/Overview). - 1. If you aren't already authenticated, sign in as a global administrator user. - 1. If needed, activate the global administrator role in your Azure AD tenant. - 1. In the Azure AD portal, select **Features highlights**, and then select **CloudKnox Permissions Management**. - - 1. If you're prompted to select a sign in account, sign in as a global administrator for a specified tenant. - - The **Welcome to CloudKnox Permissions Management** screen appears, displaying information on how to enable CloudKnox on your tenant. - -1. To provide access to the CloudKnox application, create a service principal. - - An Azure service principal is a security identity used by user-created apps, services, and automation tools to access specific Azure resources. - - > [!NOTE] - > To complete this step, you must have Azure CLI or Azure PowerShell on your system, or an Azure subscription where you can run Cloud Shell. - - - To create a service principal that points to the CloudKnox application via Cloud Shell: - - 1. Copy the script on the **Welcome** screen: - - `az ad sp create --id b46c3ac5-9da6-418f-a849-0a07a10b3c6c` - - 1. If you have an Azure subscription, return to the Azure AD portal and select **Cloud Shell** on the navigation bar. - If you don't have an Azure subscription, open a command prompt on a Windows Server. - 1. If you have an Azure subscription, paste the script into Cloud Shell and press **Enter**. 
- - - For information on how to create a service principal through the Azure portal, see [Create an Azure service principal with the Azure CLI](/cli/azure/create-an-azure-service-principal-azure-cli). - - - For information on the **az** command and how to sign in with the no subscriptions flag, see [az login](/cli/azure/reference-index?view=azure-cli-latest#az-login&preserve-view=true). - - - For information on how to create a service principal via Azure PowerShell, see [Create an Azure service principal with Azure PowerShell](/powershell/azure/create-azure-service-principal-azureps?view=azps-7.1.0&preserve-view=true). - - 1. After the script runs successfully, the service principal attributes for CloudKnox display. Confirm the attributes. - - The **Cloud Infrastructure Entitlement Management** application displays in the Azure AD portal under **Enterprise applications**. - -1. Return to the **Welcome to CloudKnox** screen and select **Enable CloudKnox Permissions Management**. - - You have now completed enabling CloudKnox on your tenant. CloudKnox launches with the **Data Collectors** dashboard. - -## Configure data collection settings - -Use the **Data Collectors** dashboard in CloudKnox to configure data collection settings for your authorization system. - -1. If the **Data Collectors** dashboard isn't displayed when CloudKnox launches: - - - In the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. - -1. Select the authorization system you want: **AWS**, **Azure**, or **GCP**. - -1. For information on how to onboard an AWS account, Azure subscription, or GCP project into CloudKnox, select one of the following articles and follow the instructions: - - - [Onboard an AWS account](cloudknox-onboard-aws.md) - - [Onboard an Azure subscription](cloudknox-onboard-azure.md) - - [Onboard a GCP project](cloudknox-onboard-gcp.md) - -## Next steps - -- For an overview of CloudKnox, see [What's CloudKnox Permissions Management?](cloudknox-overview.md) -- For a list of frequently asked questions (FAQs) about CloudKnox, see [FAQs](cloudknox-faqs.md). -- For information on how to start viewing information about your authorization system in CloudKnox, see [View key statistics and data about your authorization system](cloudknox-ui-dashboard.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-gcp.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-gcp.md deleted file mode 100644 index 8b894aea37e73..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-gcp.md +++ /dev/null @@ -1,134 +0,0 @@ ---- -title: Onboard a Google Cloud Platform (GCP) project in CloudKnox Permissions Management -description: How to onboard a Google Cloud Platform (GCP) project on CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 04/20/2022 -ms.author: kenwith ---- - -# Onboard a Google Cloud Platform (GCP) project - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. 
- - -> [!NOTE] -> The CloudKnox Permissions Management (CloudKnox) PREVIEW is currently not available for tenants hosted in the European Union (EU). - - -This article describes how to onboard a Google Cloud Platform (GCP) project on CloudKnox Permissions Management (CloudKnox). - -> [!NOTE] -> A *global administrator* or *super admin* (an admin for all authorization system types) can perform the tasks in this article after the global administrator has initially completed the steps provided in [Enable CloudKnox on your Azure Active Directory tenant](cloudknox-onboard-enable-tenant.md). - -## View a training video on configuring and onboarding a GCP account - -To view a video on how to configure and onboard GCP accounts in CloudKnox, select [Configure and onboard GCP accounts](https://www.youtube.com/watch?app=desktop&v=W3epcOaec28). - - -## Onboard a GCP project - -1. If the **Data Collectors** dashboard isn't displayed when CloudKnox launches: - - - In the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. - -1. On the **Data Collectors** tab, select **GCP**, and then select **Create Configuration**. - -### 1. Create an Azure AD OIDC app. - -1. On the **CloudKnox Onboarding - Azure AD OIDC App Creation** page, enter the **OIDC Azure App Name**. - - This app is used to set up an OpenID Connect (OIDC) connection to your GCP project. OIDC is an interoperable authentication protocol based on the OAuth 2.0 family of specifications. The scripts generated will create the app of this specified name in your Azure AD tenant with the right configuration. - -1. To create the app registration, copy the script and run it in your command-line app. - - > [!NOTE] - > 1. To confirm that the app was created, open **App registrations** in Azure and, on the **All applications** tab, locate your app. - > 1. Select the app name to open the **Expose an API** page. The **Application ID URI** displayed in the **Overview** page is the *audience value* used while making an OIDC connection with your AWS account. - - 1. Return to CloudKnox, and in the **CloudKnox Onboarding - Azure AD OIDC App Creation**, select **Next**. - -### 2. Set up a GCP OIDC project. - -1. In the **CloudKnox Onboarding - GCP OIDC Account Details & IDP Access** page, enter the **OIDC Project ID** and **OIDC Project Number** of the GCP project in which the OIDC provider and pool will be created. You can change the role name to your requirements. - - > [!NOTE] - > You can find the **Project number** and **Project ID** of your GCP project on the GCP **Dashboard** page of your project in the **Project info** panel. - -1. You can change the **OIDC Workload Identity Pool Id**, **OIDC Workload Identity Pool Provider Id** and **OIDC Service Account Name** to meet your requirements. - - Optionally, specify **G-Suite IDP Secret Name** and **G-Suite IDP User Email** to enable G-Suite integration. - - You can either download and run the script at this point or you can do it in the Google Cloud Shell, as described [later in this article](cloudknox-onboard-gcp.md#4-run-scripts-in-cloud-shell-optional-if-not-already-executed). -1. Select **Next**. - -### 3. Set up GCP member projects. - -1. In the **CloudKnox Onboarding - GCP Project Ids** page, enter the **Project IDs**. - - You can enter up to 10 GCP project IDs. Select the plus icon next to the text box to insert more project IDs. - -1. 
You can choose to download and run the script at this point, or you can do it via Google Cloud Shell, as described in the [next step](cloudknox-onboard-gcp.md#4-run-scripts-in-cloud-shell-optional-if-not-already-executed). - -### 4. Run scripts in Cloud Shell. (Optional if not already executed) - -1. In the **CloudKnox Onboarding - GCP Project Ids** page, select **Launch SSH**. -1. To copy all your scripts into your current directory, in **Open in Cloud Shell**, select **Trust repo**, and then select **Confirm**. - - The Cloud Shell provisions the Cloud Shell machine and makes a connection to your Cloud Shell instance. - - > [!NOTE] - > Follow the instructions in the browser as they may be different from the ones given here. - - The **Welcome to CloudKnox GCP onboarding** screen appears, displaying steps you must complete to onboard your GCP project. - -### 5. Paste the environment vars from the CloudKnox portal. - -1. Return to CloudKnox and select **Copy export variables**. -1. In the GCP Onboarding shell editor, paste the variables you copied, and then press **Enter**. -1. Execute the **gcloud auth login**. -1. Follow instructions displayed on the screen to authorize access to your Google account. -1. Execute the **sh mciem-workload-identity-pool.sh** to create the workload identity pool, provider, and service account. -1. Execute the **sh mciem-member-projects.sh** to give CloudKnox permissions to access each of the member projects. - - - If you want to manage permissions through CloudKnox, select **Y** to **Enable controller**. - - - If you want to onboard your projects in read-only mode, select **N** to **Disable controller**. - -1. Optionally, execute **mciem-enable-gcp-api.sh** to enable all recommended GCP APIs. - -1. Return to **CloudKnox Onboarding - GCP Project Ids**, and then select **Next**. - -### 6. Review and save. - -1. In the **CloudKnox Onboarding – Summary** page, review the information you’ve added, and then select **Verify Now & Save**. - - The following message appears: **Successfully Created Configuration.** - - On the **Data Collectors** tab, the **Recently Uploaded On** column displays **Collecting**. The **Recently Transformed On** column displays **Processing.** - - You have now completed onboarding GCP, and CloudKnox has started collecting and processing your data. - -### 7. View the data. - -- To view the data, select the **Authorization Systems** tab. - - The **Status** column in the table displays **Collecting Data.** - - The data collection process may take some time, depending on the size of the account and how much data is available for collection. - - - -## Next steps - -- For information on how to onboard an Amazon Web Services (AWS) account, see [Onboard an Amazon Web Services (AWS) account](cloudknox-onboard-aws.md). -- For information on how to onboard a Microsoft Azure subscription, see [Onboard a Microsoft Azure subscription](cloudknox-onboard-azure.md). -- For information on how to enable or disable the controller after onboarding is complete, see [Enable or disable the controller](cloudknox-onboard-enable-controller-after-onboarding.md). -- For information on how to add an account/subscription/project after onboarding is complete, see [Add an account/subscription/project after onboarding is complete](cloudknox-onboard-add-account-after-onboarding.md). 
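
For readers who want to review what the onboarding scripts do before running them, the steps above correspond roughly to standard `gcloud` operations such as the following. This is an illustrative sketch only: every name, ID, role, and issuer URI below is a placeholder, and the `mciem-*.sh` scripts generated in the CloudKnox portal remain the authoritative version of these steps.

```bash
# Illustrative sketch only - placeholders throughout; the generated mciem-*.sh
# scripts are authoritative. Run after pasting the exported variables (step 5).
gcloud auth login                              # authorize access to your Google account
gcloud config set project "$OIDC_PROJECT_ID"   # project that hosts the OIDC pool

# Roughly what mciem-workload-identity-pool.sh sets up: pool, provider, service account.
gcloud iam workload-identity-pools create "$POOL_ID" \
  --location="global" --display-name="CloudKnox OIDC pool"
gcloud iam workload-identity-pools providers create-oidc "$PROVIDER_ID" \
  --location="global" --workload-identity-pool="$POOL_ID" \
  --issuer-uri="https://sts.windows.net/$AZURE_TENANT_ID/" \
  --attribute-mapping="google.subject=assertion.sub"
gcloud iam service-accounts create "$SERVICE_ACCOUNT_NAME" \
  --display-name="CloudKnox data collector"

# Roughly what mciem-member-projects.sh does: grant read access on each member project.
gcloud projects add-iam-policy-binding "$MEMBER_PROJECT_ID" \
  --member="serviceAccount:$SERVICE_ACCOUNT_NAME@$OIDC_PROJECT_ID.iam.gserviceaccount.com" \
  --role="roles/viewer"

# Roughly what mciem-enable-gcp-api.sh does: enable APIs the collector reads from.
gcloud services enable cloudresourcemanager.googleapis.com iam.googleapis.com
```

Whether the controller is enabled (read and write) or disabled (read-only) is governed by the script prompts described in step 5, not by anything in this sketch.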
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-overview.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-overview.md deleted file mode 100644 index cac6d12faa329..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-overview.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: What's CloudKnox Permissions Management? -description: An introduction to CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: overview -ms.date: 04/20/2022 -ms.author: kenwith ---- - -# What's CloudKnox Permissions Management? - - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -> [!NOTE] -> The CloudKnox Permissions Management (CloudKnox) PREVIEW is currently not available for tenants hosted in the European Union (EU). - -## Overview - -CloudKnox Permissions Management (CloudKnox) is a cloud infrastructure entitlement management (CIEM) solution that provides comprehensive visibility into permissions assigned to all identities. For example, over-privileged workload and user identities, actions, and resources across multi-cloud infrastructures in Microsoft Azure, Amazon Web Services (AWS), and Google Cloud Platform (GCP). - -CloudKnox detects, automatically right-sizes, and continuously monitors unused and excessive permissions. - -Organizations have to consider permissions management as a central piece of their Zero Trust security to implement least privilege access across their entire infrastructure: - -- Organizations are increasingly adopting multi-cloud strategy and are struggling with the lack of visibility and the increasing complexity of managing access permissions. -- With the proliferation of identities and cloud services, the number of high-risk cloud permissions is exploding, expanding the attack surface for organizations. -- IT security teams are under increased pressure to ensure access to their expanding cloud estate is secure and compliant. -- The inconsistency of cloud providers’ native access management models makes it even more complex for Security and Identity to manage permissions and enforce least privilege access policies across their entire environment. - -:::image type="content" source="media/cloudknox-overview/cloudknox-key-cases.png" alt-text="CloudKnox Permissions Management."::: - -## Key use cases - -CloudKnox allows customers to address three key use cases: *discover*, *remediate*, and *monitor*. - -### Discover - -Customers can assess permission risks by evaluating the gap between permissions granted and permissions used. - -- Cross-cloud permissions discovery: Granular and normalized metrics for key cloud platforms: AWS, Azure, and GCP. -- Permission Creep Index (PCI): An aggregated metric that periodically evaluates the level of risk associated with the number of unused or excessive permissions across your identities and resources. It measures how much damage identities can cause based on the permissions they have. -- Permission usage analytics: Multi-dimensional view of permissions risk for all identities, actions, and resources. 
- -### Remediate - -Customers can right-size permissions based on usage, grant new permissions on-demand, and automate just-in-time access for cloud resources. - -- Automated deletion of permissions unused for the past 90 days. -- Permissions on-demand: Grant identities permissions on-demand for a time-limited period or an as-needed basis. - - -### Monitor - -Customers can detect anomalous activities with machine language-powered (ML-powered) alerts and generate detailed forensic reports. - -- ML-powered anomaly detections. -- Context-rich forensic reports around identities, actions, and resources to support rapid investigation and remediation. - -CloudKnox deepens Zero Trust security strategies by augmenting the least privilege access principle, allowing customers to: - -- Get comprehensive visibility: Discover which identity is doing what, where, and when. -- Automate least privilege access: Use access analytics to ensure identities have the right permissions, at the right time. -- Unify access policies across infrastructure as a service (IaaS) platforms: Implement consistent security policies across your cloud infrastructure. - - - -## Next steps - -- For information on how to onboard CloudKnox in your organization, see [Enable CloudKnox in your organization](cloudknox-onboard-enable-tenant.md). -- For a list of frequently asked questions (FAQs) about CloudKnox, see [FAQs](cloudknox-faqs.md). \ No newline at end of file diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-account-explorer.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-account-explorer.md deleted file mode 100644 index 3ee999fbceb97..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-account-explorer.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: The CloudKnox Permissions Management - View roles and identities that can access account information from an external account -description: How to view information about identities that can access accounts from an external account in CloudKnox Permissions Management. -services: active-directory -manager: rkarlin -ms.service: active-directory -ms.topic: how-to -author: kenwith -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# View roles and identities that can access account information from an external account - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -You can view information about users, groups, and resources that can access account information from an external account in CloudKnox Permissions Management (CloudKnox). - -## Display information about users, groups, or tasks - -1. In CloudKnox, select the **Usage analytics** tab, and then, from the dropdown, select one of the following: - - - **Users** - - **Group** - - **Active resources** - - **Active tasks** - - **Active resources** - - **Serverless functions** - -1. To choose an account from your authorization system, select the lock icon in the left panel. -1. In the **Authorization systems** pane, select an account, then select **Apply**. -1. To choose a user, role, or group, select the person icon. -1. Select a user or group, then select **Apply**. -1. 
To choose an account from your authorization system, select it from the Authorization Systems menu. -1. In the user type filter, user, role, or group. -1. In the **Task** filter, select **All** or **High-risk tasks**, then select **Apply**. -1. To delete a task, select **Delete**, then select **Apply**. - -## Export information about users, groups, or tasks - -To export the data in comma-separated values (CSV) file format, select **Export** from the top-right hand corner of the table. - -## View users and roles -1. To view users and roles, select the lock icon, and then select the person icon to open the **Users** pane. -1. To view the **Role summary**, select the "eye" icon to the right of the role name. - - The following details display: - - **Policies**: A list of all the policies attached to the role. - - **Trusted entities**: The identities from external accounts that can assume this role. - -1. To view all the identities from various accounts that can assume this role, select the down arrow to the left of the role name. -1. To view a graph of all the identities that can access the specified account and through which role(s), select the role name. - - If CloudKnox is monitoring the external account, it lists specific identities from the accounts that can assume this role. Otherwise, it lists the identities declared in the **Trusted entity** section. - - **Connecting roles**: Lists the following roles for each account: - - *Direct roles* that are trusted by the account role. - - *Intermediary roles* that aren't directly trusted by the account role but are assumable by identities through role-chaining. - -1. To view all the roles from that account that are used to access the specified account, select the down arrow to the left of the account name. -1. To view the trusted identities declared by the role, select the down arrow to the left of the role name. - - The trusted identities for the role are listed only if the account is being monitored by CloudKnox. - -1. To view the role definition, select the "eye" icon to the right of the role name. - - When you select the down arrow and expand details, a search box is displayed. Enter your criteria in this box to search for specific roles. - - **Identities with access**: Lists the identities that come from external accounts: - - To view all the identities from that account can access the specified account, select the down arrow to the left of the account name. - - To view the **Role summary** for EC2 instances and Lambda functions, select the "eye" icon to the right of the identity name. - - To view a graph of how the identity can access the specified account and through which role(s), select the identity name. - -1. The **Info** tab displays the **Privilege creep index** and **Service control policy (SCP)** information about the account. - -For more information about the **Privilege creep index** and SCP information, see [View key statistics and data about your authorization system](cloudknox-ui-dashboard.md). 
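
If the account in question is an AWS account, the trusted entities and role-chaining relationships shown in this view can also be spot-checked directly against IAM. The following AWS CLI sketch is illustrative; the role name is a placeholder.

```bash
# Illustrative: inspect which external principals a role trusts (its trust policy).
aws iam get-role \
  --role-name ExampleCrossAccountRole \
  --query 'Role.AssumeRolePolicyDocument.Statement[].Principal' \
  --output json

# List the policies attached to the same role (compare with the "Policies" summary).
aws iam list-attached-role-policies --role-name ExampleCrossAccountRole
```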
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-account-settings.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-account-settings.md deleted file mode 100644 index 5ab6917745ce9..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-account-settings.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: View personal and organization information in CloudKnox Permissions Management -description: How to view personal and organization information in the Account settings dashboard in CloudKnox Permissions Management. -services: active-directory -manager: rkarlin -ms.service: active-directory -ms.topic: overview -author: kenwith -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# View personal and organization information - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -The **Account settings** dashboard in CloudKnox Permissions Management (CloudKnox) allows you to view personal information, passwords, and account preferences. -This information can't be modified because the user information is pulled from Azure AD. Only **User Session Time(min)** - -## View personal information - -1. In the CloudKnox home page, select the down arrow to the right of the **User** (your initials) menu, and then select **Account Settings**. - - The **Personal Information** box displays your **First Name**, **Last Name**, and the **Email Address** that was used to register your account on CloudKnox. - -## View current organization information - -1. In the CloudKnox home page, select the down arrow to the right of the **User** (your initials) menu, and then select **Account Settings**. - - The **Current Organization Information** displays the **Name** of your organization, the **Tenant ID** box, and the **User Session Timeout (min)**. - -1. To change duration of the **User Session Timeout (min)**, select **Edit** (the pencil icon), and then enter the number of minutes before you want a user session to time out. -1. Select the check mark to confirm your new setting. - - -## Next steps - -- For information about how to manage user information, see [Manage users and groups with the User management dashboard](cloudknox-ui-user-management.md). -- For information about how to view information about active and completed tasks, see [View information about active and completed tasks](cloudknox-ui-tasks.md). -- For information about how to select group-based permissions settings, see [Select group-based permissions settings](cloudknox-howto-create-group-based-permissions.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-audit-trail.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-audit-trail.md deleted file mode 100644 index ef3ea798af791..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-audit-trail.md +++ /dev/null @@ -1,401 +0,0 @@ ---- -title: Filter and query user activity in CloudKnox Permissions Management -description: How to filter and query user activity in CloudKnox Permissions Management. 
-services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Filter and query user activity - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -The **Audit** dashboard in CloudKnox Permissions Management (CloudKnox) details all user activity performed in your authorization system. It captures all high risk activity in a centralized location, and allows system administrators to query the logs. The **Audit** dashboard enables you to: - -- Create and save new queries so you can access key data points easily. -- Query across multiple authorization systems in one query. - -## Filter information by authorization system - -If you haven't used filters before, the default filter is the first authorization system in the filter list. - -If you have used filters before, the default filter is last filter you selected. - -1. To display the **Audit** dashboard, on the CloudKnox home page, select **Audit**. - -1. To select your authorization system type, in the **Authorization System Type** box, select Amazon Web Services (**AWS**), Microsoft Azure (**Azure**), Google Cloud Platform (**GCP**), or Platform (**Platform**). - -1. To select your authorization system, in the **Authorization System** box: - - - From the **List** subtab, select the accounts you want to use. - - From the **Folders** subtab, select the folders you want to use. - -1. To view your query results, select **Apply**. - -## Create, view, modify, or delete a query - -There are several different query parameters you can configure individually or in combination. The query parameters and corresponding instructions are listed in the following sections. - -- To create a new query, select **New Query**. -- To view an existing query, select **View** (the eye icon). -- To edit an existing query, select **Edit** (the pencil icon). -- To delete a function line in a query, select **Delete** (the minus sign **-** icon). -- To create multiple queries at one time, select **Add New Tab** to the right of the **Query** tabs that are displayed. - - You can open a maximum number of six query tab pages at the same time. A message will appear when you've reached the maximum. - -## Create a query with specific parameters - -### Create a query with a date - -1. In the **New Query** section, the default parameter displayed is **Date In "Last day"**. - - The first-line parameter always defaults to **Date** and can't be deleted. - -1. To edit date details, select **Edit** (the pencil icon). - - To view query details, select **View** (the eye icon). - -1. Select **Operator**, and then select an option: - - **In**: Select this option to set a time range from the past day to the past year. - - **Is**: Select this option to choose a specific date from the calendar. - - **Custom**: Select this option to set a date range from the **From** and **To** calendars. - -1. To run the query on the current selection, select **Search**. - -1. To save your query, select **Save**. - - To clear the recent selections, select **Reset**. 
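
The **Date** filter scopes the query to a window of collected activity. If you want to spot-check the same window against Azure's native activity log, an equivalent query with the Azure CLI looks like the following; this is purely illustrative, and the dates are placeholders.

```bash
# Illustrative: raw Azure activity-log events for a specific date range.
az monitor activity-log list \
  --start-time 2022-05-01T00:00:00Z \
  --end-time   2022-05-02T00:00:00Z \
  --query "[].{caller:caller, operation:operationName.value, status:status.value}" \
  --output table
```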
- -### View operator options for identities - -The **Operator** menu displays the following options depending on the identity you select in the first dropdown: - -- **Is** / **Is Not**: View a list of all available usernames. You can either select or enter a username in the box. -- **Contains** / **Not Contains**: Enter text that the **Username** should or shouldn't contain, for example, *CloudKnox*. -- **In** / **Not In**: View a list all available usernames and select multiple usernames. - -### Create a query with a username - -1. In the **New query** section, select **Add**. - -1. From the menu, select **Username**. - -1. From the **Operator** menu, select the required option. - -1. To add criteria to this section, select **Add**. - - You can change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with the username **Test**. - -1. Select the plus (**+**) sign, select **Or** with **Contains**, and then enter a username, for example, *CloudKnox*. - -1. To remove a row of criteria, select **Remove** (the minus sign **-** icon). - -1. To run the query on the current selection, select **Search**. - -1. To clear the recent selections, select **Reset**. - -### Create a query with a resource name - -1. In the **New query** section, select **Add**. - -1. From the menu, select **Resource Name**. - -1. From the **Operator** menu, select the required option. - -1. To add criteria to this section, select **Add**. - - You can change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with resource name **Test**. - -1. Select the plus (**+**) sign, select **Or** with **Contains**, and then enter a username, for example, *CloudKnox*. - -1. To remove a row of criteria, select **Remove** (the minus sign **-** icon). - -1. To run the query on the current selection, select **Search**. - -1. To clear the recent selections, select **Reset**. - -### Create a query with a resource type - -1. In the **New Query** section, select **Add**. - -1. From the menu, select **Resource Type**. - -1. From the **Operator** menu, select the required option. - -1. To add criteria to this section, select **Add**. - -1. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with resource type **s3::bucket**. - -1. Select the plus (**+**) sign, select **Or** with **Is**, and then enter or select `ec2::instance`. - -1. To remove a row of criteria, select **Remove** (the minus sign **-** icon). - -1. To run the query on the current selection, select **Search**. - -1. To clear the recent selections, select **Reset**. - - -### Create a query with a task name - -1. In the **New Query** section, select **Add**. - -1. From the menu, select **Task Name**. - -1. From the **Operator** menu, select the required option. - -1. To add criteria to this section, select **Add**. - -1. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with task name **s3:CreateBucket**. - -1. Select **Add**, select **Or** with **Is**, and then enter or select `ec2:TerminateInstance`. - -1. To remove a row of criteria, select **Remove** (the minus sign **-** icon). - -1. To run the query on the current selection, select **Search**. - -1. To clear the recent selections, select **Reset**. 
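
For AWS authorization systems, the **Username** and **Task Name** filters line up with the caller and API action recorded in CloudTrail, so a query built here can be cross-checked with the AWS CLI. The sketch below is illustrative; the username and event name are placeholders.

```bash
# Illustrative: recent AWS activity for a specific user...
aws cloudtrail lookup-events \
  --lookup-attributes AttributeKey=Username,AttributeValue=test-user \
  --max-results 20 \
  --query 'Events[].{time:EventTime, task:EventName, user:Username}' \
  --output table

# ...or for a specific task, for example the CreateBucket action behind s3:CreateBucket.
aws cloudtrail lookup-events \
  --lookup-attributes AttributeKey=EventName,AttributeValue=CreateBucket \
  --max-results 20
```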
- -### Create a query with a state - -1. In the **New Query** section, select **Add**. - -1. From the menu, select **State**. - -1. From the **Operator** menu, select the required option. - - - **Is** / **Is not**: Allows a user to select in the value field and select **Authorization Failure**, **Error**, or **Success**. - -1. To add criteria to this section, select **Add**. - -1. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with State **Authorization Failure**. - -1. Select the **Add** icon, select **Or** with **Is**, and then select **Success**. - -1. To remove a row of criteria, select **Remove** (the minus sign **-** icon). - -1. To run the query on the current selection, select **Search**. - -1. To clear the recent selections, select **Reset**. - -### Create a query with a role name - -1. In the **New query** section, select **Add**. - -2. From the menu, select **Role Name**. - -3. From the **Operator** menu, select the required option. - -4. To add criteria to this section, select **Add**. - -5. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Contains** with free text **Test**. - -6. Select the **Add** icon, select **Or** with **Contains**, and then enter your criteria, for example *CloudKnox*. - -7. To remove a row of criteria, select **Remove** (the minus sign **-** icon). - -8. To run the query on the current selection, select **Search**. - -9. To clear the recent selections, select **Reset**. - -### Create a query with a role session name - -1. In the **New Query** section, select **Add**. - -2. From the menu, select **Role Session Name**. - -3. From the **Operator** menu, select the required option. - -4. To add criteria to this section, select **Add**. - -5. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Contains** with free text **Test**. - -6. Select the **Add** icon, select **Or** with **Contains**, and then enter your criteria, for example *CloudKnox*. - -7. To remove a row of criteria, select **Remove** (the minus sign **-** icon). - -8. To run the query on the current selection, select **Search**. - -9. To clear the recent selections, select **Reset**. - -### Create a query with an access key ID - -1. In the **New Query** section, select **Add**. - -2. From the menu, select **Access Key ID**. - -3. From the **Operator** menu, select the required option. - -4. To add criteria to this section, select **Add**. - -5. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Contains** with free `AKIAIFXNDW2Z2MPEH5OQ`. - -6. Select the **Add** icon, select **Or** with **Not** **Contains**, and then enter `AKIAVP2T3XG7JUZRM7WU`. - -7. To remove a row of criteria, select **Remove** (the minus sign **-** icon). - -8. To run the query on the current selection, select **Search**. - -9. To clear the recent selections, select **Reset**. - -### Create a query with a tag key - -1. In the **New Query** section, select **Add**. - -2. From the menu, select **Tag Key**. - -3. From the **Operator** menu, select the required option. - -4. To add criteria to this section, select **Add**. - -5. Change the operation between **And** / **Or** statements, and select other criteria. 
For example, the first set of criteria selected can be **Is** and type in, or select **Test**. - -6. Select the **Add** icon, select **Or** with **Is**, and then enter your criteria, for example *CloudKnox*. - -7. To remove a row of criteria, select **Remove** (the minus sign **-** icon). - -8. To run the query on the current selection, select **Search**. - -9. To clear the recent selections, select **Reset**. - -### Create a query with a tag key value - -1. In the **New Query** section, select **Add**. - -2. From the menu, select **Tag Key Value**. - -3. From the **Operator** menu, select the required option. - -4. To add criteria to this section, select **Add**. - -5. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** and type in, or select **Test**. - -6. Select the **Add** icon, select **Or** with **Is**, and then enter your criteria, for example *CloudKnox*. - -7. To remove a row of criteria, select **Remove** (the minus sign **-** icon). - -8. To run the query on the current selection, select **Search**. - -9. To clear the recent selections, select **Reset**. - -### View query results - -1. In the **Activity** table, your query results display in columns. - - The results display all executed tasks that aren't read-only. - -1. To sort each column by ascending or descending value, select the up or down arrows next to the column name. - - - **Identity Details**: The name of the identity, for example the name of the role session performing the task. - - - To view the **Raw Events Summary**, which displays the full details of the event, next to the **Name** column, select **View**. - - - **Resource Name**: The name of the resource on which the task is being performed. - - If the column displays **Multiple**, it means multiple resources are listed in the column. - -1. To view a list of all resources, hover over **Multiple**. - - - **Resource Type**: Displays the type of resource, for example, *Key* (encryption key) or *Bucket* (storage). - - **Task Name**: The name of the task that was performed by the identity. - - An exclamation mark (**!**) next to the task name indicates that the task failed. - - - **Date**: The date when the task was performed. - - - **IP Address**: The IP address from where the user performed the task. - - - **Authorization System**: The authorization system name in which the task was performed. - -1. To download the results in comma-separated values (CSV) file format, select **Download**. - -## Save a query - -1. After you complete your query selections from the **New Query** section, select **Save**. - -2. In the **Query Name** box, enter a name for your query, and then select **Save**. - -3. To save a query with a different name, select the ellipses (**...**) next to **Save**, and then select **Save As**. - -4. Make your query selections from the **New Query** section, select the ellipses (**...**), and then select **Save As**. - -5. To save a new query, in the **Save Query** box, enter the name for the query, and then select **Save**. - -6. To save an existing query you've modified, select the ellipses (**...**). - - - To save a modified query under the same name, select **Save**. - - To save a modified query under a different name, select **Save As**. - -### View a saved query - -1. Select **Saved Queries**, and then select a query from the **Load Queries** list. 
- - A message box opens with the following options: **Load with the saved authorization system** or **Load with the currently selected authorization system**. - -1. Select the appropriate option, and then select **Load Queries**. - -1. View the query information: - - - **Query Name**: Displays the name of the saved query. - - **Query Type**: Displays whether the query is a *System* query or a *Custom* query. - - **Schedule**: Displays how often a report will be generated. You can schedule a one-time report or a monthly report. - - **Next On**: Displays the date and time the next report will be generated. - - **Format**: Displays the output format for the report, for example, CSV. - - **Last Modified On**: Displays the date in which the query was last modified on. - -1. To view or set schedule details, select the gear icon, select **Create Schedule**, and then set the details. - - If a schedule has already been created, select the gear icon to open the **Edit Schedule** box. - - - **Repeat**: Sets how often the report should repeat. - - **Start On**: Sets the date when you want to receive the report. - - **At**: Sets the specific time when you want to receive the report. - - **Report Format**: Select the output type for the file, for example, CSV. - - **Share Report With**: The email address of the user who is creating the schedule is displayed in this field. You can add other email addresses. - -1. After selecting your options, select **Schedule**. - - -### Save a query under a different name - -- Select the ellipses (**...**). - - System queries have only one option: - - - **Duplicate**: Creates a duplicate of the query and names the file *Copy of XXX*. - - Custom queries have the following options: - - - **Rename**: Enter the new name of the query and select **Save**. - - **Delete**: Delete the saved query. - - The **Delete Query** box opens, asking you to confirm that you want to delete the query. Select **Yes** or **No**. - - - **Duplicate**: Creates a duplicate of the query and names it *Copy of XXX*. - - **Delete Schedule**: Deletes the schedule details for this query. - - This option isn't available if you haven't yet saved a schedule. - - The **Delete Schedule** box opens, asking you to confirm that you want to delete the schedule. Select **Yes** or **No**. - - -## Export the results of a query as a report - -- To export the results of the query, select **Export**. - - CloudKnox exports the results in comma-separated values (**CSV**) format, portable document format (**PDF**), or Microsoft Excel Open XML Spreadsheet (**XLSX**) format. - - -## Next steps - -- For information on how to view how users access information, see [Use queries to see how users access information](cloudknox-ui-audit-trail.md). -- For information on how to create a query, see [Create a custom query](cloudknox-howto-create-custom-queries.md). -- For information on how to generate an on-demand report from a query, see [Generate an on-demand report from a query](cloudknox-howto-audit-trail-results.md). 
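
As a follow-up to the **Export** option described earlier in this article, an exported CSV can be summarized with ordinary command-line tools. The file name and column position below are assumptions; check the header row of your own export before relying on them.

```bash
# Illustrative: summarize an exported Audit query result.
# Check the header row first - column order depends on the export.
head -1 audit-export.csv

# Count events per task name, assuming "Task Name" is the 4th column
# (naive split; quoted commas in fields would need a real CSV parser).
tail -n +2 audit-export.csv | awk -F',' '{print $4}' | sort | uniq -c | sort -rn | head
```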
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-dashboard.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-dashboard.md deleted file mode 100644 index 48d0653e35e93..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-dashboard.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: View data about the activity in your authorization system in CloudKnox Permissions Management -description: How to view data about the activity in your authorization system in the CloudKnox Dashboard in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - - - -# View data about the activity in your authorization system - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -The CloudKnox Permissions Management (CloudKnox) **Dashboard** provides an overview of the authorization system and account activity being monitored. You can use this dashboard to view data collected from your Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP) authorization systems. - -## View data about your authorization system - -1. In the CloudKnox home page, select **Dashboard**. -1. From the **Authorization systems type** dropdown, select **AWS**, **Azure**, or **GCP**. -1. Select the **Authorization System** box to display a **List** of accounts and **Folders** available to you. -1. Select the accounts and folders you want, and then select **Apply**. - - The **Permission Creep Index (PCI)** chart updates to display information about the accounts and folders you selected. The number of days since the information was last updated displays in the upper right corner. - -1. In the Permission Creep Index (PCI) graph, select a bubble. - - The bubble displays the number of identities that are considered high-risk. - - *High-risk* refers to the number of users who have permissions that exceed their normal or required usage. - -1. Select the box to display detailed information about the identities contributing to the **Low PCI**, **Medium PCI**, and **High PCI**. - -1. The **Highest PCI change** displays the authorization system name with the PCI number and the change number for the last seven days, if applicable. - - - To view all the changes and PCI ratings in your authorization system, select **View all**. - -1. To return to the PCI graph, select the **Graph** icon in the upper right of the list box. - -For more information about the CloudKnox **Dashboard**, see [View key statistics and data about your authorization system](cloudknox-ui-dashboard.md). - -## View user data on the PCI heat map - -The **Permission Creep Index (PCI)** heat map shows the incurred risk of users with access to high-risk privileges. The distribution graph displays all the users who contribute to the privilege creep. It displays how many users contribute to a particular score. For example, if the score from the PCI chart is 14, the graph shows how many users have a score of 14. - -- To view detailed data about a user, select the number. 
- - The PCI trend graph shows you the historical trend of the PCI score over the last 90 days. - -- To download the **PCI History** report, select **Download** (the down arrow icon). - - -## View information about users, roles, resources, and PCI trends - -To view specific information about the following, select the number displayed on the heat map. - -- **Users**: Displays the total number of users and how many fall into the high, medium, and low categories. -- **Roles**: Displays the total number of roles and how many fall into the high, medium, and low categories. -- **Resources**: Displays the total number of resources and how many fall into the high, medium, and low categories. -- **PCI trend**: Displays a line graph of the PCI trend over the last several weeks. - -## View identity findings - -The **Identity** section below the heat map on the left side of the page shows all the relevant findings about identities, including roles that can access secret information, roles that are inactive, over provisioned active roles, and so on. - -- To expand the full list of identity findings, select **All findings**. - -## View resource findings - -The **Resource** section below the heat map on the right side of the page shows all the relevant findings about your resources. It includes unencrypted S3 buckets, open security groups, managed keys, and so on. - -## Next steps - -- For more information about how to view key statistics and data in the Dashboard, see [View key statistics and data about your authorization system](cloudknox-ui-dashboard.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-data-inventory.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-data-inventory.md deleted file mode 100644 index 594f7f8b54df2..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-data-inventory.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: CloudKnox Permissions Management - Display an inventory of created resources and licenses for your authorization system -description: How to display an inventory of created resources and licenses for your authorization system in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Display an inventory of created resources and licenses for your authorization system - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -You can use the **Inventory** dashboard in CloudKnox Permissions Management (CloudKnox) to display an inventory of created resources and licensing information for your authorization system and its associated accounts. - -## View resources created for your authorization system - -1. To access your inventory information, in the CloudKnox home page, select **Settings** (the gear icon). -1. Select the **Inventory** tab, select the **Inventory** subtab, and then select your authorization system type: - - - **AWS** for Amazon Web Services. - - **Azure** for Microsoft Azure. - - **GCP** for Google Cloud Platform. 
- - The **Inventory** tab displays information pertinent to your authorization system type. - -1. To change the columns displayed in the table, select **Columns**, and then select the information you want to display. - - - To discard your changes, select **Reset to default**. - -## View the number of licenses associated with your authorization system - -1. To access licensing information about your data sources, in the CloudKnox home page, select **Settings** (the gear icon). - -1. Select the **Inventory** tab, select the **Licensing** subtab, and then select your authorization system type. - - The **Licensing** table displays the following information pertinent to your authorization system type: - - - The names of your accounts in the **Authorization system** column. - - The number of **Compute** licenses. - - The number of **Serverless** licenses. - - The number of **Compute containers**. - - The number of **Databases**. - - The **Total number of licenses**. - - -## Next steps - -- For information about viewing and configuring settings for collecting data from your authorization system and its associated accounts, see [View and configure settings for data collection](cloudknox-product-data-sources.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-data-sources.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-data-sources.md deleted file mode 100644 index 2e28b31534987..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-data-sources.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: View and configure settings for data collection from your authorization system in CloudKnox Permissions Management -description: How to view and configure settings for collecting data from your authorization system in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# View and configure settings for data collection - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - - -You can use the **Data Collectors** dashboard in CloudKnox Permissions Management (CloudKnox) to view and configure settings for collecting data from your authorization systems. It also provides information about the status of the data collection. - -## Access and view data sources - -1. To access your data sources, in the CloudKnox home page, select **Settings** (the gear icon). Then select the **Data Collectors** tab. - -1. On the **Data Collectors** dashboard, select your authorization system type: - - - **AWS** for Amazon Web Services. - - **Azure** for Microsoft Azure. - - **GCP** for Google Cloud Platform. - -1. To display specific information about an account: - - 1. Enter the following information: - - - **Uploaded on**: Select **All** accounts, **Online** accounts, or **Offline** accounts. - - **Transformed on**: Select **All** accounts, **Online** accounts, or **Offline** accounts. - - **Search**: Enter an ID or Internet Protocol (IP) address to find a specific account. - - 1. Select **Apply** to display the results. 
- - Select **Reset Filter** to discard your settings. - -1. The following information displays: - - - **ID**: The unique identification number for the data collector. - - **Data types**: Displays the data types that are collected: - - **Entitlements**: The permissions of all identities and resources for all the configured authorization systems. - - **Recently uploaded on**: Displays whether the entitlement data is being collected. - - The status displays *ONLINE* if the data collection has no errors and *OFFLINE* if there are errors. - - **Recently transformed on**: Displays whether the entitlement data is being processed. - - The status displays *ONLINE* if the data processing has no errors and *OFFLINE* if there are errors. - - The **Tenant ID**. - - The **Tenant name**. - -## Modify a data collector - -1. Select the ellipses **(...)** at the end of the row in the table. -1. Select **Edit Configuration**. - - The **CloudKnox Onboarding - Summary** box displays. - -1. Select **Edit** (the pencil icon) for each field you want to change. -1. Select **Verify now & save**. - - To verify your changes later, select **Save & verify later**. - - When your changes are saved, the following message displays: **Successfully updated configuration.** - -## Delete a data collector - -1. Select the ellipses **(...)** at the end of the row in the table. -1. Select **Delete Configuration**. - - The **CloudKnox Onboarding - Summary** box displays. -1. Select **Delete**. -1. Check your email for a one time password (OTP) code, and enter it in **Enter OTP**. - - If you don't receive an OTP, select **Resend OTP**. - - The following message displays: **Successfully deleted configuration.** - -## Start collecting data from an authorization system - -1. Select the **Authorization Systems** tab, and then select your authorization system type. -1. Select the ellipses **(...)** at the end of the row in the table. -1. Select **Collect Data**. - - A message displays to confirm data collection has started. - -## Stop collecting data from an authorization system - -1. Select the ellipses **(...)** at the end of the row in the table. -1. To delete your authorization system, select **Delete**. - - The **Validate OTP To Delete Authorization System** box displays. - -1. Enter the OTP code -1. Select **Verify**. - -## Next steps - -- For information about viewing an inventory of created resources and licensing information for your authorization system, see [Display an inventory of created resources and licenses for your authorization system](cloudknox-product-data-inventory.md) diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-define-permission-levels.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-define-permission-levels.md deleted file mode 100644 index d775a826389a1..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-define-permission-levels.md +++ /dev/null @@ -1,277 +0,0 @@ ---- -title: Define and manage users, roles, and access levels in CloudKnox Permissions Management -description: How to define and manage users, roles, and access levels in CloudKnox Permissions Management User management dashboard. 
-services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Define and manage users, roles, and access levels - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -In CloudKnox Permissions Management (CloudKnox), a key component of the interface is the User management dashboard. This topic describes how system administrators can define and manage users, their roles, and their access levels in the system. - -## The User management dashboard - -The CloudKnox User management dashboard provides a high-level overview of: - -- Registered and invited users. -- Permissions allowed for each user within a given system. -- Recent user activity. - -It also provides the functionality to invite or delete a user, edit, view, and customize permissions settings. - - -## Manage users for customers without SAML integration - -Follow this process to invite users if the customer hasn't enabled SAML integration with the CloudKnox application. - -### Invite a user to CloudKnox - -Inviting a user to CloudKnox adds the user to the system and allows system administrators to assign permissions to those users. Follow the steps below to invite a user to CloudKnox. - -1. To invite a user to CloudKnox, select the down caret icon next to the **User** icon on the right of the screen, and then select **User Management**. -2. From the **Users** tab, select **Invite User**. -3. From the **Set User Permission** window, in the **User** text box, enter the user's email address. -4. Under **Permission**, select the applicable option. - - - **Admin for All Authorization System Types**: **View**, **Control**, and **Approve** permissions for all Authorization System Types. - - 1. Select **Next**. - 2. Select **Requestor for User** for each authorization system, if applicable. - - A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. - 3. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. - - For example, a user may have various roles in different authorization systems, so they can select the **Add** icon and the **Users** icon to request access for all their accounts. - 4. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. - - - **Admin for Selected Authorization System Types**: **View**, **Control**, and **Approve** permissions for selected Authorization System Types. - - 1. Select **Viewer**, **Controller**, or **Approver** for the appropriate authorization system(s). - 2. Select **Next**. - 3. Select **Requestor for User** for each authorization system, if applicable. - - A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. - 4. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. 
- - For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. - 5. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. - - - **Custom**: **View**, **Control**, and **Approve** permissions for specific accounts in **Auth System Types**. - - 1. Select **Next**. - - The default view displays the **List** section. - 2. Select the appropriate boxes for **Viewer**, **Controller**, or **Approver**. - - For access to all authorization system types, select **All (Current and Future)**. - 1. Select **Next**. - 1. Select **Requestor for User** for each authorization system, if applicable. - - A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. - 5. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. - - For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. - 6. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. - -5. Select **Save**. - - The following message displays in green at the top of the screen: **New User Has Been Invited Successfully**. - - - -## Manage users for customers with SAML integration - -Follow this process to invite users if the customer has enabled SAML integration with the CloudKnox application. - -### Create a permission in CloudKnox - -Creating a permission directly in CloudKnox allows system administrators to assign permissions to specific users. The following steps help you to create a permission. - -- On the right side of the screen, select the down caret icon next to **User**, and then select **User management**. - -- For **Users**: - 1. To create permissions for a specific user, select the **Users** tab, and then select **Permission.** - 2. From the **Set User Permission** window, enter the user's email address in the **User** text box. - 3. Under **Permission**, select the applicable button. Then expand menu to view instructions for each option. - - **Admin for All Authorization System Types**: **View**, **Control**, and **Approve** permissions for all Authorization System Types. - 1. Select **Next**. - 2. Check **Requestor for User** for each authorization system, if applicable. - - A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. - - 3. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. - - For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. - - 4. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. - - - **Admin for Selected Authorization System Types**: **View**, **Control**, and **Approve** permissions for selected Authorization System Types. - 1. 
Check **Viewer**, **Controller**, or **Approver** for the appropriate authorization system(s). - 2. Select **Next**. - 3. Check **Requestor for User** for each authorization system, if applicable. - - A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. - - 4. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. - - For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. - 5. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. - - **Custom**: **View**, **Control**, and **Approve** permissions for specific accounts in **Auth System Types**. - - 1. Select **Next**. - - The default view displays the **List** tab, which displays individual authorization systems. - - To view groups of authorization systems organized into folder, select the **Folder** tab. - 2. Check the appropriate boxes for **Viewer**, **Controller**, or **Approver**. - - For access to all authorization system types, select **All (Current and Future)**. - 3. Select **Next**. - 4. Check **Requestor for User** for each authorization system, if applicable. - - A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. - 5. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. - - For example, a user can have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. - - 6. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. - - 4. Select **Save**. - - The following message displays in green at the top of the screen: - **New User Has Been Created Successfully**. - 5. The new user receives an email invitation to log in to CloudKnox. - -### The Pending tab - -1. To view the created permission, select the **Pending** tab. The system administrator can view the following details: - - **Email Address**: Displays the email address of the invited user. - - **Permissions**: Displays each service account and if the user has permissions as a **Viewer**, **Controller**, **Approver**, or **Requestor**. - - **Invited By**: Displays the email address of the person who sent the invitation. - - **Sent**: Displays the date the invitation was sent to the user. -2. To make changes to the following, select the ellipses **(...)** in the far right column. - - **View Permissions**: Displays a list of accounts for which the user has permissions. - - **Edit Permissions**: System administrators can edit a user's permissions. - - **Delete**: System administrators can delete a permission - - **Reinvite**: System administrator can reinvite the permission if the user didn't receive the email invite - - When a user registers with CloudKnox, they move from the **Pending** tab to the **Registered** tab. - -### The Registered tab - -- For **Users**: - - 1. 
The **Registered** tab provides a high-level overview of user details to system administrators: - - The **Name/Email Address** column lists the name and email address of the user. - - The **Permissions** column lists each authorization system, and each type of permission. - - If a user has all permissions for all authorization systems, **Admin for All Authorization Types** display across all columns. If a user only has some permissions, numbers display in each column they have permissions for. For example, if the number "3" is listed in the **Viewer** column, the user has viewer permission for three accounts within that authorization system. - - The **Joined On** column records when the user registered for CloudKnox. - - The **Recent Activity** column displays the date when a user last performed an activity. - - The **Search** button allows a system administrator to search for a user by name and all users who match the criteria displays. - - The **Filters** option allows a system administrator to filter by specific details. When the filter option is selected, the **Authorization System** box displays. - - To display all authorization system accounts,Select **All**. Then select the appropriate boxes for the accounts that need to be viewed. - 2. To make the changes to the following changes, select the ellipses **(...)** in the far right column: - - **View Permissions**: Displays a list of accounts for which the user has permissions. - - **Edit Permissions**: System administrators can edit the accounts for which a user has permissions. - - **Remove Permissions**: System administrators can remove permissions from a user. - -- For **Groups**: - 1. To create permissions for a specific user, select the **Groups** tab, and then select **Permission**. - 2. From the **Set Group Permission** window, enter the name of the group in the **Group Name** box. - - The identity provider creates groups. - - Some users may be part of multiple groups. In this case, the user's overall permissions is a union of the permissions assigned the various groups the user is a member of. - 3. Under **Permission**, select the applicable button and expand the menu to view instructions for each option. - - - **Admin for All Authorization System Types**: **View**, **Control**, and **Approve** permissions for all Authorization System Types. - 1. Select **Next**. - 2. Check **Requestor for User** for each authorization system, if applicable. - - A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. - 3. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. - - For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. - - 4. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. - - - **Admin for Selected Authorization System Types**: **View**, **Control**, and **Approve** permissions for selected Authorization System Types. - 1. Check **Viewer**, **Controller**, or **Approver** for the appropriate authorization system(s). - 2. Select **Next**. - 3. Check **Requestor for User** for each authorization system, if applicable. 
- - A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. - 4. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. - - For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. - - 5. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. - - - **Custom**: **View**, **Control**, and **Approve** permissions for specific accounts in Auth System Types. - 1. Select **Next**. - - The default view displays the **List** section. - - 2. Check the appropriate boxes for **Viewer**, **Controller**, or **Approver. - - For access to all authorization system types, select **All (Current and Future)**. - - 3. Select **Next**. - - 4. Check **Requestor for User** for each authorization system, if applicable. - - A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. - - 5. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. - - For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. - - 6. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. - - 4. Select **Save**. - - The following message displays in green at the top of the screen: **New Group Has Been Created Successfully**. - -### The Groups tab - -1. The **Groups** tab provides a high-level overview of user details to system administrators: - - - The **Name** column lists the name of the group. - - The **Permissions** column lists each authorization system, and each type of permission. - - If a group has all permissions for all authorization systems, **Admin for All Authorization Types** displays across all columns. - - If a group only has some permissions, the corresponding columns display numbers for the groups. - - For example, if the number "3" is listed in the **Viewer** column, then the group has viewer permission for three accounts within that authorization system. - - The **Modified By** column records the email address of the person who created the group. - - The **Modified On** column records the date the group was last modified on. - - The **Search** button allows a system administrator to search for a group by name and all groups who match the criteria displays. - - The **Filters** option allows a system administrator to filter by specific details. When the filter option is selected, the **Authorization System** box displays. - - To display all authorization system accounts, select **All**. Then select the appropriate boxes for the accounts that need to be viewed. - -2. To make changes to the following, select the ellipses **(...)** in the far right column: - - **View Permissions**: Displays a list of the accounts for which the group has permissions. - - **Edit Permissions**: System administrators can edit a group's permissions. 
- - **Duplicate**: System administrators can duplicate permissions from one group to another. - - **Delete**: System administrators can delete permissions from a group. - - -## Next steps - -- For information about how to view user management information, see [Manage users with the User management dashboard](cloudknox-ui-user-management.md). -- For information about how to create group-based permissions, see [Create group-based permissions](cloudknox-howto-create-group-based-permissions.md). - diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-integrations.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-integrations.md deleted file mode 100644 index b6a48c857287e..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-integrations.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: View integration information about an authorization system in CloudKnox Permissions Management -description: View integration information about an authorization system in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# View integration information about an authorization system - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -The **Integrations** dashboard in CloudKnox Permissions Management (CloudKnox) allows you to view all your authorization systems in one place, and to ensure all applications are functioning as one. This information helps improve quality and performance as a whole. - -## Display integration information about an authorization system - -Refer to the **Integration** subpages in CloudKnox for information about available authorization systems for integration. - -1. To display the **Integrations** dashboard, select **User** (your initials) in the upper right of the screen, and then select **Integrations.** - - The **Integrations** dashboard displays a tile for each available authorization system. - -1. Select an authorization system tile to view its integration information. - -## Available integrated authorization systems - -The following authorization systems may be listed in the **Integrations** dashboard, depending on which systems are integrated into the CloudKnox application. - -- **ServiceNow**: Manages digital workflows for enterprise operations, and the CloudKnox integration allows you to request and approve permissions through the ServiceNow ticketing workflow. -- **Splunk**: Searches, monitors, and analyzes machine-generated data, and the CloudKnox integration enables exporting usage analytics data, alerts, and logs. -- **HashiCorp Terraform**: CloudKnox enables the generation of least-privilege policies through the Hashi Terraform provider. -- **CloudKnox API**: The CloudKnox application programming interface (API) provides access to CloudKnox features. -- **Saviynt**: Enables you to view Identity entitlements and usage inside the Saviynt console. -- **Securonix**: Enables exporting usage analytics data, alerts, and logs. 
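-As a rough illustration of the Splunk export scenario described above, the following sketch forwards exported alert records to a Splunk HTTP Event Collector (HEC) endpoint. It's a minimal example, not the CloudKnox integration itself: the `alerts.json` export file, the Splunk host, the index name, and the token are hypothetical placeholders you'd replace with your own values.
-
-```python
-import json
-
-import requests  # third-party HTTP client: pip install requests
-
-# Hypothetical values: replace with your own Splunk HEC endpoint and token.
-SPLUNK_HEC_URL = "https://splunk.example.com:8088/services/collector/event"
-SPLUNK_HEC_TOKEN = "00000000-0000-0000-0000-000000000000"
-
-
-def send_alerts_to_splunk(path: str = "alerts.json") -> None:
-    """Read exported alert records from a JSON file and post each one to Splunk HEC."""
-    with open(path, encoding="utf-8") as f:
-        alerts = json.load(f)  # assumed to be a list of alert dictionaries
-
-    headers = {"Authorization": f"Splunk {SPLUNK_HEC_TOKEN}"}
-    for alert in alerts:
-        payload = {"event": alert, "sourcetype": "_json", "index": "cloudknox_alerts"}
-        response = requests.post(SPLUNK_HEC_URL, headers=headers, json=payload, timeout=30)
-        response.raise_for_status()  # fail loudly if Splunk rejects the event
-
-
-if __name__ == "__main__":
-    send_alerts_to_splunk()
-```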
- - - - - - - - - \ No newline at end of file diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-permission-analytics.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-permission-analytics.md deleted file mode 100644 index 479f73496ba06..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-permission-analytics.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: Create and view permission analytics triggers in CloudKnox Permissions Management -description: How to create and view permission analytics triggers in the Permission analytics tab in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Create and view permission analytics triggers - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how you can create and view permission analytics triggers in CloudKnox Permissions Management (CloudKnox). - -## View permission analytics triggers - -1. In the CloudKnox home page, select **Activity triggers** (the bell icon). -1. Select **Permission Analytics**, and then select the **Alerts** subtab. - - The **Alerts** subtab displays the following information: - - - **Alert Name**: Lists the name of the alert. - - To view the name, ID, role, domain, authorization system, statistical condition, anomaly date, and observance period, select **Alert name**. - - To expand the top information found with a graph of when the anomaly occurred, select **Details**. - - **Anomaly Alert Rule**: Displays the name of the rule select when creating the alert. - - **# of Occurrences**: Displays how many times the alert trigger has occurred. - - **Task**: Displays how many tasks are affected by the alert - - **Resources**: Displays how many resources are affected by the alert - - **Identity**: Displays how many identities are affected by the alert - - **Authorization System**: Displays which authorization systems the alert applies to - - **Date/Time**: Displays the date and time of the alert. - - **Date/Time (UTC)**: Lists the date and time of the alert in Coordinated Universal Time (UTC). - -1. To filter the alerts, select the appropriate alert name or, from the **Alert Name** menu,select **All**. - - - From the **Date** dropdown menu, select **Last 24 Hours**, **Last 2 Days**, **Last Week**, or **Custom Range**, and then select **Apply**. - - If you select **Custom range**, select date and time settings, and then select **Apply**. - **View Trigger**: Displays the current trigger settings and applicable authorization system details. - -1. To view the following details, select the ellipses (**...**): - - - **Details**: Displays **Authorization System Type**, **Authorization Systems**, **Resources**, **Tasks**, and **Identities** that matched the alert criteria. -1. To view specific matches, select **Resources**, **Tasks**, or **Identities**. - - The **Activity** section displays details about the **Identity Name**, **Resource Name**, **Task Name**, **Date**, and **IP Address**. - -## Create a permission analytics trigger - -1. 
In the CloudKnox home page, select **Activity triggers** (the bell icon). -1. Select **Permission Analytics**, select the **Alerts** subtab, and then select **Create Permission Analytics Trigger**. -1. In the **Alert Name** box, enter a name for the alert. -1. Select the **Authorization System**. -1. Select **Identity performed high number of tasks**, and then select **Next**. -1. On the **Authorization Systems** tab, select the appropriate accounts and folders, or select **All**. - - This screen defaults to the **List** view but can also be changed to the **Folder** view, and the applicable folder can be selected instead of individually by system. - - - The **Status** column displays if the authorization system is online or offline - - The **Controller** column displays if the controller is enabled or disabled. - -1. On the **Configuration** tab, to update the **Time Interval**, select **90 Days**, **60 Days**, or **30 Days** from the **Time range** dropdown. -1. Select **Save**. - -## View permission analytics alert triggers - -1. In the CloudKnox home page, select **Activity triggers** (the bell icon). -1. Select **Permission Analytics**, and then select the **Alert Triggers** subtab. - - The **Alert triggers** subtab displays the following information: - - - **Alert**: Lists the name of the alert. - - **Anomaly Alert Rule**: Displays the name of the rule select when creating the alert. - - **# of users subscribed**: Displays the number of users subscribed to the alert. - - **Created By**: Displays the email address of the user who created the alert. - - **Last modified By**: Displays the email address of the user who last modified the alert. - - **Last Modified On**: Displays the date and time the trigger was last modified. - - **Subscription**: Toggle the button to **On** or **Off**. - - **View Trigger**: Displays the current trigger settings and applicable authorization system details. - -1. To view other options available to you, select the ellipses (**...**), and then make a selection from the available options: - - - **Details** displays **Authorization System Type**, **Authorization Systems**, **Resources**, **Tasks**, and **Identities** that matched the alert criteria. - - To view the specific matches, select **Resources**, **Tasks**, or **Identities**. - - The **Activity** section displays details about the **Identity Name**, **Resource Name**, **Task Name**, **Date**, and **IP Address**. - -1. To filter by **Activated** or **Deactivated**, in the **Status** section, select **All**, **Activated**, or **Deactivated**, and then select **Apply**. - - -## Next steps - -- For an overview on activity triggers, see [View information about activity triggers](cloudknox-ui-triggers.md). -- For information on activity alerts and alert triggers, see [Create and view activity alerts and alert triggers](cloudknox-howto-create-alert-trigger.md). -- For information on rule-based anomalies and anomaly triggers, see [Create and view rule-based anomalies and anomaly triggers](cloudknox-product-rule-based-anomalies.md). -- For information on finding outliers in identity's behavior, see [Create and view statistical anomalies and anomaly triggers](cloudknox-product-statistical-anomalies.md). 
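-To make the "identity performed a high number of tasks" condition used by this trigger more concrete, here is a conceptual sketch of one way such a check could work. It's only an illustration under stated assumptions (a per-identity list of daily task counts and a simple mean-plus-three-standard-deviations threshold); it isn't CloudKnox's actual detection model.
-
-```python
-from statistics import mean, stdev
-
-
-def flag_high_task_days(daily_counts: list[int], min_history: int = 30) -> list[int]:
-    """Return the indexes of days whose task count is far above the identity's baseline.
-
-    daily_counts: one entry per day in the observance period (for example, 90, 60, or 30 days).
-    """
-    flagged = []
-    for day, count in enumerate(daily_counts):
-        history = daily_counts[:day]        # baseline = all days before the current one
-        if len(history) < min_history:      # not enough history to form a baseline yet
-            continue
-        baseline = mean(history)
-        spread = stdev(history) or 1.0      # avoid a zero threshold for flat histories
-        if count > baseline + 3 * spread:
-            flagged.append(day)
-    return flagged
-
-
-# Example: an identity that usually performs ~25 tasks per day suddenly performs 100.
-usual = [25, 24, 26, 27, 25, 23, 26, 25, 24, 25] * 4   # 40 quiet days
-print(flag_high_task_days(usual + [100]))               # flags the final day (index 40)
-```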
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-permissions-analytics-reports.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-permissions-analytics-reports.md deleted file mode 100644 index 7f2acdd173c01..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-permissions-analytics-reports.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: Generate and download the Permissions analytics report in CloudKnox Permissions Management -description: How to generate and download the Permissions analytics report in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Generate and download the Permissions analytics report - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how to generate and download the **Permissions analytics report** in CloudKnox Permissions Management (CloudKnox). - -> [!NOTE] -> This topic applies only to Amazon Web Services (AWS) users. - -## Generate the Permissions analytics report - -1. In the CloudKnox home page, select the **Reports** tab, and then select the **Systems Reports** subtab. - - The **Systems Reports** subtab displays a list of reports the **Reports** table. -1. Find **Permissions Analytics Report** in the list, and to download the report, select the down arrow to the right of the report name, or from the ellipses **(...)** menu, select **Download**. - - The following message displays: **Successfully Started To Generate On Demand Report.** - -1. For detailed information in the report, select the right arrow next to one of the following categories. Or, select the required category under the **Findings** column. - - - **AWS** - - Inactive Identities - - Users - - Roles - - Resources - - Serverless Functions - - Inactive Groups - - Super Identities - - Users - - Roles - - Resources - - Serverless Functions - - Over-Provisioned Active Identities - - Users - - Roles - - Resources - - Serverless Functions - - PCI Distribution - - Privilege Escalation - - Users - - Roles - - Resources - - S3 Bucket Encryption - - Unencrypted Buckets - - SSE-S3 Buckets - - S3 Buckets Accessible Externally - - EC2 S3 Buckets Accessibility - - Open Security Groups - - Identities That Can Administer Security Tools - - Users - - Roles - - Resources - - Serverless Functions - - Identities That Can Access Secret Information - - Users - - Roles - - Resources - - Serverless Functions - - Cross-Account Access - - External Accounts - - Roles That Allow All Identities - - Hygiene: MFA Enforcement - - Hygiene: IAM Access Key Age - - Hygiene: Unused IAM Access Keys - - Exclude From Reports - - Users - - Roles - - Resources - - Serverless Functions - - Groups - - Security Groups - - S3 Buckets - - -1. Select a category and view the following columns of information: - - - **User**, **Role**, **Resource**, **Serverless Function Name**: Displays the name of the identity. - - **Authorization System**: Displays the authorization system to which the identity belongs. 
- - **Domain**: Displays the domain name to which the identity belongs. - - **Permissions**: Displays the maximum number of permissions that the identity can be granted. - - **Used**: Displays how many permissions that the identity has used. - - **Granted**: Displays how many permissions that the identity has been granted. - - **PCI**: Displays the permission creep index (PCI) score of the identity. - - **Date Last Active On**: Displays the date that the identity was last active. - - **Date Created On**: Displays the date when the identity was created. - - - - - -## Next steps - -- For information on how to view system reports in the **Reports** dashboard, see [View system reports in the Reports dashboard](cloudknox-product-reports.md). -- For a detailed overview of available system reports, see [View a list and description of system reports](cloudknox-all-reports.md). -- For information about how to generate and view a system report, see [Generate and view a system report](cloudknox-report-view-system-report.md). -- For information about how to create, view, and share a system report, see [Create, view, and share a custom report](cloudknox-report-view-system-report.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-reports.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-reports.md deleted file mode 100644 index be79c6b0acb40..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-reports.md +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: View system reports in the Reports dashboard in CloudKnox Permissions Management -description: How to view system reports in the Reports dashboard in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# View system reports in the Reports dashboard - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -CloudKnox Permissions Management (CloudKnox) has various types of system report types available that capture specific sets of data. These reports allow management to: - -- Make timely decisions. -- Analyze trends and system/user performance. -- Identify trends in data and high risk areas so that management can address issues more quickly and improve their efficiency. - -## Explore the Reports dashboard - -The **Reports** dashboard provides a table of information with both system reports and custom reports. The **Reports** dashboard defaults to the **System Reports** tab, which has the following details: - -- **Report Name**: The name of the report. -- **Category**: The type of report. For example, **Permission**. -- **Authorization Systems**: Displays which authorizations the custom report applies to. -- **Format**: Displays the output format the report can be generated in. For example, comma-separated values (CSV) format, portable document format (PDF), or Microsoft Excel Open XML Spreadsheet (XLSX) format. - - - To download a report, select the down arrow to the right of the report name, or from the ellipses **(...)** menu, select **Download**. 
- - The following message displays across the top of the screen in green if the download is successful: **Successfully Started To Generate On Demand Report**. - -## Available system reports - -CloudKnox offers the following reports for management associated with the authorization systems noted in parenthesis: - -- **Access Key Entitlements And Usage**: - - **Summary of report**: Provides information about access key, for example, permissions, usage, and rotation date. - - **Applies to**: Amazon Web Services (AWS) and Microsoft Azure - - **Report output type**: CSV - - **Ability to collate report**: Yes - - **Type of report**: **Summary** or **Detailed** - - **Use cases**: - - The access key age, last rotation date, and last usage date is available in the summary report to help with key rotation. - - The granted task and Permissions creep index (PCI) score to take action on the keys. - -- **User Entitlements And Usage**: - - **Summary of report**: Provides information about the identities' permissions, for example, entitlement, usage, and PCI. - - **Applies to**: AWS, Azure, and Google Cloud Platform (GCP) - - **Report output type**: CSV - - **Ability to collate report**: Yes - - **Type of report**: **Summary** or **Detailed** - - **Use cases**: - - The data displayed on the **Usage Analytics** screen is downloaded as part of the **Summary** report. The user's detailed permissions usage is listed in the **Detailed** report. - -- **Group Entitlements And Usage**: - - **Summary of report**: Provides information about the group's permissions, for example, entitlement, usage, and PCI. - - **Applies to**: AWS, Azure, and GCP - - **Report output type**: CSV - - **Ability to collate report**: Yes - - **Type of report**: **Summary** - - **Use cases**: - - All group level entitlements and permission assignments, PCIs, and the number of members are listed as part of this report. - -- **Identity Permissions**: - - **Summary of report**: Report on identities that have specific permissions, for example, identities that have permission to delete any S3 buckets. - - **Applies to**: AWS, Azure, and GCP - - **Report output type**: CSV - - **Ability to collate report**: No - - **Type of report**: **Summary** - - **Use cases**: - - Any task usage or specific task usage via User/Group/Role/App can be tracked with this report. - -- **Identity privilege activity report** - - **Summary of report**: Provides information about permission changes that have occurred in the selected duration. - - **Applies to**: AWS, Azure, and GCP - - **Report output type**: PDF - - **Ability to collate report**: No - - **Type of report**: **Summary** - - **Use cases**: - - Any identity permission change can be captured using this report. - - The **Identity Privilege Activity** report has the following main sections: **User Summary**, **Group Summary**, **Role Summary**, and **Delete Task Summary**. - - The **User** summary lists the current granted permissions and high-risk permissions and resources accessed in 1 day, 7 days, or 30 days. There are subsections for newly added or deleted users, users with PCI change, and High-risk active/inactive users. - - The **Group** summary lists the administrator level groups with the current granted permissions and high-risk permissions and resources accessed in 1 day, 7 days, or 30 days. There are subsections for newly added or deleted groups, groups with PCI change, and High-risk active/inactive groups. - - The **Role summary** lists similar details as **Group Summary**. 
- - The **Delete Task summary** section lists the number of times the **Delete task** has been executed in the given time period. - -- **Permissions Analytics Report** - - **Summary of report**: Provides information about the violation of key security best practices. - - **Applies to**: AWS, Azure, and GCP - - **Report output type**: CSV - - **Ability to collate report**: Yes - - **Type of report**: **Detailed** - - **Use cases**: - - This report lists the different key findings in the selected auth systems. The key findings include super identities, inactive identities, over provisioned active identities, storage bucket hygiene, and access key age (for AWS only). The report helps administrators to visualize the findings across the organization. - - For more information about this report, see [Permissions analytics report](cloudknox-product-permissions-analytics-reports.md). - -- **Role/Policy Details** - - **Summary of report**: Provides information about roles and policies. - - **Applies to**: AWS, Azure, GCP - - **Report output type**: CSV - - **Ability to collate report**: No - - **Type of report**: **Summary** - - **Use cases**: - - Assigned/Unassigned, custom/system policy, and the used/unused condition is captured in this report for any specific, or all, AWS accounts. Similar data can be captured for Azure/GCP for the assigned/unassigned roles. - -- **PCI History** - - **Summary of report**: Provides a report of privilege creep index (PCI) history. - - **Applies to**: AWS, Azure, GCP - - **Report output type**: CSV - - **Ability to collate report**: Yes - - **Type of report**: **Summary** - - **Use cases**: - - This report plots the trend of the PCI by displaying the monthly PCI history for each authorization system. - -- **All Permissions for Identity** - - **Summary of report**: Provides results of all permissions for identities. - - **Applies to**: AWS, Azure, GCP - - **Report output type**: CSV - - **Ability to collate report**: Yes - - **Type of report**: **Detailed** - - **Use cases**: - - This report lists all the assigned permissions for the selected identities. - - - - -## Next steps - -- For a detailed overview of available system reports, see [View a list and description of system reports](cloudknox-all-reports.md). -- For information about how to create, view, and share a system report, see [Create, view, and share a custom report](cloudknox-report-view-system-report.md). -- For information about how to create and view a custom report, see [Generate and view a custom report](cloudknox-report-create-custom-report.md). -- For information about how to create and view the Permissions analytics report, see [Generate and download the Permissions analytics report](cloudknox-product-permissions-analytics-reports.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-rule-based-anomalies.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-rule-based-anomalies.md deleted file mode 100644 index f9af667bb858d..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-rule-based-anomalies.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: Create and view rule-based anomalies and anomaly triggers in CloudKnox Permissions Management -description: How to create and view rule-based anomalies and anomaly triggers in CloudKnox Permissions Management. 
-services: active-directory
-author: kenwith
-manager: rkarlin
-ms.service: active-directory
-ms.subservice: ciem
-ms.workload: identity
-ms.topic: how-to
-ms.date: 02/23/2022
-ms.author: kenwith
----
-
-# Create and view rule-based anomaly alerts and anomaly triggers
-
-> [!IMPORTANT]
-> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW.
-> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here.
-
-Rule-based anomalies identify recent activity in CloudKnox Permissions Management (CloudKnox) that is determined to be unusual, based on explicit rules defined in the activity trigger. The goal of rule-based anomalies is high-precision detection.
-
-## View rule-based anomaly alerts
-
-1. In the CloudKnox home page, select **Activity triggers** (the bell icon).
-1. Select **Rule-Based Anomaly**, and then select the **Alerts** subtab.
-
-    The **Alerts** subtab displays the following information:
-
-    - **Alert Name**: Lists the name of the alert.
-
-        - To view the specific identity, resource, and task names that occurred during the alert collection period, select the **Alert Name**.
-
-    - **Anomaly Alert Rule**: Displays the name of the rule selected when creating the alert.
-    - **# of Occurrences**: Displays how many times the alert trigger has occurred.
-    - **Task**: Displays how many performed tasks triggered the alert.
-    - **Resources**: Displays how many accessed resources triggered the alert.
-    - **Identity**: Displays how many identities performing unusual behavior triggered the alert.
-    - **Authorization System**: Displays which authorization systems the alert applies to: Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**).
-    - **Date/Time**: Lists the date and time of the alert.
-    - **Date/Time (UTC)**: Lists the date and time of the alert in Coordinated Universal Time (UTC).
-
-
-1. To filter alerts:
-
-   - From the **Alert Name** dropdown, select **All** or the appropriate alert name.
-   - From the **Date** dropdown menu, select **Last 24 Hours**, **Last 2 Days**, **Last Week**, or **Custom Range**, and then select **Apply**.
-
-     - If you select **Custom Range**, also enter **From** and **To** duration settings.
-1. To view details that match the alert criteria, select the ellipses (**...**).
-
-    - **View Trigger**: Displays the current trigger settings and applicable authorization system details.
-    - **Details**: Displays details about **Authorization System Type**, **Authorization Systems**, **Resources**, **Tasks**, **Identities**, and **Activity**.
-    - **Activity**: Displays details about the **Identity Name**, **Resource Name**, **Task Name**, **Date/Time**, **Inactive For**, and **IP Address**. Selecting the "eye" icon displays the **Raw Events Summary**.
-
-## Create a rule-based anomaly trigger
-
-1. In the CloudKnox home page, select **Activity triggers** (the bell icon).
-1. Select **Rule-Based Anomaly**, and then select the **Alerts** subtab.
-1. Select **Create Anomaly Trigger**.
-
-1. In the **Alert Name** box, enter a name for the alert.
-1. Select the **Authorization System**: **AWS**, **Azure**, or **GCP**.
-1. Select one of the following conditions:
-    - **Any Resource Accessed for the First Time**: The identity accesses a resource for the first time during the specified time interval.
- - **Identity Performs a Particular Task for the First Time**: The identity does a specific task for the first time during the specified time interval. - - **Identity Performs a Task for the First Time**: The identity performs any task for the first time during the specified time interval -1. Select **Next**. -1. On the **Authorization Systems** tab, select the available authorization systems and folders, or select **All**. - - This screen defaults to **List** view, but you can change it to **Folders** view. You can select the applicable folder instead of individually selecting by authorization system. - - - The **Status** column displays if the authorization system is online or offline. - - The **Controller** column displays if the controller is enabled or disabled. - -1. On the **Configuration** tab, to update the **Time Interval**, select **90 Days**, **60 Days**, or **30 Days** from the **Time range** dropdown. -1. Select **Save**. - -## View a rule-based anomaly trigger - -1. In the CloudKnox home page, select **Activity triggers** (the bell icon). -1. Select **Rule-Based Anomaly**, and then select the **Alert Triggers** subtab. - - The **Alert Triggers** subtab displays the following information: - - - **Alerts**: Displays the name of the alert. - - **Anomaly Alert Rule**: Displays the name of the selected rule when creating the alert. - - **# of Users Subscribed**: Displays the number of users subscribed to the alert. - - **Created By**: Displays the email address of the user who created the alert. - - **Last Modified By**: Displays the email address of the user who last modified the alert. - - **Last Modified On**: Displays the date and time the trigger was last modified. - - **Subscription**: Subscribes you to receive alert emails. Switches between **On** and **Off**. - -1. To view other options available to you, select the ellipses (**...**), and then select from the available options: - - If the **Subscription** is **On**, the following options are available: - - - **Edit**: Enables you to modify alert parameters. - - Only the user who created the alert can edit the trigger screen, rename an alert, deactivate an alert, and delete an alert. Changes made by other users aren't saved. - - - **Duplicate**: Create a duplicate copy of the selected alert trigger. - - **Rename**: Enter the new name of the query, and then select **Save.** - - **Deactivate**: The alert will still be listed, but will no longer send emails to subscribed users. - - **Activate**: Activate the alert trigger and start sending emails to subscribed users. - - **Notification Settings**: View the **Email** of users who are subscribed to the alert trigger. - - **Delete**: Delete the alert. - - If the **Subscription** is **Off**, the following options are available: - - **View**: View details of the alert trigger. - - **Notification Settings**: View the **Email** of users who are subscribed to the alert trigger. - - **Duplicate**: Create a duplicate copy of the selected alert trigger. - -1. To filter by **Activated** or **Deactivated**, in the **Status** section, select **All**, **Activated**, or **Deactivated**, and then select **Apply**. - - - -## Next steps - -- For an overview on activity triggers, see [View information about activity triggers](cloudknox-ui-triggers.md). -- For information on activity alerts and alert triggers, see [Create and view activity alerts and alert triggers](cloudknox-howto-create-alert-trigger.md). 
-- For information on finding outliers in identity's behavior, see [Create and view statistical anomalies and anomaly triggers](cloudknox-product-statistical-anomalies.md). -- For information on permission analytics triggers, see [Create and view permission analytics triggers](cloudknox-product-permission-analytics.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-statistical-anomalies.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-statistical-anomalies.md deleted file mode 100644 index ebddfd89f42dd..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-statistical-anomalies.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: Create and view statistical anomalies and anomaly triggers in CloudKnox Permissions Management -description: How to create and view statistical anomalies and anomaly triggers in the Statistical Anomaly tab in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Create and view statistical anomalies and anomaly triggers - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -Statistical anomalies can detect outliers in an identity's behavior if recent activity is determined to be unusual based on models defined in an activity trigger. The goal of this anomaly trigger is a high recall rate. - -## View statistical anomalies in an identity's behavior - -1. In the CloudKnox home page, select **Activity triggers** (the bell icon). -1. Select **Statistical Anomaly**, and then select the **Alerts** subtab. - - The **Alerts** subtab displays the following information: - - - **Alert Name**: Lists the name of the alert. - - **Anomaly Alert Rule**: Displays the name of the rule select when creating the alert. - - **# of Occurrences**: Displays how many times the alert trigger has occurred. - - **Authorization System**: Displays which authorization systems the alert applies to. - - **Date/Time**: Lists the day of the outlier occurring. - - **Date/Time (UTC)**: Lists the day of the outlier occurring in Coordinated Universal Time (UTC). - - -1. To filter the alerts based on name, select the appropriate alert name or choose **All** from the **Alert Name** dropdown menu, and select **Apply**. -1. To filter the alerts based on alert time, select **Last 24 Hours**, **Last 2 Days**, **Last Week**, or **Custom Range** from the **Date** dropdown menu, and select **Apply**. -1. If you select the ellipses (**...**) and select: - - **Details**, this brings you to an Alert Summary view with **Authorization System**, **Statistical Model** and **Observance Period** displayed along with a table with a row per identity triggering this alert. 
From here, you can select:
-      - **Details**: Displays graphs that highlight the anomaly in context, along with up to the top three actions performed on the day of the anomaly.
-      - **View Trigger**: Displays the current trigger settings and applicable authorization system details.
-    - **View Trigger**: Displays the current trigger settings and applicable authorization system details.
-
-## Create a statistical anomaly trigger
-
-1. In the CloudKnox home page, select **Activity triggers** (the bell icon).
-1. Select **Statistical Anomaly**, select the **Alerts** subtab, and then select **Create Alert Trigger**.
-1. Enter a name for the alert in the **Alert Name** box.
-1. Select the **Authorization System**: Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**).
-1. Select one of the following conditions:
-
-    - **Identity Performed High Number of Tasks**: The identity performs a higher volume of tasks than usual. For example, an identity typically performs 25 tasks per day, and now it performs 100 tasks per day.
-    - **Identity Performed Low Number of Tasks**: The identity performs a lower volume of tasks than usual. For example, an identity typically performs 100 tasks per day, and now it performs 25 tasks per day.
-    - **Identity Performed Tasks with Unusual Results**: The identity gets a different result than usual when performing an action. For example, tasks that usually end in a successful result now end in a failed result, or vice versa.
-    - **Identity Performed Tasks with Unusual Timing**: The identity performs tasks at unusual times, as established by its baseline in the observance period. Times are grouped into the following four-hour UTC windows:
-      - 12AM-4AM UTC
-      - 4AM-8AM UTC
-      - 8AM-12PM UTC
-      - 12PM-4PM UTC
-      - 4PM-8PM UTC
-      - 8PM-12AM UTC
-    - **Identity Performed Tasks with Unusual Types**: The identity performs unusual types of tasks, as established by its baseline in the observance period. For example, an identity performs read, write, or delete tasks that it wouldn't ordinarily perform.
-    - **Identity Performed Tasks with Multiple Unusual Patterns**: The identity shows several unusual patterns in the tasks it performs, as established by its baseline in the observance period.
-1. Select **Next**.
-
-1. On the **Authorization Systems** tab, select the appropriate systems, or, to select all systems, select **All**.
-
-    The screen defaults to the **List** view, but you can switch to the **Folder** view and select an entire folder instead of selecting authorization systems individually.
-
-    - The **Status** column displays whether the authorization system is online or offline.
-
-    - The **Controller** column displays whether the controller is enabled or disabled.
-
-
-1. On the **Configuration** tab, to update the **Time Interval**, from the **Time Range** dropdown, select **90 Days**, **60 Days**, or **30 Days**, and then select **Save**.
-
-## View statistical anomaly triggers
-
-1. In the CloudKnox home page, select **Activity triggers** (the bell icon).
-1. Select **Statistical Anomaly**, and then select the **Alert Triggers** subtab.
-
-    The **Alert Triggers** subtab displays the following information:
-
-    - **Alert**: Displays the name of the alert.
-    - **Anomaly Alert Rule**: Displays the name of the rule selected when creating the alert.
-    - **# of users subscribed**: Displays the number of users subscribed to the alert.
-    - **Created By**: Displays the email address of the user who created the alert.
-    - **Last Modified By**: Displays the email address of the user who last modified the alert.
-    - **Last Modified On**: Displays the date and time the trigger was last modified.
-    - **Subscription**: Subscribes you to receive alert emails. Toggle the button to **On** or **Off**.
-
-1. To filter by **Activated** or **Deactivated**, in the **Status** section, select **All**, **Activated**, or **Deactivated**, and then select **Apply**.
-
-1. To view other options available to you, select the ellipses (**...**), and then select from the available options:
-    If the **Subscription** is **On**, the following options are available:
-    - **Edit**: Enables you to modify alert parameters.
-
-        > [!NOTE]
-        > Only the user who created the alert can perform the following actions: edit the trigger screen, rename an alert, deactivate an alert, and delete an alert. Changes made by other users aren't saved.
-    - **Duplicate**: Create a duplicate copy of the selected alert trigger.
-    - **Rename**: Enter the new name of the alert, and then select **Save**.
-    - **Deactivate**: The alert will still be listed, but will no longer send emails to subscribed users.
-    - **Activate**: Activate the alert trigger and start sending emails to subscribed users.
-    - **Notification Settings**: View the **Email** of users who are subscribed to the alert trigger.
-    - **Delete**: Delete the alert.
-    If the **Subscription** is **Off**, the following options are available:
-    - **View**: View details of the alert trigger.
-    - **Notification settings**: View the **Email** of users who are subscribed to the alert trigger.
-    - **Duplicate**: Create a duplicate copy of the selected alert trigger.
-
-1. Select **Apply**.
-
-## Next steps
-
-- For an overview on activity triggers, see [View information about activity triggers](cloudknox-ui-triggers.md).
-- For information on activity alerts and alert triggers, see [Create and view activity alerts and alert triggers](cloudknox-howto-create-alert-trigger.md).
-- For information on rule-based anomalies and anomaly triggers, see [Create and view rule-based anomalies and anomaly triggers](cloudknox-product-rule-based-anomalies.md).
-- For information on permission analytics triggers, see [Create and view permission analytics triggers](cloudknox-product-permission-analytics.md).
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-report-create-custom-report.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-report-create-custom-report.md
deleted file mode 100644
index 2f7d8b0c51ff4..0000000000000
--- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-report-create-custom-report.md
+++ /dev/null
@@ -1,128 +0,0 @@
----
-title: Create, view, and share a custom report in CloudKnox Permissions Management
-description: How to create, view, and share a custom report in CloudKnox Permissions Management.
-services: active-directory
-author: kenwith
-manager: rkarlin
-ms.service: active-directory
-ms.subservice: ciem
-ms.workload: identity
-ms.topic: how-to
-ms.date: 02/23/2022
-ms.author: kenwith
----
-
-# Create, view, and share a custom report
-
-> [!IMPORTANT]
-> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW.
-> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here.
- -This article describes how to create, view, and share a custom report in CloudKnox Permissions Management (CloudKnox). - -## Create a custom report - -1. In the CloudKnox home page, select the **Reports** tab, and then select the **Custom Reports** subtab. -1. Select **New Custom Report**. -1. In the **Report Name** box, enter a name for your report. -1. From the **Report Based on** list: - 1. To view which authorization systems the report applies to, hover over each report name. - 1. To view a description of a report, select the report. -1. Select a report you want to use as the base for your custom report, and then select **Next**. -1. In the **MyReport** box, select the **Authorization System** you want: Amazon Web Services (**AWS**), Microsoft Azure (**Azure**), or Google Cloud Platform (**GCP**). - -1. To add specific accounts, select the **List** subtab, and then select **All** or the account names. -1. To add specific folders, select the **Folders** subtab, and then select **All** or the folder names. - -1. Select the **Report Format** subtab, and then select the format for your report: comma-separated values (**CSV**) file, portable document format (**PDF**), or Microsoft Excel Open XML Spreadsheet (**XLSX**) file. -1. Select the **Schedule** tab, and then select the frequency for your report, from **None** up to **Monthly**. - - - For **Hourly** and **Daily** options, set the start date by choosing from the **Calendar** dropdown, and can input a specific time of the day they want to receive the report. - - In addition to date and time, the **Weekly** and **Biweekly** provide options for you to select on which day(s)of the week the report should repeat. - -1. Select **Save**. - - The following message displays across the top of the screen in green if the download is successful: **Report has been created**. -The report name appears in the **Reports** table. - -## View a custom report - -1. In the CloudKnox home page, select the **Reports** tab, and then select the **Custom Reports** subtab. - - The **Custom Reports** tab displays the following information in the **Reports** table: - - - **Report Name**: The name of the report. - - **Category**: The type of report: **Permission**. - - **Authorization System**: The authorization system in which you can view the report: AWS, Azure, and GCP. - - **Format**: The format of the report, **CSV**, **PDF**, or **XLSX** format. - -1. To view a report, from the **Report Name** column, select the report you want. -1. To download a report, select the down arrow to the right of the report name, or from the ellipses **(...)** menu, select **Download**. -1. To refresh the list of reports, select **Reload**. - -## Share a custom report - -1. In the CloudKnox home page, select the **Reports** tab, and then select the **Custom Reports** subtab. -1. In the **Reports** table, select a report and then select the ellipses (**...**) icon. -1. In the **Report Settings** box, select **Share with**. -1. In the **Search Email to add** box, enter the name of other CloudKnox user(s). - - You can only share reports with other CloudKnox users. -1. Select **Save**. - -## Search for a custom report - -1. In the CloudKnox home page, select the **Reports** tab, and then select the **Custom Reports** subtab. -1. On the **Custom Reports** tab, select **Search**. -1. In the **Search** box, enter the name of the report you want. - - The **Custom Reports** tab displays a list of reports that match your search criteria. -1. Select the report you want. -1. 
To download a report, select the down arrow to the right of the report name, or from the ellipses **(...)** menu, select **Download**. -1. To refresh the list of reports, select **Reload**. - - -## Modify a saved or scheduled custom report - -1. In the CloudKnox home page, select the **Reports** tab, and then select the **Custom Reports** subtab. -1. Hover over the report name on the **Custom Reports** tab. - - - To rename the report, select **Edit** (the pencil icon), and enter a new name. - - To change the settings for your report, select **Settings** (the gear icon). Make your changes, and then select **Save**. - - - To download a copy of the report, select the **Down arrow** icon. - -1. To perform other actions to the report, select the ellipses (**...**) icon: - - - **Download**: Downloads a copy of the report. - - - **Report Settings**: Displays the settings for the report, including scheduling, sharing the report, and so on. - - - **Duplicate**: Creates a duplicate of the report called **"Copy of XXX"**. Any reports not created by the current user are listed as **Duplicate**. - - When you select **Duplicate**, a box appears asking if you're sure you want to create a duplicate. Select **Confirm**. - - When the report is successfully duplicated, the following message displays: **Report generated successfully**. - - - **API Settings**: Download the report using your Application Programming Interface (API) settings. - - When this option is selected, the **API Settings** window opens and displays the **Report ID** and **Secret Key**. Select **Generate New Key**. - - - **Delete**: Select this option to delete the report. - - After selecting **Delete**, a pop-up box appears asking if the user is sure they want to delete the report. Select **Confirm**. - - **Report is deleted successfully** appears across the top of the screen in green if successfully deleted. - - - **Unsubscribe**: Unsubscribe the user from receiving scheduled reports and notifications. - - This option is only available after a report has been scheduled. - - -## Next steps - -- For information on how to view system reports in the **Reports** dashboard, see [View system reports in the Reports dashboard](cloudknox-product-reports.md). -- For a detailed overview of available system reports, see [View a list and description of system reports](cloudknox-all-reports.md). -- For information about how to generate and view a system report, see [Generate and view a system report](cloudknox-report-view-system-report.md). -- For information about how to create and view the Permissions analytics report, see [Generate and download the Permissions analytics report](cloudknox-product-permissions-analytics-reports.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-report-view-system-report.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-report-view-system-report.md deleted file mode 100644 index 35563a9b5634b..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-report-view-system-report.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: Generate and view a system report in CloudKnox Permissions Management -description: How to generate and view a system report in the CloudKnox Permissions Management. 
-services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Generate and view a system report - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how to generate and view a system report in CloudKnox Permissions Management (CloudKnox). - -## Generate a system report - -1. In the CloudKnox home page, select the **Reports** tab, and then select the **Systems Reports** subtab. - The **Systems Reports** subtab displays the following options in the **Reports** table: - - - **Report Name**: The name of the report. - - **Category**: The type of report: **Permission**. - - **Authorization System**: The authorization system activity in the report: Amazon Web Services (AWS), Microsoft Azure (Azure), and Google Cloud Platform (GCP). - - **Format**: The format in which the report is available: comma-separated values (**CSV**) format, portable document format (**PDF**), or Microsoft Excel Open XML Spreadsheet (**XLSX**) format. - -1. In the **Report Name** column, find the report you want, and then select the down arrow to the right of the report name to download the report. - - Or, from the ellipses **(...)** menu, select **Download**. - - The following message displays: **Successfully Started To Generate On Demand Report.** - - > [!NOTE] - > If you select one authorization system, the report includes a summary. If you select more than one authorization system, the report does not include a summary. - -1. To refresh the list of reports, select **Reload**. - -## Search for a system report - -1. On the **Systems Reports** subtab, select **Search**. -1. In the **Search** box, enter the name of the report you want. - - The **Systems Reports** subtab displays a list of reports that match your search criteria. -1. Select a report from the **Report Name** column. -1. To download a report, select the down arrow to the right of the report name, or from the ellipses **(...)** menu, select **Download**. -1. To refresh the list of reports, select **Reload**. - - -## Next steps - -- For information on how to view system reports in the **Reports** dashboard, see [View system reports in the Reports dashboard](cloudknox-product-reports.md). -- For a detailed overview of available system reports, see [View a list and description of system reports](cloudknox-all-reports.md). -- For information about how to create, view, and share a system report, see [Create, view, and share a custom report](cloudknox-report-view-system-report.md). -- For information about how to create and view the Permissions analytics report, see [Generate and download the Permissions analytics report](cloudknox-product-permissions-analytics-reports.md). 
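
The system reports described above can be downloaded in CSV format and then post-processed outside CloudKnox. The following is a minimal, hypothetical sketch of that kind of post-processing; the file name and the `Authorization System` column header are assumptions for illustration only, not documented CloudKnox output, so adjust them to match the headers in the report you actually download.

```python
import csv
from collections import Counter

# Hypothetical example: the file name and column header below are assumptions,
# not documented CloudKnox output. Adjust them to match your downloaded report.
REPORT_PATH = "cloudknox_system_report.csv"

def summarize_report(path: str) -> None:
    """Print the total row count and a per-authorization-system breakdown."""
    with open(path, newline="", encoding="utf-8") as report_file:
        rows = list(csv.DictReader(report_file))

    by_system = Counter(row.get("Authorization System", "Unknown") for row in rows)

    print(f"Total rows: {len(rows)}")
    for system, count in by_system.most_common():
        print(f"{system}: {count}")

if __name__ == "__main__":
    summarize_report(REPORT_PATH)
```

A summary like this can be useful for quickly comparing the volume of findings across AWS, Azure, and GCP before opening the full report in a spreadsheet.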
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-training-videos.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-training-videos.md deleted file mode 100644 index 5e92b74f6f400..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-training-videos.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: CloudKnox Permissions Management training videos -description: CloudKnox Permissions Management training videos. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 04/20/2022 -ms.author: kenwith ---- - -# CloudKnox Permissions Management training videos - -To view step-by-step training videos on how to use CloudKnox Permissions Management (CloudKnox) features, select a link below. - -## Onboard CloudKnox in your organization - - -### Enable CloudKnox in your Azure Active Directory (Azure AD) tenant - -To view a video on how to enable CloudKnox in your Azure AD tenant, select [Enable CloudKnox in your Azure AD tenant](https://www.youtube.com/watch?v=-fkfeZyevoo). - -### Configure and onboard Amazon Web Services (AWS) accounts - -To view a video on how to configure and onboard Amazon Web Services (AWS) accounts in CloudKnox, select [Configure and onboard AWS accounts](https://www.youtube.com/watch?v=R6K21wiWYmE). - -### Configure and onboard Google Cloud Platform (GCP) accounts - -To view a video on how to configure and onboard Google Cloud Platform (GCP) accounts in CloudKnox, select [Configure and onboard GCP accounts](https://www.youtube.com/watch?app=desktop&v=W3epcOaec28). - - - - -## Next steps - -- For an overview of CloudKnox, see [What's CloudKnox Permissions Management?](cloudknox-overview.md) -- For a list of frequently asked questions (FAQs) about CloudKnox, see [FAQs](cloudknox-faqs.md). -- For information on how to start viewing information about your authorization system in CloudKnox, see [View key statistics and data about your authorization system](cloudknox-ui-dashboard.md). \ No newline at end of file diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-troubleshoot.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-troubleshoot.md deleted file mode 100644 index 8d685638b9e0c..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-troubleshoot.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Troubleshoot issues with CloudKnox Permissions Management -description: Troubleshoot issues with CloudKnox Permissions Management -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: troubleshooting -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Troubleshoot issues with CloudKnox Permissions Management - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This section answers troubleshoot issues with CloudKnox Permissions Management (CloudKnox). - -## One time passcode (OTP) email - -### The user didn't receive the OTP email. - -- Check your junk or Spam mail folder for the email. 
- -## Reports - -### The individual files are generated according to the authorization system (subscription/account/project). - -- Select the **Collate** option in the **Custom Report** screen in the CloudKnox **Reports** tab. - -## Data collection in AWS - -### Data collection > AWS Authorization system data collection status is offline. Upload and transform is also offline. - -- Check the CloudKnox-related role that exists in these accounts. -- Validate the trust relationship with the OpenID Connect (OIDC) role. - - diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-audit-trail.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-audit-trail.md deleted file mode 100644 index 6f854bb414c74..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-audit-trail.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: Use queries to see how users access information in an authorization system in CloudKnox Permissions Management -description: How to use queries to see how users access information in an authorization system in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: overview -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Use queries to see how users access information - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -The **Audit** dashboard in CloudKnox Permissions Management (CloudKnox) provides an overview of queries a CloudKnox user has created to review how users access their authorization systems and accounts. - -This article provides an overview of the components of the **Audit** dashboard. - -## View information in the Audit dashboard - - -1. In CloudKnox, select the **Audit** tab. - - CloudKnox displays the query options available to you. - -1. The following options display at the top of the **Audit** dashboard: - - - A tab for each existing query. Select the tab to see details about the query. - - **New Query**: Select the tab to create a new query. - - **New tab (+)**: Select the tab to add a **New Query** tab. - - **Saved Queries**: Select to view a list of saved queries. - -1. To return to the main page, select **Back to Audit Trail**. - - -## Use a query to view information - -1. In CloudKnox, select the **Audit** tab. -1. The **New query** tab displays the following options: - - - **Authorization Systems Type**: A list of your authorization systems: Amazon Web Services (**AWS**), Microsoft Azure (**Azure**), Google Cloud Platform (**GCP**), or Platform (**Platform**). - - - **Authorization System**: A **List** of accounts and **Folders** in the authorization system. - - - To display a **List** of accounts and **Folders** in the authorization system, select the down arrow, and then select **Apply**. - -1. To add an **Audit Trail Condition**, select **Conditions** (the eye icon), select the conditions you want to add, and then select **Close**. - -1. To edit existing parameters, select **Edit** (the pencil icon). - -1. To add the parameter that you created to the query, select **Add**. - -1. To search for activity data that you can add to the query, select **Search** . - -1. 
To save your query, select **Save**. - -1. To save your query under a different name, select **Save As** (the ellipses **(...)** icon). - -1. To discard your work and start creating a query again, select **Reset Query**. - -1. To delete a query, select the **X** to the right of the query tab. - - - -## Next steps - -- For information on how to filter and view user activity, see [Filter and query user activity](cloudknox-product-audit-trail.md). -- For information on how to create a query,see [Create a custom query](cloudknox-howto-create-custom-queries.md). -- For information on how to generate an on-demand report from a query, see [Generate an on-demand report from a query](cloudknox-howto-audit-trail-results.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-autopilot.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-autopilot.md deleted file mode 100644 index 586576497eb4b..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-autopilot.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: View rules in the Autopilot dashboard in CloudKnox Permissions Management -description: How to view rules in the Autopilot dashboard in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: overview -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# View rules in the Autopilot dashboard - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -The **Autopilot** dashboard in CloudKnox Permissions Management (CloudKnox) provides a table of information about **Autopilot rules** for administrators. - - -> [!NOTE] -> Only users with the **Administrator** role can view and make changes on this tab. - -## View a list of rules - -1. In the CloudKnox home page, select the **Autopilot** tab. -1. In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select the authorization system types you want: Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). -1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want. -1. Select **Apply**. - - The following information displays in the **Autopilot Rules** table: - - - **Rule Name**: The name of the rule. - - **State**: The status of the rule: idle (not being use) or active (being used). - - **Rule Type**: The type of rule being applied. - - **Mode**: The status of the mode: on-demand or not. - - **Last Generated**: The date and time the rule was last generated. - - **Created By**: The email address of the user who created the rule. - - **Last Modified**: The date and time the rule was last modified. - - **Subscription**: Provides an **On** or **Off** subscription that allows you to receive email notifications when recommendations have been generated, applied, or unapplied. - -## View other available options for rules - -- Select the ellipses **(...)** - - The following options are available: - - - **View Rule**: Select to view details of the rule. - - **Delete Rule**: Select to delete the rule. 
Only the user who created the selected rule can delete the rule. - - **Generate Recommendations**: Creates recommendations for each user and the authorization system. Only the user who created the selected rule can create recommendations. - - **View Recommendations**: Displays the recommendations for each user and authorization system. - - **Notification Settings**: Displays the users subscribed to this rule. Only the user who created the selected rule can add other users to be notified. - -You can also select: - -- **Reload**: Select to refresh the displayed list of roles/policies. -- **Search**: Select to search for a specific role/policy. -- **Columns**: From the dropdown list, select the columns you want to display. - - Select **Reset to default** to return to the system defaults. -- **New Rule**: Select to create a new rule. For more information, see [Create a rule](cloudknox-howto-create-rule.md). - - - -## Next steps - -- For information about creating rules, see [Create a rule](cloudknox-howto-create-rule.md). -- For information about generating, viewing, and applying rule recommendations for rules, see [Generate, view, and apply rule recommendations for rules](cloudknox-howto-recommendations-rule.md). -- For information about notification settings for rules, see [View notification settings for a rule](cloudknox-howto-notifications-rule.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-dashboard.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-dashboard.md deleted file mode 100644 index f813214c6f402..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-dashboard.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -title: View key statistics and data about your authorization system in CloudKnox Permissions Management -description: How to view statistics and data about your authorization system in the CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: overview -ms.date: 02/23/2022 -ms.author: kenwith ---- - - -# View key statistics and data about your authorization system - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -CloudKnox Permissions Management (CloudKnox) provides a summary of key statistics and data about your authorization system regularly. This information is available for Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). - -## View metrics related to avoidable risk - -The data provided by CloudKnox includes metrics related to avoidable risk. These metrics allow the CloudKnox administrator to identify areas where they can reduce risks related to the principle of least permissions. - -You can view the following information in CloudKnox: - -- The **Permission Creep Index (PCI)** heat map on the CloudKnox **Dashboard** identifies: - - The number of users who have been granted high-risk permissions but aren't using them. - - The number of users who contribute to the permission creep index (PCI) and where they are on the scale. 
- -- The [**Analytics** dashboard](cloudknox-usage-analytics-home.md) provides a snapshot of permission metrics within the last 90 days. - - -## Components of the CloudKnox Dashboard - -The CloudKnox **Dashboard** displays the following information: - -- **Authorization system types**: A dropdown list of authorization system types you can access: AWS, Azure, and GCP. - -- **Authorization System**: Displays a **List** of accounts and **Folders** in the selected authorization system you can access. - - - To add or remove accounts and folders, from the **Name** list, select or deselect accounts and folders, and then select **Apply**. - -- **Permission Creep Index (PCI)**: The graph displays the **# of identities contributing to PCI**. - - The PCI graph may display one or more bubbles. Each bubble displays the number of identities that are considered high risk. *High-risk* refers to the number of users who have permissions that exceed their normal or required usage. - - To display a list of the number of identities contributing to the **Low PCI**, **Medium PCI**, and **High PCI**, select the **List** icon in the upper right of the graph. - - To display the PCI graph again, select the **Graph** icon in the upper right of the list box. - -- **Highest PCI change**: Displays a list of your accounts and information about the **PCI** and **Change** in the index over the past 7 days. - - To download the list, select the down arrow in the upper right of the list box. - - The following message displays: **We'll email you a link to download the file.** - - Check your email for the message from the CloudKnox Customer Success Team. The email contains a link to the **PCI history** report in Microsoft Excel format. - - The email also includes a link to the **Reports** dashboard, where you can configure how and when you want to receive reports automatically. - - To view all the PCI changes, select **View all**. - -- **Identity**: A summary of the **Findings** that includes: - - The number of **Inactive** identities that haven't been accessed in over 90 days. - - The number of **Super** identities that access data regularly. - - The number of identities that can **Access secret information**: A list of roles that can access sensitive or secret information. - - **Over-provisioned active** identities that have more permissions than they currently access. - - The number of identities **With permission escalation**: A list of roles that can increase permissions. - - To view the list of all identities, select **All findings**. - -- **Resources**: A summary of the **Findings** that includes the number of resources that are: - - **Open security groups** - - **Microsoft managed keys** - - **Instances with access to S3 buckets** - - **Unencrypted S3 buckets** - - **SSE-S3 Encrypted buckets** - - **S3 Bucket accessible externally** - - - -## The PCI heat map - -The **Permission Creep Index** heat map shows the incurred risk of users with access to high-risk permissions, and provides information about: - -- Users who were given access to high-risk permissions but aren't actively using them. *High-risk permissions* include the ability to modify or delete information in the authorization system. - -- The number of resources a user has access to, otherwise known as resource reach. - -- The high-risk permissions coupled with the number of resources a user has access to produce the score seen on the chart. - - Permissions are classified as *high*, *medium*, and *low*. 
- - - **High** (displayed in red) - The score is between 68 and 100. The user has access to many high-risk permissions they aren't using, and has high resource reach. - - **Medium** (displayed in yellow) - The score is between 34 and 67. The user has access to some high-risk permissions that they use, or have medium resource reach. - - **Low** (displayed in green) - The score is between 0 and 33. The user has access to few high-risk permissions. They use all their permissions and have low resource reach. - -- The number displayed on the graph shows how many users contribute to a particular score. To view detailed data about a user, hover over the number. - - The distribution graph displays all the users who contribute to the permission creep. It displays how many users contribute to a particular score. For example, if the score from the PCI chart is 14, the graph shows how many users have a score of 14. - -- The **PCI Trend** graph shows you the historical trend of the PCI score over the last 90 days. - - To download the **PCI history report**, select **Download**. - -### View information on the heat map - -1. Select the number on the heat map bubble to display: - - - The total number of **Identities** and how many of them are in the high, medium, and low categories. - - The **PCI trend** over the last several weeks. - -1. The **Identity** section below the heat map on the left side of the page shows all the relevant findings about identities, including roles that can access secret information, roles that are inactive, over provisioned active roles, and so on. - - - To expand the full list of identities, select **All findings**. - -1. The **Resource** section below the heat map on the right side of the page shows all the relevant findings about resources. It includes unencrypted S3 buckets, open security groups, and so on. - - -## The Analytics summary - -You can also view a summary of users and activities section on the [Analytics dashboard](cloudknox-usage-analytics-home.md). This dashboard provides a snapshot of the following high-risk tasks or actions users have accessed, and displays the total number of users with the high-risk access, how many users are inactive or have unexecuted tasks, and how many users are active or have executed tasks: - -- **Users with access to high-risk tasks**: Displays the total number of users with access to a high risk task (**Total**), how many users have access but haven't used the task (**Inactive**), and how many users are actively using the task (**Active**). - -- **Users with access to delete tasks**: A subset of high-risk tasks, which displays the number of users with access to delete tasks (**Total**), how many users have the delete permissions but haven't used the permissions (**Inactive**), and how many users are actively executing the delete capability (**Active**). - -- **High-risk tasks accessible by users**: Displays all available high-risk tasks in the authorization system (**Granted**), how many high-risk tasks aren't used (**Unexecuted**), and how many high-risk tasks are used (**Executed**). - -- **Delete tasks accessible by users**: Displays all available delete tasks in the authorization system (**Granted**), how many delete tasks aren't used (**Unexecuted**), and how many delete tasks are used (**Executed**). - -- **Resources that permit high-risk tasks**: Displays the total number of resources a user has access to (**Total**), how many resources are available but not used (**Inactive**), and how many resources are used (**Active**). 
-
-- **Resources that permit delete tasks**: Displays the total number of resources that permit delete tasks (**Total**), how many resources with delete tasks aren't used (**Inactive**), and how many resources with delete tasks are used (**Active**).
-
-## Next steps
-
-- For information on how to view authorization system and account activity data on the CloudKnox Dashboard, see [View data about the activity in your authorization system](cloudknox-product-dashboard.md).
-- For an overview of the Analytics dashboard, see [An overview of the Analytics dashboard](cloudknox-usage-analytics-home.md).
-
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-remediation.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-remediation.md
deleted file mode 100644
index c2a38900d6ff2..0000000000000
--- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-remediation.md
+++ /dev/null
@@ -1,241 +0,0 @@
----
-title: View existing roles/policies and requests for permission in the Remediation dashboard in CloudKnox Permissions Management
-description: How to view existing roles/policies and requests for permission in the Remediation dashboard in CloudKnox Permissions Management.
-services: active-directory
-author: kenwith
-manager: rkarlin
-ms.service: active-directory
-ms.subservice: ciem
-ms.workload: identity
-ms.topic: overview
-ms.date: 02/23/2022
-ms.author: kenwith
----
-
-# View roles/policies and requests for permission in the Remediation dashboard
-
-> [!IMPORTANT]
-> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW.
-> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here.
-
-The **Remediation** dashboard in CloudKnox Permissions Management (CloudKnox) provides an overview of roles/policies, permissions, a list of existing requests for permissions, and requests for permissions you have made.
-
-This article provides an overview of the components of the **Remediation** dashboard.
-
-> [!NOTE]
-> To view the **Remediation** dashboard, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this dashboard, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator.
-
-> [!NOTE]
-> Microsoft Azure uses the term *role* for what other cloud providers call *policy*. CloudKnox automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both.
-
-## Display the Remediation dashboard
-
-1. On the CloudKnox home page, select the **Remediation** tab.
-
-    The **Remediation** dashboard includes six subtabs:
-
-    - **Roles/Policies**: Use this subtab to perform Create Read Update Delete (CRUD) operations on roles/policies.
-    - **Permissions**: Use this subtab to perform Read Update Delete (RUD) operations on granted permissions.
-    - **Role/Policy Template**: Use this subtab to create a role/policy template.
-    - **Requests**: Use this subtab to view approved, pending, and processed Permission on Demand (POD) requests.
-    - **My Requests**: Use this subtab to manage the lifecycle of POD requests that you created or that need your approval.
- - **Settings**: Use this subtab to select **Request Role/Policy Filters**, **Request Settings**, and **Auto-Approve** settings. - -1. Use the dropdown to select the **Authorization System Type** and **Authorization System**, and then select **Apply**. - -## View and create roles/policies - -The **Role/Policies** subtab provides the following settings that you can use to view and create a role/policy. - -- **Authorization System Type**: Displays a dropdown with authorization system types you can access, Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). -- **Authorization System**: Displays a list of authorization systems accounts you can access. -- **Policy Type**: A dropdown with available role/policy types. You can select **All**, **Custom**, **System**, or **CloudKnox Only**. -- **Policy Status**: A dropdown with available role/policy statuses. You can select **All**, **Assigned**, or **Unassigned**. -- **Policy Usage**: A dropdown with **All** or **Unused** roles/policies. -- **Apply**: Select this option to save the changes you've made. -- **Reset Filter**: Select this option to discard the changes you've made. - -The **Policy list** displays a list of existing roles/policies and the following information about each role/policy. - -- **Policy Name**: The name of the roles/policies available to you. -- **Policy Type**: **Custom**, **System**, or **CloudKnox Only** -- **Actions** - - Select **Clone** to create a duplicate copy of the role/policy. - - Select **Modify** to change the existing role/policy. - - Select **Delete** to delete the role/policy. - -Other options available to you: -- **Search**: Select this option to search for a specific role/policy. -- **Reload**: Select this option to refresh the displayed list of roles/policies. -- **Export CSV**: Select this option to export the displayed list of roles/policies as a comma-separated values (CSV) file. - - When the file is successfully exported, a message appears: **Exported Successfully.** - - - Check your email for a message from the CloudKnox Customer Success Team. This email contains a link to: - - The **Role Policy Details** report in CSV format. - - The **Reports** dashboard where you can configure how and when you can automatically receive reports. -- **Create Role/Policy**: Select this option to create a new role/policy. For more information, see [Create a role/policy](cloudknox-howto-create-role-policy.md). - - -## Add filters to permissions - -The **Permissions** subtab provides the following settings that you can use to add filters to your permissions. - -- **Authorization System Type**: Displays a dropdown with authorization system types you can access, AWS, Azure, and GCP. -- **Authorization System**: Displays a list of authorization systems accounts you can access. -- **Search For**: A dropdown from which you can select **Group**, **User**, or **Role**. -- **User Status**: A dropdown from which you can select **Any**, **Active**, or **Inactive**. -- **Privilege Creep Index** (PCI): A dropdown from which you can select a PCI rating of **Any**, **High**, **Medium**, or **Low**. -- **Task Usage**: A dropdown from which you can select **Any**, **Granted**, **Used**, or **Unused**. -- **Enter a Username**: A dropdown from which you can select a username. -- **Enter a Group Name**: A dropdown from which you can select a group name. -- **Apply**: Select this option to save the changes you've made and run the filter. -- **Reset Filter**: Select this option to discard the changes you've made. 
-- **Export CSV**: Select this option to export the displayed list of roles/policies as a comma-separated values (CSV) file. - - When the file is successfully exported, a message appears: **Exported Successfully.** - - - Check your email for a message from the CloudKnox Customer Success Team. This email contains a link to: - - The **Role Policy Details** report in CSV format. - - The **Reports** dashboard where you can configure how and when you can automatically receive reports. - - -## Create templates for roles/policies - -Use the **Role/Policy Template** subtab to create a template for roles/policies. - -1. Select: - - **Authorization System Type**: Displays a dropdown with authorization system types you can access, WS, Azure, and GCP. - - **Create Template**: Select this option to create a template. - -1. In the **Details** page, make the required selections: - - **Authorization System Type**: Select the authorization system types you want, **AWS**, **Azure**, or **GCP**. - - **Template Name**: Enter a name for your template, and then select **Next**. - -1. In the **Statements** page, complete the **Tasks**, **Resources**, **Request Conditions** and **Effect** sections. Then select **Save** to save your role/policy template. - -Other options available to you: -- **Search**: Select this option to search for a specific role/policy. -- **Reload**: Select this option to refresh the displayed list of roles/policies. - -## View requests for permission - -Use the **Requests** tab to view a list of **Pending**, **Approved**, and **Processed** requests for permissions your team members have made. - -- Select: - - **Authorization System Type**: Displays a dropdown with authorization system types you can access, AWS, Azure, and GCP. - - **Authorization System**: Displays a list of authorization systems accounts you can access. - -Other options available to you: - -- **Reload**: Select this option to refresh the displayed list of roles/policies. -- **Search**: Select this option to search for a specific role/policy. -- **Columns**: Select one or more of the following to view more information about the request: - - **Submitted By** - - **On Behalf Of** - - **Authorization System** - - **Tasks/Scope/Policies** - - **Request Date** - - **Schedule** - - **Submitted** - - **Reset to Default**: Select this option to discard your settings. - -### View pending requests - -The **Pending** table displays the following information: - -- **Summary**: A summary of the request. -- **Submitted By**: The name of the user who submitted the request. -- **On Behalf Of**: The name of the user on whose behalf the request was made. -- **Authorization System**: The authorization system the user selected. -- **Task/Scope/Policies**: The type of task/scope/policy selected. -- **Request Date**: The date when the request was made. -- **Submitted**: The period since the request was made. -- The ellipses **(...)** menu - Select the ellipses, and then select **Details**, **Approve**, or **Reject**. -- Select an option: - - **Reload**: Select this option to refresh the displayed list of roles/policies. - - **Search**: Select this option to search for a specific role/policy. - - **Columns**: From the dropdown, select the columns you want to display. - -**To return to the previous view:** - -- Select the up arrow. - -### View approved requests - -The **Approved** table displays information about the requests that have been approved. 
- -### View processed requests - -The **Processed** table displays information about the requests that have been processed. - -## View requests for permission for your approval - -Use the **My Requests** subtab to view a list of **Pending**, **Approved**, and **Processed** requests for permissions your team members have made and you must approve or reject. - -- Select: - - **Authorization System Type**: Displays a dropdown with authorization system types you can access, AWS, Azure, and GCP. - - **Authorization System**: Displays a list of authorization systems accounts you can access. - -Other options available to you: - -- **Reload**: Select this option to refresh the displayed list of roles/policies. -- **Search**: Select this option to search for a specific role/policy. -- **Columns**: Select one or more of the following to view more information about the request: - - **On Behalf Of** - - **Authorization System** - - **Tasks/Scope/Policies** - - **Request Date** - - **Schedule** - - **Reset to Default**: Select this option to discard your settings. -- **New Request**: Select this option to create a new request for permissions. For more information, see Create a request for permissions. - -### View pending requests - -The **Pending** table displays the following information: - -- **Summary**: A summary of the request. -- **Submitted By**: The name of the user who submitted the request. -- **On Behalf Of**: The name of the user on whose behalf the request was made. -- **Authorization System**: The authorization system the user selected. -- **Task/Scope/Policies**: The type of task/scope/policy selected. -- **Request Date**: The date when the request was made. -- **Submitted**: The period since the request was made. -- The ellipses **(...)** menu - Select the ellipses, and then select **Details**, **Approve**, or **Reject**. -- Select an option: - - **Reload**: Select this option to refresh the displayed list of roles/policies. - - **Search**: Select this option to search for a specific role/policy. - - **Columns**: From the dropdown, select the columns you want to display. - - -### View approved requests - -The **Approved** table displays information about the requests that have been approved. - -### View processed requests - -The **Processed** table displays information about the requests that have been processed. - -## Make setting selections for requests and auto-approval - -The **Settings** subtab provides the following settings that you can use to make setting selections to **Request Role/Policy Filters**, **Request Settings**, and **Auto-Approve** requests. - -- **Authorization System Type**: Displays a dropdown with authorization system types you can access, AWS, Azure, and GCP. -- **Authorization System**: Displays a list of authorization systems accounts you can access. -- **Reload**: Select this option to refresh the displayed list of role/policy filters. -- **Create Filter**: Select this option to create a new filter. - -## Next steps - - -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to create a role/policy, see [Create a role/policy](cloudknox-howto-create-role-policy.md). -- For information on how to clone a role/policy, see [Clone a role/policy](cloudknox-howto-clone-role-policy.md). -- For information on how to delete a role/policy, see [Delete a role/policy](cloudknox-howto-delete-role-policy.md). 
-- For information on how to modify a role/policy, see [Modify a role/policy](cloudknox-howto-modify-role-policy.md).
-- To view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md).
-- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](cloudknox-howto-attach-detach-permissions.md).
-- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](cloudknox-howto-revoke-task-readonly-status.md).
-- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](cloudknox-howto-create-approve-privilege-request.md).
-
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-tasks.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-tasks.md
deleted file mode 100644
index bd11caa9bb3b6..0000000000000
--- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-tasks.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-title: View information about active and completed tasks in CloudKnox Permissions Management
-description: How to view information about active and completed tasks in the Activities pane in CloudKnox Permissions Management.
-services: active-directory
-author: kenwith
-manager: rkarlin
-ms.service: active-directory
-ms.subservice: ciem
-ms.workload: identity
-ms.topic: overview
-ms.date: 02/23/2022
-ms.author: kenwith
----
-
-# View information about active and completed tasks
-
-> [!IMPORTANT]
-> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW.
-> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here.
-
-This article describes how to use the **CloudKnox Tasks** pane in CloudKnox Permissions Management (CloudKnox).
-
-## Display active and completed tasks
-
-1. In the CloudKnox home page, select **Tasks** (the timer icon).
-
-    The **CloudKnox Tasks** pane appears on the right of the CloudKnox home page. It has two tabs:
-    - **Active**: Displays a list of active tasks, a description of each task, and when the task was started.
-
-        If there are no active tasks, the following message displays: **There are no active tasks**.
-    - **Completed**: Displays a list of completed tasks, a description of each task, when the task was started and ended, and whether the task **Failed** or **Succeeded**.
-
-        If there are no completed tasks, the following message displays: **There are no recently completed tasks**.
-1. To close the **CloudKnox Tasks** pane, click outside the pane.
-
-## Next steps
-
-- For information on how to create a role/policy in the **Remediation** dashboard, see [Create a role/policy](cloudknox-howto-create-role-policy.md).
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-triggers.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-triggers.md deleted file mode 100644 index c0faaaaba109d..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-triggers.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: View information about activity triggers in CloudKnox Permissions Management -description: How to view information about activity triggers in the Activity triggers dashboard in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: overview -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# View information about activity triggers - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how to use the **Activity triggers** dashboard in CloudKnox Permissions Management (CloudKnox) to view information about activity alerts and triggers. - -## Display the Activity triggers dashboard - -- In the CloudKnox home page, select **Activity triggers** (the bell icon). - - The **Activity triggers** dashboard has four tabs: - - - **Activity** - - **Rule-Based Anomaly** - - **Statistical Anomaly** - - **Permission Analytics** - - Each tab has two subtabs: - - - **Alerts** - - **Alert Triggers** - -## View information about alerts - -The **Alerts** subtab in the **Activity**, **Rule-Based Anomaly**, **Statistical Anomaly**, and **Permission Analytics** tabs display the following information: - -- **Alert Name**: Select **All** alert names or specific ones. -- **Date**: Select **Last 24 hours**, **Last 2 Days**, **Last Week**, or **Custom Range.** - - - If you select **Custom Range**, also enter **From** and **To** duration settings. -- **Apply**: Select this option to activate your settings. -- **Reset Filter**: Select this option to discard your settings. -- **Reload**: Select this option to refresh the displayed information. -- **Create Activity Trigger**: Select this option to [create a new alert trigger](cloudknox-howto-create-alert-trigger.md). -- The **Alerts** table displays a list of alerts with the following information: - - **Alerts**: The name of the alert. - - **# of users subscribed**: The number of users who have subscribed to the alert. - - **Created By**: The name of the user who created the alert. - - **Modified By**: The name of the user who modified the alert. - -The **Rule-Based Anomaly** tab and the **Statistical Anomaly** tab both have one more option: - -- **Columns**: Select the columns you want to display: **Task**, **Resource**, and **Identity**. - - To return to the system default settings, select **Reset to default**. - -## View information about alert triggers - -The **Alert Triggers** subtab in the **Activity**, **Rule-Based Anomaly**, **Statistical Anomaly**, and **Permission Analytics** tab displays the following information: - -- **Status**: Select the alert status you want to display: **All**, **Activated**, or **Deactivated**. -- **Apply**: Select this option to activate your settings. -- **Reset Filter**: Select this option to discard your settings. 
-- **Reload**: Select **Reload** to refresh the displayed information. -- **Create Activity Trigger**: Select this option to [create a new alert trigger](cloudknox-howto-create-alert-trigger.md). -- The **Triggers** table displays a list of triggers with the following information: - - **Alerts**: The name of the alert. - - **# of users subscribed**: The number of users who have subscribed to the alert. - - **Created By**: The name of the user who created the alert. - - **Modified By**: The name of the user who modified the alert. - - - - - - -## Next steps - -- For information on activity alerts and alert triggers, see [Create and view activity alerts and alert triggers](cloudknox-howto-create-alert-trigger.md). -- For information on rule-based anomalies and anomaly triggers, see [Create and view rule-based anomalies and anomaly triggers](cloudknox-product-rule-based-anomalies.md). -- For information on finding outliers in identity's behavior, see [Create and view statistical anomalies and anomaly triggers](cloudknox-product-statistical-anomalies.md). -- For information on permission analytics triggers, see [Create and view permission analytics triggers](cloudknox-product-permission-analytics.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-user-management.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-user-management.md deleted file mode 100644 index 010ce9de7b7e1..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-user-management.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Manage users and groups with the User management dashboard in CloudKnox Permissions Management -description: How to manage users and groups in the User management dashboard in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: overview -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# Manage users and groups with the User management dashboard - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how to use the CloudKnox Permissions Management (CloudKnox) **User management** dashboard to view and manage users and groups. - -**To display the User management dashboard**: - -- In the upper right of the CloudKnox home page, select **User** (your initials) in the upper right of the screen, and then select **User management.** - - The **User Management** dashboard has two tabs: - - - **Users**: Displays information about registered users. - - **Groups**: Displays information about groups. - -## Manage users - -Use the **Users** tab to display the following information about users: - -- **Name** and **Email Address**: The user's name and email address. -- **Joined On**: The date the user registered on the system. -- **Recent Activity**: The date the user last used their permissions to access the system. -- The ellipses **(...)** menu: Select the ellipses, and then select **View Permissions** to open the **View User Permission** box. 
- - - To view details about the user's permissions, select one of the following options: - - **Admin for all Authorization System Types** provides **View**, **Control**, and **Approve** permissions for all authorization system types. - - **Admin for selected Authorization System Types** provides **View**, **Control**, and **Approve** permissions for selected authorization system types. - - **Custom** provides **View**, **Control**, and **Approve** permissions for the authorization system types you select. - -You can also select the following options: - -- **Reload**: Select this option to refresh the information displayed in the **User** table. -- **Search**: Enter a name or email address to search for a specific user. - -## Manage groups - -Use the **Groups** tab to display the following information about groups: - -- **Name**: Displays the registered user's name and email address. -- **Permissions**: - - The **Authorization Systems** and the type of permissions the user has been granted: **Admin for all Authorization System Types**, **Admin for selected Authorization System Types**, or **Custom**. - - Information about the **Viewer**, **Controller**, **Approver**, and **Requestor**. -- **Modified By**: The email address of the user who modified the group. -- **Modified On**: The date the user last modified the group. - -- The ellipses **(...)** menu: Select the ellipses to: - - - **View Permissions**: Select this option to view details about the group's permissions, and then select one of the following options: - - **Admin for all Authorization System Types** provides **View**, **Control**, and **Approve** permissions for all authorization system types. - - **Admin for selected Authorization System Types** provides **View**, **Control**, and **Approve** permissions for selected authorization system types. - - **Custom** provides **View**, **Control**, and **Approve** permissions for specific authorization system types that you select. - - - **Edit Permissions**: Select this option to modify the group's permissions. - - **Delete**: Select this option to delete the group's permissions. - - The **Delete Permission** box asks you to confirm that you want to delete the group. - - Select **Delete** if you want to delete the group, **Cancel** to discard your changes. - - -You can also select the following options: - -- **Reload**: Select this option to refresh the information displayed in the **User** table. -- **Search**: Enter a name or email address to search for a specific user. -- **Filters**: Select the authorization systems and accounts you want to display. -- **Create Permission**: Create a group and set up its permissions. For more information, see [Create group-based permissions](cloudknox-howto-create-group-based-permissions.md) - - - -## Next steps - -- For information about how to view information about active and completed tasks, see [View information about active and completed tasks](cloudknox-ui-tasks.md). -- For information about how to view personal and organization information, see [View personal and organization information](cloudknox-product-account-settings.md). -- For information about how to select group-based permissions settings, see [Select group-based permissions settings](cloudknox-howto-create-group-based-permissions.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-access-keys.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-access-keys.md deleted file mode 100644 index 2d8b54bda2202..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-access-keys.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -title: View analytic information about access keys in CloudKnox Permissions Management -description: How to view analytic information about access keys in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# View analytic information about access keys - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -The **Analytics** dashboard in CloudKnox Permissions Management (CloudKnox) provides details about identities, resources, and tasks that you can use make informed decisions about granting permissions, and reducing risk on unused permissions. - -- **Users**: Tracks assigned permissions and usage of various identities. -- **Groups**: Tracks assigned permissions and usage of the group and the group members. -- **Active Resources**: Tracks active resources (used in the last 90 days). -- **Active Tasks**: Tracks active tasks (performed in the last 90 days). -- **Access Keys**: Tracks the permission usage of access keys for a given user. -- **Serverless Functions**: Tracks assigned permissions and usage of the serverless functions. - -This article describes how to view usage analytics about access keys. - -## Create a query to view access keys - -When you select **Access keys**, the **Analytics** dashboard provides a high-level overview of tasks used by various identities. - -1. On the main **Analytics** dashboard, select **Access Keys** from the drop-down list at the top of the screen. - - The following components make up the **Access Keys** dashboard: - - - **Authorization System Type**: Select the authorization you want to use: Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP). - - **Authorization System**: Select from a **List** of accounts and **Folders***. - - **Key Status**: Select **All**, **Active**, or **Inactive**. - - **Key Activity State**: Select **All**, how long the access key has been used, or **Not Used**. - - **Key Age**: Select **All** or how long ago the access key was created. - - **Task Type**: Select **All** tasks, **High Risk Tasks** or, for a list of tasks where users have deleted data, select **Delete Tasks**. - - **Search**: Enter criteria to find specific tasks. -1. Select **Apply** to display the criteria you've selected. - - Select **Reset Filter** to discard your changes. - - -## View the results of your query - -The **Access Keys** table displays the results of your query. - -- **Access Key ID**: Provides the ID for the access key. - - To view details about the access keys, select the down arrow to the left of the ID. -- The **Owner** name. -- The **Account** number. 
-- The **Permission Creep Index (PCI)**: Provides the following information: - - **Index**: A numeric value assigned to the PCI. - - **Since**: How many days the PCI value has been at the displayed level. -- **Tasks** Displays the number of **Granted** and **Executed** tasks. -- **Resources**: The number of resources used. -- **Access Key Age**: How old the access key is, in days. -- **Last Used**: How long ago the access key was last accessed. - -## Apply filters to your query - -There are many filter options within the **Active Tasks** screen, including filters by **Authorization System**, filters by **User** and filters by **Task**. -Filters can be applied in one, two, or all three categories depending on the type of information you're looking for. - -### Apply filters by authorization system type - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - - -### Apply filters by authorization system - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select accounts from a **List** of accounts and **Folders**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - -### Apply filters by key status - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. -1. From the **Key Status** dropdown, select the type of key: **All**, **Active**, or **Inactive**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - -### Apply filters by key activity status - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. -1. From the **Key Activity State** dropdown, select **All**, the duration for how long the access key has been used, or **Not Used**. - -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - -### Apply filters by key age - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. -1. From the **Key Age** dropdown, select **All** or how long ago the access key was created. - -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - -### Apply filters by task type - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. -1. From the **Task Type** dropdown, select **All** tasks, **High Risk Tasks** or, for a list of tasks where users have deleted data, select **Delete tasks**. -1. 
Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - - - -## Export the results of your query - -- To view a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV** or **CSV (Detailed)**. - -## Next steps - -- To view active tasks, see [View usage analytics about active tasks](cloudknox-usage-analytics-active-tasks.md). -- To view assigned permissions and usage by users, see [View usage analytics about users](cloudknox-usage-analytics-users.md). -- To view assigned permissions and usage of the group and the group members, see [View usage analytics about groups](cloudknox-usage-analytics-groups.md). -- To view active resources, see [View usage analytics about active resources](cloudknox-usage-analytics-active-resources.md). -- To view assigned permissions and usage of the serverless functions, see [View usage analytics about serverless functions](cloudknox-usage-analytics-serverless-functions.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-active-resources.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-active-resources.md deleted file mode 100644 index e42aa721e001a..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-active-resources.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: View analytic information about active resources in CloudKnox Permissions Management -description: How to view usage analytics about active resources in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# View analytic information about active resources - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -The **Analytics** dashboard in CloudKnox Permissions Management (CloudKnox) collects detailed information, analyzes, reports on, and visualizes data about all identity types. System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: - -- **Users**: Tracks assigned permissions and usage of various identities. -- **Groups**: Tracks assigned permissions and usage of the group and the group members. -- **Active Resources**: Tracks active resources (used in the last 90 days). -- **Active Tasks**: Tracks active tasks (performed in the last 90 days). -- **Access Keys**: Tracks the permission usage of access keys for a given user. -- **Serverless Functions**: Tracks assigned permissions and usage of the serverless functions. - -This article describes how to view usage analytics about active resources. - -## Create a query to view active resources - -1. On the main **Analytics** dashboard, select **Active Resources** from the drop-down list at the top of the screen. - - The dashboard only lists tasks that are active. The following components make up the **Active Resources** dashboard: -1. 
From the dropdowns, select: - - **Authorization System Type**: The authorization you want to use: Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP). - - **Authorization System**: The **List** of accounts and **Folders** you want to include. - - **Tasks Type**: Select **All** tasks, **High Risk Tasks** or, for a list of tasks where users have deleted data, select **Delete Tasks**. - - **Service Resource Type**: The service resource type. - - **Search**: Enter criteria to find specific tasks. - -1. Select **Apply** to display the criteria you've selected. - - Select **Reset Filter** to discard your changes. - - -## View the results of your query - -The **Active Resources** table displays the results of your query: - -- **Resource Name**: Provides the name of the task. - - To view details about the task, select the down arrow. -- **Account**: The name of the account. -- **Resources Type**: The type of resources used, for example, **bucket** or **key**. -- **Tasks**: Displays the number of **Granted** and **Executed** tasks. -- **Number of Users**: The number of users with access and accessed. -- Select the ellipses **(...)** and select **Tags** to add a tag. - -## Add a tag to an active resource - -1. Select the ellipses **(...)** and select **Tags**. -1. From the **Select a Tag** dropdown, select a tag. -1. To create a custom tag select **New Custom Tag**, add a tag name, and then select **Create**. -1. In the **Value (Optional)** box, enter a value. -1. Select the ellipses **(...)** to select **Advanced Save** options, and then select **Save**. -1. To add the tag to the serverless function, select **Add Tag**. - - -## Apply filters to your query - -There are many filter options within the **Active Resources** screen, including filters by **Authorization System**, filters by **User** and filters by **Task**. -Filters can be applied in one, two, or all three categories depending on the type of information you're looking for. - -### Apply filters by authorization system - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - - -### Apply filters by authorization system type - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - -### Apply filters by task type - -You can filter user details by type of user, user role, app, or service used, or by resource. - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. -1. From the **Task Type**, select the type of user: **All**, **User**, **Role/App/Service a/c**, or **Resource**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - - -### Apply filters by service resource type - -You can filter user details by type of user, user role, app, or service used, or by resource. - -1. 
From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. -1. From the **Service Resource Type**, select the type of service resource. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - -## Export the results of your query - -- To view a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV**. - - -## Next steps - -- To track active tasks, see [View usage analytics about active tasks](cloudknox-usage-analytics-active-tasks.md). -- To track assigned permissions and usage of users, see [View usage analytics about users](cloudknox-usage-analytics-users.md). -- To track assigned permissions and usage of the group and the group members, see [View usage analytics about groups](cloudknox-usage-analytics-groups.md). -- To track the permission usage of access keys for a given user, see [View usage analytics about access keys](cloudknox-usage-analytics-access-keys.md). -- To track assigned permissions and usage of the serverless functions, see [View usage analytics about serverless functions](cloudknox-usage-analytics-serverless-functions.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-active-tasks.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-active-tasks.md deleted file mode 100644 index e0e6679f637f0..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-active-tasks.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: View analytic information about active tasks in CloudKnox Permissions Management -description: How to view analytic information about active tasks in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# View analytic information about active tasks - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -The **Analytics** dashboard in CloudKnox Permissions Management (CloudKnox) collects detailed information, analyzes, reports on, and visualizes data about all identity types. System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: - -- **Users**: Tracks assigned permissions and usage of various identities. -- **Groups**: Tracks assigned permissions and usage of the group and the group members. -- **Active Resources**: Tracks active resources (used in the last 90 days). -- **Active Tasks**: Tracks active tasks (performed in the last 90 days). -- **Access Keys**: Tracks the permission usage of access keys for a given user. -- **Serverless Functions**: Tracks assigned permissions and usage of the serverless functions. - -This article describes how to view usage analytics about active tasks. 
- -## Create a query to view active tasks - -When you select **Active Tasks**, the **Analytics** dashboard provides a high-level overview of tasks used by various identities. - -1. On the main **Analytics** dashboard, select **Active Tasks** from the drop-down list at the top of the screen. - - The dashboard only lists tasks that are active. The following components make up the **Active Tasks** dashboard: - - - **Authorization System Type**: Select the authorization you want to use: Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP). - - **Authorization System**: Select from a **List** of accounts and **Folders***. - - **Tasks Type**: Select **All** tasks, **High Risk tasks** or, for a list of tasks where users have deleted data, select **Delete Tasks**. - - **Search**: Enter criteria to find specific tasks. - -1. Select **Apply** to display the criteria you've selected. - - Select **Reset Filter** to discard your changes. - - -## View the results of your query - -The **Active Tasks** table displays the results of your query. - -- **Task Name**: Provides the name of the task. - - To view details about the task, select the down arrow in the table. - - - A **Normal Task** icon displays to the left of the task name if the task is normal (that is, not risky). - - A **Deleted Task** icon displays to the left of the task name if the task involved deleting data. - - A **High-Risk Task** icon displays to the left of the task name if the task is high-risk. - -- **Performed on (resources)**: The number of resources on which the task was used. - -- **Number of Users**: Displays how many users performed tasks. The tasks are organized into the following columns: - - **With Access**: Displays the number of users that have access to the task but haven't accessed it. - - **Accessed**: Displays the number of users that have accessed the task. - - -## Apply filters to your query - -There are many filter options within the **Active Tasks** screen, including **Authorization System**, **User**, and **Task**. -Filters can be applied in one, two, or all three categories depending on the type of information you're looking for. - -### Apply filters by authorization system type - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - -### Apply filters by authorization system - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select accounts from a **List** of accounts and **Folders**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - - -### Apply filters by task type - -You can filter user details by type of user, user role, app, or service used, or by resource. - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. -1. From the **Task Type** dropdown, select the type of tasks: **All**, **High Risk Tasks**, or **Delete Tasks**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. 
- - -## Export the results of your query - -- To view a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV**. - -## Next steps - -- To view assigned permissions and usage by users, see [View analytic information about users](cloudknox-usage-analytics-users.md). -- To view assigned permissions and usage of the group and the group members, see [View analytic information about groups](cloudknox-usage-analytics-groups.md). -- To view active resources, see [View analytic information about active resources](cloudknox-usage-analytics-active-resources.md). -- To view the permission usage of access keys for a given user, see [View analytic information about access keys](cloudknox-usage-analytics-access-keys.md). -- To view assigned permissions and usage of the serverless functions, see [View analytic information about serverless functions](cloudknox-usage-analytics-serverless-functions.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-groups.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-groups.md deleted file mode 100644 index f53777999454d..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-groups.md +++ /dev/null @@ -1,154 +0,0 @@ ---- -title: View analytic information about groups in CloudKnox Permissions Management -description: How to view analytic information about groups in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# View analytic information about groups - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -The **Analytics** dashboard in CloudKnox Permissions Management (CloudKnox) collects detailed information, analyzes, reports on, and visualizes data about all identity types. System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: - -- **Users**: Tracks assigned permissions and usage of various identities. -- **Groups**: Tracks assigned permissions and usage of the group and the group members. -- **Active Resources**: Tracks active resources (used in the last 90 days). -- **Active Tasks**: Tracks active tasks (performed in the last 90 days). -- **Access Keys**: Tracks the permission usage of access keys for a given user. -- **Serverless Functions**: Tracks assigned permissions and usage of the serverless functions. - -This article describes how to view usage analytics about groups. - -## Create a query to view groups - -When you select **Groups**, the **Usage Analytics** dashboard provides a high-level overview of groups. - -1. On the main **Analytics** dashboard, select **Groups** from the drop-down list at the top of the screen. - - The following components make up the **Groups** dashboard: - - - **Authorization System Type**: Select the authorization you want to use: Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP). 
- - **Authorization System**: Select from a **List** of accounts and **Folders**. - - **Group Type**: Select **All**, **ED**, or **Local**. - - **Group Activity Status**: Select **All**, **Active**, or **Inactive**. - - **Tasks Type**: Select **All**, **High Risk Tasks**, or **Delete Tasks** - - **Search**: Enter group name to find specific group. -1. To display the criteria you've selected, select **Apply**. - - **Reset Filter**: Select to discard your changes. - - -## View the results of your query - -The **Groups** table displays the results of your query: - -- **Group Name**: Provides the name of the group. - - To view details about the group, select the down arrow. -- A **Group Type** icon displays to the left of the group name to describe the type of group (**ED** or **Local**). -- The **Domain/Account** name. -- The **Permission Creep Index (PCI)**: Provides the following information: - - **Index**: A numeric value assigned to the PCI. - - **Since**: How many days the PCI value has been at the displayed level. -- **Tasks**: Displays the number of **Granted** and **Executed** tasks. -- **Resources**: The number of resources used. -- **Users**: The number of users who accessed the group. -- Select the ellipses **(...)** and select **Tags** to add a tag. - -## Add a tag to a group - -1. Select the ellipses **(...)** and select **Tags**. -1. From the **Select a Tag** dropdown, select a tag. -1. To create a custom tag select **New Custom Tag**, add a tag name, and then select **Create**. -1. In the **Value (Optional)** box, enter a value. -1. Select the ellipses **(...)** to select **Advanced Save** options, and then select **Save**. -1. To add the tag to the serverless function, select **Add Tag**. - -## View detailed information about a group - -1. Select the down arrow to the left of the **Group Name**. - - The list of **Tasks** organized by **Unused** and **Used** displays. - -1. Select the arrow to the left of the group name to view details about the task. -1. Select **Information** (**i**) to view when the task was last used. -1. From the **Tasks** dropdown, select **All Tasks**, **High Risk Tasks**, and **Delete Tasks**. -1. The pane on the right displays a list of **Users**, **Policies** for **AWS** and **Roles** for **GCP or AZURE**, and **Tags**. - -## Apply filters to your query - -There are many filter options within the **Groups** screen, including filters by **Authorization System Type**, **Authorization System**, **Group Type**, **Group Activity Status**, and **Tasks Type**. -Filters can be applied in one, two, or all three categories depending on the type of information you're looking for. - -### Apply filters by authorization system type - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - -### Apply filters by authorization system - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select accounts from a **List** of accounts and **Folders**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - - -### Apply filters by group type - -You can filter user details by type of user, user role, app, or service used, or by resource. - -1. 
From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. -1. From the **Group Type** dropdown, select the type of user: **All**, **ED**, or **Local**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - -### Apply filters by group activity status - -You can filter user details by type of user, user role, app, or service used, or by resource. - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. -1. From the **Group Activity Status** dropdown, select the type of user: **All**, **Active**, or **Inactive**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - -### Apply filters by tasks type - -You can filter user details by type of user, user role, app, or service used, or by resource. - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. -1. From the **Tasks Type** dropdown, select the type of user: **All**, **High Risk Tasks**, or **Delete Tasks**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - - -## Export the results of your query - -- To view a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV**. -- To view a list of members of the groups in your query, select **Export**, and then select **Memberships**. - - - -## Next steps - -- To view active tasks, see [View analytic information about active tasks](cloudknox-usage-analytics-active-tasks.md). -- To view assigned permissions and usage by users, see [View analytic information about users](cloudknox-usage-analytics-users.md). -- To view active resources, see [View analytic information about active resources](cloudknox-usage-analytics-active-resources.md). -- To view the permission usage of access keys for a given user, see [View analytic information about access keys](cloudknox-usage-analytics-access-keys.md). -- To view assigned permissions and usage of the serverless functions, see [View analytic information about serverless functions](cloudknox-usage-analytics-serverless-functions.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-home.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-home.md deleted file mode 100644 index 055a106e941f0..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-home.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: View analytic information with the Analytics dashboard in CloudKnox Permissions Management -description: How to use the Analytics dashboard in CloudKnox Permissions Management to view details about users, groups, active resources, active tasks, access keys, and serverless functions. 
-services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# View analytic information with the Analytics dashboard - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article provides a brief overview of the Analytics dashboard in CloudKnox Permissions Management (CloudKnox), and the type of analytic information it provides for Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP). - -## Display the Analytics dashboard - -- From the CloudKnox home page, select the **Analytics** tab. - - The **Analytics** dashboard displays detailed information about: - - - **Users**: Tracks assigned permissions and usage by users. For more information, see [View analytic information about users](cloudknox-usage-analytics-users.md). - - - **Groups**: Tracks assigned permissions and usage of the group and the group members. For more information, see [View analytic information about groups](cloudknox-usage-analytics-groups.md). - - - **Active Resources**: Tracks resources that have been used in the last 90 days. For more information, see [View analytic information about active resources](cloudknox-usage-analytics-active-resources.md). - - - **Active Tasks**: Tracks tasks that have been performed in the last 90 days. For more information, see [View analytic information about active tasks](cloudknox-usage-analytics-active-tasks.md). - - - **Access Keys**: Tracks the permission usage of access keys for a given user. For more information, see [View analytic information about access keys](cloudknox-usage-analytics-access-keys.md). - - - **Serverless Functions**: Tracks assigned permissions and usage of the serverless functions for AWS only. For more information, see [View analytic information about serverless functions](cloudknox-usage-analytics-serverless-functions.md). - - System administrators can use this information to make decisions about granting permissions and reducing risk on unused permissions. - - - -## Next steps - -- To view active tasks, see [View analytic information about active tasks](cloudknox-usage-analytics-active-tasks.md). -- To view assigned permissions and usage by users, see [View analytic information about users](cloudknox-usage-analytics-users.md). -- To view assigned permissions and usage of the group and the group members, see [View analytic information about groups](cloudknox-usage-analytics-groups.md). -- To view active resources, see [View analytic information about active resources](cloudknox-usage-analytics-active-resources.md). -- To view the permission usage of access keys for a given user, see [View analytic information about access keys](cloudknox-usage-analytics-access-keys.md). -- To view assigned permissions and usage of the serverless functions, see [View analytic information about serverless functions](cloudknox-usage-analytics-serverless-functions.md). 
\ No newline at end of file diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-serverless-functions.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-serverless-functions.md deleted file mode 100644 index 976ff2b442a0b..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-serverless-functions.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: View analytic information about serverless functions in CloudKnox Permissions Management -description: How to view analytic information about serverless functions in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# View analytic information about serverless functions - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -The **Analytics** dashboard in CloudKnox Permissions Management (CloudKnox) collects detailed information, analyzes, reports on, and visualizes data about all identity types. System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: - -- **Users**: Tracks assigned permissions and usage of various identities. -- **Groups**: Tracks assigned permissions and usage of the group and the group members. -- **Active Resources**: Tracks active resources (used in the last 90 days). -- **Active Tasks**: Tracks active tasks (performed in the last 90 days). -- **Access Keys**: Tracks the permission usage of access keys for a given user. -- **Serverless Functions**: Tracks assigned permissions and usage of the serverless functions. - -This article describes how to view usage analytics about serverless functions. - -## Create a query to view serverless functions - -When you select **Serverless Functions**, the **Analytics** dashboard provides a high-level overview of tasks used by various identities. - -1. On the main **Analytics** dashboard, select **Serverless Functions** from the dropdown list at the top of the screen. - - The following components make up the **Serverless Functions** dashboard: - - - **Authorization System Type**: Select the authorization you want to use: Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP). - - **Authorization System**: Select from a **List** of accounts and **Folders**. - - **Search**: Enter criteria to find specific tasks. -1. Select **Apply** to display the criteria you've selected. - - Select **Reset Filter** to discard your changes. - - -## View the results of your query - -The **Serverless Functions** table displays the results of your query. - -- **Function Name**: Provides the name of the serverless function. - - To view details about a serverless function, select the down arrow to the left of the function name. -- A **Function Type** icon displays to the left of the function name to describe the type of serverless function, for example **Lambda function**. -- The **Permission Creep Index (PCI)**: Provides the following information: - - **Index**: A numeric value assigned to the PCI. 
- - **Since**: How many days the PCI value has been at the displayed level. -- **Tasks**: Displays the number of **Granted** and **Executed** tasks. -- **Resources**: The number of resources used. -- **Last Activity On**: The date the function was last accessed. -- Select the ellipses **(...)**, and then select **Tags** to add a tag. - -## Add a tag to a serverless function - -1. Select the ellipses **(...)** and select **Tags**. -1. From the **Select a Tag** dropdown, select a tag. -1. To create a custom tag select **New Custom Tag**, add a tag name, and then select **Create**. -1. In the **Value (Optional)** box, enter a value. -1. Select the ellipses **(...)** to select **Advanced Save** options, and then select **Save**. -1. To add the tag to the serverless function, select **Add Tag**. - -## View detailed information about a serverless function - -1. Select the down arrow to the left of the function name to display the following: - - - A list of **Tasks** organized by **Used** and **Unused**. - - **Versions**, if a version is available. - -1. Select the arrow to the left of the task name to view details about the task. -1. Select **Information** (**i**) to view when the task was last used. -1. From the **Tasks** dropdown, select **All Tasks**, **High Risk Tasks**, and **Delete Tasks**. - - -## Apply filters to your query - -You can filter the **Serverless Functions** results by **Authorization System Type** and **Authorization System**. - -### Apply filters by authorization system type - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - - -### Apply filters by authorization system - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select accounts from a **List** of accounts and **Folders**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - - - -## Next steps - -- To view active tasks, see [View usage analytics about active tasks](cloudknox-usage-analytics-active-tasks.md). -- To view assigned permissions and usage by users, see [View analytic information about users](cloudknox-usage-analytics-users.md). -- To view assigned permissions and usage of the group and the group members, see [View analytic information about groups](cloudknox-usage-analytics-groups.md). -- To view active resources, see [View analytic information about active resources](cloudknox-usage-analytics-active-resources.md). -- To view the permission usage of access keys for a given user, see [View analytic information about access keys](cloudknox-usage-analytics-access-keys.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-users.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-users.md deleted file mode 100644 index 43aea761580c9..0000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-users.md +++ /dev/null @@ -1,166 +0,0 @@ ---- -title: View analytic information about users in CloudKnox Permissions Management -description: How to view analytic information about users in CloudKnox Permissions Management. 
-services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# View analytic information about users - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -The **Analytics** dashboard in CloudKnox Permissions Management (CloudKnox) collects detailed information, analyzes, reports on, and visualizes data about all identity types. System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: - -- **Users**: Tracks assigned permissions and usage of various identities. -- **Groups**: Tracks assigned permissions and usage of the group and the group members. -- **Active Resources**: Tracks active resources (used in the last 90 days). -- **Active Tasks**: Tracks active tasks (performed in the last 90 days). -- **Access Keys**: Tracks the permission usage of access keys for a given user. -- **Serverless Functions**: Tracks assigned permissions and usage of the serverless functions. - -This article describes how to view usage analytics about users. - -## Create a query to view users - -When you select **Users**, the **Analytics** dashboard provides a high-level overview of tasks used by various identities. - -1. On the main **Analytics** dashboard, select **Users** from the drop-down list at the top of the screen. - - The following components make up the **Users** dashboard: - - - **Authorization System Type**: Select the authorization you want to use: Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP). - - **Authorization System**: Select from a **List** of accounts and **Folders***. - - **Identity Type**: Select **All** identity types, **User**, **Role/App/Service a/c** or **Resource**. - - **Search**: Enter criteria to find specific tasks. -1. Select **Apply** to display the criteria you've selected. - - Select **Reset filter** to discard your changes. - - -## View the results of your query - -The **Identities** table displays the results of your query. - -- **Name**: Provides the name of the group. - - To view details about the group, select the down arrow. -- The **Domain/Account** name. -- The **Permission Creep Index (PCI)**: Provides the following information: - - **Index**: A numeric value assigned to the PCI. - - **Since**: How many days the PCI value has been at the displayed level. -- **Tasks**: Displays the number of **Granted** and **Executed** tasks. -- **Resources**: The number of resources used. -- **User Groups**: The number of users who accessed the group. -- **Last Activity On**: The date the function was last accessed. -- The ellipses **(...)**: Select **Tags** to add a tag. - - If you're using AWS, another selection is available from the ellipses menu: **Auto Remediate**. You can use this option to remediate your results automatically. - -## Add a tag to a user - -1. Select the ellipses **(...)** and select **Tags**. -1. From the **Select a Tag** dropdown, select a tag. -1. To create a custom tag select **New Custom Tag**, add a tag name, and then select **Create**. -1. In the **Value (Optional)** box, enter a value. -1. 
Select the ellipses **(...)** to select **Advanced Save** options, and then select **Save**. -1. To add the tag to the serverless function, select **Add Tag**. - -## Set the auto-remediate option (AWS only) - -- Select the ellipses **(...)** and select **Auto Remediate**. - - A message displays to confirm that your remediation settings are automatically updated. - -## Apply filters to your query - -There are many filter options within the **Users** screen, including filters by **Authorization System**, **Identity Type**, and **Identity State**. -Filters can be applied in one, two, or all three categories depending on the type of information you're looking for. - -### Apply filters by authorization system type - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - - -### Apply filters by authorization system - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select accounts from a **List** of accounts and **Folders**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset filter** to discard your changes. - -### Apply filters by identity type - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. -1. From the **Identity Type**, select the type of user: **All**, **User**, **Role/App/Service a/c**, or **Resource**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - -### Apply filters by identity subtype - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. -1. From the **Identity Subtype**, select the type of user: **All**, **ED**, **Local**, or **Cross Account**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset filter** to discard your changes. - -### Apply filters by identity state - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. -1. From the **Identity State**, select the type of user: **All**, **Active**, or **Inactive**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - - -### Apply filters by identity filters - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. -1. From the **Identity Type**, select: **Risky** or **Incl. in PCI Calculation Only**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. 
- - -### Apply filters by task type - -You can filter user details by type of user, user role, app, or service used, or by resource. - -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. -1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. -1. From the **Task Type**, select the type of user: **All** or **High Risk Tasks**. -1. Select **Apply** to run your query and display the information you selected. - - Select **Reset Filter** to discard your changes. - - -## Export the results of your query - -- To export a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV**. -- To export the data in a detailed comma-separated values (CSV) file format, select **Export** and then select **CSV (Detailed)**. -- To export a report of user permissions, select **Export** and then select **Permissions**. - - -## Next steps - -- To view active tasks, see [View analytic information about active tasks](cloudknox-usage-analytics-active-tasks.md). -- To view assigned permissions and usage of the group and the group members, see [View analytic information about groups](cloudknox-usage-analytics-groups.md). -- To view active resources, see [View analytic information about active resources](cloudknox-usage-analytics-active-resources.md). -- To view the permission usage of access keys for a given user, see [View analytic information about access keys](cloudknox-usage-analytics-access-keys.md). -- To view assigned permissions and usage of the serverless functions, see [View analytic information about serverless functions](cloudknox-usage-analytics-serverless-functions.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/faqs.md b/articles/active-directory/cloud-infrastructure-entitlement-management/faqs.md new file mode 100644 index 0000000000000..d97c4fafe1cec --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/faqs.md @@ -0,0 +1,158 @@ +--- +title: Frequently asked questions (FAQs) about Permissions Management +description: Frequently asked questions (FAQs) about Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: faq +ms.date: 04/20/2022 +ms.author: kenwith +--- + +# Frequently asked questions (FAQs) + +> [!IMPORTANT] +> Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +> [!NOTE] +> The Permissions Management PREVIEW is currently not available for tenants hosted in the European Union (EU). + + +This article answers frequently asked questions (FAQs) about Permissions Management. + +## What's Permissions Management? + +Permissions Management is a cloud infrastructure entitlement management (CIEM) solution that provides comprehensive visibility into permissions assigned to all identities. For example, over-privileged workload and user identities, actions, and resources across multi-cloud infrastructures in Microsoft Azure, Amazon Web Services (AWS), and Google Cloud Platform (GCP). Permissions Management detects, automatically right-sizes, and continuously monitors unused and excessive permissions. 
It deepens the Zero Trust security strategy by augmenting the least privilege access principle. + + +## What are the prerequisites to use Permissions Management? + +Permissions Management supports data collection from AWS, GCP, and/or Microsoft Azure. For data collection and analysis, customers are required to have an Azure Active Directory (Azure AD) account to use Permissions Management. + +## Can a customer use Permissions Management if they have other identities with access to their IaaS platform that aren't yet in Azure AD (for example, if part of their business has Okta or AWS Identity & Access Management (IAM))? + +Yes, a customer can detect, mitigate, and monitor the risk of 'backdoor' accounts that are local to AWS IAM or GCP, or that come from other identity providers such as Okta. + +## Where can customers access Permissions Management? + +Customers can access the Permissions Management interface with a link from the Azure AD extension in the Azure portal. + +## Can non-cloud customers use Permissions Management on-premises? + +No, Permissions Management is a hosted cloud offering. + +## Can non-Azure customers use Permissions Management? + +Yes, non-Azure customers can use our solution. Permissions Management is a multi-cloud solution, so even customers who have no subscription to Azure can benefit from it. + +## Is Permissions Management available for tenants hosted in the European Union (EU)? + +No, the Permissions Management PREVIEW is currently not available for tenants hosted in the European Union (EU). + +## If I'm already using Azure AD Privileged Identity Management (PIM) for Azure, what value does Permissions Management provide? + +Permissions Management complements Azure AD PIM. Azure AD PIM provides just-in-time access for admin roles in Azure (as well as Microsoft Online Services and apps that use groups), while Permissions Management allows multi-cloud discovery, remediation, and monitoring of privileged access across Azure, AWS, and GCP. + +## What languages does Permissions Management support? + +Permissions Management currently supports English. + +## What public cloud infrastructures are supported by Permissions Management? + +Permissions Management currently supports the three major public clouds: Amazon Web Services (AWS), Google Cloud Platform (GCP), and Microsoft Azure. + +## Does Permissions Management support hybrid environments? + +Permissions Management currently doesn't support hybrid environments. + +## What types of identities are supported by Permissions Management? + +Permissions Management supports user identities (for example, employees, customers, external partners) and workload identities (for example, virtual machines, containers, web apps, serverless functions). + + + +## Is Permissions Management available in Government Cloud? + +No, Permissions Management is currently not available in Government clouds. + +## Is Permissions Management available for sovereign clouds? + +No, Permissions Management is currently not available in sovereign clouds. + +## How does Permissions Management collect insights about permissions usage? + +Permissions Management has a data collector that collects access permissions assigned to various identities, activity logs, and resource metadata. This provides full visibility into the permissions granted to all identities to access resources, along with details on how those granted permissions are used. + +## How does Permissions Management evaluate cloud permissions risk?
+ +Permissions Management offers granular visibility into all identities and their permissions granted versus used, across cloud infrastructures to uncover any action performed by any identity on any resource. This isn't limited to user identities; it also covers workload identities such as virtual machines, access keys, containers, and scripts. The dashboard gives an overview of the permission profile to locate the riskiest identities and resources. + +## What is the Permissions Creep Index? + +The Permissions Creep Index (PCI) is a quantitative measure of risk associated with an identity or role, determined by comparing permissions granted versus permissions exercised. It allows users to instantly evaluate the level of risk associated with the number of unused or over-provisioned permissions across identities and resources. It measures how much damage identities can cause based on the permissions they have. + +## How can customers use Permissions Management to delete unused or excessive permissions? + +Permissions Management allows users to right-size excessive permissions and automate least privilege policy enforcement with just a few clicks. The solution continuously analyzes historical permission usage data for each identity and gives customers the ability to right-size permissions of that identity to only the permissions that are being used for day-to-day operations. All unused and other risky permissions can be automatically removed. + +## How can customers grant permissions on-demand with Permissions Management? + +For any break-glass or one-off scenarios where an identity needs to perform a specific set of actions on a set of specific resources, the identity can request those permissions on-demand for a limited period with a self-service workflow. Customers can either use the built-in workflow engine or their IT service management (ITSM) tool. The user experience is the same for any identity type, identity source (local, enterprise directory, or federated), and cloud. + +## What is the difference between permissions on-demand and just-in-time access? + +Just-in-time (JIT) access is a method used to enforce the principle of least privilege to ensure identities are given the minimum level of permissions to perform the task at hand. Permissions on-demand are a type of JIT access that allows the temporary elevation of permissions, enabling identities to access resources on a by-request, timed basis. + +## How can customers monitor permissions usage with Permissions Management? + +Customers only need to track the evolution of their Permissions Creep Index to monitor permissions usage. They can do this in the "Analytics" tab in their Permissions Management dashboard, where they can see how the PCI of each identity or resource is evolving over time. + +## Can customers generate permissions usage reports? + +Yes, Permissions Management has various types of system reports available that capture specific data sets. These reports allow customers to: +- Make timely decisions. +- Analyze usage trends and system/user performance. +- Identify high-risk areas. + +For information about permissions usage reports, see [Generate and download the Permissions analytics report](product-permissions-analytics-reports.md). + +## Does Permissions Management integrate with third-party IT service management (ITSM) tools? + +Permissions Management integrates with ServiceNow. + +## How is Permissions Management being deployed?
+ +Customers with the Global Admin role must first onboard Permissions Management on their Azure AD tenant, and then onboard their AWS accounts, GCP projects, and Azure subscriptions. More details about onboarding can be found in our product documentation. + +## How long does it take to deploy Permissions Management? + +It depends on each customer and how many AWS accounts, GCP projects, and Azure subscriptions they have. + +## Once Permissions Management is deployed, how fast can I get permissions insights? + +Once fully onboarded with data collection set up, customers can access permissions usage insights within hours. Our machine-learning engine refreshes the Permissions Creep Index every hour so that customers can start their risk assessment right away. + +## Is Permissions Management collecting and storing sensitive personal data? + +No, Permissions Management doesn't have access to sensitive personal data. + +## Where can I find more information about Permissions Management? + +You can read our blog and visit our web page. You can also get in touch with your Microsoft point of contact to schedule a demo. + +## Resources + +- [Public Preview announcement blog](https://www.aka.ms/CloudKnox-Public-Preview-Blog) +- [Permissions Management web page](https://microsoft.com/security/business/identity-access-management/permissions-management) + + +## Next steps + +- For an overview of Permissions Management, see [What's Permissions Management?](overview.md). +- For information on how to onboard Permissions Management in your organization, see [Enable Permissions Management in your organization](onboard-enable-tenant.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-add-remove-role-task.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-add-remove-role-task.md new file mode 100644 index 0000000000000..d07250a8bd6e1 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-add-remove-role-task.md @@ -0,0 +1,118 @@ +--- +title: Add and remove roles and tasks for groups, users, and service accounts for Microsoft Azure and Google Cloud Platform (GCP) identities in the Remediation dashboard in Permissions Management +description: How to attach and detach permissions for groups, users, and service accounts for Microsoft Azure and Google Cloud Platform (GCP) identities in the Remediation dashboard in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Add and remove roles and tasks for Microsoft Azure and Google Cloud Platform (GCP) identities + + +> [!IMPORTANT] +> Microsoft Entra Permissions Management (Entra) is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article describes how you can add and remove roles and tasks for Microsoft Azure and Google Cloud Platform (GCP) identities using the **Remediation** dashboard. + +> [!NOTE] +> To view the **Remediation** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator.
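+
+The add and remove procedures in this article end with a choice to either **Execute** the change directly or **Generate Script** so that you can review and apply the change yourself. The generated script depends on your environment and isn't reproduced here. As a rough, hypothetical illustration only, a manual Azure change of the same kind is typically expressed as a role assignment; this minimal sketch uses placeholder GUIDs rather than values from your tenant:
+
+```json
+{
+  "properties": {
+    "roleDefinitionId": "/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Authorization/roleDefinitions/11111111-1111-1111-1111-111111111111",
+    "principalId": "22222222-2222-2222-2222-222222222222"
+  }
+}
+```
+
+A body of this shape is what the Azure Resource Manager role assignments API accepts when a role is granted outside Permissions Management. A comparable GCP sketch appears at the end of this article.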
+ +## View permissions + +1. On the Entra home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. +1. From the **Authorization System** dropdown, select the accounts you want to access. +1. From the **Search For** dropdown, select **Group**, **User**, or **APP**. +1. To search for more parameters, you can make a selection from the **User States**, **Permission Creep Index**, and **Task Usage** dropdowns. +1. Select **Apply**. + Entra displays a list of groups, users, and service accounts that match your criteria. +1. In **Enter a username**, enter or select a user. +1. In **Enter a Group Name**, enter or select a group, then select **Apply**. +1. Make a selection from the results list. + + The table displays the **Username** **Domain/Account**, **Source**, **Resource** and **Current Role**. + + +## Add a role + +1. On the Entra home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. +1. From the **Authorization System** dropdown, select the accounts you want to access. +1. From the **Search For** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. +1. Make a selection from the results list. + +1. To attach a role, select **Add role**. +1. In the **Add Role** page, from the **Available Roles** list, select the plus sign **(+)** to move the role to the **Selected Roles** list. +1. When you have finished adding roles, select **Submit**. +1. When the following message displays: **Are you sure you want to change permission?**, select: + - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. + - **Execute** to change the permission. + - **Close** to cancel the action. + +## Remove a role + +1. On the Entra home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. +1. From the **Authorization System** dropdown, select the accounts you want to access. +1. From the **Search For** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. +1. Make a selection from the results list. + +1. To remove a role, select **Remove Role**. +1. In the **Remove Role** page, from the **Available Roles** list, select the plus sign **(+)** to move the role to the **Selected Roles** list. +1. When you have finished selecting roles, select **Submit**. +1. When the following message displays: **Are you sure you want to change permission?**, select: + - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. + - **Execute** to change the permission. + - **Close** to cancel the action. + +## Add a task + +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. +1. From the **Authorization System** dropdown, select the accounts you want to access. +1. From the **Search For** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. +1. Make a selection from the results list. + +1. To attach a role, select **Add Tasks**. +1. 
In the **Add Tasks** page, from the **Available Tasks** list, select the plus sign **(+)** to move the task to the **Selected Tasks** list. +1. When you have finished adding tasks, select **Submit**. +1. When the following message displays: **Are you sure you want to change permission?**, select: + - **Generate Script** to generate a script where you can manually add/remove the permissions you selected (for a rough idea of what such a manual change can look like, see the illustrative sketches in this article). + - **Execute** to change the permission. + - **Close** to cancel the action. + +## Remove a task + +1. On the Entra home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. +1. From the **Authorization System** dropdown, select the accounts you want to access. +1. From the **Search For** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. +1. Make a selection from the results list. + +1. To remove a task, select **Remove Tasks**. +1. In the **Remove Tasks** page, from the **Available Tasks** list, select the plus sign **(+)** to move the task to the **Selected Tasks** list. +1. When you have finished selecting tasks, select **Submit**. +1. When the following message displays: **Are you sure you want to change permission?**, select: + - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. + - **Execute** to change the permission. + - **Close** to cancel the action. + +## Next steps + + +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to create a role/policy, see [Create a role/policy](how-to-create-role-policy.md). +- For information on how to clone a role/policy, see [Clone a role/policy](how-to-clone-role-policy.md). +- For information on how to delete a role/policy, see [Delete a role/policy](how-to-delete-role-policy.md). +- For information on how to modify a role/policy, see [Modify a role/policy](how-to-modify-role-policy.md). +- To view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). +- For information on how to attach and detach permissions for Amazon Web Services (AWS) identities, see [Attach and detach policies for AWS identities](how-to-attach-detach-permissions.md). +- For information on how to revoke high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](how-to-revoke-task-readonly-status.md). +- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](how-to-create-approve-privilege-request.md).
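+
+As a companion to the Azure sketch earlier in this article, the following is a rough, hypothetical illustration of the GCP-side equivalent of adding a role to an identity outside Permissions Management: a binding added to a resource's IAM policy. The member and role shown here are placeholders, and this isn't the script that the **Generate Script** option produces.
+
+```json
+{
+  "bindings": [
+    {
+      "role": "roles/viewer",
+      "members": [
+        "user:taylor@example.com"
+      ]
+    }
+  ]
+}
+```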
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-attach-detach-permissions.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-attach-detach-permissions.md new file mode 100644 index 0000000000000..fc27f20740905 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-attach-detach-permissions.md @@ -0,0 +1,83 @@ +--- +title: Attach and detach permissions for users, roles, and groups for Amazon Web Services (AWS) identities in the Remediation dashboard in Permissions Management +description: How to attach and detach permissions for users, roles, and groups for Amazon Web Services (AWS) identities in the Remediation dashboard in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Attach and detach policies for Amazon Web Services (AWS) identities + + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article describes how you can attach and detach permissions for users, roles, and groups for Amazon Web Services (AWS) identities using the **Remediation** dashboard. + +> [!NOTE] +> To view the **Remediation** tab, your must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator. + +## View permissions + +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. From the **Authorization System Type** dropdown, select **AWS**. +1. From the **Authorization System** dropdown, select the accounts you want to access. +1. From the **Search For** dropdown, select **Group**, **User**, or **Role**. +1. To search for more parameters, you can make a selection from the **User States**, **Permission Creep Index**, and **Task Usage** dropdowns. +1. Select **Apply**. + Permissions Management displays a list of users, roles, or groups that match your criteria. +1. In **Enter a username**, enter or select a user. +1. In **Enter a group name**, enter or select a group, then select **Apply**. +1. Make a selection from the results list. + + The table displays the related **Username** **Domain/Account**, **Source** and **Policy Name**. + + +## Attach policies + +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. From the **Authorization System Type** dropdown, select **AWS**. +1. In **Enter a username**, enter or select a user. +1. In **Enter a Group Name**, enter or select a group, then select **Apply**. +1. Make a selection from the results list. +1. To attach a policy, select **Attach Policies**. +1. In the **Attach Policies** page, from the **Available policies** list, select the plus sign **(+)** to move the policy to the **Selected policies** list. +1. When you have finished adding policies, select **Submit**. +1. 
When the following message displays: **Are you sure you want to change permission?**, select: + - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. + - **Execute** to change the permission. + - **Close** to cancel the action. + +## Detach policies + +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. From the **Authorization System Type** dropdown, select **AWS**. +1. In **Enter a username**, enter or select a user. +1. In **Enter a Group Name**, enter or select a group, then select **Apply**. +1. Make a selection from the results list. +1. To remove a policy, select **Detach Policies**. +1. In the **Detach Policies** page, from the **Available policies** list, select the plus sign **(+)** to move the policy to the **Selected policies** list. +1. When you have finished selecting policies, select **Submit**. +1. When the following message displays: **Are you sure you want to change permission?**, select: + - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. + - **Execute** to change the permission. + - **Close** to cancel the action. + +## Next steps + + +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to create a role/policy, see [Create a role/policy](how-to-create-role-policy.md). +- For information on how to clone a role/policy, see [Clone a role/policy](how-to-clone-role-policy.md). +- For information on how to delete a role/policy, see [Delete a role/policy](how-to-delete-role-policy.md). +- For information on how to modify a role/policy, see [Modify a role/policy](how-to-modify-role-policy.md). +- To view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). +- For information on how to revoke high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](how-to-revoke-task-readonly-status.md). +- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](how-to-create-approve-privilege-request.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-audit-trail-results.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-audit-trail-results.md new file mode 100644 index 0000000000000..2f94f20e97959 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-audit-trail-results.md @@ -0,0 +1,65 @@ +--- +title: Generate an on-demand report from a query in the Audit dashboard in Permissions Management +description: How to generate an on-demand report from a query in the **Audit** dashboard in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Generate an on-demand report from a query + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released.
Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article describes how you can generate an on-demand report from a query in the **Audit** dashboard in Permissions Management. You can: + +- Run a report on-demand. +- Schedule and run a report as often as you want. +- Share a report with other members of your team and management. + +## Generate a custom report on-demand + +1. In the Permissions Management home page, select the **Audit** tab. + + Permissions Management displays the query options available to you. +1. In the **Audit** dashboard, select **Search** to run the query. +1. Select **Export**. + + Permissions Management generates the report and exports it in comma-separated values (**CSV**) format, portable document format (**PDF**), or Microsoft Excel Open XML Spreadsheet (**XLSX**) format. + + + + +## Next steps + +- For information on how to view how users access information, see [Use queries to see how users access information](ui-audit-trail.md). +- For information on how to filter and view user activity, see [Filter and query user activity](product-audit-trail.md). +- For information on how to create a query, see [Create a custom query](how-to-create-custom-queries.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-clone-role-policy.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-clone-role-policy.md new file mode 100644 index 0000000000000..9ae6da95198fe --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-clone-role-policy.md @@ -0,0 +1,55 @@ +--- +title: Clone a role/policy in the Remediation dashboard in Permissions Management +description: How to clone a role/policy in the Just Enough Permissions (JEP) Controller. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Clone a role/policy in the Remediation dashboard + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article describes how you can use the **Remediation** dashboard in Permissions Management to clone roles/policies for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. + +> [!NOTE] +> To view the **Remediation** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator. + +> [!NOTE] +> Microsoft Azure uses the term *role* for what other cloud providers call *policy*. Permissions Management automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. + +## Clone a role/policy + +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Role/Policies** tab. +1. Select the role/policy you want to clone, and from the **Actions** column, select **Clone**. +1.
**(AWS Only)** In the **Clone** box, the **Clone Resources** and **Clone Conditions** checkboxes are automatically selected. + Deselect the boxes if the resources and conditions are different from what is displayed. +1. Enter a name for each authorization system that was selected in the **Policy Name** boxes, and then select **Next**. + +1. If the data collector hasn't been given controller privileges, the following message displays: **Only online/controller-enabled authorization systems can be submitted for cloning.** + + To clone this role manually, download the script and JSON file. + +1. Select **Submit**. +1. Refresh the **Role/Policies** tab to see the role/policy you cloned. + +## Next steps + + +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to create a role/policy, see [Create a role/policy](how-to-create-role-policy.md). +- For information on how to delete a role/policy, see [Delete a role/policy](how-to-delete-role-policy.md). +- For information on how to modify a role/policy, see [Modify a role/policy](how-to-modify-role-policy.md). +- To view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). +- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](how-to-attach-detach-permissions.md). +- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](how-to-revoke-task-readonly-status.md). +- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](how-to-create-approve-privilege-request.md). +- For information on how to view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-alert-trigger.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-alert-trigger.md new file mode 100644 index 0000000000000..aa7340f908e9b --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-alert-trigger.md @@ -0,0 +1,113 @@ +--- +title: Create and view activity alerts and alert triggers in Permissions Management +description: How to create and view activity alerts and alert triggers in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Create and view activity alerts and alert triggers + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article describes how you can create and view activity alerts and alert triggers in Permissions Management. + +## Create an activity alert trigger + +1. In the Permissions Management home page, select **Activity Triggers** (the bell icon). +1. In the **Activity** tab, select **Create Activity Trigger**. +1.
In the **Alert Name** box, enter a name for your alert. +1. In **Authorization System Type**, select your authorization system: Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). +1. In **Authorization System**, select **Is** or **In**, and then select one or more accounts and folders. +1. From the **Select a Type** dropdown, select: **Access Key ID**, **Identity Tag Key**, **Identity Tag Key Value**, **Resource Name**, **Resource Tag Key**, **Resource Tag Key Value**, **Role Name**, **Role Session Name**, **State**, **Task Name**, or **Username**. +1. From the **Operator** dropdown, select an option: + + - **Is**/**Is Not**: Select in the value field to view a list of all available values. You can either select or enter the required value. + - **Contains**/**Not Contains**: Enter any text that the query parameter should or shouldn't contain, for example *Permissions Management*. + - **In**/**Not In**: Select in the value field to view list of all available values. Select the required multiple values. + +1. To add another parameter, select the plus sign **(+)**, then select an operator, and then enter a value. + + To remove a parameter, select the minus sign **(-)**. +1. To add another activity type, select **Add**, and then enter your parameters. +1. To save your alert, select **Save**. + + A message displays to confirm your activity trigger has been created. + + The **Triggers** table in the **Alert Triggers** subtab displays your alert trigger. + +## View an activity alert + +1. In the Permissions Management home page, select **Activity Triggers** (the bell icon). +1. In the **Activity** tab, select the **Alerts** subtab. +1. From the **Alert Name** dropdown, select an alert. +1. From the **Date** dropdown, select **Last 24 Hours**, **Last 2 Days**, **Last Week**, or **Custom Range**. + + If you select **Custom Range**, select date and time settings, and then select **Apply**. +1. To view the alert, select **Apply** + + The **Alerts** table displays information about your alert. + + + +## View activity alert triggers + +1. In the Permissions Management home page, select **Activity triggers** (the bell icon). +1. In the **Activity** tab, select the **Alert Triggers** subtab. +1. From the **Status** dropdown, select **All**, **Activated** or **Deactivated**, then select **Apply**. + + The **Triggers** table displays the following information: + + - **Alerts**: The name of the alert trigger. + - **# of users subscribed**: The number of users who have subscribed to a specific alert trigger. + + - Select a number in this column to view information about the user. + + - **Created By**: The email address of the user who created the alert trigger. + - **Modified By**: The email address of the user who last modified the alert trigger. + - **Last Updated**: The date and time the alert trigger was last updated. + - **Subscription**: A switch that displays if the alert is **On** or **Off**. + + - If the column displays **Off**, the current user isn't subscribed to that alert. Switch the toggle to **On** to subscribe to the alert. + - The user who creates an alert trigger is automatically subscribed to the alert, and will receive emails about the alert. + +1. To see only activated or only deactivated triggers, from the **Status** dropdown, select **Activated** or **Deactivated**, and then select **Apply**. + +1. To view other options available to you, select the ellipses (**...**), and then select from the available options. 
+ + If the **Subscription** is **On**, the following options are available: + + - **Edit**: Enables you to modify alert parameters + + > [!NOTE] + > Only the user who created the alert can perform the following actions: edit the trigger screen, rename an alert, deactivate an alert, and delete an alert. Changes made by other users aren't saved. + + - **Duplicate**: Create a duplicate of the alert called "**Copy of XXX**". + - **Rename**: Enter the new name of the query, and then select **Save.** + - **Deactivate**: The alert will still be listed, but will no longer send emails to subscribed users. + - **Activate**: Activate the alert trigger and start sending emails to subscribed users. + - **Notification Settings**: View the **Email** of users who are subscribed to the alert trigger and their **User Status**. + - **Delete**: Delete the alert. + + If the **Subscription** is **Off**, the following options are available: + - **View**: View details of the alert trigger. + - **Notification Settings**: View the **Email** of users who are subscribed to the alert trigger and their **User Status**. + - **Duplicate**: Create a duplicate copy of the selected alert trigger. + + + + +## Next steps + +- For an overview on activity triggers, see [View information about activity triggers](ui-triggers.md). +- For information on rule-based anomalies and anomaly triggers, see [Create and view rule-based anomalies and anomaly triggers](product-rule-based-anomalies.md). +- For information on finding outliers in identity's behavior, see [Create and view statistical anomalies and anomaly triggers](product-statistical-anomalies.md). +- For information on permission analytics triggers, see [Create and view permission analytics triggers](product-permission-analytics.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-approve-privilege-request.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-approve-privilege-request.md new file mode 100644 index 0000000000000..9b71b530ad173 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-approve-privilege-request.md @@ -0,0 +1,120 @@ +--- +title: Create or approve a request for permissions in the Remediation dashboard in Permissions Management +description: How to create or approve a request for permissions in the Remediation dashboard. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Create or approve a request for permissions + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article describes how to create or approve a request for permissions in the **Remediation** dashboard in Permissions Management. You can create and approve requests for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. + +The **Remediation** dashboard has two privilege-on-demand (POD) workflows you can use: +- **New Request**: The workflow used by a user to create a request for permissions for a specified duration. 
+- **Approver**: The workflow used by an approver to review and approve or reject a user's request for permissions. + + +> [!NOTE] +> To view the **Remediation** dashboard, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator. + +## Create a request for permissions + +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **My Requests** subtab. + + The **My Requests** subtab displays the following options: + - **Pending**: A list of requests you've made but haven't yet been reviewed. + - **Approved**: A list of requests that have been reviewed and approved by the approver. These requests have either already been activated or are in the process of being activated. + - **Processed**: A summary of the requests you've created that have been approved (**Done**), **Rejected**, and requests that have been **Canceled**. + +1. To create a request for permissions, select **New Request**. +1. In the **Roles/Tasks** page: + 1. From the **Authorization System Type** dropdown, select the authorization system type you want to access: **AWS**, **Azure** or **GCP**. + 1. From the **Authorization System** dropdown, select the accounts you want to access. + 1. From the **Identity** dropdown, select the identity on whose behalf you're requesting access. + + - If the identity you select is a Security Assertions Markup Language (SAML) user, and since a SAML user accesses the system through assumption of a role, select the user's role in **Role**. + + - If the identity you select is a local user, to select the policies you want: + 1. Select **Request Policy(s)**. + 1. In **Available Policies**, select the policies you want. + 1. To select a specific policy, select the plus sign, and then find and select the policy you want. + + The policies you've selected appear in the **Selected policies** box. + + - If the identity you select is a local user, to select the tasks you want: + 1. Select **Request Task(s)**. + 1. In **Available Tasks**, select the tasks you want. + 1. To select a specific task, select the plus sign, and then select the task you want. + + The tasks you've selected appear in the **Selected Tasks** box. + + If the user already has existing policies, they're displayed in **Existing Policies**. +1. Select **Next**. + +1. If you selected **AWS**, the **Scope** page appears. + + 1. In **Select Scope**, select: + - **All Resources** + - **Specific Resources**, and then select the resources you want. + - **No Resources** + 1. In **Request Conditions**: + 1. Select **JSON** to add a JSON block of code. + 1. Select **Done** to accept the code you've entered, or **Clear** to delete what you've entered and start again. + 1. In **Effect**, select **Allow** or **Deny.** + 1. Select **Next**. + +1. The **Confirmation** page appears. +1. In **Request Summary**, enter a summary for your request. +1. Optional: In **Note**, enter a note for the approver. +1. In **Schedule**, select when (how quickly) you want your request to be processed: + - **ASAP** + - **Once** + - In **Create Schedule**, select the **Frequency**, **Date**, **Time**, and **For** the required duration, then select **Schedule**. + - **Daily** + - **Weekly** + - **Monthly** +1. Select **Submit**. 
+ + The following message appears: **Your Request Has Been Successfully Submitted.** + + The request you submitted is now listed in **Pending Requests**. + +## Approve or reject a request for permissions + +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **My requests** subtab. +1. To view a list of requests that haven't yet been reviewed, select **Pending Requests**. +1. In the **Request Summary** list, select the ellipses **(…)** menu on the right of a request, and then select: + + - **Details** to view the details of the request. + - **Approve** to approve the request. + - **Reject** to reject the request. + +1. (Optional) Add a note to the requestor, and then select **Confirm**. + + The **Approved** subtab displays a list of requests that have been reviewed and approved by the approver. These requests have either already been activated or are in the process of being activated. + The **Processed** subtab displays a summary of the requests that have been approved or rejected, and requests that have been canceled. + + +## Next steps + + +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to create a role/policy, see [Create a role/policy](how-to-create-role-policy.md). +- For information on how to clone a role/policy, see [Clone a role/policy](how-to-clone-role-policy.md). +- For information on how to delete a role/policy, see [Delete a role/policy](how-to-delete-role-policy.md). +- For information on how to modify a role/policy, see [Modify a role/policy](how-to-modify-role-policy.md). +- To view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). +- For information on how to attach and detach permissions for Amazon Web Services (AWS) identities, see [Attach and detach policies for AWS identities](how-to-attach-detach-permissions.md). +- For information on how to add and remove roles and tasks for Microsoft Azure and Google Cloud Platform (GCP) identities, see [Add and remove roles and tasks for Azure and GCP identities](how-to-add-remove-role-task.md). +- For information on how to revoke high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](how-to-revoke-task-readonly-status.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-custom-queries.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-custom-queries.md new file mode 100644 index 0000000000000..fa299568213f7 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-custom-queries.md @@ -0,0 +1,121 @@ +--- +title: Create a custom query in Permissions Management +description: How to create a custom query in the Audit dashboard in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Create a custom query + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released.
Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article describes how you can use the **Audit** dashboard in Permissions Management to create custom queries that you can modify, save, and run as often as you want. + +## Open the Audit dashboard + +- In the Permissions Management home page, select the **Audit** tab. + + Permissions Management displays the query options available to you. + +## Create a custom query + +1. In the **Audit** dashboard, in the **New Query** subtab, select **Authorization System Type**, and then select the authorization systems you want to search: Amazon Web Services (**AWS**), Microsoft **Azure**, Google Cloud Platform (**GCP**), or Platform (**Platform**). +1. Select the authorization systems you want to search from the **List** and **Folders** box, and then select **Apply**. + +1. In the **New Query** box, enter your query parameters, and then select **Add**. + For example, to query by a date, select **Date** in the first box. In the second and third boxes, select the down arrow, and then select one of the date-related options. + +1. To add parameters, select **Add**, select the down arrow in the first box to display a dropdown of available selections. Then select the parameter you want. +1. To add more parameters to the same query, select **Add** (the plus sign), and from the first box, select **And** or **Or**. + + Repeat this step for the second and third box to complete entering the parameters. +1. To change your query as you're creating it, select **Edit** (the pencil icon), and then change the query parameters. +1. To change the parameter options, select the down arrow in each box to display a dropdown of available selections. Then select the option you want. +1. To discard your selections, select **Reset Query** for the parameter you want to change, and then make your selections again. +1. When you're ready to run your query, select **Search**. +1. To save the query, select **Save**. + + Permissions Management saves the query and adds it to the **Saved Queries** list. + +## Save the query under a new name + +1. In the **Audit** dashboard, select the ellipses menu **(…)** on the far right and select **Save As**. +2. Enter a new name for the query, and then select **Save**. + + Permissions Management saves the query under the new name. Both the new query and the original query display in the **Saved Queries** list. + +## View a saved query + +1. In the **Audit** dashboard, select the down arrow next to **Saved Queries**. + + A list of saved queries appears. +2. Select the query you want to open. +3. To open the query with the authorization systems you saved with the query, select **Load with the saved authorization systems**. +4. To open the query with the authorization systems you have currently selected (which may be different from the ones you originally saved), select **Load with the currently selected authorization systems**. +5. Select **Load Queries**. + + Permissions Management displays details of the query in the **Activity** table. Select a query to see its details: + + - The **Identity Details**. + - The **Domain** name. + - The **Resource Name** and **Resource Type**. + - The **Task Name**. + - The **Date**. + - The **IP Address**. + - The **Authorization System**. + +## View a raw events summary + +1. In the **Audit** dashboard, select **View** (the eye icon) to open the **Raw Events Summary** box. 
+ + The **Raw Events Summary** box displays **Username or Role Session Name**, the **Task name**, and the script for your query. +1. Select **Copy** to copy the script. +1. Select **X** to close the **Raw events summary** box. + + +## Run a saved query + +1. In the **Audit** dashboard, select the query you want to run. + + Permissions Management displays the results of the query in the **Activity** table. + +## Delete a query + +1. In the **Audit** dashboard, load the query you want to delete. +2. Select **Delete**. + + Permissions Management deletes the query. Deleted queries don't display in the **Saved Queries** list. + +## Rename a query + +1. In the **Audit** dashboard, load the query you want to rename. +2. Select the ellipses menu **(…)** on the far right, and select **Rename**. +3. Enter a new name for the query, and then select **Save**. + + Permissions Management saves the query under the new name. Both the new query and the original query display in the **Saved Queries** list. + +## Duplicate a query + +1. In the **Audit** dashboard, load the query you want to duplicate. +2. Select the ellipses menu **(…)** on the far right, and then select **Duplicate**. + + Permissions Management creates a copy of the query. Both the copy of the query and the original query display in the **Saved Queries** list. + + You can rename the original or copy of the query, change it, and save it without changing the other query. + + + +## Next steps + +- For information on how to view how users access information, see [Use queries to see how users access information](ui-audit-trail.md). +- For information on how to filter and view user activity, see [Filter and query user activity](product-audit-trail.md). +- For information on how to generate an on-demand report from a query, see [Generate an on-demand report from a query](how-to-audit-trail-results.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-group-based-permissions.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-group-based-permissions.md new file mode 100644 index 0000000000000..51cc754dc8906 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-group-based-permissions.md @@ -0,0 +1,56 @@ +--- +title: Select group-based permissions settings in Permissions Management with the User management dashboard +description: How to select group-based permissions settings in Permissions Management with the User management dashboard. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Select group-based permissions settings + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article describes how you can create and manage group-based permissions in Permissions Management with the User management dashboard. + +[!NOTE] The Permissions Management Administrator for all authorization systems will be able to create the new group based permissions. + +## Select administrative permissions settings for a group + +1. 
To display the **User Management** dashboard, select **User** (your initials) in the upper right of the screen, and then select **User Management**. +1. Select the **Groups** tab, and then select the **Create Permission** button in the upper right of the table. +1. In the **Set Group Permission** box, begin typing the name of an **Azure Active Directory Security Group** in your tenant. + +1. Select the permission setting you want: + - **Admin for all Authorization System Types** provides **View**, **Control**, and **Approve** permissions for all authorization system types. + - **Admin for selected Authorization System Types** provides **View**, **Control**, and **Approve** permissions for selected authorization system types. + - **Custom** allows you to set **View**, **Control**, and **Approve** permissions for the authorization system types that you select. +1. Select **Next**. + +1. If you selected **Admin for all Authorization System Types**: + - Select the identities for each authorization system that you would like members of this group to request on. + +1. If you selected **Admin for selected Authorization System Types**: + - Select **Viewer**, **Controller**, or **Approver** for the **Authorization System Types** you want. + - Select **Next**, and then select the identities for each authorization system that you would like members of this group to request on. + +1. If you selected **Custom**: + - Select the **Authorization System Types** you want. + - Select **Viewer**, **Controller**, or **Approver** for the **Authorization Systems** you want. + - Select **Next**, and then select the identities for each authorization system that you would like members of this group to request on. + +1. Select **Save**. The following message appears: **New Group Has been Created Successfully.** +1. To see the group you created in the **Groups** table, refresh the page. + +## Next steps + +- For information about how to manage user information, see [Manage users and groups with the User management dashboard](ui-user-management.md). +- For information about how to view information about active and completed tasks, see [View information about active and completed tasks](ui-tasks.md). +- For information about how to view personal and organization information, see [View personal and organization information](product-account-settings.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-role-policy.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-role-policy.md new file mode 100644 index 0000000000000..cd2a8f0ab8be8 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-role-policy.md @@ -0,0 +1,171 @@ +--- +title: Create a role/policy in the Remediation dashboard in Permissions Management +description: How to create a role/policy in the Remediation dashboard in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Create a role/policy in the Remediation dashboard + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here.
+ +This article describes how you can use the **Remediation** dashboard in Permissions Management to create roles/policies for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. + +> [!NOTE] +> To view the **Remediation** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator. + +> [!NOTE] +> Microsoft Azure uses the term *role* for what other cloud providers call *policy*. Permissions Management automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. + +## Create a policy for AWS + +1. On the Entra home page, select the **Remediation** tab, and then select the **Role/Policies** tab. +1. Use the dropdown lists to select the **Authorization System Type** and **Authorization System**. +1. Select **Create Policy**. +1. On the **Details** page, the **Authorization System Type** and **Authorization System** are pre-populated from your previous settings. + - To change the settings, make a selection from the dropdown. +1. Under **How Would You Like To Create The Policy**, select the required option: + + - **Activity of User(s)**: Allows you to create a policy based on user activity. + - **Activity of Group(s)**: Allows you to create a policy based on the aggregated activity of all the users belonging to the group(s). + - **Activity of Resource(s)**: Allows you to create a policy based on the activity of a resource, for example, an EC2 instance. + - **Activity of Role**: Allows you to create a policy based on the aggregated activity of all the users that assumed the role. + - **Activity of Tag(s)**: Allows you to create a policy based on the aggregated activity of all the tags. + - **Activity of Lambda Function**: Allows you to create a new policy based on the Lambda function. + - **From Existing Policy**: Allows you to create a new policy based on an existing policy. + - **New Policy**: Allows you to create a new policy from scratch. +1. In **Tasks performed in last**, select the duration: **90 days**, **60 days**, **30 days**, **7 days**, or **1 day**. +1. Depending on your preference, select or deselect **Include Access Advisor data.** +1. In **Settings**, from the **Available** column, select the plus sign **(+)** to move the identity into the **Selected** column, and then select **Next**. + +1. On the **Tasks** page, from the **Available** column, select the plus sign **(+)** to move the task into the **Selected** column. + - To add a whole category, select a category. + - To add individual items from a category, select the down arrow on the left of the category name, and then select individual items. +1. In **Resources**, select **All Resources** or **Specific Resources**. + + If you select **Specific Resources**, a list of available resources appears. Find the resources you want to add, and then select **Add**. +1. In **Request Conditions**, select **JSON** . +1. In **Effect**, select **Allow** or **Deny**, and then select **Next**. +1. In **Policy name:**, enter a name for your policy. +1. To add another statement to your policy, select **Add Statement**, and then, from the list of **Statements**, select a statement. +1. Review your **Task**, **Resources**, **Request Conditions**, and **Effect** settings, and then select **Next**. + + +1. 
On the **Preview** page, review the script to confirm it's what you want. +1. If your controller isn't enabled, select **Download JSON** or **Download Script** to download the code and run it yourself. + + If your controller is enabled, skip this step. +1. Select **Split Policy**, and then select **Submit**. + + A message confirms that your policy has been submitted for creation + +1. The [**Permissions Management Tasks**](ui-tasks.md) pane appears on the right. + - The **Active** tab displays a list of the policies Permissions Management is currently processing. + - The **Completed** tab displays a list of the policies Permissions Management has completed. +1. Refresh the **Role/Policies** tab to see the policy you created. + + + +## Create a role for Azure + +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Role/Policies** tab. +1. Use the dropdown lists to select the **Authorization System Type** and **Authorization System**. +1. Select **Create Role**. +1. On the **Details** page, the **Authorization System Type** and **Authorization System** are pre-populated from your previous settings. + - To change the settings, select the box and make a selection from the dropdown. +1. Under **How Would You Like To Create The Role?**, select the required option: + + - **Activity of User(s)**: Allows you to create a role based on user activity. + - **Activity of Group(s)**: Allows you to create a role based on the aggregated activity of all the users belonging to the group(s). + - **Activity of App(s)**: Allows you to create a role based on the aggregated activity of all apps. + - **From Existing Role**: Allows you to create a new role based on an existing role. + - **New Role**: Allows you to create a new role from scratch. + +1. In **Tasks performed in last**, select the duration: **90 days**, **60 days**, **30 days**, **7 days**, or **1 day**. +1. Depending on your preference: + - Select or deselect **Ignore Non-Microsoft Read Actions**. + - Select or deselect **Include Read-Only Tasks**. +1. In **Settings**, from the **Available** column, select the plus sign **(+)** to move the identity into the **Selected** column, and then select **Next**. + +1. On the **Tasks** page, in **Role name:**, enter a name for your role. +1. From the **Available** column, select the plus sign **(+)** to move the task into the **Selected** column. + - To add a whole category, select a category. + - To add individual items from a category, select the down arrow on the left of the category name, and then select individual items. +1. Select **Next**. + +1. On the **Preview** page, review: + - The list of selected **Actions** and **Not Actions**. + - The **JSON** or **Script** to confirm it's what you want. +1. If your controller isn't enabled, select **Download JSON** or **Download Script** to download the code and run it yourself. + + If your controller is enabled, skip this step. + +1. Select **Submit**. + + A message confirms that your role has been submitted for creation + +1. The [**Permissions Management Tasks**](ui-tasks.md) pane appears on the right. + - The **Active** tab displays a list of the policies Permissions Management is currently processing. + - The **Completed** tab displays a list of the policies Permissions Management has completed. +1. Refresh the **Role/Policies** tab to see the role you created. + +## Create a role for GCP + +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Role/Policies** tab. +1. 
Use the dropdown lists to select the **Authorization System Type** and **Authorization System**. +1. Select **Create Role**. +1. On the **Details** page, the **Authorization System Type** and **Authorization System** are pre-populated from your previous settings. + - To change the settings, select the box and make a selection from the dropdown. +1. Under **How Would You Like To Create The Role?**, select the required option: + + - **Activity of User(s)**: Allows you to create a role based on user activity. + - **Activity of Group(s)**: Allows you to create a role based on the aggregated activity of all the users belonging to the group(s). + - **Activity of Service Account(s)**: Allows you to create a role based on the aggregated activity of all service accounts. + - **From Existing Role**: Allows you to create a new role based on an existing role. + - **New Role**: Allows you to create a new role from scratch. + +1. In **Tasks performed in last**, select the duration: **90 days**, **60 days**, **30 days**, **7 days**, or **1 day**. +1. If you selected **Activity Of Service Account(s)** in the previous step, select or deselect **Collect activity across all GCP Authorization Systems.** +1. From the **Available** column, select the plus sign **(+)** to move the identity into the **Selected** column, and then select **Next**. + + +1. On the **Tasks** page, in **Role name:**, enter a name for your role. +1. From the **Available** column, select the plus sign **(+)** to move the task into the **Selected** column. + - To add a whole category, select a category. + - To add individual items from a category, select the down arrow on the left of the category name, and then select individual items. +1. Select **Next**. + +1. On the **Preview** page, review: + - The list of selected **Actions**. + - The **YAML** or **Script** to confirm it's what you want. +1. If your controller isn't enabled, select **Download YAML** or **Download Script** to download the code and run it yourself. +1. Select **Submit**. + A message confirms that your role has been submitted for creation + +1. The [**Permissions Management Tasks**](ui-tasks.md) pane appears on the right. + + - The **Active** tab displays a list of the policies Permissions Management is currently processing. + - The **Completed** tab displays a list of the policies Permissions Management has completed. +1. Refresh the **Role/Policies** tab to see the role you created. + + +## Next steps + +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to modify a role/policy, see [Modify a role/policy](how-to-modify-role-policy.md). +- For information on how to clone a role/policy, see [Clone a role/policy](how-to-clone-role-policy.md). +- For information on how to delete a role/policy, see [Delete a role/policy](how-to-delete-role-policy.md). +- To view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). +- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](how-to-attach-detach-permissions.md). 
+- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](how-to-revoke-task-readonly-status.md) +- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](how-to-create-approve-privilege-request.md). +- For information on how to view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md) diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-rule.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-rule.md new file mode 100644 index 0000000000000..d2da0287aecb9 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-rule.md @@ -0,0 +1,71 @@ +--- +title: Create a rule in the Autopilot dashboard in Permissions Management +description: How to create a rule in the Autopilot dashboard in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Create a rule in the Autopilot dashboard + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article describes how to create a rule in the Permissions Management **Autopilot** dashboard. + +> [!NOTE] +> Only users with **Administrator** permissions can view and make changes on the Autopilot tab. If you don't have these permissions, contact your system administrator. + +## Create a rule + +1. In the Permissions Management home page, select the **Autopilot** tab. +1. In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). +1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. +1. In the **Autopilot** dashboard, select **New Rule**. +1. In the **Rule Name** box, enter a name for your rule. +1. Select **AWS**, **Azure**, **GCP**, and then select **Next**. + +1. Select **Authorization Systems**, and then select **All** or the account names that you want. +1. From the **Folders** dropdown, select a folder, and then select **Apply**. + + To change your folder settings, select **Reset**. + + - The **Status** column displays if the authorization system is **Online** or **Offline**. + - The **Controller** column displays if the controller is **Enabled** or **Not Enabled**. + + +1. Select **Configure** , and then select the following parameters for your rule: + + - **Role Created On Is**: Select the duration in days. + - **Role Last Used On Is**: Select the duration in days when the role was last used. + - **Cross Account Role**: Select **True** or **False**. + +1. Select **Mode**, and then, if you want recommendations to be generated and applied manually, select **On-Demand**. +1. Select **Save** + + The following information displays in the **Autopilot Rules** table: + + - **Rule Name**: The name of the rule. 
+    - **State**: The status of the rule: idle (not being used) or active (being used).
+    - **Rule Type**: The type of rule being applied.
+    - **Mode**: The mode the rule runs in, for example **On-Demand**.
+    - **Last Generated**: The date and time the rule was last generated.
+    - **Created By**: The email address of the user who created the rule.
+    - **Last Modified On**: The date and time the rule was last modified.
+    - **Subscription**: Provides an **On** or **Off** switch that allows you to receive email notifications when recommendations have been generated, applied, or unapplied.
+
+
+
+
+## Next steps
+
+- For more information about viewing rules, see [View roles in the Autopilot dashboard](ui-autopilot.md).
+- For information about generating, viewing, and applying rule recommendations for rules, see [Generate, view, and apply rule recommendations for rules](how-to-recommendations-rule.md).
+- For information about notification settings for rules, see [View notification settings for a rule](how-to-notifications-rule.md).
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-delete-role-policy.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-delete-role-policy.md
new file mode 100644
index 0000000000000..6cb3b89f7592c
--- /dev/null
+++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-delete-role-policy.md
@@ -0,0 +1,51 @@
+---
+title: Delete a role/policy in the Remediation dashboard in Permissions Management
+description: How to delete a role/policy in the Remediation dashboard in Permissions Management.
+services: active-directory
+author: kenwith
+manager: rkarlin
+ms.service: active-directory
+ms.subservice: ciem
+ms.workload: identity
+ms.topic: how-to
+ms.date: 02/23/2022
+ms.author: kenwith
+---
+
+# Delete a role/policy in the Remediation dashboard
+
+> [!IMPORTANT]
+> Microsoft Entra Permissions Management is currently in PREVIEW.
+> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here.
+
+This article describes how you can use the **Remediation** dashboard in Permissions Management to delete roles/policies for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems.
+
+> [!NOTE]
+> To view the **Remediation** dashboard, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator.
+
+> [!NOTE]
+> Microsoft Azure uses the term *role* for what other cloud providers call *policy*. Permissions Management automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both.
+
+## Delete a role/policy
+
+1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Role/Policies** subtab.
+1. Select the role/policy you want to delete, and from the **Actions** column, select **Delete**.
+
+    You can only delete a role/policy if it isn't assigned to an identity.
+
+    You can't delete system roles/policies.
+
+1. On the **Preview** page, review the role/policy information to make sure you want to delete it, and then select **Submit**.
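+
+If you need to remove an unassigned Azure custom role outside the Permissions Management UI, the following Azure CLI commands are one way to do it. This is an illustrative sketch only: the role name `My Custom Reader` is a placeholder, and these commands aren't the mechanism Permissions Management uses when you select **Submit**.
+
+```azurecli
+# List any assignments that still reference the custom role.
+# The role can be deleted only when this list is empty.
+az role assignment list --role "My Custom Reader" --all --output table
+
+# Delete the unassigned custom role definition.
+az role definition delete --name "My Custom Reader"
+```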
+
+## Next steps
+
+
+- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md).
+- For information on how to create a role/policy, see [Create a role/policy](how-to-create-role-policy.md).
+- For information on how to clone a role/policy, see [Clone a role/policy](how-to-clone-role-policy.md).
+- For information on how to modify a role/policy, see [Modify a role/policy](how-to-modify-role-policy.md).
+- To view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md).
+- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](how-to-attach-detach-permissions.md).
+- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](how-to-revoke-task-readonly-status.md).
+- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](how-to-create-approve-privilege-request.md).
+- For information on how to view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md).
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-modify-role-policy.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-modify-role-policy.md
new file mode 100644
index 0000000000000..8c51e75c7c2f7
--- /dev/null
+++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-modify-role-policy.md
@@ -0,0 +1,50 @@
+---
+title: Modify a role/policy in the Remediation dashboard in Permissions Management
+description: How to modify a role/policy in the Remediation dashboard in Permissions Management.
+services: active-directory
+author: kenwith
+manager: rkarlin
+ms.service: active-directory
+ms.subservice: ciem
+ms.workload: identity
+ms.topic: how-to
+ms.date: 02/23/2022
+ms.author: kenwith
+---
+
+# Modify a role/policy in the Remediation dashboard
+
+> [!IMPORTANT]
+> Microsoft Entra Permissions Management is currently in PREVIEW.
+> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here.
+
+This article describes how you can use the **Remediation** dashboard in Permissions Management to modify roles/policies for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems.
+
+> [!NOTE]
+> To view the **Remediation** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator.
+
+> [!NOTE]
+> Microsoft Azure uses the term *role* for what other cloud providers call *policy*. Permissions Management automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both.
+
+## Modify a role/policy
+
+1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Role/Policies** tab.
+1. 
Select the role/policy you want to modify, and from the **Actions** column, select **Modify**. + + You can't modify **System** policies and roles. + +1. On the **Statements** page, make your changes to the **Tasks**, **Resources**, **Request conditions**, and **Effect** sections as required, and then select **Next**. + +1. Review the changes to the JSON or script on the **Preview** page, and then select **Submit**. + +## Next steps + +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to create a role/policy, see [Create a role/policy](how-to-create-role-policy.md). +- For information on how to clone a role/policy, see [Clone a role/policy](how-to-clone-role-policy.md). +- For information on how to delete a role/policy, see [Delete a role/policy](how-to-delete-role-policy.md). +- To view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). +- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](how-to-attach-detach-permissions.md). +- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](how-to-revoke-task-readonly-status.md) +- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](how-to-create-approve-privilege-request.md). +- For information on how to view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md) diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-notifications-rule.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-notifications-rule.md new file mode 100644 index 0000000000000..08e466861d3f9 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-notifications-rule.md @@ -0,0 +1,44 @@ +--- +title: View notification settings for a rule in the Autopilot dashboard in Permissions Management +description: How to view notification settings for a rule in the Autopilot dashboard in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# View notification settings for a rule in the Autopilot dashboard + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article describes how to view notification settings for a rule in the Permissions Management **Autopilot** dashboard. + +> [!NOTE] +> Only users with **Administrator** permissions can view and make changes on the Autopilot tab. If you don't have these permissions, contact your system administrator. + +## View notification settings for a rule + +1. In the Permissions Management home page, select the **Autopilot** tab. +1. 
In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). +1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. +1. In the **Autopilot** dashboard, select a rule. +1. In the far right of the row, select the ellipses **(...)** +1. To view notification settings for a rule, select **Notification Settings**. + + Permissions Management displays a list of subscribed users. These users are signed up to receive notifications for the selected rule. + +1. To close the **Notification Settings** box, select **Close**. + + +## Next steps + +- For more information about viewing rules, see [View roles in the Autopilot dashboard](ui-autopilot.md). +- For information about creating rules, see [Create a rule](how-to-create-rule.md). +- For information about generating, viewing, and applying rule recommendations for rules, see [Generate, view, and apply rule recommendations for rules](how-to-recommendations-rule.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-recommendations-rule.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-recommendations-rule.md new file mode 100644 index 0000000000000..2d83f8b4a4693 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-recommendations-rule.md @@ -0,0 +1,88 @@ +--- +title: Generate, view, and apply rule recommendations in the Autopilot dashboard in Permissions Management +description: How to generate, view, and apply rule recommendations in the Autopilot dashboard in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Generate, view, and apply rule recommendations in the Autopilot dashboard + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article describes how to generate and view rule recommendations in the Permissions Management **Autopilot** dashboard. + +> [!NOTE] +> Only users with **Administrator** permissions can view and make changes on the Autopilot tab. If you don't have these permissions, contact your system administrator. + +## Generate rule recommendations + +1. In the Permissions Management home page, select the **Autopilot** tab. +1. In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). +1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. +1. In the **Autopilot** dashboard, select a rule. +1. In the far right of the row, select the ellipses **(...)**. +1. To generate recommendations for each user and the authorization system, select **Generate Recommendations**. + + Only the user who created the selected rule can generate a recommendation. +1. View your recommendations in the **Recommendations** subtab. +1. 
Select **Close** to close the **Recommendations** subtab. + +## View rule recommendations + +1. In the Permissions Management home page, select the **Autopilot** tab. +1. In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). +1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. +1. In the **Autopilot** dashboard, select a rule. +1. In the far right of the row, select the ellipses **(...)** + +1. To view recommendations for each user and the authorization system, select **View Recommendations**. + + Permissions Management displays the recommendations for each user and authorization system in the **Recommendations** subtab. + +1. Select **Close** to close the **Recommendations** subtab. + +## Apply rule recommendations + +1. In the Permissions Management home page, select the **Autopilot** tab. +1. In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). +1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. +1. In the **Autopilot** dashboard, select a rule. +1. In the far right of the row, select the ellipses **(...)** + +1. To view recommendations for each user and the authorization system, select **View Recommendations**. + + Permissions Management displays the recommendations for each user and authorization system in the **Recommendations** subtab. + +1. To apply a recommendation, select the **Apply Recommendations** subtab, and then select a recommendation. +1. Select **Close** to close the **Recommendations** subtab. + +## Unapply rule recommendations + +1. In the Permissions Management home page, select the **Autopilot** tab. +1. In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). +1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. +1. In the **Autopilot** dashboard, select a rule. +1. In the far right of the row, select the ellipses **(...)** + +1. To view recommendations for each user and the authorization system, select **View Recommendations**. + + Permissions Management displays the recommendations for each user and authorization system in the **Recommendations** subtab. + +1. To remove a recommendation, select the **Unapply Recommendations** subtab, and then select a recommendation. +1. Select **Close** to close the **Recommendations** subtab. + + +## Next steps + +- For more information about viewing rules, see [View roles in the Autopilot dashboard](ui-autopilot.md). +- For information about creating rules, see [Create a rule](how-to-create-rule.md). +- For information about notification settings for rules, see [View notification settings for a rule](how-to-notifications-rule.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-revoke-task-readonly-status.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-revoke-task-readonly-status.md new file mode 100644 index 0000000000000..85a0a4465fe8c --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-revoke-task-readonly-status.md @@ -0,0 +1,111 @@ +--- +title: Revoke access to high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities in the Remediation dashboard in Permissions Management +description: How to revoke access to high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities in the Remediation dashboard in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Revoke access to high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities + + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article describes how you can revoke high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities using the **Remediation** dashboard. + +> [!NOTE] +> To view the **Remediation** tab, your must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator. + +## View an identity's permissions + +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. +1. From the **Authorization System** dropdown, select the accounts you want to access. +1. From the **Search for** dropdown, select **Group**, **User**, or **APP/Service Account**. +1. To search for more parameters, you can make a selection from the **User States**, **Permission Creep Index**, and **Task Usage** dropdowns. +1. Select **Apply**. + + Permissions Management displays a list of groups, users, and service accounts that match your criteria. +1. In **Enter a username**, enter or select a user. +1. In **Enter a Group Name**, enter or select a group, then select **Apply**. +1. Make a selection from the results list. + + The table displays the **Username** **Domain/Account**, **Source**, **Resource** and **Current Role**. + + +## Revoke an identity's access to unused tasks + +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. +1. From the **Authorization System** dropdown, select the accounts you want to access. +1. From the **Search for** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. +1. Make a selection from the results list. + +1. 
To revoke an identity's access to tasks they aren't using, select **Revoke Unused Tasks**. +1. When the following message displays: **Are you sure you want to change permission?**, select: + - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. + - **Execute** to change the permission. + - **Close** to cancel the action. + +## Revoke an identity's access to high-risk tasks + +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. +1. From the **Authorization System** dropdown, select the accounts you want to access. +1. From the **Search For** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. +1. Make a selection from the results list. + +1. To revoke an identity's access to high-risk tasks, select **Revoke High-Risk Tasks**. +1. When the following message displays: **Are you sure you want to change permission?**, select: + - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. + - **Execute** to change the permission. + - **Close** to cancel the action. + +## Revoke an identity's ability to delete tasks + +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. +1. From the **Authorization System** dropdown, select the accounts you want to access. +1. From the **Search For** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. +1. Make a selection from the results list. + +1. To revoke an identity's ability to delete tasks, select **Revoke Delete Tasks**. +1. When the following message displays: **Are you sure you want to change permission?**, select: + - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. + - **Execute** to change the permission. + - **Close** to cancel the action. + +## Assign read-only status to an identity + +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. +1. From the **Authorization System** dropdown, select the accounts you want to access. +1. From the **Search for** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. +1. Make a selection from the results list. + +1. To assign read-only status to an identity, select **Assign Read-Only Status**. +1. When the following message displays: **Are you sure you want to change permission?**, select: + - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. + - **Execute** to change the permission. + - **Close** to cancel the action. + + +## Next steps + +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to create a role/policy, see [Create a role/policy](how-to-create-role-policy.md). +- For information on how to clone a role/policy, see [Clone a role/policy](how-to-clone-role-policy.md). +- For information on how to delete a role/policy, see [Delete a role/policy](how-to-delete-role-policy.md). 
+- For information on how to modify a role/policy, see [Modify a role/policy](how-to-modify-role-policy.md).
+- To view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md).
+- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](how-to-attach-detach-permissions.md).
+- For information on how to add and remove roles and tasks for Azure and GCP identities, see [Add and remove roles and tasks for Azure and GCP identities](how-to-attach-detach-permissions.md).
+- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](how-to-create-approve-privilege-request.md).
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-view-role-policy.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-view-role-policy.md
new file mode 100644
index 0000000000000..9c1e939b897f4
--- /dev/null
+++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-view-role-policy.md
@@ -0,0 +1,102 @@
+---
+title: View information about roles/policies in the Remediation dashboard in Permissions Management
+description: How to view and filter information about roles/policies in the Remediation dashboard in Permissions Management.
+services: active-directory
+author: kenwith
+manager: rkarlin
+ms.service: active-directory
+ms.subservice: ciem
+ms.workload: identity
+ms.topic: how-to
+ms.date: 02/23/2022
+ms.author: kenwith
+---
+
+# View information about roles/policies in the Remediation dashboard
+
+> [!IMPORTANT]
+> Microsoft Entra Permissions Management is currently in PREVIEW.
+> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here.
+
+The **Remediation** dashboard in Permissions Management enables system administrators to view, adjust, and remediate excessive permissions based on a user's activity data. You can use the **Roles/Policies** subtab in the dashboard to view information about roles and policies in the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems.
+
+> [!NOTE]
+> To view the **Remediation** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator.
+
+> [!NOTE]
+> Microsoft Azure uses the term *role* for what other cloud providers call *policy*. Permissions Management automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both.
+
+
+## View information about roles/policies
+
+1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Role/Policies** subtab.
+
+    The **Role/Policies** list displays the existing roles/policies and the following information about each role/policy:
+    - **Role/Policy Name**: The name of the roles/policies available to you.
+    - **Role/Policy Type**: **Custom**, **System**, or **Permissions Management Only**.
+    - **Actions**: The type of action you can perform on the role/policy: **Clone**, **Modify**, or **Delete**.
+
+
+1. 
To display details about the role/policy and view its assigned tasks and identities, select the arrow to the left of the role/policy name. + + The **Tasks** list appears, displaying: + - A list of **Tasks**. + - **For AWS:** + - The **Users**, **Groups**, and **Roles** the task is **Directly Assigned To**. + - The **Group Members** and **Role Identities** the task is **Indirectly Accessible By**. + + - **For Azure:** + - The **Users**, **Groups**, **Enterprise Applications** and **Managed Identities** the task is **Directly Assigned To**. + - The **Group Members** the task is **Indirectly Accessible By**. + + - **For GCP:** + - The **Users**, **Groups**, and **Service Accounts** the task is **Directly Assigned To**. + - The **Group Members** the task is **Indirectly Accessible By**. + +1. To close the role/policy details, select the arrow to the left of the role/policy name. + +## Export information about roles/policies + +- **Export CSV**: Select this option to export the displayed list of roles/policies as a comma-separated values (CSV) file. + + When the file is successfully exported, a message appears: **Exported Successfully.** + + - Check your email for a message from the Permissions Management Customer Success Team. This email contains a link to: + - The **Role Policy Details** report in CSV format. + - The **Reports** dashboard where you can configure how and when you can automatically receive reports. + + + + +## Filter information about roles/policies + +1. On the Permissions Management home page, select the **Remediation** dashboard, and then select the **Role/Policies** tab. +1. To filter the roles/policies, select from the following options: + + - **Authorization System Type**: Select **AWS**, **Azure**, or **GCP**. + - **Authorization System**: Select the accounts you want. + - **Role/Policy Type**: Select from the following options: + + - **All**: All managed roles/policies. + - **Custom**: A customer-managed role/policy. + - **System**: A cloud service provider-managed role/policy. + - **Permissions Management Only**: A role/policy created by Permissions Management. + + - **Role/Policy Status**: Select **All**, **Assigned**, or **Unassigned**. + - **Role/Policy Usage**: Select **All** or **Unused**. +1. Select **Apply**. + + To discard your changes, select **Reset Filter**. + + +## Next steps + +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to create a role/policy, see [Create a role/policy](how-to-create-role-policy.md). +- For information on how to clone a role/policy, see [Clone a role/policy](how-to-clone-role-policy.md). +- For information on how to delete a role/policy, see [Delete a role/policy](how-to-delete-role-policy.md). +- For information on how to modify a role/policy, see Modify a role/policy](how-to-modify-role-policy.md). +- For information on how to attach and detach permissions AWS identities, see [Attach and detach policies for AWS identities](how-to-attach-detach-permissions.md). 
+- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](how-to-revoke-task-readonly-status.md) +- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](how-to-create-approve-privilege-request.md). +- For information on how to view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md) diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/index.yml b/articles/active-directory/cloud-infrastructure-entitlement-management/index.yml index ad09baeee4cfe..89f86084a634a 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/index.yml +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/index.yml @@ -1,11 +1,11 @@ ### YamlMime:Landing -title: CloudKnox Permissions Management -summary: CloudKnox Permissions Management is a cloud infrastructure entitlement management (CIEM) solution that provides comprehensive visibility into permissions assigned to all identities (users and workloads), actions, and resources across cloud infrastructures. It detects, right-sizes, and monitors unused and excessive permissions and enables Zero Trust security through least privilege access in Microsoft Azure, Amazon Web Services (AWS), and Google Cloud Platform (GCP). +title: Permissions Management +summary: Permissions Management is a cloud infrastructure entitlement management (CIEM) solution that provides comprehensive visibility into permissions assigned to all identities (users and workloads), actions, and resources across cloud infrastructures. It detects, right-sizes, and monitors unused and excessive permissions and enables Zero Trust security through least privilege access in Microsoft Azure, Amazon Web Services (AWS), and Google Cloud Platform (GCP). metadata: - title: CloudKnox Permissions Management - description: Learn how to use CloudKnox Permissions Management and Cloud Infrastructure Entitlement Management (CIEM) + title: Permissions Management + description: Learn how to use Permissions Management and Cloud Infrastructure Entitlement Management (CIEM) services: active-directory author: kenwith manager: rkarlin @@ -15,8 +15,8 @@ metadata: ms.topic: landing-page ms.date: 03/09/2022 ms.author: kenwith - - + + # linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | whats-new @@ -24,104 +24,102 @@ landingContent: # Cards and links should be based on top customer tasks or top subjects # Start card title with a verb # Card - - title: What's CloudKnox Permissions Management? + - title: What's Permissions Management? 
linkLists: - linkListType: overview links: - text: Overview - url: cloudknox-overview.md + url: overview.md # Card - - title: Onboard CloudKnox Permissions Management + - title: Onboard Permissions Management linkLists: - linkListType: overview links: - - text: Enable CloudKnox - url: cloudknox-onboard-enable-tenant.md + - text: Enable Permissions Management + url: onboard-enable-tenant.md # Card - title: View risk metrics in your authorization system linkLists: - linkListType: overview links: - text: View key statistics and data about your authorization system - url: cloudknox-ui-dashboard.md + url: ui-dashboard.md # Card - title: Configure settings for data collection linkLists: - linkListType: overview links: - text: View and configure settings for data collection - url: cloudknox-product-data-sources.md + url: product-data-sources.md # Card # - title: Manage organizational and personal information # linkLists: # - linkListType: overview # links: # - text: Set personal information and preferences - # url: cloudknox-product-account-settings.md + # url: product-account-settings.md # Card - title: View information about identities linkLists: - linkListType: overview links: - text: View information about identities - url: cloudknox-usage-analytics-home.md + url: usage-analytics-home.md - text: View how users access information - url: cloudknox-ui-audit-trail.md + url: ui-audit-trail.md # Card - title: Manage roles/policies and permission requests linkLists: - linkListType: overview links: - text: View existing roles/policies and requests for permission - url: cloudknox-ui-remediation.md + url: ui-remediation.md # Card # - title: View how users access information # linkLists: # - linkListType: overview # links: # - text: View how users access information - # url: cloudknox-ui-audit-trail.md + # url: ui-audit-trail.md # Card - title: Set activity alerts and triggers linkLists: - linkListType: overview links: - text: View information about activity triggers - url: cloudknox-ui-triggers.md + url: ui-triggers.md # Card - title: Manage rules for authorization systems linkLists: - linkListType: overview links: - text: Create and view rules in the Autopilot dashboard - url: cloudknox-ui-autopilot.md + url: ui-autopilot.md # Card - title: Generate reports linkLists: - linkListType: overview links: - text: Generate and view a system report - url: cloudknox-report-view-system-report.md + url: report-view-system-report.md # Card - # - title: Learn with CloudKnox videos + # - title: Learn with Permissions Management videos # linkLists: # - linkListType: overview # links: - # - text: CloudKnox Permissions Management training videos - # url: cloudknox-training-videos.md + # - text: Permissions Management training videos + # url: training-videos.md # Card - title: FAQs linkLists: - linkListType: overview links: - text: FAQs - url: cloudknox-faqs.md + url: faqs.md # Card - title: Troubleshoot linkLists: - linkListType: overview links: - text: Troubleshoot - url: cloudknox-troubleshoot.md - - + url: troubleshoot.md diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/integration-api.md b/articles/active-directory/cloud-infrastructure-entitlement-management/integration-api.md new file mode 100644 index 0000000000000..75795ba239219 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/integration-api.md @@ -0,0 +1,105 @@ +--- +title: Set and view configuration settings in Permissions Management +description: How to view the Permissions 
Management API integration settings and create service accounts and roles. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Set and view configuration settings + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This topic describes how to view configuration settings, create and delete a service account, and create a role in Permissions Management. + +## View configuration settings + +The **Integrations** dashboard displays the authorization systems available to you. + +1. To display the **Integrations** dashboard, select **User** (your initials) in the upper right of the screen, and then select **Integrations.** + + The **Integrations** dashboard displays a tile for each available authorization system. + +1. Select an authorization system tile to view the following integration information: + + 1. To find out more about the Permissions Management API, select **Permissions Management API**, and then select documentation. + + + 1. To view information about service accounts, select **Integration**: + - **Email**: Lists the email address of the user who created the integration. + - **Created By**: Lists the first and last name of the user who created the integration. + - **Created On**: Lists the date and time the integration was created. + - **Recent Activity**: Lists the date and time the integration was last used, or notes if the integration was never used. + - **Service Account ID**: Lists the service account ID. + - **Access Key**: Lists the access key code. + + 1. To view settings information, select **Settings**: + - **Roles can create service account**: Lists the type of roles you can create. + - **Access Key Rotation Policy**: Lists notifications and actions you can set. + - **Access Key Usage Policy**: Lists notifications and actions you can set. + +## Create a service account + +1. On the **Integrations** dashboard, select **User**, and then select **Integrations.** +2. Click **Create Service Account**. The following information is pre-populated on the page: + - **API Endpoint** + - **Service Account ID** + - **Access Key** + - **Secret Key** + +3. To copy the codes, select the **Duplicate** icon next to the respective information. + + > [!NOTE] + > The codes are time sensitive and will regenerate after the box is closed. + +4. To regenerate the codes, at the bottom of the column, select **Regenerate**. + +## Delete a service account + +1. On the **Integrations** dashboard, select **User**, and then select **Integrations.** + +1. On the right of the email address, select **Delete Service Account**. + + On the **Validate OTP To Delete [Service Name] Integration** box, a message displays asking you to check your email for a code sent to the email address on file. + + If you don't receive the code, select **Resend OTP**. + +1. In the **Enter OTP** box, enter the code from the email. + +1. Click **Verify**. + +## Create a role + +1. On the **Integrations** dashboard, select **User**, and then select **Settings**. +2. Under **Roles can create service account**, select the role you want: + - **Super Admin** + - **Viewer** + - **Controller** + +3. 
In the **Access Key Rotation Policy** column, select options for the following: + + - **How often should the users rotate their access keys?**: Select **30 days**, **60 days**, **90 days**, or **Never**. + - **Notification**: Enter a whole number in the blank space within **Notify "X" days before the selected period**, or select **Don't Notify**. + - **Action (after the key rotation period ends)**: Select **Disable Action Key** or **No Action**. + +4. In the **Access Key Usage Policy** column, select options for the following: + + - **How often should the users go without using their access keys?**: Select **30 days**, **60 days**, **90 days**, or **Never**. + - **Notification**: Enter a whole number in the blank space within **Notify "X" days before the selected period**, or select **Don't Notify**. + - **Action (after the key rotation period ends)**: Select **Disable Action Key** or **No Action**. + +5. Click **Save**. + + + + + + \ No newline at end of file diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/multi-cloud-glossary.md b/articles/active-directory/cloud-infrastructure-entitlement-management/multi-cloud-glossary.md new file mode 100644 index 0000000000000..a23f7007f5708 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/multi-cloud-glossary.md @@ -0,0 +1,82 @@ +--- +title: Permissions Management glossary +description: Permissions Management glossary +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: conceptual +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# The Permissions Management glossary + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This glossary provides a list of some of the commonly used cloud terms in Permissions Management. These terms will help Permissions Management users navigate through cloud-specific terms and cloud-generic terms. + +## Commonly-used acronyms and terms + +| Term | Definition | +|-----------------------|-----------------------------------------------------| +| ACL | Access control list. A list of files or resources that contain information about which users or groups have permission to access those resources or modify those files. | +| ARN | Azure Resource Notification | +| Authorization System | CIEM supports AWS accounts, Azure Subscriptions, GCP projects as the Authorization systems | +| Authorization System Type | Any system which provides the authorizations by assigning the permissions to the identities, resources. CIEM supports AWS, Azure, GCP as the Authorization System Types | +| Cloud security | A form of cybersecurity that protects data stored online on cloud computing platforms from theft, leakage, and deletion. Includes firewalls, penetration testing, obfuscation, tokenization, virtual private networks (VPN), and avoiding public internet connections. | +| Cloud storage | A service model in which data is maintained, managed, and backed up remotely. Available to users over a network. | +| CIAM | Cloud Infrastructure Access Management | +| CIEM | Cloud Infrastructure Entitlement Management. The next generation of solutions for enforcing least privilege in the cloud. 
It addresses cloud-native security challenges of managing identity access management in cloud environments. | +| CIS | Cloud infrastructure security | +| CWP | Cloud Workload Protection. A workload-centric security solution that targets the unique protection requirements of workloads in modern enterprise environments. | +| CNAPP | Cloud-Native Application Protection. The convergence of cloud security posture management (CSPM), cloud workload protection (CWP), cloud infrastructure entitlement management (CIEM), and cloud applications security broker (CASB). An integrated security approach that covers the entire lifecycle of cloud-native applications. | +| CSPM | Cloud Security Posture Management. Addresses risks of compliance violations and misconfigurations in enterprise cloud environments. Also focuses on the resource level to identify deviations from best practice security settings for cloud governance and compliance. | +| CWPP | Cloud Workload Protection Platform | +| Data Collector | Virtual entity which stores the data collection configuration | +| Delete task | A high-risk task that allows users to permanently delete a resource. | +| ED | Enterprise directory | +| Entitlement | An abstract attribute that represents different forms of user permissions in a range of infrastructure systems and business applications.| +| Entitlement management | Technology that grants, resolves, enforces, revokes, and administers fine-grained access entitlements (that is, authorizations, privileges, access rights, permissions and rules). Its purpose is to execute IT access policies to structured/unstructured data, devices, and services. It can be delivered by different technologies, and is often different across platforms, applications, network components, and devices. | +| High-risk task | A task in which a user can cause data leakage, service disruption, or service degradation. | +| Hybrid cloud | Sometimes called a cloud hybrid. A computing environment that combines an on-premises data center (a private cloud) with a public cloud. It allows data and applications to be shared between them. | +| hybrid cloud storage | A private or public cloud used to store an organization's data. | +| ICM | Incident Case Management | +| IDS | Intrusion Detection Service | +| Identity analytics | Includes basic monitoring and remediation, dormant and orphan account detection and removal, and privileged account discovery. | +| Identity lifecycle management | Maintain digital identities, their relationships with the organization, and their attributes during the entire process from creation to eventual archiving, using one or more identity life cycle patterns. | +| IGA | Identity governance and administration. Technology solutions that conduct identity management and access governance operations. IGA includes the tools, technologies, reports, and compliance activities required for identity lifecycle management. It includes every operation from account creation and termination to user provisioning, access certification, and enterprise password management. It looks at automated workflow and data from authoritative sources capabilities, self-service user provisioning, IT governance, and password management. | +| ITSM | Information Technology Security Management. Tools that enable IT operations organizations (infrastructure and operations managers), to better support the production environment. Facilitate the tasks and workflows associated with the management and delivery of quality IT services. 
| +| JEP | Just Enough Permissions | +| JIT | Just in Time access can be seen as a way to enforce the principle of least privilege to ensure users and non-human identities are given the minimum level of privileges. It also ensures that privileged activities are conducted in accordance with an organization's Identity Access Management (IAM), IT Service Management (ITSM), and Privileged Access Management (PAM) policies, with its entitlements and workflows. JIT access strategy enables organizations to maintain a full audit trail of privileged activities so they can easily identify who or what gained access to which systems, what they did at what time, and for how long. | +| Least privilege | Ensures that users only gain access to the specific tools they need to complete a task. | +| Multi-tenant | A single instance of the software and its supporting infrastructure serves multiple customers. Each customer shares the software application and also shares a single database. | +| OIDC | OpenID Connect. An authentication protocol that verifies user identity when a user is trying to access a protected HTTPs end point. OIDC is an evolutionary development of ideas implemented earlier in OAuth. | +| PAM | Privileged access management. Tools that offer one or more of these features: discover, manage, and govern privileged accounts on multiple systems and applications; control access to privileged accounts, including shared and emergency access; randomize, manage, and vault credentials (password, keys, etc.) for administrative, service, and application accounts; single sign-on (SSO) for privileged access to prevent credentials from being revealed; control, filter, and orchestrate privileged commands, actions, and tasks; manage and broker credentials to applications, services, and devices to avoid exposure; and monitor, record, audit, and analyze privileged access, sessions, and actions. | +| PASM | Privileged accounts are protected by vaulting their credentials. Access to those accounts is then brokered for human users, services, and applications. Privileged session management (PSM) functions establish sessions with possible credential injection and full session recording. Passwords and other credentials for privileged accounts are actively managed and changed at definable intervals or upon the occurrence of specific events. PASM solutions may also provide application-to-application password management (AAPM) and zero-install remote privileged access features for IT staff and third parties that don't require a VPN. | +| PEDM | Specific privileges are granted on the managed system by host-based agents to logged-in users. PEDM tools provide host-based command control (filtering); application allow, deny, and isolate controls; and/or privilege elevation. The latter is in the form of allowing particular commands to be run with a higher level of privileges. PEDM tools execute on the actual operating system at the kernel or process level. Command control through protocol filtering is explicitly excluded from this definition because the point of control is less reliable. PEDM tools may also provide file integrity monitoring features. | +| Permission | Rights and privileges. Details given by users or network administrators that define access rights to files on a network. Access controls attached to a resource dictating which identities can access it and how. Privileges are attached to identities and are the ability to perform certain actions. An identity having the ability to perform an action on a resource. 
| +| POD | Permission on Demand. A type of JIT access that allows the temporary elevation of permissions, enabling identities to access resources on a by-request, timed basis. | +| Permissions creep index (PCI) | A number from 0 to 100 that represents the incurred risk of users with access to high-risk privileges. PCI is a function of users who have access to high-risk privileges but aren't actively using them. | +| Policy and role management | Maintain rules that govern automatic assignment and removal of access rights. Provides visibility of access rights for selection in access requests, approval processes, dependencies, and incompatibilities between access rights, and more. Roles are a common vehicle for policy management. | +| Privilege | The authority to make changes to a network or computer. Both people and accounts can have privileges, and both can have different levels of privilege. | +| Privileged account | A login credential to a server, firewall, or other administrative account. Often referred to as admin accounts. Comprised of the actual username and password; these two things together make up the account. A privileged account is allowed to do more things than a normal account. | +| Public Cloud | Computing services offered by third-party providers over the public Internet, making them available to anyone who wants to use or purchase them. They may be free or sold on-demand, allowing customers to pay only per usage for the CPU cycles, storage, or bandwidth they consume. | +| Resource | Any entity that uses compute capabilities can be accessed by users and services to perform actions. | +| Role | An IAM identity that has specific permissions. Instead of being uniquely associated with one person, a role is intended to be assumable by anyone who needs it. A role doesn't have standard long-term credentials such as a password or access keys associated with. | +| SCIM | System for Cross–domain Identity Management | +| SIEM | Security Information and Event Management. Technology that supports threat detection, compliance and security incident management through the collection and analysis (both near real time and historical) of security events, as well as a wide variety of other event and contextual data sources. The core capabilities are a broad scope of log event collection and management, the ability to analyze log events and other data across disparate sources, and operational capabilities (such as incident management, dashboards, and reporting). | +| SOAR | Security orchestration, automation and response (SOAR). Technologies that enable organizations to take inputs from various sources (mostly from security information and event management [SIEM] systems) and apply workflows aligned to processes and procedures. These workflows can be orchestrated via integrations with other technologies and automated to achieve the desired outcome and greater visibility. Other capabilities include case and incident management features; the ability to manage threat intelligence, dashboards and reporting; and analytics that can be applied across various functions. SOAR tools significantly enhance security operations activities like threat detection and response by providing machine-powered assistance to human analysts to improve the efficiency and consistency of people and processes. | +| Super user / Super identity | A powerful account used by IT system administrators that can be used to make configurations to a system or application, add or remove users, or delete data. 
| +| Tenant | A dedicated instance of the services and organization data stored within a specific default location. | +| UUID | Universally unique identifier. A 128-bit label used for information in computer systems. The term globally unique identifier (GUID) is also used.| +| Zero trust security | The three foundational principles: explicit verification, breach assumption, and least privileged access.| +| ZTNA | Zero trust network access. A product or service that creates an identity- and context-based, logical access boundary around an application or set of applications. The applications are hidden from discovery, and access is restricted via a trust broker to a set of named entities. The broker verifies the identity, context and policy adherence of the specified participants before allowing access and prohibits lateral movement elsewhere in the network. It removes application assets from public visibility and significantly reduces the surface area for attack.| + +## Next steps + +- For an overview of Permissions Management, see [What's Permissions Management?](overview.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-add-account-after-onboarding.md b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-add-account-after-onboarding.md new file mode 100644 index 0000000000000..c02c442060d62 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-add-account-after-onboarding.md @@ -0,0 +1,112 @@ +--- +title: Add an account /subscription/ project to Permissions Management after onboarding is complete +description: How to add an account/ subscription/ project to Permissions Management after onboarding is complete. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Add an account/ subscription/ project after onboarding is complete + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article describes how to add an Amazon Web Services (AWS) account, Microsoft Azure subscription, or Google Cloud Platform (GCP) project in Microsoft Permissions Management after you've completed the onboarding process. + +## Add an AWS account after onboarding is complete + +1. In the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data collectors** tab. +1. On the **Data collectors** dashboard, select **AWS**. +1. Select the ellipses **(...)** at the end of the row, and then select **Edit Configuration**. + + The **Permissions Management Onboarding - Summary** page displays. + +1. Go to **AWS Account IDs**, and then select **Edit** (the pencil icon). + + The **Permissions Management Onboarding - AWS Member Account Details** page displays. + +1. Go to **Enter Your AWS Account IDs**, and then select **Add** (the plus **+** sign). +1. Copy your account ID from AWS and paste it into the **Enter Account ID** box. + + The AWS account ID is automatically added to the script. + + If you want to add more account IDs, repeat steps 5 and 6 to add up to a total of 10 account IDs. + +1. Copy the script. +1. Go to AWS and start the Cloud Shell. +1. 
Create a new script for the new account and press the **Enter** key. +1. Paste the script you copied. +1. Locate the account line, delete the original account ID (the one that was previously added), and then run the script. +1. Return to Permissions Management. The new account ID you added appears in the list of account IDs displayed in the **Permissions Management Onboarding - Summary** page. +1. Select **Verify now & save**. + + When your changes are saved, the following message displays: **Successfully updated configuration.** + + +## Add an Azure subscription after onboarding is complete + +1. In the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data collectors** tab. +1. On the **Data collectors** dashboard, select **Azure**. +1. Select the ellipses **(...)** at the end of the row, and then select **Edit Configuration**. + + The **Permissions Management Onboarding - Summary** page displays. + +1. Go to **Azure subscription IDs**, and then select **Edit** (the pencil icon). +1. Go to **Enter your Azure Subscription IDs**, and then select **Add subscription** (the plus **+** sign). +1. Copy your subscription ID from Azure and paste it into the subscription ID box. + + The subscription ID is automatically added to the subscriptions line in the script. + + If you want to add more subscription IDs, repeat steps 5 and 6 to add up to a total of 10 subscriptions. + +1. Copy the script. +1. Go to Azure and start the Cloud Shell. +1. Create a new script for the new subscription and press the **Enter** key. +1. Paste the script you copied. +1. Locate the subscription line and delete the original subscription ID (the one that was previously added), and then run the script. +1. Return to Permissions Management. The new subscription ID you added appears in the list of subscription IDs displayed in the **Permissions Management Onboarding - Summary** page. +1. Select **Verify now & save**. + + When your changes are saved, the following message displays: **Successfully updated configuration.** + +## Add a GCP project after onboarding is complete + +1. In the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data collectors** tab. +1. On the **Data collectors** dashboard, select **GCP**. +1. Select the ellipses **(...)** at the end of the row, and then select **Edit Configuration**. + + The **Permissions Management Onboarding - Summary** page displays. + +1. Go to **GCP Project IDs**, and then select **Edit** (the pencil icon). +1. Go to **Enter your GCP Project IDs**, and then select **Add Project ID** (the plus **+** sign). +1. Copy your project ID from GCP and paste it into the **Project ID** box. + + The project ID is automatically added to the **Project ID** line in the script. + + If you want to add more project IDs, repeat steps 5 and 6 to add up to a total of 10 project IDs. + +1. Copy the script. +1. Go to GCP and start the Cloud Shell. +1. Create a new script for the new project ID and press the **Enter** key. +1. Paste the script you copied. +1. Locate the project ID line and delete the original project ID (the one that was previously added), and then run the script. +1. Return to Permissions Management. The new project ID you added appears in the list of project IDs displayed in the **Permissions Management Onboarding - Summary** page. +1. Select **Verify now & save**. 
+ + When your changes are saved, the following message displays: **Successfully updated configuration.** + + + +## Next steps + +- For information on how to onboard an Amazon Web Services (AWS) account, see [Onboard an AWS account](onboard-aws.md). + - For information on how to onboard a Microsoft Azure subscription, see [Onboard a Microsoft Azure subscription](onboard-azure.md). +- For information on how to onboard a Google Cloud Platform (GCP) project, see [Onboard a GCP project](onboard-gcp.md). +- For information on how to enable or disable the controller after onboarding is complete, see [Enable or disable the controller](onboard-enable-controller-after-onboarding.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-aws.md b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-aws.md new file mode 100644 index 0000000000000..fc4d7b83549e4 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-aws.md @@ -0,0 +1,173 @@ +--- +title: Onboard an Amazon Web Services (AWS) account on Permissions Management +description: How to onboard an Amazon Web Services (AWS) account on Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 04/20/2022 +ms.author: kenwith +--- + +# Onboard an Amazon Web Services (AWS) account + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +> [!NOTE] +> The Permissions Management PREVIEW is currently not available for tenants hosted in the European Union (EU). + + +This article describes how to onboard an Amazon Web Services (AWS) account on Permissions Management. + +> [!NOTE] +> A *global administrator* or *super admin* (an admin for all authorization system types) can perform the tasks in this article after the global administrator has initially completed the steps provided in [Enable Permissions Management on your Azure Active Directory tenant](onboard-enable-tenant.md). + + +## View a training video on configuring and onboarding an AWS account + +To view a video on how to configure and onboard AWS accounts in Permissions Management, select [Configure and onboard AWS accounts](https://www.youtube.com/watch?v=R6K21wiWYmE). + +## Onboard an AWS account + +1. If the **Data Collectors** dashboard isn't displayed when Permissions Management launches: + + - In the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. + +1. On the **Data Collectors** dashboard, select **AWS**, and then select **Create Configuration**. + +### 1. Create an Azure AD OIDC App + +1. On the **Permissions Management Onboarding - Azure AD OIDC App Creation** page, enter the **OIDC Azure app name**. + + This app is used to set up an OpenID Connect (OIDC) connection to your AWS account. OIDC is an interoperable authentication protocol based on the OAuth 2.0 family of specifications. The scripts generated on this page create the app of this specified name in your Azure AD tenant with the right configuration. + +1. To create the app registration, copy the script and run it in your Azure command-line app. + + > [!NOTE] + > 1. 
To confirm that the app was created, open **App registrations** in Azure and, on the **All applications** tab, locate your app. + > 1. Select the app name to open the **Expose an API** page. The **Application ID URI** displayed in the **Overview** page is the *audience value* used while making an OIDC connection with your AWS account. + +1. Return to Permissions Management, and in the **Permissions Management Onboarding - Azure AD OIDC App Creation**, select **Next**. + +### 2. Set up an AWS OIDC account + +1. In the **Permissions Management Onboarding - AWS OIDC Account Setup** page, enter the **AWS OIDC account ID** where the OIDC provider is created. You can change the role name to your requirements. +1. Open another browser window and sign in to the AWS account where you want to create the OIDC provider. +1. Select **Launch Template**. This link takes you to the **AWS CloudFormation create stack** page. +1. Scroll to the bottom of the page, and in the **Capabilities** box, select **I acknowledge that AWS CloudFormation might create IAM resources with custom names**. Then select **Create Stack.** + + This AWS CloudFormation stack creates an OIDC Identity Provider (IdP) representing Azure AD STS and an AWS IAM role with a trust policy that allows external identities from Azure AD to assume it via the OIDC IdP. These entities are listed on the **Resources** page. + +1. Return to Permissions Management, and in the **Permissions Management Onboarding - AWS OIDC Account Setup** page, select **Next**. + +### 3. Set up an AWS master account (Optional) + +1. If your organization has Service Control Policies (SCPs) that govern some or all of the member accounts, set up the master account connection in the **Permissions Management Onboarding - AWS Master Account Details** page. + + Setting up the master account connection allows Permissions Management to auto-detect and onboard any AWS member accounts that have the correct Permissions Management role. + + - In the **Permissions Management Onboarding - AWS Master Account Details** page, enter the **Master Account ID** and **Master Account Role**. + +1. Open another browser window and sign in to the AWS console for your master account. + +1. Return to Permissions Management, and in the **Permissions Management Onboarding - AWS Master Account Details** page, select **Launch Template**. + + The **AWS CloudFormation create stack** page opens, displaying the template. + +1. Review the information in the template, make changes, if necessary, then scroll to the bottom of the page. + +1. In the **Capabilities** box, select **I acknowledge that AWS CloudFormation might create IAM resources with custom names**. Then select **Create stack**. + + This AWS CloudFormation stack creates a role in the master account with the necessary permissions (policies) to collect SCPs and list all the accounts in your organization. + + A trust policy is set on this role to allow the OIDC role created in your AWS OIDC account to access it. These entities are listed in the **Resources** tab of your CloudFormation stack. + +1. Return to Permissions Management, and in **Permissions Management Onboarding - AWS Master Account Details**, select **Next**. + +### 4. Set up an AWS Central logging account (Optional but recommended) + +1. If your organization has a central logging account where logs from some or all of your AWS account are stored, in the **Permissions Management Onboarding - AWS Central Logging Account Details** page, set up the logging account connection. 
+ + In the **Permissions Management Onboarding - AWS Central Logging Account Details** page, enter the **Logging Account ID** and **Logging Account Role**. + +1. In another browser window, sign in to the AWS console for the AWS account you use for central logging. + +1. Return to Permissions Management, and in the **Permissions Management Onboarding - AWS Central Logging Account Details** page, select **Launch Template**. + + The **AWS CloudFormation create stack** page opens, displaying the template. + +1. Review the information in the template, make changes, if necessary, then scroll to the bottom of the page. + +1. In the **Capabilities** box, select **I acknowledge that AWS CloudFormation might create IAM resources with custom names**, and then select **Create stack**. + + This AWS CloudFormation stack creates a role in the logging account with the necessary permissions (policies) to read S3 buckets used for central logging. A trust policy is set on this role to allow the OIDC role created in your AWS OIDC account to access it. These entities are listed in the **Resources** tab of your CloudFormation stack. + +1. Return to Permissions Management, and in the **Permissions Management Onboarding - AWS Central Logging Account Details** page, select **Next**. + +### 5. Set up an AWS member account + +1. In the **Permissions Management Onboarding - AWS Member Account Details** page, enter the **Member Account Role** and the **Member Account IDs**. + + You can enter up to 10 account IDs. Click the plus icon next to the text box to add more account IDs. + + > [!NOTE] + > Perform the next 6 steps for each account ID you add. + +1. Open another browser window and sign in to the AWS console for the member account. + +1. Return to the **Permissions Management Onboarding - AWS Member Account Details** page, select **Launch Template**. + + The **AWS CloudFormation create stack** page opens, displaying the template. + +1. In the **CloudTrailBucketName** page, enter a name. + + You can copy and paste the **CloudTrailBucketName** name from the **Trails** page in AWS. + + > [!NOTE] + > A *cloud bucket* collects all the activity in a single account that Permissions Management monitors. Enter the name of a cloud bucket here to provide Permissions Management with the access required to collect activity data. + +1. From the **Enable Controller** dropdown, select: + + - **True**, if you want the controller to provide Permissions Management with read and write access so that any remediation you want to do from the Permissions Management platform can be done automatically. + - **False**, if you want the controller to provide Permissions Management with read-only access. + +1. Scroll to the bottom of the page, and in the **Capabilities** box, select **I acknowledge that AWS CloudFormation might create IAM resources with custom names**. Then select **Create stack**. + + This AWS CloudFormation stack creates a collection role in the member account with necessary permissions (policies) for data collection. + + A trust policy is set on this role to allow the OIDC role created in your AWS OIDC account to access it. These entities are listed in the **Resources** tab of your CloudFormation stack. + +1. Return to Permissions Management, and in the **Permissions Management Onboarding - AWS Member Account Details** page, select **Next**. + + This step completes the sequence of required connections from Azure AD STS to the OIDC connection account and the AWS member account. + +### 6. Review and save + +1. 
In **Permissions Management Onboarding – Summary**, review the information you've added, and then select **Verify Now & Save**. + + The following message appears: **Successfully created configuration.** + + On the **Data Collectors** dashboard, the **Recently Uploaded On** column displays **Collecting**. The **Recently Transformed On** column displays **Processing.** + + You have now completed onboarding AWS, and Permissions Management has started collecting and processing your data. + +### 7. View the data + +1. To view the data, select the **Authorization Systems** tab. + + The **Status** column in the table displays **Collecting Data.** + + The data collection process may take some time, depending on the size of the account and how much data is available for collection. + + +## Next steps + +- For information on how to onboard a Microsoft Azure subscription, see [Onboard a Microsoft Azure subscription](onboard-azure.md). +- For information on how to onboard a Google Cloud Platform (GCP) project, see [Onboard a Google Cloud Platform (GCP) project](onboard-gcp.md). +- For information on how to enable or disable the controller after onboarding is complete, see [Enable or disable the controller](onboard-enable-controller-after-onboarding.md). +- For information on how to add an account/subscription/project after onboarding is complete, see [Add an account/subscription/project after onboarding is complete](onboard-add-account-after-onboarding.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-azure.md b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-azure.md new file mode 100644 index 0000000000000..9b21f89b3dbc7 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-azure.md @@ -0,0 +1,99 @@ +--- +title: Onboard a Microsoft Azure subscription in Permissions Management +description: How to a Microsoft Azure subscription on Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 04/20/2022 +ms.author: kenwith +--- + +# Onboard a Microsoft Azure subscription + +> [!IMPORTANT] +> Microsoft Entra Permissions Management (Permissions Management) is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +> [!NOTE] +> The Permissions Management PREVIEW is currently not available for tenants hosted in the European Union (EU). + +This article describes how to onboard a Microsoft Azure subscription or subscriptions on Permissions Management (Permissions Management). Onboarding a subscription creates a new authorization system to represent the Azure subscription in Permissions Management. + +> [!NOTE] +> A *global administrator* or *super admin* (an admin for all authorization system types) can perform the tasks in this article after the global administrator has initially completed the steps provided in [Enable Permissions Management on your Azure Active Directory tenant](onboard-enable-tenant.md). + +## Prerequisites + +To add Permissions Management to your Azure AD tenant: +- You must have an Azure AD user account and an Azure command-line interface (Azure CLI) on your system, or an Azure subscription. 
If you don't already have one, [create a free account](https://azure.microsoft.com/free/). +- You must have **Microsoft.Authorization/roleAssignments/write** permission at the subscription or management group scope to perform these tasks. If you don't have this permission, you can ask someone who has this permission to perform these tasks for you. + + +## View a training video on enabling Permissions Management in your Azure AD tenant + +To view a video on how to enable Permissions Management in your Azure AD tenant, select [Enable Permissions Management in your Azure AD tenant](https://www.youtube.com/watch?v=-fkfeZyevoo). + +## How to onboard an Azure subscription + +1. If the **Data Collectors** dashboard isn't displayed when Permissions Management launches: + + - In the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. + +1. On the **Data Collectors** dashboard, select **Azure**, and then select **Create Configuration**. + +### 1. Add Azure subscription details + +1. On the **Permissions Management Onboarding - Azure Subscription Details** page, enter the **Subscription IDs** that you want to onboard. + + > [!NOTE] + > To locate the Azure subscription IDs, open the **Subscriptions** page in Azure. + > You can enter up to 10 subscriptions IDs. Select the plus sign **(+)** icon next to the text box to enter more subscriptions. + +1. From the **Scope** dropdown, select **Subscription** or **Management Group**. The script box displays the role assignment script. + + > [!NOTE] + > Select **Subscription** if you want to assign permissions separately for each individual subscription. The generated script has to be executed once per subscription. + > Select **Management Group** if all of your subscriptions are under one management group. The generated script must be executed once for the management group. + +1. To give this role assignment to the service principal, copy the script to a file on your system where Azure CLI is installed and execute it. + + You can execute the script once for each subscription, or once for all the subscriptions in the management group. + +1. From the **Enable Controller** dropdown, select: + + - **True**, if you want the controller to provide Permissions Management with read and write access so that any remediation you want to do from the Permissions Management platform can be done automatically. + - **False**, if you want the controller to provide Permissions Management with read-only access. + +1. Return to **Permissions Management Onboarding - Azure Subscription Details** page and select **Next**. + +### 2. Review and save. + +- In **Permissions Management Onboarding – Summary** page, review the information you've added, and then select **Verify Now & Save**. + + The following message appears: **Successfully Created Configuration.** + + On the **Data Collectors** tab, the **Recently Uploaded On** column displays **Collecting**. The **Recently Transformed On** column displays **Processing.** + + You have now completed onboarding Azure, and Permissions Management has started collecting and processing your data. + +### 3. View the data. + +- To view the data, select the **Authorization Systems** tab. + + The **Status** column in the table displays **Collecting Data.** + + The data collection process will take some time, depending on the size of the account and how much data is available for collection. 
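+
+While you wait, you can optionally confirm from the Azure CLI that the role assignment created by the script in the **Add Azure subscription details** step is in place. The following is a minimal sketch, not part of the official procedure: it assumes the script assigned a role to the Permissions Management (Cloud Infrastructure Entitlement Management) service principal at subscription scope, the exact role name the generated script assigns may differ, and `<subscription-id>` is a placeholder for the subscription you onboarded. The application ID used below is the one shown in [Enable Permissions Management in your organization](onboard-enable-tenant.md).
+
+```bash
+# Optional check: list role assignments held by the Permissions Management
+# service principal on the subscription you onboarded.
+az role assignment list \
+  --assignee "b46c3ac5-9da6-418f-a849-0a07a10b3c6c" \
+  --scope "/subscriptions/<subscription-id>" \
+  --query "[].{role:roleDefinitionName, scope:scope}" \
+  --output table
+```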
+ + +## Next steps + +- For information on how to onboard an Amazon Web Services (AWS) account, see [Onboard an Amazon Web Services (AWS) account](onboard-aws.md). +- For information on how to onboard a Google Cloud Platform (GCP) project, see [Onboard a Google Cloud Platform (GCP) project](onboard-gcp.md). +- For information on how to enable or disable the controller after onboarding is complete, see [Enable or disable the controller](onboard-enable-controller-after-onboarding.md). +- For information on how to add an account/subscription/project after onboarding is complete, see [Add an account/subscription/project after onboarding is complete](onboard-add-account-after-onboarding.md). +- For an overview on Permissions Management, see [What's Permissions Management?](overview.md). +- For information on how to start viewing information about your authorization system in Permissions Management, see [View key statistics and data about your authorization system](ui-dashboard.md). \ No newline at end of file diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-enable-controller-after-onboarding.md b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-enable-controller-after-onboarding.md new file mode 100644 index 0000000000000..f8fa037bb9114 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-enable-controller-after-onboarding.md @@ -0,0 +1,102 @@ +--- +title: Enable or disable the controller in Permissions Management after onboarding is complete +description: How to enable or disable the controller in Permissions Management after onboarding is complete. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Enable or disable the controller after onboarding is complete + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article describes how to enable or disable the controller in Microsoft Azure and Google Cloud Platform (GCP) after onboarding is complete. + +This article also describes how to enable the controller in Amazon Web Services (AWS) if you disabled it during onboarding. You can only enable the controller in AWS at this time; you can't disable it. + +## Enable the controller in AWS + +> [!NOTE] +> You can only enable the controller in AWS; you can't disable it at this time. + +1. Sign in to the AWS console of the member account in a separate browser window. +1. Go to the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. +1. On the **Data Collectors** dashboard, select **AWS**, and then select **Create Configuration**. +1. On the **Permissions Management Onboarding - AWS Member Account Details** page, select **Launch Template**. + + The **AWS CloudFormation create stack** page opens, displaying the template. +1. In the **CloudTrailBucketName** box, enter a name. + + You can copy and paste the **CloudTrailBucketName** name from the **Trails** page in AWS. + + > [!NOTE] + > A *cloud bucket* collects all the activity in a single account that Permissions Management monitors. 
Enter the name of a cloud bucket here to provide Permissions Management with the access required to collect activity data. + +1. In the **EnableController** box, from the drop-down list, select **True** to provide Permissions Management with read and write access so that any remediation you want to do from the Permissions Management platform can be done automatically. + +1. Scroll to the bottom of the page, and in the **Capabilities** box and select **I acknowledge that AWS CloudFormation might create IAM resources with custom names**. Then select **Create stack**. + + This AWS CloudFormation stack creates a collection role in the member account with necessary permissions (policies) for data collection. A trust policy is set on this role to allow the OIDC role created in your AWS OIDC account to access it. These entities are listed in the **Resources** tab of your CloudFormation stack. + +1. Return to Permissions Management, and on the Permissions Management **Onboarding - AWS Member Account Details** page, select **Next**. +1. On **Permissions Management Onboarding – Summary** page, review the information you've added, and then select **Verify Now & Save**. + + The following message appears: **Successfully created configuration.** + +## Enable or disable the controller in Azure + + +1. In Azure, open the **Access control (IAM)** page. +1. In the **Check access** section, in the **Find** box, enter **Cloud Infrastructure Entitlement Management**. + + The **Cloud Infrastructure Entitlement Management assignments** page appears, displaying the roles assigned to you. + + - If you have read-only permission, the **Role** column displays **Reader**. + - If you have administrative permission, the **Role** column displays **User Access Administrative**. + +1. To add the administrative role assignment, return to the **Access control (IAM)** page, and then select **Add role assignment**. +1. Add or remove the role assignment for Cloud Infrastructure Entitlement Management. + +1. Go to the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. +1. On the **Data Collectors** dashboard, select **Azure**, and then select **Create Configuration**. +1. On the **Permissions Management Onboarding - Azure Subscription Details** page, enter the **Subscription ID**, and then select **Next**. +1. On **Permissions Management Onboarding – Summary** page, review the controller permissions, and then select **Verify Now & Save**. + + The following message appears: **Successfully Created Configuration.** + + +## Enable or disable the controller in GCP + +1. Execute the **gcloud auth login**. +1. Follow the instructions displayed on the screen to authorize access to your Google account. +1. Execute the **sh mciem-workload-identity-pool.sh** to create the workload identity pool, provider, and service account. +1. Execute the **sh mciem-member-projects.sh** to give Permissions Management permissions to access each of the member projects. + + - If you want to manage permissions through Permissions Management, select **Y** to **Enable controller**. + - If you want to onboard your projects in read-only mode, select **N** to **Disable controller**. + +1. Optionally, execute **mciem-enable-gcp-api.sh** to enable all recommended GCP APIs. + +1. Go to the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. +1. 
On the **Data Collectors** dashboard, select **GCP**, and then select **Create Configuration**. +1. On the **Permissions Management Onboarding - Azure AD OIDC App Creation** page, select **Next**. +1. On the **Permissions Management Onboarding - GCP OIDC Account Details & IDP Access** page, enter the **OIDC Project Number** and **OIDC Project ID**, and then select **Next**. +1. On the **Permissions Management Onboarding - GCP Project IDs** page, enter the **Project IDs**, and then select **Next**. +1. On the **Permissions Management Onboarding – Summary** page, review the information you've added, and then select **Verify Now & Save**. + + The following message appears: **Successfully Created Configuration.** + +## Next steps + +- For information on how to onboard an Amazon Web Services (AWS) account, see [Onboard an AWS account](onboard-aws.md). +- For information on how to onboard a Microsoft Azure subscription, see [Onboard a Microsoft Azure subscription](onboard-azure.md). +- For information on how to onboard a Google Cloud Platform (GCP) project, see [Onboard a GCP project](onboard-gcp.md). +- For information on how to add an account/subscription/project after onboarding is complete, see [Add an account/subscription/project after onboarding is complete](onboard-add-account-after-onboarding.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-enable-tenant.md b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-enable-tenant.md new file mode 100644 index 0000000000000..3bae1ac5a5865 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-enable-tenant.md @@ -0,0 +1,112 @@ +--- +title: Enable Permissions Management in your organization +description: How to enable Permissions Management in your organization. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 04/20/2022 +ms.author: kenwith +--- + +# Enable Permissions Management in your organization + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + + +> [!NOTE] +> The Permissions Management PREVIEW is currently not available for tenants hosted in the European Union (EU). + + + +This article describes how to enable Permissions Management in your organization. Once you've enabled Permissions Management, you can connect it to your Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) platforms. + +> [!NOTE] +> To complete this task, you must have *global administrator* permissions as a user in that tenant. You can't enable Permissions Management as a user from other tenant who has signed in via B2B or via Azure Lighthouse. + +## Prerequisites + +To enable Permissions Management in your organization: + +- You must have an Azure AD tenant. If you don't already have one, [create a free account](https://azure.microsoft.com/free/). +- You must be eligible for or have an active assignment to the global administrator role as a user in that tenant. + +> [!NOTE] +> During public preview, Permissions Management doesn't perform a license check. 
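+
+If you plan to run the setup scripts from the Azure CLI, it can also help to confirm that your CLI session is signed in to the tenant you intend to enable. This check is optional and isn't part of the procedure; it assumes the Azure CLI is installed and that you've already run `az login`.
+
+```bash
+# Optional check: show the tenant ID and signed-in user for the current Azure CLI session.
+az account show --query "{tenantId: tenantId, user: user.name}" --output table
+```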
+ +## View a training video on enabling Permissions Management + +- To view a video on how to enable Permissions Management in your Azure AD tenant, select [Enable Permissions Management in your Azure AD tenant](https://www.youtube.com/watch?v=-fkfeZyevoo). +- To view a video on how to configure and onboard AWS accounts in Permissions Management, select [Configure and onboard AWS accounts](https://www.youtube.com/watch?v=R6K21wiWYmE). +- To view a video on how to configure and onboard GCP accounts in Permissions Management, select [Configure and onboard GCP accounts](https://www.youtube.com/watch?app=desktop&v=W3epcOaec28). + + +## How to enable Permissions Management on your Azure AD tenant + +1. In your browser: + 1. Go to [Azure services](https://portal.azure.com) and use your credentials to sign in to [Azure Active Directory](https://ms.portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/Overview). + 1. If you aren't already authenticated, sign in as a global administrator user. + 1. If needed, activate the global administrator role in your Azure AD tenant. + 1. In the Azure AD portal, select **Features highlights**, and then select **Permissions Management**. + + 1. If you're prompted to select a sign in account, sign in as a global administrator for a specified tenant. + + The **Welcome to Permissions Management** screen appears, displaying information on how to enable Permissions Management on your tenant. + +1. To provide access to the Permissions Management application, create a service principal. + + An Azure service principal is a security identity used by user-created apps, services, and automation tools to access specific Azure resources. + + > [!NOTE] + > To complete this step, you must have Azure CLI or Azure PowerShell on your system, or an Azure subscription where you can run Cloud Shell. + + - To create a service principal that points to the Permissions Management application via Cloud Shell: + + 1. Copy the script on the **Welcome** screen: + + `az ad sp create --id b46c3ac5-9da6-418f-a849-0a07a10b3c6c` + + 1. If you have an Azure subscription, return to the Azure AD portal and select **Cloud Shell** on the navigation bar. + If you don't have an Azure subscription, open a command prompt on a Windows Server. + 1. If you have an Azure subscription, paste the script into Cloud Shell and press **Enter**. + + - For information on how to create a service principal through the Azure portal, see [Create an Azure service principal with the Azure CLI](/cli/azure/create-an-azure-service-principal-azure-cli). + + - For information on the **az** command and how to sign in with the no subscriptions flag, see [az login](/cli/azure/reference-index?view=azure-cli-latest#az-login&preserve-view=true). + + - For information on how to create a service principal via Azure PowerShell, see [Create an Azure service principal with Azure PowerShell](/powershell/azure/create-azure-service-principal-azureps?view=azps-7.1.0&preserve-view=true). + + 1. After the script runs successfully, the service principal attributes for Permissions Management display. Confirm the attributes. + + The **Cloud Infrastructure Entitlement Management** application displays in the Azure AD portal under **Enterprise applications**. + +1. Return to the **Welcome to Permissions Management** screen and select **Enable Permissions Management**. + + You have now completed enabling Permissions Management on your tenant. Permissions Management launches with the **Data Collectors** dashboard. 
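+
+As an optional check after the script on the **Welcome** screen runs, you can confirm from the Azure CLI that the service principal for the Permissions Management application now exists in your tenant. The application ID below is the same one used in that script; the `--query` projection is only an illustration.
+
+```bash
+# Optional check: confirm the Cloud Infrastructure Entitlement Management
+# service principal was created in the tenant.
+az ad sp show --id b46c3ac5-9da6-418f-a849-0a07a10b3c6c \
+  --query "{displayName: displayName, appId: appId}"
+```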
+ +## Configure data collection settings + +Use the **Data Collectors** dashboard in Permissions Management to configure data collection settings for your authorization system. + +1. If the **Data Collectors** dashboard isn't displayed when Permissions Management launches: + + - In the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. + +1. Select the authorization system you want: **AWS**, **Azure**, or **GCP**. + +1. For information on how to onboard an AWS account, Azure subscription, or GCP project into Permissions Management, select one of the following articles and follow the instructions: + + - [Onboard an AWS account](onboard-aws.md) + - [Onboard an Azure subscription](onboard-azure.md) + - [Onboard a GCP project](onboard-gcp.md) + +## Next steps + +- For an overview of Permissions Management, see [What's Permissions Management?](overview.md) +- For a list of frequently asked questions (FAQs) about Permissions Management, see [FAQs](faqs.md). +- For information on how to start viewing information about your authorization system in Permissions Management, see [View key statistics and data about your authorization system](ui-dashboard.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-gcp.md b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-gcp.md new file mode 100644 index 0000000000000..f811ac098cdd4 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-gcp.md @@ -0,0 +1,134 @@ +--- +title: Onboard a Google Cloud Platform (GCP) project in Permissions Management +description: How to onboard a Google Cloud Platform (GCP) project on Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 04/20/2022 +ms.author: kenwith +--- + +# Onboard a Google Cloud Platform (GCP) project + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + + +> [!NOTE] +> The Permissions Management PREVIEW is currently not available for tenants hosted in the European Union (EU). + + +This article describes how to onboard a Google Cloud Platform (GCP) project on Permissions Management. + +> [!NOTE] +> A *global administrator* or *super admin* (an admin for all authorization system types) can perform the tasks in this article after the global administrator has initially completed the steps provided in [Enable Permissions Management on your Azure Active Directory tenant](onboard-enable-tenant.md). + +## View a training video on configuring and onboarding a GCP account + +To view a video on how to configure and onboard GCP accounts in Permissions Management, select [Configure and onboard GCP accounts](https://www.youtube.com/watch?app=desktop&v=W3epcOaec28). + + +## Onboard a GCP project + +1. If the **Data Collectors** dashboard isn't displayed when Permissions Management launches: + + - In the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. + +1. On the **Data Collectors** tab, select **GCP**, and then select **Create Configuration**. + +### 1. Create an Azure AD OIDC app. + +1. 
On the **Permissions Management Onboarding - Azure AD OIDC App Creation** page, enter the **OIDC Azure App Name**. + + This app is used to set up an OpenID Connect (OIDC) connection to your GCP project. OIDC is an interoperable authentication protocol based on the OAuth 2.0 family of specifications. The scripts generated will create the app of this specified name in your Azure AD tenant with the right configuration. + +1. To create the app registration, copy the script and run it in your command-line app. + + > [!NOTE] + > 1. To confirm that the app was created, open **App registrations** in Azure and, on the **All applications** tab, locate your app. + > 1. Select the app name to open the **Expose an API** page. The **Application ID URI** displayed in the **Overview** page is the *audience value* used while making an OIDC connection with your AWS account. + + 1. Return to Permissions Management, and in the **Permissions Management Onboarding - Azure AD OIDC App Creation**, select **Next**. + +### 2. Set up a GCP OIDC project. + +1. In the **Permissions Management Onboarding - GCP OIDC Account Details & IDP Access** page, enter the **OIDC Project ID** and **OIDC Project Number** of the GCP project in which the OIDC provider and pool will be created. You can change the role name to your requirements. + + > [!NOTE] + > You can find the **Project number** and **Project ID** of your GCP project on the GCP **Dashboard** page of your project in the **Project info** panel. + +1. You can change the **OIDC Workload Identity Pool Id**, **OIDC Workload Identity Pool Provider Id** and **OIDC Service Account Name** to meet your requirements. + + Optionally, specify **G-Suite IDP Secret Name** and **G-Suite IDP User Email** to enable G-Suite integration. + + You can either download and run the script at this point or you can do it in the Google Cloud Shell, as described [later in this article](onboard-gcp.md#4-run-scripts-in-cloud-shell-optional-if-not-already-executed). +1. Select **Next**. + +### 3. Set up GCP member projects. + +1. In the **Permissions Management Onboarding - GCP Project Ids** page, enter the **Project IDs**. + + You can enter up to 10 GCP project IDs. Select the plus icon next to the text box to insert more project IDs. + +1. You can choose to download and run the script at this point, or you can do it via Google Cloud Shell, as described in the [next step](onboard-gcp.md#4-run-scripts-in-cloud-shell-optional-if-not-already-executed). + +### 4. Run scripts in Cloud Shell. (Optional if not already executed) + +1. In the **Permissions Management Onboarding - GCP Project Ids** page, select **Launch SSH**. +1. To copy all your scripts into your current directory, in **Open in Cloud Shell**, select **Trust repo**, and then select **Confirm**. + + The Cloud Shell provisions the Cloud Shell machine and makes a connection to your Cloud Shell instance. + + > [!NOTE] + > Follow the instructions in the browser as they may be different from the ones given here. + + The **Welcome to Permissions Management GCP onboarding** screen appears, displaying steps you must complete to onboard your GCP project. + +### 5. Paste the environment vars from the Permissions Management portal. + +1. Return to Permissions Management and select **Copy export variables**. +1. In the GCP Onboarding shell editor, paste the variables you copied, and then press **Enter**. +1. Execute the **gcloud auth login**. +1. Follow instructions displayed on the screen to authorize access to your Google account. +1. 
Execute the **sh mciem-workload-identity-pool.sh** to create the workload identity pool, provider, and service account. +1. Execute the **sh mciem-member-projects.sh** to give Permissions Management permissions to access each of the member projects. + + - If you want to manage permissions through Permissions Management, select **Y** to **Enable controller**. + + - If you want to onboard your projects in read-only mode, select **N** to **Disable controller**. + +1. Optionally, execute **mciem-enable-gcp-api.sh** to enable all recommended GCP APIs. + +1. Return to **Permissions Management Onboarding - GCP Project Ids**, and then select **Next**. + +### 6. Review and save. + +1. In the **Permissions Management Onboarding – Summary** page, review the information you've added, and then select **Verify Now & Save**. + + The following message appears: **Successfully Created Configuration.** + + On the **Data Collectors** tab, the **Recently Uploaded On** column displays **Collecting**. The **Recently Transformed On** column displays **Processing.** + + You have now completed onboarding GCP, and Permissions Management has started collecting and processing your data. + +### 7. View the data. + +- To view the data, select the **Authorization Systems** tab. + + The **Status** column in the table displays **Collecting Data.** + + The data collection process may take some time, depending on the size of the account and how much data is available for collection. + + + +## Next steps + +- For information on how to onboard an Amazon Web Services (AWS) account, see [Onboard an Amazon Web Services (AWS) account](onboard-aws.md). +- For information on how to onboard a Microsoft Azure subscription, see [Onboard a Microsoft Azure subscription](onboard-azure.md). +- For information on how to enable or disable the controller after onboarding is complete, see [Enable or disable the controller](onboard-enable-controller-after-onboarding.md). +- For information on how to add an account/subscription/project after onboarding is complete, see [Add an account/subscription/project after onboarding is complete](onboard-add-account-after-onboarding.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/overview.md b/articles/active-directory/cloud-infrastructure-entitlement-management/overview.md new file mode 100644 index 0000000000000..67286f887251c --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/overview.md @@ -0,0 +1,78 @@ +--- +title: What's Permissions Management? +description: An introduction to Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: overview +ms.date: 04/20/2022 +ms.author: kenwith +--- + +# What's Permissions Management? + + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +> [!NOTE] +> The Permissions Management PREVIEW is currently not available for tenants hosted in the European Union (EU). + +## Overview + +Permissions Management is a cloud infrastructure entitlement management (CIEM) solution that provides comprehensive visibility into permissions assigned to all identities. 
For example, it surfaces over-privileged workload and user identities, actions, and resources across multi-cloud infrastructures in Microsoft Azure, Amazon Web Services (AWS), and Google Cloud Platform (GCP). + +Permissions Management detects, automatically right-sizes, and continuously monitors unused and excessive permissions. + +Organizations have to consider permissions management as a central piece of their Zero Trust security to implement least privilege access across their entire infrastructure: + +- Organizations are increasingly adopting a multi-cloud strategy and are struggling with the lack of visibility and the increasing complexity of managing access permissions. +- With the proliferation of identities and cloud services, the number of high-risk cloud permissions is exploding, expanding the attack surface for organizations. +- IT security teams are under increased pressure to ensure access to their expanding cloud estate is secure and compliant. +- The inconsistency of cloud providers' native access management models makes it even more complex for security and identity teams to manage permissions and enforce least privilege access policies across their entire environment. + +:::image type="content" source="media/cloudknox-overview/cloudknox-key-cases.png" alt-text="CloudKnox Permissions Management."::: + +## Key use cases + +Permissions Management allows customers to address three key use cases: *discover*, *remediate*, and *monitor*. + +### Discover + +Customers can assess permission risks by evaluating the gap between permissions granted and permissions used. + +- Cross-cloud permissions discovery: Granular and normalized metrics for key cloud platforms: AWS, Azure, and GCP. +- Permission Creep Index (PCI): An aggregated metric that periodically evaluates the level of risk associated with the number of unused or excessive permissions across your identities and resources. It measures how much damage identities can cause based on the permissions they have. +- Permission usage analytics: Multi-dimensional view of permissions risk for all identities, actions, and resources. + +### Remediate + +Customers can right-size permissions based on usage, grant new permissions on demand, and automate just-in-time access for cloud resources. + +- Automated deletion of permissions unused for the past 90 days. +- Permissions on-demand: Grant identities permissions on demand for a time-limited period or on an as-needed basis. + + +### Monitor + +Customers can detect anomalous activities with machine learning-powered (ML-powered) alerts and generate detailed forensic reports. + +- ML-powered anomaly detection. +- Context-rich forensic reports around identities, actions, and resources to support rapid investigation and remediation. + +Permissions Management deepens Zero Trust security strategies by augmenting the least privilege access principle, allowing customers to: + +- Get comprehensive visibility: Discover which identity is doing what, where, and when. +- Automate least privilege access: Use access analytics to ensure identities have the right permissions, at the right time. +- Unify access policies across infrastructure as a service (IaaS) platforms: Implement consistent security policies across your cloud infrastructure. + + + +## Next steps + +- For information on how to onboard Permissions Management for your organization, see [Enable Permissions Management in your organization](onboard-enable-tenant.md). +- For a list of frequently asked questions (FAQs) about Permissions Management, see [FAQs](faqs.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/product-account-explorer.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-account-explorer.md new file mode 100644 index 0000000000000..d36ed904a965b --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-account-explorer.md @@ -0,0 +1,78 @@ +--- +title: View roles and identities that can access account information from an external account +description: How to view information about identities that can access accounts from an external account in Permissions Management. +services: active-directory +manager: rkarlin +ms.service: active-directory +ms.topic: how-to +author: kenwith +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# View roles and identities that can access account information from an external account + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +You can view information about users, groups, and resources that can access account information from an external account in Permissions Management. + +## Display information about users, groups, or tasks + +1. In Permissions Management, select the **Usage analytics** tab, and then, from the dropdown, select one of the following: + + - **Users** +- **Group** +- **Active resources** +- **Active tasks** +- **Serverless functions** + +1. To choose an account from your authorization system, select the lock icon in the left panel. +1. In the **Authorization systems** pane, select an account, then select **Apply**. +1. To choose a user, role, or group, select the person icon. +1. Select a user or group, then select **Apply**. +1. To choose an account from your authorization system, select it from the Authorization Systems menu. +1. In the user type filter, select user, role, or group. +1. In the **Task** filter, select **All** or **High-risk tasks**, then select **Apply**. +1. To delete a task, select **Delete**, then select **Apply**. + +## Export information about users, groups, or tasks + +To export the data in comma-separated values (CSV) file format, select **Export** from the top-right corner of the table. + +## View users and roles + +1. To view users and roles, select the lock icon, and then select the person icon to open the **Users** pane. +1. To view the **Role summary**, select the "eye" icon to the right of the role name. + + The following details display: + - **Policies**: A list of all the policies attached to the role. + - **Trusted entities**: The identities from external accounts that can assume this role. + +1. To view all the identities from various accounts that can assume this role, select the down arrow to the left of the role name. +1. To view a graph of all the identities that can access the specified account and through which role(s), select the role name. + + If Permissions Management is monitoring the external account, it lists specific identities from the accounts that can assume this role. Otherwise, it lists the identities declared in the **Trusted entity** section. + + **Connecting roles**: Lists the following roles for each account: + - *Direct roles* that are trusted by the account role. 
+ - *Intermediary roles* that aren't directly trusted by the account role but are assumable by identities through role-chaining. + +1. To view all the roles from that account that are used to access the specified account, select the down arrow to the left of the account name. +1. To view the trusted identities declared by the role, select the down arrow to the left of the role name. + + The trusted identities for the role are listed only if the account is being monitored by Permissions Management. + +1. To view the role definition, select the "eye" icon to the right of the role name. + + When you select the down arrow and expand details, a search box is displayed. Enter your criteria in this box to search for specific roles. + + **Identities with access**: Lists the identities that come from external accounts: + - To view all the identities from that account that can access the specified account, select the down arrow to the left of the account name. + - To view the **Role summary** for EC2 instances and Lambda functions, select the "eye" icon to the right of the identity name. + - To view a graph of how the identity can access the specified account and through which role(s), select the identity name. + +1. The **Info** tab displays the **Privilege creep index** and **Service control policy (SCP)** information about the account. + +For more information about the **Privilege creep index** and SCP information, see [View key statistics and data about your authorization system](ui-dashboard.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/product-account-settings.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-account-settings.md new file mode 100644 index 0000000000000..7219ed8d1fb91 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-account-settings.md @@ -0,0 +1,42 @@ +--- +title: View personal and organization information in Permissions Management +description: How to view personal and organization information in the Account settings dashboard in Permissions Management. +services: active-directory +manager: rkarlin +ms.service: active-directory +ms.topic: overview +author: kenwith +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# View personal and organization information + +> [!IMPORTANT] +> Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +The **Account settings** dashboard in Permissions Management allows you to view personal information, passwords, and account preferences. +This information can't be modified because the user information is pulled from Azure AD. Only the **User Session Timeout (min)** setting can be changed. + +## View personal information + +1. In the Permissions Management home page, select the down arrow to the right of the **User** (your initials) menu, and then select **Account Settings**. + + The **Personal Information** box displays your **First Name**, **Last Name**, and the **Email Address** that was used to register your account on Permissions Management. + +## View current organization information + +1. In the Permissions Management home page, select the down arrow to the right of the **User** (your initials) menu, and then select **Account Settings**.
+ + The **Current Organization Information** displays the **Name** of your organization, the **Tenant ID** box, and the **User Session Timeout (min)**. + +1. To change the duration of the **User Session Timeout (min)**, select **Edit** (the pencil icon), and then enter the number of minutes before you want a user session to time out. +1. Select the check mark to confirm your new setting. + + +## Next steps + +- For information about how to manage user information, see [Manage users and groups with the User management dashboard](ui-user-management.md). +- For information about how to view information about active and completed tasks, see [View information about active and completed tasks](ui-tasks.md). +- For information about how to select group-based permissions settings, see [Select group-based permissions settings](how-to-create-group-based-permissions.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/product-audit-trail.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-audit-trail.md new file mode 100644 index 0000000000000..fc0679b50dabc --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-audit-trail.md @@ -0,0 +1,401 @@ +--- +title: Filter and query user activity in Permissions Management +description: How to filter and query user activity in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Filter and query user activity + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +The **Audit** dashboard in Permissions Management details all user activity performed in your authorization system. It captures all high-risk activity in a centralized location, and allows system administrators to query the logs. The **Audit** dashboard enables you to: + +- Create and save new queries so you can access key data points easily. +- Query across multiple authorization systems in one query. + +## Filter information by authorization system + +If you haven't used filters before, the default filter is the first authorization system in the filter list. + +If you have used filters before, the default filter is the last filter you selected. + +1. To display the **Audit** dashboard, on the Permissions Management home page, select **Audit**. + +1. To select your authorization system type, in the **Authorization System Type** box, select Amazon Web Services (**AWS**), Microsoft Azure (**Azure**), Google Cloud Platform (**GCP**), or Platform (**Platform**). + +1. To select your authorization system, in the **Authorization System** box: + + - From the **List** subtab, select the accounts you want to use. + - From the **Folders** subtab, select the folders you want to use. + +1. To view your query results, select **Apply**. + +## Create, view, modify, or delete a query + +There are several different query parameters you can configure individually or in combination. The query parameters and corresponding instructions are listed in the following sections. + +- To create a new query, select **New Query**. +- To view an existing query, select **View** (the eye icon).
+- To edit an existing query, select **Edit** (the pencil icon). +- To delete a function line in a query, select **Delete** (the minus sign **-** icon). +- To create multiple queries at one time, select **Add New Tab** to the right of the **Query** tabs that are displayed. + + You can open a maximum of six query tabs at the same time. A message appears when you've reached the maximum. + +## Create a query with specific parameters + +### Create a query with a date + +1. In the **New Query** section, the default parameter displayed is **Date In "Last day"**. + + The first-line parameter always defaults to **Date** and can't be deleted. + +1. To edit date details, select **Edit** (the pencil icon). + + To view query details, select **View** (the eye icon). + +1. Select **Operator**, and then select an option: + - **In**: Select this option to set a time range from the past day to the past year. + - **Is**: Select this option to choose a specific date from the calendar. + - **Custom**: Select this option to set a date range from the **From** and **To** calendars. + +1. To run the query on the current selection, select **Search**. + +1. To save your query, select **Save**. + + To clear the recent selections, select **Reset**. + +### View operator options for identities + +The **Operator** menu displays the following options depending on the identity you select in the first dropdown: + +- **Is** / **Is Not**: View a list of all available usernames. You can either select or enter a username in the box. +- **Contains** / **Not Contains**: Enter text that the **Username** should or shouldn't contain, for example, *Permissions Management*. +- **In** / **Not In**: View a list of all available usernames and select multiple usernames. + +### Create a query with a username + +1. In the **New query** section, select **Add**. + +1. From the menu, select **Username**. + +1. From the **Operator** menu, select the required option. + +1. To add criteria to this section, select **Add**. + + You can change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with the username **Test**. + +1. Select the plus (**+**) sign, select **Or** with **Contains**, and then enter a username, for example, *Permissions Management*. + +1. To remove a row of criteria, select **Remove** (the minus sign **-** icon). + +1. To run the query on the current selection, select **Search**. + +1. To clear the recent selections, select **Reset**. + +### Create a query with a resource name + +1. In the **New query** section, select **Add**. + +1. From the menu, select **Resource Name**. + +1. From the **Operator** menu, select the required option. + +1. To add criteria to this section, select **Add**. + + You can change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with resource name **Test**. + +1. Select the plus (**+**) sign, select **Or** with **Contains**, and then enter a resource name, for example, *Permissions Management*. + +1. To remove a row of criteria, select **Remove** (the minus sign **-** icon). + +1. To run the query on the current selection, select **Search**. + +1. To clear the recent selections, select **Reset**. + +### Create a query with a resource type + +1. In the **New Query** section, select **Add**. + +1. From the menu, select **Resource Type**. + +1. From the **Operator** menu, select the required option. + +1.
To add criteria to this section, select **Add**. + +1. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with resource type **s3::bucket**. + +1. Select the plus (**+**) sign, select **Or** with **Is**, and then enter or select `ec2::instance`. + +1. To remove a row of criteria, select **Remove** (the minus sign **-** icon). + +1. To run the query on the current selection, select **Search**. + +1. To clear the recent selections, select **Reset**. + + +### Create a query with a task name + +1. In the **New Query** section, select **Add**. + +1. From the menu, select **Task Name**. + +1. From the **Operator** menu, select the required option. + +1. To add criteria to this section, select **Add**. + +1. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with task name **s3:CreateBucket**. + +1. Select **Add**, select **Or** with **Is**, and then enter or select `ec2:TerminateInstance`. + +1. To remove a row of criteria, select **Remove** (the minus sign **-** icon). + +1. To run the query on the current selection, select **Search**. + +1. To clear the recent selections, select **Reset**. + +### Create a query with a state + +1. In the **New Query** section, select **Add**. + +1. From the menu, select **State**. + +1. From the **Operator** menu, select the required option. + + - **Is** / **Is not**: Allows a user to select in the value field and select **Authorization Failure**, **Error**, or **Success**. + +1. To add criteria to this section, select **Add**. + +1. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with State **Authorization Failure**. + +1. Select the **Add** icon, select **Or** with **Is**, and then select **Success**. + +1. To remove a row of criteria, select **Remove** (the minus sign **-** icon). + +1. To run the query on the current selection, select **Search**. + +1. To clear the recent selections, select **Reset**. + +### Create a query with a role name + +1. In the **New query** section, select **Add**. + +2. From the menu, select **Role Name**. + +3. From the **Operator** menu, select the required option. + +4. To add criteria to this section, select **Add**. + +5. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Contains** with free text **Test**. + +6. Select the **Add** icon, select **Or** with **Contains**, and then enter your criteria, for example *Permissions Management*. + +7. To remove a row of criteria, select **Remove** (the minus sign **-** icon). + +8. To run the query on the current selection, select **Search**. + +9. To clear the recent selections, select **Reset**. + +### Create a query with a role session name + +1. In the **New Query** section, select **Add**. + +2. From the menu, select **Role Session Name**. + +3. From the **Operator** menu, select the required option. + +4. To add criteria to this section, select **Add**. + +5. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Contains** with free text **Test**. + +6. Select the **Add** icon, select **Or** with **Contains**, and then enter your criteria, for example *Permissions Management*. + +7. 
To remove a row of criteria, select **Remove** (the minus sign **-** icon). + +8. To run the query on the current selection, select **Search**. + +9. To clear the recent selections, select **Reset**. + +### Create a query with an access key ID + +1. In the **New Query** section, select **Add**. + +2. From the menu, select **Access Key ID**. + +3. From the **Operator** menu, select the required option. + +4. To add criteria to this section, select **Add**. + +5. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Contains** with the free text `AKIAIFXNDW2Z2MPEH5OQ`. + +6. Select the **Add** icon, select **Or** with **Not** **Contains**, and then enter `AKIAVP2T3XG7JUZRM7WU`. + +7. To remove a row of criteria, select **Remove** (the minus sign **-** icon). + +8. To run the query on the current selection, select **Search**. + +9. To clear the recent selections, select **Reset**. + +### Create a query with a tag key + +1. In the **New Query** section, select **Add**. + +2. From the menu, select **Tag Key**. + +3. From the **Operator** menu, select the required option. + +4. To add criteria to this section, select **Add**. + +5. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is**, and you can enter or select **Test**. + +6. Select the **Add** icon, select **Or** with **Is**, and then enter your criteria, for example *Permissions Management*. + +7. To remove a row of criteria, select **Remove** (the minus sign **-** icon). + +8. To run the query on the current selection, select **Search**. + +9. To clear the recent selections, select **Reset**. + +### Create a query with a tag key value + +1. In the **New Query** section, select **Add**. + +2. From the menu, select **Tag Key Value**. + +3. From the **Operator** menu, select the required option. + +4. To add criteria to this section, select **Add**. + +5. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is**, and you can enter or select **Test**. + +6. Select the **Add** icon, select **Or** with **Is**, and then enter your criteria, for example *Permissions Management*. + +7. To remove a row of criteria, select **Remove** (the minus sign **-** icon). + +8. To run the query on the current selection, select **Search**. + +9. To clear the recent selections, select **Reset**. + +### View query results + +1. In the **Activity** table, your query results display in columns. + + The results display all executed tasks that aren't read-only. + +1. To sort each column by ascending or descending value, select the up or down arrows next to the column name. + + - **Identity Details**: The name of the identity, for example, the name of the role session performing the task. + + - To view the **Raw Events Summary**, which displays the full details of the event, next to the **Name** column, select **View**. + + - **Resource Name**: The name of the resource on which the task is being performed. + + If the column displays **Multiple**, it means multiple resources are listed in the column. + +1. To view a list of all resources, hover over **Multiple**. + + - **Resource Type**: Displays the type of resource, for example, *Key* (encryption key) or *Bucket* (storage). + - **Task Name**: The name of the task that was performed by the identity.
+ + An exclamation mark (**!**) next to the task name indicates that the task failed. + + - **Date**: The date when the task was performed. + + - **IP Address**: The IP address from which the user performed the task. + + - **Authorization System**: The authorization system name in which the task was performed. + +1. To download the results in comma-separated values (CSV) file format, select **Download**. + +## Save a query + +1. After you complete your query selections from the **New Query** section, select **Save**. + +2. In the **Query Name** box, enter a name for your query, and then select **Save**. + +3. To save a query with a different name, select the ellipses (**...**) next to **Save**, and then select **Save As**. + +4. Make your query selections from the **New Query** section, select the ellipses (**...**), and then select **Save As**. + +5. To save a new query, in the **Save Query** box, enter the name for the query, and then select **Save**. + +6. To save an existing query you've modified, select the ellipses (**...**). + + - To save a modified query under the same name, select **Save**. + - To save a modified query under a different name, select **Save As**. + +### View a saved query + +1. Select **Saved Queries**, and then select a query from the **Load Queries** list. + + A message box opens with the following options: **Load with the saved authorization system** or **Load with the currently selected authorization system**. + +1. Select the appropriate option, and then select **Load Queries**. + +1. View the query information: + + - **Query Name**: Displays the name of the saved query. + - **Query Type**: Displays whether the query is a *System* query or a *Custom* query. + - **Schedule**: Displays how often a report will be generated. You can schedule a one-time report or a monthly report. + - **Next On**: Displays the date and time the next report will be generated. + - **Format**: Displays the output format for the report, for example, CSV. + - **Last Modified On**: Displays the date on which the query was last modified. + +1. To view or set schedule details, select the gear icon, select **Create Schedule**, and then set the details. + + If a schedule has already been created, select the gear icon to open the **Edit Schedule** box. + + - **Repeat**: Sets how often the report should repeat. + - **Start On**: Sets the date when you want to receive the report. + - **At**: Sets the specific time when you want to receive the report. + - **Report Format**: Select the output type for the file, for example, CSV. + - **Share Report With**: The email address of the user who is creating the schedule is displayed in this field. You can add other email addresses. + +1. After selecting your options, select **Schedule**. + + +### Save a query under a different name + +- Select the ellipses (**...**). + + System queries have only one option: + + - **Duplicate**: Creates a duplicate of the query and names the file *Copy of XXX*. + + Custom queries have the following options: + + - **Rename**: Enter the new name of the query and select **Save**. + - **Delete**: Delete the saved query. + + The **Delete Query** box opens, asking you to confirm that you want to delete the query. Select **Yes** or **No**. + + - **Duplicate**: Creates a duplicate of the query and names it *Copy of XXX*. + - **Delete Schedule**: Deletes the schedule details for this query. + + This option isn't available if you haven't yet saved a schedule.
+ + The **Delete Schedule** box opens, asking you to confirm that you want to delete the schedule. Select **Yes** or **No**. + + +## Export the results of a query as a report + +- To export the results of the query, select **Export**. + + Permissions Management exports the results in comma-separated values (**CSV**) format, portable document format (**PDF**), or Microsoft Excel Open XML Spreadsheet (**XLSX**) format. + + +## Next steps + +- For information on how to view how users access information, see [Use queries to see how users access information](ui-audit-trail.md). +- For information on how to create a query, see [Create a custom query](how-to-create-custom-queries.md). +- For information on how to generate an on-demand report from a query, see [Generate an on-demand report from a query](how-to-audit-trail-results.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/product-dashboard.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-dashboard.md new file mode 100644 index 0000000000000..7822f837ca117 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-dashboard.md @@ -0,0 +1,82 @@ +--- +title: View data about the activity in your authorization system in Permissions Management +description: How to view data about the activity in your authorization system in the Permissions Management Dashboard in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + + + +# View data about the activity in your authorization system + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +The Permissions Management **Dashboard** provides an overview of the authorization system and account activity being monitored. You can use this dashboard to view data collected from your Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP) authorization systems. + +## View data about your authorization system + +1. In the Permissions Management home page, select **Dashboard**. +1. From the **Authorization systems type** dropdown, select **AWS**, **Azure**, or **GCP**. +1. Select the **Authorization System** box to display a **List** of accounts and **Folders** available to you. +1. Select the accounts and folders you want, and then select **Apply**. + + The **Permission Creep Index (PCI)** chart updates to display information about the accounts and folders you selected. The number of days since the information was last updated displays in the upper right corner. + +1. In the Permission Creep Index (PCI) graph, select a bubble. + + The bubble displays the number of identities that are considered high-risk. + + *High-risk* refers to the number of users who have permissions that exceed their normal or required usage. + +1. Select the box to display detailed information about the identities contributing to the **Low PCI**, **Medium PCI**, and **High PCI**. + +1. The **Highest PCI change** displays the authorization system name with the PCI number and the change number for the last seven days, if applicable. 
+ + - To view all the changes and PCI ratings in your authorization system, select **View all**. + +1. To return to the PCI graph, select the **Graph** icon in the upper right of the list box. + +For more information about the Permissions Management **Dashboard**, see [View key statistics and data about your authorization system](ui-dashboard.md). + +## View user data on the PCI heat map + +The **Permission Creep Index (PCI)** heat map shows the incurred risk of users with access to high-risk privileges. The distribution graph displays all the users who contribute to the privilege creep. It displays how many users contribute to a particular score. For example, if the score from the PCI chart is 14, the graph shows how many users have a score of 14. + +- To view detailed data about a user, select the number. + + The PCI trend graph shows you the historical trend of the PCI score over the last 90 days. + +- To download the **PCI History** report, select **Download** (the down arrow icon). + + +## View information about users, roles, resources, and PCI trends + +To view specific information about the following, select the number displayed on the heat map. + +- **Users**: Displays the total number of users and how many fall into the high, medium, and low categories. +- **Roles**: Displays the total number of roles and how many fall into the high, medium, and low categories. +- **Resources**: Displays the total number of resources and how many fall into the high, medium, and low categories. +- **PCI trend**: Displays a line graph of the PCI trend over the last several weeks. + +## View identity findings + +The **Identity** section below the heat map on the left side of the page shows all the relevant findings about identities, including roles that can access secret information, roles that are inactive, over provisioned active roles, and so on. + +- To expand the full list of identity findings, select **All findings**. + +## View resource findings + +The **Resource** section below the heat map on the right side of the page shows all the relevant findings about your resources. It includes unencrypted S3 buckets, open security groups, managed keys, and so on. + +## Next steps + +- For more information about how to view key statistics and data in the Dashboard, see [View key statistics and data about your authorization system](ui-dashboard.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/product-data-inventory.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-data-inventory.md new file mode 100644 index 0000000000000..50ad92ce49182 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-data-inventory.md @@ -0,0 +1,56 @@ +--- +title: Display an inventory of created resources and licenses for your authorization system +description: How to display an inventory of created resources and licenses for your authorization system in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Display an inventory of created resources and licenses for your authorization system + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. 
Microsoft makes no warranties, express or implied, with respect to the information provided here. + +You can use the **Inventory** dashboard in Permissions Management to display an inventory of created resources and licensing information for your authorization system and its associated accounts. + +## View resources created for your authorization system + +1. To access your inventory information, in the Permissions Management home page, select **Settings** (the gear icon). +1. Select the **Inventory** tab, select the **Inventory** subtab, and then select your authorization system type: + + - **AWS** for Amazon Web Services. + - **Azure** for Microsoft Azure. + - **GCP** for Google Cloud Platform. + + The **Inventory** tab displays information pertinent to your authorization system type. + +1. To change the columns displayed in the table, select **Columns**, and then select the information you want to display. + + - To discard your changes, select **Reset to default**. + +## View the number of licenses associated with your authorization system + +1. To access licensing information about your data sources, in the Permissions Management home page, select **Settings** (the gear icon). + +1. Select the **Inventory** tab, select the **Licensing** subtab, and then select your authorization system type. + + The **Licensing** table displays the following information pertinent to your authorization system type: + + - The names of your accounts in the **Authorization system** column. + - The number of **Compute** licenses. + - The number of **Serverless** licenses. + - The number of **Compute containers**. + - The number of **Databases**. + - The **Total number of licenses**. + + +## Next steps + +- For information about viewing and configuring settings for collecting data from your authorization system and its associated accounts, see [View and configure settings for data collection](product-data-sources.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/product-data-sources.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-data-sources.md new file mode 100644 index 0000000000000..35fc4609c1268 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-data-sources.md @@ -0,0 +1,107 @@ +--- +title: View and configure settings for data collection from your authorization system in Permissions Management +description: How to view and configure settings for collecting data from your authorization system in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# View and configure settings for data collection + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + + +You can use the **Data Collectors** dashboard in Permissions Management to view and configure settings for collecting data from your authorization systems. It also provides information about the status of the data collection. + +## Access and view data sources + +1. To access your data sources, in the Permissions Management home page, select **Settings** (the gear icon). Then select the **Data Collectors** tab. + +1. 
On the **Data Collectors** dashboard, select your authorization system type: + + - **AWS** for Amazon Web Services. + - **Azure** for Microsoft Azure. + - **GCP** for Google Cloud Platform. + +1. To display specific information about an account: + + 1. Enter the following information: + + - **Uploaded on**: Select **All** accounts, **Online** accounts, or **Offline** accounts. + - **Transformed on**: Select **All** accounts, **Online** accounts, or **Offline** accounts. + - **Search**: Enter an ID or Internet Protocol (IP) address to find a specific account. + + 1. Select **Apply** to display the results. + + Select **Reset Filter** to discard your settings. + +1. The following information displays: + + - **ID**: The unique identification number for the data collector. + - **Data types**: Displays the data types that are collected: + - **Entitlements**: The permissions of all identities and resources for all the configured authorization systems. + - **Recently uploaded on**: Displays whether the entitlement data is being collected. + + The status displays *ONLINE* if the data collection has no errors and *OFFLINE* if there are errors. + - **Recently transformed on**: Displays whether the entitlement data is being processed. + + The status displays *ONLINE* if the data processing has no errors and *OFFLINE* if there are errors. + - The **Tenant ID**. + - The **Tenant name**. + +## Modify a data collector + +1. Select the ellipses **(...)** at the end of the row in the table. +1. Select **Edit Configuration**. + + The **Permissions Management Onboarding - Summary** box displays. + +1. Select **Edit** (the pencil icon) for each field you want to change. +1. Select **Verify now & save**. + + To verify your changes later, select **Save & verify later**. + + When your changes are saved, the following message displays: **Successfully updated configuration.** + +## Delete a data collector + +1. Select the ellipses **(...)** at the end of the row in the table. +1. Select **Delete Configuration**. + + The **Permissions Management Onboarding - Summary** box displays. +1. Select **Delete**. +1. Check your email for a one time password (OTP) code, and enter it in **Enter OTP**. + + If you don't receive an OTP, select **Resend OTP**. + + The following message displays: **Successfully deleted configuration.** + +## Start collecting data from an authorization system + +1. Select the **Authorization Systems** tab, and then select your authorization system type. +1. Select the ellipses **(...)** at the end of the row in the table. +1. Select **Collect Data**. + + A message displays to confirm data collection has started. + +## Stop collecting data from an authorization system + +1. Select the ellipses **(...)** at the end of the row in the table. +1. To delete your authorization system, select **Delete**. + + The **Validate OTP To Delete Authorization System** box displays. + +1. Enter the OTP code +1. Select **Verify**. 
+ +## Next steps + +- For information about viewing an inventory of created resources and licensing information for your authorization system, see [Display an inventory of created resources and licenses for your authorization system](product-data-inventory.md) diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/product-define-permission-levels.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-define-permission-levels.md new file mode 100644 index 0000000000000..9aeb4875d5ccb --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-define-permission-levels.md @@ -0,0 +1,276 @@ +--- +title: Define and manage users, roles, and access levels in Permissions Management +description: How to define and manage users, roles, and access levels in Permissions Management User management dashboard. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Define and manage users, roles, and access levels + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +In Permissions Management, a key component of the interface is the User management dashboard. This topic describes how system administrators can define and manage users, their roles, and their access levels in the system. + +## The User management dashboard + +The Permissions Management User management dashboard provides a high-level overview of: + +- Registered and invited users. +- Permissions allowed for each user within a given system. +- Recent user activity. + +It also provides the functionality to invite or delete a user, edit, view, and customize permissions settings. + + +## Manage users for customers without SAML integration + +Follow this process to invite users if the customer hasn't enabled SAML integration with the Permissions Management application. + +### Invite a user to Permissions Management + +Inviting a user to Permissions Management adds the user to the system and allows system administrators to assign permissions to those users. Follow the steps below to invite a user to Permissions Management. + +1. To invite a user to Permissions Management, select the down caret icon next to the **User** icon on the right of the screen, and then select **User Management**. +2. From the **Users** tab, select **Invite User**. +3. From the **Set User Permission** window, in the **User** text box, enter the user's email address. +4. Under **Permission**, select the applicable option. + + - **Admin for All Authorization System Types**: **View**, **Control**, and **Approve** permissions for all Authorization System Types. + + 1. Select **Next**. + 2. Select **Requestor for User** for each authorization system, if applicable. + + A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. + 3. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. 
+ + For example, a user may have various roles in different authorization systems, so they can select the **Add** icon and the **Users** icon to request access for all their accounts. + 4. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. + + - **Admin for Selected Authorization System Types**: **View**, **Control**, and **Approve** permissions for selected Authorization System Types. + + 1. Select **Viewer**, **Controller**, or **Approver** for the appropriate authorization system(s). + 2. Select **Next**. + 3. Select **Requestor for User** for each authorization system, if applicable. + + A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. + 4. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. + + For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. + 5. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. + + - **Custom**: **View**, **Control**, and **Approve** permissions for specific accounts in **Auth System Types**. + + 1. Select **Next**. + + The default view displays the **List** section. + 2. Select the appropriate boxes for **Viewer**, **Controller**, or **Approver**. + + For access to all authorization system types, select **All (Current and Future)**. + 1. Select **Next**. + 1. Select **Requestor for User** for each authorization system, if applicable. + + A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. + 5. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. + + For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. + 6. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. + +5. Select **Save**. + + The following message displays in green at the top of the screen: **New User Has Been Invited Successfully**. + + + +## Manage users for customers with SAML integration + +Follow this process to invite users if the customer has enabled SAML integration with the Permissions Management application. + +### Create a permission in Permissions Management + +Creating a permission directly in Permissions Management allows system administrators to assign permissions to specific users. The following steps help you to create a permission. + +- On the right side of the screen, select the down caret icon next to **User**, and then select **User management**. + +- For **Users**: + 1. To create permissions for a specific user, select the **Users** tab, and then select **Permission.** + 2. From the **Set User Permission** window, enter the user's email address in the **User** text box. + 3. Under **Permission**, select the applicable button. Then expand menu to view instructions for each option. 
+ - **Admin for All Authorization System Types**: **View**, **Control**, and **Approve** permissions for all Authorization System Types. + 1. Select **Next**. + 2. Check **Requestor for User** for each authorization system, if applicable. + + A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. + + 3. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. + + For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. + + 4. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. + + - **Admin for Selected Authorization System Types**: **View**, **Control**, and **Approve** permissions for selected Authorization System Types. + 1. Check **Viewer**, **Controller**, or **Approver** for the appropriate authorization system(s). + 2. Select **Next**. + 3. Check **Requestor for User** for each authorization system, if applicable. + + A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. + + 4. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. + + For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. + 5. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. + - **Custom**: **View**, **Control**, and **Approve** permissions for specific accounts in **Auth System Types**. + + 1. Select **Next**. + + The default view displays the **List** tab, which displays individual authorization systems. + - To view groups of authorization systems organized into folder, select the **Folder** tab. + 2. Check the appropriate boxes for **Viewer**, **Controller**, or **Approver**. + + For access to all authorization system types, select **All (Current and Future)**. + 3. Select **Next**. + 4. Check **Requestor for User** for each authorization system, if applicable. + + A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. + 5. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. + + For example, a user can have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. + + 6. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. + + 4. Select **Save**. + + The following message displays in green at the top of the screen: + **New User Has Been Created Successfully**. + 5. The new user receives an email invitation to log in to Permissions Management. + +### The Pending tab + +1. To view the created permission, select the **Pending** tab. 
The system administrator can view the following details: + - **Email Address**: Displays the email address of the invited user. + - **Permissions**: Displays each service account and if the user has permissions as a **Viewer**, **Controller**, **Approver**, or **Requestor**. + - **Invited By**: Displays the email address of the person who sent the invitation. + - **Sent**: Displays the date the invitation was sent to the user. +2. To make changes to the following, select the ellipses **(...)** in the far right column. + - **View Permissions**: Displays a list of accounts for which the user has permissions. + - **Edit Permissions**: System administrators can edit a user's permissions. + - **Delete**: System administrators can delete a permission. + - **Reinvite**: System administrators can resend the invitation if the user didn't receive the email invite. + + When a user registers with Permissions Management, they move from the **Pending** tab to the **Registered** tab. + +### The Registered tab + +- For **Users**: + + 1. The **Registered** tab provides a high-level overview of user details to system administrators: + - The **Name/Email Address** column lists the name and email address of the user. + - The **Permissions** column lists each authorization system, and each type of permission. + + If a user has all permissions for all authorization systems, **Admin for All Authorization Types** displays across all columns. If a user only has some permissions, numbers display in each column the user has permissions for. For example, if the number "3" is listed in the **Viewer** column, the user has viewer permission for three accounts within that authorization system. + - The **Joined On** column records when the user registered for Permissions Management. + - The **Recent Activity** column displays the date when a user last performed an activity. + - The **Search** button allows a system administrator to search for a user by name; all users who match the criteria are displayed. + - The **Filters** option allows a system administrator to filter by specific details. When the filter option is selected, the **Authorization System** box displays. + + To display all authorization system accounts, select **All**. Then select the appropriate boxes for the accounts that need to be viewed. + 2. To make changes to the following, select the ellipses **(...)** in the far right column: + - **View Permissions**: Displays a list of accounts for which the user has permissions. + - **Edit Permissions**: System administrators can edit the accounts for which a user has permissions. + - **Remove Permissions**: System administrators can remove permissions from a user. + +- For **Groups**: + 1. To create permissions for a specific user, select the **Groups** tab, and then select **Permission**. + 2. From the **Set Group Permission** window, enter the name of the group in the **Group Name** box. + + The identity provider creates groups. + + Some users may be part of multiple groups. In this case, the user's overall permissions are a union of the permissions assigned to the various groups the user is a member of. + 3. Under **Permission**, select the applicable button and expand the menu to view instructions for each option. + + - **Admin for All Authorization System Types**: **View**, **Control**, and **Approve** permissions for all Authorization System Types. + 1. Select **Next**. + 2. Check **Requestor for User** for each authorization system, if applicable.
+ + A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. + 3. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. + + For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. + + 4. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. + + - **Admin for Selected Authorization System Types**: **View**, **Control**, and **Approve** permissions for selected Authorization System Types. + 1. Check **Viewer**, **Controller**, or **Approver** for the appropriate authorization system(s). + 2. Select **Next**. + 3. Check **Requestor for User** for each authorization system, if applicable. + + A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. + 4. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. + + For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. + + 5. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. + + - **Custom**: **View**, **Control**, and **Approve** permissions for specific accounts in Auth System Types. + 1. Select **Next**. + + The default view displays the **List** section. + + 2. Check the appropriate boxes for **Viewer**, **Controller**, or **Approver**. + + For access to all authorization system types, select **All (Current and Future)**. + + 3. Select **Next**. + + 4. Check **Requestor for User** for each authorization system, if applicable. + + A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. + + 5. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. + + For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. + + 6. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. + + 4. Select **Save**. + + The following message displays in green at the top of the screen: **New Group Has Been Created Successfully**. + +### The Groups tab + +1. The **Groups** tab provides a high-level overview of group details to system administrators: + + - The **Name** column lists the name of the group. + - The **Permissions** column lists each authorization system, and each type of permission. + + If a group has all permissions for all authorization systems, **Admin for All Authorization Types** displays across all columns. + + If a group only has some permissions, the corresponding columns display numbers for the groups.
+ + For example, if the number "3" is listed in the **Viewer** column, then the group has viewer permission for three accounts within that authorization system. + - The **Modified By** column records the email address of the person who created the group. + - The **Modified On** column records the date on which the group was last modified. + - The **Search** button allows a system administrator to search for a group by name; all groups that match the criteria are displayed. + - The **Filters** option allows a system administrator to filter by specific details. When the filter option is selected, the **Authorization System** box displays. + + To display all authorization system accounts, select **All**. Then select the appropriate boxes for the accounts that need to be viewed. + +2. To make changes to the following, select the ellipses **(...)** in the far right column: + - **View Permissions**: Displays a list of the accounts for which the group has permissions. + - **Edit Permissions**: System administrators can edit a group's permissions. + - **Duplicate**: System administrators can duplicate permissions from one group to another. + - **Delete**: System administrators can delete permissions from a group. + + +## Next steps + +- For information about how to view user management information, see [Manage users with the User management dashboard](ui-user-management.md). +- For information about how to create group-based permissions, see [Create group-based permissions](how-to-create-group-based-permissions.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/product-integrations.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-integrations.md new file mode 100644 index 0000000000000..b20516154c2bc --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-integrations.md @@ -0,0 +1,51 @@ +--- +title: View integration information about an authorization system in Permissions Management +description: View integration information about an authorization system in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# View integration information about an authorization system + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +The **Integrations** dashboard in Permissions Management allows you to view all your authorization systems in one place, and to confirm that your integrated applications are working as expected. This information helps improve quality and performance as a whole. + +## Display integration information about an authorization system + +Refer to the **Integration** subpages in Permissions Management for information about available authorization systems for integration. + +1. To display the **Integrations** dashboard, select **User** (your initials) in the upper right of the screen, and then select **Integrations**. + + The **Integrations** dashboard displays a tile for each available authorization system. + +1. Select an authorization system tile to view its integration information.
+ +## Available integrated authorization systems + +The following authorization systems may be listed in the **Integrations** dashboard, depending on which systems are integrated into the Permissions Management application. + +- **ServiceNow**: Manages digital workflows for enterprise operations, and the Permissions Management integration allows you to request and approve permissions through the ServiceNow ticketing workflow. +- **Splunk**: Searches, monitors, and analyzes machine-generated data, and the Permissions Management integration enables exporting usage analytics data, alerts, and logs. +- **HashiCorp Terraform**: Permissions Management enables the generation of least-privilege policies through the HashiCorp Terraform provider. +- **Permissions Management API**: The Permissions Management application programming interface (API) provides access to Permissions Management features. +- **Saviynt**: Enables you to view identity entitlements and usage inside the Saviynt console. +- **Securonix**: Enables exporting usage analytics data, alerts, and logs. + + + + + + + + + \ No newline at end of file diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/product-permission-analytics.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-permission-analytics.md new file mode 100644 index 0000000000000..6c51f7ca8c575 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-permission-analytics.md @@ -0,0 +1,102 @@ +--- +title: Create and view permission analytics triggers in Permissions Management +description: How to create and view permission analytics triggers in the Permission analytics tab in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Create and view permission analytics triggers + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article describes how you can create and view permission analytics triggers in Permissions Management. + +## View permission analytics triggers + +1. In the Permissions Management home page, select **Activity triggers** (the bell icon). +1. Select **Permission Analytics**, and then select the **Alerts** subtab. + + The **Alerts** subtab displays the following information: + + - **Alert Name**: Lists the name of the alert. + - To view the name, ID, role, domain, authorization system, statistical condition, anomaly date, and observance period, select **Alert name**. + - To expand the top information found, along with a graph of when the anomaly occurred, select **Details**. + - **Anomaly Alert Rule**: Displays the name of the rule selected when creating the alert. + - **# of Occurrences**: Displays how many times the alert trigger has occurred. + - **Task**: Displays how many tasks are affected by the alert. + - **Resources**: Displays how many resources are affected by the alert. + - **Identity**: Displays how many identities are affected by the alert. + - **Authorization System**: Displays which authorization systems the alert applies to. + - **Date/Time**: Displays the date and time of the alert.
+   - **Date/Time (UTC)**: Lists the date and time of the alert in Coordinated Universal Time (UTC).
+
+1. To filter the alerts, select the appropriate alert name or, from the **Alert Name** menu, select **All**.
+
+   - From the **Date** dropdown menu, select **Last 24 Hours**, **Last 2 Days**, **Last Week**, or **Custom Range**, and then select **Apply**.
+
+     If you select **Custom Range**, select date and time settings, and then select **Apply**.
+
+1. To view the following details, select the ellipses (**...**):
+
+   - **Details**: Displays **Authorization System Type**, **Authorization Systems**, **Resources**, **Tasks**, and **Identities** that matched the alert criteria.
+   - **View Trigger**: Displays the current trigger settings and applicable authorization system details.
+1. To view specific matches, select **Resources**, **Tasks**, or **Identities**.
+
+   The **Activity** section displays details about the **Identity Name**, **Resource Name**, **Task Name**, **Date**, and **IP Address**.
+
+## Create a permission analytics trigger
+
+1. In the Permissions Management home page, select **Activity triggers** (the bell icon).
+1. Select **Permission Analytics**, select the **Alerts** subtab, and then select **Create Permission Analytics Trigger**.
+1. In the **Alert Name** box, enter a name for the alert.
+1. Select the **Authorization System**.
+1. Select **Identity performed high number of tasks**, and then select **Next**.
+1. On the **Authorization Systems** tab, select the appropriate accounts and folders, or select **All**.
+
+   This screen defaults to the **List** view, but you can change it to the **Folder** view and select the applicable folder instead of selecting accounts individually.
+
+   - The **Status** column displays if the authorization system is online or offline.
+   - The **Controller** column displays if the controller is enabled or disabled.
+
+1. On the **Configuration** tab, to update the **Time Interval**, select **90 Days**, **60 Days**, or **30 Days** from the **Time range** dropdown.
+1. Select **Save**.
+
+## View permission analytics alert triggers
+
+1. In the Permissions Management home page, select **Activity triggers** (the bell icon).
+1. Select **Permission Analytics**, and then select the **Alert Triggers** subtab.
+
+   The **Alert Triggers** subtab displays the following information:
+
+   - **Alert**: Lists the name of the alert.
+   - **Anomaly Alert Rule**: Displays the name of the rule selected when creating the alert.
+   - **# of users subscribed**: Displays the number of users subscribed to the alert.
+   - **Created By**: Displays the email address of the user who created the alert.
+   - **Last Modified By**: Displays the email address of the user who last modified the alert.
+   - **Last Modified On**: Displays the date and time the trigger was last modified.
+   - **Subscription**: Toggle the button to **On** or **Off**.
+   - **View Trigger**: Displays the current trigger settings and applicable authorization system details.
+
+1. To view other options available to you, select the ellipses (**...**), and then make a selection from the available options:
+
+   - **Details** displays **Authorization System Type**, **Authorization Systems**, **Resources**, **Tasks**, and **Identities** that matched the alert criteria.
+     - To view the specific matches, select **Resources**, **Tasks**, or **Identities**.
+     - The **Activity** section displays details about the **Identity Name**, **Resource Name**, **Task Name**, **Date**, and **IP Address**.
+
+1. 
To filter by **Activated** or **Deactivated**, in the **Status** section, select **All**, **Activated**, or **Deactivated**, and then select **Apply**.
+
+
+## Next steps
+
+- For an overview on activity triggers, see [View information about activity triggers](ui-triggers.md).
+- For information on activity alerts and alert triggers, see [Create and view activity alerts and alert triggers](how-to-create-alert-trigger.md).
+- For information on rule-based anomalies and anomaly triggers, see [Create and view rule-based anomalies and anomaly triggers](product-rule-based-anomalies.md).
+- For information on finding outliers in an identity's behavior, see [Create and view statistical anomalies and anomaly triggers](product-statistical-anomalies.md).
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/product-permissions-analytics-reports.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-permissions-analytics-reports.md
new file mode 100644
index 0000000000000..523b70421c076
--- /dev/null
+++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-permissions-analytics-reports.md
@@ -0,0 +1,126 @@
+---
+title: Generate and download the Permissions analytics report in Permissions Management
+description: How to generate and download the Permissions analytics report in Permissions Management.
+services: active-directory
+author: kenwith
+manager: rkarlin
+ms.service: active-directory
+ms.subservice: ciem
+ms.workload: identity
+ms.topic: how-to
+ms.date: 02/23/2022
+ms.author: kenwith
+---
+
+# Generate and download the Permissions analytics report
+
+> [!IMPORTANT]
+> Microsoft Entra Permissions Management is currently in PREVIEW.
+> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here.
+
+This article describes how to generate and download the **Permissions analytics report** in Permissions Management.
+
+> [!NOTE]
+> This topic applies only to Amazon Web Services (AWS) users.
+
+## Generate the Permissions analytics report
+
+1. In the Permissions Management home page, select the **Reports** tab, and then select the **Systems Reports** subtab.
+
+   The **Systems Reports** subtab displays a list of reports in the **Reports** table.
+1. Find **Permissions Analytics Report** in the list, and to download the report, select the down arrow to the right of the report name, or from the ellipses **(...)** menu, select **Download**.
+
+   The following message displays: **Successfully Started To Generate On Demand Report.**
+
+1. For detailed information in the report, select the right arrow next to one of the following categories. Or, select the required category under the **Findings** column.
+ + - **AWS** + - Inactive Identities + - Users + - Roles + - Resources + - Serverless Functions + - Inactive Groups + - Super Identities + - Users + - Roles + - Resources + - Serverless Functions + - Over-Provisioned Active Identities + - Users + - Roles + - Resources + - Serverless Functions + - PCI Distribution + - Privilege Escalation + - Users + - Roles + - Resources + - S3 Bucket Encryption + - Unencrypted Buckets + - SSE-S3 Buckets + - S3 Buckets Accessible Externally + - EC2 S3 Buckets Accessibility + - Open Security Groups + - Identities That Can Administer Security Tools + - Users + - Roles + - Resources + - Serverless Functions + - Identities That Can Access Secret Information + - Users + - Roles + - Resources + - Serverless Functions + - Cross-Account Access + - External Accounts + - Roles That Allow All Identities + - Hygiene: MFA Enforcement + - Hygiene: IAM Access Key Age + - Hygiene: Unused IAM Access Keys + - Exclude From Reports + - Users + - Roles + - Resources + - Serverless Functions + - Groups + - Security Groups + - S3 Buckets + + +1. Select a category and view the following columns of information: + + - **User**, **Role**, **Resource**, **Serverless Function Name**: Displays the name of the identity. + - **Authorization System**: Displays the authorization system to which the identity belongs. + - **Domain**: Displays the domain name to which the identity belongs. + - **Permissions**: Displays the maximum number of permissions that the identity can be granted. + - **Used**: Displays how many permissions that the identity has used. + - **Granted**: Displays how many permissions that the identity has been granted. + - **PCI**: Displays the permission creep index (PCI) score of the identity. + - **Date Last Active On**: Displays the date that the identity was last active. + - **Date Created On**: Displays the date when the identity was created. + + + + + +## Next steps + +- For information on how to view system reports in the **Reports** dashboard, see [View system reports in the Reports dashboard](product-reports.md). +- For a detailed overview of available system reports, see [View a list and description of system reports](all-reports.md). +- For information about how to generate and view a system report, see [Generate and view a system report](report-view-system-report.md). +- For information about how to create, view, and share a system report, see [Create, view, and share a custom report](report-view-system-report.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/product-reports.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-reports.md new file mode 100644 index 0000000000000..2872e36f1dcbb --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-reports.md @@ -0,0 +1,141 @@ +--- +title: View system reports in the Reports dashboard in Permissions Management +description: How to view system reports in the Reports dashboard in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# View system reports in the Reports dashboard + +> [!IMPORTANT] +> Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. 
Microsoft makes no warranties, express or implied, with respect to the information provided here.
+
+Permissions Management has various types of system reports available that capture specific sets of data. These reports allow management to:
+
+- Make timely decisions.
+- Analyze trends and system/user performance.
+- Identify trends in data and high-risk areas so that management can address issues more quickly and improve their efficiency.
+
+## Explore the Reports dashboard
+
+The **Reports** dashboard provides a table of information with both system reports and custom reports. The **Reports** dashboard defaults to the **System Reports** tab, which has the following details:
+
+- **Report Name**: The name of the report.
+- **Category**: The type of report. For example, **Permission**.
+- **Authorization Systems**: Displays which authorization systems the report applies to.
+- **Format**: Displays the output format the report can be generated in. For example, comma-separated values (CSV) format, portable document format (PDF), or Microsoft Excel Open XML Spreadsheet (XLSX) format.
+
+  - To download a report, select the down arrow to the right of the report name, or from the ellipses **(...)** menu, select **Download**.
+
+    The following message displays across the top of the screen in green if the download is successful: **Successfully Started To Generate On Demand Report**.
+
+## Available system reports
+
+Permissions Management offers the following reports, with the authorization systems they apply to noted for each:
+
+- **Access Key Entitlements And Usage**:
+  - **Summary of report**: Provides information about access keys, for example, permissions, usage, and rotation date.
+  - **Applies to**: Amazon Web Services (AWS) and Microsoft Azure
+  - **Report output type**: CSV
+  - **Ability to collate report**: Yes
+  - **Type of report**: **Summary** or **Detailed**
+  - **Use cases**:
+    - The access key age, last rotation date, and last usage date are available in the summary report to help with key rotation.
+    - Use the granted task information and Permission Creep Index (PCI) score to decide whether to take action on the keys.
+
+- **User Entitlements And Usage**:
+  - **Summary of report**: Provides information about the identities' permissions, for example, entitlement, usage, and PCI.
+  - **Applies to**: AWS, Azure, and Google Cloud Platform (GCP)
+  - **Report output type**: CSV
+  - **Ability to collate report**: Yes
+  - **Type of report**: **Summary** or **Detailed**
+  - **Use cases**:
+    - The data displayed on the **Usage Analytics** screen is downloaded as part of the **Summary** report. The user's detailed permissions usage is listed in the **Detailed** report.
+
+- **Group Entitlements And Usage**:
+  - **Summary of report**: Provides information about the group's permissions, for example, entitlement, usage, and PCI.
+  - **Applies to**: AWS, Azure, and GCP
+  - **Report output type**: CSV
+  - **Ability to collate report**: Yes
+  - **Type of report**: **Summary**
+  - **Use cases**:
+    - All group-level entitlements and permission assignments, PCIs, and the number of members are listed as part of this report.
+
+- **Identity Permissions**:
+  - **Summary of report**: Report on identities that have specific permissions, for example, identities that have permission to delete any S3 buckets.
+ - **Applies to**: AWS, Azure, and GCP + - **Report output type**: CSV + - **Ability to collate report**: No + - **Type of report**: **Summary** + - **Use cases**: + - Any task usage or specific task usage via User/Group/Role/App can be tracked with this report. + +- **Identity privilege activity report** + - **Summary of report**: Provides information about permission changes that have occurred in the selected duration. + - **Applies to**: AWS, Azure, and GCP + - **Report output type**: PDF + - **Ability to collate report**: No + - **Type of report**: **Summary** + - **Use cases**: + - Any identity permission change can be captured using this report. + - The **Identity Privilege Activity** report has the following main sections: **User Summary**, **Group Summary**, **Role Summary**, and **Delete Task Summary**. + - The **User** summary lists the current granted permissions and high-risk permissions and resources accessed in 1 day, 7 days, or 30 days. There are subsections for newly added or deleted users, users with PCI change, and High-risk active/inactive users. + - The **Group** summary lists the administrator level groups with the current granted permissions and high-risk permissions and resources accessed in 1 day, 7 days, or 30 days. There are subsections for newly added or deleted groups, groups with PCI change, and High-risk active/inactive groups. + - The **Role summary** lists similar details as **Group Summary**. + - The **Delete Task summary** section lists the number of times the **Delete task** has been executed in the given time period. + +- **Permissions Analytics Report** + - **Summary of report**: Provides information about the violation of key security best practices. + - **Applies to**: AWS, Azure, and GCP + - **Report output type**: CSV + - **Ability to collate report**: Yes + - **Type of report**: **Detailed** + - **Use cases**: + - This report lists the different key findings in the selected auth systems. The key findings include super identities, inactive identities, over provisioned active identities, storage bucket hygiene, and access key age (for AWS only). The report helps administrators to visualize the findings across the organization. + + For more information about this report, see [Permissions analytics report](product-permissions-analytics-reports.md). + +- **Role/Policy Details** + - **Summary of report**: Provides information about roles and policies. + - **Applies to**: AWS, Azure, GCP + - **Report output type**: CSV + - **Ability to collate report**: No + - **Type of report**: **Summary** + - **Use cases**: + - Assigned/Unassigned, custom/system policy, and the used/unused condition is captured in this report for any specific, or all, AWS accounts. Similar data can be captured for Azure/GCP for the assigned/unassigned roles. + +- **PCI History** + - **Summary of report**: Provides a report of privilege creep index (PCI) history. + - **Applies to**: AWS, Azure, GCP + - **Report output type**: CSV + - **Ability to collate report**: Yes + - **Type of report**: **Summary** + - **Use cases**: + - This report plots the trend of the PCI by displaying the monthly PCI history for each authorization system. + +- **All Permissions for Identity** + - **Summary of report**: Provides results of all permissions for identities. + - **Applies to**: AWS, Azure, GCP + - **Report output type**: CSV + - **Ability to collate report**: Yes + - **Type of report**: **Detailed** + - **Use cases**: + - This report lists all the assigned permissions for the selected identities. 
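+
+Most of the reports above are generated as CSV files, so you can post-process a downloaded report with your own tooling. The following Python sketch is illustrative only: the file name and column headers (**User**, **Authorization System**, **Granted**, **Used**, **PCI**) are assumptions based on the columns described for the Permissions analytics report, so check the headers in your downloaded file and adjust accordingly. It lists the identities with the largest gap between granted and used permissions.
+
+```python
+import csv
+
+# Assumed file name and column headers; adjust to match your downloaded report.
+REPORT_FILE = "permissions-analytics-report.csv"
+
+def unused_permissions(row: dict) -> int:
+    """Granted minus used permissions for one identity (0 if fields are missing)."""
+    try:
+        return int(row["Granted"]) - int(row["Used"])
+    except (KeyError, ValueError):
+        return 0
+
+with open(REPORT_FILE, newline="", encoding="utf-8") as f:
+    rows = list(csv.DictReader(f))
+
+# Sort identities by the number of granted-but-unused permissions, highest first.
+rows.sort(key=unused_permissions, reverse=True)
+
+for row in rows[:10]:
+    print(f'{row.get("User", "?")} ({row.get("Authorization System", "?")}): '
+          f'{unused_permissions(row)} unused permissions, PCI {row.get("PCI", "?")}')
+```
+
+A review like this can help you decide which identities to right-size first, but the report itself remains the authoritative source of the data.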
+
+
+
+
+## Next steps
+
+- For a detailed overview of available system reports, see [View a list and description of system reports](all-reports.md).
+- For information about how to generate and view a system report, see [Generate and view a system report](report-view-system-report.md).
+- For information about how to create, view, and share a custom report, see [Create, view, and share a custom report](report-create-custom-report.md).
+- For information about how to create and view the Permissions analytics report, see [Generate and download the Permissions analytics report](product-permissions-analytics-reports.md).
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/product-rule-based-anomalies.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-rule-based-anomalies.md
new file mode 100644
index 0000000000000..2d014ae108e06
--- /dev/null
+++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-rule-based-anomalies.md
@@ -0,0 +1,123 @@
+---
+title: Create and view rule-based anomalies and anomaly triggers in Permissions Management
+description: How to create and view rule-based anomalies and anomaly triggers in Permissions Management.
+services: active-directory
+author: kenwith
+manager: rkarlin
+ms.service: active-directory
+ms.subservice: ciem
+ms.workload: identity
+ms.topic: how-to
+ms.date: 02/23/2022
+ms.author: kenwith
+---
+
+# Create and view rule-based anomaly alerts and anomaly triggers
+
+> [!IMPORTANT]
+> Microsoft Entra Permissions Management is currently in PREVIEW.
+> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here.
+
+Rule-based anomalies identify recent activity in Permissions Management that is determined to be unusual based on explicit rules defined in the activity trigger. The goal of rule-based anomalies is high-precision detection.
+
+## View rule-based anomaly alerts
+
+1. In the Permissions Management home page, select **Activity triggers** (the bell icon).
+1. Select **Rule-Based Anomaly**, and then select the **Alerts** subtab.
+
+   The **Alerts** subtab displays the following information:
+
+   - **Alert Name**: Lists the name of the alert.
+
+     - To view the specific identity, resource, and task names that occurred during the alert collection period, select the **Alert Name**.
+
+   - **Anomaly Alert Rule**: Displays the name of the rule selected when creating the alert.
+   - **# of Occurrences**: The number of times the alert trigger has occurred.
+   - **Task**: The number of tasks performed that triggered the alert.
+   - **Resources**: The number of resources accessed that triggered the alert.
+   - **Identity**: The number of identities performing unusual behavior that triggered the alert.
+   - **Authorization System**: Displays which authorization systems the alert applies to: Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**).
+   - **Date/Time**: Lists the date and time of the alert.
+   - **Date/Time (UTC)**: Lists the date and time of the alert in Coordinated Universal Time (UTC).
+
+
+1. To filter alerts:
+
+   - From the **Alert Name** dropdown, select **All** or the appropriate alert name.
+   - From the **Date** dropdown menu, select **Last 24 Hours**, **Last 2 Days**, **Last Week**, or **Custom Range**, and select **Apply**.
+ + - If you select **Custom Range**, also enter **From** and **To** duration settings. +1. To view details that match the alert criteria, select the ellipses (**...**). + + - **View Trigger**: Displays the current trigger settings and applicable authorization system details + - **Details**: Displays details about **Authorization System Type**, **Authorization Systems**, **Resources**, **Tasks**, **Identities**, and **Activity** + - **Activity**: Displays details about the **Identity Name**, **Resource Name**, **Task Name**, **Date/Time**, **Inactive For**, and **IP Address**. Selecting the "eye" icon displays the **Raw Events Summary** + +## Create a rule-based anomaly trigger + +1. In the Permissions Management home page, select **Activity triggers** (the bell icon). +1. Select **Rule-Based Anomaly**, and then select the **Alerts** subtab. +1. Select **Create Anomaly Trigger**. + +1. In the **Alert Name** box, enter a name for the alert. +1. Select the **Authorization System**, **AWS**, **Azure**, or **GCP**. +1. Select one of the following conditions: + - **Any Resource Accessed for the First Time**: The identity accesses a resource for the first time during the specified time interval. + - **Identity Performs a Particular Task for the First Time**: The identity does a specific task for the first time during the specified time interval. + - **Identity Performs a Task for the First Time**: The identity performs any task for the first time during the specified time interval +1. Select **Next**. +1. On the **Authorization Systems** tab, select the available authorization systems and folders, or select **All**. + + This screen defaults to **List** view, but you can change it to **Folders** view. You can select the applicable folder instead of individually selecting by authorization system. + + - The **Status** column displays if the authorization system is online or offline. + - The **Controller** column displays if the controller is enabled or disabled. + +1. On the **Configuration** tab, to update the **Time Interval**, select **90 Days**, **60 Days**, or **30 Days** from the **Time range** dropdown. +1. Select **Save**. + +## View a rule-based anomaly trigger + +1. In the Permissions Management home page, select **Activity triggers** (the bell icon). +1. Select **Rule-Based Anomaly**, and then select the **Alert Triggers** subtab. + + The **Alert Triggers** subtab displays the following information: + + - **Alerts**: Displays the name of the alert. + - **Anomaly Alert Rule**: Displays the name of the selected rule when creating the alert. + - **# of Users Subscribed**: Displays the number of users subscribed to the alert. + - **Created By**: Displays the email address of the user who created the alert. + - **Last Modified By**: Displays the email address of the user who last modified the alert. + - **Last Modified On**: Displays the date and time the trigger was last modified. + - **Subscription**: Subscribes you to receive alert emails. Switches between **On** and **Off**. + +1. To view other options available to you, select the ellipses (**...**), and then select from the available options: + + If the **Subscription** is **On**, the following options are available: + + - **Edit**: Enables you to modify alert parameters. + + Only the user who created the alert can edit the trigger screen, rename an alert, deactivate an alert, and delete an alert. Changes made by other users aren't saved. + + - **Duplicate**: Create a duplicate copy of the selected alert trigger. 
+ - **Rename**: Enter the new name of the query, and then select **Save.** + - **Deactivate**: The alert will still be listed, but will no longer send emails to subscribed users. + - **Activate**: Activate the alert trigger and start sending emails to subscribed users. + - **Notification Settings**: View the **Email** of users who are subscribed to the alert trigger. + - **Delete**: Delete the alert. + + If the **Subscription** is **Off**, the following options are available: + - **View**: View details of the alert trigger. + - **Notification Settings**: View the **Email** of users who are subscribed to the alert trigger. + - **Duplicate**: Create a duplicate copy of the selected alert trigger. + +1. To filter by **Activated** or **Deactivated**, in the **Status** section, select **All**, **Activated**, or **Deactivated**, and then select **Apply**. + + + +## Next steps + +- For an overview on activity triggers, see [View information about activity triggers](ui-triggers.md). +- For information on activity alerts and alert triggers, see [Create and view activity alerts and alert triggers](how-to-create-alert-trigger.md). +- For information on finding outliers in identity's behavior, see [Create and view statistical anomalies and anomaly triggers](product-statistical-anomalies.md). +- For information on permission analytics triggers, see [Create and view permission analytics triggers](product-permission-analytics.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/product-statistical-anomalies.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-statistical-anomalies.md new file mode 100644 index 0000000000000..bcef698e31a8c --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-statistical-anomalies.md @@ -0,0 +1,125 @@ +--- +title: Create and view statistical anomalies and anomaly triggers in Permissions Management +description: How to create and view statistical anomalies and anomaly triggers in the Statistical Anomaly tab in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Create and view statistical anomalies and anomaly triggers + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +Statistical anomalies can detect outliers in an identity's behavior if recent activity is determined to be unusual based on models defined in an activity trigger. The goal of this anomaly trigger is a high recall rate. + +## View statistical anomalies in an identity's behavior + +1. In the Permissions Management home page, select **Activity triggers** (the bell icon). +1. Select **Statistical Anomaly**, and then select the **Alerts** subtab. + + The **Alerts** subtab displays the following information: + + - **Alert Name**: Lists the name of the alert. + - **Anomaly Alert Rule**: Displays the name of the rule select when creating the alert. + - **# of Occurrences**: Displays how many times the alert trigger has occurred. + - **Authorization System**: Displays which authorization systems the alert applies to. + - **Date/Time**: Lists the day of the outlier occurring. 
+   - **Date/Time (UTC)**: Lists the day the outlier occurred, in Coordinated Universal Time (UTC).
+
+
+1. To filter the alerts based on name, select the appropriate alert name or choose **All** from the **Alert Name** dropdown menu, and select **Apply**.
+1. To filter the alerts based on alert time, select **Last 24 Hours**, **Last 2 Days**, **Last Week**, or **Custom Range** from the **Date** dropdown menu, and select **Apply**.
+1. Select the ellipses (**...**), and then select one of the following:
+    - **Details**: Brings you to an Alert Summary view that displays the **Authorization System**, **Statistical Model**, and **Observance Period**, along with a table that has a row for each identity triggering this alert. From here you can select:
+      - **Details**: Displays graphs highlighting the anomaly with context, and up to the top three actions performed on the day of the anomaly.
+      - **View Trigger**: Displays the current trigger settings and applicable authorization system details.
+    - **View Trigger**: Displays the current trigger settings and applicable authorization system details.
+
+## Create a statistical anomaly trigger
+
+1. In the Permissions Management home page, select **Activity triggers** (the bell icon).
+1. Select **Statistical Anomaly**, select the **Alerts** subtab, and then select **Create Alert Trigger**.
+1. Enter a name for the alert in the **Alert Name** box.
+1. Select the **Authorization System**: Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**).
+1. Select one of the following conditions:
+
+    - **Identity Performed High Number of Tasks**: The identity performs a higher than usual volume of tasks. For example, an identity typically performs 25 tasks per day, and now it performs 100 tasks per day.
+    - **Identity Performed Low Number of Tasks**: The identity performs a lower than usual volume of tasks. For example, an identity typically performs 100 tasks per day, and now it performs 25 tasks per day.
+    - **Identity Performed Tasks with Unusual Results**: The identity performing an action gets a different result than usual. For example, tasks that usually end in a successful result now end in a failed result, or vice versa.
+    - **Identity Performed Tasks with Unusual Timing**: The identity does tasks at unusual times as established by their baseline in the observance period. Times are grouped into the following 4-hour UTC windows (for an illustrative sketch of this grouping, see the example at the end of this article):
+        - 12AM-4AM UTC
+        - 4AM-8AM UTC
+        - 8AM-12PM UTC
+        - 12PM-4PM UTC
+        - 4PM-8PM UTC
+        - 8PM-12AM UTC
+    - **Identity Performed Tasks with Unusual Types**: The identity performs unusual types of tasks as established by their baseline in the observance period. For example, an identity performs read, write, or delete tasks they wouldn't ordinarily perform.
+    - **Identity Performed Tasks with Multiple Unusual Patterns**: The identity shows several unusual patterns in the tasks performed, as established by their baseline in the observance period.
+1. Select **Next**.
+
+1. On the **Authorization Systems** tab, select the appropriate systems, or, to select all systems, select **All**.
+
+    The screen defaults to the **List** view, but you can switch to **Folder** view using the menu and select the applicable folder instead of selecting systems individually.
+
+    - The **Status** column displays if the authorization system is online or offline.
+
+    - The **Controller** column displays if the controller is enabled or disabled.
+
+
+1. 
On the **Configuration** tab, to update the **Time Interval**, from the **Time Range** dropdown, select **90 Days**, **60 Days**, or **30 Days**, and then select **Save**. + +## View statistical anomaly triggers + +1. In the Permissions Management home page, select **Activity triggers** (the bell icon). +1. Select **Statistical Anomaly**, and then select the **Alert Triggers** subtab. + + The **Alert Triggers** subtab displays the following information: + + - **Alert**: Displays the name of the alert. + - **Anomaly Alert Rule**: Displays the name of the rule select when creating the alert. + - **# of users subscribed**: Displays the number of users subscribed to the alert. + - **Created By**: Displays the email address of the user who created the alert. + - **Last Modified By**: Displays the email address of the user who last modified the alert. + - **Last Modified On**: Displays the date and time the trigger was last modified. + - **Subscription**: Subscribes you to receive alert emails. Toggle the button to **On** or **Off**. + +1. To filter by **Activated** or **Deactivated**, in the **Status** section, select **All**, **Activated**, or **Deactivated**, and then select **Apply**. + +1. To view other options available to you, select the ellipses (**...**), and then select from the available options: + + If the **Subscription** is **On**, the following options are available: + - **Edit**: Enables you to modify alert parameters + + > [!NOTE] + > Only the user who created the alert can perform the following actions: edit the trigger screen, rename an alert, deactivate an alert, and delete an alert. Changes made by other users aren't saved. + - **Duplicate**: Create a duplicate copy of the selected alert trigger. + - **Rename**: Enter the new name of the query, and then select **Save.** + - **Deactivate**: The alert will still be listed, but will no longer send emails to subscribed users. + - **Activate**: Activate the alert trigger and start sending emails to subscribed users. + - **Notification Settings**: View the **Email** of users who are subscribed to the alert trigger. + - **Delete**: Delete the alert. + + If the **Subscription** is **Off**, the following options are available: + - **View**: View details of the alert trigger. + - **Notification settings**: View the **Email** of users who are subscribed to the alert trigger. + - **Duplicate**: Create a duplicate copy of the selected alert trigger. + + +1. Select **Apply**. + + + +## Next steps + +- For an overview on activity triggers, see [View information about activity triggers](ui-triggers.md). +- For information on activity alerts and alert triggers, see [Create and view activity alerts and alert triggers](how-to-create-alert-trigger.md). +- For information on rule-based anomalies and anomaly triggers, see [Create and view rule-based anomalies and anomaly triggers](product-rule-based-anomalies.md). +- For information on permission analytics triggers, see [Create and view permission analytics triggers](product-permission-analytics.md). 
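+
+The statistical anomaly conditions described earlier in this article are defined against a per-identity baseline: the volume of tasks performed per day and the activity grouped into fixed 4-hour UTC windows. The following Python sketch only illustrates those two ideas (bucketing a timestamp into the documented windows and flagging a day whose task count is far from the identity's usual volume). It is not the statistical model Permissions Management uses, and the threshold value is an arbitrary example.
+
+```python
+from datetime import datetime, timezone
+from statistics import mean, stdev
+
+def utc_window(ts: datetime) -> str:
+    """Map a timestamp to one of the six 4-hour UTC windows described in this article."""
+    hour = ts.astimezone(timezone.utc).hour
+    start = (hour // 4) * 4
+    return f"{start:02d}:00-{(start + 4) % 24:02d}:00 UTC"
+
+def unusual_task_volume(daily_counts: list[int], today: int, z_threshold: float = 3.0) -> bool:
+    """Flag a day whose task count is far outside the identity's baseline.
+
+    `daily_counts` holds the per-day task counts from the observance period.
+    The z-score threshold is an arbitrary illustration, not the product's model.
+    """
+    if len(daily_counts) < 2:
+        return False
+    baseline_mean = mean(daily_counts)
+    baseline_stdev = stdev(daily_counts) or 1.0
+    return abs(today - baseline_mean) / baseline_stdev > z_threshold
+
+# Example: an identity that usually performs ~25 tasks per day suddenly performs 100.
+history = [24, 26, 25, 23, 27, 25, 24]
+print(utc_window(datetime(2022, 2, 23, 14, 30, tzinfo=timezone.utc)))  # 12:00-16:00 UTC
+print(unusual_task_volume(history, today=100))  # True
+```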
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/report-create-custom-report.md b/articles/active-directory/cloud-infrastructure-entitlement-management/report-create-custom-report.md
new file mode 100644
index 0000000000000..203365d8a8840
--- /dev/null
+++ b/articles/active-directory/cloud-infrastructure-entitlement-management/report-create-custom-report.md
@@ -0,0 +1,128 @@
+---
+title: Create, view, and share a custom report in Permissions Management
+description: How to create, view, and share a custom report in Permissions Management.
+services: active-directory
+author: kenwith
+manager: rkarlin
+ms.service: active-directory
+ms.subservice: ciem
+ms.workload: identity
+ms.topic: how-to
+ms.date: 02/23/2022
+ms.author: kenwith
+---
+
+# Create, view, and share a custom report
+
+> [!IMPORTANT]
+> Microsoft Entra Permissions Management is currently in PREVIEW.
+> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here.
+
+This article describes how to create, view, and share a custom report in Permissions Management.
+
+## Create a custom report
+
+1. In the Permissions Management home page, select the **Reports** tab, and then select the **Custom Reports** subtab.
+1. Select **New Custom Report**.
+1. In the **Report Name** box, enter a name for your report.
+1. From the **Report Based on** list:
+    1. To view which authorization systems the report applies to, hover over each report name.
+    1. To view a description of a report, select the report.
+1. Select a report you want to use as the base for your custom report, and then select **Next**.
+1. In the **MyReport** box, select the **Authorization System** you want: Amazon Web Services (**AWS**), Microsoft Azure (**Azure**), or Google Cloud Platform (**GCP**).
+
+1. To add specific accounts, select the **List** subtab, and then select **All** or the account names.
+1. To add specific folders, select the **Folders** subtab, and then select **All** or the folder names.
+
+1. Select the **Report Format** subtab, and then select the format for your report: comma-separated values (**CSV**) file, portable document format (**PDF**), or Microsoft Excel Open XML Spreadsheet (**XLSX**) file.
+1. Select the **Schedule** tab, and then select the frequency for your report, from **None** up to **Monthly**.
+
+    - For the **Hourly** and **Daily** options, set the start date by choosing from the **Calendar** dropdown, and enter a specific time of day to receive the report.
+
+      In addition to date and time, the **Weekly** and **Biweekly** options let you select the day(s) of the week on which the report should repeat.
+
+1. Select **Save**.
+
+    The following message displays across the top of the screen in green if the report is successfully created: **Report has been created**. The report name appears in the **Reports** table.
+
+## View a custom report
+
+1. In the Permissions Management home page, select the **Reports** tab, and then select the **Custom Reports** subtab.
+
+    The **Custom Reports** tab displays the following information in the **Reports** table:
+
+    - **Report Name**: The name of the report.
+    - **Category**: The type of report: **Permission**.
+    - **Authorization System**: The authorization system in which you can view the report: AWS, Azure, and GCP.
+ - **Format**: The format of the report, **CSV**, **PDF**, or **XLSX** format. + +1. To view a report, from the **Report Name** column, select the report you want. +1. To download a report, select the down arrow to the right of the report name, or from the ellipses **(...)** menu, select **Download**. +1. To refresh the list of reports, select **Reload**. + +## Share a custom report + +1. In the Permissions Management home page, select the **Reports** tab, and then select the **Custom Reports** subtab. +1. In the **Reports** table, select a report and then select the ellipses (**...**) icon. +1. In the **Report Settings** box, select **Share with**. +1. In the **Search Email to add** box, enter the name of other Permissions Management user(s). + + You can only share reports with other Permissions Management users. +1. Select **Save**. + +## Search for a custom report + +1. In the Permissions Management home page, select the **Reports** tab, and then select the **Custom Reports** subtab. +1. On the **Custom Reports** tab, select **Search**. +1. In the **Search** box, enter the name of the report you want. + + The **Custom Reports** tab displays a list of reports that match your search criteria. +1. Select the report you want. +1. To download a report, select the down arrow to the right of the report name, or from the ellipses **(...)** menu, select **Download**. +1. To refresh the list of reports, select **Reload**. + + +## Modify a saved or scheduled custom report + +1. In the Permissions Management home page, select the **Reports** tab, and then select the **Custom Reports** subtab. +1. Hover over the report name on the **Custom Reports** tab. + + - To rename the report, select **Edit** (the pencil icon), and enter a new name. + - To change the settings for your report, select **Settings** (the gear icon). Make your changes, and then select **Save**. + + - To download a copy of the report, select the **Down arrow** icon. + +1. To perform other actions to the report, select the ellipses (**...**) icon: + + - **Download**: Downloads a copy of the report. + + - **Report Settings**: Displays the settings for the report, including scheduling, sharing the report, and so on. + + - **Duplicate**: Creates a duplicate of the report called **"Copy of XXX"**. Any reports not created by the current user are listed as **Duplicate**. + + When you select **Duplicate**, a box appears asking if you're sure you want to create a duplicate. Select **Confirm**. + + When the report is successfully duplicated, the following message displays: **Report generated successfully**. + + - **API Settings**: Download the report using your Application Programming Interface (API) settings. + + When this option is selected, the **API Settings** window opens and displays the **Report ID** and **Secret Key**. Select **Generate New Key**. + + - **Delete**: Select this option to delete the report. + + After selecting **Delete**, a pop-up box appears asking if the user is sure they want to delete the report. Select **Confirm**. + + **Report is deleted successfully** appears across the top of the screen in green if successfully deleted. + + - **Unsubscribe**: Unsubscribe the user from receiving scheduled reports and notifications. + + This option is only available after a report has been scheduled. + + +## Next steps + +- For information on how to view system reports in the **Reports** dashboard, see [View system reports in the Reports dashboard](product-reports.md). 
+- For a detailed overview of available system reports, see [View a list and description of system reports](all-reports.md). +- For information about how to generate and view a system report, see [Generate and view a system report](report-view-system-report.md). +- For information about how to create and view the Permissions analytics report, see [Generate and download the Permissions analytics report](product-permissions-analytics-reports.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/report-view-system-report.md b/articles/active-directory/cloud-infrastructure-entitlement-management/report-view-system-report.md new file mode 100644 index 0000000000000..d93af027fb2f9 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/report-view-system-report.md @@ -0,0 +1,60 @@ +--- +title: Generate and view a system report in Permissions Management +description: How to generate and view a system report in the Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Generate and view a system report + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article describes how to generate and view a system report in Permissions Management. + +## Generate a system report + +1. In the Permissions Management home page, select the **Reports** tab, and then select the **Systems Reports** subtab. + The **Systems Reports** subtab displays the following options in the **Reports** table: + + - **Report Name**: The name of the report. + - **Category**: The type of report: **Permission**. + - **Authorization System**: The authorization system activity in the report: Amazon Web Services (AWS), Microsoft Azure (Azure), and Google Cloud Platform (GCP). + - **Format**: The format in which the report is available: comma-separated values (**CSV**) format, portable document format (**PDF**), or Microsoft Excel Open XML Spreadsheet (**XLSX**) format. + +1. In the **Report Name** column, find the report you want, and then select the down arrow to the right of the report name to download the report. + + Or, from the ellipses **(...)** menu, select **Download**. + + The following message displays: **Successfully Started To Generate On Demand Report.** + + > [!NOTE] + > If you select one authorization system, the report includes a summary. If you select more than one authorization system, the report does not include a summary. + +1. To refresh the list of reports, select **Reload**. + +## Search for a system report + +1. On the **Systems Reports** subtab, select **Search**. +1. In the **Search** box, enter the name of the report you want. + + The **Systems Reports** subtab displays a list of reports that match your search criteria. +1. Select a report from the **Report Name** column. +1. To download a report, select the down arrow to the right of the report name, or from the ellipses **(...)** menu, select **Download**. +1. To refresh the list of reports, select **Reload**. 
+ + +## Next steps + +- For information on how to view system reports in the **Reports** dashboard, see [View system reports in the Reports dashboard](product-reports.md). +- For a detailed overview of available system reports, see [View a list and description of system reports](all-reports.md). +- For information about how to create, view, and share a system report, see [Create, view, and share a custom report](report-view-system-report.md). +- For information about how to create and view the Permissions analytics report, see [Generate and download the Permissions analytics report](product-permissions-analytics-reports.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/training-videos.md b/articles/active-directory/cloud-infrastructure-entitlement-management/training-videos.md new file mode 100644 index 0000000000000..f80eac0ac3941 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/training-videos.md @@ -0,0 +1,41 @@ +--- +title: Permissions Management training videos +description: Permissions Management training videos. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 04/20/2022 +ms.author: kenwith +--- + +# Entra Permissions Management training videos + +To view step-by-step training videos on how to use Permissions Management features, select a link below. + +## Onboard Permissions Management in your organization + + +### Enable Permissions Management in your Azure Active Directory (Azure AD) tenant + +To view a video on how to enable Permissions Management in your Azure AD tenant, select [Enable Permissions Management in your Azure AD tenant](https://www.youtube.com/watch?v=-fkfeZyevoo). + +### Configure and onboard Amazon Web Services (AWS) accounts + +To view a video on how to configure and onboard Amazon Web Services (AWS) accounts in Permissions Management, select [Configure and onboard AWS accounts](https://www.youtube.com/watch?v=R6K21wiWYmE). + +### Configure and onboard Google Cloud Platform (GCP) accounts + +To view a video on how to configure and onboard Google Cloud Platform (GCP) accounts in Permissions Management, select [Configure and onboard GCP accounts](https://www.youtube.com/watch?app=desktop&v=W3epcOaec28). + + + + +## Next steps + +- For an overview of Permissions Management, see [What's Permissions Management?](overview.md) +- For a list of frequently asked questions (FAQs) about Permissions Management, see [FAQs](faqs.md). +- For information on how to start viewing information about your authorization system in Permissions Management, see [View key statistics and data about your authorization system](ui-dashboard.md). 
\ No newline at end of file diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/troubleshoot.md b/articles/active-directory/cloud-infrastructure-entitlement-management/troubleshoot.md new file mode 100644 index 0000000000000..fe392e6558e77 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/troubleshoot.md @@ -0,0 +1,42 @@ +--- +title: Troubleshoot issues with Permissions Management +description: Troubleshoot issues with Permissions Management +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: troubleshooting +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Troubleshoot issues with Permissions Management + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This section answers troubleshoot issues with Permissions Management. + +## One time passcode (OTP) email + +### The user didn't receive the OTP email. + +- Check your junk or Spam mail folder for the email. + +## Reports + +### The individual files are generated according to the authorization system (subscription/account/project). + +- Select the **Collate** option in the **Custom Report** screen in the Permissions Management **Reports** tab. + +## Data collection in AWS + +### Data collection > AWS Authorization system data collection status is offline. Upload and transform is also offline. + +- Check the Permissions Management-related role that exists in these accounts. +- Validate the trust relationship with the OpenID Connect (OIDC) role. + + diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/ui-audit-trail.md b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-audit-trail.md new file mode 100644 index 0000000000000..43ee6f14eec9f --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-audit-trail.md @@ -0,0 +1,75 @@ +--- +title: Use queries to see how users access information in an authorization system in Permissions Management +description: How to use queries to see how users access information in an authorization system in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: overview +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Use queries to see how users access information + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +The **Audit** dashboard in Permissions Management provides an overview of queries a Permissions Management user has created to review how users access their authorization systems and accounts. + +This article provides an overview of the components of the **Audit** dashboard. + +## View information in the Audit dashboard + + +1. In Permissions Management, select the **Audit** tab. + + Permissions Management displays the query options available to you. + +1. 
The following options display at the top of the **Audit** dashboard:
+
+    - A tab for each existing query. Select the tab to see details about the query.
+    - **New Query**: Select the tab to create a new query.
+    - **New tab (+)**: Select the tab to add a **New Query** tab.
+    - **Saved Queries**: Select to view a list of saved queries.
+
+1. To return to the main page, select **Back to Audit Trail**.
+
+
+## Use a query to view information
+
+1. In Permissions Management, select the **Audit** tab.
+1. The **New query** tab displays the following options:
+
+    - **Authorization Systems Type**: A list of your authorization systems: Amazon Web Services (**AWS**), Microsoft Azure (**Azure**), Google Cloud Platform (**GCP**), or Platform (**Platform**).
+
+    - **Authorization System**: A **List** of accounts and **Folders** in the authorization system.
+
+        - To display a **List** of accounts and **Folders** in the authorization system, select the down arrow, and then select **Apply**.
+
+1. To add an **Audit Trail Condition**, select **Conditions** (the eye icon), select the conditions you want to add, and then select **Close**.
+
+1. To edit existing parameters, select **Edit** (the pencil icon).
+
+1. To add the parameter that you created to the query, select **Add**.
+
+1. To search for activity data that you can add to the query, select **Search**.
+
+1. To save your query, select **Save**.
+
+1. To save your query under a different name, select **Save As** (the ellipses **(...)** icon).
+
+1. To discard your work and start creating a query again, select **Reset Query**.
+
+1. To delete a query, select the **X** to the right of the query tab.
+
+
+
+## Next steps
+
+- For information on how to filter and view user activity, see [Filter and query user activity](product-audit-trail.md).
+- For information on how to create a query, see [Create a custom query](how-to-create-custom-queries.md).
+- For information on how to generate an on-demand report from a query, see [Generate an on-demand report from a query](how-to-audit-trail-results.md).
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/ui-autopilot.md b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-autopilot.md
new file mode 100644
index 0000000000000..0d4f53e45ee7b
--- /dev/null
+++ b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-autopilot.md
@@ -0,0 +1,71 @@
+---
+title: View rules in the Autopilot dashboard in Permissions Management
+description: How to view rules in the Autopilot dashboard in Permissions Management.
+services: active-directory
+author: kenwith
+manager: rkarlin
+ms.service: active-directory
+ms.subservice: ciem
+ms.workload: identity
+ms.topic: overview
+ms.date: 02/23/2022
+ms.author: kenwith
+---
+
+# View rules in the Autopilot dashboard
+
+> [!IMPORTANT]
+> Microsoft Entra Permissions Management is currently in PREVIEW.
+> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here.
+
+The **Autopilot** dashboard in Permissions Management provides a table of information about **Autopilot rules** for administrators.
+
+
+> [!NOTE]
+> Only users with the **Administrator** role can view and make changes on this tab.
+
+## View a list of rules
+
+1. In the Permissions Management home page, select the **Autopilot** tab.
+1. 
In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select the authorization system types you want: Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). +1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want. +1. Select **Apply**. + + The following information displays in the **Autopilot Rules** table: + + - **Rule Name**: The name of the rule. + - **State**: The status of the rule: idle (not being use) or active (being used). + - **Rule Type**: The type of rule being applied. + - **Mode**: The status of the mode: on-demand or not. + - **Last Generated**: The date and time the rule was last generated. + - **Created By**: The email address of the user who created the rule. + - **Last Modified**: The date and time the rule was last modified. + - **Subscription**: Provides an **On** or **Off** subscription that allows you to receive email notifications when recommendations have been generated, applied, or unapplied. + +## View other available options for rules + +- Select the ellipses **(...)** + + The following options are available: + + - **View Rule**: Select to view details of the rule. + - **Delete Rule**: Select to delete the rule. Only the user who created the selected rule can delete the rule. + - **Generate Recommendations**: Creates recommendations for each user and the authorization system. Only the user who created the selected rule can create recommendations. + - **View Recommendations**: Displays the recommendations for each user and authorization system. + - **Notification Settings**: Displays the users subscribed to this rule. Only the user who created the selected rule can add other users to be notified. + +You can also select: + +- **Reload**: Select to refresh the displayed list of roles/policies. +- **Search**: Select to search for a specific role/policy. +- **Columns**: From the dropdown list, select the columns you want to display. + - Select **Reset to default** to return to the system defaults. +- **New Rule**: Select to create a new rule. For more information, see [Create a rule](how-to-create-rule.md). + + + +## Next steps + +- For information about creating rules, see [Create a rule](how-to-create-rule.md). +- For information about generating, viewing, and applying rule recommendations for rules, see [Generate, view, and apply rule recommendations for rules](how-to-recommendations-rule.md). +- For information about notification settings for rules, see [View notification settings for a rule](how-to-notifications-rule.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/ui-dashboard.md b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-dashboard.md new file mode 100644 index 0000000000000..23e1ce1b147b4 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-dashboard.md @@ -0,0 +1,138 @@ +--- +title: View key statistics and data about your authorization system in Permissions Management +description: How to view statistics and data about your authorization system in the Permissions Management. 
+services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: overview +ms.date: 02/23/2022 +ms.author: kenwith +--- + + +# View key statistics and data about your authorization system + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +Permissions Management provides a summary of key statistics and data about your authorization system regularly. This information is available for Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). + +## View metrics related to avoidable risk + +The data provided by Permissions Management includes metrics related to avoidable risk. These metrics allow the Permissions Management administrator to identify areas where they can reduce risks related to the principle of least permissions. + +You can view the following information in Entra: + +- The **Permission Creep Index (PCI)** heat map on the Permissions Management **Dashboard** identifies: + - The number of users who have been granted high-risk permissions but aren't using them. + - The number of users who contribute to the permission creep index (PCI) and where they are on the scale. + +- The [**Analytics** dashboard](usage-analytics-home.md) provides a snapshot of permission metrics within the last 90 days. + + +## Components of the Permissions Management Dashboard + +The Permissions Management **Dashboard** displays the following information: + +- **Authorization system types**: A dropdown list of authorization system types you can access: AWS, Azure, and GCP. + +- **Authorization System**: Displays a **List** of accounts and **Folders** in the selected authorization system you can access. + + - To add or remove accounts and folders, from the **Name** list, select or deselect accounts and folders, and then select **Apply**. + +- **Permission Creep Index (PCI)**: The graph displays the **# of identities contributing to PCI**. + + The PCI graph may display one or more bubbles. Each bubble displays the number of identities that are considered high risk. *High-risk* refers to the number of users who have permissions that exceed their normal or required usage. + - To display a list of the number of identities contributing to the **Low PCI**, **Medium PCI**, and **High PCI**, select the **List** icon in the upper right of the graph. + - To display the PCI graph again, select the **Graph** icon in the upper right of the list box. + +- **Highest PCI change**: Displays a list of your accounts and information about the **PCI** and **Change** in the index over the past 7 days. + - To download the list, select the down arrow in the upper right of the list box. + + The following message displays: **We'll email you a link to download the file.** + - Check your email for the message from the Permissions Management Customer Success Team. The email contains a link to the **PCI history** report in Microsoft Excel format. + - The email also includes a link to the **Reports** dashboard, where you can configure how and when you want to receive reports automatically. + - To view all the PCI changes, select **View all**. + +- **Identity**: A summary of the **Findings** that includes: + - The number of **Inactive** identities that haven't been accessed in over 90 days. 
+ - The number of **Super** identities that access data regularly. + - The number of identities that can **Access secret information**: A list of roles that can access sensitive or secret information. + - **Over-provisioned active** identities that have more permissions than they currently access. + - The number of identities **With permission escalation**: A list of roles that can increase permissions. + + To view the list of all identities, select **All findings**. + +- **Resources**: A summary of the **Findings** that includes the number of resources that are: + - **Open security groups** + - **Microsoft managed keys** + - **Instances with access to S3 buckets** + - **Unencrypted S3 buckets** + - **SSE-S3 Encrypted buckets** + - **S3 Bucket accessible externally** + + + +## The PCI heat map + +The **Permission Creep Index** heat map shows the incurred risk of users with access to high-risk permissions, and provides information about: + +- Users who were given access to high-risk permissions but aren't actively using them. *High-risk permissions* include the ability to modify or delete information in the authorization system. + +- The number of resources a user has access to, otherwise known as resource reach. + +- The high-risk permissions coupled with the number of resources a user has access to produce the score seen on the chart. + + Permissions are classified as *high*, *medium*, and *low*. + + - **High** (displayed in red) - The score is between 68 and 100. The user has access to many high-risk permissions they aren't using, and has high resource reach. + - **Medium** (displayed in yellow) - The score is between 34 and 67. The user has access to some high-risk permissions that they use, or have medium resource reach. + - **Low** (displayed in green) - The score is between 0 and 33. The user has access to few high-risk permissions. They use all their permissions and have low resource reach. + +- The number displayed on the graph shows how many users contribute to a particular score. To view detailed data about a user, hover over the number. + + The distribution graph displays all the users who contribute to the permission creep. It displays how many users contribute to a particular score. For example, if the score from the PCI chart is 14, the graph shows how many users have a score of 14. + +- The **PCI Trend** graph shows you the historical trend of the PCI score over the last 90 days. + - To download the **PCI history report**, select **Download**. + +### View information on the heat map + +1. Select the number on the heat map bubble to display: + + - The total number of **Identities** and how many of them are in the high, medium, and low categories. + - The **PCI trend** over the last several weeks. + +1. The **Identity** section below the heat map on the left side of the page shows all the relevant findings about identities, including roles that can access secret information, roles that are inactive, over provisioned active roles, and so on. + + - To expand the full list of identities, select **All findings**. + +1. The **Resource** section below the heat map on the right side of the page shows all the relevant findings about resources. It includes unencrypted S3 buckets, open security groups, and so on. + + +## The Analytics summary + +You can also view a summary of users and activities section on the [Analytics dashboard](usage-analytics-home.md). 
This dashboard provides a snapshot of the following high-risk tasks or actions users have accessed, and displays the total number of users with the high-risk access, how many users are inactive or have unexecuted tasks, and how many users are active or have executed tasks: + +- **Users with access to high-risk tasks**: Displays the total number of users with access to a high-risk task (**Total**), how many users have access but haven't used the task (**Inactive**), and how many users are actively using the task (**Active**). + +- **Users with access to delete tasks**: A subset of high-risk tasks, which displays the number of users with access to delete tasks (**Total**), how many users have the delete permissions but haven't used the permissions (**Inactive**), and how many users are actively executing the delete capability (**Active**). + +- **High-risk tasks accessible by users**: Displays all available high-risk tasks in the authorization system (**Granted**), how many high-risk tasks aren't used (**Unexecuted**), and how many high-risk tasks are used (**Executed**). + +- **Delete tasks accessible by users**: Displays all available delete tasks in the authorization system (**Granted**), how many delete tasks aren't used (**Unexecuted**), and how many delete tasks are used (**Executed**). + +- **Resources that permit high-risk tasks**: Displays the total number of resources a user has access to (**Total**), how many resources are available but not used (**Inactive**), and how many resources are used (**Active**). + +- **Resources that permit delete tasks**: Displays the total number of resources that permit delete tasks (**Total**), how many resources with delete tasks aren't used (**Inactive**), and how many resources with delete tasks are used (**Active**). + + + +## Next steps + +- For information on how to view authorization system and account activity data on the Permissions Management Dashboard, see [View data about the activity in your authorization system](product-dashboard.md). +- For an overview of the Analytics dashboard, see [An overview of the Analytics dashboard](usage-analytics-home.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/ui-remediation.md b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-remediation.md new file mode 100644 index 0000000000000..4d8ae893d6358 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-remediation.md @@ -0,0 +1,240 @@ +--- +title: View existing roles/policies and requests for permission in the Remediation dashboard in Permissions Management +description: How to view existing roles/policies and requests for permission in the Remediation dashboard in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: overview +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# View roles/policies and requests for permission in the Remediation dashboard + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +The **Remediation** dashboard in Permissions Management provides an overview of roles/policies, permissions, a list of existing requests for permissions, and requests for permissions you have made. 
+ +This article provides an overview of the components of the **Remediation** dashboard. + +> [!NOTE] +> To view the **Remediation** dashboard, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this dashboard, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator. + +> [!NOTE] +> Microsoft Azure uses the term *role* for what other cloud providers call *policy*. Permissions Management automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. + +## Display the Remediation dashboard + +1. On the Permissions Management home page, select the **Remediation** tab. + + The **Remediation** dashboard includes six subtabs: + + - **Roles/Policies**: Use this subtab to perform Create, Read, Update, and Delete (CRUD) operations on roles/policies. + - **Permissions**: Use this subtab to perform Read, Update, and Delete (RUD) operations on granted permissions. + - **Role/Policy Template**: Use this subtab to create a template for roles/policies. + - **Requests**: Use this subtab to view approved, pending, and processed Permission on Demand (POD) requests. + - **My Requests**: Use this subtab to manage the lifecycle of POD requests that you created or that need your approval. + - **Settings**: Use this subtab to select **Request Role/Policy Filters**, **Request Settings**, and **Auto-Approve** settings. + +1. Use the dropdown to select the **Authorization System Type** and **Authorization System**, and then select **Apply**. + +## View and create roles/policies + +The **Roles/Policies** subtab provides the following settings that you can use to view and create a role/policy. + +- **Authorization System Type**: Displays a dropdown with authorization system types you can access, Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). +- **Authorization System**: Displays a list of authorization systems accounts you can access. +- **Policy Type**: A dropdown with available role/policy types. You can select **All**, **Custom**, **System**, or **Permissions Management Only**. +- **Policy Status**: A dropdown with available role/policy statuses. You can select **All**, **Assigned**, or **Unassigned**. +- **Policy Usage**: A dropdown with **All** or **Unused** roles/policies. +- **Apply**: Select this option to save the changes you've made. +- **Reset Filter**: Select this option to discard the changes you've made. + +The **Policy list** displays a list of existing roles/policies and the following information about each role/policy. + +- **Policy Name**: The name of the roles/policies available to you. +- **Policy Type**: **Custom**, **System**, or **Permissions Management Only**. +- **Actions** + - Select **Clone** to create a duplicate copy of the role/policy. + - Select **Modify** to change the existing role/policy. + - Select **Delete** to delete the role/policy. + +Other options available to you: +- **Search**: Select this option to search for a specific role/policy. +- **Reload**: Select this option to refresh the displayed list of roles/policies. +- **Export CSV**: Select this option to export the displayed list of roles/policies as a comma-separated values (CSV) file. + + When the file is successfully exported, a message appears: **Exported Successfully.** + + - Check your email for a message from the Permissions Management Customer Success Team. 
This email contains a link to: + - The **Role Policy Details** report in CSV format. + - The **Reports** dashboard where you can configure how and when you can automatically receive reports. +- **Create Role/Policy**: Select this option to create a new role/policy. For more information, see [Create a role/policy](how-to-create-role-policy.md). + + +## Add filters to permissions + +The **Permissions** subtab provides the following settings that you can use to add filters to your permissions. + +- **Authorization System Type**: Displays a dropdown with authorization system types you can access, AWS, Azure, and GCP. +- **Authorization System**: Displays a list of authorization systems accounts you can access. +- **Search For**: A dropdown from which you can select **Group**, **User**, or **Role**. +- **User Status**: A dropdown from which you can select **Any**, **Active**, or **Inactive**. +- **Privilege Creep Index** (PCI): A dropdown from which you can select a PCI rating of **Any**, **High**, **Medium**, or **Low**. +- **Task Usage**: A dropdown from which you can select **Any**, **Granted**, **Used**, or **Unused**. +- **Enter a Username**: A dropdown from which you can select a username. +- **Enter a Group Name**: A dropdown from which you can select a group name. +- **Apply**: Select this option to save the changes you've made and run the filter. +- **Reset Filter**: Select this option to discard the changes you've made. +- **Export CSV**: Select this option to export the displayed list of roles/policies as a comma-separated values (CSV) file. + + When the file is successfully exported, a message appears: **Exported Successfully.** + + - Check your email for a message from the Permissions Management Customer Success Team. This email contains a link to: + - The **Role Policy Details** report in CSV format. + - The **Reports** dashboard where you can configure how and when you can automatically receive reports. + + +## Create templates for roles/policies + +Use the **Role/Policy Template** subtab to create a template for roles/policies. + +1. Select: + - **Authorization System Type**: Displays a dropdown with authorization system types you can access, AWS, Azure, and GCP. + - **Create Template**: Select this option to create a template. + +1. In the **Details** page, make the required selections: + - **Authorization System Type**: Select the authorization system types you want, **AWS**, **Azure**, or **GCP**. + - **Template Name**: Enter a name for your template, and then select **Next**. + +1. In the **Statements** page, complete the **Tasks**, **Resources**, **Request Conditions**, and **Effect** sections. Then select **Save** to save your role/policy template. + +Other options available to you: +- **Search**: Select this option to search for a specific role/policy. +- **Reload**: Select this option to refresh the displayed list of roles/policies. + +## View requests for permission + +Use the **Requests** subtab to view a list of **Pending**, **Approved**, and **Processed** requests for permissions your team members have made. + +- Select: + - **Authorization System Type**: Displays a dropdown with authorization system types you can access, AWS, Azure, and GCP. + - **Authorization System**: Displays a list of authorization systems accounts you can access. + +Other options available to you: + +- **Reload**: Select this option to refresh the displayed list of roles/policies. +- **Search**: Select this option to search for a specific role/policy. 
+- **Columns**: Select one or more of the following to view more information about the request: + - **Submitted By** + - **On Behalf Of** + - **Authorization System** + - **Tasks/Scope/Policies** + - **Request Date** + - **Schedule** + - **Submitted** + - **Reset to Default**: Select this option to discard your settings. + +### View pending requests + +The **Pending** table displays the following information: + +- **Summary**: A summary of the request. +- **Submitted By**: The name of the user who submitted the request. +- **On Behalf Of**: The name of the user on whose behalf the request was made. +- **Authorization System**: The authorization system the user selected. +- **Task/Scope/Policies**: The type of task/scope/policy selected. +- **Request Date**: The date when the request was made. +- **Submitted**: The period since the request was made. +- The ellipses **(...)** menu - Select the ellipses, and then select **Details**, **Approve**, or **Reject**. +- Select an option: + - **Reload**: Select this option to refresh the displayed list of roles/policies. + - **Search**: Select this option to search for a specific role/policy. + - **Columns**: From the dropdown, select the columns you want to display. + +**To return to the previous view:** + +- Select the up arrow. + +### View approved requests + +The **Approved** table displays information about the requests that have been approved. + +### View processed requests + +The **Processed** table displays information about the requests that have been processed. + +## View requests for permission for your approval + +Use the **My Requests** subtab to view a list of **Pending**, **Approved**, and **Processed** requests for permissions your team members have made and you must approve or reject. + +- Select: + - **Authorization System Type**: Displays a dropdown with authorization system types you can access, AWS, Azure, and GCP. + - **Authorization System**: Displays a list of authorization systems accounts you can access. + +Other options available to you: + +- **Reload**: Select this option to refresh the displayed list of roles/policies. +- **Search**: Select this option to search for a specific role/policy. +- **Columns**: Select one or more of the following to view more information about the request: + - **On Behalf Of** + - **Authorization System** + - **Tasks/Scope/Policies** + - **Request Date** + - **Schedule** + - **Reset to Default**: Select this option to discard your settings. +- **New Request**: Select this option to create a new request for permissions. For more information, see Create a request for permissions. + +### View pending requests + +The **Pending** table displays the following information: + +- **Summary**: A summary of the request. +- **Submitted By**: The name of the user who submitted the request. +- **On Behalf Of**: The name of the user on whose behalf the request was made. +- **Authorization System**: The authorization system the user selected. +- **Task/Scope/Policies**: The type of task/scope/policy selected. +- **Request Date**: The date when the request was made. +- **Submitted**: The period since the request was made. +- The ellipses **(...)** menu - Select the ellipses, and then select **Details**, **Approve**, or **Reject**. +- Select an option: + - **Reload**: Select this option to refresh the displayed list of roles/policies. + - **Search**: Select this option to search for a specific role/policy. + - **Columns**: From the dropdown, select the columns you want to display. 
+ + +### View approved requests + +The **Approved** table displays information about the requests that have been approved. + +### View processed requests + +The **Processed** table displays information about the requests that have been processed. + +## Make setting selections for requests and auto-approval + +The **Settings** subtab provides the following settings that you can use to configure **Request Role/Policy Filters**, **Request Settings**, and **Auto-Approve** requests. + +- **Authorization System Type**: Displays a dropdown with authorization system types you can access, AWS, Azure, and GCP. +- **Authorization System**: Displays a list of authorization systems accounts you can access. +- **Reload**: Select this option to refresh the displayed list of role/policy filters. +- **Create Filter**: Select this option to create a new filter. + +## Next steps + + +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to create a role/policy, see [Create a role/policy](how-to-create-role-policy.md). +- For information on how to clone a role/policy, see [Clone a role/policy](how-to-clone-role-policy.md). +- For information on how to delete a role/policy, see [Delete a role/policy](how-to-delete-role-policy.md). +- For information on how to modify a role/policy, see [Modify a role/policy](how-to-modify-role-policy.md). +- To view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). +- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](how-to-attach-detach-permissions.md). +- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](how-to-revoke-task-readonly-status.md). +- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](how-to-create-approve-privilege-request.md). +- For information on how to view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/ui-tasks.md b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-tasks.md new file mode 100644 index 0000000000000..c4d6d89960a9f --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-tasks.md @@ -0,0 +1,38 @@ +--- +title: View information about active and completed tasks in Permissions Management +description: How to view information about active and completed tasks in the Activities pane in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: overview +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# View information about active and completed tasks + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. 
+ +This article describes the usage of the **Permissions Management Tasks** pane in Permissions Management. + +## Display active and completed tasks + +1. In the Permissions Management home page, select **Tasks** (the timer icon). + + The **Permissions Management Tasks** pane appears on the right of the Permissions Management home page. It has two tabs: + - **Active**: Displays a list of active tasks, a description of each task, and when the task was started. + + If there are no active tasks, the following message displays: **There are no active tasks**. + - **Completed**: Displays a list of completed tasks, a description of each task, when the task was started and ended, and whether the task **Failed** or **Succeeded**. + + If there are no completed activities, the following message displays: **There are no recently completed tasks**. +1. To close the **Permissions Management Tasks** pane, click outside the pane. + +## Next steps + +- For information on how to create a role/policy in the **Remediation** dashboard, see [Create a role/policy](how-to-create-role-policy.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/ui-triggers.md b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-triggers.md new file mode 100644 index 0000000000000..01471aee6f3ac --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-triggers.md @@ -0,0 +1,87 @@ +--- +title: View information about activity triggers in Permissions Management +description: How to view information about activity triggers in the Activity triggers dashboard in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: overview +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# View information about activity triggers + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article describes how to use the **Activity triggers** dashboard in Permissions Management to view information about activity alerts and triggers. + +## Display the Activity triggers dashboard + +- In the Permissions Management home page, select **Activity triggers** (the bell icon). + + The **Activity triggers** dashboard has four tabs: + + - **Activity** + - **Rule-Based Anomaly** + - **Statistical Anomaly** + - **Permission Analytics** + + Each tab has two subtabs: + + - **Alerts** + - **Alert Triggers** + +## View information about alerts + +The **Alerts** subtab in the **Activity**, **Rule-Based Anomaly**, **Statistical Anomaly**, and **Permission Analytics** tabs display the following information: + +- **Alert Name**: Select **All** alert names or specific ones. +- **Date**: Select **Last 24 hours**, **Last 2 Days**, **Last Week**, or **Custom Range.** + + - If you select **Custom Range**, also enter **From** and **To** duration settings. +- **Apply**: Select this option to activate your settings. +- **Reset Filter**: Select this option to discard your settings. +- **Reload**: Select this option to refresh the displayed information. +- **Create Activity Trigger**: Select this option to [create a new alert trigger](how-to-create-alert-trigger.md). 
+- The **Alerts** table displays a list of alerts with the following information: + - **Alerts**: The name of the alert. + - **# of users subscribed**: The number of users who have subscribed to the alert. + - **Created By**: The name of the user who created the alert. + - **Modified By**: The name of the user who modified the alert. + +The **Rule-Based Anomaly** tab and the **Statistical Anomaly** tab both have one more option: + +- **Columns**: Select the columns you want to display: **Task**, **Resource**, and **Identity**. + - To return to the system default settings, select **Reset to default**. + +## View information about alert triggers + +The **Alert Triggers** subtab in the **Activity**, **Rule-Based Anomaly**, **Statistical Anomaly**, and **Permission Analytics** tab displays the following information: + +- **Status**: Select the alert status you want to display: **All**, **Activated**, or **Deactivated**. +- **Apply**: Select this option to activate your settings. +- **Reset Filter**: Select this option to discard your settings. +- **Reload**: Select **Reload** to refresh the displayed information. +- **Create Activity Trigger**: Select this option to [create a new alert trigger](how-to-create-alert-trigger.md). +- The **Triggers** table displays a list of triggers with the following information: + - **Alerts**: The name of the alert. + - **# of users subscribed**: The number of users who have subscribed to the alert. + - **Created By**: The name of the user who created the alert. + - **Modified By**: The name of the user who modified the alert. + + + + + + +## Next steps + +- For information on activity alerts and alert triggers, see [Create and view activity alerts and alert triggers](how-to-create-alert-trigger.md). +- For information on rule-based anomalies and anomaly triggers, see [Create and view rule-based anomalies and anomaly triggers](product-rule-based-anomalies.md). +- For information on finding outliers in identity's behavior, see [Create and view statistical anomalies and anomaly triggers](product-statistical-anomalies.md). +- For information on permission analytics triggers, see [Create and view permission analytics triggers](product-permission-analytics.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/ui-user-management.md b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-user-management.md new file mode 100644 index 0000000000000..0ac386906b41d --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-user-management.md @@ -0,0 +1,89 @@ +--- +title: Manage users and groups with the User management dashboard in Permissions Management +description: How to manage users and groups in the User management dashboard in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: overview +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# Manage users and groups with the User management dashboard + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article describes how to use the Permissions Management **User management** dashboard to view and manage users and groups. 
+ +**To display the User management dashboard**: + +- In the upper right of the Permissions Management home page, select **User** (your initials), and then select **User management**. + + The **User Management** dashboard has two tabs: + + - **Users**: Displays information about registered users. + - **Groups**: Displays information about groups. + +## Manage users + +Use the **Users** tab to display the following information about users: + +- **Name** and **Email Address**: The user's name and email address. +- **Joined On**: The date the user registered on the system. +- **Recent Activity**: The date the user last used their permissions to access the system. +- The ellipses **(...)** menu: Select the ellipses, and then select **View Permissions** to open the **View User Permission** box. + + - To view details about the user's permissions, select one of the following options: + - **Admin for all Authorization System Types** provides **View**, **Control**, and **Approve** permissions for all authorization system types. + - **Admin for selected Authorization System Types** provides **View**, **Control**, and **Approve** permissions for selected authorization system types. + - **Custom** provides **View**, **Control**, and **Approve** permissions for the authorization system types you select. + +You can also select the following options: + +- **Reload**: Select this option to refresh the information displayed in the **User** table. +- **Search**: Enter a name or email address to search for a specific user. + +## Manage groups + +Use the **Groups** tab to display the following information about groups: + +- **Name**: The name of the group. +- **Permissions**: + - The **Authorization Systems** and the type of permissions the user has been granted: **Admin for all Authorization System Types**, **Admin for selected Authorization System Types**, or **Custom**. + - Information about the **Viewer**, **Controller**, **Approver**, and **Requestor**. +- **Modified By**: The email address of the user who modified the group. +- **Modified On**: The date the user last modified the group. + +- The ellipses **(...)** menu: Select the ellipses to: + + - **View Permissions**: Select this option to view details about the group's permissions, and then select one of the following options: + - **Admin for all Authorization System Types** provides **View**, **Control**, and **Approve** permissions for all authorization system types. + - **Admin for selected Authorization System Types** provides **View**, **Control**, and **Approve** permissions for selected authorization system types. + - **Custom** provides **View**, **Control**, and **Approve** permissions for specific authorization system types that you select. + + - **Edit Permissions**: Select this option to modify the group's permissions. + - **Delete**: Select this option to delete the group's permissions. + + The **Delete Permission** box asks you to confirm that you want to delete the group. + - Select **Delete** if you want to delete the group, or **Cancel** to discard your changes. + + +You can also select the following options: + +- **Reload**: Select this option to refresh the information displayed in the **Groups** table. +- **Search**: Enter a name or email address to search for a specific user. +- **Filters**: Select the authorization systems and accounts you want to display. +- **Create Permission**: Create a group and set up its permissions. 
For more information, see [Create group-based permissions](how-to-create-group-based-permissions.md). + + + +## Next steps + +- For information about how to view active and completed tasks, see [View information about active and completed tasks](ui-tasks.md). +- For information about how to view personal and organization information, see [View personal and organization information](product-account-settings.md). +- For information about how to select group-based permissions settings, see [Select group-based permissions settings](how-to-create-group-based-permissions.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-access-keys.md b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-access-keys.md new file mode 100644 index 0000000000000..c2677c84e9cf8 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-access-keys.md @@ -0,0 +1,139 @@ +--- +title: View analytic information about access keys in Permissions Management +description: How to view analytic information about access keys in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# View analytic information about access keys + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +The **Analytics** dashboard in Permissions Management provides details about identities, resources, and tasks that you can use to make informed decisions about granting permissions and reducing the risk of unused permissions. + +- **Users**: Tracks assigned permissions and usage of various identities. +- **Groups**: Tracks assigned permissions and usage of the group and the group members. +- **Active Resources**: Tracks active resources (used in the last 90 days). +- **Active Tasks**: Tracks active tasks (performed in the last 90 days). +- **Access Keys**: Tracks the permission usage of access keys for a given user. +- **Serverless Functions**: Tracks assigned permissions and usage of the serverless functions. + +This article describes how to view usage analytics about access keys. + +## Create a query to view access keys + +When you select **Access keys**, the **Analytics** dashboard provides a high-level overview of tasks used by various identities. + +1. On the main **Analytics** dashboard, select **Access Keys** from the drop-down list at the top of the screen. + + The following components make up the **Access Keys** dashboard: + + - **Authorization System Type**: Select the authorization you want to use: Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP). + - **Authorization System**: Select from a **List** of accounts and **Folders**. + - **Key Status**: Select **All**, **Active**, or **Inactive**. + - **Key Activity State**: Select **All**, how long the access key has been used, or **Not Used**. + - **Key Age**: Select **All** or how long ago the access key was created. + - **Task Type**: Select **All** tasks, **High Risk Tasks** or, for a list of tasks where users have deleted data, select **Delete Tasks**. + - **Search**: Enter criteria to find specific tasks. +1. 
Select **Apply** to display the criteria you've selected. + + Select **Reset Filter** to discard your changes. + + +## View the results of your query + +The **Access Keys** table displays the results of your query. + +- **Access Key ID**: Provides the ID for the access key. + - To view details about the access keys, select the down arrow to the left of the ID. +- The **Owner** name. +- The **Account** number. +- The **Permission Creep Index (PCI)**: Provides the following information: + - **Index**: A numeric value assigned to the PCI. + - **Since**: How many days the PCI value has been at the displayed level. +- **Tasks**: Displays the number of **Granted** and **Executed** tasks. +- **Resources**: The number of resources used. +- **Access Key Age**: How old the access key is, in days. +- **Last Used**: How long ago the access key was last accessed. + +## Apply filters to your query + +There are many filter options within the **Access Keys** screen, including filters by **Authorization System**, **Key Status**, **Key Activity State**, **Key Age**, and **Task Type**. +Filters can be applied in one or more categories depending on the type of information you're looking for. + +### Apply filters by authorization system type + +1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. Select **Apply** to run your query and display the information you selected. + + Select **Reset Filter** to discard your changes. + + +### Apply filters by authorization system + +1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. From the **Authorization System** dropdown, select accounts from a **List** of accounts and **Folders**. +1. Select **Apply** to run your query and display the information you selected. + + Select **Reset Filter** to discard your changes. + +### Apply filters by key status

1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. +1. From the **Key Status** dropdown, select the type of key: **All**, **Active**, or **Inactive**. +1. Select **Apply** to run your query and display the information you selected. + + Select **Reset Filter** to discard your changes. + +### Apply filters by key activity status + +1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. +1. From the **Key Activity State** dropdown, select **All**, the duration for how long the access key has been used, or **Not Used**. + +1. Select **Apply** to run your query and display the information you selected. + + Select **Reset Filter** to discard your changes. + +### Apply filters by key age + +1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. +1. From the **Key Age** dropdown, select **All** or how long ago the access key was created. + +1. Select **Apply** to run your query and display the information you selected. + + Select **Reset Filter** to discard your changes. + +### Apply filters by task type + +1. 
From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. +1. From the **Task Type** dropdown, select **All** tasks, **High Risk Tasks** or, for a list of tasks where users have deleted data, select **Delete tasks**. +1. Select **Apply** to run your query and display the information you selected. + + Select **Reset Filter** to discard your changes. + + + +## Export the results of your query + +- To view a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV** or **CSV (Detailed)**. + +## Next steps + +- To view active tasks, see [View usage analytics about active tasks](usage-analytics-active-tasks.md). +- To view assigned permissions and usage by users, see [View usage analytics about users](usage-analytics-users.md). +- To view assigned permissions and usage of the group and the group members, see [View usage analytics about groups](usage-analytics-groups.md). +- To view active resources, see [View usage analytics about active resources](usage-analytics-active-resources.md). +- To view assigned permissions and usage of the serverless functions, see [View usage analytics about serverless functions](usage-analytics-serverless-functions.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-active-resources.md b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-active-resources.md new file mode 100644 index 0000000000000..d05b4f4b48984 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-active-resources.md @@ -0,0 +1,126 @@ +--- +title: View analytic information about active resources in Permissions Management +description: How to view usage analytics about active resources in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# View analytic information about active resources + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +The **Analytics** dashboard in Permissions Management collects detailed information, analyzes, reports on, and visualizes data about all identity types. System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: + +- **Users**: Tracks assigned permissions and usage of various identities. +- **Groups**: Tracks assigned permissions and usage of the group and the group members. +- **Active Resources**: Tracks active resources (used in the last 90 days). +- **Active Tasks**: Tracks active tasks (performed in the last 90 days). +- **Access Keys**: Tracks the permission usage of access keys for a given user. +- **Serverless Functions**: Tracks assigned permissions and usage of the serverless functions. + +This article describes how to view usage analytics about active resources. + +## Create a query to view active resources + +1. 
On the main **Analytics** dashboard, select **Active Resources** from the drop-down list at the top of the screen. + + The dashboard only lists resources that are active. The following components make up the **Active Resources** dashboard: +1. From the dropdowns, select: + - **Authorization System Type**: The authorization you want to use: Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP). + - **Authorization System**: The **List** of accounts and **Folders** you want to include. + - **Tasks Type**: Select **All** tasks, **High Risk Tasks** or, for a list of tasks where users have deleted data, select **Delete Tasks**. + - **Service Resource Type**: The service resource type. + - **Search**: Enter criteria to find specific tasks. + +1. Select **Apply** to display the criteria you've selected. + + Select **Reset Filter** to discard your changes. + + +## View the results of your query + +The **Active Resources** table displays the results of your query: + +- **Resource Name**: Provides the name of the resource. + - To view details about the resource, select the down arrow. +- **Account**: The name of the account. +- **Resources Type**: The type of resources used, for example, **bucket** or **key**. +- **Tasks**: Displays the number of **Granted** and **Executed** tasks. +- **Number of Users**: The number of users with access to the resource and the number of users who have accessed it. +- Select the ellipses **(...)** and select **Tags** to add a tag. + +## Add a tag to an active resource + +1. Select the ellipses **(...)** and select **Tags**. +1. From the **Select a Tag** dropdown, select a tag. +1. To create a custom tag, select **New Custom Tag**, add a tag name, and then select **Create**. +1. In the **Value (Optional)** box, enter a value. +1. Select the ellipses **(...)** to select **Advanced Save** options, and then select **Save**. +1. To add the tag to the active resource, select **Add Tag**. + + +## Apply filters to your query + +There are many filter options within the **Active Resources** screen, including filters by **Authorization System**, **Task Type**, and **Service Resource Type**. +Filters can be applied in one or more categories depending on the type of information you're looking for. + +### Apply filters by authorization system type + +1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. Select **Apply** to run your query and display the information you selected. + + Select **Reset Filter** to discard your changes. + + +### Apply filters by authorization system + +1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. +1. Select **Apply** to run your query and display the information you selected. + + Select **Reset Filter** to discard your changes. + +### Apply filters by task type + +You can filter the results by the type of task. + +1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. +1. From the **Tasks Type** dropdown, select the type of task: **All**, **High Risk Tasks**, or **Delete Tasks**. +1. Select **Apply** to run your query and display the information you selected. 
+ + Select **Reset Filter** to discard your changes. + + +### Apply filters by service resource type + +You can filter user details by type of user, user role, app, or service used, or by resource. + +1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. +1. From the **Service Resource Type**, select the type of service resource. +1. Select **Apply** to run your query and display the information you selected. + + Select **Reset Filter** to discard your changes. + +## Export the results of your query + +- To view a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV**. + + +## Next steps + +- To track active tasks, see [View usage analytics about active tasks](usage-analytics-active-tasks.md). +- To track assigned permissions and usage of users, see [View usage analytics about users](usage-analytics-users.md). +- To track assigned permissions and usage of the group and the group members, see [View usage analytics about groups](usage-analytics-groups.md). +- To track the permission usage of access keys for a given user, see [View usage analytics about access keys](usage-analytics-access-keys.md). +- To track assigned permissions and usage of the serverless functions, see [View usage analytics about serverless functions](usage-analytics-serverless-functions.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-active-tasks.md b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-active-tasks.md new file mode 100644 index 0000000000000..729df078b99de --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-active-tasks.md @@ -0,0 +1,111 @@ +--- +title: View analytic information about active tasks in Permissions Management +description: How to view analytic information about active tasks in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# View analytic information about active tasks + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +The **Analytics** dashboard in Permissions Management collects detailed information, analyzes, reports on, and visualizes data about all identity types. System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: + +- **Users**: Tracks assigned permissions and usage of various identities. +- **Groups**: Tracks assigned permissions and usage of the group and the group members. +- **Active Resources**: Tracks active resources (used in the last 90 days). +- **Active Tasks**: Tracks active tasks (performed in the last 90 days). +- **Access Keys**: Tracks the permission usage of access keys for a given user. +- **Serverless Functions**: Tracks assigned permissions and usage of the serverless functions. + +This article describes how to view usage analytics about active tasks. 
+ +## Create a query to view active tasks + +When you select **Active Tasks**, the **Analytics** dashboard provides a high-level overview of tasks used by various identities. + +1. On the main **Analytics** dashboard, select **Active Tasks** from the drop-down list at the top of the screen. + + The dashboard only lists tasks that are active. The following components make up the **Active Tasks** dashboard: + + - **Authorization System Type**: Select the authorization you want to use: Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP). + - **Authorization System**: Select from a **List** of accounts and **Folders***. + - **Tasks Type**: Select **All** tasks, **High Risk tasks** or, for a list of tasks where users have deleted data, select **Delete Tasks**. + - **Search**: Enter criteria to find specific tasks. + +1. Select **Apply** to display the criteria you've selected. + + Select **Reset Filter** to discard your changes. + + +## View the results of your query + +The **Active Tasks** table displays the results of your query. + +- **Task Name**: Provides the name of the task. + - To view details about the task, select the down arrow in the table. + + - A **Normal Task** icon displays to the left of the task name if the task is normal (that is, not risky). + - A **Deleted Task** icon displays to the left of the task name if the task involved deleting data. + - A **High-Risk Task** icon displays to the left of the task name if the task is high-risk. + +- **Performed on (resources)**: The number of resources on which the task was used. + +- **Number of Users**: Displays how many users performed tasks. The tasks are organized into the following columns: + - **With Access**: Displays the number of users that have access to the task but haven't accessed it. + - **Accessed**: Displays the number of users that have accessed the task. + + +## Apply filters to your query + +There are many filter options within the **Active Tasks** screen, including **Authorization System**, **User**, and **Task**. +Filters can be applied in one, two, or all three categories depending on the type of information you're looking for. + +### Apply filters by authorization system type + +1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. Select **Apply** to run your query and display the information you selected. + + Select **Reset Filter** to discard your changes. + +### Apply filters by authorization system + +1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. From the **Authorization System** dropdown, select accounts from a **List** of accounts and **Folders**. +1. Select **Apply** to run your query and display the information you selected. + + Select **Reset Filter** to discard your changes. + + +### Apply filters by task type + +You can filter user details by type of user, user role, app, or service used, or by resource. + +1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. +1. From the **Task Type** dropdown, select the type of tasks: **All**, **High Risk Tasks**, or **Delete Tasks**. +1. Select **Apply** to run your query and display the information you selected. + + Select **Reset Filter** to discard your changes. 
+ + +## Export the results of your query + +- To view a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV**. + +## Next steps + +- To view assigned permissions and usage by users, see [View analytic information about users](usage-analytics-users.md). +- To view assigned permissions and usage of the group and the group members, see [View analytic information about groups](usage-analytics-groups.md). +- To view active resources, see [View analytic information about active resources](usage-analytics-active-resources.md). +- To view the permission usage of access keys for a given user, see [View analytic information about access keys](usage-analytics-access-keys.md). +- To view assigned permissions and usage of the serverless functions, see [View analytic information about serverless functions](usage-analytics-serverless-functions.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-groups.md b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-groups.md new file mode 100644 index 0000000000000..11894bc662e38 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-groups.md @@ -0,0 +1,154 @@ +--- +title: View analytic information about groups in Permissions Management +description: How to view analytic information about groups in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# View analytic information about groups + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +The **Analytics** dashboard in Permissions Management collects detailed information, analyzes, reports on, and visualizes data about all identity types. System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: + +- **Users**: Tracks assigned permissions and usage of various identities. +- **Groups**: Tracks assigned permissions and usage of the group and the group members. +- **Active Resources**: Tracks active resources (used in the last 90 days). +- **Active Tasks**: Tracks active tasks (performed in the last 90 days). +- **Access Keys**: Tracks the permission usage of access keys for a given user. +- **Serverless Functions**: Tracks assigned permissions and usage of the serverless functions. + +This article describes how to view usage analytics about groups. + +## Create a query to view groups + +When you select **Groups**, the **Usage Analytics** dashboard provides a high-level overview of groups. + +1. On the main **Analytics** dashboard, select **Groups** from the drop-down list at the top of the screen. + + The following components make up the **Groups** dashboard: + + - **Authorization System Type**: Select the authorization you want to use: Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP). + - **Authorization System**: Select from a **List** of accounts and **Folders**. + - **Group Type**: Select **All**, **ED**, or **Local**. 
+ - **Group Activity Status**: Select **All**, **Active**, or **Inactive**.
+ - **Tasks Type**: Select **All**, **High Risk Tasks**, or **Delete Tasks**.
+ - **Search**: Enter a group name to find a specific group.
+1. To display the criteria you've selected, select **Apply**.
+ - **Reset Filter**: Select to discard your changes.
+
+
+## View the results of your query
+
+The **Groups** table displays the results of your query:
+
+- **Group Name**: Provides the name of the group.
+ - To view details about the group, select the down arrow.
+- A **Group Type** icon displays to the left of the group name to describe the type of group (**ED** or **Local**).
+- The **Domain/Account** name.
+- The **Permission Creep Index (PCI)**: Provides the following information:
+ - **Index**: A numeric value assigned to the PCI.
+ - **Since**: How many days the PCI value has been at the displayed level.
+- **Tasks**: Displays the number of **Granted** and **Executed** tasks.
+- **Resources**: The number of resources used.
+- **Users**: The number of users who accessed the group.
+- Select the ellipses **(...)** and select **Tags** to add a tag.
+
+## Add a tag to a group
+
+1. Select the ellipses **(...)** and select **Tags**.
+1. From the **Select a Tag** dropdown, select a tag.
+1. To create a custom tag, select **New Custom Tag**, add a tag name, and then select **Create**.
+1. In the **Value (Optional)** box, enter a value.
+1. Select the ellipses **(...)** to select **Advanced Save** options, and then select **Save**.
+1. To add the tag to the group, select **Add Tag**.
+
+## View detailed information about a group
+
+1. Select the down arrow to the left of the **Group Name**.
+
+ The list of **Tasks** organized by **Unused** and **Used** displays.
+
+1. Select the arrow to the left of the group name to view details about the task.
+1. Select **Information** (**i**) to view when the task was last used.
+1. From the **Tasks** dropdown, select **All Tasks**, **High Risk Tasks**, or **Delete Tasks**.
+1. The pane on the right displays a list of **Users**, **Policies** for **AWS** and **Roles** for **GCP or Azure**, and **Tags**.
+
+## Apply filters to your query
+
+There are many filter options within the **Groups** screen, including filters by **Authorization System Type**, **Authorization System**, **Group Type**, **Group Activity Status**, and **Tasks Type**.
+Filters can be applied in one or more of these categories depending on the type of information you're looking for.
+
+### Apply filters by authorization system type
+
+1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**.
+1. Select **Apply** to run your query and display the information you selected.
+
+ Select **Reset Filter** to discard your changes.
+
+### Apply filters by authorization system
+
+1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**.
+1. From the **Authorization System** dropdown, select accounts from a **List** of accounts and **Folders**.
+1. Select **Apply** to run your query and display the information you selected.
+
+ Select **Reset Filter** to discard your changes.
+
+
+### Apply filters by group type
+
+You can filter the results by group type.
+
+1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**.
+1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**.
+1. From the **Group Type** dropdown, select the type of group: **All**, **ED**, or **Local**.
+1. Select **Apply** to run your query and display the information you selected.
+
+ Select **Reset Filter** to discard your changes.
+
+### Apply filters by group activity status
+
+You can filter the results by group activity status.
+
+1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**.
+1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**.
+1. From the **Group Activity Status** dropdown, select the activity status: **All**, **Active**, or **Inactive**.
+1. Select **Apply** to run your query and display the information you selected.
+
+ Select **Reset Filter** to discard your changes.
+
+### Apply filters by tasks type
+
+You can filter the results by task type.
+
+1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**.
+1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**.
+1. From the **Tasks Type** dropdown, select the type of task: **All**, **High Risk Tasks**, or **Delete Tasks**.
+1. Select **Apply** to run your query and display the information you selected.
+
+ Select **Reset Filter** to discard your changes.
+
+
+## Export the results of your query
+
+- To view a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV**.
+- To view a list of members of the groups in your query, select **Export**, and then select **Memberships**.
+
+
+
+## Next steps
+
+- To view active tasks, see [View analytic information about active tasks](usage-analytics-active-tasks.md).
+- To view assigned permissions and usage by users, see [View analytic information about users](usage-analytics-users.md).
+- To view active resources, see [View analytic information about active resources](usage-analytics-active-resources.md).
+- To view the permission usage of access keys for a given user, see [View analytic information about access keys](usage-analytics-access-keys.md).
+- To view assigned permissions and usage of the serverless functions, see [View analytic information about serverless functions](usage-analytics-serverless-functions.md).
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-home.md b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-home.md
new file mode 100644
index 0000000000000..e0933b95f7a19
--- /dev/null
+++ b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-home.md
@@ -0,0 +1,52 @@
+---
+title: View analytic information with the Analytics dashboard in Permissions Management
+description: How to use the Analytics dashboard in Permissions Management to view details about users, groups, active resources, active tasks, access keys, and serverless functions.
+services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# View analytic information with the Analytics dashboard + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +This article provides a brief overview of the Analytics dashboard in Permissions Management, and the type of analytic information it provides for Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP). + +## Display the Analytics dashboard + +- From the Permissions Management home page, select the **Analytics** tab. + + The **Analytics** dashboard displays detailed information about: + + - **Users**: Tracks assigned permissions and usage by users. For more information, see [View analytic information about users](usage-analytics-users.md). + + - **Groups**: Tracks assigned permissions and usage of the group and the group members. For more information, see [View analytic information about groups](usage-analytics-groups.md). + + - **Active Resources**: Tracks resources that have been used in the last 90 days. For more information, see [View analytic information about active resources](usage-analytics-active-resources.md). + + - **Active Tasks**: Tracks tasks that have been performed in the last 90 days. For more information, see [View analytic information about active tasks](usage-analytics-active-tasks.md). + + - **Access Keys**: Tracks the permission usage of access keys for a given user. For more information, see [View analytic information about access keys](usage-analytics-access-keys.md). + + - **Serverless Functions**: Tracks assigned permissions and usage of the serverless functions for AWS only. For more information, see [View analytic information about serverless functions](usage-analytics-serverless-functions.md). + + System administrators can use this information to make decisions about granting permissions and reducing risk on unused permissions. + + + +## Next steps + +- To view active tasks, see [View analytic information about active tasks](usage-analytics-active-tasks.md). +- To view assigned permissions and usage by users, see [View analytic information about users](usage-analytics-users.md). +- To view assigned permissions and usage of the group and the group members, see [View analytic information about groups](usage-analytics-groups.md). +- To view active resources, see [View analytic information about active resources](usage-analytics-active-resources.md). +- To view the permission usage of access keys for a given user, see [View analytic information about access keys](usage-analytics-access-keys.md). +- To view assigned permissions and usage of the serverless functions, see [View analytic information about serverless functions](usage-analytics-serverless-functions.md). 
\ No newline at end of file diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-serverless-functions.md b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-serverless-functions.md new file mode 100644 index 0000000000000..e9d93ed26b0a6 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-serverless-functions.md @@ -0,0 +1,112 @@ +--- +title: View analytic information about serverless functions in Permissions Management +description: How to view analytic information about serverless functions in Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# View analytic information about serverless functions + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +The **Analytics** dashboard in Permissions Management collects detailed information, analyzes, reports on, and visualizes data about all identity types. System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: + +- **Users**: Tracks assigned permissions and usage of various identities. +- **Groups**: Tracks assigned permissions and usage of the group and the group members. +- **Active Resources**: Tracks active resources (used in the last 90 days). +- **Active Tasks**: Tracks active tasks (performed in the last 90 days). +- **Access Keys**: Tracks the permission usage of access keys for a given user. +- **Serverless Functions**: Tracks assigned permissions and usage of the serverless functions. + +This article describes how to view usage analytics about serverless functions. + +## Create a query to view serverless functions + +When you select **Serverless Functions**, the **Analytics** dashboard provides a high-level overview of tasks used by various identities. + +1. On the main **Analytics** dashboard, select **Serverless Functions** from the dropdown list at the top of the screen. + + The following components make up the **Serverless Functions** dashboard: + + - **Authorization System Type**: Select the authorization you want to use: Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP). + - **Authorization System**: Select from a **List** of accounts and **Folders**. + - **Search**: Enter criteria to find specific tasks. +1. Select **Apply** to display the criteria you've selected. + + Select **Reset Filter** to discard your changes. + + +## View the results of your query + +The **Serverless Functions** table displays the results of your query. + +- **Function Name**: Provides the name of the serverless function. + - To view details about a serverless function, select the down arrow to the left of the function name. +- A **Function Type** icon displays to the left of the function name to describe the type of serverless function, for example **Lambda function**. +- The **Permission Creep Index (PCI)**: Provides the following information: + - **Index**: A numeric value assigned to the PCI. + - **Since**: How many days the PCI value has been at the displayed level. 
+- **Tasks**: Displays the number of **Granted** and **Executed** tasks.
+- **Resources**: The number of resources used.
+- **Last Activity On**: The date the function was last accessed.
+- Select the ellipses **(...)**, and then select **Tags** to add a tag.
+
+## Add a tag to a serverless function
+
+1. Select the ellipses **(...)** and select **Tags**.
+1. From the **Select a Tag** dropdown, select a tag.
+1. To create a custom tag, select **New Custom Tag**, add a tag name, and then select **Create**.
+1. In the **Value (Optional)** box, enter a value.
+1. Select the ellipses **(...)** to select **Advanced Save** options, and then select **Save**.
+1. To add the tag to the serverless function, select **Add Tag**.
+
+## View detailed information about a serverless function
+
+1. Select the down arrow to the left of the function name to display the following:
+
+ - A list of **Tasks** organized by **Used** and **Unused**.
+ - **Versions**, if a version is available.
+
+1. Select the arrow to the left of the task name to view details about the task.
+1. Select **Information** (**i**) to view when the task was last used.
+1. From the **Tasks** dropdown, select **All Tasks**, **High Risk Tasks**, or **Delete Tasks**.
+
+
+## Apply filters to your query
+
+You can filter the **Serverless Functions** results by **Authorization System Type** and **Authorization System**.
+
+### Apply filters by authorization system type
+
+1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**.
+1. Select **Apply** to run your query and display the information you selected.
+
+ Select **Reset Filter** to discard your changes.
+
+
+### Apply filters by authorization system
+
+1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**.
+1. From the **Authorization System** dropdown, select accounts from a **List** of accounts and **Folders**.
+1. Select **Apply** to run your query and display the information you selected.
+
+ Select **Reset Filter** to discard your changes.
+
+
+
+## Next steps
+
+- To view active tasks, see [View analytic information about active tasks](usage-analytics-active-tasks.md).
+- To view assigned permissions and usage by users, see [View analytic information about users](usage-analytics-users.md).
+- To view assigned permissions and usage of the group and the group members, see [View analytic information about groups](usage-analytics-groups.md).
+- To view active resources, see [View analytic information about active resources](usage-analytics-active-resources.md).
+- To view the permission usage of access keys for a given user, see [View analytic information about access keys](usage-analytics-access-keys.md).
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-users.md b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-users.md
new file mode 100644
index 0000000000000..51779608d21ca
--- /dev/null
+++ b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-users.md
@@ -0,0 +1,166 @@
+---
+title: View analytic information about users in Permissions Management
+description: How to view analytic information about users in Permissions Management.
+services: active-directory
+author: kenwith
+manager: rkarlin
+ms.service: active-directory
+ms.subservice: ciem
+ms.workload: identity
+ms.topic: how-to
+ms.date: 02/23/2022
+ms.author: kenwith
+---
+
+# View analytic information about users
+
+> [!IMPORTANT]
+> Microsoft Entra Permissions Management is currently in PREVIEW.
+> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here.
+
+The **Analytics** dashboard in Permissions Management collects detailed information, analyzes, reports on, and visualizes data about all identity types. System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for:
+
+- **Users**: Tracks assigned permissions and usage of various identities.
+- **Groups**: Tracks assigned permissions and usage of the group and the group members.
+- **Active Resources**: Tracks active resources (used in the last 90 days).
+- **Active Tasks**: Tracks active tasks (performed in the last 90 days).
+- **Access Keys**: Tracks the permission usage of access keys for a given user.
+- **Serverless Functions**: Tracks assigned permissions and usage of the serverless functions.
+
+This article describes how to view usage analytics about users.
+
+## Create a query to view users
+
+When you select **Users**, the **Analytics** dashboard provides a high-level overview of tasks used by various identities.
+
+1. On the main **Analytics** dashboard, select **Users** from the drop-down list at the top of the screen.
+
+ The following components make up the **Users** dashboard:
+
+ - **Authorization System Type**: Select the authorization you want to use: Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP).
+ - **Authorization System**: Select from a **List** of accounts and **Folders**.
+ - **Identity Type**: Select **All** identity types, **User**, **Role/App/Service a/c**, or **Resource**.
+ - **Search**: Enter criteria to find specific tasks.
+1. Select **Apply** to display the criteria you've selected.
+
+ Select **Reset Filter** to discard your changes.
+
+
+## View the results of your query
+
+The **Identities** table displays the results of your query.
+
+- **Name**: Provides the name of the identity.
+ - To view details about the identity, select the down arrow.
+- The **Domain/Account** name.
+- The **Permission Creep Index (PCI)**: Provides the following information:
+ - **Index**: A numeric value assigned to the PCI.
+ - **Since**: How many days the PCI value has been at the displayed level.
+- **Tasks**: Displays the number of **Granted** and **Executed** tasks.
+- **Resources**: The number of resources used.
+- **User Groups**: The number of users who accessed the group.
+- **Last Activity On**: The date the identity was last active.
+- The ellipses **(...)**: Select **Tags** to add a tag.
+
+ If you're using AWS, another selection is available from the ellipses menu: **Auto Remediate**. You can use this option to remediate your results automatically.
+
+## Add a tag to a user
+
+1. Select the ellipses **(...)** and select **Tags**.
+1. From the **Select a Tag** dropdown, select a tag.
+1. To create a custom tag, select **New Custom Tag**, add a tag name, and then select **Create**.
+1. In the **Value (Optional)** box, enter a value.
+1. Select the ellipses **(...)** to select **Advanced Save** options, and then select **Save**.
+1. To add the tag to the user, select **Add Tag**.
+
+## Set the auto-remediate option (AWS only)
+
+- Select the ellipses **(...)** and select **Auto Remediate**.
+
+ A message displays to confirm that your remediation settings are automatically updated.
+
+## Apply filters to your query
+
+There are many filter options within the **Users** screen, including filters by **Authorization System**, **Identity Type**, and **Identity State**.
+Filters can be applied in one or more of these categories depending on the type of information you're looking for.
+
+### Apply filters by authorization system type
+
+1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**.
+1. Select **Apply** to run your query and display the information you selected.
+
+ Select **Reset Filter** to discard your changes.
+
+
+### Apply filters by authorization system
+
+1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**.
+1. From the **Authorization System** dropdown, select accounts from a **List** of accounts and **Folders**.
+1. Select **Apply** to run your query and display the information you selected.
+
+ Select **Reset Filter** to discard your changes.
+
+### Apply filters by identity type
+
+1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**.
+1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**.
+1. From the **Identity Type** dropdown, select the type of identity: **All**, **User**, **Role/App/Service a/c**, or **Resource**.
+1. Select **Apply** to run your query and display the information you selected.
+
+ Select **Reset Filter** to discard your changes.
+
+### Apply filters by identity subtype
+
+1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**.
+1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**.
+1. From the **Identity Subtype** dropdown, select the identity subtype: **All**, **ED**, **Local**, or **Cross Account**.
+1. Select **Apply** to run your query and display the information you selected.
+
+ Select **Reset Filter** to discard your changes.
+
+### Apply filters by identity state
+
+1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**.
+1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**.
+1. From the **Identity State** dropdown, select the identity state: **All**, **Active**, or **Inactive**.
+1. Select **Apply** to run your query and display the information you selected.
+
+ Select **Reset Filter** to discard your changes.
+
+
+### Apply filters by identity filters
+
+1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**.
+1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**.
+1. From the **Identity Type** dropdown, select **Risky** or **Incl. in PCI Calculation Only**.
+1. Select **Apply** to run your query and display the information you selected.
+
+ Select **Reset Filter** to discard your changes.
+
+
+### Apply filters by task type
+
+You can filter the results by the type of task.
+
+1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**.
+1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**.
+1. From the **Task Type** dropdown, select the type of task: **All** or **High Risk Tasks**.
+1. Select **Apply** to run your query and display the information you selected.
+
+ Select **Reset Filter** to discard your changes.
+
+
+## Export the results of your query
+
+- To export a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV**.
+- To export the data in a detailed comma-separated values (CSV) file format, select **Export** and then select **CSV (Detailed)**.
+- To export a report of user permissions, select **Export** and then select **Permissions**.
+
+
+## Next steps
+
+- To view active tasks, see [View analytic information about active tasks](usage-analytics-active-tasks.md).
+- To view assigned permissions and usage of the group and the group members, see [View analytic information about groups](usage-analytics-groups.md).
+- To view active resources, see [View analytic information about active resources](usage-analytics-active-resources.md).
+- To view the permission usage of access keys for a given user, see [View analytic information about access keys](usage-analytics-access-keys.md).
+- To view assigned permissions and usage of the serverless functions, see [View analytic information about serverless functions](usage-analytics-serverless-functions.md).
\ No newline at end of file
diff --git a/articles/active-directory/conditional-access/TOC.yml b/articles/active-directory/conditional-access/TOC.yml index be1e8d4bbe3bb..189303227a579 100644 --- a/articles/active-directory/conditional-access/TOC.yml +++ b/articles/active-directory/conditional-access/TOC.yml @@ -115,13 +115,13 @@ - name: Beta Graph APIs items: - name: conditionalAccessPolicy API - href: /graph/api/resources/conditionalaccesspolicy?view=graph-rest-beta + href: /graph/api/resources/conditionalaccesspolicy - name: namedLocation API - href: /graph/api/resources/namedlocation?view=graph-rest-beta + href: /graph/api/resources/namedlocation - name: countryNamedLocation API - href: /graph/api/resources/countrynamedlocation?view=graph-rest-beta + href: /graph/api/resources/countrynamedlocation - name: ipNamedLocation API - href: /graph/api/resources/ipnamedlocation?view=graph-rest-beta + href: /graph/api/resources/ipnamedlocation - name: Resources items: - name: Azure feedback forum diff --git a/articles/active-directory/conditional-access/block-legacy-authentication.md b/articles/active-directory/conditional-access/block-legacy-authentication.md index 1424f91a24ea3..324b94af47432 100644 --- a/articles/active-directory/conditional-access/block-legacy-authentication.md +++ b/articles/active-directory/conditional-access/block-legacy-authentication.md @@ -11,7 +11,7 @@ manager: karenhoran ms.reviewer: calebb, dawoo, jebeckha, grtaylor ms.collection: M365-identity-device-management --- -# How to: Block legacy authentication to Azure AD with Conditional Access +# How to: Block legacy authentication access to Azure AD with Conditional Access To give your users easy access to your cloud apps, Azure Active Directory (Azure AD) supports a broad variety of authentication protocols including legacy authentication. However, legacy authentication doesn't support multifactor authentication (MFA).
MFA is in many environments a common requirement to address identity theft. @@ -85,7 +85,7 @@ For more information about these authentication protocols and services, see [Sig Before you can block legacy authentication in your directory, you need to first understand if your users have apps that use legacy authentication and how it affects your overall directory. Azure AD sign-in logs can be used to understand if you're using legacy authentication. -1. Navigate to the **Azure portal** > **Azure Active Directory** > **Sign-ins**. +1. Navigate to the **Azure portal** > **Azure Active Directory** > **Sign-in logs**. 1. Add the Client App column if it isn't shown by clicking on **Columns** > **Client App**. 1. **Add filters** > **Client App** > select all of the legacy authentication protocols. Select outside the filtering dialog box to apply your selections and close the dialog box. 1. If you've activated the [new sign-in activity reports preview](../reports-monitoring/concept-all-sign-ins.md), repeat the above steps also on the **User sign-ins (non-interactive)** tab. diff --git a/articles/active-directory/conditional-access/concept-condition-filters-for-devices.md b/articles/active-directory/conditional-access/concept-condition-filters-for-devices.md index 31dc70544f969..de0fdca9aef12 100644 --- a/articles/active-directory/conditional-access/concept-condition-filters-for-devices.md +++ b/articles/active-directory/conditional-access/concept-condition-filters-for-devices.md @@ -23,7 +23,7 @@ There are multiple scenarios that organizations can now enable using filter for - **Restrict access to privileged resources**. For this example, lets say you want to allow access to Microsoft Azure Management from a user who is assigned a privilged role Global Admin, has satisfied multifactor authentication and accessing from a device that is [privileged or secure admin workstations](/security/compass/privileged-access-devices) and attested as compliant. For this scenario, organizations would create two Conditional Access policies: - Policy 1: All users with the directory role of Global administrator, accessing the Microsoft Azure Management cloud app, and for Access controls, Grant access, but require multifactor authentication and require device to be marked as compliant. - - Policy 2: All users with the directory role of Global administrator, accessing the Microsoft Azure Management cloud app, excluding a filter for devices using rule expression device.extensionAttribute1 equals SAW and for Access controls, Block. Learn how to [update extensionAttributes on an Azure AD device object](https://docs.microsoft.com/graph/api/device-update?view=graph-rest-1.0&tabs=http). + - Policy 2: All users with the directory role of Global administrator, accessing the Microsoft Azure Management cloud app, excluding a filter for devices using rule expression device.extensionAttribute1 equals SAW and for Access controls, Block. Learn how to [update extensionAttributes on an Azure AD device object](/graph/api/device-update?view=graph-rest-1.0&tabs=http&preserve-view=true). - **Block access to organization resources from devices running an unsupported Operating System**. For this example, lets say you want to block access to resources from Windows OS version older than Windows 10. 
For this scenario, organizations would create the following Conditional Access policy: - All users, accessing all cloud apps, excluding a filter for devices using rule expression device.operatingSystem equals Windows and device.operatingSystemVersion startsWith "10.0" and for Access controls, Block. - **Do not require multifactor authentication for specific accounts on specific devices**. For this example, lets say you want to not require multifactor authentication when using service accounts on specific devices like Teams phones or Surface Hub devices. For this scenario, organizations would create the following two Conditional Access policies: @@ -145,6 +145,7 @@ The filter for devices condition in Conditional Access evaluates policy based on ## Next steps +- [Back to school – Using Boolean algebra correctly in complex filters](https://techcommunity.microsoft.com/t5/intune-customer-success/back-to-school-using-boolean-algebra-correctly-in-complex/ba-p/3422765) - [Update device Graph API](/graph/api/device-update?tabs=http) - [Conditional Access: Conditions](concept-conditional-access-conditions.md) - [Common Conditional Access policies](concept-conditional-access-policy-common.md) diff --git a/articles/active-directory/conditional-access/concept-conditional-access-session.md b/articles/active-directory/conditional-access/concept-conditional-access-session.md index 3155a26f175b6..56d068aef84fd 100644 --- a/articles/active-directory/conditional-access/concept-conditional-access-session.md +++ b/articles/active-directory/conditional-access/concept-conditional-access-session.md @@ -28,7 +28,7 @@ Organizations can use this control to require Azure AD to pass device informatio For more information on the use and configuration of app-enforced restrictions, see the following articles: - [Enabling limited access with SharePoint Online](/sharepoint/control-access-from-unmanaged-devices) -- [Enabling limited access with Exchange Online](/microsoft-365/security/office-365-security/secure-email-recommended-policies?view=o365-worldwide#limit-access-to-exchange-online-from-outlook-on-the-web) +- [Enabling limited access with Exchange Online](/microsoft-365/security/office-365-security/secure-email-recommended-policies?view=o365-worldwide#limit-access-to-exchange-online-from-outlook-on-the-web&preserve-view=true) ## Conditional Access application control diff --git a/articles/active-directory/conditional-access/concept-conditional-access-users-groups.md b/articles/active-directory/conditional-access/concept-conditional-access-users-groups.md index 03e5a2e35ecf1..4cb779d52a844 100644 --- a/articles/active-directory/conditional-access/concept-conditional-access-users-groups.md +++ b/articles/active-directory/conditional-access/concept-conditional-access-users-groups.md @@ -6,7 +6,7 @@ services: active-directory ms.service: active-directory ms.subservice: conditional-access ms.topic: conceptual -ms.date: 03/17/2021 +ms.date: 06/01/2022 ms.author: joflore author: MicrosoftGuyJFlo @@ -35,7 +35,7 @@ The following options are available to include when creating a Conditional Acces - All guest and external users - This selection includes any B2B guests and external users including any user with the `user type` attribute set to `guest`. This selection also applies to any external user signed-in from a different organization like a Cloud Solution Provider (CSP). - Directory roles - - Allows administrators to select specific built-in Azure AD directory roles used to determine policy assignment. 
For example, organizations may create a more restrictive policy on users assigned the global administrator role. Other role types are not supported, including administrative unit-scoped roles and custom roles. + - Allows administrators to select specific built-in Azure AD directory roles used to determine policy assignment. For example, organizations may create a more restrictive policy on users assigned the global administrator role. Other role types aren't supported, including administrative unit-scoped roles and custom roles. - Users and groups - Allows targeting of specific sets of users. For example, organizations can select a group that contains all members of the HR department when an HR app is selected as the cloud app. A group can be any type of user group in Azure AD, including dynamic or assigned security and distribution groups. Policy will be applied to nested users and groups. @@ -50,7 +50,7 @@ The following options are available to include when creating a Conditional Acces ## Exclude users -When organizations both include and exclude a user or group the user or group is excluded from the policy, as an exclude action overrides an include in policy. Exclusions are commonly used for emergency access or break-glass accounts. More information about emergency access accounts and why they are important can be found in the following articles: +When organizations both include and exclude a user or group the user or group is excluded from the policy, as an exclude action overrides an include in policy. Exclusions are commonly used for emergency access or break-glass accounts. More information about emergency access accounts and why they're important can be found in the following articles: * [Manage emergency access accounts in Azure AD](../roles/security-emergency-access.md) * [Create a resilient access control management strategy with Azure Active Directory](../authentication/concept-resilient-controls.md) @@ -66,7 +66,7 @@ The following options are available to exclude when creating a Conditional Acces ### Preventing administrator lockout -To prevent an administrator from locking themselves out of their directory when creating a policy applied to **All users** and **All apps**, they will see the following warning. +To prevent an administrator from locking themselves out of their directory when creating a policy applied to **All users** and **All apps**, they'll see the following warning. > Don't lock yourself out! We recommend applying a policy to a small set of users first to verify it behaves as expected. We also recommend excluding at least one administrator from this policy. This ensures that you still have access and can update a policy if a change is required. Please review the affected users and apps. 
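To make the warning concrete, the following Microsoft Graph PowerShell snippet is a minimal sketch of creating a report-only policy that targets all users while excluding a single break-glass account. It's illustrative only: the display name is arbitrary, the object ID is a placeholder for your emergency access account, and you'd adjust the conditions and controls to match your own policy.

```powershell
# Illustrative sketch only: report-only policy for all users and apps that
# requires MFA and excludes one break-glass (emergency access) account.
# Replace the placeholder object ID with your emergency access account's ID.
Connect-MgGraph -Scopes "Policy.ReadWrite.ConditionalAccess"

$params = @{
    displayName   = "Require MFA for all users (break-glass excluded)"
    state         = "enabledForReportingButNotEnforced"   # report-only while you validate
    conditions    = @{
        users        = @{
            includeUsers = @("All")
            excludeUsers = @("00000000-0000-0000-0000-000000000000")   # break-glass account
        }
        applications = @{ includeApplications = @("All") }
    }
    grantControls = @{
        operator        = "OR"
        builtInControls = @("mfa")
    }
}

New-MgIdentityConditionalAccessPolicy -BodyParameter $params
```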
@@ -74,7 +74,11 @@ By default the policy will provide an option to exclude the current user from th ![Warning, don't lock yourself out!](./media/concept-conditional-access-users-groups/conditional-access-users-and-groups-lockout-warning.png) -If you do find yourself locked out, see [What to do if you are locked out of the Azure portal?](troubleshoot-conditional-access.md#what-to-do-if-youre-locked-out-of-the-azure-portal) +If you do find yourself locked out, see [What to do if you're locked out of the Azure portal?](troubleshoot-conditional-access.md#what-to-do-if-youre-locked-out-of-the-azure-portal) + +### External partner access + +Conditional Access policies that target external users may interfere with service provider access, for example granular delegated admin privileges. For more information, see [Introduction to granular delegated admin privileges (GDAP)](/partner-center/gdap-introduction). ## Next steps diff --git a/articles/active-directory/conditional-access/howto-conditional-access-session-lifetime.md index a872bc0a6cbab..a535d5653edee 100644 --- a/articles/active-directory/conditional-access/howto-conditional-access-session-lifetime.md +++ b/articles/active-directory/conditional-access/howto-conditional-access-session-lifetime.md @@ -36,7 +36,7 @@ The Azure Active Directory (Azure AD) default configuration for user sign-in fre It might sound alarming to not ask for a user to sign back in, in reality any violation of IT policies will revoke the session. Some examples include (but aren't limited to) a password change, an incompliant device, or account disable. You can also explicitly [revoke users’ sessions using PowerShell](/powershell/module/azuread/revoke-azureaduserallrefreshtoken). The Azure AD default configuration comes down to “don’t ask users to provide their credentials if security posture of their sessions hasn't changed”. -The sign-in frequency setting works with apps that have implemented OAUTH2 or OIDC protocols according to the standards. Most Microsoft native apps for Windows, Mac, and Mobile including the following web applications comply with the setting. +The sign-in frequency setting works with apps that have implemented OAuth2 or OIDC protocols according to the standards. Most Microsoft native apps for Windows, Mac, and Mobile including the following web applications comply with the setting. - Word, Excel, PowerPoint Online - OneNote Online @@ -48,7 +48,7 @@ The sign-in frequency setting works with apps that have implemented OAUTH2 or OI - Dynamics CRM Online - Azure portal -The sign-in frequency setting works with SAML applications as well, as long as they don't drop their own cookies and are redirected back to Azure AD for authentication on regular basis. +The sign-in frequency setting works with third-party SAML applications and apps that have implemented OAuth2 or OIDC protocols, as long as they don't drop their own cookies and are redirected back to Azure AD for authentication on a regular basis.
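As the article notes above, administrators can also revoke sessions explicitly rather than waiting for a sign-in frequency boundary. The following is a minimal sketch using the AzureAD PowerShell cmdlet linked above; the user principal name is a placeholder.

```powershell
# Illustrative sketch: revoke all refresh tokens for one user so that existing
# sessions must reauthenticate. The UPN below is a placeholder.
Connect-AzureAD
$user = Get-AzureADUser -ObjectId "user@contoso.com"
Revoke-AzureADUserAllRefreshToken -ObjectId $user.ObjectId
```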
### User sign-in frequency and multi-factor authentication diff --git a/articles/active-directory/conditional-access/media/terms-of-use/edit-terms-use.png b/articles/active-directory/conditional-access/media/terms-of-use/edit-terms-use.png index b13be2e3756a6..fbd9621d23126 100644 Binary files a/articles/active-directory/conditional-access/media/terms-of-use/edit-terms-use.png and b/articles/active-directory/conditional-access/media/terms-of-use/edit-terms-use.png differ diff --git a/articles/active-directory/conditional-access/media/terms-of-use/expire-consents.png b/articles/active-directory/conditional-access/media/terms-of-use/expire-consents.png index 36583155aaddd..552c8d9b4b1b4 100644 Binary files a/articles/active-directory/conditional-access/media/terms-of-use/expire-consents.png and b/articles/active-directory/conditional-access/media/terms-of-use/expire-consents.png differ diff --git a/articles/active-directory/conditional-access/media/terms-of-use/new-tou.png b/articles/active-directory/conditional-access/media/terms-of-use/new-tou.png index ec421d9c144cd..98593e1d6770c 100644 Binary files a/articles/active-directory/conditional-access/media/terms-of-use/new-tou.png and b/articles/active-directory/conditional-access/media/terms-of-use/new-tou.png differ diff --git a/articles/active-directory/conditional-access/media/terms-of-use/view-tou.png b/articles/active-directory/conditional-access/media/terms-of-use/view-tou.png index 14ee7c1299b44..c2fad540454ec 100644 Binary files a/articles/active-directory/conditional-access/media/terms-of-use/view-tou.png and b/articles/active-directory/conditional-access/media/terms-of-use/view-tou.png differ diff --git a/articles/active-directory/conditional-access/terms-of-use.md b/articles/active-directory/conditional-access/terms-of-use.md index ab332166fa5bb..269b2ac08e682 100644 --- a/articles/active-directory/conditional-access/terms-of-use.md +++ b/articles/active-directory/conditional-access/terms-of-use.md @@ -1,17 +1,17 @@ --- -title: Terms of use - Azure Active Directory | Microsoft Docs +title: Terms of use in Azure Active Directory description: Get started using Azure Active Directory terms of use to present information to employees or guests before getting access. services: active-directory ms.service: active-directory ms.subservice: compliance ms.topic: how-to -ms.date: 01/12/2022 +ms.date: 05/26/2022 ms.author: joflore author: MicrosoftGuyJFlo manager: karenhoran -ms.reviewer: jocastel +ms.reviewer: siz ms.collection: M365-identity-device-management --- @@ -53,7 +53,7 @@ Azure AD terms of use policies have the following capabilities: To use and configure Azure AD terms of use policies, you must have: -- Azure AD Premium P1, P2, EMS E3, or EMS E5 subscription. +- Azure AD Premium P1, P2, EMS E3, or EMS E5 licenses. - If you don't have one of these subscriptions, you can [get Azure AD Premium](../fundamentals/active-directory-get-started-premium.md) or [enable Azure AD Premium trial](https://azure.microsoft.com/trial/get-started-active-directory/). - One of the following administrator accounts for the directory you want to configure: - Global Administrator @@ -68,19 +68,16 @@ Azure AD terms of use policies use the PDF format to present content. The PDF fi Once you've completed your terms of use policy document, use the following procedure to add it. -1. Sign in to Azure as a Global Administrator, Security Administrator, or Conditional Access Administrator. -1. 
Navigate to **Terms of use** at [https://aka.ms/catou](https://aka.ms/catou). - - ![Conditional Access - Terms of use blade](./media/terms-of-use/tou-blade.png) - -1. Click **New terms**. - +1. Sign in to the **Azure portal** as a global administrator, security administrator, or Conditional Access administrator. +1. Browse to **Azure Active Directory** > **Security** > **Conditional Access** > **Terms of use**. +1. Select **New terms**. + ![New term of use pane to specify your terms of use settings](./media/terms-of-use/new-tou.png) 1. In the **Name** box, enter a name for the terms of use policy that will be used in the Azure portal. -1. In the **Display name** box, enter a title that users see when they sign in. 1. For **Terms of use document**, browse to your finalized terms of use policy PDF and select it. 1. Select the language for your terms of use policy document. The language option allows you to upload multiple terms of use policies, each with a different language. The version of the terms of use policy that an end user will see will be based on their browser preferences. +1. In the **Display name** box, enter a title that users see when they sign in. 1. To require end users to view the terms of use policy before accepting them, set **Require users to expand the terms of use** to **On**. 1. To require end users to accept your terms of use policy on every device they're accessing from, set **Require users to consent on every device** to **On**. Users may be required to install other applications if this option is enabled. For more information, see [Per-device terms of use](#per-device-terms-of-use). 1. If you want to expire terms of use policy consents on a schedule, set **Expire consents** to **On**. When set to On, two more schedule settings are displayed. @@ -108,25 +105,21 @@ Once you've completed your terms of use policy document, use the following proce | Alice | Jan 1 | Jan 31 | Mar 2 | Apr 1 | | Bob | Jan 15 | Feb 14 | Mar 16 | Apr 15 | - It is possible to use the **Expire consents** and **Duration before re-acceptance required (days)** settings together, but typically you use one or the other. + It's possible to use the **Expire consents** and **Duration before re-acceptance required (days)** settings together, but typically you use one or the other. 1. Under **Conditional Access**, use the **Enforce with Conditional Access policy template** list to select the template to enforce the terms of use policy. - ![Conditional Access drop-down list to select a policy template](./media/terms-of-use/conditional-access-templates.png) - | Template | Description | | --- | --- | - | **Access to cloud apps for all guests** | A Conditional Access policy will be created for all guests and all cloud apps. This policy impacts the Azure portal. Once this is created, you might be required to sign out and sign in. | - | **Access to cloud apps for all users** | A Conditional Access policy will be created for all users and all cloud apps. This policy impacts the Azure portal. Once this is created, you'll be required to sign out and sign in. | | **Custom policy** | Select the users, groups, and apps that this terms of use policy will be applied to. | | **Create Conditional Access policy later** | This terms of use policy will appear in the grant control list when creating a Conditional Access policy. | - >[!IMPORTANT] - >Conditional Access policy controls (including terms of use policies) do not support enforcement on service accounts.
We recommend excluding all service accounts from the Conditional Access policy. + > [!IMPORTANT] + > Conditional Access policy controls (including terms of use policies) do not support enforcement on service accounts. We recommend excluding all service accounts from the Conditional Access policy. Custom Conditional Access policies enable granular terms of use policies, down to a specific cloud application or group of users. For more information, see [Quickstart: Require terms of use to be accepted before accessing cloud apps](require-tou.md). -1. Click **Create**. +1. Select **Create**. If you selected a custom Conditional Access template, then a new screen appears that allows you to create the custom Conditional Access policy. @@ -134,8 +127,6 @@ Once you've completed your terms of use policy document, use the following proce You should now see your new terms of use policies. - ![New terms of use listed in the terms of use blade](./media/terms-of-use/create-tou.png) - ## View report of who has accepted and declined The Terms of use blade shows a count of the users who have accepted and declined. These counts and who accepted/declined are stored for the life of the terms of use policy. @@ -144,11 +135,11 @@ The Terms of use blade shows a count of the users who have accepted and declined ![Terms of use blade listing the number of user show have accepted and declined](./media/terms-of-use/view-tou.png) -1. For a terms of use policy, click the numbers under **Accepted** or **Declined** to view the current state for users. +1. For a terms of use policy, select the numbers under **Accepted** or **Declined** to view the current state for users. ![Terms of use consents pane listing the users that have accepted](./media/terms-of-use/accepted-tou.png) -1. To view the history for an individual user, click the ellipsis (**...**) and then **View History**. +1. To view the history for an individual user, select the ellipsis (**...**) and then **View History**. ![View History context menu for a user](./media/terms-of-use/view-history-menu.png) @@ -162,19 +153,17 @@ If you want to view more activity, Azure AD terms of use policies include audit To get started with Azure AD audit logs, use the following procedure: -1. Sign in to Azure and navigate to **Terms of use** at [https://aka.ms/catou](https://aka.ms/catou). +1. Sign in to the **Azure portal** as a global administrator, security administrator, or Conditional Access administrator. +1. Browse to **Azure Active Directory** > **Security** > **Conditional Access** > **Terms of use**. 1. Select a terms of use policy. -1. Click **View audit logs**. - - ![Terms of use blade with the View audit logs option highlighted](./media/terms-of-use/audit-tou.png) - +1. Select **View audit logs**. 1. On the Azure AD audit logs screen, you can filter the information using the provided lists to target specific audit log information. - You can also click **Download** to download the information in a .csv file for use locally. + You can also select **Download** to download the information in a .csv file for use locally. ![Azure AD audit logs screen listing date, target policy, initiated by, and activity](./media/terms-of-use/audit-logs-tou.png) - If you click a log, a pane appears with more activity details. + If you select a log, a pane appears with more activity details. 
![Activity details for a log showing activity, activity status, initiated by, target policy](./media/terms-of-use/audit-log-activity-details.png) @@ -207,13 +196,14 @@ Users can review and see the terms of use policies that they've accepted by usin You can edit some details of terms of use policies, but you can't modify an existing document. The following procedure describes how to edit the details. -1. Sign in to Azure and navigate to **Terms of use** at [https://aka.ms/catou](https://aka.ms/catou). +1. Sign in to the **Azure portal** as a global administrator, security administrator, or Conditional Access administrator. +1. Browse to **Azure Active Directory** > **Security** > **Conditional Access** > **Terms of use**. 1. Select the terms of use policy you want to edit. -1. Click **Edit terms**. -1. In the Edit terms of use pane, you can change the following: - - **Name** – this is the internal name of the ToU that isn't shared with end users - - **Display name** – this is the name that end users can see when viewing the ToU - - **Require users to expand the terms of use** – Setting this to **On** will force the end user to expand the terms of use policy document before accepting it. +1. Select **Edit terms**. +1. In the Edit terms of use pane, you can change the following options: + - **Name** – the internal name of the ToU that isn't shared with end users + - **Display name** – the name that end users can see when viewing the ToU + - **Require users to expand the terms of use** – Setting this option to **On** will force the end user to expand the terms of use policy document before accepting it. - (Preview) You can **update an existing terms of use** document - You can add a language to an existing ToU @@ -221,61 +211,65 @@ You can edit some details of terms of use policies, but you can't modify an exis ![Edit showing different language options ](./media/terms-of-use/edit-terms-use.png) -1. Once you're done, click **Save** to save your changes. +1. Once you're done, select **Save** to save your changes. ## Update the version or pdf of an existing terms of use -1. Sign in to Azure and navigate to [Terms of use](https://aka.ms/catou) -2. Select the terms of use policy you want to edit. -3. Click **Edit terms**. -4. For the language that you would like to update a new version, click **Update** under the action column +1. Sign in to the **Azure portal** as a global administrator, security administrator, or Conditional Access administrator. +1. Browse to **Azure Active Directory** > **Security** > **Conditional Access** > **Terms of use**. +1. Select the terms of use policy you want to edit. +1. Select **Edit terms**. +1. For the language that you would like to update a new version, select **Update** under the action column ![Edit terms of use pane showing name and expand options](./media/terms-of-use/edit-terms-use.png) -5. In the pane on the right, upload the pdf for the new version -6. There's also a toggle option here **Require reaccept** if you want to require your users to accept this new version the next time they sign in. If you require your users to reaccept, next time they try to access the resource defined in your conditional access policy they'll be prompted to accept this new version. If you don’t require your users to reaccept, their previous consent will stay current and only new users who haven't consented before or whose consent expires will see the new version. Until the session expires, **Require reaccept** not require users to accept the new TOU. 
If you want to ensure reaccept, delete and recreate or create a new TOU for this case. +1. In the pane on the right, upload the PDF for the new version. +1. There's also a toggle option here **Require reaccept** if you want to require your users to accept this new version the next time they sign in. If you require your users to reaccept, next time they try to access the resource defined in your conditional access policy they'll be prompted to accept this new version. If you don’t require your users to reaccept, their previous consent will stay current and only new users who haven't consented before or whose consent expires will see the new version. Until the session expires, **Require reaccept** doesn't require users to accept the new TOU. If you want to ensure reaccept, delete and recreate or create a new TOU for this case. ![Edit terms of use re-accept option highlighted](./media/terms-of-use/re-accept.png) -7. Once you've uploaded your new pdf and decided on reaccept, click Add at the bottom of the pane. -8. You'll now see the most recent version under the Document column. +1. Once you've uploaded your new PDF and decided on reaccept, select **Add** at the bottom of the pane. +1. You'll now see the most recent version under the Document column. ## View previous versions of a ToU -1. Sign in to Azure and navigate to **Terms of use** at https://aka.ms/catou. -2. Select the terms of use policy for which you want to view a version history. -3. Click on **Languages and version history** -4. Click on **See previous versions.** +1. Sign in to the **Azure portal** as a global administrator, security administrator, or Conditional Access administrator. +1. Browse to **Azure Active Directory** > **Security** > **Conditional Access** > **Terms of use**. +1. Select the terms of use policy for which you want to view a version history. +1. Select **Languages and version history**. +1. Select **See previous versions.** ![document details including language versions](./media/terms-of-use/document-details.png) -5. You can click on the name of the document to download that version +1. You can select the name of the document to download that version. ## See who has accepted each version -1. Sign in to Azure and navigate to **Terms of use** at https://aka.ms/catou. -2. To see who has currently accepted the ToU, click on the number under the **Accepted** column for the ToU you want. -3. By default, the next page will show you the current state of each users acceptance to the ToU -4. If you would like to see the previous consent events, you can select **All** from the **Current State** drop-down. Now you can see each users events in details about each version and what happened. -5. Alternatively, you can select a specific version from the **Version** drop-down to see who has accepted that specific version. +1. Sign in to the **Azure portal** as a global administrator, security administrator, or Conditional Access administrator. +1. Browse to **Azure Active Directory** > **Security** > **Conditional Access** > **Terms of use**. +1. To see who has currently accepted the ToU, select the number under the **Accepted** column for the ToU you want. +1. By default, the next page will show you the current state of each user's acceptance to the ToU. +1. If you would like to see the previous consent events, you can select **All** from the **Current State** drop-down. Now you can see each user's events in detail for each version and what happened. +1.
Alternatively, you can select a specific version from the **Version** drop-down to see who has accepted that specific version. ## Add a ToU language The following procedure describes how to add a ToU language. -1. Sign in to Azure and navigate to **Terms of use** at [https://aka.ms/catou](https://aka.ms/catou). +1. Sign in to the **Azure portal** as a global administrator, security administrator, or Conditional Access administrator. +1. Browse to **Azure Active Directory** > **Security** > **Conditional Access** > **Terms of use**. 1. Select the terms of use policy you want to edit. -1. Click **Edit Terms** -1. Click **Add language** at the bottom of the page. +1. Select **Edit Terms** +1. Select **Add language** at the bottom of the page. 1. In the Add terms of use language pane, upload your localized PDF, and select the language. ![Terms of use selected and showing the Languages tab in the details pane](./media/terms-of-use/select-language.png) -1. Click **Add language**. -1. Click **Save** +1. Select **Add language**. +1. Select **Save** -1. Click **Add** to add the language. +1. Select **Add** to add the language. ## Per-device terms of use @@ -326,10 +320,11 @@ If a user is using browser that isn't supported, they'll be asked to use a diffe You can delete old terms of use policies using the following procedure. -1. Sign in to Azure and navigate to **Terms of use** at [https://aka.ms/catou](https://aka.ms/catou). +1. Sign in to the **Azure portal** as a global administrator, security administrator, or Conditional Access administrator. +1. Browse to **Azure Active Directory** > **Security** > **Conditional Access** > **Terms of use**. 1. Select the terms of use policy you want to remove. -1. Click **Delete terms**. -1. In the message that appears asking if you want to continue, click **Yes**. +1. Select **Delete terms**. +1. In the message that appears asking if you want to continue, select **Yes**. ![Message asking for confirmation to delete terms of use](./media/terms-of-use/delete-tou.png) @@ -386,7 +381,7 @@ You can configure a Conditional Access policy for the Microsoft Intune Enrollmen A: Terms of use can only be accepted when authenticating interactively. **Q: How do I see when/if a user has accepted a terms of use?**
-A: On the Terms of use blade, click the number under **Accepted**. You can also view or search the accept activity in the Azure AD audit logs. For more information, see View report of who has accepted and declined and [View Azure AD audit logs](#view-azure-ad-audit-logs). +A: On the Terms of use blade, select the number under **Accepted**. You can also view or search the accept activity in the Azure AD audit logs. For more information, see View report of who has accepted and declined and [View Azure AD audit logs](#view-azure-ad-audit-logs). **Q: How long is information stored?**
A: The user counts in the terms of use report and who accepted/declined are stored for the life of the terms of use. The Azure AD audit logs are stored for 30 days. @@ -395,7 +390,7 @@ A: The user counts in the terms of use report and who accepted/declined are stor A: The terms of use report is stored for the lifetime of that terms of use policy, while the Azure AD audit logs are stored for 30 days. Also, the terms of use report only displays the users current consent state. For example, if a user declines and then accepts, the terms of use report will only show that user's accept. If you need to see the history, you can use the Azure AD audit logs. **Q: If hyperlinks are in the terms of use policy PDF document, will end users be able to click them?**
-A: Yes, end users are able to select hyperlinks to other pages but links to sections within the document are not supported. Also, hyperlinks in terms of use policy PDFs do not work when accessed from the Azure AD MyApps/MyAccount portal. +A: Yes, end users are able to select hyperlinks to other pages but links to sections within the document aren't supported. Also, hyperlinks in terms of use policy PDFs don't work when accessed from the Azure AD MyApps/MyAccount portal. **Q: Can a terms of use policy support multiple languages?**
A: Yes. Currently there are 108 different languages an administrator can configure for a single terms of use policy. An administrator can upload multiple PDF documents and tag those documents with a corresponding language (up to 108). When end users sign in, we look at their browser language preference and display the matching document. If there's no match, we display the default document, which is the first document that is uploaded. @@ -419,7 +414,7 @@ A: You can [review previously accepted terms of use policies](#how-users-can-rev A: If you've configured both Azure AD terms of use and [Intune terms and conditions](/intune/terms-and-conditions-create), the user will be required to accept both. For more information, see the [Choosing the right Terms solution for your organization blog post](https://go.microsoft.com/fwlink/?linkid=2010506&clcid=0x409). **Q: What endpoints does the terms of use service use for authentication?**
-A: Terms of use utilize the following endpoints for authentication: https://tokenprovider.termsofuse.identitygovernance.azure.com and https://account.activedirectory.windowsazure.com. If your organization has an allowlist of URLs for enrollment, you will need to add these endpoints to your allowlist, along with the Azure AD endpoints for sign-in. +A: Terms of use utilize the following endpoints for authentication: https://tokenprovider.termsofuse.identitygovernance.azure.com and https://account.activedirectory.windowsazure.com. If your organization has an allowlist of URLs for enrollment, you'll need to add these endpoints to your allowlist, along with the Azure AD endpoints for sign-in. ## Next steps diff --git a/articles/active-directory/develop/access-tokens.md b/articles/active-directory/develop/access-tokens.md index d36564a82a228..c92f87a8dcda1 100644 --- a/articles/active-directory/develop/access-tokens.md +++ b/articles/active-directory/develop/access-tokens.md @@ -1,6 +1,5 @@ --- -title: Microsoft identity platform access tokens | Azure -titleSuffix: Microsoft identity platform +title: Microsoft identity platform access tokens description: Learn about access tokens emitted by the Azure AD v1.0 and Microsoft identity platform (v2.0) endpoints. services: active-directory author: nickludwig diff --git a/articles/active-directory/develop/accounts-overview.md b/articles/active-directory/develop/accounts-overview.md index 533ca7863a693..6684dc2629ca0 100644 --- a/articles/active-directory/develop/accounts-overview.md +++ b/articles/active-directory/develop/accounts-overview.md @@ -1,5 +1,5 @@ --- -title: Microsoft identity platform accounts & tenant profiles on Android | Azure +title: Microsoft identity platform accounts & tenant profiles on Android description: An overview of the Microsoft identity platform accounts for Android services: active-directory author: shoatman diff --git a/articles/active-directory/develop/active-directory-certificate-credentials.md b/articles/active-directory/develop/active-directory-certificate-credentials.md index 872ccbf3b38a8..b2550ba59efb7 100644 --- a/articles/active-directory/develop/active-directory-certificate-credentials.md +++ b/articles/active-directory/develop/active-directory-certificate-credentials.md @@ -1,6 +1,5 @@ --- title: Microsoft identity platform certificate credentials -titleSuffix: Microsoft identity platform description: This article discusses the registration and use of certificate credentials for application authentication. services: active-directory author: nickludwig diff --git a/articles/active-directory/develop/active-directory-claims-mapping.md b/articles/active-directory/develop/active-directory-claims-mapping.md index e4f949582a208..9a0f58e3c8e36 100644 --- a/articles/active-directory/develop/active-directory-claims-mapping.md +++ b/articles/active-directory/develop/active-directory-claims-mapping.md @@ -1,6 +1,5 @@ --- title: Customize Azure AD tenant app claims (PowerShell) -titleSuffix: Microsoft identity platform description: Learn how to customize claims emitted in tokens for an application in a specific Azure Active Directory tenant. 
author: rwike77 manager: CelesteDG diff --git a/articles/active-directory/develop/active-directory-configurable-token-lifetimes.md b/articles/active-directory/develop/active-directory-configurable-token-lifetimes.md index 4a8182e4776fa..b055b6adfe788 100644 --- a/articles/active-directory/develop/active-directory-configurable-token-lifetimes.md +++ b/articles/active-directory/develop/active-directory-configurable-token-lifetimes.md @@ -1,6 +1,5 @@ --- title: Configurable token lifetimes -titleSuffix: Microsoft identity platform description: Learn how to set lifetimes for access, SAML, and ID tokens issued by the Microsoft identity platform. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/active-directory-enterprise-app-role-management.md b/articles/active-directory/develop/active-directory-enterprise-app-role-management.md index 14cc5a1bb549c..aef90ee006814 100644 --- a/articles/active-directory/develop/active-directory-enterprise-app-role-management.md +++ b/articles/active-directory/develop/active-directory-enterprise-app-role-management.md @@ -1,6 +1,5 @@ --- -title: Configure role claim for enterprise Azure AD apps | Azure -titleSuffix: Microsoft identity platform +title: Configure role claim for enterprise Azure AD apps description: Learn how to configure the role claim issued in the SAML token for enterprise applications in Azure Active Directory services: active-directory author: jeevansd diff --git a/articles/active-directory/develop/active-directory-how-applications-are-added.md b/articles/active-directory/develop/active-directory-how-applications-are-added.md index 39a7e8af3a187..33754695716bb 100644 --- a/articles/active-directory/develop/active-directory-how-applications-are-added.md +++ b/articles/active-directory/develop/active-directory-how-applications-are-added.md @@ -1,6 +1,5 @@ --- title: How and why apps are added to Azure AD -titleSuffix: Microsoft identity platform description: What does it mean for an application to be added to Azure AD and how do they get there? services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/active-directory-how-to-integrate.md b/articles/active-directory/develop/active-directory-how-to-integrate.md index 735f63df4613f..db2d961b2f6b7 100644 --- a/articles/active-directory/develop/active-directory-how-to-integrate.md +++ b/articles/active-directory/develop/active-directory-how-to-integrate.md @@ -1,6 +1,5 @@ --- -title: How to integrate with the Microsoft identity platform | Azure -titleSuffix: Microsoft identity platform +title: How to integrate with the Microsoft identity platform description: Learn the benefits of integrating your application with the Microsoft identity platform, and get resources for features like simplified sign-in, identity management, multi-factor authentication, and access control. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/active-directory-optional-claims.md b/articles/active-directory/develop/active-directory-optional-claims.md index b3973b1015201..abec4223bb4c7 100644 --- a/articles/active-directory/develop/active-directory-optional-claims.md +++ b/articles/active-directory/develop/active-directory-optional-claims.md @@ -1,6 +1,5 @@ --- title: Provide optional claims to Azure AD apps -titleSuffix: Microsoft identity platform description: How to add custom or additional claims to the SAML 2.0 and JSON Web Tokens (JWT) tokens issued by Microsoft identity platform. 
author: rwike77 manager: CelesteDG diff --git a/articles/active-directory/develop/active-directory-saml-claims-customization.md b/articles/active-directory/develop/active-directory-saml-claims-customization.md index 28dd964714b93..a57f858d8e27c 100644 --- a/articles/active-directory/develop/active-directory-saml-claims-customization.md +++ b/articles/active-directory/develop/active-directory-saml-claims-customization.md @@ -1,6 +1,5 @@ --- title: Customize app SAML token claims -titleSuffix: Microsoft identity platform description: Learn how to customize the claims issued by Microsoft identity platform in the SAML token for enterprise applications. services: active-directory author: kenwith diff --git a/articles/active-directory/develop/active-directory-schema-extensions.md b/articles/active-directory/develop/active-directory-schema-extensions.md index 5588aeb3cd293..542a1691e0207 100644 --- a/articles/active-directory/develop/active-directory-schema-extensions.md +++ b/articles/active-directory/develop/active-directory-schema-extensions.md @@ -1,6 +1,5 @@ --- title: Use Azure AD schema extension attributes in claims -titleSuffix: Microsoft identity platform description: Describes how to use directory schema extension attributes for sending user data to applications in token claims. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/active-directory-v2-protocols.md b/articles/active-directory/develop/active-directory-v2-protocols.md index 5aa299a0a156d..ffb3cfec10a11 100644 --- a/articles/active-directory/develop/active-directory-v2-protocols.md +++ b/articles/active-directory/develop/active-directory-v2-protocols.md @@ -1,6 +1,5 @@ --- -title: OAuth 2.0 and OpenID Connect protocols on the Microsoft identity platform | Azure -titleSuffix: Microsoft identity platform +title: OAuth 2.0 and OpenID Connect protocols on the Microsoft identity platform description: A guide to OAuth 2.0 and OpenID Connect protocols as supported by the Microsoft identity platform. services: active-directory author: nickludwig @@ -77,6 +76,11 @@ https://login.microsoftonline.com//oauth2/v2.0/token # NOTE: These are examples. Endpoint URI format may vary based on application type, # sign-in audience, and Azure cloud instance (global or national cloud). + +# The {issuer} value in the path of the request can be used to control who can sign into the application. +# The allowed values are **common** for both Microsoft accounts and work or school accounts, +# **organizations** for work or school accounts only, **consumers** for Microsoft accounts only, +# and **tenant identifiers** such as the tenant ID or domain name. 
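+# For illustration only (contoso.onmicrosoft.com is a hypothetical tenant name):
+#   https://login.microsoftonline.com/common/oauth2/v2.0/authorize              # Microsoft accounts and work or school accounts
+#   https://login.microsoftonline.com/organizations/oauth2/v2.0/token           # work or school accounts only
+#   https://login.microsoftonline.com/consumers/oauth2/v2.0/token               # Microsoft accounts only
+#   https://login.microsoftonline.com/contoso.onmicrosoft.com/oauth2/v2.0/token # a single tenant, by domain name or tenant ID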
``` To find the endpoints for an application you've registered, in the [Azure portal](https://portal.azure.com) navigate to: diff --git a/articles/active-directory/develop/api-find-an-api-how-to.md b/articles/active-directory/develop/api-find-an-api-how-to.md index 05ee90c4a6d9c..eaff1c458dc85 100644 --- a/articles/active-directory/develop/api-find-an-api-how-to.md +++ b/articles/active-directory/develop/api-find-an-api-how-to.md @@ -1,5 +1,5 @@ --- -title: Find an API for a custom-developed app | Azure +title: Find an API for a custom-developed app description: How to configure the permissions you need to access a particular API in your custom developed Azure AD application services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/app-objects-and-service-principals.md b/articles/active-directory/develop/app-objects-and-service-principals.md index a3f1bb392c0da..68f9f49a38ac3 100644 --- a/articles/active-directory/develop/app-objects-and-service-principals.md +++ b/articles/active-directory/develop/app-objects-and-service-principals.md @@ -1,6 +1,5 @@ --- title: Apps & service principals in Azure AD -titleSuffix: Microsoft identity platform description: Learn about the relationship between application and service principal objects in Azure Active Directory. author: rwike77 manager: CelesteDG diff --git a/articles/active-directory/develop/app-resilience-continuous-access-evaluation.md b/articles/active-directory/develop/app-resilience-continuous-access-evaluation.md index 049448bd85b47..c01ff48b4a732 100644 --- a/articles/active-directory/develop/app-resilience-continuous-access-evaluation.md +++ b/articles/active-directory/develop/app-resilience-continuous-access-evaluation.md @@ -1,6 +1,5 @@ --- -title: "How to use Continuous Access Evaluation enabled APIs in your applications | Azure" -titleSuffix: Microsoft identity platform +title: "How to use Continuous Access Evaluation enabled APIs in your applications" description: How to increase app security and resilience by adding support for Continuous Access Evaluation, enabling long-lived access tokens that can be revoked based on critical events and policy evaluation. services: active-directory author: knicholasa diff --git a/articles/active-directory/develop/app-sign-in-flow.md b/articles/active-directory/develop/app-sign-in-flow.md index fc91bcdafd0dd..ddcd2d6f750b8 100644 --- a/articles/active-directory/develop/app-sign-in-flow.md +++ b/articles/active-directory/develop/app-sign-in-flow.md @@ -1,6 +1,5 @@ --- -title: App sign-in flow with the Microsoft identity platform | Azure -titleSuffix: Microsoft identity platform +title: App sign-in flow with the Microsoft identity platform description: Learn about the sign-in flow of web, desktop, and mobile apps in Microsoft identity platform. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/apple-sso-plugin.md b/articles/active-directory/develop/apple-sso-plugin.md index 8309f4e94d95b..e6509d8e94534 100644 --- a/articles/active-directory/develop/apple-sso-plugin.md +++ b/articles/active-directory/develop/apple-sso-plugin.md @@ -1,6 +1,5 @@ --- title: Microsoft Enterprise SSO plug-in for Apple devices -titleSuffix: Microsoft identity platform | Azure description: Learn about the Azure Active Directory SSO plug-in for iOS, iPadOS, and macOS devices. 
services: active-directory author: brandwe diff --git a/articles/active-directory/develop/application-consent-experience.md b/articles/active-directory/develop/application-consent-experience.md index 0268acf7225c0..2c68bb9b30d15 100644 --- a/articles/active-directory/develop/application-consent-experience.md +++ b/articles/active-directory/develop/application-consent-experience.md @@ -1,6 +1,5 @@ --- title: Azure AD app consent experiences -titleSuffix: Microsoft identity platform description: Learn more about the Azure AD consent experiences to see how you can use it when managing and developing applications on Azure AD services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/application-model.md b/articles/active-directory/develop/application-model.md index 17c8215a16d65..08dc1e80a239e 100644 --- a/articles/active-directory/develop/application-model.md +++ b/articles/active-directory/develop/application-model.md @@ -1,6 +1,5 @@ --- -title: Application model | Azure -titleSuffix: Microsoft identity platform +title: Application model description: Learn about the process of registering your application so it can integrate with the Microsoft identity platform. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/authentication-flows-app-scenarios.md b/articles/active-directory/develop/authentication-flows-app-scenarios.md index fb47e39443155..ab9cc8a850ef1 100644 --- a/articles/active-directory/develop/authentication-flows-app-scenarios.md +++ b/articles/active-directory/develop/authentication-flows-app-scenarios.md @@ -1,5 +1,5 @@ --- -title: Microsoft identity platform authentication flows & app scenarios | Azure +title: Microsoft identity platform authentication flows & app scenarios description: Learn about application scenarios for the Microsoft identity platform, including authenticating identities, acquiring tokens, and calling protected APIs. services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/authentication-national-cloud.md b/articles/active-directory/develop/authentication-national-cloud.md index 94b6adff03deb..cefba606c4e90 100644 --- a/articles/active-directory/develop/authentication-national-cloud.md +++ b/articles/active-directory/develop/authentication-national-cloud.md @@ -1,6 +1,5 @@ --- -title: Azure AD authentication & national clouds | Azure -titleSuffix: Microsoft identity platform +title: Azure AD authentication & national clouds description: Learn about app registration and authentication endpoints for national clouds. services: active-directory author: negoe diff --git a/articles/active-directory/develop/authentication-vs-authorization.md b/articles/active-directory/develop/authentication-vs-authorization.md index 8431c8750d2d8..a311d4a712e69 100644 --- a/articles/active-directory/develop/authentication-vs-authorization.md +++ b/articles/active-directory/develop/authentication-vs-authorization.md @@ -1,6 +1,5 @@ --- -title: Authentication vs. authorization | Azure -titleSuffix: Microsoft identity platform +title: Authentication vs. authorization description: Learn about the basics of authentication and authorization in the Microsoft identity platform. 
services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/authorization-basics.md b/articles/active-directory/develop/authorization-basics.md index 66a8a93e393d7..b612f1feda9ee 100644 --- a/articles/active-directory/develop/authorization-basics.md +++ b/articles/active-directory/develop/authorization-basics.md @@ -1,6 +1,5 @@ --- -title: Authorization basics | Azure -titleSuffix: Microsoft identity platform +title: Authorization basics description: Learn about the basics of authorization in the Microsoft identity platform. services: active-directory author: Chrispine-Chiedo diff --git a/articles/active-directory/develop/claims-challenge.md b/articles/active-directory/develop/claims-challenge.md index 09b3af59c3ffc..c394ee40ec336 100644 --- a/articles/active-directory/develop/claims-challenge.md +++ b/articles/active-directory/develop/claims-challenge.md @@ -1,6 +1,5 @@ --- title: Claims challenges, claims requests, and client capabilities -titleSuffix: Microsoft identity platform description: Explanation of claims challenges, claims requests, and client capabilities in the Microsoft identity platform. services: active-directory author: knicholasa diff --git a/articles/active-directory/develop/config-authority.md b/articles/active-directory/develop/config-authority.md index 302bcfb04c19f..9822abe65a8c7 100644 --- a/articles/active-directory/develop/config-authority.md +++ b/articles/active-directory/develop/config-authority.md @@ -1,6 +1,5 @@ --- -title: Configure identity providers (MSAL iOS/macOS) | Azure -titleSuffix: Microsoft identity platform +title: Configure identity providers (MSAL iOS/macOS) description: Learn how to use different authorities such as B2C, sovereign clouds, and guest users, with MSAL for iOS and macOS. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/configure-token-lifetimes.md b/articles/active-directory/develop/configure-token-lifetimes.md index 61de98fa80158..d2d603b4fa83c 100644 --- a/articles/active-directory/develop/configure-token-lifetimes.md +++ b/articles/active-directory/develop/configure-token-lifetimes.md @@ -1,6 +1,5 @@ --- title: Set lifetimes for tokens -titleSuffix: Microsoft identity platform description: Learn how to set lifetimes for tokens issued by Microsoft identity platform. Learn how to learn how to manage an organization's default policy, create a policy for web sign-in, create a policy for a native app that calls a web API, and manage an advanced policy. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/consent-framework.md b/articles/active-directory/develop/consent-framework.md index 851871158c33e..453cc2828c063 100644 --- a/articles/active-directory/develop/consent-framework.md +++ b/articles/active-directory/develop/consent-framework.md @@ -1,6 +1,5 @@ --- title: Microsoft identity platform consent framework -titleSuffix: Microsoft identity platform description: Learn about the consent framework in the Microsoft identity platform and how it applies to multi-tenant applications. 
services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/console-app-quickstart.md b/articles/active-directory/develop/console-app-quickstart.md index 2db83178487b4..86a4ee1d2b891 100644 --- a/articles/active-directory/develop/console-app-quickstart.md +++ b/articles/active-directory/develop/console-app-quickstart.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Call Microsoft Graph from a console application | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Call Microsoft Graph from a console application" description: In this quickstart, you learn how a console application can get an access token and call an API protected by Microsoft identity platform, using the app's own identity services: active-directory author: Dickson-Mwendia diff --git a/articles/active-directory/develop/customize-webviews.md b/articles/active-directory/develop/customize-webviews.md index 639ef44e82272..6c07fdb9b7cef 100644 --- a/articles/active-directory/develop/customize-webviews.md +++ b/articles/active-directory/develop/customize-webviews.md @@ -1,6 +1,5 @@ --- -title: Customize browsers & WebViews (MSAL iOS/macOS) | Azure -titleSuffix: Microsoft identity platform +title: Customize browsers & WebViews (MSAL iOS/macOS) description: Learn how to customize the MSAL iOS/macOS browser experience to sign in users. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/delegated-and-app-perms.md b/articles/active-directory/develop/delegated-and-app-perms.md index 43ea104fb0b51..585e0ccb06928 100644 --- a/articles/active-directory/develop/delegated-and-app-perms.md +++ b/articles/active-directory/develop/delegated-and-app-perms.md @@ -1,5 +1,5 @@ --- -title: Differences between delegated and app permissions | Azure +title: Differences between delegated and app permissions description: Learn about delegated and application permissions, how they are used by clients and exposed by resources for applications you are developing with Azure AD services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/desktop-app-quickstart.md b/articles/active-directory/develop/desktop-app-quickstart.md index 45141923fff46..7ea69c6329b5b 100644 --- a/articles/active-directory/develop/desktop-app-quickstart.md +++ b/articles/active-directory/develop/desktop-app-quickstart.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Sign in users and call Microsoft Graph in a desktop app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Sign in users and call Microsoft Graph in a desktop app" description: In this quickstart, learn how a desktop application can get an access token and call an API protected by the Microsoft identity platform. services: active-directory author: Dickson-Mwendia diff --git a/articles/active-directory/develop/developer-glossary.md b/articles/active-directory/develop/developer-glossary.md index ac417d44e0c9a..8a670becc1217 100644 --- a/articles/active-directory/develop/developer-glossary.md +++ b/articles/active-directory/develop/developer-glossary.md @@ -1,44 +1,42 @@ --- -title: Microsoft identity platform developer glossary | Azure -description: A list of terms for commonly used Microsoft identity platform developer concepts and features. +title: Glossary of terms in the Microsoft identity platform +description: Definitions of terms commonly found in Microsoft identity platform documentation, Azure portal, and authentication SDKs like the Microsoft Authentication Library (MSAL). 
services: active-directory author: rwike77 manager: CelesteDG ms.service: active-directory ms.subservice: develop -ms.topic: conceptual -ms.workload: identity -ms.date: 12/14/2021 +ms.topic: reference +ms.date: 05/28/2022 ms.author: ryanwi -ms.custom: aaddev -ms.reviewer: jmprieur, saeeda, jesakowi, nacanuma +ms.reviewer: mmacy --- -# Microsoft identity platform developer glossary +# Glossary: Microsoft identity platform -This article contains definitions for some of the core developer concepts and terminology, which are helpful when learning about application development using Microsoft identity platform. +You'll see these terms when you use our documentation, the Azure portal, our authentication libraries, and the Microsoft Graph API. Some terms are Microsoft-specific while others are related to protocols like OAuth or other technologies you use with the Microsoft identity platform. ## Access token -A type of [security token](#security-token) issued by an [authorization server](#authorization-server), and used by a [client application](#client-application) in order to access a [protected resource server](#resource-server). Typically in the form of a [JSON Web Token (JWT)][JWT], the token embodies the authorization granted to the client by the [resource owner](#resource-owner), for a requested level of access. The token contains all applicable [claims](#claim) about the subject, enabling the client application to use it as a form of credential when accessing a given resource. This also eliminates the need for the resource owner to expose credentials to the client. +A type of [security token](#security-token) issued by an [authorization server](#authorization-server) and used by a [client application](#client-application) to access a [protected resource server](#resource-server). Typically in the form of a [JSON Web Token (JWT)][JWT], the token embodies the authorization granted to the client by the [resource owner](#resource-owner), for a requested level of access. The token contains all applicable [claims](#claim) about the subject, enabling the client application to use it as a form of credential when accessing a given resource. This also eliminates the need for the resource owner to expose credentials to the client. -Access tokens are only valid for a short period of time and cannot be revoked. An authorization server may also issue a [refresh token](#refresh-token) when the access token is issued. Refresh tokens are typically provided only to confidential client applications. +Access tokens are only valid for a short period of time and can't be revoked. An authorization server may also issue a [refresh token](#refresh-token) when the access token is issued. Refresh tokens are typically provided only to confidential client applications. Access tokens are sometimes referred to as "User+App" or "App-Only", depending on the credentials being represented. For example, when a client application uses the: -* ["Authorization code" authorization grant](#authorization-grant), the end user authenticates first as the resource owner, delegating authorization to the client to access the resource. The client authenticates afterward when obtaining the access token. The token can sometimes be referred to more specifically as a "User+App" token, as it represents both the user that authorized the client application, and the application. 
-* ["Client credentials" authorization grant](#authorization-grant), the client provides the sole authentication, functioning without the resource-owner's authentication/authorization, so the token can sometimes be referred to as an "App-Only" token. +- ["Authorization code" authorization grant](#authorization-grant), the end user authenticates first as the resource owner, delegating authorization to the client to access the resource. The client authenticates afterward when obtaining the access token. The token can sometimes be referred to more specifically as a "User+App" token, as it represents both the user that authorized the client application, and the application. +- ["Client credentials" authorization grant](#authorization-grant), the client provides the sole authentication, functioning without the resource-owner's authentication/authorization, so the token can sometimes be referred to as an "App-Only" token. See the [access tokens reference][AAD-Tokens-Claims] for more details. ## Actor -Another term for the [client application](#client-application) - this is the party acting on behalf of the subject, or [resource owner](#resource-owner). +Another term for the [client application](#client-application). The actor is the party acting on behalf of a subject ([resource owner](#resource-owner)). -## Application ID (client ID) +## Application (client) ID -The unique identifier Azure AD issues to an application registration that identifies a specific application and the associated configurations. This application ID ([client ID](https://tools.ietf.org/html/rfc6749#page-15)) is used when performing authentication requests and is provided to the authentication libraries in development time. The application ID (client ID) is not a secret. +The application ID, or _[client ID](https://datatracker.ietf.org/doc/html/rfc6749#section-2.2)_, is a value the Microsoft identity platform assigns to your application when you register it in Azure AD. The application ID is a GUID value that uniquely identifies the application and its configuration within the identity platform. You add the app ID to your application's code, and authentication libraries include the value in their requests to the identity platform at application runtime. The application (client) ID isn't a secret - don't use it as a password or other credential. ## Application manifest @@ -46,62 +44,62 @@ A feature provided by the [Azure portal][AZURE-portal], which produces a JSON re ## Application object -When you register/update an application in the [Azure portal][AZURE-portal], the portal creates/updates both an application object and a corresponding [service principal object](#service-principal-object) for that tenant. The application object *defines* the application's identity configuration globally (across all tenants where it has access), providing a template from which its corresponding service principal object(s) are *derived* for use locally at run-time (in a specific tenant). +When you register/update an application in the [Azure portal][AZURE-portal], the portal creates/updates both an application object and a corresponding [service principal object](#service-principal-object) for that tenant. The application object _defines_ the application's identity configuration globally (across all tenants where it has access), providing a template from which its corresponding service principal object(s) are _derived_ for use locally at run-time (in a specific tenant). 
For more information, see [Application and Service Principal Objects][AAD-App-SP-Objects]. ## Application registration -In order to allow an application to integrate with and delegate Identity and Access Management functions to Azure AD, it must be registered with an Azure AD [tenant](#tenant). When you register your application with Azure AD, you are providing an identity configuration for your application, allowing it to integrate with Azure AD and use features such as: +In order to allow an application to integrate with and delegate Identity and Access Management functions to Azure AD, it must be registered with an Azure AD [tenant](#tenant). When you register your application with Azure AD, you're providing an identity configuration for your application, allowing it to integrate with Azure AD and use features like: -* Robust management of Single Sign-On using Azure AD Identity Management and [OpenID Connect][OpenIDConnect] protocol implementation -* Brokered access to [protected resources](#resource-server) by [client applications](#client-application), via OAuth 2.0 [authorization server](#authorization-server) -* [Consent framework](#consent) for managing client access to protected resources, based on resource owner authorization. +- Robust management of Single Sign-On using Azure AD Identity Management and [OpenID Connect][OpenIDConnect] protocol implementation +- Brokered access to [protected resources](#resource-server) by [client applications](#client-application), via OAuth 2.0 [authorization server](#authorization-server) +- [Consent framework](#consent) for managing client access to protected resources, based on resource owner authorization. See [Integrating applications with Azure Active Directory][AAD-Integrating-Apps] for more details. ## Authentication -The act of challenging a party for legitimate credentials, providing the basis for creation of a security principal to be used for identity and access control. During an [OAuth2 authorization grant](#authorization-grant) for example, the party authenticating is filling the role of either [resource owner](#resource-owner) or [client application](#client-application), depending on the grant used. +The act of challenging a party for legitimate credentials, providing the basis for creation of a security principal to be used for identity and access control. During an [OAuth 2.0 authorization grant](#authorization-grant) for example, the party authenticating is filling the role of either [resource owner](#resource-owner) or [client application](#client-application), depending on the grant used. ## Authorization The act of granting an authenticated security principal permission to do something. There are two primary use cases in the Azure AD programming model: -* During an [OAuth2 authorization grant](#authorization-grant) flow: when the [resource owner](#resource-owner) grants authorization to the [client application](#client-application), allowing the client to access the resource owner's resources. -* During resource access by the client: as implemented by the [resource server](#resource-server), using the [claim](#claim) values present in the [access token](#access-token) to make access control decisions based upon them. +- During an [OAuth 2.0 authorization grant](#authorization-grant) flow: when the [resource owner](#resource-owner) grants authorization to the [client application](#client-application), allowing the client to access the resource owner's resources. 
+- During resource access by the client: as implemented by the [resource server](#resource-server), using the [claim](#claim) values present in the [access token](#access-token) to make access control decisions based upon them. ## Authorization code -A short lived "token" provided to a [client application](#client-application) by the [authorization endpoint](#authorization-endpoint), as part of the "authorization code" flow, one of the four OAuth2 [authorization grants](#authorization-grant). The code is returned to the client application in response to authentication of a [resource owner](#resource-owner), indicating the resource owner has delegated authorization to access the requested resources. As part of the flow, the code is later redeemed for an [access token](#access-token). +A short-lived value provided by the [authorization endpoint](#authorization-endpoint) to a [client application](#client-application) during the OAuth 2.0 _authorization code grant flow_, one of the four OAuth 2.0 [authorization grants](#authorization-grant). Also called an _auth code_, the authorization code is returned to the client application in response to the authentication of a [resource owner](#resource-owner). The auth code indicates the resource owner has delegated authorization to the client application to access their resources. As part of the flow, the auth code is later redeemed for an [access token](#access-token). ## Authorization endpoint -One of the endpoints implemented by the [authorization server](#authorization-server), used to interact with the [resource owner](#resource-owner) in order to provide an [authorization grant](#authorization-grant) during an OAuth2 authorization grant flow. Depending on the authorization grant flow used, the actual grant provided can vary, including an [authorization code](#authorization-code) or [security token](#security-token). +One of the endpoints implemented by the [authorization server](#authorization-server), used to interact with the [resource owner](#resource-owner) to provide an [authorization grant](#authorization-grant) during an OAuth 2.0 authorization grant flow. Depending on the authorization grant flow used, the actual grant provided can vary, including an [authorization code](#authorization-code) or [security token](#security-token). -See the OAuth2 specification's [authorization grant types][OAuth2-AuthZ-Grant-Types] and [authorization endpoint][OAuth2-AuthZ-Endpoint] sections, and the [OpenIDConnect specification][OpenIDConnect-AuthZ-Endpoint] for more details. +See the OAuth 2.0 specification's [authorization grant types][OAuth2-AuthZ-Grant-Types] and [authorization endpoint][OAuth2-AuthZ-Endpoint] sections, and the [OpenIDConnect specification][OpenIDConnect-AuthZ-Endpoint] for more details. ## Authorization grant -A credential representing the [resource owner's](#resource-owner) [authorization](#authorization) to access its protected resources, granted to a [client application](#client-application). A client application can use one of the [four grant types defined by the OAuth2 Authorization Framework][OAuth2-AuthZ-Grant-Types] to obtain a grant, depending on client type/requirements: "authorization code grant", "client credentials grant", "implicit grant", and "resource owner password credentials grant". The credential returned to the client is either an [access token](#access-token), or an [authorization code](#authorization-code) (exchanged later for an access token), depending on the type of authorization grant used. 
+A credential representing the [resource owner's](#resource-owner) [authorization](#authorization) to access its protected resources, granted to a [client application](#client-application). A client application can use one of the [four grant types defined by the OAuth 2.0 Authorization Framework][OAuth2-AuthZ-Grant-Types] to obtain a grant, depending on client type/requirements: "authorization code grant", "client credentials grant", "implicit grant", and "resource owner password credentials grant". The credential returned to the client is either an [access token](#access-token), or an [authorization code](#authorization-code) (exchanged later for an access token), depending on the type of authorization grant used. ## Authorization server -As defined by the [OAuth2 Authorization Framework][OAuth2-Role-Def], the server responsible for issuing access tokens to the [client](#client-application) after successfully authenticating the [resource owner](#resource-owner) and obtaining its authorization. A [client application](#client-application) interacts with the authorization server at runtime via its [authorization](#authorization-endpoint) and [token](#token-endpoint) endpoints, in accordance with the OAuth2 defined [authorization grants](#authorization-grant). +As defined by the [OAuth 2.0 Authorization Framework][OAuth2-Role-Def], the server responsible for issuing access tokens to the [client](#client-application) after successfully authenticating the [resource owner](#resource-owner) and obtaining its authorization. A [client application](#client-application) interacts with the authorization server at runtime via its [authorization](#authorization-endpoint) and [token](#token-endpoint) endpoints, in accordance with the OAuth 2.0 defined [authorization grants](#authorization-grant). In the case of the Microsoft identity platform application integration, the Microsoft identity platform implements the authorization server role for Azure AD applications and Microsoft service APIs, for example [Microsoft Graph APIs][Microsoft-Graph]. ## Claim -A [security token](#security-token) contains claims, which provide assertions about one entity (such as a [client application](#client-application) or [resource owner](#resource-owner)) to another entity (such as the [resource server](#resource-server)). Claims are name/value pairs that relay facts about the token subject (for example, the security principal that was authenticated by the [authorization server](#authorization-server)). The claims present in a given token are dependent upon several variables, including the type of token, the type of credential used to authenticate the subject, the application configuration, etc. +Claims are name/values pairs in a [security token](#security-token) that provide assertions made by one entity to another. These entities are typically the [client application](#client-application) or a [resource owner](#resource-owner) providing assertions to a [resource server](#resource-server). Claims relay facts about the token subject like the ID of the security principal that was authenticated by the [authorization server](#authorization-server). The claims present in a token can vary and depend on several factors like the type of token, type of credential used for authenticating the subject, the application configuration, and others. See the [Microsoft identity platform token reference][AAD-Tokens-Claims] for more details. ## Client application -Also known as the "[actor](#actor)". 
As defined by the [OAuth2 Authorization Framework][OAuth2-Role-Def], an application that makes protected resource requests on behalf of the [resource owner](#resource-owner). They receive permissions from the resource owner in the form of scopes. The term "client" does not imply any particular hardware implementation characteristics (for instance, whether the application executes on a server, a desktop, or other devices). +Also known as the "[actor](#actor)". As defined by the [OAuth 2.0 Authorization Framework][OAuth2-Role-Def], an application that makes protected resource requests on behalf of the [resource owner](#resource-owner). They receive permissions from the resource owner in the form of scopes. The term "client" doesn't imply any particular hardware implementation characteristics (for instance, whether the application executes on a server, a desktop, or other devices). -A client application requests [authorization](#authorization) from a resource owner to participate in an [OAuth2 authorization grant](#authorization-grant) flow, and may access APIs/data on the resource owner's behalf. The OAuth2 Authorization Framework [defines two types of clients][OAuth2-Client-Types], "confidential" and "public", based on the client's ability to maintain the confidentiality of its credentials. Applications can implement a [web client (confidential)](#web-client) which runs on a web server, a [native client (public)](#native-client) installed on a device, or a [user-agent-based client (public)](#user-agent-based-client) which runs in a device's browser. +A client application requests [authorization](#authorization) from a resource owner to participate in an [OAuth 2.0 authorization grant](#authorization-grant) flow, and may access APIs/data on the resource owner's behalf. The OAuth 2.0 Authorization Framework [defines two types of clients][OAuth2-Client-Types], "confidential" and "public", based on the client's ability to maintain the confidentiality of its credentials. Applications can implement a [web client (confidential)](#web-client) which runs on a web server, a [native client (public)](#native-client) installed on a device, or a [user-agent-based client (public)](#user-agent-based-client) which runs in a device's browser. ## Consent @@ -111,7 +109,7 @@ See [consent framework](consent-framework.md) for more information. ## ID token -An [OpenID Connect][OpenIDConnect-ID-Token] [security token](#security-token) provided by an [authorization server's](#authorization-server) [authorization endpoint](#authorization-endpoint), which contains [claims](#claim) pertaining to the authentication of an end user [resource owner](#resource-owner). Like an access token, ID tokens are also represented as a digitally signed [JSON Web Token (JWT)][JWT]. Unlike an access token though, an ID token's claims are not used for purposes related to resource access and specifically access control. +An [OpenID Connect][OpenIDConnect-ID-Token] [security token](#security-token) provided by an [authorization server's](#authorization-server) [authorization endpoint](#authorization-endpoint), which contains [claims](#claim) pertaining to the authentication of an end user [resource owner](#resource-owner). Like an access token, ID tokens are also represented as a digitally signed [JSON Web Token (JWT)][JWT]. Unlike an access token though, an ID token's claims aren't used for purposes related to resource access and specifically access control. See the [ID token reference](id-tokens.md) for more details. 
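As a minimal, illustrative sketch of the "claims carried in a signed JWT" idea described for access tokens and ID tokens above, the following Python snippet decodes the payload segment of a token so you can look at its claims. It assumes a syntactically valid JWT and performs no validation; a real application must let a library such as MSAL, or a dedicated JWT library, validate the signature, issuer, audience, and expiry before trusting any claim.

```python
import base64
import json

def peek_jwt_claims(token: str) -> dict:
    """Decode (but do NOT validate) the claims segment of a JWT.
    A JWT has three dot-separated, base64url-encoded parts: header.payload.signature."""
    payload = token.split(".")[1]
    payload += "=" * (-len(payload) % 4)  # restore the stripped base64 padding
    return json.loads(base64.urlsafe_b64decode(payload))

# Example (with a token obtained elsewhere):
# claims = peek_jwt_claims(id_token)
# print(claims.get("preferred_username"), claims.get("oid"), claims.get("tid"))
```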
@@ -121,7 +119,7 @@ Eliminate the need for developers to manage credentials. Managed identities prov ## Microsoft identity platform -The Microsoft identity platform is an evolution of the Azure Active Directory (Azure AD) identity service and developer platform. It allows developers to build applications that sign in all Microsoft identities, get tokens to call Microsoft Graph, other Microsoft APIs, or APIs that developers have built. It’s a full-featured platform that consists of an authentication service, libraries, application registration and configuration, full developer documentation, code samples, and other developer content. The Microsoft identity platform supports industry standard protocols such as OAuth 2.0 and OpenID Connect. +The Microsoft identity platform is an evolution of the Azure Active Directory (Azure AD) identity service and developer platform. It allows developers to build applications that sign in all Microsoft identities, get tokens to call Microsoft Graph, other Microsoft APIs, or APIs that developers have built. It's a full-featured platform that consists of an authentication service, libraries, application registration and configuration, full developer documentation, code samples, and other developer content. The Microsoft identity platform supports industry standard protocols such as OAuth 2.0 and OpenID Connect. ## Multi-tenant application @@ -131,14 +129,14 @@ See [How to sign in any Azure AD user using the multi-tenant application pattern ## Native client -A type of [client application](#client-application) that is installed natively on a device. Since all code is executed on a device, it is considered a "public" client due to its inability to store credentials privately/confidentially. See [OAuth2 client types and profiles][OAuth2-Client-Types] for more details. +A type of [client application](#client-application) that is installed natively on a device. Since all code is executed on a device, it's considered a "public" client due to its inability to store credentials privately/confidentially. See [OAuth 2.0 client types and profiles][OAuth2-Client-Types] for more details. ## Permissions A [client application](#client-application) gains access to a [resource server](#resource-server) by declaring permission requests. Two types are available: -* "Delegated" permissions, which specify [scope-based](#scopes) access using delegated authorization from the signed-in [resource owner](#resource-owner), are presented to the resource at run-time as ["scp" claims](#claim) in the client's [access token](#access-token). These indicate the permission granted to the [actor](#actor) by the [subject](#subject). -* "Application" permissions, which specify [role-based](#roles) access using the client application's credentials/identity, are presented to the resource at run-time as ["roles" claims](#claim) in the client's access token. These indicate permissions granted to the [subject](#subject) by the tenant. +- "Delegated" permissions, which specify [scope-based](#scopes) access using delegated authorization from the signed-in [resource owner](#resource-owner), are presented to the resource at run-time as ["scp" claims](#claim) in the client's [access token](#access-token). These indicate the permission granted to the [actor](#actor) by the [subject](#subject). 
+- "Application" permissions, which specify [role-based](#roles) access using the client application's credentials/identity, are presented to the resource at run-time as ["roles" claims](#claim) in the client's access token. These indicate permissions granted to the [subject](#subject) by the tenant. They also surface during the [consent](#consent) process, giving the administrator or resource owner the opportunity to grant/deny the client access to resources in their tenant. @@ -146,21 +144,21 @@ Permission requests are configured on the **API permissions** page for an applic ## Refresh token -A type of [security token](#security-token) issued by an [authorization server](#authorization-server), and used by a [client application](#client-application) in order to request a new [access token](#access-token) before the access token expires. Typically in the form of a [JSON Web Token (JWT)][JWT]. +A type of [security token](#security-token) issued by an [authorization server](#authorization-server). Before an access token expires, a [client application](#client-application) includes its associated refresh token when it requests a new [access token](#access-token) from the authorization server. Refresh tokens are typically formatted as a [JSON Web Token (JWT)][JWT]. -Unlike access tokens, refresh tokens can be revoked. If a client application attempts to request a new access token using a refresh token that has been revoked, the authorization server will deny the request, and the client application will no longer have permission to access the [resource server](#resource-server) on behalf of the [resource owner](#resource-owner). +Unlike access tokens, refresh tokens can be revoked. An authorization server denies any request from a client application that includes a refresh token that has been revoked. When the authorization server denies a request that includes a revoked refresh token, the client application loses the permission to access the [resource server](#resource-server) on behalf of the [resource owner](#resource-owner). See the [refresh tokens](refresh-tokens.md) for more details. ## Resource owner -As defined by the [OAuth2 Authorization Framework][OAuth2-Role-Def], an entity capable of granting access to a protected resource. When the resource owner is a person, it is referred to as an end user. For example, when a [client application](#client-application) wants to access a user's mailbox through the [Microsoft Graph API][Microsoft-Graph], it requires permission from the resource owner of the mailbox. The "resource owner" is also sometimes called the [subject](#subject). +As defined by the [OAuth 2.0 Authorization Framework][OAuth2-Role-Def], an entity capable of granting access to a protected resource. When the resource owner is a person, it's referred to as an end user. For example, when a [client application](#client-application) wants to access a user's mailbox through the [Microsoft Graph API][Microsoft-Graph], it requires permission from the resource owner of the mailbox. The "resource owner" is also sometimes called the [subject](#subject). -Every [security token](#security-token) represents a resource owner. The resource owner is what the subject [claim](#claim), object ID claim, and personal data in the token represent. Resource owners are the party that grants delegated permissions to a client application, in the form of scopes. Resource owners are also the recipients of [roles](#roles) that indicate expanded permissions within a tenant or on an application. 
+Every [security token](#security-token) represents a resource owner. The resource owner is what the subject [claim](#claim), object ID claim, and personal data in the token represent. Resource owners are the party that grants delegated permissions to a client application, in the form of scopes. Resource owners are also the recipients of [roles](#roles) that indicate expanded permissions within a tenant or on an application. ## Resource server -As defined by the [OAuth2 Authorization Framework][OAuth2-Role-Def], a server that hosts protected resources, capable of accepting and responding to protected resource requests by [client applications](#client-application) that present an [access token](#access-token). Also known as a protected resource server, or resource application. +As defined by the [OAuth 2.0 Authorization Framework][OAuth2-Role-Def], a server that hosts protected resources, capable of accepting and responding to protected resource requests by [client applications](#client-application) that present an [access token](#access-token). Also known as a protected resource server, or resource application. A resource server exposes APIs and enforces access to its protected resources through [scopes](#scopes) and [roles](#roles), using the OAuth 2.0 Authorization Framework. Examples include the [Microsoft Graph API][Microsoft-Graph], which provides access to Azure AD tenant data, and the Microsoft 365 APIs that provide access to data such as mail and calendar. @@ -168,9 +166,9 @@ Just like a client application, resource application's identity configuration is ## Roles -Like [scopes](#scopes), app roles provide a way for a [resource server](#resource-server) to govern access to its protected resources. Unlike scopes, roles represent privileges that the [subject](#subject) has been granted beyond the baseline - this is why reading your own email is a scope, while being an email administrator that can read everyone's email is a role. +Like [scopes](#scopes), app roles provide a way for a [resource server](#resource-server) to govern access to its protected resources. Unlike scopes, roles represent privileges that the [subject](#subject) has been granted beyond the baseline - this is why reading your own email is a scope, while being an email administrator that can read everyone's email is a role. -App roles can support two assignment types: "user" assignment implements role-based access control for users/groups that require access to the resource, while "application" assignment implements the same for [client applications](#client-application) that require access. An app role can be defined as user-assignable, app-assignabnle, or both. +App roles can support two assignment types: "user" assignment implements role-based access control for users/groups that require access to the resource, while "application" assignment implements the same for [client applications](#client-application) that require access. An app role can be defined as user-assignable, app-assignable, or both. Roles are resource-defined strings (for example "Expense approver", "Read-only", "Directory.ReadWrite.All"), managed in the [Azure portal][AZURE-portal] via the resource's [application manifest](#application-manifest), and stored in the resource's [appRoles property][Graph-Sp-Resource]. The Azure portal is also used to assign users to "user" assignable roles, and configure client [application permissions](#permissions) to request "application" assignable roles.
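The "scp" and "roles" claims mentioned in the Permissions and Roles entries are what a resource server ultimately inspects at run time. The following Python sketch shows one way a web API could gate a request after the access token has already been validated; it assumes the common Azure AD claim shapes ("scp" as a space-separated string, "roles" as an array), and the scope and role names are illustrative, not values prescribed by this article.

```python
from typing import Mapping, Optional

def is_authorized(claims: Mapping,
                  required_scope: Optional[str] = None,
                  required_role: Optional[str] = None) -> bool:
    """Check delegated ('scp') or application ('roles') permissions in the
    claims of an already-validated access token. Illustrative only."""
    delegated = set(claims.get("scp", "").split())  # delegated permissions: space-separated string
    app_roles = set(claims.get("roles", []))        # app roles / application permissions: list of strings
    if required_scope and required_scope in delegated:
        return True
    return bool(required_role and required_role in app_roles)

# Example: permit the call if the caller has either the delegated scope or the app role.
# is_authorized(token_claims, required_scope="Mail.Read", required_role="Expense approver")
```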
@@ -186,17 +184,17 @@ A best practice naming convention, is to use a "resource.operation.constraint" f ## Security token -A signed document containing claims, such as an OAuth2 token or SAML 2.0 assertion. For an OAuth2 [authorization grant](#authorization-grant), an [access token](#access-token) (OAuth2), [refresh token](#refresh-token), and an [ID Token](https://openid.net/specs/openid-connect-core-1_0.html#IDToken) are types of security tokens, all of which are implemented as a [JSON Web Token (JWT)][JWT]. +A signed document containing claims, such as an OAuth 2.0 token or SAML 2.0 assertion. For an OAuth 2.0 [authorization grant](#authorization-grant), an [access token](#access-token) (OAuth2), [refresh token](#refresh-token), and an [ID Token](https://openid.net/specs/openid-connect-core-1_0.html#IDToken) are types of security tokens, all of which are implemented as a [JSON Web Token (JWT)][JWT]. ## Service principal object -When you register/update an application in the [Azure portal][AZURE-portal], the portal creates/updates both an [application object](#application-object) and a corresponding service principal object for that tenant. The application object *defines* the application's identity configuration globally (across all tenants where the associated application has been granted access), and is the template from which its corresponding service principal object(s) are *derived* for use locally at run-time (in a specific tenant). +When you register/update an application in the [Azure portal][AZURE-portal], the portal creates/updates both an [application object](#application-object) and a corresponding service principal object for that tenant. The application object _defines_ the application's identity configuration globally (across all tenants where the associated application has been granted access), and is the template from which its corresponding service principal object(s) are _derived_ for use locally at run-time (in a specific tenant). For more information, see [Application and Service Principal Objects][AAD-App-SP-Objects]. ## Sign-in -The process of a [client application](#client-application) initiating end-user authentication and capturing related state, for the purpose of acquiring a [security token](#security-token) and scoping the application session to that state. State can include artifacts such as user profile information, and information derived from token claims. +The process of a [client application](#client-application) initiating end-user authentication and capturing related state for requesting a [security token](#security-token) and scoping the application session to that state. State can include artifacts like user profile information, and information derived from token claims. The sign-in function of an application is typically used to implement single-sign-on (SSO). It may also be preceded by a "sign-up" function, as the entry point for an end user to gain access to an application (upon first sign-in). The sign-up function is used to gather and persist additional state specific to the user, and may require [user consent](#consent). @@ -206,37 +204,37 @@ The process of unauthenticating an end user, detaching the user state associated ## Subject -Also known as the [resource owner](#resource-owner). +Also known as the [resource owner](#resource-owner). ## Tenant An instance of an Azure AD directory is referred to as an Azure AD tenant. 
It provides several features, including: -* a registry service for integrated applications -* authentication of user accounts and registered applications -* REST endpoints required to support various protocols including OAuth2 and SAML, including the [authorization endpoint](#authorization-endpoint), [token endpoint](#token-endpoint) and the "common" endpoint used by [multi-tenant applications](#multi-tenant-application). +- a registry service for integrated applications +- authentication of user accounts and registered applications +- REST endpoints required to support various protocols including OAuth 2.0 and SAML, including the [authorization endpoint](#authorization-endpoint), [token endpoint](#token-endpoint) and the "common" endpoint used by [multi-tenant applications](#multi-tenant-application). Azure AD tenants are created/associated with Azure and Microsoft 365 subscriptions during sign-up, providing Identity & Access Management features for the subscription. Azure subscription administrators can also create additional Azure AD tenants via the Azure portal. See [How to get an Azure Active Directory tenant][AAD-How-To-Tenant] for details on the various ways you can get access to a tenant. See [Associate or add an Azure subscription to your Azure Active Directory tenant][AAD-How-Subscriptions-Assoc] for details on the relationship between subscriptions and an Azure AD tenant, and for instructions on how to associate or add a subscription to an Azure AD tenant. ## Token endpoint -One of the endpoints implemented by the [authorization server](#authorization-server) to support OAuth2 [authorization grants](#authorization-grant). Depending on the grant, it can be used to acquire an [access token](#access-token) (and related "refresh" token) to a [client](#client-application), or [ID token](#id-token) when used with the [OpenID Connect][OpenIDConnect] protocol. +One of the endpoints implemented by the [authorization server](#authorization-server) to support OAuth 2.0 [authorization grants](#authorization-grant). Depending on the grant, it can be used to acquire an [access token](#access-token) (and related "refresh" token) to a [client](#client-application), or [ID token](#id-token) when used with the [OpenID Connect][OpenIDConnect] protocol. ## User-agent-based client -A type of [client application](#client-application) that downloads code from a web server and executes within a user-agent (for instance, a web browser), such as a single-page application (SPA). Since all code is executed on a device, it is considered a "public" client due to its inability to store credentials privately/confidentially. For more information, see [OAuth2 client types and profiles][OAuth2-Client-Types]. +A type of [client application](#client-application) that downloads code from a web server and executes within a user-agent (for instance, a web browser), such as a single-page application (SPA). Since all code is executed on a device, it is considered a "public" client due to its inability to store credentials privately/confidentially. For more information, see [OAuth 2.0 client types and profiles][OAuth2-Client-Types]. ## User principal -Similar to the way a service principal object is used to represent an application instance, a user principal object is another type of security principal, which represents a user. 
The Microsoft Graph [User resource type][Graph-User-Resource] defines the schema for a user object, including user-related properties such as first and last name, user principal name, directory role membership, etc. This provides the user identity configuration for Azure AD to establish a user principal at run-time. The user principal is used to represent an authenticated user for Single Sign-On, recording [consent](#consent) delegation, making access control decisions, etc. +Similar to the way a service principal object is used to represent an application instance, a user principal object is another type of security principal, which represents a user. The Microsoft Graph [User resource type][Graph-User-Resource] defines the schema for a user object, including user-related properties like first and last name, user principal name, directory role membership, etc. This provides the user identity configuration for Azure AD to establish a user principal at run-time. The user principal is used to represent an authenticated user for Single Sign-On, recording [consent](#consent) delegation, making access control decisions, etc. ## Web client -A type of [client application](#client-application) that executes all code on a web server, and able to function as a "confidential" client by securely storing its credentials on the server. For more information, see [OAuth2 client types and profiles][OAuth2-Client-Types]. +A type of [client application](#client-application) that executes all code on a web server, functioning as a _confidential client_ because it can securely store its credentials on the server. For more information, see [OAuth 2.0 client types and profiles][OAuth2-Client-Types]. ## Workload identity -An identity used by a software workload (such as an application, service, script, or container) to authenticate and access other services and resources. In Azure AD, workload identities are apps, service principals, and managed identities. For more information, see [workload identity overview](workload-identities-overview.md). +An identity used by a software workload like an application, service, script, or container to authenticate and access other services and resources. In Azure AD, workload identities are apps, service principals, and managed identities. For more information, see [workload identity overview](workload-identities-overview.md). ## Workload identity federation @@ -244,9 +242,9 @@ Allows you to securely access Azure AD protected resources from external apps an ## Next steps -The [Microsoft identity platform Developer's Guide][AAD-Dev-Guide] is the landing page to use for all the Microsoft identity platform development-related topics, including an overview of [application integration][AAD-How-To-Integrate] and the basics of the [Microsoft identity platform authentication and supported authentication scenarios][AAD-Auth-Scenarios]. You can also find code samples & tutorials on how to get up and running quickly on [GitHub](https://github.com/azure-samples?utf8=%E2%9C%93&q=active%20directory&type=&language=). +Many of the terms in this glossary are related to the OAuth 2.0 and OpenID Connect protocols. Though you don't need to know how the protocols work "on the wire" to use the identity platform, knowing some protocol basics can help you more easily build and debug authentication and authorization in your apps: -Use the following comments section to provide feedback and help to refine and shape this content, including requests for new definitions or updating existing ones! 
+- [OAuth 2.0 and OpenID Connect (OIDC) in the Microsoft identity platform](active-directory-v2-protocols.md) @@ -278,4 +276,4 @@ Use the following comments section to provide feedback and help to refine and sh [OAuth2-Role-Def]: https://tools.ietf.org/html/rfc6749#page-6 [OpenIDConnect]: https://openid.net/specs/openid-connect-core-1_0.html [OpenIDConnect-AuthZ-Endpoint]: https://openid.net/specs/openid-connect-core-1_0.html#AuthorizationEndpoint -[OpenIDConnect-ID-Token]: https://openid.net/specs/openid-connect-core-1_0.html#IDToken \ No newline at end of file +[OpenIDConnect-ID-Token]: https://openid.net/specs/openid-connect-core-1_0.html#IDToken diff --git a/articles/active-directory/develop/developer-guide-conditional-access-authentication-context.md b/articles/active-directory/develop/developer-guide-conditional-access-authentication-context.md index c6d32e8191ce8..208132934127c 100644 --- a/articles/active-directory/develop/developer-guide-conditional-access-authentication-context.md +++ b/articles/active-directory/develop/developer-guide-conditional-access-authentication-context.md @@ -224,5 +224,5 @@ Do not use auth context where the app itself is going to be a target of Conditio - [Conditional Access authentication context](../conditional-access/concept-conditional-access-cloud-apps.md#authentication-context-preview) - [authenticationContextClassReference resource type - MS Graph](/graph/api/conditionalaccessroot-list-authenticationcontextclassreferences) - [Claims challenge, claims request, and client capabilities in the Microsoft identity platform](claims-challenge.md) -- [Using authentication context with Microsoft Information Protection and SharePoint](/microsoft-365/compliance/sensitivity-labels-teams-groups-sites#more-information-about-the-dependencies-for-the-authentication-context-option) +- [Using authentication context with Microsoft Purview Information Protection and SharePoint](/microsoft-365/compliance/sensitivity-labels-teams-groups-sites#more-information-about-the-dependencies-for-the-authentication-context-option) - [How to use Continuous Access Evaluation enabled APIs in your applications](app-resilience-continuous-access-evaluation.md) diff --git a/articles/active-directory/develop/developer-support-help-options.md b/articles/active-directory/develop/developer-support-help-options.md index 76078c9af8cac..5cb59be6bc880 100644 --- a/articles/active-directory/develop/developer-support-help-options.md +++ b/articles/active-directory/develop/developer-support-help-options.md @@ -1,5 +1,5 @@ --- -title: Support and help options for Microsoft identity platform developers | Azure +title: Support and help options for Microsoft identity platform developers description: Learn where to get help and find answers to your questions as you build identity and access management (IAM) solutions that integrate with Azure Active Directory (Azure AD) and other components of the Microsoft identity platform. 
services: active-directory author: mmacy diff --git a/articles/active-directory/develop/howto-add-app-roles-in-azure-ad-apps.md b/articles/active-directory/develop/howto-add-app-roles-in-azure-ad-apps.md index 35cd4a60c918c..d1bee6b8cb3a1 100644 --- a/articles/active-directory/develop/howto-add-app-roles-in-azure-ad-apps.md +++ b/articles/active-directory/develop/howto-add-app-roles-in-azure-ad-apps.md @@ -1,6 +1,5 @@ --- -title: Add app roles and get them from a token | Azure -titleSuffix: Microsoft identity platform +title: Add app roles and get them from a token description: Learn how to add app roles to an application registered in Azure Active Directory, assign users and groups to these roles, and receive them in the 'roles' claim in the token. services: active-directory author: kalyankrishna1 diff --git a/articles/active-directory/develop/howto-add-branding-in-azure-ad-apps.md b/articles/active-directory/develop/howto-add-branding-in-azure-ad-apps.md index b26b62f9740a4..dcab75d651fce 100644 --- a/articles/active-directory/develop/howto-add-branding-in-azure-ad-apps.md +++ b/articles/active-directory/develop/howto-add-branding-in-azure-ad-apps.md @@ -1,6 +1,5 @@ --- title: Sign in with Microsoft branding guidelines | Azure AD -titleSuffix: Microsoft identity platform description: Learn about application branding guidelines for Microsoft identity platform. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/howto-add-terms-of-service-privacy-statement.md b/articles/active-directory/develop/howto-add-terms-of-service-privacy-statement.md index cca41c2208f18..ec4306fa2f058 100644 --- a/articles/active-directory/develop/howto-add-terms-of-service-privacy-statement.md +++ b/articles/active-directory/develop/howto-add-terms-of-service-privacy-statement.md @@ -1,5 +1,5 @@ --- -title: Terms of Service and privacy statement for apps | Azure +title: Terms of Service and privacy statement for apps description: Learn how you can configure the terms of service and privacy statement for apps registered to use Azure AD. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/howto-authenticate-service-principal-powershell.md b/articles/active-directory/develop/howto-authenticate-service-principal-powershell.md index de308f7ca244c..41ccac7c2212d 100644 --- a/articles/active-directory/develop/howto-authenticate-service-principal-powershell.md +++ b/articles/active-directory/develop/howto-authenticate-service-principal-powershell.md @@ -1,6 +1,5 @@ --- -title: Create an Azure app identity (PowerShell) | Azure -titleSuffix: Microsoft identity platform +title: Create an Azure app identity (PowerShell) description: Describes how to use Azure PowerShell to create an Azure Active Directory application and service principal, and grant it access to resources through role-based access control. It shows how to authenticate application with a certificate. 
services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/howto-build-services-resilient-to-metadata-refresh.md b/articles/active-directory/develop/howto-build-services-resilient-to-metadata-refresh.md index a1b33618c202d..2bc6a8129bb7a 100644 --- a/articles/active-directory/develop/howto-build-services-resilient-to-metadata-refresh.md +++ b/articles/active-directory/develop/howto-build-services-resilient-to-metadata-refresh.md @@ -1,6 +1,5 @@ --- -title: "How to: Build services that are resilient to Azure AD's OpenID Connect metadata refresh | Azure" -titleSuffix: Microsoft identity platform +title: "How to: Build services that are resilient to Azure AD's OpenID Connect metadata refresh" description: Learn how to ensure that your web app or web api is resilient to Azure AD's OpenID Connect metadata refresh. services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/howto-configure-publisher-domain.md b/articles/active-directory/develop/howto-configure-publisher-domain.md index e90ebbb9db40a..4030d4dc48bcd 100644 --- a/articles/active-directory/develop/howto-configure-publisher-domain.md +++ b/articles/active-directory/develop/howto-configure-publisher-domain.md @@ -1,6 +1,5 @@ --- -title: Configure an app's publisher domain | Azure -titleSuffix: Microsoft identity platform +title: Configure an app's publisher domain description: Learn how to configure an application's publisher domain to let users know where their information is being sent. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/howto-convert-app-to-be-multi-tenant.md b/articles/active-directory/develop/howto-convert-app-to-be-multi-tenant.md index 2d406e21fc5d5..1a7ed22565640 100644 --- a/articles/active-directory/develop/howto-convert-app-to-be-multi-tenant.md +++ b/articles/active-directory/develop/howto-convert-app-to-be-multi-tenant.md @@ -1,6 +1,5 @@ --- title: Build apps that sign in Azure AD users -titleSuffix: Microsoft identity platform description: Shows how to build a multi-tenant application that can sign in a user from any Azure Active Directory tenant. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/howto-create-self-signed-certificate.md b/articles/active-directory/develop/howto-create-self-signed-certificate.md index fff9bb4bcbeee..76968921fbd35 100644 --- a/articles/active-directory/develop/howto-create-self-signed-certificate.md +++ b/articles/active-directory/develop/howto-create-self-signed-certificate.md @@ -1,6 +1,5 @@ --- -title: Create a self-signed public certificate to authenticate your application | Azure -titleSuffix: Microsoft identity platform +title: Create a self-signed public certificate to authenticate your application description: Create a self-signed public certificate to authenticate your application. 
services: active-directory author: FaithOmbongi diff --git a/articles/active-directory/develop/howto-create-service-principal-portal.md b/articles/active-directory/develop/howto-create-service-principal-portal.md index 5a0d4c3c8d7ec..0086eab616392 100644 --- a/articles/active-directory/develop/howto-create-service-principal-portal.md +++ b/articles/active-directory/develop/howto-create-service-principal-portal.md @@ -1,6 +1,5 @@ --- title: Create an Azure AD app and service principal in the portal -titleSuffix: Microsoft identity platform description: Create a new Azure Active Directory app and service principal to manage access to resources with role-based access control in Azure Resource Manager. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/howto-get-list-of-all-active-directory-auth-library-apps.md b/articles/active-directory/develop/howto-get-list-of-all-active-directory-auth-library-apps.md index 60b067382871e..f2bfecccc3815 100644 --- a/articles/active-directory/develop/howto-get-list-of-all-active-directory-auth-library-apps.md +++ b/articles/active-directory/develop/howto-get-list-of-all-active-directory-auth-library-apps.md @@ -1,6 +1,5 @@ --- -title: "How to: Get a complete list of all apps using Active Directory Authentication Library (ADAL) in your tenant | Azure" -titleSuffix: Microsoft identity platform +title: "How to: Get a complete list of all apps using Active Directory Authentication Library (ADAL) in your tenant" description: In this how-to guide, you get a complete list of all apps that are using ADAL in your tenant. services: active-directory author: SHERMANOUKO diff --git a/articles/active-directory/develop/howto-handle-samesite-cookie-changes-chrome-browser.md b/articles/active-directory/develop/howto-handle-samesite-cookie-changes-chrome-browser.md index 371bc2c3740a8..467e07f73a2af 100644 --- a/articles/active-directory/develop/howto-handle-samesite-cookie-changes-chrome-browser.md +++ b/articles/active-directory/develop/howto-handle-samesite-cookie-changes-chrome-browser.md @@ -1,6 +1,5 @@ --- -title: How to handle SameSite cookie changes in Chrome browser | Azure -titleSuffix: Microsoft identity platform +title: How to handle SameSite cookie changes in Chrome browser description: Learn how to handle SameSite cookie changes in Chrome browser. services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/howto-implement-rbac-for-apps.md b/articles/active-directory/develop/howto-implement-rbac-for-apps.md index 3535628fb5671..eaef0b0149705 100644 --- a/articles/active-directory/develop/howto-implement-rbac-for-apps.md +++ b/articles/active-directory/develop/howto-implement-rbac-for-apps.md @@ -1,6 +1,5 @@ --- title: Implement role-based access control in apps -titleSuffix: Microsoft identity platform description: Learn how to implement role-based access control in your applications. 
services: active-directory author: Chrispine-Chiedo diff --git a/articles/active-directory/develop/howto-modify-supported-accounts.md b/articles/active-directory/develop/howto-modify-supported-accounts.md index 1fc634db70c49..475de4b52f5c1 100644 --- a/articles/active-directory/develop/howto-modify-supported-accounts.md +++ b/articles/active-directory/develop/howto-modify-supported-accounts.md @@ -1,6 +1,5 @@ --- -title: "How to: Change the account types supported by an application | Azure" -titleSuffix: Microsoft identity platform +title: "How to: Change the account types supported by an application" description: In this how-to, you configure an application registered with the Microsoft identity platform to change who, or what accounts, can access the application. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/howto-remove-app.md b/articles/active-directory/develop/howto-remove-app.md index 62780a24ed302..f93dfc8143a7a 100644 --- a/articles/active-directory/develop/howto-remove-app.md +++ b/articles/active-directory/develop/howto-remove-app.md @@ -1,6 +1,5 @@ --- -title: "How to: Remove a registered app from the Microsoft identity platform | Azure" -titleSuffix: Microsoft identity platform +title: "How to: Remove a registered app from the Microsoft identity platform" description: In this how-to, you learn how to remove an application registered with the Microsoft identity platform. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/howto-restore-app.md b/articles/active-directory/develop/howto-restore-app.md index 543216ece2501..a970ab0ac2d7b 100644 --- a/articles/active-directory/develop/howto-restore-app.md +++ b/articles/active-directory/develop/howto-restore-app.md @@ -1,6 +1,5 @@ --- -title: "How to: Restore or remove a recently deleted application with the Microsoft identity platform | Azure" -titleSuffix: Microsoft identity platform +title: "How to: Restore or remove a recently deleted application with the Microsoft identity platform" description: In this how-to, you learn how to restore or permanently delete a recently deleted application registered with the Microsoft identity platform. services: active-directory author: arcrowe diff --git a/articles/active-directory/develop/howto-restrict-your-app-to-a-set-of-users.md b/articles/active-directory/develop/howto-restrict-your-app-to-a-set-of-users.md index a5c71f738cfda..a27ab482ed777 100644 --- a/articles/active-directory/develop/howto-restrict-your-app-to-a-set-of-users.md +++ b/articles/active-directory/develop/howto-restrict-your-app-to-a-set-of-users.md @@ -1,6 +1,5 @@ --- -title: Restrict Azure AD app to a set of users | Azure -titleSuffix: Microsoft identity platform +title: Restrict Azure AD app to a set of users description: Learn how to restrict access to your apps registered in Azure AD to a selected set of users. services: active-directory author: kalyankrishna1 diff --git a/articles/active-directory/develop/howto-v2-keychain-objc.md b/articles/active-directory/develop/howto-v2-keychain-objc.md index b96a1d812901d..a9a6cd6920751 100644 --- a/articles/active-directory/develop/howto-v2-keychain-objc.md +++ b/articles/active-directory/develop/howto-v2-keychain-objc.md @@ -1,6 +1,5 @@ --- title: Configure keychain -titleSuffix: Microsoft identity platform description: Learn how to configure keychain so that your app can cache tokens in the keychain. 
services: active-directory author: mmacy diff --git a/articles/active-directory/develop/id-tokens.md b/articles/active-directory/develop/id-tokens.md index 642ce686655ab..f742ee209f717 100644 --- a/articles/active-directory/develop/id-tokens.md +++ b/articles/active-directory/develop/id-tokens.md @@ -1,6 +1,5 @@ --- -title: Microsoft identity platform ID tokens | Azure -titleSuffix: Microsoft identity platform +title: Microsoft identity platform ID tokens description: Learn how to use id_tokens emitted by the Azure AD v1.0 and Microsoft identity platform (v2.0) endpoints. services: active-directory author: nickludwig diff --git a/articles/active-directory/develop/identity-platform-integration-checklist.md b/articles/active-directory/develop/identity-platform-integration-checklist.md index d216812d35348..e52ec3673f25f 100644 --- a/articles/active-directory/develop/identity-platform-integration-checklist.md +++ b/articles/active-directory/develop/identity-platform-integration-checklist.md @@ -1,5 +1,5 @@ --- -title: Best practices for the Microsoft identity platform | Azure +title: Best practices for the Microsoft identity platform description: Learn about best practices, recommendations, and common oversights when integrating with the Microsoft identity platform. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/identity-videos.md b/articles/active-directory/develop/identity-videos.md index 80fbd63342188..7ed3ceffc7910 100644 --- a/articles/active-directory/develop/identity-videos.md +++ b/articles/active-directory/develop/identity-videos.md @@ -1,5 +1,5 @@ --- -title: Microsoft identity platform videos | Azure +title: Microsoft identity platform videos description: A list of videos about modern authentication and the Microsoft identity platform services: active-directory author: mmacy diff --git a/articles/active-directory/develop/includes/console-app/quickstart-java.md b/articles/active-directory/develop/includes/console-app/quickstart-java.md index 06d4df639244c..14a4367f3611a 100644 --- a/articles/active-directory/develop/includes/console-app/quickstart-java.md +++ b/articles/active-directory/develop/includes/console-app/quickstart-java.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Call Microsoft Graph from a Java daemon | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Call Microsoft Graph from a Java daemon" description: In this quickstart, you learn how a Java app can get an access token and call an API protected by Microsoft identity platform endpoint, using the app's own identity services: active-directory author: mmacy diff --git a/articles/active-directory/develop/includes/console-app/quickstart-netcore.md b/articles/active-directory/develop/includes/console-app/quickstart-netcore.md index faf7a37384d6e..f1f501b0cf8f8 100644 --- a/articles/active-directory/develop/includes/console-app/quickstart-netcore.md +++ b/articles/active-directory/develop/includes/console-app/quickstart-netcore.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Get token & call Microsoft Graph in a console app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Get token & call Microsoft Graph in a console app" description: In this quickstart, you learn how a .NET Core sample app can use the client credentials flow to get a token and call Microsoft Graph. 
services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/includes/console-app/quickstart-nodejs.md b/articles/active-directory/develop/includes/console-app/quickstart-nodejs.md index 182a61f351684..4a990ae84b2ee 100644 --- a/articles/active-directory/develop/includes/console-app/quickstart-nodejs.md +++ b/articles/active-directory/develop/includes/console-app/quickstart-nodejs.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Call Microsoft Graph from a Node.js console app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Call Microsoft Graph from a Node.js console app" description: In this quickstart, you download and run a code sample that shows how a Node.js console application can get an access token and call an API protected by a Microsoft identity platform endpoint, using the app's own identity services: active-directory author: mmacy diff --git a/articles/active-directory/develop/includes/console-app/quickstart-python.md b/articles/active-directory/develop/includes/console-app/quickstart-python.md index 896b71e253932..74b73e8a9026b 100644 --- a/articles/active-directory/develop/includes/console-app/quickstart-python.md +++ b/articles/active-directory/develop/includes/console-app/quickstart-python.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Call Microsoft Graph from a Python daemon | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Call Microsoft Graph from a Python daemon" description: In this quickstart, you learn how a Python process can get an access token and call an API protected by Microsoft identity platform, using the app's own identity services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/includes/desktop-app/quickstart-nodejs-electron.md b/articles/active-directory/develop/includes/desktop-app/quickstart-nodejs-electron.md index aecbb91404d8b..c0b0cd9bfea7b 100644 --- a/articles/active-directory/develop/includes/desktop-app/quickstart-nodejs-electron.md +++ b/articles/active-directory/develop/includes/desktop-app/quickstart-nodejs-electron.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Call Microsoft Graph from a Node.js desktop app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Call Microsoft Graph from a Node.js desktop app" description: In this quickstart, you learn how a Node.js Electron desktop application can sign-in users and get an access token to call an API protected by a Microsoft identity platform endpoint services: active-directory author: mmacy @@ -39,7 +38,7 @@ To register your application and add the app's registration information to your 1. Select **Register** to create the application. 1. Under **Manage**, select **Authentication**. 1. Select **Add a platform** > **Mobile and desktop applications**. -1. In the **Redirect URIs** section, enter `msal://redirect`. +1. In the **Redirect URIs** section, enter the redirect URI suggested by the app registration portal, e.g. `msalfa29b4c9-7675-4b61-8a0a-bf7b2b4fda91://auth`. 1. Select **Configure**. #### Step 2: Download the Electron sample project @@ -49,39 +48,80 @@ To register your application and add the app's registration information to your #### Step 3: Configure the Electron sample project -1. Extract the zip file to a local folder close to the root of the disk, for example, *C:/Azure-Samples*. -1. 
Edit *.env* and replace the values of the fields `TENANT_ID` and `CLIENT_ID` with the following snippet: +Extract the project, open the *ms-identity-javascript-nodejs-desktop-main* folder, and then open the *authConfig.js* file. Replace the values as follows: + +| Variable | Description | Example(s) | +|-----------|--------------|------------| +| `Enter_the_Cloud_Instance_Id_Here` | The Azure cloud instance in which your application is registered | `https://login.microsoftonline.com/` (include the trailing forward-slash)| +| `Enter_the_Tenant_Id_Here` | Tenant ID or Primary domain | `contoso.microsoft.com` or `cbe899ec-5f5c-4efe-b7a0-599505d3d54f` | +| `Enter_the_Application_Id_Here` | Client ID of the application you registered | `fa29b4c9-7675-4b61-8a0a-bf7b2b4fda91` | +| `Enter_the_Redirect_Uri_Here` | Redirect Uri of the application you registered | `msalfa29b4c9-7675-4b61-8a0a-bf7b2b4fda91://auth` | +| `Enter_the_Graph_Endpoint_Here` | The Microsoft Graph API cloud instance that your app will call | `https://graph.microsoft.com/` (include the trailing forward-slash)| + +Your file should look similar to the following: + + ```javascript + const AAD_ENDPOINT_HOST = "https://login.microsoftonline.com/"; // include the trailing slash + const REDIRECT_URI = "msalfa29b4c9-7675-4b61-8a0a-bf7b2b4fda91://auth"; + const msalConfig = { + auth: { + clientId: "fa29b4c9-7675-4b61-8a0a-bf7b2b4fda91", + authority: `${AAD_ENDPOINT_HOST}cbe899ec-5f5c-4efe-b7a0-599505d3d54f`, + }, + system: { + loggerOptions: { + loggerCallback(loglevel, message, containsPii) { + console.log(message); + }, + piiLoggingEnabled: false, + logLevel: LogLevel.Verbose, + } + } + } + + const GRAPH_ENDPOINT_HOST = "https://graph.microsoft.com/"; // include the trailing slash + const protectedResources = { + graphMe: { + endpoint: `${GRAPH_ENDPOINT_HOST}v1.0/me`, + scopes: ["User.Read"], + }, + graphMessages: { + endpoint: `${GRAPH_ENDPOINT_HOST}v1.0/me/messages`, + scopes: ["Mail.Read"], + } + }; + + module.exports = { + msalConfig: msalConfig, + protectedResources: protectedResources, + }; ``` - "TENANT_ID": "Enter_the_Tenant_Id_Here", - "CLIENT_ID": "Enter_the_Application_Id_Here" - ``` - Where: - - `Enter_the_Application_Id_Here` - is the **Application (client) ID** for the application you registered. - - `Enter_the_Tenant_Id_Here` - replace this value with the **Tenant Id** or **Tenant name** (for example, contoso.microsoft.com) - -> [!TIP] -> To find the values of **Application (client) ID**, **Directory (tenant) ID**, go to the app's **Overview** page in the Azure portal. #### Step 4: Run the application -You'll need to install the dependencies of this sample once: +1. You'll need to install the dependencies of this sample once: -```console -npm install -``` + ```console + cd ms-identity-javascript-nodejs-desktop-main + npm install + ``` -Then, run the application via command prompt or console: +1. Then, run the application via command prompt or console: -```console -npm start -``` + ```console + npm start + ``` + +1. Select **Sign in** to start the sign-in process. + + The first time you sign in, you're prompted to provide your consent to allow the application to sign you in and access your profile. After you're signed in successfully, you'll be redirected back to the application. -You should see application's UI with a **Sign in** button. ## More information -## About the code +### How the sample works -Below, some of the important aspects of the sample application are discussed.
+When a user selects the **Sign In** button for the first time, the `getTokenInteractive` method of *AuthProvider.js* is called. This method redirects the user to sign in with the *Microsoft identity platform endpoint*, where the user's credentials are validated, and then obtains an **authorization code**. This code is then exchanged for an access token using the `acquireTokenByCode` method of MSAL Node. ### MSAL Node @@ -92,124 +132,6 @@ You can install MSAL Node by running the following npm command. ```console npm install @azure/msal-node --save ``` - -### MSAL initialization - -You can add the reference for MSAL Node by adding the following code: - -```javascript -const { PublicClientApplication } = require('@azure/msal-node'); -``` - -Then, initialize MSAL using the following code: - -```javascript -const MSAL_CONFIG = { - auth: { - clientId: "Enter_the_Application_Id_Here", - authority: "https://login.microsoftonline.com/Enter_the_Tenant_Id_Here", - }, -}; - -const pca = new PublicClientApplication(MSAL_CONFIG); -``` - -> | Where: |Description | -> |---------|---------| -> | `clientId` | Is the **Application (client) ID** for the application registered in the Azure portal. You can find this value in the app's **Overview** page in the Azure portal. | -> | `authority` | The STS endpoint for user to authenticate. Usually `https://login.microsoftonline.com/{tenant}` for public cloud, where {tenant} is the name of your tenant or your tenant Id.| - -### Requesting tokens - -In the first leg of authorization code flow with PKCE, prepare and send an authorization code request with the appropriate parameters. Then, in the second leg of the flow, listen for the authorization code response. Once the code is obtained, exchange it to obtain a token. - -```javascript -// The redirect URI you setup during app registration with a custom file protocol "msal" -const redirectUri = "msal://redirect"; - -const cryptoProvider = new CryptoProvider(); - -const pkceCodes = { - challengeMethod: "S256", // Use SHA256 Algorithm - verifier: "", // Generate a code verifier for the Auth Code Request first - challenge: "" // Generate a code challenge from the previously generated code verifier -}; - -/** - * Starts an interactive token request - * @param {object} authWindow: Electron window object - * @param {object} tokenRequest: token request object with scopes - */ -async function getTokenInteractive(authWindow, tokenRequest) { - - /** - * Proof Key for Code Exchange (PKCE) Setup - * - * MSAL enables PKCE in the Authorization Code Grant Flow by including the codeChallenge and codeChallengeMethod - * parameters in the request passed into getAuthCodeUrl() API, as well as the codeVerifier parameter in the - * second leg (acquireTokenByCode() API). 
- */ - - const {verifier, challenge} = await cryptoProvider.generatePkceCodes(); - - pkceCodes.verifier = verifier; - pkceCodes.challenge = challenge; - - const authCodeUrlParams = { - redirectUri: redirectUri - scopes: tokenRequest.scopes, - codeChallenge: pkceCodes.challenge, // PKCE Code Challenge - codeChallengeMethod: pkceCodes.challengeMethod // PKCE Code Challenge Method - }; - - const authCodeUrl = await pca.getAuthCodeUrl(authCodeUrlParams); - - // register the custom file protocol in redirect URI - protocol.registerFileProtocol(redirectUri.split(":")[0], (req, callback) => { - const requestUrl = url.parse(req.url, true); - callback(path.normalize(`${__dirname}/${requestUrl.path}`)); - }); - - const authCode = await listenForAuthCode(authCodeUrl, authWindow); // see below - - const authResponse = await pca.acquireTokenByCode({ - redirectUri: redirectUri, - scopes: tokenRequest.scopes, - code: authCode, - codeVerifier: pkceCodes.verifier // PKCE Code Verifier - }); - - return authResponse; -} - -/** - * Listens for auth code response from Azure AD - * @param {string} navigateUrl: URL where auth code response is parsed - * @param {object} authWindow: Electron window object - */ -async function listenForAuthCode(navigateUrl, authWindow) { - - authWindow.loadURL(navigateUrl); - - return new Promise((resolve, reject) => { - authWindow.webContents.on('will-redirect', (event, responseUrl) => { - try { - const parsedUrl = new URL(responseUrl); - const authCode = parsedUrl.searchParams.get('code'); - resolve(authCode); - } catch (err) { - reject(err); - } - }); - }); -} -``` - -> |Where:| Description | -> |---------|---------| -> | `authWindow` | Current Electron window in process. | -> | `tokenRequest` | Contains the scopes being requested, such as `"User.Read"` for Microsoft Graph or `"api:///access_as_user"` for custom web APIs. | - ## Next steps To learn more about Electron desktop app development with MSAL Node, see the tutorial: diff --git a/articles/active-directory/develop/includes/desktop-app/quickstart-uwp.md b/articles/active-directory/develop/includes/desktop-app/quickstart-uwp.md index 93d43e5640d9e..a80a52b6ae5db 100644 --- a/articles/active-directory/develop/includes/desktop-app/quickstart-uwp.md +++ b/articles/active-directory/develop/includes/desktop-app/quickstart-uwp.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Sign in users and call Microsoft Graph in a Universal Windows Platform app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Sign in users and call Microsoft Graph in a Universal Windows Platform app" description: In this quickstart, learn how a Universal Windows Platform (UWP) application can get an access token and call an API protected by Microsoft identity platform. services: active-directory author: jmprieur @@ -9,7 +8,7 @@ ms.service: active-directory ms.subservice: develop ms.topic: include ms.workload: identity -ms.date: 03/04/2022 +ms.date: 05/19/2022 ms.author: jmprieur ms.custom: aaddev, identityplatformtop40, "scenarios:getting-started", "languages:UWP", mode-api #Customer intent: As an application developer, I want to learn how my Universal Windows Platform (UWP) application can get an access token and call an API that's protected by the Microsoft identity platform. 
@@ -134,7 +133,7 @@ Some situations require forcing users to interact with the Microsoft identity pl - When two factor authentication is required ```csharp -authResult = await App.PublicClientApp.AcquireTokenInteractive(scopes) +authResult = await PublicClientApp.AcquireTokenInteractive(scopes) .ExecuteAsync(); ``` @@ -145,9 +144,9 @@ The `scopes` parameter contains the scopes being requested, such as `{ "user.rea Use the `AcquireTokenSilent` method to obtain tokens to access protected resources after the initial `AcquireTokenInteractive` method. You don’t want to require the user to validate their credentials every time they need to access a resource. Most of the time you want token acquisitions and renewal without any user interaction ```csharp -var accounts = await App.PublicClientApp.GetAccountsAsync(); +var accounts = await PublicClientApp.GetAccountsAsync(); var firstAccount = accounts.FirstOrDefault(); -authResult = await App.PublicClientApp.AcquireTokenSilent(scopes, firstAccount) +authResult = await PublicClientApp.AcquireTokenSilent(scopes, firstAccount) .ExecuteAsync(); ``` diff --git a/articles/active-directory/develop/includes/desktop-app/quickstart-windows-desktop.md b/articles/active-directory/develop/includes/desktop-app/quickstart-windows-desktop.md index a6e406c3f0b0c..592e9feab9091 100644 --- a/articles/active-directory/develop/includes/desktop-app/quickstart-windows-desktop.md +++ b/articles/active-directory/develop/includes/desktop-app/quickstart-windows-desktop.md @@ -1,5 +1,5 @@ --- -title: "Quickstart: Sign in users and call Microsoft Graph in a Windows desktop app | Azure" +title: "Quickstart: Sign in users and call Microsoft Graph in a Windows desktop app" description: Learn how a Windows Presentation Foundation (WPF) application can get an access token and call an API protected by the Microsoft identity platform. services: active-directory author: jmprieur @@ -8,7 +8,7 @@ ms.service: active-directory ms.subservice: develop ms.topic: include ms.workload: identity -ms.date: 03/04/2022 +ms.date: 05/19/2022 ms.author: jmprieur ms.custom: aaddev, identityplatformtop40, mode-api #Customer intent: As an application developer, I want to learn how my Windows Presentation Foundation (WPF) application can get an access token and call an API that's protected by the Microsoft identity platform. @@ -134,7 +134,7 @@ Some situations require forcing users interact with the Microsoft identity platf - When two factor authentication is required ```csharp -authResult = await App.PublicClientApp.AcquireTokenInteractive(_scopes) +authResult = await app.AcquireTokenInteractive(_scopes) .ExecuteAsync(); ``` @@ -147,9 +147,9 @@ authResult = await App.PublicClientApp.AcquireTokenInteractive(_scopes) You don't want to require the user to validate their credentials every time they need to access a resource. Most of the time you want token acquisitions and renewal without any user interaction. 
You can use the `AcquireTokenSilent` method to obtain tokens to access protected resources after the initial `AcquireTokenInteractive` method: ```csharp -var accounts = await App.PublicClientApp.GetAccountsAsync(); +var accounts = await app.GetAccountsAsync(); var firstAccount = accounts.FirstOrDefault(); -authResult = await App.PublicClientApp.AcquireTokenSilent(scopes, firstAccount) +authResult = await app.AcquireTokenSilent(scopes, firstAccount) .ExecuteAsync(); ``` diff --git a/articles/active-directory/develop/includes/mobile-app/quickstart-android.md b/articles/active-directory/develop/includes/mobile-app/quickstart-android.md index 167767eba00b9..b446e8905dcdd 100644 --- a/articles/active-directory/develop/includes/mobile-app/quickstart-android.md +++ b/articles/active-directory/develop/includes/mobile-app/quickstart-android.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Add sign in with Microsoft to an Android app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Add sign in with Microsoft to an Android app" description: In this quickstart, learn how Android applications can call an API that requires access tokens issued by the Microsoft identity platform. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/includes/mobile-app/quickstart-ios.md b/articles/active-directory/develop/includes/mobile-app/quickstart-ios.md index bf90794551262..332b4d42e6e66 100644 --- a/articles/active-directory/develop/includes/mobile-app/quickstart-ios.md +++ b/articles/active-directory/develop/includes/mobile-app/quickstart-ios.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Add sign in with Microsoft to an iOS or macOS app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Add sign in with Microsoft to an iOS or macOS app" description: In this quickstart, learn how an iOS or macOS app can sign in users, get an access token from the Microsoft identity platform, and call the Microsoft Graph API. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/includes/single-page-app/quickstart-angular.md b/articles/active-directory/develop/includes/single-page-app/quickstart-angular.md index e56794febaed9..319c5f098b027 100644 --- a/articles/active-directory/develop/includes/single-page-app/quickstart-angular.md +++ b/articles/active-directory/develop/includes/single-page-app/quickstart-angular.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Sign in users in JavaScript Angular single-page apps (SPA) with auth code and call Microsoft Graph | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Sign in users in JavaScript Angular single-page apps (SPA) with auth code and call Microsoft Graph" description: In this quickstart, learn how a JavaScript Angular single-page application (SPA) can sign in users of personal accounts, work accounts, and school accounts by using the authorization code flow and call Microsoft Graph. 
services: active-directory author: j-mantu diff --git a/articles/active-directory/develop/includes/single-page-app/quickstart-javascript.md b/articles/active-directory/develop/includes/single-page-app/quickstart-javascript.md index dd0a910dd59a5..ef9c5f546b348 100644 --- a/articles/active-directory/develop/includes/single-page-app/quickstart-javascript.md +++ b/articles/active-directory/develop/includes/single-page-app/quickstart-javascript.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Sign in users in JavaScript single-page apps (SPA) with auth code | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Sign in users in JavaScript single-page apps (SPA) with auth code" description: In this quickstart, learn how a JavaScript single-page application (SPA) can sign in users of personal accounts, work accounts, and school accounts by using the authorization code flow. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/includes/single-page-app/quickstart-react.md b/articles/active-directory/develop/includes/single-page-app/quickstart-react.md index 25bf1bea8de1f..6af4df2fde295 100644 --- a/articles/active-directory/develop/includes/single-page-app/quickstart-react.md +++ b/articles/active-directory/develop/includes/single-page-app/quickstart-react.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Sign in users in JavaScript React single-page apps (SPA) with auth code and call Microsoft Graph | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Sign in users in JavaScript React single-page apps (SPA) with auth code and call Microsoft Graph" description: In this quickstart, learn how a JavaScript React single-page application (SPA) can sign in users of personal accounts, work accounts, and school accounts by using the authorization code flow and call Microsoft Graph. services: active-directory author: j-mantu diff --git a/articles/active-directory/develop/includes/suggest-msal-from-protocols.md b/articles/active-directory/develop/includes/suggest-msal-from-protocols.md index a493fec534602..e2e74d2f90d52 100644 --- a/articles/active-directory/develop/includes/suggest-msal-from-protocols.md +++ b/articles/active-directory/develop/includes/suggest-msal-from-protocols.md @@ -1,5 +1,5 @@ --- -title: Prefer MSAL | Azure +title: Prefer MSAL description: Include file indicating that it's best to use MSAL. 
services: active-directory author: nickludwig diff --git a/articles/active-directory/develop/includes/try-in-postman-link.md b/articles/active-directory/develop/includes/try-in-postman-link.md index 912a09dee9968..98e92677e35ba 100644 --- a/articles/active-directory/develop/includes/try-in-postman-link.md +++ b/articles/active-directory/develop/includes/try-in-postman-link.md @@ -1,5 +1,5 @@ --- -title: Try the protocols in Postman | Azure +title: Try the protocols in Postman description: Standard link to use the Postman files services: active-directory author: nickludwig diff --git a/articles/active-directory/develop/includes/web-api/quickstart-aspnet-core.md b/articles/active-directory/develop/includes/web-api/quickstart-aspnet-core.md index 3ea1e36e32f85..88a19bbbbbe34 100644 --- a/articles/active-directory/develop/includes/web-api/quickstart-aspnet-core.md +++ b/articles/active-directory/develop/includes/web-api/quickstart-aspnet-core.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Protect an ASP.NET Core web API with the Microsoft identity platform | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Protect an ASP.NET Core web API with the Microsoft identity platform" description: In this quickstart, you download and modify a code sample that demonstrates how to protect an ASP.NET Core web API by using the Microsoft identity platform for authorization. services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/includes/web-api/quickstart-dotnet-native-aspnet.md b/articles/active-directory/develop/includes/web-api/quickstart-dotnet-native-aspnet.md index 122a39f2e57a8..78929d853106c 100644 --- a/articles/active-directory/develop/includes/web-api/quickstart-dotnet-native-aspnet.md +++ b/articles/active-directory/develop/includes/web-api/quickstart-dotnet-native-aspnet.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Call an ASP.NET web API that is protected by the Microsoft identity platform | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Call an ASP.NET web API that is protected by the Microsoft identity platform" description: In this quickstart, learn how to call an ASP.NET web API that's protected by the Microsoft identity platform from a Windows Desktop (WPF) application. services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/includes/web-app/quickstart-aspnet-core.md b/articles/active-directory/develop/includes/web-app/quickstart-aspnet-core.md index b76296caeef24..5c9f4bdb6ab29 100644 --- a/articles/active-directory/develop/includes/web-app/quickstart-aspnet-core.md +++ b/articles/active-directory/develop/includes/web-app/quickstart-aspnet-core.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: ASP.NET Core web app that signs in users and calls Microsoft Graph | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: ASP.NET Core web app that signs in users and calls Microsoft Graph" description: Learn how an ASP.NET Core web app leverages Microsoft.Identity.Web to implement Microsoft sign-in using OpenID Connect and call Microsoft Graph services: active-directory author: jmprieur @@ -33,7 +32,7 @@ See [How the sample works](#how-the-sample-works) for an illustration. 1. Search for and select **Azure Active Directory**. 1. Under **Manage**, select **App registrations** > **New registration**. 1. For **Name**, enter a name for your application. For example, enter **AspNetCore-Quickstart**. Users of your app will see this name, and you can change it later. -1. 
For **Redirect URI**, enter **https://localhost:44321/signin-oidc**. +1. Set the **Redirect URI** type to **Web** and value to `https://localhost:44321/signin-oidc`. 1. Select **Register**. 1. Under **Manage**, select **Authentication**. 1. For **Front-channel logout URL**, enter **https://localhost:44321/signout-oidc**. diff --git a/articles/active-directory/develop/includes/web-app/quickstart-aspnet.md b/articles/active-directory/develop/includes/web-app/quickstart-aspnet.md index 00dd1d7eb14ef..a7c7a0fd8c55a 100644 --- a/articles/active-directory/develop/includes/web-app/quickstart-aspnet.md +++ b/articles/active-directory/develop/includes/web-app/quickstart-aspnet.md @@ -1,6 +1,5 @@ --- title: "Quickstart: ASP.NET web app that signs in users" -titleSuffix: Microsoft identity platform description: Download and run a code sample that shows how an ASP.NET web app can sign in Azure AD users. services: active-directory author: jmprieur @@ -46,7 +45,8 @@ If you want to manually configure your application and code sample, use the foll 1. Search for and select **Azure Active Directory**. 1. Under **Manage**, select **App registrations** > **New registration**. 1. For **Name**, enter a name for your application. For example, enter **ASPNET-Quickstart**. Users of your app will see this name, and you can change it later. -1. Add **https://localhost:44368/** in **Redirect URI**, and select **Register**. +1. Set the **Redirect URI** type to **Web** and value to `https://localhost:44368/`. +1. Select **Register**. 1. Under **Manage**, select **Authentication**. 1. In the **Implicit grant and hybrid flows** section, select **ID tokens**. 1. Select **Save**. diff --git a/articles/active-directory/develop/includes/web-app/quickstart-java.md b/articles/active-directory/develop/includes/web-app/quickstart-java.md index 50991e52e0362..a87acd087003b 100644 --- a/articles/active-directory/develop/includes/web-app/quickstart-java.md +++ b/articles/active-directory/develop/includes/web-app/quickstart-java.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Add sign-in with Microsoft to a Java web app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Add sign-in with Microsoft to a Java web app" description: In this quickstart, you'll learn how to add sign-in with Microsoft to a Java web application by using OpenID Connect. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/includes/web-app/quickstart-nodejs-msal.md b/articles/active-directory/develop/includes/web-app/quickstart-nodejs-msal.md index 6ccdb4ffd9831..c5ded10b6f603 100644 --- a/articles/active-directory/develop/includes/web-app/quickstart-nodejs-msal.md +++ b/articles/active-directory/develop/includes/web-app/quickstart-nodejs-msal.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Add authentication to a Node.js web app with MSAL Node | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Add authentication to a Node.js web app with MSAL Node" description: In this quickstart, you learn how to implement authentication with a Node.js web app and the Microsoft Authentication Library (MSAL) for Node.js. services: active-directory author: mmacy @@ -16,7 +15,7 @@ ms.custom: aaddev, scenarios:getting-started, languages:js, devx-track-js # Customer intent: As an application developer, I want to know how to set up authentication in a web application built using Node.js and MSAL Node. 
--- -In this quickstart, you download and run a code sample that demonstrates how a Node.js web app can sign in users by using the authorization code flow. The code sample also demonstrates how to get an access token to call Microsoft Graph API. +In this quickstart, you download and run a code sample that demonstrates how a Node.js web app can sign in users by using the authorization code flow. The code sample also demonstrates how to get an access token to call the Microsoft Graph API. See [How the sample works](#how-the-sample-works) for an illustration. @@ -37,8 +36,8 @@ This quickstart uses the Microsoft Authentication Library for Node.js (MSAL Node 1. If you have access to multiple tenants, use the **Directories + subscriptions** filter :::image type="icon" source="../../media/common/portal-directory-subscription-filter.png" border="false"::: in the top menu to switch to the tenant in which you want to register the application. 1. Under **Manage**, select **App registrations** > **New registration**. 1. Enter a **Name** for your application. Users of your app might see this name, and you can change it later. -1. Under **Supported account types**, select **Accounts in any organizational directory and personal Microsoft accounts**. -1. Set the **Redirect URI** value to `http://localhost:3000/redirect`. +1. Under **Supported account types**, select **Accounts in this organizational directory only**. +1. Set the **Redirect URI** type to **Web** and value to `http://localhost:3000/auth/redirect`. 1. Select **Register**. 1. On the app **Overview** page, note the **Application (client) ID** value for later use. 1. Under **Manage**, select **Certificates & secrets** > **Client secrets** > **New client secret**. Leave the description blank and default expiration, and then select **Add**. @@ -51,45 +50,34 @@ To run the project with a web server by using Node.js, [download the core projec #### Step 3: Configure your Node app -Extract the project, open the *ms-identity-node-main* folder, and then open the *index.js* file. - -Set the `clientID` value with the application (client) ID, and then set the `clientSecret` value with the client secret. - -```javascript -const config = { - auth: { - clientId: "Enter_the_Application_Id_Here", - authority: "https://login.microsoftonline.com/common", - clientSecret: "Enter_the_Client_Secret_Here" - }, -    system: { -        loggerOptions: { -            loggerCallback(loglevel, message, containsPii) { -                console.log(message); -            }, -         piiLoggingEnabled: false, -         logLevel: msal.LogLevel.Verbose, -        } -    } -}; -``` - +Extract the project, open the *ms-identity-node-main* folder, and then open the *.env* file under the *App* folder. 
Replace the values above as follows: -Modify the values in the `config` section: +| Variable | Description | Example(s) | +|-----------|--------------|------------| +| `Enter_the_Cloud_Instance_Id_Here` | The Azure cloud instance in which your application is registered | `https://login.microsoftonline.com/` (include the trailing forward-slash) | +| `Enter_the_Tenant_Info_here` | Tenant ID or Primary domain | `contoso.microsoft.com` or `cbe899ec-5f5c-4efe-b7a0-599505d3d54f` | +| `Enter_the_Application_Id_Here` | Client ID of the application you registered | `cbe899ec-5f5c-4efe-b7a0-599505d3d54f` | +| `Enter_the_Client_Secret_Here` | Client secret of the application you registered | `WxvhStRfDXoEiZQj1qCy` | +| `Enter_the_Graph_Endpoint_Here` | The Microsoft Graph API cloud instance that your app will call | `https://graph.microsoft.com/` (include the trailing forward-slash) | +| `Enter_the_Express_Session_Secret_Here` | A random string of characters used to sign the Express session cookie | `WxvhStRfDXoEiZQj1qCy` | -- `Enter_the_Application_Id_Here` is the application (client) ID for the application you registered. +Your file should look similar to the following: - To find the application (client) ID, go to the app registration's **Overview** page in the Azure portal. -- `Enter_the_Client_Secret_Here` is the client secret for the application you registered. +```text +CLOUD_INSTANCE=https://login.microsoftonline.com/ +TENANT_ID=cbe899ec-5f5c-4efe-b7a0-599505d3d54f +CLIENT_ID=fa29b4c9-7675-4b61-8a0a-bf7b2b4fda91 +CLIENT_SECRET=WxvhStRfDXoEiZQj1qCy - To retrieve or generate a new client secret, under **Manage**, select **Certificates & secrets**. +REDIRECT_URI=http://localhost:3000/auth/redirect +POST_LOGOUT_REDIRECT_URI=http://localhost:3000 -The default `authority` value represents the main (global) Azure cloud: +GRAPH_API_ENDPOINT=https://graph.microsoft.com/ -```javascript -authority: "https://login.microsoftonline.com/common", +EXPRESS_SESSION_SECRET=6DP6v09eLiW7f1E65B8k ``` + #### Step 4: Run the project Run the project by using Node.js. @@ -97,21 +85,22 @@ Run the project by using Node.js. 1. To start the server, run the following commands from within the project directory: ```console + cd App npm install npm start ``` 1. Go to `http://localhost:3000/`. -1. Select **Sign In** to start the sign-in process. +1. Select **Sign in** to start the sign-in process. - The first time you sign in, you're prompted to provide your consent to allow the application to access your profile and sign you in. After you're signed in successfully, you will see a log message in the command line. + The first time you sign in, you're prompted to provide your consent to allow the application to sign you in and access your profile. After you're signed in successfully, you'll be redirected back to the application home page. ## More information ### How the sample works -The sample hosts a web server on localhost, port 3000. When a web browser accesses this site, the sample immediately redirects the user to a Microsoft authentication page. Because of this, the sample does not contain any HTML or display elements. Authentication success displays the message "OK". +The sample hosts a web server on localhost, port 3000. When a web browser accesses this address, the app renders the home page. Once the user selects **Sign in**, the app redirects the browser to the Azure AD sign-in screen via the URL generated by the MSAL Node library.
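A minimal sketch of how that URL might be generated from the *.env* values shown earlier (illustrative only; it assumes the `dotenv` package and the standard MSAL Node confidential client pattern, not the sample's actual source):

```javascript
// Illustrative sketch, not the sample's code: build an MSAL Node confidential
// client from the .env values and generate the authorization URL.
require('dotenv').config();
const msal = require('@azure/msal-node');

const msalConfig = {
    auth: {
        clientId: process.env.CLIENT_ID,
        authority: process.env.CLOUD_INSTANCE + process.env.TENANT_ID,
        clientSecret: process.env.CLIENT_SECRET,
    },
};

const cca = new msal.ConfidentialClientApplication(msalConfig);

// The URL the browser is sent to when the user selects "Sign in".
cca.getAuthCodeUrl({
    scopes: ['user.read'],
    redirectUri: process.env.REDIRECT_URI,
}).then((authCodeUrl) => {
    console.log(authCodeUrl);
});
```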
After the user consents, the browser redirects the user back to the application home page, along with an ID and access token. ### MSAL Node @@ -123,5 +112,6 @@ npm install @azure/msal-node ``` ## Next steps +Learn more about the web app scenario that the Microsoft identity platform supports: > [!div class="nextstepaction"] -> [Adding Auth to an existing web app - GitHub code sample >](https://github.com/AzureAD/microsoft-authentication-library-for-js/tree/dev/samples/msal-node-samples/auth-code) +> [Web app that signs in users scenario](../../scenario-web-app-sign-user-overview.md) diff --git a/articles/active-directory/develop/includes/web-app/quickstart-nodejs.md b/articles/active-directory/develop/includes/web-app/quickstart-nodejs.md index 1a9fc72d7160e..f03303d429f25 100644 --- a/articles/active-directory/develop/includes/web-app/quickstart-nodejs.md +++ b/articles/active-directory/develop/includes/web-app/quickstart-nodejs.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Add user sign-in to a Node.js web app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Add user sign-in to a Node.js web app" description: In this quickstart, you learn how to implement authentication in a Node.js web application using OpenID Connect. services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/includes/web-app/quickstart-python.md b/articles/active-directory/develop/includes/web-app/quickstart-python.md index d32dde22f6c5d..7a7b0aa639861 100644 --- a/articles/active-directory/develop/includes/web-app/quickstart-python.md +++ b/articles/active-directory/develop/includes/web-app/quickstart-python.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Add sign-in with Microsoft to a Python web app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Add sign-in with Microsoft to a Python web app" description: In this quickstart, learn how a Python web app can sign in users, get an access token from the Microsoft identity platform, and call the Microsoft Graph API. services: active-directory author: abhidnya13 diff --git a/articles/active-directory/develop/mark-app-as-publisher-verified.md b/articles/active-directory/develop/mark-app-as-publisher-verified.md index 2bd2c4939f05f..b8a7e09000d69 100644 --- a/articles/active-directory/develop/mark-app-as-publisher-verified.md +++ b/articles/active-directory/develop/mark-app-as-publisher-verified.md @@ -1,5 +1,5 @@ --- -title: Mark an app as publisher verified - Microsoft identity platform | Azure +title: Mark an app as publisher verified description: Describes how to mark an app as publisher verified. When an application is marked as publisher verified, it means that the publisher has verified their identity using a Microsoft Partner Network account that has completed the verification process and has associated this MPN account with their application registration.
services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/graph-call-screen.png b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/graph-call-screen.png new file mode 100644 index 0000000000000..366d9f849001d Binary files /dev/null and b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/graph-call-screen.png differ diff --git a/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/id-token-screen.png b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/id-token-screen.png new file mode 100644 index 0000000000000..7b101c7d24bf3 Binary files /dev/null and b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/id-token-screen.png differ diff --git a/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/post-sign-in-screen.png b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/post-sign-in-screen.png new file mode 100644 index 0000000000000..278d3c2a6b8d9 Binary files /dev/null and b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/post-sign-in-screen.png differ diff --git a/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/sign-out-screen.png b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/sign-out-screen.png new file mode 100644 index 0000000000000..fc47e78a9ab3c Binary files /dev/null and b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/sign-out-screen.png differ diff --git a/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/welcome-screen.png b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/welcome-screen.png new file mode 100644 index 0000000000000..f7a25c5d8aa19 Binary files /dev/null and b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/welcome-screen.png differ diff --git a/articles/active-directory/develop/microsoft-graph-intro.md b/articles/active-directory/develop/microsoft-graph-intro.md index 4deffd4d31998..b485729658056 100644 --- a/articles/active-directory/develop/microsoft-graph-intro.md +++ b/articles/active-directory/develop/microsoft-graph-intro.md @@ -1,7 +1,7 @@ --- title: Microsoft Graph API description: The Microsoft Graph API is a RESTful web API that enables you to access Microsoft Cloud service resources. -author: davidmu1 +author: FaithOmbongi services: active-directory manager: CelesteDG @@ -10,7 +10,7 @@ ms.subservice: develop ms.topic: conceptual ms.workload: identity ms.date: 10/08/2021 -ms.author: davidmu +ms.author: ombongifaith ms.custom: aaddev --- diff --git a/articles/active-directory/develop/microsoft-identity-web.md b/articles/active-directory/develop/microsoft-identity-web.md index f52d9a1da79e4..38b6d58f54822 100644 --- a/articles/active-directory/develop/microsoft-identity-web.md +++ b/articles/active-directory/develop/microsoft-identity-web.md @@ -1,6 +1,5 @@ --- title: Microsoft Identity Web authentication library overview -titleSuffix: Microsoft identity platform description: Learn about Microsoft Identity Web, an authentication and authorization library for ASP.NET Core applications that integrate with Azure Active Directory, Azure AD B2C, and Microsoft Graph and other web APIs. 
services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/migrate-adal-msal-java.md b/articles/active-directory/develop/migrate-adal-msal-java.md index 401a235926a76..fbc8de9b35f0d 100644 --- a/articles/active-directory/develop/migrate-adal-msal-java.md +++ b/articles/active-directory/develop/migrate-adal-msal-java.md @@ -1,6 +1,5 @@ --- -title: ADAL to MSAL migration guide (MSAL4j) | Azure -titleSuffix: Microsoft identity platform +title: ADAL to MSAL migration guide (MSAL4j) description: Learn how to migrate your Azure Active Directory Authentication Library (ADAL) Java app to the Microsoft Authentication Library (MSAL). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/migrate-android-adal-msal.md b/articles/active-directory/develop/migrate-android-adal-msal.md index 84effdc70b2ee..11bc3af565f0b 100644 --- a/articles/active-directory/develop/migrate-android-adal-msal.md +++ b/articles/active-directory/develop/migrate-android-adal-msal.md @@ -1,6 +1,5 @@ --- -title: ADAL to MSAL migration guide for Android | Azure -titleSuffix: Microsoft identity platform +title: ADAL to MSAL migration guide for Android description: Learn how to migrate your Azure Active Directory Authentication Library (ADAL) Android app to the Microsoft Authentication Library (MSAL). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/migrate-objc-adal-msal.md b/articles/active-directory/develop/migrate-objc-adal-msal.md index 973ab6749ebe2..088d81297f13b 100644 --- a/articles/active-directory/develop/migrate-objc-adal-msal.md +++ b/articles/active-directory/develop/migrate-objc-adal-msal.md @@ -1,6 +1,5 @@ --- -title: ADAL to MSAL migration guide (MSAL iOS/macOS) | Azure -titleSuffix: Microsoft identity platform +title: ADAL to MSAL migration guide (MSAL iOS/macOS) description: Learn the differences between MSAL for iOS/macOS and the Azure AD Authentication Library for ObjectiveC (ADAL.ObjC) and how to migrate to MSAL for iOS/macOS. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/migrate-python-adal-msal.md b/articles/active-directory/develop/migrate-python-adal-msal.md index ada6c68919b5c..8bb1e01125ac8 100644 --- a/articles/active-directory/develop/migrate-python-adal-msal.md +++ b/articles/active-directory/develop/migrate-python-adal-msal.md @@ -1,6 +1,5 @@ --- -title: Python ADAL to MSAL migration guide | Azure -titleSuffix: Microsoft identity platform +title: Python ADAL to MSAL migration guide description: Learn how to migrate your Azure Active Directory Authentication Library (ADAL) Python app to the Microsoft Authentication Library (MSAL) for Python. services: active-directory author: rayluo diff --git a/articles/active-directory/develop/migrate-spa-implicit-to-auth-code.md b/articles/active-directory/develop/migrate-spa-implicit-to-auth-code.md index 7cd2e7207d188..d38bef57aabba 100644 --- a/articles/active-directory/develop/migrate-spa-implicit-to-auth-code.md +++ b/articles/active-directory/develop/migrate-spa-implicit-to-auth-code.md @@ -1,6 +1,5 @@ --- -title: Migrate JavaScript single-page app from implicit grant to authorization code flow | Azure -titleSuffix: Microsoft identity platform +title: Migrate JavaScript single-page app from implicit grant to authorization code flow description: How to update a JavaScript SPA using MSAL.js 1.x and the implicit grant flow to MSAL.js 2.x and the authorization code flow with PKCE and CORS support. 
services: active-directory author: mmacy diff --git a/articles/active-directory/develop/mobile-app-quickstart-portal-android.md b/articles/active-directory/develop/mobile-app-quickstart-portal-android.md index edab8ca0d69b6..a6642a315edd9 100644 --- a/articles/active-directory/develop/mobile-app-quickstart-portal-android.md +++ b/articles/active-directory/develop/mobile-app-quickstart-portal-android.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Add sign in with Microsoft to an Android app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Add sign in with Microsoft to an Android app" description: In this quickstart, learn how Android applications can call an API that requires access tokens issued by the Microsoft identity platform. services: active-directory author: mmacy @@ -24,7 +23,7 @@ ms.custom: aaddev, identityplatformtop40, "scenarios:getting-started", "language > > We apologize for the inconvenience and appreciate your patience while we work to get this resolved. -> [!div renderon="portal" class="sxs-lookup display-on-portal"] +> [!div renderon="portal" id="display-on-portal" class="sxs-lookup"] > # Quickstart: Sign in users and call the Microsoft Graph API from an Android app > > In this quickstart, you download and run a code sample that demonstrates how an Android application can sign in users and get an access token to call the Microsoft Graph API. @@ -42,7 +41,7 @@ ms.custom: aaddev, identityplatformtop40, "scenarios:getting-started", "language > ### Step 1: Configure your application in the Azure portal > For the code sample in this quickstart to work, add a **Redirect URI** compatible with the Auth broker. > -> +> > > > [!div id="appconfigured" class="alert alert-info"] > > ![Already configured](media/quickstart-v2-android/green-check.png) Your application is configured with these attributes @@ -50,7 +49,9 @@ ms.custom: aaddev, identityplatformtop40, "scenarios:getting-started", "language > ### Step 2: Download the project > > Run the project using Android Studio. -> +> +> > [!div class="nextstepaction"] +> > > > > ### Step 3: Your app is configured and ready to run @@ -484,4 +485,4 @@ ms.custom: aaddev, identityplatformtop40, "scenarios:getting-started", "language > Move on to the Android tutorial in which you build an Android app that gets an access token from the Microsoft identity platform and uses it to call the Microsoft Graph API. > > > [!div class="nextstepaction"] -> > [Tutorial: Sign in users and call the Microsoft Graph from an Android application](tutorial-v2-android.md) \ No newline at end of file +> > [Tutorial: Sign in users and call the Microsoft Graph from an Android application](tutorial-v2-android.md) diff --git a/articles/active-directory/develop/mobile-app-quickstart-portal-ios.md b/articles/active-directory/develop/mobile-app-quickstart-portal-ios.md index 87e9bc2f40c53..24a5f4e718f68 100644 --- a/articles/active-directory/develop/mobile-app-quickstart-portal-ios.md +++ b/articles/active-directory/develop/mobile-app-quickstart-portal-ios.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Add sign in with Microsoft to an iOS or macOS app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Add sign in with Microsoft to an iOS or macOS app" description: In this quickstart, learn how an iOS or macOS app can sign in users, get an access token from the Microsoft identity platform, and call the Microsoft Graph API. 
services: active-directory author: mmacy @@ -26,7 +25,7 @@ ms.custom: aaddev, identityplatformtop40, "scenarios:getting-started", "language > > We apologize for the inconvenience and appreciate your patience while we work to get this resolved. -> [!div renderon="portal" class="sxs-lookup display-on-portal"] +> [!div renderon="portal" id="display-on-portal" class="sxs-lookup"] > # Quickstart: Sign in users and call the Microsoft Graph API from an iOS or macOS app > > In this quickstart, you download and run a code sample that demonstrates how a native iOS or macOS application can sign in users and get an access token to call the Microsoft Graph API. @@ -47,16 +46,18 @@ ms.custom: aaddev, identityplatformtop40, "scenarios:getting-started", "language > #### Step 1: Configure your application > For the code sample in this quickstart to work, add a **Redirect URI** compatible with the Auth broker. > -> +> > > > [!div id="appconfigured" class="alert alert-info"] > > ![Already configured](media/quickstart-v2-ios/green-check.png) Your application is configured with these attributes > > #### Step 2: Download the sample project > -> -> -> +> > [!div class="nextstepaction"] +> > +> +> > [!div class="nextstepaction"] +> > > > #### Step 3: Install dependencies > @@ -238,4 +239,4 @@ ms.custom: aaddev, identityplatformtop40, "scenarios:getting-started", "language > Move on to the step-by-step tutorial in which you build an iOS or macOS app that gets an access token from the Microsoft identity platform and uses it to call the Microsoft Graph API. > > > [!div class="nextstepaction"] -> > [Tutorial: Sign in users and call Microsoft Graph from an iOS or macOS app](tutorial-v2-ios.md) \ No newline at end of file +> > [Tutorial: Sign in users and call Microsoft Graph from an iOS or macOS app](tutorial-v2-ios.md) diff --git a/articles/active-directory/develop/mobile-app-quickstart.md b/articles/active-directory/develop/mobile-app-quickstart.md index 25073a37c7bb2..739a3432ed7ff 100644 --- a/articles/active-directory/develop/mobile-app-quickstart.md +++ b/articles/active-directory/develop/mobile-app-quickstart.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Add sign in with Microsoft to a mobile app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Add sign in with Microsoft to a mobile app" description: In this quickstart, learn how a mobile app can sign in users, get an access token from the Microsoft identity platform, and call the Microsoft Graph API. services: active-directory author: Dickson-Mwendia diff --git a/articles/active-directory/develop/mobile-sso-support-overview.md b/articles/active-directory/develop/mobile-sso-support-overview.md index f27091d00cef2..7fbea6ae683b9 100644 --- a/articles/active-directory/develop/mobile-sso-support-overview.md +++ b/articles/active-directory/develop/mobile-sso-support-overview.md @@ -1,6 +1,5 @@ --- -title: Support single sign-on and app protection policies in mobile apps you develop | Azure -titleSuffix: Microsoft identity platform +title: Support single sign-on and app protection policies in mobile apps you develop description: Explanation and overview of building mobile applications that support single sign-on and app protection policies using the Microsoft identity platform and integrating with Azure Active Directory. 
services: active-directory author: knicholasa diff --git a/articles/active-directory/develop/msal-acquire-cache-tokens.md b/articles/active-directory/develop/msal-acquire-cache-tokens.md index 02fc388122079..c102d029077dd 100644 --- a/articles/active-directory/develop/msal-acquire-cache-tokens.md +++ b/articles/active-directory/develop/msal-acquire-cache-tokens.md @@ -1,6 +1,5 @@ --- -title: Acquire and cache tokens with Microsoft Authentication Library (MSAL) | Azure -titleSuffix: Microsoft identity platform +title: Acquire and cache tokens with Microsoft Authentication Library (MSAL) description: Learn about acquiring and caching tokens using MSAL. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-android-b2c.md b/articles/active-directory/develop/msal-android-b2c.md index c6fa91e10f199..4be52c906cfc0 100644 --- a/articles/active-directory/develop/msal-android-b2c.md +++ b/articles/active-directory/develop/msal-android-b2c.md @@ -1,6 +1,5 @@ --- -title: Azure AD B2C (MSAL Android) | Azure -titleSuffix: Microsoft identity platform +title: Azure AD B2C (MSAL Android) description: Learn about specific considerations when using Azure AD B2C with the Microsoft Authentication Library for Android (MSAL.Android) services: active-directory author: iambmelt diff --git a/articles/active-directory/develop/msal-android-handling-exceptions.md b/articles/active-directory/develop/msal-android-handling-exceptions.md index 116c0a77d3d95..dd1b08cb3974a 100644 --- a/articles/active-directory/develop/msal-android-handling-exceptions.md +++ b/articles/active-directory/develop/msal-android-handling-exceptions.md @@ -1,6 +1,5 @@ --- -title: Errors and exceptions (MSAL Android) | Azure -titleSuffix: Microsoft identity platform +title: Errors and exceptions (MSAL Android) description: Learn how to handle errors and exceptions, Conditional Access, and claims challenges in MSAL Android applications. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-android-shared-devices.md b/articles/active-directory/develop/msal-android-shared-devices.md index 8b301fe3ec423..f368ed5c765e3 100644 --- a/articles/active-directory/develop/msal-android-shared-devices.md +++ b/articles/active-directory/develop/msal-android-shared-devices.md @@ -1,6 +1,5 @@ --- title: Shared device mode for Android devices -titleSuffix: Microsoft identity platform | Azure description: Learn how to enable shared device mode to allow frontline workers to share an Android device services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-android-single-sign-on.md b/articles/active-directory/develop/msal-android-single-sign-on.md index dac9a22556879..53c7104c9e4cb 100644 --- a/articles/active-directory/develop/msal-android-single-sign-on.md +++ b/articles/active-directory/develop/msal-android-single-sign-on.md @@ -1,6 +1,5 @@ --- -title: How to enable cross-app SSO on Android using MSAL | Azure -titleSuffix: Microsoft identity platform +title: How to enable cross-app SSO on Android using MSAL description: How to use the Microsoft Authentication Library (MSAL) for Android to enable single sign-on across your applications. 
services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-authentication-flows.md b/articles/active-directory/develop/msal-authentication-flows.md index 23266605ee204..9cdd5e5c0b4d0 100644 --- a/articles/active-directory/develop/msal-authentication-flows.md +++ b/articles/active-directory/develop/msal-authentication-flows.md @@ -1,6 +1,5 @@ --- -title: Authentication flow support in the Microsoft Authentication Library (MSAL) | Azure -titleSuffix: Microsoft identity platform +title: Authentication flow support in the Microsoft Authentication Library (MSAL) description: Learn about the authorization grants and authentication flows supported by MSAL. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-b2c-overview.md b/articles/active-directory/develop/msal-b2c-overview.md index e0ec3b8d8fe8c..99ab9f9f40bd3 100644 --- a/articles/active-directory/develop/msal-b2c-overview.md +++ b/articles/active-directory/develop/msal-b2c-overview.md @@ -1,6 +1,5 @@ --- title: Use MSAL.js with Azure AD B2C -titleSuffix: Microsoft identity platform description: The Microsoft Authentication Library for JavaScript (MSAL.js) enables applications to work with Azure AD B2C and acquire tokens to call secured web APIs. These web APIs can be Microsoft Graph, other Microsoft APIs, web APIs from others, or your own web API. services: active-directory author: negoe diff --git a/articles/active-directory/develop/msal-client-application-configuration.md b/articles/active-directory/develop/msal-client-application-configuration.md index 5b0fbf4a786bd..7861a0522c29a 100644 --- a/articles/active-directory/develop/msal-client-application-configuration.md +++ b/articles/active-directory/develop/msal-client-application-configuration.md @@ -1,6 +1,5 @@ --- -title: Client application configuration (MSAL) | Azure -titleSuffix: Microsoft identity platform +title: Client application configuration (MSAL) description: Learn about configuration options for public client and confidential client applications using the Microsoft Authentication Library (MSAL). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-client-applications.md b/articles/active-directory/develop/msal-client-applications.md index f0b5b5fb1760c..1fe87af8400f6 100644 --- a/articles/active-directory/develop/msal-client-applications.md +++ b/articles/active-directory/develop/msal-client-applications.md @@ -1,6 +1,5 @@ --- -title: Public and confidential client apps (MSAL) | Azure -titleSuffix: Microsoft identity platform +title: Public and confidential client apps (MSAL) description: Learn about public client and confidential client applications in the Microsoft Authentication Library (MSAL). 
services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-compare-msal-js-and-adal-js.md b/articles/active-directory/develop/msal-compare-msal-js-and-adal-js.md index 3853165322312..365a4511f47eb 100644 --- a/articles/active-directory/develop/msal-compare-msal-js-and-adal-js.md +++ b/articles/active-directory/develop/msal-compare-msal-js-and-adal-js.md @@ -1,6 +1,5 @@ --- -title: "Migrate your JavaScript application from ADAL.js to MSAL.js | Azure" -titleSuffix: Microsoft identity platform +title: "Migrate your JavaScript application from ADAL.js to MSAL.js" description: How to update your existing JavaScript application to use the Microsoft Authentication Library (MSAL) for authentication and authorization instead of the Active Directory Authentication Library (ADAL). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-configuration.md b/articles/active-directory/develop/msal-configuration.md index 1480cf626713c..306cd2335b226 100644 --- a/articles/active-directory/develop/msal-configuration.md +++ b/articles/active-directory/develop/msal-configuration.md @@ -1,6 +1,5 @@ --- -title: Android MSAL configuration file | Azure -titleSuffix: Microsoft identity platform +title: Android MSAL configuration file description: An overview of the Android Microsoft Authentication Library (MSAL) configuration file, which represents an application's configuration in Azure Active Directory. services: active-directory author: shoatman diff --git a/articles/active-directory/develop/msal-differences-ios-macos.md b/articles/active-directory/develop/msal-differences-ios-macos.md index e349aae040c05..18af77312825e 100644 --- a/articles/active-directory/develop/msal-differences-ios-macos.md +++ b/articles/active-directory/develop/msal-differences-ios-macos.md @@ -1,6 +1,5 @@ --- -title: MSAL for iOS & macOS differences | Azure -titleSuffix: Microsoft identity platform +title: MSAL for iOS & macOS differences description: Describes the Microsoft Authentication Library (MSAL) usage differences between iOS and macOS. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-error-handling-dotnet.md b/articles/active-directory/develop/msal-error-handling-dotnet.md index 59eb215ba2a3b..219184d01c78f 100644 --- a/articles/active-directory/develop/msal-error-handling-dotnet.md +++ b/articles/active-directory/develop/msal-error-handling-dotnet.md @@ -1,6 +1,5 @@ --- title: Handle errors and exceptions in MSAL.NET -titleSuffix: Microsoft identity platform description: Learn how to handle errors and exceptions, Conditional Access claims challenges, and retries in MSAL.NET. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-error-handling-ios.md b/articles/active-directory/develop/msal-error-handling-ios.md index 09c8db32f7704..192cc8f59f084 100644 --- a/articles/active-directory/develop/msal-error-handling-ios.md +++ b/articles/active-directory/develop/msal-error-handling-ios.md @@ -1,6 +1,5 @@ --- title: Handle errors and exceptions in MSAL for iOS/macOS -titleSuffix: Microsoft identity platform description: Learn how to handle errors and exceptions, Conditional Access claims challenges, and retries in MSAL for iOS/macOS applications. 
services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-error-handling-java.md b/articles/active-directory/develop/msal-error-handling-java.md index f866d03bd3642..56476a017a7e4 100644 --- a/articles/active-directory/develop/msal-error-handling-java.md +++ b/articles/active-directory/develop/msal-error-handling-java.md @@ -1,6 +1,5 @@ --- title: Handle errors and exceptions in MSAL4J -titleSuffix: Microsoft identity platform description: Learn how to handle errors and exceptions, Conditional Access claims challenges, and retries in MSAL4J applications. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-error-handling-js.md b/articles/active-directory/develop/msal-error-handling-js.md index c3f320b1533d0..ae92640221232 100644 --- a/articles/active-directory/develop/msal-error-handling-js.md +++ b/articles/active-directory/develop/msal-error-handling-js.md @@ -1,6 +1,5 @@ --- title: Handle errors and exceptions in MSAL.js -titleSuffix: Microsoft identity platform description: Learn how to handle errors and exceptions, Conditional Access claims challenges, and retries in MSAL.js applications. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-error-handling-python.md b/articles/active-directory/develop/msal-error-handling-python.md index 5a6d84fe50a14..520ddd12b6ca3 100644 --- a/articles/active-directory/develop/msal-error-handling-python.md +++ b/articles/active-directory/develop/msal-error-handling-python.md @@ -1,6 +1,5 @@ --- title: Handle errors and exceptions in MSAL for Python -titleSuffix: Microsoft identity platform description: Learn how to handle errors and exceptions, Conditional Access claims challenges, and retries in MSAL for Python applications. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-ios-shared-devices.md b/articles/active-directory/develop/msal-ios-shared-devices.md index 7a40fb3bb550f..3b4e14cef517d 100644 --- a/articles/active-directory/develop/msal-ios-shared-devices.md +++ b/articles/active-directory/develop/msal-ios-shared-devices.md @@ -1,6 +1,5 @@ --- title: Shared device mode for iOS devices -titleSuffix: Microsoft identity platform | Azure description: Learn how to enable shared device mode to allow frontline workers to share an iOS device services: active-directory author: brandwe diff --git a/articles/active-directory/develop/msal-java-adfs-support.md b/articles/active-directory/develop/msal-java-adfs-support.md index 01bc576b5007d..6e3e599981747 100644 --- a/articles/active-directory/develop/msal-java-adfs-support.md +++ b/articles/active-directory/develop/msal-java-adfs-support.md @@ -1,6 +1,5 @@ --- title: AD FS support (MSAL for Java) -titleSuffix: Microsoft identity platform description: Learn about Active Directory Federation Services (AD FS) support in the Microsoft Authentication Library for Java (MSAL4j). 
services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-java-get-remove-accounts-token-cache.md b/articles/active-directory/develop/msal-java-get-remove-accounts-token-cache.md index 182f6062199a5..546f42321b67d 100644 --- a/articles/active-directory/develop/msal-java-get-remove-accounts-token-cache.md +++ b/articles/active-directory/develop/msal-java-get-remove-accounts-token-cache.md @@ -1,6 +1,5 @@ --- -title: Get & remove accounts from the token cache (MSAL4j) | Azure -titleSuffix: Microsoft identity platform +title: Get & remove accounts from the token cache (MSAL4j) description: Learn how to view and remove accounts from the token cache using the Microsoft Authentication Library for Java. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-java-token-cache-serialization.md b/articles/active-directory/develop/msal-java-token-cache-serialization.md index e73902780334e..12158f3b1cdc0 100644 --- a/articles/active-directory/develop/msal-java-token-cache-serialization.md +++ b/articles/active-directory/develop/msal-java-token-cache-serialization.md @@ -1,6 +1,5 @@ --- title: Custom token cache serialization (MSAL4j) -titleSuffix: Microsoft identity platform description: Learn how to serialize the token cache for MSAL for Java services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-js-avoid-page-reloads.md b/articles/active-directory/develop/msal-js-avoid-page-reloads.md index 786ded358e9c0..8b3a5729f9cec 100644 --- a/articles/active-directory/develop/msal-js-avoid-page-reloads.md +++ b/articles/active-directory/develop/msal-js-avoid-page-reloads.md @@ -1,6 +1,5 @@ --- -title: Avoid page reloads (MSAL.js) | Azure -titleSuffix: Microsoft identity platform +title: Avoid page reloads (MSAL.js) description: Learn how to avoid page reloads when acquiring and renewing tokens silently using the Microsoft Authentication Library for JavaScript (MSAL.js). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-js-initializing-client-applications.md b/articles/active-directory/develop/msal-js-initializing-client-applications.md index 7bd6aa6c48368..d30a2dceb2a75 100644 --- a/articles/active-directory/develop/msal-js-initializing-client-applications.md +++ b/articles/active-directory/develop/msal-js-initializing-client-applications.md @@ -1,6 +1,5 @@ --- -title: Initialize MSAL.js client apps | Azure -titleSuffix: Microsoft identity platform +title: Initialize MSAL.js client apps description: Learn about initializing client applications using the Microsoft Authentication Library for JavaScript (MSAL.js). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-js-known-issues-ie-edge-browsers.md b/articles/active-directory/develop/msal-js-known-issues-ie-edge-browsers.md index cf0a752e880ac..d11ffcd6f2abb 100644 --- a/articles/active-directory/develop/msal-js-known-issues-ie-edge-browsers.md +++ b/articles/active-directory/develop/msal-js-known-issues-ie-edge-browsers.md @@ -1,6 +1,5 @@ --- -title: Issues on Internet Explorer & Microsoft Edge (MSAL.js) | Azure -titleSuffix: Microsoft identity platform +title: Issues on Internet Explorer & Microsoft Edge (MSAL.js) description: Learn about know issues when using the Microsoft Authentication Library for JavaScript (MSAL.js) with Internet Explorer and Microsoft Edge browsers. 
services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-js-pass-custom-state-authentication-request.md b/articles/active-directory/develop/msal-js-pass-custom-state-authentication-request.md index 1241660c0dd50..6e01937743269 100644 --- a/articles/active-directory/develop/msal-js-pass-custom-state-authentication-request.md +++ b/articles/active-directory/develop/msal-js-pass-custom-state-authentication-request.md @@ -1,6 +1,5 @@ --- -title: Pass custom state in authentication requests (MSAL.js) | Azure -titleSuffix: Microsoft identity platform +title: Pass custom state in authentication requests (MSAL.js) description: Learn how to pass a custom state parameter value in authentication request using the Microsoft Authentication Library for JavaScript (MSAL.js). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-js-prompt-behavior.md b/articles/active-directory/develop/msal-js-prompt-behavior.md index 0825c5d90ad48..614ac7a1ce3b7 100644 --- a/articles/active-directory/develop/msal-js-prompt-behavior.md +++ b/articles/active-directory/develop/msal-js-prompt-behavior.md @@ -1,6 +1,5 @@ --- -title: Interactive request prompt behavior (MSAL.js) | Azure -titleSuffix: Microsoft identity platform +title: Interactive request prompt behavior (MSAL.js) description: Learn to customize prompt behavior in interactive calls using the Microsoft Authentication Library for JavaScript (MSAL.js). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-js-sso.md b/articles/active-directory/develop/msal-js-sso.md index 8ba97653b94ca..7dc4363516c62 100644 --- a/articles/active-directory/develop/msal-js-sso.md +++ b/articles/active-directory/develop/msal-js-sso.md @@ -1,6 +1,5 @@ --- -title: Single sign-on (MSAL.js) | Azure -titleSuffix: Microsoft identity platform +title: Single sign-on (MSAL.js) description: Learn about building single sign-on experiences using the Microsoft Authentication Library for JavaScript (MSAL.js). services: active-directory author: mmacy @@ -19,20 +18,18 @@ ms.custom: aaddev, has-adal-ref # Single sign-on with MSAL.js -Single sign-on (SSO) provides a more seamless experience by reducing the number of times your users are asked for their credentials. Users enter their credentials once, and the established session can be reused by other applications on the device without further prompting. +Single sign-on (SSO) provides a more seamless experience by reducing the number of times your users are asked for their credentials. Users enter their credentials once, and the established session can be reused by other applications on the device without further prompting. -Azure Active Directory (Azure AD) enables SSO by setting a session cookie when a user first authenticates. MSAL.js allows use of the session cookie for SSO between the browser tabs opened for one or several applications. +Azure Active Directory (Azure AD) enables SSO by setting a session cookie when a user authenticates for the first time. MSAL.js allows the usage of the session cookie for SSO between the browser tabs opened for one or several applications. -## SSO between browser tabs +## SSO between browser tabs for the same app -When a user has your application open in several tabs and signs in on one of them, they're signed into the same app open on the other tabs without being prompted. 
MSAL.js caches the ID token for the user in the browser `localStorage` and will sign the user in to the application on the other open tabs. - -By default, MSAL.js uses `sessionStorage`, which doesn't allow the session to be shared between tabs. To get SSO between tabs, make sure to set the `cacheLocation` in MSAL.js to `localStorage` as shown below. +When a user has your application open in several tabs and signs in on one of them, they can be signed into the same app open on the other tabs without being prompted. To do so, you'll need to set the *cacheLocation* in the MSAL.js configuration object to `localStorage` as shown below. ```javascript const config = { auth: { - clientId: "abcd-ef12-gh34-ikkl-ashdjhlhsdg", + clientId: "1111-2222-3333-4444-55555555", }, cache: { cacheLocation: "localStorage", @@ -42,61 +39,65 @@ const msalInstance = new msal.PublicClientApplication(config); ``` -## SSO between apps - -When a user authenticates, a session cookie is set on the Azure AD domain in the browser. MSAL.js relies on this session cookie to provide SSO for the user between different applications. MSAL.js also caches the ID tokens and access tokens of the user in the browser storage per application domain. As a result, the SSO behavior varies for different cases: - -### Applications on the same domain - -When applications are hosted on the same domain, the user can sign into an app once and then get authenticated to the other apps without a prompt. MSAL.js uses the tokens cached for the user on the domain to provide SSO. - -### Applications on different domain - -When applications are hosted on different domains, the tokens cached on domain A cannot be accessed by MSAL.js in domain B. - -When a user signed in on domain A navigates to an application on domain B, they're typically redirected or prompted to sign in. Because Azure AD still has the user's session cookie, it signs in the user without prompting for credentials. +## SSO between different apps -If the user has multiple user accounts in a session with Azure AD, the user is prompted to pick an account to sign in with. +When a user authenticates, a session cookie is set on the Azure AD domain in the browser. MSAL.js relies on this session cookie to provide SSO for the user between different applications. MSAL.js also caches the ID tokens and access tokens of the user in the browser storage per application domain. -### Automatic account selection +MSAL.js offers the `ssoSilent` method to sign in the user and obtain tokens without user interaction. However, if the user has multiple user accounts in a session with Azure AD, then the user is prompted to pick an account to sign in with. As such, there are two ways to achieve SSO using the `ssoSilent` method. -When a user is signed in concurrently to multiple Azure AD accounts on the same device, you might find you have the need to bypass the account selection prompt. +### With user hint -**Using a session ID** +To improve performance and ensure that the authorization server will look for the correct account session, you can pass one of the following options in the request object of the `ssoSilent` method to obtain the token silently. -Use the session ID (SID) in silent authentication requests you make with `acquireTokenSilent` in MSAL.js.
+ +- Session ID `sid` (which can be retrieved from `idTokenClaims` of an `account` object) +- `login_hint` (which can be retrieved from the `account` object username property or the `upn` claim in the ID token) +- `account` (which can be retrieved by using one of the [account methods](https://github.com/AzureAD/microsoft-authentication-library-for-js/blob/dev/lib/msal-browser/docs/login-user.md#account-apis)) -To use a SID, add `sid` as an [optional claim](active-directory-optional-claims.md) to your app's ID tokens. The `sid` claim allows an application to identify a user's Azure AD session independent of their account name or username. To learn how to add optional claims like `sid`, see [Provide optional claims to your app](active-directory-optional-claims.md). +#### Using a session ID -The SID is bound to the session cookie and won't cross browser contexts. You can use the SID only with `acquireTokenSilent`. +To use a session ID, add `sid` as an [optional claim](active-directory-optional-claims.md) to your app's ID tokens. The `sid` claim allows an application to identify a user's Azure AD session independent of their account name or username. To learn how to add optional claims like `sid`, see [Provide optional claims to your app](active-directory-optional-claims.md). Use the session ID (SID) in silent authentication requests you make with `ssoSilent` in MSAL.js. ```javascript -var request = { +const request = { scopes: ["user.read"], sid: sid, }; - msalInstance.acquireTokenSilent(request) - .then(function (response) { - const token = response.accessToken; - }) - .catch(function (error) { - //handle error - }); + try { + const loginResponse = await msalInstance.ssoSilent(request); +} catch (err) { + if (err instanceof InteractionRequiredAuthError) { + const loginResponse = await msalInstance.loginPopup(request).catch(error => { + // handle error + }); + } else { + // handle error + } +} ``` -**Using a login hint** +#### Using a login hint To bypass the account selection prompt typically shown during interactive authentication requests (or for silent requests when you haven't configured the `sid` optional claim), provide a `loginHint`. In multi-tenant applications, also include a `domain_hint`. ```javascript -var request = { +const request = { scopes: ["user.read"], loginHint: preferred_username, extraQueryParameters: { domain_hint: "organizations" }, }; - msalInstance.loginRedirect(request); +try { + const loginResponse = await msalInstance.ssoSilent(request); +} catch (err) { + if (err instanceof InteractionRequiredAuthError) { + const loginResponse = await msalInstance.loginPopup(request).catch(error => { + // handle error + }); + } else { + // handle error + } +} ``` Get the values for `loginHint` and `domain_hint` from the user's **ID token**: @@ -107,34 +108,83 @@ Get the values for `loginHint` and `domain_hint` from the user's **ID token**: For more information about login hint and domain hint, see [Microsoft identity platform and OAuth 2.0 authorization code flow](v2-oauth2-auth-code-flow.md). -## SSO without MSAL.js login +#### Using an account object -By design, MSAL.js requires that a login method is called to establish a user context before getting tokens for APIs. Since login methods are interactive, the user sees a prompt.
+If you know the user account information, you can also retrieve the user account by using the `getAccountByUsername()` or `getAccountByHomeId()` methods: -There are certain cases in which applications have access to the authenticated user's context or ID token through authentication initiated in another application and want to use SSO to acquire tokens without first signing in through MSAL.js. ```javascript const username = "test@contoso.com"; const myAccount = msalInstance.getAccountByUsername(username); + +const request = { + scopes: ["User.Read"], + account: myAccount +}; -An example: A user is signed in to Microsoft account in a browser that hosts another JavaScript application running as an add-on or plugin, which requires a Microsoft account sign-in. +try { + const loginResponse = await msalInstance.ssoSilent(request); + } catch (err) { + if (err instanceof InteractionRequiredAuthError) { + const loginResponse = await msalInstance.loginPopup(request).catch(error => { + // handle error + }); + } else { + // handle error + } +} +``` -The SSO experience in this scenario can be achieved as follows: +### Without user hint -Pass the `sid` if available (or `login_hint` and optionally `domain_hint`) as request parameters to the MSAL.js `acquireTokenSilent` call as follows: +You can attempt to use the `ssoSilent` method without passing any `account`, `sid` or `login_hint` as shown in the code below: ```javascript -var request = { - scopes: ["user.read"], - loginHint: preferred_username, - extraQueryParameters: { domain_hint: "organizations" }, +const request = { + scopes: ["User.Read"] }; -msalInstance.acquireTokenSilent(request) - .then(function (response) { - const token = response.accessToken; - }) - .catch(function (error) { - //handle error - }); +try { + const loginResponse = await msalInstance.ssoSilent(request); +} catch (err) { + if (err instanceof InteractionRequiredAuthError) { + const loginResponse = await msalInstance.loginPopup(request).catch(error => { + // handle error + }); + } else { + // handle error + } +} ``` +However, there's a likelihood of silent sign-in errors if the application has multiple users in a single browser session or if the user has multiple accounts in that browser session. You may see the following error in the case of multiple accounts: + +```txt +InteractionRequiredAuthError: interaction_required: AADSTS16000: Either multiple user identities are available for the current request or selected account is not supported for the scenario. +``` + +The error indicates that the server couldn't determine which account to sign into, and will require either one of the parameters above (`account`, `login_hint`, `sid`) or an interactive sign-in to choose the account. + +## Considerations when using `ssoSilent` + +### Redirect URI (reply URL) + +For better performance and to help avoid issues, set the `redirectUri` to a blank page or other page that doesn't use MSAL. + +- If your application uses only popup and silent methods, set the `redirectUri` on the `PublicClientApplication` configuration object. +- If your application also uses redirect methods, set the `redirectUri` on a per-request basis. + +### Third-party cookies + +`ssoSilent` attempts to open a hidden iframe and reuse an existing session with Azure AD. This won't work in browsers that block third-party cookies, such as Safari, and will lead to an interaction error: + +```txt +InteractionRequiredAuthError: login_required: AADSTS50058: A silent sign-in request was sent but no user is signed in.
The cookies used to represent the user's session were not sent in the request to Azure AD +``` + +To resolve the error, the user must sign in interactively by using the `loginPopup()` or `loginRedirect()` method. + +Additionally, the request object is required when using the **silent** methods. If you already have the user's sign-in information, you can pass either the `loginHint` or `sid` optional parameter to sign in a specific account. ## SSO in ADAL.js to MSAL.js update MSAL.js brings feature parity with ADAL.js for Azure AD authentication scenarios. To make the migration from ADAL.js to MSAL.js easy and to avoid prompting your users to sign in again, the library reads the ID token representing user’s session in ADAL.js cache, and seamlessly signs in the user in MSAL.js. @@ -145,7 +195,7 @@ To take advantage of the SSO behavior when updating from ADAL.js, you'll need to // In ADAL.js window.config = { - clientId: "g075edef-0efa-453b-997b-de1337c29185", + clientId: "1111-2222-3333-4444-55555555", cacheLocation: "localStorage", }; var authContext = new AuthenticationContext(config); // In latest MSAL.js version const config = { auth: { - clientId: "abcd-ef12-gh34-ikkl-ashdjhlhsdg", + clientId: "1111-2222-3333-4444-55555555", }, cache: { cacheLocation: "localStorage", @@ -170,5 +220,6 @@ Once the `cacheLocation` is configured, MSAL.js can read the cached state of the For more information about SSO, see: -- [Single Sign-On SAML protocol](single-sign-on-saml-protocol.md) +- [Single Sign-on SAML protocol](single-sign-on-saml-protocol.md) +- [Optional token claims](active-directory-optional-claims.md) - [Configurable token lifetimes](active-directory-configurable-token-lifetimes.md) diff --git a/articles/active-directory/develop/msal-js-use-ie-browser.md b/articles/active-directory/develop/msal-js-use-ie-browser.md index c59a998c4ce4c..1203cecbc6966 100--- a/articles/active-directory/develop/msal-js-use-ie-browser.md +++ b/articles/active-directory/develop/msal-js-use-ie-browser.md @@ -1,6 +1,5 @@ --- -title: Issues on Internet Explorer (MSAL.js) | Azure -titleSuffix: Microsoft identity platform +title: Issues on Internet Explorer (MSAL.js) description: Use the Microsoft Authentication Library for JavaScript (MSAL.js) with Internet Explorer browser. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-logging-android.md b/articles/active-directory/develop/msal-logging-android.md index 5b94cc351608a..8329df9bae997 100644 --- a/articles/active-directory/develop/msal-logging-android.md +++ b/articles/active-directory/develop/msal-logging-android.md @@ -1,6 +1,5 @@ --- title: Logging errors and exceptions in MSAL for Android. -titleSuffix: Microsoft identity platform description: Learn how to log errors and exceptions in MSAL for Android.
services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-logging-dotnet.md b/articles/active-directory/develop/msal-logging-dotnet.md index a2e1bd53fb6e6..10e8dd5b368d1 100644 --- a/articles/active-directory/develop/msal-logging-dotnet.md +++ b/articles/active-directory/develop/msal-logging-dotnet.md @@ -1,6 +1,5 @@ --- title: Logging errors and exceptions in MSAL.NET -titleSuffix: Microsoft identity platform description: Learn how to log errors and exceptions in MSAL.NET services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-logging-ios.md b/articles/active-directory/develop/msal-logging-ios.md index 0796a8d276162..dc5ed047c7ffd 100644 --- a/articles/active-directory/develop/msal-logging-ios.md +++ b/articles/active-directory/develop/msal-logging-ios.md @@ -1,6 +1,5 @@ --- title: Logging errors and exceptions in MSAL for iOS/macOS -titleSuffix: Microsoft identity platform description: Learn how to log errors and exceptions in MSAL for iOS/macOS services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-logging-java.md b/articles/active-directory/develop/msal-logging-java.md index 7ceb5cd3d266d..6ce9daf3641b1 100644 --- a/articles/active-directory/develop/msal-logging-java.md +++ b/articles/active-directory/develop/msal-logging-java.md @@ -1,6 +1,5 @@ --- title: Logging errors and exceptions in MSAL for Java -titleSuffix: Microsoft identity platform description: Learn how to log errors and exceptions in MSAL for Java services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-logging-js.md b/articles/active-directory/develop/msal-logging-js.md index 96fd7d095dd89..6315b9ddb26c3 100644 --- a/articles/active-directory/develop/msal-logging-js.md +++ b/articles/active-directory/develop/msal-logging-js.md @@ -1,6 +1,5 @@ --- title: Logging errors and exceptions in MSAL.js -titleSuffix: Microsoft identity platform description: Learn how to log errors and exceptions in MSAL.js services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-logging-python.md b/articles/active-directory/develop/msal-logging-python.md index fd1d74f7aacfe..5d76e416dc934 100644 --- a/articles/active-directory/develop/msal-logging-python.md +++ b/articles/active-directory/develop/msal-logging-python.md @@ -1,6 +1,5 @@ --- title: Logging errors and exceptions in MSAL for Python -titleSuffix: Microsoft identity platform description: Learn how to log errors and exceptions in MSAL for Python services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-migration.md b/articles/active-directory/develop/msal-migration.md index 16f87af9211df..c1b477bcf9219 100644 --- a/articles/active-directory/develop/msal-migration.md +++ b/articles/active-directory/develop/msal-migration.md @@ -1,6 +1,5 @@ --- title: Migrate to the Microsoft Authentication Library (MSAL) -titleSuffix: Microsoft identity platform description: Learn about the differences between the Microsoft Authentication Library (MSAL) and Azure AD Authentication Library (ADAL) and how to migrate to MSAL. 
services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/msal-national-cloud.md b/articles/active-directory/develop/msal-national-cloud.md index acab5cd7a30b9..5a81df8bc5144 100644 --- a/articles/active-directory/develop/msal-national-cloud.md +++ b/articles/active-directory/develop/msal-national-cloud.md @@ -1,6 +1,5 @@ --- -title: Use MSAL in a national cloud app | Azure -titleSuffix: Microsoft identity platform +title: Use MSAL in a national cloud app description: The Microsoft Authentication Library (MSAL) enables application developers to acquire tokens in order to call secured web APIs. These web APIs can be Microsoft Graph, other Microsoft APIs, partner web APIs, or your own web API. MSAL supports multiple application architectures and platforms. services: active-directory author: negoe diff --git a/articles/active-directory/develop/msal-net-aad-b2c-considerations.md b/articles/active-directory/develop/msal-net-aad-b2c-considerations.md index b3a43be364f60..82307a6a9f767 100644 --- a/articles/active-directory/develop/msal-net-aad-b2c-considerations.md +++ b/articles/active-directory/develop/msal-net-aad-b2c-considerations.md @@ -1,6 +1,5 @@ --- title: Azure AD B2C and MSAL.NET -titleSuffix: Microsoft identity platform description: Considerations when using Azure AD B2C with the Microsoft Authentication Library for .NET (MSAL.NET). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-net-acquire-token-silently.md b/articles/active-directory/develop/msal-net-acquire-token-silently.md index 253950cb71b94..3e58392886b8b 100644 --- a/articles/active-directory/develop/msal-net-acquire-token-silently.md +++ b/articles/active-directory/develop/msal-net-acquire-token-silently.md @@ -1,6 +1,5 @@ --- title: Acquire a token from the cache (MSAL.NET) -titleSuffix: Microsoft identity platform description: Learn how to acquire an access token silently (from the token cache) using the Microsoft Authentication Library for .NET (MSAL.NET). services: active-directory author: mmacy @@ -13,7 +12,7 @@ ms.workload: identity ms.date: 07/16/2019 ms.author: marsma ms.reviewer: saeeda -ms.custom: "devx-track-csharp, aaddev" +ms.custom: devx-track-csharp, aaddev #Customer intent: As an application developer, I want to learn how how to use the AcquireTokenSilent method so I can acquire tokens from the cache. --- @@ -21,7 +20,7 @@ ms.custom: "devx-track-csharp, aaddev" When you acquire an access token using the Microsoft Authentication Library for .NET (MSAL.NET), the token is cached. When the application needs a token, it should first call the `AcquireTokenSilent` method to verify if an acceptable token is in the cache. In many cases, it's possible to acquire another token with more scopes based on a token in the cache. It's also possible to refresh a token when it's getting close to expiration (as the token cache also contains a refresh token). -For authentication flows that require a user interaction, MSAL caches the access, refresh, and ID tokens, as well as the `IAccount` object, which represents information about a single account. Learn more about [IAccount](/dotnet/api/microsoft.identity.client.iaccount?view=azure-dotnet). For application flows, such as [client credentials](msal-authentication-flows.md#client-credentials), only access tokens are cached, because the `IAccount` object and ID token require a user, and the refresh token is not applicable. 
+For authentication flows that require a user interaction, MSAL caches the access, refresh, and ID tokens, as well as the `IAccount` object, which represents information about a single account. Learn more about [IAccount](/dotnet/api/microsoft.identity.client.iaccount?view=azure-dotnet&preserve-view=true). For application flows, such as [client credentials](msal-authentication-flows.md#client-credentials), only access tokens are cached, because the `IAccount` object and ID token require a user, and the refresh token is not applicable. The recommended pattern is to call the `AcquireTokenSilent` method first. If `AcquireTokenSilent` fails, then acquire a token using other methods. diff --git a/articles/active-directory/develop/msal-net-adfs-support.md b/articles/active-directory/develop/msal-net-adfs-support.md index a3576bd6eceae..8326c43499397 100644 --- a/articles/active-directory/develop/msal-net-adfs-support.md +++ b/articles/active-directory/develop/msal-net-adfs-support.md @@ -1,6 +1,5 @@ --- -title: AD FS support in MSAL.NET | Azure -titleSuffix: Microsoft identity platform +title: AD FS support in MSAL.NET description: Learn about Active Directory Federation Services (AD FS) support in the Microsoft Authentication Library for .NET (MSAL.NET). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-net-clear-token-cache.md b/articles/active-directory/develop/msal-net-clear-token-cache.md index f6094a7f3f36f..2f2af49b7de1a 100644 --- a/articles/active-directory/develop/msal-net-clear-token-cache.md +++ b/articles/active-directory/develop/msal-net-clear-token-cache.md @@ -1,6 +1,5 @@ --- -title: Clear the token cache (MSAL.NET) | Azure -titleSuffix: Microsoft identity platform +title: Clear the token cache (MSAL.NET) description: Learn how to clear the token cache using the Microsoft Authentication Library for .NET (MSAL.NET). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-net-client-assertions.md b/articles/active-directory/develop/msal-net-client-assertions.md index 7f7ab24c31ab0..ecd6a8e60f9ea 100644 --- a/articles/active-directory/develop/msal-net-client-assertions.md +++ b/articles/active-directory/develop/msal-net-client-assertions.md @@ -1,6 +1,5 @@ --- -title: Client assertions (MSAL.NET) | Azure -titleSuffix: Microsoft identity platform +title: Client assertions (MSAL.NET) description: Learn about signed client assertions support for confidential client applications in the Microsoft Authentication Library for .NET (MSAL.NET). services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/msal-net-differences-adal-net.md b/articles/active-directory/develop/msal-net-differences-adal-net.md index 25b7577b2bfd7..5896775f5334f 100644 --- a/articles/active-directory/develop/msal-net-differences-adal-net.md +++ b/articles/active-directory/develop/msal-net-differences-adal-net.md @@ -1,6 +1,5 @@ --- -title: Differences between ADAL.NET and MSAL.NET apps | Azure -titleSuffix: Microsoft identity platform +title: Differences between ADAL.NET and MSAL.NET apps description: Learn about the differences between the Microsoft Authentication Library for .NET (MSAL.NET) and Azure AD Authentication Library for .NET (ADAL.NET). 
services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/msal-net-initializing-client-applications.md b/articles/active-directory/develop/msal-net-initializing-client-applications.md index 82ff49a4e0aec..947e7b76ddd28 100644 --- a/articles/active-directory/develop/msal-net-initializing-client-applications.md +++ b/articles/active-directory/develop/msal-net-initializing-client-applications.md @@ -1,6 +1,5 @@ --- -title: Initialize MSAL.NET client applications | Azure -titleSuffix: Microsoft identity platform +title: Initialize MSAL.NET client applications description: Learn about initializing public client and confidential client applications using the Microsoft Authentication Library for .NET (MSAL.NET). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-net-instantiate-confidential-client-config-options.md b/articles/active-directory/develop/msal-net-instantiate-confidential-client-config-options.md index 234ddda0db196..98752a0111586 100644 --- a/articles/active-directory/develop/msal-net-instantiate-confidential-client-config-options.md +++ b/articles/active-directory/develop/msal-net-instantiate-confidential-client-config-options.md @@ -1,6 +1,5 @@ --- -title: Instantiate a confidential client app (MSAL.NET) | Azure -titleSuffix: Microsoft identity platform +title: Instantiate a confidential client app (MSAL.NET) description: Learn how to instantiate a confidential client application with configuration options using the Microsoft Authentication Library for .NET (MSAL.NET). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-net-instantiate-public-client-config-options.md b/articles/active-directory/develop/msal-net-instantiate-public-client-config-options.md index a597811428019..65092f6555c8d 100644 --- a/articles/active-directory/develop/msal-net-instantiate-public-client-config-options.md +++ b/articles/active-directory/develop/msal-net-instantiate-public-client-config-options.md @@ -1,6 +1,5 @@ --- -title: Instantiate a public client app (MSAL.NET) | Azure -titleSuffix: Microsoft identity platform +title: Instantiate a public client app (MSAL.NET) description: Learn how to instantiate a public client application with configuration options using the Microsoft Authentication Library for .NET (MSAL.NET). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-net-migration-android-broker.md b/articles/active-directory/develop/msal-net-migration-android-broker.md index 0400a2e015011..3efc564bcb4f7 100644 --- a/articles/active-directory/develop/msal-net-migration-android-broker.md +++ b/articles/active-directory/develop/msal-net-migration-android-broker.md @@ -1,6 +1,5 @@ --- title: Migrate Xamarin Android apps using brokers to MSAL.NET -titleSuffix: Microsoft identity platform description: Learn how to migrate Xamarin Android apps that use the Microsoft Authenticator or Intune Company Portal from ADAL.NET to MSAL.NET. 
author: mmacy manager: CelesteDG diff --git a/articles/active-directory/develop/msal-net-migration-confidential-client.md b/articles/active-directory/develop/msal-net-migration-confidential-client.md index 1a806c67f8e6c..d40d80b5f26e0 100644 --- a/articles/active-directory/develop/msal-net-migration-confidential-client.md +++ b/articles/active-directory/develop/msal-net-migration-confidential-client.md @@ -1,11 +1,9 @@ --- title: Migrate confidential client applications to MSAL.NET -titleSuffix: Microsoft identity platform description: Learn how to migrate a confidential client application from Azure Active Directory Authentication Library for .NET to Microsoft Authentication Library for .NET. services: active-directory author: jmprieur manager: CelesteDG - ms.service: active-directory ms.subservice: develop ms.topic: how-to @@ -13,13 +11,13 @@ ms.workload: identity ms.date: 06/08/2021 ms.author: jmprieur ms.reviewer: saeeda, shermanouko -ms.custom: "devx-track-csharp, aaddev, has-adal-ref" +ms.custom: "devx-track-csharp, aaddev, has-adal-ref, kr2b-contr-experiment" #Customer intent: As an application developer, I want to migrate my confidential client app from ADAL.NET to MSAL.NET. --- # Migrate confidential client applications from ADAL.NET to MSAL.NET -This article describes how to migrate a confidential client application from Azure Active Directory Authentication Library for .NET (ADAL.NET) to Microsoft Authentication Library for .NET (MSAL.NET). Confidential client applications are web apps, web APIs, and daemon applications that call another service on their own behalf. For more information about confidential applications, see [Authentication flows and application scenarios](authentication-flows-app-scenarios.md). If your app is based on ASP.NET Core, use [Microsoft.Identity.Web](microsoft-identity-web.md). +In this how-to guide you'll migrate a confidential client application from Azure Active Directory Authentication Library for .NET (ADAL.NET) to Microsoft Authentication Library for .NET (MSAL.NET). Confidential client applications include web apps, web APIs, and daemon applications that call another service on their own behalf. For more information about confidential apps, see [Authentication flows and application scenarios](authentication-flows-app-scenarios.md). If your app is based on ASP.NET Core, see [Microsoft.Identity.Web](microsoft-identity-web.md). For app registrations: @@ -28,24 +26,24 @@ For app registrations: ## Migration steps -1. Find the code by using ADAL.NET in your app. +1. Find the code that uses ADAL.NET in your app. - The code that uses ADAL in a confidential client application instantiates `AuthenticationContext` and calls either `AcquireTokenByAuthorizationCode` or one override of `AcquireTokenAsync` with the following parameters: + The code that uses ADAL in a confidential client app instantiates `AuthenticationContext` and calls either `AcquireTokenByAuthorizationCode` or one override of `AcquireTokenAsync` with the following parameters: - A `resourceId` string. This variable is the app ID URI of the web API that you want to call. - An instance of `IClientAssertionCertificate` or `ClientAssertion`. This instance provides the client credentials for your app to prove the identity of your app. -1. After you've identified that you have apps that are using ADAL.NET, install the MSAL.NET NuGet package [Microsoft.Identity.Client](https://www.nuget.org/packages/Microsoft.Identity.Client) and update your project library references. 
For more information, see [Install a NuGet package](https://www.bing.com/search?q=install+nuget+package). If you want to use token cache serializers, also install [Microsoft.Identity.Web.TokenCache](https://www.nuget.org/packages/Microsoft.Identity.Web.TokenCache). +1. After you've identified that you have apps that are using ADAL.NET, install the MSAL.NET NuGet package [Microsoft.Identity.Client](https://www.nuget.org/packages/Microsoft.Identity.Client) and update your project library references. For more information, see [Install a NuGet package](https://www.bing.com/search?q=install+nuget+package). To use token cache serializers, install [Microsoft.Identity.Web.TokenCache](https://www.nuget.org/packages/Microsoft.Identity.Web.TokenCache). 1. Update the code according to the confidential client scenario. Some steps are common and apply across all the confidential client scenarios. Other steps are unique to each scenario. - The confidential client scenarios are: + Confidential client scenarios: - [Daemon scenarios](?tabs=daemon#migrate-daemon-apps) supported by web apps, web APIs, and daemon console applications. - [Web API calling downstream web APIs](?tabs=obo#migrate-a-web-api-that-calls-downstream-web-apis) supported by web APIs calling downstream web APIs on behalf of the user. - [Web app calling web APIs](?tabs=authcode#migrate-a-web-api-that-calls-downstream-web-apis) supported by web apps that sign in users and call a downstream web API. -You might have provided a wrapper around ADAL.NET to handle certificates and caching. This article uses the same approach to illustrate the process of migrating from ADAL.NET to MSAL.NET. However, this code is only for demonstration purposes. Don't copy/paste these wrappers or integrate them in your code as they are. +You might have provided a wrapper around ADAL.NET to handle certificates and caching. This guide uses the same approach to illustrate the process of migrating from ADAL.NET to MSAL.NET. However, this code is only for demonstration purposes. Don't copy/paste these wrappers or integrate them in your code as they are. ## [Daemon](#tab/daemon) @@ -60,13 +58,13 @@ The ADAL code for your app uses daemon scenarios if it contains a call to `Authe - A resource (app ID URI) as a first parameter - `IClientAssertionCertificate` or `ClientAssertion` as the second parameter -`AuthenticationContext.AcquireTokenAsync` doesn't have a parameter of type `UserAssertion`. If it does, then your app is a web API, and it's using the [web API calling downstream web APIs](?tabs=obo#migrate-a-web-api-that-calls-downstream-web-apis) scenario. +`AuthenticationContext.AcquireTokenAsync` doesn't have a parameter of type `UserAssertion`. If it does, then your app is a web API, and it uses the [web API calling downstream web APIs](?tabs=obo#migrate-a-web-api-that-calls-downstream-web-apis) scenario. #### Update the code of daemon scenarios [!INCLUDE [Common steps](includes/msal-net-adoption-steps-confidential-clients.md)] -In this case, we replace the call to `AuthenticationContext.AcquireTokenAsync` with a call to `IConfidentialClientApplication.AcquireTokenClient`. +In this case, replace the call to `AuthenticationContext.AcquireTokenAsync` with a call to `IConfidentialClientApplication.AcquireTokenForClient`.
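A minimal sketch of that MSAL.NET call is shown below; the client ID, tenant ID, certificate, and resource URI are illustrative placeholders rather than values from this guide, and the full ADAL.NET/MSAL.NET comparison follows it.

```CSharp
// Sketch only: clientId, tenantId, certificate, and resourceAppIdUri are placeholders.
using System;
using System.Security.Cryptography.X509Certificates;
using System.Threading.Tasks;
using Microsoft.Identity.Client;

public class DaemonTokenProvider
{
    private readonly IConfidentialClientApplication _app;

    public DaemonTokenProvider(string clientId, string tenantId, X509Certificate2 certificate)
    {
        // Build the confidential client once and keep it alive so its
        // in-memory token cache is reused across requests.
        _app = ConfidentialClientApplicationBuilder
            .Create(clientId)
            .WithCertificate(certificate)
            .WithAuthority(new Uri($"https://login.microsoftonline.com/{tenantId}"))
            .Build();
    }

    public async Task<string> GetAccessTokenAsync(string resourceAppIdUri)
    {
        // The ADAL "resource" becomes a scope in MSAL; "/.default" requests all
        // application permissions that were granted for that resource.
        AuthenticationResult result = await _app
            .AcquireTokenForClient(new[] { $"{resourceAppIdUri}/.default" })
            .ExecuteAsync();

        return result.AccessToken;
    }
}
```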
Here's a comparison of ADAL.NET and MSAL.NET code for daemon scenarios: @@ -160,9 +158,9 @@ public partial class AuthWrapper #### Benefit from token caching -To benefit from the in-memory cache, the instance of `IConfidentialClientApplication` needs to be kept in a member variable. If you re-create the confidential client application each time you request a token, you won't benefit from the token cache. +To benefit from the in-memory cache, the instance of `IConfidentialClientApplication` must be kept in a member variable. If you re-create the confidential client app each time you request a token, you won't benefit from the token cache. -You'll need to serialize `AppTokenCache` if you choose not to use the default in-memory app token cache. Similarly, If you want to implement a distributed token cache, you'll need to serialize `AppTokenCache`. For details, see [Token cache for a web app or web API (confidential client application)](msal-net-token-cache-serialization.md?tabs=aspnet) and the sample [active-directory-dotnet-v1-to-v2/ConfidentialClientTokenCache](https://github.com/Azure-Samples/active-directory-dotnet-v1-to-v2/tree/master/ConfidentialClientTokenCache). +You'll need to serialize `AppTokenCache` if you don't use the default in-memory app token cache. Similarly, If you want to implement a distributed token cache, serialize `AppTokenCache`. For details, see [Token cache for a web app or web API (confidential client application)](msal-net-token-cache-serialization.md?tabs=aspnet) and the sample [active-directory-dotnet-v1-to-v2/ConfidentialClientTokenCache](https://github.com/Azure-Samples/active-directory-dotnet-v1-to-v2/tree/master/ConfidentialClientTokenCache). [Learn more about the daemon scenario](scenario-daemon-overview.md) and how it's implemented with MSAL.NET or Microsoft.Identity.Web in new applications. @@ -285,25 +283,25 @@ public partial class AuthWrapper #### Benefit from token caching -For token caching in OBOs, you need to use a distributed token cache. For details, see [Token cache for a web app or web API (confidential client application)](msal-net-token-cache-serialization.md?tabs=aspnet) and read through [sample code](https://github.com/Azure-Samples/active-directory-dotnet-v1-to-v2/tree/master/ConfidentialClientTokenCache). +For token caching in OBOs, use a distributed token cache. For details, see [Token cache for a web app or web API (confidential client app)](msal-net-token-cache-serialization.md?tabs=aspnet) and read through [sample code](https://github.com/Azure-Samples/active-directory-dotnet-v1-to-v2/tree/master/ConfidentialClientTokenCache). ```CSharp app.UseInMemoryTokenCaches(); // or a distributed token cache. ``` -[Learn more about web APIs calling downstream web APIs](scenario-web-api-call-api-overview.md) and how they're implemented with MSAL.NET or Microsoft.Identity.Web in new applications. +[Learn more about web APIs calling downstream web APIs](scenario-web-api-call-api-overview.md) and how they're implemented with MSAL.NET or Microsoft.Identity.Web in new apps. ## [Web app calling web APIs](#tab/authcode) ### Migrate a web app that calls web APIs -If your app uses ASP.NET Core, we strongly recommend that you update to Microsoft.Identity.Web, which processes everything for you. For a quick presentation, see the [Microsoft.Identity.Web announcement of general availability](https://github.com/AzureAD/microsoft-identity-web/wiki/1.0.0). 
For details about how to use it in a web app, see [Why use Microsoft.Identity.Web in web apps?](https://aka.ms/ms-id-web/webapp). +If your app uses ASP.NET Core, we strongly recommend that you update to Microsoft.Identity.Web because it processes everything for you. For a quick presentation, see the [Microsoft.Identity.Web announcement of general availability](https://github.com/AzureAD/microsoft-identity-web/wiki/1.0.0). For details about how to use it in a web app, see [Why use Microsoft.Identity.Web in web apps?](https://aka.ms/ms-id-web/webapp). -Web apps that sign in users and call web APIs on behalf of users use the OAuth2.0 [authorization code flow](v2-oauth2-auth-code-flow.md). Typically: +Web apps that sign in users and call web APIs on behalf of users employ the OAuth2.0 [authorization code flow](v2-oauth2-auth-code-flow.md). Typically: -1. The web app signs in a user by executing a first leg of the authorization code flow. It does this by going to the Microosft identity platform authorize endpoint. The user signs in and performs multifactor authentications if needed. As an outcome of this operation, the app receives the authorization code. The authentication library is not used at this stage. +1. The app signs in a user by executing the first leg of the authorization code flow, going to the Microsoft identity platform authorize endpoint. The user signs in and performs multifactor authentication if needed. As an outcome of this operation, the app receives the authorization code. The authentication library isn't used at this stage. 1. The app executes the second leg of the authorization code flow. It uses the authorization code to get an access token, an ID token, and a refresh token. Your application needs to provide the `redirectUri` value, which is the URI where the Microsoft identity platform endpoint will provide the security tokens. After the app receives that URI, it typically calls `AcquireTokenByAuthorizationCode` for ADAL or MSAL to redeem the code and to get a token that will be stored in the token cache. -1. The app uses ADAL or MSAL to call `AcquireTokenSilent` so that it can get tokens for calling the necessary web APIs. This is done from the web app controllers. +1. The app uses ADAL or MSAL to call `AcquireTokenSilent` from the web app controllers to get tokens for calling the necessary web APIs. #### Find out if your code uses the auth code flow @@ -313,7 +311,7 @@ The ADAL code for your app uses auth code flow if it contains a call to `Authent [!INCLUDE [Common steps](includes/msal-net-adoption-steps-confidential-clients.md)] -In this case, we replace the call to `AuthenticationContext.AcquireTokenAsync` with a call to `IConfidentialClientApplication.AcquireTokenByAuthorizationCode`. +In this case, replace the call to `AuthenticationContext.AcquireTokenAsync` with a call to `IConfidentialClientApplication.AcquireTokenByAuthorizationCode`. Here's a comparison of sample authorization code flows for ADAL.NET and MSAL.NET: @@ -460,7 +458,7 @@ public partial class AuthWrapper #### Benefit from token caching -Because your web app uses `AcquireTokenByAuthorizationCode`, your app needs to use a distributed token cache for token caching. For details, see [Token cache for a web app or web API](msal-net-token-cache-serialization.md?tabs=aspnet) and read through [sample code](https://github.com/Azure-Samples/active-directory-dotnet-v1-to-v2/tree/master/ConfidentialClientTokenCache).
+Because your web app uses `AcquireTokenByAuthorizationCode`, it needs to use a distributed token cache for token caching. For details, see [Token cache for a web app or web API](msal-net-token-cache-serialization.md?tabs=aspnet) and read through [sample code](https://github.com/Azure-Samples/active-directory-dotnet-v1-to-v2/tree/master/ConfidentialClientTokenCache). ```CSharp @@ -470,9 +468,9 @@ app.UseInMemoryTokenCaches(); // or a distributed token cache. #### Handling MsalUiRequiredException When your controller attempts to acquire a token silently for different -scopes/resources, MSAL.NET might throw an `MsalUiRequiredException`. This is expected if, for instance, the user needs to re-sign-in, or if the +scopes/resources, MSAL.NET might throw an `MsalUiRequiredException` as expected if the user needs to re-sign-in, or if the access to the resource requires more claims (because of a conditional access -policy for instance). For details on mitigation see how to [Handle errors and exceptions in MSAL.NET](msal-error-handling-dotnet.md). +policy). For details on mitigation see how to [Handle errors and exceptions in MSAL.NET](msal-error-handling-dotnet.md). [Learn more about web apps calling web APIs](scenario-web-app-call-api-overview.md) and how they're implemented with MSAL.NET or Microsoft.Identity.Web in new applications. @@ -482,14 +480,14 @@ policy for instance). For details on mitigation see how to [Handle errors and ex Key benefits of MSAL.NET for your app include: -- **Resilience**. MSAL.NET helps make your app resilient through the following: +- **Resilience**. MSAL.NET helps make your app resilient through: - - Azure AD Cached Credential Service (CCS) benefits. CCS operates as an Azure AD backup. - - Proactive renewal of tokens if the API that you call enables long-lived tokens through [continuous access evaluation](app-resilience-continuous-access-evaluation.md). + - Azure AD Cached Credential Service (CCS) benefits. CCS operates as an Azure AD backup. + - Proactive renewal of tokens if the API that you call enables long-lived tokens through [continuous access evaluation](app-resilience-continuous-access-evaluation.md). - **Security**. You can acquire Proof of Possession (PoP) tokens if the web API that you want to call requires it. For details, see [Proof Of Possession tokens in MSAL.NET](https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/wiki/Proof-Of-Possession-(PoP)-tokens) -- **Performance and scalability**. If you don't need to share your cache with ADAL.NET, disable the legacy cache compatibility when you're creating the confidential client application (`.WithLegacyCacheCompatibility(false)`). This increases the performance significantly. +- **Performance and scalability**. If you don't need to share your cache with ADAL.NET, disable the legacy cache compatibility when you're creating the confidential client application (`.WithLegacyCacheCompatibility(false)`) to significantly increase performance. ```csharp app = ConfidentialClientApplicationBuilder.Create(ClientId) @@ -516,14 +514,14 @@ If you get an exception with either of the following messages: > `subscriptions for the tenant. Check to make sure you have the correct tenant ID. Check with your subscription` > `administrator.` -You can troubleshoot the exception by using these steps: +Troubleshoot the exception using these steps: 1. Confirm that you're using the latest version of [MSAL.NET](https://www.nuget.org/packages/Microsoft.Identity.Client/). -1. 
Confirm that the authority host that you set when building the confidential client application and the authority host that you used with ADAL are similar. In particular, is it the same [cloud](msal-national-cloud.md) (Azure Government, Azure China 21Vianet, or Azure Germany)? +1. Confirm that the authority host that you set when building the confidential client app and the authority host that you used with ADAL are similar. In particular, is it the same [cloud](msal-national-cloud.md) (Azure Government, Azure China 21Vianet, or Azure Germany)? ### MsalClientException -In multi-tenant applications, you can have scenarios where you specify a common authority when building the application, but then want to target a specific tenant (for instance the tenant of the user) when calling a web API. Since MSAL.NET 4.37.0, when you specify `.WithAzureRegion` at the application creation, you can no longer specify the Authority using `.WithAuthority` during the token requests. If you do, you'll get the following error when updating from previous versions of MSAL.NET: +In multi-tenant apps, you might specify a common authority when building the app but then want to target a specific tenant, such as the tenant of the user, when calling a web API. Since MSAL.NET 4.37.0, when you specify `.WithAzureRegion` at app creation, you can no longer specify the authority using `.WithAuthority` during the token requests. If you do, you'll get the following error when updating from previous versions of MSAL.NET: `MsalClientException - "You configured WithAuthority at the request level, and also WithAzureRegion. This is not supported when the environment changes from application to request. Use WithTenantId at the request level instead."` diff --git a/articles/active-directory/develop/msal-net-migration-ios-broker.md b/articles/active-directory/develop/msal-net-migration-ios-broker.md index 3b578e614f3bf..ec7006514e2cb 100644 --- a/articles/active-directory/develop/msal-net-migration-ios-broker.md +++ b/articles/active-directory/develop/msal-net-migration-ios-broker.md @@ -1,6 +1,5 @@ --- title: Migrate Xamarin apps using brokers to MSAL.NET -titleSuffix: Microsoft identity platform description: Learn how to migrate Xamarin iOS apps that use Microsoft Authenticator from ADAL.NET to MSAL.NET. author: jmprieur manager: CelesteDG diff --git a/articles/active-directory/develop/msal-net-migration-public-client.md b/articles/active-directory/develop/msal-net-migration-public-client.md index 90e0443c43d4d..5f0d123729227 100644 --- a/articles/active-directory/develop/msal-net-migration-public-client.md +++ b/articles/active-directory/develop/msal-net-migration-public-client.md @@ -1,6 +1,5 @@ --- title: Migrate public client applications to MSAL.NET -titleSuffix: Microsoft identity platform description: Learn how to migrate a public client application from Azure Active Directory Authentication Library for .NET to Microsoft Authentication Library for .NET.
services: active-directory author: CelesteDG diff --git a/articles/active-directory/develop/msal-net-migration.md b/articles/active-directory/develop/msal-net-migration.md index 3200a09a41ebd..c8ad29a752da9 100644 --- a/articles/active-directory/develop/msal-net-migration.md +++ b/articles/active-directory/develop/msal-net-migration.md @@ -1,6 +1,5 @@ --- title: Migrating to MSAL.NET and Microsoft.Identity.Web -titleSuffix: Microsoft identity platform description: Learn why and how to migrate from Azure AD Authentication Library for .NET (ADAL.NET) to Microsoft Authentication Library for .NET (MSAL.NET) or Microsoft.Identity.Web services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/msal-net-provide-httpclient.md b/articles/active-directory/develop/msal-net-provide-httpclient.md index 673b799c7d6df..94146e155e682 100644 --- a/articles/active-directory/develop/msal-net-provide-httpclient.md +++ b/articles/active-directory/develop/msal-net-provide-httpclient.md @@ -1,6 +1,5 @@ --- -title: Provide an HttpClient & proxy (MSAL.NET) | Azure -titleSuffix: Microsoft identity platform +title: Provide an HttpClient & proxy (MSAL.NET) description: Learn about providing your own HttpClient and proxy to connect to Azure AD using the Microsoft Authentication Library for .NET (MSAL.NET). author: jmprieur manager: CelesteDG diff --git a/articles/active-directory/develop/msal-net-system-browser-android-considerations.md b/articles/active-directory/develop/msal-net-system-browser-android-considerations.md index d2098cf690e40..d4e2c0ae84926 100644 --- a/articles/active-directory/develop/msal-net-system-browser-android-considerations.md +++ b/articles/active-directory/develop/msal-net-system-browser-android-considerations.md @@ -1,6 +1,5 @@ --- -title: Xamarin Android system browser considerations (MSAL.NET) | Azure -titleSuffix: Microsoft identity platform +title: Xamarin Android system browser considerations (MSAL.NET) description: Learn about considerations for using system browsers on Xamarin Android with the Microsoft Authentication Library for .NET (MSAL.NET). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-net-token-cache-serialization.md b/articles/active-directory/develop/msal-net-token-cache-serialization.md index d844e6b6eabe3..832029f8b97bc 100644 --- a/articles/active-directory/develop/msal-net-token-cache-serialization.md +++ b/articles/active-directory/develop/msal-net-token-cache-serialization.md @@ -1,6 +1,5 @@ --- -title: Token cache serialization (MSAL.NET) | Azure -titleSuffix: Microsoft identity platform +title: Token cache serialization (MSAL.NET) description: Learn about serialization and custom serialization of the token cache using the Microsoft Authentication Library for .NET (MSAL.NET). services: active-directory author: jmprieur @@ -278,7 +277,7 @@ You can also specify options to limit the size of the in-memory token cache: #### Distributed caches -If you use `app.AddDistributedTokenCache`, the token cache is an adapter against the .NET `IDistributedCache` implementation. So you can choose between a SQL Server cache, a Redis cache, an Azure Cosmos DB cache, or any other cache implementing the [IDistributedCache](https://docs.microsoft.com/dotnet/api/microsoft.extensions.caching.distributed.idistributedcache?view=dotnet-plat-ext-6.0) interface. +If you use `app.AddDistributedTokenCache`, the token cache is an adapter against the .NET `IDistributedCache` implementation. 
So you can choose between a SQL Server cache, a Redis cache, an Azure Cosmos DB cache, or any other cache implementing the [IDistributedCache](/dotnet/api/microsoft.extensions.caching.distributed.idistributedcache?view=dotnet-plat-ext-6.0) interface. For testing purposes only, you may want to use `services.AddDistributedMemoryCache()`, an in-memory implementation of `IDistributedCache`. @@ -709,4 +708,4 @@ The following samples illustrate token cache serialization. | ------ | -------- | ----------- | |[active-directory-dotnet-desktop-msgraph-v2](https://github.com/azure-samples/active-directory-dotnet-desktop-msgraph-v2) | Desktop (WPF) | Windows Desktop .NET (WPF) application that calls the Microsoft Graph API. ![Diagram that shows a topology with a desktop app client flowing to Azure Active Directory by acquiring a token interactively and to Microsoft Graph.](media/msal-net-token-cache-serialization/topology.png)| |[active-directory-dotnet-v1-to-v2](https://github.com/Azure-Samples/active-directory-dotnet-v1-to-v2) | Desktop (console) | Set of Visual Studio solutions that illustrate the migration of Azure AD v1.0 applications (using ADAL.NET) to Microsoft identity platform applications (using MSAL.NET). In particular, see [Token cache migration](https://github.com/Azure-Samples/active-directory-dotnet-v1-to-v2/blob/master/TokenCacheMigration/README.md) and [Confidential client token cache](https://github.com/Azure-Samples/active-directory-dotnet-v1-to-v2/tree/master/ConfidentialClientTokenCache). | -[ms-identity-aspnet-webapp-openidconnect](https://github.com/Azure-Samples/ms-identity-aspnet-webapp-openidconnect) | ASP.NET (net472) | Example of token cache serialization in an ASP.NET MVC application (using MSAL.NET). In particular, see [MsalAppBuilder](https://github.com/Azure-Samples/ms-identity-aspnet-webapp-openidconnect/blob/master/WebApp/Utils/MsalAppBuilder.cs). +[ms-identity-aspnet-webapp-openidconnect](https://github.com/Azure-Samples/ms-identity-aspnet-webapp-openidconnect) | ASP.NET (net472) | Example of token cache serialization in an ASP.NET MVC application (using MSAL.NET). In particular, see [MsalAppBuilder](https://github.com/Azure-Samples/ms-identity-aspnet-webapp-openidconnect/blob/master/WebApp/Utils/MsalAppBuilder.cs). \ No newline at end of file diff --git a/articles/active-directory/develop/msal-net-use-brokers-with-xamarin-apps.md b/articles/active-directory/develop/msal-net-use-brokers-with-xamarin-apps.md index 03e672495b905..c2c8da5364abd 100644 --- a/articles/active-directory/develop/msal-net-use-brokers-with-xamarin-apps.md +++ b/articles/active-directory/develop/msal-net-use-brokers-with-xamarin-apps.md @@ -1,6 +1,5 @@ --- -title: Use brokers with Xamarin iOS & Android | Azure -titleSuffix: Microsoft identity platform +title: Use brokers with Xamarin iOS & Android description: Learn how to setup Xamarin iOS applications that can use the Microsoft Authenticator and the Microsoft Authentication Library for .NET (MSAL.NET). Also learn how to migrate from Azure AD Authentication Library for .NET (ADAL.NET) to the Microsoft Authentication Library for .NET (MSAL.NET). 
author: jmprieur manager: CelesteDG diff --git a/articles/active-directory/develop/msal-net-user-gets-consent-for-multiple-resources.md b/articles/active-directory/develop/msal-net-user-gets-consent-for-multiple-resources.md index 5b9a9d2ba3313..7f94022932127 100644 --- a/articles/active-directory/develop/msal-net-user-gets-consent-for-multiple-resources.md +++ b/articles/active-directory/develop/msal-net-user-gets-consent-for-multiple-resources.md @@ -1,6 +1,5 @@ --- -title: Get consent for several resources (MSAL.NET) | Azure -titleSuffix: Microsoft identity platform +title: Get consent for several resources (MSAL.NET) description: Learn how a user can get pre-consent for several resources using the Microsoft Authentication Library for .NET (MSAL.NET). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-net-uwp-considerations.md b/articles/active-directory/develop/msal-net-uwp-considerations.md index 35b3de72136f9..add85d70423f1 100644 --- a/articles/active-directory/develop/msal-net-uwp-considerations.md +++ b/articles/active-directory/develop/msal-net-uwp-considerations.md @@ -1,6 +1,5 @@ --- -title: UWP considerations (MSAL.NET) | Azure -titleSuffix: Microsoft identity platform +title: UWP considerations (MSAL.NET) description: Learn about considerations for using Universal Windows Platform (UWP) with the Microsoft Authentication Library for .NET (MSAL.NET). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-net-web-browsers.md b/articles/active-directory/develop/msal-net-web-browsers.md index c99d143c9290c..3d0603846df86 100644 --- a/articles/active-directory/develop/msal-net-web-browsers.md +++ b/articles/active-directory/develop/msal-net-web-browsers.md @@ -1,6 +1,5 @@ --- -title: Using web browsers (MSAL.NET) | Azure -titleSuffix: Microsoft identity platform +title: Using web browsers (MSAL.NET) description: Learn about specific considerations when using Xamarin Android with the Microsoft Authentication Library for .NET (MSAL.NET). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-net-xamarin-android-considerations.md b/articles/active-directory/develop/msal-net-xamarin-android-considerations.md index 1224eaaae07e9..924328f12caa3 100644 --- a/articles/active-directory/develop/msal-net-xamarin-android-considerations.md +++ b/articles/active-directory/develop/msal-net-xamarin-android-considerations.md @@ -1,6 +1,5 @@ --- -title: Xamarin Android code configuration and troubleshooting (MSAL.NET) | Azure -titleSuffix: Microsoft identity platform +title: Xamarin Android code configuration and troubleshooting (MSAL.NET) description: Learn about considerations for using Xamarin Android with the Microsoft Authentication Library for .NET (MSAL.NET). services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/msal-net-xamarin-ios-considerations.md b/articles/active-directory/develop/msal-net-xamarin-ios-considerations.md index ce998b7d4f617..8868b38f3ec73 100644 --- a/articles/active-directory/develop/msal-net-xamarin-ios-considerations.md +++ b/articles/active-directory/develop/msal-net-xamarin-ios-considerations.md @@ -1,6 +1,5 @@ --- -title: Xamarin iOS considerations (MSAL.NET) | Azure -titleSuffix: Microsoft identity platform +title: Xamarin iOS considerations (MSAL.NET) description: Learn about considerations for using Xamarin iOS with the Microsoft Authentication Library for .NET (MSAL.NET). 
services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/msal-node-extensions.md b/articles/active-directory/develop/msal-node-extensions.md index a97d4532d9533..d756f72cd1ff5 100644 --- a/articles/active-directory/develop/msal-node-extensions.md +++ b/articles/active-directory/develop/msal-node-extensions.md @@ -1,6 +1,5 @@ --- -title: "Learn about Microsoft Authentication Extensions for Node | Azure" -titleSuffix: Microsoft identity platform +title: "Learn about Microsoft Authentication Extensions for Node" description: The Microsoft Authentication Extensions for Node enables application developers to perform cross-platform token cache serialization and persistence. It gives extra support to the Microsoft Authentication Library for Node (MSAL Node). services: active-directory author: henrymbuguakiarie diff --git a/articles/active-directory/develop/msal-node-migration.md b/articles/active-directory/develop/msal-node-migration.md index 9f80f47992d75..c8dc4dd369ec7 100644 --- a/articles/active-directory/develop/msal-node-migration.md +++ b/articles/active-directory/develop/msal-node-migration.md @@ -1,6 +1,5 @@ --- -title: "Migrate your Node.js application from ADAL to MSAL | Azure" -titleSuffix: Microsoft identity platform +title: "Migrate your Node.js application from ADAL to MSAL" description: How to update your existing Node.js application to use the Microsoft Authentication Library (MSAL) for authentication and authorization instead of the Active Directory Authentication Library (ADAL). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-overview.md b/articles/active-directory/develop/msal-overview.md index 2140848c5b3dc..f5e0029b46b6f 100644 --- a/articles/active-directory/develop/msal-overview.md +++ b/articles/active-directory/develop/msal-overview.md @@ -1,6 +1,5 @@ --- -title: Learn about MSAL | Azure -titleSuffix: Microsoft identity platform +title: Learn about MSAL description: The Microsoft Authentication Library (MSAL) enables application developers to acquire tokens in order to call secured web APIs. These web APIs can be the Microsoft Graph, other Microsoft APIs, third-party web APIs, or your own web API. MSAL supports multiple application architectures and platforms. 
services: active-directory author: mmacy diff --git a/articles/active-directory/develop/msal-python-adfs-support.md b/articles/active-directory/develop/msal-python-adfs-support.md index 86ec0face4f71..560e1be417a16 100644 --- a/articles/active-directory/develop/msal-python-adfs-support.md +++ b/articles/active-directory/develop/msal-python-adfs-support.md @@ -1,6 +1,5 @@ --- title: Azure AD FS support (MSAL Python) -titleSuffix: Microsoft identity platform description: Learn about Active Directory Federation Services (AD FS) support in the Microsoft Authentication Library for Python services: active-directory author: CelesteDG diff --git a/articles/active-directory/develop/msal-python-token-cache-serialization.md b/articles/active-directory/develop/msal-python-token-cache-serialization.md index 84ec7783563c1..b495399f98873 100644 --- a/articles/active-directory/develop/msal-python-token-cache-serialization.md +++ b/articles/active-directory/develop/msal-python-token-cache-serialization.md @@ -1,6 +1,5 @@ --- -title: Custom token cache serialization (MSAL Python) | Azure -titleSuffix: Microsoft identity platform +title: Custom token cache serialization (MSAL Python) description: Learn how to serializing the token cache for MSAL for Python services: active-directory author: rayluo diff --git a/articles/active-directory/develop/msal-shared-devices.md b/articles/active-directory/develop/msal-shared-devices.md index 80cc3a51767af..fbd379021ed69 100644 --- a/articles/active-directory/develop/msal-shared-devices.md +++ b/articles/active-directory/develop/msal-shared-devices.md @@ -1,6 +1,5 @@ --- title: Shared device mode overview -titleSuffix: Microsoft identity platform | Azure description: Learn about shared device mode to enable device sharing for your frontline workers. services: active-directory author: brandwe diff --git a/articles/active-directory/develop/msal-v1-app-scopes.md b/articles/active-directory/develop/msal-v1-app-scopes.md index 02222661c87f0..213e925508f30 100644 --- a/articles/active-directory/develop/msal-v1-app-scopes.md +++ b/articles/active-directory/develop/msal-v1-app-scopes.md @@ -1,5 +1,5 @@ --- -title: Scopes for v1.0 apps (MSAL) | Azure +title: Scopes for v1.0 apps (MSAL) description: Learn about the scopes for a v1.0 application using the Microsoft Authentication Library (MSAL). services: active-directory author: mmacy diff --git a/articles/active-directory/develop/multi-service-web-app-access-microsoft-graph-as-app.md b/articles/active-directory/develop/multi-service-web-app-access-microsoft-graph-as-app.md index bdf868f6e793b..9a60a4d194c96 100644 --- a/articles/active-directory/develop/multi-service-web-app-access-microsoft-graph-as-app.md +++ b/articles/active-directory/develop/multi-service-web-app-access-microsoft-graph-as-app.md @@ -1,5 +1,5 @@ --- -title: Tutorial - Web app accesses Microsoft Graph as the app| Azure +title: Tutorial - Web app accesses Microsoft Graph as the app description: In this tutorial, you learn how to access data in Microsoft Graph by using managed identities. 
services: microsoft-graph, app-service-web author: rwike77 diff --git a/articles/active-directory/develop/multi-service-web-app-access-microsoft-graph-as-user.md b/articles/active-directory/develop/multi-service-web-app-access-microsoft-graph-as-user.md index 6102c64a30892..9607f4f196c49 100644 --- a/articles/active-directory/develop/multi-service-web-app-access-microsoft-graph-as-user.md +++ b/articles/active-directory/develop/multi-service-web-app-access-microsoft-graph-as-user.md @@ -1,5 +1,5 @@ --- -title: Tutorial - Web app accesses Microsoft Graph as the user | Azure +title: Tutorial - Web app accesses Microsoft Graph as the user description: In this tutorial, you learn how to access data in Microsoft Graph from a web app for a signed-in user. services: microsoft-graph, app-service-web author: rwike77 @@ -130,7 +130,7 @@ Using the [Microsoft.Identity.Web library](https://github.com/AzureAD/microsoft- To see this code as part of a sample application, see the [sample on GitHub](https://github.com/Azure-Samples/ms-identity-easyauth-dotnet-storage-graphapi/tree/main/2-WebApp-graphapi-on-behalf). > [!NOTE] -> The Microsoft.Identity.Web library isn't required in your web app for basic authentication/authorization or to authenticate requests with Microsoft Graph. It's possible to [securely call downstream APIs](/azure/app-service/tutorial-auth-aad#call-api-securely-from-server-code) with only the App Service authentication/authorization module enabled. +> The Microsoft.Identity.Web library isn't required in your web app for basic authentication/authorization or to authenticate requests with Microsoft Graph. It's possible to [securely call downstream APIs](../../app-service/tutorial-auth-aad.md#call-api-securely-from-server-code) with only the App Service authentication/authorization module enabled. > > However, the App Service authentication/authorization is designed for more basic authentication scenarios. For more complex scenarios (handling custom claims, for example), you need the Microsoft.Identity.Web library or [Microsoft Authentication Library](msal-overview.md). There's a little more setup and configuration work in the beginning, but the Microsoft.Identity.Web library can run alongside the App Service authentication/authorization module. Later, when your web app needs to handle more complex scenarios, you can disable the App Service authentication/authorization module and Microsoft.Identity.Web will already be a part of your app. diff --git a/articles/active-directory/develop/multi-service-web-app-access-storage.md b/articles/active-directory/develop/multi-service-web-app-access-storage.md index 8e85cea4a0edf..48830d1656530 100644 --- a/articles/active-directory/develop/multi-service-web-app-access-storage.md +++ b/articles/active-directory/develop/multi-service-web-app-access-storage.md @@ -1,5 +1,5 @@ --- -title: Tutorial - Web app accesses storage by using managed identities | Azure +title: Tutorial - Web app accesses storage by using managed identities description: In this tutorial, you learn how to access Azure Storage for an app by using managed identities. 
services: storage, app-service-web author: rwike77 diff --git a/articles/active-directory/develop/multi-service-web-app-authentication-app-service.md b/articles/active-directory/develop/multi-service-web-app-authentication-app-service.md index ce52b322df7f0..a3d202ed4db4f 100644 --- a/articles/active-directory/develop/multi-service-web-app-authentication-app-service.md +++ b/articles/active-directory/develop/multi-service-web-app-authentication-app-service.md @@ -1,5 +1,5 @@ --- -title: Tutorial - Add authentication to a web app on Azure App Service | Azure +title: Tutorial - Add authentication to a web app on Azure App Service description: In this tutorial, you learn how to enable authentication and authorization for a web app running on Azure App Service. Limit access to the web app to users in your organization​. services: active-directory, app-service-web author: rwike77 @@ -23,7 +23,7 @@ Learn how to enable authentication for your web app running on Azure App Service App Service provides built-in authentication and authorization support, so you can sign in users and access data by writing minimal or no code in your web app. Using the App Service authentication/authorization module isn't required, but helps simplify authentication and authorization for your app. This article shows how to secure your web app with the App Service authentication/authorization module by using Azure Active Directory (Azure AD) as the identity provider. -The authentication/authorization module is enabled and configured through the Azure portal and app settings. No SDKs, specific languages, or changes to application code are required.​ A variety of identity providers are supported, which includes Azure AD, Microsoft Account, Facebook, Google, and Twitter​​. When the authentication/authorization module is enabled, every incoming HTTP request passes through it before being handled by app code.​​ To learn more, see [Authentication and authorization in Azure App Service](/azure/app-service/overview-authentication-authorization.md). +The authentication/authorization module is enabled and configured through the Azure portal and app settings. No SDKs, specific languages, or changes to application code are required.​ A variety of identity providers are supported, which includes Azure AD, Microsoft Account, Facebook, Google, and Twitter​​. When the authentication/authorization module is enabled, every incoming HTTP request passes through it before being handled by app code.​​ To learn more, see [Authentication and authorization in Azure App Service](../../app-service/overview-authentication-authorization.md). In this tutorial, you learn how to: @@ -38,7 +38,7 @@ In this tutorial, you learn how to: ## Create and publish a web app on App Service -For this tutorial, you need a web app deployed to App Service. You can use an existing web app, or you can follow one of the [ASP.NET Core](/azure/app-service/quickstart-dotnetcore), [Node.js](/azure/app-service/quickstart-nodejs), [Python](/azure/app-service/quickstart-python), or [Java](/azure/app-service/quickstart-java) quickstarts to create and publish a new web app to App Service. +For this tutorial, you need a web app deployed to App Service. You can use an existing web app, or you can follow one of the [ASP.NET Core](../../app-service/quickstart-dotnetcore.md), [Node.js](../../app-service/quickstart-nodejs.md), [Python](../../app-service/quickstart-python.md), or [Java](../../app-service/quickstart-java.md) quickstarts to create and publish a new web app to App Service. 
Whether you use an existing web app or create a new one, take note of the following: @@ -49,7 +49,7 @@ You need these names throughout this tutorial. ## Configure authentication and authorization -You now have a web app running on App Service. Next, you enable authentication and authorization for the web app. You use Azure AD as the identity provider. For more information, see [Configure Azure AD authentication for your App Service application](/azure/app-service/configure-authentication-provider-aad.md). +You now have a web app running on App Service. Next, you enable authentication and authorization for the web app. You use Azure AD as the identity provider. For more information, see [Configure Azure AD authentication for your App Service application](../../app-service/configure-authentication-provider-aad.md). In the [Azure portal](https://portal.azure.com) menu, select **Resource groups**, or search for and select **Resource groups** from any page. diff --git a/articles/active-directory/develop/multi-service-web-app-clean-up-resources.md b/articles/active-directory/develop/multi-service-web-app-clean-up-resources.md index c5430ea555ff3..9ae6800fe2c49 100644 --- a/articles/active-directory/develop/multi-service-web-app-clean-up-resources.md +++ b/articles/active-directory/develop/multi-service-web-app-clean-up-resources.md @@ -1,5 +1,5 @@ --- -title: Tutorial - Clean up resources | Azure +title: Tutorial - Clean up resources description: In this tutorial, you learn how to clean up the Azure resources allocated while creating the web app. services: storage, app-service-web author: rwike77 diff --git a/articles/active-directory/develop/multi-service-web-app-overview.md b/articles/active-directory/develop/multi-service-web-app-overview.md index 247fc626d5d2a..f5d99fc86b19b 100644 --- a/articles/active-directory/develop/multi-service-web-app-overview.md +++ b/articles/active-directory/develop/multi-service-web-app-overview.md @@ -1,5 +1,5 @@ --- -title: Tutorial - Build a secure web app on Azure App Service | Azure +title: Tutorial - Build a secure web app on Azure App Service description: In this tutorial, you learn how to build a web app by using Azure App Service, sign in users to the web app, call Azure Storage, and call Microsoft Graph. services: active-directory, app-service-web, storage, microsoft-graph author: rwike77 diff --git a/articles/active-directory/develop/publisher-verification-overview.md b/articles/active-directory/develop/publisher-verification-overview.md index 873affa7af8eb..c85d581e0c354 100644 --- a/articles/active-directory/develop/publisher-verification-overview.md +++ b/articles/active-directory/develop/publisher-verification-overview.md @@ -1,5 +1,5 @@ --- -title: Publisher verification overview - Microsoft identity platform | Azure +title: Publisher verification overview description: Provides an overview of the publisher verification program for the Microsoft identity platform. Lists the benefits, program requirements, and frequently asked questions. When an application is marked as publisher verified, it means that the publisher has verified their identity using a Microsoft Partner Network account that has completed the verification process and has associated this MPN account with their application registration. 
services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/quickstart-configure-app-access-web-apis.md b/articles/active-directory/develop/quickstart-configure-app-access-web-apis.md index eb4d4a9fc9dec..b20b5671dc36b 100644 --- a/articles/active-directory/develop/quickstart-configure-app-access-web-apis.md +++ b/articles/active-directory/develop/quickstart-configure-app-access-web-apis.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Configure an app to access a web API | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Configure an app to access a web API" description: In this quickstart, you configure an app registration representing a web API in the Microsoft identity platform to enable scoped resource access (permissions) to client applications. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/quickstart-configure-app-expose-web-apis.md b/articles/active-directory/develop/quickstart-configure-app-expose-web-apis.md index 079239c5d920a..55ee2dd6437f4 100644 --- a/articles/active-directory/develop/quickstart-configure-app-expose-web-apis.md +++ b/articles/active-directory/develop/quickstart-configure-app-expose-web-apis.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Register and expose a web API | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Register and expose a web API" description: In this quickstart, your register a web API with the Microsoft identity platform and configure its scopes, exposing it to clients for permissions-based access to the API's resources. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/quickstart-create-new-tenant.md b/articles/active-directory/develop/quickstart-create-new-tenant.md index 989c5ae2c29d6..ec6c882c71056 100644 --- a/articles/active-directory/develop/quickstart-create-new-tenant.md +++ b/articles/active-directory/develop/quickstart-create-new-tenant.md @@ -1,6 +1,5 @@ --- title: "Quickstart: Create an Azure Active Directory tenant" -titleSuffix: Microsoft identity platform description: In this quickstart, you learn how to create an Azure Active Directory tenant for use in developing applications that use the Microsoft identity platform for authentication and authorization. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/quickstart-register-app.md b/articles/active-directory/develop/quickstart-register-app.md index a3438ec1a8f96..da561b27f188b 100644 --- a/articles/active-directory/develop/quickstart-register-app.md +++ b/articles/active-directory/develop/quickstart-register-app.md @@ -1,5 +1,5 @@ --- -title: "Quickstart: Register an app in the Microsoft identity platform | Azure" +title: "Quickstart: Register an app in the Microsoft identity platform" description: In this quickstart, you learn how to register an application with the Microsoft identity platform. 
services: active-directory author: mmacy diff --git a/articles/active-directory/develop/quickstart-v2-android.md b/articles/active-directory/develop/quickstart-v2-android.md index 2e99c792c01d0..39b4aa04ff02d 100644 --- a/articles/active-directory/develop/quickstart-v2-android.md +++ b/articles/active-directory/develop/quickstart-v2-android.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Add sign in with Microsoft to an Android app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Add sign in with Microsoft to an Android app" description: In this quickstart, learn how Android applications can call an API that requires access tokens issued by the Microsoft identity platform. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/quickstart-v2-aspnet-core-web-api.md b/articles/active-directory/develop/quickstart-v2-aspnet-core-web-api.md index 3a32e60784c74..e9a6a20a1e637 100644 --- a/articles/active-directory/develop/quickstart-v2-aspnet-core-web-api.md +++ b/articles/active-directory/develop/quickstart-v2-aspnet-core-web-api.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Protect an ASP.NET Core web API with the Microsoft identity platform | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Protect an ASP.NET Core web API with the Microsoft identity platform" description: In this quickstart, you download and modify a code sample that demonstrates how to protect an ASP.NET Core web API by using the Microsoft identity platform for authorization. services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/quickstart-v2-aspnet-core-webapp-calls-graph.md b/articles/active-directory/develop/quickstart-v2-aspnet-core-webapp-calls-graph.md index a28f300cca315..f28abb7d717b0 100644 --- a/articles/active-directory/develop/quickstart-v2-aspnet-core-webapp-calls-graph.md +++ b/articles/active-directory/develop/quickstart-v2-aspnet-core-webapp-calls-graph.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: ASP.NET Core web app that signs in users and calls Microsoft Graph | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: ASP.NET Core web app that signs in users and calls Microsoft Graph" description: In this quickstart, you learn how an app uses Microsoft.Identity.Web to implement Microsoft sign-in in an ASP.NET Core web app using OpenID Connect and calls Microsoft Graph. 
services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/quickstart-v2-aspnet-core-webapp.md b/articles/active-directory/develop/quickstart-v2-aspnet-core-webapp.md index 74e9664749bcb..ca6363d79ebb6 100644 --- a/articles/active-directory/develop/quickstart-v2-aspnet-core-webapp.md +++ b/articles/active-directory/develop/quickstart-v2-aspnet-core-webapp.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Add sign-in with Microsoft Identity to an ASP.NET Core web app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Add sign-in with Microsoft Identity to an ASP.NET Core web app" description: In this quickstart, you learn how an app implements Microsoft sign-in on an ASP.NET Core web app by using OpenID Connect services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/quickstart-v2-aspnet-webapp.md b/articles/active-directory/develop/quickstart-v2-aspnet-webapp.md index 4e2d7970ae65b..202256cd4b435 100644 --- a/articles/active-directory/develop/quickstart-v2-aspnet-webapp.md +++ b/articles/active-directory/develop/quickstart-v2-aspnet-webapp.md @@ -1,6 +1,5 @@ --- title: "Quickstart: ASP.NET web app that signs in users" -titleSuffix: Microsoft identity platform description: Download and run a code sample that shows how an ASP.NET web app can sign in Azure AD users. services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/quickstart-v2-dotnet-native-aspnet.md b/articles/active-directory/develop/quickstart-v2-dotnet-native-aspnet.md index 8c73733dd6f2b..97245f9695a14 100644 --- a/articles/active-directory/develop/quickstart-v2-dotnet-native-aspnet.md +++ b/articles/active-directory/develop/quickstart-v2-dotnet-native-aspnet.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Call an ASP.NET web API that is protected by the Microsoft identity platform | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Call an ASP.NET web API that is protected by the Microsoft identity platform" description: In this quickstart, learn how to call an ASP.NET web API that's protected by the Microsoft identity platform from a Windows Desktop (WPF) application. services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/quickstart-v2-ios.md b/articles/active-directory/develop/quickstart-v2-ios.md index 17b838b7a3af1..de49d1e510ceb 100644 --- a/articles/active-directory/develop/quickstart-v2-ios.md +++ b/articles/active-directory/develop/quickstart-v2-ios.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Add sign in with Microsoft to an iOS or macOS app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Add sign in with Microsoft to an iOS or macOS app" description: In this quickstart, learn how an iOS or macOS app can sign in users, get an access token from the Microsoft identity platform, and call the Microsoft Graph API. 
services: active-directory author: mmacy diff --git a/articles/active-directory/develop/quickstart-v2-java-daemon.md b/articles/active-directory/develop/quickstart-v2-java-daemon.md index dcedc80e213f7..79d3d25455985 100644 --- a/articles/active-directory/develop/quickstart-v2-java-daemon.md +++ b/articles/active-directory/develop/quickstart-v2-java-daemon.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Call Microsoft Graph from a Java daemon | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Call Microsoft Graph from a Java daemon" description: In this quickstart, you learn how a Java app can get an access token and call an API protected by Microsoft identity platform endpoint, using the app's own identity services: active-directory author: mmacy diff --git a/articles/active-directory/develop/quickstart-v2-java-webapp.md b/articles/active-directory/develop/quickstart-v2-java-webapp.md index c45d885e00490..5d65f778436bf 100644 --- a/articles/active-directory/develop/quickstart-v2-java-webapp.md +++ b/articles/active-directory/develop/quickstart-v2-java-webapp.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Add sign-in with Microsoft to a Java web app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Add sign-in with Microsoft to a Java web app" description: In this quickstart, you'll learn how to add sign-in with Microsoft to a Java web application by using OpenID Connect. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/quickstart-v2-javascript-auth-code-angular.md b/articles/active-directory/develop/quickstart-v2-javascript-auth-code-angular.md index 1fcd19a82778b..b1416e7a25ace 100644 --- a/articles/active-directory/develop/quickstart-v2-javascript-auth-code-angular.md +++ b/articles/active-directory/develop/quickstart-v2-javascript-auth-code-angular.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Sign in users in JavaScript Angular single-page apps (SPA) with auth code and call Microsoft Graph | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Sign in users in JavaScript Angular single-page apps (SPA) with auth code and call Microsoft Graph" description: In this quickstart, learn how a JavaScript Angular single-page application (SPA) can sign in users of personal accounts, work accounts, and school accounts by using the authorization code flow and call Microsoft Graph. services: active-directory author: j-mantu diff --git a/articles/active-directory/develop/quickstart-v2-javascript-auth-code-react.md b/articles/active-directory/develop/quickstart-v2-javascript-auth-code-react.md index 47323e7b6c445..adb0b0d0a8b28 100644 --- a/articles/active-directory/develop/quickstart-v2-javascript-auth-code-react.md +++ b/articles/active-directory/develop/quickstart-v2-javascript-auth-code-react.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Sign in users in JavaScript React single-page apps (SPA) with auth code and call Microsoft Graph | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Sign in users in JavaScript React single-page apps (SPA) with auth code and call Microsoft Graph" description: In this quickstart, learn how a JavaScript React single-page application (SPA) can sign in users of personal accounts, work accounts, and school accounts by using the authorization code flow and call Microsoft Graph. 
services: active-directory author: j-mantu diff --git a/articles/active-directory/develop/quickstart-v2-javascript-auth-code.md b/articles/active-directory/develop/quickstart-v2-javascript-auth-code.md index 1e8fc7fd59929..b91eff359c9b9 100644 --- a/articles/active-directory/develop/quickstart-v2-javascript-auth-code.md +++ b/articles/active-directory/develop/quickstart-v2-javascript-auth-code.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Sign in users in JavaScript single-page apps (SPA) with auth code | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Sign in users in JavaScript single-page apps (SPA) with auth code" description: In this quickstart, learn how a JavaScript single-page application (SPA) can sign in users of personal accounts, work accounts, and school accounts by using the authorization code flow. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/quickstart-v2-javascript.md b/articles/active-directory/develop/quickstart-v2-javascript.md index 428e63f3b19de..b4a27830ac4f1 100644 --- a/articles/active-directory/develop/quickstart-v2-javascript.md +++ b/articles/active-directory/develop/quickstart-v2-javascript.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Sign in users in JavaScript single-page apps | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Sign in users in JavaScript single-page apps" description: In this quickstart, you learn how a JavaScript app can call an API that requires access tokens issued by the Microsoft identity platform. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/quickstart-v2-netcore-daemon.md b/articles/active-directory/develop/quickstart-v2-netcore-daemon.md index f5f89ee2ed60a..a1de62c6b8d4d 100644 --- a/articles/active-directory/develop/quickstart-v2-netcore-daemon.md +++ b/articles/active-directory/develop/quickstart-v2-netcore-daemon.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Get token & call Microsoft Graph in a console app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Get token & call Microsoft Graph in a console app" description: In this quickstart, you learn how a .NET Core sample app can use the client credentials flow to get a token and call Microsoft Graph. 
services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/quickstart-v2-nodejs-console.md b/articles/active-directory/develop/quickstart-v2-nodejs-console.md index 012f69c48e6c3..938added5e0c6 100644 --- a/articles/active-directory/develop/quickstart-v2-nodejs-console.md +++ b/articles/active-directory/develop/quickstart-v2-nodejs-console.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Call Microsoft Graph from a Node.js console app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Call Microsoft Graph from a Node.js console app" description: In this quickstart, you download and run a code sample that shows how a Node.js console application can get an access token and call an API protected by a Microsoft identity platform endpoint, using the app's own identity services: active-directory author: mmacy diff --git a/articles/active-directory/develop/quickstart-v2-nodejs-desktop.md b/articles/active-directory/develop/quickstart-v2-nodejs-desktop.md index 07dae37711a8c..07725f16e3872 100644 --- a/articles/active-directory/develop/quickstart-v2-nodejs-desktop.md +++ b/articles/active-directory/develop/quickstart-v2-nodejs-desktop.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Call Microsoft Graph from a Node.js desktop app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Call Microsoft Graph from a Node.js desktop app" description: In this quickstart, you learn how a Node.js Electron desktop application can sign-in users and get an access token to call an API protected by a Microsoft identity platform endpoint services: active-directory author: mmacy diff --git a/articles/active-directory/develop/quickstart-v2-nodejs-webapp-msal.md b/articles/active-directory/develop/quickstart-v2-nodejs-webapp-msal.md index bf9c4c3ba8cd5..7c1b4c83382e6 100644 --- a/articles/active-directory/develop/quickstart-v2-nodejs-webapp-msal.md +++ b/articles/active-directory/develop/quickstart-v2-nodejs-webapp-msal.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Add authentication to a Node.js web app with MSAL Node | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Add authentication to a Node.js web app with MSAL Node" description: In this quickstart, you learn how to implement authentication with a Node.js web app and the Microsoft Authentication Library (MSAL) for Node.js. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/quickstart-v2-nodejs-webapp.md b/articles/active-directory/develop/quickstart-v2-nodejs-webapp.md index 7d30606fdc9d0..ea921ff8db993 100644 --- a/articles/active-directory/develop/quickstart-v2-nodejs-webapp.md +++ b/articles/active-directory/develop/quickstart-v2-nodejs-webapp.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Add user sign-in to a Node.js web app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Add user sign-in to a Node.js web app" description: In this quickstart, you learn how to implement authentication in a Node.js web application using OpenID Connect. 
services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/quickstart-v2-python-daemon.md b/articles/active-directory/develop/quickstart-v2-python-daemon.md index 2810b40685164..dafd9b999a1cc 100644 --- a/articles/active-directory/develop/quickstart-v2-python-daemon.md +++ b/articles/active-directory/develop/quickstart-v2-python-daemon.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Call Microsoft Graph from a Python daemon | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Call Microsoft Graph from a Python daemon" description: In this quickstart, you learn how a Python process can get an access token and call an API protected by Microsoft identity platform, using the app's own identity services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/quickstart-v2-python-webapp.md b/articles/active-directory/develop/quickstart-v2-python-webapp.md index 97ea891f25cbd..57063d2adc873 100644 --- a/articles/active-directory/develop/quickstart-v2-python-webapp.md +++ b/articles/active-directory/develop/quickstart-v2-python-webapp.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Add sign-in with Microsoft to a Python web app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Add sign-in with Microsoft to a Python web app" description: In this quickstart, learn how a Python web app can sign in users, get an access token from the Microsoft identity platform, and call the Microsoft Graph API. services: active-directory author: CelesteDG diff --git a/articles/active-directory/develop/quickstart-v2-uwp.md b/articles/active-directory/develop/quickstart-v2-uwp.md index d7c7493b7d2de..8b25bca5247cb 100644 --- a/articles/active-directory/develop/quickstart-v2-uwp.md +++ b/articles/active-directory/develop/quickstart-v2-uwp.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Sign in users and call Microsoft Graph in a Universal Windows Platform app | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Sign in users and call Microsoft Graph in a Universal Windows Platform app" description: In this quickstart, learn how a Universal Windows Platform (UWP) application can get an access token and call an API protected by Microsoft identity platform. services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/quickstart-v2-windows-desktop.md b/articles/active-directory/develop/quickstart-v2-windows-desktop.md index a228c6dff6714..41ab2a04e65b5 100644 --- a/articles/active-directory/develop/quickstart-v2-windows-desktop.md +++ b/articles/active-directory/develop/quickstart-v2-windows-desktop.md @@ -1,5 +1,5 @@ --- -title: "Quickstart: Sign in users and call Microsoft Graph in a Windows desktop app | Azure" +title: "Quickstart: Sign in users and call Microsoft Graph in a Windows desktop app" description: In this quickstart, learn how a Windows Presentation Foundation (WPF) application can get an access token and call an API protected by the Microsoft identity platform. 
services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/redirect-uris-ios.md b/articles/active-directory/develop/redirect-uris-ios.md index 2067ad904f536..34fcd9e02a75e 100644 --- a/articles/active-directory/develop/redirect-uris-ios.md +++ b/articles/active-directory/develop/redirect-uris-ios.md @@ -1,6 +1,5 @@ --- -title: Use redirect URIs with MSAL (iOS/macOS) | Azure -titleSuffix: Microsoft identity platform +title: Use redirect URIs with MSAL (iOS/macOS) description: Learn about the differences between the Microsoft Authentication Library for ObjectiveC (MSAL for iOS and macOS) and Azure AD Authentication Library for ObjectiveC (ADAL.ObjC) and how to migrate between them. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/reference-app-manifest.md b/articles/active-directory/develop/reference-app-manifest.md index 4cc4d0d2d7671..35e9d656ebddf 100644 --- a/articles/active-directory/develop/reference-app-manifest.md +++ b/articles/active-directory/develop/reference-app-manifest.md @@ -1,6 +1,5 @@ --- title: Understanding the Azure Active Directory app manifest -titleSuffix: Microsoft identity platform description: Detailed coverage of the Azure Active Directory app manifest, which represents an application's identity configuration in an Azure AD tenant, and is used to facilitate OAuth authorization, consent experience, and more. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/reference-claims-mapping-policy-type.md b/articles/active-directory/develop/reference-claims-mapping-policy-type.md index 55e638a1ee942..4276f59f7641a 100644 --- a/articles/active-directory/develop/reference-claims-mapping-policy-type.md +++ b/articles/active-directory/develop/reference-claims-mapping-policy-type.md @@ -1,6 +1,5 @@ --- title: Claims mapping policy -titleSuffix: Microsoft identity platform description: Learn about the claims mapping policy type, which is used to modify the claims emitted in tokens issued for specific applications. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/reference-saml-tokens.md b/articles/active-directory/develop/reference-saml-tokens.md index c792c12e99774..2e01ae2122bcf 100644 --- a/articles/active-directory/develop/reference-saml-tokens.md +++ b/articles/active-directory/develop/reference-saml-tokens.md @@ -1,6 +1,5 @@ --- -title: SAML 2.0 token claims reference | Azure -titleSuffix: Microsoft identity platform +title: SAML 2.0 token claims reference description: Claims reference with details on the claims included in SAML 2.0 tokens issued by the Microsoft identity platform, including their JWT equivalents. author: kenwith services: active-directory diff --git a/articles/active-directory/develop/reference-third-party-cookies-spas.md b/articles/active-directory/develop/reference-third-party-cookies-spas.md index 71d1fac442ef0..94c5d57837153 100644 --- a/articles/active-directory/develop/reference-third-party-cookies-spas.md +++ b/articles/active-directory/develop/reference-third-party-cookies-spas.md @@ -1,6 +1,5 @@ --- -title: How to handle Intelligent Tracking Protection (ITP) in Safari | Azure -titleSuffix: Microsoft identity platform +title: How to handle Intelligent Tracking Protection (ITP) in Safari description: Single-page app (SPA) authentication when third-party cookies are no longer allowed. 
services: active-directory author: nickludwig diff --git a/articles/active-directory/develop/reference-v2-libraries.md b/articles/active-directory/develop/reference-v2-libraries.md index 833f48c2518f4..aeeeb4415eba6 100644 --- a/articles/active-directory/develop/reference-v2-libraries.md +++ b/articles/active-directory/develop/reference-v2-libraries.md @@ -1,5 +1,5 @@ --- -title: Microsoft identity platform authentication libraries | Azure +title: Microsoft identity platform authentication libraries description: List of client libraries and middleware compatible with the Microsoft identity platform. Use these libraries to add support for user sign-in (authentication) and protected web API access (authorization) to your applications. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/refresh-tokens.md b/articles/active-directory/develop/refresh-tokens.md index 22a7ca457ba9d..42bc9e3dcc2be 100644 --- a/articles/active-directory/develop/refresh-tokens.md +++ b/articles/active-directory/develop/refresh-tokens.md @@ -1,6 +1,5 @@ --- -title: Microsoft identity platform refresh tokens | Azure -titleSuffix: Microsoft identity platform +title: Microsoft identity platform refresh tokens description: Learn about refresh tokens emitted by the Azure AD. services: active-directory author: SHERMANOUKO @@ -29,7 +28,10 @@ Before reading through this article, it's recommended that you go through the fo ## Refresh token lifetime -Refresh tokens have a longer lifetime than access tokens. The default lifetime for the tokens is 90 days and they replace themselves with a fresh token upon every use. As such, whenever a refresh token is used to acquire a new access token, a new refresh token is also issued. The Microsoft identity platform doesn't revoke old refresh tokens when used to fetch new access tokens. Securely delete the old refresh token after acquiring a new one. Refresh tokens need to be stored safely like access tokens or application credentials. +Refresh tokens have a longer lifetime than access tokens. The default lifetime for refresh tokens is 24 hours for [single page apps](reference-third-party-cookies-spas.md) and 90 days for all other scenarios. Refresh tokens replace themselves with a fresh token upon every use. The Microsoft identity platform doesn't revoke old refresh tokens when used to fetch new access tokens. Securely delete the old refresh token after acquiring a new one. Refresh tokens need to be stored safely like access tokens or application credentials. + +>[!IMPORTANT] +> Refresh tokens sent to a redirect URI registered as `spa` expire after 24 hours. Additional refresh tokens acquired using the initial refresh token carry over that expiration time, so apps must be prepared to rerun the authorization code flow with interactive authentication to get a new refresh token every 24 hours. Users do not have to enter their credentials and usually don't even see any related user experience, just a reload of your application. The browser must visit the log-in page in a top-level frame in order to observe the login session. This is due to [privacy features in browsers that block third party cookies](reference-third-party-cookies-spas.md).
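To make the renewal requirement in the note above concrete, the following is a minimal sketch (not part of the upstream article) of how a single-page app using MSAL.js v2 (`@azure/msal-browser`) might fall back to an interactive request once its refresh token has expired; the client ID, scopes, and account selection shown are illustrative placeholders only.

```javascript
// Minimal sketch, assuming @azure/msal-browser v2 in a SPA registered with a `spa` redirect URI.
// "Enter_the_Application_Id_Here" and the ["User.Read"] scope are placeholders for illustration.
import { PublicClientApplication, InteractionRequiredAuthError } from "@azure/msal-browser";

const msalInstance = new PublicClientApplication({
  auth: { clientId: "Enter_the_Application_Id_Here" },
});

async function getAccessToken() {
  const account = msalInstance.getAllAccounts()[0];
  const request = { scopes: ["User.Read"], account };

  try {
    // Succeeds silently while the cached refresh token is still valid (up to 24 hours for SPAs).
    const result = await msalInstance.acquireTokenSilent(request);
    return result.accessToken;
  } catch (error) {
    if (error instanceof InteractionRequiredAuthError) {
      // The refresh token has expired: rerun the authorization code flow in a top-level frame.
      // This navigates away; the response is picked up by handleRedirectPromise() when the app
      // reloads, and users with an active session typically aren't prompted for credentials again.
      await msalInstance.acquireTokenRedirect(request);
      return;
    }
    throw error;
  }
}
```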
## Refresh token expiration diff --git a/articles/active-directory/develop/registration-config-how-to.md b/articles/active-directory/develop/registration-config-how-to.md index 48ff3235a5178..b5815d2101fa3 100644 --- a/articles/active-directory/develop/registration-config-how-to.md +++ b/articles/active-directory/develop/registration-config-how-to.md @@ -1,6 +1,5 @@ --- title: Get the endpoints for an Azure AD app registration -titleSuffix: Microsoft identity platform description: How to find the authentication endpoints for a custom application you're developing or registering with Azure AD. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/reply-url.md b/articles/active-directory/develop/reply-url.md index ca8256c05dec2..f7329f05b3b34 100644 --- a/articles/active-directory/develop/reply-url.md +++ b/articles/active-directory/develop/reply-url.md @@ -1,6 +1,5 @@ --- title: Redirect URI (reply URL) restrictions | Azure AD -titleSuffix: Microsoft identity platform description: A description of the restrictions and limitations on redirect URI (reply URL) format enforced by the Microsoft identity platform. author: madansr7 ms.author: saumadan diff --git a/articles/active-directory/develop/request-custom-claims.md b/articles/active-directory/develop/request-custom-claims.md index c0c22dc0011bc..49e517d91d2c2 100644 --- a/articles/active-directory/develop/request-custom-claims.md +++ b/articles/active-directory/develop/request-custom-claims.md @@ -1,6 +1,5 @@ --- -title: Request custom claims (MSAL iOS/macOS) | Azure -titleSuffix: Microsoft identity platform +title: Request custom claims (MSAL iOS/macOS) description: Learn how to request custom claims. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/sample-v2-code.md b/articles/active-directory/develop/sample-v2-code.md index 448d6b01a40f0..8935ca3701016 100644 --- a/articles/active-directory/develop/sample-v2-code.md +++ b/articles/active-directory/develop/sample-v2-code.md @@ -108,7 +108,7 @@ The following samples show an application that accesses the Microsoft Graph API > |.NET Core| • [Call Microsoft Graph](https://github.com/Azure-Samples/active-directory-dotnetcore-daemon-v2/tree/master/1-Call-MSGraph)
• [Call web API](https://github.com/Azure-Samples/active-directory-dotnetcore-daemon-v2/tree/master/2-Call-OwnApi)
• [Call own web API](https://github.com/Azure-Samples/active-directory-dotnetcore-daemon-v2/tree/master/4-Call-OwnApi-Pop)
• [Using managed identity and Azure key vault](https://github.com/Azure-Samples/active-directory-dotnetcore-daemon-v2/tree/master/3-Using-KeyVault)| MSAL.NET | Client credentials grant| > | ASP.NET|[Multi-tenant with Microsoft identity platform endpoint](https://github.com/Azure-Samples/ms-identity-aspnet-daemon-webapp) | MSAL.NET | Client credentials grant| > | Java | • [Call Microsoft Graph with Secret](https://github.com/Azure-Samples/ms-identity-msal-java-samples/tree/main/1.%20Server-Side%20Scenarios/msal-client-credential-secret)
• [Call Microsoft Graph with Certificate](https://github.com/Azure-Samples/ms-identity-msal-java-samples/tree/main/1.%20Server-Side%20Scenarios/msal-client-credential-certificate)| MSAL Java | Client credentials grant| -> | Node.js | [Sign in users and call web API](https://github.com/Azure-Samples/ms-identity-javascript-nodejs-console) | MSAL Node | Client credentials grant | +> | Node.js | [Call Microsoft Graph with secret](https://github.com/Azure-Samples/ms-identity-javascript-nodejs-console) | MSAL Node | Client credentials grant | > | Python | • [Call Microsoft Graph with secret](https://github.com/Azure-Samples/ms-identity-python-daemon/tree/master/1-Call-MsGraph-WithSecret)
• [Call Microsoft Graph with certificate](https://github.com/Azure-Samples/ms-identity-python-daemon/tree/master/2-Call-MsGraph-WithCertificate) | MSAL Python| Client credentials grant| ## Azure Functions as web APIs diff --git a/articles/active-directory/develop/scenario-daemon-app-configuration.md b/articles/active-directory/develop/scenario-daemon-app-configuration.md index 94d6e6b3c2d2a..1e33e3023fa60 100644 --- a/articles/active-directory/develop/scenario-daemon-app-configuration.md +++ b/articles/active-directory/develop/scenario-daemon-app-configuration.md @@ -1,5 +1,5 @@ --- -title: Configure daemon apps that call web APIs - Microsoft identity platform | Azure +title: Configure daemon apps that call web APIs description: Learn how to configure the code for your daemon application that calls web APIs (app configuration) services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-daemon-app-registration.md b/articles/active-directory/develop/scenario-daemon-app-registration.md index 55356c27ef926..6ba695086a434 100644 --- a/articles/active-directory/develop/scenario-daemon-app-registration.md +++ b/articles/active-directory/develop/scenario-daemon-app-registration.md @@ -1,5 +1,5 @@ --- -title: Register daemon apps that call web APIs - Microsoft identity platform | Azure +title: Register daemon apps that call web APIs description: Learn how to build a daemon app that calls web APIs - app registration services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-daemon-call-api.md b/articles/active-directory/develop/scenario-daemon-call-api.md index 62e131cdd75cf..4d816d6dc9a40 100644 --- a/articles/active-directory/develop/scenario-daemon-call-api.md +++ b/articles/active-directory/develop/scenario-daemon-call-api.md @@ -1,6 +1,5 @@ --- -title: Call a web API from a daemon app | Azure -titleSuffix: Microsoft identity platform +title: Call a web API from a daemon app description: Learn how to build a daemon app that calls a web API. 
services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-daemon-overview.md b/articles/active-directory/develop/scenario-daemon-overview.md index a4c3f173fc62b..46926e5b763ae 100644 --- a/articles/active-directory/develop/scenario-daemon-overview.md +++ b/articles/active-directory/develop/scenario-daemon-overview.md @@ -1,6 +1,5 @@ --- -title: Build a daemon app that calls web APIs | Azure -titleSuffix: Microsoft identity platform +title: Build a daemon app that calls web APIs description: Learn how to build a daemon app that calls web APIs services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-daemon-production.md b/articles/active-directory/develop/scenario-daemon-production.md index b8bffe0f11bf9..74d7aa17f06e6 100644 --- a/articles/active-directory/develop/scenario-daemon-production.md +++ b/articles/active-directory/develop/scenario-daemon-production.md @@ -1,6 +1,5 @@ --- -title: Move a daemon app that calls web APIs to production | Azure -titleSuffix: Microsoft identity platform +title: Move a daemon app that calls web APIs to production description: Learn how to move a daemon app that calls web APIs to production services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-desktop-acquire-token-device-code-flow.md b/articles/active-directory/develop/scenario-desktop-acquire-token-device-code-flow.md index 57b2d715f0d16..bd250f56c2cd0 100644 --- a/articles/active-directory/develop/scenario-desktop-acquire-token-device-code-flow.md +++ b/articles/active-directory/develop/scenario-desktop-acquire-token-device-code-flow.md @@ -1,9 +1,8 @@ --- -title: Acquire a token to call a web API using device code flow (desktop app) | Azure -titleSuffix: Microsoft identity platform +title: Acquire a token to call a web API using device code flow (desktop app) description: Learn how to build a desktop app that calls web APIs to acquire a token for the app using device code flow services: active-directory -author: CelesteDG +author: Dickson-Mwendia manager: CelesteDG ms.service: active-directory @@ -11,7 +10,7 @@ ms.subservice: develop ms.topic: conceptual ms.workload: identity ms.date: 08/25/2021 -ms.author: celested +ms.author: dmwendia ms.custom: aaddev, devx-track-python #Customer intent: As an application developer, I want to know how to write a desktop app that calls web APIs by using the Microsoft identity platform. 
--- diff --git a/articles/active-directory/develop/scenario-desktop-acquire-token-integrated-windows-authentication.md b/articles/active-directory/develop/scenario-desktop-acquire-token-integrated-windows-authentication.md index 2c3689416bb02..830f5d2cd336b 100644 --- a/articles/active-directory/develop/scenario-desktop-acquire-token-integrated-windows-authentication.md +++ b/articles/active-directory/develop/scenario-desktop-acquire-token-integrated-windows-authentication.md @@ -1,9 +1,8 @@ --- -title: Acquire a token to call a web API using integrated Windows authentication (desktop app) | Azure -titleSuffix: Microsoft identity platform +title: Acquire a token to call a web API using integrated Windows authentication (desktop app) description: Learn how to build a desktop app that calls web APIs to acquire a token for the app using integrated Windows authentication services: active-directory -author: CelesteDG +author: Dickson-Mwendia manager: CelesteDG ms.service: active-directory @@ -11,7 +10,7 @@ ms.subservice: develop ms.topic: conceptual ms.workload: identity ms.date: 08/25/2021 -ms.author: celested +ms.author: dmwendia ms.custom: aaddev, devx-track-python #Customer intent: As an application developer, I want to know how to write a desktop app that calls web APIs by using the Microsoft identity platform. --- diff --git a/articles/active-directory/develop/scenario-desktop-acquire-token-interactive.md b/articles/active-directory/develop/scenario-desktop-acquire-token-interactive.md index 012b5ed8fb2b5..e0c423137d79a 100644 --- a/articles/active-directory/develop/scenario-desktop-acquire-token-interactive.md +++ b/articles/active-directory/develop/scenario-desktop-acquire-token-interactive.md @@ -1,9 +1,8 @@ --- -title: Acquire a token to call a web API interactively (desktop app) | Azure -titleSuffix: Microsoft identity platform +title: Acquire a token to call a web API interactively (desktop app) description: Learn how to build a desktop app that calls web APIs to acquire a token for the app interactively services: active-directory -author: CelesteDG +author: Dickson-Mwendia manager: CelesteDG ms.service: active-directory @@ -11,7 +10,7 @@ ms.subservice: develop ms.topic: conceptual ms.workload: identity ms.date: 08/25/2021 -ms.author: celested +ms.author: dmwendia ms.custom: aaddev, devx-track-python, has-adal-ref #Customer intent: As an application developer, I want to know how to write a desktop app that calls web APIs by using the Microsoft identity platform. --- diff --git a/articles/active-directory/develop/scenario-desktop-acquire-token-username-password.md b/articles/active-directory/develop/scenario-desktop-acquire-token-username-password.md index fafb414f36de5..2604fbc2bb0bb 100644 --- a/articles/active-directory/develop/scenario-desktop-acquire-token-username-password.md +++ b/articles/active-directory/develop/scenario-desktop-acquire-token-username-password.md @@ -1,9 +1,8 @@ --- -title: Acquire a token to call a web API using username and password (desktop app) | Azure -titleSuffix: Microsoft identity platform +title: Acquire a token to call a web API using username and password (desktop app) description: Learn how to build a desktop app that calls web APIs to acquire a token for the app using username and password. 
services: active-directory -author: CelesteDG +author: Dickson-Mwendia manager: CelesteDG ms.service: active-directory @@ -11,7 +10,7 @@ ms.subservice: develop ms.topic: conceptual ms.workload: identity ms.date: 08/25/2021 -ms.author: celested +ms.author: dmwendia ms.custom: aaddev, devx-track-python #Customer intent: As an application developer, I want to know how to write a desktop app that calls web APIs by using the Microsoft identity platform. --- diff --git a/articles/active-directory/develop/scenario-desktop-acquire-token-wam.md b/articles/active-directory/develop/scenario-desktop-acquire-token-wam.md index 0a9ee4307c923..c1fcbd358a09f 100644 --- a/articles/active-directory/develop/scenario-desktop-acquire-token-wam.md +++ b/articles/active-directory/develop/scenario-desktop-acquire-token-wam.md @@ -1,17 +1,16 @@ --- -title: Acquire a token to call a web API using web account manager (desktop app) | Azure -titleSuffix: Microsoft identity platform +title: Acquire a token to call a web API using web account manager (desktop app) description: Learn how to build a desktop app that calls web APIs to acquire a token for the app using web account manager services: active-directory -author: CelesteDG +author: Dickson-Mwendia manager: CelesteDG ms.service: active-directory ms.subservice: develop ms.topic: conceptual ms.workload: identity -ms.date: 08/25/2021 -ms.author: celested +ms.date: 06/07/2022 +ms.author: dmwendia ms.custom: aaddev, devx-track-python #Customer intent: As an application developer, I want to know how to write a desktop app that calls web APIs by using the Microsoft identity platform. --- @@ -26,13 +25,13 @@ MSAL 4.25+ supports WAM on UWP, .NET Classic, .NET Core 3.1, and .NET 5. For .NET Classic and .NET Core 3.1, WAM functionality is fully supported but you have to add a reference to [Microsoft.Identity.Client.Desktop](https://www.nuget.org/packages/Microsoft.Identity.Client.Desktop/) package, alongside MSAL, and instead of `WithBroker()`, call `.WithWindowsBroker()`. -For .NET 5, target `net5.0-windows10.0.17763.0` (or higher) and not just `net5.0`. Your app will still run on older versions of Windows if you add `7` in the csproj. MSAL will use a browser when WAM is not available. +For .NET 5, target `net5.0-windows10.0.17763.0` (or higher) and not just `net5.0`. Your app will still run on older versions of Windows if you add `7` in the csproj. MSAL will use a browser when WAM isn't available. ## WAM value proposition Using an authentication broker such as WAM has numerous benefits. -- Enhanced security (your app does not have to manage the powerful refresh token) +- Enhanced security (your app doesn't have to manage the powerful refresh token) - Better support for Windows Hello, Conditional Access and FIDO keys - Integration with Windows' "Email and Accounts" view - Better Single Sign-On (users don't have to reenter passwords) @@ -40,8 +39,8 @@ Using an authentication broker such as WAM has numerous benefits. ## WAM limitations -- B2C and ADFS authorities are not supported. MSAL will fallback to a browser. -- Available on Win10+ and Win Server 2019+. On Mac, Linux and earlier Windows MSAL will fallback to a browser. +- B2C and ADFS authorities aren't supported. MSAL will fall back to a browser. +- Available on Win10+ and Win Server 2019+. On Mac, Linux, and earlier versions of Windows, MSAL will fall back to a browser. - Not available on Xbox. 
## WAM calling pattern @@ -79,11 +78,11 @@ catch (MsalUiRequiredException) // no change in the pattern } ``` -Call `.WithBroker(true)`. If a broker is not present (e.g. Win8.1, Mac, or Linux), then MSAL will fallback to a browser! Redirect URI rules apply to the browser. +Call `.WithBroker(true)`. If a broker isn't present (for example, Win8.1, Mac, or Linux), then MSAL will fall back to a browser. Redirect URI rules apply to the browser. ## Redirect URI -WAM redirect URIs do not need to be configured in MSAL, but they must be configured in the app registration. +WAM redirect URIs don't need to be configured in MSAL, but they must be configured in the app registration. ### Win32 (.NET framework / .NET 5) @@ -102,13 +101,13 @@ ms-appx-web://microsoft.aad.brokerplugin/{client_id} ## Token cache persistence -It's important to persist MSAL's token cache because MSAL needs to save internal WAM account IDs there. Without it, restarting the app means that `GetAccounts` API will miss some of the accounts. Note that on UWP, MSAL knows where to save the token cache. +It's important to persist MSAL's token cache because MSAL needs to save internal WAM account IDs there. Without it, restarting the app means that the `GetAccounts` API will miss some of the accounts. On UWP, MSAL knows where to save the token cache. ## GetAccounts `GetAccounts` returns accounts of users who have previously logged in interactively into the app. -In addition to this, WAM can list the OS-wide Work and School accounts configured in Windows (for Win32 apps but not for UWP apps). To opt-into this feature, set `ListWindowsWorkAndSchoolAccounts` in `WindowsBrokerOptions` to **true**. You can enable it as below. +In addition, WAM can list the OS-wide Work and School accounts configured in Windows (for Win32 apps but not for UWP apps). To opt in to this feature, set `ListWindowsWorkAndSchoolAccounts` in `WindowsBrokerOptions` to **true**. You can enable it as below. ```csharp .WithWindowsBrokerOptions(new WindowsBrokerOptions() @@ -122,13 +121,13 @@ In addition to this, WAM can list the OS-wide Work and School accounts configure ``` >[!NOTE] -> Microsoft (i.e. outlook.com etc.) accounts will not be listed in Win32 nor UWP for privacy reasons. +> Microsoft (outlook.com, etc.) accounts will not be listed in Win32 or UWP for privacy reasons. Applications cannot remove accounts from Windows! ## RemoveAsync -- Removes all account information from MSAL's token cache (this includes MSA - i.e. personal accounts - account info and other account information copied by MSAL into its cache). +- Removes all account information from MSAL's token cache (this includes MSA, that is, personal account information copied by MSAL into its cache). - Removes app-only (not OS-wide) accounts. >[!NOTE] @@ -136,22 +135,22 @@ Applications cannot remove accounts from Windows! ## Other considerations -- WAM's interactive operations require being on the UI thread. MSAL throws a meaningful exception when not on UI thread. This does NOT apply to console apps. +- WAM's interactive operations require being on the UI thread. MSAL throws a meaningful exception when not on the UI thread. This doesn't apply to console apps. - `WithAccount` provides an accelerated authentication experience if the MSAL account was originally obtained via WAM, or, WAM can find a work and school account in Windows. -- WAM is not able to pre-populate the username field with a login hint, unless a Work and School account with the same username is found in Windows.
+- WAM isn't able to pre-populate the username field with a login hint, unless a Work and School account with the same username is found in Windows. - If WAM is unable to offer an accelerated authentication experience, it will show an account picker. Users can add new accounts. !["WAM account picker"](media/scenario-desktop-acquire-token-wam/wam-account-picker.png) -- New accounts are automatically remembered by Windows. Work and School have the option of joining the organization's directory or opting out completely, in which case the account will not appear under "Email & Accounts". Microsoft accounts are automatically added to Windows. Apps cannot list these accounts programmatically (but only through the Account Picker). +- New accounts are automatically remembered by Windows. Work and School accounts have the option of joining the organization's directory or opting out completely, in which case the account won't appear under "Email & Accounts". Microsoft accounts are automatically added to Windows. Apps can't list these accounts programmatically (but only through the Account Picker). ## Troubleshooting -### "Either the user cancelled the authentication or the WAM Account Picker crashed because the app is running in an elevated process" error message +### "Either the user canceled the authentication or the WAM Account Picker crashed because the app is running in an elevated process" error message When an app that uses MSAL is run as an elevated process, some of these calls within WAM may fail due to different process security levels. Internally MSAL.NET uses native Windows methods ([COM](/windows/win32/com/the-component-object-model)) to integrate with WAM. Starting with version 4.32.0, MSAL will display a descriptive error message when it detects that the app process is elevated and WAM returned no accounts. -One solution is to not run the app as elevated, if possible. Another solution is for the app developer to call `WindowsNativeUtils.InitializeProcessSecurity` method when the app starts up. This will set the security of the processes used by WAM to the same levels. See [this sample app](https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/master/tests/devapps/WAM/NetCoreWinFormsWam/Program.cs#L18-L21) for an example. However, note, that this solution is not guaranteed to succeed to due external factors like the underlying CLR behavior. In that case, an `MsalClientException` will be thrown. See issue [#2560](https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/issues/2560) for additional information. +One solution is to not run the app as elevated, if possible. Another solution is for the app developer to call `WindowsNativeUtils.InitializeProcessSecurity` method when the app starts up. This will set the security of the processes used by WAM to the same levels. See [this sample app](https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/master/tests/devapps/WAM/NetCoreWinFormsWam/Program.cs#L18-L21) for an example. However, note that this solution isn't guaranteed to succeed due to external factors like the underlying CLR behavior. In that case, an `MsalClientException` will be thrown. For more information, see issue [#2560](https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/issues/2560).
### "WAM Account Picker did not return an account" error message diff --git a/articles/active-directory/develop/scenario-desktop-acquire-token.md b/articles/active-directory/develop/scenario-desktop-acquire-token.md index 043b1cb3caa8f..80155c315bbab 100644 --- a/articles/active-directory/develop/scenario-desktop-acquire-token.md +++ b/articles/active-directory/develop/scenario-desktop-acquire-token.md @@ -1,9 +1,8 @@ --- -title: Acquire a token to call a web API (desktop app) | Azure -titleSuffix: Microsoft identity platform +title: Acquire a token to call a web API (desktop app) description: Learn how to build a desktop app that calls web APIs to acquire a token for the app services: active-directory -author: CelesteDG +author: Dickson-Mwendia manager: CelesteDG ms.service: active-directory @@ -11,7 +10,7 @@ ms.subservice: develop ms.topic: conceptual ms.workload: identity ms.date: 08/25/2021 -ms.author: celested +ms.author: dmwendia ms.custom: aaddev, devx-track-python, has-adal-ref #Customer intent: As an application developer, I want to know how to write a desktop app that calls web APIs by using the Microsoft identity platform. --- @@ -176,7 +175,7 @@ let accounts = await msalTokenCache.getAllAccounts(); const tokenRequest = { code: response["authorization_code"], - codeVerifier: verifier // PKCE Code Verifier + codeVerifier: verifier, // PKCE Code Verifier redirectUri: "your_redirect_uri", scopes: ["User.Read"], }; diff --git a/articles/active-directory/develop/scenario-desktop-app-configuration.md b/articles/active-directory/develop/scenario-desktop-app-configuration.md index a16007776342b..1a6c8b7dd2e21 100644 --- a/articles/active-directory/develop/scenario-desktop-app-configuration.md +++ b/articles/active-directory/develop/scenario-desktop-app-configuration.md @@ -1,6 +1,5 @@ --- -title: Configure desktop apps that call web APIs | Azure -titleSuffix: Microsoft identity platform +title: Configure desktop apps that call web APIs description: Learn how to configure the code of a desktop app that calls web APIs services: active-directory author: jmprieur @@ -240,51 +239,21 @@ if let application = try? MSALPublicClientApplication(configuration: config) { / # [Node.js](#tab/nodejs) -Configuration parameters can be loaded from many sources, like a JSON file or from environment variables. Below, an *.env* file is used. +Configuration parameters can be loaded from many sources, like a JavaScript file or from environment variables. Below, an *authConfig.js* file is used. -```Text -# Credentials -CLIENT_ID=Enter_the_Application_Id_Here -TENANT_ID=Enter_the_Tenant_Info_Here +:::code language="js" source="~/ms-identity-JavaScript-nodejs-desktop/App/authConfig.js"::: -# Configuration -REDIRECT_URI=msal://redirect - -# Endpoints -AAD_ENDPOINT_HOST=Enter_the_Cloud_Instance_Id_Here -GRAPH_ENDPOINT_HOST=Enter_the_Graph_Endpoint_Here - -# RESOURCES -GRAPH_ME_ENDPOINT=v1.0/me -GRAPH_MAIL_ENDPOINT=v1.0/me/messages - -# SCOPES -GRAPH_SCOPES=User.Read Mail.Read -``` - -Load the *.env* file to environment variables. MSAL Node can be initialized minimally as below. See the available [configuration options](https://github.com/AzureAD/microsoft-authentication-library-for-js/blob/dev/lib/msal-node/docs/configuration.md). +Import the configuration object from *authConfig.js* file. MSAL Node can be initialized minimally as below. See the available [configuration options](https://github.com/AzureAD/microsoft-authentication-library-for-js/blob/dev/lib/msal-node/docs/configuration.md). 
```JavaScript const { PublicClientApplication } = require('@azure/msal-node'); +const { msalConfig } = require('./authConfig') -const MSAL_CONFIG = { - auth: { - clientId: process.env.CLIENT_ID, - authority: `${process.env.AAD_ENDPOINT_HOST}${process.env.TENANT_ID}`, - redirectUri: process.env.REDIRECT_URI, - }, - system: { - loggerOptions: { - loggerCallback(loglevel, message, containsPii) { - console.log(message); - }, - piiLoggingEnabled: false, - logLevel: LogLevel.Verbose, - } - } -}; - -clientApplication = new PublicClientApplication(MSAL_CONFIG); +/** +* Initialize a public client application. For more information, visit: +* https://github.com/AzureAD/microsoft-authentication-library-for-js/blob/dev/lib/msal-node/docs/initialize-public-client-application.md +*/ +clientApplication = new PublicClientApplication(msalConfig); ``` # [Python](#tab/python) diff --git a/articles/active-directory/develop/scenario-desktop-app-registration.md b/articles/active-directory/develop/scenario-desktop-app-registration.md index 2fff477c02b6a..f04089267b154 100644 --- a/articles/active-directory/develop/scenario-desktop-app-registration.md +++ b/articles/active-directory/develop/scenario-desktop-app-registration.md @@ -1,6 +1,5 @@ --- -title: Register desktop apps that call web APIs | Azure -titleSuffix: Microsoft identity platform +title: Register desktop apps that call web APIs description: Learn how to build a desktop app that calls web APIs (app registration) services: active-directory author: jmprieur @@ -52,7 +51,7 @@ Specify the redirect URI for your app by [configuring the platform settings](qui > As a security best practice, we recommend explicitly setting `https://login.microsoftonline.com/common/oauth2/nativeclient` or `http://localhost` as the redirect URI. Some authentication libraries like MSAL.NET use a default value of `urn:ietf:wg:oauth:2.0:oob` when no other redirect URI is specified, which is not recommended. This default will be updated as a breaking change in the next major release. - If you build a native Objective-C or Swift app for macOS, register the redirect URI based on your application's bundle identifier in the following format: `msauth.://auth`. Replace `` with your application's bundle identifier. -- If you build a Node.js Electron app, use a custom file protocol instead of a regular web (https://) redirect URI in order to handle the redirection step of the authorization flow, for instance `msal://redirect`. The custom file protocol name shouldn't be obvious to guess and should follow the suggestions in the [OAuth2.0 specification for Native Apps](https://tools.ietf.org/html/rfc8252#section-7.1). +- If you build a Node.js Electron app, use a custom string protocol instead of a regular web (https://) redirect URI in order to handle the redirection step of the authorization flow, for instance `msal{Your_Application/Client_Id}://auth` (e.g. *msalfa29b4c9-7675-4b61-8a0a-bf7b2b4fda91://auth*). The custom string protocol name shouldn't be obvious to guess and should follow the suggestions in the [OAuth2.0 specification for Native Apps](https://tools.ietf.org/html/rfc8252#section-7.1). - If your app uses only integrated Windows authentication or a username and a password, you don't need to register a redirect URI for your application. These flows do a round trip to the Microsoft identity platform v2.0 endpoint. Your application won't be called back on any specific URI. 
- To distinguish [device code flow](scenario-desktop-acquire-token-device-code-flow.md), [integrated Windows authentication](scenario-desktop-acquire-token-integrated-windows-authentication.md), and a [username and a password](scenario-desktop-acquire-token-username-password.md) from a confidential client application using a client credential flow used in [daemon applications](scenario-daemon-overview.md), none of which requires a redirect URI, configure it as a public client application. To achieve this configuration: diff --git a/articles/active-directory/develop/scenario-desktop-call-api.md b/articles/active-directory/develop/scenario-desktop-call-api.md index 80571ffdce460..b9f92c62552b1 100644 --- a/articles/active-directory/develop/scenario-desktop-call-api.md +++ b/articles/active-directory/develop/scenario-desktop-call-api.md @@ -1,6 +1,5 @@ --- -title: Call web APIs from a desktop app | Azure -titleSuffix: Microsoft identity platform +title: Call web APIs from a desktop app description: Learn how to build a desktop app that calls web APIs services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-desktop-overview.md b/articles/active-directory/develop/scenario-desktop-overview.md index 26b34410db114..69cfc7631a763 100644 --- a/articles/active-directory/develop/scenario-desktop-overview.md +++ b/articles/active-directory/develop/scenario-desktop-overview.md @@ -1,6 +1,5 @@ --- -title: Build a desktop app that calls web APIs | Azure -titleSuffix: Microsoft identity platform +title: Build a desktop app that calls web APIs description: Learn how to build a desktop app that calls web APIs (overview) services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-desktop-production.md b/articles/active-directory/develop/scenario-desktop-production.md index 713097e7fd6e3..912e53954d1e5 100644 --- a/articles/active-directory/develop/scenario-desktop-production.md +++ b/articles/active-directory/develop/scenario-desktop-production.md @@ -1,6 +1,5 @@ --- -title: Move desktop app calling web APIs to production | Azure -titleSuffix: Microsoft identity platform +title: Move desktop app calling web APIs to production description: Learn how to move a desktop app that calls web APIs to production services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-mobile-acquire-token.md b/articles/active-directory/develop/scenario-mobile-acquire-token.md index 0bf8d14e23ad7..9928a1fec46d8 100644 --- a/articles/active-directory/develop/scenario-mobile-acquire-token.md +++ b/articles/active-directory/develop/scenario-mobile-acquire-token.md @@ -1,6 +1,5 @@ --- -title: Acquire a token to call a web API (mobile apps) | Azure -titleSuffix: Microsoft identity platform +title: Acquire a token to call a web API (mobile apps) description: Learn how to build a mobile app that calls web APIs. (Get a token for the app.) 
services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-mobile-app-configuration.md b/articles/active-directory/develop/scenario-mobile-app-configuration.md index 82ca9f06abab3..55616d1973da7 100644 --- a/articles/active-directory/develop/scenario-mobile-app-configuration.md +++ b/articles/active-directory/develop/scenario-mobile-app-configuration.md @@ -1,6 +1,5 @@ --- -title: Configure mobile apps that call web APIs | Azure -titleSuffix: Microsoft identity platform +title: Configure mobile apps that call web APIs description: Learn how to configure your mobile app's code to call a web API services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-mobile-app-registration.md b/articles/active-directory/develop/scenario-mobile-app-registration.md index d816c0cacbceb..d5c7797aef7d0 100644 --- a/articles/active-directory/develop/scenario-mobile-app-registration.md +++ b/articles/active-directory/develop/scenario-mobile-app-registration.md @@ -1,6 +1,5 @@ --- -title: Register mobile apps that call web APIs | Azure -titleSuffix: Microsoft identity platform +title: Register mobile apps that call web APIs description: Learn how to build a mobile app that calls web APIs (app's registration) services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-mobile-call-api.md b/articles/active-directory/develop/scenario-mobile-call-api.md index a40c6cbe820ff..8759d006553a3 100644 --- a/articles/active-directory/develop/scenario-mobile-call-api.md +++ b/articles/active-directory/develop/scenario-mobile-call-api.md @@ -1,6 +1,5 @@ --- -title: Call a web API from a mobile app | Azure -titleSuffix: Microsoft identity platform +title: Call a web API from a mobile app description: Learn how to build a mobile app that calls web APIs. (Call a web API.) services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-mobile-overview.md b/articles/active-directory/develop/scenario-mobile-overview.md index 3a7e97593a324..074db9ee17027 100644 --- a/articles/active-directory/develop/scenario-mobile-overview.md +++ b/articles/active-directory/develop/scenario-mobile-overview.md @@ -1,6 +1,5 @@ --- -title: Build a mobile app that calls web APIs | Azure -titleSuffix: Microsoft identity platform | Azure +title: Build a mobile app that calls web APIs description: Learn how to build a mobile app that calls web APIs (overview) services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-mobile-production.md b/articles/active-directory/develop/scenario-mobile-production.md index 616b1ad8ceb5a..2a24704d3c1fd 100644 --- a/articles/active-directory/develop/scenario-mobile-production.md +++ b/articles/active-directory/develop/scenario-mobile-production.md @@ -1,6 +1,5 @@ --- -title: Prepare mobile app-calling web APIs for production | Azure -titleSuffix: Microsoft identity platform +title: Prepare mobile app-calling web APIs for production description: Learn how to build a mobile app that calls web APIs. (Prepare apps for production.) 
services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-protected-web-api-app-configuration.md b/articles/active-directory/develop/scenario-protected-web-api-app-configuration.md index 7493b06bcbcdb..3e5fdcd4855b3 100644 --- a/articles/active-directory/develop/scenario-protected-web-api-app-configuration.md +++ b/articles/active-directory/develop/scenario-protected-web-api-app-configuration.md @@ -117,7 +117,7 @@ You can create a web API from scratch by using Microsoft.Identity.Web project te #### Starting from an existing ASP.NET Core 3.1 application -ASP.NET Core 3.1 uses the Microsoft.AspNetCore.AzureAD.UI library. The middleware is initialized in the Startup.cs file. +ASP.NET Core 3.1 uses the Microsoft.AspNetCore.Authentication.JwtBearer library. The middleware is initialized in the Startup.cs file. ```csharp using Microsoft.AspNetCore.Authentication.JwtBearer; diff --git a/articles/active-directory/develop/scenario-protected-web-api-app-registration.md b/articles/active-directory/develop/scenario-protected-web-api-app-registration.md index 58fc2c6a5201d..c9524452874e0 100644 --- a/articles/active-directory/develop/scenario-protected-web-api-app-registration.md +++ b/articles/active-directory/develop/scenario-protected-web-api-app-registration.md @@ -1,6 +1,5 @@ --- -title: Protected web API app registration | Azure -titleSuffix: Microsoft identity platform +title: Protected web API app registration description: Learn how to build a protected web API and the information you need to register the app. author: jmprieur manager: CelesteDG diff --git a/articles/active-directory/develop/scenario-protected-web-api-overview.md b/articles/active-directory/develop/scenario-protected-web-api-overview.md index b8672d043d39b..a3a7fe284792d 100644 --- a/articles/active-directory/develop/scenario-protected-web-api-overview.md +++ b/articles/active-directory/develop/scenario-protected-web-api-overview.md @@ -1,6 +1,5 @@ --- title: Protected web API - overview -titleSuffix: Microsoft identity platform description: Learn how to build a protected web API (overview). services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-protected-web-api-production.md b/articles/active-directory/develop/scenario-protected-web-api-production.md index feee2340de35c..32623abf5ac93 100644 --- a/articles/active-directory/develop/scenario-protected-web-api-production.md +++ b/articles/active-directory/develop/scenario-protected-web-api-production.md @@ -1,6 +1,5 @@ --- -title: Move a protected web API to production | Azure -titleSuffix: Microsoft identity platform +title: Move a protected web API to production description: Learn how to build a protected web API (move to production). 
services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-spa-acquire-token.md b/articles/active-directory/develop/scenario-spa-acquire-token.md index 87cc6370f7091..2427bb313c635 100644 --- a/articles/active-directory/develop/scenario-spa-acquire-token.md +++ b/articles/active-directory/develop/scenario-spa-acquire-token.md @@ -1,6 +1,5 @@ --- -title: Acquire a token to call a web API (single-page apps) | Azure -titleSuffix: Microsoft identity platform +title: Acquire a token to call a web API (single-page apps) description: Learn how to build a single-page application (acquire a token to call an API) services: active-directory author: negoe @@ -154,6 +153,8 @@ For success and failure of the silent token acquisition, MSAL Angular provides e import { MsalBroadcastService } from '@azure/msal-angular'; import { EventMessage, EventType } from '@azure/msal-browser'; +import { filter, Subject, takeUntil } from 'rxjs'; + // In app.component.ts export class AppComponent implements OnInit { private readonly _destroying$ = new Subject(); @@ -226,7 +227,7 @@ For success and failure of the silent token acquisition, MSAL Angular provides c ```javascript // In app.component.ts ngOnInit() { - this.subscription= this.broadcastService.subscribe("msal:acquireTokenFailure", (payload) => { + this.subscription = this.broadcastService.subscribe("msal:acquireTokenFailure", (payload) => { }); } ngOnDestroy() { @@ -394,15 +395,18 @@ You can use optional claims for the following purposes: To request optional claims in `IdToken`, you can send a stringified claims object to the `claimsRequest` field of the `AuthenticationParameters.ts` class. ```javascript -"optionalClaims": - { - "idToken": [ - { - "name": "auth_time", - "essential": true - } - ], - +var claims = { + optionalClaims: + { + idToken: [ + { + name: "auth_time", + essential: true + } + ], + } +}; + var request = { scopes: ["user.read"], claimsRequest: JSON.stringify(claims) diff --git a/articles/active-directory/develop/scenario-spa-app-configuration.md b/articles/active-directory/develop/scenario-spa-app-configuration.md index 7f0eeb9c23dbf..c4cf6239587a3 100644 --- a/articles/active-directory/develop/scenario-spa-app-configuration.md +++ b/articles/active-directory/develop/scenario-spa-app-configuration.md @@ -1,6 +1,5 @@ --- -title: Configure single-page app | Azure -titleSuffix: Microsoft identity platform +title: Configure single-page app description: Learn how to build a single-page application (app's code configuration) services: active-directory author: mmacy diff --git a/articles/active-directory/develop/scenario-spa-app-registration.md b/articles/active-directory/develop/scenario-spa-app-registration.md index bd6b5bef1e6d0..530b20f746ac5 100644 --- a/articles/active-directory/develop/scenario-spa-app-registration.md +++ b/articles/active-directory/develop/scenario-spa-app-registration.md @@ -1,6 +1,5 @@ --- -title: Register single-page applications (SPA) | Azure -titleSuffix: Microsoft identity platform +title: Register single-page applications (SPA) description: Learn how to build a single-page application (app registration) services: active-directory author: mmacy diff --git a/articles/active-directory/develop/scenario-spa-call-api.md b/articles/active-directory/develop/scenario-spa-call-api.md index 38ae1bfc64014..48cd2bc2775e9 100644 --- a/articles/active-directory/develop/scenario-spa-call-api.md +++ b/articles/active-directory/develop/scenario-spa-call-api.md @@ -1,6 +1,5 @@ --- title: Build 
single-page app calling a web API -titleSuffix: Microsoft identity platform description: Learn how to build a single-page application that calls a web API services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/scenario-spa-overview.md b/articles/active-directory/develop/scenario-spa-overview.md index 9e4405c519499..94b8152631877 100644 --- a/articles/active-directory/develop/scenario-spa-overview.md +++ b/articles/active-directory/develop/scenario-spa-overview.md @@ -1,6 +1,5 @@ --- title: JavaScript single-page app scenario -titleSuffix: Microsoft identity platform description: Learn how to build a single-page application (scenario overview) by using the Microsoft identity platform. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/scenario-spa-production.md b/articles/active-directory/develop/scenario-spa-production.md index c8f2c1f243590..0991dc4634576 100644 --- a/articles/active-directory/develop/scenario-spa-production.md +++ b/articles/active-directory/develop/scenario-spa-production.md @@ -1,6 +1,5 @@ --- title: Move single-page app to production -titleSuffix: Microsoft identity platform description: Learn how to build a single-page application (move to production) services: active-directory author: mmacy diff --git a/articles/active-directory/develop/scenario-spa-sign-in.md b/articles/active-directory/develop/scenario-spa-sign-in.md index 41406d638a487..c4c360064e928 100644 --- a/articles/active-directory/develop/scenario-spa-sign-in.md +++ b/articles/active-directory/develop/scenario-spa-sign-in.md @@ -1,6 +1,5 @@ --- title: Single-page app sign-in & sign-out -titleSuffix: Microsoft identity platform description: Learn how to build a single-page application (sign-in) services: active-directory author: mmacy @@ -28,7 +27,7 @@ Before you can get tokens to access APIs in your application, you need an authen You can also optionally pass the scopes of the APIs for which you need the user to consent at the time of sign-in. > [!NOTE] -> If your application already has access to an authenticated user context or ID token, you can skip the login step and directly acquire tokens. For details, see [SSO without MSAL.js login](msal-js-sso.md#sso-without-msaljs-login). +> If your application already has access to an authenticated user context or ID token, you can skip the login step and directly acquire tokens. For details, see [SSO with user hint](msal-js-sso.md#with-user-hint). ## Choosing between a pop-up or redirect experience diff --git a/articles/active-directory/develop/scenario-web-api-call-api-acquire-token.md b/articles/active-directory/develop/scenario-web-api-call-api-acquire-token.md index 42d6f3dbf9f2c..24610c0ca643f 100644 --- a/articles/active-directory/develop/scenario-web-api-call-api-acquire-token.md +++ b/articles/active-directory/develop/scenario-web-api-call-api-acquire-token.md @@ -1,6 +1,5 @@ --- -title: Get a token for a web API that calls web APIs | Azure -titleSuffix: Microsoft identity platform +title: Get a token for a web API that calls web APIs description: Learn how to build a web API that calls web APIs that require acquiring a token for the app. 
services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-web-api-call-api-app-configuration.md b/articles/active-directory/develop/scenario-web-api-call-api-app-configuration.md index c8ec084eadb25..1f5a3e4e20d88 100644 --- a/articles/active-directory/develop/scenario-web-api-call-api-app-configuration.md +++ b/articles/active-directory/develop/scenario-web-api-call-api-app-configuration.md @@ -1,6 +1,5 @@ --- -title: Configure a web API that calls web APIs | Azure -titleSuffix: Microsoft identity platform +title: Configure a web API that calls web APIs description: Learn how to build a web API that calls web APIs (app's code configuration) services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-web-api-call-api-app-registration.md b/articles/active-directory/develop/scenario-web-api-call-api-app-registration.md index 0256bb2989c9b..09b55e20c597a 100644 --- a/articles/active-directory/develop/scenario-web-api-call-api-app-registration.md +++ b/articles/active-directory/develop/scenario-web-api-call-api-app-registration.md @@ -1,6 +1,5 @@ --- -title: Register a web API that calls web APIs | Azure -titleSuffix: Microsoft identity platform +title: Register a web API that calls web APIs description: Learn how to build a web API that calls downstream web APIs (app registration). services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-web-api-call-api-call-api.md b/articles/active-directory/develop/scenario-web-api-call-api-call-api.md index edd16f04647a0..bdf8e89babb8a 100644 --- a/articles/active-directory/develop/scenario-web-api-call-api-call-api.md +++ b/articles/active-directory/develop/scenario-web-api-call-api-call-api.md @@ -1,6 +1,5 @@ --- -title: Web API that calls web APIs | Azure -titleSuffix: Microsoft identity platform +title: Web API that calls web APIs description: Learn how to build a web API that calls web APIs. services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-web-api-call-api-overview.md b/articles/active-directory/develop/scenario-web-api-call-api-overview.md index 6128388f34991..f71ed5e85f045 100644 --- a/articles/active-directory/develop/scenario-web-api-call-api-overview.md +++ b/articles/active-directory/develop/scenario-web-api-call-api-overview.md @@ -1,6 +1,5 @@ --- -title: Build a web API that calls web APIs | Azure -titleSuffix: Microsoft identity platform +title: Build a web API that calls web APIs description: Learn how to build a web API that calls downstream web APIs (overview). services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-web-api-call-api-production.md b/articles/active-directory/develop/scenario-web-api-call-api-production.md index 39e29b439dcb0..fe89a359848d5 100644 --- a/articles/active-directory/develop/scenario-web-api-call-api-production.md +++ b/articles/active-directory/develop/scenario-web-api-call-api-production.md @@ -1,6 +1,5 @@ --- -title: Move web API calling web APIs to production | Azure -titleSuffix: Microsoft identity platform +title: Move web API calling web APIs to production description: Learn how to move a web API that calls web APIs to production. 
services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-web-app-call-api-acquire-token.md b/articles/active-directory/develop/scenario-web-app-call-api-acquire-token.md index f1e3e0501ade4..2cfc69ae7af05 100644 --- a/articles/active-directory/develop/scenario-web-app-call-api-acquire-token.md +++ b/articles/active-directory/develop/scenario-web-app-call-api-acquire-token.md @@ -1,6 +1,5 @@ --- -title: Get a token in a web app that calls web APIs | Azure -titleSuffix: Microsoft identity platform +title: Get a token in a web app that calls web APIs description: Learn how to acquire a token for a web app that calls web APIs services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-web-app-call-api-app-configuration.md b/articles/active-directory/develop/scenario-web-app-call-api-app-configuration.md index ed9b7c319ee07..fa25cb72c1da1 100644 --- a/articles/active-directory/develop/scenario-web-app-call-api-app-configuration.md +++ b/articles/active-directory/develop/scenario-web-app-call-api-app-configuration.md @@ -1,6 +1,5 @@ --- -title: Configure a web app that calls web APIs | Azure -titleSuffix: Microsoft identity platform +title: Configure a web app that calls web APIs description: Learn how to configure the code of a web app that calls web APIs services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-web-app-call-api-app-registration.md b/articles/active-directory/develop/scenario-web-app-call-api-app-registration.md index e206175741f7d..733d87599f534 100644 --- a/articles/active-directory/develop/scenario-web-app-call-api-app-registration.md +++ b/articles/active-directory/develop/scenario-web-app-call-api-app-registration.md @@ -1,6 +1,5 @@ --- -title: Register a web app that calls web APIs | Azure -titleSuffix: Microsoft identity platform +title: Register a web app that calls web APIs description: Learn how to register a web app that calls web APIs services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-web-app-call-api-call-api.md b/articles/active-directory/develop/scenario-web-app-call-api-call-api.md index 6caa7be1bc296..587fadc795154 100644 --- a/articles/active-directory/develop/scenario-web-app-call-api-call-api.md +++ b/articles/active-directory/develop/scenario-web-app-call-api-call-api.md @@ -1,6 +1,5 @@ --- -title: Call a web api from a web app | Azure -titleSuffix: Microsoft identity platform +title: Call a web api from a web app description: Learn how to build a web app that calls web APIs (calling a protected web API) services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-web-app-call-api-overview.md b/articles/active-directory/develop/scenario-web-app-call-api-overview.md index 60102d5655995..2d7eacf1c6f68 100644 --- a/articles/active-directory/develop/scenario-web-app-call-api-overview.md +++ b/articles/active-directory/develop/scenario-web-app-call-api-overview.md @@ -1,6 +1,5 @@ --- -title: Build a web app that authenticates users and calls web APIs | Azure -titleSuffix: Microsoft identity platform +title: Build a web app that authenticates users and calls web APIs description: Learn how to build a web app that authenticates users and calls web APIs (overview) services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-web-app-call-api-production.md 
b/articles/active-directory/develop/scenario-web-app-call-api-production.md index 7d90bfa977c58..a61457305ccf2 100644 --- a/articles/active-directory/develop/scenario-web-app-call-api-production.md +++ b/articles/active-directory/develop/scenario-web-app-call-api-production.md @@ -1,6 +1,5 @@ --- -title: Move to production a web app that calls web APIs | Azure -titleSuffix: Microsoft identity platform +title: Move to production a web app that calls web APIs description: Learn how to move to production a web app that calls web APIs. services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-web-app-call-api-sign-in.md b/articles/active-directory/develop/scenario-web-app-call-api-sign-in.md index a307096168bcc..b7b729bd536f8 100644 --- a/articles/active-directory/develop/scenario-web-app-call-api-sign-in.md +++ b/articles/active-directory/develop/scenario-web-app-call-api-sign-in.md @@ -1,6 +1,5 @@ --- -title: Remove accounts from the token cache on sign-out | Azure -titleSuffix: Microsoft identity platform +title: Remove accounts from the token cache on sign-out description: Learn how to remove an account from the token cache on sign-out services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-web-app-sign-user-app-configuration.md b/articles/active-directory/develop/scenario-web-app-sign-user-app-configuration.md index b9a59a676984a..cf903ee7b3887 100644 --- a/articles/active-directory/develop/scenario-web-app-sign-user-app-configuration.md +++ b/articles/active-directory/develop/scenario-web-app-sign-user-app-configuration.md @@ -1,6 +1,5 @@ --- -title: Configure a web app that signs in users | Azure -titleSuffix: Microsoft identity platform +title: Configure a web app that signs in users description: Learn how to build a web app that signs in users (code configuration) services: active-directory author: jmprieur @@ -180,31 +179,15 @@ In the Azure portal, the reply URIs that you register on the **Authentication** # [Node.js](#tab/nodejs) -Here, the configuration parameters reside in `index.js` +Here, the configuration parameters reside in *.env* as environment variables: -```javascript +:::code language="text" source="~/ms-identity-node/App/.env"::: -const REDIRECT_URI = "http://localhost:3000/redirect"; +These parameters are used to create a configuration object in *authConfig.js* file, which will eventually be used to initialize MSAL Node: -const config = { - auth: { - clientId: "Enter_the_Application_Id_Here", - authority: "https://login.microsoftonline.com/Enter_the_Tenant_Info_Here/", - clientSecret: "Enter_the_Client_Secret_Here" - }, - system: { - loggerOptions: { - loggerCallback(loglevel, message, containsPii) { - console.log(message); - }, - piiLoggingEnabled: false, - logLevel: msal.LogLevel.Verbose, - } - } -}; -``` +:::code language="js" source="~/ms-identity-node/App/authConfig.js"::: -In the Azure portal, the reply URIs that you register on the Authentication page for your application need to match the redirectUri instances that the application defines (`http://localhost:3000/redirect`). +In the Azure portal, the reply URIs that you register on the Authentication page for your application need to match the redirectUri instances that the application defines (`http://localhost:3000/auth/redirect`). > [!NOTE] > This quickstart proposes to store the client secret in the configuration file for simplicity. 
In your production app, you'd want to use other ways to store your secret, such as a key vault or an environment variable. @@ -350,12 +333,9 @@ For details about the authorization code flow that this method triggers, see the # [Node.js](#tab/nodejs) -```javascript -const msal = require('@azure/msal-node'); +Node sample the Express framework. MSAL is initialized in *auth* route handler: -// Create msal application object -const cca = new msal.ConfidentialClientApplication(config); -``` +:::code language="js" source="~/ms-identity-node/App/routes/auth.js" range="6-16"::: # [Python](#tab/python) diff --git a/articles/active-directory/develop/scenario-web-app-sign-user-app-registration.md b/articles/active-directory/develop/scenario-web-app-sign-user-app-registration.md index 2e7f4999fc082..a8966172ab92c 100644 --- a/articles/active-directory/develop/scenario-web-app-sign-user-app-registration.md +++ b/articles/active-directory/develop/scenario-web-app-sign-user-app-registration.md @@ -1,6 +1,5 @@ --- -title: Register a web app that signs in users | Azure -titleSuffix: Microsoft identity platform +title: Register a web app that signs in users description: Learn how to register a web app that signs in users services: active-directory author: jmprieur @@ -96,8 +95,8 @@ By default, the sample uses: 1. When the **Register an application page** appears, enter your application's registration information: 1. Enter a **Name** for your application, for example `node-webapp`. Users of your app might see this name, and you can change it later. - 1. Change **Supported account types** to **Accounts in any organizational directory and personal Microsoft accounts (e.g. Skype, Xbox, Outlook.com)**. - 1. In the **Redirect URI (optional)** section, select **Web** in the combo box and enter the following redirect URI: `http://localhost:3000/redirect`. + 1. Change **Supported account types** to **Accounts in this organizational directory only**. + 1. In the **Redirect URI (optional)** section, select **Web** in the combo box and enter the following redirect URI: `http://localhost:3000/auth/redirect`. 1. Select **Register** to create the application. 1. On the app's **Overview** page, find the **Application (client) ID** value and record it for later. You'll need it to configure the configuration file for this project. 1. Under **Manage**, select **Certificates & secrets**. 
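To make the Node.js configuration flow described above more concrete, the following is a minimal sketch of an `authConfig.js`-style module that reads the registration values from a *.env* file and is then used to initialize MSAL Node. The file layout, environment-variable names, and exports are illustrative assumptions for this sketch, not necessarily the exact code in the referenced ms-identity-node sample.

```javascript
// authConfig.js — illustrative sketch only; names are assumptions, not the exact ms-identity-node sample code.
require('dotenv').config();
const msal = require('@azure/msal-node');

const msalConfig = {
    auth: {
        clientId: process.env.CLIENT_ID,         // Application (client) ID from the app registration
        authority: `https://login.microsoftonline.com/${process.env.TENANT_ID}`,
        clientSecret: process.env.CLIENT_SECRET,  // For production, prefer a certificate or Azure Key Vault
    },
    system: {
        loggerOptions: {
            loggerCallback(logLevel, message, containsPii) {
                if (!containsPii) console.log(message);
            },
            piiLoggingEnabled: false,
        },
    },
};

// The confidential client application is built from this configuration,
// typically where the /auth routes are handled.
const msalInstance = new msal.ConfidentialClientApplication(msalConfig);

module.exports = { msalConfig, msalInstance };
```

Whatever names you use, the redirect URI registered in the portal (`http://localhost:3000/auth/redirect`) must match the `redirectUri` value the app supplies when it builds the authorization code URL and when it redeems the returned code for tokens.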
diff --git a/articles/active-directory/develop/scenario-web-app-sign-user-overview.md b/articles/active-directory/develop/scenario-web-app-sign-user-overview.md index fd7700143ebf4..5f25eb4c08871 100644 --- a/articles/active-directory/develop/scenario-web-app-sign-user-overview.md +++ b/articles/active-directory/develop/scenario-web-app-sign-user-overview.md @@ -1,6 +1,5 @@ --- -title: Sign in users from a Web app | Azure -titleSuffix: Microsoft identity platform +title: Sign in users from a Web app description: Learn how to build a web app that signs in users (overview) services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-web-app-sign-user-production.md b/articles/active-directory/develop/scenario-web-app-sign-user-production.md index 4c58c6e963eb0..2776555c01dda 100644 --- a/articles/active-directory/develop/scenario-web-app-sign-user-production.md +++ b/articles/active-directory/develop/scenario-web-app-sign-user-production.md @@ -1,6 +1,5 @@ --- -title: Move web app that signs in users to production | Azure -titleSuffix: Microsoft identity platform +title: Move web app that signs in users to production description: Learn how to build a web app that signs in users (move to production) services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/scenario-web-app-sign-user-sign-in.md b/articles/active-directory/develop/scenario-web-app-sign-user-sign-in.md index 9e70e56361a44..c53d1de74057d 100644 --- a/articles/active-directory/develop/scenario-web-app-sign-user-sign-in.md +++ b/articles/active-directory/develop/scenario-web-app-sign-user-sign-in.md @@ -1,6 +1,5 @@ --- -title: Write a web app that signs in/out users | Azure -titleSuffix: Microsoft identity platform +title: Write a web app that signs in/out users description: Learn how to build a web app that signs in/out users services: active-directory author: jmprieur @@ -72,7 +71,7 @@ else # [Java](#tab/java) -In our Java quickstart, the sign-in button is located in the [main/resources/templates/index.html](https://github.com/Azure-Samples/ms-identity-java-webapp/blob/master/msal-java-webapp-sample/src/main/resources/templates/index.html) file. +In the Java quickstart, the sign-in button is located in the [main/resources/templates/index.html](https://github.com/Azure-Samples/ms-identity-java-webapp/blob/master/msal-java-webapp-sample/src/main/resources/templates/index.html) file. ```html @@ -94,13 +93,13 @@ In our Java quickstart, the sign-in button is located in the [main/resources/tem # [Node.js](#tab/nodejs) -In the Node.js quickstart, there's no sign-in button. The code-behind automatically prompts the user for sign-in when it's reaching the root of the web app. +In the Node.js quickstart, the code for the sign-in button is located in *index.hbs* template file. -```javascript -app.get('/', (req, res) => { - // authentication logic -}); -``` +:::code language="hbs" source="~/ms-identity-node/App/views/index.hbs" range="10-11"::: + +This template is served via the main (index) route of the app: + +:::code language="js" source="~/ms-identity-node/App/routes/index.js" range="6-15"::: # [Python](#tab/python) @@ -169,40 +168,9 @@ public class AuthPageController { # [Node.js](#tab/nodejs) -Unlike other platforms, here the MSAL Node takes care of letting the user sign in from the login page. 
- -```javascript - -// 1st leg of auth code flow: acquire a code -app.get('/', (req, res) => { - const authCodeUrlParameters = { - scopes: ["user.read"], - redirectUri: REDIRECT_URI, - }; - - // get url to sign user in and consent to scopes needed for application - pca.getAuthCodeUrl(authCodeUrlParameters).then((response) => { - res.redirect(response); - }).catch((error) => console.log(JSON.stringify(error))); -}); - -// 2nd leg of auth code flow: exchange code for token -app.get('/redirect', (req, res) => { - const tokenRequest = { - code: req.query.code, - scopes: ["user.read"], - redirectUri: REDIRECT_URI, - }; - - pca.acquireTokenByCode(tokenRequest).then((response) => { - console.log("\nResponse: \n:", response); - res.sendStatus(200); - }).catch((error) => { - console.log(error); - res.status(500).send(error); - }); -}); -``` +When the user selects the **Sign in** link, which triggers the `/auth/signin` route, the sign-in controller takes over to authenticate the user with Microsoft identity platform. + +:::code language="js" source="~/ms-identity-node/App/routes/auth.js" range="27-107, 135-161"::: # [Python](#tab/python) @@ -355,7 +323,7 @@ In our Java quickstart, the sign-out button is located in the main/resources/tem # [Node.js](#tab/nodejs) -This sample application does not implement sign-out. +:::code language="hbs" source="~/ms-identity-node/App/views/index.hbs" range="2, 8"::: # [Python](#tab/python) @@ -431,7 +399,9 @@ In Java, sign-out is handled by calling the Microsoft identity platform `logout` # [Node.js](#tab/nodejs) -This sample application does not implement sign-out. +When the user selects the **Sign out** button, the app triggers the `/signout` route, which destroys the session and redirects the browser to Microsoft identity platform sign-out endpoint. + +:::code language="js" source="~/ms-identity-node/App/routes/auth.js" range="163-174"::: # [Python](#tab/python) @@ -479,7 +449,7 @@ In the Java quickstart, the post-logout redirect URI just displays the index.htm # [Node.js](#tab/nodejs) -This sample application does not implement sign-out. +In the Node quickstart, the post-logout redirect URI is used to redirect the browser back to sample home page after the user completes the logout process with the Microsoft identity platform. # [Python](#tab/python) @@ -494,4 +464,4 @@ If you want to learn more about sign-out, read the protocol documentation that's ## Next steps Move on to the next article in this scenario, -[Move to production](scenario-web-app-sign-user-production.md). \ No newline at end of file +[Move to production](scenario-web-app-sign-user-production.md). diff --git a/articles/active-directory/develop/secure-group-access-control.md b/articles/active-directory/develop/secure-group-access-control.md index ef8053e29c3fa..d26b1a79cf3a2 100644 --- a/articles/active-directory/develop/secure-group-access-control.md +++ b/articles/active-directory/develop/secure-group-access-control.md @@ -1,5 +1,5 @@ --- -title: Secure access control using groups in Azure AD - Microsoft identity platform +title: Secure access control using groups in Azure AD description: Learn about how groups are used to securely control access to resources in Azure AD. 
services: active-directory author: chrischiedo diff --git a/articles/active-directory/develop/secure-least-privileged-access.md b/articles/active-directory/develop/secure-least-privileged-access.md index 8228300a5af1a..fa98d4186d339 100644 --- a/articles/active-directory/develop/secure-least-privileged-access.md +++ b/articles/active-directory/develop/secure-least-privileged-access.md @@ -1,6 +1,5 @@ --- title: "Increase app security with the principle of least privilege" -titleSuffix: Microsoft identity platform description: Learn how the principle of least privilege can help increase the security of your application, its data, and which features of the Microsoft identity platform you can use to implement least privileged access. services: active-directory author: Chrispine-Chiedo diff --git a/articles/active-directory/develop/security-best-practices-for-app-registration.md b/articles/active-directory/develop/security-best-practices-for-app-registration.md index d1be63024f8cb..05808cdf99be5 100644 --- a/articles/active-directory/develop/security-best-practices-for-app-registration.md +++ b/articles/active-directory/develop/security-best-practices-for-app-registration.md @@ -1,5 +1,5 @@ --- -title: Best practices for Azure AD application registration configuration - Microsoft identity platform +title: Best practices for Azure AD application registration configuration description: Learn about a set of best practices and general guidance on Azure AD application registration configuration. services: active-directory author: Chrispine-Chiedo diff --git a/articles/active-directory/develop/security-tokens.md b/articles/active-directory/develop/security-tokens.md index 1ddb6269ef2b7..a16d1a944b334 100644 --- a/articles/active-directory/develop/security-tokens.md +++ b/articles/active-directory/develop/security-tokens.md @@ -1,6 +1,5 @@ --- -title: Security tokens | Azure -titleSuffix: Microsoft identity platform +title: Security tokens description: Learn about the basics of security tokens in the Microsoft identity platform. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/single-and-multi-tenant-apps.md b/articles/active-directory/develop/single-and-multi-tenant-apps.md index 09a80213bf1e8..716ec1a823242 100644 --- a/articles/active-directory/develop/single-and-multi-tenant-apps.md +++ b/articles/active-directory/develop/single-and-multi-tenant-apps.md @@ -1,6 +1,5 @@ --- title: Single and multi-tenant apps in Azure AD -titleSuffix: Microsoft identity platform description: Learn about the features and differences between single-tenant and multi-tenant apps in Azure AD. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/single-multi-account.md b/articles/active-directory/develop/single-multi-account.md index 4133e7b09dfc5..6b4544e7be7f8 100644 --- a/articles/active-directory/develop/single-multi-account.md +++ b/articles/active-directory/develop/single-multi-account.md @@ -1,5 +1,5 @@ --- -title: Single and multiple account public client apps | Azure +title: Single and multiple account public client apps description: An overview of single and multiple account public client apps. 
services: active-directory author: shoatman diff --git a/articles/active-directory/develop/single-page-app-quickstart.md b/articles/active-directory/develop/single-page-app-quickstart.md index ac6a837a28f51..d5d57010ce02f 100644 --- a/articles/active-directory/develop/single-page-app-quickstart.md +++ b/articles/active-directory/develop/single-page-app-quickstart.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Sign in users in single-page apps (SPA) with auth code | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Sign in users in single-page apps (SPA) with auth code" description: In this quickstart, learn how a JavaScript single-page application (SPA) can sign in users of personal accounts, work accounts, and school accounts by using the authorization code flow. services: active-directory author: Dickson-Mwendia diff --git a/articles/active-directory/develop/single-sign-on-macos-ios.md b/articles/active-directory/develop/single-sign-on-macos-ios.md index f2fcce9891c87..34ad1e937f83d 100644 --- a/articles/active-directory/develop/single-sign-on-macos-ios.md +++ b/articles/active-directory/develop/single-sign-on-macos-ios.md @@ -1,6 +1,5 @@ --- title: Configure SSO on macOS and iOS -titleSuffix: Microsoft identity platform description: Learn how to configure single sign on (SSO) on macOS and iOS. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/single-sign-on-saml-protocol.md b/articles/active-directory/develop/single-sign-on-saml-protocol.md index ab4b57acce478..a83322ec260c4 100644 --- a/articles/active-directory/develop/single-sign-on-saml-protocol.md +++ b/articles/active-directory/develop/single-sign-on-saml-protocol.md @@ -1,6 +1,5 @@ --- title: Azure Single Sign On SAML Protocol -titleSuffix: Microsoft identity platform description: This article describes the Single Sign-On (SSO) SAML protocol in Azure Active Directory services: active-directory documentationcenter: .net diff --git a/articles/active-directory/develop/ssl-issues.md b/articles/active-directory/develop/ssl-issues.md index f1f3251dcbcf3..2b3e5f983d20f 100644 --- a/articles/active-directory/develop/ssl-issues.md +++ b/articles/active-directory/develop/ssl-issues.md @@ -1,6 +1,5 @@ --- -title: Troubleshoot TLS/SSL issues (MSAL iOS/macOS) | Azure -titleSuffix: Microsoft identity platform +title: Troubleshoot TLS/SSL issues (MSAL iOS/macOS) description: Learn what to do about various problems using TLS/SSL certificates with the MSAL.Objective-C library. 
services: active-directory author: mmacy diff --git a/articles/active-directory/develop/sso-between-adal-msal-apps-macos-ios.md b/articles/active-directory/develop/sso-between-adal-msal-apps-macos-ios.md index 12cd9fd44b9cc..50cfb0fbfe9f3 100644 --- a/articles/active-directory/develop/sso-between-adal-msal-apps-macos-ios.md +++ b/articles/active-directory/develop/sso-between-adal-msal-apps-macos-ios.md @@ -1,6 +1,5 @@ --- -title: SSO between ADAL & MSAL apps (iOS/macOS) | Azure -titleSuffix: Microsoft identity platform +title: SSO between ADAL & MSAL apps (iOS/macOS) description: Learn how to share SSO between ADAL and MSAL apps services: active-directory author: mmacy diff --git a/articles/active-directory/develop/support-fido2-authentication.md b/articles/active-directory/develop/support-fido2-authentication.md index c83bd5d036d40..23224de47cadb 100644 --- a/articles/active-directory/develop/support-fido2-authentication.md +++ b/articles/active-directory/develop/support-fido2-authentication.md @@ -1,6 +1,5 @@ --- -title: Support passwordless authentication with FIDO2 keys in apps you develop | Azure -titleSuffix: Microsoft identity platform +title: Support passwordless authentication with FIDO2 keys in apps you develop description: This deployment guide explains how to support passwordless authentication with FIDO2 security keys in the applications you develop services: active-directory author: knicholasa diff --git a/articles/active-directory/develop/supported-accounts-validation.md b/articles/active-directory/develop/supported-accounts-validation.md index c09d4ff0e434e..b75d2f20fc062 100644 --- a/articles/active-directory/develop/supported-accounts-validation.md +++ b/articles/active-directory/develop/supported-accounts-validation.md @@ -1,7 +1,6 @@ --- # required metadata -title: Validation differences by supported account types | Azure -titleSuffix: Microsoft identity platform +title: Validation differences by supported account types description: Learn about the validation differences of various properties for different supported account types when registering your app with the Microsoft identity platform. author: SureshJa ms.author: sureshja diff --git a/articles/active-directory/develop/test-automate-integration-testing.md b/articles/active-directory/develop/test-automate-integration-testing.md index 4588492912612..71cfcf9ab1bef 100644 --- a/articles/active-directory/develop/test-automate-integration-testing.md +++ b/articles/active-directory/develop/test-automate-integration-testing.md @@ -1,6 +1,5 @@ --- title: Run automated integration tests -titleSuffix: Microsoft identity platform description: Learn how to run automated integration tests as a user against APIs protected by the Microsoft identity platform. Use the Resource Owner Password Credential Grant (ROPC) auth flow to sign in as a user instead of automating the interactive sign-in prompt UI. services: active-directory author: arcrowe diff --git a/articles/active-directory/develop/test-setup-environment.md b/articles/active-directory/develop/test-setup-environment.md index c3beb3e8f678a..b660cf4c08f52 100644 --- a/articles/active-directory/develop/test-setup-environment.md +++ b/articles/active-directory/develop/test-setup-environment.md @@ -1,6 +1,5 @@ --- title: Set up a test environment for your app -titleSuffix: Microsoft identity platform description: Learn how to set up an Azure Active Directory test environment so you can test your application integrated with Microsoft identity platform. 
Evaluate whether you need a separate tenant for testing or if you can use your production tenant. services: active-directory author: arcrowe diff --git a/articles/active-directory/develop/test-throttle-service-limits.md b/articles/active-directory/develop/test-throttle-service-limits.md index e3f4158c91c3c..53f085d0d3d95 100644 --- a/articles/active-directory/develop/test-throttle-service-limits.md +++ b/articles/active-directory/develop/test-throttle-service-limits.md @@ -1,6 +1,5 @@ --- title: Test environments, throttling, and service limits -titleSuffix: Microsoft identity platform description: Learn about the throttling and service limits to consider while deploying an Azure Active Directory test environment and testing an app integrated with the Microsoft identity platform. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/troubleshoot-publisher-verification.md b/articles/active-directory/develop/troubleshoot-publisher-verification.md index fbe1a575b416f..3c52a18e9da7a 100644 --- a/articles/active-directory/develop/troubleshoot-publisher-verification.md +++ b/articles/active-directory/develop/troubleshoot-publisher-verification.md @@ -1,6 +1,5 @@ --- -title: Troubleshoot publisher verification | Azure -titleSuffix: Microsoft identity platform +title: Troubleshoot publisher verification description: Describes how to troubleshoot publisher verification for the Microsoft identity platform by calling Microsoft Graph APIs. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/tutorial-blazor-server.md b/articles/active-directory/develop/tutorial-blazor-server.md index 95b134259c809..a3ef9452bae6e 100644 --- a/articles/active-directory/develop/tutorial-blazor-server.md +++ b/articles/active-directory/develop/tutorial-blazor-server.md @@ -1,6 +1,5 @@ --- -title: Tutorial - Create a Blazor Server app that uses the Microsoft identity platform for authentication | Azure -titleSuffix: Microsoft identity platform +title: Tutorial - Create a Blazor Server app that uses the Microsoft identity platform for authentication description: In this tutorial, you set up authentication using the Microsoft identity platform in a Blazor Server app. author: knicholasa diff --git a/articles/active-directory/develop/tutorial-blazor-webassembly.md b/articles/active-directory/develop/tutorial-blazor-webassembly.md index 162254945acf5..f48e1e6e8dc51 100644 --- a/articles/active-directory/develop/tutorial-blazor-webassembly.md +++ b/articles/active-directory/develop/tutorial-blazor-webassembly.md @@ -1,6 +1,5 @@ --- title: Tutorial - Sign in users and call a protected API from a Blazor WebAssembly app -titleSuffix: Microsoft identity platform description: In this tutorial, sign in users and call a protected API using the Microsoft identity platform in a Blazor WebAssembly (WASM) app. 
author: knicholasa diff --git a/articles/active-directory/develop/tutorial-v2-android.md b/articles/active-directory/develop/tutorial-v2-android.md index 4c83d481af3f7..16195bddb900b 100644 --- a/articles/active-directory/develop/tutorial-v2-android.md +++ b/articles/active-directory/develop/tutorial-v2-android.md @@ -1,6 +1,5 @@ --- -title: "Tutorial: Create an Android app that uses the Microsoft identity platform for authentication | Azure" -titleSuffix: Microsoft identity platform +title: "Tutorial: Create an Android app that uses the Microsoft identity platform for authentication" description: In this tutorial, you build an Android app that uses the Microsoft identity platform to sign in users and get an access token to call the Microsoft Graph API on their behalf. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/tutorial-v2-angular-auth-code.md b/articles/active-directory/develop/tutorial-v2-angular-auth-code.md index dba7cbf209e73..1cd3ca6015703 100644 --- a/articles/active-directory/develop/tutorial-v2-angular-auth-code.md +++ b/articles/active-directory/develop/tutorial-v2-angular-auth-code.md @@ -1,6 +1,5 @@ --- -title: "Tutorial: Create an Angular app that uses the Microsoft identity platform for authentication using auth code flow | Azure" -titleSuffix: Microsoft identity platform +title: "Tutorial: Create an Angular app that uses the Microsoft identity platform for authentication using auth code flow" description: In this tutorial, you build an Angular single-page app (SPA) using auth code flow that uses the Microsoft identity platform to sign in users and get an access token to call the Microsoft Graph API on their behalf. services: active-directory author: jo-arroyo diff --git a/articles/active-directory/develop/tutorial-v2-asp-webapp.md b/articles/active-directory/develop/tutorial-v2-asp-webapp.md index d7d9fb8237dcb..10fb995a5ada8 100644 --- a/articles/active-directory/develop/tutorial-v2-asp-webapp.md +++ b/articles/active-directory/develop/tutorial-v2-asp-webapp.md @@ -1,6 +1,5 @@ --- -title: "Tutorial: Create an ASP.NET web app that uses the Microsoft identity platform for authentication | Azure" -titleSuffix: Microsoft identity platform +title: "Tutorial: Create an ASP.NET web app that uses the Microsoft identity platform for authentication" description: In this tutorial, you build an ASP.NET web application that uses the Microsoft identity platform and OWIN middleware to enable user login. services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/tutorial-v2-aspnet-daemon-web-app.md b/articles/active-directory/develop/tutorial-v2-aspnet-daemon-web-app.md index 235bddb9d58da..2eb57175f051c 100644 --- a/articles/active-directory/develop/tutorial-v2-aspnet-daemon-web-app.md +++ b/articles/active-directory/develop/tutorial-v2-aspnet-daemon-web-app.md @@ -1,6 +1,5 @@ --- -title: "Tutorial: Build a multi-tenant daemon that accesses Microsoft Graph business data | Azure" -titleSuffix: Microsoft identity platform +title: "Tutorial: Build a multi-tenant daemon that accesses Microsoft Graph business data" description: In this tutorial, learn how to call an ASP.NET web API protected by Azure Active Directory from a Windows desktop (WPF) application. The WPF client authenticates a user, requests an access token, and calls the web API. 
services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/tutorial-v2-ios.md b/articles/active-directory/develop/tutorial-v2-ios.md index 657e5b2399bdc..e8f6b1bf542d8 100644 --- a/articles/active-directory/develop/tutorial-v2-ios.md +++ b/articles/active-directory/develop/tutorial-v2-ios.md @@ -1,16 +1,13 @@ --- -title: "Tutorial: Create an iOS or macOS app that uses the Microsoft identity platform for authentication | Azure" -titleSuffix: Microsoft identity platform -description: In this tutorial, you build an iOS or macOS app that uses the Microsoft identity platform to sign in users and get an access token to call the Microsoft Graph API on their behalf. -services: active-directory +title: "Tutorial: Create an iOS or macOS app that uses the Microsoft identity platform for authentication" +description: Build an iOS or macOS app that uses the Microsoft identity platform to sign in users and get an access token to call the Microsoft Graph API on their behalf. author: mmacy manager: CelesteDG ms.service: active-directory ms.subservice: develop ms.topic: tutorial -ms.workload: identity -ms.date: 09/18/2020 +ms.date: 05/28/2022 ms.author: marsma ms.reviewer: oldalton ms.custom: aaddev, identityplatformtop40, has-adal-ref @@ -20,7 +17,7 @@ ms.custom: aaddev, identityplatformtop40, has-adal-ref In this tutorial, you build an iOS or macOS app that integrates with the Microsoft identity platform to sign users and get an access token to call the Microsoft Graph API. -When you've completed the guide, your application will accept sign-ins of personal Microsoft accounts (including outlook.com, live.com, and others) and work or school accounts from any company or organization that uses Azure Active Directory. This tutorial is applicable to both iOS and macOS apps. Some steps are different between the two platforms. +When you've completed the tutorial, your application will accept sign-ins of personal Microsoft accounts (including outlook.com, live.com, and others) and work or school accounts from any company or organization that uses Azure Active Directory. This tutorial is applicable to both iOS and macOS apps. Some steps are different between the two platforms. In this tutorial: @@ -75,8 +72,8 @@ If you'd like to download a completed version of the app you build in this tutor 1. Select **Accounts in any organizational directory (Any Azure AD directory - Multitenant) and personal Microsoft accounts (e.g. Skype, Xbox)** under **Supported account types**. 1. Select **Register**. 1. Under **Manage**, select **Authentication** > **Add a platform** > **iOS/macOS**. -1. Enter your project's Bundle ID. If you downloaded the code, this is `com.microsoft.identitysample.MSALiOS`. If you're creating your own project, select your project in Xcode and open the **General** tab. The bundle identifier appears in the **Identity** section. -1. Select **Configure** and save the **MSAL Configuration** that appears in the **MSAL configuration** page so you can enter it when you configure your app later. +1. Enter your project's Bundle ID. If downloaded the code sample, the Bundle ID is `com.microsoft.identitysample.MSALiOS`. If you're creating your own project, select your project in Xcode and open the **General** tab. The bundle identifier appears in the **Identity** section. +1. Select **Configure** and save the **MSAL Configuration** that appears in the **MSAL configuration** page so you can enter it when you configure your app later. 1. Select **Done**. 
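The values recorded during registration surface again when the app is configured in code. As a rough illustration (placeholder values only; the constant names used by the completed sample appear later in this tutorial), the client ID and the MSAL-style redirect URI derived from the bundle identifier typically end up as constants like these:

```swift
// Placeholder values for illustration only — substitute your own Application (client) ID and bundle identifier.
// MSAL for iOS and macOS uses a redirect URI of the form msauth.<Bundle_ID>://auth.
let kClientID = "00000000-0000-0000-0000-000000000000"
let kRedirectUri = "msauth.com.microsoft.identitysample.MSALiOS://auth"
```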
## Add MSAL @@ -85,7 +82,7 @@ Choose one of the following ways to install the MSAL library in your app: ### CocoaPods -1. If you're using [CocoaPods](https://cocoapods.org/), install `MSAL` by first creating an empty file called `podfile` in the same folder as your project's `.xcodeproj` file. Add the following to `podfile`: +1. If you're using [CocoaPods](https://cocoapods.org/), install `MSAL` by first creating an empty file called _podfile_ in the same folder as your project's _.xcodeproj_ file. Add the following to _podfile_: ``` use_frameworks! @@ -96,18 +93,18 @@ Choose one of the following ways to install the MSAL library in your app: ``` 2. Replace `` with the name of your project. -3. In a terminal window, navigate to the folder that contains the `podfile` you created and run `pod install` to install the MSAL library. +3. In a terminal window, navigate to the folder that contains the _podfile_ you created and run `pod install` to install the MSAL library. 4. Close Xcode and open `.xcworkspace` to reload the project in Xcode. ### Carthage -If you're using [Carthage](https://github.com/Carthage/Carthage), install `MSAL` by adding it to your `Cartfile`: +If you're using [Carthage](https://github.com/Carthage/Carthage), install `MSAL` by adding it to your _Cartfile_: ``` github "AzureAD/microsoft-authentication-library-for-objc" "master" ``` -From a terminal window, in the same directory as the updated `Cartfile`, run the following command to have Carthage update the dependencies in your project. +From a terminal window, in the same directory as the updated _Cartfile_, run the following command to have Carthage update the dependencies in your project. iOS: @@ -129,13 +126,13 @@ You can also use Git Submodule, or check out the latest release to use as a fram Next, we'll add your app registration to your code. -First, add the following import statement to the top of the `ViewController.swift`, as well as `AppDelegate.swift` or `SceneDelegate.swift` files: +First, add the following import statement to the top of the _ViewController.swift_ file and either _AppDelegate.swift_ or _SceneDelegate.swift_: ```swift import MSAL ``` -Then Add the following code to `ViewController.swift` prior to `viewDidLoad()`: +Next, add the following code to _ViewController.swift_ before to `viewDidLoad()`: ```swift // Update the below to your client ID you received in the portal. The below is for running the demo only @@ -151,7 +148,7 @@ var webViewParameters : MSALWebviewParameters? var currentAccount: MSALAccount? ``` -The only value you modify above is the value assigned to `kClientID`to be your [Application ID](./developer-glossary.md#application-id-client-id). This value is part of the MSAL Configuration data that you saved during the step at the beginning of this tutorial to register the application in the Azure portal. +The only value you modify above is the value assigned to `kClientID` to be your [Application ID](./developer-glossary.md#application-client-id). This value is part of the MSAL Configuration data that you saved during the step at the beginning of this tutorial to register the application in the Azure portal. ## Configure Xcode project settings @@ -161,9 +158,9 @@ Add a new keychain group to your project **Signing & Capabilities**. The keychai ## For iOS only, configure URL schemes -In this step, you will register `CFBundleURLSchemes` so that the user can be redirected back to the app after sign in. 
By the way, `LSApplicationQueriesSchemes` also allows your app to make use of Microsoft Authenticator. +In this step, you'll register `CFBundleURLSchemes` so that the user can be redirected back to the app after sign in. By the way, `LSApplicationQueriesSchemes` also allows your app to make use of Microsoft Authenticator. -In Xcode, open `Info.plist` as a source code file, and add the following inside of the `` section. Replace `[BUNDLE_ID]` with the value you used in the Azure portal. If you downloaded the code, the bundle identifier is `com.microsoft.identitysample.MSALiOS`. If you're creating your own project, select your project in Xcode and open the **General** tab. The bundle identifier appears in the **Identity** section. +In Xcode, open _Info.plist_ as a source code file, and add the following inside of the `` section. Replace `[BUNDLE_ID]` with the value you used in the Azure portal. If you downloaded the code, the bundle identifier is `com.microsoft.identitysample.MSALiOS`. If you're creating your own project, select your project in Xcode and open the **General** tab. The bundle identifier appears in the **Identity** section. ```xml CFBundleURLTypes @@ -189,7 +186,7 @@ In Xcode, open `Info.plist` as a source code file, and add the following inside ## Create your app's UI -Now create a UI that includes a button to call the Microsoft Graph API, another to sign out, and a text view to see some output by adding the following code to the `ViewController`class: +Now create a UI that includes a button to call the Microsoft Graph API, another to sign out, and a text view to see some output by adding the following code to the `ViewController` class: ### iOS UI @@ -372,7 +369,7 @@ Next, also inside the `ViewController` class, replace the `viewDidLoad()` method ### Initialize MSAL -Add the following `initMSAL` method to the `ViewController` class: +To the `ViewController` class, add the `initMSAL` method: ```swift func initMSAL() throws { @@ -390,7 +387,7 @@ Add the following `initMSAL` method to the `ViewController` class: } ``` -Add the following after `initMSAL` method to the `ViewController` class. +Still in the `ViewController` class and after the `initMSAL` method, add the `initWebViewParams` method: ### iOS code: @@ -408,9 +405,9 @@ func initWebViewParams() { } ``` -### For iOS only, handle the sign-in callback +### Handle the sign-in callback (iOS only) -Open the `AppDelegate.swift` file. To handle the callback after sign-in, add `MSALPublicClientApplication.handleMSALResponse` to the `appDelegate` class like this: +Open the _AppDelegate.swift_ file. To handle the callback after sign-in, add `MSALPublicClientApplication.handleMSALResponse` to the `appDelegate` class like this: ```swift // Inside AppDelegate... @@ -421,7 +418,7 @@ func application(_ app: UIApplication, open url: URL, options: [UIApplication.Op ``` -**If you are using Xcode 11**, you should place MSAL callback into the `SceneDelegate.swift` instead. +**If you are using Xcode 11**, you should place MSAL callback into the _SceneDelegate.swift_ instead. If you support both UISceneDelegate and UIApplicationDelegate for compatibility with older iOS, MSAL callback would need to be placed into both files. ```swift @@ -442,9 +439,9 @@ func scene(_ scene: UIScene, openURLContexts URLContexts: Set) Now, we can implement the application's UI processing logic and get tokens interactively through MSAL. 
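Before the individual implementations that follow, it can help to see the overall pattern in one condensed sketch: try silent acquisition for a cached account first, and fall back to interactive sign-in when no account exists or interaction is required. The snippet below is illustrative only; it assumes the `applicationContext` and `webViewParameters` properties declared earlier, plus a `kScopes` constant (for example `["user.read"]`) and an `acquireTokenInteractively()` helper along the lines of what this tutorial builds — adjust the names to match your own code.

```swift
// Illustrative sketch for the ViewController class: silent token acquisition with interactive fallback.
// Assumes `import MSAL` at the top of the file and the properties declared earlier in this tutorial.
func acquireTokenSilentlyOrInteractively() {
    guard let applicationContext = self.applicationContext else { return }

    let accountParameters = MSALParameters()
    accountParameters.completionBlockQueue = DispatchQueue.main

    applicationContext.getCurrentAccount(with: accountParameters) { (currentAccount, _, error) in
        guard let account = currentAccount else {
            // No signed-in account in the cache yet: show the interactive sign-in UI.
            self.acquireTokenInteractively()
            return
        }

        let silentParameters = MSALSilentTokenParameters(scopes: kScopes, account: account)
        applicationContext.acquireTokenSilent(with: silentParameters) { (result, error) in
            if let nsError = error as NSError?,
               nsError.domain == MSALErrorDomain,
               nsError.code == MSALError.interactionRequired.rawValue {
                // Tokens expired or additional consent is needed: interaction is required.
                self.acquireTokenInteractively()
                return
            }

            guard let result = result else { return }
            // result.accessToken can now be sent as a Bearer token to the Microsoft Graph /me endpoint.
            print("Acquired token silently for account: \(result.account.username ?? "unknown")")
        }
    }
}
```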
-MSAL exposes two primary methods for getting tokens: `acquireTokenSilently()` and `acquireTokenInteractively()`: +MSAL exposes two primary methods for getting tokens: `acquireTokenSilently()` and `acquireTokenInteractively()`. -- `acquireTokenSilently()` attempts to sign in a user and get tokens without any user interaction as long as an account is present. `acquireTokenSilently()` requires providing a valid `MSALAccount` which can be retrieved by using one of MSAL account enumeration APIs. This sample uses `applicationContext.getCurrentAccount(with: msalParameters, completionBlock: {})` to retrieve current account. +- `acquireTokenSilently()` attempts to sign in a user and get tokens without user interaction as long as an account is present. `acquireTokenSilently()` require a valid `MSALAccount` which can be retrieved by using one of MSAL's account enumeration APIs. This tutorial uses `applicationContext.getCurrentAccount(with: msalParameters, completionBlock: {})` to retrieve the current account. - `acquireTokenInteractively()` always shows UI when attempting to sign in the user. It may use session cookies in the browser or an account in the Microsoft authenticator to provide an interactive-SSO experience. @@ -513,7 +510,7 @@ Add the following code to the `ViewController` class: #### Get a token interactively -The following code snippet gets a token for the first time by creating an `MSALInteractiveTokenParameters` object and calling `acquireToken`. Next you will add code that: +The following code snippet gets a token for the first time by creating an `MSALInteractiveTokenParameters` object and calling `acquireToken`. Next you'll add code that: 1. Creates `MSALInteractiveTokenParameters` with scopes. 2. Calls `acquireToken()` with the created parameters. @@ -812,7 +809,7 @@ Add the following helper methods to the `ViewController` class to complete the s } ``` -### For iOS only, get additional device information +### iOS only: get additional device information Use following code to read current device configuration, including whether device is configured as shared: @@ -839,13 +836,13 @@ Use following code to read current device configuration, including whether devic ### Multi-account applications -This app is built for a single account scenario. MSAL also supports multi-account scenarios, but it requires some additional work from apps. You will need to create UI to help users select which account they want to use for each action that requires tokens. Alternatively, your app can implement a heuristic to select which account to use by querying all accounts from MSAL. For example, see `accountsFromDeviceForParameters:completionBlock:` [API](https://azuread.github.io/microsoft-authentication-library-for-objc/Classes/MSALPublicClientApplication.html#/c:objc(cs)MSALPublicClientApplication(im)accountsFromDeviceForParameters:completionBlock:) +This app is built for a single account scenario. MSAL also supports multi-account scenarios, but it requires more application work. You'll need to create UI to help users select which account they want to use for each action that requires tokens. Alternatively, your app can implement a heuristic to select which account to use by querying all accounts from MSAL. 
For example, see `accountsFromDeviceForParameters:completionBlock:` [API](https://azuread.github.io/microsoft-authentication-library-for-objc/Classes/MSALPublicClientApplication.html#/c:objc(cs)MSALPublicClientApplication(im)accountsFromDeviceForParameters:completionBlock:) ## Test your app Build and deploy the app to a test device or simulator. You should be able to sign in and get tokens for Azure AD or personal Microsoft accounts. -The first time a user signs into your app, they will be prompted by Microsoft identity to consent to the permissions requested. While most users are capable of consenting, some Azure AD tenants have disabled user consent, which requires admins to consent on behalf of all users. To support this scenario, register your app's scopes in the Azure portal. +The first time a user signs into your app, they'll be prompted by Microsoft identity to consent to the permissions requested. While most users are capable of consenting, some Azure AD tenants have disabled user consent, which requires admins to consent on behalf of all users. To support this scenario, register your app's scopes in the Azure portal. After you sign in, the app will display the data returned from the Microsoft Graph `/me` endpoint. diff --git a/articles/active-directory/develop/tutorial-v2-javascript-auth-code.md b/articles/active-directory/develop/tutorial-v2-javascript-auth-code.md index c03d1cb1d604a..95da6f495da34 100644 --- a/articles/active-directory/develop/tutorial-v2-javascript-auth-code.md +++ b/articles/active-directory/develop/tutorial-v2-javascript-auth-code.md @@ -1,6 +1,5 @@ --- -title: "Tutorial: Create a JavaScript single-page app that uses auth code flow | Azure" -titleSuffix: Microsoft identity platform +title: "Tutorial: Create a JavaScript single-page app that uses auth code flow" description: In this tutorial, you create a JavaScript SPA that can sign in users and use the auth code flow to obtain an access token from the Microsoft identity platform and call the Microsoft Graph API. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/tutorial-v2-javascript-spa.md b/articles/active-directory/develop/tutorial-v2-javascript-spa.md index 1f85b0420750e..17bbc62b75711 100644 --- a/articles/active-directory/develop/tutorial-v2-javascript-spa.md +++ b/articles/active-directory/develop/tutorial-v2-javascript-spa.md @@ -1,6 +1,5 @@ --- -title: "Tutorial: Create a JavaScript single-page app that uses the Microsoft identity platform for authentication | Azure" -titleSuffix: Microsoft identity platform +title: "Tutorial: Create a JavaScript single-page app that uses the Microsoft identity platform for authentication" description: In this tutorial, you build a JavaScript single-page app (SPA) that uses the Microsoft identity platform to sign in users and get an access token to call the Microsoft Graph API on their behalf. 
services: active-directory author: mmacy diff --git a/articles/active-directory/develop/tutorial-v2-nodejs-console.md b/articles/active-directory/develop/tutorial-v2-nodejs-console.md index 90fee01f9f6c0..c87331c75f54d 100644 --- a/articles/active-directory/develop/tutorial-v2-nodejs-console.md +++ b/articles/active-directory/develop/tutorial-v2-nodejs-console.md @@ -1,6 +1,5 @@ --- -title: "Tutorial: Call Microsoft Graph in a Node.js console app | Azure" -titleSuffix: Microsoft identity platform +title: "Tutorial: Call Microsoft Graph in a Node.js console app" description: In this tutorial, you build a console app for calling Microsoft Graph to a Node.js console app. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/tutorial-v2-nodejs-desktop.md b/articles/active-directory/develop/tutorial-v2-nodejs-desktop.md index 0ccfb39b22d13..7920f4abc56f7 100644 --- a/articles/active-directory/develop/tutorial-v2-nodejs-desktop.md +++ b/articles/active-directory/develop/tutorial-v2-nodejs-desktop.md @@ -1,6 +1,5 @@ --- -title: "Tutorial: Sign in users and call the Microsoft Graph API in an Electron desktop app | Azure" -titleSuffix: Microsoft identity platform +title: "Tutorial: Sign in users and call the Microsoft Graph API in an Electron desktop app" description: In this tutorial, you build an Electron desktop app that can sign in users and use the auth code flow to obtain an access token from the Microsoft identity platform and call the Microsoft Graph API. services: active-directory author: mmacy @@ -20,6 +19,7 @@ In this tutorial, you build an Electron desktop application that signs in users Follow the steps in this tutorial to: > [!div class="checklist"] +> > - Register the application in the Azure portal > - Create an Electron desktop app project > - Add authentication logic to your app @@ -42,7 +42,7 @@ Use the following settings for your app registration: - Name: `ElectronDesktopApp` (suggested) - Supported account types: **Accounts in any organizational directory (Any Azure AD directory - Multitenant) and personal Microsoft accounts (e.g. Skype, Xbox)** - Platform type: **Mobile and desktop applications** -- Redirect URI: `msal://redirect` +- Redirect URI: `msal{Your_Application/Client_Id}://auth` ## Create the project @@ -53,548 +53,68 @@ Create a folder to host your application, for example *ElectronDesktopApp*. ```console npm init -y npm install --save @azure/msal-node axios bootstrap dotenv jquery popper.js - npm install --save-dev babel electron@10.1.6 webpack + npm install --save-dev babel electron@18.2.3 webpack ``` 2. Then, create a folder named *App*. Inside this folder, create a file named *index.html* that will serve as UI. Add the following code there: - ```html - - - - - - - - MSAL Node Electron Sample App - - - - - - - - - -
-    <!-- removed index.html body markup; only the heading text "Electron sample app calling MS Graph API using MSAL Node" survived extraction (the replacement file is supplied by the code include below) -->
- - - - - - - - - - - ``` + :::code language="html" source="~/ms-identity-JavaScript-nodejs-desktop/App/index.html"::: 3. Next, create file named *main.js* and add the following code: - ```JavaScript - require('dotenv').config() - - const path = require('path'); - const { app, ipcMain, BrowserWindow } = require('electron'); - const { IPC_MESSAGES } = require('./constants'); - - const { callEndpointWithToken } = require('./fetch'); - const AuthProvider = require('./AuthProvider'); - - const authProvider = new AuthProvider(); - let mainWindow; - - function createWindow () { - mainWindow = new BrowserWindow({ - width: 800, - height: 600, - webPreferences: { - nodeIntegration: true, - contextIsolation: false - } - }); - - mainWindow.loadFile(path.join(__dirname, './index.html')); - }; - - app.on('ready', () => { - createWindow(); - }); - - app.on('window-all-closed', () => { - app.quit(); - }); - + :::code language="js" source="~/ms-identity-JavaScript-nodejs-desktop/App/main.js"::: - // Event handlers - ipcMain.on(IPC_MESSAGES.LOGIN, async() => { - const account = await authProvider.login(mainWindow); - - await mainWindow.loadFile(path.join(__dirname, './index.html')); - - mainWindow.webContents.send(IPC_MESSAGES.SHOW_WELCOME_MESSAGE, account); - }); - - ipcMain.on(IPC_MESSAGES.LOGOUT, async() => { - await authProvider.logout(); - await mainWindow.loadFile(path.join(__dirname, './index.html')); - }); - - ipcMain.on(IPC_MESSAGES.GET_PROFILE, async() => { - - const tokenRequest = { - scopes: ['User.Read'], - }; - - const token = await authProvider.getToken(mainWindow, tokenRequest); - const account = authProvider.account +In the code snippet above, we initialize an Electron main window object and create some event handlers for interactions with the Electron window. We also import configuration parameters, instantiate *authProvider* class for handling sign-in, sign-out and token acquisition, and call the Microsoft Graph API. - await mainWindow.loadFile(path.join(__dirname, './index.html')); +4. In the same folder (*App*), create another file named *renderer.js* and add the following code: - const graphResponse = await callEndpointWithToken(`${process.env.GRAPH_ENDPOINT_HOST}${process.env.GRAPH_ME_ENDPOINT}`, token); + :::code language="js" source="~/ms-identity-JavaScript-nodejs-desktop/App/renderer.js"::: - mainWindow.webContents.send(IPC_MESSAGES.SHOW_WELCOME_MESSAGE, account); - mainWindow.webContents.send(IPC_MESSAGES.SET_PROFILE, graphResponse); - }); +The renderer methods are exposed by the preload script found in the *preload.js* file in order to give the renderer access to the `Node API` in a secure and controlled way - ipcMain.on(IPC_MESSAGES.GET_MAIL, async() => { +5. Then, create a new file *preload.js* and add the following code: - const tokenRequest = { - scopes: ['Mail.Read'], - }; + :::code language="js" source="~/ms-identity-JavaScript-nodejs-desktop/App/preload.js"::: - const token = await authProvider.getToken(mainWindow, tokenRequest); - const account = authProvider.account; +This preload script exposes a renderer methods to give the renderer process controlled access to some `Node APIs` by applying IPC channels that have been configured for communication between the main and renderer processes. - await mainWindow.loadFile(path.join(__dirname, './index.html')); +6. 
Next, create *UIManager.js* class inside the *App* folder and add the following code: - const graphResponse = await callEndpointWithToken(`${process.env.GRAPH_ENDPOINT_HOST}${process.env.GRAPH_MAIL_ENDPOINT}`, token); + :::code language="js" source="~/ms-identity-JavaScript-nodejs-desktop/App/UIManager.js"::: - mainWindow.webContents.send(IPC_MESSAGES.SHOW_WELCOME_MESSAGE, account); - mainWindow.webContents.send(IPC_MESSAGES.SET_MAIL, graphResponse); - }); - ``` +7. After that, create *CustomProtocolListener.js* class and add the following code there: -In the code snippet above, we initialize an Electron main window object and create some event handlers for interactions with the Electron window. We also import configuration parameters, instantiate *authProvider* class for handling sign-in, sign-out and token acquisition, and call the Microsoft Graph API. + :::code language="js" source="~/ms-identity-JavaScript-nodejs-desktop/App/CustomProtocolListener.js"::: -4. In the same folder (*App*), create another file named *renderer.js* and add the following code: +*CustomProtocolListener* class can be instantiated in order to register and unregister a custom typed protocol on which MSAL Node can listen for Auth Code responses. - ```JavaScript - const { ipcRenderer } = require('electron'); - const { IPC_MESSAGES } = require('./constants'); - - // UI event handlers - document.querySelector('#signIn').addEventListener('click', () => { - ipcRenderer.send(IPC_MESSAGES.LOGIN); - }); - - document.querySelector('#signOut').addEventListener('click', () => { - ipcRenderer.send(IPC_MESSAGES.LOGOUT); - }); - - document.querySelector('#seeProfile').addEventListener('click', () => { - ipcRenderer.send(IPC_MESSAGES.GET_PROFILE); - }); - - document.querySelector('#readMail').addEventListener('click', () => { - ipcRenderer.send(IPC_MESSAGES.GET_MAIL); - }); - - // Main process message subscribers - ipcRenderer.on(IPC_MESSAGES.SHOW_WELCOME_MESSAGE, (event, account) => { - showWelcomeMessage(account); - }); - - ipcRenderer.on(IPC_MESSAGES.SET_PROFILE, (event, graphResponse) => { - updateUI(graphResponse, `${process.env.GRAPH_ENDPOINT_HOST}${process.env.GRAPH_ME_ENDPOINT}`); - }); - - ipcRenderer.on(IPC_MESSAGES.SET_MAIL, (event, graphResponse) => { - updateUI(graphResponse, `${process.env.GRAPH_ENDPOINT_HOST}${process.env.GRAPH_MAIL_ENDPOINT}`); - }); - - // DOM elements to work with - const welcomeDiv = document.getElementById("WelcomeMessage"); - const signInButton = document.getElementById("signIn"); - const signOutButton = document.getElementById("signOut"); - const cardDiv = document.getElementById("cardDiv"); - const profileDiv = document.getElementById("profileDiv"); - const tabList = document.getElementById("list-tab"); - const tabContent = document.getElementById("nav-tabContent"); - - function showWelcomeMessage(account) { - cardDiv.style.display = "initial"; - welcomeDiv.innerHTML = `Welcome ${account.name}`; - signInButton.hidden = true; - signOutButton.hidden = false; - } - - function clearTabs() { - tabList.innerHTML = ""; - tabContent.innerHTML = ""; - } - - function updateUI(data, endpoint) { - - console.log(`Graph API responded at: ${new Date().toString()}`); - - if (endpoint === `${process.env.GRAPH_ENDPOINT_HOST}${process.env.GRAPH_ME_ENDPOINT}`) { - setProfile(data); - } else if (endpoint === `${process.env.GRAPH_ENDPOINT_HOST}${process.env.GRAPH_MAIL_ENDPOINT}`) { - setMail(data); - } - } - - function setProfile(data) { - profileDiv.innerHTML = '' - - const title = document.createElement('p'); 
- const email = document.createElement('p'); - const phone = document.createElement('p'); - const address = document.createElement('p'); - - title.innerHTML = "Title: " + data.jobTitle; - email.innerHTML = "Mail: " + data.mail; - phone.innerHTML = "Phone: " + data.businessPhones[0]; - address.innerHTML = "Location: " + data.officeLocation; - - profileDiv.appendChild(title); - profileDiv.appendChild(email); - profileDiv.appendChild(phone); - profileDiv.appendChild(address); - } - - function setMail(data) { - const mailInfo = data; - if (mailInfo.value.length < 1) { - alert("Your mailbox is empty!") - } else { - clearTabs(); - mailInfo.value.slice(0, 10).forEach((d, i) => { - createAndAppendListItem(d, i); - createAndAppendContentItem(d, i); - }); - } - } - - function createAndAppendListItem(d, i) { - const listItem = document.createElement("a"); - listItem.setAttribute("class", "list-group-item list-group-item-action") - listItem.setAttribute("id", "list" + i + "list") - listItem.setAttribute("data-toggle", "list") - listItem.setAttribute("href", "#list" + i) - listItem.setAttribute("role", "tab") - listItem.setAttribute("aria-controls", i) - listItem.innerHTML = d.subject; - tabList.appendChild(listItem); - } - - function createAndAppendContentItem(d, i) { - const contentItem = document.createElement("div"); - contentItem.setAttribute("class", "tab-pane fade") - contentItem.setAttribute("id", "list" + i) - contentItem.setAttribute("role", "tabpanel") - contentItem.setAttribute("aria-labelledby", "list" + i + "list") - - if (d.from) { - contentItem.innerHTML = " from: " + d.from.emailAddress.address + "

" + d.bodyPreview + "..."; - tabContent.appendChild(contentItem); - } - } - ``` +8. Finally, create a file named *constants.js* that will store the strings constants for describing the application **events**: -5. Finally, create a file named *constants.js* that will store the strings constants for describing the application **events**: - - ```JavaScript - const IPC_MESSAGES = { - SHOW_WELCOME_MESSAGE: 'SHOW_WELCOME_MESSAGE', - LOGIN: 'LOGIN', - LOGOUT: 'LOGOUT', - GET_PROFILE: 'GET_PROFILE', - SET_PROFILE: 'SET_PROFILE', - GET_MAIL: 'GET_MAIL', - SET_MAIL: 'SET_MAIL' - } - - module.exports = { - IPC_MESSAGES: IPC_MESSAGES, - } - ``` + :::code language="js" source="~/ms-identity-JavaScript-nodejs-desktop/App/constants.js"::: You now have a simple GUI and interactions for your Electron app. After completing the rest of the tutorial, the file and folder structure of your project should look similar to the following: ``` ElectronDesktopApp/ ├── App -│   ├── authProvider.js +│   ├── AuthProvider.js │   ├── constants.js +│   ├── CustomProtocolListener.js │   ├── fetch.js -│   ├── main.js -│   ├── renderer.js │   ├── index.html +| ├── main.js +| ├── preload.js +| ├── renderer.js +│   ├── UIManager.js +│   ├── authConfig.js ├── package.json -└── .env ``` ## Add authentication logic to your app -In *App* folder, create a file named *AuthProvider.js*. This will contain an authentication provider class that will handle login, logout, token acquisition, account selection and related authentication tasks using MSAL Node. Add the following code there: - -```JavaScript -const { PublicClientApplication, LogLevel, CryptoProvider } = require('@azure/msal-node'); -const { protocol } = require('electron'); -const path = require('path'); -const url = require('url'); - -/** - * To demonstrate best security practices, this Electron sample application makes use of - * a custom file protocol instead of a regular web (https://) redirect URI in order to - * handle the redirection step of the authorization flow, as suggested in the OAuth2.0 specification for Native Apps. - */ -const CUSTOM_FILE_PROTOCOL_NAME = process.env.REDIRECT_URI.split(':')[0]; // e.g. msal://redirect - -/** - * Configuration object to be passed to MSAL instance on creation. - * For a full list of MSAL Node configuration parameters, visit: - * https://github.com/AzureAD/microsoft-authentication-library-for-js/blob/dev/lib/msal-node/docs/configuration.md - */ -const MSAL_CONFIG = { - auth: { - clientId: process.env.CLIENT_ID, - authority: `${process.env.AAD_ENDPOINT_HOST}${process.env.TENANT_ID}`, - redirectUri: process.env.REDIRECT_URI, - }, - system: { - loggerOptions: { - loggerCallback(loglevel, message, containsPii) { -     console.log(message); - }, -         piiLoggingEnabled: false, - logLevel: LogLevel.Verbose, - } - } -}; - -class AuthProvider { - - clientApplication; - cryptoProvider; - authCodeUrlParams; - authCodeRequest; - pkceCodes; - account; - - constructor() { - /** - * Initialize a public client application. For more information, visit: - * https://github.com/AzureAD/microsoft-authentication-library-for-js/blob/dev/lib/msal-node/docs/initialize-public-client-application.md - */ - this.clientApplication = new PublicClientApplication(MSAL_CONFIG); - this.account = null; - - // Initialize CryptoProvider instance - this.cryptoProvider = new CryptoProvider(); - - this.setRequestObjects(); - } - - /** - * Initialize request objects used by this AuthModule. 
- */ - setRequestObjects() { - const requestScopes = ['openid', 'profile', 'User.Read']; - const redirectUri = process.env.REDIRECT_URI; - - this.authCodeUrlParams = { - scopes: requestScopes, - redirectUri: redirectUri - }; - - this.authCodeRequest = { - scopes: requestScopes, - redirectUri: redirectUri, - code: null - } - - this.pkceCodes = { - challengeMethod: "S256", // Use SHA256 Algorithm - verifier: "", // Generate a code verifier for the Auth Code Request first - challenge: "" // Generate a code challenge from the previously generated code verifier - }; - } - - async login(authWindow) { - const authResult = await this.getTokenInteractive(authWindow, this.authCodeUrlParams); - return this.handleResponse(authResult); - } - - async logout() { - if (this.account) { - await this.clientApplication.getTokenCache().removeAccount(this.account); - this.account = null; - } - } - - async getToken(authWindow, tokenRequest) { - let authResponse; - - authResponse = await this.getTokenInteractive(authWindow, tokenRequest); - - return authResponse.accessToken || null; - } - - // This method contains an implementation of access token acquisition in authorization code flow - async getTokenInteractive(authWindow, tokenRequest) { - - /** - * Proof Key for Code Exchange (PKCE) Setup - * - * MSAL enables PKCE in the Authorization Code Grant Flow by including the codeChallenge and codeChallengeMethod parameters - * in the request passed into getAuthCodeUrl() API, as well as the codeVerifier parameter in the - * second leg (acquireTokenByCode() API). - * - * MSAL Node provides PKCE Generation tools through the CryptoProvider class, which exposes - * the generatePkceCodes() asynchronous API. As illustrated in the example below, the verifier - * and challenge values should be generated previous to the authorization flow initiation. - * - * For details on PKCE code generation logic, consult the - * PKCE specification https://tools.ietf.org/html/rfc7636#section-4 - */ - - const {verifier, challenge} = await this.cryptoProvider.generatePkceCodes(); - - this.pkceCodes.verifier = verifier; - this.pkceCodes.challenge = challenge; - - const authCodeUrlParams = { - ...this.authCodeUrlParams, - scopes: tokenRequest.scopes, - codeChallenge: this.pkceCodes.challenge, // PKCE Code Challenge - codeChallengeMethod: this.pkceCodes.challengeMethod // PKCE Code Challenge Method - }; - - const authCodeUrl = await this.clientApplication.getAuthCodeUrl(authCodeUrlParams); - - protocol.registerFileProtocol(CUSTOM_FILE_PROTOCOL_NAME, (req, callback) => { - const requestUrl = url.parse(req.url, true); - callback(path.normalize(`${__dirname}/${requestUrl.path}`)); - }); - - const authCode = await this.listenForAuthCode(authCodeUrl, authWindow); - - const authResponse = await this.clientApplication.acquireTokenByCode({ - ...this.authCodeRequest, - scopes: tokenRequest.scopes, - code: authCode, - codeVerifier: this.pkceCodes.verifier // PKCE Code Verifier - }); - - return authResponse; - } - - // Listen for authorization code response from Azure AD - async listenForAuthCode(navigateUrl, authWindow) { - - authWindow.loadURL(navigateUrl); - - return new Promise((resolve, reject) => { - authWindow.webContents.on('will-redirect', (event, responseUrl) => { - try { - const parsedUrl = new URL(responseUrl); - const authCode = parsedUrl.searchParams.get('code'); - resolve(authCode); - } catch (err) { - reject(err); - } - }); - }); - } - - /** - * Handles the response from a popup or redirect. 
If response is null, will check if we have any accounts and attempt to sign in. - * @param response - */ - async handleResponse(response) { - if (response !== null) { - this.account = response.account; - } else { - this.account = await this.getAccount(); - } - - return this.account; - } - - /** - * Calls getAllAccounts and determines the correct account to sign into, currently defaults to first account found in cache. - * https://github.com/AzureAD/microsoft-authentication-library-for-js/blob/dev/lib/msal-common/docs/Accounts.md - */ - async getAccount() { - const cache = this.clientApplication.getTokenCache(); - const currentAccounts = await cache.getAllAccounts(); - - if (currentAccounts === null) { - console.log('No accounts detected'); - return null; - } - - if (currentAccounts.length > 1) { - // Add choose account code here - console.log('Multiple accounts detected, need to add choose account code.'); - return currentAccounts[0]; - } else if (currentAccounts.length === 1) { - return currentAccounts[0]; - } else { - return null; - } - } -} - -module.exports = AuthProvider; -``` +In *App* folder, create a file named *AuthProvider.js*. The *AuthProvider.js* file will contain an authentication provider class that will handle login, logout, token acquisition, account selection and related authentication tasks using MSAL Node. Add the following code there: + +:::code language="js" source="~/ms-identity-JavaScript-nodejs-desktop/App/AuthProvider.js"::: In the code snippet above, we first initialized MSAL Node `PublicClientApplication` by passing a configuration object (`msalConfig`). We then exposed `login`, `logout` and `getToken` methods to be called by main module (*main.js*). In `login` and `getToken`, we acquire ID and access tokens, respectively, by first requesting an authorization code and then exchanging this with a token using MSAL Node `acquireTokenByCode` public API. @@ -602,56 +122,13 @@ In the code snippet above, we first initialized MSAL Node `PublicClientApplicati Create another file named *fetch.js*. This file will contain an Axios HTTP client for making REST calls to the Microsoft Graph API. -```JavaScript -const axios = require('axios'); - -/** - * Makes an Authorization 'Bearer' request with the given accessToken to the given endpoint. - * @param endpoint - * @param accessToken - */ -async function callEndpointWithToken(endpoint, accessToken) { - const options = { - headers: { - Authorization: `Bearer ${accessToken}` - } - }; - - console.log('Request made at: ' + new Date().toString()); - - const response = await axios.default.get(endpoint, options); - - return response.data; -} - -module.exports = { - callEndpointWithToken: callEndpointWithToken, -}; -``` +:::code language="js" source="~/ms-identity-JavaScript-nodejs-desktop/App/fetch.js"::: ## Add app registration details -Finally, create an environment file to store the app registration details that will be used when acquiring tokens. To do so, create a file named *.env* inside the root folder of the sample (*ElectronDesktopApp*), and add the following code: - -``` -# Credentials -CLIENT_ID=Enter_the_Application_Id_Here -TENANT_ID=Enter_the_Tenant_Id_Here +Finally, create an environment file to store the app registration details that will be used when acquiring tokens. 
To do so, create a file named *authConfig.js* inside the root folder of the sample (*ElectronDesktopApp*), and add the following code: -# Configuration -REDIRECT_URI=msal://redirect - -# Endpoints -AAD_ENDPOINT_HOST=Enter_the_Cloud_Instance_Id_Here -GRAPH_ENDPOINT_HOST=Enter_the_Graph_Endpoint_Here - -# RESOURCES -GRAPH_ME_ENDPOINT=v1.0/me -GRAPH_MAIL_ENDPOINT=v1.0/me/messages - -# SCOPES -GRAPH_SCOPES=User.Read Mail.Read -``` +:::code language="js" source="~/ms-identity-JavaScript-nodejs-desktop/App/authConfig.js"::: Fill in these details with the values you obtain from Azure app registration portal: @@ -664,6 +141,7 @@ Fill in these details with the values you obtain from Azure app registration por - `Enter_the_Cloud_Instance_Id_Here`: The Azure cloud instance in which your application is registered. - For the main (or *global*) Azure cloud, enter `https://login.microsoftonline.com/`. - For **national** clouds (for example, China), you can find appropriate values in [National clouds](authentication-national-cloud.md). +- `Enter_the_Redirect_Uri_Here`: The redirect URI of the application you registered, in the form `msal{Your_Application/Client_Id}://auth`. - `Enter_the_Graph_Endpoint_Here` is the instance of the Microsoft Graph API the application should communicate with. - For the **global** Microsoft Graph API endpoint, replace both instances of this string with `https://graph.microsoft.com/`. - For endpoints in **national** cloud deployments, see [National cloud deployments](/graph/deployments) in the Microsoft Graph documentation. @@ -700,13 +178,13 @@ Select **Read Mails** to view the messages in user's account. You'll be presente :::image type="content" source="media/tutorial-v2-nodejs-desktop/desktop-05-consent-mail.png" alt-text="consent screen for read.mail permission"::: -After consent, you will view the messages returned in the response from the call to the Microsoft Graph API: +After consent, you'll view the messages returned in the response from the call to the Microsoft Graph API: :::image type="content" source="media/tutorial-v2-nodejs-desktop/desktop-06-mails.png" alt-text="mail information from Microsoft Graph"::: ## How the application works -When a user selects the **Sign In** button for the first time, get `getTokenInteractive` method of *AuthProvider.js* is called. This method redirects the user to sign-in with the *Microsoft identity platform endpoint* and validate the user's credentials, and then obtains an **authorization code**. This code is then exchanged for an access token using `acquireTokenByCode` public API of MSAL Node. +When a user selects the **Sign In** button for the first time, the `getTokenInteractive` method of *AuthProvider.js* is called. This method redirects the user to sign in with the Microsoft identity platform endpoint and validates the user's credentials, and then obtains an **authorization code**. This code is then exchanged for an access token using the `acquireTokenByCode` public API of MSAL Node. At this point, a PKCE-protected authorization code is sent to the CORS-protected token endpoint and is exchanged for tokens. An ID token, access token, and refresh token are received by your application and processed by MSAL Node, and the information contained in the tokens is cached. @@ -714,9 +192,9 @@ The ID token contains basic information about the user, like their display name.
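To make that exchange concrete, here's a minimal, hypothetical sketch of the code-for-token step using MSAL Node's `acquireTokenByCode` API. The function name `redeemAuthCode`, its parameters, and the placeholder configuration values are illustrative assumptions, not the sample's actual code:

```JavaScript
// Minimal sketch (not the sample's exact code): redeem a PKCE-protected
// authorization code for tokens with MSAL Node.
const { PublicClientApplication } = require('@azure/msal-node');

const pca = new PublicClientApplication({
    auth: {
        clientId: 'Enter_the_Application_Id_Here',
        authority: 'https://login.microsoftonline.com/Enter_the_Tenant_Id_Here',
    },
});

async function redeemAuthCode(authCode, pkceVerifier) {
    // The code verifier proves possession of the PKCE challenge that was sent
    // with the original authorization request.
    const authResponse = await pca.acquireTokenByCode({
        code: authCode,
        codeVerifier: pkceVerifier,
        scopes: ['User.Read'],
        redirectUri: 'Enter_the_Redirect_Uri_Here', // must match the URI registered in the Azure portal
    });

    // authResponse carries the access token, the ID token claims, and the account;
    // MSAL Node caches them for later silent requests.
    return authResponse;
}
```

In the sample, this step is handled inside *AuthProvider.js*, which also generates the PKCE verifier and challenge pair before building the authorization request.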
The desktop app you've created in this tutorial makes a REST call to the Microsoft Graph API using an access token as bearer token in request header ([RFC 6750](https://tools.ietf.org/html/rfc6750)). -The Microsoft Graph API requires the *user.read* scope to read a user's profile. By default, this scope is automatically added in every application that's registered in the Azure portal. Other APIs for Microsoft Graph, as well as custom APIs for your back-end server, might require additional scopes. For example, the Microsoft Graph API requires the *Mail.Read* scope in order to list the user's email. +The Microsoft Graph API requires the *user.read* scope to read a user's profile. By default, this scope is automatically added in every application that's registered in the Azure portal. Other APIs for Microsoft Graph, and custom APIs for your back-end server, might require extra scopes. For example, the Microsoft Graph API requires the *Mail.Read* scope in order to list the user's email. -As you add scopes, your users might be prompted to provide additional consent for the added scopes. +As you add scopes, your users might be prompted to provide another consent for the added scopes. [!INCLUDE [Help and support](../../../includes/active-directory-develop-help-support-include.md)] diff --git a/articles/active-directory/develop/tutorial-v2-nodejs-webapp-msal.md b/articles/active-directory/develop/tutorial-v2-nodejs-webapp-msal.md index 757a04d178465..191ebe4f1498e 100644 --- a/articles/active-directory/develop/tutorial-v2-nodejs-webapp-msal.md +++ b/articles/active-directory/develop/tutorial-v2-nodejs-webapp-msal.md @@ -1,6 +1,5 @@ --- -title: "Tutorial: Sign in users in a Node.js & Express web app | Azure" -titleSuffix: Microsoft identity platform +title: "Tutorial: Sign in users in a Node.js & Express web app" description: In this tutorial, you add support for signing-in users in a web app. services: active-directory author: mmacy @@ -13,9 +12,9 @@ ms.date: 02/17/2021 ms.author: marsma --- -# Tutorial: Sign in users in a Node.js & Express web app +# Tutorial: Sign in users and acquire a token for Microsoft Graph in a Node.js & Express web app -In this tutorial, you build a web app that signs-in users. The web app you build uses the [Microsoft Authentication Library (MSAL) for Node](https://github.com/AzureAD/microsoft-authentication-library-for-js/tree/dev/lib/msal-node). +In this tutorial, you build a web app that signs-in users and acquires access tokens for calling Microsoft Graph. The web app you build uses the [Microsoft Authentication Library (MSAL) for Node](https://github.com/AzureAD/microsoft-authentication-library-for-js/tree/dev/lib/msal-node). Follow the steps in this tutorial to: @@ -39,151 +38,177 @@ First, complete the steps in [Register an application with the Microsoft identit Use the following settings for your app registration: - Name: `ExpressWebApp` (suggested) -- Supported account types: **Accounts in any organizational directory (Any Azure AD directory - Multitenant) and personal Microsoft accounts (e.g. Skype, Xbox)** +- Supported account types: **Accounts in this organizational directory only** - Platform type: **Web** -- Redirect URI: `http://localhost:3000/redirect` +- Redirect URI: `http://localhost:3000/auth/redirect` - Client secret: `*********` (record this value for use in a later step - it's shown only once) ## Create the project -Create a folder to host your application, for example *ExpressWebApp*. 
+Use the [Express application generator tool](https://expressjs.com/en/starter/generator.html) to create an application skeleton. -1. First, change to your project directory in your terminal and then run the following `npm` commands: +1. First, install the [express-generator](https://www.npmjs.com/package/express-generator) package: ```console - npm init -y - npm install --save express + npm install -g express-generator ``` -2. Next, create file named *index.js* and add the following code: - -```JavaScript - const express = require("express"); - const msal = require('@azure/msal-node'); - - const SERVER_PORT = process.env.PORT || 3000; - - // Create Express App and Routes - const app = express(); - - app.listen(SERVER_PORT, () => console.log(`Msal Node Auth Code Sample app listening on port ${SERVER_PORT}!`)) +2. Then, create an application skeleton as follows: + +```console + express --view=hbs /ExpressWebApp && cd /ExpressWebApp + npm install ``` -You now have a simple web server running on port 3000. The file and folder structure of your project should look similar to the following: +You now have a simple Express web app. The file and folder structure of your project should look similar to the following: ``` ExpressWebApp/ -├── index.js +├── bin/ +| └── wwww +├── public/ +| ├── images/ +| ├── javascript/ +| └── stylesheets/ +| └── style.css +├── routes/ +| ├── index.js +| └── users.js +├── views/ +| ├── error.hbs +| ├── index.hbs +| └── layout.hbs +├── app.js └── package.json ``` ## Install the auth library -Locate the root of your project directory in a terminal and install the MSAL Node package via NPM. +Locate the root of your project directory in a terminal and install the MSAL Node package via npm. ```console npm install --save @azure/msal-node ``` -## Add app registration details +## Install other dependencies + +The web app sample in this tutorial uses the [express-session](https://www.npmjs.com/package/express-session) package for session management, [dotenv](https://www.npmjs.com/package/dotenv) package for reading environment parameters during development, and [axios](https://www.npmjs.com/package/axios) for making network calls to the Microsoft Graph API. Install these via npm: -In the *index.js* file you've created earlier, add the following code: - -```JavaScript - // Before running the sample, you will need to replace the values in the config, - // including the clientSecret - const config = { - auth: { - clientId: "Enter_the_Application_Id", - authority: "Enter_the_Cloud_Instance_Id_Here/Enter_the_Tenant_Id_here", - clientSecret: "Enter_the_Client_secret" - }, -     system: { -         loggerOptions: { -             loggerCallback(loglevel, message, containsPii) { -                 console.log(message); -             }, -          piiLoggingEnabled: false, -          logLevel: msal.LogLevel.Verbose, -         } -     } - }; +```console + npm install --save express-session dotenv axios ``` +## Add app registration details + +1. Create a *.env* file in the root of your project folder. Then add the following code: + +:::code language="text" source="~/ms-identity-node/App/.env"::: + Fill in these details with the values you obtain from Azure app registration portal: -- `Enter_the_Tenant_Id_here` should be one of the following: +- `Enter_the_Cloud_Instance_Id_Here`: The Azure cloud instance in which your application is registered. + - For the main (or *global*) Azure cloud, enter `https://login.microsoftonline.com/` (include the trailing forward-slash). 
+ - For **national** clouds (for example, China), you can find appropriate values in [National clouds](authentication-national-cloud.md). +- `Enter_the_Tenant_Info_here` should be one of the following: - If your application supports *accounts in this organizational directory*, replace this value with the **Tenant ID** or **Tenant name**. For example, `contoso.microsoft.com`. - If your application supports *accounts in any organizational directory*, replace this value with `organizations`. - If your application supports *accounts in any organizational directory and personal Microsoft accounts*, replace this value with `common`. - To restrict support to *personal Microsoft accounts only*, replace this value with `consumers`. - `Enter_the_Application_Id_Here`: The **Application (client) ID** of the application you registered. -- `Enter_the_Cloud_Instance_Id_Here`: The Azure cloud instance in which your application is registered. - - For the main (or *global*) Azure cloud, enter `https://login.microsoftonline.com`. - - For **national** clouds (for example, China), you can find appropriate values in [National clouds](authentication-national-cloud.md). - `Enter_the_Client_secret`: Replace this value with the client secret you created earlier. To generate a new key, use **Certificates & secrets** in the app registration settings in the Azure portal. > [!WARNING] > Any plaintext secret in source code poses an increased security risk. This article uses a plaintext client secret for simplicity only. Use [certificate credentials](active-directory-certificate-credentials.md) instead of client secrets in your confidential client applications, especially those apps you intend to deploy to production. -## Add code for user login - -In the *index.js* file you've created earlier, add the following code: - -```JavaScript - // Create msal application object - const cca = new msal.ConfidentialClientApplication(config); - - app.get('/', (req, res) => { - const authCodeUrlParameters = { - scopes: ["user.read"], - redirectUri: "http://localhost:3000/redirect", - }; - - // get url to sign user in and consent to scopes needed for application - cca.getAuthCodeUrl(authCodeUrlParameters).then((response) => { - res.redirect(response); - }).catch((error) => console.log(JSON.stringify(error))); - }); - - app.get('/redirect', (req, res) => { - const tokenRequest = { - code: req.query.code, - scopes: ["user.read"], - redirectUri: "http://localhost:3000/redirect", - }; - - cca.acquireTokenByCode(tokenRequest).then((response) => { - console.log("\nResponse: \n:", response); - res.sendStatus(200); - }).catch((error) => { - console.log(error); - res.status(500).send(error); - }); - }); -``` +- `Enter_the_Graph_Endpoint_Here`: The Microsoft Graph API cloud instance that your app will call. For the main (global) Microsoft Graph API service, enter `https://graph.microsoft.com/` (include the trailing forward-slash). +- `Enter_the_Express_Session_Secret_Here` the secret used to sign the Express session cookie. Choose a random string of characters to replace this string with, such as your client secret. + + +2. Next, create a file named *authConfig.js* in the root of your project for reading in these parameters. Once created, add the following code there: + +:::code language="js" source="~/ms-identity-node/App/authConfig.js"::: + +## Add code for user login and token acquisition + +1. 
Create a new file named *auth.js* under the *router* folder and add the following code there: + +:::code language="js" source="~/ms-identity-node/App/routes/auth.js"::: + +2. Next, update the *index.js* route by replacing the existing code with the following: + +:::code language="js" source="~/ms-identity-node/App/routes/index.js"::: -## Test sign in +3. Finally, update the *users.js* route by replacing the existing code with the following: + +:::code language="js" source="~/ms-identity-node/App/routes/users.js"::: + +## Add code for calling the Microsoft Graph API + +Create a file named *fetch.js* in the root of your project and add the following code: + +:::code language="js" source="~/ms-identity-node/App/fetch.js"::: + +## Add views for displaying data + +1. In the *views* folder, update the *index.hbs* file by replacing the existing code with the following: + +:::code language="hbs" source="~/ms-identity-node/App/views/index.hbs"::: + +2. Still in the same folder, create another file named *id.hbs* for displaying the contents of user's ID token: + +:::code language="hbs" source="~/ms-identity-node/App/views/id.hbs"::: + +3. Finally, create another file named *profile.hbs* for displaying the result of the call made to Microsoft Graph: + +:::code language="hbs" source="~/ms-identity-node/App/views/profile.hbs"::: + +## Register routers and add state management + +In the *app.js* file in the root of the project folder, register the routes you have created earlier and add session support for tracking authentication state using the **express-session** package. Replace the existing code there with the following: + +:::code language="js" source="~/ms-identity-node/App/app.js"::: + +## Test sign in and call Microsoft Graph You've completed creation of the application and are now ready to test the app's functionality. 1. Start the Node.js console app by running the following command from within the root of your project folder: ```console - node index.js + npm start ``` -2. Open a browser window and navigate to `http://localhost:3000`. You should see a sign-in screen: +2. Open a browser window and navigate to `http://localhost:3000`. You should see a welcome page: + +:::image type="content" source="media/tutorial-v2-nodejs-webapp-msal/welcome-screen.png" alt-text="Web app welcome page displaying"::: + +3. Select **Sign in** link. You should see the Azure AD sign-in screen: :::image type="content" source="media/tutorial-v2-nodejs-webapp-msal/sign-in-screen.png" alt-text="Azure AD sign-in screen displaying"::: -3. Once you enter your credentials, you should see a consent screen asking you to approve the permissions for the app. +4. Once you enter your credentials, you should see a consent screen asking you to approve the permissions for the app. :::image type="content" source="media/tutorial-v2-nodejs-webapp-msal/consent-screen.png" alt-text="Azure AD consent screen displaying"::: +5. Once you consent, you should be redirected back to application home page. + +:::image type="content" source="media/tutorial-v2-nodejs-webapp-msal/post-sign-in-screen.png" alt-text="Web app welcome page after sign-in displaying"::: + +6. Select the **View ID Token** link for displaying the contents of the signed-in user's ID token. + +:::image type="content" source="media/tutorial-v2-nodejs-webapp-msal/id-token-screen.png" alt-text="User ID token screen displaying"::: + +7. Go back to the home page, and select the **Acquire an access token and call the Microsoft Graph API** link. 
Once you do, you should see the response from the Microsoft Graph `/me` endpoint for the signed-in user. + +:::image type="content" source="media/tutorial-v2-nodejs-webapp-msal/graph-call-screen.png" alt-text="Graph call screen displaying"::: + +8. Go back to the home page, and select the **Sign out** link. You should see the Azure AD sign-out screen. + +:::image type="content" source="media/tutorial-v2-nodejs-webapp-msal/sign-out-screen.png" alt-text="Azure AD sign-out screen displaying"::: + ## How the application works -In this tutorial, you initialized an MSAL Node [ConfidentialClientApplication](https://github.com/AzureAD/microsoft-authentication-library-for-js/blob/dev/lib/msal-node/docs/initialize-confidential-client-application.md) object by passing it a configuration object (*msalConfig*) that contains parameters obtained from your Azure AD app registration on Azure portal. The web app you created uses the [OAuth 2.0 Authorization code grant flow](./v2-oauth2-auth-code-flow.md) to sign-in users and obtain ID and access tokens. +In this tutorial, you instantiated an MSAL Node [ConfidentialClientApplication](https://github.com/AzureAD/microsoft-authentication-library-for-js/blob/dev/lib/msal-node/docs/initialize-confidential-client-application.md) object by passing it a configuration object (*msalConfig*) that contains parameters obtained from your Azure AD app registration on Azure portal. The web app you created uses the [OpenID Connect protocol](./v2-protocols-oidc.md) to sign in users and the [OAuth 2.0 Authorization code grant flow](./v2-oauth2-auth-code-flow.md) to obtain access tokens. ## Next steps diff --git a/articles/active-directory/develop/tutorial-v2-react.md b/articles/active-directory/develop/tutorial-v2-react.md index 7d1fe6b4408ac..417de1fb47936 100644 --- a/articles/active-directory/develop/tutorial-v2-react.md +++ b/articles/active-directory/develop/tutorial-v2-react.md @@ -1,6 +1,5 @@ --- -title: "Tutorial: Create a React single-page app that uses auth code flow | Azure" -titleSuffix: Microsoft identity platform +title: "Tutorial: Create a React single-page app that uses auth code flow" description: In this tutorial, you create a React SPA that can sign in users and use the auth code flow to obtain an access token from the Microsoft identity platform and call the Microsoft Graph API. services: active-directory author: j-mantu diff --git a/articles/active-directory/develop/tutorial-v2-shared-device-mode.md b/articles/active-directory/develop/tutorial-v2-shared-device-mode.md index 72d21f4310051..8e971b2a4364c 100644 --- a/articles/active-directory/develop/tutorial-v2-shared-device-mode.md +++ b/articles/active-directory/develop/tutorial-v2-shared-device-mode.md @@ -1,6 +1,5 @@ --- -title: "Tutorial: Use shared-device mode with the Microsoft Authentication Library (MSAL) for Android | Azure" -titleSuffix: Microsoft identity platform +title: "Tutorial: Use shared-device mode with the Microsoft Authentication Library (MSAL) for Android" description: In this tutorial, you learn how to prepare an Android device to run in shared mode and run a first-line worker app.
services: active-directory author: mmacy diff --git a/articles/active-directory/develop/tutorial-v2-windows-desktop.md b/articles/active-directory/develop/tutorial-v2-windows-desktop.md index 54e98b73c3bdb..6240069796e19 100644 --- a/articles/active-directory/develop/tutorial-v2-windows-desktop.md +++ b/articles/active-directory/develop/tutorial-v2-windows-desktop.md @@ -1,6 +1,5 @@ --- -title: "Tutorial: Create a Windows Presentation Foundation (WPF) app that uses the Microsoft identity platform for authentication | Azure" -titleSuffix: Microsoft identity platform +title: "Tutorial: Create a Windows Presentation Foundation (WPF) app that uses the Microsoft identity platform for authentication" description: In this tutorial, you build a WPF application that uses the Microsoft identity platform to sign in users and get an access token to call the Microsoft Graph API on their behalf. services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/tutorial-v2-windows-uwp.md b/articles/active-directory/develop/tutorial-v2-windows-uwp.md index 0281a97ddd40c..d6e611c4efe0c 100644 --- a/articles/active-directory/develop/tutorial-v2-windows-uwp.md +++ b/articles/active-directory/develop/tutorial-v2-windows-uwp.md @@ -1,6 +1,5 @@ --- -title: "Tutorial: Create a Universal Windows Platform (UWP) app that uses the Microsoft identity platform for authentication | Azure" -titleSuffix: Microsoft identity platform +title: "Tutorial: Create a Universal Windows Platform (UWP) app that uses the Microsoft identity platform for authentication" description: In this tutorial, you build a UWP application that uses the Microsoft identity platform to sign in users and get an access token to call the Microsoft Graph API on their behalf. services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/userinfo.md b/articles/active-directory/develop/userinfo.md index ac7e2c2f1d40a..5ca0be4a8cad2 100644 --- a/articles/active-directory/develop/userinfo.md +++ b/articles/active-directory/develop/userinfo.md @@ -1,6 +1,5 @@ --- -title: Microsoft identity platform UserInfo endpoint | Azure -titleSuffix: Microsoft identity platform +title: Microsoft identity platform UserInfo endpoint description: Learn about the UserInfo endpoint on the Microsoft identity platform. services: active-directory author: nickludwig diff --git a/articles/active-directory/develop/v2-app-types.md b/articles/active-directory/develop/v2-app-types.md index abdcc56a7884a..bb9a26dd821b8 100644 --- a/articles/active-directory/develop/v2-app-types.md +++ b/articles/active-directory/develop/v2-app-types.md @@ -1,5 +1,5 @@ --- -title: Application types for the Microsoft identity platform | Azure +title: Application types for the Microsoft identity platform description: The types of apps and scenarios supported by the Microsoft identity platform. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/v2-conditional-access-dev-guide.md b/articles/active-directory/develop/v2-conditional-access-dev-guide.md index 33b0dbe401d88..2cc37f1153aae 100644 --- a/articles/active-directory/develop/v2-conditional-access-dev-guide.md +++ b/articles/active-directory/develop/v2-conditional-access-dev-guide.md @@ -1,6 +1,5 @@ --- title: Developer guidance for Azure Active Directory Conditional Access -titleSuffix: Microsoft identity platform description: Developer guidance and scenarios for Azure AD Conditional Access and Microsoft identity platform. 
services: active-directory keywords: diff --git a/articles/active-directory/develop/v2-oauth-ropc.md b/articles/active-directory/develop/v2-oauth-ropc.md index e640d73b0644c..6892f9c4ba473 100644 --- a/articles/active-directory/develop/v2-oauth-ropc.md +++ b/articles/active-directory/develop/v2-oauth-ropc.md @@ -1,6 +1,5 @@ --- -title: Sign in with resource owner password credentials grant | Azure -titleSuffix: Microsoft identity platform +title: Sign in with resource owner password credentials grant description: Support browser-less authentication flows using the resource owner password credential (ROPC) grant. services: active-directory author: nickludwig diff --git a/articles/active-directory/develop/v2-oauth2-auth-code-flow.md b/articles/active-directory/develop/v2-oauth2-auth-code-flow.md index b4b4f2f920f2d..c968237660c78 100644 --- a/articles/active-directory/develop/v2-oauth2-auth-code-flow.md +++ b/articles/active-directory/develop/v2-oauth2-auth-code-flow.md @@ -1,6 +1,5 @@ --- -title: Microsoft identity platform and OAuth 2.0 authorization code flow | Azure -titleSuffix: Microsoft identity platform +title: Microsoft identity platform and OAuth 2.0 authorization code flow description: Build web applications using the Microsoft identity platform implementation of the OAuth 2.0 authentication protocol. services: active-directory author: nickludwig diff --git a/articles/active-directory/develop/v2-oauth2-client-creds-grant-flow.md b/articles/active-directory/develop/v2-oauth2-client-creds-grant-flow.md index 85d04b57f6ca2..660b3048024c2 100644 --- a/articles/active-directory/develop/v2-oauth2-client-creds-grant-flow.md +++ b/articles/active-directory/develop/v2-oauth2-client-creds-grant-flow.md @@ -1,5 +1,5 @@ --- -title: OAuth 2.0 client credentials flow on the Microsoft identity platform | Azure +title: OAuth 2.0 client credentials flow on the Microsoft identity platform description: Build web applications by using the Microsoft identity platform implementation of the OAuth 2.0 authentication protocol. services: active-directory author: nickludwig diff --git a/articles/active-directory/develop/v2-oauth2-device-code.md b/articles/active-directory/develop/v2-oauth2-device-code.md index 21001b0668cdf..46965ed0c1f12 100644 --- a/articles/active-directory/develop/v2-oauth2-device-code.md +++ b/articles/active-directory/develop/v2-oauth2-device-code.md @@ -1,6 +1,5 @@ --- -title: OAuth 2.0 device code flow | Azure -titleSuffix: Microsoft identity platform +title: OAuth 2.0 device code flow description: Sign in users without a browser. Build embedded and browser-less authentication flows using the device authorization grant. services: active-directory author: nickludwig diff --git a/articles/active-directory/develop/v2-oauth2-implicit-grant-flow.md b/articles/active-directory/develop/v2-oauth2-implicit-grant-flow.md index d03cb9cfe85c8..d955287a0286e 100644 --- a/articles/active-directory/develop/v2-oauth2-implicit-grant-flow.md +++ b/articles/active-directory/develop/v2-oauth2-implicit-grant-flow.md @@ -1,5 +1,5 @@ --- -title: OAuth 2.0 implicit grant flow - The Microsoft identity platform | Azure +title: OAuth 2.0 implicit grant flow - The Microsoft identity platform description: Secure single-page apps using Microsoft identity platform implicit flow. 
services: active-directory author: nickludwig diff --git a/articles/active-directory/develop/v2-oauth2-on-behalf-of-flow.md b/articles/active-directory/develop/v2-oauth2-on-behalf-of-flow.md index 195c162022a18..615eed7e48eb2 100644 --- a/articles/active-directory/develop/v2-oauth2-on-behalf-of-flow.md +++ b/articles/active-directory/develop/v2-oauth2-on-behalf-of-flow.md @@ -1,6 +1,5 @@ --- -title: Microsoft identity platform and OAuth2.0 On-Behalf-Of flow | Azure -titleSuffix: Microsoft identity platform +title: Microsoft identity platform and OAuth2.0 On-Behalf-Of flow description: This article describes how to use HTTP messages to implement service to service authentication using the OAuth2.0 On-Behalf-Of flow. services: active-directory author: nickludwig diff --git a/articles/active-directory/develop/v2-overview.md b/articles/active-directory/develop/v2-overview.md index b99c92bfedb12..f3c5b2ae292e2 100644 --- a/articles/active-directory/develop/v2-overview.md +++ b/articles/active-directory/develop/v2-overview.md @@ -1,6 +1,5 @@ --- -title: Microsoft identity platform overview - Azure -titleSuffix: Microsoft identity platform +title: Microsoft identity platform overview description: Learn about the components of the Microsoft identity platform and how they can help you build identity and access management (IAM) support into your applications. services: active-directory author: rwike77 @@ -32,7 +31,7 @@ There are several components that make up the Microsoft identity platform: - **Application configuration API and PowerShell**: Programmatic configuration of your applications through the Microsoft Graph API and PowerShell so you can automate your DevOps tasks. - **Developer content**: Technical documentation including quickstarts, tutorials, how-to guides, and code samples. -For developers, the Microsoft identity platform offers integration of modern innovations in the identity and security space like passwordless authentication, step-up authentication, and Conditional Access. You don’t need to implement such functionality yourself: applications integrated with the Microsoft identity platform natively take advantage of such innovations. +For developers, the Microsoft identity platform offers integration of modern innovations in the identity and security space like passwordless authentication, step-up authentication, and Conditional Access. You don't need to implement such functionality yourself: applications integrated with the Microsoft identity platform natively take advantage of such innovations. With the Microsoft identity platform, you can write code once and reach any user. You can build an app once and have it work across many platforms, or build an app that functions as a client as well as a resource application (API). diff --git a/articles/active-directory/develop/v2-protocols-oidc.md b/articles/active-directory/develop/v2-protocols-oidc.md index 734d813cd3ecb..a1a7d0d1124b3 100644 --- a/articles/active-directory/develop/v2-protocols-oidc.md +++ b/articles/active-directory/develop/v2-protocols-oidc.md @@ -1,6 +1,5 @@ --- -title: Microsoft identity platform and OpenID Connect protocol | Azure -titleSuffix: Microsoft identity platform +title: Microsoft identity platform and OpenID Connect protocol description: Build web applications by using the Microsoft identity platform implementation of the OpenID Connect authentication protocol. 
services: active-directory author: nickludwig diff --git a/articles/active-directory/develop/v2-saml-bearer-assertion.md b/articles/active-directory/develop/v2-saml-bearer-assertion.md index 1636318973d88..5b985a968f65b 100644 --- a/articles/active-directory/develop/v2-saml-bearer-assertion.md +++ b/articles/active-directory/develop/v2-saml-bearer-assertion.md @@ -1,6 +1,5 @@ --- title: Exchange a SAML token issued by Active Directory Federation Services (AD FS) for a Microsoft Graph access token -titleSuffix: Microsoft identity platform description: Learn how to fetch data from Microsoft Graph without prompting an AD FS-federated user for credentials by using the SAML bearer assertion flow. services: active-directory author: mmacy diff --git a/articles/active-directory/develop/v2-supported-account-types.md b/articles/active-directory/develop/v2-supported-account-types.md index 28377f28e06b4..5fc571eec3ae1 100644 --- a/articles/active-directory/develop/v2-supported-account-types.md +++ b/articles/active-directory/develop/v2-supported-account-types.md @@ -1,6 +1,5 @@ --- -title: Supported account types | Azure -titleSuffix: Microsoft identity platform +title: Supported account types description: Conceptual documentation about audiences and supported account types in applications services: active-directory author: jmprieur diff --git a/articles/active-directory/develop/web-api-quickstart.md b/articles/active-directory/develop/web-api-quickstart.md index 24c544a8a7d65..3319bcfb9b96f 100644 --- a/articles/active-directory/develop/web-api-quickstart.md +++ b/articles/active-directory/develop/web-api-quickstart.md @@ -1,6 +1,5 @@ --- -title: "Quickstart: Protect a web API with the Microsoft identity platform | Azure" -titleSuffix: Microsoft identity platform +title: "Quickstart: Protect a web API with the Microsoft identity platform" description: In this quickstart, you download and modify a code sample that demonstrates how to protect a web API by using the Microsoft identity platform for authorization. services: active-directory author: Dickson-Mwendia diff --git a/articles/active-directory/develop/web-app-quickstart.md b/articles/active-directory/develop/web-app-quickstart.md index ef6fa69406be8..66dcc0a0e07d2 100644 --- a/articles/active-directory/develop/web-app-quickstart.md +++ b/articles/active-directory/develop/web-app-quickstart.md @@ -1,6 +1,5 @@ --- title: "Quickstart: Sign in users in web apps using the auth code flow" -titleSuffix: Microsoft identity platform description: In this quickstart, learn how a web app can sign in users of personal accounts, work accounts, and school accounts by using the authorization code flow. services: active-directory author: Dickson-Mwendia diff --git a/articles/active-directory/develop/whats-new-docs.md b/articles/active-directory/develop/whats-new-docs.md index c4438313797b7..3b25e7239259a 100644 --- a/articles/active-directory/develop/whats-new-docs.md +++ b/articles/active-directory/develop/whats-new-docs.md @@ -1,12 +1,11 @@ --- title: "What's new in the Microsoft identity platform docs" -titleSuffix: Microsoft identity platform description: "New and updated documentation for the Microsoft identity platform." services: active-directory author: mmacy manager: CelesteDG -ms.date: 04/04/2022 +ms.date: 06/02/2022 ms.service: active-directory ms.subservice: develop ms.topic: reference @@ -19,6 +18,21 @@ ms.custom: has-adal-ref Welcome to what's new in the Microsoft identity platform documentation. 
This article lists new docs that have been added and those that have had significant updates in the last three months. +## May 2022 + +### Updated articles + +- [Developer guide to Conditional Access authentication context](developer-guide-conditional-access-authentication-context.md) +- [Migrate confidential client applications from ADAL.NET to MSAL.NET](msal-net-migration-confidential-client.md) +- [Protected web API: App registration](scenario-protected-web-api-app-registration.md) +- [Quickstart: Sign in users and call the Microsoft Graph API from an Android app](mobile-app-quickstart-portal-android.md) +- [Quickstart: Sign in users and call the Microsoft Graph API from an iOS or macOS app](mobile-app-quickstart-portal-ios.md) +- [Set up your application's Azure AD test environment](test-setup-environment.md) +- [Single Sign-On SAML protocol](single-sign-on-saml-protocol.md) +- [Single sign-on with MSAL.js](msal-js-sso.md) +- [Tutorial: Sign in users and acquire a token for Microsoft Graph in a Node.js & Express web app](tutorial-v2-nodejs-webapp-msal.md) +- [What's new for authentication?](reference-breaking-changes.md) + ## March 2022 ### New articles @@ -39,24 +53,3 @@ Welcome to what's new in the Microsoft identity platform documentation. This art ### Updated articles - [Desktop app that calls web APIs: Acquire a token using WAM](scenario-desktop-acquire-token-wam.md) - -## January 2022 - -### New articles - -- [Access Azure AD protected resources from an app in Google Cloud (preview)](workload-identity-federation-create-trust-gcp.md) - -### Updated articles - -- [Confidential client assertions](msal-net-client-assertions.md) -- [Claims mapping policy type](reference-claims-mapping-policy-type.md) -- [Configure an app to trust a GitHub repo (preview)](workload-identity-federation-create-trust-github.md) -- [Configure an app to trust an external identity provider (preview)](workload-identity-federation-create-trust.md) -- [Exchange a SAML token issued by AD FS for a Microsoft Graph access token](v2-saml-bearer-assertion.md) -- [Logging in MSAL.js](msal-logging-js.md) -- [Permissions and consent in the Microsoft identity platform](v2-permissions-and-consent.md) -- [Quickstart: Acquire a token and call the Microsoft Graph API by using a console app's identity](console-app-quickstart.md) -- [Quickstart: Acquire a token and call Microsoft Graph API from a desktop application](desktop-app-quickstart.md) -- [Quickstart: Add sign-in with Microsoft to a web app](web-app-quickstart.md) -- [Quickstart: Protect a web API with the Microsoft identity platform](web-api-quickstart.md) -- [Quickstart: Sign in users and call the Microsoft Graph API from a mobile application](mobile-app-quickstart.md) diff --git a/articles/active-directory/develop/workload-identities-overview.md b/articles/active-directory/develop/workload-identities-overview.md index d0dc0d1f727c7..4969901bb4973 100644 --- a/articles/active-directory/develop/workload-identities-overview.md +++ b/articles/active-directory/develop/workload-identities-overview.md @@ -1,6 +1,5 @@ --- title: Workload identities -titleSuffix: Microsoft identity platform description: Understand the concepts and supported scenarios for using workload identity in Azure Active Directory. 
author: rwike77 manager: CelesteDG diff --git a/articles/active-directory/develop/workload-identity-federation-create-trust-gcp.md b/articles/active-directory/develop/workload-identity-federation-create-trust-gcp.md index 250d0f37f84a2..7c0afddaa733c 100644 --- a/articles/active-directory/develop/workload-identity-federation-create-trust-gcp.md +++ b/articles/active-directory/develop/workload-identity-federation-create-trust-gcp.md @@ -1,6 +1,5 @@ --- title: Access Azure resources from Google Cloud without credentials -titleSuffix: Microsoft identity platform description: Access Azure AD protected resources from a service running in Google Cloud without using secrets or certificates. Use workload identity federation to set up a trust relationship between an app in Azure AD and an identity in Google Cloud. The workload running in Google Cloud can get an access token from Microsoft identity platform and access Azure AD protected resources. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/workload-identity-federation-create-trust-github.md b/articles/active-directory/develop/workload-identity-federation-create-trust-github.md index dc73382f8f9fe..67b2e8d11ebae 100644 --- a/articles/active-directory/develop/workload-identity-federation-create-trust-github.md +++ b/articles/active-directory/develop/workload-identity-federation-create-trust-github.md @@ -1,6 +1,5 @@ --- title: Create a trust relationship between an app and GitHub -titleSuffix: Microsoft identity platform description: Set up a trust relationship between an app in Azure AD and a GitHub repo. This allows a GitHub Actions workflow to access Azure AD protected resources without using secrets or certificates. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/workload-identity-federation-create-trust.md b/articles/active-directory/develop/workload-identity-federation-create-trust.md index e85e35f4add05..6c861c702b65b 100644 --- a/articles/active-directory/develop/workload-identity-federation-create-trust.md +++ b/articles/active-directory/develop/workload-identity-federation-create-trust.md @@ -1,6 +1,5 @@ --- title: Create a trust relationship between an app and an external identity provider -titleSuffix: Microsoft identity platform description: Set up a trust relationship between an app in Azure AD and an external identity provider. This allows a software workload outside of Azure to access Azure AD protected resources without using secrets or certificates. services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/workload-identity-federation.md b/articles/active-directory/develop/workload-identity-federation.md index 50d25ad903dd8..c8965287fd768 100644 --- a/articles/active-directory/develop/workload-identity-federation.md +++ b/articles/active-directory/develop/workload-identity-federation.md @@ -1,6 +1,5 @@ --- title: Workload identity federation -titleSuffix: Microsoft identity platform description: Use workload identity federation to grant workloads running outside of Azure access to Azure AD protected resources without using secrets or certificates. This eliminates the need for developers to store and maintain long-lived secrets or certificates outside of Azure. 
services: active-directory author: rwike77 diff --git a/articles/active-directory/develop/zero-trust-for-developers.md b/articles/active-directory/develop/zero-trust-for-developers.md index dc011909e3f73..7029ee26b23ad 100644 --- a/articles/active-directory/develop/zero-trust-for-developers.md +++ b/articles/active-directory/develop/zero-trust-for-developers.md @@ -1,6 +1,5 @@ --- title: "Increase app security by following Zero Trust principles" -titleSuffix: Microsoft identity platform description: Learn how following the Zero Trust principles can help increase the security of your application, its data, and which features of the Microsoft identity platform you can use to build Zero Trust-ready apps. services: active-directory author: chrischiedo diff --git a/articles/active-directory/devices/azureadjoin-plan.md b/articles/active-directory/devices/azureadjoin-plan.md index 8448e00584085..83d4c2429df7d 100644 --- a/articles/active-directory/devices/azureadjoin-plan.md +++ b/articles/active-directory/devices/azureadjoin-plan.md @@ -17,7 +17,7 @@ ms.collection: M365-identity-device-management --- # How to: Plan your Azure AD join implementation -Azure AD join allows you to join devices directly to Azure AD without the need to join to on-premises Active Directory while keeping your users productive and secure. Azure AD join is enterprise-ready for both at-scale and scoped deployments. SSO access to on-premises resources is also available to devices that are Azure AD joined. For more information, see [How SSO to on-premises resources works on Azure AD joined devices](azuread-join-sso.md). +You can join devices directly to Azure Active Directory (Azure AD) without the need to join to on-premises Active Directory while keeping your users productive and secure. Azure AD join is enterprise-ready for both at-scale and scoped deployments. Single sign-on (SSO) access to on-premises resources is also available to devices that are Azure AD joined. For more information, see [How SSO to on-premises resources works on Azure AD joined devices](azuread-join-sso.md). This article provides you with the information you need to plan your Azure AD join implementation. @@ -40,7 +40,7 @@ To plan your Azure AD join implementation, you should familiarize yourself with: ## Review your scenarios -While hybrid Azure AD join may be preferred for certain scenarios, Azure AD join enables you to transition towards a cloud-first model with Windows. If you're planning to modernize your devices management and reduce device-related IT costs, Azure AD join provides a great foundation towards achieving those goals. +Azure AD join enables you to transition towards a cloud-first model with Windows. If you're planning to modernize your devices management and reduce device-related IT costs, Azure AD join provides a great foundation towards achieving those goals. Consider Azure AD join if your goals align with the following criteria: @@ -51,7 +51,7 @@ Consider Azure AD join if your goals align with the following criteria: ## Review your identity infrastructure -Azure AD join works in managed and federated environments. We think most organizations will deploy hybrid Azure AD join with managed domains. Managed domain scenarios don't require configuring a federation server. +Azure AD join works in managed and federated environments. We think most organizations will deploy with managed domains. Managed domain scenarios don't require configuring and managing a federation server like Active Directory Federation Services (AD FS). 
### Managed environment @@ -75,12 +75,6 @@ If your identity provider doesn't support these protocols, Azure AD join doesn't > [!NOTE] > Currently, Azure AD join does not work with [AD FS 2019 configured with external authentication providers as the primary authentication method](/windows-server/identity/ad-fs/operations/additional-authentication-methods-ad-fs#enable-external-authentication-methods-as-primary). Azure AD join defaults to password authentication as the primary method, which results in authentication failures in this scenario -### Smartcards and certificate-based authentication - -You can't use smartcards or certificate-based authentication to join devices to Azure AD. However, smartcards can be used to sign in to Azure AD joined devices if you have AD FS configured. - -**Recommendation:** Implement Windows Hello for Business for strong, password-less authentication to Windows 10 or newer. - ### User configuration If you create users in your: @@ -88,7 +82,7 @@ If you create users in your: - **On-premises Active Directory**, you need to synchronize them to Azure AD using [Azure AD Connect](../hybrid/how-to-connect-sync-whatis.md). - **Azure AD**, no extra setup is required. -On-premises UPNs that are different from Azure AD UPNs aren't supported on Azure AD joined devices. If your users use an on-premises UPN, you should plan to switch to using their primary UPN in Azure AD. +On-premises user principal names (UPNs) that are different from Azure AD UPNs aren't supported on Azure AD joined devices. If your users use an on-premises UPN, you should plan to switch to using their primary UPN in Azure AD. UPN changes are only supported starting Windows 10 2004 update. Users on devices with this update won't have any issues after changing their UPNs. For devices before the Windows 10 2004 update, users would have SSO and Conditional Access issues on their devices. They need to sign in to Windows through the "Other user" tile using their new UPN to resolve this issue. @@ -106,7 +100,7 @@ Azure AD join: ### Management platform -Device management for Azure AD joined devices is based on an MDM platform such as Intune, and MDM CSPs. Starting in Windows 10 there is a built-in MDM agent that works with all compatible MDM solutions. +Device management for Azure AD joined devices is based on a mobile device management (MDM) platform such as Intune, and MDM CSPs. Starting in Windows 10 there's a built-in MDM agent that works with all compatible MDM solutions. > [!NOTE] > Group policies are not supported in Azure AD joined devices as they are not connected to on-premises Active Directory. Management of Azure AD joined devices is only possible through MDM @@ -114,7 +108,7 @@ Device management for Azure AD joined devices is based on an MDM platform such a There are two approaches for managing Azure AD joined devices: - **MDM-only** - A device is exclusively managed by an MDM provider like Intune. All policies are delivered as part of the MDM enrollment process. For Azure AD Premium or EMS customers, MDM enrollment is an automated step that is part of an Azure AD join. -- **Co-management** - A device is managed by an MDM provider and SCCM. In this approach, the SCCM agent is installed on an MDM-managed device to administer certain aspects. +- **Co-management** - A device is managed by an MDM provider and Microsoft Endpoint Configuration Manager. In this approach, the Microsoft Endpoint Configuration Manager agent is installed on an MDM-managed device to administer certain aspects. 
If you're using Group Policies, evaluate your GPO and MDM policy parity by using [Group Policy analytics](/mem/intune/configuration/group-policy-analytics) in Microsoft Endpoint Manager. @@ -126,7 +120,7 @@ Review supported and unsupported policies to determine whether you can use an MD If your MDM solution isn't available through the Azure AD app gallery, you can add it following the process outlined in [Azure Active Directory integration with MDM](/windows/client-management/mdm/azure-active-directory-integration-with-mdm). -Through co-management, you can use SCCM to manage certain aspects of your devices while policies are delivered through your MDM platform. Microsoft Intune enables co-management with SCCM. For more information on co-management for Windows 10 or newer devices, see [What is co-management?](/configmgr/core/clients/manage/co-management-overview). If you use an MDM product other than Intune, check with your MDM provider on applicable co-management scenarios. +Through co-management, you can use Microsoft Endpoint Configuration Manager to manage certain aspects of your devices while policies are delivered through your MDM platform. Microsoft Intune enables co-management with Microsoft Endpoint Configuration Manager. For more information on co-management for Windows 10 or newer devices, see [What is co-management?](/configmgr/core/clients/manage/co-management-overview). If you use an MDM product other than Intune, check with your MDM provider on applicable co-management scenarios. **Recommendation:** Consider MDM only management for Azure AD joined devices. @@ -250,8 +244,8 @@ Before you can configure your mobility settings, you may have to add an MDM prov **To add an MDM provider**: -1. On the **Azure Active Directory page**, in the **Manage** section, click `Mobility (MDM and MAM)`. -1. Click **Add application**. +1. On the **Azure Active Directory page**, in the **Manage** section, select `Mobility (MDM and MAM)`. +1. Select **Add application**. 1. Select your MDM provider from the list. :::image type="content" source="./media/azureadjoin-plan/04.png" alt-text="Screenshot of the Azure Active Directory Add an application page. Several M D M providers are listed." border="false"::: diff --git a/articles/active-directory/devices/device-management-azure-portal.md b/articles/active-directory/devices/device-management-azure-portal.md index 236e3023847fc..b724976dc6724 100644 --- a/articles/active-directory/devices/device-management-azure-portal.md +++ b/articles/active-directory/devices/device-management-azure-portal.md @@ -157,7 +157,7 @@ You must be assigned one of the following roles to view or manage device setting - **Additional local administrators on Azure AD joined devices**: This setting allows you to select the users who are granted local administrator rights on a device. These users are added to the Device Administrators role in Azure AD. Global Administrators in Azure AD and device owners are granted local administrator rights by default. This option is a premium edition capability available through products like Azure AD Premium and Enterprise Mobility + Security. - **Users may register their devices with Azure AD**: You need to configure this setting to allow users to register Windows 10 or newer personal, iOS, Android, and macOS devices with Azure AD. If you select **None**, devices aren't allowed to register with Azure AD. Enrollment with Microsoft Intune or mobile device management for Microsoft 365 requires registration. 
If you've configured either of these services, **ALL** is selected and **NONE** is unavailable. -- **Require Multi-Factor Authentication to register or join devices with Azure AD**: This setting allows you to specify whether users are required to provide another authentication factor to join or register their devices to Azure AD. The default is **No**. We recommend that you require multifactor authentication when a device is registered or joined. Before you enable multifactor authentication for this service, you must ensure that multifactor authentication is configured for users that register their devices. For more information on Azure AD Multi-Factor Authentication services, see [getting started with Azure AD Multi-Factor Authentication](../authentication/concept-mfa-howitworks.md). +- **Require Multi-Factor Authentication to register or join devices with Azure AD**: This setting allows you to specify whether users are required to provide another authentication factor to join or register their devices to Azure AD. The default is **No**. We recommend that you require multifactor authentication when a device is registered or joined. Before you enable multifactor authentication for this service, you must ensure that multifactor authentication is configured for users that register their devices. For more information on Azure AD Multi-Factor Authentication services, see [getting started with Azure AD Multi-Factor Authentication](../authentication/concept-mfa-howitworks.md). This setting may not work with third-party identity providers. > [!NOTE] > The **Require Multi-Factor Authentication to register or join devices with Azure AD** setting applies to devices that are either Azure AD joined (with some exceptions) or Azure AD registered. This setting doesn't apply to hybrid Azure AD joined devices, [Azure AD joined VMs in Azure](./howto-vm-sign-in-azure-ad-windows.md#enabling-azure-ad-login-for-windows-vm-in-azure), or Azure AD joined devices that use [Windows Autopilot self-deployment mode](/mem/autopilot/self-deploying). diff --git a/articles/active-directory/devices/howto-vm-sign-in-azure-ad-windows.md b/articles/active-directory/devices/howto-vm-sign-in-azure-ad-windows.md index 3f5e7b45bb7ef..66f435cf619ae 100644 --- a/articles/active-directory/devices/howto-vm-sign-in-azure-ad-windows.md +++ b/articles/active-directory/devices/howto-vm-sign-in-azure-ad-windows.md @@ -274,14 +274,14 @@ The AADLoginForWindows extension must install successfully in order for the VM t 1. Ensure the required endpoints are accessible from the VM using PowerShell: - - `curl https://login.microsoftonline.com/ -D -` - - `curl https://login.microsoftonline.com// -D -` - - `curl https://enterpriseregistration.windows.net/ -D -` - - `curl https://device.login.microsoftonline.com/ -D -` - - `curl https://pas.windows.net/ -D -` + - `curl.exe https://login.microsoftonline.com/ -D -` + - `curl.exe https://login.microsoftonline.com// -D -` + - `curl.exe https://enterpriseregistration.windows.net/ -D -` + - `curl.exe https://device.login.microsoftonline.com/ -D -` + - `curl.exe https://pas.windows.net/ -D -` > [!NOTE] - > Replace `` with the Azure AD Tenant ID that is associated with the Azure subscription.
`enterpriseregistration.windows.net` and `pas.windows.net` should return 404 Not Found, which is expected behavior. + > Replace `` with the Azure AD Tenant ID that is associated with the Azure subscription.
`login.microsoftonline.com/`, `enterpriseregistration.windows.net`, and `pas.windows.net` should return 404 Not Found, which is expected behavior. 1. The Device State can be viewed by running `dsregcmd /status`. The goal is for Device State to show as `AzureAdJoined : YES`. @@ -398,12 +398,12 @@ If you've configured a Conditional Access policy that requires multi-factor auth - Your credentials did not work. -![Your credentials did not work](./media/howto-vm-sign-in-azure-ad-windows/your-credentials-did-not-work.png) - > [!WARNING] -> Per-user Enabled/Enforced Azure AD Multi-Factor Authentication is not supported for VM Sign-In. This setting causes Sign-in to fail with “Your credentials do not work.” error message. +> Legacy per-user Enabled/Enforced Azure AD Multi-Factor Authentication is not supported for VM Sign-In. This setting causes Sign-in to fail with “Your credentials do not work.” error message. + +![Your credentials did not work](./media/howto-vm-sign-in-azure-ad-windows/your-credentials-did-not-work.png) -You can resolve the above issue by removing the per user MFA setting, by following these steps: +You can resolve the above issue by removing the per-user MFA setting, by following these steps: ``` diff --git a/articles/active-directory/devices/hybrid-azuread-join-control.md b/articles/active-directory/devices/hybrid-azuread-join-control.md index ff95afc45afab..4aa7bd9117400 100644 --- a/articles/active-directory/devices/hybrid-azuread-join-control.md +++ b/articles/active-directory/devices/hybrid-azuread-join-control.md @@ -72,10 +72,10 @@ Use the following example to create a Group Policy Object (GPO) to deploy a regi ### Configure AD FS settings -If you're using AD FS, you first need to configure client-side SCP using the instructions mentioned earlier by linking the GPO to your AD FS servers. The SCP object defines the source of authority for device objects. It can be on-premises or Azure AD. When client-side SCP is configured for AD FS, the source for device objects is established as Azure AD. +If your Azure AD is federated with AD FS, you first need to configure client-side SCP using the instructions mentioned earlier by linking the GPO to your AD FS servers. The SCP object defines the source of authority for device objects. It can be on-premises or Azure AD. When client-side SCP is configured for AD FS, the source for device objects is established as Azure AD. > [!NOTE] -> If you failed to configure client-side SCP on your AD FS servers, the source for device identities would be considered as on-premises. ADFS will then start deleting device objects from on-premises directory after the stipulated period defined in the ADFS Device Registration's attribute "MaximumInactiveDays". ADFS Device Registration objects can be found using the [Get-AdfsDeviceRegistration cmdlet](/powershell/module/adfs/get-adfsdeviceregistration). +> If you failed to configure client-side SCP on your AD FS servers, the source for device identities would be considered as on-premises. AD FS will then start deleting device objects from on-premises directory after the stipulated period defined in the AD FS Device Registration's attribute "MaximumInactiveDays". AD FS Device Registration objects can be found using the [Get-AdfsDeviceRegistration cmdlet](/powershell/module/adfs/get-adfsdeviceregistration). 
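To check the cleanup window that AD FS is using before you link the GPO, you can inspect the Device Registration Service configuration. The following is a minimal sketch, run in an elevated PowerShell session on an AD FS server; it assumes the inactivity window is exposed as the `MaximumInactiveDays` property of the object returned by `Get-AdfsDeviceRegistration`.

```powershell
# Run on an AD FS server that has the ADFS PowerShell module installed (ships with the AD FS role).
# Shows the Device Registration Service configuration, including the inactivity window
# after which AD FS starts deleting stale device objects.
Get-AdfsDeviceRegistration

# Assumption: the window is surfaced as the MaximumInactiveDays property.
(Get-AdfsDeviceRegistration).MaximumInactiveDays
```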
## Supporting down-level devices diff --git a/articles/active-directory/devices/manage-stale-devices.md b/articles/active-directory/devices/manage-stale-devices.md index 9490d3a4e50b0..3b3e3880e7be0 100644 --- a/articles/active-directory/devices/manage-stale-devices.md +++ b/articles/active-directory/devices/manage-stale-devices.md @@ -6,7 +6,7 @@ services: active-directory ms.service: active-directory ms.subservice: devices ms.topic: how-to -ms.date: 02/15/2022 +ms.date: 06/01/2022 ms.author: joflore author: MicrosoftGuyJFlo @@ -83,7 +83,7 @@ It isn't advisable to immediately delete a device that appears to be stale becau ### MDM-controlled devices -If your device is under control of Intune or any other MDM solution, retire the device in the management system before disabling or deleting it. +If your device is under control of Intune or any other MDM solution, retire the device in the management system before disabling or deleting it. For more information see the article [Remove devices by using wipe, retire, or manually unenrolling the device](/mem/intune/remote-actions/devices-wipe). ### System-managed devices @@ -208,4 +208,6 @@ Any authentication where a device is being used to authenticate to Azure AD are ## Next steps +Devices managed with Intune can be retired or wiped, for more information see the article [Remove devices by using wipe, retire, or manually unenrolling the device](/mem/intune/remote-actions/devices-wipe). + To get an overview of how to manage device in the Azure portal, see [managing devices using the Azure portal](device-management-azure-portal.md) diff --git a/articles/active-directory/enterprise-users/TOC.yml b/articles/active-directory/enterprise-users/TOC.yml index a4a1db4174451..0918ea7cfd08a 100644 --- a/articles/active-directory/enterprise-users/TOC.yml +++ b/articles/active-directory/enterprise-users/TOC.yml @@ -110,6 +110,8 @@ href: groups-dynamic-rule-more-efficient.md - name: Change group membership type href: groups-change-type.md + - name: Group as memberOf a dynamic group + href: groups-dynamic-rule-member-of.md - name: Add members in bulk href: groups-bulk-import-members.md - name: Remove members in bulk diff --git a/articles/active-directory/enterprise-users/directory-delete-howto.md b/articles/active-directory/enterprise-users/directory-delete-howto.md index 6be13ba83a46b..820cefe157bbf 100644 --- a/articles/active-directory/enterprise-users/directory-delete-howto.md +++ b/articles/active-directory/enterprise-users/directory-delete-howto.md @@ -93,7 +93,7 @@ You can put a subscription into the **Deprovisioned** state to be deleted in thr If you have an Active or Cancelled Azure Subscription associated to your Azure AD Tenant then you would not be able to delete Azure AD Tenant. After you cancel, billing is stopped immediately. However, Microsoft waits 30 - 90 days before permanently deleting your data in case you need to access it or you change your mind. We don't charge you for keeping the data. -- If you have a free trial or pay-as-you-go subscription, you don't have to wait 90 days for the subscription to automatically delete. You can delete your subscription three days after you cancel it. The Delete subscription option isn't available until three days after you cancel your subscription. For more details please read through [Delete free trial or pay-as-you-go subscriptions](../../cost-management-billing/manage/cancel-azure-subscription.md#delete-free-trial-or-pay-as-you-go-subscriptions). 
+- If you have a free trial or pay-as-you-go subscription, you don't have to wait 90 days for the subscription to automatically delete. You can delete your subscription three days after you cancel it. The Delete subscription option isn't available until three days after you cancel your subscription. For more details please read through [Delete free trial or pay-as-you-go subscriptions](../../cost-management-billing/manage/cancel-azure-subscription.md#delete-subscriptions). - All other subscription types are deleted only through the [subscription cancellation](../../cost-management-billing/manage/cancel-azure-subscription.md#cancel-subscription-in-the-azure-portal) process. In other words, you can't delete a subscription directly unless it's a free trial or pay-as-you-go subscription. However, after you cancel a subscription, you can create an [Azure support request](https://go.microsoft.com/fwlink/?linkid=2083458) to ask to have the subscription deleted immediately. - Alternatively, you can also move/transfer the Azure subscription to another Azure AD tenant account. When you transfer billing ownership of your subscription to an account in another Azure AD tenant, you can move the subscription to the new account's tenant. Additionally, perfoming Switch Directory on the subscription would not help as the billing would still be aligned with Azure AD Tenant which was used to sign up for the subscription. For more information review [Transfer a subscription to another Azure AD tenant account](../../cost-management-billing/manage/billing-subscription-transfer.md#transfer-a-subscription-to-another-azure-ad-tenant-account) @@ -156,4 +156,4 @@ You can put a self-service sign-up product like Microsoft Power BI or Azure Righ ## Next steps -[Azure Active Directory documentation](../index.yml) \ No newline at end of file +[Azure Active Directory documentation](../index.yml) diff --git a/articles/active-directory/enterprise-users/domains-verify-custom-subdomain.md b/articles/active-directory/enterprise-users/domains-verify-custom-subdomain.md index 854cf047b679d..5565e144df4b2 100644 --- a/articles/active-directory/enterprise-users/domains-verify-custom-subdomain.md +++ b/articles/active-directory/enterprise-users/domains-verify-custom-subdomain.md @@ -64,7 +64,7 @@ Because subdomains inherit the authentication type of the root domain by default Use the following command to promote the subdomain: ```http -POST https://graph.microsoft.com/v1.0/domains/foo.contoso.com/promote +POST https://graph.windows.net/{tenant-id}/domains/foo.contoso.com/promote ``` ### Promote command error conditions @@ -114,4 +114,4 @@ Invoking API with a federated verified subdomain with user references | POST | 4 - [Add custom domain names](../fundamentals/add-custom-domain.md?context=azure%2factive-directory%2fusers-groups-roles%2fcontext%2fugr-context) - [Manage domain names](domains-manage.md) -- [ForceDelete a custom domain name with Microsoft Graph API](/graph/api/domain-forcedelete) \ No newline at end of file +- [ForceDelete a custom domain name with Microsoft Graph API](/graph/api/domain-forcedelete) diff --git a/articles/active-directory/enterprise-users/groups-dynamic-membership.md b/articles/active-directory/enterprise-users/groups-dynamic-membership.md index 31a0f4fae8192..b479f6aed1257 100644 --- a/articles/active-directory/enterprise-users/groups-dynamic-membership.md +++ b/articles/active-directory/enterprise-users/groups-dynamic-membership.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.subservice: 
enterprise-users ms.workload: identity ms.topic: overview -ms.date: 09/24/2021 +ms.date: 06/03/2022 ms.author: curtand ms.reviewer: krbain ms.custom: it-pro @@ -18,12 +18,12 @@ ms.collection: M365-identity-device-management # Dynamic membership rules for groups in Azure Active Directory -In Azure Active Directory (Azure AD), you can create complex attribute-based rules to enable dynamic memberships for groups. Dynamic group membership reduces the administrative overhead of adding and removing users. This article details the properties and syntax to create dynamic membership rules for users or devices. You can set up a rule for dynamic membership on security groups or Microsoft 365 groups. +In Azure Active Directory (Azure AD), you can create attribute-based rules to enable dynamic membership for a group. Dynamic group membership adds and removes group members automatically using membership rules based on member attributes. This article details the properties and syntax to create dynamic membership rules for users or devices. You can set up a rule for dynamic membership on security groups or Microsoft 365 groups. When any attributes of a user or device change, the system evaluates all dynamic group rules in a directory to see if the change would trigger any group adds or removes. If a user or device satisfies a rule on a group, they are added as a member of that group. If they no longer satisfy the rule, they are removed. You can't manually add or remove a member of a dynamic group. - You can create a dynamic group for devices or for users, but you can't create a rule that contains both users and devices. -- You can't create a device group based on the device owners' attributes. Device membership rules can only reference device attributes. +- You can't create a device group based on the user attributes of the device owner. Device membership rules can reference only device attributes. > [!NOTE] > This feature requires an Azure AD Premium P1 license or Intune for Education for each unique user that is a member of one or more dynamic groups. You don't have to assign licenses to users for them to be members of dynamic groups, but you must have the minimum number of licenses in the Azure AD organization to cover all such users. For example, if you had a total of 1,000 unique users in all dynamic groups in your organization, you would need at least 1,000 licenses for Azure AD Premium P1 to meet the license requirement. 
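Besides the portal, a dynamic membership rule can be supplied when the group is created programmatically. The following is a minimal sketch that uses the Microsoft Graph PowerShell SDK (`Microsoft.Graph.Groups` module); the display name, mail nickname, and membership rule are placeholder values, not examples taken from this article.

```powershell
# Sketch: create a security group whose membership is driven by a dynamic rule.
# Requires the Microsoft Graph PowerShell SDK and Group.ReadWrite.All permission.
Connect-MgGraph -Scopes "Group.ReadWrite.All"

New-MgGroup -DisplayName "Marketing users (dynamic)" `
    -MailEnabled:$false `
    -MailNickname "marketing-dynamic" `
    -SecurityEnabled `
    -GroupTypes "DynamicMembership" `
    -MembershipRule '(user.department -eq "Marketing")' `
    -MembershipRuleProcessingState "On"
```

Setting `MembershipRuleProcessingState` to `On` starts evaluation immediately; `Paused` lets you stage the rule first.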
@@ -81,10 +81,10 @@ The following are the user properties that you can use to create a single expres ### Properties of type boolean -| Properties | Allowed values | Usage | -| --- | --- | --- | -| accountEnabled |true false |user.accountEnabled -eq true | -| dirSyncEnabled |true false |user.dirSyncEnabled -eq true | +Properties | Allowed values | Usage +--- | --- | --- +accountEnabled |true false |user.accountEnabled -eq true +dirSyncEnabled |true false |user.dirSyncEnabled -eq true ### Properties of type string @@ -101,6 +101,7 @@ The following are the user properties that you can use to create a single expres | jobTitle |Any string value or *null* |(user.jobTitle -eq "value") | | mail |Any string value or *null* (SMTP address of the user) |(user.mail -eq "value") | | mailNickName |Any string value (mail alias of the user) |(user.mailNickName -eq "value") | +| memberOf | Any string value (valid group object ID) | (user.memberof -any (group.objectId -in ['value'])) | | mobile |Any string value or *null* |(user.mobile -eq "value") | | objectId |GUID of the user object |(user.objectId -eq "11111111-1111-1111-1111-111111111111") | | onPremisesDistinguishedName (preview)| Any string value or *null* |(user.onPremisesDistinguishedName -eq "value") | @@ -265,7 +266,7 @@ assignedPlans is a multi-value property that lists all service plans assigned to user.assignedPlans -any (assignedPlan.servicePlanId -eq "efb87545-963c-4e0d-99df-69c6916d9eb0" -and assignedPlan.capabilityStatus -eq "Enabled") ``` -A rule such as this one can be used to group all users for whom a Microsoft 365 (or other Microsoft Online Service) capability is enabled. You could then apply with a set of policies to the group. +A rule such as this one can be used to group all users for whom a Microsoft 365 or other Microsoft Online Service capability is enabled. You could then apply a set of policies to the group. #### Example 2 @@ -345,13 +346,13 @@ device.objectId -ne null ## Extension properties and custom extension properties -Extension attributes and custom extension properties are supported as string properties in dynamic membership rules. [Extension attributes](/graph/api/resources/onpremisesextensionattributes) are synced from on-premises Window Server AD and take the format of "ExtensionAttributeX", where X equals 1 - 15. Here's an example of a rule that uses an extension attribute as a property: +Extension attributes and custom extension properties are supported as string properties in dynamic membership rules. [Extension attributes](/graph/api/resources/onpremisesextensionattributes) are synced from on-premises Windows Server Active Directory and take the format of "ExtensionAttributeX", where X equals 1 - 15.
Here's an example of a rule that uses an extension attribute as a property: ``` (user.extensionAttribute15 -eq "Marketing") ``` -[Custom extension properties](../hybrid/how-to-connect-sync-feature-directory-extensions.md) are synced from on-premises Windows Server AD or from a connected SaaS application and are of the format of `user.extension_[GUID]_[Attribute]`, where: +[Custom extension properties](../hybrid/how-to-connect-sync-feature-directory-extensions.md) are synced from on-premises Windows Server Active Directory or from a connected SaaS application and are of the format of `user.extension_[GUID]_[Attribute]`, where: - [GUID] is the unique identifier in Azure AD for the application that created the property in Azure AD - [Attribute] is the name of the property as it was created @@ -393,12 +394,13 @@ The following device attributes can be used. enrollmentProfileName | Apple Device Enrollment Profile name, Android Enterprise Corporate-owned dedicated device Enrollment Profile name, or Windows Autopilot profile name | (device.enrollmentProfileName -eq "DEP iPhones") isRooted | true false | (device.isRooted -eq true) managementType | MDM (for mobile devices) | (device.managementType -eq "MDM") + memberOf | Any string value (valid group object ID) | (device.memberof -any (group.objectId -in ['value'])) deviceId | a valid Azure AD device ID | (device.deviceId -eq "d4fe7726-5966-431c-b3b8-cddc8fdb717d") objectId | a valid Azure AD object ID | (device.objectId -eq "76ad43c9-32c5-45e8-a272-7b58b58f596d") devicePhysicalIds | any string value used by Autopilot, such as all Autopilot devices, OrderID, or PurchaseOrderID | (device.devicePhysicalIDs -any _ -contains "[ZTDId]") (device.devicePhysicalIds -any _ -eq "[OrderID]:179887111881") (device.devicePhysicalIds -any _ -eq "[PurchaseOrderId]:76222342342") systemLabels | any string matching the Intune device property for tagging Modern Workplace devices | (device.systemLabels -contains "M365Managed") -> [!Note] +> [!NOTE] > For the deviceOwnership when creating Dynamic Groups for devices you need to set the value equal to "Company". On Intune the device ownership is represented instead as Corporate. Refer to [OwnerTypes](/intune/reports-ref-devices#ownertypes) for more details. ## Next steps diff --git a/articles/active-directory/enterprise-users/groups-dynamic-rule-member-of.md b/articles/active-directory/enterprise-users/groups-dynamic-rule-member-of.md new file mode 100644 index 0000000000000..7afd278316885 --- /dev/null +++ b/articles/active-directory/enterprise-users/groups-dynamic-rule-member-of.md @@ -0,0 +1,59 @@ +--- +title: Group membership for Azure AD dynamic groups with memberOf - Azure AD | Microsoft Docs +description: How to create a dynamic membership group that can contain members of other groups in Azure Active Directory. +services: active-directory +documentationcenter: '' +author: curtand +manager: karenhoran +ms.service: active-directory +ms.subservice: enterprise-users +ms.workload: identity +ms.topic: overview +ms.date: 06/02/2022 +ms.author: curtand +ms.reviewer: krbain +ms.custom: it-pro +ms.collection: M365-identity-device-management +--- + +# Group membership in a dynamic group (preview) in Azure Active Directory + +This feature preview enables admins to create dynamic groups in Azure Active Directory (Azure AD) that populate by adding members of other groups using the memberOf attribute.
Apps that couldn't read group-based membership previously in Azure AD can now read the entire membership of these new memberOf groups. Not only can these groups be used for apps, but they can also be used for licensing assignment and role-based access control. The following diagram illustrates how you could create Dynamic-Group-A with members of Security-Group-X and Security-Group-Y. Members of the groups inside of Security-Group-X and Security-Group-Y don't become members of Dynamic-Group-A. + +:::image type="content" source="./media/groups-dynamic-rule-member-of/member-of-diagram.png" alt-text="Diagram showing how the memberOf attribute works."::: + +With this preview, admins can configure dynamic groups with the memberOf attribute in the Azure portal, Microsoft Graph, and PowerShell. Security groups, Microsoft 365 groups, and groups that are synced from on-premises Active Directory can all be added as members of these dynamic groups, and can all be added to a single group. For example, the dynamic group could be a security group, but you can use Microsoft 365 groups, security groups, and groups that are synced from on-premises to define its membership. + +## Prerequisites + +Only administrators in the Global Administrator, Intune Administrator, or User Administrator role can use the memberOf attribute to create an Azure AD dynamic group. You must have an Azure AD Premium license for the Azure AD tenant. + +## Preview limitations + +- Each Azure AD tenant is limited to 500 dynamic groups using the memberOf attribute. memberOf groups do count towards the total dynamic group member quota of 5,000. +- Each dynamic group can have up to 50 member groups. +- When adding members of security groups to memberOf dynamic groups, only direct members of the security group become members of the dynamic group. +- You can't use one memberOf dynamic group to define the membership of another memberOf dynamic group. For example, Dynamic Group A, with members of group B and C in it, can't be a member of Dynamic Group D. +- MemberOf can't be used with other rules. For example, a rule that states dynamic group A should contain members of group B and also should contain only users located in Redmond will fail. +- The dynamic group rule builder and validate feature can't be used for memberOf at this time. +- MemberOf can't be used with other operators. For example, you can't create a rule that states “Members Of group A can't be in Dynamic group B.” + +## Getting started + +This feature can be used in the Azure AD portal, Microsoft Graph, and in PowerShell. Because memberOf isn't yet supported in the rule builder, you must enter your rule in the rule editor. + +### Steps to create a memberOf dynamic group + +1. Sign in to the Azure portal with an account that has Global Administrator, Intune Administrator, or User Administrator role permissions. +1. Select **Azure Active Directory** > **Groups**, and then select **New group**. +1. Fill in group details. The group type can be Security or Microsoft 365, and the membership type can be set to **Dynamic User** or **Dynamic Device**. +1. Select **Add dynamic query**. +1. MemberOf isn't yet supported in the rule builder. Select **Edit** to write the rule in the **Rule syntax** box. + 1. Example user rule: `user.memberof -any (group.objectId -in ['groupId', 'groupId'])` + 1. Example device rule: `device.memberof -any (group.objectId -in ['groupId', 'groupId'])` +1. Select **OK**. +1. Select **Create group**.
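The same rule can also be supplied when the group is created through Microsoft Graph, which the article lists as a supported surface. The sketch below uses the generic request cmdlet from the Microsoft Graph PowerShell SDK; the display name, mail nickname, and group IDs are placeholders, not values from this article.

```powershell
# Sketch: create a memberOf dynamic group by calling Microsoft Graph directly.
Connect-MgGraph -Scopes "Group.ReadWrite.All"

$body = @{
    displayName                   = "Dynamic-Group-A"
    mailEnabled                   = $false
    mailNickname                  = "dynamic-group-a"
    securityEnabled               = $true
    groupTypes                    = @("DynamicMembership")
    # Replace the placeholder object IDs with the IDs of the member groups.
    membershipRule                = "user.memberof -any (group.objectId -in ['<groupId-1>', '<groupId-2>'])"
    membershipRuleProcessingState = "On"
}

Invoke-MgGraphRequest -Method POST -Uri "https://graph.microsoft.com/v1.0/groups" -Body $body
```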
+ +## Next steps + +To report an issue, contact us in the [Teams channel](https://teams.microsoft.com/l/channel/19%3a39Q7HFuexXXE3Vh90woJRNQQBbZl1YyesJHIEquuQCw1%40thread.tacv2/General?groupId=bfd3bfb8-e0db-4e9e-9008-5d7ba8c996b0&tenantId=72f988bf-86f1-41af-91ab-2d7cd011db47). \ No newline at end of file diff --git a/articles/active-directory/enterprise-users/groups-dynamic-rule-more-efficient.md b/articles/active-directory/enterprise-users/groups-dynamic-rule-more-efficient.md index 775666d869c14..be7d28d3d7df6 100644 --- a/articles/active-directory/enterprise-users/groups-dynamic-rule-more-efficient.md +++ b/articles/active-directory/enterprise-users/groups-dynamic-rule-more-efficient.md @@ -31,8 +31,8 @@ Minimize the usage of the 'match' operator in rules as much as possible. Instead It's better to use rules like: -- `user.city -contains "ago,"` -- `user.city -startswith "Lag,"` +- `user.city -contains "ago"` +- `user.city -startswith "Lag"` Or, best of all: diff --git a/articles/active-directory/enterprise-users/groups-settings-cmdlets.md b/articles/active-directory/enterprise-users/groups-settings-cmdlets.md index 3697e01459222..74d4ed21fe1d0 100644 --- a/articles/active-directory/enterprise-users/groups-settings-cmdlets.md +++ b/articles/active-directory/enterprise-users/groups-settings-cmdlets.md @@ -160,7 +160,7 @@ Here are the settings defined in the Group.Unified SettingsTemplate. Unless othe |
• GuestUsageGuidelinesUrl<br>• Type: String<br>• Default: "" | The URL of a link to the guest usage guidelines. |
|<br>• AllowToAddGuests<br>• Type: Boolean<br>• Default: True | A boolean indicating whether or not is allowed to add guests to this directory.<br>This setting may be overridden and become read-only if *EnableMIPLabels* is set to *True* and a guest policy is associated with the sensitivity label assigned to the group.<br>If the AllowToAddGuests setting is set to False at the organization level, any AllowToAddGuests setting at the group level is ignored. If you want to enable guest access for only a few groups, you must set AllowToAddGuests to be true at the organization level, and then selectively disable it for specific groups. |
|<br>• ClassificationList<br>• Type: String<br>• Default: "" | A comma-delimited list of valid classification values that can be applied to Microsoft 365 groups.<br>This setting does not apply when EnableMIPLabels == True.|
-|<br>• EnableMIPLabels<br>• Type: Boolean<br>• Default: "False" |The flag indicating whether sensitivity labels published in Microsoft 365 Compliance Center can be applied to Microsoft 365 groups. For more information, see [Assign Sensitivity Labels for Microsoft 365 groups](groups-assign-sensitivity-labels.md). |
+|<br>• EnableMIPLabels<br>• Type: Boolean<br>• Default: "False" |The flag indicating whether sensitivity labels published in Microsoft Purview compliance portal can be applied to Microsoft 365 groups. For more information, see [Assign Sensitivity Labels for Microsoft 365 groups](groups-assign-sensitivity-labels.md). |
## Example: Configure Guest policy for groups at the directory level 1. Get all the setting templates: diff --git a/articles/active-directory/enterprise-users/licensing-groups-assign.md b/articles/active-directory/enterprise-users/licensing-groups-assign.md index 856aeb69c227f..ca79e54bbd04e 100644 --- a/articles/active-directory/enterprise-users/licensing-groups-assign.md +++ b/articles/active-directory/enterprise-users/licensing-groups-assign.md @@ -11,7 +11,7 @@ ms.service: active-directory ms.subservice: enterprise-users ms.topic: how-to ms.workload: identity -ms.date: 12/02/2020 +ms.date: 05/26/2022 ms.author: curtand ms.reviewer: sumitp ms.custom: it-pro @@ -27,7 +27,7 @@ In this example, the Azure AD organization contains a security group called **HR > [!NOTE] > Some Microsoft services are not available in all locations. Before a license can be assigned to a user, the administrator has to specify the Usage location property on the user. > -> For group license assignment, any users without a usage location specified inherit the location of the directory. If you have users in multiple locations, we recommend that you always set usage location as part of your user creation flow in Azure AD (e.g. via AAD Connect configuration) - that ensures the result of license assignment is always correct and users do not receive services in locations that are not allowed. +> For group license assignment, any users without a usage location specified inherit the location of the directory. If you have users in multiple locations, we recommend that you always set usage location as part of your user creation flow in Azure AD. For example, configure Azure AD Connect to set the usage location. This recommendation makes sure the result of license assignment is always correct and users do not receive services in locations that are not allowed. ## Step 1: Assign the required licenses @@ -43,6 +43,9 @@ In this example, the Azure AD organization contains a security group called **HR 1. Select a user or group, and then use the **Select** button at the bottom of the page to confirm your selection. + >[!NOTE] + >When assigning licenses to a group with service plans that have dependencies on other service plans, they must both be assigned together in the same group; otherwise, the service plan with the dependency will be disabled. + 1. On the **Assign license** page, click **Assignment options**, which displays all service plans included in the two products that we selected previously. Find **Yammer Enterprise** and turn it **Off** to disable that service from the product license. Confirm by clicking **OK** at the bottom of **License options**.
![select service plans for licenses](./media/licensing-groups-assign/assignment-options.png) diff --git a/articles/active-directory/enterprise-users/media/groups-dynamic-rule-member-of/member-of-diagram.png b/articles/active-directory/enterprise-users/media/groups-dynamic-rule-member-of/member-of-diagram.png new file mode 100644 index 0000000000000..6575c2e9a77bf Binary files /dev/null and b/articles/active-directory/enterprise-users/media/groups-dynamic-rule-member-of/member-of-diagram.png differ diff --git a/articles/active-directory/external-identities/b2b-direct-connect-overview.md b/articles/active-directory/external-identities/b2b-direct-connect-overview.md index d7afbed466087..4413fbdd8a81e 100644 --- a/articles/active-directory/external-identities/b2b-direct-connect-overview.md +++ b/articles/active-directory/external-identities/b2b-direct-connect-overview.md @@ -24,7 +24,7 @@ B2B direct connect requires a mutual trust relationship between two Azure AD org Currently, B2B direct connect capabilities work with Teams shared channels. When B2B direct connect is established between two organizations, users in one organization can create a shared channel in Teams and invite an external B2B direct connect user to it. Then from within Teams, the B2B direct connect user can seamlessly access the shared channel in their home tenant Teams instance, without having to manually sign in to the organization hosting the shared channel. -For licensing and pricing information related to B2B direct connect users, refer to [Azure Active Directory pricing](https://azure.microsoft.com/pricing/details/active-directory/). +For licensing and pricing information related to B2B direct connect users, refer to [Azure Active Directory External Identities pricing](https://azure.microsoft.com/pricing/details/active-directory/external-identities/). ## Managing cross-tenant access for B2B direct connect diff --git a/articles/active-directory/external-identities/b2b-quickstart-invite-powershell.md b/articles/active-directory/external-identities/b2b-quickstart-invite-powershell.md index f0c8bb72db7c4..043517b173191 100644 --- a/articles/active-directory/external-identities/b2b-quickstart-invite-powershell.md +++ b/articles/active-directory/external-identities/b2b-quickstart-invite-powershell.md @@ -23,7 +23,7 @@ If you don’t have an Azure subscription, create a [free account](https://azure ## Prerequisites ### PowerShell Module -Install the [Microsoft Graph Identity Sign-ins module](/powershell/module/microsoft.graph.identity.signins/?view=graph-powershell-beta) (Microsoft.Graph.Identity.SignIns) and the [Microsoft Graph Users module](/powershell/module/microsoft.graph.users/?view=graph-powershell-beta) (Microsoft.Graph.Users). +Install the [Microsoft Graph Identity Sign-ins module](/powershell/module/microsoft.graph.identity.signins/?view=graph-powershell-beta&preserve-view=true) (Microsoft.Graph.Identity.SignIns) and the [Microsoft Graph Users module](/powershell/module/microsoft.graph.users/?view=graph-powershell-beta&preserve-view=true) (Microsoft.Graph.Users). 
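For example, both modules can be installed from the PowerShell Gallery (a sketch; the `-Scope CurrentUser` option is an assumption that avoids requiring an elevated session):

```powershell
# Install the Microsoft Graph PowerShell modules used in this quickstart.
Install-Module Microsoft.Graph.Identity.SignIns -Scope CurrentUser
Install-Module Microsoft.Graph.Users -Scope CurrentUser
```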
### Get a test email account diff --git a/articles/active-directory/external-identities/cross-cloud-settings.md b/articles/active-directory/external-identities/cross-cloud-settings.md index 907b1bd5c59c0..66b5a7623d549 100644 --- a/articles/active-directory/external-identities/cross-cloud-settings.md +++ b/articles/active-directory/external-identities/cross-cloud-settings.md @@ -44,9 +44,16 @@ After each organization has completed these steps, Azure AD B2B collaboration be In your Microsoft cloud settings, enable the Microsoft Azure cloud you want to collaborate with. +> [!NOTE] +> The admin experience is currently still deploying to national clouds. To access the admin experience in Microsoft Azure Government or Microsoft Azure China, you can use these links: +> +>Microsoft Azure Government - https://aka.ms/cloudsettingsusgov +> +>Microsoft Azure China - https://aka.ms/cloudsettingschina + 1. Sign in to the [Azure portal](https://portal.azure.com) using a Global administrator or Security administrator account. Then open the **Azure Active Directory** service. 1. Select **External Identities**, and then select **Cross-tenant access settings (Preview)**. -1. Select **Cross cloud settings**. +1. Select **Microsoft cloud settings (Preview)**. 1. Select the checkboxes next to the external Microsoft Azure clouds you want to enable. ![Screenshot showing Microsoft cloud settings.](media/cross-cloud-settings/cross-cloud-settings.png) diff --git a/articles/active-directory/external-identities/cross-tenant-access-overview.md b/articles/active-directory/external-identities/cross-tenant-access-overview.md index 79006efa5192e..309af5bfd30a6 100644 --- a/articles/active-directory/external-identities/cross-tenant-access-overview.md +++ b/articles/active-directory/external-identities/cross-tenant-access-overview.md @@ -71,6 +71,13 @@ To set up B2B collaboration, both organizations configure their Microsoft cloud For configuration steps, see [Configure Microsoft cloud settings for B2B collaboration (Preview)](cross-cloud-settings.md). +> [!NOTE] +> The admin experience is currently still deploying to national clouds. To access the admin experience in Microsoft Azure Government or Microsoft Azure China, you can use these links: +> +>Microsoft Azure Government - https://aka.ms/cloudsettingsusgov +> +>Microsoft Azure China - https://aka.ms/cloudsettingschina + ### Default settings in cross-cloud scenarios To collaborate with a partner tenant in a different Microsoft Azure cloud, both organizations need to mutually enable B2B collaboration with each other. The first step is to enable the partner's cloud in your cross-tenant settings. When you first enable another cloud, B2B collaboration is blocked for all tenants in that cloud. You need to add the tenant you want to collaborate with to your Organizational settings, and at that point your default settings go into effect for that tenant only. You can allow the default settings to remain in effect, or you can modify the organizational settings for the tenant. 
diff --git a/articles/active-directory/external-identities/leave-the-organization.md b/articles/active-directory/external-identities/leave-the-organization.md index 6bde3a4c74768..e6aefdbc3733b 100644 --- a/articles/active-directory/external-identities/leave-the-organization.md +++ b/articles/active-directory/external-identities/leave-the-organization.md @@ -15,6 +15,7 @@ manager: celestedg ms.reviewer: mal ms.collection: M365-identity-device-management +adobe-target: true --- # Leave an organization as a B2B collaboration user diff --git a/articles/active-directory/external-identities/whats-new-docs.md b/articles/active-directory/external-identities/whats-new-docs.md index f4732de075cbd..4dd155126e238 100644 --- a/articles/active-directory/external-identities/whats-new-docs.md +++ b/articles/active-directory/external-identities/whats-new-docs.md @@ -1,7 +1,7 @@ --- title: "What's new in Azure Active Directory External Identities" description: "New and updated documentation for the Azure Active Directory External Identities." -ms.date: 05/02/2022 +ms.date: 06/01/2022 ms.service: active-directory ms.subservice: B2B ms.topic: reference @@ -15,6 +15,32 @@ manager: CelesteDG Welcome to what's new in Azure Active Directory External Identities documentation. This article lists new docs that have been added and those that have had significant updates in the last three months. To learn what's new with the External Identities service, see [What's new in Azure Active Directory](../fundamentals/whats-new.md). + +## May 2022 + +### New articles + +- [Configure Microsoft cloud settings for B2B collaboration (Preview)](cross-cloud-settings.md) + +### Updated articles + +- [Configure Microsoft cloud settings for B2B collaboration (Preview)](cross-cloud-settings.md) +- [Overview: Cross-tenant access with Azure AD External Identities (Preview)](cross-tenant-access-overview.md) +- [Example: Configure SAML/WS-Fed based identity provider federation with AD FS](direct-federation-adfs.md) +- [Federation with SAML/WS-Fed identity providers for guest users](direct-federation.md) +- [External Identities documentation](index.yml) +- [Quickstart: Add a guest user and send an invitation](b2b-quickstart-add-guest-users-portal.md) +- [B2B collaboration overview](what-is-b2b.md) +- [Leave an organization as a B2B collaboration user](leave-the-organization.md) +- [Configure external collaboration settings](external-collaboration-settings-configure.md) +- [B2B direct connect overview (Preview)](b2b-direct-connect-overview.md) +- [Azure Active Directory External Identities: What's new](whats-new-docs.md) +- [Configure cross-tenant access settings for B2B collaboration (Preview)](cross-tenant-access-settings-b2b-collaboration.md) +- [Configure cross-tenant access settings for B2B direct connect (Preview)](cross-tenant-access-settings-b2b-direct-connect.md) +- [Azure AD B2B in government and national clouds](b2b-government-national-clouds.md) +- [External Identities in Azure Active Directory](external-identities-overview.md) +- [Troubleshooting Azure Active Directory B2B collaboration](troubleshoot.md) + ## April 2022 ### Updated articles @@ -58,22 +84,3 @@ Welcome to what's new in Azure Active Directory External Identities documentatio - [Leave an organization as a B2B collaboration user](leave-the-organization.md) - [Configure external collaboration settings](external-collaboration-settings-configure.md) - [Reset redemption status for a guest user (Preview)](reset-redemption-status.md) - -## February 2022 - -### Updated 
articles - -- [Add Google as an identity provider for B2B guest users](google-federation.md) -- [External Identities in Azure Active Directory](external-identities-overview.md) -- [Overview: Cross-tenant access with Azure AD External Identities (Preview)](cross-tenant-access-overview.md) -- [B2B collaboration overview](what-is-b2b.md) -- [Federation with SAML/WS-Fed identity providers for guest users (preview)](direct-federation.md) -- [Quickstart: Add a guest user with PowerShell](b2b-quickstart-invite-powershell.md) -- [Tutorial: Bulk invite Azure AD B2B collaboration users](tutorial-bulk-invite.md) -- [Azure Active Directory B2B best practices](b2b-fundamentals.md) -- [Azure Active Directory B2B collaboration FAQs](faq.yml) -- [Email one-time passcode authentication](one-time-passcode.md) -- [Azure Active Directory B2B collaboration invitation redemption](redemption-experience.md) -- [Troubleshooting Azure Active Directory B2B collaboration](troubleshoot.md) -- [Properties of an Azure Active Directory B2B collaboration user](user-properties.md) -- [Authentication and Conditional Access for External Identities](authentication-conditional-access.md) diff --git a/articles/active-directory/fundamentals/5-secure-access-b2b.md b/articles/active-directory/fundamentals/5-secure-access-b2b.md index dd24d16b2494c..c7a9bdc740dc3 100644 --- a/articles/active-directory/fundamentals/5-secure-access-b2b.md +++ b/articles/active-directory/fundamentals/5-secure-access-b2b.md @@ -76,7 +76,7 @@ You can use an allowlist or blocklist to [restrict invitations to B2B users](../ > Limiting to a predefined domain may inadvertently prevent authorized collaboration with organizations, which have other domains for their users. For example, if doing business with an organization Contoso, the initial point of contact with Contoso might be one of their US-based employees who has an email with a ".com" domain. However, if you only allow the ".com" domain you may inadvertently omit their Canadian employees who have ".ca" domain. > [!IMPORTANT] -> These lists do not apply to users who are already in your directory. By default, they also do not apply to OneDrive for Business and SharePoint allow/blocklists which are separate unless you enable the [SharePoint/OneDrive B2B integration](https://docs.microsoft.com/sharepoint/sharepoint-azureb2b-integration). +> These lists do not apply to users who are already in your directory. By default, they also do not apply to OneDrive for Business and SharePoint allow/blocklists which are separate unless you enable the [SharePoint/OneDrive B2B integration](/sharepoint/sharepoint-azureb2b-integration). Some organizations use a list of known ‘bad actor’ domains provided by their managed security provider for their blocklist. For example, if the organization is legitimately doing business with Contoso and using a .com domain, there may be an unrelated organization that has been using the Contoso .org domain and attempting a phishing attack to impersonate Contoso employees. @@ -254,4 +254,4 @@ See the following articles on securing external access to resources. We recommen 8. [Secure access with Sensitivity labels](8-secure-access-sensitivity-labels.md) -9. [Secure access to Microsoft Teams, OneDrive, and SharePoint](9-secure-access-teams-sharepoint.md) +9. 
[Secure access to Microsoft Teams, OneDrive, and SharePoint](9-secure-access-teams-sharepoint.md) \ No newline at end of file diff --git a/articles/active-directory/fundamentals/7-secure-access-conditional-access.md b/articles/active-directory/fundamentals/7-secure-access-conditional-access.md index de48d28cad045..ff6a7bc159df2 100644 --- a/articles/active-directory/fundamentals/7-secure-access-conditional-access.md +++ b/articles/active-directory/fundamentals/7-secure-access-conditional-access.md @@ -89,6 +89,10 @@ There may be times you want to block external users except a specific group. For After confirming your settings using [report-only mode](../conditional-access/howto-conditional-access-insights-reporting.md), an administrator can move the **Enable policy** toggle from **Report-only** to **On**. +### External partner access + +Conditional Access policies that target external users may interfere with service provider access, for example granular delegated admin privileges [Introduction to granular delegated admin privileges (GDAP)](/partner-center/gdap-introduction). + ## Implement Conditional Access Many common Conditional Access policies are documented. See the article [Common Conditional Access policies](../conditional-access/concept-conditional-access-policy-common.md) for other common policies you may want to adapt for external users. diff --git a/articles/active-directory/fundamentals/8-secure-access-sensitivity-labels.md b/articles/active-directory/fundamentals/8-secure-access-sensitivity-labels.md index 804f7d1bcaa6f..afbafdfd65dad 100644 --- a/articles/active-directory/fundamentals/8-secure-access-sensitivity-labels.md +++ b/articles/active-directory/fundamentals/8-secure-access-sensitivity-labels.md @@ -32,7 +32,7 @@ Sensitivity labels on email and other content travel with the content. Sensitivi ## Permissions necessary to create and manage sensitivity levels -Members of your compliance team who will create sensitivity labels need permissions to the Microsoft 365 Defender portal, Microsoft 365 Compliance Center, or Office 365 Security & Compliance Center. +Members of your compliance team who will create sensitivity labels need permissions to the Microsoft 365 Defender portal, Microsoft Purview compliance portal, or Office 365 Security & Compliance Center. By default, global administrators for your tenant have access to these admin centers and can give compliance officers and other people access, without giving them all the permissions of a tenant admin. For this delegated limited admin access, add users to the Compliance Data Administrator, Compliance Administrator, or Security Administrator role group. diff --git a/articles/active-directory/fundamentals/active-directory-access-create-new-tenant.md b/articles/active-directory/fundamentals/active-directory-access-create-new-tenant.md index 9b40f921e87b7..16a9c614b5e46 100644 --- a/articles/active-directory/fundamentals/active-directory-access-create-new-tenant.md +++ b/articles/active-directory/fundamentals/active-directory-access-create-new-tenant.md @@ -41,10 +41,10 @@ After you sign in to the Azure portal, you can create a new tenant for your orga 1. Select **Next: Configuration** to move on to the Configuration tab. +1. On the Configuration tab, enter the following information: + ![Azure Active Directory - Create a tenant page - configuration tab ](media/active-directory-access-create-new-tenant/azure-ad-create-new-tenant.png) -1. 
On the Configuration tab, enter the following information: - - Type _Contoso Organization_ into the **Organization name** box. - Type _Contosoorg_ into the **Initial domain name** box. diff --git a/articles/active-directory/fundamentals/active-directory-groups-membership-azure-portal.md b/articles/active-directory/fundamentals/active-directory-groups-membership-azure-portal.md index adbb74d9644e5..13657022a17fb 100644 --- a/articles/active-directory/fundamentals/active-directory-groups-membership-azure-portal.md +++ b/articles/active-directory/fundamentals/active-directory-groups-membership-azure-portal.md @@ -26,7 +26,7 @@ This article helps you to add and remove a group from another group using Azure You can add an existing Security group to another existing Security group (also known as nested groups), creating a member group (subgroup) and a parent group. The member group inherits the attributes and properties of the parent group, saving you configuration time. >[!Important] ->We don't currently support:
            • Adding groups to a group synced with on-premises Active Directory.
            • Adding Security groups to Microsoft 365 groups.
            • Adding Microsoft 365 groups to Security groups or other Microsoft 365 groups.
            • Assigning apps to nested groups.
            • Applying licenses to nested groups.
            • Adding distribution groups in nesting scenarios.
            • Adding security groups as members of mail-enabled security groups
            +>We don't currently support:
            • Adding groups to a group synced with on-premises Active Directory.
            • Adding Security groups to Microsoft 365 groups.
            • Adding Microsoft 365 groups to Security groups or other Microsoft 365 groups.
            • Assigning apps to nested groups.
            • Applying licenses to nested groups.
            • Adding distribution groups in nesting scenarios.
            • Adding security groups as members of mail-enabled security groups.
            • Adding groups as members of a role-assignable group.
            ### To add a group as a member of another group diff --git a/articles/active-directory/fundamentals/add-custom-domain.md b/articles/active-directory/fundamentals/add-custom-domain.md index 8cf9ad9925ad6..bd12103153c58 100644 --- a/articles/active-directory/fundamentals/add-custom-domain.md +++ b/articles/active-directory/fundamentals/add-custom-domain.md @@ -56,6 +56,8 @@ After you create your directory, you can add your custom domain name. >[!IMPORTANT] >You must include *.com*, *.net*, or any other top-level extension for this to work properly. + > + >When adding a custom domain, the Password Policy values will be inherited from the initial domain. The unverified domain is added. The **contoso.com** page appears showing your DNS information. Save this information. You need it later to create a TXT record to configure DNS. @@ -114,4 +116,4 @@ If Azure AD can't verify a custom domain name, try the following suggestions: - Manage your domain name information in Azure AD. For more information, see [Managing custom domain names](../enterprise-users/domains-manage.md). -- If you have on-premises versions of Windows Server that you want to use alongside Azure Active Directory, see [Integrate your on-premises directories with Azure Active Directory](../hybrid/whatis-hybrid-identity.md). \ No newline at end of file +- If you have on-premises versions of Windows Server that you want to use alongside Azure Active Directory, see [Integrate your on-premises directories with Azure Active Directory](../hybrid/whatis-hybrid-identity.md). diff --git a/articles/active-directory/fundamentals/concept-fundamentals-block-legacy-authentication.md b/articles/active-directory/fundamentals/concept-fundamentals-block-legacy-authentication.md index f2e93526824f5..e338cd6a135c9 100644 --- a/articles/active-directory/fundamentals/concept-fundamentals-block-legacy-authentication.md +++ b/articles/active-directory/fundamentals/concept-fundamentals-block-legacy-authentication.md @@ -29,7 +29,7 @@ Today, the majority of all compromising sign-in attempts come from legacy authen Before you can block legacy authentication in your directory, you need to first understand if your users have apps that use legacy authentication and how it affects your overall directory. Azure AD sign-in logs can be used to understand if you're using legacy authentication. -1. Navigate to the **Azure portal** > **Azure Active Directory** > **Sign-ins**. +1. Navigate to the **Azure portal** > **Azure Active Directory** > **Sign-in logs**. 1. Add the **Client App** column if it is not shown by clicking on **Columns** > **Client App**. 1. Filter by **Client App** > check all the **Legacy Authentication Clients** options presented. 1. Filter by **Status** > **Success**. diff --git a/articles/active-directory/fundamentals/protect-m365-from-on-premises-attacks.md b/articles/active-directory/fundamentals/protect-m365-from-on-premises-attacks.md index 212c09fc89ac7..1a73bb9c00ad4 100644 --- a/articles/active-directory/fundamentals/protect-m365-from-on-premises-attacks.md +++ b/articles/active-directory/fundamentals/protect-m365-from-on-premises-attacks.md @@ -1,138 +1,139 @@ --- title: Protecting Microsoft 365 from on-premises attacks -description: Guidance about how to ensure an on-premises attack doesn't affect Microsoft 365. +description: Learn how to configure your systems to help protect your Microsoft 365 cloud environment from on-premises compromise. 
services: active-directory author: BarbaraSelden manager: martinco ms.service: active-directory ms.workload: identity ms.subservice: fundamentals -ms.topic: conceptual -ms.date: 12/22/2020 +ms.topic: how-to +ms.date: 04/29/2022 ms.author: baselden ms.reviewer: ajburnle -ms.custom: "it-pro, seodec18" +ms.custom: + - it-pro + - seodec18 + - kr2b-contr-experiment ms.collection: M365-identity-device-management --- # Protecting Microsoft 365 from on-premises attacks -Many customers connect their private corporate networks to Microsoft 365 to benefit their users, devices, and applications. However, these private networks can be compromised in many well-documented ways. Because Microsoft 365 acts as a sort of nervous system for many organizations, it's critical to protect it from compromised on-premises infrastructure. +Many customers connect their private corporate networks to Microsoft 365 to benefit their users, devices, and applications. However, these private networks can be compromised in many well-documented ways. Microsoft 365 acts as a sort of nervous system for many organizations. It's critical to protect it from compromised on-premises infrastructure. -This article shows you how to configure your systems to protect your Microsoft 365 cloud environment from on-premises compromise. We focus primarily on: +This article shows you how to configure your systems to help protect your Microsoft 365 cloud environment from on-premises compromise, including the following elements: -- Azure Active Directory (Azure AD) tenant configuration settings. -- How Azure AD tenants can be safely connected to on-premises systems. -- The tradeoffs required to operate your systems in ways that protect your cloud systems from on-premises compromise. +- Azure Active Directory (Azure AD) tenant configuration settings +- How Azure AD tenants can be safely connected to on-premises systems +- The tradeoffs required to operate your systems in ways that protect your cloud systems from on-premises compromise -We strongly recommend you implement this guidance to secure your Microsoft 365 cloud environment. +Microsoft strongly recommends that you implement this guidance. -> [!NOTE] -> This article was initially published as a blog post. It has been moved to its current location for longevity and maintenance. -> -> To create an offline version of this article, use your browser's print-to-PDF functionality. Check back here frequently for updates. +## Threat sources in on-premises environments -## Primary threat vectors from compromised on-premises environments +Your Microsoft 365 cloud environment benefits from an extensive monitoring and security infrastructure. Microsoft 365 uses machine learning and human intelligence to look across worldwide traffic. It can rapidly detect attacks and allow you to reconfigure nearly in real time. -Your Microsoft 365 cloud environment benefits from an extensive monitoring and security infrastructure. Using machine learning and human intelligence, Microsoft 365 looks across worldwide traffic. It can rapidly detect attacks and allow you to reconfigure nearly in real time. - -In hybrid deployments that connect on-premises infrastructure to Microsoft 365, many organizations delegate trust to on-premises components for critical authentication and directory object state management decisions. Unfortunately, if the on-premises environment is compromised, these trust relationships become an attacker's opportunities to compromise your Microsoft 365 environment. 
+Hybrid deployments can connect on-premises infrastructure to Microsoft 365. In such deployments, many organizations delegate trust to on-premises components for critical authentication and directory object state management decisions. If the on-premises environment is compromised, these trust relationships become an attacker's opportunities to compromise your Microsoft 365 environment. The two primary threat vectors are *federation trust relationships* and *account synchronization.* Both vectors can grant an attacker administrative access to your cloud. -* **Federated trust relationships**, such as SAML authentication, are used to authenticate to Microsoft 365 through your on-premises identity infrastructure. If a SAML token-signing certificate is compromised, federation allows anyone who has that certificate to impersonate any user in your cloud. *We recommend you disable federation trust relationships for authentication to Microsoft 365 when possible.* - -* **Account synchronization** can be used to modify privileged users (including their credentials) or groups that have administrative privileges in Microsoft 365. *We recommend you ensure that synchronized objects hold no privileges beyond a user in Microsoft 365,* either directly or through inclusion in trusted roles or groups. Ensure these objects have no direct or nested assignment in trusted cloud roles or groups. +- **Federated trust relationships**, such as Security Assertions Markup Language (SAML) authentication, are used to authenticate to Microsoft 365 through your on-premises identity infrastructure. If a SAML token-signing certificate is compromised, federation allows anyone who has that certificate to impersonate any user in your cloud. -## Protecting Microsoft 365 from on-premises compromise + We recommend that you disable federation trust relationships for authentication to Microsoft 365 when possible. -To address the threat vectors outlined earlier, we recommend you adhere to the principles illustrated in the following diagram: +- **Account synchronization** can be used to modify privileged users, including their credentials, or groups that have administrative privileges in Microsoft 365. -![Reference architecture for protecting Microsoft 365.](media/protect-m365/protect-m365-principles.png) + We recommend that you ensure that synchronized objects hold no privileges beyond a user in Microsoft 365. You can control privileges either directly or through inclusion in trusted roles or groups. Ensure these objects have no direct or nested assignment in trusted cloud roles or groups. -1. **Fully isolate your Microsoft 365 administrator accounts.** They should be: +## Protecting Microsoft 365 from on-premises compromise - * Mastered in Azure AD. +To address the threats described above, we recommend you adhere to the principles illustrated in the following diagram: - * Authenticated by using multifactor authentication. +![Reference architecture for protecting Microsoft 365, as described in the following list.](media/protect-m365/protect-m365-principles.png) - * Secured by Azure AD Conditional Access. +1. **Fully isolate your Microsoft 365 administrator accounts.** They should be: - * Accessed only by using Azure-managed workstations. + - Mastered in Azure AD. + - Authenticated by using multifactor authentication. + - Secured by Azure AD Conditional Access. + - Accessed only by using Azure-managed workstations. - These administrator accounts are restricted-use accounts. 
*No on-premises accounts should have administrative privileges in Microsoft 365.* + These administrator accounts are restricted-use accounts. No on-premises accounts should have administrative privileges in Microsoft 365. - For more information, see the [overview of Microsoft 365 administrator roles](/microsoft-365/admin/add-users/about-admin-roles). Also see [Roles for Microsoft 365 in Azure AD](../roles/m365-workload-docs.md). + For more information, see [About admin roles](/microsoft-365/admin/add-users/about-admin-roles). Also, see [Roles for Microsoft 365 in Azure AD](../roles/m365-workload-docs.md). 1. **Manage devices from Microsoft 365.** Use Azure AD join and cloud-based mobile device management (MDM) to eliminate dependencies on your on-premises device management infrastructure. These dependencies can compromise device and security controls. 1. **Ensure no on-premises account has elevated privileges to Microsoft 365.** Some accounts access on-premises applications that require NTLM, LDAP, or Kerberos authentication. These accounts must be in the organization's on-premises identity infrastructure. Ensure that these accounts, including service accounts, aren't included in privileged cloud roles or groups. Also ensure that changes to these accounts can't affect the integrity of your cloud environment. Privileged on-premises software must not be capable of affecting Microsoft 365 privileged accounts or roles. -1. **Use Azure AD cloud authentication** to eliminate dependencies on your on-premises credentials. Always use strong authentication, such as Windows Hello, FIDO, Microsoft Authenticator, or Azure AD multifactor authentication. +1. **Use Azure AD cloud authentication to eliminate dependencies on your on-premises credentials.** Always use strong authentication, such as Windows Hello, FIDO, Microsoft Authenticator, or Azure AD multifactor authentication. ## Specific security recommendations -The following sections provide specific guidance about how to implement the principles described earlier. +The following sections provide guidance about how to implement the principles described above. ### Isolate privileged identities In Azure AD, users who have privileged roles, such as administrators, are the root of trust to build and manage the rest of the environment. Implement the following practices to minimize the effects of a compromise. -* Use cloud-only accounts for Azure AD and Microsoft 365 privileged roles. +- Use cloud-only accounts for Azure AD and Microsoft 365 privileged roles. -* Deploy [privileged access devices](/security/compass/privileged-access-devices#device-roles-and-profiles) for privileged access to manage Microsoft 365 and Azure AD. +- Deploy privileged access devices for privileged access to manage Microsoft 365 and Azure AD. See [Device roles and profiles](/security/compass/privileged-access-devices#device-roles-and-profiles). -* Deploy [Azure AD Privileged Identity Management](../privileged-identity-management/pim-configure.md) (PIM) for just-in-time (JIT) access to all human accounts that have privileged roles. Require strong authentication to activate roles. + Deploy Azure AD Privileged Identity Management (PIM) for just-in-time access to all human accounts that have privileged roles. Require strong authentication to activate roles. See [What is Azure AD Privileged Identity Management](../privileged-identity-management/pim-configure.md). 
-* Provide administrative roles that allow the [least privilege necessary to do required tasks](../roles/delegate-by-task.md). +- Provide administrative roles that allow the least privilege necessary to do required tasks. See [Least privileged roles by task in Azure Active Directory](../roles/delegate-by-task.md). -* To enable a rich role assignment experience that includes delegation and multiple roles at the same time, consider using Azure AD security groups or Microsoft 365 Groups. These groups are collectively called *cloud groups*. Also [enable role-based access control](../roles/groups-assign-role.md). You can use [administrative units](../roles/administrative-units.md) to restrict the scope of roles to a portion of the organization. +- To enable a rich role assignment experience that includes delegation and multiple roles at the same time, consider using Azure AD security groups or Microsoft 365 Groups. These groups are collectively called *cloud groups*. -* Deploy [emergency access accounts](../roles/security-emergency-access.md). Do *not* use on-premises password vaults to store credentials. + Also, enable role-based access control. See [Assign Azure AD roles to groups](../roles/groups-assign-role.md). You can use administrative units to restrict the scope of roles to a portion of the organization. See [Administrative units in Azure Active Directory](../roles/administrative-units.md). -For more information, see [Securing privileged access](/security/compass/overview). Also see [Secure access practices for administrators in Azure AD](../roles/security-planning.md). +- Deploy emergency access accounts. Do *not* use on-premises password vaults to store credentials. See [Manage emergency access accounts in Azure AD](../roles/security-emergency-access.md). -### Use cloud authentication +For more information, see [Securing privileged access](/security/compass/overview). Also, see [Secure access practices for administrators in Azure AD](../roles/security-planning.md). + +### Use cloud authentication Credentials are a primary attack vector. Implement the following practices to make credentials more secure: -* [Deploy passwordless authentication](../authentication/howto-authentication-passwordless-deployment.md). Reduce the use of passwords as much as possible by deploying passwordless credentials. These credentials are managed and validated natively in the cloud. Choose from these authentication methods: +- **Deploy passwordless authentication**. Reduce the use of passwords as much as possible by deploying passwordless credentials. These credentials are managed and validated natively in the cloud. For more information, see [Plan a passwordless authentication deployment in Azure Active Directory](../authentication/howto-authentication-passwordless-deployment.md). - * [Windows Hello for business](/windows/security/identity-protection/hello-for-business/passwordless-strategy) + Choose from these authentication methods: - * [The Microsoft Authenticator app](../authentication/howto-authentication-passwordless-phone.md) + - [Windows Hello for business](/windows/security/identity-protection/hello-for-business/passwordless-strategy) + - [The Microsoft Authenticator app](../authentication/howto-authentication-passwordless-phone.md) + - [FIDO2 security keys](../authentication/howto-authentication-passwordless-security-key-windows.md) - * [FIDO2 security keys](../authentication/howto-authentication-passwordless-security-key-windows.md) +- **Deploy multifactor authentication**. 
For more information, see [Plan an Azure Active Directory Multi-Factor Authentication deployment](../authentication/howto-mfa-getstarted.md). -* [Deploy multifactor authentication](../authentication/howto-mfa-getstarted.md). Provision - [multiple strong credentials by using Azure AD multifactor authentication](../fundamentals/resilience-in-credentials.md). That way, access to cloud resources will require a credential that's managed in Azure AD in addition to an on-premises password that can be manipulated. For more information, see [Create a resilient access control management strategy by using Azure AD](./resilience-overview.md). + Provision multiple strong credentials by using Azure AD multifactor authentication. That way, access to cloud resources requires an Azure AD managed credential in addition to an on-premises password. For more information, see [Build resilience with credential management](../fundamentals/resilience-in-credentials.md) and [Create a resilient access control management strategy by using Azure AD](./resilience-overview.md). ### Limitations and tradeoffs -* Hybrid account password management requires hybrid components such as password protection agents and password writeback agents. If your on-premises infrastructure is compromised, attackers can control the machines on which these agents reside. This vulnerability won't compromise your cloud infrastructure. But your cloud accounts won't protect these components from on-premises compromise. +Hybrid account password management requires hybrid components such as password protection agents and password writeback agents. If your on-premises infrastructure is compromised, attackers can control the machines on which these agents reside. This vulnerability won't compromise your cloud infrastructure. But your cloud accounts won't protect these components from on-premises compromise. -* On-premises accounts synced from Active Directory are marked to never expire in Azure AD. This setting is usually mitigated by on-premises Active Directory password settings. However, if your on-premises instance of Active Directory is compromised and synchronization is disabled, you must set the [EnforceCloudPasswordPolicyForPasswordSyncedUsers](../hybrid/how-to-connect-password-hash-synchronization.md) option to force password changes. +On-premises accounts synced from Active Directory are marked to never expire in Azure AD. This setting is usually mitigated by on-premises Active Directory password settings. If your instance of Active Directory is compromised and synchronization is disabled, set the [EnforceCloudPasswordPolicyForPasswordSyncedUsers](../hybrid/how-to-connect-password-hash-synchronization.md) option to force password changes. ## Provision user access from the cloud *Provisioning* refers to the creation of user accounts and groups in applications or identity providers. -![Diagram of provisioning architecture.](media/protect-m365/protect-m365-provision.png) +![Diagram of provisioning architecture shows the interaction of Azure A D with Cloud HR, Azure A D B 2 B, Azure app provisioning, and group-based licensing.](media/protect-m365/protect-m365-provision.png) We recommend the following provisioning methods: -* **Provision from cloud HR apps to Azure AD**: This provisioning enables an on-premises compromise to be isolated, without disrupting your joiner-mover-leaver cycle from your cloud HR apps to Azure AD. 
- -* **Cloud applications**: Where possible, deploy [Azure AD app provisioning](../app-provisioning/user-provisioning.md) as opposed to on-premises provisioning solutions. This method protects some of your software-as-a-service (SaaS) apps from being affected by malicious hacker profiles in on-premises breaches. +- **Provision from cloud HR apps to Azure AD.** This provisioning enables an on-premises compromise to be isolated. This isolation doesn't disrupt your joiner-mover-leaver cycle from your cloud HR apps to Azure AD. +- **Cloud applications.** Where possible, deploy Azure AD app provisioning as opposed to on-premises provisioning solutions. This method protects some of your software as a service (SaaS) apps from malicious hacker profiles in on-premises breaches. For more information, see [What is app provisioning in Azure Active Directory](../app-provisioning/user-provisioning.md). +- **External identities.** Use Azure AD B2B collaboration to reduce the dependency on on-premises accounts for external collaboration with partners, customers, and suppliers. Carefully evaluate any direct federation with other identity providers. For more information, see [B2B collaboration overview](../external-identities/what-is-b2b.md). -* **External identities**: Use [Azure AD B2B collaboration](../external-identities/what-is-b2b.md) This method reduces the dependency on on-premises accounts for external collaboration with partners, customers, and suppliers. Carefully evaluate any direct federation with other identity providers. We recommend limiting B2B guest accounts in the following ways: + We recommend limiting B2B guest accounts in the following ways: - * Limit guest access to browsing groups and other properties in the directory. Use the external collaboration settings to restrict guests' ability to read groups they're not members of. + - Limit guest access to browsing groups and other properties in the directory. Use the external collaboration settings to restrict guests' ability to read groups they're not members of. + - Block access to the Azure portal. You can make rare necessary exceptions. Create a Conditional Access policy that includes all guests and external users. Then implement a policy to block access. See [Conditional Access](../conditional-access/concept-conditional-access-cloud-apps.md). - * Block access to the Azure portal. You can make rare necessary exceptions. Create a Conditional Access policy that includes all guests and external users. Then [implement a policy to block access](../conditional-access/concept-conditional-access-cloud-apps.md). +- **Disconnected forests.** Use Azure AD cloud provisioning to connect to disconnected forests. This approach eliminates the need to establish cross-forest connectivity or trusts, which can broaden the effect of an on-premises breach. For more information, see [What is Azure AD Connect cloud sync](../cloud-sync/what-is-cloud-sync.md). -* **Disconnected forests**: Use [Azure AD cloud provisioning](../cloud-sync/what-is-cloud-sync.md). This method enables you to connect to disconnected forests, eliminating the need to establish cross-forest connectivity or trusts, which can broaden the effect of an on-premises breach. - ### Limitations and tradeoffs When used to provision hybrid accounts, the Azure-AD-from-cloud-HR system relies on on-premises synchronization to complete the data flow from Active Directory to Azure AD. If synchronization is interrupted, new employee records won't be available in Azure AD. 
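The guidance above suggests blocking guest and external user access to the Azure portal with a Conditional Access policy. The sketch below shows one way to create such a policy through Microsoft Graph in report-only mode; it's illustrative only, assumes the `Policy.ReadWrite.ConditionalAccess` permission and the well-known Microsoft Azure Management app ID, and should be reviewed (and given exclusions for any approved guests) before you enforce it.

```powershell
# Hedged sketch: a report-only Conditional Access policy that blocks guests and
# external users from the Azure portal (Microsoft Azure Management). Assumes the
# Policy.ReadWrite.ConditionalAccess permission; add exclusions for approved
# guests and review sign-in impact before changing the state to "enabled".
Connect-MgGraph -Scopes "Policy.ReadWrite.ConditionalAccess"

$policy = @{
    displayName = "Block guest access to Azure portal (report-only)"
    state       = "enabledForReportingButNotEnforced"
    conditions  = @{
        users          = @{ includeUsers = @("GuestsOrExternalUsers") }
        applications   = @{ includeApplications = @("797f4846-ba00-4fd7-ba43-dac1f8f63013") }  # Microsoft Azure Management
        clientAppTypes = @("all")
    }
    grantControls = @{
        operator        = "OR"
        builtInControls = @("block")
    }
}

Invoke-MgGraphRequest -Method POST `
  -Uri "https://graph.microsoft.com/v1.0/identity/conditionalAccess/policies" `
  -Body ($policy | ConvertTo-Json -Depth 10) `
  -ContentType "application/json"
```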
@@ -141,11 +142,9 @@ When used to provision hybrid accounts, the Azure-AD-from-cloud-HR system relies Cloud groups allow you to decouple your collaboration and access from your on-premises infrastructure. -* **Collaboration**: Use Microsoft 365 Groups and Microsoft Teams for modern collaboration. Decommission on-premises distribution lists, and [upgrade distribution lists to Microsoft 365 Groups in Outlook](/office365/admin/manage/upgrade-distribution-lists). - -* **Access**: Use Azure AD security groups or Microsoft 365 Groups to authorize access to applications in Azure AD. - -* **Office 365 licensing**: Use group-based licensing to provision to Office 365 by using cloud-only groups. This method decouples control of group membership from on-premises infrastructure. +- **Collaboration**. Use Microsoft 365 Groups and Microsoft Teams for modern collaboration. Decommission on-premises distribution lists, and [upgrade distribution lists to Microsoft 365 Groups in Outlook](/office365/admin/manage/upgrade-distribution-lists). +- **Access**. Use Azure AD security groups or Microsoft 365 Groups to authorize access to applications in Azure AD. +- **Office 365 licensing**. Use group-based licensing to provision to Office 365 by using cloud-only groups. This method decouples control of group membership from on-premises infrastructure. Owners of groups that are used for access should be considered privileged identities to avoid membership takeover in an on-premises compromise. A takeover would include direct manipulation of group membership on-premises or manipulation of on-premises attributes that can affect dynamic group membership in Microsoft 365. @@ -153,146 +152,133 @@ Owners of groups that are used for access should be considered privileged identi Use Azure AD capabilities to securely manage devices. -- **Use Windows 10 workstations**: [Deploy Azure AD joined](../devices/azureadjoin-plan.md) devices with MDM policies. Enable [Windows Autopilot](/mem/autopilot/windows-autopilot) for a fully automated provisioning experience. - - - Deprecate machines that run Windows 8.1 and earlier. - - - Don't deploy server OS machines as workstations. +Deploy Azure AD joined Windows 10 workstations with mobile device management policies. Enable Windows Autopilot for a fully automated provisioning experience. See [Plan your Azure AD join implementation](../devices/azureadjoin-plan.md) and [Windows Autopilot](/mem/autopilot/windows-autopilot). - - Use [Microsoft Intune](https://www.microsoft.com/microsoft-365/enterprise-mobility-security/microsoft-intune) as the source of authority for all device management workloads. +- **Use Windows 10 workstations**. + - Deprecate machines that run Windows 8.1 and earlier. + - Don't deploy computers that have server operating systems as workstations. +- **Use Microsoft Endpoint Manager as the authority for all device management workloads.** See [Microsoft Endpoint Manager](https://www.microsoft.com/security/business/microsoft-endpoint-manager). +- **Deploy privileged access devices.** For more information, see [Device roles and profiles](/security/compass/privileged-access-devices#device-roles-and-profiles). -- [**Deploy privileged access devices**](/security/compass/privileged-access-devices#device-roles-and-profiles): - Use privileged access to manage Microsoft 365 and Azure AD as part of a complete approach to [Securing privileged access](/security/compass/overview). 
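To check how the device guidance above is taking hold in a tenant, you can read device objects from Microsoft Graph and review their join type. This is a hedged, read-only sketch: it assumes the `Device.Read.All` permission, shows only the first page of results, and the `trustType` values noted in the comments are the commonly documented ones.

```powershell
# Read-only sketch: list registered devices and their join type to spot machines
# that aren't Azure AD joined. Assumes the Device.Read.All permission and shows
# only the first page of results. trustType is commonly documented as
# AzureAd (Azure AD joined), ServerAd (hybrid joined), or Workplace (registered).
Connect-MgGraph -Scopes "Device.Read.All"

$devices = Invoke-MgGraphRequest -Method GET -OutputType PSObject `
  -Uri "https://graph.microsoft.com/v1.0/devices?`$select=displayName,trustType,operatingSystem,approximateLastSignInDateTime"

$devices.value |
  Sort-Object trustType |
  Format-Table displayName, trustType, operatingSystem, approximateLastSignInDateTime
```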
+### Workloads, applications, and resources -## Workloads, applications, and resources +- **On-premises single-sign-on (SSO) systems** -- **On-premises single-sign-on (SSO) systems** + Deprecate any on-premises federation and web access management infrastructure. Configure applications to use Azure AD. - Deprecate any on-premises federation and web access management infrastructure. Configure applications to use Azure AD. +- **SaaS and line-of-business (LOB) applications that support modern authentication protocols** -- **SaaS and line-of-business (LOB) applications that support modern authentication protocols** + Use Azure AD for SSO. The more apps you configure to use Azure AD for authentication, the less risk in an on-premises compromise. For more information, see [What is single sign-on in Azure Active Directory](../manage-apps/what-is-single-sign-on.md). - [Use Azure AD for SSO](../manage-apps/what-is-single-sign-on.md). The more apps you configure to use Azure AD for authentication, the less risk in an on-premises compromise. +- **Legacy applications** + You can enable authentication, authorization, and remote access to legacy applications that don't support modern authentication. Use [Azure AD Application Proxy](../app-proxy/application-proxy.md). Or, enable them through a network or application delivery controller solution by using secure hybrid access partner integrations. See [Secure legacy apps with Azure Active Directory](../manage-apps/secure-hybrid-access.md). -* **Legacy applications** + Choose a VPN vendor that supports modern authentication. Integrate its authentication with Azure AD. In an on-premises compromise, you can use Azure AD to disable or block access by disabling the VPN. - * You can enable authentication, authorization, and remote access to legacy applications that don't support modern authentication. Use [Azure AD Application Proxy](../app-proxy/application-proxy.md). You can also enable them through a network or application delivery controller solution by using [secure hybrid access partner integrations](../manage-apps/secure-hybrid-access.md). +- **Application and workload servers** - * Choose a VPN vendor that supports modern authentication. Integrate its authentication with Azure AD. In an on-premises compromise, you can use Azure AD to disable or block access by disabling the VPN. + Applications or resources that required servers can be migrated to Azure infrastructure as a service (IaaS). Use Azure AD Domain Services (Azure AD DS) to decouple trust and dependency on on-premises instances of Active Directory. To achieve this decoupling, make sure virtual networks used for Azure AD DS don't have a connection to corporate networks. See [Azure AD Domain Services](../../active-directory-domain-services/overview.md). -* **Application and workload servers** - - * Applications or resources that required servers can be migrated to Azure infrastructure as a service (IaaS). Use [Azure AD Domain Services](../../active-directory-domain-services/overview.md) (Azure AD DS) to decouple trust and dependency on on-premises instances of Active Directory. To achieve this decoupling, make sure virtual networks used for Azure AD DS don't have a connection to corporate networks. - - * Follow the guidance for [credential tiering](/security/compass/privileged-access-access-model#ADATM_BM). Application servers are typically considered tier-1 assets. + Use credential tiering. Application servers are typically considered tier-1 assets. 
For more information, see [Enterprise access model](/security/compass/privileged-access-access-model#ADATM_BM). ## Conditional Access policies Use Azure AD Conditional Access to interpret signals and use them to make authentication decisions. For more information, see the [Conditional Access deployment plan](../conditional-access/plan-conditional-access.md). -* Use Conditional Access to [block legacy authentication protocols](../conditional-access/howto-conditional-access-policy-block-legacy.md) whenever possible. Additionally, disable legacy authentication protocols at the application level by using an application-specific configuration. +- Use Conditional Access to block legacy authentication protocols whenever possible. Additionally, disable legacy authentication protocols at the application level by using an application-specific configuration. See [Block legacy authentication](../conditional-access/howto-conditional-access-policy-block-legacy.md). + + For more information, see [Legacy authentication protocols](../fundamentals/auth-sync-overview.md#legacy-authentication-protocols). Or see specific details for [Exchange Online](/exchange/clients-and-mobile-in-exchange-online/disable-basic-authentication-in-exchange-online#how-basic-authentication-works-in-exchange-online) and [SharePoint Online](/powershell/module/sharepoint-online/set-spotenant). - For more information, see [Legacy authentication protocols](../fundamentals/auth-sync-overview.md). Or see specific details for [Exchange Online](/exchange/clients-and-mobile-in-exchange-online/disable-basic-authentication-in-exchange-online#how-basic-authentication-works-in-exchange-online) and [SharePoint Online](/powershell/module/sharepoint-online/set-spotenant). +- Implement the recommended identity and device access configurations. See [Common Zero Trust identity and device access policies](/microsoft-365/security/office-365-security/identity-access-policies). -* Implement the recommended [identity and device access configurations](/microsoft-365/security/office-365-security/identity-access-policies). +- If you're using a version of Azure AD that doesn't include Conditional Access, use [Security defaults in Azure AD](../fundamentals/concept-fundamentals-security-defaults.md). -* If you're using a version of Azure AD that doesn't include Conditional Access, ensure that you're using the [Azure AD security defaults](../fundamentals/concept-fundamentals-security-defaults.md). + For more information about Azure AD feature licensing, see the [Azure AD pricing guide](https://www.microsoft.com/security/business/identity-access-management/azure-ad-pricing). - For more information about Azure AD feature licensing, see the [Azure AD pricing guide](https://www.microsoft.com/security/business/identity-access-management/azure-ad-pricing). +## Monitor -## Monitor +After you configure your environment to protect your Microsoft 365 from an on-premises compromise, proactively monitor the environment. For more information, see [What is Azure Active Directory monitoring](../reports-monitoring/overview-monitoring.md). -After you configure your environment to protect your Microsoft 365 -from an on-premises compromise, [proactively monitor](../reports-monitoring/overview-monitoring.md) -the environment. ### Scenarios to monitor Monitor the following key scenarios, in addition to any scenarios specific to your organization. For example, you should proactively monitor access to your business-critical applications and resources. 
-* **Suspicious activity** - - Monitor all [Azure AD risk events](../identity-protection/overview-identity-protection.md#risk-detection-and-remediation) for suspicious activity. [Azure AD Identity Protection](../identity-protection/overview-identity-protection.md) is natively integrated with Microsoft Defender for Cloud. - - Define the network [named locations](../conditional-access/location-condition.md) to avoid noisy detections on location-based signals. -* **User and Entity Behavioral Analytics (UEBA) alerts** - - Use UEBA to get insights on anomaly detection. +- **Suspicious activity** - * Microsoft Defender for Cloud Apps provides [UEBA in the cloud](/cloud-app-security/tutorial-ueba). + Monitor all Azure AD risk events for suspicious activity. See [Risk detection and remediation](../identity-protection/overview-identity-protection.md#risk-detection-and-remediation). Azure AD Identity Protection is natively integrated with Microsoft Defender for Cloud. See [What is Identity Protection](../identity-protection/overview-identity-protection.md). - * You can [integrate on-premises UEBA from Azure Advanced Threat Protection (ATP)](/defender-for-identity/install-step2). Defender for Cloud Apps reads signals from Azure AD Identity Protection. + Define the network named locations to avoid noisy detections on location-based signals. See [Using the location condition in a Conditional Access policy](../conditional-access/location-condition.md). -* **Emergency access accounts activity** +- **User and Entity Behavioral Analytics (UEBA) alerts** - Monitor any access that uses [emergency access accounts](../roles/security-emergency-access.md). Create alerts for investigations. This monitoring must include: + Use UEBA to get insights on anomaly detection. Microsoft Defender for Cloud Apps provides UEBA in the cloud. See [Investigate risky users](/cloud-app-security/tutorial-ueba). - * Sign-ins. + You can integrate on-premises UEBA from Azure Advanced Threat Protection (ATP). Microsoft Defender for Cloud Apps reads signals from Azure AD Identity Protection. See [Connect to your Active Directory Forest](/defender-for-identity/install-step2). - * Credential management. +- **Emergency access accounts activity** - * Any updates on group memberships. + Monitor any access that uses emergency access accounts. See [Manage emergency access accounts in Azure AD](../roles/security-emergency-access.md). Create alerts for investigations. This monitoring must include the following actions: - * Application assignments. + - Sign-ins + - Credential management + - Any updates on group memberships + - Application assignments -* **Privileged role activity** +- **Privileged role activity** - Configure and review security [alerts generated by Azure AD Privileged Identity Management (PIM)](../privileged-identity-management/pim-how-to-configure-security-alerts.md?tabs=new#security-alerts). Monitor direct assignment of privileged roles outside PIM by generating alerts whenever a user is assigned directly. + Configure and review security alerts generated by Azure AD Privileged Identity Management (PIM). Monitor direct assignment of privileged roles outside PIM by generating alerts whenever a user is assigned directly. See [Security alerts](../privileged-identity-management/pim-how-to-configure-security-alerts.md?tabs=new#security-alerts). -* **Azure AD tenant-wide configurations** +- **Azure AD tenant-wide configurations** - Any change to tenant-wide configurations should generate alerts in the system. 
These changes include but aren't limited to: + Any change to tenant-wide configurations should generate alerts in the system. These changes include but aren't limited to the following changes: - * Updated custom domains. + - Updated custom domains + - Azure AD B2B changes to allowlists and blocklists + - Azure AD B2B changes to allowed identity providers, such as SAML identity providers through direct federation or social sign-ins + - Conditional Access or Risk policy changes - * Azure AD B2B changes to allowlists and blocklists. +- **Application and service principal objects** - * Azure AD B2B changes to allowed identity providers (SAML identity providers through direct federation or social sign-ins). + - New applications or service principals that might require Conditional Access policies + - Credentials added to service principals + - Application consent activity - * Conditional Access or Risk policy changes. +- **Custom roles** -* **Application and service principal objects** - - * New applications or service principals that might require Conditional Access policies. - - * Credentials added to service principals. - * Application consent activity. - -* **Custom roles** - * Updates to the custom role definitions. - - * Newly created custom roles. + - Updates to the custom role definitions + - Newly created custom roles ### Log management Define a log storage and retention strategy, design, and implementation to facilitate a consistent tool set. For example, you could consider security information and event management (SIEM) systems like Microsoft Sentinel, common queries, and investigation and forensics playbooks. -* **Azure AD logs**: Ingest generated logs and signals by consistently following best practices for settings such as diagnostics, log retention, and SIEM ingestion. - - The log strategy must include the following Azure AD logs: - * Sign-in activity - - * Audit logs - - * Risk events +- **Azure AD logs**. Ingest generated logs and signals by consistently following best practices for settings such as diagnostics, log retention, and SIEM ingestion. - Azure AD provides [Azure Monitor integration](../reports-monitoring/concept-activity-logs-azure-monitor.md) for the sign-in activity log and audit logs. Risk events can be ingested through the [Microsoft Graph API](/graph/api/resources/identityprotection-root). You can [stream Azure AD logs to Azure Monitor logs](../reports-monitoring/howto-integrate-activity-logs-with-log-analytics.md). + The log strategy must include the following Azure AD logs: -* **Hybrid infrastructure OS security logs**: All hybrid identity infrastructure OS logs should be archived and carefully monitored as a tier-0 system, because of the surface-area implications. Include the following elements: + - Sign-in activity + - Audit logs + - Risk events - * Azure AD Connect. [Azure AD Connect Health](../hybrid/whatis-azure-ad-connect.md) must be deployed to monitor identity synchronization. + Azure AD provides Azure Monitor integration for the sign-in activity log and audit logs. See [Azure AD activity logs in Azure Monitor](../reports-monitoring/concept-activity-logs-azure-monitor.md). - * Application Proxy agents + Use the Microsoft Graph API to ingest risk events. See [Use the Microsoft Graph identity protection APIs](/graph/api/resources/identityprotection-root). + You can stream Azure AD logs to Azure Monitor logs. See [Integrate Azure AD logs with Azure Monitor logs](../reports-monitoring/howto-integrate-activity-logs-with-log-analytics.md). 
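If you want to pull risk events on demand rather than stream them, the identity protection API referenced above can be queried directly. A minimal, read-only sketch follows; it assumes the `IdentityRiskEvent.Read.All` permission and returns only the first page of detections, so treat it as a starting point for a custom ingestion job rather than a complete solution.

```powershell
# Read-only sketch: pull recent risk detections from the Microsoft Graph identity
# protection API for ad-hoc review or custom ingestion. Assumes the
# IdentityRiskEvent.Read.All permission; only the first page of results is shown.
Connect-MgGraph -Scopes "IdentityRiskEvent.Read.All"

$detections = Invoke-MgGraphRequest -Method GET -OutputType PSObject `
  -Uri "https://graph.microsoft.com/v1.0/identityProtection/riskDetections?`$top=50"

$detections.value |
  Format-Table detectedDateTime, riskEventType, riskLevel, userPrincipalName
```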
- * Password writeback agents +- **Hybrid infrastructure operating system security logs**. All hybrid identity infrastructure operating system logs should be archived and carefully monitored as a tier-0 system, because of the surface-area implications. Include the following elements: - * Password Protection Gateway machines + - Application Proxy agents + - Password writeback agents + - Password Protection Gateway machines + - Network policy servers (NPSs) that have the Azure AD multifactor authentication RADIUS extension + - Azure AD Connect - * Network policy servers (NPSs) that have the Azure AD multifactor authentication RADIUS extension + You must deploy Azure AD Connect Health to monitor identity synchronization. See [What is Azure AD Connect](../hybrid/whatis-azure-ad-connect.md). ## Next steps -* [Build resilience into identity and access management by using Azure AD](resilience-overview.md) -* [Secure external access to resources](secure-external-access-resources.md) -* [Integrate all your apps with Azure AD](five-steps-to-full-application-integration-with-azure-ad.md) +- [Build resilience into identity and access management by using Azure AD](resilience-overview.md) +- [Secure external access to resources](secure-external-access-resources.md) +- [Integrate all your apps with Azure AD](five-steps-to-full-application-integration-with-azure-ad.md) diff --git a/articles/active-directory/fundamentals/recover-from-deletions.md b/articles/active-directory/fundamentals/recover-from-deletions.md index 3c927ac2725e8..a767544b8e0d1 100644 --- a/articles/active-directory/fundamentals/recover-from-deletions.md +++ b/articles/active-directory/fundamentals/recover-from-deletions.md @@ -17,22 +17,21 @@ ms.collection: M365-identity-device-management # Recover from deletions -This article addresses recovering from soft and hard deletions in your Azure AD tenant. If you haven’t already done so, we recommend first reading the [Recoverability best practices article](recoverability-overview.md) for foundational knowledge. +This article addresses recovering from soft and hard deletions in your Azure Active Directory (Azure AD) tenant. If you haven't already done so, read [Recoverability best practices](recoverability-overview.md) for foundational knowledge. ## Monitor for deletions -The [Azure AD Audit Log](../reports-monitoring/concept-audit-logs.md) contains information on all delete operations performed in your tenant. We recommend that you export these logs to a security information and event management (SIEM) tool such as [Microsoft Sentinel](../../sentinel/overview.md). You can also use Microsoft Graph to audit changes and build a custom solution to monitor differences over time. For more information on finding deleted items using Microsoft Graph, see [List deleted items - Microsoft Graph v1.0. ](/graph/api/directory-deleteditems-list?tabs=http) +The [Azure AD Audit log](../reports-monitoring/concept-audit-logs.md) contains information on all delete operations performed in your tenant. Export these logs to a security information and event management tool such as [Microsoft Sentinel](../../sentinel/overview.md). -### Audit log +You can also use Microsoft Graph to audit changes and build a custom solution to monitor differences over time. For more information on how to find deleted items by using Microsoft Graph, see [List deleted items - Microsoft Graph v1.0](/graph/api/directory-deleteditems-list?tabs=http). 
-The Audit Log always records a "Delete \" event when an object in the tenant is removed from an active state by either a soft or hard deletion. +### Audit log -[![Screenshot of audit log showing deletions](./media/recoverability/delete-audit-log.png)](./media/recoverability/delete-audit-log.png#lightbox) +The Audit log always records a "Delete \" event when an object in the tenant is removed from an active state by either a soft or hard deletion. +[![Screenshot that shows an Audit log with deletions.](./media/recoverability/delete-audit-log.png)](./media/recoverability/delete-audit-log.png#lightbox) - -A delete event for applications, users, and Microsoft 365 Groups is a soft delete. For any other object type, it's a hard delete. Track the occurrence of hard-delete events by comparing "Delete \" events with the type of object that has been deleted, noting those that do not support soft-delete. In addition, note "Hard Delete \" events. - +A delete event for applications, users, and Microsoft 365 Groups is a soft delete. For any other object type, it's a hard delete. Track the occurrence of hard-delete events by comparing "Delete \" events with the type of object that was deleted. Note the events that don't support soft delete. Also note "Hard Delete \" events. | Object type | Activity in log| Result | | - | - | - | @@ -42,166 +41,141 @@ A delete event for applications, users, and Microsoft 365 Groups is a soft delet | User| Hard delete user| Hard deleted | | Microsoft 365 Group| Delete group| Soft deleted | | Microsoft 365 Group| Hard delete group| Hard deleted | -| All other objects| Delete “objectType”| Hard deleted | - +| All other objects| Delete "objectType"| Hard deleted | > [!NOTE] -> The audit log does not distinguish the group type of a deleted group. Only Microsoft 365 Groups are soft-deleted. If you see a Delete group entry, it may be the soft delete of a M365 group, or the hard delete of another type of group. **It is therefore important that your documentation of your known good state include the group type for each group in your organization**. To learn more about documenting your known good state, see [Recoverability best practices](recoverability-overview.md). +> The Audit log doesn't distinguish the group type of a deleted group. Only Microsoft 365 Groups are soft deleted. If you see a Delete group entry, it might be the soft delete of a Microsoft 365 Group or the hard delete of another type of group. +> +>*It's important that your documentation of your known good state includes the group type for each group in your organization*. To learn more about documenting your known good state, see [Recoverability best practices](recoverability-overview.md). + ### Monitor support tickets -A sudden increase in support tickets regarding access to a specific object may indicate that there has been a deletion. Because some objects have dependencies, deletion of a group used to access an application, an application itself, or a Conditional Access policy targeting an application can all cause broad sudden impact. If you see a trend like this, check to ensure that none of the objects required for access have been deleted. +A sudden increase in support tickets about access to a specific object might indicate that a deletion occurred. Because some objects have dependencies, deletion of a group used to access an application, an application itself, or a Conditional Access policy that targets an application can all cause broad sudden impact. 
If you see a trend like this, check to ensure that none of the objects required for access were deleted. ## Soft deletions -When objects such as users, Microsoft 365 groups, or application registrations are “soft deleted,” they enter a suspended state in which they aren't available for use by other services. In this state, items retain their properties and can be restored for 30 days. After 30 days, objects in the soft-deleted state are permanently or “hard” deleted. +When objects such as users, Microsoft 365 Groups, or application registrations are soft deleted, they enter a suspended state in which they aren't available for use by other services. In this state, items retain their properties and can be restored for 30 days. After 30 days, objects in the soft-deleted state are permanently, or hard, deleted. > [!NOTE] -> Objects cannot be restored from a hard-deleted state. They must be recreated and reconfigured. - +> Objects can't be restored from a hard-deleted state. They must be re-created and reconfigured. + ### When soft deletes occur -It's important to understand why object deletions occur in your environment to prepare for them. This section outlines frequent scenarios for soft deletion by object class. Keep in mind there may be scenarios your organization sees which are unique to your organization so a discovery process is key to preparation. +It's important to understand why object deletions occur in your environment so that you can prepare for them. This section outlines frequent scenarios for soft deletion by object class. You might see scenarios that are unique to your organization, so a discovery process is key to preparation. ### Users -Users enter the soft delete state anytime the user object is deleted by using the Azure portal, Microsoft Graph, or PowerShell. +Users enter the soft-delete state anytime the user object is deleted by using the Azure portal, Microsoft Graph, or PowerShell. The most frequent scenarios for user deletion are: -* An administrator intentionally deletes a user in the Azure AD portal in response to a request, or as part of routine user maintenance. - -* An automation script in Microsoft Graph or PowerShell triggers the deletion. For example, you may have a script that removes users who haven't signed in for a specified time period. - -* A user is moved out of scope for synchronization with Azure Active Directory (Azure AD) connect. - -* A user is removed in an HR system and is deprovisioned via an automated workflow. +* An administrator intentionally deletes a user in the Azure AD portal in response to a request or as part of routine user maintenance. +* An automation script in Microsoft Graph or PowerShell triggers the deletion. For example, you might have a script that removes users who haven't signed in for a specified time. +* A user is moved out of scope for synchronization with Azure AD Connect. +* A user is removed from an HR system and is deprovisioned via an automated workflow. ### Microsoft 365 Groups The most frequent scenarios for Microsoft 365 Groups being deleted are: -* An administrator intentionally deletes the group, for example in response to a support request. - -* An automation script in Microsoft Graph or PowerShell triggers the deletion. For example, you may have a script that deletes groups that haven't been accessed or attested to by the group owner for a specific period of time. - -* Non-admins’ unintentional deletion of a group they own. 
- - +* An administrator intentionally deletes the group, for example, in response to a support request. +* An automation script in Microsoft Graph or PowerShell triggers the deletion. For example, you might have a script that deletes groups that haven't been accessed or attested to by the group owner for a specified time. +* Unintentional deletion of a group owned by non-admins. ### Application objects and service principals The most frequent scenarios for application deletion are: -* An administrator intentionally deletes the application, for example in response to a support request. - -* An automation script in Microsoft Graph or PowerShell triggers the deletion. For example, you may want a process for deleting abandoned applications that are no longer used or managed. In general, create an offboarding process for applications rather than scripting to avoid unintentional deletions. +* An administrator intentionally deletes the application, for example, in response to a support request. +* An automation script in Microsoft Graph or PowerShell triggers the deletion. For example, you might want a process for deleting abandoned applications that are no longer used or managed. In general, create an offboarding process for applications rather than scripting to avoid unintentional deletions. ### Properties maintained with soft delete - | Object type| Important properties maintained | | - | - | -| Users (including external users)| **All properties maintained**, including ObjectID, group memberships, roles, licenses, application assignments. | -| Microsoft 365 Groups| **All properties maintained**, including ObjectID, group memberships, licenses, application assignments | -| Application Registration| **All properties maintained.** (See additional information following this table.) | - - - - -When you delete an application, the application registration by default enters the soft-delete state. To understand the relationship between application registrations and service principals, see [Apps & service principals in Azure AD - Microsoft identity platform](../develop/app-objects-and-service-principals.md). - +| Users (including external users)| *All properties are maintained*, including ObjectID, group memberships, roles, licenses, and application assignments. | +| Microsoft 365 Groups| *All properties are maintained*, including ObjectID, group memberships, licenses, and application assignments. | +| Application registration| *All properties are maintained.* (See more information after this table.) | +When you delete an application, the application registration by default enters the soft-delete state. To understand the relationship between application registrations and service principals, see [Apps and service principals in Azure AD - Microsoft identity platform](/azure/active-directory/develop/app-objects-and-service-principals). ## Recover from soft deletion -You can restore soft deleted items in the Azure portal or with Microsoft Graph. +You can restore soft-deleted items in the Azure portal or with Microsoft Graph. ### Users -You can see soft-deleted users in the Azure portal on the Users – Deleted users page. - -![screenshot showing restoring users in the Azure portal](media/recoverability/deletion-restore-user.png) +You can see soft-deleted users in the Azure portal on the **Users | Deleted users** page. 
-For details on restoring users, see the following documentation: +![Screenshot that shows restoring users in the Azure portal.](media/recoverability/deletion-restore-user.png) -* See [Restore or permanently remove recently deleted user](active-directory-users-restore.md) for restoring in the Azure portal. +For more information on how to restore users, see the following documentation: -* See [Restore deleted item – Microsoft Graph v1.0](/graph/api/directory-deleteditems-restore?tabs=http) for restoring with Microsoft Graph. +* To restore from the Azure portal, see [Restore or permanently remove recently deleted user](active-directory-users-restore.md). +* To restore by using Microsoft Graph, see [Restore deleted item – Microsoft Graph v1.0](/graph/api/directory-deleteditems-restore?tabs=http). ### Groups -You can see soft-deleted Microsoft 365 (Microsoft 365) Groups in the Azure portal in the Groups – Deleted groups screen. - -![Screenshot showing restoring groups in the Azure portal.](media/recoverability/deletion-restore-groups.png) - +You can see soft-deleted Microsoft 365 Groups in the Azure portal on the **Groups | Deleted groups** page. -For details on restoring soft deleted Microsoft 365 Groups, see the following documentation: +![Screenshot that shows restoring groups in the Azure portal.](media/recoverability/deletion-restore-groups.png) -* To restore from the Azure portal, see [Restore a deleted Microsoft 365 group. ](../enterprise-users/groups-restore-deleted.md) +For more information on how to restore soft-deleted Microsoft 365 Groups, see the following documentation: -* To restore by using Microsoft Graph, see [Restore deleted item – Microsoft Graph v1.0](/graph/api/directory-deleteditems-restore?tabs=http). +* To restore from the Azure portal, see [Restore a deleted Microsoft 365 Group](../enterprise-users/groups-restore-deleted.md). +* To restore by using Microsoft Graph, see [Restore deleted item – Microsoft Graph v1.0](/graph/api/directory-deleteditems-restore?tabs=http). ### Applications -Applications have two objects, the application registration and the service principle. For more information on the differences between the registration and the service principal, see [Apps & service principals in Azure AD.](/develop/app-objects-and-service-principals.md) +Applications have two objects: the application registration and the service principal. For more information on the differences between the registration and the service principal, see [Apps and service principals in Azure AD](/azure/active-directory/develop/app-objects-and-service-principals). -To restore an application from the Azure portal, select App registrations, then deleted applications. Select the application registration to restore, and then select Restore app registration. +To restore an application from the Azure portal, select **App registrations** > **Deleted applications**. Select the application registration to restore, and then select **Restore app registration**. + +[![Screenshot that shows the app registration restore process in the azure portal.](./media/recoverability/deletion-restore-application.png)](./media/recoverability/deletion-restore-application.png#lightbox) -[![A screenshot showing the app registration restore process in the azure portal.](./media/recoverability/deletion-restore-application.png)](./media/recoverability/deletion-restore-application.png#lightbox) - ## Hard deletions -A “hard deletion” is the permanent removal of an object from your Azure Active Directory (Azure AD) tenant. 
Objects that don't support soft delete are removed in this way. Similarly, soft deleted objects are hard deleted once the deletion time is 30 days ago. The only object types that support a soft delete are: +A hard deletion is the permanent removal of an object from your Azure AD tenant. Objects that don't support soft delete are removed in this way. Similarly, soft-deleted objects are hard deleted after a deletion time of 30 days. The only object types that support a soft delete are: * Users - * Microsoft 365 Groups - * Application registration > [!IMPORTANT] -> All other item types are hard deleted. When an item is hard deleted it cannot be restored: it must be recreated. Neither administrators nor Microsoft can restore hard deleted items. It's important to prepare for this situation by ensuring that you have processes and documentation to minimize potential disruption from a hard delete. -For information on preparing for and documenting current states, see [Recoverability best practices](recoverability-overview.md). +> All other item types are hard deleted. When an item is hard deleted, it can't be restored. It must be re-created. Neither administrators nor Microsoft can restore hard-deleted items. Prepare for this situation by ensuring that you have processes and documentation to minimize potential disruption from a hard delete. +> +> For information on how to prepare for and document current states, see [Recoverability best practices](recoverability-overview.md). ### When hard deletes usually occur Hard deletes most often occur in the following circumstances. -Moving from soft to hard delete +Moving from soft to hard delete: * A soft-deleted object wasn't restored within 30 days. +* An administrator intentionally deletes an object in the soft delete state. -* An administrator intentionally deletes an object in the soft delete state - -Directly hard deleted +Directly hard deleted: -* The object type deleted doesn't support soft delete. - -* An administrator chooses to permanently delete an item by using the portal, typically in response to a request. - -* An automation script triggers the deletion of the object by using Microsoft Graph or PowerShell. Use of an automation script to clean up stale objects isn't uncommon. Microsoft recommends a robust off-boarding process for objects in your tenant to avoid mistakes that may result in mass-deletion of critical objects. +* The object type that was deleted doesn't support soft delete. +* An administrator chooses to permanently delete an item by using the portal, which typically occurs in response to a request. +* An automation script triggers the deletion of the object by using Microsoft Graph or PowerShell. Use of an automation script to clean up stale objects isn't uncommon. A robust off-boarding process for objects in your tenant helps you to avoid mistakes that might result in mass deletion of critical objects. ## Recover from hard deletion -Hard deleted items must be recreated and reconfigured. It's best to avoid unwanted hard deletions. +Hard-deleted items must be re-created and reconfigured. It's best to avoid unwanted hard deletions. -### Review soft-deleted objects +### Review soft-deleted objects -Ensure you have a process to frequently review items in the soft delete state and restore them if appropriate. To do so, you should: - -* Frequently [list deleted items](/graph/api/directory-deleteditems-list?tabs=http). +Ensure you have a process to frequently review items in the soft-delete state and restore them if appropriate. 
To do so, you should: +* Frequently [list deleted items](/graph/api/directory-deleteditems-list?tabs=http). * Ensure that you have specific criteria for what should be restored. +* Ensure that you have specific roles or users assigned to evaluate and restore items as appropriate. +* Develop and test a continuity management plan. For more information, see [Considerations for your Enterprise Business Continuity Management Plan](/compliance/assurance/assurance-developing-your-ebcm-plan). -* Ensure that you have specific roles or users assigned to evaluating and restoring items as appropriate. - -* Develop and test a continuity management plan. For more information, see [Considerations for your Enterprise Business Continuity Management Plan. ](/compliance/assurance/assurance-developing-your-ebcm-plan) - - -For more information on avoiding unwanted deletions, see the following topics in the [Recoverability best practices](recoverability-overview.md) article. +For more information on how to avoid unwanted deletions, see the following topics in [Recoverability best practices](recoverability-overview.md): * Business continuity and disaster planning - * Document known good states - * Monitoring and data retention diff --git a/articles/active-directory/fundamentals/recover-from-misconfigurations.md b/articles/active-directory/fundamentals/recover-from-misconfigurations.md index 5c2bfedcacad2..9e452e4760141 100644 --- a/articles/active-directory/fundamentals/recover-from-misconfigurations.md +++ b/articles/active-directory/fundamentals/recover-from-misconfigurations.md @@ -17,70 +17,60 @@ ms.collection: M365-identity-device-management # Recover from misconfiguration -Configuration settings in Azure Active Directory (Azure AD) can affect any resource in the Azure AD tenant through targeted or tenant-wide management actions. +Configuration settings in Azure Active Directory (Azure AD) can affect any resource in the Azure AD tenant through targeted or tenant-wide management actions. ## What is configuration? -Configurations are any changes in Azure AD that alter the behavior or capabilities of an Azure AD service or feature. For example, when you configure a Conditional Access policy you alter who can access the targeted applications and under what circumstances. +Configurations are any changes in Azure AD that alter the behavior or capabilities of an Azure AD service or feature. For example, when you configure a Conditional Access policy, you alter who can access the targeted applications and under what circumstances. -It's important to understand the configuration items that are important to your organization. The following configurations have a high impact on your security posture. +You need to understand the configuration items that are important to your organization. The following configurations have a high impact on your security posture. -### Tenant wide configurations +### Tenant-wide configurations -* **External identities**: Global administrators for the tenant identify and control the external identities that can be provisioned in the tenant. +* **External identities**: Global administrators for the tenant identify and control the external identities that can be provisioned in the tenant. They determine: * Whether to allow external identities in the tenant. - - * From which domain(s) external identities can be added. - + * From which domains external identities can be added. * Whether users can invite users from other tenants. 
-* **Named Locations**: Global administrators can create named locations, which can then be used to +* **Named locations**: Global administrators can create named locations, which can then be used to: * Block sign-ins from specific locations. + * Trigger Conditional Access policies like multifactor authentication. - * Trigger conditional access policies such as MFA. - -* **Allowed authentication methods**: Global administrators set the authentication methods allowed for the tenant. - -* **Self-service options**. Global Administrators set self-service options such as self-service-password reset and create Office 365 groups at the tenant level. +* **Allowed authentication methods**: Global administrators set the authentication methods allowed for the tenant. +* **Self-service options**: Global administrators set self-service options like self-service password reset and create Office 365 groups at the tenant level. The implementation of some tenant-wide configurations can be scoped, provided they aren't overridden by global administration policies. For example: * If the tenant is configured to allow external identities, a resource administrator can still exclude those identities from accessing a resource. - * If the tenant is configured to allow personal device registration, a resource administrator can exclude those devices from accessing specific resources. - -* If named locations are configured, a resource administrator can configure policies either allowing or excluding access from those locations. +* If named locations are configured, a resource administrator can configure policies that either allow or exclude access from those locations. ### Conditional Access configurations -Conditional Access policies are access control configurations that bring together signals to make decisions and enforce organizational policies. - -![A screenshot showing user, location. Device, application, and risk signals coming together in conditional access policies.](media\recoverability\miscofigurations-conditional-accss-signals.png) +Conditional Access policies are access control configurations that bring together signals to make decisions and enforce organizational policies. +![Screenshot that shows user, location, device, application, and risk signals coming together in Conditional Access policies.](media\recoverability\miscofigurations-conditional-accss-signals.png) - -To learn more about Conditional Access policies, see [What is Conditional Access in Azure Active Directory?](../conditional-access/overview.md) +To learn more about Conditional Access policies, see [What is Conditional Access in Azure Active Directory?](../conditional-access/overview.md). > [!NOTE] -> While configuration alters the behavior or capabilities of an object or policy, not all changes to an object are configuration. You can change the data or attributes associated with an item, such as changing a user’s address, without affecting the capabilities of that user object. -## What is misconfiguration +> While configuration alters the behavior or capabilities of an object or policy, not all changes to an object are configuration. You can change the data or attributes associated with an item, like changing a user's address, without affecting the capabilities of that user object. + +## What is misconfiguration? -A misconfiguration is a configuration of a resource or policy that diverges from your organizational policies or plans and causes unintended or unwanted consequences. 
+Misconfiguration is a configuration of a resource or policy that diverges from your organizational policies or plans and causes unintended or unwanted consequences. A misconfiguration of tenant-wide settings or Conditional Access policies can seriously affect your security and the public image of your organization by: -* Changing how administrators, tenant users, and external users interact with resources in your tenant. +* Changing how administrators, tenant users, and external users interact with resources in your tenant: * Unnecessarily limiting access to resources. - * Loosening access controls on sensitive resources. -* Changing the ability of your users to interact with other tenants, and external users to interact with your tenant. - -* Causing denial of service, for example by not allowing customers to access their accounts. - +* Changing the ability of your users to interact with other tenants and external users to interact with your tenant. +* Causing denial of service, for example, by not allowing customers to access their accounts. * Breaking dependencies among data, systems, and applications resulting in business process failures. ### When does misconfiguration occur? @@ -88,120 +78,98 @@ A misconfiguration of tenant-wide settings or Conditional Access policies can se Misconfiguration is most likely to occur when: * A mistake is made during ad-hoc changes. - * A mistake is made as a result of troubleshooting exercises. - -* Malicious intent by a bad actor. +* An action was carried out with malicious intent by a bad actor. ## Prevent misconfiguration It's critical that alterations to the intended configuration of an Azure AD tenant are subject to robust change management processes, including: * Documenting the change, including prior state and intended post-change state. - -* Using Privileged Identity Management (PIM) to ensure that administrators with intent to change must deliberately escalate their privileges to do so. To learn more about PIM, see [What is Privileged Identity Management?](../privileged-identity-management/pim-configure.md) - +* Using Privileged Identity Management (PIM) to ensure that administrators with intent to change must deliberately escalate their privileges to do so. To learn more about PIM, see [What is Privileged Identity Management?](../privileged-identity-management/pim-configure.md). * Using a strong approval workflow for changes, for example, requiring [approval of PIM escalation of privileges](../privileged-identity-management/azure-ad-pim-approval-workflow.md). - - ## Monitor for configuration changes -While you want to prevent misconfiguration, you can't set the bar for changes so high that it impacts administrators’ ability to perform their work efficiently. +While you want to prevent misconfiguration, you can't set the bar for changes so high that it affects the ability of administrators to perform their work efficiently. -Closely monitor for configuration changes by watching for the following operations in your [Azure AD Audit log](../reports-monitoring/concept-audit-logs.md). +Closely monitor for configuration changes by watching for the following operations in your [Azure AD Audit log](../reports-monitoring/concept-audit-logs.md): * Add - * Create +* Update +* Set +* Delete -* Update - -* Set - -* Delete - -The following table includes informative entries in the Audit Log you can look for. +The following table includes informative entries in the Audit log you can look for. 
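+The tables that follow list specific entries to watch for. If you also want to query for them programmatically, the following sketch uses the documented `auditLogs/directoryAudits` Microsoft Graph endpoint. Treat it as an assumption-laden example: it presumes an access token with `AuditLog.Read.All`, and the `activityDisplayName` filter value is only illustrative; confirm the exact activity strings against your own tenant's Audit log.
+
+```python
+import requests
+
+GRAPH = "https://graph.microsoft.com/v1.0"
+ACCESS_TOKEN = "<access token with AuditLog.Read.All>"  # assumption: token acquisition not shown
+
+headers = {"Authorization": f"Bearer {ACCESS_TOKEN}"}
+
+# Pull recent Audit log entries for one configuration-change activity.
+# The filter value is an example; adjust it to the activity you're tracking.
+params = {
+    "$filter": "activityDisplayName eq 'Update conditional access policy'",
+    "$top": "50",
+}
+
+response = requests.get(f"{GRAPH}/auditLogs/directoryAudits", headers=headers, params=params)
+response.raise_for_status()
+
+for entry in response.json().get("value", []):
+    actor = entry.get("initiatedBy", {}).get("user") or {}
+    print(entry.get("activityDateTime"), entry.get("activityDisplayName"), actor.get("userPrincipalName"))
+```
+
+Entries retrieved this way can be forwarded to your SIEM or compared over time, in line with the monitoring guidance above.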
### Conditional Access and authentication method configuration changes -Conditional Access policies are created on the Conditional Access page in the Azure portal. Changes to policies are made in the Conditional Access policy details page for the policy. +Conditional Access policies are created on the **Conditional Access** page in the Azure portal. Changes to policies are made on the **Conditional Access policy details** page for the policy. | Service filter| Activities| Potential impacts | | - | - | - | -| Conditional Access| Add, Update, or Delete Conditional Access policy| User access is granted or blocked when it shouldn’t be. | -| Conditional Access| Add, Update, or Delete Named location| Network locations consumed by CA Policy aren't configured as intended, creating gaps in CA Policy conditions. | -| Authentication Method| Update Authentication methods policy| Users can use weaker authentication methods or are blocked from a method they should use | - +| Conditional Access| Add, update, or delete Conditional Access policy| User access is granted or blocked when it shouldn’t be. | +| Conditional Access| Add, update, or delete named location| Network locations consumed by the Conditional Access policy aren't configured as intended, which creates gaps in Conditional Access policy conditions. | +| Authentication method| Update authentication methods policy| Users can use weaker authentication methods or are blocked from a method they should use. | ### User and password reset configuration changes -User settings changes are made in the Azure AD portal User settings page. Password Reset changes are made on the Password reset page. Changes made on these pages are captured in the audit log as detailed in the following table. +User settings changes are made on the Azure AD portal **User settings** page. Password reset changes are made on the **Password reset** page. Changes made on these pages are captured in the Audit log as detailed in the following table. | Service filter| Activities| Potential impacts | | - | - | - | -| Core Directory| Update company settings| Users may or may not be able to register applications, contrary to intent. | -| Core Directory| Set company information| Users may or may not be able to access the Azure AD administration portal contrary to intent.
            Sign-in pages don’t represent the company brand with potential damage to reputation | -| Core Directory| **Activity**: Updated service principal
            **Target**: 0365 LinkedIn connection| Users may/may not be able to connect their Azure AD account with LinkedIn contrary to intent. | -| Self-service group Management| Update Myapps feature value| Users may/may not be able to use user features contrary to intent. | -| Self-service group Management| Update ConvergedUXV2 feature value| Users may/may not be able to use user features contrary to intent. | -| Self-service group Management| Update MyStaff feature value| Users may/may not be able to use user features contrary to intent. | -| Core directory| **Activity**: Update service principal
            **Target**: Microsoft password reset service| Users are able/unable to reset their password contrary to intent.
            Users are required/not required to register for SSPR contrary to intent.
            Users can reset their password using methods that are unapproved, for example by using security questions. | - - +| Core directory| Update company settings| Users might or might not be able to register applications, contrary to intent. | +| Core directory| Set company information| Users might or might not be able to access the Azure AD administration portal, contrary to intent.
            Sign-in pages don't represent the company brand, with potential damage to reputation. | +| Core directory| **Activity**: Updated service principal
            **Target**: O365 LinkedIn connection| Users might or might not be able to connect their Azure AD account with LinkedIn, contrary to intent. | +| Self-service group management| Update MyApps feature value| Users might or might not be able to use user features, contrary to intent. | +| Self-service group management| Update ConvergedUXV2 feature value| Users might or might not be able to use user features, contrary to intent. | +| Self-service group management| Update MyStaff feature value| Users might or might not be able to use user features, contrary to intent. | +| Core directory| **Activity**: Update service principal
            **Target**: Microsoft password reset service| Users are able or unable to reset their password, contrary to intent.
            Users are required or not required to register for self-service password reset, contrary to intent.
            Users can reset their password by using methods that are unapproved, for example, by using security questions. | ### External identities configuration changes -You can make changes to these settings on the External identities or External collaboration settings pages in the Azure AD portal. +You can make changes to these settings on the **External identities** or **External collaboration** settings pages in the Azure AD portal. | Service filter| Activities| Potential impacts | | - | - | - | -| Core Directory| Add, update, or delete a partner to cross-tenant access setting| Users have outbound access to tenants that should be blocked.
            Users from external tenants who should be blocked have inbound access | +| Core directory| Add, update, or delete a partner to cross-tenant access setting| Users have outbound access to tenants that should be blocked.
            Users from external tenants who should be blocked have inbound access. | | B2C| Create or delete identity provider| Identity providers for users who should be able to collaborate are missing, blocking access for those users. | -| Core directory| Set directory feature on tenant| External users have greater/less visibility of directory objects than intended.
            External users may/may not invite other external users to your tenant contrary to intent. | -| Core Directory| Set federation settings on domain| External user invitations may/may not be sent to users in other tenants contrary to intent. | -| AuthorizationPolicy| Update authorization policy| External user invitations may/may not be sent to users in other tenants contrary to intent. | -| Core Directory| Update Policy| External user invitations may/may not be sent to users in other tenants contrary to intent. | - - - +| Core directory| Set directory feature on tenant| External users have greater or less visibility of directory objects than intended.
            External users might or might not invite other external users to your tenant, contrary to intent. | +| Core directory| Set federation settings on domain| External user invitations might or might not be sent to users in other tenants, contrary to intent. | +| AuthorizationPolicy| Update authorization policy| External user invitations might or might not be sent to users in other tenants, contrary to intent. | +| Core directory| Update policy| External user invitations might or might not be sent to users in other tenants, contrary to intent. | ### Custom role and mobility definition configuration changes - -| Service filter| Activities / portal| Potential impacts | +| Service filter| Activities/portal| Potential impacts | | - |- | -| -| Core Directory| Add role definition| Custom role scope is narrower or broader than intended | -| PIM| Update role setting| Custom role scope is narrower or broader than intended | -| Core Directory| Update role definition| Custom role scope is narrower or broader than intended | -| Core Directory| Delete role definition| Custom role are missing | -| Core Directory| Add delegated permission grant| Mobile Device Management (MDM) and/or Mobile Application Management (MAM) configuration is missing or misconfigured leading to the failure of device or application management | +| Core directory| Add role definition| Custom role scope is narrower or broader than intended. | +| PIM| Update role setting| Custom role scope is narrower or broader than intended. | +| Core directory| Update role definition| Custom role scope is narrower or broader than intended. | +| Core directory| Delete role definition| Custom roles are missing. | +| Core directory| Add delegated permission grant| Mobile device management or mobile application management configuration is missing or misconfigured, which leads to the failure of device or application management. | ### Audit log detail view -Selecting some audit entries in the Audit Log will provide you with details on the old and new configuration values. For example, for Conditional Access policy configuration changes you can see the information in the following screenshot. - -![A screenshot of audit log details for a change to a conditional access policy.](media/recoverability/misconfiguration-audit-log-details.png) +Selecting some audit entries in the Audit log will provide you with details on the old and new configuration values. For example, for Conditional Access policy configuration changes, you can see the information in the following screenshot. +![Screenshot that shows Audit log details for a change to a Conditional Access policy.](media/recoverability/misconfiguration-audit-log-details.png) ## Use workbooks to track changes -There are several Azure Monitor workbooks that can help you to monitor configuration changes. +Azure Monitor workbooks can help you monitor configuration changes. -[The Sensitive Operations Report workbook](../reports-monitoring/workbook-sensitive-operations-report.md) can help identify suspicious application and service principal activity that may indicate a compromise, including: +The [Sensitive operations report workbook](../reports-monitoring/workbook-sensitive-operations-report.md) can help identify suspicious application and service principal activity that might indicate a compromise, including: -* Modified application or service principal credentials or authentication methods +* Modified application or service principal credentials or authentication methods. 
+* New permissions granted to service principals. +* Directory role and group membership updates for service principals. +* Modified federation settings. -The [Cross-tenant access activity workbook ](../reports-monitoring/workbook-cross-tenant-access-activity.md)can help you monitor which applications in external tenants your users are accessing, and which applications I your tenant external users are accessing. Use this workbook to look for anomalous changes in either inbound or outbound application access across tenants. +The [Cross-tenant access activity workbook](../reports-monitoring/workbook-cross-tenant-access-activity.md) can help you monitor which applications in external tenants your users are accessing and which applications in your tenant external users are accessing. Use this workbook to look for anomalous changes in either inbound or outbound application access across tenants. ## Next steps -For foundational information on recoverability, see [Recoverability best practices](recoverability-overview.md) -for information on recovering from deletions, see [Recover from deletions](recover-from-deletions.md) +- For foundational information on recoverability, see [Recoverability best practices](recoverability-overview.md). +- For information on recovering from deletions, see [Recover from deletions](recover-from-deletions.md). diff --git a/articles/active-directory/fundamentals/recoverability-overview.md b/articles/active-directory/fundamentals/recoverability-overview.md index a8ec4a029bb97..6a372aa9cd76d 100644 --- a/articles/active-directory/fundamentals/recoverability-overview.md +++ b/articles/active-directory/fundamentals/recoverability-overview.md @@ -17,14 +17,13 @@ ms.collection: M365-identity-device-management # Recoverability best practices +Unintended deletions and misconfigurations will happen to your tenant. To minimize the impact of these unintended events, you must prepare for their occurrence. -Unintended deletions and misconfigurations will happen to your tenant. To minimize the impact of these unintended events, you must prepare for their occurrence. +Recoverability is the preparatory processes and functionality that enable you to return your services to a prior functioning state after an unintended change. Unintended changes include the soft or hard deletion or misconfiguration of applications, groups, users, policies, and other objects in your Azure Active Directory (Azure AD) tenant. -Recoverability is the preparatory processes and functionality that enable you to return your services to a prior functioning state after an unintended change. Unintended changes include the soft- or hard-deletion or misconfiguration of applications, groups, users, policies, and other objects in your Azure Active Directory (Azure AD) tenant. +Recoverability helps your organization be more resilient. Resilience, while related, is different. Resilience is the ability to endure disruption to system components and recover with minimal impact to your business, users, customers, and operations. For more information about how to make your systems more resilient, see [Building resilience into identity and access management with Azure Active Directory](resilience-overview.md). -Recoverability helps your organization be more resilient. Resilience while related, is different. 
Resilience is the ability to endure disruption to system components and recover with minimal impact to your business, users, customers, and operations. For more information about making your systems more resilient, see [Building resilient identity and access management with Azure Active Directory](resilience-overview.md). - -This article describes the best practices in preparing for deletions and misconfigurations to minimize the unintended consequences to your organization’s business. +This article describes the best practices in preparing for deletions and misconfigurations to minimize the unintended consequences to your organization's business. ## Deletions and misconfigurations @@ -34,95 +33,84 @@ Deletions and misconfigurations have different impacts on your tenant. The impact of deletions depends on the object type. -Users, Microsoft 365 (Microsoft 365) Groups, and applications can be “soft deleted.” Soft deleted items are sent to the Azure AD recycle bin. While in the recycle bin, items are not available for use. However, they retain all their properties, and can be restored via a Microsoft Graph API call, or in the Azure AD portal. Items in the soft delete state that aren't restored within 30 days, are permanently or “hard deleted.” +Users, Microsoft 365 Groups, and applications can be soft deleted. Soft-deleted items are sent to the Azure AD recycle bin. While in the recycle bin, items aren't available for use. However, they retain all their properties and can be restored via a Microsoft Graph API call or in the Azure AD portal. Items in the soft-delete state that aren't restored within 30 days are permanently, or hard, deleted. -![Screenshot showing that users, Microsoft 365 groups, and applications are soft deleted, and then hard deleted after 30 days.](media/recoverability/overview-deletes.png) +![Diagram that shows that users, Microsoft 365 Groups, and applications are soft deleted and then hard deleted after 30 days.](media/recoverability/overview-deletes.png) > [!IMPORTANT] -> All other object types are hard deleted immediately when selected for deletion. When an object is hard deleted, it cannot be recovered. It must be recreated and reconfigured. -For more information on deletions and how to recover from them, see [Recover from deletions](recover-from-deletions.md). +> All other object types are hard deleted immediately when they're selected for deletion. When an object is hard deleted, it can't be recovered. It must be re-created and reconfigured. +> +>For more information on deletions and how to recover from them, see [Recover from deletions](recover-from-deletions.md). ### Misconfigurations -Configurations are any changes in Azure AD that alter the behavior or capabilities of an Azure AD service or feature. For example, when you configure a Conditional Access policy you alter who can access the targeted applications and under what circumstances. Tenant-wide configurations affect your entire tenant. Configurations of specific objects or services affect only that object and its dependencies. +Misconfigurations are configurations of a resource or policy that diverge from your organizational policies or plans and cause unintended or unwanted consequences. Misconfiguration of tenant-wide settings or Conditional Access policies can seriously affect your security and the public image of your organization. Misconfigurations can: -For more information on misconfigurations and how to recover from them, see [Recover from misconfigurations](recover-from-misconfigurations.md). 
+* Change how administrators, tenant users, and external users interact with resources in your tenant. +* Change the ability of your users to interact with other tenants and external users to interact with your tenant. +* Cause denial of service. +* Break dependencies among data, systems, and applications. -## Shared responsibility +For more information on misconfigurations and how to recover from them, see [Recover from misconfigurations](recover-from-misconfigurations.md). -Recoverability is a shared responsibility between Microsoft as your cloud service provider, and your organization. +## Shared responsibility -![Screenshot that shows shared responsibilities between Microsoft and customers for planning and recovery.](media/recoverability/overview-shared-responsiblility.png) +Recoverability is a shared responsibility between Microsoft as your cloud service provider and your organization. +![Diagram that shows shared responsibilities between Microsoft and customers for planning and recovery.](media/recoverability/overview-shared-responsiblility.png) You can use the tools and services that Microsoft provides to prepare for deletions and misconfigurations. ## Business continuity and disaster planning -Restoring a hard deleted or misconfigured item is a resource-intensive process. You can minimize the resources needed by planning ahead. Consider having a specific team of admins in charge of restorations. +Restoring a hard-deleted or misconfigured item is a resource-intensive process. You can minimize the resources needed by planning ahead. Consider having a specific team of admins in charge of restorations. ### Test your restoration process -You should rehearse your restoration process for different object types, and the communication that will go out as a result. Be sure to do rehearse with test objects, ideally in a test tenant. +Rehearse your restoration process for different object types and the communication that will go out as a result. Be sure to rehearse with test objects, ideally in a test tenant. -Testing your plan can help you to determine the following: +Testing your plan can help you determine the: - Validity and completeness of your object state documentation. - - Typical time to resolution. - - Appropriate communications and their audiences. - - Expected successes and potential challenges. ### Create the communication process -Create a process of pre-defined communications to make others aware of the issue and timelines for restoration. Include the following in your restoration communication plan. - -- The types of communications to go out. Consider creating pre-defined templates. +Create a process of predefined communications to make others aware of the issue and timelines for restoration. Include the following points in your restoration communication plan: -- Stakeholders to receive communications. Include the following as applicable: - - - impacted business owners. - - - operational admins who will perform recovery. +- The types of communications to go out. Consider creating predefined templates. +- Stakeholders to receive communications. Include the following groups, as applicable: + - Affected business owners. + - Operational admins who will perform recovery. - Business and technical approvers. + - Affected users. - - Impacted users. - -- Define the events that trigger communications, such as - - - Initial deletion - - - Impact assessment - - - Time to resolution +- Define the events that trigger communications, such as: - - Restoration + - Initial deletion. 
+ - Impact assessment. + - Time to resolution. + - Restoration. ## Document known good states -Document the state of your tenant and its objects regularly so that in the event of a hard delete or misconfiguration you have a road map to recovery. The following tools can help you in documenting your current state. - -- The [Microsoft Graph APIs](/graph/overview) can be used to export the current state of many Azure AD configurations. - -- You can use the [Azure AD Exporter](https://github.com/microsoft/azureadexporter) to regularly export your configuration settings. - -- The [Microsoft 365 desired state configuration](https://github.com/microsoft/Microsoft365DSC/wiki/What-is-Microsoft365DSC) module is a module of the PowerShell Desired State Configuration framework. It can be used to export the configurations for reference, and application of the prior state of many settings. - -- The [Conditional Access APIs](https://github.com/Azure-Samples/azure-ad-conditional-access-apis) can be used to manage your Conditional Access policies as code. - +Document the state of your tenant and its objects regularly. Then if a hard delete or misconfiguration occurs, you have a roadmap to recovery. The following tools can help you document your current state: +- [Microsoft Graph APIs](/graph/overview) can be used to export the current state of many Azure AD configurations. +- [Azure AD Exporter](https://github.com/microsoft/azureadexporter) is a tool you can use to export your configuration settings. +- [Microsoft 365 Desired State Configuration](https://github.com/microsoft/Microsoft365DSC/wiki/What-is-Microsoft365DSC) is a module of the PowerShell Desired State Configuration framework. You can use it to export configurations for reference and application of the prior state of many settings. +- [Conditional Access APIs](https://github.com/Azure-Samples/azure-ad-conditional-access-apis) can be used to manage your Conditional Access policies as code. ### Commonly used Microsoft Graph APIs -The Microsoft Graph APIs can be used to export the current state of many Azure AD configurations. The APIs cover most scenarios where reference material about the prior state, or the ability to apply that state from an exported copy, could become vital to keep your business running. - -Graph APIs are highly customizable based on your organizational needs. To implement a solution for backups or reference material requires developers to engineer code to query for, store, and display the data. Many implementations use online code repositories as part of this functionality. +You can use Microsoft Graph APIs to export the current state of many Azure AD configurations. The APIs cover most scenarios where reference material about the prior state, or the ability to apply that state from an exported copy, could become vital to keeping your business running. -### Useful APIS for recovery +Microsoft Graph APIs are highly customizable based on your organizational needs. To implement a solution for backups or reference material requires developers to engineer code to query for, store, and display the data. Many implementations use online code repositories as part of this functionality. +### Useful APIs for recovery | Resource types| Reference links | | - | - | @@ -131,53 +119,50 @@ Graph APIs are highly customizable based on your organizational needs. 
To implem | Conditional Access policies| [Conditional Access policy API](/graph/api/resources/conditionalaccesspolicy) | | Devices| [devices API](/graph/api/resources/device) | | Domains| [domains API](/graph/api/domain-list?tabs=http) | -| Administrative Units| [administrativeUnit API)](/graph/api/resources/administrativeunit) | -| Deleted Items*| [deletedItems API](/graph/api/resources/directory) | - +| Administrative units| [administrative unit API)](/graph/api/resources/administrativeunit) | +| Deleted items*| [deletedItems API](/graph/api/resources/directory) | -Securely store these configuration exports with access provided to a limited number of admins. +*Securely store these configuration exports with access provided to a limited number of admins. -The [Azure AD Exporter](https://github.com/microsoft/azureadexporter) can provide most of the documentation you'll need. +The [Azure AD Exporter](https://github.com/microsoft/azureadexporter) can provide most of the documentation you need: - Verify that you've implemented the desired configuration. - Use the exporter to capture current configurations. - Review the export, understand the settings for your tenant that aren't exported, and manually document them. - Store the output in a secure location with limited access. - > [!NOTE] -> Settings in the legacy MFA portal, for Application Proxy and federation settings may not be exported with the Azure AD Exporter, or with the Graph API. -The [Microsoft 365 desired state configuration](https://github.com/microsoft/Microsoft365DSC/wiki/What-is-Microsoft365DSC) module uses Microsoft Graph and PowerShell to retrieve the state of many of the configurations in Azure AD. This information can be used as reference information or, by using PowerShell Desired State Configuration scripting, to reapply a known-good state. +> Settings in the legacy multifactor authentication portal for Application Proxy and federation settings might not be exported with the Azure AD Exporter, or with the Microsoft Graph API. +The [Microsoft 365 Desired State Configuration](https://github.com/microsoft/Microsoft365DSC/wiki/What-is-Microsoft365DSC) module uses Microsoft Graph and PowerShell to retrieve the state of many of the configurations in Azure AD. This information can be used as reference information or, by using PowerShell Desired State Configuration scripting, to reapply a known good state. - Use [Conditional Access Graph APIs](https://github.com/Azure-Samples/azure-ad-conditional-access-apis) to manage policies like code. Automate approvals to promote policies from preproduction environments, backup and restore, monitor change, and plan ahead for emergencies. + Use [Conditional Access Graph APIs](https://github.com/Azure-Samples/azure-ad-conditional-access-apis) to manage policies like code. Automate approvals to promote policies from preproduction environments, backup and restore, monitor change, and plan ahead for emergencies. -### Map the dependencies among objects. +### Map the dependencies among objects -The deletion of some objects can cause a ripple effect due to dependencies. For example, deletion of a security group used for application assignment would result in users who were members of that group being unable to access the applications to which the group was assigned. +The deletion of some objects can cause a ripple effect because of dependencies. 
For example, deletion of a security group used for application assignment would result in users who were members of that group being unable to access the applications to which the group was assigned. #### Common dependencies - -| Object Type| Potential Dependencies | +| Object type| Potential dependencies | | - | - | -| Application object| Service Principal (Enterprise Application).
            Groups assigned to the application.
            Conditional Access Policies affecting the application. | -| Service principals| Application object | -| Conditional Access Policies| Users assigned to the policy.
            Groups assigned to the policy.
            Service Principal (Enterprise Application) targeted by the policy. | -| Groups other than Microsoft 365 Groups| Users assigned to the group.
            Conditional access policies to which the group is assigned.
            Applications to which the group is assigned access. | +| Application object| Service principal (enterprise application).
            Groups assigned to the application.
            Conditional Access policies affecting the application. | +| Service principals| Application object. | +| Conditional Access policies| Users assigned to the policy.
            Groups assigned to the policy.
            Service principal (enterprise application) targeted by the policy. | +| Groups other than Microsoft 365 Groups| Users assigned to the group.
            Conditional Access policies to which the group is assigned.
            Applications to which the group is assigned access. | ## Monitoring and data retention -The [Azure AD Audit Log](../reports-monitoring/concept-audit-logs.md) contains information on all delete and configuration operations performed in your tenant. We recommend that you export these logs to a security information and event management (SIEM) tool such as [Microsoft Sentinel](../../sentinel/overview.md). You can also use Microsoft Graph to audit changes, and build a custom solution to monitor differences over time. For more information on finding deleted items using Microsoft Graph, see [List deleted items - Microsoft Graph v1.0 ](/graph/api/directory-deleteditems-list?tabs=http) +The [Azure AD Audit log](../reports-monitoring/concept-audit-logs.md) contains information on all delete and configuration operations performed in your tenant. We recommend that you export these logs to a security information and event management tool such as [Microsoft Sentinel](../../sentinel/overview.md). You can also use Microsoft Graph to audit changes and build a custom solution to monitor differences over time. For more information on finding deleted items by using Microsoft Graph, see [List deleted items - Microsoft Graph v1.0 ](/graph/api/directory-deleteditems-list?tabs=http). ### Audit logs -The Audit Log always records a "Delete \" event when an object in the tenant is removed from an active state (either from active to soft-deleted or active to hard-deleted). +The Audit log always records a "Delete \" event when an object in the tenant is removed from an active state, either from active to soft deleted or active to hard deleted. -:::image type="content" source="media/recoverability/deletions-audit-log.png" alt-text="Screenshot of audit log detail." lightbox="media/recoverability/deletions-audit-log.png"::: +:::image type="content" source="media/recoverability/deletions-audit-log.png" alt-text="Screenshot that shows Audit log detail." lightbox="media/recoverability/deletions-audit-log.png"::: -A Delete event for applications, users, and Microsoft 365 Groups is a soft delete. For any other object type it's a hard delete. +A Delete event for applications, users, and Microsoft 365 Groups is a soft delete. For any other object type, it's a hard delete. -| Object Type | Activity in log| Result | +| Object type | Activity in log| Result | | - | - | - | | Application| Delete application| Soft deleted | | Application| Hard delete application| Hard deleted | @@ -188,36 +173,32 @@ A Delete event for applications, users, and Microsoft 365 Groups is a soft delet | All other objects| Delete “objectType”| Hard deleted | > [!NOTE] -> The audit log does not distinguish the group type of a deleted group. Only Microsoft 365 Groups are soft-deleted. If you see a Delete group entry, it may be the soft delete of a M365 group, or the hard delete of another type of group. It is therefore important that your documentation of your known good state include the group type for each group in your organization. +> The Audit log doesn't distinguish the group type of a deleted group. Only Microsoft 365 Groups are soft deleted. If you see a Delete group entry, it might be the soft delete of a Microsoft 365 Group or the hard delete of another type of group. Your documentation of your known good state should include the group type for each group in your organization. -For information on monitoring configuration changes, see [Recover from misconfigurations](recover-from-misconfigurations.md). 
+For information on monitoring configuration changes, see [Recover from misconfigurations](recover-from-misconfigurations.md). ### Use workbooks to track configuration changes -There are several Azure Monitor workbooks that can help you to monitor configuration changes. - -[The Sensitive Operations Report workbook](../reports-monitoring/workbook-sensitive-operations-report.md) can help identify suspicious application and service principal activity that may indicate a compromise, including: - -- Modified application or service principal credentials or authentication methods -- New permissions granted to service principals -- Directory role and group membership updates for service principals -- Modified federation settings - -The [Cross-tenant access activity workbook ](../reports-monitoring/workbook-cross-tenant-access-activity.md)can help you monitor which applications in external tenants your users are accessing, and which applications in your tenant external users are accessing. Use this workbook to look for anomalous changes in either inbound or outbound application access across tenants. - -## Operational security +Azure Monitor workbooks can help you monitor configuration changes. -Preventing unwanted changes is far less difficult than needing to recreate and reconfigure objects. Include the following in your change management processes to minimize accidents: +The [Sensitive operations report workbook](../reports-monitoring/workbook-sensitive-operations-report.md) can help identify suspicious application and service principal activity that might indicate a compromise, including: -- Use a least privilege model. Ensure that each member of your team has the least privileges necessary to complete their usual tasks and require a process to escalate privileges for more unusual tasks. +- Modified application or service principal credentials or authentication methods. +- New permissions granted to service principals. +- Directory role and group membership updates for service principals. +- Modified federation settings. -- Administrative control of an object enables configuration and deletion. Use Read Only admin roles, for example the Global Reader role, for any tasks that do not require operations to create, update, or delete (CRUD). When CRUD operations are required, use object specific roles when possible. For example, User Administrators can delete only users, and Application Administrators can delete only applications. Use these more limited roles whenever possible, instead of a Global Administrator role, which can delete anything, including the tenant. +The [Cross-tenant access activity workbook ](../reports-monitoring/workbook-cross-tenant-access-activity.md)can help you monitor which applications in external tenants your users are accessing and which applications in your tenant external users are accessing. Use this workbook to look for anomalous changes in either inbound or outbound application access across tenants. -- [Use Privileged Identity Management (PIM)](../privileged-identity-management/pim-configure.md). PIM enables just-in-time escalation of privileges to perform tasks like hard deletion. You can configure PIM to have notifications and or approvals for the privilege escalation. +## Operational security +Preventing unwanted changes is far less difficult than needing to re-create and reconfigure objects. Include the following tasks in your change management processes to minimize accidents: -## Next steps +- Use a least privilege model. 
Ensure that each member of your team has the least privileges necessary to complete their usual tasks. Require a process to escalate privileges for more unusual tasks. +- Administrative control of an object enables configuration and deletion. Use read-only admin roles, for example, the Global Reader role, for tasks that don't require operations to create, update, or delete (CRUD). When CRUD operations are required, use object-specific roles when possible. For example, User administrators can delete only users, and Application administrators can delete only applications. Use these more limited roles whenever possible, instead of a Global administrator role, which can delete anything, including the tenant. +- [Use Privileged Identity Management (PIM)](../privileged-identity-management/pim-configure.md). PIM enables just-in-time escalation of privileges to perform tasks like hard deletion. You can configure PIM to have notifications or approvals for the privilege escalation. -[Recover from deletions](recover-from-deletions.md) +## Next steps -[Recover from misconfigurations](recover-from-misconfigurations.md) +- [Recover from deletions](recover-from-deletions.md) +- [Recover from misconfigurations](recover-from-misconfigurations.md) diff --git a/articles/active-directory/fundamentals/resilience-app-development-overview.md b/articles/active-directory/fundamentals/resilience-app-development-overview.md index cb0e284716449..1830694608b8b 100644 --- a/articles/active-directory/fundamentals/resilience-app-development-overview.md +++ b/articles/active-directory/fundamentals/resilience-app-development-overview.md @@ -1,6 +1,5 @@ --- title: Increase resilience of authentication and authorization applications you develop -titleSuffix: Microsoft identity platform description: Overview of our resilience guidance for application development using Azure Active Directory and the Microsoft identity platform services: active-directory ms.service: active-directory diff --git a/articles/active-directory/fundamentals/resilience-client-app.md b/articles/active-directory/fundamentals/resilience-client-app.md index 092c4839b2318..bbdb695c3efad 100644 --- a/articles/active-directory/fundamentals/resilience-client-app.md +++ b/articles/active-directory/fundamentals/resilience-client-app.md @@ -1,6 +1,5 @@ --- title: Increase the resilience of authentication and authorization in client applications you develop -titleSuffix: Microsoft identity platform description: Guidance for increasing resiliency of authentication and authorization in client application using the Microsoft identity platform services: active-directory ms.service: active-directory diff --git a/articles/active-directory/fundamentals/resilience-daemon-app.md b/articles/active-directory/fundamentals/resilience-daemon-app.md index fad0116856367..719d8bc894565 100644 --- a/articles/active-directory/fundamentals/resilience-daemon-app.md +++ b/articles/active-directory/fundamentals/resilience-daemon-app.md @@ -1,6 +1,5 @@ --- title: Increase the resilience of authentication and authorization in daemon applications you develop -titleSuffix: Microsoft identity platform description: Guidance for increasing resiliency of authentication and authorization in daemon application using the Microsoft identity platform services: active-directory ms.service: active-directory diff --git a/articles/active-directory/fundamentals/resilience-overview.md b/articles/active-directory/fundamentals/resilience-overview.md index 0fbda46b9a4bc..7d340536102d9 100644 --- 
a/articles/active-directory/fundamentals/resilience-overview.md +++ b/articles/active-directory/fundamentals/resilience-overview.md @@ -1,6 +1,6 @@ --- -title: Building resilient identity and access management with Azure Active Directory -description: A guide for architects, IT administrators, and developers on building resilience to disruption of their identity systems. +title: Resilience in identity and access management with Azure Active Directory +description: Learn how to build resilience into identity and access management. Resilience helps endure disruption to system components and recover with minimal effort. services: active-directory author: BarbaraSelden manager: martinco @@ -8,40 +8,37 @@ manager: martinco ms.service: active-directory ms.workload: identity ms.subservice: fundamentals -ms.topic: conceptual -ms.date: 11/30/2020 +ms.topic: overview +ms.date: 04/29/2022 ms.author: baselden ms.reviewer: ajburnle -ms.custom: "it-pro, seodec18" +ms.custom: + - it-pro + - seodec18 + - kr2b-contr-experiment ms.collection: M365-identity-device-management --- # Building resilience into identity and access management with Azure Active Directory -Identity and access management (IAM) is a framework of processes, policies, and technologies that facilitate the management of identities and what they access. It includes the many components supporting the authentication and authorization of user and other accounts in your system. +Identity and access management (IAM) is a framework of processes, policies, and technologies. IAM facilitates the management of identities and what they access. It includes the many components supporting the authentication and authorization of user and other accounts in your system. -IAM resilience is the ability to endure disruption to system components and recover with minimal impact to your business, users, customers, and operations. Reducing dependencies, complexity, and single-points-of-failure, while ensuring comprehensive error handling will increase your resilience. +IAM resilience is the ability to endure disruption to system components and recover with minimal impact to your business, users, customers, and operations. Reducing dependencies, complexity, and single-points-of-failure, while ensuring comprehensive error handling, increases your resilience. -Disruption can come from any component of your IAM systems. To build a resilient IAM system, assume disruptions will occur and plan for it. +Disruption can come from any component of your IAM systems. To build a resilient IAM system, assume disruptions will occur and plan for them. -When planning the resilience of your IAM solution, consider the following elements: +When planning the resilience of your IAM solution, consider the following elements: -* Your applications that rely on your IAM system. +* Your applications that rely on your IAM system +* The public infrastructures your authentication calls use, including telecom companies, Internet service providers, and public key providers +* Your cloud and on-premises identity providers +* Other services that rely on your IAM, and the APIs that connect them +* Any other on-premises components in your system -* The public infrastructures your authentication calls use, including telecom companies, Internet service providers, and public key providers. - -* Your cloud and on-premises identity providers. - -* Other services that rely on your IAM, and the APIs that connect them. - -* Any other on-premises components in your system. 
- -Whatever the source, recognizing and planning for the contingencies is important. However, adding additional identity systems, and their resultant dependencies and complexity, may reduce your resilience rather than increase it. +Whatever the source, recognizing and planning for the contingencies is important. However, adding other identity systems, and their resultant dependencies and complexity, may reduce your resilience rather than increase it. To build more resilience in your systems, review the following articles: * [Build resilience in your IAM infrastructure](resilience-in-infrastructure.md) - * [Build IAM resilience in your applications](resilience-app-development-overview.md) - * [Build resilience in your Customer Identity and Access Management (CIAM) systems](resilience-b2c.md) diff --git a/articles/active-directory/fundamentals/security-operations-devices.md b/articles/active-directory/fundamentals/security-operations-devices.md index 34e3dfc86543b..fadb98bc1ca52 100644 --- a/articles/active-directory/fundamentals/security-operations-devices.md +++ b/articles/active-directory/fundamentals/security-operations-devices.md @@ -87,11 +87,11 @@ You can create an alert that notifies appropriate administrators when a device i ``` Sign-in logs -| where ResourceDisplayName == “Device Registration Service” +| where ResourceDisplayName == "Device Registration Service" -| where conditionalAccessStatus ==”success” +| where conditionalAccessStatus == "success" -| where AuthenticationRequirement <> “multiFactorAuthentication” +| where AuthenticationRequirement <> "multiFactorAuthentication" ``` You can also use [Microsoft Intune to set and monitor device compliance policies](/mem/intune/protect/device-compliance-get-started). @@ -104,7 +104,7 @@ It might not be possible to block access to all cloud and software-as-a-service | What to monitor| Risk Level| Where| Filter/sub-filter| Notes | | - |- |- |- |- | -| Sign-ins by non-compliant devices| High| Sign-in logs| DeviceDetail.isCompliant ==false| If requiring sign-in from compliant devices, alert when:
          • any sign in by non-compliant devices.
          • any access without MFA or a trusted location.

            If working toward requiring devices, monitor for suspicious sign-ins.
            [Azure Sentinel template](https://github.com/Azure/Azure-Sentinel/blob/master/Hunting%20Queries/SigninLogs/SuspiciousSignintoPrivilegedAccount.yaml) | +| Sign-ins by non-compliant devices| High| Sign-in logs| DeviceDetail.isCompliant == false| If requiring sign-in from compliant devices, alert when:

          • any sign-in by non-compliant devices.
          • any access without MFA or a trusted location.

            If working toward requiring devices, monitor for suspicious sign-ins.
            [Azure Sentinel template](https://github.com/Azure/Azure-Sentinel/blob/master/Hunting%20Queries/SigninLogs/SuspiciousSignintoPrivilegedAccount.yaml) | | Sign-ins by unknown devices| Low| Sign-in logs|

          • DeviceDetail is empty
          • Single factor authentication
          • From a non-trusted location| Look for:
          • any access from out of compliance devices.
          • any access without MFA or trusted location | @@ -115,9 +115,9 @@ It might not be possible to block access to all cloud and software-as-a-service ``` SigninLogs -| where DeviceDetail.isCompliant ==false +| where DeviceDetail.isCompliant == false -| where conditionalAccessStatus == “success” +| where conditionalAccessStatus == "success" ``` @@ -149,7 +149,7 @@ Attackers who have compromised a user’s device may retrieve the [BitLocker](/w | What to monitor| Risk Level| Where| Filter/sub-filter| Notes | | - |- |- |- |- | -| Key retrieval| Medium| Audit logs| OperationName == "Read BitLocker key”| Look for
          • key retrieval`
          • other anomalous behavior by users retrieving keys. | +| Key retrieval| Medium| Audit logs| OperationName == "Read BitLocker key"| Look for
          • key retrieval
          • other anomalous behavior by users retrieving keys. | In LogAnalytics create a query such as @@ -157,7 +157,7 @@ In LogAnalytics create a query such as ``` AuditLogs -| where OperationName == "Read BitLocker key” +| where OperationName == "Read BitLocker key" ``` ## Device administrator roles diff --git a/articles/active-directory/fundamentals/security-operations-introduction.md b/articles/active-directory/fundamentals/security-operations-introduction.md index cf348b900eff8..737dbce4013e9 100644 --- a/articles/active-directory/fundamentals/security-operations-introduction.md +++ b/articles/active-directory/fundamentals/security-operations-introduction.md @@ -1,233 +1,218 @@ --- title: Azure Active Directory security operations guide -description: Learn to monitor, identify, and alert on security issues with accounts, applications, devices, and infrastructure +description: Learn to monitor, identify, and alert on security issues with accounts, applications, devices, and infrastructure in Azure Active Directory. services: active-directory author: BarbaraSelden manager: martinco ms.service: active-directory ms.workload: identity ms.subservice: fundamentals -ms.topic: conceptual -ms.date: 07/15/2021 +ms.topic: overview +ms.date: 04/29/2022 ms.author: baselden -ms.custom: "it-pro, seodec18" +ms.custom: + - it-pro + - seodec18 + - kr2b-contr-experiment ms.collection: M365-identity-device-management --- # Azure Active Directory security operations guide -Microsoft has a successful and proven approach to [Zero Trust security](https://aka.ms/Zero-Trust) using [Defense in Depth](https://us-cert.cisa.gov/bsi/articles/knowledge/principles/defense-in-depth) principles that leverage identity as a control plane. As organizations continue to embrace a hybrid workload world for scale, cost savings, and security, Azure Active Directory (Azure AD) plays a pivotal role in your strategy for identity management. Recently, news surrounding identity and security compromise has increasingly prompted enterprise IT to consider their identity security posture as a measurement of defensive security success. +Microsoft has a successful and proven approach to [Zero Trust security](https://aka.ms/Zero-Trust) using [Defense in Depth](https://us-cert.cisa.gov/bsi/articles/knowledge/principles/defense-in-depth) principles that use identity as a control plane. Organizations continue to embrace a hybrid workload world for scale, cost savings, and security. Azure Active Directory (Azure AD) plays a pivotal role in your strategy for identity management. Recently, news surrounding identity and security compromise has increasingly prompted enterprise IT to consider their identity security posture as a measurement of defensive security success. Increasingly, organizations must embrace a mixture of on-premises and cloud applications, which users access with both on–premises and cloud-only accounts. Managing users, applications, and devices both on-premises and in the cloud poses challenging scenarios. -Azure Active Directory creates a common user identity for authentication and authorization to all resources, regardless of location. We call this hybrid identity. +## Hybrid identity + +Azure Active Directory creates a common user identity for authentication and authorization to all resources, regardless of location. We call this *hybrid identity*. To achieve hybrid identity with Azure AD, one of three authentication methods can be used, depending on your scenarios. 
The three methods are: * [Password hash synchronization (PHS)](../hybrid/whatis-phs.md) - * [Pass-through authentication (PTA)](../hybrid/how-to-connect-pta.md) - * [Federation (AD FS)](../hybrid/whatis-fed.md) As you audit your current security operations or establish security operations for your Azure environment, we recommend you: * Read specific portions of the Microsoft security guidance to establish a baseline of knowledge about securing your cloud-based or hybrid Azure environment. - * Audit your account and password strategy and authentication methods to help deter the most common attack vectors. - * Create a strategy for continuous monitoring and alerting on activities that might indicate a security threat. -## Audience +### Audience The Azure AD SecOps Guide is intended for enterprise IT identity and security operations teams and managed service providers that need to counter threats through better identity security configuration and monitoring profiles. This guide is especially relevant for IT administrators and identity architects advising Security Operations Center (SOC) defensive and penetration testing teams to improve and maintain their identity security posture. -## Scope +### Scope -This introduction provides the suggested prereading and password audit and strategy recommendations. This article also provides an overview of the tools available for hybrid Azure environments as well as fully cloud-based Azure environments. Finally, we provide a list of data sources you can use for monitoring and alerting and configuring your security information and event management (SIEM) strategy and environment. The rest of the guidance presents monitoring and alerting strategies in the following areas: +This introduction provides the suggested prereading and password audit and strategy recommendations. This article also provides an overview of the tools available for hybrid Azure environments and fully cloud-based Azure environments. Finally, we provide a list of data sources you can use for monitoring and alerting and configuring your security information and event management (SIEM) strategy and environment. The rest of the guidance presents monitoring and alerting strategies in the following areas: -* [User accounts](security-operations-user-accounts.md) – Guidance specific to non-privileged user accounts without administrative privilege, including anomalous account creation and usage, and unusual sign-ins. +* [User accounts](security-operations-user-accounts.md). Guidance specific to non-privileged user accounts without administrative privilege, including anomalous account creation and usage, and unusual sign-ins. -* [Privileged accounts](security-operations-privileged-accounts.md) – Guidance specific to privileged user accounts that have elevated permissions to perform administrative tasks, including Azure AD role assignments, Azure resource role assignments, and access management for Azure resources and subscriptions. +* [Privileged accounts](security-operations-privileged-accounts.md). Guidance specific to privileged user accounts that have elevated permissions to perform administrative tasks. Tasks include Azure AD role assignments, Azure resource role assignments, and access management for Azure resources and subscriptions. -* [Privileged Identity Management (PIM)](security-operations-privileged-identity-management.md) – guidance specific to using PIM to manage, control, and monitor access to resources. 
+* [Privileged Identity Management (PIM)](security-operations-privileged-identity-management.md). Guidance specific to using PIM to manage, control, and monitor access to resources. -* [Applications](security-operations-applications.md) – Guidance specific to accounts used to provide authentication for applications. +* [Applications](security-operations-applications.md). Guidance specific to accounts used to provide authentication for applications. -* [Devices](security-operations-devices.md) – Guidance specific to monitoring and alerting for devices registered or joined outside of policies, non-compliant usage, managing device administration roles, and sign-ins to virtual machines. +* [Devices](security-operations-devices.md). Guidance specific to monitoring and alerting for devices registered or joined outside of policies, non-compliant usage, managing device administration roles, and sign-ins to virtual machines. -* [Infrastructure](security-operations-infrastructure.md)– Guidance specific to monitoring and alerting on threats to your hybrid and purely cloud-based environments. +* [Infrastructure](security-operations-infrastructure.md). Guidance specific to monitoring and alerting on threats to your hybrid and purely cloud-based environments. ## Important reference content -Microsoft has many products and services that enable you to customize your IT environment to fit your needs. We recommend as part of your monitoring and alerting strategy you review the following guidance that is relevant to your operating environment: +Microsoft has many products and services that enable you to customize your IT environment to fit your needs. We recommend that you review the following guidance for your operating environment: * Windows operating systems - * [Windows 10 and Windows Server 2016 security auditing and monitoring reference](https://www.microsoft.com/download/details.aspx?id=52630) - - * [Security baseline (FINAL) for Windows 10 v1909 and Windows Server v1909](https://techcommunity.microsoft.com/t5/microsoft-security-baselines/security-baseline-final-for-windows-10-v1909-and-windows-server/ba-p/1023093) + * [Windows 10 and Windows Server 2016 security auditing and monitoring reference](https://www.microsoft.com/download/details.aspx?id=52630) + * [Security baseline (FINAL) for Windows 10 v1909 and Windows Server v1909](https://techcommunity.microsoft.com/t5/microsoft-security-baselines/security-baseline-final-for-windows-10-v1909-and-windows-server/ba-p/1023093) + * [Security baseline for Windows 11](https://techcommunity.microsoft.com/t5/microsoft-security-baselines/windows-11-security-baseline/ba-p/2810772) + * [Security baseline for Windows Server 2022](https://techcommunity.microsoft.com/t5/microsoft-security-baselines/windows-server-2022-security-baseline/ba-p/2724685) - * [Security baseline for Windows 11](https://techcommunity.microsoft.com/t5/microsoft-security-baselines/windows-11-security-baseline/ba-p/2810772) - - * [Security baseline for Windows Server 2022](https://techcommunity.microsoft.com/t5/microsoft-security-baselines/windows-server-2022-security-baseline/ba-p/2724685) - * On-premises environments - * [Microsoft Defender for Identity architecture](/defender-for-identity/architecture) - - * [Connect Microsoft Defender for Identity to Active Directory quickstart](/defender-for-identity/install-step2) - - * [Azure security baseline for Microsoft Defender for Identity](/defender-for-identity/security-baseline) - - * [Monitoring Active Directory for Signs of 
Compromise](/windows-server/identity/ad-ds/plan/security-best-practices/monitoring-active-directory-for-signs-of-compromise) + * [Microsoft Defender for Identity architecture](/defender-for-identity/architecture) + * [Connect Microsoft Defender for Identity to Active Directory quickstart](/defender-for-identity/install-step2) + * [Azure security baseline for Microsoft Defender for Identity](/defender-for-identity/security-baseline) + * [Monitoring Active Directory for Signs of Compromise](/windows-server/identity/ad-ds/plan/security-best-practices/monitoring-active-directory-for-signs-of-compromise) * Cloud-based Azure environments - - * [Monitor sign-ins with the Azure AD sign-in log](../reports-monitoring/concept-all-sign-ins.md) - - * [Audit activity reports in the Azure Active Directory portal](../reports-monitoring/concept-audit-logs.md) - - * [Investigate risk with Azure Active Directory Identity Protection](../identity-protection/howto-identity-protection-investigate-risk.md) - - * [Connect Azure AD Identity Protection data to Microsoft Sentinel](../../sentinel/data-connectors-reference.md#azure-active-directory-identity-protection) + * [Monitor sign-ins with the Azure AD sign-in log](../reports-monitoring/concept-all-sign-ins.md) + * [Audit activity reports in the Azure Active Directory portal](../reports-monitoring/concept-audit-logs.md) + * [Investigate risk with Azure Active Directory Identity Protection](../identity-protection/howto-identity-protection-investigate-risk.md) + * [Connect Azure AD Identity Protection data to Microsoft Sentinel](../../sentinel/data-connectors-reference.md#azure-active-directory-identity-protection) * Active Directory Domain Services (AD DS) - * [Audit Policy Recommendations](/windows-server/identity/ad-ds/plan/security-best-practices/audit-policy-recommendations) + * [Audit Policy Recommendations](/windows-server/identity/ad-ds/plan/security-best-practices/audit-policy-recommendations) * Active Directory Federation Services (AD FS) - * [AD FS Troubleshooting - Auditing Events and Logging](/windows-server/identity/ad-fs/troubleshooting/ad-fs-tshoot-logging) + * [AD FS Troubleshooting - Auditing Events and Logging](/windows-server/identity/ad-fs/troubleshooting/ad-fs-tshoot-logging) -## Data sources +## Data sources The log files you use for investigation and monitoring are: * [Azure AD Audit logs](../reports-monitoring/concept-audit-logs.md) - * [Sign-in logs](../reports-monitoring/concept-all-sign-ins.md) - * [Microsoft 365 Audit logs](/microsoft-365/compliance/auditing-solutions-overview) - * [Azure Key Vault logs](../../key-vault/general/logging.md?tabs=Vault) -From the Azure portal you can view the Azure AD Audit logs and download as comma separated value (CSV) or JavaScript Object Notation (JSON) files. The Azure portal has several ways to integrate Azure AD logs with other tools that allow for greater automation of monitoring and alerting: +From the Azure portal, you can view the Azure AD Audit logs. Download logs as comma separated value (CSV) or JavaScript Object Notation (JSON) files. The Azure portal has several ways to integrate Azure AD logs with other tools that allow for greater automation of monitoring and alerting: -* **[Microsoft Sentinel](../../sentinel/overview.md)** – enables intelligent security analytics at the enterprise level by providing security information and event management (SIEM) capabilities. +* **[Microsoft Sentinel](../../sentinel/overview.md)**. 
Enables intelligent security analytics at the enterprise level by providing security information and event management (SIEM) capabilities. -* **[Azure Monitor](../../azure-monitor/overview.md)** – enables automated monitoring and alerting of various conditions. Can create or use workbooks to combine data from different sources. +* **[Azure Monitor](../../azure-monitor/overview.md)**. Enables automated monitoring and alerting of various conditions. Can create or use workbooks to combine data from different sources. -* **[Azure Event Hubs](../../event-hubs/event-hubs-about.md) integrated with a SIEM**- [Azure AD logs can be integrated to other SIEMs](../reports-monitoring/tutorial-azure-monitor-stream-logs-to-event-hub.md) such as Splunk, ArcSight, QRadar and Sumo Logic via the Azure Event Hub integration. +* **[Azure Event Hubs](../../event-hubs/event-hubs-about.md)** integrated with a SIEM. Azure AD logs can be integrated to other SIEMs such as Splunk, ArcSight, QRadar and Sumo Logic via the Azure Event Hubs integration. For more information, see [Stream Azure Active Directory logs to an Azure event hub](../reports-monitoring/tutorial-azure-monitor-stream-logs-to-event-hub.md). -* **[Microsoft Defender for Cloud Apps](/cloud-app-security/what-is-cloud-app-security)** – enables you to discover and manage apps, govern across apps and resources, and check the compliance of your cloud apps. +* **[Microsoft Defender for Cloud Apps](/cloud-app-security/what-is-cloud-app-security)**. Enables you to discover and manage apps, govern across apps and resources, and check the compliance of your cloud apps. -* **[Securing workload identities with Identity Protection Preview](..//identity-protection/concept-workload-identity-risk.md)** - Used to detect risk on workload identities across sign-in behavior and offline indicators of compromise. +* **[Securing workload identities with Identity Protection Preview](../identity-protection/concept-workload-identity-risk.md)**. Used to detect risk on workload identities across sign-in behavior and offline indicators of compromise. -Much of what you will monitor and alert on are the effects of your Conditional Access policies. You can use the [Conditional Access insights and reporting workbook](../conditional-access/howto-conditional-access-insights-reporting.md) to examine the effects of one or more Conditional Access policies on your sign-ins, as well as the results of policies, including device state. This workbook enables you to view an impact summary, and identify the impact over a specific time period. You can also use the workbook to investigate the sign-ins of a specific user. +Much of what you will monitor and alert on are the effects of your Conditional Access policies. You can use the Conditional Access insights and reporting workbook to examine the effects of one or more Conditional Access policies on your sign-ins and the results of policies, including device state. This workbook enables you to view an impact summary, and identify the impact over a specific time period. You can also use the workbook to investigate the sign-ins of a specific user. For more information, see [Conditional Access insights and reporting](../conditional-access/howto-conditional-access-insights-reporting.md). -The remainder of this article describes what we recommend you monitor and alert on, and is organized by the type of threat. Where there are specific pre-built solutions we link to them or provide samples following the table. 
Otherwise, you can build alerts using the preceding tools. +The remainder of this article describes what to monitor and alert on. Where there are specific pre-built solutions we link to them or provide samples following the table. Otherwise, you can build alerts using the preceding tools. -* **[Identity Protection](../identity-protection/overview-identity-protection.md)** -- generates three key reports that you can use to help with your investigation: +* **[Identity Protection](../identity-protection/overview-identity-protection.md)** generates three key reports that you can use to help with your investigation: - * **Risky users** – contains information about which users are at risk, details about detections, history of all risky sign-ins, and risk history. +* **Risky users** contains information about which users are at risk, details about detections, history of all risky sign-ins, and risk history. - * **Risky sign-ins** – contains information surrounding the circumstance of a sign-in that might indicate suspicious circumstances. For additional information on investigating information from this report, visit [How To: Investigate risk](../identity-protection/howto-identity-protection-investigate-risk.md). +* **Risky sign-ins** contains information surrounding the circumstance of a sign-in that might indicate suspicious circumstances. For more information on investigating information from this report, see [How To: Investigate risk](../identity-protection/howto-identity-protection-investigate-risk.md). - * **Risk detections** - contains information on risk signals detected by Azure AD Identity Protection that informs sign-in and user risk. For more information, see the [Azure AD security operations guide for user accounts](security-operations-user-accounts.md). +* **Risk detections** contains information on risk signals detected by Azure AD Identity Protection that informs sign-in and user risk. For more information, see the [Azure AD security operations guide for user accounts](security-operations-user-accounts.md). + +For more information, see [What is Identity Protection](../identity-protection/overview-identity-protection.md). ### Data sources for domain controller monitoring -For the best results, we recommend that you monitor your domain controllers using Microsoft Defender for Identity. This will enable you for the best detection and automation capabilities. Please follow the guidance from: +For the best results, we recommend that you monitor your domain controllers using Microsoft Defender for Identity. This approach enables the best detection and automation capabilities. Follow the guidance from these resources: * [Microsoft Defender for Identity architecture](/defender-for-identity/architecture) - * [Connect Microsoft Defender for Identity to Active Directory quickstart](/defender-for-identity/install-step2) -If you do not plan to use Microsoft Defender for identity, you can [monitor your domain controllers either by event log messages](/windows-server/identity/ad-ds/plan/security-best-practices/monitoring-active-directory-for-signs-of-compromise) or by [running PowerShell cmdlets](/windows-server/identity/ad-ds/deploy/troubleshooting-domain-controller-deployment). +If you don't plan to use Microsoft Defender for Identity, monitor your domain controllers by one of these approaches: + +* Event log messages. See [Monitoring Active Directory for Signs of Compromise](/windows-server/identity/ad-ds/plan/security-best-practices/monitoring-active-directory-for-signs-of-compromise). 
+* PowerShell cmdlets. See [Troubleshooting Domain Controller Deployment](/windows-server/identity/ad-ds/deploy/troubleshooting-domain-controller-deployment). ## Components of hybrid authentication -As part of an Azure hybrid environment, the following should be baselined and included in your monitoring and alerting strategy. +As part of an Azure hybrid environment, the following items should be baselined and included in your monitoring and alerting strategy. -* **PTA Agent** – The Pass-through authentication agent is used to enable pass-through authentication and is installed on-premises. See [Azure AD Pass-through Authentication agent: Version release history](../hybrid/reference-connect-pta-version-history.md) for information on verifying your agent version and next steps. +* **PTA Agent**. The pass-through authentication agent is used to enable pass-through authentication and is installed on-premises. See [Azure AD Pass-through Authentication agent: Version release history](../hybrid/reference-connect-pta-version-history.md) for information on verifying your agent version and next steps. -* **AD FS/WAP** – Azure Active Directory Federation Services (Azure AD FS) and Web Application Proxy (WAP) enable secure sharing of digital identity and entitlement rights across your security and enterprise boundaries. For information on security best practices, see [Best practices for securing Active Directory Federation Services](/windows-server/identity/ad-fs/deployment/best-practices-securing-ad-fs). +* **AD FS/WAP**. Azure Active Directory Federation Services (Azure AD FS) and Web Application Proxy (WAP) enable secure sharing of digital identity and entitlement rights across your security and enterprise boundaries. For information on security best practices, see [Best practices for securing Active Directory Federation Services](/windows-server/identity/ad-fs/deployment/best-practices-securing-ad-fs). -* **Azure AD Connect Health Agent** – The agent used to provide a communications link for Azure AD Connect Health. For information on installing the agent, see [Azure AD Connect Health agent installation](../hybrid/how-to-connect-health-agent-install.md). +* **Azure AD Connect Health Agent**. The agent used to provide a communications link for Azure AD Connect Health. For information on installing the agent, see [Azure AD Connect Health agent installation](../hybrid/how-to-connect-health-agent-install.md). -* **Azure AD Connect Sync Engine** - The on-premises component, also called the sync engine. For information on the feature, see [Azure AD Connect sync service features](../hybrid/how-to-connect-syncservice-features.md). +* **Azure AD Connect Sync Engine**. The on-premises component, also called the sync engine. For information on the feature, see [Azure AD Connect sync service features](../hybrid/how-to-connect-syncservice-features.md). -* **Password Protection DC agent** – Azure password protection DC agent is used to help with monitoring and reporting event log messages. For information, see [Enforce on-premises Azure AD Password Protection for Active Directory Domain Services](../authentication/concept-password-ban-bad-on-premises.md). +* **Password Protection DC agent**. Azure password protection DC agent is used to help with monitoring and reporting event log messages. For information, see [Enforce on-premises Azure AD Password Protection for Active Directory Domain Services](../authentication/concept-password-ban-bad-on-premises.md). 
-* **Password Filter DLL** – The password filter DLL of the DC Agent receives user password-validation requests from the operating system. The filter forwards them to the DC Agent service that's running locally on the DC. For information on using the DLL, see [Enforce on-premises Azure AD Password Protection for Active Directory Domain Services](../authentication/concept-password-ban-bad-on-premises.md). +* **Password Filter DLL**. The password filter DLL of the DC Agent receives user password-validation requests from the operating system. The filter forwards them to the DC Agent service that's running locally on the DC. For information on using the DLL, see [Enforce on-premises Azure AD Password Protection for Active Directory Domain Services](../authentication/concept-password-ban-bad-on-premises.md). -* **Password writeback Agent** – Password writeback is a feature enabled with [Azure AD Connect](../hybrid/whatis-hybrid-identity.md) that allows password changes in the cloud to be written back to an existing on-premises directory in real time. For more information on this feature, see [How does self-service password reset writeback work in Azure Active Directory?](../authentication/concept-sspr-writeback.md) +* **Password writeback Agent**. Password writeback is a feature enabled with [Azure AD Connect](../hybrid/whatis-hybrid-identity.md) that allows password changes in the cloud to be written back to an existing on-premises directory in real time. For more information on this feature, see [How does self-service password reset writeback work in Azure Active Directory](../authentication/concept-sspr-writeback.md). -* **Azure AD Application Proxy Connector** – Lightweight agents that sit on-premises and facilitate the outbound connection to the Application Proxy service. For more information, see [Understand Azure ADF Application Proxy connectors](../app-proxy/application-proxy-connectors.md). +* **Azure AD Application Proxy Connector**. Lightweight agents that sit on-premises and facilitate the outbound connection to the Application Proxy service. For more information, see [Understand Azure ADF Application Proxy connectors](../app-proxy/application-proxy-connectors.md). ## Components of cloud-based authentication -As part of an Azure cloud-based environment, the following should be baselined and included in your monitoring and alerting strategy. +As part of an Azure cloud-based environment, the following items should be baselined and included in your monitoring and alerting strategy. -* **Azure AD Application Proxy** – This cloud service provides secure remote access to on-premises web applications. For more information, see [Remote access to on-premises applications through Azure AD Application Proxy](../app-proxy/application-proxy-connectors.md). +* **Azure AD Application Proxy**. This cloud service provides secure remote access to on-premises web applications. For more information, see [Remote access to on-premises applications through Azure AD Application Proxy](../app-proxy/application-proxy-connectors.md). -* **Azure AD Connect** – Services used for an Azure AD Connect solution. For more information, see [What is Azure AD Connect](../hybrid/whatis-azure-ad-connect.md). +* **Azure AD Connect**. Services used for an Azure AD Connect solution. For more information, see [What is Azure AD Connect](../hybrid/whatis-azure-ad-connect.md). 
-* **Azure AD Connect Health** – Service Health provides you with a customizable dashboard which tracks the health of your Azure services in the regions where you use them. For more information, see [Azure AD Connect Health](../hybrid/whatis-azure-ad-connect.md). +* **Azure AD Connect Health**. Service Health provides you with a customizable dashboard that tracks the health of your Azure services in the regions where you use them. For more information, see [Azure AD Connect Health](../hybrid/whatis-azure-ad-connect.md). -* **Azure MFA** – Azure AD Multi-Factor Authentication requires a user to provide more than one form of proof for authentication. This can provide a proactive first step to securing your environment. For more information, see [How it works: Azure AD Multi-Factor Authentication](../authentication/concept-mfa-howitworks.md). +* **Azure AD multifactor authentication**. Multifactor authentication requires a user to provide more than one form of proof for authentication. This approach can provide a proactive first step to securing your environment. For more information, see [Azure AD multi-factor authentication](../authentication/concept-mfa-howitworks.md). -* **Dynamic Groups** – Dynamic configuration of security group membership for Azure Active Directory (Azure AD) Administrators can set rules to populate groups that are created in Azure AD based on user attributes. For more information, see [Dynamic groups and Azure Active Directory B2B collaboration](../external-identities/use-dynamic-groups.md). +* **Dynamic groups**. Dynamic configuration of security group membership for Azure AD Administrators can set rules to populate groups that are created in Azure AD based on user attributes. For more information, see [Dynamic groups and Azure Active Directory B2B collaboration](../external-identities/use-dynamic-groups.md). -* **Conditional Access** – Conditional Access is the tool used by Azure Active Directory to bring signals together, to make decisions, and enforce organizational policies. Conditional Access is at the heart of the new identity driven control plane. For more information, see [What is Conditional Access](../conditional-access/overview.md). +* **Conditional Access**. Conditional Access is the tool used by Azure Active Directory to bring signals together, to make decisions, and enforce organizational policies. Conditional Access is at the heart of the new identity driven control plane. For more information, see [What is Conditional Access](../conditional-access/overview.md). -* **Identity Protection** – A tool that enables organizations to automate the detection and remediation of identity-based risks, investigate risks using data in the portal, and export risk detection data to your SIEM. For more information, see [What is Identity Protection](../identity-protection/overview-identity-protection.md)? +* **Identity Protection**. A tool that enables organizations to automate the detection and remediation of identity-based risks, investigate risks using data in the portal, and export risk detection data to your SIEM. For more information, see [What is Identity Protection](../identity-protection/overview-identity-protection.md). -* **Group-based licensing**– Licenses can be assigned to groups rather than directly to users. Azure AD stores information about license assignment states for users. +* **Group-based licensing**. Licenses can be assigned to groups rather than directly to users. Azure AD stores information about license assignment states for users. 
-* **Provisioning Service** – Provisioning refers to creating user identities and roles in the cloud applications that users need access to. In addition to creating user identities, automatic provisioning includes the maintenance and removal of user identities as status or roles change. For more information, see [How Application Provisioning works in Azure Active Directory](../app-provisioning/how-provisioning-works.md). +* **Provisioning Service**. Provisioning refers to creating user identities and roles in the cloud applications that users need access to. In addition to creating user identities, automatic provisioning includes the maintenance and removal of user identities as status or roles change. For more information, see [How Application Provisioning works in Azure Active Directory](../app-provisioning/how-provisioning-works.md). -* **Graph API** – The Microsoft Graph API is a RESTful web API that enables you to access Microsoft Cloud service resources. After you register your app and get authentication tokens for a user or service, you can make requests to the Microsoft Graph API. For more information, see [Overview of Microsoft Graph](/graph/overview). +* **Graph API**. The Microsoft Graph API is a RESTful web API that enables you to access Microsoft Cloud service resources. After you register your app and get authentication tokens for a user or service, you can make requests to the Microsoft Graph API. For more information, see [Overview of Microsoft Graph](/graph/overview). -* **Domain Service** – Azure Active Directory Domain Services (AD DS) provides managed domain services such as domain join, group policy. For more information, see [What is Azure Active Directory Domain Services?](../../active-directory-domain-services/overview.md) +* **Domain Service**. Azure Active Directory Domain Services (AD DS) provides managed domain services such as domain join, group policy. For more information, see [What is Azure Active Directory Domain Services](../../active-directory-domain-services/overview.md). -* **Azure Resource Manager** – Azure Resource Manager is the deployment and management service for Azure. It provides a management layer that enables you to create, update, and delete resources in your Azure account. For more information, see [What is Azure Resource Manager?](../../azure-resource-manager/management/overview.md) +* **Azure Resource Manager**. Azure Resource Manager is the deployment and management service for Azure. It provides a management layer that enables you to create, update, and delete resources in your Azure account. For more information, see [What is Azure Resource Manager](../../azure-resource-manager/management/overview.md). -* **Managed Identity** – Managed identities eliminate the need for developers to manage credentials. Managed identities provide an identity for applications to use when connecting to resources that support Azure Active Directory (Azure AD) authentication. For more information, see [What are managed identities for Azure resources?](../managed-identities-azure-resources/overview.md) +* **Managed identity**. Managed identities eliminate the need for developers to manage credentials. Managed identities provide an identity for applications to use when connecting to resources that support Azure AD authentication. For more information, see [What are managed identities for Azure resources](../managed-identities-azure-resources/overview.md). 
-* **Privileged Identity Management** – Privileged Identity Management (PIM) is a service in Azure Active Directory (Azure AD) that enables you to manage, control, and monitor access to important resources in your organization. For more information, see [What is Azure AD Privileged Identity Management](../privileged-identity-management/pim-configure.md). +* **Privileged Identity Management**. PIM is a service in Azure AD that enables you to manage, control, and monitor access to important resources in your organization. For more information, see [What is Azure AD Privileged Identity Management](../privileged-identity-management/pim-configure.md). -* **Access Reviews** – Azure Active Directory (Azure AD) access reviews enable organizations to efficiently manage group memberships, access to enterprise applications, and role assignments. User's access can be reviewed on a regular basis to make sure only the right people have continued access. For more information, see [What are Azure AD access reviews?](../governance/access-reviews-overview.md) +* **Access reviews**. Azure AD access reviews enable organizations to efficiently manage group memberships, access to enterprise applications, and role assignments. User's access can be reviewed regularly to make sure only the right people have continued access. For more information, see [What are Azure AD access reviews](../governance/access-reviews-overview.md). -* **Entitlement Management** – Azure Active Directory (Azure AD) entitlement management is an [identity governance](../governance/identity-governance-overview.md) feature that enables organizations to manage identity and access lifecycle at scale, by automating access request workflows, access assignments, reviews, and expiration. For more information, see [What is Azure AD entitlement management?](../governance/entitlement-management-overview.md) +* **Entitlement management**. Azure AD entitlement management is an [identity governance](../governance/identity-governance-overview.md) feature. Organizations can manage identity and access lifecycle at scale, by automating access request workflows, access assignments, reviews, and expiration. For more information, see [What is Azure AD entitlement management](../governance/entitlement-management-overview.md). -* **Activity Logs** – The Activity log is a [platform log](../../azure-monitor/essentials/platform-logs-overview.md) in Azure that provides insight into subscription-level events. This includes such information as when a resource is modified or when a virtual machine is started. For more information, see [Azure Activity log](../../azure-monitor/essentials/activity-log.md). +* **Activity logs**. The Activity log is an Azure [platform log](../../azure-monitor/essentials/platform-logs-overview.md) that provides insight into subscription-level events. This log includes such information as when a resource is modified or when a virtual machine is started. For more information, see [Azure Activity log](../../azure-monitor/essentials/activity-log.md). -* **Self-service Password reset service** – Azure Active Directory (Azure AD) self-service password reset (SSPR) gives users the ability to change or reset their password, with no administrator or help desk involvement. For more information, see [How it works: Azure AD self-service password reset](../authentication/concept-sspr-howitworks.md). +* **Self-service password reset service**. Azure AD self-service password reset (SSPR) gives users the ability to change or reset their password. 
The administrator or help desk isn't required. For more information, see [How it works: Azure AD self-service password reset](../authentication/concept-sspr-howitworks.md). -* **Device Services** – Device identity management is the foundation for [device-based Conditional Access](../conditional-access/require-managed-devices.md). With device-based Conditional Access policies, you can ensure that access to resources in your environment is only possible with managed devices. For more information, see [What is a device identity?](../devices/overview.md) +* **Device services**. Device identity management is the foundation for [device-based Conditional Access](../conditional-access/require-managed-devices.md). With device-based Conditional Access policies, you can ensure that access to resources in your environment is only possible with managed devices. For more information, see [What is a device identity](../devices/overview.md). -* **Self-Service Group Management** – You can enable users to create and manage their own security groups or Microsoft 365 groups in Azure Active Directory (Azure AD). The owner of the group can approve or deny membership requests and can delegate control of group membership. Self-service group management features are not available for mail-enabled security groups or distribution lists. For more information, see [Set up self-service group management in Azure Active Directory](../enterprise-users/groups-self-service-management.md). +* **Self-service group management**. You can enable users to create and manage their own security groups or Microsoft 365 groups in Azure AD. The owner of the group can approve or deny membership requests and can delegate control of group membership. Self-service group management features aren't available for mail-enabled security groups or distribution lists. For more information, see [Set up self-service group management in Azure Active Directory](../enterprise-users/groups-self-service-management.md). -* **Risk detections** – contains information about other risks triggered when a risk is detected and other pertinent information such as sign-in location and any details from Microsoft Defender for Cloud Apps. +* **Risk detections**. Contains information about other risks triggered when a risk is detected and other pertinent information such as sign-in location and any details from Microsoft Defender for Cloud Apps. 
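
One way to begin baselining the components listed above is to summarize recent directory activity by the Azure AD service that generated it. The following Kusto sketch is illustrative only: it assumes Azure AD audit logs are routed to a Log Analytics workspace through diagnostic settings, and the table and column names (`AuditLogs`, `LoggedByService`, `Category`) follow the standard schema, so verify them against your workspace before relying on the query.

```
// Illustrative baseline (assumes Azure AD audit logs are exported to Log Analytics):
// count directory operations per originating service and category, per day,
// so that unexpected spikes in any one component stand out.
AuditLogs
| where TimeGenerated > ago(14d)
| summarize Operations = count() by LoggedByService, Category, bin(TimeGenerated, 1d)
| order by TimeGenerated asc, Operations desc
```

Capture the typical daily counts for each service as your baseline, and alert on significant deviations rather than on individual events.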
## Next steps See these security operations guide articles: -[Azure AD security operations overview](security-operations-introduction.md) - -[Security operations for user accounts](security-operations-user-accounts.md) - -[Security operations for privileged accounts](security-operations-privileged-accounts.md) - -[Security operations for Privileged Identity Management](security-operations-privileged-identity-management.md) - -[Security operations for applications](security-operations-applications.md) - -[Security operations for devices](security-operations-devices.md) - - -[Security operations for infrastructure](security-operations-infrastructure.md) +* [Azure AD security operations overview](security-operations-introduction.md) +* [Security operations for user accounts](security-operations-user-accounts.md) +* [Security operations for privileged accounts](security-operations-privileged-accounts.md) +* [Security operations for Privileged Identity Management](security-operations-privileged-identity-management.md) +* [Security operations for applications](security-operations-applications.md) +* [Security operations for devices](security-operations-devices.md) +* [Security operations for infrastructure](security-operations-infrastructure.md) diff --git a/articles/active-directory/fundamentals/security-operations-privileged-accounts.md b/articles/active-directory/fundamentals/security-operations-privileged-accounts.md index 4fcee0228c441..8db6e145cca38 100644 --- a/articles/active-directory/fundamentals/security-operations-privileged-accounts.md +++ b/articles/active-directory/fundamentals/security-operations-privileged-accounts.md @@ -1,19 +1,20 @@ --- -title: Azure Active Directory security operations for privileged accounts -description: Learn to set baselines, and then monitor and alert on potential security issues with privileged accounts in Azure Active directory. +title: Security operations for privileged accounts in Azure Active Directory +description: Learn about baselines, and how to monitor and alert on potential security issues with privileged accounts in Azure Active Directory. services: active-directory author: BarbaraSelden manager: martinco ms.service: active-directory ms.workload: identity ms.subservice: fundamentals -ms.topic: conceptual -ms.date: 07/15/2021 +ms.topic: how-to +ms.date: 04/29/2022 ms.author: baselden +ms.custom: kr2b-contr-experiment ms.collection: M365-identity-device-management --- -# Security operations for privileged accounts +# Security operations for privileged accounts in Azure Active Directory The security of business assets depends on the integrity of the privileged accounts that administer your IT systems. Cyber attackers use credential theft attacks and other means to target privileged accounts and gain access to sensitive data. @@ -27,7 +28,7 @@ You're entirely responsible for all layers of security for your on-premises IT e * For more information on securing access for privileged users, see [Securing privileged access for hybrid and cloud deployments in Azure AD](../roles/security-planning.md). * For a wide range of videos, how-to guides, and content of key concepts for privileged identity, see [Privileged Identity Management documentation](../privileged-identity-management/index.yml). 
-## Where to look +## Log files to monitor The log files you use for investigation and monitoring are: @@ -37,29 +38,29 @@ The log files you use for investigation and monitoring are: From the Azure portal, you can view the Azure AD Audit logs and download as comma-separated value (CSV) or JavaScript Object Notation (JSON) files. The Azure portal has several ways to integrate Azure AD logs with other tools that allow for greater automation of monitoring and alerting: -* [Microsoft Sentinel](../../sentinel/overview.md): Enables intelligent security analytics at the enterprise level by providing security information and event management (SIEM) capabilities. -* [Azure Monitor](../../azure-monitor/overview.md): Enables automated monitoring and alerting of various conditions. Can create or use workbooks to combine data from different sources. -* [Azure Event Hubs](../../event-hubs/event-hubs-about.md) integrated with a SIEM: Enables [Azure AD logs to be pushed to other SIEMs](../reports-monitoring/tutorial-azure-monitor-stream-logs-to-event-hub.md) such as Splunk, ArcSight, QRadar, and Sumo Logic via the Azure Event Hubs integration. -* [Microsoft Defender for Cloud Apps](/cloud-app-security/what-is-cloud-app-security): Enables you to discover and manage apps, govern across apps and resources, and check your cloud apps' compliance. -* **Microsoft Graph**: Enables you to export data and use Microsoft Graph to do more analysis. For more information on Microsoft Graph, see [Microsoft Graph PowerShell SDK and Azure Active Directory Identity Protection](../identity-protection/howto-identity-protection-graph-api.md). -* [Identity Protection](../identity-protection/overview-identity-protection.md): Generates three key reports you can use to help with your investigation: +* **[Microsoft Sentinel](../../sentinel/overview.md)**. Enables intelligent security analytics at the enterprise level by providing security information and event management (SIEM) capabilities. +* **[Azure Monitor](../../azure-monitor/overview.md)**. Enables automated monitoring and alerting of various conditions. Can create or use workbooks to combine data from different sources. +* **[Azure Event Hubs](../../event-hubs/event-hubs-about.md)** integrated with a SIEM. Enables Azure AD logs to be pushed to other SIEMs such as Splunk, ArcSight, QRadar, and Sumo Logic via the Azure Event Hubs integration. For more information, see [Stream Azure Active Directory logs to an Azure event hub](../reports-monitoring/tutorial-azure-monitor-stream-logs-to-event-hub.md). +* **[Microsoft Defender for Cloud Apps](/cloud-app-security/what-is-cloud-app-security)**. Enables you to discover and manage apps, govern across apps and resources, and check your cloud apps' compliance. +* **Microsoft Graph**. Enables you to export data and use Microsoft Graph to do more analysis. For more information, see [Microsoft Graph PowerShell SDK and Azure Active Directory Identity Protection](../identity-protection/howto-identity-protection-graph-api.md). +* **[Identity Protection](../identity-protection/overview-identity-protection.md)**. Generates three key reports you can use to help with your investigation: - * **Risky users**: Contains information about which users are at risk, details about detections, history of all risky sign-ins, and risk history. - * **Risky sign-ins**: Contains information about a sign-in that might indicate suspicious circumstances. 
For more information on investigating information from this report, see [Investigate risk](../identity-protection/howto-identity-protection-investigate-risk.md). - * **Risk detections**: Contains information about other risks triggered when a risk is detected and other pertinent information such as sign-in location and any details from Microsoft Defender for Cloud Apps. + * **Risky users**. Contains information about which users are at risk, details about detections, history of all risky sign-ins, and risk history. + * **Risky sign-ins**. Contains information about a sign-in that might indicate suspicious circumstances. For more information on investigating information from this report, see [Investigate risk](../identity-protection/howto-identity-protection-investigate-risk.md). + * **Risk detections**. Contains information about other risks triggered when a risk is detected and other pertinent information such as sign-in location and any details from Microsoft Defender for Cloud Apps. -* **[Securing workload identities with Identity Protection Preview](..//identity-protection/concept-workload-identity-risk.md)** - Used to detect risk on workload identities across sign-in behavior and offline indicators of compromise. +* **[Securing workload identities with Identity Protection Preview](..//identity-protection/concept-workload-identity-risk.md)**. Use to detect risk on workload identities across sign-in behavior and offline indicators of compromise. Although we discourage the practice, privileged accounts can have standing administration rights. If you choose to use standing privileges, and the account is compromised, it can have a strongly negative effect. We recommend you prioritize monitoring privileged accounts and include the accounts in your Privileged Identity Management (PIM) configuration. For more information on PIM, see [Start using Privileged Identity Management](../privileged-identity-management/pim-getting-started.md). Also, we recommend you validate that admin accounts: * Are required. * Have the least privilege to execute the require activities. -* Are protected with multifactor authentication (MFA) at a minimum. +* Are protected with multifactor authentication at a minimum. * Are run from privileged access workstation (PAW) or secure admin workstation (SAW) devices. -The rest of this article describes what we recommend you monitor and alert on. The article is organized by the type of threat. Where there are specific prebuilt solutions, we link to them following the table. Otherwise, you can build alerts by using the preceding tools. +The rest of this article describes what we recommend you monitor and alert on. The article is organized by the type of threat. Where there are specific prebuilt solutions, we link to them following the table. Otherwise, you can build alerts by using the tools described above. -Specifically, this article provides details on setting baselines and auditing sign-in and usage of privileged accounts. It also discusses tools and resources you can use to help maintain the integrity of your privileged accounts. The content is organized into the following subjects: +This article provides details on setting baselines and auditing sign-in and usage of privileged accounts. It also discusses tools and resources you can use to help maintain the integrity of your privileged accounts. 
The content is organized into the following subjects: * Emergency "break-glass" accounts * Privileged account sign-in @@ -69,7 +70,7 @@ Specifically, this article provides details on setting baselines and auditing si ## Emergency access accounts -It's important that you prevent being accidentally locked out of your Azure AD tenant. You can mitigate the effect of an accidental lockout by creating emergency access accounts in your organization. Emergency access accounts are also known as break-glass accounts, as in "break glass in case of emergency" messages found on physical security equipment like fire alarms. +It's important that you prevent being accidentally locked out of your Azure AD tenant. You can mitigate the effect of an accidental lockout by creating emergency access accounts in your organization. Emergency access accounts are also known as *break-glass accounts*, as in "break glass in case of emergency" messages found on physical security equipment like fire alarms. Emergency access accounts are highly privileged, and they aren't assigned to specific individuals. Emergency access accounts are limited to emergency or break-glass scenarios where normal privileged accounts can't be used. An example is when a Conditional Access policy is misconfigured and locks out all normal administrative accounts. Restrict emergency account use to only the times when it's absolutely necessary. @@ -81,11 +82,10 @@ Send a high-priority alert every time an emergency access account is used. Because break-glass accounts are only used if there's an emergency, your monitoring should discover no account activity. Send a high-priority alert every time an emergency access account is used or changed. Any of the following events might indicate a bad actor is trying to compromise your environments: -* **Account used**: Monitor and alert on any activity by using this type of account, such as: - * Sign-in. - * Account password change. - * Account permission or roles changed. - * Credential or auth method added or changed. +* Sign-in. +* Account password change. +* Account permission or roles changed. +* Credential or auth method added or changed. For more information on managing emergency access accounts, see [Manage emergency access admin accounts in Azure AD](../roles/security-emergency-access.md). For detailed information on creating an alert for an emergency account, see [Create an alert rule](../roles/security-emergency-access.md). @@ -112,14 +112,14 @@ You can monitor privileged account sign-in events in the Azure AD Sign-in logs. | - | - | - | - | - | | Sign-in failure, bad password threshold | High | Azure AD Sign-ins log | Status = Failure
            -and-
            error code = 50126 | Define a baseline threshold and then monitor and adjust to suit your organizational behaviors and limit false alerts from being generated.
            [Azure Sentinel template](https://github.com/Azure/Azure-Sentinel/blob/master/Detections/MultipleDataSources/PrivilegedAccountsSigninFailureSpikes.yaml) | | Failure because of Conditional Access requirement |High | Azure AD Sign-ins log | Status = Failure
            -and-
            error code = 53003
            -and-
            Failure reason = Blocked by Conditional Access | This event can be an indication an attacker is trying to get into the account.
            [Azure Sentinel template](https://github.com/Azure/Azure-Sentinel/blob/master/Detections/SigninLogs/UserAccounts-CABlockedSigninSpikes.yaml) | -| Privileged accounts that don't follow naming policy| | Azure subscription | [List Azure role assignments using the Azure portal - Azure RBAC](../../role-based-access-control/role-assignments-list-portal.md)| List role assignments for subscriptions and alert where the sign-in name doesn't match your organization's format. An example is the use of ADM_ as a prefix. | -| Interrupt | High, medium | Azure AD Sign-ins | Status = Interrupted
            -and-
            error code = 50074
            -and-
            Failure reason = Strong auth required
            Status = Interrupted
            -and-
            Error code = 500121
            Failure reason = Authentication failed during strong authentication request | This event can be an indication an attacker has the password for the account but can't pass the MFA challenge.
            [Azure Sentinel template](https://github.com/Azure/Azure-Sentinel/blob/master/Hunting%20Queries/MultipleDataSources/AADPrivilegedAccountsFailedMFA.yaml) | +| Privileged accounts that don't follow naming policy| | Azure subscription | [List Azure role assignments using the Azure portal](../../role-based-access-control/role-assignments-list-portal.md)| List role assignments for subscriptions and alert where the sign-in name doesn't match your organization's format. An example is the use of ADM_ as a prefix. | +| Interrupt | High, medium | Azure AD Sign-ins | Status = Interrupted
            -and-
            error code = 50074
            -and-
            Failure reason = Strong auth required
            -or-
            Status = Interrupted
            -and-
            Error code = 500121
            -and-
            Failure reason = Authentication failed during strong authentication request | This event can be an indication an attacker has the password for the account but can't pass the multifactor authentication challenge.
            [Azure Sentinel template](https://github.com/Azure/Azure-Sentinel/blob/master/Hunting%20Queries/MultipleDataSources/AADPrivilegedAccountsFailedMFA.yaml) | | Privileged accounts that don't follow naming policy| High | Azure AD directory | [List Azure AD role assignments](../roles/view-assignments.md)| List role assignments for Azure AD roles and alert where the UPN doesn't match your organization's format. An example is the use of ADM_ as a prefix. | -| Discover privileged accounts not registered for MFA | High | Microsoft Graph API| Query for IsMFARegistered eq false for admin accounts. [List credentialUserRegistrationDetails - Microsoft Graph beta](/graph/api/reportroot-list-credentialuserregistrationdetails?view=graph-rest-beta&preserve-view=true&tabs=http) | Audit and investigate to determine if the event is intentional or an oversight. | +| Discover privileged accounts not registered for multifactor authentication | High | Microsoft Graph API| Query for IsMFARegistered eq false for admin accounts. [List credentialUserRegistrationDetails - Microsoft Graph beta](/graph/api/reportroot-list-credentialuserregistrationdetails?view=graph-rest-beta&preserve-view=true&tabs=http) | Audit and investigate to determine if the event is intentional or an oversight. | | Account lockout | High | Azure AD Sign-ins log | Status = Failure
            -and-
            error code = 50053 | Define a baseline threshold, and then monitor and adjust to suit your organizational behaviors and limit false alerts from being generated.
            [Azure Sentinel template](https://github.com/Azure/Azure-Sentinel/blob/master/Hunting%20Queries/MultipleDataSources/PrivilegedAccountsLockedOut.yaml) | | Account disabled or blocked for sign-ins | Low | Azure AD Sign-ins log | Status = Failure
            -and-
            Target = User UPN
            -and-
            error code = 50057 | This event could indicate someone is trying to gain access to an account after they've left the organization. Although the account is blocked, it's still important to log and alert on this activity.
            [Azure Sentinel template](https://github.com/Azure/Azure-Sentinel/blob/master/Hunting%20Queries/SigninLogs/UserAccounts-BlockedAccounts.yaml) | -| MFA fraud alert or block | High | Azure AD Sign-ins log/Azure Log Analytics | Sign-ins>Authentication details Result details = MFA denied, fraud code entered | Privileged user has indicated they haven't instigated the MFA prompt, which could indicate an attacker has the password for the account.
            [Azure Sentinel template](https://github.com/Azure/Azure-Sentinel/blob/master/Detections/SigninLogs/MFARejectedbyUser.yaml) | -| MFA fraud alert or block | High | Azure AD Audit log log/Azure Log Analytics | Activity type = Fraud reported - User is blocked for MFA or fraud reported - No action taken (based on tenant-level settings for fraud report) | Privileged user has indicated they haven't instigated the MFA prompt, which could indicate an attacker has the password for the account.
            [Azure Sentinel template](https://github.com/Azure/Azure-Sentinel/blob/master/Detections/SigninLogs/MFARejectedbyUser.yaml) | +| MFA fraud alert or block | High | Azure AD Sign-ins log/Azure Log Analytics | Sign-ins>Authentication details Result details = MFA denied, fraud code entered | Privileged user has indicated they haven't instigated the multifactor authentication prompt, which could indicate an attacker has the password for the account.
            [Azure Sentinel template](https://github.com/Azure/Azure-Sentinel/blob/master/Detections/SigninLogs/MFARejectedbyUser.yaml) | +| MFA fraud alert or block | High | Azure AD Audit log/Azure Log Analytics | Activity type = Fraud reported - User is blocked for MFA or fraud reported - No action taken (based on tenant-level settings for fraud report) | Privileged user has indicated they haven't instigated the multifactor authentication prompt, which could indicate an attacker has the password for the account.
            [Azure Sentinel template](https://github.com/Azure/Azure-Sentinel/blob/master/Detections/SigninLogs/MFARejectedbyUser.yaml) | | Privileged account sign-ins outside of expected controls | | Azure AD Sign-ins log | Status = Failure
            UserPricipalName = \
            Location = \
            IP address = \
            Device info = \ | Monitor and alert on any entries that you've defined as unapproved.
            [Azure Sentinel template](https://github.com/Azure/Azure-Sentinel/blob/master/Hunting%20Queries/SigninLogs/SuspiciousSignintoPrivilegedAccount.yaml) | | Outside of normal sign-in times | High | Azure AD Sign-ins log | Status = Success
            -and-
            Location =
            -and-
            Time = Outside of working hours | Monitor and alert if sign-ins occur outside of expected times. It's important to find the normal working pattern for each privileged account and to alert if there are unplanned changes outside of normal working times. Sign-ins outside of normal working hours could indicate compromise or possible insider threats.
            [Azure Sentinel template](https://github.com/Azure/Azure-Sentinel/blob/master/Hunting%20Queries/MultipleDataSources/AnomolousSignInsBasedonTime.yaml) | | Identity protection risk | High | Identity Protection logs | Risk state = At risk
            -and-
            Risk level = Low, medium, high
            -and-
            Activity = Unfamiliar sign-in/TOR, and so on | This event indicates there's some abnormality detected with the sign-in for the account and should be alerted on. | @@ -127,25 +127,26 @@ You can monitor privileged account sign-in events in the Azure AD Sign-in logs. | Change in legacy authentication protocol | High | Azure AD Sign-ins log | Client App = Other client, IMAP, POP3, MAPI, SMTP, and so on
            -and-
            Username = UPN
            -and-
            Application = Exchange (example) | Many attacks use legacy authentication, so if there's a change in auth protocol for the user, it could be an indication of an attack. | | New device or location | High | Azure AD Sign-ins log | Device info = Device ID
            -and-
            Browser
            -and-
            OS
            -and-
            Compliant/Managed
            -and-
            Target = User
            -and-
            Location | Most admin activity should be from [privileged access devices](/security/compass/privileged-access-devices), from a limited number of locations. For this reason, alert on new devices or locations.
            [Azure Sentinel template](https://github.com/Azure/Azure-Sentinel/blob/master/Hunting%20Queries/SigninLogs/SuspiciousSignintoPrivilegedAccount.yaml) | | Audit alert setting is changed | High | Azure AD Audit logs | Service = PIM
            -and-
            Category = Role management
            -and-
            Activity = Disable PIM alert
            -and-
            Status = Success | Changes to a core alert should be alerted if unexpected. | -| Administrators authenticating to other Azure AD tenants| Medium| Azure AD Sign-ins log| Status = success

            Resource tenantID != Home Tenant ID| When scoped to Privileged Users this detects when an administrator has successfully authenticated to another Azure AD tenant with an identity in your organization's tenant.

            Alert if Resource TenantID is not equal to Home Tenant ID | -|Admin User state changed from Guest to Member|Medium|Azure AD Audit logs|Activity: Update user

            Category: UserManagement

            UserType changed from Guest to Member|Monitor and alert on change of user type from Guest to Member.

            Was this expected? +| Administrators authenticating to other Azure AD tenants| Medium| Azure AD Sign-ins log| Status = success

            Resource tenantID != Home Tenant ID| When scoped to Privileged Users, this monitor detects when an administrator has successfully authenticated to another Azure AD tenant with an identity in your organization's tenant.

            Alert if Resource TenantID isn't equal to Home Tenant ID | +|Admin User state changed from Guest to Member|Medium|Azure AD Audit logs|Activity: Update user

            Category: UserManagement

            UserType changed from Guest to Member|Monitor and alert on change of user type from Guest to Member.

            Was this change expected? |Guest users invited to tenant by non-approved inviters|Medium|Azure AD Audit logs|Activity: Invite external user

            Category: UserManagement

            Initiated by (actor): User Principal Name|Monitor and alert on non-approved actors inviting external users. + ## Changes by privileged accounts Monitor all completed and attempted changes by a privileged account. This data enables you to establish what's normal activity for each privileged account and alert on activity that deviates from the expected. The Azure AD Audit logs are used to record this type of event. For more information on Azure AD Audit logs, see [Audit logs in Azure Active Directory](../reports-monitoring/concept-audit-logs.md). ### Azure Active Directory Domain Services -Privileged accounts that have been assigned permissions in Azure AD Domain Services can perform tasks for Azure AD Domain Services that affect the security posture of your Azure-hosted virtual machines (VMs) that use Azure AD Domain Services. Enable security audits on VMs and monitor the logs. For more information on enabling Azure AD Domain Services audits and for a list of sensitive privileges, see the following resources: +Privileged accounts that have been assigned permissions in Azure AD Domain Services can perform tasks for Azure AD Domain Services that affect the security posture of your Azure-hosted virtual machines that use Azure AD Domain Services. Enable security audits on virtual machines and monitor the logs. For more information on enabling Azure AD Domain Services audits and for a list of sensitive privileges, see the following resources: * [Enable security audits for Azure Active Directory Domain Services](../../active-directory-domain-services/security-audit-events.md) * [Audit Sensitive Privilege Use](/windows/security/threat-protection/auditing/audit-sensitive-privilege-use) -| What to monitor | Risk level | Where | Filter/subfilter | Notes | -|-------------------------------------------------------------------------|------------|---------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Attempted and completed changes | High | Azure AD Audit logs | Date and time
            -and-
            Service
            -and-
            Category and name of the activity (what)
            -and-
            Status = Success or failure
            -and-
            Target
            -and-
            Initiator or actor (who) | Any unplanned changes should be alerted on immediately. These logs should be retained to assist in any investigation. Any tenant-level changes should be investigated immediately (link out to Infra doc) that would lower the security posture of your tenant. An example is excluding accounts from MFA or Conditional Access. Alert on any [additions or changes to applications](security-operations-applications.md). | -| **EXAMPLE**
            Attempted or completed change to high-value apps or services | High | Audit log | Service
            -and-
            Category and name of the activity |
          • Date and time
          • Service
          • Category and name of the activity
          • Status = Success or failure
          • Target
          • Initiator or actor (who) | -| Privileged changes in Azure AD Domain Services | High | Azure AD Domain Services | Look for event [4673](/windows/security/threat-protection/auditing/event-4673) | [Enable security audits for Azure Active Directory Domain Services](../../active-directory-domain-services/security-audit-events.md)
            [Audit Sensitive Privilege use](/windows/security/threat-protection/auditing/audit-sensitive-privilege-use). See the article for a list of all privileged events. | +| What to monitor | Risk level | Where | Filter/subfilter | Notes | +|------------------|------------|---------------------|-------------------------------|-------------------------| +| Attempted and completed changes | High | Azure AD Audit logs | Date and time
            -and-
            Service
            -and-
            Category and name of the activity (what)
            -and-
            Status = Success or failure
            -and-
            Target
            -and-
            Initiator or actor (who) | Any unplanned changes should be alerted on immediately. These logs should be retained to help with any investigation. Any tenant-level changes that would lower the security posture of your tenant should be investigated immediately. An example is excluding accounts from multifactor authentication or Conditional Access. Alert on any additions or changes to applications. See [Azure Active Directory security operations guide for Applications](security-operations-applications.md). | +| **EXAMPLE**
            Attempted or completed change to high-value apps or services | High | Audit log | Service
            -and-
            Category and name of the activity |
          • Date and time
          • Service
          • Category and name of the activity
          • Status = Success or failure
          • Target
          • Initiator or actor (who) | +| Privileged changes in Azure AD Domain Services | High | Azure AD Domain Services | Look for event [4673](/windows/security/threat-protection/auditing/event-4673) | [Enable security audits for Azure Active Directory Domain Services](../../active-directory-domain-services/security-audit-events.md)
            For a list of all privileged events, see [Audit Sensitive Privilege use](/windows/security/threat-protection/auditing/audit-sensitive-privilege-use). | ## Changes to privileged accounts @@ -157,8 +158,8 @@ Investigate changes to privileged accounts' authentication rules and privileges, | Changes to authentication methods| High| Azure AD Audit logs| Service = Authentication Method
            -and-
            Activity type = User registered security information
            -and-
            Category = User management| This change could be an indication of an attacker adding an auth method to the account so they can have continued access.
            [Azure Sentinel template](https://github.com/Azure/Azure-Sentinel/blob/master/Detections/MultipleDataSources/AuthenticationMethodsChangedforPrivilegedAccount.yaml) | | Alert on changes to privileged account permissions| High| Azure AD Audit logs| Category = Role management
            -and-
            Activity type = Add eligible member (permanent)
            -and-
            Activity type = Add eligible member (eligible)
            -and-
            Status = Success or failure
            -and-
            Modified properties = Role.DisplayName| This alert is especially for accounts being assigned roles that aren't known or are outside of their normal responsibilities. | | Unused privileged accounts| Medium| Azure AD Access Reviews| | Perform a monthly review for inactive privileged user accounts. | -| Accounts exempt from Conditional Access| High| Azure Monitor Logs
            -or-
            Access Reviews| Conditional Access = Insights and reporting| Any account exempt from Conditional Access is most likely bypassing security controls and is more vulnerable to compromise. Break-glass accounts are exempt. See information on how to monitor break-glass accounts in a subsequent section of this article.| -| Addition of a Temporary Access Pass to a privileged account| High| Azure AD Audit logs| Activity: Admin registered security info

            Status Reason: Admin registered temporary access pass method for user

            Category: UserManagement

            Initiated by (actor): User Principal Name

            Target:User Principal Name|Monitor and alert on a Temporary Access Pass being created for a privileged user. +| Accounts exempt from Conditional Access| High| Azure Monitor Logs
            -or-
            Access Reviews| Conditional Access = Insights and reporting| Any account exempt from Conditional Access is most likely bypassing security controls and is more vulnerable to compromise. Break-glass accounts are exempt. See information on how to monitor break-glass accounts later in this article.| +| Addition of a Temporary Access Pass to a privileged account| High| Azure AD Audit logs| Activity: Admin registered security info

            Status Reason: Admin registered temporary access pass method for user

            Category: UserManagement

            Initiated by (actor): User Principal Name

            Target: User Principal Name|Monitor and alert on a Temporary Access Pass being created for a privileged user. For more information on how to monitor for exceptions to Conditional Access policies, see [Conditional Access insights and reporting](../conditional-access/howto-conditional-access-insights-reporting.md). @@ -170,28 +171,28 @@ Having privileged accounts that are permanently provisioned with elevated abilit ### Establish a baseline -To monitor for exceptions, you must first create a baseline. Determine the following information for: +To monitor for exceptions, you must first create a baseline. Determine the following information for these elements -* **Admin accounts**: +* **Admin accounts** - * Your privileged account strategy - * Use of on-premises accounts to administer on-premises resources - * Use of cloud-based accounts to administer cloud-based resources - * Approach to separating and monitoring administrative permissions for on-premises and cloud-based resources + * Your privileged account strategy + * Use of on-premises accounts to administer on-premises resources + * Use of cloud-based accounts to administer cloud-based resources + * Approach to separating and monitoring administrative permissions for on-premises and cloud-based resources -* **Privileged role protection**: +* **Privileged role protection** - * Protection strategy for roles that have administrative privileges - * Organizational policy for using privileged accounts - * Strategy and principles for maintaining permanent privilege versus providing time-bound and approved access + * Protection strategy for roles that have administrative privileges + * Organizational policy for using privileged accounts + * Strategy and principles for maintaining permanent privilege versus providing time-bound and approved access -The following concepts and information will help you determine policies: +The following concepts and information help determine policies: -* **Just-in-time admin principles**: Use the Azure AD logs to capture information for performing administrative tasks that are common in your environment. Determine the typical amount of time needed to complete the tasks. -* **Just-enough admin principles**: [Determine the least-privileged role](../roles/delegate-by-task.md), which might be a custom role, that's needed for administrative tasks. -* **Establish an elevation policy**: After you have insight into the type of elevated privilege needed and how long is needed for each task, create policies that reflect elevated privileged usage for your environment. As an example, define a policy to limit Global admin access to one hour. +* **Just-in-time admin principles**. Use the Azure AD logs to capture information for performing administrative tasks that are common in your environment. Determine the typical amount of time needed to complete the tasks. +* **Just-enough admin principles**. Determine the least-privileged role, which might be a custom role, that's needed for administrative tasks. For more information, see [Least privileged roles by task in Azure Active Directory](../roles/delegate-by-task.md). +* **Establish an elevation policy**. After you have insight into the type of elevated privilege needed and how long is needed for each task, create policies that reflect elevated privileged usage for your environment. As an example, define a policy to limit Global admin access to one hour. 
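+
+As one input to this baseline, you can review recent role activations when Azure AD audit logs are exported to a Log Analytics workspace. The following Kusto sketch is illustrative only: it assumes the `AuditLogs` table is populated through diagnostic settings, and the service and operation names for Privileged Identity Management (PIM) events are assumptions you should verify against your own tenant's audit data.
+
+```kusto
+// Sketch: count recent PIM role activations per administrator and role.
+// Assumes Azure AD audit logs are streamed to Log Analytics; verify the service and operation names in your tenant.
+AuditLogs
+| where TimeGenerated > ago(30d)
+| where LoggedByService == "PIM"
+| where OperationName == "Add member to role completed (PIM activation)"
+| extend Role = tostring(TargetResources[0].displayName)
+| extend Actor = tostring(InitiatedBy.user.userPrincipalName)
+| summarize Activations = count() by Actor, Role
+| order by Activations desc
+```
+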
- After you establish your baseline and set policy, you can configure monitoring to detect and alert usage outside of policy. +After you establish your baseline and set policy, you can configure monitoring to detect and alert usage outside of policy. ### Discovery @@ -199,7 +200,7 @@ Pay particular attention to and investigate changes in assignment and elevation ### Things to monitor -You can monitor privileged account changes by using Azure AD Audit logs and Azure Monitor logs. Specifically, include the following changes in your monitoring process. +You can monitor privileged account changes by using Azure AD Audit logs and Azure Monitor logs. Include the following changes in your monitoring process. | What to monitor| Risk level| Where| Filter/subfilter| Notes | | - | - | - | - | - | @@ -215,7 +216,7 @@ For more information about managing elevation, see [Elevate access to manage all For information about configuring alerts for Azure roles, see [Configure security alerts for Azure resource roles in Privileged Identity Management](../privileged-identity-management/pim-resource-roles-configure-alerts.md). - ## Next steps +## Next steps See these security operations guide articles: diff --git a/articles/active-directory/fundamentals/whats-new-archive.md b/articles/active-directory/fundamentals/whats-new-archive.md index abfbe8a598d79..5784a94f98cb4 100644 --- a/articles/active-directory/fundamentals/whats-new-archive.md +++ b/articles/active-directory/fundamentals/whats-new-archive.md @@ -31,6 +31,202 @@ The What's new in Azure Active Directory? release notes provide information abou --- +## November 2021 + +### Tenant enablement of combined security information registration for Azure Active Directory + +**Type:** Plan for change +**Service category:** MFA +**Product capability:** Identity Security & Protection + +We previously announced in April 2020, a new combined registration experience enabling users to register authentication methods for SSPR and multi-factor authentication at the same time was generally available for existing customer to opt in. Any Azure AD tenants created after August 2020 automatically have the default experience set to combined registration. Starting 2022, Microsoft will be enabling the MFA/SSPR combined registration experience for existing customers. [Learn more](../authentication/concept-registration-mfa-sspr-combined.md). + +--- + +### Windows users will see prompts more often when switching user accounts + +**Type:** Fixed +**Service category:** Authentications (Logins) +**Product capability:** User Authentication + +A problematic interaction between Windows and a local Active Directory Federation Services (ADFS) instance can result in users attempting to sign into another account, but be silently signed into their existing account instead, with no warning. For federated IdPs such as ADFS, that support the [prompt=login](/windows-server/identity/ad-fs/operations/ad-fs-prompt-login) pattern, Azure AD will now trigger a fresh login at ADFS when a user is directed to ADFS with a login hint. This ensures that the user is signed into the account they requested, rather than being silently signed into the account they're already signed in with. + +For more information, see the [change notice](../develop/reference-breaking-changes.md). 
+ +--- + +### Public preview - Conditional Access Overview Dashboard + +**Type:** New feature +**Service category:** Conditional Access +**Product capability:** Monitoring & Reporting + +The new Conditional Access overview dashboard enables all tenants to see insights about the impact of their Conditional Access policies without requiring an Azure Monitor subscription. This built-in dashboard provides tutorials to deploy policies, a summary of the policies in your tenant, a snapshot of your policy coverage, and security recommendations. [Learn more](../conditional-access/overview.md). + +--- + +### Public preview - SSPR writeback is now available for disconnected forests using Azure AD Connect cloud sync + +**Type:** New feature +**Service category:** Azure AD Connect Cloud Sync +**Product capability:** Identity Lifecycle Management + +The Public Preview feature for Azure AD Connect Cloud Sync Password writeback provides customers the capability to writeback a user’s password changes in the cloud to the on-premises directory in real time using the lightweight Azure AD cloud provisioning agent.[Learn more](../authentication/tutorial-enable-cloud-sync-sspr-writeback.md). + +--- + +### Public preview - Conditional Access for workload identities + +**Type:** New feature +**Service category:** Conditional Access for workload identities +**Product capability:** Identity Security & Protection + +Previously, Conditional Access policies applied only to users when they access apps and services like SharePoint online or the Azure portal. This preview adds support for Conditional Access policies applied to service principals owned by the organization. You can block service principals from accessing resources from outside trusted-named locations or Azure Virtual Networks. [Learn more](../conditional-access/workload-identity.md). + +--- + +### Public preview - Extra attributes available as claims + +**Type:** Changed feature +**Service category:** Enterprise Apps +**Product capability:** SSO + +Several user attributes have been added to the list of attributes available to map to claims to bring attributes available in claims more in line with what is available on the user object in Microsoft Graph. New attributes include mobilePhone and ProxyAddresses. [Learn more](../develop/reference-claims-mapping-policy-type.md#table-3-valid-id-values-per-source). + +--- + +### Public preview - "Session Lifetime Policies Applied" property in the sign-in logs + +**Type:** New feature +**Service category:** Authentications (Logins) +**Product capability:** Identity Security & Protection + +We have recently added other property to the sign-in logs called "Session Lifetime Policies Applied". This property will list all the session lifetime policies that applied to the sign-in for example, Sign-in frequency, Remember multi-factor authentication and Configurable token lifetime. [Learn more](../reports-monitoring/concept-sign-ins.md#authentication-details). + +--- + +### Public preview - Enriched reviews on access packages in entitlement management + +**Type:** New feature +**Service category:** User Access Management +**Product capability:** Entitlement Management + +Entitlement Management’s enriched review experience allows even more flexibility on access packages reviews. Admins can now choose what happens to access if the reviewers don't respond, provide helper information to reviewers, or decide whether a justification is necessary. [Learn more](../governance/entitlement-management-access-reviews-create.md). 
+ +--- + +### General availability - randomString and redact provisioning functions + +**Type:** New feature +**Service category:** Provisioning +**Product capability:** Outbound to SaaS Applications + + +The Azure AD Provisioning service now supports two new functions, randomString() and Redact(): +- randomString - generate a string based on the length and characters you would like to include or exclude in your string. +- redact - remove the value of the attribute from the audit and provisioning logs. [Learn more](../app-provisioning/functions-for-customizing-application-data.md#randomstring). + +--- + +### General availability - Now access review creators can select users and groups to receive notification on completion of reviews + +**Type:** New feature +**Service category:** Access Reviews +**Product capability:** Identity Governance + +Now access review creators can select users and groups to receive notification on completion of reviews. [Learn more](../governance/create-access-review.md). + +--- + +### General availability - Azure AD users can now view and report suspicious sign-ins and manage their accounts within Microsoft Authenticator + +**Type:** New feature +**Service category:** Microsoft Authenticator App +**Product capability:** Identity Security & Protection + +This feature allows Azure AD users to manage their work or school accounts within the Microsoft Authenticator app. The management features will allow users to view sign-in history and sign-in activity. Users can also report any suspicious or unfamiliar activity, change their Azure AD account passwords, and update the account's security information. + +For more information on how to use this feature visit [View and search your recent sign-in activity from the My Sign-ins page](../user-help/my-account-portal-sign-ins-page.md). + +--- + +### General availability - New Microsoft Authenticator app icon + +**Type:** New feature +**Service category:** Microsoft Authenticator App +**Product capability:** Identity Security & Protection + +New updates have been made to the Microsoft Authenticator app icon. To learn more about these updates, see the [Microsoft Authenticator app](https://techcommunity.microsoft.com/t5/azure-active-directory-identity/microsoft-authenticator-app-easier-ways-to-add-or-manage/ba-p/2464408) blog post. + +--- + +### General availability - Azure AD single Sign-on and device-based Conditional Access support in Firefox on Windows 10/11 + +**Type:** New feature +**Service category:** Authentications (Logins) +**Product capability:** SSO + +We now support native single sign-on (SSO) support and device-based Conditional Access to Firefox browser on Windows 10 and Windows Server 2019 starting in Firefox version 91. [Learn more](../conditional-access/require-managed-devices.md#prerequisites). 
+ +--- + +### New provisioning connectors in the Azure AD Application Gallery - November 2021 + +**Type:** New feature +**Service category:** App Provisioning +**Product capability:** 3rd Party Integration + +You can now automate creating, updating, and deleting user accounts for these newly integrated apps: + +- [Appaegis Isolation Access Cloud](../saas-apps/appaegis-isolation-access-cloud-provisioning-tutorial.md) +- [BenQ IAM](../saas-apps/benq-iam-provisioning-tutorial.md) +- [BIC Cloud Design](../saas-apps/bic-cloud-design-provisioning-tutorial.md) +- [Chaos](../saas-apps/chaos-provisioning-tutorial.md) +- [directprint.io](../saas-apps/directprint-io-provisioning-tutorial.md) +- [Documo](../saas-apps/documo-provisioning-tutorial.md) +- [Facebook Work Accounts](../saas-apps/facebook-work-accounts-provisioning-tutorial.md) +- [introDus Pre and Onboarding Platform](../saas-apps/introdus-pre-and-onboarding-platform-provisioning-tutorial.md) +- [Kisi Physical Security](../saas-apps/kisi-physical-security-provisioning-tutorial.md) +- [Klaxoon](../saas-apps/klaxoon-provisioning-tutorial.md) +- [Klaxoon SAML](../saas-apps/klaxoon-saml-provisioning-tutorial.md) +- [MX3 Diagnostics](../saas-apps/mx3-diagnostics-connector-provisioning-tutorial.md) +- [Netpresenter](../saas-apps/netpresenter-provisioning-tutorial.md) +- [Peripass](../saas-apps/peripass-provisioning-tutorial.md) +- [Real Links](../saas-apps/real-links-provisioning-tutorial.md) +- [Sentry](../saas-apps/sentry-provisioning-tutorial.md) +- [Teamgo](../saas-apps/teamgo-provisioning-tutorial.md) +- [Zero](../saas-apps/zero-provisioning-tutorial.md) + +For more information about how to better secure your organization by using automated user account provisioning, see [Automate user provisioning to SaaS applications with Azure AD](../manage-apps/user-provisioning.md). 
+ +--- + +### New Federated Apps available in Azure AD Application gallery - November 2021 + +**Type:** New feature +**Service category:** Enterprise Apps +**Product capability:** 3rd Party Integration + +In November 2021, we have added following 32 new applications in our App gallery with Federation support: + +[Tide - Connector](https://gallery.ctinsuretech-tide.com/), [Virtual Risk Manager - USA](../saas-apps/virtual-risk-manager-usa-tutorial.md), [Xorlia Policy Management](https://app.xoralia.com/), [WorkPatterns](https://app.workpatterns.com/oauth2/login?data_source_type=office_365_account_calendar_workspace_sync&utm_source=azure_sso), [GHAE](../saas-apps/ghae-tutorial.md), [Nodetrax Project](../saas-apps/nodetrax-project-tutorial.md), [Touchstone Benchmarking](https://app.touchstonebenchmarking.com/), [SURFsecureID - Azure MFA](../saas-apps/surfsecureid-azure-mfa-tutorial.md), [AiDEA](https://truebluecorp.com/en/prodotti/aidea-en/),[R and D Tax Credit Services: 10-wk Implementation](../saas-apps/r-and-d-tax-credit-services-tutorial.md), [Mapiq Essentials](../saas-apps/mapiq-essentials-tutorial.md), [Celtra Authentication Service](https://auth.celtra.com/login), [Compete HR](https://app.competewith.com/auth/login), [Snackmagic](../saas-apps/snackmagic-tutorial.md), [FileOrbis](../saas-apps/fileorbis-tutorial.md), [ClarivateWOS](../saas-apps/clarivatewos-tutorial.md), [RewardCo Engagement Cloud](https://cloud.live.rewardco.com/oauth/login), [ZoneVu](https://zonevu.ubiterra.com/onboarding/index), [V-Client](../saas-apps/v-client-tutorial.md), [Netpresenter Next](https://www.netpresenter.com/), [UserTesting](../saas-apps/usertesting-tutorial.md), [InfinityQS ProFicient on Demand](../saas-apps/infinityqs-proficient-on-demand-tutorial.md), [Feedonomics](https://auth.feedonomics.com/), [Customer Voice](https://cx.pobuca.com/), [Zanders Inside](https://home.zandersinside.com/), [Connecter](https://teamwork.connecterapp.com/azure_login), [Paychex Flex](https://login.flex.paychex.com/azfed-app/v1/azure/federation/admin), [InsightSquared](https://us2.insightsquared.com/#/boards/office365.com/settings/userconnection), [Kiteline Health](https://my.kitelinehealth.com/), [Fabrikam Enterprise Managed User (OIDC)](https://github.com/login), [PROXESS for Office365](https://www.proxess.de/office365), [Coverity Static Application Security Testing](../saas-apps/coverity-static-application-security-testing-tutorial.md) + +You can also find the documentation of all the applications [here](../saas-apps/tutorial-list.md). + +For listing your application in the Azure AD app gallery, read the details [here](../manage-apps/v2-howto-app-gallery-listing.md). + +--- + +### Updated "switch organizations" user experience in My Account. + +**Type:** Changed feature +**Service category:** My Profile/Account +**Product capability:** End User Experiences + +Updated "switch organizations" user interface in My Account. This visually improves the UI and provides the end-user with clear instructions. Added a manage organizations link to blade per customer feedback. [Learn more](https://support.microsoft.com/account-billing/switch-organizations-in-your-work-or-school-account-portals-c54c32c9-2f62-4fad-8c23-2825ed49d146). 
+ +--- + ## October 2021 ### Limits on the number of configured API permissions for an application registration will be enforced starting in October 2021 diff --git a/articles/active-directory/fundamentals/whats-new.md b/articles/active-directory/fundamentals/whats-new.md index 1e638a150c0aa..3c2e87fa9ea0f 100644 --- a/articles/active-directory/fundamentals/whats-new.md +++ b/articles/active-directory/fundamentals/whats-new.md @@ -31,6 +31,165 @@ Azure AD receives improvements on an ongoing basis. To stay up to date with the This page is updated monthly, so revisit it regularly. If you're looking for items older than six months, you can find them in [Archive for What's new in Azure Active Directory](whats-new-archive.md). +## May 2022 + +### General Availability: Tenant-based service outage notifications + +**Type:** Plan for change +**Service category:** Other +**Product capability:** Platform + + +Azure Service Health will soon support service outage notifications to Tenant Admins for Azure Active Directory issues in the near future. These outages will also appear on the Azure AD admin portal overview page with appropriate links to Azure Service Health. Outage events will be able to be seen by built-in Tenant Administrator Roles. We will continue to send outage notifications to subscriptions within a tenant for a period of transition. More information will be available when this capability is released. The expected release is for June 2022. + +--- + + + +### New Federated Apps available in Azure AD Application gallery - May 2022 + +**Type:** New feature +**Service category:** Enterprise Apps +**Product capability:** 3rd Party Integration + + + +In May 2022 we've added the following 25 new applications in our App gallery with Federation support: + +[UserZoom](../saas-apps/userzoom-tutorial.md), [AMX Mobile](https://www.amxsolutions.co.uk/), [i-Sight](../saas-apps/isight-tutorial.md), [Method InSight](https://digital.methodrecycling.com/), [Chronus SAML](../saas-apps/chronus-saml-tutorial.md), [Attendant Console for Microsoft Teams](https://attendant.anywhere365.io/), [Skopenow](../saas-apps/skopenow-tutorial.md), [Fidelity PlanViewer](../saas-apps/fidelity-planviewer-tutorial.md), [Lyve Cloud](../saas-apps/lyve-cloud-tutorial.md), [Framer](../saas-apps/framer-tutorial.md), [Authomize](../saas-apps/authomize-tutorial.md), [gamba!](../saas-apps/gamba-tutorial.md), [Datto File Protection Single Sign On](../saas-apps/datto-file-protection-tutorial.md), [LONEALERT](https://portal.lonealert.co.uk/auth/azure/saml/signin), [Payfactors](https://pf.payfactors.com/client/auth/login), [deBroome Brand Portal](../saas-apps/debroome-brand-portal-tutorial.md), [TeamSlide](../saas-apps/teamslide-tutorial.md), [Sensera Systems](https://sitecloud.senserasystems.com/), [YEAP](https://prismaonline.propay.be/logon/login.aspx), [Monaca Education](https://monaca.education/ja/signup), [Personify Inc](https://personifyinc.com/login), [Phenom TXM](../saas-apps/phenom-txm-tutorial.md), [Forcepoint Cloud Security Gateway - User Authentication](../saas-apps/forcepoint-cloud-security-gateway-tutorial.md), [GoalQuest](../saas-apps/goalquest-tutorial.md), [OpenForms](https://login.openforms.com/Login). 
+ +You can also find the documentation of all the applications from here https://aka.ms/AppsTutorial, + +For listing your application in the Azure AD app gallery, please read the details here https://aka.ms/AzureADAppRequest + + + + + +--- + + +### General Availability – My Apps users can make apps from URLs (add sites) + +**Type:** New feature +**Service category:** My Apps +**Product capability:** End User Experiences + + +When editing a collection using the My Apps portal, users can now add their own sites, in addition to adding apps that have been assigned to them by an admin. To add a site, users must provide a name and URL. For more information on how to use this feature, see: [Customize app collections in the My Apps portal](https://support.microsoft.com/account-billing/customize-app-collections-in-the-my-apps-portal-2dae6b8a-d8b0-4a16-9a5d-71ed4d6a6c1d). + + +--- + + +### Public preview - New provisioning connectors in the Azure AD Application Gallery - May 2022 + +**Type:** New feature +**Service category:** App Provisioning +**Product capability:** 3rd Party Integration + + +You can now automate creating, updating, and deleting user accounts for these newly integrated apps: + +- [Alinto Protect](../saas-apps/alinto-protect-provisioning-tutorial.md) +- [Blinq](../saas-apps/blinq-provisioning-tutorial.md) +- [Cerby](../saas-apps/cerby-provisioning-tutorial.md) + +For more information about how to better secure your organization by using automated user account provisioning, see: [Automate user provisioning to SaaS applications with Azure AD](../app-provisioning/user-provisioning.md). + + +--- + + +### Public Preview: Confirm safe and compromised in signIns API beta + +**Type:** New feature +**Service category:** Identity Protection +**Product capability:** Identity Security & Protection + + +The signIns Microsoft Graph API now supports confirming safe and compromised on risky sign-ins. This public preview functionality is available at the beta endpoint. For more information, please check out the Microsoft Graph documentation: [signIn: confirmSafe - Microsoft Graph beta | Microsoft Docs](/graph/api/signin-confirmsafe?view=graph-rest-beta&preserve-view=true) + + +--- + + +### Public Preview of Microsoft cloud settings for Azure AD B2B + +**Type:** New feature +**Service category:** B2B +**Product capability:** B2B/B2C +**Clouds impacted:** China;Public (M365,GCC);US Gov (GCC-H, DoD) + + +Microsoft cloud settings let you collaborate with organizations from different Microsoft Azure clouds. With Microsoft cloud settings, you can establish mutual B2B collaboration between the following clouds: + +-Microsoft Azure global cloud and Microsoft Azure Government +-Microsoft Azure global cloud and Microsoft Azure China 21Vianet + +To learn more about Microsoft cloud settings for B2B collaboration, see: [Cross-tenant access overview - Azure AD | Microsoft Docs](../external-identities/cross-tenant-access-overview.md#microsoft-cloud-settings). + + +--- + + +### General Availability of SAML and WS-Fed federation in External Identities + +**Type:** Changed feature +**Service category:** B2B +**Product capability:** B2B/B2C +**Clouds impacted:** Public (M365,GCC);US Gov (GCC-H, DoD) + + +When setting up federation with a partner's IdP, new guest users from that domain can use their own IdP-managed organizational account to sign in to your Azure AD tenant and start collaborating with you. There's no need for the guest user to create a separate Azure AD account. 
To learn more about federating with SAML or WS-Fed identity providers in External Identities, see: [Federation with a SAML/WS-Fed identity provider (IdP) for B2B - Azure AD | Microsoft Docs](../external-identities/direct-federation.md). + + +--- + + +### Public Preview - Create Group in Administrative Unit + +**Type:** Changed feature +**Service category:** Directory Management +**Product capability:** Access Control +**Clouds impacted:** China;Public (M365,GCC);US Gov (GCC-H, DoD) + + +Groups Administrators assigned over the scope of an administrative unit can now create groups within the administrative unit. This enables scoped group administrators to create groups that they can manage directly, without needing to elevate to Global Administrator or Privileged Role Administrator. For more information, see: [Administrative units in Azure Active Directory](../roles/administrative-units.md). + + +--- + + +### Public Preview - Dynamic administrative unit support for onPremisesDistinguishedName property + +**Type:** Changed feature +**Service category:** Directory Management +**Product capability:** AuthZ/Access Delegation +**Clouds impacted:** Public (M365,GCC) + + +The public preview of dynamic administrative units now supports the **onPremisesDistinguishedName** property for users. This makes it possible to create dynamic rules which incorporate the organizational unit of the user from on-premises AD. For more information, see: [Manage users or devices for an administrative unit with dynamic membership rules (Preview)](../roles/admin-units-members-dynamic.md). + + +--- + + +### General Availability - Improvements to Azure AD Smart Lockout + +**Type:** Changed feature +**Service category:** Other +**Product capability:** User Management +**Clouds impacted:** China;Public (M365,GCC);US Gov (GCC-H, DoD);US Nat;US Sec + + +Smart Lockout now synchronizes the lockout state across Azure AD data centers, so the total number of failed sign-in attempts allowed before an account is locked out will match the configured lockout threshold. For more information, see: [Protect user accounts from attacks with Azure Active Directory smart lockout](../authentication/howto-password-smart-lockout.md). + + +--- + + + ## April 2022 ### General Availability - Microsoft Defender for Endpoint Signal in Identity Protection @@ -71,15 +230,6 @@ With a recent improvement, Smart Lockout now synchronizes the lockout state acro --- -### Public Preview - Enabling customization capabilities for the Self-Service Password Reset (SSPR) hyperlinks, footer hyperlinks and browser icons in Company Branding. - -**Type:** New feature -**Service category:** Authentications (Logins) -**Product capability:** User Authentication - -Updating the Company Branding functionality on the Azure AD/Microsoft 365 sign-in experience to allow customizing Self Service Password Reset (SSPR) hyperlinks, footer hyperlinks and browser icon. For more information, see: [Add branding to your organization’s Azure Active Directory sign-in page](customize-branding.md). 
- ---- ### Public Preview - Integration of Microsoft 365 App Certification details into AAD UX and Consent Experiences @@ -92,15 +242,6 @@ Microsoft 365 Certification status for an app is now available in Azure AD conse --- -### Public Preview - Organizations can replace all references to Microsoft on the AAD auth experience - -**Type:** New feature -**Service category:** Authentications (Logins) -**Product capability:** User Authentication - -Updating the Company Branding functionality on the Azure AD/Microsoft 365 sign-in experience to allow customizing Self Service Password Reset (SSPR) hyperlinks, footer hyperlinks and browser icon. For more information, see: [Add branding to your organization’s Azure Active Directory sign-in page](customize-branding.md). - ---- ### Public preview - Use Azure AD access reviews to review access of B2B direct connect users in Teams shared channels @@ -165,7 +306,7 @@ We highly recommend enabling this new protection when using Azure AD Multi-Facto **Service category:** Enterprise Apps **Product capability:** Third Party Integration -In April 2022 we added the following 24 new applications in our App gallery with Federation support +In April 2022 we added the following 24 new applications in our App gallery with Federation support: [X-1FBO](https://www.x1fbo.com/), [select Armor](https://app.clickarmor.ca/), [Smint.io Portals for SharePoint](https://www.smint.io/portals-for-sharepoint/), [Pluto](../saas-apps/pluto-tutorial.md), [ADEM](../saas-apps/adem-tutorial.md), [Smart360](../saas-apps/smart360-tutorial.md), [MessageWatcher SSO](https://messagewatcher.com/), [Beatrust](../saas-apps/beatrust-tutorial.md), [AeyeScan](https://aeyescan.com/azure_sso), [ABa Customer](https://abacustomer.com/), [Twilio Sendgrid](../saas-apps/twilio-sendgrid-tutorial.md), [Vault Platform](../saas-apps/vault-platform-tutorial.md), [Speexx](../saas-apps/speexx-tutorial.md), [Clicksign](https://app.clicksign.com/signin), [Per Angusta](../saas-apps/per-angusta-tutorial.md), [EruditAI](https://dashboard.erudit.ai/login), [MetaMoJi ClassRoom](https://business.metamoji.com/), [Numici](https://app.numici.com/), [MCB.CLOUD](https://identity.mcb.cloud/Identity/Account/Manage), [DepositLink](https://depositlink.com/external-login), [Last9](https://auth.last9.io/auth), [ParkHere Corporate](../saas-apps/parkhere-corporate-tutorial.md), [Keepabl](../saas-apps/keepabl-tutorial.md), [Swit](../saas-apps/swit-tutorial.md) You can also find the documentation of all the applications from here https://aka.ms/AppsTutorial. @@ -641,201 +782,6 @@ We’re no longer publishing sign-in logs with the following error codes because --- -## November 2021 - -### Tenant enablement of combined security information registration for Azure Active Directory - -**Type:** Plan for change -**Service category:** MFA -**Product capability:** Identity Security & Protection - -We previously announced in April 2020, a new combined registration experience enabling users to register authentication methods for SSPR and multi-factor authentication at the same time was generally available for existing customer to opt in. Any Azure AD tenants created after August 2020 automatically have the default experience set to combined registration. Starting 2022, Microsoft will be enabling the MFA/SSPR combined registration experience for existing customers. [Learn more](../authentication/concept-registration-mfa-sspr-combined.md). 
- ---- - -### Windows users will see prompts more often when switching user accounts - -**Type:** Fixed -**Service category:** Authentications (Logins) -**Product capability:** User Authentication - -A problematic interaction between Windows and a local Active Directory Federation Services (ADFS) instance can result in users attempting to sign into another account, but be silently signed into their existing account instead, with no warning. For federated IdPs such as ADFS, that support the [prompt=login](/windows-server/identity/ad-fs/operations/ad-fs-prompt-login) pattern, Azure AD will now trigger a fresh login at ADFS when a user is directed to ADFS with a login hint. This ensures that the user is signed into the account they requested, rather than being silently signed into the account they're already signed in with. - -For more information, see the [change notice](../develop/reference-breaking-changes.md). - ---- - -### Public preview - Conditional Access Overview Dashboard - -**Type:** New feature -**Service category:** Conditional Access -**Product capability:** Monitoring & Reporting - -The new Conditional Access overview dashboard enables all tenants to see insights about the impact of their Conditional Access policies without requiring an Azure Monitor subscription. This built-in dashboard provides tutorials to deploy policies, a summary of the policies in your tenant, a snapshot of your policy coverage, and security recommendations. [Learn more](../conditional-access/overview.md). - ---- - -### Public preview - SSPR writeback is now available for disconnected forests using Azure AD Connect cloud sync - -**Type:** New feature -**Service category:** Azure AD Connect Cloud Sync -**Product capability:** Identity Lifecycle Management - -The Public Preview feature for Azure AD Connect Cloud Sync Password writeback provides customers the capability to writeback a user’s password changes in the cloud to the on-premises directory in real time using the lightweight Azure AD cloud provisioning agent.[Learn more](../authentication/tutorial-enable-cloud-sync-sspr-writeback.md). - ---- - -### Public preview - Conditional Access for workload identities - -**Type:** New feature -**Service category:** Conditional Access for workload identities -**Product capability:** Identity Security & Protection - -Previously, Conditional Access policies applied only to users when they access apps and services like SharePoint online or the Azure portal. This preview adds support for Conditional Access policies applied to service principals owned by the organization. You can block service principals from accessing resources from outside trusted-named locations or Azure Virtual Networks. [Learn more](../conditional-access/workload-identity.md). - ---- - -### Public preview - Extra attributes available as claims - -**Type:** Changed feature -**Service category:** Enterprise Apps -**Product capability:** SSO - -Several user attributes have been added to the list of attributes available to map to claims to bring attributes available in claims more in line with what is available on the user object in Microsoft Graph. New attributes include mobilePhone and ProxyAddresses. [Learn more](../develop/reference-claims-mapping-policy-type.md#table-3-valid-id-values-per-source). 
- ---- - -### Public preview - "Session Lifetime Policies Applied" property in the sign-in logs - -**Type:** New feature -**Service category:** Authentications (Logins) -**Product capability:** Identity Security & Protection - -We have recently added other property to the sign-in logs called "Session Lifetime Policies Applied". This property will list all the session lifetime policies that applied to the sign-in for example, Sign-in frequency, Remember multi-factor authentication and Configurable token lifetime. [Learn more](../reports-monitoring/concept-sign-ins.md#authentication-details). - ---- - -### Public preview - Enriched reviews on access packages in entitlement management - -**Type:** New feature -**Service category:** User Access Management -**Product capability:** Entitlement Management - -Entitlement Management’s enriched review experience allows even more flexibility on access packages reviews. Admins can now choose what happens to access if the reviewers don't respond, provide helper information to reviewers, or decide whether a justification is necessary. [Learn more](../governance/entitlement-management-access-reviews-create.md). - ---- - -### General availability - randomString and redact provisioning functions - -**Type:** New feature -**Service category:** Provisioning -**Product capability:** Outbound to SaaS Applications - - -The Azure AD Provisioning service now supports two new functions, randomString() and Redact(): -- randomString - generate a string based on the length and characters you would like to include or exclude in your string. -- redact - remove the value of the attribute from the audit and provisioning logs. [Learn more](../app-provisioning/functions-for-customizing-application-data.md#randomstring). - ---- - -### General availability - Now access review creators can select users and groups to receive notification on completion of reviews - -**Type:** New feature -**Service category:** Access Reviews -**Product capability:** Identity Governance - -Now access review creators can select users and groups to receive notification on completion of reviews. [Learn more](../governance/create-access-review.md). - ---- - -### General availability - Azure AD users can now view and report suspicious sign-ins and manage their accounts within Microsoft Authenticator - -**Type:** New feature -**Service category:** Microsoft Authenticator App -**Product capability:** Identity Security & Protection - -This feature allows Azure AD users to manage their work or school accounts within the Microsoft Authenticator app. The management features will allow users to view sign-in history and sign-in activity. Users can also report any suspicious or unfamiliar activity, change their Azure AD account passwords, and update the account's security information. - -For more information on how to use this feature visit [View and search your recent sign-in activity from the My Sign-ins page](../user-help/my-account-portal-sign-ins-page.md). - ---- - -### General availability - New Microsoft Authenticator app icon - -**Type:** New feature -**Service category:** Microsoft Authenticator App -**Product capability:** Identity Security & Protection - -New updates have been made to the Microsoft Authenticator app icon. To learn more about these updates, see the [Microsoft Authenticator app](https://techcommunity.microsoft.com/t5/azure-active-directory-identity/microsoft-authenticator-app-easier-ways-to-add-or-manage/ba-p/2464408) blog post. 
- ---- - -### General availability - Azure AD single Sign-on and device-based Conditional Access support in Firefox on Windows 10/11 -**Type:** New feature -**Service category:** Authentications (Logins) -**Product capability:** SSO - -We now support native single sign-on (SSO) support and device-based Conditional Access to Firefox browser on Windows 10 and Windows Server 2019 starting in Firefox version 91. [Learn more](../conditional-access/require-managed-devices.md#prerequisites). - ---- - -### New provisioning connectors in the Azure AD Application Gallery - November 2021 - -**Type:** New feature -**Service category:** App Provisioning -**Product capability:** 3rd Party Integration - -You can now automate creating, updating, and deleting user accounts for these newly integrated apps: - -- [Appaegis Isolation Access Cloud](../saas-apps/appaegis-isolation-access-cloud-provisioning-tutorial.md) -- [BenQ IAM](../saas-apps/benq-iam-provisioning-tutorial.md) -- [BIC Cloud Design](../saas-apps/bic-cloud-design-provisioning-tutorial.md) -- [Chaos](../saas-apps/chaos-provisioning-tutorial.md) -- [directprint.io](../saas-apps/directprint-io-provisioning-tutorial.md) -- [Documo](../saas-apps/documo-provisioning-tutorial.md) -- [Facebook Work Accounts](../saas-apps/facebook-work-accounts-provisioning-tutorial.md) -- [introDus Pre and Onboarding Platform](../saas-apps/introdus-pre-and-onboarding-platform-provisioning-tutorial.md) -- [Kisi Physical Security](../saas-apps/kisi-physical-security-provisioning-tutorial.md) -- [Klaxoon](../saas-apps/klaxoon-provisioning-tutorial.md) -- [Klaxoon SAML](../saas-apps/klaxoon-saml-provisioning-tutorial.md) -- [MX3 Diagnostics](../saas-apps/mx3-diagnostics-connector-provisioning-tutorial.md) -- [Netpresenter](../saas-apps/netpresenter-provisioning-tutorial.md) -- [Peripass](../saas-apps/peripass-provisioning-tutorial.md) -- [Real Links](../saas-apps/real-links-provisioning-tutorial.md) -- [Sentry](../saas-apps/sentry-provisioning-tutorial.md) -- [Teamgo](../saas-apps/teamgo-provisioning-tutorial.md) -- [Zero](../saas-apps/zero-provisioning-tutorial.md) - -For more information about how to better secure your organization by using automated user account provisioning, see [Automate user provisioning to SaaS applications with Azure AD](../manage-apps/user-provisioning.md). 
- ---- - -### New Federated Apps available in Azure AD Application gallery - November 2021 - -**Type:** New feature -**Service category:** Enterprise Apps -**Product capability:** 3rd Party Integration - -In November 2021, we have added following 32 new applications in our App gallery with Federation support: - -[Tide - Connector](https://gallery.ctinsuretech-tide.com/), [Virtual Risk Manager - USA](../saas-apps/virtual-risk-manager-usa-tutorial.md), [Xorlia Policy Management](https://app.xoralia.com/), [WorkPatterns](https://app.workpatterns.com/oauth2/login?data_source_type=office_365_account_calendar_workspace_sync&utm_source=azure_sso), [GHAE](../saas-apps/ghae-tutorial.md), [Nodetrax Project](../saas-apps/nodetrax-project-tutorial.md), [Touchstone Benchmarking](https://app.touchstonebenchmarking.com/), [SURFsecureID - Azure MFA](../saas-apps/surfsecureid-azure-mfa-tutorial.md), [AiDEA](https://truebluecorp.com/en/prodotti/aidea-en/),[R and D Tax Credit Services: 10-wk Implementation](../saas-apps/r-and-d-tax-credit-services-tutorial.md), [Mapiq Essentials](../saas-apps/mapiq-essentials-tutorial.md), [Celtra Authentication Service](https://auth.celtra.com/login), [Compete HR](https://app.competewith.com/auth/login), [Snackmagic](../saas-apps/snackmagic-tutorial.md), [FileOrbis](../saas-apps/fileorbis-tutorial.md), [ClarivateWOS](../saas-apps/clarivatewos-tutorial.md), [RewardCo Engagement Cloud](https://cloud.live.rewardco.com/oauth/login), [ZoneVu](https://zonevu.ubiterra.com/onboarding/index), [V-Client](../saas-apps/v-client-tutorial.md), [Netpresenter Next](https://www.netpresenter.com/), [UserTesting](../saas-apps/usertesting-tutorial.md), [InfinityQS ProFicient on Demand](../saas-apps/infinityqs-proficient-on-demand-tutorial.md), [Feedonomics](https://auth.feedonomics.com/), [Customer Voice](https://cx.pobuca.com/), [Zanders Inside](https://home.zandersinside.com/), [Connecter](https://teamwork.connecterapp.com/azure_login), [Paychex Flex](https://login.flex.paychex.com/azfed-app/v1/azure/federation/admin), [InsightSquared](https://us2.insightsquared.com/#/boards/office365.com/settings/userconnection), [Kiteline Health](https://my.kitelinehealth.com/), [Fabrikam Enterprise Managed User (OIDC)](https://github.com/login), [PROXESS for Office365](https://www.proxess.de/office365), [Coverity Static Application Security Testing](../saas-apps/coverity-static-application-security-testing-tutorial.md) - -You can also find the documentation of all the applications [here](../saas-apps/tutorial-list.md). - -For listing your application in the Azure AD app gallery, read the details [here](../manage-apps/v2-howto-app-gallery-listing.md). - ---- - -### Updated "switch organizations" user experience in My Account. - -**Type:** Changed feature -**Service category:** My Profile/Account -**Product capability:** End User Experiences - -Updated "switch organizations" user interface in My Account. This visually improves the UI and provides the end-user with clear instructions. Added a manage organizations link to blade per customer feedback. [Learn more](https://support.microsoft.com/account-billing/switch-organizations-in-your-work-or-school-account-portals-c54c32c9-2f62-4fad-8c23-2825ed49d146). 
- ---- - diff --git a/articles/active-directory/governance/TOC.yml b/articles/active-directory/governance/TOC.yml index 0368b32e389ea..3780581177ef9 100644 --- a/articles/active-directory/governance/TOC.yml +++ b/articles/active-directory/governance/TOC.yml @@ -29,7 +29,7 @@ - name: Manage access to resources - Microsoft Graph href: /graph/tutorial-access-package-api?toc=/azure/active-directory/governance/toc.json&bc=/azure/active-directory/governance/breadcrumb/toc.json - name: Manage access to resources - PowerShell - href: /powershell/microsoftgraph/tutorial-entitlement-management?view=graph-powershell-beta + href: /powershell/microsoftgraph/tutorial-entitlement-management - name: Review access to Microsoft 365 groups - Microsoft Graph href: /graph/tutorial-accessreviews-m365group - name: Review access to security groups - Microsoft Graph diff --git a/articles/active-directory/governance/access-reviews-application-preparation.md b/articles/active-directory/governance/access-reviews-application-preparation.md index 651d83df6a318..b5084b545b6fc 100644 --- a/articles/active-directory/governance/access-reviews-application-preparation.md +++ b/articles/active-directory/governance/access-reviews-application-preparation.md @@ -70,7 +70,7 @@ The integration patterns listed above are applicable to third party SaaS applica Now that you have identified the integration pattern for the application, check the application as represented in Azure AD is ready for review. 1. In the Azure portal, click **Azure Active Directory**, click **Enterprise Applications**, and check whether your application is on the [list of enterprise applications](../manage-apps/view-applications-portal.md) in your Azure AD tenant. -1. If the application is not already listed, then check if the application is available the [application gallery](../manage-apps/overview-application-gallery.md) for applications that can be integrated for federated SSO or provisioning. If it is in the gallery, then use the [tutorials](../saas-apps/tutorial-list.md) to configure the application for federation, and if it supports provisioning, also [configure the application](/app-provisioning/configure-automatic-user-provisioning-portal.md) for provisioning. +1. If the application is not already listed, then check if the application is available the [application gallery](../manage-apps/overview-application-gallery.md) for applications that can be integrated for federated SSO or provisioning. If it is in the gallery, then use the [tutorials](../saas-apps/tutorial-list.md) to configure the application for federation, and if it supports provisioning, also [configure the application](/azure/active-directory/app-provisioning/configure-automatic-user-provisioning-portal) for provisioning. 1. One the application is in the list of enterprise applications in your tenant, select the application from the list. 1. Change to the **Properties** tab. Verify that the **User assignment required?** option is set to **Yes**. If it's set to **No**, all users in your directory, including external identities, can access the application, and you can't review access to the application. 
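If you want to verify the **User assignment required?** setting programmatically rather than in the portal, the same toggle is exposed as the `appRoleAssignmentRequired` property on the application's service principal in Microsoft Graph. A minimal sketch, assuming the enterprise application's display name is `Example App` (the name is a placeholder):

```http
GET https://graph.microsoft.com/v1.0/servicePrincipals?$filter=displayName eq 'Example App'&$select=id,appRoleAssignmentRequired
```

If `appRoleAssignmentRequired` comes back as `false`, access to the application isn't limited to assigned users, which matches the portal guidance above about not being able to review access in that state.
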
diff --git a/articles/active-directory/governance/conditional-access-exclusion.md b/articles/active-directory/governance/conditional-access-exclusion.md index 8c5db291d2a43..6ab922fcda78e 100644 --- a/articles/active-directory/governance/conditional-access-exclusion.md +++ b/articles/active-directory/governance/conditional-access-exclusion.md @@ -1,5 +1,5 @@ --- -title: Manage users excluded from Conditional Access policies - Azure AD +title: Manage users excluded from Conditional Access policies description: Learn how to use Azure Active Directory (Azure AD) access reviews to manage users that have been excluded from Conditional Access policies services: active-directory documentationcenter: '' diff --git a/articles/active-directory/governance/entitlement-management-external-users.md b/articles/active-directory/governance/entitlement-management-external-users.md index 78356642d0198..ab497499a4a53 100644 --- a/articles/active-directory/governance/entitlement-management-external-users.md +++ b/articles/active-directory/governance/entitlement-management-external-users.md @@ -86,6 +86,9 @@ To ensure people outside of your organization can request access packages and ge - For more information about Azure AD B2B external collaboration settings, see [Configure external collaboration settings](../external-identities/external-collaboration-settings-configure.md). ![Azure AD external collaboration settings](./media/entitlement-management-external-users/collaboration-settings.png) + + > [!NOTE] + > If you create a connected organization for an Azure AD tenant from a different Microsoft cloud, you also need to configure cross-tenant access settings appropriately. For more information on how to configure these settings, see [Configure cross-tenant access settings](../external-identities/cross-cloud-settings.md). 
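For reference, the cross-tenant access settings mentioned in the note above can also be managed through Microsoft Graph. As a rough sketch only (the tenant ID is a placeholder, the call is shown against the beta endpoint that exposed the cross-tenant access policy at the time of writing, and inbound/outbound settings still need to be configured separately), adding a partner configuration for a connected organization's tenant might look like this:

```http
POST https://graph.microsoft.com/beta/policies/crossTenantAccessPolicy/partners
Content-Type: application/json

{
  "tenantId": "00000000-0000-0000-0000-000000000000"
}
```
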
### Review your Conditional Access policies diff --git a/articles/active-directory/governance/entitlement-management-logs-and-reporting.md b/articles/active-directory/governance/entitlement-management-logs-and-reporting.md index 13308b2917a90..9a50a5b979a82 100644 --- a/articles/active-directory/governance/entitlement-management-logs-and-reporting.md +++ b/articles/active-directory/governance/entitlement-management-logs-and-reporting.md @@ -55,7 +55,7 @@ Archiving Azure AD audit logs requires you to have Azure Monitor in an Azure sub ## View events for an access package -To view events for an access package, you must have access to the underlying Azure monitor workspace (see [Manage access to log data and workspaces in Azure Monitor](../../azure-monitor/logs/manage-access.md#manage-access-using-azure-permissions) for information) and in one of the following roles: +To view events for an access package, you must have access to the underlying Azure monitor workspace (see [Manage access to log data and workspaces in Azure Monitor](../../azure-monitor/logs/manage-access.md#azure-rbac) for information) and in one of the following roles: - Global administrator - Security administrator diff --git a/articles/active-directory/governance/entitlement-management-organization.md b/articles/active-directory/governance/entitlement-management-organization.md index 0e9cb8d024d06..10811d75b2d51 100644 --- a/articles/active-directory/governance/entitlement-management-organization.md +++ b/articles/active-directory/governance/entitlement-management-organization.md @@ -31,7 +31,7 @@ A connected organization is another organization that you have a relationship wi There are three ways that entitlement management lets you specify the users that form a connected organization. It could be -* users in another Azure AD directory, +* users in another Azure AD directory (from any Microsoft cloud), * users in another non-Azure AD directory that has been configured for direct federation, or * users in another non-Azure AD directory, whose email addresses all have the same domain name in common. diff --git a/articles/active-directory/governance/identity-governance-overview.md b/articles/active-directory/governance/identity-governance-overview.md index 5708471637212..c3b8c7bd679fc 100644 --- a/articles/active-directory/governance/identity-governance-overview.md +++ b/articles/active-directory/governance/identity-governance-overview.md @@ -21,7 +21,7 @@ ms.collection: M365-identity-device-management Azure Active Directory (Azure AD) Identity Governance allows you to balance your organization's need for security and employee productivity with the right processes and visibility. It provides you with capabilities to ensure that the right people have the right access to the right resources. These and related Azure AD and Enterprise Mobility + Security features allows you to mitigate access risk by protecting, monitoring, and auditing access to critical assets -- while ensuring employee and business partner productivity. 
-Identity Governance give organizations the ability to do the following tasks across employees, business partners and vendors, and across services and applications both on-premises and in clouds: +Identity Governance gives organizations the ability to do the following tasks across employees, business partners and vendors, and across services and applications both on-premises and in clouds: - Govern the identity lifecycle - Govern access lifecycle @@ -115,4 +115,4 @@ It's a best practice to use the least privileged role to perform administrative - [What is Azure AD entitlement management?](entitlement-management-overview.md) - [What are Azure AD access reviews?](access-reviews-overview.md) - [What is Azure AD Privileged Identity Management?](../privileged-identity-management/pim-configure.md) -- [What can I do with Terms of use?](../conditional-access/terms-of-use.md) \ No newline at end of file +- [What can I do with Terms of use?](../conditional-access/terms-of-use.md) diff --git a/articles/active-directory/hybrid/choose-ad-authn.md b/articles/active-directory/hybrid/choose-ad-authn.md index f76258082f89b..13aac1de69bb2 100644 --- a/articles/active-directory/hybrid/choose-ad-authn.md +++ b/articles/active-directory/hybrid/choose-ad-authn.md @@ -1,6 +1,5 @@ --- title: Authentication for Azure AD hybrid identity solutions -titleSuffix: Active Directory description: This guide helps CEOs, CIOs, CISOs, Chief Identity Architects, Enterprise Architects, and Security and IT decision makers responsible for choosing an authentication method for their Azure AD hybrid identity solution in medium to large organizations. keywords: author: billmath diff --git a/articles/active-directory/hybrid/how-to-connect-fed-group-claims.md b/articles/active-directory/hybrid/how-to-connect-fed-group-claims.md index 3b108091a20d8..698c10526e701 100644 --- a/articles/active-directory/hybrid/how-to-connect-fed-group-claims.md +++ b/articles/active-directory/hybrid/how-to-connect-fed-group-claims.md @@ -252,12 +252,12 @@ Emit group names to be returned in `NetbiosDomain\sAMAccountName` format as the "optionalClaims": { "saml2Token": [{ "name": "groups", - "additionalProperties": ["netbios_name_and_sam_account_name", "emit_as_roles"] + "additionalProperties": ["netbios_domain_and_sam_account_name", "emit_as_roles"] }], "idToken": [{ "name": "groups", - "additionalProperties": ["netbios_name_and_sam_account_name", "emit_as_roles"] + "additionalProperties": ["netbios_domain_and_sam_account_name", "emit_as_roles"] }] } ``` @@ -266,4 +266,4 @@ Emit group names to be returned in `NetbiosDomain\sAMAccountName` format as the - [Add authorization using groups & group claims to an ASP.NET Core web app (code sample)](https://github.com/Azure-Samples/active-directory-aspnetcore-webapp-openidconnect-v2/blob/master/5-WebApp-AuthZ/5-2-Groups/README.md) - [Assign a user or group to an enterprise app](../../active-directory/manage-apps/assign-user-or-group-access-portal.md) -- [Configure role claims](../../active-directory/develop/active-directory-enterprise-app-role-management.md) \ No newline at end of file +- [Configure role claims](../../active-directory/develop/active-directory-enterprise-app-role-management.md) diff --git a/articles/active-directory/hybrid/how-to-connect-fed-sha256-guidance.md b/articles/active-directory/hybrid/how-to-connect-fed-sha256-guidance.md index c1270219d9567..9bec7028ff368 100644 --- a/articles/active-directory/hybrid/how-to-connect-fed-sha256-guidance.md +++ 
b/articles/active-directory/hybrid/how-to-connect-fed-sha256-guidance.md @@ -1,5 +1,5 @@ --- -title: Change signature hash algorithm for Microsoft 365 relying party trust - Azure +title: Change signature hash algorithm for Microsoft 365 relying party trust description: This page provides guidelines for changing SHA algorithm for federation trust with Microsoft 365. keywords: SHA1,SHA256,M365,federation,aadconnect,adfs,ad fs,change sha,federation trust,relying party trust services: active-directory diff --git a/articles/active-directory/hybrid/how-to-connect-fed-single-adfs-multitenant-federation.md b/articles/active-directory/hybrid/how-to-connect-fed-single-adfs-multitenant-federation.md index 9705b6fb53458..0d7d5a8136191 100644 --- a/articles/active-directory/hybrid/how-to-connect-fed-single-adfs-multitenant-federation.md +++ b/articles/active-directory/hybrid/how-to-connect-fed-single-adfs-multitenant-federation.md @@ -1,5 +1,5 @@ --- -title: Federating multiple Azure AD with single AD FS - Azure +title: Federating multiple Azure AD with single AD FS description: In this document, you will learn how to federate multiple Azure AD with a single AD FS. keywords: federate, ADFS, AD FS, multiple tenants, single AD FS, one ADFS, multi-tenant federation, multi-forest adfs, aad connect, federation, cross-tenant federation services: active-directory diff --git a/articles/active-directory/hybrid/reference-connect-sync-attributes-synchronized.md b/articles/active-directory/hybrid/reference-connect-sync-attributes-synchronized.md index 0e5239587075e..4d30651f81456 100644 --- a/articles/active-directory/hybrid/reference-connect-sync-attributes-synchronized.md +++ b/articles/active-directory/hybrid/reference-connect-sync-attributes-synchronized.md @@ -291,7 +291,7 @@ In this case, start with the list of attributes in this topic and identify those | st |X |X | | | | streetAddress |X |X | | | | telephoneNumber |X |X | | | -| thumbnailphoto |X |X | |synced only once from Azure AD to Exchange Online after which Exchange Online becomes source of authority for this attribute and any later changes can't be synced from on-premise. See ([KB](https://support.microsoft.com/help/3062745/user-photos-aren-t-synced-from-the-on-premises-environment-to-exchange)) for more.| +| thumbnailphoto |X |X | |synced only once from Azure AD to Exchange Online after which Exchange Online becomes source of authority for this attribute and any later changes can't be synced from on-premises. See ([KB](https://support.microsoft.com/help/3062745/user-photos-aren-t-synced-from-the-on-premises-environment-to-exchange)) for more.| | title |X |X | | | | usageLocation |X | | |mechanical property. The user’s country/region. Used for license assignment. | | userPrincipalName |X | | |UPN is the login ID for the user. Most often the same as [mail] value. | diff --git a/articles/active-directory/hybrid/tshoot-connect-connectivity.md b/articles/active-directory/hybrid/tshoot-connect-connectivity.md index 77583374a2a79..633ab0eb4e772 100644 --- a/articles/active-directory/hybrid/tshoot-connect-connectivity.md +++ b/articles/active-directory/hybrid/tshoot-connect-connectivity.md @@ -26,7 +26,7 @@ This article explains how connectivity between Azure AD Connect and Azure AD wor Azure AD Connect uses the MSAL library for authentication. The installation wizard and the sync engine proper require machine.config to be properly configured since these two are .NET applications. >[!NOTE] ->Azure AD Connect v1.6.xx.x uses the ADAL library. 
The ADAL library is being depricated and support will end in June 2022. Microsoft recommends that you upgrade to the latest version of [Azure AD Connect v2](whatis-azure-ad-connect-v2.md). +>Azure AD Connect v1.6.xx.x uses the ADAL library. The ADAL library is being deprecated and support will end in June 2022. Microsoft recommends that you upgrade to the latest version of [Azure AD Connect v2](whatis-azure-ad-connect-v2.md). In this article, we show how Fabrikam connects to Azure AD through its proxy. The proxy server is named fabrikamproxy and is using port 8080. diff --git a/articles/active-directory/identity-protection/howto-identity-protection-remediate-unblock.md b/articles/active-directory/identity-protection/howto-identity-protection-remediate-unblock.md index 685bdfcdeb863..452a2629c7e7c 100644 --- a/articles/active-directory/identity-protection/howto-identity-protection-remediate-unblock.md +++ b/articles/active-directory/identity-protection/howto-identity-protection-remediate-unblock.md @@ -67,6 +67,8 @@ If a password reset isn't an option for you, you can choose to dismiss user risk When you select **Dismiss user risk**, all events are closed and the affected user is no longer at risk. However, because this method doesn't have an impact on the existing password, it doesn't bring the related identity back into a safe state. +To **Dismiss user risk**, search for and select **Azure AD Risky users**, select the affected user, and select **Dismiss user(s) risk**. + ### Close individual risk detections manually You can close individual risk detections manually. By closing risk detections manually, you can lower the user risk level. Typically, risk detections are closed manually in response to a related investigation. For example, when talking to a user reveals that an active risk detection isn't required anymore. diff --git a/articles/active-directory/identity-protection/overview-identity-protection.md b/articles/active-directory/identity-protection/overview-identity-protection.md index bbfa4ae699945..fdee8be8e14d0 100644 --- a/articles/active-directory/identity-protection/overview-identity-protection.md +++ b/articles/active-directory/identity-protection/overview-identity-protection.md @@ -6,7 +6,7 @@ services: active-directory ms.service: active-directory ms.subservice: identity-protection ms.topic: overview -ms.date: 06/15/2021 +ms.date: 05/31/2022 ms.author: joflore author: MicrosoftGuyJFlo @@ -31,16 +31,12 @@ The signals generated by and fed to Identity Protection, can be further fed into ## Why is automation important? -In his [blog post in October of 2018](https://techcommunity.microsoft.com/t5/Azure-Active-Directory-Identity/Eight-essentials-for-hybrid-identity-3-Securing-your-identity/ba-p/275843) Alex Weinert, who leads Microsoft's Identity Security and Protection team, explains why automation is so important when dealing with the volume of events: +In the blog post *[Cyber Signals: Defending against cyber threats with the latest research, insights, and trends](https://www.microsoft.com/security/blog/2022/02/03/cyber-signals-defending-against-cyber-threats-with-the-latest-research-insights-and-trends/)* dated February 3, 2022, we shared a threat intelligence brief including the following statistics: -> Each day, our machine learning and heuristic systems provide risk scores for 18 billion login attempts for over 800 million distinct accounts, 300 million of which are discernibly done by adversaries (entities like: criminal actors, hackers).
-> -> At Ignite last year, I spoke about the top 3 attacks on our identity systems. Here is the recent volume of these attacks -> -> - **Breach replay**: 4.6BN attacks detected in May 2018 -> - **Password spray**: 350k in April 2018 -> - **Phishing**: This is hard to quantify exactly, but we saw 23M risk events in March 2018, many of which are phish related +> * Analyzed ...24 trillion security signals combined with intelligence we track by monitoring more than 40 nation-state groups and over 140 threat groups... +> * ...From January 2021 through December 2021, we’ve blocked more than 25.6 billion Azure AD brute force authentication attacks... +This scale of signals and attacks requires some level of automation to be able to keep up. ## Risk detection and remediation Identity Protection identifies risks of many types, including: @@ -53,7 +49,7 @@ Identity Protection identifies risks of many types, including: - Password spray - and more... -More detail on these and other risks including how or when they are calculated can be found in the article, [What is risk](concept-identity-protection-risks.md). +More detail on these and other risks including how or when they're calculated can be found in the article, [What is risk](concept-identity-protection-risks.md). The risk signals can trigger remediation efforts such as requiring users to: perform Azure AD Multi-Factor Authentication, reset their password using self-service password reset, or blocking until an administrator takes action. @@ -69,9 +65,9 @@ More information can be found in the article, [How To: Investigate risk](howto-i ### Risk levels -Identity Protection categorizes risk into three tiers: low, medium, and high. +Identity Protection categorizes risk into tiers: low, medium, and high. -While Microsoft does not provide specific details about how risk is calculated, we will say that each level brings higher confidence that the user or sign-in is compromised. For example, something like one instance of unfamiliar sign-in properties for a user might not be as threatening as leaked credentials for another user. +While Microsoft doesn't provide specific details about how risk is calculated, we'll say that each level brings higher confidence that the user or sign-in is compromised. For example, something like one instance of unfamiliar sign-in properties for a user might not be as threatening as leaked credentials for another user. ## Exporting risk data @@ -79,7 +75,7 @@ Data from Identity Protection can be exported to other tools for archive and fur Information about integrating Identity Protection information with Microsoft Sentinel can be found in the article, [Connect data from Azure AD Identity Protection](../../sentinel/data-connectors-reference.md#azure-active-directory-identity-protection). -Additionally, organizations can choose to store data for longer periods by changing diagnostic settings in Azure AD to send RiskyUsers and UserRiskEvents data to a Log Analytics workspace, archive data to a storage account, stream data to an Event Hub, or send data to a partner solution. Detailed information about how to do so can be found in the article, [How To: Export risk data](howto-export-risk-data.md). +Additionally, organizations can choose to store data for longer periods by changing diagnostic settings in Azure AD to send RiskyUsers and UserRiskEvents data to a Log Analytics workspace, archive data to a storage account, stream data to Event Hubs, or send data to a partner solution. 
Detailed information about how to do so can be found in the article, [How To: Export risk data](howto-export-risk-data.md). ## Permissions @@ -92,7 +88,7 @@ Identity Protection requires users be a Security Reader, Security Operator, Secu | Security operator | View all Identity Protection reports and Overview blade <br><br> Dismiss user risk, confirm safe sign-in, confirm compromise | Configure or change policies <br><br> Reset password for a user <br><br> Configure alerts | | Security reader | View all Identity Protection reports and Overview blade | Configure or change policies <br><br> Reset password for a user <br><br> Configure alerts <br><br> Give feedback on detections | -Currently, the security operator role cannot access the Risky sign-ins report. +Currently, the security operator role can't access the Risky sign-ins report. Conditional Access administrators can also create policies that factor in sign-in risk as a condition. Find more information in the article [Conditional Access: Conditions](../conditional-access/concept-conditional-access-conditions.md#sign-in-risk). @@ -100,17 +96,17 @@ Conditional Access administrators can also create policies that factor in sign-i [!INCLUDE [Active Directory P2 license](../../../includes/active-directory-p2-license.md)] -| Capability | Details | Azure AD Free / Microsoft 365 Apps | Azure AD Premium P1|Azure AD Premium P2 | +| Capability | Details | Azure AD Free / Microsoft 365 Apps | Azure AD Premium P1 | Azure AD Premium P2 | | --- | --- | --- | --- | --- | -| Risk policies | User risk policy (via Identity Protection) | No | No |Yes | -| Risk policies | Sign-in risk policy (via Identity Protection or Conditional Access) | No | No |Yes | -| Security reports | Overview | No | No |Yes | -| Security reports | Risky users | Limited Information. Only users with medium and high risk are shown. No details drawer or risk history. | Limited Information. Only users with medium and high risk are shown. No details drawer or risk history. | Full access| -| Security reports | Risky sign-ins | Limited Information. No risk detail or risk level is shown. | Limited Information. No risk detail or risk level is shown. | Full access| -| Security reports | Risk detections | No | Limited Information. No details drawer.| Full access| -| Notifications | Users at risk detected alerts | No | No |Yes | -| Notifications | Weekly digest| No | No | Yes | -| | MFA registration policy | No | No | Yes | +| Risk policies | User risk policy (via Identity Protection) | No | No | Yes | +| Risk policies | Sign-in risk policy (via Identity Protection or Conditional Access) | No | No | Yes | +| Security reports | Overview | No | No | Yes | +| Security reports | Risky users | Limited Information. Only users with medium and high risk are shown. No details drawer or risk history. | Limited Information. Only users with medium and high risk are shown. No details drawer or risk history. | Full access| +| Security reports | Risky sign-ins | Limited Information. No risk detail or risk level is shown. | Limited Information. No risk detail or risk level is shown. | Full access | +| Security reports | Risk detections | No | Limited Information. No details drawer.| Full access | +| Notifications | Users at risk detected alerts | No | No | Yes | +| Notifications | Weekly digest | No | No | Yes | +| MFA registration policy | | No | No | Yes | More information on these rich reports can be found in the article, [How To: Investigate risk](howto-identity-protection-investigate-risk.md#navigating-the-reports). diff --git a/articles/active-directory/index.yml b/articles/active-directory/index.yml index 78c433108d6b0..60954e0e1b23c 100644 --- a/articles/active-directory/index.yml +++ b/articles/active-directory/index.yml @@ -1,19 +1,19 @@ ### YamlMime:Hub title: Azure Active Directory documentation -summary: Use Azure AD to manage user identities and control access to your apps, data, and resources. +summary: Use Microsoft Azure Active Directory (Azure AD), part of Microsoft Entra, to manage user identities and control access to your apps, data, and resources.
brand: azure ## Used for color theming of icons and hero area metadata: title: Azure Active Directory documentation - description: Azure Active Directory (Azure AD) is Microsoft's multi-tenant, cloud-based directory, and identity management service that combines core directory services, application access management, and identity protection into a single solution. + description: Microsoft Azure Active Directory (Azure AD), part of Microsoft Entra, is Microsoft's multi-tenant, cloud-based directory, and identity management service that combines core directory services, application access management, and identity protection into a single solution. ms.service: active-directory ms.topic: hub-page ms.collection: M365-identity-device-management author: rolyon ms.author: rolyon manager: CelesteDG - ms.date: 01/25/2022 + ms.date: 06/01/2022 highlightedContent: # itemType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | whats-new diff --git a/articles/active-directory/manage-apps/access-panel-collections.md b/articles/active-directory/manage-apps/access-panel-collections.md index 64c4580d90b77..3a78a375c735f 100644 --- a/articles/active-directory/manage-apps/access-panel-collections.md +++ b/articles/active-directory/manage-apps/access-panel-collections.md @@ -1,6 +1,5 @@ --- title: Create collections for My Apps portals -titleSuffix: Azure AD description: Use My Apps collections to Customize My Apps pages for a simpler My Apps experience for your users. Organize applications into groups with separate tabs. services: active-directory author: lnalepa diff --git a/articles/active-directory/manage-apps/add-application-portal-assign-users.md b/articles/active-directory/manage-apps/add-application-portal-assign-users.md index 8830cbc6d10fd..2f0d1f65f27a8 100644 --- a/articles/active-directory/manage-apps/add-application-portal-assign-users.md +++ b/articles/active-directory/manage-apps/add-application-portal-assign-users.md @@ -1,6 +1,5 @@ --- title: 'Quickstart: Create and assign a user account' -titleSuffix: Azure AD description: Create a user account in your Azure Active Directory tenant and assign it to an application. services: active-directory author: omondiatieno @@ -67,4 +66,4 @@ If you are planning to complete the next quickstart, keep the application that y Learn how to set up single sign-on for an enterprise application. > [!div class="nextstepaction"] -> [Enable single sign-on](add-application-portal-setup-sso.md) +> [Enable single sign-on](what-is-single-sign-on.md) diff --git a/articles/active-directory/manage-apps/add-application-portal-configure.md b/articles/active-directory/manage-apps/add-application-portal-configure.md index f2cd611b8499a..206c000b73d8b 100644 --- a/articles/active-directory/manage-apps/add-application-portal-configure.md +++ b/articles/active-directory/manage-apps/add-application-portal-configure.md @@ -1,6 +1,5 @@ --- title: 'Configure enterprise application properties' -titleSuffix: Azure AD description: Configure the properties of an enterprise application in Azure Active Directory. 
services: active-directory author: omondiatieno diff --git a/articles/active-directory/manage-apps/add-application-portal-setup-oidc-sso.md b/articles/active-directory/manage-apps/add-application-portal-setup-oidc-sso.md index ecaa16ef71010..84c51a98dd08f 100644 --- a/articles/active-directory/manage-apps/add-application-portal-setup-oidc-sso.md +++ b/articles/active-directory/manage-apps/add-application-portal-setup-oidc-sso.md @@ -1,7 +1,6 @@ --- title: 'Add an OpenID Connect-based single sign-on application' description: Learn how to add OpenID Connect-based single sign-on application in Azure Active Directory. -titleSuffix: Azure AD services: active-directory author: eringreenlee manager: CelesteDG diff --git a/articles/active-directory/manage-apps/add-application-portal-setup-sso.md b/articles/active-directory/manage-apps/add-application-portal-setup-sso.md index 03b94240cfb18..b3ec826af0bdd 100644 --- a/articles/active-directory/manage-apps/add-application-portal-setup-sso.md +++ b/articles/active-directory/manage-apps/add-application-portal-setup-sso.md @@ -1,16 +1,15 @@ --- title: 'Quickstart: Enable single sign-on for an enterprise application' -titleSuffix: Azure AD description: Enable single sign-on for an enterprise application in Azure Active Directory. services: active-directory -author: omondiatieno +author: davidmu1 manager: CelesteDG ms.service: active-directory ms.subservice: app-mgmt ms.topic: quickstart ms.workload: identity ms.date: 09/21/2021 -ms.author: jomondi +ms.author: davidmu ms.reviewer: ergleenl ms.custom: contperf-fy22q2, mode-other #Customer intent: As an administrator of an Azure AD tenant, I want to enable single sign-on for an enterprise application. diff --git a/articles/active-directory/manage-apps/add-application-portal.md b/articles/active-directory/manage-apps/add-application-portal.md index 8e614917483c6..f2523c4535c33 100644 --- a/articles/active-directory/manage-apps/add-application-portal.md +++ b/articles/active-directory/manage-apps/add-application-portal.md @@ -1,7 +1,6 @@ --- title: 'Quickstart: Add an enterprise application' description: Add an enterprise application in Azure Active Directory. -titleSuffix: Azure AD services: active-directory author: omondiatieno manager: CelesteDG diff --git a/articles/active-directory/manage-apps/admin-consent-workflow-faq.md b/articles/active-directory/manage-apps/admin-consent-workflow-faq.md index 613d6c774b25b..d868d4160d445 100644 --- a/articles/active-directory/manage-apps/admin-consent-workflow-faq.md +++ b/articles/active-directory/manage-apps/admin-consent-workflow-faq.md @@ -1,6 +1,5 @@ --- title: Frequently asked questions about the admin consent workflow -titleSuffix: Azure AD description: Find answers to frequently asked questions (FAQs) about the admin consent workflow. 
services: active-directory author: eringreenlee @@ -8,8 +7,8 @@ manager: CelesteDG ms.service: active-directory ms.subservice: app-mgmt ms.workload: identity -ms.topic: how-to -ms.date: 11/17/2021 +ms.topic: reference +ms.date: 05/27/2022 ms.author: ergreenl ms.reviewer: ergreenl ms.collection: M365-identity-device-management diff --git a/articles/active-directory/manage-apps/admin-consent-workflow-overview.md b/articles/active-directory/manage-apps/admin-consent-workflow-overview.md index 6cbe5226b4a65..5419caac41ca2 100644 --- a/articles/active-directory/manage-apps/admin-consent-workflow-overview.md +++ b/articles/active-directory/manage-apps/admin-consent-workflow-overview.md @@ -1,6 +1,5 @@ --- title: Overview of admin consent workflow -titleSuffix: Azure AD description: Learn about the admin consent workflow in Azure Active Directory services: active-directory author: eringreenlee @@ -11,7 +10,6 @@ ms.workload: identity ms.topic: how-to ms.date: 03/30/2022 ms.author: ergreenl -ms.reviewer: davidmu ms.collection: M365-identity-device-management #customer intent: As an admin, I want to learn about the admin consent workflow and how it affects end-user and admin consent experience diff --git a/articles/active-directory/manage-apps/app-management-powershell-samples.md b/articles/active-directory/manage-apps/app-management-powershell-samples.md index 3d467220ea59a..f2a7b3e2d4013 100644 --- a/articles/active-directory/manage-apps/app-management-powershell-samples.md +++ b/articles/active-directory/manage-apps/app-management-powershell-samples.md @@ -1,16 +1,15 @@ --- title: PowerShell samples in Application Management -titleSuffix: Azure AD description: These PowerShell samples are used for apps you manage in your Azure Active Directory tenant. You can use these sample scripts to find expiration information about secrets and certificates. services: active-directory -author: davidmu1 +author: omondiatieno manager: CelesteDG ms.service: active-directory ms.subservice: app-mgmt ms.workload: identity ms.topic: sample ms.date: 02/18/2021 -ms.author: sureshja +ms.author: jomondi ms.reviewer: sureshja --- diff --git a/articles/active-directory/manage-apps/app-management-videos.md b/articles/active-directory/manage-apps/app-management-videos.md new file mode 100644 index 0000000000000..88a5d42456242 --- /dev/null +++ b/articles/active-directory/manage-apps/app-management-videos.md @@ -0,0 +1,104 @@ +--- +title: Application management videos +description: A list of videos about app registrations, enterprise apps, consent and permissions, and app ownership and assignment in Azure AD +services: azure AD +author: omondiatieno +manager: CelesteDG + +ms.service: active-directory +ms.subservice: app-mgmt +ms.topic: conceptual +ms.workload: identity +ms.date: 05/31/2022 +ms.author: jomondi +ms.reviewer: celested +--- + +# Application management videos + +Learn about the key concepts of application management, such as app registrations vs. enterprise apps, the consent and permissions framework, app ownership, and user assignment. + +## App registrations and Enterprise apps + +Learn about the different use cases and personas involved in App Registrations and Enterprise Apps and how developers and admins interact with each option to manage applications in Azure AD.
+___ + +:::row::: + :::column::: + [What is the difference between app registrations and enterprise apps?](https://www.youtube.com/watch?v=JeahL9ZtGfQ&list=PLlrxD0HtieHiBPIyUWkqVzoMrgfwKi4dY&index=4&t=2s)(2:01) + :::column-end::: + :::column::: + >[!Video https://www.youtube.com/embed/JeahL9ZtGfQ] + :::column-end::: +:::row-end::: + + + +## Consent and permissions for admins + +Learn about the options available for managing consent to applications in a tenant. Learn about delegated permissions and how to revoke previously consented permissions to mitigate risks posed by malicious applications. +___ + +:::row::: + :::column::: + 1 - [How do I turn on the admin consent workflow?](https://www.youtube.com/watch?v=19v7WSt9HwU&list=PLlrxD0HtieHiBPIyUWkqVzoMrgfwKi4dY&index=4)(1:04) + :::column-end::: + :::column::: + >[!Video https://www.youtube.com/embed/19v7WSt9HwU] + :::column-end::: + :::column::: + 2 - [How do I grant admin consent in the Azure AD portal](https://www.youtube.com/watch?v=LSYcelwdhHI&list=PLlrxD0HtieHiBPIyUWkqVzoMrgfwKi4dY&index=5)(1:19) + :::column-end::: + :::column::: + >[!Video https://www.youtube.com/embed/LSYcelwdhHI] + :::column-end::: +:::row-end::: +:::row::: + :::column::: + 3 - [How do delegated permissions work](https://www.youtube.com/watch?v=URTrOXCyH1s&list=PLlrxD0HtieHiBPIyUWkqVzoMrgfwKi4dY&index=7)(1:21) + :::column-end::: + :::column::: + >[!Video https://www.youtube.com/embed/URTrOXCyH1s] + :::column-end::: + :::column::: + 4 - [How do I revoke permissions I've previously consented to for an app](https://www.youtube.com/watch?v=A88uh7ICNJU&list=PLlrxD0HtieHiBPIyUWkqVzoMrgfwKi4dY&index=6)(1:34) + :::column-end::: + :::column::: + >[!Video https://www.youtube.com/embed/A88uh7ICNJU] + :::column-end::: +:::row-end::: + + +## Assigning owners and users to an enterprise app + +Learn about who can assign owners to service principals, how to assign these owners, permissions that owners have, and what to do when an owner leaves the organization. +Learn how to assign users and groups to an enterprise application and how and why an enterprise app may show up in a tenant.
+___ + +:::row::: + :::column::: + 1 - [How can you ensure healthy ownership to manage your Azure AD app ecosystem?](https://www.youtube.com/watch?v=akOrP3mP4UQ&list=PLlrxD0HtieHiBPIyUWkqVzoMrgfwKi4dY&index=1)(2:13) + :::column-end::: + :::column::: + >[!Video https://www.youtube.com/embed/akOrP3mP4UQ] + :::column-end::: + :::column::: + 2 - [How do I manage who can access the applications in my tenant](https://www.youtube.com/watch?v=IVRI9mSPDBA&list=PLlrxD0HtieHiBPIyUWkqVzoMrgfwKi4dY&index=2)(1:48) + :::column-end::: + :::column::: + >[!Video https://www.youtube.com/embed/IVRI9mSPDBA] + :::column-end::: +:::row-end::: +:::row::: + :::column::: + 3 - [Why is this app in my tenant?](https://www.youtube.com/watch?v=NhbcVt5xOVI&list=PLlrxD0HtieHiBPIyUWkqVzoMrgfwKi4dY&index=8)(1:36) + :::column-end::: + :::column::: + >[!Video https://www.youtube.com/embed/NhbcVt5xOVI] + :::column-end::: + :::column::: + + :::column-end::: + :::column::: + + :::column-end::: +:::row-end::: diff --git a/articles/active-directory/manage-apps/application-list.md b/articles/active-directory/manage-apps/application-list.md index 1484a1921c64e..12b5bcb10af5f 100644 --- a/articles/active-directory/manage-apps/application-list.md +++ b/articles/active-directory/manage-apps/application-list.md @@ -1,6 +1,5 @@ --- title: Viewing apps using your tenant for identity management -titleSuffix: Azure AD description: Understand how to view all applications using your Azure Active Directory tenant for identity management. services: active-directory author: AllisonAm diff --git a/articles/active-directory/manage-apps/application-management-certs-faq.md b/articles/active-directory/manage-apps/application-management-certs-faq.md index 15d1301b4911d..df23ddb7d7b23 100644 --- a/articles/active-directory/manage-apps/application-management-certs-faq.md +++ b/articles/active-directory/manage-apps/application-management-certs-faq.md @@ -1,16 +1,15 @@ --- title: Application Management certificates frequently asked questions -titleSuffix: Azure AD description: Learn answers to frequently asked questions (FAQ) about managing certificates for apps using Azure Active Directory as an Identity Provider (IdP). services: active-directory -author: davidmu1 +author: omondiatieno manager: CelesteDG ms.service: active-directory ms.subservice: app-mgmt ms.workload: identity ms.topic: reference ms.date: 03/19/2021 -ms.author: sureshja +ms.author: jomondi ms.reviewer: sureshja, saumadan --- diff --git a/articles/active-directory/manage-apps/application-properties.md b/articles/active-directory/manage-apps/application-properties.md index dd94c83522d5e..2b0b390cdcdb5 100644 --- a/articles/active-directory/manage-apps/application-properties.md +++ b/articles/active-directory/manage-apps/application-properties.md @@ -1,6 +1,5 @@ --- title: 'Properties of an enterprise application' -titleSuffix: Azure AD description: Learn about the properties of an enterprise application in Azure Active Directory. 
services: active-directory author: eringreenlee diff --git a/articles/active-directory/manage-apps/application-sign-in-other-problem-access-panel.md b/articles/active-directory/manage-apps/application-sign-in-other-problem-access-panel.md index d069ce3b67504..b0262b7ee9de3 100644 --- a/articles/active-directory/manage-apps/application-sign-in-other-problem-access-panel.md +++ b/articles/active-directory/manage-apps/application-sign-in-other-problem-access-panel.md @@ -1,6 +1,5 @@ --- title: Troubleshoot problems signing in to an application from My Apps portal -titleSuffix: Azure AD description: Troubleshoot problems signing in to an application from Azure AD My Apps services: active-directory author: lnalepa diff --git a/articles/active-directory/manage-apps/application-sign-in-problem-application-error.md b/articles/active-directory/manage-apps/application-sign-in-problem-application-error.md index 35cbcb21a70fe..bf2e86e55bb16 100644 --- a/articles/active-directory/manage-apps/application-sign-in-problem-application-error.md +++ b/articles/active-directory/manage-apps/application-sign-in-problem-application-error.md @@ -1,6 +1,5 @@ --- title: Error message appears on app page after you sign in -titleSuffix: Azure AD description: How to resolve issues with Azure AD sign in when the app returns an error message. services: active-directory author: eringreenlee @@ -11,7 +10,6 @@ ms.workload: identity ms.topic: troubleshooting ms.date: 07/11/2017 ms.author: ergreenl -ms.reviewer: davidmu ms.collection: M365-identity-device-management --- diff --git a/articles/active-directory/manage-apps/application-sign-in-problem-first-party-microsoft.md b/articles/active-directory/manage-apps/application-sign-in-problem-first-party-microsoft.md index 693777d2e5cf1..26bef2d283b54 100644 --- a/articles/active-directory/manage-apps/application-sign-in-problem-first-party-microsoft.md +++ b/articles/active-directory/manage-apps/application-sign-in-problem-first-party-microsoft.md @@ -1,6 +1,5 @@ --- title: Problems signing in to a Microsoft application -titleSuffix: Azure AD description: Troubleshoot common problems faced when signing in to first-party Microsoft Applications using Azure AD (like Microsoft 365). 
services: active-directory author: AlAmaral diff --git a/articles/active-directory/manage-apps/application-sign-in-unexpected-user-consent-error.md b/articles/active-directory/manage-apps/application-sign-in-unexpected-user-consent-error.md index 22d508003a600..c020e37a919fb 100644 --- a/articles/active-directory/manage-apps/application-sign-in-unexpected-user-consent-error.md +++ b/articles/active-directory/manage-apps/application-sign-in-unexpected-user-consent-error.md @@ -1,6 +1,5 @@ --- title: Unexpected error when performing consent to an application -titleSuffix: Azure AD description: Discusses errors that can occur during the process of consenting to an application and what you can do about them services: active-directory author: eringreenlee diff --git a/articles/active-directory/manage-apps/application-sign-in-unexpected-user-consent-prompt.md b/articles/active-directory/manage-apps/application-sign-in-unexpected-user-consent-prompt.md index 2c75c6b25a67d..4e8833c79c8c3 100644 --- a/articles/active-directory/manage-apps/application-sign-in-unexpected-user-consent-prompt.md +++ b/articles/active-directory/manage-apps/application-sign-in-unexpected-user-consent-prompt.md @@ -1,6 +1,5 @@ --- title: Unexpected consent prompt when signing in to an application -titleSuffix: Azure AD description: How to troubleshoot when a user sees a consent prompt for an application you have integrated with Azure AD that you did not expect services: active-directory author: eringreenlee diff --git a/articles/active-directory/manage-apps/assign-app-owners.md b/articles/active-directory/manage-apps/assign-app-owners.md index edcc5fc53487e..0023acb264536 100644 --- a/articles/active-directory/manage-apps/assign-app-owners.md +++ b/articles/active-directory/manage-apps/assign-app-owners.md @@ -1,6 +1,5 @@ --- title: Assign enterprise application owners -titleSuffix: Azure AD description: Learn how to assign owners to applications in Azure Active Directory services: active-directory documentationcenter: '' diff --git a/articles/active-directory/manage-apps/assign-user-or-group-access-portal.md b/articles/active-directory/manage-apps/assign-user-or-group-access-portal.md index 0f6b1b4673afb..6b196b3808291 100644 --- a/articles/active-directory/manage-apps/assign-user-or-group-access-portal.md +++ b/articles/active-directory/manage-apps/assign-user-or-group-access-portal.md @@ -1,6 +1,5 @@ --- title: Assign users and groups -titleSuffix: Azure AD description: Learn how to assign and unassign users, and groups, for an app using Azure Active Directory for identity management. 
services: active-directory author: eringreenlee @@ -11,7 +10,6 @@ ms.workload: identity ms.topic: how-to ms.date: 10/23/2021 ms.author: ergreenl -ms.reviewer: davidmu ms.custom: contperf-fy22q2, contperf-fy22q3 #customer intent: As an admin, I want to manage user assignment for an app in Azure Active Directory using PowerShell diff --git a/articles/active-directory/manage-apps/certificate-signing-options.md b/articles/active-directory/manage-apps/certificate-signing-options.md index 9ec8ab2ffecf9..4edbef3597984 100644 --- a/articles/active-directory/manage-apps/certificate-signing-options.md +++ b/articles/active-directory/manage-apps/certificate-signing-options.md @@ -1,16 +1,15 @@ --- title: Advanced certificate signing options in a SAML token -titleSuffix: Azure AD description: Learn how to use advanced certificate signing options in the SAML token for pre-integrated apps in Azure Active Directory services: active-directory -author: davidmu1 +author: omondiatieno manager: CelesteDG ms.service: active-directory ms.subservice: app-mgmt ms.workload: identity ms.topic: conceptual ms.date: 07/30/2021 -ms.author: saumadan +ms.author: jomondi ms.reviewer: saumadan ms.custom: aaddev ms.collection: M365-identity-device-management diff --git a/articles/active-directory/manage-apps/cloud-app-security.md b/articles/active-directory/manage-apps/cloud-app-security.md index 780568bd799a4..f8dd961beb1b8 100644 --- a/articles/active-directory/manage-apps/cloud-app-security.md +++ b/articles/active-directory/manage-apps/cloud-app-security.md @@ -1,16 +1,15 @@ --- title: App visibility and control with Microsoft Defender for Cloud Apps -titleSuffix: Azure AD description: Learn ways to identify app risk levels, stop breaches and leaks in real time, and use app connectors to take advantage of provider APIs for visibility and governance. services: active-directory -author: davidmu1 +author: omondiatieno manager: CelesteDG ms.service: active-directory ms.subservice: app-mgmt ms.topic: conceptual ms.workload: identity ms.date: 07/29/2021 -ms.author: davidmu +ms.author: jomondi ms.collection: M365-identity-device-management ms.reviewer: bokacevi, dacurwin --- diff --git a/articles/active-directory/manage-apps/configure-admin-consent-workflow.md b/articles/active-directory/manage-apps/configure-admin-consent-workflow.md index dc2d010d2165f..29b964545ca84 100755 --- a/articles/active-directory/manage-apps/configure-admin-consent-workflow.md +++ b/articles/active-directory/manage-apps/configure-admin-consent-workflow.md @@ -1,6 +1,5 @@ --- title: Configure the admin consent workflow -titleSuffix: Azure AD description: Learn how to configure a way for end users to request access to applications that require admin consent. services: active-directory author: eringreenlee @@ -9,9 +8,8 @@ ms.service: active-directory ms.subservice: app-mgmt ms.workload: identity ms.topic: how-to -ms.date: 03/22/2021 +ms.date: 05/27/2022 ms.author: ergreenl -ms.reviewer: davidmu ms.collection: M365-identity-device-management ms.custom: contperf-fy22q2 #customer intent: As an admin, I want to configure the admin consent workflow. 
diff --git a/articles/active-directory/manage-apps/configure-authentication-for-federated-users-portal.md b/articles/active-directory/manage-apps/configure-authentication-for-federated-users-portal.md index b91db52225358..adcd007577cea 100644 --- a/articles/active-directory/manage-apps/configure-authentication-for-federated-users-portal.md +++ b/articles/active-directory/manage-apps/configure-authentication-for-federated-users-portal.md @@ -1,6 +1,5 @@ --- title: Configure sign-in auto-acceleration using Home Realm Discovery -titleSuffix: Azure AD description: Learn how to force federated IdP acceleration for an application using Home Realm Discovery policy. services: active-directory author: nickludwig @@ -219,7 +218,7 @@ Use the previous example to get the **ObjectID** of the policy, and that of the ## Configuring policy through Graph Explorer -Set the HRD policy using Microsoft Graph. See [homeRealmDiscoveryPolicy](/graph/api/resources/homeRealmDiscoveryPolicy?view=graph-rest-1.0) resource type for information on how to create the policy. +Set the HRD policy using Microsoft Graph. See [homeRealmDiscoveryPolicy](/graph/api/resources/homeRealmDiscoveryPolicy?view=graph-rest-1.0&preserve-view=true) resource type for information on how to create the policy. From the Microsoft Graph explorer window: diff --git a/articles/active-directory/manage-apps/configure-linked-sign-on.md b/articles/active-directory/manage-apps/configure-linked-sign-on.md index eb7a1149aa927..bf82bbd20d9f7 100644 --- a/articles/active-directory/manage-apps/configure-linked-sign-on.md +++ b/articles/active-directory/manage-apps/configure-linked-sign-on.md @@ -1,7 +1,6 @@ --- title: Add linked single sign-on to an application description: Add linked single sign-on to an application in Azure Active Directory. -titleSuffix: Azure AD services: active-directory author: AllisonAm manager: CelesteDG diff --git a/articles/active-directory/manage-apps/configure-password-single-sign-on-non-gallery-applications.md b/articles/active-directory/manage-apps/configure-password-single-sign-on-non-gallery-applications.md index edcf1d1ac6849..5364833768c30 100644 --- a/articles/active-directory/manage-apps/configure-password-single-sign-on-non-gallery-applications.md +++ b/articles/active-directory/manage-apps/configure-password-single-sign-on-non-gallery-applications.md @@ -1,7 +1,6 @@ --- title: Add password-based single sign-on to an application description: Add password-based single sign-on to an application in Azure Active Directory. -titleSuffix: Azure AD services: active-directory author: AllisonAm manager: CelesteDG diff --git a/articles/active-directory/manage-apps/configure-permission-classifications.md b/articles/active-directory/manage-apps/configure-permission-classifications.md index 71f68c4f7552a..f4976caabba3f 100644 --- a/articles/active-directory/manage-apps/configure-permission-classifications.md +++ b/articles/active-directory/manage-apps/configure-permission-classifications.md @@ -1,6 +1,5 @@ --- title: Configure permission classifications -titleSuffix: Azure AD description: Learn how to manage delegated permission classifications. 
services: active-directory author: jackson-woods diff --git a/articles/active-directory/manage-apps/configure-risk-based-step-up-consent.md b/articles/active-directory/manage-apps/configure-risk-based-step-up-consent.md index 3b77349bbdfa0..5bba07c175c8a 100644 --- a/articles/active-directory/manage-apps/configure-risk-based-step-up-consent.md +++ b/articles/active-directory/manage-apps/configure-risk-based-step-up-consent.md @@ -1,6 +1,5 @@ --- title: Configure risk-based step-up consent -titleSuffix: Azure AD description: Learn how to disable and enable risk-based step-up consent to reduce user exposure to malicious apps that make illicit consent requests. services: active-directory author: psignoret diff --git a/articles/active-directory/manage-apps/configure-user-consent-groups.md b/articles/active-directory/manage-apps/configure-user-consent-groups.md index 17b4604358a94..ff738608d7197 100644 --- a/articles/active-directory/manage-apps/configure-user-consent-groups.md +++ b/articles/active-directory/manage-apps/configure-user-consent-groups.md @@ -1,6 +1,5 @@ --- title: Configure group owner consent to apps accessing group data -titleSuffix: Azure AD description: Learn manage whether group and team owners can consent to applications that will have access to the group or team's data. services: active-directory author: eringreenlee diff --git a/articles/active-directory/manage-apps/configure-user-consent.md b/articles/active-directory/manage-apps/configure-user-consent.md index 5bb33408f81d3..7e8f1660fd05b 100755 --- a/articles/active-directory/manage-apps/configure-user-consent.md +++ b/articles/active-directory/manage-apps/configure-user-consent.md @@ -1,6 +1,5 @@ --- title: Configure how users consent to applications -titleSuffix: Azure AD description: Learn how to manage how and when users can consent to applications that will have access to your organization's data. services: active-directory author: psignoret diff --git a/articles/active-directory/manage-apps/consent-and-permissions-overview.md b/articles/active-directory/manage-apps/consent-and-permissions-overview.md index 2a87abf1d5740..cb02a0d3efb72 100644 --- a/articles/active-directory/manage-apps/consent-and-permissions-overview.md +++ b/articles/active-directory/manage-apps/consent-and-permissions-overview.md @@ -1,6 +1,5 @@ --- title: Overview of consent and permissions -titleSuffix: Azure AD description: Learn about the fundamental concepts of consents and permissions in Azure AD services: active-directory author: psignoret diff --git a/articles/active-directory/manage-apps/datawiza-with-azure-ad.md b/articles/active-directory/manage-apps/datawiza-with-azure-ad.md index 4f36ea3a01f37..9607bb4ba1707 100644 --- a/articles/active-directory/manage-apps/datawiza-with-azure-ad.md +++ b/articles/active-directory/manage-apps/datawiza-with-azure-ad.md @@ -1,7 +1,6 @@ --- title: Secure hybrid access with Datawiza -titleSuffix: Azure AD -description: In this tutorial, learn how to integrate Datawiza with Azure AD for secure hybrid access +description: Learn how to integrate Datawiza with Azure AD. See how to use Datawiza and Azure AD to authenticate users and give them access to on-premises and cloud apps. 
services: active-directory author: gargi-sinha manager: martinco @@ -9,81 +8,87 @@ ms.service: active-directory ms.subservice: app-mgmt ms.topic: how-to ms.workload: identity -ms.date: 8/27/2021 +ms.date: 05/19/2022 ms.author: gasinh ms.collection: M365-identity-device-management +ms.custom: kr2b-contr-experiment --- # Tutorial: Configure Datawiza with Azure Active Directory for secure hybrid access In this sample tutorial, learn how to integrate Azure Active Directory (Azure AD) with [Datawiza](https://www.datawiza.com/) for secure hybrid access. -Datawiza's [Datawiza Access Broker -(DAB)](https://www.datawiza.com/access-broker) extends Azure AD to enable Single Sign-on (SSO) and granular access controls to protect on-premise and cloud-hosted applications, such as Oracle E-Business Suite, Microsoft IIS, and SAP. +Datawiza's [Datawiza Access Broker (DAB)](https://www.datawiza.com/access-broker) extends Azure AD to enable single sign-on (SSO) and provide granular access controls to protect on-premises and cloud-hosted applications, such as Oracle E-Business Suite, Microsoft IIS, and SAP. -Using this solution enterprises can quickly transition from legacy Web Access Managers (WAMs), such as Symantec SiteMinder, NetIQ, Oracle, and IBM to Azure AD without rewriting applications. Enterprises can also use Datawiza as a no-code or low-code solution to integrate new applications to Azure AD. This saves engineering time, reduces cost significantly and delivers the project in a secured manner. +By using this solution, enterprises can quickly transition from legacy web access managers (WAMs), such as Symantec SiteMinder, NetIQ, Oracle, and IBM, to Azure AD without rewriting applications. Enterprises can also use Datawiza as a no-code or low-code solution to integrate new applications to Azure AD. This approach saves engineering time, reduces cost significantly, and delivers the project in a secured manner. ## Prerequisites -To get started, you'll need: +To get started, you need: - An Azure subscription. If you don\'t have a subscription, you can get a [trial account](https://azure.microsoft.com/free/). - An [Azure AD tenant](../fundamentals/active-directory-access-create-new-tenant.md) that's linked to your Azure subscription. -- [Docker](https://docs.docker.com/get-docker/) and -[docker-compose](https://docs.docker.com/compose/install/) -are required to run DAB. Your applications can run on any platform, such as the virtual machine and bare metal. +- [Docker](https://docs.docker.com/get-docker/) and [docker-compose](https://docs.docker.com/compose/install/), which are required to run DAB. Your applications can run on any platform, such as a virtual machine and bare metal. -- An application that you'll transition from a legacy identity system to Azure AD. In this example, DAB is deployed on the same server where the application is. The application will run on localhost: 3001 and DAB proxies traffic to the application via localhost: 9772. The traffic to the application will reach DAB first and then be proxied to the application. +- An application that you'll transition from a legacy identity system to Azure AD. In this example, DAB is deployed on the same server as the application. The application runs on localhost: 3001, and DAB proxies traffic to the application via localhost: 9772. The traffic to the application reaches DAB first and is then proxied to the application. 
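+Before moving on, it can help to confirm the prerequisites are actually in place. The following PowerShell snippet is an optional sanity check only — a sketch that assumes the sample port used in this tutorial (the application on localhost: 3001) — and verifies that Docker, docker-compose, and the application are reachable.
+
+```powershell
+# Optional sanity check for the prerequisites above (a sketch, not part of the Datawiza setup itself).
+docker --version            # Docker is required to run DAB
+docker-compose --version    # docker-compose is used to start the DAB containers
+
+# Confirm the application you plan to protect responds locally on the sample port 3001.
+Invoke-WebRequest -Uri "http://localhost:3001/" -UseBasicParsing | Select-Object StatusCode
+```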
## Scenario description Datawiza integration includes the following components: -- [Azure AD](../fundamentals/active-directory-whatis.md) - Microsoft's cloud-based identity and access management service, which helps users sign in and access external and internal resources. +- [Azure AD](../fundamentals/active-directory-whatis.md) - A cloud-based identity and access management service from Microsoft. Azure AD helps users sign in and access external and internal resources. -- Datawiza Access Broker (DAB) - The service user sign on and transparently passes identity to applications through HTTP headers. +- Datawiza Access Broker (DAB) - The service that users sign on to. DAB transparently passes identity information to applications through HTTP headers. -- Datawiza Cloud Management Console (DCMC) - A centralized management console that manages DAB. DCMC provides UI and RESTful APIs for administrators to manage the configurations of DAB and its access control policies. +- Datawiza Cloud Management Console (DCMC) - A centralized management console that manages DAB. DCMC provides UI and RESTful APIs for administrators to manage the DAB configuration and access control policies. The following architecture diagram shows the implementation. -![image shows architecture diagram](./media/datawiza-with-azure-active-directory/datawiza-architecture-diagram.png) +![Architecture diagram that shows the authentication process that gives a user access to an on-premises application.](./media/datawiza-with-azure-active-directory/datawiza-architecture-diagram.png) -|Steps| Description| +|Step| Description| |:----------|:-----------| -| 1. | The user makes a request to access the on-premises or cloud-hosted application. DAB proxies the request made by the user to the application.| -| 2. |The DAB checks the user's authentication state. If it doesn't receive a session token, or the supplied session token is invalid, then it sends the user to Azure AD for authentication.| +| 1. | The user makes a request to access the on-premises or cloud-hosted application. DAB proxies the request made by the user to the application.| +| 2. | DAB checks the user's authentication state. If it doesn't receive a session token, or the supplied session token is invalid, it sends the user to Azure AD for authentication.| | 3. | Azure AD sends the user request to the endpoint specified during the DAB application's registration in the Azure AD tenant.| -| 4. | The DAB evaluates access policies and calculates attribute values to be included in HTTP headers forwarded to the application. During this step, the DAB may call out to the Identity provider to retrieve the information needed to set the header values correctly. The DAB sets the header values and sends the request to the application. | -| 5. | The user is now authenticated and has access to the application.| +| 4. | DAB evaluates access policies and calculates attribute values to be included in HTTP headers forwarded to the application. During this step, DAB may call out to the identity provider to retrieve the information needed to set the header values correctly. DAB sets the header values and sends the request to the application. | +| 5. 
| The user is authenticated and has access to the application.| ## Onboard with Datawiza -To integrate your on-premises or cloud-hosted application with Azure AD, login to [Datawiza Cloud Management +To integrate your on-premises or cloud-hosted application with Azure AD, sign in to [Datawiza Cloud Management Console](https://console.datawiza.com/) (DCMC). ## Create an application on DCMC -[Create an application](https://docs.datawiza.com/step-by-step/step2.html) and generate a key pair of `PROVISIONING_KEY` and `PROVISIONING_SECRET` for the application on the DCMC. +In the next step, you create an application on DCMC and generate a key pair for the app. The key pair consists of a `PROVISIONING_KEY` and `PROVISIONING_SECRET`. To create the app and generate the key pair, follow the instructions in [Datawiza Cloud Management Console](https://docs.datawiza.com/step-by-step/step2.html). -For Azure AD, Datawiza offers a convenient [One click integration](https://docs.datawiza.com/tutorial/web-app-azure-one-click.html). This method to integrate Azure AD with DCMC can create an application registration on your behalf in your Azure AD tenant. +For Azure AD, Datawiza offers a convenient [one-click integration](https://docs.datawiza.com/tutorial/web-app-azure-one-click.html). This method to integrate Azure AD with DCMC can create an application registration on your behalf in your Azure AD tenant. -![image shows configure idp](./media/datawiza-with-azure-active-directory/configure-idp.png) +![Screenshot of the Datawiza Configure I D P page. Boxes for name, protocol, and other values are visible. An automatic generator option is turned on.](./media/datawiza-with-azure-active-directory/configure-idp.png) -Instead, if you want to use an existing web application in your Azure AD tenant, you can disable the option and populate the fields of the form. You'll need the tenant ID, client ID, and client secret. [Create a web application and get these values in your tenant](https://docs.datawiza.com/idp/azure.html). +Instead, if you want to use an existing web application in your Azure AD tenant, you can disable the option and populate the fields of the form. You need the tenant ID, client ID, and client secret. For more information about creating a web application and getting these values, see [Microsoft Azure AD in the Datawiza documentation](https://docs.datawiza.com/idp/azure.html). -![image shows configure idp using form](./media/datawiza-with-azure-active-directory/use-form.png) +![Screenshot of the Datawiza Configure I D P page. Boxes for name, protocol, and other values are visible. An automatic generator option is turned off.](./media/datawiza-with-azure-active-directory/use-form.png) ## Run DAB with a header-based application -1. You can use either Docker or Kubernetes to run DAB. The docker image is needed for users to create a sample header-based application. [Configure DAB and SSO -integration](https://docs.datawiza.com/step-by-step/step3.html). [Deploy DAB with Kubernetes](https://docs.datawiza.com/tutorial/web-app-AKS.html). A sample docker image `docker-compose.yml` file is provided for you to download and use. [Log in to the container registry](https://docs.datawiza.com/step-by-step/step3.html#important-step) to download the images of DAB and the header-based application. +You can use either Docker or Kubernetes to run DAB. The docker image is needed to create a sample header-based application. - ```yaml - services: +To run DAB with a header-based application, follow these steps: + +1. 
Use either Docker or Kubernetes to run DAB: + + - For Docker-specific instructions, see [Deploy Datawiza Access Broker With Your App](https://docs.datawiza.com/step-by-step/step3.html). + - For Kubernetes-specific instructions, see [Deploy Datawiza Access Broker with a Web App using Kubernetes](https://docs.datawiza.com/tutorial/web-app-AKS.html). + + You can use the following sample docker image docker-compose.yml file: + + ```yaml + services: datawiza-access-broker: image: registry.gitlab.com/datawiza/access-broker container_name: datawiza-access-broker @@ -97,34 +102,37 @@ integration](https://docs.datawiza.com/step-by-step/step3.html). [Deploy DAB wit header-based-app: image: registry.gitlab.com/datawiza/header-based-app restart: always - ports: - - "3001:3001" + ports: + - "3001:3001" ``` -2. After executing `docker-compose -f docker-compose.yml up`, the -header-based application should have SSO enabled with Azure AD. Open a browser and type in `http://localhost:9772/`. +1. To sign in to the container registry and download the images of DAB and the header-based application, follow the instructions in [Important Step](https://docs.datawiza.com/step-by-step/step3.html#important-step). -3. An Azure AD login page will show up. +1. Run the following command: -## Pass user attributes to the header-based application + `docker-compose -f docker-compose.yml up` -1. DAB gets user attributes from IdP and can pass the user attributes to the application via header or cookie. See the instructions on how to [pass user attributes](https://docs.datawiza.com/step-by-step/step4.html) such as email address, firstname, and lastname to the header-based application. + The header-based application should now have SSO enabled with Azure AD. -2. After successfully configuring the user attributes, you should see the green check sign for each of the user attributes. +1. In a browser, go to `http://localhost:9772/`. An Azure AD sign-in page appears. - ![image shows datawiza application home page](./media/datawiza-with-azure-active-directory/datawiza-application-home-page.png) +## Pass user attributes to the header-based application -## Test the flow +DAB gets user attributes from Azure AD and can pass these attributes to the application via a header or cookie. + +To pass user attributes such as an email address, a first name, and a last name to the header-based application, follow the instructions in [Pass User Attributes](https://docs.datawiza.com/step-by-step/step4.html). -1. Navigate to the application URL. +After successfully configuring the user attributes, you should see a green check mark next to each attribute. -2. The DAB should redirect to the Azure AD login page. +![Screenshot that shows the Datawiza application home page. Green check marks are visible next to the host, email, firstname, and lastname attributes.](./media/datawiza-with-azure-active-directory/datawiza-application-home-page.png) + +## Test the flow -3. After successfully authenticating, you should be redirected to DAB. +1. Go to the application URL. DAB should redirect you to the Azure AD sign-in page. -4. The DAB evaluates policies, calculates headers, and sends the user to the upstream application. +1. After successfully authenticating, you should be redirected to DAB. -5. Your requested application should show up. +DAB evaluates policies, calculates headers, and sends you to the upstream application. Your requested application should appear. 
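+If you prefer to verify the flow from a terminal rather than a browser, the following PowerShell snippet is a rough, optional check. It assumes the sample DAB port from this tutorial (9772) and uses a crude string match to confirm that an unauthenticated request ends up at an Azure AD sign-in page; adjust the URL if your deployment uses different ports or hostnames.
+
+```powershell
+# Rough smoke test (sketch): an unauthenticated request to DAB should end at the Azure AD sign-in page.
+$response = Invoke-WebRequest -Uri "http://localhost:9772/" -UseBasicParsing
+$response.StatusCode                                    # Expect 200 after the redirect chain completes
+$response.Content -match "login\.microsoftonline\.com"  # Crude check that the final page is the Azure AD sign-in page
+```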
## Next steps diff --git a/articles/active-directory/manage-apps/debug-saml-sso-issues.md b/articles/active-directory/manage-apps/debug-saml-sso-issues.md index ab9a045fca08d..0b782b552e563 100644 --- a/articles/active-directory/manage-apps/debug-saml-sso-issues.md +++ b/articles/active-directory/manage-apps/debug-saml-sso-issues.md @@ -1,6 +1,5 @@ --- title: Debug SAML-based single sign-on -titleSuffix: Azure AD description: Debug SAML-based single sign-on to applications in Azure Active Directory. services: active-directory ms.author: alamaral @@ -10,7 +9,7 @@ ms.service: active-directory ms.subservice: app-mgmt ms.topic: troubleshooting ms.workload: identity -ms.date: 02/18/2019 +ms.date: 05/27/2022 --- # Debug SAML-based single sign-on to applications @@ -19,7 +18,7 @@ Learn how to find and fix [single sign-on](what-is-single-sign-on.md) issues for ## Before you begin -We recommend installing the [My Apps Secure Sign-in Extension](https://support.microsoft.com/account-billing/troubleshoot-problems-with-the-my-apps-portal-d228da80-fcb7-479c-b960-a1e2535cbdff#im-having-trouble-installing-the-my-apps-secure-sign-in-extension). This browser extension makes it easy to gather the SAML request and SAML response information that you need to resolving issues with single sign-on. In case you cannot install the extension, this article shows you how to resolve issues both with and without the extension installed. +We recommend installing the [My Apps Secure Sign-in Extension](https://support.microsoft.com/account-billing/troubleshoot-problems-with-the-my-apps-portal-d228da80-fcb7-479c-b960-a1e2535cbdff#im-having-trouble-installing-the-my-apps-secure-sign-in-extension). This browser extension makes it easy to gather the SAML request and SAML response information that you need to resolve issues with single sign-on. In case you can't install the extension, this article shows you how to resolve issues both with and without the extension installed. To download and install the My Apps Secure Sign-in Extension, use one of the following links. @@ -38,7 +37,7 @@ To test SAML-based single sign-on between Azure AD and a target application: ![Screenshot showing the test SAML SSO page](./media/debug-saml-sso-issues/test-single-sign-on.png) -If you are successfully signed in, the test has passed. In this case, Azure AD issued a SAML response token to the application. The application used the SAML token to successfully sign you in. +If you're successfully signed in, the test has passed. In this case, Azure AD issued a SAML response token to the application. The application used the SAML token to successfully sign you in. If you have an error on the company sign-in page or the application's page, use one of the next sections to resolve the error. @@ -55,7 +54,7 @@ To debug this error, you need the error message and the SAML request. The My App 1. When an error occurs, the extension redirects you back to the Azure AD **Test single sign-on** blade. 1. On the **Test single sign-on** blade, select **Download the SAML request**. 1. You should see specific resolution guidance based on the error and the values in the SAML request. -1. You will see a **Fix it** button to automatically update the configuration in Azure AD to resolve the issue. If you don't see this button, then the sign-in issue is not due to a misconfiguration on Azure AD. +1. You'll see a **Fix it** button to automatically update the configuration in Azure AD to resolve the issue. 
If you don't see this button, then the sign-in issue isn't due to a misconfiguration on Azure AD. If no resolution is provided for the sign-in error, we suggest that you use the feedback textbox to inform us. @@ -66,21 +65,21 @@ If no resolution is provided for the sign-in error, we suggest that you use the - A statement identifying the root cause of the problem. 1. Go back to Azure AD and find the **Test single sign-on** blade. 1. In the text box above **Get resolution guidance**, paste the error message. -1. Click **Get resolution guidance** to display steps for resolving the issue. The guidance might require information from the SAML request or SAML response. If you're not using the My Apps Secure Sign-in Extension, you might need a tool such as [Fiddler](https://www.telerik.com/fiddler) to retrieve the SAML request and response. -1. Verify that the destination in the SAML request corresponds to the SAML Single Sign-On Service URL obtained from Azure AD. -1. Verify the issuer in the SAML request is the same identifier you have configured for the application in Azure AD. Azure AD uses the issuer to find an application in your directory. +1. Select **Get resolution guidance** to display steps for resolving the issue. The guidance might require information from the SAML request or SAML response. If you're not using the My Apps Secure Sign-in Extension, you might need a tool such as [Fiddler](https://www.telerik.com/fiddler) to retrieve the SAML request and response. +1. Verify that the destination in the SAML request corresponds to the SAML Single Sign-on Service URL obtained from Azure AD. +1. Verify the issuer in the SAML request is the same identifier you've configured for the application in Azure AD. Azure AD uses the issuer to find an application in your directory. 1. Verify AssertionConsumerServiceURL is where the application expects to receive the SAML token from Azure AD. You can configure this value in Azure AD, but it's not mandatory if it's part of the SAML request. ## Resolve a sign-in error on the application page -You might sign in successfully and then see an error on the application's page. This occurs when Azure AD issued a token to the application, but the application does not accept the response. +You might sign in successfully and then see an error on the application's page. This occurs when Azure AD issued a token to the application, but the application doesn't accept the response. To resolve the error, follow these steps, or watch this [short video about how to use Azure AD to troubleshoot SAML SSO](https://www.youtube.com/watch?v=poQCJK0WPUk&list=PLLasX02E8BPBm1xNMRdvP6GtA6otQUqp0&index=8): 1. If the application is in the Azure AD Gallery, verify that you've followed all the steps for integrating the application with Azure AD. To find the integration instructions for your application, see the [list of SaaS application integration tutorials](../saas-apps/tutorial-list.md). 1. Retrieve the SAML response. - - If the My Apps Secure Sign-in extension is installed, from the **Test single sign-on** blade, click **download the SAML response**. - - If the extension is not installed, use a tool such as [Fiddler](https://www.telerik.com/fiddler) to retrieve the SAML response. + - If the My Apps Secure Sign-in extension is installed, from the **Test single sign-on** blade, select **download the SAML response**. + - If the extension isn't installed, use a tool such as [Fiddler](https://www.telerik.com/fiddler) to retrieve the SAML response. 1. 
Notice these elements in the SAML response token: - User unique identifier of NameID value and format - Claims issued in the token @@ -88,7 +87,7 @@ To resolve the error, follow these steps, or watch this [short video about how t For more information on the SAML response, see [Single Sign-on SAML protocol](../develop/single-sign-on-saml-protocol.md?toc=/azure/active-directory/azuread-dev/toc.json&bc=/azure/active-directory/azuread-dev/breadcrumb/toc.json). -1. Now that you have reviewed the SAML response, see [Error on an application's page after signing in](application-sign-in-problem-application-error.md) for guidance on how to resolve the problem. +1. Now that you've reviewed the SAML response, see [Error on an application's page after signing in](application-sign-in-problem-application-error.md) for guidance on how to resolve the problem. 1. If you're still not able to sign in successfully, you can ask the application vendor what is missing from the SAML response. ## Next steps diff --git a/articles/active-directory/manage-apps/delete-application-portal.md b/articles/active-directory/manage-apps/delete-application-portal.md index bbadfefbba94a..cca73033b9952 100644 --- a/articles/active-directory/manage-apps/delete-application-portal.md +++ b/articles/active-directory/manage-apps/delete-application-portal.md @@ -1,16 +1,15 @@ --- title: 'Quickstart: Delete an enterprise application' description: Delete an enterprise application in Azure Active Directory. -titleSuffix: Azure AD services: active-directory -author: davidmu1 +author: omondiatieno manager: CelesteDG ms.service: active-directory ms.subservice: app-mgmt ms.topic: quickstart ms.workload: identity ms.date: 03/24/2022 -ms.author: sureshja +ms.author: jomondi ms.reviewer: sureshja ms.custom: mode-other #Customer intent: As an administrator of an Azure AD tenant, I want to delete an enterprise application. diff --git a/articles/active-directory/manage-apps/disable-user-sign-in-portal.md b/articles/active-directory/manage-apps/disable-user-sign-in-portal.md index a436dc709cf1b..45a9b20b3dbc9 100644 --- a/articles/active-directory/manage-apps/disable-user-sign-in-portal.md +++ b/articles/active-directory/manage-apps/disable-user-sign-in-portal.md @@ -1,6 +1,5 @@ --- title: Disable how a how a user signs in -titleSuffix: Azure AD description: How to disable an enterprise application so that no users may sign in to it in Azure Active Directory services: active-directory author: eringreenlee @@ -11,7 +10,6 @@ ms.workload: identity ms.topic: how-to ms.date: 09/23/2021 ms.author: ergreenl -ms.reviewer: davidmu ms.custom: it-pro ms.collection: M365-identity-device-management #customer intent: As an admin, I want to disable the way a user signs in for an application so that no user can sign in to it in Azure Active Directory. diff --git a/articles/active-directory/manage-apps/end-user-experiences.md b/articles/active-directory/manage-apps/end-user-experiences.md index 01a6839979ed0..5065b990f4d92 100644 --- a/articles/active-directory/manage-apps/end-user-experiences.md +++ b/articles/active-directory/manage-apps/end-user-experiences.md @@ -1,6 +1,5 @@ --- title: End-user experiences for applications -titleSuffix: Azure AD description: Azure Active Directory (Azure AD) provides several customizable ways to deploy applications to end users in your organization. 
services: active-directory author: lnalepa diff --git a/articles/active-directory/manage-apps/f5-aad-integration.md b/articles/active-directory/manage-apps/f5-aad-integration.md index ee96472fa0926..03f5d8a036a77 100644 --- a/articles/active-directory/manage-apps/f5-aad-integration.md +++ b/articles/active-directory/manage-apps/f5-aad-integration.md @@ -1,6 +1,5 @@ --- title: Secure hybrid access with F5 -titleSuffix: Azure AD description: F5 BIG-IP Access Policy Manager and Azure Active Directory integration for Secure Hybrid Access author: gargi-sinha manager: martinco diff --git a/articles/active-directory/manage-apps/f5-aad-password-less-vpn.md b/articles/active-directory/manage-apps/f5-aad-password-less-vpn.md index fc997fb758f19..1ce9467d7a4f5 100644 --- a/articles/active-directory/manage-apps/f5-aad-password-less-vpn.md +++ b/articles/active-directory/manage-apps/f5-aad-password-less-vpn.md @@ -1,6 +1,5 @@ --- title: Configure F5 BIG-IP SSL-VPN solution in Azure AD -titleSuffix: Azure AD description: Tutorial to configure F5’s BIG-IP based Secure socket layer Virtual private network (SSL-VPN) solution with Azure Active Directory (AD) for Secure Hybrid Access (SHA) services: active-directory author: gargi-sinha diff --git a/articles/active-directory/manage-apps/f5-bigip-deployment-guide.md b/articles/active-directory/manage-apps/f5-bigip-deployment-guide.md index 0003a1308448c..42484bfd64b17 100644 --- a/articles/active-directory/manage-apps/f5-bigip-deployment-guide.md +++ b/articles/active-directory/manage-apps/f5-bigip-deployment-guide.md @@ -1,6 +1,5 @@ --- title: Secure hybrid access with F5 deployment guide -titleSuffix: Azure AD description: Tutorial to deploy F5 BIG-IP Virtual Edition (VE) VM in Azure IaaS for Secure hybrid access services: active-directory author: gargi-sinha diff --git a/articles/active-directory/manage-apps/grant-admin-consent.md b/articles/active-directory/manage-apps/grant-admin-consent.md index 236cf95825f4c..df9cd59b5bbd4 100755 --- a/articles/active-directory/manage-apps/grant-admin-consent.md +++ b/articles/active-directory/manage-apps/grant-admin-consent.md @@ -1,6 +1,5 @@ --- title: Grant tenant-wide admin consent to an application -titleSuffix: Azure AD description: Learn how to grant tenant-wide consent to an application so that end-users are not prompted for consent when signing in to an application. services: active-directory author: eringreenlee @@ -11,7 +10,6 @@ ms.workload: identity ms.topic: how-to ms.date: 10/23/2021 ms.author: ergreenl -ms.reviewer: davidmu ms.collection: M365-identity-device-management ms.custom: contperf-fy22q2 diff --git a/articles/active-directory/manage-apps/grant-consent-single-user.md b/articles/active-directory/manage-apps/grant-consent-single-user.md index dd5819567b866..9d7e6b3ab4155 100644 --- a/articles/active-directory/manage-apps/grant-consent-single-user.md +++ b/articles/active-directory/manage-apps/grant-consent-single-user.md @@ -1,7 +1,6 @@ --- title: Grant consent on behalf of a single user description: Learn how to grant consent on behalf of a single user when user consent is disabled or restricted. 
-titleSuffix: Azure AD services: active-directory author: psignoret manager: CelesteDG diff --git a/articles/active-directory/manage-apps/hide-application-from-user-portal.md b/articles/active-directory/manage-apps/hide-application-from-user-portal.md index f408cfdf26f4e..32ac187d56f6d 100644 --- a/articles/active-directory/manage-apps/hide-application-from-user-portal.md +++ b/articles/active-directory/manage-apps/hide-application-from-user-portal.md @@ -1,6 +1,5 @@ --- title: Hide an Enterprise application -titleSuffix: Azure AD description: How to hide an Enterprise application from user's experience in Azure Active Directory access portals or Microsoft 365 launchers. services: active-directory author: lnalepa diff --git a/articles/active-directory/manage-apps/home-realm-discovery-policy.md b/articles/active-directory/manage-apps/home-realm-discovery-policy.md index 68f5c87fcceca..5ca2680f795de 100644 --- a/articles/active-directory/manage-apps/home-realm-discovery-policy.md +++ b/articles/active-directory/manage-apps/home-realm-discovery-policy.md @@ -1,6 +1,5 @@ --- title: Home Realm Discovery policy -titleSuffix: Azure AD description: Learn how to manage Home Realm Discovery policy for Azure Active Directory authentication for federated users, including auto-acceleration and domain hints. services: active-directory author: nickludwig diff --git a/articles/active-directory/manage-apps/howto-saml-token-encryption.md b/articles/active-directory/manage-apps/howto-saml-token-encryption.md index bf5c8b391ce4f..13bf153022542 100644 --- a/articles/active-directory/manage-apps/howto-saml-token-encryption.md +++ b/articles/active-directory/manage-apps/howto-saml-token-encryption.md @@ -1,7 +1,6 @@ --- title: SAML token encryption description: Learn how to configure Azure Active Directory SAML token encryption. -titleSuffix: Azure AD services: active-directory author: AllisonAm manager: CelesteDG @@ -9,7 +8,7 @@ ms.service: active-directory ms.subservice: app-mgmt ms.workload: identity ms.topic: conceptual -ms.date: 03/13/2020 +ms.date: 05/27/2022 ms.author: alamaral ms.collection: M365-identity-device-management --- @@ -134,7 +133,7 @@ When you configure a keyCredential using Graph, PowerShell, or in the applicatio 1. From the Azure portal, go to **Azure Active Directory > App registrations**. -1. Select **All apps** from the dropdown to show all apps, and then select the enterprise application that you want to configure. +1. Select the **All apps** tab to show all apps, and then select the application that you want to configure. 1. In the application's page, select **Manifest** to edit the [application manifest](../develop/reference-app-manifest.md). 
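+For orientation, a `keyCredentials` entry used for SAML token encryption generally has the following shape in the application manifest. This is an illustrative sketch with placeholder values only; the field names come from the application manifest schema, and the manifest reference linked in the step above remains the authoritative source.
+
+```json
+"keyCredentials": [
+    {
+        "customKeyIdentifier": "<base64-encoded-certificate-thumbprint>",
+        "keyId": "<guid>",
+        "type": "AsymmetricX509Cert",
+        "usage": "Encrypt",
+        "value": "<base64-encoded-certificate>"
+    }
+]
+```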
diff --git a/articles/active-directory/manage-apps/index.yml b/articles/active-directory/manage-apps/index.yml index a460d2d22c09a..71949dca2114b 100644 --- a/articles/active-directory/manage-apps/index.yml +++ b/articles/active-directory/manage-apps/index.yml @@ -57,12 +57,14 @@ landingContent: url: add-application-portal-configure.md - text: Assign users and groups url: assign-user-or-group-access-portal.md - - text: Configure single sign-on - url: plan-sso-deployment.md - text: Provision an app url: ../governance/what-is-provisioning.md - text: Configure My Apps url: my-apps-deployment-plan.md + - linkListType: learn + links: + - text: Enable single sign-on + url: /learn/modules/enable-single-sign-on/ - title: Secure an app linkLists: - linkListType: how-to-guide diff --git a/articles/active-directory/manage-apps/manage-app-consent-policies.md b/articles/active-directory/manage-apps/manage-app-consent-policies.md index 4f43ff8ac54d2..a4422ddac90de 100644 --- a/articles/active-directory/manage-apps/manage-app-consent-policies.md +++ b/articles/active-directory/manage-apps/manage-app-consent-policies.md @@ -1,7 +1,6 @@ --- title: Manage app consent policies description: Learn how to manage built-in and custom app consent policies to control when consent can be granted. -titleSuffix: Azure AD services: active-directory author: psignoret ms.service: active-directory diff --git a/articles/active-directory/manage-apps/manage-application-permissions.md b/articles/active-directory/manage-apps/manage-application-permissions.md index 862ee0d43ae25..b18cb8e45f365 100644 --- a/articles/active-directory/manage-apps/manage-application-permissions.md +++ b/articles/active-directory/manage-apps/manage-application-permissions.md @@ -1,6 +1,5 @@ --- title: Review permissions granted to applications -titleSuffix: Azure AD description: Learn how to review and manage permissions for an application in Azure Active Directory. services: active-directory author: Jackson-Woods diff --git a/articles/active-directory/manage-apps/manage-consent-requests.md b/articles/active-directory/manage-apps/manage-consent-requests.md index 81b3dab0f4dd4..3f173813f7388 100755 --- a/articles/active-directory/manage-apps/manage-consent-requests.md +++ b/articles/active-directory/manage-apps/manage-consent-requests.md @@ -1,7 +1,6 @@ --- title: Manage consent to applications and evaluate consent requests description: Learn how to manage consent requests when user consent is disabled or restricted, and how to evaluate a request for tenant-wide admin consent to an application in Azure Active Directory. 
-titleSuffix: Azure AD services: active-directory author: psignoret manager: CelesteDG diff --git a/articles/active-directory/manage-apps/manage-self-service-access.md b/articles/active-directory/manage-apps/manage-self-service-access.md index 29f9238f1dadb..b77898a754b78 100644 --- a/articles/active-directory/manage-apps/manage-self-service-access.md +++ b/articles/active-directory/manage-apps/manage-self-service-access.md @@ -1,6 +1,5 @@ --- title: How to enable self-service application assignment -titleSuffix: Azure AD description: Enable self-service application access to allow users to find their own applications from their My Apps portal services: active-directory author: omondiatieno diff --git a/articles/active-directory/manage-apps/media/configure-admin-consent-workflow/review-consent-requests.png b/articles/active-directory/manage-apps/media/configure-admin-consent-workflow/review-consent-requests.png new file mode 100644 index 0000000000000..ba24bf8533aa1 Binary files /dev/null and b/articles/active-directory/manage-apps/media/configure-admin-consent-workflow/review-consent-requests.png differ diff --git a/articles/active-directory/manage-apps/migrate-adfs-application-activity.md b/articles/active-directory/manage-apps/migrate-adfs-application-activity.md index ee4e2859e10da..dad2d533dbe48 100644 --- a/articles/active-directory/manage-apps/migrate-adfs-application-activity.md +++ b/articles/active-directory/manage-apps/migrate-adfs-application-activity.md @@ -1,7 +1,6 @@ --- title: Use the activity report to move AD FS apps to Azure Active Directory description: The Active Directory Federation Services (AD FS) application activity report lets you quickly migrate applications from AD FS to Azure Active Directory (Azure AD). This migration tool for AD FS identifies compatibility with Azure AD and gives migration guidance. -titleSuffix: Azure AD services: active-directory author: omondiatieno manager: CelesteDG diff --git a/articles/active-directory/manage-apps/migrate-adfs-apps-to-azure.md b/articles/active-directory/manage-apps/migrate-adfs-apps-to-azure.md index 5a388d891e1f4..f929e923ea2a3 100644 --- a/articles/active-directory/manage-apps/migrate-adfs-apps-to-azure.md +++ b/articles/active-directory/manage-apps/migrate-adfs-apps-to-azure.md @@ -1,7 +1,6 @@ --- title: Moving application authentication from AD FS to Azure Active Directory description: Learn how to use Azure Active Directory to replace Active Directory Federation Services (AD FS), giving users single sign-on to all their applications. -titleSuffix: Azure AD services: active-directory author: omondiatieno manager: CelesteDG diff --git a/articles/active-directory/manage-apps/migrate-applications-from-okta-to-azure-active-directory.md b/articles/active-directory/manage-apps/migrate-applications-from-okta-to-azure-active-directory.md index 56e1777f7a716..c25a342506723 100644 --- a/articles/active-directory/manage-apps/migrate-applications-from-okta-to-azure-active-directory.md +++ b/articles/active-directory/manage-apps/migrate-applications-from-okta-to-azure-active-directory.md @@ -1,7 +1,6 @@ --- title: Tutorial to migrate your applications from Okta to Azure Active Directory -titleSuffix: Active Directory description: Learn how to migrate your applications from Okta to Azure Active Directory. 
services: active-directory author: gargi-sinha diff --git a/articles/active-directory/manage-apps/migrate-okta-federation-to-azure-active-directory.md b/articles/active-directory/manage-apps/migrate-okta-federation-to-azure-active-directory.md index 04c00fa4e0ed1..9f766e987ae97 100644 --- a/articles/active-directory/manage-apps/migrate-okta-federation-to-azure-active-directory.md +++ b/articles/active-directory/manage-apps/migrate-okta-federation-to-azure-active-directory.md @@ -1,17 +1,16 @@ --- -title: Tutorial to migrate Okta federation to Azure Active Directory-managed authentication -titleSuffix: Active Directory -description: Learn how to migrate your Okta federated applications to Azure AD-managed authentication. +title: Migrate Okta federation to Azure Active Directory +description: Learn how to migrate your Okta-federated applications to managed authentication under Azure AD. See how to migrate federation in a staged manner. services: active-directory author: gargi-sinha manager: martinco - ms.service: active-directory ms.workload: identity ms.topic: how-to -ms.date: 09/01/2021 +ms.date: 05/19/2022 ms.author: gasinh ms.subservice: app-mgmt +ms.custom: kr2b-contr-experiment --- # Tutorial: Migrate Okta federation to Azure Active Directory-managed authentication @@ -39,7 +38,7 @@ Seamless SSO can be deployed to password hash synchronization or pass-through au Follow the [deployment guide](../hybrid/how-to-connect-sso-quick-start.md#step-1-check-the-prerequisites) to ensure that you deploy all necessary prerequisites of seamless SSO to your users. -For our example, we'll configure password hash synchronization and seamless SSO. +For this example, you configure password hash synchronization and seamless SSO. ### Configure Azure AD Connect for password hash synchronization and seamless SSO @@ -47,15 +46,15 @@ Follow these steps to configure Azure AD Connect for password hash synchronizati 1. On your Azure AD Connect server, open the **Azure AD Connect** app and then select **Configure**. - ![Screenshot that shows the Azure A D icon and Configure button.](media/migrate-okta-federation-to-azure-active-directory/configure-azure-ad.png) + ![Screenshot that shows the Azure A D icon and the Configure button in the Azure A D Connect app.](media/migrate-okta-federation-to-azure-active-directory/configure-azure-ad.png) -1. Select **Change user sign-in** > **Next**. +1. Select **Change user sign-in**, and then select **Next**. - ![Screenshot that shows the page for changing user sign-in.](media/migrate-okta-federation-to-azure-active-directory/change-user-signin.png) + ![Screenshot of the Azure A D Connect app that shows the page for changing user sign-in.](media/migrate-okta-federation-to-azure-active-directory/change-user-signin.png) 1. Enter your global administrator credentials. - ![Screenshot that shows where to enter global admin credentials.](media/migrate-okta-federation-to-azure-active-directory/global-admin-credentials.png) + ![Screenshot of the Azure A D Connect app that shows where to enter global admin credentials.](media/migrate-okta-federation-to-azure-active-directory/global-admin-credentials.png) 1. Currently, the server is configured for federation with Okta. Change the selection to **Password Hash Synchronization**. Then select **Enable single sign-on**. @@ -65,15 +64,15 @@ Follow these steps to enable seamless SSO: 1. Enter the domain administrator credentials for the local on-premises system. Then select **Next**. 
- ![Screenshot that shows settings for user sign-in.](media/migrate-okta-federation-to-azure-active-directory/domain-admin-credentials.png) + ![Screenshot of the Azure A D Connect app that shows settings for user sign-in.](media/migrate-okta-federation-to-azure-active-directory/domain-admin-credentials.png) 1. On the final page, select **Configure** to update the Azure AD Connect server. - ![Screenshot that shows the configuration page.](media/migrate-okta-federation-to-azure-active-directory/update-azure-ad-connect-server.png) + ![Screenshot of the Ready to configure page of the Azure A D Connect app.](media/migrate-okta-federation-to-azure-active-directory/update-azure-ad-connect-server.png) 1. Ignore the warning for hybrid Azure AD join for now. You'll reconfigure the device options after you disable federation from Okta. - ![Screenshot that shows the link to configure device options.](media/migrate-okta-federation-to-azure-active-directory/reconfigure-device-options.png) + ![Screenshot of the Azure A D Connect app. A warning about the hybrid Azure A D join is visible. A link for configuring device options is also visible.](media/migrate-okta-federation-to-azure-active-directory/reconfigure-device-options.png) ## Configure staged rollout features @@ -83,7 +82,7 @@ After you enable password hash sync and seamless SSO on the Azure AD Connect ser 1. In the [Azure portal](https://portal.azure.com/#home), select **View** or **Manage Azure Active Directory**. - ![Screenshot that shows the Azure portal.](media/migrate-okta-federation-to-azure-active-directory/azure-portal.png) + ![Screenshot that shows the Azure portal. A welcome message is visible.](media/migrate-okta-federation-to-azure-active-directory/azure-portal.png) 1. On the **Azure Active Directory** menu, select **Azure AD Connect**. Then confirm that **Password Hash Sync** is enabled in the tenant. @@ -93,21 +92,21 @@ After you enable password hash sync and seamless SSO on the Azure AD Connect ser 1. Your **Password Hash Sync** setting might have changed to **On** after the server was configured. If the setting isn't enabled, enable it now. - Notice that **Seamless single sign-on** is set to **Off**. If you attempt to enable it, you'll get an error because it's already enabled for users in the tenant. + Notice that **Seamless single sign-on** is set to **Off**. If you attempt to enable it, you get an error because it's already enabled for users in the tenant. 1. Select **Manage groups**. - ![Screenshot that shows the button for managing groups.](media/migrate-okta-federation-to-azure-active-directory/password-hash-sync.png) + ![Screenshot of the Enable staged rollout features page in the Azure portal. A Manage groups button is visible.](media/migrate-okta-federation-to-azure-active-directory/password-hash-sync.png) -Follow the instructions to add a group to the password hash sync rollout. In the following example, the security group starts with 10 members. +1. Follow the instructions to add a group to the password hash sync rollout. In the following example, the security group starts with 10 members. -![Screenshot that shows an example of a security group.](media/migrate-okta-federation-to-azure-active-directory/example-security-group.png) + ![Screenshot of the Manage groups for Password Hash Sync page in the Azure portal. 
A group is visible in a table.](media/migrate-okta-federation-to-azure-active-directory/example-security-group.png) -After you add the group, wait for about 30 minutes while the feature takes effect in your tenant. When the feature has taken effect, your users will no longer be redirected to Okta when they attempt to access Office 365 services. +1. After you add the group, wait for about 30 minutes while the feature takes effect in your tenant. When the feature has taken effect, your users are no longer redirected to Okta when they attempt to access Office 365 services. The staged rollout feature has some unsupported scenarios: -- Legacy authentication such as POP3 and SMTP aren't supported. +- Legacy authentication protocols such as POP3 and SMTP aren't supported. - If you've configured hybrid Azure AD join for use with Okta, all the hybrid Azure AD join flows go to Okta until the domain is defederated. A sign-on policy should remain in Okta to allow legacy authentication for hybrid Azure AD join Windows clients. ## Create an Okta app in Azure AD @@ -120,30 +119,30 @@ To configure the enterprise application registration for Okta: 1. On the left menu, under **Manage**, select **Enterprise applications**. - ![Screenshot that shows the "Enterprise applications" selection.](media/migrate-okta-federation-to-azure-active-directory/enterprise-application.png) + ![Screenshot that shows the left menu of the Azure portal. Enterprise applications is visible.](media/migrate-okta-federation-to-azure-active-directory/enterprise-application.png) 1. On the **All applications** menu, select **New application**. - ![Screenshot that shows the "New application" selection.](media/migrate-okta-federation-to-azure-active-directory/new-application.png) + ![Screenshot that shows the All applications page in the Azure portal. A new application is visible.](media/migrate-okta-federation-to-azure-active-directory/new-application.png) 1. Select **Create your own application**. On the menu that opens, name the Okta app and select **Register an application you're working on to integrate with Azure AD**. Then select **Create**. - :::image type="content" source="media/migrate-okta-federation-to-azure-active-directory/register-application.png" alt-text="Screenshot that shows how to register an application." lightbox="media/migrate-okta-federation-to-azure-active-directory/register-application.png"::: + :::image type="content" source="media/migrate-okta-federation-to-azure-active-directory/register-application.png" alt-text="Screenshot that shows the Create your own application menu. The app name is visible. The option to integrate with Azure A D is turned on." lightbox="media/migrate-okta-federation-to-azure-active-directory/register-application.png"::: -1. Select **Accounts in any organizational directory (Any Azure AD Directory - Multitenant)** > **Register**. +1. Select **Accounts in any organizational directory (Any Azure AD Directory - Multitenant)**, and then select **Register**. ![Screenshot that shows how to register an application and change the application account.](media/migrate-okta-federation-to-azure-active-directory/register-change-application.png) 1. On the Azure AD menu, select **App registrations**. Then open the newly created registration. - ![Screenshot that shows the new app registration.](media/migrate-okta-federation-to-azure-active-directory/app-registration.png) + ![Screenshot that shows the App registrations page in the Azure portal. 
The new app registration is visible.](media/migrate-okta-federation-to-azure-active-directory/app-registration.png) 1. Record your tenant ID and application ID. >[!Note] >You'll need the tenant ID and application ID to configure the identity provider in Okta. - ![Screenshot that shows the tenant ID and application ID.](media/migrate-okta-federation-to-azure-active-directory/record-ids.png) + ![Screenshot that shows the Okta Application Access page in the Azure portal. The tenant I D and application I D are called out.](media/migrate-okta-federation-to-azure-active-directory/record-ids.png) 1. On the left menu, select **Certificates & secrets**. Then select **New client secret**. Give the secret a generic name and set its expiration date. @@ -152,40 +151,40 @@ To configure the enterprise application registration for Okta: >[!NOTE] >The value and ID aren't shown later. If you fail to record this information now, you'll have to regenerate a secret. - ![Screenshot that shows where to record the secret's value and I D.](media/migrate-okta-federation-to-azure-active-directory/record-secrets.png) + ![Screenshot of the Certificates and secrets page. The value and I D of the secret are visible.](media/migrate-okta-federation-to-azure-active-directory/record-secrets.png) 1. On the left menu, select **API permissions**. Grant the application access to the OpenID Connect (OIDC) stack. 1. Select **Add a permission** > **Microsoft Graph** > **Delegated permissions**. - :::image type="content" source="media/migrate-okta-federation-to-azure-active-directory/delegated-permissions.png" alt-text="Screenshot that shows delegated permissions." lightbox="media/migrate-okta-federation-to-azure-active-directory/delegated-permissions.png"::: + :::image type="content" source="media/migrate-okta-federation-to-azure-active-directory/delegated-permissions.png" alt-text="Screenshot that shows the A P I permissions page of the Azure portal. A delegated permission for reading is visible." lightbox="media/migrate-okta-federation-to-azure-active-directory/delegated-permissions.png"::: 1. In the OpenID permissions section, add **email**, **openid**, and **profile**. Then select **Add permissions**. - :::image type="content" source="media/migrate-okta-federation-to-azure-active-directory/add-permissions.png" alt-text="Screenshot that shows how to add permissions." lightbox="media/migrate-okta-federation-to-azure-active-directory/add-permissions.png"::: + :::image type="content" source="media/migrate-okta-federation-to-azure-active-directory/add-permissions.png" alt-text="Screenshot that shows the A P I permissions page of the Azure portal. Permissions for email, openid, profile, and reading are visible." lightbox="media/migrate-okta-federation-to-azure-active-directory/add-permissions.png"::: 1. Select **Grant admin consent for \** and wait until the **Granted** status appears. - ![Screenshot that shows granted consent.](media/migrate-okta-federation-to-azure-active-directory/grant-consent.png) + ![Screenshot of the A P I permissions page that shows a message about granted consent.](media/migrate-okta-federation-to-azure-active-directory/grant-consent.png) 1. On the left menu, select **Branding**. For **Home page URL**, add your user's application home page. - ![Screenshot that shows how to add branding.](media/migrate-okta-federation-to-azure-active-directory/add-branding.png) + ![Screenshot of the Branding page in the Azure portal. 
Several input boxes are visible, including one for the home page U R L.](media/migrate-okta-federation-to-azure-active-directory/add-branding.png) 1. In the Okta administration portal, select **Security** > **Identity Providers** to add a new identity provider. Select **Add Microsoft**. - ![Screenshot that shows how to add the identity provider.](media/migrate-okta-federation-to-azure-active-directory/configure-idp.png) + ![Screenshot of the Okta administration portal. Add Microsoft is visible in the Add Identity Provider list.](media/migrate-okta-federation-to-azure-active-directory/configure-idp.png) 1. On the **Identity Provider** page, copy your application ID to the **Client ID** field. Copy the client secret to the **Client Secret** field. -1. Select **Show Advanced Settings**. By default, this configuration will tie the user principal name (UPN) in Okta to the UPN in Azure AD for reverse-federation access. +1. Select **Show Advanced Settings**. By default, this configuration ties the user principal name (UPN) in Okta to the UPN in Azure AD for reverse-federation access. >[!IMPORTANT] >If your UPNs in Okta and Azure AD don't match, select an attribute that's common between users. -1. Finish your selections for autoprovisioning. By default, if a user doesn't match in Okta, the system will attempt to provision the user in Azure AD. If you've migrated provisioning away from Okta, select **Redirect to Okta sign-in page**. +1. Finish your selections for autoprovisioning. By default, if no match is found for an Okta user, the system attempts to provision the user in Azure AD. If you've migrated provisioning away from Okta, select **Redirect to Okta sign-in page**. - ![Screenshot that shows the option for redirecting to the Okta sign-in page.](media/migrate-okta-federation-to-azure-active-directory/redirect-okta.png) + ![Screenshot of the General Settings page in the Okta admin portal. The option for redirecting to the Okta sign-in page is visible.](media/migrate-okta-federation-to-azure-active-directory/redirect-okta.png) Now that you've created the identity provider (IDP), you need to send users to the correct IDP. @@ -195,7 +194,7 @@ To configure the enterprise application registration for Okta: In this example, the **Division** attribute is unused on all Okta profiles, so it's a good choice for IDP routing. - ![Screenshot that shows the division attribute for I D P routing.](media/migrate-okta-federation-to-azure-active-directory/division-idp-routing.png) + ![Screenshot of the Edit Rule page in the Okta admin portal. A rule definition that involves the division attribute is visible.](media/migrate-okta-federation-to-azure-active-directory/division-idp-routing.png) 1. Now that you've added the routing rule, record the redirect URI so you can add it to the application registration. @@ -203,23 +202,23 @@ To configure the enterprise application registration for Okta: 1. On your application registration, on the left menu, select **Authentication**. Then select **Add a platform** > **Web**. - :::image type="content" source="media/migrate-okta-federation-to-azure-active-directory/add-platform.png" alt-text="Screenshot that shows how to add a web platform." lightbox="media/migrate-okta-federation-to-azure-active-directory/add-platform.png"::: + :::image type="content" source="media/migrate-okta-federation-to-azure-active-directory/add-platform.png" alt-text="Screenshot of the Authentication page in the Azure portal. Add a platform and a Configure platforms menu are visible." 
lightbox="media/migrate-okta-federation-to-azure-active-directory/add-platform.png"::: 1. Add the redirect URI that you recorded in the IDP in Okta. Then select **Access tokens** and **ID tokens**. -    ![Screenshot that shows Okta access and I D tokens.](media/migrate-okta-federation-to-azure-active-directory/access-id-tokens.png) +    ![Screenshot of the Configure Web page in the Azure portal. A redirect U R I is visible. The access and I D tokens are selected.](media/migrate-okta-federation-to-azure-active-directory/access-id-tokens.png) 1. In the admin console, select **Directory** > **People**. Select your first test user to edit the profile. 1. In the profile, add **ToAzureAD** as in the following image. Then select **Save**. -    ![Screenshot that shows how to edit a profile.](media/migrate-okta-federation-to-azure-active-directory/profile-editing.png) +    ![Screenshot of the Okta admin portal. Profile settings are visible, and the Division box contains ToAzureAD.](media/migrate-okta-federation-to-azure-active-directory/profile-editing.png) -1. Try to sign in to the [Microsoft 356 portal](https://portal.office.com) as the modified user. If your user isn't a part of the managed authentication pilot, you'll notice that your action loops. To exit the loop, add the user to the managed authentication experience. +1. Try to sign in to the [Microsoft 365 portal](https://portal.office.com) as the modified user. If your user isn't part of the managed authentication pilot, your action enters a loop. To exit the loop, add the user to the managed authentication experience. ## Test Okta app access on pilot members -After you configure the Okta app in Azure AD and you configure the IDP in the Okta portal, you must assign the application to users. +After you configure the Okta app in Azure AD and you configure the IDP in the Okta portal, assign the application to users. 1. In the Azure portal, select **Azure Active Directory** > **Enterprise applications**. @@ -228,15 +227,15 @@ After you configure the Okta app in Azure AD and you configure the IDP in the Ok >[!NOTE] >You can add users and groups only from the **Enterprise applications** page. You can't add users from the **App registrations** menu. -    ![Screenshot that shows how to add a group.](media/migrate-okta-federation-to-azure-active-directory/add-group.png) +    ![Screenshot of the Users and groups page of the Azure portal. A group called Managed Authentication Staging Group is visible.](media/migrate-okta-federation-to-azure-active-directory/add-group.png) 1. After about 15 minutes, sign in as one of the managed authentication pilot users and go to [My Apps](https://myapplications.microsoft.com). -    ![Screenshot that shows the My Apps gallery.](media/migrate-okta-federation-to-azure-active-directory/my-applications.png) +    ![Screenshot that shows the My Apps gallery. An icon for Okta Application Access is visible.](media/migrate-okta-federation-to-azure-active-directory/my-applications.png) 1. Select the **Okta Application Access** tile to return the user to the Okta home page. -## Test-managed authentication on pilot members +## Test managed authentication on pilot members After you configure the Okta reverse-federation app, have your users conduct full testing on the managed authentication experience. We recommend that you set up company branding to help your users recognize the tenant they're signing in to. For more information, see [Add branding to your organization's Azure AD sign-in page](../fundamentals/customize-branding.md).
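The group assignment step above uses the portal, which is the documented path. As an optional, illustrative sketch only, the same assignment could be scripted with the AzureAD PowerShell module. The group and application names below are the examples from the screenshots, and the empty role ID is an assumption that the Okta app defines no specific app roles.

```powershell
# Illustrative sketch only; not part of the documented portal steps.
# The names below are the examples shown in the screenshots.
$group = Get-AzureADGroup -SearchString "Managed Authentication Staging Group"
$sp    = Get-AzureADServicePrincipal -SearchString "Okta Application Access"

# Assign the group to the enterprise application.
# An empty GUID grants default access when the app defines no app roles (assumption).
New-AzureADGroupAppRoleAssignment -ObjectId $group.ObjectId `
    -PrincipalId $group.ObjectId `
    -ResourceId $sp.ObjectId `
    -Id ([Guid]::Empty)
```

If `-SearchString` matches more than one object, pick the intended group or service principal before running the assignment.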
diff --git a/articles/active-directory/manage-apps/migrate-okta-sign-on-policies-to-azure-active-directory-conditional-access.md b/articles/active-directory/manage-apps/migrate-okta-sign-on-policies-to-azure-active-directory-conditional-access.md index 3771f2b7b3a1d..11983dc2f8dbe 100644 --- a/articles/active-directory/manage-apps/migrate-okta-sign-on-policies-to-azure-active-directory-conditional-access.md +++ b/articles/active-directory/manage-apps/migrate-okta-sign-on-policies-to-azure-active-directory-conditional-access.md @@ -1,6 +1,5 @@ --- title: Tutorial to migrate Okta sign-on policies to Azure Active Directory Conditional Access -titleSuffix: Active Directory description: In this tutorial, you learn how to migrate Okta sign-on policies to Azure Active Directory Conditional Access. services: active-directory author: gargi-sinha diff --git a/articles/active-directory/manage-apps/migrate-okta-sync-provisioning-to-azure-active-directory.md b/articles/active-directory/manage-apps/migrate-okta-sync-provisioning-to-azure-active-directory.md index 3d123b3b11237..202289e699e8d 100644 --- a/articles/active-directory/manage-apps/migrate-okta-sync-provisioning-to-azure-active-directory.md +++ b/articles/active-directory/manage-apps/migrate-okta-sync-provisioning-to-azure-active-directory.md @@ -1,22 +1,21 @@ --- -title: Tutorial to migrate Okta sync provisioning to Azure AD Connect-based synchronization -titleSuffix: Active Directory -description: In this tutorial, you learn how to migrate your Okta sync provisioning to Azure AD Connect-based synchronization. +title: Migrate Okta sync provisioning to Azure AD Connect +description: Learn how to migrate user provisioning from Okta to Azure Active Directory (Azure AD). See how to use Azure AD Connect server or Azure AD cloud provisioning. services: active-directory-b2c author: gargi-sinha manager: martinco - ms.service: active-directory ms.workload: identity ms.topic: how-to -ms.date: 09/01/2021 +ms.date: 05/19/2022 ms.author: gasinh ms.subservice: app-mgmt +ms.custom: kr2b-contr-experiment --- # Tutorial: Migrate Okta sync provisioning to Azure AD Connect-based synchronization -In this tutorial, you'll learn how your organization can currently migrate User provisioning from Okta to Azure Active Directory (Azure AD) and migrate either User sync or Universal sync to Azure AD Connect. This capability will enable further provisioning into Azure AD and Office 365. +In this tutorial, you'll learn how your organization can migrate user provisioning from Okta to Azure Active Directory (Azure AD) and migrate either User Sync or Universal Sync to Azure AD Connect. This capability enables further provisioning into Azure AD and Office 365. Migrating synchronization platforms isn't a small change. Each step of the process mentioned in this article should be validated against your own environment before you remove Azure AD Connect from staging mode or enable the Azure AD cloud provisioning agent. @@ -24,21 +23,21 @@ Migrating synchronization platforms isn't a small change. Each step of the proce When you switch from Okta provisioning to Azure AD, you have two choices. You can use either an Azure AD Connect server or Azure AD cloud provisioning. To understand the differences between the two, read the [comparison article from Microsoft](../cloud-sync/what-is-cloud-sync.md#comparison-between-azure-ad-connect-and-cloud-sync). -Azure AD cloud provisioning will be the most familiar migration path for Okta customers who use Universal or User sync. 
The cloud provisioning agents are lightweight. They can be installed on or near domain controllers like the Okta directory sync agents. Don't install them on the same server. +Azure AD cloud provisioning is the most familiar migration path for Okta customers who use Universal Sync or User Sync. The cloud provisioning agents are lightweight. You can install them on or near domain controllers like the Okta directory sync agents. Don't install them on the same server. Use an Azure AD Connect server if your organization needs to take advantage of any of the following technologies when you synchronize users: - Device synchronization: Hybrid Azure AD join or Hello for Business -- Passthrough authentication -- More than 150,000-object support +- Pass-through authentication +- Support for more than 150,000 objects - Support for writeback >[!NOTE] ->All prerequisites should be taken into consideration when you install Azure AD Connect or Azure AD cloud provisioning. To learn more before you continue with installation, see [Prerequisites for Azure AD Connect](../hybrid/how-to-connect-install-prerequisites.md). +>Take all prerequisites into consideration when you install Azure AD Connect or Azure AD cloud provisioning. To learn more before you continue with installation, see [Prerequisites for Azure AD Connect](../hybrid/how-to-connect-install-prerequisites.md). ## Confirm ImmutableID attribute synchronized by Okta -ImmutableID is the core attribute used to tie synchronized objects to their on-premises counterparts. Okta takes the Active Directory objectGUID of an on-premises object and converts it to a Base64 encoded string. Then, by default it stamps that string to the ImmutableID field in Azure AD. +ImmutableID is the core attribute used to tie synchronized objects to their on-premises counterparts. Okta takes the Active Directory objectGUID of an on-premises object and converts it to a Base64-encoded string. By default, it then stamps that string to the ImmutableID field in Azure AD. You can connect to Azure AD PowerShell and examine the current ImmutableID value. If you've never used the Azure AD PowerShell module, run `Install-Module AzureAD` in an administrative PowerShell session before you run the following commands: @@ -52,15 +51,15 @@ If you already have the module, you might receive a warning to update to the lat After the module is installed, import it and follow these steps to connect to the Azure AD service: -1. Enter your global administrator credentials in the modern authentication window. +1. Enter your global administrator credentials in the authentication window. - ![Screenshot that shows import-module.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/import-module.png) + ![Screenshot of the Azure A D PowerShell window. The install-module, import-module, and connect commands are visible with their output.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/import-module.png) -1. After you connect to the tenant, verify the settings for your ImmutableID values. The example shown uses Okta defaults of objectGUID to ImmutableID. +1. After you connect to the tenant, verify the settings for your ImmutableID values. The following example uses the Okta default approach of converting the objectGUID into the ImmutableID. 
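The verification in that step appears only as a screenshot. As a rough sketch, the same check in the connected Azure AD PowerShell session looks something like the following, where the UPN is a placeholder for one of your synchronized users:

```powershell
# Rough sketch of the ImmutableID check; "user@contoso.com" is a placeholder UPN.
Get-AzureADUser -ObjectId "user@contoso.com" |
    Select-Object UserPrincipalName, ImmutableId
```

If the returned ImmutableId is the Base64-encoded form of the user's on-premises objectGUID, the Okta default mapping described earlier is in place.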
- ![Screenshot that shows Okta defaults of objectGUID to ImmutableID.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/okta-default-objectid.png) + ![Screenshot of the Azure A D PowerShell window. The Get-AzureADUser command is visible. Its output includes the UserPrincipalName and the ImmutableId.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/okta-default-objectid.png) -1. There are several ways to manually confirm the objectGUID to Base64 conversion on-premises. For individual validation, use this example: +1. There are several ways to manually confirm the conversion from objectGUID to Base64 on-premises. To test an individual value, use these commands: ```PowerShell Get-ADUser onpremupn | fl objectguid @@ -68,27 +67,27 @@ After the module is installed, import it and follow these steps to connect to th [system.convert]::ToBase64String(([GUID]$objectGUID).ToByteArray()) ``` - ![Screenshot that shows how to manually change Okta objectGUID to ImmutableID.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/manual-objectguid.png) + ![Screenshot of the Azure A D PowerShell window. The commands that convert an objectGUID to an ImmutableID are visible with their output.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/manual-objectguid.png) ## Mass validation methods for objectGUID -Before you cut over to Azure AD Connect, it's critical to validate that the ImmutableID values in Azure AD are going to exactly match their on-premises values. +Before you move to Azure AD Connect, it's critical to validate that the ImmutableID values in Azure AD exactly match their on-premises values. -The example will grab *all* on-premises Azure AD users and export a list of their objectGUID values and ImmutableID values already calculated to a CSV file. +The following command gets *all* on-premises Azure AD users and exports a list of their objectGUID values and ImmutableID values already calculated to a CSV file. -1. Run these commands in PowerShell on a domain controller on-premises: +1. Run this command in PowerShell on an on-premises domain controller: ```PowerShell - Get-ADUser -Filter * -Properties objectGUID | Select -Object + Get-ADUser -Filter * -Properties objectGUID | Select-Object UserPrincipalName, Name, objectGUID, @{Name = 'ImmutableID'; Expression = { [system.convert]::ToBase64String((GUID).tobytearray()) } } | export-csv C:\Temp\OnPremIDs.csv ``` - ![Screenshot that shows domain controller on-premises commands.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/domain-controller.png) + ![Screenshot of a .csv file that lists sample output data. Columns include UserPrincipalName, Name, objectGUID, and ImmutableID.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/domain-controller.png) -1. Run these commands in an Azure AD PowerShell session to gather the already synchronized values: +1. 
Run this command in an Azure AD PowerShell session to list the already synchronized values: ```powershell Get-AzureADUser -all $true | Where-Object {$_.dirsyncenabled -like @@ -98,9 +97,9 @@ The example will grab *all* on-premises Azure AD users and export a list of thei ImmutableID | export-csv C:\\temp\\AzureADSyncedIDS.csv ``` - ![Screenshot that shows an Azure AD PowerShell session.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/azure-ad-powershell.png) + ![Screenshot of a .csv file that lists sample output data. Columns include UserPrincipalName, objectGUID, and ImmutableID.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/azure-ad-powershell.png) - After you have both exports, confirm that the ImmutableID for each user matches. + After you have both exports, confirm that each user's ImmutableID values match. >[!IMPORTANT] >If your ImmutableID values in the cloud don't match objectGUID values, you've modified the defaults for Okta sync. You've likely chosen another attribute to determine ImmutableID values. Before you move on to the next section, it's critical to identify which source attribute is populating ImmutableID values. Ensure that you update the attribute Okta is syncing before you disable Okta sync. @@ -109,18 +108,24 @@ The example will grab *all* on-premises Azure AD users and export a list of thei After you've prepared your list of source and destination targets, it's time to install an Azure AD Connect server. If you've opted to use Azure AD Connect cloud provisioning, skip this section. -1. Continue with [downloading and installing Azure AD Connect](../hybrid/how-to-connect-install-custom.md) to your chosen server. +1. Download and install Azure AD Connect on your chosen server by following the instructions in [Custom installation of Azure Active Directory Connect](../hybrid/how-to-connect-install-custom.md). + +1. In the left panel, select **Identifying users**. -1. On the **Identifying users** page, under **Select how users should be identified with Azure AD**, select the **Choose a specific attribute** option. Then, select **mS-DS-ConsistencyGUID** if you haven't modified the Okta defaults. +1. On the **Uniquely identifying your users** page, under **Select how users should be identified with Azure AD**, select **Choose a specific attribute**. Then select **mS-DS-ConsistencyGUID** if you haven't modified the Okta defaults. >[!WARNING] - >This is the most critical step on this page. Before you select **Next**, ensure that the attribute you're selecting for a source anchor is what *currently* populates your existing Azure AD users. If you select the wrong attribute, you must uninstall and reinstall Azure AD Connect to reselect this option. + >This step is critical. Ensure that the attribute that you select for a source anchor is what *currently* populates your existing Azure AD users. If you select the wrong attribute, you need to uninstall and reinstall Azure AD Connect to reselect this option. - ![Screenshot that shows mS-DS-ConsistencyGuid.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/consistency-guid.png) + ![Screenshot of the Azure A D Connect window. The page is titled Uniquely identifying your users, and the mS-DS-ConsistencyGuid attribute is selected.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/consistency-guid.png) + +1. Select **Next**. -1. 
On the **Configure** page, make sure to select the **Enable staging mode** checkbox. Then select **Install**. +1. In the left panel, select **Configure**. - ![Screenshot that shows the Enable staging mode checkbox.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/enable-staging-mode.png) +1. On the **Ready to configure** page, select **Enable staging mode**. Then select **Install**. + + ![Screenshot of the Azure A D Connect window. The page is titled Ready to configure, and the Enable staging mode checkbox is selected.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/enable-staging-mode.png) 1. After the configuration is complete, select **Exit**. @@ -128,29 +133,29 @@ After you've prepared your list of source and destination targets, it's time to 1. Open **Synchronization Service** as an administrator. - ![Screenshot that shows opening Synchronization Service.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/open-sync-service.png) + ![Screenshot that shows the Synchronization Service shortcut menus, with More and Run as administrator selected.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/open-sync-service.png) -1. Check that **Full Synchronization** to the domain.onmicrosoft.com connector space has users displaying under the **Connectors with Flow Updates** tab. +1. Find the **Full Synchronization** to the domain.onmicrosoft.com connector space. Check that there are users under the **Connectors with Flow Updates** tab. - ![Screenshot that shows the Connectors with Flow Updates tab.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/connector-flow-update.png) + ![Screenshot of the Synchronization Service window. The Connectors with Flow Updates tab is selected.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/connector-flow-update.png) 1. Verify there are no deletions pending in the export. Select the **Connectors** tab, and then highlight the domain.onmicrosoft.com connector space. Then select **Search Connector Space**. - ![Screenshot that shows the Search Connector Space action.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/search-connector-space.png) + ![Screenshot of the Synchronization Service window. The Search Connector Space action is selected.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/search-connector-space.png) -1. In the **Search Connector Space** dialog, select the **Scope** dropdown and select **Pending Export**. +1. In the **Search Connector Space** dialog, under **Scope**, select **Pending Export**. - ![Screenshot that shows Pending Export.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/pending-export.png) + ![Screenshot of the Search Connector Space dialog. In the Scope list, Pending Export is selected.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/pending-export.png) 1. Select **Delete** and then select **Search**. If all objects have matched properly, there should be zero matching records for **Deletes**. Record any objects pending deletion and their on-premises values. 
- ![Screenshot that shows deleted matching records.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/delete-matching-records.png) + ![Screenshot of the Search Connector Space dialog. In the search results, Text is highlighted that indicates that there were zero matching records.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/delete-matching-records.png) -1. Clear **Delete**, and select **Add** and **Modify**, followed by a search. You should see update functions for all users currently being synchronized to Azure AD via Okta. Add any new objects that Okta isn't currently syncing, but that exist in the organizational unit (OU) structure that was selected during the Azure AD Connect installation. +1. Clear **Delete**, and select **Add** and **Modify**. Then select **Search**. You should see update functions for all users currently being synchronized to Azure AD via Okta. Add any new objects that Okta isn't currently syncing, but that exist in the organizational unit (OU) structure that was selected during the Azure AD Connect installation. - ![Screenshot that shows adding a new object.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/add-new-object.png) + ![Screenshot of the Search Connector Space dialog. In the search results, seven records are visible.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/add-new-object.png) -1. Double-clicking on updates shows what Azure AD Connect will communicate with Azure AD. +1. To see what Azure AD Connect will communicate with Azure AD, double-click an update. 1. If there are any **add** functions for a user who already exists in Azure AD, their on-premises account doesn't match their cloud account. AD Connect has determined it will create a new object and record any new adds that are unexpected. Make sure to correct the ImmutableID value in Azure AD before you exit the staging mode. @@ -158,14 +163,14 @@ After you've prepared your list of source and destination targets, it's time to Verify that your updates still include all attributes expected in Azure AD. If multiple attributes are being deleted, you might need to manually populate these on-premises AD values before you remove the staging mode. - ![Screenshot that shows populating on-premises add values.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/on-premises-ad-values.png) + ![Screenshot of the Connector Space Object Properties window. The attributes for user John Smith are visible.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/on-premises-ad-values.png) >[!NOTE] - >Before you continue to the next step, ensure all user attributes are syncing properly and show on the **Pending Export** tab as expected. If they're deleted, make sure their ImmutableID values match and the user is in one of the selected OUs for synchronization. + >Before you continue to the next step, ensure all user attributes are syncing properly and appear on the **Pending Export** tab as expected. If they're deleted, make sure their ImmutableID values match and the user is in one of the selected OUs for synchronization. ## Install Azure AD cloud sync agents -After you've prepared your list of source and destination targets, it's time to [install and configure Azure AD cloud sync agents](../cloud-sync/tutorial-single-forest.md). 
If you've opted to use an Azure AD Connect server, skip this section. +After you've prepared your list of source and destination targets, install and configure Azure AD cloud sync agents by following the instructions in [Tutorial: Integrate a single forest with a single Azure AD tenant](../cloud-sync/tutorial-single-forest.md). If you've opted to use an Azure AD Connect server, skip this section. ## Disable Okta provisioning to Azure AD @@ -173,14 +178,14 @@ After you've verified the Azure AD Connect installation and your pending exports 1. Go to your Okta portal, select **Applications**, and then select your Okta app used to provision users to Azure AD. Open the **Provisioning** tab and select the **Integration** section. - ![Screenshot that shows the Integration section in Okta.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/integration-section.png) + ![Screenshot that shows the Integration section in the Okta portal.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/integration-section.png) -1. Select **Edit**, clear the **Enable API integration** option and select **Save**. +1. Select **Edit**, clear the **Enable API integration** option, and select **Save**. - ![Screenshot that shows editing the Enable API integration in Okta.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/edit-api-integration.png) + ![Screenshot that shows the Integration section in the Okta portal. A message on the page says provisioning is not enabled.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/edit-api-integration.png) >[!NOTE] - >If you have multiple Office 365 apps handling provisioning to Azure AD, ensure they're all switched off. + >If you have multiple Office 365 apps that handle provisioning to Azure AD, ensure they're all switched off. ## Disable staging mode in Azure AD Connect @@ -188,41 +193,43 @@ After you disable Okta provisioning, the Azure AD Connect server is ready to beg 1. Run the installation wizard from the desktop again and select **Configure**. - ![Screenshot that shows the Azure AD Connect server.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/azure-ad-connect-server.png) + ![Screenshot of the Azure A D Connect window. The welcome page is visible with a Configure button at the bottom.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/azure-ad-connect-server.png) 1. Select **Configure staging mode** and then select **Next**. Enter your global administrator credentials. - ![Screenshot that shows the Configure staging mode option.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/configure-staging-mode.png) + ![Screenshot of the Azure A D Connect window. On the left, Tasks is selected. On the Additional tasks page, Configure staging mode is selected.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/configure-staging-mode.png) -1. Clear the **Enable staging mode** option and select **Next**. +1. Clear **Enable staging mode** and select **Next**. - ![Screenshot that shows clearing the Enable staging mode option.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/uncheck-enable-staging-mode.png) + ![Screenshot of the Azure A D Connect window. 
On the left, Staging Mode is selected. On the Configure staging mode page, nothing is selected.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/uncheck-enable-staging-mode.png) 1. Select **Configure** to continue. - ![Screenshot that shows selecting the Configure button.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/ready-to-configure.png) + ![Screenshot of the Ready to configure page in Azure A D Connect. On the left, Configure is selected. A Configure button is also visible.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/ready-to-configure.png) -1. After the configuration completes, open the **Synchronization Service** as an administrator. View the **Export** on the domain.onmicrosoft.com connector. Verify that all additions, updates, and deletions are done as expected. +1. After the configuration finishes, open the **Synchronization Service** as an administrator. View the **Export** on the domain.onmicrosoft.com connector. Verify that all additions, updates, and deletions are done as expected. - ![Screenshot that shows verifying the sync service.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/verify-sync-service.png) + ![Screenshot of the Synchronization Service window. An export line is selected, and export statistics like the number of adds, updates, and deletes are visible.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/verify-sync-service.png) -You've now successfully migrated to Azure AD Connect server-based provisioning. Updates and expansions to the feature set of Azure AD Connect can be done by rerunning the installation wizard. +You've now successfully migrated to Azure AD Connect server-based provisioning. You can update and expand the feature set of Azure AD Connect by rerunning the installation wizard. ## Enable cloud sync agents -After you disable Okta provisioning, the Azure AD cloud sync agent is ready to begin synchronizing objects. Return to the [Azure AD portal](https://aad.portal.azure.com/). +After you disable Okta provisioning, the Azure AD cloud sync agent is ready to begin synchronizing objects. + +1. Go to the [Azure AD portal](https://aad.portal.azure.com/). -1. Modify the **Configuration** profile to **Enabled**. +1. In the **Configuration** profile, select **Enable**. 1. Return to the provisioning menu and select **Logs**. -1. Evaluate that the provisioning connector has properly updated in-place objects. The cloud sync agents are nondestructive. They'll fail their updates if a match didn't occur properly. +1. Check that the provisioning connector has properly updated in-place objects. The cloud sync agents are nondestructive. Their updates fail if a match isn't found. 1. If a user is mismatched, make the necessary updates to bind the ImmutableID values. Then restart the cloud provisioning sync. 
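To rebind a mismatched user, the objectGUID-to-Base64 conversion shown earlier in this article can be recomputed and written back to the cloud account. The following is a minimal sketch: the identities are placeholders, it assumes the account is in a state where its ImmutableId can be updated, and it assumes you run the `Get-ADUser` step where the ActiveDirectory module is available and the `Set-AzureADUser` step in a connected Azure AD PowerShell session.

```powershell
# Minimal sketch: recompute the expected ImmutableId from the on-premises objectGUID
# and stamp it onto the mismatched cloud account. Identities below are placeholders.
$objectGuid  = (Get-ADUser -Identity "jsmith").ObjectGUID          # on-premises lookup
$immutableId = [System.Convert]::ToBase64String($objectGuid.ToByteArray())

Set-AzureADUser -ObjectId "jsmith@contoso.com" -ImmutableId $immutableId
```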
## Next steps -For more information about migrating from Okta to Azure AD, see: +For more information about migrating from Okta to Azure AD, see these resources: - [Migrate applications from Okta to Azure AD](migrate-applications-from-okta-to-azure-active-directory.md) - [Migrate Okta federation to Azure AD managed authentication](migrate-okta-federation-to-azure-active-directory.md) diff --git a/articles/active-directory/manage-apps/migration-resources.md b/articles/active-directory/manage-apps/migration-resources.md index a7a0441ebc8cb..6cd96df187828 100644 --- a/articles/active-directory/manage-apps/migration-resources.md +++ b/articles/active-directory/manage-apps/migration-resources.md @@ -1,7 +1,6 @@ --- title: Resources for migrating apps to Azure Active Directory description: Resources to help you migrate application access and authentication to Azure Active Directory (Azure AD). -titleSuffix: Azure AD services: active-directory author: omondiatieno manager: CelesteDG diff --git a/articles/active-directory/manage-apps/myapps-overview.md b/articles/active-directory/manage-apps/myapps-overview.md index daec7973e0f55..9242d9210dcab 100644 --- a/articles/active-directory/manage-apps/myapps-overview.md +++ b/articles/active-directory/manage-apps/myapps-overview.md @@ -1,7 +1,6 @@ --- title: My Apps portal overview description: Learn about how to manage applications in the My Apps portal. -titleSuffix: Azure AD services: active-directory author: saipradeepb23 manager: CelesteDG diff --git a/articles/active-directory/manage-apps/one-click-sso-tutorial.md b/articles/active-directory/manage-apps/one-click-sso-tutorial.md index f3d059010028b..5f048d98fa477 100644 --- a/articles/active-directory/manage-apps/one-click-sso-tutorial.md +++ b/articles/active-directory/manage-apps/one-click-sso-tutorial.md @@ -1,7 +1,6 @@ --- title: One-click, single sign-on (SSO) configuration of your Azure Marketplace application description: Steps for one-click configuration of SSO for your application from the Azure Marketplace. -titleSuffix: Azure AD services: active-directory author: AllisonAm manager: CelesteDG diff --git a/articles/active-directory/manage-apps/overview-application-gallery.md b/articles/active-directory/manage-apps/overview-application-gallery.md index 686fbade09537..0e5b3c3cb5318 100644 --- a/articles/active-directory/manage-apps/overview-application-gallery.md +++ b/articles/active-directory/manage-apps/overview-application-gallery.md @@ -1,7 +1,6 @@ --- title: Overview of the Azure Active Directory application gallery description: An overview of using the Azure Active Directory application gallery. 
-titleSuffix: Azure AD services: active-directory author: eringreenlee manager: CelesteDG diff --git a/articles/active-directory/manage-apps/overview-assign-app-owners.md b/articles/active-directory/manage-apps/overview-assign-app-owners.md index 3fc082a5ead7e..91368dcc6d26d 100644 --- a/articles/active-directory/manage-apps/overview-assign-app-owners.md +++ b/articles/active-directory/manage-apps/overview-assign-app-owners.md @@ -1,6 +1,5 @@ --- title: Overview of enterprise application ownership -titleSuffix: Azure AD description: Learn about enterprise application ownership in Azure Active Directory services: active-directory author: saipradeepb23 diff --git a/articles/active-directory/manage-apps/plan-an-application-integration.md b/articles/active-directory/manage-apps/plan-an-application-integration.md index a23ad1a62ad17..35c4cfd58358c 100644 --- a/articles/active-directory/manage-apps/plan-an-application-integration.md +++ b/articles/active-directory/manage-apps/plan-an-application-integration.md @@ -2,7 +2,6 @@ title: Get started integrating Azure Active Directory with apps description: This article is a getting started guide for integrating Azure Active Directory (AD) with on-premises applications, and cloud applications. -titleSuffix: Azure AD services: active-directory author: eringreenlee manager: CelesteDG @@ -12,7 +11,6 @@ ms.topic: conceptual ms.workload: identity ms.date: 04/05/2021 ms.author: ergreenl -ms.reviewer: davidmu --- # Integrating Azure Active Directory with applications getting started guide diff --git a/articles/active-directory/manage-apps/plan-sso-deployment.md b/articles/active-directory/manage-apps/plan-sso-deployment.md index 81b677c5f8fa3..b47e2b34586bc 100644 --- a/articles/active-directory/manage-apps/plan-sso-deployment.md +++ b/articles/active-directory/manage-apps/plan-sso-deployment.md @@ -1,7 +1,6 @@ --- title: Plan a single sign-on deployment description: Plan the deployment of single sign-on in Azure Active Directory. -titleSuffix: Azure AD services: active-directory author: AllisonAm manager: CelesteDG @@ -91,7 +90,7 @@ The following SSO protocols are available to use: - **OpenID Connect and OAuth** - Choose OpenID Connect and OAuth 2.0 if the application you're connecting to supports it. For more information, see [OAuth 2.0 and OpenID Connect protocols on the Microsoft identity platform](../develop/active-directory-v2-protocols.md). For steps to implement OpenID Connect SSO, see [Set up OIDC-based single sign-on for an application in Azure Active Directory](add-application-portal-setup-oidc-sso.md). -- **SAML** - Choose SAML whenever possible for existing applications that do not use OpenID Connect or OAuth. For more information, see [Single Sign-On SAML protocol](../develop/single-sign-on-saml-protocol.md). For a quick introduction to implementing SAML SSO, see [Quickstart: Set up SAML-based single sign-on for an application in Azure Active Directory](add-application-portal-setup-sso.md). +- **SAML** - Choose SAML whenever possible for existing applications that do not use OpenID Connect or OAuth. For more information, see [Single Sign-On SAML protocol](../develop/single-sign-on-saml-protocol.md). - **Password-based** - Choose password-based when the application has an HTML sign-in page. Password-based SSO is also known as password vaulting. Password-based SSO enables you to manage user access and passwords to web applications that don't support identity federation. 
It's also useful where several users need to share a single account, such as to your organization's social media app accounts. @@ -108,4 +107,5 @@ The following SSO protocols are available to use: - **Header-based** - Choose header-based single sign-on when the application uses headers for authentication. For more information, see [Header-based SSO](../app-proxy/application-proxy-configure-single-sign-on-with-headers.md). ## Next steps -- [Manage access to apps](what-is-access-management.md) + +- Consider completing the single sign-on training in [Enable single sign-on for applications by using Azure Active Directory](/learn/modules/enable-single-sign-on). diff --git a/articles/active-directory/manage-apps/prevent-domain-hints-with-home-realm-discovery.md b/articles/active-directory/manage-apps/prevent-domain-hints-with-home-realm-discovery.md index 8f8cf7a4ae3e6..4cb8bf6888421 100644 --- a/articles/active-directory/manage-apps/prevent-domain-hints-with-home-realm-discovery.md +++ b/articles/active-directory/manage-apps/prevent-domain-hints-with-home-realm-discovery.md @@ -1,6 +1,5 @@ --- title: Prevent sign-in auto-acceleration using Home Realm Discovery policy -titleSuffix: Azure AD description: Learn how to prevent domain_hint auto-acceleration to federated IDPs. services: active-directory author: nickludwig diff --git a/articles/active-directory/manage-apps/protect-against-consent-phishing.md b/articles/active-directory/manage-apps/protect-against-consent-phishing.md index f2b5e0b6b7390..a234c4d7ea14a 100644 --- a/articles/active-directory/manage-apps/protect-against-consent-phishing.md +++ b/articles/active-directory/manage-apps/protect-against-consent-phishing.md @@ -1,6 +1,5 @@ --- title: Protecting against consent phishing -titleSuffix: Azure AD description: Learn ways of mitigating against app-based consent phishing attacks using Azure AD. services: active-directory author: Chrispine-Chiedo diff --git a/articles/active-directory/manage-apps/review-admin-consent-requests.md b/articles/active-directory/manage-apps/review-admin-consent-requests.md index b6a1935f72d25..246f3aff6e29e 100644 --- a/articles/active-directory/manage-apps/review-admin-consent-requests.md +++ b/articles/active-directory/manage-apps/review-admin-consent-requests.md @@ -1,6 +1,5 @@ --- title: Review and take action on admin consent requests -titleSuffix: Azure AD description: Learn how to review and take action on admin consent requests that were created after you were designated as a reviewer. services: active-directory author: eringreenlee @@ -9,7 +8,7 @@ ms.service: active-directory ms.subservice: app-mgmt ms.workload: identity ms.topic: how-to -ms.date: 03/22/2021 +ms.date: 05/27/2022 ms.author: ergreenl ms.reviewer: ergreenl @@ -18,7 +17,7 @@ ms.reviewer: ergreenl --- # Review admin consent requests -In this article, you learn how to review and take action on admin consent requests. To review and act on consent requests, you must be designated as a reviewer. As a reviewer, you only see admin consent requests that were created after you were designated as a reviewer. +In this article, you learn how to review and take action on admin consent requests. To review and act on consent requests, you must be designated as a reviewer. As a reviewer, you can view all admin consent requests but you can only act on those requests that were created after you were designated as a reviewer. ## Prerequisites @@ -36,12 +35,20 @@ To review the admin consent requests and take action: 1. 
In the filter search box, type and select **Azure Active Directory**. 1. From the navigation menu, select **Enterprise applications**. 1. Under **Activity**, select **Admin consent requests**. -1. Select the application that is being requested. -1. Review details about the request: +1. Select **My Pending** tab to view and act on the pending requests. +1. Select the application that is being requested from the list. +1. Review details about the request: + - To view the application details, select the **App details** tab. - To see who is requesting access and why, select the **Requested by** tab. - To see what permissions are being requested by the application, select **Review permissions and consent**. + :::image type="content" source="media/configure-admin-consent-workflow/review-consent-requests.png" alt-text="Screenshot of the admin consent requests in the portal."::: + 1. Evaluate the request and take the appropriate action: - **Approve the request**. To approve a request, grant admin consent to the application. Once a request is approved, all requestors are notified that they have been granted access. Approving a request allows all users in your tenant to access the application unless otherwise restricted with user assignment. - - **Deny the request**. To deny a request, you must provide a justification that will be provided to all requestors. Once a request is denied, all requestors are notified that they have been denied access to the application. Denying a request won't prevent users from requesting admin consent to the app again in the future. + - **Deny the request**. To deny a request, you must provide a justification that will be provided to all requestors. Once a request is denied, all requestors are notified that they have been denied access to the application. Denying a request won't prevent users from requesting admin consent to the application again in the future. - **Block the request**. To block a request, you must provide a justification that will be provided to all requestors. Once a request is blocked, all requestors are notified they've been denied access to the application. Blocking a request creates a service principal object for the application in your tenant in a disabled state. Users won't be able to request admin consent to the application in the future. + +## Next steps +- [Review permissions granted to apps](manage-application-permissions.md) +- [Grant tenant-wide admin consent](grant-admin-consent.md) diff --git a/articles/active-directory/manage-apps/secure-hybrid-access-integrations.md b/articles/active-directory/manage-apps/secure-hybrid-access-integrations.md index a8240cafc6050..df6de1bcdffaa 100644 --- a/articles/active-directory/manage-apps/secure-hybrid-access-integrations.md +++ b/articles/active-directory/manage-apps/secure-hybrid-access-integrations.md @@ -1,7 +1,6 @@ --- title: Secure hybrid access with Azure AD partner integration description: Help customers discover and migrate SaaS applications into Azure AD and connect apps that use legacy authentication methods with Azure AD. 
-titleSuffix: Azure AD services: active-directory author: gargi-sinha manager: martinco diff --git a/articles/active-directory/manage-apps/secure-hybrid-access.md b/articles/active-directory/manage-apps/secure-hybrid-access.md index 041d8cd216639..8056d9fd80368 100644 --- a/articles/active-directory/manage-apps/secure-hybrid-access.md +++ b/articles/active-directory/manage-apps/secure-hybrid-access.md @@ -1,7 +1,6 @@ --- title: Secure hybrid access description: This article describes partner solutions for integrating your legacy on-premises, public cloud, or private cloud applications with Azure AD. -titleSuffix: Azure AD services: active-directory author: gargi-sinha manager: martinco diff --git a/articles/active-directory/manage-apps/silverfort-azure-ad-integration.md b/articles/active-directory/manage-apps/silverfort-azure-ad-integration.md index 3dc9967a759e5..547a2a5c055f9 100644 --- a/articles/active-directory/manage-apps/silverfort-azure-ad-integration.md +++ b/articles/active-directory/manage-apps/silverfort-azure-ad-integration.md @@ -1,7 +1,6 @@ --- title: Secure hybrid access with Azure AD and Silverfort description: In this tutorial, learn how to integrate Silverfort with Azure AD for secure hybrid access -titleSuffix: Azure AD services: active-directory author: gargi-sinha manager: martinco @@ -14,11 +13,13 @@ ms.author: gasinh ms.collection: M365-identity-device-management --- -# Tutorial: Configure Silverfort with Azure Active Directory for secure hybrid access +# Tutorial: Configure Secure Hybrid Access with Azure Active Directory and Silverfort -In this tutorial, learn how to integrate Silverfort with Azure Active Directory (Azure AD). [Silverfort](https://www.silverfort.com/) uses innovative agent-less and proxy-less technology to connect all your assets on-premises and in the cloud to Azure AD. This solution enables organizations to apply identity protection, visibility, and user experience across all environments in Azure AD. It enables universal risk-based monitoring and assessment of authentication activity for on-premises and cloud environments, and proactively prevents threats. +[Silverfort](https://www.silverfort.com/) uses innovative agent-less and proxy-less technology to connect all your assets on-premises and in the cloud to Azure AD. This solution enables organizations to apply identity protection, visibility, and user experience across all environments in Azure AD. It enables universal risk-based monitoring and assessment of authentication activity for on-premises and cloud environments, and proactively prevents threats. -Silverfort can seamlessly connect any type of asset into Azure AD, as if it was a modern web application. For example: +In this tutorial, learn how to integrate your existing on premises Silverfort implementation with Azure Active Directory (Azure AD) for [hybrid access](../devices/concept-azure-ad-join-hybrid.md). + +Silverfort seamlessly connects assets with Azure AD. These **bridged** assets appear as regular applications in Azure AD and can be protected with Conditional Access, single-sign-on (SSO), multifactor authentication, auditing and more. 
Use Silverfort to connect assets including: - Legacy and homegrown applications @@ -30,17 +31,13 @@ Silverfort can seamlessly connect any type of asset into Azure AD, as if it was - Infrastructure and industrial systems -These **bridged** assets appear as regular applications in Azure AD and can be protected with Conditional Access, single-sign-on (SSO), multifactor authentication, auditing and more. - -This solution combines all corporate assets and third-party Identity and Access Management (IAM) platforms. For example, Active Directory, Active Directory Federation Services (ADFS), and Remote Authentication Dial-In User Service (RADIUS) on Azure AD, including hybrid and multi-cloud environments. +Silverfort integrates your corporate assets and third-party Identity and Access Management (IAM) platforms. This includes Active Directory, Active Directory Federation Services (ADFS), and Remote Authentication Dial-In User Service (RADIUS) on Azure AD, including hybrid and multi-cloud environments. -## Scenario description +Follow the steps in this tutorial to configure and test the Silverfort Azure AD bridge in your Azure AD tenant to communicate with your existing Silverfort implementation. Once configured, you can create Silverfort authentication policies that bridge authentication requests from various identity sources to Azure AD for SSO. After an application is bridged, it can be managed in Azure AD. -In this guide, you'll configure and test the Silverfort Azure AD bridge in your Azure AD tenant. +## Silverfort with Azure AD Authentication Architecture -Once configured, you can create Silverfort authentication policies that bridge authentication requests from various identity sources to Azure AD for SSO. Once an application is bridged, it can be managed in Azure AD. - -The following diagram shows the components included in the solution and sequence of authentication orchestrated by Silverfort. +The following diagram describes the authentication architecture orchestrated by Silverfort in a hybrid environment. ![image shows the architecture diagram](./media/silverfort-azure-ad-integration/silverfort-architecture-diagram.png) @@ -56,23 +53,21 @@ The following diagram shows the components included in the solution and sequence ## Prerequisites -To set up SSO for an application that you added to your Azure AD tenant, you'll need: +You must already have Silverfort deployed in your tenant or infrastructure in order to perform this tutorial. To deploy Silverfort in your tenant or infrastructure, [contact Silverfort](https://www.silverfort.com/). You will need to install Silverfort Desktop app on relevant workstations. + +This tutorial requires you to set up Silverfort Azure AD Adapter in your Azure AD tenant. You'll need: - An Azure account with an active subscription. You can create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). - One of the following roles in your Azure account - Global administrator, Cloud application administrator, Application administrator, or Owner of the service principal. -- An application that supports SSO and that was already pre-configured and added to the Azure AD gallery. The Silverfort application in the Azure AD gallery is already pre-configured. You'll need to add it as an Enterprise application from the gallery. - -## Onboard with Silverfort - -To deploy Silverfort in your tenant or infrastructure, [contact Silverfort](https://www.silverfort.com/). Install Silverfort Desktop app on relevant workstations. 
+- The Silverfort Azure AD Adapter application in the Azure AD gallery is pre-configured to support SSO. You'll need to add the Silverfort Azure AD Adapter to your tenant as an Enterprise application from the gallery. ## Configure Silverfort and create a policy 1. From a browser, log in to the **Silverfort admin console**. -2. In the main menu, navigate to **Settings**, and then scroll to +2. In the main menu, navigate to **Settings** and then scroll to **Azure AD Bridge Connector** in the General section. Confirm your tenant ID, and then select **Authorize**. ![image shows azure ad bridge connector](./media/silverfort-azure-ad-integration/azure-ad-bridge-connector.png) @@ -91,9 +86,9 @@ To deploy Silverfort in your tenant or infrastructure, [contact Silverfort](http ![image shows enterprise application](./media/silverfort-azure-ad-integration/enterprise-application.png) -5. In the Silverfot admin console, navigate to the **Policies** page, and select **Create Policy**. +5. In the Silverfort admin console, navigate to the **Policies** page and select **Create Policy**. -6. The **New Policy** dialog will appear. Enter a **Policy Name**, that would indicate the application name that will be created in Azure. For example, if you're adding multiple servers or applications under this policy, name it to reflect the resources covered by the policy. In the example, we'll create a policy for the *SL-APP1* server. +6. The **New Policy** dialog will appear. Enter a **Policy Name** that indicates the name of the application that will be created in Azure. For example, if you're adding multiple servers or applications under this policy, name it to reflect the resources covered by the policy. In the example, we'll create a policy for the *SL-APP1* server. ![image shows define policy](./media/silverfort-azure-ad-integration/define-policy.png) @@ -131,10 +126,12 @@ To deploy Silverfort in your tenant or infrastructure, [contact Silverfort](http ![image shows add policy](./media/silverfort-azure-ad-integration/add-policy.png) -14. Return to the Azure AD console, and navigate to **Enterprise applications**. The new Silverfort application should now appear. This application can now be included in [CA policies](../authentication/tutorial-enable-azure-mfa.md?bc=/azure/active-directory/conditional-access/breadcrumb/toc.json&toc=/azure/active-directory/conditional-access/toc.json%23create-a-conditional-access-policy). +14. Return to the Azure AD console, and navigate to **Enterprise applications**. The new Silverfort application should now appear. This application can now be included in [Conditional Access policies](../authentication/tutorial-enable-azure-mfa.md?bc=/azure/active-directory/conditional-access/breadcrumb/toc.json&toc=/azure/active-directory/conditional-access/toc.json%23create-a-conditional-access-policy).
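After the policy is added, you can optionally verify the new enterprise application from the command line. The following is a minimal Microsoft Graph PowerShell sketch, not part of the official Silverfort steps; the display name *SL-APP1* is assumed from the example policy above, and you'd substitute your own policy name.

```powershell
# Connect with a scope that can read service principals (Microsoft Graph PowerShell SDK).
Connect-MgGraph -Scopes 'Application.Read.All'

# Look up the enterprise application created by the Silverfort policy.
# 'SL-APP1' is the example policy name used earlier - replace it with yours.
Get-MgServicePrincipal -Filter "displayName eq 'SL-APP1'" |
    Select-Object DisplayName, AppId, Id
```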
## Next steps - [Silverfort Azure AD adapter](https://azuremarketplace.microsoft.com/marketplace/apps/aad.silverfortazureadadapter?tab=overview) - [Silverfort resources](https://www.silverfort.com/resources/) + +- [Contact Silverfort](https://www.silverfort.com/company/contact/) diff --git a/articles/active-directory/manage-apps/tenant-restrictions.md b/articles/active-directory/manage-apps/tenant-restrictions.md index e3746717cd6cc..99b0546777c58 100644 --- a/articles/active-directory/manage-apps/tenant-restrictions.md +++ b/articles/active-directory/manage-apps/tenant-restrictions.md @@ -1,7 +1,6 @@ --- title: Use tenant restrictions to manage access to SaaS apps description: How to use tenant restrictions to manage which users can access apps based on their Azure AD tenant. -titleSuffix: Azure AD author: vimrang manager: CelesteDG ms.service: active-directory diff --git a/articles/active-directory/manage-apps/toc.yml b/articles/active-directory/manage-apps/toc.yml index 6a1f8f96037b6..d65df4e42b564 100644 --- a/articles/active-directory/manage-apps/toc.yml +++ b/articles/active-directory/manage-apps/toc.yml @@ -14,8 +14,6 @@ href: add-application-portal.md - name: Assign a user href: add-application-portal-assign-users.md - - name: Enable single sign-on - href: add-application-portal-setup-sso.md - name: View applications href: view-applications-portal.md - name: Delete applications @@ -246,6 +244,8 @@ href: methods-for-removing-user-access.md - name: Resources items: + - name: Video learning + href: app-management-videos.md - name: Support and help options for developers href: ../develop/developer-support-help-options.md?context=%2fazure%2factive-directory%2fmanage-apps%2fcontext%2fmanage-apps-context - name: Azure feedback forum @@ -258,5 +258,3 @@ href: https://azure.microsoft.com/updates/?product=active-directory - name: Stack Overflow href: https://stackoverflow.com/questions/tagged/azure-active-directory - - name: Videos - href: https://azure.microsoft.com/documentation/videos/index/?services=active-directory diff --git a/articles/active-directory/manage-apps/troubleshoot-app-publishing.md b/articles/active-directory/manage-apps/troubleshoot-app-publishing.md index dfda9ea42def7..06e7f38162ab9 100644 --- a/articles/active-directory/manage-apps/troubleshoot-app-publishing.md +++ b/articles/active-directory/manage-apps/troubleshoot-app-publishing.md @@ -1,16 +1,15 @@ --- title: Your sign-in was blocked description: Troubleshoot a blocked sign-in to the Microsoft Application Network portal. -titleSuffix: Azure AD services: active-directory -author: davidmu1 +author: omondiatieno manager: CelesteDG ms.service: active-directory ms.subservice: app-mgmt ms.topic: troubleshooting ms.workload: identity ms.date: 1/18/2022 -ms.author: davidmu +ms.author: jomondi ms.reviewer: jeedes #Customer intent: As a publisher of an application, I want troubleshoot a blocked sign-in to the Microsoft Application Network portal. --- diff --git a/articles/active-directory/manage-apps/troubleshoot-password-based-sso.md b/articles/active-directory/manage-apps/troubleshoot-password-based-sso.md index cf63c9046f518..e869333651d26 100644 --- a/articles/active-directory/manage-apps/troubleshoot-password-based-sso.md +++ b/articles/active-directory/manage-apps/troubleshoot-password-based-sso.md @@ -1,7 +1,6 @@ --- title: Troubleshoot password-based single sign-on description: Troubleshoot issues with an Azure AD app that's configured for password-based single sign-on. 
-titleSuffix: Azure AD author: AllisonAm manager: CelesteDG ms.service: active-directory diff --git a/articles/active-directory/manage-apps/troubleshoot-saml-based-sso.md b/articles/active-directory/manage-apps/troubleshoot-saml-based-sso.md index fb7f73c417118..ca3248f9de045 100644 --- a/articles/active-directory/manage-apps/troubleshoot-saml-based-sso.md +++ b/articles/active-directory/manage-apps/troubleshoot-saml-based-sso.md @@ -1,7 +1,6 @@ --- title: Troubleshoot SAML-based single sign-on description: Troubleshoot issues with an Azure AD app that's configured for SAML-based single sign-on. -titleSuffix: Azure AD services: active-directory author: AllisonAm manager: CelesteDG diff --git a/articles/active-directory/manage-apps/tutorial-govern-monitor.md b/articles/active-directory/manage-apps/tutorial-govern-monitor.md index b760d1a4c28ee..df90d6a18f946 100644 --- a/articles/active-directory/manage-apps/tutorial-govern-monitor.md +++ b/articles/active-directory/manage-apps/tutorial-govern-monitor.md @@ -1,6 +1,5 @@ --- title: "Tutorial: Govern and monitor applications" -titleSuffix: Azure AD description: In this tutorial, you learn how to govern and monitor an application in Azure Active Directory. author: omondiatieno manager: CelesteDG diff --git a/articles/active-directory/manage-apps/tutorial-manage-access-security.md b/articles/active-directory/manage-apps/tutorial-manage-access-security.md index c2c9d4af917d9..40bf2054f082c 100644 --- a/articles/active-directory/manage-apps/tutorial-manage-access-security.md +++ b/articles/active-directory/manage-apps/tutorial-manage-access-security.md @@ -1,6 +1,5 @@ --- title: "Tutorial: Manage application access and security" -titleSuffix: Azure AD description: In this tutorial, you learn how to manage access to an application in Azure Active Directory and make sure it's secure. author: omondiatieno manager: CelesteDG diff --git a/articles/active-directory/manage-apps/tutorial-manage-certificates-for-federated-single-sign-on.md b/articles/active-directory/manage-apps/tutorial-manage-certificates-for-federated-single-sign-on.md index 1669c4bf876b6..3b2b6c5b6638f 100644 --- a/articles/active-directory/manage-apps/tutorial-manage-certificates-for-federated-single-sign-on.md +++ b/articles/active-directory/manage-apps/tutorial-manage-certificates-for-federated-single-sign-on.md @@ -1,7 +1,6 @@ --- title: "Tutorial: Manage federation certificates" description: In this tutorial, you'll learn how to customize the expiration date for your federation certificates, and how to renew certificates that will soon expire. -titleSuffix: Azure AD services: active-directory author: davidmu1 manager: CelesteDG @@ -9,7 +8,7 @@ ms.service: active-directory ms.subservice: app-mgmt ms.workload: identity ms.topic: tutorial -ms.date: 03/31/2022 +ms.date: 05/27/2022 ms.author: davidmu ms.reviewer: jeedes ms.collection: M365-identity-device-management @@ -19,13 +18,28 @@ ms.collection: M365-identity-device-management # Tutorial: Manage certificates for federated single sign-on -In this article, we cover common questions and information related to certificates that Azure Active Directory (Azure AD) creates to establish federated single sign-on (SSO) to your software as a service (SaaS) applications. Add applications from the Azure AD app gallery or by using a non-gallery application template. Configure the application by using the federated SSO option. 
+In this article, we cover common questions and information related to certificates that Azure Active Directory (Azure AD) creates to establish federated single sign-on (SSO) to your software as a service (SaaS) applications. Add applications from the Azure AD application gallery or by using a non-gallery application template. Configure the application by using the federated SSO option. This tutorial is relevant only to apps that are configured to use Azure AD SSO through [Security Assertion Markup Language](https://wikipedia.org/wiki/Security_Assertion_Markup_Language) (SAML) federation. +Using the information in this tutorial, an administrator of the application learns how to: + +> [!div class="checklist"] +> * Generate certificates for gallery and non-gallery applications +> * Customize the expiration dates for certificates +> * Add email notification address for certificate expiration dates +> * Renew certificates + +## Prerequisites + +- An Azure account with an active subscription. If you don't already have one, [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). +- One of the following roles: Global Administrator, Privileged Role Administrator, Cloud Application Administrator, or Application Administrator. +- An enterprise application that has been configured in your Azure AD tenant. + + ## Auto-generated certificate for gallery and non-gallery applications -When you add a new application from the gallery and configure a SAML-based sign-on (by selecting **Single sign-on** > **SAML** from the application overview page), Azure AD generates a certificate for the application that is valid for three years. To download the active certificate as a security certificate (**.cer**) file, return to that page (**SAML-based sign-on**) and select a download link in the **SAML Signing Certificate** heading. You can choose between the raw (binary) certificate or the Base64 (base 64-encoded text) certificate. For gallery applications, this section might also show a link to download the certificate as federation metadata XML (an **.xml** file), depending on the requirement of the application. +When you add a new application from the gallery and configure a SAML-based sign-on (by selecting **Single sign-on** > **SAML** from the application overview page), Azure AD generates a self-signed certificate for the application that is valid for three years. To download the active certificate as a security certificate (**.cer**) file, return to that page (**SAML-based sign-on**) and select a download link in the **SAML Signing Certificate** heading. You can choose between the raw (binary) certificate or the Base64 (base 64-encoded text) certificate. For gallery applications, this section might also show a link to download the certificate as federation metadata XML (an **.xml** file), depending on the requirement of the application. You can also download an active or inactive certificate by selecting the **SAML Signing Certificate** heading's **Edit** icon (a pencil), which displays the **SAML Signing Certificate** page. Select the ellipsis (**...**) next to the certificate you want to download, and then choose which certificate format you want. You have the additional option to download the certificate in privacy-enhanced mail (PEM) format. This format is identical to Base64 but with a **.pem** file name extension, which isn't recognized in Windows as a certificate format. @@ -75,7 +89,7 @@ Next, download the new certificate in the correct format, upload it to the appli 1. 
When you want to roll over to the new certificate, go back to the **SAML Signing Certificate** page, and in the newly saved certificate row, select the ellipsis (**...**) and select **Make certificate active**. The status of the new certificate changes to **Active**, and the previously active certificate changes to a status of **Inactive**. 1. Continue following the application's SAML sign-on configuration instructions that you displayed earlier, so that you can upload the SAML signing certificate in the correct encoding format. -If your application doesn't have any validation for the certificate's expiration, and the certificate matches in both Azure Active Directory and your application, your app is still accessible despite having an expired certificate. Ensure your application can validate the certificate's expiration date. +If your application doesn't have any validation for the certificate's expiration, and the certificate matches in both Azure Active Directory and your application, your application is still accessible despite having an expired certificate. Ensure your application can validate the certificate's expiration date. ## Add email notification addresses for certificate expiration @@ -101,15 +115,14 @@ If a certificate is about to expire, you can renew it using a procedure that res 1. In the newly saved certificate row, select the ellipsis (**...**) and then select **Make certificate active**. 1. Skip the next two steps. -1. If the app can only handle one certificate at a time, pick a downtime interval to perform the next step. (Otherwise, if the application doesn’t automatically pick up the new certificate but can handle more than one signing certificate, you can perform the next step anytime.) -1. Before the old certificate expires, follow the instructions in the [Upload and activate a certificate](#upload-and-activate-a-certificate) section earlier. If your application certificate isn't updated after a new certificate is updated in Azure Active Directory, authentication on your app may fail. +1. If the application can only handle one certificate at a time, pick a downtime interval to perform the next step. (Otherwise, if the application doesn’t automatically pick up the new certificate but can handle more than one signing certificate, you can perform the next step anytime.) +1. Before the old certificate expires, follow the instructions in the [Upload and activate a certificate](#upload-and-activate-a-certificate) section earlier. If your application certificate isn't updated after a new certificate is updated in Azure Active Directory, authentication on your application may fail. 1. Sign in to the application to make sure that the certificate works correctly. -If your application doesn't validate the certificate expiration configured in Azure Active Directory, and the certificate matches in both Azure Active Directory and your application, your app is still accessible despite having an expired certificate. Ensure your application can validate certificate expiration. +If your application doesn't validate the certificate expiration configured in Azure Active Directory, and the certificate matches in both Azure Active Directory and your application, your application is still accessible despite having an expired certificate. Ensure your application can validate certificate expiration. 
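If you'd rather track certificate expiration dates from a script than from the portal, a minimal Microsoft Graph PowerShell sketch like the following can list the signing certificates on an enterprise application. The service principal object ID is a placeholder, and the property names assume the Microsoft Graph PowerShell SDK.

```powershell
# Requires the Microsoft Graph PowerShell SDK.
Connect-MgGraph -Scopes 'Application.Read.All'

# Object ID of the enterprise application (service principal) - placeholder value.
$servicePrincipalId = '00000000-0000-0000-0000-000000000000'

# List each certificate on the service principal with its start and expiration dates.
(Get-MgServicePrincipal -ServicePrincipalId $servicePrincipalId).KeyCredentials |
    Select-Object DisplayName, Type, Usage, StartDateTime, EndDateTime
```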
## Related articles -- [Tutorials for integrating SaaS applications with Azure Active Directory](../saas-apps/tutorial-list.md) - [Application management with Azure Active Directory](what-is-application-management.md) - [Single sign-on to applications in Azure Active Directory](what-is-single-sign-on.md) - [Debug SAML-based single sign-on to applications in Azure Active Directory](./debug-saml-sso-issues.md) diff --git a/articles/active-directory/manage-apps/v2-howto-app-gallery-listing.md b/articles/active-directory/manage-apps/v2-howto-app-gallery-listing.md index 629e3a5042d64..3db4406735822 100644 --- a/articles/active-directory/manage-apps/v2-howto-app-gallery-listing.md +++ b/articles/active-directory/manage-apps/v2-howto-app-gallery-listing.md @@ -1,7 +1,6 @@ --- title: Publish your application description: Learn how to publish your application in the Azure Active Directory application gallery. -titleSuffix: Azure AD services: active-directory author: eringreenlee manager: CelesteDG diff --git a/articles/active-directory/manage-apps/view-applications-portal.md b/articles/active-directory/manage-apps/view-applications-portal.md index 8a2444317fe74..c4010616721a5 100644 --- a/articles/active-directory/manage-apps/view-applications-portal.md +++ b/articles/active-directory/manage-apps/view-applications-portal.md @@ -1,7 +1,6 @@ --- title: 'Quickstart: View enterprise applications' description: View the enterprise applications that are registered to use your Azure Active Directory tenant. -titleSuffix: Azure AD services: active-directory author: AllisonAm manager: CelesteDG diff --git a/articles/active-directory/manage-apps/ways-users-get-assigned-to-applications.md b/articles/active-directory/manage-apps/ways-users-get-assigned-to-applications.md index e27608712b976..020e54bdae6d3 100644 --- a/articles/active-directory/manage-apps/ways-users-get-assigned-to-applications.md +++ b/articles/active-directory/manage-apps/ways-users-get-assigned-to-applications.md @@ -1,7 +1,6 @@ --- title: Understand how users are assigned to apps description: Understand how users get assigned to an app that is using Azure Active Directory for identity management. -titleSuffix: Azure AD services: active-directory author: eringreenlee manager: CelesteDG @@ -11,7 +10,6 @@ ms.workload: identity ms.topic: reference ms.date: 01/07/2021 ms.author: ergreenl -ms.reviewer: davidmu --- # Understand how users are assigned to apps diff --git a/articles/active-directory/manage-apps/what-is-access-management.md b/articles/active-directory/manage-apps/what-is-access-management.md index a9f62a71390a3..0d587bbd18087 100644 --- a/articles/active-directory/manage-apps/what-is-access-management.md +++ b/articles/active-directory/manage-apps/what-is-access-management.md @@ -1,6 +1,5 @@ --- title: Manage access to apps -titleSuffix: Azure AD description: Describes how Azure Active Directory enables organizations to specify the apps to which each user has access. 
services: active-directory author: eringreenlee @@ -11,7 +10,6 @@ ms.workload: identity ms.topic: conceptual ms.date: 09/23/2021 ms.author: ergreenl -ms.reviewer: davidmu --- # Manage access to an application diff --git a/articles/active-directory/manage-apps/what-is-application-management.md b/articles/active-directory/manage-apps/what-is-application-management.md index 1b81ea5ba6eac..6e4f111d6d3f8 100644 --- a/articles/active-directory/manage-apps/what-is-application-management.md +++ b/articles/active-directory/manage-apps/what-is-application-management.md @@ -1,7 +1,6 @@ --- title: What is application management? description: An overview of managing the lifecycle of an application in Azure Active Directory. -titleSuffix: Azure AD services: active-directory author: omondiatieno manager: CelesteDG @@ -44,7 +43,6 @@ If you develop your own business application, you can register it with Azure AD If you want to make your application available through the gallery, you can [submit a request to have it added](../manage-apps/v2-howto-app-gallery-listing.md). - ### On-premises applications If you want to continue using an on-premises application, but take advantage of what Azure AD offers, connect it with Azure AD using [Azure AD Application Proxy](../app-proxy/application-proxy.md). Application Proxy can be implemented when you want to publish on-premises applications externally. Remote users who need access to internal applications can then access them in a secure manner. @@ -69,13 +67,13 @@ As an administrator, you can [grant tenant-wide admin consent](grant-admin-conse ### Single sign-on -Consider implementing SSO in your application. You can manually configure most applications for SSO. The most popular options in Azure AD are [SAML-based SSO and OpenID Connect-based SSO](../develop/active-directory-v2-protocols.md). Before you start, make sure that you understand the requirements for SSO and how to [plan for deployment](plan-sso-deployment.md). For a simple example of how to configure SAML-based SSO for an enterprise application in your Azure AD tenant, see [Quickstart: Enable single sign-on for an enterprise application](add-application-portal-setup-sso.md). +Consider implementing SSO in your application. You can manually configure most applications for SSO. The most popular options in Azure AD are [SAML-based SSO and OpenID Connect-based SSO](../develop/active-directory-v2-protocols.md). Before you start, make sure that you understand the requirements for SSO and how to [plan for deployment](plan-sso-deployment.md). For training related to configuring SAML-based SSO for an enterprise application in your Azure AD tenant, see [Enable single sign-on for an application by using Azure Active Directory](/learn/modules/enable-single-sign-on). ### User, group, and owner assignment By default, all users can access your enterprise applications without being assigned to them. However, if you want to assign the application to a set of users, your application requires user assignment. For a simple example of how to create and assign a user account to an application, see [Quickstart: Create and assign a user account](add-application-portal-assign-users.md). -If included in your subscription, [assign groups to an application](assign-user-or-group-access-portal.md) so that you can delegate ongoing access management to the group owner. 
+If included in your subscription, [assign groups to an application](assign-user-or-group-access-portal.md) so that you can delegate ongoing access management to the group owner. [Assigning owners](assign-app-owners.md) is a simple way to grant the ability to manage all aspects of Azure AD configuration for an application. As an owner, a user can manage the organization-specific configuration of the application. diff --git a/articles/active-directory/manage-apps/what-is-single-sign-on.md b/articles/active-directory/manage-apps/what-is-single-sign-on.md index 3e8a31a3725bf..165ae9d20225b 100644 --- a/articles/active-directory/manage-apps/what-is-single-sign-on.md +++ b/articles/active-directory/manage-apps/what-is-single-sign-on.md @@ -1,7 +1,6 @@ --- title: What is single sign-on? description: Learn about single sign-on for enterprise applications in Azure Active Directory. -titleSuffix: Azure AD services: active-directory author: omondiatieno manager: CelesteDG @@ -68,4 +67,4 @@ If you're a user of an application, you likely don't care much about SSO details ## Next steps -- [Quickstart: Enable single sign on](add-application-portal-setup-sso.md) +- [Plan for single sign-on deployment](plan-sso-deployment.md) diff --git a/articles/active-directory/manage-apps/whats-new-docs.md b/articles/active-directory/manage-apps/whats-new-docs.md index 9b3464e0f81ca..7d33f37032cbf 100644 --- a/articles/active-directory/manage-apps/whats-new-docs.md +++ b/articles/active-directory/manage-apps/whats-new-docs.md @@ -15,6 +15,19 @@ manager: CelesteDG Welcome to what's new in Azure Active Directory application management documentation. This article lists new docs that have been added and those that have had significant updates in the last three months. To learn what's new with the application management service, see [What's new in Azure Active Directory](../fundamentals/whats-new.md). 
+## May 2022 + +### New articles + +- [My Apps portal overview](myapps-overview.md) + +### Updated articles + +- [Tutorial: Configure Datawiza with Azure Active Directory for secure hybrid access](datawiza-with-azure-ad.md) +- [Tutorial: Manage certificates for federated single sign-on](tutorial-manage-certificates-for-federated-single-sign-on.md) +- [Tutorial: Migrate Okta federation to Azure Active Directory-managed authentication](migrate-okta-federation-to-azure-active-directory.md) +- [Tutorial: Migrate Okta sync provisioning to Azure AD Connect-based synchronization](migrate-okta-sync-provisioning-to-azure-active-directory.md) + ## March 2022 ### New articles diff --git a/articles/active-directory/managed-identities-azure-resources/TOC.yml b/articles/active-directory/managed-identities-azure-resources/TOC.yml index 2866395dd710c..b664b589be2dd 100644 --- a/articles/active-directory/managed-identities-azure-resources/TOC.yml +++ b/articles/active-directory/managed-identities-azure-resources/TOC.yml @@ -76,6 +76,8 @@ href: qs-configure-rest-vm.md - name: Azure SDKs href: qs-configure-sdk-windows-vm.md + - name: Using Azure Policy + href: how-to-assign-managed-identity-via-azure-policy.md - name: Configure managed identities on virtual machine scale sets items: @@ -89,6 +91,8 @@ href: qs-configure-template-windows-vmss.md - name: REST href: qs-configure-rest-vmss.md + - name: Using Azure Policy + href: how-to-assign-managed-identity-via-azure-policy.md - name: Use managed identities on VMs items: @@ -105,7 +109,7 @@ - name: CLI href: howto-assign-access-cli.md - name: PowerShell - href: howto-assign-access-powershell.md + href: howto-assign-access-powershell.md - name: Manage user-assigned managed identities href: how-manage-user-assigned-managed-identities.md diff --git a/articles/active-directory/managed-identities-azure-resources/how-to-assign-app-role-managed-identity-powershell.md b/articles/active-directory/managed-identities-azure-resources/how-to-assign-app-role-managed-identity-powershell.md index f36eebdd4820a..3ce29deb5e82f 100644 --- a/articles/active-directory/managed-identities-azure-resources/how-to-assign-app-role-managed-identity-powershell.md +++ b/articles/active-directory/managed-identities-azure-resources/how-to-assign-app-role-managed-identity-powershell.md @@ -124,7 +124,7 @@ Connect-MgGraph -TenantId $tenantId -Scopes 'Application.Read.All','Application. # Look up the details about the server app's service principal and app role. $serverServicePrincipal = (Get-MgServicePrincipal -Filter "DisplayName eq '$serverApplicationName'") -$serverServicePrincipalObjectId = $serverServicePrincipal.ObjectId +$serverServicePrincipalObjectId = $serverServicePrincipal.Id $appRoleId = ($serverServicePrincipal.AppRoles | Where-Object {$_.Value -eq $appRoleName }).Id # Assign the managed identity access to the app role. diff --git a/articles/active-directory/managed-identities-azure-resources/how-to-assign-managed-identity-via-azure-policy.md b/articles/active-directory/managed-identities-azure-resources/how-to-assign-managed-identity-via-azure-policy.md new file mode 100644 index 0000000000000..b652ddf3cf110 --- /dev/null +++ b/articles/active-directory/managed-identities-azure-resources/how-to-assign-managed-identity-via-azure-policy.md @@ -0,0 +1,109 @@ +--- +title: Use Azure Policy to assign managed identities (preview) +description: Documentation for the Azure Policy that can be used to assign managed identities to Azure resources. 
+services: active-directory +author: karavar +manager: skwan +editor: barclayn +ms.service: active-directory +ms.subservice: msi +ms.topic: how-to +ms.workload: identity +ms.date: 05/23/2022 +ms.author: vakarand +ms.collection: M365-identity-device-management +--- + +# Use Azure Policy to assign managed identities (preview) + + +[Azure Policy](../../governance/policy/overview.md) helps enforce organizational standards and assess compliance at scale. Through its compliance dashboard, Azure Policy provides an aggregated view that helps administrators evaluate the overall state of the environment. You can drill down to per-resource, per-policy granularity. It also helps bring your resources to compliance through bulk remediation for existing resources and automatic remediation for new resources. Common use cases for Azure Policy include implementing governance for: + +- Resource consistency +- Regulatory compliance +- Security +- Cost +- Management + + +Policy definitions for these common use cases are already available in your Azure environment to help you get started. + +Azure Monitoring Agents require a [managed identity](overview.md) on the monitored Azure Virtual Machines (VMs). This document describes the behavior of a built-in Azure Policy provided by Microsoft that helps ensure a managed identity, needed for these scenarios, is assigned to VMs at scale. + +While using a system-assigned managed identity is possible, when used at scale (for example, for all VMs in a subscription) it results in a substantial number of identities created (and deleted) in Azure AD (Azure Active Directory). To avoid this churn of identities, it is recommended to use user-assigned managed identities, which can be created once and shared across multiple VMs. + +> [!NOTE] +> We recommend using a user-assigned managed identity per Azure subscription per Azure region. + +The policy is designed to implement this recommendation. + +## Policy definition and details + +- [Policy for Virtual Machines](https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fd367bd60-64ca-4364-98ea-276775bddd94) +- [Policy for Virtual Machine Scale Sets](https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F516187d4-ef64-4a1b-ad6b-a7348502976c) + + + +When executed, the policy takes the following actions: + +1. Create, if one doesn't exist, a new built-in user-assigned managed identity in the subscription and each Azure region based on the VMs that are in scope of the policy. +2. Once created, put a lock on the user-assigned managed identity so that it will not be accidentally deleted. +3. Assign the built-in user-assigned managed identity to Virtual Machines from the subscription and region based on the VMs that are in scope of the policy. +> [!NOTE] +> If the Virtual Machine already has exactly one user-assigned managed identity assigned, the policy skips assigning the built-in identity to this VM. This is to make sure assignment of the policy does not break applications that take a dependency on [the default behavior of the token endpoint on IMDS](managed-identities-faq.md#what-identity-will-imds-default-to-if-dont-specify-the-identity-in-the-request). + + +There are two scenarios for using the policy: + +- Let the policy create and use a “built-in” user-assigned managed identity. +- Bring your own user-assigned managed identity.
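Before or after the policy runs, you may want to check which identities a given VM currently has, for example to see whether it already has exactly one user-assigned managed identity and would therefore be skipped. A minimal Azure PowerShell sketch, with placeholder resource group and VM names:

```powershell
# Requires the Az PowerShell module and an authenticated session (Connect-AzAccount).
$resourceGroupName = 'my-rg'   # placeholder
$vmName            = 'my-vm'   # placeholder

# Read the VM's identity block: the identity type (SystemAssigned, UserAssigned, or both)
# and the resource IDs of any user-assigned identities currently attached.
$vm = Get-AzVM -ResourceGroupName $resourceGroupName -Name $vmName
$vm.Identity.Type
$vm.Identity.UserAssignedIdentities.Keys
```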
+ +The policy takes the following input parameters: + +- Bring-Your-Own-UAMI? - Should the policy create, if one doesn't exist, a new user-assigned managed identity? +- If set to true, then you must specify: + - Name of the managed identity + - Resource group in which the managed identity should be created. +- If set to false, then no additional input is needed. + - The policy will create the required user-assigned managed identity called “built-in-identity” in a resource group called “built-in-identity-rg”. +## Using the policy +### Creating the policy assignment + +The policy definition can be assigned to different scopes in Azure – at the management group, subscription, or a specific resource group. As policies need to be enforced all the time, the assignment operation is performed using a managed identity associated with the policy-assignment object. The policy assignment object supports both system-assigned and user-assigned managed identities. +For example, Joe can create a user-assigned managed identity called PolicyAssignmentMI. The built-in policy creates a user-assigned managed identity in each subscription and in each region with resources that are in scope of the policy assignment. The user-assigned managed identities created by the policy have the following resourceId format: + +> /subscriptions/your-subscription-id/resourceGroups/built-in-identity-rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/built-in-identity-{location} + +For example: +> /subscriptions/aaaabbbb-aaaa-bbbb-1111-111122223333/resourceGroups/built-in-identity-rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/built-in-identity-eastus + +### Required authorization + +For the PolicyAssignmentMI managed identity to be able to assign the built-in policy across the specified scope, it needs the following permissions, expressed as an Azure RBAC (Azure role-based access control) Role Assignment: + +| Principal| Role / Action | Scope | Purpose | +|----|----|----------------|----| +|PolicyAssignmentMI |Managed Identity Operator | /subscription/subscription-id/resourceGroups/built-in-identity <br> OR <br> Bring-your-own user-assigned managed identity |Required to assign the built-in identity to VMs.| +|PolicyAssignmentMI |Contributor | /subscription/subscription-id |Required to create the resource group that holds the built-in managed identity in the subscription. | +|PolicyAssignmentMI |Managed Identity Contributor | /subscription/subscription-id/resourceGroups/built-in-identity |Required to create a new user-assigned managed identity.| +|PolicyAssignmentMI |User Access Administrator | /subscription/subscription-id/resourceGroups/built-in-identity <br> OR <br> Bring-your-own user-assigned managed identity |Required to set a lock on the user-assigned managed identity created by the policy.| + + As the policy assignment object must have this permission ahead of time, PolicyAssignmentMI cannot be a system-assigned managed identity for this scenario. The user performing the policy assignment task must pre-authorize PolicyAssignmentMI ahead of time with the above role assignments. + +As you can see, the resultant least-privileged role required is Contributor at the subscription scope. + + + +## Known issues + +A race condition with another deployment that changes the identities assigned to a VM can lead to unexpected results. + +If there are two or more parallel deployments updating the same virtual machine and they all change the identity configuration of the virtual machine, then it is possible, under specific race conditions, that all expected identities will NOT be assigned to the machines. +For example, if the policy in this document is updating the managed identities of a VM and at the same time another process is also making changes to the managed identities section, then it is not guaranteed that all the expected identities are properly assigned to the VM. + + +## Next steps + +- [Deploy Azure Monitoring Agent](../../azure-monitor/overview.md) \ No newline at end of file diff --git a/articles/active-directory/managed-identities-azure-resources/how-to-use-vm-sign-in.md b/articles/active-directory/managed-identities-azure-resources/how-to-use-vm-sign-in.md index 440f98bfe8e88..2fb485dda1296 100644 --- a/articles/active-directory/managed-identities-azure-resources/how-to-use-vm-sign-in.md +++ b/articles/active-directory/managed-identities-azure-resources/how-to-use-vm-sign-in.md @@ -14,7 +14,7 @@ ms.workload: identity ms.date: 01/11/2022 ms.author: barclayn ms.collection: M365-identity-device-management -ms.custom: devx-track-azurepowershell, devx-track-azurecli +ms.tool: azure-cli, azure-powershell ms.devlang: azurecli --- diff --git a/articles/active-directory/managed-identities-azure-resources/managed-identities-faq.md b/articles/active-directory/managed-identities-azure-resources/managed-identities-faq.md index e2278607857d5..db70c56372745 100644 --- a/articles/active-directory/managed-identities-azure-resources/managed-identities-faq.md +++ b/articles/active-directory/managed-identities-azure-resources/managed-identities-faq.md @@ -148,6 +148,9 @@ Moving a user-assigned managed identity to a different resource group isn't supp Managed identity tokens are cached by the underlying Azure infrastructure for performance and resiliency purposes: the back-end services for managed identities maintain a cache per resource URI for around 24 hours. It can take several hours for changes to a managed identity's permissions to take effect, for example. Today, it is not possible to force a managed identity's token to be refreshed before its expiry. For more information, see [Limitation of using managed identities for authorization](managed-identity-best-practice-recommendations.md#limitation-of-using-managed-identities-for-authorization). +### What happens to tokens after a managed identity is deleted? +When a managed identity is deleted, an Azure resource that was previously associated with that identity can no longer request new tokens for that identity. Tokens that were issued before the identity was deleted will still be valid until their original expiry.
Some target endpoints' authorization systems may carry out additional checks in the directory for the identity, in which case the request will fail as the object can't be found. However, some systems, like Azure RBAC, will continue to accept requests from that token until it expires. + + ## Next steps - Learn [how managed identities work with virtual machines](how-managed-identities-work-vm.md) diff --git a/articles/active-directory/managed-identities-azure-resources/overview.md b/articles/active-directory/managed-identities-azure-resources/overview.md index 707752c8ef745..d0fab633d86d1 100644 --- a/articles/active-directory/managed-identities-azure-resources/overview.md +++ b/articles/active-directory/managed-identities-azure-resources/overview.md @@ -21,9 +21,9 @@ ms.collection: M365-identity-device-management # What are managed identities for Azure resources? -A common challenge for developers is the management of secrets and credentials used to secure communication between different components making up a solution. Managed identities eliminate the need for developers to manage credentials. +A common challenge for developers is the management of secrets, credentials, certificates, and keys used to secure communication between services. Managed identities eliminate the need for developers to manage these credentials. -Managed identities provide an identity for applications to use when connecting to resources that support Azure Active Directory (Azure AD) authentication. Applications may use the managed identity to obtain Azure AD tokens. With [Azure Key Vault](../../key-vault/general/overview.md), developers can use managed identities to access resources. Key Vault stores credentials in a secure manner and gives access to storage accounts. +While developers can securely store the secrets in [Azure Key Vault](../../key-vault/general/overview.md), services need a way to access Azure Key Vault. Managed identities provide an automatically managed identity in Azure Active Directory for applications to use when connecting to resources that support Azure Active Directory (Azure AD) authentication. Applications can use managed identities to obtain Azure AD tokens without having to manage any credentials. The following video shows how you can use managed identities:
            @@ -64,7 +64,12 @@ The following table shows the differences between the two types of managed ident ## How can I use managed identities for Azure resources? -[![This flowchart shows examples of how a developer may use managed identities to get access to resources from their code without managing authentication information.](media/overview/when-use-managed-identities.png)](media/overview/when-use-managed-identities.png#lightbox) +To use managed identities, follow these steps: +1. Create a managed identity in Azure. You can choose between a system-assigned managed identity or a user-assigned managed identity. +2. If you use a user-assigned managed identity, assign the managed identity to the "source" Azure resource, such as an Azure Logic App or an Azure Web App. +3. Authorize the managed identity to have access to the "target" service. +4. Use the managed identity to perform access. For this, you can use the Azure SDK with the Azure.Identity library. Some "source" resources offer connectors that know how to use managed identities for the connections. In that case, you use the identity as a feature of that "source" resource. + ## What Azure services support the feature? diff --git a/articles/active-directory/managed-identities-azure-resources/tutorial-linux-vm-access-nonaad.md b/articles/active-directory/managed-identities-azure-resources/tutorial-linux-vm-access-nonaad.md index 3e9dd37d105f6..337524b6a6d46 100644 --- a/articles/active-directory/managed-identities-azure-resources/tutorial-linux-vm-access-nonaad.md +++ b/articles/active-directory/managed-identities-azure-resources/tutorial-linux-vm-access-nonaad.md @@ -1,5 +1,5 @@ --- -title: Tutorial`:` Use a managed identity to access Azure Key Vault - Linux - Azure AD +title: "Tutorial: Use a managed identity to access Azure Key Vault - Linux" description: A tutorial that walks you through the process of using a Linux VM system-assigned managed identity to access Azure Resource Manager.
services: active-directory documentationcenter: '' diff --git a/articles/active-directory/managed-identities-azure-resources/tutorial-vm-managed-identities-cosmos.md b/articles/active-directory/managed-identities-azure-resources/tutorial-vm-managed-identities-cosmos.md index 3c576aacc6886..658e1afeeccc5 100644 --- a/articles/active-directory/managed-identities-azure-resources/tutorial-vm-managed-identities-cosmos.md +++ b/articles/active-directory/managed-identities-azure-resources/tutorial-vm-managed-identities-cosmos.md @@ -1,5 +1,5 @@ --- -title: Use managed identities from a virtual machine to access Cosmos DB | Microsoft Docs +title: Use managed identities from a virtual machine to access Cosmos DB description: Learn how to use managed identities with Windows VMs using the Azure portal, CLI, PowerShell, Azure Resource Manager template author: barclayn manager: karenhoran @@ -9,7 +9,8 @@ ms.workload: integration ms.topic: tutorial ms.date: 01/11/2022 ms.author: barclayn -ms.custom: ep-miar, devx-track-azurecli +ms.custom: ep-miar +ms.tool: azure-cli, azure-powershell ms.devlang: azurecli #Customer intent: As an administrator, I want to know how to access Cosmos DB from a virtual machine using a managed identity --- @@ -181,9 +182,9 @@ Depending on your API version, you have to take [different steps](qs-configure-t ```json "variables": { - "identityName": "my-user-assigned" - - }, + "identityName": "my-user-assigned" + + }, ``` Under the resources element, add the following entry to assign a user-assigned managed identity to your VM. Be sure to replace `````` with the name of the user-assigned managed identity you created. diff --git a/articles/active-directory/privileged-identity-management/pim-resource-roles-configure-alerts.md b/articles/active-directory/privileged-identity-management/pim-resource-roles-configure-alerts.md index d6cb49d9eedef..811c578a9426f 100644 --- a/articles/active-directory/privileged-identity-management/pim-resource-roles-configure-alerts.md +++ b/articles/active-directory/privileged-identity-management/pim-resource-roles-configure-alerts.md @@ -1,5 +1,5 @@ --- -title: Configure security alerts for Azure resource roles in Privileged Identity Management - Azure Active Directory | Microsoft Docs +title: Configure security alerts for Azure roles in Privileged Identity Management - Azure Active Directory | Microsoft Docs description: Learn how to configure security alerts for Azure resource roles in Azure AD Privileged Identity Management (PIM). services: active-directory documentationcenter: '' @@ -10,14 +10,14 @@ ms.topic: how-to ms.tgt_pltfrm: na ms.workload: identity ms.subservice: pim -ms.date: 10/07/2021 +ms.date: 06/03/2022 ms.author: curtand ms.reviewer: shaunliu ms.custom: pim ms.collection: M365-identity-device-management --- -# Configure security alerts for Azure resource roles in Privileged Identity Management +# Configure security alerts for Azure roles in Privileged Identity Management Privileged Identity Management (PIM) generates alerts when there is suspicious or unsafe activity in your Azure Active Directory (Azure AD) organization. When an alert is triggered, it shows up on the Alerts page. @@ -31,11 +31,15 @@ Select an alert to see a report that lists the users or roles that triggered the ## Alerts -| Alert | Severity | Trigger | Recommendation | -| --- | --- | --- | --- | -| **Too many owners assigned to a resource** |Medium |Too many users have the owner role. |Review the users in the list and reassign some to less privileged roles. 
| -| **Too many permanent owners assigned to a resource** |Medium |Too many users are permanently assigned to a role. |Review the users in the list and re-assign some to require activation for role use. | -| **Duplicate role created** |Medium |Multiple roles have the same criteria. |Use only one of these roles. | +Alert | Severity | Trigger | Recommendation +--- | --- | --- | --- +**Too many owners assigned to a resource** | Medium | Too many users have the owner role. | Review the users in the list and reassign some to less privileged roles. +**Too many permanent owners assigned to a resource** | Medium | Too many users are permanently assigned to a role. | Review the users in the list and re-assign some to require activation for role use. +**Duplicate role created** | Medium | Multiple roles have the same criteria. | Use only one of these roles. +**Roles are being assigned outside of Privileged Identity Management (Preview)** | High | A role is managed directly through the Azure IAM resource blade or the Azure Resource Manager API | Review the users in the list and remove them from privileged roles assigned outside of Privilege Identity Management. + +> [!NOTE] +> During the public preview of the **Roles are being assigned outside of Privileged Identity Management (Preview)** alert, Microsoft supports only permissions that are assigned at the subscription level. ### Severity diff --git a/articles/active-directory/reports-monitoring/concept-usage-insights-report.md b/articles/active-directory/reports-monitoring/concept-usage-insights-report.md index ff7b2d23cc465..18f4613655c17 100644 --- a/articles/active-directory/reports-monitoring/concept-usage-insights-report.md +++ b/articles/active-directory/reports-monitoring/concept-usage-insights-report.md @@ -13,9 +13,9 @@ ms.topic: conceptual ms.tgt_pltfrm: na ms.workload: identity ms.subservice: report-monitor -ms.date: 05/13/2019 +ms.date: 05/27/2022 ms.author: markvi -ms.reviewer: dhanyahk +ms.reviewer: besiler --- # Usage and insights report in the Azure Active Directory portal @@ -45,7 +45,7 @@ To access the data from the usage and insights report, you need: ## Use the report -The usage and insights report shows the list of applications with one or more sign-in attempts, and allows you to sort by the number of successful sign-ins, failed sign-ins, and the success rate. +The usage and insights report shows the list of applications with one or more sign-in attempts, and allows you to sort by the number of successful sign-ins, failed sign-ins, and the success rate. The sign-in graph per application only counts interactive user sign-ins. Clicking **Load more** at the bottom of the list allows you to view additional applications on the page. You can select the date range to view all applications that have been used within the range. diff --git a/articles/active-directory/reports-monitoring/howto-analyze-activity-logs-log-analytics.md b/articles/active-directory/reports-monitoring/howto-analyze-activity-logs-log-analytics.md index 9f1a68243576b..b8ae5a2299fd3 100644 --- a/articles/active-directory/reports-monitoring/howto-analyze-activity-logs-log-analytics.md +++ b/articles/active-directory/reports-monitoring/howto-analyze-activity-logs-log-analytics.md @@ -32,9 +32,9 @@ In this article, you learn how to analyze the Azure AD activity logs in your Log To follow along, you need: -* A Log Analytics workspace in your Azure subscription. Learn how to [create a Log Analytics workspace](../../azure-monitor/logs/quick-create-workspace.md). 
+* A [Log Analytics workspace](../../azure-monitor/logs/log-analytics-workspace-overview.md) in your Azure subscription. Learn how to [create a Log Analytics workspace](../../azure-monitor/logs/quick-create-workspace.md). * First, complete the steps to [route the Azure AD activity logs to your Log Analytics workspace](howto-integrate-activity-logs-with-log-analytics.md). -* [Access](../../azure-monitor/logs/manage-access.md#manage-access-using-workspace-permissions) to the log analytics workspace +* [Access](../../azure-monitor/logs/manage-access.md#azure-rbac) to the log analytics workspace * The following roles in Azure Active Directory (if you are accessing Log Analytics through Azure Active Directory portal) - Security Admin - Security Reader diff --git a/articles/active-directory/reports-monitoring/howto-use-azure-monitor-workbooks.md b/articles/active-directory/reports-monitoring/howto-use-azure-monitor-workbooks.md index 3f5af0fc8742c..77996e1393ead 100644 --- a/articles/active-directory/reports-monitoring/howto-use-azure-monitor-workbooks.md +++ b/articles/active-directory/reports-monitoring/howto-use-azure-monitor-workbooks.md @@ -63,7 +63,7 @@ To use Monitor workbooks, you need: - A [Log Analytics workspace](../../azure-monitor/logs/quick-create-workspace.md). -- [Access](../../azure-monitor/logs/manage-access.md#manage-access-using-workspace-permissions) to the log analytics workspace +- [Access](../../azure-monitor/logs/manage-access.md#azure-rbac) to the log analytics workspace - Following roles in Azure Active Directory (if you are accessing Log Analytics through Azure Active Directory portal) - Security administrator - Security reader @@ -72,7 +72,7 @@ To use Monitor workbooks, you need: ## Roles -To access workbooks in Azure Active Directory, you must have access to the underlying [Log Analytics](../../azure-monitor/logs/manage-access.md#manage-access-using-azure-permissions) workspace and be assigned to one of the following roles: +To access workbooks in Azure Active Directory, you must have access to the underlying [Log Analytics workspace](../../azure-monitor/logs/manage-access.md#azure-rbac) and be assigned to one of the following roles: - Global Reader diff --git a/articles/active-directory/reports-monitoring/reference-basic-info-sign-in-logs.md b/articles/active-directory/reports-monitoring/reference-basic-info-sign-in-logs.md index ccf4ede20ef65..1f9ca0537d77e 100644 --- a/articles/active-directory/reports-monitoring/reference-basic-info-sign-in-logs.md +++ b/articles/active-directory/reports-monitoring/reference-basic-info-sign-in-logs.md @@ -94,7 +94,7 @@ This attribute describes the type of cross-tenant access used by the actor to ac - `b2bDirectConnect` - A cross tenant sign-in performed by a B2B. - `microsoftSupport`- A cross tenant sign-in performed by a Microsoft support agent in a Microsoft customer tenant. - `serviceProvider` - A cross-tenant sign-in performed by a Cloud Service Provider (CSP) or similar admin on behalf of that CSP's customer in a tenant -- `unknownFutureValue` - A sentinel value used by MS Graph to help clients handle changes in enum lists. For more information, see [Best practices for working with Microsoft Graph](https://docs.microsoft.com/graph/best-practices-concept). +- `unknownFutureValue` - A sentinel value used by MS Graph to help clients handle changes in enum lists. For more information, see [Best practices for working with Microsoft Graph](/graph/best-practices-concept). 
If the sign-in did not the pass the boundaries of a tenant, the value is `none`. @@ -135,4 +135,4 @@ This value shows whether continuous access evaluation (CAE) was applied to the s ## Next steps * [Sign-in logs in Azure Active Directory](concept-sign-ins.md) -* [What is the sign-in diagnostic in Azure AD?](overview-sign-in-diagnostics.md) +* [What is the sign-in diagnostic in Azure AD?](overview-sign-in-diagnostics.md) \ No newline at end of file diff --git a/articles/active-directory/roles/TOC.yml b/articles/active-directory/roles/TOC.yml index 0e6f67b70db35..37d428b5216ae 100644 --- a/articles/active-directory/roles/TOC.yml +++ b/articles/active-directory/roles/TOC.yml @@ -58,7 +58,7 @@ href: groups-pim-eligible.md - name: Assign roles with scope using PowerShell href: custom-assign-powershell.md - - name: Assign roles using Graph API + - name: Assign roles using Microsoft Graph href: custom-assign-graph.md - name: Remove role assignments items: diff --git a/articles/active-directory/roles/admin-units-assign-roles.md b/articles/active-directory/roles/admin-units-assign-roles.md index 7a2847ac8a325..3684d5836b8be 100644 --- a/articles/active-directory/roles/admin-units-assign-roles.md +++ b/articles/active-directory/roles/admin-units-assign-roles.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.topic: how-to ms.subservice: roles ms.workload: identity -ms.date: 05/09/2022 +ms.date: 06/01/2022 ms.author: rolyon ms.reviewer: anandy ms.custom: oldportal;it-pro; @@ -102,6 +102,8 @@ $roleAssignment = New-AzureADMSRoleAssignment -DirectoryScopeId $directoryScope ### Microsoft Graph API +Use the [Add a scopedRoleMember](/graph/api/administrativeunit-post-scopedrolemembers) API to assign a role with administrative unit scope. + Request ```http @@ -144,6 +146,8 @@ Get-AzureADMSScopedRoleMembership -Id $adminUnit.Id | fl * ### Microsoft Graph API +Use the [List scopedRoleMembers](/graph/api/administrativeunit-list-scopedrolemembers) API to list role assignments with administrative unit scope. + Request ```http diff --git a/articles/active-directory/roles/admin-units-members-list.md b/articles/active-directory/roles/admin-units-members-list.md index d2f40f733afdd..75ea9bcae1cc1 100644 --- a/articles/active-directory/roles/admin-units-members-list.md +++ b/articles/active-directory/roles/admin-units-members-list.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.topic: how-to ms.subservice: roles ms.workload: identity -ms.date: 03/22/2022 +ms.date: 06/01/2022 ms.author: rolyon ms.reviewer: anandy ms.custom: oldportal;it-pro; @@ -24,7 +24,6 @@ In Azure Active Directory (Azure AD), you can list the users, groups, or devices - Azure AD Premium P1 or P2 license for each administrative unit administrator - Azure AD Free licenses for administrative unit members -- Privileged Role Administrator or Global Administrator - AzureAD module when using PowerShell - AzureADPreview module when using PowerShell for devices - Admin consent when using Graph explorer for Microsoft Graph API diff --git a/articles/active-directory/roles/custom-group-permissions.md b/articles/active-directory/roles/custom-group-permissions.md index 7aaf55b7e5c0a..79a350927b137 100644 --- a/articles/active-directory/roles/custom-group-permissions.md +++ b/articles/active-directory/roles/custom-group-permissions.md @@ -26,8 +26,9 @@ Group management permissions can be used in custom role definitions in Azure Act This article lists the permissions you can use in your custom roles for different group management scenarios. 
For information about how to create custom roles, see [Create and assign a custom role](custom-create.md). -> [!NOTE] -> Assigning custom roles at a group scope using the Azure portal is currently available **only** for Azure AD Premium P1. +## License requirements + +[!INCLUDE [License requirement for using custom roles in Azure AD](../../../includes/active-directory-p1-license.md)] ## How to interpret group management permissions diff --git a/articles/active-directory/roles/groups-create-eligible.md b/articles/active-directory/roles/groups-create-eligible.md index 6bf2b3a04d0bf..665883dcad46f 100644 --- a/articles/active-directory/roles/groups-create-eligible.md +++ b/articles/active-directory/roles/groups-create-eligible.md @@ -102,7 +102,7 @@ Add-AzureADGroupMember -ObjectId $roleAssignablegroup.Id -RefObjectId $member.Ob ### Create a role-assignable group in Azure AD ```http -POST https://graph.microsoft.com/beta/groups +POST https://graph.microsoft.com/v1.0/groups { "description": "This group is assigned to Helpdesk Administrator built-in role of Azure AD.", "displayName": "Contoso_Helpdesk_Administrators", diff --git a/articles/active-directory/roles/manage-roles-portal.md b/articles/active-directory/roles/manage-roles-portal.md index 6c14ab435ed52..de4cf2176fdbb 100644 --- a/articles/active-directory/roles/manage-roles-portal.md +++ b/articles/active-directory/roles/manage-roles-portal.md @@ -158,16 +158,11 @@ If PIM is enabled, you have additional capabilities, such as making a user eligi ## Microsoft Graph API -Follow these instructions to assign a role using the Microsoft Graph API in [Graph Explorer](https://aka.ms/ge). +Follow these instructions to assign a role using the Microsoft Graph API. ### Assign a role -In this example, a security principal with objectID `f8ca5a85-489a-49a0-b555-0a6d81e56f0d` is assigned the Billing Administrator role (role definition ID `b0f54661-2d74-4c50-afa3-1ec803f12efe`) at tenant scope. If you want to see the list of immutable role template IDs of all built-in roles, see [Azure AD built-in roles](permissions-reference.md). - -1. Sign in to the [Graph Explorer](https://aka.ms/ge). -2. Select **POST** as the HTTP method from the dropdown. -3. Select the API version to **v1.0**. -4. Use the [Create unifiedRoleAssignment](/graph/api/rbacapplication-post-roleassignments) API to assign roles. Add following details to the URL and Request Body and select **Run query**. +In this example, a security principal with objectID `f8ca5a85-489a-49a0-b555-0a6d81e56f0d` is assigned the Billing Administrator role (role definition ID `b0f54661-2d74-4c50-afa3-1ec803f12efe`) at tenant scope. To see the list of immutable role template IDs of all built-in roles, see [Azure AD built-in roles](permissions-reference.md). ```http POST https://graph.microsoft.com/v1.0/roleManagement/directory/roleAssignments @@ -183,19 +178,16 @@ Content-type: application/json ### Assign a role using PIM -In this example, a security principal with objectID `f8ca5a85-489a-49a0-b555-0a6d81e56f0d` is assigned a time-bound eligible role assignment to Billing Administrator (role definition ID `b0f54661-2d74-4c50-afa3-1ec803f12efe`) for 180 days. +#### Assign a time-bound eligible role assignment -1. Sign in to the [Graph Explorer](https://aka.ms/ge). -2. Select **POST** as the HTTP method from the dropdown. -3. Select the API version to **beta**. -4. 
Use the [Create unifiedRoleEligibilityScheduleRequest](/graph/api/unifiedroleeligibilityschedulerequest-post-unifiedroleeligibilityschedulerequests) API to assign roles using PIM. Add following details to the URL and Request Body and select **Run query**. +In this example, a security principal with objectID `f8ca5a85-489a-49a0-b555-0a6d81e56f0d` is assigned a time-bound eligible role assignment to Billing Administrator (role definition ID `b0f54661-2d74-4c50-afa3-1ec803f12efe`) for 180 days. ```http -POST https://graph.microsoft.com/beta/rolemanagement/directory/roleEligibilityScheduleRequests +POST https://graph.microsoft.com/v1.0/rolemanagement/directory/roleEligibilityScheduleRequests Content-type: application/json { - "action": "AdminAssign", + "action": "adminAssign", "justification": "for managing admin tasks", "directoryScopeId": "/", "principalId": "f8ca5a85-489a-49a0-b555-0a6d81e56f0d", @@ -203,21 +195,23 @@ Content-type: application/json "scheduleInfo": { "startDateTime": "2021-07-15T19:15:08.941Z", "expiration": { - "type": "AfterDuration", + "type": "afterDuration", "duration": "PT180D" } } } ``` +#### Assign a permanent eligible role assignment + In the following example, a security principal is assigned a permanent eligible role assignment to Billing Administrator. ```http -POST https://graph.microsoft.com/beta/rolemanagement/directory/roleEligibilityScheduleRequests +POST https://graph.microsoft.com/v1.0/rolemanagement/directory/roleEligibilityScheduleRequests Content-type: application/json { - "action": "AdminAssign", + "action": "adminAssign", "justification": "for managing admin tasks", "directoryScopeId": "/", "principalId": "f8ca5a85-489a-49a0-b555-0a6d81e56f0d", @@ -225,20 +219,22 @@ Content-type: application/json "scheduleInfo": { "startDateTime": "2021-07-15T19:15:08.941Z", "expiration": { - "type": "NoExpiration" + "type": "noExpiration" } } } ``` -To activate the role assignment, use the [Create unifiedRoleAssignmentScheduleRequest](/graph/api/unifiedroleassignmentschedulerequest-post-unifiedroleassignmentschedulerequests) API. +#### Activate a role assignment + +To activate the role assignment, use the [Create roleAssignmentScheduleRequests](/graph/api/rbacapplication-post-roleeligibilityschedulerequests) API. ```http -POST https://graph.microsoft.com/beta/roleManagement/directory/roleAssignmentScheduleRequests +POST https://graph.microsoft.com/v1.0/roleManagement/directory/roleAssignmentScheduleRequests Content-type: application/json { - "action": "SelfActivate", + "action": "selfActivate", "justification": "activating role assignment for admin privileges", "roleDefinitionId": "b0f54661-2d74-4c50-afa3-1ec803f12efe", "directoryScopeId": "/", @@ -246,6 +242,8 @@ Content-type: application/json } ``` +For more information about managing Azure AD roles through the PIM API in Microsoft Graph, see [Overview of role management through the privileged identity management (PIM) API](/graph/api/resources/privilegedidentitymanagementv3-overview). 
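To confirm what is in effect for the principal after these requests complete, you can list its role assignments. The following request is a sketch that reuses the example objectID from this article; substitute the objectID of your own security principal.

```http
GET https://graph.microsoft.com/v1.0/roleManagement/directory/roleAssignments?$filter=principalId eq 'f8ca5a85-489a-49a0-b555-0a6d81e56f0d'
```

Each returned assignment includes the `roleDefinitionId` and `directoryScopeId`, so you can verify that the Billing Administrator assignment at tenant scope (`/`) is active. This call returns active assignments only; eligible assignments created through PIM are exposed separately through the role eligibility schedule APIs.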
+ ## Next steps - [List Azure AD role assignments](view-assignments.md) diff --git a/articles/active-directory/roles/permissions-reference.md b/articles/active-directory/roles/permissions-reference.md index 69315e8301b41..1f26c67294c4a 100644 --- a/articles/active-directory/roles/permissions-reference.md +++ b/articles/active-directory/roles/permissions-reference.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.workload: identity ms.subservice: roles ms.topic: reference -ms.date: 04/03/2022 +ms.date: 05/20/2022 ms.author: rolyon ms.reviewer: abhijeetsinha ms.custom: generated, it-pro, fasttrack-edit @@ -550,11 +550,11 @@ Users in this role can enable, disable, and delete devices in Azure AD and read ## Compliance Administrator -Users with this role have permissions to manage compliance-related features in the Microsoft 365 compliance center, Microsoft 365 admin center, Azure, and Office 365 Security & Compliance Center. Assignees can also manage all features within the Exchange admin center and Teams & Skype for Business admin centers and create support tickets for Azure and Microsoft 365. More information is available at [About Microsoft 365 admin roles](https://support.office.com/article/About-Office-365-admin-roles-da585eea-f576-4f55-a1e0-87090b6aaa9d). +Users with this role have permissions to manage compliance-related features in the Microsoft Purview compliance portal, Microsoft 365 admin center, Azure, and Office 365 Security & Compliance Center. Assignees can also manage all features within the Exchange admin center and Teams & Skype for Business admin centers and create support tickets for Azure and Microsoft 365. More information is available at [About Microsoft 365 admin roles](https://support.office.com/article/About-Office-365-admin-roles-da585eea-f576-4f55-a1e0-87090b6aaa9d). In | Can do ----- | ---------- -[Microsoft 365 compliance center](https://protection.office.com) | Protect and manage your organization's data across Microsoft 365 services
            Manage compliance alerts +[Microsoft Purview compliance portal](https://protection.office.com) | Protect and manage your organization's data across Microsoft 365 services
            Manage compliance alerts [Compliance Manager](/office365/securitycompliance/meet-data-protection-and-regulatory-reqs-using-microsoft-cloud) | Track, assign, and verify your organization's regulatory compliance activities [Office 365 Security & Compliance Center](https://support.office.com/article/About-Office-365-admin-roles-da585eea-f576-4f55-a1e0-87090b6aaa9d) | Manage data governance
            Perform legal and data investigation
            Manage Data Subject Request

            This role has the same permissions as the [Compliance Administrator RoleGroup](/microsoft-365/security/office-365-security/permissions-in-the-security-and-compliance-center#permissions-needed-to-use-features-in-the-security--compliance-center) in Office 365 Security & Compliance Center role-based access control. [Intune](/intune/role-based-access-control) | View all Intune audit data @@ -573,11 +573,11 @@ In | Can do ## Compliance Data Administrator -Users with this role have permissions to track data in the Microsoft 365 compliance center, Microsoft 365 admin center, and Azure. Users can also track compliance data within the Exchange admin center, Compliance Manager, and Teams & Skype for Business admin center and create support tickets for Azure and Microsoft 365. [This documentation](/microsoft-365/security/office-365-security/permissions-in-the-security-and-compliance-center#permissions-needed-to-use-features-in-the-security--compliance-center) has details on differences between Compliance Administrator and Compliance Data Administrator. +Users with this role have permissions to track data in the Microsoft Purview compliance portal, Microsoft 365 admin center, and Azure. Users can also track compliance data within the Exchange admin center, Compliance Manager, and Teams & Skype for Business admin center and create support tickets for Azure and Microsoft 365. [This documentation](/microsoft-365/security/office-365-security/permissions-in-the-security-and-compliance-center#permissions-needed-to-use-features-in-the-security--compliance-center) has details on differences between Compliance Administrator and Compliance Data Administrator. In | Can do ----- | ---------- -[Microsoft 365 compliance center](https://protection.office.com) | Monitor compliance-related policies across Microsoft 365 services
            Manage compliance alerts +[Microsoft Purview compliance portal](https://protection.office.com) | Monitor compliance-related policies across Microsoft 365 services
            Manage compliance alerts [Compliance Manager](/office365/securitycompliance/meet-data-protection-and-regulatory-reqs-using-microsoft-cloud) | Track, assign, and verify your organization's regulatory compliance activities [Office 365 Security & Compliance Center](https://support.office.com/article/About-Office-365-admin-roles-da585eea-f576-4f55-a1e0-87090b6aaa9d) | Manage data governance
            Perform legal and data investigation
            Manage Data Subject Request

            This role has the same permissions as the [Compliance Data Administrator RoleGroup](/microsoft-365/security/office-365-security/permissions-in-the-security-and-compliance-center#permissions-needed-to-use-features-in-the-security--compliance-center) in Office 365 Security & Compliance Center role-based access control. [Intune](/intune/role-based-access-control) | View all Intune audit data @@ -782,7 +782,6 @@ Users in this role can read and update basic information of users, groups, and s > | microsoft.directory/servicePrincipals/synchronizationCredentials/manage | Manage application provisioning secrets and credentials | > | microsoft.directory/servicePrincipals/synchronizationJobs/manage | Start, restart, and pause application provisioning syncronization jobs | > | microsoft.directory/servicePrincipals/synchronizationSchema/manage | Create and manage application provisioning syncronization jobs and schema | -> | microsoft.directory/servicePrincipals/managePermissionGrantsForGroup.microsoft-all-application-permissions | Grant a service principal direct access to a group's data | > | microsoft.directory/servicePrincipals/appRoleAssignedTo/update | Update service principal role assignments | > | microsoft.directory/users/assignLicense | Manage user licenses | > | microsoft.directory/users/create | Add users | @@ -904,7 +903,7 @@ This administrator manages federation between Azure AD organizations and externa ## Global Administrator -Users with this role have access to all administrative features in Azure Active Directory, as well as services that use Azure Active Directory identities like the Microsoft 365 Defender portal, the Microsoft 365 compliance center, Exchange Online, SharePoint Online, and Skype for Business Online. Furthermore, Global Administrators can [elevate their access](../../role-based-access-control/elevate-access-global-admin.md) to manage all Azure subscriptions and management groups. This allows Global Administrators to get full access to all Azure resources using the respective Azure AD Tenant. The person who signs up for the Azure AD organization becomes a Global Administrator. There can be more than one Global Administrator at your company. Global Administrators can reset the password for any user and all other administrators. +Users with this role have access to all administrative features in Azure Active Directory, as well as services that use Azure Active Directory identities like the Microsoft 365 Defender portal, the Microsoft Purview compliance portal, Exchange Online, SharePoint Online, and Skype for Business Online. Furthermore, Global Administrators can [elevate their access](../../role-based-access-control/elevate-access-global-admin.md) to manage all Azure subscriptions and management groups. This allows Global Administrators to get full access to all Azure resources using the respective Azure AD Tenant. The person who signs up for the Azure AD organization becomes a Global Administrator. There can be more than one Global Administrator at your company. Global Administrators can reset the password for any user and all other administrators. > [!NOTE] > As a best practice, Microsoft recommends that you assign the Global Administrator role to fewer than five people in your organization. For more information, see [Best practices for Azure AD roles](best-practices.md). 
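If you want to check how many principals currently hold this role in your tenant, one option is to list the members of the directory role by its role template ID. The call below is a sketch, not part of the original guidance: `62e90394-69f5-4237-9190-012177145e10` is the Global Administrator role template ID, and the request assumes you have permission to read directory role memberships.

```http
GET https://graph.microsoft.com/v1.0/directoryRoles(roleTemplateId='62e90394-69f5-4237-9190-012177145e10')/members
```

Count the users returned (and expand any role-assignable groups among them) to measure your tenant against the fewer-than-five recommendation above.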
@@ -963,21 +962,7 @@ Users with this role have access to all administrative features in Azure Active > | microsoft.directory/passwordHashSync/allProperties/allTasks | Manage all aspects of Password Hash Synchronization (PHS) in Azure AD | > | microsoft.directory/policies/allProperties/allTasks | Create and delete policies, and read and update all properties | > | microsoft.directory/conditionalAccessPolicies/allProperties/allTasks | Manage all properties of conditional access policies | -> | microsoft.directory/crossTenantAccessPolicy/standard/read | Read basic properties of cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/allowedCloudEndpoints/update | Update allowed cloud endpoints of cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/basic/update | Update basic settings of cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/default/standard/read | Read basic properties of the default cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/default/b2bCollaboration/update | Update Azure AD B2B collaboration settings of the default cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/default/b2bDirectConnect/update | Update Azure AD B2B direct connect settings of the default cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/default/crossCloudMeetings/update | Update cross-cloud Teams meeting settings of the default cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/default/tenantRestrictions/update | Update tenant restrictions of the default cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/partners/create | Create cross-tenant access policy for partners | -> | microsoft.directory/crossTenantAccessPolicy/partners/delete | Delete cross-tenant access policy for partners | -> | microsoft.directory/crossTenantAccessPolicy/partners/standard/read | Read basic properties of cross-tenant access policy for partners | -> | microsoft.directory/crossTenantAccessPolicy/partners/b2bCollaboration/update | Update Azure AD B2B collaboration settings of cross-tenant access policy for partners | -> | microsoft.directory/crossTenantAccessPolicy/partners/b2bDirectConnect/update | Update Azure AD B2B direct connect settings of cross-tenant access policy for partners | -> | microsoft.directory/crossTenantAccessPolicy/partners/crossCloudMeetings/update | Update cross-cloud Teams meeting settings of cross-tenant access policy for partners | -> | microsoft.directory/crossTenantAccessPolicy/partners/tenantRestrictions/update | Update tenant restrictions of cross-tenant access policy for partners | +> | microsoft.directory/crossTenantAccessPolicies/allProperties/allTasks | Manage all aspects of cross-tenant access policies | > | microsoft.directory/privilegedIdentityManagement/allProperties/read | Read all resources in Privileged Identity Management | > | microsoft.directory/provisioningLogs/allProperties/read | Read all properties of provisioning logs | > | microsoft.directory/roleAssignments/allProperties/allTasks | Create and delete role assignments, and read and update all role assignment properties | @@ -989,7 +974,6 @@ Users with this role have access to all administrative features in Azure Active > | microsoft.directory/serviceAction/getAvailableExtentionProperties | Can perform the getAvailableExtentionProperties service action | > | microsoft.directory/servicePrincipals/allProperties/allTasks | 
Create and delete service principals, and read and update all properties | > | microsoft.directory/servicePrincipals/managePermissionGrantsForAll.microsoft-company-admin | Grant consent for any permission to any application | -> | microsoft.directory/servicePrincipals/managePermissionGrantsForGroup.microsoft-all-application-permissions | Grant a service principal direct access to a group's data | > | microsoft.directory/servicePrincipals/synchronization/standard/read | Read provisioning settings associated with your service principal | > | microsoft.directory/signInReports/allProperties/read | Read all properties on sign-in reports, including privileged properties | > | microsoft.directory/subscribedSkus/allProperties/allTasks | Buy and manage subscriptions and delete subscriptions | @@ -1011,6 +995,7 @@ Users with this role have access to all administrative features in Azure Active > | microsoft.directory/verifiableCredentials/configuration/delete | Delete configuration required to create and manage verifiable credentials and delete all of its verifiable credentials | > | microsoft.directory/verifiableCredentials/configuration/allProperties/read | Read configuration required to create and manage verifiable credentials | > | microsoft.directory/verifiableCredentials/configuration/allProperties/update | Update configuration required to create and manage verifiable credentials | +> | microsoft.directory/lifecycleManagement/workflows/allProperties/allTasks | Manage all aspects of lifecycle management workflows and tasks in Azure AD | > | microsoft.azure.advancedThreatProtection/allEntities/allTasks | Manage all aspects of Azure Advanced Threat Protection | > | microsoft.azure.informationProtection/allEntities/allTasks | Manage all aspects of Azure Information Protection | > | microsoft.azure.serviceHealth/allEntities/allTasks | Read and configure Azure Service Health | @@ -1066,6 +1051,7 @@ Users in this role can read settings and administrative information across Micro >- [Privileged Access Management (PAM)](/office365/securitycompliance/privileged-access-management-overview) doesn't support the Global Reader role. >- [Azure Information Protection](/azure/information-protection/what-is-information-protection) - Global Reader is supported [for central reporting](/azure/information-protection/reports-aip) only, and when your Azure AD organization isn't on the [unified labeling platform](/azure/information-protection/faqs#how-can-i-determine-if-my-tenant-is-on-the-unified-labeling-platform). > - [SharePoint](https://admin.microsoft.com/sharepoint) - Global Reader currently can't access SharePoint using PowerShell. +> - [Power Platform admin center](https://admin.powerplatform.microsoft.com) - Global Reader is not yet supported in the Power Platform admin center. > > These features are currently in development. 
> @@ -1104,9 +1090,7 @@ Users in this role can read settings and administrative information across Micro > | microsoft.directory/permissionGrantPolicies/standard/read | Read standard properties of permission grant policies | > | microsoft.directory/policies/allProperties/read | Read all properties of policies | > | microsoft.directory/conditionalAccessPolicies/allProperties/read | Read all properties of conditional access policies | -> | microsoft.directory/crossTenantAccessPolicy/standard/read | Read basic properties of cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/default/standard/read | Read basic properties of the default cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/partners/standard/read | Read basic properties of cross-tenant access policy for partners | +> | microsoft.directory/crossTenantAccessPolicies/allProperties/read | Read all properties of cross-tenant access policies | > | microsoft.directory/deviceManagementPolicies/standard/read | Read standard properties on device management application policies | > | microsoft.directory/deviceRegistrationPolicy/standard/read | Read standard properties on device registration policies | > | microsoft.directory/privilegedIdentityManagement/allProperties/read | Read all resources in Privileged Identity Management | @@ -1124,6 +1108,7 @@ Users in this role can read settings and administrative information across Micro > | microsoft.directory/verifiableCredentials/configuration/contracts/cards/allProperties/read | Read a verifiable credential card | > | microsoft.directory/verifiableCredentials/configuration/contracts/allProperties/read | Read a verifiable credential contract | > | microsoft.directory/verifiableCredentials/configuration/allProperties/read | Read configuration required to create and manage verifiable credentials | +> | microsoft.directory/lifecycleManagement/workflows/allProperties/read | Read all properties of lifecycle management workflows and tasks in Azure AD | > | microsoft.cloudPC/allEntities/allProperties/read | Read all aspects of Windows 365 | > | microsoft.commerce.billing/allEntities/read | Read all resources of Office 365 billing | > | microsoft.edge/allEntities/allProperties/read | Read all aspects of Microsoft Edge | @@ -1165,7 +1150,6 @@ Users in this role can create/manage groups and its settings like naming and exp > | microsoft.directory/groups/owners/update | Update owners of Security groups and Microsoft 365 groups, excluding role-assignable groups | > | microsoft.directory/groups/settings/update | Update settings of groups | > | microsoft.directory/groups/visibility/update | Update the visibility property of Security groups and Microsoft 365 groups, excluding role-assignable groups | -> | microsoft.directory/servicePrincipals/managePermissionGrantsForGroup.microsoft-all-application-permissions | Grant a service principal direct access to a group's data | > | microsoft.azure.serviceHealth/allEntities/allTasks | Read and configure Azure Service Health | > | microsoft.azure.supportTickets/allEntities/allTasks | Create and manage Azure support tickets | > | microsoft.office365.serviceHealth/allEntities/allTasks | Read and configure Service Health in the Microsoft 365 admin center | @@ -1818,26 +1802,17 @@ Azure Advanced Threat Protection | Monitor and respond to suspicious security ac > | microsoft.directory/auditLogs/allProperties/read | Read all properties on audit logs, including privileged properties | > | 
microsoft.directory/authorizationPolicy/standard/read | Read standard properties of authorization policy | > | microsoft.directory/bitlockerKeys/key/read | Read bitlocker metadata and key on devices | -> | microsoft.directory/crossTenantAccessPolicy/standard/read | Read basic properties of cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/allowedCloudEndpoints/update | Update allowed cloud endpoints of cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/basic/update | Update basic settings of cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/default/standard/read | Read basic properties of the default cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/default/b2bCollaboration/update | Update Azure AD B2B collaboration settings of the default cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/default/b2bDirectConnect/update | Update Azure AD B2B direct connect settings of the default cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/default/crossCloudMeetings/update | Update cross-cloud Teams meeting settings of the default cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/default/tenantRestrictions/update | Update tenant restrictions of the default cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/partners/create | Create cross-tenant access policy for partners | -> | microsoft.directory/crossTenantAccessPolicy/partners/delete | Delete cross-tenant access policy for partners | -> | microsoft.directory/crossTenantAccessPolicy/partners/standard/read | Read basic properties of cross-tenant access policy for partners | -> | microsoft.directory/crossTenantAccessPolicy/partners/b2bCollaboration/update | Update Azure AD B2B collaboration settings of cross-tenant access policy for partners | -> | microsoft.directory/crossTenantAccessPolicy/partners/b2bDirectConnect/update | Update Azure AD B2B direct connect settings of cross-tenant access policy for partners | -> | microsoft.directory/crossTenantAccessPolicy/partners/crossCloudMeetings/update | Update cross-cloud Teams meeting settings of cross-tenant access policy for partners | -> | microsoft.directory/crossTenantAccessPolicy/partners/tenantRestrictions/update | Update tenant restrictions of cross-tenant access policy for partners | +> | microsoft.directory/crossTenantAccessPolicies/create | Create cross-tenant access policies | +> | microsoft.directory/crossTenantAccessPolicies/delete | Delete cross-tenant access policies | +> | microsoft.directory/crossTenantAccessPolicies/standard/read | Read basic properties of cross-tenant access policies | +> | microsoft.directory/crossTenantAccessPolicies/owners/read | Read owners of cross-tenant access policies | +> | microsoft.directory/crossTenantAccessPolicies/policyAppliedTo/read | Read the policyAppliedTo property of cross-tenant access policies | +> | microsoft.directory/crossTenantAccessPolicies/basic/update | Update basic properties of cross-tenant access policies | +> | microsoft.directory/crossTenantAccessPolicies/owners/update | Update owners of cross-tenant access policies | +> | microsoft.directory/crossTenantAccessPolicies/tenantDefault/update | Update the default tenant for cross-tenant access policies | > | microsoft.directory/entitlementManagement/allProperties/read | Read all properties in Azure AD entitlement management | -> | 
microsoft.directory/hybridAuthenticationPolicy/allProperties/allTasks | Manage hybrid authentication policy in Azure AD | > | microsoft.directory/identityProtection/allProperties/read | Read all resources in Azure AD Identity Protection | > | microsoft.directory/identityProtection/allProperties/update | Update all resources in Azure AD Identity Protection | -> | microsoft.directory/passwordHashSync/allProperties/allTasks | Manage all aspects of Password Hash Synchronization (PHS) in Azure AD | > | microsoft.directory/policies/create | Create policies in Azure AD | > | microsoft.directory/policies/delete | Delete policies in Azure AD | > | microsoft.directory/policies/basic/update | Update basic properties on policies | @@ -2017,7 +1992,6 @@ Users in this role can manage all aspects of the Microsoft Teams workload via th > | microsoft.directory/groups.unified/basic/update | Update basic properties on Microsoft 365 groups, excluding role-assignable groups | > | microsoft.directory/groups.unified/members/update | Update members of Microsoft 365 groups, excluding role-assignable groups | > | microsoft.directory/groups.unified/owners/update | Update owners of Microsoft 365 groups, excluding role-assignable groups | -> | microsoft.directory/servicePrincipals/managePermissionGrantsForGroup.microsoft-all-application-permissions | Grant a service principal direct access to a group's data | > | microsoft.azure.serviceHealth/allEntities/allTasks | Read and configure Azure Service Health | > | microsoft.azure.supportTickets/allEntities/allTasks | Create and manage Azure support tickets | > | microsoft.office365.network/performance/allProperties/read | Read all network performance properties in the Microsoft 365 admin center | @@ -2027,21 +2001,6 @@ Users in this role can manage all aspects of the Microsoft Teams workload via th > | microsoft.office365.usageReports/allEntities/allProperties/read | Read Office 365 usage reports | > | microsoft.office365.webPortal/allEntities/standard/read | Read basic properties on all resources in the Microsoft 365 admin center | > | microsoft.teams/allEntities/allProperties/allTasks | Manage all resources in Teams | -> | microsoft.directory/crossTenantAccessPolicy/standard/read | Read basic properties of cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/allowedCloudEndpoints/update | Update allowed cloud endpoints of cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/basic/update | Update basic settings of cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/default/standard/read | Read basic properties of the default cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/default/b2bCollaboration/update | Update Azure AD B2B collaboration settings of the default cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/default/b2bDirectConnect/update | Update Azure AD B2B direct connect settings of the default cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/default/crossCloudMeetings/update | Update cross-cloud Teams meeting settings of the default cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/default/tenantRestrictions/update | Update tenant restrictions of the default cross-tenant access policy | -> | microsoft.directory/crossTenantAccessPolicy/partners/create | Create cross-tenant access policy for partners | -> | microsoft.directory/crossTenantAccessPolicy/partners/delete | Delete 
cross-tenant access policy for partners | -> | microsoft.directory/crossTenantAccessPolicy/partners/standard/read | Read basic properties of cross-tenant access policy for partners | -> | microsoft.directory/crossTenantAccessPolicy/partners/b2bCollaboration/update | Update Azure AD B2B collaboration settings of cross-tenant access policy for partners | -> | microsoft.directory/crossTenantAccessPolicy/partners/b2bDirectConnect/update | Update Azure AD B2B direct connect settings of cross-tenant access policy for partners | -> | microsoft.directory/crossTenantAccessPolicy/partners/crossCloudMeetings/update | Update cross-cloud Teams meeting settings of cross-tenant access policy for partners | -> | microsoft.directory/crossTenantAccessPolicy/partners/tenantRestrictions/update | Update tenant restrictions of cross-tenant access policy for partners | ## Teams Communications Administrator diff --git a/articles/active-directory/roles/security-planning.md b/articles/active-directory/roles/security-planning.md index 82e5b4ebba397..83992b5189cb7 100644 --- a/articles/active-directory/roles/security-planning.md +++ b/articles/active-directory/roles/security-planning.md @@ -252,7 +252,7 @@ Attackers might try to target privileged accounts so that they can disrupt the i * Impersonation attacks * Credential theft attacks such as keystroke logging, Pass-the-Hash, and Pass-The-Ticket -By deploying privileged access workstations, you can reduce the risk that administrators enter their credentials in a desktop environment that hasn't been hardened. For more information, see [Privileged Access Workstations](https://4sysops.com/archives/understand-the-microsoft-privileged-access-workstation-paw-security-model/). +By deploying privileged access workstations, you can reduce the risk that administrators enter their credentials in a desktop environment that hasn't been hardened. For more information, see [Privileged Access Workstations](/security/compass/overview). #### Review National Institute of Standards and Technology recommendations for handling incidents diff --git a/articles/active-directory/saas-apps/agile-provisioning-tutorial.md b/articles/active-directory/saas-apps/agile-provisioning-tutorial.md new file mode 100644 index 0000000000000..d1d7a922d68b2 --- /dev/null +++ b/articles/active-directory/saas-apps/agile-provisioning-tutorial.md @@ -0,0 +1,144 @@ +--- +title: 'Tutorial: Azure AD SSO integration with Agile Provisioning' +description: Learn how to configure single sign-on between Azure Active Directory and Agile Provisioning. +services: active-directory +author: jeevansd +manager: CelesteDG +ms.reviewer: CelesteDG +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 05/23/2022 +ms.author: jeedes + +--- + +# Tutorial: Azure AD SSO integration with Agile Provisioning + +In this tutorial, you'll learn how to integrate Agile Provisioning with Azure Active Directory (Azure AD). When you integrate Agile Provisioning with Azure AD, you can: + +* Control in Azure AD who has access to Agile Provisioning. +* Enable your users to be automatically signed-in to Agile Provisioning with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. + +## Prerequisites + +To get started, you need the following items: + +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* Agile Provisioning single sign-on (SSO) enabled subscription. 
+* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). + +## Scenario description + +In this tutorial, you configure and test Azure AD SSO in a test environment. + +* Agile Provisioning supports **SP** and **IDP** initiated SSO. + +## Add Agile Provisioning from the gallery + +To configure the integration of Agile Provisioning into Azure AD, you need to add Agile Provisioning from the gallery to your list of managed SaaS apps. + +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **Agile Provisioning** in the search box. +1. Select **Agile Provisioning** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. + +## Configure and test Azure AD SSO for Agile Provisioning + +Configure and test Azure AD SSO with Agile Provisioning using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in Agile Provisioning. + +To configure and test Azure AD SSO with Agile Provisioning, perform the following steps: + +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure Agile Provisioning SSO](#configure-agile-provisioning-sso)** - to configure the single sign-on settings on application side. + 1. **[Create Agile Provisioning test user](#create-agile-provisioning-test-user)** - to have a counterpart of B.Simon in Agile Provisioning that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. + +## Configure Azure AD SSO + +Follow these steps to enable Azure AD SSO in the Azure portal. + +1. In the Azure portal, on the **Agile Provisioning** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") + +1. On the **Basic SAML Configuration** section, perform the following steps: + + a. In the **Identifier** text box, type a value using the following pattern: + `` + + b. In the **Reply URL** text box, type a URL using the following pattern: + `https:///web-portal/saml/SSO` + +1. Click **Set additional URLs** and perform the following step if you wish to configure the application in **SP** initiated mode: + + In the **Sign-on URL** text box, type a URL using the following pattern: + `https:///web-portal/` + + > [!NOTE] + > These values are not real. Update these values with the actual Identifier, Reply URL and Sign-on URL. Contact [Agile Provisioning Client support team](mailto:support@flexcomlabs.com) to get these values. 
You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. + +1. On the **Set up single sign-on with SAML** page, In the **SAML Signing Certificate** section, click copy button to copy **App Federation Metadata Url** and save it on your computer. + + ![Screenshot shows the Certificate download link.](common/copy-metadataurl.png "Certificate") + +### Create an Azure AD test user + +In this section, you'll create a test user in the Azure portal called B.Simon. + +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. + +### Assign the Azure AD test user + +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to Agile Provisioning. + +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **Agile Provisioning**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. + +## Configure Agile Provisioning SSO + +To configure single sign-on on **Agile Provisioning** side, you need to send the **App Federation Metadata Url** to [Agile Provisioning support team](mailto:support@flexcomlabs.com). They set this setting to have the SAML SSO connection set properly on both sides. + +### Create Agile Provisioning test user + +In this section, you create a user called Britta Simon in Agile Provisioning. Work with [Agile Provisioning support team](mailto:support@flexcomlabs.com) to add the users in the Agile Provisioning platform. Users must be created and activated before you use single sign-on. + +## Test SSO + +In this section, you test your Azure AD single sign-on configuration with following options. + +#### SP initiated: + +* Click on **Test this application** in Azure portal. This will redirect to Agile Provisioning Sign on URL where you can initiate the login flow. + +* Go to Agile Provisioning Sign-on URL directly and initiate the login flow from there. + +#### IDP initiated: + +* Click on **Test this application** in Azure portal and you should be automatically signed in to the Agile Provisioning for which you set up the SSO. + +You can also use Microsoft My Apps to test the application in any mode. When you click the Agile Provisioning tile in the My Apps, if configured in SP mode you would be redirected to the application sign on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the Agile Provisioning for which you set up the SSO. 
For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). + +## Next steps + +Once you configure Agile Provisioning you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Defender for Cloud Apps](/cloud-app-security/proxy-deployment-any-app). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/airwatch-tutorial.md b/articles/active-directory/saas-apps/airwatch-tutorial.md index b4bc8010aee6f..770e057d0f8e5 100644 --- a/articles/active-directory/saas-apps/airwatch-tutorial.md +++ b/articles/active-directory/saas-apps/airwatch-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory integration with AirWatch | Microsoft Docs' +title: 'Tutorial: Azure Active Directory integration with AirWatch' description: Learn how to configure single sign-on between Azure Active Directory and AirWatch. services: active-directory author: jeevansd @@ -9,7 +9,7 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 01/20/2021 +ms.date: 06/08/2022 ms.author: jeedes --- @@ -28,6 +28,9 @@ To get started, you need the following items: * An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). * AirWatch single sign-on (SSO)-enabled subscription. +> [!NOTE] +> Identifier of this application is a fixed string value so only one instance can be configured in one tenant. + ## Scenario description In this tutorial, you configure and test Azure AD SSO in a test environment. @@ -70,14 +73,22 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Basic SAML Configuration** page, enter the values for the following fields: - 1. In the **Sign on URL** text box, type a URL using the following pattern: - `https://.awmdm.com/AirWatch/Login?gid=companycode` - - 1. In the **Identifier (Entity ID)** text box, type the value as: + a. In the **Identifier (Entity ID)** text box, type the value as: `AirWatch` + b. In the **Reply URL** text box, type a URL using one of the following patterns: + + | Reply URL| + |-----------| + | `https://.awmdm.com/` | + | `https://.airwatchportals.com/` | + | + + c. In the **Sign on URL** text box, type a URL using the following pattern: + `https://.awmdm.com/AirWatch/Login?gid=companycode` + > [!NOTE] - > This value is not the real. Update this value with the actual Sign-on URL. Contact [AirWatch Client support team](https://www.vmware.com/in/support/acquisitions/airwatch.html) to get this value. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. + > These values are not the real. Update these values with the actual Reply URL and Sign-on URL. Contact [AirWatch Client support team](https://www.vmware.com/in/support/acquisitions/airwatch.html) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. 1. AirWatch application expects the SAML assertions in a specific format. Configure the following claims for this application. You can manage the values of these attributes from the **User Attributes** section on application integration page. On the **Set up Single Sign-On with SAML** page, click **Edit** button to open **User Attributes** dialog. 
diff --git a/articles/active-directory/saas-apps/asccontracts-tutorial.md b/articles/active-directory/saas-apps/asccontracts-tutorial.md index 1bfb1f200386c..5546a34042d38 100644 --- a/articles/active-directory/saas-apps/asccontracts-tutorial.md +++ b/articles/active-directory/saas-apps/asccontracts-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory integration with ASC Contracts | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with ASC Contracts' description: Learn how to configure single sign-on between Azure Active Directory and ASC Contracts. services: active-directory author: jeevansd @@ -9,91 +9,67 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 01/17/2019 +ms.date: 06/07/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory integration with ASC Contracts +# Tutorial: Azure AD SSO integration with ASC Contracts -In this tutorial, you learn how to integrate ASC Contracts with Azure Active Directory (Azure AD). -Integrating ASC Contracts with Azure AD provides you with the following benefits: +In this tutorial, you'll learn how to integrate ASC Contracts with Azure Active Directory (Azure AD). When you integrate ASC Contracts with Azure AD, you can: -* You can control in Azure AD who has access to ASC Contracts. -* You can enable your users to be automatically signed-in to ASC Contracts (Single Sign-On) with their Azure AD accounts. -* You can manage your accounts in one central location - the Azure portal. - -If you want to know more details about SaaS app integration with Azure AD, see [What is application access and single sign-on with Azure Active Directory](../manage-apps/what-is-single-sign-on.md). -If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/) before you begin. +* Control in Azure AD who has access to ASC Contracts. +* Enable your users to be automatically signed-in to ASC Contracts with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. ## Prerequisites -To configure Azure AD integration with ASC Contracts, you need the following items: +To get started, you need the following items: -* An Azure AD subscription. If you don't have an Azure AD environment, you can get one-month trial [here](https://azure.microsoft.com/pricing/free-trial/) -* ASC Contracts single sign-on enabled subscription +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* ASC Contracts single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD single sign-on in a test environment. -* ASC Contracts supports **IDP** initiated SSO +* ASC Contracts supports **IDP** initiated SSO. -## Adding ASC Contracts from the gallery +## Add ASC Contracts from the gallery To configure the integration of ASC Contracts into Azure AD, you need to add ASC Contracts from the gallery to your list of managed SaaS apps. -**To add ASC Contracts from the gallery, perform the following steps:** - -1. In the **[Azure portal](https://portal.azure.com)**, on the left navigation panel, click **Azure Active Directory** icon. - - ![The Azure Active Directory button](common/select-azuread.png) - -2. 
Navigate to **Enterprise Applications** and then select the **All Applications** option. - - ![The Enterprise applications blade](common/enterprise-applications.png) - -3. To add new application, click **New application** button on the top of dialog. - - ![The New application button](common/add-new-app.png) - -4. In the search box, type **ASC Contracts**, select **ASC Contracts** from result panel then click **Add** button to add the application. - - ![ASC Contracts in the results list](common/search-new-app.png) - -## Configure and test Azure AD single sign-on - -In this section, you configure and test Azure AD single sign-on with ASC Contracts based on a test user called **Britta Simon**. -For single sign-on to work, a link relationship between an Azure AD user and the related user in ASC Contracts needs to be established. +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **ASC Contracts** in the search box. +1. Select **ASC Contracts** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. -To configure and test Azure AD single sign-on with ASC Contracts, you need to complete the following building blocks: +## Configure and test Azure AD SSO for ASC Contracts -1. **[Configure Azure AD Single Sign-On](#configure-azure-ad-single-sign-on)** - to enable your users to use this feature. -2. **[Configure ASC Contracts Single Sign-On](#configure-asc-contracts-single-sign-on)** - to configure the Single Sign-On settings on application side. -3. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with Britta Simon. -4. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable Britta Simon to use Azure AD single sign-on. -5. **[Create ASC Contracts test user](#create-asc-contracts-test-user)** - to have a counterpart of Britta Simon in ASC Contracts that is linked to the Azure AD representation of user. -6. **[Test single sign-on](#test-single-sign-on)** - to verify whether the configuration works. +Configure and test Azure AD SSO with ASC Contracts using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in ASC Contracts. -### Configure Azure AD single sign-on +To configure and test Azure AD SSO with ASC Contracts, perform the following steps: -In this section, you enable Azure AD single sign-on in the Azure portal. +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure ASC Contracts SSO](#configure-asc-contracts-sso)** - to configure the single sign-on settings on application side. + 1. **[Create ASC Contracts test user](#create-asc-contracts-test-user)** - to have a counterpart of B.Simon in ASC Contracts that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. 
-To configure Azure AD single sign-on with ASC Contracts, perform the following steps: +## Configure Azure AD SSO -1. In the [Azure portal](https://portal.azure.com/), on the **ASC Contracts** application integration page, select **Single sign-on**. +Follow these steps to enable Azure AD SSO in the Azure portal. - ![Configure single sign-on link](common/select-sso.png) +1. In the Azure portal, on the **ASC Contracts** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") -2. On the **Select a Single sign-on method** dialog, select **SAML/WS-Fed** mode to enable single sign-on. - - ![Single sign-on select mode](common/select-saml-option.png) - -3. On the **Set up Single Sign-On with SAML** page, click **Edit** icon to open **Basic SAML Configuration** dialog. - - ![Edit Basic SAML Configuration](common/edit-urls.png) - -4. On the **Set up Single Sign-On with SAML** page, perform the following steps: - - ![ASC Contracts Domain and URLs single sign-on information](common/idp-intiated.png) +1. On the **Basic SAML Configuration** page, perform the following steps: a. In the **Identifier** text box, type a URL using the following pattern: `https://.asccontracts.com/shibboleth` @@ -104,89 +80,54 @@ To configure Azure AD single sign-on with ASC Contracts, perform the following s > [!NOTE] > These values are not real. Update these values with the actual Identifier and Reply URL. Contact ASC Networks Inc. (ASC) team at **613.599.6178** to get these values. -5. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Federation Metadata XML** from the given options as per your requirement and save it on your computer. +1. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Federation Metadata XML** from the given options as per your requirement and save it on your computer. - ![The Certificate download link](common/metadataxml.png) + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") -6. On the **Set up ASC Contracts** section, copy the appropriate URL(s) as per your requirement. +1. On the **Set up ASC Contracts** section, copy the appropriate URL(s) as per your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) - - a. Login URL - - b. Azure Ad Identifier - - c. Logout URL - -### Configure ASC Contracts Single Sign-On - -To configure single sign-on on **ASC Contracts** side, call ASC Networks Inc. (ASC) support at **613.599.6178** and provide them with the downloaded **Federation Metadata XML**. They set this application up to have the SAML SSO connection set properly on both sides. + ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Metadata") ### Create an Azure AD test user -The objective of this section is to create a test user in the Azure portal called Britta Simon. - -1. In the Azure portal, in the left pane, select **Azure Active Directory**, select **Users**, and then select **All users**. - - ![The "Users and groups" and "All users" links](common/users.png) - -2. 
Select **New user** at the top of the screen. - - ![New user Button](common/new-user.png) - -3. In the User properties, perform the following steps. - - ![The User dialog box](common/user-properties.png) - - a. In the **Name** field enter **BrittaSimon**. - - b. In the **User name** field type **brittasimon\@yourcompanydomain.extension** - For example, BrittaSimon@contoso.com - - c. Select **Show password** check box, and then write down the value that's displayed in the Password box. +In this section, you'll create a test user in the Azure portal called B.Simon. - d. Click **Create**. +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. ### Assign the Azure AD test user -In this section, you enable Britta Simon to use Azure single sign-on by granting access to ASC Contracts. +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to ASC Contracts. -1. In the Azure portal, select **Enterprise Applications**, select **All applications**, then select **ASC Contracts**. +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **ASC Contracts**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. - ![Enterprise applications blade](common/enterprise-applications.png) +## Configure ASC Contracts SSO -2. In the applications list, select **ASC Contracts**. - - ![The ASC Contracts link in the Applications list](common/all-applications.png) - -3. In the menu on the left, select **Users and groups**. - - ![The "Users and groups" link](common/users-groups-blade.png) - -4. Click the **Add user** button, then select **Users and groups** in the **Add Assignment** dialog. - - ![The Add Assignment pane](common/add-assign-user.png) - -5. In the **Users and groups** dialog select **Britta Simon** in the Users list, then click the **Select** button at the bottom of the screen. - -6. If you are expecting any role value in the SAML assertion then in the **Select Role** dialog select the appropriate role for the user from the list, then click the **Select** button at the bottom of the screen. - -7. In the **Add Assignment** dialog click the **Assign** button. +To configure single sign-on on **ASC Contracts** side, call ASC Networks Inc. (ASC) support at **613.599.6178** and provide them with the downloaded **Federation Metadata XML**. They set this application up to have the SAML SSO connection set properly on both sides. ### Create ASC Contracts test user Work with ASC Networks Inc. 
(ASC) support team at **613.599.6178** to get the users added in the ASC Contracts platform. -### Test single sign-on - -In this section, you test your Azure AD single sign-on configuration using the Access Panel. +## Test SSO -When you click the ASC Contracts tile in the Access Panel, you should be automatically signed in to the ASC Contracts for which you set up SSO. For more information about the Access Panel, see [Introduction to the Access Panel](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). +In this section, you test your Azure AD single sign-on configuration with following options. -## Additional Resources +* Click on Test this application in Azure portal and you should be automatically signed in to the ASC Contracts for which you set up the SSO. -- [List of Tutorials on How to Integrate SaaS Apps with Azure Active Directory](./tutorial-list.md) +* You can use Microsoft My Apps. When you click the ASC Contracts tile in the My Apps, you should be automatically signed in to the ASC Contracts for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). -- [What is application access and single sign-on with Azure Active Directory?](../manage-apps/what-is-single-sign-on.md) +## Next steps -- [What is Conditional Access in Azure Active Directory?](../conditional-access/overview.md) \ No newline at end of file +Once you configure ASC Contracts you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/battery-management-information-system-tutorial.md b/articles/active-directory/saas-apps/battery-management-information-system-tutorial.md new file mode 100644 index 0000000000000..3c3fceb466e95 --- /dev/null +++ b/articles/active-directory/saas-apps/battery-management-information-system-tutorial.md @@ -0,0 +1,139 @@ +--- +title: 'Tutorial: Azure AD SSO integration with BMIS - Battery Management Information System' +description: Learn how to configure single sign-on between Azure Active Directory and BMIS - Battery Management Information System. +services: active-directory +author: jeevansd +manager: CelesteDG +ms.reviewer: CelesteDG +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 05/27/2022 +ms.author: jeedes + +--- + +# Tutorial: Azure AD SSO integration with BMIS - Battery Management Information System + +In this tutorial, you'll learn how to integrate BMIS - Battery Management Information System with Azure Active Directory (Azure AD). When you integrate BMIS - Battery Management Information System with Azure AD, you can: + +* Control in Azure AD who has access to BMIS - Battery Management Information System. +* Enable your users to be automatically signed-in to BMIS - Battery Management Information System with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. + +## Prerequisites + +To get started, you need the following items: + +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). 
+* BMIS - Battery Management Information System single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). + +## Scenario description + +In this tutorial, you configure and test Azure AD SSO in a test environment. + +* BMIS - Battery Management Information System supports **IDP** initiated SSO. + +## Add BMIS - Battery Management Information System from the gallery + +To configure the integration of BMIS - Battery Management Information System into Azure AD, you need to add BMIS - Battery Management Information System from the gallery to your list of managed SaaS apps. + +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **BMIS - Battery Management Information System** in the search box. +1. Select **BMIS - Battery Management Information System** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. + +## Configure and test Azure AD SSO for BMIS - Battery Management Information System + +Configure and test Azure AD SSO with BMIS - Battery Management Information System using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in BMIS - Battery Management Information System. + +To configure and test Azure AD SSO with BMIS - Battery Management Information System, perform the following steps: + +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure BMIS - Battery Management Information System SSO](#configure-bmis---battery-management-information-system-sso)** - to configure the single sign-on settings on application side. + 1. **[Create BMIS - Battery Management Information System test user](#create-bmis---battery-management-information-system-test-user)** - to have a counterpart of B.Simon in BMIS - Battery Management Information System that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. + +## Configure Azure AD SSO + +Follow these steps to enable Azure AD SSO in the Azure portal. + +1. In the Azure portal, on the **BMIS - Battery Management Information System** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic SAML Configuration.](common/edit-urls.png "Basic Configuration") + +1. On the **Basic SAML Configuration** section, the application is pre-configured and the necessary URLs are already pre-populated with Azure. The user needs to save the configuration by clicking the **Save** button. + +1. 
The BMIS - Battery Management Information System application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes.
+
+    ![Screenshot shows the Battery Management Information System application image.](common/default-attributes.png "Image")
+
+1. In addition to the above, the BMIS - Battery Management Information System application expects a few more attributes to be passed back in the SAML response; these are shown below. These attributes are also pre-populated, but you can review them as per your requirements (an illustrative assertion snippet appears later in this article).
+
+    | Name | Source Attribute |
+    |-------| --------- |
+    | email | user.mail |
+    | first_name | user.givenname |
+    | last_name | user.surname |
+    | user_name | user.mail |
+
+1. On the **Set up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Federation Metadata XML** and select **Download** to download the certificate and save it on your computer.
+
+    ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate")
+
+1. On the **Set up BMIS - Battery Management Information System** section, copy the appropriate URL(s) based on your requirement.
+
+    ![Screenshot shows how to copy the appropriate configuration URLs.](common/copy-configuration-urls.png "Attributes")
+
+### Create an Azure AD test user
+
+In this section, you'll create a test user in the Azure portal called B.Simon.
+
+1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**.
+1. Select **New user** at the top of the screen.
+1. In the **User** properties, follow these steps:
+   1. In the **Name** field, enter `B.Simon`.
+   1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`.
+   1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box.
+   1. Click **Create**.
+
+### Assign the Azure AD test user
+
+In this section, you'll enable B.Simon to use Azure single sign-on by granting access to BMIS - Battery Management Information System.
+
+1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**.
+1. In the applications list, select **BMIS - Battery Management Information System**.
+1. In the app's overview page, find the **Manage** section and select **Users and groups**.
+1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog.
+1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen.
+1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected.
+1. In the **Add Assignment** dialog, click the **Assign** button.
+
+## Configure BMIS - Battery Management Information System SSO
+
+To configure single sign-on on the **BMIS - Battery Management Information System** side, you need to send the downloaded **Federation Metadata XML** and the appropriate copied URLs from the Azure portal to the [BMIS - Battery Management Information System support team](mailto:bmissupport@midtronics.com). They use this information to have the SAML SSO connection set properly on both sides.
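+
+The attribute mappings listed earlier determine what Azure AD places in the assertion it issues. As a rough, illustrative sketch only (placeholder values for the test user B.Simon, not output captured from a real tenant), the attribute statement inside a successful SAML response could look similar to the following; the exact assertion Azure AD issues depends on your tenant and configuration:
+
+```xml
+<!-- Illustrative placeholder values only; Azure AD generates the real assertion. -->
+<AttributeStatement xmlns="urn:oasis:names:tc:SAML:2.0:assertion">
+  <Attribute Name="email">
+    <AttributeValue>B.Simon@contoso.com</AttributeValue>
+  </Attribute>
+  <Attribute Name="first_name">
+    <AttributeValue>B.</AttributeValue>
+  </Attribute>
+  <Attribute Name="last_name">
+    <AttributeValue>Simon</AttributeValue>
+  </Attribute>
+  <Attribute Name="user_name">
+    <AttributeValue>B.Simon@contoso.com</AttributeValue>
+  </Attribute>
+</AttributeStatement>
+```
+
+If sign-in fails on the BMIS side, comparing the attribute names in the assertion that Azure AD actually issued against this shape can be a quick first check.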
+
+### Create BMIS - Battery Management Information System test user
+
+In this section, you create a user called Britta Simon in BMIS - Battery Management Information System. Work with the [BMIS - Battery Management Information System support team](mailto:bmissupport@midtronics.com) to add the users in the BMIS - Battery Management Information System platform. Users must be created and activated before you use single sign-on.
+
+## Test SSO
+
+In this section, you test your Azure AD single sign-on configuration with the following options.
+
+* Click on **Test this application** in the Azure portal and you should be automatically signed in to the BMIS - Battery Management Information System for which you set up the SSO.
+
+* You can use Microsoft My Apps. When you click the BMIS - Battery Management Information System tile in the My Apps, you should be automatically signed in to the BMIS - Battery Management Information System for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md).
+
+## Next steps
+
+Once you configure BMIS - Battery Management Information System, you can enforce session control, which protects against the exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad).
\ No newline at end of file
diff --git a/articles/active-directory/saas-apps/blinq-provisioning-tutorial.md b/articles/active-directory/saas-apps/blinq-provisioning-tutorial.md
new file mode 100644
index 0000000000000..367092d33909d
--- /dev/null
+++ b/articles/active-directory/saas-apps/blinq-provisioning-tutorial.md
@@ -0,0 +1,186 @@
+---
+title: 'Tutorial: Configure Blinq for automatic user provisioning with Azure Active Directory | Microsoft Docs'
+description: Learn how to automatically provision and de-provision user accounts from Azure AD to Blinq.
+services: active-directory
+author: twimmers
+writer: twimmers
+manager: beatrizd
+ms.assetid: 5b076ac0-cd0e-43c3-85ed-8591bfd424ff
+ms.service: active-directory
+ms.subservice: saas-app-tutorial
+ms.workload: identity
+ms.topic: tutorial
+ms.date: 05/25/2022
+ms.author: thwimmer
+---
+
+# Tutorial: Configure Blinq for automatic user provisioning
+
+This tutorial describes the steps you need to perform in both Blinq and Azure Active Directory (Azure AD) to configure automatic user provisioning. When configured, Azure AD automatically provisions and de-provisions users and groups to [Blinq](https://blinq.me/) using the Azure AD Provisioning service. For important details on what this service does, how it works, and frequently asked questions, see [Automate user provisioning and deprovisioning to SaaS applications with Azure Active Directory](../app-provisioning/user-provisioning.md).
+
+
+## Capabilities supported
+> [!div class="checklist"]
+> * Create users in Blinq.
+> * Remove users in Blinq when they do not require access anymore.
+> * Keep user attributes synchronized between Azure AD and Blinq.
+
+## Prerequisites
+
+The scenario outlined in this tutorial assumes that you already have the following prerequisites:
+
+* [An Azure AD tenant](../develop/quickstart-create-new-tenant.md)
+* A user account in Azure AD with [permission](../roles/permissions-reference.md) to configure provisioning (for example, Application Administrator, Cloud Application Administrator, Application Owner, or Global Administrator).
+* A user account in Blinq with Admin permission
+
+## Step 1. Plan your provisioning deployment
+1. Learn about [how the provisioning service works](../app-provisioning/user-provisioning.md).
+1. Determine who will be in [scope for provisioning](../app-provisioning/define-conditional-rules-for-provisioning-user-accounts.md).
+1. Determine what data to [map between Azure AD and Blinq](../app-provisioning/customize-application-attributes.md).
+
+## Step 2. Configure Blinq to support provisioning with Azure AD
+
+1. Navigate to the [Blinq Admin Console](https://dash.blinq.me) in a separate browser tab.
+1. If you aren't logged in to Blinq, you'll need to sign in.
+1. Click your workspace in the top-left corner of the screen and select **Settings** in the dropdown menu.
+
+    [![Screenshot of the Blinq settings option.](media/blinq-provisioning-tutorial/blinq-settings.png)](media/blinq-provisioning-tutorial/blinq-settings.png#lightbox)
+
+1. On the **Integrations** page, you should see **Team Card Provisioning**, which contains a URL and a token. Generate the token by clicking **Generate**, and then copy the **URL** and **Token**. You'll enter these values in the **Tenant URL** and **Secret Token** fields in the Azure portal, respectively.
+
+    [![Screenshot of the Blinq integration page.](media/blinq-provisioning-tutorial/blinq-integrations-page.png)](media/blinq-provisioning-tutorial/blinq-integrations-page.png#lightbox)
+
+## Step 3. Add Blinq from the Azure AD application gallery
+
+Add Blinq from the Azure AD application gallery to start managing provisioning to Blinq. If you have previously set up Blinq for SSO, you can use the same application. However, it's recommended that you create a separate app when testing out the integration initially. Learn more about adding an application from the gallery [here](../manage-apps/add-application-portal.md).
+
+## Step 4. Define who will be in scope for provisioning
+
+The Azure AD provisioning service allows you to scope who will be provisioned based on assignment to the application and/or based on attributes of the user and group. If you choose to scope who will be provisioned to your app based on assignment, you can use the following [steps](../manage-apps/assign-user-or-group-access-portal.md) to assign users and groups to the application. If you choose to scope who will be provisioned based solely on attributes of the user or group, you can use a scoping filter as described [here](../app-provisioning/define-conditional-rules-for-provisioning-user-accounts.md).
+
+* Start small. Test with a small set of users and groups before rolling out to everyone. When scope for provisioning is set to assigned users and groups, you can control this by assigning one or two users or groups to the app. When scope is set to all users and groups, you can specify an [attribute-based scoping filter](../app-provisioning/define-conditional-rules-for-provisioning-user-accounts.md).
+
+* If you need additional roles, you can [update the application manifest](../develop/howto-add-app-roles-in-azure-ad-apps.md) to add new roles.
+
+
+## Step 5. Configure automatic user provisioning to Blinq
+
+This section guides you through the steps to configure the Azure AD provisioning service to create, update, and disable users and groups in Blinq based on user and group assignments in Azure AD.
+
+### To configure automatic user provisioning for Blinq in Azure AD:
+
+1. Sign in to the [Azure portal](https://portal.azure.com).
Select **Enterprise Applications**, then select **All applications**. + + ![Screenshot of Enterprise applications blade.](common/enterprise-applications.png) + +1. In the applications list, select **Blinq**. + + ![Screenshot of the Blinq link in the Applications list.](common/all-applications.png) + +1. Select the **Provisioning** tab. + + ![Screenshot of Provisioning tab.](common/provisioning.png) + +1. Set the **Provisioning Mode** to **Automatic**. + + ![Screenshot of Provisioning tab automatic.](common/provisioning-automatic.png) + +1. In the **Admin Credentials** section, input your Blinq Tenant URL and Secret Token. Click **Test Connection** to ensure Azure AD can connect to Blinq. If the connection fails, ensure your Blinq account has Admin permissions and try again. + + ![Screenshot of Token field.](common/provisioning-testconnection-tenanturltoken.png) + +1. In the **Notification Email** field, enter the email address of a person or group who should receive the provisioning error notifications and select the **Send an email notification when a failure occurs** check box. + + ![Screenshot of Notification Email.](common/provisioning-notification-email.png) + +1. Select **Save**. + +1. In the **Mappings** section, select **Synchronize Azure Active Directory Users to Blinq**. + +1. Review the user attributes that are synchronized from Azure AD to Blinq in the **Attribute-Mapping** section. The attributes selected as **Matching** properties are used to match the user accounts in Blinq for update operations. If you choose to change the [matching target attribute](../app-provisioning/customize-application-attributes.md), you'll need to ensure that the Blinq API supports filtering users based on that attribute. Select the **Save** button to commit any changes. 
+ + |Attribute|Type|Supported for filtering|Required by Blinq| + |---|---|---|---| + |userName|String|✓|✓ + |active|Boolean|| + |displayName|String|| + |nickName|String|| + |title|String|| + |preferredLanguage|String|| + |locale|String|| + |timezone|String|| + |name.givenName|String|| + |name.familyName|String|| + |name.formatted|String|| + |name.middleName|String|| + |name.honorificPrefix|String|| + |name.honorificSuffix|String|| + |externalId|String|| + |emails[type eq "work"].value|String|| + |emails[type eq "home"].value|String|| + |emails[type eq "other"].value|String|| + |phoneNumbers[type eq "work"].value|String|| + |phoneNumbers[type eq "mobile"].value|String|| + |phoneNumbers[type eq "fax"].value|String|| + |phoneNumbers[type eq "home"].value|String|| + |phoneNumbers[type eq "other"].value|String|| + |phoneNumbers[type eq "pager"].value|String|| + |addresses[type eq "work"].formatted|String|| + |addresses[type eq "work"].streetAddress|String|| + |addresses[type eq "work"].locality|String|| + |addresses[type eq "work"].region|String|| + |addresses[type eq "work"].postalCode|String|| + |addresses[type eq "work"].country|String|| + |addresses[type eq "home"].formatted|String|| + |addresses[type eq "home"].streetAddress|String|| + |addresses[type eq "home"].locality|String|| + |addresses[type eq "home"].region|String|| + |addresses[type eq "home"].postalCode|String|| + |addresses[type eq "home"].country|String|| + |addresses[type eq "other"].formatted|String|| + |addresses[type eq "other"].streetAddress|String|| + |addresses[type eq "other"].locality|String|| + |addresses[type eq "other"].region|String|| + |addresses[type eq "other"].postalCode|String|| + |addresses[type eq "other"].country|String|| + |urn:ietf:params:scim:schemas:extension:enterprise:2.0:User:employeeNumber|String|| + |urn:ietf:params:scim:schemas:extension:enterprise:2.0:User:organization|String|| + |urn:ietf:params:scim:schemas:extension:enterprise:2.0:User:division|String|| + |urn:ietf:params:scim:schemas:extension:enterprise:2.0:User:department|String|| + + +1. To configure scoping filters, refer to the following instructions provided in the [Scoping filter tutorial](../app-provisioning/define-conditional-rules-for-provisioning-user-accounts.md). + +1. To enable the Azure AD provisioning service for Blinq, change the **Provisioning Status** to **On** in the **Settings** section. + + ![Screenshot of Provisioning Status Toggled On.](common/provisioning-toggle-on.png) + +1. Define the users and groups that you would like to provision to Blinq by choosing the desired values in **Scope** in the **Settings** section. + + ![Screenshot of Provisioning Scope.](common/provisioning-scope.png) + +1. When you're ready to provision, click **Save**. + + ![Screenshot of Saving Provisioning Configuration.](common/provisioning-configuration-save.png) + +This operation starts the initial synchronization cycle of all users and groups defined in **Scope** in the **Settings** section. The initial cycle takes longer to complete than next cycles, which occur approximately every 40 minutes as long as the Azure AD provisioning service is running. + +## Step 6. 
Monitor your deployment +Once you've configured provisioning, use the following resources to monitor your deployment: + +* Use the [provisioning logs](../reports-monitoring/concept-provisioning-logs.md) to determine which users have been provisioned successfully or unsuccessfully +* Check the [progress bar](../app-provisioning/application-provisioning-when-will-provisioning-finish-specific-user.md) to see the status of the provisioning cycle and how close it's to completion +* If the provisioning configuration seems to be in an unhealthy state, the application will go into quarantine. Learn more about quarantine states [here](../app-provisioning/application-provisioning-quarantine-status.md). + +## Change Logs +05/25/2022 - **Schema Discovery** feature enabled on this app. + +## More resources + +* [Managing user account provisioning for Enterprise Apps](../app-provisioning/configure-automatic-user-provisioning-portal.md) +* [What is application access and single sign-on with Azure Active Directory?](../manage-apps/what-is-single-sign-on.md) + +## Next steps + +* [Learn how to review logs and get reports on provisioning activity](../app-provisioning/check-status-user-account-provisioning.md) \ No newline at end of file diff --git a/articles/active-directory/saas-apps/bridgelineunbound-tutorial.md b/articles/active-directory/saas-apps/bridgelineunbound-tutorial.md deleted file mode 100644 index 2012dfe70a396..0000000000000 --- a/articles/active-directory/saas-apps/bridgelineunbound-tutorial.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: 'Tutorial: Azure Active Directory integration with Bridgeline Unbound | Microsoft Docs' -description: Learn how to configure single sign-on between Azure Active Directory and Bridgeline Unbound. -services: active-directory -author: jeevansd -manager: CelesteDG -ms.reviewer: celested -ms.service: active-directory -ms.subservice: saas-app-tutorial -ms.workload: identity -ms.topic: tutorial -ms.date: 12/16/2020 -ms.author: jeedes ---- -# Tutorial: Azure Active Directory integration with Bridgeline Unbound - -In this tutorial, you'll learn how to integrate Bridgeline Unbound with Azure Active Directory (Azure AD). When you integrate Bridgeline Unbound with Azure AD, you can: - -* Control in Azure AD who has access to Bridgeline Unbound. -* Enable your users to be automatically signed-in to Bridgeline Unbound with their Azure AD accounts. -* Manage your accounts in one central location - the Azure portal. - -## Prerequisites - -To configure Azure AD integration with Bridgeline Unbound, you need the following items: - -* An Azure AD subscription. If you don't have an Azure AD environment, you can get one-month trial [here](https://azure.microsoft.com/pricing/free-trial/) -* Bridgeline Unbound single sign-on enabled subscription - -## Scenario description - -In this tutorial, you configure and test Azure AD single sign-on in a test environment. - -* Bridgeline supports **SP and IDP** initiated SSO -* Bridgeline Unbound supports **Just In Time** user provisioning - -## Adding Bridgeline Unbound from the gallery - -To configure the integration of Bridgeline Unbound into Azure AD, you need to add Bridgeline Unbound from the gallery to your list of managed SaaS apps. - -1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. -1. On the left navigation pane, select the **Azure Active Directory** service. -1. Navigate to **Enterprise Applications** and then select **All Applications**. -1. 
To add new application, select **New application**. -1. In the **Add from the gallery** section, type **Bridgeline Unbound** in the search box. -1. Select **Bridgeline Unbound** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. - - -## Configure and test Azure AD SSO for Bridgeline Unbound - -Configure and test Azure AD SSO with Bridgeline Unbound using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in Bridgeline Unbound. - -To configure and test Azure AD SSO with Bridgeline Unbound, perform the following steps: - -1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. - 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with Britta Simon. - 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable Britta Simon to use Azure AD single sign-on. -2. **[Configure Bridgeline Unbound SSO](#configure-bridgeline-unbound-sso)** - to configure the Single Sign-On settings on application side. - 1. **[Create Bridgeline Unbound test user](#create-bridgeline-unbound-test-user)** - to have a counterpart of Britta Simon in Bridgeline Unbound that is linked to the Azure AD representation of user. -6. **[Test SSO](#test-sso)** - to verify whether the configuration works. - -### Configure Azure AD SSO - -Follow these steps to enable Azure AD SSO in the Azure portal. - -1. In the Azure portal, on the **Bridgeline Unbound** application integration page, find the **Manage** section and select **single sign-on**. -1. On the **Select a single sign-on method** page, select **SAML**. -1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - - ![Edit Basic SAML Configuration](common/edit-urls.png) -4. On the **Basic SAML Configuration** section, If you wish to configure the application in **IDP** initiated mode, perform the following steps: - - a. In the **Identifier** text box, type a URL using the following pattern: - `iApps_UPSTT_` - - b. In the **Reply URL** text box, type a URL using the following pattern: - `https://.iapps.com/SAMLAssertionService.aspx` - -5. Click **Set additional URLs** and perform the following step if you wish to configure the application in **SP** initiated mode: - - In the **Sign-on URL** text box, type a URL using the following pattern: - `https://.iapps.com/CommonLogin/login?` - - > [!NOTE] - > These values are not real. Update these values with the actual Identifier, Reply URL and Sign-on URL. Contact [Bridgeline Unbound Client support team](mailto:support@iapps.com) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. - -6. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Certificate (Base64)** from the given options as per your requirement and save it on your computer. - - ![The Certificate download link](common/certificatebase64.png) - -7. On the **Set up Bridgeline Unbound** section, copy the appropriate URL(s) as per your requirement. - - ![Copy configuration URLs](common/copy-configuration-urls.png) - - -### Create an Azure AD test user - -In this section, you'll create a test user in the Azure portal called B.Simon. - -1. 
From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. -1. Select **New user** at the top of the screen. -1. In the **User** properties, follow these steps: - 1. In the **Name** field, enter `B.Simon`. - 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. - 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. - 1. Click **Create**. - -### Assign the Azure AD test user - -In this section, you'll enable B.Simon to use Azure single sign-on by granting access to Bridgeline Unbound. - -1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. -1. In the applications list, select **Bridgeline Unbound**. -1. In the app's overview page, find the **Manage** section and select **Users and groups**. -1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. -1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. -1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. -1. In the **Add Assignment** dialog, click the **Assign** button. - - -## Configure Bridgeline Unbound SSO - -To configure single sign-on on **Bridgeline Unbound** side, you need to send the downloaded **Certificate (Base64)** and appropriate copied URLs from Azure portal to [Bridgeline Unbound support team](mailto:support@iapps.com). They set this setting to have the SAML SSO connection set properly on both sides. - -### Create Bridgeline Unbound test user - -In this section, a user called Britta Simon is created in Bridgeline Unbound. Bridgeline Unbound supports just-in-time user provisioning, which is enabled by default. There is no action item for you in this section. If a user doesn't already exist in Bridgeline Unbound, a new one is created after authentication. - -## Test SSO - -In this section, you test your Azure AD single sign-on configuration with following options. - -#### SP initiated: - -* Click on **Test this application** in Azure portal. This will redirect to Bridgeline Unbound Sign on URL where you can initiate the login flow. - -* Go to Bridgeline Unbound Sign-on URL directly and initiate the login flow from there. - -#### IDP initiated: - -* Click on **Test this application** in Azure portal and you should be automatically signed in to the Bridgeline Unbound for which you set up the SSO - -You can also use Microsoft My Apps to test the application in any mode. When you click the Bridgeline Unbound tile in the My Apps, if configured in SP mode you would be redirected to the application sign on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the Bridgeline Unbound for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). - - -## Next steps - -Once you configure Bridgeline Unbound you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. 
[Learn how to enforce session control with Microsoft Defender for Cloud Apps](/cloud-app-security/proxy-deployment-any-app). diff --git a/articles/active-directory/saas-apps/capriza-tutorial.md b/articles/active-directory/saas-apps/capriza-tutorial.md index 2bccc8b0c31e2..c31424c5c981c 100644 --- a/articles/active-directory/saas-apps/capriza-tutorial.md +++ b/articles/active-directory/saas-apps/capriza-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory integration with Capriza Platform | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with Capriza Platform' description: Learn how to configure single sign-on between Azure Active Directory and Capriza Platform. services: active-directory author: jeevansd @@ -9,92 +9,71 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 02/12/2019 +ms.date: 06/01/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory integration with Capriza Platform +# Tutorial: Azure AD SSO integration with Capriza Platform -In this tutorial, you learn how to integrate Capriza Platform with Azure Active Directory (Azure AD). -Integrating Capriza Platform with Azure AD provides you with the following benefits: +In this tutorial, you'll learn how to integrate Capriza Platform with Azure Active Directory (Azure AD). When you integrate Capriza Platform with Azure AD, you can: -* You can control in Azure AD who has access to Capriza Platform. -* You can enable your users to be automatically signed-in to Capriza Platform (Single Sign-On) with their Azure AD accounts. -* You can manage your accounts in one central location - the Azure portal. - -If you want to know more details about SaaS app integration with Azure AD, see [What is application access and single sign-on with Azure Active Directory](../manage-apps/what-is-single-sign-on.md). -If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/) before you begin. +* Control in Azure AD who has access to Capriza Platform. +* Enable your users to be automatically signed-in to Capriza Platform with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. ## Prerequisites -To configure Azure AD integration with Capriza Platform, you need the following items: +To get started, you need the following items: -* An Azure AD subscription. If you don't have an Azure AD environment, you can get one-month trial [here](https://azure.microsoft.com/pricing/free-trial/) -* Capriza Platform single sign-on enabled subscription +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* Capriza Platform single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD single sign-on in a test environment. -* Capriza Platform supports **SP** initiated SSO -* Capriza Platform supports **Just In Time** user provisioning - -## Adding Capriza Platform from the gallery - -To configure the integration of Capriza Platform into Azure AD, you need to add Capriza Platform from the gallery to your list of managed SaaS apps. - -**To add Capriza Platform from the gallery, perform the following steps:** - -1. 
In the **[Azure portal](https://portal.azure.com)**, on the left navigation panel, click **Azure Active Directory** icon. - - ![The Azure Active Directory button](common/select-azuread.png) - -2. Navigate to **Enterprise Applications** and then select the **All Applications** option. - - ![The Enterprise applications blade](common/enterprise-applications.png) - -3. To add new application, click **New application** button on the top of dialog. +* Capriza Platform supports **SP** initiated SSO. +* Capriza Platform supports **Just In Time** user provisioning. - ![The New application button](common/add-new-app.png) +> [!NOTE] +> Identifier of this application is a fixed string value so only one instance can be configured in one tenant. -4. In the search box, type **Capriza Platform**, select **Capriza Platform** from result panel then click **Add** button to add the application. +## Add Capriza Platform from the gallery - ![Capriza Platform in the results list](common/search-new-app.png) - -## Configure and test Azure AD single sign-on - -In this section, you configure and test Azure AD single sign-on with Capriza Platform based on a test user called **Britta Simon**. -For single sign-on to work, a link relationship between an Azure AD user and the related user in Capriza Platform needs to be established. - -To configure and test Azure AD single sign-on with Capriza Platform, you need to complete the following building blocks: - -1. **[Configure Azure AD Single Sign-On](#configure-azure-ad-single-sign-on)** - to enable your users to use this feature. -2. **[Configure Capriza Platform Single Sign-On](#configure-capriza-platform-single-sign-on)** - to configure the Single Sign-On settings on application side. -3. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with Britta Simon. -4. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable Britta Simon to use Azure AD single sign-on. -5. **[Create Capriza Platform test user](#create-capriza-platform-test-user)** - to have a counterpart of Britta Simon in Capriza Platform that is linked to the Azure AD representation of user. -6. **[Test single sign-on](#test-single-sign-on)** - to verify whether the configuration works. - -### Configure Azure AD single sign-on +To configure the integration of Capriza Platform into Azure AD, you need to add Capriza Platform from the gallery to your list of managed SaaS apps. -In this section, you enable Azure AD single sign-on in the Azure portal. +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **Capriza Platform** in the search box. +1. Select **Capriza Platform** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. -To configure Azure AD single sign-on with Capriza Platform, perform the following steps: +## Configure and test Azure AD SSO for Capriza Platform -1. In the [Azure portal](https://portal.azure.com/), on the **Capriza Platform** application integration page, select **Single sign-on**. +Configure and test Azure AD SSO with Capriza Platform using a test user called **B.Simon**. 
For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in Capriza Platform. - ![Configure single sign-on link](common/select-sso.png) +To configure and test Azure AD SSO with Capriza Platform, perform the following steps: -2. On the **Select a Single sign-on method** dialog, select **SAML/WS-Fed** mode to enable single sign-on. +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure Capriza Platform SSO](#configure-capriza-platform-sso)** - to configure the single sign-on settings on application side. + 1. **[Create Capriza Platform test user](#create-capriza-platform-test-user)** - to have a counterpart of B.Simon in Capriza Platform that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. - ![Single sign-on select mode](common/select-saml-option.png) +## Configure Azure AD SSO -3. On the **Set up Single Sign-On with SAML** page, click **Edit** icon to open **Basic SAML Configuration** dialog. +Follow these steps to enable Azure AD SSO in the Azure portal. - ![Edit Basic SAML Configuration](common/edit-urls.png) +1. In the Azure portal, on the **Capriza Platform** application integration page, find the **Manage** section and select **single sign-on**. +2. On the **Select a single sign-on method** page, select **SAML**. +3. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. -4. On the **Basic SAML Configuration** section, perform the following steps: + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") - ![Capriza Platform Domain and URLs single sign-on information](common/sp-signonurl.png) +4. On the **Basic SAML Configuration** section, perform the following step: In the **Sign-on URL** text box, type a URL using the following pattern: `https://.capriza.com/` @@ -104,72 +83,39 @@ To configure Azure AD single sign-on with Capriza Platform, perform the followin 5. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Certificate (Base64)** from the given options as per your requirement and save it on your computer. - ![The Certificate download link](common/certificatebase64.png) + ![Screenshot shows the Certificate download link.](common/certificatebase64.png "Certificate") 6. On the **Set up Capriza Platform** section, copy the appropriate URL(s) as per your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) - - a. Login URL - - b. Azure Ad Identifier - - c. Logout URL - -### Configure Capriza Platform Single Sign-On - -To configure single sign-on on **Capriza Platform** side, you need to send the downloaded **Certificate (Base64)** and appropriate copied URLs from Azure portal to [Capriza Platform support team](mailto:support@capriza.com). They set this setting to have the SAML SSO connection set properly on both sides. 
+ ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Configuration") ### Create an Azure AD test user -The objective of this section is to create a test user in the Azure portal called Britta Simon. - -1. In the Azure portal, in the left pane, select **Azure Active Directory**, select **Users**, and then select **All users**. - - ![The "Users and groups" and "All users" links](common/users.png) - -2. Select **New user** at the top of the screen. - - ![New user Button](common/new-user.png) - -3. In the User properties, perform the following steps. - - ![The User dialog box](common/user-properties.png) - - a. In the **Name** field enter **BrittaSimon**. - - b. In the **User name** field type **brittasimon\@yourcompanydomain.extension** - For example, BrittaSimon@contoso.com - - c. Select **Show password** check box, and then write down the value that's displayed in the Password box. +In this section, you'll create a test user in the Azure portal called B.Simon. - d. Click **Create**. +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. ### Assign the Azure AD test user -In this section, you enable Britta Simon to use Azure single sign-on by granting access to Capriza Platform. +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to Capriza Platform. -1. In the Azure portal, select **Enterprise Applications**, select **All applications**, then select **Capriza Platform**. +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **Capriza Platform**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. - ![Enterprise applications blade](common/enterprise-applications.png) +## Configure Capriza Platform SSO -2. In the applications list, select **Capriza Platform**. - - ![The Capriza Platform link in the Applications list](common/all-applications.png) - -3. In the menu on the left, select **Users and groups**. - - ![The "Users and groups" link](common/users-groups-blade.png) - -4. Click the **Add user** button, then select **Users and groups** in the **Add Assignment** dialog. - - ![The Add Assignment pane](common/add-assign-user.png) - -5. In the **Users and groups** dialog select **Britta Simon** in the Users list, then click the **Select** button at the bottom of the screen. - -6. 
If you are expecting any role value in the SAML assertion then in the **Select Role** dialog select the appropriate role for the user from the list, then click the **Select** button at the bottom of the screen. - -7. In the **Add Assignment** dialog click the **Assign** button. +To configure single sign-on on **Capriza Platform** side, you need to send the downloaded **Certificate (Base64)** and appropriate copied URLs from Azure portal to [Capriza Platform support team](mailto:support@capriza.com). They set this setting to have the SAML SSO connection set properly on both sides. ### Create Capriza Platform test user @@ -177,16 +123,16 @@ The objective of this section is to create a user called Britta Simon in Capriza There is no action item for you in this section. A new user will be created during an attempt to access Capriza if it doesn't exist yet. -### Test single sign-on +## Test SSO -In this section, you test your Azure AD single sign-on configuration using the Access Panel. +In this section, you test your Azure AD single sign-on configuration with following options. -When you click the Capriza Platform tile in the Access Panel, you should be automatically signed in to the Capriza Platform for which you set up SSO. For more information about the Access Panel, see [Introduction to the Access Panel](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). +* Click on **Test this application** in Azure portal. This will redirect to Capriza Platform Sign-on URL where you can initiate the login flow. -## Additional Resources +* Go to Capriza Platform Sign-on URL directly and initiate the login flow from there. -- [List of Tutorials on How to Integrate SaaS Apps with Azure Active Directory](./tutorial-list.md) +* You can use Microsoft My Apps. When you click the Capriza Platform tile in the My Apps, this will redirect to Capriza Platform Sign-on URL. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). -- [What is application access and single sign-on with Azure Active Directory?](../manage-apps/what-is-single-sign-on.md) +## Next steps -- [What is Conditional Access in Azure Active Directory?](../conditional-access/overview.md) \ No newline at end of file +Once you configure Capriza Platform you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/carlsonwagonlit-tutorial.md b/articles/active-directory/saas-apps/carlsonwagonlit-tutorial.md index e7810d43816e0..811de56be2616 100644 --- a/articles/active-directory/saas-apps/carlsonwagonlit-tutorial.md +++ b/articles/active-directory/saas-apps/carlsonwagonlit-tutorial.md @@ -1,6 +1,6 @@ --- -title: 'Tutorial: Azure Active Directory integration with Carlson Wagonlit Travel | Microsoft Docs' -description: Learn how to configure single sign-on between Azure Active Directory and Carlson Wagonlit Travel. +title: 'Tutorial: Azure AD SSO integration with CWT' +description: Learn how to configure single sign-on between Azure Active Directory and CWT. 
services: active-directory author: jeevansd manager: CelesteDG @@ -9,15 +9,15 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 07/21/2021 +ms.date: 06/08/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory integration with Carlson Wagonlit Travel +# Tutorial: Azure AD SSO integration with CWT -In this tutorial, you'll learn how to integrate Carlson Wagonlit Travel with Azure Active Directory (Azure AD). When you integrate Carlson Wagonlit Travel with Azure AD, you can: +In this tutorial, you'll learn how to integrate CWT with Azure Active Directory (Azure AD). When you integrate CWT with Azure AD, you can: -* Control in Azure AD who has access to Carlson Wagonlit Travel. -* Enable your users to be automatically signed-in to Carlson Wagonlit Travel with their Azure AD accounts. +* Control in Azure AD who has access to CWT. +* Enable your users to be automatically signed-in to CWT with their Azure AD accounts. * Manage your accounts in one central location - the Azure portal. ## Prerequisites @@ -25,63 +25,59 @@ In this tutorial, you'll learn how to integrate Carlson Wagonlit Travel with Azu To get started, you need the following items: * An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). -* Carlson Wagonlit Travel single sign-on (SSO) enabled subscription. +* CWT single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD single sign-on in a test environment. -* Carlson Wagonlit Travel supports **IDP** initiated SSO. +* CWT supports **IDP** initiated SSO. -> [!NOTE] -> Identifier of this application is a fixed string value so only one instance can be configured in one tenant. +## Add CWT from the gallery -## Add Carlson Wagonlit Travel from the gallery - -To configure the integration of Carlson Wagonlit Travel into Azure AD, you need to add Carlson Wagonlit Travel from the gallery to your list of managed SaaS apps. +To configure the integration of CWT into Azure AD, you need to add CWT from the gallery to your list of managed SaaS apps. 1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. 1. On the left navigation pane, select the **Azure Active Directory** service. 1. Navigate to **Enterprise Applications** and then select **All Applications**. 1. To add new application, select **New application**. -1. In the **Add from the gallery** section, type **Carlson Wagonlit Travel** in the search box. -1. Select **Carlson Wagonlit Travel** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. +1. In the **Add from the gallery** section, type **CWT** in the search box. +1. Select **CWT** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. -## Configure and test Azure AD SSO for Carlson Wagonlit Travel +## Configure and test Azure AD SSO for CWT -Configure and test Azure AD SSO with Carlson Wagonlit Travel using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in Carlson Wagonlit Travel. 
+Configure and test Azure AD SSO with CWT using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in CWT. -To configure and test Azure AD SSO with Carlson Wagonlit Travel, perform the following steps: +To configure and test Azure AD SSO with CWT, perform the following steps: 1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. -1. **[Configure Carlson Wagonlit Travel SSO](#configure-carlson-wagonlit-travel-sso)** - to configure the single sign-on settings on application side. - 1. **[Create Carlson Wagonlit Travel test user](#create-carlson-wagonlit-travel-test-user)** - to have a counterpart of B.Simon in Carlson Wagonlit Travel that is linked to the Azure AD representation of user. +1. **[Configure CWT SSO](#configure-cwt-sso)** - to configure the single sign-on settings on application side. + 1. **[Create CWT test user](#create-cwt-test-user)** - to have a counterpart of B.Simon in CWT that is linked to the Azure AD representation of user. 1. **[Test SSO](#test-sso)** - to verify whether the configuration works. ## Configure Azure AD SSO Follow these steps to enable Azure AD SSO in the Azure portal. -1. In the Azure portal, on the **Carlson Wagonlit Travel** application integration page, find the **Manage** section and select **single sign-on**. +1. In the Azure portal, on the **CWT** application integration page, find the **Manage** section and select **single sign-on**. 1. On the **Select a single sign-on method** page, select **SAML**. 1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) - -4. On the **Basic SAML Configuration** section, perform the following step: + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") - In the **Identifier** text box, type the value: - `cwt-stage` +1. On the **Basic SAML Configuration** section, the application is pre-configured and the necessary URLs are already pre-populated with Azure. The user needs to save the configuration by clicking the **Save** button. -5. On the **Set-up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Federation Metadata XML** from the given options as per your requirement and save it on your computer. +1. On the **Set-up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Federation Metadata XML** from the given options as per your requirement and save it on your computer. - ![The Certificate download link](common/metadataxml.png) + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") -6. On the **Set-up Carlson Wagonlit Travel** section, copy the appropriate URL(s) as per your requirement. +1. On the **Set-up CWT** section, copy the appropriate URL(s) as per your requirement. 
- ![Copy configuration URLs](common/copy-configuration-urls.png) + ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Metadata") ### Create an Azure AD test user @@ -97,32 +93,32 @@ In this section, you'll create a test user in the Azure portal called B.Simon. ### Assign the Azure AD test user -In this section, you'll enable B.Simon to use Azure single sign-on by granting access to Carlson Wagonlit Travel. +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to CWT. 1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. -1. In the applications list, select **Carlson Wagonlit Travel**. +1. In the applications list, select **CWT**. 1. In the app's overview page, find the **Manage** section and select **Users and groups**. 1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. 1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. 1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. 1. In the **Add Assignment** dialog, click the **Assign** button. -## Configure Carlson Wagonlit Travel SSO +## Configure CWT SSO -To configure single sign-on on **Carlson Wagonlit Travel** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [Carlson Wagonlit Travel support team](https://www.mycwt.com/traveler-help/). They set this setting to have the SAML SSO connection set properly on both sides. +To configure single sign-on on **CWT** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [CWT support team](https://www.mycwt.com/traveler-help/). They set this setting to have the SAML SSO connection set properly on both sides. -### Create Carlson Wagonlit Travel test user +### Create CWT test user -In this section, you create a user called Britta Simon in Carlson Wagonlit Travel. Work with [Carlson Wagonlit Travel support team](https://www.mycwt.com/traveler-help/) to add the users in the Carlson Wagonlit Travel platform. Users must be created and activated before you use single sign-on. +In this section, you create a user called Britta Simon in CWT. Work with [CWT support team](https://www.mycwt.com/traveler-help/) to add the users in the CWT platform. Users must be created and activated before you use single sign-on. ## Test SSO In this section, you test your Azure AD single sign-on configuration with following options. -* Click on Test this application in Azure portal and you should be automatically signed in to the Carlson Wagonlit Travel for which you set up the SSO. +* Click on Test this application in Azure portal and you should be automatically signed in to the CWT for which you set up the SSO. -* You can use Microsoft My Apps. When you click the Carlson Wagonlit Travel tile in the My Apps, you should be automatically signed in to the Carlson Wagonlit Travel for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). +* You can use Microsoft My Apps. 
When you click the CWT tile in the My Apps, you should be automatically signed in to the CWT for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). ## Next steps -Once you configure Carlson Wagonlit Travel you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Defender for Cloud Apps](/cloud-app-security/proxy-deployment-aad). +Once you configure CWT you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Defender for Cloud Apps](/cloud-app-security/proxy-deployment-aad). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/cerby-provisioning-tutorial.md b/articles/active-directory/saas-apps/cerby-provisioning-tutorial.md index 9263823daea0f..4445778ab45d4 100644 --- a/articles/active-directory/saas-apps/cerby-provisioning-tutorial.md +++ b/articles/active-directory/saas-apps/cerby-provisioning-tutorial.md @@ -138,6 +138,16 @@ Once you've configured provisioning, use the following resources to monitor your * Check the [progress bar](../app-provisioning/application-provisioning-when-will-provisioning-finish-specific-user.md) to see the status of the provisioning cycle and how close it's to completion * If the provisioning configuration seems to be in an unhealthy state, the application will go into quarantine. Learn more about quarantine states [here](../app-provisioning/application-provisioning-quarantine-status.md). +## Troubleshooting Tips +If you need to regenerate the SCIM API authentication token, complete the following steps: + +1. Send an email with your request to [Cerby Support Team](mailto:support@cerby.com). The Cerby team regenerates the SCIM API authentication token. +1. Receive the response email from Cerby to confirm that the token was successfully regenerated. +1. Complete the instructions from the [How to Retrieve the SCIM API Authentication Token from Cerby](https://help.cerby.com/en/articles/5638472-how-to-configure-automatic-user-provisioning-for-azure-ad) article to retrieve the new token. + + >[!NOTE] + >The Cerby team is currently developing a self-service solution for regenerating the SCIM API authentication token. To regenerate the token, the Cerby team members must validate their identity. + ## More resources * [Managing user account provisioning for Enterprise Apps](../app-provisioning/configure-automatic-user-provisioning-portal.md) diff --git a/articles/active-directory/saas-apps/cisco-umbrella-tutorial.md b/articles/active-directory/saas-apps/cisco-umbrella-tutorial.md index afb70f435405a..d934197ca71ae 100644 --- a/articles/active-directory/saas-apps/cisco-umbrella-tutorial.md +++ b/articles/active-directory/saas-apps/cisco-umbrella-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory integration with Cisco Umbrella Admin SSO | Microsoft Docs' +title: 'Tutorial: Azure AD integration with Cisco Umbrella Admin SSO' description: Learn how to configure single sign-on between Azure Active Directory and Cisco Umbrella Admin SSO. 
services: active-directory author: jeevansd @@ -9,10 +9,10 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 03/16/2021 +ms.date: 05/24/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory integration with Cisco Umbrella Admin SSO +# Tutorial: Azure AD integration with Cisco Umbrella Admin SSO In this tutorial, you'll learn how to integrate Cisco Umbrella Admin SSO with Azure Active Directory (Azure AD). When you integrate Cisco Umbrella Admin SSO with Azure AD, you can: @@ -27,6 +27,9 @@ To get started, you need the following items: * An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). * Cisco Umbrella Admin SSO single sign-on (SSO) enabled subscription. +> [!NOTE] +> This integration is also available to use from Azure AD US Government Cloud environment. You can find this application in the Azure AD US Government Cloud Application Gallery and configure it in the same way as you do from public cloud. + ## Scenario description In this tutorial, you configure and test Azure AD single sign-on in a test environment. @@ -65,7 +68,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Select a single sign-on method** page, select **SAML**. 1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 4. On the **Basic SAML Configuration** section, the user does not have to perform any step as the app is already pre-integrated with Azure. @@ -77,11 +80,11 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 5. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Metadata XML** from the given options as per your requirement and save it on your computer. - ![The Certificate download link](common/metadataxml.png) + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") 6. On the **Set up Cisco Umbrella Admin SSO** section, copy the appropriate URL(s) as per your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) + ![Screenshot shows to copy configuration appropriate U R L.](common/copy-configuration-urls.png "Attributes") ### Create an Azure AD test user @@ -113,27 +116,27 @@ In this section, you'll enable B.Simon to use Azure single sign-on by granting a 2. From the left side of menu, click **Admin** and navigate to **Authentication** and then click on **SAML**. - ![The Admin](./media/cisco-umbrella-tutorial/admin.png) + ![Screenshot shows the Admin menu window.](./media/cisco-umbrella-tutorial/admin.png "Administrator") 3. Choose **Other** and click on **NEXT**. - ![The Other](./media/cisco-umbrella-tutorial/other.png) + ![Screenshot shows the Other menu window.](./media/cisco-umbrella-tutorial/other.png "Folder") 4. On the **Cisco Umbrella Admin SSO Metadata**, page, click **NEXT**. - ![The metadata](./media/cisco-umbrella-tutorial/metadata.png) + ![Screenshot shows the metadata file page.](./media/cisco-umbrella-tutorial/metadata.png "File") 5. On the **Upload Metadata** tab, if you had pre-configured SAML, select **Click here to change them** option and follow the below steps. 
- ![The Next](./media/cisco-umbrella-tutorial/next.png) + ![Screenshot shows the Next Folder window.](./media/cisco-umbrella-tutorial/next.png "Values") 6. In the **Option A: Upload XML file**, upload the **Federation Metadata XML** file that you downloaded from the Azure portal and after uploading metadata the below values get auto populated automatically then click **NEXT**. - ![The choosefile](./media/cisco-umbrella-tutorial/choose-file.png) + ![Screenshot shows the choosefile from folder.](./media/cisco-umbrella-tutorial/choose-file.png "Federation") 7. Under **Validate SAML Configuration** section, click **TEST YOUR SAML CONFIGURATION**. - ![The Test](./media/cisco-umbrella-tutorial/test.png) + ![Screenshot shows the Test SAML Configuration.](./media/cisco-umbrella-tutorial/test.png "Validate") 8. Click **SAVE**. @@ -148,11 +151,11 @@ In the case of Cisco Umbrella Admin SSO, provisioning is a manual task. 2. From the left side of menu, click **Admin** and navigate to **Accounts**. - ![The Account](./media/cisco-umbrella-tutorial/account.png) + ![Screenshot shows the Account of Cisco Umbrella Admin.](./media/cisco-umbrella-tutorial/account.png "Account") 3. On the **Accounts** page, click on **Add** on the top right side of the page and perform the following steps. - ![The User](./media/cisco-umbrella-tutorial/create-user.png) + ![Screenshot shows the User of Accounts.](./media/cisco-umbrella-tutorial/create-user.png "User") a. In the **First Name** field, enter the firstname like **Britta**. diff --git a/articles/active-directory/saas-apps/cloud-service-picco-tutorial.md b/articles/active-directory/saas-apps/cloud-service-picco-tutorial.md index a95d9ad10147a..fa893dd677857 100644 --- a/articles/active-directory/saas-apps/cloud-service-picco-tutorial.md +++ b/articles/active-directory/saas-apps/cloud-service-picco-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory integration with Cloud Service PICCO | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with Cloud Service PICCO' description: Learn how to configure single sign-on between Azure Active Directory and Cloud Service PICCO. services: active-directory author: jeevansd @@ -9,178 +9,127 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 12/21/2018 +ms.date: 06/07/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory integration with Cloud Service PICCO +# Tutorial: Azure AD SSO integration with Cloud Service PICCO -In this tutorial, you learn how to integrate Cloud Service PICCO with Azure Active Directory (Azure AD). -Integrating Cloud Service PICCO with Azure AD provides you with the following benefits: +In this tutorial, you'll learn how to integrate Cloud Service PICCO with Azure Active Directory (Azure AD). When you integrate Cloud Service PICCO with Azure AD, you can: -* You can control in Azure AD who has access to Cloud Service PICCO. -* You can enable your users to be automatically signed-in to Cloud Service PICCO (Single Sign-On) with their Azure AD accounts. -* You can manage your accounts in one central location - the Azure portal. - -If you want to know more details about SaaS app integration with Azure AD, see [What is application access and single sign-on with Azure Active Directory](../manage-apps/what-is-single-sign-on.md). -If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/) before you begin. +* Control in Azure AD who has access to Cloud Service PICCO. 
+* Enable your users to be automatically signed-in to Cloud Service PICCO with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. ## Prerequisites -To configure Azure AD integration with Cloud Service PICCO, you need the following items: +To get started, you need the following items: -* An Azure AD subscription. If you don't have an Azure AD environment, you can get one-month trial [here](https://azure.microsoft.com/pricing/free-trial/) -* Cloud Service PICCO single sign-on enabled subscription +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* Cloud Service PICCO single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD single sign-on in a test environment. -* Cloud Service PICCO supports **SP** initiated SSO -* Cloud Service PICCO supports **Just In Time** user provisioning +* Cloud Service PICCO supports **SP** initiated SSO. +* Cloud Service PICCO supports **Just In Time** user provisioning. -## Adding Cloud Service PICCO from the gallery +## Add Cloud Service PICCO from the gallery To configure the integration of Cloud Service PICCO into Azure AD, you need to add Cloud Service PICCO from the gallery to your list of managed SaaS apps. -**To add Cloud Service PICCO from the gallery, perform the following steps:** - -1. In the **[Azure portal](https://portal.azure.com)**, on the left navigation panel, click **Azure Active Directory** icon. - - ![The Azure Active Directory button](common/select-azuread.png) - -2. Navigate to **Enterprise Applications** and then select the **All Applications** option. - - ![The Enterprise applications blade](common/enterprise-applications.png) - -3. To add new application, click **New application** button on the top of dialog. - - ![The New application button](common/add-new-app.png) - -4. In the search box, type **Cloud Service PICCO**, select **Cloud Service PICCO** from result panel then click **Add** button to add the application. - - ![Cloud Service PICCO in the results list](common/search-new-app.png) - -## Configure and test Azure AD single sign-on +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **Cloud Service PICCO** in the search box. +1. Select **Cloud Service PICCO** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. -In this section, you configure and test Azure AD single sign-on with Cloud Service PICCO based on a test user called **Britta Simon**. -For single sign-on to work, a link relationship between an Azure AD user and the related user in Cloud Service PICCO needs to be established. +## Configure and test Azure AD SSO for Cloud Service PICCO -To configure and test Azure AD single sign-on with Cloud Service PICCO, you need to complete the following building blocks: +Configure and test Azure AD SSO with Cloud Service PICCO using a test user called **B.Simon**. 
For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in Cloud Service PICCO. -1. **[Configure Azure AD Single Sign-On](#configure-azure-ad-single-sign-on)** - to enable your users to use this feature. -2. **[Configure Cloud Service PICCO Single Sign-On](#configure-cloud-service-picco-single-sign-on)** - to configure the Single Sign-On settings on application side. -3. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with Britta Simon. -4. **[Create Cloud Service PICCO test user](#create-cloud-service-picco-test-user)** - to have a counterpart of Britta Simon in Cloud Service PICCO that is linked to the Azure AD representation of user. -5. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable Britta Simon to use Azure AD single sign-on. -6. **[Test single sign-on](#test-single-sign-on)** - to verify whether the configuration works. +To configure and test Azure AD SSO with Cloud Service PICCO, perform the following steps: -### Configure Azure AD single sign-on +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure Cloud Service PICCO SSO](#configure-cloud-service-picco-sso)** - to configure the single sign-on settings on application side. + 1. **[Create Cloud Service PICCO test user](#create-cloud-service-picco-test-user)** - to have a counterpart of B.Simon in Cloud Service PICCO that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. -In this section, you enable Azure AD single sign-on in the Azure portal. +## Configure Azure AD SSO -To configure Azure AD single sign-on with Cloud Service PICCO, perform the following steps: +Follow these steps to enable Azure AD SSO in the Azure portal. -1. In the [Azure portal](https://portal.azure.com/), on the **Cloud Service PICCO** application integration page, select **Single sign-on**. +1. In the Azure portal, on the **Cloud Service PICCO** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") - ![Configure single sign-on link](common/select-sso.png) +1. On the **Basic SAML Configuration** section, perform the following steps: -2. On the **Select a Single sign-on method** dialog, select **SAML/WS-Fed** mode to enable single sign-on. - - ![Single sign-on select mode](common/select-saml-option.png) - -3. On the **Set up Single Sign-On with SAML** page, click **Edit** icon to open **Basic SAML Configuration** dialog. - - ![Edit Basic SAML Configuration](common/edit-urls.png) - -4. On the **Basic SAML Configuration** section, perform the following steps: - - ![Cloud Service PICCO Domain and URLs single sign-on information](common/sp-identifier-reply.png) + a. In the **Identifier** box, type a value using the following pattern: + `.cloudservicepicco.com` - a. 
In the **Sign-on URL** text box, type a URL using the following pattern: + b. In the **Reply URL** text box, type a URL using the following pattern: `https://.cloudservicepicco.com/app` - b. In the **Identifier** box, type a URL using the following pattern: - `.cloudservicepicco.com` - - c. In the **Reply URL** text box, type a URL using the following pattern: + c. In the **Sign-on URL** text box, type a URL using the following pattern: `https://.cloudservicepicco.com/app` > [!NOTE] - > These values are not real. Update these values with the actual Sign-On URL, Identifier and Reply URL. Contact [Cloud Service PICCO Client support team](mailto:picco.support@est.fujitsu.com) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. + > These values are not real. Update these values with the actual Identifier, Reply URL and Sign on URL. Contact [Cloud Service PICCO Client support team](mailto:picco.support@est.fujitsu.com) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. -4. On the **Set up Single Sign-On with SAML** page, In the **SAML Signing Certificate** section, click copy button to copy **App Federation Metadata Url** and save it on your computer. +1. On the **Set up Single Sign-On with SAML** page, In the **SAML Signing Certificate** section, click copy button to copy **App Federation Metadata Url** and save it on your computer. - ![The Certificate download link](common/copy-metadataurl.png) - -### Configure Cloud Service PICCO Single Sign-On - -To configure single sign-on on **Cloud Service PICCO** side, you need to send the **App Federation Metadata Url** to [Cloud Service PICCO support team](mailto:picco.support@est.fujitsu.com). They set this setting to have the SAML SSO connection set properly on both sides. + ![Screenshot shows the Certificate download link.](common/copy-metadataurl.png "Certificate") ### Create an Azure AD test user -The objective of this section is to create a test user in the Azure portal called Britta Simon. - -1. In the Azure portal, in the left pane, select **Azure Active Directory**, select **Users**, and then select **All users**. - - ![The "Users and groups" and "All users" links](common/users.png) - -2. Select **New user** at the top of the screen. - - ![New user Button](common/new-user.png) - -3. In the User properties, perform the following steps. - - ![The User dialog box](common/user-properties.png) - - a. In the **Name** field enter **BrittaSimon**. - - b. In the **User name** field type **brittasimon\@yourcompanydomain.extension** - For example, BrittaSimon@contoso.com - - c. Select **Show password** check box, and then write down the value that's displayed in the Password box. +In this section, you'll create a test user in the Azure portal called B.Simon. - d. Click **Create**. +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. 
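If you prefer to script this step, the same test user can also be created through the Microsoft Graph API. The following is a minimal sketch rather than part of the official procedure; it assumes you already have an access token with the `User.ReadWrite.All` permission, and the tenant domain and password values are placeholders that you must replace.

```python
# Sketch: create the B.Simon test user via Microsoft Graph instead of the portal.
# Assumptions: you already hold a valid access token with User.ReadWrite.All;
# "contoso.com" and the password value below are placeholders.
import requests

GRAPH_USERS_URL = "https://graph.microsoft.com/v1.0/users"
access_token = "<access-token-with-User.ReadWrite.All>"

payload = {
    "accountEnabled": True,
    "displayName": "B.Simon",
    "mailNickname": "B.Simon",
    "userPrincipalName": "B.Simon@contoso.com",
    "passwordProfile": {
        "forceChangePasswordNextSignIn": True,
        "password": "<initial-password>",
    },
}

response = requests.post(
    GRAPH_USERS_URL,
    headers={"Authorization": f"Bearer {access_token}"},
    json=payload,
)
response.raise_for_status()
print("Created test user with object ID:", response.json()["id"])
```

Whichever way you create the user, B.Simon still has to be assigned to the application in the next section before single sign-on will work.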
### Assign the Azure AD test user -In this section, you enable Britta Simon to use Azure single sign-on by granting access to Cloud Service PICCO. +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to Cloud Service PICCO. -1. In the Azure portal, select **Enterprise Applications**, select **All applications**, then select **Cloud Service PICCO**. +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **Cloud Service PICCO**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. - ![Enterprise applications blade](common/enterprise-applications.png) +## Configure Cloud Service PICCO SSO -2. In the applications list, select **Cloud Service PICCO**. - - ![The Cloud Service PICCO link in the Applications list](common/all-applications.png) - -3. In the menu on the left, select **Users and groups**. - - ![The "Users and groups" link](common/users-groups-blade.png) - -4. Click the **Add user** button, then select **Users and groups** in the **Add Assignment** dialog. - - ![The Add Assignment pane](common/add-assign-user.png) - -5. In the **Users and groups** dialog select **Britta Simon** in the Users list, then click the **Select** button at the bottom of the screen. - -6. If you are expecting any role value in the SAML assertion then in the **Select Role** dialog select the appropriate role for the user from the list, then click the **Select** button at the bottom of the screen. - -7. In the **Add Assignment** dialog click the **Assign** button. +To configure single sign-on on **Cloud Service PICCO** side, you need to send the **App Federation Metadata Url** to [Cloud Service PICCO support team](mailto:picco.support@est.fujitsu.com). They set this setting to have the SAML SSO connection set properly on both sides. ### Create Cloud Service PICCO test user In this section, a user called Britta Simon is created in Cloud Service PICCO. Cloud Service PICCO supports just-in-time user provisioning, which is enabled by default. There is no action item for you in this section. If a user doesn't already exist in Cloud Service PICCO, a new one is created after authentication. -### Test single sign-on +## Test SSO -In this section, you test your Azure AD single sign-on configuration using the Access Panel. +In this section, you test your Azure AD single sign-on configuration with following options. -When you click the Cloud Service PICCO tile in the Access Panel, you should be automatically signed in to the Cloud Service PICCO for which you set up SSO. For more information about the Access Panel, see [Introduction to the Access Panel](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). +* Click on **Test this application** in Azure portal. This will redirect to Cloud Service PICCO Sign-on URL where you can initiate the login flow. 
-## Additional Resources +* Go to Cloud Service PICCO Sign-on URL directly and initiate the login flow from there. -- [List of Tutorials on How to Integrate SaaS Apps with Azure Active Directory](./tutorial-list.md) +* You can use Microsoft My Apps. When you click the Cloud Service PICCO tile in the My Apps, this will redirect to Cloud Service PICCO Sign-on URL. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). -- [What is application access and single sign-on with Azure Active Directory?](../manage-apps/what-is-single-sign-on.md) +## Next steps -- [What is Conditional Access in Azure Active Directory?](../conditional-access/overview.md) \ No newline at end of file +Once you configure Cloud Service PICCO you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/common/provisioning-testconnection-tenanturltoken.png b/articles/active-directory/saas-apps/common/provisioning-testconnection-tenanturltoken.png index dc929793b4192..7ed17892a92e8 100644 Binary files a/articles/active-directory/saas-apps/common/provisioning-testconnection-tenanturltoken.png and b/articles/active-directory/saas-apps/common/provisioning-testconnection-tenanturltoken.png differ diff --git a/articles/active-directory/saas-apps/competencyiq-tutorial.md b/articles/active-directory/saas-apps/competencyiq-tutorial.md index dab1cad04a066..847654cfa289b 100644 --- a/articles/active-directory/saas-apps/competencyiq-tutorial.md +++ b/articles/active-directory/saas-apps/competencyiq-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory integration with CompetencyIQ | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with CompetencyIQ' description: Learn how to configure single sign-on between Azure Active Directory and CompetencyIQ. services: active-directory author: jeevansd @@ -9,184 +9,130 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 01/23/2019 +ms.date: 06/01/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory integration with CompetencyIQ +# Tutorial: Azure AD SSO integration with CompetencyIQ -In this tutorial, you learn how to integrate CompetencyIQ with Azure Active Directory (Azure AD). -Integrating CompetencyIQ with Azure AD provides you with the following benefits: +In this tutorial, you'll learn how to integrate CompetencyIQ with Azure Active Directory (Azure AD). When you integrate CompetencyIQ with Azure AD, you can: -* You can control in Azure AD who has access to CompetencyIQ. -* You can enable your users to be automatically signed-in to CompetencyIQ (Single Sign-On) with their Azure AD accounts. -* You can manage your accounts in one central location - the Azure portal. - -If you want to know more details about SaaS app integration with Azure AD, see [What is application access and single sign-on with Azure Active Directory](../manage-apps/what-is-single-sign-on.md). -If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/) before you begin. +* Control in Azure AD who has access to CompetencyIQ. 
+* Enable your users to be automatically signed-in to CompetencyIQ with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. ## Prerequisites -To configure Azure AD integration with CompetencyIQ, you need the following items: +To get started, you need the following items: -* An Azure AD subscription. If you don't have an Azure AD environment, you can get one-month trial [here](https://azure.microsoft.com/pricing/free-trial/) -* CompetencyIQ single sign-on enabled subscription +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* CompetencyIQ single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD single sign-on in a test environment. -* CompetencyIQ supports **SP** initiated SSO - -## Adding CompetencyIQ from the gallery - -To configure the integration of CompetencyIQ into Azure AD, you need to add CompetencyIQ from the gallery to your list of managed SaaS apps. - -**To add CompetencyIQ from the gallery, perform the following steps:** - -1. In the **[Azure portal](https://portal.azure.com)**, on the left navigation panel, click **Azure Active Directory** icon. - - ![The Azure Active Directory button](common/select-azuread.png) - -2. Navigate to **Enterprise Applications** and then select the **All Applications** option. - - ![The Enterprise applications blade](common/enterprise-applications.png) - -3. To add new application, click **New application** button on the top of dialog. - - ![The New application button](common/add-new-app.png) +* CompetencyIQ supports **SP** initiated SSO. -4. In the search box, type **CompetencyIQ**, select **CompetencyIQ** from result panel then click **Add** button to add the application. +> [!NOTE] +> Identifier of this application is a fixed string value so only one instance can be configured in one tenant. - ![CompetencyIQ in the results list](common/search-new-app.png) +## Add CompetencyIQ from the gallery -## Configure and test Azure AD single sign-on - -In this section, you configure and test Azure AD single sign-on with CompetencyIQ based on a test user called **Britta Simon**. -For single sign-on to work, a link relationship between an Azure AD user and the related user in CompetencyIQ needs to be established. - -To configure and test Azure AD single sign-on with CompetencyIQ, you need to complete the following building blocks: - -1. **[Configure Azure AD Single Sign-On](#configure-azure-ad-single-sign-on)** - to enable your users to use this feature. -2. **[Configure CompetencyIQ Single Sign-On](#configure-competencyiq-single-sign-on)** - to configure the Single Sign-On settings on application side. -3. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with Britta Simon. -4. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable Britta Simon to use Azure AD single sign-on. -5. **[Create CompetencyIQ test user](#create-competencyiq-test-user)** - to have a counterpart of Britta Simon in CompetencyIQ that is linked to the Azure AD representation of user. -6. **[Test single sign-on](#test-single-sign-on)** - to verify whether the configuration works. 
+To configure the integration of CompetencyIQ into Azure AD, you need to add CompetencyIQ from the gallery to your list of managed SaaS apps. -### Configure Azure AD single sign-on +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **CompetencyIQ** in the search box. +1. Select **CompetencyIQ** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. -In this section, you enable Azure AD single sign-on in the Azure portal. +## Configure and test Azure AD SSO for CompetencyIQ -To configure Azure AD single sign-on with CompetencyIQ, perform the following steps: +Configure and test Azure AD SSO with CompetencyIQ using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in CompetencyIQ. -1. In the [Azure portal](https://portal.azure.com/), on the **CompetencyIQ** application integration page, select **Single sign-on**. +To configure and test Azure AD SSO with CompetencyIQ, perform the following steps: - ![Configure single sign-on link](common/select-sso.png) +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure CompetencyIQ SSO](#configure-competencyiq-sso)** - to configure the single sign-on settings on application side. + 1. **[Create CompetencyIQ test user](#create-competencyiq-test-user)** - to have a counterpart of B.Simon in CompetencyIQ that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. -2. On the **Select a Single sign-on method** dialog, select **SAML/WS-Fed** mode to enable single sign-on. +## Configure Azure AD SSO - ![Single sign-on select mode](common/select-saml-option.png) +Follow these steps to enable Azure AD SSO in the Azure portal. -3. On the **Set up Single Sign-On with SAML** page, click **Edit** icon to open **Basic SAML Configuration** dialog. +1. In the Azure portal, on the **CompetencyIQ** application integration page, find the **Manage** section and select **single sign-on**. +2. On the **Select a single sign-on method** page, select **SAML**. +3. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 4. On the **Basic SAML Configuration** section, perform the following steps: - ![CompetencyIQ Domain and URLs single sign-on information](common/sp-identifier.png) + a. In the **Identifier (Entity ID)** text box, type the URL: + `https://www.competencyiq.com/` - a. In the **Sign on URL** text box, type a URL using the following pattern: + b. In the **Sign on URL** text box, type a URL using the following pattern: `https://.competencyiq.com/` - b. 
In the **Identifier (Entity ID)** text box, type a URL: - `https://www.competencyiq.com/` - > [!NOTE] > The Sign on URL value is not real. Update the value with the actual Sign on URL. Contact [CompetencyIQ Client support team](https://www.competencyiq.com/) to get the value. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. 5. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Federation Metadata XML** from the given options as per your requirement and save it on your computer. - ![The Certificate download link](common/metadataxml.png) + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") 6. On the **Set up CompetencyIQ** section, copy the appropriate URL(s) as per your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) - - a. Login URL - - b. Azure Ad Identifier - - c. Logout URL - -### Configure CompetencyIQ Single Sign-On - -To configure single sign-on on **CompetencyIQ** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [CompetencyIQ support team](https://www.competencyiq.com/). They set this setting to have the SAML SSO connection set properly on both sides. + ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Configuration") ### Create an Azure AD test user -The objective of this section is to create a test user in the Azure portal called Britta Simon. - -1. In the Azure portal, in the left pane, select **Azure Active Directory**, select **Users**, and then select **All users**. - - ![The "Users and groups" and "All users" links](common/users.png) - -2. Select **New user** at the top of the screen. - - ![New user Button](common/new-user.png) - -3. In the User properties, perform the following steps. - - ![The User dialog box](common/user-properties.png) - - a. In the **Name** field enter **BrittaSimon**. - - b. In the **User name** field type **brittasimon\@yourcompanydomain.extension** - For example, BrittaSimon@contoso.com +In this section, you'll create a test user in the Azure portal called B.Simon. - c. Select **Show password** check box, and then write down the value that's displayed in the Password box. - - d. Click **Create**. +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. ### Assign the Azure AD test user -In this section, you enable Britta Simon to use Azure single sign-on by granting access to CompetencyIQ. - -1. In the Azure portal, select **Enterprise Applications**, select **All applications**, then select **CompetencyIQ**. - - ![Enterprise applications blade](common/enterprise-applications.png) - -2. In the applications list, select **CompetencyIQ**. - - ![The CompetencyIQ link in the Applications list](common/all-applications.png) +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to CompetencyIQ. -3. In the menu on the left, select **Users and groups**. 
+1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **CompetencyIQ**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. - ![The "Users and groups" link](common/users-groups-blade.png) +## Configure CompetencyIQ SSO -4. Click the **Add user** button, then select **Users and groups** in the **Add Assignment** dialog. - - ![The Add Assignment pane](common/add-assign-user.png) - -5. In the **Users and groups** dialog select **Britta Simon** in the Users list, then click the **Select** button at the bottom of the screen. - -6. If you are expecting any role value in the SAML assertion then in the **Select Role** dialog select the appropriate role for the user from the list, then click the **Select** button at the bottom of the screen. - -7. In the **Add Assignment** dialog click the **Assign** button. +To configure single sign-on on **CompetencyIQ** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [CompetencyIQ support team](https://www.competencyiq.com/). They set this setting to have the SAML SSO connection set properly on both sides. ### Create CompetencyIQ test user In this section, you create a user called Britta Simon in CompetencyIQ. Work with [CompetencyIQ support team](https://www.competencyiq.com/) to add the users in the CompetencyIQ platform. Users must be created and activated before you use single sign-on. -### Test single sign-on +## Test SSO -In this section, you test your Azure AD single sign-on configuration using the Access Panel. +In this section, you test your Azure AD single sign-on configuration with following options. -When you click the CompetencyIQ tile in the Access Panel, you should be automatically signed in to the CompetencyIQ for which you set up SSO. For more information about the Access Panel, see [Introduction to the Access Panel](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). +* Click on **Test this application** in Azure portal. This will redirect to CompetencyIQ Sign-on URL where you can initiate the login flow. -## Additional Resources +* Go to CompetencyIQ Sign-on URL directly and initiate the login flow from there. -- [List of Tutorials on How to Integrate SaaS Apps with Azure Active Directory](./tutorial-list.md) +* You can use Microsoft My Apps. When you click the CompetencyIQ tile in the My Apps, this will redirect to CompetencyIQ Sign-on URL. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). 
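Optionally, before or after testing, you can sanity-check the **Federation Metadata XML** that was sent to the CompetencyIQ support team. The short sketch below is only illustrative; it assumes the file downloaded from the Azure portal was saved locally as `federationmetadata.xml` (a placeholder name).

```python
# Sketch: inspect the Federation Metadata XML downloaded from the Azure portal.
# Assumption: the file was saved locally as "federationmetadata.xml".
import xml.etree.ElementTree as ET

SAML_MD_NS = "urn:oasis:names:tc:SAML:2.0:metadata"

root = ET.parse("federationmetadata.xml").getroot()

# The entityID attribute corresponds to the Azure AD Identifier value.
print("Entity ID:", root.attrib.get("entityID"))

# Each SingleSignOnService element advertises a sign-on endpoint and its binding.
for sso in root.iter(f"{{{SAML_MD_NS}}}SingleSignOnService"):
    print(sso.attrib.get("Binding"), "->", sso.attrib.get("Location"))
```

The entity ID and the redirect-binding location printed here should match the Azure AD Identifier and Login URL values shown in the **Set up CompetencyIQ** section of the Azure portal.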
-- [What is application access and single sign-on with Azure Active Directory?](../manage-apps/what-is-single-sign-on.md) +## Next steps -- [What is Conditional Access in Azure Active Directory?](../conditional-access/overview.md) \ No newline at end of file +Once you configure CompetencyIQ you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/e2open-lsp-tutorial.md b/articles/active-directory/saas-apps/e2open-lsp-tutorial.md new file mode 100644 index 0000000000000..2eb7773c88d25 --- /dev/null +++ b/articles/active-directory/saas-apps/e2open-lsp-tutorial.md @@ -0,0 +1,136 @@ +--- +title: 'Tutorial: Azure AD SSO integration with E2open LSP' +description: Learn how to configure single sign-on between Azure Active Directory and E2open LSP. +services: active-directory +author: jeevansd +manager: CelesteDG +ms.reviewer: CelesteDG +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 05/23/2022 +ms.author: jeedes + +--- + +# Tutorial: Azure AD SSO integration with E2open LSP + +In this tutorial, you'll learn how to integrate E2open LSP with Azure Active Directory (Azure AD). When you integrate E2open LSP with Azure AD, you can: + +* Control in Azure AD who has access to E2open LSP. +* Enable your users to be automatically signed-in to E2open LSP with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. + +## Prerequisites + +To get started, you need the following items: + +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* E2open LSP single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). + +## Scenario description + +In this tutorial, you configure and test Azure AD SSO in a test environment. + +* E2open LSP supports **SP** initiated SSO. + +## Add E2open LSP from the gallery + +To configure the integration of E2open LSP into Azure AD, you need to add E2open LSP from the gallery to your list of managed SaaS apps. + +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **E2open LSP** in the search box. +1. Select **E2open LSP** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. + +## Configure and test Azure AD SSO for E2open LSP + +Configure and test Azure AD SSO with E2open LSP using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in E2open LSP. + +To configure and test Azure AD SSO with E2open LSP, perform the following steps: + +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. 
**[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure E2open LSP SSO](#configure-e2open-lsp-sso)** - to configure the single sign-on settings on application side. + 1. **[Create E2open LSP test user](#create-e2open-lsp-test-user)** - to have a counterpart of B.Simon in E2open LSP that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. + +## Configure Azure AD SSO + +Follow these steps to enable Azure AD SSO in the Azure portal. + +1. In the Azure portal, on the **E2open LSP** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") + +1. On the **Basic SAML Configuration** section, perform the following steps: + + a. In the **Identifier** text box, type a URL using the following pattern: + `https://-.tms-lsp.blujaysolutions.net/navi/saml/metadata` + + b. In the **Reply URL** text box, type a URL using the following pattern: + `https://-.tms-lsp.blujaysolutions.net/navi/sam` + + c. In the **Sign-on URL** text box, type a URL using the following pattern: + `https://-.tms-lsp.blujaysolutions.net/navi/` + + > [!NOTE] + > These values are not real. Update these values with the actual Identifier, Reply URL and Sign-on URL. Contact [E2open LSP Client support team](mailto:customersupport@e2open.com) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. + +1. On the **Set up single sign-on with SAML** page, In the **SAML Signing Certificate** section, click copy button to copy **App Federation Metadata Url** and save it on your computer. + + ![Screenshot shows the Certificate download link.](common/copy-metadataurl.png "Certificate") + +### Create an Azure AD test user + +In this section, you'll create a test user in the Azure portal called B.Simon. + +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. + +### Assign the Azure AD test user + +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to E2open LSP. + +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **E2open LSP**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. 
If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. + +## Configure E2open LSP SSO + +To configure single sign-on on **E2open LSP** side, you need to send the **App Federation Metadata Url** to [E2open LSP support team](mailto:customersupport@e2open.com). They set this setting to have the SAML SSO connection set properly on both sides. + +### Create E2open LSP test user + +In this section, you create a user called Britta Simon in E2open LSP. Work with [E2open LSP support team](mailto:customersupport@e2open.com) to add the users in the E2open LSP platform. Users must be created and activated before you use single sign-on. + +## Test SSO + +In this section, you test your Azure AD single sign-on configuration with following options. + +* Click on **Test this application** in Azure portal. This will redirect to E2open LSP Sign-on URL where you can initiate the login flow. + +* Go to E2open LSP Sign-on URL directly and initiate the login flow from there. + +* You can use Microsoft My Apps. When you click the E2open LSP tile in the My Apps, this will redirect to E2open LSP Sign-on URL. For more information about the My Apps, see [Introduction to the My Apps](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). + +## Next steps + +Once you configure E2open LSP you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Defender for Cloud Apps](/cloud-app-security/proxy-deployment-any-app). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/edcor-tutorial.md b/articles/active-directory/saas-apps/edcor-tutorial.md index 505b07d319ff7..166d3274df290 100644 --- a/articles/active-directory/saas-apps/edcor-tutorial.md +++ b/articles/active-directory/saas-apps/edcor-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory integration with Edcor | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with Edcor' description: Learn how to configure single sign-on between Azure Active Directory and Edcor. services: active-directory author: jeevansd @@ -9,179 +9,122 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 02/04/2019 +ms.date: 06/01/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory integration with Edcor +# Tutorial: Azure AD SSO integration with Edcor -In this tutorial, you learn how to integrate Edcor with Azure Active Directory (Azure AD). -Integrating Edcor with Azure AD provides you with the following benefits: +In this tutorial, you'll learn how to integrate Edcor with Azure Active Directory (Azure AD). When you integrate Edcor with Azure AD, you can: -* You can control in Azure AD who has access to Edcor. -* You can enable your users to be automatically signed-in to Edcor (Single Sign-On) with their Azure AD accounts. -* You can manage your accounts in one central location - the Azure portal. - -If you want to know more details about SaaS app integration with Azure AD, see [What is application access and single sign-on with Azure Active Directory](../manage-apps/what-is-single-sign-on.md). 
-If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/) before you begin. +* Control in Azure AD who has access to Edcor. +* Enable your users to be automatically signed-in to Edcor with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. ## Prerequisites -To configure Azure AD integration with Edcor, you need the following items: +To get started, you need the following items: -* An Azure AD subscription. If you don't have an Azure AD environment, you can get one-month trial [here](https://azure.microsoft.com/pricing/free-trial/) -* Edcor single sign-on enabled subscription +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* Edcor single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD single sign-on in a test environment. +* Edcor supports **IDP** initiated SSO. -* Edcor supports **IDP** initiated SSO +> [!NOTE] +> Identifier of this application is a fixed string value so only one instance can be configured in one tenant. -## Adding Edcor from the gallery +## Add Edcor from the gallery To configure the integration of Edcor into Azure AD, you need to add Edcor from the gallery to your list of managed SaaS apps. -**To add Edcor from the gallery, perform the following steps:** - -1. In the **[Azure portal](https://portal.azure.com)**, on the left navigation panel, click **Azure Active Directory** icon. - - ![The Azure Active Directory button](common/select-azuread.png) - -2. Navigate to **Enterprise Applications** and then select the **All Applications** option. - - ![The Enterprise applications blade](common/enterprise-applications.png) - -3. To add new application, click **New application** button on the top of dialog. - - ![The New application button](common/add-new-app.png) - -4. In the search box, type **Edcor**, select **Edcor** from result panel then click **Add** button to add the application. - - ![Edcor in the results list](common/search-new-app.png) - -## Configure and test Azure AD single sign-on +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **Edcor** in the search box. +1. Select **Edcor** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. -In this section, you configure and test Azure AD single sign-on with Edcor based on a test user called **Britta Simon**. -For single sign-on to work, a link relationship between an Azure AD user and the related user in Edcor needs to be established. +## Configure and test Azure AD SSO for Edcor -To configure and test Azure AD single sign-on with Edcor, you need to complete the following building blocks: +Configure and test Azure AD SSO with Edcor using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in Edcor. -1. 
**[Configure Azure AD Single Sign-On](#configure-azure-ad-single-sign-on)** - to enable your users to use this feature. -2. **[Configure Edcor Single Sign-On](#configure-edcor-single-sign-on)** - to configure the Single Sign-On settings on application side. -3. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with Britta Simon. -4. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable Britta Simon to use Azure AD single sign-on. -5. **[Create Edcor test user](#create-edcor-test-user)** - to have a counterpart of Britta Simon in Edcor that is linked to the Azure AD representation of user. -6. **[Test single sign-on](#test-single-sign-on)** - to verify whether the configuration works. +To configure and test Azure AD SSO with Edcor, perform the following steps: -### Configure Azure AD single sign-on +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure Edcor SSO](#configure-edcor-sso)** - to configure the single sign-on settings on application side. + 1. **[Create Edcor test user](#create-edcor-test-user)** - to have a counterpart of B.Simon in Edcor that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. -In this section, you enable Azure AD single sign-on in the Azure portal. +## Configure Azure AD SSO -To configure Azure AD single sign-on with Edcor, perform the following steps: +Follow these steps to enable Azure AD SSO in the Azure portal. -1. In the [Azure portal](https://portal.azure.com/), on the **Edcor** application integration page, select **Single sign-on**. +1. In the Azure portal, on the **Edcor** application integration page, find the **Manage** section and select **single sign-on**. +2. On the **Select a single sign-on method** page, select **SAML**. +3. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Configure single sign-on link](common/select-sso.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") -2. On the **Select a Single sign-on method** dialog, select **SAML/WS-Fed** mode to enable single sign-on. +4. On the **Basic SAML Configuration** section, perform the following step: - ![Single sign-on select mode](common/select-saml-option.png) - -3. On the **Set up Single Sign-On with SAML** page, click **Edit** icon to open **Basic SAML Configuration** dialog. - - ![Edit Basic SAML Configuration](common/edit-urls.png) - -4. On the **Basic SAML Configuration** section, perform the following steps: - - ![Edcor Domain and URLs single sign-on information](common/idp-identifier.png) - - In the **Identifier** text box, type a URL: + In the **Identifier** text box, type the URL: `https://sso.edcor.com/sp/ACS.saml2` 5. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Federation Metadata XML** from the given options as per your requirement and save it on your computer. 
- ![The Certificate download link](common/metadataxml.png) + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") 6. On the **Set up Edcor** section, copy the appropriate URL(s) as per your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) - - a. Login URL - - b. Azure Ad Identifier - - c. Logout URL - -### Configure Edcor Single Sign-On - -To configure single sign-on on **Edcor** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [Edcor support team](https://www.edcor.com/contact-us/). They set this setting to have the SAML SSO connection set properly on both sides. + ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Configuration") ### Create an Azure AD test user -The objective of this section is to create a test user in the Azure portal called Britta Simon. - -1. In the Azure portal, in the left pane, select **Azure Active Directory**, select **Users**, and then select **All users**. - - ![The "Users and groups" and "All users" links](common/users.png) - -2. Select **New user** at the top of the screen. - - ![New user Button](common/new-user.png) +In this section, you'll create a test user in the Azure portal called B.Simon. -3. In the User properties, perform the following steps. - - ![The User dialog box](common/user-properties.png) - - a. In the **Name** field enter **BrittaSimon**. - - b. In the **User name** field type **brittasimon\@yourcompanydomain.extension** - For example, BrittaSimon@contoso.com - - c. Select **Show password** check box, and then write down the value that's displayed in the Password box. - - d. Click **Create**. +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. ### Assign the Azure AD test user -In this section, you enable Britta Simon to use Azure single sign-on by granting access to Edcor. - -1. In the Azure portal, select **Enterprise Applications**, select **All applications**, then select **Edcor**. +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to Edcor. - ![Enterprise applications blade](common/enterprise-applications.png) +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **Edcor**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. -2. In the applications list, select **Edcor**. 
+## Configure Edcor SSO - ![The Edcor link in the Applications list](common/all-applications.png) - -3. In the menu on the left, select **Users and groups**. - - ![The "Users and groups" link](common/users-groups-blade.png) - -4. Click the **Add user** button, then select **Users and groups** in the **Add Assignment** dialog. - - ![The Add Assignment pane](common/add-assign-user.png) - -5. In the **Users and groups** dialog select **Britta Simon** in the Users list, then click the **Select** button at the bottom of the screen. - -6. If you are expecting any role value in the SAML assertion then in the **Select Role** dialog select the appropriate role for the user from the list, then click the **Select** button at the bottom of the screen. - -7. In the **Add Assignment** dialog click the **Assign** button. +To configure single sign-on on the **Edcor** side, you need to send the downloaded **Federation Metadata XML** and the appropriate copied URLs from the Azure portal to the [Edcor support team](https://www.edcor.com/contact-us/). They configure this setting so that the SAML SSO connection is set properly on both sides. ### Create Edcor test user In this section, you create a user called Britta Simon in Edcor. Work with [Edcor support team](https://www.edcor.com/contact-us/) to add the users in the Edcor platform. Users must be created and activated before you use single sign-on. -### Test single sign-on - -In this section, you test your Azure AD single sign-on configuration using the Access Panel. +## Test SSO -When you click the Edcor tile in the Access Panel, you should be automatically signed in to the Edcor for which you set up SSO. For more information about the Access Panel, see [Introduction to the Access Panel](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). +In this section, you test your Azure AD single sign-on configuration with the following options. -## Additional resources +* Click on **Test this application** in the Azure portal and you should be automatically signed in to the Edcor for which you set up the SSO. -- [List of Tutorials on How to Integrate SaaS Apps with Azure Active Directory](./tutorial-list.md) +* You can use Microsoft My Apps. When you click the Edcor tile in My Apps, you should be automatically signed in to the Edcor for which you set up the SSO. For more information about My Apps, see [Introduction to My Apps](../user-help/my-apps-portal-end-user-access.md). -- [What is application access and single sign-on with Azure Active Directory?](../manage-apps/what-is-single-sign-on.md) +## Next steps -- [What is Conditional Access in Azure Active Directory?](../conditional-access/overview.md) \ No newline at end of file +Once you configure Edcor, you can enforce session control, which protects against exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad).
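The hand-off described in the **Configure Edcor SSO** step above can also be scripted rather than done through portal downloads. The Python sketch below is illustrative only: it fetches the app's federation metadata from the tenant metadata endpoint and prints the values the Edcor team asks for (the Azure AD Identifier, Login URL, Logout URL, and signing certificate). The `TENANT_ID` and `APP_ID` values are placeholders, not real identifiers.

```python
# Sketch: fetch the app's federation metadata from Azure AD and pull out the
# values the Edcor support team asks for. TENANT_ID and APP_ID are placeholders.
import urllib.request
import xml.etree.ElementTree as ET

TENANT_ID = "00000000-0000-0000-0000-000000000000"   # your Azure AD tenant ID
APP_ID = "11111111-1111-1111-1111-111111111111"      # application ID of the Edcor enterprise app

metadata_url = (
    f"https://login.microsoftonline.com/{TENANT_ID}"
    f"/federationmetadata/2007-06/federationmetadata.xml?appid={APP_ID}"
)

ns = {
    "md": "urn:oasis:names:tc:SAML:2.0:metadata",
    "ds": "http://www.w3.org/2000/09/xmldsig#",
}

with urllib.request.urlopen(metadata_url) as response:
    root = ET.fromstring(response.read())

# The entityID attribute is the "Azure AD Identifier" shown in the portal.
print("Azure AD Identifier:", root.get("entityID"))

idp = root.find("md:IDPSSODescriptor", ns)

# Login and Logout URLs correspond to the SingleSignOnService / SingleLogoutService endpoints.
for sso in idp.findall("md:SingleSignOnService", ns):
    print("Login URL:", sso.get("Location"))
for slo in idp.findall("md:SingleLogoutService", ns):
    print("Logout URL:", slo.get("Location"))

# Base64 signing certificate embedded in the metadata.
cert = idp.find("md:KeyDescriptor[@use='signing']/ds:KeyInfo/ds:X509Data/ds:X509Certificate", ns)
if cert is not None:
    print("Signing certificate (Base64, truncated):", cert.text.strip()[:40], "...")
```

This is the same document the portal serves as **Federation Metadata XML**, so either route should yield equivalent values to pass along.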
\ No newline at end of file diff --git a/articles/active-directory/saas-apps/eluminate-tutorial.md b/articles/active-directory/saas-apps/eluminate-tutorial.md index 8052c2415bcba..72f8a38e5a328 100644 --- a/articles/active-directory/saas-apps/eluminate-tutorial.md +++ b/articles/active-directory/saas-apps/eluminate-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory integration with eLuminate | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with eLuminate' description: Learn how to configure single sign-on between Azure Active Directory and eLuminate. services: active-directory author: jeevansd @@ -9,174 +9,126 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 02/05/2019 +ms.date: 06/01/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory integration with eLuminate +# Tutorial: Azure AD SSO integration with eLuminate -In this tutorial, you learn how to integrate eLuminate with Azure Active Directory (Azure AD). -Integrating eLuminate with Azure AD provides you with the following benefits: +In this tutorial, you'll learn how to integrate eLuminate with Azure Active Directory (Azure AD). When you integrate eLuminate with Azure AD, you can: -* You can control in Azure AD who has access to eLuminate. -* You can enable your users to be automatically signed-in to eLuminate (Single Sign-On) with their Azure AD accounts. -* You can manage your accounts in one central location - the Azure portal. - -If you want to know more details about SaaS app integration with Azure AD, see [What is application access and single sign-on with Azure Active Directory](../manage-apps/what-is-single-sign-on.md). -If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/) before you begin. +* Control in Azure AD who has access to eLuminate. +* Enable your users to be automatically signed-in to eLuminate with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. ## Prerequisites -To configure Azure AD integration with eLuminate, you need the following items: +To get started, you need the following items: -* An Azure AD subscription. If you don't have an Azure AD environment, you can get one-month trial [here](https://azure.microsoft.com/pricing/free-trial/) -* eLuminate single sign-on enabled subscription +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* eLuminate single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD single sign-on in a test environment. -* eLuminate supports **SP** initiated SSO - -## Adding eLuminate from the gallery - -To configure the integration of eLuminate into Azure AD, you need to add eLuminate from the gallery to your list of managed SaaS apps. - -**To add eLuminate from the gallery, perform the following steps:** - -1. In the **[Azure portal](https://portal.azure.com)**, on the left navigation panel, click **Azure Active Directory** icon. - - ![The Azure Active Directory button](common/select-azuread.png) - -2. Navigate to **Enterprise Applications** and then select the **All Applications** option. 
- - ![The Enterprise applications blade](common/enterprise-applications.png) - -3. To add new application, click **New application** button on the top of dialog. +* eLuminate supports **SP** initiated SSO. - ![The New application button](common/add-new-app.png) +> [!NOTE] +> Identifier of this application is a fixed string value so only one instance can be configured in one tenant. -4. In the search box, type **eLuminate**, select **eLuminate** from result panel then click **Add** button to add the application. +## Add eLuminate from the gallery - ![eLuminate in the results list](common/search-new-app.png) - -## Configure and test Azure AD single sign-on - -In this section, you configure and test Azure AD single sign-on with eLuminate based on a test user called **Britta Simon**. -For single sign-on to work, a link relationship between an Azure AD user and the related user in eLuminate needs to be established. - -To configure and test Azure AD single sign-on with eLuminate, you need to complete the following building blocks: - -1. **[Configure Azure AD Single Sign-On](#configure-azure-ad-single-sign-on)** - to enable your users to use this feature. -2. **[Configure eLuminate Single Sign-On](#configure-eluminate-single-sign-on)** - to configure the Single Sign-On settings on application side. -3. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with Britta Simon. -4. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable Britta Simon to use Azure AD single sign-on. -5. **[Create eLuminate test user](#create-eluminate-test-user)** - to have a counterpart of Britta Simon in eLuminate that is linked to the Azure AD representation of user. -6. **[Test single sign-on](#test-single-sign-on)** - to verify whether the configuration works. +To configure the integration of eLuminate into Azure AD, you need to add eLuminate from the gallery to your list of managed SaaS apps. -### Configure Azure AD single sign-on +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **eLuminate** in the search box. +1. Select **eLuminate** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. -In this section, you enable Azure AD single sign-on in the Azure portal. +## Configure and test Azure AD SSO for eLuminate -To configure Azure AD single sign-on with eLuminate, perform the following steps: +Configure and test Azure AD SSO with eLuminate using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in eLuminate. -1. In the [Azure portal](https://portal.azure.com/), on the **eLuminate** application integration page, select **Single sign-on**. +To configure and test Azure AD SSO with eLuminate, perform the following steps: - ![Configure single sign-on link](common/select-sso.png) +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. 
**[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure eLuminate SSO](#configure-eluminate-sso)** - to configure the single sign-on settings on application side. + 1. **[Create eLuminate test user](#create-eluminate-test-user)** - to have a counterpart of B.Simon in eLuminate that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. -2. On the **Select a Single sign-on method** dialog, select **SAML/WS-Fed** mode to enable single sign-on. +## Configure Azure AD SSO - ![Single sign-on select mode](common/select-saml-option.png) +Follow these steps to enable Azure AD SSO in the Azure portal. -3. On the **Set up Single Sign-On with SAML** page, click **Edit** icon to open **Basic SAML Configuration** dialog. +1. In the Azure portal, on the **eLuminate** application integration page, find the **Manage** section and select **single sign-on**. +2. On the **Select a single sign-on method** page, select **SAML**. +3. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 4. On the **Basic SAML Configuration** section, perform the following steps: - ![eLuminate Domain and URLs single sign-on information](common/sp-identifier.png) + a. In the **Identifier (Entity ID)** text box, type the value: + `Eluminate/ClientShortName` - a. In the **Sign on URL** text box, type a URL using the following pattern: + b. In the **Sign on URL** text box, type the URL: `https://ClientShortName.eluminate.ca/azuresso/account/SignIn` - b. In the **Identifier (Entity ID)** text box, type a URL using the following pattern: - `Eluminate/ClientShortName` - > [!NOTE] - > These values are not real. Update these values with the actual Sign on URL and Identifier. Contact [eLuminate Client support team](mailto:support@intellimedia.ca) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. - -4. On the **Set up Single Sign-On with SAML** page, In the **SAML Signing Certificate** section, click copy button to copy **App Federation Metadata Url** and save it on your computer. + > These values are not real. Update these values with the actual Identifier and Sign on URL. Contact [eLuminate Client support team](mailto:support@intellimedia.ca) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. - ![The Certificate download link](common/copy-metadataurl.png) +5. On the **Set up Single Sign-On with SAML** page, In the **SAML Signing Certificate** section, click copy button to copy **App Federation Metadata Url** and save it on your computer. -### Configure eLuminate Single Sign-On - -To configure single sign-on on **eLuminate** side, you need to send the **App Federation Metadata Url** to [eLuminate support team](mailto:support@intellimedia.ca). They set this setting to have the SAML SSO connection set properly on both sides. + ![Screenshot shows the Certificate download link.](common/copy-metadataurl.png "Certificate") ### Create an Azure AD test user -The objective of this section is to create a test user in the Azure portal called Britta Simon. - -1. 
In the Azure portal, in the left pane, select **Azure Active Directory**, select **Users**, and then select **All users**. - - ![The "Users and groups" and "All users" links](common/users.png) - -2. Select **New user** at the top of the screen. - - ![New user Button](common/new-user.png) - -3. In the User properties, perform the following steps. - - ![The User dialog box](common/user-properties.png) - - a. In the **Name** field enter **BrittaSimon**. - - b. In the **User name** field type **brittasimon\@yourcompanydomain.extension** - For example, BrittaSimon@contoso.com - - c. Select **Show password** check box, and then write down the value that's displayed in the Password box. +In this section, you'll create a test user in the Azure portal called B.Simon. - d. Click **Create**. +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. ### Assign the Azure AD test user -In this section, you enable Britta Simon to use Azure single sign-on by granting access to eLuminate. +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to eLuminate. -1. In the Azure portal, select **Enterprise Applications**, select **All applications**, then select **eLuminate**. +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **eLuminate**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. - ![Enterprise applications blade](common/enterprise-applications.png) +## Configure eLuminate SSO -2. In the applications list, select **eLuminate**. - - ![The eLuminate link in the Applications list](common/all-applications.png) - -3. In the menu on the left, select **Users and groups**. - - ![The "Users and groups" link](common/users-groups-blade.png) - -4. Click the **Add user** button, then select **Users and groups** in the **Add Assignment** dialog. - - ![The Add Assignment pane](common/add-assign-user.png) - -5. In the **Users and groups** dialog select **Britta Simon** in the Users list, then click the **Select** button at the bottom of the screen. - -6. If you are expecting any role value in the SAML assertion then in the **Select Role** dialog select the appropriate role for the user from the list, then click the **Select** button at the bottom of the screen. - -7. In the **Add Assignment** dialog click the **Assign** button. +To configure single sign-on on **eLuminate** side, you need to send the **App Federation Metadata Url** to [eLuminate support team](mailto:support@intellimedia.ca). 
They set this setting to have the SAML SSO connection set properly on both sides. ### Create eLuminate test user In this section, you create a user called Britta Simon in eLuminate. Work with [eLuminate support team](mailto:support@intellimedia.ca) to add the users in the eLuminate platform. Users must be created and activated before you use single sign-on. -### Test single sign-on +## Test SSO -In this section, you test your Azure AD single sign-on configuration using the Access Panel. +In this section, you test your Azure AD single sign-on configuration with following options. -When you click the eLuminate tile in the Access Panel, you should be automatically signed in to the eLuminate for which you set up SSO. For more information about the Access Panel, see [Introduction to the Access Panel](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). +* Click on **Test this application** in Azure portal. This will redirect to eLuminate Sign-on URL where you can initiate the login flow. -## Additional Resources +* Go to eLuminate Sign-on URL directly and initiate the login flow from there. -- [List of Tutorials on How to Integrate SaaS Apps with Azure Active Directory](./tutorial-list.md) +* You can use Microsoft My Apps. When you click the eLuminate tile in the My Apps, this will redirect to eLuminate Sign-on URL. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). -- [What is application access and single sign-on with Azure Active Directory?](../manage-apps/what-is-single-sign-on.md) +## Next steps -- [What is Conditional Access in Azure Active Directory?](../conditional-access/overview.md) \ No newline at end of file +Once you configure eLuminate you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/empactis-tutorial.md b/articles/active-directory/saas-apps/empactis-tutorial.md index e2dd617da21cd..0e31ecbe34a81 100644 --- a/articles/active-directory/saas-apps/empactis-tutorial.md +++ b/articles/active-directory/saas-apps/empactis-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory integration with Empactis | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with Empactis' description: Learn how to configure single sign-on between Azure Active Directory and Empactis. services: active-directory author: jeevansd @@ -9,175 +9,116 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 03/13/2019 +ms.date: 05/26/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory integration with Empactis +# Tutorial: Azure AD SSO integration with Empactis -In this tutorial, you learn how to integrate Empactis with Azure Active Directory (Azure AD). -Integrating Empactis with Azure AD provides you with the following benefits: +In this tutorial, you'll learn how to integrate Empactis with Azure Active Directory (Azure AD). When you integrate Empactis with Azure AD, you can: -* You can control in Azure AD who has access to Empactis. -* You can enable your users to be automatically signed-in to Empactis (Single Sign-On) with their Azure AD accounts. 
-* You can manage your accounts in one central location - the Azure portal. - -If you want to know more details about SaaS app integration with Azure AD, see [What is application access and single sign-on with Azure Active Directory](../manage-apps/what-is-single-sign-on.md). -If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/) before you begin. +* Control in Azure AD who has access to Empactis. +* Enable your users to be automatically signed-in to Empactis with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. ## Prerequisites -To configure Azure AD integration with Empactis, you need the following items: +To get started, you need the following items: -* An Azure AD subscription. If you don't have an Azure AD environment, you can get one-month trial [here](https://azure.microsoft.com/pricing/free-trial/) -* Empactis single sign-on enabled subscription +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* Empactis single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD single sign-on in a test environment. -* Empactis supports **IDP** initiated SSO +* Empactis supports **IDP** initiated SSO. -## Adding Empactis from the gallery +## Add Empactis from the gallery To configure the integration of Empactis into Azure AD, you need to add Empactis from the gallery to your list of managed SaaS apps. -**To add Empactis from the gallery, perform the following steps:** - -1. In the **[Azure portal](https://portal.azure.com)**, on the left navigation panel, click **Azure Active Directory** icon. - - ![The Azure Active Directory button](common/select-azuread.png) - -2. Navigate to **Enterprise Applications** and then select the **All Applications** option. - - ![The Enterprise applications blade](common/enterprise-applications.png) - -3. To add new application, click **New application** button on the top of dialog. - - ![The New application button](common/add-new-app.png) - -4. In the search box, type **Empactis**, select **Empactis** from result panel then click **Add** button to add the application. - - ![Empactis in the results list](common/search-new-app.png) - -## Configure and test Azure AD single sign-on +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **Empactis** in the search box. +1. Select **Empactis** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. -In this section, you configure and test Azure AD single sign-on with Empactis based on a test user called **Britta Simon**. -For single sign-on to work, a link relationship between an Azure AD user and the related user in Empactis needs to be established. 
+## Configure and test Azure AD SSO for Empactis -To configure and test Azure AD single sign-on with Empactis, you need to complete the following building blocks: +Configure and test Azure AD SSO with Empactis using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in Empactis. -1. **[Configure Azure AD Single Sign-On](#configure-azure-ad-single-sign-on)** - to enable your users to use this feature. -2. **[Configure Empactis Single Sign-On](#configure-empactis-single-sign-on)** - to configure the Single Sign-On settings on application side. -3. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with Britta Simon. -4. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable Britta Simon to use Azure AD single sign-on. -5. **[Create Empactis test user](#create-empactis-test-user)** - to have a counterpart of Britta Simon in Empactis that is linked to the Azure AD representation of user. -6. **[Test single sign-on](#test-single-sign-on)** - to verify whether the configuration works. +To configure and test Azure AD SSO with Empactis, perform the following steps: -### Configure Azure AD single sign-on +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure Empactis SSO](#configure-empactis-sso)** - to configure the single sign-on settings on application side. + 1. **[Create Empactis test user](#create-empactis-test-user)** - to have a counterpart of B.Simon in Empactis that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. -In this section, you enable Azure AD single sign-on in the Azure portal. +## Configure Azure AD SSO -To configure Azure AD single sign-on with Empactis, perform the following steps: +Follow these steps to enable Azure AD SSO in the Azure portal. -1. In the [Azure portal](https://portal.azure.com/), on the **Empactis** application integration page, select **Single sign-on**. +1. In the Azure portal, on the **Empactis** application integration page, find the **Manage** section and select **Single sign-on**. +1. On the **Select a Single sign-on method** page, select **SAML**. +1. On the **Set up Single Sign-On with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Configure single sign-on link](common/select-sso.png) - -2. On the **Select a Single sign-on method** dialog, select **SAML/WS-Fed** mode to enable single sign-on. - - ![Single sign-on select mode](common/select-saml-option.png) - -3. On the **Set up Single Sign-On with SAML** page, click **Edit** icon to open **Basic SAML Configuration** dialog. - - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 4. On the **Basic SAML Configuration** section, the user does not have to perform any step as the app is already pre-integrated with Azure. - ![Empactis Domain and URLs single sign-on information](common/preintegrated.png) - 5. 
On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Certificate (Base64)** from the given options as per your requirement and save it on your computer. - ![The Certificate download link](common/certificatebase64.png) + ![Screenshot shows the Certificate download link.](common/certificatebase64.png "Certificate") 6. On the **Set up Empactis** section, copy the appropriate URL(s) as per your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) - - a. Login URL - - b. Azure AD Identifier - - c. Logout URL - -### Configure Empactis Single Sign-On - -To configure single sign-on on **Empactis** side, you need to send the downloaded **Certificate (Base64)** and appropriate copied URLs from Azure portal to [Empactis support team](mailto:support@empactis.com). They set this setting to have the SAML SSO connection set properly on both sides. + ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Configuration") ### Create an Azure AD test user -The objective of this section is to create a test user in the Azure portal called Britta Simon. - -1. In the Azure portal, in the left pane, select **Azure Active Directory**, select **Users**, and then select **All users**. - - ![The "Users and groups" and "All users" links](common/users.png) - -2. Select **New user** at the top of the screen. - - ![New user Button](common/new-user.png) +In this section, you'll create a test user in the Azure portal called B.Simon. -3. In the User properties, perform the following steps. - - ![The User dialog box](common/user-properties.png) - - a. In the **Name** field, enter **BrittaSimon**. - - b. In the **User name** field, type **brittasimon@yourcompanydomain.extension** - For example, BrittaSimon@contoso.com - - c. Select **Show password** check box, and then write down the value that's displayed in the Password box. - - d. Click **Create**. +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. ### Assign the Azure AD test user -In this section, you enable Britta Simon to use Azure single sign-on by granting access to Empactis. - -1. In the Azure portal, select **Enterprise Applications**, select **All applications**, then select **Empactis**. +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to Empactis. - ![Enterprise applications blade](common/enterprise-applications.png) +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **Empactis**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. 
If you're expecting any role value in the SAML assertion, in the **Select Role** dialog, select the appropriate role for the user from the list and then click the **Select** button at the bottom of the screen. +1. In the **Add Assignment** dialog, click the **Assign** button. -2. In the applications list, select **Empactis**. +## Configure Empactis SSO - ![The Empactis link in the Applications list](common/all-applications.png) - -3. In the menu on the left, select **Users and groups**. - - ![The "Users and groups" link](common/users-groups-blade.png) - -4. Click the **Add user** button, then select **Users and groups** in the **Add Assignment** dialog. - - ![The Add Assignment pane](common/add-assign-user.png) - -5. In the **Users and groups** dialog select **Britta Simon** in the Users list, then click the **Select** button at the bottom of the screen. - -6. If you are expecting any role value in the SAML assertion then in the **Select Role** dialog, select the appropriate role for the user from the list, then click the **Select** button at the bottom of the screen. - -7. In the **Add Assignment** dialog, click the **Assign** button. +To configure single sign-on on **Empactis** side, you need to send the downloaded **Certificate (Base64)** and appropriate copied URLs from Azure portal to [Empactis support team](mailto:support@empactis.com). They set this setting to have the SAML SSO connection set properly on both sides. ### Create Empactis test user In this section, you create a user called Britta Simon in Empactis. Work with [Empactis support team](mailto:support@empactis.com) to add the users in the Empactis platform. Users must be created and activated before you use single sign-on. -### Test single sign-on - -In this section, you test your Azure AD single sign-on configuration using the Access Panel. +## Test SSO -When you click the Empactis tile in the Access Panel, you should be automatically signed in to the Empactis for which you set up SSO. For more information about the Access Panel, see [Introduction to the Access Panel](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). +In this section, you test your Azure AD single sign-on configuration with following options. -## Additional Resources +* Click on Test this application in Azure portal and you should be automatically signed in to the Empactis for which you set up the SSO. -- [List of Tutorials on How to Integrate SaaS Apps with Azure Active Directory](./tutorial-list.md) +* You can use Microsoft My Apps. When you click the Empactis tile in the My Apps, you should be automatically signed in to the Empactis for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). -- [What is application access and single sign-on with Azure Active Directory?](../manage-apps/what-is-single-sign-on.md) +## Next steps -- [What is Conditional Access in Azure Active Directory?](../conditional-access/overview.md) \ No newline at end of file +Once you configure Empactis you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). 
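The **Create an Azure AD test user** procedure repeated in the tutorials above can also be performed with a single Microsoft Graph request instead of the portal. The sketch below is a minimal illustration, assuming you already hold a Graph access token with the `User.ReadWrite.All` permission; the user principal name and password shown are placeholders.

```python
# Sketch: create the B.Simon test user with Microsoft Graph instead of the portal.
# Assumes an access token with User.ReadWrite.All; the values below are placeholders.
import json
import urllib.request

ACCESS_TOKEN = "<graph-access-token>"          # for example, obtained via MSAL
UPN = "B.Simon@contoso.com"                    # username@companydomain.extension

user = {
    "accountEnabled": True,
    "displayName": "B.Simon",
    "mailNickname": "BSimon",
    "userPrincipalName": UPN,
    "passwordProfile": {
        "forceChangePasswordNextSignIn": True,
        "password": "<initial-password>",      # write it down, as in the portal steps
    },
}

request = urllib.request.Request(
    "https://graph.microsoft.com/v1.0/users",
    data=json.dumps(user).encode("utf-8"),
    headers={
        "Authorization": f"Bearer {ACCESS_TOKEN}",
        "Content-Type": "application/json",
    },
    method="POST",
)

with urllib.request.urlopen(request) as response:
    created = json.loads(response.read())
    print("Created user with object ID:", created["id"])
```

Creating the user this way is equivalent to the portal steps; you still assign the user to the enterprise application afterwards.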
\ No newline at end of file diff --git a/articles/active-directory/saas-apps/ethicspoint-incident-management-tutorial.md b/articles/active-directory/saas-apps/ethicspoint-incident-management-tutorial.md index 349c623142455..bf5743b75354f 100644 --- a/articles/active-directory/saas-apps/ethicspoint-incident-management-tutorial.md +++ b/articles/active-directory/saas-apps/ethicspoint-incident-management-tutorial.md @@ -84,7 +84,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. | > [!NOTE] - > These values are not real. Update these values with the actual Identifier,Reply URL and Sign-On URL. Contact [EthicsPoint Incident Management (EPIM) Client support team](https://www.navexglobal.com/company/contact-us) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. + > These values are not real. Update these values with the actual Identifier,Reply URL and Sign-On URL. Contact [EthicsPoint Incident Management (EPIM) Client support team](https://www.navex.com/en-us/products/navex-ethics-compliance/ethicspoint-hotline-incident-management/) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. 5. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Federation Metadata XML** from the given options as per your requirement and save it on your computer. @@ -120,11 +120,11 @@ In this section, you'll enable B.Simon to use Azure single sign-on by granting a ## Configure EthicsPoint Incident Management (EPIM) SSO -To configure single sign-on on **EthicsPoint Incident Management (EPIM)** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [EthicsPoint Incident Management (EPIM) support team](https://www.navexglobal.com/company/contact-us). They set this setting to have the SAML SSO connection set properly on both sides. +To configure single sign-on on **EthicsPoint Incident Management (EPIM)** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [EthicsPoint Incident Management (EPIM) support team](https://www.navex.com/en-us/products/navex-ethics-compliance/ethicspoint-hotline-incident-management/). They set this setting to have the SAML SSO connection set properly on both sides. ### Create EthicsPoint Incident Management (EPIM) test user -In this section, you create a user called Britta Simon in EthicsPoint Incident Management (EPIM). Work with [EthicsPoint Incident Management (EPIM) support team](https://www.navexglobal.com/company/contact-us) to add the users in the EthicsPoint Incident Management (EPIM) platform. Users must be created and activated before you use single sign-on. +In this section, you create a user called Britta Simon in EthicsPoint Incident Management (EPIM). Work with [EthicsPoint Incident Management (EPIM) support team](https://www.navex.com/en-us/products/navex-ethics-compliance/ethicspoint-hotline-incident-management/) to add the users in the EthicsPoint Incident Management (EPIM) platform. Users must be created and activated before you use single sign-on. 
## Test SSO diff --git a/articles/active-directory/saas-apps/flexera-one-tutorial.md b/articles/active-directory/saas-apps/flexera-one-tutorial.md index 589f4ce1b2705..797d7f33f6279 100644 --- a/articles/active-directory/saas-apps/flexera-one-tutorial.md +++ b/articles/active-directory/saas-apps/flexera-one-tutorial.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 12/29/2021 +ms.date: 05/24/2022 ms.author: jeedes --- @@ -29,6 +29,9 @@ To get started, you need the following items: * An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). * Flexera One single sign-on (SSO) enabled subscription. +> [!NOTE] +> This integration is also available to use from Azure AD US Government Cloud environment. You can find this application in the Azure AD US Government Cloud Application Gallery and configure it in the same way as you do from public cloud. + ## Scenario description In this tutorial, you configure and test Azure AD SSO in a test environment. @@ -68,7 +71,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Select a single sign-on method** page, select **SAML**. 1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 1. On the **Basic SAML Configuration** section, perform the following steps: @@ -86,7 +89,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. Flexera One application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes. - ![image](common/default-attributes.png) + ![Screenshot shows the image of Flexera One application.](common/default-attributes.png "Attributes") 1. In addition to above, Flexera One application expects few more attributes to be passed back in SAML response which are shown below. These attributes are also pre populated but you can review them as per your requirements. @@ -97,11 +100,11 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Set up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Certificate (Base64)** and select **Download** to download the certificate and save it on your computer. - ![The Certificate download link](common/certificatebase64.png) + ![Screenshot shows the Certificate download link.](common/certificatebase64.png "Certificate") 1. On the **Set up Flexera One** section, copy the appropriate URL(s) based on your requirement. 
- ![Copy configuration URLs](common/copy-configuration-urls.png) + ![Screenshot shows to copy Configuration appropriate U R L.](common/copy-configuration-urls.png "Configuration") ### Create an Azure AD test user diff --git a/articles/active-directory/saas-apps/forcepoint-cloud-security-gateway-tutorial.md b/articles/active-directory/saas-apps/forcepoint-cloud-security-gateway-tutorial.md index fdb779149a136..85abe932b97ed 100644 --- a/articles/active-directory/saas-apps/forcepoint-cloud-security-gateway-tutorial.md +++ b/articles/active-directory/saas-apps/forcepoint-cloud-security-gateway-tutorial.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 04/19/2022 +ms.date: 05/26/2022 ms.author: jeedes --- @@ -72,7 +72,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Select a single sign-on method** page, select **SAML**. 1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 1. On the **Basic SAML Configuration** section, perform the following steps: @@ -85,13 +85,13 @@ Follow these steps to enable Azure AD SSO in the Azure portal. c. In the **Sign-on URL** text box, type the URL: `https://mailcontrol.com` -1. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, find **Certificate (Base64)** and select **Download** to download the certificate and save it on your computer. +1. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Federation Metadata XML** from the given options as per your requirement and save it on your computer. - ![The Certificate download link](common/certificatebase64.png) + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") 1. On the **Set up Forcepoint Cloud Security Gateway - User Authentication** section, copy the appropriate URL(s) based on your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) + ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Authentication") ### Create an Azure AD test user @@ -131,7 +131,7 @@ In this section, you'll enable B.Simon to use Azure single sign-on by granting a b. Select **Identity provider** from the dropdown. - c. Open the downloaded **Certificate (Base64)** from the Azure portal and upload the file into the **File upload** textbox by clicking **Browse** option. + c. Upload the **Federation Metadata XML** file from the Azure portal into the **File upload** textbox by clicking **Browse** option. d. Click **Save**. diff --git a/articles/active-directory/saas-apps/github-ae-tutorial.md b/articles/active-directory/saas-apps/github-ae-tutorial.md index d4889f0696b61..12802e940cd93 100644 --- a/articles/active-directory/saas-apps/github-ae-tutorial.md +++ b/articles/active-directory/saas-apps/github-ae-tutorial.md @@ -1,6 +1,6 @@ --- -title: 'Tutorial: Azure Active Directory single sign-on (SSO) integration with GitHub AE | Microsoft Docs' -description: Learn how to configure single sign-on between Azure Active Directory and GitHub AE. 
+title: 'Tutorial: Azure AD SSO integration with GitHub Enterprise Server' +description: Learn how to configure single sign-on between Azure Active Directory and GitHub Enterprise Server. services: active-directory author: jeevansd manager: CelesteDG @@ -9,16 +9,16 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 08/31/2021 +ms.date: 05/20/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory single sign-on (SSO) integration with GitHub AE +# Tutorial: Azure AD SSO integration with GitHub Enterprise Server -In this tutorial, you'll learn how to integrate GitHub AE with Azure Active Directory (Azure AD). When you integrate GitHub AE with Azure AD, you can: +In this tutorial, you'll learn how to integrate GitHub Enterprise Server with Azure Active Directory (Azure AD). When you integrate GitHub Enterprise Server with Azure AD, you can: -* Control in Azure AD who has access to GitHub AE. -* Enable your users to be automatically signed-in to GitHub AE with their Azure AD accounts. +* Control in Azure AD who has access to GitHub Enterprise Server. +* Enable your users to be automatically signed-in to GitHub Enterprise Server with their Azure AD accounts. * Manage your accounts in one central location - the Azure portal. ## Prerequisites @@ -26,52 +26,53 @@ In this tutorial, you'll learn how to integrate GitHub AE with Azure Active Dire To get started, you need the following items: * An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). -* GitHub AE, ready for [initialization](https://docs.github.com/github-ae@latest/admin/configuration/initializing-github-ae). +* GitHub Enterprise Server, ready for [initialization](https://docs.github.com/github-ae@latest/admin/configuration/initializing-github-ae). +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD SSO in a test environment. -* GitHub AE supports **SP** and **IDP** initiated SSO. -* GitHub AE supports **Just In Time** user provisioning. -* GitHub AE supports [Automated user provisioning](github-ae-provisioning-tutorial.md). +* GitHub Enterprise Server supports **SP** and **IDP** initiated SSO. +* GitHub Enterprise Server supports **Just In Time** user provisioning. +* GitHub Enterprise Server supports [Automated user provisioning](github-ae-provisioning-tutorial.md). -## Adding GitHub AE from the gallery +## Adding GitHub Enterprise Server from the gallery -To configure the integration of GitHub AE into Azure AD, you need to add GitHub AE from the gallery to your list of managed SaaS apps. +To configure the integration of GitHub Enterprise Server into Azure AD, you need to add GitHub Enterprise Server from the gallery to your list of managed SaaS apps. 1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. 1. On the left navigation pane, select the **Azure Active Directory** service. 1. Navigate to **Enterprise Applications** and then select **All Applications**. 1. To add new application, select **New application**. -1. In the **Add from the gallery** section, type **GitHub AE** in the search box. -1. Select **GitHub AE** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. +1. 
In the **Add from the gallery** section, type **GitHub Enterprise Server** in the search box. +1. Select **GitHub Enterprise Server** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. +## Configure and test Azure AD SSO for GitHub Enterprise Server -## Configure and test Azure AD SSO for GitHub AE +Configure and test Azure AD SSO with GitHub Enterprise Server using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in GitHub Enterprise Server. -Configure and test Azure AD SSO with GitHub AE using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in GitHub AE. - -To configure and test Azure AD SSO with GitHub AE, complete the following building blocks: +To configure and test Azure AD SSO with GitHub Enterprise Server, perform the following steps: 1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. -1. **[Configure GitHub AE SSO](#configure-github-ae-sso)** - to configure the single sign-on settings on application side. - 1. **[Create GitHub AE test user](#create-github-ae-test-user)** - to have a counterpart of B.Simon in GitHub AE that is linked to the Azure AD representation of user. +1. **[Configure GitHub Enterprise Server SSO](#configure-github-enterprise-server-sso)** - to configure the single sign-on settings on application side. + 1. **[Create GitHub Enterprise Server test user](#create-github-enterprise-server-test-user)** - to have a counterpart of B.Simon in GitHub Enterprise Server that is linked to the Azure AD representation of user. 1. **[Test SSO](#test-sso)** - to verify whether the configuration works. ## Configure Azure AD SSO Follow these steps to enable Azure AD SSO in the Azure portal. -1. In the Azure portal, on the **GitHub AE** application integration page, find the **Manage** section and select **single sign-on**. +1. In the Azure portal, on the **GitHub Enterprise Server** application integration page, find the **Manage** section and select **single sign-on**. 1. On the **Select a single sign-on method** page, select **SAML**. 1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") -1. On the **Basic SAML Configuration** section, if you wish to configure the application in **IDP** initiated mode, enter the values for the following fields: +1. On the **Basic SAML Configuration** section, if you wish to configure the application in **IDP** initiated mode, perform the following steps: a. In the **Identifier (Entity ID)** text box, type a URL using the following pattern: `https://` @@ -85,12 +86,11 @@ Follow these steps to enable Azure AD SSO in the Azure portal. `https:///sso` > [!NOTE] - > These values are not real. Update these values with the actual Sign on URL, Reply URL and Identifier. Contact [GitHub AE Client support team](mailto:support@github.com) to get these values. 
You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. - + > These values are not real. Update these values with the actual Identifier, Reply URL and Sign on URL. Contact [GitHub Enterprise Server Client support team](mailto:support@github.com) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. -1. GitHub AE application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes. +1. GitHub Enterprise Server application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes. - ![image](common/default-attributes.png) + ![Screenshot shows the image of Enterprise Server application.](common/default-attributes.png "Attributes") 1. Edit **User Attributes & Claims**. @@ -104,18 +104,18 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. Click **Save**. - ![manage claim](./media/github-ae-tutorial/administrator.png) + ![Screenshot shows to manage claim for attributes.](./media/github-ae-tutorial/administrator.png "Claims") > [!NOTE] > To know the instructions on how to add a claim, please follow the [link](https://docs.github.com/en/github-ae@latest/admin/authentication/configuring-authentication-and-provisioning-for-your-enterprise-using-azure-ad). 1. On the **Set up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Certificate (Base64)** and select **Download** to download the certificate and save it on your computer. - ![The Certificate download link](common/certificatebase64.png) + ![Screenshot shows the Certificate download link.](common/certificatebase64.png "Certificate") -1. On the **Set up GitHub AE** section, copy the appropriate URL(s) based on your requirement. +1. On the **Set up GitHub Enterprise Server** section, copy the appropriate URL(s) based on your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) + ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Metadata") ### Create an Azure AD test user @@ -131,25 +131,25 @@ In this section, you'll create a test user in the Azure portal called B.Simon. ### Assign the Azure AD test user -In this section, you'll enable B.Simon to use Azure single sign-on by granting access to GitHub AE. +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to GitHub Enterprise Server. 1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. -1. In the applications list, select **GitHub AE**. +1. In the applications list, select **GitHub Enterprise Server**. 1. In the app's overview page, find the **Manage** section and select **Users and groups**. 1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. 1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. 1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. 1. In the **Add Assignment** dialog, click the **Assign** button. 
-## Configure GitHub AE SSO +## Configure GitHub Enterprise Server SSO -To configure SSO on GitHub AE side, you need to follow the instructions mentioned [here](https://docs.github.com/github-ae@latest/admin/authentication/configuring-saml-single-sign-on-for-your-enterprise#enabling-saml-sso). +To configure SSO on GitHub Enterprise Server side, you need to follow the instructions mentioned [here](https://docs.github.com/github-ae@latest/admin/authentication/configuring-saml-single-sign-on-for-your-enterprise#enabling-saml-sso). -### Create GitHub AE test user +### Create GitHub Enterprise Server test user -In this section, a user called B.Simon is created in GitHub AE. GitHub AE supports just-in-time user provisioning, which is enabled by default. There is no action item for you in this section. If a user doesn't already exist in GitHub AE, a new one is created after authentication. +In this section, a user called B.Simon is created in GitHub Enterprise Server. GitHub Enterprise Server supports just-in-time user provisioning, which is enabled by default. There is no action item for you in this section. If a user doesn't already exist in GitHub Enterprise Server, a new one is created after authentication. -GitHub AE also supports automatic user provisioning, you can find more details [here](./github-ae-provisioning-tutorial.md) on how to configure automatic user provisioning. +GitHub Enterprise Server also supports automatic user provisioning, you can find more details [here](./github-ae-provisioning-tutorial.md) on how to configure automatic user provisioning. ## Test SSO @@ -157,18 +157,18 @@ In this section, you test your Azure AD single sign-on configuration with follow #### SP initiated: -* Click on **Test this application** in Azure portal. This will redirect to GitHub AE Sign on URL where you can initiate the login flow. +* Click on **Test this application** in Azure portal. This will redirect to GitHub Enterprise Server Sign on URL where you can initiate the login flow. -* Go to GitHub AE Sign-on URL directly and initiate the login flow from there. +* Go to GitHub Enterprise Server Sign-on URL directly and initiate the login flow from there. #### IDP initiated: -* Click on **Test this application** in Azure portal and you should be automatically signed in to the GitHub AE for which you set up the SSO +* Click on **Test this application** in Azure portal and you should be automatically signed in to the GitHub Enterprise Server for which you set up the SSO. -You can also use Microsoft My Apps to test the application in any mode. When you click the GitHub AE tile in the My Apps, if configured in SP mode you would be redirected to the application sign on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the GitHub AE for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). +You can also use Microsoft My Apps to test the application in any mode. When you click the GitHub Enterprise Server tile in the My Apps, if configured in SP mode you would be redirected to the application sign on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the GitHub Enterprise Server for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). 
## Next steps * [Configuring user provisioning for your enterprise](https://docs.github.com/github-ae@latest/admin/authentication/configuring-user-provisioning-for-your-enterprise). -* Once you configure GitHub AE you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Defender for Cloud Apps](/cloud-app-security/proxy-deployment-any-app). +* Once you configure GitHub Enterprise Server you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Defender for Cloud Apps](/cloud-app-security/proxy-deployment-any-app). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/github-provisioning-tutorial.md b/articles/active-directory/saas-apps/github-provisioning-tutorial.md index f5860f54f4130..450f88c714113 100644 --- a/articles/active-directory/saas-apps/github-provisioning-tutorial.md +++ b/articles/active-directory/saas-apps/github-provisioning-tutorial.md @@ -48,7 +48,7 @@ For more information, see [Assign a user or group to an enterprise app](../manag ## Configuring user provisioning to GitHub -This section guides you through connecting your Azure AD to GitHub's SCIM provisioning API to automate provisioning of GitHub organization membership. This integration, which leverages an [OAuth app](https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/authorizing-oauth-apps#oauth-apps-and-organizations), automatically adds, manages, and removes members' access to a GitHub Enterprise Cloud organization based on user and group assignment in Azure AD. When users are [provisioned to a GitHub organization via SCIM](https://docs.github.com/en/free-pro-team@latest/rest/reference/scim#provision-and-invite-a-scim-user), an email invitation is sent to the user's email address. +This section guides you through connecting your Azure AD to GitHub's SCIM provisioning API to automate provisioning of GitHub organization membership. This integration, which leverages an [OAuth app](https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/authorizing-oauth-apps#oauth-apps-and-organizations), automatically adds, manages, and removes members' access to a GitHub Enterprise Cloud organization based on user and group assignment in Azure AD. When users are [provisioned to a GitHub organization via SCIM](https://docs.github.com/en/rest/enterprise-admin/scim), an email invitation is sent to the user's email address. ### Configure automatic user account provisioning to GitHub in Azure AD diff --git a/articles/active-directory/saas-apps/guardium-data-protection-tutorial.md b/articles/active-directory/saas-apps/guardium-data-protection-tutorial.md new file mode 100644 index 0000000000000..0ad09f2ba5654 --- /dev/null +++ b/articles/active-directory/saas-apps/guardium-data-protection-tutorial.md @@ -0,0 +1,158 @@ +--- +title: 'Tutorial: Azure AD SSO integration with Guardium Data Protection' +description: Learn how to configure single sign-on between Azure Active Directory and Guardium Data Protection. 
+services: active-directory +author: jeevansd +manager: CelesteDG +ms.reviewer: CelesteDG +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 05/31/2022 +ms.author: jeedes + +--- + +# Tutorial: Azure AD SSO integration with Guardium Data Protection + +In this tutorial, you'll learn how to integrate Guardium Data Protection with Azure Active Directory (Azure AD). When you integrate Guardium Data Protection with Azure AD, you can: + +* Control in Azure AD who has access to Guardium Data Protection. +* Enable your users to be automatically signed-in to Guardium Data Protection with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. + +## Prerequisites + +To get started, you need the following items: + +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* Guardium Data Protection single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). + +## Scenario description + +In this tutorial, you configure and test Azure AD SSO in a test environment. + +* Guardium Data Protection supports **SP** and **IDP** initiated SSO. + +## Add Guardium Data Protection from the gallery + +To configure the integration of Guardium Data Protection into Azure AD, you need to add Guardium Data Protection from the gallery to your list of managed SaaS apps. + +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **Guardium Data Protection** in the search box. +1. Select **Guardium Data Protection** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. + +## Configure and test Azure AD SSO for Guardium Data Protection + +Configure and test Azure AD SSO with Guardium Data Protection using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in Guardium Data Protection. + +To configure and test Azure AD SSO with Guardium Data Protection, perform the following steps: + +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure Guardium Data Protection SSO](#configure-guardium-data-protection-sso)** - to configure the single sign-on settings on application side. + 1. **[Create Guardium Data Protection test user](#create-guardium-data-protection-test-user)** - to have a counterpart of B.Simon in Guardium Data Protection that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. + +## Configure Azure AD SSO + +Follow these steps to enable Azure AD SSO in the Azure portal. + +1. 
In the Azure portal, on the **Guardium Data Protection** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") + +1. On the **Basic SAML Configuration** section, perform the following steps: + + a. In the **Identifier** text box, type a value using the following pattern: + `` + + b. In the **Reply URL** text box, type a URL using the following pattern: + `https://:8443/saml/sso` + +1. Click **Set additional URLs** and perform the following step if you wish to configure the application in **SP** initiated mode: + + In the **Sign-on URL** text box, type a URL using the following pattern: + `https://:8443` + + > [!Note] + > These values are not real. Update these values with the actual Identifier, Reply URL and Sign-on URL. Contact [Guardium Data Protection support team](mailto:NA@ibm.com) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. + +1. Guardium Data Protection application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes. + + ![Screenshot shows the Guardium Data Protection application image.](common/default-attributes.png "Image") + +1. In addition to above, Guardium Data Protection application expects few more attributes to be passed back in SAML response, which are shown below. These attributes are also pre populated but you can review them as per your requirement. + + | Name | Source Attribute | + |-------| --------- | + | jobtitle | user.jobtitle | + +1. On the **Set-up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Federation Metadata XML** and select **Download** to download the certificate and save it on your computer. + + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") + +1. On the **Set up Guardium Data Protection** section, copy the appropriate URL(s) based on your requirement. + + ![Screenshot shows to copy configuration appropriate U R L.](common/copy-configuration-urls.png "Metadata") + +### Create an Azure AD test user + +In this section, you'll create a test user in the Azure portal called B.Simon. + +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. + +### Assign the Azure AD test user + +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to Guardium Data Protection. + +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **Guardium Data Protection**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. 
+1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. + +## Configure Guardium Data Protection SSO + +To configure single sign-on on **Guardium Data Protection** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [Guardium Data Protection support team](mailto:NA@ibm.com). They set this setting to have the SAML SSO connection set properly on both sides. + +### Create Guardium Data Protection test user + +In this section, you create a user called Britta Simon in Guardium Data Protection. Work with [Guardium Data Protection support team](mailto:NA@ibm.com) to add the users in the Guardium Data Protection platform. Users must be created and activated before you use single sign-on. + +## Test SSO + +In this section, you test your Azure AD single sign-on configuration with following options. + +#### SP initiated: + +* Click on **Test this application** in Azure portal. This will redirect to Guardium Data Protection Sign on URL where you can initiate the login flow. + +* Go to Guardium Data Protection Sign-on URL directly and initiate the login flow from there. + +#### IDP initiated: + +* Click on **Test this application** in Azure portal and you should be automatically signed in to the Guardium Data Protection for which you set up the SSO. + +You can also use Microsoft My Apps to test the application in any mode. When you click the Guardium Data Protection tile in the My Apps, if configured in SP mode you would be redirected to the application sign on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the Guardium Data Protection for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). + +## Next steps + +Once you configure Guardium Data Protection you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/iauditor-tutorial.md b/articles/active-directory/saas-apps/iauditor-tutorial.md index 05dae794f9848..85c724738f2de 100644 --- a/articles/active-directory/saas-apps/iauditor-tutorial.md +++ b/articles/active-directory/saas-apps/iauditor-tutorial.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 03/24/2022 +ms.date: 05/24/2022 ms.author: jeedes --- @@ -31,6 +31,9 @@ To get started, you need the following items: * Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. For more information, see [Azure built-in roles](../roles/permissions-reference.md). +> [!NOTE] +> This integration is also available to use from Azure AD US Government Cloud environment. 
You can find this application in the Azure AD US Government Cloud Application Gallery and configure it in the same way as you do from public cloud. + ## Scenario description In this tutorial, you configure and test Azure AD SSO in a test environment. @@ -69,7 +72,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Select a single sign-on method** page, select **SAML**. 1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 1. On the **Basic SAML Configuration** section, if you wish to configure the application in **IDP** initiated mode, perform the following steps: @@ -96,7 +99,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. iAuditor application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes. - ![image](common/default-attributes.png) + ![Screenshot shows the image of iAuditor application.](common/default-attributes.png "Attributes") 1. In addition to above, iAuditor application expects few more attributes to be passed back in SAML response which are shown below. These attributes are also pre-populated but you can review them as per your requirements. @@ -108,7 +111,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Set up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Certificate (PEM)** and select **Download** to download the certificate and save it on your computer. - ![The Certificate download link](common/certificate-base64-download.png) + ![Screenshot shows the Certificate download link.](common/certificate-base64-download.png "Certificate") ### Create an Azure AD test user diff --git a/articles/active-directory/saas-apps/igrafx-platform-tutorial.md b/articles/active-directory/saas-apps/igrafx-platform-tutorial.md index c29824b0a3778..e2377669d58ce 100644 --- a/articles/active-directory/saas-apps/igrafx-platform-tutorial.md +++ b/articles/active-directory/saas-apps/igrafx-platform-tutorial.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 02/18/2022 +ms.date: 06/03/2022 ms.author: jeedes --- @@ -69,7 +69,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Select a single sign-on method** page, select **SAML**. 1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 1. On the **Basic SAML Configuration** section, perform the following steps: @@ -77,35 +77,35 @@ Follow these steps to enable Azure AD SSO in the Azure portal. | **Identifier** | |--------| - | `https://.igrafxcloud.com/saml/metadata` | - | `https://.igrafxdemo.com/saml/metadata` | - | `https://.igrafxtraining.com/saml/metadata` | - | `https://.igrafx.com/saml/metadata` | + | `https://.igrafxcloud.com/saml/metadata` | + | `https://.igrafxdemo.com/saml/metadata` | + | `https://.igrafxtraining.com/saml/metadata` | + | `https://.igrafx.com/saml/metadata` | b. 
In the **Reply URL** text box, type a URL using one of the following patterns: | **Reply URL** | |---------| - | `https://.igrafxcloud.com/` | - | `https://.igrafxdemo.com/` | - | `https://.igrafxtraining.com/` | - | `https://.igrafx.com/` | + | `https://.igrafxcloud.com/` | + | `https://.igrafxdemo.com/` | + | `https://.igrafxtraining.com/` | + | `https://.igrafx.com/` | c. In the **Sign on URL** text box, type a URL using one of the following patterns: | **Sign on URL** | |-------| - | `https://.igrafxcloud.com/` | - | `https://.igrafxdemo.com/` | - | `https://.igrafxtraining.com/` | - | `https://.igrafx.com/` | + | `https://.igrafxcloud.com/` | + | `https://.igrafxdemo.com/` | + | `https://.igrafxtraining.com/` | + | `https://.igrafx.com/` | > [!NOTE] > These values are not real. Update these values with the actual Identifier, Reply URL and Sign on URL. Contact [iGrafx Platform Client support team](mailto:support@igrafx.com) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. 1. On the **Set up single sign-on with SAML** page, In the **SAML Signing Certificate** section, click copy button to copy **App Federation Metadata Url** and save it on your computer. - ![The Certificate download link](common/copy-metadataurl.png) + ![Screenshot shows the Certificate download link.](common/copy-metadataurl.png "Certificate") ### Create an Azure AD test user diff --git a/articles/active-directory/saas-apps/iwellnessnow-tutorial.md b/articles/active-directory/saas-apps/iwellnessnow-tutorial.md index d01b709b63e35..cda1ff0501677 100644 --- a/articles/active-directory/saas-apps/iwellnessnow-tutorial.md +++ b/articles/active-directory/saas-apps/iwellnessnow-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory integration with iWellnessNow | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with iWellnessNow' description: Learn how to configure single sign-on between Azure Active Directory and iWellnessNow. services: active-directory author: jeevansd @@ -9,11 +9,11 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 08/07/2019 +ms.date: 05/26/2022 ms.author: jeedes --- -# Tutorial: Integrate iWellnessNow with Azure Active Directory +# Tutorial: Azure AD SSO integration with iWellnessNow In this tutorial, you'll learn how to integrate iWellnessNow with Azure Active Directory (Azure AD). When you integrate iWellnessNow with Azure AD, you can: @@ -21,101 +21,93 @@ In this tutorial, you'll learn how to integrate iWellnessNow with Azure Active D * Enable your users to be automatically signed-in to iWellnessNow with their Azure AD accounts. * Manage your accounts in one central location - the Azure portal. -To learn more about SaaS app integration with Azure AD, see [What is application access and single sign-on with Azure Active Directory](../manage-apps/what-is-single-sign-on.md). - ## Prerequisites To get started, you need the following items: * An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). * iWellnessNow single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD SSO in a test environment. 
-* iWellnessNow supports **SP and IDP** initiated SSO +* iWellnessNow supports **SP and IDP** initiated SSO. -## Adding iWellnessNow from the gallery +## Add iWellnessNow from the gallery To configure the integration of iWellnessNow into Azure AD, you need to add iWellnessNow from the gallery to your list of managed SaaS apps. -1. Sign in to the [Azure portal](https://portal.azure.com) using either a work or school account, or a personal Microsoft account. +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. 1. On the left navigation pane, select the **Azure Active Directory** service. 1. Navigate to **Enterprise Applications** and then select **All Applications**. 1. To add new application, select **New application**. 1. In the **Add from the gallery** section, type **iWellnessNow** in the search box. 1. Select **iWellnessNow** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. -## Configure and test Azure AD single sign-on +## Configure and test Azure AD SSO for iWellnessNow Configure and test Azure AD SSO with iWellnessNow using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in iWellnessNow. -To configure and test Azure AD SSO with iWellnessNow, complete the following building blocks: +To configure and test Azure AD SSO with iWellnessNow, perform the following steps: 1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. -2. **[Configure iWellnessNow SSO](#configure-iwellnessnow-sso)** - to configure the Single Sign-On settings on application side. -3. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. -4. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. -5. **[Create iWellnessNow test user](#create-iwellnessnow-test-user)** - to have a counterpart of B.Simon in iWellnessNow that is linked to the Azure AD representation of user. -6. **[Test SSO](#test-sso)** - to verify whether the configuration works. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure iWellnessNow SSO](#configure-iwellnessnow-sso)** - to configure the single sign-on settings on application side. + 1. **[Create iWellnessNow test user](#create-iwellnessnow-test-user)** - to have a counterpart of B.Simon in iWellnessNow that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. -### Configure Azure AD SSO +## Configure Azure AD SSO Follow these steps to enable Azure AD SSO in the Azure portal. -1. In the [Azure portal](https://portal.azure.com/), on the **iWellnessNow** application integration page, find the **Manage** section and select **Single sign-on**. +1. In the Azure portal, on the **iWellnessNow** application integration page, find the **Manage** section and select **Single sign-on**. 1. On the **Select a Single sign-on method** page, select **SAML**. -1. On the **Set up Single Sign-On with SAML** page, click the edit/pen icon for **Basic SAML Configuration** to edit the settings. +1. 
On the **Set up Single Sign-On with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 1. On the **Basic SAML Configuration** section, if you have **Service Provider metadata file** and wish to configure in **IDP** initiated mode, perform the following steps: a. Click **Upload metadata file**. - ![Upload metadata file](common/upload-metadata.png) + ![Screenshot shows to upload metadata file.](common/upload-metadata.png "Metadata") b. Click on **folder logo** to select the metadata file and click **Upload**. - ![choose metadata file](common/browse-upload-metadata.png) + ![Screenshot shows to choose metadata file.](common/browse-upload-metadata.png "Folder") c. After the metadata file is successfully uploaded, the **Identifier** and **Reply URL** values get auto populated in Basic SAML Configuration section. - ![Screenshot shows the Basic SAML Configuration, where you can enter Reply U R L, and select Save.](common/idp-intiated.png) - > [!Note] - > If the **Identifier** and **Reply URL** values do not get auto polulated, then fill in the values manually according to your requirement. + > If the **Identifier** and **Reply URL** values do not get auto populated, then fill in the values manually according to your requirement. 1. If you don't have **Service Provider metadata file** and wish to configure the application in **IDP** initiated mode, perform the following steps: - ![iWellnessNow Domain and URLs single sign-on information](common/idp-intiated.png) - - a. In the **Identifier** textbox, type a URL using the following pattern: `http://.iwellnessnow.com` + a. In the **Identifier** textbox, type a URL using the following pattern: + `http://.iwellnessnow.com` - b. In the **Reply URL** textbox, type a URL using the following pattern: `https://.iwellnessnow.com/ssologin` + b. In the **Reply URL** textbox, type a URL using the following pattern: + `https://.iwellnessnow.com/ssologin` 1. Click **Set additional URLs** and perform the following step if you wish to configure the application in **SP** initiated mode: - ![Screenshot shows Set additional U R Ls where you can enter a Sign on U R L.](common/metadata-upload-additional-signon.png) - In the **Sign-on URL** text box, type a URL using the following pattern: `https://.iwellnessnow.com/` > [!NOTE] - > These values are not real. Update these values with the actual Sign-on URL, Identifier and Reply URL. Contact [iWellnessNow Client support team](mailto:info@iwellnessnow.com) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. + > These values are not real. Update these values with the actual Identifier, Reply URL and Sign on URL. Contact [iWellnessNow Client support team](mailto:info@iwellnessnow.com) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. 1. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, find **Metadata XML** and select **Download** to download the certificate and save it on your computer. - ![The Certificate download link](common/metadataxml.png) + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") 1. On the **Set up iWellnessNow** section, copy the appropriate URL(s) based on your requirement. 
- ![Copy configuration URLs](common/copy-configuration-urls.png) - -### Configure iWellnessNow SSO - -To configure single sign-on on **iWellnessNow** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [iWellnessNow support team](mailto:info@iwellnessnow.com). They set this setting to have the SAML SSO connection set properly on both sides. + ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Configuration") ### Create an Azure AD test user @@ -136,31 +128,35 @@ In this section, you'll enable B.Simon to use Azure single sign-on by granting a 1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. 1. In the applications list, select **iWellnessNow**. 1. In the app's overview page, find the **Manage** section and select **Users and groups**. - - ![The "Users and groups" link](common/users-groups-blade.png) - 1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. - - ![The Add User link](common/add-assign-user.png) - 1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. 1. If you're expecting any role value in the SAML assertion, in the **Select Role** dialog, select the appropriate role for the user from the list and then click the **Select** button at the bottom of the screen. 1. In the **Add Assignment** dialog, click the **Assign** button. +## Configure iWellnessNow SSO + +To configure single sign-on on **iWellnessNow** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [iWellnessNow support team](mailto:info@iwellnessnow.com). They set this setting to have the SAML SSO connection set properly on both sides. + ### Create iWellnessNow test user In this section, you create a user called Britta Simon in iWellnessNow. Work with [iWellnessNow support team](mailto:info@iwellnessnow.com) to add the users in the iWellnessNow platform. Users must be created and activated before you use single sign-on. -### Test SSO +## Test SSO + +In this section, you test your Azure AD single sign-on configuration with following options. + +#### SP initiated: + +* Click on **Test this application** in Azure portal. This will redirect to iWellnessNow Sign on URL where you can initiate the login flow. -In this section, you test your Azure AD single sign-on configuration using the Access Panel. +* Go to iWellnessNow Sign-on URL directly and initiate the login flow from there. -When you click the iWellnessNow tile in the Access Panel, you should be automatically signed in to the iWellnessNow for which you set up SSO. For more information about the Access Panel, see [Introduction to the Access Panel](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). +#### IDP initiated: -## Additional resources +* Click on **Test this application** in Azure portal and you should be automatically signed in to the iWellnessNow for which you set up the SSO. -- [ List of Tutorials on How to Integrate SaaS Apps with Azure Active Directory ](./tutorial-list.md) +You can also use Microsoft My Apps to test the application in any mode. 
When you click the iWellnessNow tile in the My Apps, if configured in SP mode you would be redirected to the application sign on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the iWellnessNow for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). -- [What is application access and single sign-on with Azure Active Directory? ](../manage-apps/what-is-single-sign-on.md) +## Next steps -- [What is conditional access in Azure Active Directory?](../conditional-access/overview.md) \ No newline at end of file +Once you configure iWellnessNow you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/javelo-tutorial.md b/articles/active-directory/saas-apps/javelo-tutorial.md new file mode 100644 index 0000000000000..d76c6003cf48b --- /dev/null +++ b/articles/active-directory/saas-apps/javelo-tutorial.md @@ -0,0 +1,162 @@ +--- +title: 'Tutorial: Azure AD SSO integration with Javelo' +description: Learn how to configure single sign-on between Azure Active Directory and Javelo. +services: active-directory +author: jeevansd +manager: CelesteDG +ms.reviewer: CelesteDG +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 06/06/2022 +ms.author: jeedes + +--- + +# Tutorial: Azure AD SSO integration with Javelo + +In this tutorial, you'll learn how to integrate Javelo with Azure Active Directory (Azure AD). When you integrate Javelo with Azure AD, you can: + +* Control in Azure AD who has access to Javelo. +* Enable your users to be automatically signed-in to Javelo with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. + +## Prerequisites + +To get started, you need the following items: + +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* Javelo single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). + +## Scenario description + +In this tutorial, you configure and test Azure AD SSO in a test environment. + +* Javelo supports **SP** initiated SSO. +* Javelo supports **Just In Time** user provisioning. + +> [!NOTE] +> Identifier of this application is a fixed string value so only one instance can be configured in one tenant. + +## Add Javelo from the gallery + +To configure the integration of Javelo into Azure AD, you need to add Javelo from the gallery to your list of managed SaaS apps. + +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **Javelo** in the search box. +1. Select **Javelo** from results panel and then add the app. 
Wait a few seconds while the app is added to your tenant. + +## Configure and test Azure AD SSO for Javelo + +Configure and test Azure AD SSO with Javelo using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in Javelo. + +To configure and test Azure AD SSO with Javelo, perform the following steps: + +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure Javelo SSO](#configure-javelo-sso)** - to configure the single sign-on settings on application side. + 1. **[Create Javelo test user](#create-javelo-test-user)** - to have a counterpart of B.Simon in Javelo that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. + +## Configure Azure AD SSO + +Follow these steps to enable Azure AD SSO in the Azure portal. + +1. In the Azure portal, on the **Javelo** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") + +1. On the **Basic SAML Configuration** section, Upload the **Service Provider metadata file** which you can download from the [URL](https://api.javelo.io/omniauth/_saml/metadata) and perform the following steps: + + a. Click **Upload metadata file**. + + ![Screenshot shows Basic SAML Configuration with the Upload metadata file link.](common/upload-metadata.png "Folder") + + b. Click on **folder logo** to select the metadata file and click **Upload**. + + ![Screenshot shows a dialog box where you can select and upload a file.](common/browse-upload-metadata.png "Logo") + + c. Once the metadata file is successfully uploaded, the necessary URLs get auto populated automatically. + + d. In the **Sign-on URL** text box, type a URL using the following pattern: + `https://.javelo.io/auth/login` + + > [!NOTE] + > This value is not real. Update this value with the actual Sign-on URL. Contact [Javelo Client support team](mailto:Support@javelo.io) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. + +1. On the **Set up single sign-on with SAML** page, In the **SAML Signing Certificate** section, click copy button to copy **App Federation Metadata Url** and save it on your computer. + + ![Screenshot shows the Certificate download link.](common/copy-metadataurl.png "Certificate") + +### Create an Azure AD test user + +In this section, you'll create a test user in the Azure portal called B.Simon. + +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. 
Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. + +### Assign the Azure AD test user + +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to Javelo. + +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **Javelo**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. + +## Configure Javelo SSO + +1. Log in to your Javelo company site as an administrator. + +1. Go to **Admin** view and navigate to **SSO** tab > **Azure Active Directory** and click **Configure**. + +1. In the **Enable SSO with Azure Active Directory** page, perform the following steps: + + ![Screenshot that shows the Configuration Settings.](./media/javelo-tutorial/settings.png "Configuration") + + a. Enter a valid name in the **Provider** textbox. + + b. In the **Entity ID** textbox, paste the **Azure AD Identifier** value which you have copied from the Azure portal. + + c. In the **Metadata URL** textbox, paste the **App Federation Metadata Url** which you have copied from the Azure portal. + + d. Click **Test URL**. + + e. Enter a valid domain in the **Email Domains** textbox. + + f. Click **Enable SSO with Azure Active Directory**. + +### Create Javelo test user + +In this section, a user called B.Simon is created in Javelo. Javelo supports just-in-time user provisioning, which is enabled by default. There is no action item for you in this section. If a user doesn't already exist in Javelo, a new one is created after authentication. + +## Test SSO + +In this section, you test your Azure AD single sign-on configuration with following options. + +* Click on **Test this application** in Azure portal. This will redirect to Javelo Sign-on URL where you can initiate the login flow. + +* Go to Javelo Sign-on URL directly and initiate the login flow from there. + +* You can use Microsoft My Apps. When you click the Javelo tile in the My Apps, this will redirect to Javelo Sign-on URL. For more information about the My Apps, see [Introduction to the My Apps](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). + +## Next steps + +Once you configure Javelo you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Defender for Cloud Apps](/cloud-app-security/proxy-deployment-any-app). 
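As an optional sanity check alongside the steps above, you can confirm that both metadata documents used in this tutorial resolve and parse: Javelo's service provider metadata (the [URL](https://api.javelo.io/omniauth/_saml/metadata) uploaded in **Basic SAML Configuration**) and the **App Federation Metadata Url** copied from the Azure portal. The following Python sketch uses only the standard library; the Azure AD metadata URL shown is a placeholder you must replace with your own value.

```python
import urllib.request
import xml.etree.ElementTree as ET

NS = {"md": "urn:oasis:names:tc:SAML:2.0:metadata"}

def fetch_metadata(url):
    """Download a SAML metadata document and return its parsed root element."""
    with urllib.request.urlopen(url) as response:
        return ET.fromstring(response.read())

# Service provider metadata referenced in the Basic SAML Configuration step.
sp = fetch_metadata("https://api.javelo.io/omniauth/_saml/metadata")
acs = sp.find(".//md:SPSSODescriptor/md:AssertionConsumerService", NS)
print("SP entity ID :", sp.get("entityID"))
print("SP reply URL :", acs.get("Location") if acs is not None else "<missing>")

# Replace with the App Federation Metadata Url copied from the SAML Signing
# Certificate section of the Azure portal (the value below is a placeholder).
APP_FEDERATION_METADATA_URL = (
    "https://login.microsoftonline.com/<tenant-id>/federationmetadata/2007-06/"
    "federationmetadata.xml?appid=<app-id>"
)
idp = fetch_metadata(APP_FEDERATION_METADATA_URL)

# The entityID here is the Azure AD Identifier expected in Javelo's Entity ID field.
print("IdP entity ID:", idp.get("entityID"))
```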
\ No newline at end of file diff --git a/articles/active-directory/saas-apps/jobbadmin-tutorial.md b/articles/active-directory/saas-apps/jobbadmin-tutorial.md index bfffb9b1b8880..8e65d7e1ecc03 100644 --- a/articles/active-directory/saas-apps/jobbadmin-tutorial.md +++ b/articles/active-directory/saas-apps/jobbadmin-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory integration with Jobbadmin | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with Jobbadmin' description: Learn how to configure single sign-on between Azure Active Directory and Jobbadmin. services: active-directory author: jeevansd @@ -9,186 +9,129 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 02/25/2019 +ms.date: 02/25/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory integration with Jobbadmin +# Tutorial: Azure AD SSO integration with Jobbadmin -In this tutorial, you learn how to integrate Jobbadmin with Azure Active Directory (Azure AD). -Integrating Jobbadmin with Azure AD provides you with the following benefits: +In this tutorial, you'll learn how to integrate Jobbadmin with Azure Active Directory (Azure AD). When you integrate Jobbadmin with Azure AD, you can: -* You can control in Azure AD who has access to Jobbadmin. -* You can enable your users to be automatically signed-in to Jobbadmin (Single Sign-On) with their Azure AD accounts. -* You can manage your accounts in one central location - the Azure portal. - -If you want to know more details about SaaS app integration with Azure AD, see [What is application access and single sign-on with Azure Active Directory](../manage-apps/what-is-single-sign-on.md). -If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/) before you begin. +* Control in Azure AD who has access to Jobbadmin. +* Enable your users to be automatically signed-in to Jobbadmin with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. ## Prerequisites -To configure Azure AD integration with Jobbadmin, you need the following items: +To get started, you need the following items: -* An Azure AD subscription. If you don't have an Azure AD environment, you can get one-month trial [here](https://azure.microsoft.com/pricing/free-trial/) -* Jobbadmin single sign-on enabled subscription +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* Jobbadmin single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD single sign-on in a test environment. -* Jobbadmin supports **SP** initiated SSO +* Jobbadmin supports **SP** initiated SSO. -## Adding Jobbadmin from the gallery +## Add Jobbadmin from the gallery To configure the integration of Jobbadmin into Azure AD, you need to add Jobbadmin from the gallery to your list of managed SaaS apps. -**To add Jobbadmin from the gallery, perform the following steps:** - -1. In the **[Azure portal](https://portal.azure.com)**, on the left navigation panel, click **Azure Active Directory** icon. - - ![The Azure Active Directory button](common/select-azuread.png) - -2. 
Navigate to **Enterprise Applications** and then select the **All Applications** option. - - ![The Enterprise applications blade](common/enterprise-applications.png) - -3. To add new application, click **New application** button on the top of dialog. - - ![The New application button](common/add-new-app.png) - -4. In the search box, type **Jobbadmin**, select **Jobbadmin** from result panel then click **Add** button to add the application. - - ![Jobbadmin in the results list](common/search-new-app.png) - -## Configure and test Azure AD single sign-on - -In this section, you configure and test Azure AD single sign-on with Jobbadmin based on a test user called **Britta Simon**. -For single sign-on to work, a link relationship between an Azure AD user and the related user in Jobbadmin needs to be established. +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **Jobbadmin** in the search box. +1. Select **Jobbadmin** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. -To configure and test Azure AD single sign-on with Jobbadmin, you need to complete the following building blocks: +## Configure and test Azure AD SSO for Jobbadmin -1. **[Configure Azure AD Single Sign-On](#configure-azure-ad-single-sign-on)** - to enable your users to use this feature. -2. **[Configure Jobbadmin Single Sign-On](#configure-jobbadmin-single-sign-on)** - to configure the Single Sign-On settings on application side. -3. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with Britta Simon. -4. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable Britta Simon to use Azure AD single sign-on. -5. **[Create Jobbadmin test user](#create-jobbadmin-test-user)** - to have a counterpart of Britta Simon in Jobbadmin that is linked to the Azure AD representation of user. -6. **[Test single sign-on](#test-single-sign-on)** - to verify whether the configuration works. +Configure and test Azure AD SSO with Jobbadmin using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in Jobbadmin. -### Configure Azure AD single sign-on +To configure and test Azure AD SSO with Jobbadmin, perform the following steps: -In this section, you enable Azure AD single sign-on in the Azure portal. +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure Jobbadmin SSO](#configure-jobbadmin-sso)** - to configure the single sign-on settings on application side. + 1. **[Create Jobbadmin test user](#create-jobbadmin-test-user)** - to have a counterpart of B.Simon in Jobbadmin that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. -To configure Azure AD single sign-on with Jobbadmin, perform the following steps: +## Configure Azure AD SSO -1. 
In the [Azure portal](https://portal.azure.com/), on the **Jobbadmin** application integration page, select **Single sign-on**. +Follow these steps to enable Azure AD SSO in the Azure portal. - ![Configure single sign-on link](common/select-sso.png) - -2. On the **Select a Single sign-on method** dialog, select **SAML/WS-Fed** mode to enable single sign-on. - - ![Single sign-on select mode](common/select-saml-option.png) - -3. On the **Set up Single Sign-On with SAML** page, click **Edit** icon to open **Basic SAML Configuration** dialog. - - ![Edit Basic SAML Configuration](common/edit-urls.png) +1. In the Azure portal, on the **Jobbadmin** application integration page, find the **Manage** section and select **single sign-on**. +2. On the **Select a single sign-on method** page, select **SAML**. +3. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 4. On the **Basic SAML Configuration** section, perform the following steps: - ![Jobbadmin Domain and URLs single sign-on information](common/sp-identifier-reply.png) - - a. In the **Sign on URL** text box, type a URL using the following pattern: - `https://.jobbnorge.no/auth/saml2/login.ashx` - - b. In the **Identifier (Entity ID)** text box, type a URL using the following pattern: + a. In the **Identifier (Entity ID)** text box, type a URL using the following pattern: `https://.jobnorge.no` - c. In the **Reply URL** textbox, type a URL using the following pattern: `https://.jobbnorge.no/auth/saml2/login.ashx` + b. In the **Reply URL** textbox, type a URL using the following pattern: `https://.jobbnorge.no/auth/saml2/login.ashx` + + c. In the **Sign on URL** text box, type a URL using the following pattern: + `https://.jobbnorge.no/auth/saml2/login.ashx` > [!NOTE] - > These values are not real. Update these values with the actual Sign on URL, Identifier and Reply URL. Contact [Jobbadmin Client support team](https://www.jobbnorge.no/om-oss/kontakt-oss) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. + > These values are not real. Update these values with the actual Identifier, Reply URL and Sign on URL. Contact [Jobbadmin Client support team](https://www.jobbnorge.no/om-oss/kontakt-oss) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. 5. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Federation Metadata XML** from the given options as per your requirement and save it on your computer. - ![The Certificate download link](common/metadataxml.png) + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") 6. On the **Set up Jobbadmin** section, copy the appropriate URL(s) as per your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) - - a. Login URL - - b. Azure Ad Identifier - - c. Logout URL - -### Configure Jobbadmin Single Sign-On - -To configure single sign-on on **Jobbadmin** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [Jobbadmin support team](https://www.jobbnorge.no/om-oss/kontakt-oss). They set this setting to have the SAML SSO connection set properly on both sides. 
+ ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Configuration") ### Create an Azure AD test user -The objective of this section is to create a test user in the Azure portal called Britta Simon. - -1. In the Azure portal, in the left pane, select **Azure Active Directory**, select **Users**, and then select **All users**. - - ![The "Users and groups" and "All users" links](common/users.png) - -2. Select **New user** at the top of the screen. - - ![New user Button](common/new-user.png) - -3. In the User properties, perform the following steps. - - ![The User dialog box](common/user-properties.png) - - a. In the **Name** field enter **BrittaSimon**. - - b. In the **User name** field type **brittasimon\@yourcompanydomain.extension** - For example, BrittaSimon@contoso.com +In this section, you'll create a test user in the Azure portal called B.Simon. - c. Select **Show password** check box, and then write down the value that's displayed in the Password box. - - d. Click **Create**. +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. ### Assign the Azure AD test user -In this section, you enable Britta Simon to use Azure single sign-on by granting access to Jobbadmin. - -1. In the Azure portal, select **Enterprise Applications**, select **All applications**, then select **Jobbadmin**. - - ![Enterprise applications blade](common/enterprise-applications.png) - -2. In the applications list, select **Jobbadmin**. - - ![The Jobbadmin link in the Applications list](common/all-applications.png) +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to Jobbadmin. -3. In the menu on the left, select **Users and groups**. +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **Jobbadmin**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. - ![The "Users and groups" link](common/users-groups-blade.png) +## Configure Jobbadmin SSO -4. Click the **Add user** button, then select **Users and groups** in the **Add Assignment** dialog. - - ![The Add Assignment pane](common/add-assign-user.png) - -5. In the **Users and groups** dialog select **Britta Simon** in the Users list, then click the **Select** button at the bottom of the screen. - -6. If you are expecting any role value in the SAML assertion then in the **Select Role** dialog select the appropriate role for the user from the list, then click the **Select** button at the bottom of the screen. 
- -7. In the **Add Assignment** dialog click the **Assign** button. +To configure single sign-on on **Jobbadmin** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [Jobbadmin support team](https://www.jobbnorge.no/om-oss/kontakt-oss). They set this setting to have the SAML SSO connection set properly on both sides. ### Create Jobbadmin test user In this section, you create a user called Britta Simon in Jobbadmin. Work with [Jobbadmin support team](https://www.jobbnorge.no/om-oss/kontakt-oss) to add the users in the Jobbadmin platform. Users must be created and activated before you use single sign-on. -### Test single sign-on +## Test SSO -In this section, you test your Azure AD single sign-on configuration using the Access Panel. +In this section, you test your Azure AD single sign-on configuration with following options. -When you click the Jobbadmin tile in the Access Panel, you should be automatically signed in to the Jobbadmin for which you set up SSO. For more information about the Access Panel, see [Introduction to the Access Panel](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). +* Click on **Test this application** in Azure portal. This will redirect to Jobbadmin Sign-on URL where you can initiate the login flow. -## Additional Resources +* Go to Jobbadmin Sign-on URL directly and initiate the login flow from there. -- [List of Tutorials on How to Integrate SaaS Apps with Azure Active Directory](./tutorial-list.md) +* You can use Microsoft My Apps. When you click the Jobbadmin tile in the My Apps, this will redirect to Jobbadmin Sign-on URL. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). -- [What is application access and single sign-on with Azure Active Directory?](../manage-apps/what-is-single-sign-on.md) +## Next steps -- [What is Conditional Access in Azure Active Directory?](../conditional-access/overview.md) \ No newline at end of file +Once you configure Jobbadmin you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/jobscore-tutorial.md b/articles/active-directory/saas-apps/jobscore-tutorial.md index 5c45535b7b244..590cce6ab93bf 100644 --- a/articles/active-directory/saas-apps/jobscore-tutorial.md +++ b/articles/active-directory/saas-apps/jobscore-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory integration with JobScore | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with JobScore' description: Learn how to configure single sign-on between Azure Active Directory and JobScore. services: active-directory author: jeevansd @@ -9,91 +9,70 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 02/25/2019 +ms.date: 05/25/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory integration with JobScore +# Tutorial: Azure AD SSO integration with JobScore -In this tutorial, you learn how to integrate JobScore with Azure Active Directory (Azure AD). 
-Integrating JobScore with Azure AD provides you with the following benefits: +In this tutorial, you'll learn how to integrate JobScore with Azure Active Directory (Azure AD). When you integrate JobScore with Azure AD, you can: -* You can control in Azure AD who has access to JobScore. -* You can enable your users to be automatically signed-in to JobScore (Single Sign-On) with their Azure AD accounts. -* You can manage your accounts in one central location - the Azure portal. - -If you want to know more details about SaaS app integration with Azure AD, see [What is application access and single sign-on with Azure Active Directory](../manage-apps/what-is-single-sign-on.md). -If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/) before you begin. +* Control in Azure AD who has access to JobScore. +* Enable your users to be automatically signed-in to JobScore with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. ## Prerequisites -To configure Azure AD integration with JobScore, you need the following items: +To get started, you need the following items: -* An Azure AD subscription. If you don't have an Azure AD environment, you can get one-month trial [here](https://azure.microsoft.com/pricing/free-trial/) -* JobScore single sign-on enabled subscription +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* JobScore single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD single sign-on in a test environment. -* JobScore supports **SP** initiated SSO - -## Adding JobScore from the gallery - -To configure the integration of JobScore into Azure AD, you need to add JobScore from the gallery to your list of managed SaaS apps. - -**To add JobScore from the gallery, perform the following steps:** - -1. In the **[Azure portal](https://portal.azure.com)**, on the left navigation panel, click **Azure Active Directory** icon. - - ![The Azure Active Directory button](common/select-azuread.png) - -2. Navigate to **Enterprise Applications** and then select the **All Applications** option. - - ![The Enterprise applications blade](common/enterprise-applications.png) - -3. To add new application, click **New application** button on the top of dialog. +* JobScore supports **SP** initiated SSO. - ![The New application button](common/add-new-app.png) +> [!NOTE] +> Identifier of this application is a fixed string value so only one instance can be configured in one tenant. -4. In the search box, type **JobScore**, select **JobScore** from result panel then click **Add** button to add the application. +## Add JobScore from the gallery - ![JobScore in the results list](common/search-new-app.png) - -## Configure and test Azure AD single sign-on - -In this section, you configure and test Azure AD single sign-on with JobScore based on a test user called **Britta Simon**. -For single sign-on to work, a link relationship between an Azure AD user and the related user in JobScore needs to be established. - -To configure and test Azure AD single sign-on with JobScore, you need to complete the following building blocks: - -1. 
**[Configure Azure AD Single Sign-On](#configure-azure-ad-single-sign-on)** - to enable your users to use this feature. -2. **[Configure JobScore Single Sign-On](#configure-jobscore-single-sign-on)** - to configure the Single Sign-On settings on application side. -3. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with Britta Simon. -4. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable Britta Simon to use Azure AD single sign-on. -5. **[Create JobScore test user](#create-jobscore-test-user)** - to have a counterpart of Britta Simon in JobScore that is linked to the Azure AD representation of user. -6. **[Test single sign-on](#test-single-sign-on)** - to verify whether the configuration works. - -### Configure Azure AD single sign-on - -In this section, you enable Azure AD single sign-on in the Azure portal. +To configure the integration of JobScore into Azure AD, you need to add JobScore from the gallery to your list of managed SaaS apps. -To configure Azure AD single sign-on with JobScore, perform the following steps: +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **JobScore** in the search box. +1. Select **JobScore** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. -1. In the [Azure portal](https://portal.azure.com/), on the **JobScore** application integration page, select **Single sign-on**. +## Configure and test Azure AD SSO for JobScore - ![Configure single sign-on link](common/select-sso.png) +Configure and test Azure AD SSO with JobScore using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in JobScore. -2. On the **Select a Single sign-on method** dialog, select **SAML/WS-Fed** mode to enable single sign-on. +To configure and test Azure AD SSO with JobScore, perform the following steps: - ![Single sign-on select mode](common/select-saml-option.png) +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure JobScore SSO](#configure-jobscore-sso)** - to configure the single sign-on settings on application side. + 1. **[Create JobScore test user](#create-jobscore-test-user)** - to have a counterpart of B.Simon in JobScore that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. -3. On the **Set up Single Sign-On with SAML** page, click **Edit** icon to open **Basic SAML Configuration** dialog. +## Configure Azure AD SSO - ![Edit Basic SAML Configuration](common/edit-urls.png) +Follow these steps to enable Azure AD SSO in the Azure portal. -4. On the **Basic SAML Configuration** section, perform the following steps: +1. In the Azure portal, on the **JobScore** application integration page, find the **Manage** section and select **single sign-on**. +2. 
On the **Select a single sign-on method** page, select **SAML**. +3. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") - ![JobScore Domain and URLs single sign-on information](common/sp-signonurl.png) +4. On the **Basic SAML Configuration** section, perform the following step: In the **Sign-on URL** text box, type a URL using the following pattern: `https://hire.jobscore.com/auth/adfs/` @@ -103,87 +82,54 @@ To configure Azure AD single sign-on with JobScore, perform the following steps: 5. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Federation Metadata XML** from the given options as per your requirement and save it on your computer. - ![The Certificate download link](common/metadataxml.png) + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") 6. On the **Set up JobScore** section, copy the appropriate URL(s) as per your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) - - a. Login URL - - b. Azure Ad Identifier - - c. Logout URL - -### Configure JobScore Single Sign-On - -To configure single sign-on on **JobScore** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [JobScore support team](mailto:support@jobscore.com). They set this setting to have the SAML SSO connection set properly on both sides. + ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Configuration") ### Create an Azure AD test user -The objective of this section is to create a test user in the Azure portal called Britta Simon. - -1. In the Azure portal, in the left pane, select **Azure Active Directory**, select **Users**, and then select **All users**. - - ![The "Users and groups" and "All users" links](common/users.png) - -2. Select **New user** at the top of the screen. - - ![New user Button](common/new-user.png) - -3. In the User properties, perform the following steps. - - ![The User dialog box](common/user-properties.png) - - a. In the **Name** field enter **BrittaSimon**. - - b. In the **User name** field type **brittasimon\@yourcompanydomain.extension** - For example, BrittaSimon@contoso.com +In this section, you'll create a test user in the Azure portal called B.Simon. - c. Select **Show password** check box, and then write down the value that's displayed in the Password box. - - d. Click **Create**. +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. ### Assign the Azure AD test user -In this section, you enable Britta Simon to use Azure single sign-on by granting access to JobScore. - -1. In the Azure portal, select **Enterprise Applications**, select **All applications**, then select **JobScore**. - - ![Enterprise applications blade](common/enterprise-applications.png) - -2. 
In the applications list, select **JobScore**. - - ![The JobScore link in the Applications list](common/all-applications.png) +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to JobScore. -3. In the menu on the left, select **Users and groups**. +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **JobScore**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. - ![The "Users and groups" link](common/users-groups-blade.png) +## Configure JobScore SSO -4. Click the **Add user** button, then select **Users and groups** in the **Add Assignment** dialog. - - ![The Add Assignment pane](common/add-assign-user.png) - -5. In the **Users and groups** dialog select **Britta Simon** in the Users list, then click the **Select** button at the bottom of the screen. - -6. If you are expecting any role value in the SAML assertion then in the **Select Role** dialog select the appropriate role for the user from the list, then click the **Select** button at the bottom of the screen. - -7. In the **Add Assignment** dialog click the **Assign** button. +To configure single sign-on on **JobScore** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [JobScore support team](mailto:support@jobscore.com). They set this setting to have the SAML SSO connection set properly on both sides. ### Create JobScore test user In this section, you create a user called Britta Simon in JobScore. Work with [JobScore support team](mailto:support@jobscore.com) to add the users in the JobScore platform. Users must be created and activated before you use single sign-on. -### Test single sign-on +## Test SSO -In this section, you test your Azure AD single sign-on configuration using the Access Panel. +In this section, you test your Azure AD single sign-on configuration with following options. -When you click the JobScore tile in the Access Panel, you should be automatically signed in to the JobScore for which you set up SSO. For more information about the Access Panel, see [Introduction to the Access Panel](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). +* Click on **Test this application** in Azure portal. This will redirect to JobScore Sign-on URL where you can initiate the login flow. -## Additional Resources +* Go to JobScore Sign-on URL directly and initiate the login flow from there. -- [List of Tutorials on How to Integrate SaaS Apps with Azure Active Directory](./tutorial-list.md) +* You can use Microsoft My Apps. When you click the JobScore tile in the My Apps, this will redirect to JobScore Sign-on URL. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). 
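Because JobScore is configured for SP-initiated SAML, you can also sanity-check the handoff outside the portal: requesting the Sign-on URL should normally answer with a redirect to Azure AD that carries a `SAMLRequest` parameter. The sketch below is illustrative only and not an official step; it assumes the third-party `requests` package, and the URL shown is the placeholder pattern from the **Basic SAML Configuration** section, which you must replace with your tenant's actual Sign-on URL.

```python
# Unofficial sanity check: an SP-initiated SAML app typically answers a plain
# GET on its Sign-on URL with a 302 redirect to the IdP carrying SAMLRequest.
import requests

SIGN_ON_URL = "https://hire.jobscore.com/auth/adfs/"  # placeholder pattern; use your real value

resp = requests.get(SIGN_ON_URL, allow_redirects=False, timeout=10)
location = resp.headers.get("Location", "")

if resp.status_code in (301, 302) and "SAMLRequest" in location:
    print("Sign-on URL hands off to the IdP as expected.")
else:
    print("Unexpected response:", resp.status_code, location[:100])
```

If this check fails, it does not necessarily mean the integration is broken; some applications start the flow from a form rather than a redirect, so treat the portal's **Test this application** button as the authoritative test.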
-- [What is application access and single sign-on with Azure Active Directory?](../manage-apps/what-is-single-sign-on.md) +## Next steps -- [What is Conditional Access in Azure Active Directory?](../conditional-access/overview.md) \ No newline at end of file +Once you configure JobScore you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/media/blinq-provisioning-tutorial/blinq-integrations-page.png b/articles/active-directory/saas-apps/media/blinq-provisioning-tutorial/blinq-integrations-page.png new file mode 100644 index 0000000000000..f22cad7c4b4b6 Binary files /dev/null and b/articles/active-directory/saas-apps/media/blinq-provisioning-tutorial/blinq-integrations-page.png differ diff --git a/articles/active-directory/saas-apps/media/blinq-provisioning-tutorial/blinq-settings.png b/articles/active-directory/saas-apps/media/blinq-provisioning-tutorial/blinq-settings.png new file mode 100644 index 0000000000000..f47e356f3ac17 Binary files /dev/null and b/articles/active-directory/saas-apps/media/blinq-provisioning-tutorial/blinq-settings.png differ diff --git a/articles/active-directory/saas-apps/media/javelo-tutorial/settings.png b/articles/active-directory/saas-apps/media/javelo-tutorial/settings.png new file mode 100644 index 0000000000000..3c7b1da7de21a Binary files /dev/null and b/articles/active-directory/saas-apps/media/javelo-tutorial/settings.png differ diff --git a/articles/active-directory/saas-apps/media/paloaltoadmin-tutorial/authentication.png b/articles/active-directory/saas-apps/media/paloaltoadmin-tutorial/authentication.png index ec842dedda743..8fe5f47c6e86e 100644 Binary files a/articles/active-directory/saas-apps/media/paloaltoadmin-tutorial/authentication.png and b/articles/active-directory/saas-apps/media/paloaltoadmin-tutorial/authentication.png differ diff --git a/articles/active-directory/saas-apps/media/paloaltoadmin-tutorial/settings.png b/articles/active-directory/saas-apps/media/paloaltoadmin-tutorial/settings.png index 01cc3a6a6878c..5f2878859f971 100644 Binary files a/articles/active-directory/saas-apps/media/paloaltoadmin-tutorial/settings.png and b/articles/active-directory/saas-apps/media/paloaltoadmin-tutorial/settings.png differ diff --git a/articles/active-directory/saas-apps/media/standard-for-success-tutorial/name.png b/articles/active-directory/saas-apps/media/standard-for-success-tutorial/name.png new file mode 100644 index 0000000000000..eb660314b9feb Binary files /dev/null and b/articles/active-directory/saas-apps/media/standard-for-success-tutorial/name.png differ diff --git a/articles/active-directory/saas-apps/media/standard-for-success-tutorial/settings.png b/articles/active-directory/saas-apps/media/standard-for-success-tutorial/settings.png new file mode 100644 index 0000000000000..a6a25b8ff1052 Binary files /dev/null and b/articles/active-directory/saas-apps/media/standard-for-success-tutorial/settings.png differ diff --git a/articles/active-directory/saas-apps/media/tap-app-security-provisioning-tutorial/add-domain.png b/articles/active-directory/saas-apps/media/tap-app-security-provisioning-tutorial/add-domain.png new file mode 100644 index 0000000000000..70288dab09341 Binary files /dev/null and 
b/articles/active-directory/saas-apps/media/tap-app-security-provisioning-tutorial/add-domain.png differ diff --git a/articles/active-directory/saas-apps/media/tap-app-security-provisioning-tutorial/domain-details.png b/articles/active-directory/saas-apps/media/tap-app-security-provisioning-tutorial/domain-details.png new file mode 100644 index 0000000000000..35913036811ce Binary files /dev/null and b/articles/active-directory/saas-apps/media/tap-app-security-provisioning-tutorial/domain-details.png differ diff --git a/articles/active-directory/saas-apps/media/tap-app-security-provisioning-tutorial/initialize.png b/articles/active-directory/saas-apps/media/tap-app-security-provisioning-tutorial/initialize.png new file mode 100644 index 0000000000000..f39e176794828 Binary files /dev/null and b/articles/active-directory/saas-apps/media/tap-app-security-provisioning-tutorial/initialize.png differ diff --git a/articles/active-directory/saas-apps/media/timeoffmanager-tutorial/account.png b/articles/active-directory/saas-apps/media/timeoffmanager-tutorial/account.png new file mode 100644 index 0000000000000..abe760f53dc1b Binary files /dev/null and b/articles/active-directory/saas-apps/media/timeoffmanager-tutorial/account.png differ diff --git a/articles/active-directory/saas-apps/media/timeoffmanager-tutorial/settings.png b/articles/active-directory/saas-apps/media/timeoffmanager-tutorial/settings.png new file mode 100644 index 0000000000000..d941f04731ce9 Binary files /dev/null and b/articles/active-directory/saas-apps/media/timeoffmanager-tutorial/settings.png differ diff --git a/articles/active-directory/saas-apps/media/timeoffmanager-tutorial/values.png b/articles/active-directory/saas-apps/media/timeoffmanager-tutorial/values.png new file mode 100644 index 0000000000000..a7e3189e74d6f Binary files /dev/null and b/articles/active-directory/saas-apps/media/timeoffmanager-tutorial/values.png differ diff --git a/articles/active-directory/saas-apps/nodetrax-project-tutorial.md b/articles/active-directory/saas-apps/nodetrax-project-tutorial.md index 3bb34c43a1510..0f44c85d2e0c2 100644 --- a/articles/active-directory/saas-apps/nodetrax-project-tutorial.md +++ b/articles/active-directory/saas-apps/nodetrax-project-tutorial.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 10/06/2021 +ms.date: 05/24/2022 ms.author: jeedes --- @@ -29,6 +29,9 @@ To get started, you need the following items: * An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). * Nodetrax Project single sign-on (SSO) enabled subscription. +> [!NOTE] +> This integration is also available to use from Azure AD US Government Cloud environment. You can find this application in the Azure AD US Government Cloud Application Gallery and configure it in the same way as you do from public cloud. + ## Scenario description In this tutorial, you configure and test Azure AD SSO in a test environment. @@ -67,7 +70,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Select a single sign-on method** page, select **SAML**. 1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 1. 
On the **Basic SAML Configuration** section, the user does not have to perform any step as the app is already pre-integrated with Azure. @@ -78,7 +81,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. Nodetrax Project application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes. - ![image](common/default-attributes.png) + ![Screenshot shows the image of Nodetrax Project application.](common/default-attributes.png "Attributes") 1. In addition to above, Nodetrax Project application expects few more attributes to be passed back in SAML response which are shown below. These attributes are also pre populated but you can review them as per your requirements. @@ -88,11 +91,11 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Set up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Certificate (Base64)** and select **Download** to download the certificate and save it on your computer. - ![The Certificate download link](common/certificatebase64.png) + ![Screenshot shows the Certificate download link.](common/certificatebase64.png "Certificate") 1. On the **Set up Nodetrax Project** section, copy the appropriate URL(s) based on your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) + ![Screenshot shows to copy Configuration appropriate U R L.](common/copy-configuration-urls.png "Configuration") ### Create an Azure AD test user diff --git a/articles/active-directory/saas-apps/openlearning-tutorial.md b/articles/active-directory/saas-apps/openlearning-tutorial.md index 0f7c546a61871..11c76fedb29c2 100644 --- a/articles/active-directory/saas-apps/openlearning-tutorial.md +++ b/articles/active-directory/saas-apps/openlearning-tutorial.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 02/17/2022 +ms.date: 05/31/2022 ms.author: jeedes --- @@ -29,6 +29,9 @@ To get started, you need the following items: * An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). * OpenLearning single sign-on (SSO) enabled subscription. +> [!NOTE] +> This integration is also available to use from Azure AD US Government Cloud environment. You can find this application in the Azure AD US Government Cloud Application Gallery and configure it in the same way as you do from public cloud. + ## Scenario description In this tutorial, you configure and test Azure AD SSO in a test environment. @@ -67,17 +70,17 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Select a single sign-on method** page, select **SAML**. 1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 1. On the **Basic SAML Configuration** section, if you have **Service Provider metadata file**, perform the following steps: a. Click **Upload metadata file**. - ![Upload metadata file](common/upload-metadata.png) + ![Screenshot shows to upload metadata file.](common/upload-metadata.png "Metadata") b. Click on **folder logo** to select the metadata file and click **Upload**. 
- ![choose metadata file](common/browse-upload-metadata.png) + ![Screenshot shows to choose metadata file.](common/browse-upload-metadata.png "Folder") c. After the metadata file is successfully uploaded, the **Identifier** value gets auto populated in Basic SAML Configuration section. @@ -87,13 +90,27 @@ Follow these steps to enable Azure AD SSO in the Azure portal. > [!Note] > If the **Identifier** value does not get auto populated, then please fill in the value manually according to your requirement. The Sign-on URL value is not real. Update this value with the actual Sign-on URL. Contact [OpenLearning Client support team](mailto:dev@openlearning.com) to get this value. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. +1. OpenLearning Identity Authentication application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes. + + ![image](common/default-attributes.png) + +1. In addition to above, OpenLearning Identity Authentication application expects few more attributes to be passed back in SAML response which are shown below. These attributes are also pre populated but you can review them as per your requirements. + + | Name | Source Attribute| + | ---------------| --------------- | + | urn:oid:0.9.2342.19200300.100.1.3 | user.mail | + | urn:oid:2.16.840.1.113730.3.1.241 | user.displayname | + | urn:oid:1.3.6.1.4.1.5923.1.1.1.9 | user.extensionattribute1 | + | urn:oid:1.3.6.1.4.1.5923.1.1.1.6 | user.objectid | + | urn:oid:2.5.4.10 | user.companyname | + 1. On the **Set up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Certificate (Base64)** and select **Download** to download the certificate and save it on your computer. - ![The Certificate download link](common/certificatebase64.png) + ![Screenshot shows the Certificate download link.](common/certificatebase64.png "Certificate") 1. On the **Set up OpenLearning** section, copy the appropriate URL(s) based on your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) + ![Screenshot shows to copy Configuration appropriate U R L.](common/copy-configuration-urls.png "Configuration") 1. OpenLearning application expects to enable token encryption in order to make SSO work. To activate token encryption, go to the **Azure Active Directory** > **Enterprise applications** and select **Token encryption**. For more information, please refer this [link](../manage-apps/howto-saml-token-encryption.md). @@ -149,13 +166,7 @@ In this section, you'll enable B.Simon to use Azure single sign-on by granting a ### Create OpenLearning test user -1. In a different web browser window, log in to your OpenLearning website as an administrator. - -1. Navigate to **People** and select **Invite People**. - -1. Enter the valid **Email Addresses** in the textbox and click **INVITE ALL USERS**. - - ![Screenshot shows inviting all users](./media/openlearning-tutorial/users.png "SAML USERS") +In this section, a user called Britta Simon is created in OpenLearning. OpenLearning supports just-in-time user provisioning, which is enabled by default. There is no action item for you in this section. If a user doesn't already exist in OpenLearning, a new one is created after authentication. 
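Before you test SSO, it can help to confirm that the assertion Azure AD issues actually carries the attribute names listed in the table in the **Configure Azure AD SSO** section above. The sketch below is purely illustrative, not an official verification step: it assumes you have captured an assertion with a SAML tracer and saved it, decoded and (because token encryption is enabled for OpenLearning) decrypted, to a local file named `assertion.xml`.

```python
# Unofficial check: confirm the expected urn:oid attribute names (from the
# table above) are present in a decoded, decrypted SAML assertion.
import xml.etree.ElementTree as ET

EXPECTED = {
    "urn:oid:0.9.2342.19200300.100.1.3",   # user.mail
    "urn:oid:2.16.840.1.113730.3.1.241",   # user.displayname
    "urn:oid:1.3.6.1.4.1.5923.1.1.1.9",    # user.extensionattribute1
    "urn:oid:1.3.6.1.4.1.5923.1.1.1.6",    # user.objectid
    "urn:oid:2.5.4.10",                    # user.companyname
}
SAML = "urn:oasis:names:tc:SAML:2.0:assertion"

tree = ET.parse("assertion.xml")  # assumed local copy of the assertion
found = {a.get("Name") for a in tree.iter(f"{{{SAML}}}Attribute")}

missing = EXPECTED - found
print("All expected attributes are present." if not missing else f"Missing attributes: {missing}")
```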
## Test SSO diff --git a/articles/active-directory/saas-apps/paloaltoadmin-tutorial.md b/articles/active-directory/saas-apps/paloaltoadmin-tutorial.md index a67a9b3e1e611..be5bd71cae43c 100644 --- a/articles/active-directory/saas-apps/paloaltoadmin-tutorial.md +++ b/articles/active-directory/saas-apps/paloaltoadmin-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure AD SSO integration with Palo Alto Networks - Admin UI | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with Palo Alto Networks - Admin UI' description: Learn how to configure single sign-on between Azure Active Directory and Palo Alto Networks - Admin UI. services: active-directory author: jeevansd @@ -9,7 +9,7 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 09/08/2021 +ms.date: 06/08/2022 ms.author: jeedes --- # Tutorial: Azure AD SSO integration with Palo Alto Networks - Admin UI @@ -26,6 +26,7 @@ To get started, you need the following items: * An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). * Palo Alto Networks - Admin UI single sign-on (SSO) enabled subscription. +* It is a requirement that the service should be public available. Please refer [this](../develop/single-sign-on-saml-protocol.md) page for more information. ## Scenario description diff --git a/articles/active-directory/saas-apps/rackspacesso-tutorial.md b/articles/active-directory/saas-apps/rackspacesso-tutorial.md index 731c56f701247..de2b84d828e49 100644 --- a/articles/active-directory/saas-apps/rackspacesso-tutorial.md +++ b/articles/active-directory/saas-apps/rackspacesso-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory integration with Rackspace SSO | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with Rackspace SSO' description: Learn how to configure single sign-on between Azure Active Directory and Rackspace SSO. services: active-directory author: jeevansd @@ -9,10 +9,10 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 05/14/2021 +ms.date: 06/03/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory integration with Rackspace SSO +# Tutorial: Azure AD SSO integration with Rackspace SSO In this tutorial, you'll learn how to integrate Rackspace SSO with Azure Active Directory (Azure AD). When you integrate Rackspace SSO with Azure AD, you can: @@ -31,7 +31,7 @@ To configure Azure AD integration with Rackspace SSO, you need the following ite In this tutorial, you configure and test Azure AD single sign-on in a test environment. -* Rackspace SSO supports **SP** initiated SSO. +* Rackspace SSO supports **IDP** initiated SSO. > [!NOTE] > Identifier of this application is a fixed string value so only one instance can be configured in one tenant. @@ -57,7 +57,7 @@ To configure and test Azure AD single sign-on with Rackspace SSO, you need to pe 1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with Britta Simon. 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable Britta Simon to use Azure AD single sign-on. -2. **[Configure Rackspace SSO Single Sign-On](#configure-rackspace-sso-single-sign-on)** - to configure the Single Sign-On settings on application side. +2. 
**[Configure Rackspace SSO](#configure-rackspace-sso)** - to configure the Single Sign-On settings on application side. 1. **[Set up Attribute Mapping in the Rackspace Control Panel](#set-up-attribute-mapping-in-the-rackspace-control-panel)** - to assign Rackspace roles to Azure AD users. 1. **[Test SSO](#test-sso)** - to verify whether the configuration works. @@ -69,26 +69,23 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Select a single sign-on method** page, select **SAML**. 1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") -4. On the **Basic SAML Configuration** section, Upload the **Service Provider metadata file** which you can download from the [URL](https://login.rackspace.com/federate/sp.xml) and perform the following steps: +4. On the **Basic SAML Configuration** section, upload the **Service Provider metadata file** which you can download from the [URL](https://login.rackspace.com/federate/sp.xml) and perform the following steps: a. Click **Upload metadata file**. - ![Screenshot shows Basic SAML Configuration with the Upload metadata file link.](common/upload-metadata.png) + ![Screenshot shows Basic S A M L Configuration with the Upload metadata file link.](common/upload-metadata.png "Metadata") b. Click on **folder logo** to select the metadata file and click **Upload**. - ![Screenshot shows a dialog box where you can select and upload a file.](common/browse-upload-metadata.png) + ![Screenshot shows a dialog box where you can select and upload a file.](common/browse-upload-metadata.png "Folder") c. Once the metadata file is successfully uploaded, the necessary URLs get auto populated automatically. - d. In the **Sign-on URL** text box, type the URL: - `https://login.rackspace.com/federate/` - 5. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Federation Metadata XML** from the given options as per your requirement and save it on your computer. - ![The Certificate download link](common/metadataxml.png) + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") This file will be uploaded to Rackspace to populate required Identity Federation configuration settings. @@ -116,13 +113,13 @@ In this section, you'll enable B.Simon to use Azure single sign-on by granting a 1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. 1. In the **Add Assignment** dialog, click the **Assign** button. -## Configure Rackspace SSO Single Sign-On +## Configure Rackspace SSO To configure single sign-on on **Rackspace SSO** side: 1. See the documentation at [Add an Identity Provider to the Control Panel](https://developer.rackspace.com/docs/rackspace-federation/gettingstarted/add-idp-cp/) 1. It will lead you through the steps to: - 1. Create a new Identity Provider + 1. Create a new Identity Provider. 1. Specify an email domain that users will use to identify your company when signing in. 1. Upload the **Federation Metadata XML** previously downloaded from the Azure control panel. 
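If you want to see the values that the metadata upload auto-populates, you can inspect the Rackspace SP metadata yourself before or after that step. The following is a minimal, unofficial sketch that uses only the Python standard library and the metadata URL referenced above; it simply prints the entity ID and assertion consumer service (reply) URLs found in the file.

```python
# Unofficial sketch: download the Rackspace SP metadata and print the values
# Azure AD auto-populates from it (Identifier and Reply URLs).
import urllib.request
import xml.etree.ElementTree as ET

MD = "urn:oasis:names:tc:SAML:2.0:metadata"
URL = "https://login.rackspace.com/federate/sp.xml"

with urllib.request.urlopen(URL, timeout=10) as resp:
    root = ET.fromstring(resp.read())

print("Identifier (Entity ID):", root.get("entityID"))
for acs in root.iter(f"{{{MD}}}AssertionConsumerService"):
    print("Reply URL (ACS):", acs.get("Location"), "-", acs.get("Binding"))
```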
@@ -134,11 +131,11 @@ Rackspace uses an **Attribute Mapping Policy** to assign Rackspace roles and gro * If you want to assign varying levels of Rackspace access using Azure AD groups, you will need to enable the Groups claim in the Azure **Rackspace SSO** Single Sign-on settings. The **Attribute Mapping Policy** will then be used to match those groups to desired Rackspace roles and groups: - ![The Groups claim settings](common/sso-groups-claim.png) + ![Screenshot shows the Groups claim settings.](common/sso-groups-claim.png "Groups") * By default, Azure AD sends the UID of Azure AD Groups in the SAML claim, versus the name of the Group. However, if you are synchronizing your on-premises Active Directory to Azure AD, you have the option to send the actual names of the groups: - ![The Groups claim name settings](common/sso-groups-claims-names.png) + ![Screenshot shows the Groups claim name settings.](common/sso-groups-claims-names.png "Claims") The following example **Attribute Mapping Policy** demonstrates: 1. Setting the Rackspace user's name to the `user.name` SAML claim. Any claim can be used, but it is most common to set this to a field containing the user's email address. @@ -173,18 +170,16 @@ See the Rackspace [Attribute Mapping Basics documentation](https://developer.rac ## Test SSO -In this section, you test your Azure AD single sign-on configuration with following options. - -* Click on **Test this application** in Azure portal. This will redirect to Rackspace SSO Sign-on URL where you can initiate the login flow. +In this section, you test your Azure AD single sign-on configuration with following options. -* Go to Rackspace SSO Sign-on URL directly and initiate the login flow from there. +* Click on Test this application in Azure portal and you should be automatically signed in to the Rackspace SSO for which you set up the SSO. -* You can use Microsoft My Apps. When you click the Rackspace SSO tile in the My Apps, this will redirect to Rackspace SSO Sign-on URL. For more information about the My Apps, see [Introduction to the My Apps](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). +* You can use Microsoft My Apps. When you click the Rackspace SSO tile in the My Apps, you should be automatically signed in to the Rackspace SSO for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). You can also use the **Validate** button in the **Rackspace SSO** Single sign-on settings: - ![SSO Validate Button](common/sso-validate-sign-on.png) + ![Screenshot shows the SSO Validate Button.](common/sso-validate-sign-on.png "Validate") ## Next steps -Once you configure Rackspace SSO you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Defender for Cloud Apps](/cloud-app-security/proxy-deployment-aad). +Once you configure Rackspace SSO you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Defender for Cloud Apps](/cloud-app-security/proxy-deployment-aad). 
\ No newline at end of file diff --git a/articles/active-directory/saas-apps/rstudio-connect-tutorial.md b/articles/active-directory/saas-apps/rstudio-connect-tutorial.md index 36834735fda04..5174348227600 100644 --- a/articles/active-directory/saas-apps/rstudio-connect-tutorial.md +++ b/articles/active-directory/saas-apps/rstudio-connect-tutorial.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 09/14/2021 +ms.date: 06/03/2022 ms.author: jeedes --- # Tutorial: Azure AD SSO integration with RStudio Connect SAML Authentication @@ -146,6 +146,23 @@ IdPAttributeProfile = azure SSOInitiated = IdPAndSP ``` +If `IdPAttributeProfile = azure`,the profile sets the NameIDFormat to persistent, among other settings and overrides any other specified attributes defined in the configuration [file](https://docs.rstudio.com/connect/admin/authentication/saml/#the-azure-profile). + +This becomes an issue if you want to create a user ahead of time using the RStudio Connect API and apply permissions prior to the user logging in the first time. The NameIDFormat should be set to emailAddress or some other unique identifier because when it's set to persistent, then the value gets hashed and you don't know what the value is ahead of time. So using the API will not work. +API for creating user for SAML: https://docs.rstudio.com/connect/api/#post-/v1/users + +So you may want to have this in your configuration file in this situation: + +``` +[SAML] +NameIDFormat = emailAddress +UniqueIdAttribute = NameID +UsernameAttribute = http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name +FirstNameAttribute = http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname +LastNameAttribute = http://schemas.xmlsoap.org/ws/2005/05/identity/claims/surname +EmailAttribute = http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailAddress +``` + Store your **Server Address** in the `Server.Address` value, and the **App Federation Metadata Url** in the `SAML.IdPMetaData` value. Note that this sample configuration uses an unencrypted HTTP connection, while Azure AD requires the use of an encrypted HTTPS connection. You can either use a [reverse proxy](https://docs.rstudio.com/connect/admin/proxy/) in front of RStudio Connect SAML Authentication or configure RStudio Connect SAML Authentication to [use HTTPS directly](https://docs.rstudio.com/connect/admin/appendix/configuration/#HTTPS). If you have trouble with configuration, you can read the [RStudio Connect SAML Authentication Admin Guide](https://docs.rstudio.com/connect/admin/authentication/saml/) or email the [RStudio support team](mailto:support@rstudio.com) for help. diff --git a/articles/active-directory/saas-apps/s4-digitsec-tutorial.md b/articles/active-directory/saas-apps/s4-digitsec-tutorial.md new file mode 100644 index 0000000000000..732170a416bbe --- /dev/null +++ b/articles/active-directory/saas-apps/s4-digitsec-tutorial.md @@ -0,0 +1,142 @@ +--- +title: 'Tutorial: Azure AD SSO integration with S4 - Digitsec' +description: Learn how to configure single sign-on between Azure Active Directory and S4 - Digitsec. 
+services: active-directory +author: jeevansd +manager: CelesteDG +ms.reviewer: celested +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 05/23/2022 +ms.author: jeedes +--- + +# Tutorial: Azure AD SSO integration with S4 - Digitsec + +In this tutorial, you'll learn how to integrate S4 - Digitsec with Azure Active Directory (Azure AD). When you integrate S4 - Digitsec with Azure AD, you can: + +* Control in Azure AD who has access to S4 - Digitsec. +* Enable your users to be automatically signed-in to S4 - Digitsec with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. + +## Prerequisites + +To get started, you need the following items: + +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* S4 - Digitsec single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). + +## Scenario description + +In this tutorial, you configure and test Azure AD SSO in a test environment. + +* S4 - Digitsec supports **SP and IDP** initiated SSO. +* S4 - Digitsec supports **Just In Time** user provisioning. + +> [!NOTE] +> Identifier of this application is a fixed string value so only one instance can be configured in one tenant. + +## Add S4 - Digitsec from the gallery + +To configure the integration of S4 - Digitsec into Azure AD, you need to add S4 - Digitsec from the gallery to your list of managed SaaS apps. + +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **S4 - Digitsec** in the search box. +1. Select **S4 - Digitsec** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. + +## Configure and test Azure AD SSO for S4 - Digitsec + +Configure and test Azure AD SSO with S4 - Digitsec using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in S4 - Digitsec. + +To configure and test Azure AD SSO with S4 - Digitsec, perform the following steps: + +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure S4 - Digitsec SSO](#configure-s4---digitsec-sso)** - to configure the single sign-on settings on application side. + 1. **[Create S4 - Digitsec test user](#create-s4---digitsec-test-user)** - to have a counterpart of B.Simon in S4 - Digitsec that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. + +## Configure Azure AD SSO + +Follow these steps to enable Azure AD SSO in the Azure portal. + +1. 
In the Azure portal, on the **S4 - Digitsec** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") + +1. On the **Basic SAML Configuration** section, the user does not have to perform any step as the app is already pre-integrated with Azure. + +1. Click **Set additional URLs** and perform the following step if you wish to configure the application in **SP** initiated mode: + + In the **Sign-on URL** text box, type the URL: + `https://s4.digitsec.com` + +1. On the **Set up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Certificate (Base64)** and select **Download** to download the certificate and save it on your computer. + + ![Screenshot shows the Certificate download link.](common/certificatebase64.png "Certificate") + +1. On the **Set up S4 - Digitsec** section, copy the appropriate URL(s) based on your requirement. + + ![Screenshot shows to copy configuration appropriate U R L.](common/copy-configuration-urls.png "Attributes") + +### Create an Azure AD test user + +In this section, you'll create a test user in the Azure portal called B.Simon. + +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. + +### Assign the Azure AD test user + +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to S4 - Digitsec. + +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **S4 - Digitsec**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you're expecting any role value in the SAML assertion, in the **Select Role** dialog, select the appropriate role for the user from the list and then click the **Select** button at the bottom of the screen. +1. In the **Add Assignment** dialog, click the **Assign** button. + +## Configure S4 - Digitsec SSO + +To configure single sign-on on S4 - Digitsec side, you need to send the downloaded **Certificate (Base64)** and appropriate copied URLs from Azure portal to [S4 - Digitsec support team](mailto:Support@digitsec.com). They set this setting to have the SAML SSO connection set properly on both sides. + +### Create S4 - Digitsec test user + +In this section, a user called B.Simon is created in S4 - Digitsec. S4 - Digitsec supports just-in-time user provisioning, which is enabled by default. There is no action item for you in this section. If a user doesn't already exist in S4 - Digitsec, a new one is created after authentication. 
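If just-in-time provisioning is new to you, the idea is simply that the service provider creates the account from the SAML assertion the first time a user signs in, instead of requiring it to exist in advance. The sketch below is a conceptual illustration of that pattern only; it is not S4 - Digitsec code, and the attribute names are made up for the example — the application performs this step for you automatically.

```python
# Conceptual sketch of just-in-time (JIT) provisioning on the service-provider
# side. Not S4 - Digitsec's implementation; attribute names are illustrative.
users = {}  # stand-in for the application's user store, keyed by NameID

def jit_provision(name_id: str, attributes: dict) -> dict:
    """Return the existing user, or create one from the SAML attributes."""
    user = users.get(name_id)
    if user is None:
        user = {
            "name_id": name_id,
            "display_name": attributes.get("displayname", name_id),
            "email": attributes.get("mail", name_id),
        }
        users[name_id] = user  # account is created on first successful sign-in
    return user

# First sign-in creates the account; later sign-ins reuse it.
print(jit_provision("B.Simon@contoso.com", {"displayname": "B.Simon"}))
```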
+ +## Test SSO + +In this section, you test your Azure AD single sign-on configuration with following options. + +#### SP initiated: + +* Click on **Test this application** in Azure portal. This will redirect to S4 - Digitsec Sign on URL where you can initiate the login flow. + +* Go to S4 - Digitsec Sign-on URL directly and initiate the login flow from there. + +#### IDP initiated: + +* Click on **Test this application** in Azure portal and you should be automatically signed in to the S4 - Digitsec for which you set up the SSO. + +You can also use Microsoft My Apps to test the application in any mode. When you click the S4 - Digitsec tile in the My Apps, if configured in SP mode you would be redirected to the application sign-on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the S4 - Digitsec for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). + +## Next steps + +Once you configure S4 - Digitsec you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Defender for Cloud Apps](/cloud-app-security/proxy-deployment-aad). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/seculio-tutorial.md b/articles/active-directory/saas-apps/seculio-tutorial.md new file mode 100644 index 0000000000000..f3c0a0e7a3298 --- /dev/null +++ b/articles/active-directory/saas-apps/seculio-tutorial.md @@ -0,0 +1,148 @@ +--- +title: 'Tutorial: Azure AD SSO integration with Seculio' +description: Learn how to configure single sign-on between Azure Active Directory and Seculio. +services: active-directory +author: jeevansd +manager: CelesteDG +ms.reviewer: CelesteDG +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 05/30/2022 +ms.author: jeedes + +--- + +# Tutorial: Azure AD SSO integration with Seculio + +In this tutorial, you'll learn how to integrate Seculio with Azure Active Directory (Azure AD). When you integrate Seculio with Azure AD, you can: + +* Control in Azure AD who has access to Seculio. +* Enable your users to be automatically signed-in to Seculio with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. + +## Prerequisites + +To get started, you need the following items: + +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* Seculio single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). + +## Scenario description + +In this tutorial, you configure and test Azure AD SSO in a test environment. + +* Seculio supports **SP** and **IDP** initiated SSO. + +## Add Seculio from the gallery + +To configure the integration of Seculio into Azure AD, you need to add Seculio from the gallery to your list of managed SaaS apps. + +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. 
On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **Seculio** in the search box. +1. Select **Seculio** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. + +## Configure and test Azure AD SSO for Seculio + +Configure and test Azure AD SSO with Seculio using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in Seculio. + +To configure and test Azure AD SSO with Seculio, perform the following steps: + +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure Seculio SSO](#configure-seculio-sso)** - to configure the single sign-on settings on application side. + 1. **[Create Seculio test user](#create-seculio-test-user)** - to have a counterpart of B.Simon in Seculio that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. + +## Configure Azure AD SSO + +Follow these steps to enable Azure AD SSO in the Azure portal. + +1. In the Azure portal, on the **Seculio** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") + +1. On the **Basic SAML Configuration** section, perform the following steps: + + a. In the **Identifier** textbox, type a URL using the following pattern: + `https://seculio.com/saml/` + + b. In the **Reply URL** textbox, type a URL using the following pattern: + `https://seculio.com/saml/acs/` + +1. Click **Set additional URLs** and perform the following step if you wish to configure the application in **SP** initiated mode: + + In the **Sign-on URL** text box, type the URL: + `https://seculio.com/` + + > [!Note] + > These values are not real. Update these values with the actual Identifier and Reply URL. Contact [Seculio support team](mailto:seculio@lrm.jp) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. + +1. On the **Set-up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Federation Metadata XML** and select **Download** to download the certificate and save it on your computer. + + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") + +1. On the **Set up Seculio** section, copy the appropriate URL(s) based on your requirement. + + ![Screenshot shows to copy configuration appropriate U R L.](common/copy-configuration-urls.png "Attributes") + +### Create an Azure AD test user + +In this section, you'll create a test user in the Azure portal called B.Simon. + +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. 
+1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. + +### Assign the Azure AD test user + +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to Seculio. + +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **Seculio**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. + +## Configure Seculio SSO + +To configure single sign-on on **Seculio** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [Seculio support team](mailto:seculio@lrm.jp). They set this setting to have the SAML SSO connection set properly on both sides. + +### Create Seculio test user + +In this section, you create a user called Britta Simon in Seculio. Work with [Seculio support team](mailto:seculio@lrm.jp) to add the users in the Seculio platform. Users must be created and activated before you use single sign-on. + +## Test SSO + +In this section, you test your Azure AD single sign-on configuration with following options. + +#### SP initiated: + +* Click on **Test this application** in Azure portal. This will redirect to Seculio Sign on URL where you can initiate the login flow. + +* Go to Seculio Sign-on URL directly and initiate the login flow from there. + +#### IDP initiated: + +* Click on **Test this application** in Azure portal and you should be automatically signed in to the Seculio for which you set up the SSO. + +You can also use Microsoft My Apps to test the application in any mode. When you click the Seculio tile in the My Apps, if configured in SP mode you would be redirected to the application sign on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the Seculio for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). + +## Next steps + +Once you configure Seculio you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). 
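As a convenience for the hand-off to the Seculio support team described above, the values they typically need (the Login URL, the Azure AD Identifier, and the signing certificate) can all be read straight out of the downloaded **Federation Metadata XML**. The sketch below is an unofficial helper that assumes you saved that file locally as `federation-metadata.xml`; it uses only the Python standard library.

```python
# Unofficial helper: print the hand-off values contained in the downloaded
# Azure AD Federation Metadata XML (file name below is an assumption).
import base64
import hashlib
import xml.etree.ElementTree as ET

MD = "urn:oasis:names:tc:SAML:2.0:metadata"
DS = "http://www.w3.org/2000/09/xmldsig#"

root = ET.parse("federation-metadata.xml").getroot()

print("Azure AD Identifier:", root.get("entityID"))
for sso in root.iter(f"{{{MD}}}SingleSignOnService"):
    print("Login URL:", sso.get("Location"), "-", sso.get("Binding"))

cert = root.find(f".//{{{DS}}}X509Certificate")
if cert is not None:
    thumbprint = hashlib.sha1(base64.b64decode(cert.text.strip())).hexdigest().upper()
    print("Signing certificate SHA-1 thumbprint:", thumbprint)
```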
\ No newline at end of file diff --git a/articles/active-directory/saas-apps/snowflake-tutorial.md b/articles/active-directory/saas-apps/snowflake-tutorial.md index 049eae9b3f01b..07c8aab241406 100644 --- a/articles/active-directory/saas-apps/snowflake-tutorial.md +++ b/articles/active-directory/saas-apps/snowflake-tutorial.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 12/22/2021 +ms.date: 06/03/2022 ms.author: jeedes --- # Tutorial: Azure AD SSO integration with Snowflake @@ -124,7 +124,7 @@ In this section, you'll enable B.Simon to use Azure single sign-on by granting a ## Configure Snowflake SSO -1. In a different web browser window, login to Snowflake as a Security Administrator. +1. In a different web browser window, log in to Snowflake as a Security Administrator. 1. **Switch Role** to **ACCOUNTADMIN**, by clicking on **profile** on the top right side of page. @@ -165,6 +165,18 @@ CREATE [ OR REPLACE ] SECURITY INTEGRATION [ IF NOT EXISTS ] [ SAML2_SNOWFLAKE_ACS_URL = '' ] ``` +If you are using a new Snowflake URL with an organization name as the login URL, it is necessary to update the following parameters: + + Alter the security integration to add the Snowflake Issuer URL and the SAML2 Snowflake ACS URL. For more information, follow step 6 in [this](https://community.snowflake.com/s/article/HOW-TO-SETUP-SSO-WITH-ADFS-AND-THE-SNOWFLAKE-NEW-URL-FORMAT-OR-PRIVATELINK) article. + +1. [ SAML2_SNOWFLAKE_ISSUER_URL = '' ] + + alter security integration `` set SAML2_SNOWFLAKE_ISSUER_URL = `https://-.snowflakecomputing.com`; + +2. [ SAML2_SNOWFLAKE_ACS_URL = '' ] + + alter security integration `` set SAML2_SNOWFLAKE_ACS_URL = `https://-.snowflakecomputing.com/fed/login`; + > [!NOTE] > Please follow [this](https://docs.snowflake.com/en/sql-reference/sql/create-security-integration.html) guide to know more about how to create a SAML2 security integration. @@ -200,15 +212,15 @@ In this section, you test your Azure AD single sign-on configuration with follow #### SP initiated: -* Click on **Test this application** in Azure portal. This will redirect to Snowflake Sign on URL where you can initiate the login flow. +* Click on **Test this application** in Azure portal. This will redirect to Snowflake Sign-on URL where you can initiate the login flow. -* Go to Snowflake Sign-on URL directly and initiate the login flow from there. +* Go to Snowflake Sign-on URL directly and initiate the login flow from there. #### IDP initiated: * Click on **Test this application** in Azure portal and you should be automatically signed in to the Snowflake for which you set up the SSO. -You can also use Microsoft My Apps to test the application in any mode. When you click the Snowflake tile in the My Apps, if configured in SP mode you would be redirected to the application sign on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the Snowflake for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). +You can also use Microsoft My Apps to test the application in any mode. When you click the Snowflake tile in the My Apps, if configured in SP mode you would be redirected to the application sign-on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the Snowflake for which you set up the SSO.
For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). ## Next steps diff --git a/articles/active-directory/saas-apps/standard-for-success-tutorial.md b/articles/active-directory/saas-apps/standard-for-success-tutorial.md new file mode 100644 index 0000000000000..680bd4d19d3a6 --- /dev/null +++ b/articles/active-directory/saas-apps/standard-for-success-tutorial.md @@ -0,0 +1,185 @@ +--- +title: 'Tutorial: Azure AD SSO integration with Standard for Success K-12' +description: Learn how to configure single sign-on between Azure Active Directory and Standard for Success K-12. +services: active-directory +author: jeevansd +manager: CelesteDG +ms.reviewer: CelesteDG +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 05/27/2022 +ms.author: jeedes + +--- + +# Tutorial: Azure AD SSO integration with Standard for Success K-12 + +In this tutorial, you'll learn how to integrate Standard for Success K-12 with Azure Active Directory (Azure AD). When you integrate Standard for Success K-12 with Azure AD, you can: + +* Control in Azure AD who has access to Standard for Success K-12. +* Enable your users to be automatically signed-in to Standard for Success K-12 with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. + +## Prerequisites + +To get started, you need the following items: + +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* Standard for Success K-12 single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). + +## Scenario description + +In this tutorial, you configure and test Azure AD SSO in a test environment. + +* Standard for Success K-12 supports **SP** and **IDP** initiated SSO. + +## Add Standard for Success K-12 from the gallery + +To configure the integration of Standard for Success K-12 into Azure AD, you need to add Standard for Success K-12 from the gallery to your list of managed SaaS apps. + +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **Standard for Success K-12** in the search box. +1. Select **Standard for Success K-12** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. + +## Configure and test Azure AD SSO for Standard for Success K-12 + +Configure and test Azure AD SSO with Standard for Success K-12 using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in Standard for Success K-12. + +To configure and test Azure AD SSO with Standard for Success K-12, perform the following steps: + +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. 
**[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure Standard for Success K-12 SSO](#configure-standard-for-success-k-12-sso)** - to configure the single sign-on settings on application side. + 1. **[Create Standard for Success K-12 test user](#create-standard-for-success-k-12-test-user)** - to have a counterpart of B.Simon in Standard for Success K-12 that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. + +## Configure Azure AD SSO + +Follow these steps to enable Azure AD SSO in the Azure portal. + +1. In the Azure portal, on the **Standard for Success K-12** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic SAML Configuration.](common/edit-urls.png "Basic Configuration") + +1. On the **Basic SAML Configuration** section, perform the following steps: + + a. In the **Identifier** text box, type a value using the following pattern: + `api://` + + b. In the **Reply URL** text box, type a URL using the following pattern: + `https://edu.standardforsuccess.com/access/mssaml_consume?did=` + +1. Click **Set additional URLs** and perform the following steps if you wish to configure the application in SP initiated mode: + + a. In the **Sign-on URL** text box, type a URL using the following pattern: + `https://edu.standardforsuccess.com/access/mssaml_int?did=` + + b. In the **Relay State** text box, type a URL using the following pattern: + `https://edu.standardforsuccess.com/access/mssaml_consume?did=` + + > [!Note] + > These values are not real. Update these values with the actual Identifier, Reply URL, Sign-on URL and Relay State. Contact [Standard for Success K-12 Client support team](mailto:help@standardforsuccess.com) to get the INSTITUTION-ID value. You can also refer to the patterns shown in the Basic SAML Configuration section in the Azure portal. + +1. In the **SAML Signing Certificate** section, click **Edit** button to open **SAML Signing Certificate** dialog. + + ![Screenshot shows to edit SAML Signing Certificate.](common/edit-certificate.png "Signing Certificate") + +1. In the **SAML Signing Certificate** section, copy the **Thumbprint Value** and save it on your computer. + + ![Screenshot shows to copy thumbprint value.](common/copy-thumbprint.png "Thumbprint") + +1. On the **Set up Standard for Success K-12** section, copy the appropriate URL(s) based on your requirement. + + ![Screenshot shows to copy configuration appropriate U R L.](common/copy-configuration-urls.png "Attributes") + +### Create an Azure AD test user + +In this section, you'll create a test user in the Azure portal called B.Simon. + +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. 
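If you create test users like B.Simon frequently, the same account can also be created with a short script instead of the portal steps above. This is an optional sketch, not part of the tutorial: it assumes you already hold a Microsoft Graph access token with the `User.ReadWrite.All` permission and have the `requests` package installed; the domain and password values are placeholders.

```python
# Create the B.Simon test user through Microsoft Graph instead of the portal.
import requests

token = "<graph-access-token>"  # placeholder: acquire with MSAL, Azure CLI, and so on

new_user = {
    "accountEnabled": True,
    "displayName": "B.Simon",
    "mailNickname": "BSimon",
    "userPrincipalName": "B.Simon@contoso.com",  # placeholder domain
    "passwordProfile": {
        "forceChangePasswordNextSignIn": True,
        "password": "<initial-password>",  # placeholder
    },
}

resp = requests.post(
    "https://graph.microsoft.com/v1.0/users",
    headers={"Authorization": f"Bearer {token}"},
    json=new_user,
)
resp.raise_for_status()
print("Created user id:", resp.json()["id"])
```

The created account behaves the same as one created in the portal, so the assignment steps that follow are unchanged.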
+ +### Assign the Azure AD test user + +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to Standard for Success K-12. + +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **Standard for Success K-12**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. + +## Configure Standard for Success K-12 SSO + +1. Log in to your Standard for Success K-12 company site as an administrator with superuser access. + +1. From the menu, navigate to **Utilities** -> **Tools & Features**. + +1. Scroll down to **Single Sign On Settings** and click the **Microsoft Azure Single Sign On** link and perform the following steps: + + ![Screenshot that shows the Configuration Settings.](./media/standard-for-success-tutorial/settings.png "Configuration") + + a. Select the **Enable Azure Single Sign On** checkbox. + + b. In the **Login URL** textbox, paste the **Login URL** value which you have copied from the Azure portal. + + c. In the **Azure AD Identifier** textbox, paste the **Azure AD Identifier** value which you have copied from the Azure portal. + + d. Enter the **Application ID** value in the **Application ID** text box. + + e. In the **Certificate Thumbprint** text box, paste the **Thumbprint Value** that you copied from the Azure portal. + + f. Click **Save**. + +### Create Standard for Success K-12 test user + +1. In a different web browser window, log in to your Standard for Success K-12 website as an administrator with superuser privileges. + +1. From the menu, navigate to **Utilities** -> **Accounts Manager**, then click **Create New User** and perform the following steps: + + ![Screenshot that shows the User Information fields.](./media/standard-for-success-tutorial/name.png "User Information") + + a. In the **First Name** text box, enter the first name of the user. + + b. In the **Last Name** text box, enter the last name of the user. + + c. In the **Email** text box, enter the email address which you have added within Azure. + + d. Scroll to the bottom and click **Create User**. + +## Test SSO + +In this section, you test your Azure AD single sign-on configuration with following options. + +#### SP initiated: + +* Click on **Test this application** in Azure portal. This will redirect to Standard for Success K-12 Sign on URL where you can initiate the login flow. + +* Go to Standard for Success K-12 Sign-on URL directly and initiate the login flow from there. + +#### IDP initiated: + +* Click on **Test this application** in Azure portal and you should be automatically signed in to the Standard for Success K-12 for which you set up the SSO. + +You can also use Microsoft My Apps to test the application in any mode.
When you click the Standard for Success K-12 tile in the My Apps, if configured in SP mode you would be redirected to the application sign on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the Standard for Success K-12 for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). + +## Next steps + +Once you configure Standard for Success K-12 you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/tap-app-security-provisioning-tutorial.md b/articles/active-directory/saas-apps/tap-app-security-provisioning-tutorial.md index f6bda0b9515af..d7edd1e4c0666 100644 --- a/articles/active-directory/saas-apps/tap-app-security-provisioning-tutorial.md +++ b/articles/active-directory/saas-apps/tap-app-security-provisioning-tutorial.md @@ -45,9 +45,14 @@ The scenario outlined in this tutorial assumes that you already have the followi ## Step 2. Configure TAP App Security to support provisioning with Azure AD -Contact [TAP App Security support](mailto:support@tapappsecurity.com) in order to obtain a SCIM Token. - - +1. Log in to the [TAP App Security back-end control panel](https://app.tapappsecurity.com/). +1. Navigate to **Single Sign On > Active Directory**. +1. Click on the **Integrate Active Directory app** button. Then enter the domain of your organization and click the **Save** button. + [![Screenshot on how to add domain.](media/tap-app-security-provisioning-tutorial/add-domain.png)](media/tap-app-security-provisioning-tutorial/add-domain.png#lightbox) +1. After entering the domain, a new line appears in the table showing the domain name and its status as **initialize**. Click on the gear icon to reveal technical data about the TAP App Security server and to complete initialization. + [![Screenshot showing initialize.](media/tap-app-security-provisioning-tutorial/initialize.png)](media/tap-app-security-provisioning-tutorial/initialize.png#lightbox) +1. Technical data about TAP App Security servers is revealed. You can now copy the **Tenant Url** and **Authorization Token** from this page to be used later while setting up provisioning in Azure AD. + [![Screenshot showing domain details.](media/tap-app-security-provisioning-tutorial/domain-details.png)](media/tap-app-security-provisioning-tutorial/domain-details.png#lightbox) ## Step 3. Add TAP App Security from the Azure AD application gallery Add TAP App Security from the Azure AD application gallery to start managing provisioning to TAP App Security. If you have previously setup TAP App Security for SSO, you can use the same application. However it is recommended that you create a separate app when testing out the integration initially. Learn more about adding an application from the gallery [here](../manage-apps/add-application-portal.md).
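Once you have the **Tenant Url** and **Authorization Token** from step 2, you can optionally confirm they work before entering them in the provisioning configuration. The sketch below is not part of the official setup; it assumes the Tenant Url points at a standard SCIM 2.0 base path and that the token is accepted as a bearer token, which is roughly what the **Test Connection** button checks for you.

```python
# Quick SCIM 2.0 connectivity check using the values copied from TAP App Security.
import requests

tenant_url = "<tenant-url-from-tap-app-security>"      # placeholder
token = "<authorization-token-from-tap-app-security>"  # placeholder

resp = requests.get(
    f"{tenant_url.rstrip('/')}/Users",
    params={"count": 1},
    headers={"Authorization": f"Bearer {token}", "Accept": "application/scim+json"},
    timeout=30,
)
resp.raise_for_status()

# A SCIM ListResponse reports how many users the endpoint can see.
print("totalResults:", resp.json().get("totalResults"))
```

A 401 or 403 response usually means the token was copied incorrectly or has since been regenerated in the control panel.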
diff --git a/articles/active-directory/saas-apps/timeclock-365-saml-tutorial.md b/articles/active-directory/saas-apps/timeclock-365-saml-tutorial.md index d7f7c617fe0ad..146096f0619e9 100644 --- a/articles/active-directory/saas-apps/timeclock-365-saml-tutorial.md +++ b/articles/active-directory/saas-apps/timeclock-365-saml-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory single sign-on (SSO) integration with Timeclock 365 SAML | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with Timeclock 365 SAML' description: Learn how to configure single sign-on between Azure Active Directory and Timeclock 365 SAML. services: active-directory author: jeevansd @@ -9,12 +9,12 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 09/02/2021 +ms.date: 05/27/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory single sign-on (SSO) integration with Timeclock 365 SAML +# Tutorial: Azure AD SSO integration with Timeclock 365 SAML In this tutorial, you'll learn how to integrate Timeclock 365 SAML with Azure Active Directory (Azure AD). When you integrate Timeclock 365 SAML with Azure AD, you can: @@ -34,7 +34,7 @@ To get started, you need the following items: In this tutorial, you configure and test Azure AD SSO in a test environment. * Timeclock 365 SAML supports **SP** initiated SSO. -* Timeclock 365 SAML supports [Automated user provisioning](timeclock-365-provisioning-tutorial.md). +* Timeclock 365 SAML supports [Automated user provisioning](timeclock-365-saml-provisioning-tutorial.md). ## Adding Timeclock 365 SAML from the gallery @@ -141,7 +141,7 @@ In this section, you'll enable B.Simon to use Azure single sign-on by granting a 1. Click on **Create** button to create the test user. > [!NOTE] -> Timeclock 365 SAML also supports automatic user provisioning, you can find more details [here](./timeclock-365-provisioning-tutorial.md) on how to configure automatic user provisioning. +> Timeclock 365 SAML also supports automatic user provisioning, you can find more details [here](./timeclock-365-saml-provisioning-tutorial.md) on how to configure automatic user provisioning. ## Test SSO diff --git a/articles/active-directory/saas-apps/timeoffmanager-tutorial.md b/articles/active-directory/saas-apps/timeoffmanager-tutorial.md index 83e4c53fa12b4..8a13f67858834 100644 --- a/articles/active-directory/saas-apps/timeoffmanager-tutorial.md +++ b/articles/active-directory/saas-apps/timeoffmanager-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory single sign-on (SSO) integration with TimeOffManager | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with TimeOffManager' description: Learn how to configure single sign-on between Azure Active Directory and TimeOffManager. services: active-directory author: jeevansd @@ -9,11 +9,11 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 12/10/2019 +ms.date: 06/07/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory single sign-on (SSO) integration with TimeOffManager +# Tutorial: Azure AD SSO integration with TimeOffManager In this tutorial, you'll learn how to integrate TimeOffManager with Azure Active Directory (Azure AD). 
When you integrate TimeOffManager with Azure AD, you can: @@ -21,45 +21,42 @@ In this tutorial, you'll learn how to integrate TimeOffManager with Azure Active * Enable your users to be automatically signed-in to TimeOffManager with their Azure AD accounts. * Manage your accounts in one central location - the Azure portal. -To learn more about SaaS app integration with Azure AD, see [What is application access and single sign-on with Azure Active Directory](../manage-apps/what-is-single-sign-on.md). - ## Prerequisites To get started, you need the following items: * An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). * TimeOffManager single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD SSO in a test environment. +* TimeOffManager supports **IDP** initiated SSO. -* TimeOffManager supports **IDP** initiated SSO - -* TimeOffManager supports **Just In Time** user provisioning +* TimeOffManager supports **Just In Time** user provisioning. > [!NOTE] > Identifier of this application is a fixed string value so only one instance can be configured in one tenant. - -## Adding TimeOffManager from the gallery +## Add TimeOffManager from the gallery To configure the integration of TimeOffManager into Azure AD, you need to add TimeOffManager from the gallery to your list of managed SaaS apps. -1. Sign in to the [Azure portal](https://portal.azure.com) using either a work or school account, or a personal Microsoft account. +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. 1. On the left navigation pane, select the **Azure Active Directory** service. 1. Navigate to **Enterprise Applications** and then select **All Applications**. 1. To add new application, select **New application**. 1. In the **Add from the gallery** section, type **TimeOffManager** in the search box. 1. Select **TimeOffManager** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. - -## Configure and test Azure AD single sign-on for TimeOffManager +## Configure and test Azure AD SSO for TimeOffManager Configure and test Azure AD SSO with TimeOffManager using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in TimeOffManager. -To configure and test Azure AD SSO with TimeOffManager, complete the following building blocks: +To configure and test Azure AD SSO with TimeOffManager, perform the following steps: 1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. @@ -72,13 +69,13 @@ To configure and test Azure AD SSO with TimeOffManager, complete the following b Follow these steps to enable Azure AD SSO in the Azure portal. -1. In the [Azure portal](https://portal.azure.com/), on the **TimeOffManager** application integration page, find the **Manage** section and select **single sign-on**. +1. In the Azure portal, on the **TimeOffManager** application integration page, find the **Manage** section and select **single sign-on**. 1. 
On the **Select a single sign-on method** page, select **SAML**. -1. On the **Set up single sign-on with SAML** page, click the edit/pen icon for **Basic SAML Configuration** to edit the settings. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") -1. On the **Basic SAML Configuration** section, enter the values for the following fields: +1. On the **Basic SAML Configuration** section, perform the following step: In the **Reply URL** text box, type a URL using the following pattern: `https://www.timeoffmanager.com/cpanel/sso/consume.aspx?company_id=` @@ -88,7 +85,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. TimeOffManager application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes. - ![image](common/edit-attribute.png) + ![Screenshot shows the image of TimeOffManager application.](common/edit-attribute.png "Image") 1. In addition to above, TimeOffManager application expects few more attributes to be passed back in SAML response which are shown below. These attributes are also pre populated but you can review them as per your requirement. @@ -100,11 +97,11 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Set up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Certificate (Base64)** and select **Download** to download the certificate and save it on your computer. - ![The Certificate download link](common/certificatebase64.png) + ![Screenshot shows the Certificate download link.](common/certificatebase64.png "Certificate") 1. On the **Set up TimeOffManager** section, copy the appropriate URL(s) based on your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) + ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Metadata") ### Create an Azure AD test user @@ -125,13 +122,7 @@ In this section, you'll enable B.Simon to use Azure single sign-on by granting a 1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. 1. In the applications list, select **TimeOffManager**. 1. In the app's overview page, find the **Manage** section and select **Users and groups**. - - ![The "Users and groups" link](common/users-groups-blade.png) - 1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. - - ![The Add User link](common/add-assign-user.png) - 1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. 1. If you're expecting any role value in the SAML assertion, in the **Select Role** dialog, select the appropriate role for the user from the list and then click the **Select** button at the bottom of the screen. 1. In the **Add Assignment** dialog, click the **Assign** button. @@ -142,11 +133,11 @@ In this section, you'll enable B.Simon to use Azure single sign-on by granting a 2. Go to **Account \> Account Options \> Single Sign-On Settings**. 
- ![Screenshot shows Single Sign-On Settings selected from Account Options.](./media/timeoffmanager-tutorial/ic795917.png "Single Sign-On Settings") + ![Screenshot shows Single Sign-On Settings selected from Account Options.](./media/timeoffmanager-tutorial/account.png "Single Sign-On Settings") 3. In the **Single Sign-On Settings** section, perform the following steps: - ![Screenshot shows the Single Sign-On Settings section where you can enter the values described.](./media/timeoffmanager-tutorial/ic795918.png "Single Sign-On Settings") + ![Screenshot shows the Single Sign-On Settings section where you can enter the values described.](./media/timeoffmanager-tutorial/settings.png "Single Sign-On Settings") a. Open your base-64 encoded certificate in notepad, copy the content of it into your clipboard, and then paste the entire Certificate into **X.509 Certificate** textbox. @@ -164,7 +155,7 @@ In this section, you'll enable B.Simon to use Azure single sign-on by granting a 4. In **Single Sign on settings** page, copy the value of **Assertion Consumer Service URL** and paste it in the **Reply URL** text box under **Basic SAML Configuration** section in Azure portal. - ![Screenshot shows the Assertion Consumer Service U R L link.](./media/timeoffmanager-tutorial/ic795915.png "Single Sign-On Settings") + ![Screenshot shows the Assertion Consumer Service U R L link.](./media/timeoffmanager-tutorial/values.png "Single Sign-On Settings") ### Create TimeOffManager test user @@ -175,16 +166,12 @@ In this section, a user called Britta Simon is created in TimeOffManager. TimeOf ## Test SSO -In this section, you test your Azure AD single sign-on configuration using the Access Panel. - -When you click the TimeOffManager tile in the Access Panel, you should be automatically signed in to the TimeOffManager for which you set up SSO. For more information about the Access Panel, see [Introduction to the Access Panel](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). - -## Additional resources +In this section, you test your Azure AD single sign-on configuration with following options. -- [ List of Tutorials on How to Integrate SaaS Apps with Azure Active Directory ](./tutorial-list.md) +* Click on Test this application in Azure portal and you should be automatically signed in to the TimeOffManager for which you set up the SSO. -- [What is application access and single sign-on with Azure Active Directory? ](../manage-apps/what-is-single-sign-on.md) +* You can use Microsoft My Apps. When you click the TimeOffManager tile in the My Apps, you should be automatically signed in to the TimeOffManager for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). -- [What is conditional access in Azure Active Directory?](../conditional-access/overview.md) +## Next steps -- [Try TimeOffManager with Azure AD](https://aad.portal.azure.com/) \ No newline at end of file +Once you configure TimeOffManager you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). 
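Before pasting the base-64 encoded certificate into the **X.509 Certificate** textbox, you can confirm that you picked the right file and that it is still within its validity period. This optional sketch assumes the download is PEM-formatted (it contains `-----BEGIN CERTIFICATE-----` markers), that the `cryptography` package is installed, and that the file name is a placeholder.

```python
# Inspect the Certificate (Base64) file downloaded from the Azure portal.
from cryptography import x509

with open("TimeOffManager.cer", "rb") as f:  # placeholder file name
    cert = x509.load_pem_x509_certificate(f.read())

print("Subject:   ", cert.subject.rfc4514_string())
print("Not before:", cert.not_valid_before)
print("Not after: ", cert.not_valid_after)
```

If the file fails to parse, you most likely downloaded the raw (binary) certificate instead of the Base64 version.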
\ No newline at end of file diff --git a/articles/active-directory/saas-apps/toc.yml b/articles/active-directory/saas-apps/toc.yml index f025d461b3792..ce6ec2b3c5696 100644 --- a/articles/active-directory/saas-apps/toc.yml +++ b/articles/active-directory/saas-apps/toc.yml @@ -75,6 +75,8 @@ href: adp-emea-french-hr-portal-tutorial.md - name: ADP Globalview (Deprecated) href: adglobalview-tutorial.md + - name: Agile Provisioning + href: agile-provisioning-tutorial.md - name: Agiloft Contract Management Suite href: agiloft-tutorial.md - name: Aha! @@ -247,6 +249,8 @@ href: baldwin-safety-&-compliance-tutorial.md - name: Balsamiq Wireframes href: balsamiq-wireframes-tutorial.md + - name: BMIS - Battery Management Information System + href: battery-management-information-system-tutorial.md - name: BC in the Cloud href: bcinthecloud-tutorial.md - name: Bealink @@ -325,8 +329,6 @@ href: braze-tutorial.md - name: Bridge href: bridge-tutorial.md - - name: Bridgeline Unbound - href: bridgelineunbound-tutorial.md - name: Bright Pattern Omnichannel Contact Center href: bright-pattern-omnichannel-contact-center-tutorial.md - name: Brightidea @@ -365,7 +367,7 @@ href: capriza-tutorial.md - name: Carbonite Endpoint Backup href: carbonite-endpoint-backup-tutorial.md - - name: Carlson Wagonlit Travel + - name: CWT href: carlsonwagonlit-tutorial.md - name: Catchpoint href: catchpoint-tutorial.md @@ -739,6 +741,8 @@ href: envimmis-tutorial.md - name: Envoy href: envoy-tutorial.md + - name: E2open LSP + href: e2open-lsp-tutorial.md - name: EPHOTO DAM href: ephoto-dam-tutorial.md - name: ePlatform @@ -926,7 +930,7 @@ href: gigya-tutorial.md - name: GitHub href: github-tutorial.md - - name: GitHub AE + - name: GitHub Enterprise Server href: github-ae-tutorial.md - name: GitHub Enterprise Cloud - Enterprise Account href: github-enterprise-cloud-enterprise-account-tutorial.md @@ -968,6 +972,8 @@ href: grovo-tutorial.md - name: GT Nexus Prod System href: gtnexus-sso-module-tutorial.md + - name: Guardium Data Protection + href: guardium-data-protection-tutorial.md - name: H - I items: - name: Hackerone @@ -1184,6 +1190,8 @@ items: - name: Jamf Pro href: jamfprosamlconnector-tutorial.md + - name: Javelo + href: javelo-tutorial.md - name: JDA Cloud href: jdacloud-tutorial.md - name: Jedox @@ -1942,6 +1950,8 @@ href: screensteps-tutorial.md - name: Scuba Analytics href: scuba-analytics-tutorial.md + - name: S4 - Digitsec + href: s4-digitsec-tutorial.md - name: SD Elements href: sd-elements-tutorial.md - name: SDS & Chemical Information Management @@ -1950,6 +1960,8 @@ href: secretserver-on-premises-tutorial.md - name: Sectigo Certificate Manager href: sectigo-certificate-manager-tutorial.md + - name: Seculio + href: seculio-tutorial.md - name: SECURE DELIVER href: securedeliver-tutorial.md - name: SecureW2 JoinNow Connector @@ -2114,6 +2126,8 @@ href: ssogen-tutorial.md - name: Standard for Success Accreditation href: standard-for-success-accreditation-tutorial.md + - name: Standard for Success K-12 + href: standard-for-success-tutorial.md - name: Starmind href: starmind-tutorial.md - name: StatusPage @@ -2294,6 +2308,8 @@ href: torii-tutorial.md - name: TruNarrative href: trunarrative-tutorial.md + - name: TVU Service + href: tvu-service-tutorial.md - name: Tulip href: tulip-tutorial.md - name: TurboRater @@ -2452,6 +2468,8 @@ href: workplacebyfacebook-tutorial.md - name: Workrite href: workrite-tutorial.md + - name: Workshop + href: workshop-tutorial.md - name: Workspot Control href: workspotcontrol-tutorial.md - 
name: Workstars @@ -2466,6 +2484,8 @@ href: wuru-app-tutorial.md - name: XaitPorter href: xaitporter-tutorial.md + - name: xCarrier® + href: xcarrier-tutorial.md - name: xMatters OnDemand href: xmatters-ondemand-tutorial.md - name: X-point Cloud @@ -2599,6 +2619,8 @@ href: bldng-app-provisioning-tutorial.md - name: Blink href: blink-provisioning-tutorial.md + - name: Blinq + href: blinq-provisioning-tutorial.md - name: BlogIn href: blogin-provisioning-tutorial.md - name: BlueJeans @@ -3007,6 +3029,8 @@ href: webroot-security-awareness-training-provisioning-tutorial.md - name: WEDO href: wedo-provisioning-tutorial.md + - name: Whimsical + href: whimsical-provisioning-tutorial.md - name: Workday to Active Directory href: workday-inbound-tutorial.md - name: Workday to Azure AD diff --git a/articles/active-directory/saas-apps/tvu-service-tutorial.md b/articles/active-directory/saas-apps/tvu-service-tutorial.md new file mode 100644 index 0000000000000..0de988fbb9f56 --- /dev/null +++ b/articles/active-directory/saas-apps/tvu-service-tutorial.md @@ -0,0 +1,135 @@ +--- +title: 'Tutorial: Azure AD SSO integration with TVU Service' +description: Learn how to configure single sign-on between Azure Active Directory and TVU Service. +services: active-directory +author: jeevansd +manager: CelesteDG +ms.reviewer: CelesteDG +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 06/01/2022 +ms.author: jeedes + +--- + +# Tutorial: Azure AD SSO integration with TVU Service + +In this tutorial, you'll learn how to integrate TVU Service with Azure Active Directory (Azure AD). When you integrate TVU Service with Azure AD, you can: + +* Control in Azure AD who has access to TVU Service. +* Enable your users to be automatically signed-in to TVU Service with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. + +## Prerequisites + +To get started, you need the following items: + +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* TVU Service single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). + +## Scenario description + +In this tutorial, you configure and test Azure AD SSO in a test environment. + +* TVU Service supports **IDP** initiated SSO. + +## Add TVU Service from the gallery + +To configure the integration of TVU Service into Azure AD, you need to add TVU Service from the gallery to your list of managed SaaS apps. + +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **TVU Service** in the search box. +1. Select **TVU Service** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. + +## Configure and test Azure AD SSO for TVU Service + +Configure and test Azure AD SSO with TVU Service using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in TVU Service. 
+ +To configure and test Azure AD SSO with TVU Service, perform the following steps: + +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure TVU Service SSO](#configure-tvu-service-sso)** - to configure the single sign-on settings on application side. + 1. **[Create TVU Service test user](#create-tvu-service-test-user)** - to have a counterpart of B.Simon in TVU Service that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. + +## Configure Azure AD SSO + +Follow these steps to enable Azure AD SSO in the Azure portal. + +1. In the Azure portal, on the **TVU Service** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") + +1. On the **Basic SAML Configuration** section, the application is pre-configured and the necessary URLs are already pre-populated with Azure. The user needs to save the configuration by clicking the **Save** button. + +1. Your TVU Service application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows an example for this. The default value of **Unique User Identifier** is **user.userprincipalname** but TVU Service expects this to be mapped with the user's email address. For that you can use **user.mail** attribute from the list or use the appropriate attribute value based on your organization configuration. + + ![Screenshot shows the image of TVU Service application.](common/default-attributes.png "Attributes") + +1. In addition to above, TVU Service application expects few more attributes to be passed back in SAML response, which are shown below. These attributes are also pre populated but you can review them as per your requirements. + + | Name | Source Attribute| + | ------------ | --------- | + | surname | user.surname | + | firstName | user.givenname | + | lastName | user.surname | + | email | user.mail | + +1. On the **Set up single sign-on with SAML** page, In the **SAML Signing Certificate** section, click copy button to copy **App Federation Metadata Url** and save it on your computer. + + ![Screenshot shows the Certificate download link.](common/copy-metadataurl.png "Certificate") + +### Create an Azure AD test user + +In this section, you'll create a test user in the Azure portal called B.Simon. + +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. 
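Because TVU Service matches users on the email address rather than the default user principal name, it can help to confirm what actually leaves Azure AD in the assertion. This is an optional sketch, not part of the official steps: it assumes you captured the base64-encoded `SAMLResponse` value from a browser trace (for example, the developer tools network tab), saved it to a file with a hypothetical name, and that the assertion is not encrypted.

```python
# Decode a captured SAMLResponse and list the NameID plus the attribute claims,
# so you can check the email-based identifier and the surname/firstName/lastName/email
# attributes described in the table above.
import base64
import xml.etree.ElementTree as ET

NS = {"saml": "urn:oasis:names:tc:SAML:2.0:assertion"}

with open("saml-response.b64") as f:  # hypothetical file holding the SAMLResponse value
    xml_bytes = base64.b64decode(f.read())

root = ET.fromstring(xml_bytes)

name_id = root.find(".//saml:Subject/saml:NameID", NS)
print("NameID:", name_id.text if name_id is not None else "not found")

for attr in root.findall(".//saml:AttributeStatement/saml:Attribute", NS):
    values = [v.text or "" for v in attr.findall("saml:AttributeValue", NS)]
    print(attr.get("Name"), "=", ", ".join(values))
```

If the NameID is not the user's email address, revisit the **Unique User Identifier** mapping described earlier before testing the sign-in.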
+ +### Assign the Azure AD test user + +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to TVU Service. + +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **TVU Service**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. + +## Configure TVU Service SSO + +To configure single sign-on on **TVU Service** side, you need to send the **App Federation Metadata Url** to [TVU Service support team](mailto:support@tvunetworks.com). They set this setting to have the SAML SSO connection set properly on both sides. + +### Create TVU Service test user + +In this section, you create a user called Britta Simon in TVU Service. Work with [TVU Service support team](mailto:support@tvunetworks.com) to add the users in the TVU Service platform. Users must be created and activated before you use single sign-on. + +## Test SSO + +In this section, you test your Azure AD single sign-on configuration with following options. + +* Click on Test this application in Azure portal and you should be automatically signed in to the TVU Service for which you set up the SSO. + +* You can use Microsoft My Apps. When you click the TVU Service tile in the My Apps, you should be automatically signed in to the TVU Service for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). + +## Next steps + +Once you configure TVU Service you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Defender for Cloud Apps](/cloud-app-security/proxy-deployment-any-app). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/versal-tutorial.md b/articles/active-directory/saas-apps/versal-tutorial.md index 00524203b3fef..e26ba853e601f 100644 --- a/articles/active-directory/saas-apps/versal-tutorial.md +++ b/articles/active-directory/saas-apps/versal-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory single sign-on (SSO) integration with Versal | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with Versal' description: Learn how to configure single sign-on between Azure Active Directory and Versal. services: active-directory author: jeevansd @@ -9,11 +9,11 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 12/10/2019 +ms.date: 06/07/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory single sign-on (SSO) integration with Versal +# Tutorial: Azure AD SSO integration with Versal In this tutorial, you'll learn how to integrate Versal with Azure Active Directory (Azure AD). 
When you integrate Versal with Azure AD, you can: @@ -21,42 +21,40 @@ In this tutorial, you'll learn how to integrate Versal with Azure Active Directo * Enable your users to be automatically signed-in to Versal with their Azure AD accounts. * Manage your accounts in one central location - the Azure portal. -To learn more about SaaS app integration with Azure AD, see [What is application access and single sign-on with Azure Active Directory](../manage-apps/what-is-single-sign-on.md). - ## Prerequisites To get started, you need the following items: * An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). * Versal single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD SSO in a test environment. - -* Versal supports **IDP** initiated SSO +* Versal supports **IDP** initiated SSO. > [!NOTE] > Identifier of this application is a fixed string value so only one instance can be configured in one tenant. -## Adding Versal from the gallery +## Add Versal from the gallery To configure the integration of Versal into Azure AD, you need to add Versal from the gallery to your list of managed SaaS apps. -1. Sign in to the [Azure portal](https://portal.azure.com) using either a work or school account, or a personal Microsoft account. +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. 1. On the left navigation pane, select the **Azure Active Directory** service. 1. Navigate to **Enterprise Applications** and then select **All Applications**. 1. To add new application, select **New application**. 1. In the **Add from the gallery** section, type **Versal** in the search box. 1. Select **Versal** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. - -## Configure and test Azure AD single sign-on for Versal +## Configure and test Azure AD SSO for Versal Configure and test Azure AD SSO with Versal using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in Versal. -To configure and test Azure AD SSO with Versal, complete the following building blocks: +To configure and test Azure AD SSO with Versal, perform the following steps: 1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. @@ -69,15 +67,15 @@ To configure and test Azure AD SSO with Versal, complete the following building Follow these steps to enable Azure AD SSO in the Azure portal. -1. In the [Azure portal](https://portal.azure.com/), on the **Versal** application integration page, find the **Manage** section and select **single sign-on**. +1. In the Azure portal, on the **Versal** application integration page, find the **Manage** section and select **single sign-on**. 1. On the **Select a single sign-on method** page, select **SAML**. -1. On the **Set up single sign-on with SAML** page, click the edit/pen icon for **Basic SAML Configuration** to edit the settings. +1. 
On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") -1. On the **Set up single sign-on with SAML** page, enter the values for the following fields: +1. On the **Basic SAML Configuration** page, perform the following steps: - a. In the **Identifier** text box, type a URL: + a. In the **Identifier** text box, type the value: `VERSAL` b. In the **Reply URL** text box, type a URL using the following pattern: @@ -88,15 +86,15 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. Versal application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes, where as **nameidentifier** is mapped with **user.userprincipalname**. Versal application expects **nameidentifier** to be mapped with **user.mail**, so you need to edit the attribute mapping by clicking on **Edit** icon and change the attribute mapping. - ![image](common/edit-attribute.png) + ![Screenshot shows the image of Versal application.](common/edit-attribute.png "Attributes") 1. On the **Set up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Federation Metadata XML** and select **Download** to download the certificate and save it on your computer. - ![The Certificate download link](common/metadataxml.png) + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") 1. On the **Set up Versal** section, copy the appropriate URL(s) based on your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) + ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Metadata") ### Create an Azure AD test user @@ -117,13 +115,7 @@ In this section, you'll enable B.Simon to use Azure single sign-on by granting a 1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. 1. In the applications list, select **Versal**. 1. In the app's overview page, find the **Manage** section and select **Users and groups**. - - ![The "Users and groups" link](common/users-groups-blade.png) - 1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. - - ![The Add User link](common/add-assign-user.png) - 1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. 1. If you're expecting any role value in the SAML assertion, in the **Select Role** dialog, select the appropriate role for the user from the list and then click the **Select** button at the bottom of the screen. 1. In the **Add Assignment** dialog, click the **Assign** button. @@ -147,12 +139,6 @@ You will need to create a course, share it with your organization, and publish i Please see [Creating a course](https://support.versal.com/hc/articles/203722528-Create-a-course), [Publishing a course](https://support.versal.com/hc/articles/203753398-Publishing-a-course), and [Course and learner management](https://support.versal.com/hc/articles/206029467-Course-and-learner-management) for more information. 
-## Additional resources - -- [ List of Tutorials on How to Integrate SaaS Apps with Azure Active Directory ](./tutorial-list.md) - -- [What is application access and single sign-on with Azure Active Directory? ](../manage-apps/what-is-single-sign-on.md) - -- [What is conditional access in Azure Active Directory?](../conditional-access/overview.md) +## Next steps -- [Try Versal with Azure AD](https://aad.portal.azure.com/) \ No newline at end of file +Once you configure Versal you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/whimsical-provisioning-tutorial.md b/articles/active-directory/saas-apps/whimsical-provisioning-tutorial.md new file mode 100644 index 0000000000000..84bfdfa401f09 --- /dev/null +++ b/articles/active-directory/saas-apps/whimsical-provisioning-tutorial.md @@ -0,0 +1,132 @@ +--- +title: 'Tutorial: Configure Whimsical for automatic user provisioning with Azure Active Directory | Microsoft Docs' +description: Learn how to automatically provision and de-provision user accounts from Azure AD to Whimsical. +services: active-directory +author: twimmers +writer: twimmers +manager: beatrizd +ms.assetid: 4457a724-ed81-4f7b-bb3e-70beea80cb51 +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 05/11/2022 +ms.author: thwimmer +--- + +# Tutorial: Configure Whimsical for automatic user provisioning + +This tutorial describes the steps you need to perform in both Whimsical and Azure Active Directory (Azure AD) to configure automatic user provisioning. When configured, Azure AD automatically provisions and de-provisions users and groups to [Whimsical](https://service-portaltest.benq.com/login) using the Azure AD Provisioning service. For important details on what this service does, how it works, and frequently asked questions, see [Automate user provisioning and deprovisioning to SaaS applications with Azure Active Directory](../app-provisioning/user-provisioning.md). + +## Capabilities Supported +> [!div class="checklist"] +> * Create users in Whimsical +> * Remove users in Whimsical when they do not require access anymore +> * Keep user attributes synchronized between Azure AD and Whimsical +> * [Single sign-on](benq-iam-tutorial.md) to Whimsical (recommended) + +## Prerequisites + +The scenario outlined in this tutorial assumes that you already have the following prerequisites: + +* [An Azure AD tenant](../develop/quickstart-create-new-tenant.md) +* A user account in Azure AD with [permission](../roles/permissions-reference.md) to configure provisioning (e.g. Application Administrator, Cloud Application administrator, Application Owner, or Global Administrator). +* To use SCIM, SAML has to be enabled and correctly configured. + +## Step 1. Plan your provisioning deployment +1. Learn about [how the provisioning service works](../app-provisioning/user-provisioning.md). +2. Determine who will be in [scope for provisioning](../app-provisioning/define-conditional-rules-for-provisioning-user-accounts.md). +3. Determine what data to [map between Azure AD and Whimsical](../app-provisioning/customize-application-attributes.md). + +## Step 2. Configure Whimsical to support provisioning with Azure AD +1. 
To enable SCIM, you must first set up SAML SSO with Azure AD. +1. Go to **Workspace Settings**, which you'll find under your workspace name in the top left. +1. Enable SCIM provisioning and click **Reveal** to retrieve the token. +1. In the **Provisioning** tab in Azure AD, set **Provisioning Mode** to **Automatic**, and paste `https://whimsical.com/public-api/scim-v2/?aadOptscim062020` into the **Tenant URL** field. + +## Step 3. Add Whimsical from the Azure AD application gallery + +Add Whimsical from the Azure AD application gallery to start managing provisioning to Whimsical. If you have previously set up Whimsical for SSO, you can use the same application. However it is recommended that you create a separate app when testing out the integration initially. Learn more about adding an application from the gallery [here](../manage-apps/add-application-portal.md). + +## Step 4. Define who will be in scope for provisioning + +The Azure AD provisioning service allows you to scope who will be provisioned based on assignment to the application and or based on attributes of the user / group. If you choose to scope who will be provisioned to your app based on assignment, you can use the following [steps](../manage-apps/assign-user-or-group-access-portal.md) to assign users and groups to the application. If you choose to scope who will be provisioned based solely on attributes of the user or group, you can use a scoping filter as described [here](../app-provisioning/define-conditional-rules-for-provisioning-user-accounts.md). + +* When assigning users and groups to Whimsical, you must select a role other than **Default Access**. Users with the Default Access role are excluded from provisioning and will be marked as not effectively entitled in the provisioning logs. If the only role available on the application is the default access role, you can [update the application manifest](../develop/howto-add-app-roles-in-azure-ad-apps.md) to add additional roles. + +* Start small. Test with a small set of users and groups before rolling out to everyone. When scope for provisioning is set to assigned users and groups, you can control this by assigning one or two users or groups to the app. When scope is set to all users and groups, you can specify an [attribute based scoping filter](../app-provisioning/define-conditional-rules-for-provisioning-user-accounts.md). + +## Step 5. Configure automatic user provisioning to Whimsical + +This section guides you through the steps to configure the Azure AD provisioning service to create, update, and disable users and/or groups in Whimsical based on user and/or group assignments in Azure AD. + +### To configure automatic user provisioning for Whimsical in Azure AD: + +1. Sign in to the [Azure portal](https://portal.azure.com). Select **Enterprise Applications**, then select **All applications**. + + ![Enterprise applications blade](common/enterprise-applications.png) + +2. In the applications list, select **Whimsical**. + + ![The Whimsical link in the Applications list](common/all-applications.png) + +3. Select the **Provisioning** tab. + + ![Provision tab](common/provisioning.png) + +4. Set the **Provisioning Mode** to **Automatic**. + + ![Provisioning tab](common/provisioning-automatic.png) + +5. Under the **Admin Credentials** section, input your Whimsical Tenant URL and Secret Token. Click **Test Connection** to ensure Azure AD can connect to Whimsical. If the connection fails, ensure your Whimsical account has Admin permissions and try again.
+ + ![Token](common/provisioning-testconnection-tenanturltoken.png) + +6. In the **Notification Email** field, enter the email address of a person or group who should receive the provisioning error notifications and select the **Send an email notification when a failure occurs** check box. + + ![Notification Email](common/provisioning-notification-email.png) + +7. Select **Save**. + +8. Under the **Mappings** section, select **Synchronize Azure Active Directory Users to Whimsical**. + +9. Review the user attributes that are synchronized from Azure AD to Whimsical in the **Attribute-Mapping** section. The attributes selected as **Matching** properties are used to match the user accounts in Whimsical for update operations. If you choose to change the [matching target attribute](../app-provisioning/customize-application-attributes.md), you will need to ensure that the Whimsical API supports filtering users based on that attribute. Select the **Save** button to commit any changes. + + |Attribute|Type|Supported for filtering| + |---|---|---| + |userName|String|✓ + |externalId|String| + |active|Boolean| + |displayName|String| + +10. To configure scoping filters, refer to the following instructions provided in the [Scoping filter tutorial](../app-provisioning/define-conditional-rules-for-provisioning-user-accounts.md). + +11. To enable the Azure AD provisioning service for Whimsical, change the **Provisioning Status** to **On** in the **Settings** section. + + ![Provisioning Status Toggled On](common/provisioning-toggle-on.png) + +12. Define the users and/or groups that you would like to provision to Whimsical by choosing the desired values in **Scope** in the **Settings** section. + + ![Provisioning Scope](common/provisioning-scope.png) + +13. When you are ready to provision, click **Save**. + + ![Saving Provisioning Configuration](common/provisioning-configuration-save.png) + +This operation starts the initial synchronization cycle of all users and groups defined in **Scope** in the **Settings** section. The initial cycle takes longer to perform than subsequent cycles, which occur approximately every 40 minutes as long as the Azure AD provisioning service is running. + +## Step 6. Monitor your deployment +Once you've configured provisioning, use the following resources to monitor your deployment: + +1. Use the [provisioning logs](../reports-monitoring/concept-provisioning-logs.md) to determine which users have been provisioned successfully or unsuccessfully +2. Check the [progress bar](../app-provisioning/application-provisioning-when-will-provisioning-finish-specific-user.md) to see the status of the provisioning cycle and how close it is to completion +3. If the provisioning configuration seems to be in an unhealthy state, the application will go into quarantine. Learn more about quarantine states [here](../app-provisioning/application-provisioning-quarantine-status.md). 
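For reference when you review the provisioning logs, the default attribute mappings from step 5 translate into SCIM 2.0 requests against the Whimsical tenant URL. The following is a minimal sketch of the kind of user object the provisioning service sends on a create operation; the attribute values are purely illustrative, and the exact payload can vary with your mapping configuration.

```json
{
  "schemas": ["urn:ietf:params:scim:schemas:core:2.0:User"],
  "externalId": "b.simon",
  "userName": "b.simon@contoso.com",
  "displayName": "B. Simon",
  "active": true
}
```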
+ +## Additional resources + +* [Managing user account provisioning for Enterprise Apps](../app-provisioning/configure-automatic-user-provisioning-portal.md) +* [What is application access and single sign-on with Azure Active Directory?](../manage-apps/what-is-single-sign-on.md) + +## Next steps + +* [Learn how to review logs and get reports on provisioning activity](../app-provisioning/check-status-user-account-provisioning.md) diff --git a/articles/active-directory/saas-apps/xcarrier-tutorial.md b/articles/active-directory/saas-apps/xcarrier-tutorial.md new file mode 100644 index 0000000000000..b235ba072efdf --- /dev/null +++ b/articles/active-directory/saas-apps/xcarrier-tutorial.md @@ -0,0 +1,142 @@ +--- +title: 'Tutorial: Azure AD SSO integration with xCarrier®' +description: Learn how to configure single sign-on between Azure Active Directory and xCarrier®. +services: active-directory +author: jeevansd +manager: CelesteDG +ms.reviewer: CelesteDG +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 05/30/2022 +ms.author: jeedes + +--- + +# Tutorial: Azure AD SSO integration with xCarrier® + +In this tutorial, you'll learn how to integrate xCarrier® with Azure Active Directory (Azure AD). When you integrate xCarrier® with Azure AD, you can: + +* Control in Azure AD who has access to xCarrier®. +* Enable your users to be automatically signed-in to xCarrier® with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. + +## Prerequisites + +To get started, you need the following items: + +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* xCarrier® single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). + +## Scenario description + +In this tutorial, you configure and test Azure AD SSO in a test environment. + +* xCarrier® supports **SP** and **IDP** initiated SSO. + +> [!NOTE] +> Identifier of this application is a fixed string value so only one instance can be configured in one tenant. + +## Add xCarrier® from the gallery + +To configure the integration of xCarrier® into Azure AD, you need to add xCarrier® from the gallery to your list of managed SaaS apps. + +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **xCarrier®** in the search box. +1. Select **xCarrier®** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. + +## Configure and test Azure AD SSO for xCarrier® + +Configure and test Azure AD SSO with xCarrier® using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in xCarrier®. + +To configure and test Azure AD SSO with xCarrier®, perform the following steps: + +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. 
**[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure xCarrier® SSO](#configure-xcarrier-sso)** - to configure the single sign-on settings on application side. + 1. **[Create xCarrier® test user](#create-xcarrier-test-user)** - to have a counterpart of B.Simon in xCarrier® that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. + +## Configure Azure AD SSO + +Follow these steps to enable Azure AD SSO in the Azure portal. + +1. In the Azure portal, on the **xCarrier®** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") + +1. On the **Basic SAML Configuration** section, the user does not have to perform any step as the app is already pre-integrated with Azure. + +1. Click **Set additional URLs** and perform the following step if you wish to configure the application in **SP** initiated mode: + + In the **Sign-on URL** text box, type the URL: + `https://msdev.myxcarrier.com/Home/Index` + +1. On the **Set-up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Federation Metadata XML** and select **Download** to download the certificate and save it on your computer. + + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") + +1. On the **Set up xCarrier®** section, copy the appropriate URL(s) based on your requirement. + + ![Screenshot shows to copy configuration appropriate U R L.](common/copy-configuration-urls.png "Metadata") + +### Create an Azure AD test user + +In this section, you'll create a test user in the Azure portal called B.Simon. + +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. + +### Assign the Azure AD test user + +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to xCarrier®. + +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **xCarrier®**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. 
In the **Add Assignment** dialog, click the **Assign** button. + +## Configure xCarrier® SSO + +To configure single sign-on on **xCarrier®** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [xCarrier® support team](mailto:pw_support@elemica.com). They set this setting to have the SAML SSO connection set properly on both sides. + +### Create xCarrier® test user + +In this section, a user called B.Simon is created in xCarrier®. xCarrier® supports just-in-time user provisioning, which is enabled by default. There's no action item for you in this section. If a user doesn't already exist in xCarrier®, a new one is created after authentication. + +## Test SSO + +In this section, you test your Azure AD single sign-on configuration with following options. + +#### SP initiated: + +* Click on **Test this application** in Azure portal. This will redirect to xCarrier® Sign on URL where you can initiate the login flow. + +* Go to xCarrier® Sign-on URL directly and initiate the login flow from there. + +#### IDP initiated: + +* Click on **Test this application** in Azure portal and you should be automatically signed in to the xCarrier® for which you set up the SSO. + +You can also use Microsoft My Apps to test the application in any mode. When you click the xCarrier® tile in the My Apps, if configured in SP mode you would be redirected to the application sign on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the xCarrier® for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). + +## Next steps + +Once you configure xCarrier® you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). \ No newline at end of file diff --git a/articles/active-directory/standards/configure-azure-active-directory-for-fedramp-high-impact.md b/articles/active-directory/standards/configure-azure-active-directory-for-fedramp-high-impact.md index 567d1bfd3129d..76a94ca2a2d74 100644 --- a/articles/active-directory/standards/configure-azure-active-directory-for-fedramp-high-impact.md +++ b/articles/active-directory/standards/configure-azure-active-directory-for-fedramp-high-impact.md @@ -52,7 +52,7 @@ The following is a list of FedRAMP resources: * [FedRAMP High Azure Policy built-in initiative definition](../../governance/policy/samples/fedramp-high.md) -* [Microsoft 365 compliance center](/microsoft-365/compliance/microsoft-365-compliance-center) +* [Microsoft Purview compliance portal](/microsoft-365/compliance/microsoft-365-compliance-center) * [Microsoft Compliance Manager](/microsoft-365/compliance/compliance-manager) diff --git a/articles/active-directory/standards/index.yml b/articles/active-directory/standards/index.yml index 2fed3e9248b71..e959921098df7 100644 --- a/articles/active-directory/standards/index.yml +++ b/articles/active-directory/standards/index.yml @@ -1,18 +1,19 @@ ### YamlMime:Landing -title: Azure Active Directory configuring to standards documentation -summary: Azure and Azure Active Directory enable you to leverage their compliance certifications and configure your environment to meet governmental and industry standards. 
+title: Implement identity standards with Azure Active Directory +summary: Azure and Azure Active Directory offer compliance certifications. Learn to configure your environment to meet governmental and industry standards. metadata: - title: Azure Active Directory configuring to standards documentation - description: "Learn to configure Azure and Azure Active directory to meet governmental and industry standards." + title: Implement identity standards with Azure Active Directory + description: "Azure and Azure Active Directory offer compliance certifications. Learn to configure your environment to meet governmental and industry standards." manager: mtillman ms.author: baselden ms.collection: na - ms.date: 04/26/2021 + ms.date: 04/29/2022 ms.service: active-directory ms.subservice: na ms.topic: landing-page + ms.custom: kr2b-contr-experiment services: active-directory # linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new @@ -29,7 +30,19 @@ landingContent: url: nist-overview.md - text: FedRAMP High impact overview url: configure-azure-active-directory-for-fedramp-high-impact.md - + + # Card + - title: Understand NIST AALs + linkLists: + - linkListType: overview + links: + - text: Authenticator assurance levels + url: nist-about-authenticator-assurance-levels.md + - text: Authenticator basics + url: nist-authentication-basics.md + - text: Types and methods + url: nist-authenticator-types.md + # Card - title: Configure NIST AALs linkLists: @@ -54,3 +67,16 @@ landingContent: - text: Configure additional controls url: fedramp-other-controls.md + # Card + - title: Meet identity requirements of memorandum 22-09 + linkLists: + - linkListType: how-to-guide + links: + - text: Enterprise-wide identity management system + url: memo-22-09-enterprise-wide-identity-management-system.md + - text: Meet multifactor authentication requirements + url: memo-22-09-multi-factor-authentication.md + - text: Meet authorization requirements + url: memo-22-09-authorization.md + - text: Other areas of Zero Trust + url: memo-22-09-other-areas-zero-trust.md diff --git a/articles/active-directory/standards/memo-22-09-multi-factor-authentication.md b/articles/active-directory/standards/memo-22-09-multi-factor-authentication.md index b52ee62e51a02..d333e23677a1b 100644 --- a/articles/active-directory/standards/memo-22-09-multi-factor-authentication.md +++ b/articles/active-directory/standards/memo-22-09-multi-factor-authentication.md @@ -35,7 +35,7 @@ U.S. Federal agencies will be approaching this guidance from different starting - **[Azure AD certificate-based authentication](../authentication/concept-certificate-based-authentication.md)** offers cloud native certificate based authentication (without dependency on a federated identity provider). This includes smart card implementations such as Common Access Card (CAC) & Personal Identity Verification (PIV) as well as derived PIV credentials deployed to mobile devices or security keys -- **[Windows Hello for Business](/windows/security/identity-protection/hello-for-business/hello-overview)** offers passwordless multifactor authentication that is phishing-resistant. 
For more information, see the [Windows Hello for Business Deployment Overview](https://docs.microsoft.com/windows/security/identity-protection/hello-for-business/hello-deployment-guide) +- **[Windows Hello for Business](/windows/security/identity-protection/hello-for-business/hello-overview)** offers passwordless multifactor authentication that is phishing-resistant. For more information, see the [Windows Hello for Business Deployment Overview](/windows/security/identity-protection/hello-for-business/hello-deployment-guide) ### Protection from external phishing @@ -75,8 +75,8 @@ For more information on deploying this method, see the following resources: For more information on deploying this method, see the following resources: -- [Deploying Active Directory Federation Services in Azure](https://docs.microsoft.com/windows-server/identity/ad-fs/deployment/how-to-connect-fed-azure-adfs) -- [Configuring AD FS for user certificate authentication](https://docs.microsoft.com/windows-server/identity/ad-fs/operations/configure-user-certificate-authentication) +- [Deploying Active Directory Federation Services in Azure](/windows-server/identity/ad-fs/deployment/how-to-connect-fed-azure-adfs) +- [Configuring AD FS for user certificate authentication](/windows-server/identity/ad-fs/operations/configure-user-certificate-authentication) ### Additional phishing-resistant method considerations @@ -164,4 +164,4 @@ The following articles are part of this documentation set: For more information about Zero Trust, see: -[Securing identity with Zero Trust](/security/zero-trust/deploy/identity) +[Securing identity with Zero Trust](/security/zero-trust/deploy/identity) \ No newline at end of file diff --git a/articles/active-directory/verifiable-credentials/credential-design.md b/articles/active-directory/verifiable-credentials/credential-design.md index 8b7a6b91d5294..136e6bc56e1d7 100644 --- a/articles/active-directory/verifiable-credentials/credential-design.md +++ b/articles/active-directory/verifiable-credentials/credential-design.md @@ -14,6 +14,8 @@ ms.author: barclayn # How to customize your verifiable credentials (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + Verifiable credentials are made up of two components, the rules and display files. The rules file determines what the user needs to provide before they receive a verifiable credential. The display file controls the branding of the credential and styling of the claims. In this guide, we will explain how to modify both files to meet the requirements of your organization. > [!IMPORTANT] diff --git a/articles/active-directory/verifiable-credentials/decentralized-identifier-overview.md b/articles/active-directory/verifiable-credentials/decentralized-identifier-overview.md index 6890ecc203472..e420a1c03b24b 100644 --- a/articles/active-directory/verifiable-credentials/decentralized-identifier-overview.md +++ b/articles/active-directory/verifiable-credentials/decentralized-identifier-overview.md @@ -15,6 +15,8 @@ ms.reviewer: # Introduction to Azure Active Directory Verifiable Credentials (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + > [!IMPORTANT] > Azure Active Directory Verifiable Credentials is currently in public preview. > This preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. 
@@ -96,7 +98,7 @@ The scenario we use to explain how VCs work involves: Today, Alice provides a username and password to log onto Woodgrove’s networked environment. Woodgrove is deploying a verifiable credential solution to provide a more manageable way for Alice to prove that she is an employee of Woodgrove. Proseware accepts verifiable credentials issued by Woodgrove as proof of employment to offer corporate discounts as part of their corporate discount program. -Alice requests Woodgrove Inc for a proof of employment verifiable credential. Woodgrove Inc attests Alice's identiy and issues a signed verfiable credential that Alice can accept and store in her digital wallet application. Alice can now present this verifiable credential as a proof of employement on the Proseware site. After a succesfull presentation of the credential, Prosware offers discount to Alice and the transaction is logged in Alice's wallet application so that she can track where and to whom she has presented her proof of employment verifiable credential. +Alice requests a proof of employment verifiable credential from Woodgrove Inc. Woodgrove Inc attests Alice's identity and issues a signed verifiable credential that Alice can accept and store in her digital wallet application. Alice can now present this verifiable credential as proof of employment on the Proseware site. After a successful presentation of the credential, Proseware offers a discount to Alice and the transaction is logged in Alice's wallet application so that she can track where and to whom she has presented her proof of employment verifiable credential. ![microsoft-did-overview](media/decentralized-identifier-overview/did-overview.png) @@ -106,7 +108,7 @@ There are three primary actors in the verifiable credential solution. In the fol - **Step 1**, the **user** requests a verifiable credential from an issuer. - **Step 2**, the **issuer** of the credential attests that the proof the user provided is accurate and creates a verifiable credential signed with their DID and the user’s DID is the subject. -- **In Step 3**, the user signs a verifiable presentation (VP) with their DID and sends to the **verifier.** The verifier then validates of the credential by matching with the public key placed in the DPKI. +- **In Step 3**, the user signs a verifiable presentation (VP) with their DID and sends to the **verifier.** The verifier then validates the credential by matching with the public key placed in the DPKI. The roles in this scenario are: diff --git a/articles/active-directory/verifiable-credentials/get-started-request-api.md index 69735ef9b6419..bf9404d37c36a 100644 --- a/articles/active-directory/verifiable-credentials/get-started-request-api.md +++ b/articles/active-directory/verifiable-credentials/get-started-request-api.md @@ -1,6 +1,5 @@ --- title: How to call the Request Service REST API (preview) -titleSuffix: Azure Active Directory Verifiable Credentials description: Learn how to issue and verify by using the Request Service REST API documentationCenter: '' author: barclayn @@ -16,6 +15,8 @@ ms.author: barclayn # Request Service REST API (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + Azure Active Directory (Azure AD) Verifiable Credentials includes the Request Service REST API. This API allows you to issue and verify credentials. This article shows you how to start using the Request Service REST API.
> [!IMPORTANT] diff --git a/articles/active-directory/verifiable-credentials/how-to-create-a-free-developer-account.md b/articles/active-directory/verifiable-credentials/how-to-create-a-free-developer-account.md index d536c8818e309..bf652ab7b48c9 100644 --- a/articles/active-directory/verifiable-credentials/how-to-create-a-free-developer-account.md +++ b/articles/active-directory/verifiable-credentials/how-to-create-a-free-developer-account.md @@ -14,6 +14,8 @@ ms.author: barclayn # How to create a free Azure Active Directory developer tenant +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + > [!IMPORTANT] > Azure Active Directory Verifiable Credentials is currently in public preview. > This preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. diff --git a/articles/active-directory/verifiable-credentials/how-to-dnsbind.md b/articles/active-directory/verifiable-credentials/how-to-dnsbind.md index 4ff5751dffb66..130809e7d03b5 100644 --- a/articles/active-directory/verifiable-credentials/how-to-dnsbind.md +++ b/articles/active-directory/verifiable-credentials/how-to-dnsbind.md @@ -15,6 +15,8 @@ ms.author: barclayn # Link your domain to your Decentralized Identifier (DID) (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + > [!IMPORTANT] > Azure Active Directory Verifiable Credentials is currently in public preview. > This preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. diff --git a/articles/active-directory/verifiable-credentials/how-to-issuer-revoke.md b/articles/active-directory/verifiable-credentials/how-to-issuer-revoke.md index 804bf9560befc..a2b9887454cbc 100644 --- a/articles/active-directory/verifiable-credentials/how-to-issuer-revoke.md +++ b/articles/active-directory/verifiable-credentials/how-to-issuer-revoke.md @@ -15,6 +15,8 @@ ms.author: barclayn # Revoke a previously issued verifiable credential (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + As part of the process of working with verifiable credentials (VCs), you not only have to issue credentials, but sometimes you also have to revoke them. In this article we go over the **Status** property part of the VC specification and take a closer look at the revocation process, why we may want to revoke credentials and some data and privacy implications. > [!IMPORTANT] diff --git a/articles/active-directory/verifiable-credentials/how-to-opt-out.md b/articles/active-directory/verifiable-credentials/how-to-opt-out.md index 5f099618df8fe..d1cc625b7f6f0 100644 --- a/articles/active-directory/verifiable-credentials/how-to-opt-out.md +++ b/articles/active-directory/verifiable-credentials/how-to-opt-out.md @@ -15,6 +15,8 @@ ms.author: barclayn # Opt out of the verifiable credentials (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + In this article: - The reason why you may need to opt out. 
diff --git a/articles/active-directory/verifiable-credentials/index.yml b/articles/active-directory/verifiable-credentials/index.yml index a280c4354d336..78e144a5780d9 100644 --- a/articles/active-directory/verifiable-credentials/index.yml +++ b/articles/active-directory/verifiable-credentials/index.yml @@ -1,7 +1,7 @@ ### YamlMime:Landing -title: Azure Active Directory Verifiable Credentials documentation (preview) -summary: Verifiable credentials helps you build solutions that empower customers to manage their own data. +title: Microsoft Entra Verified ID documentation (preview) +summary: Azure Active Directory Verifiable Credentials is now Microsoft Entra Verified ID and part of the Microsoft Entra family of products. We'll be updating our documentation in the next few months as we move toward general availability. metadata: author: barclayn diff --git a/articles/active-directory/verifiable-credentials/introduction-to-verifiable-credentials-architecture.md b/articles/active-directory/verifiable-credentials/introduction-to-verifiable-credentials-architecture.md index 46cac82dc69a0..f564a0a8a59c6 100644 --- a/articles/active-directory/verifiable-credentials/introduction-to-verifiable-credentials-architecture.md +++ b/articles/active-directory/verifiable-credentials/introduction-to-verifiable-credentials-architecture.md @@ -13,6 +13,8 @@ ms.author: baselden # Azure AD Verifiable Credentials architecture overview (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + > [!IMPORTANT] > Azure Active Directory Verifiable Credentials is currently in public preview. This preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. For more information, see [**Supplemental Terms of Use for Microsoft Azure Previews**](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). diff --git a/articles/active-directory/verifiable-credentials/issuance-request-api.md b/articles/active-directory/verifiable-credentials/issuance-request-api.md index 180225004608d..399d83afcf013 100644 --- a/articles/active-directory/verifiable-credentials/issuance-request-api.md +++ b/articles/active-directory/verifiable-credentials/issuance-request-api.md @@ -1,6 +1,5 @@ --- title: Specify the Request Service REST API issuance request (preview) -titleSuffix: Azure Active Directory Verifiable Credentials description: Learn how to issue a verifiable credential that you've issued. documentationCenter: '' author: barclayn @@ -16,6 +15,8 @@ ms.author: barclayn # Request Service REST API issuance specification (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + Azure Active Directory (Azure AD) Verifiable Credentials includes the Request Service REST API. This API allows you to issue and verify a credential. This article specifies the Request Service REST API for an issuance request. 
## HTTP request diff --git a/articles/active-directory/verifiable-credentials/issuer-openid.md b/articles/active-directory/verifiable-credentials/issuer-openid.md index 59374980c2a0e..c4f7384130cfa 100644 --- a/articles/active-directory/verifiable-credentials/issuer-openid.md +++ b/articles/active-directory/verifiable-credentials/issuer-openid.md @@ -15,6 +15,8 @@ ms.author: barclayn # Issuer service communication examples (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + The Azure AD Verifiable Credential service can issue verifiable credentials by retrieving claims from an ID token generated by your organization's OpenID compliant identity provider. This article instructs you on how to set up your identity provider so Authenticator can communicate with it and retrieve the correct ID Token to pass to the issuing service. > [!IMPORTANT] diff --git a/articles/active-directory/verifiable-credentials/plan-issuance-solution.md b/articles/active-directory/verifiable-credentials/plan-issuance-solution.md index 0195706a75dc9..77753973d3ddd 100644 --- a/articles/active-directory/verifiable-credentials/plan-issuance-solution.md +++ b/articles/active-directory/verifiable-credentials/plan-issuance-solution.md @@ -14,6 +14,8 @@ ms.custom: references_regions # Plan your Azure Active Directory Verifiable Credentials issuance solution (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + >[!IMPORTANT] > Azure Active Directory Verifiable Credentials is currently in public preview. This preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. For more information, see [**Supplemental Terms of Use for Microsoft Azure Previews**](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). diff --git a/articles/active-directory/verifiable-credentials/plan-verification-solution.md b/articles/active-directory/verifiable-credentials/plan-verification-solution.md index d50bdb986d8a0..1a772da51745f 100644 --- a/articles/active-directory/verifiable-credentials/plan-verification-solution.md +++ b/articles/active-directory/verifiable-credentials/plan-verification-solution.md @@ -14,6 +14,8 @@ ms.custom: references_regions # Plan your Azure Active Directory Verifiable Credentials verification solution (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + >[!IMPORTANT] > Azure Active Directory Verifiable Credentials is currently in public preview. This preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). 
diff --git a/articles/active-directory/verifiable-credentials/presentation-request-api.md b/articles/active-directory/verifiable-credentials/presentation-request-api.md index 51f43fb45ef36..75ea8cc1d1876 100644 --- a/articles/active-directory/verifiable-credentials/presentation-request-api.md +++ b/articles/active-directory/verifiable-credentials/presentation-request-api.md @@ -1,6 +1,5 @@ --- title: Specify the Request Service REST API verify request (preview) -titleSuffix: Azure Active Directory Verifiable Credentials description: Learn how to start a presentation request in Verifiable Credentials documentationCenter: '' author: barclayn @@ -8,7 +7,7 @@ manager: rkarlin ms.service: decentralized-identity ms.topic: reference ms.subservice: verifiable-credentials -ms.date: 10/08/2021 +ms.date: 05/26/2022 ms.author: barclayn #Customer intent: As an administrator, I am trying to learn the process of revoking verifiable credentials that I have issued. @@ -16,6 +15,8 @@ ms.author: barclayn # Request Service REST API presentation specification (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + Azure Active Directory (Azure AD) Verifiable Credentials includes the Request Service REST API. This API allows you to issue and verify a credential. This article specifies the Request Service REST API for a presentation request. The presentation request asks the user to present a verifiable credential, and then verify the credential. ## HTTP request @@ -46,18 +47,18 @@ POST https://beta.did.msidentity.com/v1.0/contoso.onmicrosoft.com/verifiablecred Content-Type: application/json Authorization: Bearer -{ -    "includeQRCode": true, - "callback": { -    "url": "https://www.contoso.com/api/verifier/presentationCallbac", -    "state": "11111111-2222-2222-2222-333333333333", -      "headers": { -        "api-key": "an-api-key-can-go-here" -      } -    }, +{ +    "includeQRCode": true, + "callback": { +    "url": "https://www.contoso.com/api/verifier/presentationCallbac", +    "state": "11111111-2222-2222-2222-333333333333", +      "headers": { +        "api-key": "an-api-key-can-go-here" +      } +    },     ... -} -``` +} +``` The following permission is required to call the Request Service REST API. For more information, see [Grant permissions to get access tokens](verifiable-credentials-configure-tenant.md#grant-permissions-to-get-access-tokens). @@ -98,7 +99,7 @@ The presentation request payload contains information about your verifiable cred } ``` -The payload contains the following properties. +The payload contains the following properties. |Parameter |Type | Description | |---------|---------|---------| @@ -154,12 +155,12 @@ The Request Service REST API generates several events to the callback endpoint. If successful, this method returns a response code (*HTTP 201 Created*), and a collection of event objects in the response body. 
The following JSON demonstrates a successful response: ```json -{ +{ "requestId": "e4ef27ca-eb8c-4b63-823b-3b95140eac11", "url": "openid://vc/?request_uri=https://beta.did.msidentity.com/v1.0/87654321-0000-0000-0000-000000000000/verifiablecredentials/request/e4ef27ca-eb8c-4b63-823b-3b95140eac11", "expiry": 1633017751, "qrCode": "data:image/png;base64,iVBORw0KGgoA" -} +} ``` The response contains the following properties: @@ -200,7 +201,7 @@ The response contains the following properties: ## Callback events -The callback endpoint is called when a user scans the QR code, uses the deep link the authenticator app, or finishes the presentation process. +The callback endpoint is called when a user scans the QR code, uses the deep link the authenticator app, or finishes the presentation process. |Property |Type |Description | |---------|---------|---------| @@ -208,17 +209,17 @@ The callback endpoint is called when a user scans the QR code, uses the deep lin | `code` |string |The code returned when the request was retrieved by the authenticator app. Possible values:
            • `request_retrieved`: The user scanned the QR code or selected the link that starts the presentation flow.
            • `presentation_verified`: The verifiable credential validation completed successfully.
            | | `state` |string| Returns the state value that you passed in the original payload. | | `subject`|string | The verifiable credential user DID.| -| `issuers`| array |Returns an array of verifiable credentials requested. For each verifiable credential, it provides:
          • The verifiable credential type.
          • The claims retrieved.
          • The verifiable credential issuer’s domain.
          • The verifiable credential issuer’s domain validation status.
          | +| `issuers`| array |Returns an array of verifiable credentials requested. For each verifiable credential, it provides:
        • The verifiable credential type(s).
        • The issuer's DID.<br>
        • The claims retrieved.
        • The verifiable credential issuer's domain.
        • The verifiable credential issuer's domain validation status.
        | | `receipt`| string | Optional. The receipt contains the original payload sent from the wallet to the Verifiable Credentials service. The receipt should be used for troubleshooting/debugging only. The format in the receipt is not fix and can change based on the wallet and version used.| The following example demonstrates a callback payload when the authenticator app starts the presentation request: ```json -{ -    "requestId":"aef2133ba45886ce2c38974339ba1057", -    "code":"request_retrieved", +{ +    "requestId":"aef2133ba45886ce2c38974339ba1057", +    "code":"request_retrieved",     "state":"Wy0ThUz1gSasAjS1" -} +} ``` The following example demonstrates a callback payload after the verifiable credential presentation has successfully completed: diff --git a/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-issuer.md b/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-issuer.md index 5b6fa0221229d..b7b326011cedc 100644 --- a/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-issuer.md +++ b/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-issuer.md @@ -12,8 +12,11 @@ ms.date: 05/03/2022 --- + # Issue Azure AD Verifiable Credentials from an application (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + In this tutorial, you run a sample application from your local computer that connects to your Azure Active Directory (Azure AD) tenant. Using the application, you're going to issue and verify a verified credential expert card. In this article, you learn how to: diff --git a/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-tenant.md b/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-tenant.md index da56a83c22832..4650bf09e5fe4 100644 --- a/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-tenant.md +++ b/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-tenant.md @@ -14,6 +14,8 @@ ms.date: 05/06/2022 # Configure your tenant for Azure AD Verifiable Credentials (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + Azure Active Directory (Azure AD) Verifiable Credentials safeguards your organization with an identity solution that's seamless and decentralized. The service allows you to issue and verify credentials. For issuers, Azure AD provides a service that they can customize and use to issue their own verifiable credentials. For verifiers, the service provides a free REST API that makes it easy to request and accept verifiable credentials in your apps and services. In this tutorial, you learn how to configure your Azure AD tenant so it can use the verifiable credentials service. 
diff --git a/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-verifier.md b/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-verifier.md index adee0076d4695..0bdf160451165 100644 --- a/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-verifier.md +++ b/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-verifier.md @@ -7,13 +7,15 @@ author: barclayn manager: rkarlin ms.author: barclayn ms.topic: tutorial -ms.date: 10/08/2021 +ms.date: 05/18/2022 # Customer intent: As an enterprise, we want to enable customers to manage information about themselves by using verifiable credentials. --- # Configure Azure AD Verifiable Credentials verifier (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + In [Issue Azure AD Verifiable Credentials from an application (preview)](verifiable-credentials-configure-issuer.md), you learn how to issue and verify credentials by using the same Azure Active Directory (Azure AD) tenant. In this tutorial, you go over the steps needed to present and verify your first verifiable credential: a verified credential expert card. As a verifier, you unlock privileges to subjects that possess verified credential expert cards. In this tutorial, you run a sample application from your local computer that asks you to present a verified credential expert card, and then verifies it. diff --git a/articles/active-directory/verifiable-credentials/verifiable-credentials-faq.md b/articles/active-directory/verifiable-credentials/verifiable-credentials-faq.md index 83b69dd8f2663..d8bfe7ea11be2 100644 --- a/articles/active-directory/verifiable-credentials/verifiable-credentials-faq.md +++ b/articles/active-directory/verifiable-credentials/verifiable-credentials-faq.md @@ -13,6 +13,8 @@ ms.author: barclayn # Frequently Asked Questions (FAQ) (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + This page contains commonly asked questions about Verifiable Credentials and Decentralized Identity. Questions are organized into the following sections. - [Vocabulary and basics](#the-basics) diff --git a/articles/active-directory/verifiable-credentials/whats-new.md b/articles/active-directory/verifiable-credentials/whats-new.md index 24c8d975af2bd..0ffb3b9f36b80 100644 --- a/articles/active-directory/verifiable-credentials/whats-new.md +++ b/articles/active-directory/verifiable-credentials/whats-new.md @@ -16,6 +16,8 @@ ms.author: barclayn # What's new in Azure Active Directory Verifiable Credentials (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + This article lists the latest features, improvements, and changes in the Azure Active Directory (Azure AD) Verifiable Credentials service. ## May 2022 diff --git a/articles/advisor/advisor-release-notes.md b/articles/advisor/advisor-release-notes.md index 90461f778eaa4..749415d78443c 100644 --- a/articles/advisor/advisor-release-notes.md +++ b/articles/advisor/advisor-release-notes.md @@ -8,6 +8,22 @@ ms.date: 01/03/2022 Learn what's new in the service. These items may be release notes, videos, blog posts, and other types of information. Bookmark this page to stay up to date with the service. 
+## May 2022 + +### Unlimited number of subscriptions +It is easier now to get an overview of optimization opportunities available to your organization – no need to spend time and effort to apply filters and process subscription in batches. + +To learn more, visit [Get started with Azure Advisor](advisor-get-started.md). + +### Tag filtering + +You can now get Advisor recommendations scoped to a business unit, workload, or team. Filter recommendations and calculate scores using tags you have already assigned to Azure resources, resource groups and subscriptions. Apply tag filters to: + +* Identify cost saving opportunities by business units +* Compare scores for workloads to optimize critical ones first + +To learn more, visit [How to filter Advisor recommendations using tags](advisor-tag-filtering.md). + ## January 2022 [**Shutdown/Resize your virtual machines**](advisor-cost-recommendations.md#optimize-virtual-machine-spend-by-resizing-or-shutting-down-underutilized-instances) recommendation was enhanced to increase the quality, robustness, and applicability. diff --git a/articles/advisor/advisor-tag-filtering.md b/articles/advisor/advisor-tag-filtering.md new file mode 100644 index 0000000000000..258aee8f21530 --- /dev/null +++ b/articles/advisor/advisor-tag-filtering.md @@ -0,0 +1,50 @@ +--- +title: Review optimization opportunities by workload, environment or team +description: Review optimization opportunities by workload, environment or team +ms.topic: article +ms.custom: tags +ms.date: 05/25/2022 +--- + +# Review optimization opportunities by workload, environment or team + +You can now get Advisor recommendations and scores scoped to a workload, environment, or team using resource tag filters. Filter recommendations and calculate scores using tags you have already assigned to Azure resources, resource groups and subscriptions. Use tag filters to: + +* Identify cost saving opportunities by team +* Compare scores for workloads to optimize the critical ones first + +> [!TIP] +> For more information on how to use resource tags to organize and govern your Azure resources, please see the [Cloud Adoption Framework’s guidance](/azure/cloud-adoption-framework/ready/azure-best-practices/resource-tagging) and [Build a cloud governance strategy on Azure](/learn/modules/build-cloud-governance-strategy-azure/). + +## How to filter recommendations using tags + +1. Sign in to the [Azure portal](https://portal.azure.com/). +1. Search for and select [Advisor](https://aka.ms/azureadvisordashboard) from any page. +1. On the Advisor dashboard, click on the **Add Filter** button. +1. Select the tag in the **Filter** field and value(s). +1. Click **Apply**. Summary tiles will be updated to reflect the filter. +1. Click on any of the categories to review recommendations. + + [ ![Screenshot of the Azure Advisor dashboard that shows count of recommendations after tag filter is applied.](./media/tags/overview-tag-filters.png) ](./media/tags/overview-tag-filters.png#lightbox) + + +## How to calculate scores using resource tags + +1. Sign in to the [Azure portal](https://portal.azure.com/). +1. Search for and select [Advisor](https://aka.ms/azureadvisordashboard) from any page. +1. Select **Advisor score (preview)** from the navigation menu on the left. +1. Click on the **Add Filter** button. +1. Select the tag in the **Filter** field and value(s). +1. Click **Apply**. Advisor score will be updated to only include resources impacted by the filter. +1. Click on any of the categories to review recommendations. 
+ + [ ![Screenshot of the Azure Advisor score dashboard that shows score and recommendations after tag filter is applied.](./media/tags/score-tag-filters.png) ](./media/tags/score-tag-filters.png#lightbox) + +> [!NOTE] +> Not all capabilities are available when tag filters are used. For example, tag filters are not supported for security score and score history. + +## Next steps + +To learn more about tagging, see: +- [Define your tagging strategy - Cloud Adoption Framework](/azure/cloud-adoption-framework/ready/azure-best-practices/resource-tagging) +- [Tag resources, resource groups, and subscriptions for logical organization - Azure Resource Manager](/azure/azure-resource-manager/management/tag-resources?tabs=json) diff --git a/articles/advisor/media/tags/overview-tag-filters.png b/articles/advisor/media/tags/overview-tag-filters.png new file mode 100644 index 0000000000000..4caa6c029fefc Binary files /dev/null and b/articles/advisor/media/tags/overview-tag-filters.png differ diff --git a/articles/advisor/media/tags/score-tag-filters.png b/articles/advisor/media/tags/score-tag-filters.png new file mode 100644 index 0000000000000..d7bdd0e5538c7 Binary files /dev/null and b/articles/advisor/media/tags/score-tag-filters.png differ diff --git a/articles/advisor/toc.yml b/articles/advisor/toc.yml index 5968e48f8c596..93ecdee9fa82f 100644 --- a/articles/advisor/toc.yml +++ b/articles/advisor/toc.yml @@ -40,10 +40,12 @@ href: ./resource-graph-samples.md - name: How-to guides items: - - name: Security - href: advisor-security-recommendations.md - name: Optimize costs href: advisor-cost-recommendations.md + - name: Improve security + href: advisor-security-recommendations.md + - name: Use tags to filter recommendations and score + href: advisor-tag-filtering.md - name: Configure recommendations href: view-recommendations.md - name: Permissions and blocked actions diff --git a/articles/aks/TOC.yml b/articles/aks/TOC.yml index 4fcd70b252cf8..727e0f9846203 100644 --- a/articles/aks/TOC.yml +++ b/articles/aks/TOC.yml @@ -181,7 +181,7 @@ href: upgrade-cluster.md - name: Use Uptime SLA href: uptime-sla.md - - name: Use Draft + - name: Use Draft (preview) href: draft.md - name: Use proximity placement groups href: reduce-latency-ppg.md @@ -280,6 +280,8 @@ href: manage-azure-rbac.md - name: Use Kubernetes RBAC with Azure AD integration href: azure-ad-rbac.md + - name: Use custom certificate authorities (preview) + href: custom-certificate-authority.md - name: Rotate certificates href: certificate-rotation.md - name: Use Azure Policy @@ -335,7 +337,7 @@ href: load-balancer-standard.md - name: Use a static IP address and DNS label href: static-ip.md - - name: Use an HTTP proxy (preview) + - name: Use an HTTP proxy href: http-proxy.md - name: Ingress items: @@ -362,33 +364,33 @@ href: limit-egress-traffic.md - name: Use a user defined route for egress href: egress-outboundtype.md - - name: Managed NAT Gateway (preview) + - name: Managed NAT Gateway href: nat-gateway.md - name: Customize CoreDNS href: coredns-custom.md - name: Configure data volumes items: - - name: Azure Disk - Dynamic + - name: Azure disk - dynamic href: azure-disks-dynamic-pv.md - - name: Azure Disk - Static + - name: Azure disk - static href: azure-disk-volume.md - - name: Azure Files - Dynamic + - name: Azure Files - dynamic href: azure-files-dynamic-pv.md - - name: Azure Files - Static + - name: Azure Files - static href: azure-files-volume.md - name: Azure HPC Cache href: azure-hpc-cache.md - - name: NFS Server - Static + - name: NFS 
Server - static href: azure-nfs-volume.md - name: Azure NetApp Files href: azure-netapp-files.md - name: Use Azure Ultra Disks href: use-ultra-disks.md - - name: CSI Storage Drivers + - name: CSI storage drivers items: - - name: Enable CSI Storage Drivers + - name: CSI storage driver overview href: csi-storage-drivers.md - - name: Azure Disk CSI drivers + - name: Azure disk CSI drivers href: azure-disk-csi.md - name: Azure Files CSI drivers href: azure-files-csi.md @@ -425,8 +427,6 @@ href: kubernetes-helm.md - name: Use OpenFaaS href: openfaas.md - - name: Run Spark jobs - href: spark-job.md - name: Use GPUs href: gpu-cluster.md - name: Build Django app with PostgreSQL @@ -465,9 +465,17 @@ href: open-service-mesh-troubleshoot.md - name: Uninstall the Open Service Mesh AKS add-on href: open-service-mesh-uninstall-add-on.md - - name: Kubernetes Event-driven Autoscaler add-on (preview) - href: keda.md - - name: Web Application Routing (preview) + - name: Track releases and region availability + href: release-tracker.md + - name: Deploy the Kubernetes Event-driven Autoscaler (KEDA) add-on (preview) + items: + - name: About Kubernetes Event-driven Autoscaler (KEDA) + href: keda-about.md + - name: Use ARM template + href: keda-deploy-add-on-arm.md + - name: Kubernetes Event-driven Autoscaler (KEDA) integrations + href: keda-integrations.md + - name: Use Web Application Routing (preview) href: web-app-routing.md - name: Use cluster extensions href: cluster-extensions.md diff --git a/articles/aks/aks-migration.md b/articles/aks/aks-migration.md index 3cdda34208a51..3238af52d8b3a 100644 --- a/articles/aks/aks-migration.md +++ b/articles/aks/aks-migration.md @@ -132,7 +132,7 @@ Stateless application migration is the most straightforward case: Carefully plan your migration of stateful applications to avoid data loss or unexpected downtime. * If you use Azure Files, you can mount the file share as a volume into the new cluster. See [Mount Static Azure Files as a Volume](./azure-files-volume.md#mount-file-share-as-a-persistent-volume). -* If you use Azure Managed Disks, you can only mount the disk if unattached to any VM. See [Mount Static Azure Disk as a Volume](./azure-disk-volume.md#mount-disk-as-volume). +* If you use Azure Managed Disks, you can only mount the disk if unattached to any VM. See [Mount Static Azure Disk as a Volume](./azure-disk-volume.md#mount-disk-as-a-volume). * If neither of those approaches work, you can use a backup and restore options. See [Velero on Azure](https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure/blob/master/README.md). 
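Building on the managed disks option in the list above, a disk carried over from the old cluster can be consumed statically in the new cluster through a PersistentVolume. The following is a minimal sketch only, assuming the Azure disk CSI driver and an existing, unattached managed disk; the resource ID, size, and names are placeholders, and the linked static-volume article remains the authoritative walkthrough.

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: migrated-data-disk               # placeholder name
spec:
  capacity:
    storage: 128Gi                       # match the size of the existing disk
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain  # keep the disk if the PV object is deleted
  storageClassName: managed-csi
  csi:
    driver: disk.csi.azure.com
    # Resource ID of the existing managed disk (placeholder values)
    volumeHandle: /subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.Compute/disks/<disk-name>
    volumeAttributes:
      fsType: ext4
```

A PersistentVolumeClaim that references this volume can then be mounted by the migrated workload in the new cluster.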
#### Azure Files diff --git a/articles/aks/azure-ad-integration-cli.md b/articles/aks/azure-ad-integration-cli.md index ef2efa84c07ad..633d248fd9068 100644 --- a/articles/aks/azure-ad-integration-cli.md +++ b/articles/aks/azure-ad-integration-cli.md @@ -260,7 +260,6 @@ For best practices on identity and resource control, see [Best practices for aut [kubernetes-webhook]:https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication [kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply [kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get -[complete-script]: https://github.com/Azure-Samples/azure-cli-samples/tree/master/aks/azure-ad-integration/azure-ad-integration.sh [az-aks-create]: /cli/azure/aks#az_aks_create diff --git a/articles/aks/azure-disk-csi.md b/articles/aks/azure-disk-csi.md index edd748720e769..f96aa10b845b5 100644 --- a/articles/aks/azure-disk-csi.md +++ b/articles/aks/azure-disk-csi.md @@ -1,43 +1,74 @@ --- -title: Use Container Storage Interface (CSI) drivers for Azure Disks on Azure Kubernetes Service (AKS) +title: Use Container Storage Interface (CSI) driver for Azure Disk in Azure Kubernetes Service (AKS) description: Learn how to use the Container Storage Interface (CSI) drivers for Azure disks in an Azure Kubernetes Service (AKS) cluster. services: container-service ms.topic: article -ms.date: 04/06/2022 +ms.date: 05/23/2022 author: palma21 --- -# Use the Azure disk Container Storage Interface (CSI) drivers in Azure Kubernetes Service (AKS) +# Use the Azure disk Container Storage Interface (CSI) driver in Azure Kubernetes Service (AKS) + The Azure disk Container Storage Interface (CSI) driver is a [CSI specification](https://github.com/container-storage-interface/spec/blob/master/spec.md)-compliant driver used by Azure Kubernetes Service (AKS) to manage the lifecycle of Azure disks. The CSI is a standard for exposing arbitrary block and file storage systems to containerized workloads on Kubernetes. By adopting and using CSI, AKS can write, deploy, and iterate plug-ins to expose new or improve existing storage systems in Kubernetes without having to touch the core Kubernetes code and wait for its release cycles. -To create an AKS cluster with CSI driver support, see [Enable CSI drivers for Azure disks and Azure Files on AKS](csi-storage-drivers.md). +To create an AKS cluster with CSI driver support, see [Enable CSI driver on AKS](csi-storage-drivers.md). This article describes how to use the Azure disk CSI driver version 1. + +> [!NOTE] +> Azure disk CSI driver v2 (preview) improves scalability and reduces pod failover latency. It uses shared disks to provision attachment replicas on multiple cluster nodes and integrates with the pod scheduler to ensure a node with an attachment replica is chosen on pod failover. Azure disk CSI driver v2 (preview) also provides the ability to fine tune performance. If you're interested in participating in the preview, submit a request: [https://aka.ms/DiskCSIv2Preview](https://aka.ms/DiskCSIv2Preview). This preview version is provided without a service level agreement, and you can occasionally expect breaking changes while in preview. The preview version isn't recommended for production workloads. For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). 
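As a quick orientation before the parameter reference that follows, here is a minimal sketch of a custom StorageClass built on the Azure disk CSI driver, together with a claim that uses it. The class name and parameter values are illustrative only; any of the parameters listed in the table below can be set the same way.

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-csi-zrs-example      # illustrative name
provisioner: disk.csi.azure.com      # Azure disk CSI driver
parameters:
  skuName: StandardSSD_ZRS           # one of the supported skuName values
  cachingMode: ReadOnly
reclaimPolicy: Delete
allowVolumeExpansion: true           # needed to resize a persistent volume without downtime
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-azuredisk-example
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: managed-csi-zrs-example
  resources:
    requests:
      storage: 64Gi
```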
> [!NOTE] > *In-tree drivers* refers to the current storage drivers that are part of the core Kubernetes code versus the new CSI drivers, which are plug-ins. -## Azure Disk CSI driver new features -Besides original in-tree driver features, Azure Disk CSI driver already provides following new features: -- performance improvement when attach or detach disks in parallel - - in-tree driver attaches or detaches disks in serial while CSI driver would attach or detach disks in batch, there would be significant improvement when there are multiple disks attaching to one node. -- ZRS disk support +## Azure disk CSI driver features + +In addition to in-tree driver features, Azure disk CSI driver supports the following features: + +- Performance improvements during concurrent disk attach and detach + - In-tree drivers attach or detach disks in serial, while CSI drivers attach or detach disks in batch. There is significant improvement when there are multiple disks attaching to one node. +- Zone-redundant storage (ZRS) disk support - `Premium_ZRS`, `StandardSSD_ZRS` disk types are supported, check more details about [Zone-redundant storage for managed disks](../virtual-machines/disks-redundancy.md) - [Snapshot](#volume-snapshots) - [Volume clone](#clone-volumes) - [Resize disk PV without downtime](#resize-a-persistent-volume-without-downtime) +## Storage class driver dynamic disk parameters + +|Name | Meaning | Available Value | Mandatory | Default value +|--- | --- | --- | --- | --- +|skuName | Azure disk storage account type (alias: `storageAccountType`)| `Standard_LRS`, `Premium_LRS`, `StandardSSD_LRS`, `UltraSSD_LRS`, `Premium_ZRS`, `StandardSSD_ZRS` | No | `StandardSSD_LRS`| +|kind | Managed or unmanaged (blob based) disk | `managed` (`dedicated` and `shared` are deprecated) | No | `managed`| +|fsType | File System Type | `ext4`, `ext3`, `ext2`, `xfs`, `btrfs` for Linux, `ntfs` for Windows | No | `ext4` for Linux, `ntfs` for Windows| +|cachingMode | [Azure Data Disk Host Cache Setting](../virtual-machines/windows/premium-storage-performance.md#disk-caching) | `None`, `ReadOnly`, `ReadWrite` | No | `ReadOnly`| +|location | Specify Azure region where Azure disks will be created | `eastus`, `westus`, etc. | No | If empty, driver will use the same location name as current AKS cluster| +|resourceGroup | Specify the resource group where the Azure disk will be created | Existing resource group name | No | If empty, driver will use the same resource group name as current AKS cluster| +|DiskIOPSReadWrite | [UltraSSD disk](../virtual-machines/linux/disks-ultra-ssd.md) IOPS Capability (minimum: 2 IOPS/GiB ) | 100~160000 | No | `500`| +|DiskMBpsReadWrite | [UltraSSD disk](../virtual-machines/linux/disks-ultra-ssd.md) Throughput Capability(minimum: 0.032/GiB) | 1~2000 | No | `100`| +|LogicalSectorSize | Logical sector size in bytes for Ultra disk. Supported values are 512 ad 4096. 4096 is the default. 
| `512`, `4096` | No | `4096`| +|tags | Azure disk [tags](../azure-resource-manager/management/tag-resources.md) | Tag format: `key1=val1,key2=val2` | No | ""| +|diskEncryptionSetID | ResourceId of the disk encryption set to use for [enabling encryption at rest](../virtual-machines/windows/disk-encryption.md) | format: `/subscriptions/{subs-id}/resourceGroups/{rg-name}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSet-name}` | No | ""| +|diskEncryptionType | Encryption type of the disk encryption set | `EncryptionAtRestWithCustomerKey`(by default), `EncryptionAtRestWithPlatformAndCustomerKeys` | No | ""| +|writeAcceleratorEnabled | [Write Accelerator on Azure Disks](../virtual-machines/windows/how-to-enable-write-accelerator.md) | `true`, `false` | No | ""| +|networkAccessPolicy | NetworkAccessPolicy property to prevent generation of the SAS URI for a disk or a snapshot | `AllowAll`, `DenyAll`, `AllowPrivate` | No | `AllowAll`| +|diskAccessID | ARM ID of the DiskAccess resource to use private endpoints on disks | | No | ``| +|enableBursting | [Enable on-demand bursting](../virtual-machines/disk-bursting.md) beyond the provisioned performance target of the disk. On-demand bursting should only be applied to Premium disk and when the disk size > 512GB. Ultra and shared disk is not supported. Bursting is disabled by default. | `true`, `false` | No | `false`| +|useragent | User agent used for [customer usage attribution](../marketplace/azure-partner-customer-usage-attribution.md)| | No | Generated Useragent formatted `driverName/driverVersion compiler/version (OS-ARCH)`| +|enableAsyncAttach | Allow multiple disk attach operations (in batch) on one node in parallel.
        While this can speed up disk attachment, you may encounter Azure API throttling limit when there are large number of volume attachments. | `true`, `false` | No | `false`| +|subscriptionID | Specify Azure subscription ID where the Azure disk will be created | Azure subscription ID | No | If not empty, `resourceGroup` must be provided.| + ## Use CSI persistent volumes with Azure disks -A [persistent volume](concepts-storage.md#persistent-volumes) (PV) represents a piece of storage that's provisioned for use with Kubernetes pods. A PV can be used by one or many pods and can be dynamically or statically provisioned. This article shows you how to dynamically create PVs with Azure disks for use by a single pod in an AKS cluster. For static provisioning, see [Manually create and use a volume with Azure disks](azure-disk-volume.md). +A [persistent volume](concepts-storage.md#persistent-volumes) (PV) represents a piece of storage that's provisioned for use with Kubernetes pods. A PV can be used by one or many pods and can be dynamically or statically provisioned. This article shows you how to dynamically create PVs with Azure disks for use by a single pod in an AKS cluster. For static provisioning, see [Create a static volume with Azure disks](azure-disk-volume.md). For more information on Kubernetes volumes, see [Storage options for applications in AKS][concepts-storage]. ## Dynamically create Azure disk PVs by using the built-in storage classes -A storage class is used to define how a unit of storage is dynamically created with a persistent volume. For more information on Kubernetes storage classes, see [Kubernetes storage classes][kubernetes-storage-classes]. -When you use storage CSI drivers on AKS, there are two additional built-in `StorageClasses` that use the Azure disk CSI storage drivers. The additional CSI storage classes are created with the cluster alongside the in-tree default storage classes. +A storage class is used to define how a unit of storage is dynamically created with a persistent volume. For more information on Kubernetes storage classes, see [Kubernetes storage classes][kubernetes-storage-classes]. + +When you use the Azure disk storage CSI driver on AKS, there are two additional built-in `StorageClasses` that use the Azure disk CSI storage driver. The additional CSI storage classes are created with the cluster alongside the in-tree default storage classes. - `managed-csi`: Uses Azure Standard SSD locally redundant storage (LRS) to create a managed disk. - `managed-csi-premium`: Uses Azure Premium LRS to create a managed disk. @@ -46,7 +77,7 @@ The reclaim policy in both storage classes ensures that the underlying Azure dis To leverage these storage classes, create a [PVC](concepts-storage.md#persistent-volume-claims) and respective pod that references and uses them. A PVC is used to automatically provision storage based on a storage class. A PVC can use one of the pre-created storage classes or a user-defined storage class to create an Azure-managed disk for the desired SKU and size. When you create a pod definition, the PVC is specified to request the desired storage. 
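For reference, a claim against the built-in `managed-csi` class needs only a few lines of YAML. This is an illustrative sketch rather than the exact manifest used in the next step; the claim name and size are placeholders:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-azuredisk-example
spec:
  accessModes:
    - ReadWriteOnce               # an Azure disk can be attached to only one node at a time
  storageClassName: managed-csi   # built-in CSI storage class (Standard SSD, LRS)
  resources:
    requests:
      storage: 10Gi               # size of the managed disk to provision
```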
-Create an example pod and respective PVC with the [kubectl apply][kubectl-apply] command: +Create an example pod and respective PVC by running the [kubectl apply][kubectl-apply] command: ```console $ kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/deploy/example/pvc-azuredisk-csi.yaml @@ -56,13 +87,13 @@ persistentvolumeclaim/pvc-azuredisk created pod/nginx-azuredisk created ``` -After the pod is in the running state, create a new file called `test.txt`. +After the pod is in the running state, run the following command to create a new file called `test.txt`. ```bash $ kubectl exec nginx-azuredisk -- touch /mnt/azuredisk/test.txt ``` -You can now validate that the disk is correctly mounted by running the following command and verifying you see the `test.txt` file in the output: +To validate the disk is correctly mounted, run the following command and verify you see the `test.txt` file in the output: ```console $ kubectl exec nginx-azuredisk -- ls /mnt/azuredisk @@ -74,14 +105,13 @@ test.txt ## Create a custom storage class -The default storage classes suit the most common scenarios, but not all. For some cases, you might want to have your own storage class customized with your own parameters. For example, we have a scenario where you might want to change the `volumeBindingMode` class. +The default storage classes are suitable for most common scenarios. For some cases, you might want to have your own storage class customized with your own parameters. For example, you might want to change the `volumeBindingMode` class. -You can use a `volumeBindingMode: Immediate` class that guarantees that occurs immediately once the PVC is created. In cases where your node pools are topology constrained, for example, using availability zones, PVs would be bound or provisioned without knowledge of the pod's scheduling requirements (in this case to be in a specific zone). +You can use a `volumeBindingMode: Immediate` class that guarantees it occurs immediately once the PVC is created. In cases where your node pools are topology constrained, for example when using availability zones, PVs would be bound or provisioned without knowledge of the pod's scheduling requirements (in this case to be in a specific zone). -To address this scenario, you can use `volumeBindingMode: WaitForFirstConsumer`, which delays the binding and provisioning of a PV until a pod that uses the PVC is created. In this way, the PV will conform and be provisioned in the availability zone (or other topology) that's specified by the pod's scheduling constraints. The default storage classes use `volumeBindingMode: WaitForFirstConsumer` class. +To address this scenario, you can use `volumeBindingMode: WaitForFirstConsumer`, which delays the binding and provisioning of a PV until a pod that uses the PVC is created. This way, the PV conforms and is provisioned in the availability zone (or other topology) that's specified by the pod's scheduling constraints. The default storage classes use `volumeBindingMode: WaitForFirstConsumer` class. -Create a file named `sc-azuredisk-csi-waitforfirstconsumer.yaml`, and paste the following manifest. -The storage class is the same as our `managed-csi` storage class but with a different `volumeBindingMode` class. +Create a file named `sc-azuredisk-csi-waitforfirstconsumer.yaml`, and then paste the following manifest. The storage class is the same as our `managed-csi` storage class, but with a different `volumeBindingMode` class. 
```yaml kind: StorageClass @@ -96,7 +126,7 @@ reclaimPolicy: Delete volumeBindingMode: WaitForFirstConsumer ``` -Create the storage class with the [kubectl apply][kubectl-apply] command, and specify your `sc-azuredisk-csi-waitforfirstconsumer.yaml` file: +Create the storage class by running the [kubectl apply][kubectl-apply] command and specify your `sc-azuredisk-csi-waitforfirstconsumer.yaml` file: ```console $ kubectl apply -f sc-azuredisk-csi-waitforfirstconsumer.yaml @@ -108,7 +138,15 @@ storageclass.storage.k8s.io/azuredisk-csi-waitforfirstconsumer created The Azure disk CSI driver supports creating [snapshots of persistent volumes](https://kubernetes-csi.github.io/docs/snapshot-restore-feature.html). As part of this capability, the driver can perform either *full* or [*incremental* snapshots](../virtual-machines/disks-incremental-snapshots.md) depending on the value set in the `incremental` parameter (by default, it's true). -For details on all the parameters, see [volume snapshot class parameters](https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/driver-parameters.md#volumesnapshotclass). +The following table provides details for all of the parameters. + +|Name | Meaning | Available Value | Mandatory | Default value +|--- | --- | --- | --- | --- +|resourceGroup | Resource group for storing snapshot shots | EXISTING RESOURCE GROUP | No | If not specified, snapshot will be stored in the same resource group as source Azure disk +|incremental | Take [full or incremental snapshot](../virtual-machines/windows/incremental-snapshots.md) | `true`, `false` | No | `true` +|tags | azure disk [tags](../azure-resource-manager/management/tag-resources.md) | Tag format: 'key1=val1,key2=val2' | No | "" +|userAgent | User agent used for [customer usage attribution](../marketplace/azure-partner-customer-usage-attribution.md) | | No | Generated Useragent formatted `driverName/driverVersion compiler/version (OS-ARCH)` +|subscriptionID | Specify Azure subscription ID in which Azure disk will be created | Azure subscription ID | No | If not empty, `resourceGroup` must be provided, `incremental` must set as `false` ### Create a volume snapshot @@ -199,7 +237,7 @@ persistentvolumeclaim/pvc-azuredisk-cloning created pod/nginx-restored-cloning created ``` -We can now check the content of the cloned volume by running the following command and confirming we still see our `test.txt` created file. +You can verify the content of the cloned volume by running the following command and confirming the file `test.txt` is created. ```console $ kubectl exec nginx-restored-cloning -- ls /mnt/azuredisk @@ -216,7 +254,7 @@ You can request a larger volume for a PVC. Edit the PVC object, and specify a la > [!NOTE] > A new PV is never created to satisfy the claim. Instead, an existing volume is resized. -In AKS, the built-in `managed-csi` storage class already allows for expansion, so use the [PVC created earlier with this storage class](#dynamically-create-azure-disk-pvs-by-using-the-built-in-storage-classes). The PVC requested a 10-Gi persistent volume. We can confirm that by running: +In AKS, the built-in `managed-csi` storage class already supports expansion, so use the [PVC created earlier with this storage class](#dynamically-create-azure-disk-pvs-by-using-the-built-in-storage-classes). The PVC requested a 10-Gi persistent volume. 
You can confirm by running the following command: ```console $ kubectl exec -it nginx-azuredisk -- df -h /mnt/azuredisk @@ -226,11 +264,11 @@ Filesystem Size Used Avail Use% Mounted on ``` > [!IMPORTANT] -> Currently, Azure disk CSI driver supports resizing PVCs without downtime on specific regions. +> Azure disk CSI driver supports resizing PVCs without downtime in specific regions. > Follow this [link][expand-an-azure-managed-disk] to register the disk online resize feature. > If your cluster is not in the supported region list, you need to delete application first to detach disk on the node before expanding PVC. -Let's expand the PVC by increasing the `spec.resources.requests.storage` field: +Expand the PVC by increasing the `spec.resources.requests.storage` field running the following command: ```console $ kubectl patch pvc pvc-azuredisk --type merge --patch '{"spec": {"resources": {"requests": {"storage": "15Gi"}}}}' @@ -238,7 +276,7 @@ $ kubectl patch pvc pvc-azuredisk --type merge --patch '{"spec": {"resources": { persistentvolumeclaim/pvc-azuredisk patched ``` -Let's confirm the volume is now larger: +Run the following command to confirm the volume size has increased: ```console $ kubectl get pv @@ -248,7 +286,7 @@ pvc-391ea1a6-0191-4022-b915-c8dc4216174a 15Gi RWO Delete (...) ``` -And after a few minutes, confirm the size of the PVC and inside the pod: +And after a few minutes, run the following commands to confirm the size of the PVC and inside the pod: ```console $ kubectl get pvc pvc-azuredisk @@ -260,11 +298,34 @@ Filesystem Size Used Avail Use% Mounted on /dev/sdc 15G 46M 15G 1% /mnt/azuredisk ``` +## On-demand bursting + +On-demand disk bursting model allows disk bursts whenever its needs exceed its current capacity. This model incurs additional charges anytime the disk bursts. On-demand bursting is only available for premium SSDs larger than 512 GiB. For more information on premium SSDs provisioned IOPS and throughput per disk, see [Premium SSD size][az-premium-ssd]. Alternatively, credit-based bursting is where the disk will burst only if it has burst credits accumulated in its credit bucket. Credit-based bursting does not incur additional charges when the disk bursts. Credit-based bursting is only available for premium SSDs 512 GiB and smaller, and standard SSDs 1024 GiB and smaller. For more details on on-demand bursting, see [On-demand bursting][az-on-demand-bursting]. + +> [!IMPORTANT] +> The default `managed-csi-premium` storage class has on-demand bursting disabled and uses credit-based bursting. Any premium SSD dynamically created by a persistent volume claim based on the default `managed-csi-premium` storage class also has on-demand bursting disabled. + +To create a premium SSD persistent volume with [on-demand bursting][az-on-demand-bursting] enabled you can create a new storage class with the [enableBursting][csi-driver-parameters] parameter set to `true` as shown in the following YAML template. For more details on enabling on-demand bursting, see [On-demand bursting][az-on-demand-bursting]. For more details on building your own storage class with on-demand bursting enabled, see [Create a Burstable Managed CSI Premium Storage Class][create-burstable-storage-class]. 
+ +```yaml +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: burstable-managed-csi-premium +provisioner: disk.csi.azure.com +parameters: + skuname: Premium_LRS + enableBursting: "true" +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +``` + ## Windows containers -The Azure disk CSI driver also supports Windows nodes and containers. If you want to use Windows containers, follow the [Windows containers quickstart][aks-quickstart-cli] to add a Windows node pool. +The Azure disk CSI driver supports Windows nodes and containers. If you want to use Windows containers, follow the [Windows containers quickstart][aks-quickstart-cli] to add a Windows node pool. -After you have a Windows node pool, you can now use the built-in storage classes like `managed-csi`. You can deploy an example [Windows-based stateful set](https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/deploy/example/windows/statefulset.yaml) that saves timestamps into the file `data.txt` by deploying the following command with the [kubectl apply][kubectl-apply] command: +After you have a Windows node pool, you can now use the built-in storage classes like `managed-csi`. You can deploy an example [Windows-based stateful set](https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/deploy/example/windows/statefulset.yaml) that saves timestamps into the file `data.txt` by running the following [kubectl apply][kubectl-apply] command: ```console $ kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/deploy/example/windows/statefulset.yaml @@ -272,7 +333,7 @@ $ kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-c statefulset.apps/busybox-azuredisk created ``` -You can now validate the contents of the volume by running: +To validate the content of the volume, run the following command: ```console $ kubectl exec -it busybox-azuredisk-0 -- cat c:\\mnt\\azuredisk\\data.txt # on Linux/MacOS Bash @@ -286,7 +347,7 @@ $ kubectl exec -it busybox-azuredisk-0 -- cat c:\mnt\azuredisk\data.txt # on Win ## Next steps -- To learn how to use CSI drivers for Azure Files, see [Use Azure Files with CSI drivers](azure-files-csi.md). +- To learn how to use CSI driver for Azure Files, see [Use Azure Files with CSI driver](azure-files-csi.md). - For more information about storage best practices, see [Best practices for storage and backups in Azure Kubernetes Service][operator-best-practices-storage]. 
@@ -296,6 +357,8 @@ $ kubectl exec -it busybox-azuredisk-0 -- cat c:\mnt\azuredisk\data.txt # on Win [kubernetes-storage-classes]: https://kubernetes.io/docs/concepts/storage/storage-classes/ [kubernetes-volumes]: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ [managed-disk-pricing-performance]: https://azure.microsoft.com/pricing/details/managed-disks/ +[csi-driver-parameters]: https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/driver-parameters.md +[create-burstable-storage-class]: https://github.com/Azure-Samples/burstable-managed-csi-premium [azure-disk-volume]: azure-disk-volume.md @@ -317,3 +380,6 @@ $ kubectl exec -it busybox-azuredisk-0 -- cat c:\mnt\azuredisk\data.txt # on Win [az-feature-register]: /cli/azure/feature#az_feature_register [az-feature-list]: /cli/azure/feature#az_feature_list [az-provider-register]: /cli/azure/provider#az_provider_register +[az-on-demand-bursting]: ../virtual-machines/disk-bursting.md#on-demand-bursting +[enable-on-demand-bursting]: ../virtual-machines/disks-enable-bursting.md?tabs=azure-cli +[az-premium-ssd]: ../virtual-machines/disks-types.md#premium-ssds \ No newline at end of file diff --git a/articles/aks/azure-disk-volume.md b/articles/aks/azure-disk-volume.md index 6eaa2145c456e..806a56edda711 100644 --- a/articles/aks/azure-disk-volume.md +++ b/articles/aks/azure-disk-volume.md @@ -3,13 +3,13 @@ title: Create a static volume for pods in Azure Kubernetes Service (AKS) description: Learn how to manually create a volume with Azure disks for use with a pod in Azure Kubernetes Service (AKS) services: container-service ms.topic: article -ms.date: 05/09/2019 +ms.date: 05/17/2022 #Customer intent: As a developer, I want to learn how to manually create and attach storage to a specific pod in AKS. --- -# Manually create and use a volume with Azure disks in Azure Kubernetes Service (AKS) +# Create a static volume with Azure disks in Azure Kubernetes Service (AKS) Container-based applications often need to access and persist data in an external data volume. If a single pod needs access to storage, you can use Azure disks to present a native volume for application use. This article shows you how to manually create an Azure disk and attach it to a pod in AKS. @@ -22,129 +22,145 @@ For more information on Kubernetes volumes, see [Storage options for application This article assumes that you have an existing AKS cluster with 1.21 or later version. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. -If you want to interact with Azure Disks on an AKS cluster with 1.20 or previous version, see the [Kubernetes plugin for Azure Disks][kubernetes-disks]. +If you want to interact with Azure disks on an AKS cluster with 1.20 or previous version, see the [Kubernetes plugin for Azure disks][kubernetes-disks]. -## Create an Azure disk - -When you create an Azure disk for use with AKS, you can create the disk resource in the **node** resource group. This approach allows the AKS cluster to access and manage the disk resource. If you instead create the disk in a separate resource group, you must grant the Azure Kubernetes Service (AKS) managed identity for your cluster the `Contributor` role to the disk's resource group. - -For this article, create the disk in the node resource group. 
First, get the resource group name with the [az aks show][az-aks-show] command and add the `--query nodeResourceGroup` query parameter. The following example gets the node resource group for the AKS cluster name *myAKSCluster* in the resource group name *myResourceGroup*: +## Storage class static provisioning -```azurecli-interactive -$ az aks show --resource-group myResourceGroup --name myAKSCluster --query nodeResourceGroup -o tsv +The following table describes the Storage Class parameters for the Azure disk CSI driver static provisioning: -MC_myResourceGroup_myAKSCluster_eastus -``` +|Name | Meaning | Available Value | Mandatory | Default value| +|--- | --- | --- | --- | ---| +|volumeHandle| Azure disk URI | `/subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}` | Yes | N/A| +|volumeAttributes.fsType | File system type | `ext4`, `ext3`, `ext2`, `xfs`, `btrfs` for Linux, `ntfs` for Windows | No | `ext4` for Linux, `ntfs` for Windows | +|volumeAttributes.partition | Partition number of the existing disk (only supported on Linux) | `1`, `2`, `3` | No | Empty (no partition)
        - Make sure partition format is like `-part1` | +|volumeAttributes.cachingMode | [Disk host cache setting](../virtual-machines/windows/premium-storage-performance.md#disk-caching)| `None`, `ReadOnly`, `ReadWrite` | No | `ReadOnly`| -Now create a disk using the [az disk create][az-disk-create] command. Specify the node resource group name obtained in the previous command, and then a name for the disk resource, such as *myAKSDisk*. The following example creates a *20*GiB disk, and outputs the ID of the disk once created. If you need to create a disk for use with Windows Server containers, add the `--os-type windows` parameter to correctly format the disk. - -```azurecli-interactive -az disk create \ - --resource-group MC_myResourceGroup_myAKSCluster_eastus \ - --name myAKSDisk \ - --size-gb 20 \ - --query id --output tsv -``` +## Create an Azure disk -> [!NOTE] -> Azure disks are billed by SKU for a specific size. These SKUs range from 32GiB for S4 or P4 disks to 32TiB for S80 or P80 disks (in preview). The throughput and IOPS performance of a Premium managed disk depends on both the SKU and the instance size of the nodes in the AKS cluster. See [Pricing and Performance of Managed Disks][managed-disk-pricing-performance]. - -The disk resource ID is displayed once the command has successfully completed, as shown in the following example output. This disk ID is used to mount the disk in the next step. - -```console -/subscriptions//resourceGroups/MC_myAKSCluster_myAKSCluster_eastus/providers/Microsoft.Compute/disks/myAKSDisk -``` - -## Mount disk as volume -Create a *pv-azuredisk.yaml* file with a *PersistentVolume*. Update `volumeHandle` with disk resource ID. For example: - -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - name: pv-azuredisk -spec: - capacity: - storage: 20Gi - accessModes: - - ReadWriteOnce - persistentVolumeReclaimPolicy: Retain - storageClassName: managed-csi - csi: - driver: disk.csi.azure.com - readOnly: false - volumeHandle: /subscriptions//resourceGroups/MC_myAKSCluster_myAKSCluster_eastus/providers/Microsoft.Compute/disks/myAKSDisk - volumeAttributes: - fsType: ext4 -``` - -Create a *pvc-azuredisk.yaml* file with a *PersistentVolumeClaim* that uses the *PersistentVolume*. For example: - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: pvc-azuredisk -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - volumeName: pv-azuredisk - storageClassName: managed-csi -``` - -Use the `kubectl` commands to create the *PersistentVolume* and *PersistentVolumeClaim*. - -```console -kubectl apply -f pv-azuredisk.yaml -kubectl apply -f pvc-azuredisk.yaml -``` - -Verify your *PersistentVolumeClaim* is created and bound to the *PersistentVolume*. - -```console -$ kubectl get pvc pvc-azuredisk - -NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE -pvc-azuredisk Bound pv-azuredisk 20Gi RWO 5s -``` - -Create a *azure-disk-pod.yaml* file to reference your *PersistentVolumeClaim*. 
For example: - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: mypod -spec: - containers: - - image: mcr.microsoft.com/oss/nginx/nginx:1.15.5-alpine - name: mypod - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 250m - memory: 256Mi - volumeMounts: - - name: azure - mountPath: /mnt/azure - volumes: - - name: azure - persistentVolumeClaim: - claimName: pvc-azuredisk -``` - -```console -kubectl apply -f azure-disk-pod.yaml -``` +When you create an Azure disk for use with AKS, you can create the disk resource in the **node** resource group. This approach allows the AKS cluster to access and manage the disk resource. If instead you created the disk in a separate resource group, you must grant the Azure Kubernetes Service (AKS) managed identity for your cluster the `Contributor` role to the disk's resource group. In this exercise, you're going to create the disk in the same resource group as your cluster. + +1. Identify the resource group name using the [az aks show][az-aks-show] command and add the `--query nodeResourceGroup` parameter. The following example gets the node resource group for the AKS cluster name *myAKSCluster* in the resource group name *myResourceGroup*: + + ```azurecli-interactive + $ az aks show --resource-group myResourceGroup --name myAKSCluster --query nodeResourceGroup -o tsv + + MC_myResourceGroup_myAKSCluster_eastus + ``` + +2. Create a disk using the [az disk create][az-disk-create] command. Specify the node resource group name obtained in the previous command, and then a name for the disk resource, such as *myAKSDisk*. The following example creates a *20*GiB disk, and outputs the ID of the disk after it's created. If you need to create a disk for use with Windows Server containers, add the `--os-type windows` parameter to correctly format the disk. + + ```azurecli-interactive + az disk create \ + --resource-group MC_myResourceGroup_myAKSCluster_eastus \ + --name myAKSDisk \ + --size-gb 20 \ + --query id --output tsv + ``` + + > [!NOTE] + > Azure disks are billed by SKU for a specific size. These SKUs range from 32GiB for S4 or P4 disks to 32TiB for S80 or P80 disks (in preview). The throughput and IOPS performance of a Premium managed disk depends on both the SKU and the instance size of the nodes in the AKS cluster. See [Pricing and Performance of Managed Disks][managed-disk-pricing-performance]. + + The disk resource ID is displayed once the command has successfully completed, as shown in the following example output. This disk ID is used to mount the disk in the next section. + + ```console + /subscriptions//resourceGroups/MC_myAKSCluster_myAKSCluster_eastus/providers/Microsoft.Compute/disks/myAKSDisk + ``` + +## Mount disk as a volume + +1. Create a *pv-azuredisk.yaml* file with a *PersistentVolume*. Update `volumeHandle` with disk resource ID from the previous step. For example: + + ```yaml + apiVersion: v1 + kind: PersistentVolume + metadata: + name: pv-azuredisk + spec: + capacity: + storage: 20Gi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: managed-csi + csi: + driver: disk.csi.azure.com + readOnly: false + volumeHandle: /subscriptions//resourceGroups/MC_myAKSCluster_myAKSCluster_eastus/providers/Microsoft.Compute/disks/myAKSDisk + volumeAttributes: + fsType: ext4 + ``` + +2. Create a *pvc-azuredisk.yaml* file with a *PersistentVolumeClaim* that uses the *PersistentVolume*. 
For example: + + ```yaml + apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: pvc-azuredisk + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + volumeName: pv-azuredisk + storageClassName: managed-csi + ``` + +3. Use the `kubectl` commands to create the *PersistentVolume* and *PersistentVolumeClaim*, referencing the two YAML files created earlier: + + ```console + kubectl apply -f pv-azuredisk.yaml + kubectl apply -f pvc-azuredisk.yaml + ``` + +4. To verify your *PersistentVolumeClaim* is created and bound to the *PersistentVolume*, run the +following command: + + ```console + $ kubectl get pvc pvc-azuredisk + + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE + pvc-azuredisk Bound pv-azuredisk 20Gi RWO 5s + ``` + +5. Create a *azure-disk-pod.yaml* file to reference your *PersistentVolumeClaim*. For example: + + ```yaml + apiVersion: v1 + kind: Pod + metadata: + name: mypod + spec: + containers: + - image: mcr.microsoft.com/oss/nginx/nginx:1.15.5-alpine + name: mypod + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + volumeMounts: + - name: azure + mountPath: /mnt/azure + volumes: + - name: azure + persistentVolumeClaim: + claimName: pvc-azuredisk + ``` + +6. Run the following command to apply the configuration and mount the volume, referencing the YAML +configuration file created in the previous steps: + + ```console + kubectl apply -f azure-disk-pod.yaml + ``` ## Next steps -For associated best practices, see [Best practices for storage and backups in AKS][operator-best-practices-storage]. +To learn about our recommended storage and backup practices, see [Best practices for storage and backups in AKS][operator-best-practices-storage]. [kubernetes-disks]: https://github.com/kubernetes/examples/blob/master/staging/volumes/azure_disk/README.md diff --git a/articles/aks/cluster-container-registry-integration.md b/articles/aks/cluster-container-registry-integration.md index 9e0ef2226baf9..3b46b82bf41f7 100644 --- a/articles/aks/cluster-container-registry-integration.md +++ b/articles/aks/cluster-container-registry-integration.md @@ -5,7 +5,7 @@ services: container-service manager: gwallace ms.topic: article ms.date: 06/10/2021 -ms.custom: devx-track-azurepowershell, devx-track-azurecli +ms.tool: azure-cli, azure-powershell ms.devlang: azurecli --- diff --git a/articles/aks/configure-azure-cni.md b/articles/aks/configure-azure-cni.md index 76a851450e9e4..680f4861aa38e 100644 --- a/articles/aks/configure-azure-cni.md +++ b/articles/aks/configure-azure-cni.md @@ -59,7 +59,7 @@ The maximum number of pods per node in an AKS cluster is 250. The *default* maxi | -- | :--: | :--: | -- | | Azure CLI | 110 | 30 | Yes (up to 250) | | Resource Manager template | 110 | 30 | Yes (up to 250) | -| Portal | 110 | 110 (configured in the Node Pools tab) | No | +| Portal | 110 | 110 (configurable in the Node Pools tab) | Yes (up to 250) | ### Configure maximum - new clusters @@ -72,7 +72,7 @@ A minimum value for maximum pods per node is enforced to guarantee space for sys | Networking | Minimum | Maximum | | -- | :--: | :--: | | Azure CNI | 10 | 250 | -| Kubenet | 10 | 110 | +| Kubenet | 10 | 250 | > [!NOTE] > The minimum value in the table above is strictly enforced by the AKS service. You can not set a maxPods value lower than the minimum shown as doing so can prevent the cluster from starting. 
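To illustrate the values in the preceding tables, the maximum pods per node is set at deployment time with the `--max-pods` flag. The following Azure CLI commands are a sketch only; the cluster, resource group, and node pool names are placeholders:

```azurecli-interactive
# Create a cluster whose initial node pool allows up to 250 pods per node (Azure CNI)
az aks create \
    --resource-group myResourceGroup \
    --name myAKSCluster \
    --network-plugin azure \
    --max-pods 250

# Add a node pool that uses a different maximum
az aks nodepool add \
    --resource-group myResourceGroup \
    --cluster-name myAKSCluster \
    --name mynodepool \
    --max-pods 110
```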
@@ -159,6 +159,9 @@ A drawback with the traditional CNI is the exhaustion of pod IP addresses as the ### Additional prerequisites +> [!NOTE] +> When using dynamic allocation of IPs, exposing an application as a Private Link Service using a Kubernetes Load Balancer Service is not supported. + The [prerequisites][prerequisites] already listed for Azure CNI still apply, but there are a few additional limitations: * Only linux node clusters and node pools are supported. @@ -175,10 +178,10 @@ The planning of IPs for Kubernetes services and Docker bridge remain unchanged. The pods per node values when using Azure CNI with dynamic allocation of IPs have changed slightly from the traditional CNI behavior: -|CNI|Deployment Method|Default|Configurable at deployment| -|--|--| :--: |--| -|Traditional Azure CNI|Azure CLI|30|Yes (up to 250)| -|Azure CNI with dynamic allocation of IPs|Azure CLI|250|Yes (up to 250)| +|CNI|Default|Configurable at deployment| +|--| :--: |--| +|Traditional Azure CNI|30|Yes (up to 250)| +|Azure CNI with dynamic allocation of IPs|250|Yes (up to 250)| All other guidance related to configuring the maximum nodes per pod remains the same. @@ -285,12 +288,6 @@ The following questions and answers apply to the **Azure CNI network configurati The entire cluster should use only one type of CNI. -## AKS Engine - -[Azure Kubernetes Service Engine (AKS Engine)][aks-engine] is an open-source project that generates Azure Resource Manager templates you can use for deploying Kubernetes clusters on Azure. - -Kubernetes clusters created with AKS Engine support both the [kubenet][kubenet] and [Azure CNI][cni-networking] plugins. As such, both networking scenarios are supported by AKS Engine. - ## Next steps Learn more about networking in AKS in the following articles: @@ -308,7 +305,6 @@ Learn more about networking in AKS in the following articles: [portal-01-networking-advanced]: ./media/networking-overview/portal-01-networking-advanced.png -[aks-engine]: https://github.com/Azure/aks-engine [services]: https://kubernetes.io/docs/concepts/services-networking/service/ [portal]: https://portal.azure.com [cni-networking]: https://github.com/Azure/azure-container-networking/blob/master/docs/cni.md diff --git a/articles/aks/configure-kubenet.md b/articles/aks/configure-kubenet.md index 1f9af23f1d983..fb6178de42eed 100644 --- a/articles/aks/configure-kubenet.md +++ b/articles/aks/configure-kubenet.md @@ -3,7 +3,7 @@ title: Configure kubenet networking in Azure Kubernetes Service (AKS) description: Learn how to configure kubenet (basic) network in Azure Kubernetes Service (AKS) to deploy an AKS cluster into an existing virtual network and subnet. services: container-service ms.topic: article -ms.date: 06/02/2020 +ms.date: 06/02/2022 ms.reviewer: nieberts, jomore --- @@ -21,7 +21,7 @@ This article shows you how to use *kubenet* networking to create and use a virtu * The virtual network for the AKS cluster must allow outbound internet connectivity. * Don't create more than one AKS cluster in the same subnet. * AKS clusters may not use `169.254.0.0/16`, `172.30.0.0/16`, `172.31.0.0/16`, or `192.0.2.0/24` for the Kubernetes service address range, pod address range or cluster virtual network address range. -* The cluster identity used by the AKS cluster must have at least [Network Contributor](../role-based-access-control/built-in-roles.md#network-contributor) role on the subnet within your virtual network. 
You must also have the appropriate permissions, such as the subscription owner, to create a cluster identity and assign it permissions. If you wish to define a [custom role](../role-based-access-control/custom-roles.md) instead of using the built-in Network Contributor role, the following permissions are required: +* The cluster identity used by the AKS cluster must have at least [Network Contributor](../role-based-access-control/built-in-roles.md#network-contributor) role on the subnet within your virtual network. CLI helps do the role assignment automatically. If you are using ARM template or other clients, the role assignment needs to be done manually. You must also have the appropriate permissions, such as the subscription owner, to create a cluster identity and assign it permissions. If you wish to define a [custom role](../role-based-access-control/custom-roles.md) instead of using the built-in Network Contributor role, the following permissions are required: * `Microsoft.Network/virtualNetworks/subnets/join/action` * `Microsoft.Network/virtualNetworks/subnets/read` @@ -138,6 +138,9 @@ The following example output shows the application ID and password for your serv To assign the correct delegations in the remaining steps, use the [az network vnet show][az-network-vnet-show] and [az network vnet subnet show][az-network-vnet-subnet-show] commands to get the required resource IDs. These resource IDs are stored as variables and referenced in the remaining steps: +> [!NOTE] +> If you are using CLI, you can skip this step. With ARM template or other clients, you need to do the below role assignment. + ```azurecli-interactive VNET_ID=$(az network vnet show --resource-group myResourceGroup --name myAKSVnet --query id -o tsv) SUBNET_ID=$(az network vnet subnet show --resource-group myResourceGroup --vnet-name myAKSVnet --name myAKSSubnet --query id -o tsv) @@ -266,4 +269,4 @@ With an AKS cluster deployed into your existing virtual network subnet, you can [express-route]: ../expressroute/expressroute-introduction.md [network-comparisons]: concepts-network.md#compare-network-models [custom-route-table]: ../virtual-network/manage-route-table.md -[user-assigned managed identity]: use-managed-identity.md#bring-your-own-control-plane-mi +[user-assigned managed identity]: use-managed-identity.md#bring-your-own-control-plane-managed-identity diff --git a/articles/aks/control-kubeconfig-access.md b/articles/aks/control-kubeconfig-access.md index 119958571cb7f..890301f35b4a5 100644 --- a/articles/aks/control-kubeconfig-access.md +++ b/articles/aks/control-kubeconfig-access.md @@ -156,7 +156,7 @@ For enhanced security on access to AKS clusters, [integrate Azure Active Directo [aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md [aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md -[aks-quickstart-powershell]: /learn/quick-kubernetes-deploy-powershell.md +[aks-quickstart-powershell]: /azure/aks/learn/quick-kubernetes-deploy-powershell [azure-cli-install]: /cli/azure/install-azure-cli [az-aks-get-credentials]: /cli/azure/aks#az_aks_get_credentials [azure-rbac]: ../role-based-access-control/overview.md diff --git a/articles/aks/csi-secrets-store-driver.md b/articles/aks/csi-secrets-store-driver.md index 40e720b944875..875219151f17a 100644 --- a/articles/aks/csi-secrets-store-driver.md +++ b/articles/aks/csi-secrets-store-driver.md @@ -164,14 +164,14 @@ az aks disable-addons --addons azure-keyvault-secrets-provider -g myResourceGrou > When the Azure Key Vault Provider for Secrets Store 
CSI Driver is enabled, it updates the pod mount and the Kubernetes secret that's defined in the `secretObjects` field of `SecretProviderClass`. It does so by polling for changes periodically, based on the rotation poll interval you've defined. The default rotation poll interval is 2 minutes. >[!NOTE] -> When the secret/key is updated in external secrets store after the initial pod deployment, the updated secret will be periodically updated in the pod mount and the Kubernetes Secret. +> When a secret is updated in an external secrets store after initial pod deployment, the Kubernetes Secret and the pod mount will be periodically updated depending on how the application consumes the secret data. > -> Depending on how the application consumes the secret data: +> **Mount the Kubernetes Secret as a volume**: Use the auto rotation and Sync K8s secrets features of Secrets Store CSI Driver. The application will need to watch for changes from the mounted Kubernetes Secret volume. When the Kubernetes Secret is updated by the CSI Driver, the corresponding volume contents are automatically updated. > -> 1. Mount Kubernetes secret as a volume: Use auto rotation feature + Sync K8s secrets feature in Secrets Store CSI Driver, application will need to watch for changes from the mounted Kubernetes Secret volume. When the Kubernetes Secret is updated by the CSI Driver, the corresponding volume contents are automatically updated. -> 2. Application reads the data from container’s filesystem: Use rotation feature in Secrets Store CSI Driver, application will need to watch for the file change from the volume mounted by the CSI driver. -> 3. Using Kubernetes secret for environment variable: The pod needs to be restarted to get the latest secret as environment variable. -> Use something like https://github.com/stakater/Reloader to watch for changes on the synced Kubernetes secret and do rolling upgrades on pods +> **Application reads the data from the container’s filesystem**: Use the rotation feature of Secrets Store CSI Driver. The application will need to watch for the file change from the volume mounted by the CSI driver. +> +> **Use the Kubernetes Secret for an environment variable**: Restart the pod to get the latest secret as an environment variable. +> Use a tool such as [Reloader][reloader] to watch for changes on the synced Kubernetes Secret and perform rolling upgrades on pods. 
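The first pattern, mounting the synced Kubernetes Secret as a volume, looks roughly like the following pod spec. This is a minimal sketch and assumes a `SecretProviderClass` named *azure-kvname* that syncs a Kubernetes Secret named *foosecret*; both names are hypothetical. The pod must keep the Secrets Store CSI volume mounted so that the synced Secret continues to exist and be rotated:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-secrets-rotation
spec:
  containers:
    - name: nginx
      image: mcr.microsoft.com/oss/nginx/nginx:1.15.5-alpine
      volumeMounts:
        - name: secrets-store-inline      # CSI volume; keeps the synced Secret alive and rotated
          mountPath: /mnt/secrets-store
          readOnly: true
        - name: synced-secret             # synced Kubernetes Secret, updated on rotation
          mountPath: /mnt/synced-secret
          readOnly: true
  volumes:
    - name: secrets-store-inline
      csi:
        driver: secrets-store.csi.k8s.io
        readOnly: true
        volumeAttributes:
          secretProviderClass: azure-kvname   # hypothetical SecretProviderClass name
    - name: synced-secret
      secret:
        secretName: foosecret                 # hypothetical synced Secret name
```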
To enable autorotation of secrets, use the `enable-secret-rotation` flag when you create your cluster: @@ -332,3 +332,5 @@ Now that you've learned how to use the Azure Key Vault Provider for Secrets Stor [kube-csi]: https://kubernetes-csi.github.io/docs/ [key-vault-provider-install]: https://azure.github.io/secrets-store-csi-driver-provider-azure/getting-started/installation [sample-secret-provider-class]: https://azure.github.io/secrets-store-csi-driver-provider-azure/getting-started/usage/#create-your-own-secretproviderclass-object +[reloader]: https://github.com/stakater/Reloader + diff --git a/articles/aks/csi-secrets-store-nginx-tls.md b/articles/aks/csi-secrets-store-nginx-tls.md index 4c652d9fd4c37..3ad97c8fa3426 100644 --- a/articles/aks/csi-secrets-store-nginx-tls.md +++ b/articles/aks/csi-secrets-store-nginx-tls.md @@ -5,7 +5,7 @@ author: nickomang ms.author: nickoman ms.service: container-service ms.topic: how-to -ms.date: 10/19/2021 +ms.date: 05/26/2022 ms.custom: template-how-to --- @@ -15,8 +15,8 @@ This article walks you through the process of securing an NGINX Ingress Controll Importing the ingress TLS certificate to the cluster can be accomplished using one of two methods: -- **Application** - The application deployment manifest declares and mounts the provider volume. Only when the application is deployed is the certificate made available in the cluster, and when the application is removed the secret is removed as well. This scenario fits development teams who are responsible for the application’s security infrastructure and their integration with the cluster. -- **Ingress Controller** - The ingress deployment is modified to declare and mount the provider volume. The secret is imported when ingress pods are created. The application’s pods have no access to the TLS certificate. This scenario fits scenarios where one team (i.e. IT) manages and provisions infrastructure and networking components (including HTTPS TLS certificates) and other teams manage application lifecycle. In this case, ingress is specific to a single namespace/workload and is deployed in the same namespace as the application. +- **Application** - The application deployment manifest declares and mounts the provider volume. Only when the application is deployed, is the certificate made available in the cluster, and when the application is removed the secret is removed as well. This scenario fits development teams who are responsible for the application’s security infrastructure and their integration with the cluster. +- **Ingress Controller** - The ingress deployment is modified to declare and mount the provider volume. The secret is imported when ingress pods are created. The application’s pods have no access to the TLS certificate. This scenario fits scenarios where one team (for example, IT) manages and creates infrastructure and networking components (including HTTPS TLS certificates) and other teams manage application lifecycle. In this case, ingress is specific to a single namespace/workload and is deployed in the same namespace as the application. 
## Prerequisites @@ -28,18 +28,18 @@ Importing the ingress TLS certificate to the cluster can be accomplished using o ## Generate a TLS certificate ```bash -export CERT_NAME=ingresscert +export CERT_NAME=aks-ingress-cert openssl req -x509 -nodes -days 365 -newkey rsa:2048 \ - -out ingress-tls.crt \ - -keyout ingress-tls.key \ - -subj "/CN=demo.test.com/O=ingress-tls" + -out aks-ingress-tls.crt \ + -keyout aks-ingress-tls.key \ + -subj "/CN=demo.azure.com/O=aks-ingress-tls" ``` ### Import the certificate to AKV ```bash export AKV_NAME="[YOUR AKV NAME]" -openssl pkcs12 -export -in ingress-tls.crt -inkey ingress-tls.key -out $CERT_NAME.pfx +openssl pkcs12 -export -in aks-ingress-tls.crt -inkey aks-ingress-tls.key -out $CERT_NAME.pfx # skip Password prompt ``` @@ -52,11 +52,11 @@ az keyvault certificate import --vault-name $AKV_NAME -n $CERT_NAME -f $CERT_NAM First, create a new namespace: ```bash -export NAMESPACE=ingress-test +export NAMESPACE=ingress-basic ``` ```azurecli-interactive -kubectl create ns $NAMESPACE +kubectl create namespace $NAMESPACE ``` Select a [method to provide an access identity][csi-ss-identity-access] and configure your SecretProviderClass YAML accordingly. Additionally: @@ -64,7 +64,7 @@ Select a [method to provide an access identity][csi-ss-identity-access] and conf - Be sure to use `objectType=secret`, which is the only way to obtain the private key and the certificate from AKV. - Set `kubernetes.io/tls` as the `type` in your `secretObjects` section. -See the following for an example of what your SecretProviderClass might look like: +See the following example of what your SecretProviderClass might look like: ```yml apiVersion: secrets-store.csi.x-k8s.io/v1 @@ -83,6 +83,8 @@ spec: key: tls.crt parameters: usePodIdentity: "false" + useVMManagedIdentity: "true" + userAssignedIdentityID: keyvaultName: $AKV_NAME # the name of the AKV instance objects: | array: @@ -119,9 +121,9 @@ The application’s deployment will reference the Secrets Store CSI Driver's Azu helm install ingress-nginx/ingress-nginx --generate-name \ --namespace $NAMESPACE \ --set controller.replicaCount=2 \ - --set controller.nodeSelector."beta\.kubernetes\.io/os"=linux \ + --set controller.nodeSelector."kubernetes\.io/os"=linux \ --set controller.service.annotations."service\.beta\.kubernetes\.io/azure-load-balancer-health-probe-request-path"=/healthz \ - --set defaultBackend.nodeSelector."beta\.kubernetes\.io/os"=linux + --set defaultBackend.nodeSelector."kubernetes\.io/os"=linux ``` #### Bind certificate to ingress controller @@ -135,8 +137,8 @@ The ingress controller’s deployment will reference the Secrets Store CSI Drive helm install ingress-nginx/ingress-nginx --generate-name \ --namespace $NAMESPACE \ --set controller.replicaCount=2 \ - --set controller.nodeSelector."beta\.kubernetes\.io/os"=linux \ - --set defaultBackend.nodeSelector."beta\.kubernetes\.io/os"=linux \ + --set controller.nodeSelector."kubernetes\.io/os"=linux \ + --set defaultBackend.nodeSelector."kubernetes\.io/os"=linux \ --set controller.service.annotations."service\.beta\.kubernetes\.io/azure-load-balancer-health-probe-request-path"=/healthz \ --set controller.podLabels.aadpodidbinding=$AAD_POD_IDENTITY_NAME \ -f - < 80/TCP 19m ``` @@ -338,14 +438,46 @@ nginx-ingress-1588032400-default-backend ClusterIP 10.0.223.214 Use `curl` to verify your ingress has been properly configured with TLS. 
Be sure to use the external IP you've obtained from the previous step: ```bash -curl -v -k --resolve demo.test.com:443:52.xx.xx.xx https://demo.test.com +curl -v -k --resolve demo.azure.com:443:EXTERNAL_IP https://demo.azure.com +``` -# You should see output similar to the following -* subject: CN=demo.test.com; O=ingress-tls -* start date: Oct 15 04:23:46 2021 GMT -* expire date: Oct 15 04:23:46 2022 GMT -* issuer: CN=demo.test.com; O=ingress-tls +No additional path was provided with the address, so the ingress controller defaults to the */* route. The first demo application is returned, as shown in the following condensed example output: + +```console +[...] + + + + + Welcome to Azure Kubernetes Service (AKS) +[...] +``` + +The *-v* parameter in our `curl` command outputs verbose information, including the TLS certificate received. Half-way through your curl output, you can verify that your own TLS certificate was used. The *-k* parameter continues loading the page even though we're using a self-signed certificate. The following example shows that the *issuer: CN=demo.azure.com; O=aks-ingress-tls* certificate was used: + +``` +[...] +* Server certificate: +* subject: CN=demo.azure.com; O=aks-ingress-tls +* start date: Oct 22 22:13:54 2021 GMT +* expire date: Oct 22 22:13:54 2022 GMT +* issuer: CN=demo.azure.com; O=aks-ingress-tls * SSL certificate verify result: self signed certificate (18), continuing anyway. +[...] +``` + +Now add */hello-world-two* path to the address, such as `https://demo.azure.com/hello-world-two`. The second demo application with the custom title is returned, as shown in the following condensed example output: + +``` +curl -v -k --resolve demo.azure.com:443:EXTERNAL_IP https://demo.azure.com/hello-world-two + +[...] + + + + + AKS Ingress Demo +[...] ``` diff --git a/articles/aks/csi-storage-drivers.md b/articles/aks/csi-storage-drivers.md index dbac1b4b58415..5a011bd9d4313 100644 --- a/articles/aks/csi-storage-drivers.md +++ b/articles/aks/csi-storage-drivers.md @@ -1,14 +1,14 @@ --- -title: Enable Container Storage Interface (CSI) drivers on Azure Kubernetes Service (AKS) +title: Container Storage Interface (CSI) drivers in Azure Kubernetes Service (AKS) description: Learn how to enable the Container Storage Interface (CSI) drivers for Azure disks and Azure Files in an Azure Kubernetes Service (AKS) cluster. services: container-service ms.topic: article -ms.date: 05/06/2022 +ms.date: 05/23/2022 author: palma21 --- -# Enable Container Storage Interface (CSI) drivers on Azure Kubernetes Service (AKS) +# Container Storage Interface (CSI) drivers in Azure Kubernetes Service (AKS) The Container Storage Interface (CSI) is a standard for exposing arbitrary block and file storage systems to containerized workloads on Kubernetes. By adopting and using CSI, Azure Kubernetes Service (AKS) can write, deploy, and iterate plug-ins to expose new or improve existing storage systems in Kubernetes without having to touch the core Kubernetes code and wait for its release cycles. @@ -22,6 +22,9 @@ The CSI storage driver support on AKS allows you to natively use: > > *In-tree drivers* refers to the current storage drivers that are part of the core Kubernetes code opposed to the new CSI drivers, which are plug-ins. +> [!NOTE] +> Azure disk CSI driver v2 (preview) improves scalability and reduces pod failover latency. 
It uses shared disks to provision attachment replicas on multiple cluster nodes and integrates with the pod scheduler to ensure a node with an attachment replica is chosen on pod failover. Azure disk CSI driver v2 (preview) also provides the ability to fine tune performance. If you're interested in participating in the preview, submit a request: [https://aka.ms/DiskCSIv2Preview](https://aka.ms/DiskCSIv2Preview). This preview version is provided without a service level agreement, and you can occasionally expect breaking changes while in preview. The preview version isn't recommended for production workloads. For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). + ## Migrate custom in-tree storage classes to CSI If you created in-tree driver storage classes, those storage classes continue to work since CSI migration is turned on after upgrading your cluster to 1.21.x. If you want to use CSI features you'll need to perform the migration. @@ -61,7 +64,7 @@ parameters: ## Migrate in-tree persistent volumes > [!IMPORTANT] -> If your in-tree persistent volume `reclaimPolicy` is set to **Delete**, you need to change its policy to **Retain** to persist your data. This can be achieved using a [patch operation on the PV](https://kubernetes.io/docs/tasks/administer-cluster/change-pv-reclaim-policy/). For example: +> If your in-tree persistent volume `reclaimPolicy` is set to **Delete**, you need to change its policy to **Retain** to persist your data. This can be achieved using a [patch operation on the PV](https://kubernetes.io/docs/tasks/administer-cluster/change-pv-reclaim-policy/). For example: > > ```console > $ kubectl patch pv pv-azuredisk --type merge --patch '{"spec": {"persistentVolumeReclaimPolicy": "Retain"}}' @@ -93,7 +96,7 @@ If you have in-tree Azure File persistent volumes, get `secretName`, `shareName` [azure-disk-volume]: azure-disk-volume.md -[azure-disk-static-mount]: azure-disk-volume.md#mount-disk-as-volume +[azure-disk-static-mount]: azure-disk-volume.md#mount-disk-as-a-volume [azure-file-static-mount]: azure-files-volume.md#mount-file-share-as-a-persistent-volume [azure-files-pvc]: azure-files-dynamic-pv.md [premium-storage]: ../virtual-machines/disks-types.md diff --git a/articles/aks/custom-certificate-authority.md b/articles/aks/custom-certificate-authority.md new file mode 100644 index 0000000000000..4387a75b15813 --- /dev/null +++ b/articles/aks/custom-certificate-authority.md @@ -0,0 +1,140 @@ +--- +title: Custom certificate authority (CA) in Azure Kubernetes Service (AKS) (preview) +description: Learn how to use a custom certificate authority (CA) in an Azure Kubernetes Service (AKS) cluster. +services: container-service +author: erik-ha-msft +ms.author: erikha +ms.topic: article +ms.date: 4/12/2022 +--- + +# Custom certificate authority (CA) in Azure Kubernetes Service (AKS) (preview) + +Custom certificate authorities (CAs) allow you to establish trust between your Azure Kubernetes Service (AKS) cluster and your workloads, such as private registries, proxies, and firewalls. A Kubernetes secret is used to store the certificate authority's information, then it's passed to all nodes in the cluster. + +This feature is applied per nodepool, so new and existing nodepools must be configured to enable this feature. + +[!INCLUDE [preview features note](./includes/preview/preview-callout.md)] + +## Prerequisites + +* An Azure subscription. 
If you don't have an Azure subscription, you can create a [free account](https://azure.microsoft.com/free). +* [Azure CLI installed][azure-cli-install]. +* A base64 encoded certificate string. + +### Limitations + +This feature isn't currently supported for Windows nodepools. + +### Install the `aks-preview` extension + +You also need the *aks-preview* Azure CLI extension version 0.5.72 or later. Install the *aks-preview* extension by using the [az extension add][az-extension-add] command, or install any available updates by using the [az extension update][az-extension-update] command. + +```azurecli +# Install the aks-preview extension +az extension add --name aks-preview + +# Update the extension to make sure you have the latest version installed +az extension update --name aks-preview +``` + +### Register the `CustomCATrustPreview` preview feature + +Register the `CustomCATrustPreview` feature flag by using the [az feature register][az-feature-register] command: + +```azurecli +az feature register --namespace "Microsoft.ContainerService" --name "CustomCATrustPreview" +``` + +It takes a few minutes for the status to show *Registered*. Verify the registration status by using the [az feature list][az-feature-list] command: + +```azurecli +az feature list --query "[?contains(name, 'Microsoft.ContainerService/CustomCATrustPreview')].{Name:name,State:properties.state}" -o table +``` + +Refresh the registration of the *Microsoft.ContainerService* resource provider by using the [az provider register][az-provider-register] command: + +```azurecli +az provider register --namespace Microsoft.ContainerService +``` + +## Configure a new AKS cluster to use a custom CA + +To configure a new AKS cluster to use a custom CA, run the [az aks create][az-aks-create] command with the `--enable-custom-ca-trust` parameter. + +```azurecli +az aks create \ + --resource-group myResourceGroup \ + --name myAKSCluster \ + --node-count 2 \ + --enable-custom-ca-trust +``` + +## Configure a new nodepool to use a custom CA + +To configure a new nodepool to use a custom CA, run the [az aks nodepool add][az-aks-nodepool-add] command with the `--enable-custom-ca-trust` parameter. + +```azurecli +az aks nodepool add \ + --cluster-name myAKSCluster \ + --resource-group myResourceGroup \ + --name myNodepool \ + --enable-custom-ca-trust \ + --os-type Linux +``` + +## Configure an existing nodepool to use a custom CA + +To configure an existing nodepool to use a custom CA, run the [az aks nodepool update][az-aks-nodepool-update] command with the `--enable-custom-ca-trust` parameter. + +```azurecli +az aks nodepool update \ + --resource-group myResourceGroup \ + --cluster-name myAKSCluster \ + --name myNodepool \ + --enable-custom-ca-trust +``` + +## Create a Kubernetes secret with your CA information + +Create a [Kubernetes secret][kubernetes-secrets] YAML manifest with your base64 encoded certificate string in the `data` field. Data from this secret is used to update CAs on all nodes. + +You must ensure that: +* The secret is named `custom-ca-trust-secret`. +* The secret is created in the `kube-system` namespace. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: custom-ca-trust-secret + namespace: kube-system +type: Opaque +data: + ca1.crt: | + {base64EncodedCertStringHere} + ca2.crt: | + {anotherBase64EncodedCertStringHere} +``` + +To update or remove a CA, edit and apply the YAML manifest. The cluster will poll for changes and update the nodes accordingly.
This process may take a couple of minutes before changes are applied. + +## Next steps + +For more information on AKS security best practices, see [Best practices for cluster security and upgrades in Azure Kubernetes Service (AKS)][aks-best-practices-security-upgrades]. + + +[kubernetes-secrets]:https://kubernetes.io/docs/concepts/configuration/secret/ + + +[aks-best-practices-security-upgrades]: operator-best-practices-cluster-security.md +[azure-cli-install]: /cli/azure/install-azure-cli +[az-aks-create]: /cli/azure/aks#az-aks-create +[az-aks-update]: /cli/azure/aks#az-aks-update +[az-aks-nodepool-add]: /cli/azure/aks/nodepool#az-aks-nodepool-add +[az-aks-nodepool-update]: /cli/azure/aks/nodepool#az-aks-nodepool-update +[az-extension-add]: /cli/azure/extension#az-extension-add +[az-extension-update]: /cli/azure/extension#az-extension-update +[az-feature-list]: /cli/azure/feature#az-feature-list +[az-feature-register]: /cli/azure/feature#az-feature-register +[az-provider-register]: /cli/azure/provider#az-provider-register diff --git a/articles/aks/dapr.md b/articles/aks/dapr.md index 3b349f0ddfadd..b5adbe083172f 100644 --- a/articles/aks/dapr.md +++ b/articles/aks/dapr.md @@ -59,7 +59,7 @@ Global Azure cloud is supported with Arc support on the regions listed by [Azure ## Prerequisites - If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. -- Install the latest version of the [Azure CLI](/cli/azure/install-azure-cli-windows). +- Install the latest version of the [Azure CLI][install-cli]. - If you don't have one already, you need to create an [AKS cluster][deploy-cluster] or connect an [Arc-enabled Kubernetes cluster][arc-k8s-cluster]. ### Set up the Azure CLI extension for cluster extensions @@ -262,8 +262,9 @@ az k8s-extension delete --resource-group myResourceGroup --cluster-name myAKSClu [az-provider-register]: /cli/azure/provider#az-provider-register [sample-application]: ./quickstart-dapr.md [k8s-version-support-policy]: ./supported-kubernetes-versions.md?tabs=azure-cli#kubernetes-version-support-policy -[arc-k8s-cluster]: /azure-arc/kubernetes/quickstart-connect-cluster.md +[arc-k8s-cluster]: /azure/azure-arc/kubernetes/quickstart-connect-cluster [update-extension]: ./cluster-extensions.md#update-extension-instance +[install-cli]: /cli/azure/install-azure-cli [kubernetes-production]: https://docs.dapr.io/operations/hosting/kubernetes/kubernetes-production diff --git a/articles/aks/deployment-center-launcher.md b/articles/aks/deployment-center-launcher.md index 4bb2281ca7d33..291b97a536e7b 100644 --- a/articles/aks/deployment-center-launcher.md +++ b/articles/aks/deployment-center-launcher.md @@ -1,10 +1,10 @@ --- title: Deployment Center for Azure Kubernetes description: Deployment Center in Azure DevOps simplifies setting up a robust Azure DevOps pipeline for your application -ms.author: puagarw +ms.author: rayoflores ms.topic: tutorial ms.date: 07/12/2019 -author: pulkitaggarwl +author: rayoef --- # Deployment Center for Azure Kubernetes diff --git a/articles/aks/devops-pipeline.md b/articles/aks/devops-pipeline.md index 8a25a490f820a..68c0f58fbbea1 100644 --- a/articles/aks/devops-pipeline.md +++ b/articles/aks/devops-pipeline.md @@ -124,7 +124,7 @@ After the pipeline run is finished, explore what happened and then go see your a 1. Select **View environment**. -1. Select the instance if your app for the namespace you deployed to.
If you stuck to the defaults we mentioned above, then it will be the **myapp** app in the **default** namespace. +1. Select the instance of your app for the namespace you deployed to. If you stuck to the defaults we mentioned above, then it will be the **myapp** app in the **default** namespace. 1. Select the **Services** tab. diff --git a/articles/aks/draft.md b/articles/aks/draft.md index dd0bc5c98eb9f..940d548b8081d 100644 --- a/articles/aks/draft.md +++ b/articles/aks/draft.md @@ -26,32 +26,18 @@ Draft has the following commands to help ease your development on Kubernetes: - If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. - Install the latest version of the [Azure CLI](/cli/azure/install-azure-cli-windows) and the *aks-preview* extension. -- If you don't have one already, you need to create an [AKS cluster][deploy-cluster]. +- If you don't have one already, you need to create an [AKS cluster][deploy-cluster] and an Azure Container Registry instance. -### Install the `AKS-Draft` extension preview +### Install the `aks-preview` Azure CLI extension [!INCLUDE [preview features callout](./includes/preview/preview-callout.md)] -To create an AKS cluster that can use the Draft extension, you must enable the `AKS-ExtensionManager` and `AKS-Draft` feature flags on your subscription. - -Register the `AKS-ExtensionManager` and `AKS-Draft` feature flags by using the [az feature register][az-feature-register] command, as shown in the following example: - ```azurecli-interactive -az extension add --name draft -``` - -### Set up the Azure CLI extension for cluster extensions - -You'll also need the `k8s-extension` Azure CLI extension, which can be installed by running the following command: - -```azurecli-interactive -az extension add --name k8s-extension -``` +# Install the aks-preview extension +az extension add --name aks-preview -If the `k8s-extension` extension is already installed, you can update it to the latest version using the following command: - -```azurecli-interactive -az extension update --name k8s-extension +# Update the extension to make sure you have the latest version installed +az extension update --name aks-preview ``` ## Create artifacts using `draft create` @@ -120,14 +106,6 @@ You can also run the command on a specific directory using the `--destination` f az aks draft update --destination /Workspaces/ContosoAir ``` -## Delete the extension - -To delete the extension and remove Draft from your AKS cluster, you can use the following command: - -```azure-cli-interactive -az k8s-extension delete --resource-group myResourceGroup --cluster-name myAKSCluster --cluster-type managedClusters --name draft -``` - [deploy-cluster]: ./tutorial-kubernetes-deploy-cluster.md [az-feature-register]: /cli/azure/feature#az-feature-register @@ -136,3 +114,5 @@ az k8s-extension delete --resource-group myResourceGroup --cluster-name myAKSClu [sample-application]: ./quickstart-dapr.md [k8s-version-support-policy]: ./supported-kubernetes-versions.md?tabs=azure-cli#kubernetes-version-support-policy [web-app-routing]: web-app-routing.md +[az-extension-add]: /cli/azure/extension#az-extension-add +[az-extension-update]: /cli/azure/extension#az-extension-update diff --git a/articles/aks/howto-deploy-java-liberty-app-with-postgresql.md b/articles/aks/howto-deploy-java-liberty-app-with-postgresql.md index fec268159616d..ef5c02bc74a9d 100644 --- a/articles/aks/howto-deploy-java-liberty-app-with-postgresql.md +++ 
b/articles/aks/howto-deploy-java-liberty-app-with-postgresql.md @@ -81,7 +81,7 @@ The steps in this section guide you through creating an Azure Database for Postg Use the [az postgres server create](/cli/azure/postgres/server#az-postgres-server-create) command to create the DB server. The following example creates a DB server named *youruniquedbname*. Make sure *youruniqueacrname* is unique within Azure. > [!TIP] - > To help ensure a globally unique name, prepend a disambiguation string such as your intitials and the MMDD of today's date. + > To help ensure a globally unique name, prepend a disambiguation string such as your initials and the MMDD of today's date. ```bash @@ -153,7 +153,7 @@ In directory *liberty/config*, the *server.xml* is used to configure the DB conn After the offer is successfully deployed, an AKS cluster will be generated automatically. The AKS cluster is configured to connect to the ACR. Before we get started with the application, we need to extract the namespace configured for the AKS. -1. Run following command to print the current deployment file, using the `appDeploymentTemplateYamlEncoded` you saved above. The output contains all the variables we need. +1. Run the following command to print the current deployment file, using the `appDeploymentTemplateYamlEncoded` you saved above. The output contains all the variables we need. ```bash echo | base64 -d diff --git a/articles/aks/http-proxy.md b/articles/aks/http-proxy.md index 55731f98569f3..ab6adc21511eb 100644 --- a/articles/aks/http-proxy.md +++ b/articles/aks/http-proxy.md @@ -4,11 +4,11 @@ description: Use the HTTP proxy configuration feature for Azure Kubernetes Servi services: container-service author: nickomang ms.topic: article -ms.date: 09/09/2021 +ms.date: 05/23/2022 ms.author: nickoman --- -# HTTP proxy support in Azure Kubernetes Service (preview) +# HTTP proxy support in Azure Kubernetes Service Azure Kubernetes Service (AKS) clusters, whether deployed into a managed or custom virtual network, have certain outbound dependencies necessary to function properly. Previously, in environments requiring internet access to be routed through HTTP proxies, this was a problem. Nodes had no way of bootstrapping the configuration, environment variables, and certificates necessary to access internet services. @@ -16,8 +16,6 @@ This feature adds HTTP proxy support to AKS clusters, exposing a straightforward Some more complex solutions may require creating a chain of trust to establish secure communications across the network. The feature also enables installation of a trusted certificate authority onto the nodes as part of bootstrapping a cluster. -[!INCLUDE [preview features callout](./includes/preview/preview-callout.md)] - ## Limitations and other details The following scenarios are **not** supported: @@ -33,40 +31,7 @@ By default, *httpProxy*, *httpsProxy*, and *trustedCa* have no value. ## Prerequisites * An Azure subscription. If you don't have an Azure subscription, you can create a [free account](https://azure.microsoft.com/free). -* [Azure CLI installed](/cli/azure/install-azure-cli). - -### Install the `aks-preview` Azure CLI - -You also need the *aks-preview* Azure CLI extension version 0.5.25 or later. Install the *aks-preview* Azure CLI extension by using the [az extension add][az-extension-add] command. Or install any available updates by using the [az extension update][az-extension-update] command. 
- -```azurecli-interactive -# Install the aks-preview extension -az extension add --name aks-preview -# Update the extension to make sure you have the latest version installed -az extension update --name aks-preview -``` - -### Register the `HTTPProxyConfigPreview` preview feature - -To use the feature, you must also enable the `HTTPProxyConfigPreview` feature flag on your subscription. - -Register the `HTTPProxyConfigPreview` feature flag by using the [az feature register][az-feature-register] command, as shown in the following example: - -```azurecli-interactive -az feature register --namespace "Microsoft.ContainerService" --name "HTTPProxyConfigPreview" -``` - -It takes a few minutes for the status to show *Registered*. Verify the registration status by using the [az feature list][az-feature-list] command: - -```azurecli-interactive -az feature list -o table --query "[?contains(name, 'Microsoft.ContainerService/HTTPProxyConfigPreview')].{Name:name,State:properties.state}" -``` - -When ready, refresh the registration of the *Microsoft.ContainerService* resource provider by using the [az provider register][az-provider-register] command: - -```azurecli-interactive -az provider register --namespace Microsoft.ContainerService -``` +* Latest version of [Azure CLI installed](/cli/azure/install-azure-cli). ## Configuring an HTTP proxy using Azure CLI diff --git a/articles/aks/includes/keda/current-version-callout.md b/articles/aks/includes/keda/current-version-callout.md new file mode 100644 index 0000000000000..59fdc4969333b --- /dev/null +++ b/articles/aks/includes/keda/current-version-callout.md @@ -0,0 +1,11 @@ +--- +author: tomkerkhove + +ms.service: container-service +ms.topic: include +ms.date: 05/24/2022 +ms.author: tomkerkhove +--- + +> [!IMPORTANT] +> The KEDA add-on installs version *2.7.0* of KEDA on your cluster. \ No newline at end of file diff --git a/articles/aks/integrations.md b/articles/aks/integrations.md index 0e69f1088d025..cd42ba25a8b45 100644 --- a/articles/aks/integrations.md +++ b/articles/aks/integrations.md @@ -27,7 +27,7 @@ The below table shows the available add-ons. | open-service-mesh | Use Open Service Mesh with your AKS cluster. | [Open Service Mesh AKS add-on][osm] | | azure-keyvault-secrets-provider | Use Azure Keyvault Secrets Provider addon.| [Use the Azure Key Vault Provider for Secrets Store CSI Driver in an AKS cluster][keyvault-secret-provider] | | web_application_routing | Use a managed NGINX ingress Controller with your AKS cluster.| [Web Application Routing Overview][web-app-routing] | - +| keda | Event-driven autoscaling for the applications on your AKS cluster. | [Simplified application autoscaling with Kubernetes Event-driven Autoscaling (KEDA) add-on][keda]| ## Extensions @@ -47,10 +47,10 @@ The below table shows a few examples of open-source and third-party integrations |---|---|---| | [Helm][helm] | An open-source packaging tool that helps you install and manage the lifecycle of Kubernetes applications. | [Quickstart: Develop on Azure Kubernetes Service (AKS) with Helm][helm-qs] | | [Prometheus][prometheus] | An open source monitoring and alerting toolkit. | [Container insights with metrics in Prometheus format][prometheus-az-monitor], [Prometheus Helm chart][prometheus-helm-chart] | -| [Grafana][grafana] | An open-source dashboard for observability. | [Deploy Grafana on Kubernetes][grafana-install] | +| [Grafana][grafana] | An open-source dashboard for observability. 
| [Deploy Grafana on Kubernetes][grafana-install] or use [Managed Grafana][managed-grafana]| | [Couchbase][couchdb] | A distributed NoSQL cloud database. | [Install Couchbase and the Operator on AKS][couchdb-install] | | [OpenFaaS][open-faas]| An open-source framework for building serverless functions by using containers. | [Use OpenFaaS with AKS][open-faas-aks] | -| [Apache Spark][apache-spark] | An open source, fast engine for large-scale data processing. | [Run an Apache Spark job with AKS][spark-job] | +| [Apache Spark][apache-spark] | An open source, fast engine for large-scale data processing. | Running Apache Spark jobs requires a minimum node size of *Standard_D3_v2*. See [running Spark on Kubernetes][spark-kubernetes] for more details on running Spark jobs on Kubernetes. | | [Istio][istio] | An open-source service mesh. | [Istio Installation Guides][istio-install] | | [Linkerd][linkerd] | An open-source service mesh. | [Linkerd Getting Started][linkerd-install] | | [Consul][consul] | An open source, identity-based networking solution. | [Getting Started with Consul Service Mesh for Kubernetes][consul-install] | @@ -84,8 +84,10 @@ The below table shows a few examples of open-source and third-party integrations [open-faas]: https://www.openfaas.com/ [open-faas-aks]: openfaas.md [apache-spark]: https://spark.apache.org/ -[spark-job]: spark-job.md [azure-ml-overview]: ../machine-learning/how-to-attach-kubernetes-anywhere.md +[spark-kubernetes]: https://spark.apache.org/docs/latest/running-on-kubernetes.html [dapr-overview]: ./dapr.md [gitops-overview]: ../azure-arc/kubernetes/conceptual-gitops-flux2.md -[web-app-routing]: web-app-routing.md +[managed-grafana]: ../managed-grafana/overview.md +[keda]: keda-about.md +[web-app-routing]: web-app-routing.md \ No newline at end of file diff --git a/articles/aks/internal-lb.md b/articles/aks/internal-lb.md index 68ca708437c98..767eaebd8fe95 100644 --- a/articles/aks/internal-lb.md +++ b/articles/aks/internal-lb.md @@ -23,7 +23,7 @@ This article assumes that you have an existing AKS cluster. If you need an AKS c You also need the Azure CLI version 2.0.59 or later installed and configured. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. -The AKS cluster cluster identity needs permission to manage network resources if you use an existing subnet or resource group. For information see [Use kubenet networking with your own IP address ranges in Azure Kubernetes Service (AKS)][use-kubenet] or [Configure Azure CNI networking in Azure Kubernetes Service (AKS)][advanced-networking]. If you are configuring your load balancer to use an [IP address in a different subnet][different-subnet], ensure the the AKS cluster identity also has read access to that subnet. +The AKS cluster identity needs permission to manage network resources if you use an existing subnet or resource group. For information, see [Use kubenet networking with your own IP address ranges in Azure Kubernetes Service (AKS)][use-kubenet] or [Configure Azure CNI networking in Azure Kubernetes Service (AKS)][advanced-networking]. If you are configuring your load balancer to use an [IP address in a different subnet][different-subnet], ensure the AKS cluster identity also has read access to that subnet. For more information on permissions, see [Delegate AKS access to other Azure resources][aks-sp]. 
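+The following is a minimal sketch of how such a delegation might look with the Azure CLI. The resource group, virtual network, subnet, and cluster names are placeholders, and it assumes a cluster that uses a system-assigned managed identity:
+
+```azurecli
+# Resource ID of the existing subnet (names are placeholders)
+SUBNET_ID=$(az network vnet subnet show --resource-group myNetworkResourceGroup --vnet-name myVnet --name mySubnet --query id -o tsv)
+
+# Principal ID of the cluster's system-assigned managed identity
+IDENTITY_ID=$(az aks show --resource-group myResourceGroup --name myAKSCluster --query identity.principalId -o tsv)
+
+# Grant the cluster identity permission to manage network resources in the subnet
+az role assignment create --assignee $IDENTITY_ID --role "Network Contributor" --scope $SUBNET_ID
+```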
@@ -94,6 +94,89 @@ internal-app LoadBalancer 10.0.184.168 10.240.0.25 80:30225/TCP 4m For more information on configuring your load balancer in a different subnet, see [Specify a different subnet][different-subnet] +## Connect Azure Private Link service to internal load balancer (Preview) + +### Before you begin + +You must have the following resources installed: + +* The Azure CLI +* The `aks-preview` extension version 0.5.50 or later +* Kubernetes version 1.22.x or above + +#### Install the aks-preview CLI extension + +```azurecli-interactive +# Install the aks-preview extension +az extension add --name aks-preview + +# Update the extension to make sure you have the latest version installed +az extension update --name aks-preview +``` + +### Create a Private Link service connection + +To attach an Azure Private Link service to an internal load balancer, create a service manifest named `internal-lb-pls.yaml` with the service type *LoadBalancer* and the *azure-load-balancer-internal* and *azure-pls-create* annotations, as shown in the example below. For more options, refer to the [Azure Private Link Service Integration](https://kubernetes-sigs.github.io/cloud-provider-azure/development/design-docs/pls-integration/) design document. + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: internal-app + annotations: + service.beta.kubernetes.io/azure-load-balancer-internal: "true" + service.beta.kubernetes.io/azure-pls-create: "true" +spec: + type: LoadBalancer + ports: + - port: 80 + selector: + app: internal-app +``` + +Deploy the internal load balancer using the [kubectl apply][kubectl-apply] command and specify the name of your YAML manifest: + +```console +kubectl apply -f internal-lb-pls.yaml +``` + +An Azure load balancer is created in the node resource group and connected to the same virtual network as the AKS cluster. + +When you view the service details, the IP address of the internal load balancer is shown in the *EXTERNAL-IP* column. In this context, *External* is in relation to the external interface of the load balancer, not that it receives a public, external IP address. It may take a minute or two for the IP address to change from *\<pending\>* to an actual internal IP address, as shown in the following example: + +``` +$ kubectl get service internal-app + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +internal-app LoadBalancer 10.125.17.53 10.125.0.66 80:30430/TCP 64m +``` + +Additionally, a Private Link Service object is created that connects to the Frontend IP configuration of the Load Balancer associated with the Kubernetes service. Details of the Private Link Service object can be retrieved as shown in the following example: +``` +$ AKS_MC_RG=$(az aks show -g myResourceGroup --name myAKSCluster --query nodeResourceGroup -o tsv) +$ az network private-link-service list -g ${AKS_MC_RG} --query "[].{Name:name,Alias:alias}" -o table + +Name Alias +-------- ------------------------------------------------------------------------- +pls-xyz pls-xyz.abc123-defg-4hij-56kl-789mnop.eastus2.azure.privatelinkservice + +``` + +### Create a Private Endpoint to the Private Link service + +A Private Endpoint allows you to privately connect to your Kubernetes service object via the Private Link Service created above.
To do so, follow the example shown below: + +```azurecli +$ AKS_PLS_ID=$(az network private-link-service list -g ${AKS_MC_RG} --query "[].id" -o tsv) +$ az network private-endpoint create \ + -g myOtherResourceGroup \ + --name myAKSServicePE \ + --vnet-name myOtherVNET \ + --subnet pe-subnet \ + --private-connection-resource-id ${AKS_PLS_ID} \ + --connection-name connectToMyK8sService +``` + ## Use private networks When you create your AKS cluster, you can specify advanced networking settings. This approach lets you deploy the cluster into an existing Azure virtual network and subnets. One scenario is to deploy your AKS cluster into a private network connected to your on-premises environment and run services only accessible internally. For more information, see configure your own virtual network subnets with [Kubenet][use-kubenet] or [Azure CNI][advanced-networking]. @@ -143,7 +226,6 @@ Learn more about Kubernetes services at the [Kubernetes services documentation][ [kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply [kubernetes-services]: https://kubernetes.io/docs/concepts/services-networking/service/ -[aks-engine]: https://github.com/Azure/aks-engine [advanced-networking]: configure-azure-cni.md diff --git a/articles/aks/intro-kubernetes.md b/articles/aks/intro-kubernetes.md index d5600d4d26487..f40ffb5308c25 100644 --- a/articles/aks/intro-kubernetes.md +++ b/articles/aks/intro-kubernetes.md @@ -149,7 +149,6 @@ Learn more about deploying and managing AKS with the Azure CLI Quickstart. > [Deploy an AKS Cluster using Azure CLI][aks-quickstart-cli] -[aks-engine]: https://github.com/Azure/aks-engine [kubectl-overview]: https://kubernetes.io/docs/user-guide/kubectl-overview/ [compliance-doc]: https://azure.microsoft.com/overview/trusted-cloud/compliance/ diff --git a/articles/aks/keda-about.md b/articles/aks/keda-about.md new file mode 100644 index 0000000000000..68e36648197dc --- /dev/null +++ b/articles/aks/keda-about.md @@ -0,0 +1,79 @@ +--- +title: Kubernetes Event-driven Autoscaling (KEDA) (Preview) +description: Simplified application autoscaling with Kubernetes Event-driven Autoscaling (KEDA) add-on. +services: container-service +author: tomkerkhove +ms.topic: article +ms.date: 05/24/2022 +ms.author: tomkerkhove +--- + +# Simplified application autoscaling with Kubernetes Event-driven Autoscaling (KEDA) add-on (Preview) + +Kubernetes Event-driven Autoscaling (KEDA) is a single-purpose and lightweight component that strives to make application autoscaling simple and is a CNCF Incubation project. + +It applies event-driven autoscaling to scale your application to meet demand in a sustainable and cost-efficient manner with scale-to-zero. + +The KEDA add-on makes it even easier by deploying a managed KEDA installation, providing you with [a rich catalog of 50+ KEDA scalers][keda-scalers] that you can scale your applications with on your Azure Kubernetes Services (AKS) cluster. + +[!INCLUDE [preview features callout](./includes/preview/preview-callout.md)] + +## Architecture + +[KEDA][keda] provides two main components: + +- **KEDA operator** allows end-users to scale workloads in/out from 0 to N instances with support for Kubernetes Deployments, Jobs, StatefulSets or any custom resource that defines `/scale` subresource. +- **Metrics server** exposes external metrics to Horizontal Pod Autoscaler (HPA) in Kubernetes for autoscaling purposes such as messages in a Kafka topic, or number of events in an Azure event hub. 
Due to upstream limitations, KEDA must be the only installed metric adapter. + +![Diagram that shows the architecture of K E D A and how it extends Kubernetes instead of re-inventing the wheel.](./media/keda/architecture.png) + +Learn more about how KEDA works in the [official KEDA documentation][keda-architecture]. + +## Installation and version + +KEDA can be added to your Azure Kubernetes Service (AKS) cluster by enabling the KEDA add-on using an [ARM template][keda-arm]. + +The KEDA add-on provides a fully supported installation of KEDA that is integrated with AKS. + +[!INCLUDE [Current version callout](./includes/keda/current-version-callout.md)] + +## Capabilities and features + +KEDA provides the following capabilities and features: + +- Build sustainable and cost-efficient applications with scale-to-zero +- Scale application workloads to meet demand using [a rich catalog of 50+ KEDA scalers][keda-scalers] +- Autoscale applications with `ScaledObjects`, such as Deployments, StatefulSets or any custom resource that defines `/scale` subresource +- Autoscale job-like workloads with `ScaledJobs` +- Use production-grade security by decoupling autoscaling authentication from workloads +- Bring-your-own external scaler to use tailor-made autoscaling decisions + +## Add-on limitations + +The KEDA AKS add-on has the following limitations: + +* KEDA's [HTTP add-on (preview)][keda-http-add-on] to scale HTTP workloads isn't installed with the extension, but can be deployed separately. +* KEDA's [external scaler for Azure Cosmos DB][keda-cosmos-db-scaler] to scale based on Azure Cosmos DB change feed isn't installed with the extension, but can be deployed separately. +* Only one metric server is allowed in the Kubernetes cluster. Because of that the KEDA add-on should be the only metrics server inside the cluster. + * Multiple KEDA installations aren't supported +* Managed identity isn't supported. + +For general KEDA questions, we recommend [visiting the FAQ overview][keda-faq]. + +## Next steps + +* [Enable the KEDA add-on with an ARM template][keda-arm] +* [Autoscale a .NET Core worker processing Azure Service Bus Queue messages][keda-sample] + + +[keda-azure-cli]: keda-deploy-addon-az-cli.md +[keda-arm]: keda-deploy-add-on-arm.md + + +[keda]: https://keda.sh/ +[keda-architecture]: https://keda.sh/docs/latest/concepts/ +[keda-faq]: https://keda.sh/docs/latest/faq/ +[keda-sample]: https://github.com/kedacore/sample-dotnet-worker-servicebus-queue +[keda-scalers]: https://keda.sh/docs/scalers/ +[keda-http-add-on]: https://github.com/kedacore/http-add-on +[keda-cosmos-db-scaler]: https://github.com/kedacore/external-scaler-azure-cosmos-db diff --git a/articles/aks/keda-deploy-add-on-arm.md b/articles/aks/keda-deploy-add-on-arm.md new file mode 100644 index 0000000000000..53421176639ec --- /dev/null +++ b/articles/aks/keda-deploy-add-on-arm.md @@ -0,0 +1,157 @@ +--- +title: Deploy the Kubernetes Event-driven Autoscaling (KEDA) add-on by using an ARM template +description: Use an ARM template to deploy the Kubernetes Event-driven Autoscaling (KEDA) add-on to Azure Kubernetes Service (AKS). +services: container-service +author: jahabibi +ms.topic: article +ms.date: 05/24/2022 +ms.author: jahabibi +--- + +# Deploy the Kubernetes Event-driven Autoscaling (KEDA) add-on by using ARM template + +This article shows you how to deploy the Kubernetes Event-driven Autoscaling (KEDA) add-on to Azure Kubernetes Service (AKS) by using an [ARM](../azure-resource-manager/templates/index.yml) template. 
+ +[!INCLUDE [Current version callout](./includes/keda/current-version-callout.md)] + +[!INCLUDE [preview features callout](./includes/preview/preview-callout.md)] + +## Prerequisites + +> [!NOTE] +> KEDA is currently only available in the `westcentralus` region. + +- An Azure subscription. If you don't have an Azure subscription, you can create a [free account](https://azure.microsoft.com/free). +- [Azure CLI installed](/cli/azure/install-azure-cli). + +### Register the `AKS-KedaPreview` feature flag + +To use KEDA, you must enable the `AKS-KedaPreview` feature flag on your subscription. + +```azurecli +az feature register --name AKS-KedaPreview --namespace Microsoft.ContainerService +``` + +You can check on the registration status by using the `az feature list` command: + +```azurecli-interactive +az feature list -o table --query "[?contains(name, 'Microsoft.ContainerService/AKS-KedaPreview')].{Name:name,State:properties.state}" +``` + +When ready, refresh the registration of the *Microsoft.ContainerService* resource provider by using the `az provider register` command: + +```azurecli-interactive +az provider register --namespace Microsoft.ContainerService +``` + +## Deploy the KEDA add-on with Azure Resource Manager (ARM) templates + +The KEDA add-on can be enabled by deploying an AKS cluster with an Azure Resource Manager template and specifying the `workloadAutoScalerProfile` field: + +```json + "workloadAutoScalerProfile": { + "keda": { + "enabled": true + } + } +``` + +## Connect to your AKS cluster + +To connect to the Kubernetes cluster from your local computer, you use [kubectl][kubectl], the Kubernetes command-line client. + +If you use the Azure Cloud Shell, `kubectl` is already installed. You can also install it locally using the [az aks install-cli][az aks install-cli] command: + +```azurecli +az aks install-cli +``` + +To configure `kubectl` to connect to your Kubernetes cluster, use the [az aks get-credentials][az aks get-credentials] command. The following example gets credentials for the AKS cluster named *MyAKSCluster* in the *MyResourceGroup*: + +```azurecli +az aks get-credentials --resource-group MyResourceGroup --name MyAKSCluster +``` + +## Example deployment + +The following snippet is a sample deployment that creates a cluster with KEDA enabled, with a single node pool consisting of three `Standard_D2S_v5` nodes.
+ +```json +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "resources": [ + { + "apiVersion": "2022-05-02-preview", + "dependsOn": [], + "type": "Microsoft.ContainerService/managedClusters", + "location": "westcentralus", + "name": "myAKSCluster", + "properties": { + "kubernetesVersion": "1.23.5", + "enableRBAC": true, + "dnsPrefix": "myAKSCluster", + "agentPoolProfiles": [ + { + "name": "agentpool", + "osDiskSizeGB": 200, + "count": 3, + "enableAutoScaling": false, + "vmSize": "Standard_D2S_v5", + "osType": "Linux", + "storageProfile": "ManagedDisks", + "type": "VirtualMachineScaleSets", + "mode": "System", + "maxPods": 110, + "availabilityZones": [], + "nodeTaints": [], + "enableNodePublicIP": false + } + ], + "networkProfile": { + "loadBalancerSku": "standard", + "networkPlugin": "kubenet" + }, + "workloadAutoScalerProfile": { + "keda": { + "enabled": true + } + } + }, + "identity": { + "type": "SystemAssigned" + } + } + ] +} +``` + +## Start scaling apps with KEDA + +Now that KEDA is installed, you can start autoscaling your apps with KEDA by using its custom resource definitions (CRDs). + +To learn more about KEDA CRDs, follow the official [KEDA documentation][keda-scalers] to define your scaler. + +## Clean Up + +To remove the resource group and all related resources, use the [az group delete][az-group-delete] command: + +```azurecli +az group delete --name MyResourceGroup +``` +## Next steps + +This article showed you how to install the KEDA add-on on an AKS cluster, and then verify that it's installed and running. With the KEDA add-on installed on your cluster, you can [deploy a sample application][keda-sample] to start scaling apps. + + +[az-aks-create]: /cli/azure/aks#az-aks-create +[az aks install-cli]: /cli/azure/aks#az-aks-install-cli +[az aks get-credentials]: /cli/azure/aks#az-aks-get-credentials +[az aks update]: /cli/azure/aks#az-aks-update +[az-group-delete]: /cli/azure/group#az-group-delete + + +[kubectl]: https://kubernetes.io/docs/user-guide/kubectl +[keda]: https://keda.sh/ +[keda-scalers]: https://keda.sh/docs/scalers/ +[keda-sample]: https://github.com/kedacore/sample-dotnet-worker-servicebus-queue diff --git a/articles/aks/keda-integrations.md b/articles/aks/keda-integrations.md new file mode 100644 index 0000000000000..71b8c744bbb1a --- /dev/null +++ b/articles/aks/keda-integrations.md @@ -0,0 +1,60 @@ +--- +title: Integrations with Kubernetes Event-driven Autoscaling (KEDA) on Azure Kubernetes Service (AKS) (Preview) +description: Integrations with Kubernetes Event-driven Autoscaling (KEDA) on Azure Kubernetes Service (AKS) (Preview). +services: container-service +author: tomkerkhove +ms.topic: article +ms.date: 05/24/2022 +ms.author: tomkerkhove +--- + +# Integrations with Kubernetes Event-driven Autoscaling (KEDA) on Azure Kubernetes Service (AKS) (Preview) + +The Kubernetes Event-driven Autoscaling (KEDA) add-on integrates with features provided by Azure and open source projects. + +[!INCLUDE [preview features callout](./includes/preview/preview-callout.md)] + +> [!IMPORTANT] +> Integrations with open source projects are not covered by the [AKS support policy][aks-support-policy]. + +## Observe your autoscaling with Kubernetes events + +KEDA automatically emits Kubernetes events, allowing customers to operate their application autoscaling. + +To learn about the available events, we recommend reading the [KEDA documentation][keda-event-docs].
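+A quick way to see these events from the command line is sketched below. The ScaledObject name `order-processor-scaler` is a placeholder, and the commands assume the KEDA add-on and a ScaledObject are already deployed:
+
+```console
+# Show the events recorded for a specific ScaledObject (name is a placeholder)
+kubectl describe scaledobject order-processor-scaler
+
+# Or list recent cluster events and filter for those emitted by KEDA
+kubectl get events --all-namespaces --sort-by=.metadata.creationTimestamp | grep -i keda
+```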
+ +## Scalers for Azure services + +KEDA can integrate with various tools and services through [a rich catalog of 50+ KEDA scalers][keda-scalers]. It supports leading cloud platforms (such as Azure) and open-source technologies such as Redis and Kafka. + +It leverages the following scalers for Azure services: + +- [Azure Application Insights](https://keda.sh/docs/latest/scalers/azure-app-insights/) +- [Azure Blob Storage](https://keda.sh/docs/latest/scalers/azure-storage-blob/) +- [Azure Data Explorer](https://keda.sh/docs/latest/scalers/azure-data-explorer/) +- [Azure Event Hubs](https://keda.sh/docs/latest/scalers/azure-event-hub/) +- [Azure Log Analytics](https://keda.sh/docs/latest/scalers/azure-log-analytics/) +- [Azure Monitor](https://keda.sh/docs/latest/scalers/azure-monitor/) +- [Azure Pipelines](https://keda.sh/docs/latest/scalers/azure-pipelines/) +- [Azure Service Bus](https://keda.sh/docs/latest/scalers/azure-service-bus/) +- [Azure Storage Queue](https://keda.sh/docs/latest/scalers/azure-storage-queue/) + +Next to the built-in scalers, you can install external scalers yourself to autoscale on other Azure services: + +- [Azure Cosmos DB (Change feed)](https://github.com/kedacore/external-scaler-azure-cosmos-db) + +However, these external scalers aren't supported as part of the add-on and rely on community support. + +## Next steps + +* [Enable the KEDA add-on with an ARM template][keda-arm] +* [Autoscale a .NET Core worker processing Azure Service Bus Queue message][keda-sample] + + +[aks-support-policy]: support-policies.md +[keda-arm]: keda-deploy-add-on-arm.md + + +[keda-scalers]: https://keda.sh/docs/latest/scalers/ +[keda-event-docs]: https://keda.sh/docs/latest/operate/events/ +[keda-sample]: https://github.com/kedacore/sample-dotnet-worker-servicebus-queue diff --git a/articles/aks/keda.md b/articles/aks/keda.md deleted file mode 100644 index 2aa63d24d41bb..0000000000000 --- a/articles/aks/keda.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: KEDA add-on on Azure Kubernetes Service (AKS) (Preview) -description: Use the KEDA add-on to deploy a managed KEDA instance on Azure Kubernetes Service (AKS). -services: container-service -author: jahabibi -ms.topic: article -ms.custom: event-tier1-build-2022 -ms.date: 05/13/2021 -ms.author: jahabibi ---- - -# Simplified application autoscaling with Kubernetes Event-driven Autoscaling (KEDA) add-on (Preview) - -Kubernetes Event-driven Autoscaling (KEDA) is a single-purpose and lightweight component that strives to make application autoscaling simple and is a CNCF Incubation project. - -The KEDA add-on makes it even easier by deploying a managed KEDA installation, providing you with [a rich catalog of 40+ KEDA scalers](https://keda.sh/docs/latest/scalers/) that you can scale your applications with on your Azure Kubernetes Services (AKS) cluster. - -[!INCLUDE [preview features callout](./includes/preview/preview-callout.md)] - -## KEDA add-on overview - -[KEDA][keda] provides two main components: - -- **KEDA operator** allows end-users to scale workloads in/out from 0 to N instances with support for Kubernetes Deployments, Jobs, StatefulSets or any custom resource that defines `/scale` subresource. -- **Metrics server** exposes external metrics to HPA in Kubernetes for autoscaling purposes such as messages in a Kafka topic, or number of events in an Azure event hub. Due to upstream limitations, this must be the only installed metric adapter. - -## Prerequisites - -- An Azure subscription. 
If you don't have an Azure subscription, you can create a [free account](https://azure.microsoft.com/free). -- [Azure CLI installed](/cli/azure/install-azure-cli). - -## Deploy the KEDA add-on with Azure CLI - -The KEDA add-on can be enabled with the Azure CLI when deploying an AKS cluster. - -To do so, use the [az aks create][az-aks-create] command with the `--enable-keda` argument. - -```azurecli -az aks create --resource-group MyResourceGroup --name MyAKSCluster --enable-keda -``` - -Additionally, KEDA can be deployed to an existing cluster via the [az aks update][az aks update] command. - -```azure cli -az aks update --resource-group MyResourceGroup --name MyAKSCluster --enable-keda -``` - -## Connect to your AKS cluster - -To connect to the Kubernetes cluster from your local computer, you use [kubectl][kubectl], the Kubernetes command-line client. - -If you use the Azure Cloud Shell, `kubectl` is already installed. You can also install it locally using the [az aks install-cli][az aks install-cli] command: - -```azurecli -az aks install-cli -``` - -To configure `kubectl` to connect to your Kubernetes cluster, use the [az aks get-credentials][az aks get-credentials] command. The following example gets credentials for the AKS cluster named *MyAKSCluster* in the *MyResourceGroup*: - -```azurecli -az aks get-credentials --resource-group MyResourceGroup --name MyAKSCluster -``` - -## Use KEDA -KEDA scaling will only work once a custom resource definition has been defined (CRD). To learn more about KEDA CRDs, follow the official [KEDA documentation][keda-scalers] to define your scaler. - -## Clean Up -To remove KEDA, utilize the `--disable-keda` flag. - -```azurecli -az aks update --resource-group MyResourceGroup --name MyAKSCluster --disable-keda -``` - -To remove the resource group, and all related resources, use the [az group delete][az-group-delete] command: - -```azurecli -az group delete --name MyResourceGroup -``` - - - -[az-aks-create]: /cli/azure/aks#az-aks-create -[az aks install-cli]: /cli/azure/aks#az-aks-install-cli -[az aks get-credentials]: /cli/azure/aks#az-aks-get-credentials -[az aks update]: /cli/azure/aks#az-aks-update -[az-group-delete]: /cli/azure/group#az-group-delete - - -[kubectl]: https://kubernetes.io/docs/user-guide/kubectl -[keda]: https://keda.sh/ -[keda-scalers]: https://keda.sh/docs/scalers/ diff --git a/articles/aks/kubernetes-service-principal.md b/articles/aks/kubernetes-service-principal.md index 55a74da813c46..02308f193943b 100644 --- a/articles/aks/kubernetes-service-principal.md +++ b/articles/aks/kubernetes-service-principal.md @@ -1,62 +1,34 @@ --- -title: Service principals for Azure Kubernetes Services (AKS) -description: Create and manage an Azure Active Directory service principal for a cluster in Azure Kubernetes Service (AKS) +title: Use a service principal with Azure Kubernetes Services (AKS) +description: Create and manage an Azure Active Directory service principal with a cluster in Azure Kubernetes Service (AKS) services: container-service ms.topic: conceptual -ms.date: 12/06/2021 +ms.date: 06/08/2022 ms.custom: devx-track-azurepowershell, devx-track-azurecli #Customer intent: As a cluster operator, I want to understand how to create a service principal and delegate permissions for AKS to access required resources. In large enterprise environments, the user that deploys the cluster (or CI/CD system), may not have permissions to create this service principal automatically when the cluster is created. 
--- -# Service principals with Azure Kubernetes Service (AKS) +# Use a service principal with Azure Kubernetes Service (AKS) -To interact with Azure APIs, an AKS cluster requires either an [Azure Active Directory (AD) service principal][aad-service-principal] or a [managed identity](use-managed-identity.md). A service principal or managed identity is needed to dynamically create and manage other Azure resources such as an Azure load balancer or container registry (ACR). +To access other Azure resources, an AKS cluster requires either an [Azure Active Directory (AD) service principal][aad-service-principal] or a [managed identity][managed-identity-resources-overview]. A service principal or managed identity is needed to dynamically create and manage other Azure resources such as an Azure load balancer or container registry (ACR). + +Managed identities are the recommended way to authenticate with other resources in Azure, and are the default authentication method for your AKS cluster. For more information about using a managed identity with your cluster, see [Use a system-assigned managed identity][use-managed-identity]. This article shows how to create and use a service principal for your AKS clusters. ## Before you begin -To create an Azure AD service principal, you must have permissions to register an application with your Azure AD tenant, and to assign the application to a role in your subscription. If you don't have the necessary permissions, you might need to ask your Azure AD or subscription administrator to assign the necessary permissions, or pre-create a service principal for you to use with the AKS cluster. - -If you are using a service principal from a different Azure AD tenant, there are additional considerations around the permissions available when you deploy the cluster. You may not have the appropriate permissions to read and write directory information. For more information, see [What are the default user permissions in Azure Active Directory?][azure-ad-permissions] - -### [Azure CLI](#tab/azure-cli) - -You also need the Azure CLI version 2.0.59 or later installed and configured. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. - -### [Azure PowerShell](#tab/azure-powershell) - -You also need Azure PowerShell version 5.0.0 or later installed. Run `Get-InstalledModule -Name Az` to find the version. If you need to install or upgrade, see [Install the Azure Az PowerShell module][install-the-azure-az-powershell-module]. +To create an Azure AD service principal, you must have permissions to register an application with your Azure AD tenant, and to assign the application to a role in your subscription. If you don't have the necessary permissions, you need to ask your Azure AD or subscription administrator to assign the necessary permissions, or pre-create a service principal for you to use with the AKS cluster. ---- - -## Automatically create and use a service principal - -### [Azure CLI](#tab/azure-cli) +If you're using a service principal from a different Azure AD tenant, there are other considerations around the permissions available when you deploy the cluster. You may not have the appropriate permissions to read and write directory information.
For more information, see [What are the default user permissions in Azure Active Directory?][azure-ad-permissions] -When you create an AKS cluster in the Azure portal or using the [az aks create][az-aks-create] command, Azure creates a managed identity. +## Prerequisites -In the following Azure CLI example, a service principal is not specified. In this scenario, the Azure CLI creates a managed identity for the AKS cluster. +Azure CLI version 2.0.59 or later. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. -```azurecli -az aks create --name myAKSCluster --resource-group myResourceGroup -``` - -### [Azure PowerShell](#tab/azure-powershell) +Azure PowerShell version 5.0.0 or later. Run `Get-InstalledModule -Name Az` to find the version. If you need to install or upgrade, see [Install the Azure Az PowerShell module][install-the-azure-az-powershell-module]. -When you create an AKS cluster in the Azure portal or using the [New-AzAksCluster][new-azakscluster] command, Azure can generate a new managed identity . - -In the following Azure PowerShell example, a service principal is not specified. In this scenario, Azure PowerShell creates a managed identity for the AKS cluster. - -```azurepowershell-interactive -New-AzAksCluster -Name myAKSCluster -ResourceGroupName myResourceGroup -``` - -> [!NOTE] -> For error "Service principal clientID: 00000000-0000-0000-0000-000000000000 not found in Active Directory tenant 00000000-0000-0000-0000-000000000000", see [Additional considerations](#additional-considerations) to remove the `acsServicePrincipal.json` file. - ---- ## Manually create a service principal ### [Azure CLI](#tab/azure-cli) @@ -67,7 +39,7 @@ To manually create a service principal with the Azure CLI, use the [az ad sp cre az ad sp create-for-rbac --name myAKSClusterServicePrincipal ``` -The output is similar to the following example. Make a note of your own `appId` and `password`. These values are used when you create an AKS cluster in the next section. +The output is similar to the following example. Copy the values for `appId` and `password`. These values are used when you create an AKS cluster in the next section. ```json { @@ -99,7 +71,7 @@ Id : 559513bd-0c19-4c1a-87cd-851a26afd5fc Type : ``` -To decrypt the value stored in the **Secret** secure string, you use the following example. +To decrypt the value stored in the **Secret** secure string, run the following command: ```azurepowershell-interactive $BSTR = [System.Runtime.InteropServices.Marshal]::SecureStringToBSTR($sp.Secret) @@ -125,14 +97,7 @@ az aks create \ ``` > [!NOTE] -> If you're using an existing service principal with customized secret, ensure the secret is no longer than 190 bytes. - -If you deploy an AKS cluster using the Azure portal, on the *Authentication* page of the **Create Kubernetes cluster** dialog, choose to **Configure service principal**. Select **Use existing**, and specify the following values: - -- **Service principal client ID** is your *appId* -- **Service principal client secret** is the *password* value - -![Image of browsing to Azure Vote](media/kubernetes-service-principal/portal-configure-service-principal.png) +> If you're using an existing service principal with customized secret, ensure the secret is not longer than 190 bytes. 
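+Before creating the cluster with an existing service principal, you can optionally confirm that the service principal resolves in your tenant. A minimal sketch, where the GUID is a placeholder for your own `appId`:
+
+```azurecli
+# Replace the value with the appId returned when the service principal was created
+APP_ID=559513bd-0c19-4c1a-87cd-851a26afd5fc
+az ad sp show --id $APP_ID --query displayName -o tsv
+```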
### [Azure PowerShell](#tab/azure-powershell) @@ -151,13 +116,6 @@ New-AzAksCluster -ResourceGroupName myResourceGroup -Name myAKSCluster -ServiceP > [!NOTE] > If you're using an existing service principal with customized secret, ensure the secret is no longer than 190 bytes. -If you deploy an AKS cluster using the Azure portal, on the *Authentication* page of the **Create Kubernetes cluster** dialog, choose to **Configure service principal**. Select **Use existing**, and specify the following values: - -- **Service principal client ID** is your *ApplicationId* -- **Service principal client secret** is the decrypted *Secret* value - -![Image of browsing to Azure Vote](media/kubernetes-service-principal/portal-configure-service-principal.png) - --- ## Delegate access to other Azure resources @@ -188,9 +146,9 @@ The `Scope` for a resource needs to be a full resource ID, such as */subscriptio > [!NOTE] > If you have removed the Contributor role assignment from the node resource group, the operations below may fail. -> Permission grants to clusters using System Managed Identity may take up 60 minutes to populate. +> Permission granted to a cluster using a system-assigned managed identity may take up 60 minutes to populate. -The following sections detail common delegations that you may need to make. +The following sections detail common delegations that you may need to assign. ### Azure Container Registry @@ -206,11 +164,11 @@ If you use Azure Container Registry (ACR) as your container image store, you nee ### Networking -You may use advanced networking where the virtual network and subnet or public IP addresses are in another resource group. Assign the [Network Contributor][rbac-network-contributor] built-in role on the subnet within the virtual network. Alternatively, you can create a [custom role][rbac-custom-role] with permissions to access the network resources in that resource group. See [AKS service permissions][aks-permissions] for more details. +You may use advanced networking where the virtual network and subnet or public IP addresses are in another resource group. Assign the [Network Contributor][rbac-network-contributor] built-in role on the subnet within the virtual network. Alternatively, you can create a [custom role][rbac-custom-role] with permissions to access the network resources in that resource group. For more information, see [AKS service permissions][aks-permissions]. ### Storage -You may need to access existing Disk resources in another resource group. Assign one of the following set of role permissions: +If you need to access existing disk resources in another resource group, assign one of the following set of role permissions: - Create a [custom role][rbac-custom-role] and define the following role permissions: - *Microsoft.Compute/disks/read* @@ -219,24 +177,24 @@ You may need to access existing Disk resources in another resource group. Assign ### Azure Container Instances -If you use Virtual Kubelet to integrate with AKS and choose to run Azure Container Instances (ACI) in resource group separate to the AKS cluster, the AKS service principal must be granted *Contributor* permissions on the ACI resource group. +If you use Virtual Kubelet to integrate with AKS and choose to run Azure Container Instances (ACI) in resource group separate from the AKS cluster, the AKS cluster service principal must be granted *Contributor* permissions on the ACI resource group. 
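+As an illustration only, the following sketch grants that permission with the Azure CLI. The resource group and cluster names are placeholders, and it assumes the cluster was created with a service principal:
+
+```azurecli
+# Client ID of the service principal used by the cluster (names are placeholders)
+SP_ID=$(az aks show --resource-group myResourceGroup --name myAKSCluster --query servicePrincipalProfile.clientId -o tsv)
+
+# Resource ID of the resource group that hosts the ACI instances
+ACI_RG_ID=$(az group show --name myACIResourceGroup --query id -o tsv)
+
+# Grant the service principal Contributor permissions on that resource group
+az role assignment create --assignee $SP_ID --role "Contributor" --scope $ACI_RG_ID
+```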
-## Additional considerations +## Other considerations ### [Azure CLI](#tab/azure-cli) -When using AKS and Azure AD service principals, keep the following considerations in mind. +When using AKS and an Azure AD service principal, consider the following: -- The service principal for Kubernetes is a part of the cluster configuration. However, don't use the identity to deploy the cluster. +- The service principal for Kubernetes is a part of the cluster configuration. However, don't use this identity to deploy the cluster. - By default, the service principal credentials are valid for one year. You can [update or rotate the service principal credentials][update-credentials] at any time. - Every service principal is associated with an Azure AD application. The service principal for a Kubernetes cluster can be associated with any valid Azure AD application name (for example: *https://www.contoso.org/example*). The URL for the application doesn't have to be a real endpoint. - When you specify the service principal **Client ID**, use the value of the `appId`. - On the agent node VMs in the Kubernetes cluster, the service principal credentials are stored in the file `/etc/kubernetes/azure.json` - When you use the [az aks create][az-aks-create] command to generate the service principal automatically, the service principal credentials are written to the file `~/.azure/aksServicePrincipal.json` on the machine used to run the command. -- If you do not specifically pass a service principal in additional AKS CLI commands, the default service principal located at `~/.azure/aksServicePrincipal.json` is used. -- You can also optionally remove the aksServicePrincipal.json file, and AKS will create a new service principal. -- When you delete an AKS cluster that was created by [az aks create][az-aks-create], the service principal that was created automatically is not deleted. - - To delete the service principal, query for your cluster *servicePrincipalProfile.clientId* and then delete with [az ad sp delete][az-ad-sp-delete]. Replace the following resource group and cluster names with your own values: +- If you don't specify a service principal with AKS CLI commands, the default service principal located at `~/.azure/aksServicePrincipal.json` is used. +- You can optionally remove the `aksServicePrincipal.json` file, and AKS creates a new service principal. +- When you delete an AKS cluster that was created by [az aks create][az-aks-create], the service principal created automatically isn't deleted. + - To delete the service principal, query for your clusters *servicePrincipalProfile.clientId* and then delete it using the [az ad sp delete][az-ad-sp-delete] command. Replace the values for the `-g` parameter for the resource group name, and `-n` parameter for the cluster name: ```azurecli az ad sp delete --id $(az aks show -g myResourceGroup -n myAKSCluster --query servicePrincipalProfile.clientId -o tsv) @@ -244,18 +202,18 @@ When using AKS and Azure AD service principals, keep the following consideration ### [Azure PowerShell](#tab/azure-powershell) -When using AKS and Azure AD service principals, keep the following considerations in mind. +When using AKS and an Azure AD service principal, consider the following: -- The service principal for Kubernetes is a part of the cluster configuration. However, don't use the identity to deploy the cluster. +- The service principal for Kubernetes is a part of the cluster configuration. However, don't use this identity to deploy the cluster. 
- By default, the service principal credentials are valid for one year. You can [update or rotate the service principal credentials][update-credentials] at any time. - Every service principal is associated with an Azure AD application. The service principal for a Kubernetes cluster can be associated with any valid Azure AD application name (for example: *https://www.contoso.org/example*). The URL for the application doesn't have to be a real endpoint. - When you specify the service principal **Client ID**, use the value of the `ApplicationId`. - On the agent node VMs in the Kubernetes cluster, the service principal credentials are stored in the file `/etc/kubernetes/azure.json` - When you use the [New-AzAksCluster][new-azakscluster] command to generate the service principal automatically, the service principal credentials are written to the file `~/.azure/acsServicePrincipal.json` on the machine used to run the command. -- If you do not specifically pass a service principal in additional AKS PowerShell commands, the default service principal located at `~/.azure/acsServicePrincipal.json` is used. -- You can also optionally remove the acsServicePrincipal.json file, and AKS will create a new service principal. -- When you delete an AKS cluster that was created by [New-AzAksCluster][new-azakscluster], the service principal that was created automatically is not deleted. - - To delete the service principal, query for your cluster *ServicePrincipalProfile.ClientId* and then delete with [Remove-AzADServicePrincipal][remove-azadserviceprincipal]. Replace the following resource group and cluster names with your own values: +- If you don't specify a service principal with AKS PowerShell commands, the default service principal located at `~/.azure/acsServicePrincipal.json` is used. +- You can optionally remove the `acsServicePrincipal.json` file, and AKS creates a new service principal. +- When you delete an AKS cluster that was created by [New-AzAksCluster][new-azakscluster], the service principal created automatically isn't deleted. + - To delete the service principal, query for your clusters *ServicePrincipalProfile.ClientId* and then delete it using the [Remove-AzADServicePrincipal][remove-azadserviceprincipal] command. Replace the values for the `-ResourceGroupName` parameter for the resource group name, and `-Name` parameter for the cluster name: ```azurepowershell-interactive $ClientId = (Get-AzAksCluster -ResourceGroupName myResourceGroup -Name myAKSCluster ).ServicePrincipalProfile.ClientId @@ -267,7 +225,7 @@ When using AKS and Azure AD service principals, keep the following consideration ### [Azure CLI](#tab/azure-cli) -The service principal credentials for an AKS cluster are cached by the Azure CLI. If these credentials have expired, you encounter errors deploying AKS clusters. The following error message when running [az aks create][az-aks-create] may indicate a problem with the cached service principal credentials: +The service principal credentials for an AKS cluster are cached by the Azure CLI. If these credentials have expired, you encounter errors during deployment of the AKS cluster. The following error message when running [az aks create][az-aks-create] may indicate a problem with the cached service principal credentials: ```console Operation failed with status: 'Bad Request'. @@ -275,17 +233,17 @@ Details: The credentials in ServicePrincipalProfile were invalid. Please see htt (Details: adal: Refresh request failed. Status Code = '401'. 
``` -Check the age of the credentials file using the following command: +Check the age of the credentials file by running the following command: ```console ls -la $HOME/.azure/aksServicePrincipal.json ``` -The default expiration time for the service principal credentials is one year. If your *aksServicePrincipal.json* file is older than one year, delete the file and try to deploy an AKS cluster again. +The default expiration time for the service principal credentials is one year. If your *aksServicePrincipal.json* file is older than one year, delete the file and retry deploying the AKS cluster. ### [Azure PowerShell](#tab/azure-powershell) -The service principal credentials for an AKS cluster are cached by Azure PowerShell. If these credentials have expired, you encounter errors deploying AKS clusters. The following error message when running [New-AzAksCluster][new-azakscluster] may indicate a problem with the cached service principal credentials: +The service principal credentials for an AKS cluster are cached by Azure PowerShell. If these credentials have expired, you encounter errors during deployment of the AKS cluster. The following error message when running [New-AzAksCluster][new-azakscluster] may indicate a problem with the cached service principal credentials: ```console Operation failed with status: 'Bad Request'. @@ -293,13 +251,13 @@ Details: The credentials in ServicePrincipalProfile were invalid. Please see htt (Details: adal: Refresh request failed. Status Code = '401'. ``` -Check the age of the credentials file using the following command: +Check the age of the credentials file by running the following command: ```azurepowershell-interactive Get-ChildItem -Path $HOME/.azure/aksServicePrincipal.json ``` -The default expiration time for the service principal credentials is one year. If your *aksServicePrincipal.json* file is older than one year, delete the file and try to deploy an AKS cluster again. +The default expiration time for the service principal credentials is one year. If your *aksServicePrincipal.json* file is older than one year, delete the file and retry deploying the AKS cluster. 
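As an illustrative sketch (the resource group and cluster names are placeholders), you could remove the stale cached file and then retry the deployment so a new service principal is generated and cached:

```azurepowershell-interactive
# Remove the expired cached service principal credentials file
Remove-Item -Path "$HOME/.azure/aksServicePrincipal.json"

# Retry the cluster deployment; new credentials are generated and cached
New-AzAksCluster -ResourceGroupName myResourceGroup -Name myAKSCluster -NodeCount 1 -GenerateSshKey
```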
--- @@ -337,3 +295,5 @@ For information on how to update the credentials, see [Update or rotate the cred [new-azroleassignment]: /powershell/module/az.resources/new-azroleassignment [set-azakscluster]: /powershell/module/az.aks/set-azakscluster [remove-azadserviceprincipal]: /powershell/module/az.resources/remove-azadserviceprincipal +[use-managed-identity]: use-managed-identity.md +[managed-identity-resources-overview]: ..//active-directory/managed-identities-azure-resources/overview.md diff --git a/articles/aks/learn/quick-windows-container-deploy-cli.md b/articles/aks/learn/quick-windows-container-deploy-cli.md index 6d9c53d57c983..fadef3afb8f44 100644 --- a/articles/aks/learn/quick-windows-container-deploy-cli.md +++ b/articles/aks/learn/quick-windows-container-deploy-cli.md @@ -97,7 +97,6 @@ az aks create \ --generate-ssh-keys \ --windows-admin-username $WINDOWS_USERNAME \ --vm-set-type VirtualMachineScaleSets \ - --kubernetes-version 1.20.7 \ --network-plugin azure ``` @@ -302,9 +301,6 @@ spec: limits: cpu: 1 memory: 800M - requests: - cpu: .1 - memory: 300M ports: - containerPort: 80 selector: diff --git a/articles/aks/learn/quick-windows-container-deploy-powershell.md b/articles/aks/learn/quick-windows-container-deploy-powershell.md index e4fed7b2564f2..e18be108d1a9f 100644 --- a/articles/aks/learn/quick-windows-container-deploy-powershell.md +++ b/articles/aks/learn/quick-windows-container-deploy-powershell.md @@ -211,9 +211,6 @@ spec: limits: cpu: 1 memory: 800M - requests: - cpu: .1 - memory: 300M ports: - containerPort: 80 selector: diff --git a/articles/aks/load-balancer-standard.md b/articles/aks/load-balancer-standard.md index d05184f253085..1e36ee7f04bf8 100644 --- a/articles/aks/load-balancer-standard.md +++ b/articles/aks/load-balancer-standard.md @@ -378,7 +378,6 @@ Learn more about using Internal Load Balancer for Inbound traffic at the [AKS In [kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get [kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply [kubernetes-services]: https://kubernetes.io/docs/concepts/services-networking/service/ -[aks-engine]: https://github.com/Azure/aks-engine [advanced-networking]: configure-azure-cni.md diff --git a/articles/aks/media/keda/architecture.png b/articles/aks/media/keda/architecture.png new file mode 100644 index 0000000000000..b5751183644dd Binary files /dev/null and b/articles/aks/media/keda/architecture.png differ diff --git a/articles/aks/media/release-tracker/regional-status.png b/articles/aks/media/release-tracker/regional-status.png new file mode 100644 index 0000000000000..6f4b21d9b0329 Binary files /dev/null and b/articles/aks/media/release-tracker/regional-status.png differ diff --git a/articles/aks/media/release-tracker/sdp-process.png b/articles/aks/media/release-tracker/sdp-process.png new file mode 100644 index 0000000000000..1618aeb0e37d2 Binary files /dev/null and b/articles/aks/media/release-tracker/sdp-process.png differ diff --git a/articles/aks/monitor-aks.md b/articles/aks/monitor-aks.md index 7cf77bdf7b792..9ffa5bdf70617 100644 --- a/articles/aks/monitor-aks.md +++ b/articles/aks/monitor-aks.md @@ -41,7 +41,7 @@ You require at least one Log Analytics workspace to support Container insights a If you're just getting started with Azure Monitor, then start with a single workspace and consider creating additional workspaces as your requirements evolve. 
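If you don't already have a workspace, you can create one before onboarding the cluster. The following is a minimal Azure CLI sketch; the resource group, workspace name, and region are placeholders for your own values:

```azurecli-interactive
# Create a Log Analytics workspace to hold Container insights data
az monitor log-analytics workspace create \
    --resource-group myResourceGroup \
    --workspace-name myAKSLogAnalytics \
    --location eastus
```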
Many environments will use a single workspace for all the Azure resources they monitor. You can even share a workspace used by [Microsoft Defender for Cloud and Microsoft Sentinel](../azure-monitor/vm/monitor-virtual-machine-security.md), although many customers choose to segregate their availability and performance telemetry from security data. -See [Designing your Azure Monitor Logs deployment](../azure-monitor/logs/design-logs-deployment.md) for details on logic that you should consider for designing a workspace configuration. +See [Designing your Azure Monitor Logs deployment](../azure-monitor/logs/workspace-design.md) for details on logic that you should consider for designing a workspace configuration. ### Enable container insights When you enable Container insights for your AKS cluster, it deploys a containerized version of the [Log Analytics agent](../agents/../azure-monitor/agents/log-analytics-agent.md) that sends data to Azure Monitor. There are multiple methods to enable it depending whether you're working with a new or existing AKS cluster. See [Enable Container insights](../azure-monitor/containers/container-insights-onboard.md) for prerequisites and configuration options. diff --git a/articles/aks/nat-gateway.md b/articles/aks/nat-gateway.md index 75d5ff05ea8f2..35ff3e9b45cd8 100644 --- a/articles/aks/nat-gateway.md +++ b/articles/aks/nat-gateway.md @@ -1,5 +1,5 @@ --- -title: Managed NAT Gateway (preview) +title: Managed NAT Gateway description: Learn how to create an AKS cluster with managed NAT integration services: container-service ms.topic: article @@ -7,56 +7,22 @@ ms.date: 10/26/2021 ms.author: juda --- -# Managed NAT Gateway (preview) +# Managed NAT Gateway Whilst AKS customers are able to route egress traffic through an Azure Load Balancer, there are limitations on the amount of outbound flows of traffic that is possible. -Azure NAT Gateway allows up to 64,000 outbound UDP and TCP traffic flows per IP address with a maximum of 16 IP addresses. +Azure NAT Gateway allows up to 64,512 outbound UDP and TCP traffic flows per IP address with a maximum of 16 IP addresses. This article will show you how to create an AKS cluster with a Managed NAT Gateway for egress traffic. -[!INCLUDE [preview features callout](./includes/preview/preview-callout.md)] ## Before you begin To use Managed NAT gateway, you must have the following: * The latest version of the Azure CLI -* The `aks-preview` extension version 0.5.31 or later * Kubernetes version 1.20.x or above -### Install aks-preview CLI extension - -You also need the *aks-preview* Azure CLI extension version 0.5.31 or later. Install the *aks-preview* Azure CLI extension by using the [az extension add][az-extension-add] command. Or install any available updates by using the [az extension update][az-extension-update] command. - -```azurecli-interactive -# Install the aks-preview extension -az extension add --name aks-preview - -# Update the extension to make sure you have the latest version installed -az extension update --name aks-preview -``` - -### Register the `AKS-NATGatewayPreview` feature flag - -To use the NAT Gateway feature, you must enable the `AKS-NATGatewayPreview` feature flag on your subscription. 
- -```azurecli -az feature register --namespace "Microsoft.ContainerService" --name "AKS-NATGatewayPreview" -``` -You can check on the registration status by using the [az feature list][az-feature-list] command: - -```azurecli-interactive -az feature list -o table --query "[?contains(name, 'Microsoft.ContainerService/AKS-NATGatewayPreview')].{Name:name,State:properties.state}" -``` - -When ready, refresh the registration of the *Microsoft.ContainerService* resource provider by using the [az provider register][az-provider-register] command: - -```azurecli-interactive -az provider register --namespace Microsoft.ContainerService -``` - - ## Create an AKS cluster with a Managed NAT Gateway To create an AKS cluster with a new Managed NAT Gateway, use `--outbound-type managedNATGateway` as well as `--nat-gateway-managed-outbound-ip-count` and `--nat-gateway-idle-timeout` when running `az aks create`. The following example creates a *myresourcegroup* resource group, then creates a *natcluster* AKS cluster in *myresourcegroup* with a Managed NAT Gateway, two outbound IPs, and an idle timeout of 30 seconds. diff --git a/articles/aks/node-auto-repair.md b/articles/aks/node-auto-repair.md index c0d47f242d35a..932ee79b7ecbb 100644 --- a/articles/aks/node-auto-repair.md +++ b/articles/aks/node-auto-repair.md @@ -35,7 +35,7 @@ If AKS identifies an unhealthy node that remains unhealthy for 10 minutes, AKS t 1. Reboot the node. 1. If the reboot is unsuccessful, reimage the node. -1. If the reimage is unsuccessful, redploy the node. +1. If the reimage is unsuccessful, redeploy the node. Alternative remediations are investigated by AKS engineers if auto-repair is unsuccessful. diff --git a/articles/aks/open-service-mesh-about.md b/articles/aks/open-service-mesh-about.md index c9162ceac7a2d..22e7b77af38f7 100644 --- a/articles/aks/open-service-mesh-about.md +++ b/articles/aks/open-service-mesh-about.md @@ -32,7 +32,7 @@ OSM provides the following capabilities and features: - Define and execute fine grained access control policies for services. - Monitor and debug services using observability and insights into application metrics. - Integrate with external certificate management. -- Integrates with existing ingress solutions such as the [Azure Gateway Ingress Controller][agic], [NGINX][nginx], and [Contour][contour]. For more details on how ingress works with OSM, see [Using Ingress to manage external access to services within the cluster][osm-ingress]. For an example on integrating OSM with Contour for ingress, see [Ingress with Contour][osm-contour]. For an example on integrating OSM with ingress controllers that use the `networking.k8s.io/v1` API, such as NGINX, see [Ingress with Kubernetes Nginx Ingress Controller][osm-nginx]. +- Integrates with existing ingress solutions such as [NGINX][nginx], [Contour][contour], and [Web Application Routing][web-app-routing]. For more details on how ingress works with OSM, see [Using Ingress to manage external access to services within the cluster][osm-ingress]. For an example on integrating OSM with Contour for ingress, see [Ingress with Contour][osm-contour]. For an example on integrating OSM with ingress controllers that use the `networking.k8s.io/v1` API, such as NGINX, see [Ingress with Kubernetes Nginx Ingress Controller][osm-nginx]. For more details on using Web Application Routing, which automatically integrates with OSM, see [Web Application Routing][web-app-routing]. 
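As a quick illustration, the OSM add-on can be turned on for an existing cluster from the Azure CLI. This is a minimal sketch; the resource group and cluster names are placeholders for your own values:

```azurecli-interactive
# Enable the Open Service Mesh add-on on an existing AKS cluster
az aks enable-addons \
    --resource-group myResourceGroup \
    --name myAKSCluster \
    --addons open-service-mesh
```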
## Example scenarios @@ -64,9 +64,9 @@ After enabling the OSM add-on using the [Azure CLI][osm-azure-cli] or a [Bicep t [osm-onboard-app]: https://release-v1-0.docs.openservicemesh.io/docs/guides/app_onboarding/ [ip-tables-redirection]: https://docs.openservicemesh.io/docs/guides/traffic_management/iptables_redirection/ [global-exclusion]: https://docs.openservicemesh.io/docs/guides/traffic_management/iptables_redirection/#global-outbound-ip-range-exclusions -[agic]: ../application-gateway/ingress-controller-overview.md [nginx]: https://github.com/kubernetes/ingress-nginx [contour]: https://projectcontour.io/ [osm-ingress]: https://release-v1-0.docs.openservicemesh.io/docs/guides/traffic_management/ingress/ [osm-contour]: https://release-v1-0.docs.openservicemesh.io/docs/demos/ingress_contour [osm-nginx]: https://release-v1-0.docs.openservicemesh.io/docs/demos/ingress_k8s_nginx +[web-app-routing]: web-app-routing.md \ No newline at end of file diff --git a/articles/aks/open-service-mesh-integrations.md b/articles/aks/open-service-mesh-integrations.md index 0ebde87b0afa1..94769380bbb2c 100644 --- a/articles/aks/open-service-mesh-integrations.md +++ b/articles/aks/open-service-mesh-integrations.md @@ -15,7 +15,7 @@ The Open Service Mesh (OSM) add-on integrates with features provided by Azure as ## Ingress -Ingress allows for traffic external to the mesh to be routed to services within the mesh. With OSM, you can configure most ingress solutions to work with your mesh, but OSM works best with either [NGINX ingress][osm-nginx] or [Contour ingress][osm-contour]. Open source projects integrating with OSM, including NGINX ingress and Contour ingress, are not covered by the [AKS support policy][aks-support-policy]. +Ingress allows for traffic external to the mesh to be routed to services within the mesh. With OSM, you can configure most ingress solutions to work with your mesh, but OSM works best with [Web Application Routing][web-app-routing], [NGINX ingress][osm-nginx], or [Contour ingress][osm-contour]. Open source projects integrating with OSM, including NGINX ingress and Contour ingress, are not covered by the [AKS support policy][aks-support-policy]. Using [Azure Gateway Ingress Controller (AGIC)][agic] for ingress with OSM is not supported and not recommended. @@ -101,4 +101,5 @@ OSM has several types of certificates it uses to operate on your AKS cluster. 
OS [osm-cert-manager]: https://release-v1-0.docs.openservicemesh.io/docs/guides/certificates/#using-cert-manager [open-source-integrations]: open-service-mesh-integrations.md#additional-open-source-integrations [osm-traffic-management-example]: https://github.com/MicrosoftDocs/azure-docs/pull/81085/files -[osm-tresor]: https://release-v1-0.docs.openservicemesh.io/docs/guides/certificates/#using-osms-tresor-certificate-issuer \ No newline at end of file +[osm-tresor]: https://release-v1-0.docs.openservicemesh.io/docs/guides/certificates/#using-osms-tresor-certificate-issuer +[web-app-routing]: web-app-routing.md \ No newline at end of file diff --git a/articles/aks/open-service-mesh-troubleshoot.md b/articles/aks/open-service-mesh-troubleshoot.md index 219ff4d5e89ca..782db5a337786 100644 --- a/articles/aks/open-service-mesh-troubleshoot.md +++ b/articles/aks/open-service-mesh-troubleshoot.md @@ -103,7 +103,7 @@ aks-osm-webhook-osm 1 102m ### Check for the service and the CA bundle of the Validating webhook ```azurecli-interactive -kubectl get ValidatingWebhookConfiguration aks-osm-webhook-osm -o json | jq '.webhooks[0].clientConfig.service' +kubectl get ValidatingWebhookConfiguration aks-osm-validator-mesh-osm -o json | jq '.webhooks[0].clientConfig.service' ``` A well configured Validating Webhook Configuration would look exactly like this: diff --git a/articles/aks/operator-best-practices-cluster-security.md b/articles/aks/operator-best-practices-cluster-security.md index 63fa9bc59c139..6d7b1ae749fb2 100644 --- a/articles/aks/operator-best-practices-cluster-security.md +++ b/articles/aks/operator-best-practices-cluster-security.md @@ -141,7 +141,7 @@ AppArmor profiles are added using the `apparmor_parser` command. spec: containers: - name: hello - image: mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 + image: mcr.microsoft.com/dotnet/runtime-deps:6.0 command: [ "sh", "-c", "echo 'Hello AppArmor!' && sleep 1h" ] ``` @@ -216,7 +216,7 @@ To see seccomp in action, create a filter that prevents changing permissions on spec: containers: - name: chmod - image: mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 + image: mcr.microsoft.com/dotnet/runtime-deps:6.0 command: - "chmod" args: @@ -239,7 +239,7 @@ To see seccomp in action, create a filter that prevents changing permissions on localhostProfile: prevent-chmod containers: - name: chmod - image: mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 + image: mcr.microsoft.com/dotnet/runtime-deps:6.0 command: - "chmod" args: diff --git a/articles/aks/policy-reference.md b/articles/aks/policy-reference.md index 4b73dc93c0862..d73984a6d03c9 100644 --- a/articles/aks/policy-reference.md +++ b/articles/aks/policy-reference.md @@ -25,10 +25,6 @@ the link in the **Version** column to view the source on the [!INCLUDE [azure-policy-reference-rp-aks-containerservice](../../includes/policy/reference/byrp/microsoft.containerservice.md)] -### AKS Engine - -[!INCLUDE [azure-policy-reference-rp-aks-aksengine](../../includes/policy/reference/byrp/aks-engine.md)] - ## Next steps - See the built-ins on the [Azure Policy GitHub repo](https://github.com/Azure/azure-policy). 
diff --git a/articles/aks/private-clusters.md b/articles/aks/private-clusters.md index 94723e3c13bbb..8cca35c58e576 100644 --- a/articles/aks/private-clusters.md +++ b/articles/aks/private-clusters.md @@ -3,7 +3,7 @@ title: Create a private Azure Kubernetes Service cluster description: Learn how to create a private Azure Kubernetes Service (AKS) cluster services: container-service ms.topic: article -ms.date: 01/12/2022 +ms.date: 05/27/2022 --- @@ -150,6 +150,9 @@ As mentioned, virtual network peering is one way to access your private cluster. 3. In scenarios where the VNet containing your cluster has custom DNS settings (4), cluster deployment fails unless the private DNS zone is linked to the VNet that contains the custom DNS resolvers (5). This link can be created manually after the private zone is created during cluster provisioning or via automation upon detection of creation of the zone using event-based deployment mechanisms (for example, Azure Event Grid and Azure Functions). +> [!NOTE] +> Conditional Forwarding doesn't support subdomains. + > [!NOTE] > If you are using [Bring Your Own Route Table with kubenet](./configure-kubenet.md#bring-your-own-subnet-and-route-table-with-kubenet) and Bring Your Own DNS with Private Cluster, the cluster creation will fail. You will need to associate the [RouteTable](./configure-kubenet.md#bring-your-own-subnet-and-route-table-with-kubenet) in the node resource group to the subnet after the cluster creation failed, in order to make the creation successful. diff --git a/articles/aks/quickstart-dapr.md b/articles/aks/quickstart-dapr.md index c3d032a3148c7..fe931410894ed 100644 --- a/articles/aks/quickstart-dapr.md +++ b/articles/aks/quickstart-dapr.md @@ -1,6 +1,6 @@ --- -title: Deploy an application with the Dapr cluster extension for Azure Kubernetes Service (AKS) -description: Use the Dapr cluster extension for Azure Kubernetes Service (AKS) to deploy an application +title: Deploy an application with the Dapr cluster extension for Azure Kubernetes Service (AKS) or Arc-enabled Kubernetes +description: Use the Dapr cluster extension for Azure Kubernetes Service (AKS) or Arc-enabled Kubernetes to deploy an application author: nickomang ms.author: nickoman ms.service: container-service @@ -9,15 +9,15 @@ ms.date: 05/03/2022 ms.custom: template-quickstart, mode-other, event-tier1-build-2022 --- -# Quickstart: Deploy an application using the Dapr cluster extension for Azure Kubernetes Service (AKS) +# Quickstart: Deploy an application using the Dapr cluster extension for Azure Kubernetes Service (AKS) or Arc-enabled Kubernetes -In this quickstart, you will get familiar with using the [Dapr cluster extension][dapr-overview] in an AKS cluster. You will be deploying a hello world example, consisting of a Python application that generates messages and a Node application that consumes and persists them. +In this quickstart, you will get familiar with using the [Dapr cluster extension][dapr-overview] in an AKS or Arc-enabled Kubernetes cluster. You will be deploying a hello world example, consisting of a Python application that generates messages and a Node application that consumes and persists them. ## Prerequisites * An Azure subscription. If you don't have an Azure subscription, you can create a [free account](https://azure.microsoft.com/free). * [Azure CLI installed](/cli/azure/install-azure-cli). 
-* An AKS cluster with the [Dapr cluster extension][dapr-overview] enabled +* An AKS or Arc-enabled Kubernetes cluster with the [Dapr cluster extension][dapr-overview] enabled ## Clone the repository @@ -201,7 +201,7 @@ You should see the latest JSON in the response. ## Clean up resources -Use the [az group delete][az-group-delete] command to remove the resource group, the AKS cluster, namespace, and all related resources. +Use the [az group delete][az-group-delete] command to remove the resource group, the cluster, the namespace, and all related resources. ```azurecli-interactive az group delete --name MyResourceGroup diff --git a/articles/aks/quickstart-event-grid.md b/articles/aks/quickstart-event-grid.md index 26a11d1b6f8a3..cbd83a0609588 100644 --- a/articles/aks/quickstart-event-grid.md +++ b/articles/aks/quickstart-event-grid.md @@ -141,7 +141,7 @@ az group delete --name MyResourceGroup --yes --no-wait ## Next steps -In this quickstart, you deployed a Kubernetes cluster and then subscribed to AKS events in Azure Event Hub. +In this quickstart, you deployed a Kubernetes cluster and then subscribed to AKS events in Azure Event Hubs. To learn more about AKS, and walk through a complete code to deployment example, continue to the Kubernetes cluster tutorial. @@ -158,4 +158,4 @@ To learn more about AKS, and walk through a complete code to deployment example, [az-feature-list]: /cli/azure/feature#az_feature_list [az-provider-register]: /cli/azure/provider#az_provider_register [az-group-delete]: /cli/azure/group#az_group_delete -[sp-delete]: kubernetes-service-principal.md#additional-considerations +[sp-delete]: kubernetes-service-principal.md#other-considerations diff --git a/articles/aks/quickstart-helm.md b/articles/aks/quickstart-helm.md index c12cedb17c1f8..9485c5986508f 100644 --- a/articles/aks/quickstart-helm.md +++ b/articles/aks/quickstart-helm.md @@ -236,5 +236,5 @@ For more information about using Helm, see the Helm documentation. [helm-documentation]: https://helm.sh/docs/ [helm-existing]: kubernetes-helm.md [helm-install]: https://helm.sh/docs/intro/install/ -[sp-delete]: kubernetes-service-principal.md#additional-considerations +[sp-delete]: kubernetes-service-principal.md#other-considerations [acr-helm]: ../container-registry/container-registry-helm-repos.md \ No newline at end of file diff --git a/articles/aks/release-tracker.md b/articles/aks/release-tracker.md new file mode 100644 index 0000000000000..5cf412edaa53c --- /dev/null +++ b/articles/aks/release-tracker.md @@ -0,0 +1,35 @@ +--- +title: AKS release tracker +description: Learn how to determine which Azure regions have the weekly AKS release deployments rolled out in real time. +services: container-service +ms.topic: overview +ms.date: 05/24/2022 +ms.author: nickoman +author: nickomang + +ms.custom: mvc +--- + +# AKS release tracker + +> [!NOTE] +> The AKS release tracker is currently not accessible. When the feature is fully released, this article will be updated to include access instructions. + +AKS releases weekly rounds of fixes and feature and component updates that affect all clusters and customers. However, these releases can take up to two weeks to roll out to all regions from the initial time of shipping due to Azure Safe Deployment Practices (SDP). It is important for customers to know when a particular AKS release is hitting their region, and the AKS release tracker provides these details in real time by versions and regions. + +## Why release tracker? 
+ +With AKS release tracker, customers can follow specific component updates present in an AKS version release, such as fixes shipped to a core add-on. In addition to providing real-time updates of region release status, the tracker also links to the specific version of the AKS [release notes][aks-release] to help customers identify which instance of the release is relevant to them. As the data is updated in real time, customers can track the entire SDP process with a single tool. + +## How to use the release tracker + +The top half of the tracker shows the latest and 3 previously available release versions for each region, and links to the corresponding release notes entry. This view is helpful when you want to track the available versions by region. + +:::image type="content" source="./media/release-tracker/regional-status.png" alt-text="Screenshot of the A K S release tracker's regional status table displayed in a web browser."::: + +The bottom half of the tracker shows the SDP process. The table has two views: one shows the latest version and status update for each grouping of regions and the other shows the status and region availability of each currently supported version. + +:::image type="content" source="./media/release-tracker/sdp-process.png" alt-text="Screenshot of the A K S release tracker's S D P process table displayed in a web browser."::: + + +[aks-release]: https://github.com/Azure/AKS/releases \ No newline at end of file diff --git a/articles/aks/spark-job.md b/articles/aks/spark-job.md deleted file mode 100644 index 6e1b45cef6c1c..0000000000000 --- a/articles/aks/spark-job.md +++ /dev/null @@ -1,349 +0,0 @@ ---- -title: Run an Apache Spark job with Azure Kubernetes Service (AKS) -description: Use Azure Kubernetes Service (AKS) to create and run an Apache Spark job for large-scale data processing. -ms.topic: conceptual -ms.date: 10/18/2019 -ms.custom: mvc, devx-track-azurecli ---- - -# Running Apache Spark jobs on AKS - -[Apache Spark][apache-spark] is a fast engine for large-scale data processing. As of the [Spark 2.3.0 release][spark-kubernetes-earliest-version], Apache Spark supports native integration with Kubernetes clusters. Azure Kubernetes Service (AKS) is a managed Kubernetes environment running in Azure. This document details preparing and running Apache Spark jobs on an Azure Kubernetes Service (AKS) cluster. - -## Prerequisites - -In order to complete the steps within this article, you need the following. - -* Basic understanding of Kubernetes and [Apache Spark][spark-quickstart]. -* [Docker Hub][docker-hub] account, or an [Azure Container Registry][acr-create]. -* Azure CLI [installed][azure-cli] on your development system. -* [JDK 8][java-install] installed on your system. -* [Apache Maven][maven-install] installed on your system. -* SBT ([Scala Build Tool][sbt-install]) installed on your system. -* Git command-line tools installed on your system. - -## Create an AKS cluster - -Spark is used for large-scale data processing and requires that Kubernetes nodes are sized to meet the Spark resources requirements. We recommend a minimum size of `Standard_D3_v2` for your Azure Kubernetes Service (AKS) nodes. - -If you need an AKS cluster that meets this minimum recommendation, run the following commands. - -Create a resource group for the cluster. - -```azurecli -az group create --name mySparkCluster --location eastus -``` - -Create a Service Principal for the cluster. 
After it is created, you will need the Service Principal appId and password for the next command. - -```azurecli -az ad sp create-for-rbac --name SparkSP --role Contributor --scopes /subscriptions/mySubscriptionID -``` - -Create the AKS cluster with nodes that are of size `Standard_D3_v2`, and values of appId and password passed as service-principal and client-secret parameters. - -```azurecli -az aks create --resource-group mySparkCluster --name mySparkCluster --node-vm-size Standard_D3_v2 --generate-ssh-keys --service-principal --client-secret -``` - -Connect to the AKS cluster. - -```azurecli -az aks get-credentials --resource-group mySparkCluster --name mySparkCluster -``` - -If you are using Azure Container Registry (ACR) to store container images, configure authentication between AKS and ACR. See the [ACR authentication documentation][acr-aks] for these steps. - -## Build the Spark source - -Before running Spark jobs on an AKS cluster, you need to build the Spark source code and package it into a container image. The Spark source includes scripts that can be used to complete this process. - -Clone the Spark project repository to your development system. - -```bash -git clone -b branch-2.4 https://github.com/apache/spark -``` - -Change into the directory of the cloned repository and save the path of the Spark source to a variable. - -```bash -cd spark -sparkdir=$(pwd) -``` - -If you have multiple JDK versions installed, set `JAVA_HOME` to use version 8 for the current session. - -```bash -export JAVA_HOME=`/usr/libexec/java_home -d 64 -v "1.8*"` -``` - -Run the following command to build the Spark source code with Kubernetes support. - -```bash -./build/mvn -Pkubernetes -DskipTests clean package -``` - -The following commands create the Spark container image and push it to a container image registry. Replace `registry.example.com` with the name of your container registry and `v1` with the tag you prefer to use. If using Docker Hub, this value is the registry name. If using Azure Container Registry (ACR), this value is the ACR login server name. - -```bash -REGISTRY_NAME=registry.example.com -REGISTRY_TAG=v1 -``` - -```bash -./bin/docker-image-tool.sh -r $REGISTRY_NAME -t $REGISTRY_TAG build -``` - -Push the container image to your container image registry. - -```bash -./bin/docker-image-tool.sh -r $REGISTRY_NAME -t $REGISTRY_TAG push -``` - -## Prepare a Spark job - -Next, prepare a Spark job. A jar file is used to hold the Spark job and is needed when running the `spark-submit` command. The jar can be made accessible through a public URL or pre-packaged within a container image. In this example, a sample jar is created to calculate the value of Pi. This jar is then uploaded to Azure storage. If you have an existing jar, feel free to substitute - -Create a directory where you would like to create the project for a Spark job. - -```bash -mkdir myprojects -cd myprojects -``` - -Create a new Scala project from a template. - -```bash -sbt new sbt/scala-seed.g8 -``` - -When prompted, enter `SparkPi` for the project name. - -```bash -name [Scala Seed Project]: SparkPi -``` - -Navigate to the newly created project directory. - -```bash -cd sparkpi -``` - -Run the following commands to add an SBT plugin, which allows packaging the project as a jar file. 
- -```bash -touch project/assembly.sbt -echo 'addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.10")' >> project/assembly.sbt -``` - -Run these commands to copy the sample code into the newly created project and add all necessary dependencies. - -```bash -EXAMPLESDIR="src/main/scala/org/apache/spark/examples" -mkdir -p $EXAMPLESDIR -cp $sparkdir/examples/$EXAMPLESDIR/SparkPi.scala $EXAMPLESDIR/SparkPi.scala - -cat <> build.sbt -// https://mvnrepository.com/artifact/org.apache.spark/spark-sql -libraryDependencies += "org.apache.spark" %% "spark-sql" % "2.3.0" % "provided" -EOT - -sed -ie 's/scalaVersion.*/scalaVersion := "2.11.11"/' build.sbt -sed -ie 's/name.*/name := "SparkPi",/' build.sbt -``` - -To package the project into a jar, run the following command. - -```bash -sbt assembly -``` - -After successful packaging, you should see output similar to the following. - -```bash -[info] Packaging /Users/me/myprojects/sparkpi/target/scala-2.11/SparkPi-assembly-0.1.0-SNAPSHOT.jar ... -[info] Done packaging. -[success] Total time: 10 s, completed Mar 6, 2018 11:07:54 AM -``` - -## Copy job to storage - -Create an Azure storage account and container to hold the jar file. - -```azurecli -RESOURCE_GROUP=sparkdemo -STORAGE_ACCT=sparkdemo$RANDOM -az group create --name $RESOURCE_GROUP --location eastus -az storage account create --resource-group $RESOURCE_GROUP --name $STORAGE_ACCT --sku Standard_LRS -export AZURE_STORAGE_CONNECTION_STRING=`az storage account show-connection-string --resource-group $RESOURCE_GROUP --name $STORAGE_ACCT -o tsv` -``` - -Upload the jar file to the Azure storage account with the following commands. - -```azurecli -CONTAINER_NAME=jars -BLOB_NAME=SparkPi-assembly-0.1.0-SNAPSHOT.jar -FILE_TO_UPLOAD=target/scala-2.11/SparkPi-assembly-0.1.0-SNAPSHOT.jar - -echo "Creating the container..." -az storage container create --name $CONTAINER_NAME -az storage container set-permission --name $CONTAINER_NAME --public-access blob - -echo "Uploading the file..." -az storage blob upload --container-name $CONTAINER_NAME --file $FILE_TO_UPLOAD --name $BLOB_NAME - -jarUrl=$(az storage blob url --container-name $CONTAINER_NAME --name $BLOB_NAME | tr -d '"') -``` - -Variable `jarUrl` now contains the publicly accessible path to the jar file. - -## Submit a Spark job - -Start kube-proxy in a separate command-line with the following code. - -```bash -kubectl proxy -``` - -Navigate back to the root of Spark repository. - -```bash -cd $sparkdir -``` - -Create a service account that has sufficient permissions for running a job. - -```bash -kubectl create serviceaccount spark -kubectl create clusterrolebinding spark-role --clusterrole=edit --serviceaccount=default:spark --namespace=default -``` - -Submit the job using `spark-submit`. - -```bash -./bin/spark-submit \ - --master k8s://http://127.0.0.1:8001 \ - --deploy-mode cluster \ - --name spark-pi \ - --class org.apache.spark.examples.SparkPi \ - --conf spark.executor.instances=3 \ - --conf spark.kubernetes.authenticate.driver.serviceAccountName=spark \ - --conf spark.kubernetes.container.image=$REGISTRY_NAME/spark:$REGISTRY_TAG \ - $jarUrl -``` - -This operation starts the Spark job, which streams job status to your shell session. While the job is running, you can see Spark driver pod and executor pods using the kubectl get pods command. Open a second terminal session to run these commands. 
- -```console -kubectl get pods -``` - -```output -NAME READY STATUS RESTARTS AGE -spark-pi-2232778d0f663768ab27edc35cb73040-driver 1/1 Running 0 16s -spark-pi-2232778d0f663768ab27edc35cb73040-exec-1 0/1 Init:0/1 0 4s -spark-pi-2232778d0f663768ab27edc35cb73040-exec-2 0/1 Init:0/1 0 4s -spark-pi-2232778d0f663768ab27edc35cb73040-exec-3 0/1 Init:0/1 0 4s -``` - -While the job is running, you can also access the Spark UI. In the second terminal session, use the `kubectl port-forward` command provide access to Spark UI. - -```bash -kubectl port-forward spark-pi-2232778d0f663768ab27edc35cb73040-driver 4040:4040 -``` - -To access Spark UI, open the address `127.0.0.1:4040` in a browser. - -![Spark UI](media/aks-spark-job/spark-ui.png) - -## Get job results and logs - -After the job has finished, the driver pod will be in a "Completed" state. Get the name of the pod with the following command. - -```bash -kubectl get pods --show-all -``` - -Output: - -```output -NAME READY STATUS RESTARTS AGE -spark-pi-2232778d0f663768ab27edc35cb73040-driver 0/1 Completed 0 1m -``` - -Use the `kubectl logs` command to get logs from the spark driver pod. Replace the pod name with your driver pod's name. - -```bash -kubectl logs spark-pi-2232778d0f663768ab27edc35cb73040-driver -``` - -Within these logs, you can see the result of the Spark job, which is the value of Pi. - -```output -Pi is roughly 3.152155760778804 -``` - -## Package jar with container image - -In the above example, the Spark jar file was uploaded to Azure storage. Another option is to package the jar file into custom-built Docker images. - -To do so, find the `dockerfile` for the Spark image located at `$sparkdir/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/` directory. Add an `ADD` statement for the Spark job `jar` somewhere between `WORKDIR` and `ENTRYPOINT` declarations. - -Update the jar path to the location of the `SparkPi-assembly-0.1.0-SNAPSHOT.jar` file on your development system. You can also use your own custom jar file. - -```bash -WORKDIR /opt/spark/work-dir - -ADD /path/to/SparkPi-assembly-0.1.0-SNAPSHOT.jar SparkPi-assembly-0.1.0-SNAPSHOT.jar - -ENTRYPOINT [ "/opt/entrypoint.sh" ] -``` - -Build and push the image with the included Spark scripts. - -```bash -./bin/docker-image-tool.sh -r -t build -./bin/docker-image-tool.sh -r -t push -``` - -When running the job, instead of indicating a remote jar URL, the `local://` scheme can be used with the path to the jar file in the Docker image. - -```bash -./bin/spark-submit \ - --master k8s://https://: \ - --deploy-mode cluster \ - --name spark-pi \ - --class org.apache.spark.examples.SparkPi \ - --conf spark.executor.instances=3 \ - --conf spark.kubernetes.authenticate.driver.serviceAccountName=spark \ - --conf spark.kubernetes.container.image= \ - local:///opt/spark/work-dir/.jar -``` - -> [!WARNING] -> From Spark [documentation][spark-docs]: "The Kubernetes scheduler is currently experimental. In future versions, there may be behavioral changes around configuration, container images and entrypoints". - -## Next steps - -Check out Spark documentation for more details. 
- -> [!div class="nextstepaction"] -> [Spark documentation][spark-docs] - - -[apache-spark]: https://spark.apache.org/ -[docker-hub]: https://docs.docker.com/docker-hub/ -[java-install]: /azure/developer/java/fundamentals/java-support-on-azure -[maven-install]: https://maven.apache.org/install.html -[sbt-install]: https://www.scala-sbt.org/1.x/docs/Setup.html -[spark-docs]: https://spark.apache.org/docs/latest/running-on-kubernetes.html -[spark-kubernetes-earliest-version]: https://spark.apache.org/releases/spark-release-2-3-0.html -[spark-quickstart]: https://spark.apache.org/docs/latest/quick-start.html - - - -[acr-aks]: cluster-container-registry-integration.md -[acr-create]: ../container-registry/container-registry-get-started-azure-cli.md -[aks-quickstart]: ./index.yml -[azure-cli]: /cli/azure/ -[storage-account]: ../storage/blobs/storage-quickstart-blobs-cli.md diff --git a/articles/aks/start-stop-cluster.md b/articles/aks/start-stop-cluster.md index d759901888e92..fca38f486433f 100644 --- a/articles/aks/start-stop-cluster.md +++ b/articles/aks/start-stop-cluster.md @@ -150,7 +150,7 @@ If the `ProvisioningState` shows `Starting` that means your cluster hasn't fully [aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md [aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md -[aks-quickstart-powershell]: /learn/quick-kubernetes-deploy-powershell.md +[aks-quickstart-powershell]: /azure/aks/learn/quick-kubernetes-deploy-powershell [install-azure-cli]: /cli/azure/install-azure-cli [az-extension-add]: /cli/azure/extension#az_extension_add [az-extension-update]: /cli/azure/extension#az_extension_update diff --git a/articles/aks/support-policies.md b/articles/aks/support-policies.md index 0e2b03064b5d4..d0e154fd24070 100644 --- a/articles/aks/support-policies.md +++ b/articles/aks/support-policies.md @@ -33,7 +33,7 @@ Microsoft manages and monitors the following components through the control pane AKS isn't a Platform-as-a-Service (PaaS) solution. Some components, such as agent nodes, have *shared responsibility*, where users must help maintain the AKS cluster. User input is required, for example, to apply an agent node operating system (OS) security patch. -The services are *managed* in the sense that Microsoft and the AKS team deploys, operates, and is responsible for service availability and functionality. Customers can't alter these managed components. Microsoft limits customization to ensure a consistent and scalable user experience. For a fully customizable solution, see [AKS Engine](https://github.com/Azure/aks-engine). +The services are *managed* in the sense that Microsoft and the AKS team deploys, operates, and is responsible for service availability and functionality. Customers can't alter these managed components. Microsoft limits customization to ensure a consistent and scalable user experience. ## Shared responsibility diff --git a/articles/aks/supported-kubernetes-versions.md b/articles/aks/supported-kubernetes-versions.md index ef02286b9b268..7c4bb4eab8c11 100644 --- a/articles/aks/supported-kubernetes-versions.md +++ b/articles/aks/supported-kubernetes-versions.md @@ -264,7 +264,6 @@ Patches have a two month minimum lifecycle. To keep up to date when new patches For information on how to upgrade your cluster, see [Upgrade an Azure Kubernetes Service (AKS) cluster][aks-upgrade]. 
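As a brief illustration, you can check which Kubernetes versions are available for your cluster and then upgrade with the Azure CLI. This is a sketch only; the resource group, cluster name, and version are placeholders for your own values:

```azurecli-interactive
# List the Kubernetes versions this cluster can be upgraded to
az aks get-upgrades --resource-group myResourceGroup --name myAKSCluster --output table

# Upgrade the control plane and node pools to a newer supported version (example version shown)
az aks upgrade --resource-group myResourceGroup --name myAKSCluster --kubernetes-version 1.23.5
```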
-[aks-engine]: https://github.com/Azure/aks-engine [azure-update-channel]: https://azure.microsoft.com/updates/?product=kubernetes-service diff --git a/articles/aks/troubleshooting.md b/articles/aks/troubleshooting.md index f2b5a330e97d3..3b5de98a4ae3a 100644 --- a/articles/aks/troubleshooting.md +++ b/articles/aks/troubleshooting.md @@ -272,7 +272,7 @@ spec: ```yaml initContainers: - name: volume-mount - image: mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 + image: mcr.microsoft.com/dotnet/runtime-deps:6.0 command: ["sh", "-c", "chown -R 100:100 /data"] volumeMounts: - name: diff --git a/articles/aks/tutorial-kubernetes-upgrade-cluster.md b/articles/aks/tutorial-kubernetes-upgrade-cluster.md index 368b6e539343a..6a879e92b639f 100644 --- a/articles/aks/tutorial-kubernetes-upgrade-cluster.md +++ b/articles/aks/tutorial-kubernetes-upgrade-cluster.md @@ -291,7 +291,7 @@ For more information on AKS, see [AKS overview][aks-intro]. For guidance on a cr [az aks upgrade]: /cli/azure/aks#az_aks_upgrade [azure-cli-install]: /cli/azure/install-azure-cli [az-group-delete]: /cli/azure/group#az_group_delete -[sp-delete]: kubernetes-service-principal.md#additional-considerations +[sp-delete]: kubernetes-service-principal.md#other-considerations [aks-solution-guidance]: /azure/architecture/reference-architectures/containers/aks-start-here?WT.mc_id=AKSDOCSPAGE [azure-powershell-install]: /powershell/azure/install-az-ps [get-azakscluster]: /powershell/module/az.aks/get-azakscluster diff --git a/articles/aks/use-group-managed-service-accounts.md b/articles/aks/use-group-managed-service-accounts.md index 4574e133763cb..605dc17d5416a 100644 --- a/articles/aks/use-group-managed-service-accounts.md +++ b/articles/aks/use-group-managed-service-accounts.md @@ -41,6 +41,8 @@ az keyvault secret set --vault-name MyAKSGMSAVault --name "GMSADomainUserCred" - > [!NOTE] > Use the Fully Qualified Domain Name for the Domain rather than the Partially Qualified Domain Name that may be used on internal networks. +> +> The above command escapes the `value` parameter for running the Azure CLI on a Linux shell. When running the Azure CLI command on Windows PowerShell, you don't need to escape characters in the `value` parameter. ## Optional: Use a custom VNET with custom DNS diff --git a/articles/aks/use-kms-etcd-encryption.md b/articles/aks/use-kms-etcd-encryption.md index c29557cf0b0b8..48cdd2b0c7fb0 100644 --- a/articles/aks/use-kms-etcd-encryption.md +++ b/articles/aks/use-kms-etcd-encryption.md @@ -3,7 +3,7 @@ title: Use KMS etcd encryption in Azure Kubernetes Service (AKS) (Preview) description: Learn how to use kms etcd encryption with Azure Kubernetes Service (AKS) services: container-service ms.topic: article -ms.date: 04/11/2022 +ms.date: 06/06/2022 --- @@ -65,12 +65,9 @@ The following limitations apply when you integrate KMS etcd encryption with AKS: * Changing of key ID, including key name and key version. * Deletion of the key, Key Vault, or the associated identity. * KMS etcd encryption doesn't work with System-Assigned Managed Identity. The keyvault access-policy is required to be set before the feature is enabled. In addition, System-Assigned Managed Identity isn't available until cluster creation, thus there's a cycle dependency. -* Using Azure Key Vault with PrivateLink enabled. * Using more than 2000 secrets in a cluster. -* Managed HSM Support * Bring your own (BYO) Azure Key Vault from another tenant. 
-

 ## Create a KeyVault and key

 > [!WARNING]
diff --git a/articles/aks/use-managed-identity.md b/articles/aks/use-managed-identity.md
index 1cb37f7a47ba6..b2494a69ce935 100644
--- a/articles/aks/use-managed-identity.md
+++ b/articles/aks/use-managed-identity.md
@@ -1,25 +1,25 @@
 ---
-title: Use managed identities in Azure Kubernetes Service
-description: Learn how to use managed identities in Azure Kubernetes Service (AKS)
+title: Use a managed identity in Azure Kubernetes Service
+description: Learn how to use a system-assigned or user-assigned managed identity in Azure Kubernetes Service (AKS)
 ms.topic: article
-ms.date: 01/25/2022
+ms.date: 06/07/2022
 ---

-# Use managed identities in Azure Kubernetes Service
+# Use a managed identity in Azure Kubernetes Service

-Currently, an Azure Kubernetes Service (AKS) cluster (specifically, the Kubernetes cloud provider) requires an identity to create additional resources like load balancers and managed disks in Azure. This identity can be either a *managed identity* or a *service principal*. If you use a [service principal](kubernetes-service-principal.md), you must either provide one or AKS creates one on your behalf. If you use managed identity, this will be created for you by AKS automatically. Clusters using service principals eventually reach a state in which the service principal must be renewed to keep the cluster working. Managing service principals adds complexity, which is why it's easier to use managed identities instead. The same permission requirements apply for both service principals and managed identities.
+An Azure Kubernetes Service (AKS) cluster requires an identity to access Azure resources like load balancers and managed disks. This identity can be either a managed identity or a service principal. By default, when you create an AKS cluster, a system-assigned managed identity is automatically created. The identity is managed by the Azure platform and doesn't require you to provision or rotate any secrets. For more information about managed identities in Azure AD, see [Managed identities for Azure resources][managed-identity-resources-overview].

-*Managed identities* are essentially a wrapper around service principals, and make their management simpler. Credential rotation for MI happens automatically every 46 days according to Azure Active Directory default. AKS uses both system-assigned and user-assigned managed identity types. These identities are currently immutable. To learn more, read about [managed identities for Azure resources](../active-directory/managed-identities-azure-resources/overview.md).
+To use a [service principal](kubernetes-service-principal.md), you have to create one; AKS doesn't create one automatically. The credentials for a cluster's service principal eventually expire and must be renewed to keep the cluster working. Managing service principals adds complexity, which is why it's easier to use managed identities instead. The same permission requirements apply for both service principals and managed identities.

-## Before you begin
+Managed identities are essentially a wrapper around service principals, and they make management simpler. Managed identities use certificate-based authentication; each managed identity's credential expires after 90 days and is rotated after 45 days. AKS uses both system-assigned and user-assigned managed identity types, and these identities are immutable.
-You must have the following resource installed: +## Prerequisites -- The Azure CLI, version 2.23.0 or later +Azure CLI version 2.23.0 or later. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. ## Limitations -* Tenants move / migrate of managed identity enabled clusters isn't supported. +* Tenants move or migrate a managed identity-enabled cluster isn't supported. * If the cluster has `aad-pod-identity` enabled, Node-Managed Identity (NMI) pods modify the nodes' iptables to intercept calls to the Azure Instance Metadata endpoint. This configuration means any request made to the Metadata endpoint is intercepted by NMI even if the pod doesn't use @@ -52,9 +52,12 @@ AKS uses several managed identities for built-in services and add-ons. | Add-on | Virtual-Node (ACIConnector) | Manages required network resources for Azure Container Instances (ACI) | Contributor role for node resource group | No | OSS project | aad-pod-identity | Enables applications to access cloud resources securely with Azure Active Directory (AAD) | NA | Steps to grant permission at https://github.com/Azure/aad-pod-identity#role-assignment. -## Create an AKS cluster with managed identities +> [!NOTE] +> AKS will create a kubelet managed identity in the Node resource group if you do not specify your own kubelet managed identity. + +## Create an AKS cluster using a managed identity -You can now create an AKS cluster with managed identities by using the following CLI commands. +You can create an AKS cluster using a system-assigned managed identity by running the following CLI command. First, create an Azure resource group: @@ -77,28 +80,29 @@ Finally, get credentials to access the cluster: az aks get-credentials --resource-group myResourceGroup --name myManagedCluster ``` -## Update an AKS cluster to managed identities +## Update an AKS cluster to use a managed identity -You can now update an AKS cluster currently working with service principals to work with managed identities by using the following CLI commands. +To update an AKS cluster currently using a service principals to work with a system-assigned managed identity, run the following CLI command. ```azurecli-interactive az aks update -g -n --enable-managed-identity ``` + > [!NOTE] -> An update will only work if there is an actual VHD update to consume. If you are running the latest VHD, you will need to wait till the next VHD is available in order to do the actual update. +> An update will only work if there is an actual VHD update to consume. If you are running the latest VHD, you'll need to wait until the next VHD is available in order to perform the update. > > [!NOTE] -> After updating, your cluster's control plane and addon pods will switch to use managed identity, but kubelet will KEEP USING SERVICE PRINCIPAL until you upgrade your agentpool. Perform an `az aks nodepool upgrade --node-image-only` on your nodes to complete the update to managed identity. +> After updating, your cluster's control plane and addon pods, they use the managed identity, but kubelet will continue using a service principal until you upgrade your agentpool. Perform an `az aks nodepool upgrade --node-image-only` on your nodes to complete the update to a managed identity. 
> -> If your cluster was using --attach-acr to pull from image from Azure Container Registry, after updating your cluster to Managed Identity, you need to rerun `az aks update --attach-acr ` to let the newly created kubelet used for managed identity get the permission to pull from ACR. Otherwise you will not be able to pull from ACR after the upgrade. +> If your cluster was using `--attach-acr` to pull from image from Azure Container Registry, after updating your cluster to a managed identity, you need to rerun `az aks update --attach-acr ` to let the newly created kubelet used for managed identity get the permission to pull from ACR. Otherwise, you won't be able to pull from ACR after the upgrade. > -> The Azure CLI will ensure your addon's permission is correctly set after migrating, if you're not using the Azure CLI to perform the migrating operation, you will need to handle the addon identity's permission by yourself. Here is one example using [ARM](../role-based-access-control/role-assignments-template.md). +> The Azure CLI will ensure your addon's permission is correctly set after migrating, if you're not using the Azure CLI to perform the migrating operation, you'll need to handle the addon identity's permission by yourself. Here is one example using an [Azure Resource Manager](../role-based-access-control/role-assignments-template.md) template. > [!WARNING] -> Nodepool upgrade will cause downtime for your AKS cluster as the nodes in the nodepools will be cordoned/drained and then reimaged. +> A nodepool upgrade will cause downtime for your AKS cluster as the nodes in the nodepools will be cordoned/drained and then reimaged. -## Obtain and use the system-assigned managed identity for your AKS cluster +## Get and use the system-assigned managed identity for your AKS cluster Confirm your AKS cluster is using managed identity with the following CLI command: @@ -106,7 +110,7 @@ Confirm your AKS cluster is using managed identity with the following CLI comman az aks show -g -n --query "servicePrincipalProfile" ``` -If the cluster is using managed identities, you will see a `clientId` value of "msi". A cluster using a Service Principal instead will instead show the object ID. For example: +If the cluster is using a managed identity, the output shows `clientId` with a value of **msi**. A cluster using a service principal shows an object ID. For example: ```output { @@ -114,7 +118,7 @@ If the cluster is using managed identities, you will see a `clientId` value of " } ``` -After verifying the cluster is using managed identities, you can find the control plane system-assigned identity's object ID with the following command: +After verifying the cluster is using a managed identity, you can find the control plane system-assigned identity's object ID by running the following command: ```azurecli-interactive az aks show -g -n --query "identity" @@ -130,57 +134,36 @@ az aks show -g -n --query "identity" ``` > [!NOTE] -> For creating and using your own VNet, static IP address, or attached Azure disk where the resources are outside of the worker node resource group, use the PrincipalID of the cluster System Assigned Managed Identity to perform a role assignment. For more information on role assignment, see [Delegate access to other Azure resources](kubernetes-service-principal.md#delegate-access-to-other-azure-resources). 
+> For creating and using your own VNet, static IP address, or attached Azure disk where the resources are outside of the worker node resource group, the CLI will add the role assignment automatically. If you are using an ARM template or other method, you need to use the PrincipalID of the cluster system-assigned managed identity to perform a role assignment. For more information on role assignment, see [Delegate access to other Azure resources](kubernetes-service-principal.md#delegate-access-to-other-azure-resources). > -> Permission grants to cluster Managed Identity used by Azure Cloud provider may take up 60 minutes to populate. +> Permissions granted to your cluster's managed identity used by Azure may take up to 60 minutes to populate. +## Bring your own control plane managed identity -## Bring your own control plane MI -A custom control plane identity enables access to be granted to the existing identity prior to cluster creation. This feature enables scenarios such as using a custom VNET or outboundType of UDR with a pre-created managed identity. +A custom control plane managed identity enables access to be granted to an existing identity prior to cluster creation. This feature enables scenarios such as using a custom VNET or outboundType of UDR with a pre-created managed identity. -You must have the Azure CLI, version 2.15.1 or later installed. - -### Limitations -* USDOD Central, USDOD East, USGov Iowa in Azure Government aren't currently supported. +> [!NOTE] +> The USDOD Central, USDOD East, and USGov Iowa regions in the Azure US Government cloud aren't currently supported. -If you don't have a managed identity yet, you should go ahead and create one for example by using the [az identity][az-identity-create] command. +If you don't have a managed identity, create one by running the [az identity create][az-identity-create] command. ```azurecli-interactive az identity create --name myIdentity --resource-group myResourceGroup ``` -Assign "Managed Identity Operator" role to the identity. +The Azure CLI automatically adds the required role assignment for the control plane managed identity. If you're using an ARM template or another method, you need to create the role assignment manually. ```azurecli-interactive -az role assignment create --assignee --role "Managed Identity Operator" --scope - - -The result should look like: - -```output -{ - "canDelegate": null, - "condition": null, - "conditionVersion": null, - "description": null, - "id": "/subscriptions//resourcegroups/myResourceGroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myIdentity", - "name": "myIdentity, - "principalId": "", - "principalType": "ServicePrincipal", - "resourceGroup": "myResourceGroup", - "roleDefinitionId": "/subscriptions//providers/Microsoft.Authorization/roleDefinitions/", - "scope": "", - "type": "Microsoft.Authorization/roleAssignments" -} +az role assignment create --assignee --role "Managed Identity Operator" --scope ``` -If your managed identity is part of your subscription, you can use [az identity CLI command][az-identity-list] to query it. +If your managed identity is part of your subscription, run the following [az identity list][az-identity-list] command to query it.
```azurecli-interactive az identity list --query "[].{Name:name, Id:id, Location:location}" -o table ``` -Now you can use the following command to create your cluster with your existing identity: +Run the following command to create a cluster with your existing identity: ```azurecli-interactive az aks create \ @@ -195,7 +178,7 @@ az aks create \ --assign-identity ``` -A successful cluster creation using your own managed identities contains this userAssignedIdentities profile information: +A successful cluster creation using your own managed identity should resemble the following **userAssignedIdentities** profile information: ```output "identity": { @@ -211,28 +194,31 @@ A successful cluster creation using your own managed identities contains this us }, ``` -## Bring your own kubelet MI +## Use a pre-created kubelet managed identity -A Kubelet identity enables access to be granted to the existing identity prior to cluster creation. This feature enables scenarios such as connection to ACR with a pre-created managed identity. +A kubelet identity enables access to be granted to an existing identity prior to cluster creation. This feature enables scenarios such as connection to ACR with a pre-created managed identity. + +> [!WARNING] +> Updating the kubelet managed identity upgrades the node pool, which causes downtime for your AKS cluster as the nodes in the node pools are cordoned/drained and then reimaged. ### Prerequisites -- You must have the Azure CLI, version 2.26.0 or later installed. +- Azure CLI version 2.26.0 or later installed. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. ### Limitations -- Only works with a User-Assigned Managed cluster. -- China East, China North in Azure China 21Vianet aren't currently supported. +- Only works with a user-assigned managed cluster. +- China East and China North regions in Azure China 21Vianet aren't currently supported. ### Create or obtain managed identities -If you don't have a control plane managed identity yet, you should go ahead and create one. The following example uses the [az identity create][az-identity-create] command: +If you don't have a control plane managed identity, you can create one by running the following [az identity create][az-identity-create] command: ```azurecli-interactive az identity create --name myIdentity --resource-group myResourceGroup ``` -The result should look like: +The output should resemble the following: ```output { @@ -249,13 +235,13 @@ The result should look like: } ``` -If you don't have a kubelet managed identity yet, you should go ahead and create one. The following example uses the [az identity create][az-identity-create] command: +If you don't have a kubelet managed identity, you can create one by running the following [az identity create][az-identity-create] command: ```azurecli-interactive az identity create --name myKubeletIdentity --resource-group myResourceGroup ``` -The result should look like: +The output should resemble the following: ```output { @@ -280,7 +266,7 @@ az identity list --query "[].{Name:name, Id:id, Location:location}" -o table ### Create a cluster using kubelet identity -Now you can use the following command to create your cluster with your existing identities. Provide the control plane identity id via `assign-identity` and the kubelet managed identity via `assign-kubelet-identity`: +Now you can use the following command to create your AKS cluster with your existing identities.
Provide the control plane identity resource ID via `assign-identity` and the kubelet managed identity via `assign-kubelet-identity`: ```azurecli-interactive az aks create \ @@ -292,11 +278,11 @@ az aks create \ --dns-service-ip 10.2.0.10 \ --service-cidr 10.2.0.0/24 \ --enable-managed-identity \ - --assign-identity \ - --assign-kubelet-identity + --assign-identity \ + --assign-kubelet-identity ``` -A successful cluster creation using your own kubelet managed identity contains the following output: +A successful AKS cluster creation using your own kubelet managed identity should resemble the following output: ```output "identity": { @@ -319,32 +305,31 @@ A successful cluster creation using your own kubelet managed identity contains t }, ``` -### Update an existing cluster using kubelet identity (Preview) - -Update kubelet identity on an existing cluster with your existing identities. +### Update an existing cluster using kubelet identity -#### Install the `aks-preview` Azure CLI +Update kubelet identity on an existing AKS cluster with your existing identities. -You also need the *aks-preview* Azure CLI extension version 0.5.64 or later. Install the *aks-preview* Azure CLI extension by using the [az extension add][az-extension-add] command. Or install any available updates by using the [az extension update][az-extension-update] command. +#### Make sure the CLI version is 2.37.0 or later ```azurecli-interactive -# Install the aks-preview extension -az extension add --name aks-preview +# Check the version of Azure CLI modules +az version -# Update the extension to make sure you have the latest version installed -az extension update --name aks-preview +# Upgrade the version to make sure it is 2.37.0 or later +az upgrade ``` -#### Updating your cluster with kubelet identity (Preview) -Now you can use the following command to update your cluster with your existing identities. Provide the control plane identity id via `assign-identity` and the kubelet managed identity via `assign-kubelet-identity`: +#### Updating your cluster with kubelet identity + +Now you can use the following command to update your cluster with your existing identities. Provide the control plane identity resource ID via `assign-identity` and the kubelet managed identity via `assign-kubelet-identity`: ```azurecli-interactive az aks update \ --resource-group myResourceGroup \ --name myManagedCluster \ --enable-managed-identity \ - --assign-identity \ - --assign-kubelet-identity + --assign-identity \ + --assign-kubelet-identity ``` A successful cluster update using your own kubelet managed identity contains the following output: @@ -371,7 +356,8 @@ A successful cluster update using your own kubelet managed identity contains the ``` ## Next steps -* Use [Azure Resource Manager templates ][aks-arm-template] to create Managed Identity enabled clusters. + +Use [Azure Resource Manager templates][aks-arm-template] to create a managed identity-enabled cluster.
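+For reference, a minimal sketch of the resource shape such a template might use to enable a system-assigned managed identity. This fragment is illustrative only: the API version, names, and agent pool settings are placeholders, so check the linked template reference for the full, current schema.
+
+```json
+{
+  "type": "Microsoft.ContainerService/managedClusters",
+  "apiVersion": "2022-04-01",
+  "name": "myManagedCluster",
+  "location": "[resourceGroup().location]",
+  "comments": "Illustrative sketch only; see the managedClusters template reference for the full schema.",
+  "identity": {
+    "type": "SystemAssigned"
+  },
+  "properties": {
+    "dnsPrefix": "mymanagedcluster",
+    "agentPoolProfiles": [
+      {
+        "name": "nodepool1",
+        "count": 3,
+        "vmSize": "Standard_DS2_v2",
+        "mode": "System"
+      }
+    ]
+  }
+}
+```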
[aks-arm-template]: /azure/templates/microsoft.containerservice/managedclusters @@ -381,3 +367,4 @@ A successful cluster update using your own kubelet managed identity contains the [az-identity-list]: /cli/azure/identity#az_identity_list [az-feature-list]: /cli/azure/feature#az_feature_list [az-provider-register]: /cli/azure/provider#az_provider_register +[managed-identity-resources-overview]: ../active-directory/managed-identities-azure-resources/overview.md diff --git a/articles/aks/use-multiple-node-pools.md b/articles/aks/use-multiple-node-pools.md index 8b42fbe229607..3621e1d70ff48 100644 --- a/articles/aks/use-multiple-node-pools.md +++ b/articles/aks/use-multiple-node-pools.md @@ -181,7 +181,7 @@ A workload may require splitting a cluster's nodes into separate pools for logic * All subnets assigned to node pools must belong to the same virtual network. * System pods must have access to all nodes/pods in the cluster to provide critical functionality such as DNS resolution and tunneling kubectl logs/exec/port-forward proxy. -* If you expand your VNET after creating the cluster you must update your cluster (perform any managed cluster operation but node pool operations don't count) before adding a subnet outside the original cidr. AKS will error out on the agent pool add now though we originally allowed it. If you don't know how to reconcile your cluster file a support ticket. +* If you expand your VNet after creating the cluster, you must update your cluster (perform any managed cluster operation; node pool operations don't count) before adding a subnet outside the original CIDR. AKS now errors out on the agent pool add, although it was originally allowed. The `aks-preview` Azure CLI extension (version 0.5.66+) now supports running `az aks update -g -n ` without any optional arguments. This command can perform an update operation without making any changes, which can recover a cluster stuck in a failed state. * In clusters with Kubernetes version < 1.23.3, kube-proxy will SNAT traffic from new subnets, which can cause Azure Network Policy to drop the packets. * Windows nodes will SNAT traffic to the new subnets until the node pool is reimaged. * Internal load balancers default to one of the node pool subnets (usually the first subnet of the node pool at cluster creation). To override this behavior, you can [specify the load balancer's subnet explicitly using an annotation][internal-lb-different-subnet]. @@ -639,7 +639,7 @@ az aks nodepool add \ To verify your node pool is FIPS-enabled, use [az aks show][az-aks-show] to check the *enableFIPS* value in *agentPoolProfiles*. ```azurecli-interactive -az aks show --resource-group myResourceGroup --cluster-name myAKSCluster --query="agentPoolProfiles[].{Name:name enableFips:enableFips}" -o table +az aks show --resource-group myResourceGroup --name myAKSCluster --query="agentPoolProfiles[].{Name:name, enableFips:enableFips}" -o table ``` The following example output shows the *fipsnp* node pool is FIPS-enabled and *nodepool1* isn't. @@ -665,7 +665,7 @@ aks-nodepool1-12345678-vmss000000 Ready agent 34m v1.19.9 In the above example, the nodes starting with `aks-fipsnp` are part of the FIPS-enabled node pool. Use `kubectl debug` to run a deployment with an interactive session on one of those nodes in the FIPS-enabled node pool.
```azurecli-interactive -kubectl debug node/aks-fipsnp-12345678-vmss000000 -it --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 +kubectl debug node/aks-fipsnp-12345678-vmss000000 -it --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 ``` From the interactive session, you can verify the FIPS cryptographic libraries are enabled: diff --git a/articles/aks/use-network-policies.md b/articles/aks/use-network-policies.md index a81d4d4e5ada9..7a78a97532f7c 100644 --- a/articles/aks/use-network-policies.md +++ b/articles/aks/use-network-policies.md @@ -4,7 +4,7 @@ titleSuffix: Azure Kubernetes Service description: Learn how to secure traffic that flows in and out of pods by using Kubernetes network policies in Azure Kubernetes Service (AKS) services: container-service ms.topic: article -ms.date: 03/16/2021 +ms.date: 03/29/2022 --- @@ -18,15 +18,6 @@ This article shows you how to install the network policy engine and create Kuber You need the Azure CLI version 2.0.61 or later installed and configured. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. -> [!TIP] -> If you used the network policy feature during preview, we recommend that you [create a new cluster](#create-an-aks-cluster-and-enable-network-policy). -> -> If you wish to continue using existing test clusters that used network policy during preview, upgrade your cluster to a new Kubernetes versions for the latest GA release and then deploy the following YAML manifest to fix the crashing metrics server and Kubernetes dashboard. This fix is only required for clusters that used the Calico network policy engine. -> -> As a security best practice, [review the contents of this YAML manifest][calico-aks-cleanup] to understand what is deployed into the AKS cluster. -> -> `kubectl delete -f https://raw.githubusercontent.com/Azure/aks-engine/master/docs/topics/calico-3.3.1-cleanup-after-upgrade.yaml` - ## Overview of network policy All pods in an AKS cluster can send and receive traffic without limitations, by default. To improve security, you can define rules that control the flow of traffic. Back-end applications are often only exposed to required front-end services, for example. Or, database components are only accessible to the application tiers that connect to them. @@ -100,7 +91,7 @@ az network vnet create \ --subnet-prefix 10.240.0.0/16 # Create a service principal and read in the application ID -SP=$(az ad sp create-for-rbac --role Contributor --output json) +SP=$(az ad sp create-for-rbac --output json) SP_ID=$(echo $SP | jq -r .appId) SP_PASSWORD=$(echo $SP | jq -r .password) @@ -239,7 +230,13 @@ kubectl run backend --image=mcr.microsoft.com/oss/nginx/nginx:1.15.5-alpine --la Create another pod and attach a terminal session to test that you can successfully reach the default NGINX webpage: ```console -kubectl run --rm -it --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 network-policy --namespace development +kubectl run --rm -it --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 network-policy --namespace development +``` + +Install `wget`: + +```console +apt-get update && apt-get install -y wget ``` At the shell prompt, use `wget` to confirm that you can access the default NGINX webpage: @@ -295,7 +292,13 @@ kubectl apply -f backend-policy.yaml Let's see if you can use the NGINX webpage on the back-end pod again. 
Create another test pod and attach a terminal session: ```console -kubectl run --rm -it --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 network-policy --namespace development +kubectl run --rm -it --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 network-policy --namespace development +``` + +Install `wget`: + +```console +apt-get update && apt-get install -y wget ``` At the shell prompt, use `wget` to see if you can access the default NGINX webpage. This time, set a timeout value to *2* seconds. The network policy now blocks all inbound traffic, so the page can't be loaded, as shown in the following example: @@ -352,7 +355,13 @@ kubectl apply -f backend-policy.yaml Schedule a pod that is labeled as *app=webapp,role=frontend* and attach a terminal session: ```console -kubectl run --rm -it frontend --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 --labels app=webapp,role=frontend --namespace development +kubectl run --rm -it frontend --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 --labels app=webapp,role=frontend --namespace development +``` + +Install `wget`: + +```console +apt-get update && apt-get install -y wget ``` At the shell prompt, use `wget` to see if you can access the default NGINX webpage: @@ -382,7 +391,13 @@ exit The network policy allows traffic from pods labeled *app: webapp,role: frontend*, but should deny all other traffic. Let's test to see whether another pod without those labels can access the back-end NGINX pod. Create another test pod and attach a terminal session: ```console -kubectl run --rm -it --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 network-policy --namespace development +kubectl run --rm -it --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 network-policy --namespace development +``` + +Install `wget`: + +```console +apt-get update && apt-get install -y wget ``` At the shell prompt, use `wget` to see if you can access the default NGINX webpage. The network policy blocks the inbound traffic, so the page can't be loaded, as shown in the following example: @@ -415,7 +430,13 @@ kubectl label namespace/production purpose=production Schedule a test pod in the *production* namespace that is labeled as *app=webapp,role=frontend*. 
Attach a terminal session: ```console -kubectl run --rm -it frontend --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 --labels app=webapp,role=frontend --namespace production +kubectl run --rm -it frontend --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 --labels app=webapp,role=frontend --namespace production +``` + +Install `wget`: + +```console +apt-get update && apt-get install -y wget ``` At the shell prompt, use `wget` to confirm that you can access the default NGINX webpage: @@ -479,7 +500,13 @@ kubectl apply -f backend-policy.yaml Schedule another pod in the *production* namespace and attach a terminal session: ```console -kubectl run --rm -it frontend --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 --labels app=webapp,role=frontend --namespace production +kubectl run --rm -it frontend --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 --labels app=webapp,role=frontend --namespace production +``` + +Install `wget`: + +```console +apt-get update && apt-get install -y wget ``` At the shell prompt, use `wget` to see that the network policy now denies traffic: @@ -501,7 +528,13 @@ exit With traffic denied from the *production* namespace, schedule a test pod back in the *development* namespace and attach a terminal session: ```console -kubectl run --rm -it frontend --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 --labels app=webapp,role=frontend --namespace development +kubectl run --rm -it frontend --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 --labels app=webapp,role=frontend --namespace development +``` + +Install `wget`: + +```console +apt-get update && apt-get install -y wget ``` At the shell prompt, use `wget` to see that the network policy allows the traffic: diff --git a/articles/aks/use-tags.md b/articles/aks/use-tags.md index 34d05c2009402..e8bde9ef110b0 100644 --- a/articles/aks/use-tags.md +++ b/articles/aks/use-tags.md @@ -3,7 +3,7 @@ title: Use Azure tags in Azure Kubernetes Service (AKS) description: Learn how to use Azure provider tags to track resources in Azure Kubernetes Service (AKS). services: container-service ms.topic: article -ms.date: 02/08/2022 +ms.date: 05/26/2022 --- # Use Azure tags in Azure Kubernetes Service (AKS) @@ -40,8 +40,16 @@ When you create or update an AKS cluster with the `--tags` parameter, the follow * The AKS cluster * The route table that's associated with the cluster * The public IP that's associated with the cluster +* The load balancer that's associated with the cluster * The network security group that's associated with the cluster * The virtual network that's associated with the cluster +* The AKS managed kubelet msi associated with the cluster +* The AKS managed addon msi associated with the cluster +* The private DNS zone associated with the private cluster +* The private endpoint associated with the private cluster + +> [!NOTE] +> Azure Private DNS only supports 15 tags. [tag resources](../azure-resource-manager/management/tag-resources.md). To create a cluster and assign Azure tags, run `az aks create` with the `--tags` parameter, as shown in the following command. Running the command creates a *myAKSCluster* in the *myResourceGroup* with the tags *dept=IT* and *costcenter=9999*. @@ -203,4 +211,4 @@ parameters: > > Any updates that you make to tags through Kubernetes will retain the value that's set through Kubernetes. 
For example, if your disk has tags *dept=IT* and *costcenter=5555* set by Kubernetes, and you use the portal to set the tags *team=beta* and *costcenter=3333*, the new list of tags would be *dept=IT*, *team=beta*, and *costcenter=5555*. If you then remove the disk through Kubernetes, the disk would have the tag *team=beta*. -[install-azure-cli]: /cli/azure/install-azure-cli \ No newline at end of file +[install-azure-cli]: /cli/azure/install-azure-cli diff --git a/articles/aks/virtual-nodes-cli.md b/articles/aks/virtual-nodes-cli.md index 5b10f6dc021c7..19544f279df31 100644 --- a/articles/aks/virtual-nodes-cli.md +++ b/articles/aks/virtual-nodes-cli.md @@ -241,7 +241,7 @@ The pod is assigned an internal IP address from the Azure virtual network subnet To test the pod running on the virtual node, browse to the demo application with a web client. As the pod is assigned an internal IP address, you can quickly test this connectivity from another pod on the AKS cluster. Create a test pod and attach a terminal session to it: ```console -kubectl run -it --rm testvk --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 +kubectl run -it --rm testvk --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 ``` Install `curl` in the pod using `apt-get`: @@ -299,13 +299,19 @@ AKS_SUBNET=myVirtualNodeSubnet NODE_RES_GROUP=$(az aks show --resource-group $RES_GROUP --name $AKS_CLUSTER --query nodeResourceGroup --output tsv) # Get network profile ID -NETWORK_PROFILE_ID=$(az network profile list --resource-group $NODE_RES_GROUP --query [0].id --output tsv) +NETWORK_PROFILE_ID=$(az network profile list --resource-group $NODE_RES_GROUP --query "[0].id" --output tsv) # Delete the network profile az network profile delete --id $NETWORK_PROFILE_ID -y +# Grab the service association link ID +SAL_ID=$(az network vnet subnet show --resource-group $RES_GROUP --vnet-name $AKS_VNET --name $AKS_SUBNET --query id --output tsv)/providers/Microsoft.ContainerInstance/serviceAssociationLinks/default + +# Delete the service association link for the subnet +az resource delete --ids $SAL_ID --api-version {api-version} + # Delete the subnet delegation to Azure Container Instances -az network vnet subnet update --resource-group $RES_GROUP --vnet-name $AKS_VNET --name $AKS_SUBNET --remove delegations 0 +az network vnet subnet update --resource-group $RES_GROUP --vnet-name $AKS_VNET --name $AKS_SUBNET --remove delegations ``` ## Next steps diff --git a/articles/aks/virtual-nodes-portal.md b/articles/aks/virtual-nodes-portal.md index c3b14f4439996..154be1d4293ed 100644 --- a/articles/aks/virtual-nodes-portal.md +++ b/articles/aks/virtual-nodes-portal.md @@ -153,7 +153,7 @@ The pod is assigned an internal IP address from the Azure virtual network subnet To test the pod running on the virtual node, browse to the demo application with a web client. As the pod is assigned an internal IP address, you can quickly test this connectivity from another pod on the AKS cluster. 
Create a test pod and attach a terminal session to it: ```console -kubectl run -it --rm virtual-node-test --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 +kubectl run -it --rm virtual-node-test --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 ``` Install `curl` in the pod using `apt-get`: diff --git a/articles/aks/web-app-routing.md b/articles/aks/web-app-routing.md index 5e7b464865caf..25bc780f57386 100644 --- a/articles/aks/web-app-routing.md +++ b/articles/aks/web-app-routing.md @@ -4,7 +4,6 @@ description: Use the Web Application Routing add-on to securely access applicat services: container-service author: jahabibi ms.topic: article -ms.custom: event-tier1-build-2022 ms.date: 05/13/2021 ms.author: jahabibi --- @@ -20,34 +19,38 @@ The Web Application Routing solution makes it easy to access applications that a - Web Application Routing currently doesn't support named ports in ingress backend. ## Web Application Routing solution overview + The add-on deploys four components: an [nginx ingress controller][nginx], [Secrets Store CSI Driver][csi-driver], [Open Service Mesh (OSM)][osm], and [External-DNS][external-dns] controller. - **Nginx ingress Controller**: The ingress controller exposed to the internet. -- **External-dns**: Watches for Kubernetes Ingress resources and creates DNS A records in the cluster-specific DNS zone. +- **External-DNS controller**: Watches for Kubernetes Ingress resources and creates DNS A records in the cluster-specific DNS zone. - **CSI driver**: Connector used to communicate with keyvault to retrieve SSL certificates for ingress controller. - **OSM**: A lightweight, extensible, cloud native service mesh that allows users to uniformly manage, secure, and get out-of-the-box observability features for highly dynamic microservice environments. -- **External-DNS controller**: Watches for Kubernetes Ingress resources and creates DNS A records in the cluster-specific DNS zone. ## Prerequisites -* An Azure subscription. If you don't have an Azure subscription, you can create a [free account](https://azure.microsoft.com/free). -* [Azure CLI installed](/cli/azure/install-azure-cli). +- An Azure subscription. If you don't have an Azure subscription, you can create a [free account](https://azure.microsoft.com/free). +- [Azure CLI installed](/cli/azure/install-azure-cli). +- An Azure Key Vault containing any application certificates. +- A DNS solution. -### Install the `aks-preview` Azure CLI +### Install the `aks-preview` Azure CLI extension -You also need the *aks-preview* Azure CLI extension version 0.5.25 or later. Install the *aks-preview* Azure CLI extension by using the [az extension add][az-extension-add] command. Or install any available updates by using the [az extension update][az-extension-update] command. +You also need the *aks-preview* Azure CLI extension version `0.5.75` or later. Install the *aks-preview* Azure CLI extension by using the [az extension add][az-extension-add] command. Or install any available updates by using the [az extension update][az-extension-update] command. ```azurecli-interactive # Install the aks-preview extension az extension add --name aks-preview + # Update the extension to make sure you have the latest version installed az extension update --name aks-preview ``` -### Install `osm` CLI -Since Web Application Routing uses OSM internally to secure intranet communication, we need to set up the CLI. The OSM command-line tool contains everything needed to install and configure Open Service Mesh. 
The binary is available on the [OSM GitHub releases page][osm-release]. +### Install the `osm` CLI + +Since Web Application Routing uses OSM internally to secure intranet communication, we need to set up the `osm` CLI. This command-line tool contains everything needed to install and configure Open Service Mesh. The binary is available on the [OSM GitHub releases page][osm-release]. -## Deploy Web Application Routing: CLI +## Deploy Web Application Routing with the Azure CLI The Web Application Routing routing add-on can be enabled with the Azure CLI when deploying an AKS cluster. To do so, use the [az aks create][az-aks-create] command with the `--enable-addons` argument. @@ -64,25 +67,23 @@ You can also enable Web Application Routing on an existing AKS cluster using the az aks enable-addons --resource-group myResourceGroup --name myAKSCluster --addons web_application_routing ``` -After the cluster is deployed or updated, use the [az aks show][az-aks-show] command to retrieve the DNS zone name. - ## Connect to your AKS cluster To connect to the Kubernetes cluster from your local computer, you use [kubectl][kubectl], the Kubernetes command-line client. -If you use the Azure Cloud Shell, `kubectl` is already installed. You can also install it locally using the [az aks install-cli][] command: +If you use the Azure Cloud Shell, `kubectl` is already installed. You can also install it locally using the `az aks install-cli` command: ```azurecli az aks install-cli ``` -To configure `kubectl` to connect to your Kubernetes cluster, use the [az aks get-credentials][] command. The following example gets credentials for the AKS cluster named *MyAKSCluster* in the *MyResourceGroup*: +To configure `kubectl` to connect to your Kubernetes cluster, use the [az aks get-credentials][az-aks-get-credentials] command. The following example gets credentials for the AKS cluster named *myAKSCluster* in *myResourceGroup*: ```azurecli -az aks get-credentials --resource-group MyResourceGroup --name MyAKSCluster +az aks get-credentials --resource-group myResourceGroup --name myAKSCluster ``` -## Create Application Namespace +## Create the application namespace For the sample application environment, let's first create a namespace called `hello-web-app-routing` to run the example pods: @@ -90,7 +91,7 @@ For the sample application environment, let's first create a namespace called `h kubectl create namespace hello-web-app-routing ``` -## Add Application Namespace to OSM Control Plane +We also need to add the application namespace to the OSM control plane: ```bash osm namespace add hello-web-app-routing @@ -98,7 +99,7 @@ osm namespace add hello-web-app-routing ## Grant permissions for Web Application Routing -Identify the Web Application Routing-associated managed identity within the cluster resource group `webapprouting-`. In this walkthrough, the identity is named `webapprouting-myakscluster`. +Identify the Web Application Routing-associated managed identity within the cluster resource group `webapprouting-`. In this walkthrough, the identity is named `webapprouting-myakscluster`. :::image type="content" source="media/web-app-routing/identify-msi-web-app-routing.png" alt-text="Cluster resource group in the Azure portal is shown, and the webapprouting-myakscluster user-assigned managed identity is highlighted." 
lightbox="media/web-app-routing/identify-msi-web-app-routing.png"::: @@ -106,7 +107,13 @@ Copy the identity's object ID: :::image type="content" source="media/web-app-routing/msi-web-app-object-id.png" alt-text="The webapprouting-myakscluster managed identity screen in Azure portal, the identity's object ID is highlighted. " lightbox="media/web-app-routing/msi-web-app-object-id.png"::: -### Grant Access to Keyvault +### Grant access to Azure Key Vault + +Obtain the vault URI for your Azure Key Vault: + +```azurecli +az keyvault show --resource-group myResourceGroup --name myapp-contoso +``` Grant `GET` permissions for Web Application Routing to retrieve certificates from Azure Key Vault: @@ -121,12 +128,12 @@ The Web Application Routing solution may only be triggered on service resources ```yaml annotations: kubernetes.azure.com/ingress-host: myapp.contoso.com - kubernetes.azure.com/tls-cert-keyvault-uri: myapp-contoso.vault.azure.net + kubernetes.azure.com/tls-cert-keyvault-uri: myapp-contoso.vault.azure.net/certificates/keyvault-certificate-name/keyvault-certificate-name-revision ``` -These annotations in the service manifest would direct Web Application Routing to create an ingress servicing `myapp.contoso.com` connected to the keyvault `myapp-contoso`. +These annotations in the service manifest would direct Web Application Routing to create an ingress servicing `myapp.contoso.com` connected to the keyvault `myapp-contoso` and will retrieve the `keyvault-certificate-name` with `keyvault-certificate-name-revision` -Create a file named **samples-web-app-routing.yaml** and copy in the following YAML. On line 29-31, update `` and `` with the DNS zone name collected in the previous step of this article. +Create a file named **samples-web-app-routing.yaml** and copy in the following YAML. On line 29-31, update `` with your DNS host name and `` with the full certficicate vault URI. ```yaml apiVersion: apps/v1 @@ -156,9 +163,9 @@ apiVersion: v1 kind: Service metadata: name: aks-helloworld -annotations: - kubernetes.azure.com/ingress-host: - kubernetes.azure.com/tls-cert-keyvault-uri: + annotations: + kubernetes.azure.com/ingress-host: + kubernetes.azure.com/tls-cert-keyvault-uri: spec: type: ClusterIP ports: @@ -173,37 +180,36 @@ Use the [kubectl apply][kubectl-apply] command to create the resources. kubectl apply -f samples-web-app-routing.yaml -n hello-web-app-routing ``` -The following example shows the created resources: +The following example output shows the created resources: ```bash -$ kubectl apply -f samples-web-app-routing.yaml -n hello-web-app-routing - deployment.apps/aks-helloworld created service/aks-helloworld created ``` -## Verify managed ingress created +## Verify the managed ingress was created + ```bash -$ kubectl get ingress -n hello-web-app-routing -n hello-web-app-routing +$ kubectl get ingress -n hello-web-app-routing ``` Open a web browser to **, for example *myapp.contoso.com* and verify you see the demo application. The application may take a few minutes to appear. ## Remove Web Application Routing -```console + +First, remove the associated namespace: + +```bash kubectl delete namespace hello-web-app-routing ``` -The Web Application Routing solution can be removed using the Azure CLI. To do so run the following command, substituting your AKS cluster and resource group name. +The Web Application Routing add-on can be removed using the Azure CLI. To do so run the following command, substituting your AKS cluster and resource group name. 
```azurecli az aks disable-addons --addons web_application_routing --name myAKSCluster --resource-group myResourceGroup --no-wait ``` -When the Web Application Routing routing add-on is disabled, some Kubernetes resources may remain in the cluster. These resources include *configMaps* and *secrets*, and are created in the *app-routing-system* namespace. To maintain a clean cluster, you may want to remove these resources. - -Look for *addon-web-application-routing* resources using the following [kubectl get][kubectl-get] commands: - +When the Web Application Routing add-on is disabled, some Kubernetes resources may remain in the cluster. These resources include *configMaps* and *secrets*, and are created in the *app-routing-system* namespace. To maintain a clean cluster, you may want to remove these resources. ## Clean up @@ -227,8 +233,8 @@ service "aks-helloworld" deleted [az-aks-show]: /cli/azure/aks#az-aks-show [ingress-https]: ./ingress-tls.md [az-aks-enable-addons]: /cli/azure/aks#az-aks-enable-addons -[az aks install-cli]: /cli/azure/aks#az-aks-install-cli -[az aks get-credentials]: /cli/azure/aks#az-aks-get-credentials +[az-aks-install-cli]: /cli/azure/aks#az-aks-install-cli +[az-aks-get-credentials]: /cli/azure/aks#az-aks-get-credentials [csi-driver]: https://github.com/Azure/secrets-store-csi-driver-provider-azure [az-extension-add]: /cli/azure/extension#az-extension-add [az-extension-update]: /cli/azure/extension#az-extension-update diff --git a/articles/aks/windows-faq.md b/articles/aks/windows-faq.md index 40d713ed4dda9..b3e170d8c588f 100644 --- a/articles/aks/windows-faq.md +++ b/articles/aks/windows-faq.md @@ -195,17 +195,12 @@ Use the following configuration: 1. In your Kubernetes service configuration, set **externalTrafficPolicy=Local**. This ensures that the Kubernetes service directs traffic only to pods within the local node. 1. In your Kubernetes service configuration, set **sessionAffinity: ClientIP**. This ensures that the Azure Load Balancer gets configured with session affinity. -## What if I need a feature that's not supported? - -If you encounter feature gaps, the open-source [aks-engine][aks-engine] project provides an easy and fully customizable way of running Kubernetes in Azure, including Windows support. For more information, see [AKS roadmap][aks-roadmap]. - ## Next steps To get started with Windows Server containers in AKS, see [Create a node pool that runs Windows Server in AKS][windows-node-cli]. 
[kubernetes]: https://kubernetes.io -[aks-engine]: https://github.com/azure/aks-engine [upstream-limitations]: https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#supported-functionality-and-limitations [intro-windows]: https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/ [aks-roadmap]: https://github.com/Azure/AKS/projects/1 diff --git a/articles/api-management/TOC.yml b/articles/api-management/TOC.yml index 82c0c5e0662e9..f3f11aa60bbf3 100644 --- a/articles/api-management/TOC.yml +++ b/articles/api-management/TOC.yml @@ -190,6 +190,7 @@ href: graphql-api.md - name: Import and resolve GraphQL schema href: graphql-schema-resolve-api.md + displayName: synthetic GraphQL - name: Import an App Service web API href: import-app-service-as-api.md - name: Import a Container App web API @@ -238,6 +239,7 @@ href: api-management-policy-expressions.md - name: Reuse policy configurations href: policy-fragments.md + displayName: policy fragments - name: Error handling href: api-management-error-handling-policies.md - name: Advanced monitoring @@ -263,6 +265,15 @@ href: api-management-howto-ca-certificates.md - name: Manage protocols and ciphers href: api-management-howto-manage-protocols-ciphers.md + - name: Mitigate OWASP API threats + href: mitigate-owasp-api-threats.md + displayName: OWASP top 10, vulnerability, vulnerabilities + - name: Manage API authorizations + items: + - name: Authorizations overview + href: authorizations-overview.md + - name: Configure and use authorization + href: authorizations-how-to.md - name: Set up backend authentication items: - name: Mutual certificate authentication @@ -389,6 +400,8 @@ href: api-management-transformation-policies.md - name: Validation policies href: validation-policies.md + - name: Authorizations - identity providers + href: authorizations-reference.md - name: Azure Policy built-ins displayName: samples, policies, definitions href: ./policy-reference.md diff --git a/articles/api-management/add-api-manually.md b/articles/api-management/add-api-manually.md index 273b4e35c26f0..c54a73e5fbc3e 100644 --- a/articles/api-management/add-api-manually.md +++ b/articles/api-management/add-api-manually.md @@ -89,6 +89,9 @@ Test the operation in the Azure portal. You can also test it in the **Developer This section shows how to add a wildcard operation. A wildcard operation lets you pass an arbitrary value with an API request. Instead of creating separate GET operations as shown in the previous sections, you could create a wildcard GET operation. +> [!CAUTION] +> Use care when configuring a wildcard operation. This configuration may make an API more vulnerable to certain [API security threats](mitigate-owasp-api-threats.md#improper-assets-management). + ### Add the operation 1. Select the API you created in the previous step. 
diff --git a/articles/api-management/api-management-access-restriction-policies.md b/articles/api-management/api-management-access-restriction-policies.md index 9f78d28b8d4d0..a68c273438aca 100644 --- a/articles/api-management/api-management-access-restriction-policies.md +++ b/articles/api-management/api-management-access-restriction-policies.md @@ -7,7 +7,7 @@ author: dlepow ms.service: api-management ms.topic: reference -ms.date: 03/04/2022 +ms.date: 06/03/2022 ms.author: danlep --- @@ -20,12 +20,13 @@ This article provides a reference for API Management access restriction policies ## Access restriction policies - [Check HTTP header](#CheckHTTPHeader) - Enforces existence and/or value of an HTTP header. +- [Get authorization context](#GetAuthorizationContext) - Gets the authorization context of a specified [authorization](authorizations-overview.md) configured in the API Management instance. - [Limit call rate by subscription](#LimitCallRate) - Prevents API usage spikes by limiting call rate, on a per subscription basis. - [Limit call rate by key](#LimitCallRateByKey) - Prevents API usage spikes by limiting call rate, on a per key basis. - [Restrict caller IPs](#RestrictCallerIPs) - Filters (allows/denies) calls from specific IP addresses and/or address ranges. - [Set usage quota by subscription](#SetUsageQuota) - Allows you to enforce a renewable or lifetime call volume and/or bandwidth quota, on a per subscription basis. - [Set usage quota by key](#SetUsageQuotaByKey) - Allows you to enforce a renewable or lifetime call volume and/or bandwidth quota, on a per key basis. -- [Validate JWT](#ValidateJWT) - Enforces existence and validity of a JWT extracted from either a specified HTTP Header or a specified query parameter. +- [Validate JWT](#ValidateJWT) - Enforces existence and validity of a JWT extracted from either a specified HTTP header or a specified query parameter. - [Validate client certificate](#validate-client-certificate) - Enforces that a certificate presented by a client to an API Management instance matches specified validation rules and claims. > [!TIP] @@ -67,7 +68,7 @@ Use the `check-header` policy to enforce that a request has a specified HTTP hea | -------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | ------- | | failed-check-error-message | Error message to return in the HTTP response body if the header doesn't exist or has an invalid value. This message must have any special characters properly escaped. | Yes | N/A | | failed-check-httpcode | HTTP Status code to return if the header doesn't exist or has an invalid value. | Yes | N/A | -| header-name | The name of the HTTP Header to check. | Yes | N/A | +| header-name | The name of the HTTP header to check. | Yes | N/A | | ignore-case | Can be set to True or False. If set to True case is ignored when the header value is compared against the set of acceptable values. | Yes | N/A | ### Usage @@ -78,6 +79,142 @@ This policy can be used in the following policy [sections](./api-management-howt - **Policy scopes:** all scopes +## Get authorization context + +Use the `get-authorization-context` policy to get the authorization context of a specified [authorization](authorizations-overview.md) (preview) configured in the API Management instance. + +The policy fetches and stores authorization and refresh tokens from the configured authorization provider. 
+ +If `identity-type=jwt` is configured, a JWT token is required to be validated. The audience of this token must be https://azure-api.net/authorization-manager. + +[!INCLUDE [api-management-policy-generic-alert](../../includes/api-management-policy-generic-alert.md)] + + +### Policy statement + +```xml + +``` + +### Examples + +#### Example 1: Get token back + +```xml + + + + + + @(((Authorization)context.Variables.GetValueOrDefault("auth-context"))?.AccessToken) + +``` + +#### Example 2: Get token back with dynamically set attributes + +```xml + + + + + + @(((Authorization)context.Variables.GetValueOrDefault("auth-context"))?.AccessToken) + +``` + +#### Example 3: Attach the token to the backend call + +```xml + + + + + @("Bearer " + ((Authorization)context.Variables.GetValueOrDefault("auth-context"))?.AccessToken) + +``` + +#### Example 4: Get token from incoming request and return token + +```xml + + + + + + @(((Authorization)context.Variables.GetValueOrDefault("auth-context"))?.AccessToken) + +``` + +### Elements + +| Name | Description | Required | +| ----- | ------------- | -------- | +| get-authorization-context | Root element. | Yes | + +### Attributes + +| Name | Description | Required | Default | +|---|---|---|---| +| provider-id | The authorization provider resource identifier. | Yes | | +| authorization-id | The authorization resource identifier. | Yes | | +| context-variable-name | The name of the context variable to receive the [`Authorization` object](#authorization-object). | Yes | | +| identity-type | Type of identity to be checked against the authorization access policy.
        - `managed`: managed identity of the API Management service.
        - `jwt`: JWT bearer token specified in the `identity` attribute. | No | managed | +| identity | An Azure AD JWT bearer token to be checked against the authorization permissions. Ignored for `identity-type` other than `jwt`.

        Expected claims:
        - audience: https://azure-api.net/authorization-manager
        - `oid`: Permission object id
        - `tid`: Permission tenant id | No | | +| ignore-error | Boolean. If acquiring the authorization context results in an error (for example, the authorization resource is not found or is in an error state):
        - `true`: the context variable is assigned a value of null.
        - `false`: return `500` | No | false | + +### Authorization object + +The Authorization context variable receives an object of type `Authorization`. + +```c# +class Authorization +{ + public string AccessToken { get; } + public IReadOnlyDictionary Claims { get; } +} +``` + +| Property Name | Description | +| -- | -- | +| AccessToken | Bearer access token to authorize a backend HTTP request. | +| Claims | Claims returned from the authorization server’s token response API (see [RFC6749#section-5.1](https://datatracker.ietf.org/doc/html/rfc6749#section-5.1)). | + +### Usage + +This policy can be used in the following policy [sections](./api-management-howto-policies.md#sections) and [scopes](./api-management-howto-policies.md#scopes). + +- **Policy sections:** inbound + +- **Policy scopes:** all scopes + + ## Limit call rate by subscription The `rate-limit` policy prevents API usage spikes on a per subscription basis by limiting the call rate to a specified number per a specified time period. When the call rate is exceeded, the caller receives a `429 Too Many Requests` response status code. @@ -280,6 +417,9 @@ This policy can be used in the following policy [sections](./api-management-howt - **Policy sections:** inbound - **Policy scopes:** all scopes +> [!NOTE] +> If you configure this policy at more than one scope, IP filtering is applied in the order of [policy evaluation](set-edit-policies.md#use-base-element-to-set-policy-evaluation-order) in your policy definition. + ## Set usage quota by subscription The `quota` policy enforces a renewable or lifetime call volume and/or bandwidth quota, on a per subscription basis. @@ -412,7 +552,7 @@ This policy can be used in the following policy [sections](./api-management-howt ## Validate JWT -The `validate-jwt` policy enforces existence and validity of a JSON web token (JWT) extracted from either a specified HTTP Header or a specified query parameter. +The `validate-jwt` policy enforces existence and validity of a JSON web token (JWT) extracted from either a specified HTTP header or a specified query parameter. > [!IMPORTANT] > The `validate-jwt` policy requires that the `exp` registered claim is included in the JWT token, unless `require-expiration-time` attribute is specified and set to `false`. diff --git a/articles/api-management/api-management-advanced-policies.md b/articles/api-management/api-management-advanced-policies.md index e29a6ef18032d..e8e579e4286d4 100644 --- a/articles/api-management/api-management-advanced-policies.md +++ b/articles/api-management/api-management-advanced-policies.md @@ -713,7 +713,7 @@ This sample policy shows an example of using the `send-one-way-request` policy t - https://hooks.slack.com/services/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 + https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX POST @{ return new JObject( diff --git a/articles/api-management/api-management-cross-domain-policies.md b/articles/api-management/api-management-cross-domain-policies.md index d4bf59079ecd6..eb27f8f544213 100644 --- a/articles/api-management/api-management-cross-domain-policies.md +++ b/articles/api-management/api-management-cross-domain-policies.md @@ -51,6 +51,9 @@ Use the `cross-domain` policy to make the API accessible from Adobe Flash and Mi |----------|-----------------|--------------| |cross-domain|Root element. 
Child elements must conform to the [Adobe cross-domain policy file specification](https://www.adobe.com/devnet-docs/acrobatetk/tools/AppSec/CrossDomain_PolicyFile_Specification.pdf).|Yes| +> [!CAUTION] +> Use the `*` wildcard with care in policy settings. This configuration may be overly permissive and may make an API more vulnerable to certain [API security threats](mitigate-owasp-api-threats.md#security-misconfiguration). + ### Usage This policy can be used in the following policy [sections](./api-management-howto-policies.md#sections) and [scopes](./api-management-howto-policies.md#scopes). @@ -137,6 +140,9 @@ This example demonstrates how to support [pre-flight requests](https://developer |expose-headers|This element contains `header` elements specifying names of the headers that will be accessible by the client.|No|N/A| |header|Specifies a header name.|At least one `header` element is required in `allowed-headers` or `expose-headers` if the section is present.|N/A| +> [!CAUTION] +> Use the `*` wildcard with care in policy settings. This configuration may be overly permissive and may make an API more vulnerable to certain [API security threats](mitigate-owasp-api-threats.md#security-misconfiguration). + ### Attributes |Name|Description|Required|Default| diff --git a/articles/api-management/api-management-get-started-revise-api.md b/articles/api-management/api-management-get-started-revise-api.md index b2f6ac98a8adf..3542faaf6d071 100644 --- a/articles/api-management/api-management-get-started-revise-api.md +++ b/articles/api-management/api-management-get-started-revise-api.md @@ -120,7 +120,7 @@ Use this procedure to create and update a release. The notes you specify appear in the change log. You can see them in the output of the previous command. -1. When you create a release, the `--notes` parameter is optional. You can add or change the notes later using the [az apim api release update](/cli/azure/apim/api/release#az_apim_api_release_update) command: +1. When you create a release, the `--notes` parameter is optional. You can add or change the notes later using the [az apim api release update](/cli/azure/apim/api/release#az-apim-api-release-update) command: ```azurecli az apim api release update --resource-group apim-hello-word-resource-group \ diff --git a/articles/api-management/api-management-howto-aad.md b/articles/api-management/api-management-howto-aad.md index e67741f224a0d..cfd7eda37cf97 100644 --- a/articles/api-management/api-management-howto-aad.md +++ b/articles/api-management/api-management-howto-aad.md @@ -1,18 +1,12 @@ --- -title: Authorize developer accounts by using Azure Active Directory +title: Authorize access to API Management developer portal by using Azure AD titleSuffix: Azure API Management -description: Learn how to authorize users by using Azure Active Directory in API Management. -services: api-management -documentationcenter: API Management -author: dlepow -manager: cfowler -editor: '' +description: Learn how to enable user sign-in to the API Management developer portal by using Azure Active Directory. +author: dlepow ms.service: api-management -ms.workload: mobile -ms.tgt_pltfrm: na ms.topic: article -ms.date: 09/20/2021 +ms.date: 05/20/2022 ms.author: danlep --- @@ -27,19 +21,39 @@ In this article, you'll learn how to: - Complete the [Create an Azure API Management instance](get-started-create-service-instance.md) quickstart. -- [Import and publish](import-and-publish.md) an Azure API Management instance. 
+- [Import and publish](import-and-publish.md) an API in the Azure API Management instance. [!INCLUDE [azure-cli-prepare-your-environment-no-header.md](../../includes/azure-cli-prepare-your-environment-no-header.md)] [!INCLUDE [premium-dev-standard.md](../../includes/api-management-availability-premium-dev-standard.md)] -## Authorize developer accounts by using Azure AD +[!INCLUDE [api-management-navigate-to-instance.md](../../includes/api-management-navigate-to-instance.md)] + + +## Enable user sign-in using Azure AD - portal + +To simplify the configuration, API Management can automatically enable an Azure AD application and identity provider for users of the developer portal. Alternatively, you can manually enable the Azure AD application and identity provider. + +### Automatically enable Azure AD application and identity provider + +1. In the left menu of your API Management instance, under **Developer portal**, select **Portal overview**. +1. On the **Portal overview** page, scroll down to **Enable user sign-in with Azure Active Directory**. +1. Select **Enable Azure AD**. +1. On the **Enable Azure AD** page, select **Enable Azure AD**. +1. Select **Close**. -1. Sign in to the [Azure portal](https://portal.azure.com). -1. Select ![Arrow icon.](./media/api-management-howto-aad/arrow.png). -1. Search for and select **API Management services**. -1. Select your API Management service instance. -1. Under **Developer portal**, select **Identities**. + :::image type="content" source="media/api-management-howto-aad/enable-azure-ad-portal.png" alt-text="Screenshot of enabling Azure AD in the developer portal overview page."::: + +After the Azure AD provider is enabled: + +* Users in the specified Azure AD instance can [sign into the developer portal by using an Azure AD account](#log_in_to_dev_portal). +* You can manage the Azure AD configuration on the **Developer portal** > **Identities** page in the portal. +* Optionally configure other sign-in settings by selecting **Identities** > **Settings**. For example, you might want to redirect anonymous users to the sign-in page. +* Republish the developer portal after any configuration change. + +### Manually enable Azure AD application and identity provider + +1. In the left menu of your API Management instance, under **Developer portal**, select **Identities**. 1. Select **+Add** from the top to open the **Add identity provider** pane to the right. 1. Under **Type**, select **Azure Active Directory** from the drop-down menu. * Once selected, you'll be able to enter other necessary information. @@ -47,7 +61,7 @@ In this article, you'll learn how to: * See more information about these controls later in the article. 1. Save the **Redirect URL** for later. - :::image type="content" source="media/api-management-howto-aad/api-management-with-aad001.png" alt-text="Add identity provider in Azure portal"::: + :::image type="content" source="media/api-management-howto-aad/api-management-with-aad001.png" alt-text="Screenshot of adding identity provider in Azure portal."::: > [!NOTE] > There are two redirect URLs:
        @@ -60,9 +74,9 @@ In this article, you'll learn how to: 1. Navigate to [App registrations](https://go.microsoft.com/fwlink/?linkid=2083908) to register an app in Active Directory. 1. Select **New registration**. On the **Register an application** page, set the values as follows: - * Set **Name** to a meaningful name. e.g., *developer-portal* + * Set **Name** to a meaningful name such as *developer-portal* * Set **Supported account types** to **Accounts in this organizational directory only**. - * Set **Redirect URI** to the value you saved from step 9. + * In **Redirect URI**, select **Web** and paste the redirect URL you saved from a previous step. * Select **Register**. 1. After you've registered the application, copy the **Application (client) ID** from the **Overview** page. @@ -77,14 +91,19 @@ In this article, you'll learn how to: * Choose **Add**. 1. Copy the client **Secret value** before leaving the page. You will need it later. 1. Under **Manage** in the side menu, select **Authentication**. -1. Under the **Implicit grant and hybrid flows** sections, select the **ID tokens** checkbox. + 1. Under the **Implicit grant and hybrid flows** section, select the **ID tokens** checkbox. + 1. Select **Save**. +1. Under **Manage** in the side menu, select **Token configuration** > **+ Add optional claim**. + 1. In **Token type**, select **ID**. + 1. Select (check) the following claims: **email**, **family_name**, **given_name**. + 1. Select **Add**. If prompted, select **Turn on the Microsoft Graph email, profile permission**. 1. Switch to the browser tab with your API Management instance. 1. Paste the secret into the **Client secret** field in the **Add identity provider** pane. > [!IMPORTANT] > Update the **Client secret** before the key expires. -1. In the **Add identity provider** pane's **Allowed Tenants** field, specify the Azure AD instances' domains to which you want to grant access to the API Management service instance APIs. +1. In the **Add identity provider** pane's **Allowed tenants** field, specify the Azure AD instance's domains to which you want to grant access to the API Management service instance APIs. * You can separate multiple domains with newlines, spaces, or commas. > [!NOTE] @@ -93,9 +112,15 @@ In this article, you'll learn how to: > 1. Enter the domain name of the Azure AD tenant to which they want to grant access. > 1. Select **Submit**. -1. After you specify the desired configuration, select **Add**. +1. After you specify the desired configuration, select **Add**. +1. Republish the developer portal for the Azure AD configuration to take effect. In the left menu, under **Developer portal**, select **Portal overview** > **Publish**. -Once changes are saved, users in the specified Azure AD instance can [sign into the developer portal by using an Azure AD account](#log_in_to_dev_portal). +After the Azure AD provider is enabled: + +* Users in the specified Azure AD instance can [sign into the developer portal by using an Azure AD account](#log_in_to_dev_portal). +* You can manage the Azure AD configuration on the **Developer portal** > **Identities** page in the portal. +* Optionally configure other sign-in settings by selecting **Identities** > **Settings**. For example, you might want to redirect anonymous users to the sign-in page. +* Republish the developer portal after any configuration change. 
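+If you prefer to script the identity provider configuration instead of using the portal, you can manage it through the Azure Resource Manager REST API, for example with `az rest`. The following sketch is illustrative only: the subscription, resource group, service name, app registration values, and `api-version` are placeholders, so verify the current property names and API version in the API Management REST API reference before relying on it.
+
+```azurecli
+# Illustrative only: replace the subscription, resource group, service name, and app values.
+az rest --method PUT \
+  --uri "https://management.azure.com/subscriptions/<subscription-id>/resourceGroups/myResourceGroup/providers/Microsoft.ApiManagement/service/myApimInstance/identityProviders/aad?api-version=2021-08-01" \
+  --body '{
+    "properties": {
+      "clientId": "<app-client-id>",
+      "clientSecret": "<app-client-secret>",
+      "allowedTenants": [ "contoso.onmicrosoft.com" ]
+    }
+  }'
+```
+
+As with the portal steps, republish the developer portal after the identity provider changes so the configuration takes effect.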
## Add an external Azure AD group @@ -120,20 +145,20 @@ Follow these steps to grant: az rest --method PATCH --uri "https://graph.microsoft.com/v1.0/$($tenantId)/applications/$($appObjectID)" --body "{'requiredResourceAccess':[{'resourceAccess': [{'id': 'e1fe6dd8-ba31-4d61-89e7-88639da4683d','type': 'Scope'},{'id': '7ab1d382-f21e-4acd-a863-ba3e13f7da61','type': 'Role'}],'resourceAppId': '00000003-0000-0000-c000-000000000000'}]}" ``` -2. Log out and log back in to the Azure portal. -3. Navigate to the App Registration page for the application you registered in [the previous section](#authorize-developer-accounts-by-using-azure-ad). -4. Click **API Permissions**. You should see the permissions granted by the Azure CLI script in step 1. -5. Select **Grant admin consent for {tenantname}** so that you grant access for all users in this directory. +1. Sign out and sign back in to the Azure portal. +1. Navigate to the App Registration page for the application you registered in [the previous section](#enable-user-sign-in-using-azure-ad---portal). +1. Select **API Permissions**. You should see the permissions granted by the Azure CLI script in step 1. +1. Select **Grant admin consent for {tenantname}** so that you grant access for all users in this directory. Now you can add external Azure AD groups from the **Groups** tab of your API Management instance. 1. Under **Developer portal** in the side menu, select **Groups**. -2. Select the **Add Azure AD group** button. +1. Select the **Add Azure AD group** button. - !["Add A A D group" button](./media/api-management-howto-aad/api-management-with-aad008.png) + ![Screenshot showing the Add Azure AD group button.](./media/api-management-howto-aad/api-management-with-aad008.png) 1. Select the **Tenant** from the drop-down. -2. Search for and select the group that you want to add. -3. Press the **Select** button. +1. Search for and select the group that you want to add. +1. Press the **Select** button. Once you add an external Azure AD group, you can review and configure its properties: 1. Select the name of the group from the **Groups** tab. @@ -144,12 +169,15 @@ Users from the configured Azure AD instance can now: * View and subscribe to any groups for which they have visibility. > [!NOTE] -> Learn more about the difference between **Delegated** and **Application** permissions types in [Permissions and consent in the Microsoft identity platform](../active-directory/develop/v2-permissions-and-consent.md#permission-types) article. +> Learn more about the difference between **Delegated** and **Application** permission types in the [Permissions and consent in the Microsoft identity platform](../active-directory/develop/v2-permissions-and-consent.md#permission-types) article. ## Developer portal: Add Azure AD account authentication In the developer portal, you can sign in with Azure AD using the **Sign-in button: OAuth** widget included on the sign-in page of the default developer portal content. +:::image type="content" source="media/api-management-howto-aad/developer-portal-azure-ad-signin.png" alt-text="Screenshot showing OAuth widget in developer portal."::: + + Although a new account will automatically be created when a new user signs in with Azure AD, consider adding the same widget to the sign-up page. The **Sign-up form: OAuth** widget represents a form used for signing up with OAuth.
> [!IMPORTANT] diff --git a/articles/api-management/api-management-howto-add-products.md b/articles/api-management/api-management-howto-add-products.md index 2fb53903ab57c..0fefbdd150c7b 100644 --- a/articles/api-management/api-management-howto-add-products.md +++ b/articles/api-management/api-management-howto-add-products.md @@ -57,6 +57,9 @@ In this tutorial, you learn how to: 1. Select **Create** to create your new product. +> [!CAUTION] +> Use care when configuring a product that doesn't require a subscription. This configuration may be overly permissive and may make the product's APIs more vulnerable to certain [API security threats](mitigate-owasp-api-threats.md#security-misconfiguration). + ### [Azure CLI](#tab/azure-cli) To begin using Azure CLI: @@ -84,6 +87,9 @@ You can specify various values for your product: | `--subscriptions-limit` | Optionally, limit the count of multiple simultaneous subscriptions.| | `--legal-terms` | You can include the terms of use for the product, which subscribers must accept to use the product. | +> [!CAUTION] +> Use care when configuring a product that doesn't require a subscription. This configuration may be overly permissive and may make the product's APIs more vulnerable to certain [API security threats](mitigate-owasp-api-threats.md#security-misconfiguration). + To see your current products, use the [az apim product list](/cli/azure/apim/product#az-apim-product-list) command: ```azurecli diff --git a/articles/api-management/api-management-howto-cache-external.md b/articles/api-management/api-management-howto-cache-external.md index ac4f046ffb2e0..3576c9a2bca8a 100644 --- a/articles/api-management/api-management-howto-cache-external.md +++ b/articles/api-management/api-management-howto-cache-external.md @@ -1,31 +1,28 @@ --- title: Use an external cache in Azure API Management | Microsoft Docs -description: Learn how to configure and use an external cache in Azure API Management. Using an external cache lets you overcome some limitations of the built-in cache. +description: Learn how to configure and use an external Redis-compatible cache in Azure API Management. Using an external cache gives you more control and flexibility than the built-in cache. services: api-management documentationcenter: '' author: dlepow -manager: erikre -editor: '' -ms.assetid: 740f6a27-8323-474d-ade2-828ae0c75e7a ms.service: api-management -ms.topic: conceptual -ms.date: 04/26/2020 +ms.topic: how-to +ms.date: 05/19/2022 ms.author: danlep --- # Use an external Redis-compatible cache in Azure API Management -In addition to utilizing the built-in cache, Azure API Management allows for caching responses in an external Redis-compatible cache, e.g. Azure Cache for Redis. +In addition to utilizing the built-in cache, Azure API Management allows for caching responses in an external Redis-compatible cache, such as Azure Cache for Redis. 
Using an external cache allows you to overcome a few limitations of the built-in cache: * Avoid having your cache periodically cleared during API Management updates * Have more control over your cache configuration -* Cache more data than your API Management tier allows to +* Cache more data than your API Management tier allows * Use caching with the Consumption tier of API Management -* Enable caching in the [API Management self-hosted gateways](self-hosted-gateway-overview.md) +* Enable caching in the [API Management self-hosted gateway](self-hosted-gateway-overview.md) For more detailed information about caching, see [API Management caching policies](api-management-caching-policies.md) and [Custom caching in Azure API Management](api-management-sample-cache-by-key.md). @@ -45,62 +42,76 @@ To complete this tutorial, you need to: ## Create Azure Cache for Redis -This section explains how to create an Azure Cache for Redis in Azure. If you already have an Azure Cache for Redis, within or outside of Azure, you can skip to the next section. +This section explains how to create an Azure Cache for Redis in Azure. If you already have an Azure Cache for Redis, or another Redis-compatible cache within or outside of Azure, you can skip to the next section. [!INCLUDE [redis-cache-create](../azure-cache-for-redis/includes/redis-cache-create.md)] ## Deploy Redis cache to Kubernetes -For caching, self-hosted gateways rely exclusively on external caches. For caching to be effective self-hosted gateways and the cache they rely on must be located close to each other to minimize lookup and store latencies. Deploying a Redis cache into the same Kubernetes cluster or in a separate cluster nearby are the best options. Follow this [link](https://github.com/kubernetes/examples/tree/master/guestbook) to learn how to deploy Redis cache to a Kubernetes cluster. +For a self-hosted gateway, caching requires an external cache. For caching to be effective, a self-hosted gateway and the cache it relies on must be located close to each other to minimize lookup and store latencies. Deploying a Redis cache into the same Kubernetes cluster or in a separate cluster nearby are the best options. Learn how to [deploy Redis cache to a Kubernetes cluster](https://github.com/kubernetes/examples/tree/master/guestbook). ## Add an external cache -Follow the steps below to add an external Azure Cache for Redis in Azure API Management. +Follow the steps below to add an external Redis-compatible cache in Azure API Management. You can limit the cache to a specific gateway in your API Management instance. ![Screenshot that shows how to add an external Azure Cache for Redis in Azure API Management.](media/api-management-howto-cache-external/add-external-cache.png) +### Use from setting + +The **Use from** setting in the configuration specifies the location of your API Management instance that will use the cache. Select one of the following: + +* The Azure region where the API Management instance is hosted (or one of the configured locations, if you have a [multi-region](api-management-howto-deploy-multi-region.md) deployment) + +* A self-hosted gateway location + +* **Default**, to configure the cache as the default for all gateway locations in the API Management instance + + A cache used for **Default** will be overridden by a cache used for a specific matching region or location. + + For example, consider an API Management instance that's hosted in the East US, Southeast Asia, and West Europe regions. 
There are two caches configured, one for **Default** and one for **Southeast Asia**. In this example, API Management in **Southeast Asia** will use its own cache, while the other two regions will use the **Default** cache entry. + > [!NOTE] -> The **Use from** setting specifies an Azure region or a self-hosted gateway location that will use the configured cache. The caches configured as **Default** will be overridden by caches with a specific matching region or location value. -> -> For example, if API Management is hosted in the East US, Southeast Asia and West Europe regions and there are two caches configured, one for **Default** and one for **Southeast Asia**, API Management in **Southeast Asia** will use its own cache, while the other two regions will use the **Default** cache entry. +> You can configure the same external cache for more than one API Management instance. The API Management instances can be in the same or different regions. When sharing the cache for more than one instance, you must select **Default** in the **Use from** setting. ### Add an Azure Cache for Redis from the same subscription 1. Browse to your API Management instance in the Azure portal. 2. Select the **External cache** tab from the menu on the left. -3. Click the **+ Add** button. +3. Select the **+ Add** button. 4. Select your cache in the **Cache instance** dropdown field. -5. Select **Default** or specify the desired region in the **Use from** dropdown field. -6. Click **Save**. +5. Select **Default** or specify the desired region in the [**Use from**](#use-from-setting) dropdown field. +6. Select **Save**. -### Add an Azure Cache for Redis hosted outside of the current Azure subscription or Azure in general +### Add a Redis-compatible cache hosted outside of the current Azure subscription or Azure in general 1. Browse to your API Management instance in the Azure portal. 2. Select the **External cache** tab from the menu on the left. -3. Click the **+ Add** button. +3. Select the **+ Add** button. 4. Select **Custom** in the **Cache instance** dropdown field. -5. Select **Default** or specify the desired region in the **Use from** dropdown field. -6. Provide your Azure Cache for Redis connection string in the **Connection string** field. -7. Click **Save**. +5. Select **Default** or specify the desired region in the [**Use from**](#use-from-setting) dropdown field. +6. Provide your Azure Cache for Redis (or Redis-compatible cache) connection string in the **Connection string** field. +7. Select **Save**. ### Add a Redis cache to a self-hosted gateway 1. Browse to your API Management instance in the Azure portal. 2. Select the **External cache** tab from the menu on the left. -3. Click the **+ Add** button. +3. Select the **+ Add** button. 4. Select **Custom** in the **Cache instance** dropdown field. -5. Specify the desired self-hosted gateway location or **Default** in the **Use from** dropdown field. +5. Specify the desired self-hosted gateway location or **Default** in the [**Use from**](#use-from-setting) dropdown field. 6. Provide your Redis cache connection string in the **Connection string** field. -7. Click **Save**. +7. Select **Save**. ## Use the external cache -Once the external cache is configured in Azure API Management, it can be used through caching policies. See [Add caching to improve performance in Azure API Management](api-management-howto-cache.md) for detailed steps. 
+After adding a Redis-compatible cache, configure [caching policies](api-management-caching-policies.md) to enable response caching, or caching of values by key, in the external cache. + +For a detailed example, see [Add caching to improve performance in Azure API Management](api-management-howto-cache.md). ## Next steps * For more information about caching policies, see [Caching policies][Caching policies] in the [API Management policy reference][API Management policy reference]. -* For information on caching items by key using policy expressions, see [Custom caching in Azure API Management](api-management-sample-cache-by-key.md). +* To cache items by key using policy expressions, see [Custom caching in Azure API Management](api-management-sample-cache-by-key.md). [API Management policy reference]: ./api-management-policies.md [Caching policies]: ./api-management-caching-policies.md diff --git a/articles/api-management/api-management-howto-create-or-invite-developers.md b/articles/api-management/api-management-howto-create-or-invite-developers.md index de0f16bc29489..305af98970613 100644 --- a/articles/api-management/api-management-howto-create-or-invite-developers.md +++ b/articles/api-management/api-management-howto-create-or-invite-developers.md @@ -52,6 +52,8 @@ When a developer is invited, an email is sent to the developer. This email is ge Once the invitation is accepted, the account becomes active. +The invitation link is active for 2 days. + ## Deactivate or reactivate a developer account By default, newly created or invited developer accounts are **Active**. To deactivate a developer account, click **Block**. To reactivate a blocked developer account, click **Activate**. A blocked developer account can't access the developer portal or call any APIs. To delete a user account, click **Delete**. diff --git a/articles/api-management/api-management-howto-disaster-recovery-backup-restore.md b/articles/api-management/api-management-howto-disaster-recovery-backup-restore.md index 8696f4d6f57f4..ea1ded555981d 100644 --- a/articles/api-management/api-management-howto-disaster-recovery-backup-restore.md +++ b/articles/api-management/api-management-howto-disaster-recovery-backup-restore.md @@ -78,7 +78,7 @@ All of the tasks that you do on resources using the Azure Resource Manager must Before calling the APIs that generate the backup and restore, you need to get a token. The following example uses the [Microsoft.IdentityModel.Clients.ActiveDirectory](https://www.nuget.org/packages/Microsoft.IdentityModel.Clients.ActiveDirectory) NuGet package to retrieve the token. > [!IMPORTANT] -> The [Microsoft.IdentityModel.Clients.ActiveDirectory](https://www.nuget.org/packages/Microsoft.IdentityModel.Clients.ActiveDirectory) NuGet package and Azure AD Authentication Library (ADAL) have been deprecated. No new features have been added since June 30, 2020. We strongly encourage you to upgrade, see the [migration guide](/azure/active-directory/develop/msal-migration) for more details. +> The [Microsoft.IdentityModel.Clients.ActiveDirectory](https://www.nuget.org/packages/Microsoft.IdentityModel.Clients.ActiveDirectory) NuGet package and Azure AD Authentication Library (ADAL) have been deprecated. No new features have been added since June 30, 2020. We strongly encourage you to upgrade; see the [migration guide](../active-directory/develop/msal-migration.md) for more details.
```csharp using Microsoft.IdentityModel.Clients.ActiveDirectory; @@ -331,4 +331,4 @@ API Management **Premium** tier also supports [zone redundancy](zone-redundancy. [api-management-arm-token]: ./media/api-management-howto-disaster-recovery-backup-restore/api-management-arm-token.png [api-management-endpoint]: ./media/api-management-howto-disaster-recovery-backup-restore/api-management-endpoint.png [control-plane-ip-address]: virtual-network-reference.md#control-plane-ip-addresses -[azure-storage-ip-firewall]: ../storage/common/storage-network-security.md#grant-access-from-an-internet-ip-range +[azure-storage-ip-firewall]: ../storage/common/storage-network-security.md#grant-access-from-an-internet-ip-range \ No newline at end of file diff --git a/articles/api-management/api-management-howto-integrate-internal-vnet-appgateway.md b/articles/api-management/api-management-howto-integrate-internal-vnet-appgateway.md index 1e5724678c703..76db55128709d 100644 --- a/articles/api-management/api-management-howto-integrate-internal-vnet-appgateway.md +++ b/articles/api-management/api-management-howto-integrate-internal-vnet-appgateway.md @@ -23,6 +23,11 @@ By combining API Management provisioned in an internal virtual network with the * Use a single API Management resource and have a subset of APIs defined in API Management available for external consumers. * Provide a turnkey way to switch access to API Management from the public internet on and off. +For architectural guidance, see: +* **Basic enterprise integration**: [Reference architecture](/azure/architecture/reference-architectures/enterprise-integration/basic-enterprise-integration?toc=%2Fazure%2Fapi-management%2Ftoc.json&bc=/azure/api-management/breadcrumb/toc.json) +* **API Management landing zone accelerator**: [Reference architecture](/azure/architecture/example-scenario/integration/app-gateway-internal-api-management-function?toc=%2Fazure%2Fapi-management%2Ftoc.json&bc=/azure/api-management/breadcrumb/toc.json) and [design guidance](/azure/cloud-adoption-framework/scenarios/app-platform/api-management/land?toc=%2Fazure%2Fapi-management%2Ftoc.json&bc=/azure/api-management/breadcrumb/toc.json) + + > [!NOTE] > This article has been updated to use the [Application Gateway WAF_v2 SKU](../application-gateway/application-gateway-autoscaling-zone-redundant.md). diff --git a/articles/api-management/api-management-howto-properties.md b/articles/api-management/api-management-howto-properties.md index 855b64f707d8a..f75c9d2b39d2b 100644 --- a/articles/api-management/api-management-howto-properties.md +++ b/articles/api-management/api-management-howto-properties.md @@ -143,10 +143,11 @@ az apim nv delete --resource-group apim-hello-word-resource-group \ The examples in this section use the named values shown in the following table. 
| Name | Value | Secret | -|--------------------|----------------------------|--------|---------| +|--------------------|----------------------------|--------| | ContosoHeader | `TrackingId` | False | | ContosoHeaderValue | •••••••••••••••••••••• | True | | ExpressionProperty | `@(DateTime.Now.ToString())` | False | +| ContosoHeaderValue2 | `This is a header value.` | False | To use a named value in a policy, place its display name inside a double pair of braces like `{{ContosoHeader}}`, as shown in the following example: @@ -182,6 +183,16 @@ If you look at the outbound [API trace](api-management-howto-api-inspector.md) f :::image type="content" source="media/api-management-howto-properties/api-management-api-inspector-trace.png" alt-text="API Inspector trace"::: +String interpolation can also be used with named values. + +```xml + + @($"The URL encoded value is {System.Net.WebUtility.UrlEncode("{{ContosoHeaderValue2}}")}") + +``` + +The value for `CustomHeader` will be `The URL encoded value is This+is+a+header+value.`. + > [!CAUTION] > If a policy references a secret in Azure Key Vault, the value from the key vault will be visible to users who have access to subscriptions enabled for [API request tracing](api-management-howto-api-inspector.md). diff --git a/articles/api-management/api-management-policies.md b/articles/api-management/api-management-policies.md index 64876e34f5654..9626b27954826 100644 --- a/articles/api-management/api-management-policies.md +++ b/articles/api-management/api-management-policies.md @@ -19,6 +19,7 @@ More information about policies: ## [Access restriction policies](api-management-access-restriction-policies.md) - [Check HTTP header](api-management-access-restriction-policies.md#CheckHTTPHeader) - Enforces existence and/or value of an HTTP Header. +- [Get authorization context](api-management-access-restriction-policies.md#GetAuthorizationContext) - Gets the authorization context of a specified [authorization](authorizations-overview.md) configured in the API Management instance. - [Limit call rate by subscription](api-management-access-restriction-policies.md#LimitCallRate) - Prevents API usage spikes by limiting call rate, on a per subscription basis. - [Limit call rate by key](api-management-access-restriction-policies.md#LimitCallRateByKey) - Prevents API usage spikes by limiting call rate, on a per key basis. - [Restrict caller IPs](api-management-access-restriction-policies.md#RestrictCallerIPs) - Filters (allows/denies) calls from specific IP addresses and/or address ranges. diff --git a/articles/api-management/api-management-policy-expressions.md b/articles/api-management/api-management-policy-expressions.md index 7e52ec847bdf4..d08095c8f56d0 100644 --- a/articles/api-management/api-management-policy-expressions.md +++ b/articles/api-management/api-management-policy-expressions.md @@ -199,7 +199,7 @@ The `context` variable is implicitly available in every policy [expression](api- |----------------------|-------------------------------------------------------| |context|[Api](#ref-context-api): [IApi](#ref-iapi)

[Deployment](#ref-context-deployment)<br /><br /> Elapsed: TimeSpan - time interval between the value of Timestamp and current time<br /><br /> [LastError](#ref-context-lasterror)<br /><br /> [Operation](#ref-context-operation)<br /><br /> [Product](#ref-context-product)<br /><br /> [Request](#ref-context-request)<br /><br /> RequestId: Guid - unique request identifier<br /><br /> [Response](#ref-context-response)<br /><br /> [Subscription](#ref-context-subscription)<br /><br /> Timestamp: DateTime - point in time when request was received<br /><br /> Tracing: bool - indicates if tracing is on or off<br /><br /> [User](#ref-context-user)<br /><br /> [Variables](#ref-context-variables): IReadOnlyDictionary<br /><br /> void Trace(message: string)|
 |context.Api|Id: string<br /><br /> IsCurrentRevision: bool<br /><br /> Name: string<br /><br /> Path: string<br /><br /> Revision: string<br /><br /> ServiceUrl: [IUrl](#ref-iurl)<br /><br /> Version: string |
-|context.Deployment|GatewayId: string (returns 'managed' for managed gateways)<br /><br /> Region: string<br /><br /> ServiceName: string<br /><br /> Certificates: IReadOnlyDictionary|
+|context.Deployment|GatewayId: string (returns 'managed' for managed gateways)<br /><br /> Region: string<br /><br /> ServiceId: string<br /><br /> ServiceName: string<br /><br /> Certificates: IReadOnlyDictionary|
 |context.LastError|Source: string<br /><br /> Reason: string<br /><br /> Message: string<br /><br /> Scope: string<br /><br /> Section: string<br /><br /> Path: string<br /><br /> PolicyId: string<br /><br /> For more information about context.LastError, see [Error handling](api-management-error-handling-policies.md).|
 |context.Operation|Id: string<br /><br /> Method: string<br /><br /> Name: string<br /><br /> UrlTemplate: string|
 |context.Product|Apis: IEnumerable<[IApi](#ref-iapi)\><br /><br /> ApprovalRequired: bool<br /><br /> Groups: IEnumerable<[IGroup](#ref-igroup)\><br /><br /> Id: string<br /><br /> Name: string<br /><br /> State: enum ProductState {NotPublished, Published}<br /><br /> SubscriptionLimit: int?<br /><br />
        SubscriptionRequired: bool| diff --git a/articles/api-management/api-management-sample-cache-by-key.md b/articles/api-management/api-management-sample-cache-by-key.md index ceb16439a83c3..ccc338645a6df 100644 --- a/articles/api-management/api-management-sample-cache-by-key.md +++ b/articles/api-management/api-management-sample-cache-by-key.md @@ -4,26 +4,24 @@ description: Learn how to cache items by key in Azure API Management. You can mo services: api-management documentationcenter: '' author: dlepow -manager: erikre editor: '' - -ms.assetid: 772bc8dd-5cda-41c4-95bf-b9f6f052bc85 +ms.topic: how-to ms.service: api-management -ms.topic: article -ms.tgt_pltfrm: na -ms.workload: na -ms.date: 12/15/2016 +ms.date: 05/19/2022 ms.author: danlep --- # Custom caching in Azure API Management -Azure API Management service has built-in support for [HTTP response caching](api-management-howto-cache.md) using the resource URL as the key. The key can be modified by request headers using the `vary-by` properties. This is useful for caching entire HTTP responses (also known as representations), but sometimes it is useful to just cache a portion of a representation. The new [cache-lookup-value](./api-management-caching-policies.md#GetFromCacheByKey) and [cache-store-value](./api-management-caching-policies.md#StoreToCacheByKey) policies provide the ability to store and retrieve arbitrary pieces of data from within policy definitions. This ability also adds value to the previously introduced [send-request](./api-management-advanced-policies.md#SendRequest) policy because you can now cache responses from external services. +Azure API Management service has built-in support for [HTTP response caching](api-management-howto-cache.md) using the resource URL as the key. The key can be modified by request headers using the `vary-by` properties. This is useful for caching entire HTTP responses (also known as representations), but sometimes it's useful to just cache a portion of a representation. The [cache-lookup-value](./api-management-caching-policies.md#GetFromCacheByKey) and [cache-store-value](./api-management-caching-policies.md#StoreToCacheByKey) policies provide the ability to store and retrieve arbitrary pieces of data from within policy definitions. This ability also adds value to the [send-request](./api-management-advanced-policies.md#SendRequest) policy because you can cache responses from external services. ## Architecture -API Management service uses a shared per-tenant data cache so that, as you scale up to multiple units you still get access to the same cached data. However, when working with a multi-region deployment there are independent caches within each of the regions. It is important to not treat the cache as a data store, where it is the only source of some piece of information. If you did, and later decided to take advantage of the multi-region deployment, then customers with users that travel may lose access to that cached data. +API Management service uses a shared per-tenant internal data cache so that, as you scale up to multiple units, you still get access to the same cached data. However, when working with a multi-region deployment there are independent caches within each of the regions. It's important to not treat the cache as a data store, where it's the only source of some piece of information. If you did, and later decided to take advantage of the multi-region deployment, then customers with users that travel may lose access to that cached data. 
+ +> [!NOTE] +> The internal cache is not available in the **Consumption** tier of Azure API Management. You can [use an external Azure Cache for Redis](api-management-howto-cache-external.md) instead. An external cache allows for greater cache control and flexibility for API Management instances in all tiers. ## Fragment caching -There are certain cases where responses being returned contain some portion of data that is expensive to determine and yet remains fresh for a reasonable amount of time. As an example, consider a service built by an airline that provides information relating flight reservations, flight status, etc. If the user is a member of the airlines points program, they would also have information relating to their current status and accumulated mileage. This user-related information might be stored in a different system, but it may be desirable to include it in responses returned about flight status and reservations. This can be done using a process called fragment caching. The primary representation can be returned from the origin server using some kind of token to indicate where the user-related information is to be inserted. +There are certain cases where responses being returned contain some portion of data that is expensive to determine and yet remains fresh for a reasonable amount of time. As an example, consider a service built by an airline that provides information relating flight reservations, flight status, and so on. If the user is a member of the airlines points program, they would also have information relating to their current status and accumulated mileage. This user-related information might be stored in a different system, but it may be desirable to include it in responses returned about flight status and reservations. This can be done using a process called fragment caching. The primary representation can be returned from the origin server using some kind of token to indicate where the user-related information is to be inserted. Consider the following JSON response from a backend API. @@ -44,7 +42,7 @@ And secondary resource at `/userprofile/{userid}` that looks like, { "username" : "Bob Smith", "Status" : "Gold" } ``` -To determine the appropriate user information to include, API Management needs to identify who the end user is. This mechanism is implementation-dependent. As an example, I am using the `Subject` claim of a `JWT` token. +To determine the appropriate user information to include, API Management needs to identify who the end user is. This mechanism is implementation-dependent. The following example uses the `Subject` claim of a `JWT` token. ```xml ``` -API Management stores the value in the cache using the exact same key that API Management originally attempted to retrieve it with. The duration that API Management chooses to store the value should be based on how often the information changes and how tolerant users are to out-of-date information. +API Management stores the value in the cache using the same key that API Management originally attempted to retrieve it with. The duration that API Management chooses to store the value should be based on how often the information changes and how tolerant users are to out-of-date information. -It is important to realize that retrieving from the cache is still an out-of-process, network request and potentially can still add tens of milliseconds to the request. 
The benefits come when determining the user profile information takes longer than that due to needing to do database queries or aggregate information from multiple back-ends. +It is important to realize that retrieving from the cache is still an out-of-process network request and potentially can add tens of milliseconds to the request. The benefits come when determining the user profile information takes longer than that due to needing to do database queries or aggregate information from multiple back-ends. The final step in the process is to update the returned response with the user profile information. @@ -116,9 +114,9 @@ The final step in the process is to update the returned response with the user p to="@((string)context.Variables["userprofile"])" /> ``` -You can chose to include the quotation marks as part of the token so that even when the replace doesn’t occur, the response is still a valid JSON. +You can choose to include the quotation marks as part of the token so that even when the replacement doesn’t occur, the response is still a valid JSON. -Once you combine all these steps together, the end result is a policy that looks like the following one. +Once you combine these steps, the end result is a policy that looks like the following one. ```xml @@ -172,14 +170,14 @@ Once you combine all these steps together, the end result is a policy that looks ``` -This caching approach is primarily used in web sites where HTML is composed on the server side so that it can be rendered as a single page. It can also be useful in APIs where clients cannot do client-side HTTP caching or it is desirable not to put that responsibility on the client. +This caching approach is primarily used in websites where HTML is composed on the server side so that it can be rendered as a single page. It can also be useful in APIs where clients can't do client-side HTTP caching or it's desirable not to put that responsibility on the client. This same kind of fragment caching can also be done on the backend web servers using a Redis caching server, however, using the API Management service to perform this work is useful when the cached fragments are coming from different back-ends than the primary responses. ## Transparent versioning -It is common practice for multiple different implementation versions of an API to be supported at any one time. For example, to support different environments (dev, test, production, etc.) or to support older versions of the API to give time for API consumers to migrate to newer versions. +It's common practice for multiple different implementation versions of an API to be supported at any one time. For example, to support different environments (dev, test, production, etc.) or to support older versions of the API to give time for API consumers to migrate to newer versions. -One approach to handling this, instead of requiring client developers to change the URLs from `/v1/customers` to `/v2/customers` is to store in the consumer’s profile data which version of the API they currently wish to use and call the appropriate backend URL. To determine the correct backend URL to call for a particular client, it is necessary to query some configuration data. By caching this configuration data, API Management can minimize the performance penalty of doing this lookup. 
+One approach to handling this, instead of requiring client developers to change the URLs from `/v1/customers` to `/v2/customers` is to store in the consumer’s profile data which version of the API they currently wish to use and call the appropriate backend URL. To determine the correct backend URL to call for a particular client, it's necessary to query some configuration data. By caching this configuration data, API Management can minimize the performance penalty of doing this lookup. The first step is to determine the identifier used to configure the desired version. In this example, I chose to associate the version to the product subscription key. @@ -195,7 +193,7 @@ key="@("clientversion-" + context.Variables["clientid"])" variable-name="clientversion" /> ``` -Then, API Management checks to see if it did not find it in the cache. +Then, API Management checks to see if it didn't find it in the cache. ```xml diff --git a/articles/api-management/api-management-subscriptions.md b/articles/api-management/api-management-subscriptions.md index ab655401f8079..fc173adfedbd5 100644 --- a/articles/api-management/api-management-subscriptions.md +++ b/articles/api-management/api-management-subscriptions.md @@ -84,6 +84,9 @@ API publishers can [create subscriptions](api-management-howto-create-subscripti By default, a developer can only access a product or API by using a subscription key. Under certain scenarios, API publishers might want to publish a product or a particular API to the public without the requirement of subscriptions. While a publisher could choose to enable unsecured access to certain APIs, configuring another mechanism to secure client access is recommended. +> [!CAUTION] +> Use care when configuring a product or an API that doesn't require a subscription. This configuration may be overly permissive and may make an API more vulnerable to certain [API security threats](mitigate-owasp-api-threats.md#security-misconfiguration). + To disable the subscription requirement using the portal: * **Disable requirement for product** - Disable **Requires subscription** on the **Settings** page of the product. diff --git a/articles/api-management/authorizations-how-to.md b/articles/api-management/authorizations-how-to.md new file mode 100644 index 0000000000000..0231af5e78927 --- /dev/null +++ b/articles/api-management/authorizations-how-to.md @@ -0,0 +1,156 @@ +--- +title: Create and use authorization in Azure API Management | Microsoft Docs +description: Learn how to create and use an authorization in Azure API Management. An authorization manages authorization tokens to OAuth 2.0 backend services. The example uses GitHub as an identity provider. +services: api-management +author: dlepow +ms.service: api-management +ms.topic: how-to +ms.date: 06/03/2022 +ms.author: danlep +--- + +# Configure and use an authorization + +In this article, you learn how to create an [authorization](authorizations-overview.md) (preview) in API Management and call a GitHub API that requires an authorization token. The authorization code grant type will be used. + +Four steps are needed to set up an authorization with the authorization code grant type: + +1. Register an application in the identity provider (in this case, GitHub). +1. Configure an authorization in API Management. +1. Authorize with GitHub and configure access policies. +1. Create an API in API Management and configure a policy. + +## Prerequisites + +- A GitHub account is required. 
+- Complete the following quickstart: [Create an Azure API Management instance](get-started-create-service-instance.md). +- Enable a [managed identity](api-management-howto-use-managed-service-identity.md) for API Management in the API Management instance. + +## Step 1: Register an application in GitHub + +1. Sign in to GitHub. +1. In your account profile, go to **Settings > Developer Settings > OAuth Apps > Register a new application**. + + + :::image type="content" source="media/authorizations-how-to/register-application.png" alt-text="Screenshot of registering a new OAuth application in GitHub."::: + 1. Enter an **Application name** and **Homepage URL** for the application. + 1. Optionally, add an **Application description**. + 1. In **Authorization callback URL** (the redirect URL), enter `https://authorization-manager-test.consent.azure-apim.net/redirect/apim/`, substituting the API Management service name that is used. +1. Select **Register application**. +1. In the **General** page, copy the **Client ID**, which you'll use in a later step. +1. Select **Generate a new client secret**. Copy the secret, which won't be displayed again, and which you'll use in a later step. + + :::image type="content" source="media/authorizations-how-to/generate-secret.png" alt-text="Screenshot showing how to get client ID and client secret for the application in GitHub."::: + +## Step 2: Configure an authorization in API Management + +1. Sign into Azure portal and go to your API Management instance. +1. In the left menu, select **Authorizations** > **+ Create**. + + :::image type="content" source="media/authorizations-how-to/create-authorization.png" alt-text="Screenshot of creating an API Management authorization in the Azure portal."::: +1. In the **Create authorization** window, enter the following settings, and select **Create**: + + |Settings |Value | + |---------|---------| + |**Provider name** | A name of your choice, such as *github-01* | + |**Identity provider** | Select **GitHub** | + |**Grant type** | Select **Authorization code** | + |**Client id** | Paste the value you copied earlier from the app registration | + |**Client secret** | Paste the value you copied earlier from the app registration | + |**Scope** | Set the scope to `User` | + |**Authorization name** | A name of your choice, such as *auth-01* | + + + +1. After the authorization provider and authorization are created, select **Next**. + +1. On the **Login** tab, select **Login with GitHub**. Before the authorization will work, it needs to be authorized at GitHub. + + :::image type="content" source="media/authorizations-how-to/authorize-with-github.png" alt-text="Screenshot of logging into the GitHub authorization from the portal."::: + +## Step 3: Authorize with GitHub and configure access policies + +1. Sign in to your GitHub account if you're prompted to do so. +1. Select **Authorize** so that the application can access the signed-in user’s account. + + :::image type="content" source="media/authorizations-how-to/consent-to-authorization.png" alt-text="Screenshot of consenting to authorize with Github."::: + + After authorization, the browser is redirected to API Management and the window is closed. If prompted during redirection, select **Allow access**. In API Management, select **Next**. +1. On the **Access policy** page, create an access policy so that API Management has access to use the authorization. Ensure that a managed identity is configured for API Management. 
[Learn more about managed identities in API Management](api-management-howto-use-managed-service-identity.md#create-a-system-assigned-managed-identity). + +1. Select **Managed identity** **+ Add members** and then select your subscription. +1. In **Managed identity**, select **API Management service**, and then select the API Management instance that is used. Click **Select** and then **Complete**. + + :::image type="content" source="media/authorizations-how-to/select-managed-identity.png" alt-text="Screenshot of selecting a managed identity to use the authorization."::: + +## Step 4: Create an API in API Management and configure a policy + +1. Sign into Azure portal and go to your API Management instance. +1. In the left menu, select **APIs > + Add API**. +1. Select **HTTP** and enter the following settings. Then select **Create**. + + |Setting |Value | + |---------|---------| + |**Display name** | *github* | + |**Web service URL** | https://api.github.com/users/ | + |**API URL suffix** | *github* | + +2. Navigate to the newly created API and select **Add Operation**. Enter the following settings and select **Save**. + + |Setting |Value | + |---------|---------| + |**Display name** | *getdata* | + |**URL** | /data | + + :::image type="content" source="media/authorizations-how-to/add-operation.png" alt-text="Screenshot of adding a getdata operation to the API in the portal."::: + +1. In the **Inbound processing** section, select the (****) (code editor) icon. +1. Copy the following, and paste in the policy editor. Make sure the provider-id and authorization-id correspond to the names in step 2.3. Select **Save**. + + ```xml + + + + + + @("Bearer " + ((Authorization)context.Variables.GetValueOrDefault("auth-context"))?.AccessToken) + + + + API Management + + + + + + + + + + + + + ``` + + The policy to be used consists of four parts. + + - Fetch an authorization token. + - Create an HTTP header with the fetched authorization token. + - Create an HTTP header with a `User-Agent` header (GitHub requirement). [Learn more](https://docs.github.com/rest/overview/resources-in-the-rest-api#user-agent-required) + - Because the incoming request to API Management will consist of a query parameter called *username*, add the username to the backend call. + + > [!NOTE] + > The `get-authorization-context` policy references the authorization provider and authorization that were created earlier. [Learn more](api-management-access-restriction-policies.md#GetAuthorizationContext) about how to configure this policy. + + :::image type="content" source="media/authorizations-how-to/policy-configuration-cropped.png" lightbox="media/authorizations-how-to/policy-configuration.png" alt-text="Screenshot of configuring policy in the portal."::: +1. Test the API. + 1. On the **Test** tab, enter a query parameter with the name *username*. + 1. As value, enter the username that was used to sign into GitHub, or another valid GitHub username. + 1. Select **Send**. + :::image type="content" source="media/authorizations-how-to/test-api.png" alt-text="Screenshot of testing the API successfully in the portal."::: + + A successful response returns user data from the GitHub API. + +## Next steps + +Learn more about [access restriction policies](api-management-access-restriction-policies.md). 
\ No newline at end of file diff --git a/articles/api-management/authorizations-overview.md b/articles/api-management/authorizations-overview.md new file mode 100644 index 0000000000000..37c00891b7358 --- /dev/null +++ b/articles/api-management/authorizations-overview.md @@ -0,0 +1,182 @@ +--- +title: About OAuth 2.0 authorizations in Azure API Management | Microsoft Docs +description: Learn about authorizations in Azure API Management, a feature that simplifies the process of managing OAuth 2.0 authorization tokens to APIs +author: dlepow +ms.service: api-management +ms.topic: conceptual +ms.date: 06/03/2022 +ms.author: danlep +--- + +# Authorizations overview + +API Management authorizations (preview) simplify the process of managing authorization tokens to OAuth 2.0 backend services. +By configuring any of the supported identity providers and creating an authorization using the standardized OAuth 2.0 flow, API Management can retrieve and refresh access tokens to be used inside of API management or sent back to a client. +This feature enables APIs to be exposed with or without a subscription key, and the authorization to the backend service uses OAuth 2.0. + +Some example scenarios that will be possible through this feature are: + +- Citizen/low code developers using Power Apps or Power Automate can easily connect to SaaS providers that are using OAuth 2.0. +- Unattended scenarios such as an Azure function using a timer trigger can utilize this feature to connect to a backend API using OAuth 2.0. +- A marketing team in an enterprise company could use the same authorization for interacting with a social media platform using OAuth 2.0. +- Exposing APIs in API Management as a custom connector in Logic Apps where the backend service requires OAuth 2.0 flow. +- On behalf of a scenario where a service such as Dropbox or any other service protected by OAuth 2.0 flow is used by multiple clients. +- Connect to different services that require OAuth 2.0 authorization using synthetic GraphQL in API Management. +- Enterprise Application Integration (EAI) patterns using service-to-service authorization can use the client credentials grant type against backend APIs that use OAuth 2.0. +- Single-page applications that only want to retrieve an access token to be used in a client's SDK against an API using OAuth 2.0. + +The feature consists of two parts, management and runtime: + +* The **management** part takes care of configuring identity providers, enabling the consent flow for the identity provider, and managing access to the authorizations. + + +* The **runtime** part uses the [`get-authorization-context`](api-management-access-restriction-policies.md#GetAuthorizationContext) policy to fetch and store access and refresh tokens. When a call comes into API Management, and the `get-authorization-context` policy is executed, it will first validate if the existing authorization token is valid. If the authorization token has expired, the refresh token is used to try to fetch a new authorization and refresh token from the configured identity provider. If the call to the backend provider is successful, the new authorization token will be used, and both the authorization token and refresh token will be stored encrypted. + + + During the policy execution, access to the tokens is also validated using access policies. + +:::image type="content" source="media/authorizations-overview/overview.png" alt-text="Screenshot showing identity providers that can be used for OAuth 2.0 authorizations in API Management." 
border="false"::: + +### Requirements + +- Managed system-assigned identity must be enabled for the API Management instance. +- API Management instance must have outbound connectivity to internet on port `443` (HTTPS). + +### Limitations + +For public preview the following limitations exist: + +- Authorizations feature will be available in the Consumption tier in the coming weeks. +- Authorizations feature is not supported in the following regions: swedencentral, australiacentral, australiacentral2, jioindiacentral. +- Supported identity providers: Azure AD, DropBox, Generic OAuth 2.0, GitHub, Google, LinkedIn, Spotify +- Maximum configured number of authorization providers per API Management instance: 50 +- Maximum configured number of authorizations per authorization provider: 500 +- Maximum configured number of access policies per authorization: 100 +- Maximum requests per minute per authorization: 100 +- Authorization code PKCE flow with code challenge isn't supported. +- Authorizations feature isn't supported on self-hosted gateways. +- API documentation is not available yet. Please see [this](https://github.com/Azure/APIManagement-Authorizations) GitHub repository with samples. + +### Authorization providers + +Authorization provider configuration includes which identity provider and grant type are used. Each identity provider requires different configurations. + +* An authorization provider configuration can only have one grant type. +* One authorization provider configuration can have multiple authorizations. + +The following identity providers are supported for public preview: + +- Azure AD, DropBox, Generic OAuth 2.0, GitHub, Google, LinkedIn, Spotify + + +With the Generic OAuth 2.0 provider, other identity providers that support the standards of OAuth 2.0 flow can be used. + + +### Authorizations + +To use an authorization provider, at least one *authorization* is required. The process of configuring an authorization differs based on the used grant type. Each authorization provider configuration only supports one grant type. For example, if you want to configure Azure AD to use both grant types, two authorization provider configurations are needed. + +**Authorization code grant type** + +Authorization code grant type is bound to a user context, meaning a user needs to consent to the authorization. As long as the refresh token is valid, API Management can retrieve new access and refresh tokens. If the refresh token becomes invalid, the user needs to reauthorize. All identity providers support authorization code. [Read more about Authorization code grant type](https://www.rfc-editor.org/rfc/rfc6749?msclkid=929b18b5d0e611ec82a764a7c26a9bea#section-1.3.1). + +**Client credentials grant type** + +Client credentials grant type isn't bound to a user and is often used in application-to-application scenarios. No consent is required for client credentials grant type, and the authorization doesn't become invalid. [Read more about Client Credentials grant type](https://www.rfc-editor.org/rfc/rfc6749?msclkid=929b18b5d0e611ec82a764a7c26a9bea#section-1.3.4). + + +### Access policies +Access policies determine which identities can use the authorization that the access policy is related to. The supported identities are managed identities, user identities, and service principals. The identities must belong to the same tenant as the API Management tenant. + +- **Managed identities** - System- or user-assigned identity for the API Management instance that is being used. 
+- **User identities** - Users in the same tenant as the API Management instance. +- **Service principals** - Applications in the same Azure AD tenant as the API Management instance. + +### Process flow for creating authorizations + +The following image shows the process flow for creating an authorization in API Management using the grant type authorization code. For public preview no API documentation is available. + +:::image type="content" source="media/authorizations-overview/get-token.svg" alt-text="Process flow for creating authorizations" border="false"::: + + +1. Client sends a request to create an authorization provider. +1. Authorization provider is created, and a response is sent back. +1. Client sends a request to create an authorization. +1. Authorization is created, and a response is sent back with the information that the authorization is not "connected". +1. Client sends a request to retrieve a login URL to start the OAuth 2.0 consent at the identity provider. The request includes a post-redirect URL to be used in the last step. +1. Response is returned with a login URL that should be used to start the consent flow. +1. Client opens a browser with the login URL that was provided in the previous step. The browser is redirected to the identity provider OAuth 2.0 consent flow. +1. After the consent is approved, the browser is redirected with an authorization code to the redirect URL configured at the identity provider. +1. API Management uses the authorization code to fetch access and refresh tokens. +1. API Management receives the tokens and encrypts them. +1. API Management redirects to the provided URL from step 5. + +### Process flow for runtime + +The following image shows the process flow to fetch and store authorization and refresh tokens based on a configured authorization. After the tokens have been retrieved a call is made to the backend API. + +:::image type="content" source="media/authorizations-overview/get-token-for-backend.svg" alt-text="Diagram that shows the process flow for creating runtime." border="false"::: + +1. Client sends request to API Management instance. +1. The policy [`get-authorization-context`](api-management-access-restriction-policies.md#GetAuthorizationContext) checks if the access token is valid for the current authorization. +1. If the access token has expired but the refresh token is valid, API Management tries to fetch new access and refresh tokens from the configured identity provider. +1. The identity provider returns both an access token and a refresh token, which are encrypted and saved to API Management. +1. After the tokens have been retrieved, the access token is attached using the `set-header` policy as an authorization header to the outgoing request to the backend API. +1. Response is returned to API Management. +1. Response is returned to the client. + +### Error handling + +If acquiring the authorization context results in an error, the outcome depends on how the attribute `ignore-error` is configured in the policy `get-authorization-context`. If the value is set to `false` (default), an error with `500 Internal Server Error` will be returned. If the value is set to `true`, the error will be ignored and execution will proceed with the context variable set to `null`. + +If the value is set to `false`, and the on-error section in the policy is configured, the error will be available in the property `context.LastError`. By using the on-error section, the error that is sent back to the client can be adjusted. 
Errors from API Management can be caught using standard Azure alerts. Read more about [handling errors in policies](api-management-error-handling-policies.md). + +### Authorizations FAQ + +##### How can I provide feedback and influence the roadmap for this feature? + +Please use [this](https://aka.ms/apimauthorizations/feedback) form to provide feedback. + +##### How are the tokens stored in API Management? + +The access token and other secrets (for example, client secrets) are encrypted with an envelope encryption and stored in an internal, multitenant storage. The data are encrypted with AES-128 using a key that is unique per data; those keys are encrypted asymmetrically with a master certificate stored in Azure Key Vault and rotated every month. + +##### When are the access tokens refreshed? + +When the policy `get-authorization-context` is executed at runtime, API Management checks if the stored access token is valid. If the token has expired or is near expiry, API Management uses the refresh token to fetch a new access token and a new refresh token from the configured identity provider. If the refresh token has expired, an error is thrown, and the authorization needs to be reauthorized before it will work. + +##### What happens if the client secret expires at the identity provider? +At runtime API Management can't fetch new tokens, and an error will occur. + +* If the authorization is of type authorization code, the client secret needs to be updated on authorization provider level. + +* If the authorization is of type client credentials, the client secret needs to be updated on authorizations level. + +##### Is this feature supported using API Management running inside a VNet? + +Yes, as long as API Management gateway has outbound internet connectivity on port `443`. + +##### What happens when an authorization provider is deleted? + +All underlying authorizations and access policies are also deleted. + +##### Are the access tokens cached by API Management? + +The access token is cached by the API management until 3 minutes before the token expiration time. + +##### What grant types are supported? + +For public preview, the Azure AD identity provider supports authorization code and client credentials. + +The other identity providers support authorization code. After public preview, more identity providers and grant types will be added. + +### Next steps + +- Learn how to [configure and use an authorization](authorizations-how-to.md). +- See [reference](authorizations-reference.md) for supported identity providers in authorizations. +- Use [policies]() together with authorizations. +- Authorizations [samples](https://github.com/Azure/APIManagement-Authorizations) GitHub repository. +- Learn more about OAuth 2.0: + + * [OAuth 2.0 overview](https://aaronparecki.com/oauth-2-simplified/) + * [OAuth 2.0 specification](https://oauth.net/2/) diff --git a/articles/api-management/authorizations-reference.md b/articles/api-management/authorizations-reference.md new file mode 100644 index 0000000000000..1e69e9b613d37 --- /dev/null +++ b/articles/api-management/authorizations-reference.md @@ -0,0 +1,102 @@ +--- +title: Reference for OAuth 2.0 authorizations - Azure API Management | Microsoft Docs +description: Reference for identity providers supported in authorizations in Azure API Management. API Management authorizations manage OAuth 2.0 authorization tokens to APIs. 
+author: dlepow +ms.service: api-management +ms.topic: reference +ms.date: 05/02/2022 +ms.author: danlep +--- + +# Authorizations reference +This article is a reference for the supported identity providers in API Management [authorizations](authorizations-overview.md) (preview) and their configuration options. + +## Azure Active Directory + + +**Supported grant types**: authorization code and client credentials + + +### Authorization provider - Authorization code grant type + +| Name | Required | Description | Default | +|---|---|---|---| +| Provider name | Yes | Name of Authorization provider. | | +| Client ID | Yes | The ID used to identify this application with the service provider. | | +| Client secret | Yes | The shared secret used to authenticate this application with the service provider. || +| Login URL | No | The Azure Active Directory login URL. | https://login.windows.net | +| Tenant ID | No | The tenant ID of your Azure Active Directory application. | common | +| Resource URL | Yes | The resource to get authorization for. | | +| Scopes | No | Scopes used for the authorization. Multiple scopes can be defined, separated by a space, for example, "User.Read User.ReadBasic.All" | | + + +### Authorization - Authorization code grant type +| Name | Required | Description | Default | +|---|---|---|---| +| Authorization name | Yes | Name of Authorization. | | + +--- + +### Authorization provider - Client credentials grant type +| Name | Required | Description | Default | +|---|---|---|---| +| Provider name | Yes | Name of Authorization provider. | | +| Login URL | No | The Azure Active Directory login URL. | https://login.windows.net | +| Tenant ID | No | The tenant ID of your Azure Active Directory application. | common | +| Resource URL | Yes | The resource to get authorization for. | | + + +### Authorization - Client credentials grant type +| Name | Required | Description | Default | +|---|---|---|---| +| Authorization name | Yes | Name of Authorization. | | +| Client ID | Yes | The ID used to identify this application with the service provider. | | +| Client secret | Yes | The shared secret used to authenticate this application with the service provider. || + +--- + +## Google, LinkedIn, Spotify, Dropbox, GitHub + +**Supported grant types**: authorization code + +### Authorization provider - Authorization code grant type +| Name | Required | Description | Default | +|---|---|---|---| +| Provider name | Yes | Name of Authorization provider. | | +| Client ID | Yes | The ID used to identify this application with the service provider. | | +| Client secret | Yes | The shared secret used to authenticate this application with the service provider. || +| Scopes | No | Scopes used for the authorization. Depending on the identity provider, multiple scopes are separated by space or comma. Default for most identity providers is space. | | + + +### Authorization - Authorization code grant type +| Name | Required | Description | Default | +|---|---|---|---| +| Authorization name | Yes | Name of Authorization. | | + +--- + +## Generic OAuth 2 + +**Supported grant types**: authorization code + + +### Authorization provider - Authorization code grant type +| Name | Required | Description | Default | +|---|---|---|---| +| Provider name | Yes | Name of Authorization provider. | | +| Client ID | Yes | The ID used to identify this application with the service provider. | | +| Client secret | Yes | The shared secret used to authenticate this application with the service provider.
|| +| Authorization URL | No | The authorization endpoint URL. | | +| Token URL | No | The token endpoint URL. | | +| Refresh URL | No | The token refresh endpoint URL. | | +| Scopes | No | Scopes used for the authorization. Depending on the identity provider, multiple scopes are separated by space or comma. Default for most identity providers is space. | | + + +### Authorization - Authorization code grant type +| Name | Required | Description | Default | +|---|---|---|---| +| Authorization name | Yes | Name of Authorization. | | + +## Next steps + +Learn more about [authorizations](authorizations-overview.md) and how to [create and use authorizations](authorizations-how-to.md) diff --git a/articles/api-management/devops-api-development-templates.md b/articles/api-management/devops-api-development-templates.md index 395df26750338..90ee25d7e35c0 100644 --- a/articles/api-management/devops-api-development-templates.md +++ b/articles/api-management/devops-api-development-templates.md @@ -16,6 +16,10 @@ This article shows you how to use API DevOps with Azure API Management, through For details, tools, and code samples to implement the DevOps approach described in this article, see the open-source [Azure API Management DevOps Resource Kit](https://github.com/Azure/azure-api-management-devops-resource-kit) in GitHub. Because customers bring a wide range of engineering cultures and existing automation solutions, the approach isn't a one-size-fits-all solution. +For architectural guidance, see: + +* **API Management landing zone accelerator**: [Reference architecture](/azure/architecture/example-scenario/integration/app-gateway-internal-api-management-function?toc=%2Fazure%2Fapi-management%2Ftoc.json&bc=/azure/api-management/breadcrumb/toc.json) and [design guidance](/azure/cloud-adoption-framework/scenarios/app-platform/api-management/land?toc=%2Fazure%2Fapi-management%2Ftoc.json&bc=/azure/api-management/breadcrumb/toc.json) + ## The problem Organizations today normally have multiple deployment environments (such as development, testing, and production) and use separate API Management instances for each environment. Some instances are shared by multiple development teams, who are responsible for different APIs with different release cadences. diff --git a/articles/api-management/graphql-api.md b/articles/api-management/graphql-api.md index 7912d5f325105..42ff1de8f21e6 100644 --- a/articles/api-management/graphql-api.md +++ b/articles/api-management/graphql-api.md @@ -6,7 +6,7 @@ ms.service: api-management author: dlepow ms.author: danlep ms.topic: how-to -ms.date: 05/17/2022 +ms.date: 05/19/2022 ms.custom: event-tier1-build-2022 --- @@ -46,7 +46,7 @@ If you want to import a GraphQL schema and set up field resolvers using REST or |----------------|-------| | **Display name** | The name by which your GraphQL API will be displayed. | | **Name** | Raw name of the GraphQL API. Automatically populates as you type the display name. | - | **GraphQL API endpoint** | The base URL with your GraphQL API endpoint name.
        For example: *`https://example.com/your-GraphQL-name`*. You can also use the common ["Star Wars" GraphQL endpoint](https://swapi-graphql.netlify.app/.netlify/functions/index) as a demo. | + | **GraphQL API endpoint** | The base URL with your GraphQL API endpoint name.
        For example: *`https://example.com/your-GraphQL-name`*. You can also use a common "Star Wars" GraphQL endpoint such as `https://swapi-graphql.azure-api.net/graphql` as a demo. | | **Upload schema** | Optionally select to browse and upload your schema file to replace the schema retrieved from the GraphQL endpoint (if available). | | **Description** | Add a description of your API. | | **URL scheme** | Select **HTTP**, **HTTPS**, or **Both**. Default selection: *Both*. | diff --git a/articles/api-management/index.yml b/articles/api-management/index.yml index f0c961b856e8d..873ac13b3d87b 100644 --- a/articles/api-management/index.yml +++ b/articles/api-management/index.yml @@ -1,7 +1,7 @@ ### YamlMime:Landing title: API Management documentation -summary: Learn how to use API Management to publish APIs to external, partner, and employee developers securely and at scale. Shows you how to create and manage modern API gateways for existing back-end services hosted anywhere. +summary: Learn how to use API Management to publish APIs to external, partner, and employee developers securely and at scale. Create and manage modern API gateways for existing back-end services hosted anywhere. metadata: title: API Management documentation @@ -28,7 +28,9 @@ landingContent: - linkListType: architecture links: - text: Basic enterprise integration - url: /azure/architecture/reference-architectures/enterprise-integration/basic-enterprise-integration + url: /azure/architecture/reference-architectures/enterprise-integration/basic-enterprise-integration?toc=%2Fazure%2Fapi-management%2Ftoc.json&bc=/azure/api-management/breadcrumb/toc.json + - text: Landing zone accelerator + url: /azure/architecture/example-scenario/integration/app-gateway-internal-api-management-function?toc=%2Fazure%2Fapi-management%2Ftoc.json&bc=/azure/api-management/breadcrumb/toc.json # Card (optional) - title: Create an instance linkLists: diff --git a/articles/api-management/media/api-management-howto-aad/arrow.png b/articles/api-management/media/api-management-howto-aad/arrow.png deleted file mode 100644 index c0df8c5523d4f..0000000000000 Binary files a/articles/api-management/media/api-management-howto-aad/arrow.png and /dev/null differ diff --git a/articles/api-management/media/api-management-howto-aad/developer-portal-azure-ad-signin.png b/articles/api-management/media/api-management-howto-aad/developer-portal-azure-ad-signin.png new file mode 100644 index 0000000000000..2b1a4a1e0ff9f Binary files /dev/null and b/articles/api-management/media/api-management-howto-aad/developer-portal-azure-ad-signin.png differ diff --git a/articles/api-management/media/api-management-howto-aad/enable-azure-ad-portal.png b/articles/api-management/media/api-management-howto-aad/enable-azure-ad-portal.png new file mode 100644 index 0000000000000..1a528de39d19e Binary files /dev/null and b/articles/api-management/media/api-management-howto-aad/enable-azure-ad-portal.png differ diff --git a/articles/api-management/media/authorizations-how-to/add-operation.png b/articles/api-management/media/authorizations-how-to/add-operation.png new file mode 100644 index 0000000000000..4ee042f4b5f1f Binary files /dev/null and b/articles/api-management/media/authorizations-how-to/add-operation.png differ diff --git a/articles/api-management/media/authorizations-how-to/authorization-settings.png b/articles/api-management/media/authorizations-how-to/authorization-settings.png new file mode 100644 index 0000000000000..a0e865976fe81 Binary files /dev/null and 
b/articles/api-management/media/authorizations-how-to/authorization-settings.png differ diff --git a/articles/api-management/media/authorizations-how-to/authorize-with-github.png b/articles/api-management/media/authorizations-how-to/authorize-with-github.png new file mode 100644 index 0000000000000..0c7de978d4b09 Binary files /dev/null and b/articles/api-management/media/authorizations-how-to/authorize-with-github.png differ diff --git a/articles/api-management/media/authorizations-how-to/consent-to-authorization.png b/articles/api-management/media/authorizations-how-to/consent-to-authorization.png new file mode 100644 index 0000000000000..c35e2d289c0a3 Binary files /dev/null and b/articles/api-management/media/authorizations-how-to/consent-to-authorization.png differ diff --git a/articles/api-management/media/authorizations-how-to/create-authorization.png b/articles/api-management/media/authorizations-how-to/create-authorization.png new file mode 100644 index 0000000000000..e08bdd4abe38b Binary files /dev/null and b/articles/api-management/media/authorizations-how-to/create-authorization.png differ diff --git a/articles/api-management/media/authorizations-how-to/generate-secret.png b/articles/api-management/media/authorizations-how-to/generate-secret.png new file mode 100644 index 0000000000000..74f4ecd2e6987 Binary files /dev/null and b/articles/api-management/media/authorizations-how-to/generate-secret.png differ diff --git a/articles/api-management/media/authorizations-how-to/policy-configuration-cropped.png b/articles/api-management/media/authorizations-how-to/policy-configuration-cropped.png new file mode 100644 index 0000000000000..00c0924f53b5f Binary files /dev/null and b/articles/api-management/media/authorizations-how-to/policy-configuration-cropped.png differ diff --git a/articles/api-management/media/authorizations-how-to/policy-configuration.png b/articles/api-management/media/authorizations-how-to/policy-configuration.png new file mode 100644 index 0000000000000..4fb63dda18121 Binary files /dev/null and b/articles/api-management/media/authorizations-how-to/policy-configuration.png differ diff --git a/articles/api-management/media/authorizations-how-to/register-application.png b/articles/api-management/media/authorizations-how-to/register-application.png new file mode 100644 index 0000000000000..c74f8b7c1b805 Binary files /dev/null and b/articles/api-management/media/authorizations-how-to/register-application.png differ diff --git a/articles/api-management/media/authorizations-how-to/select-managed-identity.png b/articles/api-management/media/authorizations-how-to/select-managed-identity.png new file mode 100644 index 0000000000000..c4a4d3dfa2b18 Binary files /dev/null and b/articles/api-management/media/authorizations-how-to/select-managed-identity.png differ diff --git a/articles/api-management/media/authorizations-how-to/test-api.png b/articles/api-management/media/authorizations-how-to/test-api.png new file mode 100644 index 0000000000000..23529ce2b4363 Binary files /dev/null and b/articles/api-management/media/authorizations-how-to/test-api.png differ diff --git a/articles/api-management/media/authorizations-overview/get-token-for-backend.svg b/articles/api-management/media/authorizations-overview/get-token-for-backend.svg new file mode 100644 index 0000000000000..42e235b72d336 --- /dev/null +++ b/articles/api-management/media/authorizations-overview/get-token-for-backend.svg @@ -0,0 +1,1125 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
[Remaining SVG markup for get-token-for-backend.svg (1,125 added lines, diagram of the runtime token flow) omitted] diff --git a/articles/api-management/media/authorizations-overview/get-token.svg b/articles/api-management/media/authorizations-overview/get-token.svg new file mode 100644 index 0000000000000..7e82102f56e0f --- /dev/null +++ b/articles/api-management/media/authorizations-overview/get-token.svg @@ -0,0 +1,1513 @@ [SVG markup for get-token.svg (1,513 added lines, diagram of the authorization creation flow) omitted] diff --git a/articles/api-management/media/authorizations-overview/overview.png b/articles/api-management/media/authorizations-overview/overview.png new file mode 100644 index 0000000000000..82b96aae12263 Binary files /dev/null and b/articles/api-management/media/authorizations-overview/overview.png differ diff --git a/articles/api-management/media/graphql-api/create-from-graphql-schema.png b/articles/api-management/media/graphql-api/create-from-graphql-schema.png index 0191bc9e7e7dc..aebbd318aec9b 100644 Binary files a/articles/api-management/media/graphql-api/create-from-graphql-schema.png and b/articles/api-management/media/graphql-api/create-from-graphql-schema.png differ diff --git a/articles/api-management/media/private-endpoint/api-management-private-endpoint.png b/articles/api-management/media/private-endpoint/api-management-private-endpoint.png new file mode 100644 index 0000000000000..9c622add93619 Binary files /dev/null and b/articles/api-management/media/private-endpoint/api-management-private-endpoint.png differ diff --git
a/articles/api-management/media/virtual-network-concepts/api-management-application-gateway.png b/articles/api-management/media/virtual-network-concepts/api-management-application-gateway.png new file mode 100644 index 0000000000000..a912186f7b3f2 Binary files /dev/null and b/articles/api-management/media/virtual-network-concepts/api-management-application-gateway.png differ diff --git a/articles/api-management/media/virtual-network-concepts/api-management-private-endpoint.png b/articles/api-management/media/virtual-network-concepts/api-management-private-endpoint.png new file mode 100644 index 0000000000000..9c622add93619 Binary files /dev/null and b/articles/api-management/media/virtual-network-concepts/api-management-private-endpoint.png differ diff --git a/articles/api-management/mitigate-owasp-api-threats.md b/articles/api-management/mitigate-owasp-api-threats.md new file mode 100644 index 0000000000000..fae9c2e49ed4b --- /dev/null +++ b/articles/api-management/mitigate-owasp-api-threats.md @@ -0,0 +1,313 @@ +--- +title: Mitigate OWASP API security top 10 in Azure API Management +description: Learn how to protect against common API-based vulnerabilities, as identified by the OWASP API Security Top 10 threats, using Azure API Management. +author: mikebudzynski +ms.service: api-management +ms.topic: conceptual +ms.date: 05/31/2022 +ms.author: mibudz +--- + +# Recommendations to mitigate OWASP API Security Top 10 threats using API Management + +The Open Web Application Security Project ([OWASP](https://owasp.org/about/)) Foundation works to improve software security through its community-led open source software projects, hundreds of chapters worldwide, tens of thousands of members, and by hosting local and global conferences. + +The OWASP [API Security Project](https://owasp.org/www-project-api-security/) focuses on strategies and solutions to understand and mitigate the unique *vulnerabilities and security risks of APIs*. In this article, we'll discuss recommendations to use Azure API Management to mitigate the top 10 API threats identified by OWASP. + +## Broken object level authorization + +API objects that aren't protected with the appropriate level of authorization may be vulnerable to data leaks and unauthorized data manipulation through weak object access identifiers. For example, an attacker could exploit an integer object identifier, which can be iterated. + +More information about this threat: [API1:2019 Broken Object Level Authorization](https://github.com/OWASP/API-Security/blob/master/2019/en/src/0xa1-broken-object-level-authorization.md) + +### Recommendations + +* The best place to implement object level authorization is within the backend API itself. At the backend, the correct authorization decisions can be made at the request (or object) level, where applicable, using logic applicable to the domain and API. Consider scenarios where a given request may yield differing levels of detail in the response, depending on the requestor's permissions and authorization. + +* If a current vulnerable API can't be changed at the backend, then API Management could be used as a fallback. For example: + + * Use a custom policy to implement object-level authorization, if it's not implemented in the backend. + + * Implement a custom policy to map identifiers from request to backend and from backend to client, so that internal identifiers aren't exposed. 
+ + In these cases, the custom policy could be a [policy expression](api-management-policy-expressions.md) with a look-up (for example, a dictionary) or integration with another service through the [send request](api-management-advanced-policies.md#SendRequest) policy. + +* For GraphQL scenarios, enforce object-level authorization through the [validate GraphQL request](graphql-policies.md#validate-graphql-request) policy, using the `authorize` element. + +## Broken user authentication + +Authentication mechanisms are often implemented incorrectly or missing, allowing attackers to exploit implementation flaws to access data. + +More information about this threat: [API2:2019 Broken User Authentication](https://github.com/OWASP/API-Security/blob/master/2019/en/src/0xa2-broken-user-authentication.md) + +### Recommendations + +Use API Management for user authentication and authorization: + +* **Authentication** - API Management supports the following [authentication methods](api-management-authentication-policies.md): + + * [Basic authentication](api-management-authentication-policies.md#Basic) policy - Username and password credentials. + + * [Subscription key](api-management-subscriptions.md) - A subscription key provides a similar level of security as basic authentication and may not be sufficient alone. If the subscription key is compromised, an attacker may get unlimited access to the system. + + * [Client certificate](api-management-authentication-policies.md#ClientCertificate) policy - Using client certificates is more secure than basic credentials or subscription key, but it doesn't allow the flexibility provided by token-based authorization protocols such as OAuth 2.0. + +* **Authorization** - API Management supports a [validate JWT](api-management-access-restriction-policies.md#ValidateJWT) policy to check the validity of an incoming OAuth 2.0 JWT access token based on information obtained from the OAuth identity provider's metadata endpoint. Configure the policy to check relevant token claims, audience, and expiration time. Learn more about protecting an API using [OAuth 2.0 authorization and Azure Active Directory](api-management-howto-protect-backend-with-aad.md). + +More recommendations: + +* Use [access restriction policies](api-management-access-restriction-policies.md) in API Management to increase security. For example, [call rate limiting](api-management-access-restriction-policies.md#LimitCallRate) slows down bad actors using brute force attacks to compromise credentials. + +* APIs should use TLS/SSL (transport security) to protect the credentials or tokens. Credentials and tokens should be sent in request headers and not as query parameters. + +* In the API Management [developer portal](api-management-howto-developer-portal.md), configure [Azure Active Directory](api-management-howto-aad.md) or [Azure Active Directory B2C](api-management-howto-aad-b2c.md) as the identity provider to increase the account security. The developer portal uses CAPTCHA to mitigate brute force attacks. + +### Related information + +* [Authentication vs. authorization](../active-directory/develop/authentication-vs-authorization.md) + +## Excessive data exposure + +Good API interface design is deceptively challenging. Often, particularly with legacy APIs that have evolved over time, the request and response interfaces contain more data fields than the consuming applications require. 
+ +A bad actor could attempt to access the API directly (perhaps by replaying a valid request), or sniff the traffic between server and API. Analysis of the API actions and the data available could yield sensitive data to the attacker, which isn't surfaced to, or used by, the frontend application. + +More information about this threat: [API3:2019 Excessive Data Exposure](https://github.com/OWASP/API-Security/blob/master/2019/en/src/0xa3-excessive-data-exposure.md) + +### Recommendations + +* The best approach to mitigating this vulnerability is to ensure that the external interfaces defined at the backend API are designed carefully and, ideally, independently of the data persistence. They should contain only the fields required by consumers of the API. APIs should be reviewed frequently, and legacy fields deprecated, then removed. + + In API Management, use: + * [Revisions](api-management-revisions.md) to gracefully control nonbreaking changes, for example, the addition of a field to an interface. You may use revisions along with a versioning implementation at the backend. + + * [Versions](api-management-versions.md) for breaking changes, for example, the removal of a field from an interface. + +* If it's not possible to alter the backend interface design and excessive data is a concern, use API Management [transformation policies](transform-api.md) to rewrite response payloads and mask or filter data. For example, [remove unneeded JSON properties](./policies/filter-response-content.md) from a response body. + +* [Response content validation](validation-policies.md#validate-content) in API Management can be used with an XML or JSON schema to block responses with undocumented properties or improper values. The policy also supports blocking responses exceeding a specified size. + +* Use the [validate status code](validation-policies.md#validate-status-code) policy to block responses with errors undefined in the API schema. + +* Use the [validate headers](validation-policies.md#validate-headers) policy to block responses with headers that aren't defined in the schema or don't comply to their definition in the schema. Remove unwanted headers with the [set header](api-management-transformation-policies.md#SetHTTPheader) policy. + +* For GraphQL scenarios, use the [validate GraphQL request](graphql-policies.md#validate-graphql-request) policy to validate GraphQL requests, authorize access to specific query paths, and limit response size. + +## Lack of resources and rate limiting + +Lack of rate limiting may lead to data exfiltration or successful DDoS attacks on backend services, causing an outage for all consumers. + +More information about this threat: [API4:2019 Lack of resources and rate limiting](https://github.com/OWASP/API-Security/blob/master/2019/en/src/0xa4-lack-of-resources-and-rate-limiting.md) + +### Recommendations + +* Use [rate limit](api-management-access-restriction-policies.md#LimitCallRate) (short-term) and [quota limit](api-management-access-restriction-policies.md#SetUsageQuota) (long-term) policies to control the allowed number of API calls or bandwidth per consumer. + +* Define strict request object definitions and their properties in the OpenAPI definition. For example, define the max value for paging integers, maxLength and regular expression (regex) for strings. Enforce those schemas with the [validate content](validation-policies.md#validate-content) and [validate parameters](validation-policies.md#validate-parameters) policies in API Management. 
+ +* Enforce maximum size of the request with the [validate content](validation-policies.md#validate-content) policy. + +* Optimize performance with [built-in caching](api-management-howto-cache.md), thus reducing the consumption of CPU, memory, and networking resources for certain operations. + +* Enforce authentication for API calls (see [Broken user authentication](#broken-user-authentication)). Revoke access for abusive users. For example, deactivate the subscription key, block the IP address with the [restrict caller IPs](api-management-access-restriction-policies.md#RestrictCallerIPs) policy, or reject requests for a certain user claim from a [JWT token](api-management-access-restriction-policies.md#ValidateJWT). + +* Apply a [CORS](api-management-cross-domain-policies.md#CORS) policy to control the websites that are allowed to load the resources served through the API. To avoid overly permissive configurations, don’t use wildcard values (`*`) in the CORS policy. + +* Minimize the time it takes a backend service to respond. The longer the backend service takes to respond, the longer the connection is occupied in API Management, therefore reducing the number of requests that can be served in a given timeframe. + + * Define `timeout` in the [forward request](api-management-advanced-policies.md#ForwardRequest) policy. + + * Use the [validate GraphQL request](graphql-policies.md#validate-graphql-request) policy for GraphQL APIs and configure `max-depth` and `max-size` parameters. + + * Limit the number of parallel backend connections with the [limit concurrency](api-management-advanced-policies.md#LimitConcurrency) policy. + +* While API Management can protect backend services from DDoS attacks, it may be vulnerable to those attacks itself. Deploy a bot protection service in front of API Management (for example, [Azure Application Gateway](api-management-howto-integrate-internal-vnet-appgateway.md), [Azure Front Door](../frontdoor/front-door-overview.md), or [Azure DDoS Protection Service](../ddos-protection/ddos-protection-overview.md)) to better protect against DDoS attacks. When using a WAF with Azure Application Gateway or Azure Front Door, consider using [Microsoft_BotManagerRuleSet_1.0](../web-application-firewall/afds/afds-overview.md#bot-protection-rule-set). + +## Broken function level authorization + +Complex access control policies with different hierarchies, groups, and roles, and an unclear separation between administrative and regular functions lead to authorization flaws. By exploiting these issues, attackers gain access to other users’ resources or administrative functions. + +More information about this threat: [API5:2019 Broken function level authorization](https://github.com/OWASP/API-Security/blob/master/2019/en/src/0xa5-broken-function-level-authorization.md) + +### Recommendations + +* By default, protect all API endpoints in API Management with [subscription keys](api-management-subscriptions.md). + +* Define a [validate JWT](api-management-access-restriction-policies.md#ValidateJWT) policy and enforce required token claims. If certain operations require stricter claims enforcement, define extra `validate-jwt` policies for those operations only. + +* Use an Azure virtual network or Private Link to hide API endpoints from the internet. Learn more about [virtual network options](virtual-network-concepts.md) with API Management. 
+ +* Don't define [wildcard API operations](add-api-manually.md#add-and-test-a-wildcard-operation) (that is, "catch-all" APIs with `*` as the path). Ensure that API Management only serves requests for explicitly defined endpoints, and requests to undefined endpoints are rejected. + +* Don't publish APIs with [open products](api-management-howto-add-products.md#access-to-product-apis) that don't require a subscription. + +## Mass assignment + +If an API offers more fields than the client requires for a given action, an attacker may inject excessive properties to perform unauthorized operations on data. Attackers may discover undocumented properties by inspecting the format of requests and responses or other APIs, or by guessing them. This vulnerability is especially applicable if you don’t use strongly typed programming languages. + +More information about this threat: [API6:2019 Mass assignment](https://github.com/OWASP/API-Security/blob/master/2019/en/src/0xa6-mass-assignment.md) + +### Recommendations + +* External API interfaces should be decoupled from the internal data implementation. Avoid binding API contracts directly to data contracts in backend services. Review the API design frequently, and deprecate and remove legacy properties using [versioning](api-management-versions.md) in API Management. + +* Precisely define XML and JSON contracts in the API schema and use [validate content](validation-policies.md#validate-content) and [validate parameters](validation-policies.md#validate-parameters) policies to block requests and responses with undocumented properties. Blocking requests with undocumented properties mitigates attacks, while blocking responses with undocumented properties makes it harder to reverse-engineer potential attack vectors. + +* If the backend interface can't be changed, use [transformation policies](transform-api.md) to rewrite request and response payloads and decouple the API contracts from backend contracts. For example, mask or filter data or [remove unneeded JSON properties](./policies/filter-response-content.md). + +## Security misconfiguration + +Attackers may attempt to exploit security misconfiguration vulnerabilities such as: + +* Missing security hardening +* Unnecessarily enabled features +* Network connections unnecessarily open to the internet +* Use of weak protocols or ciphers +* Other settings or endpoints that may allow unauthorized access to the system + +More information about this threat: [API7:2019 Security misconfiguration](https://github.com/OWASP/API-Security/blob/master/2019/en/src/0xa7-security-misconfiguration.md) + +### Recommendations + +* Correctly configure [gateway TLS](api-management-howto-manage-protocols-ciphers.MD). Don't use vulnerable protocols (for example, TLS 1.0, 1.1) or ciphers. + +* Configure APIs to accept encrypted traffic only, for example through HTTPS or WSS protocols. + +* Consider deploying API Management behind a [private endpoint](private-endpoint.md) or attached to a [virtual network deployed in internal mode](api-management-using-with-internal-vnet.md). In internal networks, access can be controlled from within the private network (via firewall or network security groups) and from the internet (via a reverse proxy). + +* Use Azure API Management policies: + + * Always inherit parent policies through the `<base />` tag.
+ + * When using OAuth 2.0, configure and test the [validate JWT](api-management-access-restriction-policies.md#ValidateJWT) policy to check the existence and validity of the JWT token before it reaches the backend. Automatically check the token expiration time, token signature, and issuer. Enforce claims, audiences, token expiration, and token signature through policy settings. + + * Configure the [CORS](api-management-cross-domain-policies.md#CORS) policy and don't use wildcard `*` for any configuration option. Instead, explicitly list allowed values. + + * Set [validation policies](validation-policies.md) to `prevent` in production environments to validate JSON and XML schemas, headers, query parameters, and status codes, and to enforce the maximum size for request or response. + + * If API Management is outside a network boundary, client IP validation is still possible using the [restrict caller IPs](api-management-access-restriction-policies.md#RestrictCallerIPs) policy. Ensure that it uses an allowlist, not a blocklist. + + * If client certificates are used between caller and API Management, use the [validate client certificate](api-management-access-restriction-policies.md#validate-client-certificate) policy. Ensure that the `validate-revocation`, `validate-trust`, `validate-not-before`, and `validate-not-after` attributes are all set to `true`. + + * Client certificates (mutual TLS) can also be applied between API Management and the backend. The backend should: + + * Have authorization credentials configured + + * Validate the certificate chain where applicable + + * Validate the certificate name where applicable + +* For GraphQL scenarios, use the [validate GraphQL request](graphql-policies.md#validate-graphql-request) policy. Ensure that the `authorization` element and `max-size` and `max-depth` attributes are set. + +* Don't store secrets in policy files or in source control. Always use API Management [named values](api-management-howto-properties.md) or fetch the secrets at runtime using custom policy expressions. + + * Named values should be [integrated with Key Vault](api-management-howto-properties.md#key-vault-secrets) or encrypted within API Management by marking them "secret". Never store secrets in plain-text named values. + +* Publish APIs through [products](api-management-howto-add-products.md), which require subscriptions. Don't use [open products](api-management-howto-add-products.md#access-to-product-apis) that don't require a subscription. + +* Use Key Vault integration to manage all certificates – this centralizes certificate management and can help to ease operations management tasks such as certificate renewal or revocation. + +* When using the [self-hosted-gateway](self-hosted-gateway-overview.md), ensure that there's a process in place to update the image to the latest version periodically. + +* Represent backend services as [backend entities](backends.md). Configure authorization credentials, certificate chain validation, and certificate name validation where applicable. + +* When using the [developer portal](api-management-howto-developer-portal.md): + + * If you choose to [self-host](developer-portal-self-host.md) the developer portal, ensure there's a process in place to periodically update the self-hosted portal to the latest version. Updates for the default managed version are automatic. + + * Use [Azure Active Directory (Azure AD)](api-management-howto-aad.md) or [Azure Active Directory B2C](api-management-howto-aad-b2c.md) for user sign-up and sign-in. 
Disable the default username and password authentication, which is less secure. + + * Assign [user groups](api-management-howto-create-groups.md#-associate-a-group-with-a-product) to products, to control the visibility of APIs in the portal. + +* Use [Azure Policy](security-controls-policy.md) to enforce API Management resource-level configuration and role-based access control (RBAC) permissions to control resource access. Grant minimum required privileges to every user. + +* Use a [DevOps process](devops-api-development-templates.md) and infrastructure-as-code approach outside of a development environment to ensure consistency of API Management content and configuration changes and to minimize human errors. + +* Don't use any deprecated features. + +## Injection + +Any endpoint accepting user data is potentially vulnerable to an injection exploit. Examples include, but aren't limited to: + +* [Command injection](https://owasp.org/www-community/attacks/Command_Injection), where a bad actor attempts to alter the API request to execute commands on the operating system hosting the API + +* [SQL injection](https://owasp.org/www-community/attacks/SQL_Injection), where a bad actor attempts to alter the API request to execute commands and queries against the database an API depends on + +More information about this threat: [API8:2019 Injection](https://github.com/OWASP/API-Security/blob/master/2019/en/src/0xa8-injection.md) + +### Recommendations + +* [Modern Web Application Firewall (WAF) policies](https://github.com/SpiderLabs/ModSecurity) cover many common injection vulnerabilities. While API Management doesn’t have a built-in WAF component, deploying a WAF upstream (in front) of the API Management instance is strongly recommended. For example, use [Azure Application Gateway](/azure/architecture/reference-architectures/apis/protect-apis) or [Azure Front Door](../frontdoor/front-door-overview.md). + + > [!IMPORTANT] + > Ensure that a bad actor can't bypass the gateway hosting the WAF and connect directly to the API Management gateway or backend API itself. Possible mitigations include: [network ACLs](../virtual-network/network-security-groups-overview.md), using API Management policy to [restrict inbound traffic by client IP](api-management-access-restriction-policies.md#RestrictCallerIPs), removing public access where not required, and [client certificate authentication](api-management-howto-mutual-certificates-for-clients.md) (also known as mutual TLS or mTLS). + +* Use schema and parameter [validation](validation-policies.md) policies, where applicable, to further constrain and validate the request before it reaches the backend API service. + + The schema supplied with the API definition should have a regex pattern constraint applied to vulnerable fields. Each regex should be tested to ensure that it constrains the field sufficiently to mitigate common injection attempts. 
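+
+To make the preceding recommendation concrete, the following inbound policy sketch combines parameter and content validation. The size limit and error variable names are example values to adapt; the regex constraints themselves belong in the API schema (via the `pattern` keyword) rather than in the policy.
+
+```xml
+<inbound>
+    <base />
+    <!-- Block requests with parameters that fail validation or aren't defined in the API specification -->
+    <validate-parameters specified-parameter-action="prevent" unspecified-parameter-action="prevent" errors-variable-name="paramValidationErrors" />
+    <!-- Validate the request body against the schema in the API definition and cap the payload at 100 KB -->
+    <validate-content unspecified-content-type-action="prevent" max-size="102400" size-exceeded-action="prevent" errors-variable-name="contentValidationErrors">
+        <content type="application/json" validate-as="json" action="prevent" />
+    </validate-content>
+</inbound>
+```
+
+The `prevent` actions shown here block noncompliant requests outright; during rollout, you could start with `detect` to log validation errors without rejecting traffic.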
+ +### Related information + +* [Deployment stamps pattern with Azure Front Door and API Management](/azure/architecture/patterns/deployment-stamp) + +* [Deploy Azure API Management with Azure Application Gateway](api-management-howto-integrate-internal-vnet-appgateway.md) + +## Improper assets management + +Vulnerabilities related to improper assets management include: + +* Lack of proper API documentation or ownership information + +* Excessive numbers of older API versions, which may be missing security fixes + +More information about this threat: [API9:2019 Improper assets management](https://github.com/OWASP/API-Security/blob/master/2019/en/src/0xa9-improper-assets-management.md) + +### Recommendations + +* Use a well-defined [OpenAPI specification](https://swagger.io/specification/) as the source for importing REST APIs. The specification allows encapsulation of the API definition, including self-documenting metadata. + + * Use API interfaces with precise paths, data schemas, headers, query parameters, and status codes. Avoid [wildcard operations](add-api-manually.md#add-and-test-a-wildcard-operation). Provide descriptions for each API and operation and include contact and license information. + + * Avoid endpoints that don’t directly contribute to the business objective. They unnecessarily increase the attack surface area and make it harder to evolve the API. + +* Use [revisions](api-management-revisions.md) and [versions](api-management-versions.md) in API Management to govern and control the API endpoints. Have a strong backend versioning strategy and commit to a maximum number of supported API versions (for example, 2 or 3 prior versions). Plan to quickly deprecate and ultimately remove older, often less secure, API versions. + +* Use an API Management instance per environment (such as development, test, and production). Ensure that each API Management instance connects to its dependencies in the same environment. For example, in the test environment, the test API Management resource should connect to a test Azure Key Vault resource and the test versions of backend services. Use [DevOps automation and infrastructure-as-code practices](devops-api-development-templates.md) to help maintain consistency and accuracy between environments and reduce human errors. + +* Use tags to organize APIs and products and group them for publishing. + +* Publish APIs for consumption through the built-in [developer portal](api-management-howto-developer-portal.md). Make sure the API documentation is up-to-date. + +* Discover undocumented or unmanaged APIs and expose them through API Management for better control. + +## Insufficient logging and monitoring + +Insufficient logging and monitoring, coupled with missing or ineffective integration with incident response, allows attackers to further attack systems, maintain persistence, pivot to more systems to tamper with, and extract or destroy data. Most breach studies demonstrate that the time to detect a breach is over 200 days, typically detected by external parties rather than internal processes or monitoring. + +More information about this threat: [API10:2019 Insufficient logging and monitoring](https://github.com/OWASP/API-Security/blob/master/2019/en/src/0xaa-insufficient-logging-monitoring.md) + +### Recommendations + +* Understand [observability options](observability.md) in Azure API Management and [best practices](/azure/architecture/best-practices/monitoring) for monitoring in Azure. 
+ +* Monitor API traffic with [Azure Monitor](api-management-howto-use-azure-monitor.md). + +* Log to [Application Insights](api-management-howto-app-insights.md) for debugging purposes. Correlate [transactions in Application Insights](../azure-monitor/app/transaction-diagnostics.md) between API Management and the backend API to [trace them end-to-end](../azure-monitor/app/correlation.md). + +* If needed, forward custom events to [Event Hubs](api-management-howto-log-event-hubs.md). + +* Set alerts in Azure Monitor and Application Insights - for example, for the [capacity metric](api-management-howto-autoscale.md) or for excessive requests or bandwidth transfer. + +* Use the [emit metrics](api-management-advanced-policies.md#emit-metrics) policy for custom metrics. + +* Use the Azure Activity log for tracking activity in the service. + +* Use custom events in [Azure Application Insights](../azure-monitor/app/api-custom-events-metrics.md) and [Azure Monitor](../azure-monitor/app/custom-data-correlation.md) as needed. + +* Configure [OpenTelemetry](how-to-deploy-self-hosted-gateway-kubernetes-opentelemetry.md#introduction-to-opentelemetry) for [self-hosted gateways](self-hosted-gateway-overview.md) on Kubernetes. + +## Next steps + +* [Security baseline for API Management](/security/benchmark/azure/baselines/api-management-security-baseline) +* [Security controls by Azure policy](security-controls-policy.md) +* [Landing zone accelerator for API Management](/azure/cloud-adoption-framework/scenarios/app-platform/api-management/landing-zone-accelerator) diff --git a/articles/api-management/policy-fragments.md b/articles/api-management/policy-fragments.md index ecda6bc7e089d..533cb9fa41ca4 100644 --- a/articles/api-management/policy-fragments.md +++ b/articles/api-management/policy-fragments.md @@ -82,13 +82,13 @@ For example, insert the policy fragment named *ForwardContext* in the inbound po ``` > [!TIP] -> To see the content of an included fragment displayed in the policy definition, select **Recalculate effective policy** in the policy editor. +> To see the content of an included fragment displayed in the policy definition, select **Calculate effective policy** in the policy editor. ## Manage policy fragments -After creating a policy fragment, you can view and update policy properties, or delete the policy at any time. +After creating a policy fragment, you can view and update the properties of a policy fragment, or delete the policy fragment at any time. -**To view properties of a fragment:** +**To view properties of a policy fragment:** 1. In the left navigation of your API Management instance, under **APIs**, select **Policy fragments**. Select the name of your fragment. 1. On the **Overview** page, review the **Policy document references** to see the policy definitions that include the fragment. diff --git a/articles/api-management/private-endpoint.md b/articles/api-management/private-endpoint.md index 12ef721464b14..48d1b348a7a98 100644 --- a/articles/api-management/private-endpoint.md +++ b/articles/api-management/private-endpoint.md @@ -5,7 +5,7 @@ ms.service: api-management author: dlepow ms.author: danlep ms.topic: how-to -ms.date: 02/23/2022 +ms.date: 03/31/2022 --- @@ -19,6 +19,8 @@ You can configure a [private endpoint](../private-link/private-endpoint-overview * Configure custom DNS settings or an Azure DNS private zone to map the API Management hostname to the endpoint's private IP address. 
+:::image type="content" source="media/private-endpoint/api-management-private-endpoint.png" alt-text="Diagram that shows a secure connection to API Management using private endpoint."::: + +With a private endpoint and Private Link, you can: - Create multiple Private Link connections to an API Management instance. diff --git a/articles/api-management/set-edit-policies.md b/articles/api-management/set-edit-policies.md index 638f07a731fa6..1bd73ea9ab431 100644 --- a/articles/api-management/set-edit-policies.md +++ b/articles/api-management/set-edit-policies.md @@ -187,7 +187,7 @@ If you configure policy definitions at more than one scope, multiple policies co In API Management, determine the policy evaluation order by placement of the `base` element in each section in the policy definition at each scope. The `base` element inherits the policies configured in that section at the next broader (parent) scope. The `base` element is included by default in each policy section. > [!NOTE] -> To view the effective policies at the current scope, select **Recalculate effective policy** in the policy editor. +> To view the effective policies at the current scope, select **Calculate effective policy** in the policy editor. To modify the policy evaluation order using the policy editor: diff --git a/articles/api-management/validation-policies.md b/articles/api-management/validation-policies.md index d924a133edeab..2bdb2ec351f32 100644 --- a/articles/api-management/validation-policies.md +++ b/articles/api-management/validation-policies.md @@ -6,15 +6,15 @@ documentationcenter: '' author: dlepow ms.service: api-management ms.topic: reference -ms.date: 03/07/2022 +ms.date: 06/07/2022 ms.author: danlep --- # API Management policies to validate requests and responses -This article provides a reference for API Management policies to validate REST or SOAP API requests and responses against schemas defined in the API definition or supplementary JSON or XML schemas. Validation policies protect from vulnerabilities such as injection of headers or payload or leaking sensitive data. +This article provides a reference for API Management policies to validate REST or SOAP API requests and responses against schemas defined in the API definition or supplementary JSON or XML schemas. Validation policies protect from vulnerabilities such as injection of headers or payload or leaking sensitive data. Learn more about common [API vulnerabilities](mitigate-owasp-api-threats.md). -While not a replacement for a Web Application Firewall, validation policies provide flexibility to respond to an additional class of threats that aren’t covered by security products that rely on static, predefined rules. +While not a replacement for a Web Application Firewall, validation policies provide flexibility to respond to an additional class of threats that aren’t covered by security products that rely on static, predefined rules. [!INCLUDE [api-management-policy-intro-links](../../includes/api-management-policy-intro-links.md)] @@ -66,7 +66,7 @@ The `validate-content` policy validates the size or content of a request or resp [!INCLUDE [api-management-policy-form-alert](../../includes/api-management-policy-form-alert.md)] -The following table shows the schema formats and request or response content types that the policy supports. Content type values are case insensitive. +The following table shows the schema formats and request or response content types that the policy supports. Content type values are case insensitive. 
| Format | Content types | |---------|---------| @@ -74,6 +74,18 @@ The following table shows the schema formats and request or response content typ |XML | Example: `application/xml` | |SOAP | Allowed values: `application/soap+xml` for SOAP 1.2 APIs
        `text/xml` for SOAP 1.1 APIs| +### What content is validated + +The policy validates the following content in the request or response against the schema: + +* Presence of all required properties. +* Absence of additional properties, if the schema has the `additionalProperties` field set to `false`. +* Types of all properties. For example, if a schema specifies a property as an integer, the request (or response) must include an integer and not another type, such as a string. +* The format of the properties, if specified in the schema - for example, regex (if the `pattern` keyword is specified), `minimum` for integers, and so on. + +> [!TIP] +> For examples of regex pattern constraints that can be used in schemas, see [OWASP Validation Regex Repository](https://owasp.org/www-community/OWASP_Validation_Regex_Repository). + ### Policy statement ```xml @@ -166,7 +178,6 @@ After the schema is created, it appears in the list on the **Schemas** page. Sel > * A schema may cross-reference another schema that is added to the API Management instance. > * Open-source tools to resolve WSDL and XSD schema references and to batch-import generated schemas to API Management are available on [GitHub](https://github.com/Azure-Samples/api-management-schema-import). - ### Usage This policy can be used in the following policy [sections](./api-management-howto-policies.md#sections) and [scopes](./api-management-howto-policies.md#scopes). diff --git a/articles/api-management/virtual-network-concepts.md b/articles/api-management/virtual-network-concepts.md index d7303df5d9abe..f7be67f48b011 100644 --- a/articles/api-management/virtual-network-concepts.md +++ b/articles/api-management/virtual-network-concepts.md @@ -1,43 +1,54 @@ --- title: Azure API Management with an Azure virtual network -description: Learn about scenarios and requirements to connect your API Management instance to an Azure virtual network. +description: Learn about scenarios and requirements to secure your API Management instance using an Azure virtual network. author: dlepow ms.service: api-management ms.topic: conceptual -ms.date: 01/14/2022 +ms.date: 05/26/2022 ms.author: danlep ms.custom: --- # Use a virtual network with Azure API Management -With Azure virtual networks (VNets), you can place ("inject") your API Management instance in a non-internet-routable network to which you control access. In a virtual network, your API Management instance can securely access other networked Azure resources and also connect to on-premises networks using various VPN technologies. To learn more about Azure VNets, start with the information in the [Azure Virtual Network Overview](../virtual-network/virtual-networks-overview.md). +API Management provides several options to secure access to your API Management instance and APIs using an Azure virtual network. API Management supports the following options, which are mutually exclusive: + +* **Integration (injection)** of the API Management instance into the virtual network, enabling the gateway to access resources in the network. + + You can choose one of two integration modes: *external* or *internal*. They differ in whether inbound connectivity to the gateway and other API Management endpoints is allowed from the internet or only from within the virtual network. + +* **Enabling secure and private connectivity** to the API Management gateway using a *private endpoint* (preview). -> [!TIP] -> API Management also supports [private endpoints](../private-link/private-endpoint-overview.md). 
A private endpoint enables secure client connectivity to your API Management instance using a private IP address from your virtual network and Azure Private Link. [Learn more](private-endpoint.md) about using private endpoints with API Management. +The following table compares virtual networking options. For more information, see later sections of this article and links to detailed guidance. + +|Networking model |Supported tiers |Supported components |Supported traffic |Usage scenario | +|---------|---------|---------|---------|----| +|**[Virtual network - external](#virtual-network-integration)** | Developer, Premium | Azure portal, gateway, management plane, and Git repository | Inbound and outbound traffic can be allowed to internet, peered virtual networks, Express Route, and S2S VPN connections. | External access to private and on-premises backends +|**[Virtual network - internal](#virtual-network-integration)** | Developer, Premium | Developer portal, gateway, management plane, and Git repository. | Inbound and outbound traffic can be allowed to peered virtual networks, Express Route, and S2S VPN connections. | Internal access to private and on-premises backends +|**[Private endpoint (preview)](#private-endpoint)** | Developer, Basic, Standard, Premium | Gateway only (managed gateway supported, self-hosted gateway not supported). | Only inbound traffic can be allowed from internet, peered virtual networks, Express Route, and S2S VPN connections. | Secure client connection to API Management gateway | + +## Virtual network integration +With Azure virtual networks (VNets), you can place ("inject") your API Management instance in a non-internet-routable network to which you control access. In a virtual network, your API Management instance can securely access other networked Azure resources and also connect to on-premises networks using various VPN technologies. To learn more about Azure VNets, start with the information in the [Azure Virtual Network Overview](../virtual-network/virtual-networks-overview.md). -This article explains VNet connectivity options, requirements, and considerations for your API Management instance. You can use the Azure portal, Azure CLI, Azure Resource Manager templates, or other tools for the configuration. You control inbound and outbound traffic into the subnet in which API Management is deployed by using [network security groups][NetworkSecurityGroups]. + You can use the Azure portal, Azure CLI, Azure Resource Manager templates, or other tools for the configuration. You control inbound and outbound traffic into the subnet in which API Management is deployed by using [network security groups](../virtual-network/network-security-groups-overview.md). For detailed deployment steps and network configuration, see: * [Connect to an external virtual network using Azure API Management](./api-management-using-with-vnet.md). * [Connect to an internal virtual network using Azure API Management](./api-management-using-with-internal-vnet.md). -[!INCLUDE [premium-dev.md](../../includes/api-management-availability-premium-dev.md)] - -## Access options - -When created, an API Management instance must be accessible from the internet. Using a virtual network, you can configure the developer portal, API gateway, and other API Management endpoints to be accessible either from the internet (external mode) or only within the VNet (internal mode). 
+### Access options +Using a virtual network, you can configure the developer portal, API gateway, and other API Management endpoints to be accessible either from the internet (external mode) or only within the VNet (internal mode). * **External** - The API Management endpoints are accessible from the public internet via an external load balancer. The gateway can access resources within the VNet. - :::image type="content" source="media/virtual-network-concepts/api-management-vnet-external.png" alt-text="Connect to external VNet"::: + :::image type="content" source="media/virtual-network-concepts/api-management-vnet-external.png" alt-text="Diagram showing a connection to external VNet." lightbox="media/virtual-network-concepts/api-management-vnet-external.png"::: Use API Management in external mode to access backend services deployed in the virtual network. * **Internal** - The API Management endpoints are accessible only from within the VNet via an internal load balancer. The gateway can access resources within the VNet. - :::image type="content" source="media/virtual-network-concepts/api-management-vnet-internal.png" alt-text="Connect to internal VNet"::: + :::image type="content" source="media/virtual-network-concepts/api-management-vnet-internal.png" alt-text="Diagram showing a connection to internal VNet." lightbox="media/virtual-network-concepts/api-management-vnet-internal.png"::: Use API Management in internal mode to: @@ -46,11 +57,11 @@ When created, an API Management instance must be accessible from the internet. U * Manage your APIs hosted in multiple geographic locations, using a single gateway endpoint. -## Network resource requirements +### Network resource requirements The following are virtual network resource requirements for API Management. Some requirements differ depending on the version (`stv2` or `stv1`) of the [compute platform](compute-infrastructure.md) hosting your API Management instance. -### [stv2](#tab/stv2) +#### [stv2](#tab/stv2) * An Azure Resource Manager virtual network is required. * You must provide a Standard SKU [public IPv4 address](../virtual-network/ip-services/public-ip-addresses.md#sku) in addition to specifying a virtual network and subnet. @@ -59,16 +70,16 @@ The following are virtual network resource requirements for API Management. Some * The API Management service, virtual network and subnet, and public IP address resource must be in the same region and subscription. * For multi-region API Management deployments, configure virtual network resources separately for each location. -### [stv1](#tab/stv1) +#### [stv1](#tab/stv1) * An Azure Resource Manager virtual network is required. -* The subnet used to connect to the API Management instance must be dedicated to API Management. It cannot contain other Azure resource types. +* The subnet used to connect to the API Management instance must be dedicated to API Management. It can't contain other Azure resource types. * The API Management service, virtual network, and subnet resources must be in the same region and subscription. -* For multi-region API Management deployments, you configure virtual network resources separately for each location. +* For multi-region API Management deployments, configure virtual network resources separately for each location. --- -## Subnet size +### Subnet size The minimum size of the subnet in which API Management can be deployed is /29, which gives three usable IP addresses. 
Each extra scale [unit](api-management-capacity.md) of API Management requires two more IP addresses. The minimum size requirement is based on the following considerations: @@ -80,46 +91,78 @@ The minimum size of the subnet in which API Management can be deployed is /29, w * When deploying into an [internal VNet](./api-management-using-with-internal-vnet.md), the instance requires an extra IP address for the internal load balancer. -## Routing +### Routing See the Routing guidance when deploying your API Management instance into an [external VNet](./api-management-using-with-vnet.md#routing) or [internal VNet](./api-management-using-with-internal-vnet.md#routing). Learn more about the [IP addresses of API Management](api-management-howto-ip-addresses.md). -## DNS +### DNS -* In external mode, the VNet enables [Azure-provided name resolution](../virtual-network/virtual-networks-name-resolution-for-vms-and-role-instances.md#azure-provided-name-resolution) by default for your API Management endpoints and other Azure resources. It does not provide name resolution for on-premises resources. Optionally, configure your own DNS solution. +* In external mode, the VNet enables [Azure-provided name resolution](../virtual-network/virtual-networks-name-resolution-for-vms-and-role-instances.md#azure-provided-name-resolution) by default for your API Management endpoints and other Azure resources. It doesn't provide name resolution for on-premises resources. Optionally, configure your own DNS solution. * In internal mode, you must provide your own DNS solution to ensure name resolution for API Management endpoints and other required Azure resources. We recommend configuring an Azure [private DNS zone](../dns/private-dns-overview.md). For more information, see the DNS guidance when deploying your API Management instance into an [external VNet](./api-management-using-with-vnet.md#routing) or [internal VNet](./api-management-using-with-internal-vnet.md#routing). -For more information, see: +Related information: * [Name resolution for resources in Azure virtual networks](../virtual-network/virtual-networks-name-resolution-for-vms-and-role-instances.md#name-resolution-that-uses-your-own-dns-server). * [Create an Azure private DNS zone](../dns/private-dns-getstarted-portal.md) > [!IMPORTANT] > If you plan to use a custom DNS solution for the VNet, set it up **before** deploying an API Management service into it. Otherwise, you'll need to update the API Management service each time you change the DNS server(s) by running the [Apply Network Configuration Operation](/rest/api/apimanagement/current-ga/api-management-service/apply-network-configuration-updates), or by selecting **Apply network configuration** in the service instance's network configuration window in the Azure portal. -## Limitations +### Limitations -Some limitations differ depending on the version (`stv2` or `stv1`) of the [compute platform](compute-infrastructure.md) hosting your API Management instance. +Some virtual network limitations differ depending on the version (`stv2` or `stv1`) of the [compute platform](compute-infrastructure.md) hosting your API Management instance. -### [stv2](#tab/stv2) +#### [stv2](#tab/stv2) * A subnet containing API Management instances can't be moved across subscriptions. * For multi-region API Management deployments configured in internal VNet mode, users own the routing and are responsible for managing the load balancing across multiple regions. 
* To import an API to API Management from an [OpenAPI specification](import-and-publish.md), the specification URL must be hosted at a publicly accessible internet address. -### [stv1](#tab/stv1) +#### [stv1](#tab/stv1) -* A subnet containing API Management instances can't be movacross subscriptions. +* A subnet containing API Management instances can't be moved across subscriptions. * For multi-region API Management deployments configured in internal VNet mode, users own the routing and are responsible for managing the load balancing across multiple regions. * To import an API to API Management from an [OpenAPI specification](import-and-publish.md), the specification URL must be hosted at a publicly accessible internet address. -* Due to platform limitations, connectivity between a resource in a globally peered VNet in another region and an API Management service in internal mode will not work. For more information, see the [virtual network documentation](../virtual-network/virtual-network-manage-peering.md#requirements-and-constraints). +* Due to platform limitations, connectivity between a resource in a globally peered VNet in another region and an API Management service in internal mode won't work. For more information, see the [virtual network documentation](../virtual-network/virtual-network-manage-peering.md#requirements-and-constraints). --- +## Private endpoint + +API Management supports [private endpoints](../private-link/private-endpoint-overview.md). A private endpoint enables secure client connectivity to your API Management instance using a private IP address from your virtual network and Azure Private Link. + +:::image type="content" source="media/virtual-network-concepts/api-management-private-endpoint.png" alt-text="Diagram showing a secure connection to API Management using private endpoint." lightbox="media/virtual-network-concepts/api-management-private-endpoint.png"::: + +With a private endpoint and Private Link, you can: + +* Create multiple Private Link connections to an API Management instance. +* Use the private endpoint to send inbound traffic on a secure connection. +* Use policy to distinguish traffic that comes from the private endpoint. +* Limit incoming traffic only to private endpoints, preventing data exfiltration. + +> [!IMPORTANT] +> * API Management support for private endpoints is currently in preview. +> * During the preview period, a private endpoint connection supports only incoming traffic to the API Management managed gateway. + +For more information, see [Connect privately to API Management using a private endpoint](private-endpoint.md). + +## Advanced networking configurations + +### Secure API Management endpoints with a web application firewall + +You may have scenarios where you need both secure external and internal access to your API Management instance, and flexibility to reach private and on-premises backends. For these scenarios, you may choose to manage external access to the endpoints of an API Management instance with a web application firewall (WAF). + +One example is to deploy an API Management instance in an internal virtual network, and route public access to it using an internet-facing Azure Application Gateway: + +:::image type="content" source="media/virtual-network-concepts/api-management-application-gateway.png" alt-text="Diagram showing Application Gateway in front of API Management instance." 
lightbox="media/virtual-network-concepts/api-management-application-gateway.png"::: + +For more information, see [Integrate API Management in an internal virtual network with Application Gateway](api-management-howto-integrate-internal-vnet-appgateway.md). + + ## Next steps Learn more about: @@ -128,11 +171,13 @@ Learn more about: * [Connecting a virtual network from different deployment models](../vpn-gateway/vpn-gateway-connect-different-deployment-models-powershell.md) * [Virtual network frequently asked questions](../virtual-network/virtual-networks-faq.md) -Connect to a virtual network: +Virtual network configuration with API Management: * [Connect to an external virtual network using Azure API Management](./api-management-using-with-vnet.md). * [Connect to an internal virtual network using Azure API Management](./api-management-using-with-internal-vnet.md). +* [Connect privately to API Management using a private endpoint](private-endpoint.md) + -Review the following topics +Related articles: * [Connecting a Virtual Network to backend using Vpn Gateway](../vpn-gateway/design.md#s2smulti) * [Connecting a Virtual Network from different deployment models](../vpn-gateway/vpn-gateway-connect-different-deployment-models-powershell.md) @@ -140,17 +185,6 @@ Review the following topics * [Virtual Network Frequently asked Questions](../virtual-network/virtual-networks-faq.md) * [Service tags](../virtual-network/network-security-groups-overview.md#service-tags) -[api-management-using-vnet-menu]: ./media/api-management-using-with-vnet/api-management-menu-vnet.png -[api-management-setup-vpn-select]: ./media/api-management-using-with-vnet/api-management-using-vnet-select.png -[api-management-setup-vpn-add-api]: ./media/api-management-using-with-vnet/api-management-using-vnet-add-api.png -[api-management-vnet-private]: ./media/virtual-network-concepts/api-management-vnet-internal.png -[api-management-vnet-public]: ./media/virtual-network-concepts/api-management-vnet-external.png -[Enable VPN connections]: #enable-vpn -[Connect to a web service behind VPN]: #connect-vpn -[Related content]: #related-content -[UDRs]: ../virtual-network/virtual-networks-udr-overview.md -[NetworkSecurityGroups]: ../virtual-network/network-security-groups-overview.md -[ServiceEndpoints]: ../virtual-network/virtual-network-service-endpoints-overview.md -[ServiceTags]: ../virtual-network/network-security-groups-overview.md#service-tags + diff --git a/articles/app-service/configure-custom-container.md b/articles/app-service/configure-custom-container.md index 938069c61c35a..7a377aa88cd77 100644 --- a/articles/app-service/configure-custom-container.md +++ b/articles/app-service/configure-custom-container.md @@ -206,7 +206,7 @@ The only exception is the `C:\home\LogFiles` directory, which is used to store t ::: zone pivot="container-linux" -You can use the */home* directory in your custom container file system to persist files across restarts and share them across instances. The `/home` directory is provided to enable your custom container to access persistent storage. Saving data within `/home` will contribute to the [storage space quota](https://docs.microsoft.com/azure/azure-resource-manager/management/azure-subscription-service-limits#app-service-limits) included with your App Service Plan. +You can use the */home* directory in your custom container file system to persist files across restarts and share them across instances. The `/home` directory is provided to enable your custom container to access persistent storage. 
Saving data within `/home` will contribute to the [storage space quota](../azure-resource-manager/management/azure-subscription-service-limits.md#app-service-limits) included with your App Service Plan. When persistent storage is disabled, then writes to the `/home` directory are not persisted across app restarts or across multiple instances. When persistent storage is enabled, all writes to the `/home` directory are persisted and can be accessed by all instances of a scaled-out app. Additionally, any contents inside the `/home` directory of the container are overwritten by any existing files already present on the persistent storage when the container starts. @@ -524,4 +524,4 @@ The following lists show supported and unsupported Docker Compose configuration Or, see additional resources: - [Environment variables and app settings reference](reference-app-settings.md) -- [Load certificate in Windows/Linux containers](configure-ssl-certificate-in-code.md#load-certificate-in-linuxwindows-containers) +- [Load certificate in Windows/Linux containers](configure-ssl-certificate-in-code.md#load-certificate-in-linuxwindows-containers) \ No newline at end of file diff --git a/articles/app-service/configure-language-java.md b/articles/app-service/configure-language-java.md index 6ef2e9260d6d6..6142f2f45f54c 100644 --- a/articles/app-service/configure-language-java.md +++ b/articles/app-service/configure-language-java.md @@ -1135,6 +1135,8 @@ App Service supports clustering for JBoss EAP versions 7.4.1 and greater. To ena When clustering is enabled, the JBoss EAP instances use the FILE_PING JGroups discovery protocol to discover new instances and persist the cluster information like the cluster members, their identifiers, and their IP addresses. On App Service, these files are under `/home/clusterinfo/`. The first EAP instance to start will obtain read/write permissions on the cluster membership file. Other instances will read the file, find the primary node, and coordinate with that node to be included in the cluster and added to the file. +The Premium V3 and Isolated V2 App Service Plan types can optionally be distributed across Availability Zones to improve resiliency and reliability for your business-critical workloads. This architecture is also known as [zone redundancy](how-to-zone-redundancy.md). The JBoss EAP clustering feature is compatible with the zone redundancy feature. + ### JBoss EAP App Service Plans diff --git a/articles/app-service/configure-vnet-integration-enable.md b/articles/app-service/configure-vnet-integration-enable.md index cecc93cd48beb..e4b6a8ab910b5 100644 --- a/articles/app-service/configure-vnet-integration-enable.md +++ b/articles/app-service/configure-vnet-integration-enable.md @@ -6,6 +6,7 @@ author: madsd ms.author: madsd ms.topic: how-to ms.date: 10/20/2021 +ms.tool: azure-cli, azure-powershell --- # Enable virtual network integration in Azure App Service diff --git a/articles/app-service/environment/how-to-migrate.md b/articles/app-service/environment/how-to-migrate.md index ff0f6593184b6..6693900fdf2a6 100644 --- a/articles/app-service/environment/how-to-migrate.md +++ b/articles/app-service/environment/how-to-migrate.md @@ -80,7 +80,7 @@ az network vnet subnet update -g $ASE_RG -n --vnet-name ` is not available in this location|You'll see this error if you're trying to migrate an App Service Environment in a region that doesn't support one of your requested features.
|Migrate using one of the [manual migration options](migration-alternatives.md) if you want to migrate immediately. Otherwise, wait for the migration feature to support this App Service Environment configuration. | |Migrate cannot be called on this ASE until the active upgrade has finished. |App Service Environments can't be migrated during platform upgrades. You can set your [upgrade preference](using-an-ase.md#upgrade-preference) from the Azure portal. |Wait until the upgrade finishes and then migrate. | @@ -114,11 +114,11 @@ App Service Environment v3 requires the subnet it's in to have a single delegati After updating all dependent resources with your new IPs and properly delegating your subnet, you should continue with migration as soon as possible. -During migration, the following events will occur: +During migration, which requires a service window of up to three hours, the following events will occur: - The existing App Service Environment is shut down and replaced by the new App Service Environment v3. - All App Service plans in the App Service Environment are converted from Isolated to Isolated v2. -- All of the apps that are on your App Service Environment are temporarily down. You should expect about one hour of downtime. +- All of the apps that are on your App Service Environment are temporarily down. You should expect about one hour of downtime during this period. - If you can't support downtime, see [migration-alternatives](migration-alternatives.md#guidance-for-manual-migration). - The public addresses that are used by the App Service Environment will change to the IPs identified during the previous step. @@ -137,7 +137,7 @@ There's no cost to migrate your App Service Environment. You'll stop being charg - **What if migrating my App Service Environment is not currently supported?** You won't be able migrate using the migration feature at this time. If you have an unsupported environment and want to migrate immediately, see the [manual migration options](migration-alternatives.md). This doc will be updated as additional regions and supported scenarios become available. - **Will I experience downtime during the migration?** - Yes, you should expect about one hour of downtime during the migration step so plan accordingly. If downtime isn't an option for you, see the [manual migration options](migration-alternatives.md). + Yes, you should expect about one hour of downtime during the three-hour service window of the migration step, so plan accordingly. If downtime isn't an option for you, see the [manual migration options](migration-alternatives.md). - **Will I need to do anything to my apps after the migration to get them running on the new App Service Environment?** No, all of your apps running on the old environment will be automatically migrated to the new environment and run like before. No user input is needed. - **What if my App Service Environment has a custom domain suffix?** @@ -149,7 +149,7 @@ There's no cost to migrate your App Service Environment. You'll stop being charg - **What happens if migration fails or there is an unexpected issue during the migration?** If there's an unexpected issue, support teams will be on hand. It's recommended to migrate dev environments before touching any production environments. - **What happens to my old App Service Environment?** - If you decide to migrate an App Service Environment, the old environment gets shut down and deleted and all of your apps are migrated to a new environment.
Your old environment will no longer be accessible. + If you decide to migrate an App Service Environment, the old environment gets shut down and deleted and all of your apps are migrated to a new environment. Your old environment will no longer be accessible. A rollback to the old environment will not be possible. - **What will happen to my App Service Environment v1/v2 resources after 31 August 2024?** After 31 August 2024, if you haven't migrated to App Service Environment v3, your App Service Environment v1/v2s and the apps deployed in them will no longer be available. App Service Environment v1/v2 is hosted on App Service scale units running on [Cloud Services (classic)](../../cloud-services/cloud-services-choose-me.md) architecture that will be [retired on 31 August 2024](https://azure.microsoft.com/updates/cloud-services-retirement-announcement/). Because of this, [App Service Environment v1/v2 will no longer be available after that date](https://azure.microsoft.com/updates/app-service-environment-v1-and-v2-retirement-announcement/). Migrate to App Service Environment v3 to keep your apps running or save or back up any resources or data that you need to maintain. diff --git a/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-01.md b/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-01.md index 3f79f7c1bbfb5..7a00fb8c878ce 100644 --- a/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-01.md +++ b/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-01.md @@ -2,9 +2,10 @@ author: alexwolfmsft ms.author: alexwolf ms.topic: include -ms.date: 02/03/2022 +ms.date: 06/01/2022 --- In the Azure portal: -
-Enter *coredb* in the Azure portal search bar. Select the matching item in the search results to navigate to the database overview page. \ No newline at end of file + +1. Type the name of your app in the search box at the top of the screen. +1. In the search results, select the app to navigate to it. \ No newline at end of file diff --git a/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-02.md b/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-02.md index e0aeba2cb69b0..6668192d0b663 100644 --- a/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-02.md +++ b/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-02.md @@ -2,7 +2,8 @@ author: alexwolfmsft ms.author: alexwolf ms.topic: include -ms.date: 02/03/2022 +ms.date: 06/01/2022 --- -On the left navigation of the database settings page, select **Connection strings**. Copy the connection string out of the text box under the preselected **ADO.NET** tab. Save it somewhere for later use throughout this guide. \ No newline at end of file +1. On the left navigation, select **Service Connector**. +1. Select **Create**. \ No newline at end of file diff --git a/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-03.md b/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-03.md index c3e01ba859342..63740541d089b 100644 --- a/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-03.md +++ b/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-03.md @@ -2,7 +2,15 @@ author: alexwolfmsft ms.author: alexwolf ms.topic: include -ms.date: 02/03/2022 +ms.date: 06/01/2022 --- -Next, in the search bar at the top of the Azure portal, search for the `coreSqlXYZ` App Service you created previously and select it to navigate to the overview page. +On the **Create connection** page: + +1. Select or enter the following settings: + + * **Service Type**: Select **SQL Database**. + * **SQL server**: Enter your SQL Database server name. + * **SQL database**: Select **coreDB**. + +1. Select **Next: Authentication**. \ No newline at end of file diff --git a/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-04.md b/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-04.md index b9c017b510a3d..e65707b0bc0d1 100644 --- a/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-04.md +++ b/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-04.md @@ -2,17 +2,10 @@ author: alexwolfmsft ms.author: alexwolf ms.topic: include -ms.date: 02/03/2022 +ms.date: 06/01/2022 --- -Select the **Configuration** link on the left nav to go to the configuration page. +Under the **Authentication** tab: -Select the **+ New Connection string** button in the **Connection Strings** section, and enter the following values: - -* **Name** - enter `MyDbConnection`. -* **Value** - paste the connection string you copied into the value field. Make sure to replace the username and password in the Connection String with the values you specified when creating the database. -* **Type** - select **SQLServer**. - -Select **OK** to close the dialog, and then select **Save** at the top of the configuration screen.
- -Your app can now connect to the SQL database. Next let's generate the schema for our data using Entity Framework Core. +1. Specify the username and password of your SQL database. +1. Select **Next: Networking**, then select **Next: Review + Create**. diff --git a/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-05.md b/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-05.md new file mode 100644 index 0000000000000..0fa2f839759ee --- /dev/null +++ b/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-05.md @@ -0,0 +1,11 @@ +--- +author: alexwolfmsft +ms.author: alexwolf +ms.topic: include +ms.date: 06/01/2022 +--- + + +After validation is complete, select **Create** to create the service connection. + +It might take a minute to complete the operation. Select the **Refresh** button to see the SQL database connection. diff --git a/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-06.md b/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-06.md new file mode 100644 index 0000000000000..18c29ebde3ec8 --- /dev/null +++ b/articles/app-service/includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-06.md @@ -0,0 +1,13 @@ +--- +author: cephalin +ms.author: cephalin +ms.topic: include +ms.date: 06/01/2022 +--- + +On the **Service Connector** page: + +1. Expand the connection by selecting **>** next to it. `AZURE_SQL_CONNECTIONSTRING` is the connection string generated for you. +1. Select **Hidden value. Click to show value** and copy the connection string for later. + +Your app can now connect to the SQL database. Next, let's generate the schema for our data using Entity Framework Core.
\ No newline at end of file diff --git a/articles/app-service/media/quickstart-wordpress/05-wordpress-basics-instance-details.png b/articles/app-service/media/quickstart-wordpress/05-wordpress-basics-instance-details.png index 0d08a5ccba5c9..36fe6c235ad68 100644 Binary files a/articles/app-service/media/quickstart-wordpress/05-wordpress-basics-instance-details.png and b/articles/app-service/media/quickstart-wordpress/05-wordpress-basics-instance-details.png differ diff --git a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-01-240px.png b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-01-240px.png index 2d045bc05feca..341a9cf59b5bd 100644 Binary files a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-01-240px.png and b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-01-240px.png differ diff --git a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-01.png b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-01.png index 4842fe301a2ac..6226ab9864403 100644 Binary files a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-01.png and b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-01.png differ diff --git a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-02-240px.png b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-02-240px.png index 0fa5a24e7e60d..205bf9d115518 100644 Binary files a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-02-240px.png and b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-02-240px.png differ diff --git a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-02.png b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-02.png index db5948d3f375c..1b8f0ee80e98d 100644 Binary files a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-02.png and b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-02.png differ diff --git a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-03-240px.png b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-03-240px.png index 7c37337e2d580..14bf9c96e6992 100644 Binary files a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-03-240px.png and b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-03-240px.png differ diff --git a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-03.png b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-03.png index 5953aaf3e8d16..d55005b1738c9 100644 Binary files a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-03.png and b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-03.png differ diff --git a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-04-240px.png b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-04-240px.png index 26a470546150e..55e1850bcd7a6 100644 Binary files 
a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-04-240px.png and b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-04-240px.png differ diff --git a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-04.png b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-04.png index f5891d8c0ddff..dd2cbdbb53c79 100644 Binary files a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-04.png and b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-04.png differ diff --git a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-05-240px.png b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-05-240px.png new file mode 100644 index 0000000000000..d7b607954c9cf Binary files /dev/null and b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-05-240px.png differ diff --git a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-05.png b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-05.png new file mode 100644 index 0000000000000..5f9f75ede12c3 Binary files /dev/null and b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-05.png differ diff --git a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-06-240px.png b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-06-240px.png new file mode 100644 index 0000000000000..2af15bcf73b13 Binary files /dev/null and b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-06-240px.png differ diff --git a/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-06.png b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-06.png new file mode 100644 index 0000000000000..797da957140af Binary files /dev/null and b/articles/app-service/media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-06.png differ diff --git a/articles/app-service/media/webjobs-dotnet-deploy-vs/publish-settings.png b/articles/app-service/media/webjobs-dotnet-deploy-vs/publish-settings.png index 643616a9a385f..538d1c0d47bab 100644 Binary files a/articles/app-service/media/webjobs-dotnet-deploy-vs/publish-settings.png and b/articles/app-service/media/webjobs-dotnet-deploy-vs/publish-settings.png differ diff --git a/articles/app-service/media/webjobs-sdk-get-started/blob-upload-button.png b/articles/app-service/media/webjobs-sdk-get-started/blob-upload-button.png index 03c3557cc22d1..f76b1848f067a 100644 Binary files a/articles/app-service/media/webjobs-sdk-get-started/blob-upload-button.png and b/articles/app-service/media/webjobs-sdk-get-started/blob-upload-button.png differ diff --git a/articles/app-service/media/webjobs-sdk-get-started/change-webjob-type.png b/articles/app-service/media/webjobs-sdk-get-started/change-webjob-type.png new file mode 100644 index 0000000000000..f5cfaff8cd9a3 Binary files /dev/null and b/articles/app-service/media/webjobs-sdk-get-started/change-webjob-type.png differ diff --git a/articles/app-service/media/webjobs-sdk-get-started/connection-key.png b/articles/app-service/media/webjobs-sdk-get-started/connection-key.png index 726237221c55e..a4bdce6db7a41 100644 Binary files 
a/articles/app-service/media/webjobs-sdk-get-started/connection-key.png and b/articles/app-service/media/webjobs-sdk-get-started/connection-key.png differ diff --git a/articles/app-service/media/webjobs-sdk-get-started/create-queue-azure-storage.png b/articles/app-service/media/webjobs-sdk-get-started/create-queue-azure-storage.png new file mode 100644 index 0000000000000..20d6ece1b0ab5 Binary files /dev/null and b/articles/app-service/media/webjobs-sdk-get-started/create-queue-azure-storage.png differ diff --git a/articles/app-service/media/webjobs-sdk-get-started/hello-world-text.png b/articles/app-service/media/webjobs-sdk-get-started/hello-world-text.png index 8f8704a56dbaf..c37d8e2d1f7ed 100644 Binary files a/articles/app-service/media/webjobs-sdk-get-started/hello-world-text.png and b/articles/app-service/media/webjobs-sdk-get-started/hello-world-text.png differ diff --git a/articles/app-service/media/webjobs-sdk-get-started/queue-msg-program-cs.png b/articles/app-service/media/webjobs-sdk-get-started/queue-msg-program-cs.png index e9cc6a7412280..6e99e7981a0be 100644 Binary files a/articles/app-service/media/webjobs-sdk-get-started/queue-msg-program-cs.png and b/articles/app-service/media/webjobs-sdk-get-started/queue-msg-program-cs.png differ diff --git a/articles/app-service/media/webjobs-sdk-get-started/stop-app-service.png b/articles/app-service/media/webjobs-sdk-get-started/stop-app-service.png new file mode 100644 index 0000000000000..d5308f480155e Binary files /dev/null and b/articles/app-service/media/webjobs-sdk-get-started/stop-app-service.png differ diff --git a/articles/app-service/networking/nat-gateway-integration.md b/articles/app-service/networking/nat-gateway-integration.md index 432d0f7a929d0..037d9e1b1bcbb 100644 --- a/articles/app-service/networking/nat-gateway-integration.md +++ b/articles/app-service/networking/nat-gateway-integration.md @@ -79,7 +79,7 @@ az network vnet subnet update --resource-group [myResourceGroup] --vnet-name [my The same NAT gateway can be used across multiple subnets in the same Virtual Network allowing a NAT gateway to be used across multiple apps and App Service plans. -NAT gateway supports both public IP addresses and public IP prefixes. A NAT gateway can support up to 16 IP addresses across individual IP addresses and prefixes. Each IP address allocates 64,000 ports (SNAT ports) allowing up to 1M available ports. Learn more in the [Scaling section](../../virtual-network/nat-gateway/nat-gateway-resource.md#scale-nat-gateway) of NAT gateway. +NAT gateway supports both public IP addresses and public IP prefixes. A NAT gateway can support up to 16 IP addresses across individual IP addresses and prefixes. Each IP address allocates 64,512 ports (SNAT ports) allowing up to 1M available ports. Learn more in the [Scaling section](../../virtual-network/nat-gateway/nat-gateway-resource.md#scale-nat-gateway) of NAT gateway. 
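To make the scaling math concrete: 16 addresses at 64,512 SNAT ports each works out to 16 x 64,512 = 1,032,192 ports, which is the roughly 1M figure above. As a hedged sketch (the resource names are placeholders and it assumes a NAT gateway that is already attached to the integration subnet), adding a /28 public IP prefix assigns 16 addresses in one step:

```azurecli
# Create a /28 public IP prefix, which contains 16 public IPv4 addresses.
az network public-ip prefix create \
    --resource-group myResourceGroup \
    --name myNatGatewayPrefix \
    --length 28

# Assign the prefix to the existing NAT gateway; this updates the gateway's
# outbound IP configuration to use the 16 addresses in the prefix.
az network nat gateway update \
    --resource-group myResourceGroup \
    --name myNatGateway \
    --public-ip-prefixes myNatGatewayPrefix
```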
## Next steps diff --git a/articles/app-service/provision-resource-terraform.md b/articles/app-service/provision-resource-terraform.md index 551d106f69378..494fbefc6c819 100644 --- a/articles/app-service/provision-resource-terraform.md +++ b/articles/app-service/provision-resource-terraform.md @@ -5,6 +5,7 @@ author: seligj95 ms.author: msangapu ms.topic: article ms.date: 8/26/2021 +ms.tool: terraform ms.custom: subject-terraform --- diff --git a/articles/app-service/quickstart-php.md b/articles/app-service/quickstart-php.md index 35a6a26a8cc4c..7d27471704d2a 100644 --- a/articles/app-service/quickstart-php.md +++ b/articles/app-service/quickstart-php.md @@ -59,15 +59,12 @@ Azure CLI has a command [`az webapp up`](/cli/azure/webapp#az_webapp_up) that wi In the terminal, deploy the code in your local folder using the [`az webapp up`](/cli/azure/webapp#az_webapp_up) command: ```azurecli -az webapp up \ - --sku F1 \ - --logs +az webapp up --runtime "php|8.0" --os-type=linux ``` - If the `az` command isn't recognized, be sure you have Azure CLI installed. - -- The `--sku F1` argument creates the web app on the Free pricing tier, which incurs a no cost. -- The `--logs` flag configures default logging required to enable viewing the log stream immediately after launching the webapp. +- The `--runtime "php|8.0"` argument creates the web app with PHP version 8.0. +- The `--os-type=linux` argument creates the web app on App Service on Linux. - You can optionally specify a name with the argument `--name `. If you don't provide one, then a name will be automatically generated. - You can optionally include the argument `--location ` where `` is an available Azure region. You can retrieve a list of allowable regions for your Azure account by running the [`az account list-locations`](/cli/azure/appservice#az_appservice_list_locations) command. - If you see the error, "Could not auto-detect the runtime stack of your app," make sure you're running the command in the code directory (See [Troubleshooting auto-detect issues with az webapp up](https://github.com/Azure/app-service-linux-docs/blob/master/AzWebAppUP/runtime_detection.md)). @@ -81,7 +78,7 @@ Resource group creation complete Creating AppServicePlan '<app-service-plan-name>' ... Creating webapp '<app-name>' ... Configuring default logging for the app, if not already enabled -Creating zip with contents of dir /home/cephas/myExpressApp ... +Creating zip with contents of dir /home/msangapu/myPhpApp ... Getting scm site credentials for zip deployment Starting zip deployment. This operation can take a while to complete ... Deployment endpoint responded with status code 202 @@ -116,10 +113,10 @@ Browse to the deployed application in your web browser at the URL `http:// [!div class="nextstepaction"] > [Configure PHP app](configure-language-php.md) -::: zone-end \ No newline at end of file +::: zone-end diff --git a/articles/app-service/quickstart-python-portal.md b/articles/app-service/quickstart-python-portal.md index 80441daa478ba..3e0534d70ac85 100644 --- a/articles/app-service/quickstart-python-portal.md +++ b/articles/app-service/quickstart-python-portal.md @@ -149,7 +149,7 @@ Having issues? [Let us know](https://aka.ms/FlaskCLIQuickstartHelp). 
## Next steps > [!div class="nextstepaction"] -> [Tutorial: Python (Django) web app with PostgreSQL](/azure/developer/python/tutorial-python-postgresql-app-portal) +> [Tutorial: Python (Django) web app with PostgreSQL](/azure/app-service/tutorial-python-postgresql-app) > [!div class="nextstepaction"] > [Configure Python app](configure-language-python.md) diff --git a/articles/app-service/quickstart-python.md b/articles/app-service/quickstart-python.md index a77ef4b0bc174..5a29ae2d26ead 100644 --- a/articles/app-service/quickstart-python.md +++ b/articles/app-service/quickstart-python.md @@ -3,8 +3,8 @@ title: 'Quickstart: Deploy a Python (Django or Flask) web app to Azure' description: Get started with Azure App Service by deploying your first Python app to Azure App Service. ms.topic: quickstart ms.date: 03/22/2022 -author: DavidCBerry13 -ms.author: daberry +author: mijacobs +ms.author: mijacobs ms.devlang: python ms.custom: devx-azure-cli, devx-azure-portal, devx-vscode-azure-extension, devdivchpfy22 --- diff --git a/articles/app-service/security-controls-policy.md b/articles/app-service/security-controls-policy.md index 726530f9d5a3f..c34ddffb05d0c 100644 --- a/articles/app-service/security-controls-policy.md +++ b/articles/app-service/security-controls-policy.md @@ -1,7 +1,7 @@ --- title: Azure Policy Regulatory Compliance controls for Azure App Service description: Lists Azure Policy Regulatory Compliance controls available for Azure App Service. These built-in policy definitions provide common approaches to managing the compliance of your Azure resources. -ms.date: 05/10/2022 +ms.date: 06/03/2022 ms.topic: sample ms.service: app-service ms.custom: subject-policy-compliancecontrols @@ -15,9 +15,21 @@ page lists the **compliance domains** and **security controls** for Azure App Se assign the built-ins for a **security control** individually to help make your Azure resources compliant with the specific standard. 
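If it helps to see what assigning one of these built-ins individually can look like, here is a minimal Azure CLI sketch. It assumes the built-in definition exists under the display name used in the release notes below; the assignment name and the resource group scope are placeholders.

```azurecli
# Look up the built-in definition by display name (assumed; verify the exact name first).
definition=$(az policy definition list \
    --query "[?displayName=='App Service apps should only be accessible over HTTPS'].name | [0]" \
    --output tsv)

# Assign it at a hypothetical resource group scope.
az policy assignment create \
    --name appservice-https-only \
    --policy "$definition" \
    --scope "/subscriptions/<subscription-id>/resourceGroups/<resource-group-name>"
```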
-[!INCLUDE [azure-policy-compliancecontrols-introwarning](../../includes/policy/standards/intro-warning.md)] +[!INCLUDE [Azure-policy-compliancecontrols-introwarning](../../includes/policy/standards/intro-warning.md)] -[!INCLUDE [azure-policy-compliancecontrols-appservice](../../includes/policy/standards/byrp/microsoft.web.md)] +[!INCLUDE [Azure-policy-compliancecontrols-appservice](../../includes/policy/standards/byrp/microsoft.web.md)] + +## Release notes + +### June 2022 + +- Deprecation of policy "API App should only be accessible over HTTPS" +- Rename of policy "Web Application should only be accessible over HTTPS" to "App Service apps should only be accessible over HTTPS" +- Update scope of policy "App Service apps should only be accessible over HTTPS" to include all app types except Function apps +- Update scope of policy "App Service apps should only be accessible over HTTPS" to include slots +- Update scope of policy "Function apps should only be accessible over HTTPS" to include slots +- Update logic of policy "App Service apps should use a SKU that supports private link" to include checks on App Service plan tier or name so that the policy supports Terraform deployments +- Update list of supported SKUs of policy "App Service apps should use a SKU that supports private link" to include the Basic and Standard tiers ## Next steps diff --git a/articles/app-service/troubleshoot-dotnet-visual-studio.md b/articles/app-service/troubleshoot-dotnet-visual-studio.md index ea582cdc5fae5..20fcf4c5e286f 100644 --- a/articles/app-service/troubleshoot-dotnet-visual-studio.md +++ b/articles/app-service/troubleshoot-dotnet-visual-studio.md @@ -33,7 +33,7 @@ The tutorial assumes you're using Visual Studio 2019. The streaming logs feature only works for applications that target .NET Framework 4 or later. ## App configuration and management -Visual Studio provides access to a subset of the app management functions and configuration settings available in the [Azure portal](https://go.microsoft.com/fwlink/?LinkId=529715). In this section, you'll see what's available by using **Server Explorer**. To see the latest Azure integration features, try out **Cloud Explorer** also. You can open both windows from the **View** menu. +Visual Studio provides access to a subset of the app management functions and configuration settings available in the [Azure portal](/rest/api/appservice/web-apps). In this section, you'll see what's available by using **Server Explorer**. To see the latest Azure integration features, try out **Cloud Explorer** also. You can open both windows from the **View** menu. 1. If you aren't already signed in to Azure in Visual Studio, right-click **Azure** and select Connect to **Microsoft Azure Subscription** in **Server Explorer**. @@ -684,4 +684,4 @@ For more information about analyzing web server logs, see the following resource The Microsoft TechNet website includes a [Using Failed Request Tracing](https://www.iis.net/learn/troubleshoot/using-failed-request-tracing) section, which may be helpful for understanding how to use these logs. However, this documentation focuses mainly on configuring failed request tracing in IIS, which you can't do in Azure App Service. 
[GetStarted]: quickstart-dotnetcore.md?pivots=platform-windows -[GetStartedWJ]: https://github.com/Azure/azure-webjobs-sdk/wiki \ No newline at end of file +[GetStartedWJ]: https://github.com/Azure/azure-webjobs-sdk/wiki diff --git a/articles/app-service/tutorial-connect-msi-azure-database.md b/articles/app-service/tutorial-connect-msi-azure-database.md index 361aead405183..5c808b55a97d9 100644 --- a/articles/app-service/tutorial-connect-msi-azure-database.md +++ b/articles/app-service/tutorial-connect-msi-azure-database.md @@ -13,11 +13,11 @@ ms.custom: "mvc, devx-track-azurecli" [App Service](overview.md) provides a highly scalable, self-patching web hosting service in Azure. It also provides a [managed identity](overview-managed-identity.md) for your app, which is a turn-key solution for securing access to Azure databases, including: - [Azure SQL Database](/azure/azure-sql/database/) -- [Azure Database for MySQL](/azure/mysql/) -- [Azure Database for PostgreSQL](/azure/postgresql/) +- [Azure Database for MySQL](../mysql/index.yml) +- [Azure Database for PostgreSQL](../postgresql/index.yml) > [!NOTE] -> This tutorial doesn't include guidance for [Azure Cosmos DB](/azure/cosmos-db/), which supports Azure Active Directory authentication differently. For information, see Cosmos DB documentation. For example: [Use system-assigned managed identities to access Azure Cosmos DB data](../cosmos-db/managed-identity-based-authentication.md). +> This tutorial doesn't include guidance for [Azure Cosmos DB](../cosmos-db/index.yml), which supports Azure Active Directory authentication differently. For information, see Cosmos DB documentation. For example: [Use system-assigned managed identities to access Azure Cosmos DB data](../cosmos-db/managed-identity-based-authentication.md). Managed identities in App Service make your app more secure by eliminating secrets from your app, such as credentials in the connection strings. This tutorial shows you how to connect to the above-mentioned databases from App Service using managed identities. @@ -54,7 +54,7 @@ First, enable Azure Active Directory authentication to the Azure database by ass 1. If your Azure AD tenant doesn't have a user yet, create one by following the steps at [Add or delete users using Azure Active Directory](../active-directory/fundamentals/add-users-azure-active-directory.md). -1. Find the object ID of the Azure AD user using the [`az ad user list`](/cli/azure/ad/user#az_ad_user_list) and replace *\*. The result is saved to a variable. +1. Find the object ID of the Azure AD user using the [`az ad user list`](/cli/azure/ad/user#az-ad-user-list) and replace *\*. The result is saved to a variable. ```azurecli-interactive azureaduser=$(az ad user list --filter "userPrincipalName eq ''" --query [].objectId --output tsv) @@ -62,7 +62,7 @@ First, enable Azure Active Directory authentication to the Azure database by ass # [Azure SQL Database](#tab/sqldatabase) -3. Add this Azure AD user as an Active Directory administrator using [`az sql server ad-admin create`](/cli/azure/sql/server/ad-admin#az_sql_server_ad_admin_create) command in the Cloud Shell. In the following command, replace *\* and *\* with your own parameters. +3. Add this Azure AD user as an Active Directory administrator using [`az sql server ad-admin create`](/cli/azure/sql/server/ad-admin#az-sql-server-ad-admin-create) command in the Cloud Shell. In the following command, replace *\* and *\* with your own parameters. 
```azurecli-interactive az sql server ad-admin create --resource-group --server-name --display-name ADMIN --object-id $azureaduser @@ -72,7 +72,7 @@ First, enable Azure Active Directory authentication to the Azure database by ass # [Azure Database for MySQL](#tab/mysql) -3. Add this Azure AD user as an Active Directory administrator using [`az mysql server ad-admin create`](/cli/azure/mysql/server/ad-admin#az_mysql_server_ad_admin_create) command in the Cloud Shell. In the following command, replace *\* and *\* with your own parameters. +3. Add this Azure AD user as an Active Directory administrator using [`az mysql server ad-admin create`](/cli/azure/mysql/server/ad-admin#az-mysql-server-ad-admin-create) command in the Cloud Shell. In the following command, replace *\* and *\* with your own parameters. ```azurecli-interactive az mysql server ad-admin create --resource-group --server-name --display-name --object-id $azureaduser @@ -83,7 +83,7 @@ First, enable Azure Active Directory authentication to the Azure database by ass # [Azure Database for PostgreSQL](#tab/postgresql) -3. Add this Azure AD user as an Active Directory administrator using [`az postgres server ad-admin create`](/cli/azure/postgres/server/ad-admin#az_postgres_server_ad_admin_create) command in the Cloud Shell. In the following command, replace *\* and *\* with your own parameters. +3. Add this Azure AD user as an Active Directory administrator using [`az postgres server ad-admin create`](/cli/azure/postgres/server/ad-admin#az-postgres-server-ad-admin-create) command in the Cloud Shell. In the following command, replace *\* and *\* with your own parameters. ```azurecli-interactive az postgres server ad-admin create --resource-group --server-name --display-name --object-id $azureaduser @@ -98,7 +98,7 @@ First, enable Azure Active Directory authentication to the Azure database by ass Next, you configure your App Service app to connect to SQL Database with a managed identity. -1. Enable a managed identity for your App Service app with the [az webapp identity assign](/cli/azure/webapp/identity#az_webapp_identity_assign) command in the Cloud Shell. In the following command, replace *\*. +1. Enable a managed identity for your App Service app with the [az webapp identity assign](/cli/azure/webapp/identity#az-webapp-identity-assign) command in the Cloud Shell. In the following command, replace *\*. # [System-assigned identity](#tab/systemassigned/sqldatabase) @@ -741,7 +741,7 @@ For Azure Database for MySQL and Azure Database for PostgreSQL, the database use The `if` statement sets the MySQL username based on which identity the token applies to. The token is then passed in to the [standard MySQL connection](../mysql/connect-python.md) as the password of the Azure identity. - The `LIBMYSQL_ENABLE_CLEARTEXT_PLUGIN` environment variable enables the [Cleartext plugin](https://dev.mysql.com/doc/refman/8.0/cleartext-pluggable-authentication.html) in the MySQL Connector (see [Use Azure Active Directory for authentication with MySQL](../mysql/howto-configure-sign-in-azure-ad-authentication.md#compatibility-with-application-drivers)). + The `LIBMYSQL_ENABLE_CLEARTEXT_PLUGIN` environment variable enables the [Cleartext plugin](https://dev.mysql.com/doc/refman/8.0/en/cleartext-pluggable-authentication.html) in the MySQL Connector (see [Use Azure Active Directory for authentication with MySQL](../mysql/howto-configure-sign-in-azure-ad-authentication.md#compatibility-with-application-drivers)). 
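As a side note, one hedged way to sanity-check the Azure AD sign-in path outside the app is to fetch a token with your own signed-in identity from the Azure CLI and pass it as the password, roughly as the application code described above does with the managed identity token. The server and user names below are placeholders, and your server may additionally require TLS options.

```azurecli
# Get an Azure AD access token for Azure Database for MySQL/PostgreSQL (ossrdbms audience).
token=$(az account get-access-token \
    --resource https://ossrdbms-aad.database.windows.net \
    --query accessToken --output tsv)

# Use the token as the password; the cleartext plugin must be enabled, as described above.
mysql -h <server-name>.mysql.database.azure.com \
    -u <aad-admin-user>@<server-name> \
    --enable-cleartext-plugin \
    -p"$token"
```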
# [Azure Database for PostgreSQL](#tab/postgresql) diff --git a/articles/app-service/tutorial-connect-msi-sql-database.md b/articles/app-service/tutorial-connect-msi-sql-database.md index 755462b8a8fed..052ec53770fcd 100644 --- a/articles/app-service/tutorial-connect-msi-sql-database.md +++ b/articles/app-service/tutorial-connect-msi-sql-database.md @@ -144,10 +144,9 @@ The steps you follow for your project depends on whether you're using [Entity Fr 1. In Visual Studio, open the Package Manager Console and add the NuGet package [Azure.Identity](https://www.nuget.org/packages/Azure.Identity) and update Entity Framework: ```powershell - Install-Package Azure.Identity -Version 1.5.0 + Install-Package Azure.Identity Update-Package EntityFramework ``` - 1. In your DbContext object (in *Models/MyDbContext.cs*), add the following code to the default constructor. ```csharp diff --git a/articles/app-service/tutorial-dotnetcore-sqldb-app.md b/articles/app-service/tutorial-dotnetcore-sqldb-app.md index f6a01c40cceb2..7bd08b1e449b8 100644 --- a/articles/app-service/tutorial-dotnetcore-sqldb-app.md +++ b/articles/app-service/tutorial-dotnetcore-sqldb-app.md @@ -2,7 +2,7 @@ title: Deploy an ASP.NET Core and Azure SQL Database app to Azure App Service description: Learn how to deploy an ASP.NET Core web app to Azure App Service and connect to an Azure SQL Database. ms.topic: tutorial -ms.date: 03/02/2022 +ms.date: 06/01/2022 author: alexwolfmsft ms.author: alexwolf ms.devlang: csharp @@ -96,7 +96,6 @@ Sign in to the [Azure portal](https://portal.azure.com/) and follow these steps | [!INCLUDE [Create database step 1](<./includes/tutorial-dotnetcore-sqldb-app/azure-portal-sql-db-create-01.md>)] | :::image type="content" source="./media/tutorial-dotnetcore-sqldb-app/azure-portal-create-sql-01-240px.png" alt-text="A screenshot showing how to use the search box in the top tool bar to find Azure SQL in Azure." lightbox="./media/tutorial-dotnetcore-sqldb-app/azure-portal-create-sql-01.png"::: | | [!INCLUDE [Create database step 2](<./includes/tutorial-dotnetcore-sqldb-app/azure-portal-sql-db-create-02.md>)] | :::image type="content" source="./media/tutorial-dotnetcore-sqldb-app/azure-portal-create-sql-02-240px.png" alt-text="A screenshot showing the create button on the SQL Servers page used to create a new database server." lightbox="./media/tutorial-dotnetcore-sqldb-app/azure-portal-create-sql-02.png"::: | | [!INCLUDE [Create database step 3](<./includes/tutorial-dotnetcore-sqldb-app/azure-portal-sql-db-create-03.md>)] | :::image type="content" source="./media/tutorial-dotnetcore-sqldb-app/azure-portal-create-sql-03-240px.png" alt-text="A screenshot showing the form to fill out to create a SQL Server in Azure." lightbox="./media/tutorial-dotnetcore-sqldb-app/azure-portal-create-sql-03.png"::: | -| [!INCLUDE [Create database step 4](<./includes/tutorial-dotnetcore-sqldb-app/azure-portal-sql-db-create-04.md>)] | :::image type="content" source="./media/tutorial-dotnetcore-sqldb-app/azure-portal-create-sql-04-240px.png" alt-text="A screenshot showing the form used to allow other Azure services to connect to the database." 
lightbox="./media/tutorial-dotnetcore-sqldb-app/azure-portal-create-sql-04.png"::: | | [!INCLUDE [Create database step 5](<./includes/tutorial-dotnetcore-sqldb-app/azure-portal-sql-db-create-05.md>)] | :::image type="content" source="./media/tutorial-dotnetcore-sqldb-app/azure-portal-create-sql-05-240px.png" alt-text="A screenshot showing how to use the search box to find the SQL databases item in Azure." lightbox="./media/tutorial-dotnetcore-sqldb-app/azure-portal-create-sql-05.png"::: | | [!INCLUDE [Create database step 6](<./includes/tutorial-dotnetcore-sqldb-app/azure-portal-sql-db-create-06.md>)] | :::image type="content" source="./media/tutorial-dotnetcore-sqldb-app/azure-portal-create-sql-06-240px.png" alt-text="A screenshot showing the create button in on the SQL databases page." lightbox="./media/tutorial-dotnetcore-sqldb-app/azure-portal-create-sql-06.png"::: | | [!INCLUDE [Create database step 7](<./includes/tutorial-dotnetcore-sqldb-app/azure-portal-sql-db-create-07.md>)] | :::image type="content" source="./media/tutorial-dotnetcore-sqldb-app/azure-portal-create-sql-07-240px.png" alt-text="A screenshot showing the form to fill out to create a new SQL database in Azure." lightbox="./media/tutorial-dotnetcore-sqldb-app/azure-portal-create-sql-07.png"::: | @@ -125,17 +124,6 @@ az sql db create \ --name coreDb ``` -We also need to add the following firewall rule to our database server to allow other Azure resources to connect to it. - -```azurecli-interactive -az sql server firewall-rule create \ - --resource-group msdocs-core-sql \ - --server \ - --name AzureAccess \ - --start-ip-address 0.0.0.0 \ - --end-ip-address 0.0.0.0 -``` - --- ## 4 - Deploy to the App Service @@ -167,7 +155,7 @@ We're now ready to deploy our .NET app to the App Service. ## 5 - Connect the App to the Database -Next, we must connect the App hosted in our App Service to our database using a Connection String. +Next, we must connect the App hosted in our App Service to our database using a Connection String. You can use [Service Connector](../service-connector/overview.md) to create the connection. ### [Azure portal](#tab/azure-portal) @@ -175,42 +163,44 @@ Sign in to the [Azure portal](https://portal.azure.com/) and follow the steps to | Instructions | Screenshot | |:----------------|-----------:| -| [!INCLUDE [Connect Service step 1](<./includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-01.md>)] | :::image type="content" source="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-01-240px.png" alt-text="A screenshot showing how to locate the database used by the App in the Azure portal." lightbox="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-01.png"::: | -| [!INCLUDE [Connect Service step 2](<./includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-02.md>)] | :::image type="content" source="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-02-240px.png" alt-text="A screenshot showing how to get the connection string used to connect to the database from the Azure portal." lightbox="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-02.png"::: | -| [!INCLUDE [Connect Service step 3](<./includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-03.md>)] | :::image type="content" source="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-03-240px.png" alt-text="A screenshot showing how to use the search box to find the App Service instance for the app in the Azure portal." 
lightbox="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-03.png"::: | -| [!INCLUDE [Connect Service step 4](<./includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-04.md>)] | :::image type="content" source="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-04-240px.png" alt-text="A screenshot showing how to enter the connection string as an app setting for the web app in the Azure portal." lightbox="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-04.png"::: | +| [!INCLUDE [Connect Service step 1](<./includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-01.md>)] | :::image type="content" source="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-01-240px.png" alt-text="A screenshot showing how to locate the app service in the Azure portal." lightbox="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-01.png"::: | +| [!INCLUDE [Connect Service step 2](<./includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-02.md>)] | :::image type="content" source="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-02-240px.png" alt-text="A screenshot showing how to locate Service Connector from the Azure portal." lightbox="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-02.png"::: | +| [!INCLUDE [Connect Service step 3](<./includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-03.md>)] | :::image type="content" source="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-03-240px.png" alt-text="A screenshot showing how to create a connection to the SQL database for the app in the Azure portal." lightbox="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-03.png"::: | +| [!INCLUDE [Connect Service step 4](<./includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-04.md>)] | :::image type="content" source="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-04-240px.png" alt-text="A screenshot showing how to enter username and password of SQL Database during service connection in the Azure portal." lightbox="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-04.png"::: | +| [!INCLUDE [Connect Service step 5](<./includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-05.md>)] | :::image type="content" source="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-05-240px.png" alt-text="A screenshot showing how to review and create the connection in the Azure portal." lightbox="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-05.png"::: | +| [!INCLUDE [Connect Service step 6](<./includes/tutorial-dotnetcore-sqldb-app/azure-portal-connect-database-06.md>)] | :::image type="content" source="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-06-240px.png" alt-text="A screenshot showing how to get the connection string for a service connector in the Azure portal." lightbox="./media/tutorial-dotnetcore-sqldb-app/azure-portal-connect-sql-db-06.png"::: | ### [Azure CLI](#tab/azure-cli) -Run Azure CLI commands in the [Azure Cloud Shell](https://shell.azure.com) or on a workstation with the [Azure CLI installed](/cli/azure/install-azure-cli). - -We can retrieve the Connection String for our database using the [az sql db show-connection-string](/cli/azure/sql/db#az-sql-db-show-connection-string) command. This command allows us to add the Connection String to our App Service configuration settings. 
Copy this Connection String value for later use. +Configure the connection between your app and the SQL database by using the [az webapp connection create sql](/cli/azure/webapp/connection/create#az-webapp-connection-create-sql) command. ```azurecli-interactive -az sql db show-connection-string \ - --client ado.net \ - --name coreDb \ - --server +az webapp connection create sql \ + --resource-group msdocs-core-sql \ + --name \ + --target-resource-group msdocs-core-sql \ + --server \ + --database coreDB \ + --query configurations ``` -Next, let's assign the Connection String to our App Service using the command below. `MyDbConnection` is the name of the Connection String in our appsettings.json file, which means it gets loaded by our app during startup. +When prompted, provide the administrator username and password for the SQL database. -Replace the username and password in the connection string with your own before running the command. +> [!NOTE] +> The CLI command does everything the app needs to successfully connect to the database, including: +> +> - In your App Service app, adds a connection string with the name `AZURE_SQL_CONNECTIONSTRING`, which your code can use for its database connection. If the connection string is already in use, `AZURE_SQL__CONNECTIONSTRING` is used for the name instead. +> - In your SQL database server, allows Azure services to access the SQL database server. -```azurecli-interactive -az webapp config connection-string set \ - -g msdocs-core-sql \ - -n \ - -t SQLServer \ - --settings MyDbConnection= +Copy this connection string value from the output for later. -``` +To see the entirety of the command output, drop the `--query` in the command. --- ## 6 - Generate the Database Schema -To generate our database schema, we need to set up a firewall rule on our Database Server. This rule allows our local computer to connect to Azure. For this step, you'll need to know your local computer's IP address. For more information about how to find the IP address, [see here](https://whatismyipaddress.com/). +To generate our database schema, set up a firewall rule on the SQL database server. This rule lets your local computer connect to Azure. For this step, you'll need to know your local computer's IP address. For more information about how to find the IP address, [see here](https://whatismyipaddress.com/). ### [Azure portal](#tab/azure-portal) @@ -231,17 +221,17 @@ az sql server firewall-rule create --resource-group msdocs-core-sql --server .database.windows.net,1433; - Initial Catalog=coredb; - Persist Security Info=False; - User ID=;Password=; - Encrypt=True; - TrustServerCertificate=False;" - } +"AZURE_SQL_CONNECTIONSTRING": "Data Source=.database.windows.net,1433;Initial Catalog=coreDb;User ID=;Password=" +``` + +Next, update the *Startup.cs* file the sample project by updating the existing connection string name `MyDbConnection` to `AZURE_SQL_CONNECTIONSTRING`: + +```csharp +services.AddDbContext(options => + options.UseSqlServer(Configuration.GetConnectionString("AZURE_SQL_CONNECTIONSTRING"))); ``` Finally, run the following commands to install the necessary CLI tools for Entity Framework Core. Create an initial database migration file and apply those changes to update the database: @@ -254,7 +244,7 @@ dotnet ef database update After the migration finishes, the correct schema is created. 
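The `dotnet ef` commands above scaffold the schema from whatever `DbContext` type the app registers with `AddDbContext`. As a point of reference, a minimal context of that kind is sketched below; the class name, the `Todo` entity, and its properties are illustrative placeholders rather than the exact types defined in the sample project.

```csharp
using System;
using Microsoft.EntityFrameworkCore;

// Illustrative sketch only: a minimal DbContext of the kind that the
// `dotnet ef migrations add` and `dotnet ef database update` commands
// scaffold and apply a schema from. The sample app's real context and
// entity names may differ.
public class MyDbContext : DbContext
{
    public MyDbContext(DbContextOptions<MyDbContext> options) : base(options)
    {
    }

    // Each DbSet<T> becomes a table in the Azure SQL database once the
    // migration is applied.
    public DbSet<Todo> Todos => Set<Todo>();
}

public class Todo
{
    public int Id { get; set; }               // Primary key by EF Core convention.
    public string? Description { get; set; }  // Nullable nvarchar column.
    public DateTime CreatedDate { get; set; }
}
```

Because the context receives its options from dependency injection and the connection string is resolved at runtime from `AZURE_SQL_CONNECTIONSTRING`, the same context typically works unchanged both locally and in App Service.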
-If you receive an error stating `Client with IP address xxx.xxx.xxx.xxx is not allowed to access the server`, that means the IP address you entered into your Azure firewall rule is incorrect. To fix this issue, update the Azure firewall rule with the IP address provided in the error message. +If you receive the error `Client with IP address xxx.xxx.xxx.xxx is not allowed to access the server`, that means the IP address you entered into your Azure firewall rule is incorrect. To fix this issue, update the Azure firewall rule with the IP address provided in the error message. ## 7 - Browse the Deployed Application and File Directory diff --git a/articles/app-service/webjobs-dotnet-deploy-vs.md b/articles/app-service/webjobs-dotnet-deploy-vs.md index 42044702fe23c..0ef467ea0ffec 100644 --- a/articles/app-service/webjobs-dotnet-deploy-vs.md +++ b/articles/app-service/webjobs-dotnet-deploy-vs.md @@ -13,7 +13,7 @@ ms.reviewer: david.ebbo;suwatch;pbatum;naren.soni # Develop and deploy WebJobs using Visual Studio -This article explains how to use Visual Studio to deploy a console app project to a web app in [Azure App Service](overview.md) as an [Azure WebJob](/azure/app-service/webjobs-create). For information about how to deploy WebJobs by using the [Azure portal](https://portal.azure.com), see [Run background tasks with WebJobs in Azure App Service](webjobs-create.md). +This article explains how to use Visual Studio to deploy a console app project to a web app in [Azure App Service](overview.md) as an [Azure WebJob](./webjobs-create.md). For information about how to deploy WebJobs by using the [Azure portal](https://portal.azure.com), see [Run background tasks with WebJobs in Azure App Service](webjobs-create.md). You can choose to develop a WebJob that runs as either a [.NET Core app](#webjobs-as-net-core-console-apps) or a [.NET Framework app](#webjobs-as-net-framework-console-apps). Version 3.x of the [Azure WebJobs SDK](webjobs-sdk-how-to.md) lets you develop WebJobs that run as either .NET Core apps or .NET Framework apps, while version 2.x supports only the .NET Framework. The way that you deploy a WebJobs project is different for .NET Core projects than for .NET Framework projects. @@ -51,7 +51,7 @@ Deploy a project as a WebJob by itself, or link it to a web project so that it a ### Prerequisites -Install Visual Studio 2017 or Visual Studio 2019 with the [Azure development workload](/visualstudio/install/install-visual-studio#step-4---choose-workloads). +Install Visual Studio 2022 with the [Azure development workload](/visualstudio/install/install-visual-studio#step-4---choose-workloads). ### Enable WebJobs deployment for an existing console app project @@ -98,7 +98,7 @@ To create a new WebJobs-enabled project, use the console app project template an Create a project that is configured to deploy automatically as a WebJob when you deploy a web project in the same solution. Use this option when you want to run your WebJob in the same web app in which you run the related web application. > [!NOTE] -> The WebJobs new-project template automatically installs NuGet packages and includes code in *Program.cs* for the [WebJobs SDK](/azure/app-service/webjobs-sdk-get-started). If you don't want to use the WebJobs SDK, remove or change the `host.RunAndBlock` statement in *Program.cs*. +> The WebJobs new-project template automatically installs NuGet packages and includes code in *Program.cs* for the [WebJobs SDK](./webjobs-sdk-get-started.md). 
If you don't want to use the WebJobs SDK, remove or change the `host.RunAndBlock` statement in *Program.cs*. > > @@ -228,7 +228,7 @@ If you enable **Always on** in Azure, you can use Visual Studio to change the We 1. In **Solution Explorer**, right-click the project and select **Publish**. -1. In the **Publish** tab, choose **Edit**. +1. In the **Settings** section, choose **Show all settings**. 1. In the **Profile settings** dialog box, choose **Continuous** for **WebJob Type**, and then choose **Save**. @@ -239,4 +239,4 @@ If you enable **Always on** in Azure, you can use Visual Studio to change the We ## Next steps > [!div class="nextstepaction"] -> [Learn more about the WebJobs SDK](webjobs-sdk-how-to.md) +> [Learn more about the WebJobs SDK](webjobs-sdk-how-to.md) \ No newline at end of file diff --git a/articles/app-service/webjobs-sdk-get-started.md b/articles/app-service/webjobs-sdk-get-started.md index fc55d2d45cc75..fe446c548d78c 100644 --- a/articles/app-service/webjobs-sdk-get-started.md +++ b/articles/app-service/webjobs-sdk-get-started.md @@ -15,7 +15,7 @@ ms.topic: tutorial Get started with the Azure WebJobs SDK for Azure App Service to enable your web apps to run background tasks, scheduled tasks, and respond to events. -Use Visual Studio 2019 to create a .NET core console app that uses the WebJobs SDK to respond to Azure Storage Queue messages, run the project locally, and finally deploy it to Azure. +Use Visual Studio 2022 to create a .NET Core console app that uses the WebJobs SDK to respond to Azure Storage Queue messages, run the project locally, and finally deploy it to Azure. In this tutorial, you will learn how to: @@ -29,15 +29,15 @@ In this tutorial, you will learn how to: ## Prerequisites -* Visual Studio 2019 with the **Azure development** workload. [Install Visual Studio 2019](/visualstudio/install/). +* Visual Studio 2022 with the **Azure development** workload. [Install Visual Studio 2022](/visualstudio/install/). * An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/dotnet). ## Create a console app -In this section, you start by creating a project in Visual Studio 2019. Next, you'll add tools for Azure development, code publishing, and functions that listen for triggers and call functions. Last, you'll set up console logging that disables a legacy monitoring tool and enables a console provider with default filtering. +In this section, you start by creating a project in Visual Studio 2022. Next, you'll add tools for Azure development, code publishing, and functions that listen for triggers and call functions. Last, you'll set up console logging that disables a legacy monitoring tool and enables a console provider with default filtering. >[!NOTE] ->The procedures in this article are verified for creating a .NET Core console app that runs on .NET Core 3.1. +>The procedures in this article are verified for creating a .NET Core console app that runs on .NET 6.0. ### Create a project @@ -47,22 +47,22 @@ In this section, you start by creating a project in Visual Studio 2019. Next, yo 1. Under **Configure your new project**, name the project *WebJobsSDKSample*, and then select **Next**. -1. Choose your **Target framework** and select **Create**. This tutorial has been verified using .NET Core 3.1. +1. Choose your **Target framework** and select **Create**. This tutorial has been verified using .NET 6.0. ### Install WebJobs NuGet packages Install the latest WebJobs NuGet package. 
This package includes Microsoft.Azure.WebJobs (WebJobs SDK), which lets you publish your function code to WebJobs in Azure App Service. -1. Get the latest stable 3.x version of the [Microsoft.Azure.WebJobs.Extensions NuGet package](https://www.nuget.org/packages/Microsoft.Azure.WebJobs.Extensions/). +1. Get the latest stable 4.x version of the [Microsoft.Azure.WebJobs.Extensions NuGet package](https://www.nuget.org/packages/Microsoft.Azure.WebJobs.Extensions/). 2. In Visual Studio, go to **Tools** > **NuGet Package Manager**. 3. Select **Package Manager Console**. You'll see a list of NuGet cmdlets, a link to documentation, and a `PM>` entry point. -4. In the following command, replace `<3_X_VERSION>` with the current version number you found in step 1. +4. In the following command, replace `<4_X_VERSION>` with the current version number you found in step 1. ```powershell - Install-Package Microsoft.Azure.WebJobs.Extensions -version <3_X_VERSION> + Install-Package Microsoft.Azure.WebJobs.Extensions -version <4_X_VERSION> ``` 5. In the **Package Manager Console**, execute the command. The extension list appears and automatically installs. @@ -70,27 +70,33 @@ Install the latest WebJobs NuGet package. This package includes Microsoft.Azure. The host is the runtime container for functions that listens for triggers and calls functions. The following steps create a host that implements [`IHost`](/dotnet/api/microsoft.extensions.hosting.ihost), which is the Generic Host in ASP.NET Core. -1. Select the **Program.cs** tab and add these `using` statements: +1. Select the **Program.cs** tab, remove the existing contents, and add these `using` statements: ```cs using System.Threading.Tasks; using Microsoft.Extensions.Hosting; ``` -1. Also under **Program.cs**, replace the `Main` method with the following code: +1. Also under **Program.cs**, add the following code: ```cs - static async Task Main() + namespace WebJobsSDKSample { - var builder = new HostBuilder(); - builder.ConfigureWebJobs(b => + class Program + { + static async Task Main() + { + var builder = new HostBuilder(); + builder.ConfigureWebJobs(b => { b.AddAzureStorageCoreServices(); }); - var host = builder.Build(); - using (host) - { - await host.RunAsync(); + var host = builder.Build(); + using (host) + { + await host.RunAsync(); + } + } } } ``` @@ -103,10 +109,10 @@ Set up console logging that uses the [ASP.NET Core logging framework](/aspnet/co 1. Get the latest stable version of the [`Microsoft.Extensions.Logging.Console` NuGet package](https://www.nuget.org/packages/Microsoft.Extensions.Logging.Console/), which includes `Microsoft.Extensions.Logging`. -2. In the following command, replace `<3_X_VERSION>` with the current version number you found in step 1. Each type of NuGet Package has a unique version number. +2. In the following command, replace `<6_X_VERSION>` with the current version number you found in step 1. Each type of NuGet Package has a unique version number. ```powershell - Install-Package Microsoft.Extensions.Logging.Console -version <3_X_VERSION> + Install-Package Microsoft.Extensions.Logging.Console -version <6_X_VERSION> ``` 3. In the **Package Manager Console**, fill in the current version number and execute the command. The extension list appears and automatically installs. 
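After the `Microsoft.Extensions.Logging.Console` package is installed, the provider still has to be registered on the host. The following is a minimal sketch of how the console provider is typically wired into the `HostBuilder` created earlier; it reuses the `WebJobsSDKSample` namespace from the preceding steps and adds only the `ConfigureLogging` call.

```csharp
using System.Threading.Tasks;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;

namespace WebJobsSDKSample
{
    class Program
    {
        static async Task Main()
        {
            var builder = new HostBuilder();
            builder.ConfigureWebJobs(b =>
            {
                b.AddAzureStorageCoreServices();
            });
            // Register the console logging provider so that host events and
            // function invocations are written to the terminal while the
            // WebJob runs locally.
            builder.ConfigureLogging((context, b) =>
            {
                b.AddConsole();
            });
            var host = builder.Build();
            using (host)
            {
                await host.RunAsync();
            }
        }
    }
}
```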
@@ -166,22 +172,22 @@ Starting with version 3 of the WebJobs SDK, to connect to Azure Storage services >[!NOTE] > Beginning with 5.x, Microsoft.Azure.WebJobs.Extensions.Storage has been [split by storage service](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/storage/Microsoft.Azure.WebJobs.Extensions.Storage/CHANGELOG.md#major-changes-and-features) and has migrated the `AddAzureStorage()` extension method by service type. -1. Get the latest stable version of the [Microsoft.Azure.WebJobs.Extensions.Storage](https://www.nuget.org/packages/Microsoft.Azure.WebJobs.Extensions.Storage) NuGet package, version 3.x. +1. Get the latest stable version of the [Microsoft.Azure.WebJobs.Extensions.Storage](https://www.nuget.org/packages/Microsoft.Azure.WebJobs.Extensions.Storage) NuGet package, version 5.x. -1. In the following command, replace `<3_X_VERSION>` with the current version number you found in step 1. Each type of NuGet Package has a unique version number. +1. In the following command, replace `<5_X_VERSION>` with the current version number you found in step 1. Each type of NuGet Package has a unique version number. ```powershell - Install-Package Microsoft.Azure.WebJobs.Extensions.Storage -Version <3_X_VERSION> + Install-Package Microsoft.Azure.WebJobs.Extensions.Storage -Version <5_X_VERSION> ``` 1. In the **Package Manager Console**, execute the command with the current version number at the `PM>` entry point. -1. Continuing in **Program.cs**, in the `ConfigureWebJobs` extension method, add the `AddAzureStorage` method on the [`HostBuilder`](/dotnet/api/microsoft.extensions.hosting.hostbuilder) instance (before the `Build` command) to initialize the Storage extension. At this point, the `ConfigureWebJobs` method looks like this: +1. Continuing in **Program.cs**, in the `ConfigureWebJobs` extension method, add the `AddAzureStorageQueues` method on the [`HostBuilder`](/dotnet/api/microsoft.extensions.hosting.hostbuilder) instance (before the `Build` command) to initialize the Storage extension. At this point, the `ConfigureWebJobs` method looks like this: ```cs builder.ConfigureWebJobs(b => { b.AddAzureStorageCoreServices(); - b.AddAzureStorage(); + b.AddAzureStorageQueues(); }); ``` 1. Add the following code in the `Main` method after the `builder` is instantiated: @@ -206,7 +212,7 @@ Starting with version 3 of the WebJobs SDK, to connect to Azure Storage services builder.ConfigureWebJobs(b => { b.AddAzureStorageCoreServices(); - b.AddAzureStorage(); + b.AddAzureStorageQueues(); }); var host = builder.Build(); using (host) @@ -285,37 +291,29 @@ Because this file contains a connection string secret, you shouldn't store the f Build and run the project locally and create a message queue to trigger the function. -1. In **Cloud Explorer** in Visual Studio, expand the node for your new storage account, and then right-click **Queues**. - -1. Select **Create Queue**. - -1. Enter *queue* as the name for the queue, and then select **OK**. - - ![Screenshot that shows where you create the queue and name it "queue". ](./media/webjobs-sdk-get-started/create-queue.png) - -1. Right-click the node for the new queue, and then select **Open**. +1. In the Azure portal, navigate to your storage account and select the **Queues** tab (1). Select **+ Queue** (2) and enter **queue** as the Queue name (3). Then, select **OK** (4). -1. Select the **Add Message** icon. 
+ ![This image shows how to create a new Azure Storage Queue.](./media/webjobs-sdk-get-started/create-queue-azure-storage.png "New Azure Storage Queue") - ![Screenshot that highlights the Add Message icon.](./media/webjobs-sdk-get-started/create-queue-message.png) +2. Click the new queue and select **Add message**. -1. In the **Add Message** dialog, enter *Hello World!* as the **Message text**, and then select **OK**. There is now a message in the queue. +3. In the **Add Message** dialog, enter *Hello World!* as the **Message text**, and then select **OK**. There is now a message in the queue. ![Create queue](./media/webjobs-sdk-get-started/hello-world-text.png) -1. Press **Ctrl+F5** to run the project. +4. Press **Ctrl+F5** to run the project. The console shows that the runtime found your function. Because you used the `QueueTrigger` attribute in the `ProcessQueueMessage` function, the WebJobs runtime listens for messages in the queue named `queue`. When it finds a new message in this queue, the runtime calls the function, passing in the message string value. -1. Go back to the **Queue** window and refresh it. The message is gone, since it has been processed by your function running locally. +5. Go back to the **Queue** window and refresh it. The message is gone, since it has been processed by your function running locally. -1. Close the console window. +6. Close the console window. It's now time to publish your WebJobs SDK project to Azure. ## Deploy to Azure -During deployment, you create an app service instance where you'll run your functions. When you publish a .NET Core console app to App Service in Azure, it automatically runs as a WebJob. To learn more about publishing, see [Develop and deploy WebJobs using Visual Studio](webjobs-dotnet-deploy-vs.md). +During deployment, you create an app service instance where you'll run your functions. When you publish a .NET console app to App Service in Azure, it automatically runs as a WebJob. To learn more about publishing, see [Develop and deploy WebJobs using Visual Studio](webjobs-dotnet-deploy-vs.md). ### Create Azure resources @@ -333,9 +331,12 @@ For a continuous WebJob, you should enable the Always on setting in the site so With the web app created in Azure, it's time to publish the WebJobs project. -1. In the **Publish** page under **Hosting**, select the edit button and change the **WebJob Type** to `Continuous` and select **Save**. This makes sure that the WebJob is running when messages are added to the queue. Triggered WebJobs are typically used only for manual webhooks. +1. In the **Publish** page under **Hosting**, select the edit button and change the **WebJob Type** to `Continuous` and select **Save**. This makes sure that the WebJob is running when messages are added to the queue. Triggered WebJobs are typically used only for manual webhooks. -1. Select the **Publish** button at the top right corner of the **Publish** page. When the operation completes, your WebJob is running on Azure. + ![Change WebJob type from the VS 2022 Publish window.](./media/webjobs-sdk-get-started/change-webjob-type.png) + + +2. Select the **Publish** button at the top right corner of the **Publish** page. When the operation completes, your WebJob is running on Azure. ### Create a storage connection app setting @@ -436,7 +437,7 @@ This initializes the Application Insights logging provider with default [filteri 1. In **Solution Explorer**, right-click the project and select **Publish**. -1. 
As before, use **Cloud Explorer** in Visual Studio to create a queue message like you did [earlier](#test-locally), except enter *Hello App Insights!* as the message text. +1. As before, use the Azure portal to create a queue message like you did [earlier](#test-locally), except enter *Hello App Insights!* as the message text. 1. In your **Publish** profile page, select the three dots above **Hosting** to show **Hosting profile section actions** and choose **Open in Azure Portal**. @@ -454,94 +455,72 @@ This initializes the Application Insights logging provider with default [filteri Bindings simplify code that reads and writes data. Input bindings simplify code that reads data. Output bindings simplify code that writes data. -### Add input binding +### Add bindings + +Input bindings simplify code that reads data. For this example, the queue message is the name of a blob, which you'll use to find and read a blob in Azure Storage. You will then use output bindings to write a copy of the file to the same container. + +1. In **Functions.cs**, add a `using`: -Input bindings simplify code that reads data. For this example, the queue message is the name of a blob, which you'll use to find and read a blob in Azure Storage. + ```cs + using System.IO; + ``` -1. In *Functions.cs*, replace the `ProcessQueueMessage` method with the following code: +2. Replace the `ProcessQueueMessage` method with the following code: ```cs public static void ProcessQueueMessage( [QueueTrigger("queue")] string message, [Blob("container/{queueTrigger}", FileAccess.Read)] Stream myBlob, + [Blob("container/copy-{queueTrigger}", FileAccess.Write)] Stream outputBlob, ILogger logger) { logger.LogInformation($"Blob name:{message} \n Size: {myBlob.Length} bytes"); + myBlob.CopyTo(outputBlob); } ``` - + In this code, `queueTrigger` is a [binding expression](../azure-functions/functions-bindings-expressions-patterns.md), which means it resolves to a different value at runtime. At runtime, it has the contents of the queue message. -1. Add a `using`: + This code uses output bindings to create a copy of the file identified by the queue message. The file copy is prefixed with *copy-*. - ```cs - using System.IO; - ``` +3. In **Program.cs**, in the `ConfigureWebJobs` extension method, add the `AddAzureStorageBlobs` method on the [`HostBuilder`](/dotnet/api/microsoft.extensions.hosting.hostbuilder) instance (before the `Build` command) to initialize the Storage extension. At this point, the `ConfigureWebJobs` method looks like this: + + ```cs + builder.ConfigureWebJobs(b => + { + b.AddAzureStorageCoreServices(); + b.AddAzureStorageQueues(); + b.AddAzureStorageBlobs(); + }); + ``` -1. Create a blob container in your storage account. +4. Create a blob container in your storage account. - a. In **Cloud Explorer** in Visual Studio, expand the node for your storage account, right-click **Blobs**, and then select **Create Blob Container**. + a. In the Azure portal, navigate to the **Containers** tab below **Data storage** and select **+ Container** - b. In the **Create Blob Container** dialog, enter *container* as the container name, and then select **OK**. + b. In the **New container** dialog, enter *container* as the container name, and then select **Create**. -1. Upload the *Program.cs* file to the blob container. (This file is used here as an example; you could upload any text file and create a queue message with the file's name.) +5. Upload the *Program.cs* file to the blob container. 
(This file is used here as an example; you could upload any text file and create a queue message with the file's name.) - a. In **Cloud Explorer**, double-click the node for the container you created. + a. Select the new container you created - b. In the **Container** window, select the **Upload** button. + b. Select the **Upload** button. ![Blob upload button](./media/webjobs-sdk-get-started/blob-upload-button.png) c. Find and select *Program.cs*, and then select **OK**. -1. Create a queue message in the queue you created earlier, with *Program.cs* as the text of the message. - - ![Queue message Program.cs](./media/webjobs-sdk-get-started/queue-msg-program-cs.png) - -1. Run the project locally. - - The queue message triggers the function, which then reads the blob and logs its length. The console output looks like this: - - ```console - Found the following functions: - ConsoleApp1.Functions.ProcessQueueMessage - Job host started - Executing 'Functions.ProcessQueueMessage' (Reason='New queue message detected on 'queue'.', Id=5a2ac479-de13-4f41-aae9-1361f291ff88) - Blob name:Program.cs - Size: 532 bytes - Executed 'Functions.ProcessQueueMessage' (Succeeded, Id=5a2ac479-de13-4f41-aae9-1361f291ff88) - ``` -### Add an output binding - -Output bindings simplify code that writes data. This example modifies the previous one by writing a copy of the blob instead of logging its size. Blob storage bindings are included in the Azure Storage extension package that we installed previously. - -1. Replace the `ProcessQueueMessage` method with the following code: - - ```cs - public static void ProcessQueueMessage( - [QueueTrigger("queue")] string message, - [Blob("container/{queueTrigger}", FileAccess.Read)] Stream myBlob, - [Blob("container/copy-{queueTrigger}", FileAccess.Write)] Stream outputBlob, - ILogger logger) - { - logger.LogInformation($"Blob name:{message} \n Size: {myBlob.Length} bytes"); - myBlob.CopyTo(outputBlob); - } - ``` - -1. Create another queue message with *Program.cs* as the text of the message. - -1. Run the project locally. - - The queue message triggers the function, which then reads the blob, logs its length, and creates a new blob. The console output is the same, but when you go to the blob container window and select **Refresh**, you see a new blob named *copy-Program.cs.* - ### Republish the project 1. In **Solution Explorer**, right-click the project and select **Publish**. 1. In the **Publish** dialog, make sure that the current profile is selected and then select **Publish**. Results of the publish are detailed in the **Output** window. -1. Verify the function in Azure by again uploading a file to the blob container and adding a message to the queue that is the name of the uploaded file. You see the message get removed from the queue and a copy of the file created in the blob container. +1. Create a queue message in the queue you created earlier, with *Program.cs* as the text of the message. + + ![Queue message Program.cs](./media/webjobs-sdk-get-started/queue-msg-program-cs.png) + +1. A copy of the file, *copy-Program.cs*, will appear in the blob container. 
## Next steps diff --git a/articles/application-gateway/application-gateway-key-vault-common-errors.md b/articles/application-gateway/application-gateway-key-vault-common-errors.md index b6c6400143888..2de9e78be4651 100644 --- a/articles/application-gateway/application-gateway-key-vault-common-errors.md +++ b/articles/application-gateway/application-gateway-key-vault-common-errors.md @@ -12,16 +12,16 @@ ms.author: jaysoni # Common key vault errors in Azure Application Gateway -Application Gateway enables customers to securely store TLS certificates in Azure Key Vault. When using a Key Vault resource, it is important that the gateway always has access to the linked key vault. If your Application Gateway is unable to fetch the certificate, the associated HTTPS listeners will be placed in a disabled state. [Learn more](../application-gateway/disabled-listeners.md). +Application Gateway enables customers to securely store TLS certificates in Azure Key Vault. When using a key vault resource, it is important that the gateway always has access to the linked key vault. If your Application Gateway is unable to fetch the certificate, the associated HTTPS listeners will be placed in a disabled state. [Learn more](../application-gateway/disabled-listeners.md). -This article helps you understand the details of key vault error codes you might encounter, including what is causing these errors. This article also contains steps to resolve such misconfigurations. +This article helps you understand the details of the error codes and the steps to resolve such key vault misconfigurations. > [!TIP] > Use a secret identifier that doesn't specify a version. This way, Azure Application Gateway will automatically rotate the certificate, if a newer version is available in Azure Key Vault. An example of a secret URI without a version is: `https://myvault.vault.azure.net/secrets/mysecret/`. ## List of error codes and their details -The following sections cover various errors you might encounter. You can find the details in Azure Advisor, and use this troubleshooting article to fix the problems. For more information, see [Create Azure Advisor alerts on new recommendations by using the Azure portal](../advisor/advisor-alerts-portal.md). +The following sections describe the various errors you might encounter. You can verify if your gateway has any such problem by visting [**Azure Advisor**](./key-vault-certs.md#investigating-and-resolving-key-vault-errors) for your account, and use this troubleshooting article to fix the problem. We recommend configuring Azure Advisor alerts to stay informed when a key vault problem is detected for your gateway. > [!NOTE] > Azure Application Gateway generates logs for key vault diagnostics every four hours. If the diagnostic continues to show the error after you have fixed the configuration, you might have to wait for the logs to be refreshed. @@ -29,19 +29,37 @@ The following sections cover various errors you might encounter. You can find th [comment]: # (Error Code 1) ### Error code: UserAssignedIdentityDoesNotHaveGetPermissionOnKeyVault -**Description:** The associated user-assigned managed identity doesn't have the "Get" permission. +**Description:** The associated user-assigned managed identity doesn't have the required permission. -**Resolution:** Configure the access policy of Key Vault to grant the user-assigned managed identity this permission on secrets. -1. Go to the linked key vault in the Azure portal. -1. Open the **Access policies** pane. -1. 
For **Permission model**, select **Vault access policy**. -1. Under **Secret Management Operations**, select the **Get** permission. -1. Select **Save**. +**Resolution:** Configure the access policies of your key vault to grant the user-assigned managed identity permission on secrets. You may do so in any of the following ways: + + **Vault access policy** + 1. Go to the linked key vault in the Azure portal. + 1. Open the **Access policies** blade. + 1. For **Permission model**, select **Vault access policy**. + 1. Under **Secret Management Operations**, select the **Get** permission. + 1. Select **Save**. :::image type="content" source="./media/application-gateway-key-vault-common-errors/no-get-permssion-for-managed-identity.png " alt-text=" Screenshot that shows how to resolve the Get permission error."::: For more information, see [Assign a Key Vault access policy by using the Azure portal](../key-vault/general/assign-access-policy-portal.md). + **Azure role-based access control** + 1. Go to the linked key vault in the Azure portal. + 1. Open the **Access policies** blade. + 1. For **Permission model**, select **Azure role-based access control**. + 1. After this, navigate to **Access Control (IAM)** blade to configure permissions. + 1. **Add role assignment** for your managed identity by choosing the following
        + a. **Role**: Key Vault Secrets User
        + b. **Assign access to**: Managed identity
        + c. **Members**: select the user-assigned managed identity which you've associated with your application gateway.
        + 1. Select **Review + assign**. + +For more information, see [Azure role-based access control in Key Vault](../key-vault/general/rbac-guide.md). + +> [!NOTE] +> Portal support for adding a new key vault-based certificate is currently not available when using **Azure role-based access control**. You can accomplish it by using ARM template, CLI, or PowerShell. Visit [this page](./key-vault-certs.md#key-vault-azure-role-based-access-control-permission-model) for guidance. + [comment]: # (Error Code 2) ### Error code: SecretDisabled @@ -74,12 +92,9 @@ On the other hand, if a certificate object is permanently deleted, you will need **Description:** The associated user-assigned managed identity has been deleted. -**Resolution:** To use the identity again: -1. Re-create a managed identity with the same name that was used previously, and under the same resource group. Resource activity logs contain more details. -1. After you create the identity, go to **Application Gateway - Access Control (IAM)**. Assign the identity the **Reader** role, at a minimum. -1. Finally, go to the desired Key Vault resource, and set its access policies to grant **Get** secret permissions for this new managed identity. - -For more information, see [How integration works](./key-vault-certs.md#how-integration-works). +**Resolution:** Create a new managed identity and use it with the key vault. +1. Re-create a managed identity with the same name that was previously used, and under the same resource group. (**TIP**: Refer to resource Activity Logs for naming details). +1. Go to the desired key vault resource, and set its access policies to grant this new managed identity the required permission. You can follow the same steps as mentioned under [UserAssignedIdentityDoesNotHaveGetPermissionOnKeyVault](./application-gateway-key-vault-common-errors.md#error-code-userassignedidentitydoesnothavegetpermissiononkeyvault). [comment]: # (Error Code 5) ### Error code: KeyVaultHasRestrictedAccess @@ -117,5 +132,6 @@ Select **Managed deleted vaults**. From here, you can find the deleted Key Vault These troubleshooting articles might be helpful as you continue to use Application Gateway: +- [Understanding and fixing disabled listeners](disabled-listeners.md) - [Azure Application Gateway Resource Health overview](resource-health-overview.md) -- [Troubleshoot Azure Application Gateway session affinity issues](how-to-troubleshoot-application-gateway-session-affinity-issues.md) + diff --git a/articles/application-gateway/application-gateway-websocket.md b/articles/application-gateway/application-gateway-websocket.md index 29cc7e1024826..56d573e95a591 100644 --- a/articles/application-gateway/application-gateway-websocket.md +++ b/articles/application-gateway/application-gateway-websocket.md @@ -25,6 +25,9 @@ To establish a WebSocket connection, a specific HTTP-based handshake is exchange ![Diagram compares a client interacting with a web server, connecting twice to get two replies, with a WebSocket interaction, where a client connects to a server once to get multiple replies.](./media/application-gateway-websocket/websocket.png) +> [!NOTE] +> As described, the HTTP protocol is used only to perform a handshake when establishing a WebSocket connection. Once the handshake is completed, a WebSocket connection gets opened for transmitting the data, and the Web Application Firewall (WAF) cannot parse any contents. Therefore, WAF does not perform any inspections on such data. 
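To make the handshake behavior concrete, the sketch below shows a bare-bones .NET client opening a WebSocket connection through a gateway listener. The host name and path are placeholders for your own frontend DNS name and backend route; any WebSocket-capable client follows the same upgrade-then-stream pattern.

```csharp
using System;
using System.Net.WebSockets;
using System.Text;
using System.Threading;
using System.Threading.Tasks;

class WebSocketClientSample
{
    static async Task Main()
    {
        // Placeholder address: the DNS name that resolves to the application
        // gateway frontend, with an HTTPS/WSS listener configured on port 443.
        var uri = new Uri("wss://contoso.example.com/chat");

        using var client = new ClientWebSocket();

        // ConnectAsync performs the HTTP upgrade handshake described above.
        // Once it completes, data flows over the established WebSocket and is
        // no longer plain HTTP.
        await client.ConnectAsync(uri, CancellationToken.None);

        var payload = Encoding.UTF8.GetBytes("hello");
        await client.SendAsync(new ArraySegment<byte>(payload),
            WebSocketMessageType.Text, endOfMessage: true, CancellationToken.None);

        var buffer = new byte[1024];
        WebSocketReceiveResult result =
            await client.ReceiveAsync(new ArraySegment<byte>(buffer), CancellationToken.None);
        Console.WriteLine(Encoding.UTF8.GetString(buffer, 0, result.Count));

        await client.CloseAsync(WebSocketCloseStatus.NormalClosure, "done", CancellationToken.None);
    }
}
```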
+ ### Listener configuration element An existing HTTP listener can be used to support WebSocket traffic. The following is a snippet of an httpListeners element from a sample template file. You would need both HTTP and HTTPS listeners to support WebSocket and secure WebSocket traffic. Similarly you can use the portal or Azure PowerShell to create an application gateway with listeners on port 80/443 to support WebSocket traffic. @@ -118,4 +121,4 @@ Another reason for this is that application gateway backend health probe support ## Next steps -After learning about WebSocket support, go to [create an application gateway](quick-create-powershell.md) to get started with a WebSocket enabled web application. \ No newline at end of file +After learning about WebSocket support, go to [create an application gateway](quick-create-powershell.md) to get started with a WebSocket enabled web application. diff --git a/articles/application-gateway/configuration-infrastructure.md b/articles/application-gateway/configuration-infrastructure.md index ac73f013be498..8a8bd2f7e383b 100644 --- a/articles/application-gateway/configuration-infrastructure.md +++ b/articles/application-gateway/configuration-infrastructure.md @@ -15,7 +15,7 @@ The application gateway infrastructure includes the virtual network, subnets, ne ## Virtual network and dedicated subnet -An application gateway is a dedicated deployment in your virtual network. Within your virtual network, a dedicated subnet is required for the application gateway. You can have multiple instances of a given application gateway deployment in a subnet. You can also deploy other application gateways in the subnet. But you can't deploy any other resource in the application gateway subnet. You can't mix Standard_v2 and Standard Azure Application Gateway on the same subnet. +An application gateway is a dedicated deployment in your virtual network. Within your virtual network, a dedicated subnet is required for the application gateway. You can have multiple instances of a given application gateway deployment in a subnet. You can also deploy other application gateways in the subnet. But you can't deploy any other resource in the application gateway subnet. You can't mix v1 and v2 Azure Application Gateway SKUs on the same subnet. > [!NOTE] > [Virtual network service endpoint policies](../virtual-network/virtual-network-service-endpoint-policies-overview.md) are currently not supported in an Application Gateway subnet. diff --git a/articles/application-gateway/http-response-codes.md b/articles/application-gateway/http-response-codes.md index 9e4613fa0cf95..425cb3e069ba8 100644 --- a/articles/application-gateway/http-response-codes.md +++ b/articles/application-gateway/http-response-codes.md @@ -83,13 +83,13 @@ An HTTP 499 response is presented if a client request that is sent to applicatio #### 500 – Internal Server Error -Azure Application Gateway shouldn't exhibit 500 response codes. Please open a support request if you see this code, because this issue is an internal error to the service. For information on how to open a support case, see [Create an Azure support request](/azure/azure-portal/supportability/how-to-create-azure-support-request). +Azure Application Gateway shouldn't exhibit 500 response codes. Please open a support request if you see this code, because this issue is an internal error to the service. For information on how to open a support case, see [Create an Azure support request](../azure-portal/supportability/how-to-create-azure-support-request.md). 
#### 502 – Bad Gateway HTTP 502 errors can have several root causes, for example: - NSG, UDR, or custom DNS is blocking access to backend pool members. -- Back-end VMs or instances of [virtual machine scale sets](/azure/virtual-machine-scale-sets/overview) aren't responding to the default health probe. +- Back-end VMs or instances of [virtual machine scale sets](../virtual-machine-scale-sets/overview.md) aren't responding to the default health probe. - Invalid or improper configuration of custom health probes. - Azure Application Gateway's [back-end pool isn't configured or empty](application-gateway-troubleshooting-502.md#empty-backendaddresspool). - None of the VMs or instances in [virtual machine scale set are healthy](application-gateway-troubleshooting-502.md#unhealthy-instances-in-backendaddresspool). @@ -103,4 +103,4 @@ HTTP 504 errors are presented if a request is sent to application gateways using ## Next steps -If the information in this article doesn't help to resolve the issue, [submit a support ticket](https://azure.microsoft.com/support/options/). +If the information in this article doesn't help to resolve the issue, [submit a support ticket](https://azure.microsoft.com/support/options/). \ No newline at end of file diff --git a/articles/application-gateway/key-vault-certs.md b/articles/application-gateway/key-vault-certs.md index a57e50ee7f8b5..0be672b86e06c 100644 --- a/articles/application-gateway/key-vault-certs.md +++ b/articles/application-gateway/key-vault-certs.md @@ -78,7 +78,7 @@ As of March 15, 2021, Key Vault recognizes Application Gateway as a trusted serv When you're using a restricted Key Vault, use the following steps to configure Application Gateway to use firewalls and virtual networks: > [!TIP] -> The following steps are not required if your Key Vault has a Private Endpoint enabled. The application gateway can access the Key Vault using the private IP address. +> Steps 1-3 are not required if your Key Vault has a Private Endpoint enabled. The application gateway can access the Key Vault using the private IP address. 1. In the Azure portal, in your Key Vault, select **Networking**. 1. On the **Firewalls and virtual networks** tab, select **Selected networks**. diff --git a/articles/application-gateway/tutorial-ssl-cli.md b/articles/application-gateway/tutorial-ssl-cli.md index 2929f5e7fa5e1..6a588f2352cfc 100644 --- a/articles/application-gateway/tutorial-ssl-cli.md +++ b/articles/application-gateway/tutorial-ssl-cli.md @@ -102,6 +102,7 @@ az network application-gateway create \ --frontend-port 443 \ --http-settings-port 80 \ --http-settings-protocol Http \ + --priority "1" \ --public-ip-address myAGPublicIPAddress \ --cert-file appgwcert.pfx \ --cert-password "Azure123456!" diff --git a/articles/applied-ai-services/form-recognizer/compose-custom-models-preview.md b/articles/applied-ai-services/form-recognizer/compose-custom-models-preview.md index d10922ecc3df4..be52105a10d15 100644 --- a/articles/applied-ai-services/form-recognizer/compose-custom-models-preview.md +++ b/articles/applied-ai-services/form-recognizer/compose-custom-models-preview.md @@ -7,7 +7,7 @@ manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: how-to -ms.date: 02/15/2022 +ms.date: 06/06/2022 ms.author: lajanuar recommendations: false --- @@ -15,11 +15,11 @@ recommendations: false # Compose custom models v3.0 | Preview > [!NOTE] -> This how-to guide references Form Recognizer v3.0 (preview). 
To use Form Recognizer v2.1 (GA), see [Compose custom models v2.1.](compose-custom-models.md). +> This how-to guide references Form Recognizer v3.0 (preview). To use Form Recognizer v2.1 (GA), see [Compose custom models v2.1](compose-custom-models.md). -A composed model is created by taking a collection of custom models and assigning them to a single model built from your form types. You can assign up to 100 trained custom models to a single composed model. When analyze documents with a composed model, Form Recognizer will first classify the form you submitted, then choose the best matching assigned model, and return results the results. +A composed model is created by taking a collection of custom models and assigning them to a single model ID. You can assign up to 100 trained custom models to a single composed model ID. When a document is submitted to a composed model, the service performs a classification step to decide which custom model accurately represents the form presented for analysis. Composed models are useful when you've trained several models and want to group them to analyze similar form types. For example, your composed model might include custom models trained to analyze your supply, equipment, and furniture purchase orders. Instead of manually trying to select the appropriate model, you can use a composed model to determine the appropriate custom model for each analysis and extraction. -To learn more, see [Composed custom models](concept-composed-models.md) +To learn more, see [Composed custom models](concept-composed-models.md). In this article, you'll learn how to create and use composed custom models to analyze your forms and documents. @@ -27,7 +27,7 @@ In this article, you'll learn how to create and use composed custom models to an To get started, you'll need the following resources: -* **An Azure subscription**. You can [create a free Azure subscription](https://azure.microsoft.com/free/cognitive-services/) +* **An Azure subscription**. You can [create a free Azure subscription](https://azure.microsoft.com/free/cognitive-services/). * **A Form Recognizer instance**. Once you have your Azure subscription, [create a Form Recognizer resource](https://portal.azure.com/#create/Microsoft.CognitiveServicesFormRecognizer) in the Azure portal to get your key and endpoint. If you have an existing Form Recognizer resource, navigate directly to your resource page. You can use the free pricing tier (F0) to try the service, and upgrade later to a paid tier for production. @@ -44,7 +44,7 @@ To get started, you'll need the following resources: ## Create your custom models -First, you'll need to a set of custom models to compose. You can use the Form Recognizer Studio, REST API, or client-library SDKs. The steps are as follows: +First, you'll need a set of custom models to compose. You can use the Form Recognizer Studio, REST API, or client-library SDKs. The steps are as follows: * [**Assemble your training dataset**](#assemble-your-training-dataset) * [**Upload your training set to Azure blob storage**](#upload-your-training-dataset) @@ -74,13 +74,13 @@ If you want to use manually labeled data, you'll also have to upload the *.label When you [train your model](https://formrecognizer.appliedai.azure.com/studio/custommodel/projects) with labeled data, the model uses supervised learning to extract values of interest, using the labeled forms you provide. 
Labeled data results in better-performing models and can produce models that work with complex forms or forms containing values without keys. -Form Recognizer uses the [prebuilt-layout model](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-2/operations/AnalyzeDocument) API to learn the expected sizes and positions of printed and handwritten text elements and extract tables. Then it uses user-specified labels to learn the key/value associations and tables in the documents. We recommend that you use five manually labeled forms of the same type (same structure) to get started with training a new model. Then, add more labeled data, as needed, to improve the model accuracy. Form Recognizer enables training a model to extract key-value pairs and tables using supervised learning capabilities. +Form Recognizer uses the [prebuilt-layout model](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-2/operations/AnalyzeDocument) API to learn the expected sizes and positions of typeface and handwritten text elements and extract tables. Then it uses user-specified labels to learn the key/value associations and tables in the documents. We recommend that you use five manually labeled forms of the same type (same structure) to get started with training a new model. Then, add more labeled data, as needed, to improve the model accuracy. Form Recognizer enables training a model to extract key-value pairs and tables using supervised learning capabilities. ### [Form Recognizer Studio](#tab/studio) -To create custom models, you start with configuring your project: +To create custom models, start with configuring your project: -1. From the Studio home, select the [Custom form project](https://formrecognizer.appliedai.azure.com/studio/customform/projects) to open the Custom form home page. +1. From the Studio homepage, select [**Create new**](https://formrecognizer.appliedai.azure.com/studio/custommodel/projects) from the Custom model card. 1. Use the ➕ **Create a project** command to start the new project configuration wizard. @@ -100,7 +100,7 @@ See [Form Recognizer Studio: labeling as tables](quickstarts/try-v3-form-recogni ### [REST API](#tab/rest) -Training with labels leads to better performance in some scenarios. To train with labels, you need to have special label information files (*\.pdf.labels.json*) in your blob storage container alongside the training documents. +Training with labels leads to better performance in some scenarios. To train with labels, you need to have special label information files (*\.pdf.labels.json*) in your blob storage container alongside the training documents. Label files contain key-value associations that a user has entered manually. They're needed for labeled data training, but not every source file needs to have a corresponding label file. Source files without labels will be treated as ordinary training documents. We recommend five or more labeled files for reliable training. You can use a UI tool like [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio/customform/projects) to generate these files. @@ -115,7 +115,7 @@ Training with labels leads to better performance in some scenarios. 
To train wit |Language |Method| |--|--| |**C#**|[**StartBuildModel**](/dotnet/api/azure.ai.formrecognizer.documentanalysis.documentmodeladministrationclient.startbuildmodel?view=azure-dotnet-preview#azure-ai-formrecognizer-documentanalysis-documentmodeladministrationclient-startbuildmodel&preserve-view=true)| -|**Java**| [**beginBuildModel**](/java/api/com.azure.ai.formrecognizer.administration.documentmodeladministrationclient.beginbuildmodel?view=azure-java-preview&preserve-view=true)| +|**Java**| [**beginBuildModel**](/java/api/com.azure.ai.formrecognizer.administration.documentmodeladministrationclient.beginbuildmodel?view=azure-java-preview&preserve-view=true)| |**JavaScript** | [**beginBuildModel**](/javascript/api/@azure/ai-form-recognizer/documentmodeladministrationclient?view=azure-node-preview#@azure-ai-form-recognizer-documentmodeladministrationclient-beginbuildmodel&preserve-view=true)| | **Python** | [**begin_build_model**](/python/api/azure-ai-formrecognizer/azure.ai.formrecognizer.aio.documentmodeladministrationclient?view=azure-python-preview#azure-ai-formrecognizer-aio-documentmodeladministrationclient-begin-build-model&preserve-view=true) @@ -159,8 +159,6 @@ When you train models using the [**Form Recognizer Studio**](https://formrecogni 1. Once the model is ready, use the **Test** command to validate it with your test documents and observe the results. - - #### Analyze documents The custom model **Analyze** operation requires you to provide the `modelID` in the call to Form Recognizer. You should provide the composed model ID for the `modelID` parameter in your applications. @@ -170,7 +168,7 @@ The custom model **Analyze** operation requires you to provide the `modelID` in #### Manage your composed models You can manage your custom models throughout life cycles: - + * Test and validate new documents. * Download your model to use in your applications. * Delete your model when its lifecycle is complete. @@ -194,7 +192,7 @@ The [compose model API](https://westus.dev.cognitive.microsoft.com/docs/services #### Analyze documents -You can make an [**Analyze document**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-2/operations/AnalyzeDocument) request using a unique model name in the request parameters. +To make an [**Analyze document**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-2/operations/AnalyzeDocument) request, use a unique model name in the request parameters. :::image type="content" source="media/custom-model-analyze-request.png" alt-text="Screenshot of a custom model request URL."::: @@ -223,7 +221,7 @@ You can use the programming language of your choice to create a composed model: #### Analyze documents -Once you have built your composed model, it can be used to analyze forms and documents You can use your composed `model ID` and let the service decide which of your aggregated custom models fits best according to the document provided. +Once you've built your composed model, you can use it to analyze forms and documents. Use your composed `model ID` and let the service decide which of your aggregated custom models fits best according to the document provided. |Programming language| Code sample | |--|--| @@ -234,7 +232,7 @@ Once you have built your composed model, it can be used to analyze forms and doc ## Manage your composed models -Custom models can be managed throughout their lifecycle. 
You can view a list of all custom models under your subscription, retrieve information about a specific custom model, and delete custom models from your account. +You can manage a custom models at each stage in its life cycles. You can view a list of all custom models under your subscription, retrieve information about a specific custom model, and delete custom models from your account. |Programming language| Code sample | |--|--| @@ -247,7 +245,7 @@ Custom models can be managed throughout their lifecycle. You can view a list of ## Next steps -Try one of our quickstarts to get started using Form Recognizer preview +Try one of our Form Recognizer quickstarts: > [!div class="nextstepaction"] > [Form Recognizer Studio](quickstarts/try-v3-form-recognizer-studio.md) diff --git a/articles/applied-ai-services/form-recognizer/compose-custom-models.md b/articles/applied-ai-services/form-recognizer/compose-custom-models.md index 2982ab482a603..9656fa1eccb73 100644 --- a/articles/applied-ai-services/form-recognizer/compose-custom-models.md +++ b/articles/applied-ai-services/form-recognizer/compose-custom-models.md @@ -7,7 +7,7 @@ manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: how-to -ms.date: 02/15/2022 +ms.date: 06/06/2022 ms.author: lajanuar recommendations: false --- @@ -27,7 +27,7 @@ In this article, you'll learn how to create Form Recognizer custom and composed ## Sample Labeling tool -You can see how data is extracted from custom forms by trying our Sample Labeling tool. You'll need the following resources: +Try extracting data from custom forms using our Sample Labeling tool. You'll need the following resources: * An Azure subscription—you can [create one for free](https://azure.microsoft.com/free/cognitive-services/) @@ -41,12 +41,12 @@ You can see how data is extracted from custom forms by trying our Sample Labelin In the Form Recognizer UI: 1. Select **Use Custom to train a model with labels and get key value pairs**. - - :::image type="content" source="media/label-tool/fott-use-custom.png" alt-text="Screenshot: FOTT tool select custom option."::: + + :::image type="content" source="media/label-tool/fott-use-custom.png" alt-text="Screenshot of the FOTT tool select custom model option."::: 1. In the next window, select **New project**: - :::image type="content" source="media/label-tool/fott-new-project.png" alt-text="Screenshot: FOTT tool select new project."::: + :::image type="content" source="media/label-tool/fott-new-project.png" alt-text="Screenshot of the FOTT tool select new project option."::: ## Create your models @@ -74,7 +74,7 @@ You [train your model](./quickstarts/try-sdk-rest-api.md#train-a-custom-model) When you train with labeled data, the model uses supervised learning to extract values of interest, using the labeled forms you provide. Labeled data results in better-performing models and can produce models that work with complex forms or forms containing values without keys. -Form Recognizer uses the [Layout](concept-layout.md) API to learn the expected sizes and positions of printed and handwritten text elements and extract tables. Then it uses user-specified labels to learn the key/value associations and tables in the documents. We recommend that you use five manually labeled forms of the same type (same structure) to get started when training a new model and add more labeled data as needed to improve the model accuracy. 
Form Recognizer enables training a model to extract key value pairs and tables using supervised learning capabilities. +Form Recognizer uses the [Layout](concept-layout.md) API to learn the expected sizes and positions of typeface and handwritten text elements and extract tables. Then it uses user-specified labels to learn the key/value associations and tables in the documents. We recommend that you use five manually labeled forms of the same type (same structure) to get started when training a new model. Add more labeled data as needed to improve the model accuracy. Form Recognizer enables training a model to extract key value pairs and tables using supervised learning capabilities. [Get started with Train with labels](label-tool.md) @@ -104,7 +104,7 @@ When you train models using the [**Form Recognizer Sample Labeling tool**](https ### [**REST API**](#tab/rest-api) -The [**REST API**](./quickstarts/try-sdk-rest-api.md?pivots=programming-language-rest-api#train-a-custom-model), will return a `201 (Success)` response with a **Location** header. The value of the last parameter in this header is the model ID for the newly trained model: +The [**REST API**](./quickstarts/try-sdk-rest-api.md?pivots=programming-language-rest-api#train-a-custom-model) will return a `201 (Success)` response with a **Location** header. The value of the last parameter in this header is the model ID for the newly trained model: :::image type="content" source="media/model-id.png" alt-text="Screenshot: the returned location header containing the model ID."::: @@ -124,7 +124,7 @@ The [**REST API**](./quickstarts/try-sdk-rest-api.md?pivots=programming-language #### Compose your custom models -After you have gathered your custom models corresponding to a single form type, you can compose them into a single model. +After you've gathered your custom models corresponding to a single form type, you can compose them into a single model. ### [**Form Recognizer Sample Labeling tool**](#tab/fott) @@ -132,7 +132,7 @@ The **Sample Labeling tool** enables you to quickly get started training models After you have completed training, compose your models as follows: -1. On the left rail menu, select the **Model Compose icon** (merging arrow). +1. On the left rail menu, select the **Model Compose** icon (merging arrow). 1. In the main window, select the models you wish to assign to a single model ID. Models with the arrows icon are already composed models. @@ -142,7 +142,7 @@ After you have completed training, compose your models as follows: When the operation completes, your newly composed model will appear in the list. - :::image type="content" source="media/custom-model-compose.png" alt-text="Screenshot: model compose window." lightbox="media/custom-model-compose-expanded.png"::: + :::image type="content" source="media/custom-model-compose.png" alt-text="Screenshot of the model compose window." lightbox="media/custom-model-compose-expanded.png"::: ### [**REST API**](#tab/rest-api) @@ -168,7 +168,7 @@ Use the programming language code of your choice to create a composed model that ### [**Form Recognizer Sample Labeling tool**](#tab/fott) -1. On the tool's left-pane menu, select the **Analyze icon** (lightbulb). +1. On the tool's left-pane menu, select the **Analyze icon** (light bulb). 1. Choose a local file or image URL to analyze. 
@@ -196,13 +196,13 @@ Using the programming language of your choice to analyze a form or document with --- -Test your newly trained models by [analyzing forms](./quickstarts/try-sdk-rest-api.md#analyze-forms-with-a-custom-model) that were not part of the training dataset. Depending on the reported accuracy, you may want to do further training to improve the model. You can continue further training to [improve results](label-tool.md#improve-results). +Test your newly trained models by [analyzing forms](./quickstarts/try-sdk-rest-api.md#analyze-forms-with-a-custom-model) that weren't part of the training dataset. Depending on the reported accuracy, you may want to do further training to improve the model. You can continue further training to [improve results](label-tool.md#improve-results). ## Manage your custom models You can [manage your custom models](./quickstarts/try-sdk-rest-api.md#manage-custom-models) throughout their lifecycle by viewing a [list of all custom models](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v2-1/operations/GetCustomModels) under your subscription, retrieving information about [a specific custom model](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v2-1/operations/GetCustomModel), and [deleting custom models](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v2-1/operations/DeleteCustomModel) from your account. -Great! You have learned the steps to create custom and composed models and use them in your Form Recognizer projects and applications. +Great! You've learned the steps to create custom and composed models and use them in your Form Recognizer projects and applications. ## Next steps diff --git a/articles/applied-ai-services/form-recognizer/concept-business-card.md b/articles/applied-ai-services/form-recognizer/concept-business-card.md index e494cd436c226..915de06124105 100644 --- a/articles/applied-ai-services/form-recognizer/concept-business-card.md +++ b/articles/applied-ai-services/form-recognizer/concept-business-card.md @@ -1,16 +1,15 @@ --- title: Form Recognizer business card model titleSuffix: Azure Applied AI Services -description: Concepts encompassing data extraction and analysis using prebuilt business card model +description: Concepts related to data extraction and analysis using the prebuilt business card model. author: laujan manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: conceptual -ms.date: 03/11/2022 +ms.date: 06/06/2022 ms.author: lajanuar recommendations: false -ms.custom: ignite-fall-2021 --- @@ -34,7 +33,7 @@ The following tools are supported by Form Recognizer v3.0: | Feature | Resources | Model ID | |----------|-------------|-----------| -|**Business card model**|
        • [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com)
        • [**REST API**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/AnalyzeDocument)
        • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md)
        • [**Python SDK**](quickstarts/try-v3-python-sdk.md)
        • [**Java SDK**](quickstarts/try-v3-java-sdk.md)
        • [**JavaScript SDK**](quickstarts/try-v3-javascript-sdk.md)
        |**prebuilt-businessCard**| +|**Business card model**|
        • [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com)
        • [**REST API**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument)
        • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md)
        • [**Python SDK**](quickstarts/try-v3-python-sdk.md)
        • [**Java SDK**](quickstarts/try-v3-java-sdk.md)
        • [**JavaScript SDK**](quickstarts/try-v3-javascript-sdk.md)
        |**prebuilt-businessCard**| ### Try Form Recognizer @@ -96,7 +95,7 @@ You'll need a business card document. You can use our [sample business card docu | Model | Language—Locale code | Default | |--------|:----------------------|:---------| -|Business card|
        • English (United States)—en-US
        • English (Australia)—en-AU
        • English (Canada)—en-CA
        • English (United Kingdom)—en-GB
        • English (India)—en-IN
        | Autodetected | +|Business card|
        • English (United States)—en-US
        • English (Australia)—en-AU
        • English (Canada)—en-CA
        • English (United Kingdom)—en-GB
        • English (India)—en-IN
        • English (Japan)—en-JP
        • Japanese (Japan)—ja-JP
        | Autodetected (en-US or ja-JP) | ## Field extraction diff --git a/articles/applied-ai-services/form-recognizer/concept-composed-models.md b/articles/applied-ai-services/form-recognizer/concept-composed-models.md index a0078547b6157..29b3d5a3dabff 100644 --- a/articles/applied-ai-services/form-recognizer/concept-composed-models.md +++ b/articles/applied-ai-services/form-recognizer/concept-composed-models.md @@ -7,7 +7,7 @@ manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: conceptual -ms.date: 03/25/2022 +ms.date: 06/06/2022 ms.author: lajanuar recommendations: false --- @@ -18,7 +18,7 @@ recommendations: false With composed models, you can assign multiple custom models to a composed model called with a single model ID. It's useful when you've trained several models and want to group them to analyze similar form types. For example, your composed model might include custom models trained to analyze your supply, equipment, and furniture purchase orders. Instead of manually trying to select the appropriate model, you can use a composed model to determine the appropriate custom model for each analysis and extraction. -* ```Custom form```and ```Custom document``` models can be composed together into a single composed model when they're trained with the same API version or an API version later than ```2021-01-30-preview```. For more information on composing custom template and custom neural models, see [compose model limits](#compose-model-limits). +* ```Custom form```and ```Custom document``` models can be composed together into a single composed model when they're trained with the same API version or an API version later than ```2021-06-30-preview```. For more information on composing custom template and custom neural models, see [compose model limits](#compose-model-limits). * With the model compose operation, you can assign up to 100 trained custom models to a single composed model. To analyze a document with a composed model, Form Recognizer first classifies the submitted form, chooses the best-matching assigned model, and returns results. * For **_custom template models_**, the composed model can be created using variations of a custom template or different form types. This operation is useful when incoming forms may belong to one of several templates. * The response will include a ```docType``` property to indicate which of the composed models was used to analyze the document. @@ -30,10 +30,10 @@ With composed models, you can assign multiple custom models to a composed model ### Composed model compatibility - |Custom model type | API Version |Custom form 2021-01-30-preview (v3.0)| Custom document 2021-01-30-preview(v3.0) | Custom form GA version (v2.1) or earlier| + |Custom model type | API Version |Custom form 2021-06-30-preview (v3.0)| Custom document 2021-06-30-preview(v3.0) | Custom form GA version (v2.1) or earlier| |--|--|--|--|--| -|**Custom template** (updated custom form)| 2021-01-30-preview | ✱| ✓ | X | -|**Custom neural**| trained with current API version (2021-01-30-preview) |✓ |✓ | X | +|**Custom template** (updated custom form)| 2021-06-30-preview | ✱| ✓ | X | +|**Custom neural**| trained with current API version (2021-06-30-preview) |✓ |✓ | X | |**Custom form**| Custom form GA version (v2.1) or earlier | X | X| ✓| **Table symbols**: ✔—supported; **X—not supported; ✱—unsupported for this API version, but will be supported in a future API version. 
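As an illustration only, the following sketch shows what a compose request against the v3 preview REST API can look like. The `documentModels:compose` route, the `componentModels` field name, the `api-version` value, and the model IDs shown here are assumptions and placeholders; confirm them against the current REST API reference before use.

```REST
POST https://{endpoint}/formrecognizer/documentModels:compose?api-version=2022-06-30-preview
Ocp-Apim-Subscription-Key: {key}
Content-Type: application/json

{
  "modelId": "purchase-orders-composed",
  "description": "Composed model for supply, equipment, and furniture purchase orders",
  "componentModels": [
    { "modelId": "supply-po-model" },
    { "modelId": "equipment-po-model" },
    { "modelId": "furniture-po-model" }
  ]
}
```

The service should respond asynchronously with an operation to poll; once the composed model is ready, its `modelId` is used in analyze calls just like any individual custom model.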
diff --git a/articles/applied-ai-services/form-recognizer/concept-custom-neural.md b/articles/applied-ai-services/form-recognizer/concept-custom-neural.md index b1be49897c58a..c87daf443fc1f 100644 --- a/articles/applied-ai-services/form-recognizer/concept-custom-neural.md +++ b/articles/applied-ai-services/form-recognizer/concept-custom-neural.md @@ -1,13 +1,13 @@ --- title: Form Recognizer custom neural model titleSuffix: Azure Applied AI Services -description: Learn about custom neural (neural) model type, its features and how you train a model with high accuracy to extract data from structured and unstructured documents +description: Learn about custom neural (neural) model type, its features and how you train a model with high accuracy to extract data from structured and unstructured documents. author: laujan manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: conceptual -ms.date: 02/15/2022 +ms.date: 06/06/2022 ms.author: lajanuar ms.custom: references_regions recommendations: false @@ -23,19 +23,34 @@ Custom neural models or neural models are a deep learned model that combines lay |semi-structured | invoices, purchase orders | |unstructured | contracts, letters| -Custom neural models share the same labeling format and strategy as custom template models. Currently custom neural models only support a subset of the field types supported by custom template models. +Custom neural models share the same labeling format and strategy as [custom template](concept-custom-template.md) models. Currently custom neural models only support a subset of the field types supported by custom template models. ## Model capabilities Custom neural models currently only support key-value pairs and selection marks, future releases will include support for structured fields (tables) and signature. -| Form fields | Selection marks | Tables | Signature | Region | -|--|--|--|--|--| -| Supported| Supported | Unsupported | Unsupported | Unsupported | +| Form fields | Selection marks | Tabular fields | Signature | Region | +|:--:|:--:|:--:|:--:|:--:| +| Supported | Supported | Supported | Unsupported | Unsupported | + +## Tabular fields + +With the release of API version **2022-06-30-preview**, custom neural models will support tabular fields (tables): + +* Models trained with API version 2022-06-30-preview or later will accept tabular field labels. +* Documents analyzed with custom neural models using API version 2022-06-30-preview or later will produce tabular fields aggregated across the tables. +* The results can be found in the ```analyzeResult``` object's ```documents``` array that is returned following an analysis operation. + +Tabular fields support **cross page tables** by default: + +* To label a table that spans multiple pages, label each row of the table across the different pages in a single table. +* As a best practice, ensure that your dataset contains a few samples of the expected variations. For example, include samples where the entire table is on a single page and where tables span two or more pages. + +Tabular fields are also useful when extracting repeating information within a document that isn't recognized as a table. For example, a repeating section of work experiences in a resume can be labeled and extracted as a tabular field. ## Supported regions -In public preview custom neural models can only be trained in select Azure regions. 
+For the **2022-06-30-preview**, custom neural models can only be trained in the following Azure regions: * AustraliaEast * BrazilSouth @@ -57,13 +72,14 @@ In public preview custom neural models can only be trained in select Azure regio * WestUS2 * WestUS3 -You can copy a model trained in one of the regions listed above to any other region for use. +> [!TIP] +> You can copy a model trained in one of the select regions listed above to **any other region** and use it accordingly. ## Best practices -Custom neural models differ from custom template models in a few different ways. +Custom neural models differ from custom template models in a few different ways. The custom template or model relies on a consistent visual template to extract the labeled data. Custom neural models support structured, semi-structured, and unstructured documents to extract fields. When you're choosing between the two model types, start with a neural model and test to determine if it supports your functional needs. -### Dealing with variations +### Dealing with variations Custom neural models can generalize across different formats of a single document type. As a best practice, create a single model for all variations of a document type. Add at least five labeled samples for each of the different variations to the training dataset. @@ -96,12 +112,12 @@ Custom neural models are only available in the [v3 API](v3-migration-guide.md). | Document Type | REST API | SDK | Label and Test Models| |--|--|--|--| -| Custom document | [Form Recognizer 3.0 (preview)](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/AnalyzeDocument)| [Form Recognizer Preview SDK](quickstarts/try-v3-python-sdk.md)| [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio) +| Custom document | [Form Recognizer 3.0 (preview)](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument)| [Form Recognizer Preview SDK](quickstarts/try-v3-python-sdk.md)| [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio) The build operation to train model supports a new ```buildMode``` property, to train a custom neural model, set the ```buildMode``` to ```neural```. ```REST -https://{endpoint}/formrecognizer/documentModels:build?api-version=2022-01-30-preview +https://{endpoint}/formrecognizer/documentModels:build?api-version=2022-06-30 { "modelId": "string", diff --git a/articles/applied-ai-services/form-recognizer/concept-custom-template.md b/articles/applied-ai-services/form-recognizer/concept-custom-template.md index 23d5d9fc5912c..e226c78b3aeff 100644 --- a/articles/applied-ai-services/form-recognizer/concept-custom-template.md +++ b/articles/applied-ai-services/form-recognizer/concept-custom-template.md @@ -7,7 +7,7 @@ manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: conceptual -ms.date: 02/15/2022 +ms.date: 06/06/2022 ms.author: lajanuar recommendations: false --- @@ -20,15 +20,24 @@ Custom template models share the same labeling format and strategy as custom neu ## Model capabilities -Custom template models support key-value pairs, selection marks, tables, signature fields, and selected regions. +Custom template models support key-value pairs, selection marks, tables, signature fields, and selected regions. 
-| Form fields | Selection marks | Structured fields (Tables) | Signature | Selected regions | -|--|--|--|--|--| +| Form fields | Selection marks | Tabular fields (Tables) | Signature | Selected regions | +|:--:|:--:|:--:|:--:|:--:| | Supported| Supported | Supported | Preview | Supported | -## Dealing with variations +## Tabular fields -Template models rely on a defined visual template, changes to the template will result in lower accuracy. In those instances, split your training dataset to include at least five samples of each template and train a model for each of the variations. You can then [compose](concept-composed-models.md) the models into a single endpoint. When dealing with subtle variations, like digital PDF documents and images, it's best to include at least five examples of each type in the same training dataset. +With the release of API version **2022-06-30-preview**, custom template models will add support for **cross page** tabular fields (tables): + +* To label a table that spans multiple pages, label each row of the table across the different pages in a single table. +* As a best practice, ensure that your dataset contains a few samples of the expected variations. For example, include samples where the entire table is on a single page and where tables span two or more pages if you expect to see those variations in documents. + +Tabular fields are also useful when extracting repeating information within a document that isn't recognized as a table. For example, a repeating section of work experiences in a resume can be labeled and extracted as a tabular field. + +## Dealing with variations + +Template models rely on a defined visual template, changes to the template will result in lower accuracy. In those instances, split your training dataset to include at least five samples of each template and train a model for each of the variations. You can then [compose](concept-composed-models.md) the models into a single endpoint. For subtle variations, like digital PDF documents and images, it's best to include at least five examples of each type in the same training dataset. ## Training a model @@ -36,13 +45,13 @@ Template models are available generally [v2.1 API](https://westus.dev.cognitive. | Model | REST API | SDK | Label and Test Models| |--|--|--|--| -| Custom template (preview) | [Form Recognizer 3.0 (preview)](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/AnalyzeDocument)| [Form Recognizer Preview SDK](quickstarts/try-v3-python-sdk.md)| [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio)| +| Custom template (preview) | [Form Recognizer 3.0 (preview)](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument)| [Form Recognizer Preview SDK](quickstarts/try-v3-python-sdk.md)| [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio)| | Custom template | [Form Recognizer 2.1 (GA)](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v2-1/operations/AnalyzeWithCustomForm)| [Form Recognizer SDK](quickstarts/get-started-sdk-rest-api.md?pivots=programming-language-python)| [Form Recognizer Sample labeling tool](https://fott-2-1.azurewebsites.net/)| On the v3 API, the build operation to train model supports a new ```buildMode``` property, to train a custom template model, set the ```buildMode``` to ```template```. 
```REST -https://{endpoint}/formrecognizer/documentModels:build?api-version=2022-01-30-preview +https://{endpoint}/formrecognizer/documentModels:build?api-version=2022-06-30 { "modelId": "string", @@ -72,4 +81,4 @@ https://{endpoint}/formrecognizer/documentModels:build?api-version=2022-01-30-pr * View the REST API: > [!div class="nextstepaction"] - > [Form Recognizer API v2.1](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v2-1/operations/AnalyzeWithCustomForm) \ No newline at end of file + > [Form Recognizer API v2.1](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v2-1/operations/AnalyzeWithCustomForm) diff --git a/articles/applied-ai-services/form-recognizer/concept-custom.md b/articles/applied-ai-services/form-recognizer/concept-custom.md index 2c014d58e5e97..58c146716a418 100644 --- a/articles/applied-ai-services/form-recognizer/concept-custom.md +++ b/articles/applied-ai-services/form-recognizer/concept-custom.md @@ -7,7 +7,7 @@ manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: conceptual -ms.date: 03/10/2022 +ms.date: 06/06/2022 ms.author: lajanuar recommendations: false --- @@ -35,7 +35,7 @@ Your training set will consist of structured documents where the formatting and ### Custom neural model -The custom neural (custom document) model uses deep learning models and base model trained on a large collection of documents. This model is then fine-tuned or adapted to your data when you train the model with a labeled dataset. Custom neural models support structured, semi-structured, and unstructured documents to extract fields. Custom neural models currently support English-language documents. When you're choosing between the two model types, start with a neural model if it meets your functional needs. See [neural models](concept-custom-neural.md) to learn more about custom document models. +The custom neural (custom document) model uses deep learning models and base model trained on a large collection of documents. This model is then fine-tuned or adapted to your data when you train the model with a labeled dataset. Custom neural models support structured, semi-structured, and unstructured documents to extract fields. Custom neural models currently support English-language documents. When you're choosing between the two model types, start with a neural model to determine if it meets your functional needs. See [neural models](concept-custom-neural.md) to learn more about custom document models. ## Build mode @@ -82,7 +82,7 @@ The following tools are supported by Form Recognizer v3.0: ### Try Form Recognizer -See how data is extracted from your specific or unique documents by using custom models. You need the following resources: +Try extracting data from your specific or unique documents using custom models. You need the following resources: * An Azure subscription. You can [create one for free](https://azure.microsoft.com/free/cognitive-services/). * A [Form Recognizer instance](https://portal.azure.com/#create/Microsoft.CognitiveServicesFormRecognizer) in the Azure portal. You can use the free pricing tier (`F0`) to try the service. After your resource deploys, select **Go to resource** to get your key and endpoint. 
@@ -141,8 +141,8 @@ The following table describes the features available with the associated tools a | Document type | REST API | SDK | Label and Test Models| |--|--|--|--| | Custom form 2.1 | [Form Recognizer 2.1 GA API](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v2-1/operations/AnalyzeWithCustomForm) | [Form Recognizer SDK](quickstarts/get-started-sdk-rest-api.md?pivots=programming-language-python)| [Sample labeling tool](https://fott-2-1.azurewebsites.net/)| -| Custom template 3.0 | [Form Recognizer 3.0 (preview)](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/AnalyzeDocument)| [Form Recognizer Preview SDK](quickstarts/try-v3-python-sdk.md)| [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio)| -| Custom neural | [Form Recognizer 3.0 (preview)](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/AnalyzeDocument)| [Form Recognizer Preview SDK](quickstarts/try-v3-python-sdk.md)| [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio) +| Custom template 3.0 | [Form Recognizer 3.0 (preview)](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument)| [Form Recognizer Preview SDK](quickstarts/try-v3-python-sdk.md)| [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio)| +| Custom neural | [Form Recognizer 3.0 (preview)](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument)| [Form Recognizer Preview SDK](quickstarts/try-v3-python-sdk.md)| [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio) > [!NOTE] @@ -177,7 +177,7 @@ The [Sample Labeling tool](https://fott-2-1.azurewebsites.net/) doesn't support * **Custom model API (v3.0)**: This version supports signature detection for custom forms. When you train custom models, you can specify certain fields as signatures. When a document is analyzed with your custom model, it indicates whether a signature was detected or not. * [Form Recognizer v3.0 migration guide](v3-migration-guide.md): This guide shows you how to use the preview version in your applications and workflows. -* [REST API (preview)](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/AnalyzeDocument): This API shows you more about the preview version and new capabilities. +* [REST API (preview)](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument): This API shows you more about the preview version and new capabilities. 
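As noted above, analyzing a document with your trained custom model returns the extracted fields, including any fields you labeled as signatures. The request below is a sketch only: the `documentModels/{modelId}:analyze` route and `urlSource` body field follow the v3 preview pattern referenced in this article, the `Ocp-Apim-Subscription-Key` header is the standard Cognitive Services key header, and `{endpoint}`, `{key}`, `{modelId}`, the document URL, and the `api-version` value are placeholders or assumptions to replace with your own values.

```REST
POST https://{endpoint}/formrecognizer/documentModels/{modelId}:analyze?api-version=2022-06-30-preview
Ocp-Apim-Subscription-Key: {key}
Content-Type: application/json

{
  "urlSource": "https://{your-storage-account}/{container}/sample-form.pdf"
}
```

The call is asynchronous; poll the URL returned in the `Operation-Location` response header until the analysis result is available.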
### Try signature detection @@ -204,5 +204,5 @@ Explore Form Recognizer quickstarts and REST APIs: | Quickstart | REST API| |--|--| -|[v3.0 Studio quickstart](quickstarts/try-v3-form-recognizer-studio.md) |[Form Recognizer v3.0 API 2022-01-30-preview](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-2/operations/AnalyzeDocument)| +|[v3.0 Studio quickstart](quickstarts/try-v3-form-recognizer-studio.md) |[Form Recognizer v3.0 API 2022-06-30](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-2/operations/AnalyzeDocument)| | [v2.1 quickstart](quickstarts/get-started-sdk-rest-api.md) | [Form Recognizer API v2.1](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-2/operations/BuildDocumentModel) | \ No newline at end of file diff --git a/articles/applied-ai-services/form-recognizer/concept-general-document.md b/articles/applied-ai-services/form-recognizer/concept-general-document.md index 6f0ae5e7bfbe2..865544676826a 100644 --- a/articles/applied-ai-services/form-recognizer/concept-general-document.md +++ b/articles/applied-ai-services/form-recognizer/concept-general-document.md @@ -1,13 +1,13 @@ --- title: Form Recognizer general document model | Preview titleSuffix: Azure Applied AI Services -description: Concepts encompassing data extraction and analysis using prebuilt general document preview model +description: Concepts related to data extraction and analysis using prebuilt general document preview model author: laujan manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: conceptual -ms.date: 03/08/2022 +ms.date: 06/06/2022 ms.author: lajanuar recommendations: false --- @@ -21,19 +21,18 @@ The General document preview model combines powerful Optical Character Recogniti The general document API supports most form types and will analyze your documents and extract keys and associated values. It's ideal for extracting common key-value pairs from documents. You can use the general document model as an alternative to training a custom model without labels. > [!NOTE] -> The ```2022-01-30-preview``` update to the general document model adds support for selection marks. +> The ```2022-06-30``` update to the general document model adds support for selection marks. ## General document features -* The general document model is a pre-trained model, doesn't require labels or training. +* The general document model is a pre-trained model; it doesn't require labels or training. -* A single API extracts key-value pairs, selection marks entities, text, tables, and structure from documents. +* A single API extracts key-value pairs, selection marks, entities, text, tables, and structure from documents. * The general document model supports structured, semi-structured, and unstructured documents. * Key names are spans of text within the document that are associated with a value. - * Selection marks are identified as fields with a value of ```:selected:``` or ```:unselected:``` ***Sample document processed in the Form Recognizer Studio*** @@ -46,11 +45,11 @@ The following tools are supported by Form Recognizer v3.0: | Feature | Resources | |----------|-------------------------| -|🆕 **General document model**|
        • [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com)
        • [**REST API**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/AnalyzeDocument)
        • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md)
        • [**Python SDK**](quickstarts/try-v3-python-sdk.md)
        • [**Java SDK**](quickstarts/try-v3-java-sdk.md)
        • [**JavaScript SDK**](quickstarts/try-v3-javascript-sdk.md)
        | +|🆕 **General document model**|
        • [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com)
        • [**REST API**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument)
        • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md)
        • [**Python SDK**](quickstarts/try-v3-python-sdk.md)
        • [**Java SDK**](quickstarts/try-v3-java-sdk.md)
        • [**JavaScript SDK**](quickstarts/try-v3-javascript-sdk.md)
        | ### Try Form Recognizer -See how data is extracted from forms and documents using the Form Recognizer Studio or our Sample Labeling tool. +Try extracting data from forms and documents using the Form Recognizer Studio. You'll need the following resources: @@ -78,15 +77,15 @@ You'll need the following resources: ## Key-value pairs -Key-value pairs are specific spans within the document that identify a label or key and its associated response or value. In a structured form, these pairs could be the label and the value the user entered for that field or in an unstructured document they could be the date a contract was executed on based on the text in a paragraph. The AI model is trained to extract identifiable keys and values based on a wide variety of document types, formats, and structures. +Key-value pairs are specific spans within the document that identify a label or key and its associated response or value. In a structured form, these pairs could be the label and the value the user entered for that field. In an unstructured document, they could be the date a contract was executed on based on the text in a paragraph. The AI model is trained to extract identifiable keys and values based on a wide variety of document types, formats, and structures. -Keys can also exist in isolation when the model detects that a key exists, with no associated value or when processing optional fields. For example, a middle name field may be left blank on a form in some instances. key-value pairs are always spans of text contained in the document and if you have documents where same value is described in different ways, for example, a customer or a user, the associated key will be either customer or user based on what the document contained. +Keys can also exist in isolation when the model detects that a key exists, with no associated value or when processing optional fields. For example, a middle name field may be left blank on a form in some instances. Key-value pairs are spans of text contained in the document. If you have documents where the same value is described in different ways, for example, customer and user, the associated key will be either customer or user based on context. ## Entities Natural language processing models can identify parts of speech and classify each token or word. The named entity recognition model is able to identify entities like people, locations, and dates to provide for a richer experience. Identifying entities enables you to distinguish between customer types, for example, an individual or an organization. -The key value pair extraction model and entity identification model are run in parallel on the entire document and not just on the values of the extracted key-value pairs. This process ensures that complex structures where a key can't be identified is still enriched by identifying the entities referenced. You can still match keys or values to entities based on the offsets of the identified spans. +The key-value pair extraction model and entity identification model are run in parallel on the entire document—not just on the values of the extracted key-value pairs. This process ensures that complex structures where a key can't be identified are still enriched by identifying the entities referenced. You can still match keys or values to entities based on the offsets of the identified spans. * The general document is a pre-trained model and can be directly invoked via the REST API. 
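Because the general document model is pre-trained, there's no build step before calling it. The request below is a minimal sketch that assumes the `prebuilt-document` model ID and the v3 preview analyze route described in this article; replace the endpoint, key, document URL, and `api-version` value with your own.

```REST
POST https://{endpoint}/formrecognizer/documentModels/prebuilt-document:analyze?api-version=2022-06-30-preview
Ocp-Apim-Subscription-Key: {key}
Content-Type: application/json

{
  "urlSource": "https://{your-storage-account}/{container}/contract.pdf"
}
```

In the analysis result, the extracted pairs and entities should appear in the `keyValuePairs` and `entities` collections, alongside the page, table, and text content.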
diff --git a/articles/applied-ai-services/form-recognizer/concept-id-document.md b/articles/applied-ai-services/form-recognizer/concept-id-document.md index 93df1ad7e889f..82ec023661fec 100644 --- a/articles/applied-ai-services/form-recognizer/concept-id-document.md +++ b/articles/applied-ai-services/form-recognizer/concept-id-document.md @@ -1,16 +1,15 @@ --- title: Form Recognizer ID document model titleSuffix: Azure Applied AI Services -description: Concepts encompassing data extraction and analysis using the prebuilt ID document model +description: Concepts related to data extraction and analysis using the prebuilt ID document model author: laujan manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: conceptual -ms.date: 03/11/2022 +ms.date: 06/06/2022 ms.author: lajanuar recommendations: false -ms.custom: ignite-fall-2021 --- @@ -34,11 +33,11 @@ The following tools are supported by Form Recognizer v3.0: | Feature | Resources | Model ID | |----------|-------------|-----------| -|**ID document model**|
        • [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com)
        • [**REST API**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/AnalyzeDocument)
        • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md)
        • [**Python SDK**](quickstarts/try-v3-python-sdk.md)
        • [**Java SDK**](quickstarts/try-v3-java-sdk.md)
        • [**JavaScript SDK**](quickstarts/try-v3-javascript-sdk.md)
        |**prebuilt-idDocument**| +|**ID document model**|
        • [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com)
        • [**REST API**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument)
        • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md)
        • [**Python SDK**](quickstarts/try-v3-python-sdk.md)
        • [**Java SDK**](quickstarts/try-v3-java-sdk.md)
        • [**JavaScript SDK**](quickstarts/try-v3-javascript-sdk.md)
        |**prebuilt-idDocument**| ### Try Form Recognizer -See how to extract data, including name, birth date, machine-readable zone, and expiration date, from ID documents using the Form Recognizer Studio or our Sample Labeling tool. You'll need the following resources: +Extract data, including name, birth date, machine-readable zone, and expiration date, from ID documents using the Form Recognizer Studio or our Sample Labeling tool. You'll need the following resources: * An Azure subscription—you can [create one for free](https://azure.microsoft.com/free/cognitive-services/) @@ -114,17 +113,32 @@ You'll need an ID document. You can use our [sample ID document](https://raw.git ## Form Recognizer preview v3.0 - The Form Recognizer preview introduces several new features and capabilities: + The Form Recognizer preview v3.0 introduces several new features and capabilities: -* **ID document (v3.0)** model supports endorsements, restrictions, and vehicle classification extraction from US driver's licenses. +* **ID document (v3.0)** prebuilt model supports extraction of endorsement, restriction, and vehicle class codes from US driver's licenses. + +* The ID Document **2022-06-30-preview** release supports the following data extraction from US driver's licenses: + + * Date issued + * Height + * Weight + * Eye color + * Hair color + * Document discriminator security code ### ID document preview field extraction |Name| Type | Description | Standardized output| |:-----|:----|:----|:----| -| 🆕 Endorsements | String | Additional driving privileges granted to a driver such as Motorcycle or School bus. | | -| 🆕 Restrictions | String | Restricted driving privileges applicable to suspended or revoked licenses.| | -| 🆕VehicleClassification | String | Types of vehicles that can be driven by a driver. || +| 🆕 DateOfIssue | Date | Issue date | yyyy-mm-dd | +| 🆕 Height | String | Height of the holder. | | +| 🆕 Weight | String | Weight of the holder. | | +| 🆕 EyeColor | String | Eye color of the holder. | | +| 🆕 HairColor | String | Hair color of the holder. | | +| 🆕 DocumentDiscriminator | String | Document discriminator is a security code that identifies where and when the license was issued. | | +| Endorsements | String | More driving privileges granted to a driver such as Motorcycle or School bus. | | +| Restrictions | String | Restricted driving privileges applicable to suspended or revoked licenses.| | +| VehicleClassification | String | Types of vehicles that can be driven by a driver. 
|| | CountryRegion | countryRegion | Country or region code compliant with ISO 3166 standard | | | DateOfBirth | Date | DOB | yyyy-mm-dd | | DateOfExpiration | Date | Expiration date DOB | yyyy-mm-dd | diff --git a/articles/applied-ai-services/form-recognizer/concept-invoice.md b/articles/applied-ai-services/form-recognizer/concept-invoice.md index 062824b46cbc1..b518421103941 100644 --- a/articles/applied-ai-services/form-recognizer/concept-invoice.md +++ b/articles/applied-ai-services/form-recognizer/concept-invoice.md @@ -1,13 +1,13 @@ --- title: Form Recognizer invoice model titleSuffix: Azure Applied AI Services -description: Concepts encompassing data extraction and analysis using prebuilt invoice model +description: Concepts related to data extraction and analysis using prebuilt invoice model author: laujan manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: conceptual -ms.date: 02/15/2022 +ms.date: 06/06/2022 ms.author: lajanuar recommendations: false --- @@ -33,7 +33,7 @@ The following tools are supported by Form Recognizer v3.0: | Feature | Resources | Model ID | |----------|-------------|-----------| -|**Invoice model** |
        • [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com)
        • [**REST API**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/AnalyzeDocument)
        • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md)
        • [**Python SDK**](quickstarts/try-v3-python-sdk.md)
        • [**Java SDK**](quickstarts/try-v3-java-sdk.md)
        • [**JavaScript SDK**](quickstarts/try-v3-javascript-sdk.md)
        |**prebuilt-invoice**| +|**Invoice model** |
        • [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com)
        • [**REST API**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument)
        • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md)
        • [**Python SDK**](quickstarts/try-v3-python-sdk.md)
        • [**Java SDK**](quickstarts/try-v3-java-sdk.md)
        • [**JavaScript SDK**](quickstarts/try-v3-javascript-sdk.md)
        |**prebuilt-invoice**| ### Try Form Recognizer @@ -93,6 +93,11 @@ You'll need an invoice document. You can use our [sample invoice document](https |--------|:----------------------|:---------| |Invoice|
        • English (United States)—en-US
        | English (United States)—en-US| |Invoice|
        • Spanish—es
        | Spanish (United States)—es| +|Invoice (preview)|
        • German—de
        | German (Germany)-de| +|Invoice (preview)|
        • French—fr
        | French (France)—fr| +|Invoice (preview)|
        • Italian—it
        | Italian (Italy)—it| +|Invoice (preview)|
        • Portuguese—pt
        | Portuguese (Portugal)—pt| +|Invoice (preview)|
        • Dutch—nl
        | Dutch (Netherlands)—nl| ## Field extraction @@ -144,9 +149,15 @@ Following are the line items extracted from an invoice in the JSON output respon | Unit | String| The unit of the line item, e.g, kg, lb etc. | Hours | | | Date | Date| Date corresponding to each line item. Often it's a date the line item was shipped | 3/4/2021| 2021-03-04 | | Tax | Number | Tax associated with each line item. Possible values include tax amount, tax %, and tax Y/N | 10% | | -| VAT | Number | Stands for Value added tax. This is a flat tax levied on an item. Common in European countries | €20.00 | | +| VAT | Number | Stands for Value added tax. VAT is a flat tax levied on an item. Common in European countries | €20.00 | | -The invoice key-value pairs and line items extracted are in the `documentResults` section of the JSON output. +The invoice key-value pairs and line items extracted are in the `documentResults` section of the JSON output. + +### Key-value pairs (Preview) + +The prebuilt invoice **2022-06-30-preview** release returns key-value pairs at no extra cost. Key-value pairs are specific spans within the invoice that identify a label or key and its associated response or value. In an invoice, these pairs could be the label and the value the user entered for that field or telephone number. The AI model is trained to extract identifiable keys and values based on a wide variety of document types, formats, and structures. + +Keys can also exist in isolation when the model detects that a key exists, with no associated value or when processing optional fields. For example, a middle name field may be left blank on a form in some instances. key-value pairs are always spans of text contained in the document. If you have documents where the same value is described in different ways, for example, a customer or a user, the associated key will be either customer or user based on context. ## Form Recognizer preview v3.0 @@ -154,7 +165,7 @@ The invoice key-value pairs and line items extracted are in the `documentResults * Follow our [**Form Recognizer v3.0 migration guide**](v3-migration-guide.md) to learn how to use the preview version in your applications and workflows. -* Explore our [**REST API (preview)**](https://westcentralus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-2/operations/AnalyzeDocument) to learn more about the preview version and new capabilities. +* Explore our [**REST API (preview)**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument) to learn more about the preview version and new capabilities. 
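As a quick illustration, the sketch below analyzes an invoice with the `prebuilt-invoice` model ID listed earlier in this article. The optional `locale` hint, the `api-version` value, and the placeholder endpoint, key, and document URL are assumptions to adjust for your own resource and documents.

```REST
POST https://{endpoint}/formrecognizer/documentModels/prebuilt-invoice:analyze?api-version=2022-06-30-preview&locale=de
Ocp-Apim-Subscription-Key: {key}
Content-Type: application/json

{
  "urlSource": "https://{your-storage-account}/{container}/sample-invoice.pdf"
}
```

Invoice fields such as the customer name, due date, and amount due are returned per analyzed document in the result, with key-value pairs included in the preview release as described above.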
## Next steps @@ -165,7 +176,7 @@ The invoice key-value pairs and line items extracted are in the `documentResults * Explore our REST API: > [!div class="nextstepaction"] - > [Form Recognizer API v3.0 (Preview)](https://westcentralus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-2/operations/AnalyzeDocument) - + > [Form Recognizer API v3.0 (Preview)](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument) + > [!div class="nextstepaction"] > [Form Recognizer API v2.1](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v2-1/operations/5ed8c9843c2794cbb1a96291) diff --git a/articles/applied-ai-services/form-recognizer/concept-layout.md b/articles/applied-ai-services/form-recognizer/concept-layout.md index 69226aad645a2..194a1fa2fba19 100644 --- a/articles/applied-ai-services/form-recognizer/concept-layout.md +++ b/articles/applied-ai-services/form-recognizer/concept-layout.md @@ -1,16 +1,16 @@ --- title: Layouts - Form Recognizer titleSuffix: Azure Applied AI Services -description: Learn concepts related to Layout API analysis with Form Recognizer API—usage and limits. +description: Learn concepts related to the Layout API with Form Recognizer REST API usage and limits. author: laujan manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: conceptual -ms.date: 03/11/2022 +ms.date: 06/06/2022 ms.author: lajanuar recommendations: false -ms.custom: ignite-fall-2021 +ms.custom: --- # Form Recognizer layout model @@ -19,14 +19,30 @@ The Form Recognizer Layout API extracts text, tables, selection marks, and struc ***Sample form processed with [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio/layout)*** -:::image type="content" source="media/studio/analyze-layout.png" alt-text="Screenshot: Screenshot of sample document processed using Form Recognizer studio"::: +:::image type="content" source="media/studio/form-recognizer-studio-layout-newspaper.png" alt-text="Screenshot of sample newspaper page processed using Form Recognizer studio"::: -**Data extraction features** +## Supported document types -| **Layout model** | **Text Extraction** | **Selection Marks** | **Tables** | +| **Model** | **Images** | **PDF** | **TIFF** | | --- | --- | --- | --- | | Layout | ✓ | ✓ | ✓ | +### Data extraction + +| **Model** | **Text** | **Selection Marks** | **Tables** | **Paragraphs** | **Paragraph roles** | +| --- | --- | --- | --- | --- | --- | +| Layout | ✓ | ✓ | ✓ | ✓ | ✓ | + +**Supported paragraph roles**: +The paragraph roles are best used with unstructured documents. PAragraph roles help analyze the structure of the extracted content for better semantic search and analysis. + +* title +* sectionHeading +* footnote +* pageHeader +* pageFooter +* pageNumber + ## Development options The following tools are supported by Form Recognizer v2.1: @@ -39,11 +55,11 @@ The following tools are supported by Form Recognizer v3.0: | Feature | Resources | Model ID | |----------|------------|------------| -|**Layout model**|
        • [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com)
        • [**REST API**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/AnalyzeDocument)
        • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md)
        • [**Python SDK**](quickstarts/try-v3-python-sdk.md)
        • [**Java SDK**](quickstarts/try-v3-java-sdk.md)
        • [**JavaScript SDK**](quickstarts/try-v3-javascript-sdk.md)
        |**prebuilt-layout**| +|**Layout model**|
        • [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com)
        • [**REST API**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument)
        • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md)
        • [**Python SDK**](quickstarts/try-v3-python-sdk.md)
        • [**Java SDK**](quickstarts/try-v3-java-sdk.md)
        • [**JavaScript SDK**](quickstarts/try-v3-javascript-sdk.md)
        |**prebuilt-layout**| -### Try Form Recognizer +## Try Form Recognizer -See how data is extracted from forms and documents using the Form Recognizer Studio or Sample Labeling tool. You'll need the following resources: +Try extracting data from forms and documents using the Form Recognizer Studio. You'll need the following resources: * An Azure subscription—you can [create one for free](https://azure.microsoft.com/free/cognitive-services/) @@ -51,14 +67,14 @@ See how data is extracted from forms and documents using the Form Recognizer Stu :::image type="content" source="media/containers/keys-and-endpoint.png" alt-text="Screenshot: keys and endpoint location in the Azure portal."::: -#### Form Recognizer Studio (preview) +### Form Recognizer Studio (preview) > [!NOTE] > Form Recognizer studio is available with the preview (v3.0) API. ***Sample form processed with [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio/layout)*** -:::image type="content" source="media/studio/form-recognizer-studio-layout-v3p2.png" alt-text="Screenshot: Layout processing in Form Recognizer Studio."::: +:::image type="content" source="media/studio/form-recognizer-studio-layout-newspaper.png" alt-text="Screenshot: Layout processing a newspaper page in Form Recognizer Studio."::: 1. On the Form Recognizer Studio home page, select **Layout** @@ -71,79 +87,55 @@ See how data is extracted from forms and documents using the Form Recognizer Stu > [!div class="nextstepaction"] > [Try Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio/layout) -#### Sample Labeling tool - -You'll need a form document. You can use our [sample form document](https://raw.githubusercontent.com/Azure-Samples/cognitive-services-REST-api-samples/master/curl/form-recognizer/sample-layout.pdf). - -1. On the Sample Labeling tool home page, select **Use Layout to get text, tables, and selection marks**. - -1. Select **Local file** from the dropdown menu. - -1. Upload your file and select **Run Layout** - - :::image type="content" source="media/try-layout.png" alt-text="Screenshot: Screenshot: Sample Labeling tool dropdown layout file source selection menu."::: - - > [!div class="nextstepaction"] - > [Try Sample Labeling tool](https://fott-2-1.azurewebsites.net/prebuilts-analyze) - ## Input requirements * For best results, provide one clear photo or high-quality scan per document. -* Supported file formats: JPEG/JPG, PNG, BMP, TIFF, and PDF (text-embedded or scanned). Text-embedded PDFs are best to eliminate the possibility of error in character extraction and location. +* Supported file formats: JPEG/JPG, PNG, BMP, TIFF, and PDF (text-embedded or scanned). * For PDF and TIFF, up to 2000 pages can be processed (with a free tier subscription, only the first two pages are processed). -* The file size must be less than 500 MB for paid (S0) tier and 4 MB for free (F0) tier (4 MB for the free tier). +* The file size must be less than 500 MB for paid (S0) tier and 4 MB for free (F0) tier. * Image dimensions must be between 50 x 50 pixels and 10,000 x 10,000 pixels. - -> [!NOTE] -> The [Sample Labeling tool](https://fott-2-1.azurewebsites.net/) does not support the BMP file format. This is a limitation of the tool not the Form Recognizer Service. +* The minimum height of the text to be extracted is 12 pixels for a 1024 X 768 image. This dimension corresponds to about eight font point text at 150 DPI. 
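If you prefer to call the service directly rather than through the Studio, the sketch below shows a Layout request that follows these input requirements. The `prebuilt-layout` model ID comes from the table earlier in this article; the optional `pages` parameter, the `api-version` value, and the placeholders are assumptions to replace with values for your own resource.

```REST
POST https://{endpoint}/formrecognizer/documentModels/prebuilt-layout:analyze?api-version=2022-06-30-preview&pages=1-3
Ocp-Apim-Subscription-Key: {key}
Content-Type: application/json

{
  "urlSource": "https://{your-storage-account}/{container}/sample-layout.pdf"
}
```

Limiting the request to a page range with `pages` is useful for large documents, as described in the extraction sections that follow.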
## Supported languages and locales *See* [Language Support](language-support.md) for a complete list of supported handwritten and printed languages. -## Data extraction - -The layout model extracts table structures, selection marks, printed and handwritten text, and bounding box coordinates from your documents. +## Model extraction -### Tables and table headers +The layout model extracts text, selection marks, tables, paragraphs, and paragraph types (`roles`) from your documents. -Layout API extracts tables in the `pageResults` section of the JSON output. Documents can be scanned, photographed, or digitized. Tables can be complex with merged cells or columns, with or without borders, and with odd angles. Extracted table information includes the number of columns and rows, row span, and column span. Each cell with its bounding box is output along with information whether it's recognized as part of a header or not. The model predicted header cells can span multiple rows and aren't necessarily the first rows in a table. They also work with rotated tables. Each table cell also includes the full text with references to the individual words in the `readResults` section. +### Text lines and words -:::image type="content" source="./media/layout-table-headers-example.png" alt-text="Layout table headers output"::: +Layout API extracts print and handwritten style text as `lines` and `words`. The model outputs bounding `polygon` coordinates and `confidence` for the extracted words. The `styles` collection includes any handwritten style for lines, if detected, along with the spans pointing to the associated text. This feature applies to [supported handwritten languages](language-support.md). ### Selection marks -Layout API also extracts selection marks from documents. Extracted selection marks include the bounding box, confidence, and state (selected/unselected). Selection mark information is extracted in the `readResults` section of the JSON output. - -:::image type="content" source="./media/layout-selection-marks.png" alt-text="Layout selection marks output"::: - -### Text lines and words +Layout API also extracts selection marks from documents. Extracted selection marks appear within the `pages` collection for each page. They include the bounding `polygon`, `confidence`, and selection `state` (`selected/unselected`). Any associated text if extracted is also included as the starting index (`offset`) and `length` that references the top level `content` property that contains the full text from the document. -The layout model extracts text from documents and images with multiple text angles and colors. It accepts photos of documents, faxes, printed and/or handwritten (English only) text, and mixed modes. Printed and handwritten text is extracted from lines and words. The service then returns bounding box coordinates, confidence scores, and style (handwritten or other). All the text information is included in the `readResults` section of the JSON output. +### Tables and table headers -:::image type="content" source="./media/layout-text-extraction.png" alt-text="Layout text extraction output"::: +Layout API extracts tables in the `pageResults` section of the JSON output. Documents can be scanned, photographed, or digitized. Extracted table information includes the number of columns and rows, row span, and column span. Each cell with its bounding `polygon` is output along with information whether it's recognized as a `columnHeader` or not. The API also works with rotated tables. 
Each table cell contains the row and column index and bounding polygon coordinates. For the cell text, the model outputs the `span` information containing the starting index (`offset`). The model also outputs the `length` within the top level `content` that contains the full text from the document. -### Natural reading order for text lines (Latin only) +### Paragraphs -In Form Recognizer v2.1, you can specify the order in which the text lines are output with the `readingOrder` query parameter. Use `natural` for a more human-friendly reading order output as shown in the following example. This feature is only supported for Latin languages. +The Layout model extracts all identified blocks of text in the `paragraphs` collection as a top level object under `analyzeResults`. Each entry in this collection represents a text block and includes the extracted text as`content`and the bounding `polygon` coordinates. The `span` information points to the text fragment within the top level `content` property that contains the full text from the document. -In Form Recognizer v3.0, the natural reading order output is used by the service in all cases. Therefore, there's no `readingOrder` parameter provided in this version. +### Paragraph roles -### Handwritten classification for text lines (Latin only) +The Layout model may flag certain paragraphs with their specialized type or `role` as predicted by the model. They're best used with unstructured documents to help understand the layout of the extracted content for a richer semantic analysis. The following paragraph roles are supported: -The response includes classifying whether each text line is of handwriting style or not, along with a confidence score. This feature is only supported for Latin languages. +| **Predicted role** | **Description** | +| --- | --- | +| `title` | The main heading(s) in the page | +| `sectionHeading` | One or more subheading(s) on the page | +| `footnote` | Text near the bottom of the page | +| `pageHeader` | Text near the top edge of the page | +| `pageFooter` | Text near the bottom edge of the page | +| `pageNumber` | Page number | ### Select page numbers or ranges for text extraction -For large multi-page documents, use the `pages` query parameter to indicate specific page numbers or page ranges for text extraction. - -## Form Recognizer preview v3.0 - - The Form Recognizer preview introduces several new features and capabilities. - -* Follow our [**Form Recognizer v3.0 migration guide**](v3-migration-guide.md) to learn how to use the preview version in your applications and workflows. - -* Explore our [**REST API (preview)**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-2/operations/AnalyzeDocument) to learn more about the preview version and new capabilities. +For large multi-page documents, use the `pages` query parameter to indicate specific page numbers or page ranges for text extraction. ## Next steps diff --git a/articles/applied-ai-services/form-recognizer/concept-model-overview.md b/articles/applied-ai-services/form-recognizer/concept-model-overview.md index 7522e18a8b2e7..9215f198cb177 100644 --- a/articles/applied-ai-services/form-recognizer/concept-model-overview.md +++ b/articles/applied-ai-services/form-recognizer/concept-model-overview.md @@ -1,13 +1,13 @@ --- title: Form Recognizer models titleSuffix: Azure Applied AI Services -description: Concepts encompassing data extraction and analysis using prebuilt models. 
+description: Concepts related to data extraction and analysis using prebuilt models. author: laujan manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: conceptual -ms.date: 03/16/2022 +ms.date: 06/06/2022 ms.author: lajanuar recommendations: false ms.custom: ignite-fall-2021 @@ -16,14 +16,14 @@ ms.custom: ignite-fall-2021 # Form Recognizer models -Azure Form Recognizer prebuilt models enable you to add intelligent document processing to your apps and flows without having to train and build your own models. Prebuilt models use optical character recognition (OCR) combined with deep learning models to identify and extract predefined text and data fields common to specific form and document types. Form Recognizer extracts analyzes form and document data then returns an organized, structured JSON response. Form Recognizer v2.1 supports invoice, receipt, ID document, and business card models. + Azure Form Recognizer supports a wide variety of models that enable you to add intelligent document processing to your apps and flows. You can use a prebuilt document analysis or domain specific model or train a custom model tailored to your specific business needs and use cases. Form Recognizer can be used with the REST API or Python, C#, Java, and JavaScript SDKs. ## Model overview | **Model** | **Description** | | --- | --- | |**Document analysis**|| -| 🆕[Read (preview)](#read-preview) | Extract printed and handwritten text lines, words, locations, and detected languages.| +| 🆕[Read (preview)](#read-preview) | Extract typeface and handwritten text lines, words, locations, and detected languages.| | 🆕[General document (preview)](#general-document-preview) | Extract text, tables, structure, key-value pairs, and named entities.| | [Layout](#layout) | Extract text and layout information from documents.| |**Prebuilt**|| @@ -83,20 +83,21 @@ The W-2 model analyzes and extracts key information reported in each box on a W- [:::image type="icon" source="media/studio/layout.png":::](https://formrecognizer.appliedai.azure.com/studio/layout) -The Layout API analyzes and extracts text, tables and headers, selection marks, and structure information from forms and documents. +The Layout API analyzes and extracts text, tables and headers, selection marks, and structure information from documents. ***Sample document processed using the [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio/layout)***: -:::image type="content" source="media/studio/analyze-layout.png" alt-text="Screenshot: Screenshot of sample document processed using Form Recognizer studio"::: +:::image type="content" source="media/studio/form-recognizer-studio-layout-newspaper.png" alt-text="Screenshot: Screenshot of sample newspaper page processed using Form Recognizer studio"::: > [!div class="nextstepaction"] +> > [Learn more: layout model](concept-layout.md) ### Invoice [:::image type="icon" source="media/studio/invoice.png":::](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=invoice) -The invoice model analyzes and extracts key information from sales invoices. The API analyzes invoices in various formats and extracts key information such as customer name, billing address, due date, and amount due. Currently, the model supports both English and Spanish invoices. +The invoice model analyzes and extracts key information from sales invoices. 
The API analyzes invoices in various formats and extracts key information such as customer name, billing address, due date, and amount due. Currently, the model supports English, Spanish, German, French, Italian, Portuguese, and Dutch invoices. ***Sample invoice processed using [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=invoice)***: @@ -109,7 +110,9 @@ The invoice model analyzes and extracts key information from sales invoices. The [:::image type="icon" source="media/studio/receipt.png":::](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=receipt) -The receipt model analyzes and extracts key information from printed and handwritten receipts. +* The receipt model analyzes and extracts key information from printed and handwritten sales receipts. + +* The preview version v3.0 also supports single-page hotel receipt processing. ***Sample receipt processed using [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=receipt)***: @@ -152,7 +155,9 @@ The business card model analyzes and extracts key information from business card [:::image type="icon" source="media/studio/custom.png":::](https://formrecognizer.appliedai.azure.com/studio/custommodel/projects) -The custom model analyzes and extracts data from forms and documents specific to your business. The API is a machine-learning program trained to recognize form fields within your distinct content and extract key-value pairs and table data. You only need five examples of the same form type to get started and your custom model can be trained with or without labeled datasets. +* Custom models analyze and extract data from forms and documents specific to your business. The API is a machine-learning program trained to recognize form fields within your distinct content and extract key-value pairs and table data. You only need five examples of the same form type to get started and your custom model can be trained with or without labeled datasets. + +* The preview version v3.0 custom model supports signature detection in custom forms (template model) and cross-page tables in both template and neural models. ***Sample custom template processed using [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio/customform/projects)***: @@ -165,7 +170,7 @@ The custom model analyzes and extracts data from forms and documents specific to A composed model is created by taking a collection of custom models and assigning them to a single model built from your form types. You can assign multiple custom models to a composed model called with a single model ID. you can assign up to 100 trained custom models to a single composed model. 
-***Composed model dialog window[Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio/customform/projects)***: +***Composed model dialog window in [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio/customform/projects)***: :::image type="content" source="media/studio/composed-model.png" alt-text="Screenshot of Form Recognizer Studio compose custom model dialog window."::: @@ -174,43 +179,31 @@ A composed model is created by taking a collection of custom models and assignin ## Model data extraction - | **Data extraction** | **Text extraction** |**Key-Value pairs** |**Fields**|**Selection Marks** | **Tables** |**Entities** | -| --- |:---: |:---:|:---: |:---: |:---: |:---: | -|🆕 [prebuilt-read](concept-read.md#data-extraction) | ✓ | || | | | -|🆕 [prebuilt-tax.us.w2](concept-w2.md#field-extraction) | ✓ | ✓ | ✓ | ✓ | ✓ || -|🆕 [prebuilt-document](concept-general-document.md#data-extraction)| ✓ | ✓ || ✓ | ✓ | ✓ | -| [prebuilt-layout](concept-layout.md#data-extraction) | ✓ | || ✓ | ✓ | | -| [prebuilt-invoice](concept-invoice.md#field-extraction) | ✓ | ✓ |✓| ✓ | ✓ || -| [prebuilt-receipt](concept-receipt.md#field-extraction) | ✓ | ✓ |✓| | || -| [prebuilt-idDocument](concept-id-document.md#field-extraction) | ✓ | ✓ |✓| | || -| [prebuilt-businessCard](concept-business-card.md#field-extraction) | ✓ | ✓ | ✓| | || -| [Custom](concept-custom.md#compare-model-features) |✓ | ✓ || ✓ | ✓ | ✓ | + | **Model ID** | **Text extraction** | **Language detection** | **Selection Marks** | **Tables** | **Paragraphs** | **Paragraph roles** | **Key-Value pairs** | **Fields** | + |:-----|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:| +|🆕 [prebuilt-read](concept-read.md#data-extraction) | ✓ | ✓ | | | ✓ | | | | +|🆕 [prebuilt-tax.us.w2](concept-w2.md#field-extraction) | ✓ | | ✓ | | ✓ | | | ✓ | +|🆕 [prebuilt-document](concept-general-document.md#data-extraction)| ✓ | | ✓ | ✓ | ✓ | | ✓ | | +| [prebuilt-layout](concept-layout.md#data-extraction) | ✓ | | ✓ | ✓ | ✓ | ✓ | | | | +| [prebuilt-invoice](concept-invoice.md#field-extraction) | ✓ | | ✓ | ✓ | ✓ | | ✓ | ✓ | +| [prebuilt-receipt](concept-receipt.md#field-extraction) | ✓ | | | | ✓ | | | ✓ | +| [prebuilt-idDocument](concept-id-document.md#field-extraction) | ✓ | | | | ✓ | | | ✓ | +| [prebuilt-businessCard](concept-business-card.md#field-extraction) | ✓ | | | | ✓ | | | ✓ | +| [Custom](concept-custom.md#compare-model-features) | ✓ | | ✓ | ✓ | ✓ | | | ✓ | ## Input requirements * For best results, provide one clear photo or high-quality scan per document. -* Supported file formats: JPEG/JPG, PNG, BMP, TIFF, and PDF (text-embedded or scanned). Text-embedded PDFs are best to eliminate the possibility of error in character extraction and location. +* Supported file formats: JPEG/JPG, PNG, BMP, TIFF, and PDF (text-embedded or scanned). Additionally, the Read API supports Microsoft Word (DOCX), Excel (XLS), PowerPoint (PPT), and HTML files. * For PDF and TIFF, up to 2000 pages can be processed (with a free tier subscription, only the first two pages are processed). * The file size must be less than 500 MB for paid (S0) tier and 4 MB for free (F0) tier. * Image dimensions must be between 50 x 50 pixels and 10,000 x 10,000 pixels. -* PDF dimensions are up to 17 x 17 inches, corresponding to Legal or A3 paper size, or smaller. * The total size of the training data is 500 pages or less. * If your PDFs are password-locked, you must remove the lock before submission. 
> [!NOTE] > The [Sample Labeling tool](https://fott-2-1.azurewebsites.net/) does not support the BMP file format. This is a limitation of the tool not the Form Recognizer Service. -## Form Recognizer preview v3.0 - - Form Recognizer v3.0 (preview) introduces several new features and capabilities: - -* [**Read (preview)**](concept-read.md) model is a new API that extracts text lines, words, their locations, detected languages, and handwritten text, if detected. -* [**General document (preview)**](concept-general-document.md) model is a new API that uses a pre-trained model to extract text, tables, structure, key-value pairs, and named entities from forms and documents. -* [**Receipt (preview)**](concept-receipt.md) model supports single-page hotel receipt processing. -* [**ID document (preview)**](concept-id-document.md) model supports endorsements, restrictions, and vehicle classification extraction from US driver's licenses. -* [**W-2 (preview)**](concept-w2.md) model supports employee, employer, wage information, etc. from US W-2 forms. -* [**Custom model API (preview)**](concept-custom.md) supports signature detection for custom forms. - ### Version migration Learn how to use Form Recognizer v3.0 in your applications by following our [**Form Recognizer v3.0 migration guide**](v3-migration-guide.md) diff --git a/articles/applied-ai-services/form-recognizer/concept-read.md b/articles/applied-ai-services/form-recognizer/concept-read.md index 80e1f756e19a4..173a0ff05b2de 100644 --- a/articles/applied-ai-services/form-recognizer/concept-read.md +++ b/articles/applied-ai-services/form-recognizer/concept-read.md @@ -1,21 +1,33 @@ --- -title: Read - Form Recognizer +title: Read OCR - Form Recognizer titleSuffix: Azure Applied AI Services -description: Learn concepts related to Read API analysis with Form Recognizer API—usage and limits. +description: Learn concepts related to Read OCR API analysis with Form Recognizer API—usage and limits. author: laujan manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: conceptual -ms.date: 03/09/2022 +ms.date: 06/06/2022 ms.author: lajanuar recommendations: false ms.custom: ignite-fall-2021 --- -# Form Recognizer read model +# Form Recognizer Read OCR model -The Form Recognizer v3.0 preview includes the new Read OCR model. Form Recognizer Read builds on the success of COmputer Vision Read and optimizes even more for analyzing documents, including new document formats in the future. It extracts printed and handwritten text from documents and images and can handle mixed languages in the documents and text line. The read model can detect lines, words, locations, and additionally detect languages. It is the foundational technology powering the text extraction in Form Recognizer Layout, prebuilt, general document, and custom models. +Form Recognizer v3.0 preview includes the new Read Optical Character Recognition (OCR) model. The Read OCR model extracts typeface and handwritten text including mixed languages in documents. The Read OCR model can detect lines, words, locations, and languages and is the core of all other Form Recognizer models. Layout, general document, custom, and prebuilt models all use the Read OCR model as a foundation for extracting texts from documents. 
+ +## Supported document types + +| **Model** | **Images** | **PDF** | **TIFF** | **Word** | **Excel** | **PowerPoint** | **HTML** | +| --- | --- | --- | --- | --- | --- | --- | --- | +| Read | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | + +### Data extraction + +| **Read model** | **Text** | **[Language detection](language-support.md#detected-languages-read-api)** | +| --- | --- | --- | +| prebuilt-read | ✓ | ✓ | ## Development options @@ -25,15 +37,9 @@ The following resources are supported by Form Recognizer v3.0: |----------|------------|------------| |**Read model**|
        • [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com)
        • [**REST API**](how-to-guides/use-prebuilt-read.md?pivots=programming-language-rest-api)
        • [**C# SDK**](how-to-guides/use-prebuilt-read.md?pivots=programming-language-csharp)
        • [**Python SDK**](how-to-guides/use-prebuilt-read.md?pivots=programming-language-python)
        • [**Java SDK**](how-to-guides/use-prebuilt-read.md?pivots=programming-language-java)
        • [**JavaScript**](how-to-guides/use-prebuilt-read.md?pivots=programming-language-javascript)
        |**prebuilt-read**| -## Data extraction - -| **Read model** | **Text Extraction** | **[Language detection](language-support.md#detected-languages-read-api)** | -| --- | --- | --- | -prebuilt-read | ✓ |✓ | - -### Try Form Recognizer +## Try Form Recognizer -See how text is extracted from forms and documents using the Form Recognizer Studio. You'll need the following assets: +Try extracting text from forms and documents using the Form Recognizer Studio. You'll need the following assets: * An Azure subscription—you can [create one for free](https://azure.microsoft.com/free/cognitive-services/) @@ -41,10 +47,10 @@ See how text is extracted from forms and documents using the Form Recognizer Stu :::image type="content" source="media/containers/keys-and-endpoint.png" alt-text="Screenshot: keys and endpoint location in the Azure portal."::: -#### Form Recognizer Studio (preview) +### Form Recognizer Studio (preview) > [!NOTE] -> Form Recognizer studio is available with the preview (v3.0) API. +> Currently, Form Recognizer Studio doesn't support Microsoft Word, Excel, PowerPoint, and HTML file formats in the Read preview. ***Sample form processed with [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio/read)*** @@ -63,33 +69,47 @@ See how text is extracted from forms and documents using the Form Recognizer Stu ## Input requirements -* For best results, provide one clear photo or high-quality scan per document. -* Supported file formats: JPEG/JPG, PNG, BMP, TIFF, and PDF (text-embedded or scanned). Text-embedded PDFs are best to eliminate the possibility of error in character extraction and location. +* Supported file formats: These include JPEG/JPG, PNG, BMP, TIFF, PDF (text-embedded or scanned). Additionally, the newest API version `2022-06-30-preview` supports Microsoft Word (DOCX), Excel (XLS), PowerPoint (PPT), and HTML files. * For PDF and TIFF, up to 2000 pages can be processed (with a free tier subscription, only the first two pages are processed). -* The file size must be less than 500 MB for paid (S0) tier and 4 MB for free (F0) tier (4 MB for the free tier) +* The file size must be less than 500 MB for paid (S0) tier and 4 MB for free (F0) tier. * Image dimensions must be between 50 x 50 pixels and 10,000 x 10,000 pixels. +* The minimum height of the text to be extracted is 12 pixels for a 1024X768 image. This dimension corresponds to about eight font point text at 150 DPI. ## Supported languages and locales Form Recognizer preview version supports several languages for the read model. *See* our [Language Support](language-support.md) for a complete list of supported handwritten and printed languages. -## Features +## Data detection and extraction -### Text lines and words +### Pages -Read API extracts text from documents and images. It accepts PDFs and images of documents and handles printed and/or handwritten text, and supports mixed languages. Text is extracted as text lnes, words, bounding boxes, confidence scores, and style, whether handwritten or not, supported for Latin languages only. 
+ +With the added support for Microsoft Word, Excel, PowerPoint, and HTML files, the page units in the model output are computed as shown: -### Language detection +| **File format** | **Computed page unit** | **Total pages** | +| --- | --- | --- | +|Images | Each image = 1 page unit | Total images | +|PDF | Each page in the PDF = 1 page unit | Total pages in the PDF | +|Word | Up to 3,000 characters = 1 page unit, Each embedded image = 1 page unit | Total pages of up to 3,000 characters each + Total embedded images | +|Excel | Each worksheet = 1 page unit, Each embedded image = 1 page unit | Total worksheets + Total images | +|PowerPoint| Each slide = 1 page unit, Each embedded image = 1 page unit | Total slides + Total images | +|HTML| Up to 3,000 characters = 1 page unit, embedded or linked images not supported | Total pages of up to 3,000 characters each | + +### Text lines and words -Read adds [language detection](language-support.md#detected-languages-read-api) as a new feature for text lines. Read will predict the language at the text line level along with the confidence score. +Read extracts print and handwritten style text as `lines` and `words`. The model outputs bounding `polygon` coordinates and `confidence` for the extracted words. The `styles` collection includes any handwritten style for lines, if detected, along with the spans pointing to the associated text. This feature applies to [supported handwritten languages](language-support.md). -### Handwritten classification for text lines (Latin only) +For Microsoft Word, Excel, PowerPoint, and HTML file formats, Read will extract all embedded text as is. For any embedded images, it will run OCR on the images to extract text and append the text from each image as an added entry to the `pages` collection. These added entries will include the extracted text lines and words, their bounding polygons, confidences, and the spans pointing to the associated text. -The response includes classifying whether each text line is of handwriting style or not, along with a confidence score. This feature is only supported for Latin languages. +### Language detection + +Read adds [language detection](language-support.md#detected-languages-read-api) as a new feature for text lines. Read will predict all detected languages for text lines along with the `confidence` in the `languages` collection under `analyzeResult`. ### Select page(s) for text extraction -For large multi-page documents, use the `pages` query parameter to indicate specific page numbers or page ranges for text extraction. +For large multi-page PDF documents, use the `pages` query parameter to indicate specific page numbers or page ranges for text extraction. + +> [!NOTE] > For Microsoft Word, Excel, PowerPoint, and HTML file formats, the Read API ignores the `pages` parameter and extracts all pages by default.
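As an illustration of the sections above (not from the original article), the following sketch calls the Read model with a `pages` range and then walks lines, words, and handwritten styles; detected languages are surfaced separately in the result's `languages` collection. The endpoint, key, and document URL are placeholders, and an `azure-ai-formrecognizer` preview release is assumed.

```python
# Minimal sketch (assumed azure-ai-formrecognizer preview SDK); endpoint, key, and URL are placeholders.
from azure.ai.formrecognizer import DocumentAnalysisClient
from azure.core.credentials import AzureKeyCredential

client = DocumentAnalysisClient("<endpoint>", AzureKeyCredential("<key>"))
poller = client.begin_analyze_document_from_url(
    "prebuilt-read",
    "<document-url>",
    pages="1-3",  # ignored for Word, Excel, PowerPoint, and HTML input, as noted above
)
result = poller.result()

print(f"Full text length: {len(result.content)} characters")

for page in result.pages:
    print(f"Page {page.page_number}: {len(page.lines)} lines, {len(page.words)} words")

# Handwritten content, if any, is reported in the styles collection with spans into result.content.
for style in result.styles or []:
    if style.is_handwritten:
        print(f"Handwritten style detected (confidence {style.confidence})")
```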
## Next steps diff --git a/articles/applied-ai-services/form-recognizer/concept-receipt.md b/articles/applied-ai-services/form-recognizer/concept-receipt.md index 8d05f3ad82149..ec85bc975e9fd 100644 --- a/articles/applied-ai-services/form-recognizer/concept-receipt.md +++ b/articles/applied-ai-services/form-recognizer/concept-receipt.md @@ -1,13 +1,13 @@ --- title: Form Recognizer receipt model titleSuffix: Azure Applied AI Services -description: Concepts encompassing data extraction and analysis using the prebuilt receipt model +description: Concepts related to data extraction and analysis using the prebuilt receipt model author: laujan manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: conceptual -ms.date: 03/11/2022 +ms.date: 06/06/2022 ms.author: lajanuar recommendations: false ms.custom: ignite-fall-2021 @@ -16,7 +16,7 @@ ms.custom: ignite-fall-2021 # Form Recognizer receipt model -The receipt model combines powerful Optical Character Recognition (OCR) capabilities with deep learning models to analyze and extract key information from sales receipts. Receipts can be of various formats and quality including printed and handwritten receipts. The API extracts key information such as merchant name, merchant phone number, transaction date, tax, and transaction total and returns a structured JSON data representation. +The receipt model combines powerful Optical Character Recognition (OCR) capabilities with deep learning models to analyze and extract key information from sales receipts. Receipts can be of various formats and quality including printed and handwritten receipts. The API extracts key information such as merchant name, merchant phone number, transaction date, total tax, and transaction total and returns a structured JSON data representation. ***Sample receipt processed with [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=receipt)***: @@ -34,7 +34,7 @@ The following tools are supported by Form Recognizer v3.0: | Feature | Resources | Model ID | |----------|-------------|-----------| -|**Receipt model**|
        • [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com)
        • [**REST API**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/AnalyzeDocument)
        • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md)
        • [**Python SDK**](quickstarts/try-v3-python-sdk.md)
        |**prebuilt-receipt**| +|**Receipt model**|
        • [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com)
        • [**REST API**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument)
        • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md)
        • [**Python SDK**](quickstarts/try-v3-python-sdk.md)
        |**prebuilt-receipt**| ### Try Form Recognizer @@ -64,7 +64,7 @@ See how data, including time and date of transactions, merchant information, and #### Sample Labeling tool (API v2.1) -You will need a receipt document. You can use our [sample receipt document](https://raw.githubusercontent.com/Azure-Samples/cognitive-services-REST-api-samples/master/curl/form-recognizer/contoso-receipt.png). +You'll need a receipt document. You can use our [sample receipt document](https://raw.githubusercontent.com/Azure-Samples/cognitive-services-REST-api-samples/master/curl/form-recognizer/contoso-receipt.png). 1. On the Sample Labeling tool home page, select **Use prebuilt model to get data**. @@ -81,7 +81,7 @@ You will need a receipt document. You can use our [sample receipt document](http * Supported file formats: JPEG/JPG, PNG, BMP, TIFF, and PDF (text-embedded or scanned). Text-embedded PDFs are best to eliminate the possibility of error in character extraction and location. * For PDF and TIFF, up to 2000 pages can be processed (with a free tier subscription, only the first two pages are processed). * The file size must be less than 500 MB for paid (S0) tier and 4 MB for free (F0) tier. -* Image dimensions must be between 50 x 50 pixels and 10000 x 10000 pixels. +* Image dimensions must be between 50 x 50 pixels and 10,000 x 10,000 pixels. * PDF dimensions are up to 17 x 17 inches, corresponding to Legal or A3 paper size, or smaller. * The total size of the training data is 500 pages or less. * If your PDFs are password-locked, you must remove the lock before submission. @@ -107,13 +107,13 @@ You will need a receipt document. You can use our [sample receipt document](http | TransactionTime | Time | Time the receipt was issued | hh-mm-ss (24-hour) | | Total | Number (USD)| Full transaction total of receipt | Two-decimal float| | Subtotal | Number (USD) | Subtotal of receipt, often before taxes are applied | Two-decimal float| -| Tax | Number (USD) | Tax on receipt (often sales tax or equivalent) | Two-decimal float | + | Tax | Number (USD) | Total tax on receipt (often sales tax or equivalent). **Renamed to "TotalTax" in 2022-06-30-preview version**. | Two-decimal float | | Tip | Number (USD) | Tip included by buyer | Two-decimal float| | Items | Array of objects | Extracted line items, with name, quantity, unit price, and total price extracted | | -| Name | String | Item name | | -| Quantity | Number | Quantity of each item | Integer | +| Name | String | Item description. **Renamed to "Description" in 2022-06-30-preview version**. | | +| Quantity | Number | Quantity of each item | Two-decimal float | | Price | Number | Individual price of each item unit| Two-decimal float | -| Total Price | Number | Total price of line item | Two-decimal float | +| TotalPrice | Number | Total price of line item | Two-decimal float | ## Form Recognizer preview v3.0 @@ -130,7 +130,7 @@ You will need a receipt document. You can use our [sample receipt document](http | Items.*.Category | String | Item category, for example, Room, Tax, etc. | | | Items.*.Date | Date | Item date | yyyy-mm-dd | | Items.*.Description | String | Item description | | -| Items.*.TotalPrice | Number | Item total price | Integer | +| Items.*.TotalPrice | Number | Item total price | Two-decimal float | | Locale | String | Locale of the receipt, for example, en-US. 
| ISO language-county code | | MerchantAddress | String | Listed address of merchant | | | MerchantAliases | Array| | | diff --git a/articles/applied-ai-services/form-recognizer/concept-w2.md b/articles/applied-ai-services/form-recognizer/concept-w2.md index e0811d38ebc95..9010a7ffaf5c8 100644 --- a/articles/applied-ai-services/form-recognizer/concept-w2.md +++ b/articles/applied-ai-services/form-recognizer/concept-w2.md @@ -1,13 +1,13 @@ --- -title: Form Recognizer W-2 form prebuilt model +title: Form Recognizer W-2 prebuilt model titleSuffix: Azure Applied AI Services -description: Data extraction and analysis extraction using the prebuilt-tax Form W-2 model +description: Data extraction and analysis extraction using the prebuilt W-2 model author: laujan manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: conceptual -ms.date: 03/25/2022 +ms.date: 06/06/2022 ms.author: lajanuar recommendations: false --- @@ -24,7 +24,7 @@ A W-2 is a multipart form divided into state and federal sections and consisting ## Development options -The prebuilt W-2 form, model is supported by Form Recognizer v3.0 with the following tools: +The prebuilt W-2 model is supported by Form Recognizer v3.0 with the following tools: | Feature | Resources | Model ID | |----------|-------------|-----------| @@ -32,7 +32,7 @@ The prebuilt W-2 form, model is supported by Form Recognizer v3.0 with the follo ### Try Form Recognizer -See how data is extracted from W-2 forms using the Form Recognizer Studio. You'll need the following resources: +Try extracting data from W-2 forms using the Form Recognizer Studio. You'll need the following resources: * An Azure subscription—you can [create one for free](https://azure.microsoft.com/free/cognitive-services/) @@ -45,7 +45,7 @@ See how data is extracted from W-2 forms using the Form Recognizer Studio. You'l > [!NOTE] > Form Recognizer studio is available with v3.0 preview API. -1. On the [Form Recognizer Studio home page](https://formrecognizer.appliedai.azure.com/studio), select **W-2 form**. +1. On the [Form Recognizer Studio home page](https://formrecognizer.appliedai.azure.com/studio), select **W-2**. 1. You can analyze the sample W-2 document or select the **➕ Add** button to upload your own sample. @@ -71,7 +71,7 @@ See how data is extracted from W-2 forms using the Form Recognizer Studio. You'l | Model | Language—Locale code | Default | |--------|:----------------------|:---------| -|prebuilt-tax.us.w2|
          English (United States)

        |English (United States)—en-US| +|prebuilt-tax.us.w2|
        • English (United States)
        |English (United States)—en-US| ## Field extraction @@ -117,7 +117,6 @@ See how data is extracted from W-2 forms using the Form Recognizer Studio. You'l | TaxYear | | Number | Tax year | 2020 | | W2FormVariant | | String | The variants of W-2 forms, including "W-2", "W-2AS", "W-2CM", "W-2GU", "W-2VI" | W-2 | - ### Migration guide and REST API v3.0 * Follow our [**Form Recognizer v3.0 migration guide**](v3-migration-guide.md) to learn how to use the preview version in your applications and workflows. @@ -127,9 +126,10 @@ See how data is extracted from W-2 forms using the Form Recognizer Studio. You'l ## Next steps * Complete a Form Recognizer quickstart: - -|Programming language | :::image type="content" source="media/form-recognizer-icon.png" alt-text="Form Recognizer icon from the Azure portal."::: |Programming language -|:---:|:---:|:---:| -|[**C#**](quickstarts/try-v3-csharp-sdk.md#prebuilt-model)||[**JavaScript**](quickstarts/try-v3-javascript-sdk.md#prebuilt-model)| -|[**Java**](quickstarts/try-v3-java-sdk.md#prebuilt-model)||[**Python**](quickstarts/try-v3-python-sdk.md#prebuilt-model)| -|[**REST API**](quickstarts/try-v3-rest-api.md)||| +> [!div class="checklist"] +> +> * [**REST API**](quickstarts/try-v3-rest-api.md) +> * [**C# SDK**](quickstarts/try-v3-csharp-sdk.md#prebuilt-model) +> * [**Python SDK**](quickstarts/try-v3-python-sdk.md#prebuilt-model) +> * [**Java SDK**](quickstarts/try-v3-java-sdk.md#prebuilt-model) +> * [**JavaScript**](quickstarts/try-v3-javascript-sdk.md#prebuilt-model)
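To make the field extraction table above concrete, here is a hedged sketch (not part of the original article) of reading W-2 fields with the Python SDK. The field names mentioned in the comments come from that table; the endpoint, key, and document URL are placeholders, and an `azure-ai-formrecognizer` preview release is assumed.

```python
# Minimal sketch (assumed azure-ai-formrecognizer preview SDK); endpoint, key, and URL are placeholders.
from azure.ai.formrecognizer import DocumentAnalysisClient
from azure.core.credentials import AzureKeyCredential

client = DocumentAnalysisClient("<endpoint>", AzureKeyCredential("<key>"))
poller = client.begin_analyze_document_from_url("prebuilt-tax.us.w2", "<w2-document-url>")
result = poller.result()

for doc in result.documents:
    # Each extracted field (for example, TaxYear or W2FormVariant from the table above)
    # is returned with a value and a confidence score.
    for name, field in doc.fields.items():
        print(f"{name}: {field.value} (confidence {field.confidence})")
```

Nested fields, such as employer or employee details, typically come back as dictionary-valued fields, so a production parser would recurse into `field.value` rather than printing it directly.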
      diff --git a/articles/applied-ai-services/form-recognizer/containers/form-recognizer-container-configuration.md b/articles/applied-ai-services/form-recognizer/containers/form-recognizer-container-configuration.md index 02b3459bf1f8d..e802008f9572b 100644 --- a/articles/applied-ai-services/form-recognizer/containers/form-recognizer-container-configuration.md +++ b/articles/applied-ai-services/form-recognizer/containers/form-recognizer-container-configuration.md @@ -7,7 +7,7 @@ manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: how-to -ms.date: 03/25/2022 +ms.date: 06/06/2022 ms.author: lajanuar --- # Configure Form Recognizer containers @@ -16,7 +16,7 @@ ms.author: lajanuar > > Form Recognizer containers are in gated preview. To use them, you must submit an [online request](https://customervoice.microsoft.com/Pages/ResponsePage.aspx?id=v4j5cvGGr0GRqy180BHbR7en2Ais5pxKtso_Pz4b1_xUNlpBU1lFSjJUMFhKNzVHUUVLN1NIOEZETiQlQCN0PWcu), and have it approved. For more information, See [**Request approval to run container**](form-recognizer-container-install-run.md#request-approval-to-run-the-container). -With Azure Form Recognizer containers, you can build an application architecture that's optimized to take advantage of both robust cloud capabilities and edge locality. Containers provide a minimalist, isolated environment that can be easily deployed on-premise and in the cloud. In this article, you'll learn to configure the Form Recognizer container run-time environment by using the `docker compose` command arguments. Form Recognizer features are supported by six Form Recognizer feature containers—**Layout**, **Business Card**,**ID Document**, **Receipt**, **Invoice**, **Custom**. These containers have several required settings and a few optional settings. For a few examples, see the [Example docker-compose.yml file](#example-docker-composeyml-file) section. +With Azure Form Recognizer containers, you can build an application architecture that's optimized to take advantage of both robust cloud capabilities and edge locality. Containers provide a minimalist, isolated environment that can be easily deployed on-premise and in the cloud. In this article, you'll learn to configure the Form Recognizer container run-time environment by using the `docker compose` command arguments. Form Recognizer features are supported by six Form Recognizer feature containers—**Layout**, **Business Card**,**ID Document**, **Receipt**, **Invoice**, **Custom**. These containers have both required and optional settings. For a few examples, see the [Example docker-compose.yml file](#example-docker-composeyml-file) section. ## Configuration settings @@ -25,9 +25,9 @@ Each container has the following configuration settings: |Required|Setting|Purpose| |--|--|--| |Yes|[Key](#key-and-billing-configuration-setting)|Tracks billing information.| -|Yes|[Billing](#key-and-billing-configuration-setting)|Specifies the endpoint URI of the service resource on Azure. _See_ [Billing]](form-recognizer-container-install-run.md#billing), for more information. For more information and a complete list of regional endpoints, _see_ [Custom subdomain names for Cognitive Services](../../../cognitive-services/cognitive-services-custom-subdomains.md).| +|Yes|[Billing](#key-and-billing-configuration-setting)|Specifies the endpoint URI of the service resource on Azure. For more information, _see_ [Billing](form-recognizer-container-install-run.md#billing). 
For more information and a complete list of regional endpoints, _see_ [Custom subdomain names for Cognitive Services](../../../cognitive-services/cognitive-services-custom-subdomains.md).| |Yes|[Eula](#eula-setting)| Indicates that you've accepted the license for the container.| -|No|[ApplicationInsights](#applicationinsights-setting)|Enables adding [Azure Application Insights](/azure/application-insights) telemetry support to your container.| +|No|[ApplicationInsights](#applicationinsights-setting)|Enables adding [Azure Application Insights](/azure/application-insights) customer content support to your container.| |No|[Fluentd](#fluentd-settings)|Writes log and, optionally, metric data to a Fluentd server.| |No|HTTP Proxy|Configures an HTTP proxy for making outbound requests.| |No|[Logging](#logging-settings)|Provides ASP.NET Core logging support for your container. | diff --git a/articles/applied-ai-services/form-recognizer/create-a-form-recognizer-resource.md b/articles/applied-ai-services/form-recognizer/create-a-form-recognizer-resource.md index 4f6e9a07d1260..ca600f1a576bf 100644 --- a/articles/applied-ai-services/form-recognizer/create-a-form-recognizer-resource.md +++ b/articles/applied-ai-services/form-recognizer/create-a-form-recognizer-resource.md @@ -7,7 +7,7 @@ manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: how-to -ms.date: 01/06/2022 +ms.date: 06/06/2022 ms.author: bemabonsu recommendations: false #Customer intent: I want to learn how to use create a Form Recognizer service in the Azure portal. @@ -61,7 +61,7 @@ Let's get started: 1. Copy the key and endpoint values from your Form Recognizer resource paste them in a convenient location, such as *Microsoft Notepad*. You'll need the key and endpoint values to connect your application to the Form Recognizer API. -1. If your overview page does not have the keys and endpoint visible, you can select the **Keys and Endpoint** button on the left navigation bar and retrieve them there. +1. If your overview page doesn't have the keys and endpoint visible, you can select the **Keys and Endpoint** button on the left navigation bar and retrieve them there. :::image border="true" type="content" source="media/containers/keys-and-endpoint.png" alt-text="Still photo showing how to access resource key and endpoint URL"::: @@ -71,4 +71,9 @@ That's it! You're now ready to start automating data extraction using Azure Form * Try the [Form Recognizer Studio](concept-form-recognizer-studio.md), an online tool for visually exploring, understanding, and integrating features from the Form Recognizer service into your applications. -* Complete a Form Recognizer [C#](quickstarts/try-v3-csharp-sdk.md),[Python](quickstarts/try-v3-python-sdk.md), [Java](quickstarts/try-v3-java-sdk.md), or [JavaScript](quickstarts/try-v3-javascript-sdk.md) quickstart and get started creating a document processing app in the development language of your choice. 
+* Complete a Form Recognizer quickstart and get started creating a document processing app in the development language of your choice: + + * [C#](quickstarts/try-v3-csharp-sdk.md) + * [Python](quickstarts/try-v3-python-sdk.md) + * [Java](quickstarts/try-v3-java-sdk.md) + * [JavaScript](quickstarts/try-v3-javascript-sdk.md) \ No newline at end of file diff --git a/articles/applied-ai-services/form-recognizer/create-sas-tokens.md b/articles/applied-ai-services/form-recognizer/create-sas-tokens.md new file mode 100644 index 0000000000000..bd7961793a6b0 --- /dev/null +++ b/articles/applied-ai-services/form-recognizer/create-sas-tokens.md @@ -0,0 +1,181 @@ +--- +title: Create SAS tokens for containers and blobs with the Azure portal +description: Learn how to create shared access signature (SAS) tokens for containers using Azure portal, or Azure Explorer +ms.topic: how-to +author: laujan +manager: nitinme +ms.service: applied-ai-services +ms.subservice: forms-recognizer +ms.date: 05/27/2022 +ms.author: lajanuar +recommendations: false +--- + +# Create SAS tokens for storage containers + + In this article, you'll learn how to create user delegation, shared access signature (SAS) tokens, using the Azure portal or Azure Storage Explorer. User delegation SAS tokens are secured with Azure AD credentials. SAS tokens provide secure, delegated access to resources in your Azure storage account. + +At a high level, here's how SAS tokens work: + +* Your application submits the SAS token to Azure Storage as part of a REST API request. + +* If the storage service verifies that the SAS is valid, the request is authorized. + +* If the SAS token is deemed invalid, the request is declined and the error code 403 (Forbidden) is returned. + +Azure Blob Storage offers three resource types: + +* **Storage** accounts provide a unique namespace in Azure for your data. +* **Data storage containers** are located in storage accounts and organize sets of blobs. +* **Blobs** are located in containers and store text and binary data such as files, text, and images. + +## When to use a SAS token + +* **Training custom models**. Your assembled set of training documents *must* be uploaded to an Azure Blob Storage container. You can opt to use a SAS token to grant access to your training documents. + +* **Using storage containers with public access**. You can opt to use a SAS token to grant limited access to your storage resources that have public read access. + + > [!IMPORTANT] + > + > * If your Azure storage account is protected by a virtual network or firewall, you can't grant access with a SAS token. You'll have to use a [managed identity](managed-identities.md) to grant access to your storage resource. + > + > * [Managed identity](managed-identities-secured-access.md) supports both privately and publicly accessible Azure Blob Storage accounts. + > + > * SAS tokens grant permissions to storage resources, and should be protected in the same manner as an account key. + > + > * Operations that use SAS tokens should be performed only over an HTTPS connection, and SAS URIs should only be distributed on a secure connection such as HTTPS. + +## Prerequisites + +To get started, you'll need: + +* An active [Azure account](https://azure.microsoft.com/free/cognitive-services/). If you don't have one, you can [create a free account](https://azure.microsoft.com/free/). 
+ +* A [Form Recognizer](https://portal.azure.com/#create/Microsoft.CognitiveServicesFormRecognizer) or [Cognitive Services multi-service](https://portal.azure.com/#create/Microsoft.CognitiveServicesAllInOne) resource. + +* A **standard performance** [Azure Blob Storage account](https://portal.azure.com/#create/Microsoft.StorageAccount-ARM). You'll create containers to store and organize your blob data within your storage account. If you don't know how to create an Azure storage account with a storage container, follow these quickstarts: + + * [Create a storage account](../../storage/common/storage-account-create.md). When you create your storage account, select **Standard** performance in the **Instance details** > **Performance** field. + * [Create a container](../../storage/blobs/storage-quickstart-blobs-portal.md#create-a-container). When you create your container, set **Public access level** to **Container** (anonymous read access for containers and blobs) in the **New Container** window. + +## Upload your documents + +1. Go to the [Azure portal](https://portal.azure.com/#home). + * Select **Your storage account** → **Data storage** → **Containers**. + + :::image type="content" source="media/sas-tokens/data-storage-menu.png" alt-text="Screenshot that shows the Data storage menu in the Azure portal."::: + +1. Select a container from the list. + +1. Select **Upload** from the menu at the top of the page. + + :::image type="content" source="media/sas-tokens/container-upload-button.png" alt-text="Screenshot that shows the container Upload button in the Azure portal."::: + +1. The **Upload blob** window will appear. Select your files to upload. + + :::image type="content" source="media/sas-tokens/upload-blob-window.png" alt-text="Screenshot that shows the Upload blob window in the Azure portal."::: + + > [!NOTE] + > By default, the REST API uses form documents located at the root of your container. You can also use data organized in subfolders if specified in the API call. For more information, see [Organize your data in subfolders](./build-training-data-set.md#organize-your-data-in-subfolders-optional). + +## Use the Azure portal + +The Azure portal is a web-based console that enables you to manage your Azure subscription and resources using a graphical user interface (GUI). + +1. Go to the [Azure portal](https://portal.azure.com/#home) and navigate as follows: + + * **Your storage account** → **containers** → **your container**. + +1. Select **Generate SAS** from the menu near the top of the page. + +1. Select **Signing method** → **User delegation key**. + +1. Define **Permissions** by selecting or clearing the appropriate checkbox.
      + + * Make sure the **Read**, **Write**, **Delete**, and **List** permissions are selected. + + :::image type="content" source="media/sas-tokens/sas-permissions.png" alt-text="Screenshot that shows the SAS permission fields in the Azure portal."::: + + >[!IMPORTANT] + > + > * If you receive a message similar to the following one, you'll also need to assign access to the blob data in your storage account: + > + > :::image type="content" source="media/sas-tokens/need-permissions.png" alt-text="Screenshot that shows the lack of permissions warning."::: + > + > * [Azure role-based access control](../../role-based-access-control/overview.md) (Azure RBAC) is the authorization system used to manage access to Azure resources. Azure RBAC helps you manage access and permissions for your Azure resources. + > * [Assign an Azure role for access to blob data](../../role-based-access-control/role-assignments-portal.md?tabs=current) to assign a role that allows for read, write, and delete permissions for your Azure storage container. *See* [Storage Blob Data Contributor](../../role-based-access-control/built-in-roles.md#storage-blob-data-contributor). + +1. Specify the signed key **Start** and **Expiry** times. + + * When you create a SAS token, the default duration is 48 hours. After 48 hours, you'll need to create a new token. + * Consider setting a longer duration period for the time you'll be using your storage account for Form Recognizer Service operations. + * The value for the expiry time is a maximum of seven days from the creation of the SAS token. + +1. The **Allowed IP addresses** field is optional and specifies an IP address or a range of IP addresses from which to accept requests. If the request IP address doesn't match the IP address or address range specified on the SAS token, it won't be authorized. + +1. The **Allowed protocols** field is optional and specifies the protocol permitted for a request made with the SAS token. The default value is HTTPS. + +1. Select **Generate SAS token and URL**. + +1. The **Blob SAS token** query string and **Blob SAS URL** appear in the lower area of the window. To use the Blob SAS token, append it to a storage service URI. + +1. Copy and paste the **Blob SAS token** and **Blob SAS URL** values in a secure location. They're displayed only once and can't be retrieved after the window is closed. + +1. To [construct a SAS URL](#use-your-sas-url-to-grant-access), append the SAS token (URI) to the URL for a storage service. + +## Use Azure Storage Explorer + +Azure Storage Explorer is a free standalone app that enables you to easily manage your Azure cloud storage resources from your desktop. + +### Get started + +* You'll need the [**Azure Storage Explorer**](../../vs-azure-tools-storage-manage-with-storage-explorer.md) app installed in your Windows, macOS, or Linux development environment. + +* After the Azure Storage Explorer app is installed, [connect it the storage account](../../vs-azure-tools-storage-manage-with-storage-explorer.md?tabs=windows#connect-to-a-storage-account-or-service) you're using for Form Recognizer. + +### Create your SAS tokens + +1. Open the Azure Storage Explorer app on your local machine and navigate to your connected **Storage Accounts**. +1. Expand the Storage Accounts node and select **Blob Containers**. +1. Expand the Blob Containers node and right-click a storage **container** node to display the options menu. +1. Select **Get Shared Access Signature** from options menu. +1. 
In the **Shared Access Signature** window, make the following selections: + * Select your **Access policy** (the default is none). + * Specify the signed key **Start** and **Expiry** date and time. A short lifespan is recommended because, once generated, a SAS can't be revoked. + * Select the **Time zone** for the Start and Expiry date and time (default is Local). + * Define your container **Permissions** by selecting the **Read**, **Write**, **List**, and **Delete** checkboxes. + * Select **key1** or **key2**. + * Review and select **Create**. + +1. A new window will appear with the **Container** name, **SAS URL**, and **Query string** for your container. + +1. **Copy and paste the SAS URL and query string values in a secure location. They'll only be displayed once and can't be retrieved once the window is closed.** + +1. To [construct a SAS URL](#use-your-sas-url-to-grant-access), append the SAS token (URI) to the URL for a storage service. + +## Use your SAS URL to grant access + +The SAS URL includes a special set of [query parameters](/rest/api/storageservices/create-user-delegation-sas#assign-permissions-with-rbac). Those parameters indicate how the resources may be accessed by the client. + +### REST API + +To use your SAS URL with the [REST API](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v2-1/operations/TrainCustomModelAsync), add the SAS URL to the request body: + + ```json + { + "source":"" + } + ``` + +### Sample Labeling Tool + +To use your SAS URL with the [Form Recognizer labeling tool](https://fott-2-1.azurewebsites.net/connections/create), add the SAS URL to the **Connection Settings** → **Azure blob container** → **SAS URI** field: + + :::image type="content" source="media/sas-tokens/fott-add-sas-uri.png" alt-text="Screenshot that shows the SAS URI field."::: + +That's it! You've learned how to create SAS tokens to authorize how clients access your data. + +## Next step + +> [!div class="nextstepaction"] +> [Build a training data set](build-training-data-set.md) diff --git a/articles/applied-ai-services/form-recognizer/encrypt-data-at-rest.md b/articles/applied-ai-services/form-recognizer/encrypt-data-at-rest.md index 69a919d05372f..a082921dd0012 100644 --- a/articles/applied-ai-services/form-recognizer/encrypt-data-at-rest.md +++ b/articles/applied-ai-services/form-recognizer/encrypt-data-at-rest.md @@ -9,7 +9,7 @@ ms.subservice: forms-recognizer ms.topic: conceptual ms.date: 08/28/2020 ms.author: egeaney -#Customer intent: As a user of the Form Recognizer service, I want to learn how encryption at rest works. 
+ms.custom: applied-ai-non-critical-form --- # Form Recognizer encryption of data at rest @@ -26,4 +26,4 @@ Azure Form Recognizer automatically encrypts your data when persisting it to the ## Next steps * [Form Recognizer Customer-Managed Key Request Form](https://aka.ms/cogsvc-cmk) -* [Learn more about Azure Key Vault](../../key-vault/general/overview.md) \ No newline at end of file +* [Learn more about Azure Key Vault](../../key-vault/general/overview.md) diff --git a/articles/applied-ai-services/form-recognizer/faq.yml b/articles/applied-ai-services/form-recognizer/faq.yml index 907c43422345b..e847467a9ffb1 100644 --- a/articles/applied-ai-services/form-recognizer/faq.yml +++ b/articles/applied-ai-services/form-recognizer/faq.yml @@ -7,7 +7,7 @@ metadata: ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: faq - ms.date: 05/23/2022 + ms.date: 06/06/2022 ms.author: lajanuar title: Form Recognizer frequently asked questions @@ -72,7 +72,7 @@ sections: Azure Form Recognizer is a cloud-based Azure Applied AI Service that is built using optical character recognition (OCR), Text Analytics, and Custom Text from Azure Cognitive Services. - OCR is used to extract text from printed and handwritten documents. + OCR is used to extract typeface and handwritten text from documents. Form Recognizer uses OCR to detect and extract information from forms and documents supported by AI to provide more structure and information to the text extraction. @@ -100,7 +100,7 @@ sections: - For signature and region labeling, don't include the surrounding text. - See [[Interpret and improve accuracy and confidence scores](concept-accuracy-confidence.md#ensure-high-model-accuracy)] + See [Interpret and improve accuracy and confidence scores](concept-accuracy-confidence.md#ensure-high-model-accuracy) - question: | What is the confidence score and how is it calculated?
@@ -154,7 +154,7 @@ sections: Form Recognizer offers the latest development options within the following platforms: - - [REST API](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/AnalyzeDocument) + - [REST API](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument) - [Form Recognizer Studio](https://formrecognizer.appliedai.azure.com/studio) @@ -173,11 +173,11 @@ sections: This table provides links to the latest SDK versions and shows the relationship between supported Form Recognizer SDK and API versions: | Supported Language | Azure SDK client-library|API reference |Supported API version| - | ----------- | --------|--------|:---------------------: | - | C#/.NET| [4.0.0-beta.3](https://azuresdkdocs.blob.core.windows.net/$web/dotnet/Azure.AI.FormRecognizer/4.0.0-beta.3/index.html)|[.NET SDK](/dotnet/api/azure.ai.formrecognizer.documentanalysis?view=azure-dotnet-preview&preserve-view=true) |2022-01-30-preview, 2021-09-30-preview, v2.1, v2.0 | - | Java | [4.0.0-beta.4](https://azuresdkdocs.blob.core.windows.net/$web/java/azure-ai-formrecognizer/4.0.0-beta.4/index.html)|[Java SDK](/java/api/overview/azure/ai-formrecognizer-readme?view=azure-java-preview&preserve-view=true)|2022-01-30-preview, 2021-09-30-preview, v2.1, v2.0 | - | JavaScript| [4.0.0-beta.3](https://azuresdkdocs.blob.core.windows.net/$web/javascript/azure-ai-form-recognizer/4.0.0-beta.3/index.html)|[JavaScript SDK](/javascript/api/@azure/ai-form-recognizer/?view=azure-node-preview&preserve-view=true) |2022-01-30-preview, 2021-09-30-preview, v2.1, v2.0 | - | Python | [3.2.0b3](https://azuresdkdocs.blob.core.windows.net/$web/python/azure-ai-formrecognizer/3.2.0b3/index.html) |[Python SDK](/python/api/azure-ai-formrecognizer/azure.ai.formrecognizer?view=azure-python-preview&preserve-view=true) |2022-01-30-preview, 2021-09-30-preview, v2.1, v2.0 | + | ----- | -----|-----|-----| + | C#/.NET| [4.0.0-beta.3](https://azuresdkdocs.blob.core.windows.net/$web/dotnet/Azure.AI.FormRecognizer/4.0.0-beta.3/index.html)|[.NET SDK](/dotnet/api/azure.ai.formrecognizer.documentanalysis?view=azure-dotnet-preview&preserve-view=true) |2022-06-30, 2022-01-30, 2021-09-30-preview, v2.1, v2.0 | + | Java | [4.0.0-beta.4](https://azuresdkdocs.blob.core.windows.net/$web/java/azure-ai-formrecognizer/4.0.0-beta.4/index.html)|[Java SDK](/java/api/overview/azure/ai-formrecognizer-readme?view=azure-java-preview&preserve-view=true)| 2022-06-30, 2022-01-30, 2021-09-30-preview, v2.1, v2.0 | + | JavaScript | [4.0.0-beta.3](https://azuresdkdocs.blob.core.windows.net/$web/javascript/azure-ai-form-recognizer/4.0.0-beta.3/index.html)|[JavaScript SDK](/javascript/api/@azure/ai-form-recognizer/?view=azure-node-preview&preserve-view=true) |2022-06-30, 2022-01-30, 2021-09-30-preview, v2.1, v2.0| + | Python | [3.2.0b3](https://azuresdkdocs.blob.core.windows.net/$web/python/azure-ai-formrecognizer/3.2.0b3/index.html) |[Python SDK](/python/api/azure-ai-formrecognizer/azure.ai.formrecognizer?view=azure-python-preview&preserve-view=true) |2022-06-30, 2022-01-30, 2021-09-30-preview, v2.1, v2.0| - question: | What is the difference between Form Recognizer v3.0 and v2.1 and how do I migrate to the latest version? @@ -209,10 +209,19 @@ sections: To ensure the best results, see [input requirements](concept-model-overview.md#input-requirements). - question: | - How can I specify a specific range of pages to be analyzed in a document? 
+ How can I specify a specific range of pages to be analyzed in a document? answer: | - There's a parameter `pages` supported in both v2.1 and v3.0 REST API that you can specify for multi-page PDF and TIFF documents. Accepted input includes single pages (for example,'1, 2' -> pages 1 and 2 will be processed), finite (for example '2-5' -> pages 2 to 5 will be processed) and open-ended ranges (for example '5-' -> all the pages from page 5 will be processed & for example, '-10' -> pages 1 to 10 will be processed). These parameters can be mixed together and ranges are allowed to overlap (for example, '-5, 1, 3, 5-10' - pages 1 to 10 will be processed). The service will accept the request if it can process at least one page of the document (for example, using '5-100' on a five page document is a valid input where page 5 will be processed). If no page range is provided, the entire document will be processed. + - The parameter `pages`(supported in both v2.1 and v3.0 REST API) enables you to specify pages for multi-page PDF and TIFF documents. Accepted input includes the following ranges: + + - Single pages (for example,'1, 2' -> pages 1 and 2 will be processed).- Finite (for example '2-5' -> pages 2 to 5 will be processed) + - Open-ended ranges (for example '5-' -> all the pages from page 5 will be processed & for example, '-10' -> pages 1 to 10 will be processed). + + - These parameters can be mixed together and ranges are allowed to overlap (for example, '-5, 1, 3, 5-10' - pages 1 to 10 will be processed). + + - The service will accept the request if it can process at least one page of the document. For example, using '5-100' on a five page document is a valid input where page 5 will be processed. + + - If no page range is provided, the entire document will be processed. - question: | Both Form Recognizer Studio and the FOTT sample labeling tool are available. Which one should I use? @@ -234,6 +243,12 @@ sections: - When you submit a document for analysis, all pages are analyzed unless you specify a page range with the `pages` parameter in your request. + - When analyzing Microsoft Word and HTML files with the new Read OCR model, pages are counted in blocks of 3,000 characters each. For example, if your document contains 7,000 characters, it will be counted as three pages as two pages with 3,000 characters each and one page with 1,000 characters. + + - In addition, if your Microsoft Word, Excel, and PowerPoint documents have embedded images, each image will be analyzed for text extraction and counted as a page. Therefore, the total analyzed pages for Microsoft Office documents will be equal to the sum of total text pages and total images analyzed. In the previous example if the document contains 2 embedded images, the total page count in the service output will be 3 + 2 equaling five pages. + + - When analyzing Microsoft Excel and PowerPoint documents with the new Read OCR model, each worksheet and slide is counted as one page respectively. + - Training a custom model is always free with Form Recognizer. You’re only charged when a model is used to analyze a document. - Container pricing is the same as cloud service pricing. @@ -254,7 +269,7 @@ sections: - If you find that you’re being throttled on the number of POST requests, consider adding a delay between the requests. - Increase the workload gradually. Avoid sharp changes. - + - [Create a support request](service-limits.md#create-and-submit-support-request) to increase transactions per second(TPS) limit. 
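The sketch below is one illustrative way to apply the "adding a delay between the requests" suggestion above; it isn't part of the original FAQ, and the model ID, file path, timings, and credentials are placeholders.

```python
# Minimal sketch of spacing out throttled submissions; model ID, path, timings, and credentials are placeholders.
import time

from azure.ai.formrecognizer import DocumentAnalysisClient
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import HttpResponseError

client = DocumentAnalysisClient("<endpoint>", AzureKeyCredential("<key>"))

def analyze_with_backoff(path, max_attempts=5):
    delay = 2  # seconds; grows after each throttled attempt
    for attempt in range(max_attempts):
        try:
            with open(path, "rb") as document:
                return client.begin_analyze_document("prebuilt-layout", document).result()
        except HttpResponseError as error:
            if error.status_code != 429 or attempt == max_attempts - 1:
                raise
            time.sleep(delay)
            delay *= 2  # back off instead of resubmitting immediately
```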
Learn more about Form Recognizer [service quotas and limits](service-limits.md) @@ -263,7 +278,7 @@ sections: How long will it take to analyze a document? answer: | Form Recognizer is a multi-tenanted service where latency for similar documents is comparable but not always identical. The time to analyze a document depends on the size (for example, number of pages) and associated content on each page. - + Latency is the amount of time it takes for an API server to handle and process an incoming request and deliver the outgoing response to the client. Occasional variability in latency and performance is inherent in any micro-service-based, stateless, asynchronous service that processes images and large documents at scale. While we're continuously scaling up the hardware and capacity and scaling capabilities, you may still see latency issues at run time. - name: Custom models @@ -323,7 +338,7 @@ sections: If the number of models I want to compose exceeds the upper limit of composed model, what are the alternatives? answer: | You can classify the documents before calling the custom model or consider [Custom neural model](concept-custom-neural.md): - + - Use [Read model](concept-read.md) and build a classification based on the extracted text from the documents and certain phrases using code, regular expressions, search etc. - If you want to extract the same fields from various structured, semi-structured, and unstructured documents. Consider using the deep learning [custom neural model](concept-custom-neural.md). Learn more about the [differences between custom template model and custom neural model](concept-custom.md#compare-model-features). @@ -362,7 +377,7 @@ sections: - Do your tables span across multiple pages? If so, to avoid having to label all of the pages, split the PDF into pages prior to sending it to Form Recognizer. Following the analysis, post-process the pages to a single table. - If you’re creating custom models, refer to [Labeling as tables](quickstarts/try-v3-form-recognizer-studio.md#labeling-as-tables). Dynamic tables have a variable number of rows for each given column. Fixed tables have a constant number of rows for each given column. - + - question: | How can I move my trained models from one environment (like beta) to another (like production)? answer: | @@ -389,6 +404,18 @@ sections: - name: Form Recognizer Studio questions: + - question: | + What permissions do I need to access Form Recognizer Studio? + answer: | + + - You need an active [Azure account](https://azure.microsoft.com/free/cognitive-services/) and subscription with at least a **Reader** role to access Form Recognizer Studio. + + - For **document analysis and prebuilt models**, you need full access—**Contributor** role—to at least one [Form Recognizer](https://portal.azure.com/#create/Microsoft.CognitiveServicesFormRecognizer) or [Cognitive Services multi-service](https://portal.azure.com/#create/Microsoft.CognitiveServicesAllInOne) resource to enter the analyze page. Once you access the model analyze page, you can change the endpoint and key to access other resources, if needed. + + - For custom models, you can use the endpoint and key of a [Form Recognizer](https://portal.azure.com/#create/Microsoft.CognitiveServicesFormRecognizer) or [Cognitive Services multi-service](https://portal.azure.com/#create/Microsoft.CognitiveServicesAllInOne) resource to create a project, and access to at least one blob storage account. 
+ + - For more information, *see* [Azure AD built-in roles](/azure/role-based-access-control/built-in-roles). + - question: | I have multiple pages in a document. Why are there only two pages analyzed in Form Recognizer Studio? answer: | @@ -400,7 +427,7 @@ sections: answer: | - In Form Recognizer Studio, you can select the top right gear button (Settings), under Directory, search and select the directory from the list and select on Switch Directory. You'll be prompted to sign in again after switching directory. - + - Switching subscriptions or resources can be done under Settings -> Resource tab. - name: Containers @@ -427,7 +454,7 @@ sections: Form Recognizer connected containers send billing information to Azure by using a Form Recognizer resource on your Azure account. Connected containers don't send customer data, such as the image or text that's being analyzed, to Microsoft. See the [Cognitive Services container FAQ](../../cognitive-services/containers/disconnected-container-faq.yml#how-does-billing-work) for an example of the information sent to Microsoft for billing. - question: | - I received an "OutOfQuota" error message: "Container isn't in a valid state. Subscription validation failed with status 'OutOfQuota'. Api Key is out of quota". + I received an "OutOfQuota" error message: "Container isn't in a valid state. Subscription validation failed with status 'OutOfQuota'. API key is out of quota". answer: | Form Recognizer connected containers send billing information to Azure by using a Form Recognizer resource on your Azure account. You could get this message if the containers can't communicate with the billing endpoint. diff --git a/articles/applied-ai-services/form-recognizer/generate-sas-tokens.md b/articles/applied-ai-services/form-recognizer/generate-sas-tokens.md deleted file mode 100644 index 63305541f6cdc..0000000000000 --- a/articles/applied-ai-services/form-recognizer/generate-sas-tokens.md +++ /dev/null @@ -1,157 +0,0 @@ ---- -title: Generate SAS tokens for containers and blobs with the Azure portal -description: Learn how to generate shared access signature (SAS) tokens for containers and blobs in the Azure portal. -ms.topic: how-to -author: laujan -manager: nitinme -ms.service: applied-ai-services -ms.subservice: forms-recognizer -ms.date: 09/23/2021 -ms.author: lajanuar -recommendations: false ---- - -# Generate SAS tokens for storage containers - -In this article, you'll learn how to generate user delegation shared access signature (SAS) tokens for Azure Blob Storage containers. A user delegation SAS token is signed with Azure Active Directory (Azure AD) credentials instead of Azure Storage keys. It provides superior secure and delegated access to resources in your Azure storage account. - -At a high level, here's how it works: your application provides the SAS token to Azure Storage as part of a request. If the storage service verifies that the shared access signature is valid, the request is authorized. If the shared access signature is considered invalid, the request is declined with error code 403 (Forbidden). - -Azure Blob Storage offers three types of resources: - -* **Storage** accounts provide a unique namespace in Azure for your data. -* **Containers** are located in storage accounts and organize sets of blobs. -* **Blobs** are located in containers and store text and binary data. - -> [!NOTE] -> -> * If your Azure storage account is protected by a virtual network or firewall, you can't grant access by using a SAS token. 
You'll have to use a [managed identity](managed-identity-byos.md) to grant access to your storage resource. -> * [Managed identity](managed-identity-byos.md) supports both privately and publicly accessible Azure Blob Storage accounts. -> - -## When to use a shared access signature - -* If you're using storage containers with public access, you can opt to use a SAS token to grant limited access to your storage resources. -* When you're training a custom model, your assembled set of training documents *must* be uploaded to an Azure Blob Storage container. You can grant permission to your training resources with a user delegation SAS token. - -## Prerequisites - -To get started, you'll need: - -* An active [Azure account](https://azure.microsoft.com/free/cognitive-services/). If you don't have one, you can [create a free account](https://azure.microsoft.com/free/). -* A [Form Recognizer](https://portal.azure.com/#create/Microsoft.CognitiveServicesFormRecognizer) or [Cognitive Services multi-service](https://portal.azure.com/#create/Microsoft.CognitiveServicesAllInOne) resource. -* A **standard performance** [Azure Blob Storage account](https://portal.azure.com/#create/Microsoft.StorageAccount-ARM). You'll create containers to store and organize your blob data within your storage account. If you don't know how to create an Azure storage account with a container, following these quickstarts: - - * [Create a storage account](../../storage/common/storage-account-create.md). When you create your storage account, select **Standard** performance in the **Instance details** > **Performance** field. - * [Create a container](../../storage/blobs/storage-quickstart-blobs-portal.md#create-a-container). When you create your container, set **Public access level** to **Container** (anonymous read access for containers and blobs) in the **New Container** window. - -## Upload your documents - -1. Go to the [Azure portal](https://portal.azure.com/#home). Select **Your storage account** > **Data storage** > **Containers**. - - :::image type="content" source="media/sas-tokens/data-storage-menu.png" alt-text="Screenshot that shows the Data storage menu in the Azure portal."::: - -1. Select a container from the list. -1. Select **Upload** from the menu at the top of the page. - - :::image type="content" source="media/sas-tokens/container-upload-button.png" alt-text="Screenshot that shows the container Upload button in the Azure portal."::: - - The **Upload blob** window appears. -1. Select your files to upload. - - :::image type="content" source="media/sas-tokens/upload-blob-window.png" alt-text="Screenshot that shows the Upload blob window in the Azure portal."::: - -> [!NOTE] -> By default, the REST API uses form documents located at the root of your container. You can also use data organized in subfolders if specified in the API call. For more information, see [Organize your data in subfolders](./build-training-data-set.md#organize-your-data-in-subfolders-optional). - -## Create a shared access signature with the Azure portal - -> [!IMPORTANT] -> -> Generate and retrieve the shared access signature for your container, not for the storage account itself. - -1. In the [Azure portal](https://portal.azure.com/#home), select **Your storage account** > **Containers**. -1. Select a container from the list. -1. Go to the right of the main window, and select the three ellipses associated with your chosen container. -1. Select **Generate SAS** from the dropdown menu to open the **Generate SAS** window. 
- - :::image type="content" source="media/sas-tokens/generate-sas.png" alt-text="Screenshot that shows the Generate SAS token dropdown menu in the Azure portal."::: - -1. Select **Signing method** > **User delegation key**. - -1. Define **Permissions** by selecting or clearing the appropriate checkbox. Make sure the **Read**, **Write**, **Delete**, and **List** permissions are selected. - - :::image type="content" source="media/sas-tokens/sas-permissions.png" alt-text="Screenshot that shows the SAS permission fields in the Azure portal."::: - - >[!IMPORTANT] - > - > * If you receive a message similar to the following one, you'll need to assign access to the blob data in your storage account: - > - > :::image type="content" source="media/sas-tokens/need-permissions.png" alt-text="Screenshot that shows the lack of permissions warning."::: - > - > * [Azure role-based access control](../../role-based-access-control/overview.md) (Azure RBAC) is the authorization system used to manage access to Azure resources. Azure RBAC helps you manage access and permissions for your Azure resources. - > * [Assign an Azure role for access to blob data](../../role-based-access-control/role-assignments-portal.md?tabs=current) shows you how to assign a role that allows for read, write, and delete permissions for your Azure storage container. For example, see [Storage Blob Data Contributor](../../role-based-access-control/built-in-roles.md#storage-blob-data-contributor). - -1. Specify the signed key **Start** and **Expiry** times. The value for the expiry time is a maximum of seven days from the start of the shared access signature. - -1. The **Allowed IP addresses** field is optional and specifies an IP address or a range of IP addresses from which to accept requests. If the request IP address doesn't match the IP address or address range specified on the SAS token, it won't be authorized. - -1. The **Allowed protocols** field is optional and specifies the protocol permitted for a request made with the shared access signature. The default value is HTTPS. - -1. Select **Generate SAS token and URL**. - -1. The **Blob SAS token** query string and **Blob SAS URL** appear in the lower area of the window. To use the Blob SAS token, append it to a storage service URI. - -1. Copy and paste the **Blob SAS token** and **Blob SAS URL** values in a secure location. They're displayed only once and can't be retrieved after the window is closed. - -## Create a shared access signature with the Azure CLI - -1. To create a user delegation SAS for a container by using the Azure CLI, make sure that you've installed version 2.0.78 or later. To check your installed version, use the `az --version` command. - -1. Call the [az storage container generate-sas](/cli/azure/storage/container#az-storage-container-generate-sas) command. - -1. The following parameters are required: - - * `auth-mode login`. This parameter ensures that requests made to Azure Storage are authorized with your Azure AD credentials. - * `as-user`. This parameter indicates that the generated SAS is a user delegation SAS. - -1. Supported permissions for a user delegation SAS on a container include Add (a), Create (c), Delete (d), List (l), Read (r), and Write (w). Make sure **r**, **w**, **d**, and **l** are included as part of the permissions parameters. - -1. When you create a user delegation SAS with the Azure CLI, the maximum interval during which the user delegation key is valid is seven days from the start date. 
Specify an expiry time for the shared access signature that's within seven days of the start time. For more information, see [Create a user delegation SAS for a container or blob with the Azure CLI](../../storage/blobs/storage-blob-user-delegation-sas-create-cli.md#use-azure-ad-credentials-to-secure-a-sas). - -### Example - -Generate a user delegation SAS. Replace the placeholder values in the brackets with your own values: - -```azurecli-interactive -az storage container generate-sas \ - --account-name \ - --name \ - --permissions rwdl \ - --expiry \ - --auth-mode login \ - --as-user -``` - -## Use your Blob SAS URL - -Two options are available: - -* To use your Blob SAS URL with the [REST API](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v2-1/operations/TrainCustomModelAsync), add the SAS URL to the request body: - - ```json - { - "source":"" - } - ``` - -* To use your Blob SAS URL with the [Form Recognizer labeling tool](https://fott-2-1.azurewebsites.net/connections/create), add the SAS URL to the **Connection Settings** > **Azure blob container** > **SAS URI** field: - - :::image type="content" source="media/sas-tokens/fott-add-sas-uri.png" alt-text="Screenshot that shows the SAS URI field."::: - -That's it. You've learned how to generate SAS tokens to authorize how clients access your data. - -## Next step - -> [!div class="nextstepaction"] -> [Build a training data set](build-training-data-set.md) \ No newline at end of file diff --git a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/csharp-read.md b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/csharp-read.md index 45bfd24f0a69c..b895b3d935590 100644 --- a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/csharp-read.md +++ b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/csharp-read.md @@ -1,6 +1,6 @@ --- title: "How to use the read model with C#/.NET programming language" -description: Use the Form Recognizer prebuilt-read model and C# to extract printed and handwritten text from documents. +description: Use the Form Recognizer prebuilt-read model and C# to extract printed (typeface) and handwritten text from documents. author: laujan manager: nitinme ms.service: applied-ai-services diff --git a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/java-read.md b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/java-read.md index c4982257af454..1ee5071670908 100644 --- a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/java-read.md +++ b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/java-read.md @@ -1,6 +1,6 @@ --- title: "How to use the read model with Java programming language" -description: Use the Form Recognizer prebuilt-read model and Java to extract printed and handwritten text from documents. +description: Use the Form Recognizer prebuilt-read model and Java to extract printed (typeface) and handwritten text from documents. 
author: laujan manager: nitinme ms.service: applied-ai-services diff --git a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/javascript-read.md b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/javascript-read.md index 1dc3e5affe333..da8373668309c 100644 --- a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/javascript-read.md +++ b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/javascript-read.md @@ -1,6 +1,6 @@ --- title: "How to use the read model with JavaScript programming language" -description: Use the Form Recognizer prebuilt-read model and JavaScript to extract printed and handwritten text from documents. +description: Use the Form Recognizer prebuilt-read model and JavaScript to extract printed (typeface) and handwritten text from documents. author: laujan manager: nitinme ms.service: applied-ai-services diff --git a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/python-read.md b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/python-read.md index afa500a5c985b..dd42ae66c6b67 100644 --- a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/python-read.md +++ b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/python-read.md @@ -1,6 +1,6 @@ --- title: "How to use the read model with Python programming language" -description: Use the Form Recognizer prebuilt-read model and Python to extract printed and handwritten text from documents. +description: Use the Form Recognizer prebuilt-read model and Python to extract printed (typeface) and handwritten text from documents. author: laujan manager: nitinme ms.service: applied-ai-services diff --git a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/rest-api-read.md b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/rest-api-read.md index 521275a21c7fe..33519a3c0db0b 100644 --- a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/rest-api-read.md +++ b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/rest-api-read.md @@ -1,6 +1,6 @@ --- title: "How to use the read model with the Form Recognizer REST API" -description: Use the Form Recognizer prebuilt-read model and REST API to extract printed and handwritten text from documents. +description: Use the Form Recognizer prebuilt-read model and REST API to extract printed (typeface) and handwritten text from documents. author: laujan manager: nitinme ms.service: applied-ai-services @@ -45,7 +45,7 @@ Before you run the following cURL command, make the following changes: 1. Replace `{key}` with the key value from your Form Recognizer instance in the Azure portal. 
```bash -curl -v -i POST "{endpoint}/formrecognizer/documentModels/prebuilt-read:analyze?api-version=2022-01-30-preview" -H "Content-Type: application/json" -H "Ocp-Apim-Subscription-Key: {key}" --data-ascii "{'urlSource': 'https://raw.githubusercontent.com/Azure-Samples/cognitive-services-REST-api-samples/master/curl/form-recognizer/rest-api/read.png'}" +curl -v -i POST "{endpoint}/formrecognizer/documentModels/prebuilt-read:analyze?api-version=2022-06-30" -H "Content-Type: application/json" -H "Ocp-Apim-Subscription-Key: {key}" --data-ascii "{'urlSource': 'https://raw.githubusercontent.com/Azure-Samples/cognitive-services-REST-api-samples/master/curl/form-recognizer/rest-api/read.png'}" ``` #### Operation-Location @@ -56,14 +56,14 @@ You'll receive a `202 (Success)` response that includes an **Operation-Location* ### Get Request -After you've called the [**Analyze document**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/AnalyzeDocument) API, call the [**Get analyze result**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/GetAnalyzeDocumentResult) API to get the status of the operation and the extracted data. Before you run the command, make these changes: +After you've called the [**Analyze document**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument) API, call the [**Get analyze result**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/GetAnalyzeDocumentResult) API to get the status of the operation and the extracted data. Before you run the command, make these changes: 1. Replace `{endpoint}` with the endpoint value from your Form Recognizer instance in the Azure portal. 1. Replace `{key}` with the key value from your Form Recognizer instance in the Azure portal. 1. Replace `{resultID}` with the result ID from the [Operation-Location](#operation-location) header. ```bash -curl -v -X GET "{endpoint}/formrecognizer/documentModels/prebuilt-read/analyzeResults/{resultId}?api-version=2022-01-30-preview" -H "Ocp-Apim-Subscription-Key: {key}" +curl -v -X GET "{endpoint}/formrecognizer/documentModels/prebuilt-read/analyzeResults/{resultId}?api-version=2022-06-30" -H "Ocp-Apim-Subscription-Key: {key}" ``` ### Read Model Output @@ -76,7 +76,7 @@ You'll receive a `200 (Success)` response with JSON output. The first field, `"s "createdDateTime": "2022-04-08T00:36:48Z", "lastUpdatedDateTime": "2022-04-08T00:36:50Z", "analyzeResult": { - "apiVersion": "2022-01-30-preview", + "apiVersion": "2022-06-30", "modelId": "prebuilt-read", "stringIndexType": "textElements", "content": "While healthcare is still in the early stages of its Al journey, we\nare seeing...", diff --git a/articles/applied-ai-services/form-recognizer/how-to-guides/use-prebuilt-read.md b/articles/applied-ai-services/form-recognizer/how-to-guides/use-prebuilt-read.md index fa0ba04185198..da08585658225 100644 --- a/articles/applied-ai-services/form-recognizer/how-to-guides/use-prebuilt-read.md +++ b/articles/applied-ai-services/form-recognizer/how-to-guides/use-prebuilt-read.md @@ -15,13 +15,13 @@ recommendations: false # Use the Read Model - In this how-to guide, you'll learn to use Azure Form Recognizer's [read model](../concept-read.md) to extract printed and handwritten text from documents. The read model can detect lines, words, locations, and languages. 
You can use a programming language of your choice or the REST API. We recommend that you use the free service when you're learning the technology. Remember that the number of free pages is limited to 500 per month. + In this how-to guide, you'll learn to use Azure Form Recognizer's [read model](../concept-read.md) to extract typeface and handwritten text from documents. The read model can detect lines, words, locations, and languages. You can use a programming language of your choice or the REST API. We recommend that you use the free service when you're learning the technology. Remember that the number of free pages is limited to 500 per month. The read model is the core of all the other Form Recognizer models. Layout, general document, custom, and prebuilt models all use the read model as a foundation for extracting texts from documents. >[!NOTE] > Form Recognizer v3.0 is currently in public preview. Some features may not be supported or have limited capabilities. -The current API version is ```2022-01-30-preview```. +The current API version is ```2022-06-30```. ::: zone pivot="programming-language-csharp" diff --git a/articles/applied-ai-services/form-recognizer/includes/get-started/csharp.md b/articles/applied-ai-services/form-recognizer/includes/get-started/csharp.md index 57b3269c17e43..6000f1afa249e 100644 --- a/articles/applied-ai-services/form-recognizer/includes/get-started/csharp.md +++ b/articles/applied-ai-services/form-recognizer/includes/get-started/csharp.md @@ -230,7 +230,7 @@ FormRecognizerClient recognizerClient = AuthenticateClient(); if (invoice.Fields.TryGetValue("InvoiceId", out invoiceIdField)) { if (invoiceIdField.Value.ValueType == FieldValueType.String) { string invoiceId = invoiceIdField.Value.AsString(); - Console.WriteLine($ " Invoice Id: '{invoiceId}', with confidence {invoiceIdField.Confidence}"); + Console.WriteLine($" Invoice Id: '{invoiceId}', with confidence {invoiceIdField.Confidence}"); } } @@ -238,7 +238,7 @@ FormRecognizerClient recognizerClient = AuthenticateClient(); if (invoice.Fields.TryGetValue("InvoiceDate", out invoiceDateField)) { if (invoiceDateField.Value.ValueType == FieldValueType.Date) { DateTime invoiceDate = invoiceDateField.Value.AsDate(); - Console.WriteLine($ " Invoice Date: '{invoiceDate}', with confidence {invoiceDateField.Confidence}"); + Console.WriteLine($" Invoice Date: '{invoiceDate}', with confidence {invoiceDateField.Confidence}"); } } @@ -246,7 +246,7 @@ FormRecognizerClient recognizerClient = AuthenticateClient(); if (invoice.Fields.TryGetValue("DueDate", out dueDateField)) { if (dueDateField.Value.ValueType == FieldValueType.Date) { DateTime dueDate = dueDateField.Value.AsDate(); - Console.WriteLine($ " Due Date: '{dueDate}', with confidence {dueDateField.Confidence}"); + Console.WriteLine($" Due Date: '{dueDate}', with confidence {dueDateField.Confidence}"); } } @@ -254,7 +254,7 @@ FormRecognizerClient recognizerClient = AuthenticateClient(); if (invoice.Fields.TryGetValue("VendorName", out vendorNameField)) { if (vendorNameField.Value.ValueType == FieldValueType.String) { string vendorName = vendorNameField.Value.AsString(); - Console.WriteLine($ " Vendor Name: '{vendorName}', with confidence {vendorNameField.Confidence}"); + Console.WriteLine($" Vendor Name: '{vendorName}', with confidence {vendorNameField.Confidence}"); } } @@ -262,7 +262,7 @@ FormRecognizerClient recognizerClient = AuthenticateClient(); if (invoice.Fields.TryGetValue("VendorAddress", out vendorAddressField)) { if (vendorAddressField.Value.ValueType 
== FieldValueType.String) { string vendorAddress = vendorAddressField.Value.AsString(); - Console.WriteLine($ " Vendor Address: '{vendorAddress}', with confidence {vendorAddressField.Confidence}"); + Console.WriteLine($" Vendor Address: '{vendorAddress}', with confidence {vendorAddressField.Confidence}"); } } @@ -270,7 +270,7 @@ FormRecognizerClient recognizerClient = AuthenticateClient(); if (invoice.Fields.TryGetValue("CustomerName", out customerNameField)) { if (customerNameField.Value.ValueType == FieldValueType.String) { string customerName = customerNameField.Value.AsString(); - Console.WriteLine($ " Customer Name: '{customerName}', with confidence {customerNameField.Confidence}"); + Console.WriteLine($" Customer Name: '{customerName}', with confidence {customerNameField.Confidence}"); } } @@ -278,7 +278,7 @@ FormRecognizerClient recognizerClient = AuthenticateClient(); if (invoice.Fields.TryGetValue("CustomerAddress", out customerAddressField)) { if (customerAddressField.Value.ValueType == FieldValueType.String) { string customerAddress = customerAddressField.Value.AsString(); - Console.WriteLine($ " Customer Address: '{customerAddress}', with confidence {customerAddressField.Confidence}"); + Console.WriteLine($" Customer Address: '{customerAddress}', with confidence {customerAddressField.Confidence}"); } } @@ -286,7 +286,7 @@ FormRecognizerClient recognizerClient = AuthenticateClient(); if (invoice.Fields.TryGetValue("CustomerAddressRecipient", out customerAddressRecipientField)) { if (customerAddressRecipientField.Value.ValueType == FieldValueType.String) { string customerAddressRecipient = customerAddressRecipientField.Value.AsString(); - Console.WriteLine($ " Customer address recipient: '{customerAddressRecipient}', with confidence {customerAddressRecipientField.Confidence}"); + Console.WriteLine($" Customer address recipient: '{customerAddressRecipient}', with confidence {customerAddressRecipientField.Confidence}"); } } @@ -294,7 +294,7 @@ FormRecognizerClient recognizerClient = AuthenticateClient(); if (invoice.Fields.TryGetValue("InvoiceTotal", out invoiceTotalField)) { if (invoiceTotalField.Value.ValueType == FieldValueType.Float) { float invoiceTotal = invoiceTotalField.Value.AsFloat(); - Console.WriteLine($ " Invoice Total: '{invoiceTotal}', with confidence {invoiceTotalField.Confidence}"); + Console.WriteLine($" Invoice Total: '{invoiceTotal}', with confidence {invoiceTotalField.Confidence}"); } } } @@ -318,4 +318,4 @@ Congratulations! 
In this quickstart, you used the Form Recognizer C# SDK to anal > [REST API v2.1 reference documentation](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v2-1/operations/5ed8c9843c2794cbb1a96291) > [!div class="nextstepaction"] -> [Form Recognizer C#/.NET reference library](/dotnet/api/overview/azure/AI.FormRecognizer-readme) \ No newline at end of file +> [Form Recognizer C#/.NET reference library](/dotnet/api/overview/azure/AI.FormRecognizer-readme) diff --git a/articles/applied-ai-services/form-recognizer/index.yml b/articles/applied-ai-services/form-recognizer/index.yml index d2d0a400e4014..624d570266f4c 100644 --- a/articles/applied-ai-services/form-recognizer/index.yml +++ b/articles/applied-ai-services/form-recognizer/index.yml @@ -85,8 +85,8 @@ landingContent: url: v3-migration-guide.md - text: Use the read model url: how-to-guides/use-prebuilt-read.md - - text: Generate SAS tokens for Azure Blob containers - url: generate-sas-tokens.md + - text: Create SAS tokens for storage containers + url: create-sas-tokens.md - text: Build a custom model (v3.0) url: how-to-guides/build-custom-model-v3.md - text: Compose custom models (v3.0) diff --git a/articles/applied-ai-services/form-recognizer/language-support.md b/articles/applied-ai-services/form-recognizer/language-support.md index b6023261f9bd3..8e51520c1ea80 100644 --- a/articles/applied-ai-services/form-recognizer/language-support.md +++ b/articles/applied-ai-services/form-recognizer/language-support.md @@ -7,7 +7,7 @@ manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: overview -ms.date: 04/22/2022 +ms.date: 06/06/2022 ms.author: lajanuar --- @@ -159,12 +159,25 @@ Pre-Built Receipt and Business Cards support all English receipts and business c |English (India|`en-in`| |English (United States)| `en-us`| +## Business card model + +The **2022-06-30-preview** release includes Japanese language support: + +|Language| Locale code | +|:-----|:----:| +| Japanese | `ja` | + ## Invoice model Language| Locale code | |:-----|:----:| -|English (United States)|en-us| -|Spanish (preview) | es | +|English (United States) |en-US| +|Spanish| es| +|German (**2022-06-30-preview**)| de| +|French (**2022-06-30-preview**)| fr| +|Italian (**2022-06-30-preview**)|it| +|Portuguese (**2022-06-30-preview**)|pt| +|Dutch (**2022-06-30-preview**)| nl| ## ID documents diff --git a/articles/applied-ai-services/form-recognizer/media/quickstarts/form-recognizer-demo-preview3.gif b/articles/applied-ai-services/form-recognizer/media/quickstarts/form-recognizer-demo-preview3.gif new file mode 100644 index 0000000000000..24bd7f6eadddd Binary files /dev/null and b/articles/applied-ai-services/form-recognizer/media/quickstarts/form-recognizer-demo-preview3.gif differ diff --git a/articles/applied-ai-services/form-recognizer/media/quickstarts/form-recognizer-general-document-demo-preview3.gif b/articles/applied-ai-services/form-recognizer/media/quickstarts/form-recognizer-general-document-demo-preview3.gif new file mode 100644 index 0000000000000..1261894a2695a Binary files /dev/null and b/articles/applied-ai-services/form-recognizer/media/quickstarts/form-recognizer-general-document-demo-preview3.gif differ diff --git a/articles/applied-ai-services/form-recognizer/media/studio/form-recognizer-studio-layout-newspaper.png b/articles/applied-ai-services/form-recognizer/media/studio/form-recognizer-studio-layout-newspaper.png new file mode 100644 index 0000000000000..f51cf591f7d80 Binary files /dev/null and 
b/articles/applied-ai-services/form-recognizer/media/studio/form-recognizer-studio-layout-newspaper.png differ diff --git a/articles/applied-ai-services/form-recognizer/media/studio/form-recognizer-studio-layout-pod-2.png b/articles/applied-ai-services/form-recognizer/media/studio/form-recognizer-studio-layout-pod-2.png new file mode 100644 index 0000000000000..42e784cb6c4de Binary files /dev/null and b/articles/applied-ai-services/form-recognizer/media/studio/form-recognizer-studio-layout-pod-2.png differ diff --git a/articles/applied-ai-services/form-recognizer/media/studio/form-recognizer-studio-layout-pod.png b/articles/applied-ai-services/form-recognizer/media/studio/form-recognizer-studio-layout-pod.png new file mode 100644 index 0000000000000..23f6bb5355131 Binary files /dev/null and b/articles/applied-ai-services/form-recognizer/media/studio/form-recognizer-studio-layout-pod.png differ diff --git a/articles/applied-ai-services/form-recognizer/overview.md b/articles/applied-ai-services/form-recognizer/overview.md index 2676c00642751..63aa94200a5a9 100644 --- a/articles/applied-ai-services/form-recognizer/overview.md +++ b/articles/applied-ai-services/form-recognizer/overview.md @@ -7,12 +7,11 @@ manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: overview -ms.date: 03/08/2022 +ms.date: 06/06/2022 ms.author: lajanuar recommendations: false keywords: automated data processing, document processing, automated data entry, forms processing #Customer intent: As a developer of form-processing software, I want to learn what the Form Recognizer service does so I can determine if I should use it. -ms.custom: ignite-fall-2021 --- @@ -25,8 +24,8 @@ Form Recognizer uses the following models to easily identify, extract, and analy **Document analysis models** -* [**Read model**](concept-read.md) | Extract printed and handwritten text lines, words, locations, and detected languages from documents and images. -* [**Layout model**](concept-layout.md) | Extract text, tables, selection marks, and structure information from documents (PDF and TIFF) and images (JPG, PNG, and BMP). +* [**Read model**](concept-read.md) | Extract text lines, words, locations, and detected languages from documents and images. +* [**Layout model**](concept-layout.md) | Extract text, tables, selection marks, and structure information from documents and images. * [**General document model**](concept-general-document.md) | Extract key-value pairs, selection marks, and entities from documents. **Prebuilt models** @@ -49,8 +48,9 @@ This section helps you decide which Form Recognizer v3.0 supported feature you s | What type of document do you want to analyze?| How is the document formatted? | Your best solution | | -----------------|-------------------| ----------| |
• **W-2 Form**| Is your W-2 document composed in United States English (en-US) text?|• If **Yes**, use the [**W-2 Form**](concept-w2.md) model. • If **No**, use the [**Layout**](concept-layout.md) or [**General document (preview)**](concept-general-document.md) model.|
-|• **Text-only document**| Is your text-only document _printed_ in a [supported language](language-support.md#read-layout-and-custom-form-template-model) or, if handwritten, is it composed in English?|• If **Yes**, use the [**Read**](concept-invoice.md) model. • If **No**, use the [**Layout**](concept-layout.md) or [**General document (preview)**](concept-general-document.md) model.|
-|• **Invoice**| Is your invoice document composed in English or Spanish text?|• If **Yes**, use the [**Invoice**](concept-invoice.md) model. • If **No**, use the [**Layout**](concept-layout.md) or [**General document (preview)**](concept-general-document.md) model.|
+|• **Primarily text content**| Is your document _printed_ in a [supported language](language-support.md#read-layout-and-custom-form-template-model) and are you only interested in text and not tables, selection marks, and the structure?|• If **Yes** to text-only extraction, use the [**Read**](concept-read.md) model. • If **No**, because you also need structure information, use the [**Layout**](concept-layout.md) model.|
+|• **General structured document**| Is your document mostly structured and does it contain a few fields and values that may not be covered by the other prebuilt models?|• If **Yes**, use the [**General document (preview)**](concept-general-document.md) model. • If **No**, because the fields and values are complex and highly variable, train and build a [**Custom**](how-to-guides/build-custom-model-v3.md) model.|
+|• **Invoice**| Is your invoice document composed in a [supported language](language-support.md#invoice-model) text?|• If **Yes**, use the [**Invoice**](concept-invoice.md) model. • If **No**, use the [**Layout**](concept-layout.md) or [**General document (preview)**](concept-general-document.md) model.|
|• **Receipt** • **Business card**| Is your receipt or business card document composed in English text? |• If **Yes**, use the [**Receipt**](concept-receipt.md) or [**Business Card**](concept-business-card.md) model. • If **No**, use the [**Layout**](concept-layout.md) or [**General document (preview)**](concept-general-document.md) model.|
|• **ID document**| Is your ID document a US driver's license or an international passport?|• If **Yes**, use the [**ID document**](concept-id-document.md) model. • If **No**, use the [**Layout**](concept-layout.md) or [**General document (preview)**](concept-general-document.md) model.|
|• **Form** or **Document**| Is your form or document an industry-standard format commonly used in your business or industry?|• If **Yes**, use the [**Layout**](concept-layout.md) or [**General document (preview)**](concept-general-document.md). • If **No**, you can [**Train and build a custom model**](quickstarts/try-sample-label-tool.md#train-a-custom-form-model).|
@@ -68,7 +68,7 @@ The following features and development options are supported by the Form Recogn
|[🆕 **General document model**](concept-general-document.md)|Extract text, tables, structure, key-value pairs and, named entities.|• [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com/studio/document) • [**REST API**](quickstarts/try-v3-rest-api.md#reference-table) • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md#general-document-model) • [**Python SDK**](quickstarts/try-v3-python-sdk.md#general-document-model) • [**Java SDK**](quickstarts/try-v3-java-sdk.md#general-document-model) • [**JavaScript**](quickstarts/try-v3-javascript-sdk.md#general-document-model)|
|[**Layout model**](concept-layout.md) | Extract text, selection marks, and tables structures, along with their bounding box coordinates, from forms and documents. Layout API has been updated to a prebuilt model. |• [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com/studio/layout) • [**REST API**](quickstarts/try-v3-rest-api.md#reference-table) • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md#layout-model) • [**Python SDK**](quickstarts/try-v3-python-sdk.md#layout-model) • [**Java SDK**](quickstarts/try-v3-java-sdk.md#layout-model) • [**JavaScript**](quickstarts/try-v3-javascript-sdk.md#layout-model)|
|[**Custom model (updated)**](concept-custom.md) | Extraction and analysis of data from forms and documents specific to distinct business data and use cases. • Custom model API v3.0 supports **signature detection for custom template (custom form) models**. • Custom model API v3.0 offers a new model type **Custom Neural** or custom document to analyze unstructured documents. | [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com/studio/custommodel/projects) • [**REST API**](quickstarts/try-v3-rest-api.md) • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md) • [**Python SDK**](quickstarts/try-v3-python-sdk.md) • [**Java SDK**](quickstarts/try-v3-java-sdk.md) • [**JavaScript**](quickstarts/try-v3-javascript-sdk.md)|
-|[**Invoice model**](concept-invoice.md) | Automated data processing and extraction of key information from sales invoices. |• [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=invoice) • [**REST API**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/AnalyzeDocument) • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md#prebuilt-model) • [**Python SDK**](quickstarts/try-v3-python-sdk.md#prebuilt-model)|
+|[**Invoice model**](concept-invoice.md) | Automated data processing and extraction of key information from sales invoices. |• [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=invoice) • [**REST API**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument) • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md#prebuilt-model) • [**Python SDK**](quickstarts/try-v3-python-sdk.md#prebuilt-model)|
|[**Receipt model (updated)**](concept-receipt.md) | Automated data processing and extraction of key information from sales receipts. Receipt model v3.0 supports processing of **single-page hotel receipts**.|• [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=receipt) • [**REST API**](quickstarts/try-v3-rest-api.md) • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md#prebuilt-model) • [**Python SDK**](quickstarts/try-v3-python-sdk.md#prebuilt-model) • [**Java SDK**](quickstarts/try-v3-java-sdk.md#prebuilt-model) • [**JavaScript**](quickstarts/try-v3-javascript-sdk.md#prebuilt-model)|
|[**ID document model (updated)**](concept-id-document.md) |Automated data processing and extraction of key information from US driver's licenses and international passports. Prebuilt ID document API supports the **extraction of endorsements, restrictions, and vehicle classifications from US driver's licenses**. |• [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=idDocument) • [**REST API**](quickstarts/try-v3-rest-api.md) • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md#prebuilt-model) • [**Python SDK**](quickstarts/try-v3-python-sdk.md#prebuilt-model) • [**Java SDK**](quickstarts/try-v3-java-sdk.md#prebuilt-model) • [**JavaScript**](quickstarts/try-v3-javascript-sdk.md#prebuilt-model)|
|[**Business card model**](concept-business-card.md) |Automated data processing and extraction of key information from business cards.|• [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=businessCard) • [**REST API**](quickstarts/try-v3-rest-api.md) • [**C# SDK**](quickstarts/try-v3-csharp-sdk.md#prebuilt-model) • [**Python SDK**](quickstarts/try-v3-python-sdk.md#prebuilt-model) • [**Java SDK**](quickstarts/try-v3-java-sdk.md#prebuilt-model) • [**JavaScript**](quickstarts/try-v3-javascript-sdk.md#prebuilt-model)
                  | @@ -108,7 +108,7 @@ This documentation contains the following article types: > [!div class="checklist"] > > * Try our [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com) -> * Explore the [**REST API reference documentation**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/AnalyzeDocument) to learn more. +> * Explore the [**REST API reference documentation**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument) to learn more. > * If you're familiar with a previous version of the API, see the [**What's new**](./whats-new.md) article to learn of recent changes. ### [Form Recognizer v2.1](#tab/v2-1) diff --git a/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-form-recognizer-studio.md b/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-form-recognizer-studio.md index d03f0343268fe..e52e77568744c 100644 --- a/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-form-recognizer-studio.md +++ b/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-form-recognizer-studio.md @@ -19,7 +19,7 @@ ms.custom: ignite-fall-2021, mode-ui [Form Recognizer Studio preview](https://formrecognizer.appliedai.azure.com/) is an online tool for visually exploring, understanding, and integrating features from the Form Recognizer service in your applications. Get started with exploring the pre-trained models with sample documents or your own. Create projects to build custom template models and reference the models in your applications using the [Python SDK preview](try-v3-python-sdk.md) and other quickstarts. -:::image border="true" type="content" source="../media/quickstarts/form-recognizer-demo-v3p2.gif" alt-text="Form Recognizer Studio demo"::: +:::image border="true" type="content" source="../media/quickstarts/form-recognizer-demo-preview3.gif" alt-text="Selecting the Layout API to analyze a newspaper document in the Form Recognizer Studio."::: ## Prerequisites for new users @@ -39,23 +39,25 @@ Prebuilt models help you add Form Recognizer features to your apps without havin * [**ID document**](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=idDocument): extract text and key information from driver licenses and international passports. * [**Business card**](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=businessCard): extract text and key information from business cards. -After you've completed the prerequisites, navigate to the [Form Recognizer Studio General Documents preview](https://formrecognizer.appliedai.azure.com). In the following example, we use the General Documents feature. The steps to use other pre-trained features like [W2 tax form](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=tax.us.w2), [Read](https://formrecognizer.appliedai.azure.com/studio/read), [Layout](https://formrecognizer.appliedai.azure.com/studio/layout), [Invoice](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=invoice), [Receipt](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=receipt), [Business card](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=businessCard), and [ID documents](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=idDocument) models are similar. 
+After you've completed the prerequisites, navigate to the [Form Recognizer Studio General Documents preview](https://formrecognizer.appliedai.azure.com).
+
+In the following example, we use the General Documents feature. The steps to use other pre-trained features like [W2 tax form](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=tax.us.w2), [Read](https://formrecognizer.appliedai.azure.com/studio/read), [Layout](https://formrecognizer.appliedai.azure.com/studio/layout), [Invoice](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=invoice), [Receipt](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=receipt), [Business card](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=businessCard), and [ID documents](https://formrecognizer.appliedai.azure.com/studio/prebuilt?formType=idDocument) models are similar.
+
+ :::image border="true" type="content" source="../media/quickstarts/form-recognizer-general-document-demo-preview3.gif" alt-text="Selecting the General Document API to analyze a document in the Form Recognizer Studio.":::

1. Select a Form Recognizer service feature from the Studio home page.

-1. This is a one-time step unless you've already selected the service resource from prior use. Select your Azure subscription, resource group, and resource. (You can change the resources anytime in "Settings" in the top menu.) Review and confirm your selections.
+1. This step is a one-time process unless you've already selected the service resource from prior use. Select your Azure subscription, resource group, and resource. (You can change the resources anytime in "Settings" in the top menu.) Review and confirm your selections.

1. Select the Analyze command to run analysis on the sample document or try your document by using the Add command.

-1. Observe the highlighted extracted content in the document view. Hover your move over the keys and values to see details.
-
1. Use the controls at the bottom of the screen to zoom in and out and rotate the document view.

-1. Show and hide the text, tables, and selection marks layers to focus on each one of them at a time.
+1. Observe the highlighted extracted content in the document view. Hover your mouse over the keys and values to see details.

-1. In the output section's Result tab, browse the JSON output to understand the service response format. Copy and download to jumpstart integration.
+1. In the output section's Result tab, browse the JSON output to understand the service response format.

-:::image border="true" type="content" source="../media/quickstarts/layout-get-started-v2.gif" alt-text="Form Recognizer Layout example":::
+1. In the Code tab, browse the sample code for integration. Copy and download to get started.

## Additional prerequisites for custom projects

@@ -70,9 +72,9 @@ A **standard performance** [**Azure Blob Storage account**](https://portal.azure

### Configure CORS

-[CORS (Cross Origin Resource Sharing)](/rest/api/storageservices/cross-origin-resource-sharing--cors--support-for-the-azure-storage-services) needs to be configured on your Azure storage account for it to be accessible from the Form Recognizer Studio. To configure CORS in the Azure portal, you'll need access to the CORS blade of your storage account.
+[CORS (Cross Origin Resource Sharing)](/rest/api/storageservices/cross-origin-resource-sharing--cors--support-for-the-azure-storage-services) needs to be configured on your Azure storage account for it to be accessible from the Form Recognizer Studio. To configure CORS in the Azure portal, you'll need access to the CORS tab of your storage account. -1. Select the CORS blade for the storage account. +1. Select the CORS tab for the storage account. :::image type="content" source="../media/quickstarts/cors-setting-menu.png" alt-text="Screenshot of the CORS setting menu in the Azure portal."::: diff --git a/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-javascript-sdk.md b/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-javascript-sdk.md index 2a4395d504c34..d80581f3767e1 100644 --- a/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-javascript-sdk.md +++ b/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-javascript-sdk.md @@ -7,7 +7,7 @@ manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: quickstart -ms.date: 03/16/2022 +ms.date: 06/06/2022 ms.author: lajanuar recommendations: false ms.custom: ignite-fall-2021, mode-api @@ -20,17 +20,17 @@ ms.custom: ignite-fall-2021, mode-api [Reference documentation](/javascript/api/@azure/ai-form-recognizer/?view=azure-node-preview&preserve-view=true) | [Library source code](https://github.com/Azure/azure-sdk-for-js/tree/@azure/ai-form-recognizer_4.0.0-beta.3/sdk/formrecognizer/ai-form-recognizer/) | [Package (npm)](https://www.npmjs.com/package/@azure/ai-form-recognizer/v/4.0.0-beta.3) | [Samples](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/formrecognizer/ai-form-recognizer/samples/v4-beta/javascript/README.md) -Get started with Azure Form Recognizer using the JavaScript programming language. Azure Form Recognizer is a cloud-based Azure Applied AI Service that uses machine learning to extract key-value pairs, text, and tables from your documents. You can easily call Form Recognizer models by integrating our client library SDks into your workflows and applications. We recommend that you use the free service when you're learning the technology. Remember that the number of free pages is limited to 500 per month. +Get started with Azure Form Recognizer using the JavaScript programming language. Azure Form Recognizer is a cloud-based Azure Applied AI Service that uses machine learning to extract key-value pairs, text, and tables from your documents. You can easily call Form Recognizer models by integrating our client library SDKs into your workflows and applications. We recommend that you use the free service when you're learning the technology. Remember that the number of free pages is limited to 500 per month. To learn more about Form Recognizer features and development options, visit our [Overview](../overview.md#form-recognizer-features-and-development-options) page. In this quickstart you'll use following features to analyze and extract data and values from forms and documents: -* [🆕 **General document**](#general-document-model)—Analyze and extract common fields from specific document types using a pre-trained invoice model. +* [🆕 **General document**](#general-document-model)—Analyze and extract key-value pairs, selection marks, and entities from documents. * [**Layout**](#layout-model)—Analyze and extract tables, lines, words, and selection marks like radio buttons and check boxes in forms documents, without the need to train a model. 
-* [**Prebuilt Invoice**](#prebuilt-model)—Analyze and extract common fields from specific document types using a pre-trained model. +* [**Prebuilt Invoice**](#prebuilt-model)—Analyze and extract common fields from specific document types using a pre-trained invoice model. ## Prerequisites @@ -121,9 +121,9 @@ Extract text, tables, structure, key-value pairs, and named entities from docume const { AzureKeyCredential, DocumentAnalysisClient } = require("@azure/ai-form-recognizer"); - // set `` and `` variables with the values from the Azure portal - const key = ""; - const endpoint = ""; + // set `` and `` variables with the values from the Azure portal. + const key = ""; + const endpoint = ""; // sample document const formUrl = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-REST-api-samples/master/curl/form-recognizer/sample-layout.pdf" @@ -231,9 +231,9 @@ Extract text, selection marks, text styles, table structures, and bounding regio const { AzureKeyCredential, DocumentAnalysisClient } = require("@azure/ai-form-recognizer"); - // set `` and `` variables with the values from the Azure portal - const key = ""; - const endpoint = ""; + // set `` and `` variables with the values from the Azure portal. + const key = ""; + const endpoint = ""; // sample document const formUrl = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-REST-api-samples/master/curl/form-recognizer/sample-layout.pdf" @@ -324,9 +324,9 @@ In this example, we'll analyze an invoice using the **prebuilt-invoice** model. // using the PrebuiltModels object, rather than the raw model ID, adds strong typing to the model's output const { PrebuiltModels } = require("@azure/ai-form-recognizer"); - // set `` and `` variables with the values from the Azure portal - const key = ""; - const endpoint = ""; + // set `` and `` variables with the values from the Azure portal. 
+ const key = ""; + const endpoint = ""; // sample document const invoiceUrl = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-REST-api-samples/master/curl/form-recognizer/sample-invoice.pdf"; @@ -419,7 +419,7 @@ In this quickstart, you used the Form Recognizer JavaScript SDK to analyze vario ## Next steps > [!div class="nextstepaction"] -> [REST API v3.0reference documentation](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/AnalyzeDocument) +> [REST API v3.0reference documentation](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument) > [!div class="nextstepaction"] > [Form Recognizer JavaScript reference library](https://azuresdkdocs.blob.core.windows.net/$web/javascript/azure-ai-form-recognizer/4.0.0-beta.1/index.html) diff --git a/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-python-sdk.md b/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-python-sdk.md index 5dda4a0c5d86d..b105379236047 100644 --- a/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-python-sdk.md +++ b/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-python-sdk.md @@ -120,7 +120,7 @@ def analyze_general_documents(): docUrl = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-REST-api-samples/master/curl/form-recognizer/sample-layout.pdf" # create your `DocumentAnalysisClient` instance and `AzureKeyCredential` variable - document_analysis_client = DocumentAnalysisClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + document_analysis_client = DocumentAnalysisClient(endpoint=endpoint, credential=AzureKeyCredential(key)) poller = document_analysis_client.begin_analyze_document_from_url( "prebuilt-document", docUrl) diff --git a/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-rest-api.md b/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-rest-api.md index bb1e8bcce4758..ce43b4b839c65 100644 --- a/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-rest-api.md +++ b/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-rest-api.md @@ -7,19 +7,19 @@ manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: quickstart -ms.date: 03/24/2022 +ms.date: 06/06/2022 ms.author: lajanuar --- -# Get started: Form Recognizer REST API 2022-01-30-preview +# Get started: Form Recognizer REST API 2022-06-30-preview >[!NOTE] > Form Recognizer v3.0 is currently in public preview. Some features may not be supported or have limited capabilities. -The current API version is ```2022-01-30-preview```. +The current API version is **2022-06-30-preview**. -| [Form Recognizer REST API](https://westcentralus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-2/operations/AnalyzeDocument) | [Azure SDKS](https://azure.github.io/azure-sdk/releases/latest/index.html) | +| [Form Recognizer REST API](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument) | [Azure SDKS](https://azure.github.io/azure-sdk/releases/latest/index.html) | Get started with Azure Form Recognizer using the REST API. Azure Form Recognizer is a cloud-based Azure Applied AI Service that uses machine learning to extract key-value pairs, text, and tables from your documents. 
You can easily call Form Recognizer models using the REST API or by integrating our client library SDks into your workflows and applications. We recommend that you use the free service when you're learning the technology. Remember that the number of free pages is limited to 500 per month. @@ -31,7 +31,7 @@ To learn more about Form Recognizer features and development options, visit our **Document Analysis** -* 🆕 Read—Analyze and extract printed and handwritten text lines, words, locations, and detected languages. +* 🆕 Read—Analyze and extract printed (typeface) and handwritten text lines, words, locations, and detected languages. * 🆕General document—Analyze and extract text, tables, structure, key-value pairs, and named entities. * Layout—Analyze and extract tables, lines, words, and selection marks from documents, without the need to train a model. @@ -46,7 +46,7 @@ To learn more about Form Recognizer features and development options, visit our **Custom Models** * Custom—Analyze and extract form fields and other content from your custom forms, using models you trained with your own form types. -* Composed custom—Compose a collection of custom models and assign them to a single model built from your form types. +* Composed custom—Compose a collection of custom models and assign them to a single model ID. ## Prerequisites @@ -84,7 +84,7 @@ Before you run the cURL command, make the following changes: #### POST request ```bash -curl -v -i POST "{endpoint}/formrecognizer/documentModels/{modelID}:analyze?api-version=2022-01-30-preview" -H "Content-Type: application/json" -H "Ocp-Apim-Subscription-Key: {key}" --data-ascii "{'urlSource': '{your-document-url}'}" +curl -v -i POST "{endpoint}/formrecognizer/documentModels/{modelID}:analyze?api-version=2022-06-30-preview" -H "Content-Type: application/json" -H "Ocp-Apim-Subscription-Key: {key}" --data-ascii "{'urlSource': '{your-document-url}'}" ``` #### Reference table @@ -108,18 +108,22 @@ You'll receive a `202 (Success)` response that includes an **Operation-Location* ### Get analyze results (GET Request) -After you've called the [**Analyze document**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/AnalyzeDocument) API, call the [**Get analyze result**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/GetAnalyzeDocumentResult) API to get the status of the operation and the extracted data. Before you run the command, make these changes: +After you've called the [**Analyze document**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument) API, call the [**Get analyze result**](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/GetAnalyzeDocumentResult) API to get the status of the operation and the extracted data. Before you run the command, make these changes: 1. Replace `{endpoint}` with the endpoint value from your Form Recognizer instance in the Azure portal. 1. Replace `{key}` with the key value from your Form Recognizer instance in the Azure portal. -1. Replace `{modelID}` with the same model name you used to analyze your document. +1. Replace `{modelID}` with the same modelID you used to analyze your document. 1. Replace `{resultID}` with the result ID from the [Operation-Location](#operation-location) header. 
#### GET request

```bash
-curl -v -X GET "{endpoint}/formrecognizer/documentModels/{model name}/analyzeResults/{resultId}?api-version=2022-01-30-preview" -H "Ocp-Apim-Subscription-Key: {key}"
+curl -v -X GET "{endpoint}/formrecognizer/documentModels/{modelID}/analyzeResults/{resultId}?api-version=2022-06-30-preview" -H "Ocp-Apim-Subscription-Key: {key}"
```

#### Examine the response

@@ -134,7 +138,7 @@ You'll receive a `200 (Success)` response with JSON output. The first field, `"s
"createdDateTime": "2022-03-25T19:31:37Z",
"lastUpdatedDateTime": "2022-03-25T19:31:43Z",
"analyzeResult": {
- "apiVersion": "2022-01-30-preview",
+ "apiVersion": "2022-06-30",
"modelId": "prebuilt-invoice",
"stringIndexType": "textElements"...
..."pages": [
diff --git a/articles/applied-ai-services/form-recognizer/resource-customer-stories.md b/articles/applied-ai-services/form-recognizer/resource-customer-stories.md
index ef95365357356..7e1a0b695c63e 100644
--- a/articles/applied-ai-services/form-recognizer/resource-customer-stories.md
+++ b/articles/applied-ai-services/form-recognizer/resource-customer-stories.md
@@ -7,8 +7,9 @@ manager: nitinme
ms.service: applied-ai-services
ms.subservice: forms-recognizer
ms.topic: conceptual
-ms.date: 02/04/2022
+ms.date: 05/25/2022
ms.author: lajanuar
+ms.custom: applied-ai-non-critical-form
---

# Customer spotlight

@@ -17,23 +18,26 @@ The following customers and partners have adopted Form Recognizer across a wide
| Customer/Partner | Description | Link |
|---------|-------------|----------------------|
-| **Acumatica** | [**Acumatica**](https://www.acumatica.com/) is a technology provider that develops cloud- and browser-based enterprise resource planning (ERP) software for small and medium-sized businesses (SMBs). To bring expense claims into the modern age, Acumatica incorporated Form Recognizer into its native application. The Form Recognizer's prebuilt-receipt API and machine learning capabilities are used to automatically extract data from receipts. Acumatica's customers can file multiple, error-free claims in a matter of seconds, freeing up more time to focus on other important tasks. | [Customer story](https://customers.microsoft.com/story/762684-acumatica-partner-professional-services-azure) |
-|**Arkas Logistics** | [**Arkas Logistics**](http://www.arkaslojistik.com.tr/) is operates under the umbrella of Arkas Holding, Turkey's leading holding institution and operating in 23 countries. During the COVID-19 crisis, Arkas Logistics has been able to provide outstanding, complete logistical services thanks to its focus on contactless operation and digitalization steps. Form Recognizer powers a solution that maintains the continuity of the supply chain and allows for uninterrupted service. | [Customer story](https://customers.microsoft.com/story/842149-arkas-logistics-transportation-azure-en-turkey ) |
+| **Acumatica** | [**Acumatica**](https://www.acumatica.com/) is a technology provider that develops cloud and browser-based enterprise resource planning (ERP) software for small and medium-sized businesses (SMBs). To bring expense claims into the modern age, Acumatica incorporated Form Recognizer into its native application.
The Form Recognizer's prebuilt-receipt API and machine learning capabilities are used to automatically extract data from receipts. Acumatica's customers can file multiple, error-free claims in a matter of seconds, freeing up more time to focus on other important tasks. | [Customer story](https://customers.microsoft.com/story/762684-acumatica-partner-professional-services-azure) |
+| **Air Canada** | In September 2021, [**Air Canada**](https://www.aircanada.com/) was tasked with verifying the COVID-19 vaccination status of thousands of worldwide employees in only two months. After realizing manual verification would be too costly and complex within the time constraint, Air Canada turned to its internal AI team for an automated solution. The AI team partnered with Microsoft and used Form Recognizer to roll out a fully functional, accurate solution within weeks. This partnership met the government mandate on time and saved thousands of hours of manual work. | [Customer story](https://customers.microsoft.com/story/1505667713938806113-air-canada-travel-transportation-azure-form-recognizer)|
+|**Arkas Logistics** | [**Arkas Logistics**](http://www.arkaslojistik.com.tr/) operates under the umbrella of Arkas Holding, Turkey's leading holding institution, and operates in 23 countries. During the COVID-19 crisis, the company has been able to provide outstanding, complete logistical services thanks to its focus on contactless operation and digitalization steps. Form Recognizer powers a solution that maintains the continuity of the supply chain and allows for uninterrupted service. | [Customer story](https://customers.microsoft.com/story/842149-arkas-logistics-transportation-azure-en-turkey ) |
 |**Automation Anywhere**| [**Automation Anywhere**](https://www.automationanywhere.com/) is on a singular and unwavering mission to democratize automation by liberating teams from mundane, repetitive tasks, and allowing more time for innovation and creativity with cloud-native robotic process automation (RPA)software. To protect the citizens of the United Kingdom, healthcare providers must process tens of thousands of COVID-19 tests daily, each one accompanied by a form for the World Health Organization (WHO). Manually completing and processing these forms would potentially slow testing and divert resources away from patient care. In response, Automation Anywhere built an AI-powered bot to help a healthcare provider automatically process and submit the COVID-19 test forms at scale. | [Customer story](https://customers.microsoft.com/story/811346-automation-anywhere-partner-professional-services-azure-cognitive-services) |
 |**AvidXchange**| [**AvidXchange**](https://www.avidxchange.com/) has developed an accounts payable automation solution applying Form Recognizer. AvidXchange partners with Azure Cognitive Services to deliver an accounts payable automation solution for the middle market. Customers benefit from faster invoice processing times and increased accuracy to ensure their suppliers are paid the right amount, at the right time. | [Blog](https://techcommunity.microsoft.com/t5/azure-ai/form-recognizer-now-reads-more-languages-processes-ids-and/ba-p/2179428)|
 |**Blue Prism**| [**Blue Prism**](https://www.blueprism.com/) Decipher is an AI-powered document processing capability that's directly embedded into the company's connected-RPA platform. Decipher works with Form Recognizer to help organizations process forms faster and with less human effort.
One of Blue Prism's customers has been testing the solution to automate invoice handling as part of its procurement process. | [Customer story](https://customers.microsoft.com/story/737482-blue-prism-partner-professional-services-azure) | |**Chevron**| [**Chevron**](https://www.chevron.com//) Canada Business Unit is now using Form Recognizer with UiPath's robotic process automation platform to automate the extraction of data and move it into back-end systems for analysis. Subject matter experts have more time to focus on higher-value activities and information flows more rapidly. Accelerated operational control enables the company to analyze its business with greater speed, accuracy, and depth. | [Customer story](https://customers.microsoft.com/story/chevron-mining-oil-gas-azure-cognitive-services)| |**Cross Masters**|[**Cross Masters**](https://crossmasters.com/), uses cutting-edge AI technologies not only as a passion, but as an essential part of a work culture requiring continuous innovation. One of the latest success stories is automation of manual paperwork required to process thousands of invoices. Cross Masters used Form Recognizer to develop a unique, customized solution, to provide clients with market insights from a large set of collected invoices. Most impressive is the extraction quality and continuous introduction of new features, such as model composing and table labeling. | [Blog](https://techcommunity.microsoft.com/t5/azure-ai/form-recognizer-now-reads-more-languages-processes-ids-and/ba-p/2179428)| -|**Element**| [**Element**](https://www.element.com/) is a global business that provides specialist testing, inspection, and certification services to a diverse range of businesses. Element is one of the fastest growing companies in the global testing, inspection and certification sector having over 6,500 engaged experts working in more than 200 facilities across the globe. When the finance team for the Americas was forced to work from home during the COVID-19 pandemic, it needed to digitalize its paper processes fast. The creativity of the team and its use of Azure Form Recognizer delivered more than business as usual—it delivered significant efficiencies. The Element team used the tools in Microsoft Azure so the next phase could be expedited. Rather than coding from scratch, they saw the opportunity to use the Azure Form Recognizer. This integration quickly gave them the functionality they needed, together with the agility and security of Microsoft Azure. Microsoft Azure Logic Apps is used to automate the process of extracting the documents from email, storing them, and updating the system with the extracted data. Computer Vision, part of Azure Cognitive Services, partners with Azure Form Recognizer to extract the right data points from the invoice documents—whether they're a pdf or scanned images. | [Customer story](https://customers.microsoft.com/story/1414941527887021413-element)| -|**Emaar Properties**| [**Emaar Properties**](https://www.emaar.com/en/), operates Dubai Mall, the world's most-visited retail and entertainment destination. Each year, the Dubai Mall draws more than 80 million visitors. To enrich the shopping experience, Emaar Properties offers a unique rewards program through a dedicated mobile app. Loyalty program points are earned via submitted receipts. 
Emaar Properties uses Microsoft Azure Form Recognizer to process submitted receipts and has achieved 92 percent reading accuracy.| [Customer story](https://customers.microsoft.com/story/1459754150957690925-emaar-retailers-azure-en-united-arab-emirates)| -|**EY**| [**EY**](https://ey.com/) (Ernst & Young Global Limited) is a multinational professional services network that helps to create long-term value for clients and build trust in the capital markets. Enabled by data and technology, diverse EY teams in over 150 countries to help clients grow, transform, and operate. EY teams work across assurance, consulting, law, strategy, tax, and transactions to find solutions for complex issues facing our world today. The EY Technology team collaborated with Microsoft to build a platform that hastens invoice extraction and contract comparison processes. Azure Form Recognizer and Custom Vision partnered to enable EY teams to automate and improve the OCR and document handling processes for its consulting, tax, audit, and transactions services clients. | [Customer story](https://customers.microsoft.com/story/1404985164224935715-ey-professional-services-azure-form-recognizer)| -|**Financial Fabric**| [**Financial Fabric**](https://www.financialfabric.com//), a Microsoft Cloud Solution Provider, delivers data architecture, science, and analytics services to investment managers at hedge funds, family offices, and corporate treasuries. Its daily processes involve extracting and normalizing data from thousands of complex financial documents, such as bank statements and legal agreements. The company then provides custom analytics to help its clients make better investment decisions. Extracting this data previously took days or weeks. By using Form Recognizer, Financial Fabric has reduced the time it takes to go from extraction to analysis to just minutes. | [Customer story](https://customers.microsoft.com/story/financial-fabric-banking-capital-markets-azure)| -|**GEP**| [**GEP**](https://www.gep.com/) has developed an invoice processing solution for a client using Form Recognizer. "At GEP, we're seeing AI and automation make a profound impact on procurement and the supply chain. By combining our AI solution with Microsoft Form Recognizer, we automated the processing of 4,000 invoices a day for a client... It saved them tens of thousands of hours of manual effort, while improving accuracy, controls and compliance on a global scale." Sarateudu Sethi, GEP's Vice President of Artificial Intelligence. | [Blog](https://techcommunity.microsoft.com/t5/azure-ai/form-recognizer-now-reads-more-languages-processes-ids-and/ba-p/2179428)| +|**Element**| [**Element**](https://www.element.com/) is a global business that provides specialist testing, inspection, and certification services to a diverse range of businesses. Element is one of the fastest growing companies in the global testing, inspection and certification sector having over 6,500 engaged experts working in more than 200 facilities across the globe. When the finance team for the Americas was forced to work from home during the COVID-19 pandemic, it needed to digitalize its paper processes fast. The creativity of the team and its use of Azure Form Recognizer delivered more than business as usual—it delivered significant efficiencies. The Element team used the tools in Azure so the next phase could be expedited. Rather than coding from scratch, they saw the opportunity to use the Azure Form Recognizer. 
This integration quickly gave them the functionality they needed, together with the agility and security of Azure. Azure Logic Apps is used to automate the process of extracting the documents from email, storing them, and updating the system with the extracted data. Computer Vision, part of Azure Cognitive Services, partners with Azure Form Recognizer to extract the right data points from the invoice documents—whether they're a pdf or scanned images. | [Customer story](https://customers.microsoft.com/story/1414941527887021413-element)| +|**Emaar Properties**| [**Emaar Properties**](https://www.emaar.com/en/), operates Dubai Mall, the world's most-visited retail and entertainment destination. Each year, the Dubai Mall draws more than 80 million visitors. To enrich the shopping experience, Emaar Properties offers a unique rewards program through a dedicated mobile app. Loyalty program points are earned via submitted receipts. Emaar Properties uses Azure Form Recognizer to process submitted receipts and has achieved 92 percent reading accuracy.| [Customer story](https://customers.microsoft.com/story/1459754150957690925-emaar-retailers-azure-en-united-arab-emirates)| +|**EY**| [**EY**](https://ey.com/) (Ernst & Young Global Limited) is a multinational professional services network that helps to create long-term value for clients and build trust in the capital markets. Enabled by data and technology, diverse EY teams in over 150 countries to help clients grow, transform, and operate. EY teams work across assurance, consulting, law, strategy, tax, and transactions to find solutions for complex issues facing our world today. The EY Technology team collaborated with Microsoft to build a platform that hastens invoice extraction and contract comparison processes. Azure Form Recognizer and Custom Vision partnered to enable EY teams to automate and improve the OCR and document handling processes for its transactions services clients. | [Customer story](https://customers.microsoft.com/story/1404985164224935715-ey-professional-services-azure-form-recognizer)| +|**Financial Fabric**| [**Financial Fabric**](https://www.financialfabric.com/), a Microsoft Cloud Solution Provider, delivers data architecture, science, and analytics services to investment managers at hedge funds, family offices, and corporate treasuries. Its daily processes involve extracting and normalizing data from thousands of complex financial documents, such as bank statements and legal agreements. The company then provides custom analytics to help its clients make better investment decisions. Extracting this data previously took days or weeks. By using Form Recognizer, Financial Fabric has reduced the time it takes to go from extraction to analysis to just minutes. | [Customer story](https://customers.microsoft.com/story/financial-fabric-banking-capital-markets-azure)| +|**Fujitsu**| [**Fujitsu**](https://scanners.us.fujitsu.com/about-us) is the world leader in document scanning technology, with more than 50 percent of global market share, but that doesn't stop the company from constantly innovating. To improve the performance and accuracy of its cloud scanning solution, Fujitsu incorporated Azure Form Recognizer. It took only a few months to deploy the new technologies, and they have boosted character recognition rates as high as 99.9 percent. This collaboration helps Fujitsu deliver market-leading innovation and give its customers powerful and flexible tools for end-to-end document management. 
| [Customer story](https://customers.microsoft.com/en-us/story/1504311236437869486-fujitsu-document-scanning-azure-form-recognizer)| +|**GEP**| [**GEP**](https://www.gep.com/) has developed an invoice processing solution for a client using Form Recognizer. GEP combined their AI solution with Azure Form Recognizer to automate the processing of 4,000 invoices a day for a client saving them tens of thousands of hours of manual effort. This collaborative effort improved accuracy, controls, and compliance on a global scale." Sarateudu Sethi, GEP's Vice President of Artificial Intelligence. | [Blog](https://techcommunity.microsoft.com/t5/azure-ai/form-recognizer-now-reads-more-languages-processes-ids-and/ba-p/2179428)| |**HCA Healthcare**| [**HCA Healthcare**](https://hcahealthcare.com/) is one of the nation's leading providers of healthcare with over 180 hospitals and 2,000 sites-of-care located throughout the United States and serving approximately 35 million patients each year. Currently, they're using Azure Form Recognizer to simplify and improve the patient onboarding experience and reducing administrative time spent entering repetitive data into the care center's system. | [Customer story](https://customers.microsoft.com/story/1404891793134114534-hca-healthcare-healthcare-provider-azure)| |**Icertis**| [**Icertis**](https://www.icertis.com/), is a Software as a Service (SaaS) provider headquartered in Bellevue, Washington. Icertis digitally transforms the contract management process with a cloud-based, AI-powered, contract lifecycle management solution. Azure Form Recognizer enables Icertis Contract Intelligence to take key-value pairs embedded in contracts and create structured data understood and operated upon by machine algorithms. Through these and other powerful Azure Cognitive and AI services, Icertis empowers customers in every industry to improve business in multiple ways: optimized manufacturing operations, added agility to retail strategies, reduced risk in IT services, and faster delivery of life-saving pharmaceutical products. | [Blog](https://cloudblogs.microsoft.com/industry-blog/en-in/unicorn/2022/01/12/how-icertis-built-a-contract-management-solution-using-azure-form-recognizer/)| -|**Instabase**| [**Instabase**](https://instabase.com/) is a horizontal application platform that provides best-in-class machine learning processes to help retrieve, organize, identify, and understand complex masses of unorganized data. Instabase then brings this data into business workflows as organized information. The platform provides a repository of integrative applications to orchestrate and harness that information with the means to rapidly extend and enhance them as required. Instabase applications are fully containerized for widespread, infrastructure-agnostic deployment. | [Customer story](https://customers.microsoft.com/en-gb/story/1376278902865681018-instabase-partner-professional-services-azure)| -|**Northern Trust**| [**Northern Trust**](https://www.northerntrust.com/) is a leading provider of wealth management, asset servicing, asset management, and banking to corporations, institutions, families, and individuals. As part of its initiative to digitize alternative asset servicing, Northern Trust has launched an AI-powered solution to extract unstructured investment data from alternative asset documents and making it accessible and actionable for asset-owner clients. Microsoft Azure Applied AI services accelerate time-to-value for enterprises building AI solutions. 
This proprietary solution transforms crucial information such as capital call notices, cash and stock distribution notices, and capital account statements from various unstructured formats into digital, actionable insights for investment teams. | [Customer story](https://www.businesswire.com/news/home/20210914005449/en/Northern-Trust-Automates-Data-Extraction-from-Alternative-Asset-Documentation)| +|**Instabase**| [**Instabase**](https://instabase.com/) is a horizontal application platform that provides best-in-class machine learning processes to help retrieve, organize, identify, and understand complex masses of unorganized data. The application platform then brings this data into business workflows as organized information. This workflow provides a repository of integrative applications to orchestrate and harness that information with the means to rapidly extend and enhance them as required. The applications are fully containerized for widespread, infrastructure-agnostic deployment. | [Customer story](https://customers.microsoft.com/en-gb/story/1376278902865681018-instabase-partner-professional-services-azure)| +|**Northern Trust**| [**Northern Trust**](https://www.northerntrust.com/) is a leading provider of wealth management, asset servicing, asset management, and banking to corporations, institutions, families, and individuals. As part of its initiative to digitize alternative asset servicing, Northern Trust has launched an AI-powered solution to extract unstructured investment data from alternative asset documents and making it accessible and actionable for asset-owner clients. Azure Applied AI services accelerate time-to-value for enterprises building AI solutions. This proprietary solution transforms crucial information from various unstructured formats into digital, actionable insights for investment teams. | [Customer story](https://www.businesswire.com/news/home/20210914005449/en/Northern-Trust-Automates-Data-Extraction-from-Alternative-Asset-Documentation)| +|**Old Mutual**| [**Old Mutual**](https://www.oldmutual.co.za/) is Africa's leading financial services group with a comprehensive range of investment capabilities. They're the industry leader in retirement fund solutions, investments, asset management, group risk benefits, insurance, and multi-fund management. The Old Mutual team used Microsoft Natural Language Processing and Optical Character Recognition to provide the basis for automating key customer transactions received via emails. It also offered an opportunity to identify incomplete customer requests in order to nudge customers to the correct digital channels. Old Mutual's extensible solution technology was further developed as a microservice to be consumed by any enterprise application through a secure API management layer. | [Customer story](https://customers.microsoft.com/en-us/story/1507561807660098567-old-mutual-banking-capital-markets-azure-en-south-africa)| |**Standard Bank**| [**Standard Bank of South Africa**](https://www.standardbank.co.za/southafrica/personal/home) is Africa's largest bank by assets. Standard Bank is headquartered in Johannesburg, South Africa, and has more than 150 years of trade experience in Africa and beyond. When manual due diligence in cross-border transactions began absorbing too much staff time, the bank decided it needed a new way forward. Standard Bank uses Form Recognizer to significantly reduce its cross-border payments registration and processing time. 
| [Customer story](https://customers.microsoft.com/en-hk/story/1395059149522299983-standard-bank-of-south-africa-banking-capital-markets-azure-en-south-africa)| | **WEX**| [**WEX**](https://www.wexinc.com/) has developed a tool to process Explanation of Benefits documents using Form Recognizer. "The technology is truly amazing. I was initially worried that this type of solution wouldn't be feasible, but I soon realized that Form Recognizer can read virtually any document with accuracy." Matt Dallahan, Senior Vice President of Product Management and Strategy | [Blog](https://techcommunity.microsoft.com/t5/azure-ai/form-recognizer-now-reads-more-languages-processes-ids-and/ba-p/2179428)| -|**Wilson Allen** | [**Wilson Allen**](https://wilsonallen.com/) took advantage of AI container support for Microsoft Azure Cognitive Services and created a powerful AI solution that help firms around the world find unprecedented levels of insight in previously siloed and unstructured data. Its clients can use this data to support business development and foster client relationships. | [Customer story](https://customers.microsoft.com/story/814361-wilson-allen-partner-professional-services-azure)| -|**Zelros**| [**Zelros**](http://www.zelros.com/) offers AI-powered software for the insurance industry. Insurers use the Zelros platform to take in forms and seamlessly manage customer enrollment and claims filing. The company combined its technology with Form Recognizer to automatically pull key-value pairs and text out of documents. When insurers use the Zelros platform, they can quickly process paperwork, ensure high accuracy, and redirect thousands of hours previously spent on manual data extraction toward better service. | [Customer story](https://customers.microsoft.com/story/816397-zelros-insurance-azure)| +|**Wilson Allen** | [**Wilson Allen**](https://wilsonallen.com/) took advantage of AI container support for Azure Cognitive Services and created a powerful AI solution that help firms around the world find unprecedented levels of insight in previously siloed and unstructured data. Its clients can use this data to support business development and foster client relationships. | [Customer story](https://customers.microsoft.com/story/814361-wilson-allen-partner-professional-services-azure)| +|**Zelros**| [**Zelros**](http://www.zelros.com/) offers AI-powered software for the insurance industry. Insurers use the platform to take in forms and seamlessly manage customer enrollment and claims filing. The company combined its technology with Form Recognizer to automatically pull key-value pairs and text out of documents. When insurers use the platform, they can quickly process paperwork, ensure high accuracy, and redirect thousands of hours previously spent on manual data extraction toward better service. 
| [Customer story](https://customers.microsoft.com/story/816397-zelros-insurance-azure)| diff --git a/articles/applied-ai-services/form-recognizer/service-limits.md b/articles/applied-ai-services/form-recognizer/service-limits.md index da564f000d489..66eddf1b88a7c 100644 --- a/articles/applied-ai-services/form-recognizer/service-limits.md +++ b/articles/applied-ai-services/form-recognizer/service-limits.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: conceptual -ms.date: 05/23/2022 +ms.date: 06/06/2022 ms.author: lajanuar --- @@ -30,6 +30,10 @@ For the usage with [Form Recognizer SDK](quickstarts/try-v3-csharp-sdk.md), [For | Adjustable | No | No | | **Max size of OCR json response** | 500 MB | 500 MB | | Adjustable | No | No | +| **Max number of Template models** | 500 | 5000 | +| Adjustable | No | No | +| **Max number of Neural models** | 100 | 500 | +| Adjustable | No | No | # [Form Recognizer v3.0 (Preview)](#tab/v30) diff --git a/articles/applied-ai-services/form-recognizer/toc.yml b/articles/applied-ai-services/form-recognizer/toc.yml index dba6bde2ab60e..8dd00c9950efb 100644 --- a/articles/applied-ai-services/form-recognizer/toc.yml +++ b/articles/applied-ai-services/form-recognizer/toc.yml @@ -48,8 +48,8 @@ items: href: how-to-guides/use-prebuilt-read.md - name: Use SDKs and the REST API (v2.1) href: how-to-guides/try-sdk-rest-api.md - - name: Create SAS tokens for Azure Blob storage - href: generate-sas-tokens.md + - name: Create SAS tokens for storage containers + href: create-sas-tokens.md - name: Custom models items: - name: Build a custom model (v2.1) @@ -182,7 +182,7 @@ items: - name: Azure Form Recognizer REST API v3.0 (preview) items: - name: REST API v3.0 reference - href: https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-2/operations/AnalyzeDocument + href: https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument - name: Client libraries and SDKs items: - name: .NET diff --git a/articles/applied-ai-services/form-recognizer/tutorial-azure-function.md b/articles/applied-ai-services/form-recognizer/tutorial-azure-function.md index cce14d5c50a0a..f216192aa1bef 100644 --- a/articles/applied-ai-services/form-recognizer/tutorial-azure-function.md +++ b/articles/applied-ai-services/form-recognizer/tutorial-azure-function.md @@ -10,6 +10,7 @@ ms.subservice: forms-recognizer ms.topic: tutorial ms.date: 03/19/2021 ms.author: lajanuar +ms.custom: applied-ai-non-critical-form --- # Tutorial: Use an Azure Function to process stored documents @@ -255,4 +256,4 @@ In this tutorial, you learned how to use an Azure Function written in Python to > [Microsoft Power BI](https://powerbi.microsoft.com/integrations/azure-table-storage/) * [What is Form Recognizer?](overview.md) -* Learn more about the [Layout API](concept-layout.md) \ No newline at end of file +* Learn more about the [Layout API](concept-layout.md) diff --git a/articles/applied-ai-services/form-recognizer/v3-migration-guide.md b/articles/applied-ai-services/form-recognizer/v3-migration-guide.md index b72cc784164d2..bca025ae5a4ef 100644 --- a/articles/applied-ai-services/form-recognizer/v3-migration-guide.md +++ b/articles/applied-ai-services/form-recognizer/v3-migration-guide.md @@ -7,7 +7,7 @@ manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: how-to -ms.date: 02/15/2022 +ms.date: 06/06/2022 ms.author: lajanuar 
recommendations: false --- @@ -25,10 +25,16 @@ Form Recognizer v3.0 (preview) introduces several new features and capabilities: * [**Custom document model (v3.0)**](concept-custom-neural.md) is a new custom model type to extract fields from structured and unstructured documents. * [**Receipt (v3.0)**](concept-receipt.md) model supports single-page hotel receipt processing. * [**ID document (v3.0)**](concept-id-document.md) model supports endorsements, restrictions, and vehicle classification extraction from US driver's licenses. -* [**Custom model API (v3.0)**](concept-custom.md) supports signature detection for custom forms. +* [**Custom model API (v3.0)**](concept-custom.md) supports signature detection for custom template models. +* [**Custom model API (v3.0)**](overview.md) supports analysis of all the newly added prebuilt models. For a complete list of prebuilt models, see the [overview](overview.md) page. In this article, you'll learn the differences between Form Recognizer v2.1 and v3.0 and how to move to the newer version of the API. +> [!CAUTION] +> +> * REST API **2022-06-30-preview** release includes a breaking change in the REST API analyze response JSON. +> * The `boundingBox` property is renamed to `polygon` in each instance. + ## Changes to the REST API endpoints The v3.0 REST API combines the analysis operations for layout analysis, prebuilt models, and custom models into a single pair of operations by assigning **`documentModels`** and **`modelId`** to the layout analysis (prebuilt-layout) and prebuilt models. @@ -36,14 +42,14 @@ In this article, you'll learn the differences between Form Recognizer v2.1 and v ### POST request ```http -https://{your-form-recognizer-endpoint}/formrecognizer/documentModels/{modelId}?api-version=2022-01-30-preview +https://{your-form-recognizer-endpoint}/formrecognizer/documentModels/{modelId}?api-version=2022-06-30 ``` ### GET request ```http -https://{your-form-recognizer-endpoint}/formrecognizer/documentModels/{modelId}/AnalyzeResult/{resultId}?api-version=2022-01-30-preview +https://{your-form-recognizer-endpoint}/formrecognizer/documentModels/{modelId}/AnalyzeResult/{resultId}?api-version=2022-06-30 ``` ### Analyze operation @@ -56,14 +62,14 @@ https://{your-form-recognizer-endpoint}/formrecognizer/documentModels/{modelId}/ | Model | v2.1 | v3.0 | |:--| :--| :--| | **Request URL prefix**| **https://{your-form-recognizer-endpoint}/formrecognizer/v2.1** | **https://{your-form-recognizer-endpoint}/formrecognizer** | -|🆕 **General document**|N/A|/documentModels/prebuilt-document:analyze | -| **Layout**| /layout/analyze |/documentModels/prebuilt-layout:analyze| -|**Custom**| /custom/{modelId}/analyze |/documentModels/{modelId}:analyze | -| **Invoice** | /prebuilt/invoice/analyze | /documentModels/prebuilt-invoice:analyze | -| **Receipt** | /prebuilt/receipt/analyze | /documentModels/prebuilt-receipt:analyze | -| **ID document** | /prebuilt/idDocument/analyze | /documentModels/prebuilt-idDocument:analyze | -|**Business card**| /prebuilt/businessCard/analyze| /documentModels/prebuilt-businessCard:analyze| -|**W-2**| /prebuilt/w-2/analyze| /documentModels/prebuilt-w-2:analyze| +|🆕 **General document**|N/A|`/documentModels/prebuilt-document:analyze` | +| **Layout**| /layout/analyze |`/documentModels/prebuilt-layout:analyze`| +|**Custom**| /custom/{modelId}/analyze |`/documentModels/{modelId}:analyze` | +| **Invoice** | /prebuilt/invoice/analyze | `/documentModels/prebuilt-invoice:analyze` | +| **Receipt** | /prebuilt/receipt/analyze | 
`/documentModels/prebuilt-receipt:analyze` | +| **ID document** | /prebuilt/idDocument/analyze | `/documentModels/prebuilt-idDocument:analyze` | +|**Business card**| /prebuilt/businessCard/analyze| `/documentModels/prebuilt-businessCard:analyze`| +|**W-2**| /prebuilt/w-2/analyze| `/documentModels/prebuilt-w-2:analyze`| ### Analyze request body @@ -89,10 +95,10 @@ Base64 encoding is also supported in Form Recognizer v3.0: Parameters that continue to be supported: -* `pages` -* `locale` +* `pages` : Analyze only a specific subset of pages in the document. List of page numbers indexed from the number `1` to analyze. Ex. "1-3,5,7-9" +* `locale` : Locale hint for text recognition and document analysis. Value may contain only the language code (ex. "en", "fr") or BCP 47 language tag (ex. "en-US"). -Parameters no longer supported: +Parameters no longer supported: * includeTextDetails @@ -117,7 +123,7 @@ Analyze response has been refactored to the following top-level results to suppo { // Basic analyze result metadata -"apiVersion": "2022-01-30-preview", // REST API version used +"apiVersion": "2022-06-30", // REST API version used "modelId": "prebuilt-invoice", // ModelId used "stringIndexType": "textElements", // Character unit used for string offsets and lengths: // textElements, unicodeCodePoint, utf16CodeUnit // Concatenated content in global reading order across pages. @@ -131,7 +137,7 @@ Analyze response has been refactored to the following top-level results to suppo "angle": 0, // Orientation of content in clockwise direction (degree) "width": 0, // Page width "height": 0, // Page height -"unit": "pixel", // Unit for width, height, and bounding box coordinates +"unit": "pixel", // Unit for width, height, and polygon coordinates "spans": [ // Parts of top-level content covered by page { "offset": 0, // Offset in content @@ -167,10 +173,10 @@ Analyze response has been refactored to the following top-level results to suppo { "rowCount": 1, // Number of rows in table "columnCount": 1, // Number of columns in table -"boundingRegions": [ // Bounding boxes potentially across pages covered by table +"boundingRegions": [ // Polygons or Bounding boxes potentially across pages covered by table { "pageNumber": 1, // 1-indexed page number -"boundingBox": [ ... ], // Bounding box +"polygon": [ ... ], // Previously Bounding box, renamed to polygon in the 2022-06-30-preview API } ], "spans": [ ... ], // Parts of top-level content covered by table // List of cells in table @@ -240,8 +246,6 @@ Analyze response has been refactored to the following top-level results to suppo ] } - - ``` ## Build or train model @@ -250,14 +254,14 @@ The model object has three updates in the new API * ```modelId``` is now a property that can be set on a model for a human readable name. * ```modelName``` has been renamed to ```description``` -* ```buildMode``` is a new proerty with values of ```template``` for custom form models or ```neural``` for custom document models. +* ```buildMode``` is a new property with values of ```template``` for custom form models or ```neural``` for custom document models. -The ```build``` operation is invoked to train a model. The request payload and call pattern remain unchanged. The build operation specifies the model and training dataset, it returns the result via the Operation-Location header in the response. Poll this model operation URL, via a GET request to check the status of the build operation (minimum recommended interval between requests is 1 second). 
Unlike v2.1, this URL is not the resource location of the model. Instead, the model URL can be constructed from the given modelId, also retrieved from the resourceLocation property in the response. Upon success, status is set to ```succeeded``` and result contains the custom model info. If errors are encountered, status is set to ```failed``` and the error is returned. +The ```build``` operation is invoked to train a model. The request payload and call pattern remain unchanged. The build operation specifies the model and training dataset, it returns the result via the Operation-Location header in the response. Poll this model operation URL, via a GET request to check the status of the build operation (minimum recommended interval between requests is 1 second). Unlike v2.1, this URL isn't the resource location of the model. Instead, the model URL can be constructed from the given modelId, also retrieved from the resourceLocation property in the response. Upon success, status is set to ```succeeded``` and result contains the custom model info. If errors are encountered, status is set to ```failed``` and the error is returned. The following code is a sample build request using a SAS token. Note the trailing slash when setting the prefix or folder path. ```json -POST https://{your-form-recognizer-endpoint}/formrecognizer/documentModels:build?api-version=2022-01-30-preview +POST https://{your-form-recognizer-endpoint}/formrecognizer/documentModels:build?api-version=2022-06-30 { "modelId": {modelId}, @@ -275,7 +279,7 @@ POST https://{your-form-recognizer-endpoint}/formrecognizer/documentModels:build Model compose is now limited to single level of nesting. Composed models are now consistent with custom models with the addition of ```modelId``` and ```description``` properties. ```json -POST https://{your-form-recognizer-endpoint}/formrecognizer/documentModels:compose?api-version=2022-01-30-preview +POST https://{your-form-recognizer-endpoint}/formrecognizer/documentModels:compose?api-version=2022-06-30 { "modelId": "{composedModelId}", "description": "{composedModelDescription}", @@ -303,7 +307,7 @@ The only changes to the copy model function are: ***Authorize the copy*** ```json -POST https://{targetHost}/formrecognizer/documentModels:authorizeCopy?api-version=2022-01-30-preview +POST https://{targetHost}/formrecognizer/documentModels:authorizeCopy?api-version=2022-06-30 { "modelId": "{targetModelId}", "description": "{targetModelDescription}", @@ -313,7 +317,7 @@ POST https://{targetHost}/formrecognizer/documentModels:authorizeCopy?api-versio Use the response body from the authorize action to construct the request for the copy. ```json -POST https://{sourceHost}/formrecognizer/documentModels/{sourceModelId}:copy-to?api-version=2022-01-30-preview +POST https://{sourceHost}/formrecognizer/documentModels/{sourceModelId}:copy-to?api-version=2022-06-30 { "targetResourceId": "{targetResourceId}", "targetResourceRegion": "{targetResourceRegion}", @@ -331,7 +335,7 @@ List models have been extended to now return prebuilt and custom models. 
All pre ***Sample list models request*** ```json -GET https://{your-form-recognizer-endpoint}/formrecognizer/documentModels?api-version=2022-01-30-preview +GET https://{your-form-recognizer-endpoint}/formrecognizer/documentModels?api-version=2022-06-30 ``` ## Change to get model @@ -339,7 +343,7 @@ GET https://{your-form-recognizer-endpoint}/formrecognizer/documentModels?api-ve As get model now includes prebuilt models, the get operation returns a ```docTypes``` dictionary. Each document type is described by its name, optional description, field schema, and optional field confidence. The field schema describes the list of fields potentially returned with the document type. ```json -GET https://{your-form-recognizer-endpoint}/formrecognizer/documentModels/{modelId}?api-version=2022-01-30-preview +GET https://{your-form-recognizer-endpoint}/formrecognizer/documentModels/{modelId}?api-version=2022-06-30 ``` ## New get info operation @@ -347,7 +351,7 @@ GET https://{your-form-recognizer-endpoint}/formrecognizer/documentModels/{model The ```info``` operation on the service returns the custom model count and custom model limit. ```json -GET https://{your-form-recognizer-endpoint}/formrecognizer/info? api-version=2022-01-30-preview +GET https://{your-form-recognizer-endpoint}/formrecognizer/info? api-version=2022-06-30 ``` ***Sample response*** @@ -365,6 +369,6 @@ GET https://{your-form-recognizer-endpoint}/formrecognizer/info? api-version=202 In this migration guide, you've learned how to upgrade your existing Form Recognizer application to use the v3.0 APIs. Continue to use the 2.1 API for all GA features and use the 3.0 API for any of the preview features. -* [Review the new REST API](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-1/operations/AnalyzeDocument) +* [Review the new REST API](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-2022-06-30-preview/operations/AnalyzeDocument) * [What is Form Recognizer?](overview.md) * [Form Recognizer quickstart](./quickstarts/try-sdk-rest-api.md) \ No newline at end of file diff --git a/articles/applied-ai-services/form-recognizer/whats-new.md b/articles/applied-ai-services/form-recognizer/whats-new.md index c0a9b6858a148..84057c6c71235 100644 --- a/articles/applied-ai-services/form-recognizer/whats-new.md +++ b/articles/applied-ai-services/form-recognizer/whats-new.md @@ -7,9 +7,8 @@ manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: conceptual -ms.date: 02/28/2022 +ms.date: 06/06/2022 ms.author: lajanuar -ms.custom: ignite-fall-2021 --- @@ -18,6 +17,21 @@ ms.custom: ignite-fall-2021 Form Recognizer service is updated on an ongoing basis. Bookmark this page to stay up to date with release notes, feature enhancements, and documentation updates. +## June 2022 + +### Form Recognizer v3.0 preview release (beta.3) + +The **2022-06-30-preview** release is the latest update to the Form Recognizer service for v3.0 capabilities. There are considerable updates across the feature APIs: + +* [🆕 **Layout extends structure extraction**](concept-layout.md). Layout now includes added structure elements including sections, section headers, and paragraphs. This update enables finer grain document segmentation scenarios. For a complete list of structure elements identified, _see_ [enhanced structure](concept-layout.md#data-extraction). +* [🆕 **Custom neural model tabular fields support**](concept-custom-neural.md). 
Custom document models now support tabular fields. Tabular fields by default are also multi page. To learn more about tabular fields in custom neural models, _see_ [tabular fields](concept-custom-neural.md#tabular-fields). +* [🆕 **Custom template model tabular fields support for cross page tables**](concept-custom-template.md). Custom form models now support tabular fields across pages. To learn more about tabular fields in custom template models, _see_ [tabular fields](concept-custom-neural.md#tabular-fields). +* [🆕 **Invoice model output now includes general document key-value pairs**](concept-invoice.md). Where invoices contain required fields beyond the fields included in the prebuilt model, the general document model supplements the output with key-value pairs. _See_ [key value pairs](concept-invoice.md#key-value-pairs-preview). +* [🆕 **Invoice language expansion**](concept-invoice.md). The invoice model includes expanded language support. _See_ [supported languages](concept-invoice.md#supported-languages-and-locales). +* [🆕 **Prebuilt business card**](concept-business-card.md) now includes Japanese language support. _See_ [supported languages](concept-business-card.md#supported-languages-and-locales). +* [🆕 **Prebuilt ID document model**](concept-id-document.md). The ID document model now extracts DateOfIssue, Height, Weight, EyeColor, HairColor, and DocumentDiscriminator from US driver's licenses. _See_ [field extraction](concept-id-document.md#id-document-preview-field-extraction). +* [🆕 **Read model now supports common Microsoft Office document types**](concept-read.md). Document types like Word (docx) and PowerPoint (ppt) are now supported with the Read API. See [page extraction](concept-read.md#pages). + ## February 2022 ### Form Recognizer v3.0 preview release @@ -30,22 +44,23 @@ Form Recognizer service is updated on an ongoing basis. Bookmark this page to st * [**General document**](concept-general-document.md) pre-trained model is now updated to support selection marks in addition to API text, tables, structure, key-value pairs, and named entities from forms and documents. * [**Invoice API**](language-support.md#invoice-model) Invoice prebuilt model expands support to Spanish invoices. * [**Form Recognizer Studio**](https://formrecognizer.appliedai.azure.com) adds new demos for Read, W2, Hotel receipt samples, and support for training the new custom neural models. -* [**Language Expansion**](language-support.md) Form Recognizer Read, Layout, and Custom Form add support for 42 new languages including Arabic, Hindi, and other languages using Arabic and Devanagari scripts to expand the coverage to 164 languages. Handwritten support for the same features expands to Japanese and Korean in addition to English, Chinese Simplified, French, German, Italian, Portuguese, and Spanish languages. +* [**Language Expansion**](language-support.md) Form Recognizer Read, Layout, and Custom Form add support for 42 new languages including Arabic, Hindi, and other languages using Arabic and Devanagari scripts to expand the coverage to 164 languages. Handwritten language support expands to Japanese and Korean. Get started with the new [REST API](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-2/operations/AnalyzeDocument), [Python](quickstarts/try-v3-python-sdk.md), or [.NET](quickstarts/try-v3-csharp-sdk.md) SDK for the v3.0 preview API. 
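For reference, here is a minimal sketch of calling the v3.0 preview general document model from Python, assembled from the `DocumentAnalysisClient` snippet in the Python quickstart change above. The endpoint and key values are placeholders, and the preview `azure-ai-formrecognizer` package version is an assumption rather than something stated in this update:

```python
# Minimal sketch (not part of the original article): analyze a document with the
# v3.0 preview "prebuilt-document" model using the preview Python SDK.
# Assumes a preview azure-ai-formrecognizer package; endpoint and key are placeholders.
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import DocumentAnalysisClient

endpoint = "<your-form-recognizer-endpoint>"  # placeholder
key = "<your-form-recognizer-key>"            # placeholder
docUrl = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-REST-api-samples/master/curl/form-recognizer/sample-layout.pdf"

# create the client with the endpoint and key from the Azure portal
document_analysis_client = DocumentAnalysisClient(endpoint=endpoint, credential=AzureKeyCredential(key))

# start the long-running analyze operation and wait for the result
poller = document_analysis_client.begin_analyze_document_from_url("prebuilt-document", docUrl)
result = poller.result()

# print the key-value pairs extracted by the general document model
for pair in result.key_value_pairs:
    key_text = pair.key.content if pair.key else ""
    value_text = pair.value.content if pair.value else ""
    print(f"{key_text}: {value_text}")
```

The same pattern applies to the other prebuilt models; only the model ID string changes (for example, `prebuilt-layout` or `prebuilt-invoice`).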
#### Form Recognizer model data extraction - | **Model** | **Text extraction** |**Key-Value pairs** |**Selection Marks** | **Tables** |**Entities** | + | **Model** | **Text extraction** |**Key-Value pairs** |**Selection Marks** | **Tables** |**Entities** |**Signatures**| | --- | :---: |:---:| :---: | :---: |:---: | - |🆕Read | ✓ | | | | | - |🆕General document | ✓ | ✓ | ✓ | ✓ | ✓ | - | Layout | ✓ | | ✓ | ✓ | | - | Invoice | ✓ | ✓ | ✓ | ✓ || - |Receipt | ✓ | ✓ | | || - | ID document | ✓ | ✓ | | || - | Business card | ✓ | ✓ | | || - | Custom |✓ | ✓ | ✓ | ✓ | ✓ | + |🆕Read | ✓ | | | | | | + |🆕General document | ✓ | ✓ | ✓ | ✓ | ✓ | | + | Layout | ✓ | | ✓ | ✓ | | | + | Invoice | ✓ | ✓ | ✓ | ✓ || | + |Receipt | ✓ | ✓ | | || | + | ID document | ✓ | ✓ | | || | + | Business card | ✓ | ✓ | | || | + | Custom template |✓ | ✓ | ✓ | ✓ | | ✓ | + | Custom neural |✓ | ✓ | ✓ | ✓ | | | #### Form Recognizer SDK beta preview release @@ -562,7 +577,7 @@ pip package version 3.1.0b4 **Form Recognizer v2.1 public preview 3 is now available.** v2.1-preview.3 has been released, including the following features: -* **New prebuilt ID model** The new prebuilt ID model enables customers to take IDs and return structured data to automate processing. It combines our powerful Optical Character Recognition (OCR) capabilities with ID understanding models to extract key information from passports and U.S. driver licenses, such as name, date of birth, issue date, expiration date, and more. +* **New prebuilt ID model** The new prebuilt ID model enables customers to take IDs and return structured data to automate processing. It combines our powerful Optical Character Recognition (OCR) capabilities with ID understanding models to extract key information from passports and U.S. driver licenses. [Learn more about the prebuilt ID model](./concept-id-document.md) @@ -576,7 +591,7 @@ pip package version 3.1.0b4 :::image type="content" source="./media/table-labeling.png" alt-text="Table labeling" lightbox="./media/table-labeling.png"::: - In addition to labeling tables, you can now label empty values and regions; if some documents in your training set don't have values for certain fields, you can label them so that your model will know to extract values properly from analyzed documents. + In addition to labeling tables, you can now label empty values and regions. If some documents in your training set don't have values for certain fields, you can label them so that your model will know to extract values properly from analyzed documents. * **Support for 66 new languages** - The Layout API and Custom Models for Form Recognizer now support 73 languages. @@ -615,7 +630,7 @@ pip package version 3.1.0b4 ![Screenshot: Sample Labeling tool.](./media/ui-preview.jpg) * **Feedback Loop** - When Analyzing files via the Sample Labeling tool you can now also add it to the training set and adjust the labels if necessary and train to improve the model. -* **Auto Label Documents** - Automatically labels additional documents based on previous labeled documents in the project. +* **Auto Label Documents** - Automatically labels added documents based on previous labeled documents in the project. ## August 2020 @@ -661,7 +676,7 @@ pip package version 3.1.0b4 * **CopyModel API added to client SDKs** - You can now use the client SDKs to copy models from one subscription to another. See [Back up and recover models](./disaster-recovery.md) for general information on this feature. 
* **Azure Active Directory integration** - You can now use your Azure AD credentials to authenticate your Form Recognizer client objects in the SDKs. -* **SDK-specific changes** - This change includes both minor feature additions and breaking changes. For more information, *see* the SDK changelogs for more information. +* **SDK-specific changes** - This change includes both minor feature additions and breaking changes. For more information, _see_ the SDK changelogs. * [C# SDK Preview 3 changelog](https://github.com/Azure/azure-sdk-for-net/blob/master/sdk/formrecognizer/Azure.AI.FormRecognizer/CHANGELOG.md) * [Python SDK Preview 3 changelog](https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md) * [Java SDK Preview 3 changelog](https://github.com/Azure/azure-sdk-for-java/blob/master/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md) @@ -677,9 +692,9 @@ pip package version 3.1.0b4 * [Python SDK](/python/api/overview/azure/ai-formrecognizer-readme) * [JavaScript SDK](/javascript/api/overview/azure/ai-form-recognizer-readme) - The new SDK supports all the features of the v2.0 REST API for Form Recognizer. For example, you can train a model with or without labels and extract text, key-value pairs and tables from your forms, extract data from receipts with the pre-built receipts service and extract text and tables with the layout service from your documents. You can share your feedback on the SDKs through the [SDK Feedback form](https://aka.ms/FR_SDK_v1_feedback). + The new SDK supports all the features of the v2.0 REST API for Form Recognizer. You can share your feedback on the SDKs through the [SDK Feedback form](https://aka.ms/FR_SDK_v1_feedback). -* **Copy Custom Model** You can now copy models between regions and subscriptions using the new Copy Custom Model feature. Before invoking the Copy Custom Model API, you must first obtain authorization to copy into the target resource by calling the Copy Authorization operation against the target resource endpoint. +* **Copy Custom Model** You can now copy models between regions and subscriptions using the new Copy Custom Model feature. Before invoking the Copy Custom Model API, you must first obtain authorization to copy into the target resource. This authorization is secured by calling the Copy Authorization operation against the target resource endpoint. * [Generate a copy authorization](https://westus2.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v2/operations/CopyCustomFormModelAuthorization) REST API * [Copy a custom model](https://westus2.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v2/operations/CopyCustomFormModel) REST API diff --git a/articles/applied-ai-services/metrics-advisor/whats-new.md b/articles/applied-ai-services/metrics-advisor/whats-new.md index d5a67006d1e9a..1670177f1a306 100644 --- a/articles/applied-ai-services/metrics-advisor/whats-new.md +++ b/articles/applied-ai-services/metrics-advisor/whats-new.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: applied-ai-services ms.subservice: metrics-advisor ms.topic: overview -ms.date: 10/14/2020 +ms.date: 05/25/2022 ms.author: mbullwin --- @@ -16,6 +16,12 @@ ms.author: mbullwin Welcome! This page covers what's new in the Metrics Advisor docs. Check back every month for information on service changes, doc additions and updates this month. +## May 2022 + + **Detection configuration auto-tuning** has been released. 
This feature enables you to customize the service to better surface and personalize anomalies. Instead of the traditional way of setting configurations for each time series or a group of time series. A guided experience is provided to capture your detection preferences, such as the level of sensitivity, and the types of anomaly patterns, which allows you to tailor the model to your own needs on the back end. Those preferences can then be applied to all the time series you're monitoring. This allows you to reduce configuration costs while achieving better detection results. + +Check out [this article](how-tos/configure-metrics.md#tune-the-detection-configuration) to learn how to take advantage of the new feature. + ## SDK updates If you want to learn about the latest updates to Metrics Advisor client SDKs see: diff --git a/articles/attestation/attestation-token-examples.md b/articles/attestation/attestation-token-examples.md new file mode 100644 index 0000000000000..b692d911e15df --- /dev/null +++ b/articles/attestation/attestation-token-examples.md @@ -0,0 +1,138 @@ +--- +title: Examples of an Azure Attestation token +description: Examples of Azure Attestation token +services: attestation +author: msmbaldwin +ms.service: attestation +ms.topic: overview +ms.date: 06/07/2022 +ms.author: mbaldwin + + +--- +# Examples of an attestation token + +Attestation policy is used to process the attestation evidence and determine whether Azure Attestation will issue an attestation token. Attestation token generation can be controlled with custom policies. Below are some examples of an attestation policy. + +## Sample JWT generated for SGX attestation + +``` +{ + "alg": "RS256", + "jku": "https://tradewinds.us.attest.azure.net/certs", + "kid": , + "exp": 1568187398, + "iat": 1568158598, + "is-debuggable": false, + "iss": "https://tradewinds.us.attest.azure.net", + "maa-attestationcollateral": + { + "qeidcertshash": , + "qeidcrlhash": , + "qeidhash": , + "quotehash": , + "tcbinfocertshash": , + "tcbinfocrlhash": , + "tcbinfohash": + }, + "maa-ehd": , + "nbf": 1568158598, + "product-id": 4639, + "sgx-mrenclave": , + "sgx-mrsigner": , + "svn": 0, + "tee": "sgx" + "x-ms-attestation-type": "sgx", + "x-ms-policy-hash": <>, + "x-ms-sgx-collateral": + { + "qeidcertshash": , + "qeidcrlhash": , + "qeidhash": , + "quotehash": , + "tcbinfocertshash": , + "tcbinfocrlhash": , + "tcbinfohash": + }, + "x-ms-sgx-ehd": <>, + "x-ms-sgx-is-debuggable": true, + "x-ms-sgx-mrenclave": , + "x-ms-sgx-mrsigner": , + "x-ms-sgx-product-id": 1, + "x-ms-sgx-svn": 1, + "x-ms-ver": "1.0", + "x-ms-sgx-config-id": "000102030405060708090a0b0c0d8f99000102030405060708090a0b0c860e9a000102030405060708090a0b7d0d0e9b000102030405060708090a740c0d0e9c", + "x-ms-sgx-config-svn": 3451, + "x-ms-sgx-isv-extended-product-id": "8765432143211234abcdabcdef123456", + "x-ms-sgx-isv-family-id": "1234567812344321abcd1234567890ab" +}.[Signature] +``` + +Some of the claims used above are considered deprecated but are fully supported. It is recommended that all future code and tooling use the non-deprecated claim names. See [claims issued by Azure Attestation](claim-sets.md) for more information. + +The below claims will appear only in the attestation token generated for Intel® Xeon® Scalable processor-based server platforms. 
The claims will not appear if the SGX enclave is not configured with [Key Separation and Sharing Support](https://github.com/openenclave/openenclave/issues/3054) + +**x-ms-sgx-config-id** + +**x-ms-sgx-config-svn** + +**x-ms-sgx-isv-extended-product-id** + +**x-ms-sgx-isv-family-id** + +## Sample JWT generated for SEV-SNP attestation + +``` +{ +  "exp": 1649970020, +  "iat": 1649941220, +  "iss": "https://maasandbox0001.wus.attest.azure.net", +  "jti": "b65da1dcfbb4698b0bb2323cac664b745a2ff1cffbba55641fd65784aa9474d5", +  "nbf": 1649941220, +  "x-ms-attestation-type": "sevsnpvm", +  "x-ms-compliance-status": "azure-compliant-cvm", +  "x-ms-policy-hash": "LTPRQQju-FejAwdYihF8YV_c2XWebG9joKvrHKc3bxs", +  "x-ms-runtime": { +    "keys": [ +      { +        "e": "AQAB", +        "key_ops": ["encrypt"], +        "kid": "HCLTransferKey", +        "kty": "RSA", +        "n": "ur08DccjGGzRo3OIq445n00Q3OthMIbR3SWIzCcicIM_7nPiVF5NBIknk2zdHZN1iiNhIzJezrXSqVT7Ty1Dl4AB5xiAAqxo7xGjFqlL47NA8WbZRMxQtwlsOjZgFxosDNXIt6dMq7ODh4nj6nV2JMScNfRKyr1XFIUK0XkOWvVlSlNZjaAxj8H4pS0yNfNwr1Q94VdSn3LPRuZBHE7VrofHRGSHJraDllfKT0-8oKW8EjpMwv1ME_OgPqPwLyiRzr99moB7uxzjEVDe55D2i2mPrcmT7kSsHwp5O2xKhM68rda6F-IT21JgdhQ6n4HWCicslBmx4oqkI-x5lVsRkQ" +      } +    ], +    "vm-configuration": { +      "secure-boot": true, +      "secure-boot-template-id": "1734c6e8-3154-4dda-ba5f-a874cc483422", +      "tpm-enabled": true, +      "vmUniqueId": "AE5CBB2A-DC95-4870-A74A-EE4FB33B1A9C" +    } +  }, +  "x-ms-sevsnpvm-authorkeydigest": "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", +  "x-ms-sevsnpvm-bootloader-svn": 0, +  "x-ms-sevsnpvm-familyId": "01000000000000000000000000000000", +  "x-ms-sevsnpvm-guestsvn": 1, +  "x-ms-sevsnpvm-hostdata": "0000000000000000000000000000000000000000000000000000000000000000", +  "x-ms-sevsnpvm-idkeydigest": "38ed94f9aab20bc5eb40e89c7cbb03aa1b9efb435892656ade789ccaa0ded82ff18bae0e849c3166351ba1fa7ff620a2", +  "x-ms-sevsnpvm-imageId": "02000000000000000000000000000000", +  "x-ms-sevsnpvm-is-debuggable": false, +  "x-ms-sevsnpvm-launchmeasurement": "04a170f39a3f702472ed0c7ecbda9babfc530e3caac475fdd607ff499177d14c278c5a15ad07ceacd5230ae63d507e9d", +  "x-ms-sevsnpvm-microcode-svn": 40, +  "x-ms-sevsnpvm-migration-allowed": false, +  "x-ms-sevsnpvm-reportdata": "99dd4593a43f4b0f5f10f1856c7326eba309b943251fededc15592e3250ca9e90000000000000000000000000000000000000000000000000000000000000000", +  "x-ms-sevsnpvm-reportid": "d1d5c2c71596fae601433ecdfb62799de2a785cc08be3b1c8a4e26a381494787", +  "x-ms-sevsnpvm-smt-allowed": true, +  "x-ms-sevsnpvm-snpfw-svn": 0, +  "x-ms-sevsnpvm-tee-svn": 0, +  "x-ms-sevsnpvm-vmpl": 0, +  "x-ms-ver": "1.0" +} +``` + +## Next steps + +- [View examples of an attestation policy](policy-examples.md) diff --git a/articles/attestation/audit-logs.md b/articles/attestation/audit-logs.md index 8ef8823540a49..8da2a9ece7b5c 100644 --- a/articles/attestation/audit-logs.md +++ b/articles/attestation/audit-logs.md @@ -65,7 +65,7 @@ Individual blobs are stored as text, formatted as a JSON blob. Let’s look at a } ``` -Most of these fields are documented in the [Top-level common schema](/azure/azure-monitor/essentials/resource-logs-schema#top-level-common-schema). The following table lists the field names and descriptions for the entries not included in the top-level common schema: +Most of these fields are documented in the [Top-level common schema](../azure-monitor/essentials/resource-logs-schema.md#top-level-common-schema). 
The following table lists the field names and descriptions for the entries not included in the top-level common schema: | Field Name | Description | |------------------------------------------|-----------------------------------------------------------------------------------------------| @@ -82,4 +82,4 @@ The properties contain additional Azure attestation specific context: | infoDataReceived | Information about the request received from the client. Includes some HTTP headers, the number of headers received, the content type and content length | ## Next steps -- [How to enable Microsoft Azure Attestation logging ](azure-diagnostic-monitoring.md) +- [How to enable Microsoft Azure Attestation logging ](azure-diagnostic-monitoring.md) \ No newline at end of file diff --git a/articles/attestation/basic-concepts.md b/articles/attestation/basic-concepts.md index 795171a09a56d..a42a1e4806cfd 100755 --- a/articles/attestation/basic-concepts.md +++ b/articles/attestation/basic-concepts.md @@ -65,7 +65,7 @@ Attestation policy is used to process the attestation evidence and is configurab If the default policy in the attestation provider doesn’t meet the needs, customers will be able to create custom policies in any of the regions supported by Azure Attestation. Policy management is a key feature provided to customers by Azure Attestation. Policies will be attestation type specific and can be used to identify enclaves or add claims to the output token or modify claims in an output token. -See [examples of an attestation policy](policy-examples.md) for policy samples. +See [examples of an attestation policy](policy-examples.md) ## Benefits of policy signing @@ -83,74 +83,7 @@ Azure Attestation response will be a JSON string whose value contains JWT. Azure The Get OpenID Metadata API returns an OpenID Configuration response as specified by the [OpenID Connect Discovery protocol](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfig). The API retrieves metadata about the signing certificates in use by Azure Attestation. -Example of JWT generated for an SGX enclave: - -``` -{ - "alg": "RS256", - "jku": "https://tradewinds.us.attest.azure.net/certs", - "kid": , - "exp": 1568187398, - "iat": 1568158598, - "is-debuggable": false, - "iss": "https://tradewinds.us.attest.azure.net", - "maa-attestationcollateral": - { - "qeidcertshash": , - "qeidcrlhash": , - "qeidhash": , - "quotehash": , - "tcbinfocertshash": , - "tcbinfocrlhash": , - "tcbinfohash": - }, - "maa-ehd": , - "nbf": 1568158598, - "product-id": 4639, - "sgx-mrenclave": , - "sgx-mrsigner": , - "svn": 0, - "tee": "sgx" - "x-ms-attestation-type": "sgx", - "x-ms-policy-hash": <>, - "x-ms-sgx-collateral": - { - "qeidcertshash": , - "qeidcrlhash": , - "qeidhash": , - "quotehash": , - "tcbinfocertshash": , - "tcbinfocrlhash": , - "tcbinfohash": - }, - "x-ms-sgx-ehd": <>, - "x-ms-sgx-is-debuggable": true, - "x-ms-sgx-mrenclave": , - "x-ms-sgx-mrsigner": , - "x-ms-sgx-product-id": 1, - "x-ms-sgx-svn": 1, - "x-ms-ver": "1.0", - "x-ms-sgx-config-id": "000102030405060708090a0b0c0d8f99000102030405060708090a0b0c860e9a000102030405060708090a0b7d0d0e9b000102030405060708090a740c0d0e9c", - "x-ms-sgx-config-svn": 3451, - "x-ms-sgx-isv-extended-product-id": "8765432143211234abcdabcdef123456", - "x-ms-sgx-isv-family-id": "1234567812344321abcd1234567890ab" -}.[Signature] -``` - -Some of the claims used above are considered deprecated but are fully supported. It is recommended that all future code and tooling use the non-deprecated claim names. 
See [claims issued by Azure Attestation](claim-sets.md) for more information. - -The below claims will appear only in the attestation token generated for Intel® Xeon® Scalable processor-based server platforms. The claims will not appear if the SGX enclave is not configured with [Key Separation and Sharing Support](https://github.com/openenclave/openenclave/issues/3054) - -**x-ms-sgx-config-id** - -**x-ms-sgx-config-svn** - -**x-ms-sgx-isv-extended-product-id** - -**x-ms-sgx-isv-family-id** +See [examples of attestation token](attestation-token-examples.md). ## Encryption of data at rest diff --git a/articles/attestation/faq.yml b/articles/attestation/faq.yml index 1558e5d837e3c..036037200ef21 100644 --- a/articles/attestation/faq.yml +++ b/articles/attestation/faq.yml @@ -51,12 +51,12 @@ sections: The same process can be implemented for Azure Attestation. However to leverage the benefits offered by Trusted Hardware Identity Management (THIM), after installing ACC virtual machine, it is recommended to install [Azure DCAP library](https://www.nuget.org/packages/Microsoft.Azure.DCAP). Based on the agreement with Intel, when Azure DCAP library is installed, the requests for generating enclave evidence are redirected from Intel PCK caching service to THIM. Azure DCAP library is supported in Windows and Linux-based environments. - question: | - How to shift to Azure Attestation from other attestation models + How to shift to Azure Attestation from other SGX attestation models answer: | - After installing Azure Confidential computing virtual machine, install Azure DCAP library ([Windows/](https://www.nuget.org/packages/Microsoft.Azure.DCAP/) [Linux](https://packages.microsoft.com/ubuntu/18.04/prod/pool/main/a/az-dcap-client/)) to leverage the benefits offered by Trusted Hardware Identity Management (THIM). - Remote attestation client needs to be authored which can retrieve the enclave evidence and send requests to Azure Attestation. See [code samples](/samples/browse/?expanded=azure&terms=attestation) for reference - Attestation requests can be sent to the REST API endpoint of default providers or custom attestation providers - - Azure Attestation APIs are protected by Azure AD authentication. Hence the client that invokes attest APIs must be able to obtain and pass a valid Azure AD access token in the attestation request + - In [2018-09-01-preview](https://github.com/Azure/azure-rest-api-specs/tree/master/specification/attestation/data-plane/Microsoft.Attestation/stable/2018-09-01-preview) API version, the client needs to send Azure AD access token along with the evidence to SGX attest API endpoint. The Azure AD access token is not a required parameter to perform SGX attestation in [2020-10-01](https://github.com/Azure/azure-rest-api-specs/tree/master/specification/attestation/data-plane/Microsoft.Attestation/stable/2020-10-01) API version - question: | How can the relying party verify the integrity of attestation token and confirm that Azure Attestation is running inside an enclave diff --git a/articles/attestation/overview.md b/articles/attestation/overview.md index dd75bc2372731..1a8d549c7a3be 100755 --- a/articles/attestation/overview.md +++ b/articles/attestation/overview.md @@ -54,7 +54,7 @@ Azure [Confidential VM](../confidential-computing/confidential-vm-overview.md) ( Azure customers can [prevent bootkit and rootkit infections](https://www.youtube.com/watch?v=CQqu_rTSi0Q) by enabling [Trusted launch](../virtual-machines/trusted-launch.md)) for their virtual machines (VMs). 
When the VM is Secure Boot and vTPM enabled with guest attestation extension installed, vTPM measurements get submitted to Azure Attestation periodically for monitoring of boot integrity. An attestation failure indicates potential malware, which is surfaced to customers via Microsoft Defender for Cloud, through Alerts and Recommendations. -## Azure Attestation can run in a TEE +## Azure Attestation runs in a TEE Azure Attestation is critical to Confidential Computing scenarios, as it performs the following actions: @@ -63,11 +63,7 @@ Azure Attestation is critical to Confidential Computing scenarios, as it perform - Manages and stores tenant-specific policies. - Generates and signs a token that is used by relying parties to interact with the enclave. -Azure Attestation is built to run in two types of environments: -- Azure Attestation running in an SGX enabled TEE. -- Azure Attestation running in a non-TEE. - -Azure Attestation customers have expressed a requirement for Microsoft to be operationally out of trusted computing base (TCB). This is to prevent Microsoft entities such as VM admins, host admins, and Microsoft developers from modifying attestation requests, policies, and Azure Attestation-issued tokens. Azure Attestation is also built to run in TEE, where features of Azure Attestation like quote validation, token generation, and token signing are moved into an SGX enclave. +To keep Microsoft operationally out of trusted computing base (TCB), critical operations of Azure Attestation like quote validation, token generation, policy evaluation and token signing are moved into an SGX enclave. ## Why use Azure Attestation diff --git a/articles/attestation/policy-examples.md b/articles/attestation/policy-examples.md index ea2c303f95e59..493f3b722520a 100644 --- a/articles/attestation/policy-examples.md +++ b/articles/attestation/policy-examples.md @@ -55,6 +55,31 @@ issuancerules { Claims used in default policy are considered deprecated but are fully supported and will continue to be included in the future. It's recommended to use the non-deprecated claim names. For more information on the recommended claim names, see [claim sets](./claim-sets.md). +## Sample custom policy to support multiple SGX enclaves + +``` +version= 1.0; +authorizationrules +{ + [ type=="x-ms-sgx-is-debuggable", value==true ]&& + [ type=="x-ms-sgx-mrsigner", value=="mrsigner1"] => permit(); + [ type=="x-ms-sgx-is-debuggable", value==true ]&& + [ type=="x-ms-sgx-mrsigner", value=="mrsigner2"] => permit(); +}; +``` + +## Unsigned Policy for an SGX enclave with PolicyFormat=JWT + +``` +eyJhbGciOiJub25lIn0.eyJBdHRlc3RhdGlvblBvbGljeSI6ICJkbVZ5YzJsdmJqMGdNUzR3TzJGMWRHaHZjbWw2WVhScGIyNXlkV3hsYzN0ak9sdDBlWEJsUFQwaUpHbHpMV1JsWW5WbloyRmliR1VpWFNBOVBpQndaWEp0YVhRb0tUdDlPMmx6YzNWaGJtTmxjblZzWlhON1l6cGJkSGx3WlQwOUlpUnBjeTFrWldKMVoyZGhZbXhsSWwwZ1BUNGdhWE56ZFdVb2RIbHdaVDBpYVhNdFpHVmlkV2RuWVdKc1pTSXNJSFpoYkhWbFBXTXVkbUZzZFdVcE8yTTZXM1I1Y0dVOVBTSWtjMmQ0TFcxeWMybG5ibVZ5SWwwZ1BUNGdhWE56ZFdVb2RIbHdaVDBpYzJkNExXMXljMmxuYm1WeUlpd2dkbUZzZFdVOVl5NTJZV3gxWlNrN1l6cGJkSGx3WlQwOUlpUnpaM2d0YlhKbGJtTnNZWFpsSWwwZ1BUNGdhWE56ZFdVb2RIbHdaVDBpYzJkNExXMXlaVzVqYkdGMlpTSXNJSFpoYkhWbFBXTXVkbUZzZFdVcE8yTTZXM1I1Y0dVOVBTSWtjSEp2WkhWamRDMXBaQ0pkSUQwLUlHbHpjM1ZsS0hSNWNHVTlJbkJ5YjJSMVkzUXRhV1FpTENCMllXeDFaVDFqTG5aaGJIVmxLVHRqT2x0MGVYQmxQVDBpSkhOMmJpSmRJRDAtSUdsemMzVmxLSFI1Y0dVOUluTjJiaUlzSUhaaGJIVmxQV011ZG1Gc2RXVXBPMk02VzNSNWNHVTlQU0lrZEdWbElsMGdQVDRnYVhOemRXVW9kSGx3WlQwaWRHVmxJaXdnZG1Gc2RXVTlZeTUyWVd4MVpTazdmVHMifQ. 
+``` + +## Signed Policy for an SGX enclave with PolicyFormat=JWT + +``` +eyJhbGciOiJSU0EyNTYiLCJ4NWMiOlsiTUlJQzFqQ0NBYjZnQXdJQkFnSUlTUUdEOUVGakJcdTAwMkJZd0RRWUpLb1pJaHZjTkFRRUxCUUF3SWpFZ01CNEdBMVVFQXhNWFFYUjBaWE4wWVhScGIyNURaWEowYVdacFkyRjBaVEF3SGhjTk1qQXhNVEl6TVRneU1EVXpXaGNOTWpFeE1USXpNVGd5TURVeldqQWlNU0F3SGdZRFZRUURFeGRCZEhSbGMzUmhkR2x2YmtObGNuUnBabWxqWVhSbE1EQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUpyRWVNSlo3UE01VUJFbThoaUNLRGh6YVA2Y2xYdkhmd0RIUXJ5L3V0L3lHMUFuMGJ3MVU2blNvUEVtY2FyMEc1WmYxTUR4alZOdEF5QjZJWThKLzhaQUd4eFFnVVZsd1dHVmtFelpGWEJVQTdpN1B0NURWQTRWNlx1MDAyQkJnanhTZTBCWVpGYmhOcU5zdHhraUNybjYwVTYwQUU1WFx1MDAyQkE1M1JvZjFUUkNyTXNLbDRQVDRQeXAzUUtNVVlDaW9GU3d6TkFQaU8vTy9cdTAwMkJIcWJIMXprU0taUXh6bm5WUGVyYUFyMXNNWkptRHlyUU8vUFlMTHByMXFxSUY2SmJsbjZEenIzcG5uMXk0Wi9OTzJpdFBxMk5Nalx1MDAyQnE2N1FDblNXOC9xYlpuV3ZTNXh2S1F6QVR5VXFaOG1PSnNtSThUU05rLzBMMlBpeS9NQnlpeDdmMTYxQ2tjRm1LU3kwQ0F3RUFBYU1RTUE0d0RBWURWUjBUQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBZ1ZKVWRCaXRud3ZNdDdvci9UMlo4dEtCUUZsejFVcVVSRlRUTTBBcjY2YWx2Y2l4VWJZR3gxVHlTSk5pbm9XSUJROU9QamdMa1dQMkVRRUtvUnhxN1NidGxqNWE1RUQ2VjRyOHRsejRISjY0N3MyM2V0blJFa2o5dE9Gb3ZNZjhOdFNVeDNGTnBhRUdabDJMUlZHd3dcdTAwMkJsVThQd0gzL2IzUmVCZHRhQTdrZmFWNVx1MDAyQml4ZWRjZFN5S1F1VkFUbXZNSTcxM1A4VlBsNk1XbXNNSnRrVjNYVi9ZTUVzUVx1MDAyQkdZcU1yN2tLWGwxM3lldUVmVTJWVkVRc1ovMXRnb29iZVZLaVFcdTAwMkJUcWIwdTJOZHNcdTAwMkJLamRIdmFNYngyUjh6TDNZdTdpR0pRZnd1aU1tdUxSQlJwSUFxTWxRRktLNmRYOXF6Nk9iT01zUjlpczZ6UDZDdmxGcEV6bzVGUT09Il19.eyJBdHRlc3RhdGlvblBvbGljeSI6ImRtVnljMmx2YmoweExqQTdZWFYwYUc5eWFYcGhkR2x2Ym5KMWJHVnpJSHRqT2x0MGVYQmxQVDBpSkdsekxXUmxZblZuWjJGaWJHVWlYU0FtSmlCYmRtRnNkV1U5UFhSeWRXVmRJRDAtSUdSbGJua29LVHM5UGlCd1pYSnRhWFFvS1R0OU8ybHpjM1ZoYm1ObGNuVnNaWE1nZXlBZ0lDQmpPbHQwZVhCbFBUMGlKR2x6TFdSbFluVm5aMkZpYkdVaVhTQTlQaUJwYzNOMVpTaDBlWEJsUFNKT2IzUkVaV0oxWjJkaFlteGxJaXdnZG1Gc2RXVTlZeTUyWVd4MVpTazdJQ0FnSUdNNlczUjVjR1U5UFNJa2FYTXRaR1ZpZFdkbllXSnNaU0pkSUQwLUlHbHpjM1ZsS0hSNWNHVTlJbWx6TFdSbFluVm5aMkZpYkdVaUxDQjJZV3gxWlQxakxuWmhiSFZsS1RzZ0lDQWdZenBiZEhsd1pUMDlJaVJ6WjNndGJYSnphV2R1WlhJaVhTQTlQaUJwYzNOMVpTaDBlWEJsUFNKelozZ3RiWEp6YVdkdVpYSWlMQ0IyWVd4MVpUMWpMblpoYkhWbEtUc2dJQ0FnWXpwYmRIbHdaVDA5SWlSelozZ3RiWEpsYm1Oc1lYWmxJbDBnUFQ0Z2FYTnpkV1VvZEhsd1pUMGljMmQ0TFcxeVpXNWpiR0YyWlNJc0lIWmhiSFZsUFdNdWRtRnNkV1VwT3lBZ0lDQmpPbHQwZVhCbFBUMGlKSEJ5YjJSMVkzUXRhV1FpWFNBOVBpQnBjM04xWlNoMGVYQmxQU0p3Y205a2RXTjBMV2xrSWl3Z2RtRnNkV1U5WXk1MllXeDFaU2s3SUNBZ0lHTTZXM1I1Y0dVOVBTSWtjM1p1SWwwZ1BUNGdhWE56ZFdVb2RIbHdaVDBpYzNadUlpd2dkbUZzZFdVOVl5NTJZV3gxWlNrN0lDQWdJR002VzNSNWNHVTlQU0lrZEdWbElsMGdQVDRnYVhOemRXVW9kSGx3WlQwaWRHVmxJaXdnZG1Gc2RXVTlZeTUyWVd4MVpTazdmVHMifQ.c0l-xqGDFQ8_kCiQ0_vvmDQYG_u544CYmoiucPNxd9MU8ZXT69UD59UgSuya2yl241NoVXA_0LaMEB2re0JnTbPD_dliJn96HnIOqnxXxRh7rKbu65ECUOMWPXbyKQMZ0I3Wjhgt_XyyhfEiQGfJfGzA95-wm6yWqrmW7dMI7JkczG9ideztnr0bsw5NRsIWBXOjVy7Bg66qooTnODS_OqeQ4iaNsN-xjMElHABUxXhpBt2htbhemDU1X41o8clQgG84aEHCgkE07pR-7IL_Fn2gWuPVC66yxAp00W1ib2L-96q78D9J52HPdeDCSFio2RL7r5lOtz8YkQnjacb6xA +``` + ## Sample policy for TPM using Policy version 1.0 ``` @@ -123,31 +148,6 @@ c:[type=="boolProperties", issuer=="AttestationPolicy"] => add(type="elamDriverL The policy uses the TPM version to restrict attestation calls. The issuancerules looks at various properties measured during boot. 
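To connect these policy samples to an actual attestation provider, here is a hedged PowerShell sketch that uploads a policy and then reads it back using the Az.Attestation module. It is illustrative only and not part of this change: the provider name and resource group are placeholders, and the exact cmdlet parameters (`-Tee`, `-PolicyFormat`) should be verified against the current Az.Attestation reference before use.

```powershell
# Minimal sketch: apply a text-format SGX policy (for example, the multi-enclave sample
# in this article) to an attestation provider, then read it back to confirm.
# Assumes the Az.Attestation module is installed and Connect-AzAccount has been run.
# "tradewinds" and "attestation-rg" are placeholder names.
$policyText = Get-Content -Path .\sgx-policy.txt -Raw   # file containing the policy body

Set-AzAttestationPolicy -Name "tradewinds" -ResourceGroupName "attestation-rg" `
    -Tee "SgxEnclave" -Policy $policyText -PolicyFormat "Text"

# Read the policy back to confirm it was applied.
Get-AzAttestationPolicy -Name "tradewinds" -ResourceGroupName "attestation-rg" -Tee "SgxEnclave"
```

For a signed policy like the JWT samples above, the same cmdlet would be used with `-PolicyFormat "JWT"` and the signed token passed as the `-Policy` value.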
-## Sample custom policy to support multiple SGX enclaves - -``` -version= 1.0; -authorizationrules -{ - [ type=="x-ms-sgx-is-debuggable", value==true ]&& - [ type=="x-ms-sgx-mrsigner", value=="mrsigner1"] => permit(); - [ type=="x-ms-sgx-is-debuggable", value==true ]&& - [ type=="x-ms-sgx-mrsigner", value=="mrsigner2"] => permit(); -}; -``` - -## Unsigned Policy for an SGX enclave with PolicyFormat=JWT - -``` -eyJhbGciOiJub25lIn0.eyJBdHRlc3RhdGlvblBvbGljeSI6ICJkbVZ5YzJsdmJqMGdNUzR3TzJGMWRHaHZjbWw2WVhScGIyNXlkV3hsYzN0ak9sdDBlWEJsUFQwaUpHbHpMV1JsWW5WbloyRmliR1VpWFNBOVBpQndaWEp0YVhRb0tUdDlPMmx6YzNWaGJtTmxjblZzWlhON1l6cGJkSGx3WlQwOUlpUnBjeTFrWldKMVoyZGhZbXhsSWwwZ1BUNGdhWE56ZFdVb2RIbHdaVDBpYVhNdFpHVmlkV2RuWVdKc1pTSXNJSFpoYkhWbFBXTXVkbUZzZFdVcE8yTTZXM1I1Y0dVOVBTSWtjMmQ0TFcxeWMybG5ibVZ5SWwwZ1BUNGdhWE56ZFdVb2RIbHdaVDBpYzJkNExXMXljMmxuYm1WeUlpd2dkbUZzZFdVOVl5NTJZV3gxWlNrN1l6cGJkSGx3WlQwOUlpUnpaM2d0YlhKbGJtTnNZWFpsSWwwZ1BUNGdhWE56ZFdVb2RIbHdaVDBpYzJkNExXMXlaVzVqYkdGMlpTSXNJSFpoYkhWbFBXTXVkbUZzZFdVcE8yTTZXM1I1Y0dVOVBTSWtjSEp2WkhWamRDMXBaQ0pkSUQwLUlHbHpjM1ZsS0hSNWNHVTlJbkJ5YjJSMVkzUXRhV1FpTENCMllXeDFaVDFqTG5aaGJIVmxLVHRqT2x0MGVYQmxQVDBpSkhOMmJpSmRJRDAtSUdsemMzVmxLSFI1Y0dVOUluTjJiaUlzSUhaaGJIVmxQV011ZG1Gc2RXVXBPMk02VzNSNWNHVTlQU0lrZEdWbElsMGdQVDRnYVhOemRXVW9kSGx3WlQwaWRHVmxJaXdnZG1Gc2RXVTlZeTUyWVd4MVpTazdmVHMifQ. -``` - -## Signed Policy for an SGX enclave with PolicyFormat=JWT - -``` -eyJhbGciOiJSU0EyNTYiLCJ4NWMiOlsiTUlJQzFqQ0NBYjZnQXdJQkFnSUlTUUdEOUVGakJcdTAwMkJZd0RRWUpLb1pJaHZjTkFRRUxCUUF3SWpFZ01CNEdBMVVFQXhNWFFYUjBaWE4wWVhScGIyNURaWEowYVdacFkyRjBaVEF3SGhjTk1qQXhNVEl6TVRneU1EVXpXaGNOTWpFeE1USXpNVGd5TURVeldqQWlNU0F3SGdZRFZRUURFeGRCZEhSbGMzUmhkR2x2YmtObGNuUnBabWxqWVhSbE1EQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUpyRWVNSlo3UE01VUJFbThoaUNLRGh6YVA2Y2xYdkhmd0RIUXJ5L3V0L3lHMUFuMGJ3MVU2blNvUEVtY2FyMEc1WmYxTUR4alZOdEF5QjZJWThKLzhaQUd4eFFnVVZsd1dHVmtFelpGWEJVQTdpN1B0NURWQTRWNlx1MDAyQkJnanhTZTBCWVpGYmhOcU5zdHhraUNybjYwVTYwQUU1WFx1MDAyQkE1M1JvZjFUUkNyTXNLbDRQVDRQeXAzUUtNVVlDaW9GU3d6TkFQaU8vTy9cdTAwMkJIcWJIMXprU0taUXh6bm5WUGVyYUFyMXNNWkptRHlyUU8vUFlMTHByMXFxSUY2SmJsbjZEenIzcG5uMXk0Wi9OTzJpdFBxMk5Nalx1MDAyQnE2N1FDblNXOC9xYlpuV3ZTNXh2S1F6QVR5VXFaOG1PSnNtSThUU05rLzBMMlBpeS9NQnlpeDdmMTYxQ2tjRm1LU3kwQ0F3RUFBYU1RTUE0d0RBWURWUjBUQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBZ1ZKVWRCaXRud3ZNdDdvci9UMlo4dEtCUUZsejFVcVVSRlRUTTBBcjY2YWx2Y2l4VWJZR3gxVHlTSk5pbm9XSUJROU9QamdMa1dQMkVRRUtvUnhxN1NidGxqNWE1RUQ2VjRyOHRsejRISjY0N3MyM2V0blJFa2o5dE9Gb3ZNZjhOdFNVeDNGTnBhRUdabDJMUlZHd3dcdTAwMkJsVThQd0gzL2IzUmVCZHRhQTdrZmFWNVx1MDAyQml4ZWRjZFN5S1F1VkFUbXZNSTcxM1A4VlBsNk1XbXNNSnRrVjNYVi9ZTUVzUVx1MDAyQkdZcU1yN2tLWGwxM3lldUVmVTJWVkVRc1ovMXRnb29iZVZLaVFcdTAwMkJUcWIwdTJOZHNcdTAwMkJLamRIdmFNYngyUjh6TDNZdTdpR0pRZnd1aU1tdUxSQlJwSUFxTWxRRktLNmRYOXF6Nk9iT01zUjlpczZ6UDZDdmxGcEV6bzVGUT09Il19.eyJBdHRlc3RhdGlvblBvbGljeSI6ImRtVnljMmx2YmoweExqQTdZWFYwYUc5eWFYcGhkR2x2Ym5KMWJHVnpJSHRqT2x0MGVYQmxQVDBpSkdsekxXUmxZblZuWjJGaWJHVWlYU0FtSmlCYmRtRnNkV1U5UFhSeWRXVmRJRDAtSUdSbGJua29LVHM5UGlCd1pYSnRhWFFvS1R0OU8ybHpjM1ZoYm1ObGNuVnNaWE1nZXlBZ0lDQmpPbHQwZVhCbFBUMGlKR2x6TFdSbFluVm5aMkZpYkdVaVhTQTlQaUJwYzNOMVpTaDBlWEJsUFNKT2IzUkVaV0oxWjJkaFlteGxJaXdnZG1Gc2RXVTlZeTUyWVd4MVpTazdJQ0FnSUdNNlczUjVjR1U5UFNJa2FYTXRaR1ZpZFdkbllXSnNaU0pkSUQwLUlHbHpjM1ZsS0hSNWNHVTlJbWx6TFdSbFluVm5aMkZpYkdVaUxDQjJZV3gxWlQxakxuWmhiSFZsS1RzZ0lDQWdZenBiZEhsd1pUMDlJaVJ6WjNndGJYSnphV2R1WlhJaVhTQTlQaUJwYzNOMVpTaDBlWEJsUFNKelozZ3RiWEp6YVdkdVpYSWlMQ0IyWVd4MVpUMWpMblpoYkhWbEtUc2dJQ0FnWXpwYmRIbHdaVDA5SWlSelozZ3RiWEpsYm1Oc1lYWmxJbDBnUFQ0Z2FYTnpkV1VvZEhsd1pUMGljMmQ0TFcxeVpXNWpiR0YyWlNJc0lIW
mhiSFZsUFdNdWRtRnNkV1VwT3lBZ0lDQmpPbHQwZVhCbFBUMGlKSEJ5YjJSMVkzUXRhV1FpWFNBOVBpQnBjM04xWlNoMGVYQmxQU0p3Y205a2RXTjBMV2xrSWl3Z2RtRnNkV1U5WXk1MllXeDFaU2s3SUNBZ0lHTTZXM1I1Y0dVOVBTSWtjM1p1SWwwZ1BUNGdhWE56ZFdVb2RIbHdaVDBpYzNadUlpd2dkbUZzZFdVOVl5NTJZV3gxWlNrN0lDQWdJR002VzNSNWNHVTlQU0lrZEdWbElsMGdQVDRnYVhOemRXVW9kSGx3WlQwaWRHVmxJaXdnZG1Gc2RXVTlZeTUyWVd4MVpTazdmVHMifQ.c0l-xqGDFQ8_kCiQ0_vvmDQYG_u544CYmoiucPNxd9MU8ZXT69UD59UgSuya2yl241NoVXA_0LaMEB2re0JnTbPD_dliJn96HnIOqnxXxRh7rKbu65ECUOMWPXbyKQMZ0I3Wjhgt_XyyhfEiQGfJfGzA95-wm6yWqrmW7dMI7JkczG9ideztnr0bsw5NRsIWBXOjVy7Bg66qooTnODS_OqeQ4iaNsN-xjMElHABUxXhpBt2htbhemDU1X41o8clQgG84aEHCgkE07pR-7IL_Fn2gWuPVC66yxAp00W1ib2L-96q78D9J52HPdeDCSFio2RL7r5lOtz8YkQnjacb6xA -``` - ## Next steps - [How to author and sign an attestation policy](author-sign-policy.md) diff --git a/articles/attestation/toc.yml b/articles/attestation/toc.yml index 96eb9c97e9de9..49287ed752baa 100755 --- a/articles/attestation/toc.yml +++ b/articles/attestation/toc.yml @@ -35,6 +35,8 @@ href: policy-examples.md - name: Policy signer certificate href: policy-signer-examples.md + - name: Attestation token + href: attestation-token-examples.md - name: Concepts items: - name: Basic concepts @@ -68,6 +70,16 @@ href: faq.yml - name: REST href: /rest/api/attestation/ + - name: C++ data-plane SDK + href: https://azuresdkdocs.blob.core.windows.net/$web/cpp/azure-security-attestation/1.0.0-beta.2/index.html + - name: .NET data-plane SDK + href: https://www.nuget.org/packages/Azure.Security.Attestation + - name: Java data-plane SDK + href: https://search.maven.org/artifact/com.azure/azure-security-attestation/1.1.2/jar + - name: Python data-plane SDK + href: https://pypi.org/project/azure-security-attestation/ + - name: JavaScript SDK + href: https://www.npmjs.com/package/@azure/attestation/v/1.0.0 - name: Azure PowerShell href: /powershell/module/az.attestation/#attestation - name: Azure CLI diff --git a/articles/attestation/workflow.md b/articles/attestation/workflow.md index 9e12dc151f551..9ca28cfb22c36 100755 --- a/articles/attestation/workflow.md +++ b/articles/attestation/workflow.md @@ -25,10 +25,10 @@ The following actors are involved in an Azure Attestation work flow: Here are the general steps in a typical SGX enclave attestation workflow (using Azure Attestation): -1. Client collects evidence from an enclave. Evidence is information about the enclave environment and the client library running inside the enclave. -1. The client has an URI which refers to an instance of Azure Attestation. The client sends evidence to Azure Attestation. Exact information submitted to the provider depends on the enclave type. -1. Azure Attestation validates the submitted information and evaluates it against a configured policy. If the verification succeeds, Azure Attestation issues an attestation token and returns it to the client. If this step fails, Azure Attestation reports an error to the client. -1. The client sends the attestation token to relying party. The relying party calls public key metadata endpoint of Azure Attestation to retrieve signing certificates. The relying party then verifies the signature of the attestation token and ensures the enclave trustworthiness. +1. Client collects evidence from an enclave. Evidence is information about the enclave environment and the client library running inside the enclave +1. The client has an URI which refers to an instance of Azure Attestation. The client sends evidence to Azure Attestation. 
Exact information submitted to the provider depends on the enclave type +1. Azure Attestation validates the submitted information and evaluates it against a configured policy. If the verification succeeds, Azure Attestation issues an attestation token and returns it to the client. If this step fails, Azure Attestation reports an error to the client +1. The client sends the attestation token to the relying party. The relying party calls the public key metadata endpoint of Azure Attestation to retrieve signing certificates. The relying party then verifies the signature of the attestation token and ensures the enclave's trustworthiness ![SGX enclave validation flow](./media/sgx-validation-flow.png) @@ -39,11 +39,11 @@ Here are the general steps in a typical TPM enclave attestation workflow (using Azure Attestation): -1. On device/platform boot, various boot loaders and boot services measure events which backed by the TPM and are securely stored (TCG log). -2. Client collects the TCG logs from the device and TPM quote, which acts the evidence for attestation. -3. The client has an URI which refers to an instance of Azure Attestation. The client sends evidence to Azure Attestation. Exact information submitted to the provider depends on the platform. -4. Azure Attestation validates the submitted information and evaluates it against a configured policy. If the verification succeeds, Azure Attestation issues an attestation token and returns it to the client. If this step fails, Azure Attestation reports an error to the client. The communication between the client and attestation service is dictated by the Azure attestation TPM protocol. -5. The client then sends the attestation token to relying party. The relying party calls public key metadata endpoint of Azure Attestation to retrieve signing certificates. The relying party then verifies the signature of the attestation token and ensures the platforms trustworthiness. +1. On device/platform boot, various boot loaders and boot services measure events backed by the TPM and securely store them as TCG logs. The client collects the TCG logs and the TPM quote from the device, which act as the evidence for attestation +2. The client authenticates to Azure AD and obtains an access token +3. The client has a URI that refers to an instance of Azure Attestation. The client sends the evidence and the Azure Active Directory (Azure AD) access token to Azure Attestation. Exact information submitted to the provider depends on the platform +4. Azure Attestation validates the submitted information and evaluates it against a configured policy. If the verification succeeds, Azure Attestation issues an attestation token and returns it to the client. If this step fails, Azure Attestation reports an error to the client. The communication between the client and attestation service is dictated by the Azure attestation TPM protocol +5. The client then sends the attestation token to the relying party. The relying party calls the public key metadata endpoint of Azure Attestation to retrieve signing certificates.
The relying party then verifies the signature of the attestation token and ensures the platforms trustworthiness ![TPM validation flow](./media/tpm-validation-flow.png) diff --git a/articles/automanage/automanage-linux.md b/articles/automanage/automanage-linux.md index e833b45790e9c..09bf3b9e8a6eb 100644 --- a/articles/automanage/automanage-linux.md +++ b/articles/automanage/automanage-linux.md @@ -40,7 +40,7 @@ Automanage supports the following Linux distributions and versions: |[Guest configuration](../governance/policy/concepts/guest-configuration.md) | Guest configuration is used to monitor the configuration and report on the compliance of the machine. The Automanage service will install the Azure Linux baseline using the guest configuration extension. For Linux machines, the guest configuration service will install the baseline in audit-only mode. You will be able to see where your VM is out of compliance with the baseline, but noncompliance won't be automatically remediated. Learn [more](../governance/policy/concepts/guest-configuration.md). |Production, Dev/Test | |[Boot Diagnostics](../virtual-machines/boot-diagnostics.md) | Boot diagnostics is a debugging feature for Azure virtual machines (VM) that allows diagnosis of VM boot failures. Boot diagnostics enables a user to observe the state of their VM as it is booting up by collecting serial log information and screenshots. This will only be enabled for machines that are using managed disks. |Production, Dev/Test | |[Azure Automation Account](../automation/automation-create-standalone-account.md) |Azure Automation supports management throughout the lifecycle of your infrastructure and applications. Learn [more](../automation/automation-intro.md). |Production, Dev/Test | -|[Log Analytics Workspace](../azure-monitor/logs/log-analytics-overview.md) |Azure Monitor stores log data in a Log Analytics workspace, which is an Azure resource and a container where data is collected, aggregated, and serves as an administrative boundary. Learn [more](../azure-monitor/logs/design-logs-deployment.md). |Production, Dev/Test | +|[Log Analytics Workspace](../azure-monitor/logs/log-analytics-workspace-overview.md) |Azure Monitor stores log data in a Log Analytics workspace, which is an Azure resource and a container where data is collected, aggregated, and serves as an administrative boundary. Learn [more](../azure-monitor/logs/workspace-design.md). |Production, Dev/Test | 1 The configuration profile selection is available when you are enabling Automanage. Learn [more](automanage-virtual-machines.md#configuration-profile). You can also create your own custom profile with the set of Azure services and settings that you need. diff --git a/articles/automanage/virtual-machines-best-practices.md b/articles/automanage/virtual-machines-best-practices.md index b4399d38046e8..6ad448a336540 100644 --- a/articles/automanage/virtual-machines-best-practices.md +++ b/articles/automanage/virtual-machines-best-practices.md @@ -28,7 +28,7 @@ For all of these services, we will auto-onboard, auto-configure, monitor for dri |Change Tracking & Inventory |Change Tracking and Inventory combines change tracking and inventory functions to allow you to track virtual machine and server infrastructure changes. The service supports change tracking across services, daemons software, registry, and files in your environment to help you diagnose unwanted changes and raise alerts. 
Inventory support allows you to query in-guest resources for visibility into installed applications and other configuration items. Learn [more](../automation/change-tracking/overview.md). |Azure VM Best Practices – Production, Azure VM Best Practices – Dev/Test |No | |Guest configuration | Guest configuration is used to monitor the configuration and report on the compliance of the machine. The Automanage service will install the [Windows security baselines](/windows/security/threat-protection/windows-security-baselines) using the guest configuration extension. Learn [more](../governance/policy/concepts/guest-configuration.md). |Azure VM Best Practices – Production, Azure VM Best Practices – Dev/Test |No | |Azure Automation Account |Azure Automation supports management throughout the lifecycle of your infrastructure and applications. Learn [more](../automation/automation-intro.md). |Azure VM Best Practices – Production, Azure VM Best Practices – Dev/Test |No | -|Log Analytics Workspace |Azure Monitor stores log data in a Log Analytics workspace, which is an Azure resource and a container where data is collected, aggregated, and serves as an administrative boundary. Learn [more](../azure-monitor/logs/design-logs-deployment.md). |Azure VM Best Practices – Production, Azure VM Best Practices – Dev/Test |No | +|Log Analytics Workspace |Azure Monitor stores log data in a Log Analytics workspace, which is an Azure resource and a container where data is collected, aggregated, and serves as an administrative boundary. Learn [more](../azure-monitor/logs/log-analytics-workspace-overview.md). |Azure VM Best Practices – Production, Azure VM Best Practices – Dev/Test |No | 1 Configuration profiles are available when you are enabling Automanage. Learn [more](automanage-virtual-machines.md). You can also adjust the default settings of the configuration profile and set your own preferences within the best practices constraints. diff --git a/articles/automation/automation-hrw-run-runbooks.md b/articles/automation/automation-hrw-run-runbooks.md index faedab8383793..140c0267e7883 100644 --- a/articles/automation/automation-hrw-run-runbooks.md +++ b/articles/automation/automation-hrw-run-runbooks.md @@ -78,45 +78,95 @@ You can also use an [InlineScript](automation-powershell-workflow.md#use-inlines Hybrid Runbook Workers on Azure virtual machines can use managed identities to authenticate to Azure resources. Using managed identities for Azure resources instead of Run As accounts provides benefits because you don't need to: -* Export the Run As certificate and then import it into the Hybrid Runbook Worker. -* Renew the certificate used by the Run As account. -* Handle the Run As connection object in your runbook code. +- Export the Run As certificate and then import it into the Hybrid Runbook Worker. +- Renew the certificate used by the Run As account. +- Handle the Run As connection object in your runbook code. -Follow the next steps to use a managed identity for Azure resources on a Hybrid Runbook Worker: +There are two ways to use the Managed Identities in Hybrid Runbook Worker scripts. -1. Create an Azure VM. -1. Configure managed identities for Azure resources on the VM. See [Configure managed identities for Azure resources on a VM using the Azure portal](../active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm.md#enable-system-assigned-managed-identity-on-an-existing-vm). -1. Give the VM access to a resource group in Resource Manager. 
Refer to [Use a Windows VM system-assigned managed identity to access Resource Manager](../active-directory/managed-identities-azure-resources/tutorial-windows-vm-access-arm.md#grant-your-vm-access-to-a-resource-group-in-resource-manager). -1. Install the Hybrid Runbook Worker on the VM. See [Deploy a Windows Hybrid Runbook Worker](automation-windows-hrw-install.md) or [Deploy a Linux Hybrid Runbook Worker](automation-linux-hrw-install.md). -1. Update the runbook to use the [Connect-AzAccount](/powershell/module/az.accounts/connect-azaccount) cmdlet with the `Identity` parameter to authenticate to Azure resources. This configuration reduces the need to use a Run As account and perform the associated account management. +1. Use the system-assigned Managed Identity for the Automation account: + + 1. [Configure](/enable-managed-identity-for-automation.md#enable-a-system-assigned-managed-identity-for-an-azure-automation-account) a System-assigned Managed Identity for the Automation account. + 1. Grant this identity the [required permissions](/enable-managed-identity-for-automation.md#assign-role-to-a-system-assigned-managed-identity) within the Subscription to perform its task. + 1. Update the runbook to use the [Connect-AzAccount](/powershell/module/az.accounts/connect-azaccount) cmdlet with the `Identity` parameter to authenticate to Azure resources. This configuration reduces the need to use a Run As account and perform the associated account management. + + ```powershell + # Ensures you do not inherit an AzContext in your runbook + Disable-AzContextAutosave -Scope Process + + # Connect to Azure with system-assigned managed identity + $AzureContext = (Connect-AzAccount -Identity).context + + # set and store context + $AzureContext = Set-AzContext -SubscriptionName $AzureContext.Subscription -DefaultProfile $AzureContext + + # Get all VM names from the subscription + Get-AzVM -DefaultProfile $AzureContext | Select Name + ``` + > [!NOTE] + > It is **Not** possible to use the Automation Account's User Managed Identity on a Hybrid Runbook Worker; it must be the Automation Account's System Managed Identity. + +2. Use the VM Managed Identity for either an Azure VM or an Arc-enabled server running as a Hybrid Runbook Worker. + Here, you can use either the **VM’s User-assigned Managed Identity** or the **VM’s System-assigned Managed Identity**. + + > [!NOTE] + > This will **Not** work in an Automation Account which has been configured with an Automation account Managed Identity. As soon as the Automation account Managed Identity is enabled, you can't use the VM Managed Identity. The only available option is to use the Automation Account **System-Assigned Managed Identity** as mentioned in option 1. + + **To use a VM's system-assigned managed identity**: + + 1. [Configure](/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm) a System Managed Identity for the VM. + 1. Grant this identity the [required permissions](/active-directory/managed-identities-azure-resources/tutorial-windows-vm-access-arm#grant-your-vm-access-to-a-resource-group-in-resource-manager) within the subscription to perform its tasks. + 1. Update the runbook to use the [Connect-AzAccount](/powershell/module/az.accounts/connect-azaccount?view=azps-8.0.0) cmdlet with the `Identity` parameter to authenticate to Azure resources. This configuration reduces the need to use a Run As Account and perform the associated account management.
```powershell - # Ensures you do not inherit an AzContext in your runbook - Disable-AzContextAutosave -Scope Process - - # Connect to Azure with system-assigned managed identity - $AzureContext = (Connect-AzAccount -Identity).context - - # set and store context - $AzureContext = Set-AzContext -SubscriptionName $AzureContext.Subscription -DefaultProfile $AzureContext + # Ensures you do not inherit an AzContext in your runbook + Disable-AzContextAutosave -Scope Process + + # Connect to Azure with system-assigned managed identity + $AzureContext = (Connect-AzAccount -Identity).context + + # set and store context + $AzureContext = Set-AzContext -SubscriptionName $AzureContext.Subscription -DefaultProfile + $AzureContext + + # Get all VM names from the subscription + Get-AzVM -DefaultProfile $AzureContext | Select Name + ``` - # Get all VM names from the subscription - Get-AzVM -DefaultProfile $AzureContext | Select Name + **To use a VM's user-assigned managed identity**: + 1. [Configure](/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#user-assigned-managed-identity) a User Managed Identity for the VM. + 1. Grant this identity the [required permissions](/active-directory/managed-identities-azure-resources/howto-assign-access-portal) within the Subscription to perform its tasks. + 1. Update the runbook to use the [Connect-AzAccount](/powershell/module/az.accounts/connect-azaccount?view=azps-8.0.0) cmdlet with the `Identity ` and `AccountID` parameters to authenticate to Azure resources. This configuration reduces the need to use a Run As account and perform the associated account management. + + ```powershell + # Ensures you do not inherit an AzContext in your runbook + Disable-AzContextAutosave -Scope Process + + # Connect to Azure with user-managed-assigned managed identity. Replace below with the Client Id of the User Managed Identity + $AzureContext = (Connect-AzAccount -Identity -AccountId ).context + + # set and store context + $AzureContext = Set-AzContext -SubscriptionName $AzureContext.Subscription -DefaultProfile + $AzureContext + + # Get all VM names from the subscription + Get-AzVM -DefaultProfile $AzureContext | Select Name ``` + > [!NOTE] + > You can find the client Id of the user-assigned managed identity in the Azure portal. + + > :::image type="content" source="./media/automation-hrw-run-runbooks/managed-identities-client-id-inline.png" alt-text="Screenshot of client id in Managed Identites." lightbox="./media/automation-hrw-run-runbooks/managed-identities-client-id-expanded.png"::: - If you want the runbook to execute with the system-assigned managed identity, leave the code as-is. If you prefer to use a user-assigned managed identity, then: - 1. From line 5, remove `$AzureContext = (Connect-AzAccount -Identity).context`, - 1. Replace it with `$AzureContext = (Connect-AzAccount -Identity -AccountId ).context`, and - 1. Enter the Client ID. >[!NOTE] ->By default, the Azure contexts are saved for use between PowerShell sessions. It is possible that when a previous runbook on the Hybrid Runbook Worker has been authenticated with Azure, that context persists to the disk in the System PowerShell profile, as per [Azure contexts and sign-in credentials | Microsoft Docs](/powershell/azure/context-persistence?view=azps-7.3.2). +> By default, the Azure contexts are saved for use between PowerShell sessions. 
It is possible that when a previous runbook on the Hybrid Runbook Worker has been authenticated with Azure, that context persists to the disk in the System PowerShell profile, as per [Azure contexts and sign-in credentials | Microsoft Docs](/powershell/azure/context-persistence?view=azps-7.3.2). For instance, a runbook with `Get-AzVM` can return all the VMs in the subscription with no call to `Connect-AzAccount`, and the user would be able to access Azure resources without having to authenticate within that runbook. You can disable context autosave in Azure PowerShell, as detailed [here](/powershell/azure/context-persistence?view=azps-7.3.2#save-azure-contexts-across-powershell-sessions). + +### Use runbook authentication with Hybrid Worker Credentials -### Use runbook authentication with Run As account - -Instead of having your runbook provide its own authentication to local resources, you can specify a Run As account for a Hybrid Runbook Worker group. To specify a Run As account, you must define a [credential asset](./shared-resources/credentials.md) that has access to local resources. These resources include certificate stores and all runbooks run under these credentials on a Hybrid Runbook Worker in the group. +Instead of having your runbook provide its own authentication to local resources, you can specify Hybrid Worker Credentials for a Hybrid Runbook Worker group. To specify Hybrid Worker Credentials, you must define a [credential asset](./shared-resources/credentials.md) that has access to local resources. These resources include certificate stores and all runbooks run under these credentials on a Hybrid Runbook Worker in the group. - The user name for the credential must be in one of the following formats: @@ -126,16 +176,35 @@ Instead of having your runbook provide its own authentication to local resources - To use the PowerShell runbook **Export-RunAsCertificateToHybridWorker**, you need to install the Az modules for Azure Automation on the local machine. -#### Use a credential asset to specify a Run As account +#### Use a credential asset for a Hybrid Runbook Worker group -Use the following procedure to specify a Run As account for a Hybrid Runbook Worker group: +By default, Hybrid jobs run under the context of the System account. However, to run Hybrid jobs under a different credential asset, follow these steps: 1. Create a [credential asset](./shared-resources/credentials.md) with access to local resources. 1. Open the Automation account in the Azure portal. 1. Select **Hybrid Worker Groups**, and then select the specific group. -1. Select **All settings**, followed by **Hybrid worker group settings**. -1. Change the value of **Run As** from **Default** to **Custom**. +1. Select **Settings**. +1. Change the value of **Hybrid Worker credentials** from **Default** to **Custom**. 1. Select the credential and click **Save**. +1. If the following permissions are not assigned for Custom users, jobs might get suspended. +Use your discretion in assigning the elevated permissions corresponding to the following registry keys/folders: + +**Registry path** + +- HKLM\SYSTEM\CurrentControlSet\Services\EventLog (read)
+- HKLM\SYSTEM\CurrentControlSet\Services\WinSock2\Parameters (full access)
+- HKLM\SOFTWARE\Microsoft\Wbem\CIMOM (full access)
+- HKLM\Software\Policies\Microsoft\SystemCertificates\Root (full access)
+- HKLM\Software\Microsoft\SystemCertificates (full access)
+- HKLM\Software\Microsoft\EnterpriseCertificates (full access)
+- HKLM\software\Microsoft\HybridRunbookWorker (full access)
+- HKLM\software\Microsoft\HybridRunbookWorkerV2 (full access)
+- HKEY_CURRENT_USER\SOFTWARE\Policies\Microsoft\SystemCertificates\Disallowed (full access)
+- HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\Setup\PnpLockdownFiles (full access)
+ +**Folders** +- C:\ProgramData\AzureConnectedMachineAgent\Tokens (read)
+- C:\Packages\Plugins\Microsoft.Azure.Automation.HybridWorker.HybridWorkerForWindows\0.1.0.18\HybridWorkerPackage\HybridWorkerAgent (full access) ## Install Run As account certificate @@ -394,6 +463,7 @@ To help troubleshoot issues with your runbooks running on a hybrid runbook worke ## Next steps +* For more information on Hybrid Runbook Worker, see [Automation Hybrid Runbook Worker](automation-hybrid-runbook-worker.md). * If your runbooks aren't completing successfully, review the troubleshooting guide for [runbook execution failures](troubleshoot/hybrid-runbook-worker.md#runbook-execution-fails). * For more information on PowerShell, including language reference and learning modules, see [PowerShell Docs](/powershell/scripting/overview). * Learn about [using Azure Policy to manage runbook execution](enforce-job-execution-hybrid-worker.md) with Hybrid Runbook Workers. diff --git a/articles/automation/automation-hybrid-runbook-worker.md b/articles/automation/automation-hybrid-runbook-worker.md index 2ee01f537abc9..865fc8e3279ce 100644 --- a/articles/automation/automation-hybrid-runbook-worker.md +++ b/articles/automation/automation-hybrid-runbook-worker.md @@ -1,6 +1,6 @@ --- title: Azure Automation Hybrid Runbook Worker overview -description: This article provides an overview of the Hybrid Runbook Worker, which you can use to run runbooks on machines in your local datacenter or cloud provider. +description: Learn about the Hybrid Runbook Worker, and how to install it and run runbooks on machines in your local datacenter or cloud provider. services: automation ms.subservice: process-automation ms.date: 11/11/2021 @@ -17,7 +17,7 @@ Azure Automation provides native integration of the Hybrid Runbook Worker role t | Platform | Description | |---|---| |**Extension-based (V2)** |Installed using the [Hybrid Runbook Worker VM extension](./extension-based-hybrid-runbook-worker-install.md), without any dependency on the Log Analytics agent reporting to an Azure Monitor Log Analytics workspace. **This is the recommended platform**.| -|**Agent-based (V1)** |Installed after the [Log Analytics agent](../azure-monitor/agents/log-analytics-agent.md) reporting to an Azure Monitor [Log Analytics workspace](../azure-monitor/logs/design-logs-deployment.md) is completed.| +|**Agent-based (V1)** |Installed after the [Log Analytics agent](../azure-monitor/agents/log-analytics-agent.md) reporting to an Azure Monitor [Log Analytics workspace](../azure-monitor/logs/log-analytics-workspace-overview.md) is completed.| :::image type="content" source="./media/automation-hybrid-runbook-worker/hybrid-worker-group-platform.png" alt-text="Hybrid worker group showing platform field"::: @@ -47,7 +47,7 @@ There are two types of Runbook Workers - system and user. The following table de |**System** |Supports a set of hidden runbooks used by the Update Management feature that are designed to install user-specified updates on Windows and Linux machines.
                  This type of Hybrid Runbook Worker isn't a member of a Hybrid Runbook Worker group, and therefore doesn't run runbooks that target a Runbook Worker group. | |**User** |Supports user-defined runbooks intended to run directly on the Windows and Linux machine that are members of one or more Runbook Worker groups. | -Agent-based (V1) Hybrid Runbook Workers rely on the [Log Analytics agent](../azure-monitor/agents/log-analytics-agent.md) reporting to an Azure Monitor [Log Analytics workspace](../azure-monitor/logs/design-logs-deployment.md). The workspace isn't only to collect monitoring data from the machine, but also to download the components required to install the agent-based Hybrid Runbook Worker. +Agent-based (V1) Hybrid Runbook Workers rely on the [Log Analytics agent](../azure-monitor/agents/log-analytics-agent.md) reporting to an Azure Monitor [Log Analytics workspace](../azure-monitor/logs/log-analytics-workspace-overview.md). The workspace isn't only to collect monitoring data from the machine, but also to download the components required to install the agent-based Hybrid Runbook Worker. When Azure Automation [Update Management](./update-management/overview.md) is enabled, any machine connected to your Log Analytics workspace is automatically configured as a system Hybrid Runbook Worker. To configure it as a user Windows Hybrid Runbook Worker, see [Deploy an agent-based Windows Hybrid Runbook Worker in Automation](automation-windows-hrw-install.md) and for Linux, see [Deploy an agent-based Linux Hybrid Runbook Worker in Automation](./automation-linux-hrw-install.md). diff --git a/articles/automation/automation-linux-hrw-install.md b/articles/automation/automation-linux-hrw-install.md index fb56541bb947d..93759e885eafa 100644 --- a/articles/automation/automation-linux-hrw-install.md +++ b/articles/automation/automation-linux-hrw-install.md @@ -28,7 +28,7 @@ Before you start, make sure that you have the following. The Hybrid Runbook Worker role depends on an Azure Monitor Log Analytics workspace to install and configure the role. You can create it through [Azure Resource Manager](../azure-monitor/logs/resource-manager-workspace.md#create-a-log-analytics-workspace), through [PowerShell](../azure-monitor/logs/powershell-workspace-configuration.md?toc=%2fpowershell%2fmodule%2ftoc.json), or in the [Azure portal](../azure-monitor/logs/quick-create-workspace.md). -If you don't have an Azure Monitor Log Analytics workspace, review the [Azure Monitor Log design guidance](../azure-monitor/logs/design-logs-deployment.md) before you create the workspace. +If you don't have an Azure Monitor Log Analytics workspace, review the [Azure Monitor Log design guidance](../azure-monitor/logs/workspace-design.md) before you create the workspace. ### Log Analytics agent diff --git a/articles/automation/automation-managing-data.md b/articles/automation/automation-managing-data.md index 8df46fb14bcc9..fe54cfc05ad31 100644 --- a/articles/automation/automation-managing-data.md +++ b/articles/automation/automation-managing-data.md @@ -97,6 +97,6 @@ The Automation geo-replication service isn't accessible directly to external cus ## Next steps +* To learn about security guidelines, see [Security best practices in Azure Automation](automation-security-guidelines.md). * To learn more about secure assets in Azure Automation, see [Encryption of secure assets in Azure Automation](automation-secure-asset-encryption.md). 
- * To find out more about geo-replication, see [Creating and using active geo-replication](/azure/azure-sql/database/active-geo-replication-overview). diff --git a/articles/automation/automation-role-based-access-control.md b/articles/automation/automation-role-based-access-control.md index 351c9b06284ee..eef8ee274092f 100644 --- a/articles/automation/automation-role-based-access-control.md +++ b/articles/automation/automation-role-based-access-control.md @@ -1,6 +1,6 @@ --- title: Manage role permissions and security in Azure Automation -description: This article describes how to use Azure role-based access control (Azure RBAC), which enables access management for Azure resources. +description: This article describes how to use Azure role-based access control (Azure RBAC), which enables access management and role permissions for Azure resources. services: automation ms.subservice: shared-capabilities ms.date: 09/10/2021 @@ -9,7 +9,7 @@ ms.custom: devx-track-azurepowershell, subject-rbac-steps #Customer intent: As an administrator, I want to understand permissions so that I use the least necessary set of permissions. --- -# Manage role permissions and security in Automation +# Manage role permissions and security in Azure Automation Azure role-based access control (Azure RBAC) enables access management for Azure resources. Using [Azure RBAC](../role-based-access-control/overview.md), you can segregate duties within your team and grant only the amount of access to users, groups, and applications that they need to perform their jobs. You can grant role-based access to users using the Azure portal, Azure Command-Line tools, or Azure Management APIs. @@ -469,6 +469,7 @@ When a user assigned to the Automation Operator role on the Runbook scope views ## Next steps +* To learn about security guidelines, see [Security best practices in Azure Automation](automation-security-guidelines.md). * To find out more about Azure RBAC using PowerShell, see [Add or remove Azure role assignments using Azure PowerShell](../role-based-access-control/role-assignments-powershell.md). * For details of the types of runbooks, see [Azure Automation runbook types](automation-runbook-types.md). * To start a runbook, see [Start a runbook in Azure Automation](start-runbooks.md). \ No newline at end of file diff --git a/articles/automation/automation-runbook-types.md b/articles/automation/automation-runbook-types.md index 6fad31f9f13fc..436978159d35d 100644 --- a/articles/automation/automation-runbook-types.md +++ b/articles/automation/automation-runbook-types.md @@ -64,7 +64,7 @@ The same Azure sandbox and Hybrid Runbook Worker can execute **PowerShell 5.1** Ensure that you select the right Runtime Version for modules. -For example : if you are executing a runbook for a Sharepoint automation scenario in **Runtime version** *7.1 (preview)*, then import the module in **Runtime version** **7.1 (preview)**; if you are executing a runbook for a Sharepoint automation scenario in **Runtime version** **5.1**, then import the module in **Runtime version** *5.1*. In this case, you would see two entries for the module, one for **Runtime Version** **7.1(preview)** and other for **5.1**. +For example : if you are executing a runbook for a SharePoint automation scenario in **Runtime version** *7.1 (preview)*, then import the module in **Runtime version** **7.1 (preview)**; if you are executing a runbook for a SharePoint automation scenario in **Runtime version** **5.1**, then import the module in **Runtime version** *5.1*. 
In this case, you would see two entries for the module, one for **Runtime Version** **7.1(preview)** and other for **5.1**. :::image type="content" source="./media/automation-runbook-types/runbook-types.png" alt-text="runbook Types."::: diff --git a/articles/automation/automation-secure-asset-encryption.md b/articles/automation/automation-secure-asset-encryption.md index c680e93836d1b..3fc8a4053c1fa 100644 --- a/articles/automation/automation-secure-asset-encryption.md +++ b/articles/automation/automation-secure-asset-encryption.md @@ -281,6 +281,7 @@ To revoke access to customer-managed keys, use PowerShell or the Azure CLI. For ## Next steps +- To learn about security guidelines, see [Security best practices in Azure Automation](automation-security-guidelines.md). - To understand Azure Key Vault, see [What is Azure Key Vault?](../key-vault/general/overview.md). - To work with certificates, see [Manage certificates in Azure Automation](shared-resources/certificates.md). - To handle credentials, see [Manage credentials in Azure Automation](shared-resources/credentials.md). diff --git a/articles/automation/automation-security-guidelines.md b/articles/automation/automation-security-guidelines.md index 6a460daf960ca..2d5db3808abe4 100644 --- a/articles/automation/automation-security-guidelines.md +++ b/articles/automation/automation-security-guidelines.md @@ -1,5 +1,5 @@ --- -title: Azure Automation security guidelines, security best practices Automation. +title: Azure Automation security guidelines, security best practices Automation jobs. description: This article helps you with the guidelines that Azure Automation offers to ensure a secured configuration of Automation account, Hybrid Runbook worker role, authentication certificate and identities, network isolation and policies. services: automation ms.subservice: shared-capabilities @@ -7,7 +7,7 @@ ms.date: 02/16/2022 ms.topic: conceptual --- -# Best practices for security in Azure Automation +# Security best practices in Azure Automation This article details the best practices to securely execute the automation jobs. [Azure Automation](./overview.md) provides you the platform to orchestrate frequent, time consuming, error-prone infrastructure management and operational tasks, as well as mission-critical operations. This service allows you to execute scripts, known as automation runbooks seamlessly across cloud and hybrid environments. diff --git a/articles/automation/automation-services.md b/articles/automation/automation-services.md index 0589ba1535a8c..5b06592069e3a 100644 --- a/articles/automation/automation-services.md +++ b/articles/automation/automation-services.md @@ -1,6 +1,6 @@ --- title: Automation services in Azure - overview -description: This article tells what are the Automation services in Azure and how to use it to automate the lifecycle of infrastructure and applications. +description: This article tells what are the Automation services in Azure and how to compare and use it to automate the lifecycle of infrastructure and applications. 
services: automation keywords: azure automation services, automanage, Bicep, Blueprints, Guest Config, Policy, Functions ms.date: 03/04/2022 diff --git a/articles/automation/automation-solution-vm-management.md b/articles/automation/automation-solution-vm-management.md index 7768e0e2bea78..a0e0ff4b4dcb4 100644 --- a/articles/automation/automation-solution-vm-management.md +++ b/articles/automation/automation-solution-vm-management.md @@ -37,7 +37,7 @@ The following are limitations with the current feature: - The runbooks for the Start/Stop VMs during off hours feature work with an [Azure Run As account](./automation-security-overview.md#run-as-accounts). The Run As account is the preferred authentication method because it uses certificate authentication instead of a password that might expire or change frequently. -- An [Azure Monitor Log Analytics workspace](../azure-monitor/logs/design-logs-deployment.md) that stores the runbook job logs and job stream results in a workspace to query and analyze. The Automation account and Log Analytics workspace need to be in the same subscription and supported region. The workspace needs to already exist, you cannot create a new workspace during deployment of this feature. +- An [Azure Monitor Log Analytics workspace](../azure-monitor/logs/log-analytics-workspace-overview.md) that stores the runbook job logs and job stream results in a workspace to query and analyze. The Automation account and Log Analytics workspace need to be in the same subscription and supported region. The workspace needs to already exist, you cannot create a new workspace during deployment of this feature. We recommend that you use a separate Automation account for working with VMs enabled for the Start/Stop VMs during off-hours feature. Azure module versions are frequently upgraded, and their parameters might change. The feature isn't upgraded on the same cadence and it might not work with newer versions of the cmdlets that it uses. Before importing the updated modules into your production Automation account(s), we recommend you import them into a test Automation account to verify there aren't any compatibility issues. diff --git a/articles/automation/automation-update-azure-modules.md b/articles/automation/automation-update-azure-modules.md index 827b74e324387..432e3a9b098a2 100644 --- a/articles/automation/automation-update-azure-modules.md +++ b/articles/automation/automation-update-azure-modules.md @@ -28,7 +28,7 @@ If you develop your scripts locally, it's recommended to have the same module ve ## Update Az modules -You can update Az modules through the portal **(recommended)** or through the runbook. +The following sections explains on how you can update Az modules either through the **portal** (recommended) or through the runbook. ### Update Az modules through portal @@ -52,7 +52,14 @@ The Azure team will regularly update the module version and provide an option to ### Update Az modules through runbook -To update the Azure modules in your Automation account, you must use the [Update-AutomationAzureModulesForAccount](https://github.com/Microsoft/AzureAutomation-Account-Modules-Update) runbook, available as open source. To start using this runbook to update your Azure modules, download it from the GitHub repository. You can then import it into your Automation account or run it as a script. To learn how to import a runbook in your Automation account, see [Import a runbook](manage-runbooks.md#import-a-runbook). 
In case of any runbook failure, we recommend that you modify the parameters in the runbook according to your specific needs, as the runbook is available as open-source and provided as a reference. +To update the Azure modules in your Automation account: + +1. Use the [Update-AutomationAzureModulesForAccount](https://github.com/Microsoft/AzureAutomation-Account-Modules-Update) runbook, available as open source. +1. Download the runbook from the GitHub repository to start using it to update your Azure modules. +1. Import it into your Automation account or run it as a script. To learn how to import a runbook in your Automation account, see [Import a runbook](manage-runbooks.md#import-a-runbook). + +>[!NOTE] +> We recommend that you update Az modules through the Azure portal. You can also perform this update by using the `Update-AutomationAzureModulesForAccount` script, which is available as open source and provided as a reference. However, if the runbook fails, you need to modify the parameters in the runbook as required or debug the script for your scenario. The **Update-AutomationAzureModulesForAccount** runbook supports updating the Azure, AzureRM, and Az modules by default. Review the [Update Azure modules runbook README](https://github.com/microsoft/AzureAutomation-Account-Modules-Update/blob/master/README.md) for more information on updating Az.Automation modules with this runbook. There are additional important factors that you need to take into account when using the Az modules in your Automation account. To learn more, see [Manage modules in Azure Automation](shared-resources/modules.md). diff --git a/articles/automation/automation-windows-hrw-install.md b/articles/automation/automation-windows-hrw-install.md index 83f64ba1117ca..bed2ea76a9af5 100644 --- a/articles/automation/automation-windows-hrw-install.md +++ b/articles/automation/automation-windows-hrw-install.md @@ -28,7 +28,7 @@ Before you start, make sure that you have the following. The Hybrid Runbook Worker role depends on an Azure Monitor Log Analytics workspace to install and configure the role. You can create it through [Azure Resource Manager](../azure-monitor/logs/resource-manager-workspace.md#create-a-log-analytics-workspace), through [PowerShell](../azure-monitor/logs/powershell-workspace-configuration.md?toc=%2fpowershell%2fmodule%2ftoc.json), or in the [Azure portal](../azure-monitor/logs/quick-create-workspace.md). -If you don't have an Azure Monitor Log Analytics workspace, review the [Azure Monitor Log design guidance](../azure-monitor/logs/design-logs-deployment.md) before you create the workspace. +If you don't have an Azure Monitor Log Analytics workspace, review the [Azure Monitor Log design guidance](../azure-monitor/logs/workspace-design.md) before you create the workspace. ### Log Analytics agent diff --git a/articles/automation/change-tracking/enable-from-runbook.md b/articles/automation/change-tracking/enable-from-runbook.md index 188b0e568e872..248627f3a6a0d 100644 --- a/articles/automation/change-tracking/enable-from-runbook.md +++ b/articles/automation/change-tracking/enable-from-runbook.md @@ -23,7 +23,7 @@ This method uses two runbooks: * Azure subscription. If you don't have one yet, you can [activate your MSDN subscriber benefits](https://azure.microsoft.com/pricing/member-offers/msdn-benefits-details/) or sign up for a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). * [Automation account](../automation-security-overview.md) to manage machines.
-* [Log Analytics workspace](../../azure-monitor/logs/design-logs-deployment.md) +* [Log Analytics workspace](../../azure-monitor/logs/log-analytics-workspace-overview.md) * A [virtual machine](../../virtual-machines/windows/quick-create-portal.md). * Two Automation assets, which are used by the **Enable-AutomationSolution** runbook. This runbook, if it doesn't already exist in your Automation account, is automatically imported by the **Enable-MultipleSolution** runbook during its first run. * *LASolutionSubscriptionId*: Subscription ID of where the Log Analytics workspace is located. diff --git a/articles/automation/disable-local-authentication.md b/articles/automation/disable-local-authentication.md index 9fb2c983ff7f3..2743710a21308 100644 --- a/articles/automation/disable-local-authentication.md +++ b/articles/automation/disable-local-authentication.md @@ -20,7 +20,7 @@ Disabling local authentication doesn't take effect immediately. Allow a few minu >[!NOTE] > Currently, PowerShell support for the new API version (2021-06-22) or the flag – `DisableLocalAuth` is not available. However, you can use the Rest-API with this API version to update the flag. -To allow list and enroll your subscription for this feature in your respective regions, follow the steps in [how to create an Azure support request - Azure supportability | Microsoft Docs](/azure/azure-portal/supportability/how-to-create-azure-support-request). +To allow list and enroll your subscription for this feature in your respective regions, follow the steps in [how to create an Azure support request - Azure supportability | Microsoft Docs](../azure-portal/supportability/how-to-create-azure-support-request.md). ## Re-enable local authentication @@ -42,4 +42,4 @@ Update Management patching will not work when local authentication is disabled. ## Next steps -- [Azure Automation account authentication overview](./automation-security-overview.md) +- [Azure Automation account authentication overview](./automation-security-overview.md) \ No newline at end of file diff --git a/articles/automation/extension-based-hybrid-runbook-worker-install.md b/articles/automation/extension-based-hybrid-runbook-worker-install.md index 8f538dd278439..6e824c7fbd38d 100644 --- a/articles/automation/extension-based-hybrid-runbook-worker-install.md +++ b/articles/automation/extension-based-hybrid-runbook-worker-install.md @@ -67,6 +67,20 @@ If you use a proxy server for communication between Azure Automation and machine > [!NOTE] > You can set up the proxy settings by PowerShell cmdlets or API. + To install the extension using cmdlets: + +1. Get the automation account details using the below API call. + + ```http + GET https://westcentralus.management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}?api-version=2021-06-22 + + ``` + + The API call will provide the value with the key: `AutomationHybridServiceUrl`. Use the URL in the next step to enable extension on the VM. + +1. Install the Hybrid Worker Extension on the VM by running the following PowerShell cmdlet (Required module: Az.Compute). 
Use the `properties.automationHybridServiceUrl` provided by the above API call + + **Proxy server settings** # [Windows](#tab/windows) @@ -82,6 +96,17 @@ $protectedsettings = @{ "ProxyPassword" = "password"; }; ``` +**Azure VMs** + +```powershell +Set-AzVMExtension -ResourceGroupName -Location -VMName -Name "HybridWorkerExtension" -Publisher "Microsoft.Azure.Automation.HybridWorker" -ExtensionType HybridWorkerForWindows -TypeHandlerVersion 0.1 -Settings $settings +``` + +**Azure Arc-enabled VMs** + +```powershell +New-AzConnectedMachineExtension -ResourceGroupName -Location -VMName -Name "HybridWorkerExtension" -Publisher "Microsoft.Azure.Automation.HybridWorker" -ExtensionType HybridWorkerForWindows -TypeHandlerVersion 0.1 -Settings $settings -NoWait +``` # [Linux](#tab/linux) @@ -93,6 +118,18 @@ $settings = @{ "AutomationAccountURL" = "/"; }; ``` +**Azure VMs** + +```powershell +Set-AzVMExtension -ResourceGroupName -Location -VMName -Name "HybridWorkerExtension" -Publisher "Microsoft.Azure.Automation.HybridWorker" -ExtensionType HybridWorkerForLinux -TypeHandlerVersion 0.1 -Settings $settings +``` + +**Azure Arc-enabled VMs** + +```powershell +New-AzConnectedMachineExtension -ResourceGroupName -Location -VMName -Name "HybridWorkerExtension" -Publisher "Microsoft.Azure.Automation.HybridWorker" -ExtensionType HybridWorkerForLinux -TypeHandlerVersion 0.1 -Settings $settings -NoWait +``` + --- ### Firewall use @@ -127,10 +164,10 @@ To create a hybrid worker group in the Azure portal, follow these steps: 1. From the **Basics** tab, in the **Name** text box, enter a name for your Hybrid worker group. -1. For the **Use run as credential** option: +1. For the **Use Hybrid Worker Credentials** option: - - If you select **No**, the hybrid extension will be installed using the local system account. - - If you select **Yes**, then from the drop-down list, select the credential asset. + - If you select **Default**, the hybrid extension will be installed using the local system account. + - If you select **Custom**, then from the drop-down list, select the credential asset. 1. Select **Next** to advance to the **Hybrid workers** tab. You can select Azure virtual machines or Azure Arc-enabled servers to be added to this Hybrid worker group. If you don't select any machines, an empty Hybrid worker group will be created. You can still add machines later. @@ -585,7 +622,7 @@ To install and use Hybrid Worker extension using REST API, follow these steps. T 1. Get the automation account details using this API call. ```http - GET https://westcentralus.management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}?api-version=2021-06-22 + GET https://westcentralus.management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/HybridWorkerExtension?api-version=2021-06-22 ``` @@ -594,7 +631,7 @@ To install and use Hybrid Worker extension using REST API, follow these steps. T 1. Install the Hybrid Worker Extension on Azure VM by using the following API call. 
```http - PUT https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}?api-version=2021-11-01 + PUT https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/HybridWorkerExtension?api-version=2021-11-01 ``` diff --git a/articles/automation/media/automation-hrw-run-runbooks/managed-identities-client-id-expanded.png b/articles/automation/media/automation-hrw-run-runbooks/managed-identities-client-id-expanded.png new file mode 100644 index 0000000000000..baedca73b7f48 Binary files /dev/null and b/articles/automation/media/automation-hrw-run-runbooks/managed-identities-client-id-expanded.png differ diff --git a/articles/automation/media/automation-hrw-run-runbooks/managed-identities-client-id-inline.png b/articles/automation/media/automation-hrw-run-runbooks/managed-identities-client-id-inline.png new file mode 100644 index 0000000000000..baedca73b7f48 Binary files /dev/null and b/articles/automation/media/automation-hrw-run-runbooks/managed-identities-client-id-inline.png differ diff --git a/articles/automation/media/overview/automation-overview.png b/articles/automation/media/overview/automation-overview.png index a10ab5a94703b..3d32e2c883c68 100644 Binary files a/articles/automation/media/overview/automation-overview.png and b/articles/automation/media/overview/automation-overview.png differ diff --git a/articles/automation/overview.md b/articles/automation/overview.md index b49b6729333cf..06ff2f91fa8dd 100644 --- a/articles/automation/overview.md +++ b/articles/automation/overview.md @@ -135,7 +135,7 @@ These Azure services can work with Automation job and runbook resources using an ## Pricing for Azure Automation -Process automation includes runbook jobs and watchers. Billing for jobs is based on the number of job run time minutes used in the month, and for watchers, it is on the number of hours used in a month. The charges for process automation are incurred whenever a [job](/azure/automation/start-runbooks) or [watcher](/azure/automation/automation-scenario-using-watcher-task) runs. +Process automation includes runbook jobs and watchers. Billing for jobs is based on the number of job run time minutes used in the month, and for watchers, it is on the number of hours used in a month. The charges for process automation are incurred whenever a [job](./start-runbooks.md) or [watcher](./automation-scenario-using-watcher-task.md) runs. You create Automation accounts with a Basic SKU, wherein the first 500 job run time minutes are free per subscription. You are billed only for minutes/hours that exceed the 500 mins free included units. You can review the prices associated with Azure Automation on the [pricing](https://azure.microsoft.com/pricing/details/automation/) page. 
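As a rough worked example of that free tier: if your runbook jobs consume a combined 2,300 job run time minutes in a month on the Basic SKU, the first 500 minutes are included and only the remaining 1,800 minutes are billed at the published per-minute rate. The figures are illustrative only; the pricing page has the current rates.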
@@ -143,4 +143,4 @@ You can review the prices associated with Azure Automation on the [pricing](http ## Next steps > [!div class="nextstepaction"] -> [Create an Automation account](./quickstarts/create-account-portal.md) +> [Create an Automation account](./quickstarts/create-account-portal.md) \ No newline at end of file diff --git a/articles/automation/quickstart-create-automation-account-template.md b/articles/automation/quickstart-create-automation-account-template.md index 469f9d7c9991d..5ef25b462e917 100644 --- a/articles/automation/quickstart-create-automation-account-template.md +++ b/articles/automation/quickstart-create-automation-account-template.md @@ -35,7 +35,7 @@ If you're new to Azure Automation and Azure Monitor, it's important that you und * Review [workspace mappings](how-to/region-mappings.md) to specify the supported regions inline or in a parameter file. Only certain regions are supported for linking a Log Analytics workspace and an Automation account in your subscription. -* If you're new to Azure Monitor Logs and haven't deployed a workspace already, review the [workspace design guidance](../azure-monitor/logs/design-logs-deployment.md). This document will help you learn about access control, and help you understand the recommended design implementation strategies for your organization. +* If you're new to Azure Monitor Logs and haven't deployed a workspace already, review the [workspace design guidance](../azure-monitor/logs/workspace-design.md). This document will help you learn about access control, and help you understand the recommended design implementation strategies for your organization. ## Review the template diff --git a/articles/automation/troubleshoot/update-agent-issues.md b/articles/automation/troubleshoot/update-agent-issues.md index e6f97785dead1..35aefce6628d6 100644 --- a/articles/automation/troubleshoot/update-agent-issues.md +++ b/articles/automation/troubleshoot/update-agent-issues.md @@ -44,7 +44,7 @@ Results are shown on the page when they're ready. The checks sections show what' ### Operating system -The operating system check verifies whether the Hybrid Runbook Worker is running [one of the supported operating systems.](/azure/automation/update-management/operating-system-requirements.md#windows-operating-system) +The operating system check verifies whether the Hybrid Runbook Worker is running [one of the supported operating systems.](../update-management/operating-system-requirements.md) ### .NET 4.6.2 diff --git a/articles/automation/update-management/enable-from-runbook.md b/articles/automation/update-management/enable-from-runbook.md index bc2facd427710..109ad1e3b3bb0 100644 --- a/articles/automation/update-management/enable-from-runbook.md +++ b/articles/automation/update-management/enable-from-runbook.md @@ -24,7 +24,7 @@ This method uses two runbooks: * Azure subscription. If you don't have one yet, you can [activate your MSDN subscriber benefits](https://azure.microsoft.com/pricing/member-offers/msdn-benefits-details/) or sign up for a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). * [Automation account](../automation-security-overview.md) to manage machines. -* [Log Analytics workspace](../../azure-monitor/logs/design-logs-deployment.md) +* [Log Analytics workspace](../../azure-monitor/logs/log-analytics-workspace-overview.md) * A [virtual machine](../../virtual-machines/windows/quick-create-portal.md).
* Two Automation assets, which are used by the **Enable-AutomationSolution** runbook. This runbook, if it doesn't already exist in your Automation account, is automatically imported by the **Enable-MultipleSolution** runbook during its first run. * *LASolutionSubscriptionId*: Subscription ID of where the Log Analytics workspace is located. diff --git a/articles/automation/update-management/enable-from-template.md b/articles/automation/update-management/enable-from-template.md index 959c210c6e8b1..34b6ccb7d9efc 100644 --- a/articles/automation/update-management/enable-from-template.md +++ b/articles/automation/update-management/enable-from-template.md @@ -62,7 +62,7 @@ If you're new to Azure Automation and Azure Monitor, it's important that you und * Review [workspace mappings](../how-to/region-mappings.md) to specify the supported regions inline or in a parameter file. Only certain regions are supported for linking a Log Analytics workspace and an Automation account in your subscription. -* If you're new to Azure Monitor logs and have not deployed a workspace already, you should review the [workspace design guidance](../../azure-monitor/logs/design-logs-deployment.md). It will help you to learn about access control, and understand the design implementation strategies we recommend for your organization. +* If you're new to Azure Monitor logs and have not deployed a workspace already, you should review the [workspace design guidance](../../azure-monitor/logs/workspace-design.md). It will help you to learn about access control, and understand the design implementation strategies we recommend for your organization. ## Deploy template diff --git a/articles/automation/update-management/plan-deployment.md b/articles/automation/update-management/plan-deployment.md index e090c8085cd67..49f881e1d9292 100644 --- a/articles/automation/update-management/plan-deployment.md +++ b/articles/automation/update-management/plan-deployment.md @@ -17,7 +17,7 @@ Update Management is an Azure Automation feature, and therefore requires an Auto Update Management depends on a Log Analytics workspace in Azure Monitor to store assessment and update status log data collected from managed machines. Integration with Log Analytics also enables detailed analysis and alerting in Azure Monitor. You can use an existing workspace in your subscription, or create a new one dedicated only for Update Management. -If you are new to Azure Monitor Logs and the Log Analytics workspace, you should review the [Design a Log Analytics workspace](../../azure-monitor/logs/design-logs-deployment.md) deployment guide. +If you are new to Azure Monitor Logs and the Log Analytics workspace, you should review the [Design a Log Analytics workspace](../../azure-monitor/logs/workspace-design.md) deployment guide. ## Step 3 - Supported operating systems diff --git a/articles/automation/whats-new.md b/articles/automation/whats-new.md index fd1939b6843b9..55141c35a67b5 100644 --- a/articles/automation/whats-new.md +++ b/articles/automation/whats-new.md @@ -13,9 +13,12 @@ ms.custom: references_regions Azure Automation receives improvements on an ongoing basis. To stay up to date with the most recent developments, this article provides you with information about: - The latest releases +- New features +- Improvements to existing features - Known issues - Bug fixes + This page is updated monthly, so revisit it regularly. 
If you're looking for items older than six months, you can find them in [Archive for What's new in Azure Automation](whats-new-archive.md). @@ -49,7 +52,7 @@ Users can now restore an Automation account deleted within 30 days. Read [here]( **Type:** New feature -New scripts are added to the Azure Automation [GitHub repository](https://github.com/azureautomation) to address one of Azure Automation's key scenarios of VM management based on Azure Monitor alert. For more information, see [Trigger runbook from Azure alert](./automation-create-alert-triggered-runbook.md). +New scripts are added to the Azure Automation [GitHub repository](https://github.com/azureautomation) to address one of Azure Automation's key scenarios of VM management based on Azure Monitor alert. For more information, see [Trigger runbook from Azure alert](./automation-create-alert-triggered-runbook.md#common-azure-vm-management-operations). - Stop-Azure-VM-On-Alert - Restart-Azure-VM-On-Alert diff --git a/articles/availability-zones/TOC.yml b/articles/availability-zones/TOC.yml index 3e623d74f98f2..c00d5afb8708e 100644 --- a/articles/availability-zones/TOC.yml +++ b/articles/availability-zones/TOC.yml @@ -16,6 +16,8 @@ href: az-region.md - name: Migration Guidance items: + - name: Virtual Machines and Virtual Machine Scale Sets + href: migrate-vm.md - name: Storage accounts href: migrate-storage.md - name: Terminology @@ -102,8 +104,7 @@ - name: Identity items: - name: Create an Azure Active Directory Domain Services instance - href: ../active-directory-domain-services/tutorial-create-instance.md - + href: ../active-directory-domain-services/tutorial-create-instance.md - name: Disaster Recovery items: - name: Business continuity management in Azure diff --git a/articles/availability-zones/az-overview.md b/articles/availability-zones/az-overview.md index 05c76a63d074d..6b120b23c57a8 100644 --- a/articles/availability-zones/az-overview.md +++ b/articles/availability-zones/az-overview.md @@ -4,7 +4,7 @@ description: Learn about regions and availability zones and how they work to hel author: awysza ms.service: azure ms.topic: conceptual -ms.date: 03/30/2022 +ms.date: 05/30/2022 ms.author: rarco ms.reviewer: cynthn ms.custom: references_regions @@ -42,21 +42,7 @@ Some organizations require high availability of availability zones and protectio ## Azure regions with availability zones -Azure provides the most extensive global footprint of any cloud provider and is rapidly opening new regions and availability zones. The following regions currently support availability zones. - -| Americas | Europe | Africa | Asia Pacific | -|--------------------|----------------------|---------------------|----------------| -| Brazil South | France Central | South Africa North | Australia East | -| Canada Central | Germany West Central | | Central India | -| Central US | North Europe | | Japan East | -| East US | Norway East | | Korea Central | -| East US 2 | UK South | | Southeast Asia | -| South Central US | West Europe | | East Asia | -| US Gov Virginia | Sweden Central | | China North 3 | -| West US 2 | Switzerland North* | | | -| West US 3 | | | | - -\* To learn more about Availability Zones and available services support in these regions, contact your Microsoft sales or customer representative. For the upcoming regions that will support Availability Zones, see [Azure geographies](https://azure.microsoft.com/global-infrastructure/geographies/). 
+[!INCLUDE [availability-zone-regions-include](./includes/availability-zone-regions-include.md)] ## Next steps diff --git a/articles/availability-zones/az-region.md b/articles/availability-zones/az-region.md index fa1787e731f1b..6e27b3f2df00e 100644 --- a/articles/availability-zones/az-region.md +++ b/articles/availability-zones/az-region.md @@ -4,7 +4,7 @@ description: Learn what services are supported by availability zones and underst author: awysza ms.service: azure ms.topic: conceptual -ms.date: 03/25/2022 +ms.date: 05/30/2022 ms.author: rarco ms.reviewer: cynthn ms.custom: references_regions @@ -19,21 +19,7 @@ Azure strives to enable high resiliency across every service and offering. Runni ## Azure regions with availability zones -Azure provides the most extensive global footprint of any cloud provider and is rapidly opening new regions and availability zones. The following regions currently support availability zones. - -| Americas | Europe | Africa | Asia Pacific | -|--------------------|----------------------|---------------------|----------------| -| Brazil South | France Central | South Africa North | Australia East | -| Canada Central | Germany West Central | | Central India | -| Central US | North Europe | | Japan East | -| East US | Norway East | | Korea Central | -| East US 2 | UK South | | Southeast Asia | -| South Central US | West Europe | | East Asia | -| US Gov Virginia | Sweden Central | | China North 3 | -| West US 2 | Switzerland North* | | | -| West US 3 | | | | - -\* To learn more about Availability Zones and available services support in these regions, contact your Microsoft sales or customer representative. For the upcoming regions that will support Availability Zones, see [Azure geographies](https://azure.microsoft.com/global-infrastructure/geographies/). +[!INCLUDE [availability-zone-regions-include](./includes/availability-zone-regions-include.md)] For a list of Azure services that support availability zones by Azure region, see the [availability zones documentation](az-overview.md). 
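One practical way to confirm zone support from your own subscription is to query the compute resource SKUs for a region. The following PowerShell sketch assumes the Az.Compute module and an already signed-in session (`Connect-AzAccount`); the region name `eastus2` is only a placeholder.

```powershell
# List VM sizes that report availability zone support in a given region.
# Assumes Connect-AzAccount has already been run; "eastus2" is a placeholder region.
Get-AzComputeResourceSku |
    Where-Object {
        $_.ResourceType -eq "virtualMachines" -and
        $_.Locations -contains "eastus2" -and
        $_.LocationInfo.Zones
    } |
    Select-Object Name, @{ Name = "Zones"; Expression = { $_.LocationInfo.Zones -join "," } }
```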
@@ -84,19 +70,19 @@ In the Product Catalog, always-available services are listed as "non-regional" s | [Azure Storage: Disk Storage](migrate-storage.md) | ![An icon that signifies this service is zone redundant.](media/icon-zone-redundant.svg) | | [Azure Storage: Blob Storage](migrate-storage.md) | ![An icon that signifies this service is zone redundant.](media/icon-zone-redundant.svg) | | [Azure Storage: Managed Disks](migrate-storage.md) | ![An icon that signifies this service is zone redundant.](media/icon-zone-redundant.svg) ![An icon that signifies this service is zonal](media/icon-zonal.svg) | -| [Azure Virtual Machine Scale Sets](../virtual-machine-scale-sets/scripts/cli-sample-zone-redundant-scale-set.md) | ![An icon that signifies this service is zone redundant.](media/icon-zone-redundant.svg) ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | -| [Azure Virtual Machines](../virtual-machines/windows/create-powershell-availability-zone.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | -| Virtual Machines: [Av2-Series](../virtual-machines/windows/create-powershell-availability-zone.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | -| Virtual Machines: [Bs-Series](../virtual-machines/windows/create-powershell-availability-zone.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | -| Virtual Machines: [DSv2-Series](../virtual-machines/windows/create-powershell-availability-zone.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | -| Virtual Machines: [DSv3-Series](../virtual-machines/windows/create-powershell-availability-zone.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | -| Virtual Machines: [Dv2-Series](../virtual-machines/windows/create-powershell-availability-zone.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | -| Virtual Machines: [Dv3-Series](../virtual-machines/windows/create-powershell-availability-zone.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | -| Virtual Machines: [ESv3-Series](../virtual-machines/windows/create-powershell-availability-zone.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | -| Virtual Machines: [Ev3-Series](../virtual-machines/windows/create-powershell-availability-zone.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | -| Virtual Machines: [F-Series](../virtual-machines/windows/create-powershell-availability-zone.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | -| Virtual Machines: [FS-Series](../virtual-machines/windows/create-powershell-availability-zone.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | -| Virtual Machines: [Azure Compute Gallery](../virtual-machines/azure-compute-gallery.md#high-availability)| ![An icon that signifies this service is zone redundant.](media/icon-zone-redundant.svg) | +| [Azure Virtual Machine Scale Sets](migrate-vm.md) | ![An icon that signifies this service is zone redundant.](media/icon-zone-redundant.svg) ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | +| [Azure Virtual Machines](migrate-vm.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | +| Virtual Machines: [Av2-Series](migrate-vm.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | +| Virtual Machines: [Bs-Series](migrate-vm.md) | ![An icon that 
signifies this service is zonal.](media/icon-zonal.svg) | +| Virtual Machines: [DSv2-Series](migrate-vm.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | +| Virtual Machines: [DSv3-Series](migrate-vm.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | +| Virtual Machines: [Dv2-Series](migrate-vm.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | +| Virtual Machines: [Dv3-Series](migrate-vm.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | +| Virtual Machines: [ESv3-Series](migrate-vm.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | +| Virtual Machines: [Ev3-Series](migrate-vm.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | +| Virtual Machines: [F-Series](migrate-vm.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | +| Virtual Machines: [FS-Series](migrate-vm.md) | ![An icon that signifies this service is zonal.](media/icon-zonal.svg) | +| Virtual Machines: [Azure Compute Gallery](migrate-vm.md)| ![An icon that signifies this service is zone redundant.](media/icon-zone-redundant.svg) | | [Azure Virtual Network](../vpn-gateway/create-zone-redundant-vnet-gateway.md) | ![An icon that signifies this service is zone redundant.](media/icon-zone-redundant.svg) | | [Azure VPN Gateway](../vpn-gateway/about-zone-redundant-vnet-gateways.md) | ![An icon that signifies this service is zone redundant.](media/icon-zone-redundant.svg) | diff --git a/articles/availability-zones/includes/availability-zone-regions-include.md b/articles/availability-zones/includes/availability-zone-regions-include.md new file mode 100644 index 0000000000000..de0bb4db74c61 --- /dev/null +++ b/articles/availability-zones/includes/availability-zone-regions-include.md @@ -0,0 +1,24 @@ +--- + title: include file + description: include file + author: awysza + ms.service: azure + ms.topic: include + ms.date: 05/30/2022 + ms.author: rarco + ms.custom: include file +--- + +Azure provides the most extensive global footprint of any cloud provider and is rapidly opening new regions and availability zones. The following regions currently support availability zones. + +| Americas | Europe | Africa | Asia Pacific | +|--------------------|----------------------|---------------------|----------------| +| Brazil South | France Central | South Africa North | Australia East | +| Canada Central | Germany West Central | | Central India | +| Central US | North Europe | | Japan East | +| East US | Norway East | | Korea Central | +| East US 2 | UK South | | Southeast Asia | +| South Central US | West Europe | | East Asia | +| US Gov Virginia | Sweden Central | | China North 3 | +| West US 2 | Switzerland North | | | +| West US 3 | | | | diff --git a/articles/availability-zones/migrate-vm.md b/articles/availability-zones/migrate-vm.md new file mode 100644 index 0000000000000..5bd4d24ea54c9 --- /dev/null +++ b/articles/availability-zones/migrate-vm.md @@ -0,0 +1,96 @@ +--- +title: Migrate Azure Virtual Machines and Azure Virtual Machine Scale Sets to availability zone support +description: Learn how to migrate your Azure Virtual Machines and Virtual Machine Scale Sets to availability zone support. 
+author: anaharris-ms +ms.service: azure +ms.topic: conceptual +ms.date: 04/21/2022 +ms.author: anaharris +ms.reviewer: anaharris +ms.custom: references_regions +--- + +# Migrate Virtual Machines and Virtual Machine Scale Sets to availability zone support + +This guide describes how to migrate Virtual Machines (VMs) and Virtual Machine Scale Sets (VMSS) from non-availability zone support to availability zone support. We'll take you through the different options for migration, including how you can use availability zone support for Disaster Recovery solutions. + +Virtual Machine (VM) and Virtual Machine Scale Sets (VMSS) are zonal services, which means that VM resources can be deployed by using one of the following methods: + +- VM resources are deployed to a specific, self-selected availability zone to achieve more stringent latency or performance requirements. + +- VM resources are replicated to one or more zones within the region to improve the resiliency of the application and data in a High Availability (HA) architecture. + +When you migrate resources to availability zone support, we recommend that you select multiple zones for your new VMs and VMSS, to ensure high availability of your compute resources. + +## Prerequisites + +To migrate to availability zone support, your VM SKUs must be available across the zones in your region. To check for VM SKU availability, use one of the following methods: + +- Use PowerShell to [Check VM SKU availability](../virtual-machines/windows/create-PowerShell-availability-zone.md#check-vm-sku-availability). +- Use the Azure CLI to [Check VM SKU availability](../virtual-machines/linux/create-cli-availability-zone.md#check-vm-sku-availability). +- Go to [Foundational Services](az-region.md#an-icon-that-signifies-this-service-is-foundational-foundational-services). + +## Downtime requirements + +Because zonal VMs are created across the availability zones, all migration options mentioned in this article require downtime during deployment. + +## Migration Option 1: Redeployment + +### When to use redeployment + +Use the redeployment option if you have good Infrastructure as Code (IaC) practices set up to manage infrastructure. The redeployment option gives you more control, and the ability to automate various processes within your deployment pipelines. + +### Redeployment considerations + +- When you redeploy your VM and VMSS resources, the underlying resources such as managed disk and IP address for the VM are created in the same availability zone. You must use a Standard SKU public IP address and load balancer to create zone-redundant network resources. + +- For zonal deployments that require reasonably low network latency and good performance between application tier and data tier, use [proximity placement groups](../virtual-machines/co-location.md). Proximity placement groups can force grouping of different VM resources under a single network spine. For an example of an SAP workload that uses proximity placement groups, see [Azure proximity placement groups for optimal network latency with SAP applications](../virtual-machines/workloads/sap/sap-proximity-placement-scenarios.md). + +### How to redeploy + +To redeploy, you'll need to recreate your VM and VMSS resources. To ensure high availability of your compute resources, it's recommended that you select multiple zones for your new VMs and VMSS.
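As a minimal sketch of that redeployment step, the following PowerShell creates a new zonal VM with the Az module; the resource group, VM name, image alias, size, region, and zone are all placeholder values to replace with your own. The same `-Zone` parameter, given as a list, also applies to `New-AzVmss` in its simplified parameter set when you spread scale set instances across zones.

```powershell
# Recreate a VM as a zonal VM in zone 1 of the target region.
# All names and values below are placeholders - replace them with your own.
$cred = Get-Credential

New-AzVM `
    -ResourceGroupName "rg-zonal-demo" `
    -Name "vm-zonal-01" `
    -Location "eastus2" `
    -Image "Win2019Datacenter" `
    -Size "Standard_D2s_v3" `
    -Zone "1" `
    -Credential $cred
```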
+ +To learn how to create VMs in an availability zone, see: + +- [Create VM using Azure CLI](../virtual-machines/linux/create-cli-availability-zone.md) +- [Create VM using Azure PowerShell](../virtual-machines/windows/create-PowerShell-availability-zone.md) +- [Create VM using Azure portal](../virtual-machines/create-portal-availability-zone.md?tabs=standard) + +To learn how to create VMSS in an availability zone, see [Create a virtual machine scale set that uses Availability Zones](../virtual-machine-scale-sets/virtual-machine-scale-sets-use-availability-zones.md). + +## Migration Option 2: Azure Resource Mover + +### When to use Azure Resource Mover + +Use Azure Resource Mover for an easy way to move VMs or encrypted VMs from one region without availability zones to another with availability zones. If you want to learn more about the benefits of using Azure Resource Mover, see [Why use Azure Resource Mover?](../resource-mover/overview.md#why-use-resource-mover). + +### Azure Resource Mover considerations + +When you use Azure Resource Mover, all keys and secrets are copied from the source key vault to the newly created destination key vault in your target region. All resources related to your customer-managed keys, such as Azure Key Vaults, disk encryption sets, VMs, disks, and snapshots, must be in the same subscription and region. Azure Key Vault’s default availability and redundancy feature can't be used as the destination key vault for the moved VM resources, even if the target region is a secondary region to which your source key vault is replicated. + +### How to use Azure Resource Mover + +To learn how to move VMs to another region, see [Move Azure VMs to an availability zone in another region](../resource-mover/move-region-availability-zone.md). + +To learn how to move encrypted VMs to another region, see [Tutorial: Move encrypted Azure VMs across regions](../resource-mover/tutorial-move-region-encrypted-virtual-machines.md). + +## Disaster Recovery Considerations + +Typically, availability zones are used to deploy VMs in a High Availability configuration. They may be too close to each other to serve as a Disaster Recovery solution during a natural disaster. However, there are scenarios where availability zones can be used for Disaster Recovery. To learn more, see [Using Availability Zones for Disaster Recovery](../site-recovery/azure-to-azure-how-to-enable-zone-to-zone-disaster-recovery.md#using-availability-zones-for-disaster-recovery). + +The following requirements should be part of a disaster recovery strategy that helps your organization run its workloads during planned or unplanned outages across zones: + +- The source VM must already be a zonal VM, which means that it's placed in a logical zone. +- You'll need to replicate your VM from one zone to another zone using the Azure Site Recovery service. +- Once your VM is replicated to another zone, you can follow steps to run a Disaster Recovery drill, fail over, reprotect, and fail back. +- To enable VM disaster recovery between availability zones, follow the instructions in [Enable Azure VM disaster recovery between availability zones](../site-recovery/azure-to-azure-how-to-enable-zone-to-zone-disaster-recovery.md).
+ +## Next Steps + +Learn more about: + +> [!div class="nextstepaction"] +> [Regions and Availability Zones in Azure](az-overview.md) + +> [!div class="nextstepaction"] +> [Azure Services that support Availability Zones](az-region.md) \ No newline at end of file diff --git a/articles/azure-app-configuration/TOC.yml b/articles/azure-app-configuration/TOC.yml index bcdde3634ebfe..60261c886011c 100644 --- a/articles/azure-app-configuration/TOC.yml +++ b/articles/azure-app-configuration/TOC.yml @@ -171,6 +171,8 @@ href: howto-move-resource-between-regions.md - name: Recover App Configuration stores (Preview) href: howto-recover-deleted-stores-in-azure-app-configuration.md + - name: Disable public access + href: howto-disable-public-access.md - name: Reference items: - name: Client libraries diff --git a/articles/azure-app-configuration/concept-github-action.md b/articles/azure-app-configuration/concept-github-action.md index 4b560432d1fd1..00ac784079407 100644 --- a/articles/azure-app-configuration/concept-github-action.md +++ b/articles/azure-app-configuration/concept-github-action.md @@ -20,7 +20,7 @@ A GitHub Actions [workflow](https://docs.github.com/en/actions/learn-github-acti The GitHub [documentation](https://docs.github.com/en/actions/learn-github-actions/introduction-to-github-actions) provides in-depth view of GitHub workflows and actions. ## Enable GitHub Actions in your repository -To start using this GitHub action, go to your repository and select the **Actions** tab. Select **New workflow**, then **Set up a workflow yourself**. Finally, search the marketplace for “Azure App Configuration Sync.” +To start using this GitHub Action, go to your repository and select the **Actions** tab. Select **New workflow**, then **Set up a workflow yourself**. Finally, search the marketplace for “Azure App Configuration Sync.” > [!div class="mx-imgBorder"] > ![Select the Action tab](media/find-github-action.png) @@ -57,7 +57,7 @@ jobs: ``` ## Use strict sync -By default the GitHub action does not enable strict mode, meaning that the sync will only add key-values from the configuration file to the App Configuration instance (no key-value pairs will be deleted). Enabling strict mode will mean key-value pairs that aren't in the configuration file are deleted from the App Configuration instance, so that it matches the configuration file. If you are syncing from multiple sources or using Azure Key Vault with App Configuration, you'll want to use different prefixes or labels with strict sync to avoid wiping out configuration settings from other files (see samples below). +By default the GitHub Action does not enable strict mode, meaning that the sync will only add key-values from the configuration file to the App Configuration instance (no key-value pairs will be deleted). Enabling strict mode will mean key-value pairs that aren't in the configuration file are deleted from the App Configuration instance, so that it matches the configuration file. If you are syncing from multiple sources or using Azure Key Vault with App Configuration, you'll want to use different prefixes or labels with strict sync to avoid wiping out configuration settings from other files (see samples below). 
```json on: diff --git a/articles/azure-app-configuration/concept-soft-delete.md b/articles/azure-app-configuration/concept-soft-delete.md index 28d6ebd4a8b45..8d5879caf90a9 100644 --- a/articles/azure-app-configuration/concept-soft-delete.md +++ b/articles/azure-app-configuration/concept-soft-delete.md @@ -38,13 +38,18 @@ Purge is the operation to permanently delete the stores in a soft deleted state, ## Purge protection With Purge protection enabled, soft deleted stores can't be purged in the retention period. If disabled, the soft deleted store can be purged before the retention period expires. Once purge protection is enabled on a store, it can't be disabled. -## Permissions to recover or purge store +## Permissions to recover a deleted store -A user has to have below permissions to recover or purge a soft-deleted app configuration store. The built-in Contributor and Owner roles already have the required permissions to recover and purge. +- `Microsoft.AppConfiguration/configurationStores/write` -- Permission to recover - `Microsoft.AppConfiguration/configurationStores/write` +To recover a deleted App Configuration store the `Microsoft.AppConfiguration/configurationStores/write` permission is needed. The built-in "Owner" and "Contributor" roles contain this permission by default. The permission can be assigned at the subscription or resource group scope. -- Permission to purge - `Microsoft.AppConfiguration/configurationStores/action` +## Permissions to read and purge deleted stores + +* Read: `Microsoft.AppConfiguration/locations/deletedConfigurationStores/read` +* Purge: `Microsoft.AppConfiguration/locations/deletedConfigurationStores/purge/action` + +To list deleted App Configuration stores, or get an individual store by name the `Microsoft.AppConfiguration/locations/deletedConfigurationStores/read` permission is needed. To purge a deleted App Configuration store the `Microsoft.AppConfiguration/locations/deletedConfigurationStores/purge/action` permission is needed. The built-in "Owner" and "Contributor" roles contain these permissions by default. Permissions for reading and purging deleted App Configuration stores must be assigned at the subscription level. This is because deleted configuration stores exist outside of individual resource groups. ## Billing implications diff --git a/articles/azure-app-configuration/howto-disable-public-access.md b/articles/azure-app-configuration/howto-disable-public-access.md new file mode 100644 index 0000000000000..3d0287e9183d2 --- /dev/null +++ b/articles/azure-app-configuration/howto-disable-public-access.md @@ -0,0 +1,79 @@ +--- +title: How to disable public access in Azure App Configuration +description: How to disable public access to your Azure App Configuration store. +author: maud-lv +ms.author: malev +ms.service: azure-app-configuration +ms.topic: how-to +ms.date: 05/25/2022 +ms.custom: template-how-to +--- + +# Disable public access in Azure App Configuration + +In this article, you'll learn how to disable public access for your Azure App Configuration store. Setting up private access can offer a better security for your configuration store. + +## Prerequisites + +- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/dotnet). +- We assume you already have an App Configuration store. If you want to create one, [create an App Configuration store](quickstart-aspnet-core-app.md). + +## Sign in to Azure + +You will need to sign in to Azure first to access the App Configuration service. 
+ +### [Portal](#tab/azure-portal) + +Sign in to the Azure portal at [https://portal.azure.com/](https://portal.azure.com/) with your Azure account. + +### [Azure CLI](#tab/azure-cli) + +Sign in to Azure using the `az login` command in the [Azure CLI](/cli/azure/install-azure-cli). + +```azurecli-interactive +az login +``` + +This command will prompt your web browser to launch and load an Azure sign-in page. If the browser fails to open, use device code flow with `az login --use-device-code`. For more sign in options, go to [sign in with the Azure CLI](/cli/azure/authenticate-azure-cli). + +--- + +## Disable public access to a store + +Azure App Configuration offers three public access options: + +- Automatic public access: public network access is enabled, as long as you don't have a private endpoint present. Once you create a private endpoint, App Configuration disables public network access and enables private access. This option can only be selected when creating the store. +- Disabled: public access is disabled and no traffic can access this resource unless it's through a private endpoint. +- Enabled: all networks can access this resource. + +To disable access to the App Configuration store from public network, follow the process below. + +### [Portal](#tab/azure-portal) + +1. In your App Configuration store, under **Settings**, select **Networking**. +1. Under **Public Access**, select **Disabled** to disable public access to the App Configuration store and only allow access through private endpoints. If you already had public access disabled and instead wanted to enable public access to your configuration store, you would select **Enabled**. + + > [!NOTE] + > Once you've switched **Public Access** to **Disabled** or **Enabled**, you won't be able to select **Public Access: Automatic** anymore, as this option can only be selected when creating the store. + +1. Select **Apply**. + +:::image type="content" source="media/disable-public-access.png" alt-text="Screenshot of the Azure portal disabling public access."::: + +### [Azure CLI](#tab/azure-cli) + +In the CLI, run the following code: + +```azurecli-interactive +az appconfig update --name --enable-public-network false +``` + +> [!NOTE] +> When you create an App Config store without specifying if you want public access to be enabled or disabled, public access is set to automatic by default. After you've run the `--enable-public-network` command, you won't be able to switch to an automatic public access anymore. + +--- + +## Next steps + +> [!div class="nextstepaction"] +>[Using private endpoints for Azure App Configuration](./concept-private-endpoint.md) diff --git a/articles/azure-app-configuration/howto-recover-deleted-stores-in-azure-app-configuration.md b/articles/azure-app-configuration/howto-recover-deleted-stores-in-azure-app-configuration.md index 7847d8fb43e9a..8374871985111 100644 --- a/articles/azure-app-configuration/howto-recover-deleted-stores-in-azure-app-configuration.md +++ b/articles/azure-app-configuration/howto-recover-deleted-stores-in-azure-app-configuration.md @@ -19,7 +19,7 @@ To learn more about the concept of soft delete feature, see [Soft-Delete in Azur * An Azure subscription - [create one for free](https://azure.microsoft.com/free/dotnet) -* Refer to the [Soft-Delete in Azure App Configuration](./concept-soft-delete.md#permissions-to-recover-or-purge-store) for permissions requirements. 
+* Refer to the [Soft-Delete in Azure App Configuration](./concept-soft-delete.md#permissions-to-recover-a-deleted-store) section for permissions requirements. ## Set retention policy and enable purge protection at store creation diff --git a/articles/azure-app-configuration/media/disable-public-access.png b/articles/azure-app-configuration/media/disable-public-access.png new file mode 100644 index 0000000000000..f40a1dfed0c03 Binary files /dev/null and b/articles/azure-app-configuration/media/disable-public-access.png differ diff --git a/articles/azure-arc/data/active-directory-introduction.md b/articles/azure-arc/data/active-directory-introduction.md index 4e2aa81f5f331..badf4919ec5ec 100644 --- a/articles/azure-arc/data/active-directory-introduction.md +++ b/articles/azure-arc/data/active-directory-introduction.md @@ -12,8 +12,11 @@ ms.topic: how-to --- # Azure Arc-enabled SQL Managed Instance with Active Directory authentication + Azure Arc-enabled data services support Active Directory (AD) for Identity and Access Management (IAM). The Arc-enabled SQL Managed Instance uses an existing on-premises Active Directory (AD) domain for authentication. +[!INCLUDE [azure-arc-data-preview](../../../includes/azure-arc-data-preview.md)] + This article describes how to enable Azure Arc-enabled SQL Managed Instance with Active Directory (AD) Authentication. The article demonstrates two possible AD integration modes: - Customer-managed keytab (CMK) - System-managed keytab (SMK) diff --git a/articles/azure-arc/data/active-directory-prerequisites.md b/articles/azure-arc/data/active-directory-prerequisites.md index 97abd37826960..fafd2be89973f 100644 --- a/articles/azure-arc/data/active-directory-prerequisites.md +++ b/articles/azure-arc/data/active-directory-prerequisites.md @@ -15,6 +15,8 @@ ms.topic: how-to This document explains how to prepare to deploy Azure Arc-enabled data services with Active Directory (AD) authentication. Specifically the article describes Active Directory objects you need to configure before the deployment of Kubernetes resources. +[!INCLUDE [azure-arc-data-preview](../../../includes/azure-arc-data-preview.md)] + [The introduction](active-directory-introduction.md#compare-ad-integration-modes) describes two different integration modes: - *System-managed keytab* mode allows the system to create and manage the AD accounts for each SQL Managed Instance. - *Customer-managed keytab* mode allows you to create and manage the AD accounts for each SQL Managed Instance. diff --git a/articles/azure-arc/data/configure-managed-instance.md b/articles/azure-arc/data/configure-managed-instance.md index e60fcc4ca9cdf..fd08daf42e3ea 100644 --- a/articles/azure-arc/data/configure-managed-instance.md +++ b/articles/azure-arc/data/configure-managed-instance.md @@ -7,7 +7,7 @@ ms.subservice: azure-arc-data author: dnethi ms.author: dinethi ms.reviewer: mikeray -ms.date: 02/22/2022 +ms.date: 05/27/2022 ms.topic: how-to --- @@ -45,6 +45,39 @@ To view the changes made to the Azure Arc-enabled SQL managed instance, you can az sql mi-arc show -n --k8s-namespace --use-k8s ``` +## Configure readable secondaries + +When you deploy Azure Arc enabled SQL managed instance in `BusinessCritical` service tier with 2 or more replicas, by default, one secondary replica is automatically configured as `readableSecondary`. 
This setting can be changed, either to add or to remove the readable secondaries as follows: + +```azurecli +az sql mi-arc update --name --readable-secondaries --k8s-namespace --use-k8s +``` + +For example, the following example will reset the readable secondaries to 0. + +```azurecli +az sql mi-arc update --name sqlmi1 --readable-secondaries 0 --k8s-namespace mynamespace --use-k8s +``` +## Configure replicas + +You can also scale up or down the number of replicas deployed in the `BusinessCritical` service tier as follows: + +```azurecli +az sql mi-arc update --name --replicas --k8s-namespace --use-k8s +``` + +For example: + +The following example will scale down the number of replicas from 3 to 2. + +```azurecli +az sql mi-arc update --name sqlmi1 --replicas 2 --k8s-namespace mynamespace --use-k8s +``` + +> [Note] +> If you scale down from 2 replicas to 1 replica, you may run into a conflict with the pre-configured `--readable--secondaries` setting. You can first edit the `--readable--secondaries` before scaling down the replicas. + + ## Configure Server options You can configure server configuration settings for Azure Arc-enabled SQL managed instance after creation time. This article describes how to configure settings like enabling or disabling mssql Agent, enable specific trace flags for troubleshooting scenarios. diff --git a/articles/azure-arc/data/configure-transparent-data-encryption-manually.md b/articles/azure-arc/data/configure-transparent-data-encryption-manually.md index 3539257ce8daf..045e0516e3cba 100644 --- a/articles/azure-arc/data/configure-transparent-data-encryption-manually.md +++ b/articles/azure-arc/data/configure-transparent-data-encryption-manually.md @@ -14,27 +14,27 @@ ms.custom: template-how-to, event-tier1-build-2022 # Enable transparent data encryption on Azure Arc-enabled SQL Managed Instance -This article describes how to enable transparent data encryption on a database created in an Azure Arc-enabled SQL Managed Instance. +This article describes how to enable transparent data encryption on a database created in an Azure Arc-enabled SQL Managed Instance. In this article, the term *managed instance* refers to a deployment of Azure Arc-enabled SQL Managed Instance. ## Prerequisites -Before you proceed with this article, you must have an Azure Arc-enabled SQL Managed Instance resource created and have connected to it. +Before you proceed with this article, you must have an Azure Arc-enabled SQL Managed Instance resource created and connect to it. - [An Azure Arc-enabled SQL Managed Instance created](./create-sql-managed-instance.md) - [Connect to Azure Arc-enabled SQL Managed Instance](./connect-managed-instance.md) -## Turn on transparent data encryption on a database in Azure Arc-enabled SQL Managed Instance +## Turn on transparent data encryption on a database in the managed instance -Turning on transparent data encryption in Azure Arc-enabled SQL Managed Instance follows the same steps as SQL Server on-premises. Follow the steps described in [SQL Server's transparent data encryption guide](/sql/relational-databases/security/encryption/transparent-data-encryption#enable-tde). +Turning on transparent data encryption in the managed instance follows the same steps as SQL Server on-premises. Follow the steps described in [SQL Server's transparent data encryption guide](/sql/relational-databases/security/encryption/transparent-data-encryption#enable-tde). -After creating the necessary credentials, it's highly recommended to back up any newly created credentials. 
+After you create the necessary credentials, back up any newly created credentials. -## Back up a transparent data encryption credential from Azure Arc-enabled SQL Managed Instance +## Back up a transparent data encryption credential -When backing up from Azure Arc-enabled SQL Managed Instance, the credentials will be stored within the container. It isn't necessary to store the credentials on a persistent volume, but you may use the mount path for the data volume within the container if you'd like: `/var/opt/mssql/data`. Otherwise, the credentials will be stored in-memory in the container. Below is an example of backing up a certificate from Azure Arc-enabled SQL Managed Instance. +When you back up credentials from the managed instance, the credentials are stored within the container. To store credentials on a persistent volume, specify the mount path in the container. For example, `var/opt/mssql/data`. The following example backs up a certificate from the managed instance: > [!NOTE] -> If the `kubectl cp` command is run from Windows, the command may fail when using absolute Windows paths. `kubectl` can mistake the drive in the path as a pod name. For example, `kubectl` might mistake `C` to be a pod name in `C:\folder`. Users can avoid this issue by using relative paths or removing the `C:` from the provided path while in the `C:` drive. This issue also applies to environment variables on Windows like `$HOME`. +> If the `kubectl cp` command is run from Windows, the command may fail when using absolute Windows paths. Use relative paths or the commands specified below. 1. Back up the certificate from the container to `/var/opt/mssql/data`. @@ -60,6 +60,19 @@ When backing up from Azure Arc-enabled SQL Managed Instance, the credentials wil 2. Copy the certificate from the container to your file system. +### [Windows](#tab/windows) + + ```console + kubectl exec -n -c arc-sqlmi -- cat > + ``` + + Example: + + ```console + kubectl exec -n arc-ns -c arc-sqlmi sql-0 -- cat /var/opt/mssql/data/servercert.crt > $HOME\sqlcerts\servercert.crt + ``` + +### [Linux](#tab/linux) ```console kubectl cp --namespace --container arc-sqlmi : ``` @@ -67,11 +80,25 @@ When backing up from Azure Arc-enabled SQL Managed Instance, the credentials wil Example: ```console - kubectl cp --namespace arc-ns --container arc-sqlmi sql-0:/var/opt/mssql/data/servercert.crt ./sqlcerts/servercert.crt + kubectl cp --namespace arc-ns --container arc-sqlmi sql-0:/var/opt/mssql/data/servercert.crt $HOME/sqlcerts/servercert.crt ``` +--- + 3. Copy the private key from the container to your file system. +### [Windows](#tab/windows) + ```console + kubectl exec -n -c arc-sqlmi -- cat > + ``` + + Example: + + ```console + kubectl exec -n arc-ns -c arc-sqlmi sql-0 -- cat /var/opt/mssql/data/servercert.key > $HOME\sqlcerts\servercert.key + ``` + +### [Linux](#tab/linux) ```console kubectl cp --namespace --container arc-sqlmi : ``` @@ -79,9 +106,11 @@ When backing up from Azure Arc-enabled SQL Managed Instance, the credentials wil Example: ```console - kubectl cp --namespace arc-ns --container arc-sqlmi sql-0:/var/opt/mssql/data/servercert.key ./sqlcerts/servercert.key + kubectl cp --namespace arc-ns --container arc-sqlmi sql-0:/var/opt/mssql/data/servercert.key $HOME/sqlcerts/servercert.key ``` +--- + 4. Delete the certificate and private key from the container. 
```console @@ -94,15 +123,26 @@ When backing up from Azure Arc-enabled SQL Managed Instance, the credentials wil kubectl exec -it --namespace arc-ns --container arc-sqlmi sql-0 -- bash -c "rm /var/opt/mssql/data/servercert.crt /var/opt/mssql/data/servercert.key" ``` -## Restore a transparent data encryption credential to Azure Arc-enabled SQL Managed Instance +## Restore a transparent data encryption credential to a managed instance -Similar to above, restore the credentials by copying them into the container and running the corresponding T-SQL afterwards. +Similar to above, to restore the credentials, copy them into the container and run the corresponding T-SQL afterwards. > [!NOTE] -> If the `kubectl cp` command is run from Windows, the command may fail when using absolute Windows paths. `kubectl` can mistake the drive in the path as a pod name. For example, `kubectl` might mistake `C` to be a pod name in `C:\folder`. Users can avoid this issue by using relative paths or removing the `C:` from the provided path while in the `C:` drive. This issue also applies to environment variables on Windows like `$HOME`. +> If the `kubectl cp` command is run from Windows, the command may fail when using absolute Windows paths. Use relative paths or the commands specified below. 1. Copy the certificate from your file system to the container. +### [Windows](#tab/windows) + ```console + type | kubectl exec -i -n -c arc-sqlmi -- tee + ``` + + Example: + ```console + type $HOME\sqlcerts\servercert.crt | kubectl exec -i -n arc-ns -c arc-sqlmi sql-0 -- tee /var/opt/mssql/data/servercert.crt + ``` + +### [Linux](#tab/linux) ```console kubectl cp --namespace --container arc-sqlmi : ``` @@ -110,11 +150,24 @@ Similar to above, restore the credentials by copying them into the container and Example: ```console - kubectl cp --namespace arc-ns --container arc-sqlmi ./sqlcerts/servercert.crt sql-0:/var/opt/mssql/data/servercert.crt + kubectl cp --namespace arc-ns --container arc-sqlmi $HOME/sqlcerts/servercert.crt sql-0:/var/opt/mssql/data/servercert.crt ``` +--- + 2. Copy the private key from your file system to the container. +### [Windows](#tab/windows) + ```console + type | kubectl exec -i -n -c arc-sqlmi -- tee + ``` + + Example: + ```console + type $HOME\sqlcerts\servercert.key | kubectl exec -i -n arc-ns -c arc-sqlmi sql-0 -- tee /var/opt/mssql/data/servercert.key + ``` + +### [Linux](#tab/linux) ```console kubectl cp --namespace --container arc-sqlmi : ``` @@ -122,9 +175,11 @@ Similar to above, restore the credentials by copying them into the container and Example: ```console - kubectl cp --namespace arc-ns --container arc-sqlmi ./sqlcerts/servercert.key sql-0:/var/opt/mssql/data/servercert.key + kubectl cp --namespace arc-ns --container arc-sqlmi $HOME/sqlcerts/servercert.key sql-0:/var/opt/mssql/data/servercert.key ``` +--- + 3. Create the certificate using file paths from `/var/opt/mssql/data`. ```sql diff --git a/articles/azure-arc/data/connect-active-directory-sql-managed-instance.md b/articles/azure-arc/data/connect-active-directory-sql-managed-instance.md index e9d9c4902c050..cb952f4ce7972 100644 --- a/articles/azure-arc/data/connect-active-directory-sql-managed-instance.md +++ b/articles/azure-arc/data/connect-active-directory-sql-managed-instance.md @@ -15,6 +15,8 @@ ms.topic: how-to This article describes how to connect to SQL Managed Instance endpoint using Active Directory (AD) authentication. 
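For orientation, the end result of that setup is an integrated-authentication login from a domain-joined client. The following is a minimal sketch rather than the article's own example: the endpoint name and port are placeholders, and it assumes the client machine is joined to the same AD domain and the connecting user already holds a valid Kerberos ticket.

```console
# Placeholder endpoint and port; requires a domain-joined client with a valid Kerberos ticket.
# -E tells sqlcmd to use integrated (Active Directory) authentication instead of a SQL login.
sqlcmd -S sqlmi.contoso.com,31433 -E -Q "SELECT SUSER_SNAME();"
```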
Before you proceed, make sure you have an AD-integrated Azure Arc-enabled SQL Managed Instance deployed already. +[!INCLUDE [azure-arc-data-preview](../../../includes/azure-arc-data-preview.md)] + See [Tutorial – Deploy AD-integrated SQL Managed Instance](deploy-active-directory-sql-managed-instance.md) to deploy Azure Arc-enabled SQL Managed Instance with Active Directory authentication enabled. > [!NOTE] diff --git a/articles/azure-arc/data/create-complete-managed-instance-indirectly-connected.md b/articles/azure-arc/data/create-complete-managed-instance-indirectly-connected.md index 4fdb6b2ab4215..330e45f27ac35 100644 --- a/articles/azure-arc/data/create-complete-managed-instance-indirectly-connected.md +++ b/articles/azure-arc/data/create-complete-managed-instance-indirectly-connected.md @@ -160,7 +160,7 @@ NAME STATE Ready ``` -## Create Azure Arc-enabled SQL Managed Instance +## Create an instance of Azure Arc-enabled SQL Managed Instance Now, we can create the Azure MI for indirectly connected mode with the following command: @@ -188,4 +188,4 @@ To connect with Azure Data Studio, see [Connect to Azure Arc-enabled SQL Managed ## Next steps -[Upload usage data, metrics, and logs to Azure](upload-metrics-and-logs-to-azure-monitor.md). \ No newline at end of file +[Upload usage data, metrics, and logs to Azure](upload-metrics-and-logs-to-azure-monitor.md). diff --git a/articles/azure-arc/data/create-data-controller-direct-cli.md b/articles/azure-arc/data/create-data-controller-direct-cli.md index 81599d2c97bef..014983d40ec6a 100644 --- a/articles/azure-arc/data/create-data-controller-direct-cli.md +++ b/articles/azure-arc/data/create-data-controller-direct-cli.md @@ -8,7 +8,7 @@ ms.reviewer: mikeray services: azure-arc ms.service: azure-arc ms.subservice: azure-arc-data -ms.date: 03/24/2022 +ms.date: 05/27/2022 ms.topic: overview --- @@ -225,18 +225,18 @@ Optionally, you can specify certificates for logs and metrics UI dashboards. See After the extension and custom location are created, proceed to deploy the Azure Arc data controller as follows. 
```azurecli -az arcdata dc create --name --resource-group --location --connectivity-mode direct --profile-name --auto-upload-logs true --auto-upload-metrics true --custom-location --storage-class +az arcdata dc create --name --resource-group --location --connectivity-mode direct --profile-name --auto-upload-metrics true --custom-location --storage-class # Example -az arcdata dc create --name arc-dc1 --resource-group my-resource-group --location eastasia --connectivity-mode direct --profile-name azure-arc-aks-premium-storage --auto-upload-logs true --auto-upload-metrics true --custom-location mycustomlocation --storage-class mystorageclass +az arcdata dc create --name arc-dc1 --resource-group my-resource-group --location eastasia --connectivity-mode direct --profile-name azure-arc-aks-premium-storage --auto-upload-metrics true --custom-location mycustomlocation --storage-class mystorageclass ``` If you want to create the Azure Arc data controller using a custom configuration template, follow the steps described in [Create custom configuration profile](create-custom-configuration-template.md) and provide the path to the file as follows: ```azurecli -az arcdata dc create --name --resource-group --location --connectivity-mode direct --path ./azure-arc-custom --auto-upload-logs true --auto-upload-metrics true --custom-location +az arcdata dc create --name --resource-group --location --connectivity-mode direct --path ./azure-arc-custom --auto-upload-metrics true --custom-location # Example -az arcdata dc create --name arc-dc1 --resource-group my-resource-group --location eastasia --connectivity-mode direct --path ./azure-arc-custom --auto-upload-logs true --auto-upload-metrics true --custom-location mycustomlocation +az arcdata dc create --name arc-dc1 --resource-group my-resource-group --location eastasia --connectivity-mode direct --path ./azure-arc-custom --auto-upload-metrics true --custom-location mycustomlocation ``` ## Monitor the status of Azure Arc data controller deployment diff --git a/articles/azure-arc/data/delete-managed-instance.md b/articles/azure-arc/data/delete-managed-instance.md index b41939581cb0e..57f30c8c3cde3 100644 --- a/articles/azure-arc/data/delete-managed-instance.md +++ b/articles/azure-arc/data/delete-managed-instance.md @@ -1,6 +1,7 @@ --- -title: Delete Azure Arc-enabled SQL Managed Instance -description: Delete Azure Arc-enabled SQL Managed Instance +title: Delete an Azure Arc-enabled SQL Managed Instance +description: Learn how to delete an Azure Arc-enabled SQL Managed Instance and optionally, reclaim associated Kubernetes persistent volume claims (PVCs). +ms.custom: kr2b-contr-experiment services: azure-arc ms.service: azure-arc ms.subservice: azure-arc-data @@ -11,107 +12,101 @@ ms.date: 07/30/2021 ms.topic: how-to --- -# Delete Azure Arc-enabled SQL Managed Instance -This article describes how you can delete an Azure Arc-enabled SQL Managed Instance. +# Delete an Azure Arc-enabled SQL Managed Instance +In this how-to guide, you'll find and then delete an Azure Arc-enabled SQL Managed Instance. Optionally, after deleting managed instances, you can reclaim associated Kubernetes persistent volume claims (PVCs). -## View Existing Azure Arc-enabled SQL Managed Instances -To view SQL Managed Instances, run the following command: +1. 
Find existing Azure Arc-enabled SQL Managed Instances: -```azurecli -az sql mi-arc list --k8s-namespace --use-k8s -``` + ```azurecli + az sql mi-arc list --k8s-namespace --use-k8s + ``` -Output should look something like this: + Example output: -```console -Name Replicas ServerEndpoint State ------- ---------- ---------------- ------- -demo-mi 1/1 10.240.0.4:32023 Ready -``` + ```console + Name Replicas ServerEndpoint State + ------ ---------- ---------------- ------- + demo-mi 1/1 10.240.0.4:32023 Ready + ``` -## Delete Azure Arc-enabled SQL Managed Instance +1. Delete the SQL Managed Instance, run one of the commands appropriate for your deployment type: -To delete a SQL Managed Instance, run the appropriate command for your deployment type. For example: + 1. **Indirectly connected mode**: -### [Indirectly connected mode](#tab/indirectly) + ```azurecli + az sql mi-arc delete --name --k8s-namespace --use-k8s + ``` -```azurecli -az sql mi-arc delete -n --k8s-namespace --use-k8s -``` + Example output: -Output should look something like this: + ```azurecli + # az sql mi-arc delete --name demo-mi --k8s-namespace --use-k8s + Deleted demo-mi from namespace arc + ``` -```azurecli -# az sql mi-arc delete -n demo-mi --k8s-namespace --use-k8s -Deleted demo-mi from namespace arc -``` + 1. **Directly connected mode**: -### [Directly connected mode](#tab/directly) + ```azurecli + az sql mi-arc delete --name --resource-group + ``` -```azurecli -az sql mi-arc delete -n -g -``` + Example output: -Output should look something like this: + ```azurecli + # az sql mi-arc delete --name demo-mi --resource-group my-rg + Deleted demo-mi from namespace arc + ``` -```azurecli -# az sql mi-arc delete -n demo-mi -g my-rg -Deleted demo-mi from namespace arc -``` +## Optional - Reclaim Kubernetes PVCs ---- +A Persistent Volume Claim (PVC) is a request for storage by a user from a Kubernetes cluster while creating and adding storage to a SQL Managed Instance. Deleting PVCs is recommended but it isn't mandatory. However, if you don't reclaim these PVCs, you'll eventually end up with errors in your Kubernetes cluster. For example, you might be unable to create, read, update, or delete resources from the Kubernetes API. You might not be able to run commands like `az arcdata dc export` because the controller pods were evicted from the Kubernetes nodes due to storage issues (normal Kubernetes behavior). You can see messages in the logs similar to: -## Reclaim the Kubernetes Persistent Volume Claims (PVCs) +- Annotations: microsoft.com/ignore-pod-health: true +- Status: Failed +- Reason: Evicted +- Message: The node was low on resource: ephemeral-storage. Container controller was using 16372Ki, which exceeds its request of 0. -A PersistentVolumeClaim (PVC) is a request for storage by a user from Kubernetes cluster while creating and adding storage to a SQL Managed Instance. Deleting a SQL Managed Instance does not remove its associated [PVCs](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). This is by design. The intention is to help the user to access the database files in case the deletion of instance was accidental. Deleting PVCs is not mandatory. However it is recommended. If you don't reclaim these PVCs, you'll eventually end up with errors as your Kubernetes cluster will run out of disk space or usage of the same SQL Managed Instance name while creating new instance might cause inconsistencies. 
To reclaim the PVCs, take the following steps: +By design, deleting a SQL Managed Instance doesn't remove its associated [PVCs](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). The intention is to ensure that you can access the database files in case the deletion was accidental. -### 1. List the PVCs for the server group you deleted +1. To reclaim the PVCs, take the following steps: + 1. Find the PVCs for the server group you deleted. -To list the PVCs, run the following command: -```console -kubectl get pvc -``` + ```console + kubectl get pvc + ``` -In the example below, notice the PVCs for the SQL Managed Instances you deleted. + In the example below, notice the PVCs for the SQL Managed Instances you deleted. -```console -# kubectl get pvc -n arc + ```console + # kubectl get pvc -n arc -NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE -data-demo-mi-0 Bound pvc-1030df34-4b0d-4148-8986-4e4c20660cc4 5Gi RWO managed-premium 13h -logs-demo-mi-0 Bound pvc-11836e5e-63e5-4620-a6ba-d74f7a916db4 5Gi RWO managed-premium 13h -``` + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE + data-demo-mi-0 Bound pvc-1030df34-4b0d-4148-8986-4e4c20660cc4 5Gi RWO managed-premium 13h + logs-demo-mi-0 Bound pvc-11836e5e-63e5-4620-a6ba-d74f7a916db4 5Gi RWO managed-premium 13h + ``` -### 2. Delete each of the PVCs -Delete the data and log PVCs for each of the SQL Managed Instances you deleted. -The general format of this command is: -```console -kubectl delete pvc -``` + 1. Delete the data and log PVCs for each of the SQL Managed Instances you deleted. + The general format of this command is: -For example: -```console -kubectl delete pvc data-demo-mi-0 -n arc -kubectl delete pvc logs-demo-mi-0 -n arc -``` + ```console + kubectl delete pvc + ``` -Each of these kubectl commands will confirm the successful deleting of the PVC. For example: -```console -persistentvolumeclaim "data-demo-mi-0" deleted -persistentvolumeclaim "logs-demo-mi-0" deleted -``` - + For example: -> [!NOTE] -> As indicated, not deleting the PVCs might eventually get your Kubernetes cluster in a situation where it will throw errors. Some of these errors may include being unable to create, read, update, delete resources from the Kubernetes API, or being able to run commands like `az arcdata dc export` as the controller pods may be evicted from the Kubernetes nodes because of this storage issue (normal Kubernetes behavior). -> -> For example, you may see messages in the logs similar to: -> - Annotations: microsoft.com/ignore-pod-health: true -> - Status: Failed -> - Reason: Evicted -> - Message: The node was low on resource: ephemeral-storage. Container controller was using 16372Ki, which exceeds its request of 0. + ```console + kubectl delete pvc data-demo-mi-0 -n arc + kubectl delete pvc logs-demo-mi-0 -n arc + ``` + Each of these kubectl commands will confirm the successful deleting of the PVC. 
For example: + + ```console + persistentvolumeclaim "data-demo-mi-0" deleted + persistentvolumeclaim "logs-demo-mi-0" deleted + ``` + ## Next steps Learn more about [Features and Capabilities of Azure Arc-enabled SQL Managed Instance](managed-instance-features.md) diff --git a/articles/azure-arc/data/deploy-active-directory-connector-cli.md b/articles/azure-arc/data/deploy-active-directory-connector-cli.md index 19e31a3fbde3a..535b8389e4374 100644 --- a/articles/azure-arc/data/deploy-active-directory-connector-cli.md +++ b/articles/azure-arc/data/deploy-active-directory-connector-cli.md @@ -16,6 +16,8 @@ ms.topic: how-to This article explains how to deploy an Active Directory (AD) connector using Azure CLI. The AD connector is a key component to enable Active Directory authentication on Azure Arc-enabled SQL Managed Instance. +[!INCLUDE [azure-arc-data-preview](../../../includes/azure-arc-data-preview.md)] + ## Prerequisites ### Install tools diff --git a/articles/azure-arc/data/deploy-active-directory-connector-portal.md b/articles/azure-arc/data/deploy-active-directory-connector-portal.md new file mode 100644 index 0000000000000..15f734ab3f455 --- /dev/null +++ b/articles/azure-arc/data/deploy-active-directory-connector-portal.md @@ -0,0 +1,116 @@ +--- +title: Tutorial – Deploy Active Directory connector using Azure portal +description: Tutorial to deploy an Active Directory connector using Azure portal +services: azure-arc +ms.service: azure-arc +ms.subservice: azure-arc-data +author: MikeRayMSFT +ms.author: mikeray +ms.reviewer: dinethi +ms.date: 05/24/2022 +ms.topic: how-to +--- + +# Tutorial – Deploy Active Directory connector using Azure portal + +Active Directory (AD) connector is a key component to enable Active Directory authentication on Azure Arc-enabled SQL Managed Instances. + +[!INCLUDE [azure-arc-data-preview](../../../includes/azure-arc-data-preview.md)] + +This article explains how to deploy, manage, and delete an Active Directory (AD) connector in directly connected mode from the Azure portal. + +## Prerequisites + +For details about how to set up OU and AD account, go to [Deploy Azure Arc-enabled data services in Active Directory authentication - prerequisites](active-directory-prerequisites.md). + +Make sure you have the following deployed before proceed with the steps in this article: + +- An Arc-enabled Azure Kubernetes cluster. +- A data controller in directly connected mode. + +## Create a new AD connector + +1. Log in to [Azure portal](https://portal.azure.com). +1. In the search resources field at the top of the portal, type **data controllers**, and select **Azure Arc data controllers**. + +Azure takes you to where you can find all available data controllers deployed in your selected Azure subscription. + +1. Select the data controller where you wish to add an AD connector. +1. Under **Settings** select **Active Directory**. The portal shows the Active Directory connectors for this data controller. +1. Select **+ Add Connector**, the portal presents an **Add Connector** interface. +1. Under **Active Directory connector** + 1. Specify your **Connector name**. + 2. Choose the account provisioning type - either **Automatic** or **Manual**. + +The account provisioning type determines whether you deploy a customer-managed keytab AD connector or a system-managed keytab AD connector. + +### Create a new customer-managed keytab AD connector + +1. Click **Add Connector**. + +1. Choose the account provisioning type **Manual**. + +1. 
Set the editable fields for your connector: + - **Realm**: The name of the Active Directory (AD) domain in uppercase. For example *CONTOSO.COM*. + - **Nameserver IP address**: A comma-separated list of Active Directory DNS server IP addresses. For example: *10.10.10.11, 10.10.10.12*. + - **Netbios domain name**: Optional. The NETBIOS name of the Active Directory domain. For example *CONTOSO*. Defaults to the first label of realm. + - **DNS domain name**: Optional. The DNS domain name associated with the Active Directory domain. For example, *contoso.com*. + - **DNS replicas**: Optional. The number of replicas to deploy for the DNS proxy service. Defaults to `1`. + - **Prefer Kubernetes DNS for PTR lookups**: Optional. Check to set Kubernetes DNS for IP address lookup. Clear to use Active Directory DNS. + + ![Screenshot of the portal interface to add customer managed keytab.](media/active-directory-deployment/add-ad-customer-managed-keytab-connector-portal.png) + +1. Click **Add Connector** to create a new customer-managed keytab AD connector. + +### Create a new system-managed keytab AD connector +1. Click **Add Connector**. +1. Choose the account provisioning type **Automatic**. +1. Set the editable fields for your connector: + - **Realm**: The name of the Active Directory (AD) domain in uppercase. For example *CONTOSO.COM*. + - **Nameserver IP address**: A comma-separated list of Active Directory DNS server IP addresses. For example: *10.10.10.11, 10.10.10.12*. + - **OU distinguished name** The distinguished name of the Organizational Unit (OU) pre-created in the Active Directory (AD) domain. For example, `OU=arcou,DC=contoso,DC=com`. + - **Domain Service Account username** The username of the Domain Service Account in Active Directory. + - **Domain Service Account password** The password of the Domain Service Account in Active Directory. + - **Primary domain controller hostname (Optional)** The hostname of the primary Active Directory domain controller. For example, `azdc01.contoso.com`. + - **Secondary domain controller hostname (Optional)** The secondary domain controller hostname. + - **Netbios domain name**: Optional. The NETBIOS name of the Active Directory domain. For example *CONTOSO*. Defaults to the first label of realm. + - **DNS domain name**: Optional. The DNS domain name associated with the Active Directory domain. For example, *contoso.com*. + - **DNS replicas (Optional)** The number of replicas to deploy for the DNS proxy service. Defaults to `1`. + - **Prefer Kubernetes DNS for PTR lookups**: Optional. Check to set Kubernetes DNS for IP address lookup. Clear to use Active Directory DNS. + + ![Screenshot of the portal interface to add system managed keytab.](media/active-directory-deployment/add-ad-system-managed-keytab-connector-portal.png) + +1. Click **Add Connector** to create a new system-managed keytab AD connector. + +## Edit an existing AD connector + +1. Select the AD connect that you want to edit. Select the ellipses (**...**), and then **Edit**. The portal presents an **Edit Connector** interface. + +1. You may update any editable fields. For example: + - **Primary domain controller hostname** The hostname of the primary Active Directory domain controller. For example, `azdc01.contoso.com`. + - **Secondary domain controller hostname** The secondary domain controller hostname. + - **Nameserver IP address**: A comma-separated list of Active Directory DNS server IP addresses. For example: *10.10.10.11, 10.10.10.12*. 
+ - **DNS replicas** The number of replicas to deploy for the DNS proxy service. Defaults to `1`. + - **Prefer Kubernetes DNS for PTR lookups**: Check to set Kubernetes DNS for IP address lookup. Clear to use Active Directory DNS. + +1. Click on **Apply** for changes to take effect. + + +## Delete an AD connector + +1. Select the ellipses (**...**) on the right of the Active Directory connector you would like to delete. +1. Select **Delete**. + +To delete multiple AD connectors at one time: + +1. Select the checkbox in the beginning row of each AD connector you want to delete. + + Alternatively, select the checkbox in the top row to select all the AD connectors in the table. + +1. Click **Delete** in the management bar to delete the AD connectors that you selected. + +## Next steps +* [Tutorial – Deploy Active Directory connector using Azure CLI](deploy-active-directory-connector-cli.md) +* [Tutorial – Deploy AD connector in customer-managed keytab mode](deploy-customer-managed-keytab-active-directory-connector.md) +* [Tutorial – Deploy Active Directory connector in system-managed keytab mode](deploy-system-managed-keytab-active-directory-connector.md) +* [Deploy Arc-enabled SQL Managed Instance with Active Directory Authentication](deploy-active-directory-sql-managed-instance.md). diff --git a/articles/azure-arc/data/deploy-active-directory-sql-managed-instance-cli.md b/articles/azure-arc/data/deploy-active-directory-sql-managed-instance-cli.md index 7bd2bef7df040..e6e78d2b6fca4 100644 --- a/articles/azure-arc/data/deploy-active-directory-sql-managed-instance-cli.md +++ b/articles/azure-arc/data/deploy-active-directory-sql-managed-instance-cli.md @@ -15,6 +15,8 @@ ms.topic: how-to This article explains how to deploy Azure Arc-enabled SQL Managed Instance with Active Directory (AD) authentication using Azure CLI. +[!INCLUDE [azure-arc-data-preview](../../../includes/azure-arc-data-preview.md)] + See these articles for specific instructions: - [Tutorial – Deploy AD connector in customer-managed keytab mode](deploy-customer-managed-keytab-active-directory-connector.md) diff --git a/articles/azure-arc/data/deploy-active-directory-sql-managed-instance.md b/articles/azure-arc/data/deploy-active-directory-sql-managed-instance.md index 84191fd1e79c8..3b749ec7cf09c 100644 --- a/articles/azure-arc/data/deploy-active-directory-sql-managed-instance.md +++ b/articles/azure-arc/data/deploy-active-directory-sql-managed-instance.md @@ -15,6 +15,8 @@ ms.topic: how-to This article explains how to deploy Azure Arc-enabled SQL Managed Instance with Active Directory (AD) authentication. +[!INCLUDE [azure-arc-data-preview](../../../includes/azure-arc-data-preview.md)] + Before you proceed, complete the steps explained in [Customer-managed keytab Active Directory (AD) connector](deploy-customer-managed-keytab-active-directory-connector.md) or [Deploy a system-managed keytab AD connector](deploy-system-managed-keytab-active-directory-connector.md) ## Prerequisites diff --git a/articles/azure-arc/data/deploy-customer-managed-keytab-active-directory-connector.md b/articles/azure-arc/data/deploy-customer-managed-keytab-active-directory-connector.md index ad7eb142aa670..a31373f2d685e 100644 --- a/articles/azure-arc/data/deploy-customer-managed-keytab-active-directory-connector.md +++ b/articles/azure-arc/data/deploy-customer-managed-keytab-active-directory-connector.md @@ -15,6 +15,8 @@ ms.topic: how-to This article explains how to deploy Active Directory (AD) connector in customer-managed keytab mode. 
The connector is a key component to enable Active Directory authentication on Azure Arc-enabled SQL Managed Instance. +[!INCLUDE [azure-arc-data-preview](../../../includes/azure-arc-data-preview.md)] + ## Active Directory connector in customer-managed keytab mode In customer-managed keytab mode, an Active Directory connector deploys a DNS proxy service that proxies the DNS requests coming from the managed instance to either of the two upstream DNS services: diff --git a/articles/azure-arc/data/deploy-system-managed-keytab-active-directory-connector.md b/articles/azure-arc/data/deploy-system-managed-keytab-active-directory-connector.md index 1af4002378708..9c10c76346307 100644 --- a/articles/azure-arc/data/deploy-system-managed-keytab-active-directory-connector.md +++ b/articles/azure-arc/data/deploy-system-managed-keytab-active-directory-connector.md @@ -16,6 +16,8 @@ ms.topic: how-to This article explains how to deploy Active Directory connector in system-managed keytab mode. It is a key component to enable Active Directory authentication on Azure Arc-enabled SQL Managed Instance. +[!INCLUDE [azure-arc-data-preview](../../../includes/azure-arc-data-preview.md)] + ## Active Directory connector in system-managed keytab mode In System-Managed Keytab mode, an Active Directory connector deploys a DNS proxy service that proxies the DNS requests coming from the managed instance to either of the two upstream DNS services: diff --git a/articles/azure-arc/data/managed-instance-high-availability.md b/articles/azure-arc/data/managed-instance-high-availability.md index eb6a58ee76cfa..d1822de394063 100644 --- a/articles/azure-arc/data/managed-instance-high-availability.md +++ b/articles/azure-arc/data/managed-instance-high-availability.md @@ -114,7 +114,7 @@ az sql mi-arc create -n --k8s-namespace --use-k8s --t Example: ```azurecli -az sql mi-arc create -n sqldemo --k8s-namespace my-namespace --use-k8s --tier bc --replicas 3 +az sql mi-arc create -n sqldemo --k8s-namespace my-namespace --use-k8s --tier BusinessCritical --replicas 3 ``` Directly connected mode: @@ -124,7 +124,7 @@ az sql mi-arc create --name --resource-group --location FROM URL = 'https://.blob.core.windows.net//.bak' WITH MOVE 'Test' to '/var/opt/mssql/data/.mdf' ,MOVE 'Test_log' to '/var/opt/mssql/data/.ldf' - ,RECOVERY - ,REPLACE - ,STATS = 5; + ,RECOVERY; GO ``` @@ -171,9 +169,7 @@ Prepare and run the RESTORE command to restore the backup file to the Azure SQL RESTORE DATABASE test FROM DISK = '/var/opt/mssql/data/.bak' WITH MOVE '' to '/var/opt/mssql/data/.mdf' ,MOVE '' to '/var/opt/mssql/data/_log.ldf' -,RECOVERY -,REPLACE -,STATS = 5; +,RECOVERY; GO ``` @@ -183,9 +179,7 @@ Example: RESTORE DATABASE test FROM DISK = '/var/opt/mssql/data/test.bak' WITH MOVE 'test' to '/var/opt/mssql/data/test.mdf' ,MOVE 'test' to '/var/opt/mssql/data/test_log.ldf' -,RECOVERY -,REPLACE -,STATS = 5; +,RECOVERY; GO ``` diff --git a/articles/azure-arc/data/reference/reference-az-arcdata-dc.md b/articles/azure-arc/data/reference/reference-az-arcdata-dc.md index 1a235c6a6b67b..27889b3bc04c5 100644 --- a/articles/azure-arc/data/reference/reference-az-arcdata-dc.md +++ b/articles/azure-arc/data/reference/reference-az-arcdata-dc.md @@ -144,7 +144,7 @@ Increase logging verbosity. Use `--debug` for full debug logs. ## az arcdata dc export Export metrics, logs or usage to a file. 
```azurecli -az arcdata dc export +az arcdata dc export -t logs --path logs.json --k8s-namespace namespace --use-k8s ``` ### Global Arguments #### `--debug` diff --git a/articles/azure-arc/data/release-notes.md b/articles/azure-arc/data/release-notes.md index b91c31d54a7ee..ff8c836f6753b 100644 --- a/articles/azure-arc/data/release-notes.md +++ b/articles/azure-arc/data/release-notes.md @@ -7,7 +7,7 @@ ms.reviewer: mikeray services: azure-arc ms.service: azure-arc ms.subservice: azure-arc-data -ms.date: 05/04/2022 +ms.date: 05/24/2022 ms.topic: conceptual ms.custom: references_regions, devx-track-azurecli, event-tier1-build-2022 #Customer intent: As a data professional, I want to understand why my solutions would benefit from running with Azure Arc-enabled data services so that I can leverage the capability of the feature. @@ -26,9 +26,27 @@ This release is published May 24, 2022. For complete release version information, see [Version log](version-log.md). +### Data controller reminders and warnings + +Reminders and warnings are implemented in Azure portal, custom resource status, and through CLI when the billing data related to all resources managed by the data controller has not been uploaded or exported for an extended period. + ### SQL Managed Instance -Azure SQL Managed Instance Business Critical tier is generally available. +General Availability of Business Critical service tier. Azure Arc-enabled SQL Managed Instance instances that have a version greater than or equal to v1.7.0 will be charged through Azure billing meters. + +### User experience improvements + +#### Azure portal + +Added ability to create AD Connectors from Azure portal. + +Preview expected costs for Azure Arc-enabled SQL Managed Instance Business Critical tier when you create new instances. + +#### Azure Data Studio + +Added ability to upgrade Azure Arc-enabled SQL Managed Instances from Azure Data Studio in the indirect and direct connectivity modes. + +Preview expected costs for Azure Arc-enabled SQL Managed Instance Business Critical tier when you create new instances. ## May 4, 2022 @@ -63,7 +81,7 @@ Separated the availability group and failover group status into two different se Updated SQL engine binaries to the latest version. -Add support for `NodeSelector`, `TopologySpreadConstraints` and `Affinity`. Only available through Kubernetes yaml/json file create/edit currently. No Azure CLI, Azure Portal, or Azure Data Studio user experience yet. +Add support for `NodeSelector`, `TopologySpreadConstraints` and `Affinity`. Only available through Kubernetes yaml/json file create/edit currently. No Azure CLI, Azure portal, or Azure Data Studio user experience yet. Add support for specifying labels and annotations on the secondary service endpoint. `REQUIRED_SECONDARIES_TO_COMMIT` is now a function of the number of replicas. @@ -74,7 +92,7 @@ In this release, the default value of the readable secondary service is `Cluster ### User experience improvements -Notifications added in Azure Portal if billing data has not been uploaded to Azure recently. +Notifications added in Azure portal if billing data has not been uploaded to Azure recently. #### Azure Data Studio @@ -171,7 +189,7 @@ For complete release version information, see [Version log](version-log.md). - Set `--readable-secondaries` to any value between 0 and the number of replicas minus 1. - `--readable-secondaries` only applies to Business Critical tier. 
- Automatic backups are taken on the primary instance in a Business Critical service tier when there are multiple replicas. When a failover happens, backups move to the new primary. -- [ReadWriteMany (RWX) capable storage class](/azure/aks/concepts-storage#azure-disks) is required for backups, for both General Purpose and Business Critical service tiers. Specifying a non-ReadWriteMany storage class will cause the SQL Managed Instance to be stuck in "Pending" status during deployment. +- [ReadWriteMany (RWX) capable storage class](../../aks/concepts-storage.md#azure-disks) is required for backups, for both General Purpose and Business Critical service tiers. Specifying a non-ReadWriteMany storage class will cause the SQL Managed Instance to be stuck in "Pending" status during deployment. - Billing support when using multiple read replicas. For additional information about service tiers, see [High Availability with Azure Arc-enabled SQL Managed Instance (preview)](managed-instance-high-availability.md). @@ -838,4 +856,4 @@ For instructions see [What are Azure Arc-enabled data services?](overview.md) - [Plan an Azure Arc-enabled data services deployment](plan-azure-arc-data-services.md) (requires installing the client tools first) - [Create an Azure SQL Managed Instance on Azure Arc](create-sql-managed-instance.md) (requires creation of an Azure Arc data controller first) - [Create an Azure Database for PostgreSQL Hyperscale server group on Azure Arc](create-postgresql-hyperscale-server-group.md) (requires creation of an Azure Arc data controller first) -- [Resource providers for Azure services](../../azure-resource-manager/management/azure-services-resource-providers.md) +- [Resource providers for Azure services](../../azure-resource-manager/management/azure-services-resource-providers.md) \ No newline at end of file diff --git a/articles/azure-arc/data/toc.yml b/articles/azure-arc/data/toc.yml index d25142da0697c..62c96062395b0 100644 --- a/articles/azure-arc/data/toc.yml +++ b/articles/azure-arc/data/toc.yml @@ -13,14 +13,15 @@ items: href: privacy-data-collection-and-reporting.md - name: Quickstarts items: - - name: Plan an Azure Arc-enabled data services deployment - href: plan-azure-arc-data-services.md - name: Directly connected | Azure portal example href: create-complete-managed-instance-directly-connected.md - name: Indirectly connected | Azure CLI example href: create-complete-managed-instance-indirectly-connected.md - name: Concepts + expanded: true items: + - name: Plan an Azure Arc-enabled data services deployment + href: plan-azure-arc-data-services.md - name: Connectivity modes and requirements href: connectivity.md - name: Storage configuration @@ -73,6 +74,8 @@ items: href: uninstall-azure-arc-data-controller.md - name: Manage items: + - name: Upload usage data + href: upload-usage-data.md - name: Monitor with Grafana & Kibana href: monitor-grafana-kibana.md - name: Upload to Azure Monitor @@ -83,8 +86,6 @@ items: href: upload-logs.md - name: Upload metrics href: upload-metrics.md - - name: Upload usage data - href: upload-usage-data.md - name: Inventory database instances href: view-arc-data-services-inventory-in-azure-portal.md - name: Update service principal credentials @@ -247,6 +248,8 @@ items: href: deploy-customer-managed-keytab-active-directory-connector.md - name: Deploy AD connector - CLI href: deploy-active-directory-connector-cli.md + - name: Deploy AD connector - portal + href: deploy-active-directory-connector-portal.md - name: Deploy SQL Managed Instance 
href: deploy-active-directory-sql-managed-instance.md - name: Deploy SQL Managed Instance - CLI diff --git a/articles/azure-arc/data/troubleshoot-guide.md b/articles/azure-arc/data/troubleshoot-guide.md index 5a3c0d04128d2..1d35158ec2ac5 100644 --- a/articles/azure-arc/data/troubleshoot-guide.md +++ b/articles/azure-arc/data/troubleshoot-guide.md @@ -4,10 +4,10 @@ description: Introduction to troubleshooting resources services: azure-arc ms.service: azure-arc ms.subservice: azure-arc-data -author: twright-msft -ms.author: twright +author: dnethi +ms.author: dinethi ms.reviewer: mikeray -ms.date: 07/30/2021 +ms.date: 05/27/2022 ms.topic: how-to --- @@ -16,6 +16,60 @@ ms.topic: how-to This article identifies troubleshooting resources for Azure Arc-enabled data services. +## Logs Upload related errors + +If you deployed Azure Arc data controller in the `direct` connectivity mode using `kubectl`, and have not created a secret for the Log Analytics workspace credentials, you may see the following error messages in the Data Controller CR (Custom Resource): + +``` +status": { + "azure": { + "uploadStatus": { + "logs": { + "lastUploadTime": "YYYY-MM-HHTMM:SS:MS.SSSSSSZ", + "message": "spec.settings.azure.autoUploadLogs is true, but failed to get log-workspace-secret secret." + }, + +``` + +To resolve the above error, create a secret with the Log Analytics Workspace credentials containing the `WorkspaceID` and the `SharedAccessKey` as follows: + +``` +apiVersion: v1 +data: + primaryKey: + workspaceId: +kind: Secret +metadata: + name: log-workspace-secret + namespace: +type: Opaque + +``` + +## Metrics upload related errors in direct connected mode + +If you configured automatic upload of metrics, in the direct connected mode and the permissions needed for the MSI have not been properly granted (as described in [Upload metrics](upload-metrics.md)), you might see an error in your logs as follows: + +```output +'Metric upload response: {"error":{"code":"AuthorizationFailed","message":"Check Access Denied Authorization for AD object XXXXXXXXX-XXXX-XXXX-XXXXX-XXXXXXXXXXX over scope /subscriptions/XXXXXXXXX-XXXX-XXXX-XXXXX-XXXXXXXXXXX/resourcegroups/my-resource-group/providers/microsoft.azurearcdata/sqlmanagedinstances/arc-dc, User Tenant Id: XXXXXXXXX-XXXX-XXXX-XXXXX-XXXXXXXXXXX. Microsoft.Insights/Metrics/write was not allowed, Microsoft.Insights/Telemetry/write was notallowed. Warning: Principal will be blocklisted if the service principal is not granted proper access while it hits the GIG endpoint continuously."}} +``` + +To resolve above error, retrieve the MSI for the Azure Arc data controller extension, and grant the required roles as described in [Upload metrics](upload-metrics.md). + + +## Usage upload related errors in direct connected mode + +If you deployed your Azure Arc data controller in the direct connected mode the permissions needed to upload your usage information are automatically granted for the Azure Arc data controller extension MSI. If the automatic upload process runs into permissions related issues you might see an error in your logs as follows: + +``` +identified that your data controller stopped uploading usage data to Azure. 
The error was: + +{"lastUploadTime":"2022-05-05T20:10:47.6746860Z","message":"Data controller upload response: {\"error\":{\"code\":\"AuthorizationFailed\",\"message\":\"The client 'XXXXXXXXX-XXXX-XXXX-XXXXX-XXXXXXXXXXX' with object id 'XXXXXXXXX-XXXX-XXXX-XXXXX-XXXXXXXXXXX' does not have authorization to perform action 'microsoft.azurearcdata/datacontrollers/write' over scope '/subscriptions/XXXXXXXXX-XXXX-XXXX-XXXXX-XXXXXXXXXXX/resourcegroups/my-resource-group/providers/microsoft.azurearcdata/datacontrollers/arc-dc' or the scope is invalid. If access was recently granted, please refresh your credentials.\"}}"} +``` + +To resolve the permissions issue, retrieve the MSI and grant the required roles as described in [Upload metrics](upload-metrics.md)). + + ## Resources by type [Scenario: Troubleshooting PostgreSQL Hyperscale server groups](troubleshoot-postgresql-hyperscale-server-group.md) diff --git a/articles/azure-arc/data/upgrade-data-controller-direct-cli.md b/articles/azure-arc/data/upgrade-data-controller-direct-cli.md index bcec538d935f1..76b8b76a3142a 100644 --- a/articles/azure-arc/data/upgrade-data-controller-direct-cli.md +++ b/articles/azure-arc/data/upgrade-data-controller-direct-cli.md @@ -4,10 +4,10 @@ description: Article describes how to upgrade a directly connected Azure Arc dat services: azure-arc ms.service: azure-arc ms.subservice: azure-arc-data -author: grrlgeek -ms.author: jeschult +author: dnethi +ms.author: dinethi ms.reviewer: mikeray -ms.date: 12/10/2021 +ms.date: 05/24/2022 ms.topic: how-to --- @@ -85,7 +85,32 @@ Preparing to upgrade dc arcdc in namespace arc to version . Arcdata Control Plane would be upgraded to: ``` -To upgrade the data controller, run the `az arcdata dc upgrade` command. If you don't specify a target image, the data controller will be upgraded to the latest version. +Upgrade the data controller by running an upgrade on the Arc data controller extension first. This can be done as follows: + +```azurecli +az k8s-extension update --resource-group --cluster-name --cluster-type connectedClusters --name --version --release-train stable --config systemDefaultValues.image="//arc-bootstrapper:" +``` +You can retrieve the name of your extension and its version, by browsing to the Overview blade of your Arc enabled kubernetes cluster and select Extensions tab on the left. You can also retrieve the name of your extension and its version running `az` CLI As follows: + +```azurecli +az k8s-extension list --resource-group --cluster-name --cluster-type connectedClusters +``` + +For example: + +```azurecli +az k8s-extension list --resource-group myresource-group --cluster-name myconnected-cluster --cluster-type connectedClusters +``` + +After retrieving the Arc data controller extension name and its version, the extension can be upgraded as follows: + +For example: + +```azurecli +az k8s-extension update --resource-group myresource-group --cluster-name myconnected-cluster --cluster-type connectedClusters --name arcdc-ext --version 1.2.19481002 --release-train stable --config systemDefaultValues.image="mcr.microsoft.com/arcdata/arc-bootstrapper:v1.6.0_2022-05-02" +``` + +Once the extension is upgraded, run the `az arcdata dc upgrade` command to upgrade the data controller. If you don't specify a target image, the data controller will be upgraded to the latest version. 
```azurecli az arcdata dc upgrade --resource-group --name [--no-wait] @@ -93,6 +118,10 @@ az arcdata dc upgrade --resource-group --name ` to specify a version if you do not want the latest version. +> [!NOTE] +> Currently upgrade is only supported to the next immediate version. Hence, if you are more than one version behind, specify the `--desired-version` to avoid compatibility issues. + + ## Monitor the upgrade status You can monitor the progress of the upgrade with CLI. diff --git a/articles/azure-arc/data/upgrade-sql-managed-instance-direct-cli.md b/articles/azure-arc/data/upgrade-sql-managed-instance-direct-cli.md index 445ea09b0b15c..cffda7883789e 100644 --- a/articles/azure-arc/data/upgrade-sql-managed-instance-direct-cli.md +++ b/articles/azure-arc/data/upgrade-sql-managed-instance-direct-cli.md @@ -8,7 +8,7 @@ ms.custom: event-tier1-build-2022 author: grrlgeek ms.author: jeschult ms.reviewer: mikeray -ms.date: 11/10/2021 +ms.date: 05/21/2022 ms.topic: how-to --- @@ -57,13 +57,13 @@ During a SQL Managed Instance General Purpose upgrade, the containers in the pod To upgrade the Managed Instance, use the following command: ````cli -az sql mi-arc upgrade --resource-group --name [--no-wait] +az sql mi-arc upgrade --resource-group --name --desired-version [--no-wait] ```` Example: ````cli -az sql mi-arc upgrade --resource-group rgarc --name sql1 [--no-wait] +az sql mi-arc upgrade --resource-group myresource-group --name sql1 --desired-version v1.6.0_2022-05-02 [--no-wait] ```` ## Monitor @@ -89,7 +89,7 @@ Status: Observed Generation: 2 Primary Endpoint: 30.76.129.38,1433 Ready Replicas: 1/1 - Running Version: v1.0.0_2021-07-30 + Running Version: v1.5.0_2022-04-05 State: Updating ``` @@ -102,7 +102,7 @@ Status: Observed Generation: 2 Primary Endpoint: 30.76.129.38,1433 Ready Replicas: 1/1 - Running Version: + Running Version: v1.6.0_2022-05-02 State: Ready ``` diff --git a/articles/azure-arc/data/upload-logs.md b/articles/azure-arc/data/upload-logs.md index ee2a0fa633437..38f21f3ad6c53 100644 --- a/articles/azure-arc/data/upload-logs.md +++ b/articles/azure-arc/data/upload-logs.md @@ -4,10 +4,10 @@ description: Upload logs for Azure Arc-enabled data services to Azure Monitor services: azure-arc ms.service: azure-arc ms.subservice: azure-arc-data -author: twright-msft -ms.author: twright +author: dnethi +ms.author: dinethi ms.reviewer: mikeray -ms.date: 11/03/2021 +ms.date: 05/27/2022 ms.topic: how-to --- @@ -149,9 +149,9 @@ echo $WORKSPACE_SHARED_KEY With the environment variables set, you can upload logs to the log workspace. -## Upload logs to Azure Log Analytics Workspace in direct mode +## Configure automatic upload of logs to Azure Log Analytics Workspace in direct mode using `az` CLI -In the **direct** connected mode, Logs upload can only be setup in **automatic** mode. This automatic upload of metrics can be setup either during deployment or post deployment of Azure Arc data controller. +In the **direct** connected mode, Logs upload can only be set up in **automatic** mode. This automatic upload of metrics can be set up either during deployment or post deployment of Azure Arc data controller. 
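The subsections that follow cover turning the upload on or off after deployment. As a hedged illustration of the deployment-time option mentioned above, the same setting can be passed when the data controller is created, using the `--auto-upload-logs` flag of `az arcdata dc create`; all resource names below are placeholders.

```azurecli
# Illustrative sketch only: create a directly connected data controller with automatic
# log upload enabled from the start. Names, location, and profile are placeholders.
az arcdata dc create --name arc-dc1 --resource-group my-resource-group --location eastus --connectivity-mode direct --profile-name azure-arc-aks-premium-storage --auto-upload-logs true --custom-location mycustomlocation --storage-class mystorageclass
```

As noted in the troubleshooting guidance earlier, automatic log upload also depends on the Log Analytics workspace credentials (the `log-workspace-secret`) being available to the data controller.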
### Enable automatic upload of logs to Azure Log Analytics Workspace @@ -163,7 +163,7 @@ az arcdata dc update --name --resource-group --auto-upload-logs true ``` -### Disable automatic upload of logs to Azure Log Analytics Workspace +### Enable automatic upload of logs to Azure Log Analytics Workspace If the automatic upload of logs was enabled during Azure Arc data controller deployment, run the below command to disable automatic upload of logs. ``` @@ -172,7 +172,54 @@ az arcdata dc update --name --resource-group --auto-upload-logs false ``` -## Upload logs to Azure Monitor in indirect mode +## Configure automatic upload of logs to Azure Log Analytics Workspace in **direct** mode using `kubectl` CLI + +### Enable automatic upload of logs to Azure Log Analytics Workspace + +To configure automatic upload of logs using ```kubectl```: + +- ensure the Log Analytics Workspace is created as described in the earlier section +- create a Kubernetes secret for the Log Analytics workspace using the ```WorkspaceID``` and `SharedAccessKey` as follows: + +``` +apiVersion: v1 +data: + primaryKey: + workspaceId: +kind: Secret +metadata: + name: log-workspace-secret + namespace: +type: Opaque +``` + +- To create the secret, run: + + ```console + kubectl apply -f --namespace + ``` + +- To open the settings as a yaml file in the default editor, run: + + ```console + kubectl edit datacontroller --name + ``` + +- update the autoUploadLogs property to ```"true"```, and save the file + + + +### Enable automatic upload of logs to Azure Log Analytics Workspace + +To disable automatic upload of logs, run: + +```console +kubectl edit datacontroller --name +``` + +- update the autoUploadLogs property to `"false"`, and save the file + +## Upload logs to Azure Monitor in **indirect** mode To upload logs for your Azure Arc-enabled SQL managed instances and Azure Arc-enabled PostgreSQL Hyperscale server groups run the following CLI commands- @@ -210,7 +257,7 @@ Once your logs are uploaded, you should be able to query them using the log quer If you want to upload metrics and logs on a scheduled basis, you can create a script and run it on a timer every few minutes. Below is an example of automating the uploads using a Linux shell script. -In your favorite text/code editor, add the following script to the file and save as a script executable file such as .sh (Linux/Mac) or .cmd, .bat, .ps1. +In your favorite text/code editor, add the following script to the file and save as a script executable file - such as .sh for Linux/Mac, or .cmd, .bat, or .ps1 for Windows. ```azurecli az arcdata dc export --type logs --path logs.json --force --k8s-namespace arc diff --git a/articles/azure-arc/data/upload-metrics-and-logs-to-azure-monitor.md b/articles/azure-arc/data/upload-metrics-and-logs-to-azure-monitor.md index 001fe75ccf3bb..f8c296f6c5678 100644 --- a/articles/azure-arc/data/upload-metrics-and-logs-to-azure-monitor.md +++ b/articles/azure-arc/data/upload-metrics-and-logs-to-azure-monitor.md @@ -160,7 +160,7 @@ Example output: ## Verify service principal role ```azurecli -az role assignment list -o table +az role assignment list --scope subscriptions//resourceGroups/ -o table ``` With the service principal assigned to the appropriate role, you can proceed to upload metrics, or user data. 
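If the listing comes back empty, the assignment itself can be created with `az role assignment create`. The sketch below uses placeholder IDs, and the role names shown ("Contributor" and "Monitoring Metrics Publisher") are assumptions to confirm against the roles your upload scenario actually requires.

```azurecli
# Illustrative sketch only: grant roles to the service principal used for uploads.
# <appId>, <subscription>, and <resource-group> are placeholders; verify the required
# role names for metrics and usage upload before assigning them.
az role assignment create --assignee <appId> --role "Contributor" --scope /subscriptions/<subscription>/resourceGroups/<resource-group>
az role assignment create --assignee <appId> --role "Monitoring Metrics Publisher" --scope /subscriptions/<subscription>/resourceGroups/<resource-group>
```

Re-running the `az role assignment list` command above should then show the new assignments in the table output.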
diff --git a/articles/azure-arc/data/upload-usage-data.md b/articles/azure-arc/data/upload-usage-data.md index 78aeed4ccfe9f..932d62fe8d500 100644 --- a/articles/azure-arc/data/upload-usage-data.md +++ b/articles/azure-arc/data/upload-usage-data.md @@ -4,16 +4,16 @@ description: Upload usage Azure Arc-enabled data services data to Azure services: azure-arc ms.service: azure-arc ms.subservice: azure-arc-data -author: twright-msft -ms.author: twright +author: dnethi +ms.author: dinethi ms.reviewer: mikeray -ms.date: 11/03/2021 +ms.date: 05/27/2022 ms.topic: how-to --- # Upload usage data to Azure in **indirect** mode -Periodically, you can export out usage information. The export and upload of this information creates and updates the data controller, SQL managed instance, and PostgreSQL Hyperscale server group resources in Azure. +Periodically, you can export out usage information. The export and upload of this information creates and updates the data controller, SQL managed instance, and PostgreSQL resources in Azure. > [!NOTE] > Usage information is automatically uploaded for Azure Arc data controller deployed in **direct** connectivity mode. The instructions in this article only apply to uploading usage information for Azure Arc data controller deployed in **indirect** connectivity mode.. @@ -42,12 +42,12 @@ Usage information such as inventory and resource usage can be uploaded to Azure az arcdata dc export --type usage --path usage.json --k8s-namespace --use-k8s ``` - This command creates a `usage.json` file with all the Azure Arc-enabled data resources such as SQL managed instances and PostgreSQL Hyperscale instances etc. that are created on the data controller. + This command creates a `usage.json` file with all the Azure Arc-enabled data resources such as SQL managed instances and PostgreSQL instances etc. that are created on the data controller. For now, the file is not encrypted so that you can see the contents. Feel free to open in a text editor and see what the contents look like. -You will notice that there are two sets of data: `resources` and `data`. The `resources` are the data controller, PostgreSQL Hyperscale server groups, and SQL Managed Instances. The `resources` records in the data capture the pertinent events in the history of a resource - when it was created, when it was updated, and when it was deleted. The `data` records capture how many cores were available to be used by a given instance for every hour. +You will notice that there are two sets of data: `resources` and `data`. The `resources` are the data controller, PostgreSQL, and SQL Managed Instances. The `resources` records in the data capture the pertinent events in the history of a resource - when it was created, when it was updated, and when it was deleted. The `data` records capture how many cores were available to be used by a given instance for every hour. Example of a `resource` entry: @@ -104,6 +104,18 @@ Example of a `data` entry: az arcdata dc upload --path usage.json ``` +## Upload frequency + +In the **indirect** mode, usage information needs to be uploaded to Azure at least once in every 30 days. It is highly recommended to upload more frequently, such as daily or weekly. If usage information is not uploaded past 32 days, you will see some degradation in the service such as being unable to provision any new resources. + +There will be two types of notifications for delayed usage uploads - warning phase and degraded phase. 
In the warning phase there will be a message such as `Billing data for the Azure Arc data controller has not been uploaded in {0} hours. Please upload billing data as soon as possible.`. + +In the degraded phase, the message will look like `Billing data for the Azure Arc data controller has not been uploaded in {0} hours. Some functionality will not be available until the billing data is uploaded.`. + +The Azure portal Overview page for Data Controller and the Custom Resource status of the Data controller in your kubernetes cluster will both indicate the last upload date and the status message(s). + + + ## Automating uploads (optional) If you want to upload metrics and logs on a scheduled basis, you can create a script and run it on a timer every few minutes. Below is an example of automating the uploads using a Linux shell script. diff --git a/articles/azure-arc/data/version-log.md b/articles/azure-arc/data/version-log.md index 1e40cc27ff584..76ca06814180c 100644 --- a/articles/azure-arc/data/version-log.md +++ b/articles/azure-arc/data/version-log.md @@ -22,11 +22,11 @@ This article identifies the component versions with each release of Azure Arc-en |Component |Value | |--------------------------------------------------------|---------| |Container images tag |`v1.7.0_2022-05-24`| -|CRD names and versions |`datacontrollers.arcdata.microsoft.com`: v1beta1, v1 through v6
`exporttasks.tasks.arcdata.microsoft.com`: v1beta1, v1, v2<br/>`kafkas.arcdata.microsoft.com`: v1beta1<br/>`monitors.arcdata.microsoft.com`: v1beta1, v1, v2<br/>`sqlmanagedinstances.sql.arcdata.microsoft.com`: v1beta1, v1 through v6<br/>`postgresqls.arcdata.microsoft.com`: v1beta1, v1beta2<br/>`sqlmanagedinstancerestoretasks.tasks.sql.arcdata.microsoft.com`: v1beta1, v1<br/>`failovergroups.sql.arcdata.microsoft.com`: v1beta1, v1beta2<br/>`activedirectoryconnectors.arcdata.microsoft.com`: v1beta1, v1beta2|
-|ARM API version|2022-03-01-preview|
-|`arcdata` Azure CLI extension version| 1.4.0|
-|Arc enabled Kubernetes helm chart extension version|1.2.19481002|
-|Arc Data extension for Azure Data Studio|1.2.0|
+|CRD names and versions |`datacontrollers.arcdata.microsoft.com`: v1beta1, v1 through v6<br/>`exporttasks.tasks.arcdata.microsoft.com`: v1beta1, v1, v2<br/>`kafkas.arcdata.microsoft.com`: v1beta1<br/>`monitors.arcdata.microsoft.com`: v1beta1, v1, v2<br/>`sqlmanagedinstances.sql.arcdata.microsoft.com`: v1beta1, v1 through v6<br/>`postgresqls.arcdata.microsoft.com`: v1beta1, v1beta2<br/>`sqlmanagedinstancerestoretasks.tasks.sql.arcdata.microsoft.com`: v1beta1, v1<br/>`failovergroups.sql.arcdata.microsoft.com`: v1beta1, v1beta2, v1<br/>
                  `activedirectoryconnectors.arcdata.microsoft.com`: v1beta1, v1beta2| +|ARM API version|2022-03-01-preview (No change)| +|`arcdata` Azure CLI extension version| 1.4.1| +|Arc enabled Kubernetes helm chart extension version|1.2.19581002| +|Arc Data extension for Azure Data Studio|1.3.0| ## May 4, 2022 diff --git a/articles/azure-arc/index.yml b/articles/azure-arc/index.yml index 2bb3c4439d4d3..792df72031c19 100644 --- a/articles/azure-arc/index.yml +++ b/articles/azure-arc/index.yml @@ -141,6 +141,9 @@ conceptualContent: - url: ./vmware-vsphere/overview.md itemType: overview text: Azure Arc-enabled VMware vSphere (preview) + - url: ./system-center-virtual-machine-manager/overview.md + itemType: overview + text: Azure Arc-enabled System Center Virtual Machine Manager (preview) # Card - title: Application services links: diff --git a/articles/azure-arc/kubernetes/cluster-connect.md b/articles/azure-arc/kubernetes/cluster-connect.md index 5f950825c7496..3028318d22cef 100644 --- a/articles/azure-arc/kubernetes/cluster-connect.md +++ b/articles/azure-arc/kubernetes/cluster-connect.md @@ -2,7 +2,7 @@ title: "Use Cluster Connect to connect to Azure Arc-enabled Kubernetes clusters" services: azure-arc ms.service: azure-arc -ms.date: 10/31/2021 +ms.date: 06/03/2022 ms.topic: article author: shashankbarsin ms.author: shasb @@ -11,47 +11,80 @@ description: "Use Cluster Connect to securely connect to Azure Arc-enabled Kuber # Use Cluster Connect to connect to Azure Arc-enabled Kubernetes clusters -With Cluster Connect, you can securely connect to Azure Arc-enabled Kubernetes clusters without requiring any inbound port to be enabled on the firewall. Access to the `apiserver` of the Azure Arc-enabled Kubernetes cluster enables the following scenarios: -* Enable interactive debugging and troubleshooting. -* Provide cluster access to Azure services for [custom locations](custom-locations.md) and other resources created on top of it. +With Cluster Connect, you can securely connect to Azure Arc-enabled Kubernetes clusters without requiring any inbound port to be enabled on the firewall. -A conceptual overview of this feature is available in [Cluster connect - Azure Arc-enabled Kubernetes](conceptual-cluster-connect.md) article. +Access to the `apiserver` of the Azure Arc-enabled Kubernetes cluster enables the following scenarios: -## Prerequisites +- Interactive debugging and troubleshooting. +- Cluster access to Azure services for [custom locations](custom-locations.md) and other resources created on top of it. + +A conceptual overview of this feature is available in [Cluster connect - Azure Arc-enabled Kubernetes](conceptual-cluster-connect.md). + +## Prerequisites + +### [Azure CLI](#tab/azure-cli) + +- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). - [Install](/cli/azure/install-azure-cli) or [update](/cli/azure/update-azure-cli) Azure CLI to version >= 2.16.0. 
- Install the `connectedk8s` Azure CLI extension of version >= 1.2.5: - ```azurecli - az extension add --name connectedk8s - ``` - - If you've already installed the `connectedk8s` extension, update the extension to the latest version: - - ```azurecli - az extension update --name connectedk8s - ``` + ```azurecli + az extension add --name connectedk8s + ``` + + If you've already installed the `connectedk8s` extension, update the extension to the latest version: + + ```azurecli + az extension update --name connectedk8s + ``` - An existing Azure Arc-enabled Kubernetes connected cluster. - - If you haven't connected a cluster yet, use our [quickstart](quickstart-connect-cluster.md). - - [Upgrade your agents](agent-upgrade.md#manually-upgrade-agents) to version >= 1.5.3. + - If you haven't connected a cluster yet, use our [quickstart](quickstart-connect-cluster.md). + - [Upgrade your agents](agent-upgrade.md#manually-upgrade-agents) to version >= 1.5.3. - Enable the below endpoints for outbound access in addition to the ones mentioned under [connecting a Kubernetes cluster to Azure Arc](quickstart-connect-cluster.md#meet-network-requirements): - | Endpoint | Port | - |----------------|-------| - |`*.servicebus.windows.net` | 443 | - |`guestnotificationservice.azure.com`, `*.guestnotificationservice.azure.com` | 443 | + | Endpoint | Port | + |----------------|-------| + |`*.servicebus.windows.net` | 443 | + |`guestnotificationservice.azure.com`, `*.guestnotificationservice.azure.com` | 443 | - Replace the placeholders and run the below command to set the environment variables used in this document: - ```azurecli - CLUSTER_NAME= - RESOURCE_GROUP= - ARM_ID_CLUSTER=$(az connectedk8s show -n $CLUSTER_NAME -g $RESOURCE_GROUP --query id -o tsv) - ``` + ```azurecli + CLUSTER_NAME= + RESOURCE_GROUP= + ARM_ID_CLUSTER=$(az connectedk8s show -n $CLUSTER_NAME -g $RESOURCE_GROUP --query id -o tsv) + ``` + +### [Azure PowerShell](#tab/azure-powershell) + +- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). + +- Install [Azure PowerShell version 6.6.0 or later](/powershell/azure/install-az-ps). + +- An existing Azure Arc-enabled Kubernetes connected cluster. + - If you haven't connected a cluster yet, use our [quickstart](quickstart-connect-cluster.md). + - [Upgrade your agents](agent-upgrade.md#manually-upgrade-agents) to version >= 1.5.3. + +- Enable the below endpoints for outbound access in addition to the ones mentioned under [connecting a Kubernetes cluster to Azure Arc](quickstart-connect-cluster.md#meet-network-requirements): + | Endpoint | Port | + |----------------|-------| + |`*.servicebus.windows.net` | 443 | + |`guestnotificationservice.azure.com`, `*.guestnotificationservice.azure.com` | 443 | + +- Replace the placeholders and run the below command to set the environment variables used in this document: + + ```azurepowershell + $CLUSTER_NAME = + $RESOURCE_GROUP = + $ARM_ID_CLUSTER = (az connectedk8s show -n $CLUSTER_NAME -g $RESOURCE_GROUP --query id -o tsv) + ``` + +--- ## Enable Cluster Connect feature @@ -63,41 +96,77 @@ az connectedk8s enable-features --features cluster-connect -n $CLUSTER_NAME -g $ ## Azure Active Directory authentication option -1. Get the `objectId` associated with your Azure AD entity: +### [Azure CLI](#tab/azure-cli) + +1. Get the `objectId` associated with your Azure AD entity. 
+ + - For an Azure AD user account: + + ```azurecli + AAD_ENTITY_OBJECT_ID=$(az ad signed-in-user show --query objectId -o tsv) + ``` + + - For an Azure AD application: + + ```azurecli + AAD_ENTITY_OBJECT_ID=$(az ad sp show --id --query objectId -o tsv) + ``` + +1. Authorize the entity with appropriate permissions. + + - If you are using Kubernetes native ClusterRoleBinding or RoleBinding for authorization checks on the cluster, with the `kubeconfig` file pointing to the `apiserver` of your cluster for direct access, you can create one mapped to the Azure AD entity (service principal or user) that needs to access this cluster. Example: + + ```console + kubectl create clusterrolebinding admin-user-binding --clusterrole cluster-admin --user=$AAD_ENTITY_OBJECT_ID + ``` + + - If you are using Azure RBAC for authorization checks on the cluster, you can create an Azure role assignment mapped to the Azure AD entity. Example: + + ```azurecli + az role assignment create --role "Azure Arc Kubernetes Viewer" --assignee $AAD_ENTITY_OBJECT_ID --scope $ARM_ID_CLUSTER + ``` - - For Azure AD user account: +### [Azure PowerShell](#tab/azure-powershell) - ```azurecli - AAD_ENTITY_OBJECT_ID=$(az ad signed-in-user show --query objectId -o tsv) - ``` +1. Get the `objectId` associated with your Azure AD entity. - - For Azure AD application: + - For an Azure AD user account: - ```azurecli - AAD_ENTITY_OBJECT_ID=$(az ad sp show --id --query objectId -o tsv) - ``` + ```azurepowershell + $AAD_ENTITY_OBJECT_ID = (az ad signed-in-user show --query objectId -o tsv) + ``` -1. Authorize the entity with appropriate permissions: + - For an Azure AD application: - - If you are using Kubernetes native ClusterRoleBinding or RoleBinding for authorization checks on the cluster, with the `kubeconfig` file pointing to the `apiserver` of your cluster for direct access, you can create one mapped to the Azure AD entity (service principal or user) that needs to access this cluster. Example: - - ```console - kubectl create clusterrolebinding admin-user-binding --clusterrole cluster-admin --user=$AAD_ENTITY_OBJECT_ID - ``` + ```azurepowershell + $AAD_ENTITY_OBJECT_ID = (az ad sp show --id --query objectId -o tsv) + ``` - - If you are using Azure RBAC for authorization checks on the cluster, you can create an Azure role assignment mapped to the Azure AD entity. Example: +1. Authorize the entity with appropriate permissions. - ```azurecli - az role assignment create --role "Azure Arc Kubernetes Viewer" --assignee $AAD_ENTITY_OBJECT_ID --scope $ARM_ID_CLUSTER - ``` + - If you are using Kubernetes native ClusterRoleBinding or RoleBinding for authorization checks on the cluster, with the `kubeconfig` file pointing to the `apiserver` of your cluster for direct access, you can create one mapped to the Azure AD entity (service principal or user) that needs to access this cluster. Example: + + ```console + kubectl create clusterrolebinding admin-user-binding --clusterrole cluster-admin --user=$AAD_ENTITY_OBJECT_ID + ``` + + - If you are using Azure RBAC for authorization checks on the cluster, you can create an Azure role assignment mapped to the Azure AD entity. Example: + + ```azurecli + az role assignment create --role "Azure Arc Kubernetes Viewer" --assignee $AAD_ENTITY_OBJECT_ID --scope $ARM_ID_CLUSTER + ``` + +--- ## Service account token authentication option -1. 
With the `kubeconfig` file pointing to the `apiserver` of your Kubernetes cluster, create a service account in any namespace (following command creates it in the default namespace): +### [Azure CLI](#tab/azure-cli) - ```console - kubectl create serviceaccount admin-user - ``` +1. With the `kubeconfig` file pointing to the `apiserver` of your Kubernetes cluster, create a service account in any namespace (the following command creates it in the default namespace): + + ```console + kubectl create serviceaccount admin-user + ``` 1. Create ClusterRoleBinding or RoleBinding to grant this [service account the appropriate permissions on the cluster](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#kubectl-create-rolebinding). Example: @@ -105,7 +174,7 @@ az connectedk8s enable-features --features cluster-connect -n $CLUSTER_NAME -g $ kubectl create clusterrolebinding admin-user-binding --clusterrole cluster-admin --serviceaccount default:admin-user ``` -1. Get the service account's token using the following commands +1. Get the service account's token using the following commands: ```console SECRET_NAME=$(kubectl get serviceaccount admin-user -o jsonpath='{$.secrets[0].name}') @@ -115,43 +184,68 @@ az connectedk8s enable-features --features cluster-connect -n $CLUSTER_NAME -g $ TOKEN=$(kubectl get secret ${SECRET_NAME} -o jsonpath='{$.data.token}' | base64 -d | sed $'s/$/\\\n/g') ``` +### [Azure PowerShell](#tab/azure-powershell) + +1. With the `kubeconfig` file pointing to the `apiserver` of your Kubernetes cluster, create a service account in any namespace (the following command creates it in the default namespace): + + ```console + kubectl create serviceaccount admin-user + ``` + +1. Create ClusterRoleBinding or RoleBinding to grant this [service account the appropriate permissions on the cluster](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#kubectl-create-rolebinding). Example: + + ```console + kubectl create clusterrolebinding admin-user-binding --clusterrole cluster-admin --serviceaccount default:admin-user + ``` + +1. Get the service account's token using the following commands: + + ```console + $SECRET_NAME = (kubectl get serviceaccount admin-user -o jsonpath='{$.secrets[0].name}') + ``` + + ```console + $TOKEN = ([System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String((kubectl get secret $SECRET_NAME -o jsonpath='{$.data.token}')))) + ``` + +--- + ## Access your cluster 1. 
Set up the Cluster Connect based kubeconfig needed to access your cluster based on the authentication option used: - - If using Azure Active Directory authentication option, after logging into Azure CLI using the Azure AD entity of interest, get the Cluster Connect `kubeconfig` needed to communicate with the cluster from anywhere (from even outside the firewall surrounding the cluster): + - If using Azure Active Directory authentication option, after logging into Azure CLI using the Azure AD entity of interest, get the Cluster Connect `kubeconfig` needed to communicate with the cluster from anywhere (from even outside the firewall surrounding the cluster): - ```azurecli - az connectedk8s proxy -n $CLUSTER_NAME -g $RESOURCE_GROUP - ``` + ```azurecli + az connectedk8s proxy -n $CLUSTER_NAME -g $RESOURCE_GROUP + ``` - - If using the service account authentication option, get the Cluster Connect `kubeconfig` needed to communicate with the cluster from anywhere: + - If using the service account authentication option, get the Cluster Connect `kubeconfig` needed to communicate with the cluster from anywhere: - ```azurecli - az connectedk8s proxy -n $CLUSTER_NAME -g $RESOURCE_GROUP --token $TOKEN - ``` + ```azurecli + az connectedk8s proxy -n $CLUSTER_NAME -g $RESOURCE_GROUP --token $TOKEN + ``` 1. Use `kubectl` to send requests to the cluster: - ```console - kubectl get pods - ``` - - You should now see a response from the cluster containing the list of all pods under the `default` namespace. + ```console + kubectl get pods + ``` + +You should now see a response from the cluster containing the list of all pods under the `default` namespace. ## Known limitations When making requests to the Kubernetes cluster, if the Azure AD entity used is a part of more than 200 groups, the following error is observed as this is a known limitation: -```console -You must be logged in to the server (Error:Error while retrieving group info. Error:Overage claim (users with more than 200 group membership) is currently not supported. -``` +`You must be logged in to the server (Error:Error while retrieving group info. Error:Overage claim (users with more than 200 group membership) is currently not supported.` To get past this error: + 1. Create a [service principal](/cli/azure/create-an-azure-service-principal-azure-cli), which is less likely to be a member of more than 200 groups. -1. [Sign in](/cli/azure/create-an-azure-service-principal-azure-cli#sign-in-using-a-service-principal) to Azure CLI with the service principal before running `az connectedk8s proxy` command. +1. [Sign in](/cli/azure/create-an-azure-service-principal-azure-cli#sign-in-using-a-service-principal) to Azure CLI with the service principal before running the `az connectedk8s proxy` command. ## Next steps -> [!div class="nextstepaction"] -> Set up [Azure AD RBAC](azure-rbac.md) on your clusters +- Set up [Azure AD RBAC](azure-rbac.md) on your clusters. +- Deploy and manage [cluster extensions](extensions.md). 
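Tying together the known-limitations workaround above, a minimal sketch of creating a service principal and using it for the proxy session might look like this; the service principal name is an illustrative placeholder, and the role and scope reuse the variables defined in the prerequisites:

```azurecli
# Sketch: create a service principal scoped to the connected cluster, sign in with it,
# then open the Cluster Connect proxy. <appId>, <password>, and <tenant> come from the create output.
az ad sp create-for-rbac --name arc-proxy-sp --role "Azure Arc Kubernetes Viewer" --scopes $ARM_ID_CLUSTER
az login --service-principal --username <appId> --password <password> --tenant <tenant>
az connectedk8s proxy -n $CLUSTER_NAME -g $RESOURCE_GROUP
```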
diff --git a/articles/azure-arc/kubernetes/conceptual-configurations.md b/articles/azure-arc/kubernetes/conceptual-configurations.md index 5269b5d9a4778..26b83cc3add2d 100644 --- a/articles/azure-arc/kubernetes/conceptual-configurations.md +++ b/articles/azure-arc/kubernetes/conceptual-configurations.md @@ -1,5 +1,5 @@ --- -title: "Configurations and GitOps - Azure Arc-enabled Kubernetes" +title: "GitOps Flux v1 configurations with Azure Arc-enabled Kubernetes" services: azure-arc ms.service: azure-arc ms.date: 05/24/2022 @@ -10,12 +10,13 @@ description: "This article provides a conceptual overview of GitOps and configur keywords: "Kubernetes, Arc, Azure, containers, configuration, GitOps" --- -# Configurations and GitOps with Azure Arc-enabled Kubernetes +# GitOps Flux v1 configurations with Azure Arc-enabled Kubernetes > [!NOTE] > This document is for GitOps with Flux v1. GitOps with Flux v2 is now available for Azure Arc-enabled Kubernetes and Azure Kubernetes Service (AKS) clusters; [learn about GitOps with Flux v2](./conceptual-gitops-flux2.md). Eventually Azure will stop supporting GitOps with Flux v1, so begin using Flux v2 as soon as possible. In relation to Kubernetes, GitOps is the practice of declaring the desired state of Kubernetes cluster configurations (deployments, namespaces, etc.) in a Git repository. This declaration is followed by a polling and pull-based deployment of these cluster configurations using an operator. The Git repository can contain: + * YAML-format manifests describing any valid Kubernetes resources, including Namespaces, ConfigMaps, Deployments, DaemonSets, etc. * Helm charts for deploying applications. diff --git a/articles/azure-arc/kubernetes/conceptual-gitops-flux2.md b/articles/azure-arc/kubernetes/conceptual-gitops-flux2.md index 556fddd97237e..38ae7517f6e96 100644 --- a/articles/azure-arc/kubernetes/conceptual-gitops-flux2.md +++ b/articles/azure-arc/kubernetes/conceptual-gitops-flux2.md @@ -1,14 +1,14 @@ --- -title: "Conceptual overview Azure Kubernetes Configuration Management (GitOps)" +title: "GitOps Flux v2 configurations with AKS and Azure Arc-enabled Kubernetes" description: "This article provides a conceptual overview of GitOps in Azure for use in Azure Arc-enabled Kubernetes and Azure Kubernetes Service (AKS) clusters." keywords: "GitOps, Flux, Kubernetes, K8s, Azure, Arc, AKS, Azure Kubernetes Service, containers, devops" services: azure-arc, aks ms.service: azure-arc -ms.date: 5/3/2022 +ms.date: 5/26/2022 ms.topic: conceptual --- -# GitOps in Azure +# GitOps Flux v2 configurations with AKS and Azure Arc-enabled Kubernetes Azure provides configuration management capability using GitOps in Azure Kubernetes Service (AKS) and Azure Arc-enabled Kubernetes clusters. You can easily enable and use GitOps in these clusters. diff --git a/articles/azure-arc/kubernetes/custom-locations.md b/articles/azure-arc/kubernetes/custom-locations.md index f1e597113471a..056ffd070cf63 100644 --- a/articles/azure-arc/kubernetes/custom-locations.md +++ b/articles/azure-arc/kubernetes/custom-locations.md @@ -70,15 +70,21 @@ If you are logged into Azure CLI as an Azure AD user, to enable this feature on az connectedk8s enable-features -n -g --features cluster-connect custom-locations ``` -If you are logged into Azure CLI using a service principal, to enable this feature on your cluster, execute the following steps: +If you run the above command while being logged into Azure CLI using a service principal, you may observe the following warning: -1. 
Fetch the Object ID of the Azure AD application used by Azure Arc service: +```console +Unable to fetch oid of 'custom-locations' app. Proceeding without enabling the feature. Insufficient privileges to complete the operation. +``` + +This is because a service principal doesn't have permissions to get information of the application used by Azure Arc service. To avoid this error, execute the following steps: + +1. Login into Azure CLI using your user account. Fetch the Object ID of the Azure AD application used by Azure Arc service: ```azurecli - az ad sp show --id 'bc313c14-388c-4e7d-a58e-70017303ee3b' --query objectId -o tsv + az ad sp show --id bc313c14-388c-4e7d-a58e-70017303ee3b --query objectId -o tsv ``` -1. Use the `` value from above step to enable custom locations feature on the cluster: +1. Login into Azure CLI using the service principal. Use the `` value from above step to enable custom locations feature on the cluster: ```azurecli az connectedk8s enable-features -n -g --custom-locations-oid --features cluster-connect custom-locations diff --git a/articles/azure-arc/kubernetes/extensions.md b/articles/azure-arc/kubernetes/extensions.md index 537f9fc4e3afc..e75bd2be8d133 100644 --- a/articles/azure-arc/kubernetes/extensions.md +++ b/articles/azure-arc/kubernetes/extensions.md @@ -65,7 +65,7 @@ A conceptual overview of this feature is available in [Cluster extensions - Azur | [Azure API Management on Azure Arc](../../api-management/how-to-deploy-self-hosted-gateway-azure-arc.md) | Deploy and manage API Management gateway on Azure Arc-enabled Kubernetes clusters. | | [Azure Arc-enabled Machine Learning](../../machine-learning/how-to-attach-kubernetes-anywhere.md) | Deploy and run Azure Machine Learning on Azure Arc-enabled Kubernetes clusters. | | [Flux (GitOps)](./conceptual-gitops-flux2.md) | Use GitOps with Flux to manage cluster configuration and application deployment. | -| [Dapr extension for Azure Kubernetes Service (AKS) and Arc-enabled Kubernetes](/azure/aks/dapr)| Eliminates the overhead of downloading Dapr tooling and manually installing and managing the runtime on your clusters. | +| [Dapr extension for Azure Kubernetes Service (AKS) and Arc-enabled Kubernetes](../../aks/dapr.md)| Eliminates the overhead of downloading Dapr tooling and manually installing and managing the runtime on your clusters. 
| ## Usage of cluster extensions @@ -280,4 +280,4 @@ Learn more about the cluster extensions currently available for Azure Arc-enable > [Event Grid on Kubernetes](../../event-grid/kubernetes/overview.md) > > [!div class="nextstepaction"] -> [Azure API Management on Azure Arc](../../api-management/how-to-deploy-self-hosted-gateway-azure-arc.md) +> [Azure API Management on Azure Arc](../../api-management/how-to-deploy-self-hosted-gateway-azure-arc.md) \ No newline at end of file diff --git a/articles/azure-arc/kubernetes/media/gitops/flux2-config-install.png b/articles/azure-arc/kubernetes/media/gitops/flux2-config-install.png index 3013179a76d74..e9028d24b81da 100644 Binary files a/articles/azure-arc/kubernetes/media/gitops/flux2-config-install.png and b/articles/azure-arc/kubernetes/media/gitops/flux2-config-install.png differ diff --git a/articles/azure-arc/kubernetes/media/gitops/flux2-extension-install-AKS.png b/articles/azure-arc/kubernetes/media/gitops/flux2-extension-install-AKS.png index c5e9399332467..23f6c124879ff 100644 Binary files a/articles/azure-arc/kubernetes/media/gitops/flux2-extension-install-AKS.png and b/articles/azure-arc/kubernetes/media/gitops/flux2-extension-install-AKS.png differ diff --git a/articles/azure-arc/kubernetes/media/gitops/flux2-extension-install-Arc.png b/articles/azure-arc/kubernetes/media/gitops/flux2-extension-install-Arc.png index ee33fad499b92..2463e5c7a7e81 100644 Binary files a/articles/azure-arc/kubernetes/media/gitops/flux2-extension-install-Arc.png and b/articles/azure-arc/kubernetes/media/gitops/flux2-extension-install-Arc.png differ diff --git a/articles/azure-arc/kubernetes/move-regions.md b/articles/azure-arc/kubernetes/move-regions.md new file mode 100644 index 0000000000000..2e10437ce88c7 --- /dev/null +++ b/articles/azure-arc/kubernetes/move-regions.md @@ -0,0 +1,88 @@ +--- +title: "Move Arc-enabled Kubernetes clusters between regions" +services: azure-arc +ms.service: azure-arc +ms.date: 03/03/2021 +ms.topic: how-to +ms.custom: subject-moving-resources +author: anraghun +ms.author: anraghun +description: "Manually move your Azure Arc-enabled Kubernetes between regions" +keywords: "Kubernetes, Arc, Azure, K8s, containers, region, move" +#Customer intent: As a Kubernetes cluster administrator, I want to move my Arc-enabled Kubernetes cluster to another Azure region. +--- + +# Move Arc-enabled Kubernetes clusters across Azure regions + +This article describes how to move Arc-enabled Kubernetes clusters (or connected cluster resources) to a different Azure region. You might move your resources to another region for a number of reasons. For example, to take advantage of a new Azure region, to deploy features or services available in specific regions only, to meet internal policy and governance requirements, or in response to capacity planning requirements. + +## Prerequisites + +- Ensure that Azure Arc-enabled Kubernetes resource (Microsoft.Kubernetes/connectedClusters) is supported in the target region. +- Ensure that Azure Arc-enabled Kubernetes configuration (Microsoft.KubernetesConfiguration/SourceControlConfigurations, Microsoft.KubernetesConfiguration/Extensions, Microsoft.KubernetesConfiguration/FluxConfigurations) resources are supported in the target region. +- Ensure that the Arc-enabled services you've deployed on top are supported in the target region. +- Ensure you have network access to the api server of your underlying Kubernetes cluster. 
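One way to check the first prerequisite is to ask the resource provider which regions offer the `connectedClusters` resource type; this is a sketch, and the JMESPath expression is only illustrative:

```azurecli
# Sketch: list the Azure regions where Microsoft.Kubernetes/connectedClusters is available.
az provider show --namespace Microsoft.Kubernetes \
  --query "resourceTypes[?resourceType=='connectedClusters'].locations | [0]" -o table
```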
+ +## Prepare + +Before you begin, it's important to understand what moving these resources mean. + +### Kubernetes configurations + +Source control configurations, Flux configurations and extensions are child resources to the connected cluster resource. In order to move these resources, you'll first need to move the parent connected cluster resource. + +### Connected cluster + +The connectedClusters resource is the ARM representation of your Kubernetes clusters outside of Azure (on-premises, another cloud, edge...). The underlying infrastructure lies in your environment and Arc provides a first-class representation of the cluster on Azure, by installing agents on your cluster. + +When it comes to "moving" your Arc connected cluster, it means deleting the ARM resource in the source region, cleaning up the agents on your cluster and re-onboarding your cluster again in the target region. + +## Move + +### Kubernetes configurations + +1. Do a LIST of all configuration resources in the source cluster (the cluster to be moved) and save the response body to be used as the request body when re-creating these resources. + - [Microsoft.KubernetesConfiguration/SourceControlConfigurations](/cli/azure/k8s-configuration?view=azure-cli-latest&preserve-view=true#az-k8sconfiguration-list) + - [Microsoft.KubernetesConfiguration/Extensions](/cli/azure/k8s-extension?view=azure-cli-latest&preserve-view=true#az-k8s-extension-list) + - [Microsoft.KubernetesConfiguration/FluxConfigurations](/cli/azure/k8s-configuration/flux?view=azure-cli-latest&preserve-view=true#az-k8s-configuration-flux-list) + > [!NOTE] + > LIST/GET of configuration resources **do not** return `ConfigurationProtectedSettings`. + > For such cases, the only option is to save the original request body and reuse them while creating the resources in the new region. +2. [Delete](./move-regions.md#kubernetes-configurations-3) the above configuration resources. +2. Ensure the Arc connected cluster is up and running in the new region. This is the target cluster. +3. Re-create each of the configuration resources obtained in the LIST command from the source cluster on the target cluster. + +### Connected cluster + +1. [Delete](./move-regions.md#connected-cluster-3) the previous Arc deployment from the underlying Kubernetes cluster. +2. With network access to the underlying Kubernetes cluster, run [this command](./quickstart-connect-cluster.md?tabs=azure-cli#connect-an-existing-kubernetes-cluster) to create the Arc connected cluster in the new region. +> [!NOTE] +> The above command creates the cluster by default in the same location as its resource group. +> Use the `--location` parameter to explicitly provide the target region value. + +## Verify + +### Kubernetes configurations + +Do a LIST of all configuration resources in the target cluster. This should match the LIST response from the source cluster. + +### Connected cluster + +1. Run `az connectedk8s show -n -g ` and ensure the `connectivityStatus` value is `Connected`. +2. Run [this command](./quickstart-connect-cluster.md?tabs=azure-cli#view-azure-arc-agents-for-kubernetes) to verify all Arc agents are successfully deployed on the underlying cluster. 
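As a quick sketch of that verification step (the property names and the query expression here are assumptions about the `az connectedk8s show` output shape, so adjust as needed):

```azurecli
# Sketch: confirm the moved cluster reports the target region and a Connected status.
az connectedk8s show -n <cluster-name> -g <resource-group> \
  --query "{name:name, location:location, status:connectivityStatus}" -o table
```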
+ +## Clean up source resources + +### Kubernetes configurations + +Delete each of the configuration resources returned in the LIST command in the source cluster: +- [Microsoft.KubernetesConfiguration/SourceControlConfigurations](/cli/azure/k8s-configuration?view=azure-cli-latest&preserve-view=true#az-k8s-configuration-delete) +- [Microsoft.KubernetesConfiguration/Extensions](/cli/azure/k8s-extension?view=azure-cli-latest&preserve-view=true#az-k8s-extension-delete) +- [Microsoft.KubernetesConfiguration/FluxConfigurations](/cli/azure/k8s-configuration/flux?view=azure-cli-latest&preserve-view=true#az-k8s-configuration-flux-delete) + +> [!NOTE] +> This step may be skipped if the parent Arc connected cluster is also being deleted. Doing so would automatically remove the configuration resources on top. + +### Connected cluster + +With network access to the underlying Kubernetes cluster, run [this command](./quickstart-connect-cluster.md?tabs=azure-cli#clean-up-resources) to delete the Arc connected cluster. This command will clean up the Arc footprint on the underlying cluster as well as on ARM. \ No newline at end of file diff --git a/articles/azure-arc/kubernetes/toc.yml b/articles/azure-arc/kubernetes/toc.yml index 69fee039d14f8..4a8d42e6761b1 100644 --- a/articles/azure-arc/kubernetes/toc.yml +++ b/articles/azure-arc/kubernetes/toc.yml @@ -105,6 +105,8 @@ href: custom-locations.md - name: Azure Arc-enabled Machine Learning href: ../../machine-learning/how-to-attach-kubernetes-anywhere.md?toc=/azure/azure-arc/kubernetes/toc.json&bc=/azure/azure-arc/kubernetes/breadcrumb/toc.json + - name: Move between regions + href: move-regions.md - name: Troubleshooting href: troubleshooting.md - name: Reference diff --git a/articles/azure-arc/kubernetes/troubleshooting.md b/articles/azure-arc/kubernetes/troubleshooting.md index a982cca302101..6af8e8400acf3 100644 --- a/articles/azure-arc/kubernetes/troubleshooting.md +++ b/articles/azure-arc/kubernetes/troubleshooting.md @@ -3,15 +3,15 @@ title: "Troubleshoot common Azure Arc-enabled Kubernetes issues" services: azure-arc ms.service: azure-arc #ms.subservice: azure-arc-kubernetes coming soon -ms.date: 03/09/2022 +ms.date: 06/07/2022 ms.topic: article -description: "Troubleshooting common issues with Azure Arc-enabled Kubernetes clusters and GitOps." +description: "Learn how to resolve common issues with Azure Arc-enabled Kubernetes clusters and GitOps." keywords: "Kubernetes, Arc, Azure, containers, GitOps, Flux" --- # Azure Arc-enabled Kubernetes and GitOps troubleshooting -This document provides troubleshooting guides for issues with Azure Arc-enabled Kubernetes connectivity, permissions, and agents. It also provides troubleshooting guides for Azure GitOps, which can be used in either Azure Arc-enabled Kubernetes or Azure Kubernetes Service (AKS) clusters. +This document provides troubleshooting guides for issues with Azure Arc-enabled Kubernetes connectivity, permissions, and agents. It also provides troubleshooting guides for Azure GitOps, which can be used in either Azure Arc-enabled Kubernetes or Azure Kubernetes Service (AKS) clusters. ## General troubleshooting @@ -28,7 +28,7 @@ az account show All agents for Azure Arc-enabled Kubernetes are deployed as pods in the `azure-arc` namespace. All pods should be running and passing their health checks. 
-First, verify the Azure Arc helm release: +First, verify the Azure Arc Helm Chart release: ```console $ helm --namespace default status azure-arc @@ -40,9 +40,9 @@ REVISION: 5 TEST SUITE: None ``` -If the Helm release isn't found or missing, try [connecting the cluster to Azure Arc](./quickstart-connect-cluster.md) again. +If the Helm Chart release isn't found or missing, try [connecting the cluster to Azure Arc](./quickstart-connect-cluster.md) again. -If the Helm release is present with `STATUS: deployed`, check the status of the agents using `kubectl`: +If the Helm Chart release is present with `STATUS: deployed`, check the status of the agents using `kubectl`: ```console $ kubectl -n azure-arc get deployments,pods @@ -65,15 +65,15 @@ pod/metrics-agent-58b765c8db-n5l7k 2/2 Running 0 16h pod/resource-sync-agent-5cf85976c7-522p5 3/3 Running 0 16h ``` -All pods should show `STATUS` as `Running` with either `3/3` or `2/2` under the `READY` column. Fetch logs and describe the pods returning an `Error` or `CrashLoopBackOff`. If any pods are stuck in `Pending` state, there might be insufficient resources on cluster nodes. [Scale up your cluster](https://kubernetes.io/docs/tasks/administer-cluster/) can get these pods to transition to `Running` state. +All pods should show `STATUS` as `Running` with either `3/3` or `2/2` under the `READY` column. Fetch logs and describe the pods returning an `Error` or `CrashLoopBackOff`. If any pods are stuck in `Pending` state, there might be insufficient resources on cluster nodes. [Scaling up your cluster](https://kubernetes.io/docs/tasks/administer-cluster/) can get these pods to transition to `Running` state. ## Connecting Kubernetes clusters to Azure Arc -Connecting clusters to Azure requires both access to an Azure subscription and `cluster-admin` access to a target cluster. If you cannot reach the cluster or you have insufficient permissions, connecting the cluster to Azure Arc will fail. +Connecting clusters to Azure Arc requires access to an Azure subscription and `cluster-admin` access to a target cluster. If you can't reach the cluster, or if you have insufficient permissions, connecting the cluster to Azure Arc will fail. Make sure you've met all of the [prerequisites to connect a cluster](quickstart-connect-cluster.md#prerequisites). ### Azure CLI is unable to download Helm chart for Azure Arc agents -If you are using Helm version >= 3.7.0, you will run into the following error when `az connectedk8s connect` is run to connect the cluster to Azure Arc: +With Helm version >= 3.7.0, you may run into the following error when using `az connectedk8s connect` to connect the cluster to Azure Arc: ```azurecli az connectedk8s connect -n AzureArcTest -g AzureArcTest @@ -84,11 +84,11 @@ Unable to pull helm chart from the registry 'mcr.microsoft.com/azurearck8s/batch Run 'helm --help' for usage. ``` -In this case, you'll need to install a prior version of [Helm 3](https://helm.sh/docs/intro/install/), where version < 3.7.0. After this, run the `az connectedk8s connect` command again to connect the cluster to Azure Arc. +To resolve this issue, you'll need to install a prior version of [Helm 3](https://helm.sh/docs/intro/install/), where the version is less than 3.7.0. After you've installed that version, run the `az connectedk8s connect` command again to connect the cluster to Azure Arc. 
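For example, a quick check-and-retry after downgrading might look like the following (the cluster and resource group names are the same sample values used above):

```console
# Sketch: confirm the Helm client version is below 3.7.0, then retry onboarding.
helm version --short
az connectedk8s connect -n AzureArcTest -g AzureArcTest
```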
### Insufficient cluster permissions -If the provided kubeconfig file does not have sufficient permissions to install the Azure Arc agents, the Azure CLI command will return an error. +If the provided kubeconfig file doesn't have sufficient permissions to install the Azure Arc agents, the Azure CLI command will return an error. ```azurecli az connectedk8s connect --resource-group AzureArc --name AzureArcCluster @@ -101,15 +101,15 @@ This operation might take a while... Error: list: failed to list: secrets is forbidden: User "myuser" cannot list resource "secrets" in API group "" at the cluster scope ``` -The user connecting the cluster to Azure Arc should have `cluster-admin` role assigned to them on the cluster. +To resolve this issue, the user connecting the cluster to Azure Arc should have the `cluster-admin` role assigned to them on the cluster. ### Unable to connect OpenShift cluster to Azure Arc -If `az connectedk8s connect` is timing out and failing when connecting an OpenShift cluster to Azure Arc, check the following: +If `az connectedk8s connect` is timing out and failing when connecting an OpenShift cluster to Azure Arc: -1. The OpenShift cluster needs to meet the version prerequisites: 4.5.41+ or 4.6.35+ or 4.7.18+. +1. Ensure that the OpenShift cluster meets the version prerequisites: 4.5.41+ or 4.6.35+ or 4.7.18+. -1. Before running `az connectedk8s connnect`, the following command needs to be run on the cluster: +1. Before you run `az connectedk8s connnect`, run this command on the cluster: ```console oc adm policy add-scc-to-user privileged system:serviceaccount:azure-arc:azure-arc-kube-aad-proxy-sa @@ -127,8 +127,11 @@ az connectedk8s connect --resource-group AzureArc --name AzureArcCluster Ensure that you have the latest helm version installed before proceeding to avoid unexpected errors. This operation might take a while... ``` + ### Helm timeout error +You may see the following Helm timeout error: + ```azurecli az connectedk8s connect -n AzureArcTest -g AzureArcTest ``` @@ -137,38 +140,41 @@ az connectedk8s connect -n AzureArcTest -g AzureArcTest Unable to install helm release: Error: UPGRADE Failed: time out waiting for the condition ``` -If you get the above helm timeout issue, you can troubleshoot as follows: - - 1. Run the following command: - - ```console - kubectl get pods -n azure-arc - ``` - 2. Check if the `clusterconnect-agent` or the `config-agent` pods are showing crashloopbackoff, or not all containers are running: - - ```output - NAME READY STATUS RESTARTS AGE - cluster-metadata-operator-664bc5f4d-chgkl 2/2 Running 0 4m14s - clusterconnect-agent-7cb8b565c7-wklsh 2/3 CrashLoopBackOff 0 1m15s - clusteridentityoperator-76d645d8bf-5qx5c 2/2 Running 0 4m15s - config-agent-65d5df564f-lffqm 1/2 CrashLoopBackOff 0 1m14s - ``` - 3. If the below certificate isn't present, the system assigned managed identity didn't get installed. - - ```console - kubectl get secret -n azure-arc -o yaml | grep name: - ``` - - ```output - name: azure-identity-certificate - ``` - This could be a transient issue. You can try deleting the Arc deployment by running the `az connectedk8s delete` command and reinstalling it. If you're consistently facing this, it could be an issue with your proxy settings. Please follow [these steps](./quickstart-connect-cluster.md#connect-using-an-outbound-proxy-server) to connect your cluster to Arc via a proxy. - 4. 
If the `clusterconnect-agent` and the `config-agent` pods are running, but the `kube-aad-proxy` pod is missing, check your pod security policies. This pod uses the `azure-arc-kube-aad-proxy-sa` service account, which doesn't have admin permissions but requires the permission to mount host path. - +To resolve this issue, try the following steps. + +1. Run the following command: + + ```console + kubectl get pods -n azure-arc + ``` + +2. Check if the `clusterconnect-agent` or the `config-agent` pods are showing `crashloopbackoff`, or if not all containers are running: + + ```output + NAME READY STATUS RESTARTS AGE + cluster-metadata-operator-664bc5f4d-chgkl 2/2 Running 0 4m14s + clusterconnect-agent-7cb8b565c7-wklsh 2/3 CrashLoopBackOff 0 1m15s + clusteridentityoperator-76d645d8bf-5qx5c 2/2 Running 0 4m15s + config-agent-65d5df564f-lffqm 1/2 CrashLoopBackOff 0 1m14s + ``` + +3. If the certificate below isn't present, the system assigned managed identity hasn't been installed. + + ```console + kubectl get secret -n azure-arc -o yaml | grep name: + ``` + + ```output + name: azure-identity-certificate + ``` + + To resolve this issue, try deleting the Arc deployment by running the `az connectedk8s delete` command and reinstalling it. If the issue continues to happen, it could be an issue with your proxy settings. In that case, [try connecting your cluster to Azure Arc via a proxy](./quickstart-connect-cluster.md#connect-using-an-outbound-proxy-server) to connect your cluster to Arc via a proxy. + +4. If the `clusterconnect-agent` and the `config-agent` pods are running, but the `kube-aad-proxy` pod is missing, check your pod security policies. This pod uses the `azure-arc-kube-aad-proxy-sa` service account, which doesn't have admin permissions but requires the permission to mount host path. ### Helm validation error -Helm `v3.3.0-rc.1` version has an [issue](https://github.com/helm/helm/pull/8527) where helm install/upgrade (used by `connectedk8s` CLI extension) results in running of all hooks leading to the following error: +Helm `v3.3.0-rc.1` version has an [issue](https://github.com/helm/helm/pull/8527) where helm install/upgrade (used by the `connectedk8s` CLI extension) results in running of all hooks leading to the following error: ```azurecli az connectedk8s connect -n AzureArcTest -g AzureArcTest @@ -186,12 +192,12 @@ To recover from this issue, follow these steps: 1. Delete the Azure Arc-enabled Kubernetes resource in the Azure portal. 2. Run the following commands on your machine: - - ```console - kubectl delete ns azure-arc - kubectl delete clusterrolebinding azure-arc-operator - kubectl delete secret sh.helm.release.v1.azure-arc.v1 - ``` + + ```console + kubectl delete ns azure-arc + kubectl delete clusterrolebinding azure-arc-operator + kubectl delete secret sh.helm.release.v1.azure-arc.v1 + ``` 3. [Install a stable version](https://helm.sh/docs/intro/install/) of Helm 3 on your machine instead of the release candidate version. 4. Run the `az connectedk8s connect` command with the appropriate values to connect the cluster to Azure Arc. @@ -226,6 +232,9 @@ az extension add --name k8s-configuration ### Flux v1 - General +> [!NOTE] +> Eventually Azure will stop supporting GitOps with Flux v1, so begin using [Flux v2](./tutorial-use-gitops-flux2.md) as soon as possible. 
+ To help troubleshoot issues with `sourceControlConfigurations` resource (Flux v1), run these az commands with `--debug` parameter specified: ```azurecli @@ -298,9 +307,9 @@ For more information, see [How do I resolve `webhook does not support dry run` e ### Flux v2 - Error installing the `microsoft.flux` extension -The `microsoft.flux` extension installs the Flux controllers and Azure GitOps agents into your Azure Arc-enabled Kubernetes or Azure Kubernetes Service (AKS) clusters. If the extension is not already installed in a cluster and you create a GitOps configuration resource for that cluster, the extension will be installed automatically. +The `microsoft.flux` extension installs the Flux controllers and Azure GitOps agents into your Azure Arc-enabled Kubernetes or Azure Kubernetes Service (AKS) clusters. If the extension isn't already installed in a cluster and you create a GitOps configuration resource for that cluster, the extension will be installed automatically. -If you experience an error during installation or if the extension is in a failed state, you can first run a script to investigate. The cluster-type parameter can be set to `connectedClusters` for an Arc-enabled cluster or `managedClusters` for an AKS cluster. The name of the `microsoft.flux` extension will be "flux" if the extension was installed automatically during creation of a GitOps configuration. Look in the "statuses" object for information. +If you experience an error during installation, or if the extension is in a failed state, run a script to investigate. The cluster-type parameter can be set to `connectedClusters` for an Arc-enabled cluster or `managedClusters` for an AKS cluster. The name of the `microsoft.flux` extension will be "flux" if the extension was installed automatically during creation of a GitOps configuration. Look in the "statuses" object for information. One example: @@ -361,16 +370,16 @@ kubectl delete namespaces flux-system ``` Some other aspects to consider: - -* For AKS cluster, assure that the subscription has the following feature flag enabled: `Microsoft.ContainerService/AKS-ExtensionManager`. + +* For an AKS cluster, assure that the subscription has the `Microsoft.ContainerService/AKS-ExtensionManager` feature flag enabled. ```azurecli az feature register --namespace Microsoft.ContainerService --name AKS-ExtensionManager ``` -* Assure that the cluster does not have any policies that restrict creation of the `flux-system` namespace or resources in that namespace. +* Assure that the cluster doesn't have any policies that restrict creation of the `flux-system` namespace or resources in that namespace. -With these actions accomplished you can either [re-create a flux configuration](./tutorial-use-gitops-flux2.md) which will install the flux extension automatically or you can re-install the flux extension manually. +With these actions accomplished, you can either [recreate a flux configuration](./tutorial-use-gitops-flux2.md), which will install the flux extension automatically, or you can reinstall the flux extension manually. ### Flux v2 - Installing the `microsoft.flux` extension in a cluster with Azure AD Pod Identity enabled @@ -386,7 +395,7 @@ The extension status also returns as "Failed". 
"{\"status\":\"Failed\",\"error\":{\"code\":\"ResourceOperationFailure\",\"message\":\"The resource operation completed with terminal provisioning state 'Failed'.\",\"details\":[{\"code\":\"ExtensionCreationFailed\",\"message\":\" error: Unable to get the status from the local CRD with the error : {Error : Retry for given duration didn't get any results with err {status not populated}}\"}]}}", ``` -The issue is that the extension-agent pod is trying to get its token from IMDS on the cluster in order to talk to the extension service in Azure; however, this token request is being intercepted by pod identity ([details here](../../aks/use-azure-ad-pod-identity.md)). +The extension-agent pod is trying to get its token from IMDS on the cluster in order to talk to the extension service in Azure, but the token request is intercepted by the [pod identity](../../aks/use-azure-ad-pod-identity.md)). The workaround is to create an `AzurePodIdentityException` that will tell Azure AD Pod Identity to ignore the token requests from flux-extension pods. @@ -403,7 +412,7 @@ spec: ## Monitoring -Azure Monitor for containers requires its DaemonSet to be run in privileged mode. To successfully set up a Canonical Charmed Kubernetes cluster for monitoring, run the following command: +Azure Monitor for Containers requires its DaemonSet to run in privileged mode. To successfully set up a Canonical Charmed Kubernetes cluster for monitoring, run the following command: ```console juju config kubernetes-worker allow-privileged=true @@ -413,7 +422,7 @@ juju config kubernetes-worker allow-privileged=true ### Old version of agents used -Usage of older version of agents where Cluster Connect feature was not yet supported will result in the following error: +Some older agent versions didn't support the Cluster Connect feature. If you use one of these versions, you may see this error: ```azurecli az connectedk8s proxy -n AzureArcTest -g AzureArcTest @@ -423,7 +432,9 @@ az connectedk8s proxy -n AzureArcTest -g AzureArcTest Hybrid connection for the target resource does not exist. Agent might not have started successfully. ``` -When this occurs, ensure that you are using `connectedk8s` Azure CLI extension of version >= 1.2.0 and [connect your cluster again](quickstart-connect-cluster.md) to Azure Arc. Also, verify that you've met all the [network prerequisites](quickstart-connect-cluster.md#meet-network-requirements) needed for Arc-enabled Kubernetes. If your cluster is behind an outbound proxy or firewall, verify that websocket connections are enabled for `*.servicebus.windows.net` which is required specifically for the [Cluster Connect](cluster-connect.md) feature. +Be sure to use the `connectedk8s` Azure CLI extension with version >= 1.2.0, then [connect your cluster again](quickstart-connect-cluster.md) to Azure Arc. Also, verify that you've met all the [network prerequisites](quickstart-connect-cluster.md#meet-network-requirements) needed for Arc-enabled Kubernetes. + +If your cluster is behind an outbound proxy or firewall, verify that websocket connections are enabled for `*.servicebus.windows.net`, which is required specifically for the [Cluster Connect](cluster-connect.md) feature. 
### Cluster Connect feature disabled @@ -441,77 +452,82 @@ To resolve this error, [enable the Cluster Connect feature](cluster-connect.md#e ## Enable custom locations using service principal -When you are connecting your cluster to Azure Arc or when you are enabling custom locations feature on an existing cluster, you may observe the following warning: +When connecting your cluster to Azure Arc or enabling custom locations on an existing cluster, you may see the following warning: ```console Unable to fetch oid of 'custom-locations' app. Proceeding without enabling the feature. Insufficient privileges to complete the operation. ``` -The above warning is observed when you have used a service principal to log into Azure and this service principal doesn't have permissions to get information of the application used by Azure Arc service. To avoid this error, execute the following steps: +This warning occurs when you use a service principal to log into Azure. The service principal doesn't have permissions to get information of the application used by Azure Arc service. To avoid this error, execute the following steps: -1. Fetch the Object ID of the Azure AD application used by Azure Arc service: +1. Sign in into Azure CLI using your user account. Fetch the Object ID of the Azure AD application used by Azure Arc service: ```azurecli az ad sp show --id bc313c14-388c-4e7d-a58e-70017303ee3b --query objectId -o tsv ``` -1. Use the `` value from above step to enable custom locations feature on the cluster: - - If you are enabling custom locations feature as part of connecting the cluster to Arc, run the following command: +1. Sign in into Azure CLI using the service principal. Use the `` value from above step to enable custom locations on the cluster: - ```azurecli - az connectedk8s connect -n -g --custom-locations-oid - ``` + * To enable custom locations when connecting the cluster to Arc, run the following command: - - If you are enabling custom locations feature on an existing Azure Arc-enabled Kubernetes cluster, run the following command: + ```azurecli + az connectedk8s connect -n -g --custom-locations-oid + ``` - ```azurecli - az connectedk8s enable-features -n -g --custom-locations-oid --features cluster-connect custom-locations - ``` + * To enable custom locations on an existing Azure Arc-enabled Kubernetes cluster, run the following command: -Once above permissions are granted, you can now proceed to [enabling the custom location feature](custom-locations.md#enable-custom-locations-on-cluster) on the cluster. + ```azurecli + az connectedk8s enable-features -n -g --custom-locations-oid --features cluster-connect custom-locations + ``` ## Azure Arc-enabled Open Service Mesh -The following troubleshooting steps provide guidance on validating the deployment of all the Open Service Mesh extension components on your cluster. +The steps below provide guidance on validating the deployment of all the Open Service Mesh (OSM) extension components on your cluster. 
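Before walking through the individual component checks below, it can help to confirm that the OSM extension itself reached a successful provisioning state; this is a sketch, and the extension instance name (`osm`) stands in for whatever name was used when the extension was installed:

```azurecli
# Sketch: check the provisioning state of the OSM cluster extension on an Arc-enabled cluster.
az k8s-extension show --resource-group <resource-group> --cluster-name <cluster-name> \
  --cluster-type connectedClusters --name osm --query provisioningState -o tsv
```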
### Check OSM Controller **Deployment** + ```bash kubectl get deployment -n arc-osm-system --selector app=osm-controller ``` -If the OSM Controller is healthy, you will get an output similar to the following output: -``` +If the OSM Controller is healthy, you'll see output similar to the following: + +```output NAME READY UP-TO-DATE AVAILABLE AGE osm-controller 1/1 1 1 59m ``` ### Check the OSM Controller **Pod** + ```bash kubectl get pods -n arc-osm-system --selector app=osm-controller ``` -If the OSM Controller is healthy, you will get an output similar to the following output: -``` +If the OSM Controller is healthy, you'll see output similar to the following: + +```output NAME READY STATUS RESTARTS AGE osm-controller-b5bd66db-wglzl 0/1 Evicted 0 61m osm-controller-b5bd66db-wvl9w 1/1 Running 0 31m ``` -Even though we had one controller _evicted_ at some point, we have another one which is `READY 1/1` and `Running` with `0` restarts. -If the column `READY` is anything other than `1/1` the service mesh would be in a broken state. -Column `READY` with `0/1` indicates the control plane container is crashing - we need to get logs. Use the following command to inspect controller logs: +Even though one controller was _evicted_ at some point, there's another which is `READY 1/1` and `Running` with `0` restarts. If the column `READY` is anything other than `1/1`, the service mesh would be in a broken state. Column `READY` with `0/1` indicates the control plane container is crashing. Use the following command to inspect controller logs: + ```bash kubectl logs -n arc-osm-system -l app=osm-controller ``` + Column `READY` with a number higher than 1 after the `/` would indicate that there are sidecars installed. OSM Controller would most likely not work with any sidecars attached to it. ### Check OSM Controller **Service** + ```bash kubectl get service -n arc-osm-system osm-controller ``` -If the OSM Controller is healthy, you will have the following output: -``` +If the OSM Controller is healthy, you'll see the following output: + +```output NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE osm-controller ClusterIP 10.0.31.254 15128/TCP,9092/TCP 67m ``` @@ -520,36 +536,42 @@ osm-controller ClusterIP 10.0.31.254 15128/TCP,9092/TCP 67 > The `CLUSTER-IP` would be different. The service `NAME` and `PORT(S)` must be the same as seen in the output. ### Check OSM Controller **Endpoints** + ```bash kubectl get endpoints -n arc-osm-system osm-controller ``` -If the OSM Controller is healthy, you will get an output similar to the following output: -``` +If the OSM Controller is healthy, you'll see output similar to the following: + +```output NAME ENDPOINTS AGE osm-controller 10.240.1.115:9092,10.240.1.115:15128 69m ``` -If the user's cluster has no `ENDPOINTS` for `osm-controller` this would indicate that the control plane is unhealthy. This may be caused by the OSM Controller pod crashing, or never deployed correctly. +If the user's cluster has no `ENDPOINTS` for `osm-controller`, the control plane is unhealthy. This unhealthy state may be caused by the OSM Controller pod crashing, or the pod may never have been deployed correctly. 
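If the endpoints are missing, the pod description and recent namespace events usually show why the controller isn't becoming ready. For example, using the same label selector as the checks above:

```bash
# Look for scheduling, image pull, or probe failures on the controller pod
kubectl describe pod -n arc-osm-system -l app=osm-controller

# Review recent events in the namespace, oldest first
kubectl get events -n arc-osm-system --sort-by=.lastTimestamp
```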
### Check OSM Injector **Deployment** + ```bash kubectl get deployments -n arc-osm-system osm-injector ``` -If the OSM Injector is healthy, you will get an output similar to the following output: -``` +If the OSM Injector is healthy, you'll see output similar to the following: + +```output NAME READY UP-TO-DATE AVAILABLE AGE osm-injector 1/1 1 1 73m ``` ### Check OSM Injector **Pod** + ```bash kubectl get pod -n arc-osm-system --selector app=osm-injector ``` -If the OSM Injector is healthy, you will get an output similar to the following output: -``` +If the OSM Injector is healthy, you'll see output similar to the following: + +```output NAME READY STATUS RESTARTS AGE osm-injector-5986c57765-vlsdk 1/1 Running 0 73m ``` @@ -557,12 +579,14 @@ osm-injector-5986c57765-vlsdk 1/1 Running 0 73m The `READY` column must be `1/1`. Any other value would indicate an unhealthy osm-injector pod. ### Check OSM Injector **Service** + ```bash kubectl get service -n arc-osm-system osm-injector ``` -If the OSM Injector is healthy, you will get an output similar to the following output: -``` +If the OSM Injector is healthy, you'll see output similar to the following: + +```output NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE osm-injector ClusterIP 10.0.39.54 9090/TCP 75m ``` @@ -570,11 +594,13 @@ osm-injector ClusterIP 10.0.39.54 9090/TCP 75m Ensure the IP address listed for `osm-injector` service is `9090`. There should be no `EXTERNAL-IP`. ### Check OSM Injector **Endpoints** + ```bash kubectl get endpoints -n arc-osm-system osm-injector ``` -If the OSM Injector is healthy, you will get an output similar to the following output: +If the OSM Injector is healthy, you'll see output similar to the following: + ``` NAME ENDPOINTS AGE osm-injector 10.240.1.172:9090 75m @@ -582,14 +608,15 @@ osm-injector 10.240.1.172:9090 75m For OSM to function, there must be at least one endpoint for `osm-injector`. The IP address of your OSM Injector endpoints will be different. The port `9090` must be the same. 
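If any of these injector checks fail, the injector logs are usually the quickest way to find the cause. A minimal sketch, mirroring the controller log command shown earlier:

```bash
kubectl logs -n arc-osm-system -l app=osm-injector
```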
- ### Check **Validating** and **Mutating** webhooks + ```bash kubectl get ValidatingWebhookConfiguration --selector app=osm-controller ``` -If the Validating Webhook is healthy, you will get an output similar to the following output: -``` +If the **Validating** webhook is healthy, you'll see output similar to the following: + +```output NAME WEBHOOKS AGE osm-validator-mesh-osm 1 81m ``` @@ -598,19 +625,21 @@ osm-validator-mesh-osm 1 81m kubectl get MutatingWebhookConfiguration --selector app=osm-injector ``` +If the **Mutating** webhook is healthy, you'll see output similar to the following: -If the Mutating Webhook is healthy, you will get an output similar to the following output: -``` +```output NAME WEBHOOKS AGE arc-osm-webhook-osm 1 102m ``` -Check for the service and the CA bundle of the **Validating** webhook +Check for the service and the CA bundle of the **Validating** webhook by using the following command: + ```bash kubectl get ValidatingWebhookConfiguration osm-validator-mesh-osm -o json | jq '.webhooks[0].clientConfig.service' ``` -A well configured Validating Webhook Configuration would have the following output: +A well configured **Validating** webhook configuration will have output similar to the following: + ```json { "name": "osm-config-validator", @@ -620,12 +649,13 @@ A well configured Validating Webhook Configuration would have the following outp } ``` -Check for the service and the CA bundle of the **Mutating** webhook +Check for the service and the CA bundle of the **Mutating** webhook by using the following command: + ```bash kubectl get MutatingWebhookConfiguration arc-osm-webhook-osm -o json | jq '.webhooks[0].clientConfig.service' ``` -A well configured Mutating Webhook Configuration would have the following output: +A well configured **Mutating** webhook configuration will have output similar to the following: ``` { "name": "osm-injector", @@ -635,8 +665,7 @@ A well configured Mutating Webhook Configuration would have the following output } ``` - -Check whether OSM Controller has given the Validating (or Mutating) Webhook a CA Bundle by using the following command: +Check whether OSM Controller has given the **Validating** (or **Mutating**) webhook a CA Bundle by using the following command: ```bash kubectl get ValidatingWebhookConfiguration osm-validator-mesh-osm -o json | jq -r '.webhooks[0].clientConfig.caBundle' | wc -c @@ -647,20 +676,22 @@ kubectl get MutatingWebhookConfiguration arc-osm-webhook-osm -o json | jq -r '.w ``` Example output: + ```bash 1845 ``` -The number in the output indicates the number of bytes, or the size of the CA Bundle. If this is empty, 0, or some number under a 1000, it would indicate that the CA Bundle is not correctly provisioned. Without a correct CA Bundle, the ValidatingWebhook would throw an error. + +The number in the output indicates the number of bytes, or the size of the CA Bundle. If this is empty, 0, or a number under 1000, the CA Bundle is not correctly provisioned. Without a correct CA Bundle, the `ValidatingWebhook` will throw an error. 
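If you want more than a byte count, you can decode the bundle and confirm that it parses as a certificate. This sketch assumes `jq`, `base64`, and `openssl` are available on the machine where you run `kubectl`; if the bundle holds more than one certificate, only the first is printed:

```bash
# Decode the Validating webhook CA bundle and print the certificate subject and validity window
kubectl get ValidatingWebhookConfiguration osm-validator-mesh-osm -o json \
  | jq -r '.webhooks[0].clientConfig.caBundle' \
  | base64 -d \
  | openssl x509 -noout -subject -dates
```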
### Check the `osm-mesh-config` resource -Check for the existence: +Check for the existence of the resource: ```azurecli-interactive kubectl get meshconfig osm-mesh-config -n arc-osm-system ``` -Check the content of the OSM MeshConfig +Check the content of the OSM MeshConfig: ```azurecli-interactive kubectl get meshconfig osm-mesh-config -n arc-osm-system -o yaml @@ -739,60 +770,64 @@ metadata: | spec.featureFlags.enableIngressBackendPolicy | bool | `"true"` | `kubectl patch meshconfig osm-mesh-config -n arc-osm-system -p '{"spec":{"featureFlags":{"enableIngressBackendPolicy":"true"}}}' --type=merge` | | spec.featureFlags.enableEnvoyActiveHealthChecks | bool | `"false"` | `kubectl patch meshconfig osm-mesh-config -n arc-osm-system -p '{"spec":{"featureFlags":{"enableEnvoyActiveHealthChecks":"false"}}}' --type=merge` | -### Check Namespaces +### Check namespaces >[!Note] ->The arc-osm-system namespace will never participate in a service mesh and will never be labeled and/or annotated with the key/values below. +>The arc-osm-system namespace will never participate in a service mesh and will never be labeled or annotated with the key/values below. -We use the `osm namespace add` command to join namespaces to a given service mesh. -When a kubernetes namespace is part of the mesh, the following must be true: +We use the `osm namespace add` command to join namespaces to a given service mesh. When a Kubernetes namespace is part of the mesh, confirm the following: View the annotations of the namespace `bookbuyer`: + ```bash kubectl get namespace bookbuyer -o json | jq '.metadata.annotations' ``` The following annotation must be present: + ``` { "openservicemesh.io/sidecar-injection": "enabled" } ``` - View the labels of the namespace `bookbuyer`: ```bash kubectl get namespace bookbuyer -o json | jq '.metadata.labels' ``` The following label must be present: + ``` { "openservicemesh.io/monitored-by": "osm" } ``` -Note that if you are not using `osm` CLI, you could also manually add these annotations to your namespaces. If a namespace is not annotated with `"openservicemesh.io/sidecar-injection": "enabled"` or not labeled with `"openservicemesh.io/monitored-by": "osm"` the OSM Injector will not add Envoy sidecars. + +If you aren't using `osm` CLI, you could also manually add these annotations to your namespaces. If a namespace isn't annotated with `"openservicemesh.io/sidecar-injection": "enabled"`, or isn't labeled with `"openservicemesh.io/monitored-by": "osm"`, the OSM Injector will not add Envoy sidecars. >[!Note] >After `osm namespace add` is called, only **new** pods will be injected with an Envoy sidecar. Existing pods must be restarted with `kubectl rollout restart deployment` command. - ### Verify the SMI CRDs -Check whether the cluster has the required CRDs: + +Check whether the cluster has the required Custom Resource Definitions (CRDs) by using the following command: + ```bash kubectl get crds ``` -Ensure that the CRDs correspond to the versions available in the release branch. For example, if you are using OSM-Arc v1.0.0-1, navigate to the [SMI supported versions page](https://docs.openservicemesh.io/docs/overview/smi/) and select v1.0 from the Releases dropdown to check which CRDs versions are in use. +Ensure that the CRDs correspond to the versions available in the release branch. 
For example, if you're using OSM-Arc v1.0.0-1, navigate to the [SMI supported versions page](https://docs.openservicemesh.io/docs/overview/smi/) and select v1.0 from the Releases dropdown to check which CRDs versions are in use. Get the versions of the CRDs installed with the following command: + ```bash for x in $(kubectl get crds --no-headers | awk '{print $1}' | grep 'smi-spec.io'); do kubectl get crd $x -o json | jq -r '(.metadata.name, "----" , .spec.versions[].name, "\n")' done ``` -If CRDs are missing, use the following commands to install them on the cluster. If you are using a version of OSM-Arc that is not v1.0, ensure that you replace the version in the command (ex: v1.1.0 would be release-v1.1). +If CRDs are missing, use the following commands to install them on the cluster. If you're using a version of OSM-Arc that's not v1.0, ensure that you replace the version in the command (for example, v1.1.0 would be release-v1.1). ```bash kubectl apply -f https://raw.githubusercontent.com/openservicemesh/osm/release-v1.0/cmd/osm-bootstrap/crds/smi_http_route_group.yaml @@ -804,10 +839,12 @@ kubectl apply -f https://raw.githubusercontent.com/openservicemesh/osm/release-v kubectl apply -f https://raw.githubusercontent.com/openservicemesh/osm/release-v1.0/cmd/osm-bootstrap/crds/smi_traffic_split.yaml ``` -Refer to [OSM release notes](https://github.com/openservicemesh/osm/releases) to see CRD changes between releases. +To see CRD changes between releases, refer to the [OSM release notes](https://github.com/openservicemesh/osm/releases). ### Troubleshoot certificate management -Information on how OSM issues and manages certificates to Envoy proxies running on application pods can be found on the [OSM docs site](https://docs.openservicemesh.io/docs/guides/certificates/). + +For information on how OSM issues and manages certificates to Envoy proxies running on application pods, see the [OSM docs site](https://docs.openservicemesh.io/docs/guides/certificates/). ### Upgrade Envoy -When a new pod is created in a namespace monitored by the add-on, OSM will inject an [Envoy proxy sidecar](https://docs.openservicemesh.io/docs/guides/app_onboarding/sidecar_injection/) in that pod. If the envoy version needs to be updated, steps to do so can be found in the [Upgrade Guide](https://docs.openservicemesh.io/docs/guides/upgrade/#envoy) on the OSM docs site. + +When a new pod is created in a namespace monitored by the add-on, OSM will inject an [Envoy proxy sidecar](https://docs.openservicemesh.io/docs/guides/app_onboarding/sidecar_injection/) in that pod. If the Envoy version needs to be updated, follow the steps in the [Upgrade Guide](https://docs.openservicemesh.io/docs/guides/upgrade/#envoy) on the OSM docs site. 
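Keep in mind that existing pods keep their current sidecar until they're recreated, so after an Envoy update you typically roll the workloads in each monitored namespace. For example, assuming a deployment named `bookbuyer` in the `bookbuyer` namespace used earlier in this article:

```bash
kubectl rollout restart deployment bookbuyer -n bookbuyer
```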
diff --git a/articles/azure-arc/kubernetes/tutorial-akv-secrets-provider.md b/articles/azure-arc/kubernetes/tutorial-akv-secrets-provider.md index 07b3f54cc6991..7b6de3cf9c883 100644 --- a/articles/azure-arc/kubernetes/tutorial-akv-secrets-provider.md +++ b/articles/azure-arc/kubernetes/tutorial-akv-secrets-provider.md @@ -1,69 +1,75 @@ --- -title: Azure Key Vault Secrets Provider extension -description: Tutorial for setting up Azure Key Vault provider for Secrets Store CSI Driver interface as an extension on Azure Arc enabled Kubernetes cluster +title: Use Azure Key Vault Secrets Provider extension to fetch secrets into Azure Arc-enabled Kubernetes clusters +description: Learn how to set up the Azure Key Vault Provider for Secrets Store CSI Driver interface as an extension on an Azure Arc-enabled Kubernetes cluster services: azure-arc ms.service: azure-arc -ms.date: 5/13/2022 +ms.date: 5/26/2022 ms.topic: article author: mayurigupta13 ms.author: mayg --- -# Using Azure Key Vault Secrets Provider extension to fetch secrets into Arc clusters +# Use the Azure Key Vault Secrets Provider extension to fetch secrets into Azure Arc-enabled Kubernetes clusters -The Azure Key Vault Provider for Secrets Store CSI Driver allows for the integration of Azure Key Vault as a secrets store with a Kubernetes cluster via a [CSI volume](https://kubernetes-csi.github.io/docs/). +The Azure Key Vault Provider for Secrets Store CSI Driver allows for the integration of Azure Key Vault as a secrets store with a Kubernetes cluster via a [CSI volume](https://kubernetes-csi.github.io/docs/). For Azure Arc-enabled Kubernetes clusters, you can install the Azure Key Vault Secrets Provider extension to fetch secrets. -## Prerequisites -1. Ensure you have met all the common prerequisites for cluster extensions listed [here](extensions.md#prerequisites). -2. Use az k8s-extension CLI version >= v0.4.0 - -### Support limitations for Azure Key Vault (AKV) secrets provider extension -- Following Kubernetes distributions are currently supported - - Cluster API Azure - - Azure Kubernetes Service on Azure Stack HCI (AKS-HCI) - - Google Kubernetes Engine - - OpenShift Kubernetes Distribution - - Canonical Kubernetes Distribution - - Elastic Kubernetes Service - - Tanzu Kubernetes Grid - - -## Features +Benefits of the Azure Key Vault Secrets Provider extension include the following: - Mounts secrets/keys/certs to pod using a CSI Inline volume - Supports pod portability with the SecretProviderClass CRD - Supports Linux and Windows containers - Supports sync with Kubernetes Secrets - Supports auto rotation of secrets +- Extension components are deployed to availability zones, making them zone redundant +## Prerequisites -## Install AKV secrets provider extension on an Arc enabled Kubernetes cluster +- A cluster with a supported Kubernetes distribution that has already been [connected to Azure Arc](quickstart-connect-cluster.md). The following Kubernetes distributions are currently supported for this scenario: + - Cluster API Azure + - Azure Kubernetes Service on Azure Stack HCI (AKS-HCI) + - Google Kubernetes Engine + - OpenShift Kubernetes Distribution + - Canonical Kubernetes Distribution + - Elastic Kubernetes Service + - Tanzu Kubernetes Grid +- Ensure you have met the [general prerequisites for cluster extensions](extensions.md#prerequisites). You must use version 0.4.0 or newer of the `k8s-extension` Azure CLI extension. A quick way to check and upgrade the extension is shown after this list.
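The following sketch uses the standard `az extension` commands to check the installed `k8s-extension` version and upgrade it in place; the `--upgrade` flag is assumed to be available in your Azure CLI version (it installs the extension if it's missing and upgrades it otherwise):

```azurecli
# Check which version of the k8s-extension CLI extension is installed (0.4.0 or newer is required)
az extension show --name k8s-extension --query version -o tsv

# Install or upgrade it in one step
az extension add --upgrade --name k8s-extension
```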
-The following steps assume that you already have a cluster with supported Kubernetes distribution connected to Azure Arc. +## Install the Azure Key Vault Secrets Provider extension on an Arc-enabled Kubernetes cluster -To deploy using Azure portal, go to the cluster's **Extensions** blade under **Settings**. Click on **+Add** button. +You can install the Azure Key Vault Secrets Provider extension on your connected cluster in the Azure portal, by using Azure CLI, or by deploying ARM template. -[![Extensions located under Settings for Arc enabled Kubernetes cluster](media/tutorial-akv-secrets-provider/extension-install-add-button.jpg)](media/tutorial-akv-secrets-provider/extension-install-add-button.jpg#lightbox) +> [!TIP] +> Only one instance of the extension can be deployed on each Azure Arc-enabled Kubernetes cluster. -From the list of available extensions, select the **Azure Key Vault Secrets Provider** to deploy the latest version of the extension. You can also choose to customize the installation through the portal by changing the defaults on **Configuration** tab. +### Azure portal -[![AKV Secrets Provider available as an extension by clicking on Add button on Extensions blade](media/tutorial-akv-secrets-provider/extension-install-new-resource.jpg)](media/tutorial-akv-secrets-provider/extension-install-new-resource.jpg#lightbox) +1. In the [Azure portal](https://ms.portal.azure.com/#home), navigate to **Kubernetes - Azure Arc** and select your cluster. +1. Select **Extensions** (under **Settings**), and then select **+ Add**. -Alternatively, you can use the CLI experience captured below. + [![Screenshot showing the Extensions page for an Arc-enabled Kubernetes cluster in the Azure portal.](media/tutorial-akv-secrets-provider/extension-install-add-button.jpg)](media/tutorial-akv-secrets-provider/extension-install-add-button.jpg#lightbox) -Set the environment variables: -```azurecli-interactive -export CLUSTER_NAME= -export RESOURCE_GROUP= -``` +1. From the list of available extensions, select **Azure Key Vault Secrets Provider** to deploy the latest version of the extension. -```azurecli-interactive -az k8s-extension create --cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --cluster-type connectedClusters --extension-type Microsoft.AzureKeyVaultSecretsProvider --name akvsecretsprovider -``` + [![Screenshot of the Azure Key Vault Secrets Provider extension in the Azure portal.](media/tutorial-akv-secrets-provider/extension-install-new-resource.jpg)](media/tutorial-akv-secrets-provider/extension-install-new-resource.jpg) + +1. Follow the prompts to deploy the extension. If needed, you can customize the installation by changing the default options on the **Configuration** tab. + +### Azure CLI + +1. Set the environment variables: + + ```azurecli-interactive + export CLUSTER_NAME= + export RESOURCE_GROUP= + ``` + +2. Install the Secrets Store CSI Driver and the Azure Key Vault Secrets Provider extension by running the following command: -The above will install the Secrets Store CSI Driver and the Azure Key Vault Provider on your cluster nodes. You should see output similar to the output shown below. It may take 3-5 minutes for the actual AKV secrets provider helm chart to get deployed to the cluster. 
+ ```azurecli-interactive + az k8s-extension create --cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --cluster-type connectedClusters --extension-type Microsoft.AzureKeyVaultSecretsProvider --name akvsecretsprovider + ``` -Note that only one instance of AKV secrets provider extension can be deployed on an Arc connected Kubernetes cluster. +You should see output similar to the example below. Note that it may take several minutes before the secrets provider Helm chart is deployed to the cluster. ```json { @@ -106,88 +112,93 @@ Note that only one instance of AKV secrets provider extension can be deployed on } ``` -### Install AKV secrets provider extension using ARM template -After connecting your cluster to Azure Arc, create a json file with the following format, making sure to update the \ value: - -```json -{ - "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - "ConnectedClusterName": { - "defaultValue": "", - "type": "String", - "metadata": { - "description": "The Connected Cluster name." - } - }, - "ExtensionInstanceName": { - "defaultValue": "akvsecretsprovider", - "type": "String", - "metadata": { - "description": "The extension instance name." - } - }, - "ExtensionVersion": { - "defaultValue": "", - "type": "String", - "metadata": { - "description": "The version of the extension type." - } - }, - "ExtensionType": { - "defaultValue": "Microsoft.AzureKeyVaultSecretsProvider", - "type": "String", - "metadata": { - "description": "The extension type." - } - }, - "ReleaseTrain": { - "defaultValue": "stable", - "type": "String", - "metadata": { - "description": "The release train." - } - } - }, - "functions": [], - "resources": [ - { - "type": "Microsoft.KubernetesConfiguration/extensions", - "apiVersion": "2021-09-01", - "name": "[parameters('ExtensionInstanceName')]", - "properties": { - "extensionType": "[parameters('ExtensionType')]", - "releaseTrain": "[parameters('ReleaseTrain')]", - "version": "[parameters('ExtensionVersion')]" - }, - "scope": "[concat('Microsoft.Kubernetes/connectedClusters/', parameters('ConnectedClusterName'))]" - } - ] -} -``` -Now set the environment variables: -```azurecli-interactive -export TEMPLATE_FILE_NAME= -export DEPLOYMENT_NAME= -``` - -Finally, run this command to install the AKV secrets provider extension through az CLI: - -```azurecli-interactive -az deployment group create --name $DEPLOYMENT_NAME --resource-group $RESOURCE_GROUP --template-file $TEMPLATE_FILE_NAME -``` -Now, you should be able to view the AKV provider resources and use the extension in your cluster. +### ARM template + +1. Create a .json file using the following format. Be sure to update the \ value to refer to your cluster. + + ```json + { + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "ConnectedClusterName": { + "defaultValue": "", + "type": "String", + "metadata": { + "description": "The Connected Cluster name." + } + }, + "ExtensionInstanceName": { + "defaultValue": "akvsecretsprovider", + "type": "String", + "metadata": { + "description": "The extension instance name." + } + }, + "ExtensionVersion": { + "defaultValue": "", + "type": "String", + "metadata": { + "description": "The version of the extension type." + } + }, + "ExtensionType": { + "defaultValue": "Microsoft.AzureKeyVaultSecretsProvider", + "type": "String", + "metadata": { + "description": "The extension type." 
+ } + }, + "ReleaseTrain": { + "defaultValue": "stable", + "type": "String", + "metadata": { + "description": "The release train." + } + } + }, + "functions": [], + "resources": [ + { + "type": "Microsoft.KubernetesConfiguration/extensions", + "apiVersion": "2021-09-01", + "name": "[parameters('ExtensionInstanceName')]", + "properties": { + "extensionType": "[parameters('ExtensionType')]", + "releaseTrain": "[parameters('ReleaseTrain')]", + "version": "[parameters('ExtensionVersion')]" + }, + "scope": "[concat('Microsoft.Kubernetes/connectedClusters/', parameters('ConnectedClusterName'))]" + } + ] + } + ``` + +1. Now set the environment variables by using the following Azure CLI command: + + ```azurecli-interactive + export TEMPLATE_FILE_NAME= + export DEPLOYMENT_NAME= + ``` + +1. Finally, run this Azure CLI command to install the Azure Key Vault Secrets Provider extension: + + ```azurecli-interactive + az deployment group create --name $DEPLOYMENT_NAME --resource-group $RESOURCE_GROUP --template-file $TEMPLATE_FILE_NAME + ``` + +You should now be able to view the secret provider resources and use the extension in your cluster. ## Validate the extension installation -Run the following command. +To confirm successful installation of the Azure Key Vault Secrets Provider extension, run the following command. ```azurecli-interactive az k8s-extension show --cluster-type connectedClusters --cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --name akvsecretsprovider ``` -You should see a JSON output similar to the output below: +You should see output similar to the example below. + ```json { "aksAssignedIdentity": null, @@ -229,113 +240,120 @@ You should see a JSON output similar to the output below: } ``` -## Create or use an existing Azure Key Vault +## Create or select an Azure Key Vault + +Next, specify the Azure Key Vault to use with your connected cluster. If you don't already have one, create a new Key Vault by using the following commands. Keep in mind that the name of your Key Vault must be globally unique. + +```azurecli +az keyvault create -n $AZUREKEYVAULT_NAME -g $AKV_RESOURCE_GROUP -l $AZUREKEYVAULT_LOCATION + +Next, set the following environment variables: -Set the environment variables: ```azurecli-interactive export AKV_RESOURCE_GROUP= export AZUREKEYVAULT_NAME= export AZUREKEYVAULT_LOCATION= ``` -You will need an Azure Key Vault resource containing the secret content. Keep in mind that the Key Vault's name must be globally unique. - -```azurecli -az keyvault create -n $AZUREKEYVAULT_NAME -g $AKV_RESOURCE_GROUP -l $AZUREKEYVAULT_LOCATION -``` - -Azure Key Vault can store keys, secrets, and certificates. In this example, we'll set a plain text secret called `DemoSecret`: +Azure Key Vault can store keys, secrets, and certificates. 
For this example, you can set a plain text secret called `DemoSecret` by using the following command: ```azurecli az keyvault secret set --vault-name $AZUREKEYVAULT_NAME -n DemoSecret --value MyExampleSecret ``` -Take note of the following properties for use in the next section: +Before you move on to the next section, take note of the following properties: -- Name of secret object in Key Vault +- Name of the secret object in Key Vault - Object type (secret, key, or certificate) -- Name of your Azure Key Vault resource -- Azure Tenant ID the Subscription belongs to +- Name of your Key Vault resource +- The Azure Tenant ID for the subscription to which the Key Vault belongs ## Provide identity to access Azure Key Vault -The Secrets Store CSI Driver on Arc connected clusters currently allows for the following methods to access an Azure Key Vault instance: -- Service Principal - -Follow the steps below to provide identity to access Azure Key Vault +Currently, the Secrets Store CSI Driver on Arc-enabled clusters can be accessed through a service principal. Follow the steps below to provide an identity that can access your Key Vault. 1. Follow the steps [here](../../active-directory/develop/howto-create-service-principal-portal.md#register-an-application-with-azure-ad-and-create-a-service-principal) to create a service principal in Azure. Take note of the Client ID and Client Secret generated in this step. -2. Provide Azure Key Vault GET permission to the created service principal by following the steps [here](../../key-vault/general/assign-access-policy.md). -3. Use the client ID and Client Secret from step 1 to create a Kubernetes secret on the Arc connected cluster: -```bash -kubectl create secret generic secrets-store-creds --from-literal clientid="" --from-literal clientsecret="" -``` -4. Label the created secret: -```bash -kubectl label secret secrets-store-creds secrets-store.csi.k8s.io/used=true -``` -5. Create a SecretProviderClass with the following YAML, filling in your values for key vault name, tenant ID, and objects to retrieve from your AKV instance: -```yml -# This is a SecretProviderClass example using service principal to access Keyvault -apiVersion: secrets-store.csi.x-k8s.io/v1 -kind: SecretProviderClass -metadata: - name: akvprovider-demo -spec: - provider: azure - parameters: - usePodIdentity: "false" - keyvaultName: - objects: | - array: - - | - objectName: DemoSecret - objectType: secret # object types: secret, key or cert - objectVersion: "" # [OPTIONAL] object versions, default to latest if empty - tenantId: # The tenant ID of the Azure Key Vault instance -``` -6. Apply the SecretProviderClass to your cluster: - -```bash -kubectl apply -f secretproviderclass.yaml -``` -7. Create a pod with the following YAML, filling in the name of your identity: - -```yml -# This is a sample pod definition for using SecretProviderClass and service principal to access Keyvault -kind: Pod -apiVersion: v1 -metadata: - name: busybox-secrets-store-inline -spec: - containers: - - name: busybox - image: k8s.gcr.io/e2e-test-images/busybox:1.29 - command: - - "/bin/sleep" - - "10000" - volumeMounts: - - name: secrets-store-inline - mountPath: "/mnt/secrets-store" - readOnly: true - volumes: - - name: secrets-store-inline - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: "akvprovider-demo" - nodePublishSecretRef: - name: secrets-store-creds -``` -8. Apply the pod to your cluster: - -```bash -kubectl apply -f pod.yaml -``` +1. 
Provide Azure Key Vault GET permission to the created service principal by following the steps [here](../../key-vault/general/assign-access-policy.md). +1. Use the client ID and Client Secret from step 1 to create a Kubernetes secret on the Arc connected cluster: + + ```bash + kubectl create secret generic secrets-store-creds --from-literal clientid="" --from-literal clientsecret="" + ``` + +1. Label the created secret: + + ```bash + kubectl label secret secrets-store-creds secrets-store.csi.k8s.io/used=true + ``` + +1. Create a SecretProviderClass with the following YAML, filling in your values for key vault name, tenant ID, and objects to retrieve from your AKV instance: + + ```yml + # This is a SecretProviderClass example using service principal to access Keyvault + apiVersion: secrets-store.csi.x-k8s.io/v1 + kind: SecretProviderClass + metadata: + name: akvprovider-demo + spec: + provider: azure + parameters: + usePodIdentity: "false" + keyvaultName: + objects: | + array: + - | + objectName: DemoSecret + objectType: secret # object types: secret, key or cert + objectVersion: "" # [OPTIONAL] object versions, default to latest if empty + tenantId: # The tenant ID of the Azure Key Vault instance + ``` + +1. Apply the SecretProviderClass to your cluster: + + ```bash + kubectl apply -f secretproviderclass.yaml + ``` + +1. Create a pod with the following YAML, filling in the name of your identity: + + ```yml + # This is a sample pod definition for using SecretProviderClass and service principal to access Keyvault + kind: Pod + apiVersion: v1 + metadata: + name: busybox-secrets-store-inline + spec: + containers: + - name: busybox + image: k8s.gcr.io/e2e-test-images/busybox:1.29 + command: + - "/bin/sleep" + - "10000" + volumeMounts: + - name: secrets-store-inline + mountPath: "/mnt/secrets-store" + readOnly: true + volumes: + - name: secrets-store-inline + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: "akvprovider-demo" + nodePublishSecretRef: + name: secrets-store-creds + ``` + +1. Apply the pod to your cluster: + + ```bash + kubectl apply -f pod.yaml + ``` ## Validate the secrets + After the pod starts, the mounted content at the volume path specified in your deployment YAML is available. + ```Bash ## show secrets held in secrets-store kubectl exec busybox-secrets-store-inline -- ls /mnt/secrets-store/ @@ -345,53 +363,53 @@ kubectl exec busybox-secrets-store-inline -- cat /mnt/secrets-store/DemoSecret ``` ## Additional configuration options -Following configuration settings are available for Azure Key Vault secrets provider extension: + +The following configuration settings are available for the Azure Key Vault Secrets Provider extension: | Configuration Setting | Default | Description | | --------- | ----------- | ----------- | -| enableSecretRotation | false | Boolean type; Periodically update the pod mount and Kubernetes Secret with the latest content from external secrets store | -| rotationPollInterval | 2m | Secret rotation poll interval duration if `enableSecretRotation` is `true`. This can be tuned based on how frequently the mounted contents for all pods and Kubernetes secrets need to be resynced to the latest | -| syncSecret.enabled | false | Boolean input; In some cases, you may want to create a Kubernetes Secret to mirror the mounted content. 
This configuration setting allows SecretProviderClass to allow secretObjects field to define the desired state of the synced Kubernetes secret objects | +| enableSecretRotation | false | Boolean type. If `true`, periodically updates the pod mount and Kubernetes Secret with the latest content from external secrets store | +| rotationPollInterval | 2m | Specifies the secret rotation poll interval duration if `enableSecretRotation` is `true`. This duration can be adjusted based on how frequently the mounted contents for all pods and Kubernetes secrets need to be resynced to the latest. | +| syncSecret.enabled | false | Boolean input. In some cases, you may want to create a Kubernetes Secret to mirror the mounted content. If `true`, `SecretProviderClass` allows the `secretObjects` field to define the desired state of the synced Kubernetes Secret objects. | -These settings can be changed either at the time of extension installation using `az k8s-extension create` command or post installation using `az k8s-extension update` command. +These settings can be specified when the extension is installed by using the `az k8s-extension create` command: -Use following command to add configuration settings while creating extension instance: ```azurecli-interactive az k8s-extension create --cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --cluster-type connectedClusters --extension-type Microsoft.AzureKeyVaultSecretsProvider --name akvsecretsprovider --configuration-settings secrets-store-csi-driver.enableSecretRotation=true secrets-store-csi-driver.rotationPollInterval=3m secrets-store-csi-driver.syncSecret.enabled=true ``` -Use following command to update configuration settings of existing extension instance: +You can also change the settings after installation by using the `az k8s-extension update` command: + ```azurecli-interactive az k8s-extension update --cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --cluster-type connectedClusters --name akvsecretsprovider --configuration-settings secrets-store-csi-driver.enableSecretRotation=true secrets-store-csi-driver.rotationPollInterval=3m secrets-store-csi-driver.syncSecret.enabled=true ``` -## Uninstall Azure Key Vault secrets provider extension -Use the below command: +## Uninstall the Azure Key Vault Secrets Provider extension + +To uninstall the extension, run the following command: + ```azurecli-interactive az k8s-extension delete --cluster-type connectedClusters --cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --name akvsecretsprovider ``` -Note that the uninstallation does not delete the CRDs that are created at the time of extension installation. -Verify that the extension instance has been deleted. +> [!NOTE] +> Uninstalling the extension doesn't delete the Custom Resource Definitions (CRDs) that were created when the extension was installed. + +To confirm that the extension instance has been deleted, run the following command: + ```azurecli-interactive az k8s-extension list --cluster-type connectedClusters --cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP ``` -This output should not include AKV secrets provider. If you don't have any other extensions installed on your cluster, it will just be an empty array. - -## Reconciliation and Troubleshooting -Azure Key Vault secrets provider extension is self-healing. 
All extension components that are deployed on the cluster at the time of extension installation are reconciled to their original state in case somebody tries to intentionally or unintentionally change or delete them. The only exception to that is CRDs. In case the CRDs are deleted, they are not reconciled. You can bring them back by using the 'az k8s-exstension create' command again and providing the existing extension instance name. -Some common issues and troubleshooting steps for Azure Key Vault secrets provider are captured in the open source documentation [here](https://azure.github.io/secrets-store-csi-driver-provider-azure/docs/troubleshooting/) for your reference. -Additional troubleshooting steps that are specific to the Secrets Store CSI Driver Interface can be referenced [here](https://secrets-store-csi-driver.sigs.k8s.io/troubleshooting.html). +If the extension was successfully removed, you won't see the Azure Key Vault Secrets Provider extension listed in the output. If you don't have any other extensions installed on your cluster, you'll see an empty array. -## Frequently asked questions +## Reconciliation and troubleshooting -### Is the extension of Azure Key Vault Secrets Provider zone redundant? +The Azure Key Vault Secrets Provider extension is self-healing. If somebody tries to change or delete an extension component that was deployed when the extension was installed, that component will be reconciled to its original state. The only exceptions are for Custom Resource Definitions (CRDs). If CRDs are deleted, they won't be reconciled. To restore deleted CRDs, use the `az k8s-extension create` command again with the existing extension instance name. -Yes, all components of Azure Key Vault Secrets Provider are deployed on availability zones and are hence zone redundant. +For more information about resolving common issues, see the open source troubleshooting guides for [Azure Key Vault provider for Secrets Store CSI driver](https://azure.github.io/secrets-store-csi-driver-provider-azure/docs/troubleshooting/) and [Secrets Store CSI Driver](https://secrets-store-csi-driver.sigs.k8s.io/troubleshooting.html). ## Next steps -> **Just want to try things out?** -> Get started quickly with an [Azure Arc Jumpstart scenario](https://aka.ms/arc-jumpstart-akv-secrets-provider) using Cluster API. +- Want to try things out? Get started quickly with an [Azure Arc Jumpstart scenario](https://aka.ms/arc-jumpstart-akv-secrets-provider) using Cluster API. +- Learn more about [Azure Key Vault](/azure/key-vault/general/overview). diff --git a/articles/azure-arc/kubernetes/tutorial-arc-enabled-open-service-mesh.md b/articles/azure-arc/kubernetes/tutorial-arc-enabled-open-service-mesh.md index 6a4f826cbd778..da55c04868fb4 100644 --- a/articles/azure-arc/kubernetes/tutorial-arc-enabled-open-service-mesh.md +++ b/articles/azure-arc/kubernetes/tutorial-arc-enabled-open-service-mesh.md @@ -2,7 +2,7 @@ title: Azure Arc-enabled Open Service Mesh description: Open Service Mesh (OSM) extension on Azure Arc-enabled Kubernetes cluster ms.service: azure-arc -ms.date: 05/02/2022 +ms.date: 05/25/2022 ms.topic: article author: mayurigupta13 ms.author: mayg @@ -26,7 +26,7 @@ Azure Arc-enabled Open Service Mesh can be deployed through Azure portal, Azure ### Current support limitations - Only one instance of Open Service Mesh can be deployed on an Azure Arc-connected Kubernetes cluster. -- Support is available for Azure Arc-enabled Open Service Mesh version v1.0.0-1 and above.
Find the latest version [here](https://github.com/Azure/osm-azure/releases). Supported release versions are appended with notes. Ignore the tags associated with intermediate releases. +- Support is available for the two most recently released minor versions of Arc-enabled Open Service Mesh. Find the latest version [here](https://github.com/Azure/osm-azure/releases). Supported release versions are appended with notes. Ignore the tags associated with intermediate releases. - The following Kubernetes distributions are currently supported: - AKS Engine - AKS on HCI diff --git a/articles/azure-arc/kubernetes/tutorial-gitops-flux2-ci-cd.md b/articles/azure-arc/kubernetes/tutorial-gitops-flux2-ci-cd.md index 65d6078069c51..6935952e5a8a3 100644 --- a/articles/azure-arc/kubernetes/tutorial-gitops-flux2-ci-cd.md +++ b/articles/azure-arc/kubernetes/tutorial-gitops-flux2-ci-cd.md @@ -93,10 +93,12 @@ Import an [application repository](./conceptual-gitops-ci-cd.md#application-repo * **arc-cicd-demo-src** application repository * URL: https://github.com/Azure/arc-cicd-demo-src * Contains the example Azure Vote App that you will deploy using GitOps. + * Import the repository with name `arc-cicd-demo-src` * **arc-cicd-demo-gitops** GitOps repository * URL: https://github.com/Azure/arc-cicd-demo-gitops * Works as a base for your cluster resources that house the Azure Vote App. + * Import the repository with name `arc-cicd-demo-gitops` Learn more about [importing Git repositories](/azure/devops/repos/git/import-git-repository). @@ -122,9 +124,9 @@ The CI/CD workflow will populate the manifest directory with extra manifests to az k8s-configuration flux create \ --name cluster-config \ --cluster-name arc-cicd-cluster \ - --namespace cluster-config \ + --namespace flux-system \ --resource-group myResourceGroup \ - -u https://dev.azure.com///arc-cicd-demo-gitops \ + -u https://dev.azure.com///_git/arc-cicd-demo-gitops \ --https-user \ --https-key \ --scope cluster \ @@ -135,6 +137,8 @@ The CI/CD workflow will populate the manifest directory with extra manifests to 1. Check the state of the deployment in Azure portal. * If successful, you'll see both `dev` and `stage` namespaces created in your cluster. + * You can also check on Azure Portal page of your K8s cluster on `GitOps` tab a configuration `cluster-config` is created. 
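You can also confirm the configuration from the command line. The following is a sketch only, reusing the names from the `flux create` command above; the exact fields returned can vary by extension version:

```azurecli
# Show the Flux configuration and its compliance state
az k8s-configuration flux show \
  --name cluster-config \
  --cluster-name arc-cicd-cluster \
  --cluster-type connectedClusters \
  --resource-group myResourceGroup
```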
+ ### Import the CI/CD pipelines @@ -144,9 +148,9 @@ The application repository contains a `.pipeline` folder with the pipelines you' | Pipeline file name | Description | | ------------- | ------------- | -| [`.pipelines/az-vote-pr-pipeline.yaml`](https://github.com/Azure/arc-cicd-demo-src/blob/FluxV2/.pipelines/az-vote-pr-pipeline.yaml) | The application PR pipeline, named **arc-cicd-demo-src PR** | -| [`.pipelines/az-vote-ci-pipeline.yaml`](https://github.com/Azure/arc-cicd-demo-src/blob/FluxV2/.pipelines/az-vote-ci-pipeline.yaml) | The application CI pipeline, named **arc-cicd-demo-src CI** | -| [`.pipelines/az-vote-cd-pipeline.yaml`](https://github.com/Azure/arc-cicd-demo-src/blob/FluxV2/.pipelines/az-vote-cd-pipeline.yaml) | The application CD pipeline, named **arc-cicd-demo-src CD** | +| [`.pipelines/az-vote-pr-pipeline.yaml`](https://github.com/Azure/arc-cicd-demo-src/blob/master/.pipelines/az-vote-pr-pipeline.yaml) | The application PR pipeline, named **arc-cicd-demo-src PR** | +| [`.pipelines/az-vote-ci-pipeline.yaml`](https://github.com/Azure/arc-cicd-demo-src/blob/master/.pipelines/az-vote-ci-pipeline.yaml) | The application CI pipeline, named **arc-cicd-demo-src CI** | +| [`.pipelines/az-vote-cd-pipeline.yaml`](https://github.com/Azure/arc-cicd-demo-src/blob/master/.pipelines/az-vote-cd-pipeline.yaml) | The application CD pipeline, named **arc-cicd-demo-src CD** | ### Connect Azure Container Registry to Azure DevOps During the CI process, you'll deploy your application containers to a registry. Start by creating an Azure service connection: @@ -192,6 +196,9 @@ CD pipeline manipulates PRs in the GitOps repository. It needs a Service Connect --set gitOpsAppURL=https://dev.azure.com///_git/arc-cicd-demo-gitops \ --set orchestratorPAT= ``` +> [!NOTE] +> `Azure Repos PAT token` should have `Build: Read & executee` and `Code: Read` permissions. + 3. Configure Flux to send notifications to GitOps connector: ```console cat < + name: cluster-config - kind: Kustomization - name: + name: cluster-config-cluster-config providerRef: name: gitops-connector --- @@ -259,17 +266,20 @@ For the details on installation, refer to the [GitOps Connector](https://github. You're now ready to deploy to the `dev` and `stage` environments. +#### Create environments + +In Azure DevOps project create `Dev` and `Stage` environments. See [Create and target an environment](/azure/devops/pipelines/process/environments) for more details. + ### Give more permissions to the build service The CD pipeline uses the security token of the running build to authenticate to the GitOps repository. More permissions are needed for the pipeline to create a new branch, push changes, and create pull requests. 1. Go to `Project settings` from the Azure DevOps project main page. 1. Select `Repos/Repositories`. -1. Select ``. 1. Select `Security`. -1. For the ` Build Service ()`, allow `Contribute`, `Contribute to pull requests`, and `Create branch`. +1. For the ` Build Service ()` and for the `Project Collection Build Service ()` (type in the search field, if it doesn't show up), allow `Contribute`, `Contribute to pull requests`, and `Create branch`. 1. Go to `Pipelines/Settings` -1. Switch off `Limit job authorization scope to referenced Azure DevOps repositories` +1. 
Switch off `Protect access to repositories in YAML pipelines` option For more information, see: - [Grant VC Permissions to the Build Service](/azure/devops/pipelines/scripts/git-commands?preserve-view=true&tabs=yaml&view=azure-devops#version-control ) @@ -390,7 +400,7 @@ A successful CI pipeline run triggers the CD pipeline to complete the deployment * View the Azure Vote app in your browser at `http://localhost:8080/` and verify the voting choices have changed to Tabs vs Spaces. 1. Repeat steps 1-7 for the `stage` environment. -Your deployment is now complete. This ends the CI/CD workflow. Refer to the [Azure DevOps GitOps Flow diagram](https://github.com/Azure/arc-cicd-demo-src/blob/FluxV2/docs/azdo-gitops.md) in the application repository that explains in details the steps and techniques implemented in the CI/CD pipelines used in this tutorial. +Your deployment is now complete. This ends the CI/CD workflow. Refer to the [Azure DevOps GitOps Flow diagram](https://github.com/Azure/arc-cicd-demo-src/blob/master/docs/azdo-gitops.md) in the application repository that explains in details the steps and techniques implemented in the CI/CD pipelines used in this tutorial. ## Implement CI/CD with GitHub @@ -459,6 +469,7 @@ The CI/CD workflow will populate the manifest directory with extra manifests to --set gitOpsAppURL=https://github.com//arc-cicd-demo-gitops/commit \ --set orchestratorPAT= ``` + 3. Configure Flux to send notifications to GitOps connector: ```console cat < + name: cluster-config - kind: Kustomization - name: + name: cluster-config-cluster-config providerRef: name: gitops-connector --- @@ -566,7 +577,7 @@ The CD Stage workflow: Once the manifests PR to the Stage environment is merged and Flux successfully applied all the changes, it updates Git commit status in the GitOps repository. -Your deployment is now complete. This ends the CI/CD workflow. Refer to the [GitHub GitOps Flow diagram](https://github.com/Azure/arc-cicd-demo-src/blob/FluxV2/docs/azdo-gitops-githubfluxv2.md) in the application repository that explains in details the steps and techniques implemented in the CI/CD workflows used in this tutorial. +Your deployment is now complete. This ends the CI/CD workflow. Refer to the [GitHub GitOps Flow diagram](https://github.com/Azure/arc-cicd-demo-src/blob/master/docs/azdo-gitops-githubfluxv2.md) in the application repository that explains in details the steps and techniques implemented in the CI/CD workflows used in this tutorial. ## Clean up resources diff --git a/articles/azure-arc/kubernetes/tutorial-use-gitops-flux2.md b/articles/azure-arc/kubernetes/tutorial-use-gitops-flux2.md index 815ae35374d78..423acebe6c7aa 100644 --- a/articles/azure-arc/kubernetes/tutorial-use-gitops-flux2.md +++ b/articles/azure-arc/kubernetes/tutorial-use-gitops-flux2.md @@ -1,10 +1,10 @@ --- title: "Tutorial: Use GitOps with Flux v2 in Azure Arc-enabled Kubernetes or Azure Kubernetes Service (AKS) clusters" description: "This tutorial shows how to use GitOps with Flux v2 to manage configuration and application deployment in Azure Arc and AKS clusters." 
-keywords: "GitOps, Flux, Kubernetes, K8s, Azure, Arc, AKS, Azure Kubernetes Service, containers, devops" +keywords: "GitOps, Flux, Flux v2, Kubernetes, K8s, Azure, Arc, AKS, Azure Kubernetes Service, containers, devops" services: azure-arc, aks ms.service: azure-arc -ms.date: 05/24/2022 +ms.date: 06/08/2022 ms.topic: tutorial ms.custom: template-tutorial, devx-track-azurecli --- @@ -37,10 +37,11 @@ To manage GitOps through the Azure CLI or the Azure portal, you need the followi ### For Azure Kubernetes Service clusters -* An AKS cluster that's up and running. +* An MSI-based AKS cluster that's up and running. >[!IMPORTANT] - >Ensure that the AKS cluster is created with MSI (not SPN), because the `microsoft.flux` extension won't work with SPN-based AKS clusters. + >**Ensure that the AKS cluster is created with MSI** (not SPN), because the `microsoft.flux` extension won't work with SPN-based AKS clusters. + >For new AKS clusters created with “az aks create”, the cluster will be MSI-based by default. For already created SPN-based clusters that need to be converted to MSI run “az aks update -g $RESOURCE_GROUP -n $CLUSTER_NAME --enable-managed-identity”. For more information, refer to [managed identity docs](../../aks/use-managed-identity.md). * Read and write permissions on the `Microsoft.ContainerService/managedClusters` resource type. * Registration of your subscription with the `AKS-ExtensionManager` feature flag. Use the following command: @@ -616,6 +617,21 @@ Here's an example for including the [Flux image-reflector and image-automation c az k8s-extension create -g -c -t --name flux --extension-type microsoft.flux --config image-automation-controller.enabled=true image-reflector-controller.enabled=true ``` +### Red Hat OpenShift onboarding guidance +Flux controllers require a **nonroot** [Security Context Constraint](https://access.redhat.com/documentation/en-us/openshift_container_platform/4.2/html/authentication/managing-pod-security-policies) to properly provision pods on the cluster. These constraints must be added to the cluster prior to onboarding of the `microsoft.flux` extension. + +```console +NS="flux-system" +oc adm policy add-scc-to-user nonroot system:serviceaccount:$NS:kustomize-controller +oc adm policy add-scc-to-user nonroot system:serviceaccount:$NS:helm-controller +oc adm policy add-scc-to-user nonroot system:serviceaccount:$NS:source-controller +oc adm policy add-scc-to-user nonroot system:serviceaccount:$NS:notification-controller +oc adm policy add-scc-to-user nonroot system:serviceaccount:$NS:image-automation-controller +oc adm policy add-scc-to-user nonroot system:serviceaccount:$NS:image-reflector-controller +``` + +For more information on OpenShift guidance for onboarding Flux, refer to the [Flux documentation](https://fluxcd.io/docs/use-cases/openshift/#openshift-setup). + ## Work with parameters For a description of all parameters that Flux supports, see the [official Flux documentation](https://fluxcd.io/docs/). Flux in Azure doesn't support all parameters yet. Let us know if a parameter you need is missing from the Azure implementation. diff --git a/articles/azure-arc/overview.md b/articles/azure-arc/overview.md index 7323b8299c692..31e2140867a97 100644 --- a/articles/azure-arc/overview.md +++ b/articles/azure-arc/overview.md @@ -85,4 +85,5 @@ For information, see the [Azure pricing page](https://azure.microsoft.com/pricin * Learn about [Azure Arc-enabled data services](https://azure.microsoft.com/services/azure-arc/hybrid-data-services/). 
* Learn about [SQL Server on Azure Arc-enabled servers](/sql/sql-server/azure-arc/overview). * Learn about [Azure Arc-enabled VMware vSphere](vmware-vsphere/overview.md) and [Azure Arc-enabled Azure Stack HCI](/azure-stack/hci/manage/azure-arc-enabled-virtual-machines) -* Experience Azure Arc-enabled services by exploring the [Jumpstart proof of concept](https://azurearcjumpstart.io/azure_arc_jumpstart/). \ No newline at end of file +* Learn about [Azure Arc-enabled System Center Virtual Machine Manager](system-center-virtual-machine-manager/overview.md) +* Experience Azure Arc-enabled services by exploring the [Jumpstart proof of concept](https://azurearcjumpstart.io/azure_arc_jumpstart/). diff --git a/articles/azure-arc/servers/agent-overview.md b/articles/azure-arc/servers/agent-overview.md index 5f7124c0d3f56..a77ded54e8d62 100644 --- a/articles/azure-arc/servers/agent-overview.md +++ b/articles/azure-arc/servers/agent-overview.md @@ -1,7 +1,7 @@ --- title: Overview of the Azure Connected Machine agent description: This article provides a detailed overview of the Azure Arc-enabled servers agent available, which supports monitoring virtual machines hosted in hybrid environments. -ms.date: 03/14/2022 +ms.date: 06/06/2022 ms.topic: conceptual ms.custom: devx-track-azurepowershell --- @@ -51,7 +51,19 @@ Metadata information about a connected machine is collected after the Connected * Hardware manufacturer * Hardware model * Cloud provider -* Amazon Web Services (AWS) account ID, instance ID and region (if running in AWS) +* Amazon Web Services (AWS) metadata, when running in AWS: + * Account ID + * Instance ID + * Region +* Google Cloud Platform (GCP) metadata, when running in GCP: + * Instance ID + * Image + * Machine type + * OS + * Project ID + * Project number + * Service accounts + * Zone The following metadata information is requested by the agent from Azure: diff --git a/articles/azure-arc/servers/agent-release-notes-archive.md b/articles/azure-arc/servers/agent-release-notes-archive.md index daa546093f945..3f6e4481ca816 100644 --- a/articles/azure-arc/servers/agent-release-notes-archive.md +++ b/articles/azure-arc/servers/agent-release-notes-archive.md @@ -2,7 +2,7 @@ title: Archive for What's new with Azure Arc-enabled servers agent description: The What's new release notes in the Overview section for Azure Arc-enabled servers agent contains six months of activity. Thereafter, the items are removed from the main article and put into this article. ms.topic: overview -ms.date: 05/24/2022 +ms.date: 06/06/2022 ms.custom: references_regions --- @@ -16,6 +16,12 @@ The Azure Connected Machine agent receives improvements on an ongoing basis. Thi - Known issues - Bug fixes +## Version 1.14 - January 2022 + +### Fixed + +- A state corruption issue in the extension manager that could cause extension operations to get stuck in transient states has been fixed. Customers running agent version 1.13 are encouraged to upgrade to version 1.14 as soon as possible. If you continue to have issues with extensions after upgrading the agent, [submit a support ticket](https://portal.azure.com/#blade/Microsoft_Azure_Support/HelpAndSupportBlade/newsupportrequest). 
+ ## Version 1.13 - November 2021 ### Known issues diff --git a/articles/azure-arc/servers/agent-release-notes.md b/articles/azure-arc/servers/agent-release-notes.md index 1183bcee3fa9c..8060a57807851 100644 --- a/articles/azure-arc/servers/agent-release-notes.md +++ b/articles/azure-arc/servers/agent-release-notes.md @@ -2,7 +2,7 @@ title: What's new with Azure Arc-enabled servers agent description: This article has release notes for Azure Arc-enabled servers agent. For many of the summarized issues, there are links to more details. ms.topic: overview -ms.date: 05/24/2022 +ms.date: 06/06/2022 ms.custom: references_regions --- @@ -16,6 +16,17 @@ The Azure Connected Machine agent receives improvements on an ongoing basis. To This page is updated monthly, so revisit it regularly. If you're looking for items older than six months, you can find them in [archive for What's new with Azure Arc-enabled servers agent](agent-release-notes-archive.md). +## Version 1.19 - June 2022 + +### New features + +- When installed on a Google Compute Engine virtual machine, the agent will now detect and report Google Cloud metadata in the "detected properties" of the Azure Arc-enabled servers resource. [Learn more](agent-overview.md#instance-metadata) about the new metadata. + +### Fixed + +- An issue that could cause the extension manager to hang during extension installation, update, and removal operations has been resolved. +- Improved support for TLS 1.3 + ## Version 1.18 - May 2022 ### New features @@ -42,7 +53,7 @@ This page is updated monthly, so revisit it regularly. If you're looking for ite - If you attempt to run `azcmagent connect` on a server that is already connected to Azure, the resource ID is now printed to the console to help you locate the resource in Azure. - The `azcmagent connect` timeout has been extended to 10 minutes. -- `azcmagent show` no longer prints the private link scope ID. You can check if the server is associated with an Azure Arc private link scope by reviewing the machine details in the [Azure portal](https://portal.azure.com/#blade/Microsoft_Azure_HybridCompute/AzureArcCenterBlade/servers), [CLI](/cli/azure/connectedmachine?view=azure-cli-latest#az-connectedmachine-show), [PowerShell](/powershell/module/az.connectedmachine/get-azconnectedmachine), or [REST API](/rest/api/hybridcompute/machines/get). +- `azcmagent show` no longer prints the private link scope ID. You can check if the server is associated with an Azure Arc private link scope by reviewing the machine details in the [Azure portal](https://portal.azure.com/#blade/Microsoft_Azure_HybridCompute/AzureArcCenterBlade/servers), [CLI](/cli/azure/connectedmachine?view=azure-cli-latest#az-connectedmachine-show&preserve-view=true), [PowerShell](/powershell/module/az.connectedmachine/get-azconnectedmachine), or [REST API](/rest/api/hybridcompute/machines/get). - `azcmagent logs` collects only the 2 most recent logs for each service to reduce ZIP file size. - `azcmagent logs` collects Guest Configuration logs again. @@ -83,12 +94,6 @@ This page is updated monthly, so revisit it regularly. If you're looking for ite - Extended the device login timeout to 5 minutes - Removed resource constraints for Azure Monitor Agent to support high throughput scenarios -## Version 1.14 - January 2022 - -### Fixed - -- A state corruption issue in the extension manager that could cause extension operations to get stuck in transient states has been fixed. 
Customers running agent version 1.13 are encouraged to upgrade to version 1.14 as soon as possible. If you continue to have issues with extensions after upgrading the agent, [submit a support ticket](https://portal.azure.com/#blade/Microsoft_Azure_Support/HelpAndSupportBlade/newsupportrequest). - ## Next steps - Before evaluating or enabling Azure Arc-enabled servers across multiple hybrid machines, review [Connected Machine agent overview](agent-overview.md) to understand requirements, technical details about the agent, and deployment methods. diff --git a/articles/azure-arc/servers/breadcrumb/toc.yml b/articles/azure-arc/servers/breadcrumb/toc.yml index be9eac111ef3d..ea45194ad7197 100644 --- a/articles/azure-arc/servers/breadcrumb/toc.yml +++ b/articles/azure-arc/servers/breadcrumb/toc.yml @@ -9,3 +9,6 @@ - name: Arc-enabled servers tocHref: /azure/defender-for-cloud topicHref: /azure/azure-arc/servers + - name: Azure Arc + tocHref: /windows-server/manage/windows-admin-center/azure + topicHref: /azure/azure-arc/servers diff --git a/articles/azure-arc/servers/index.yml b/articles/azure-arc/servers/index.yml index 520c7201a9414..20a2eb8ab3197 100644 --- a/articles/azure-arc/servers/index.yml +++ b/articles/azure-arc/servers/index.yml @@ -100,4 +100,4 @@ landingContent: - text: Microsoft Sentinel url: scenario-onboard-azure-sentinel.md - text: Microsoft Defender for Cloud - url: /azure/defender-for-cloud/quickstart-onboard-machines?toc=%2Fazure%2Fazure-arc%2Fservers%2Ftoc.json&bc=%2Fazure%2Fazure-arc%2Fservers%2Fbreadcrumb%2Ftoc.json&pivots=azure-arc + url: ../../defender-for-cloud/quickstart-onboard-machines.md?bc=%2fazure%2fazure-arc%2fservers%2fbreadcrumb%2ftoc.json&pivots=azure-arc&toc=%2fazure%2fazure-arc%2fservers%2ftoc.json \ No newline at end of file diff --git a/articles/azure-arc/servers/learn/media/quick-enable-hybrid-vm/add-single-server-expanded.png b/articles/azure-arc/servers/learn/media/quick-enable-hybrid-vm/add-single-server-expanded.png new file mode 100644 index 0000000000000..3ba3aa0c0f6d6 Binary files /dev/null and b/articles/azure-arc/servers/learn/media/quick-enable-hybrid-vm/add-single-server-expanded.png differ diff --git a/articles/azure-arc/servers/learn/media/quick-enable-hybrid-vm/add-single-server.png b/articles/azure-arc/servers/learn/media/quick-enable-hybrid-vm/add-single-server.png new file mode 100644 index 0000000000000..2181604fc6046 Binary files /dev/null and b/articles/azure-arc/servers/learn/media/quick-enable-hybrid-vm/add-single-server.png differ diff --git a/articles/azure-arc/servers/learn/quick-enable-hybrid-vm.md b/articles/azure-arc/servers/learn/quick-enable-hybrid-vm.md index 0a9499ef29c33..8ad34e96caebe 100644 --- a/articles/azure-arc/servers/learn/quick-enable-hybrid-vm.md +++ b/articles/azure-arc/servers/learn/quick-enable-hybrid-vm.md @@ -2,7 +2,7 @@ title: Quickstart - Connect hybrid machine with Azure Arc-enabled servers description: In this quickstart, you connect and register a hybrid machine with Azure Arc-enabled servers. ms.topic: quickstart -ms.date: 03/23/2022 +ms.date: 06/06/2022 ms.custom: mode-other --- @@ -26,15 +26,19 @@ In this quickstart, you'll deploy and configure the Azure Connected Machine agen ## Generate installation script -Use the Azure portal to create a script that automates the agent download and installation, and establishes the connection with Azure Arc. +Use the Azure portal to create a script that automates the agent download and installation and establishes the connection with Azure Arc. -1. 
Launch the Azure Arc service in the Azure portal by searching for and selecting **Servers - Azure Arc**. + -1. On the next page, from the **Add a single server** tile, select **Generate script**. +1. [Go to the Azure portal page for adding servers with Azure Arc](https://portal.azure.com/#view/Microsoft_Azure_HybridCompute/HybridVmAddBlade). Select the **Add a single server** tile, then select **Generate script**. + + :::image type="content" source="media/quick-enable-hybrid-vm/add-single-server.png" alt-text="Screenshot of Azure portal's add server page." lightbox="media/quick-enable-hybrid-vm/add-single-server-expanded.png"::: + > [!NOTE] + > In the portal, you can also reach the page for adding servers by searching for and selecting "Servers - Azure Arc" and then selecting **+Add**. 1. Review the information on the **Prerequisites** page, then select **Next**. diff --git a/articles/azure-arc/servers/manage-automatic-vm-extension-upgrade.md b/articles/azure-arc/servers/manage-automatic-vm-extension-upgrade.md index 7e09cda832502..3a5085476f5d1 100644 --- a/articles/azure-arc/servers/manage-automatic-vm-extension-upgrade.md +++ b/articles/azure-arc/servers/manage-automatic-vm-extension-upgrade.md @@ -1,28 +1,26 @@ --- -title: Automatic Extension Upgrade (preview) for Azure Arc-enabled servers -description: Learn how to enable the Automatic Extension Upgrade (preview) for your Azure Arc-enabled servers. +title: Automatic extension upgrade (preview) for Azure Arc-enabled servers +description: Learn how to enable the automatic extension upgrades for your Azure Arc-enabled servers. ms.topic: conceptual -ms.date: 12/09/2021 +ms.date: 06/02/2021 --- -# Automatic Extension Upgrade (preview) for Azure Arc-enabled servers +# Automatic extension upgrade (preview) for Azure Arc-enabled servers -Automatic Extension Upgrade (preview) is available for Azure Arc-enabled servers that have supported VM extensions installed. When Automatic Extension Upgrade (preview) is enabled on a machine, the extension is upgraded automatically whenever the extension publisher releases a new version for that extension. +Automatic extension upgrade (preview) is available for Azure Arc-enabled servers that have supported VM extensions installed. When automatic extension upgrade is enabled on a machine, the extension is upgraded automatically whenever the extension publisher releases a new version for that extension. - Automatic Extension Upgrade has the following features: + Automatic extension upgrade has the following features: - You can opt in and out of automatic upgrades at any time. - Each supported extension is enrolled individually, and you can choose which extensions to upgrade automatically. - Supported in all public cloud regions. > [!NOTE] -> In this release, it is only possible to configure Automatic Extension Upgrade with the Azure CLI and Azure PowerShell module. +> In this release, it is only possible to configure automatic extension upgrade with the Azure CLI and Azure PowerShell module. -## How does Automatic Extension Upgrade work? +## How does automatic extension upgrade work? -The extension upgrade process replaces the existing Azure VM extension version supported by Azure Arc-enabled servers with a new version of the same extension when published by the extension publisher. - -A failed extension update is automatically retried. A retry is attempted every few days automatically without user intervention. 
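Before relying on automatic upgrades, it can help to confirm which version of each extension is currently installed on a machine. The following is a rough sketch that reuses the query shape from the list command shown later in this article; the property paths are an assumption, so verify them against your own output:

```azurecli
# Sketch: list the extensions on a machine with their current versions so you
# can confirm whether an automatic upgrade was applied.
# Assumption: version and auto-upgrade flags live under the extension's properties.
az connectedmachine extension list \
  --resource-group myResourceGroup \
  --machine-name myArcServer \
  --query "[].{Name:name, Version:properties.typeHandlerVersion, AutoUpgrade:properties.enableAutoUpgrade}" \
  --output table
```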
+The extension upgrade process replaces the existing Azure VM extension version supported by Azure Arc-enabled servers with a new version of the same extension when published by the extension publisher. This feature is enabled by default for all extensions you deploy the Azure Arc-enabled servers unless you explicitly opt-out of automatic upgrades. ### Availability-first updates @@ -32,16 +30,26 @@ For a group of Arc-enabled servers undergoing an update, the Azure platform will **Across regions:** -- Geo-paired regions are not applicable. +- Geo-paired regions aren't applicable. **Within a region:** -- Availability Zones are not applicable. -- Machines are batched on a best effort basis to avoid concurrent updates for all machines registered with Arc-enabled servers in a subscription. +- Availability Zones aren't applicable. +- Machines are batched on a best effort basis to avoid concurrent updates for all machines registered with Arc-enabled servers in a subscription. + +### Automatic rollback and retries + +If an extension upgrade fails, Azure will try to repair the extension by performing the following actions: + +1. The Azure Connected Machine agent will automatically reinstall the last known good version of the extension to attempt to restore functionality. +1. If the rollback is successful, the extension status will show as **Succeeded** and the extension will be added to the automatic upgrade queue again. The next upgrade attempt can be as soon as the next hour and will continue until the upgrade is successful. +1. If the rollback fails, the extension status will show as **Failed** and the extension will no longer function as intended. You'll need to [remove](manage-vm-extensions-cli.md#remove-extensions) and [reinstall](manage-vm-extensions-cli.md#enable-extension) the extension to restore functionality. + +If you continue to have trouble upgrading an extension, you can [disable automatic extension upgrade](#disable-automatic-extension-upgrade) to prevent the system from trying again while you troubleshoot the issue. You can [enable automatic extension upgrade](#enable-automatic-extension-upgrade) again when you're ready. ## Supported extensions -Automatic Extension Upgrade (preview) supports the following extensions (and more are added periodically): +Automatic extension upgrade supports the following extensions (and more are added periodically): - Azure Monitor Agent - Linux and Windows - Azure Security agent - Linux and Windows @@ -49,11 +57,11 @@ Automatic Extension Upgrade (preview) supports the following extensions (and mor - Key Vault Extension - Linux only - Log Analytics agent (OMS agent) - Linux only -## Enabling Automatic Extension Upgrade (preview) +## Enable automatic extension upgrade -To enable Automatic Extension Upgrade (preview) for an extension, you must ensure the property `enable-auto-upgrade` is set to `true` and added to every extension definition individually. +Automatic extension upgrade is enabled by default when you install extensions on Azure Arc-enabled servers. To enable automatic extension upgrade for an existing extension, you can use Azure CLI or Azure PowerShell to set the `enableAutomaticUpgrade` property on the extension to `true`. You'll need to repeat this process for every extension where you'd like to enable automatic upgrades. -Use the [az connectedmachine extension update](/cli/azure/connectedmachine/extension) command with the `--name`, `--machine-name`, `--enable-auto-upgrade`, and `--resource-group` parameters. 
+Use the [az connectedmachine extension update](/cli/azure/connectedmachine/extension) command to enable automatic upgrade on an extension: ```azurecli az connectedmachine extension update \ @@ -63,38 +71,35 @@ az connectedmachine extension update \ --enable-auto-upgrade true ``` -To check the status of Automatic Extension Upgrade (preview) for all extensions on an Arc-enabled server, run the following command: +To check the status of automatic extension upgrade for all extensions on an Arc-enabled server, run the following command: ```azurecli az connectedmachine extension list --resource-group resourceGroupName --machine-name machineName --query "[].{Name:name, AutoUpgrade:properties.enableAutoUpgrade}" --output table ``` -To enable Automatic Extension Upgrade (preview) for an extension using Azure PowerShell, use the [Update-AzConnectedMachineExtension](/powershell/module/az.connectedmachine/update-azconnectedmachineextension) cmdlet with the `-Name`, `-MachineName`, `-ResourceGroup`, and `-EnableAutomaticUpgrade` parameters. +To enable automatic extension upgrade for an extension using Azure PowerShell, use the [Update-AzConnectedMachineExtension](/powershell/module/az.connectedmachine/update-azconnectedmachineextension) cmdlet: ```azurepowershell Update-AzConnectedMachineExtension -ResourceGroup resourceGroupName -MachineName machineName -Name DependencyAgentLinux -EnableAutomaticUpgrade ``` -To check the status of Automatic Extension Upgrade (preview) for all extensions on an Arc-enabled server, run the following command: +To check the status of automatic extension upgrade for all extensions on an Arc-enabled server, run the following command: ```azurepowershell Get-AzConnectedMachineExtension -ResourceGroup resourceGroupName -MachineName machineName | Format-Table Name, EnableAutomaticUpgrade ``` - ## Extension upgrades with multiple extensions A machine managed by Arc-enabled servers can have multiple extensions with automatic extension upgrade enabled. The same machine can also have other extensions without automatic extension upgrade enabled. -If multiple extension upgrades are available for a machine, the upgrades may be batched together, but each extension upgrade is applied individually on a machine. A failure on one extension does not impact the other extension(s) to be upgraded. For example, if two extensions are scheduled for an upgrade, and the first extension upgrade fails, the second extension will still be upgraded. +If multiple extension upgrades are available for a machine, the upgrades may be batched together, but each extension upgrade is applied individually on a machine. A failure on one extension doesn't impact the other extension(s) to be upgraded. For example, if two extensions are scheduled for an upgrade, and the first extension upgrade fails, the second extension will still be upgraded. -## Disable Automatic Extension Upgrade +## Disable automatic extension upgrade -To disable Automatic Extension Upgrade (preview) for an extension, you must ensure the property `enable-auto-upgrade` is set to `false` and added to every extension definition individually. +To disable automatic extension upgrade for an extension, set the `enable-auto-upgrade` property to `false`. -### Using the Azure CLI - -Use the [az connectedmachine extension update](/cli/azure/connectedmachine/extension) command with the `--name`, `--machine-name`, `--enable-auto-upgrade`, and `--resource-group` parameters. 
+With Azure CLI, use the [az connectedmachine extension update](/cli/azure/connectedmachine/extension) command to disable automatic upgrade on an extension: ```azurecli az connectedmachine extension update \ @@ -104,14 +109,20 @@ az connectedmachine extension update \ --enable-auto-upgrade false ``` -### Using Azure PowerShell - -Use the [Update-AzConnectedMachineExtension](/powershell/module/az.connectedmachine/update-azconnectedmachineextension) cmdlet with the `-Name`, `-MachineName`, `-ResourceGroup`, and `-EnableAutomaticUpgrade` parameters. +With Azure PowerShell, use the [Update-AzConnectedMachineExtension](/powershell/module/az.connectedmachine/update-azconnectedmachineextension) cmdlet: ```azurepowershell Update-AzConnectedMachineExtension -ResourceGroup resourceGroupName -MachineName machineName -Name DependencyAgentLinux -EnableAutomaticUpgrade:$false ``` +## Check automatic extension upgrade history + +You can use the Azure Activity Log to identify extensions that were automatically upgraded. You can find the Activity Log tab on individual Azure Arc-enabled server resources, resource groups, and subscriptions. Extension upgrades are identified by the `Upgrade Extensions on Azure Arc machines (Microsoft.HybridCompute/machines/upgradeExtensions/action)` operation. + +To view automatic extension upgrade history, search for the **Azure Activity Log** in the Azure Portal. Select **Add filter** and choose the Operation filter. For the filter criteria, search for "Upgrade Extensions on Azure Arc machines" and select that option. You can optionally add a second filter for **Event initiated by** and set "Azure Regional Service Manager" as the filter criteria to only see automatic upgrade attempts and exclude upgrades manually initiated by users. + +:::image type="content" source="media/manage-automatic-vm-extension-upgrade/azure-activity-log-extension-upgrade.png" alt-text="Azure Activity Log showing attempts to automatically upgrade extensions on Azure Arc-enabled servers." border="true"::: + ## Next steps - You can deploy, manage, and remove VM extensions using the [Azure CLI](manage-vm-extensions-cli.md), [PowerShell](manage-vm-extensions-powershell.md), or [Azure Resource Manager templates](manage-vm-extensions-template.md). diff --git a/articles/azure-arc/servers/manage-vm-extensions-template.md b/articles/azure-arc/servers/manage-vm-extensions-template.md index d95d4b05c7584..1399a66b9df22 100644 --- a/articles/azure-arc/servers/manage-vm-extensions-template.md +++ b/articles/azure-arc/servers/manage-vm-extensions-template.md @@ -1,7 +1,7 @@ --- title: Enable VM extension using Azure Resource Manager template description: This article describes how to deploy virtual machine extensions to Azure Arc-enabled servers running in hybrid cloud environments using an Azure Resource Manager template. 
-ms.date: 07/16/2021 +ms.date: 06/02/2022 ms.topic: conceptual ms.custom: devx-track-azurepowershell --- @@ -47,10 +47,11 @@ To easily deploy the Log Analytics agent, the following sample is provided to in "name": "[concat(parameters('vmName'),'/OMSAgentForLinux')]", "type": "Microsoft.HybridCompute/machines/extensions", "location": "[parameters('location')]", - "apiVersion": "2019-08-02-preview", + "apiVersion": "2022-03-10", "properties": { "publisher": "Microsoft.EnterpriseCloud.Monitoring", "type": "OmsAgentForLinux", + "enableAutomaticUpgrade": true, "settings": { "workspaceId": "[parameters('workspaceId')]" }, @@ -88,11 +89,12 @@ To easily deploy the Log Analytics agent, the following sample is provided to in "name": "[concat(parameters('vmName'),'/MicrosoftMonitoringAgent')]", "type": "Microsoft.HybridCompute/machines/extensions", "location": "[parameters('location')]", - "apiVersion": "2019-08-02-preview", + "apiVersion": "2022-03-10", "properties": { "publisher": "Microsoft.EnterpriseCloud.Monitoring", "type": "MicrosoftMonitoringAgent", "autoUpgradeMinorVersion": true, + "enableAutomaticUpgrade": true, "settings": { "workspaceId": "[parameters('workspaceId')]" }, @@ -171,7 +173,7 @@ The Custom Script extension configuration specifies things like script location "name": "[concat(parameters('vmName'),'/CustomScript')]", "type": "Microsoft.HybridCompute/machines/extensions", "location": "[parameters('location')]", - "apiVersion": "2019-08-02-preview", + "apiVersion": "2022-03-10", "properties": { "publisher": "Microsoft.Azure.Extensions", "type": "CustomScript", @@ -219,7 +221,7 @@ The Custom Script extension configuration specifies things like script location "name": "[concat(parameters('vmName'),'/CustomScriptExtension')]", "type": "Microsoft.HybridCompute/machines/extensions", "location": "[parameters('location')]", - "apiVersion": "2019-08-02-preview", + "apiVersion": "2022-03-10", "properties": { "publisher": "Microsoft.Compute", "type": "CustomScriptExtension", @@ -298,8 +300,8 @@ To use the Azure Monitor Dependency agent extension, the following sample is pro ```json { - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", "parameters": { "vmName": { "type": "string", @@ -308,26 +310,23 @@ To use the Azure Monitor Dependency agent extension, the following sample is pro } } }, - "variables": { - "vmExtensionsApiVersion": "2017-03-30" - }, "resources": [ { "type": "Microsoft.HybridCompute/machines/extensions", "name": "[concat(parameters('vmName'),'/DAExtension')]", - "apiVersion": "[variables('vmExtensionsApiVersion')]", + "apiVersion": "2022-03-10", "location": "[resourceGroup().location]", "dependsOn": [ ], "properties": { "publisher": "Microsoft.Azure.Monitoring.DependencyAgent", "type": "DependencyAgentLinux", - "autoUpgradeMinorVersion": true + "enableAutomaticUpgrade": true } } ], - "outputs": { - } + "outputs": { + } } ``` @@ -335,8 +334,8 @@ To use the Azure Monitor Dependency agent extension, the following sample is pro ```json { - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", "parameters": { "vmName": { "type": "string", @@ -345,26 +344,23 @@ To use the Azure Monitor 
Dependency agent extension, the following sample is pro } } }, - "variables": { - "vmExtensionsApiVersion": "2017-03-30" - }, "resources": [ { "type": "Microsoft.HybridCompute/machines/extensions", "name": "[concat(parameters('vmName'),'/DAExtension')]", - "apiVersion": "[variables('vmExtensionsApiVersion')]", + "apiVersion": "2022-03-10", "location": "[resourceGroup().location]", "dependsOn": [ ], "properties": { "publisher": "Microsoft.Azure.Monitoring.DependencyAgent", "type": "DependencyAgentWindows", - "autoUpgradeMinorVersion": true + "enableAutomaticUpgrade": true } } ], - "outputs": { - } + "outputs": { + } } ``` @@ -419,12 +415,12 @@ The following JSON shows the schema for the Key Vault VM extension (preview). Th { "type": "Microsoft.HybridCompute/machines/extensions", "name": "[concat(parameters('vmName'),'/KVVMExtensionForLinux')]", - "apiVersion": "2019-12-12", + "apiVersion": "2022-03-10", "location": "[parameters('location')]", "properties": { "publisher": "Microsoft.Azure.KeyVault", "type": "KeyVaultForLinux", - "autoUpgradeMinorVersion": true, + "enableAutomaticUpgrade": true, "settings": { "secretsManagementSettings": { "pollingIntervalInS": , @@ -433,8 +429,7 @@ The following JSON shows the schema for the Key Vault VM extension (preview). Th "observedCertificates": , - "msiClientId": + "msiEndpoint": "http://localhost:40342/metadata/identity" } } } @@ -488,24 +483,23 @@ The following JSON shows the schema for the Key Vault VM extension (preview). Th { "type": "Microsoft.HybridCompute/machines/extensions", "name": "[concat(parameters('vmName'),'/KVVMExtensionForWindows')]", - "apiVersion": "2019-12-12", + "apiVersion": "2022-03-10", "location": "[parameters('location')]", "properties": { "publisher": "Microsoft.Azure.KeyVault", "type": "KeyVaultForWindows", - "autoUpgradeMinorVersion": true, + "enableAutomaticUpgrade": true, "settings": { "secretsManagementSettings": { "pollingIntervalInS": "3600", "certificateStoreName": , "linkOnRenewal": , "certificateStoreLocation": , - "requireInitialSync": , + "requireInitialSync": , "observedCertificates": , - "msiClientId": + "msiEndpoint": "http://localhost:40342/metadata/identity" } } } @@ -530,92 +524,6 @@ Save the template file to disk. You can then deploy the extension to the connect New-AzResourceGroupDeployment -ResourceGroupName "ContosoEngineering" -TemplateFile "D:\Azure\Templates\KeyVaultExtension.json" ``` -## Deploy the Microsoft Defender for Cloud integrated scanner - -To use the Microsoft Defender for Cloud integrated scanner extension, the following sample is provided to run on Windows and Linux. If you are unfamiliar with the integrated scanner, see [Overview of Microsoft Defender for Cloud's vulnerability assessment solution](../../security-center/deploy-vulnerability-assessment-vm.md) for hybrid machines. 
- -### Template file for Windows - -```json -{ - "properties": { - "mode": "Incremental", - "template": { - "contentVersion": "1.0.0.0", - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "parameters": { - "vmName": { - "type": "string" - }, - "apiVersionByEnv": { - "type": "string" - } - }, - "resources": [ - { - "type": "Microsoft.HybridCompute/machines/providers/serverVulnerabilityAssessments", - "name": "[concat(parameters('vmName'), '/Microsoft.Security/default')]", - "apiVersion": "[parameters('apiVersionByEnv')]" - } - ] - }, - "parameters": { - "vmName": { - "value": "resourceName" - }, - "apiVersionByEnv": { - "value": "2015-06-01-preview" - } - } - } -} -``` - -### Template file for Linux - -```json -{ - "properties": { - "mode": "Incremental", - "template": { - "contentVersion": "1.0.0.0", - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "parameters": { - "vmName": { - "type": "string" - }, - "apiVersionByEnv": { - "type": "string" - } - }, - "resources": [ - { - "type": "Microsoft.HybridCompute/machines/providers/serverVulnerabilityAssessments", - "name": "[concat(parameters('vmName'), '/Microsoft.Security/default')]", - "apiVersion": "[parameters('apiVersionByEnv')]" - } - ] - }, - "parameters": { - "vmName": { - "value": "resourceName" - }, - "apiVersionByEnv": { - "value": "2015-06-01-preview" - } - } - } -} -``` - -### Template deployment - -Save the template file to disk. You can then deploy the extension to the connected machine with the following command. - -```powershell -New-AzResourceGroupDeployment -ResourceGroupName "ContosoEngineering" -TemplateFile "D:\Azure\Templates\AzureDefenderScanner.json" -``` - ## Next steps * You can deploy, manage, and remove VM extensions using the [Azure PowerShell](manage-vm-extensions-powershell.md), from the [Azure portal](manage-vm-extensions-portal.md), or the [Azure CLI](manage-vm-extensions-cli.md). diff --git a/articles/azure-arc/servers/media/manage-automatic-vm-extension-upgrade/azure-activity-log-extension-upgrade.png b/articles/azure-arc/servers/media/manage-automatic-vm-extension-upgrade/azure-activity-log-extension-upgrade.png new file mode 100644 index 0000000000000..475e6bb5cfff4 Binary files /dev/null and b/articles/azure-arc/servers/media/manage-automatic-vm-extension-upgrade/azure-activity-log-extension-upgrade.png differ diff --git a/articles/azure-arc/servers/network-requirements.md b/articles/azure-arc/servers/network-requirements.md index 5fcb05a656f08..fbf6fdfbc4872 100644 --- a/articles/azure-arc/servers/network-requirements.md +++ b/articles/azure-arc/servers/network-requirements.md @@ -1,7 +1,7 @@ --- title: Connected Machine agent network requirements description: Learn about the networking requirements for using the Connected Machine agent for Azure Arc-enabled servers. -ms.date: 03/14/2022 +ms.date: 05/24/2022 ms.topic: conceptual --- @@ -29,6 +29,7 @@ Be sure to allow access to the following Service Tags: * AzureResourceManager * AzureArcInfrastructure * Storage +* WindowsAdminCenter (if [using Windows Admin Center to manage Arc-enabled servers](/windows-server/manage/windows-admin-center/azure/manage-arc-hybrid-machines)) For a list of IP addresses for each service tag/region, see the JSON file [Azure IP Ranges and Service Tags – Public Cloud](https://www.microsoft.com/download/details.aspx?id=56519). Microsoft publishes weekly updates containing each Azure Service and the IP ranges it uses. 
This information in the JSON file is the current point-in-time list of the IP ranges that correspond to each service tag. The IP addresses are subject to change. If IP address ranges are required for your firewall configuration, then the **AzureCloud** Service Tag should be used to allow access to all Azure services. Do not disable security monitoring or inspection of these URLs, allow them as you would other Internet traffic. @@ -51,6 +52,7 @@ The table below lists the URLs that must be available in order to install and us |`*.guestconfiguration.azure.com`| Extension management and guest configuration services |Always| Private | |`guestnotificationservice.azure.com`, `*.guestnotificationservice.azure.com`|Notification service for extension and connectivity scenarios|Always| Private | |`azgn*.servicebus.windows.net`|Notification service for extension and connectivity scenarios|Always| Public | +|`*servicebus.windows.net`|For Windows Admin Center and SSH scenarios|If using SSH or Windows Admin Center from Azure|Public| |`*.blob.core.windows.net`|Download source for Azure Arc-enabled servers extensions|Always, except when using private endpoints| Not used when private link is configured | |`dc.services.visualstudio.com`|Agent telemetry|Optional| Public | diff --git a/articles/azure-arc/servers/onboard-ansible-playbooks.md b/articles/azure-arc/servers/onboard-ansible-playbooks.md index 80e5f2ae56c5d..66621342fcf72 100644 --- a/articles/azure-arc/servers/onboard-ansible-playbooks.md +++ b/articles/azure-arc/servers/onboard-ansible-playbooks.md @@ -29,68 +29,53 @@ Before you can run the script to connect your machines, you'll need to do the fo If you are onboarding machines to Azure Arc-enabled servers, copy the following Ansible playbook template and save the playbook as `arc-server-onboard-playbook.yml`. 
-``` +```yaml --- - name: Onboard Linux and Windows Servers to Azure Arc-enabled servers with public endpoint connectivity hosts: + vars: + azure: + service_principal_id: 'INSERT-SERVICE-PRINCIPAL-CLIENT-ID' + service_principal_secret: 'INSERT-SERVICE-PRINCIPAL-SECRET' + resource_group: 'INSERT-RESOURCE-GROUP' + tenant_id: 'INSERT-TENANT-ID' + subscription_id: 'INSERT-SUBSCRIPTION-ID' + location: 'INSERT-LOCATION' tasks: - - name: Download the Connected Machine Agent on Linux servers + - name: Download the Connected Machine Agent on Linux servers become: yes get_url: url: https://aka.ms/azcmagent dest: ~/install_linux_azcmagent.sh mode: '700' when: ansible_system == 'Linux' - - name: Download the Connected Machine Agent on Windows servers - win_get_url: - url: https://aka.ms/AzureConnectedMachineAgent - dest: C:\AzureConnectedMachineAgent.msi + - name: Download the Connected Machine Agent on Windows servers + win_get_url: + url: https://aka.ms/AzureConnectedMachineAgent + dest: C:\AzureConnectedMachineAgent.msi when: ansible_os_family == 'Windows' - name: Install the Connected Machine Agent on Linux servers become: yes shell: bash ~/install_linux_azcmagent.sh when: ansible_system == 'Linux' - name: Install the Connected Machine Agent on Windows servers - path: C:\AzureConnectedMachineAgent.msi + win_package: + path: C:\AzureConnectedMachineAgent.msi when: ansible_os_family == 'Windows' - name: Connect the Connected Machine Agent on Linux servers to Azure Arc become: yes - shell: sudo azcmagent connect --service-principal-id --service-principal-secret --resource-group --tenant-id --location --subscription-id + shell: sudo azcmagent connect --service-principal-id {{ azure.service_principal_id }} --service-principal-secret {{ azure.service_principal_secret }} --resource-group {{ azure.resource_group }} --tenant-id {{ azure.tenant_id }} --location {{ azure.location }} --subscription-id {{ azure.subscription_id }} when: ansible_system == 'Linux' - name: Connect the Connected Machine Agent on Windows servers to Azure - win_shell: '& $env:ProgramFiles\AzureConnectedMachineAgent\azcmagent.exe connect --service-principal-id --service-principal-secret --resource-group --tenant-id --location --subscription-id ' + win_shell: '& $env:ProgramFiles\AzureConnectedMachineAgent\azcmagent.exe connect --service-principal-id "{{ azure.service_principal_id }}" --service-principal-secret "{{ azure.service_principal_secret }}" --resource-group "{{ azure.resource_group }}" --tenant-id "{{ azure.tenant_id }}" --location "{{ azure.location }}" --subscription-id "{{ azure.subscription_id }}"' when: ansible_os_family == 'Windows' ``` - - ## Modify the Ansible playbook After downloading the Ansible playbook, complete the following steps: -1. Within the Ansible playbook, modify the fields under the task **Connect the Connected Machine Agent to Azure** with the service principal and Azure details collected earlier: +1. Within the Ansible playbook, modify the variables under the **vars section** with the service principal and Azure details collected earlier: * Service Principal Id * Service Principal Secret @@ -99,7 +84,7 @@ After downloading the Ansible playbook, complete the following steps: * Subscription Id * Region -1. Enter the correct hosts field capturing the target servers for onboarding to Azure Arc. You can employ Ansible patterns to selectively target which hybrid machines to onboard. +1. Enter the correct hosts field capturing the target servers for onboarding to Azure Arc. 
You can employ [Ansible patterns](https://docs.ansible.com/ansible/latest/user_guide/intro_patterns.html#common-patterns) to selectively target which hybrid machines to onboard. ## Run the Ansible playbook diff --git a/articles/azure-arc/servers/onboard-group-policy.md b/articles/azure-arc/servers/onboard-group-policy.md index 07decb152416a..7733e88751f9b 100644 --- a/articles/azure-arc/servers/onboard-group-policy.md +++ b/articles/azure-arc/servers/onboard-group-policy.md @@ -1,7 +1,7 @@ --- title: Connect machines at scale using group policy description: In this article, you learn how to connect machines to Azure using Azure Arc-enabled servers using group policy. -ms.date: 04/29/2022 +ms.date: 05/25/2022 ms.topic: conceptual ms.custom: template-how-to --- @@ -35,15 +35,15 @@ Before you can run the script to connect your machines, you'll need to do the fo 1. Modify and save the following configuration file to the remote share as `ArcConfig.json`. Edit the file with your Azure subscription, resource group, and location details. Use the service principal details from step 1 for the last two fields: -``` +```json { - "tenant-id": "INSERT AZURE TENANTID", - "subscription-id": "INSERT AZURE SUBSCRIPTION ID", - "resource-group": "INSERT RESOURCE GROUP NAME", - "location": "INSERT REGION", - "service-principal-id": "INSERT SPN ID", - "service-principal-secret": "INSERT SPN Secret" - } + "tenant-id": "INSERT AZURE TENANTID", + "subscription-id": "INSERT AZURE SUBSCRIPTION ID", + "resource-group": "INSERT RESOURCE GROUP NAME", + "location": "INSERT REGION", + "service-principal-id": "INSERT SPN ID", + "service-principal-secret": "INSERT SPN Secret" + } ``` The group policy will project machines as Arc-enabled servers in the Azure subscription, resource group, and region specified in this configuration file. diff --git a/articles/azure-arc/servers/onboard-portal.md b/articles/azure-arc/servers/onboard-portal.md index e09fdf9ea2b1d..ced821d783b79 100644 --- a/articles/azure-arc/servers/onboard-portal.md +++ b/articles/azure-arc/servers/onboard-portal.md @@ -23,7 +23,7 @@ The script to automate the download and installation, and to establish the conne 1. On the **Servers - Azure Arc** page, select **Add** at the upper left. -1. On the **Select a method** page, select the **Add servers using interactive script** tile, and then select **Generate script**. +1. On the **Select a method** page, select the **Add a single server** tile, and then select **Generate script**. 1. On the **Generate script** page, select the subscription and resource group where you want the machine to be managed within Azure. Select an Azure location where the machine metadata will be stored. This location can be the same or different, as the resource group's location. diff --git a/articles/azure-arc/servers/onboard-service-principal.md b/articles/azure-arc/servers/onboard-service-principal.md index b379e8e969f78..27970350ce18f 100644 --- a/articles/azure-arc/servers/onboard-service-principal.md +++ b/articles/azure-arc/servers/onboard-service-principal.md @@ -1,20 +1,29 @@ --- title: Connect hybrid machines to Azure at scale description: In this article, you learn how to connect machines to Azure using Azure Arc-enabled servers using a service principal. 
-ms.date: 02/23/2022 +ms.date: 05/23/2022 ms.topic: conceptual ms.custom: devx-track-azurepowershell --- # Connect hybrid machines to Azure at scale -You can enable Azure Arc-enabled servers for multiple Windows or Linux machines in your environment with several flexible options depending on your requirements. Using the template script we provide, you can automate every step of the installation, including establishing the connection to Azure Arc. However, you are required to interactively execute this script with an account that has elevated permissions on the target machine and in Azure. +You can enable Azure Arc-enabled servers for multiple Windows or Linux machines in your environment with several flexible options depending on your requirements. Using the template script we provide, you can automate every step of the installation, including establishing the connection to Azure Arc. However, you are required to execute this script manually with an account that has elevated permissions on the target machine and in Azure. -To connect the machines to Azure Arc-enabled servers, you can use an Azure Active Directory [service principal](../../active-directory/develop/app-objects-and-service-principals.md) instead of using your privileged identity to [interactively connect the machine](onboard-portal.md). This service principal is a special limited management identity that is granted only the minimum permission necessary to connect machines to Azure using the `azcmagent` command. This is safer than using a higher privileged account like a Tenant Administrator, and follows our access control security best practices. The service principal is used only during onboarding; it is not used for any other purpose. +One method to connect the machines to Azure Arc-enabled servers is to use an Azure Active Directory [service principal](../../active-directory/develop/app-objects-and-service-principals.md). This service principal method can be used instead of your privileged identity to [interactively connect the machine](onboard-portal.md). This service principal is a special limited management identity that has only the minimum permission necessary to connect machines to Azure using the `azcmagent` command. This method is safer than using a higher privileged account like a Tenant Administrator and follows our access control security best practices. **The service principal is used only during onboarding; it is not used for any other purpose.** -The installation methods to install and configure the Connected Machine agent requires that the automated method you use has administrator permissions on the machines: on Linux by using the root account, and on Windows as a member of the Local Administrators group. +Before you start connecting your machines, review the following requirements: -Before you get started, be sure to review the [prerequisites](prerequisites.md) and verify that your subscription and resources meet the requirements. For information about supported regions and other related considerations, see [supported Azure regions](overview.md#supported-regions). Also review our [at-scale planning guide](plan-at-scale-deployment.md) to understand the design and deployment criteria, as well as our management and monitoring recommendations. +1. Make sure you have administrator permission on the machines you want to onboard. 
+ + Administrator permissions are required to install the Connected Machine agent on the machines; on Linux by using the root account, and on Windows as a member of the Local Administrators group. +1. Review the [prerequisites](prerequisites.md) and verify that your subscription and resources meet the requirements. You will need to have the **Azure Connected Machine Onboarding** role or the **Contributor** role for the resource group of the machine. + + For information about supported regions and other related considerations, see [supported Azure regions](overview.md#supported-regions). Also review our [at-scale planning guide](plan-at-scale-deployment.md) to understand the design and deployment criteria, as well as our management and monitoring recommendations. + + If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. diff --git a/articles/azure-arc/servers/overview.md b/articles/azure-arc/servers/overview.md index e6d0164ff8f6e..c35fa9593a777 100644 --- a/articles/azure-arc/servers/overview.md +++ b/articles/azure-arc/servers/overview.md @@ -36,12 +36,12 @@ When you connect your machine to Azure Arc-enabled servers, you can perform many * Perform post-deployment configuration and automation tasks using supported [Arc-enabled servers VM extensions](manage-vm-extensions.md) for your non-Azure Windows or Linux machine. * **Monitor**: * Monitor operating system performance and discover application components to monitor processes and dependencies with other resources using [VM insights](../../azure-monitor/vm/vminsights-overview.md). - * Collect other log data, such as performance data and events, from the operating system or workloads running on the machine with the [Log Analytics agent](../../azure-monitor/agents/agents-overview.md#log-analytics-agent). This data is stored in a [Log Analytics workspace](../../azure-monitor/logs/design-logs-deployment.md). + * Collect other log data, such as performance data and events, from the operating system or workloads running on the machine with the [Log Analytics agent](../../azure-monitor/agents/agents-overview.md#log-analytics-agent). This data is stored in a [Log Analytics workspace](../../azure-monitor/logs/log-analytics-workspace-overview.md). > [!NOTE] > At this time, enabling Azure Automation Update Management directly from an Azure Arc-enabled server is not supported. See [Enable Update Management from your Automation account](../../automation/update-management/enable-from-automation-account.md) to understand requirements and [how to enable Update Management for non-Azure VMs](../../automation/update-management/enable-from-automation-account.md#enable-non-azure-vms). -Log data collected and stored in a Log Analytics workspace from the hybrid machine contains properties specific to the machine, such as a Resource ID, to support [resource-context](../../azure-monitor/logs/design-logs-deployment.md#access-mode) log access. +Log data collected and stored in a Log Analytics workspace from the hybrid machine contains properties specific to the machine, such as a Resource ID, to support [resource-context](../../azure-monitor/logs/manage-access.md#access-mode) log access. Watch this video to learn more about Azure monitoring, security, and update services across hybrid and multicloud environments. 
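Relating to the at-scale onboarding flow above: the limited-permission service principal can be created ahead of time and scoped to a single resource group. The following is a hedged sketch; the name, subscription, and resource group are placeholders to replace with your own values:

```azurecli
# Sketch: create a least-privilege service principal for onboarding machines to
# Azure Arc. It is granted only the Azure Connected Machine Onboarding role on
# one resource group; substitute your own subscription and resource group.
az ad sp create-for-rbac \
  --name "Arc-Onboarding-SP" \
  --role "Azure Connected Machine Onboarding" \
  --scopes "/subscriptions/<subscription-id>/resourceGroups/<resource-group>"
```

The command returns an application (client) ID and secret; those are the values the deployment script or playbook passes to `azcmagent connect`.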
diff --git a/articles/azure-arc/servers/plan-at-scale-deployment.md b/articles/azure-arc/servers/plan-at-scale-deployment.md index 78197ee977491..7f78faf664fd8 100644 --- a/articles/azure-arc/servers/plan-at-scale-deployment.md +++ b/articles/azure-arc/servers/plan-at-scale-deployment.md @@ -61,7 +61,7 @@ In this phase, system engineers or administrators enable the core features in th |-----|-------|---------| | [Create a resource group](../../azure-resource-manager/management/manage-resource-groups-portal.md#create-resource-groups) | A dedicated resource group to include only Azure Arc-enabled servers and centralize management and monitoring of these resources. | One hour | | Apply [Tags](../../azure-resource-manager/management/tag-resources.md) to help organize machines. | Evaluate and develop an IT-aligned [tagging strategy](/azure/cloud-adoption-framework/decision-guides/resource-tagging/) that can help reduce the complexity of managing your Azure Arc-enabled servers and simplify making management decisions. | One day | -| Design and deploy [Azure Monitor Logs](../../azure-monitor/logs/data-platform-logs.md) | Evaluate [design and deployment considerations](../../azure-monitor/logs/design-logs-deployment.md) to determine if your organization should use an existing or implement another Log Analytics workspace to store collected log data from hybrid servers and machines.1 | One day | +| Design and deploy [Azure Monitor Logs](../../azure-monitor/logs/data-platform-logs.md) | Evaluate [design and deployment considerations](../../azure-monitor/logs/workspace-design.md) to determine if your organization should use an existing or implement another Log Analytics workspace to store collected log data from hybrid servers and machines.1 | One day | | [Develop an Azure Policy](../../governance/policy/overview.md) governance plan | Determine how you will implement governance of hybrid servers and machines at the subscription or resource group scope with Azure Policy. | One day | | Configure [Role based access control](../../role-based-access-control/overview.md) (RBAC) | Develop an access plan to control who has access to manage Azure Arc-enabled servers and ability to view their data from other Azure services and solutions. | One day | | Identify machines with Log Analytics agent already installed | Run the following log query in [Log Analytics](../../azure-monitor/logs/log-analytics-overview.md) to support conversion of existing Log Analytics agent deployments to extension-managed agent:
                  Heartbeat
                  | summarize arg_max(TimeGenerated, OSType, ResourceId, ComputerEnvironment) by Computer
                  | where ComputerEnvironment == "Non-Azure" and isempty(ResourceId)
                  | project Computer, OSType | One hour | diff --git a/articles/azure-arc/servers/scenario-onboard-azure-sentinel.md b/articles/azure-arc/servers/scenario-onboard-azure-sentinel.md index 44386b93ba225..a8cb8149b1f3f 100644 --- a/articles/azure-arc/servers/scenario-onboard-azure-sentinel.md +++ b/articles/azure-arc/servers/scenario-onboard-azure-sentinel.md @@ -13,7 +13,7 @@ This article is intended to help you onboard your Azure Arc-enabled server to [M Before you start, make sure that you've met the following requirements: -- A [Log Analytics workspace](../../azure-monitor/logs/data-platform-logs.md). For more information about Log Analytics workspaces, see [Designing your Azure Monitor Logs deployment](../../azure-monitor/logs/design-logs-deployment.md). +- A [Log Analytics workspace](../../azure-monitor/logs/data-platform-logs.md). For more information about Log Analytics workspaces, see [Designing your Azure Monitor Logs deployment](../../azure-monitor/logs/workspace-design.md). - Microsoft Sentinel [enabled in your subscription](../../sentinel/quickstart-onboard.md). diff --git a/articles/azure-arc/servers/toc.yml b/articles/azure-arc/servers/toc.yml index 6c83c4ce55115..aa04e799d45d0 100644 --- a/articles/azure-arc/servers/toc.yml +++ b/articles/azure-arc/servers/toc.yml @@ -107,6 +107,8 @@ href: scenario-onboard-azure-sentinel.md - name: Onboard to Microsoft Defender for Cloud href: ../../defender-for-cloud/quickstart-onboard-machines.md?toc=/azure/azure-arc/servers/toc.json&bc=/azure/azure-arc/servers/breadcrumb/toc.json + - name: Manage with Windows Admin Center + href: /windows-server/manage/windows-admin-center/azure/manage-arc-hybrid-machines?toc=/azure/azure-arc/servers/toc.json&bc=/azure/azure-arc/servers/breadcrumb/toc.json - name: Connect via SSH items: - name: SSH access to Azure Arc-enabled servers diff --git a/articles/azure-arc/system-center-virtual-machine-manager/create-virtual-machine.md b/articles/azure-arc/system-center-virtual-machine-manager/create-virtual-machine.md new file mode 100644 index 0000000000000..73ffffb26c094 --- /dev/null +++ b/articles/azure-arc/system-center-virtual-machine-manager/create-virtual-machine.md @@ -0,0 +1,52 @@ +--- +title: Create a virtual machine on System Center Virtual Machine Manager using Azure Arc (preview) +description: This article helps you create a virtual machine using Azure portal (preview). +ms.date: 05/25/2022 +ms.topic: conceptual +ms.services: azure-arc +author: jyothisuri +ms.author: jsuri +keywords: "VMM, Arc, Azure" +--- + + +# Create a virtual machine on System Center Virtual Machine Manager using Azure Arc (preview) + +Once your administrator has connected an SCVMM management server to Azure, represented VMM resources such as private clouds, VM templates in Azure, and provided you the required permissions on those resources, you'll be able to create a virtual machine in Azure. + +## Prerequisites + +- An Azure subscription and resource group where you have *Arc SCVMM VM Contributor* role. +- A cloud resource on which you have *Arc SCVMM Private Cloud Resource User* role. +- A virtual machine template resource on which you have *Arc SCVMM Private Cloud Resource User role*. +- A virtual network resource on which you have *Arc SCVMM Private Cloud Resource User* role. + +## How to create a VM in Azure portal + +1. Go to Azure portal. +2. Select **Azure Arc** as the service and then select **Azure Arc virtual machine** from the left blade. +3. 
Click **+ Create**, **Create an Azure Arc virtual machine** page opens. + +3. Under **Basics** > **Project details**, select the **Subscription** and **Resource group** where you want to deploy the VM. +4. Under **Instance details**, provide the following details: + - Virtual machine name - Specify the name of the virtual machine. + - Custom location - Select the custom location that your administrator has shared with you. + - Virtual machine kind – Select **System Center Virtual Machine Manager**. + - Cloud – Select the target VMM private cloud. + - Availability set - (Optional) Use availability sets to identify virtual machines that you want VMM to keep on separate hosts for improved continuity of service. +5. Under **Template details**, provide the following details: + - Template – Choose the VM template for deployment. + - Override template details - Select the checkbox to override the default CPU cores and memory on the VM templates. + - Specify computer name for the VM, if the VM template has computer name associated with it. +6. Under **Administrator account**, provide the following details and click **Next : Disks >**. + - Username + - Password + - Confirm password +7. Under **Disks**, you can optionally change the disks configured in the template. You can add more disks or update existing disks. +8. Under **Networking**, you can optionally change the network interfaces configured in the template. You can add Network interface cards (NICs) or update the existing NICs. You can also change the network that this NIC will be attached to provided you have appropriate permissions to the network resource. +9. Under **Advanced**, enable processor compatibility mode if required. +10. Under **Tags**, you can optionally add tags to the VM resource. + >[!NOTE] + > Custom properties defined for the VM in VMM will be synced as tags in Azure. + +11. Under **Review + create**, review all the properties and select **Create**. The VM will be created in a few minutes. diff --git a/articles/azure-arc/system-center-virtual-machine-manager/enable-scvmm-inventory-resources.md b/articles/azure-arc/system-center-virtual-machine-manager/enable-scvmm-inventory-resources.md new file mode 100644 index 0000000000000..9e9e06c350de2 --- /dev/null +++ b/articles/azure-arc/system-center-virtual-machine-manager/enable-scvmm-inventory-resources.md @@ -0,0 +1,63 @@ +--- +title: Enable SCVMM inventory resources in Azure Arc center (preview) +description: This article helps you enable SCVMM inventory resources from Azure portal (preview) +ms.service: azure-arc +author: jyothisuri +ms.reviewer: jsuri +ms.date: 05/25/2022 +ms.topic: how-to +keywords: "VMM, Arc, Azure" +--- + +# Enable SCVMM inventory resources from Azure portal (preview) + +The article describes how you can view SCVMM management servers and enable SCVMM inventory from Azure portal, after connecting to the SCVMM management server. + +## View SCVMM management servers + +You can view all the connected SCVMM management servers under **SCVMM management servers** in Azure Arc center. + +:::image type="content" source="media/enable-scvmm-inventory-resources/view-scvmm-servers-inline.png" alt-text="Screenshot of how to view SCVMM servers." lightbox="media/enable-scvmm-inventory-resources/view-scvmm-servers-expanded.png"::: + +In the inventory view, you can browse the virtual machines (VMs), VMM clouds, VM network, and VM templates. 
+Under each inventory, you can select and enable one or more SCVMM resources in Azure to create an Azure resource representing your SCVMM resource. + +You can further use the Azure resource to assign permissions or perform management operations. + +## Enable SCVMM cloud, VM templates and VM networks in Azure + +To enable the SCVMM inventory resources, follow these steps: + +1. From Azure home > **Azure Arc** center, go to **SCVMM management servers (preview)** blade and go to inventory resources blade. + + :::image type="content" source="media/enable-scvmm-inventory-resources/scvmm-server-blade-inline.png" alt-text="Screenshot of how to go to SCVMM management servers blade." lightbox="media/enable-scvmm-inventory-resources/scvmm-server-blade-expanded.png"::: + +1. Select the resource(s) you want to enable and select **Enable in Azure**. + + :::image type="content" source="media/enable-scvmm-inventory-resources/scvmm-enable-azure-inline.png" alt-text="Screenshot of how to enable in Azure option." lightbox="media/enable-scvmm-inventory-resources/scvmm-enable-azure-expanded.png"::: + +1. In **Enable in Azure**, select your **Azure subscription** and **Resource Group** and select **Enable**. + + :::image type="content" source="media/enable-scvmm-inventory-resources/scvmm-select-sub-resource-inline.png" alt-text="Screenshot of how to select subscription and resource group." lightbox="media/enable-scvmm-inventory-resources/scvmm-select-sub-resource-expanded.png"::: + + The deployment is initiated and it creates a resource in Azure, representing your SCVMM resources. It allows you to manage the access to these resources through the Azure role-based access control (RBAC) granularly. + + Repeat the above steps for one or more VM networks and VM template resources. + +## Enable existing virtual machines in Azure + +To enable the existing virtual machines in Azure, follow these steps: + +1. From Azure home > **Azure Arc** center, go to **SCVMM management servers (preview)** blade and go to inventory resources blade. + +1. Go to **SCVMM inventory** resource blade, select **Virtual machines** and then select the VMs you want to enable and select **Enable in Azure**. + + :::image type="content" source="media/enable-scvmm-inventory-resources/scvmm-enable-existing-vm-inline.png" alt-text="Screenshot of how to enable existing virtual machines in Azure." lightbox="media/enable-scvmm-inventory-resources/scvmm-enable-existing-vm-expanded.png"::: + +1. Select your **Azure subscription** and **Resource group**. + +1. Select **Enable** to start the deployment of the VM represented in Azure. 
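Once an SCVMM resource is represented in Azure, access is granted with standard Azure RBAC. The following is a hedged sketch of assigning one of the roles mentioned in the prerequisites earlier; the exact built-in role name, user, and scope are assumptions to replace with your own values:

```azurecli
# Sketch: grant a user the Arc SCVMM VM Contributor role on a resource group that
# contains Azure-enabled SCVMM resources. Role name, assignee, and scope are
# placeholders based on the prerequisites listed in this article.
az role assignment create \
  --assignee "user@contoso.com" \
  --role "Arc SCVMM VM Contributor" \
  --scope "/subscriptions/<subscription-id>/resourceGroups/<resource-group>"
```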
+ +## Next steps + +[Connect virtual machines to Arc](quickstart-connect-system-center-virtual-machine-manager-to-arc.md) diff --git a/articles/azure-arc/system-center-virtual-machine-manager/index.yml b/articles/azure-arc/system-center-virtual-machine-manager/index.yml new file mode 100644 index 0000000000000..19e081bca5a92 --- /dev/null +++ b/articles/azure-arc/system-center-virtual-machine-manager/index.yml @@ -0,0 +1,41 @@ +### YamlMime:Landing +title: Azure Arc-enabled System Center Virtual Machine Manager +summary: Learn how to manage Hybrid environment with Azure Arc-enabled System Center Virtual Machine Manager + +metadata: + title: Azure Arc-enabled System Center Virtual Machine Manager + description: Learn about how to manage Hybrid environment with Azure Arc-enabled System Center Virtual Machine Manager + author: jyothisuri + ms.service: azure-arc + ms.topic: landing-page + ms.date: 04/28/2022 + ms.author: jsuri + +landingContent: + - title: About Azure Arc-enabled System Center Virtual Machine Manager (preview) + linkLists: + - linkListType: overview + links: + - text: What is Azure Arc-enabled System Center Virtual Machine Manager? + url: overview.md + + - title: Get started + linkLists: + - linkListType: quickstart + links: + - text: Connect a System Center Virtual Machine Manager management server to Azure Arc + url: quickstart-connect-system-center-virtual-machine-manager-to-arc.md + + - title: Create Virtual Machine + linkLists: + - linkListType: how-to-guide + links: + - text: Create a virtual machine + url: create-virtual-machine.md + + - title: Enable SCVMM inventory in Azure + linkLists: + - linkListType: how-to-guide + links: + - text: Enable SCVMM inventory in Azure + url: enable-scvmm-inventory-resources.md \ No newline at end of file diff --git a/articles/azure-arc/system-center-virtual-machine-manager/media/architecture/arc-scvmm-architecture.png b/articles/azure-arc/system-center-virtual-machine-manager/media/architecture/arc-scvmm-architecture.png new file mode 100644 index 0000000000000..0d943d000fd8b Binary files /dev/null and b/articles/azure-arc/system-center-virtual-machine-manager/media/architecture/arc-scvmm-architecture.png differ diff --git a/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-enable-azure-expanded.png b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-enable-azure-expanded.png new file mode 100644 index 0000000000000..931fe1fcf1250 Binary files /dev/null and b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-enable-azure-expanded.png differ diff --git a/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-enable-azure-inline.png b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-enable-azure-inline.png new file mode 100644 index 0000000000000..931fe1fcf1250 Binary files /dev/null and b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-enable-azure-inline.png differ diff --git a/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-enable-existing-vm-expanded.png b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-enable-existing-vm-expanded.png new file mode 100644 index 0000000000000..57c19a3ccd1c1 Binary files /dev/null and 
b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-enable-existing-vm-expanded.png differ diff --git a/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-enable-existing-vm-inline.png b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-enable-existing-vm-inline.png new file mode 100644 index 0000000000000..57c19a3ccd1c1 Binary files /dev/null and b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-enable-existing-vm-inline.png differ diff --git a/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-select-sub-resource-expanded.png b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-select-sub-resource-expanded.png new file mode 100644 index 0000000000000..67bd0fa585f82 Binary files /dev/null and b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-select-sub-resource-expanded.png differ diff --git a/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-select-sub-resource-inline.png b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-select-sub-resource-inline.png new file mode 100644 index 0000000000000..67bd0fa585f82 Binary files /dev/null and b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-select-sub-resource-inline.png differ diff --git a/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-server-blade-expanded.png b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-server-blade-expanded.png new file mode 100644 index 0000000000000..23677332c91fa Binary files /dev/null and b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-server-blade-expanded.png differ diff --git a/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-server-blade-inline.png b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-server-blade-inline.png new file mode 100644 index 0000000000000..23677332c91fa Binary files /dev/null and b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/scvmm-server-blade-inline.png differ diff --git a/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/view-scvmm-servers-expanded.png b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/view-scvmm-servers-expanded.png new file mode 100644 index 0000000000000..61b2fb4f040d8 Binary files /dev/null and b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/view-scvmm-servers-expanded.png differ diff --git a/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/view-scvmm-servers-inline.png b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/view-scvmm-servers-inline.png new file mode 100644 index 0000000000000..61b2fb4f040d8 Binary files /dev/null and 
b/articles/azure-arc/system-center-virtual-machine-manager/media/enable-scvmm-inventory-resources/view-scvmm-servers-inline.png differ diff --git a/articles/azure-arc/system-center-virtual-machine-manager/media/overview/architecture.png b/articles/azure-arc/system-center-virtual-machine-manager/media/overview/architecture.png new file mode 100644 index 0000000000000..b5fcc75adedc5 Binary files /dev/null and b/articles/azure-arc/system-center-virtual-machine-manager/media/overview/architecture.png differ diff --git a/articles/azure-arc/system-center-virtual-machine-manager/media/quick-start-connect-scvmm-to-azure/overview-add-infrastructure-expanded.png b/articles/azure-arc/system-center-virtual-machine-manager/media/quick-start-connect-scvmm-to-azure/overview-add-infrastructure-expanded.png new file mode 100644 index 0000000000000..1d6de7134f6a1 Binary files /dev/null and b/articles/azure-arc/system-center-virtual-machine-manager/media/quick-start-connect-scvmm-to-azure/overview-add-infrastructure-expanded.png differ diff --git a/articles/azure-arc/system-center-virtual-machine-manager/media/quick-start-connect-scvmm-to-azure/overview-add-infrastructure-inline.png b/articles/azure-arc/system-center-virtual-machine-manager/media/quick-start-connect-scvmm-to-azure/overview-add-infrastructure-inline.png new file mode 100644 index 0000000000000..1d6de7134f6a1 Binary files /dev/null and b/articles/azure-arc/system-center-virtual-machine-manager/media/quick-start-connect-scvmm-to-azure/overview-add-infrastructure-inline.png differ diff --git a/articles/azure-arc/system-center-virtual-machine-manager/media/quick-start-connect-scvmm-to-azure/platform-add-system-center-vmm-expanded.png b/articles/azure-arc/system-center-virtual-machine-manager/media/quick-start-connect-scvmm-to-azure/platform-add-system-center-vmm-expanded.png new file mode 100644 index 0000000000000..f43878a86a3b4 Binary files /dev/null and b/articles/azure-arc/system-center-virtual-machine-manager/media/quick-start-connect-scvmm-to-azure/platform-add-system-center-vmm-expanded.png differ diff --git a/articles/azure-arc/system-center-virtual-machine-manager/media/quick-start-connect-scvmm-to-azure/platform-add-system-center-vmm-inline.png b/articles/azure-arc/system-center-virtual-machine-manager/media/quick-start-connect-scvmm-to-azure/platform-add-system-center-vmm-inline.png new file mode 100644 index 0000000000000..f43878a86a3b4 Binary files /dev/null and b/articles/azure-arc/system-center-virtual-machine-manager/media/quick-start-connect-scvmm-to-azure/platform-add-system-center-vmm-inline.png differ diff --git a/articles/azure-arc/system-center-virtual-machine-manager/overview.md b/articles/azure-arc/system-center-virtual-machine-manager/overview.md new file mode 100644 index 0000000000000..67f2dc12edf74 --- /dev/null +++ b/articles/azure-arc/system-center-virtual-machine-manager/overview.md @@ -0,0 +1,58 @@ +--- +title: Overview of the Azure Connected System Center Virtual Machine Manager (preview) +description: This article provides a detailed overview of the Azure Arc-enabled System Center Virtual Machine Manager (preview). 
+ms.date: 05/25/2022 +ms.topic: conceptual +ms.service: azure-arc +author: jyothisuri +ms.author: jsuri +keywords: "VMM, Arc, Azure" +ms.custom: references_regions +--- + +# Overview of Arc-enabled System Center Virtual Machine Manager (preview) + +Azure Arc-enabled System Center Virtual Machine Manager (SCVMM) empowers System Center customers to connect their VMM environment to Azure and perform VM self-service operations from the Azure portal. With Azure Arc-enabled SCVMM, you get a consistent management experience across Azure. + +Azure Arc-enabled System Center Virtual Machine Manager allows you to manage your hybrid environment and perform self-service VM operations through the Azure portal. For Microsoft Azure Pack customers, this solution is intended as an alternative for performing VM self-service operations. + +Arc-enabled System Center VMM allows you to: + +- Perform VM lifecycle operations, such as start, stop, pause, and delete, on VMM-managed VMs directly from Azure. +- Empower developers and application teams to self-serve VM operations on demand using [Azure role-based access control (RBAC)](/azure/role-based-access-control/overview). +- Browse your VMM resources (VMs, templates, VM networks, and storage) in Azure, providing a single-pane view of your infrastructure across both environments. +- Discover and onboard existing SCVMM-managed VMs to Azure. + +## How does it work? + +To Arc-enable a System Center VMM management server, deploy [Azure Arc resource bridge](/azure/azure-arc/resource-bridge/overview) (preview) in the VMM environment. Arc resource bridge is a virtual appliance that connects the VMM management server to Azure. Azure Arc resource bridge (preview) enables you to represent the SCVMM resources (clouds, VMs, templates, and so on) in Azure and perform various operations on them. + +## Architecture + +The following image shows the architecture for Arc-enabled SCVMM: + +:::image type="content" source="media/architecture/arc-scvmm-architecture.png" alt-text="Diagram of the Arc-enabled SCVMM architecture."::: + +### Supported VMM versions + +Azure Arc-enabled SCVMM works with VMM 2016, 2019, and 2022, and supports SCVMM management servers with a maximum of 3,500 VMs. + +### Supported scenarios + +The following scenarios are supported in Azure Arc-enabled SCVMM (preview): + +- SCVMM administrators can connect a VMM instance to Azure and browse the SCVMM virtual machine inventory in Azure. +- Administrators can use the Azure portal to browse the SCVMM inventory and register SCVMM clouds, virtual machines, VM networks, and VM templates in Azure. +- Administrators can grant app teams and developers fine-grained permissions on those SCVMM resources through Azure RBAC. +- App teams can use Azure interfaces (portal, CLI, or REST API) to manage the lifecycle of the on-premises VMs they use to deploy their applications (CRUD, start/stop/restart); see the CLI sketch after this list.
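As an illustration of the last scenario, once a VM has been enabled in Azure, an app team could drive day-2 operations from the Azure CLI. This is a minimal sketch, assuming the `az scvmm` extension's `vm start`, `vm stop`, `vm restart`, and `vm show` commands; the resource group and VM names are placeholders, and parameter names may differ between extension versions.

```azurecli-interactive
# Day-2 lifecycle operations on an SCVMM-managed VM that has been enabled in Azure.
# Placeholder values: contoso-rg, contoso-vm-01.
az scvmm vm start --resource-group contoso-rg --name contoso-vm-01
az scvmm vm stop --resource-group contoso-rg --name contoso-vm-01
az scvmm vm restart --resource-group contoso-rg --name contoso-vm-01

# Inspect the Azure representation of the VM.
az scvmm vm show --resource-group contoso-rg --name contoso-vm-01 --output table
```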
+ +### Supported regions + +Azure Arc-enabled SCVMM (preview) is currently supported in the following regions: + +- East US +- West Europe + +## Next steps + +[See how to create an Azure Arc VM](create-virtual-machine.md) diff --git a/articles/azure-arc/system-center-virtual-machine-manager/quickstart-connect-system-center-virtual-machine-manager-to-arc.md b/articles/azure-arc/system-center-virtual-machine-manager/quickstart-connect-system-center-virtual-machine-manager-to-arc.md new file mode 100644 index 0000000000000..4b11048cd3d18 --- /dev/null +++ b/articles/azure-arc/system-center-virtual-machine-manager/quickstart-connect-system-center-virtual-machine-manager-to-arc.md @@ -0,0 +1,137 @@ +--- +title: Quickstart for Azure Arc-enabled System Center Virtual Machine Manager (preview) +description: In this quickstart, you learn how to use the helper script to connect your System Center Virtual Machine Manager management server to Azure Arc (preview). +author: jyothisuri +ms.author: jsuri +ms.topic: quickstart +ms.date: 05/25/2022 +ms.custom: references_regions +--- + +# Quickstart: Connect your System Center Virtual Machine Manager management server to Azure Arc (preview) + +Before you can start using the Azure Arc-enabled SCVMM features, you need to connect your VMM management server to Azure Arc. + +This quickstart shows you how to connect your SCVMM management server to Azure Arc using a helper script. The script deploys a lightweight Azure Arc appliance (called Azure Arc resource bridge) as a virtual machine running in your VMM environment and installs an SCVMM cluster extension on it to provide a continuous connection between your VMM management server and Azure Arc. + +## Prerequisites + +| **Requirement** | **Details** | +| --- | --- | +| **Azure** | An Azure subscription

                  A resource group in the above subscription where you have the *Owner/Contributor* role. | +| **SCVMM** | You need an SCVMM management server running version 2016 or later.

                  A private cloud that has at least one cluster with a minimum free capacity of 16 GB of RAM, 4 vCPUs, and 100 GB of free disk space.

                  A VM network with internet access, directly or through a proxy. The appliance VM will be deployed using this VM network.

                  For dynamic IP allocation to the appliance VM, a DHCP server is required. For static IP allocation, a VMM static IP pool is required. | +| **SCVMM accounts** | An SCVMM admin account that can perform all administrative actions on all objects that VMM manages.

                  The user should be part of the local administrators group on the SCVMM server.

                  This account will be used for the ongoing operation of Azure Arc-enabled SCVMM as well as for the deployment of the Arc resource bridge VM. | +| **Workstation** | The workstation will be used to run the helper script.

                  A Windows or Linux machine that can access both your SCVMM management server and the internet, directly or through a proxy.

                  The helper script can be run directly from the VMM server machine as well.

                  When you run the script from a Linux machine, the deployment takes a bit longer and you might experience performance issues. | + +## Prepare SCVMM management server + +- Create an SCVMM private cloud if you don't have one. The private cloud should have a reservation of at least 16 GB of RAM and 4 vCPUs. It should also have at least 100 GB of disk space. +- Ensure that the SCVMM administrator account has the appropriate permissions. + +## Download the onboarding script + +1. Go to the [Azure portal](https://aka.ms/SCVMM/MgmtServers). +1. Search for and select **Azure Arc**. +1. On the **Overview** page, select **Add** under **Add your infrastructure for free**, or go to the **Infrastructure** tab. + + :::image type="content" source="media/quick-start-connect-scvmm-to-azure/overview-add-infrastructure-inline.png" alt-text="Screenshot of how to select Add your infrastructure for free." lightbox="media/quick-start-connect-scvmm-to-azure/overview-add-infrastructure-expanded.png"::: + +1. In the **Platform** section, under **System Center VMM**, select **Add**. + + :::image type="content" source="media/quick-start-connect-scvmm-to-azure/platform-add-system-center-vmm-inline.png" alt-text="Screenshot of how to select System Center V M M platform." lightbox="media/quick-start-connect-scvmm-to-azure/platform-add-system-center-vmm-expanded.png"::: + +1. Select **Create new resource bridge** and then select **Next**. +1. Provide a name for **Azure Arc resource bridge**. For example: *contoso-nyc-resourcebridge*. +1. Select a subscription and resource group where you want to create the resource bridge. +1. Under **Region**, select an Azure location where you want to store the resource metadata. The currently supported regions are **East US** and **West Europe**. +1. Provide a name for **Custom location**. This is the name that you'll see when you deploy virtual machines. Name it for the datacenter or the physical location of your datacenter. For example: *contoso-nyc-dc.* +1. Leave the option **Use the same subscription and resource group as your resource bridge** selected. +1. Provide a name for your **SCVMM management server instance** in Azure. For example: *contoso-nyc-scvmm.* +1. Select **Next: Download and run script**. +1. If your subscription isn't registered with all the required resource providers, select **Register** to proceed to the next step. +1. Based on the operating system of your workstation, download the PowerShell or Bash script and copy it to the workstation. +1. To see the status of your onboarding after you run the script on your workstation, select **Next: Verification**. The onboarding isn't affected when you close this page. + +## Run the script + +Use the following instructions to run the script, depending on the operating system of the workstation. + +>[!NOTE] +>Before running the script, install the latest version of the Azure CLI (2.36.0 or later). + + +### Windows + +Follow these instructions to run the script on a Windows machine. + +1. Open a new PowerShell window and verify that the Azure CLI is installed on the workstation by running the following command: + ```azurepowershell-interactive + az + ``` +1. Navigate to the folder where you've downloaded the PowerShell script: + *cd C:\Users\ContosoUser\Downloads* + +1. 
Run the following command to allow the script to run since it's an unsigned script (if you close the session before you complete all the steps, run this command again for the new session): + ```azurepowershell-interactive + Set-ExecutionPolicy -Scope Process -ExecutionPolicy Bypass + ``` +1. Run the script: + ```azurepowershell-interactive + ./resource-bridge-onboarding-script.ps1 + ``` +### Linux + +Follow these instructions to run the script on a Linux machine: + +1. Open the terminal and navigate to the folder, where you've downloaded the Bash script. +2. Execute the script using the following command: + + ```sh + bash resource-bridge-onboarding-script.sh + ``` + +## Script runtime +The script execution will take up to half an hour and you'll be prompted for various details. See the following table for related information: + +| **Parameter** | **Details** | +| --- | --- | +| **Azure login** | You would be asked to log in to Azure by visiting [this site](https://www.microsoft.com/devicelogin) and pasting the prompted code. | +| **SCVMM management server FQDN/Address** | FQDN for the VMM server (or an IP address).
                  Provide the role name if it's a highly available VMM deployment.
                  For example: nyc-scvmm.contoso.com or 10.160.0.1 | +| **SCVMM Username**
                  (domain\username) | Username for the SCVMM administrator account. The required permissions for the account are listed in the prerequisites above.
                  Example: contoso\contosouser | +| **SCVMM password** | Password for the SCVMM admin account. | +| **Private cloud selection** | Select the name of the private cloud where the Arc resource bridge VM should be deployed. | +| **Virtual Network selection** | Select the name of the virtual network to which the *Arc resource bridge VM* needs to be connected. This network should allow the appliance to talk to the VMM management server and the Azure endpoints (or internet). | +| **Static IP pool** | Select the VMM static IP pool that will be used to allot the IP address. | +| **Control Plane IP** | Provide a reserved IP address (a reserved IP address in your DHCP range, or a static IP outside of the DHCP range but still available on the network). This IP address shouldn't be assigned to any other machine on the network. | +| **Appliance proxy settings** | Enter ‘Y’ if there's a proxy in your appliance network; otherwise, enter ‘N’.| +| **http** | Address of the HTTP proxy server. | +| **https** | Address of the HTTPS proxy server.| +| **NoProxy** | Addresses to be excluded from the proxy.| +|**CertificateFilePath** | For SSL-based proxies, provide the path to the certificate. | + +Once the command execution completes, your setup is finished, and you can try out the capabilities of Azure Arc-enabled SCVMM. + +### Retry command - Windows + +If the appliance creation fails for any reason, you need to retry it. Run the command with ```-Force``` to clean up and onboard again. + +```powershell-interactive + ./resource-bridge-onboarding-script.ps1 -Force -Subscription -ResourceGroup -AzLocation -ApplianceName -CustomLocationName -VMMservername +``` + +### Retry command - Linux + +If the appliance creation fails for any reason, you need to retry it. Run the command with ```--force``` to clean up and onboard again. + + ```sh + bash resource-bridge-onboarding-script.sh --force + ``` +>[!NOTE] +> - After successful deployment, we recommend keeping the **Arc Resource Bridge VM** in an *online* state. +> - The appliance might intermittently become unreachable when you shut down and restart the VM. 
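After the script finishes, you can optionally confirm from the command line that the resource bridge and the connected SCVMM management server resources exist in Azure. This is a minimal sketch, assuming the `az arcappliance` and `az scvmm` CLI extensions are installed; the resource group and resource names are placeholders, and the available output fields depend on your extension versions.

```azurecli-interactive
# Check the Azure Arc resource bridge (appliance) created by the onboarding script.
# Placeholder values: contoso-rg, contoso-nyc-resourcebridge, contoso-nyc-scvmm.
az arcappliance show --resource-group contoso-rg --name contoso-nyc-resourcebridge --output table

# Check the connected SCVMM management server resource.
az scvmm vmmserver show --resource-group contoso-rg --name contoso-nyc-scvmm --output table
```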
+ + +## Next steps + +[Create a VM](create-virtual-machine.md) diff --git a/articles/azure-arc/system-center-virtual-machine-manager/toc.yml b/articles/azure-arc/system-center-virtual-machine-manager/toc.yml new file mode 100644 index 0000000000000..4c3e8891c259b --- /dev/null +++ b/articles/azure-arc/system-center-virtual-machine-manager/toc.yml @@ -0,0 +1,29 @@ +- name: Azure Arc-enabled System Center Virtual Machine Manager (preview) + href: index.yml +- name: Overview + items: + - name: Azure Arc-enabled System Center Virtual Machine Manager (preview) + href: overview.md +- name: Quickstarts + items: + - name: Connect a System Center Virtual Machine Manager management server to Azure Arc (preview) + href: quickstart-connect-system-center-virtual-machine-manager-to-arc.md + + +- name: How-to guides + items: + - name: Create a virtual machine (preview) + href: create-virtual-machine.md + - name: Enable SCVMM inventory resources + href: enable-scvmm-inventory-resources.md + +- name: Reference + items: + - name: Azure CLI + items: + - name: Azure Arc-enabled System Center VMM (preview) + href: /cli/azure/scvmm + - name: Azure Arc resource bridge + href: /cli/azure/arcappliance + + diff --git a/articles/azure-arc/toc.yml b/articles/azure-arc/toc.yml index 3d2dfd981b2c5..321131854edac 100644 --- a/articles/azure-arc/toc.yml +++ b/articles/azure-arc/toc.yml @@ -19,15 +19,19 @@ - name: Security overview href: ./resource-bridge/security-overview.md - name: Troubleshoot - href: ./resource-bridge/troubleshoot-resource-bridge.md + href: ./resource-bridge/troubleshoot-resource-bridge.md - name: Azure Arc-enabled servers > href: ./servers/overview.md - name: Azure Arc-enabled Kubernetes > href: ./kubernetes/overview.md -- name: Azure Arc-enabled data services > +- name: Azure Arc-enabled data services > href: ./data/overview.md -- name: Azure Arc-enabled VMware vSphere > +- name: Azure Arc-enabled VMware vSphere > href: ./vmware-vsphere/overview.md +- name: Azure Arc-enabled System Center Virtual Machine Manager (preview) > + href: ./system-center-virtual-machine-manager/overview.md +- name: Azure Arc-enabled VMs on Azure Stack HCI > + href: /azure-stack/hci/manage/azure-arc-enabled-virtual-machines - name: SQL Server on Azure Arc-enabled servers > href: /sql/sql-server/azure-arc/overview - name: Arc validation program @@ -43,4 +47,4 @@ - name: Azure CLI items: - name: Azure Arc resource bridge - href: /cli/azure/arcappliance \ No newline at end of file + href: /cli/azure/arcappliance diff --git a/articles/azure-australia/australia-overview.md b/articles/azure-australia/australia-overview.md deleted file mode 100644 index 0391acffcf438..0000000000000 --- a/articles/azure-australia/australia-overview.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: What is Azure Australia? | Microsoft Docs -description: Guidance on configuring Azure within the Australian regions to meet the specific requirements of Australian Government policy, regulations, and legislation. -author: emilyre -ms.service: azure-australia -ms.topic: overview -ms.date: 07/22/2019 -ms.author: yvettep -ms.custom: references_regions ---- - -# What is Azure Australia? - -In 2014, Azure was launched in Australia, with two regions; Australia East (Sydney) and Australia Southeast (Melbourne). In April 2018, two new Azure Regions located in Canberra – Australia Central and Australia Central 2, were launched. 
The Australia Central and Australia Central 2 regions are purposely designed to meet the needs of government and critical national infrastructure, and offer specialised connectivity and flexibility so you can locate your systems beside the cloud, with levels of security and resilience only expected of Secret-classified networks. Azure Australia is a platform for the digital transformation of government and critical national infrastructure – and the only mission-critical cloud available in Australia designed specifically for those needs. - -There are specific Australian Government requirements for connecting to, consuming, and operating within [Microsoft Azure Australia](https://azure.microsoft.com/global-infrastructure/australia/) for Australian Government data and systems. The resources on this page also provide general guidance applicable to all customers with a specific focus on secure configuration and operation. - -Refer to the Australia page of the [Microsoft Service Trust Portal](https://aka.ms/au-irap) for current information on the Azure Australia Information Security Registered Assessor (IRAP) Assessments, certification and inclusion on the Certified Cloud Services List (CCSL). On the Australia page, you will also find other Microsoft advice specific to Government and Critical Infrastructure providers. - -## Principles for securing customer data in Azure Australia - -Azure Australia provides a range of features and services that you can use to build cloud solutions to meet your regulated/controlled data needs. A compliant customer solution is nothing more than the effective implementation of out-of-the-box Azure Australia capabilities, coupled with a solid data security practice. - -When you host a solution in Azure Australia, Microsoft handles many of these requirements at the cloud infrastructure level. - -The following diagram shows the Azure defence-in-depth model. For example, Microsoft provides basic cloud infrastructure DDoS, along with customer capabilities such as security appliances or premium DDoS services for customer-specific application needs. - -![alt text](media/defenceindepth.png) - -These articles outline the foundational principles for securing your services and applications, with guidance and best practices on how to apply these principles. In other words, how customers should make smart use of Azure Australia to meet the obligations and responsibilities that are required for a solution that handles Government sensitive and classified information. - -There are two categories of documentation provided for Australian Government agencies migrating to Azure. - -## Security in Azure Australia - -Identity, Azure role-based access control, data protection through encryption and rights management, and effective monitoring and configuration control are key elements that you need to implement. In this section, there are a series of articles explaining the built-in capabilities of Azure and how they relate to the ISM and ASD Essential 8. - -These articles can be accessed through the menu under *Concepts -> Security in Azure Australia*. - -## Gateways in Azure Australia - -Another key step for Government agencies is the establishment of perimeter security capabilities. These capabilities are called Secure Internet Gateways (SIG) and when using Azure it is your responsibility to ensure these protections are in place. 
Microsoft does not operate a SIG; however, by combining our edge networking services that protect all customers, and specific services deployed within your Azure environment you can operate an equivalent capability. - -These articles can be accessed through the menu under *Concepts -> Gateways in Azure Australia*. - -## Next steps - -* If your key focus is securing your data in Azure, start with [Data Security](secure-your-data.md) -* If your key focus is building a Gateway in Azure, start with [Gateway auditing, logging, and visibility](gateway-log-audit-visibility.md). diff --git a/articles/azure-australia/azure-key-vault.md b/articles/azure-australia/azure-key-vault.md deleted file mode 100644 index 83d8efbff1d16..0000000000000 --- a/articles/azure-australia/azure-key-vault.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -title: Azure Key Vault in Azure Australia -description: Guidance on configuring and using Azure Key Vault for key management within the Australian regions to meet the specific requirements of Australian Government policy, regulations, and legislation. -author: emilyre -ms.service: azure-australia -ms.topic: conceptual -ms.date: 07/22/2019 -ms.author: yvettep ---- - -# Azure Key Vault in Azure Australia - -The secure storage of cryptographic keys and management of the cryptographic key lifecycle are critical elements within cryptographic systems. The service that provides this capability in Azure is the Azure Key Vault. Key Vault has been IRAP security accessed and ACSC certified for PROTECTED. This article outlines the key considerations when using Key Vault to comply with the Australian Signals Directorate's (ASD) [Information Security Manual Controls](https://acsc.gov.au/infosec/ism/) (ISM). - -Azure Key Vault is a cloud service that safeguards encryption keys and secrets. Because this data is sensitive and business critical, Key Vault enables secure access to key vaults, allowing only authorized users and applications. There are three main artifacts managed and controlled by Key Vault: - -- keys -- secrets -- certificates - -This article will focus on management of keys using Key Vault. - -![Azure Key Vault](media/azure-key-vault-overview.png) - -*Diagram 1 – Azure Key Vault* - -## Key design considerations - -### Deployment options - -There are two options for creating Azure Key Vaults. Both options use the nCipher nShield family of Hardware Security Modules (HSM), are Federal Information Processing Standards (FIPS) validated, and are approved to store keys in PROTECTED environments. The options are: - -- **Software-protected vaults:** FIPS 140-2 level 1 validated. Keys stored on an HSM. Encryption and decryption operations are performed in compute resources on Azure. -- **HSM-protected vaults:** FIPS 140-2 level 2 validated. Keys stored on an HSM. Encryption and decryption operations are performed on the HSM. - -Key Vault supports Rivest-Shamir-Adleman (RSA) and Elliptic Curve Cryptography (ECC) keys. The default is RSA 2048-bit keys but there is an advanced option for RSA 3072-bit, RSA 4096-bit, and ECC keys. All keys meet the ISM controls, but Elliptic Curve keys are preferred. 
- -### Resource operations - -There are several personas involved in Azure Key Vault: - -- **Key Vault administrator:** Manages the lifecycle of the vault -- **Key administrator:** Manages the lifecycle of keys in the vault -- **Developer/operator:** Integrate keys from the vault into applications and services -- **Auditor:** Monitor key usage and access -- **Applications:** Use keys to secure information - -Azure Key Vault is secured with two separate interfaces: - -- **Management Plane:** This plane deals with managing the vault and it secured by Azure RBAC. -- **Data Plane:** This plane deals with managing and accessing the artifacts in the vault. Secured using Key Vault access policy. - -As required by the ISM, proper authentication and authorisation are required before a caller (a user or an application) before they can get access to key vault by either plane. - -Azure RBAC has one built-in role for Key Vault, [Key Vault Contributor](../role-based-access-control/built-in-roles.md#key-vault-contributor), to control management of the Key Vaults. The creation of custom roles aligned to more granular roles for managing your Key Vaults is recommended. - ->[!WARNING] ->When access to keys is enabled via Key Vault access policy then the user or application has that access to all keys in the key vault (for example, if a user has 'delete' access then they can delete all keys). Therefore, multiple key vaults should be deployed to align with security domains/boundaries. - -### Networking - -You can configure Key Vault firewalls and virtual networks to control access to the data plane. You can allow access to users or applications on specified networks while denying access to users or applications on all other networks. [Trusted services](../key-vault/general/overview-vnet-service-endpoints.md#trusted-services) are an exception to this control if "Allow trusted services" is enabled. The virtual networking control does not apply to the management plane. - -Access to Key Vaults should be explicitly restricted to the minimum set of networks that have users or applications requiring access to keys. - -### Bring Your Own Key (BYOK) - -Key Vault supports BYOK. BYOK enables users to import keys from their existing key infrastructures. The BYOK toolset supports the secure transfer and import of keys from an external HSM (for example, keys generated with an offline workstation) into Key Vault. - -Go to the Microsoft Download Center and [download the Azure Key Vault BYOK toolset](https://www.microsoft.com/download/details.aspx?id=45345) for Australia. The package name to download and its corresponding SHA-256 package hash are: - -|Package Name|SHA-256 Hash| -|---|---| -|KeyVault-BYOK-Tools-Australia.zip|CD0FB7365053DEF8C35116D7C92D203C64A3D3EE2452A025223EEB166901C40A| -| - -### Key Vault auditing and logging - -The ACSC requires Commonwealth entities to use the appropriate Azure services to undertake real-time monitoring and reporting on their Azure workloads. - -Logging is enabled by enabling the **_"AuditEvent"_** diagnostic setting on Key Values. Audit events will be logged to the specified storage account. **_"RetentionInDays"_** period should be set according to the data retention policy. [Operations](../key-vault/general/logging.md#interpret-your-key-vault-logs) on both the management plane and data plane are audited and logged. The [Azure Key Vault solution in Azure Monitor](../azure-monitor/insights/key-vault-insights-overview.md) can be used to review Key Vault AuditEvent logs. 
A number of other Azure services can be used to process and distribute Key Vault AuditEvents. - -### Key rotation - -Storing keys in Key Vault provided a single point to maintain keys outside applications that enable keys to be updated without affecting the behaviour of the applications. Storing keys in Azure Key Vault enables various strategies for supporting key rotation: - -- Manually -- Programmatically via APIs -- Automation Scripts (for example, using PowerShell and Azure Automation) - -These options enable keys to be rotated on a periodic basis to satisfy compliance requirements or on an ad-hoc basis if there are concerns that keys may have been compromised. - -#### Key rotation strategies - -It is important to develop an appropriate key rotation strategy for keys which are stored in KeyVault. Using the wrong key will lead to information being incorrectly decrypted, and losing keys can lead to the complete loss of access to information. Examples of key rotation strategies for different scenarios include: - -- **Inflight data:** volatile information is transmitted between 2 parties. When a key is rotated then both parties must have a mechanism to synchronous retrieving the updated keys from the key vault. -- **Data as rest:** A party stores encrypted data and decrypts it in the future to use. When a key is going to rotated then the data must be decrypted with the old key and then encrypted with the new, rotated key. There are approaches to minimize the impact of the decrypt/encrypt process using key encrypting keys (see example). Microsoft manages the majority of the process related to key rotation for Azure Storage (see…) -- **Access keys:** a number of Azure services have access keys that can be stored in Key Vault (for example, CosmosDB). The azure services have primary and secondary access keys. It is important that both keys are not rotated at the same time. Therefore, one key should be rotated then after a period and the key operation has been verified then the second key can be rotated. - -### High availability - -The ISM has several controls that relate to Business Continuity. -Azure Key Vault has multiple layers of redundancy with contents replicated within the region and to the secondary, [paired region](../availability-zones/cross-region-replication-azure.md). - -When the key vault is in a fail-over state, it is in read-only mode and will return to read-write mode the primary service is restored. - -The ISM has several controls related to backup. It is important to develop and execute appropriate backup/restore plans for vaults and their keys. - -## Key lifecycle - -### Key operations - -Key Vault support the following operations on a key: - -- **create:** Allows a client to create a key in Key Vault. The value of the key is generated by Key Vault and stored, and isn't released to the client. Asymmetric keys may be created in Key Vault. -- **import:** Allows a client to import an existing key to Key Vault. Asymmetric keys may be imported to Key Vault using a number of different packaging methods within a JWK construct. -- **update:** Allows a client with sufficient permissions to modify the metadata (key attributes) associated with a key previously stored within Key Vault. -- **delete:** Allows a client with sufficient permissions to delete a key from Key Vault. -- **list:** Allows a client to list all keys in a given Key Vault. -- **list versions:** Allows a client to list all versions of a given key in a given Key Vault. 
-- **get:** Allows a client to retrieve the public parts of a given key in a Key Vault. -- **backup:** Exports a key in a protected form. -- **restore:** Imports a previously backed up key. - -There is a corresponding set of permissions that can be granted to users, service principals, or applications using Key Vault access control entries to enable them to execute key operations. - -Key Vault has a soft delete feature to allow the recovery of deleted vaults and keys. By default, **_"soft delete"_** is not enabled, but once enabled, objects are held for 90 days (the retention period) while appearing to be deleted. An additional permission **_"purge"_**, allows the permanent deletion of keys if the **_"Purge Protection"_** option is disabled. - -Creating or importing an existing key creates a new version of the key. - -### Cryptographic operations - -Key Vault also supports cryptographic operations using keys: - -- **sign and verify:** this operation is a "sign hash" or "verify hash". Key Vault does not support hashing of content as part of signature creation. -- **key encryption/wrapping:** this operation is used to protect another key. -- **encrypt and decrypt:** the stored key is used to encrypt or decrypt a single block of data - -There is a corresponding set of permissions that can be granted to users, service principals, or applications using Key Vault access control entries to enable them to execute cryptographic operations. - -There are three key attributes that can set to control whether a key is enabled and useable of cryptographic operations: - -- **enabled** -- **nbf:** not before enabled before specified date -- **exp:** expiration date - -## Storage and keys - -Customer-managed keys are more flexibility and enable assess to and management of the keys to be controlled. They also enable auditing the encryption keys used to protect data. -There are three aspects to storage and keys stored in Key Vault: - -- Key Vault managed storage account keys -- Azure Storage Service Encryption (SSE) for data at rest -- Managed disks and Azure Disk Encryption - -Key Vault's Azure Storage account key management is an extension to Key Vault's key service that supports synchronization and regeneration (rotation) of storage account keys. [Azure Storage integration with Azure Active Directory](../storage/blobs/authorize-access-azure-active-directory.md) (preview) is recommended when released as it provides superior security and ease of use. -SSE uses two keys to manage encryption of data at rest: - -- Key Encryption Keys (KEK) -- Data Encryption Keys (DEK) - -While Microsoft manages the DEKs, SSE has an option to use customer-managed KEKs that can be stored in Key Vault. This enables the rotation of keys in Azure Key Vault as per the appropriate compliance policies. When keys are rotated, Azure Storage re-encrypts the Account Encryption Key for that storage account. This does not result in re-encryption of all data and there is no other action required. - -SSE is used for managed disks but customer-managed keys are not supported. Encryption of managed disks can be done using Azure Disk Encryption with customer-managed KEK keys in Key Vault. 
- -## Next Steps - -Review the article on [Identity Federation](identity-federation.md) - -Review additional Azure Key Vault documentation and tutorials in the [Reference Library](reference-library.md) \ No newline at end of file diff --git a/articles/azure-australia/azure-policy.md b/articles/azure-australia/azure-policy.md deleted file mode 100644 index cbd20252af6f3..0000000000000 --- a/articles/azure-australia/azure-policy.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: Security compliance with Azure Policy and Azure Blueprints -description: Ensuring compliance and enforcing security with Azure Policy and Azure Blueprints for Australian Government agencies as it relates to the ASD ISM and Essential 8 -author: emilyre -ms.service: azure-australia -ms.topic: conceptual -ms.date: 07/22/2019 -ms.author: yvettep ---- - -# Security compliance with Azure Policy and Azure Blueprints - -The challenge of enforcing governance within your IT environment, whether it be an on-premises, cloud native or a hybrid environment, exists for all organisations. A robust technical governance framework needs to be in place to ensure your Microsoft Azure environment conforms with design, regulatory, and security requirements. - -For Australian Government agencies, they key controls to consider when assessing risk are in the [Australian Cyber Security Centre (ACSC) Information Security Manual](https://acsc.gov.au/infosec/ism/index.htm) (ISM). The majority of controls detailed within the ISM require the application of technical governance to be effectively managed and enforced. It is important you have the appropriate tools to evaluate and enforce configuration in your environments. - -Microsoft Azure provides two complimentary services to assist with these challenges, Azure Policy and Azure Blueprints. - -## Azure Policy - -Azure Policy enables the application of the technical elements of an organisation's IT governance. Azure Policy contains a constantly growing library of built-in policies. Each Policy enforces rules and effects on the targeted Azure Resources. - -Once a policy is assigned to resources, the overall compliance against that policy can be evaluated, and be remediated if necessary. - -This library of built-in Azure Polices enable an organisation to quickly enforce the types of controls found in the ACSC ISM. Examples of controls include: - -* Monitoring virtual machines for missing system updates -* Auditing accounts with elevated permissions for multi-factor authentication -* Identifying unencrypted SQL Databases -* Monitoring the use of custom Azure role-based access control (Azure RBAC) -* Restricting the Azure regions that resources can be created in - -If governance or regulatory controls are not met by a built-in Azure Policy definition, a custom definition can be created and assigned. All Azure Policy definitions are defined in JSON and follow a standard [definition structure](../governance/policy/concepts/definition-structure.md). Existing Azure Policy definitions can also be duplicated and used to form the basis of a custom Policy definition. - -Assigning individual Azure Policies to resources, especially in complex environments or in environments with strict regulatory requirements, can create large overhead for your administrators. To assist with these challenges, a set of Azure Policies can be grouped together to form an Azure Policy Initiative. 
Policy Initiatives are used to combine related Azure policies that, when applied together as a group, form the basis of a specific security or compliance objective. Microsoft is adding built-in Azure Policy Initiative definitions, including definitions designed to meet specific regulatory requirements: - -![Regulatory Compliance Policy Initiatives](media/regulatory-initiatives.png) - -All Azure Policies and Initiatives are assigned to an assignment scope. This scope is defined at either the Azure Subscription, Azure Management Group, or Azure Resource Group levels. Once the required Azure Policies or Policy Initiatives have been assigned, an organisation will be able to enforce the configuration requirements on all newly created Azure resources. - -Assigning a new Azure Policy or Initiative will not affect existing Azure resources. Azure Policy can; however, enable an organisation to view the compliance of existing Azure resources. Any resources that have been identified as being non-compliant can be remediated at the organisation's discretion - -### Azure Policy and initiatives in action - -The available built-in Azure Policy and Initiative definitions can be found under the Definition node in the Policy section of the Azure portal: - -![Built-In Azure Policy Definitions](media/policy-definitions.png) - -Using the library of built-in definitions, you can quickly search for Policies that meet an organisational requirement, review the policy definition, and assign the Policy to the appropriate resources. For example, the ISM requires multi-factor authentication (MFA) for all privileged users, and for all users with access to important data repositories. In Azure Policy you can search for "MFA" amongst the Azure Policy definitions: - -![Azure AD MFA Policies](media/mfa-policies.png) - -Once a suitable policy is identified, you assign the policy to the desired scope. If there is no built-in policy that meets your requirements, you can duplicate the existing policy and make the desired changes: - -![Duplicate existing Azure Policy](media/duplicate-policy.png) - -Microsoft also provides a collection of Azure Policy samples on [GitHub](https://github.com/Azure/azure-policy) as a 'quickstart' for you to build custom Azure Policies. These Policy samples can be copied directly into the Azure Policy editor within the Azure portal. - -When creating Azure Policy Initiatives, you can sort the list of available policy definitions, both built-in and custom, adding the required definitions. - -For instance, you could search through the list of available Azure Policy definitions for all of the policies related to Windows virtual machines. Then you those definitions to an Initiative designed to enforce recommended virtual machine hardening practices: - -![List of Azure Policies](media/initiative-definitions.png) - -While assigning an Azure Policy or Policy Initiative to an assignment scope, it is possible for you to exclude Azure resources from the effects of the Policies by excluding either Azure Management Groups or Azure Resource Groups. 
- -### Real-time enforcement and compliance assessment - -Azure Policy compliance scans of in-scope Azure resources are undertaken when the following conditions are met: - -* When an Azure Policy or Azure Policy Initiative is assigned -* When the scope of an existing Azure Policy or Initiative is changed -* On demand via the API up to a maximum of 10 scans per hour -* Once every 24 hours - the default behaviour - -A policy compliance scan for a single Azure resource is undertaken 15 minutes after a change has been made to the resource. - -An overview of the Azure Policy compliance of resources can be reviewed within the Azure portal via the Policy Compliance dashboard: - -![Azure Policy compliance score](media/simple-compliance.png) - -The overall resource compliance percentage figure is an aggregate of the compliance of all in-scope deployed resources against all of your assigned Azure Policies. This allows you to identify the resources within an environment that are non-compliant and devise the plan to best remediate these resources. - -The Policy Compliance dashboard also includes the change history for each resource. If a resource is identified as no longer being compliant with assigned policy, and automatic remediation is not enabled, you can view who made the change, what was changed, and when the changes were made to that resource. - -## Azure Blueprints - -Azure Blueprints extend the capability of Azure Policy by combining them with: - -* Azure RBAC -* Azure Resource Groups -* [Azure Resource Manager Templates](../azure-resource-manager/templates/syntax.md) - -Blueprints allow for the creation of environment designs that deploy Azure resources from Resource Manager templates, configure Azure RBAC, and enforce and audit configuration by assigning Azure Policy. Blueprints form an editable and redeployable environment template. Once the blueprint has been created, it can then be assigned to an Azure Subscription. Once assigned, all of the Azure resources defined within the blueprint will be created and the Azure Policies applied. The deployment and configuration of resources defined in an Azure blueprint can be monitored from the Azure Blueprints console in the Azure portal. - -Azure Blueprints that have been edited must be republished in the Azure portal. Each time a Blueprint is republished, the version number of the Blueprint is incremented. The version number allows you to determine which specific version of a Blueprint has been deployed to an organisation's Azure Subscriptions. If desired, the currently assigned version of the Blueprint can be updated to the latest version. - -Resources deployed using an Azure blueprint can be configured with [Azure Resource Locks](../azure-resource-manager/management/lock-resources.md) at the time of deployment. Resource locks prevent resources from being accidentally modified or deleted. - -Microsoft is developing Azure Blueprints templates for a range of industries and regulatory requirements. The current library of available Azure Blueprints definitions can be viewed in the Azure portal or the [Azure Security and Compliance Blueprint](https://servicetrust.microsoft.com/ViewPage/BlueprintOverview/) page in the Service Trust Portal. - -### Azure Blueprints artifacts - -To create an Azure Blueprint, you can start with a blank Blueprint template, or use one of the existing sample Blueprints as a starting point. 
You can add artifacts to the Blueprint that will be configured as part of deployment: - -![Azure Blueprints Artifacts](media/blueprint-artifacts.png) - -These artifacts could include the Azure Resource Group and Resources and associated Azure Policy and Policy Initiatives to enforce the configuration required for your environment to be compliant you're your regulatory requirements, for example, the ISM controls for system hardening. - -Each of these artifacts can also be configured with parameters. These values are provided when the Blueprint has been assigned to an Azure subscription and deployed. Parameters allow for a single Blueprint to be created and used to deploy resources into different environments without having to edit the underlying Blueprint. - -Microsoft is developing Azure PowerShell and CLI cmdlets to create and manage Azure Blueprints with the intention that a Blueprint could be maintained and deployed by an organisation via a CI/CD pipeline. - -## Next steps - -This article explained how governance and security can be enforced with Azure Policy and Azure Blueprints. Now that you've been exposed at a high level, learn how to use each service in more detail: - -* [Azure Policy Overview](../governance/policy/overview.md) -* [Azure Blueprints Overview](https://azure.microsoft.com/services/blueprints/) -* [Azure Policy Samples](../governance/policy/samples/index.md) -* [Azure Policy Samples Repository](https://github.com/Azure/azure-policy) -* [Azure Policy Definition Structure](../governance/policy/concepts/definition-structure.md) -* [Azure Policy Effects](../governance/policy/concepts/effects.md) diff --git a/articles/azure-australia/gateway-egress-traffic.md b/articles/azure-australia/gateway-egress-traffic.md deleted file mode 100644 index b2ad8c3d155ff..0000000000000 --- a/articles/azure-australia/gateway-egress-traffic.md +++ /dev/null @@ -1,422 +0,0 @@ ---- -title: Controlling egress traffic in Azure Australia -description: Key elements of controlling egress traffic in Azure to meet Australian Government requirements for Secure Internet Gateways -author: emilyre -ms.service: azure-australia -ms.topic: conceptual -ms.date: 07/29/2019 -ms.author: yvettep ---- - -# Controlling egress traffic in Azure Australia - -A fundamental component of securing ICT systems is controlling network traffic. Restricting communication to only the traffic necessary for a system to function reduces the potential for compromise. Visibility and control over the external systems that your applications and services communicate with helps detect compromised systems, and attempted or successful data exfiltration. This article provides information on how outbound (egress) network traffic works within Azure and provides recommendations for implementing network security controls for an internet connected system that aligns with the Australian Cyber Security Centre (ACSC) Consumer Guidance and the intent of the ACSC's Information Security Manual (ISM). - -## Requirements - -The overall security requirements for Commonwealth systems are defined in the ISM. To assist Commonwealth entities in implementing network security, the ACSC has published _ACSC Protect: Implementing Network Segmentation and Segregation_, and to assist with securing systems in Cloud environments the ACSC has published _Cloud Computing Security for Tenants_. - -The ACSC documents outline the context for implementing network security and controlling traffic, and provide practical recommendations for network design and configuration. 
- -The following key requirements for controlling egress traffic in Azure have been identified in the ACSC documents. - -Description|Source ---------------- |------------------ -**Implement Network Segmentation and Segregation**, for example, use an n-tier architecture, using host-based firewalls and network access controls to limit inbound and outbound network connectivity to only required ports and protocols.| [Cloud Computing for Tenants](https://acsc.gov.au/publications/protect/cloud-security-tenants.htm) -**Implement adequately high bandwidth, low latency, reliable network connectivity** between the tenant (including the tenant's remote users) and the cloud service to meet the tenant's availability requirements | [ACSC Protect: Implementing Network Segmentation and Segregation](https://acsc.gov.au/publications/protect/network_segmentation_segregation.htm) -**Apply technologies at more than just the network layer**. Each host and network should be segmented and segregated, where possible, at the lowest level that can be practically managed. In most cases, this applies from the data link layer up to and including the application layer; however, in sensitive environments, physical isolation may be appropriate. Host-based and network-wide measures should be deployed in a complementary manner and be centrally monitored. Just implementing a firewall or security appliance as the only security measure is not sufficient. |[ACSC Protect: Implementing Network Segmentation and Segregation](https://acsc.gov.au/publications/protect/network_segmentation_segregation.htm) -**Use the principles of least privilege and need‐to‐know**. If a host, service, or network doesn't need to communicate with another host, service, or network, it should not be allowed to. If a host, service, or network only needs to talk to another host, service, or network on a specific port or protocol, it should be restricted to only those ports and protocols. Adopting these principles across a network will complement the minimisation of user privileges and significantly increase the overall security posture of the environment. |[ACSC Protect: Implementing Network Segmentation and Segregation](https://acsc.gov.au/publications/protect/network_segmentation_segregation.htm) -**Separate hosts and networks based on their sensitivity or criticality to business operations**. This may include using different hardware or platforms depending on different security classifications, security domains, or availability/integrity requirements for certain hosts or networks. In particular, separate management networks and consider physically isolating out-of-band management networks for sensitive environments. |[ACSC Protect: Implementing Network Segmentation and Segregation](https://acsc.gov.au/publications/protect/network_segmentation_segregation.htm) -**Identify, authenticate, and authorise access by all entities to all other entities**. All users, hosts, and services should have their access to all other users, hosts, and services restricted to only those required to perform their designated duties or functions. All legacy or local services which bypass or downgrade the strength of identification, authentication, and authorisation services should be disabled wherever possible and have their use closely monitored. |[ACSC Protect: Implementing Network Segmentation and Segregation](https://acsc.gov.au/publications/protect/network_segmentation_segregation.htm) -**Implement allowlisting of network traffic instead of deny listing**. 
Only permit access for known good network traffic (traffic that is identified, authenticated, and authorised), rather than denying access to known bad network traffic (for example, blocking a specific address or service). Allowlists result in a superior security policy to deny lists, and significantly improve your capacity to detect and assess potential network intrusions. |[ACSC Protect: Implementing Network Segmentation and Segregation](https://acsc.gov.au/publications/protect/network_segmentation_segregation.htm) -**Defining an allowlist of permitted websites and blocking all unlisted websites** effectively removes one of the most common data delivery and exfiltration techniques used by an adversary. If users have a legitimate requirement to access numerous websites, or a rapidly changing list of websites; you should consider the costs of such an implementation. Even a relatively permissive allowlist offers better security than relying on deny lists, or no restrictions at all, while still reducing implementation costs. An example of a permissive allowlist could be permitting the entire Australian subdomain, that is '*.au', or allowing the top 1,000 sites from the Alexa site ranking (after filtering Dynamic Domain Name System (DDNS) domains and other inappropriate domains).| [Australian Government Information Security Manual (ISM)](https://www.cyber.gov.au/ism) -| - -This article provides information and recommendations on how network traffic leaving your Azure environment is controlled. It covers systems deployed in Azure using both Infrastructure as a Service (IaaS) and Platform as a Service (PaaS). - -The [Gateway Ingress Traffic](gateway-ingress-traffic.md) article addresses network traffic entering your Azure environment and is the companion to this article for full network control coverage. - -## Architecture - -To appropriately control egress traffic, when you design and implement network security, you must first understand how egress network traffic works within Azure across both IaaS and PaaS. This section provides an overview of how outbound traffic generated by a resource hosted in Azure is processed, and the security controls available to restrict, and control that traffic. - -### Architecture components - -The architectural diagram shown here depicts the possible paths that network traffic can take when exiting a system that is deployed into a subnet in a virtual network. Traffic in a virtual network is managed and governed at a subnet level, with routing and security rules applying to the resources contained within. The components related to egress traffic are divided into Systems, Effective Routes, Next Hop types, Security Controls, and PaaS egress. - -![Architecture](media/egress-traffic.png) - -### Systems - -Systems are the Azure resources and related components that generate outbound traffic within an IP subnet that is part of a virtual network. - -| Component | Description | -| --- | ---| -|Virtual Network (VNet) | A VNet is a foundational resource within Azure that provides a platform and boundary for deploying resources and enabling communication. The VNet exists within an Azure Region and defines the IP Address Space and Routing boundaries for VNet integrated resources such as Virtual Machines.| -|Subnet | A subnet is an IP address range that is created within a VNet. Multiple subnets can be created within a VNet for network segmentation.| -|Network Interface| A network interface is a resource that exists in Azure. 
It is attached to a Virtual Machine and assigned a private, non-Internet routable IP address from the subnet that it is associated with. This IP address is dynamically or statically assigned through Azure Resource Manager.| -|Public IPs| A Public IP is a resource that reserves one of the Microsoft owned Public, Internet-Routable IP Addresses from the specified region for use within the virtual network. It can be associated with a specific Network Interface or PaaS resource, which enables the resource to communicate with the Internet, ExpressRoute, and other PaaS systems.| -| - -### Routes - -The path that egress traffic takes is dependent on the effective routes for that resource, which is the resultant set of routes determined by the combination of routes learned from all possible sources and the application of Azure routing logic. - -| Component | Description | -|--- | ---| -|System Routes| Azure automatically creates system routes and assigns the routes to each subnet in a virtual network. System routes cannot be created or removed, but some can be overridden with custom routes. Azure creates default system routes for each subnet, and adds additional optional default routes to specific subnets, or every subnet, when specific Azure capabilities are utilised.| -|Service Endpoints| Service endpoints provide a direct, private egress connection from a subnet to a specific PaaS capability. Service endpoints, which are only available for a subset of PaaS capabilities, provide increased performance and security for resources in a VNet accessing PaaS.| -|Route Tables| A route table is a resource in Azure that can be created to specify User-Defined Routes (UDRs) that can complement or override system routes or routes learned via Border Gateway Protocol. Each UDR specifies a network, a network mask, and a next hop. A route table can be associated with a subnet and the same route table can be associated with multiple subnets, but a subnet can only have zero or one route table.| -|Border Gateway Protocol (BGP)| BGP is an inter-autonomous system routing protocol. An autonomous system is a network or group of networks under a common administration and with common routing policies. BGP is used to exchange routing information between autonomous systems. BGP can be integrated into virtual networks through virtual network gateways.| -| - -### Next hop types defined - -Each route within Azure includes the network range and associated subnet mask and the next hop, which determines how the traffic is processed. - -Next Hop Type | Description ---- | ---- -**Virtual Network** | Routes traffic between address ranges within the address space of a virtual network. Azure creates a route with an address prefix that corresponds to each address range defined within the address space of a virtual network. If the virtual network address space has multiple address ranges defined, Azure creates an individual route for each address range. Azure automatically routes traffic between subnets within a VNet using the routes created for each address range. -**VNet peering** | When a virtual network peering is created between two virtual networks, a route is added for each address range of each virtual network to the virtual network it is peered to. Traffic is routed between the peered virtual networks in the same way as subnets within a virtual network.
-**Virtual network gateway** | One or more routes with virtual network gateway listed as the next hop type are added when a virtual network gateway is added to a virtual network. The routes included are those that are configured within the local network gateway resource and any routes learned via BGP. -**Virtual appliance** | A virtual appliance typically runs a network application, such as a firewall. The virtual appliance allows additional processing of the traffic to occur, such as filtering, inspection, or address translation. Each route with the virtual appliance hop type must also specify a next hop IP address. -**VirtualNetworkServiceEndpoint** | The public IP addresses for a specific service are added as routes to a subnet with a next hop of VirtualNetworkServiceEndpoint when a service endpoint is enabled. Service endpoints are enabled for individual services on individual subnets within a virtual network. The public IP addresses of Azure services change periodically. Azure manages the addresses in the route table automatically when the addresses change. -**Internet** | Traffic with a next hop of Internet will exit the virtual network and automatically be translated to a Public IP address either from a dynamic pool available in the associated Azure region, or by using a Public IP address configured for that resource. If the destination address is for one of Azure's services, traffic is routed directly to the service over Azure's backbone network, rather than routing the traffic to the Internet. Traffic between Azure services does not traverse the Internet, regardless of which Azure region the virtual network exists in, or which Azure region an instance of the Azure service is deployed in. -**None** | Traffic with a next hop of none is dropped. Azure creates system default routes for reserved address prefixes with none as the next hop type. Routes with a next hop of none can also be added using route tables to prevent traffic from being routed to specific networks. -| - -### Security controls - -Control | Description ----- | ----- -**Network Security Groups (NSGs)** | NSGs control traffic into and out of virtual network resources in Azure. NSGs apply rules for the traffic flows that are permitted or denied, which includes traffic within Azure and between Azure and external networks such as on-premises or the Internet. NSGs are applied to subnets within a virtual network or to individual network interfaces. -**Azure Firewall** | Azure Firewall is a managed, cloud-based network security service that protects Azure virtual network resources. It is a fully stateful firewall as a service with built-in high availability and unrestricted cloud scalability. Azure Firewall can be configured with traditional network filtering rules based on IP addresses, protocols, and ports, but also supports filtering based on Fully Qualified Domain Names (FQDN), Service Tags, and inbuilt Threat Intelligence. -**Network Virtual Appliance (NVA)** | Network Virtual Appliances are virtual machine images that can provide networking, security, and other functions to Azure. NVAs support network functionality and services in the form of VMs in virtual networks and deployments. NVAs can be used to address specific requirements, integrate with management and operational tools, or to provide consistency with existing products.
Azure supports a broad list of third-party network virtual appliances including web application firewalls (WAF), firewalls, gateways/routers, application delivery controllers (ADC), and WAN optimizers. -**Service endpoint policies (Preview)** | Virtual network service endpoint policies allow you to filter virtual network traffic to Azure services, allowing only specific Azure service resources over service endpoints. Endpoint policies provide granular access control for virtual network traffic to Azure services. -**Azure Policy** | Azure Policy is a service in Azure for creating, assigning, and managing policies. These policies use rules to control the types of resources that can be deployed and the configuration of those resources. Policies can be used to enforce compliance by preventing resources from being deployed if they do not meet requirements or can be used for monitoring to report on compliance status. -| - -### PaaS egress - -The majority of PaaS resources do not generate egress traffic, but either respond to inbound requests (such as an Application Gateway, Storage, SQL Database, etc.) or relay data from other resources (such as Service Bus and Azure Relay). The network communication flows between PaaS resources, such as from App Services to Storage or SQL Databases, are controlled and contained by Azure and secured through identity and other resource configuration controls rather than network segmentation or segregation. - -PaaS resources deployed into a virtual network receive dedicated IP addresses and are subject to any routing controls and NSGs in the same way as other resources in the virtual network. PaaS resources that do not exist within a virtual network will utilise a pool of IP addresses that are shared across all instances of the resource, which are either published through Microsoft documentation or can be determined through Azure Resource Manager. - -## General guidance - -To design and build secure solutions within Azure, it is critical to understand and control the network traffic so that only identified and authorised communication can occur. The intent of this guidance and the specific component guidance in later sections is to describe the tools and services that can be utilised to apply the principles outlined in the [ACSC Protect: Implementing Network Segmentation and Segregation](https://acsc.gov.au/publications/protect/network_segmentation_segregation.htm) across Azure workloads. This includes detailing how to create a virtual architecture for securing resources when it is not possible to apply the same traditional physical and network controls that are possible in an on-premises environment.
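-
-As a concrete illustration of the guidance that follows, the sketch below shows one way the system default Internet route for a subnet could be overridden so that all outbound traffic is forced through a controlled egress point such as Azure Firewall or an NVA. It is a minimal sketch assuming the azure-identity and azure-mgmt-network Python packages; the subscription, resource group, virtual network, subnet, and firewall IP address values are hypothetical placeholders.
-
-```python
-from azure.identity import DefaultAzureCredential
-from azure.mgmt.network import NetworkManagementClient
-
-# Hypothetical placeholder values - replace with your own environment details.
-SUBSCRIPTION_ID = "00000000-0000-0000-0000-000000000000"
-RESOURCE_GROUP = "rg-egress-example"
-LOCATION = "australiaeast"
-FIREWALL_PRIVATE_IP = "10.0.1.4"  # private IP of the Azure Firewall or NVA
-
-client = NetworkManagementClient(DefaultAzureCredential(), SUBSCRIPTION_ID)
-
-# Create a route table containing a user-defined default route (0.0.0.0/0)
-# that sends all outbound traffic to the firewall/NVA for inspection.
-route_table = client.route_tables.begin_create_or_update(
-    RESOURCE_GROUP,
-    "rt-force-egress",
-    {
-        "location": LOCATION,
-        "routes": [
-            {
-                "name": "default-via-firewall",
-                "address_prefix": "0.0.0.0/0",
-                "next_hop_type": "VirtualAppliance",
-                "next_hop_ip_address": FIREWALL_PRIVATE_IP,
-            }
-        ],
-    },
-).result()
-
-# Associate the route table with an existing workload subnet so the
-# user-defined route overrides the system default route to the Internet.
-subnet = client.subnets.get(RESOURCE_GROUP, "vnet-workload", "snet-app")
-subnet.route_table = route_table
-client.subnets.begin_create_or_update(
-    RESOURCE_GROUP, "vnet-workload", "snet-app", subnet
-).result()
-```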
- -### Guidance - -* Limit the number of egress points for virtual networks -* Override the system default route for all subnets that do not need direct outbound communication to the Internet -* Design and implement a complete network architecture to identify and control all ingress and egress points to Azure resources -* Consider utilising a Hub and Spoke Network Design for virtual networks as discussed in the Microsoft Virtual Data Centre (VDC) documentation -* Utilise products with inbuilt security capabilities for outbound connections to the Internet (for example, Azure Firewall, Network Virtual Appliances or Web Proxies) -* Use identity controls such as Azure role-based access control, Conditional Access, and Multi-Factor Authentication (MFA) to limit network configuration privileges -* Implement Locks to prevent modification or deletion of key elements of the network configuration -* Deploy PaaS in a VNet integrated configuration for increased segregation and control -* Implement ExpressRoute for connectivity with on-premises networks -* Implement VPNs for integration with external networks -* Utilise Azure Policy to restrict the regions and resources to only those that are necessary for system functionality -* Utilise Azure Policy to enforce baseline security configuration for resources -* Leverage Network Watcher and Azure Monitor for logging, auditing, and visibility of network traffic within Azure - -### Resources - -Item | Link ------------| --------- -_Australian Regulatory and Policy Compliance Documents including Consumer Guidance_ | [https://aka.ms/au-irap](https://aka.ms/au-irap) -_Azure Virtual Data Centre_ | [https://docs.microsoft.com/azure/architecture/vdc/networking-virtual-datacenter](/azure/architecture/vdc/networking-virtual-datacenter) -_ACSC Network Segmentation_ | [https://acsc.gov.au/publications/protect/network_segmentation_segregation.htm](https://acsc.gov.au/publications/protect/network_segmentation_segregation.htm) -_ACSC Cloud Security for Tenants_ | [https://acsc.gov.au/publications/protect/cloud-security-tenants.htm](https://acsc.gov.au/publications/protect/cloud-security-tenants.htm) -_ACSC Information Security Manual_ | [https://acsc.gov.au/infosec/ism/index.htm](https://acsc.gov.au/infosec/ism/index.htm) -| - -## Component guidance - -This section provides further guidance on the individual components that are relevant to egress traffic for systems deployed in Azure. Each section describes the intent of the specific component with links to documentation and configuration guides that can be used to assist with design and build activities. - -### Systems security - -All communication to resources within Azure passes through the Microsoft maintained network infrastructure, which provides connectivity and security functionality. A range of protections are automatically put in place by Microsoft to protect the Azure platform and network infrastructure and additional capabilities are available as services within Azure to control network traffic and establish network segmentation and segregation. - -### Virtual Network (VNet) - -Virtual networks are one of the fundamental building blocks for networking in Azure. Virtual networks define an IP address space and routing boundary to be used across a variety of systems. Virtual networks are divided into subnets and all subnets within a Virtual Network have a direct network route to each other. 
By using virtual network gateways (ExpressRoute or VPN), systems within a virtual network can integrate with on-premises and external environments. Through Azure-provided Network Address Translation (NAT) and Public IP address allocation, systems can connect to the Internet or other Azure Regions and Services. Understanding virtual networks and the associated configuration parameters and routing is crucial in understanding and controlling egress network traffic. - -As virtual networks form the base address space and routing boundary within Azure, they can be used as a primary building block of network segmentation and segregation. - -| Resource | Link | -| --- | --- | -| *Virtual Networks Overview* | [https://docs.microsoft.com/azure/virtual-network/virtual-networks-overview](../virtual-network/virtual-networks-overview.md) | -| *Plan Virtual Networks How-to Guide* | [https://docs.microsoft.com/azure/virtual-network/virtual-network-vnet-plan-design-arm](../virtual-network/virtual-network-vnet-plan-design-arm.md) | -| *Create a Virtual Network Quickstart* | [https://docs.microsoft.com/azure/virtual-network/quick-create-portal](../virtual-network/quick-create-portal.md) -| - -### Subnet - -Subnets are a crucial component for network segmentation and segregation within Azure. Subnets can be used to provide separation between systems. A subnet is the resource within a virtual network where NSGs, Route Tables, and service endpoints are applied. Subnets can be used as both source and destination addresses for firewall rules and access-control lists. - -The subnets within a virtual network should be planned to meet the requirements of workloads and systems. Individuals involved in the design or implementation of subnets should refer to the ACSC guidelines for network segmentation to determine how systems should be grouped together within a subnet. - -|Resource|Link| -|---|---| -|*Add, change, or delete a virtual network subnet* | [https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-subnet](../virtual-network/virtual-network-manage-subnet.md) -| - -### Network interface - -Network interfaces are the source for all egress traffic from a virtual machine. Network Interfaces enable the configuration of IP Addressing, and can be used to apply NSGs or to route traffic through an NVA. The Network Interfaces for virtual machines should be planned and configured appropriately to align with overall network segmentation and segregation objectives. - -|Resource|Link| -|---|---| -|*Create, Change, or Delete a Network Interface* | [https://docs.microsoft.com/azure/virtual-network/virtual-network-network-interface](../virtual-network/virtual-network-network-interface.md) | -|*Network Interface IP Addressing* | [https://docs.microsoft.com/azure/virtual-network/private-ip-addresses](../virtual-network/ip-services/private-ip-addresses.md) -| - -### VNet integrated PaaS - -PaaS can provide enhanced functionality and availability and reduce management overhead but must be secured appropriately. To increase control, enforce network segmentation, or to provide a secure egress point for applications and services, many PaaS capabilities can be integrated with a virtual network. - -To support using PaaS as an integrated part of a system or application architecture, Microsoft provides multiple mechanisms to deploy PaaS into a virtual network. The deployment methodology can help restrict access while providing connectivity and integration with internal systems and applications.
Examples include App Service Environments, SQL Managed Instance, and more. - -When deploying PaaS into a virtual network where routing and NSG controls have been implemented, it is crucial to understand the specific communication requirements of the resource, including management access from Microsoft services and the path that communications traffic will take when replying to incoming requests from these services. - -| Resource | Link | -| --- | --- | -| *Virtual network integration for Azure services* | [https://docs.microsoft.com/azure/virtual-network/virtual-network-for-azure-services](../virtual-network/virtual-network-for-azure-services.md) | -| *Integrate your app with an Azure Virtual Network How-to guide* | [https://docs.microsoft.com/azure/app-service/web-sites-integrate-with-vnet](../app-service/overview-vnet-integration.md) -| - -### Public IP - -Public IP addresses are used when communicating outside a virtual network. This includes PaaS resources and any routes with a next hop of Internet. Commonwealth entities should plan the allocation of Public IP addresses carefully and only assign them to resources where there is a genuine requirement. As a general design practice, Public IP addresses should be allocated to controlled egress points for the virtual network such as Azure Firewall, VPN Gateway, or Network Virtual Appliances. - -|Resource|Link| -|---|---| -|*Public IP Addresses Overview* | [https://docs.microsoft.com/azure/virtual-network/virtual-network-ip-addresses-overview-arm#public-ip-addresses](../virtual-network/ip-services/public-ip-addresses.md#public-ip-addresses) | -|*Create, change, or delete a public IP address* | [https://docs.microsoft.com/azure/virtual-network/virtual-network-public-ip-address](../virtual-network/ip-services/virtual-network-public-ip-address.md) -| - -## Effective routes - -Effective routes are the resultant set of routes determined by the combination of system routes, service endpoints, Route Tables, and BGP and the application of Azure routing logic. When outbound traffic is sent from a subnet, Azure selects a route based on the destination IP address, using the longest prefix match algorithm. If multiple routes contain the same address prefix, Azure selects the route type, based on the following priority: - -1. User-defined route -2. BGP route -3. System route - -It is important to note that system routes for traffic related to virtual network, virtual network peerings, or virtual network service endpoints, are preferred routes, even if BGP routes are more specific. - -Individuals involved in the design or implementation of routing topologies in Azure should understand how Azure routes traffic and develop an architecture that balances the necessary functionality of systems with the required security and visibility. Care should be taken to plan the environment appropriately to avoid excessive interventions and exceptions to routing behaviours as this will increase complexity and may make troubleshooting and fault finding more difficult and time consuming. - -| Resource | -| --- | -| [View effective routes](../virtual-network/manage-route-table.md#view-effective-routes) - -### System routes - -For [System Routes](../virtual-network/virtual-networks-udr-overview.md#system-routes), individuals involved in the design or implementation of virtual networks should understand the default system routes and the options available to complement or override those routes. 
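-
-To make the route selection behaviour described under Effective routes above more tangible, the following self-contained Python sketch applies longest-prefix matching and the route-type priority order to a small, hypothetical set of effective routes. It deliberately ignores the exception noted above for virtual network, peering, and service endpoint system routes, and it does not call any Azure APIs.
-
-```python
-import ipaddress
-
-# Route-type priority used when several routes share the same prefix length:
-# user-defined routes win over BGP routes, which win over system routes.
-PRIORITY = {"User": 0, "BGP": 1, "System": 2}
-
-# A hypothetical effective route table for a subnet.
-routes = [
-    {"prefix": "0.0.0.0/0",      "source": "System", "next_hop": "Internet"},
-    {"prefix": "10.0.0.0/16",    "source": "System", "next_hop": "VirtualNetwork"},
-    {"prefix": "0.0.0.0/0",      "source": "User",   "next_hop": "VirtualAppliance 10.0.1.4"},
-    {"prefix": "192.168.0.0/16", "source": "BGP",    "next_hop": "VirtualNetworkGateway"},
-]
-
-def select_route(destination: str):
-    """Return the route that longest-prefix-match plus priority would pick."""
-    dest = ipaddress.ip_address(destination)
-    candidates = [r for r in routes if dest in ipaddress.ip_network(r["prefix"])]
-    # Longest prefix first, then route-type priority.
-    candidates.sort(key=lambda r: (-ipaddress.ip_network(r["prefix"]).prefixlen,
-                                   PRIORITY[r["source"]]))
-    return candidates[0] if candidates else None
-
-print(select_route("10.0.2.5"))      # matched by the more specific 10.0.0.0/16 system route
-print(select_route("203.0.113.10"))  # the 0.0.0.0/0 UDR wins over the system default route
-```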
- -### Service endpoints - -Enabling [service endpoints](../virtual-network/virtual-network-service-endpoints-overview.md) on a subnet provides a direct communication path to the associated PaaS resource. This can provide increased performance and security by restricting the available communication path to just that service. The use of service endpoints does introduce a potential data exfiltration path as the default configuration allows access to all instances of the PaaS service rather than the specific instances required for an application or system. - -Commonwealth entities should evaluate the risk associated with providing direct access to the PaaS resource including the likelihood and consequence of the path being misused. - -To reduce potential risks associated with service endpoints, implement service endpoint policies where possible or consider enabling service endpoints on an Azure Firewall or NVA subnet and routing traffic from specific subnets through it where additional filtering, monitoring, or inspection can be applied. - -|Resource|Link| -|---|---| -|*Tutorial: Restrict network access to PaaS resources with virtual network service endpoints using the Azure portal* |[https://docs.microsoft.com/azure/virtual-network/tutorial-restrict-network-access-to-resources](../virtual-network/tutorial-restrict-network-access-to-resources.md)| -| - -### Route tables - -Route tables provide an administrator-configured mechanism for controlling network traffic within Azure. Route tables can be utilised to forward traffic to an Azure Firewall or NVA, to connect directly to external resources, or to override Azure system routes. Route tables can also be used to prevent networks learned through a virtual network gateway from being made available to resources in a subnet by disabling virtual network gateway route propagation. - -|Resource|Link| -|---|---| -|*Routing Concepts - custom routes* |[https://docs.microsoft.com/azure/virtual-network/virtual-networks-udr-overview#custom-routes](../virtual-network/virtual-networks-udr-overview.md#custom-routes)| -|*Tutorial: Route network traffic* |[https://docs.microsoft.com/azure/virtual-network/tutorial-create-route-table-portal](../virtual-network/tutorial-create-route-table-portal.md)| -| - -### Border Gateway Protocol (BGP) - -BGP can be utilised by virtual network gateways to dynamically exchange routing information with on-premises or other external networks. BGP applies to a virtual network when configured through an ExpressRoute virtual network gateway over ExpressRoute private peering and when enabled on an Azure VPN Gateway. - -Individuals involved in the design or implementation of virtual networks and virtual network gateways in Azure should take time to understand the behaviour and configuration options available for BGP in Azure.
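-
-One practical way to confirm how BGP-learned, user-defined, and system routes combine for a given workload is to dump the effective routes on a network interface. The following is a minimal sketch assuming the azure-identity and azure-mgmt-network Python packages; the subscription, resource group, and network interface names are hypothetical placeholders, and the attribute names shown should be verified against the SDK version in use.
-
-```python
-from azure.identity import DefaultAzureCredential
-from azure.mgmt.network import NetworkManagementClient
-
-SUBSCRIPTION_ID = "00000000-0000-0000-0000-000000000000"  # placeholder
-RESOURCE_GROUP = "rg-egress-example"                       # placeholder
-NIC_NAME = "vm-app-nic01"                                  # placeholder
-
-client = NetworkManagementClient(DefaultAzureCredential(), SUBSCRIPTION_ID)
-
-# Effective routes are resolved by the platform, so this is a long-running operation.
-effective = client.network_interfaces.begin_get_effective_route_table(
-    RESOURCE_GROUP, NIC_NAME
-).result()
-
-for route in effective.value:
-    print(
-        route.source,               # for example Default, User, or VirtualNetworkGateway (BGP)
-        route.address_prefix,       # prefixes covered by the route
-        route.next_hop_type,        # for example VnetLocal, Internet, VirtualAppliance, None
-        route.next_hop_ip_address,  # populated for virtual appliance routes
-    )
-```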
- -|Resource|Link| -|---|---| -|*Routing Concepts: BGP* | [https://docs.microsoft.com/azure/virtual-network/virtual-networks-udr-overview#next-hop-types-across-azure-tools](../virtual-network/virtual-networks-udr-overview.md#next-hop-types-across-azure-tools)| -|*ExpressRoute routing requirements* | [https://docs.microsoft.com/azure/expressroute/expressroute-routing](../expressroute/expressroute-routing.md)| -|*About BGP with Azure VPN Gateway* |[https://docs.microsoft.com/azure/vpn-gateway/vpn-gateway-bgp-overview](../vpn-gateway/vpn-gateway-bgp-overview.md)| -|*Tutorial: Configure a site-to-site VPN over ExpressRoute Microsoft peering* |[https://docs.microsoft.com/azure/expressroute/site-to-site-vpn-over-microsoft-peering](../expressroute/site-to-site-vpn-over-microsoft-peering.md)| -| - -## Next hop types - -### Virtual Network - -Routes with a Next Hop of Virtual Network are added automatically as system routes, but can also be added to user-defined routes to direct traffic back to the virtual network in instances where the system route has been overridden. - -### VNet peering - -VNet peering enables communication between two disparate virtual networks. VNet peering must be configured on each virtual network, but the virtual networks do not need to be in the same region or subscription, or associated with the same Azure Active Directory (Azure AD) tenant. - -When configuring VNet peering, it is critical that individuals involved in the design or implementation of VNet peering understand the four associated configuration parameters and how they apply to each side of the peer, as illustrated in the sketch after this list: - -1. **Allow virtual network access:** Select **Enabled** (default) to enable communication between the two virtual networks. Enabling communication between virtual networks allows resources connected to either virtual network to communicate with each other with the same bandwidth and latency as if they were connected to the same virtual network. -2. **Allow forwarded traffic:** Check this box to allow traffic *forwarded* by a network - traffic that didn't originate from the virtual network - to flow to this virtual network through a peering. This setting is fundamental to implementing a hub and spoke network topology. -3. **Allow gateway transit:** Check this box to allow the peered virtual network to utilise the virtual network gateway attached to this virtual network. *Allow gateway transit* is enabled on the virtual network with the virtual network gateway resource, but only applies if *Use remote gateways* is enabled on the other virtual network. -4. **Use remote gateways:** Check this box to allow traffic from this virtual network to flow through a virtual network gateway attached to the virtual network being peered with. *Use remote gateways* is enabled on the virtual network without a virtual network gateway and only applies if the *Allow gateway transit* option is enabled on the other virtual network.
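-
-The following minimal sketch shows how these four parameters could be set on the hub side of a hub and spoke peering using the azure-mgmt-network Python SDK. The subscription, resource group, and virtual network names are hypothetical placeholders, and the matching peering would also need to be created from the spoke side with *Use remote gateways* enabled.
-
-```python
-from azure.identity import DefaultAzureCredential
-from azure.mgmt.network import NetworkManagementClient
-
-SUBSCRIPTION_ID = "00000000-0000-0000-0000-000000000000"  # placeholder
-RESOURCE_GROUP = "rg-network-hub"                          # placeholder
-
-client = NetworkManagementClient(DefaultAzureCredential(), SUBSCRIPTION_ID)
-
-spoke_vnet_id = (
-    f"/subscriptions/{SUBSCRIPTION_ID}/resourceGroups/rg-network-spoke"
-    "/providers/Microsoft.Network/virtualNetworks/vnet-spoke"
-)
-
-# Peering from the hub (which hosts the virtual network gateway) to the spoke.
-client.virtual_network_peerings.begin_create_or_update(
-    RESOURCE_GROUP,
-    "vnet-hub",
-    "hub-to-spoke",
-    {
-        "remote_virtual_network": {"id": spoke_vnet_id},
-        "allow_virtual_network_access": True,  # 1. Allow virtual network access
-        "allow_forwarded_traffic": True,       # 2. Allow forwarded traffic
-        "allow_gateway_transit": True,         # 3. Allow gateway transit (hub side)
-        "use_remote_gateways": False,          # 4. Use remote gateways (spoke side only)
-    },
-).result()
-```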
- -|Resource|Link| -|---|---| -| Concepts: Virtual network peering | [https://docs.microsoft.com/azure/virtual-network/virtual-network-peering-overview](../virtual-network/virtual-network-peering-overview.md) | -| Create, change, or delete a virtual network peering | [https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-peering](../virtual-network/virtual-network-manage-peering.md)| -| - -### Virtual network gateway - -Virtual network gateways provide a mechanism for integrating virtual networks with external networks, such as on-premises environments, partner environments, and other cloud deployments. The two types of virtual network gateway are ExpressRoute and VPN. - -#### ExpressRoute Gateway - -ExpressRoute Gateways provide an egress point from the virtual network to an on-premises environment and should be deployed to meet security, availability, financial, and performance requirements. ExpressRoute Gateways provide a defined network bandwidth and incur usage costs after deployment. Virtual networks can have only one ExpressRoute Gateway, but this can be connected to multiple ExpressRoute circuits and can be leveraged by multiple virtual networks through VNet Peering, allowing bandwidth and connectivity to be shared. Care should be taken to configure routing between on-premises environments and virtual networks using ExpressRoute Gateways to ensure end to end connectivity using known, controlled network egress points. Commonwealth entities using ExpressRoute Gateway over ExpressRoute private peering must also deploy Network Virtual Appliances (NVA) to establish VPN connectivity to the on-premises environment for compliance with the ACSC consumer guidance. - -It is important to note that ExpressRoute Gateways have restrictions on the address ranges, communities, and other specific configuration items exchanged through BGP. - -| Resource | Link | -|---|---| -| ExpressRoute Gateway Overview | [https://docs.microsoft.com/azure/expressroute/expressroute-about-virtual-network-gateways](../expressroute/expressroute-about-virtual-network-gateways.md) | -| Configure a virtual network gateway for ExpressRoute | [https://docs.microsoft.com/azure/expressroute/expressroute-howto-add-gateway-portal-resource-manager](../expressroute/expressroute-howto-add-gateway-portal-resource-manager.md) -| - -#### VPN Gateway - -Azure VPN Gateway provides an egress network point from the virtual network to an external network for secure site-to-site connectivity. VPN Gateways provide a defined network bandwidth and incur usage costs after deployment. Commonwealth entities utilising VPN Gateway should ensure that it is configured in accordance with the ACSC consumer guidance. Virtual Networks can have only one VPN Gateway, but this can be configured with multiple tunnels and can be leveraged by multiple virtual networks through VNet Peering, allowing multiple virtual networks to share bandwidth and connectivity. VPN Gateways can be established over the Internet or over ExpressRoute through Microsoft Peering. 
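-
-As an operational check that gateway egress points remain known and controlled, the connections attached to the gateways in a resource group can be enumerated and their status reported. The following is a minimal sketch assuming the azure-identity and azure-mgmt-network Python packages; the subscription and resource group names are hypothetical placeholders.
-
-```python
-from azure.identity import DefaultAzureCredential
-from azure.mgmt.network import NetworkManagementClient
-
-SUBSCRIPTION_ID = "00000000-0000-0000-0000-000000000000"  # placeholder
-RESOURCE_GROUP = "rg-network-hub"                          # placeholder
-
-client = NetworkManagementClient(DefaultAzureCredential(), SUBSCRIPTION_ID)
-
-# List every gateway connection in the resource group and report its type and
-# current status so unexpected or unhealthy egress paths can be spotted.
-for conn in client.virtual_network_gateway_connections.list(RESOURCE_GROUP):
-    print(conn.name, conn.connection_type, conn.connection_status)
-```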
- -| Resource | Link | -| --- | --- | -| VPN Gateway Overview| [https://docs.microsoft.com/azure/vpn-gateway](../vpn-gateway/index.yml)| -| Planning and design for VPN Gateway | [https://docs.microsoft.com/azure/vpn-gateway/vpn-gateway-plan-design](../vpn-gateway/vpn-gateway-about-vpngateways.md)| -| Azure VPN Gateway in Azure Australia | [Azure VPN Gateway in Azure Australia](vpn-gateway.md) -| - -### Next hop of virtual appliance - -The next hop of virtual appliance provides the ability to process network traffic outside the Azure networking and routing topology applied to virtual networks. Virtual appliances can apply security rules, translate addresses, establish VPNs, proxy traffic, or a range of other capabilities. The next hop of virtual appliance is applied through UDRs in a route table and can be used to direct traffic to an Azure Firewall, individual NVA, or Azure Load Balancer providing availability across multiple NVAs. To use a virtual appliance for routing, the associated network interfaces must be enabled for IP forwarding. - -| Resource | Link | -| --- | ---| -| Routing concepts: Custom Routes | [https://docs.microsoft.com/azure/virtual-network/virtual-networks-udr-overview#custom-routes](../virtual-network/virtual-networks-udr-overview.md#custom-routes) | -| Enable or Disable IP forwarding | [https://docs.microsoft.com/azure/virtual-network/virtual-network-network-interface#enable-or-disable-ip-forwarding](../virtual-network/virtual-network-network-interface.md#enable-or-disable-ip-forwarding) -| - -### Next hop of VirtualNetworkServiceEndpoint - -Routes with a next hop type of VirtualNetworkServiceEndpoint are only added when a service endpoint is configured on a subnet and cannot be manually configured through route tables. - -### Next hop of Internet - -The next hop Internet is used to reach any resources that use a Public IP address, which includes the Internet as well as PaaS and Azure Services in Azure Regions. The next hop Internet does not require a default route (0.0.0.0/0) covering all networks, but can be used to enable routing paths to specific public services. The next hop of Internet should be used for adding routes to authorised services and capabilities required for system functionality such as Microsoft management addresses. - -Examples of services that can be added using the next hop of Internet are: - -1. Key Management Services for Windows activation -2. App Service Environment management - -|Resource|Link| -|---|---| -| Outbound connections in Azure | [https://docs.microsoft.com/azure/load-balancer/load-balancer-outbound-connections](../load-balancer/load-balancer-outbound-connections.md) | -| Use Azure custom routes to enable KMS activation | [https://docs.microsoft.com/azure/virtual-machines/troubleshooting/custom-routes-enable-kms-activation](/troubleshoot/azure/virtual-machines/custom-routes-enable-kms-activation) | -| Locking down an App Service Environment | [https://docs.microsoft.com/azure/app-service/environment/firewall-integration](../app-service/environment/firewall-integration.md) | -| - -### Next hop of none - -The next hop of none can be used to prevent communication to a specific network. In contrast with an NSG, which controls whether the traffic is permitted or denied from traversing an available network path, using a next hop of none removes the network path completely. 
Care should be taken when creating routes with a next hop of none, especially when applying it to a default route of 0.0.0.0/0 as this can have unintended consequences and may make troubleshooting system issues complex and time consuming. - -## Security - -Implementing network segmentation and segregation controls on IaaS and PaaS capabilities is achieved through securing the capabilities themselves and by implementing controlled communication paths from the systems that will be communicating with the capability. - -Designing and building solutions in Azure is a process of creating a logical architecture to understand, control, and monitor network resources across the entire Azure presence. This logical architecture is software defined within the Azure platform and takes the place of a physical network topology that is implemented in traditional network environments. - -The logical architecture that is created must provide the functionality necessary for usability, but must also provide the visibility and control needed for security and integrity. - -Achieving this outcome is based on implementing the necessary network segmentation and segregation tools, but also in protecting and enforcing the network topology and the implementation of these tools. - -### Network Security Groups (NSGs) - -NSGs are used to specify the inbound and outbound traffic permitted for a subnet or a specific network interface. When configuring NSGs, commonwealth entities should use an approval list approach where rules are configured to permit the necessary traffic with a default rule configured to deny all traffic that does not match a specific permit statement. When planning and configuring NSGs, care must be taken to ensure that all necessary inbound and outbound traffic is captured appropriately. This includes identifying and understanding all private IP address ranges utilised within virtual networks and the on-premises environment, and specific Microsoft services such as Azure Load Balancer and PaaS management requirements. Individuals involved in the design and implementation of NSGs should also understand the use of Service Tags and Application Security Groups for creating fine-grained, service, and application-specific security rules. - -It is important to note that the default configuration for an NSG permits outbound traffic to all addresses within the virtual network and all public IP addresses. - -|Resource|Link| -|---|---| -|Network Security Overview | [https://docs.microsoft.com/azure/virtual-network/security-overview](../virtual-network/network-security-groups-overview.md)| -|Create, change, or delete a network security group | [https://docs.microsoft.com/azure/virtual-network/manage-network-security-group](../virtual-network/manage-network-security-group.md)| -| - -### Azure Firewall - -Azure Firewall can be utilised to build a hub and spoke network topology and enforce centralised network security controls. Azure Firewall can be used to meet the necessary requirements of the ISM for egress traffic by implementing an allowlisting approach where only the IP addresses, protocols, ports, and FQDNs required for system functionality are authorised. Commonwealth entities should take a risk-based approach to determine whether the security capabilities provided by Azure Firewall are sufficient for their requirements. For scenarios where additional security capabilities beyond those provided by Azure Firewall are required, commonwealth entities should consider implementing NVAs. 
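-
-To illustrate the allowlisting approach described above, the sketch below shows the shape of an Azure Firewall application rule collection that permits outbound HTTPS only to a small set of approved FQDNs, with all other outbound web traffic implicitly denied. The collection would be supplied as part of the firewall parameters to `azure_firewalls.begin_create_or_update` in the azure-mgmt-network Python SDK; the FQDNs, addresses, and names are hypothetical, IP configuration details are omitted for brevity, and the field names should be verified against the SDK version in use.
-
-```python
-# Hypothetical application rule collection permitting only approved FQDNs.
-# Field names follow the azure-mgmt-network AzureFirewall models; verify them
-# against the SDK version in use before relying on this sketch.
-allow_approved_fqdns = {
-    "name": "allow-approved-web",
-    "priority": 200,
-    "action": {"type": "Allow"},
-    "rules": [
-        {
-            "name": "approved-sites",
-            "source_addresses": ["10.0.2.0/24"],  # workload subnet (placeholder)
-            "protocols": [{"protocol_type": "Https", "port": 443}],
-            "target_fqdns": [
-                "www.australia.gov.au",
-                "*.windowsupdate.com",
-            ],
-        }
-    ],
-}
-
-firewall_parameters = {
-    "location": "australiaeast",
-    "application_rule_collections": [allow_approved_fqdns],
-    # "ip_configurations": [...],  # AzureFirewallSubnet and public IP omitted
-}
-```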
- -|Resource|Link| -|---|---| -|*Azure Firewall Documentation* | [https://docs.microsoft.com/azure/firewall](../firewall/index.yml)| -|*Tutorial: Deploy and configure Azure Firewall in a hybrid network using Azure PowerShell* | [https://docs.microsoft.com/azure/firewall/tutorial-hybrid-ps](../firewall/tutorial-hybrid-ps.md)| -| - -### Network Virtual Appliances (NVAs) - -NVAs can be used to build a hub and spoke network topology, provide enhanced or complementary network capabilities, or can be used as an alternative to Azure network mechanisms for familiarity and consistent support and management with on-premises network services. NVAs can be deployed to meet specific security requirements, such as scenarios where there is a requirement for identity awareness associated with network traffic, HTTPS decryption, content inspection, filtering, or other security capabilities. NVAs should be deployed in a highly available configuration and individuals involved in the design or implementation of NVAs should consult the appropriate vendor documentation for guidelines on deployment in Azure. - -|Resource|Link| -|---|---| -|*Deploy highly available network virtual appliances* | [https://docs.microsoft.com/azure/architecture/reference-architectures/dmz/nva-ha](/azure/architecture/reference-architectures/dmz/nva-ha)| -| - -### Service endpoint policies (Preview) - -Configure service endpoint policies based on availability of the service and a security risk assessment of the likelihood and impact of data exfiltration. Service endpoint policies should be considered for Azure Storage and managed on a case-by-case basis for other services based on the associated risk profile. - -| Resource | Link | -| --- | --- | -| *Service endpoint policies overview* | [https://docs.microsoft.com/azure/virtual-network/virtual-network-service-endpoint-policies-overview](../virtual-network/virtual-network-service-endpoint-policies-overview.md) | -| *Create, change, or delete service endpoint policy using the Azure portal* | [https://docs.microsoft.com/azure/virtual-network/virtual-network-service-endpoint-policies-portal](../virtual-network/virtual-network-service-endpoint-policies-portal.md) -| - -### Azure Policy - -Azure Policy is a key component for enforcing and maintaining the integrity of the logical architecture of the Azure environment. There are a variety of services and egress network traffic paths available through Azure services. It is crucial that Commonwealth entities are aware of the resources that exist within their environment and the available network egress points. To ensure that unauthorised network egress points are not created in the Azure environment, Commonwealth entities should use Azure Policy to control the types of resources that can be deployed and the configuration of those resources. Practical examples include restricting resources to only those authorised and approved for use and requiring NSGs to be added to subnets.
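-
-One such example is a policy definition that denies any resource type not on an approved list, which prevents unapproved services (and therefore unapproved egress paths) from being deployed. The sketch below expresses the policy rule as a Python dictionary mirroring the built-in *Allowed resource types* definition linked below; the parameter plumbing and the assignment step (for example, via the azure-mgmt-resource PolicyClient) are omitted, and the structure should be checked against current Azure Policy documentation.
-
-```python
-# Policy rule that denies deployment of any resource type not present in the
-# listOfResourceTypesAllowed parameter (mirrors the built-in
-# "Allowed resource types" policy definition referenced below).
-allowed_resource_types_rule = {
-    "if": {
-        "not": {
-            "field": "type",
-            "in": "[parameters('listOfResourceTypesAllowed')]",
-        }
-    },
-    "then": {"effect": "deny"},
-}
-
-# A custom policy definition wrapping the rule; it would be created and then
-# assigned at a management group or subscription scope.
-policy_definition = {
-    "policyType": "Custom",
-    "mode": "All",
-    "displayName": "Allowed resource types (example)",
-    "parameters": {
-        "listOfResourceTypesAllowed": {
-            "type": "Array",
-            "metadata": {"description": "The list of resource types that can be deployed."},
-        }
-    },
-    "policyRule": allowed_resource_types_rule,
-}
-```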
- -|Resource | Link| -|---|---| -|*Azure Policy Overview* | [https://docs.microsoft.com/azure/governance/policy/overview](../governance/policy/overview.md)| -|*Allowed Resource Types sample policy* | [https://docs.microsoft.com/azure/governance/policy/samples/allowed-resource-types](../governance/policy/samples/index.md)| -|*Force NSG on a subnet sample policy*| [https://docs.microsoft.com/azure/governance/policy/samples/nsg-on-subnet](../governance/policy/samples/index.md)| -| - -## PaaS egress capabilities - -PaaS capabilities provide opportunities for increased functionality and simplified management, but introduce complexities in addressing requirements for network segmentation and segregation. PaaS capabilities are typically configured with Public IP addresses and are accessible from the Internet. If you are using PaaS capabilities within your systems and solutions, care should be taken to identify the communication flows between components and create network security rules to only allow that communication. As part of a defence-in-depth approach to security, PaaS capabilities should be configured with encryption, authentication, and appropriate access controls and permissions. - -### Public IP for PaaS - -Public IP addresses for PaaS capabilities are allocated based on the region where the service is hosted or deployed. An understanding of Public IP address allocation and regions is required if you are going to build appropriate network security rules and routing topology for network segmentation and segregation covering Azure virtual networks, PaaS, ExpressRoute, and Internet connectivity. Azure allocates IP addresses from a pool allocated to each Azure region. Microsoft makes the addresses used in each region available for download, which is updated in a regular and controlled manner. The services that are available in each region also frequently change as new services are released or services are deployed more widely. Commonwealth entities should review these materials regularly and can use automation to maintain systems as required. Specific IP addresses for some services hosted in each region can be obtained by contacting Microsoft support. - -| Resource | Link | -| --- | --- | -| *Microsoft Azure Datacenter IP Ranges* | [https://www.microsoft.com/download/details.aspx?id=41653](https://www.microsoft.com/download/details.aspx?id=41653) | -| *Azure Services per region* | [https://azure.microsoft.com/global-infrastructure/services/?regions=non-regional,australia-central,australia-central-2,australia-east,australia-southeast&products=all](https://azure.microsoft.com/global-infrastructure/services/?regions=non-regional,australia-central,australia-central-2,australia-east,australia-southeast&products=all) | -| *Inbound and outbound IP addresses in Azure App Service* | [https://docs.microsoft.com/azure/app-service/overview-inbound-outbound-ips](../app-service/overview-inbound-outbound-ips.md) -| - -## Next steps - -Compare your overall architecture and design to the published [PROTECTED Blueprints for IaaS and PaaS Web Applications](https://aka.ms/au-protected).
\ No newline at end of file diff --git a/articles/azure-australia/gateway-ingress-traffic.md b/articles/azure-australia/gateway-ingress-traffic.md deleted file mode 100644 index e57385fcbea28..0000000000000 --- a/articles/azure-australia/gateway-ingress-traffic.md +++ /dev/null @@ -1,371 +0,0 @@ ---- -title: Controlling ingress traffic in Azure Australia -description: A guide for controlling ingress traffic in Azure Australia to meet Australian Government requirements for Secure Internet Gateways -author: emilyre -ms.service: azure-australia -ms.topic: conceptual -ms.date: 07/22/2019 -ms.author: yvettep ---- - -# Controlling ingress traffic in Azure Australia - -A core element of securing ICT systems is controlling network traffic. Traffic should be restricted to only that necessary for a system to function to reduce the potential for compromise. - -This guide gives details about how inbound (ingress) network traffic works within Azure, and recommendations for implementing network security controls for an internet-connected system. - -The network controls align with the Australian Cyber Security Centre (ACSC) Consumer Guidance and the intent of the ACSC's Information Security Manual (ISM). - -## Requirements - -The overall security requirements for Commonwealth systems are defined in the ISM. To assist Commonwealth entities in implementing network security, the ACSC has published [ACSC Protect: Implementing Network Segmentation and Segregation](https://www.acsc.gov.au/publications/protect/network_segmentation_segregation.htm), and to assist with securing systems in Cloud environments, the ACSC has published [Cloud Computing Security for Tenants](https://www.cyber.gov.au/publications/cloud-computing-security-for-tenants). - -These guides outline the context for implementing network security and controlling traffic and include practical recommendations for network design and configuration. - -The Microsoft [Cloud Computing Security for Tenants of Microsoft Azure](https://aka.ms/au-irap) guide in the Australian page of the Service Trust Portal highlights specific Microsoft technologies that enable you to meet the advice in the ACSC publications. - -The following key requirements, identified in the publications from the ACSC, are important for controlling ingress traffic in Azure: - -|Description|Source| -|---|---| -|**Implement Network Segmentation and Segregation, for example, n-tier architecture, using host-based firewalls and CSP's network access controls to limit inbound and outbound VM network connectivity to only required ports/protocols.**| _Cloud Computing for Tenants_| -|**Implement adequately high bandwidth, low latency, reliable network connectivity** between the tenant (including the tenant's remote users) and the cloud service to meet the tenant's availability requirements | _Cloud Computing for Tenants_| -|**Apply technologies at more than just the network layer**. Each host and network should be segmented and segregated, where possible, at the lowest level that can be practically managed. In most cases, segmentation and segregation apply from the data link layer up to and including the application layer; however, in sensitive environments, physical isolation may be appropriate. Host-based and network-wide measures should be deployed in a complementary manner and be centrally monitored. Using a firewall or security appliance as the only security measure is not sufficient.
|_ACSC Protect: Implementing Network Segmentation and Segregation_| -|**Use the principles of least privilege and need‐to‐know**. If a host, service or network doesn't need to communicate with another host, service, or network, it shouldn't be allowed to. If a host, service, or network only needs to talk to another host, service, or network using specific ports or protocols, then any other ports or protocols should be disabled. Adopting these principles across a network will complement the minimization of user privileges and significantly increase the overall security posture of the environment. |_ACSC Protect: Implementing Network Segmentation and Segregation_| -|**Separate hosts and networks based on their sensitivity or criticality to business operations**. Separation can be achieved by using different hardware or platforms depending on different security classifications, security domains, or availability/integrity requirements for certain hosts or networks. In particular, separate management networks and consider physically isolating out-of-band management networks for sensitive environments. |_ACSC Protect: Implementing Network Segmentation and Segregation_| -|**Identify, authenticate, and authorize access by all entities to all other entities**. All users, hosts, and services should have their access restricted to only the other users, hosts, and services required to do their designated duties or functions. All legacy or local services which bypass or downgrade the strength of identification, authentication, and authorization services should be disabled and their use should be closely monitored. |_ACSC Protect: Implementing Network Segmentation and Segregation_| -|**Implement allow listing of network traffic instead of deny listing**. Only permit access for known good network traffic (that is, that which is identified, authenticated, and authorized), rather than denying access to known bad network traffic (for example, blocking a specific address or service). Using an accepted senders list results in a superior security policy to a block list, and significantly improves an organization's capacity to detect and assess potential network intrusions. |_ACSC Protect: Implementing Network Segmentation and Segregation_| -| - -This article provides information and recommendations on how these requirements can be met for systems deployed in Azure using both Infrastructure as a Service (IaaS) and Platform as a Service (PaaS). You should also read the article on [Controlling egress traffic in Azure Australia](gateway-egress-traffic.md) to fully understand controlling network traffic within Azure. - -## Architecture - -If you are involved in the design or implementation of network security and ingress traffic controls, you must first understand how ingress network traffic works within Azure across both IaaS and PaaS. This section provides an overview of the possible entry points where network traffic could reach a resource hosted in Azure, and the security controls available to restrict and control that traffic. Each of these components is discussed in detail in the remaining sections of this guide. - -### Architecture components - -The architectural diagram shown here depicts the possible paths that network traffic can take to connect into a service hosted in Azure. These components are divided into Azure, IaaS Ingress, PaaS Ingress, and Security Control, depending on the function that they provide for ingress traffic. 
- -![Architecture](media/ingress-traffic.png) - -### Azure components - -|Component | Description| -|---|---| -|**DDoS Protection** | Distributed denial of service (DDoS) attacks attempt to exhaust an application's resources, making the application unavailable to legitimate users. DDoS attacks can be targeted at any endpoint that is publicly reachable through the internet. Azure includes DDoS Protection automatically through the Azure platform and provides additional mitigation capabilities that can be enabled for specific applications for more granular control.| -| **Traffic Manager** | Azure Traffic Manager is a Domain Name System (DNS) based traffic load balancer that can distribute traffic optimally to services across Azure regions, while providing high availability and responsiveness. Traffic Manager uses DNS to direct client requests to the most appropriate endpoint based on a traffic-routing method and the health of the endpoints.| -| **ExpressRoute** | ExpressRoute is a dedicated network connection for consuming Microsoft cloud services. It is provisioned through a connectivity provider and offers more reliability, faster speeds, lower latencies, and higher security than typical connections over the Internet. An ExpressRoute circuit represents the logical connection between the on-premises infrastructure and Microsoft cloud services through a connectivity provider.| -| **ExpressRoute Private Peering** | ExpressRoute Private Peering is a connection between the on-premises environment and private Azure virtual networks. Private Peering enables access to Azure services such as Virtual Machines, that are deployed within a virtual network. The resources and virtual networks accessed via private peering are considered an extension of an organization's core network. Private Peering provides bi-directional connectivity between the on-premises network and Azure virtual networks using private IP addresses.| -| **ExpressRoute Microsoft Peering** | ExpressRoute Microsoft Peering is a connection between the on-premises environment and Microsoft and Azure public services. This includes connectivity to Microsoft 365, Dynamics 365, and Azure PaaS services. Peering is established over public IP addresses that are owned by the organization or connectivity provider. No services are accessible via ExpressRoute Microsoft Peering by default and an organization must opt in to the services that are required. This process then provides connectivity to the same endpoints that are available on the Internet.| -| - -### IaaS ingress components - -|Component | Description| -|---|---| -|**Network Interface** | A network interface is a resource that exists in Azure. It is attached to a Virtual Machine and assigned a private, non-Internet routable IP address from the subnet that it is associated with. This IP address is dynamically or statically assigned through Azure Resource Manager.| -|**Subnet** | A subnet is an IP address range that is created within a VNet. Multiple subnets can be created within a VNet for network segmentation.| -| **Virtual Network (VNet)** | A VNet is a foundational resource within Azure that provides a platform and boundary for deploying resources and enabling communication. The VNet exists within an Azure Region and defines the IP Address Space and Routing boundaries for VNet integrated resources such as Virtual Machines.| -| **VNet Peering** | VNet Peering is an Azure configuration option that enables direct communication between two VNets without the need for a Virtual Network Gateway. 
Once peered, the two VNets can communicate directly and additional configuration can control the use of Virtual Network Gateways and other transit options.| -| **Public IP** | A Public IP is a resource that reserves one of the Microsoft owned Public, Internet-Routable IP Addresses from the specified region for use within the virtual network. It can be associated with a specific Network Interface, which enables the resource to be accessible from the Internet, ExpressRoute and PaaS systems.| -| **ExpressRoute Gateway** | An ExpressRoute Gateway is an object in a Virtual Network that provides connectivity and routing from the Virtual Network to on-premises networks over Private Peering on an ExpressRoute Circuit.| -| **VPN Gateway** | A VPN Gateway is an object in a Virtual Network that provides an encrypted tunnel from a Virtual Network to an external network. The encrypted tunnel can be Site-to-Site for bi-directional communication with an on-premises environment, other virtual network, or cloud environment, or Point-to-Site for communication with a single endpoint.| -| **PaaS VNet Integration** | Many PaaS capabilities can be deployed into, or integrated with, a Virtual Network. Some PaaS capabilities can be fully integrated with a VNet and be accessible via only private IP addresses. Others, such as Azure Load Balancer and Azure Application Gateway, can have an external interface with a public IP address and an internal interface with a private IP address inside the virtual network. In this instance, traffic can ingress into the Virtual Network via the PaaS capability.| -| - -### PaaS ingress components - -|Component | Description| -|---|---| -|**Hostname** | Azure PaaS capabilities are identified by a unique public hostname that is assigned when the resource is created. This hostname is then registered into a public DNS domain, where it can be resolved to a Public IP address.| -|**Public IP** | Unless deployed in a VNet integrated configuration, Azure PaaS capabilities are accessed via a Public, Internet-routable IP address. This address can be dedicated to the specific resources, such as a Public Load Balancer, or could be associated with a specific capability that has a shared entry point for multiple instances, such as Storage or SQL. This Public IP address can be accessed from the Internet, ExpressRoute or from IaaS public IP addresses through the Azure backbone network.| -|**Service endpoints** | Service endpoints provide a direct, private connection from a Virtual Network to a specific PaaS capability. Service endpoints, which are only available for a subset of PaaS capabilities, provide increased performance and security for resources in a VNet accessing PaaS.| -| - -### Security controls - -|Component | Description| -|---|---| -|**Network Security Groups (NSGs)** | NSGs control traffic into and out of virtual network resources in Azure. NSGs apply rules for the traffic flows that are permitted or denied, which includes traffic within Azure and between Azure and external networks such as on-premises or the Internet. NSGs are applied to subnets within a virtual network or to individual network interfaces.| -|**PaaS Firewall** | Many PaaS capabilities, such as Storage and SQL, have an inbuilt Firewall for controlling ingress network traffic to the specific resource.
Rules can be created to allow or deny connections from specific IP Addresses and/or Virtual Networks.| -|**PaaS Authentication and Access Control** | As part of a layered approach to security, PaaS capabilities provide multiple mechanisms for authenticating users and controlling privileges and access.| -|**Azure Policy** | Azure Policy is a service in Azure for creating, assigning, and managing policies. These policies use rules to control the types of resources that can be deployed and the configuration of those resources. Policies can be used to enforce compliance by preventing resources from being deployed if they do not meet requirements or can be used for monitoring to report on compliance status.| -| - -## General guidance - -To design and build secure solutions within Azure, it is critical to understand and control the network traffic so that only identified and authorized communication can occur. The intent of this guidance, and the specific component guidance in later sections, is to describe the tools and services that can be utilized to apply the principles outlined in the _ACSC Protect: Implementing Network Segmentation and Segregation_ across Azure workloads. This includes detailing how to create a virtual architecture for securing resources when it is not possible to apply the same traditional physical and network controls that are possible in an on-premises environment. - -### Specific focus areas - -* Limit the number of entry points to virtual networks -* Limit the number of Public IP addresses -* Consider utilizing a Hub and Spoke Network Design for Virtual Networks as discussed in the Microsoft Virtual Data Center (VDC) documentation -* Utilize products with inbuilt security capabilities for inbound connections from the Internet (for example, Application Gateway, API Gateway, Network Virtual Appliances) -* Restrict communication flows to PaaS capabilities to only those necessary for system functionality -* Deploy PaaS in a VNet integrated configuration for increased segregation and control -* Configure systems to use encryption mechanisms in line with the ACSC Consumer Guidance and ISM -* Use identity-based protections such as authentication and Azure role-based access control in addition to traditional network controls -* Implement ExpressRoute for connectivity with on-premises networks -* Implement VPNs for administrative traffic and integration with external networks -* Utilize Azure Policy to restrict the regions and resources to only those that are necessary for system functionality -* Utilize Azure Policy to enforce baseline security configuration for internet-accessible resources - -### Additional resources - -|Resource | Link| -|---|---| -|Australian Regulatory and Policy Compliance Documents including Consumer Guidance|[https://aka.ms/au-irap](https://aka.ms/au-irap)| -|Azure Virtual Data Center|[https://docs.microsoft.com/azure/architecture/vdc/networking-virtual-datacenter](/azure/architecture/vdc/networking-virtual-datacenter)| -|ACSC Network Segmentation|[https://acsc.gov.au/publications/protect/network_segmentation_segregation.htm](https://acsc.gov.au/publications/protect/network_segmentation_segregation.htm)| -|ACSC Cloud Security for Tenants| [https://acsc.gov.au/publications/protect/cloud-security-tenants.htm](https://acsc.gov.au/publications/protect/cloud-security-tenants.htm)| -|ACSC Information Security Manual|[https://acsc.gov.au/infosec/ism/index.htm](https://acsc.gov.au/infosec/ism/index.htm)| - -## Component guidance - -This section provides 
further guidance on the individual components that are relevant to ingress traffic to systems deployed in Azure. Each section describes the intent of the specific component with links to documentation and configuration guides that can be used to assist with design and build activities. - -## Azure - -All communication to resources within Azure passes through the Microsoft maintained network infrastructure, which provides connectivity and security functionality. A range of protections are automatically put in place by Microsoft to protect the Azure platform and network infrastructure and additional capabilities are available as services within Azure to control network traffic and establish network segmentation and segregation. - -### DDoS Protection - -Internet accessible resources are susceptible to DDoS attacks. To protect against these attacks, Azure provides DDoS protections at a Basic and a Standard level. - -Basic is automatically enabled as part of the Azure platform including always-on traffic monitoring, and real-time mitigation of common network-level attacks, providing the same defenses utilized by Microsoft's online services. The entire scale of Azure's global network can be used to distribute and mitigate attack traffic across regions. Protection is provided for IPv4 and IPv6 Azure public IP addresses - -Standard provides additional mitigation capabilities over the Basic service tier that are tuned specifically to Azure Virtual Network resources. Protection policies are tuned through dedicated traffic monitoring and machine learning algorithms. Protection is provided for IPv4 Azure public IP addresses. - -|Resource|Link| -|---|---| -|Azure DDoS Protection Overview|[https://docs.microsoft.com/azure/virtual-network/ddos-protection-overview](../ddos-protection/ddos-protection-overview.md)| -|Azure DDoS Best Practices|[https://docs.microsoft.com/azure/ddos-protection/fundamental-best-practices](../ddos-protection/fundamental-best-practices.md)| -|Managing DDoS Protection|[https://docs.microsoft.com/azure/virtual-network/manage-ddos-protection](../ddos-protection/manage-ddos-protection.md)| -| - -### Traffic Manager - -Traffic Manager is used to manage ingress traffic by controlling which endpoints of an application receive connections. To protect against a loss of availability of systems or applications due to cyber security attack, or to recover services after a system compromise, Traffic Manager can be used to redirect traffic to functioning, available application instances. - -|Resource|Link| -|---|---| -|Traffic Manager Overview | [https://docs.microsoft.com/azure/traffic-manager/traffic-manager-overview](../traffic-manager/traffic-manager-overview.md)| -|Disaster recovery using Azure DNS and Traffic Manager Guide | [https://docs.microsoft.com/azure/networking/disaster-recovery-dns-traffic-manager](../networking/disaster-recovery-dns-traffic-manager.md)| -| - -### ExpressRoute - -ExpressRoute can be used to establish a private path from an on-premises environment to systems hosted in Azure. This connection can provide greater reliability and guaranteed performance with enhanced privacy for network communications. Express Route allows commonwealth entities to control inbound traffic from the on-premises environment and define dedicated addresses specific to the organization to use for inbound firewall rules and access control lists. 
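-
-As a hedged illustration only (the provider, peering location, bandwidth, and resource names below are hypothetical and would be agreed with your connectivity provider), an ExpressRoute circuit can typically be created with the Azure CLI along these lines:
-
-```azurecli
-# Create an ExpressRoute circuit; the connectivity provider then provisions their
-# side of the circuit using the service key (names and values are placeholders).
-az network express-route create \
-  --name contoso-er-circuit \
-  --resource-group connectivity-rg \
-  --location australiaeast \
-  --bandwidth 200 \
-  --provider "Equinix" \
-  --peering-location "Sydney" \
-  --sku-tier Standard \
-  --sku-family MeteredData
-```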
- -|Resource | Link| -|---|---| -|ExpressRoute Overview | [https://docs.microsoft.com/azure/expressroute/](../expressroute/index.yml)| -|ExpressRoute Connectivity Models | [https://docs.microsoft.com/azure/expressroute/expressroute-connectivity-models](../expressroute/expressroute-connectivity-models.md)| -| - -### ExpressRoute Private Peering - -Private peering provides a mechanism for extending an on-premises environment into Azure using only private IP addresses. This enables commonwealth entities to integrate Azure Virtual Networks and address ranges with existing on-premises systems and services. Private Peering provides assurance that communication across ExpressRoute is only to Virtual Networks authorized by the organization. If you use Private Peering, Commonwealth entities must implement Network Virtual Appliances (NVA) instead of Azure VPN Gateway to establish the secure VPN communication to your on-premises networks as required by the ACSC consumer guidance. - -|Resource | Link| -|---|---| -|ExpressRoute Private Peering Overview | [https://docs.microsoft.com/azure/expressroute/expressroute-circuit-peerings#routingdomains](../expressroute/expressroute-circuit-peerings.md#routingdomains)| -|ExpressRoute Private Peering How-to Guide | [https://docs.microsoft.com/azure/expressroute/expressroute-howto-routing-portal-resource-manager#private](../expressroute/expressroute-howto-routing-portal-resource-manager.md#private)| -| - -### ExpressRoute Microsoft Peering - -Microsoft Peering provides a high-speed, low latency connection to Microsoft Public Services without needing to traverse the Internet. This provides greater reliability, performance, and privacy for connections. By using Route Filters, commonwealth entities can restrict communications to only the Azure Regions that they require, but this includes services hosted by other organizations and may necessitate additional filtering or inspection capabilities between the on-premises environment and Microsoft. - -Commonwealth entities can use the dedicated Public IP addresses established through the peering relationship to uniquely identify the on-premises environment for use in firewalls and access control lists within PaaS capabilities. - -As an alternative, commonwealth entities can use ExpressRoute Microsoft peering as an underlay network for establishing VPN connectivity through Azure VPN Gateway. In this model, there is no active communication from the internal on-premises network to Azure public services over ExpressRoute, but secure connectivity through to private Virtual Networks is achieved in compliance with the ACSC consumer guidance. - -|Resource | Link| -|---|---| -|ExpressRoute Microsoft Peering Overview | [https://docs.microsoft.com/azure/expressroute/expressroute-circuit-peerings#routingdomains](../expressroute/expressroute-circuit-peerings.md#routingdomains)| -|ExpressRoute Microsoft Peering How-to Guide | [https://docs.microsoft.com/azure/expressroute/expressroute-howto-routing-portal-resource-manager#msft](../expressroute/expressroute-howto-routing-portal-resource-manager.md#msft)| -| - -## IaaS ingress - -This section provides the component guidance for controlling Ingress traffic to IaaS components. IaaS includes Virtual Machines and other compute resources that can be deployed and managed within a Virtual Network in Azure. 
For traffic to arrive at systems deployed using IaaS it must have an entry point to the Virtual Network, which can be established through a Public IP address, Virtual Network Gateway or Virtual Network peering relationship. - -### Network interface - -Network interfaces are the ingress points for all traffic to a Virtual Machine. Network Interfaces enable the configuration of IP Addressing, and can be used to apply NSGs or for routing traffic through a Network Virtual Appliance. The Network Interfaces for Virtual Machines should be planned and configured appropriately to align with overall network segmentation and segregation objectives. - -|Resource | Link| -|---|---| -|Create, Change, or Delete a Network Interface | [https://docs.microsoft.com/azure/virtual-network/virtual-network-network-interface](../virtual-network/virtual-network-network-interface.md)| -|Network Interface IP Addressing | [https://docs.microsoft.com/azure/virtual-network/private-ip-addresses](../virtual-network/ip-services/private-ip-addresses.md)| -| - -### Subnet - -Subnets are a crucial component for network segmentation and segregation within Azure. Subnets can be used similarly to provide separation between systems. NSGs can be applied to subnets to restrict ingress communication flows to only those necessary for system functionality. Subnets can be used as both source and destination addresses for firewall rules and access-control lists and can be configured for service endpoints to provide connectivity to PaaS capabilities. - -|Resource | Link| -|---|---| -|Add, change, or delete a virtual network subnet | [https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-subnet](../virtual-network/virtual-network-manage-subnet.md)| -| - -### Virtual Network (VNet) - -VNets are one of the fundamental building blocks for networking in Azure. Virtual Networks define an IP address space and routing boundary to be used across a variety of systems. Virtual Networks are divided into subnets and all subnets within a Virtual Network have a direct network route to each other. By using Virtual Network Gateways (ExpressRoute or VPN), systems within a Virtual Network can be made accessible to on-premises and external environments. Understanding Virtual Networks and the associated configuration parameters and routing is crucial in understanding and controlling ingress network traffic. - -|Resource | Link| -|---|---| -|Virtual Networks Overview | [https://docs.microsoft.com/azure/virtual-network/virtual-networks-overview](../virtual-network/virtual-networks-overview.md)| -|Plan Virtual Networks How-to Guide | [https://docs.microsoft.com/azure/virtual-network/virtual-network-vnet-plan-design-arm](../virtual-network/virtual-network-vnet-plan-design-arm.md)| -Create a Virtual Network Quickstart | [https://docs.microsoft.com/azure/virtual-network/quick-create-portal](../virtual-network/quick-create-portal.md)| -| - -### VNet Peering - -VNet Peering is used to provide a direct communication path between two Virtual Networks. Once peering is established, hosts in one Virtual Network have a high-speed routing path directly to hosts in another Virtual Network. NSGs still apply to the traffic as normal and advanced configuration parameters can be used to define whether communication through Virtual Network Gateways or from other external systems is permitted. 
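-
-As a minimal sketch (the resource group and VNet names are hypothetical, and both VNets are assumed to already exist in the same resource group), a bidirectional peering can be created with the Azure CLI as follows:
-
-```azurecli
-# Peering must be created in both directions before the link becomes fully connected.
-az network vnet peering create \
-  --name hub-to-spoke \
-  --resource-group network-rg \
-  --vnet-name hub-vnet \
-  --remote-vnet spoke-vnet \
-  --allow-vnet-access \
-  --allow-gateway-transit
-
-az network vnet peering create \
-  --name spoke-to-hub \
-  --resource-group network-rg \
-  --vnet-name spoke-vnet \
-  --remote-vnet hub-vnet \
-  --allow-vnet-access \
-  --use-remote-gateways
-```
-
-The `--allow-gateway-transit` and `--use-remote-gateways` flags illustrate the advanced parameters mentioned above that control whether traffic may flow through a Virtual Network Gateway in the peered network; they only succeed if the hub VNet actually contains a gateway.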
- -|Resource | Link| -|---|---| -|Virtual Network Peering Overview | [https://docs.microsoft.com/azure/virtual-network/virtual-network-peering-overview](../virtual-network/virtual-network-peering-overview.md)| -|Create, change, or delete a virtual network peering | [https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-peering](../virtual-network/virtual-network-manage-peering.md)| -| - -### Public IP on VNET - -Public IP addresses are used to provide an ingress communication path to services deployed in a Virtual Network. Commonwealth entities should plan the allocation of Public IP addresses carefully and only assign them to resources where there is a genuine requirement. As a general design practice, Public IP addresses should be allocated to resources with inbuilt security capabilities such as Application Gateway or Network Virtual Appliances to provide a secure, controlled public entry point to a Virtual Network. - -|Resource | Link| -|---|---| -|Public IP Addresses Overview | [https://docs.microsoft.com/azure/virtual-network/virtual-network-ip-addresses-overview-arm#public-ip-addresses](../virtual-network/ip-services/public-ip-addresses.md#public-ip-addresses)| -|Create, change, or delete a public IP address | [https://docs.microsoft.com/azure/virtual-network/virtual-network-public-ip-address](../virtual-network/ip-services/virtual-network-public-ip-address.md)| -| - -### ExpressRoute Gateway - -ExpressRoute Gateways provide an ingress point from the on-premises environment and should be deployed to meet security, availability, financial, and performance requirements. ExpressRoute Gateways provide a defined network bandwidth and incur usage costs after deployment. Virtual Networks can have only one ExpressRoute Gateway, but this can be connected to multiple ExpressRoute circuits and can be leveraged by multiple Virtual Networks through VNet Peering, allowing multiple Virtual Networks to share bandwidth and connectivity. Care should be taken to configure routing between on-premises environments and Virtual Networks using ExpressRoute Gateways to ensure end to end connectivity using known, controlled network ingress points. Commonwealth entities using ExpressRoute Gateway must also deploy Network Virtual Appliances to establish VPN connectivity to the on-premises environment for compliance with the ACSC consumer guidance. - -|Resource | Link| -|---|---| -|ExpressRoute Gateway Overview | [https://docs.microsoft.com/azure/expressroute/expressroute-about-virtual-network-gateways](../expressroute/expressroute-about-virtual-network-gateways.md)| -|Configure a virtual network gateway for ExpressRoute | [https://docs.microsoft.com/azure/expressroute/expressroute-howto-add-gateway-portal-resource-manager](../expressroute/expressroute-howto-add-gateway-portal-resource-manager.md)| -| - -### VPN Gateway - -Azure VPN Gateway provides an ingress network point from an external network for secure site-to-site or point-to-site connections. VPN Gateways provide a defined network bandwidth and incur usage costs after deployment. Commonwealth entities utilizing VPN Gateway should ensure that it is configured in accordance with the ACSC consumer guidance. Virtual Networks can have only one VPN Gateway, but this can be configured with multiple tunnels and can be leveraged by multiple Virtual Networks through VNet Peering, allowing multiple Virtual Networks to share bandwidth and connectivity. 
VPN Gateways can be established over the Internet or over ExpressRoute through Microsoft Peering. - -|Resource | Link| -|---|---| -|VPN Gateway Overview | [https://docs.microsoft.com/azure/vpn-gateway/](../vpn-gateway/index.yml)| -|Planning and design for VPN Gateway | [https://docs.microsoft.com/azure/vpn-gateway/vpn-gateway-plan-design](../vpn-gateway/vpn-gateway-about-vpngateways.md)| -|VPN Gateway configuration for Australian Government agencies|[IPSEC configuration required for Australian Government agencies](vpn-gateway.md)| -| - -### PaaS VNet integration - -Leveraging PaaS can provide enhanced functionality and availability and reduce management overhead but must be secured appropriately. To increase control, enforce network segmentation, or to provide a secure ingress entry point for applications and services, many PaaS capabilities can be integrated with a Virtual Network. - -To provide a secure entry point, PaaS capabilities such as Application Gateway can be configured with an external, public facing interface and an internal, private interface for communicating with application services. This prevents the need to configure application servers with Public IP addresses and expose them to external networks. - -To use PaaS as an integrated part of system or application architecture, Microsoft provides multiple mechanisms to deploy PaaS into a Virtual Network. The deployment methodology restricts the inbound access from external networks such as the Internet while providing connectivity and integration with internal systems and applications. Examples include App Service Environments, SQL Managed Instance, and more. - -|Resource | Link| -|---|---| -|Virtual network integration for Azure services | [https://docs.microsoft.com/azure/virtual-network/virtual-network-for-azure-services](../virtual-network/virtual-network-for-azure-services.md)| -|Integrate your app with an Azure Virtual Network How-to guide | [https://docs.microsoft.com/azure/app-service/web-sites-integrate-with-vnet](../app-service/overview-vnet-integration.md)| -| - -## PaaS ingress - -PaaS capabilities provide opportunities for increased capability and simplified management, but introduce complexities in addressing requirements for network segmentation and segregation. PaaS capabilities are typically configured with Public IP addresses and are accessible from the Internet. When building systems using PaaS capabilities, care should be taken to identify all the necessary communication flows between components within the system and network security rules created to allow only this communication. As part of a defence-in-depth approach to security, PaaS capabilities should be configured with encryption, authentication, and appropriate access controls and permissions. - -### Hostname - -PaaS capabilities are uniquely identified by hostnames to allow multiple instances of the same service to be hosted on the same Public IP address. Unique hostnames are specified when resources are created and exist within Microsoft owned DNS domains. The specific hostnames for authorized services can be used within security tools with application level filtering capabilities. Certain services can also be configured with custom domains as required. 
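-
-As a brief, hedged example (the storage account and resource group names are hypothetical), the public hostnames assigned to a PaaS resource can be retrieved so that they can be referenced in application-level filtering rules:
-
-```azurecli
-# List the public endpoints (hostnames) assigned to a storage account.
-az storage account show \
-  --name agencystorage001 \
-  --resource-group app-rg \
-  --query primaryEndpoints
-
-# Confirm how the blob hostname currently resolves.
-nslookup agencystorage001.blob.core.windows.net
-```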
- -|Resource | Link| -|---|---| -|Many public namespaces used by Azure services can be obtained through PowerShell by running the Get-AzureRMEnvironment command | [https://docs.microsoft.com/powershell/module/azurerm.profile/get-azurermenvironment](/powershell/module/azurerm.profile/get-azurermenvironment)| -|Configuring a custom domain name for an Azure cloud service | App Services and others can have custom domains [https://docs.microsoft.com/azure/cloud-services/cloud-services-custom-domain-name-portal](../cloud-services/cloud-services-custom-domain-name-portal.md)| -| - -### Public IP for PaaS - -Public IP addresses for PaaS capabilities are allocated based on the region where the service is hosted or deployed. To build appropriate network security rules and routing topology for network segmentation and segregation covering Azure Virtual Networks, PaaS, ExpressRoute, and Internet connectivity, an understanding of Public IP address allocation and regions is required. Azure allocates IP addresses from a pool allocated to each Azure region. Microsoft makes the addresses used in each region available for download, which is updated in a regular and controlled manner. The services that are available in each region also frequently change as new services are released or services are deployed more widely. Commonwealth entities should review these materials regularly and can leverage automation to maintain systems as required. Specific IP addresses for some services hosted in each region can be obtained by contacting Microsoft support. - -|Resource | Link| -|---|---| -|Microsoft Azure Datacenter IP Ranges | [https://www.microsoft.com/download/details.aspx?id=41653](https://www.microsoft.com/download/details.aspx?id=41653)| -|Azure Services per region | [https://azure.microsoft.com/global-infrastructure/services/?regions=non-regional,australia-central,australia-central-2,australia-east,australia-southeast&products=all](https://azure.microsoft.com/global-infrastructure/services/?regions=non-regional,australia-central,australia-central-2,australia-east,australia-southeast&products=all)| -| - -### Service endpoints - -Virtual Network Service endpoints provide a high-speed, private ingress network connection for subnets within a Virtual Network to consume specific PaaS capabilities. For complete network segmentation and segregation of the PaaS capability, the PaaS capability must be configured to accept connections only from the necessary virtual networks. Not all PaaS capabilities support a combination of Firewall rules that includes service endpoints and traditional IP address-based rules, so care should be taken to understand the flow of communications required for application functionality and administration so that the implementation of these security controls does not impact service availability. - -|Resource | Link| -|---|---| -|Service endpoints overview | [https://docs.microsoft.com/azure/virtual-network/virtual-network-service-endpoints-overview](../virtual-network/virtual-network-service-endpoints-overview.md)| -|Tutorial: Restrict network access to PaaS resources |[https://docs.microsoft.com/azure/virtual-network/tutorial-restrict-network-access-to-resources](../virtual-network/tutorial-restrict-network-access-to-resources.md)| -| - -## Security - -Implementing network segmentation and segregation controls on IaaS and PaaS capabilities is achieved through securing the capabilities themselves and by implementing controlled communication paths from the systems that will be communicating with the capability.
- -Designing and building solutions in Azure is a process of creating a logical architecture to understand, control, and monitor network resources across the entire Azure presence. This logical architecture is software defined within the Azure platform and takes the place of a physical network topology that is implemented in traditional network environments. - -The logical architecture that is created must provide the functionality necessary for usability, but must also provide the visibility and control needed for security and integrity. - -Achieving this outcome is based on implementing the necessary network segmentation and segregation tools, but also on protecting and enforcing the network topology and the implementation of these tools. - -The information provided in this guide can be used to help identify the sources of ingress traffic that need to be permitted and the ways that the traffic can be further controlled or constrained. - -### Network Security Groups (NSGs) - -NSGs are used to specify the inbound and outbound traffic permitted for a subnet or a specific network interface. When configuring NSGs, commonwealth entities should use an approval-list approach where rules are configured to permit only the necessary traffic, with a default rule configured to deny all traffic that does not match a specific permit statement. Care must be taken when planning and configuring NSGs to ensure that all necessary inbound and outbound traffic is captured appropriately. This includes identifying and understanding all private IP address ranges utilized within Azure Virtual Networks and the on-premises environment, and specific Microsoft services such as Azure Load Balancer and PaaS management requirements. Individuals involved in the design and implementation of Network Security Groups should also understand the use of Service Tags and Application Security Groups for creating fine-grained, service, and application-specific security rules. - -|Resource | Link| -|---|---| -|Network Security Overview | [https://docs.microsoft.com/azure/virtual-network/security-overview](../virtual-network/network-security-groups-overview.md)| -|Create, change, or delete a network security group | [https://docs.microsoft.com/azure/virtual-network/manage-network-security-group](../virtual-network/manage-network-security-group.md)| -| - -## PaaS firewall - -A PaaS firewall is a network access control capability that can be applied to certain PaaS services. It allows IP address filtering or filtering from specific virtual networks to be configured to restrict ingress traffic to the specific PaaS instance. For PaaS capabilities that include a Firewall, network access control policies should be configured to permit only the necessary ingress traffic based on application requirements. - -|Resource | Link| -|---|---| -|Azure SQL Database and Azure Synapse Analytics IP firewall rules | [https://docs.microsoft.com/azure/sql-database/sql-database-firewall-configure](/azure/azure-sql/database/firewall-configure)| -|Storage Network Security | [https://docs.microsoft.com/azure/storage/common/storage-network-security](../storage/common/storage-network-security.md)| -| - -## PaaS authentication and access control - -Depending on the PaaS capability and its purpose, using network controls to restrict access may not be possible or practical. As part of the layered security model for PaaS, Azure provides a variety of authentication and access control mechanisms to restrict access to a service, even if network traffic is allowed.
Typical authentication mechanisms for PaaS capabilities include Azure Active Directory, application-level authentication, and Shared Keys or access signatures. Once a user is securely identified, roles can be utilized to control the actions that the user can perform. These tools can be utilized as an alternative or as a complementary measure to restrict access into services. - -|Resource | Link| -|---|---| -|Controlling and granting database access to SQL Database and Azure Synapse Analytics | [https://docs.microsoft.com/azure/sql-database/sql-database-manage-logins](/azure/azure-sql/database/logins-create-manage)| -|Authorization for the Azure Storage Services | [https://docs.microsoft.com/rest/api/storageservices/authorization-for-the-Azure-Storage-Services](/rest/api/storageservices/authorization-for-the-Azure-Storage-Services)| -| - -## Azure Policy - -Azure Policy is a key component for enforcing and maintaining the integrity of the logical architecture of the Azure environment. Given the variety of services and ingress network traffic paths available through Azure services, it is crucial that Commonwealth entities are aware of the resources that exist within their environment and the available network ingress points. To ensure that unauthorized network ingress points are not created in the Azure environment, Commonwealth entities should leverage Azure Policy to control the types of resources that can be deployed and the configuration of those resources. Practical examples include restricting resources to only those authorized and approved for use, enforcing HTTPS encryption on Storage, and requiring NSGs to be added to subnets. - -|Resource | Link| -|---|---| -|Azure Policy Overview | [https://docs.microsoft.com/azure/governance/policy/overview](../governance/policy/overview.md)| -|Allowed Resource Types sample policy | [https://docs.microsoft.com/azure/governance/policy/samples/allowed-resource-types](../governance/policy/samples/index.md)| -|Ensure HTTPS Storage Account sample policy|[https://docs.microsoft.com/azure/governance/policy/samples/ensure-https-storage-account](../governance/policy/samples/index.md)| -|Force NSG on a subnet sample policy| [https://docs.microsoft.com/azure/governance/policy/samples/nsg-on-subnet](../governance/policy/samples/index.md)| -| - -## Next steps - -Review the article on [Gateway Egress Traffic Management and Control](gateway-egress-traffic.md) for details on managing traffic flows from your Azure environment to other networks using your Gateway components in Azure. \ No newline at end of file diff --git a/articles/azure-australia/gateway-log-audit-visibility.md b/articles/azure-australia/gateway-log-audit-visibility.md deleted file mode 100644 index ab0e6d97f96fe..0000000000000 --- a/articles/azure-australia/gateway-log-audit-visibility.md +++ /dev/null @@ -1,409 +0,0 @@ ---- -title: Gateway logging, auditing, and visibility in Azure Australia -description: How to configure Logging, Auditing, and Visibility within the Australian regions to meet the specific requirements of Australian Government policy, regulations, and legislation. -author: emilyre -ms.service: azure-australia -ms.topic: conceptual -ms.date: 07/22/2019 -ms.author: yvettep ---- - -# Gateway logging, auditing, and visibility in Azure Australia - -Detecting and responding to cyber security threats relies on generating, collecting, and analyzing data related to the operation of a system.
- -Microsoft has built-in tools in Azure to help you implement logging, auditing, and visibility to manage the security of your systems deployed in Azure. There is also a reference architecture that aligns with the Australian Cyber Security Centre (ACSC) Consumer Guidance and the intent of the Information Security Manual (ISM). - -Gateways act as information flow control mechanisms at the network layer and may also control information at the higher layers of the Open System Interconnect (OSI) model. Gateways are necessary to control data flows between security domains and prevent unauthorised access from external networks. Given the criticality of gateways in controlling the flow of information between security domains, any failure, particularly at higher classifications, may have serious consequences. As such, robust mechanisms for alerting personnel to situations that may cause cyber security incidents are especially important for gateways. - -Implementing logging and alerting capabilities for gateways can assist in detecting cyber security incidents, attempted intrusions, and unusual usage patterns. In addition, storing event logs on a separate secure log server increases the difficulty for an adversary to delete logging information in order to destroy evidence of a targeted cyber intrusion. - -## Australian Cyber Security Centre (ACSC) requirements - -The overall security requirements for Commonwealth systems are defined in the ACSC Information Security Manual (ISM). To assist Commonwealth entities to meet these requirements within Azure, the *ACSC CONSUMER GUIDE – Microsoft Azure at PROTECTED* and *ACSC CERTIFICATION REPORT – Microsoft Azure* publications detail the following specific requirements related to Logging, Auditing, and Visibility: - -1. To mitigate the risks arising from using shared underlying cloud resources, Commonwealth entities must opt in to Microsoft Azure provided capabilities including Azure Security Centre, Azure Monitor, Azure Policy, and Azure Advisor to assist entities to perform real-time monitoring of their Azure workloads - -2. The ACSC also recommends that Commonwealth entities forward all mandated security logs to the ACSC for whole of Australian Government monitoring - -3. To assist in risk mitigation, Commonwealth entities should configure within their Azure subscriptions: - - * Enable Azure Security Centre - * Upgrade to the Standard Tier - * Enable Automatic Provisioning of the Microsoft Monitoring Agent to supported Azure VMs - * Regularly review, prioritise, and mitigate the security recommendations and alerts on the Security Centre dashboard - -4. Government entities must enable log and event forwarding from their Azure subscription to the ACSC to provide the ACSC with visibility of non-compliance with this guidance. Azure Event Hubs provides the capability to perform external log streaming to the ACSC or on-premises systems owned by the Commonwealth entity - -5. Commonwealth entities should align the logging they enable within Azure to the requirements specified in the ISM - -6. Microsoft keeps logs within Azure for 90 days. Customer entities must implement a log archival regime to ensure logs can be kept for the seven years required under the NAA AFDA - -7. Commonwealth entities that have on-premises or Azure-based Security Information and Event Management (SIEM) capabilities can also forward logs to those systems - -8. Commonwealth entities should implement Network Watcher flow logs for Network Security Groups (NSGs) and Virtual Machines. 
These logs should be stored in a dedicated storage account containing only security logs, and access to the storage account should be secured with Azure role-based access control (Azure RBAC) - -9. Commonwealth entities must implement ACSC Consumer Guidance to ensure Azure workloads meet the intent of the ISM for logging and monitoring. Commonwealth entities must also opt in to Azure capabilities that assist the ACSC to receive real-time monitoring, alerting, and logs associated with Australian Government usage of Azure - -## Architecture - -To confidently understand the network traffic entering and leaving your Azure environment, the necessary logging must be enabled on the right set of components. Doing this ensures complete visibility of the environment and provides the necessary data to do analysis. - -![Azure Monitoring Architecture](media/visibility.png) - -## Components - -The architecture shown above is made up of discrete components that provide the function of either Log Sources, Log Collection, Log Retention, Log Analysis or Incident Response. This architecture includes individual components that are typically involved in internet accessible Azure deployments. - -|Functions|Components| -|---|---| -|Log Sources|
• Application Gateway<br>• VPN Gateway<br>• Azure Firewall<br>• Network Virtual Appliances<br>• Azure Load Balancer<br>• Virtual Machines<br>• Domain Naming System (DNS) Servers<br>• Syslog and/or Log Collection Servers<br>• NSGs<br>• Azure Activity Log<br>• Azure Diagnostic Log<br>• Azure Policy|
-|Log Collection|• Event Hubs<br>• Network Watcher<br>• Log Analytics|
-|Log Retention|• Azure Storage|
-|Log Analysis|• Microsoft Defender for Cloud<br>• Azure Advisor<br>• Log Analytics Solutions<br>&nbsp;&nbsp;• Traffic Analytics<br>&nbsp;&nbsp;• DNS Analytics (Preview)<br>&nbsp;&nbsp;• Activity Log Analytics<br>• SIEM<br>• ACSC|
-|Incident Response|• Azure Alerts<br>• Azure Automation
                  | -| - -The architecture works by first generating logs from the necessary sources and then collecting them into centralised repositories. Once you've collected the logs, they can be: - -* used by Azure analysis services to get insight, -* get forwarded to external systems, or -* get archived to storage for long-term retention. - -To respond to key events or incidents identified by analysis tools, alerts can be configured, and automation developed to take necessary actions for proactive management and response. - -## General guidance - -When implementing the components listed in this article, the following general guidance applies: - -* Validate the region availability of services, ensuring that all data remains within authorised locations and deploy to AU Central or AU Central 2 as the first preference for PROTECTED workloads - -* Refer to the *Azure - ACSC Certification Report – Protected 2018* publication for the certification status of individual services and perform self-assessments on any relevant components not included in the report as per the *ACSC CONSUMER GUIDE – Microsoft Azure at PROTECTED* - -* For components not referenced in this article, Commonwealth entities should follow the principles included about generating, capturing, analysing, and keeping logs - -* Identify and prioritise the logging, auditing, and visibility on high value systems as well as all network ingress and egress points to systems hosted in Azure - -* Consolidate logs and minimise the number of instances of logging tools such as storage accounts, Log Analytics workspaces and Event Hubs - -* Restrict administrative privileges through Azure role-based access control (Azure RBAC) - -* Use Multi-Factor Authentication (MFA) for accounts administering or configuring resources in Azure - -* When centralising log collection across multiple subscriptions, ensure that administrators have the necessary privileges in each subscription - -* Ensure network connectivity and any necessary proxy configuration for Virtual Machines, including Network Virtual Appliances (NVAs), Log Collection Servers and DNS Servers, to connect to necessary Azure services such as the Log Analytics workspaces, Event Hubs, and Storage - -* Configure the Microsoft Monitoring Agent (MMA) to utilise TLS version 1.2 - -* Use Azure Policy to monitor and enforce compliance with requirements - -* Enforce encryption on all data repositories such as Storage and Databases - -* Use Locally redundant storage (LRS) and snapshots for availability of Storage Accounts and associated data - -* Consider Geo-redundant storage (GRS) or off-site storage to align with Disaster Recovery strategies - -|Resource|URL| -|---|---| -|Australian Regulatory and Policy Compliance Documents|[https://aka.ms/au-irap](https://aka.ms/au-irap)| -|Azure products - Australian regions and non-regional|[https://azure.microsoft.com/global-infrastructure/services/?regions=non-regional,australia-central,australia-central-2,australia-east,australia-southeast](https://azure.microsoft.com/global-infrastructure/services/?regions=non-regional,australia-central,australia-central-2,australia-east,australia-southeast)| -|Microsoft Azure Security and Audit Log Management Whitepaper|[https://download.microsoft.com/download/B/6/C/B6C0A98B-D34A-417C-826E-3EA28CDFC9DD/AzureSecurityandAuditLogManagement_11132014.pdf](https://download.microsoft.com/download/B/6/C/B6C0A98B-D34A-417C-826E-3EA28CDFC9DD/AzureSecurityandAuditLogManagement_11132014.pdf)| -|Microsoft Monitoring Agent 
Configuration|[https://docs.microsoft.com/azure/azure-monitor/platform/log-analytics-agent](../azure-monitor/agents/log-analytics-agent.md)| -| - -## Component guidance - -This section provides information on the purpose of each component and its role in the overall logging, auditing, and visibility architecture. Additional links are provided to access useful resources such as reference documentation, guides, and tutorials. - -## Log sources - -Before any analysis, alerting or reporting can be completed, the necessary logs must be generated. Azure logs are categorized into control/management logs, data plane logs, and processed events. - -|Type|Description| -|---|---| -|Control/management logs|Provide information about Azure Resource Manager operations| -|Data plane logs|Provide information about events raised as part of Azure resource usage, such as logs in a Virtual Machine and the diagnostics logs available through Azure Monitor| -|Processed events|Provide information about analysed events/alerts that have been processed by Azure, such as where Microsoft Defender for Cloud has processed and analysed subscriptions to provide security alerts| -| - -### Application Gateway - -Azure Application Gateway is one of the possible entry points into an Azure environment so you need to capture information related to incoming connections communicating with web applications. Application Gateway can provide crucial information relating to web application usage as well as assisting in detecting cyber security incidents. Application Gateway sends metadata to the Activity Log and Diagnostic Logs in Azure Monitor where it can be utilised in Log Analytics or distributed to an Event Hub or Storage Account. - -|Resources|Link| -|---|---| -|Application Gateway Documentation|[https://docs.microsoft.com/azure/application-gateway/](../application-gateway/index.yml)| -|Application Gateway quickstart Guide|[https://docs.microsoft.com/azure/application-gateway/quick-create-portal](../application-gateway/quick-create-portal.md)| -| - -### VPN Gateway - -The VPN Gateway is a potential entry point for a wide range of communications into the Azure environment, such as the connection to an on-premises environment and administrative traffic. Logging on VPN Gateways provides insight and traceability for the connections made to the Azure environment. Logging can provide auditing and analysis as well as assist in the detection or investigation of malicious or anomalous connections. VPN Gateway logs are sent to the Azure Monitor Activity Log where they can be utilised in Log Analytics or distributed to an Event Hub or Storage Account. - -|Resources|Link| -|---|---| -|VPN Gateway Documentation|[https://docs.microsoft.com/azure/vpn-gateway/](../vpn-gateway/index.yml)| -|Australian Government specific VPN Gateway guidance|[Azure VPN Gateway configuration](vpn-gateway.md)| -| - -### Azure Firewall - -Azure Firewall provides a controlled exit point from an Azure environment and the logs generated, which include information on attempted and successful outbound connections, are an important element in your logging strategy. These logs can validate that systems are operating as designed, as well as assist in detecting malicious code or actors attempting to connect to unauthorised external systems. Azure Firewall writes logs to the Activity Log and Diagnostic Logs in Azure Monitor where it can be used in Log Analytics, or distributed to an Event Hub or Storage Account. 
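-
-The following is a sketch only (the subscription ID, resource groups, and resource names are placeholders) of how diagnostic logs for an existing Application Gateway might be routed to a Log Analytics workspace:
-
-```azurecli
-# Send Application Gateway access and firewall logs to a Log Analytics workspace.
-az monitor diagnostic-settings create \
-  --name appgw-diagnostics \
-  --resource "/subscriptions/<subscription-id>/resourceGroups/gateway-rg/providers/Microsoft.Network/applicationGateways/agency-appgw" \
-  --workspace "/subscriptions/<subscription-id>/resourceGroups/logging-rg/providers/Microsoft.OperationalInsights/workspaces/agency-logs" \
-  --logs '[{"category":"ApplicationGatewayAccessLog","enabled":true},{"category":"ApplicationGatewayFirewallLog","enabled":true}]'
-```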
- -|Resources|Link| -|---|---| -|Azure Firewall Documentation|[https://docs.microsoft.com/azure/firewall/](../firewall/index.yml)| -|Tutorial: Monitor Azure Firewall logs and metrics|[https://docs.microsoft.com/azure/firewall/tutorial-diagnostics](../firewall/firewall-diagnostics.md)| -| - -### Network Virtual Appliances (NVA) - -NVAs can be used to complement the security capabilities available natively in Azure. The logs generated on NVAs can be valuable resources in detecting cyber security incidents and are a key part of an overall logging, auditing, and visibility strategy. To capture logs from NVAs, utilise the Microsoft Monitoring Agent (MMA). For NVAs that don't support the installation of the MMA, consider using a Syslog or other log collection server to relay logs. - -|Resources|Link| -|---|---| -|Overview of Network Virtual Appliances|[https://azure.microsoft.com/solutions/network-appliances](https://azure.microsoft.com/solutions/network-appliances)| -|NVA Documentation|Refer to the vendor documentation on the implementation of the relevant NVA in Azure| -| - -### Azure Load Balancer - -Azure Load Balancer logs are used to obtain useful information about the connections and usage related to systems deployed in Azure. This can be used for health and availability monitoring, but also forms another key component in gaining the necessary insight into communications traffic and detecting malicious or anomalous traffic patterns. Azure Load Balancer logs to the Activity Log and Diagnostic Logs in Azure Monitor where it can be utilised in Log Analytics or distributed to an Event Hub or Storage Account. - -|Resources|Link| -|---|---| -|Azure Load Balancer Documentation|[https://docs.microsoft.com/azure/load-balancer](../load-balancer/index.yml)| -|Metrics and health diagnostics for Standard Load Balancer|[https://docs.microsoft.com/azure/load-balancer/load-balancer-standard-diagnostics](../load-balancer/load-balancer-standard-diagnostics.md)| -| - -### Virtual machines - -Virtual machines are endpoints that send and receive network communications, process data, and provide services. As Virtual machines can host data or crucial system services, ensuring that they're operating correctly and detecting cyber security incidents can be critical. Virtual machines collect various event and audit logs that can track the operation of the system and the actions done on that system. Logs collected on Virtual Machines can be forwarded to a Log Analytics workspace using the Log Analytics agent where they can be analyzed by Microsoft Defender for Cloud. Virtual machines can also integrate directly with Azure Event Hubs or with a SIEM solution, either directly or through a log collection server. - -|Resources|Link| -|---|---| -|Virtual Machines|[https://docs.microsoft.com/azure/virtual-machines](../virtual-machines/index.yml)| -|Collect Data from Virtual Machines|[https://docs.microsoft.com/azure/log-analytics/log-analytics-quick-collect-azurevm](../azure-monitor/vm/monitor-virtual-machine.md)| -|Stream Virtual Machine Logs to Event Hubs|[https://docs.microsoft.com/azure/monitoring-and-diagnostics/azure-diagnostics-streaming-event-hubs](../azure-monitor/agents/diagnostics-extension-stream-event-hubs.md)| -| - -### Domain Name Services (DNS) servers - -DNS Server logs provide key information related to the services that systems are trying to access, either internally or externally. 
Capturing DNS logs can help identify a cyber security incident and provide insight into the type of incident, and the systems that may be affected. The Microsoft Management Agent (MMA) can be used on DNS Servers to forward the logs through to Log Analytics for use in DNS Analytics (Preview). - -|Resources|Link| -|---|---| -|Azure Name Resolution for Virtual Networks|[https://docs.microsoft.com/azure/virtual-network/virtual-networks-name-resolution-for-vms-and-role-instances](../virtual-network/virtual-networks-name-resolution-for-vms-and-role-instances.md)| -| - -### Syslog and log collection servers - -To receive logs from Network Virtual Appliances, or custom security logs from other systems for use within a SIEM, dedicated servers can be deployed within Azure VNets. Syslog logs can be collected on a Syslog server and relayed to Log Analytics for analysis. A Log Collection Server is a generic term for any log aggregation and distribution capability used by centralised monitoring systems or SIEMs. These can be used to simplify network architecture and security and to filter and aggregate logs before being distributed to the centralised capability. - -|Resources|Link| -|---|---| -|Syslog data sources in Log Analytics|[https://docs.microsoft.com/azure/azure-monitor/platform/data-sources-syslog](../azure-monitor/agents/data-sources-syslog.md)| -|Log Collection Server|Refer to vendor documentation for details on monitoring and SIEM architecture| -| - -### Network Security Groups (NSGs) - -NSGs control traffic into and out of virtual networks in Azure. NSGs apply rules for the traffic flows that are permitted or denied, which includes traffic within Azure and between Azure and external networks such as on-premises or the Internet. NSGs are applied to subnets within a virtual network or to individual network interfaces. To capture information on the traffic entering and leaving systems in Azure, NSG logs can be enabled through the Network Watcher NSG Flow Logs feature. These logs are used to form a baseline for the standard operation of a system and are the data source for Traffic Analytics, which provides detailed insights into the traffic patterns of systems hosted in Azure. - -|Resources|Link| -|---|---| -|Network Security Group Documentation|[https://docs.microsoft.com/azure/virtual-network/security-overview](../virtual-network/network-security-groups-overview.md)| -|Introduction to flow logging for network security groups|[https://docs.microsoft.com/azure/network-watcher/network-watcher-nsg-flow-logging-overview](../network-watcher/network-watcher-nsg-flow-logging-overview.md)| -|Tutorial: Log network traffic to and from a Virtual Machine using the Azure portal|[https://docs.microsoft.com/azure/network-watcher/network-watcher-nsg-flow-logging-portal](../network-watcher/network-watcher-nsg-flow-logging-portal.md)| -| - -### Azure Activity Log - -Azure Activity Log, which is part of Azure Monitor, is a subscription log that provides insight into subscription-level events that have occurred in Azure. The Activity Log can help determine the 'what, who, and when' for any write operations (PUT, POST, DELETE) taken ***on*** the resources in a subscription. The Activity Log is crucial for tracking the configuration changes made within the Azure environment. Azure Activity Logs are automatically available for use in Log Analytics solutions and can be sent to Event Hubs or Azure Storage for processing or retention. 
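-
-As an illustrative sketch (the resource group name is hypothetical), recent write operations recorded in the Activity Log can be reviewed from the Azure CLI:
-
-```azurecli
-# List Activity Log entries for the last seven days for a resource group.
-az monitor activity-log list \
-  --resource-group app-rg \
-  --offset 7d \
-  --query "[].{time:eventTimestamp, operation:operationName.value, caller:caller, status:status.value}" \
-  --output table
-```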
- -|Resources|Link| -|---|---| -|Azure Activity Log Documentation|[https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-overview-activity-logs](../azure-monitor/essentials/platform-logs-overview.md)| -|Stream the Azure Activity Log to Event Hubs|[https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-stream-activity-logs-event-hubs](../azure-monitor/essentials/activity-log.md#legacy-collection-methods)| -| - -### Azure Diagnostic Log - -Azure Monitor diagnostic logs are logs emitted by an Azure service that provide rich, frequent data about the operation of that service. Diagnostic logs provide insight into the operation of a resource at a detailed level and can be used for a range of requirements such as auditing or troubleshooting. Azure Diagnostic Logs are automatically available for use in Log Analytics solutions and can be sent to Event Hubs or Azure Storage for processing or retention. - -|Resources|Link| -|---|---| -|Azure Diagnostic Log Documentation|[https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-overview-of-diagnostic-logs](../azure-monitor/essentials/platform-logs-overview.md)| -|Support services for Diagnostic Logs|[https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-diagnostic-logs-schema](../azure-monitor/essentials/resource-logs-schema.md)| -| - -### Azure Policy - -Azure Policy enforces rules on how resources can be deployed, such as the type, location, and configuration. Azure Policy can be configured to ensure resources can only be deployed if they're compliant with requirements. Azure Policy is a core component to maintaining the integrity of an Azure environment. Events related to Azure Policy are logged to the Azure Activity Log and are automatically available for use in Log Analytics solutions or can be sent to Event Hubs or Azure Storage for processing or retention. - -|Resources|Link| -|---|---| -|Azure Policy Documentation|[https://docs.microsoft.com/azure/governance/policy](../governance/policy/index.yml)| -|Leveraging Azure Policy and Resource Manager templates using Azure Blueprints|[https://docs.microsoft.com/azure/governance/blueprints/overview](../governance/blueprints/overview.md)| -| - -## Log collection - -Once generated from the multiple log sources, logs need to be stored in a centralised location for ongoing access and analysis. Azure provides multiple methods and options for Log Collection that can be utilised depending on the log type and requirements. - -### Event Hubs - -The purpose of an Event Hub is to aggregate the log data for the various sources for distribution. From the Event Hub, the log data can be sent on to a SIEM, to the ACSC for compliance and to Storage for long-term retention. - -|Resources|Link| -|---|---| -|Event Hubs Documentation|[https://docs.microsoft.com/azure/event-hubs](../event-hubs/index.yml)| -|Guidance on Event Hubs and External Tools|[https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitor-stream-monitoring-data-event-hubs](../azure-monitor/essentials/stream-monitoring-data-event-hubs.md)| -| - -### Log Analytics - -Log Analytics is part of Azure Monitor and is used for log analysis. Log Analytics uses a workspace as the storage mechanism where log data can be made available for a variety of analysis tools and solutions available within Azure. Log Analytics integrates with a wide range of Azure components directly, as well as Virtual Machines through the Microsoft Monitoring Agent. 
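-
-A minimal sketch, assuming the workspace does not yet exist (names and region are hypothetical), of creating a Log Analytics workspace and retrieving the workspace ID that agents and queries reference:
-
-```azurecli
-# Create a Log Analytics workspace in an Australian region.
-az monitor log-analytics workspace create \
-  --resource-group logging-rg \
-  --workspace-name agency-logs \
-  --location australiacentral
-
-# The customerId (workspace ID) is the value the Microsoft Monitoring Agent is configured with.
-az monitor log-analytics workspace show \
-  --resource-group logging-rg \
-  --workspace-name agency-logs \
-  --query customerId
-```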
- -|Resources|Link| -|---|---| -|Log Analytics Documentation|[https://docs.microsoft.com/azure/azure-monitor](../azure-monitor/index.yml)| -|Tutorial: Analyze Data in Log Analytics|[https://docs.microsoft.com/azure/azure-monitor/learn/tutorial-viewdata](../azure-monitor/logs/log-analytics-tutorial.md)| -| - -### Network Watcher - -The use of Network Watcher is recommended by the ACSC to assist in understanding and capturing network traffic in an Azure subscription. NSG Flow logs provide the input to the Traffic Analytics solution in Log Analytics, which provides increased visibility, analysis and reporting natively through Azure. Network Watcher also provides a packet capture capability directly from the Azure portal without the need to sign in to the Virtual Machine. Packet capture allows you to create packet capture sessions to track traffic to and from a virtual machine. - -|Resources|Link| -|---|---| -|Network Watcher|[https://docs.microsoft.com/azure/network-watcher](../network-watcher/index.yml)| -|Packet Capture Overview|[https://docs.microsoft.com/azure/network-watcher/network-watcher-packet-capture-overview](../network-watcher/network-watcher-packet-capture-overview.md)| -| - -## Log retention - -For Australian Government organisations, the logs captured within Azure must be retained in accordance with the National Archives of Australia [Administrative Functions Disposal Authority (AFDA)](https://www.naa.gov.au/information-management/records-authorities/types-records-authorities/afda-express-version-2-functions), which specifies retaining logs up to seven years. - -|Log Location|Retention Period| -|---|---| -|Azure Activity Log|Up to 90 days| -|Log Analytics workspace|Up to two years| -|Event Hub|Up to seven days| -| - -It is your responsibility to ensure that logs are archived appropriately to adhere to AFDA and other legislative requirements. - -### Azure Storage - -Azure Storage is the repository for logs for long-term retention in Azure. Azure Storage can be used to archive logs from Azure including Event Hubs, Azure Activity Log, and Azure Diagnostic Logs. The period of retention of data in Storage can be set to zero, or can be specified as a number of days. A retention of zero days means logs are kept forever, otherwise, the value can be any number of days between 1 and 2147483647. - -|Resources|Link| -|---|---| -|Azure Storage Documentation|[https://docs.microsoft.com/azure/storage](../storage/index.yml)| -|Capture events through Azure Event Hubs in Azure Blob Storage or Azure Data Lake Storage|[https://docs.microsoft.com/azure/event-hubs/event-hubs-capture-overview](../event-hubs/event-hubs-capture-overview.md)| -|Tutorial: Archive Azure metric and log data using Azure Storage|[https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitor-tutorial-archive-monitoring-data](../azure-monitor/essentials/platform-logs-overview.md)| -|Azure Storage Replication|[https://docs.microsoft.com/azure/storage/common/storage-redundancy](../storage/common/storage-redundancy.md)| -|Creating a Snapshot of a Blob|[https://docs.microsoft.com/rest/api/storageservices/creating-a-snapshot-of-a-blob](/rest/api/storageservices/creating-a-snapshot-of-a-blob)| -| - -## Log analysis - -Once generated and stored in a centralised location, the logs must be analysed to assist with detecting attempted or successful security incidents. When security incidents are detected, an agency needs the ability to respond to those incidents and to track, contain, and remediate any threats. 
- -### Microsoft Defender for Cloud - -Microsoft Defender for Cloud provides unified security management and advanced threat protection. Microsoft Defender for Cloud can apply security policies across workloads, limit exposure to threats, and detect and respond to attacks. Microsoft Defender for Cloud provides dashboards and analysis across a wide range of Azure components. The use of Microsoft Defender for Cloud is specified as a requirement in the ACSC consumer guidance. - -|Resources|Link| -|---|---| -|Microsoft Defender for Cloud documentation|[https://docs.microsoft.com/azure/security-center](../security-center/index.yml)| -|Quickstart: Enable Microsoft Defender for Cloud's enhanced security features|[https://docs.microsoft.com/azure/security-center/security-center-get-started](../security-center/enable-enhanced-security.md)| -||| - -### Traffic Analytics - -Traffic Analytics is a cloud-based solution that provides visibility into user and application activity in Azure. Traffic analytics analyses Network Watcher NSG flow logs to provide insights into traffic flow in Azure. Traffic Analytics is used to provide dashboards, reports, analysis, and event response capabilities related to the network traffic seen across virtual networks. Traffic Analytics gives significant insight and helps in identifying and resolving cyber security incidents. - -|Resources|Link| -|---|---| -|Traffic Analytics Documentation|[https://docs.microsoft.com/azure/network-watcher/traffic-analytics](../network-watcher/traffic-analytics.md)| -| - -### Azure Advisor - -Azure Advisor analyses resource configuration and other data to recommend solutions to help improve the performance, security, and high availability of resources while looking for opportunities to reduce overall Azure spend. Azure Advisor is recommended by the ACSC and provides easily accessible and detailed advice on the configuration of the Azure environment. - -|Resources|Link| -|---|---| -|Azure Advisor Documentation|[https://docs.microsoft.com/azure/advisor](../advisor/index.yml)| -|Get started with Azure Advisor|[https://docs.microsoft.com/azure/advisor/advisor-get-started](../advisor/advisor-get-started.md)| -| - -### DNS Analytics (Preview) - -DNS Analytics is a Log Analytics Solution that collects, analyses, and correlates Windows DNS analytic and audit logs and other related data. DNS Analytics identifies clients that try to resolve malicious domain names, stale resource records, frequently queried domain names, and talkative DNS clients. DNS Analytics also provides insight into request load on DNS servers and dynamic DNS registration failures. DNS Analytics is used to provide dashboards, reports, analysis, and event response capabilities related to the DNS queries made within an Azure environment. DNS Analytics gives significant insight and helps in identifying and resolving cyber security incidents. - -|Resources|Link| -|---|---| -|DNS Analytics Documentation|[https://docs.microsoft.com/azure/azure-monitor/insights/dns-analytics](../azure-monitor/insights/dns-analytics.md)| -| - -### Activity Log Analytics - -Activity Log Analytics is a Log Analytics Solution that helps analyse and search the Azure activity log across multiple Azure subscriptions. Activity Log Analytics is used to provide centralised dashboards, reports, analysis, and event response capabilities related to the actions that are performed on resources the whole Azure environment. Activity Log Analytics can assist with auditing and investigation. 
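-
-As a hedged example (the workspace GUID is a placeholder, column names can vary between workspace schema versions, and the command may require the log-analytics CLI extension), collected activity data can be summarised with a Kusto query run through the Azure CLI:
-
-```azurecli
-# Summarise the last seven days of Azure Activity Log records by caller.
-az monitor log-analytics query \
-  --workspace "00000000-0000-0000-0000-000000000000" \
-  --analytics-query "AzureActivity | where TimeGenerated > ago(7d) | summarize Operations = count() by Caller" \
-  --output table
-```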
- -|Resources|Link| -|---|---| -|Collect and analyze Azure activity logs in Log Analytics|[https://docs.microsoft.com/azure/azure-monitor/platform/collect-activity-logs](../azure-monitor/essentials/activity-log.md)| -| - -### Security Information and Event Management (SIEM) - -A SIEM is a system that provides centralised storage, auditing and analysis of security logs, with defined mechanisms for ingesting a wide range of log data and intelligent tools for analysis, reporting and incident detection and response. You can use SIEM capabilities that include Azure logging information to supplement the security capabilities provided natively in Azure. Commonwealth entities can utilise a SIEM hosted on Virtual Machines in Azure, on-premises or as a Software as a Service (SaaS) capability depending on specific requirements. - -|Resources|Link| -|---|---| -|Microsoft Sentinel (Preview)|[https://azure.microsoft.com/services/azure-Sentinel](https://azure.microsoft.com/services/azure-sentinel)| -|SIEM Documentation|Refer to vendor documentation for SIEM architecture and guidance| -|Use Azure Monitor to integrate with SIEM tools|[https://azure.microsoft.com/blog/use-azure-monitor-to-integrate-with-siem-tools](https://azure.microsoft.com/blog/use-azure-monitor-to-integrate-with-siem-tools)| -| - -### Australian Cyber Security Centre - -The Australian Cyber Security Centre (ACSC) is the Australian Government's lead on national cyber security. It brings together cyber security capabilities from across the Australian Government to improve the cyber resilience of the Australian community and support the economic and social prosperity of Australia in the digital age. The ACSC recommends that Commonwealth entities forward all mandated system-generated log files, events, and logs to the ACSC for whole of Australian Government monitoring. - -|Resources|Link| -|---|---| -|Australian Cyber Security Centre website|[https://www.acsc.gov.au](https://www.acsc.gov.au)| -| - -## Incident response - -Generating the appropriate logs, collecting them into centralised repositories and performing analysis increases understanding of systems and provides mechanisms to detect cyber security incidents. After incidents or events have been detected, the next step is to react to those events and perform actions to maintain system health and protect services and data from compromise. Azure provides a combination of services to respond effectively to any events that occur. - -### Azure Alerts - -Azure Alerts can be used to notify support and security personnel in response to particular events. This allows a Commonwealth entity to proactively respond to the detection of relevant events raised by the analysis services listed in this article. - -|Resources|Link| -|---|---| -|Overview of Alerts in Microsoft Azure|[https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-overview-alerts](../azure-monitor/alerts/alerts-overview.md)| -|Managing and responding to security alerts in Microsoft Defender for Cloud|[https://docs.microsoft.com/azure/security-center/security-center-managing-and-responding-alerts](../security-center/security-center-managing-and-responding-alerts.md)| -|Azure Monitor Log Alerts|[https://docs.microsoft.com/azure/azure-monitor/learn/tutorial-response](../azure-monitor/alerts/alerts-log.md)| -| - -### Azure Automation - -Azure Automation enables Commonwealth entities to trigger actions in response to events. 
This could be to start a packet capture on Virtual Machines, run a workflow, stop, or start Virtual Machines or services, or a range of other tasks. Automation enables rapid response to alerts without manual intervention thus reducing the response time and severity of an incident or event. - -|Resources|Link| -|---|---| -|Azure Automation Documentation|[https://docs.microsoft.com/azure/automation](../automation/index.yml)| -|How-to guide: Use an alert to trigger an Azure Automation runbook|[https://docs.microsoft.com/azure/automation/automation-create-alert-triggered-runbook](../automation/automation-create-alert-triggered-runbook.md)| -| - -## Next steps - -Review the article on [Gateway Secure Remote Administration](gateway-secure-remote-administration.md) for details on securely managing your Gateway environment in Azure. diff --git a/articles/azure-australia/gateway-secure-remote-administration.md b/articles/azure-australia/gateway-secure-remote-administration.md deleted file mode 100644 index e6800c9973d63..0000000000000 --- a/articles/azure-australia/gateway-secure-remote-administration.md +++ /dev/null @@ -1,284 +0,0 @@ ---- -title: Secure remote administration of gateway in Azure Australia -description: Guidance on configuring secure remote administration within the Australian regions to meet the specific requirements of Australian Government policy, regulations, and legislation. -author: emilyre -ms.service: azure-australia -ms.topic: conceptual -ms.date: 07/22/2019 -ms.author: yvettep ---- - -# Secure remote administration of your Gateway in Azure Australia - -It's critical to the availability and integrity of any system that administrative activities are conducted securely and are controlled. Administrative activities should be done from a secure device, over a secure connection, and be backed by strong authentication and authorisation processes. Secure Remote Administration ensures that only authorised actions are performed and only by authorised administrators. - -This article provides information on implementing a secure remote administration capability for an internet accessible system hosted in Azure that aligns with the Australian Cyber Security Centre (ACSC) Consumer Guidance and the intent of the ACSC's Information Security Manual (ISM). - -## Australian Cyber Security Centre (ACSC) requirements - -The overall security requirements for Commonwealth systems are defined in the ISM. To assist Commonwealth entities in providing secure administration, the ACSC has published [ACSC Protect: Secure Administration](https://www.acsc.gov.au/publications/protect/secure-administration.htm) - -This document discusses the importance of secure administration and suggests one method of implementing a secure administration environment. The document describes the elements of a secure administration solution as follows: - -|Element |Description | -|---|---| -|Privileged access control |Controlling access to privileged accounts is a fundamental security control that will protect privileged accounts from misuse. The access control methodology will encompass the concepts of 'least privilege' and 'need to have' as well as processes and procedures for managing service accounts and staff movements. | -|Multi-factor authentication |Implementing additional factors of authentication beyond usernames and passphrases, such as physical tokens or smartcards, can help protect critical assets. 
If an adversary compromises credentials for privileged accounts, as all administrative actions would first need to go through some form of multi-factor authentication, the consequences can be greatly reduced.| -|Privileged workstations|The use of a known secure environment for administrative tasks can result in a lesser risk of the network being compromised due to the implementation of additional security controls.| -|Logging and auditing |Automated generation, collection, and analysis of security and administrative related events from workstations, servers, network devices, and jump boxes will enable detection of compromises and attempted compromises. Automation enables organisations to respond more quickly, reducing the implications of a compromise.| -|Network segmentation and segregation|Segmenting a network into logical zones such as differing security domains, and further segregating these logical networks by restricting the types of data that flow from one zone to another, restricts lateral movement. Segmentation prevents an adversary from gaining access to additional resources.| -|Jump boxes|A jump box is a hardened remote access server, commonly utilising Microsoft's Remote Desktop Services or Secure Shell (SSH) software. Jump boxes act as a stepping point for administrators accessing critical systems with all administrative actions performed from the dedicated host.| - -This article provides a reference architecture for how the elements above can be used for secure administration of systems deployed in Azure. - -## Architecture - -Providing a secure administration capability requires multiple components that all work together to form a cohesive solution. In the reference architecture provided, the components are mapped to the elements described in [ACSC Protect: Secure Administration](https://www.acsc.gov.au/publications/protect/secure-administration.htm) - -![Azure Secure Remote Administration Architecture](media/remote-admin.png) - -## Components - -The architecture is designed to ensure that a privileged account is granted only the necessary permissions, is securely identified, and then provided access to administrative interfaces only from an authorised device and through secure communications mechanisms that are controlled and audited. - -|Solution| Components|Elements| -|---|---|---| -|Secure Devices |
                  • Privileged Workstation
                  • Mobile Device
                  • Microsoft Intune
                  • Group Policy
                  • Jump Server / Bastion Host
                  • Just in Time (JIT) Administration
                  |
                  • Privileged workstations
                  • Jump boxes
                  | -|Secure Communication |
                  • Azure portal
                  • Azure VPN Gateway
                  • Remote Desktop (RD) Gateway
                  • Network Security Groups (NSGs)
                  |
                  • Network segmentation and segregation
                  | -|Strong Authentication |
                  • Domain Controller (DC)
                  • Azure Active Directory (Azure AD)
                  • Network Policy Server (NPS)
                  • Azure AD MFA
                  |
                  • Multi-factor authentication
                  | -|Strong Authorisation |
                  • Identity and Access Management (IAM)
                  • Privileged Identity Management (PIM)
                  • Conditional Access
                  |
                  • Privileged access control
                  | -||| - ->[!NOTE] ->For more information on the Logging and auditing element, see the article on [Gateway logging, auditing, and visibility](gateway-log-audit-visibility.md) - -## Administration workflow - -Administering systems deployed in Azure is divided into two distinct categories, administering the Azure configuration and administering workloads deployed in Azure. Azure configuration is conducted through the Azure portal and workload administration is completed through administrative mechanisms such as Remote Desktop Protocol (RDP), Secure Shell (SSH) or for PaaS capabilities, using tools such as SQL Management Studio. - -Gaining access for administration is a multi-step process involving the components listed in the architecture and requires access to the Azure portal and Azure configuration before access can be made to Azure workloads. - ->[!NOTE] -> The steps described here are the general process using the Graphical User Interface (GUI) components of Azure. These steps can also be completed using other interfaces such as PowerShell. - -### Azure configuration and Azure portal access - -|Step |Description | -|---|---| -|Privileged Workstation sign in |The administrator signs in the privileged workstation using administrative credentials. Group Policy controls prevent non-administrative accounts from authenticating to the privileged workstation and prevents administrative accounts from authenticating to non-privileged workstations. Microsoft Intune manages the compliance of the privileged workstation to ensure that it is up-to-date with software patches, antimalware, and other compliance requirements. | -|Azure portal sign in |The administrator opens a web browser to the Azure portal, which is encrypted using Transport Layer Security (TLS), and signs in on using administrative credentials. The authentication request is processed through Azure Active Directory directly or through authentication mechanisms such as Active Directory Federation Services (AD FS) or Pass-through authentication. | -|Azure AD MFA |Azure AD MFA sends an authentication request to the registered mobile device of the privileged account. The mobile device is managed by Intune to ensure compliance with security requirements. The administrator must authenticate first to the mobile device and then to the Microsoft Authenticator App using a PIN or Biometric system before the authentication attempt is authorised to Azure AD MFA. | -|Conditional Access |Conditional Access policies check the authentication attempt to ensure that it meets the necessary requirements such as the IP address the connection is coming from, group membership for the privileged account, and the management and compliance status of the privileged workstation as reported by Intune. | -|Privileged Identity Management (PIM) |Through the Azure portal the administrator can now activate or request activation for the privileged roles for which they have authorisation through PIM. PIM ensures that privileged accounts do not have any standing administrative privileges and that all requests for privileged access are only for the time required to perform administrative tasks. PIM also provides logging of all requests and activations for auditing purposes. 
| -|Identity and Access Management|Once the privileged account has been securely identified and roles activated, the administrator is provided access to the Azure subscriptions and resources that they have been assigned permissions to through Identity and Access Management.| - -Once the privileged account has completed the steps to gain administrative access to the Azure portal, access to the workloads can be configured and administrative connections can be made. - -### Azure workload administration - -|Step |Description| -|---|---| -|Just in Time (JIT) Access|To obtain access to virtual machines, the Administrator uses JIT to request access to RDP to the Jump Server from the RD Gateway IP address and RDP or SSH from the Jump Server to the relevant workload virtual machines.| -|Azure VPN Gateway|The administrator now establishes a Point-to-Site IPSec VPN connection from their privileged workstation to the Azure VPN Gateway, which performs certificate authentication to establish the connection.| -|RD Gateway|The administrator now attempts an RDP connection to the Jump Server with the RD Gateway specified in the Remote Desktop Connection configuration. The RD Gateway has a private IP address that is reachable through the Azure VPN Gateway connection. Policies on the RD Gateway control whether the privileged account is authorised to access the requested Jump Server. The RD Gateway prompts the administrator for credentials and forwards the authentication request to the Network Policy Server (NPS).| -|Network Policy Server (NPS)|The NPS receives the authentication request from the RD Gateway and validates the username and password against Active Directory before sending a request to Azure Active Directory to trigger an Azure AD MFA authentication request.| -|Azure AD MFA|Azure AD MFA sends an authentication request to the registered mobile device of the privileged account. The mobile device is managed by Intune to ensure compliance with security requirements. The administrator must authenticate first to the mobile device and then to the Microsoft Authenticator App using a PIN or Biometric system before the authentication attempt is authorised to Azure AD MFA.| -|Jump Server|Once successfully authenticated, the RDP connection is encrypted using Transport Layer Security (TLS) and then sent through the encrypted IPSec tunnel to the Azure VPN Gateway, through the RD Gateway and on to the Jump Server. 
From the Jump Server, the administrator can now RDP or SSH to workload virtual machines as specified in the JIT request.| - -## General guidance - -When implementing the components listed in this article, the following general guidance applies: - -* Validate the region availability of services, ensuring that all data remains within authorised locations and deploy to AU Central or AU Central 2 as the first preference for PROTECTED workloads - -* Refer to the *Azure - ACSC Certification Report – Protected 2018* publication for the certification status of individual services and perform self-assessments on any relevant components not included in the report as per the *ACSC CONSUMER GUIDE – Microsoft Azure at PROTECTED* - -* Ensure network connectivity and any necessary proxy configuration for access to necessary authentication components such as Azure AD, ADFS, and PTA - -* Use Azure Policy to monitor and enforce compliance with requirements - -* Ensure virtual machines, especially Active Directory Domain Controllers, are stored in encrypted storage accounts and utilise Azure Disk Encryption - -* Create and maintain robust identity and administrative privilege management processes and governance to underpin the technical controls listed in this article - -|Resource|URL| -|---|---| -|Australian Regulatory and Policy Compliance Documents|[Australian Regulatory and Policy Compliance Documents](https://aka.ms/au-irap)| -|Azure products - Australian regions and non-regional|[Azure products - Australian regions and non-regional](https://azure.microsoft.com/global-infrastructure/services/?regions=non-regional,australia-central,australia-central-2,australia-east,australia-southeast)| -|Strategies to Mitigate Cyber Security Incidents|[Strategies to Mitigate Cyber Security Incidents](https://acsc.gov.au/infosec/mitigationstrategies.htm)| -|ACSC Protect: Secure Administration|[ACSC Protect: Secure Administration](https://acsc.gov.au/publications/protect/secure-administration.htm)| -|How To: Integrate your Remote Desktop Gateway infrastructure using the Network Policy Server (NPS) extension and Azure AD|[Integrate RD Gateway with NPS and Azure AD](../active-directory/authentication/howto-mfa-nps-extension-rdg.md)| - -## Component guidance - -This section provides information on the purpose of each component and its role in the overall Secure Remote Administration architecture. Additional links are provided to access useful resources such as reference documentation, guides, and tutorials. - -## Secure devices - -The physical devices used by privileged users to perform administrative functions are valuable targets for malicious actors. Maintaining the security and integrity of the physical devices and ensuring that they are free from malicious software and protecting them from compromise is a key part of providing a secure remote administration capability. This involves high priority security configuration as specified in the ACSC's Essential Eight Strategies to Mitigate Cyber Security Incidents such as application filtering, patching applications, application hardening, and patching operating systems. These capabilities must be installed, configured, audited, validated, and reported on to ensure the state of a device is compliant with organisation requirements. - -### Privileged workstation - -The privileged workstation is a hardened machine that can be used to perform administrative duties and is only accessible to administrative accounts. 
The privileged workstation should have policies and configuration in place to limit the software that can be run and its access to network resources and the internet, and credentials should be protected in the event that the device is stolen or compromised. - -|Resources|Link| -|---|---| -|Privileged Access Workstations Architecture Overview|[https://4sysops.com/archives/understand-the-microsoft-privileged-access-workstation-paw-security-model/](/security/compass/privileged-access-deployment)| -|Securing Privileged Access Reference Material|[https://docs.microsoft.com/windows-server/identity/securing-privileged-access/securing-privileged-access-reference-material](/windows-server/identity/securing-privileged-access/securing-privileged-access-reference-material)| - -### Mobile device - -A mobile device is at greater risk of accidental loss or theft due to its portability and size, and needs to be secured appropriately. The mobile device provides a strong additional factor for authentication given its ability to enforce authentication for device access, traceability through location services, encryption functions, and the ability to be remotely wiped. When using a mobile device as an additional authentication factor for Azure, the device should be configured to use the Microsoft Authenticator App with PIN or Biometric authentication and not through phone calls or text messages. - -|Resources|Link| -|---|---| -|Azure AD Authentication Methods|[https://docs.microsoft.com/azure/active-directory/authentication/concept-authentication-methods](../active-directory/authentication/concept-authentication-methods.md)| -|How to use the Microsoft Authenticator App|[https://support.microsoft.com/help/4026727/microsoft-account-how-to-use-the-microsoft-authenticator-app](https://support.microsoft.com/help/4026727/microsoft-account-how-to-use-the-microsoft-authenticator-app)| - -### Microsoft Intune - -Intune is the component of Enterprise Mobility + Security that manages mobile devices and apps. It integrates closely with other components like Azure Active Directory for identity and access control and Azure Information Protection for data protection. Intune provides policies for workstations and mobile devices to set compliance requirements for accessing resources and provides reporting and auditing capabilities for gaining insight into the status of administrative devices. - -|Resources|Link| -|---|---| -|Microsoft Intune Documentation|[https://docs.microsoft.com/intune/](/intune/)| -|Get started with Device Compliance in Intune|[https://docs.microsoft.com/intune/device-compliance-get-started](/intune/device-compliance-get-started)| - -### Group Policy - -Group Policy is used to control the configuration of operating systems and applications. Security policies control the authentication, authorisation, and auditing settings of a system. Group Policy is used to harden the privileged workstation, protect administrative credentials, and restrict non-privileged accounts from accessing privileged devices. - -|Resources|Link| -|---|---| -|Allow sign in locally Group Policy setting|[https://docs.microsoft.com/windows/security/threat-protection/security-policy-settings/allow-log-on-locally](/windows/security/threat-protection/security-policy-settings/allow-log-on-locally)| - -### Jump Server / Bastion Host - -The Jump Server / Bastion Host is a centralised point for administration. 
It has the tools required to perform administrative duties, but also has the network access necessary to connect to resources on administrative ports. The Jump Server is the central point for administering Virtual Machine workloads in this article, but it can also be configured as the authorised point for administering Platform as a Service (PaaS) capabilities such as SQL. Access to PaaS capabilities can be restricted on a per-service basis using identity and network controls. - -|Resources|Link| -|---|---| -|Implementing Secure Administrative Hosts|[https://docs.microsoft.com/windows-server/identity/ad-ds/plan/security-best-practices/implementing-secure-administrative-hosts](/windows-server/identity/ad-ds/plan/security-best-practices/implementing-secure-administrative-hosts)| - -### Just in Time (JIT) access - -JIT is a Microsoft Defender for Cloud capability that utilises Network Security Groups (NSGs) to block access to administrative protocols such as RDP and SSH on Virtual Machines. Applications hosted on Virtual Machines continue to function as normal, but administrative access must be requested and can only be granted for a set period of time. All requests are logged for auditing purposes. - -|Resources |Link | -|---|---| -|Manage Just in Time (JIT) access|[https://docs.microsoft.com/azure/security-center/security-center-just-in-time](../security-center/security-center-just-in-time.md)| -|Automating Azure Just In Time VM Access|[https://blogs.technet.microsoft.com/motiba/2018/06/24/automating-azure-just-in-time-vm-access](/archive/blogs/motiba/automating-azure-just-in-time-vm-access)| - -## Secure communication - -Communications traffic for administration activities can contain highly sensitive information, such as administrative credentials, and must be managed and protected accordingly. Providing secure communication involves reliable encryption capabilities to prevent eavesdropping, together with network segmentation and restrictions that limit administrative traffic to authorised end points and control lateral movement if a system is compromised. - -### Azure portal - -Communications to the Azure portal are encrypted using Transport Layer Security (TLS) and the use of the Azure portal has been certified by the ACSC. Commonwealth entities should follow the recommendations in the *ACSC Consumer Guide* and configure their web browsers to ensure that they are using the latest version of TLS with supported cryptographic algorithms. - -|Resources |Link | -|---|---| -|Azure Encryption Overview – Encryption in transit|[https://docs.microsoft.com/azure/security/security-azure-encryption-overview#encryption-of-data-in-transit](../security/fundamentals/encryption-overview.md#encryption-of-data-in-transit)| - -### Azure VPN Gateway - -The Azure VPN Gateway provides the secure encrypted connection from the privileged workstation to Azure. The Azure VPN Gateway has been certified by the ACSC for providing secure IPSec communication. Commonwealth entities should configure the Azure VPN Gateway in accordance with the ACSC Consumer Guide, ACSC Certification Report, and other specific guidance. 
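A minimal sketch of standing up such a Point-to-Site capable gateway with the Azure CLI is shown below; the resource names, address pool, region, and SKU are placeholders, the exact parameters may differ between CLI versions, and certificate-based client authentication still has to be configured separately:

```azurecli
# Sketch only: create a route-based VPN gateway in an Australian region, then
# add a point-to-site client address pool using IKEv2. Names and ranges are
# illustrative; root certificates for client authentication are uploaded separately.
az network vnet-gateway create \
  --resource-group admin-gateway-rg \
  --name admin-vpn-gw \
  --vnet admin-vnet \
  --public-ip-address admin-vpn-gw-pip \
  --gateway-type Vpn \
  --vpn-type RouteBased \
  --sku VpnGw1 \
  --location australiacentral

az network vnet-gateway update \
  --resource-group admin-gateway-rg \
  --name admin-vpn-gw \
  --address-prefixes 172.16.201.0/24 \
  --client-protocol IkeV2
```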
- -|Resources |Link | -|---|---| -|About Point-to-Site Connections|[https://docs.microsoft.com/azure/vpn-gateway/point-to-site-about](../vpn-gateway/point-to-site-about.md)| -|Azure VPN Gateway Cryptographic Details|[https://docs.microsoft.com/azure/vpn-gateway/vpn-gateway-about-compliance-crypto](../vpn-gateway/vpn-gateway-about-compliance-crypto.md)| -|Azure VPN Gateway Configuration|[Azure VPN Gateway configuration](vpn-gateway.md)| - -### Remote Desktop (RD) Gateway - -RD Gateway is a secure mechanism for controlling and authorising RDP connections to systems. It works by encapsulating RDP traffic in HyperText Transfer Protocol Secure (HTTPS), encrypted using TLS. TLS provides an additional layer of security for administrative traffic. - -|Resources |Link | -|---|---| -|Remote Desktop Services Architecture|[https://docs.microsoft.com/windows-server/remote/remote-desktop-services/desktop-hosting-logical-architecture](/windows-server/remote/remote-desktop-services/desktop-hosting-logical-architecture)| - -### Network Security Groups (NSGs) - -NSGs function as Access Control Lists (ACLs) for network traffic entering or leaving subnets or virtual machines. NSGs provide network segmentation and provide a mechanism for controlling and limiting the communications flows permitted between systems. NSGs are a core component of Just in Time Administration (JIT) for allowing or denying access to administrative protocols. - -|Resources |Link | -|---|---| -|Azure Security Groups Overview|[https://docs.microsoft.com/azure/virtual-network/security-overview](../virtual-network/network-security-groups-overview.md)| -|How to: Plan Virtual Networks|[https://docs.microsoft.com/azure/virtual-network/virtual-network-vnet-plan-design-arm](../virtual-network/virtual-network-vnet-plan-design-arm.md)| - -## Strong authentication - -Securely identifying privileged users before granting access to systems is a core component of secure administration. Mechanisms must be in place to protect the credentials associated with a privileged account and to prevent malicious actors from gaining access to systems through impersonation or credential theft. - -### Domain Controller (DC) - -At a high level, a DC hosts a copy of the Active Directory Database, which contains all the users, computers and groups within a Domain. DCs perform authentication for users and computers. The DCs in this architecture are hosted as virtual machines within Azure and provide authentication services for privileged accounts connecting to Jump Servers and workload virtual machines. - -|Resources |Link | -|---|---| -|Active Directory Domain Services Overview|[https://docs.microsoft.com/windows-server/identity/ad-ds/get-started/virtual-dc/active-directory-domain-services-overview](/windows-server/identity/ad-ds/get-started/virtual-dc/active-directory-domain-services-overview)| - -### Azure Active Directory (Azure AD) - -Azure AD is the authentication service for Azure. It contains the cloud identities and provides authentication and authorisation for an Azure environment. Azure AD can be synchronised with Active Directory through Azure AD Connect and can provide federated authentication through Active Directory Federation Services (AD FS) and Azure AD Connect. Azure AD is a core component of secure administration. 
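As a small, hedged example of checking this from the command line (assuming an account with sufficient Microsoft Graph permissions), `az rest` can confirm whether directory synchronisation is enabled for the tenant:

```azurecli
# Query Microsoft Graph for the organisation object and report whether
# on-premises directory synchronisation (Azure AD Connect) is enabled.
az rest --method get \
  --url "https://graph.microsoft.com/v1.0/organization" \
  --query "value[].{Tenant:displayName, DirectorySyncEnabled:onPremisesSyncEnabled}" \
  --output table
```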
- -|Resources |Link | -|---|---| -|Azure Active Directory Documentation|[https://docs.microsoft.com/azure/active-directory](../active-directory/index.yml)| -|Hybrid Identity Documentation|[https://docs.microsoft.com/azure/active-directory/hybrid](../active-directory/hybrid/index.yml)| - -### Network Policy Server (NPS) - -An NPS is an authentication and policy server that provides advanced authentication and authorisation processes. The NPS server in this architecture is provided to integrate Azure AD MFA authentication with RD Gateway authentication requests. The NPS has a specific plug-in to support integration with Azure AD MFA in Azure AD. - -|Resources |Link | -|---|---| -|Network Policy Server Documentation|[https://docs.microsoft.com/windows-server/networking/technologies/nps/nps-top](/windows-server/networking/technologies/nps/nps-top)| - -### Azure AD MFA - -Azure AD MFA is an authentication service provided within Azure Active Directory to enable authentication requests beyond a username and password for accessing cloud resources such as the Azure portal. Azure AD MFA supports a range of authentication methods and this architecture utilises the Microsoft Authenticator App for enhanced security and integration with the NPS. - -|Resources |Link | -|---|---| -|How it works: Azure AD Multi-Factor Authentication|[https://docs.microsoft.com/azure/active-directory/authentication/concept-mfa-howitworks](../active-directory/authentication/concept-mfa-howitworks.md)| -|How to: Deploy cloud-based Azure AD Multi-Factor Authentication|[https://docs.microsoft.com/azure/active-directory/authentication/howto-mfa-getstarted](../active-directory/authentication/howto-mfa-getstarted.md)| - -## Strong authorisation - -Once a privileged account has been securely identified, it can be granted access to resources. Authorisation controls and manages the privileges that are assigned to a specific account. Strong Authorisation processes align with the ACSC's Essential Eight strategy for mitigating cyber security incidents of restricting administrative privileges. - -### Identity and access management - -Access to perform privileged actions within Azure is based on roles that are assigned to that account. Azure includes an extensive and granular range of roles with specific permissions to undertaken specific tasks. These roles can be granted at multiple levels such as a subscription or resource group. Role assignment and permission management are based on accounts and groups in Azure Active Directory and is managed through Access Control (IAM) within Azure. - -|Resources |Link | -|---|---| -|Azure role-based access control (Azure RBAC)|[https://docs.microsoft.com/azure/role-based-access-control](../role-based-access-control/index.yml)| -|Understand Role Definitions|[https://docs.microsoft.com/azure/role-based-access-control/role-definitions](../role-based-access-control/role-definitions.md)| - -### Privileged Identity Management (PIM) - -PIM is an Azure Active Directory component that controls access to privileged roles. Privileged accounts do not require permanent or standing privileged access, but can instead be granted the ability to request privileged access for a period of time in order to complete privileged activities. PIM provides additional controls around maintaining and restricting privileged access as well as logging and auditing to track instances of privilege use. 
- -|Resources |Link | -|---|---| -|Privileged Identity Management (PIM) Documentation|[https://docs.microsoft.com/azure/active-directory/privileged-identity-management](../active-directory/privileged-identity-management/index.yml)| -|Start using PIM|[https://docs.microsoft.com/azure/active-directory/privileged-identity-management/pim-getting-started](../active-directory/privileged-identity-management/pim-getting-started.md)| - -### Conditional access - -Conditional access is a component of Azure Active Directory that allows or denies access to resources based on conditions. These conditions can be network location based, device type, compliance status, group membership and more. Conditional Access is used to enforce MFA, device management, and compliance through Intune and group membership of administrative accounts. - -|Resources |Link | -|---|---| -|Conditional Access Documentation|[https://docs.microsoft.com/azure/active-directory/conditional-access](../active-directory/conditional-access/index.yml)| -|How to: Require Managed Devices for cloud app access with conditional access|[https://docs.microsoft.com/azure/active-directory/conditional-access/require-managed-devices](../active-directory/conditional-access/require-managed-devices.md)| - -## Next steps - -Review the article on [Gateway Ingress Traffic Management and Control](gateway-ingress-traffic.md) for details on controlling traffic flows through your Gateway components in Azure. diff --git a/articles/azure-australia/identity-federation.md b/articles/azure-australia/identity-federation.md deleted file mode 100644 index 90fa3ab960a75..0000000000000 --- a/articles/azure-australia/identity-federation.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: Identity federation in Azure Australia -description: Guidance on configuring identity federation within the Australian regions to meet the specific requirements of Australian Government policy, regulations, and legislation. -author: emilyre -ms.service: azure-australia -ms.topic: quickstart -ms.date: 07/22/2019 -ms.author: yvettep -ms.custom: mode-other ---- - -# Identity federation in Azure Australia - -Identity Management and Federation with Public Cloud offerings is one of the most crucial first-steps for using the cloud. Microsoft's Azure Active Directory service stores user information to enable access to cloud services and is a pre-requisite for consuming other Azure services. - -This article covers the key design points for implementing Azure Active Directory, synchronizing users from an Active Directory Domain Services domain, and implementing secure authentication. Specific focus is placed on the recommendations in the Australian Cyber Security Center's Information Security Manual (ISM) and Azure Certification Reports. - -The classification of information stored within Azure Active Directory should inform decisions about how it is designed. The following excerpt is provided from the [ACSC Certification Report – Microsoft Azure](https://aka.ms/au-irap): - ->**ACSC Certification Report – Microsoft Azure** ->Azure Active Directory (Azure AD) must be configured with Active Directory Federation services when Commonwealth entities classify the use and data content of their Active Directory at PROTECTED. While Active Directory data at the UNCLASSIFIED Dissemination Limiting Markings (UDLM) classification does not require federation, Commonwealth entities can still implement federation to mitigate risks associated with the service being provided from outside of Australia. 
As such, what information is synchronised, and the mechanism by which users are authenticated, are the two key concerns covered here. - -## Key design considerations - -### User synchronisation - -When deploying Azure AD Connect, there are several decisions that must be made about the data that will be synchronised. Azure AD Connect is based upon Microsoft Identity Manager and provides a robust feature-set for [transforming](../active-directory/hybrid/how-to-connect-sync-best-practices-changing-default-configuration.md) data between directories. - -Microsoft Consulting Services can be engaged to do an ADRAP evaluation of your existing Windows Server Active Directory. The ADRAP assists in determining any issues that may need to be corrected before synchronising with Azure Active Directory. Microsoft Premier Support Agreements will generally include this service. - -The [IDFix tool](/office365/enterprise/install-and-run-idfix) scans your on-premises Active Directory domain for issues before synchronising with Azure AD. IDFix is a key first step before implementing Azure AD Connect. Although an IDFix scan can identify a large number of issues, many of these issues can either be resolved quickly with scripts or worked around using data transforms in Azure AD Connect. - -Azure AD requires that users have an externally routable top-level domain to enable authentication. If your domain has a UPN suffix that is not externally routable, you need to set the [alternative sign in ID](../active-directory/hybrid/plan-connect-userprincipalname.md) in AD Connect to the user's mail attribute. Users then sign in to Azure services with their email address rather than their domain sign in. - -The UPN suffix on user accounts can also be altered using tools such as PowerShell; however, it can have unforeseen consequences for other connected systems and is no longer considered best practice. - -In deciding which attributes to synchronise to Azure Active Directory, it's safest to assume that all attributes are required. It is rare for a directory to contain actual PROTECTED data; however, conducting an audit is recommended. If PROTECTED data is found within the directory, assess the impact of omitting or transforming the attribute. As a helpful guide, there is a list of attributes which Microsoft Cloud Services [require](../active-directory/hybrid/reference-connect-sync-attributes-synchronized.md). - -### Authentication - -It's important to understand the options that are available, and how they can be used to keep end-users secure. -Microsoft offers [three native solutions](../active-directory/hybrid/plan-connect-user-signin.md) to authenticate users against Azure Active Directory: - -* Password hash synchronization - The hashed passwords from Active Directory Domain Services are synchronised by Azure AD Connect into Azure Active Directory. -* [Pass-through authentication](../active-directory/hybrid/how-to-connect-pta.md) - Passwords remain within Active Directory Domain Services. Users are authenticated against Active Directory Domain Services via an agent. No passwords are stored within Azure AD. -* [Federated SSO](../active-directory/hybrid/how-to-connect-fed-whatis.md) - Azure Active Directory is federated with Active Directory Federation Services; during sign in, Azure directs users to Active Directory Federation Services to authenticate. No passwords are stored within Azure AD. 
Password hash synchronisation can be used in scenarios where OFFICIAL:Sensitive and below data is being stored within the directory. Scenarios where PROTECTED data is being stored will require one of the two remaining options. - -All three of these options support [Password Write-Back](../active-directory/authentication/concept-sspr-writeback.md), which the [ACSC Consumer Guide](https://aka.ms/au-irap) recommends disabling. However, organisations should evaluate the risk of disabling Password Write-Back against the productivity gains and reduced support effort of using self-service password resets. - -#### Pass-Through Authentication (PTA) - -Pass-Through Authentication was released after the IRAP assessment was completed and should therefore be individually evaluated to determine how the solution fits your organisation's risk profile. Pass-Through Authentication is preferred over Federation by Microsoft due to the improved security posture. - -![Pass-Through Authentication](media/pta1.png) - -Pass-Through Authentication presents several design factors to be considered: - -* The Pass-Through Authentication Agent must be able to establish outgoing connections to Microsoft Cloud Services. -* Install more than one agent to ensure that the service is highly available. It is best practice to deploy at least three agents, and up to a maximum of 12 agents. -* Best practice is to avoid installing the agent directly onto Active Directory Domain Controllers. By default, deploying Azure AD Connect with Pass-Through Authentication installs the agent on the AD Connect server. -* Pass-Through Authentication is a lower maintenance option than Active Directory Federation Services because it does not require dedicated server infrastructure, certificate management, or inbound firewall rules. - -#### Active Directory Federation Services (ADFS) - -Active Directory Federation Services was included within the IRAP assessment and is approved for use in PROTECTED environments. - -![Federation](media/federated-identity.png) - -Active Directory Federation Services presents several design factors to be considered: - -* Federation Services will require network ingress for HTTPS traffic from the internet, or at minimum from Microsoft's service endpoints. -* Federation Services uses PKI and certificates, which require ongoing management and renewal. -* Federation Services should be deployed on dedicated servers, and will require the relevant network infrastructure to make it securely accessible externally. - -### Multi-Factor Authentication (MFA) - -The ISM section on multi-factor authentication recommends implementing it in the following scenarios based on your risk profile: - -* Authenticating Standard Users -* Authenticating Privileged accounts -* Authenticating Users' Remote access -* Users doing privileged actions - -Azure Active Directory provides Multi-Factor Authentication that can be enabled for either all users or a subset of users (for example, only Privileged Accounts). Microsoft also provides a solution called Conditional Access, which allows more granular control over how Multi-Factor Authentication is applied (for example, only when users sign in from remote IP address ranges). 
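As a hedged sketch of reviewing what is already in place, the Conditional Access policies in a tenant can be listed through Microsoft Graph with `az rest`, assuming the signed-in identity has permission to read them (for example, Policy.Read.All):

```azurecli
# Sketch: list Conditional Access policies and their state via Microsoft Graph.
# Requires an account or service principal permitted to read policy objects.
az rest --method get \
  --url "https://graph.microsoft.com/v1.0/identity/conditionalAccess/policies" \
  --query "value[].{Policy:displayName, State:state}" \
  --output table
```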
- -Azure AD Multi-Factor Authentication supports the following ISM acceptable forms of validation: - -* Phone call -* SMS message -* Microsoft Authenticator Application -* Supported hardware tokens - -Privileged Identity Management, a component of Azure Active Directory, can be used to enforce the use of Multi-Factor authentication when users elevate their permissions to meet the fourth recommendation. - -## Next steps - -Review the article on [Azure role-based access control (Azure RBAC) and Privileged Identity Management](role-privileged.md). diff --git a/articles/azure-australia/index.yml b/articles/azure-australia/index.yml deleted file mode 100644 index 90ed2f69a7ead..0000000000000 --- a/articles/azure-australia/index.yml +++ /dev/null @@ -1,46 +0,0 @@ -### YamlMime:Landing - -title: Azure Australia documentation -summary: Microsoft Azure Australia is a cloud platform that is certified by the Australian Cyber Security Center (ACSC) and included on the Certified Cloud Services List (CCSL). - -metadata: - title: Azure Australia documentation - description: Microsoft Azure Australia is a cloud platform certified by the Australian Cyber Security Center (ACSC) and included on the Certified Cloud Services List (CCSL). - ms.service: azure-australia - ms.topic: landing-page - author: emilyre - ms.author: v-emread - ms.date: 03/11/2020 - -# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | sample | tutorial | video | whats-new - -landingContent: - # Card - - title: About Azure Australia - linkLists: - - linkListType: overview - links: - - text: What is Azure Australia? - url: australia-overview.md - - # Card - - title: Security - linkLists: - - linkListType: concept - links: - - text: Data security - url: secure-your-data.md - - text: Azure VPN Gateway configuration - url: vpn-gateway.md - - text: Azure Key Vault - url: azure-key-vault.md - - # Card - - title: Gateways - linkLists: - - linkListType: concept - links: - - text: Gateway logging, auditing, and visibility - url: gateway-log-audit-visibility.md - - text: Secure remote administration of your Gateway - url: gateway-secure-remote-administration.md diff --git a/articles/azure-australia/media/asr-overview.png b/articles/azure-australia/media/asr-overview.png deleted file mode 100644 index 0fbd6c1cb8608..0000000000000 Binary files a/articles/azure-australia/media/asr-overview.png and /dev/null differ diff --git a/articles/azure-australia/media/azure-key-vault-overview.png b/articles/azure-australia/media/azure-key-vault-overview.png deleted file mode 100644 index 87eb8835bb272..0000000000000 Binary files a/articles/azure-australia/media/azure-key-vault-overview.png and /dev/null differ diff --git a/articles/azure-australia/media/backup-overview.png b/articles/azure-australia/media/backup-overview.png deleted file mode 100644 index 7b76de29b78e0..0000000000000 Binary files a/articles/azure-australia/media/backup-overview.png and /dev/null differ diff --git a/articles/azure-australia/media/blueprint-artifacts.png b/articles/azure-australia/media/blueprint-artifacts.png deleted file mode 100644 index 71951f8917a22..0000000000000 Binary files a/articles/azure-australia/media/blueprint-artifacts.png and /dev/null differ diff --git a/articles/azure-australia/media/certification.png b/articles/azure-australia/media/certification.png deleted file mode 100644 index a6e861e475970..0000000000000 Binary files a/articles/azure-australia/media/certification.png and 
/dev/null differ diff --git a/articles/azure-australia/media/create-policy.png b/articles/azure-australia/media/create-policy.png deleted file mode 100644 index 51fd464c692f2..0000000000000 Binary files a/articles/azure-australia/media/create-policy.png and /dev/null differ diff --git a/articles/azure-australia/media/defenceindepth.png b/articles/azure-australia/media/defenceindepth.png deleted file mode 100644 index 245fe80224481..0000000000000 Binary files a/articles/azure-australia/media/defenceindepth.png and /dev/null differ diff --git a/articles/azure-australia/media/dual-redundancy.png b/articles/azure-australia/media/dual-redundancy.png deleted file mode 100644 index dd2fd0466ef3c..0000000000000 Binary files a/articles/azure-australia/media/dual-redundancy.png and /dev/null differ diff --git a/articles/azure-australia/media/duplicate-policy.png b/articles/azure-australia/media/duplicate-policy.png deleted file mode 100644 index 5116f81d4fbc7..0000000000000 Binary files a/articles/azure-australia/media/duplicate-policy.png and /dev/null differ diff --git a/articles/azure-australia/media/egress-traffic.png b/articles/azure-australia/media/egress-traffic.png deleted file mode 100644 index f5d5ab8c47bf7..0000000000000 Binary files a/articles/azure-australia/media/egress-traffic.png and /dev/null differ diff --git a/articles/azure-australia/media/federated-identity.png b/articles/azure-australia/media/federated-identity.png deleted file mode 100644 index 6f92e2c27bde8..0000000000000 Binary files a/articles/azure-australia/media/federated-identity.png and /dev/null differ diff --git a/articles/azure-australia/media/ingress-traffic.png b/articles/azure-australia/media/ingress-traffic.png deleted file mode 100644 index 651b76a0784c8..0000000000000 Binary files a/articles/azure-australia/media/ingress-traffic.png and /dev/null differ diff --git a/articles/azure-australia/media/initiative-definitions.png b/articles/azure-australia/media/initiative-definitions.png deleted file mode 100644 index f578ba1875c83..0000000000000 Binary files a/articles/azure-australia/media/initiative-definitions.png and /dev/null differ diff --git a/articles/azure-australia/media/management-groups.png b/articles/azure-australia/media/management-groups.png deleted file mode 100644 index 20bbb9a1f59c8..0000000000000 Binary files a/articles/azure-australia/media/management-groups.png and /dev/null differ diff --git a/articles/azure-australia/media/mfa-policies.png b/articles/azure-australia/media/mfa-policies.png deleted file mode 100644 index bae561ce093d7..0000000000000 Binary files a/articles/azure-australia/media/mfa-policies.png and /dev/null differ diff --git a/articles/azure-australia/media/nci-sectors.png b/articles/azure-australia/media/nci-sectors.png deleted file mode 100644 index daa92dcec0744..0000000000000 Binary files a/articles/azure-australia/media/nci-sectors.png and /dev/null differ diff --git a/articles/azure-australia/media/overview.png b/articles/azure-australia/media/overview.png deleted file mode 100644 index 5e853b73d9d97..0000000000000 Binary files a/articles/azure-australia/media/overview.png and /dev/null differ diff --git a/articles/azure-australia/media/policy-definitions.png b/articles/azure-australia/media/policy-definitions.png deleted file mode 100644 index 9dfe33dcf1b62..0000000000000 Binary files a/articles/azure-australia/media/policy-definitions.png and /dev/null differ diff --git a/articles/azure-australia/media/pspf-classifications.png 
b/articles/azure-australia/media/pspf-classifications.png deleted file mode 100644 index 53f930b23c159..0000000000000 Binary files a/articles/azure-australia/media/pspf-classifications.png and /dev/null differ diff --git a/articles/azure-australia/media/pta1.png b/articles/azure-australia/media/pta1.png deleted file mode 100644 index a2801edaad790..0000000000000 Binary files a/articles/azure-australia/media/pta1.png and /dev/null differ diff --git a/articles/azure-australia/media/queries-overview.png b/articles/azure-australia/media/queries-overview.png deleted file mode 100644 index fc88bf63e0fae..0000000000000 Binary files a/articles/azure-australia/media/queries-overview.png and /dev/null differ diff --git a/articles/azure-australia/media/rbac-overview.png b/articles/azure-australia/media/rbac-overview.png deleted file mode 100644 index 90f5f485dfdf6..0000000000000 Binary files a/articles/azure-australia/media/rbac-overview.png and /dev/null differ diff --git a/articles/azure-australia/media/regulatory-initiatives.png b/articles/azure-australia/media/regulatory-initiatives.png deleted file mode 100644 index c4e0f37f795f5..0000000000000 Binary files a/articles/azure-australia/media/regulatory-initiatives.png and /dev/null differ diff --git a/articles/azure-australia/media/remote-admin.png b/articles/azure-australia/media/remote-admin.png deleted file mode 100644 index 153f8bf91f048..0000000000000 Binary files a/articles/azure-australia/media/remote-admin.png and /dev/null differ diff --git a/articles/azure-australia/media/simple-compliance.png b/articles/azure-australia/media/simple-compliance.png deleted file mode 100644 index 2945518c8e92e..0000000000000 Binary files a/articles/azure-australia/media/simple-compliance.png and /dev/null differ diff --git a/articles/azure-australia/media/visibility.png b/articles/azure-australia/media/visibility.png deleted file mode 100644 index 32971dc06664d..0000000000000 Binary files a/articles/azure-australia/media/visibility.png and /dev/null differ diff --git a/articles/azure-australia/media/vpngateway-multisite-connection-diagram.png b/articles/azure-australia/media/vpngateway-multisite-connection-diagram.png deleted file mode 100644 index e062e7df8daf9..0000000000000 Binary files a/articles/azure-australia/media/vpngateway-multisite-connection-diagram.png and /dev/null differ diff --git a/articles/azure-australia/recovery-backup.md b/articles/azure-australia/recovery-backup.md deleted file mode 100644 index 7ca715ec190f1..0000000000000 --- a/articles/azure-australia/recovery-backup.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: Backup and disaster recovery in Azure Australia -description: Backup and disaster recovery in Microsoft Azure for Australian Government agencies as it relates to the ASD Essential 8 -author: emilyre -ms.service: azure-australia -ms.topic: conceptual -ms.date: 07/22/2019 -ms.author: yvettep ---- - -# Backup and disaster recovery in Azure Australia - -Having backup and disaster recovery plans with the supporting infrastructure in place is critical for all organisations. The importance of having a backup solution is highlighted by its inclusion in the [Australian Cyber Security Center's Essential 8](https://acsc.gov.au/publications/protect/essential-eight-explained.htm). - -Microsoft Azure provides two services that enable resilience: Azure Backup and Azure Site Recovery. These services enable you to protect your data, both on-premises and in the cloud, for a variety of design scenarios. 
Azure Backup and Azure Site Recovery both use a common storage and management resource: the Azure Recovery Services Vault. This vault is used to manage, monitor, and segregate Azure Backup and Azure Site Recovery Data. - -This article details the key design elements for implementing Azure Backup and Azure Site Recovery in line with the [Australian Signals Directorate's (ASD) Information Security Manual (ISM) Controls](https://acsc.gov.au/infosec/ism/index.htm). - -## Azure Backup - -![Azure Backup](media/backup-overview.png) - -Azure Backup resembles a traditional on-premises backup solution and provides the ability to backup both on-premises and Azure hosted data. Azure Backup can be used to back up the following data types to Azure: - -* Files and folders -* Supported Windows and Linux operating systems hosted on: - * Hyper-V and VMWare Hypervisors - * Physical hardware -* Supported Microsoft applications - -### Azure Site Recovery - -![Azure Site Recovery](media/asr-overview.png) - -Azure Site Recovery replicates workloads consisting of either a single virtual machine or multi-tier applications. Replication is supported from on-premises into Azure, between Azure regions, or between on-premises locations orchestrated by Azure Site Recovery. On-premises virtual machines can be replicated to Azure or to a supported on-premises hypervisor. Once configured, Azure Site Recovery orchestrates replication, fail-over, and fail-back. - -## Key design considerations - -When implementing a backup or disaster recovery solution, the proposed solution needs to consider: - -* The scope and volume of data to be captured -* How long the data will be retained -* How this data is securely stored and managed -* The geographical locations where the data is stored -* Routine system testing procedures - -The ISM provides guidance on the security considerations that should be made when designing a solution. Microsoft Azure provides means to address these security considerations. - -### Data sovereignty - -Organisations need to ensure that data sovereignty is maintained when utilising cloud based storage locations. Azure Policy provides the means to restrict the permitted locations where an Azure resource can be created. The built-in Azure Policy "Allowed Locations" is used to ensure that any Azure resources created under the scope of an assigned Azure Policy can only be created in the nominated geographical locations. - -The Azure Policy items for geographic restriction for Azure resources are: - -* allowedLocations -* allowedSingleLocation - -These policies allow Azure administrators to restrict creation to a list of nominated locations or even as single geographic location. - -### Redundant and geographically dispersed storage - -Data stored in the Azure Recovery Service Vault is always stored on redundant storage. By default the Recovery Service Vault uses Azure Geographically Redundant Storage (GRS). Data stored using GRS is replicated to other Azure data centres in the Recovery Service Vault's [secondary paired region](../availability-zones/cross-region-replication-azure.md). This replicated data is stored as read-only and is only made writeable if there is an Azure failover event. Within the Azure data centre, the data is replicated between separate fault domains and upgrade domains to minimise the chance of hardware or maintenance-based outage. GRS provides at least 99.99999999999999% availability annually. - -The Azure Recovery Services Vault can be configured to utilise Locally Redundant Storage (LRS). 
LRS is a lower-cost storage option with the trade-off of reduced availability. This redundancy model employs the same replication between separate fault domains and upgrade domains but is not replicated between geographic regions. Data located on LRS storage, while not as resilient as GRS, still provides at least 99.999999999% durability of objects over a given year. - -Unlike traditional offsite storage technologies like tape media, the additional copies of the data are created automatically and do not require any additional administrative overhead. - -### Restricted access and activity monitoring - -Backup data must be protected from corruption, modification, and unapproved deletion. Both Azure Backup and Azure Site Recovery make use of the common Azure management fabric. This fabric provides detailed auditing, logging, and Azure role-based access control (Azure RBAC) to resources located within Azure. Access to backup data can be restricted to select administrative staff, and all actions involving backup data can be logged and audited. - -Both Azure Backup and Azure Site Recovery have built-in logging and reporting features. Any issues that occur during backup or replication are reported to administrators using the Azure management fabric. - -The Azure Recovery Services Vault also has the following additional data security measures in place: - -* Backup data is retained for 14 days after a delete operation has occurred -* Alerts and notifications for critical operations such as "Stop Backup with delete data" -* Security PIN requirements for critical operations -* Minimum retention range checks are in place - -These minimum retention range checks include: - -* For daily retention, a minimum of seven days of retention -* For weekly retention, a minimum of four weeks of retention -* For monthly retention, a minimum of three months of retention -* For yearly retention, a minimum of one year of retention - -All backup data stored within Azure is encrypted at rest using Azure's Storage Service Encryption (SSE). This is enabled for all new and existing storage accounts by default and cannot be disabled. The encrypted data is automatically decrypted during retrieval. By default, data encrypted using SSE is encrypted using a key provided by and managed by Microsoft. Organisations can choose to provide and manage their own encryption key for use with SSE. This provides an optional additional layer of security for the encrypted data. This key can be stored by the customer on-premises or securely within Azure Key Vault. - -### Secure data transport - -Azure Backup data is encrypted in transit using AES-256. This data is secured via the use of a passphrase created by administrative staff when the backup is first configured. Microsoft does not have access to this passphrase, meaning the customer must ensure this passphrase is stored securely. The data transfer then takes place between the on-premises environment and the Azure Recovery Services Vault via a secure HTTPS connection. The data within the Recovery Services Vault is then encrypted at rest using Azure SSE. - -Azure Site Recovery data is also always encrypted in transit. All replicated data is securely transported to Azure using HTTPS and TLS. When an Azure customer connects to Azure using an ExpressRoute connection, Azure Site Recovery data is sent via this private connection. When an Azure customer connects to Azure using a VPN connection, the data is replicated between on-premises and the Recovery Services Vault securely over the internet.
- -This secure network data transfer removes the security risk and mitigation requirements associated with managing traditional offsite backup storage solutions such as tape media. - -### Data retention periods - -A minimum backup retention period of three months is recommended; however, longer retention periods are commonly required. Azure Backup can provide up to 9999 copies of a backup. If a single Azure Backup of a protected instance were taken daily, this would allow for the retention of 27 years of daily backups. Individual monthly backups of a protected instance allow for 833 years of retention. As backup data is aged out and less granular backups are retained over time, the total retention window for backup data grows. Azure doesn't limit the length of time data can remain in an Azure Recovery Services Vault, only the total number of backups per instance. There is also no performance difference between restoring from old or new backups; each restore takes the same amount of time to occur. - -The Azure Recovery Services Vault has a number of default backup and retention policies in place. Administrative staff can also create custom backup and retention policies. - -![Azure Backup Policy](media/create-policy.png) - -A balance between backup frequency and long-term retention requirements needs to be found when configuring Azure Backup and retention policies. - -### Backup and restore testing - -The ISM recommends testing of backup data to ensure that the protected data is valid when a restore or failover is required. Azure Backup and Azure Site Recovery also provide the capability to test protected data once it has been backed up or replicated. Data managed by Azure Backup can be restored to a nominated location and the consistency of the data can then be validated. - -Azure Site Recovery has a built-in capability to perform failover testing. Workloads replicated to the Recovery Services Vault can be restored to a nominated Azure environment. The target restore environment can be fully isolated from any production environment to ensure there is no impact on production systems while performing a test. Once the test is complete, the test environment and all resources can be immediately deleted to reduce operational costs. - -Failover testing and validation can be automated using the Azure Automation service built into the Azure platform. This allows failover testing to be scheduled to occur automatically to ensure that data is being successfully replicated to Azure. - -## Next steps - -Review the article on [Ensuring Security with Azure Policy](azure-policy.md). diff --git a/articles/azure-australia/reference-library.md b/articles/azure-australia/reference-library.md deleted file mode 100644 index 083d80e8cdca3..0000000000000 --- a/articles/azure-australia/reference-library.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: Additional documentation and resources -titleSuffix: Azure Australia -description: Additional documentation, tutorials or references relevant to Australian Government agencies operating securely in Azure. -author: emilyre -ms.service: azure-australia -ms.topic: conceptual -ms.date: 07/29/2019 -ms.author: yvettep ---- - -# Additional documentation and resources by focus area - -This resource library contains additional links and references that are relevant to the secure implementation of Australian Government workloads in Azure Australia.
- -## General references for all security and governance in Azure Australia - -* [Microsoft Service Trust Portal Australia Page](https://aka.ms/au-irap) -* [Microsoft Trust Center CCSL Page](https://www.microsoft.com/trustcenter/compliance/ccsl) -* [Azure Security and Compliance Blueprints for PROTECTED](https://aka.ms/au-protected) -* [Tenant Isolation in Microsoft Azure](../security/fundamentals/isolation-choices.md) -* [Australian Information Security Manual](https://www.cyber.gov.au/ism) -* [Australian Cyber Security Centre (ACSC) Certified Cloud List](https://www.cyber.gov.au/irap/cloud-services) - -## Azure Key Vault - -* [Azure Key Vault Overview](../key-vault/general/overview.md) -* [About keys, secrets, and certificates](../key-vault/general/about-keys-secrets-certificates.md) -* [Configure Azure Key Vault firewalls and virtual networks](../key-vault/general/network-security.md) -* [Secure access to a key vault](../key-vault/general/security-features.md) -* [Azure Data Encryption-at-Rest](../security/fundamentals/encryption-atrest.md) -* [How to use Azure Key Vault with Azure Windows Virtual Machines in .NET](../key-vault/general/tutorial-net-virtual-machine.md) -* [Azure Key Vault managed storage account - PowerShell](../key-vault/general/tutorial-net-virtual-machine.md) -* [Set up key rotation and auditing](../key-vault/secrets/tutorial-rotation-dual.md) - -## Identity federation - -* [Azure AD Connect - Installation Guide](../active-directory/hybrid/how-to-connect-install-roadmap.md) -* [Password Write-Back](../active-directory/authentication/concept-sspr-writeback.md) -* [Install and Run the IDFix Tool](/office365/enterprise/install-and-run-idfix) -* [Azure AD UPN Population](../active-directory/hybrid/plan-connect-userprincipalname.md) -* [Azure AD Connect - Synchronised Attributes](../active-directory/hybrid/reference-connect-sync-attributes-synchronized.md) -* [Azure AD Connect - Best-Practice Configuration Guide](../active-directory/hybrid/how-to-connect-sync-best-practices-changing-default-configuration.md) -* [Azure AD Connect - User Sign-In Options](../active-directory/hybrid/plan-connect-user-signin.md) -* [Azure AD Connect and Federation](../active-directory/hybrid/how-to-connect-fed-whatis.md) -* [Pass-Through Authentication Documentation](../active-directory/hybrid/how-to-connect-pta.md) -* [Deploying Azure AD Multi-Factor Authentication](../active-directory/authentication/howto-mfa-getstarted.md) -* [Azure Privileged Identity Management](../active-directory/privileged-identity-management/pim-configure.md) - -## Azure Backup and Azure Site Recovery - -* [Introduction to Azure Backup](../backup/backup-overview.md) -* [Azure Backup Overview](../backup/backup-overview.md) -* [Azure Site Recovery Overview](../site-recovery/site-recovery-overview.md) -* [Azure Governance](../governance/index.yml) -* [Azure Paired Regions](../availability-zones/cross-region-replication-azure.md) -* [Azure Policy](../governance/policy/overview.md) -* [Azure Storage Service Encryption](../storage/common/storage-service-encryption.md) -* [Azure Backup Tutorials](../backup/index.yml) -* [Azure Site Recovery Tutorials](../site-recovery/index.yml) - -## Azure role-based access control (Azure RBAC) and Privileged Identity Management (PIM) - -* [Azure RBAC Overview](../role-based-access-control/overview.md) -* [Azure Privileged Identity Management Overview](../active-directory/privileged-identity-management/pim-configure.md) -* [Azure Management Groups
Overview](../governance/management-groups/index.yml) -* [Azure Identity and Access Control Best Practices](../security/fundamentals/identity-management-best-practices.md) -* [Managing Azure AD Groups](../active-directory/fundamentals/active-directory-manage-groups.md) -* [Hybrid Identity](../active-directory/hybrid/whatis-hybrid-identity.md) -* [Azure Custom Roles](../role-based-access-control/custom-roles.md) -* [Azure Built-in Roles](../role-based-access-control/built-in-roles.md) -* [Securing Privileged Access in Hybrid Cloud Environments](../active-directory/roles/security-planning.md) -* [Azure Enterprise Scaffold](/azure/architecture/cloud-adoption/appendix/azure-scaffold) - -## System monitoring for security - -* [Azure Governance](../governance/index.yml) -* [Azure Security Best Practices](../security/fundamentals/best-practices-and-patterns.md) -* [Platforms and features supported by Microsoft Defender for Cloud](../security-center/security-center-os-coverage.md) -* [Azure Activity Log](../azure-monitor/essentials/platform-logs-overview.md) -* [Azure Diagnostic Logs](../azure-monitor/essentials/platform-logs-overview.md) -* [Microsoft Defender for Cloud Alerts](../security-center/security-center-managing-and-responding-alerts.md) -* [Azure Log Integration](/previous-versions/azure/security/fundamentals/azure-log-integration-overview) -* [Analyze Log Data in Azure Monitor](../azure-monitor/logs/log-query-overview.md) -* [Stream Azure Monitor Logs to an Event Hub](../azure-monitor/essentials/stream-monitoring-data-event-hubs.md) -* [Event Hub Security and Authentication](../event-hubs/authenticate-shared-access-signature.md) - -## Azure Policy and Azure Blueprints - -* [Azure Policy Overview](../governance/policy/overview.md) -* [Azure Blueprints Overview](https://azure.microsoft.com/services/blueprints/) -* [Azure Policy Samples](../governance/policy/samples/index.md) -* [Azure Policy Samples Repository](https://github.com/Azure/azure-policy) -* [Azure Policy Definition Structure](../governance/policy/concepts/definition-structure.md) -* [Azure Policy Effects](../governance/policy/concepts/effects.md) -* [Azure Governance](../governance/index.yml) -* [Azure Management Groups](../governance/management-groups/index.yml) -* [Azure role-based access control (Azure RBAC)](../role-based-access-control/overview.md) - -## Next steps - -Log in to the [Azure portal](https://portal.azure.com) and start configuring your resources securely in Azure Australia. diff --git a/articles/azure-australia/role-privileged.md b/articles/azure-australia/role-privileged.md deleted file mode 100644 index 0f8a117dbd283..0000000000000 --- a/articles/azure-australia/role-privileged.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -title: Azure role-based access control (Azure RBAC) and Privileged Identity Management -titleSuffix: Azure Australia -description: Guidance on Implementing Azure role-based access control (Azure RBAC) and Privileged Identity Management within the Australian regions to meet the specific requirements of Australian Government policy, regulations, and legislation. -author: emilyre -ms.service: azure-australia -ms.topic: conceptual -ms.date: 07/22/2019 -ms.author: yvettep -ms.custom: devx-track-azurepowershell ---- - -# Azure role-based access control (Azure RBAC) and Privileged Identity Management (PIM) - -Managing administrative privilege is a critical step in ensuring security within any IT environment.
Restricting administrative privilege via the use of Least Privilege Security is a requirement of the [ACSC ISM](https://acsc.gov.au/infosec/ism/index.htm) and forms part of the [ACSC Essential 8](https://www.acsc.gov.au/infosec/mitigationstrategies.htm) list of security recommendations. - -Microsoft provides a suite of controls to implement Just-in-Time and Just-Enough-Access within Microsoft Azure. Understanding these controls is essential for an effective security posture in the Cloud. This guide provides an overview of the controls themselves and the key design considerations when implementing them. - -## Azure RBAC - -Azure role-based access control (Azure RBAC) is central to the management of access to all resources within Microsoft Azure and the management of Azure Active Directory (Azure AD). Azure RBAC can be implemented alongside a number of complementary features available in Azure. This article focuses on implementing effective RBAC using Azure Management Groups, Azure Active Directory Groups, and Azure Privileged Identity Management (PIM). - -At a high level, implementing Azure RBAC requires three components: - -![Diagram shows the three components necessary for implementing R B A C, which are security principal, role definition, and scope, which all feed into role assignment.](media/rbac-overview.png) - -* **Security Principals**: A security principal can be any one of the following: a user, a group, [Service Principals](../active-directory/develop/app-objects-and-service-principals.md), or a [Managed Identity](../active-directory/managed-identities-azure-resources/overview.md). Security Principals should be assigned privileges using Azure Active Directory Groups. - -* **Role Definitions**: A Role Definition, also referred to as a Role, is a collection of permissions. These permissions define the operations that can be performed by the Security Principals assigned to the Role Definition. This functionality is provided by Azure Resource Roles and Azure Active Directory Administrator Roles. Azure comes with a set of built-in roles, which can be augmented with custom roles. - -* **Scope**: The scope is the set of Azure resources that a Role Definition applies to. Azure Roles can be assigned to Azure Resources using Azure Management Groups. - -These three components combine to grant Security Principals the access defined in the Role Definitions to all of the resources that fall under the Azure Management Groups' Scope; this is called a Role Assignment. Multiple Role Definitions can be assigned to a Security Principal, and multiple Security Principals can be assigned to a single Scope. - -### Azure Active Directory Groups - -When assigning privileges to individuals or teams, whenever possible the assignment should be linked to an Azure Active Directory Group and not assigned directly to the user in question. This is the same recommended practice inherited from on-premises Active Directory implementations. Where possible, Azure Active Directory Groups should be created per team, complementary to the logical structure of the Azure Management Groups you have created. - -In a hybrid cloud scenario, on-premises Windows Server Active Directory Security Groups can be synchronized to your Azure Active Directory instance. If you have already implemented Azure RBAC on-premises using these Windows Server Active Directory Security Groups, these groups, once synchronized, can then be used to implement Azure RBAC for your Azure Resources.
Otherwise, your cloud environment can be seen as a clean slate to design and implement a robust privilege management plan built around your Azure Active Directory implementation. - -### Azure resource roles versus Azure Active Directory Administrator roles - -Microsoft Azure offers a wide variety of built-in roles for [Azure Resources](../role-based-access-control/built-in-roles.md) and [Azure Active Directory Administration](../active-directory/roles/permissions-reference.md). Both types of Role provide specific, granular access to either Azure Resources or Azure AD administration. It is important to note that Azure Resource roles cannot be used to provide administrative access to Azure AD, and Azure AD roles do not provide specific access to Azure resources. - -Some examples of the types of access that can be assigned to an Azure resource using a built-in role are: - -* Allow one user to manage virtual machines in a subscription and another user to manage virtual networks -* Allow a DBA group to manage SQL databases in a subscription -* Allow a user to manage all resources in a resource group, such as virtual machines, websites, and subnets -* Allow an application to access all resources in a resource group - -Examples of the types of access that can be assigned for Azure AD administration are: - -* Allow helpdesk staff to reset user passwords -* Allow staff to invite external users to an Azure AD instance for B2B collaboration -* Allow administrative staff read access to sign-in and audit reports -* Allow staff to manage all users and groups, including resetting passwords. - -It is important to take the time to understand the full list of allowed actions a built-in role provides to ensure that undue access isn't granted. The list of built-in roles and the access they provide is constantly evolving; the full list of the Roles and their definitions can be viewed by reviewing the documentation linked above or by using the Azure PowerShell cmdlet: - -```PowerShell -Get-AzRoleDefinition -``` - -```output -Name : AcrDelete -Id : <> -IsCustom : False -Description : acr delete -Actions : {Microsoft.ContainerRegistry/registries/artifacts/delete} -NotActions : {} -DataActions : {} -NotDataActions : {} -AssignableScopes : {/} -... -``` - -or the Azure CLI command: - -```azurecli-interactive -az role definition list -``` - -```output -[ - { - "assignableScopes": [ - "/" - ], - "description": "acr delete", - "id": "/subscriptions/49b12d1b-4030-431c-8448-39056021c4ab/providers/Microsoft.Authorization/roleDefinitions/c2f4ef07-c644-48eb-af81-4b1b4947fb11", - "name": "c2f4ef07-c644-48eb-af81-4b1b4947fb11", - "permissions": [ - { - "actions": [ - "Microsoft.ContainerRegistry/registries/artifacts/delete" - ], - "dataActions": [], - "notActions": [], - "notDataActions": [] - } - ], - "roleName": "AcrDelete", - "roleType": "BuiltInRole", - "type": "Microsoft.Authorization/roleDefinitions" - }, -... -``` - -It is also possible to create custom Azure Resource Roles as required. These custom roles can be created in the Azure portal, via PowerShell, or the Azure CLI. When creating custom Roles, it is vital to ensure the purpose of the Role is unique and that its function is not already provided by an existing Azure Resource Role. This reduces ongoing management complexity and reduces the risk of Security Principals receiving unnecessary privileges.
An example would be creating a custom Azure Resource Role that sits between the built-in Azure Resource Roles, "Virtual Machine Contributor" and "Virtual Machine Administrator Login". - -The custom Role could be based on the existing Contributor Role, which grants the following access: - -| Azure Resource | Access Level | -| --- | --- | -| Virtual Machines | Can Manage but cannot access | -| Virtual Network attached to VM | Cannot access | -| Storage attached to VM | Cannot access | -| - -The custom role could preserve this basic access, but allow the designated users some basic additional privileges to modify the network configuration of the virtual machines. - -Azure Resource Roles also have the benefit of being able to be assigned to resources via Azure Management Groups. - -### Azure Management Groups - -Azure Management Groups can be used by an organisation to manage the assignment of Roles to all of the subscriptions and their resources within an Azure Tenancy. Azure Management Groups are designed to allow you to create management hierarchies, including the ability to map your organisational structure hierarchically, within Azure. Creating organisational business units as separate logical entities allows permissions to be applied within an organisation based on each team's specific requirements. Azure Management Groups can be used to define a management hierarchy up to six levels deep. - -![Management Groups](media/management-groups.png) - -Azure Management Groups are mapped to Azure Subscriptions within an Azure Tenancy. This allows an organisation to segregate Azure Resources belonging to specific business units and provide a level of granular control over both cost management and privilege assignment. - -## Privileged Identity Management (PIM) - -Microsoft has implemented Just-In-Time (JIT) and Just-Enough-Access (JEA) through Azure Privileged Identity Management. This service enables administrative staff to control, manage, and monitor privileged access to Azure Resources. PIM allows Security Principals to be made "eligible" for a Role by administrative staff, allowing users to request the activation of the Role through the Azure portal or via PowerShell cmdlets. By default, Role assignment can be activated for a period of between 1 and 72 hours. If necessary, the user can request an extension to their Role assignment and the option to make Role assignment permanent does exist. Optionally, the requirement for Multi-factor Authentication can be enforced when users request the activation of their eligible roles. Once the allocated period of the Role activation expires, the Security Principal no longer has the privileged access granted by the Role. - -The use of PIM prevents the common privilege assignment issues that can occur in environments that don't use Just-In-Time access or don't conduct routine audits of privilege assignment. One common issue is the assignment of elevated privileges being forgotten and remaining in place long after the task requiring elevated privileges has been completed. Another issue is the proliferation of elevated privileges within an environment through the cloning of the access assigned to a Security Principal when configuring other similar Security Principals. 
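As a minimal sketch of how these concepts fit together, the following Azure CLI command assigns a built-in role to an Azure Active Directory group at management group scope; the group object ID, management group ID, and chosen role are placeholders, and the real values would come from your own directory and management hierarchy:

```azurecli-interactive
# Illustrative only: the object ID, management group ID, and role are placeholders.
# Assign the built-in "Virtual Machine Contributor" role to an Azure AD group at
# management group scope, so the assignment flows down to all child subscriptions.
az role assignment create \
  --assignee-object-id 00000000-0000-0000-0000-000000000000 \
  --assignee-principal-type Group \
  --role "Virtual Machine Contributor" \
  --scope "/providers/Microsoft.Management/managementGroups/contoso-workloads"
```

In a PIM-managed environment, the same principal and scope would typically be configured as an eligible rather than a permanent assignment, so that the role must be activated just-in-time when it is needed.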
- -## Key design considerations - -When designing an Azure RBAC strategy with the intention of enforcing Least Privilege Security, the following security requirements should be considered: - -* Requests for privileged access are validated -* Administrative privileges are restricted to the minimum access required to perform the specific duties -* Administrative privileges are restricted to the minimum period of time required to perform the specific duties -* Regular reviews of granted administrative privileges are undertaken - -The process of designing an Azure RBAC strategy will necessitate a detailed review of business functions to understand the difference in access between distinct business roles, and the type and frequency of work that requires elevated privileges. The difference in function between a Backup Operator, a Security Administrator, and an Auditor will require different levels of access at different times with varying levels of ongoing review. - -### Validate requests for access - -Elevated privileges must be explicitly approved. To support this, an approval process must be developed and appropriate staff made responsible for validating that all requests for additional privileges are legitimate. Privileged Identity Management provides multiple options for approving Role assignment. A role activation request can be configured to allow for self-approval or be gated and require nominated approvers to manually review and approve all Role activation requests. Activation requests can also be configured to require that additional supporting information, such as ticket numbers, be included with the activation request. - -### Restrict privilege based on duties - -Restricting the level of privilege granted to Security Principals is critical, as the over-assignment of privileges is a common IT Security attack vector. The types of resources being managed, and the teams responsible, must be assessed so the minimum level of privileges required for daily duties can be assigned. Additional privileges that go beyond those required for daily duties should only ever be granted for the period of time required to perform a specific task. An example of this would be providing "Contributor" access to a customer's administrator, but allowing them to request "Owner" permissions for an Azure Resource for a specific task requiring temporary high-level access. - -This ensures that each individual administrator only has elevated access for the shortest period of time. Adherence to these practices reduces the overall attack surface for any organisation's IT infrastructure. - -### Regular evaluation of administrative privilege - -It is vital that Security Principals within an environment are routinely audited to ensure that the correct level of privilege is currently assigned. Microsoft Azure provides a number of means to audit and evaluate the privileges assigned to Azure Security Principals. Privileged Identity Management allows administrative staff to periodically perform "Access Reviews" of the Roles granted to Security Principals. An Access Review can be undertaken to audit both Azure Resource Role assignment and Azure Active Directory Administrative Role assignment. An Access Review can be configured with the following properties: - -* **Review name and review start and end dates**: Reviews should be configured to be long enough for the nominated users to complete them. - -* **Role to be reviewed**: Each Access Review focuses on a single Azure Role.
- -* **Nominated reviewers**: There are three options for performing a review. You can assign the review to someone else to complete, you can do it yourself, or you can have each user review their own access. - -* **Require users to provide a reason for access**: Users can be required to enter a reason for maintaining their level of privilege when completing the access review. - -The progress of pending Access Reviews can be monitored at any time via a dashboard in the Azure portal. Access to the role being reviewed will remain unchanged until the Access Review has been completed. It is also possible to [audit](../active-directory/privileged-identity-management/pim-how-to-use-audit-log.md) all PIM user assignments and activations within a nominated time period. - -## Next steps - -Review the article on [System Monitoring in Azure Australia](system-monitor.md). diff --git a/articles/azure-australia/secure-your-data.md b/articles/azure-australia/secure-your-data.md deleted file mode 100644 index 3ead971a3cc58..0000000000000 --- a/articles/azure-australia/secure-your-data.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Data security in Azure Australia -description: Configuring Azure within the Australian regions to meet the specific requirements of Australian Government policy, regulations, and legislation. -author: emilyre -ms.service: azure-australia -ms.topic: conceptual -ms.date: 07/22/2019 -ms.author: yvettep ---- - -# Data security in Azure Australia - -The overarching principles for securing customer data are: - -* Protecting data using encryption -* Managing secrets -* Restricting data access - -## Encrypting your data - -The encryption of data can be applied at the disk level (at-rest), in databases (at-rest and in-transit), in applications (in-transit), and while on the network (in-transit). There are several ways of achieving encryption in Azure: - -|Service/Feature|Description| -|---|---| -|Storage Service Encryption|Azure Storage Service Encryption is enabled at the storage account level, resulting in block blobs and page blobs being automatically encrypted when written to Azure Storage. When you read the data from Azure Storage, it will be decrypted by the storage service before being returned. Use SSE to secure your data without having to modify or add code to any applications.| -|Azure Disk Encryption|Use Azure Disk Encryption to encrypt the OS disks and data disks used by an Azure Virtual Machine. Integration with Azure Key Vault gives you control and helps you manage disk encryption keys.| -|Client-Side Application Level Encryption|Client-Side Encryption is built into the Java and the .NET storage client libraries, which can utilize Azure Key Vault APIs, making it straightforward to implement. Use Azure Key Vault to gain access to the secrets in Azure Key Vault for specific individuals using Azure Active Directory.| -|Encryption in transit|The basic encryption available for connectivity to Azure Australia supports the Transport Layer Security (TLS) 1.2 protocol and X.509 certificates. Federal Information Processing Standard (FIPS) 140-2 Level 1 cryptographic algorithms are also used for infrastructure network connections between Azure Australia data centers. Windows Server 2016, Windows 10, Windows Server 2012 R2, Windows 8.1, and Azure File shares can use SMB 3.0 for encryption between the VM and the file share.
Use Client-Side Encryption to encrypt the data before it's transferred into storage in a client application, and to decrypt the data after it's transferred out of storage.| -|IaaS VMs|Use Azure Disk Encryption. Turn on Storage Service Encryption to encrypt the VHD files that are used to back up those disks in Azure Storage, but this only encrypts newly written data. This means that, if you create a VM and then enable Storage Service Encryption on the storage account that holds the VHD file, only the changes will be encrypted, not the original VHD file.| -|Client-Side Encryption|This is the most secure method for encrypting your data, because it encrypts it before transit, and encrypts the data at rest. However, it does require that you add code to your applications using storage, which you might not want to do. In those cases, you can use HTTPS for your data in transit, and Storage Service Encryption to encrypt the data at rest. Client-Side Encryption also involves more load on the client—you have to account for this in your scalability plans, especially if you're encrypting and transferring large amounts of data.| -| - -For more information on the encryption options in Azure, see the [Storage Security Guide](../storage/blobs/security-recommendations.md). - -## Protecting data by managing secrets - -Secure key management is essential for protecting data in the cloud. Customers should strive to simplify key management and maintain control of keys used by cloud applications and services to encrypt data. - -### Managing secrets - -* Use Key Vault to minimize the risks of secrets being exposed through hard-coded configuration files, scripts, or in source code. Azure Key Vault encrypts keys (such as the encryption keys for Azure Disk Encryption) and secrets (such as passwords), by storing them in FIPS 140-2 Level 2 validated hardware security modules (HSMs). For added assurance, you can import or generate keys in these HSMs. -* Application code and templates should only contain URI references to the secrets (which means the actual secrets are not in code, configuration, or source code repositories). This prevents key phishing attacks on internal or external repos, such as harvest-bots in GitHub. -* Utilize strong Azure RBAC controls within Key Vault. If a trusted operator leaves the company or transfers to a new group within the company, they should be prevented from being able to access the secrets. - -For more information, see [Azure Key Vault](azure-key-vault.md) - -## Isolation to restrict data access - -Isolation is all about using boundaries, segmentation, and containers to limit data access to only authorized users, services, and applications. For example, the separation between tenants is an essential security mechanism for multi-tenant cloud platforms such as Microsoft Azure. Logical isolation helps prevent one tenant from interfering with the operations of any other tenant. - -#### Per-customer isolation - -Azure implements network access control and segregation through layer 2 VLAN isolation, access control lists, load balancers, and IP filters. - -Customers can further isolate their resources across subscriptions, resource groups, virtual networks, and subnets. - -For more information on isolation in Microsoft Azure, see the [Isolation in the Azure Public Cloud](../security/fundamentals/isolation-choices.md). 
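Returning to the secret-management guidance above, the following hedged Azure CLI sketch stores a secret in an existing key vault and retrieves the secret identifier (URI) that application code or templates would reference instead of the secret value itself; the vault and secret names are placeholders, and access to the secret is still governed by Azure AD and the vault's access policies or RBAC assignments:

```azurecli-interactive
# Illustrative only: the vault and secret names are placeholders.
# Store a secret in an existing key vault rather than in code or configuration files.
az keyvault secret set \
  --vault-name contoso-au-kv \
  --name SqlConnectionString \
  --value "<connection-string>"

# Retrieve the secret identifier (URI). Applications reference this URI and
# authenticate via Azure AD, so the secret value never lives in source control.
az keyvault secret show \
  --vault-name contoso-au-kv \
  --name SqlConnectionString \
  --query id --output tsv
```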
- -## Next steps - -Review the article on [Azure VPN Gateway](vpn-gateway.md) \ No newline at end of file diff --git a/articles/azure-australia/security-explained.md b/articles/azure-australia/security-explained.md deleted file mode 100644 index ae58781cbb19f..0000000000000 --- a/articles/azure-australia/security-explained.md +++ /dev/null @@ -1,146 +0,0 @@ ---- -title: Azure Australia security explained -description: Information most asked about the Australian regions and meeting the specific requirements of Australian Government policy, regulations, and legislation. -author: emilyre -ms.service: azure-australia -ms.topic: conceptual -ms.date: 07/22/2019 -ms.author: yvettep ---- - -# Azure Australia security explained - -This article addresses some of the common questions and areas of interest for Australian Government agencies that investigate with, design for, and deploy to Microsoft Azure Australia. - -## IRAP and Certified Cloud Services List documents - -The Australian Cyber Security Centre (ACSC) provides a Letter of Certification, a Certification Report, and a Consumer Guide for the service when it's added to the Certified Cloud Services List (CCSL). - -Microsoft is currently listed on the CCSL for Azure, Office 365, and Dynamics 365 CRM. - -Microsoft makes our audit, assessment, and ACSC certification documents available to customers and partners on an Australia-specific page of the [Microsoft Service Trust Portal](https://aka.ms/au-irap). - -## Dissemination Limiting Markers and PROTECTED certification - -The process of having systems, including cloud services, approved for use by government organisations is defined in the [Information Security Manual (ISM)](https://www.cyber.gov.au/acsc/view-all-content/ism) that's produced and published by the ACSC. The ACSC is the entity within the Australian Signals Directorate (ASD) that's responsible for cyber security and cloud certification. - -There are two steps to the approval process: - -1. **Security Assessment (IRAP)**: A process in which registered professionals assess systems, services, and solutions against the technical controls in the ISM and evaluate whether the controls were implemented effectively. The assessment also identifies any specific risks for the approval authority to consider prior to issuing an Approval to Operate (ATO). - -1. **Approval to Operate**: The process in which a senior officer of a government agency formally recognises and accepts the residual risk of a system to the information it processes, stores, and communicates. An input to this process is the Security Assessment. - -The assessment of Azure services at the PROTECTED level identifies that the implementation of the security controls required for the storage and processing of PROTECTED and below data were confirmed to be in place and are operating effectively. - -## Australian data classification changes - -On October 1, 2018, the Attorney General's Department publicly announced changes to the Protective Security Policy Framework (PSPF), specifically a new [sensitive and classified information system](https://www.protectivesecurity.gov.au/information/sensitive-classified-information/Pages/default.aspx). - -![Revised PSPF classifications](media/pspf-classifications.png) - -All Australian agencies and organisations that operate under the PSPF are affected by these changes. The primary change that affects our current IRAP/CCSL certifications is that the current Dissemination Limiting Markings (DLMs) were retired. 
The OFFICIAL: Sensitive marking replaces the various DLMs used for the protection of sensitive information. The change also introduced three information management markers that can be applied to all official information at all levels of sensitivity and classification. The PROTECTED classification remains unchanged. - -The term "Unclassified" is removed from the new system and the term "Unofficial" is applied to non-Government information, although it doesn't require a formal marking. - -## Choose an Azure region for OFFICIAL: Sensitive and PROTECTED workloads - -The Azure OFFICIAL: Sensitive and PROTECTED certified services are deployed to all four Australian Data Centre regions: Australia East, Australia South East, Australia Central, and Australia Central 2. The certification report issued by the ACSC recommends that PROTECTED data be deployed to the Azure Government regions in Canberra if a service is available there. For more information about the PROTECTED certified Azure services, see the [Australia page on the Service Trust Portal](https://aka.ms/au-irap). - ->[!NOTE] ->Microsoft recommends that government data of all sensitivities and classifications should be deployed to the Australia Central and Australia Central 2 regions because they're designed and operated specifically for the needs of government. - -For more information on the special nature of the Azure Australian regions, see [Azure Australia Central regions](https://azure.microsoft.com/global-infrastructure/australia/). - -## How Microsoft separates classified and official data - -Microsoft operates Azure and Office 365 in Australia as if all data is sensitive or classified, which raises our security controls to that high bar. - -The infrastructure that supports Azure potentially serves data of multiple classifications. Because we assume that the customer data is classified, the appropriate controls are in place. Microsoft has adopted a baseline security posture for our services that complies with the PSPF requirements to store and process PROTECTED classified information. - -To assure our customers that one tenant in Azure isn't at risk from other tenants, Microsoft implements comprehensive defence-in-depth controls. - -Beyond the capabilities implemented within the Microsoft Azure platform, additional customer configurable controls, such as encryption with customer-managed keys, nested virtualisation, and just-in-time administrative access, can further reduce the risk. Within the Azure Government Australia regions in Canberra, a process for formal approving only Australian and New Zealand government and national critical infrastructure organisations is in place. This community cloud provides additional assurance to organisations that are sensitive to cotenant risks. - -The Microsoft Azure PROTECTED Certification Report confirms that these controls are effective for the storage and processing of PROTECTED classified data and their isolation. - -## Relevance of IRAP/CCSL to state government and critical infrastructure providers - -Many state government and critical infrastructure providers incorporate federal government requirements into their security policy and assurance framework. These organisations also handle OFFICIAL, OFFICIAL: Sensitive, and some amount of PROTECTED classified data, either from their interaction with the federal government or in their own right. 
- -The Australian Government is increasingly focusing policy and legislation on the protection of non-Government data that fundamentally affect the security and economic prosperity of Australia. As such, the Azure Australia regions and the CCSL certification are relevant to all of those industries. - -![Critical infrastructure sectors](media/nci-sectors.png) - -The Microsoft certifications demonstrate that Azure services were subjected to a thorough, rigorous, and formal assessment of the security protections in place and they were approved for handling such highly sensitive data. - -## Location and control of Microsoft data centres - -It's a mandatory requirement of government and critical infrastructure to explicitly know the data centre location and ownership for cloud services processing their data. Microsoft is unique as a hyperscale cloud provider in providing extensive information about these locations and ownership. - -Microsoft's Azure Australia regions (Australia Central and Australia Central 2) operate within the facilities of CDC Datacentres. The ownership of CDC Datacentres is Australian controlled with 48% ownership from the Commonwealth Superannuation Corporation, 48% ownership from Infratil (a New Zealand-based, dual Australian and New Zealand Stock Exchange listed long-term infrastructure asset fund), and 4% Australian management. - -The management of CDC Datacentres has contractual assurances in place with the Australian Government that restrict future transfer of ownership and control. This transparency of supply chain and ownership via Microsoft's partnership with CDC Datacentres is in line with the principles of the [Whole-of-Government Hosting Strategy](https://www.dta.gov.au/our-projects/whole-government-hosting-strategy) and the definition of a Certified Sovereign Datacentre. - -## Azure services that are included in the current CCSL certification - -In June 2017, the ACSC certified 41 Azure services for the storage and processing of data at the Unclassified: DLM level. In April 2018, 24 of those services were certified for PROTECTED classified data. - -The availability of ACSC-certified Azure services across our Azure regions in Australia are as follows (services shown in bold are certified at the PROTECTED level). - -|Azure Australia Central regions|Non-regional services and other regions| -|---|---| -|API Management, App Gateway, Application Services, **Automation**, **Azure portal**, **Backup**, **Batch**, **Cloud Services**, Cosmos DB, Event Hubs, **ExpressRoute**, HDInsight, **Key Vault**, Load Balancer, Log Analytics, **Multi-factor Authentication**, Redis Cache, **Resource Manager**, **Service Bus**, **Service Fabric**, **Site Recovery**, **SQL Database**, **Storage**, Traffic Manager, **Virtual Machines**, **Virtual Network**, **VPN Gateway**|**Azure Active Directory**, CDN, Data Catalog, **Import Export**, **Information Protection**, **IOT Hub**, Machine Learning, Media Services, **Notification Hubs**, Power BI, **Scheduler**, **Security Centre**, Search, Stream Analytics| -| - -Microsoft publishes the [Overview of Microsoft Azure Compliance](https://gallery.technet.microsoft.com/Overview-of-Azure-c1be3942/file/178110/44/Microsoft%20Azure%20Compliance%20Offerings.pdf) that lists all in-scope services for all of the Global, Government, Industry, and Regional compliance and assessment processes that Azure goes through, which includes IRAP/CCSL. 
- -## Azure service not listed or assessed at a lower level than needed - -Services that aren't certified, or that have been certified at the OFFICIAL: Sensitive but not the PROTECTED level, can be used alongside or as part of a solution hosting PROTECTED data provided the services are either: - -- Not storing or processing PROTECTED data unencrypted, or -- You've completed a risk assessment and approved the service to store PROTECTED data yourself. - -You can use a service that isn't included on the CCSL to store and process OFFICIAL data, but the ISM requires you to notify the ACSC in writing that you're doing so before you enter into or renew a contract with a cloud service provider. - -Any service that's used by an agency for PROTECTED workloads must be security assessed and approved in line with the processes outlined in the ISM and the Agency-managed IRAP Assessments process in the [DTA Secure Cloud Strategy](https://www.dta.gov.au/files/cloud-strategy/secure-cloud-strategy.pdf). - -![DTA Secure Cloud Strategy Certification Process](media/certification.png) - -Microsoft continually assesses our services to ensure the platform is secure and fit-for-purpose for Australian Government use. Contact Microsoft if you require assistance with a service that isn't currently on the CCSL at the PROTECTED level. - -Because Microsoft has a range of services certified on the CCSL at both the Unclassified DLM and PROTECTED classifications, the ISM requires that we undertake an IRAP assessment of our services at least every two years. Microsoft undertakes an annual assessment, which is also an opportunity to include additional services for consideration. - -## Certified PROTECTED gateway in Azure - -Microsoft doesn't operate a government-certified Secure Internet Gateway (SIG) because of restrictions on the number of SIGs permissible under the Gateway Consolidation Program. But the expected and necessary capabilities of a SIG can be configured within Microsoft Azure. - -Through the PROTECTED certification of Azure services, the ACSC has specific recommendations to agencies for connecting to Azure and when implementing network segmentation between security domains, for example, between PROTECTED and the Internet. These recommendations include the use of network security groups, firewalls, and virtual private networks. The ACSC recommends the use of a virtual gateway appliance. There are several virtual appliances available in Azure that have a physical equivalent on the ASD Evaluated Products List or have been evaluated against the Common Criteria Protection Profiles and are listed on the Common Criteria portal. These products are mutually recognised by ASD as a signatory to the Common Criteria Recognition Arrangement. - -Microsoft has produced guidance on implementing Azure-based capabilities that provide the security functions required to protect the boundary between different security domains, which, when combined, form the equivalent to a certified SIG. A number of partners can assist with design and implementation of these capabilities, and a number of partner solutions are available that do the same. - -## Security clearances and citizenship of Microsoft support personnel - -Microsoft operates our services globally with screened and trained security personnel. Personnel that have unescorted physical access to facilities in Sydney and Melbourne have Australian Government Baseline security clearances. 
Personnel within the Australia Central and Australia Central 2 regions have minimum Negative Vetting 1 (NV1) clearances (as appropriate for SECRET data). These clearance requirements provide additional assurance to customers that personnel within data centres operating Azure are highly trustworthy. - -Microsoft has a zero standing access policy with access granted through a system of just in time and just enough administration based on Azure role-based access control (Azure RBAC). In the vast majority of cases, our administrators don't require access or privileges to customer data in order to troubleshoot and maintain the service. High degrees of automation and scripting of tasks for remote execution negate the need for direct access to customer data. - -The Attorney General's Department has confirmed that Microsoft's personnel security policies and procedures within Azure are consistent with the intent of the PSPF Access to Information provisions in INFOSEC-9. - -## Store International Traffic of Arms Regulations (ITAR) or Export Administration Regulations (EAR) data - -The Azure technical controls that assist customers with meeting their obligations for export-controlled data are the same globally in Azure. Importantly, there's no formal assessment and certification framework for export-controlled data. - -For Azure Government and Office 365 US Government for Defense, we've put additional contractual and process measures in place to support customers subject to export controls. Those additional contractual clauses and the guaranteed U.S. national support and administration of the Azure regions isn't in place for Australia. - -That doesn't mean that Azure in Australia can't be used for ITAR/EAR, but you need to clearly understand the restrictions imposed on you through your export license. You also must implement additional protections to meet those obligations before you use Azure to store that data. For example, you might need to: - -- Build nationality as an attribute into Azure Active Directory. -- Use Azure Information Protection to enforce encryption rules over the data and limit it to only U.S. and whatever other nationalities are included on the export license. -- Encrypt all data on-premises before you store it in Azure by using a customer key or Hold Your Own Key for ITAR data. - -Because ITAR isn't a formal certification, you need to understand what the restrictions and limitations associated with the export license are. Then you can work through whether there are sufficient controls in Azure to meet those requirements. In this case, one of the issues to closely consider is the access by our engineers who might not be a nationality approved on the export license. - -## Next steps - - For ISM-compliant configuration and implementation of VPN connectivity to Azure Australia, see [Azure VPN Gateway](vpn-gateway.md). diff --git a/articles/azure-australia/system-monitor.md b/articles/azure-australia/system-monitor.md deleted file mode 100644 index 13bc62d60352a..0000000000000 --- a/articles/azure-australia/system-monitor.md +++ /dev/null @@ -1,149 +0,0 @@ ---- -title: System monitoring for security in Azure Australia -description: Guidance on configuring System Monitoring within the Australian regions to meet the specific requirements of Australian Government policy, regulations, and legislation. 
-author: emilyre -ms.service: azure-australia -ms.topic: conceptual -ms.date: 07/22/2019 -ms.author: yvettep ---- - -# System monitoring for security in Azure Australia - -Having robust security strategies that include real-time monitoring and routine security assessments are critical for you to enhance the day to day operational security of your IT environments, including cloud. - -Cloud security is a joint effort between the customer and the cloud provider. There are four services which Microsoft Azure provides to facilitate these requirements with consideration to the recommendations contained within the [Australian Cyber Security Centre's (ACSC) Information Security Manual Controls](https://acsc.gov.au/infosec/ism/index.htm) (ISM), specifically, the implementation of centralised event logging, event log auditing, and security vulnerability assessment and management. The Microsoft Azure services are: - -* Microsoft Defender for Cloud -* Azure Monitor -* Azure Advisor -* Azure Policy - -The ACSC recommends that you use these services for **PROTECTED** data. By using these services, you can proactively monitor and analyse your IT environments, and make informed decisions on where to best allocate resources to enhance your security. Each of these services is part of a combined solution to provide you with the best insight, recommendations, and protection possible. - -## Microsoft Defender for Cloud - -[Microsoft Defender for Cloud](../security-center/security-center-introduction.md) provides a unified security management console that you use to monitor and enhance the security of Azure resources and your hosted data. Microsoft Defender for Cloud includes Secure Score, a score based on an analysis of the state of best practice configuration from Azure Advisor and the overall compliance of Azure Policy. - -Microsoft Defender for Cloud provides Azure customers with the following features: - -* Security policy, assessment, and recommendations -* Security event collection and search -* Access and application controls -* Advanced Threat Detection -* Just-in-time Virtual Machines access control -* Hybrid Security - -The scope of resources monitored by Microsoft Defender for Cloud can be expanded to include supported on-premises resources in a hybrid-cloud environment. This includes on-premises resources currently being monitored by a supported version of System Center Operations Manager. - -Defender for Cloud's enhanced security features provide cloud-based security controls required by the [ASD Essential 8](https://acsc.gov.au/publications/protect/essential-eight-explained.htm). These include application filtering and restriction of administrative privilege via just-in-time access. - -### Azure Monitor - -[Azure Monitor](../azure-monitor/overview.md) is the centralized logging solution for all Azure Resources, and includes Log Analytics and Application Insights. Two key data types are collected from Azure resources: logs and metrics. Once collected in Azure Monitor, logging information can be used by a wide range of tools and for a variety of purposes. - -![Azure Monitor Overview](media/overview.png) - -Azure Monitor also includes the "Azure Activity Log". The SActivity Log stores all subscription level events that have occurred within Azure. It allows Azure customers to see the "who, what and when" behind operations undertaken on their Azure resources. Both resource based logging sent to Azure Monitor and Azure Activity Log events can be analysed using the in-built Kusto query language. 
These logs can then be exported, used to create custom dashboards and views, and configured to trigger alerts and notifications. - -### Azure Advisor - -[Azure Advisor](../advisor/advisor-overview.md) analyses supported Azure resources, system-generated log files, and current resource configurations within your Azure subscription. The analysis provided in Azure Advisor is generated in real time and based upon Microsoft's recommended best practices. Any supported Azure resources added to your environment will be analysed and appropriate recommendations will be provided. Azure Advisor recommendations are categorised into four best practice categories: - -* Security -* High Availability -* Performance -* Cost - -Security recommendations generated by Azure Advisor form part of the overall security analysis provided by Microsoft Defender for Cloud. - -The information gathered by Azure Advisor provides administrators with: - -* Insight into resource configuration that does not meet recommended best practice -* Guidance on specific remediation actions to undertake -* Rankings indicating which remediation actions should be undertaken as a high priority - -### Azure Policy - -[Azure Policy](../governance/policy/overview.md) provides the ability to apply rules that govern the types of Azure resources and their allowed configuration. Policy can be used to control resource creation and configuration, or it can be used to audit configuration settings across an environment. These audit results can be used to form the basis of remediation activities. Azure Policy differs from Azure role-based access control (Azure RBAC); Azure Policy is used to restrict resources and their configuration, Azure RBAC is used to restrict privileged access to Azure users. - -Whether the specific policy is being enforced or the effect of the policy is being audited, policy compliance is continually monitored, and overall and resource-specific compliance information is provided to administrators. Azure Policy compliance data is provided to Microsoft Defender for Cloud and forms part of the Secure Score. - -## Key design considerations - -When implementing an event log strategy, the ACSC ISM highlights the following considerations: - -* Centralised logging facilities -* Specific events to be logged -* Event log protection -* Event log retention -* Event log auditing - -In additional to collecting and managing logs, the ISM also recommends routine vulnerability assessment of an organisation's IT environment. - -### Centralised logging - -Any logging solution should, wherever possible, consolidate captured logs into a single data repository. This not only reduces operational complexity and prevents the creation of multiple data silos, it enables data collected from multiple sources to be analysed together allowing any correlating events to be identified. This is critical for detecting and managing the scope of any cyber security incidents. - -This requirement is met for all Azure customers with Azure Monitor. This offering not only provides a centralised logging repository in Azure for all Azure resources, it also enables you to stream your data to an Azure Event Hub. Azure Event Hubs provides a fully managed, real-time data ingestion service. Once Azure Monitor data is streamed to an Azure Event Hub, the data can also be easily connected to existing supported Security information and event management (SIEM) repositories and additional third party monitoring tools. 
- -Microsoft also offers its own Azure native SIEM solution, Microsoft Sentinel. Microsoft Sentinel supports a wide variety of data connectors and can be used to monitor security events across an entire enterprise. By combining the data from supported [data connectors](../sentinel/connect-data-sources.md), Microsoft Sentinel's built-in machine learning, and the Kusto query language, security administrators are provided with a single solution for alert detection, threat visibility, proactive hunting, and threat response. Microsoft Sentinel also provides a hunting and notebook feature that allows security administrators to record all the steps undertaken as part of a security investigation in a reusable playbook that can be shared within an organisation. Security administrators can also use the built-in [User Analytics](../sentinel/overview.md) to investigate the actions of a single nominated user. - -### Logged events and log detail - -The ISM provides a detailed list of event log types that should be included in any logging strategy. Any captured logs must contain sufficient detail to be of practical use in conducting analysis and investigations. - -The logs collected in Azure fall under one of the following three categories: - -* **Control and Management Logs**: These logs provide information about Azure Resource Manager CREATE, UPDATE, and DELETE operations. - -* **Data Plane Logs**: These contain events raised as part of Azure resource usage, and include sources such as the Windows System, Security, and Application event logs. - -* **Processed Events**: These events contain information about events and alerts that have been automatically processed on the customer's behalf by Azure. An example of a Processed Event is a Microsoft Defender for Cloud Alert. - -Azure virtual machine monitoring is enhanced by the deployment of the virtual machine agent for both Windows and Linux. This markedly increases the breadth of logging information gathered. Deployment of this agent can be configured to occur automatically via Microsoft Defender for Cloud. - -Microsoft provides detailed information about Azure resource-specific logs and their [schemas](../security/fundamentals/log-audit.md). - -### Log retention and protection - -Event logs must be stored securely for the required retention period. The ISM advises that logs are retained for a minimum of seven years. Azure provides a number of means to ensure the long life of your collected logs. By default, Azure log events are stored for 90 days. Log data captured by Azure Monitor can be moved and stored on an Azure Storage account as required for long-term retention. Activity logs stored on an Azure Storage account can be retained for a set number of days, or indefinitely if necessary. - -Azure Storage accounts used to store Azure log events can be made geo-redundant and can be backed up using Azure Backup. Once captured by Azure Backup, any deletion of backups containing logs requires administrative approval, and backups marked for deletion are still held for 14 days, allowing for recovery. Azure Backup allows for 9999 copies of a protected instance, providing over 27 years of daily backups. - -Azure role-based access control (Azure RBAC) should be used to control access to resources used for Azure logging. Azure Monitor, Azure Storage accounts, and Azure Backup should be configured with Azure RBAC to ensure the security of the data contained within the logs.
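
As a minimal sketch of that recommendation, Azure RBAC can be scoped to the resources that hold log data. The group object IDs, resource names, and role choices below are illustrative assumptions only.

```powershell
# Grant a security team read-only access to the Log Analytics workspace
# that stores the collected logs (object ID and names are placeholders).
New-AzRoleAssignment -ObjectId "<security-team-group-object-id>" `
    -RoleDefinitionName "Log Analytics Reader" `
    -Scope "/subscriptions/<subscription-id>/resourceGroups/<rg>/providers/Microsoft.OperationalInsights/workspaces/<workspace-name>"

# Limit who can manage the storage account used for long-term log retention.
New-AzRoleAssignment -ObjectId "<log-admins-group-object-id>" `
    -RoleDefinitionName "Storage Account Contributor" `
    -Scope "/subscriptions/<subscription-id>/resourceGroups/<rg>/providers/Microsoft.Storage/storageAccounts/<storage-account-name>"
```
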
- -### Log auditing - -The true value of logs is realised once they are analysed. Using both automated and manual analysis, and being familiar with the available tools, will help you detect and manage breaches of organisational security policy and cyber security incidents. Azure Monitor provides a rich set of tools to analyse collected logs. The result of this analysis can then be shared between systems, visualised, or disseminated in multiple formats. - -Log data stored in Azure Monitor is kept in a Log Analytics workspace. All analysis begins with a query. Azure Monitor queries are written in the Kusto query language. Queries form the basis of all outputs from Azure Monitor, from Azure Dashboards to Alert Rules. - -![Azure Log Queries Overview](media/queries-overview.png) - -Auditing of logs can be enhanced through the use of Monitoring Solutions. These are pre-packaged solutions that contain collection logic, queries, and data visualisation views. Microsoft [provides](../azure-monitor/monitor-reference.md) a number of Monitoring Solutions, and additional solutions from product vendors can be found in the Azure Marketplace. - -### Vulnerability assessment and management - -The ISM notes that routine vulnerability assessment and management are essential. Your IT environment is constantly evolving, and the external security threat is endlessly changing. With Microsoft Defender for Cloud, you can run automated vulnerability assessments and get guidance on how to plan and perform remediation activities. - -Secure Score in Microsoft Defender for Cloud gives you a list of recommendations that, when applied, will improve the security of your environment. The list is sorted by the impact on the overall Secure Score from highest to lowest. Ordering the list by impact allows you to focus on the highest priority recommendations that present the most value in enhancing your security. - -Azure Policy also plays a key part in the ongoing vulnerability assessment. The types of policy available in Azure Policy range from enforcing resource tags and values, to restricting the Azure regions in which resources can be created, to blocking the creation of particular resource types altogether. A set of Azure policies can be grouped into Initiatives. Initiatives are used to apply related Azure policies that, when applied together as a group, form the basis of a specific security or compliance objective. - -Azure Policy has a library of built-in policy definitions that is constantly growing. The Azure portal also gives you the option to author your own custom Azure Policy definitions. Once you find a policy in the existing library or create a new one, you can then assign the policy to Azure resources. These assignments can be [scoped](../governance/policy/tutorials/create-and-manage.md) at various levels in the resource management hierarchy. Policy assignment is inherited, meaning all child resources within a scope receive the same policy assignment. Resources can also be excluded from scoped policy assignment as required. - -All deployed Azure policies contribute to an organisation's Secure Score. In a highly bespoke environment, custom Azure Policy definitions can be created and deployed to provide audit information tailored to specific workloads.
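
For illustration, a built-in policy definition might be assigned at subscription scope with Azure PowerShell as in the following sketch. The policy display name, assignment names, regions, and the property path used to filter the definition are assumptions for the example only and can differ between Az module versions.

```powershell
# Look up a built-in definition by display name (illustrative; the property
# path may be $_.DisplayName in newer Az.Resources versions).
$definition = Get-AzPolicyDefinition |
    Where-Object { $_.Properties.DisplayName -eq "Allowed locations" }

# Assign the definition at subscription scope so all child resources inherit it.
New-AzPolicyAssignment -Name "allowed-locations-au" `
    -DisplayName "Restrict resources to Australian regions" `
    -PolicyDefinition $definition `
    -Scope "/subscriptions/<subscription-id>" `
    -PolicyParameterObject @{ listOfAllowedLocations = @("australiaeast", "australiasoutheast", "australiacentral") }
```
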
- -## Getting started - -To start with Microsoft Defender for Cloud and make full use of Azure Monitor, Advisor and Policy, Microsoft recommends the following initial steps: - -* Enable Microsoft Defender for Cloud -* Enable Microsoft Defender for Cloud's enhanced security features -* Enable automatic provisioning of the Log Analytics agent to supported machines -* Review, prioritize, and mitigate the security recommendations and alerts shown on the Defender for Cloud dashboards - -## Next steps - -Read [Azure Policy and Azure Blueprints](azure-policy.md) for details on implementing governance and control over your Azure Australia resources to ensure policy and regulatory compliance. diff --git a/articles/azure-australia/toc.yml b/articles/azure-australia/toc.yml deleted file mode 100644 index ff5a23774cb0b..0000000000000 --- a/articles/azure-australia/toc.yml +++ /dev/null @@ -1,55 +0,0 @@ -- name: Azure Australia documentation - href: index.yml -- name: Overview - items: - - name: What is Azure Australia? - href: australia-overview.md -- name: Concepts - items: - - name: Security in Azure Australia - items: - - name: Data Security - href: secure-your-data.md - - name: Azure VPN Gateway configuration - href: vpn-gateway.md - - name: Azure Key Vault - href: azure-key-vault.md - - name: Identity federation - href: identity-federation.md - - name: Azure RBAC and PIM - href: role-privileged.md - - name: Site Recovery and Backup for Essential 8 - href: recovery-backup.md - - name: Azure system monitoring - href: system-monitor.md - - name: Azure Policy and Azure Blueprints - href: azure-policy.md - - name: Gateways in Azure Australia - items: - - name: Gateway logging, auditing, and visibility - href: gateway-log-audit-visibility.md - - name: Gateway secure remote administration - href: gateway-secure-remote-administration.md - - name: Gateway data ingress control - href: gateway-ingress-traffic.md - - name: Gateway data egress control - href: gateway-egress-traffic.md -- name: Resources - items: - - name: Azure Australia security explained - href: security-explained.md - - name: Azure Australia additional resources - href: reference-library.md - - name: Australian Cyber Security Centre (ACSC) website - href: https://cyber.gov.au/ - - name: Australian Certified Cloud Services List (CCSL) - href: https://acsc.gov.au/infosec/irap/certified_clouds.htm - - name: Australia page on the Microsoft Service Trust Portal - href: https://aka.ms/au-irap - - name: Australia PROTECTED Blueprints for IaaS and PaaS web applications - href: https://aka.ms/au-protected - - name: Pricing - href: https://azure.microsoft.com/pricing/ - - name: Pricing calculator - href: https://azure.microsoft.com/pricing/calculator/ - diff --git a/articles/azure-australia/vpn-gateway.md b/articles/azure-australia/vpn-gateway.md deleted file mode 100644 index 27b613fc2dda5..0000000000000 --- a/articles/azure-australia/vpn-gateway.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -title: Azure VPN Gateway in Azure Australia -description: Implementing VPN Gateway in Azure Australia to be compliant with the ISM and effectively protect Australian Government agencies -author: emilyre -ms.service: azure-australia -ms.topic: article -ms.date: 07/22/2019 -ms.author: yvettep -ms.custom: devx-track-azurepowershell ---- - -# Azure VPN Gateway in Azure Australia - -A critical service with any public cloud is the secure connection of cloud resources and services to existing on-premises systems. 
The service that provides this capability in Azure is Azure VPN Gateway. This article outlines the key points to consider when you configure a VPN gateway to comply with the Australian Signals Directorate's (ASD) [Information Security Manual (ISM) controls](https://acsc.gov.au/infosec/ism/). - -A VPN gateway is used to send encrypted traffic between a virtual network in Azure and another network. Three scenarios are addressed by VPN gateways: - -- Site-to-site (S2S) -- Point-to-site (P2S) -- Network-to-network - -This article focuses on S2S VPN gateways. Diagram 1 shows an example S2S VPN gateway configuration. - -![VPN gateway with multi-site connections](media/vpngateway-multisite-connection-diagram.png) - -*Diagram 1 – Azure S2S VPN Gateway* - -## Key design considerations - -There are three networking options to connect Azure to Australian Government customers: - -- ICON -- Azure ExpressRoute -- Public internet - -The Australian Cyber Security Centre's [Consumer Guide for Azure](https://servicetrust.microsoft.com/viewpage/Australia) recommends that VPN Gateway (or an equivalent PROTECTED certified third-party service) is used in conjunction with the three networking options. This recommendation is to ensure that the connections comply with the ISM controls for encryption and integrity. - -### Encryption and integrity - -By default, the VPN negotiates the encryption and integrity algorithms and parameters during the connection establishment as part of the IKE handshakes. During the IKE handshake, the configuration and order of preference depends on whether the VPN gateway is the initiator or the responder. This designation is controlled via the VPN device. The final configuration of the connection is controlled by the configuration of the VPN device. For more information on validated VPN devices and their configuration, see [About VPN services](../vpn-gateway/vpn-gateway-about-vpn-devices.md). - -VPN gateways can control encryption and integrity by configuring a custom IPsec/IKE policy on the connection. - -### Resource operations - -VPN gateways create a connection between Azure and non-Azure environments over the public internet. The ISM has controls that relate to the explicit authorization of connections. By default, it's possible to use VPN gateways to create unauthorized tunnels into secure environments. It's critical that organizations use Azure role-based access control (Azure RBAC) to control who can create and modify VPN gateways and their connections. Azure has no built-in role to manage VPN gateways, so a custom role is required. - -Access to Owner, Contributor, and Network Contributor roles is tightly controlled. We also recommend that you use Azure Active Directory Privileged Identity Management for more granular access control. - -### High availability - -Azure VPN gateways can have multiple connections and support multiple on-premises VPN devices to the same on-premises environment. See Diagram 1. - -Virtual networks in Azure can have multiple VPN gateways that can be deployed in independent, active-passive, or active-active configurations. - -We recommend that you deploy all VPN gateways in a [highly available configuration](../vpn-gateway/vpn-gateway-highlyavailable.md). An example is two on-premises VPN devices connected to two VPN gateways in either active-passive or active-active mode. See Diagram 2. 
- -![VPN gateway redundant connections](media/dual-redundancy.png) - -*Diagram 2 – Active-active VPN gateways and two VPN devices* - -### Forced tunneling - -Forced tunneling redirects, or forces, all Internet-bound traffic back to the on-premises environment via the VPN gateway for inspection and auditing. Without forced tunneling, Internet-bound traffic from VMs in Azure traverses the Azure network infrastructure directly out to the public internet, without the option to inspect or audit the traffic. Forced tunneling is critical when an organization is required to use a Secure Internet Gateway (SIG) for an environment. - -## Detailed configuration - -### Service attributes - -VPN gateways for S2S connections configured for the Australian Government must have the following attributes: - -|Attribute | Must| -|--- | --- | -|gatewayType | "VPN"| -| - -Attribute settings required to comply with the ISM controls for PROTECTED are: - -|Attribute | Must| -|--- |---| -|vpnType |"RouteBased"| -|vpnClientConfiguration/vpnClientProtocols | "IkeV2"| -| - -Azure VPN gateways support a range of cryptographic algorithms from the IPsec and IKE protocol standards. The default policy sets maximum interoperability with a wide range of third-party VPN devices. As a result, it's possible that during the IKE handshake a noncompliant configuration might be negotiated. We highly recommend that you apply [custom IPsec/IKE policy](../vpn-gateway/vpn-gateway-ipsecikepolicy-rm-powershell.md) parameters to vpnClientConfiguration in VPN gateways to ensure the connections meet the ISM controls for on-premises environment connections to Azure. The key attributes are shown in the following table. - -|Attribute|Should|Must| -|---|---|---| -|saLifeTimeSeconds|<14,400 secs|>300 secs| -|saDataSizeKilobytes| |>1,024 KB| -|ipsecEncryption| |AES256-GCMAES256| -|ipsecIntegrity| |SHA256-GCMAES256| -|ikeEncryption| |AES256-GCMAES256| -|ikeIntegrity| |SHA256-GCMAES256| -|dhGroup|DHGroup14, DHGroup24, ECP256, ECP384|DHGroup2| -|pfsGroup|PFS2048, PFS24, ECP256, ECP384|| -| - -For dhGroup and pfsGroup in the previous table, ECP256 and ECP384 are preferred even though other settings can be used. - -### Related services - -When you design and configure an Azure VPN gateway, a number of related services must also exist and be configured. - -|Service | Action required| -|--- | ---| -|Virtual network | VPN gateways are attached to a virtual network. Create a virtual network before you create a new VPN gateway.| -|Public IP address | S2S VPN gateways need a public IP address to establish connectivity between the on-premises VPN device and the VPN gateway. Create a public IP address before you create a S2S VPN gateway.| -|Subnet | Create a subnet of the virtual network for the VPN gateway.| -| - -## Implementation steps using PowerShell - -### Azure role-based access control - -1. Create a custom role. An example is virtualNetworkGateway Contributor. Create a role to be assigned to users who will be allowed to create and modify VPN gateways. The custom role should allow the following operations: - - Microsoft.Network/virtualNetworkGateways/* - Microsoft.Network/connections/* - Microsoft.Network/localnetworkgateways/* - Microsoft.Network/virtualNetworks/subnets/* - Microsoft.Network/publicIPAddresses/* - Microsoft.Network/publicIPPrefixes/* - Microsoft.Network/routeTables/* - -2. Add the custom role to users who are allowed to create and manage VPN gateways and connections to on-premises environments. 
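
As a minimal sketch of the custom role described in the preceding steps, one approach is to clone an existing role definition with Azure PowerShell and replace its actions. The role name, description, and subscription ID below are placeholders, not prescribed values.

```powershell
# Start from an existing built-in role and reuse its object shape.
$role = Get-AzRoleDefinition -Name "Network Contributor"
$role.Id = $null
$role.Name = "VirtualNetworkGateway Contributor"
$role.Description = "Can create and manage VPN gateways and their connections."

# Replace the actions with the operations listed above.
$role.Actions.Clear()
$role.Actions.Add("Microsoft.Network/virtualNetworkGateways/*")
$role.Actions.Add("Microsoft.Network/connections/*")
$role.Actions.Add("Microsoft.Network/localnetworkgateways/*")
$role.Actions.Add("Microsoft.Network/virtualNetworks/subnets/*")
$role.Actions.Add("Microsoft.Network/publicIPAddresses/*")
$role.Actions.Add("Microsoft.Network/publicIPPrefixes/*")
$role.Actions.Add("Microsoft.Network/routeTables/*")
$role.AssignableScopes = @("/subscriptions/<subscription-id>")

# Create the custom role so it can be assigned to the relevant users.
New-AzRoleDefinition -Role $role
```
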
- -### Create a VPN gateway - -These steps assume that you already created a virtual network. - -1. Create a new public IP address. -2. Create a VPN gateway subnet. -3. Create a VPN gateway IP configuration. -4. Create a VPN gateway. -5. Create a local network gateway for the on-premises VPN device. -6. Create an IPsec policy. This step assumes that you're using custom IPsec/IKE policies. -7. Create a connection between the VPN gateway and a local network gateway by using the IPsec policy. - -### Enforce tunneling - -If forced tunneling is required, before you create the VPN gateway: - -1. Create a route table and route rules. -2. Associate a route table with the appropriate subnets. - -After you create the VPN gateway: - -- Set GatewayDefaultSite to the on-premises environment on the VPN gateway. - -### Example PowerShell script - -The following example PowerShell script creates a custom IPsec/IKE policy that complies with the ISM controls for the Australian PROTECTED security classification. - -It assumes that the virtual network, VPN gateway, and local gateways exist. - -#### Create an IPsec/IKE policy - -The following sample script creates an IPsec/IKE policy with the following algorithms and parameters: - -- IKEv2: AES256, SHA256, DHGroup ECP256 -- IPsec: AES256, SHA256, PFS ECP256, SA Lifetime 14,400 seconds, and 102,400,000 KB - -```powershell -$custompolicy = New-AzIpsecPolicy ` - -IkeEncryption AES256 ` - -IkeIntegrity SHA256 ` - -DhGroup ECP256 ` - -IpsecEncryption AES256 ` - -IpsecIntegrity SHA256 ` - -PfsGroup ECP256 ` - -SALifeTimeSeconds 14400 ` - -SADataSizeKilobytes 102400000 -```
For steps on how to configure your VPN gateway, see: - -- [Azure virtual network gateway overview](../vpn-gateway/index.yml) -- [What is VPN Gateway?](../vpn-gateway/vpn-gateway-about-vpngateways.md) -- [Create a virtual network with a site-to-site VPN connection by using PowerShell](../vpn-gateway/vpn-gateway-create-site-to-site-rm-powershell.md) -- [Create and manage a VPN gateway](../vpn-gateway/tutorial-create-gateway-portal.md) diff --git a/articles/azure-cache-for-redis/TOC.yml b/articles/azure-cache-for-redis/TOC.yml index 928c25ee830da..106b2d0751a6d 100644 --- a/articles/azure-cache-for-redis/TOC.yml +++ b/articles/azure-cache-for-redis/TOC.yml @@ -151,9 +151,15 @@ href: cache-how-to-manage-redis-cache-powershell.md - name: Deploy and Manage using Azure CLI href: cli-samples.md + - name: Create Redis cache - Bicep + displayName: ARM, Resource Manager, Template + href: cache-redis-cache-bicep-provision.md - name: Create Redis cache - ARM template displayName: Resource Manager href: cache-redis-cache-arm-provision.md + - name: Create Web App with Redis cache - Bicep + displayName: ARM, Resource Manager, Template + href: cache-web-app-bicep-with-redis-cache-provision.md - name: Create Web App with Redis cache - ARM template displayName: Resource Manager href: cache-web-app-arm-with-redis-cache-provision.md diff --git a/articles/azure-cache-for-redis/cache-configure.md b/articles/azure-cache-for-redis/cache-configure.md index f412adbb1b3c2..195c2954e42f5 100644 --- a/articles/azure-cache-for-redis/cache-configure.md +++ b/articles/azure-cache-for-redis/cache-configure.md @@ -4,9 +4,9 @@ description: Understand the default Redis configuration for Azure Cache for Redi author: flang-msft ms.service: cache ms.topic: conceptual -ms.date: 03/22/2022 +ms.date: 06/07/2022 ms.author: franlanglois -ms.custom: devx-track-azurepowershell + --- @@ -138,9 +138,9 @@ Use the **Maxmemory policy**, **maxmemory-reserved**, and **maxfragmentationmemo For more information about `maxmemory` policies, see [Eviction policies](https://redis.io/topics/lru-cache#eviction-policies). -The **maxmemory-reserved** setting configures the amount of memory, in MB per instance in a cluster, that is reserved for non-cache operations, such as replication during failover. Setting this value allows you to have a more consistent Redis server experience when your load varies. This value should be set higher for workloads that write large amounts of data. When memory is reserved for such operations, it's unavailable for storage of cached data. The minimum and maximum values on the slider are 10% and 60%, shown in megabytes. You must set the value in that range. +The **maxmemory-reserved** setting configures the amount of memory in MB per instance in a cluster that is reserved for non-cache operations, such as replication during failover. Setting this value allows you to have a more consistent Redis server experience when your load varies. This value should be set higher for workloads that write large amounts of data. When memory is reserved for such operations, it's unavailable for storage of cached data. The minimum and maximum values on the slider are 10% and 60%, shown in megabytes. You must set the value in that range. -The **maxfragmentationmemory-reserved** setting configures the amount of memory, in MB per instance in a cluster, that is reserved to accommodate for memory fragmentation. 
When you set this value, the Redis server experience is more consistent when the cache is full or close to full and the fragmentation ratio is high. When memory is reserved for such operations, it's unavailable for storage of cached data. The minimum and maximum values on the slider are 10% and 60%, shown in megabytes. You must set the value in that range. +The **maxfragmentationmemory-reserved** setting configures the amount of memory in MB per instance in a cluster that is reserved to accommodate for memory fragmentation. When you set this value, the Redis server experience is more consistent when the cache is full or close to full and the fragmentation ratio is high. When memory is reserved for such operations, it's unavailable for storage of cached data. The minimum and maximum values on the slider are 10% and 60%, shown in megabytes. You must set the value in that range. When choosing a new memory reservation value (**maxmemory-reserved** or **maxfragmentationmemory-reserved**), consider how this change might affect a cache that is already running with large amounts of data in it. For instance, if you have a 53-GB cache with 49 GB of data, then change the reservation value to 8 GB, this change drops the max available memory for the system down to 45 GB. If either your current `used_memory` or your `used_memory_rss` values are higher than the new limit of 45 GB, then the system will have to evict data until both `used_memory` and `used_memory_rss` are below 45 GB. Eviction can increase server load and memory fragmentation. For more information on cache metrics such as `used_memory` and `used_memory_rss`, see [Available metrics and reporting intervals](cache-how-to-monitor.md#available-metrics-and-reporting-intervals). @@ -294,11 +294,11 @@ The settings in the **Administration** section allow you to perform the followin ### Import/Export -Import/Export is an Azure Cache for Redis data management operation, which allows you to import and export data in the cache by importing and exporting an Azure Cache for Redis Database (RDB) snapshot from a premium cache to a page blob in an Azure Storage Account. Import/Export enables you to migrate between different Azure Cache for Redis instances or populate the cache with data before use. +Import/Export is an Azure Cache for Redis data management operation that allows you to import and export data in the cache. You can import and export an Azure Cache for Redis Database (RDB) snapshot from a premium cache to a page blob in an Azure Storage Account. Use Import/Export to migrate between different Azure Cache for Redis instances or populate the cache with data before use. Import can be used to bring Redis compatible RDB files from any Redis server running in any cloud or environment, including Redis running on Linux, Windows, or any cloud provider such as Amazon Web Services and others. Importing data is an easy way to create a cache with pre-populated data. During the import process, Azure Cache for Redis loads the RDB files from Azure storage into memory, and then inserts the keys into the cache. -Export allows you to export the data stored in Azure Cache for Redis to Redis compatible RDB files. You can use this feature to move data from one Azure Cache for Redis instance to another or to another Redis server. During the export process, a temporary file is created on the VM that hosts the Azure Cache for Redis server instance, and the file is uploaded to the designated storage account. 
When the export operation completes with either a status of success or failure, the temporary file is deleted. +Export allows you to export the data stored in Azure Cache for Redis to Redis compatible RDB files. You can use this feature to move data from one Azure Cache for Redis instance to another or to another Redis server. During the export process, a temporary file is created on the VM that hosts the Azure Cache for Redis server instance. The temporary file is uploaded to the designated storage account. When the export operation completes with either a status of success or failure, the temporary file is deleted. > [!IMPORTANT] > Import/Export is only available for Premium tier caches. For more information and instructions, see [Import and Export data in Azure Cache for Redis](cache-how-to-import-export-data.md). @@ -388,8 +388,8 @@ New Azure Cache for Redis instances are configured with the following default Re | --- | --- | --- | | `databases` |16 |The default number of databases is 16 but you can configure a different number based on the pricing tier.1 The default database is DB 0, you can select a different one on a per-connection basis using `connection.GetDatabase(dbid)` where `dbid` is a number between `0` and `databases - 1`. | | `maxclients` |Depends on the pricing tier2 |This value is the maximum number of connected clients allowed at the same time. Once the limit is reached Redis closes all the new connections, returning a 'max number of clients reached' error. | -| `maxmemory-reserved` | 10% of `maxmemory` | The allowed range for `maxmemory-reserved` is 10% - 60% of `maxmemory`. If you try to set these values lower than 10% or higher than 60%, they are re-evaluated and set to the 10% minimum and 60% maximum. The values are rendered in megabytes. | -| `maxfragmentationmemory-reserved` | 10% of `maxmemory` | The allowed range for `maxfragmentationmemory-reserved` is 10% - 60% of `maxmemory`. If you try to set these values lower than 10% or higher than 60%, they are re-evaluated and set to the 10% minimum and 60% maximum. The values are rendered in megabytes. | +| `maxmemory-reserved` | 10% of `maxmemory` | The allowed range for `maxmemory-reserved` is 10% - 60% of `maxmemory`. If you try to set these values lower than 10% or higher than 60%, they're reevaluated and set to the 10% minimum and 60% maximum. The values are rendered in megabytes. | +| `maxfragmentationmemory-reserved` | 10% of `maxmemory` | The allowed range for `maxfragmentationmemory-reserved` is 10% - 60% of `maxmemory`. If you try to set these values lower than 10% or higher than 60%, they're reevaluated and set to the 10% minimum and 60% maximum. The values are rendered in megabytes. | | `maxmemory-policy` |`volatile-lru` | Maxmemory policy is the setting used by the Redis server to select what to remove when `maxmemory` (the size of the cache that you selected when you created the cache) is reached. With Azure Cache for Redis, the default setting is `volatile-lru`. This setting removes the keys with an expiration set using an LRU algorithm. This setting can be configured in the Azure portal. For more information, see [Memory policies](#memory-policies). | | `maxmemory-samples` |3 |To save memory, LRU and minimal TTL algorithms are approximated algorithms instead of precise algorithms. By default Redis checks three keys and picks the one that was used less recently. | | `lua-time-limit` |5,000 |Max execution time of a Lua script in milliseconds. 
If the maximum execution time is reached, Redis logs that a script is still in execution after the maximum allowed time, and starts to reply to queries with an error. | diff --git a/articles/azure-cache-for-redis/cache-how-to-geo-replication.md b/articles/azure-cache-for-redis/cache-how-to-geo-replication.md index 8015b9fda609b..d0f40a6ee5e0c 100644 --- a/articles/azure-cache-for-redis/cache-how-to-geo-replication.md +++ b/articles/azure-cache-for-redis/cache-how-to-geo-replication.md @@ -4,13 +4,13 @@ description: Learn how to replicate your Azure Cache for Redis Premium instances author: flang-msft ms.service: cache ms.topic: conceptual -ms.date: 02/08/2021 +ms.date: 05/24/2022 ms.author: franlanglois --- # Configure geo-replication for Premium Azure Cache for Redis instances -In this article, you'll learn how to configure a geo-replicated Azure Cache using the Azure portal. +In this article, you learn how to configure a geo-replicated Azure Cache using the Azure portal. Geo-replication links together two Premium Azure Cache for Redis instances and creates a data replication relationship. These cache instances are typically located in different Azure regions, though that isn't required. One instance acts as the primary, and the other as the secondary. The primary handles read and write requests and propagate changes to the secondary. This process continues until the link between the two instances is removed. @@ -154,6 +154,8 @@ Yes, geo-replication of caches in VNets is supported with caveats: - Geo-replication between caches in different VNets is also supported. - If the VNets are in the same region, you can connect them using [VNet peering](../virtual-network/virtual-network-peering-overview.md) or a [VPN Gateway VNet-to-VNet connection](../vpn-gateway/vpn-gateway-howto-vnet-vnet-resource-manager-portal.md). - If the VNets are in different regions, geo-replication using VNet peering is supported. A client VM in VNet 1 (region 1) isn't able to access the cache in VNet 2 (region 2) using its DNS name because of a constraint with Basic internal load balancers. For more information about VNet peering constraints, see [Virtual Network - Peering - Requirements and constraints](../virtual-network/virtual-network-manage-peering.md#requirements-and-constraints). We recommend using a VPN Gateway VNet-to-VNet connection. + +To configure your VNet effectively and avoid geo-replication issues, you must configure both the inbound and outbound ports correctly. For more information on avoiding the most common VNet misconfiguration issues, see [Geo-replication peer port requirements](cache-how-to-premium-vnet.md#geo-replication-peer-port-requirements). Using [this Azure template](https://azure.microsoft.com/resources/templates/redis-vnet-geo-replication/), you can quickly deploy two geo-replicated caches into a VNet connected with a VPN Gateway VNet-to-VNet connection. 
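
For example, a geo-replication link between two existing Premium caches might also be created with Azure PowerShell, as in the following sketch. The cache names are placeholders, and the cmdlet usage should be confirmed against the current Az.RedisCache module before relying on it.

```azurepowershell
# Link two existing Premium caches (names are placeholders). The primary keeps
# serving reads and writes while the secondary receives replicated data.
New-AzRedisCacheLink -PrimaryServerName "contoso-cache-eastus" -SecondaryServerName "contoso-cache-westus"

# Check the state of the geo-replication link for the primary cache.
Get-AzRedisCacheLink -Name "contoso-cache-eastus"
```
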
diff --git a/articles/azure-cache-for-redis/cache-how-to-import-export-data.md b/articles/azure-cache-for-redis/cache-how-to-import-export-data.md index 8bacf73f7d492..8fb8eab3fa07f 100644 --- a/articles/azure-cache-for-redis/cache-how-to-import-export-data.md +++ b/articles/azure-cache-for-redis/cache-how-to-import-export-data.md @@ -5,7 +5,7 @@ author: flang-msft ms.service: cache ms.topic: conceptual -ms.date: 07/31/2017 +ms.date: 06/07/2022 ms.author: franlanglois --- @@ -22,8 +22,6 @@ This article provides a guide for importing and exporting data with Azure Cache > [!IMPORTANT] > Import/Export is only available for [Premium tier](cache-overview.md#service-tiers) caches. -> -> ## Import @@ -37,15 +35,19 @@ Use import to bring Redis compatible RDB files from any Redis server running in 1. To import one or more exported cache blobs, [browse to your cache](cache-configure.md#configure-azure-cache-for-redis-settings) in the Azure portal and select **Import data** from the **Resource menu**. ![Import data](./media/cache-how-to-import-export-data/cache-import-data.png) + 2. Select **Choose Blob(s)** and select the storage account that contains the data to import. ![Choose storage account](./media/cache-how-to-import-export-data/cache-import-choose-storage-account.png) + 3. Select the container that contains the data to import. ![Choose container](./media/cache-how-to-import-export-data/cache-import-choose-container.png) + 4. Select one or more blobs to import by selecting the area to the left of the blob name, and then **Select**. ![Choose blobs](./media/cache-how-to-import-export-data/cache-import-choose-blobs.png) + 5. Select **Import** to begin the import process. > [!IMPORTANT] @@ -78,9 +80,11 @@ Export allows you to export the data stored in Azure Cache for Redis to Redis co > ![Storage account](./media/cache-how-to-import-export-data/cache-export-data-choose-account.png) + 3. Choose the blob container you want, then **Select**. To use new a container, select **Add Container** to add it first and then select it from the list. ![On Containers for contoso55, the + Container option is highlighted. There is one container in the list, cachesaves, and it is selected and highlighted. The Selection option is selected and highlighted.](./media/cache-how-to-import-export-data/cache-export-data-container.png) + 4. Type a **Blob name prefix** and select **Export** to start the export process. The blob name prefix is used to prefix the names of files generated by this export operation. ![Export](./media/cache-how-to-import-export-data/cache-export-data.png) @@ -112,7 +116,7 @@ Import/Export is available only in the premium pricing tier. ### Can I import data from any Redis server? -Yes, you can importing data exported from Azure Cache for Redis instances, and you can import RDB files from any Redis server running in any cloud or environment. The environments include Linux, Windows, or cloud providers such as Amazon Web Services. To do import this data, upload the RDB file from the Redis server you want into a page or block blob in an Azure Storage Account. Then, import it into your premium Azure Cache for Redis instance. For example, you might want to export the data from your production cache and import it into a cache that is used as part of a staging environment for testing or migration. +Yes, you can import data that was exported from Azure Cache for Redis instances. You can import RDB files from any Redis server running in any cloud or environment. 
The environments include Linux, Windows, or cloud providers such as Amazon Web Services. To import this data, upload the RDB file from the Redis server you want into a page or block blob in an Azure Storage Account. Then, import it into your premium Azure Cache for Redis instance. For example, you might want to export the data from your production cache and import it into a cache that is used as part of a staging environment for testing or migration. > [!IMPORTANT] > To successfully import data exported from Redis servers other than Azure Cache for Redis when using a page blob, the page blob size must be aligned on a 512 byte boundary. For sample code to perform any required byte padding, see [Sample page blob upload](https://github.com/JimRoberts-MS/SamplePageBlobUpload). diff --git a/articles/azure-cache-for-redis/cache-how-to-premium-persistence.md b/articles/azure-cache-for-redis/cache-how-to-premium-persistence.md index 560bb9d173e7d..b3260ad5ce985 100644 --- a/articles/azure-cache-for-redis/cache-how-to-premium-persistence.md +++ b/articles/azure-cache-for-redis/cache-how-to-premium-persistence.md @@ -2,11 +2,11 @@ title: Configure data persistence - Premium Azure Cache for Redis description: Learn how to configure and manage data persistence your Premium tier Azure Cache for Redis instances author: flang-msft - ms.author: franlanglois ms.service: cache ms.topic: conceptual ms.date: 05/17/2022 + --- # Configure data persistence for a Premium Azure Cache for Redis instance @@ -17,7 +17,7 @@ Azure Cache for Redis offers Redis persistence using the Redis database (RDB) an - **RDB persistence** - When you use RDB persistence, Azure Cache for Redis persists a snapshot of your cache in a binary format. The snapshot is saved in an Azure Storage account. The configurable backup frequency determines how often to persist the snapshot. If a catastrophic event occurs that disables both the primary and replica cache, the cache is reconstructed using the most recent snapshot. Learn more about the [advantages](https://redis.io/topics/persistence#rdb-advantages) and [disadvantages](https://redis.io/topics/persistence#rdb-disadvantages) of RDB persistence. - **AOF persistence** - When you use AOF persistence, Azure Cache for Redis saves every write operation to a log. The log is saved at least once per second into an Azure Storage account. If a catastrophic event occurs that disables both the primary and replica cache, the cache is reconstructed using the stored write operations. Learn more about the [advantages](https://redis.io/topics/persistence#aof-advantages) and [disadvantages](https://redis.io/topics/persistence#aof-disadvantages) of AOF persistence. -Azure Cache for Redis persistence features are intended to be used to restore data after data loss, not importing it to a new cache. You cannot import from AOF page blob backups to a new cache. To export data for importing back to a new cache, use the export RDB feature or automatic recurring RDB export. For more information on importing to a new cache, see [Import](cache-how-to-import-export-data.md#import). +Azure Cache for Redis persistence features are intended to be used to restore data after data loss, not to import it into a new cache. You can't import from AOF page blob backups to a new cache. To export data for importing back to a new cache, use the export RDB feature or automatic recurring RDB export. For more information on importing to a new cache, see [Import](cache-how-to-import-export-data.md#import).
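
As an illustrative sketch only, an RDB export from a Premium cache might look like the following PowerShell; the resource group, cache name, blob prefix, and container SAS URL are placeholders.

```azurepowershell
# Export an RDB snapshot from a Premium cache into a blob container.
# The -Container value is a SAS URL for the destination container.
Export-AzRedisCache -ResourceGroupName "exampleRG" `
    -Name "contoso-cache" `
    -Prefix "contoso-backup" `
    -Container "https://<storage-account>.blob.core.windows.net/<container><sas-token>"
```
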
> [!NOTE] > Importing from AOF page blob backups to a new cache is not a supported option. @@ -27,8 +27,6 @@ Persistence writes Redis data into an Azure Storage account that you own and man > [!NOTE] > > Azure Storage automatically encrypts data when it is persisted. You can use your own keys for the encryption. For more information, see [Customer-managed keys with Azure Key Vault](../storage/common/storage-service-encryption.md). -> -> ## Set up data persistence @@ -36,7 +34,7 @@ Persistence writes Redis data into an Azure Storage account that you own and man :::image type="content" source="media/cache-private-link/1-create-resource.png" alt-text="Create resource."::: -2. On the **New** page, select **Databases** and then select **Azure Cache for Redis**. +2. On the **Create a resource** page, select **Databases** and then select **Azure Cache for Redis**. :::image type="content" source="media/cache-private-link/2-select-cache.png" alt-text="Select Azure Cache for Redis."::: diff --git a/articles/azure-cache-for-redis/cache-how-to-version.md b/articles/azure-cache-for-redis/cache-how-to-version.md index 80ef530d3205f..0980a39f153a0 100644 --- a/articles/azure-cache-for-redis/cache-how-to-version.md +++ b/articles/azure-cache-for-redis/cache-how-to-version.md @@ -5,20 +5,24 @@ author: flang-msft ms.author: franlanglois ms.service: cache ms.topic: conceptual -ms.date: 10/07/2021 +ms.date: 06/03/2022 + --- # Set Redis version for Azure Cache for Redis -In this article, you'll learn how to configure the Redis software version to be used with your cache instance. Azure Cache for Redis offers the latest major version of Redis and at least one previous version. It will update these versions regularly as newer Redis software is released. You can choose between the two available versions. Keep in mind that your cache will be upgraded to the next version automatically if the version it's using currently is no longer supported. + +In this article, you'll learn how to configure the Redis software version to be used with your cache instance. Azure Cache for Redis offers the latest major version of Redis and at least one previous version. It will update these versions regularly as newer Redis software is released. You can choose between the two available versions. Keep in mind that your cache will be upgraded to the next version automatically if the version it's using currently is no longer supported. > [!NOTE] > At this time, Redis 6 does not support ACL, and geo-replication between a Redis 4 and 6 cache. > ## Prerequisites + * Azure subscription - [create one for free](https://azure.microsoft.com/free/) ## Create a cache using the Azure portal + To create a cache, follow these steps: 1. Sign in to the [Azure portal](https://portal.azure.com) and select **Create a resource**. @@ -26,9 +30,9 @@ To create a cache, follow these steps: 1. On the **New** page, select **Databases** and then select **Azure Cache for Redis**. :::image type="content" source="media/cache-create/new-cache-menu.png" alt-text="Select Azure Cache for Redis."::: - + 1. On the **Basics** page, configure the settings for your new cache. - + | Setting | Suggested value | Description | | ------------ | ------- | -------------------------------------------------- | | **Subscription** | Select your subscription. | The subscription under which to create this new Azure Cache for Redis instance. | @@ -36,21 +40,21 @@ To create a cache, follow these steps: | **DNS name** | Enter a globally unique name. 
| The cache name must be a string between 1 and 63 characters that contains only numbers, letters, or hyphens. The name must start and end with a number or letter, and can't contain consecutive hyphens. Your cache instance's *host name* will be *\.redis.cache.windows.net*. | | **Location** | Select a location. | Select a [region](https://azure.microsoft.com/regions/) near other services that will use your cache. | | **Cache type** | Select a [cache tier and size](https://azure.microsoft.com/pricing/details/cache/). | The pricing tier determines the size, performance, and features that are available for the cache. For more information, see [Azure Cache for Redis Overview](cache-overview.md). | - + 1. On the **Advanced** page, choose a Redis version to use. - + :::image type="content" source="media/cache-how-to-version/select-redis-version.png" alt-text="Redis version."::: -1. Select **Create**. - - It takes a while for the cache to create. You can monitor progress on the Azure Cache for Redis **Overview** page. When **Status** shows as **Running**, the cache is ready to use. +1. Select **Create**. + It takes a while for the cache to create. You can monitor progress on the Azure Cache for Redis **Overview** page. When **Status** shows as **Running**, the cache is ready to use. ## Create a cache using Azure PowerShell ```azurepowershell New-AzRedisCache -ResourceGroupName "ResourceGroupName" -Name "CacheName" -Location "West US 2" -Size 250MB -Sku "Standard" -RedisVersion "6" ``` + For more information on how to manage Azure Cache for Redis with Azure PowerShell, see [here](cache-how-to-manage-redis-cache-powershell.md) ## Create a cache using Azure CLI @@ -61,7 +65,8 @@ az redis create --resource-group resourceGroupName --name cacheName --location w For more information on how to manage Azure Cache for Redis with Azure CLI, see [here](cli-samples.md) ## Upgrade an existing Redis 4 cache to Redis 6 -Azure Cache for Redis supports upgrading your Redis cache server major version from Redis 4 to Redis 6. Please note that upgrading is permanent and it may cause a brief connection blip. As a precautionary step, we recommend exporting the data from your existing Redis 4 cache and testing your client application with a Redis 6 cache in a lower environment before upgrading. Please see [here](cache-how-to-import-export-data.md) for details on how to export. + +Azure Cache for Redis supports upgrading your Redis cache server major version from Redis 4 to Redis 6. Upgrading is permanent and it might cause a brief connection blip. As a precautionary step, we recommend exporting the data from your existing Redis 4 cache and testing your client application with a Redis 6 cache in a lower environment before upgrading. For more information, see [here](cache-how-to-import-export-data.md) for details on how to export. > [!NOTE] > Please note, upgrading is not supported on a cache with a geo-replication link, so you will have to manually unlink your cache instances before upgrading. @@ -69,6 +74,8 @@ Azure Cache for Redis supports upgrading your Redis cache server major version f To upgrade your cache, follow these steps: +### Upgrade using the Azure portal + 1. In the Azure portal, search for **Azure Cache for Redis**. Then, press enter or select it from the search suggestions. :::image type="content" source="media/cache-private-link/4-search-for-cache.png" alt-text="Search for Azure Cache for Redis."::: @@ -80,8 +87,8 @@ To upgrade your cache, follow these steps: 1. 
If your cache instance is eligible to be upgraded, you should see the following blue banner. If you wish to proceed, select the text in the banner. :::image type="content" source="media/cache-how-to-version/blue-banner-upgrade-cache.png" alt-text="Blue banner that says you can upgrade your Redis 6 cache with additional features and commands that enhance developer productivity and ease of use. Upgrading your cache instance cannot be reversed."::: - -1. A dialog box will then popup notifying you that upgrading is permanent and may cause a brief connection blip. Select yes if you would like to upgrade your cache instance. + +1. A dialog box displays a popup notifying you that upgrading is permanent and might cause a brief connection blip. Select **Yes** if you would like to upgrade your cache instance. :::image type="content" source="media/cache-how-to-version/dialog-version-upgrade.png" alt-text="Dialog with more information about upgrading your cache."::: @@ -89,20 +96,33 @@ To upgrade your cache, follow these steps: :::image type="content" source="media/cache-how-to-version/upgrade-status.png" alt-text="Overview shows status of cache being upgraded."::: +### Upgrade using Azure CLI + +To upgrade a cache from 4 to 6 using the Azure CLI, use the following command: + +```azurecli-interactive +az redis update --name cacheName --resource-group resourceGroupName --set redisVersion=6 +``` + +### Upgrade using PowerShell + +To upgrade a cache from 4 to 6 using PowerShell, use the following command: + +```powershell-interactive +Set-AzRedisCache -Name "CacheName" -ResourceGroupName "ResourceGroupName" -RedisVersion "6" +``` + ## FAQ ### What features aren't supported with Redis 6? -At this time, Redis 6 does not support ACL, and geo-replication between a Redis 4 and 6 cache. +At this time, Redis 6 doesn't support ACL, and geo-replication between a Redis 4 and 6 cache. ### Can I change the version of my cache after it's created? -You can upgrade your existing Redis 4 caches to Redis 6, please see [here](#upgrade-an-existing-redis-4-cache-to-redis-6) for details. Please note upgrading your cache instance is permanent and you cannot downgrade your Redis 6 caches to Redis 4 caches. +You can upgrade your existing Redis 4 caches to Redis 6, see [here](#upgrade-an-existing-redis-4-cache-to-redis-6) for details. Upgrading your cache instance is permanent and you cannot downgrade your Redis 6 caches to Redis 4 caches. ## Next Steps - To learn more about Redis 6 features, see [Diving Into Redis 6.0 by Redis](https://redis.com/blog/diving-into-redis-6/) -- To learn more about Azure Cache for Redis features: - -> [!div class="nextstepaction"] -> [Azure Cache for Redis Premium service tiers](cache-overview.md#service-tiers) +- To learn more about Azure Cache for Redis features: [Azure Cache for Redis Premium service tiers](cache-overview.md#service-tiers) diff --git a/articles/azure-cache-for-redis/cache-how-to-zone-redundancy.md b/articles/azure-cache-for-redis/cache-how-to-zone-redundancy.md index 13b69e3b7d226..3e75da03fbe40 100644 --- a/articles/azure-cache-for-redis/cache-how-to-zone-redundancy.md +++ b/articles/azure-cache-for-redis/cache-how-to-zone-redundancy.md @@ -5,10 +5,12 @@ author: flang-msft ms.author: franlanglois ms.service: cache ms.topic: conceptual -ms.date: 08/11/2020 +ms.date: 06/07/2022 + --- # Enable zone redundancy for Azure Cache for Redis + In this article, you'll learn how to configure a zone-redundant Azure Cache instance using the Azure portal. 
Azure Cache for Redis Standard, Premium, and Enterprise tiers provide built-in redundancy by hosting each cache on two dedicated virtual machines (VMs). Even though these VMs are located in separate [Azure fault and update domains](../virtual-machines/availability.md) and highly available, they're susceptible to datacenter level failures. Azure Cache for Redis also supports zone redundancy in its Premium and Enterprise tiers. A zone-redundant cache runs on VMs spread across multiple [Availability Zones](../availability-zones/az-overview.md). It provides higher resilience and availability. @@ -17,9 +19,11 @@ Azure Cache for Redis Standard, Premium, and Enterprise tiers provide built-in r > Data transfer between Azure Availability Zones will be charged at standard [bandwidth rates](https://azure.microsoft.com/pricing/details/bandwidth/). ## Prerequisites -* Azure subscription - [create one for free](https://azure.microsoft.com/free/) + +- Azure subscription - [create one for free](https://azure.microsoft.com/free/) ## Create a cache + To create a cache, follow these steps: 1. Sign in to the [Azure portal](https://portal.azure.com) and select **Create a resource**. @@ -27,23 +31,23 @@ To create a cache, follow these steps: 1. On the **New** page, select **Databases** and then select **Azure Cache for Redis**. :::image type="content" source="media/cache-create/new-cache-menu.png" alt-text="Select Azure Cache for Redis."::: - + 1. On the **Basics** page, configure the settings for your new cache. - + | Setting | Suggested value | Description | | ------------ | ------- | -------------------------------------------------- | - | **Subscription** | Select your subscription. | The subscription under which to create this new Azure Cache for Redis instance. | - | **Resource group** | Select a resource group, or select **Create new** and enter a new resource group name. | Name for the resource group in which to create your cache and other resources. By putting all your app resources in one resource group, you can easily manage or delete them together. | - | **DNS name** | Enter a globally unique name. | The cache name must be a string between 1 and 63 characters that contains only numbers, letters, or hyphens. The name must start and end with a number or letter, and can't contain consecutive hyphens. Your cache instance's *host name* will be *\.redis.cache.windows.net*. | + | **Subscription** | Select your subscription. | The subscription under which to create this new Azure Cache for Redis instance. | + | **Resource group** | Select a resource group, or select **Create new** and enter a new resource group name. | Name for the resource group in which to create your cache and other resources. By putting all your app resources in one resource group, you can easily manage or delete them together. | + | **DNS name** | Enter a globally unique name. | The cache name must be a string between 1 and 63 characters that contains only numbers, letters, or hyphens. The name must start and end with a number or letter, and can't contain consecutive hyphens. Your cache instance's *host name* will be *\.redis.cache.windows.net*. | | **Location** | Select a location. | Select a [region](https://azure.microsoft.com/regions/) near other services that will use your cache. | | **Cache type** | Select a [Premium or Enterprise tier](https://azure.microsoft.com/pricing/details/cache/) cache. | The pricing tier determines the size, performance, and features that are available for the cache. 
For more information, see [Azure Cache for Redis Overview](cache-overview.md). | - + 1. On the **Advanced** page, for a Premium tier cache, choose **Replica count**. - + :::image type="content" source="media/cache-how-to-multi-replicas/create-multi-replicas.png" alt-text="Replica count"::: -1. Select **Availability zones**. - +1. Select **Availability zones**. + :::image type="content" source="media/cache-how-to-zone-redundancy/create-zones.png" alt-text="Availability zones"::: 1. Configure your settings for clustering and/or RDB persistence. @@ -52,13 +56,12 @@ To create a cache, follow these steps: > Zone redundancy doesn't support AOF persistence or work with geo-replication currently. > -1. Select **Create**. - - It takes a while for the cache to create. You can monitor progress on the Azure Cache for Redis **Overview** page. When **Status** shows as **Running**, the cache is ready to use. - +1. Select **Create**. + + It takes a while for the cache to be created. You can monitor progress on the Azure Cache for Redis **Overview** page. When **Status** shows as **Running**, the cache is ready to use. + > [!NOTE] - > Availability zones can't be changed or enabled after a cache is created. - > + > Availability zones can't be changed or enabled after a cache is created. ## Zone Redundancy FAQ @@ -73,18 +76,18 @@ Zone redundancy is available only in Azure regions that have Availability Zones. ### Why can't I select all three zones during cache create? -A Premium cache has one primary and one replica nodes by default. To configure zone redundancy for more than two Availability Zones, you need to add [more replicas](cache-how-to-multi-replicas.md) to the cache you're creating. +A Premium cache has one primary and one replica node by default. To configure zone redundancy for more than two Availability Zones, you need to add [more replicas](cache-how-to-multi-replicas.md) to the cache you're creating. ### Can I update my existing Premium cache to use zone redundancy? -No, this is not supported currently. +No, this isn't supported currently. ### How much does it cost to replicate my data across Azure Availability Zones? -When using zone redundancy, configured with multiple Availability Zones, data is replicated from the primary cache node in one zone to the other node(s) in another zone(s). The data transfer charge is the network egress cost of data moving across the selected Availability Zones. For more information, see [Bandwidth Pricing Details](https://azure.microsoft.com/pricing/details/bandwidth/). +When using zone redundancy configured with multiple Availability Zones, data is replicated from the primary cache node in one zone to the other node(s) in another zone(s). The data transfer charge is the network egress cost of data moving across the selected Availability Zones. For more information, see [Bandwidth Pricing Details](https://azure.microsoft.com/pricing/details/bandwidth/). ## Next Steps + Learn more about Azure Cache for Redis features. 
-> [!div class="nextstepaction"] -> [Azure Cache for Redis Premium service tiers](cache-overview.md#service-tiers) +- [Azure Cache for Redis Premium service tiers](cache-overview.md#service-tiers) diff --git a/articles/azure-cache-for-redis/cache-managed-identity.md b/articles/azure-cache-for-redis/cache-managed-identity.md index 6fcc1ed5d8ffc..9d24b46333324 100644 --- a/articles/azure-cache-for-redis/cache-managed-identity.md +++ b/articles/azure-cache-for-redis/cache-managed-identity.md @@ -106,7 +106,7 @@ To use managed identity, you must have a premium-tier cache. ## Enable managed identity using the Azure CLI -Use the Azure CLI for creating a new cache with managed identity or updating an existing cache to use managed identity. For more information, see [az redis create](/cli/azure/redis?view=azure-cli-latest.md) or [az redis identity](/cli/azure/redis/identity?view=azure-cli-latest). +Use the Azure CLI for creating a new cache with managed identity or updating an existing cache to use managed identity. For more information, see [az redis create](/cli/azure/redis?view=azure-cli-latest.md&preserve-view=true) or [az redis identity](/cli/azure/redis/identity?view=azure-cli-latest&preserve-view=true). For example, to update a cache to use system-managed identity use the following CLI command: @@ -117,7 +117,7 @@ az redis identity assign \--mi-system-assigned \--name MyCacheName \--resource-g ## Enable managed identity using Azure PowerShell -Use Azure PowerShell for creating a new cache with managed identity or updating an existing cache to use managed identity. For more information, see [New-AzRedisCache](/powershell/module/az.rediscache/new-azrediscache?view=azps-7.1.0) or [Set-AzRedisCache](/powershell/module/az.rediscache/set-azrediscache?view=azps-7.1.0). +Use Azure PowerShell for creating a new cache with managed identity or updating an existing cache to use managed identity. For more information, see [New-AzRedisCache](/powershell/module/az.rediscache/new-azrediscache?view=azps-7.1.0&preserve-view=true) or [Set-AzRedisCache](/powershell/module/az.rediscache/set-azrediscache?view=azps-7.1.0&preserve-view=true). For example, to update a cache to use system-managed identity, use the following PowerShell command: diff --git a/articles/azure-cache-for-redis/cache-redis-cache-bicep-provision.md b/articles/azure-cache-for-redis/cache-redis-cache-bicep-provision.md new file mode 100644 index 0000000000000..541d6607228bd --- /dev/null +++ b/articles/azure-cache-for-redis/cache-redis-cache-bicep-provision.md @@ -0,0 +1,99 @@ +--- +title: Deploy Azure Cache for Redis using Bicep +description: Learn how to use Bicep to deploy an Azure Cache for Redis resource. +author: schaffererin +ms.author: v-eschaffer +ms.service: cache +ms.topic: conceptual +ms.custom: subject-armqs, devx-track-azurepowershell +ms.date: 05/24/2022 +--- + +# Quickstart: Create an Azure Cache for Redis using Bicep + +Learn how to use Bicep to deploy a cache using Azure Cache for Redis. After you deploy the cache, use it with an existing storage account to keep diagnostic data. Learn how to define which resources are deployed and how to define parameters that are specified when the deployment is executed. You can use this Bicep file for your own deployments, or customize it to meet your requirements. 
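If you'd rather script the prerequisites listed below (a resource group named **exampleRG** and a storage account used for diagnostics), a minimal Azure CLI sketch might look like the following. The storage account name and region are placeholders, and the name must be globally unique:

```azurecli
# Hedged sketch of the quickstart prerequisites; substitute your own names and region.
az group create --name exampleRG --location eastus

# The diagnostics storage account referenced later by the Bicep deployment.
az storage account create \
  --name <STORAGE_ACCOUNT_NAME> \
  --resource-group exampleRG \
  --location eastus \
  --sku Standard_LRS
```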
+ +[!INCLUDE [About Bicep](../../includes/resource-manager-quickstart-bicep-introduction.md)] + +## Prerequisites + +* **Azure subscription**: If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/) before you begin. +* **A storage account**: To create one, see [Create an Azure Storage account](../storage/common/storage-account-create.md?tabs=azure-portal). The storage account is used for diagnostic data. Create the storage account in a new resource group named **exampleRG**. + +## Review the Bicep file + +The Bicep file used in this quickstart is from [Azure Quickstart Templates](https://azure.microsoft.com/resources/templates/redis-cache/). + +:::code language="bicep" source="~/quickstart-templates/quickstarts/microsoft.cache/redis-cache/main.bicep"::: + +The following resources are defined in the Bicep file: + +* [Microsoft.Cache/Redis](/azure/templates/microsoft.cache/redis) +* [Microsoft.Insights/diagnosticsettings](/azure/templates/microsoft.insights/diagnosticsettings) + +## Deploy the Bicep file + +1. Save the Bicep file as **main.bicep** to your local computer. +1. Deploy the Bicep file using either Azure CLI or Azure PowerShell. + + # [CLI](#tab/CLI) + + ```azurecli + az deployment group create --resource-group exampleRG --template-file main.bicep --parameters existingDiagnosticsStorageAccountName= existingDiagnosticsStorageAccountResourceGroup= + ``` + + # [PowerShell](#tab/PowerShell) + + ```azurepowershell + New-AzResourceGroupDeployment -ResourceGroupName exampleRG -TemplateFile ./main.bicep -existingDiagnosticsStorageAccountName "" -existingDiagnosticsStorageAccountResourceGroup "" + ``` + + --- + + > [!NOTE] + > Replace **\** with the name of the storage account you created at the beginning of this quickstart. Replace **\** with the name of the resource group name in which your storage account is located. + + When the deployment finishes, you see a message indicating the deployment succeeded. + +## Review deployed resources + +Use the Azure portal, Azure CLI, or Azure PowerShell to list the deployed resources in the resource group. + +# [CLI](#tab/CLI) + +```azurecli-interactive +az resource list --resource-group exampleRG +``` + +# [PowerShell](#tab/PowerShell) + +```azurepowershell-interactive +Get-AzResource -ResourceGroupName exampleRG +``` + +--- + +## Clean up resources + +When no longer needed, delete the resource group, which deletes the resources in the resource group. + +# [CLI](#tab/CLI) + +```azurecli-interactive +az group delete --name exampleRG +``` + +# [PowerShell](#tab/PowerShell) + +```azurepowershell-interactive +Remove-AzResourceGroup -Name exampleRG +``` + +--- + +## Next steps + +In this tutorial, you learned how to use Bicep to deploy a cache using Azure Cache for Redis. To learn more about Azure Cache for Redis and Bicep, see the articles below: + +* Learn more about [Azure Cache for Redis](../azure-cache-for-redis/cache-overview.md). +* Learn more about [Bicep](../../articles/azure-resource-manager/bicep/overview.md). diff --git a/articles/azure-cache-for-redis/cache-web-app-bicep-with-redis-cache-provision.md b/articles/azure-cache-for-redis/cache-web-app-bicep-with-redis-cache-provision.md new file mode 100644 index 0000000000000..1a8df36328c32 --- /dev/null +++ b/articles/azure-cache-for-redis/cache-web-app-bicep-with-redis-cache-provision.md @@ -0,0 +1,97 @@ +--- +title: Provision Web App that uses Azure Cache for Redis using Bicep +description: Use Bicep to deploy web app with Azure Cache for Redis. 
+author: schaffererin +ms.service: app-service +ms.topic: conceptual +ms.date: 05/24/2022 +ms.author: v-eschaffer +ms.custom: devx-track-azurepowershell + +--- +# Create a Web App plus Azure Cache for Redis using Bicep + +In this article, you use Bicep to deploy an Azure Web App that uses Azure Cache for Redis, as well as an App Service plan. + +[!INCLUDE [About Bicep](../../includes/resource-manager-quickstart-bicep-introduction.md)] + +You can use this Bicep file for your own deployments. The Bicep file provides unique names for the Azure Web App, the App Service plan, and the Azure Cache for Redis. If you'd like, you can customize the Bicep file after you save it to your local device to meet your requirements. + +For more information about creating Bicep files, see [Quickstart: Create Bicep files with Visual Studio Code](../azure-resource-manager/bicep/quickstart-create-bicep-use-visual-studio-code.md). To learn about Bicep syntax, see [Understand the structure and syntax of Bicep files](../azure-resource-manager/bicep/file.md). + +## Review the Bicep file + +The Bicep file used in this quickstart is from [Azure Quickstart Templates](https://github.com/Azure/azure-quickstart-templates/blob/master/quickstarts/microsoft.web/web-app-with-redis-cache/). + +:::code language="bicep" source="~/quickstart-templates/quickstarts/microsoft.web/web-app-with-redis-cache/main.bicep"::: + +With this Bicep file, you deploy: + +* [**Microsoft.Cache/Redis**](/azure/templates/microsoft.cache/redis) +* [**Microsoft.Web/sites**](/azure/templates/microsoft.web/sites) +* [**Microsoft.Web/serverfarms**](/azure/templates/microsoft.web/serverfarms) + +## Deploy the Bicep file + +1. Save the Bicep file as **main.bicep** to your local computer. +1. Deploy the Bicep file using either Azure CLI or Azure PowerShell. + + # [CLI](#tab/CLI) + + ```azurecli + az group create --name exampleRG --location eastus + az deployment group create --resource-group exampleRG --template-file main.bicep + ``` + + # [PowerShell](#tab/PowerShell) + + ```azurepowershell + New-AzResourceGroup -Name exampleRG -Location eastus + New-AzResourceGroupDeployment -ResourceGroupName exampleRG -TemplateFile ./main.bicep + ``` + + --- + + When the deployment finishes, you should see a message indicating the deployment succeeded. + +## Review deployed resources + +Use the Azure portal, Azure CLI, or Azure PowerShell to list the deployed resources in the resource group. + +# [CLI](#tab/CLI) + +```azurecli-interactive +az resource list --resource-group exampleRG +``` + +# [PowerShell](#tab/PowerShell) + +```azurepowershell-interactive +Get-AzResource -ResourceGroupName exampleRG +``` + +--- + +## Clean up resources + +When no longer needed, use the Azure portal, Azure CLI, or Azure PowerShell to delete the resource group and its resources. 
+ +# [CLI](#tab/CLI) + +```azurecli-interactive +az group delete --name exampleRG +``` + +# [PowerShell](#tab/PowerShell) + +```azurepowershell-interactive +Remove-AzResourceGroup -Name exampleRG +``` + +--- + +## Next steps + +To learn more about Bicep, continue to the following article: + +* [Bicep overview](../azure-resource-manager/bicep/overview.md) diff --git a/articles/azure-edge-hardware-center/TOC.yml b/articles/azure-edge-hardware-center/TOC.yml index 11d86bc46555c..e4da8ba4555df 100644 --- a/articles/azure-edge-hardware-center/TOC.yml +++ b/articles/azure-edge-hardware-center/TOC.yml @@ -27,13 +27,13 @@ - name: References items: - name: .NET APIs - href: /dotnet/api/overview/azure/edgeorder?view=azure-dotnet-preview + href: /dotnet/api/overview/azure/edgeorder - name: REST API href: /rest/api/edgehardwarecenter/ - name: Python SDK href: https://pypi.org/project/azure-mgmt-edgeorder/1.0.0/ - name: Azure PowerShell - href: /powershell/module/az.edgeorder/?view=azps-7.3.2&viewFallbackFrom=azps-7.1.0#edgeorder + href: /powershell/module/az.edgeorder/ - name: Resources items: - name: Azure Stack Edge product diff --git a/articles/azure-edge-hardware-center/azure-edge-hardware-center-create-order.md b/articles/azure-edge-hardware-center/azure-edge-hardware-center-create-order.md index 6f5aca6b9b0d5..fbd13e3df9b86 100644 --- a/articles/azure-edge-hardware-center/azure-edge-hardware-center-create-order.md +++ b/articles/azure-edge-hardware-center/azure-edge-hardware-center-create-order.md @@ -5,7 +5,7 @@ services: Azure Edge Hardware Center author: alkohli ms.service: azure-edge-hardware-center ms.topic: tutorial -ms.date: 01/03/2022 +ms.date: 05/04/2022 ms.author: alkohli # Customer intent: As an IT admin, I need to understand how to create an order via the Azure Edge Hardware Center. --- @@ -29,7 +29,7 @@ Before you begin: For information on how to register, go to [Register resource provider](../databox-online/azure-stack-edge-gpu-manage-access-power-connectivity-mode.md#register-resource-providers). -- Make sure that all the other prerequisites related to the product that you are ordering are met. For example, if ordering Azure Stack Edge device, ensure that all the [Azure Stack Edge prerequisites](../databox-online/azure-stack-edge-gpu-deploy-prep.md#prerequisites) are completed. +- Make sure that all the other prerequisites related to the product that you're ordering are met. For example, if ordering Azure Stack Edge device, ensure that all the [Azure Stack Edge prerequisites](../databox-online/azure-stack-edge-gpu-deploy-prep.md#prerequisites) are completed. ## Create an order diff --git a/articles/azure-edge-hardware-center/azure-edge-hardware-center-faq.yml b/articles/azure-edge-hardware-center/azure-edge-hardware-center-faq.yml index c3cffd4088c8a..e9bb5e2d516dc 100644 --- a/articles/azure-edge-hardware-center/azure-edge-hardware-center-faq.yml +++ b/articles/azure-edge-hardware-center/azure-edge-hardware-center-faq.yml @@ -7,7 +7,7 @@ metadata: ms.service: azure-edge-hardware-center ms.topic: faq - ms.date: 01/03/2022 + ms.date: 06/06/2022 ms.author: alkohli ms.custom: references_regions title: "Azure Edge Hardware Center: Frequently Asked Questions" @@ -29,27 +29,35 @@ sections: answer: | Yes. If you ordered your hardware unit through the Edge Hardware Center, you can initiate the return process from the Azure portal. - In the Azure portal, go to your Azure Edge Hardware Center order item resource. 
In the **Overview**, go to the top command bar in the right pane and select **Return**. The return option is only enabled after you have received a device. For more information, see the steps in [Return hardware](azure-edge-hardware-center-manage-order.md#return-hardware). + In the Azure portal, go to your Azure Edge Hardware Center order item resource. In the **Overview**, go to the top command bar in the right pane and select **Return**. The return option is only enabled after you've received a device. For more information, see the steps in [Return hardware](azure-edge-hardware-center-manage-order.md#return-hardware). - question: | Can I order multiple units of hardware at the same time? If so, how? answer: | - Yes. While creating the order, you can input quantity in the **Shipping + quantity** tab. You can order a maximum of up to 20 units per order. To order more than 20 units at one time, you will need to create multiple orders. + Yes. While creating the order, you can input quantity in the **Shipping + quantity** tab. You can order a maximum of up to 20 units per order. To order more than 20 units at one time, you'll need to create multiple orders. - question: | Can I ship different hardware units within a single order to different locations? answer: | - Yes. While creating the order, you can add multiple shipping addresses in the **Shipping + quantity** tab. Make sure that all the addresses are within the same country. To ship hardware to addresses across different countries, you will have to create separate orders. + Yes. While creating the order, you can add multiple shipping addresses in the **Shipping + quantity** tab. Make sure that all the addresses are within the same country. To ship hardware to addresses across different countries, you'll have to create separate orders. - question: | In which regions, can I create Azure Edge Hardware Center orders? answer: | The Edge Hardware Center service is available in East US, West Europe, and South East Asia for Azure public cloud. However, the region in which the service is created is used to store only the relevant data and this region can be different from the region where the device is shipped. + - question: | + Can I limit the users/groups who can order Azure Stack devices? + answer: | + There are two methods to limit who can order Azure Stack devices: + + - For individual permissions, device ordering is limited to users with contributor access to the subscription. + - Use custom roles to restrict access control. For more information, see [Azure custom roles](https://docs.microsoft.com/azure/role-based-access-control/custom-roles) and [Create an Azure Edge Hardware Center](azure-edge-hardware-center-create-order.md#prerequisites). + - question: | I created an order via the Azure Edge Hardware Center. Can I now cancel the order? answer: | - Depends. You can cancel an Edge Hardware Center order after it is placed and before it is in **Confirmed** state. To view the status of your resource, go to [Track Edge Hardware Center order](azure-edge-hardware-center-manage-order.md#track-order). + Depends. You can cancel an Edge Hardware Center order after it's placed and before it's in **Confirmed** state. To view the status of your resource, go to [Track Edge Hardware Center order](azure-edge-hardware-center-manage-order.md#track-order). - question: | Can I cancel an order once it is in the Confirmed state or beyond that? @@ -60,14 +68,14 @@ sections: What happens to my order if the entire region associated with the order fails? 
answer: | In extreme circumstances where a region is lost because of a significant disaster, Microsoft may initiate a regional failover. No action on your part is required in this case. Your order will be fulfilled through the failover region if it is within the same country or commerce boundary. - However, some Azure regions don't have a paired region in the same geographic or commerce boundary. If there is a disaster in any of those regions, you will need to create the Edge Hardware Center order again from a different region that is available. For more information, see [Data residency for Azure Stack Edge](../databox-online/azure-stack-edge-gpu-data-residency.md#azure-edge-hardware-center-ordering-and-management-resource). + However, some Azure regions don't have a paired region in the same geographic or commerce boundary. If there's a disaster in any of those regions, you will need to create the Edge Hardware Center order again from a different region that is available. For more information, see [Data residency for Azure Stack Edge](../databox-online/azure-stack-edge-gpu-data-residency.md#azure-edge-hardware-center-ordering-and-management-resource). - question: | - Can I delete my order? I am not able to do so. + Can I delete my order? I'm not able to do so. answer: | Depends. You can't delete an active order. - - You can delete your order after it is canceled. For more information, see [Cancel the order](azure-edge-hardware-center-manage-order.md#cancel-order). + - You can delete your order after it's canceled. For more information, see [Cancel the order](azure-edge-hardware-center-manage-order.md#cancel-order). - You can delete the order after the hardware is returned to Microsoft and has passed all the inspections. For more information, see [Return your order](azure-edge-hardware-center-manage-order.md#return-hardware). 
- question: | diff --git a/articles/azure-edge-hardware-center/azure-edge-hardware-center-manage-order.md b/articles/azure-edge-hardware-center/azure-edge-hardware-center-manage-order.md index 20056a0d1f48b..84a4d5a8b83ab 100644 --- a/articles/azure-edge-hardware-center/azure-edge-hardware-center-manage-order.md +++ b/articles/azure-edge-hardware-center/azure-edge-hardware-center-manage-order.md @@ -5,7 +5,7 @@ services: Azure Edge Hardware Center author: alkohli ms.service: azure-edge-hardware-center ms.topic: how-to -ms.date: 01/03/2022 +ms.date: 06/01/2022 ms.author: alkohli --- # Use the Azure portal to manage your Azure Edge Hardware Center orders diff --git a/articles/azure-edge-hardware-center/media/azure-edge-hardware-center-manage-order/track-order-status-2.png b/articles/azure-edge-hardware-center/media/azure-edge-hardware-center-manage-order/track-order-status-2.png index 6439e562f60d6..51dbadbb19983 100644 Binary files a/articles/azure-edge-hardware-center/media/azure-edge-hardware-center-manage-order/track-order-status-2.png and b/articles/azure-edge-hardware-center/media/azure-edge-hardware-center-manage-order/track-order-status-2.png differ diff --git a/articles/azure-fluid-relay/TOC.yml b/articles/azure-fluid-relay/TOC.yml index acad0bdb470ea..5e3b0fd96ba7f 100644 --- a/articles/azure-fluid-relay/TOC.yml +++ b/articles/azure-fluid-relay/TOC.yml @@ -52,3 +52,5 @@ items: - name: Fluid Framework home page href: https://fluidframework.com + - name: FAQ + href: resources/faq.md \ No newline at end of file diff --git a/articles/azure-fluid-relay/concepts/data-encryption.md b/articles/azure-fluid-relay/concepts/data-encryption.md index 7bfd6288e33fc..ff37b7144eaed 100644 --- a/articles/azure-fluid-relay/concepts/data-encryption.md +++ b/articles/azure-fluid-relay/concepts/data-encryption.md @@ -13,15 +13,15 @@ ms.topic: reference > [!NOTE] > This preview version is provided without a service-level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. -Microsoft Azure Fluid Relay Server leverages the encryption-at-rest capability of [Azure Kubernetes](../../aks/enable-host-encryption.md), [Microsoft Azure Cosmos DB]/azure/cosmos-db/database-encryption-at-rest) and [Azure Blob Storage](../../storage/common/storage-service-encryption.md). The service-to-service communication between AFRS and these resources is TLS encrypted and is enclosed in with the Azure Virtual Network Boundary, protected from external interference by Network Security Rules. +Azure Fluid Relay leverages the encryption-at-rest capability of [Azure Kubernetes Service](../../aks/enable-host-encryption.md), [Azure Cosmos DB](../../cosmos-db/database-encryption-at-rest.md) and [Azure Blob Storage](../../storage/common/storage-service-encryption.md). The service-to-service communication between Azure Fluid Relay and these resources is TLS encrypted and is enclosed in with the Azure Virtual Network boundary, protected from external interference by Network Security Rules. -The diagram below shows at a high level how Azure Fluid Relay Server is implemented and how it handles data storage. +The diagram below shows at a high level how Azure Fluid Relay is implemented and how it handles data storage. :::image type="content" source="../images/data-encryption.png" alt-text="A diagram of data storage in Azure Fluid Relay"::: ## Frequently asked questions -### How much more does Azure Fluid Relay Server cost if Encryption is enabled? 
+### How much more does Azure Fluid Relay cost if encryption is enabled? Encryption-at-rest is enabled by default. There is no additional cost. @@ -31,7 +31,7 @@ The keys are managed by Microsoft. ### How often are encryption keys rotated? -Microsoft has a set of internal guidelines for encryption key rotation, which Azure Fluid Relay Server follows. The specific guidelines are not published. Microsoft does publish the [Security Development Lifecycle (SDL)](https://www.microsoft.com/sdl/default.aspx), which is seen as a subset of internal guidance and has useful best practices for developers. +Microsoft has a set of internal guidelines for encryption key rotation which Azure Fluid Relay follows. The specific guidelines are not published. Microsoft does publish the [Security Development Lifecycle (SDL)](https://www.microsoft.com/sdl/default.aspx), which is seen as a subset of internal guidance and has useful best practices for developers. ### Can I use my own encryption keys? @@ -39,14 +39,14 @@ No, this feature is not available yet. Keep an eye out for more updates on this. ### What regions have encryption turned on? -All Azure Fluid Relay Server regions have encryption turned on for all user data. +All Azure Fluid Relay regions have encryption turned on for all user data. -### Does encryption affect the performance latency and throughput SLAs? +### Does encryption affect the performance latency and throughput? -A: There is no impact or changes to the performance SLAs with encryption at rest enabled. +A: There is no impact or changes to performance with encryption at rest enabled. ## See also - [Overview of Azure Fluid Relay architecture](architecture.md) - [Azure Fluid Relay token contract](../how-tos/fluid-json-web-token.md) -- [Authentication and authorization in your app](authentication-authorization.md) \ No newline at end of file +- [Authentication and authorization in your app](authentication-authorization.md) diff --git a/articles/azure-fluid-relay/how-tos/connect-fluid-azure-service.md b/articles/azure-fluid-relay/how-tos/connect-fluid-azure-service.md index 72fe35b4d172b..79e50e85ed5d8 100644 --- a/articles/azure-fluid-relay/how-tos/connect-fluid-azure-service.md +++ b/articles/azure-fluid-relay/how-tos/connect-fluid-azure-service.md @@ -50,7 +50,7 @@ Now that you have an instance of `AzureClient`, you can start using it to create ### Token providers -The [AzureFunctionTokenProvider](https://github.com/microsoft/FluidFramework/blob/main/packages/framework/azure-client/src/AzureFunctionTokenProvider.ts) is an implementation of `ITokenProvider` which ensures your tenant key secret is not exposed in your client-side bundle code. The `AzureFunctionTokenProvider` takes in your Azure Function URL appended by `/api/GetAzureToken` along with the current user object. Later on, it makes a `GET` request to your Azure Function by passing in the tenantId, documentId and userId/userName as optional parameters. +The [AzureFunctionTokenProvider](https://github.com/microsoft/FluidFramework/blob/main/azure/packages/azure-client/src/AzureFunctionTokenProvider.ts) is an implementation of `ITokenProvider` which ensures your tenant key secret is not exposed in your client-side bundle code. The `AzureFunctionTokenProvider` takes in your Azure Function URL appended by `/api/GetAzureToken` along with the current user object. Later on, it makes a `GET` request to your Azure Function by passing in the tenantId, documentId and userId/userName as optional parameters. 
```javascript const config = { diff --git a/articles/azure-fluid-relay/resources/faq.md b/articles/azure-fluid-relay/resources/faq.md new file mode 100644 index 0000000000000..70ad97e0db5c8 --- /dev/null +++ b/articles/azure-fluid-relay/resources/faq.md @@ -0,0 +1,50 @@ +--- +title: Azure Fluid Relay FAQ +description: Frequently asked questions about Fluid Relay +author: hickeys +ms.author: hickeys +ms.date: 6/1/2022 +ms.service: azure-fluid +ms.topic: reference +--- + +# Azure Fluid Relay FAQ + +The following are frequently asked questions about Azure Fluid Relay + +## Which Azure regions currently provide Fluid Relay? + +For a complete list of available regions, see [Azure Fluid Relay regions and availability](https://azure.microsoft.com/global-infrastructure/services/?products=fluid-relay). + +## Can I move my Fluid Relay resource from one Azure resource group to another? + +Yes. You can move the Fluid Relay resource from one resource group to another. + +## Can I move my Fluid Relay resource from one Azure subscription to another? + +Yes. You can move the Fluid Relay resource from one subscription to another. + +## Can I move Fluid Relay resource between Azure regions? + +No. Moving the Fluid Relay resource from one region to another isn’t supported. + +## Is Azure Fluid Relay certified by industry certifications? + +We adhere to the security and privacy policies and practices that other Azure services follow to help achieve those industry and regional certifications. Once Azure Fluid Relay is in General Availability, we'll be pursuing those certifications. We'll be updating our certification posture as we achieve the different certifications. For more information, see the [Microsoft Trust Center](https://www.microsoft.com/trust-center). + +## What network protocols does Fluid Relay use? + +Fluid Relay, like the Fluid Framework technology, uses both http and web sockets for communication between the clients and the service. + +## Will Azure Fluid Relay work in environments where web sockets are blocked? + +Yes. The Fluid Framework uses socket.io library for communication with the service. In environments where web sockets are blocked, the client will fall back to use long-polling with http. + +## Where does Azure Fluid Relay store customer data? + +Azure Fluid Relay stores customer data. By default, customer data is replicated to the paired region. However, the customer can choose to keep it within the same region by selecting the Basic SKU during provisioning. This option is available in select regions where the paired region is outside the country boundary of the primary region data is stored. For more information, go to [Data storage in Azure Fluid Relay](../concepts/data-storage.md). + +## Does Azure Fluid Relay support offline mode? + +Offline mode is when end users of your application are disconnected from the network. The Fluid Framework client accumulates operations locally and sends them to the service when reconnected. Currently, Azure Fluid Relay doesn't support extended periods of offline mode beyond 1 minute. We highly recommend that you listen to Disconnect signals and update your user experience to avoid accumulation of many ops that can get lost. 
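As a practical footnote to the resource-group and subscription move questions earlier in this FAQ: moves aren't specific to Fluid Relay and can be performed with the generic Azure CLI resource commands. The sketch below assumes the resource type is `Microsoft.FluidRelay/fluidRelayServers`; verify the type and names for your own resource before running it:

```azurecli
# Hedged sketch: look up the Fluid Relay resource ID, then move it to another resource group.
fluidRelayId=$(az resource show \
  --resource-group <SOURCE_RESOURCE_GROUP> \
  --name <FLUID_RELAY_NAME> \
  --resource-type Microsoft.FluidRelay/fluidRelayServers \
  --query id --output tsv)

az resource move --ids $fluidRelayId --destination-group <TARGET_RESOURCE_GROUP>
```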
+ diff --git a/articles/azure-functions/TOC.yml b/articles/azure-functions/TOC.yml index 92fdd8900dc35..e5a264a297f95 100644 --- a/articles/azure-functions/TOC.yml +++ b/articles/azure-functions/TOC.yml @@ -52,6 +52,9 @@ href: create-first-function-cli-python.md - name: TypeScript href: create-first-function-cli-typescript.md + - name: Bicep + displayName: ARM, Resource Manager, Template + href: functions-create-first-function-bicep.md - name: ARM template displayName: Resource Manager href: functions-create-first-function-resource-manager.md @@ -183,7 +186,7 @@ displayName: best practices - name: Compare runtime versions href: functions-versions.md - displayName: migrate, migration, v3 + displayName: migrate, migration, v3, v4, update, upgrade - name: Hosting and scale items: - name: Consumption plan diff --git a/articles/azure-functions/azure-functions-az-redundancy.md b/articles/azure-functions/azure-functions-az-redundancy.md index 3cd7949bd60c3..18c2207d2edff 100644 --- a/articles/azure-functions/azure-functions-az-redundancy.md +++ b/articles/azure-functions/azure-functions-az-redundancy.md @@ -3,35 +3,39 @@ title: Azure Functions availability zone support on Elastic Premium plans description: Learn how to use availability zone redundancy with Azure Functions for high-availability function applications on Elastic Premium plans. ms.topic: conceptual ms.author: johnguo -ms.date: 09/07/2021 +ms.date: 03/24/2022 ms.custom: references_regions # Goal: Introduce AZ Redundancy in Azure Functions elastic premium plans to customers + a tutorial on how to get started with ARM templates --- # Azure Functions support for availability zone redundancy -Availability zone (AZ) support for Azure Functions is now available on Elastic Premium and Dedicated (App Service) plans. A Zone Redundant Azure Function application will automatically balance its instances between availability zones for higher availability. This document focuses on zone redundancy support for Elastic Premium Function plans. For zone redundancy on Dedicated plans, refer [here](../app-service/how-to-zone-redundancy.md). +Availability zone (AZ) support for Azure Functions is now available on Premium (Elastic Premium) and Dedicated (App Service) plans. A zone-redundant Functions application automatically balances its instances between availability zones for higher availability. This article focuses on zone redundancy support for Premium plans. For zone redundancy on Dedicated plans, refer [here](../app-service/how-to-zone-redundancy.md). + +[!INCLUDE [functions-premium-plan-note](../../includes/functions-premium-plan-note.md)] ## Overview -An [availability zone](../availability-zones/az-overview.md#availability-zones) is a high-availability offering that protects your applications and data from datacenter failures. Availability zones are unique physical locations within an Azure region. Each zone is made up of one or more datacenters equipped with independent power, cooling, and networking. To ensure resiliency, there's a minimum of three separate zones in all enabled regions. You can build high availability into your application architecture by co-locating your compute, storage, networking, and data resources within a zone and replicating in other zones. +An [availability zone](../availability-zones/az-overview.md#availability-zones) is a high-availability offering that protects your applications and data from datacenter failures. Availability zones are unique physical locations within an Azure region. 
Each zone comprises one or more datacenters equipped with independent power, cooling, and networking. To ensure resiliency, there's a minimum of three separate zones in all enabled regions. You can build high-availability into your application architecture by co-locating your compute, storage, networking, and data resources within a zone and replicating into other zones. -A zone redundant function app will automatically distribute load the instances that your app runs on between the availability zones in the region. For Zone Redundant Elastic Premium apps, even as the app scales in and out, the instances the app is running on are still evenly distributed between availability zones. +A zone redundant function app automatically distributes the instances your app runs on between the availability zones in the region. For apps running in a zone-redundant Premium plan, even as the app scales in and out, the instances the app is running on are still evenly distributed between availability zones. ## Requirements -> [!IMPORTANT] -> When selecting a [storage account](storage-considerations.md#storage-account-requirements) for your function app, be sure to use a [zone redundant storage account (ZRS)](../storage/common/storage-redundancy.md#zone-redundant-storage). Otherwise, in the case of a zonal outage, Functions may show unexpected behavior due to its dependency on Storage. +When hosting in a zone-redundant Premium plan, the following requirements must be met. +- You must use a [zone redundant storage account (ZRS)](../storage/common/storage-redundancy.md#zone-redundant-storage) for your function app's [storage account](storage-considerations.md#storage-account-requirements). If you use a different type of storage account, Functions may show unexpected behavior during a zonal outage. - Both Windows and Linux are supported. -- Must be hosted on an [Elastic Premium](functions-premium-plan.md) or Dedicated hosting plan. Instructions on zone redundancy with Dedicated (App Service) hosting plan can be found [here](../app-service/how-to-zone-redundancy.md). +- Must be hosted on an [Elastic Premium](functions-premium-plan.md) or Dedicated hosting plan. Instructions on zone redundancy with Dedicated (App Service) hosting plan can be found [in this article](../app-service/how-to-zone-redundancy.md). - Availability zone (AZ) support isn't currently available for function apps on [Consumption](consumption-plan.md) plans. -- Zone redundant plans must specify a minimum instance count of 3. -- Function apps on an Elastic Premium plan additionally must have a minimum [always ready instances](functions-premium-plan.md#always-ready-instances) count of 3. -- Can be enabled in any of the following regions: +- Zone redundant plans must specify a minimum instance count of three. +- Function apps hosted on a Premium plan must also have a minimum [always ready instances](functions-premium-plan.md#always-ready-instances) count of three. + +Zone-redundant Premium plans can currently be enabled in any of the following regions: - West US 2 - West US 3 - Central US + - South Central US - East US - East US 2 - Canada Central @@ -44,48 +48,86 @@ A zone redundant function app will automatically distribute load the instances t - Japan East - Southeast Asia - Australia East -- At this time, must be created through [ARM template](../azure-resource-manager/templates/index.yml). 
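In addition to the portal and ARM template paths described in the next section, recent versions of the Azure CLI can typically create these resources as well. The following is a hedged sketch rather than an official deployment path: confirm the `--zone-redundant` flag with `az functionapp plan create --help` in your CLI version, and substitute your own names and a region from the list above:

```azurecli
# Zone-redundant storage account (ZRS), per the storage requirement above.
az storage account create \
  --name <STORAGE_NAME> \
  --resource-group <RESOURCE_GROUP> \
  --location eastus2 \
  --sku Standard_ZRS

# Elastic Premium plan with zone redundancy and the minimum capacity of three instances.
az functionapp plan create \
  --name <PLAN_NAME> \
  --resource-group <RESOURCE_GROUP> \
  --location eastus2 \
  --sku EP1 \
  --min-instances 3 \
  --zone-redundant

# Function app hosted on that plan.
az functionapp create \
  --name <APP_NAME> \
  --resource-group <RESOURCE_GROUP> \
  --plan <PLAN_NAME> \
  --storage-account <STORAGE_NAME> \
  --functions-version 4
```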
## How to deploy a function app on a zone redundant Premium plan -For initial creation of a zone redundant Elastic Premium Functions plan, you need to deploy via [ARM templates](../azure-resource-manager/templates/quickstart-create-templates-use-visual-studio-code.md). Then, once successfully created, you can view and interact with the Function Plan via the Azure portal and CLI tooling. An ARM template is only needed for the initial creation of the Function Plan. A guide to hosting Functions on Premium plans can be found [here](functions-infrastructure-as-code.md#deploy-on-premium-plan). Once the zone redundant plan is created and deployed, any function app hosted on your new plan will now be zone redundant. +There are currently two ways to deploy a zone-redundant premium plan and function app. You can use either the [Azure portal](https://portal.azure.com) or an ARM template. + +# [Azure portal](#tab/azure-portal) + +1. Open the Azure portal and navigate to the **Create Function App** page. Information on creating a function app in the portal can be found [here](functions-create-function-app-portal.md#create-a-function-app). + +1. In the **Basics** page, fill out the fields for your function app. Pay special attention to the fields in the table below (also highlighted in the screenshot below), which have specific requirements for zone redundancy. + + | Setting | Suggested value | Notes for Zone Redundancy | + | ------------ | ---------------- | ----------- | + | **Region** | Preferred region | The subscription under which this new function app is created. You must pick a region that is AZ enabled from the [list above](#requirements). | + + ![Screenshot of Basics tab of function app create page.](./media/functions-az-redundancy\azure-functions-basics-az.png) + +1. In the **Hosting** page, fill out the fields for your function app hosting plan. Pay special attention to the fields in the table below (also highlighted in the screenshot below), which have specific requirements for zone redundancy. + + | Setting | Suggested value | Notes for Zone Redundancy | + | ------------ | ---------------- | ----------- | + | **Storage Account** | A [zone-redundant storage account](storage-considerations.md#storage-account-requirements) | As mentioned above in the [requirements](#requirements) section, we strongly recommend using a zone-redundant storage account for your zone redundant function app. | + | **Plan Type** | Functions Premium | This article details how to create a zone redundant app in a Premium plan. Zone redundancy isn't currently available in Consumption plans. Information on zone redundancy on app service plans can be found [in this article](../app-service/how-to-zone-redundancy.md). | + | **Zone Redundancy** | Enabled | This field populates the flag that determines if your app is zone redundant or not. You won't be able to select `Enabled` unless you have chosen a region supporting zone redundancy, as mentioned in step 2. | + + ![Screenshot of Hosting tab of function app create page.](./media/functions-az-redundancy\azure-functions-hosting-az.png) -The only properties to be aware of while creating a zone redundant Function plan are the new **zoneRedundant** property and the Function Plan instance count (**capacity**) fields. The **zoneRedundant** property must be set to **true** and the **capacity** property should be set based on the workload requirement, but no less than 3. Choosing the right capacity varies based on several factors and high availability/fault tolerance strategies. 
A good rule of thumb is to ensure sufficient instances for the application such that losing one zone of instances leaves sufficient capacity to handle expected load. +1. For the rest of the function app creation process, create your function app as normal. There are no fields in the rest of the creation process that affect zone redundancy. + +# [ARM template](#tab/arm-template) + +You can use an [ARM template](../azure-resource-manager/templates/quickstart-create-templates-use-visual-studio-code.md) to deploy to a zone-redundant Premium plan. A guide to hosting Functions on Premium plans can be found [here](functions-infrastructure-as-code.md#deploy-on-premium-plan). + +The only properties to be aware of while creating a zone-redundant hosting plan are the new `zoneRedundant` property and the plan's instance count (`capacity`) fields. The `zoneRedundant` property must be set to `true` and the `capacity` property should be set based on the workload requirement, but not less than `3`. Choosing the right capacity varies based on several factors and high availability/fault tolerance strategies. A good rule of thumb is to ensure sufficient instances for the application such that losing one zone of instances leaves sufficient capacity to handle expected load. > [!IMPORTANT] -> Azure function Apps hosted on an elastic premium, zone redundant Function plan must have a minimum [always ready instance](functions-premium-plan.md#always-ready-instances) count of 3. This is to enforce that a zone redundant function app always has enough instances to satisfy at least one worker per zone. +> Azure Functions apps hosted on an elastic premium, zone-redundant plan must have a minimum [always ready instance](functions-premium-plan.md#always-ready-instances) count of 3. This make sure that a zone-redundant function app always has enough instances to satisfy at least one worker per zone. -Below is an ARM template snippet for a zone redundant, Premium Function Plan, showing the new **zoneRedundant** field and the **capacity** specification. +Below is an ARM template snippet for a zone-redundant, Premium plan showing the `zoneRedundant` field and the `capacity` specification. -``` - "resources": [ - { - "type": "Microsoft.Web/serverfarms", - "apiVersion": "2021-01-15", - "name": "your_plan_name_here", - "location": "Central US", - "sku": { - "name": "EP3", - "tier": "ElasticPremium", - "size": "EP3", - "family": "EP", - "capacity": 3 - }, - "kind": "elastic", - "properties": { - "perSiteScaling": false, - "elasticScaleEnabled": true, - "maximumElasticWorkerCount": 20, - "isSpot": false, - "reserved": false, - "isXenon": false, - "hyperV": false, - "targetWorkerCount": 0, - "targetWorkerSizeId": 0, - "zoneRedundant": true - } +```json +"resources": [ + { + "type": "Microsoft.Web/serverfarms", + "apiVersion": "2021-01-15", + "name": "your_plan_name_here", + "location": "Central US", + "sku": { + "name": "EP3", + "tier": "ElasticPremium", + "size": "EP3", + "family": "EP", + "capacity": 3 + }, + "kind": "elastic", + "properties": { + "perSiteScaling": false, + "elasticScaleEnabled": true, + "maximumElasticWorkerCount": 20, + "isSpot": false, + "reserved": false, + "isXenon": false, + "hyperV": false, + "targetWorkerCount": 0, + "targetWorkerSizeId": 0, + "zoneRedundant": true } - ] + } +] ``` -To learn more, see [Automate resource deployment for your function app in Azure Functions](functions-infrastructure-as-code.md). 
+To learn more about these templates, see [Automate resource deployment in Azure Functions](functions-infrastructure-as-code.md). + +--- + +After the zone-redundant plan is created and deployed, any function app hosted on your new plan is considered zone-redundant. + +## Next steps + +> [!div class="nextstepaction"] +> [Improve the performance and reliability of Azure Functions](performance-reliability.md) + + diff --git a/articles/azure-functions/create-first-function-vs-code-node.md b/articles/azure-functions/create-first-function-vs-code-node.md index 27697c47e4a84..b31a0bdeaa757 100644 --- a/articles/azure-functions/create-first-function-vs-code-node.md +++ b/articles/azure-functions/create-first-function-vs-code-node.md @@ -2,7 +2,7 @@ title: Create a JavaScript function using Visual Studio Code - Azure Functions description: Learn how to create a JavaScript function, then publish the local Node.js project to serverless hosting in Azure Functions using the Azure Functions extension in Visual Studio Code. ms.topic: quickstart -ms.date: 11/18/2021 +ms.date: 06/07/2022 adobe-target: true adobe-target-activity: DocsExp–386541–A/B–Enhanced-Readability-Quickstarts–2.19.2021 adobe-target-experience: Experience B @@ -27,7 +27,7 @@ Before you get started, make sure you have the following requirements in place: + An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?ref=microsoft.com&utm_source=microsoft.com&utm_medium=docs&utm_campaign=visualstudio). -+ [Node.js 14.x](https://nodejs.org/en/download/releases/) or [Node.js 16.x](https://nodejs.org/en/download/releases/) (preview). Use the `node --version` command to check your version. ++ [Node.js 14.x](https://nodejs.org/en/download/releases/) or [Node.js 16.x](https://nodejs.org/en/download/releases/). Use the `node --version` command to check your version. + [Visual Studio Code](https://code.visualstudio.com/) on one of the [supported platforms](https://code.visualstudio.com/docs/supporting/requirements#_platforms). diff --git a/articles/azure-functions/create-first-function-vs-code-other.md b/articles/azure-functions/create-first-function-vs-code-other.md index 70292a300d99e..429e7ea852700 100644 --- a/articles/azure-functions/create-first-function-vs-code-other.md +++ b/articles/azure-functions/create-first-function-vs-code-other.md @@ -167,7 +167,7 @@ The *function.json* file in the *HttpExample* folder declares an HTTP trigger fu Err(_) => 3000, }; - warp::serve(example1).run((Ipv4Addr::UNSPECIFIED, port)).await + warp::serve(example1).run((Ipv4Addr::LOCALHOST, port)).await } ``` diff --git a/articles/azure-functions/dotnet-isolated-process-guide.md b/articles/azure-functions/dotnet-isolated-process-guide.md index e5d17027e48b7..290bafcf6f261 100644 --- a/articles/azure-functions/dotnet-isolated-process-guide.md +++ b/articles/azure-functions/dotnet-isolated-process-guide.md @@ -4,7 +4,7 @@ description: Learn how to use a .NET isolated process to run your C# functions i ms.service: azure-functions ms.topic: conceptual -ms.date: 05/12/2022 +ms.date: 05/24/2022 ms.custom: template-concept recommendations: false #Customer intent: As a developer, I need to know how to create functions that run in an isolated process so that I can run my function code on current (not LTS) releases of .NET. @@ -44,7 +44,7 @@ A .NET isolated function project is basically a .NET console app project that ta + Program.cs file that's the entry point for the app. 
+ Any code files [defining your functions](#bindings). -For complete examples, see the [.NET 6 isolated sample project](https://github.com/Azure/azure-functions-dotnet-worker/tree/main/samples/FunctionApp) and the [.NET Framework 4.8 isolated sample project](https://github.com/Azure/azure-functions-dotnet-worker/tree/main/samples/NetFxWorker). +For complete examples, see the [.NET 6 isolated sample project](https://github.com/Azure/azure-functions-dotnet-worker/tree/main/samples/FunctionApp) and the [.NET Framework 4.8 isolated sample project](https://go.microsoft.com/fwlink/p/?linkid=2197310). > [!NOTE] > To be able to publish your isolated function project to either a Windows or a Linux function app in Azure, you must set a value of `dotnet-isolated` in the remote [FUNCTIONS_WORKER_RUNTIME](functions-app-settings.md#functions_worker_runtime) application setting. To support [zip deployment](deployment-zip-push.md) and [running from the deployment package](run-functions-from-deployment-package.md) on Linux, you also need to update the `linuxFxVersion` site config setting to `DOTNET-ISOLATED|6.0`. To learn more, see [Manual version updates on Linux](set-runtime-version.md#manual-version-updates-on-linux). @@ -260,7 +260,7 @@ This section describes the current state of the functional and behavioral differ | Output binding types | `IAsyncCollector`, [DocumentClient], [BrokeredMessage], and other client-specific types | Simple types, JSON serializable types, and arrays. | | Multiple output bindings | Supported | [Supported](#multiple-output-bindings) | | HTTP trigger | [HttpRequest]/[ObjectResult] | [HttpRequestData]/[HttpResponseData] | -| Durable Functions | [Supported](durable/durable-functions-overview.md) | Not supported | +| Durable Functions | [Supported](durable/durable-functions-overview.md) | [Supported (public preview)](https://github.com/microsoft/durabletask-dotnet#usage-with-azure-functions) | | Imperative bindings | [Supported](functions-dotnet-class-library.md#binding-at-runtime) | Not supported | | function.json artifact | Generated | Not generated | | Configuration | [host.json](functions-host-json.md) | [host.json](functions-host-json.md) and custom initialization | diff --git a/articles/azure-functions/durable/durable-functions-create-first-csharp.md b/articles/azure-functions/durable/durable-functions-create-first-csharp.md index f839b25852886..1e95d4684c551 100644 --- a/articles/azure-functions/durable/durable-functions-create-first-csharp.md +++ b/articles/azure-functions/durable/durable-functions-create-first-csharp.md @@ -160,7 +160,7 @@ You have used Visual Studio Code to create and publish a C# durable function app ::: zone pivot="code-editor-visualstudio" -In this article, you learn how to use Visual Studio 2019 to locally create and test a "hello world" durable function. This function orchestrates and chains-together calls to other functions. You then publish the function code to Azure. These tools are available as part of the Azure development workload in Visual Studio 2019. +In this article, you learn how to use Visual Studio 2022 to locally create and test a "hello world" durable function. This function orchestrates and chains-together calls to other functions. You then publish the function code to Azure. These tools are available as part of the Azure development workload in Visual Studio 2022. 
![Screenshot shows a Visual Studio 2019 window with a durable function.](./media/durable-functions-create-first-csharp/functions-vs-complete.png) @@ -168,7 +168,7 @@ In this article, you learn how to use Visual Studio 2019 to locally create and t To complete this tutorial: -* Install [Visual Studio 2019](https://visualstudio.microsoft.com/vs/). Make sure that the **Azure development** workload is also installed. Visual Studio 2017 also supports Durable Functions development, but the UI and steps differ. +* Install [Visual Studio 2022](https://visualstudio.microsoft.com/vs/). Make sure that the **Azure development** workload is also installed. Visual Studio 2019 also supports Durable Functions development, but the UI and steps differ. * Verify you have the [Azure Storage Emulator](../../storage/common/storage-use-emulator.md) installed and running. @@ -186,14 +186,14 @@ The Azure Functions template creates a project that can be published to a functi 1. Type a **Project name** for your project, and select **OK**. The project name must be valid as a C# namespace, so don't use underscores, hyphens, or any other nonalphanumeric characters. -1. In **Create a new Azure Functions Application**, use the settings specified in the table that follows the image. +1. Under **Additional information**, use the settings specified in the table that follows the image. ![Create a new Azure Functions Application dialog in Visual Studio](./media/durable-functions-create-first-csharp/functions-vs-new-function.png) | Setting | Suggested value | Description | | ------------ | ------- |----------------------------------------- | - | **Version** | Azure Functions 3.0
                  (.NET Core) | Creates a function project that uses the version 3.0 runtime of Azure Functions, which supports .NET Core 3.1. For more information, see [How to target Azure Functions runtime version](../functions-versions.md). | - | **Template** | Empty | Creates an empty function app. | + | **Functions worker** | .NET 6 | Creates a function project that supports .NET 6 and the Azure Functions Runtime 4.0. For more information, see [How to target Azure Functions runtime version](../functions-versions.md). | + | **Function** | Empty | Creates an empty function app. | | **Storage account** | Storage Emulator | A storage account is required for durable function state management. | 4. Select **Create** to create an empty function project. This project has the basic configuration files needed to run your functions. @@ -208,7 +208,7 @@ The following steps use a template to create the durable function code in your p 1. Verify **Azure Function** is selected from the add menu, type a name for your C# file, and then select **Add**. -1. Select the **Durable Functions Orchestration** template and then select **Ok** +1. Select the **Durable Functions Orchestration** template and then select **Add**. ![Select durable template](./media/durable-functions-create-first-csharp/functions-vs-select-template.png) @@ -244,6 +244,7 @@ Azure Functions Core Tools lets you run an Azure Functions project on your local ```json { + "name": "Durable", "instanceId": "d495cb0ac10d4e13b22729c37e335190", "runtimeStatus": "Completed", "input": null, diff --git a/articles/azure-functions/durable/durable-functions-dotnet-entities.md b/articles/azure-functions/durable/durable-functions-dotnet-entities.md index 52f72203ce7c1..72d8d3caee680 100644 --- a/articles/azure-functions/durable/durable-functions-dotnet-entities.md +++ b/articles/azure-functions/durable/durable-functions-dotnet-entities.md @@ -267,7 +267,7 @@ We also enforce some additional rules: * Entity interface methods must not have more than one parameter. * Entity interface methods must return `void`, `Task`, or `Task`. -If any of these rules are violated, an `InvalidOperationException` is thrown at runtime when the interface is used as a type argument to `SignalEntity` or `CreateProxy`. The exception message explains which rule was broken. +If any of these rules are violated, an `InvalidOperationException` is thrown at runtime when the interface is used as a type argument to `SignalEntity`, `SignalEntityAsync`, or `CreateEntityProxy`. The exception message explains which rule was broken. > [!NOTE] > Interface methods returning `void` can only be signaled (one-way), not called (two-way). Interface methods returning `Task` or `Task` can be either called or signalled. If called, they return the result of the operation, or re-throw exceptions thrown by the operation. However, when signalled, they do not return the actual result or exception from the operation, but just the default value. 
diff --git a/articles/azure-functions/durable/durable-functions-http-api.md b/articles/azure-functions/durable/durable-functions-http-api.md index 96101404f5840..23e8d474e1d40 100644 --- a/articles/azure-functions/durable/durable-functions-http-api.md +++ b/articles/azure-functions/durable/durable-functions-http-api.md @@ -249,6 +249,7 @@ GET /admin/extensions/DurableTaskExtension/instances &createdTimeFrom={timestamp} &createdTimeTo={timestamp} &runtimeStatus={runtimeStatus1,runtimeStatus2,...} + &instanceIdPrefix={prefix} &showInput=[true|false] &top={integer} ``` @@ -263,6 +264,7 @@ GET /runtime/webhooks/durableTask/instances? &createdTimeFrom={timestamp} &createdTimeTo={timestamp} &runtimeStatus={runtimeStatus1,runtimeStatus2,...} + &instanceIdPrefix={prefix} &showInput=[true|false] &top={integer} ``` @@ -278,6 +280,7 @@ Request parameters for this API include the default set mentioned previously as | **`createdTimeFrom`** | Query string | Optional parameter. When specified, filters the list of returned instances that were created at or after the given ISO8601 timestamp.| | **`createdTimeTo`** | Query string | Optional parameter. When specified, filters the list of returned instances that were created at or before the given ISO8601 timestamp.| | **`runtimeStatus`** | Query string | Optional parameter. When specified, filters the list of returned instances based on their runtime status. To see the list of possible runtime status values, see the [Querying instances](durable-functions-instance-management.md) article. | +| **`instanceIdPrefix`** | Query string | Optional parameter. When specified, filters the list of returned instances to include only instances whose instance id starts with the specified prefix string. Available starting with [version 2.7.2](https://www.nuget.org/packages/Microsoft.Azure.WebJobs.Extensions.DurableTask/2.7.2) of the extension. | | **`top`** | Query string | Optional parameter. When specified, limits the number of instances returned by the query. | ### Response diff --git a/articles/azure-functions/durable/durable-functions-overview.md b/articles/azure-functions/durable/durable-functions-overview.md index dc7935cb04cba..a51c93496d9f5 100644 --- a/articles/azure-functions/durable/durable-functions-overview.md +++ b/articles/azure-functions/durable/durable-functions-overview.md @@ -3,8 +3,9 @@ title: Durable Functions Overview - Azure description: Introduction to the Durable Functions extension for Azure Functions. author: cgillum ms.topic: overview -ms.date: 12/23/2020 +ms.date: 05/24/2022 ms.author: cgillum +ms.custom: devdivchpfy22 ms.reviewer: azfuncdf #Customer intent: As a < type of user >, I want < what? > so that < why? >. --- @@ -15,17 +16,15 @@ ms.reviewer: azfuncdf ## Supported languages -Durable Functions currently supports the following languages: +Durable Functions is designed to work with all Azure Functions programming languages but may have different minimum requirements for each language. The following table shows the minimum supported app configurations: -* **C#**: both [precompiled class libraries](../functions-dotnet-class-library.md) and [C# script](../functions-reference-csharp.md). -* **JavaScript**: supported only for version 2.x or later of the Azure Functions runtime. Requires version 1.7.0 of the Durable Functions extension, or a later version. -* **Python**: requires version 2.3.1 of the Durable Functions extension, or a later version. -* **F#**: precompiled class libraries and F# script. 
F# script is only supported for version 1.x of the Azure Functions runtime. -* **PowerShell**: Supported only for version 3.x of the Azure Functions runtime and PowerShell 7. Requires version 2.x of the bundle extensions. - -To access the latest features and updates, it is recommended you use the latest versions of the Durable Functions extension and the language-specific Durable Functions libraries. Learn more about [Durable Functions versions](durable-functions-versions.md). - -Durable Functions has a goal of supporting all [Azure Functions languages](../supported-languages.md). See the [Durable Functions issues list](https://github.com/Azure/azure-functions-durable-extension/issues) for the latest status of work to support additional languages. +| Language stack | Azure Functions Runtime versions | Language worker version | Minimum bundles version | +| - | - | - | - | +| .NET / C# / F# | Functions 1.0+ | In-process (GA)
                  Out-of-process ([preview](https://github.com/microsoft/durabletask-dotnet#usage-with-azure-functions)) | N/A | +| JavaScript/TypeScript | Functions 2.0+ | Node 8+ | 2.x bundles | +| Python | Functions 2.0+ | Python 3.7+ | 2.x bundles | +| PowerShell | Functions 3.0+ | PowerShell 7+ | 2.x bundles | +| Java (coming soon) | Functions 3.0+ | Java 8+ | 4.x bundles | Like Azure Functions, there are templates to help you develop Durable Functions using [Visual Studio 2019](durable-functions-create-first-csharp.md), [Visual Studio Code](quickstart-js-vscode.md), and the [Azure portal](durable-functions-create-portal.md). diff --git a/articles/azure-functions/durable/durable-functions-phone-verification.md b/articles/azure-functions/durable/durable-functions-phone-verification.md index ca973124815bb..53e9b577f0b39 100644 --- a/articles/azure-functions/durable/durable-functions-phone-verification.md +++ b/articles/azure-functions/durable/durable-functions-phone-verification.md @@ -95,7 +95,7 @@ The **E4_SendSmsChallenge** function uses the Twilio binding to send the SMS mes [!code-csharp[Main](~/samples-durable-functions/samples/precompiled/PhoneVerification.cs?range=72-89)] > [!NOTE] -> You will need to install the `Microsoft.Azure.WebJobs.Extensions.Twilio` Nuget package to run the sample code. +> You must first install the `Microsoft.Azure.WebJobs.Extensions.Twilio` Nuget package for Functions to run the sample code. Don't also install the main [Twilio nuget package](https://www.nuget.org/packages/Twilio/) because this can cause versioning problems that result in build errors. # [JavaScript](#tab/javascript) diff --git a/articles/azure-functions/durable/media/durable-functions-create-first-csharp/functions-vs-new-function.png b/articles/azure-functions/durable/media/durable-functions-create-first-csharp/functions-vs-new-function.png index fb4487e7ef701..c74b331c43af9 100644 Binary files a/articles/azure-functions/durable/media/durable-functions-create-first-csharp/functions-vs-new-function.png and b/articles/azure-functions/durable/media/durable-functions-create-first-csharp/functions-vs-new-function.png differ diff --git a/articles/azure-functions/durable/media/durable-functions-create-first-csharp/functions-vs-select-template.png b/articles/azure-functions/durable/media/durable-functions-create-first-csharp/functions-vs-select-template.png index fdf7f28d9cc97..024fbf1cc6896 100644 Binary files a/articles/azure-functions/durable/media/durable-functions-create-first-csharp/functions-vs-select-template.png and b/articles/azure-functions/durable/media/durable-functions-create-first-csharp/functions-vs-select-template.png differ diff --git a/articles/azure-functions/event-driven-scaling.md b/articles/azure-functions/event-driven-scaling.md index f99dad603b121..9c06705f5b64a 100644 --- a/articles/azure-functions/event-driven-scaling.md +++ b/articles/azure-functions/event-driven-scaling.md @@ -48,6 +48,15 @@ $resource.Properties.functionAppScaleLimit = $resource | Set-AzResource -Force ``` +## Scale-in behaviors + +Event-driven scaling automatically reduces capacity when demand for your functions is reduced. It does this by shutting down worker instances of your function app. Before an instance is shut down, new events stop being sent to the instance. Also, functions that are currently executing are given time to finish executing. This behavior is logged as drain mode. 
This shut-down period can extend up to 10 minutes for Consumption plan apps and up to 60 minutes for Premium plan apps. Event-driven scaling and this behavior don't apply to Dedicated plan apps. + +The following considerations apply for scale-in behaviors: + +* For Consumption plan function apps running on Windows, only apps created after May 2021 have drain mode behaviors enabled by default. +* To enable graceful shutdown for functions using the Service Bus trigger, use version 4.2.0 or a later version of the [Service Bus Extension](functions-bindings-service-bus.md). + ## Event Hubs trigger This section describes how scaling behaves when your function uses an [Event Hubs trigger](functions-bindings-event-hubs-trigger.md) or an [IoT Hub trigger](functions-bindings-event-iot-trigger.md). In these cases, each instance of an event triggered function is backed by a single [EventProcessorHost](/dotnet/api/microsoft.azure.eventhubs.processor) instance. The trigger (powered by Event Hubs) ensures that only one [EventProcessorHost](/dotnet/api/microsoft.azure.eventhubs.processor) instance can get a lease on a given partition. diff --git a/articles/azure-functions/functions-add-output-binding-storage-queue-cli.md b/articles/azure-functions/functions-add-output-binding-storage-queue-cli.md index ac9465434959d..aaa4f25ff2340 100644 --- a/articles/azure-functions/functions-add-output-binding-storage-queue-cli.md +++ b/articles/azure-functions/functions-add-output-binding-storage-queue-cli.md @@ -118,15 +118,11 @@ mvn azure-functions:deploy # [Browser](#tab/browser) - Copy the complete **Invoke URL** shown in the output of the publish command into a browser address bar, appending the query parameter `&name=Functions`. The browser should display similar output as when you ran the function locally. - - ![The output of the function runs on Azure in a browser](./media/functions-add-output-binding-storage-queue-cli/function-test-cloud-browser.png) + Copy the complete **Invoke URL** shown in the output of the publish command into a browser address bar, appending the query parameter `&name=Functions`. The browser should display the same output as when you ran the function locally. # [curl](#tab/curl) - Run [`curl`](https://curl.haxx.se/) with the **Invoke URL**, appending the parameter `&name=Functions`. The output of the command should be the text, "Hello Functions." - - ![The output of the function runs on Azure using CURL](./media/functions-add-output-binding-storage-queue-cli/function-test-cloud-curl.png) + Run [`curl`](https://curl.haxx.se/) with the **Invoke URL**, appending the parameter `&name=Functions`. The output should be the same as when you ran the function locally. --- diff --git a/articles/azure-functions/functions-app-settings.md b/articles/azure-functions/functions-app-settings.md index ee0ce84d46f2a..119f1f675ccf0 100644 --- a/articles/azure-functions/functions-app-settings.md +++ b/articles/azure-functions/functions-app-settings.md @@ -419,13 +419,15 @@ The above sample value of `1800` sets a timeout of 30 minutes. To learn more, se ## WEBSITE\_CONTENTAZUREFILECONNECTIONSTRING -Connection string for storage account where the function app code and configuration are stored in event-driven scaling plans running on Windows. For more information, see [Create a function app](functions-infrastructure-as-code.md?tabs=windows#create-a-function-app). +Connection string for storage account where the function app code and configuration are stored in event-driven scaling plans. 
For more information, see [Create a function app](functions-infrastructure-as-code.md?tabs=windows#create-a-function-app). |Key|Sample value| |---|------------| |WEBSITE_CONTENTAZUREFILECONNECTIONSTRING|`DefaultEndpointsProtocol=https;AccountName=...`| -Only used when deploying to a Windows or Linux Premium plan or to a Windows Consumption plan. Not supported for Linux Consumption plans or Windows or Linux Dedicated plans. Changing or removing this setting may cause your function app to not start. To learn more, see [this troubleshooting article](functions-recover-storage-account.md#storage-account-application-settings-were-deleted). +This setting is used for Consumption and Premium plan apps on both Windows and Linux. It's not used for Dedicated plan apps, which aren't dynamically scaled by Functions. + +Changing or removing this setting may cause your function app to not start. To learn more, see [this troubleshooting article](functions-recover-storage-account.md#storage-account-application-settings-were-deleted). ## WEBSITE\_CONTENTOVERVNET diff --git a/articles/azure-functions/functions-bindings-azure-sql-input.md b/articles/azure-functions/functions-bindings-azure-sql-input.md index ae231f717d4dc..b502e47e46783 100644 --- a/articles/azure-functions/functions-bindings-azure-sql-input.md +++ b/articles/azure-functions/functions-bindings-azure-sql-input.md @@ -528,7 +528,7 @@ The following table explains the binding configuration properties that you set i |**direction** | Required. Must be set to `in`. | |**name** | Required. The name of the variable that represents the query results in function code. | | **commandText** | Required. The Transact-SQL query command or name of the stored procedure executed by the binding. | -| **connectionStringSetting** | Required. The name of an app setting that contains the connection string for the database against which the query or stored procedure is being executed. This value isn't the actual connection string and must instead resolve to an environment variable name. | +| **connectionStringSetting** | Required. The name of an app setting that contains the connection string for the database against which the query or stored procedure is being executed. This value isn't the actual connection string and must instead resolve to an environment variable name. Optional keywords in the connection string value are [available to refine SQL bindings connectivity](./functions-bindings-azure-sql.md#sql-connection-string). | | **commandType** | Required. A [CommandType](/dotnet/api/system.data.commandtype) value, which is [Text](/dotnet/api/system.data.commandtype#fields) for a query and [StoredProcedure](/dotnet/api/system.data.commandtype#fields) for a stored procedure. | | **parameters** | Optional. Zero or more parameter values passed to the command during execution as a single string. Must follow the format `@param1=param1,@param2=param2`. Neither the parameter name nor the parameter value can contain a comma (`,`) or an equals sign (`=`). | ::: zone-end @@ -540,7 +540,7 @@ The following table explains the binding configuration properties that you set i ::: zone pivot="programming-language-csharp,programming-language-javascript,programming-language-python" -The attribute's constructor takes the SQL command text, the command type, parameters, and the connection string setting name. 
The command can be a Transact-SQL (T-SQL) query with the command type `System.Data.CommandType.Text` or stored procedure name with the command type `System.Data.CommandType.StoredProcedure`. The connection string setting name corresponds to the application setting (in `local.settings.json` for local development) that contains the [connection string](/dotnet/api/microsoft.data.sqlclient.sqlconnection.connectionstring?view=sqlclient-dotnet-core-3.0&preserve-view=true#Microsoft_Data_SqlClient_SqlConnection_ConnectionString) to the Azure SQL or SQL Server instance. +The attribute's constructor takes the SQL command text, the command type, parameters, and the connection string setting name. The command can be a Transact-SQL (T-SQL) query with the command type `System.Data.CommandType.Text` or stored procedure name with the command type `System.Data.CommandType.StoredProcedure`. The connection string setting name corresponds to the application setting (in `local.settings.json` for local development) that contains the [connection string](/dotnet/api/microsoft.data.sqlclient.sqlconnection.connectionstring?view=sqlclient-dotnet-core-3.1&preserve-view=true#Microsoft_Data_SqlClient_SqlConnection_ConnectionString) to the Azure SQL or SQL Server instance. ::: zone-end diff --git a/articles/azure-functions/functions-bindings-azure-sql-output.md b/articles/azure-functions/functions-bindings-azure-sql-output.md index 9e3edd6633714..3ec2d268f8e8c 100644 --- a/articles/azure-functions/functions-bindings-azure-sql-output.md +++ b/articles/azure-functions/functions-bindings-azure-sql-output.md @@ -535,7 +535,7 @@ The following table explains the binding configuration properties that you set i |**direction** | Required. Must be set to `out`. | |**name** | Required. The name of the variable that represents the entity in function code. | | **commandText** | Required. The name of the table being written to by the binding. | -| **connectionStringSetting** | Required. The name of an app setting that contains the connection string for the database to which data is being written. This isn't the actual connection string and must instead resolve to an environment variable.| +| **connectionStringSetting** | Required. The name of an app setting that contains the connection string for the database to which data is being written. This isn't the actual connection string and must instead resolve to an environment variable. Optional keywords in the connection string value are [available to refine SQL bindings connectivity](./functions-bindings-azure-sql.md#sql-connection-string). | ::: zone-end @@ -544,7 +544,7 @@ The following table explains the binding configuration properties that you set i ## Usage ::: zone pivot="programming-language-csharp,programming-language-javascript,programming-language-python" -The `CommandText` property is the name of the table where the data is to be stored. The connection string setting name corresponds to the application setting that contains the [connection string](/dotnet/api/microsoft.data.sqlclient.sqlconnection.connectionstring?view=sqlclient-dotnet-core-3.0&preserve-view=true#Microsoft_Data_SqlClient_SqlConnection_ConnectionString) to the Azure SQL or SQL Server instance. +The `CommandText` property is the name of the table where the data is to be stored. 
The connection string setting name corresponds to the application setting that contains the [connection string](/dotnet/api/microsoft.data.sqlclient.sqlconnection.connectionstring?view=sqlclient-dotnet-core-3.1&preserve-view=true#Microsoft_Data_SqlClient_SqlConnection_ConnectionString) to the Azure SQL or SQL Server instance. ::: zone-end diff --git a/articles/azure-functions/functions-bindings-azure-sql.md b/articles/azure-functions/functions-bindings-azure-sql.md index 5772fb52c3bdc..2ad7dd4ed29f6 100644 --- a/articles/azure-functions/functions-bindings-azure-sql.md +++ b/articles/azure-functions/functions-bindings-azure-sql.md @@ -4,7 +4,7 @@ description: Understand how to use Azure SQL bindings in Azure Functions. author: dzsquared ms.topic: reference ms.custom: event-tier1-build-2022 -ms.date: 5/24/2022 +ms.date: 6/3/2022 ms.author: drskwier ms.reviewer: glenga zone_pivot_groups: programming-languages-set-functions-lang-workers @@ -97,10 +97,17 @@ You can add the preview extension bundle by adding or replacing the following co ::: zone pivot="programming-language-python" +## Functions runtime + > [!NOTE] -> Python language support for the SQL bindings extension is only available for v4 of the [functions runtime](./set-runtime-version.md#view-and-update-the-current-runtime-version) and requires runtime v4.5.0 for deployment in Azure. Learn more about determining the runtime in the [functions runtime](./set-runtime-version.md#view-and-update-the-current-runtime-version) documentation. Please see the tracking [GitHub issue](https://github.com/Azure/azure-functions-sql-extension/issues/250) for the latest update on availability. +> Python language support for the SQL bindings extension is only available for v4 of the [functions runtime](./set-runtime-version.md#view-and-update-the-current-runtime-version) and requires runtime v4.5.0 or greater for deployment in Azure. Learn more about determining the runtime in the [functions runtime](./set-runtime-version.md#view-and-update-the-current-runtime-version) documentation. Please see the tracking [GitHub issue](https://github.com/Azure/azure-functions-sql-extension/issues/250) for the latest update on availability. -## Install bundle +The functions runtime required for local development and testing of Python functions isn't included in the current release of functions core tools and must be installed independently. The latest instructions on installing a preview version of functions core tools are available in the tracking [GitHub issue](https://github.com/Azure/azure-functions-sql-extension/issues/250). + +Alternatively, a VS Code [development container](https://code.visualstudio.com/docs/remote/containers) definition can be used to expedite your environment setup. The definition components are available in the SQL bindings [GitHub repository](https://github.com/Azure/azure-functions-sql-extension/tree/main/samples/samples-python/.devcontainer). + + +## Install bundle The SQL bindings extension is part of a preview [extension bundle], which is specified in your host.json project file. @@ -120,7 +127,7 @@ You can add the preview extension bundle by adding or replacing the following co # [Preview Bundle v3.x](#tab/extensionv3) -Python support is not available with the SQL bindings extension in the v3 version of the functions runtime. +Python support isn't available with the SQL bindings extension in the v3 version of the functions runtime. --- @@ -151,6 +158,15 @@ Support for Python durable functions with SQL bindings isn't yet available. 
::: zone-end +## SQL connection string + +Azure SQL bindings for Azure Functions have a required property for connection string on both [input](./functions-bindings-azure-sql-input.md) and [output](./functions-bindings-azure-sql-output.md) bindings. SQL bindings passes the connection string to the Microsoft.Data.SqlClient library and supports the connection string as defined in the [SqlClient ConnectionString documentation](/dotnet/api/microsoft.data.sqlclient.sqlconnection.connectionstring?view=sqlclient-dotnet-core-3.1&preserve-view=true). Notable keywords include: + +- `Authentication` allows a function to connect to Azure SQL with Azure Active Directory, including [Active Directory Managed Identity](./functions-identity-access-azure-sql-with-managed-identity.md) +- `Command Timeout` allows a function to wait for specified amount of time in seconds before terminating a query (default 30 seconds) +- `ConnectRetryCount` allows a function to automatically make additional reconnection attempts, especially applicable to Azure SQL Database serverless tier (default 1) + + ## Considerations - Because the Azure SQL bindings doesn't have a trigger, you need to use another supported trigger to start a function that reads from or writes to an Azure SQL database. diff --git a/articles/azure-functions/functions-bindings-mobile-apps.md b/articles/azure-functions/functions-bindings-mobile-apps.md index 317f6a6fc1943..71d0a45202e38 100644 --- a/articles/azure-functions/functions-bindings-mobile-apps.md +++ b/articles/azure-functions/functions-bindings-mobile-apps.md @@ -286,7 +286,7 @@ The following table explains the binding configuration properties that you set i | **name**| n/a | Name of output parameter in function signature.| |**tableName** |**TableName**|Name of the mobile app's data table| |**connection**|**MobileAppUriSetting**|The name of an app setting that has the mobile app's URL. The function uses this URL to construct the required REST operations against your mobile app. Create an app setting in your function app that contains the mobile app's URL, then specify the name of the app setting in the `connection` property in your input binding. The URL looks like `http://.azurewebsites.net`. -|**apiKey**|**ApiKeySetting**|The name of an app setting that has your mobile app's API key. Provide the API key if you implement an API key in your Node.js mobile app backend, or [implement an API key in your .NET mobile app backend](https://github.com/Azure/azure-mobile-apps-net-server/wiki/Implementing-Application-Key). To provide the key, create an app setting in your function app that contains the API key, then add the `apiKey` property in your input binding with the name of the app setting. | +|**apiKey**|**ApiKeySetting**|The name of an app setting that has your mobile app's API key. Provide the API key if you implement an API key in your Node.js mobile app backend, or implement an API key in your .NET mobile app backend. To provide the key, create an app setting in your function app that contains the API key, then add the `apiKey` property in your input binding with the name of the app setting. 
| [!INCLUDE [app settings to local.settings.json](../../includes/functions-app-settings-local.md)] diff --git a/articles/azure-functions/functions-bindings-storage-blob.md b/articles/azure-functions/functions-bindings-storage-blob.md index 47bc759545748..418a605d8a655 100644 --- a/articles/azure-functions/functions-bindings-storage-blob.md +++ b/articles/azure-functions/functions-bindings-storage-blob.md @@ -146,7 +146,7 @@ Functions 1.x apps automatically have a reference to the extension. ## host.json settings -This section describes the function app configuration settings available for functions that this binding. These settings only apply when using extension version 5.0.0 and higher. The example host.json file below contains only the version 2.x+ settings for this binding. For more information about function app configuration settings in versions 2.x and later versions, see [host.json reference for Azure Functions](functions-host-json.md). +This section describes the function app configuration settings available for functions that use this binding. These settings only apply when using extension version 5.0.0 and higher. The example host.json file below contains only the version 2.x+ settings for this binding. For more information about function app configuration settings in versions 2.x and later versions, see [host.json reference for Azure Functions](functions-host-json.md). > [!NOTE] > This section doesn't apply to extension versions before 5.0.0. For those earlier versions, there aren't any function app-wide configuration settings for blobs. diff --git a/articles/azure-functions/functions-bindings-storage-queue-trigger.md b/articles/azure-functions/functions-bindings-storage-queue-trigger.md index dfaed46e2540c..eef2d5e64fd77 100644 --- a/articles/azure-functions/functions-bindings-storage-queue-trigger.md +++ b/articles/azure-functions/functions-bindings-storage-queue-trigger.md @@ -363,7 +363,7 @@ See the [Example section](#example) for complete examples. ::: zone pivot="programming-language-csharp" -The usage of the Blob trigger depends on the extension package version, and the C# modality used in your function app, which can be one of the following: +The usage of the Queue trigger depends on the extension package version, and the C# modality used in your function app, which can be one of the following: # [In-process class library](#tab/in-process) @@ -528,4 +528,4 @@ The host.json file contains settings that control queue trigger behavior. See th [CloudQueueMessage]: /dotnet/api/microsoft.azure.storage.queue.cloudqueuemessage -[QueueMessage]: /dotnet/api/azure.storage.queues.models.queuemessage \ No newline at end of file +[QueueMessage]: /dotnet/api/azure.storage.queues.models.queuemessage diff --git a/articles/azure-functions/functions-create-first-function-bicep.md b/articles/azure-functions/functions-create-first-function-bicep.md new file mode 100644 index 0000000000000..1c5b7f8865648 --- /dev/null +++ b/articles/azure-functions/functions-create-first-function-bicep.md @@ -0,0 +1,131 @@ +--- +title: Create your function app resources in Azure using Bicep +description: Create and deploy to Azure a simple HTTP triggered serverless function using Bicep. 
+author: schaffererin +ms.author: v-eschaffer +ms.date: 05/12/2022 +ms.topic: quickstart +ms.service: azure-functions +ms.custom: subject-armqs, devx-track-azurepowershell, mode-arm +--- + +# Quickstart: Create and deploy Azure Functions resources using Bicep + +In this article, you use Bicep to create a function that responds to HTTP requests. + +Completing this quickstart incurs a small cost of a few USD cents or less in your Azure account. + +[!INCLUDE [About Bicep](../../includes/resource-manager-quickstart-bicep-introduction.md)] + +## Prerequisites + +### Azure account + +Before you begin, you must have an Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/). + +## Review the Bicep file + +The Bicep file used in this quickstart is from [Azure Quickstart Templates](https://azure.microsoft.com/resources/templates/function-app-create-dynamic/). + +:::code language="bicep" source="~/quickstart-templates/quickstarts/microsoft.web/function-app-create-dynamic/main.bicep"::: + +The following four Azure resources are created by this Bicep file: + ++ [**Microsoft.Storage/storageAccounts**](/azure/templates/microsoft.storage/storageaccounts): create an Azure Storage account, which is required by Functions. ++ [**Microsoft.Web/serverfarms**](/azure/templates/microsoft.web/serverfarms): create a serverless Consumption hosting plan for the function app. ++ [**Microsoft.Web/sites**](/azure/templates/microsoft.web/sites): create a function app. ++ [**microsoft.insights/components**](/azure/templates/microsoft.insights/components): create an Application Insights instance for monitoring. + +## Deploy the Bicep file + +1. Save the Bicep file as **main.bicep** to your local computer. +1. Deploy the Bicep file using either Azure CLI or Azure PowerShell. + + # [CLI](#tab/CLI) + + ```azurecli + az group create --name exampleRG --location eastus + az deployment group create --resource-group exampleRG --template-file main.bicep --parameters appInsightsLocation= + ``` + + # [PowerShell](#tab/PowerShell) + + ```azurepowershell + New-AzResourceGroup -Name exampleRG -Location eastus + New-AzResourceGroupDeployment -ResourceGroupName exampleRG -TemplateFile ./main.bicep -appInsightsLocation "" + ``` + + --- + + > [!NOTE] + > Replace **\** with the region for Application Insights, which is usually the same as the resource group. + + When the deployment finishes, you should see a message indicating the deployment succeeded. + +## Validate the deployment + +Use Azure CLI or Azure PowerShell to validate the deployment. + +# [CLI](#tab/CLI) + +```azurecli-interactive +az resource list --resource-group exampleRG +``` + +# [PowerShell](#tab/PowerShell) + +```azurepowershell-interactive +Get-AzResource -ResourceGroupName exampleRG +``` + +--- + +## Visit function app welcome page + +1. Use the output from the previous validation step to retrieve the unique name created for your function app. +1. Open a browser and enter the following URL: **\**. Make sure to replace **<\appName\>** with the unique name created for your function app. + +When you visit the URL, you should see a page like this: + +:::image type="content" source="../azure-functions/media/functions-create-first-function-bicep/function-app-bicep.png" alt-text="Function app welcome page" border="false"::: + +## Clean up resources + +If you continue to the next step and add an Azure Storage queue output binding, keep all your resources in place as you'll build on what you've already done. 
+ +Otherwise, if you no longer need the resources, use Azure CLI, PowerShell, or Azure portal to delete the resource group and its resources. + +# [CLI](#tab/CLI) + +```azurecli-interactive +az group delete --name exampleRG +``` + +# [PowerShell](#tab/PowerShell) + +```azurepowershell-interactive +Remove-AzResourceGroup -Name exampleRG +``` + +--- + +## Next steps + +Now that you've publish your first function, learn more by adding an output binding to your function. + +# [Visual Studio Code](#tab/visual-studio-code) + +> [!div class="nextstepaction"] +> [Connect to an Azure Storage queue](functions-add-output-binding-storage-queue-vs-code.md) + +# [Visual Studio](#tab/visual-studio) + +> [!div class="nextstepaction"] +> [Connect to an Azure Storage queue](functions-add-output-binding-storage-queue-vs.md) + +# [Command line](#tab/command-line) + +> [!div class="nextstepaction"] +> [Connect to an Azure Storage queue](functions-add-output-binding-storage-queue-cli.md) + +--- diff --git a/articles/azure-functions/functions-create-your-first-function-visual-studio.md b/articles/azure-functions/functions-create-your-first-function-visual-studio.md index e0095c5da538b..e94c24aa47a51 100644 --- a/articles/azure-functions/functions-create-your-first-function-visual-studio.md +++ b/articles/azure-functions/functions-create-your-first-function-visual-studio.md @@ -31,7 +31,7 @@ There is also a [Visual Studio Code-based version](create-first-function-vs-code ## Prerequisites -+ [Visual Studio 2022](https://azure.microsoft.com/downloads/), which supports .NET 6.0. Make sure to select the **Azure development** workload during installation. ++ [Visual Studio 2022](https://visualstudio.microsoft.com/vs/), which supports .NET 6.0. Make sure to select the **Azure development** workload during installation. + [Azure subscription](../guides/developer/azure-developer-guide.md#understanding-accounts-subscriptions-and-billing). If you don't already have an account [create a free one](https://azure.microsoft.com/free/dotnet/) before you begin. @@ -45,20 +45,20 @@ The Azure Functions project template in Visual Studio creates a C# class library 1. In **Configure your new project**, enter a **Project name** for your project, and then select **Create**. The function app name must be valid as a C# namespace, so don't use underscores, hyphens, or any other nonalphanumeric characters. -1. For the **Create a new Azure Functions application** settings, use the values in the following table: +1. For the **Additional information** settings, use the values in the following table: | Setting | Value | Description | | ------------ | ------- |----------------------------------------- | - | **.NET version** | **.NET 6** | This value creates a function project that runs in-process with version 4.x of the Azure Functions runtime. You can also choose **.NET 6 (isolated)** to create a project that runs in a separate worker process. Azure Functions 1.x supports the .NET Framework. For more information, see [Azure Functions runtime versions overview](./functions-versions.md). | - | **Function template** | **HTTP trigger** | This value creates a function triggered by an HTTP request. | - | **Storage account (AzureWebJobsStorage)** | **Storage emulator** | Because a function app in Azure requires a storage account, one is assigned or created when you publish your project to Azure. An HTTP trigger doesn't use an Azure Storage account connection string; all other trigger types require a valid Azure Storage account connection string. 
When you select this option, the Azurite emulator is used. | + | **Functions worker** | **.NET 6** or **.NET 6 Isolated** | When you choose **.NET 6**, you create a project that runs in-process with version 4.x of the Azure Functions runtime. When you choose **.NET 6 Isolated**, you create a project that runs in a separate worker process. Azure Functions 1.x supports the .NET Framework. For more information, see [Azure Functions runtime versions overview](./functions-versions.md). | + | **Function** | **HTTP trigger** | This value creates a function triggered by an HTTP request. | + | **Use Azurite for runtime storage account (AzureWebJobsStorage)** | Enable | Because a function app in Azure requires a storage account, one is assigned or created when you publish your project to Azure. An HTTP trigger doesn't use an Azure Storage account connection string; all other trigger types require a valid Azure Storage account connection string. When you select this option, the Azurite emulator is used. | | **Authorization level** | **Anonymous** | The created function can be triggered by any client without providing a key. This authorization setting makes it easy to test your new function. For more information about keys and authorization, see [Authorization keys](./functions-bindings-http-webhook-trigger.md#authorization-keys) and [HTTP and webhook bindings](./functions-bindings-http-webhook.md). | :::image type="content" source="../../includes/media/functions-vs-tools-create/functions-project-settings-v4.png" alt-text="Azure Functions project settings"::: Make sure you set the **Authorization level** to **Anonymous**. If you choose the default level of **Function**, you're required to present the [function key](./functions-bindings-http-webhook-trigger.md#authorization-keys) in requests to access your function endpoint. -1. Select **Create** to create the function project and HTTP trigger function. +2. Select **Create** to create the function project and HTTP trigger function. Visual Studio creates a project and class that contains boilerplate code for the HTTP trigger function type. The boilerplate code sends an HTTP response that includes a value from the request body or query string. The `HttpTrigger` attribute specifies that the function is triggered by an HTTP request. @@ -72,10 +72,18 @@ The `FunctionName` method attribute sets the name of the function, which by defa 1. In the `HttpTrigger` method named `Run`, rename the `FunctionName` method attribute to `HttpExample`. -Your function definition should now look like the following code: +Your function definition should now look like the following code, depending on mode: + +# [In-process](#tab/in-process) :::code language="csharp" source="~/functions-docs-csharp/http-trigger-template/HttpExample.cs" range="15-18"::: +# [Isolated process](#tab/isolated-process) + +:::code language="csharp" source="~/functions-docs-csharp/http-trigger-isolated/HttpExample.cs" range="11-13"::: + +--- + Now that you've renamed the function, you can test it on your local computer. 
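The article pulls the exact function definitions from external sample snippets, so they aren't visible in this diff. As a rough, hypothetical sketch of what the renamed in-process definition might look like (the body here is simplified rather than copied from the template):

```csharp
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Azure.WebJobs;
using Microsoft.Azure.WebJobs.Extensions.Http;
using Microsoft.Extensions.Logging;

public static class HttpExample
{
    // The FunctionName attribute now carries the new name, HttpExample.
    [FunctionName("HttpExample")]
    public static IActionResult Run(
        [HttpTrigger(AuthorizationLevel.Anonymous, "get", "post", Route = null)] HttpRequest req,
        ILogger log)
    {
        log.LogInformation("C# HTTP trigger function processed a request.");

        // Read the optional name value from the query string.
        string name = req.Query["name"];
        string message = string.IsNullOrEmpty(name)
            ? "This HTTP triggered function executed successfully. Pass a name in the query string."
            : $"Hello, {name}. This HTTP triggered function executed successfully.";

        return new OkObjectResult(message);
    }
}
```

The isolated-process version typically differs mainly in its trigger types (`HttpRequestData`/`HttpResponseData`) and in using the `Function` attribute rather than `FunctionName`.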
## Run the function locally diff --git a/articles/azure-functions/functions-deployment-technologies.md b/articles/azure-functions/functions-deployment-technologies.md index 2f62675eb30ff..9c7c84d6f2d91 100644 --- a/articles/azure-functions/functions-deployment-technologies.md +++ b/articles/azure-functions/functions-deployment-technologies.md @@ -21,7 +21,7 @@ The following table describes the available deployment methods for your Function | -- | -- | -- | | Tools-based | • [Visual Studio Code publish](functions-develop-vs-code.md#publish-to-azure)
                  • [Visual Studio publish](functions-develop-vs.md#publish-to-azure)
                  • [Core Tools publish](functions-run-local.md#publish) | Deployments during development and other ad hoc deployments. Deployments are managed locally by the tooling. | | App Service-managed| • [Deployment Center (CI/CD)](functions-continuous-deployment.md)
                  • [Container deployments](functions-create-function-linux-custom-image.md#enable-continuous-deployment-to-azure) | Continuous deployment (CI/CD) from source control or from a container registry. Deployments are managed by the App Service platform (Kudu).| -| External pipelines|• [Azure Pipelines](functions-how-to-azure-devops.md)
                  • [GitHub actions](functions-how-to-github-actions.md) | Production and DevOps pipelines that include additional validation, testing, and other actions be run as part of an automated deployment. Deployments are managed by the pipeline. | +| External pipelines|• [Azure Pipelines](functions-how-to-azure-devops.md)
                  • [GitHub Actions](functions-how-to-github-actions.md) | Production and DevOps pipelines that include additional validation, testing, and other actions to be run as part of an automated deployment. Deployments are managed by the pipeline. | While specific Functions deployments use the best technology based on their context, most deployment methods are based on [zip deployment](#zip-deploy). diff --git a/articles/azure-functions/functions-develop-local.md b/articles/azure-functions/functions-develop-local.md index 9dbad9f0ab035..6cb95baa7f676 100644 --- a/articles/azure-functions/functions-develop-local.md +++ b/articles/azure-functions/functions-develop-local.md @@ -3,12 +3,12 @@ title: Develop and run Azure Functions locally description: Learn how to code and test Azure Functions on your local computer before you run them on Azure Functions. ms.topic: conceptual -ms.date: 09/04/2018 +ms.date: 05/19/2022 --- # Code and test Azure Functions locally -While you're able to develop and test Azure Functions in the [Azure portal], many developers prefer a local development experience. Functions makes it easy to use your favorite code editor and development tools to create and test functions on your local computer. Your local functions can connect to live Azure services, and you can debug them on your local computer using the full Functions runtime. +While you're able to develop and test Azure Functions in the [Azure portal], many developers prefer a local development experience. With Functions, it's easier to use your favorite code editor and development tools to create and test functions on your local computer. Your local functions can connect to live Azure services, and you can debug them on your local computer using the full Functions runtime. This article provides links to specific development environments for your preferred language. It also provides some shared guidance for local development, such as working with the [local.settings.json file](#local-settings-file). @@ -21,11 +21,11 @@ The way in which you develop functions on your local computer depends on your [l |[Visual Studio Code](functions-develop-vs-code.md)| 
                  [C# isolated process (.NET 5.0)](dotnet-isolated-process-guide.md)
                  [JavaScript](functions-reference-node.md)
                  [PowerShell](./create-first-function-vs-code-powershell.md)
                  [Python](functions-reference-python.md) | The [Azure Functions extension for VS Code](https://marketplace.visualstudio.com/items?itemName=ms-azuretools.vscode-azurefunctions) adds Functions support to VS Code. Requires the Core Tools. Supports development on Linux, macOS, and Windows, when using version 2.x of the Core Tools. To learn more, see [Create your first function using Visual Studio Code](./create-first-function-vs-code-csharp.md). | | [Command prompt or terminal](functions-run-local.md) | [C# (class library)](functions-dotnet-class-library.md)
                  [C# isolated process (.NET 5.0)](dotnet-isolated-process-guide.md)
                  [JavaScript](functions-reference-node.md)
                  [PowerShell](functions-reference-powershell.md)
                  [Python](functions-reference-python.md) | [Azure Functions Core Tools] provides the core runtime and templates for creating functions, which enable local development. Version 2.x supports development on Linux, macOS, and Windows. All environments rely on Core Tools for the local Functions runtime. | | [Visual Studio 2019](functions-develop-vs.md) | [C# (class library)](functions-dotnet-class-library.md)
                  [C# isolated process (.NET 5.0)](dotnet-isolated-process-guide.md) | The Azure Functions tools are included in the **Azure development** workload of [Visual Studio 2019](https://www.visualstudio.com/vs/) and later versions. Lets you compile functions in a class library and publish the .dll to Azure. Includes the Core Tools for local testing. To learn more, see [Develop Azure Functions using Visual Studio](functions-develop-vs.md). | -| [Maven](./create-first-function-cli-java.md) (various) | [Java](functions-reference-java.md) | Maven archetype supports Core Tools to enable development of Java functions. Version 2.x supports development on Linux, macOS, and Windows. To learn more, see [Create your first function with Java and Maven](./create-first-function-cli-java.md). Also supports development using [Eclipse](functions-create-maven-eclipse.md) and [IntelliJ IDEA](functions-create-maven-intellij.md) | +| [Maven](./create-first-function-cli-java.md) (various) | [Java](functions-reference-java.md) | Maven archetype supports Core Tools to enable development of Java functions. Version 2.x supports development on Linux, macOS, and Windows. To learn more, see [Create your first function with Java and Maven](./create-first-function-cli-java.md). Also supports development using [Eclipse](functions-create-maven-eclipse.md) and [IntelliJ IDEA](functions-create-maven-intellij.md). | [!INCLUDE [Don't mix development environments](../../includes/functions-mixed-dev-environments.md)] -Each of these local development environments lets you create function app projects and use predefined Functions templates to create new functions. Each uses the Core Tools so that you can test and debug your functions against the real Functions runtime on your own machine just as you would any other app. You can also publish your function app project from any of these environments to Azure. +Each of these local development environments lets you create function app projects and use predefined function templates to create new functions. Each uses the Core Tools so that you can test and debug your functions against the real Functions runtime on your own machine just as you would any other app. You can also publish your function app project from any of these environments to Azure. ## Local settings file @@ -61,7 +61,7 @@ These settings are supported when you run projects locally: | Setting | Description | | ------------ | -------------------------------------- | | **`IsEncrypted`** | When this setting is set to `true`, all values are encrypted with a local machine key. Used with `func settings` commands. Default value is `false`. You might want to encrypt the local.settings.json file on your local computer when it contains secrets, such as service connection strings. The host automatically decrypts settings when it runs. Use the `func settings decrypt` command before trying to read locally encrypted settings. | -| **`Values`** | Collection of application settings used when a project is running locally. These key-value (string-string) pairs correspond to application settings in your function app in Azure, like [`AzureWebJobsStorage`]. Many triggers and bindings have a property that refers to a connection string app setting, like `Connection` for the [Blob storage trigger](functions-bindings-storage-blob-trigger.md#configuration). For these properties, you need an application setting defined in the `Values` array. See the subsequent table for a list of commonly used settings.
                  Values must be strings and not JSON objects or arrays. Setting names can't include a double underline (`__`) and should not include a colon (`:`). Double underline characters are reserved by the runtime, and the colon is reserved to support [dependency injection](functions-dotnet-dependency-injection.md#working-with-options-and-settings). | +| **`Values`** | Collection of application settings used when a project is running locally. These key-value (string-string) pairs correspond to application settings in your function app in Azure, like [`AzureWebJobsStorage`]. Many triggers and bindings have a property that refers to a connection string app setting, like `Connection` for the [Blob storage trigger](functions-bindings-storage-blob-trigger.md#configuration). For these properties, you need an application setting defined in the `Values` array. See the subsequent table for a list of commonly used settings.
                  Values must be strings and not JSON objects or arrays. Setting names can't include a double underline (`__`) and shouldn't include a colon (`:`). Double underline characters are reserved by the runtime, and the colon is reserved to support [dependency injection](functions-dotnet-dependency-injection.md#working-with-options-and-settings). | | **`Host`** | Settings in this section customize the Functions host process when you run projects locally. These settings are separate from the host.json settings, which also apply when you run projects in Azure. | | **`LocalHttpPort`** | Sets the default port used when running the local Functions host (`func host start` and `func run`). The `--port` command-line option takes precedence over this setting. For example, when running in Visual Studio IDE, you may change the port number by navigating to the "Project Properties -> Debug" window and explicitly specifying the port number in a `host start --port ` command that can be supplied in the "Application Arguments" field. | | **`CORS`** | Defines the origins allowed for [cross-origin resource sharing (CORS)](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing). Origins are supplied as a comma-separated list with no spaces. The wildcard value (\*) is supported, which allows requests from any origin. | @@ -73,9 +73,9 @@ The following application settings can be included in the **`Values`** array whe | Setting | Values | Description | |-----|-----|-----| |**`AzureWebJobsStorage`**| Storage account connection string, or
                  `UseDevelopmentStorage=true`| Contains the connection string for an Azure storage account. Required when using triggers other than HTTP. For more information, see the [`AzureWebJobsStorage`] reference.
                  When you have the [Azurite Emulator](../storage/common/storage-use-azurite.md) installed locally and you set [`AzureWebJobsStorage`] to `UseDevelopmentStorage=true`, Core Tools uses the emulator. The emulator is useful during development, but you should test with an actual storage connection before deployment.| -|**`AzureWebJobs..Disabled`**| `true`\|`false` | To disable a function when running locally, add `"AzureWebJobs..Disabled": "true"` to the collection, where `` is the name of the function. To learn more, see [How to disable functions in Azure Functions](disable-function.md#localsettingsjson) | +|**`AzureWebJobs..Disabled`**| `true`\|`false` | To disable a function when running locally, add `"AzureWebJobs..Disabled": "true"` to the collection, where `` is the name of the function. To learn more, see [How to disable functions in Azure Functions](disable-function.md#localsettingsjson). | |**`FUNCTIONS_WORKER_RUNTIME`** | `dotnet`
                  `node`
                  `java`
                  `powershell`
                  `python`| Indicates the targeted language of the Functions runtime. Required for version 2.x and higher of the Functions runtime. This setting is generated for your project by Core Tools. To learn more, see the [`FUNCTIONS_WORKER_RUNTIME`](functions-app-settings.md#functions_worker_runtime) reference.| -| **`FUNCTIONS_WORKER_RUNTIME_VERSION`** | `~7` |Indicates that PowerShell 7 be used when running locally. If not set, then PowerShell Core 6 is used. This setting is only used when running locally. When running in Azure, the PowerShell runtime version is determined by the `powerShellVersion` site configuration setting, which can be [set in the portal](functions-reference-powershell.md#changing-the-powershell-version). | +| **`FUNCTIONS_WORKER_RUNTIME_VERSION`** | `~7` |Indicates to use PowerShell 7 when running locally. If not set, then PowerShell Core 6 is used. This setting is only used when running locally. The PowerShell runtime version is determined by the `powerShellVersion` site configuration setting, when it runs in Azure, which can be [set in the portal](functions-reference-powershell.md#changing-the-powershell-version). | ## Next steps diff --git a/articles/azure-functions/functions-develop-vs-code.md b/articles/azure-functions/functions-develop-vs-code.md index 5a9ced7e6c749..8f8bab3009ec4 100644 --- a/articles/azure-functions/functions-develop-vs-code.md +++ b/articles/azure-functions/functions-develop-vs-code.md @@ -3,8 +3,8 @@ title: Develop Azure Functions by using Visual Studio Code description: Learn how to develop and test Azure Functions by using the Azure Functions extension for Visual Studio Code. ms.topic: conceptual ms.devlang: csharp, java, javascript, powershell, python -ms.custom: devx-track-csharp -ms.date: 02/21/2021 +ms.custom: devdivchpfy22 +ms.date: 05/19/2022 #Customer intent: As an Azure Functions developer, I want to understand how Visual Studio Code supports Azure Functions so that I can more efficiently create, publish, and maintain my Functions projects. --- @@ -46,7 +46,7 @@ Before you install and run the [Azure Functions extension][Azure Functions exten [!INCLUDE [quickstarts-free-trial-note](../../includes/quickstarts-free-trial-note.md)] -Other resources that you need, like an Azure storage account, are created in your subscription when you [publish by using Visual Studio Code](#publish-to-azure). +Other resources that you need, like an Azure storage account, are created in your subscription when you [publish by using Visual Studio Code](#publish-to-azure). ### Run local requirements @@ -54,45 +54,45 @@ These prerequisites are only required to [run and debug your functions locally]( # [C\#](#tab/csharp) -+ The [Azure Functions Core Tools](functions-run-local.md#install-the-azure-functions-core-tools) version 2.x or later. The Core Tools package is downloaded and installed automatically when you start the project locally. Core Tools includes the entire Azure Functions runtime, so download and installation might take some time. +* The [Azure Functions Core Tools](functions-run-local.md#install-the-azure-functions-core-tools) version 2.x or later. The Core Tools package is downloaded and installed automatically when you start the project locally. Core Tools includes the entire Azure Functions runtime, so download and installation might take some time. -+ The [C# extension](https://marketplace.visualstudio.com/items?itemName=ms-dotnettools.csharp) for Visual Studio Code. 
+* The [C# extension](https://marketplace.visualstudio.com/items?itemName=ms-dotnettools.csharp) for Visual Studio Code. -+ [.NET Core CLI tools](/dotnet/core/tools/?tabs=netcore2x). +* [.NET Core CLI tools](/dotnet/core/tools/?tabs=netcore2x). # [Java](#tab/java) -+ The [Azure Functions Core Tools](functions-run-local.md#install-the-azure-functions-core-tools) version 2.x or later. The Core Tools package is downloaded and installed automatically when you start the project locally. Core Tools includes the entire Azure Functions runtime, so download and installation might take some time. +* The [Azure Functions Core Tools](functions-run-local.md#install-the-azure-functions-core-tools) version 2.x or later. The Core Tools package is downloaded and installed automatically when you start the project locally. Core Tools includes the entire Azure Functions runtime, so download and installation might take some time. -+ [Debugger for Java extension](https://marketplace.visualstudio.com/items?itemName=vscjava.vscode-java-debug). +* [Debugger for Java extension](https://marketplace.visualstudio.com/items?itemName=vscjava.vscode-java-debug). -+ [Java 8](/azure/developer/java/fundamentals/java-support-on-azure) recommended. For other supported versions, see [Java versions](functions-reference-java.md#java-versions). +* [Java 8](/azure/developer/java/fundamentals/java-support-on-azure) recommended. For other supported versions, see [Java versions](functions-reference-java.md#java-versions). -+ [Maven 3 or later](https://maven.apache.org/) +* [Maven 3 or later](https://maven.apache.org/). # [JavaScript](#tab/nodejs) -+ The [Azure Functions Core Tools](functions-run-local.md#install-the-azure-functions-core-tools) version 2.x or later. The Core Tools package is downloaded and installed automatically when you start the project locally. Core Tools includes the entire Azure Functions runtime, so download and installation might take some time. +* The [Azure Functions Core Tools](functions-run-local.md#install-the-azure-functions-core-tools) version 2.x or later. The Core Tools package is downloaded and installed automatically when you start the project locally. Core Tools includes the entire Azure Functions runtime, so download and installation might take some time. -+ [Node.js](https://nodejs.org/), Active LTS and Maintenance LTS versions (10.14.1 recommended). Use the `node --version` command to check your version. +* [Node.js](https://nodejs.org/), Active LTS and Maintenance LTS versions (10.14.1 recommended). Use the `node --version` command to check your version. # [PowerShell](#tab/powershell) -+ The [Azure Functions Core Tools](functions-run-local.md#install-the-azure-functions-core-tools) version 2.x or later. The Core Tools package is downloaded and installed automatically when you start the project locally. Core Tools includes the entire Azure Functions runtime, so download and installation might take some time. +* The [Azure Functions Core Tools](functions-run-local.md#install-the-azure-functions-core-tools) version 2.x or later. The Core Tools package is downloaded and installed automatically when you start the project locally. Core Tools include the entire Azure Functions runtime, so download and installation might take some time. -+ [PowerShell 7](/powershell/scripting/install/installing-powershell-core-on-windows) recommended. For version information, see [PowerShell versions](functions-reference-powershell.md#powershell-versions). 
+* [PowerShell 7](/powershell/scripting/install/installing-powershell-core-on-windows) recommended. For version information, see [PowerShell versions](functions-reference-powershell.md#powershell-versions). -+ Both [.NET Core 3.1 runtime](https://dotnet.microsoft.com/download) and [.NET Core 2.1 runtime](https://dotnet.microsoft.com/download/dotnet/2.1) +* Both [.NET Core 3.1 runtime](https://dotnet.microsoft.com/download) and [.NET Core 2.1 runtime](https://dotnet.microsoft.com/download/dotnet/2.1). -+ The [PowerShell extension for Visual Studio Code](https://marketplace.visualstudio.com/items?itemName=ms-vscode.PowerShell). +* The [PowerShell extension for Visual Studio Code](https://marketplace.visualstudio.com/items?itemName=ms-vscode.PowerShell). # [Python](#tab/python) -+ The [Azure Functions Core Tools](functions-run-local.md#install-the-azure-functions-core-tools) version 2.x or later. The Core Tools package is downloaded and installed automatically when you start the project locally. Core Tools includes the entire Azure Functions runtime, so download and installation might take some time. +* The [Azure Functions Core Tools](functions-run-local.md#install-the-azure-functions-core-tools) version 2.x or later. The Core Tools package is downloaded and installed automatically when you start the project locally. Core Tools includes the entire Azure Functions runtime, so download and installation might take some time. -+ [Python 3.x](https://www.python.org/downloads/). For version information, see [Python versions](functions-reference-python.md#python-version) by the Azure Functions runtime. +* [Python 3.x](https://www.python.org/downloads/). For version information, see [Python versions](functions-reference-python.md#python-version) by the Azure Functions runtime. -+ [Python extension](https://marketplace.visualstudio.com/items?itemName=ms-python.python) for Visual Studio Code. +* [Python extension](https://marketplace.visualstudio.com/items?itemName=ms-python.python) for Visual Studio Code. --- @@ -104,21 +104,29 @@ The Functions extension lets you create a function app project, along with your 1. From **Azure: Functions**, select the **Create Function** icon: - ![Create a function](./media/functions-develop-vs-code/create-function.png) + :::image type="content" source="./media/functions-develop-vs-code/create-function.png" alt-text=" Screenshot for Create Function."::: 1. Select the folder for your function app project, and then **Select a language for your function project**. 1. Select the **HTTP trigger** function template, or you can select **Skip for now** to create a project without a function. You can always [add a function to your project](#add-a-function-to-your-project) later. - ![Choose the HTTP trigger template](./media/functions-develop-vs-code/create-function-choose-template.png) + :::image type="content" source="./media/functions-develop-vs-code/select-http-trigger.png" alt-text="Screenshot for selecting H T T P trigger."::: 1. Type **HttpExample** for the function name and select Enter, and then select **Function** authorization. This authorization level requires you to provide a [function key](functions-bindings-http-webhook-trigger.md#authorization-keys) when you call the function endpoint. 
- ![Select Function authorization](./media/functions-develop-vs-code/create-function-auth.png) + :::image type="content" source="./media/functions-develop-vs-code/create-function-auth.png" alt-text="Screenshot for creating function authorization."::: - A function is created in your chosen language and in the template for an HTTP-triggered function. +1. From the dropdown list, select **Add to workspace**. - ![HTTP-triggered function template in Visual Studio Code](./media/functions-develop-vs-code/new-function-full.png) + :::image type="content" source="./media/functions-develop-vs-code/add-to-workplace.png" alt-text="Screenshot for selecting Add to workspace."::: + +1. In the **Do you trust the authors of the files in this folder?** window, select **Yes**. + + :::image type="content" source="./media/functions-develop-vs-code/select-author-file.png" alt-text="Screenshot to confirm trust in the authors of the files."::: + +1. A function is created in your chosen language, using the template for an HTTP-triggered function. + + :::image type="content" source="./media/functions-develop-vs-code/new-function-created.png" alt-text="Screenshot for H T T P-triggered function template in Visual Studio Code."::: ### Generated project files @@ -139,9 +147,9 @@ Depending on your language, these other files are created: # [Java](#tab/java) -+ A pom.xml file in the root folder that defines the project and deployment parameters, including project dependencies and the [Java version](functions-reference-java.md#java-versions). The pom.xml also contains information about the Azure resources that are created during a deployment. +* A pom.xml file in the root folder that defines the project and deployment parameters, including project dependencies and the [Java version](functions-reference-java.md#java-versions). The pom.xml also contains information about the Azure resources that are created during a deployment. -+ A [Functions.java file](functions-reference-java.md#triggers-and-annotations) in your src path that implements the function. +* A [Functions.java file](functions-reference-java.md#triggers-and-annotations) in your src path that implements the function. # [JavaScript](#tab/nodejs) @@ -152,16 +160,16 @@ Depending on your language, these other files are created: # [PowerShell](#tab/powershell) * An HttpExample folder that contains the [function.json definition file](functions-reference-powershell.md#folder-structure) and the run.ps1 file, which contains the function code. - + # [Python](#tab/python) - + * A project-level requirements.txt file that lists packages required by Functions. - + * An HttpExample folder that contains the [function.json definition file](functions-reference-python.md#folder-structure) and the \_\_init\_\_.py file, which contains the function code. --- -At this point, you can [add input and output bindings](#add-input-and-output-bindings) to your function. +At this point, you can [add input and output bindings](#add-input-and-output-bindings) to your function. You can also [add a new function to your project](#add-a-function-to-your-project). ## Install binding extensions @@ -206,7 +214,7 @@ Replace `` in the example with a specific version of the package ## Add a function to your project -You can add a new function to an existing project by using one of the predefined Functions trigger templates. To add a new function trigger, select F1 to open the command palette, and then search for and run the command **Azure Functions: Create Function**.
Follow the prompts to choose your trigger type and define the required attributes of the trigger. If your trigger requires an access key or connection string to connect to a service, get it ready before you create the function trigger. +You can add a new function to an existing project by using one of the predefined Functions triggers templates. To add a new function trigger, select F1 to open the command palette, and then search for and run the command **Azure Functions: Create Function**. Follow the prompts to choose your trigger type and define the required attributes of the trigger. If your trigger requires an access key or connection string to connect to a service, get it ready before you create the function trigger. The results of this action depend on your project's language: @@ -250,7 +258,7 @@ The `msg` parameter is an `ICollector` type, which represents a collection of Messages are sent to the queue when the function completes. -To learn more, see the [Queue storage output binding reference article](functions-bindings-storage-queue-output.md?tabs=csharp) documentation. To learn more in general about which bindings can be added to a function, see [Add bindings to an existing function in Azure Functions](add-bindings-existing-function.md?tabs=csharp). +To learn more, see the [Queue storage output binding reference article](functions-bindings-storage-queue-output.md?tabs=csharp) documentation. To learn more in general about which bindings can be added to a function, see [Add bindings to an existing function in Azure Functions](add-bindings-existing-function.md?tabs=csharp). # [Java](#tab/java) @@ -258,13 +266,13 @@ Update the function method to add the following parameter to the `Run` method de :::code language="java" source="~/functions-quickstart-java/functions-add-output-binding-storage-queue/src/main/java/com/function/Function.java" range="20-21"::: -The `msg` parameter is an `OutputBinding` type, where is `T` is a string that is written to an output binding when the function completes. The following code sets the message in the output binding: +The `msg` parameter is an `OutputBinding` type, where `T` is a string that is written to an output binding when the function completes. The following code sets the message in the output binding: :::code language="java" source="~/functions-quickstart-java/functions-add-output-binding-storage-queue/src/main/java/com/function/Function.java" range="33-34"::: This message is sent to the queue when the function completes. -To learn more, see the [Queue storage output binding reference article](functions-bindings-storage-queue-output.md?tabs=java) documentation. To learn more in general about which bindings can be added to a function, see [Add bindings to an existing function in Azure Functions](add-bindings-existing-function.md?tabs=java). +To learn more, see the [Queue storage output binding reference article](functions-bindings-storage-queue-output.md?tabs=java) documentation. To learn more in general about which bindings can be added to a function, see [Add bindings to an existing function in Azure Functions](add-bindings-existing-function.md?tabs=java). # [JavaScript](#tab/nodejs) @@ -276,7 +284,7 @@ In your function code, the `msg` binding is accessed from the `context`, as in t This message is sent to the queue when the function completes. -To learn more, see the [Queue storage output binding reference article](functions-bindings-storage-queue-output.md?tabs=javascript) documentation. 
To learn more in general about which bindings can be added to a function, see [Add bindings to an existing function in Azure Functions](add-bindings-existing-function.md?tabs=javascript). +To learn more, see the [Queue storage output binding reference article](functions-bindings-storage-queue-output.md?tabs=javascript) documentation. To learn more in general about which bindings can be added to a function, see [Add bindings to an existing function in Azure Functions](add-bindings-existing-function.md?tabs=javascript). # [PowerShell](#tab/powershell) @@ -286,7 +294,7 @@ To learn more, see the [Queue storage output binding reference article](function This message is sent to the queue when the function completes. -To learn more, see the [Queue storage output binding reference article](functions-bindings-storage-queue-output.md?tabs=powershell) documentation. To learn more in general about which bindings can be added to a function, see [Add bindings to an existing function in Azure Functions](add-bindings-existing-function.md?tabs=powershell). +To learn more, see the [Queue storage output binding reference article](functions-bindings-storage-queue-output.md?tabs=powershell) documentation. To learn more in general about which bindings can be added to a function, see [Add bindings to an existing function in Azure Functions](add-bindings-existing-function.md?tabs=powershell). # [Python](#tab/python) @@ -302,7 +310,7 @@ The following code adds string data from the request to the output queue: This message is sent to the queue when the function completes. -To learn more, see the [Queue storage output binding reference article](functions-bindings-storage-queue-output.md?tabs=python) documentation. To learn more in general about which bindings can be added to a function, see [Add bindings to an existing function in Azure Functions](add-bindings-existing-function.md?tabs=python). +To learn more, see the [Queue storage output binding reference article](functions-bindings-storage-queue-output.md?tabs=python) documentation. To learn more in general about which bindings can be added to a function, see [Add bindings to an existing function in Azure Functions](add-bindings-existing-function.md?tabs=python). --- @@ -312,9 +320,9 @@ To learn more, see the [Queue storage output binding reference article](function Visual Studio Code lets you publish your Functions project directly to Azure. In the process, you create a function app and related resources in your Azure subscription. The function app provides an execution context for your functions. The project is packaged and deployed to the new function app in your Azure subscription. -When you publish from Visual Studio Code to a new function app in Azure, you can choose either a quick function app create path using defaults or an advanced path, where you have more control over the remote resources created. +When you publish from Visual Studio Code to a new function app in Azure, you can choose either a quick function app create path using defaults or an advanced path. This way you'll have more control over the remote resources created. -When you publish from Visual Studio Code, you take advantage of the [Zip deploy](functions-deployment-technologies.md#zip-deploy) technology. +When you publish from Visual Studio Code, you take advantage of the [Zip deploy](functions-deployment-technologies.md#zip-deploy) technology. ### Quick function app create @@ -330,7 +338,7 @@ The following steps publish your project to a new function app created with adva 1. 
If you're not signed in, you're prompted to **Sign in to Azure**. You can also **Create a free Azure account**. After signing in from the browser, go back to Visual Studio Code. -1. If you have multiple subscriptions, **Select a subscription** for the function app, and then select **+ Create New Function App in Azure... _Advanced_**. This _Advanced_ option gives you more control over the resources you create in Azure. +1. If you have multiple subscriptions, **Select a subscription** for the function app, and then select **+ Create New Function App in Azure... _Advanced_**. This _Advanced_ option gives you more control over the resources you create in Azure. 1. Following the prompts, provide this information: @@ -356,7 +364,7 @@ To call an HTTP-triggered function from a client, you need the URL of the functi The function URL is copied to the clipboard, along with any required keys passed by the `code` query parameter. Use an HTTP tool to submit POST requests, or a browser for GET requests to the remote function. -When getting the URL of functions in Azure, the extension uses your Azure account to automatically retrieve the keys it needs to start the function. [Learn more about function access keys](security-concepts.md#function-access-keys). Starting non-HTTP triggered functions requires using the admin key. +When the extension gets the URL of functions in Azure, it uses your Azure account to automatically retrieve the keys it needs to start the function. [Learn more about function access keys](security-concepts.md#function-access-keys). Starting non-HTTP triggered functions requires using the admin key. ## Republish project files @@ -369,29 +377,29 @@ When you set up [continuous deployment](functions-continuous-deployment.md), you ## Run functions -The Azure Functions extension lets you run individual functions, either in your project on your local development computer or in your Azure subscription. +The Azure Functions extension lets you run individual functions. You can run functions either in your project on your local development computer or in your Azure subscription. For HTTP trigger functions, the extension calls the HTTP endpoint. For other kinds of triggers, it calls administrator APIs to start the function. The message body of the request sent to the function depends on the type of trigger. When a trigger requires test data, you're prompted to enter data in a specific JSON format. -### Run functions in Azure +### Run functions in Azure. -To execute a function in Azure from Visual Studio Code. +To execute a function in Azure from Visual Studio Code. -1. In the command pallet, enter **Azure Functions: Execute function now** and choose your Azure subscription. +1. In the command pallet, enter **Azure Functions: Execute function now** and choose your Azure subscription. -1. Choose your function app in Azure from the list. If you don't see your function app, make sure you're signed in to the correct subscription. +1. Choose your function app in Azure from the list. If you don't see your function app, make sure you're signed in to the correct subscription. -1. Choose the function you want to run from the list and type the message body of the request in **Enter request body**. Press Enter to send this request message to your function. The default text in **Enter request body** should indicate the format of the body. If your function app has no functions, a notification error is shown with this error. +1. 
Choose the function you want to run from the list and type the message body of the request in **Enter request body**. Press Enter to send this request message to your function. The default text in **Enter request body** should indicate the format of the body. If your function app has no functions, a notification error is shown with this error. 1. When the function executes in Azure and returns a response, a notification is raised in Visual Studio Code. - + You can also run your function from the **Azure: Functions** area by right-clicking (Ctrl-clicking on Mac) the function you want to run from your function app in your Azure subscription and choosing **Execute Function Now...**. -When running functions in Azure, the extension uses your Azure account to automatically retrieve the keys it needs to start the function. [Learn more about function access keys](security-concepts.md#function-access-keys). Starting non-HTTP triggered functions requires using the admin key. +When you run your functions in Azure from Visual Studio Code, the extension uses your Azure account to automatically retrieve the keys it needs to start the function. [Learn more about function access keys](security-concepts.md#function-access-keys). Starting non-HTTP triggered functions requires using the admin key. ### Run functions locally -The local runtime is the same runtime that hosts your function app in Azure. Local settings are read from the [local.settings.json file](#local-settings). To run your Functions project locally, you must meet [additional requirements](#run-local-requirements). +The local runtime is the same runtime that hosts your function app in Azure. Local settings are read from the [local.settings.json file](#local-settings). To run your Functions project locally, you must meet [more requirements](#run-local-requirements). #### Configure the project to run locally @@ -411,17 +419,17 @@ For more information, see [Local settings file](#local-settings). #### Debug functions locally -To debug your functions, select F5. If you haven't already downloaded [Core Tools][Azure Functions Core Tools], you're prompted to do so. When Core Tools is installed and running, output is shown in the Terminal. This is the same as running the `func host start` Core Tools command from the Terminal, but with extra build tasks and an attached debugger. +To debug your functions, select F5. If you haven't already downloaded [Core Tools][Azure Functions Core Tools], you're prompted to do so. When Core Tools is installed and running, output is shown in the Terminal. This step is the same as running the `func start` Core Tools command from the Terminal, but with extra build tasks and an attached debugger. -When the project is running, you can use the **Execute Function Now...** feature of the extension to trigger your functions as you would when the project is deployed to Azure. With the project running in debug mode, breakpoints are hit in Visual Studio Code as you would expect. +When the project is running, you can use the **Execute Function Now...** feature of the extension to trigger your functions as you would when the project is deployed to Azure. With the project running in debug mode, breakpoints are hit in Visual Studio Code as you would expect. -1. In the command pallet, enter **Azure Functions: Execute function now** and choose **Local project**. +1. In the command pallet, enter **Azure Functions: Execute function now** and choose **Local project**. -1. 
Choose the function you want to run in your project and type the message body of the request in **Enter request body**. Press Enter to send this request message to your function. The default text in **Enter request body** should indicate the format of the body. If your function app has no functions, a notification error is shown with this error. +1. Choose the function you want to run in your project and type the message body of the request in **Enter request body**. Press Enter to send this request message to your function. The default text in **Enter request body** should indicate the format of the body. If your function app has no functions, a notification error is shown with this error. 1. When the function runs locally and after the response is received, a notification is raised in Visual Studio Code. Information about the function execution is shown in **Terminal** panel. -Running functions locally doesn't require using keys. +Running functions locally doesn't require using keys. [!INCLUDE [functions-local-settings-file](../../includes/functions-local-settings-file.md)] @@ -446,7 +454,7 @@ The settings in the local.settings.json file in your project should be the same The easiest way to publish the required settings to your function app in Azure is to use the **Upload settings** link that appears after you publish your project: -![Upload application settings](./media/functions-develop-vs-code/upload-app-settings.png) +:::image type="content" source="./media/functions-develop-vs-code/upload-app-settings.png" alt-text="Screenshot to upload application settings."::: You can also publish settings by using the **Azure Functions: Upload Local Setting** command in the command palette. You can add individual settings to application settings in Azure by using the **Azure Functions: Add New Setting** command. @@ -457,7 +465,7 @@ If the local file is encrypted, it's decrypted, published, and encrypted again. View existing app settings in the **Azure: Functions** area by expanding your subscription, your function app, and **Application Settings**. -![View function app settings in Visual Studio Code](./media/functions-develop-vs-code/view-app-settings.png) +:::image type="content" source="./media/functions-develop-vs-code/view-app-settings.png" alt-text=" Screenshot for viewing function app settings in Visual Studio Code."::: ### Download settings from Azure @@ -473,7 +481,7 @@ When you [run functions locally](#run-functions-locally), log data is streamed t When you're developing an application, it's often useful to see logging information in near-real time. You can view a stream of log files being generated by your functions. This output is an example of streaming logs for a request to an HTTP-triggered function: -![Streaming logs output for HTTP trigger](media/functions-develop-vs-code/streaming-logs-vscode-console.png) +:::image type="content" source="media/functions-develop-vs-code/streaming-logs-vscode-console.png" alt-text="Screenshot for streaming logs output for H T T P trigger."::: To learn more, see [Streaming logs](functions-monitoring.md#streaming-logs). 
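As a quick, hedged illustration of how the settings discussed above reach your code: values in the `Values` section of local.settings.json (locally) and the function app's application settings (in Azure) surface as environment variables at runtime. The sketch below assumes a Python function app; `MY_SETTING` is a placeholder name, not a setting defined in this article.

```python
import logging
import os

import azure.functions as func


def main(req: func.HttpRequest) -> func.HttpResponse:
    # "MY_SETTING" is a placeholder. Locally it would come from the "Values"
    # section of local.settings.json; in Azure, from the app's settings.
    my_setting = os.environ.get("MY_SETTING", "<not set>")
    logging.info("MY_SETTING resolved to: %s", my_setting)
    return func.HttpResponse(f"MY_SETTING is {my_setting}")
```

If the value looks wrong in one environment, the **Upload settings** and **Download settings** commands described above are the quickest way to bring local.settings.json and the function app back in sync.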
diff --git a/articles/azure-functions/functions-develop-vs.md b/articles/azure-functions/functions-develop-vs.md index 2dfba55fdbac0..0b1075c3e8ac9 100644 --- a/articles/azure-functions/functions-develop-vs.md +++ b/articles/azure-functions/functions-develop-vs.md @@ -1,10 +1,10 @@ --- title: Develop Azure Functions using Visual Studio -description: Learn how to develop and test Azure Functions by using Azure Functions Tools for Visual Studio 2019. +description: Learn how to develop and test Azure Functions by using Azure Functions Tools for Visual Studio 2022. ms.devlang: csharp -ms.custom: "vs-azure, devx-track-csharp" +ms.custom: devdivchpfy22 ms.topic: conceptual -ms.date: 12/10/2020 +ms.date: 05/19/2022 --- # Develop Azure Functions using Visual Studio @@ -21,46 +21,16 @@ Visual Studio provides the following benefits when you develop your functions: This article provides details about how to use Visual Studio to develop C# class library functions and publish them to Azure. Before you read this article, consider completing the [Functions quickstart for Visual Studio](functions-create-your-first-function-visual-studio.md). -Unless otherwise noted, procedures and examples shown are for Visual Studio 2019. +Unless otherwise noted, procedures and examples shown are for Visual Studio 2022. ## Prerequisites -- Azure Functions Tools. To add Azure Function Tools, include the **Azure development** workload in your Visual Studio installation. Azure Functions Tools is available in the Azure development workload starting with Visual Studio 2017. +- Azure Functions Tools. To add Azure Function Tools, include the **Azure development** workload in your Visual Studio installation. If you are using Visual Studio 2017, you may need to [follow some additional installation steps](#azure-functions-tools-with-visual-studio-2017). - Other resources that you need, such as an Azure Storage account, are created in your subscription during the publishing process. - [!INCLUDE [quickstarts-free-trial-note](../../includes/quickstarts-free-trial-note.md)] -> [!NOTE] -> In Visual Studio 2017, the Azure development workload installs Azure Functions Tools as a separate extension. When you update your Visual Studio 2017 installation, make sure that you're using the [most recent version](#check-your-tools-version) of the Azure Functions tools. The following sections show you how to check and (if needed) update your Azure Functions Tools extension in Visual Studio 2017. -> -> Skip these sections if you're using Visual Studio 2019. - -### Check your tools version in Visual Studio 2017 - -1. From the **Tools** menu, choose **Extensions and Updates**. Expand **Installed** > **Tools**, and then choose **Azure Functions and Web Jobs Tools**. - - ![Verify the Functions tools version](./media/functions-develop-vs/functions-vstools-check-functions-tools.png) - -1. Note the installed **Version** and compare this version with the latest version listed in the [release notes](https://github.com/Azure/Azure-Functions/blob/master/VS-AzureTools-ReleaseNotes.md). - -1. If your version is older, update your tools in Visual Studio as shown in the following section. - -### Update your tools in Visual Studio 2017 - -1. In the **Extensions and Updates** dialog, expand **Updates** > **Visual Studio Marketplace**, choose **Azure Functions and Web Jobs Tools** and select **Update**. - - ![Update the Functions tools version](./media/functions-develop-vs/functions-vstools-update-functions-tools.png) - -1. 
After the tools update is downloaded, select **Close**, and then close Visual Studio to trigger the tools update with VSIX Installer. - -1. In VSIX Installer, choose **Modify** to update the tools. - -1. After the update is complete, choose **Close**, and then restart Visual Studio. - -> [!NOTE] -> In Visual Studio 2019 and later, the Azure Functions tools extension is updated as part of Visual Studio. - ## Create an Azure Functions project [!INCLUDE [Create a project using the Azure Functions](../../includes/functions-vstools-create.md)] @@ -72,7 +42,7 @@ After you create an Azure Functions project, the project template creates a C# p * **local.settings.json**: Maintains settings used when running functions locally. These settings aren't used when running in Azure. For more information, see [Local settings file](#local-settings). >[!IMPORTANT] - >Because the local.settings.json file can contain secrets, you must exclude it from your project source control. Ensure the **Copy to Output Directory** setting for this file is set to **Copy if newer**. + >Because the local.settings.json file can contain secrets, you must exclude it from your project source control. Make sure the **Copy to Output Directory** setting for this file is set to **Copy if newer**. For more information, see [Functions class library project](functions-dotnet-class-library.md#functions-class-library-project). @@ -88,9 +58,9 @@ The Functions runtime uses an Azure Storage account internally. For all trigger To set the storage account connection string: -1. In Visual Studio, select **View** > **Cloud Explorer**. +1. In the Azure portal, navigate to your storage account. -2. In **Cloud Explorer**, expand **Storage Accounts**, and then select your storage account. In the **Properties** tab, copy the **Primary Connection String** value. +2. In the **Access keys** tab, below **Security + networking**, copy the **Connection string** of **key1**. 2. In your project, open the local.settings.json file and set the value of the `AzureWebJobsStorage` key to the connection string you copied. @@ -104,11 +74,13 @@ In C# class library functions, the bindings used by the function are defined by 2. Select **Azure Function**, enter a **Name** for the class, and then select **Add**. -3. Choose your trigger, set the binding properties, and then select **OK**. The following example shows the settings for creating a Queue storage trigger function. +3. Choose your trigger, set the binding properties, and then select **Add**. The following example shows the settings for creating a Queue storage trigger function. ![Create a Queue storage trigger function](./media/functions-develop-vs/functions-vstools-create-queuetrigger.png) - This trigger example uses a connection string with a key named `QueueStorage`. Define this connection string setting in the [local.settings.json file](functions-develop-local.md#local-settings-file). + You will then be prompted to choose between two Azure storage emulators or referencing a provisioned Azure storage account. + + This trigger example uses a connection string with a key named `QueueStorage`. This key, stored in the [local.settings.json file](functions-develop-local.md#local-settings-file), either references the Azure storage emulators or an Azure storage account. 4. Examine the newly added class. You see a static `Run()` method that's attributed with the `FunctionName` attribute. This attribute indicates that the method is the entry point for the function. 
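Outside of Visual Studio, one quick way to exercise a queue trigger like the one above is to drop a test message onto the queue it watches. The following is only a sketch and makes several assumptions not stated in this article: the `azure-storage-queue` package is installed, the queue is named `myqueue-items`, and the connection string was saved under the `QueueStorage` key in local.settings.json.

```python
import json

from azure.core.exceptions import ResourceExistsError
from azure.storage.queue import QueueClient, TextBase64EncodePolicy

# Read the connection string from local.settings.json (key name assumed).
with open("local.settings.json") as settings_file:
    conn_str = json.load(settings_file)["Values"]["QueueStorage"]

# The Functions queue trigger expects Base64-encoded messages by default,
# so use the matching encode policy when sending the test message.
queue = QueueClient.from_connection_string(
    conn_str, "myqueue-items", message_encode_policy=TextBase64EncodePolicy()
)
try:
    queue.create_queue()
except ResourceExistsError:
    pass  # The queue already exists; that's fine.

queue.send_message("Test message for the queue trigger")
print("Message sent; the queue-triggered function should pick it up.")
```

With the project running in debug mode, sending a message this way should cause the queue-triggered function to execute and log the message.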
@@ -191,7 +163,7 @@ For a full list of the bindings supported by Functions, see [Supported bindings] ## Run functions locally -Azure Functions Core Tools lets you run Azure Functions project on your local development computer. When you press F5 to debug a Functions project the local Functions host (func.exe) is started listening on a local port (usually 7071). Any callable function endpoints are written to the output, and you can use these for testing your functions. For more information, see [Work with Azure Functions Core Tools](functions-run-local.md). You're prompted to install these tools the first time you start a function from Visual Studio. +Azure Functions Core Tools lets you run Azure Functions project on your local development computer. When you press F5 to debug a Functions project, the local Functions host (func.exe) starts to listen on a local port (usually 7071). Any callable function endpoints are written to the output, and you can use these for testing your functions. For more information, see [Work with Azure Functions Core Tools](functions-run-local.md). You're prompted to install these tools the first time you start a function from Visual Studio. To start your function in Visual Studio in debug mode: @@ -205,7 +177,7 @@ For a more detailed testing scenario using Visual Studio, see [Testing functions ## Publish to Azure -When you publish from Visual Studio, it uses one of two deployment methods: +When you publish from Visual Studio, it uses one of the two deployment methods: * [Web Deploy](functions-deployment-technologies.md#web-deploy-msdeploy): Packages and deploys Windows apps to any IIS server. * [Zip Deploy with run-From-package enabled](functions-deployment-technologies.md#zip-deploy): Recommended for Azure Functions deployments. @@ -216,9 +188,9 @@ Use the following steps to publish your project to a function app in Azure. ## Function app settings -Because Visual Studio doesn't upload these settings automatically when you publish the project, any settings you add in the local.settings.json you must also add to the function app in Azure. +Visual Studio doesn't upload these settings automatically when you publish the project. Any settings you add in the local.settings.json you must also add to the function app in Azure. -The easiest way to upload the required settings to your function app in Azure is to select the **Manage Azure App Service settings** link that appears after you successfully publish your project. +The easiest way to upload the required settings to your function app in Azure is to expand the three dots next to the **Hosting** section and select the **Manage Azure App Service settings** link that appears after you successfully publish your project. :::image type="content" source="./media/functions-develop-vs/functions-vstools-app-settings.png" alt-text="Settings in Publish window"::: @@ -245,7 +217,7 @@ To learn more about monitoring using Application Insights, see [Monitor Azure Fu ## Testing functions -This section describes how to create a C# function app project in Visual Studio and run and tests with [xUnit](https://github.com/xunit/xunit). +This section describes how to create a C# function app project in Visual Studio and to run and test with [xUnit](https://github.com/xunit/xunit). ![Testing Azure Functions with C# in Visual Studio](./media/functions-test-a-function/azure-functions-test-visual-studio-xunit.png) @@ -256,7 +228,7 @@ To set up your environment, create a function and test the app. The following st 1. 
[Create a new Functions app](functions-get-started.md) and name it **Functions** 2. [Create an HTTP function from the template](functions-get-started.md) and name it **MyHttpTrigger**. 3. [Create a timer function from the template](functions-create-scheduled-function.md) and name it **MyTimerTrigger**. -4. [Create an xUnit Test app](https://xunit.net/docs/getting-started/netcore/cmdline) in the solution and name it **Functions.Tests**. +4. [Create an xUnit Test app](https://xunit.net/docs/getting-started/netcore/cmdline) in the solution and name it **Functions.Tests**. Remove the default test files. 5. Use NuGet to add a reference from the test app to [Microsoft.AspNetCore.Mvc](https://www.nuget.org/packages/Microsoft.AspNetCore.Mvc/) 6. [Reference the *Functions* app](/visualstudio/ide/managing-references-in-a-project) from *Functions.Tests* app. @@ -266,7 +238,7 @@ Now that the projects are created, you can create the classes used to run the au Each function takes an instance of [ILogger](/dotnet/api/microsoft.extensions.logging.ilogger) to handle message logging. Some tests either don't log messages or have no concern for how logging is implemented. Other tests need to evaluate messages logged to determine whether a test is passing. -You'll create a new class named `ListLogger` which holds an internal list of messages to evaluate during a testing. To implement the required `ILogger` interface, the class needs a scope. The following class mocks a scope for the test cases to pass to the `ListLogger` class. +You'll create a new class named `ListLogger`, which holds an internal list of messages to evaluate during testing. To implement the required `ILogger` interface, the class needs a scope. The following class mocks a scope for the test cases to pass to the `ListLogger` class. Create a new class in *Functions.Tests* project named **NullScope.cs** and enter the following code: @@ -452,7 +424,7 @@ namespace Functions.Tests public void Timer_should_log_message() { var logger = (ListLogger)TestFactory.CreateLogger(LoggerTypes.List); - MyTimerTrigger.Run(null, logger); + new MyTimerTrigger().Run(null, logger); var msg = logger.Logs[0]; Assert.Contains("C# Timer trigger function executed at", msg); } @@ -466,23 +438,50 @@ The members implemented in this class are: - **Http_trigger_should_return_string_from_member_data**: This test uses xUnit attributes to provide sample data to the HTTP function. -- **Timer_should_log_message**: This test creates an instance of `ListLogger` and passes it to a timer functions. Once the function is run, then the log is checked to ensure the expected message is present. +- **Timer_should_log_message**: This test creates an instance of `ListLogger` and passes it to a timer function. Once the function is run, then the log is checked to make sure the expected message is present. If you want to access application settings in your tests, you can [inject](functions-dotnet-dependency-injection.md) an `IConfiguration` instance with mocked environment variable values into your function. ### Run tests -To run the tests, navigate to the **Test Explorer** and click **Run all**. +To run the tests, navigate to the **Test Explorer** and select **Run All Tests in View**. ![Testing Azure Functions with C# in Visual Studio](./media/functions-test-a-function/azure-functions-test-visual-studio-xunit.png) ### Debug tests -To debug the tests, set a breakpoint on a test, navigate to the **Test Explorer** and click **Run > Debug Last Run**. 
+To debug the tests, set a breakpoint on a test, navigate to the **Test Explorer** and select **Run > Debug Last Run**. + +## Azure Functions tools with Visual Studio 2017 + +Azure Functions Tools is available in the Azure development workload starting with Visual Studio 2017. In Visual Studio 2017, the Azure development workload installs Azure Functions Tools as a separate extension. In Visual Studio 2019 and later, the Azure Functions tools extension is updated as part of Visual Studio. + +When you update your Visual Studio 2017 installation, make sure that you're using the [most recent version](#check-your-tools-version) of the Azure Functions Tools. The following sections show you how to check and (if needed) update your Azure Functions Tools extension in Visual Studio 2017. +### Check your tools version in Visual Studio 2017 + +1. From the **Tools** menu, choose **Extensions and Updates**. Expand **Installed** > **Tools**, and then choose **Azure Functions and Web Jobs Tools**. + + ![Verify the Functions tools version](./media/functions-develop-vs/functions-vstools-check-functions-tools.png) + +1. Note the installed **Version** and compare this version with the latest version listed in the [release notes](https://github.com/Azure/Azure-Functions/blob/master/VS-AzureTools-ReleaseNotes.md). + +1. If your version is older, update your tools in Visual Studio as shown in the following section. + +### Update your tools in Visual Studio 2017 + +1. In the **Extensions and Updates** dialog, expand **Updates** > **Visual Studio Marketplace**, choose **Azure Functions and Web Jobs Tools** and select **Update**. + + ![Update the Functions tools version](./media/functions-develop-vs/functions-vstools-update-functions-tools.png) + +1. After the tools update is downloaded, select **Close**, and then close Visual Studio to trigger the tools update with VSIX Installer. + +1. In VSIX Installer, choose **Modify** to update the tools. + +1. After the update is complete, choose **Close**, and then restart Visual Studio. ## Next steps For more information about the Azure Functions Core Tools, see [Work with Azure Functions Core Tools](functions-run-local.md). -For more information about developing functions as .NET class libraries, see [Azure Functions C# developer reference](functions-dotnet-class-library.md). This article also links to examples of how to use attributes to declare the various types of bindings supported by Azure Functions. +For more information about developing functions as .NET class libraries, see [Azure Functions C# developer reference](functions-dotnet-class-library.md). This article also links to examples on how to use attributes to declare the various types of bindings supported by Azure Functions. 
diff --git a/articles/azure-functions/functions-event-hub-cosmos-db.md b/articles/azure-functions/functions-event-hub-cosmos-db.md index 20e25a71ad82b..fc8f36592c54e 100644 --- a/articles/azure-functions/functions-event-hub-cosmos-db.md +++ b/articles/azure-functions/functions-event-hub-cosmos-db.md @@ -214,7 +214,7 @@ az functionapp create \ --storage-account $STORAGE_ACCOUNT \ --consumption-plan-location $LOCATION \ --runtime java \ - --functions-version 2 + --functions-version 3 ``` # [Cmd](#tab/cmd) @@ -230,7 +230,7 @@ az functionapp create ^ --storage-account %STORAGE_ACCOUNT% ^ --consumption-plan-location %LOCATION% ^ --runtime java ^ - --functions-version 2 + --functions-version 3 ``` --- diff --git a/articles/azure-functions/functions-overview.md b/articles/azure-functions/functions-overview.md index d03ea3c61a98f..855ed1a6265be 100644 --- a/articles/azure-functions/functions-overview.md +++ b/articles/azure-functions/functions-overview.md @@ -4,9 +4,9 @@ description: Learn how Azure Functions can help build robust serverless apps. author: craigshoemaker ms.assetid: 01d6ca9f-ca3f-44fa-b0b9-7ffee115acd4 ms.topic: overview -ms.date: 11/20/2020 +ms.date: 05/27/2022 ms.author: cshoe -ms.custom: contperf-fy21q2 +ms.custom: contperf-fy21q2, devdivchpfy22 --- # Introduction to Azure Functions diff --git a/articles/azure-functions/functions-premium-plan.md b/articles/azure-functions/functions-premium-plan.md index 81f2057f9edb1..ac8c6063794ed 100644 --- a/articles/azure-functions/functions-premium-plan.md +++ b/articles/azure-functions/functions-premium-plan.md @@ -12,8 +12,7 @@ ms.custom: references_regions, fasttrack-edit, devx-track-azurecli The Azure Functions Elastic Premium plan is a dynamic scale hosting option for function apps. For other hosting plan options, see the [hosting plan article](functions-scale.md). ->[!IMPORTANT] ->Azure Functions runs on the Azure App Service platform. In the App Service platform, plans that host Premium plan function apps are referred to as *Elastic* Premium plans, with SKU names like `EP1`. If you choose to run your function app on a Premium plan, make sure to create a plan with an SKU name that starts with "E", such as `EP1`. App Service plan SKU names that start with "P", such as `P1V2` (Premium V2 Small plan), are actually [Dedicated hosting plans](dedicated-plan.md). Because they are Dedicated and not Elastic Premium, plans with SKU names starting with "P" won't scale dynamically and may increase your costs. +[!INCLUDE [functions-premium-plan-note](../../includes/functions-premium-plan-note.md)] Premium plan hosting provides the following benefits to your functions: diff --git a/articles/azure-functions/functions-reference-node.md b/articles/azure-functions/functions-reference-node.md index f6e382a97f8c8..c4f97fc37caa2 100644 --- a/articles/azure-functions/functions-reference-node.md +++ b/articles/azure-functions/functions-reference-node.md @@ -527,7 +527,7 @@ The following table shows current supported Node.js versions for each major vers | Functions version | Node version (Windows) | Node Version (Linux) | |---|---| --- | -| 4.x (recommended) | `~16` (preview)
                  `~14` (recommended) | `node|16` (preview)
                  `node|14` (recommended) | +| 4.x (recommended) | `~16`
                  `~14` | `node|16`
                  `node|14` | | 3.x | `~14`
                  `~12`
                  `~10` | `node|14`
                  `node|12`
                  `node|10` | | 2.x | `~12`
                  `~10`
                  `~8` | `node|10`
                  `node|8` | | 1.x | 6.11.2 (locked by the runtime) | n/a | diff --git a/articles/azure-functions/functions-reference-python.md b/articles/azure-functions/functions-reference-python.md index e5903afad5ebf..92d4478523c37 100644 --- a/articles/azure-functions/functions-reference-python.md +++ b/articles/azure-functions/functions-reference-python.md @@ -2,9 +2,9 @@ title: Python developer reference for Azure Functions description: Understand how to develop functions with Python ms.topic: article -ms.date: 11/4/2020 +ms.date: 05/25/2022 ms.devlang: python -ms.custom: devx-track-python +ms.custom: devx-track-python, devdivchpfy22 --- # Azure Functions Python developer guide @@ -15,7 +15,7 @@ As a Python developer, you may also be interested in one of the following articl | Getting started | Concepts| Scenarios/Samples | |--|--|--| -|
                  • [Python function using Visual Studio Code](./create-first-function-vs-code-python.md)
                  • [Python function with terminal/command prompt](./create-first-function-cli-python.md)
                  |
                  • [Developer guide](functions-reference.md)
                  • [Hosting options](functions-scale.md)
                  • [Performance considerations](functions-best-practices.md)
                  |
                  • [Image classification with PyTorch](machine-learning-pytorch.md)
                  • [Azure automation sample](/samples/azure-samples/azure-functions-python-list-resource-groups/azure-functions-python-sample-list-resource-groups/)
                  • [Machine learning with TensorFlow](functions-machine-learning-tensorflow.md)
                  • [Browse Python samples](/samples/browse/?products=azure-functions&languages=python)
                  | +|
                  • [Python function using Visual Studio Code](./create-first-function-vs-code-python.md)
                  • [Python function with terminal/command prompt](./create-first-function-cli-python.md)
                  |
                  • [Developer guide](functions-reference.md)
                  • [Hosting options](functions-scale.md)
                  • [Performance considerations](functions-best-practices.md)
                  |
                  • [Image classification with PyTorch](machine-learning-pytorch.md)
                  • [Azure Automation sample](/samples/azure-samples/azure-functions-python-list-resource-groups/azure-functions-python-sample-list-resource-groups/)
                  • [Machine learning with TensorFlow](functions-machine-learning-tensorflow.md)
                  • [Browse Python samples](/samples/browse/?products=azure-functions&languages=python)
                  | > [!NOTE] > While you can [develop your Python based Azure Functions locally on Windows](create-first-function-vs-code-python.md#run-the-function-locally), Python is only supported on a Linux based hosting plan when running in Azure. See the list of supported [operating system/runtime](functions-scale.md#operating-systemruntime) combinations. @@ -36,7 +36,7 @@ def main(req): return f'Hello, {user}!' ``` -You can also explicitly declare the attribute types and return type in the function using Python type annotations. This helps you use the intellisense and autocomplete features provided by many Python code editors. +You can also explicitly declare the attribute types and return type in the function using Python type annotations. This action helps you to use the IntelliSense and autocomplete features provided by many Python code editors. ```python import azure.functions @@ -103,7 +103,7 @@ The main project folder () can contain the following files: Each function has its own code file and binding configuration file (function.json). -When deploying your project to a function app in Azure, the entire contents of the main project (**) folder should be included in the package, but not the folder itself, which means `host.json` should be in the package root. We recommend that you maintain your tests in a folder along with other functions, in this example `tests/`. For more information, see [Unit Testing](#unit-testing). +When you deploy your project to a function app in Azure, the entire contents of the main project (**) folder should be included in the package, but not the folder itself, which means `host.json` should be in the package root. We recommend that you maintain your tests in a folder along with other functions, in this example `tests/`. For more information, see [Unit Testing](#unit-testing). ## Import behavior @@ -124,7 +124,7 @@ from . import example #(relative) > [!NOTE] > The *shared_code/* folder needs to contain an \_\_init\_\_.py file to mark it as a Python package when using absolute import syntax. -The following \_\_app\_\_ import and beyond top-level relative import are deprecated, since it is not supported by static type checker and not supported by Python test frameworks: +The following \_\_app\_\_ import and beyond top-level relative import are deprecated, since it isn't supported by static type checker and not supported by Python test frameworks: ```python from __app__.shared_code import my_first_helper_function #(deprecated __app__ import) @@ -263,7 +263,7 @@ To learn more about logging, see [Monitor Azure Functions](functions-monitoring. ### Log custom telemetry -By default, the Functions runtime collects logs and other telemetry data generated by your functions. This telemetry ends up as traces in Application Insights. Request and dependency telemetry for certain Azure services are also collected by default by [triggers and bindings](functions-triggers-bindings.md#supported-bindings). To collect custom request and custom dependency telemetry outside of bindings, you can use the [OpenCensus Python Extensions](https://github.com/census-ecosystem/opencensus-python-extensions-azure), which sends custom telemetry data to your Application Insights instance. You can find a list of supported extensions at the [OpenCensus repository](https://github.com/census-instrumentation/opencensus-python/tree/master/contrib). +By default, the Functions runtime collects logs and other telemetry data generated by your functions. 
This telemetry ends up as traces in Application Insights. Request and dependency telemetry for certain Azure services are also collected by default by [triggers and bindings](functions-triggers-bindings.md#supported-bindings). To collect custom request and custom dependency telemetry outside of bindings, you can use the [OpenCensus Python Extensions](https://github.com/census-ecosystem/opencensus-python-extensions-azure). This extension sends custom telemetry data to your Application Insights instance. You can find a list of supported extensions at the [OpenCensus repository](https://github.com/census-instrumentation/opencensus-python/tree/master/contrib). >[!NOTE] >To use the OpenCensus Python extensions, you need to enable [Python worker extensions](#python-worker-extensions) in your function app by setting `PYTHON_ENABLE_WORKER_EXTENSIONS` to `1`. You also need to switch to using the Application Insights connection string by adding the [`APPLICATIONINSIGHTS_CONNECTION_STRING`](functions-app-settings.md#applicationinsights_connection_string) setting to your [application settings](functions-how-to-use-azure-function-app-settings.md#settings), if it's not already there. @@ -345,7 +345,7 @@ Likewise, you can set the `status_code` and `headers` for the response message i ## Web frameworks -You can leverage WSGI and ASGI-compatible frameworks such as Flask and FastAPI with your HTTP-triggered Python functions. This section shows how to modify your functions to support these frameworks. +You can use WSGI and ASGI-compatible frameworks such as Flask and FastAPI with your HTTP-triggered Python functions. This section shows how to modify your functions to support these frameworks. First, the function.json file must be updated to include a `route` in the HTTP trigger, as shown in the following example: @@ -404,7 +404,7 @@ The host.json file must also be updated to include an HTTP `routePrefix`, as sho } ``` -Update the Python code file `init.py`, depending on the interface used by your framework. The following example shows either an ASGI hander approach or a WSGI wrapper approach for Flask: +Update the Python code file `init.py`, depending on the interface used by your framework. The following example shows either an ASGI handler approach or a WSGI wrapper approach for Flask: # [ASGI](#tab/asgi) @@ -470,14 +470,14 @@ Name of the function. ID of the current function invocation. `trace_context` -Context for distributed tracing. Please refer to [`Trace Context`](https://www.w3.org/TR/trace-context/) for more information.. +Context for distributed tracing. For more information, see [`Trace Context`](https://www.w3.org/TR/trace-context/). `retry_context` -Context for retries to the function. Please refer to [`retry-policies`](./functions-bindings-errors.md#retry-policies-preview) for more information. +Context for retries to the function. For more information, see [`retry-policies`](./functions-bindings-errors.md#retry-policies-preview). ## Global variables -It is not guaranteed that the state of your app will be preserved for future executions. However, the Azure Functions runtime often reuses the same process for multiple executions of the same app. In order to cache the results of an expensive computation, declare it as a global variable. +It isn't guaranteed that the state of your app will be preserved for future executions. However, the Azure Functions runtime often reuses the same process for multiple executions of the same app. 
In order to cache the results of an expensive computation, declare it as a global variable. ```python CACHED_DATA = None @@ -528,21 +528,19 @@ Azure Functions supports the following Python versions: | 3.x | 3.9
                  3.8
                  3.7
                  3.6 | | 2.x | 3.7
                  3.6 | -*Official CPython distributions +*Official Python distributions To request a specific Python version when you create your function app in Azure, use the `--runtime-version` option of the [`az functionapp create`](/cli/azure/functionapp#az-functionapp-create) command. The Functions runtime version is set by the `--functions-version` option. The Python version is set when the function app is created and can't be changed. -When running locally, the runtime uses the available Python version. +When you run your project locally, the runtime uses the available Python version. ### Changing Python version -To set a Python function app to a specific language version, you need to specify the language as well as the version of the language in `LinuxFxVersion` field in site config. For example, to change Python app to use Python 3.8, set `linuxFxVersion` to `python|3.8`. - -To learn more about Azure Functions runtime support policy, please refer to this [article](./language-support-policy.md) - -To see the full list of supported Python versions functions apps, please refer to this [article](./supported-languages.md) +To set a Python function app to a specific language version, you need to specify both the language and the version of the language in the `LinuxFxVersion` field in the site config. For example, to change a Python app to use Python 3.8, set `linuxFxVersion` to `python|3.8`. +To learn more about the Azure Functions runtime support policy, refer to this [article](./language-support-policy.md). +To see the full list of Python versions supported by function apps, refer to this [article](./supported-languages.md). # [Azure CLI](#tab/azurecli-linux) @@ -581,7 +579,7 @@ az functionapp config set --name \ --resource-group \ --linux-fx-version ``` -Replace `` with the name of your function app. Also replace `` with the name of the resource group for your function app. Also, replace `` with the Python version you want to use, prefixed by `python|` e.g. `python|3.9` +Replace `` with the name of your function app. Also replace `` with the name of the resource group for your function app. Also, replace `` with the Python version you want to use, prefixed by `python|`, for example, `python|3.9`. You can run this command from the [Azure Cloud Shell](../cloud-shell/overview.md) by choosing **Try it** in the preceding code sample. You can also use the [Azure CLI locally](/cli/azure/install-azure-cli) to execute this command after executing [az login](/cli/azure/reference-index#az-login) to sign in. @@ -606,9 +604,9 @@ pip install -r requirements.txt ``` ## Publishing to Azure -When you're ready to publish, make sure that all your publicly available dependencies are listed in the requirements.txt file, which is located at the root of your project directory. +When you're ready to publish, make sure that all your publicly available dependencies are listed in the requirements.txt file. This file is located at the root of your project directory. -Project files and folders that are excluded from publishing, including the virtual environment folder, are listed in the .funcignore file. +Project files and folders that are excluded from publishing, including the virtual environment folder, are listed in the .funcignore file at the root of your project. There are three build actions supported for publishing your Python project to Azure: remote build, local build, and builds using custom dependencies.
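The requirements.txt guidance above matters because anything your functions import beyond the standard library has to be installable during the build. As a minimal, hedged sketch (the `requests` dependency is only an example, not something this article requires), a function that calls an external API would need `requests` listed in requirements.txt before you publish:

```python
# __init__.py for an HTTP-triggered function that uses a third-party package.
# Because "requests" isn't part of the standard library, it must be listed in
# requirements.txt so the remote or local build can install it.
import logging

import azure.functions as func
import requests


def main(req: func.HttpRequest) -> func.HttpResponse:
    upstream = requests.get("https://example.com", timeout=10)
    logging.info("Upstream returned HTTP %s", upstream.status_code)
    return func.HttpResponse(f"Upstream status: {upstream.status_code}")
```

A matching requirements.txt would then list at least `azure-functions` and `requests`, pinned to the versions you tested with.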
@@ -616,7 +614,7 @@ You can also use Azure Pipelines to build your dependencies and publish using co ### Remote build -When using remote build, dependencies restored on the server and native dependencies match the production environment. This results in a smaller deployment package to upload. Use remote build when developing Python apps on Windows. If your project has custom dependencies, you can [use remote build with extra index URL](#remote-build-with-extra-index-url). +When you use remote build, dependencies are restored on the server, and native dependencies match the production environment. This results in a smaller deployment package to upload. Use remote build when developing Python apps on Windows. If your project has custom dependencies, you can [use remote build with extra index URL](#remote-build-with-extra-index-url). Dependencies are obtained remotely based on the contents of the requirements.txt file. [Remote build](functions-deployment-technologies.md#remote-build) is the recommended build method. By default, the Azure Functions Core Tools requests a remote build when you use the following [`func azure functionapp publish`](functions-run-local.md#publish) command to publish your Python project to Azure. @@ -638,7 +636,7 @@ func azure functionapp publish --build local Remember to replace `` with the name of your function app in Azure. -Using the `--build local` option, project dependencies are read from the requirements.txt file and those dependent packages are downloaded and installed locally. Project files and dependencies are deployed from your local computer to Azure. This results in a larger deployment package being uploaded to Azure. If for some reason, dependencies in your requirements.txt file can't be acquired by Core Tools, you must use the custom dependencies option for publishing. +When you use the `--build local` option, project dependencies are read from the requirements.txt file and those dependent packages are downloaded and installed locally. Project files and dependencies are deployed from your local computer to Azure. This results in a larger deployment package being uploaded to Azure. If, for some reason, Core Tools can't acquire the dependencies in your requirements.txt file, you must use the custom dependencies option for publishing. We don't recommend using local builds when developing locally on Windows. @@ -660,7 +658,7 @@ If your project uses packages not publicly available to our tools, you can make pip install --target="/.python_packages/lib/site-packages" -r requirements.txt ``` -When using custom dependencies, you should use the `--no-build` publishing option, since you have already installed the dependencies into the project folder. +When using custom dependencies, you should use the `--no-build` publishing option, since you've already installed the dependencies into the project folder. ```command func azure functionapp publish --no-build ``` Remember to replace `` with the name of your function app in Azure. ## Unit Testing -Functions written in Python can be tested like other Python code using standard testing frameworks. For most bindings, it's possible to create a mock input object by creating an instance of an appropriate class from the `azure.functions` package. Since the [`azure.functions`](https://pypi.org/project/azure-functions/) package is not immediately available, be sure to install it via your `requirements.txt` file as described in the [package management](#package-management) section above.
+Functions written in Python can be tested like other Python code using standard testing frameworks. For most bindings, it's possible to create a mock input object by creating an instance of an appropriate class from the `azure.functions` package. Since the [`azure.functions`](https://pypi.org/project/azure-functions/) package isn't immediately available, be sure to install it via your `requirements.txt` file as described in the [package management](#package-management) section above. Take *my_second_function* as an example, following is a mock test of an HTTP triggered function: @@ -788,15 +786,15 @@ from os import listdir filesDirListInTemp = listdir(tempFilePath) -We recommend that you maintain your tests in a folder separate from the project folder. This keeps you from deploying test code with your app. +We recommend that you maintain your tests in a folder separate from the project folder. This action keeps you from deploying test code with your app. ## Preinstalled libraries -There are a few libraries come with the Python Functions runtime. +There are a few libraries that come with the Python Functions runtime. ### Python Standard Library -The Python Standard Library contains a list of built-in Python modules that are shipped with each Python distribution. Most of these libraries help you access system functionality, like file I/O. On Windows systems, these libraries are installed with Python. On the Unix-based systems, they are provided by package collections. +The Python Standard Library contains a list of built-in Python modules that are shipped with each Python distribution. Most of these libraries help you access system functionality, like file I/O. On Windows systems, these libraries are installed with Python. On the Unix-based systems, they're provided by package collections. To view the full details of the list of these libraries, see the links below: @@ -849,7 +847,7 @@ Extensions are imported in your function code much like a standard Python librar Review the information for a given extension to learn more about the scope in which the extension runs. -Extensions implement a Python worker extension interface that lets the Python worker process call into the extension code during the function execution lifecycle. To learn more, see [Creating extensions](#creating-extensions). +Extensions implement a Python worker extension interface. This interface lets the Python worker process call into the extension code during the function execution lifecycle. To learn more, see [Creating extensions](#creating-extensions). ### Using extensions @@ -858,7 +856,7 @@ You can use a Python worker extension library in your Python functions by follow 1. Add the extension package in the requirements.txt file for your project. 1. Install the library into your app. 1. Add the application setting `PYTHON_ENABLE_WORKER_EXTENSIONS`: - + Locally: add `"PYTHON_ENABLE_WORKER_EXTENSIONS": "1"` in the `Values` section of your [local.settings.json file](functions-develop-local.md#local-settings-file) + + Locally: add `"PYTHON_ENABLE_WORKER_EXTENSIONS": "1"` in the `Values` section of your [local.settings.json file](functions-develop-local.md#local-settings-file). + Azure: add `PYTHON_ENABLE_WORKER_EXTENSIONS=1` to your [app settings](functions-how-to-use-azure-function-app-settings.md#settings). 1. Import the extension module into your function trigger. 1. Configure the extension instance, if needed. Configuration requirements should be called-out in the extension's documentation.
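To make the unit-testing guidance earlier in this file concrete, the following is a minimal sketch of a mock test for an HTTP-triggered function. It assumes a function folder named *my_second_function* that exposes `main` and returns a 200 response; adjust the import, request parameters, and assertions to match your own project.

```python
# test_my_second_function.py -- a minimal sketch; the function name, query
# parameter, and expected status code are assumptions for illustration only.
import unittest

import azure.functions as func
from my_second_function import main  # assumes <project_root>/my_second_function/__init__.py


class TestFunction(unittest.TestCase):
    def test_my_second_function(self):
        # Build a mock HTTP request by instantiating the azure.functions class directly.
        req = func.HttpRequest(
            method='GET',
            body=None,
            url='/api/my_second_function',
            params={'value': '21'})

        # Invoke the function entry point with the mock request.
        resp = main(req)

        # Verify that the function returned a successful response.
        self.assertEqual(resp.status_code, 200)


if __name__ == '__main__':
    unittest.main()
```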
@@ -893,6 +891,7 @@ function-level-extension==1.0.0 ``` ```python + # /Trigger/__init__.py from function_level_extension import FuncExtension @@ -905,7 +904,7 @@ def main(req, context): ### Creating extensions -Extensions are created by third-party library developers who have created functionality that can be integrated into Azure Functions. An extension developer designs, implements, and releases Python packages that contain custom logic designed specifically to be run in the context of function execution. These extensions can be published either to the PyPI registry or to GitHub repositories. +Extensions are created by third-party library developers who have created functionality that can be integrated into Azure Functions. An extension developer designs, implements, and releases Python packages that contain custom logic designed specifically to be run in the context of function execution. These extensions can be published either to the PyPI registry or to GitHub repositories. To learn how to create, package, publish, and consume a Python worker extension package, see [Develop Python worker extensions for Azure Functions](develop-python-worker-extensions.md). @@ -948,18 +947,18 @@ By default, a host instance for Python can process only one function invocation ## Shared memory (preview) -To improve throughput, Functions lets your out-of-process Python language worker share memory with the Functions host process. When your function app is hitting bottlenecks, you can enable shared memory by adding an application setting named [FUNCTIONS_WORKER_SHARED_MEMORY_DATA_TRANSFER_ENABLED](functions-app-settings.md#functions_worker_shared_memory_data_transfer_enabled) with a value of `1`. With shared memory enabled, you can then use the [DOCKER_SHM_SIZE](functions-app-settings.md#docker_shm_size) setting to set the shared memory to something like `268435456`, which is equivalent to 256 MB. +To improve throughput, Functions lets your out-of-process Python language worker share memory with the Functions host process. When your function app is hitting bottlenecks, you can enable shared memory by adding an application setting named [FUNCTIONS_WORKER_SHARED_MEMORY_DATA_TRANSFER_ENABLED](functions-app-settings.md#functions_worker_shared_memory_data_transfer_enabled) with a value of `1`. With shared memory enabled, you can then use the [DOCKER_SHM_SIZE](functions-app-settings.md#docker_shm_size) setting to set the shared memory to something like `268435456`, which is equivalent to 256 MB. For example, you might enable shared memory to reduce bottlenecks when using Blob storage bindings to transfer payloads larger than 1 MB. This functionality is available only for function apps running in Premium and Dedicated (App Service) plans. To learn more, see [Shared memory](https://github.com/Azure/azure-functions-python-worker/wiki/Shared-Memory). - + ## Known issues and FAQ Following is a list of troubleshooting guides for common issues: * [ModuleNotFoundError and ImportError](recover-python-functions.md#troubleshoot-modulenotfounderror) -* [Cannot import 'cygrpc'](recover-python-functions.md#troubleshoot-cannot-import-cygrpc) +* [Can't import 'cygrpc'](recover-python-functions.md#troubleshoot-cannot-import-cygrpc) All known issues and feature requests are tracked using [GitHub issues](https://github.com/Azure/azure-functions-python-worker/issues) list. If you run into a problem and can't find the issue in GitHub, open a new issue and include a detailed description of the problem.
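As noted in the concurrency discussion above, a Python host instance processes only one function invocation at a time by default. For I/O-bound workloads, one commonly suggested option is to declare the entry point as a coroutine so the worker's event loop can interleave invocations while awaiting. The following is only a sketch; the `asyncio.sleep` call is a stand-in for real awaitable I/O, such as an async HTTP or database client.

```python
# __init__.py -- a minimal sketch of an async, HTTP-triggered function;
# the awaited call below is a placeholder for real I/O-bound work.
import asyncio

import azure.functions as func


async def main(req: func.HttpRequest) -> func.HttpResponse:
    # While this coroutine awaits, the worker can start processing other invocations.
    await asyncio.sleep(1)
    return func.HttpResponse("Done", status_code=200)
```

Each `await` yields control back to the worker's event loop, which is what allows other invocations to make progress; CPU-bound work doesn't benefit from this pattern.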
diff --git a/articles/azure-functions/functions-run-local.md b/articles/azure-functions/functions-run-local.md index 66638aafd95e1..b11afb7d857b6 100644 --- a/articles/azure-functions/functions-run-local.md +++ b/articles/azure-functions/functions-run-local.md @@ -346,7 +346,7 @@ func new --template "Http Trigger" --name MyHttpTrigger This example creates a Queue Storage trigger named `MyQueueTrigger`: ``` -func new --template "Queue Trigger" --name MyQueueTrigger +func new --template "Azure Queue Storage Trigger" --name MyQueueTrigger ``` To learn more, see the [`func new` command](functions-core-tools-reference.md#func-new). diff --git a/articles/azure-functions/functions-scale.md b/articles/azure-functions/functions-scale.md index 76b4ed5f26e15..56d880741645c 100644 --- a/articles/azure-functions/functions-scale.md +++ b/articles/azure-functions/functions-scale.md @@ -3,9 +3,9 @@ title: Azure Functions scale and hosting description: Learn how to choose between Azure Functions Consumption plan and Premium plan. ms.assetid: 5b63649c-ec7f-4564-b168-e0a74cb7e0f3 ms.topic: conceptual -ms.date: 03/24/2022 +ms.date: 04/22/2022 -ms.custom: H1Hack27Feb2017 +ms.custom: H1Hack27Feb2017,devdivchpfy22 --- # Azure Functions hosting options @@ -30,7 +30,7 @@ The following is a summary of the benefits of the three main hosting plans for F | Plan | Benefits | | --- | --- | |**[Consumption plan]**| Scale automatically and only pay for compute resources when your functions are running.

                  On the Consumption plan, instances of the Functions host are dynamically added and removed based on the number of incoming events.

                  ✔ Default hosting plan.
                  ✔ Pay only when your functions are running.
                  ✔ Scales automatically, even during periods of high load.| -|**[Premium plan]**|Automatically scales based on demand using pre-warmed workers which run applications with no delay after being idle, runs on more powerful instances, and connects to virtual networks.

                  Consider the Azure Functions Premium plan in the following situations:

                  ✔ Your function apps run continuously, or nearly continuously.
                  ✔ You have a high number of small executions and a high execution bill, but low GB seconds in the Consumption plan.
                  ✔ You need more CPU or memory options than what is provided by the Consumption plan.
                  ✔ Your code needs to run longer than the maximum execution time allowed on the Consumption plan.
                  ✔ You require features that aren't available on the Consumption plan, such as virtual network connectivity.
                  ✔ You want to provide a custom Linux image on which to run your functions. | +|**[Premium plan]**|Automatically scales based on demand using pre-warmed workers, which run applications with no delay after being idle, runs on more powerful instances, and connects to virtual networks.

                  Consider the Azure Functions Premium plan in the following situations:

                  ✔ Your function apps run continuously, or nearly continuously.
                  ✔ You have a high number of small executions and a high execution bill, but low GB seconds in the Consumption plan.
                  ✔ You need more CPU or memory options than what is provided by the Consumption plan.
                  ✔ Your code needs to run longer than the maximum execution time allowed on the Consumption plan.
                  ✔ You require features that aren't available on the Consumption plan, such as virtual network connectivity.
                  ✔ You want to provide a custom Linux image on which to run your functions. | |**[Dedicated plan]** |Run your functions within an App Service plan at regular [App Service plan rates](https://azure.microsoft.com/pricing/details/app-service/windows/).

                  Best for long-running scenarios where [Durable Functions](durable/durable-functions-overview.md) can't be used. Consider an App Service plan in the following situations:

                  ✔ You have existing, underutilized VMs that are already running other App Service instances.
                  ✔ Predictive scaling and costs are required.| The comparison tables in this article also include the following hosting options, which provide the highest amount of control and isolation in which to run your function apps. @@ -74,7 +74,7 @@ Maximum instances are given on a per-function app (Consumption) or per-plan (Pre | **[ASE][Dedicated plan]**3 | Manual/autoscale |100 | | **[Kubernetes]** | Event-driven autoscale for Kubernetes clusters using [KEDA](https://keda.sh). | Varies by cluster  | -1 During scale out, there's currently a limit of 500 instances per subscription per hour for Linux apps on a Consumption plan.
                  +1 During scale-out, there's currently a limit of 500 instances per subscription per hour for Linux apps on a Consumption plan.
                  2 In some regions, Linux apps on a Premium plan can scale to 40 instances. For more information, see the [Premium plan article](functions-premium-plan.md#region-max-scale-out).
                  3 For specific limits for the various App Service plan options, see the [App Service plan limits](../azure-resource-manager/management/azure-subscription-service-limits.md#app-service-limits). @@ -101,7 +101,7 @@ Maximum instances are given on a per-function app (Consumption) or per-plan (Pre | Plan | Details | | --- | --- | | **[Consumption plan]** | Pay only for the time your functions run. Billing is based on number of executions, execution time, and memory used. | -| **[Premium plan]** | Premium plan is based on the number of core seconds and memory used across needed and pre-warmed instances. At least one instance per plan must be kept warm at all times. This plan provides the most predictable pricing. | +| **[Premium plan]** | Premium plan is based on the number of core seconds and memory used across needed and pre-warmed instances. At least one instance per plan must always be kept warm. This plan provides the most predictable pricing. | | **[Dedicated plan]** | You pay the same for function apps in an App Service Plan as you would for other App Service resources, like web apps.| | **[App Service Environment (ASE)][Dedicated plan]** | There's a flat monthly rate for an ASE that pays for the infrastructure and doesn't change with the size of the ASE. There's also a cost per App Service plan vCPU. All apps hosted in an ASE are in the Isolated pricing SKU. | | **[Kubernetes]**| You pay only the costs of your Kubernetes cluster; no additional billing for Functions. Your function app runs as an application workload on top of your cluster, just like a regular app. | diff --git a/articles/azure-functions/functions-scenario-database-table-cleanup.md b/articles/azure-functions/functions-scenario-database-table-cleanup.md index 707483bfea46c..9db4bba600998 100644 --- a/articles/azure-functions/functions-scenario-database-table-cleanup.md +++ b/articles/azure-functions/functions-scenario-database-table-cleanup.md @@ -60,13 +60,13 @@ You must have previously published your app to Azure. If you haven't already don You need to add the NuGet package that contains the SqlClient library. This data access library is needed to connect to SQL Database. -1. Open your local function app project in Visual Studio 2019. +1. Open your local function app project in Visual Studio 2022. 1. In Solution Explorer, right-click the function app project and choose **Manage NuGet Packages**. 1. On the **Browse** tab, search for ```System.Data.SqlClient``` and, when found, select it. -1. In the **System.Data.SqlClient** page, select version `4.5.1` and then click **Install**. +1. In the **System.Data.SqlClient** page, select version `4.8.3` and then click **Install**. 1. When the install completes, review the changes and then click **OK** to close the **Preview** window. @@ -80,7 +80,7 @@ Now, you can add the C# function code that connects to your SQL Database. 1. With the **Azure Functions** template selected, name the new item something like `DatabaseCleanup.cs` and select **Add**. -1. In the **New Azure function** dialog box, choose **Timer trigger** and then **OK**. This dialog creates a code file for the timer triggered function. +1. In the **New Azure function** dialog box, choose **Timer trigger** and then **Add**. This dialog creates a code file for the timer triggered function. 1. Open the new code file and add the following using statements at the top of the file: @@ -123,7 +123,7 @@ Now, you can add the C# function code that connects to your SQL Database. 
On the first execution, you should update 32 rows of data. Following runs update no data rows, unless you make changes to the SalesOrderHeader table data so that more rows are selected by the `UPDATE` statement. -If you plan to [publish this function](functions-develop-vs.md#publish-to-azure), remember to change the `TimerTrigger` attribute to a more reasonable [cron schedule](functions-bindings-timer.md#ncrontab-expressions) than every 15 seconds. +If you plan to [publish this function](functions-develop-vs.md#publish-to-azure), remember to change the `TimerTrigger` attribute to a more reasonable [cron schedule](functions-bindings-timer.md#ncrontab-expressions) than every 15 seconds. Also, you need to ensure that the Function Apps instance has network access to the Azure SQL Database instance by granting access to Azure IP addresses. ## Next steps diff --git a/articles/azure-functions/functions-triggers-bindings.md b/articles/azure-functions/functions-triggers-bindings.md index 6122826bfee9b..f3ca9ed112090 100644 --- a/articles/azure-functions/functions-triggers-bindings.md +++ b/articles/azure-functions/functions-triggers-bindings.md @@ -4,15 +4,16 @@ description: Learn to use triggers and bindings to connect your Azure Function t author: craigshoemaker ms.topic: conceptual -ms.date: 02/18/2019 +ms.date: 05/25/2022 ms.author: cshoe +ms.custom: devdivchpfy22 --- # Azure Functions triggers and bindings concepts -In this article you learn the high-level concepts surrounding functions triggers and bindings. +In this article, you learn the high-level concepts surrounding functions triggers and bindings. -Triggers are what cause a function to run. A trigger defines how a function is invoked and a function must have exactly one trigger. Triggers have associated data, which is often provided as the payload of the function. +Triggers cause a function to run. A trigger defines how a function is invoked and a function must have exactly one trigger. Triggers have associated data, which is often provided as the payload of the function. Binding to a function is a way of declaratively connecting another resource to the function; bindings may be connected as *input bindings*, *output bindings*, or both. Data from bindings is provided to the function as parameters. @@ -31,7 +32,7 @@ Consider the following examples of how you could implement different functions. \* Represents different queues -These examples are not meant to be exhaustive, but are provided to illustrate how you can use triggers and bindings together. +These examples aren't meant to be exhaustive, but are provided to illustrate how you can use triggers and bindings together. ### Trigger and binding definitions diff --git a/articles/azure-functions/language-support-policy.md b/articles/azure-functions/language-support-policy.md index d970abfd644dd..a9b3fa3edeadb 100644 --- a/articles/azure-functions/language-support-policy.md +++ b/articles/azure-functions/language-support-policy.md @@ -38,6 +38,7 @@ There are few exceptions to the retirement policy outlined above. 
Here is a list |Node 6|30 April 2019|28 February 2022| |Node 8|31 December 2019|28 February 2022| |Node 10|30 April 2021|30 September 2022| +|Node 12|30 Apr 2022|TBA| |PowerShell Core 6| 4 September 2020|30 September 2022| |Python 3.6 |23 December 2021|30 September 2022| diff --git a/articles/azure-functions/media/functions-az-redundancy/azure-functions-basics-az.png b/articles/azure-functions/media/functions-az-redundancy/azure-functions-basics-az.png new file mode 100644 index 0000000000000..fed37f2d2caae Binary files /dev/null and b/articles/azure-functions/media/functions-az-redundancy/azure-functions-basics-az.png differ diff --git a/articles/azure-functions/media/functions-az-redundancy/azure-functions-hosting-az.png b/articles/azure-functions/media/functions-az-redundancy/azure-functions-hosting-az.png new file mode 100644 index 0000000000000..e9fb9f91c659c Binary files /dev/null and b/articles/azure-functions/media/functions-az-redundancy/azure-functions-hosting-az.png differ diff --git a/articles/azure-functions/media/functions-create-first-function-bicep/function-app-bicep.png b/articles/azure-functions/media/functions-create-first-function-bicep/function-app-bicep.png new file mode 100644 index 0000000000000..6f7ba65490ee9 Binary files /dev/null and b/articles/azure-functions/media/functions-create-first-function-bicep/function-app-bicep.png differ diff --git a/articles/azure-functions/media/functions-develop-vs-code/add-to-workplace.png b/articles/azure-functions/media/functions-develop-vs-code/add-to-workplace.png new file mode 100644 index 0000000000000..5a6073b941aba Binary files /dev/null and b/articles/azure-functions/media/functions-develop-vs-code/add-to-workplace.png differ diff --git a/articles/azure-functions/media/functions-develop-vs-code/new-function-created.png b/articles/azure-functions/media/functions-develop-vs-code/new-function-created.png new file mode 100644 index 0000000000000..69f19fb4d9484 Binary files /dev/null and b/articles/azure-functions/media/functions-develop-vs-code/new-function-created.png differ diff --git a/articles/azure-functions/media/functions-develop-vs-code/select-author-file.png b/articles/azure-functions/media/functions-develop-vs-code/select-author-file.png new file mode 100644 index 0000000000000..0e549a0db508d Binary files /dev/null and b/articles/azure-functions/media/functions-develop-vs-code/select-author-file.png differ diff --git a/articles/azure-functions/media/functions-develop-vs-code/select-http-trigger.png b/articles/azure-functions/media/functions-develop-vs-code/select-http-trigger.png new file mode 100644 index 0000000000000..ec685da811152 Binary files /dev/null and b/articles/azure-functions/media/functions-develop-vs-code/select-http-trigger.png differ diff --git a/articles/azure-functions/media/functions-develop-vs/functions-vstools-app-settings.png b/articles/azure-functions/media/functions-develop-vs/functions-vstools-app-settings.png index 8985063b807a6..253d4ee5725cc 100644 Binary files a/articles/azure-functions/media/functions-develop-vs/functions-vstools-app-settings.png and b/articles/azure-functions/media/functions-develop-vs/functions-vstools-app-settings.png differ diff --git a/articles/azure-functions/media/functions-develop-vs/functions-vstools-create-queuetrigger.png b/articles/azure-functions/media/functions-develop-vs/functions-vstools-create-queuetrigger.png index e8ee057b297a9..34fdd000db7d0 100644 Binary files 
a/articles/azure-functions/media/functions-develop-vs/functions-vstools-create-queuetrigger.png and b/articles/azure-functions/media/functions-develop-vs/functions-vstools-create-queuetrigger.png differ diff --git a/articles/azure-functions/media/openapi-apim-integrate-vs/api-management-test-function-api.png b/articles/azure-functions/media/openapi-apim-integrate-vs/api-management-test-function-api.png index 9bf52f9406ccf..49f94746530a5 100644 Binary files a/articles/azure-functions/media/openapi-apim-integrate-vs/api-management-test-function-api.png and b/articles/azure-functions/media/openapi-apim-integrate-vs/api-management-test-function-api.png differ diff --git a/articles/azure-functions/media/openapi-apim-integrate-vs/apim-add-policy.png b/articles/azure-functions/media/openapi-apim-integrate-vs/apim-add-policy.png index 2142353a81d60..d98d1670cceef 100644 Binary files a/articles/azure-functions/media/openapi-apim-integrate-vs/apim-add-policy.png and b/articles/azure-functions/media/openapi-apim-integrate-vs/apim-add-policy.png differ diff --git a/articles/azure-functions/media/openapi-apim-integrate-vs/download-definition.png b/articles/azure-functions/media/openapi-apim-integrate-vs/download-definition.png index 8863a21bbd8dc..1fac33476beac 100644 Binary files a/articles/azure-functions/media/openapi-apim-integrate-vs/download-definition.png and b/articles/azure-functions/media/openapi-apim-integrate-vs/download-definition.png differ diff --git a/articles/azure-functions/media/openapi-apim-integrate-vs/functions-project-settings.png b/articles/azure-functions/media/openapi-apim-integrate-vs/functions-project-settings.png index 8126b8d42a11c..ccd654e1d32a6 100644 Binary files a/articles/azure-functions/media/openapi-apim-integrate-vs/functions-project-settings.png and b/articles/azure-functions/media/openapi-apim-integrate-vs/functions-project-settings.png differ diff --git a/articles/azure-functions/media/openapi-apim-integrate-vs/inbound-processing-rule.png b/articles/azure-functions/media/openapi-apim-integrate-vs/inbound-processing-rule.png new file mode 100644 index 0000000000000..d77117ad14d76 Binary files /dev/null and b/articles/azure-functions/media/openapi-apim-integrate-vs/inbound-processing-rule.png differ diff --git a/articles/azure-functions/openapi-apim-integrate-visual-studio.md b/articles/azure-functions/openapi-apim-integrate-visual-studio.md index 85fd13f5d6dc9..811ba460166e4 100644 --- a/articles/azure-functions/openapi-apim-integrate-visual-studio.md +++ b/articles/azure-functions/openapi-apim-integrate-visual-studio.md @@ -21,11 +21,11 @@ In this tutorial, you learn how to: The serverless function you create provides an API that lets you determine whether an emergency repair on a wind turbine is cost-effective. Because both the function app and API Management instance you create use consumption plans, your cost for completing this tutorial is minimal. > [!NOTE] -> The OpenAPI and API Management integration featured in this article is currently in preview. This method for exposing a serverless API is only supported for C# class library (.NET Core 3.1) functions. All other language runtimes should instead [use Azure API Management integration from the portal](functions-openapi-definition.md). +> The OpenAPI and API Management integration featured in this article is currently in preview. This method for exposing a serverless API is only supported for [in-process](functions-dotnet-class-library.md) C# class library functions. 
[Isolated process](dotnet-isolated-process-guide.md) C# class library functions and all other language runtimes should instead [use Azure API Management integration from the portal](functions-openapi-definition.md). ## Prerequisites -+ [Visual Studio 2019](https://azure.microsoft.com/downloads/), version 16.10, or a later version. Make sure you select the **Azure development** workload during installation. ++ [Visual Studio 2022](https://azure.microsoft.com/downloads/). Make sure you select the **Azure development** workload during installation. + An active [Azure subscription](../guides/developer/azure-developer-guide.md#understanding-accounts-subscriptions-and-billing), create a [free account](https://azure.microsoft.com/free/dotnet/) before you begin. @@ -43,9 +43,9 @@ The Azure Functions project template in Visual Studio creates a project that you | Setting | Value | Description | | ------------ | ------- |----------------------------------------- | - | **.NET version** | **.NET Core 3 (LTS)** | This value creates a function project that uses the version 3.x runtime of Azure Functions. OpenAPI file generation is only supported for version 3.x of the Functions runtime. | + | **Functions worker** | **.NET 6** | This value creates a function project that runs in-process on version 4.x of the Azure Functions runtime. OpenAPI file generation is only supported for versions 3.x and 4.x of the Functions runtime, and isolated process isn't supported. | | **Function template** | **HTTP trigger with OpenAPI** | This value creates a function triggered by an HTTP request, with the ability to generate an OpenAPI definition file. | - | **Storage account (AzureWebJobsStorage)** | **Storage emulator** | You can use the emulator for local development of HTTP trigger functions. Because a function app in Azure requires a storage account, one is assigned or created when you publish your project to Azure. | + | **Use Azurite for runtime storage account (AzureWebJobsStorage)** | **Selected** | You can use the emulator for local development of HTTP trigger functions. Because a function app in Azure requires a storage account, one is assigned or created when you publish your project to Azure. | | **Authorization level** | **Function** | When running in Azure, clients must provide a key when accessing the endpoint. For more information about keys and authorization, see [function access keys](functions-bindings-http-webhook-trigger.md#authorization-keys). | ![Azure Functions project settings](./media/openapi-apim-integrate-vs/functions-project-settings.png) @@ -151,11 +151,13 @@ Before you can publish your project, you must have a function app in your Azure 1. In the **Publish** tab, select the ellipses (**...**) next to **Hosting** and select **Open API in Azure portal**. The API Management instance you created is opened in the Azure portal in your default browser. This API Management instance is already linked to your function app. -1. Under **APIs**, select **Azure Functions OpenAPI Extension** > **Test** > **POST Run**, then under **Inbound policy** select **Add policy**. +1. Under **APIs**, select **OpenAPI Document on Azure Functions** > **POST Run**, then under **Inbound processing** select **Add policy**. :::image type="content" source="media/openapi-apim-integrate-vs/apim-add-policy.png" alt-text="Add an inbound policy to the API"::: -1. In **Add inbound policy**, choose **Set query parameters**, type `code` for **Name**, select **+Value**, paste in the copied function key, and select **Save**. 
API Management includes the function key when it passes call through to the function endpoint. +1. Below **Inbound processing**, in **Set query parameters**, type `code` for **Name**, select **+Value**, paste in the copied function key, and select **Save**. API Management includes the function key when it passes calls through to the function endpoint. + + :::image type="content" source="media/openapi-apim-integrate-vs/inbound-processing-rule.png" alt-text="Provide Function credentials to the API inbound processing rule"::: Now that the function key is set, you can call the API to verify that it works when hosted in Azure. @@ -180,7 +182,7 @@ Now that the function key is set, you can call the API to verify that it works w If your API works as expected, you can download the OpenAPI definition. -1. 1. Under **APIs**, select **Azure Functions OpenAPI Extension**, select the ellipses (**...**), and select **Export**. +1. 1. Under **APIs**, select **OpenAPI Document on Azure Functions**, select the ellipses (**...**), and select **Export**. ![Download OpenAPI definition](media/openapi-apim-integrate-vs/download-definition.png) @@ -198,7 +200,7 @@ Select **Delete resource group**, type the name of your group in the text box to ## Next steps -You've used Visual Studio 2019 to create a function that is self-documenting because of the [OpenAPI Extension](https://github.com/Azure/azure-functions-openapi-extension) and integrated with API Management. You can now refine the definition in API Management in the portal. You can also [learn more about API Management](../api-management/api-management-key-concepts.md). +You've used Visual Studio 2022 to create a function that is self-documenting because of the [OpenAPI Extension](https://github.com/Azure/azure-functions-openapi-extension) and integrated with API Management. You can now refine the definition in API Management in the portal. You can also [learn more about API Management](../api-management/api-management-key-concepts.md). > [!div class="nextstepaction"] > [Edit the OpenAPI definition in API Management](../api-management/edit-api.md) diff --git a/articles/azure-functions/start-stop-vms/deploy.md b/articles/azure-functions/start-stop-vms/deploy.md index ee16d525c44b2..71fa1dd1a2aca 100644 --- a/articles/azure-functions/start-stop-vms/deploy.md +++ b/articles/azure-functions/start-stop-vms/deploy.md @@ -1,16 +1,16 @@ --- -title: Deploy Start/Stop VMs v2 (preview) -description: This article tells how to deploy the Start/Stop VMs v2 (preview) feature for your Azure VMs in your Azure subscription. +title: Deploy Start/Stop VMs v2 +description: This article tells how to deploy the Start/Stop VMs v2 feature for your Azure VMs in your Azure subscription. services: azure-functions ms.subservice: start-stop-vms -ms.date: 06/25/2021 +ms.date: 06/08/2022 ms.topic: conceptual ms.custon: subject-rbac-steps --- -# Deploy Start/Stop VMs v2 (preview) +# Deploy Start/Stop VMs v2 -Perform the steps in this topic in sequence to install the Start/Stop VMs v2 (preview) feature. After completing the setup process, configure the schedules to customize it to your requirements. +Perform the steps in this topic in sequence to install the Start/Stop VMs v2 feature. After completing the setup process, configure the schedules to customize it to your requirements. 
## Permissions considerations Please keep the following in mind before and during deployment: @@ -21,18 +21,37 @@ Please keep the following in mind before and during deployment: The deployment is initiated from the Start/Stop VMs v2 GitHub organization [here](https://github.com/microsoft/startstopv2-deployments/blob/main/README.md). While this feature is intended to manage all of your VMs in your subscription across all resource groups from a single deployment within the subscription, you can install another instance of it based on the operations model or requirements of your organization. It also can be configured to centrally manage VMs across multiple subscriptions. -To simplify management and removal, we recommend you deploy Start/Stop VMs v2 (preview) to a dedicated resource group. +To simplify management and removal, we recommend you deploy Start/Stop VMs v2 to a dedicated resource group. > [!NOTE] -> Currently this preview does not support specifying an existing Storage account or Application Insights resource. +> Currently this solution does not support specifying an existing Storage account or Application Insights resource. > [!NOTE] > The naming format for the function app and storage account has changed. To guarantee global uniqueness, a random and unique string is now appended to the names of these resource. 1. Open your browser and navigate to the Start/Stop VMs v2 [GitHub organization](https://github.com/microsoft/startstopv2-deployments/blob/main/README.md). -1. Select the deployment option based on the Azure cloud environment your Azure VMs are created in. This will open the custom Azure Resource Manager deployment page in the Azure portal. +1. Select the deployment option based on the Azure cloud environment your Azure VMs are created in. 1. If prompted, sign in to the [Azure portal](https://portal.azure.com). +1. Choose the appropriate **Plan** from the drop-down box. When choosing a Zone Redundant plan (**Start/StopV2-AZ**), you must create your deployment in one of the following regions: + + Australia East + + Brazil South + + Canada Central + + Central US + + East US + + East US 2 + + France Central + + Germany West Central + + Japan East + + North Europe + + Southeast Asia + + UK South + + West Europe + + West US 2 + + West US 3 + +1. Select **Create**, which opens the custom Azure Resource Manager deployment page in the Azure portal. + 1. Enter the following values: |Name |Value | @@ -60,7 +79,7 @@ To simplify management and removal, we recommend you deploy Start/Stop VMs v2 (p ## Enable multiple subscriptions -After the Start/Stop deployment completes, perform the following steps to enable Start/Stop VMs v2 (preview) to take action across multiple subscriptions. +After the Start/Stop deployment completes, perform the following steps to enable Start/Stop VMs v2 to take action across multiple subscriptions. 1. Copy the value for the Azure Function App name that you specified during the deployment. @@ -274,7 +293,7 @@ In an environment that includes two or more components on multiple Azure Resourc ## Auto stop scenario -Start/Stop VMs v2 (preview) can help manage the cost of running Azure Resource Manager and classic VMs in your subscription by evaluating machines that aren't used during non-peak periods, such as after hours, and automatically shutting them down if processor utilization is less than a specified percentage. 
+Start/Stop VMs v2 can help manage the cost of running Azure Resource Manager and classic VMs in your subscription by evaluating machines that aren't used during non-peak periods, such as after hours, and automatically shutting them down if processor utilization is less than a specified percentage. The following metric alert properties in the request body support customization: @@ -373,4 +392,4 @@ To learn more about how Azure Monitor metric alerts work and how to configure th ## Next steps -To learn how to monitor status of your Azure VMs managed by the Start/Stop VMs v2 (preview) feature and perform other management tasks, see the [Manage Start/Stop VMs](manage.md) article. +To learn how to monitor status of your Azure VMs managed by the Start/Stop VMs v2 feature and perform other management tasks, see the [Manage Start/Stop VMs](manage.md) article. diff --git a/articles/azure-functions/start-stop-vms/manage.md b/articles/azure-functions/start-stop-vms/manage.md index e09f75c46ffcb..ced235dfc9616 100644 --- a/articles/azure-functions/start-stop-vms/manage.md +++ b/articles/azure-functions/start-stop-vms/manage.md @@ -1,17 +1,17 @@ --- -title: Manage Start/Stop VMs v2 (preview) -description: This article tells how to monitor status of your Azure VMs managed by the Start/Stop VMs v2 (preview) feature and perform other management tasks. +title: Manage Start/Stop VMs v2 +description: This article tells how to monitor status of your Azure VMs managed by the Start/Stop VMs v2 feature and perform other management tasks. services: azure-functions ms.subservice: start-stop-vms -ms.date: 06/25/2021 +ms.date: 06/08/2022 ms.topic: conceptual --- -# How to manage Start/Stop VMs v2 (preview) +# How to manage Start/Stop VMs v2 ## Azure dashboard -Start/Stop VMs v2 (preview) includes a [dashboard](../../azure-monitor/best-practices-analysis.md#azure-dashboards) to help you understand the management scope and recent operations against your VMs. It is a quick and easy way to verify the status of each operation that’s performed on your Azure VMs. The visualization in each tile is based on a Log query and to see the query, select the **Open in logs blade** option in the right-hand corner of the tile. This opens the [Log Analytics](../../azure-monitor/logs/log-analytics-overview.md#starting-log-analytics) tool in the Azure portal, and from here you can evaluate the query and modify to support your needs, such as custom [log alerts](../../azure-monitor/alerts/alerts-log.md), a custom [workbook](../../azure-monitor/visualize/workbooks-overview.md), etc. +Start/Stop VMs v2 includes a [dashboard](../../azure-monitor/best-practices-analysis.md#azure-dashboards) to help you understand the management scope and recent operations against your VMs. It is a quick and easy way to verify the status of each operation that’s performed on your Azure VMs. The visualization in each tile is based on a Log query and to see the query, select the **Open in logs blade** option in the right-hand corner of the tile. This opens the [Log Analytics](../../azure-monitor/logs/log-analytics-overview.md#starting-log-analytics) tool in the Azure portal, and from here you can evaluate the query and modify to support your needs, such as custom [log alerts](../../azure-monitor/alerts/alerts-log.md), a custom [workbook](../../azure-monitor/visualize/workbooks-overview.md), etc. 
The log data each tile in the dashboard displays is refreshed every hour, with a manual refresh option on demand by clicking the **Refresh** icon on a given visualization, or by refreshing the full dashboard. @@ -19,7 +19,7 @@ To learn about working with a log-based dashboard, see the following [tutorial]( ## Configure email notifications -To change email notifications after Start/Stop VMs v2 (preview) is deployed, you can modify the action group created during deployment. +To change email notifications after Start/Stop VMs v2 is deployed, you can modify the action group created during deployment. 1. In the Azure portal, navigate to **Monitor**, then **Alerts**. Select **Action groups**. @@ -39,4 +39,4 @@ The following screenshot is an example email that is sent when the feature shuts ## Next steps -To handle problems during VM management, see [Troubleshoot Start/Stop VMs v2](troubleshoot.md) (preview) issues. +To handle problems during VM management, see [Troubleshoot Start/Stop VMs v2](troubleshoot.md) issues. diff --git a/articles/azure-functions/start-stop-vms/overview.md b/articles/azure-functions/start-stop-vms/overview.md index c26c2db64dfb6..1fa308f8cbf23 100644 --- a/articles/azure-functions/start-stop-vms/overview.md +++ b/articles/azure-functions/start-stop-vms/overview.md @@ -1,17 +1,17 @@ --- -title: Start/Stop VMs v2 (preview) overview -description: This article describes version two of the Start/Stop VMs (preview) feature, which starts or stops Azure Resource Manager and classic VMs on a schedule. +title: Start/Stop VMs v2 overview +description: This article describes version two of the Start/Stop VMs feature, which starts or stops Azure Resource Manager and classic VMs on a schedule. ms.topic: conceptual ms.service: azure-functions ms.subservice: start-stop-vms -ms.date: 06/25/2021 +ms.date: 06/08/2022 --- -# Start/Stop VMs v2 (preview) overview +# Start/Stop VMs v2 overview -The Start/Stop VMs v2 (preview) feature starts or stops Azure virtual machines (VMs) across multiple subscriptions. It starts or stops Azure VMs on user-defined schedules, provides insights through [Azure Application Insights](../../azure-monitor/app/app-insights-overview.md), and send optional notifications by using [action groups](../../azure-monitor/alerts/action-groups.md). The feature can manage both Azure Resource Manager VMs and classic VMs for most scenarios. +The Start/Stop VMs v2 feature starts or stops Azure virtual machines (VMs) across multiple subscriptions. It starts or stops Azure VMs on user-defined schedules, provides insights through [Azure Application Insights](../../azure-monitor/app/app-insights-overview.md), and send optional notifications by using [action groups](../../azure-monitor/alerts/action-groups.md). The feature can manage both Azure Resource Manager VMs and classic VMs for most scenarios. -This new version of Start/Stop VMs v2 (preview) provides a decentralized low-cost automation option for customers who want to optimize their VM costs. It offers all of the same functionality as the [original version](../../automation/automation-solution-vm-management.md) available with Azure Automation, but it is designed to take advantage of newer technology in Azure. +This new version of Start/Stop VMs v2 provides a decentralized low-cost automation option for customers who want to optimize their VM costs. 
It offers all of the same functionality as the [original version](../../automation/automation-solution-vm-management.md) available with Azure Automation, but it is designed to take advantage of newer technology in Azure. > [!NOTE] > We've added a plan (**AZ - Availability Zone**) to our Start/Stop V2 solution to enable a high-availability offering. You can now choose between Consumption and Availability Zone plans before you start your deployment. In most cases, the monthly cost of the Availability Zone plan is higher when compared to the Consumption plan. @@ -22,9 +22,9 @@ This new version of Start/Stop VMs v2 (preview) provides a decentralized low-cos ## Overview -Start/Stop VMs v2 (preview) is redesigned and it doesn't depend on Azure Automation or Azure Monitor Logs, as required by the [previous version](../../automation/automation-solution-vm-management.md). This version relies on [Azure Functions](../../azure-functions/functions-overview.md) to handle the VM start and stop execution. +Start/Stop VMs v2 is redesigned and it doesn't depend on Azure Automation or Azure Monitor Logs, as required by the [previous version](../../automation/automation-solution-vm-management.md). This version relies on [Azure Functions](../../azure-functions/functions-overview.md) to handle the VM start and stop execution. -A managed identity is created in Azure Active Directory (Azure AD) for this Azure Functions application and allows Start/Stop VMs v2 (preview) to easily access other Azure AD-protected resources, such as the logic apps and Azure VMs. For more about managed identities in Azure AD, see [Managed identities for Azure resources](../../active-directory/managed-identities-azure-resources/overview.md). +A managed identity is created in Azure Active Directory (Azure AD) for this Azure Functions application and allows Start/Stop VMs v2 to easily access other Azure AD-protected resources, such as the logic apps and Azure VMs. For more about managed identities in Azure AD, see [Managed identities for Azure resources](../../active-directory/managed-identities-azure-resources/overview.md). An HTTP trigger endpoint function is created to support the schedule and sequence scenarios included with the feature, as shown in the following table. @@ -66,7 +66,7 @@ The queue-based trigger functions are required in support of this feature. All t Each Start/Stop action supports assignment of one or more subscriptions, resource groups, or a list of VMs. -An Azure Storage account, which is required by Functions, is also used by Start/Stop VMs v2 (preview) for two purposes: +An Azure Storage account, which is required by Functions, is also used by Start/Stop VMs v2 for two purposes: - Uses Azure Table Storage to store the execution operation metadata (that is, the start/stop VM action). @@ -80,7 +80,7 @@ Email notifications are also sent as a result of the actions performed on the VM ## New releases -When a new version of Start/Stop VMs v2 (preview) is released, your instance is auto-updated without having to manually redeploy. +When a new version of Start/Stop VMs v2 is released, your instance is auto-updated without having to manually redeploy. ## Supported scoping options @@ -106,8 +106,8 @@ Specifying a list of VMs can be used when you need to perform the start and stop - Your account has been granted the [Contributor](../../role-based-access-control/built-in-roles.md#contributor) permission in the subscription. 
-- Start/Stop VMs v2 (preview) is available in all Azure global and US Government cloud regions that are listed in [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?regions=all&products=functions) page for Azure Functions. +- Start/Stop VMs v2 is available in all Azure global and US Government cloud regions that are listed in [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?regions=all&products=functions) page for Azure Functions. ## Next steps -To deploy this feature, see [Deploy Start/Stop VMs](deploy.md) (preview). +To deploy this feature, see [Deploy Start/Stop VMs](deploy.md). diff --git a/articles/azure-functions/start-stop-vms/remove.md b/articles/azure-functions/start-stop-vms/remove.md index e90ff06d18f85..a1b2621b0b3e1 100644 --- a/articles/azure-functions/start-stop-vms/remove.md +++ b/articles/azure-functions/start-stop-vms/remove.md @@ -1,15 +1,15 @@ --- -title: Remove Start/Stop VMs v2 (preview) overview -description: This article describes how to remove the Start/Stop VMs v2 (preview) feature. +title: Remove Start/Stop VMs v2 overview +description: This article describes how to remove the Start/Stop VMs v2 feature. services: azure-functions ms.subservice: start-stop-vms -ms.date: 06/25/2021 +ms.date: 06/08/2022 ms.topic: conceptual --- -# How to remove Start/Stop VMs v2 (preview) +# How to remove Start/Stop VMs v2 -After you enable the Start/Stop VMs v2 (preview) feature to manage the running state of your Azure VMs, you may decide to stop using it. Removing this feature can be done by deleting the resource group dedicated to store the following resources in support of Start/Stop VMs v2 (preview): +After you enable the Start/Stop VMs v2 feature to manage the running state of your Azure VMs, you may decide to stop using it. Removing this feature can be done by deleting the resource group dedicated to store the following resources in support of Start/Stop VMs v2: - The Azure Functions applications - Schedules in Azure Logic Apps @@ -17,7 +17,7 @@ After you enable the Start/Stop VMs v2 (preview) feature to manage the running s - Azure Storage account > [!NOTE] -> If you run into problems during deployment, you encounter an issue when using Start/Stop VMs v2 (preview), or if you have a related question, you can submit an issue on [GitHub](https://github.com/microsoft/startstopv2-deployments/issues). Filing an Azure support incident from the [Azure support site](https://azure.microsoft.com/support/options/) is not available for this preview version. +> If you run into problems during deployment, you encounter an issue when using Start/Stop VMs v2, or if you have a related question, you can submit an issue on [GitHub](https://github.com/microsoft/startstopv2-deployments/issues). Filing an Azure support incident from the [Azure support site](https://azure.microsoft.com/support/options/) is not available for this version. ## Delete the dedicated resource group @@ -28,4 +28,4 @@ To delete the resource group, follow the steps outlined in the [Azure Resource M ## Next steps -To re-deploy this feature, see [Deploy Start/Stop v2](deploy.md) (preview). +To re-deploy this feature, see [Deploy Start/Stop v2](deploy.md). 
diff --git a/articles/azure-functions/start-stop-vms/troubleshoot.md b/articles/azure-functions/start-stop-vms/troubleshoot.md index 10983e3b7cbb1..2827f39fdee99 100644 --- a/articles/azure-functions/start-stop-vms/troubleshoot.md +++ b/articles/azure-functions/start-stop-vms/troubleshoot.md @@ -1,15 +1,15 @@ --- -title: Troubleshoot Start/Stop VMs (preview) -description: This article tells how to troubleshoot issues encountered with the Start/Stop VMs (preview) feature for your Azure VMs. +title: Troubleshoot Start/Stop VMs +description: This article tells how to troubleshoot issues encountered with the Start/Stop VMs feature for your Azure VMs. services: azure-functions ms.subservice: start-stop-vms -ms.date: 06/25/2021 +ms.date: 06/08/2022 ms.topic: conceptual --- -# Troubleshoot common issues with Start/Stop VMs (preview) +# Troubleshoot common issues with Start/Stop VMs -This article provides information on troubleshooting and resolving issues that may occur while attempting to install and configure Start/Stop VMs (preview). For general information, see [Start/Stop VMs overview](overview.md). +This article provides information on troubleshooting and resolving issues that may occur while attempting to install and configure Start/Stop VMs. For general information, see [Start/Stop VMs overview](overview.md). ## General validation and troubleshooting @@ -17,7 +17,7 @@ This section covers how to troubleshoot general issues with the schedules scenar ### Azure dashboard -You can start by reviewing the Azure shared dashboard. The Azure shared dashboard deployed as part of Start/Stop VMs v2 (preview) is a quick and easy way to verify the status of each operation that's performed on your VMs. Refer to the **Recently attempted actions on VMs** tile to see all the recent operations executed on your VMs. There is some latency, around five minutes, for data to show up in the report as it pulls data from the Application Insights resource. +You can start by reviewing the Azure shared dashboard. The Azure shared dashboard deployed as part of Start/Stop VMs v2 is a quick and easy way to verify the status of each operation that's performed on your VMs. Refer to the **Recently attempted actions on VMs** tile to see all the recent operations executed on your VMs. There is some latency, around five minutes, for data to show up in the report as it pulls data from the Application Insights resource. ### Logic Apps @@ -25,9 +25,9 @@ Depending on which Logic Apps you have enabled to support your start/stop scenar ### Azure Storage -You can review the details for the operations performed on the VMs that are written to the table **requestsstoretable** in the Azure storage account used for Start/Stop VMs v2 (preview). Perform the following steps to view those records. +You can review the details for the operations performed on the VMs that are written to the table **requestsstoretable** in the Azure storage account used for Start/Stop VMs v2. Perform the following steps to view those records. -1. Navigate to the storage account in the Azure portal and in the account select **Storage Explorer (preview)** from the left-hand pane. +1. Navigate to the storage account in the Azure portal and in the account select **Storage Explorer** from the left-hand pane. 1. Select **TABLES** and then select **requeststoretable**. 1. Each record in the table represents the start/stop action performed against an Azure VM based on the target scope defined in the logic app scenario. 
You can filter the results by any one of the record properties (for example, TIMESTAMP, ACTION, or TARGETTOPLEVELRESOURCENAME). @@ -42,7 +42,7 @@ From the logic app, the **Scheduled** HTTP function is invoked with Payload sche Perform the following steps to see the invocation details. 1. In the Azure portal, navigate to **Azure Functions**. -1. Select the Function app for Start/Stop VMs v2 (preview) from the list. +1. Select the Function app for Start/Stop VMs v2 from the list. 1. Select **Functions** from the left-hand pane. 1. In the list, you see several functions associated for each scenario. Select the **Scheduled** HTTP function. 1. Select **Monitor** from the left-hand pane. @@ -61,4 +61,4 @@ Learn more about monitoring Azure Functions and logic apps: * [Monitor logic apps](../../logic-apps/monitor-logic-apps.md). -* If you run into problems during deployment, you encounter an issue when using Start/Stop VMs v2 (preview), or if you have a related question, you can submit an issue on [GitHub](https://github.com/microsoft/startstopv2-deployments/issues). Filing an Azure support incident from the [Azure support site](https://azure.microsoft.com/support/options/) is also available for this preview version. +* If you run into problems during deployment, you encounter an issue when using Start/Stop VMs v2, or if you have a related question, you can submit an issue on [GitHub](https://github.com/microsoft/startstopv2-deployments/issues). Filing an Azure support incident from the [Azure support site](https://azure.microsoft.com/support/options/) is also available for this version. diff --git a/articles/azure-government/azure-secure-isolation-guidance.md b/articles/azure-government/azure-secure-isolation-guidance.md index a00f84f1287c3..a4fd91da7282a 100644 --- a/articles/azure-government/azure-secure-isolation-guidance.md +++ b/articles/azure-government/azure-secure-isolation-guidance.md @@ -6,7 +6,7 @@ ms.author: stevevi ms.service: azure-government ms.topic: article recommendations: false -ms.date: 05/12/2022 +ms.date: 06/02/2022 --- # Azure guidance for secure isolation @@ -174,7 +174,7 @@ When a managed HSM is created, the requestor also provides a list of data plane > [!IMPORTANT] > Unlike with key vaults, granting your users management plane access to a managed HSM doesn't grant them any access to data plane to access keys or data plane role assignments managed HSM local RBAC. This isolation is implemented by design to prevent inadvertent expansion of privileges affecting access to keys stored in managed HSMs. -As mentioned previously, managed HSM supports [importing keys generated](../key-vault/managed-hsm/hsm-protected-keys-byok.md) in your on-premises HSMs, ensuring the keys never leave the HSM protection boundary, also known as *bring your own key (BYOK)* scenario. Managed HSM supports integration with Azure services such as [Azure Storage](../storage/common/customer-managed-keys-overview.md), [Azure SQL Database](/azure/azure-sql/database/transparent-data-encryption-byok-overview), [Azure Information Protection](/azure/information-protection/byok-price-restrictions), and others. +As mentioned previously, managed HSM supports [importing keys generated](../key-vault/managed-hsm/hsm-protected-keys-byok.md) in your on-premises HSMs, ensuring the keys never leave the HSM protection boundary, also known as *bring your own key (BYOK)* scenario. 
Managed HSM supports integration with Azure services such as [Azure Storage](../storage/common/customer-managed-keys-overview.md), [Azure SQL Database](/azure/azure-sql/database/transparent-data-encryption-byok-overview), [Azure Information Protection](/azure/information-protection/byok-price-restrictions), and others. For a more complete list of Azure services that work with Managed HSM, see [Data encryption models](../security/fundamentals/encryption-models.md#supporting-services). Managed HSM enables you to use the established Azure Key Vault API and management interfaces. You can use the same application development and deployment patterns for all your applications irrespective of the key management solution: multi-tenant vault or single-tenant managed HSM. @@ -751,7 +751,7 @@ Drive encryption through BitLocker and DM-Crypt is a data protection feature tha For managed disks, Azure Disk encryption allows you to encrypt the OS and Data disks used by an IaaS virtual machine; however, Data can't be encrypted without first encrypting the OS volume. The solution relies on Azure Key Vault to help you control and manage the disk encryption keys in key vaults. You can supply your own encryption keys, which are safeguarded in Azure Key Vault to support *bring your own key (BYOK)* scenarios, as described previously in *[Data encryption key management](#data-encryption-key-management)* section. -Azure Disk encryption isn't supported by Managed HSM or an on-premises key management service. Only key vaults managed by the Azure Key Vault service can be used to safeguard customer-managed encryption keys for Azure Disk encryption. +Azure Disk encryption isn't supported by Managed HSM or an on-premises key management service. Only key vaults managed by the Azure Key Vault service can be used to safeguard customer-managed encryption keys for Azure Disk encryption. See [Encryption at host](#encryption-at-host) for other options involving Managed HSM. > [!NOTE] > Detailed instructions are available for creating and configuring a key vault for Azure Disk encryption with both **[Windows](../virtual-machines/windows/disk-encryption-key-vault.md)** and **[Linux](../virtual-machines/linux/disk-encryption-key-vault.md)** VMs. @@ -774,7 +774,7 @@ For [Windows VMs](../virtual-machines/windows/disk-encryption-faq.yml), Azure Di Customer-managed keys (CMK) enable you to have [full control](../virtual-machines/disk-encryption.md#full-control-of-your-keys) over your encryption keys. You can grant access to managed disks in your Azure Key Vault so that your keys can be used for encrypting and decrypting the DEK. You can also disable your keys or revoke access to managed disks at any time. Finally, you have full audit control over key usage with Azure Key Vault monitoring to ensure that only managed disks or other authorized resources are accessing your encryption keys. ##### *Encryption at host* -Encryption at host ensures that data stored on the VM host is encrypted at rest and flows encrypted to the Storage service. Disks with encryption at host enabled aren't encrypted with Azure Storage encryption; instead, the server hosting your VM provides the encryption for your data, and that encrypted data flows into Azure Storage. For more information, see [Encryption at host - End-to-end encryption for your VM data](../virtual-machines/disk-encryption.md#encryption-at-host---end-to-end-encryption-for-your-vm-data). 
+Encryption at host ensures that data stored on the VM host is encrypted at rest and flows encrypted to the Storage service. Disks with encryption at host enabled aren't encrypted with Azure Storage encryption; instead, the server hosting your VM provides the encryption for your data, and that encrypted data flows into Azure Storage. For more information, see [Encryption at host - End-to-end encryption for your VM data](../virtual-machines/disk-encryption.md#encryption-at-host---end-to-end-encryption-for-your-vm-data). As mentioned previously, [Azure Disk encryption](../security/fundamentals/azure-disk-encryption-vms-vmss.md) for VM and VMSS isn't supported by Managed HSM. However, encryption at host with CMK is supported by Managed HSM. You're [always in control of your customer data](https://www.microsoft.com/trust-center/privacy/data-management) in Azure. You can access, extract, and delete your customer data stored in Azure at will. When you terminate your Azure subscription, Microsoft takes the necessary steps to ensure that you continue to own your customer data. A common concern upon data deletion or subscription termination is whether another customer or Azure administrator can access your deleted data. The following sections explain how data deletion, retention, and destruction work in Azure. diff --git a/articles/azure-government/compare-azure-government-global-azure.md b/articles/azure-government/compare-azure-government-global-azure.md index af38eb133563e..1085dca463494 100644 --- a/articles/azure-government/compare-azure-government-global-azure.md +++ b/articles/azure-government/compare-azure-government-global-azure.md @@ -7,7 +7,7 @@ author: stevevi ms.author: stevevi ms.custom: references_regions recommendations: false -ms.date: 04/29/2022 +ms.date: 06/02/2022 --- # Compare Azure Government and global Azure @@ -16,11 +16,11 @@ Microsoft Azure Government uses same underlying technologies as global Azure, wh ## Export control implications -You are responsible for designing and deploying your applications to meet [US export control requirements](./documentation-government-overview-itar.md) such as the requirements prescribed in the EAR, ITAR, and DoE 10 CFR Part 810. In doing so, you should not include sensitive or restricted information in Azure resource names, as explained in [Considerations for naming Azure resources](./documentation-government-concept-naming-resources.md). +You're responsible for designing and deploying your applications to meet [US export control requirements](./documentation-government-overview-itar.md) such as the requirements prescribed in the EAR, ITAR, and DoE 10 CFR Part 810. In doing so, you shouldn't include sensitive or restricted information in Azure resource names, as explained in [Considerations for naming Azure resources](./documentation-government-concept-naming-resources.md). ## Guidance for developers -Azure Government services operate the same way as the corresponding services in global Azure, which is why most of the existing online Azure documentation applies equally well to Azure Government. However, there are some key differences that developers working on applications hosted in Azure Government must be aware of. For more information, see [Guidance for developers](./documentation-government-developer-guide.md). As a developer, you must know how to connect to Azure Government and once you connect you will mostly have the same experience as in global Azure. 
+Azure Government services operate the same way as the corresponding services in global Azure, which is why most of the existing online Azure documentation applies equally well to Azure Government. However, there are some key differences that developers working on applications hosted in Azure Government must be aware of. For more information, see [Guidance for developers](./documentation-government-developer-guide.md). As a developer, you must know how to connect to Azure Government and once you connect you'll mostly have the same experience as in global Azure. > [!NOTE] > This article has been updated to use the new Azure PowerShell Az module. You can still use the AzureRM module, which will continue to receive bug fixes until at least December 2020. To learn more about the new Az module and AzureRM compatibility, see [**Introducing the new Azure PowerShell Az module**](/powershell/azure/new-azureps-module-az). For Az module installation instructions, see [**Install the Azure Az PowerShell module**](/powershell/azure/install-az-ps). @@ -55,11 +55,11 @@ Table below lists API endpoints in Azure vs. Azure Government for accessing and ||Custom Vision|cognitiveservices.azure.com|cognitiveservices.azure.us
                  [Portal](https://www.customvision.azure.us/)|| ||Content Moderator|cognitiveservices.azure.com|cognitiveservices.azure.us|| ||Face API|cognitiveservices.azure.com|cognitiveservices.azure.us|| -||Language Understanding|cognitiveservices.azure.com|cognitiveservices.azure.us
                  [Portal](https://luis.azure.us/)|| +||Language Understanding|cognitiveservices.azure.com|cognitiveservices.azure.us
                  [Portal](https://luis.azure.us/)|Part of [Cognitive Services for Language](../cognitive-services/language-service/index.yml)| ||Personalizer|cognitiveservices.azure.com|cognitiveservices.azure.us|| -||QnA Maker|cognitiveservices.azure.com|cognitiveservices.azure.us|| +||QnA Maker|cognitiveservices.azure.com|cognitiveservices.azure.us|Part of [Cognitive Services for Language](../cognitive-services/language-service/index.yml)| ||Speech service|See [STT API docs](../cognitive-services/speech-service/rest-speech-to-text-short.md#regions-and-endpoints)|[Speech Studio](https://speech.azure.us/)

                  See [Speech service endpoints](../cognitive-services/Speech-Service/sovereign-clouds.md)

                  **Speech translation endpoints**
                  Virginia: `https://usgovvirginia.s2s.speech.azure.us`
                  Arizona: `https://usgovarizona.s2s.speech.azure.us`
                  || -||Text Analytics|cognitiveservices.azure.com|cognitiveservices.azure.us|| +||Text Analytics|cognitiveservices.azure.com|cognitiveservices.azure.us|Part of [Cognitive Services for Language](../cognitive-services/language-service/index.yml)| ||Translator|See [Translator API docs](../cognitive-services/translator/reference/v3-0-reference.md#base-urls)|cognitiveservices.azure.us|| |**Analytics**|Azure HDInsight|azurehdinsight.net|azurehdinsight.us|| ||Event Hubs|servicebus.windows.net|servicebus.usgovcloudapi.net|| @@ -116,17 +116,17 @@ Table below lists API endpoints in Azure vs. Azure Government for accessing and ## Service availability -Microsoft's goal for Azure Government is to match service availability in Azure. For service availability in Azure Government, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=all®ions=non-regional,usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). Services available in Azure Government are listed by category and whether they are Generally Available or available through Preview. If a service is available in Azure Government, that fact is not reiterated in the rest of this article. Instead, you are encouraged to review [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=all®ions=non-regional,usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true) for the latest, up-to-date information on service availability. +Microsoft's goal for Azure Government is to match service availability in Azure. For service availability in Azure Government, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=all®ions=usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). Services available in Azure Government are listed by category and whether they're Generally Available or available through Preview. If a service is available in Azure Government, that fact isn't reiterated in the rest of this article. Instead, you're encouraged to review [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=all®ions=usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true) for the latest, up-to-date information on service availability. In general, service availability in Azure Government implies that all corresponding service features are available to you. Variations to this approach and other applicable limitations are tracked and explained in this article based on the main service categories outlined in the [online directory of Azure services](https://azure.microsoft.com/services/). Other considerations for service deployment and usage in Azure Government are also provided. ## AI + machine learning -This section outlines variations and considerations when using **Azure Bot Service**, **Azure Machine Learning**, and **Cognitive Services** in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=machine-learning-service,bot-service,cognitive-services®ions=non-regional,usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). 
+This section outlines variations and considerations when using **Azure Bot Service**, **Azure Machine Learning**, and **Cognitive Services** in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=machine-learning-service,bot-service,cognitive-services®ions=usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). ### [Azure Bot Service](/azure/bot-service/) -The following Azure Bot Service **features are not currently available** in Azure Government (updated 8/16/2021): +The following Azure Bot Service **features aren't currently available** in Azure Government (updated 16 August 2021): - Bot Framework Composer integration - Channels (due to availability of dependent services) @@ -144,13 +144,13 @@ For feature variations and limitations, see [Azure Machine Learning feature avai ### [Cognitive Services: Content Moderator](../cognitive-services/content-moderator/index.yml) -The following Content Moderator **features are not currently available** in Azure Government: +The following Content Moderator **features aren't currently available** in Azure Government: - Review UI and Review APIs. ### [Cognitive Services: Language Understanding (LUIS)](../cognitive-services/luis/index.yml) -The following Language Understanding **features are not currently available** in Azure Government: +The following Language Understanding **features aren't currently available** in Azure Government: - Speech Requests - Prebuilt Domains @@ -163,18 +163,18 @@ For feature variations and limitations, including API endpoints, see [Speech ser ### [Cognitive Services: Translator](../cognitive-services/translator/index.yml) -The following Translator **features are not currently available** in Azure Government: +The following Translator **features aren't currently available** in Azure Government: - Custom Translator - Translator Hub ## Analytics -This section outlines variations and considerations when using Analytics services in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=data-share,power-bi-embedded,analysis-services,event-hubs,data-lake-analytics,storage,data-catalog,data-factory,synapse-analytics,stream-analytics,databricks,hdinsight®ions=non-regional,usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). +This section outlines variations and considerations when using Analytics services in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=data-share,power-bi-embedded,analysis-services,event-hubs,data-lake-analytics,storage,data-catalog,data-factory,synapse-analytics,stream-analytics,databricks,hdinsight®ions=usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). ### [Azure HDInsight](../hdinsight/index.yml) -For secured virtual networks, you will want to allow network security groups (NSGs) access to certain IP addresses and ports. For Azure Government, you should allow the following IP addresses (all with an Allowed port of 443): +For secured virtual networks, you'll want to allow network security groups (NSGs) access to certain IP addresses and ports. 
For Azure Government, you should allow the following IP addresses (all with an Allowed port of 443): |**Region**|**Allowed IP addresses**|**Allowed port**| |------|--------------------|------------| @@ -196,17 +196,17 @@ To learn how to embed analytical content within your business process applicatio ## Databases -This section outlines variations and considerations when using Databases services in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=azure-api-for-fhir,data-factory,sql-server-stretch-database,redis-cache,database-migration,synapse-analytics,postgresql,mariadb,mysql,sql-database,cosmos-db®ions=non-regional,usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). +This section outlines variations and considerations when using Databases services in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=azure-api-for-fhir,data-factory,sql-server-stretch-database,redis-cache,database-migration,synapse-analytics,postgresql,mariadb,mysql,sql-database,cosmos-db®ions=usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). ### [Azure Database for MySQL](../mysql/index.yml) -The following Azure Database for MySQL **features are not currently available** in Azure Government: +The following Azure Database for MySQL **features aren't currently available** in Azure Government: - Advanced Threat Protection ### [Azure Database for PostgreSQL](../postgresql/index.yml) -The following Azure Database for PostgreSQL **features are not currently available** in Azure Government: +The following Azure Database for PostgreSQL **features aren't currently available** in Azure Government: - Hyperscale (Citus) deployment option - The following features of the Single server deployment option @@ -215,13 +215,13 @@ The following Azure Database for PostgreSQL **features are not currently availab ### [Azure SQL Managed Instance](/azure/azure-sql/managed-instance/sql-managed-instance-paas-overview) -The following Azure SQL Managed Instance **features are not currently available** in Azure Government: +The following Azure SQL Managed Instance **features aren't currently available** in Azure Government: - Long-term retention ## Developer tools -This section outlines variations and considerations when using Developer tools in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=load-testing,app-configuration,devtest-lab,lab-services,azure-devops®ions=non-regional,usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). +This section outlines variations and considerations when using Developer tools in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=load-testing,app-configuration,devtest-lab,lab-services,azure-devops®ions=usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). 
### [Enterprise Dev/Test subscription offer](https://azure.microsoft.com/offers/ms-azr-0148p/) @@ -229,7 +229,7 @@ This section outlines variations and considerations when using Developer tools i ## Identity -This section outlines variations and considerations when using Identity services in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=information-protection,active-directory-ds,active-directory&regions=non-regional,usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). +This section outlines variations and considerations when using Identity services in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=information-protection,active-directory-ds,active-directory&regions=usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). ### [Azure Active Directory Premium P1 and P2](../active-directory/index.yml) @@ -239,24 +239,24 @@ The following features have known limitations in Azure Government: - Limitations with B2B Collaboration in supported Azure US Government tenants: - For more information about B2B collaboration limitations in Azure Government and to find out if B2B collaboration is available in your Azure Government tenant, see [Azure AD B2B in government and national clouds](../active-directory/external-identities/b2b-government-national-clouds.md). - - B2B collaboration via Power BI is not supported. When you invite a guest user from within Power BI, the B2B flow is not used and the guest user won't appear in the tenant's user list. If a guest user is invited through other means, they'll appear in the Power BI user list, but any sharing request to the user will fail and display a 403 Forbidden error. + - B2B collaboration via Power BI isn't supported. When you invite a guest user from within Power BI, the B2B flow isn't used and the guest user won't appear in the tenant's user list. If a guest user is invited through other means, they'll appear in the Power BI user list, but any sharing request to the user will fail and display a 403 Forbidden error. - Limitations with multi-factor authentication: - - Trusted IPs are not supported in Azure Government. Instead, use Conditional Access policies with named locations to establish when multi-factor authentication should and should not be required based off the user's current IP address. + - Trusted IPs aren't supported in Azure Government. Instead, use Conditional Access policies with named locations to establish when multi-factor authentication should and shouldn't be required based off the user's current IP address. ## Management and governance -This section outlines variations and considerations when using Management and Governance services in the Azure Government environment. 
For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=managed-applications,azure-policy,network-watcher,monitor,traffic-manager,automation,scheduler,site-recovery,cost-management,backup,blueprints,advisor®ions=usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). ### [Automation](../automation/index.yml) -The following Automation **features are not currently available** in Azure Government: +The following Automation **features aren't currently available** in Azure Government: - Automation analytics solution ### [Azure Advisor](../advisor/index.yml) -The following Azure Advisor recommendation **features are not currently available** in Azure Government: +The following Azure Advisor recommendation **features aren't currently available** in Azure Government: - Cost - (Preview) Consider App Service stamp fee reserved capacity to save over your on-demand costs. @@ -286,7 +286,7 @@ The following Azure Advisor recommendation **features are not currently availabl - Enforce 'Add or replace a tag on resources' using Azure Policy. - Enforce 'Allowed locations' using Azure Policy. - Enforce 'Allowed virtual machine SKUs' using Azure Policy. - - Enforce 'Audit VMs that do not use managed disks' using Azure Policy. + - Enforce 'Audit VMs that don't use managed disks' using Azure Policy. - Enforce 'Inherit a tag from the resource group' using Azure Policy. - Update Azure Spring Cloud API Version. - Update your outdated Azure Spring Cloud SDK to the latest version. @@ -331,11 +331,11 @@ If you want to be more aggressive at identifying underutilized virtual machines, ### [Azure Lighthouse](../lighthouse/index.yml) -The following Azure Lighthouse **features are not currently available** in Azure Government: +The following Azure Lighthouse **features aren't currently available** in Azure Government: - Managed Service offers published to Azure Marketplace -- Delegation of subscriptions across a national cloud and the Azure public cloud, or across two separate national clouds, is not supported -- Privileged Identity Management (PIM) feature is not enabled, for example, just-in-time (JIT) / eligible authorization capability +- Delegation of subscriptions across a national cloud and the Azure public cloud, or across two separate national clouds, isn't supported +- Privileged Identity Management (PIM) feature isn't enabled, for example, just-in-time (JIT) / eligible authorization capability ### [Azure Monitor](../azure-monitor/index.yml) @@ -353,9 +353,9 @@ For more information, see [Connect Operations Manager to Azure Monitor](../azure **Frequently asked questions** - Can I migrate data from Azure Monitor logs in Azure to Azure Government? - - No. It is not possible to move data or your workspace from Azure to Azure Government. + - No. It isn't possible to move data or your workspace from Azure to Azure Government. - Can I switch between Azure and Azure Government workspaces from the Operations Management Suite portal? - - No. The portals for Azure and Azure Government are separate and do not share information. + - No. The portals for Azure and Azure Government are separate and don't share information. 
#### [Application Insights](../azure-monitor/app/app-insights-overview.md) @@ -363,12 +363,12 @@ Application Insights (part of Azure Monitor) enables the same features in both A **Visual Studio** - In Azure Government, you can enable monitoring on your ASP.NET, ASP.NET Core, Java, and Node.js based applications running on Azure App Service. For more information, see [Application monitoring for Azure App Service overview](../azure-monitor/app/azure-web-apps.md). In Visual Studio, go to Tools|Options|Accounts|Registered Azure Clouds|Add New Azure Cloud and select Azure US Government as the Discovery endpoint. After that, adding an account in File|Account Settings will prompt you for which cloud you want to add from. -**SDK endpoint modifications** - In order to send data from Application Insights to an Azure Government region, you will need to modify the default endpoint addresses that are used by the Application Insights SDKs. Each SDK requires slightly different modifications, as described in [Application Insights overriding default endpoints](../azure-monitor/app/custom-endpoints.md). +**SDK endpoint modifications** - In order to send data from Application Insights to an Azure Government region, you'll need to modify the default endpoint addresses that are used by the Application Insights SDKs. Each SDK requires slightly different modifications, as described in [Application Insights overriding default endpoints](../azure-monitor/app/custom-endpoints.md). -**Firewall exceptions** - Application Insights uses several IP addresses. You might need to know these addresses if the app that you are monitoring is hosted behind a firewall. For more information, see [IP addresses used by Azure Monitor](../azure-monitor/app/ip-addresses.md) from where you can download Azure Government IP addresses. +**Firewall exceptions** - Application Insights uses several IP addresses. You might need to know these addresses if the app that you're monitoring is hosted behind a firewall. For more information, see [IP addresses used by Azure Monitor](../azure-monitor/app/ip-addresses.md) from where you can download Azure Government IP addresses. >[!NOTE] ->Although these addresses are static, it's possible that we will need to change them from time to time. All Application Insights traffic represents outbound traffic except for availability monitoring and webhooks, which require inbound firewall rules. +>Although these addresses are static, it's possible that we'll need to change them from time to time. All Application Insights traffic represents outbound traffic except for availability monitoring and webhooks, which require inbound firewall rules. You need to open some **outgoing ports** in your server's firewall to allow the Application Insights SDK and/or Status Monitor to send data to the portal: @@ -378,13 +378,13 @@ You need to open some **outgoing ports** in your server's firewall to allow the ### [Cost Management and Billing](../cost-management-billing/index.yml) -The following Azure Cost Management + Billing **features are not currently available** in Azure Government: +The following Azure Cost Management + Billing **features aren't currently available** in Azure Government: - Cost Management + Billing for cloud solution providers (CSPs) ## Media -This section outlines variations and considerations when using Media services in the Azure Government environment. 
For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=cdn,media-services®ions=non-regional,usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). +This section outlines variations and considerations when using Media services in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=cdn,media-services®ions=usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). ### [Media Services](/azure/media-services/) @@ -392,11 +392,11 @@ For Azure Media Services v3 feature variations in Azure Government, see [Azure M ## Migration -This section outlines variations and considerations when using Migration services in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=database-migration,cost-management,azure-migrate,site-recovery®ions=non-regional,usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). +This section outlines variations and considerations when using Migration services in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=database-migration,cost-management,azure-migrate,site-recovery®ions=usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). ### [Azure Migrate](../migrate/index.yml) -The following Azure Migrate **features are not currently available** in Azure Government: +The following Azure Migrate **features aren't currently available** in Azure Government: - Containerizing Java Web Apps on Apache Tomcat (on Linux servers) and deploying them on Linux containers on App Service. - Containerizing Java Web Apps on Apache Tomcat (on Linux servers) and deploying them on Linux containers on Azure Kubernetes Service (AKS). @@ -408,7 +408,7 @@ For more information, see [Azure Migrate support matrix](../migrate/migrate-supp ## Networking -This section outlines variations and considerations when using Networking services in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=azure-bastion,frontdoor,virtual-wan,dns,ddos-protection,cdn,azure-firewall,network-watcher,load-balancer,vpn-gateway,expressroute,application-gateway,virtual-network®ions=non-regional,usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). +This section outlines variations and considerations when using Networking services in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=azure-bastion,frontdoor,virtual-wan,dns,ddos-protection,cdn,azure-firewall,network-watcher,load-balancer,vpn-gateway,expressroute,application-gateway,virtual-network®ions=usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). 
### [Azure ExpressRoute](../expressroute/index.yml) @@ -416,7 +416,8 @@ For an overview of ExpressRoute, see [What is Azure ExpressRoute?](../expressrou ### [Private Link](../private-link/index.yml) -For Private Link services availability, see [Azure Private Link availability](../private-link/availability.md). +- For Private Link services availability, see [Azure Private Link availability](../private-link/availability.md). +- For Private DNS zone names, see [Azure Private Endpoint DNS configuration](../private-link/private-endpoint-dns.md#government). ### [Traffic Manager](../traffic-manager/index.yml) @@ -424,7 +425,7 @@ Traffic Manager health checks can originate from certain IP addresses for Azure ## Security -This section outlines variations and considerations when using Security services in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=azure-sentinel,azure-dedicated-hsm,information-protection,application-gateway,vpn-gateway,security-center,key-vault,active-directory-ds,ddos-protection,active-directory®ions=non-regional,usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). +This section outlines variations and considerations when using Security services in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=azure-sentinel,azure-dedicated-hsm,information-protection,application-gateway,vpn-gateway,security-center,key-vault,active-directory-ds,ddos-protection,active-directory®ions=usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). ### [Microsoft Defender for IoT](../defender-for-iot/index.yml) @@ -444,11 +445,11 @@ For feature variations and limitations, see [Cloud feature availability for US G ## Storage -This section outlines variations and considerations when using Storage services in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=hpc-cache,managed-disks,storsimple,backup,storage®ions=non-regional,usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). +This section outlines variations and considerations when using Storage services in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=hpc-cache,managed-disks,storsimple,backup,storage®ions=usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). ### [Azure managed disks](../virtual-machines/managed-disks-overview.md) -The following Azure managed disks **features are not currently available** in Azure Government: +The following Azure managed disks **features aren't currently available** in Azure Government: - Zone-redundant storage (ZRS) @@ -462,23 +463,23 @@ With Import/Export jobs for US Gov Arizona or US Gov Texas, the mailing address ## Web -This section outlines variations and considerations when using Web services in the Azure Government environment. 
For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=spring-cloud,signalr-service,api-management,notification-hubs,search,cdn,app-service-linux,app-service®ions=non-regional,usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). +This section outlines variations and considerations when using Web services in the Azure Government environment. For service availability, see [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=spring-cloud,signalr-service,api-management,notification-hubs,search,cdn,app-service-linux,app-service®ions=usgov-non-regional,us-dod-central,us-dod-east,usgov-arizona,usgov-texas,usgov-virginia&rar=true). ### [API Management](../api-management/index.yml) -The following API Management **features are not currently available** in Azure Government: +The following API Management **features aren't currently available** in Azure Government: - Azure AD B2C integration ### [App Service](../app-service/index.yml) -The following App Service **resources are not currently available** in Azure Government: +The following App Service **resources aren't currently available** in Azure Government: - App Service Certificate - App Service Managed Certificate - App Service Domain -The following App Service **features are not currently available** in Azure Government: +The following App Service **features aren't currently available** in Azure Government: - Deployment - Deployment options: only Local Git Repository and External Repository are available diff --git a/articles/azure-government/compliance/azure-services-in-fedramp-auditscope.md b/articles/azure-government/compliance/azure-services-in-fedramp-auditscope.md index 0e66ebf296367..1d5784e9eeb48 100644 --- a/articles/azure-government/compliance/azure-services-in-fedramp-auditscope.md +++ b/articles/azure-government/compliance/azure-services-in-fedramp-auditscope.md @@ -113,7 +113,7 @@ This article provides a detailed list of Azure, Dynamics 365, Microsoft 365, and | [Cognitive Services: Content Moderator](../../cognitive-services/content-moderator/index.yml) | ✅ | ✅ | | [Cognitive Services Containers](../../cognitive-services/cognitive-services-container-support.md) | ✅ | ✅ | | [Cognitive Services: Custom Vision](../../cognitive-services/custom-vision-service/index.yml) | ✅ | ✅ | -| [Cognitive Services: Face](../../cognitive-services/face/index.yml) | ✅ | ✅ | +| [Cognitive Services: Face](../../cognitive-services/computer-vision/index-identity.yml) | ✅ | ✅ | | [Cognitive Services: Language Understanding (LUIS)](../../cognitive-services/luis/index.yml)
                  (part of [Cognitive Services for Language](../../cognitive-services/language-service/index.yml)) | ✅ | ✅ | | [Cognitive Services: Personalizer](../../cognitive-services/personalizer/index.yml) | ✅ | ✅ | | [Cognitive Services: QnA Maker](../../cognitive-services/qnamaker/index.yml)
                  (part of [Cognitive Services for Language](../../cognitive-services/language-service/index.yml)) | ✅ | ✅ | @@ -313,7 +313,7 @@ This article provides a detailed list of Azure, Dynamics 365, Microsoft 365, and | [Cognitive Services: Content Moderator](../../cognitive-services/content-moderator/index.yml) | ✅ | ✅ | ✅ | ✅ | | | [Cognitive Services Containers](../../cognitive-services/cognitive-services-container-support.md) | ✅ | ✅ | | | | | [Cognitive Services: Custom Vision](../../cognitive-services/custom-vision-service/index.yml) | ✅ | ✅ | ✅ | ✅ | | -| [Cognitive Services: Face](../../cognitive-services/face/index.yml) | ✅ | ✅ | ✅ | ✅ | | +| [Cognitive Services: Face](../../cognitive-services/computer-vision/index-identity.yml) | ✅ | ✅ | ✅ | ✅ | | | [Cognitive Services: LUIS](../../cognitive-services/luis/index.yml)
                  (part of [Cognitive Services for Language](../../cognitive-services/language-service/index.yml)) | ✅ | ✅ | ✅ | ✅ | | | [Cognitive Services: Personalizer](../../cognitive-services/personalizer/index.yml) | ✅ | ✅ | ✅ | ✅ | | | [Cognitive Services: QnA Maker](../../cognitive-services/qnamaker/index.yml)
                  (part of [Cognitive Services for Language](../../cognitive-services/language-service/index.yml)) | ✅ | ✅ | ✅ | ✅ | | @@ -434,7 +434,7 @@ This article provides a detailed list of Azure, Dynamics 365, Microsoft 365, and ***** Authorizations for edge devices (such as Azure Data Box and Azure Stack Edge) apply only to Azure services that support on-premises, customer-managed devices. You are wholly responsible for the authorization package that covers the physical devices. For assistance with accelerating your onboarding and authorization of devices, contact your Microsoft account representative. -****** Azure Information Protection (AIP) is part of the Microsoft Information Protection (MIP) solution - it extends the labeling and classification functionality provided by Microsoft 365. Before AIP can be used for DoD workloads at a given impact level (IL), the corresponding Microsoft 365 services must be authorized at the same IL. +****** Azure Information Protection (AIP) is part of the Microsoft Purview Information Protection solution - it extends the labeling and classification functionality provided by Microsoft 365. Before AIP can be used for DoD workloads at a given impact level (IL), the corresponding Microsoft 365 services must be authorized at the same IL. ## Next steps diff --git a/articles/azure-government/documentation-government-cognitiveservices.md b/articles/azure-government/documentation-government-cognitiveservices.md index e4d11a3b00257..f061664bffd29 100644 --- a/articles/azure-government/documentation-government-cognitiveservices.md +++ b/articles/azure-government/documentation-government-cognitiveservices.md @@ -605,7 +605,7 @@ Response: } ] ``` -For more information, see [public documentation](../cognitive-services/Face/index.yml), and [public API documentation](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) for Face API. +For more information, see [public documentation](../cognitive-services/computer-vision/index-identity.yml), and [public API documentation](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) for Face API. ## Text Analytics diff --git a/articles/azure-government/documentation-government-csp-list.md b/articles/azure-government/documentation-government-csp-list.md index 4ff36b6464a86..d23b6b980202d 100644 --- a/articles/azure-government/documentation-government-csp-list.md +++ b/articles/azure-government/documentation-government-csp-list.md @@ -71,7 +71,7 @@ Below you can find a list of all the authorized Cloud Solution Providers (CSPs), |[Bitscape](https://www.bitscape.com)| |[Bio Automation Support](https://www.stacsdna.com/)| |[Blackwood Associates, Inc. 
(dba BAI Federal)](https://www.blackwoodassociates.com/)| -|[Blue Source Group, Inc.](https://www.blackwoodassociates.com/)| +|[Blue Source Group, Inc.](https://bluesourcegroup.com/)| |[Blueforce Development Corporation](https://www.blueforcedev.com/)| |[Booz Allen Hamilton](https://www.boozallen.com/)| |[Bridge Partners LLC](https://www.bridgepartnersllc.com)| @@ -133,7 +133,7 @@ Below you can find a list of all the authorized Cloud Solution Providers (CSPs), |[Deborgem Enterprises Incorporated](https://deborgem.com)| |[Definitive Logic Corporation](https://www.definitivelogic.com/)| |[Dell Federal Services](https://www.dellemc.com/en-us/industry/federal/federal-government-it.htm#)| -|[Dell Marketing LP](https://www.dell.com/learn/us/en/rc1009777/fed)| +|[Dell Marketing LP](https://www.dell.com/)| |[Delphi Technology Solutions](https://delphi-ts.com/)| |[Developing Today LLC](https://www.developingtoday.net/)| |[DevHawk, LLC](https://www.devhawk.io)| @@ -141,7 +141,7 @@ Below you can find a list of all the authorized Cloud Solution Providers (CSPs), |[DirectApps, Inc. D.B.A. Direct Technology](https://directtechnology.com)| |[DominionTech Inc.](https://www.dominiontech.com)| |[DOT Personable Inc](http://solutions.personable.com/)| -|[Doublehorn, LLC](https://doublehorn.com/)| +|Doublehorn, LLC| |[DXC Technology Services LLC](https://www.dxc.technology/services)| |[DXL Enterprises, Inc.](https://mahwahnjcoc.wliinc31.com/Supply-Chain-Management/DXL-Enterprises,-Inc-1349)| |[DynTek](https://www.dyntek.com)| diff --git a/articles/azure-government/documentation-government-impact-level-5.md b/articles/azure-government/documentation-government-impact-level-5.md index d4547356b684b..57b4ecb460931 100644 --- a/articles/azure-government/documentation-government-impact-level-5.md +++ b/articles/azure-government/documentation-government-impact-level-5.md @@ -75,7 +75,7 @@ For AI and machine learning services availability in Azure Government, see [Prod - Configure encryption at rest of content in Cognitive Services Custom Vision [using customer-managed keys in Azure Key Vault](../cognitive-services/custom-vision-service/encrypt-data-at-rest.md#customer-managed-keys-with-azure-key-vault). -### [Cognitive Services: Face](../cognitive-services/face/index.yml) +### [Cognitive Services: Face](../cognitive-services/computer-vision/index-identity.yml) - Configure encryption at rest of content in the Face service by [using customer-managed keys in Azure Key Vault](../cognitive-services/face/encrypt-data-at-rest.md#customer-managed-keys-with-azure-key-vault). diff --git a/articles/azure-government/documentation-government-stig-linux-vm.md b/articles/azure-government/documentation-government-stig-linux-vm.md index 3caa94c64819b..14d053e344c87 100644 --- a/articles/azure-government/documentation-government-stig-linux-vm.md +++ b/articles/azure-government/documentation-government-stig-linux-vm.md @@ -1,12 +1,12 @@ --- title: Deploy STIG-compliant Linux Virtual Machines (Preview) -description: This quickstart shows you how to deploy a STIG-compliant Linux VM (Preview) from Azure Marketplace +description: This quickstart shows you how to deploy a STIG-compliant Linux VM (Preview) from the Azure portal or Azure Government portal. 
author: stevevi ms.author: stevevi ms.service: azure-government ms.topic: quickstart ms.date: 06/14/2021 -ms.custom: mode-other +ms.custom: mode-other, kr2b-contr-experiment --- # Deploy STIG-compliant Linux Virtual Machines (Preview) diff --git a/articles/azure-government/documentation-government-stig-windows-vm.md b/articles/azure-government/documentation-government-stig-windows-vm.md index 13b433b1bf1f7..49be860ef53cd 100644 --- a/articles/azure-government/documentation-government-stig-windows-vm.md +++ b/articles/azure-government/documentation-government-stig-windows-vm.md @@ -1,12 +1,12 @@ --- title: Deploy STIG-compliant Windows Virtual Machines (Preview) -description: This quickstart shows you how to deploy a STIG-compliant Windows VM (Preview) from Azure Marketplace +description: This quickstart shows you how to deploy a STIG-compliant Windows VM (Preview) from the Azure portal or Azure Government portal. author: stevevi ms.author: stevevi ms.service: azure-government ms.topic: quickstart ms.date: 06/14/2021 -ms.custom: mode-other +ms.custom: mode-other, kr2b-contr-experiment --- # Deploy STIG-compliant Windows Virtual Machines (Preview) diff --git a/articles/azure-maps/authentication-best-practices.md b/articles/azure-maps/authentication-best-practices.md index bfc8d8c450b0c..64e14e33f56b0 100644 --- a/articles/azure-maps/authentication-best-practices.md +++ b/articles/azure-maps/authentication-best-practices.md @@ -14,7 +14,7 @@ services: azure-maps The single most important part of your application is its security. No matter how good the user experience might be, if your application isn't secure a hacker can ruin it. -The following are some tips to keep your Azure Maps application secure. When using Azure, be sure to familiarize yourself with the security tools available to you. For more information, See the [introduction to Azure security](/azure/security/fundamentals/overview). +The following are some tips to keep your Azure Maps application secure. When using Azure, be sure to familiarize yourself with the security tools available to you. For more information, See the [introduction to Azure security](../security/fundamentals/overview.md). ## Understanding security threats @@ -32,17 +32,17 @@ When creating a publicly facing client application with Azure Maps using any of Subscription key-based authentication (Shared Key) can be used in either client side applications or web services, however it is the least secure approach to securing your application or web service. This is because the key grants access to all Azure Maps REST API that are available in the SKU (Pricing Tier) selected when creating the Azure Maps account and the key can be easily obtained from an HTTP request. If you do use subscription keys, be sure to [rotate them regularly](how-to-manage-authentication.md#manage-and-rotate-shared-keys) and keep in mind that Shared Key doesn't allow for configurable lifetime, it must be done manually. You should also consider using [Shared Key authentication with Azure Key Vault](how-to-secure-daemon-app.md#scenario-shared-key-authentication-with-azure-key-vault), which enables you to securely store your secret in Azure. 
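If you do keep Shared Key authentication, a small amount of automation makes regular rotation easier. The following Azure CLI sketch is illustrative only: the account, resource group, key vault, and secret names are placeholder values, and you should confirm the exact parameter names for your CLI version with `az maps account keys renew --help` before relying on it. The idea is to renew one of the account keys and immediately store the new value as a Key Vault secret, so your services read the key at run time instead of embedding it in configuration.

```azurecli
# Illustrative sketch with placeholder resource names; verify parameter names with --help.
# Renew (rotate) the secondary subscription key for the Azure Maps account.
az maps account keys renew --name "my-maps-account" --resource-group "my-resource-group" --key secondary

# Read the renewed key back from the account.
$newKey = $(az maps account keys list --name "my-maps-account" --resource-group "my-resource-group" --query "secondaryKey" --output tsv)

# Store the rotated key in Azure Key Vault so applications retrieve it at run time.
az keyvault secret set --vault-name "my-key-vault" --name "maps-shared-key" --value $newKey
```

Rotating into Key Vault this way keeps the raw key out of source control and app settings, which is the point of the Shared Key with Key Vault guidance above.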
-If using [Azure Active Directory (Azure AD) authentication](/azure/active-directory/fundamentals/active-directory-whatis) or [Shared Access Signature (SAS) Token authentication](azure-maps-authentication.md#shared-access-signature-token-authentication) (preview), access to Azure Maps REST APIs is authorized using [role-based access control (RBAC)](azure-maps-authentication.md#authorization-with-role-based-access-control). RBAC enables you to control what access is given to the issued tokens. You should consider how long access should be granted for the tokens. Unlike Shared Key authentication, the lifetime of these tokens is configurable. +If using [Azure Active Directory (Azure AD) authentication](../active-directory/fundamentals/active-directory-whatis.md) or [Shared Access Signature (SAS) Token authentication](azure-maps-authentication.md#shared-access-signature-token-authentication) (preview), access to Azure Maps REST APIs is authorized using [role-based access control (RBAC)](azure-maps-authentication.md#authorization-with-role-based-access-control). RBAC enables you to control what access is given to the issued tokens. You should consider how long access should be granted for the tokens. Unlike Shared Key authentication, the lifetime of these tokens is configurable. > [!TIP] > > For more information on configuring token lifetimes see: -> - [Configurable token lifetimes in the Microsoft identity platform (preview)](/azure/active-directory/develop/active-directory-configurable-token-lifetimes) +> - [Configurable token lifetimes in the Microsoft identity platform (preview)](../active-directory/develop/active-directory-configurable-token-lifetimes.md) > - [Create SAS tokens](azure-maps-authentication.md#create-sas-tokens) ### Public client and confidential client applications -There are different security concerns between public and confidential client applications. See [Public client and confidential client applications](/azure/active-directory/develop/msal-client-applications) in the Microsoft identity platform documentation for more information about what is considered a *public* versus *confidential* client application. +There are different security concerns between public and confidential client applications. See [Public client and confidential client applications](../active-directory/develop/msal-client-applications.md) in the Microsoft identity platform documentation for more information about what is considered a *public* versus *confidential* client application. ### Public client applications @@ -53,7 +53,7 @@ For apps that run on devices or desktop computers or in a web browser, you shoul ### Confidential client applications -For apps that run on servers (such as web services and service/daemon apps), if you prefer to avoid the overhead and complexity of managing secrets, consider [Managed Identities](/azure/active-directory/managed-identities-azure-resources/overview). Managed identities can provide an identity for your web service to use when connecting to Azure Maps using Azure Active Directory (Azure AD) authentication. In this case, your web service will use that identity to obtain the required Azure AD tokens. You should use Azure RBAC to configure what access the web service is given, using the [Least privileged roles](/azure/active-directory/roles/delegate-by-task) possible. 
+For apps that run on servers (such as web services and service/daemon apps), if you prefer to avoid the overhead and complexity of managing secrets, consider [Managed Identities](../active-directory/managed-identities-azure-resources/overview.md). Managed identities can provide an identity for your web service to use when connecting to Azure Maps using Azure Active Directory (Azure AD) authentication. In this case, your web service will use that identity to obtain the required Azure AD tokens. You should use Azure RBAC to configure what access the web service is given, using the [Least privileged roles](../active-directory/roles/delegate-by-task.md) possible. ## Next steps @@ -64,4 +64,4 @@ For apps that run on servers (such as web services and service/daemon apps), if > [Manage authentication in Azure Maps](how-to-manage-authentication.md) > [!div class="nextstepaction"] -> [Tutorial: Add app authentication to your web app running on Azure App Service](../app-service/scenario-secure-app-authentication-app-service.md) +> [Tutorial: Add app authentication to your web app running on Azure App Service](../app-service/scenario-secure-app-authentication-app-service.md) \ No newline at end of file diff --git a/articles/azure-maps/how-to-secure-sas-app.md b/articles/azure-maps/how-to-secure-sas-app.md index 7e588b9cea092..abf0729171900 100644 --- a/articles/azure-maps/how-to-secure-sas-app.md +++ b/articles/azure-maps/how-to-secure-sas-app.md @@ -1,65 +1,74 @@ --- -title: How to secure an application in Microsoft Azure Maps with SAS token +title: How to secure an Azure Maps application with a SAS token titleSuffix: Azure Maps -description: This article describes how to configure an application to be secured with SAS token authentication. +description: Create an Azure Maps account secured with SAS token authentication. author: stack111 ms.author: dstack -ms.date: 01/05/2022 +ms.date: 06/08/2022 ms.topic: how-to ms.service: azure-maps services: azure-maps manager: philema -custom.ms: subject-rbac-steps +ms.custom: subject-rbac-steps, kr2b-contr-experiment --- -# Secure an application with SAS token +# Secure an Azure Maps account with a SAS token -This article describes how to create an Azure Maps account with a SAS token that can be used to call the Azure Maps REST API. +This article describes how to create an Azure Maps account with a securely stored SAS token you can use to call the Azure Maps REST API. ## Prerequisites -This scenario assumes: +- An Azure subscription. If you don't already have an Azure account, [sign up for a free one](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). +- **Owner** role permission on the Azure subscription. You need the **Owner** permissions to: -- If you don't already have an Azure account, [sign up for a free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you continue. -- The current user must have subscription `Owner` role permissions on the Azure subscription to create an [Azure Key Vault](../key-vault/general/basic-concepts.md), user-assigned managed identity, assign the managed identity a role, and create an Azure Maps account. -- Azure CLI is installed to deploy the resources. Read more on [How to install the Azure CLI](/cli/azure/install-azure-cli). -- The current user is signed-in to Azure CLI with an active Azure subscription using `az login`. + - Create a key vault in [Azure Key Vault](../key-vault/general/basic-concepts.md). + - Create a user-assigned managed identity. + - Assign the managed identity a role. 
+ - Create an Azure Maps account. -## Scenario: SAS token +- [Azure CLI installed](/cli/azure/install-azure-cli) to deploy the resources. -Applications that use SAS token authentication should store the keys in a secure store. A SAS token is a credential that grants the level of access specified during its creation to anyone who holds it, until the token expires or access is revoked. This scenario describes how to safely store your SAS token as a secret in Azure Key Vault and distribute the SAS token into a public client. Events in an application’s lifecycle may generate new SAS tokens without interrupting active connections using existing tokens. To understand how to configure Azure Key Vault, see the [Azure Key Vault developer's guide](../key-vault/general/developers-guide.md). +## Example scenario: SAS token secure storage -The following sample scenario will perform the steps outlined below with two Azure Resource Manager (ARM) template deployments: +A SAS token credential grants the access level it specifies to anyone who holds it, until the token expires or access is revoked. Applications that use SAS token authentication should store the keys securely. -- Create an Azure Key Vault. -- Create a user-assigned managed identity. -- Assign Azure RBAC `Azure Maps Data Reader` role to the user-assigned managed identity. -- Create a map account with a CORS configuration and attach the user-assigned managed identity. -- Create and save a SAS token into the Azure Key Vault -- Retrieve the SAS token secret from Azure Key Vault. -- Create an Azure Maps REST API request using the SAS token. +This scenario safely stores a SAS token as a secret in Key Vault, and distributes the token into a public client. Application lifecycle events can generate new SAS tokens without interrupting active connections that use existing tokens. -When completed, you should see output from Azure Maps `Search Address (Non-Batch)` REST API results on PowerShell with Azure CLI. The Azure resources will be deployed with permissions to connect to the Azure Maps account with controls for maximum rate limit, allowed regions, `localhost` configured CORS policy, and Azure RBAC. +For more information about configuring Key Vault, see the [Azure Key Vault developer's guide](../key-vault/general/developers-guide.md). -### Azure resource deployment with Azure CLI +The following example scenario uses two Azure Resource Manager (ARM) template deployments to do the following steps: -The following steps describe how to create and configure an Azure Maps account with SAS token authentication. The Azure CLI is assumed to be running in a PowerShell instance. +1. Create a key vault. +1. Create a user-assigned managed identity. +1. Assign Azure role-based access control (RBAC) **Azure Maps Data Reader** role to the user-assigned managed identity. +1. Create an Azure Maps account with a [Cross Origin Resource Sharing (CORS) configuration](azure-maps-authentication.md#cross-origin-resource-sharing-cors), and attach the user-assigned managed identity. +1. Create and save a SAS token in the Azure key vault. +1. Retrieve the SAS token secret from the key vault. +1. Create an Azure Maps REST API request that uses the SAS token. -1. Register Key Vault, Managed Identities, and Azure Maps for your subscription +When you finish, you should see Azure Maps `Search Address (Non-Batch)` REST API results on PowerShell with Azure CLI. The Azure resources deploy with permissions to connect to the Azure Maps account. 
There are controls for maximum rate limit, allowed regions, `localhost` configured CORS policy, and Azure RBAC. - ```azurecli - az provider register --namespace Microsoft.KeyVault - az provider register --namespace Microsoft.ManagedIdentity - az provider register --namespace Microsoft.Maps - ``` +## Azure resource deployment with Azure CLI + +The following steps describe how to create and configure an Azure Maps account with SAS token authentication. In this example, Azure CLI runs in a PowerShell instance. + +1. Sign in to your Azure subscription with `az login`. + +1. Register Key Vault, Managed Identities, and Azure Maps for your subscription. + + ```azurecli + az provider register --namespace Microsoft.KeyVault + az provider register --namespace Microsoft.ManagedIdentity + az provider register --namespace Microsoft.Maps + ``` -1. Retrieve your Azure AD object ID +1. Retrieve your Azure Active Directory (Azure AD) object ID. ```azurecli $id = $(az rest --method GET --url 'https://graph.microsoft.com/v1.0/me?$select=id' --headers 'Content-Type=application/json' --query "id") ``` -1. Create a template file `prereq.azuredeploy.json` with the following content. +1. Create a template file named *prereq.azuredeploy.json* with the following content: ```json { @@ -90,7 +99,7 @@ The following steps describe how to create and configure an Azure Maps account w "objectId": { "type": "string", "metadata": { - "description": "Specifies the object ID of a user, service principal or security group in the Azure Active Directory tenant for the vault. The object ID must be unique for the set of access policies. Get it by using Get-AzADUser or Get-AzADServicePrincipal cmdlets." + "description": "Specifies the object ID of a user, service principal, or security group in the Azure AD tenant for the vault. The object ID must be unique for the set of access policies. Get it by using Get-AzADUser or Get-AzADServicePrincipal cmdlets." } }, "secretsPermissions": { @@ -154,14 +163,14 @@ The following steps describe how to create and configure an Azure Maps account w ``` -1. Deploy the prerequisite resources. Make sure to pick the location where the Azure Maps accounts is enabled. +1. Deploy the prerequisite resources you created in the previous step. Supply your own value for ``. Make sure to use the same `location` as the Azure Maps account. - ```azurecli - az group create --name {group-name} --location "East US" - $outputs = $(az deployment group create --name ExampleDeployment --resource-group {group-name} --template-file "./prereq.azuredeploy.json" --parameters objectId=$id --query "[properties.outputs.keyVaultName.value, properties.outputs.userAssignedIdentityPrincipalId.value, properties.outputs.userIdentityResourceId.value]" --output tsv) - ``` + ```azurecli + az group create --name --location "East US" + $outputs = $(az deployment group create --name ExampleDeployment --resource-group --template-file "./prereq.azuredeploy.json" --parameters objectId=$id --query "[properties.outputs.keyVaultName.value, properties.outputs.userAssignedIdentityPrincipalId.value, properties.outputs.userIdentityResourceId.value]" --output tsv) + ``` -1. Create a template file `azuredeploy.json` to provision the Map account, role assignment, and SAS token. +1. Create a template file *azuredeploy.json* to provision the Azure Maps account, role assignment, and SAS token. 
```json { @@ -227,21 +236,21 @@ The following steps describe how to create and configure an Azure Maps account w "type": "string", "defaultValue": "[guid(resourceGroup().id)]", "metadata": { - "description": "Input string for new GUID associated with assigning built in role types" + "description": "Input string for new GUID associated with assigning built in role types." } }, "startDateTime": { "type": "string", "defaultValue": "[utcNow('u')]", "metadata": { - "description": "Current Universal DateTime in ISO 8601 'u' format to be used as start of the SAS token." + "description": "Current Universal DateTime in ISO 8601 'u' format to use as the start of the SAS token." } }, "duration" : { "type": "string", "defaultValue": "P1Y", "metadata": { - "description": "The duration of the SAS token, P1Y is maximum, ISO 8601 format is expected." + "description": "The duration of the SAS token. P1Y is maximum, ISO 8601 format is expected." } }, "maxRatePerSecond": { @@ -269,14 +278,14 @@ The following steps describe how to create and configure an Azure Maps account w "defaultValue": [], "maxLength": 10, "metadata": { - "description": "The specified application's web host header origins (example: https://www.azure.com) which the Maps account allows for Cross Origin Resource Sharing (CORS)." + "description": "The specified application's web host header origins (example: https://www.azure.com) which the Azure Maps account allows for CORS." } }, "allowedRegions": { "type": "array", "defaultValue": [], "metadata": { - "description": "The specified SAS token allowed locations which the token may be used." + "description": "The specified SAS token allowed locations where the token may be used." } } }, @@ -351,34 +360,34 @@ The following steps describe how to create and configure an Azure Maps account w } ``` -1. Deploy the template using ID parameters from the Azure Key Vault and managed identity resources created in the previous step. Note that when creating the SAS token, the `allowedRegions` parameter is set to `eastus`, `westus2`, and `westcentralus`. We use these locations because we plan to make HTTP requests to the `us.atlas.microsoft.com` endpoint. +1. Deploy the template with the ID parameters from the Key Vault and managed identity resources you created in the previous step. Supply your own value for `<group-name>`. When creating the SAS token, you set the `allowedRegions` parameter to `eastus`, `westus2`, and `westcentralus`. You can then use these locations to make HTTP requests to the `us.atlas.microsoft.com` endpoint. - > [!IMPORTANT] - > We save the SAS token into the Azure Key Vault to prevent its credentials from appearing in the Azure deployment logs. The Azure Key Vault SAS token secret's `tags` also contain the start, expiry, and signing key name to help understand when the SAS token will expire. + > [!IMPORTANT] + > You save the SAS token in the key vault to prevent its credentials from appearing in the Azure deployment logs. The SAS token secret's `tags` also contain the start, expiry, and signing key name, to show when the SAS token will expire.
- ```azurecli - az deployment group create --name ExampleDeployment --resource-group {group-name} --template-file "./azuredeploy.json" --parameters keyVaultName="$($outputs[0])" userAssignedIdentityPrincipalId="$($outputs[1])" userAssignedIdentityResourceId="$($outputs[2])" allowedOrigins="['http://localhost']" allowedRegions="['eastus', 'westus2', 'westcentralus']" maxRatePerSecond="10" - ``` + ```azurecli + az deployment group create --name ExampleDeployment --resource-group <group-name> --template-file "./azuredeploy.json" --parameters keyVaultName="$($outputs[0])" userAssignedIdentityPrincipalId="$($outputs[1])" userAssignedIdentityResourceId="$($outputs[2])" allowedOrigins="['http://localhost']" allowedRegions="['eastus', 'westus2', 'westcentralus']" maxRatePerSecond="10" + ``` -1. Locate, then save a copy of the single SAS token secret from Azure Key Vault. +1. Locate and save a copy of the single SAS token secret from Key Vault. - ```azurecli - $secretId = $(az keyvault secret list --vault-name $outputs[0] --query "[? contains(name,'map')].id" --output tsv) - $sasToken = $(az keyvault secret show --id "$secretId" --query "value" --output tsv) - ``` + ```azurecli + $secretId = $(az keyvault secret list --vault-name $outputs[0] --query "[? contains(name,'map')].id" --output tsv) + $sasToken = $(az keyvault secret show --id "$secretId" --query "value" --output tsv) + ``` -1. Test the SAS Token by making a request to an Azure Maps endpoint. We specify the `us.atlas.microsoft.com` to ensure that our request will be routed to the US geography because our SAS Token has allowed regions within the geography. +1. Test the SAS token by making a request to an Azure Maps endpoint. This example specifies the `us.atlas.microsoft.com` endpoint to ensure that your request routes to the US geography, because your SAS token allows regions within that geography. ```azurecli - az rest --method GET --url 'https://us.atlas.microsoft.com/search/address/json?api-version=1.0&query=15127 NE 24th Street, Redmond, WA 98052' --headers "Authorization=jwt-sas $($sasToken)" --query "results[].address" + az rest --method GET --url 'https://us.atlas.microsoft.com/search/address/json?api-version=1.0&query=1 Microsoft Way, Redmond, WA 98052' --headers "Authorization=jwt-sas $($sasToken)" --query "results[].address" ``` -## Complete example +## Complete script example -In the current directory of the PowerShell session you should have: +To run the complete example, the following template files must be in the same directory as the current PowerShell session: -- `prereq.azuredeploy.json` This creates the Key Vault and managed identity. -- `azuredeploy.json` This creates the Azure Maps account and configures the role assignment and managed identity, then stores the SAS Token into the Azure Key Vault. +- *prereq.azuredeploy.json* to create the key vault and managed identity. +- *azuredeploy.json* to create the Azure Maps account, configure the role assignment and managed identity, and store the SAS token in the key vault.
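Before you run the complete script that follows, you can optionally confirm that both template files are present in the session's working directory. The following check is a minimal sketch that uses only built-in PowerShell cmdlets; the file names are the two templates created earlier in this article.

```powershell
# Optional check: confirm both ARM template files are present in the current directory.
$templates = @("prereq.azuredeploy.json", "azuredeploy.json")
foreach ($template in $templates) {
    if (-not (Test-Path -Path $template)) {
        Write-Warning "Missing template file: $template"
    }
}
```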
```powershell az login @@ -387,13 +396,40 @@ az provider register --namespace Microsoft.ManagedIdentity az provider register --namespace Microsoft.Maps $id = $(az rest --method GET --url 'https://graph.microsoft.com/v1.0/me?$select=id' --headers 'Content-Type=application/json' --query "id") -az group create --name {group-name} --location "East US" -$outputs = $(az deployment group create --name ExampleDeployment --resource-group {group-name} --template-file "./prereq.azuredeploy.json" --parameters objectId=$id --query "[properties.outputs.keyVaultName.value, properties.outputs.userAssignedIdentityPrincipalId.value, properties.outputs.userIdentityResourceId.value]" --output tsv) -az deployment group create --name ExampleDeployment --resource-group {group-name} --template-file "./azuredeploy.json" --parameters keyVaultName="$($outputs[0])" userAssignedIdentityPrincipalId="$($outputs[1])" userAssignedIdentityResourceId="$($outputs[2])" allowedOrigins="['http://localhost']" allowedRegions="['eastus', 'westus2', 'westcentralus']" maxRatePerSecond="10" +az group create --name <group-name> --location "East US" +$outputs = $(az deployment group create --name ExampleDeployment --resource-group <group-name> --template-file "./prereq.azuredeploy.json" --parameters objectId=$id --query "[properties.outputs.keyVaultName.value, properties.outputs.userAssignedIdentityPrincipalId.value, properties.outputs.userIdentityResourceId.value]" --output tsv) +az deployment group create --name ExampleDeployment --resource-group <group-name> --template-file "./azuredeploy.json" --parameters keyVaultName="$($outputs[0])" userAssignedIdentityPrincipalId="$($outputs[1])" userAssignedIdentityResourceId="$($outputs[2])" allowedOrigins="['http://localhost']" allowedRegions="['eastus', 'westus2', 'westcentralus']" maxRatePerSecond="10" $secretId = $(az keyvault secret list --vault-name $outputs[0] --query "[? contains(name,'map')].id" --output tsv) $sasToken = $(az keyvault secret show --id "$secretId" --query "value" --output tsv) -az rest --method GET --url 'https://us.atlas.microsoft.com/search/address/json?api-version=1.0&query=15127 NE 24th Street, Redmond, WA 98052' --headers "Authorization=jwt-sas $($sasToken)" --query "results[].address" +az rest --method GET --url 'https://us.atlas.microsoft.com/search/address/json?api-version=1.0&query=1 Microsoft Way, Redmond, WA 98052' --headers "Authorization=jwt-sas $($sasToken)" --query "results[].address" +``` + +## Real-world example + +You can run requests to Azure Maps APIs from most clients, like C#, Java, or JavaScript. [Postman](https://learning.postman.com/docs/sending-requests/generate-code-snippets) converts an API request into a basic client code snippet in almost any programming language or framework you choose. You can use this generated code snippet in your front-end applications. + +The following small JavaScript code example shows how you could use your SAS token with the JavaScript [Fetch API](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API/Using_Fetch#supplying_request_options) to get and return Azure Maps information. The example uses the Azure Maps [Get Search Address](/rest/api/maps/search/get-search-address) API version 1.0. Supply your own value for `<your SAS token>`. + +For this sample to work, make sure to run it from within the same origin as the `allowedOrigins` for the API call. For example, if you provide `https://contoso.com` as the `allowedOrigins` in the API call, the HTML page that hosts the JavaScript script should be `https://contoso.com`.
+ +```javascript +async function getData(url = 'https://us.atlas.microsoft.com/search/address/json?api-version=1.0&query=1 Microsoft Way, Redmond, WA 98052') { + const response = await fetch(url, { + method: 'GET', + mode: 'cors', + headers: { + 'Content-Type': 'application/json', + 'Authorization': 'jwt-sas <your SAS token>', + } + }); + return response.json(); // parses JSON response into native JavaScript objects +} + +getData('https://us.atlas.microsoft.com/search/address/json?api-version=1.0&query=1 Microsoft Way, Redmond, WA 98052') + .then(data => { + console.log(data); // JSON data parsed by the `response.json()` call + }); ``` ## Clean up resources @@ -406,7 +442,11 @@ az group delete --name {group-name} ## Next steps -For more detailed examples: +Deploy a quickstart ARM template to create an Azure Maps account that uses a SAS token: +> [!div class="nextstepaction"] +> [Create an Azure Maps account](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.maps/maps-use-sas) + +For more detailed examples, see: > [!div class="nextstepaction"] > [Authentication scenarios for Azure AD](../active-directory/develop/authentication-vs-authorization.md) diff --git a/articles/azure-maps/toc.yml b/articles/azure-maps/toc.yml index 87246f1ddc590..20e19850a2585 100644 --- a/articles/azure-maps/toc.yml +++ b/articles/azure-maps/toc.yml @@ -131,7 +131,7 @@ items: href: how-to-secure-spa-users.md - name: How to secure web application href: how-to-secure-webapp-users.md - - name: How to secure sas application + - name: How to secure account with a SAS token href: how-to-secure-sas-app.md - name: How to secure non-interactive sign-in single page application href: how-to-secure-spa-app.md diff --git a/articles/azure-monitor/agents/agents-overview.md b/articles/azure-monitor/agents/agents-overview.md index 0fbee64f5255d..51c461bed0c1c 100644 --- a/articles/azure-monitor/agents/agents-overview.md +++ b/articles/azure-monitor/agents/agents-overview.md @@ -6,7 +6,7 @@ services: azure-monitor ms.topic: conceptual author: bwren ms.author: bwren -ms.date: 05/11/2022 +ms.date: 05/24/2022 --- # Overview of Azure Monitor agents @@ -148,6 +148,7 @@ The following tables list the operating systems that are supported by the Azure | Operating system | Azure Monitor agent | Log Analytics agent | Dependency agent | Diagnostics extension | |:---|:---:|:---:|:---:|:---:| | Windows Server 2022 | X | | | | +| Windows Server 2022 Core | X | | | | | Windows Server 2019 | X | X | X | X | | Windows Server 2019 Core | X | | | | | Windows Server 2016 | X | X | X | X | @@ -168,8 +169,13 @@ The following tables list the operating systems that are supported by the Azure 2 Using the Azure Monitor agent [client installer (preview)](./azure-monitor-agent-windows-client.md) ### Linux +> [!NOTE] +> For the Dependency agent, also check for supported kernel versions.
See "Dependency agent Linux kernel support" table below for details + + | Operating system | Azure Monitor agent 1 | Log Analytics agent 1 | Dependency agent | Diagnostics extension 2| |:---|:---:|:---:|:---:|:---: +| AlmaLinux | X | | | | | Amazon Linux 2017.09 | | X | | | | Amazon Linux 2 | | X | | | | CentOS Linux 8 | X 3 | X | X | | @@ -190,13 +196,15 @@ The following tables list the operating systems that are supported by the Azure | Red Hat Enterprise Linux Server 7 | X | X | X | X | | Red Hat Enterprise Linux Server 6 | | X | X | | | Red Hat Enterprise Linux Server 6.7+ | | X | X | X | +| Rocky Linux | X | | | | | SUSE Linux Enterprise Server 15.2 | X 3 | | | | | SUSE Linux Enterprise Server 15.1 | X 3 | X | | | | SUSE Linux Enterprise Server 15 SP1 | X | X | X | | | SUSE Linux Enterprise Server 15 | X | X | X | | | SUSE Linux Enterprise Server 12 SP5 | X | X | X | X | | SUSE Linux Enterprise Server 12 | X | X | X | X | -| Ubuntu 20.04 LTS | X | X | X | X | +| Ubuntu 22.04 LTS | X | | | | +| Ubuntu 20.04 LTS | X | X | X | X 4 | | Ubuntu 18.04 LTS | X | X | X | X | | Ubuntu 16.04 LTS | X | X | X | X | | Ubuntu 14.04 LTS | | X | | X | @@ -205,6 +213,8 @@ The following tables list the operating systems that are supported by the Azure 3 Known issue collecting Syslog events in versions prior to 1.9.0. +4 Not all kernel versions are supported, check supported kernel versions below. + #### Dependency agent Linux kernel support Since the Dependency agent works at the kernel level, support is also dependent on the kernel version. As of Dependency agent version 9.10.* the agent supports * kernels. The following table lists the major and minor Linux OS release and supported kernel versions for the Dependency agent. diff --git a/articles/azure-monitor/agents/azure-monitor-agent-extension-versions.md b/articles/azure-monitor/agents/azure-monitor-agent-extension-versions.md index 8a299fb0a29d3..cae3eefd3775a 100644 --- a/articles/azure-monitor/agents/azure-monitor-agent-extension-versions.md +++ b/articles/azure-monitor/agents/azure-monitor-agent-extension-versions.md @@ -4,7 +4,7 @@ description: This article describes the version details for the Azure Monitor ag ms.topic: conceptual author: shseth ms.author: shseth -ms.date: 5/19/2022 +ms.date: 6/6/2022 ms.custom: references_region --- @@ -18,7 +18,8 @@ We strongly recommended to update to the latest version at all times, or opt in ## Version details | Release Date | Release notes | Windows | Linux | |:---|:---|:---|:---| -| April 2022 |
                  • Private IP information added in Log Analytics Heartbeat table for Windows
                  • Fixed bugs in Windows IIS log collection (preview)
                    • Updated IIS site column name to match backend KQL transform
                    • Added delay to IIS upload task to account for IIS buffering
                  | 1.4.1.0Hotfix | Coming soon | +| May 2022 |
                  • Fixed an issue where the agent stops functioning due to a faulty XPath query. With this version, only the Windows events related to the faulty query fail to be collected; other data types continue to be collected
                  • Collection of Windows network troubleshooting logs added to 'CollectAMAlogs.ps1' tool
                  | 1.5.0.0 | Coming soon | +| April 2022 |
                  • Private IP information added in Log Analytics Heartbeat table for Windows and Linux
                  • Fixed bugs in Windows IIS log collection (preview)
                    • Updated IIS site column name to match backend KQL transform
                    • Added delay to IIS upload task to account for IIS buffering
                  • Fixed Linux CEF syslog forwarding for Sentinel
                  • Changed the 'error' message for Azure MSI token retrieval failure on Arc to show as 'Info' instead
                  • Support added for Ubuntu 22.04, AlmaLinux, and Rocky Linux distros
                  | 1.4.1.0Hotfix | 1.19.3 | | March 2022 |
                  • Fixed timestamp and XML format bugs in Windows Event logs
                  • Full Windows OS information in Log Analytics Heartbeat table
                  • Fixed Linux performance counters to collect instance values instead of 'total' only
                  | 1.3.0.0 | 1.17.5.0 | | February 2022 |
                  • Bugfixes for the AMA Client installer (private preview)
                  • Versioning fix to reflect appropriate Windows major/minor/hotfix versions
                  • Internal test improvement on Linux
                  | 1.2.0.0 | 1.15.3 | | January 2022 |
                  • Syslog RFC compliance for Linux
                  • Fixed issue for Linux perf counters not flowing on restart
                  • Fixed installation failure on Windows Server 2008 R2 SP1
                  | 1.1.5.1Hotfix | 1.15.2.0Hotfix | diff --git a/articles/azure-monitor/agents/azure-monitor-agent-manage.md b/articles/azure-monitor/agents/azure-monitor-agent-manage.md index 58fa8bccf12f6..80ee3e6e8c688 100644 --- a/articles/azure-monitor/agents/azure-monitor-agent-manage.md +++ b/articles/azure-monitor/agents/azure-monitor-agent-manage.md @@ -163,7 +163,7 @@ Update-AzConnectedExtension -ResourceGroupName $env.ResourceGroupName -MachineNa ``` --- -The **recommendation** is to enable automatic update of the agent by enabling the [Automatic Extension Upgrade (preview)](../../azure-arc/servers/manage-automatic-vm-extension-upgrade.md#enabling-automatic-extension-upgrade-preview) feature, using the following PowerShell commands. +The **recommendation** is to enable automatic update of the agent by enabling the [Automatic Extension Upgrade (preview)](../../azure-arc/servers/manage-automatic-vm-extension-upgrade.md#enable-automatic-extension-upgrade) feature, using the following PowerShell commands. # [Windows](#tab/PowerShellWindowsArc) ```powershell Update-AzConnectedMachineExtension -ResourceGroup -MachineName -Name AMAWindows -EnableAutomaticUpgrade @@ -254,7 +254,7 @@ az connectedmachine upgrade-extension --extension-targets "{\"Microsoft.Azure.Mo ``` --- -The **recommendation** is to enable automatic update of the agent by enabling the [Automatic Extension Upgrade (preview)](../../azure-arc/servers/manage-automatic-vm-extension-upgrade.md#enabling-automatic-extension-upgrade-preview) feature, using the following PowerShell commands. +The **recommendation** is to enable automatic update of the agent by enabling the [Automatic Extension Upgrade (preview)](../../azure-arc/servers/manage-automatic-vm-extension-upgrade.md#enable-automatic-extension-upgrade) feature, using the following PowerShell commands. 
# [Windows](#tab/CLIWindowsArc) ```azurecli az connectedmachine extension update --name AzureMonitorWindowsAgent --machine-name --resource-group --enable-auto-upgrade true diff --git a/articles/azure-monitor/agents/azure-monitor-agent-overview.md b/articles/azure-monitor/agents/azure-monitor-agent-overview.md index 1359c61ad0f31..0db75380d3c7a 100644 --- a/articles/azure-monitor/agents/azure-monitor-agent-overview.md +++ b/articles/azure-monitor/agents/azure-monitor-agent-overview.md @@ -70,7 +70,7 @@ The Azure Monitor agent can coexist (run side by side on the same machine) with | Resource type | Installation method | Additional information | |:---|:---|:---| | Virtual machines, scale sets | [Virtual machine extension](./azure-monitor-agent-manage.md#virtual-machine-extension-details) | Installs the agent using Azure extension framework | -| On-premise servers (Arc-enabled servers) | [Virtual machine extension](./azure-monitor-agent-manage.md#virtual-machine-extension-details) (after installing [Arc agent](/azure/azure-arc/servers/deployment-options)) | Installs the agent using Azure extension framework, provided for on-premise by first installing [Arc agent](/azure/azure-arc/servers/deployment-options) | +| On-premise servers (Arc-enabled servers) | [Virtual machine extension](./azure-monitor-agent-manage.md#virtual-machine-extension-details) (after installing [Arc agent](../../azure-arc/servers/deployment-options.md)) | Installs the agent using Azure extension framework, provided for on-premise by first installing [Arc agent](../../azure-arc/servers/deployment-options.md) | | Windows 10, 11 desktops, workstations | [Client installer (preview)](./azure-monitor-agent-windows-client.md) | Installs the agent using a Windows MSI installer | | Windows 10, 11 laptops | [Client installer (preview)](./azure-monitor-agent-windows-client.md) | Installs the agent using a Windows MSI installer. The installs works on laptops but the agent is **not optimized yet** for battery, network consumption | @@ -211,4 +211,4 @@ To configure the agent to use private links for network communications with Azur ## Next steps - [Install the Azure Monitor agent](azure-monitor-agent-manage.md) on Windows and Linux virtual machines. -- [Create a data collection rule](data-collection-rule-azure-monitor-agent.md) to collect data from the agent and send it to Azure Monitor. +- [Create a data collection rule](data-collection-rule-azure-monitor-agent.md) to collect data from the agent and send it to Azure Monitor. \ No newline at end of file diff --git a/articles/azure-monitor/agents/data-collection-text-log.md b/articles/azure-monitor/agents/data-collection-text-log.md index 4b05ac45f6216..4b0b88ed86ccd 100644 --- a/articles/azure-monitor/agents/data-collection-text-log.md +++ b/articles/azure-monitor/agents/data-collection-text-log.md @@ -15,7 +15,7 @@ This article describes how to configure the collection of file-based text logs, ## Prerequisites To complete this procedure, you need the following: -- Log Analytics workspace where you have at least [contributor rights](../logs/manage-access.md#manage-access-using-azure-permissions) . +- Log Analytics workspace where you have at least [contributor rights](../logs/manage-access.md#azure-rbac) . - [Permissions to create Data Collection Rule objects](../essentials/data-collection-rule-overview.md#permissions) in the workspace. - An agent with supported log file as described in the next section. 
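If you're not sure whether you have the required rights on the workspace, you can list your role assignments at the workspace scope. The following is a minimal sketch that runs Azure CLI from PowerShell; the subscription ID, resource group, and workspace name are placeholders you need to replace.

```powershell
# List role assignments for the signed-in user at the Log Analytics workspace scope (placeholder values).
$scope = "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.OperationalInsights/workspaces/<workspace-name>"
$me = $(az ad signed-in-user show --query "userPrincipalName" --output tsv)
az role assignment list --assignee $me --scope $scope --include-inherited --output table
```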
@@ -356,7 +356,7 @@ The [data collection rule (DCR)](../essentials/data-collection-rule-overview.md) "Microsoft-W3CIISLog" ], "logDirectories": [ - "C:\\inetpub\\logs\\LogFiles\\*.log" + "C:\\inetpub\\logs\\LogFiles\\" ], "name": "myIisLogsDataSource" } diff --git a/articles/azure-monitor/agents/diagnostics-extension-overview.md b/articles/azure-monitor/agents/diagnostics-extension-overview.md index 0e79885245f07..1322e17fe2f2d 100644 --- a/articles/azure-monitor/agents/diagnostics-extension-overview.md +++ b/articles/azure-monitor/agents/diagnostics-extension-overview.md @@ -3,6 +3,7 @@ title: Azure Diagnostics extension overview description: Use Azure diagnostics for debugging, measuring performance, monitoring, traffic analysis in cloud services, virtual machines and service fabric ms.topic: conceptual ms.date: 04/06/2022 +ms.reviewer: dalek --- diff --git a/articles/azure-monitor/agents/om-agents.md b/articles/azure-monitor/agents/om-agents.md index 8c8e584fff456..36ddf23084594 100644 --- a/articles/azure-monitor/agents/om-agents.md +++ b/articles/azure-monitor/agents/om-agents.md @@ -31,8 +31,8 @@ Before starting, review the following requirements. * Azure Monitor only supports System Center Operations Manager 2016 or later, Operations Manager 2012 SP1 UR6 or later, and Operations Manager 2012 R2 UR2 or later. Proxy support was added in Operations Manager 2012 SP1 UR7 and Operations Manager 2012 R2 UR3. * Integrating System Center Operations Manager 2016 with US Government cloud requires an updated Advisor management pack included with Update Rollup 2 or later. System Center Operations Manager 2012 R2 requires an updated Advisor management pack included with Update Rollup 3 or later. * All Operations Manager agents must meet minimum support requirements. Ensure that agents are at the minimum update, otherwise Windows agent communication may fail and generate errors in the Operations Manager event log. -* A Log Analytics workspace. For further information, review [Log Analytics workspace overview](../logs/design-logs-deployment.md). -* You authenticate to Azure with an account that is a member of the [Log Analytics Contributor role](../logs/manage-access.md#manage-access-using-azure-permissions). +* A Log Analytics workspace. For further information, review [Log Analytics workspace overview](../logs/workspace-design.md). +* You authenticate to Azure with an account that is a member of the [Log Analytics Contributor role](../logs/manage-access.md#azure-rbac). * Supported Regions - Only the following Azure regions are supported by System Center Operations Manager to connect to a Log Analytics workspace: - West Central US diff --git a/articles/azure-monitor/alerts/action-groups.md b/articles/azure-monitor/alerts/action-groups.md index 09104469f9485..7cf2b150a63e2 100644 --- a/articles/azure-monitor/alerts/action-groups.md +++ b/articles/azure-monitor/alerts/action-groups.md @@ -3,7 +3,7 @@ title: Create and manage action groups in the Azure portal description: Learn how to create and manage action groups in the Azure portal. author: dkamstra ms.topic: conceptual -ms.date: 2/23/2022 +ms.date: 6/2/2022 ms.author: dukek ms.custom: references_regions --- @@ -95,10 +95,11 @@ Under **Instance details**: > [!NOTE] > When you configure an action to notify a person by email or SMS, they receive a confirmation indicating they have been added to the action group.
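If you prefer to script action group creation instead of using the portal, you can also do it with the Azure CLI. The following is a minimal sketch run from PowerShell; the resource group, action group name, short name, and email address are placeholder values.

```powershell
# Create an action group with a single email receiver (placeholder values).
az monitor action-group create `
    --resource-group "<resource-group>" `
    --name "<action-group-name>" `
    --short-name "<short-name>" `
    --action email admin "admin@contoso.com"
```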
+ ### Test an action group in the Azure portal (Preview) When creating or updating an action group in the Azure portal, you can **test** the action group. -1. After creating an action rule, click on **Review + create**. Select *Test action group*. +1. After defining an action, click on **Review + create**. Select *Test action group*. ![The Test Action Group](./media/action-groups/test-action-group.png) @@ -118,11 +119,23 @@ To allow you to check the action groups are working as expected before you enabl All the details and links in Test email notifications for the alerts fired are a sample set for reference. +#### Azure Resource Manager role membership requirements +The following table describes the role membership requirements to use the *test actions* functionality. + +| User's role membership | Existing Action Group | Existing Resource Group and new Action Group | New Resource Group and new Action Group | +| ---------- | ------------- | ----------- | ------------- | +| Subscription Contributor | Supported | Supported | Supported | +| Resource Group Contributor | Supported | Supported | Not Applicable | +| Action Group resource Contributor | Supported | Not Applicable | Not Applicable | +| Azure Monitor Contributor | Supported | Supported | Not Applicable | +| Custom role | Supported | Supported | Not Applicable | + + > [!NOTE] -> You may have a limited number of actions in a test Action Group. See the [rate limiting information](./alerts-rate-limiting.md) article. +> You may perform a limited number of tests over a time period. See the [rate limiting information](./alerts-rate-limiting.md) article. > > You can opt in or opt out to the common alert schema through Action Groups, on the portal. You can [find common schema samples for test action groups for all the sample types](./alerts-common-schema-test-action-definitions.md). -> You can opt in or opt out to the non-common alert schema through Action Groups, on the portal. You can [find non-common schema alert definitions](./alerts-non-common-schema-definitions.md). +> You can [find non-common schema alert definitions](./alerts-non-common-schema-definitions.md). ## Manage your action groups diff --git a/articles/azure-monitor/alerts/activity-log-alerts.md b/articles/azure-monitor/alerts/activity-log-alerts.md deleted file mode 100644 index f098fe5028865..0000000000000 --- a/articles/azure-monitor/alerts/activity-log-alerts.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: Activity log alerts in Azure Monitor -description: Be notified via SMS, webhook, SMS, email and more, when certain events occur in the activity log. -ms.topic: conceptual -ms.date: 04/04/2022 - ---- - -# Alerts on activity log - -## Overview - -Activity log alerts allow you to be notified on events and operations that are logged in [Azure Activity Log](../essentials/activity-log.md). An alert is fired when a new [activity log event](../essentials/activity-log-schema.md) occurs that matches the conditions specified in the alert rule. - -Activity log alert rules are Azure resources, so they can be created by using an Azure Resource Manager template. They also can be created, updated, or deleted in the Azure portal. This article introduces the concepts behind activity log alerts. For more information on creating or usage of activity log alert rules, see [Create and manage activity log alerts](./alerts-activity-log.md).
- -## Alerting on activity log event categories - -You can create activity log alert rules to receive notifications on one of the following activity log event categories: - -| Event Category | Category Description | Example | -|----------------|-------------|---------| -| Administrative | ARM operation (e.g. create, update, delete, or action) was performed on resources in your subscription, resource group, or on a specific Azure resource.| A virtual machine in your resource group is deleted | -| Service health | Service incidents (e.g. an outage or a maintenance event) occurred that may impact services in your subscription on a specific region.| An outage impacting VMs in your subscription in East US. | -| Resource health | The health of a specific resource is degraded, or the resource becomes unavailable. | A VM in your subscription transitions to a degraded or unavailable state. | -| Autoscale | An Azure Autoscale operation has occurred, resulting in success or failure | An autoscale action on a virtual machine scale set in your subscription failed. | -| Recommendation | A new Azure Advisor recommendation is available for your subscription | A high-impact recommendation for your subscription was received. | -| Security | Events detected by Microsoft Defender for Cloud | A suspicious double extension file executed was detected in your subscription | -| Policy | Operations performed by Azure Policy | Policy Deny event occurred in your subscription. | - -> [!NOTE] -> Alert rules **cannot** be created for events in Alert category of activity log. - - -## Configuring activity log alert rules - -You can configure an activity log alert rule based on any top-level property in the JSON object for an activity log event. For more information, see [Categories in the Activity Log](../essentials/activity-log.md#view-the-activity-log). - -An alternative simple way for creating conditions for activity log alert rules is to explore or filter events via [Activity log in Azure portal](../essentials/activity-log.md#view-the-activity-log). In Azure Monitor - Activity log, one can filter and locate a required event and then create an alert rule to notify on similar events by using the **New alert rule** button. - -> [!NOTE] -> An activity log alert rule monitors only for events in the subscription in which the alert rule is created. - -Activity log events have a few common properties which can be used to define an activity log alert rule condition: - -- **Category**: Administrative, Service Health, Resource Health, Autoscale, Security, Policy, or Recommendation. -- **Scope**: The individual resource or set of resource(s) for which the alert on activity log is defined. Scope for an activity log alert can be defined at various levels: - - Resource Level: For example, for a specific virtual machine - - Resource Group Level: For example, all virtual machines in a specific resource group - - Subscription Level: For example, all virtual machines in a subscription (or) all resources in a subscription -- **Resource group**: By default, the alert rule is saved in the same resource group as that of the target defined in Scope. The user can also define the Resource Group where the alert rule should be stored. -- **Resource type**: Resource Manager defined namespace for the target of the alert rule. -- **Operation name**: The [Azure resource provider operation](../../role-based-access-control/resource-provider-operations.md) name utilized for Azure role-based access control. 
Operations not registered with Azure Resource Manager cannot be used in an activity log alert rule. -- **Level**: The severity level of the event (Informational, Warning, Error, or Critical). -- **Status**: The status of the event, typically Started, Failed, or Succeeded. -- **Event initiated by**: Also known as the "caller." The email address or Azure Active Directory identifier of the user (or application) who performed the operation. - -In addition to these comment properties, different activity log events have category-specific properties that can be used to configure an alert rule for events of each category. For example, when creating a service health alert rule you can configure a condition on the impacted region or service that appear in the event. - -## Using action groups - -When an activity log alert is fired, it uses an action group to trigger actions or send notifications. An action group is a reusable set of notification receivers, such as email addresses, webhook URLs, or SMS phone numbers. The receivers can be referenced from multiple alerts rules to centralize and group your notification channels. When you define your activity log alert rule, you have two options. You can: - -* Use an existing action group in your activity log alert rule. -* Create a new action group. - -To learn more about action groups, see [Create and manage action groups in the Azure portal](./action-groups.md). - -## Activity log alert rules limit -You can create up to 100 active activity log alert rules per subscription (including rules for all activity log event categories, such as resource health or service health). This limit can't be increased. -If you are reaching near this limit, there are several guidelines you can follow to optimize the use of activity log alerts rules, so that you can cover more resources and events with the same number of rules: -* A single activity log alert rule can be configured to cover the scope of a single resource, a resource group, or an entire subscription. To reduce the number of rules you're using, consider to replace multiple rules covering a narrow scope with a single rule covering a broad scope. For example, if you have multiple VMs in a subscription, and you want an alert to be triggered whenever one of them is restarted, you can use a single activity log alert rule to cover all the VMs in your subscription. The alert will be triggered whenever any VM in the subscription is restarted. -* A single service health alert rule can cover all the services and Azure regions used by your subscription. If you're using multiple service health alert rules per subscription, you can replace them with a single rule (or with a small number of rules, if you prefer). -* A single resource health alert rule can cover multiple resource types and resources in your subscription. If you're using multiple resource health alert rules per subscription, you can replace them with a smaller number of rules (or even a single rule) that covers multiple resource types. - - -## Next steps - -- Get an [overview of alerts](./alerts-overview.md). -- Learn about [create and modify activity log alerts](alerts-activity-log.md). -- Review the [activity log alert webhook schema](../alerts/activity-log-alerts-webhook.md). -- Learn more about [service health alerts](../../service-health/service-notifications.md). -- Learn more about [Resource health alerts](../../service-health/resource-health-alert-monitor-guide.md). -- Learn more about [Recommendation alerts](../../advisor/advisor-alerts-portal.md). 
diff --git a/articles/azure-monitor/alerts/alerts-common-schema-definitions.md b/articles/azure-monitor/alerts/alerts-common-schema-definitions.md index 9eb048e9bcc1f..859ce511e33c6 100644 --- a/articles/azure-monitor/alerts/alerts-common-schema-definitions.md +++ b/articles/azure-monitor/alerts/alerts-common-schema-definitions.md @@ -11,7 +11,7 @@ ms.date: 07/20/2021 This article describes the [common alert schema definitions](./alerts-common-schema.md) for Azure Monitor, including those for webhooks, Azure Logic Apps, Azure Functions, and Azure Automation runbooks. Any alert instance describes the resource that was affected and the cause of the alert. These instances are described in the common schema in the following sections: -* **Essentials**: A set of standardized fields, common across all alert types, which describe what resource the alert is on, along with additional common alert metadata (for example, severity or description). Definitions of severity can be found in the [alerts overview](alerts-overview.md#overview). +* **Essentials**: A set of standardized fields, common across all alert types, which describe what resource the alert is on, along with additional common alert metadata (for example, severity or description). * **Alert context**: A set of fields that describes the cause of the alert, with fields that vary based on the alert type. For example, a metric alert includes fields like the metric name and metric value in the alert context, whereas an activity log alert has information about the event that generated the alert. **Sample alert payload** diff --git a/articles/azure-monitor/alerts/alerts-common-schema-test-action-definitions.md b/articles/azure-monitor/alerts/alerts-common-schema-test-action-definitions.md index dda4f50b28c7f..63d2f509898d5 100644 --- a/articles/azure-monitor/alerts/alerts-common-schema-test-action-definitions.md +++ b/articles/azure-monitor/alerts/alerts-common-schema-test-action-definitions.md @@ -11,7 +11,7 @@ ms.date: 01/14/2022 This article describes the [common alert schema definitions](./alerts-common-schema.md) for Azure Monitor, including those for webhooks, Azure Logic Apps, Azure Functions, and Azure Automation runbooks. Any alert instance describes the resource that was affected and the cause of the alert. These instances are described in the common schema in the following sections: -* **Essentials**: A set of standardized fields, common across all alert types, which describe what resource the alert is on, along with additional common alert metadata (for example, severity or description). Definitions of severity can be found in the [alerts overview](alerts-overview.md#overview). +* **Essentials**: A set of standardized fields, common across all alert types, which describe what resource the alert is on, along with additional common alert metadata (for example, severity or description). * **Alert context**: A set of fields that describes the cause of the alert, with fields that vary based on the alert type. For example, a metric alert includes fields like the metric name and metric value in the alert context, whereas an activity log alert has information about the event that generated the alert. 
**Sample alert payload** diff --git a/articles/azure-monitor/alerts/alerts-dynamic-thresholds.md b/articles/azure-monitor/alerts/alerts-dynamic-thresholds.md index 5e06e97c88e37..e10db5b7626d6 100644 --- a/articles/azure-monitor/alerts/alerts-dynamic-thresholds.md +++ b/articles/azure-monitor/alerts/alerts-dynamic-thresholds.md @@ -73,7 +73,7 @@ To trigger an alert when there was a violation from a Dynamic Thresholds in 20 m ## How do you find out why a Dynamic Thresholds alert was triggered? -You can explore triggered alert instances in the alerts view either by clicking on the link in the email or text message, or browser to see the alerts view in the Azure portal. [Learn more about the alerts view](./alerts-overview.md#alerts-experience). +You can explore triggered alert instances by clicking on the link in the email or text message, or browse to see the alerts in the Azure portal. [Learn more about the alerts view](./alerts-page.md). The alert view displays: diff --git a/articles/azure-monitor/alerts/alerts-log.md b/articles/azure-monitor/alerts/alerts-log.md index 07fef8a10a4c0..c07a1248b5794 100644 --- a/articles/azure-monitor/alerts/alerts-log.md +++ b/articles/azure-monitor/alerts/alerts-log.md @@ -1,38 +1,30 @@ --- -title: Create, view, and manage log alert rules Using Azure Monitor | Microsoft Docs -description: Use Azure Monitor to create, view, and manage log alert rules +title: Create Azure Monitor log alert rules and manage alert instances | Microsoft Docs +description: Create Azure Monitor log alert rules and manage your alert instances. author: AbbyMSFT ms.author: abbyweisberg ms.topic: conceptual -ms.date: 2/23/2022 +ms.date: 05/23/2022 ms.custom: devx-track-azurepowershell, devx-track-azurecli +ms.reviewer: yanivlavi --- -# Create, view, and manage log alerts using Azure Monitor +# Create Azure Monitor log alert rules and manage alert instances -This article shows you how to create and manage log alerts. Azure Monitor log alerts allow users to use a [Log Analytics](../logs/log-analytics-tutorial.md) query to evaluate resource logs at a set frequency and fire an alert based on the results. Rules can trigger one or more actions using [Action Groups](./action-groups.md). [Learn more about functionality and terminology of log alerts](./alerts-unified-log.md). +This article shows you how to create log alert rules and manage your alert instances. Azure Monitor log alerts allow users to use a [Log Analytics](../logs/log-analytics-tutorial.md) query to evaluate resource logs at a set frequency and fire an alert based on the results. Rules can trigger one or more actions using [alert processing rules](alerts-action-rules.md) and [action groups](./action-groups.md). Learn the concepts behind log alerts [here](alerts-types.md#log-alerts). + +You create an alert rule by combining: + - The resource(s) to be monitored. + - The signal or telemetry from the resource + - Conditions + +And then defining these elements of the triggered alert: + - Alert processing rules + - Action groups - Alert rules are defined by three components: -- Target: A specific Azure resource to monitor. -- Criteria: Logic to evaluate. If met, the alert fires. -- Action: Notifications or automation - email, SMS, webhook, and so on. You can also [create log alert rules using Azure Resource Manager templates](../alerts/alerts-log-create-templates.md). ## Create a new log alert rule in the Azure portal -> [!NOTE] -> This article describes creating alert rules using the new alert rule wizard. 
-> The new alert rule experience is a little different than the old experience. Please note these changes: -> - Previously, search results were included in the payloads of the triggered alert and its associated notifications. This was a limited and error prone solution. To get detailed context information about the alert so that you can decide on the appropriate action : -> - The recommended best practice it to use [Dimensions](alerts-unified-log.md#split-by-alert-dimensions). Dimensions provide the column value that fired the alert, giving you context for why the alert fired and how to fix the issue. -> - When you need to investigate in the logs, use the link in the alert to the search results in Logs. -> - If you need the raw search results or for any other advanced customizations, use Logic Apps. -> - The new alert rule wizard does not support customization of the JSON payload. -> - Use custom properties in the [new API](/rest/api/monitor/scheduledqueryrule-2021-08-01/scheduled-query-rules/create-or-update#actions) to add static parameters and associated values to the webhook actions triggered by the alert. -> - For more advanced customizations, use Logic Apps. -> - The new alert rule wizard does not support customization of the email subject. -> - Customers often use the custom email subject to indicate the resource on which the alert fired, instead of using the Log Analytics workspace. Use the [new API](alerts-unified-log.md#split-by-alert-dimensions) to trigger an alert of the desired resource using the resource id column. -> - For more advanced customizations, use Logic Apps. - -1. In the [portal](https://portal.azure.com/), select the relevant resource. We recommend monitoring at scale by using a subscription or resource group for the alert rule. +1. In the [portal](https://portal.azure.com/), select the relevant resource. We recommend monitoring at scale by using a subscription or resource group. 1. In the Resource menu, select **Logs**. 1. Write a query that will find the log events for which you want to create an alert. You can use the [alert query examples article](../logs/queries.md) to understand what you can discover or [get started on writing your own query](../logs/log-analytics-tutorial.md). Also, [learn how to create optimized alert queries](alerts-log-query.md). 1. From the top command bar, Select **+ New Alert rule**. @@ -40,31 +32,66 @@ You can also [create log alert rules using Azure Resource Manager templates](../ :::image type="content" source="media/alerts-log/alerts-create-new-alert-rule.png" alt-text="Create new alert rule." lightbox="media/alerts-log/alerts-create-new-alert-rule-expanded.png"::: 1. The **Condition** tab opens, populated with your log query. + + By default, the rule counts the number of results in the last 5 minutes. + + If the system detects summarized query results, the rule is automatically updated with that information. :::image type="content" source="media/alerts-log/alerts-logs-conditions-tab.png" alt-text="Conditions Tab."::: -1. In the **Measurement** section, select values for the [**Measure**](./alerts-unified-log.md#measure), [**Aggregation type**](./alerts-unified-log.md#aggregation-type), and [**Aggregation granularity**](./alerts-unified-log.md#aggregation-granularity) fields. - - By default, the rule counts the number of results in the last 5 minutes. - - If the system detects summarized query results, the rule is automatically updated to capture that. - +1. 
In the **Measurement** section, select values for these fields: + + |Field |Description | + |---------|---------| + |Measure|Log alerts can measure two different things, which can be used for different monitoring scenarios:
                  **Table rows**: The number of rows returned can be used to work with events such as Windows event logs, syslog, application exceptions.
                  **Calculation of a numeric column**: Calculations based on any numeric column can be used to include any number of resources. For example, CPU percentage. | + |Aggregation type| The calculation performed on multiple records to aggregate them to one numeric value using the aggregation granularity. For example: Total, Average, Minimum, or Maximum. | + |Aggregation granularity| The interval for aggregating multiple records to one numeric value.| + :::image type="content" source="media/alerts-log/alerts-log-measurements.png" alt-text="Measurements."::: -1. (Optional) In the **Split by dimensions** section, select [alert splitting by dimensions](./alerts-unified-log.md#split-by-alert-dimensions): - - If detected, The **Resource ID column** is selected automatically and changes the context of the fired alert to the record's resource. - - Clear the **Resource ID column** to fire alerts on multiple resources in subscriptions or resource groups. For example, you can create a query that checks if 80% of the resource group's virtual machines are experiencing high CPU usage. - - You can use the dimensions table to select up to six more splittings for any number or text columns types. - - Alerts are fired individually for each unique splitting combination. The alert payload includes the combination that triggered the alert. -1. In the **Alert logic** section, set the **Alert logic**: [**Operator**, **Threshold Value**](./alerts-unified-log.md#threshold-and-operator), and [**Frequency**](./alerts-unified-log.md#frequency). +1. (Optional) In the **Split by dimensions** section, you can create resource-centric alerts at scale for a subscription or resource group. Splitting by dimensions groups combinations of numerical or string columns to monitor for the same condition on multiple Azure resources. + + If you select more than one dimension value, each time series that results from the combination triggers its own alert and is charged separately. The alert payload includes the combination that triggered the alert. + + You can select up to six more splittings for any number or text columns types. + + You can also decide **not** to split when you want a condition applied to multiple resources in the scope. For example, if you want to fire an alert if at least five machines in the resource group scope have CPU usage over 80%. + + Select values for these fields: - :::image type="content" source="media/alerts-log/alerts-rule-preview-agg-params-and-splitting.png" alt-text="Preview alert rule parameters."::: + |Field |Description | + |---------|---------| + |Dimension name|Dimensions can be either number or string columns. Dimensions are used to monitor specific time series and provide context to a fired alert.
                  Splitting on the Azure Resource ID column makes the specified resource into the alert target. If a Resource ID column is detected, it is selected automatically and changes the context of the fired alert to the record's resource. | + |Operator|The operator used on the dimension name and value. | + |Dimension values|The dimension values are based on data from the last 48 hours. Select **Add custom value** to add custom dimension values. | -1. (Optional) In the **Advanced options** section, set the [**Number of violations to trigger the alert**](./alerts-unified-log.md#number-of-violations-to-trigger-alert). + :::image type="content" source="media/alerts-log/alerts-create-log-rule-dimensions.png" alt-text="Screenshot of the splitting by dimensions section of a new log alert rule."::: - :::image type="content" source="media/alerts-log/alerts-rule-preview-advanced-options.png" alt-text="Advanced options."::: +1. In the **Alert logic** section, select values for these fields: + + |Field |Description | + |---------|---------| + |Operator| The query results are transformed into a number. In this field, select the operator to use to compare the number against the threshold.| + |Threshold value| A number value for the threshold. | + |Frequency of evaluation|The interval in which the query is run. Can be set from a minute to a day. | + + :::image type="content" source="media/alerts-log/alerts-create-log-rule-logic.png" alt-text="Screenshot of alert logic section of a new log alert rule."::: + +1. (Optional) In the **Advanced options** section, you can specify the number of failures and the alert evaluation period required to trigger an alert. For example, if you set the **Aggregation granularity** to 5 minutes, you can specify that you only want to trigger an alert if there were three failures (15 minutes) in the last hour. This setting is defined by your application business policy. + + Select values for these fields under **Number of violations to trigger the alert**: + + |Field |Description | + |---------|---------| + |Number of violations|The number of violations that have to occur to trigger the alert.| + |Evaluation period|The amount of time within which those violations have to occur. | + |Override query time range| Enter a value for this field if the alert evaluation period is different than the query time range.| + + :::image type="content" source="media/alerts-log/alerts-rule-preview-advanced-options.png" alt-text="Screenshot of the advanced options section of a new log alert rule."::: 1. The **Preview** chart shows query evaluations results over time. You can change the chart period or select different time series that resulted from unique alert splitting by dimensions. - :::image type="content" source="media/alerts-log/alerts-create-alert-rule-preview.png" alt-text="Alert rule preview."::: + :::image type="content" source="media/alerts-log/alerts-create-alert-rule-preview.png" alt-text="Screenshot of a preview of a new alert rule."::: 1. From this point on, you can select the **Review + create** button at any time. 1. In the **Actions** tab, select or create the required [action groups](./action-groups.md). @@ -72,12 +99,12 @@ You can also [create log alert rules using Azure Resource Manager templates](../ :::image type="content" source="media/alerts-log/alerts-rule-actions-tab.png" alt-text="Actions tab."::: 1. In the **Details** tab, define the **Project details** and the **Alert rule details**. -1. 
(Optional) In the **Advanced options** section, you can set several options, including whether to **Enable upon creation**, or to [**Mute actions**](./alerts-unified-log.md#state-and-resolving-alerts) for a period after the alert rule fires. +1. (Optional) In the **Advanced options** section, you can set several options, including whether to **Enable upon creation**, or to **Mute actions** for a period of time after the alert rule fires. :::image type="content" source="media/alerts-log/alerts-rule-details-tab.png" alt-text="Details tab."::: -> [!NOTE] -> If you, or your administrator assigned the Azure Policy **Azure Log Search Alerts over Log Analytics workspaces should use customer-managed keys**, you must select **Check workspace linked storage** option in **Advanced options**, or the rule creation will fail as it will not meet the policy requirements. + > [!NOTE] + > If you, or your administrator assigned the Azure Policy **Azure Log Search Alerts over Log Analytics workspaces should use customer-managed keys**, you must select **Check workspace linked storage** option in **Advanced options**, or the rule creation will fail as it will not meet the policy requirements. 1. In the **Tags** tab, set any required tags on the alert rule resource. @@ -88,6 +115,21 @@ You can also [create log alert rules using Azure Resource Manager templates](../ :::image type="content" source="media/alerts-log/alerts-rule-review-create.png" alt-text="Review and create tab."::: +> [!NOTE] +> This section above describes creating alert rules using the new alert rule wizard. +> The new alert rule experience is a little different than the old experience. Please note these changes: +> - Previously, search results were included in the payloads of the triggered alert and its associated notifications. This was a limited solution, since the email included only 10 rows from the unfiltered results while the webhook payload contained 1000 unfiltered results. +> To get detailed context information about the alert so that you can decide on the appropriate action : +> - We recommend using [Dimensions](alerts-unified-log.md#split-by-alert-dimensions). Dimensions provide the column value that fired the alert, giving you context for why the alert fired and how to fix the issue. +> - When you need to investigate in the logs, use the link in the alert to the search results in Logs. +> - If you need the raw search results or for any other advanced customizations, use Logic Apps. +> - The new alert rule wizard does not support customization of the JSON payload. +> - Use custom properties in the [new API](/rest/api/monitor/scheduledqueryrule-2021-08-01/scheduled-query-rules/create-or-update#actions) to add static parameters and associated values to the webhook actions triggered by the alert. +> - For more advanced customizations, use Logic Apps. +> - The new alert rule wizard does not support customization of the email subject. +> - Customers often use the custom email subject to indicate the resource on which the alert fired, instead of using the Log Analytics workspace. Use the [new API](alerts-unified-log.md#split-by-alert-dimensions) to trigger an alert of the desired resource using the resource id column. +> - For more advanced customizations, use Logic Apps. + ## Enable recommended out-of-the-box alert rules in the Azure portal (preview) > [!NOTE] > The alert rule recommendations feature is currently in preview and is only enabled for VMs. 
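After a log alert rule exists (whether created through the wizard or enabled from the recommended rules), you can confirm its configuration from the command line. The following is a minimal sketch that runs the Azure CLI from PowerShell and assumes the scheduled-query CLI extension is installed; the resource group and rule names are placeholders.

```powershell
# List log alert (scheduled query) rules in a resource group, then show a single rule's settings (placeholder values).
az monitor scheduled-query list --resource-group "<resource-group>" --output table
az monitor scheduled-query show --resource-group "<resource-group>" --name "<alert-rule-name>"
```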
@@ -165,7 +207,7 @@ az deployment group create \ On success for creation, 201 is returned. On success for update, 200 is returned. ## Next steps -* Learn about [log alerts](./alerts-unified-log.md). +* Learn about [Log alerts](alerts-types.md#log-alerts). * Create log alerts using [Azure Resource Manager Templates](./alerts-log-create-templates.md). * Understand [webhook actions for log alerts](./alerts-log-webhook.md). * Learn more about [log queries](../logs/log-query-overview.md). diff --git a/articles/azure-monitor/alerts/alerts-managing-alert-instances.md b/articles/azure-monitor/alerts/alerts-managing-alert-instances.md deleted file mode 100644 index 47d497157b30f..0000000000000 --- a/articles/azure-monitor/alerts/alerts-managing-alert-instances.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Manage alert instances in Azure Monitor -description: Managing alert instances across Azure -ms.topic: conceptual -ms.date: 2/23/2022 - ---- -# Manage alert instances with unified alerts - -With the [unified alerts experience](./alerts-overview.md) in Azure Monitor, you can see all your different types of alerts across Azure. Unified alerts span multiple subscriptions in a single pane. This article shows how you can view your alert instances, and how to find specific alert instances for troubleshooting. - -> [!NOTE] -> You can only access alerts generated in the last 30 days. - -## Go to the alerts page - -You can go to the alerts page in any of the following ways: - -- In the [Azure portal](https://portal.azure.com/), select **Monitor** > **Alerts**. - - ![Screenshot of Monitor Alerts](media/alerts-managing-alert-instances/monitoring-alerts-managing-alert-instances-toc.jpg) - -- Use the context of a specific resource. Open a resource, go to the **Monitoring** section, and choose **Alerts**. The landing page is pre-filtered for alerts on that specific resource. - - ![Screenshot of resource Monitoring Alerts](media/alerts-managing-alert-instances/alert-resource.JPG) - -## The alerts page - -The **Alerts** page summarizes all your alert instances across Azure. -### Alert Recommendations (preview) -> [!NOTE] -> The alert rule recommendations feature is currently in preview and is only enabled for VMs. - -If you don't have alert rules defined for the selected resource, either individually or as part of a resource group or subscription, you can [create a new alert rule](alerts-log.md#create-a-new-log-alert-rule-in-the-azure-portal), or [enable recommended out-of-the-box alert rules in the Azure portal (preview)](alerts-log.md#enable-recommended-out-of-the-box-alert-rules-in-the-azure-portal-preview). - -:::image type="content" source="media/alerts-managing-alert-instances/enable-recommended-alert-rules.jpg" alt-text="Screenshot of alerts page with link to recommended alert rules."::: -### Alerts summary pane -If you have alerts configured for this resource, the alerts summary pane summarizes the alerts fired in the last 24 hours. You can modify the list of alert instances by selecting filters such as **time range**, **subscription**, **alert condition**, **severity**, and more. Select an alert instance. - -To see more details about a specific alert instance, select the alerts instance to open the **Alert Details** page. -> [!NOTE] -> If you navigated to the alerts page by selecting a specific alert severity, the list is pre-filtered for that severity. 
- -:::image type="content" source="media/alerts-managing-alert-instances/alerts-page.png" alt-text="Screenshot of alerts page."::: - -## The alerts details page - The **Alerts details** page provides details about the selected alert. Select **Change user response** to change the user response to the alert. You can see all closed alerts in the **History** tab. - -:::image type="content" source="media/alerts-managing-alert-instances/alerts-details-page.png" alt-text="Screenshot of alerts details page."::: diff --git a/articles/azure-monitor/alerts/alerts-managing-alert-states.md b/articles/azure-monitor/alerts/alerts-managing-alert-states.md deleted file mode 100644 index 42a9669b5fa9a..0000000000000 --- a/articles/azure-monitor/alerts/alerts-managing-alert-states.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Manage alert and smart group states -description: Managing the states of the alert and smart group instances -ms.topic: conceptual -author: anantr -ms.date: 2/23/2022 - ---- - -# Manage alert and smart group states - -Alerts in Azure Monitor now have an [alert state and a monitor condition](./alerts-overview.md) and, similarly, Smart Groups have a [smart group state](./alerts-smartgroups-overview.md?toc=%2fazure%2fazure-monitor%2ftoc.json). Changes to the state are now captured in history associated with the respective alert or smart group. This article walks you through the process of changing the state, for both an alert and a smart group. - -## Change the state of an alert - -1. You can change the state of an alert in the following different ways: - * In the All Alerts page, click the checkbox next to the alerts you wish to change the state of, and click change state. - ![Screenshot shows the All Alerts page with Change state selected.](./media/alerts-managing-alert-states/state-all-alerts.jpg) - * In the Alert Details page for a particular alert instance, you can click change state - ![Screenshot shows the Alert Details page with Change alert state selected.](./media/alerts-managing-alert-states/state-alert-details.jpg) - * In the Alert Details page for a specific alert instance, in the Smart Group pane you can click the checkbox next to the alerts you wish - ![Screenshot shows the Alert Details page for the heartbeat alert with some instances having check marks.](./media/alerts-managing-alert-states/state-alert-details-sg.jpg) - - * In the Smart Group Details page, in the list of member alerts you can click the checkbox next to the alerts you wish to change the state of and click Change Stateto change the state of and click Change State. - ![Screenshot shows the Smart Group Details page where you can select alerts for which to change state.](./media/alerts-managing-alert-states/state-sg-details-alerts.jpg) -1. On clicking Change State, a popup opens up allowing you to select the state (New/Acknowledged/Closed) and enter a comment if necessary. -![Screenshot shows the Details Change alert dialog box.](./media/alerts-managing-alert-states/state-alert-change.jpg) -1. Once this is done, the state change is recorded in the history of the respective alert. This can be viewed by opening the respective Details page, and checking the history section. -![Screenshot shows the history of state changes.](./media/alerts-managing-alert-states/state-alert-history.jpg) - -## Change the state of a smart group -1. You can change the state of a smart group in the following different ways: - 1. 
In the Smart Group list page, you can click the checkbox next to the smart groups you wish to change the state of and click Change State - ![Screenshot shows the Change State page for Smart Groups.](./media/alerts-managing-alert-states/state-sg-list.jpg) - 1. In the Smart Group Details page, you can click change state - ![Screenshot shows the Smart Group Details page with Change smart group state selected.](./media/alerts-managing-alert-states/state-sg-details.jpg) -1. On clicking Change State, a popup opens up allowing you to select the state (New/Acknowledged/Closed) and enter a comment if necessary. -![Screenshot shows the Change state dialog box for the smart group.](./media/alerts-managing-alert-states/state-sg-change.jpg) - > [!NOTE] - > Changing the state of a smart group does not change the state of the individual member alerts. - -1. Once this is done, the state change is recorded in the history of the respective smart group. This can be viewed by opening the respective Details page, and checking the history section. -![Screenshot shows the history of changes for the smart group.](./media/alerts-managing-alert-states/state-sg-history.jpg) \ No newline at end of file diff --git a/articles/azure-monitor/alerts/alerts-managing-smart-groups.md b/articles/azure-monitor/alerts/alerts-managing-smart-groups.md deleted file mode 100644 index dbfa3ebc4d607..0000000000000 --- a/articles/azure-monitor/alerts/alerts-managing-smart-groups.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Manage smart groups (preview) -description: Managing Smart Groups created over your alert instances -ms.topic: conceptual -ms.date: 2/23/2022 - ---- - -# Manage smart groups (preview) - -[Smart groups (preview)](./alerts-smartgroups-overview.md?toc=%2fazure%2fazure-monitor%2ftoc.json) use machine learning algorithms to group together alerts on the basis of co-occurrence or similarity, so that the user can now manage smart groups instead of having to manage each alert individually. This article will walk you through how to access and use smart groups in Azure Monitor. - -1. To see the Smart Groups created for your alert instances you can either: - - 1. Click on **Smart Groups** from the **Alerts Summary** page. - ![Screenshot shows the Alert Summary page with Smart groups highlighted.](./media/alerts-managing-smart-groups/sg-alerts-summary.jpg) - - 1. Click on Alerts by Smart Groups from the All Alerts page. - ![Screenshot shows the All Alerts page with Alert by Smart Group highlighted.](./media/alerts-managing-smart-groups/sg-all-alerts.jpg) - -2. This takes you to the list view for all Smart Groups created over your alert instances. Instead of sifting through multiple alerts, you can now deal with the smart groups instead. -![Screenshot shows the All Alerts page.](./media/alerts-managing-smart-groups/sg-list.jpg) - -3. Clicking on any Smart Group opens up the details page, where you can see the grouping reason, along with the member alerts. This aggregation allows you to deal with a singular smart group, instead of sifting through multiple alerts. 
-![Screenshot shows the Details page.](./media/alerts-managing-smart-groups/sg-details.jpg) diff --git a/articles/azure-monitor/alerts/alerts-metric-near-real-time.md b/articles/azure-monitor/alerts/alerts-metric-near-real-time.md index 281bef030d425..a61ffe0a9408e 100644 --- a/articles/azure-monitor/alerts/alerts-metric-near-real-time.md +++ b/articles/azure-monitor/alerts/alerts-metric-near-real-time.md @@ -5,7 +5,7 @@ author: harelbr ms.author: harelbr services: monitoring ms.topic: conceptual -ms.date: 5/11/2022 +ms.date: 5/18/2022 --- # Supported resources for metric alerts in Azure Monitor @@ -109,7 +109,7 @@ Here's the full list of Azure Monitor metric sources supported by the newer aler |Microsoft.Search/searchServices | No | No | [Search services](../essentials/metrics-supported.md#microsoftsearchsearchservices) | |Microsoft.ServiceBus/namespaces | Yes | No | [Service Bus](../essentials/metrics-supported.md#microsoftservicebusnamespaces) | |Microsoft.SignalRService/WebPubSub | Yes | No | [Web PubSub Service](../essentials/metrics-supported.md#microsoftsignalrservicewebpubsub) | -|Microsoft.Sql/managedInstances | No | Yes | [SQL Managed Instances](../essentials/metrics-supported.md#microsoftsqlmanagedinstances) | +|Microsoft.Sql/managedInstances | No | No | [SQL Managed Instances](../essentials/metrics-supported.md#microsoftsqlmanagedinstances) | |Microsoft.Sql/servers/databases | No | Yes | [SQL Databases](../essentials/metrics-supported.md#microsoftsqlserversdatabases) | |Microsoft.Sql/servers/elasticPools | No | Yes | [SQL Elastic Pools](../essentials/metrics-supported.md#microsoftsqlserverselasticpools) | |Microsoft.Storage/storageAccounts |Yes | No | [Storage Accounts](../essentials/metrics-supported.md#microsoftstoragestorageaccounts)| diff --git a/articles/azure-monitor/alerts/alerts-metric-overview.md b/articles/azure-monitor/alerts/alerts-metric-overview.md deleted file mode 100644 index a6407880caa49..0000000000000 --- a/articles/azure-monitor/alerts/alerts-metric-overview.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -title: Understand how metric alerts work in Azure Monitor. -description: Get an overview of what you can do with metric alerts and how they work in Azure Monitor. -ms.date: 10/14/2021 -ms.topic: conceptual - ---- - -# Understand how metric alerts work in Azure Monitor - -Metric alerts in Azure Monitor work on top of multi-dimensional metrics. These metrics could be [platform metrics](alerts-metric-near-real-time.md#metrics-and-dimensions-supported), [custom metrics](../essentials/metrics-custom-overview.md), [popular logs from Azure Monitor converted to metrics](./alerts-metric-logs.md) and Application Insights metrics. Metric alerts evaluate at regular intervals to check if conditions on one or more metric time-series are true and notify you when the evaluations are met. Metric alerts are stateful by default, that is, they only send out notifications when the state changes (fired, resolved). If you want to make them stateless, see [make metric alerts occur every time my condition is met](alerts-troubleshoot-metric.md#make-metric-alerts-occur-every-time-my-condition-is-met). - -## How do metric alerts work? - -You can define a metric alert rule by specifying a target resource to be monitored, metric name, condition type (static or dynamic), and the condition (an operator and a threshold/sensitivity) and an action group to be triggered when the alert rule fires. Condition types affect the way thresholds are determined. 
[Learn more about Dynamic Thresholds condition type and sensitivity options](../alerts/alerts-dynamic-thresholds.md). - -### Alert rule with static condition type - -Let's say you have created a simple static threshold metric alert rule as follows: - -- Target Resource (the Azure resource you want to monitor): myVM -- Metric: Percentage CPU -- Condition Type: Static -- Aggregation type (a statistic that is run over raw metric values. [Supported aggregation types](../essentials/metrics-aggregation-explained.md#aggregation-types) are Minimum, Maximum, Average, Total, Count): Average -- Period (the look back window over which metric values are checked): Over the last 5 mins -- Frequency (the frequency with which the metric alert checks if the conditions are met): 1 min -- Operator: Greater Than -- Threshold: 70 - -From the time the alert rule is created, the monitor runs every 1 min and looks at metric values for the last 5 minutes and checks if the average of those values exceeds 70. If the condition is met that is, the average Percentage CPU for the last 5 minutes exceeds 70, the alert rule fires an activated notification. If you have configured an email or a web hook action in the action group associated with the alert rule, you will receive an activated notification on both. - -When you are using multiple conditions in one rule, the rule "ands" the conditions together. That is, an alert fires when all the conditions in the alert rule evaluate as true and resolve when one of the conditions is no longer true. An example for this type of alert rule would be to monitor an Azure virtual machine and alert when both "Percentage CPU is higher than 90%" and "Queue length is over 300 items". - -### Alert rule with dynamic condition type - -Let's say you have created a simple Dynamic Thresholds metric alert rule as follows: - -- Target Resource (the Azure resource you want to monitor): myVM -- Metric: Percentage CPU -- Condition Type: Dynamic -- Aggregation Type (a statistic that is run over raw metric values. [Supported aggregation types](../essentials/metrics-aggregation-explained.md#aggregation-types) are Minimum, Maximum, Average, Total, Count): Average -- Period (the look back window over which metric values are checked): Over the last 5 mins -- Frequency (the frequency with which the metric alert checks if the conditions are met): 1 min -- Operator: Greater Than -- Sensitivity: Medium -- Look Back Periods: 4 -- Number of Violations: 4 - -Once the alert rule is created, the Dynamic Thresholds machine learning algorithm will acquire historical data that is available, calculate threshold that best fits the metric series behavior pattern and will continuously learn based on new data to make the threshold more accurate. - -From the time the alert rule is created, the monitor runs every 1 min and looks at metric values in the last 20 minutes grouped into 5 minutes periods and checks if the average of the period values in each of the 4 periods exceeds the expected threshold. If the condition is met that is, the average Percentage CPU in the last 20 minutes (four 5 minutes periods) deviated from expected behavior four times, the alert rule fires an activated notification. If you have configured an email or a web hook action in the action group associated with the alert rule, you will receive an activated notification on both. - -### View and resolution of fired alerts - -The above examples of alert rules firing can also be viewed in the Azure portal in the **All Alerts** blade. 
- -Say the usage on "myVM" continues being above the threshold in subsequent checks, the alert rule will not fire again until the conditions are resolved. - -After some time, the usage on "myVM" comes back down to normal (goes below the threshold). The alert rule monitors the condition for two more times, to send out a resolved notification. The alert rule sends out a resolved/deactivated message when the alert condition is not met for three consecutive periods to reduce noise in case of flapping conditions. - -As the resolved notification is sent out via web hooks or email, the status of the alert instance (called monitor state) in Azure portal is also set to resolved. - -> [!NOTE] -> -> When an alert rule monitors multiple conditions, a fired alert will be resolved if at least one of the conditions is no longer met for three consecutive periods. - -### Using dimensions - -Metric alerts in Azure Monitor also support monitoring multiple dimensions value combinations with one rule. Let's understand why you might use multiple dimension combinations with the help of an example. - -Say you have an App Service plan for your website. You want to monitor CPU usage on multiple instances running your web site/app. You can do that using a metric alert rule as follows: - -- Target resource: myAppServicePlan -- Metric: Percentage CPU -- Condition Type: Static -- Dimensions - - Instance = InstanceName1, InstanceName2 -- Aggregation Type: Average -- Period: Over the last 5 mins -- Frequency: 1 min -- Operator: GreaterThan -- Threshold: 70 - -Like before, this rule monitors if the average CPU usage for the last 5 minutes exceeds 70%. However, with the same rule you can monitor two instances running your website. Each instance will get monitored individually and you will get notifications individually. - -Say you have a web app that is seeing massive demand and you will need to add more instances. The above rule still monitors just two instances. However, you can create a rule as follows: - -- Target resource: myAppServicePlan -- Metric: Percentage CPU -- Condition Type: Static -- Dimensions - - Instance = * -- Aggregation Type: Average -- Period: Over the last 5 mins -- Frequency: 1 min -- Operator: GreaterThan -- Threshold: 70 - -This rule will automatically monitor all values for the instance i.e you can monitor your instances as they come up without needing to modify your metric alert rule again. - -When monitoring multiple dimensions, Dynamic Thresholds alerts rule can create tailored thresholds for hundreds of metric series at a time. Dynamic Thresholds results in fewer alert rules to manage and significant time saving on management and creation of alerts rules. - -Say you have a web app with many instances and you don't know what the most suitable threshold is. The above rules will always use threshold of 70%. However, you can create a rule as follows: - -- Target resource: myAppServicePlan -- Metric: Percentage CPU -- Condition Type: Dynamic -- Dimensions - - Instance = * -- Aggregation Type: Average -- Period: Over the last 5 mins -- Frequency: 1 min -- Operator: GreaterThan -- Sensitivity: Medium -- Look Back Periods: 1 -- Number of Violations: 1 - -This rule monitors if the average CPU usage for the last 5 minutes exceeds the expected behavior for each instance. The same rule you can monitor instances as they come up without needing to modify your metric alert rule again. 
Each instance will get a threshold that fits the metric series behavior pattern and will continuously change based on new data to make the threshold more accurate. Like before, each instance will be monitored individually and you will get notifications individually. - -Increasing look-back periods and number of violations can also allow filtering alerts to only alert on your definition of a significant deviation. [Learn more about Dynamic Thresholds advanced options](../alerts/alerts-dynamic-thresholds.md#what-do-the-advanced-settings-in-dynamic-thresholds-mean). - -> [!NOTE] -> -> We recommend choosing an *Aggregation granularity (Period)* that is larger than the *Frequency of evaluation*, to reduce the likelihood of missing the first evaluation of added time series in the following cases: -> - Metric alert rule that monitors multiple dimensions – When a new dimension value combination is added -> - Metric alert rule that monitors multiple resources – When a new resource is added to the scope -> - Metric alert rule that monitors a metric that isn’t emitted continuously (sparse metric) – When the metric is emitted after a period longer than 24 hours in which it wasn’t emitted - -## Monitoring at scale using metric alerts in Azure Monitor - -So far, you have seen how a single metric alert could be used to monitor one or many metric time-series related to a single Azure resource. Many times, you might want the same alert rule applied to many resources. Azure Monitor also supports monitoring multiple resources (of the same type) with one metric alert rule, for resources that exist in the same Azure region. - -This feature is currently supported for platform metrics (not custom metrics) for the following services in the following Azure clouds: - -| Service | Public Azure | Government | China | -|:--------|:--------|:--------|:--------| -| Virtual machines1 | **Yes** | **Yes** | **Yes** | -| SQL server databases | **Yes** | **Yes** | **Yes** | -| SQL server elastic pools | **Yes** | **Yes** | **Yes** | -| NetApp files capacity pools | **Yes** | **Yes** | **Yes** | -| NetApp files volumes | **Yes** | **Yes** | **Yes** | -| Key vaults | **Yes** | **Yes** | **Yes** | -| Azure Cache for Redis | **Yes** | **Yes** | **Yes** | -| Data box edge devices | **Yes** | **Yes** | **Yes** | -| Recovery Services vaults | **Yes** | **No** | **No** | - -1 Not supported for virtual machine network metrics (Network In Total, Network Out Total, Inbound Flows, Outbound Flows, Inbound Flows Maximum Creation Rate, Outbound Flows Maximum Creation Rate). - -You can specify the scope of monitoring by a single metric alert rule in one of three ways. For example, with virtual machines you can specify the scope as: - -- a list of virtual machines (in one Azure region) within a subscription -- all virtual machines (in one Azure region) in one or more resource groups in a subscription -- all virtual machines (in one Azure region) in a subscription - -> [!NOTE] -> -> The scope of a multi-resource metric alert rule must contain at least one resource of the selected resource type. - -Creating metric alert rules that monitor multiple resources is like [creating any other metric alert](../alerts/alerts-metric.md) that monitors a single resource. Only difference is that you would select all the resources you want to monitor. You can also create these rules through [Azure Resource Manager templates](./alerts-metric-create-templates.md#template-for-a-metric-alert-that-monitors-multiple-resources). 
You will receive individual notifications for each monitored resource. - -> [!NOTE] -> -> In a metric alert rule that monitors multiple resources, only one condition is allowed. - -## Typical latency - -For metric alerts, typically you will get notified in under 5 minutes if you set the alert rule frequency to be 1 min. In cases of heavy load for notification systems, you might see a longer latency. - -## Supported resource types for metric alerts - -You can find the full list of supported resource types in this [article](./alerts-metric-near-real-time.md#metrics-and-dimensions-supported). - -## Pricing model - -Each Metrics Alert rule is billed based for time series monitored. Prices for Metric Alert rules are available on the [Azure Monitor pricing page](https://azure.microsoft.com/pricing/details/monitor/). - -## Next steps - -- [Learn how to create, view, and manage metric alerts in Azure](../alerts/alerts-metric.md) -- [Learn how to create alerts within Azure Monitor Metrics Explorer](../essentials/metrics-charts.md#alert-rules) -- [Learn how to deploy metric alerts using Azure Resource Manager templates](./alerts-metric-create-templates.md) -- [Learn more about action groups](./action-groups.md) -- [Learn more about Dynamic Thresholds condition type](../alerts/alerts-dynamic-thresholds.md) -- [Learn more about troubleshooting problems in metric alerts](alerts-troubleshoot-metric.md) diff --git a/articles/azure-monitor/alerts/alerts-overview.md b/articles/azure-monitor/alerts/alerts-overview.md index 0b22f92374d61..06f37bcdfa88c 100644 --- a/articles/azure-monitor/alerts/alerts-overview.md +++ b/articles/azure-monitor/alerts/alerts-overview.md @@ -1,184 +1,108 @@ --- -title: Overview of alerting and notification monitoring in Azure -description: Overview of alerting in Azure Monitor -ms.topic: conceptual -ms.date: 02/14/2021 - +title: Overview of Azure Monitor Alerts +description: Learn about Azure Monitor alerts, alert rules, action processing rules, and action groups. You will learn how all of these work together to monitor your system and notify you if something is wrong. +author: AbbyMSFT +ms.author: abbyweisberg +ms.topic: overview +ms.date: 06/09/2022 +ms.custom: template-overview +ms.reviewer: harelb --- +# What are Azure Monitor Alerts? -# Overview of alerts in Microsoft Azure - -This article describes what alerts are, their benefits, and how to get started using them. - -## What are alerts in Microsoft Azure? - -Alerts proactively notify you when issues are found with your infrastructure or application using your monitoring data in Azure Monitor. They allow you to identify and address issues before the users of your system notice them. - -## Overview - -The diagram below represents the flow of alerts. - -![Diagram of alert flow](media/alerts-overview/Azure-Monitor-Alerts.svg) - -Alert rules are separated from alerts and the actions taken when an alert fires. The alert rule captures the target and criteria for alerting. The alert rule can be in an enabled or a disabled state. Alerts only fire when enabled. - -The following are key attributes of an alert rule: - -**Target Resource** - Defines the scope and signals available for alerting. A target can be any Azure resource. Example targets: - -- Virtual machines. -- Storage accounts. -- Log Analytics workspace. -- Application Insights. 
+Alerts help you detect and address issues before users notice them by proactively notifying you when Azure Monitor data indicates that there may be a problem with your infrastructure or application. -For certain resources (like virtual machines), you can specify multiple resources as the target of the alert rule. +You can alert on any metric or log data source in the Azure Monitor data platform. -**Signal** - Emitted by the target resource. Signals can be of the following types: metric, activity log, Application Insights, and log. +This diagram shows you how alerts work: -**Criteria** - A combination of signal and logic applied on a target resource. Examples: +:::image type="content" source="media/alerts-overview/alerts-flow.png" alt-text="Graphic explaining Azure Monitor alerts."::: -- Percentage CPU > 70% -- Server Response Time > 4 ms -- Result count of a log query > 100 - -**Alert Name** - A specific name for the alert rule configured by the user. - -**Alert Description** - A description for the alert rule configured by the user. - -**Severity** - The severity of the alert after the criteria specified in the alert rule is met. Severity can range from 0 to 4. - -- Sev 0 = Critical -- Sev 1 = Error -- Sev 2 = Warning -- Sev 3 = Informational -- Sev 4 = Verbose - -**Action** - A specific action taken when the alert is fired. For more information, see [Action Groups](../alerts/action-groups.md). - -## What you can alert on - -You can alert on metrics and logs, as described in [monitoring data sources](./../agents/data-sources.md). Signals include but aren't limited to: +An **alert rule** monitors your telemetry and captures a signal that indicates that something is happening on a specified target. The alert rule captures the signal and checks to see if the signal meets the criteria of the condition. If the conditions are met, an alert is triggered, which initiates the associated action group and updates the state of the alert. + +You create an alert rule by combining: + - The resource(s) to be monitored. + - The signal or telemetry from the resource + - Conditions + +If you're monitoring more than one resource, the condition is evaluated separately for each of the resources and alerts are fired for each resource separately. + +Once an alert is triggered, the alert is made up of: + - An **alert processing rule** allows you to apply processing on fired alerts. Alert processing rules modify the fired alerts as they are being fired. You can use alert processing rules to add or suppress action groups, apply filters or have the rule processed on a pre-defined schedule. + - An **action group** can trigger notifications or an automated workflow to let users know that an alert has been triggered. Action groups can include: + - Notification methods such as email, SMS, and push notifications. + - Automation Runbooks + - Azure functions + - ITSM incidents + - Logic Apps + - Secure webhooks + - Webhooks + - Event hubs +- The **alert condition** is set by the system. When an alert fires, the alert’s monitor condition is set to ‘fired’, and when the underlying condition that caused the alert to fire clears, the monitor condition is set to ‘resolved’. +- The **user response** is set by the user and doesn’t change until the user changes it. + +You can see all alert instances in all your Azure resources generated in the last 30 days on the **[Alerts page](alerts-page.md)** in the Azure portal. +## Types of alerts + +There are four types of alerts. This table provides a brief description of each alert type. 
+See [this article](alerts-types.md) for detailed information about each alert type and how to choose which alert type best suits your needs.
+
+|Alert type|Description|
+|:---------|:---------|
+|[Metric alerts](alerts-types.md#metric-alerts)|Metric alerts evaluate resource metrics at regular intervals. Metrics can be platform metrics, custom metrics, logs from Azure Monitor converted to metrics, or Application Insights metrics. Metric alerts have several additional features, such as the ability to apply multiple conditions and dynamic thresholds.|
+|[Log alerts](alerts-types.md#log-alerts)|Log alerts allow users to use a Log Analytics query to evaluate resource logs at a predefined frequency.|
+|[Activity log alerts](alerts-types.md#activity-log-alerts)|Activity log alerts are triggered when a new activity log event occurs that matches the defined conditions.|
+|[Smart detection alerts](alerts-types.md#smart-detection-alerts)|Smart detection on an Application Insights resource automatically warns you of potential performance problems and failure anomalies in your web application. You can migrate smart detection on your Application Insights resource to create alert rules for the different smart detection modules.|
+## Out-of-the-box alert rules (preview)
+
+If you don't have alert rules defined for the selected resource, you can [enable recommended out-of-the-box alert rules in the Azure portal](alerts-log.md#enable-recommended-out-of-the-box-alert-rules-in-the-azure-portal-preview).
-- Metric values
-- Log search queries
-- Activity log events
-- Health of the underlying Azure platform
-- Tests for website availability
-## Alerts experience
-### Alerts page
-The Alerts page provides a summary of the alerts created in the last 24 hours.
-### Alert Recommendations (preview)
> [!NOTE]
> The alert rule recommendations feature is currently in preview and is only enabled for VMs.
-If you don't have alert rules defined for the selected resource, either individually or as part of a resource group or subscription, you can [create a new alert rule](alerts-log.md#create-a-new-log-alert-rule-in-the-azure-portal), or [enable recommended out-of-the-box alert rules in the Azure portal (preview)](alerts-log.md#enable-recommended-out-of-the-box-alert-rules-in-the-azure-portal-preview).
-
-:::image type="content" source="media/alerts-managing-alert-instances/enable-recommended-alert-rules.jpg" alt-text="Screenshot of alerts page with link to recommended alert rules.":::
-### Alerts summary pane
-If you have alerts configured for this resource, the alerts summary pane summarizes the alerts fired in the last 24 hours. You can filter the list by the subscription or any of the filter parameters at the top of the page. The page displays the total alerts for each severity. Select a severity to filter the alerts by that severity.
-> [!NOTE]
- > You can only access alerts generated in the last 30 days.
-
-You can also [programmatically enumerate the alert instances generated on your subscriptions by using REST APIs](#manage-your-alert-instances-programmatically).
+## Azure role-based access control (Azure RBAC) for alerts
-:::image type="content" source="media/alerts-overview/alerts-page.png" alt-text="Screenshot of alerts page.":::
+You can only access, create, or manage alerts for resources for which you have permissions.
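+For example, a user who only needs to view alerts on a specific resource group can be granted the built-in Monitoring Reader role at that scope. The following Azure CLI sketch is illustrative only; the user and resource group names are placeholders:
+
+```azurecli
+# Grant read access to monitoring data and alerts at resource group scope (illustrative values)
+az role assignment create \
+  --assignee "user@contoso.com" \
+  --role "Monitoring Reader" \
+  --scope "/subscriptions/<subscription-id>/resourceGroups/myResourceGroup"
+```
+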
+To create an alert rule, you need to have the following permissions:
+ - Read permission on the target resource of the alert rule
+ - Write permission on the resource group in which the alert rule is created (if you’re creating the alert rule from the Azure portal, the alert rule is created by default in the same resource group in which the target resource resides)
+ - Read permission on any action group associated to the alert rule (if applicable)
+These built-in Azure roles, supported at all Azure Resource Manager scopes, have permissions to access alert information and create alert rules:
+ - Monitoring Contributor
+ - Monitoring Reader
-You can narrow down the list by selecting values from any of these filters at the top of the page:
+## Alerts and state
-| Column | Description |
-|:---|:---|
-| Subscription | Select the Azure subscriptions for which you want to view the alerts. You can optionally choose to select all your subscriptions. Only alerts that you have access to in the selected subscriptions are included in the view. |
-| Resource group | Select a single resource group. Only alerts with targets in the selected resource group are included in the view. |
-| Resource type | Select one or more resource types. Only alerts with targets of the selected type are included in the view. This column is only available after a resource group has been specified. |
-| Resource | Select a resource. Only alerts with that resource as a target are included in the view. This column is only available after a resource type has been specified. |
-| Severity | Select an alert severity, or select **All** to include alerts of all severities. |
-| Alert condition | Select an alert condition, or select **All** to include alerts of all conditions. |
-| User response | Select a user response, or select **All** to include alerts of all user responses. |
-| Monitor service | Select a service, or select **All** to include all services. Only alerts created by rules that use service as a target are included. |
-| Time range | Only alerts fired within the selected time range are included in the view. Supported values are the past hour, the past 24 hours, the past seven days, and the past 30 days. |
+You can configure whether log or metric alerts are stateful or stateless. Activity log alerts are stateless.
+- Stateless alerts fire each time the condition is met, even if fired previously.
+- Stateful alerts fire when the condition is met and then don't fire again or trigger any more actions until the conditions are resolved.
+For stateful alerts, the alert is considered resolved when:
-Select **Columns** at the top of the page to select which columns to show.
-### Alert details pane
-
-When you select an alert, this alert details pane provides details of the alert and enables you to change how you want to respond to the alert.
-
-:::image type="content" source="media/alerts-overview/alert-detail-pane.png" alt-text="Screenshot of alert details pane.":::
-
-The Alert details pane includes:
-
-
-|Section |Description |
+|Alert type |The alert is resolved when |
|---------|---------|
-|Summary | Displays the properties and other significant information about the alert. |
-|History | Lists all actions on the alert and any changes made to the alert. |
-## Manage alerts
-
-You can set the user response of an alert to specify where it is in the resolution process. When the criteria specified in the alert rule is met, an alert is created or fired, and it has a status of *New*.
You can change the status when you acknowledge an alert and when you close it. All user response changes are stored in the history of the alert. +|Metric alerts|The alert condition isn't met for three consecutive checks.| +|Log alerts|The alert condition isn't met for 30 minutes for a specific evaluation period (to account for log ingestion delay), and
the alert condition isn't met for three consecutive checks.|
-The following user responses are supported.
+When the alert is considered resolved, the alert rule sends out a resolved notification using webhooks or email, and the monitor state in the Azure portal is set to resolved.
-| User Response | Description |
-|:---|:---|
-| New | The issue has been detected and hasn't yet been reviewed. |
-| Acknowledged | An administrator has reviewed the alert and started working on it. |
-| Closed | The issue has been resolved. After an alert has been closed, you can reopen it by changing it to another user response. |
+## Manage your alerts programmatically
-The *user response* is different and independent of the *alert condition*. The response is set by the user, while the alert condition is set by the system. When an alert fires, the alert's alert condition is set to *'fired'*, and when the underlying condition that caused the alert to fire clears, the alert condition is set to *'resolved'*.
-## Manage alert rules
-
-To show the **Rules** page, select **Manage alert rules**. The Rules page is a single place for managing all alert rules across your Azure subscriptions. It lists all alert rules and can be sorted based on target resources, resource groups, rule name, or status. You can also edit, enable, or disable alert rules from this page.
-
- :::image type="content" source="media/alerts-overview/alerts-rules.png" alt-text="Screenshot of alert rules page.":::
-## Create an alert rule
-You can author alert rules in a consistent manner, whatever of the monitoring service or signal type.
-
-> [!VIDEO https://www.microsoft.com/en-us/videoplayer/embed/RE4tflw]
+You can query your alert instances to create custom views outside of the Azure portal, or to analyze your alerts to identify patterns and trends.
+We recommend that you use [Azure Resource Graph](https://portal.azure.com/?feature.customportal=false#blade/HubsExtension/ArgQueryBlade) with the 'AlertsManagementResources' schema for managing alerts across multiple subscriptions. For a sample query, see [Azure Resource Graph sample queries for Azure Monitor](../resource-graph-samples.md).
+You can use Azure Resource Graph:
+ - with [Azure PowerShell](/powershell/module/az.monitor/)
+ - with the [Azure CLI](/cli/azure/monitor?view=azure-cli-latest&preserve-view=true)
+ - in the Azure portal
-Here's how to create a new alert rule:
-1. Pick the _target_ for the alert.
-1. Select the _signal_ from the available signals for the target.
-1. Specify the _logic_ to be applied to data from the signal.
-
-This simplified authoring process no longer requires you to know the monitoring source or signals that are supported before selecting an Azure resource. The list of available signals is automatically filtered based on the target resource that you select. Also based on that target, you're guided through defining the logic of the alert rule automatically.
-
-You can learn more about how to create alert rules in [Create, view, and manage alerts using Azure Monitor](../alerts/alerts-metric.md).
-
-Alerts are available across several Azure monitoring services. For information about how and when to use each of these services, see [Monitoring Azure applications and resources](../overview.md).
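+For example, the following Azure CLI sketch lists alerts fired in the last day by querying the `alertsmanagementresources` table. It is illustrative only and assumes the `resource-graph` CLI extension is installed; the projected columns are examples:
+
+```azurecli
+# Requires the Resource Graph extension: az extension add --name resource-graph
+az graph query -q "alertsmanagementresources | where properties.essentials.lastModifiedDateTime > ago(1d) | project name, severity = properties.essentials.severity, monitorCondition = properties.essentials.monitorCondition, alertState = properties.essentials.alertState, targetResource = properties.essentials.targetResourceName"
+```
+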
- -## Azure role-based access control (Azure RBAC) for your alert instances - -The consumption and management of alert instances requires the user to have the Azure built-in roles of either [monitoring contributor](../../role-based-access-control/built-in-roles.md#monitoring-contributor) or [monitoring reader](../../role-based-access-control/built-in-roles.md#monitoring-reader). These roles are supported at any Azure Resource Manager scope, from the subscription level to granular assignments at a resource level. For example, if a user only has monitoring contributor access for virtual machine `ContosoVM1`, that user can consume and manage only alerts generated on `ContosoVM1`. - -## Manage your alert instances programmatically - -You might want to query programmatically for alerts generated against your subscription. Queries might be to create custom views outside of the Azure portal, or to analyze your alerts to identify patterns and trends. - -We recommended that you use [Azure Resource Graph](../../governance/resource-graph/overview.md) with the `AlertsManagementResources` schema for querying fired alerts. Resource Graph is recommended when you have to manage alerts generated across multiple subscriptions. - -The following sample request to the Resource Graph REST API returns alerts within one subscription in the last day: - -```json -{ - "subscriptions": [ - - ], - "query": "alertsmanagementresources | where properties.essentials.lastModifiedDateTime > ago(1d) | project alertInstanceId = id, parentRuleId = tolower(tostring(properties['essentials']['alertRule'])), sourceId = properties['essentials']['sourceCreatedId'], alertName = name, severity = properties.essentials.severity, status = properties.essentials.monitorCondition, state = properties.essentials.alertState, affectedResource = properties.essentials.targetResourceName, monitorService = properties.essentials.monitorService, signalType = properties.essentials.signalType, firedTime = properties['essentials']['startDateTime'], lastModifiedDate = properties.essentials.lastModifiedDateTime, lastModifiedBy = properties.essentials.lastModifiedUserName" -} -``` - -You can also see the result of this Resource Graph query in the portal with Azure Resource Graph Explorer: [portal.azure.com](https://portal.azure.com/?feature.customportal=false#blade/HubsExtension/ArgQueryBlade/query/alertsmanagementresources%0A%7C%20where%20properties.essentials.lastModifiedDateTime%20%3E%20ago(1d)%0A%7C%20project%20alertInstanceId%20%3D%20id%2C%20parentRuleId%20%3D%20tolower(tostring(properties%5B'essentials'%5D%5B'alertRule'%5D))%2C%20sourceId%20%3D%20properties%5B'essentials'%5D%5B'sourceCreatedId'%5D%2C%20alertName%20%3D%20name%2C%20severity%20%3D%20properties.essentials.severity%2C%20status%20%3D%20properties.essentials.monitorCondition%2C%20state%20%3D%20properties.essentials.alertState%2C%20affectedResource%20%3D%20properties.essentials.targetResourceName%2C%20monitorService%20%3D%20properties.essentials.monitorService%2C%20signalType%20%3D%20properties.essentials.signalType%2C%20firedTime%20%3D%20properties%5B'essentials'%5D%5B'startDateTime'%5D%2C%20lastModifiedDate%20%3D%20properties.essentials.lastModifiedDateTime%2C%20lastModifiedBy%20%3D%20properties.essentials.lastModifiedUserName) - -You can also use the [Alert Management REST API](/rest/api/monitor/alertsmanagement/alerts) in lower scale querying scenarios or to update fired alerts. 
- -## Smart groups +You can also use the [Alert Management REST API](/rest/api/monitor/alertsmanagement/alerts) for lower scale querying or to update fired alerts. -Smart groups are aggregations of alerts based on machine learning algorithms, which can help reduce alert noise and aid in troubleshooting. [Learn more about Smart Groups](./alerts-smartgroups-overview.md?toc=%2fazure%2fazure-monitor%2ftoc.json) and [how to manage your smart groups](./alerts-managing-smart-groups.md?toc=%2fazure%2fazure-monitor%2ftoc.json). +## Pricing +See the [Azure Monitor pricing page](https://azure.microsoft.com/pricing/details/monitor/) for information about pricing. ## Next steps -- [Learn more about Smart Groups](./alerts-smartgroups-overview.md?toc=%2fazure%2fazure-monitor%2ftoc.json) +- [See your alert instances](./alerts-page.md) +- [Create a new alert rule](alerts-log.md) - [Learn about action groups](../alerts/action-groups.md) -- [Managing your alert instances in Azure](./alerts-managing-alert-instances.md?toc=%2fazure%2fazure-monitor%2ftoc.json) -- [Managing Smart Groups](./alerts-managing-smart-groups.md?toc=%2fazure%2fazure-monitor%2ftoc.json) -- [Learn more about Azure alerts pricing](https://azure.microsoft.com/pricing/details/monitor/) +- [Learn about alert processing rules](alerts-action-rules.md) diff --git a/articles/azure-monitor/alerts/alerts-page.md b/articles/azure-monitor/alerts/alerts-page.md new file mode 100644 index 0000000000000..c5b446cbaac8e --- /dev/null +++ b/articles/azure-monitor/alerts/alerts-page.md @@ -0,0 +1,50 @@ +--- +title: View and manage your alert instances +description: The alerts page summarizes all alert instances in all your Azure resources generated in the last 30 days. +ms.topic: conceptual +ms.date: 2/23/2022 +ms.reviewer: harelb + +--- +# View and manage your alert instances + +The alerts page summarizes all alert instances in all your Azure resources generated in the last 30 days. You can see all your different types of alerts from multiple subscriptions in a single pane, and you can find specific alert instances for troubleshooting purposes. + +You can get to the alerts page in any of the following ways: + +- From the home page in the [Azure portal](https://portal.azure.com/), select **Monitor** > **Alerts**. + + :::image type="content" source="media/alerts-managing-alert-instances/alerts-monitor-menu.png" alt-text="Screenshot of alerts link on monitor menu. "::: + +- From a specific resource, go to the **Monitoring** section, and choose **Alerts**. The landing page is pre-filtered for alerts on that specific resource. + + :::image type="content" source="media/alerts-managing-alert-instances/alerts-resource-menu.png" alt-text="Screenshot of alerts link on a resource's menu."::: +## Alert rule recommendations (preview) + +> [!NOTE] +> The alert rule recommendations feature is currently in preview and is only enabled for VMs. + +If you don't have alert rules defined for the selected resource, either individually or as part of a resource group or subscription, you can [create a new alert rule](alerts-log.md#create-a-new-log-alert-rule-in-the-azure-portal), or [enable recommended out-of-the-box alert rules in the Azure portal (preview)](alerts-log.md#enable-recommended-out-of-the-box-alert-rules-in-the-azure-portal-preview). 
+
+:::image type="content" source="media/alerts-managing-alert-instances/enable-recommended-alert-rules.jpg" alt-text="Screenshot of alerts page with link to recommended alert rules.":::
+
+## The alerts summary pane
+
+If you have alerts configured for this resource, the alerts summary pane summarizes the alerts fired in the last 24 hours. You can modify the list of alert instances by selecting filters such as **time range**, **subscription**, **alert condition**, **severity**, and more.
+
+To see more details about a specific alert instance, select the alert instance to open the **Alert Details** page.
+> [!NOTE]
+> If you navigated to the alerts page by selecting a specific alert severity, the list is pre-filtered for that severity.
+
+:::image type="content" source="media/alerts-managing-alert-instances/alerts-page.png" alt-text="Screenshot of alerts page.":::
+
+## The alerts details page
+
+The **Alert details** page provides details about the selected alert. Select **Change user response** to change the user response to the alert. You can see all closed alerts in the **History** tab.
+
+:::image type="content" source="media/alerts-managing-alert-instances/alerts-details-page.png" alt-text="Screenshot of alerts details page.":::
+
+## Next steps
+
+- [Learn about Azure Monitor alerts](./alerts-overview.md)
+- [Create a new alert rule](alerts-log.md)
\ No newline at end of file
diff --git a/articles/azure-monitor/alerts/alerts-prepare-migration.md b/articles/azure-monitor/alerts/alerts-prepare-migration.md
index 7ff93cad64001..88e2ab2fe006b 100644
--- a/articles/azure-monitor/alerts/alerts-prepare-migration.md
+++ b/articles/azure-monitor/alerts/alerts-prepare-migration.md
@@ -23,7 +23,7 @@ The following table is a reference to the programmatic interfaces for both class
| Deployment script type | Classic alerts | New metric alerts |
| ---------------------- | -------------- | ----------------- |
|REST API | [microsoft.insights/alertrules](/rest/api/monitor/alertrules) | [microsoft.insights/metricalerts](/rest/api/monitor/metricalerts) |
-|Azure CLI | [az monitor alert](/cli/azure/monitor/alert) | [az monitor metrics alert](/cli/azure/monitor/metrics/alert) |
+|Azure CLI | [az monitor alert](/cli/azure/monitor/alert) | [az monitor metrics alert](/cli/azure/monitor/metrics/alert) |
|PowerShell | [Reference](/powershell/module/az.monitor/add-azmetricalertrule) | [Reference](/powershell/module/az.monitor/add-azmetricalertrulev2) |
| Azure Resource Manager template | [For classic alerts](./alerts-enable-template.md)|[For new metric alerts](./alerts-metric-create-templates.md)|
@@ -157,4 +157,4 @@ If you're using a partner integration that's not listed here, confirm with the p
## Next steps
- [How to use the migration tool](alerts-using-migration-tool.md)
-- [Understand how the migration tool works](alerts-understand-migration.md) \ No newline at end of file
+- [Understand how the migration tool works](alerts-understand-migration.md)
diff --git a/articles/azure-monitor/alerts/alerts-resource-move.md b/articles/azure-monitor/alerts/alerts-resource-move.md
index c17e976029de1..a904d4eb7bb2c 100644
--- a/articles/azure-monitor/alerts/alerts-resource-move.md
+++ b/articles/azure-monitor/alerts/alerts-resource-move.md
@@ -94,9 +94,9 @@ Navigate to Alerts > Alert processing rules (preview) > filter by the containing
### Change scope of a rule using PowerShell
-1.
Get the existing rule ([metric alerts](/powershell/module/az.monitor/get-azmetricalertrulev2), [activity log alerts](/powershell/module/az.monitor/get-azactivitylogalert), [alert processing rules](/powershell/module/az.alertsmanagement/get-azactionrule)). +1. Get the existing rule ([metric alerts](/powershell/module/az.monitor/get-azmetricalertrulev2), [activity log alerts](/powershell/module/az.monitor/get-azactivitylogalert), alert [processing rules](/powershell/module/az.alertsmanagement/get-azalertprocessingrule)). 2. Modify the scope. If needed, split into two rules (relevant for some cases of metric alerts, as noted above). -3. Redeploy the rule ([metric alerts](/powershell/module/az.monitor/add-azmetricalertrulev2), [activity log alerts](/powershell/module/az.monitor/enable-azactivitylogalert), [alert processing rules](/powershell/module/az.alertsmanagement/set-azactionrule)). +3. Redeploy the rule ([metric alerts](/powershell/module/az.monitor/add-azmetricalertrulev2), [activity log alerts](/powershell/module/az.monitor/enable-azactivitylogalert), [alert processing rules](/powershell/module/az.alertsmanagement/set-azalertprocessingrule)). ### Change the scope of a rule using Azure CLI diff --git a/articles/azure-monitor/alerts/alerts-smartgroups-overview.md b/articles/azure-monitor/alerts/alerts-smartgroups-overview.md deleted file mode 100644 index fe1089dc5aa2c..0000000000000 --- a/articles/azure-monitor/alerts/alerts-smartgroups-overview.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Smart groups (preview) -description: Smart Groups are aggregations of alerts that help you reduce alert noise -ms.topic: conceptual -ms.date: 2/23/2022 ---- - -# Smart groups (preview) - -A common challenge faced when dealing with alerts is sifting through the noise to find out what actually matters - smart groups are intended to be the solution to that problem. - -Smart groups are automatically created by using machine learning algorithms to combine related alerts that represent a single issue. When an alert is created, the algorithm adds it to a new smart group or an existing smart group based on information such as historical patterns, similar properties, and similar structure. For example, if % CPU on several virtual machines in a subscription simultaneously spikes leading to many individual alerts, and if such alerts have occurred together anytime in the past, these alerts will likely be grouped into a single Smart Group, suggesting a potential common root cause. This means that for someone troubleshooting alerts, smart groups not only allows them to reduce noise by managing related alerts as a single aggregated unit, it also guides them towards possible common root causes for their alerts. - -Currently, the algorithm only considers alerts from the same monitor service within a subscription. Smart groups can reduce up to 99% of alert noise through this consolidation. You can view the reason that alerts were included in a group in the smart group details page. - -You can view the details of smart groups and set the state similarly to how you can with alerts. Each alert is a member of one and only one smart group. - -## Smart group state - -Smart group state is a similar concept to the alert state, which allows you to manage the resolution process at the level of a smart group. Similar to the alert state, when a smart group is created, it has the **New** state, which can be changed to either **Acknowledged** or **Closed**. - -The following smart group states are supported. 
- -| State | Description | -|:---|:---| -| New | The issue has just been detected and has not yet been reviewed. | -| Acknowledged | An administrator has reviewed the smart group and started working on it. | -| Closed | The issue has been resolved. After a smart group has been closed, you can reopen it by changing it to another state. | - -[Learn how to change the state of your smart group.](./alerts-managing-alert-states.md?toc=%2fazure%2fazure-monitor%2ftoc.json) - -> [!NOTE] -> Changing the state of a smart group does not change the state of the individual member alerts. - -## Smart group details page - -The Smart group detail page is displayed when you select a smart group. It provides details about the smart group, including the reasoning that was used to create the group, and enables you to change its state. - -![Smart group detail](media/alerts-smartgroups-overview/smart-group-detail.png) - - -The smart group detail page includes the following sections. - -| Section | Description | -|:---|:---| -| Alerts | Lists the individual alerts that are included in the smart group. Select an alert to open its alert detail page. | -| History | Lists each action taken by the smart group and any changes that are made to it. This is currently limited to state changes and alert membership changes. | - -## Smart group taxonomy - -The name of a smart group is the name of its first alert. You can't create or rename a smart group. - -## Next steps - -- [Manage smart groups](./alerts-managing-smart-groups.md?toc=%2fazure%2fazure-monitor%2ftoc.json) -- [Change your alert and smart group state](./alerts-managing-alert-states.md?toc=%2fazure%2fazure-monitor%2ftoc.json) diff --git a/articles/azure-monitor/alerts/alerts-troubleshoot-metric.md b/articles/azure-monitor/alerts/alerts-troubleshoot-metric.md index 72e45fbbd7b2f..aba573d2ff37f 100644 --- a/articles/azure-monitor/alerts/alerts-troubleshoot-metric.md +++ b/articles/azure-monitor/alerts/alerts-troubleshoot-metric.md @@ -4,7 +4,7 @@ description: Common issues with Azure Monitor metric alerts and possible solutio author: harelbr ms.author: harelbr ms.topic: troubleshooting -ms.date: 2/23/2022 +ms.date: 5/25/2022 --- # Troubleshooting problems in Azure Monitor metric alerts @@ -233,6 +233,16 @@ To create a metric alert rule, you’ll need to have the following permissions: - Write permission on the resource group in which the alert rule is created (if you’re creating the alert rule from the Azure portal, the alert rule is created by default in the same resource group in which the target resource resides) - Read permission on any action group associated to the alert rule (if applicable) +## Subscription registration to the Microsoft.Insights resource provider + +Metric alerts can only access resources in subscriptions registered to the Microsoft.Insights resource provider. +Therefore, to create a metric alert rule, all involved subscriptions must be registered to this resource provider: + +- The subscription containing the alert rule's target resource (scope) +- The subscription containing the action groups associated with the alert rule (if defined) +- The subscription in which the alert rule is saved + +Learn more about [registering resource providers](../../azure-resource-manager/management/resource-providers-and-types.md). ## Naming restrictions for metric alert rules @@ -383,4 +393,4 @@ The table below lists the metrics that aren't supported by dynamic thresholds. 
## Next steps
-- For general troubleshooting information about alerts and notifications, see [Troubleshooting problems in Azure Monitor alerts](alerts-troubleshoot.md).
+- For general troubleshooting information about alerts and notifications, see [Troubleshooting problems in Azure Monitor alerts](alerts-troubleshoot.md).
\ No newline at end of file
diff --git a/articles/azure-monitor/alerts/alerts-types.md b/articles/azure-monitor/alerts/alerts-types.md
new file mode 100644
index 0000000000000..3dd7e79e2b7da
--- /dev/null
+++ b/articles/azure-monitor/alerts/alerts-types.md
@@ -0,0 +1,180 @@
+---
+title: Types of Azure Monitor Alerts
+description: This article explains the different types of Azure Monitor alerts and when to use each type.
+author: AbbyMSFT
+ms.author: abbyweisberg
+ms.topic: conceptual
+ms.date: 04/26/2022
+ms.custom: template-concept
+ms.reviewer: harelb
+---
+
+# Types of Azure Monitor alerts
+
+This article describes the kinds of Azure Monitor alerts you can create, and helps you understand when to use each type of alert.
+
+There are four types of alerts:
+- [Metric alerts](#metric-alerts)
+- [Log alerts](#log-alerts)
+- [Activity log alerts](#activity-log-alerts)
+- [Smart detection alerts](#smart-detection-alerts)
+
+## Choosing the right alert type
+
+This table can help you decide which type of alert to use. For more detailed information about pricing, see the [pricing page](https://azure.microsoft.com/pricing/details/monitor/).
+
+|Alert Type |When to Use |Pricing Information|
+|---------|---------|---------|
+|Metric alert|Metric alerts are useful when you want to be alerted about data that requires little or no manipulation. Metric data is stored in the system already pre-computed, so metric alerts are less expensive than log alerts. If the data you want to monitor is available in metric data, you would want to use metric alerts.|Each metric alert rule is charged based on the number of time-series that are monitored. |
+|Log alert|Log alerts allow you to perform advanced logic operations on your data. If the data you want to monitor is available in logs, or requires advanced logic, you can use the robust features of KQL for data manipulation using log alerts. Log alerts are more expensive than metric alerts.|Each log alert rule is billed based on the interval at which the log query is evaluated (more frequent query evaluation results in a higher cost). Additionally, for log alerts configured for [at scale monitoring](#splitting-by-dimensions-in-log-alert-rules), the cost also depends on the number of time series created by the dimensions resulting from your query. |
+|Activity Log alert|Activity logs provide auditing of all actions that occurred on resources. Use activity log alerts if you want to be alerted when a specific event happens to a resource, for example, a restart, a shutdown, or the creation or deletion of a resource.|For more information, see the [pricing page](https://azure.microsoft.com/pricing/details/monitor/).|
+
+## Metric alerts
+
+A metric alert rule monitors a resource by evaluating conditions on the resource metrics at regular intervals. If the conditions are met, an alert is fired. A metric time-series is a series of metric values captured over a period of time.
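+For example, the following Azure CLI sketch creates a static-threshold metric alert rule that fires when a virtual machine's average CPU exceeds 80% over a 5-minute window. It is illustrative only; the resource IDs, names, and threshold are placeholders:
+
+```azurecli
+# Create a static-threshold metric alert rule on a single VM (illustrative values)
+az monitor metrics alert create \
+  --name "high-cpu-alert" \
+  --resource-group "myResourceGroup" \
+  --scopes "/subscriptions/<subscription-id>/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM" \
+  --condition "avg Percentage CPU > 80" \
+  --window-size 5m \
+  --evaluation-frequency 1m \
+  --action "/subscriptions/<subscription-id>/resourceGroups/myResourceGroup/providers/microsoft.insights/actionGroups/myActionGroup" \
+  --description "Alert when average CPU is over 80 percent"
+```
+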
+ +You can create rules using these metrics: +- [Platform metrics](alerts-metric-near-real-time.md#metrics-and-dimensions-supported) +- [Custom metrics](../essentials/metrics-custom-overview.md) +- [Application Insights custom metrics](../app/api-custom-events-metrics.md) +- [Selected logs from a Log Analytics workspace converted to metrics](alerts-metric-logs.md) + +Metric alert rules include these features: +- You can use multiple conditions on an alert rule for a single resource. +- You can add granularity by [monitoring multiple metric dimensions](#narrow-the-target-using-dimensions). +- You can use [Dynamic thresholds](#dynamic-thresholds) driven by machine learning. +- You can configure whether metric alerts are [stateful or stateless](alerts-overview.md#alerts-and-state). Metric alerts are stateful by default. + +The target of the metric alert rule can be: +- A single resource, such as a VM. See [this article](alerts-metric-near-real-time.md) for supported resource types. +- [Multiple resources](#monitor-multiple-resources) of the same type in the same Azure region, such as a resource group. + +### Multiple conditions + +When you create an alert rule for a single resource, you can apply multiple conditions. For example, you could create an alert rule to monitor an Azure virtual machine and alert when both "Percentage CPU is higher than 90%" and "Queue length is over 300 items". When an alert rule has multiple conditions, the alert fires when all the conditions in the alert rule are true and is resolved when at least one of the conditions is no longer true for three consecutive checks. +### Narrow the target using Dimensions + +Dimensions are name-value pairs that contain additional data about the metric value. Using dimensions allows you to filter the metrics and monitor specific time-series, instead of monitoring the aggregate of all the dimensional values. +For example, the Transactions metric of a storage account can have an API name dimension that contains the name of the API called by each transaction (for example, GetBlob, DeleteBlob, PutPage). You can choose to have an alert fired when there is a high number of transactions in any API name (which is the aggregated data), or you can use dimensions to further break it down to alert only when the number of transactions is high for specific API names. +If you use more than one dimension, the metric alert rule can monitor multiple dimension values from different dimensions of a metric. +The alert rule separately monitors all the dimension value combinations. +See [this article](alerts-metric-multiple-time-series-single-rule.md) for detailed instructions on using dimensions in metric alert rules. + +### Create resource-centric alerts using splitting by dimensions + +To monitor for the same condition on multiple Azure resources, you can use splitting by dimensions. Splitting by dimensions allows you to create resource-centric alerts at scale for a subscription or resource group. Alerts are split into separate alerts by unique grouping combinations. Splitting on the Azure resource ID column makes the specified resource into the alert target. + +You may also decide not to split when you want a condition applied to multiple resources in the scope. For example, you may want to fire an alert if at least five machines in the resource group scope have CPU usage over 80%. + +### Monitor multiple resources + +You can monitor at scale by applying the same metric alert rule to multiple resources of the same type for resources that exist in the same Azure region.
Individual notifications are sent for each monitored resource. + +Platform metrics for these services are supported in the following Azure clouds: + +| Service | Global Azure | Government | China | +|:-----------------------------|:-------------|:-----------|:--------| +| Virtual machines* | Yes |Yes | Yes | +| SQL server databases | Yes | Yes | Yes | +| SQL server elastic pools | Yes | Yes | Yes | +| NetApp files capacity pools | Yes | Yes | Yes | +| NetApp files volumes | Yes | Yes | Yes | +| Key vaults | Yes | Yes | Yes | +| Azure Cache for Redis | Yes | Yes | Yes | +| Azure Stack Edge devices | Yes | Yes | Yes | +| Recovery Services vaults | Yes | No | No | + + > [!NOTE] + > Platform metrics are not supported for virtual machine network metrics (Network In Total, Network Out Total, Inbound Flows, Outbound Flows, Inbound Flows Maximum Creation Rate, Outbound Flows Maximum Creation Rate). + +You can specify the scope of monitoring with a single metric alert rule in one of three ways. For example, with virtual machines you can specify the scope as: + +- a list of virtual machines (in one Azure region) within a subscription +- all virtual machines (in one Azure region) in one or more resource groups in a subscription +- all virtual machines (in one Azure region) in a subscription + +### Dynamic thresholds + +Dynamic thresholds use advanced machine learning (ML) to: +- Learn the historical behavior of metrics +- Identify patterns and adapt to metric changes over time, such as hourly, daily or weekly patterns. +- Recognize anomalies that indicate possible service issues +- Calculate the most appropriate threshold for the metric + +Machine Learning continuously uses new data to learn more and make the threshold more accurate. Because the system adapts to the metrics’ behavior over time, and alerts based on deviations from its pattern, you don't have to know the "right" threshold for each metric. + +Dynamic thresholds help you: +- Create scalable alerts for hundreds of metric series with one alert rule. Fewer alert rules lead to less time that you have to spend creating and managing alert rules. +- Create rules without having to know what threshold to configure +- Configure metric alerts using high-level concepts without extensive domain knowledge about the metric +- Prevent noisy (low precision) or wide (low recall) thresholds that don’t have an expected pattern +- Handle noisy metrics (such as machine CPU or memory) and metrics with low dispersion (such as availability and error rate). + +See [this article](alerts-dynamic-thresholds.md) for detailed instructions on using dynamic thresholds in metric alert rules. + +## Log alerts +A log alert rule monitors a resource by using a Log Analytics query to evaluate resource logs at a set frequency. If the conditions are met, an alert is fired. Because you can use Log Analytics queries, log alerts allow you to perform advanced logic operations on your data and to use the robust features of KQL for data manipulation of log data. A sample query is shown after the following list. + +The target of the log alert rule can be: +- A single resource, such as a VM. +- Multiple resources of the same type in the same Azure region, such as a resource group. This is currently available for selected resource types. +- Multiple resources using [cross-resource query](../logs/cross-workspace-query.md#querying-across-log-analytics-workspaces-and-from-application-insights).
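+For example, a log alert rule that detects server errors could evaluate a query like the following sketch. It assumes the standard Application Insights `requests` table; the threshold, evaluation frequency, and time window are configured on the alert rule itself, not in the query.
+
+```Kusto
+// Failed requests in the evaluated time window.
+// The alert rule fires when the number of rows returned (or a value calculated
+// from a numeric column, such as duration) crosses the configured threshold.
+requests
+| where resultCode == "500"
+```
+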
+ +Log alerts can measure two different things, which can be used for different monitoring scenarios: +- Table rows: The number of rows returned can be used to work with events such as Windows event logs, syslog, and application exceptions. +- Calculation of a numeric column: Calculations based on any numeric column can be used to include any number of resources. For example, CPU percentage. + +You can configure whether log alerts are [stateful or stateless](alerts-overview.md#alerts-and-state) (currently in preview). + +> [!NOTE] +> Log alerts work best when you are trying to detect specific data in the logs, as opposed to when you are trying to detect a **lack** of data in the logs. Since logs are semi-structured data, they are inherently more latent than metric data on information like a VM heartbeat. To avoid misfires when you are trying to detect a lack of data in the logs, consider using [metric alerts](#metric-alerts). You can send data to the metric store from logs using [metric alerts for logs](alerts-metric-logs.md). + +### Dimensions in log alert rules +You can use dimensions when creating log alert rules to monitor the values of multiple instances of a resource with one rule. For example, you can monitor CPU usage on multiple instances running your website or app. Each instance is monitored individually, and notifications are sent for each instance. + +### Splitting by dimensions in log alert rules +To monitor for the same condition on multiple Azure resources, you can use splitting by dimensions. Splitting by dimensions allows you to create resource-centric alerts at scale for a subscription or resource group. Alerts are split into separate alerts by grouping combinations using numerical or string columns. Splitting on the Azure resource ID column makes the specified resource into the alert target. +You may also decide not to split when you want a condition applied to multiple resources in the scope. For example, you may want to fire an alert if at least five machines in the resource group scope have CPU usage over 80%. + +### Using the API +Manage new rules in your workspaces using the [ScheduledQueryRules](/rest/api/monitor/scheduledqueryrule-2021-08-01/scheduled-query-rules) API. + +> [!NOTE] +> Log alerts for Log Analytics used to be managed using the legacy [Log Analytics Alert API](api-alerts.md). Learn more about [switching to the current ScheduledQueryRules API](alerts-log-api-switch.md). +## Log alerts on your Azure bill +Log Alerts are listed under the resource provider microsoft.insights/scheduledqueryrules with: +- Log Alerts on Application Insights shown with the exact resource name along with resource group and alert properties. +- Log Alerts on Log Analytics shown with the exact resource name along with resource group and alert properties, when created using the scheduledQueryRules API. +- Log alerts created from the [legacy Log Analytics API](./api-alerts.md) aren't tracked as [Azure Resources](../../azure-resource-manager/management/overview.md) and don't have enforced unique resource names. These alerts are still created on `microsoft.insights/scheduledqueryrules` as hidden resources, which have this resource naming structure `|||`. Log Alerts on the legacy API are shown with the above hidden resource name along with resource group and alert properties. +> [!Note] +> Unsupported resource characters such as <, >, %, &, \, ?, / are replaced with _ in the hidden resource names and this will also reflect in the billing information.
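+As a sketch of the dimensions and splitting described above, the following query could back a resource-centric log alert rule. It assumes virtual machines sending performance counters to a Log Analytics workspace (the standard `Perf` table); splitting on the `_ResourceId` column makes each virtual machine the target of its own alert.
+
+```Kusto
+// Average CPU per computer and per Azure resource over 15-minute bins.
+// In the alert rule, measure AggregatedValue, split on _ResourceId (and optionally Computer),
+// and fire when the value is greater than 80.
+Perf
+| where ObjectName == "Processor" and CounterName == "% Processor Time"
+| summarize AggregatedValue = avg(CounterValue) by bin(TimeGenerated, 15m), Computer, _ResourceId
+```
+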
+## Activity log alerts +An activity log alert monitors a resource by checking the activity logs for a new activity log event that matches the defined conditions. + +You may want to use activity log alerts for these types of scenarios: +- When a specific operation occurs on resources in a specific resource group or subscription. For example, you may want to be notified when: + - Any virtual machine in a production resource group is deleted. + - Any new roles are assigned to a user in your subscription. +- A service health event occurs. Service health events include notifications of incidents and maintenance events that apply to resources in your subscription. + +You can create an activity log alert on: +- Any of the activity log [event categories](../essentials/activity-log-schema.md), other than on alert events. +- Any activity log event in a top-level property of the JSON object. + +Activity log alert rules are Azure resources, so they can be created by using an Azure Resource Manager template. They can also be created, updated, or deleted in the Azure portal. + +An activity log alert only monitors events in the subscription in which the alert is created. + +## Smart Detection alerts +After setting up Application Insights for your project, when your app generates a certain minimum amount of data, Smart Detection takes 24 hours to learn the normal behavior of your app. Your app's performance has a typical pattern of behavior. Some requests or dependency calls will be more prone to failure than others, and the overall failure rate may go up as load increases. Smart Detection uses machine learning to find these anomalies. Smart Detection monitors the data received from your app, and in particular the failure rates. Application Insights automatically alerts you in near real time if your web app experiences an abnormal rise in the rate of failed requests. + +As data comes into Application Insights from your web app, Smart Detection compares the current behavior with the patterns seen over the past few days. If there is an abnormal rise in failure rate compared to previous performance, an analysis is triggered. To help you triage and diagnose the problem, an analysis of the characteristics of the failures and related application data is provided in the alert details. There are also links to the Application Insights portal for further diagnosis. The feature needs no setup or configuration, as it uses machine learning algorithms to predict the normal failure rate. + +While metric alerts tell you there might be a problem, Smart Detection starts the diagnostic work for you, performing much of the analysis you would otherwise have to do yourself. You get the results neatly packaged, helping you to get quickly to the root of the problem. + +Smart Detection works for any web app, hosted in the cloud or on your own servers, that generates application request or dependency data. + +## Next steps +- Get an [overview of alerts](alerts-overview.md). +- [Create an alert rule](alerts-log.md). +- Learn more about [Smart Detection](../app/proactive-failure-diagnostics.md).
diff --git a/articles/azure-monitor/alerts/alerts-unified-log.md b/articles/azure-monitor/alerts/alerts-unified-log.md deleted file mode 100644 index ad364ec353222..0000000000000 --- a/articles/azure-monitor/alerts/alerts-unified-log.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -title: Log alerts in Azure Monitor -description: Trigger emails, notifications, call websites URLs (webhooks), or automation when the log query condition you specify is met -author: yanivlavi -ms.author: yalavi -ms.topic: conceptual -ms.date: 2/23/2022 ---- - -# Log alerts in Azure Monitor - -## Overview - -Log alerts are one of the alert types that are supported in [Azure Alerts](./alerts-overview.md). Log alerts allow users to use a [Log Analytics](../logs/log-analytics-tutorial.md) query to evaluate resources logs every set frequency, and fire an alert based on the results. Rules can trigger one or more actions using [Action Groups](./action-groups.md). - -> [!NOTE] -> Log data from a [Log Analytics workspace](../logs/log-analytics-tutorial.md) can be sent to the Azure Monitor metrics store. Metrics alerts have [different behavior](alerts-metric-overview.md), which may be more desirable depending on the data you are working with. For information on what and how you can route logs to metrics, see [Metric Alert for Logs](alerts-metric-logs.md). - -## Prerequisites - -Log alerts run queries on Log Analytics data. First you should start [collecting log data](../essentials/resource-logs.md) and query the log data for issues. You can use the [alert query examples topic](../logs/queries.md) in Log Analytics to understand what you can discover or [get started on writing your own query](../logs/log-analytics-tutorial.md). - -[Azure Monitoring Contributor](../roles-permissions-security.md) is a common role that is needed for creating, modifying, and updating log alerts. Access & query execution rights for the resource logs are also needed. Partial access to resource logs can fail queries or return partial results. [Learn more about configuring log alerts in Azure](./alerts-log.md). - -> [!NOTE] -> Log alerts for Log Analytics used to be managed using the legacy [Log Analytics Alert API](./api-alerts.md). [Learn more about switching to the current ScheduledQueryRules API](../alerts/alerts-log-api-switch.md). - -## Query evaluation definition - -Log search rules condition definition starts from: - -- What query to run? -- How to use the results? - -The following sections describe the different parameters you can use to set the above logic. - -### Log query -The [Log Analytics](../logs/log-analytics-tutorial.md) query used to evaluate the rule. The results returned by this query are used to determine whether an alert is to be triggered. The query can be scoped to: - -- A specific resource, such as a virtual machine. -- An at scale resource, such as a subscription or resource group. -- Multiple resources using [cross-resource query](../logs/cross-workspace-query.md#querying-across-log-analytics-workspaces-and-from-application-insights). - -> [!IMPORTANT] -> Alert queries have constraints to ensure optimal performance and the relevance of the results. [Learn more here](./alerts-log-query.md). - -> [!IMPORTANT] -> Resource centric and [cross-resource query](../logs/cross-workspace-query.md#querying-across-log-analytics-workspaces-and-from-application-insights) are only supported using the current scheduledQueryRules API. If you use the legacy [Log Analytics Alert API](./api-alerts.md), you will need to switch. 
[Learn more about switching](./alerts-log-api-switch.md) - -#### Query time Range - -Time range is set in the rule condition definition. It's called **Override query time range** in the advance settings section. - -Unlike log analytics, the time range in alerts is limited to a maximum of two days of data. Even if longer range **ago** command is used in the query, the time range will apply. For example, a query scans up to 2 days, even if the text contains **ago(7d)**. - -If you use **ago** command in the query, the range is automatically set to two days. You can also change time range manually in cases the query requires more data than the alert evaluation even if there is no **ago** command in the query. - -### Measure - -Log alerts turn log into numeric values that can be evaluated. You can measure two different things: -* Result count -* Calculation of a value - -#### Result count - -Count of results is the default measure and is used when you set a **Measure** with a selection of **Table rows**. Ideal for working with events such as Windows event logs, syslog, application exceptions. Triggers when log records happen or doesn't happen in the evaluated time window. - -Log alerts work best when you try to detect data in the log. It works less well when you try to detect lack of data in the logs. For example, alerting on virtual machine heartbeat. - -> [!NOTE] -> Since logs are semi-structured data, they are inherently more latent than metric, you may experience misfires when trying to detect lack of data in the logs, and you should consider using [metric alerts](alerts-metric-overview.md). You can send data to the metric store from logs using [metric alerts for logs](alerts-metric-logs.md). - -##### Example of result count use case - -You want to know when your application responded with error code 500 (Internal Server Error). You would create an alert rule with the following details: - -- **Query:** - -```Kusto -requests -| where resultCode == "500" -``` - -- **Aggregation granularity:** 15 minutes -- **Alert frequency:** 15 minutes -- **Threshold value:** Greater than 0 - -Then alert rules monitors for any requests ending with 500 error code. The query runs every 15 minutes, over the last 15 minutes. If even one record is found, it fires the alert and triggers the actions configured. - -### Calculation of a value - -Calculation of a value is used when you select a column name of a numeric column for the **Measure**, and the result is a calculation that you perform on the values in that column. This would be used, for example, as CPU counter value. -### Aggregation type - -The calculation that is done on multiple records to aggregate them to one numeric value using the [**Aggregation granularity**](#aggregation-granularity) defined. For example: -- **Sum** returns the sum of measure column. -- **Average** returns the average of the measure column. - -### Aggregation granularity - -Determines the interval that is used to aggregate multiple records to one numeric value. For example, if you specified **5 minutes**, records would be grouped by 5-minute intervals using the **Aggregation type** specified. - -> [!NOTE] -> As [bin()](/azure/kusto/query/binfunction) can result in uneven time intervals, the alert service will automatically convert [bin()](/azure/kusto/query/binfunction) function to [bin_at()](/azure/kusto/query/binatfunction) function with appropriate time at runtime, to ensure results with a fixed point. 
- -### Split by alert dimensions - -Split alerts by number or string columns into separate alerts by grouping into unique combinations. It's configured in **Split by dimensions** section of the condition (limited to six splits). When creating resource-centric alerts at scale (subscription or resource group scope), you can split by Azure resource ID column. Splitting on Azure resource ID column will change the target of the alert to the specified resource. - -Splitting by Azure resource ID column is recommended when you want to monitor the same condition on multiple Azure resources. For example, monitoring all virtual machines for CPU usage over 80%. You may also decide not to split when you want a condition on multiple resources in the scope. Such as monitoring that at least five machines in the resource group scope have CPU usage over 80%. -#### Example of splitting by alert dimensions - -For example, you want to monitor errors for multiple virtual machines running your web site/app in a specific resource group. You can do that using a log alert rule as follows: - -- **Query:** - - ```Kusto - // Reported errors - union Event, Syslog // Event table stores Windows event records, Syslog stores Linux records - | where EventLevelName == "Error" // EventLevelName is used in the Event (Windows) records - or SeverityLevel== "err" // SeverityLevel is used in Syslog (Linux) records - ``` - -- **Resource ID Column:** _ResourceId -- **Dimensions:** - - Computer = VM1, VM2 (Filtering values in alert rules definition isn't available currently for workspaces and Application Insights. Filter in the query text.) -- **Aggregation granularity:** 15 minutes -- **Alert frequency:** 15 minutes -- **Threshold value:** Greater than 0 - -This rule monitors if any virtual machine had error events in the last 15 minutes. Each virtual machine is monitored separately and will trigger actions individually. - -> [!NOTE] -> Split by alert dimensions is only available for the current scheduledQueryRules API. If you use the legacy [Log Analytics Alert API](./api-alerts.md), you will need to switch. [Learn more about switching](./alerts-log-api-switch.md). Resource centric alerting at scale is only supported in the API version `2021-08-01` and above. - -## Alert logic definition - -Once you define the query to run and evaluation of the results, you need to define the alerting logic and when to fire actions. The following sections describe the different parameters you can use: - -### Threshold and operator - -The query results are transformed into a number that is compared against the threshold and operator. - -### Frequency - -The interval in which the query is run. Can be set from a minute to a day. - -### Number of violations to trigger alert - -You can specify the alert evaluation period and the number of failures needed to trigger an alert. Allowing you to better define an impact time to trigger an alert. - -For example, if your rule [**Aggregation granularity**](#aggregation-granularity) is defined as '5 minutes', you can trigger an alert only if three failures (15 minutes) of the last hour occurred. This setting is defined by your application business policy. - -## State and resolving alerts - -Log alerts can either be stateless or stateful (currently in preview). - -Stateless alerts fire each time the condition is met, even if fired previously. You can [mark the alert as closed](../alerts/alerts-managing-alert-states.md) once the alert instance is resolved. 
You can also mute actions to prevent them from triggering for a period after an alert rule fired using the **Mute Actions** option in the alert details section. - -See this alert stateless evaluation example: - -| Time | Log condition evaluation | Result -| ------- | ----------| ----------| ------- -| 00:05 | FALSE | Alert doesn't fire. No actions called. -| 00:10 | TRUE | Alert fires and action groups called. New alert state ACTIVE. -| 00:15 | TRUE | Alert fires and action groups called. New alert state ACTIVE. -| 00:20 | FALSE | Alert doesn't fire. No actions called. Pervious alerts state remains ACTIVE. - -Stateful alerts fire once per incident and resolve. The alert rule resolves when the alert condition isn't met for 30 minutes for a specific evaluation period (to account for [log ingestion delay](../alerts/alerts-troubleshoot-log.md#data-ingestion-time-for-logs)), and for three consecutive evaluations to reduce noise if there is flapping conditions. For example, with a frequency of 5 minutes, the alert resolve after 40 minutes or with a frequency of 1 minute, the alert resolve after 32 minutes. The resolved notification is sent out via web-hooks or email, the status of the alert instance (called monitor state) in Azure portal is also set to resolved. - -Stateful alerts feature is currently in preview. You can set this using **Automatically resolve alerts** in the alert details section. - -## Location selection in log alerts - -Log alerts allow you to set a location for alert rules. You can select any of the supported locations, which align to [Log Analytics supported region list](https://azure.microsoft.com/global-infrastructure/services/?products=monitor). - -Location affects which region the alert rule is evaluated in. Queries are executed on the log data in the selected region, that said, the alert service end to end is global. Meaning alert rule definition, fired alerts, notifications, and actions aren't bound to the location in the alert rule. Data is transfer from the set region since the Azure Monitor alerts service is a [non-regional service](https://azure.microsoft.com/global-infrastructure/services/?products=monitor®ions=non-regional). - -## Pricing model - -Each Log Alert rule is billed based the interval at which the log query is evaluated (more frequent query evaluation results in a higher cost). Additionally, for Log Alerts configured for [at scale monitoring](#split-by-alert-dimensions), the cost will also depend on the number of time series created by the dimensions resulting from your query. - -Prices for Log Alert rules are available on the [Azure Monitor pricing page](https://azure.microsoft.com/pricing/details/monitor/). - -### Calculating the price for a Log Alert rule without dimensions - -The price of an alert rule which queries 1 resource event every 15-minutes can be calculated as: - -Total monthly price = 1 resource * 1 log alert rule * price per 15-minute internal log alert rule per month. - -### Calculating the price for a Log Alert rule with dimensions - -The price of an alert rule which monitors 10 VM resources at 1-minute frequency, using resource centric log monitoring, can be calculated as Price of alert rule + Price of number of dimensions. For example: - -Total monthly price = price per 1-minute log alert rule per month + ( 10 time series - 1 included free time series ) * price per 1-min interval monitored per month. - -Pricing of at scale log monitoring is applicable from Scheduled Query Rules API version 2021-02-01. 
- -## View log alerts usage on your Azure bill - -Log Alerts are listed under resource provider `microsoft.insights/scheduledqueryrules` with: - -- Log Alerts on Application Insights shown with exact resource name along with resource group and alert properties. -- Log Alerts on Log Analytics shown with exact resource name along with resource group and alert properties; when created using [scheduledQueryRules API](/rest/api/monitor/scheduledqueryrule-2021-08-01/scheduled-query-rules). -- Log alerts created from [legacy Log Analytics API](./api-alerts.md) aren't tracked [Azure Resources](../../azure-resource-manager/management/overview.md) and don't have enforced unique resource names. These alerts are still created on `microsoft.insights/scheduledqueryrules` as hidden resources, which have this resource naming structure `|||`. Log Alerts on legacy API are shown with above hidden resource name along with resource group and alert properties. - -> [!NOTE] -> Unsupported resource characters such as `<, >, %, &, \, ?, /` are replaced with `_` in the hidden resource names and this will also reflect in the billing information. - -> [!NOTE] -> Log alerts for Log Analytics used to be managed using the legacy [Log Analytics Alert API](./api-alerts.md) and legacy templates of [Log Analytics saved searches and alerts](../insights/solutions.md). [Learn more about switching to the current ScheduledQueryRules API](../alerts/alerts-log-api-switch.md). Any alert rule management should be done using [legacy Log Analytics API](./api-alerts.md) until you decide to switch and you can't use the hidden resources. - -## Next steps - -* Learn about [creating in log alerts in Azure](./alerts-log.md). -* Understand [webhooks in log alerts in Azure](../alerts/alerts-log-webhook.md). -* Learn about [Azure Alerts](./alerts-overview.md). -* Learn more about [Log Analytics](../logs/log-query-overview.md). diff --git a/articles/azure-monitor/alerts/itsm-connector-secure-webhook-connections-azure-configuration.md b/articles/azure-monitor/alerts/itsm-connector-secure-webhook-connections-azure-configuration.md index 1138ff0479e0b..5656fd982efed 100644 --- a/articles/azure-monitor/alerts/itsm-connector-secure-webhook-connections-azure-configuration.md +++ b/articles/azure-monitor/alerts/itsm-connector-secure-webhook-connections-azure-configuration.md @@ -34,6 +34,9 @@ After your application is registered with Azure AD, you can create work items in Action groups provide a modular and reusable way of triggering actions for Azure alerts. You can use action groups with metric alerts, Activity Log alerts, and Azure Log Analytics alerts in the Azure portal. To learn more about action groups, see [Create and manage action groups in the Azure portal](../alerts/action-groups.md). +> [!NOTE] +> If you are using a log alert, the query results must include a “Computer” column containing the configuration items list. + To add a webhook to an action, follow these instructions for Secure Webhook: 1. In the [Azure portal](https://portal.azure.com/), search for and select **Monitor**. The **Monitor** pane consolidates all your monitoring settings and data in one view. diff --git a/articles/azure-monitor/alerts/itsmc-connections.md b/articles/azure-monitor/alerts/itsmc-connections.md index 3e1df5cbe0b3c..307bf01c741af 100644 --- a/articles/azure-monitor/alerts/itsmc-connections.md +++ b/articles/azure-monitor/alerts/itsmc-connections.md @@ -12,7 +12,7 @@ To set up your ITSM environment: 1. Connect to your ITSM. 
- For ServiceNow ITSM, see [the ServiceNow connection instructions](./itsmc-connections-servicenow.md). - - For SCSM, see [the System Center Service Manager connection instructions](./itsmc-connections-scsm.md). + - For SCSM, see [the System Center Service Manager connection instructions](/azure/azure-monitor/alerts/itsmc-connections). >[!NOTE] > As of March 1, 2022, System Center ITSM integrations with Azure alerts is no longer enabled for new customers. New System Center ITSM Connections are not supported. diff --git a/articles/azure-monitor/alerts/itsmc-definition.md b/articles/azure-monitor/alerts/itsmc-definition.md index 47261e9262918..8adff9efbe361 100644 --- a/articles/azure-monitor/alerts/itsmc-definition.md +++ b/articles/azure-monitor/alerts/itsmc-definition.md @@ -60,7 +60,7 @@ After you've prepped your ITSM tool, complete these steps to create a connection 1. Specify the connection settings for the ITSM product that you're using: - [ServiceNow](./itsmc-connections-servicenow.md) - - [System Center Service Manager](./itsmc-connections-scsm.md) + - [System Center Service Manager](/azure/azure-monitor/alerts/itsmc-connections) > [!NOTE] > By default, ITSMC refreshes the connection's configuration data once every 24 hours. To refresh your connection's data instantly to reflect any edits or template updates that you make, select the **Sync** button on your connection's pane: diff --git a/articles/azure-monitor/alerts/media/alerts-log/alerts-create-log-rule-dimensions.png b/articles/azure-monitor/alerts/media/alerts-log/alerts-create-log-rule-dimensions.png new file mode 100644 index 0000000000000..53fc9a8288c11 Binary files /dev/null and b/articles/azure-monitor/alerts/media/alerts-log/alerts-create-log-rule-dimensions.png differ diff --git a/articles/azure-monitor/alerts/media/alerts-log/alerts-create-log-rule-logic.png b/articles/azure-monitor/alerts/media/alerts-log/alerts-create-log-rule-logic.png new file mode 100644 index 0000000000000..35e1abdd95358 Binary files /dev/null and b/articles/azure-monitor/alerts/media/alerts-log/alerts-create-log-rule-logic.png differ diff --git a/articles/azure-monitor/alerts/media/alerts-log/alerts-create-new-alert-rule-expanded.png b/articles/azure-monitor/alerts/media/alerts-log/alerts-create-new-alert-rule-expanded.png index 31fc297f07f55..95217dc8a334b 100644 Binary files a/articles/azure-monitor/alerts/media/alerts-log/alerts-create-new-alert-rule-expanded.png and b/articles/azure-monitor/alerts/media/alerts-log/alerts-create-new-alert-rule-expanded.png differ diff --git a/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/alerts-monitor-menu.png b/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/alerts-monitor-menu.png new file mode 100644 index 0000000000000..a188f6f8bfc4b Binary files /dev/null and b/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/alerts-monitor-menu.png differ diff --git a/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/alerts-resource-menu.png b/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/alerts-resource-menu.png new file mode 100644 index 0000000000000..53585e8aa47ee Binary files /dev/null and b/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/alerts-resource-menu.png differ diff --git a/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/enable-recommended-alert-rules.jpg b/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/enable-recommended-alert-rules.jpg index 
af87e87e78d5d..a530db5c2d500 100644 Binary files a/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/enable-recommended-alert-rules.jpg and b/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/enable-recommended-alert-rules.jpg differ diff --git a/articles/azure-monitor/alerts/media/alerts-overview/alerts-flow.png b/articles/azure-monitor/alerts/media/alerts-overview/alerts-flow.png new file mode 100644 index 0000000000000..160d3032da89d Binary files /dev/null and b/articles/azure-monitor/alerts/media/alerts-overview/alerts-flow.png differ diff --git a/articles/azure-monitor/app/annotations.md b/articles/azure-monitor/app/annotations.md index 3bb3937df858d..0e9f847cc236d 100644 --- a/articles/azure-monitor/app/annotations.md +++ b/articles/azure-monitor/app/annotations.md @@ -3,6 +3,8 @@ title: Release annotations for Application Insights | Microsoft Docs description: Learn how to create annotations to track deployment or other significant events with Application Insights. ms.topic: conceptual ms.date: 07/20/2021 +ms.reviewer: casocha + --- # Release annotations for Application Insights diff --git a/articles/azure-monitor/app/api-custom-events-metrics.md b/articles/azure-monitor/app/api-custom-events-metrics.md index 569326b553842..d7461f6211a04 100644 --- a/articles/azure-monitor/app/api-custom-events-metrics.md +++ b/articles/azure-monitor/app/api-custom-events-metrics.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 05/11/2020 ms.devlang: csharp, java, javascript, vb ms.custom: "devx-track-js, devx-track-csharp" +ms.reviewer: mmcc --- # Application Insights API for custom events and metrics diff --git a/articles/azure-monitor/app/api-filtering-sampling.md b/articles/azure-monitor/app/api-filtering-sampling.md index 5bb417070f752..5c38c1c2de8f8 100644 --- a/articles/azure-monitor/app/api-filtering-sampling.md +++ b/articles/azure-monitor/app/api-filtering-sampling.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 11/23/2016 ms.devlang: csharp, javascript, python ms.custom: "devx-track-js, devx-track-csharp" +ms.reviewer: cithomas --- # Filter and preprocess telemetry in the Application Insights SDK diff --git a/articles/azure-monitor/app/app-map.md b/articles/azure-monitor/app/app-map.md index b8c856b6b72d8..1eea709745e39 100644 --- a/articles/azure-monitor/app/app-map.md +++ b/articles/azure-monitor/app/app-map.md @@ -5,7 +5,7 @@ ms.topic: conceptual ms.date: 05/16/2022 ms.devlang: csharp, java, javascript, python ms.custom: devx-track-csharp -ms.reviewer: sdash +ms.reviewer: rijolly --- # Application Map: Triage Distributed Applications @@ -325,7 +325,10 @@ Adjust sensitivity to achieve the desired confidence level in highlighted edges. ### Limitations of Intelligent View -The Intelligent View works well for large distributed applications but sometimes it can take around one minute to load. File a support ticket if your map doesn't load or a timeout occurs. +* Large distributed applications may take a minute to load Intelligent View. +* Timeframes of up to seven days are supported. + +We would love to hear your feedback. ([Portal feedback](#portal-feedback)) ## Troubleshooting @@ -379,7 +382,10 @@ In a case where an edge is highlighted the explanation from the model should poi #### Intelligent View doesn't load -If Intelligent View doesn't load, ensure that you've opted into the preview on Application Map. +Follow these steps if Intelligent View doesn't load. + +1. Set the configured time frame to six days or less. +1. 
The `Try preview` button must be selected to opt in. :::image type="content" source="media/app-map/intelligent-view-try-preview.png" alt-text="Screenshot of the Application Map user interface preview opt-in button." lightbox="media/app-map/intelligent-view-try-preview.png"::: diff --git a/articles/azure-monitor/app/asp-net-core.md b/articles/azure-monitor/app/asp-net-core.md index 1b715f01c0b3e..086cb268f1c4d 100644 --- a/articles/azure-monitor/app/asp-net-core.md +++ b/articles/azure-monitor/app/asp-net-core.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.devlang: csharp ms.custom: devx-track-csharp ms.date: 10/12/2021 +ms.reviewer: casocha --- # Application Insights for ASP.NET Core applications diff --git a/articles/azure-monitor/app/asp-net-dependencies.md b/articles/azure-monitor/app/asp-net-dependencies.md index 53408d25f5c7b..22a89a240122f 100644 --- a/articles/azure-monitor/app/asp-net-dependencies.md +++ b/articles/azure-monitor/app/asp-net-dependencies.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 08/26/2020 ms.devlang: csharp ms.custom: devx-track-csharp +ms.reviewer: casocha --- # Dependency Tracking in Azure Application Insights diff --git a/articles/azure-monitor/app/asp-net-exceptions.md b/articles/azure-monitor/app/asp-net-exceptions.md index 083c250f6d491..ea5c81c3fe273 100644 --- a/articles/azure-monitor/app/asp-net-exceptions.md +++ b/articles/azure-monitor/app/asp-net-exceptions.md @@ -5,8 +5,10 @@ ms.topic: conceptual ms.devlang: csharp ms.custom: devx-track-csharp ms.date: 05/19/2021 +ms.reviewer: casocha --- + # Diagnose exceptions in web apps with Application Insights Exceptions in web applications can be reported with [Application Insights](./app-insights-overview.md). You can correlate failed requests with exceptions and other events on both the client and server, so that you can quickly diagnose the causes. In this article, you'll learn how to set up exception reporting, report exceptions explicitly, diagnose failures, and more. diff --git a/articles/azure-monitor/app/asp-net-trace-logs.md b/articles/azure-monitor/app/asp-net-trace-logs.md index d33d6932bb1c3..1728be2d10d7a 100644 --- a/articles/azure-monitor/app/asp-net-trace-logs.md +++ b/articles/azure-monitor/app/asp-net-trace-logs.md @@ -5,8 +5,10 @@ ms.topic: conceptual ms.devlang: csharp ms.custom: devx-track-csharp ms.date: 05/08/2019 +ms.reviewer: casocha --- + # Explore .NET/.NET Core and Python trace logs in Application Insights Send diagnostic tracing logs for your ASP.NET/ASP.NET Core application from ILogger, NLog, log4Net, or System.Diagnostics.Trace to [Azure Application Insights][start]. For Python applications, send diagnostic tracing logs using AzureLogHandler in OpenCensus Python for Azure Monitor. You can then explore and search them. Those logs are merged with the other log files from your application, so you can identify traces that are associated with each user request and correlate them with other events and exception reports. 
diff --git a/articles/azure-monitor/app/asp-net-troubleshoot-no-data.md b/articles/azure-monitor/app/asp-net-troubleshoot-no-data.md index 8e8db85e9f5d1..b8c9e0cf1a589 100644 --- a/articles/azure-monitor/app/asp-net-troubleshoot-no-data.md +++ b/articles/azure-monitor/app/asp-net-troubleshoot-no-data.md @@ -5,8 +5,10 @@ ms.topic: conceptual ms.devlang: csharp ms.custom: devx-track-csharp ms.date: 05/21/2020 +ms.reviewer: casocha --- + # Troubleshooting no data - Application Insights for .NET/.NET Core [!INCLUDE [azure-monitor-log-analytics-rebrand](../../../includes/azure-monitor-instrumentation-key-deprecation.md)] diff --git a/articles/azure-monitor/app/auto-collect-dependencies.md b/articles/azure-monitor/app/auto-collect-dependencies.md index d221da2d47f4e..118463a0884f6 100644 --- a/articles/azure-monitor/app/auto-collect-dependencies.md +++ b/articles/azure-monitor/app/auto-collect-dependencies.md @@ -5,6 +5,7 @@ ms.topic: reference ms.devlang: csharp, java, javascript ms.custom: devx-track-dotnet ms.date: 05/06/2020 +ms.reviewer: mmcc --- # Dependency auto-collection diff --git a/articles/azure-monitor/app/automate-custom-reports.md b/articles/azure-monitor/app/automate-custom-reports.md index 89fa0a577a60d..9dd29ec56e564 100644 --- a/articles/azure-monitor/app/automate-custom-reports.md +++ b/articles/azure-monitor/app/automate-custom-reports.md @@ -3,7 +3,7 @@ title: Automate custom reports with Application Insights data description: Automate custom daily/weekly/monthly reports with Azure Monitor Application Insights data ms.topic: conceptual ms.date: 05/20/2019 -ms.reviewer: sdash +ms.reviewer: tilee --- # Automate custom reports with Application Insights data diff --git a/articles/azure-monitor/app/availability-alerts.md b/articles/azure-monitor/app/availability-alerts.md index c55cd9cebfa34..d6d545c173be0 100644 --- a/articles/azure-monitor/app/availability-alerts.md +++ b/articles/azure-monitor/app/availability-alerts.md @@ -4,6 +4,7 @@ description: Learn how to set up web tests in Application Insights. Get alerts i ms.topic: conceptual ms.date: 06/19/2019 ms.reviewer: sdash + --- # Availability alerts diff --git a/articles/azure-monitor/app/availability-multistep.md b/articles/azure-monitor/app/availability-multistep.md index 1eaf689fa0436..b2c3c13a6f68b 100644 --- a/articles/azure-monitor/app/availability-multistep.md +++ b/articles/azure-monitor/app/availability-multistep.md @@ -3,6 +3,7 @@ title: Monitor with multi-step web tests - Azure Application Insights description: Set up multi-step web tests to monitor your web applications with Azure Application Insights ms.topic: conceptual ms.date: 07/21/2021 +ms.reviewer: shyamala --- # Multi-step web tests diff --git a/articles/azure-monitor/app/availability-overview.md b/articles/azure-monitor/app/availability-overview.md index c33a0fa360bbf..7ca9aa4f6f630 100644 --- a/articles/azure-monitor/app/availability-overview.md +++ b/articles/azure-monitor/app/availability-overview.md @@ -3,6 +3,7 @@ title: Application Insights availability tests description: Set up recurring web tests to monitor availability and responsiveness of your app or website. 
ms.topic: conceptual ms.date: 07/13/2021 +ms.reviewer: shyamala --- # Application Insights availability tests diff --git a/articles/azure-monitor/app/availability-private-test.md b/articles/azure-monitor/app/availability-private-test.md index df5767f6e3673..1b98065a44f4b 100644 --- a/articles/azure-monitor/app/availability-private-test.md +++ b/articles/azure-monitor/app/availability-private-test.md @@ -3,6 +3,7 @@ title: Private availability testing - Azure Monitor Application Insights description: Learn how to use availability tests on internal servers that run behind a firewall with private testing. ms.topic: conceptual ms.date: 05/14/2021 +ms.reviewer: shyamala --- # Private testing diff --git a/articles/azure-monitor/app/azure-ad-authentication.md b/articles/azure-monitor/app/azure-ad-authentication.md index 4edfc86e40631..242d91ef4ffa3 100644 --- a/articles/azure-monitor/app/azure-ad-authentication.md +++ b/articles/azure-monitor/app/azure-ad-authentication.md @@ -4,6 +4,7 @@ description: Learn how to enable Azure Active Directory (Azure AD) authenticatio ms.topic: conceptual ms.date: 08/02/2021 ms.devlang: csharp, java, javascript, python +ms.reviewer: rijolly --- # Azure AD authentication for Application Insights (Preview) diff --git a/articles/azure-monitor/app/azure-functions-supported-features.md b/articles/azure-monitor/app/azure-functions-supported-features.md index a6632e85e802e..b08b98872886c 100644 --- a/articles/azure-monitor/app/azure-functions-supported-features.md +++ b/articles/azure-monitor/app/azure-functions-supported-features.md @@ -4,6 +4,7 @@ description: Application Insights Supported Features for Azure Functions ms.topic: reference ms.date: 4/23/2019 ms.devlang: csharp +ms.reviewer: quying --- # Application Insights for Azure Functions supported features diff --git a/articles/azure-monitor/app/azure-vm-vmss-apps.md b/articles/azure-monitor/app/azure-vm-vmss-apps.md index 7eaf28c070375..777a93e098578 100644 --- a/articles/azure-monitor/app/azure-vm-vmss-apps.md +++ b/articles/azure-monitor/app/azure-vm-vmss-apps.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 08/26/2019 ms.devlang: csharp, java, javascript, python ms.custom: devx-track-azurepowershell +ms.reviewer: abinetabate --- # Deploy the Azure Monitor Application Insights Agent on Azure virtual machines and Azure virtual machine scale sets diff --git a/articles/azure-monitor/app/azure-web-apps-java.md b/articles/azure-monitor/app/azure-web-apps-java.md index 2f4c163c6134f..70766bf5e1fbf 100644 --- a/articles/azure-monitor/app/azure-web-apps-java.md +++ b/articles/azure-monitor/app/azure-web-apps-java.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 08/05/2021 ms.devlang: java ms.custom: "devx-track-java" +ms.reviewer: abinetabate --- # Application Monitoring for Azure App Service and Java @@ -19,9 +20,7 @@ You can apply additional configurations, and then based on your specific scenari ### Auto-instrumentation through Azure portal -You can turn on monitoring for your Java apps running in Azure App Service just with one click, no code change required. -Application Insights for Java is integrated with Azure App Service on Linux - both code-based and custom containers, and with App Service on Windows for code-based apps. -The integration adds [Application Insights Java 3.x](./java-in-process-agent.md) and you will get the telemetry auto-collected. +You can turn on monitoring for your Java apps running in Azure App Service just with one click, no code change required. 
The integration adds [Application Insights Java 3.x](./java-in-process-agent.md) and you will get the telemetry auto-collected. 1. **Select Application Insights** in the Azure control panel for your app service, then select **Enable**. @@ -102,4 +101,4 @@ For the latest updates and bug fixes, [consult the release notes](web-app-extens * [Monitor service health metrics](../data-platform.md) to make sure your service is available and responsive. * [Receive alert notifications](../alerts/alerts-overview.md) whenever operational events happen or metrics cross a threshold. * Use [Application Insights for JavaScript apps and web pages](javascript.md) to get client telemetry from the browsers that visit a web page. -* [Set up Availability web tests](monitor-web-app-availability.md) to be alerted if your site is down. \ No newline at end of file +* [Set up Availability web tests](monitor-web-app-availability.md) to be alerted if your site is down. diff --git a/articles/azure-monitor/app/azure-web-apps-net-core.md b/articles/azure-monitor/app/azure-web-apps-net-core.md index 0f452af553b29..8a4c13c529420 100644 --- a/articles/azure-monitor/app/azure-web-apps-net-core.md +++ b/articles/azure-monitor/app/azure-web-apps-net-core.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 08/05/2021 ms.devlang: csharp ms.custom: devx-track-dotnet +ms.reviewer: abinetabate --- # Application Monitoring for Azure App Service and ASP.NET Core @@ -88,7 +89,7 @@ To enable telemetry collection with Application Insights, only the Application s |App setting name | Definition | Value | |-----------------|:------------|-------------:| |ApplicationInsightsAgent_EXTENSION_VERSION | Main extension, which controls runtime monitoring. | `~2` for Windows or `~3` for Linux | -|XDT_MicrosoftApplicationInsights_Mode | In default mode, only essential features are enabled to insure optimal performance. | `disabled` or `recommended`. | +|XDT_MicrosoftApplicationInsights_Mode | In default mode, only essential features are enabled to ensure optimal performance. | `disabled` or `recommended`. | |XDT_MicrosoftApplicationInsights_PreemptSdk | For ASP.NET Core apps only. Enables Interop (interoperation) with Application Insights SDK. Loads the extension side-by-side with the SDK and uses it to send telemetry (disables the Application Insights SDK). |`1`| @@ -99,11 +100,11 @@ To enable telemetry collection with Application Insights, only the Application s ### Upgrade from versions 2.8.9 and up -Upgrading from version 2.8.9 happens automatically, without any additional actions. The new monitoring bits are delivered in the background to the target app service, and on application restart they'll be picked up. +Upgrading from version 2.8.9 happens automatically, without any extra actions. The new monitoring bits are delivered in the background to the target app service, and on application restart they'll be picked up. To check which version of the extension you're running, go to `https://yoursitename.scm.azurewebsites.net/ApplicationInsights`. -:::image type="content"source="./media/azure-web-apps/extension-version.png" alt-text="Screenshot of the URL path to check the version of the extension you are running." border="false"::: +:::image type="content"source="./media/azure-web-apps/extension-version.png" alt-text="Screenshot of the URL path to check the version of the extension you're running." 
border="false"::: ### Upgrade from versions 1.0.0 - 2.6.5 @@ -140,10 +141,9 @@ Below is our step-by-step troubleshooting guide for extension/agent based monito If a similar value isn't present, it means the application isn't currently running or isn't supported. To ensure that the application is running, try manually visiting the application url/application endpoints, which will allow the runtime information to become available. - - Confirm that `IKeyExists` is `true` - If it is `false`, add `APPINSIGHTS_INSTRUMENTATIONKEY` and `APPLICATIONINSIGHTS_CONNECTION_STRING` with your ikey guid to your application settings. + - Confirm that `IKeyExists` is `true`. If it's `false`, add `APPINSIGHTS_INSTRUMENTATIONKEY` and `APPLICATIONINSIGHTS_CONNECTION_STRING` with your ikey GUID to your application settings. - - In case your application refers to any Application Insights packages, for example if you've previously instrumented (or attempted to instrument) your app with the [ASP.NET Core SDK](./asp-net-core.md), enabling the App Service integration may not take effect and the data may not appear in Application Insights. To fix the issue, in portal turn on "Interop with Application Insights SDK" and you'll start seeing the data in Application Insights. + - If your application refers to any Application Insights packages, enabling the App Service integration may not take effect and the data may not appear in Application Insights. An example would be if you've previously instrumented, or attempted to instrument, your app with the [ASP.NET Core SDK](./asp-net-core.md). To fix the issue, in portal turn on "Interop with Application Insights SDK" and you'll start seeing the data in Application Insights. - > [!IMPORTANT] > This functionality is in preview @@ -157,97 +157,26 @@ Below is our step-by-step troubleshooting guide for extension/agent based monito # [Linux](#tab/linux) -1. Check that `ApplicationInsightsAgent_EXTENSION_VERSION` app setting is set to a value of "~3". -2. Navigate to */home\LogFiles\ApplicationInsights\status* and open *status_557de146e7fa_27_1.json*. - - Confirm that `AppAlreadyInstrumented` is set to false, `AiHostingStartupLoaded` to true and `IKeyExists` to true. - - Below is an example of the JSON file: - - ```json - "AppType":".NETCoreApp,Version=v6.0", - - "MachineName":"557de146e7fa", - - "PID":"27", - - "AppDomainId":"1", - - "AppDomainName":"dotnet6demo", - - "InstrumentationEngineLoaded":false, - - "InstrumentationEngineExtensionLoaded":false, - - "HostingStartupBootstrapperLoaded":true, - - "AppAlreadyInstrumented":false, - - "AppDiagnosticSourceAssembly":"System.Diagnostics.DiagnosticSource, Version=6.0.0.0, Culture=neutral, PublicKeyToken=cc7b13ffcd2ddd51", - - "AiHostingStartupLoaded":true, - - "IKeyExists":true, - - "IKey":"00000000-0000-0000-0000-000000000000", - - "ConnectionString":"InstrumentationKey=00000000-0000-0000-0000-000000000000;IngestionEndpoint=https://westus-0.in.applicationinsights.azure.com/" - - ``` - - If `AppAlreadyInstrumented` is true this indicates that the extension detected that some aspect of the SDK is already present in the Application, and will back-off. - -##### No Data - -1. List and identify the process that is hosting an app. Navigate to your terminal and on the command line type `ps ax`. - - The output should be similar to: - - ```bash - PID TTY STAT TIME COMMAND - - 1 ? SNs 0:00 /bin/bash /opt/startup/startup.sh - - 19 ? SNs 0:00 /usr/sbin/sshd - - 27 ? SNLl 5:52 dotnet dotnet6demo.dll - - 50 ? 
SNs 0:00 sshd: root@pts/0 - - 53 pts/0 SNs+ 0:00 -bash - - 55 ? SNs 0:00 sshd: root@pts/1 - - 57 pts/1 SNs+ 0:00 -bash - ``` - - -1. Then list environment variables from app process. On the command line type `cat /proc/27/environ | tr '\0' '\n`. - - The output should be similar to: +1. Check that `ApplicationInsightsAgent_EXTENSION_VERSION` app setting is set to a value of "~2" +1. Browse to https:// your site name .scm.azurewebsites.net/ApplicationInsights +1. Within this site, confirm: + * The status source exists and looks like: `Status source /var/log/applicationinsights/status_abcde1234567_89_0.json` + * `Auto-Instrumentation enabled successfully`, is displayed. If a similar value isn't present, it means the application isn't running or isn't supported. To ensure that the application is running, try manually visiting the application url/application endpoints, which will allow the runtime information to become available. + * `IKeyExists` is `true`. If it's `false`, add `APPINSIGHTS_INSTRUMENTATIONKEY` and `APPLICATIONINSIGHTS_CONNECTION_STRING` with your ikey GUID to your application settings. - ```bash - ASPNETCORE_HOSTINGSTARTUPASSEMBLIES=Microsoft.ApplicationInsights.StartupBootstrapper - - DOTNET_STARTUP_HOOKS=/DotNetCoreAgent/2.8.39/StartupHook/Microsoft.ApplicationInsights.StartupHook.dll - - APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=00000000-0000-0000-0000-000000000000;IngestionEndpoint=https://westus-0.in.applicationinsights.azure.com/ - - ``` - -1. Validate that `ASPNETCORE_HOSTINGSTARTUPASSEMBLIES`, `DOTNET_STARTUP_HOOKS`, and `APPLICATIONINSIGHTS_CONNECTION_STRING` are set. +:::image type="content" source="media/azure-web-apps-net-core/auto-instrumentation-status.png" alt-text="Screenshot displaying auto instrumentation status web page." lightbox="media/azure-web-apps-net-core/auto-instrumentation-status.png"::: --- -#### Default website deployed with web apps does not support automatic client-side monitoring +#### Default website deployed with web apps doesn't support automatic client-side monitoring -When you create a web app with the `ASP.NET Core` runtimes in Azure App Services, it deploys a single static HTML page as a starter website. The static webpage also loads a ASP.NET managed web part in IIS. This allows for testing codeless server-side monitoring, but doesn't support automatic client-side monitoring. +When you create a web app with the `ASP.NET Core` runtimes in Azure App Services, it deploys a single static HTML page as a starter website. The static webpage also loads an ASP.NET managed web part in IIS. This behavior allows for testing codeless server-side monitoring, but doesn't support automatic client-side monitoring. If you wish to test out codeless server and client-side monitoring for ASP.NET Core in an Azure App Services web app, we recommend following the official guides for [creating a ASP.NET Core web app](../../app-service/quickstart-dotnetcore.md). Then use the instructions in the current article to enable monitoring. [!INCLUDE [azure-web-apps-troubleshoot](../../../includes/azure-monitor-app-insights-azure-web-apps-troubleshoot.md)] -### PHP and WordPress are not supported +### PHP and WordPress aren't supported PHP and WordPress sites aren't supported. There's currently no officially supported SDK/agent for server-side monitoring of these workloads. 
However, manually instrumenting client-side transactions on a PHP or WordPress site by adding the client-side JavaScript to your web pages can be accomplished by using the [JavaScript SDK](./javascript.md). @@ -255,9 +184,9 @@ The table below provides a more detailed explanation of what these values mean, |Problem Value |Explanation |Fix | |---- |----|---| -| `AppAlreadyInstrumented:true` | This value indicates that the extension detected that some aspect of the SDK is already present in the Application, and will back-off. It can be due to a reference to `Microsoft.ApplicationInsights.AspNetCore`, or `Microsoft.ApplicationInsights` | Remove the references. Some of these references are added by default from certain Visual Studio templates, and older versions of Visual Studio may add references to `Microsoft.ApplicationInsights`. | +| `AppAlreadyInstrumented:true` | This value indicates that the extension detected that some aspect of the SDK is already present in the Application, and will back off. It can be due to a reference to `Microsoft.ApplicationInsights.AspNetCore` or `Microsoft.ApplicationInsights`. | Remove the references. Some of these references are added by default from certain Visual Studio templates, and older versions of Visual Studio reference `Microsoft.ApplicationInsights`. | |`AppAlreadyInstrumented:true` | This value can also be caused by the presence of the Microsoft.ApplicationInsights dll in the app folder from a previous deployment. | Clean the app folder to ensure that these dlls are removed. Check both your local app's bin directory, and the wwwroot directory on the App Service. (To check the wwwroot directory of your App Service web app: Advanced Tools (Kudu) > Debug console > CMD > home\site\wwwroot). | -|`IKeyExists:false`|This value indicates that the instrumentation key is not present in the AppSetting, `APPINSIGHTS_INSTRUMENTATIONKEY`. Possible causes: The values may have been accidentally removed, forgot to set the values in automation script, etc. | Make sure the setting is present in the App Service application settings. | +|`IKeyExists:false`|This value indicates that the instrumentation key isn't present in the AppSetting, `APPINSIGHTS_INSTRUMENTATIONKEY`. Possible causes include the value being accidentally removed or not being set in an automation script. | Make sure the setting is present in the App Service application settings.
| ## Release notes diff --git a/articles/azure-monitor/app/azure-web-apps-net.md b/articles/azure-monitor/app/azure-web-apps-net.md index d28df4981caac..a0b750b8ed705 100644 --- a/articles/azure-monitor/app/azure-web-apps-net.md +++ b/articles/azure-monitor/app/azure-web-apps-net.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 08/05/2021 ms.devlang: javascript ms.custom: devx-track-dotnet +ms.reviewer: abinetabate --- # Application Monitoring for Azure App Service and ASP.NET diff --git a/articles/azure-monitor/app/azure-web-apps-nodejs.md b/articles/azure-monitor/app/azure-web-apps-nodejs.md index acef930dfbf9b..8eac1607970b0 100644 --- a/articles/azure-monitor/app/azure-web-apps-nodejs.md +++ b/articles/azure-monitor/app/azure-web-apps-nodejs.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 08/05/2021 ms.devlang: javascript ms.custom: "devx-track-js" +ms.reviewer: abinetabate --- # Application Monitoring for Azure App Service and Node.js diff --git a/articles/azure-monitor/app/azure-web-apps.md b/articles/azure-monitor/app/azure-web-apps.md index da5c503774a46..71cbbc8a644f1 100644 --- a/articles/azure-monitor/app/azure-web-apps.md +++ b/articles/azure-monitor/app/azure-web-apps.md @@ -18,7 +18,7 @@ There are two ways to enable application monitoring for Azure App Services hoste - This method is the easiest to enable, and no code change or advanced configurations are required. It is often referred to as "runtime" monitoring. For Azure App Services we recommend at a minimum enabling this level of monitoring, and then based on your specific scenario you can evaluate whether more advanced monitoring through manual instrumentation is needed. - - The following are support for auto-instrumentation monitoring: + - The following are supported for auto-instrumentation monitoring: - [.NET Core](./azure-web-apps-net-core.md) - [.NET](./azure-web-apps-net.md) - [Java](./azure-web-apps-java.md) diff --git a/articles/azure-monitor/app/cloudservices.md b/articles/azure-monitor/app/cloudservices.md index 355a8d0d26a6e..1b7004c8800c3 100644 --- a/articles/azure-monitor/app/cloudservices.md +++ b/articles/azure-monitor/app/cloudservices.md @@ -4,7 +4,8 @@ description: Monitor your web and worker roles effectively with Application Insi ms.topic: conceptual ms.devlang: csharp ms.custom: devx-track-csharp -ms.date: 09/05/2018 +ms.date: 06/02/2022 +ms.reviewer: abinetabate --- # Application Insights for Azure cloud services @@ -177,12 +178,10 @@ If there is no data, do the following: 1. In the app, open various pages so that it generates some telemetry. 1. Wait a few seconds, and then click **Refresh**. -For more information, see [Troubleshooting](https://docs.microsoft.com/azure/azure-monitor/faq#application-insights). - ## View Azure Diagnostics events You can find the [Azure Diagnostics](../agents/diagnostics-extension-overview.md) information in Application Insights in the following locations: -* Performance counters are displayed as custom metrics. +* Performance counters are displayed as custom metrics. * Windows event logs are shown as traces and custom events. * Application logs, ETW logs, and any diagnostics infrastructure logs appear as traces. 
diff --git a/articles/azure-monitor/app/codeless-overview.md b/articles/azure-monitor/app/codeless-overview.md index 65868c2b155d3..6b6df1bfb24e9 100644 --- a/articles/azure-monitor/app/codeless-overview.md +++ b/articles/azure-monitor/app/codeless-overview.md @@ -3,6 +3,7 @@ title: Monitor your apps without code changes - auto-instrumentation for Azure M description: Overview of auto-instrumentation for Azure Monitor Application Insights - codeless application performance management ms.topic: conceptual ms.date: 08/31/2021 +ms.reviewer: abinetabate --- # What is auto-instrumentation for Azure Monitor application insights? @@ -21,7 +22,8 @@ As we're adding new integrations, the auto-instrumentation capability matrix bec |Environment/Resource Provider | .NET | .NET Core | Java | Node.js | Python | |---------------------------------------|-----------------|-----------------|-----------------|-----------------|-----------------| -|Azure App Service on Windows | GA, OnBD* | GA, opt-in | Public Preview, Container and Custom Containers are GA | Public Preview | Not supported | +|Azure App Service on Windows - Publish as Code | GA, OnBD* | GA | GA | GA, OnBD* | Not supported | +|Azure App Service on Windows - Publish as Docker | Public Preview | Public Preview | Public Preview | Not supported | Not supported | |Azure App Service on Linux | N/A | Public Preview | GA | GA | Not supported | |Azure Functions - basic | GA, OnBD* | GA, OnBD* | GA, OnBD* | GA, OnBD* | GA, OnBD* | |Azure Functions - dependencies | Not supported | Not supported | Public Preview | Not supported | Through [extension](monitor-functions.md#distributed-tracing-for-python-function-apps) | diff --git a/articles/azure-monitor/app/configuration-with-applicationinsights-config.md b/articles/azure-monitor/app/configuration-with-applicationinsights-config.md index 2e1c5b73c1c83..a3d890493bca0 100644 --- a/articles/azure-monitor/app/configuration-with-applicationinsights-config.md +++ b/articles/azure-monitor/app/configuration-with-applicationinsights-config.md @@ -5,7 +5,8 @@ ms.topic: conceptual ms.date: 05/22/2019 ms.devlang: csharp ms.custom: devx-track-csharp -ms.reviewer: olegan +ms.reviewer: casocha + --- # Configuring the Application Insights SDK with ApplicationInsights.config or .xml diff --git a/articles/azure-monitor/app/console.md b/articles/azure-monitor/app/console.md index b412b6ab3f245..533308afe0148 100644 --- a/articles/azure-monitor/app/console.md +++ b/articles/azure-monitor/app/console.md @@ -5,7 +5,7 @@ ms.topic: conceptual ms.date: 05/21/2020 ms.devlang: csharp ms.custom: devx-track-csharp -ms.reviewer: lmolkova +ms.reviewer: casocha --- # Application Insights for .NET console applications diff --git a/articles/azure-monitor/app/continuous-monitoring.md b/articles/azure-monitor/app/continuous-monitoring.md index fd46e4554b431..36c849b52f10c 100644 --- a/articles/azure-monitor/app/continuous-monitoring.md +++ b/articles/azure-monitor/app/continuous-monitoring.md @@ -3,6 +3,7 @@ title: Continuous monitoring of your DevOps release pipeline with Azure Pipeline description: Provides instructions to quickly set up continuous monitoring with Application Insights ms.topic: conceptual ms.date: 05/01/2020 +ms.reviewer: vitalyg --- # Add continuous monitoring to your release pipeline diff --git a/articles/azure-monitor/app/convert-classic-resource.md b/articles/azure-monitor/app/convert-classic-resource.md index 72d0d8cb1ebfa..60950007fc26e 100644 --- a/articles/azure-monitor/app/convert-classic-resource.md +++ 
b/articles/azure-monitor/app/convert-classic-resource.md @@ -4,6 +4,7 @@ description: Learn about the steps required to upgrade your Azure Monitor Applic ms.topic: conceptual ms.date: 09/23/2020 ms.custom: devx-track-azurepowershell +ms.reviewer: cawa --- # Migrate to workspace-based Application Insights resources @@ -40,7 +41,7 @@ If you don't need to migrate an existing resource, and instead want to create a - A Log Analytics workspace with the access control mode set to the **`use resource or workspace permissions`** setting. - - Workspace-based Application Insights resources aren't compatible with workspaces set to the dedicated **`workspace based permissions`** setting. To learn more about Log Analytics workspace access control, consult the [Log Analytics configure access control mode guidance](../logs/manage-access.md#configure-access-control-mode) + - Workspace-based Application Insights resources aren't compatible with workspaces set to the dedicated **`workspace based permissions`** setting. To learn more about Log Analytics workspace access control, consult the [access control mode guidance](../logs/manage-access.md#access-control-mode) - If you don't already have an existing Log Analytics Workspace, [consult the Log Analytics workspace creation documentation](../logs/quick-create-workspace.md). @@ -224,7 +225,7 @@ From within the Application Insights resource pane, select **Properties** > **Ch **Error message:** *The selected workspace is configured with workspace-based access mode. Some APM features may be impacted. Select another workspace or allow resource-based access in the workspace settings. You can override this error by using CLI.* -In order for your workspace-based Application Insights resource to operate properly you need to change the access control mode of your target Log Analytics workspace to the **resource or workspace permissions** setting. This setting is located in the Log Analytics workspace UI under **Properties** > **Access control mode**. For detailed instructions, consult the [Log Analytics configure access control mode guidance](../logs/manage-access.md#configure-access-control-mode). If your access control mode is set to the exclusive **Require workspace permissions** setting, migration via the portal migration experience will remain blocked. +In order for your workspace-based Application Insights resource to operate properly you need to change the access control mode of your target Log Analytics workspace to the **resource or workspace permissions** setting. This setting is located in the Log Analytics workspace UI under **Properties** > **Access control mode**. For detailed instructions, consult the [Log Analytics configure access control mode guidance](../logs/manage-access.md#access-control-mode). If your access control mode is set to the exclusive **Require workspace permissions** setting, migration via the portal migration experience will remain blocked. If you can’t change the access control mode for security reasons for your current target workspace, we recommend creating a new Log Analytics workspace to use for the migration. 
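If you'd rather script the CLI override mentioned above than work in the portal, a minimal sketch using the Azure CLI `application-insights` extension might look like the following. The resource name, resource group, and workspace resource ID are placeholders; replace them with your own values.

```azurecli
# One-time step: install the Application Insights CLI extension.
az extension add --name application-insights

# Associate the classic resource with the target Log Analytics workspace,
# which migrates it to a workspace-based resource.
az monitor app-insights component update \
  --app my-app-insights-resource \
  --resource-group my-resource-group \
  --workspace "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-resource-group/providers/Microsoft.OperationalInsights/workspaces/my-workspace"
```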
@@ -293,7 +294,7 @@ Legacy table: availability |ApplicationInsights|Type|LogAnalytics|Type| |:---|:---|:---|:---| -|appId|string|\_ResourceGUID|string| +|appId|string|ResourceGUID|string| |application_Version|string|AppVersion|string| |appName|string|\_ResourceId|string| |client_Browser|string|ClientBrowser|string| @@ -337,7 +338,7 @@ Legacy table: browserTimings |ApplicationInsights|Type|LogAnalytics|Type| |:---|:---|:---|:---| -|appId|string|\_ResourceGUID|string| +|appId|string|ResourceGUID|string| |application_Version|string|AppVersion|string| |appName|string|\_ResourceId|string| |client_Browser|string|ClientBrowser|string| @@ -381,7 +382,7 @@ Legacy table: dependencies |ApplicationInsights|Type|LogAnalytics|Type| |:---|:---|:---|:---| -|appId|string|\_ResourceGUID|string| +|appId|string|ResourceGUID|string| |application_Version|string|AppVersion|string| |appName|string|\_ResourceId|string| |client_Browser|string|ClientBrowser|string| @@ -426,7 +427,7 @@ Legacy table: customEvents |ApplicationInsights|Type|LogAnalytics|Type| |:---|:---|:---|:---| -|appId|string|\_ResourceGUID|string| +|appId|string|ResourceGUID|string| |application_Version|string|AppVersion|string| |appName|string|\_ResourceId|string| |client_Browser|string|ClientBrowser|string| @@ -463,7 +464,7 @@ Legacy table: customMetrics |ApplicationInsights|Type|LogAnalytics|Type| |:---|:---|:---|:---| -|appId|string|\_ResourceGUID|string| +|appId|string|ResourceGUID|string| |application_Version|string|AppVersion|string| |appName|string|\_ResourceId|string| |client_Browser|string|ClientBrowser|string| @@ -504,7 +505,7 @@ Legacy table: pageViews |ApplicationInsights|Type|LogAnalytics|Type| |:---|:---|:---|:---| -|appId|string|\_ResourceGUID|string| +|appId|string|ResourceGUID|string| |application_Version|string|AppVersion|string| |appName|string|\_ResourceId|string| |client_Browser|string|ClientBrowser|string| @@ -545,7 +546,7 @@ Legacy table: performanceCounters |ApplicationInsights|Type|LogAnalytics|Type| |:---|:---|:---|:---| -|appId|string|\_ResourceGUID|string| +|appId|string|ResourceGUID|string| |application_Version|string|AppVersion|string| |appName|string|\_ResourceId|string| |category|string|Category|string| @@ -584,7 +585,7 @@ Legacy table: requests |ApplicationInsights|Type|LogAnalytics|Type| |:---|:---|:---|:---| -|appId|string|\_ResourceGUID|string| +|appId|string|ResourceGUID|string| |application_Version|string|AppVersion|string| |appName|string|\_ResourceId|string| |client_Browser|string|ClientBrowser|string| @@ -628,7 +629,7 @@ Legacy table: exceptions |ApplicationInsights|Type|LogAnalytics|Type| |:---|:---|:---|:---| -|appId|string|\_ResourceGUID|string| +|appId|string|ResourceGUID|string| |application_Version|string|AppVersion|string| |appName|string|\_ResourceId|string| |assembly|string|Assembly|string| @@ -680,7 +681,7 @@ Legacy table: traces |ApplicationInsights|Type|LogAnalytics|Type| |:---|:---|:---|:---| -|appId|string|\_ResourceGUID|string| +|appId|string|ResourceGUID|string| |application_Version|string|AppVersion|string| |appName|string|\_ResourceId|string| |client_Browser|string|ClientBrowser|string| diff --git a/articles/azure-monitor/app/correlation.md b/articles/azure-monitor/app/correlation.md index 3eb4e93ccc2ea..79dbae77555da 100644 --- a/articles/azure-monitor/app/correlation.md +++ b/articles/azure-monitor/app/correlation.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 06/07/2019 ms.devlang: csharp, java, javascript, python ms.custom: "devx-track-python, devx-track-csharp" +ms.reviewer: rijolly 
--- # Telemetry correlation in Application Insights @@ -206,6 +207,12 @@ The `operation_ParentId` field is in the format `.`, where OpenCensus Python enables you to correlate logs by adding a trace ID, a span ID, and a sampling flag to log records. You add these attributes by installing OpenCensus [logging integration](https://pypi.org/project/opencensus-ext-logging/). The following attributes will be added to Python `LogRecord` objects: `traceId`, `spanId`, and `traceSampled`. (applicable only for loggers that are created after the integration) +Install the OpenCensus logging integration: + +```console +python -m pip install opencensus-ext-logging +``` + **Sample application** ```python diff --git a/articles/azure-monitor/app/create-new-resource.md b/articles/azure-monitor/app/create-new-resource.md index a877716a35d28..06c05370d462e 100644 --- a/articles/azure-monitor/app/create-new-resource.md +++ b/articles/azure-monitor/app/create-new-resource.md @@ -4,6 +4,7 @@ description: Manually set up Application Insights monitoring for a new live appl ms.topic: conceptual ms.date: 02/10/2021 ms.custom: devx-track-azurepowershell, devx-track-azurecli +ms.reviewer: dalek --- # Create an Application Insights resource diff --git a/articles/azure-monitor/app/custom-data-correlation.md b/articles/azure-monitor/app/custom-data-correlation.md index 4ddba51735ef1..3346cd7182069 100644 --- a/articles/azure-monitor/app/custom-data-correlation.md +++ b/articles/azure-monitor/app/custom-data-correlation.md @@ -3,6 +3,7 @@ title: Azure Application Insights | Microsoft Docs description: Correlate data from Application Insights to other datasets, such as data enrichment or lookup tables, non-Application Insights data sources, and custom data. ms.topic: conceptual ms.date: 08/08/2018 +ms.reviewer: evternov --- # Correlating Application Insights data with custom data sources diff --git a/articles/azure-monitor/app/custom-endpoints.md b/articles/azure-monitor/app/custom-endpoints.md index e5cf1486a0e6f..6a5570bf312b9 100644 --- a/articles/azure-monitor/app/custom-endpoints.md +++ b/articles/azure-monitor/app/custom-endpoints.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 07/26/2019 ms.devlang: csharp, java, javascript, python ms.custom: references_regions, devx-track-js +ms.reviewer: newylie --- # Application Insights overriding default endpoints diff --git a/articles/azure-monitor/app/custom-operations-tracking.md b/articles/azure-monitor/app/custom-operations-tracking.md index a3bd7d64d0e70..5eb166c4e7214 100644 --- a/articles/azure-monitor/app/custom-operations-tracking.md +++ b/articles/azure-monitor/app/custom-operations-tracking.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.devlang: csharp ms.custom: devx-track-csharp ms.date: 11/26/2019 +ms.reviewer: casocha --- # Track custom operations with Application Insights .NET SDK diff --git a/articles/azure-monitor/app/data-model-context.md b/articles/azure-monitor/app/data-model-context.md index 6db897d93bfd0..080eb30e67567 100644 --- a/articles/azure-monitor/app/data-model-context.md +++ b/articles/azure-monitor/app/data-model-context.md @@ -3,6 +3,7 @@ title: Azure Application Insights Telemetry Data Model - Telemetry Context | Mic description: Application Insights telemetry context data model ms.topic: conceptual ms.date: 05/15/2017 +ms.reviewer: osrosado --- # Telemetry context: Application Insights data model diff --git a/articles/azure-monitor/app/data-model-event-telemetry.md b/articles/azure-monitor/app/data-model-event-telemetry.md index 
4370e4881d129..219a125429627 100644 --- a/articles/azure-monitor/app/data-model-event-telemetry.md +++ b/articles/azure-monitor/app/data-model-event-telemetry.md @@ -3,6 +3,7 @@ title: Azure Application Insights Telemetry Data Model - Event Telemetry | Micro description: Application Insights data model for event telemetry ms.topic: conceptual ms.date: 04/25/2017 +ms.reviewer: mmcc --- # Event telemetry: Application Insights data model diff --git a/articles/azure-monitor/app/data-model-exception-telemetry.md b/articles/azure-monitor/app/data-model-exception-telemetry.md index c77ee670202ef..e1c0a4d49927d 100644 --- a/articles/azure-monitor/app/data-model-exception-telemetry.md +++ b/articles/azure-monitor/app/data-model-exception-telemetry.md @@ -3,6 +3,7 @@ title: Azure Application Insights Exception Telemetry Data model description: Application Insights data model for exception telemetry ms.topic: conceptual ms.date: 04/25/2017 +ms.reviewer: casocha --- # Exception telemetry: Application Insights data model diff --git a/articles/azure-monitor/app/data-model-metric-telemetry.md b/articles/azure-monitor/app/data-model-metric-telemetry.md index 06f7aa599460d..c715ff04d8b32 100644 --- a/articles/azure-monitor/app/data-model-metric-telemetry.md +++ b/articles/azure-monitor/app/data-model-metric-telemetry.md @@ -3,6 +3,7 @@ title: Data model for metric telemetry - Azure Application Insights description: Application Insights data model for metric telemetry ms.topic: conceptual ms.date: 04/25/2017 +ms.reviewer: vitalyg --- # Metric telemetry: Application Insights data model diff --git a/articles/azure-monitor/app/data-model-pageview-telemetry.md b/articles/azure-monitor/app/data-model-pageview-telemetry.md index 41ccaf0185711..cf99533680d4e 100644 --- a/articles/azure-monitor/app/data-model-pageview-telemetry.md +++ b/articles/azure-monitor/app/data-model-pageview-telemetry.md @@ -3,15 +3,15 @@ title: Azure Application Insights Data Model - PageView Telemetry description: Application Insights data model for page view telemetry ms.topic: conceptual ms.date: 03/24/2022 -ms.reviewer: vgorbenko +ms.reviewer: mmcc --- # PageView telemetry: Application Insights data model -PageView telemetry (in [Application Insights](./app-insights-overview.md)) is logged when an application user opens a new page of a monitored application. The `Page` in this context is a logical unit that is defined by the developer to be an application tab or a screen and is not necessarily correlated to a browser webpage load or refresh action. This distinction can be further understood in the context of single-page applications (SPA) where the switch between pages is not tied to browser page actions. [`pageViews.duration`](https://docs.microsoft.com/azure/azure-monitor/reference/tables/pageviews) is the time it takes for the application to present the page to the user. +PageView telemetry (in [Application Insights](./app-insights-overview.md)) is logged when an application user opens a new page of a monitored application. The `Page` in this context is a logical unit that is defined by the developer to be an application tab or a screen and is not necessarily correlated to a browser webpage load or refresh action. This distinction can be further understood in the context of single-page applications (SPA) where the switch between pages is not tied to browser page actions. [`pageViews.duration`](/azure/azure-monitor/reference/tables/pageviews) is the time it takes for the application to present the page to the user. 
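As a minimal sketch of the API mentioned above (assuming the `@microsoft/applicationinsights-web` npm package and a placeholder connection string), a single-page application could log its own PageView when a route changes instead of waiting for a full browser page load:

```javascript
import { ApplicationInsights } from "@microsoft/applicationinsights-web";

// Placeholder connection string; substitute the value from your own Application Insights resource.
const appInsights = new ApplicationInsights({
  config: { connectionString: "InstrumentationKey=00000000-0000-0000-0000-000000000000" }
});
appInsights.loadAppInsights();

// Log a PageView for a logical page, for example after an SPA route change.
// Calling trackPageView() with no arguments records the current browser page instead.
appInsights.trackPageView({ name: "checkout" });
```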
> [!NOTE] -> By default, Application Insights SDKs log single PageView events on each browser webpage load action, with [`pageViews.duration`](https://docs.microsoft.com/azure/azure-monitor/reference/tables/pageviews) populated by [browser timing](#measuring-browsertiming-in-application-insights). Developers can extend additional tracking of PageView events by using the [trackPageView API call](./api-custom-events-metrics.md#page-views). +> By default, Application Insights SDKs log single PageView events on each browser webpage load action, with [`pageViews.duration`](/azure/azure-monitor/reference/tables/pageviews) populated by [browser timing](#measuring-browsertiming-in-application-insights). Developers can extend additional tracking of PageView events by using the [trackPageView API call](./api-custom-events-metrics.md#page-views). ## Measuring browserTiming in Application Insights @@ -34,4 +34,4 @@ Modern browsers expose measurements for page load actions with the [Performance * If it’s not, then the *deprecated* [`PerformanceTiming`](https://developer.mozilla.org/en-US/docs/Web/API/PerformanceTiming) interface is used and the delta between [`NavigationStart`](https://developer.mozilla.org/en-US/docs/Web/API/PerformanceTiming/navigationStart) and [`LoadEventEnd`](https://developer.mozilla.org/en-US/docs/Web/API/PerformanceTiming/loadEventEnd) is calculated. * The developer specifies a duration value when logging custom PageView events using the [trackPageView API call](./api-custom-events-metrics.md#page-views). -![Screenshot of the Metrics page in Application Insights showing graphic displays of metrics data for a web application.](./media/javascript/page-view-load-time.png) +![Screenshot of the Metrics page in Application Insights showing graphic displays of metrics data for a web application.](./media/javascript/page-view-load-time.png) \ No newline at end of file diff --git a/articles/azure-monitor/app/data-model-request-telemetry.md b/articles/azure-monitor/app/data-model-request-telemetry.md index f99132f28b721..52317a7b55059 100644 --- a/articles/azure-monitor/app/data-model-request-telemetry.md +++ b/articles/azure-monitor/app/data-model-request-telemetry.md @@ -3,6 +3,7 @@ title: Data model for request telemetry - Azure Application Insights description: Application Insights data model for request telemetry ms.topic: conceptual ms.date: 01/07/2019 +ms.reviewer: mmcc --- # Request telemetry: Application Insights data model diff --git a/articles/azure-monitor/app/data-model.md b/articles/azure-monitor/app/data-model.md index b208a41accb87..2357c9de0b481 100644 --- a/articles/azure-monitor/app/data-model.md +++ b/articles/azure-monitor/app/data-model.md @@ -8,6 +8,7 @@ ms.workload: TBD ms.tgt_pltfrm: ibiza ms.topic: conceptual ms.date: 10/14/2019 +ms.reviewer: mmcc --- # Application Insights telemetry data model diff --git a/articles/azure-monitor/app/data-retention-privacy.md b/articles/azure-monitor/app/data-retention-privacy.md index 7cd0d66031136..6446ff84a14d6 100644 --- a/articles/azure-monitor/app/data-retention-privacy.md +++ b/articles/azure-monitor/app/data-retention-privacy.md @@ -4,6 +4,7 @@ description: Retention and privacy policy statement ms.topic: conceptual ms.date: 06/30/2020 ms.custom: "devx-track-js, devx-track-csharp" +ms.reviewer: saars --- # Data collection, retention, and storage in Application Insights diff --git a/articles/azure-monitor/app/devops.md b/articles/azure-monitor/app/devops.md index 3d28a85557a86..d68c957ea737e 100644 --- 
a/articles/azure-monitor/app/devops.md +++ b/articles/azure-monitor/app/devops.md @@ -3,6 +3,7 @@ title: Web app performance monitoring - Azure Application Insights description: How Application Insights fits into the DevOps cycle ms.topic: conceptual ms.date: 12/21/2018 +ms.reviewer: cogoodson --- # Deep diagnostics for web apps and services with Application Insights diff --git a/articles/azure-monitor/app/diagnostic-search.md b/articles/azure-monitor/app/diagnostic-search.md index c02af885cee50..579ae52ffc468 100644 --- a/articles/azure-monitor/app/diagnostic-search.md +++ b/articles/azure-monitor/app/diagnostic-search.md @@ -3,6 +3,8 @@ title: Using Search in Azure Application Insights | Microsoft Docs description: Search and filter raw telemetry sent by your web app. ms.topic: conceptual ms.date: 07/30/2019 +ms.reviewer: saars + --- # Using Search in Application Insights diff --git a/articles/azure-monitor/app/distributed-tracing.md b/articles/azure-monitor/app/distributed-tracing.md index 755f41b3a4f14..e68ecf8cf2d3a 100644 --- a/articles/azure-monitor/app/distributed-tracing.md +++ b/articles/azure-monitor/app/distributed-tracing.md @@ -4,6 +4,7 @@ description: Provides information about Microsoft's support for distributed trac ms.topic: conceptual ms.custom: devx-track-dotnet ms.date: 09/17/2018 +ms.reviewer: rijolly --- # What is Distributed Tracing? diff --git a/articles/azure-monitor/app/eventcounters.md b/articles/azure-monitor/app/eventcounters.md index 910d5217663ae..9f399c2762c60 100644 --- a/articles/azure-monitor/app/eventcounters.md +++ b/articles/azure-monitor/app/eventcounters.md @@ -4,6 +4,7 @@ description: Monitor system and custom .NET/.NET Core EventCounters in Applicati ms.topic: conceptual ms.date: 09/20/2019 ms.custom: devx-track-csharp +ms.reviewer: cithomas --- # EventCounters introduction diff --git a/articles/azure-monitor/app/export-data-model.md b/articles/azure-monitor/app/export-data-model.md index bbca3df6e0bf5..d2c9af90e645a 100644 --- a/articles/azure-monitor/app/export-data-model.md +++ b/articles/azure-monitor/app/export-data-model.md @@ -3,6 +3,7 @@ title: Azure Application Insights Data Model | Microsoft Docs description: Describes properties exported from continuous export in JSON, and used as filters. ms.topic: conceptual ms.date: 01/08/2019 +ms.reviewer: mmcc --- # Application Insights Export Data Model diff --git a/articles/azure-monitor/app/export-power-bi.md b/articles/azure-monitor/app/export-power-bi.md index 63be9b1de3b47..a1ea76afdbb88 100644 --- a/articles/azure-monitor/app/export-power-bi.md +++ b/articles/azure-monitor/app/export-power-bi.md @@ -3,6 +3,7 @@ title: Export to Power BI from Azure Application Insights | Microsoft Docs description: Analytics queries can be displayed in Power BI. 
ms.topic: conceptual ms.date: 08/10/2018 +ms.reviewer: mmcc --- # Feed Power BI from Application Insights diff --git a/articles/azure-monitor/app/export-telemetry.md b/articles/azure-monitor/app/export-telemetry.md index 7bf398b163f15..a6642b4fcbb54 100644 --- a/articles/azure-monitor/app/export-telemetry.md +++ b/articles/azure-monitor/app/export-telemetry.md @@ -4,6 +4,7 @@ description: Export diagnostic and usage data to storage in Microsoft Azure, and ms.topic: conceptual ms.date: 02/19/2021 ms.custom: references_regions +ms.reviewer: mmcc --- # Export telemetry from Application Insights diff --git a/articles/azure-monitor/app/get-metric.md b/articles/azure-monitor/app/get-metric.md index d34c98649acc8..01edd8a5dc0e9 100644 --- a/articles/azure-monitor/app/get-metric.md +++ b/articles/azure-monitor/app/get-metric.md @@ -5,6 +5,7 @@ ms.service: azure-monitor ms.topic: conceptual ms.date: 04/28/2020 ms.devlang: csharp +ms.reviewer: casocha --- # Custom metric collection in .NET and .NET Core diff --git a/articles/azure-monitor/app/ilogger.md b/articles/azure-monitor/app/ilogger.md index f512c7f1ac437..52d25d9e7b3dc 100644 --- a/articles/azure-monitor/app/ilogger.md +++ b/articles/azure-monitor/app/ilogger.md @@ -4,6 +4,7 @@ description: Learn how to use Application Insights with the ILogger interface in ms.topic: conceptual ms.date: 05/20/2021 ms.devlang: csharp +ms.reviewer: casocha --- # Application Insights logging with .NET diff --git a/articles/azure-monitor/app/ip-addresses.md b/articles/azure-monitor/app/ip-addresses.md index 9c3f02d1ebbfd..2f78540c3c034 100644 --- a/articles/azure-monitor/app/ip-addresses.md +++ b/articles/azure-monitor/app/ip-addresses.md @@ -3,6 +3,7 @@ title: IP addresses used by Azure Monitor description: Server firewall exceptions required by Application Insights ms.topic: conceptual ms.date: 01/27/2020 +ms.reviewer: saars --- # IP addresses used by Azure Monitor diff --git a/articles/azure-monitor/app/ip-collection.md b/articles/azure-monitor/app/ip-collection.md index 291057344b4d3..03073805a89b4 100644 --- a/articles/azure-monitor/app/ip-collection.md +++ b/articles/azure-monitor/app/ip-collection.md @@ -4,6 +4,7 @@ description: Understand how Application Insights handles IP addresses and geoloc ms.topic: conceptual ms.date: 09/23/2020 ms.custom: devx-track-js, devx-track-azurepowershell +ms.reviewer: saars --- # Geolocation and IP address handling diff --git a/articles/azure-monitor/app/java-2x-agent.md b/articles/azure-monitor/app/java-2x-agent.md index e6e55cc792cca..b5fbdde55d85e 100644 --- a/articles/azure-monitor/app/java-2x-agent.md +++ b/articles/azure-monitor/app/java-2x-agent.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 01/10/2019 ms.devlang: java ms.custom: devx-track-java +ms.reviewer: mmcc --- # Monitor dependencies, caught exceptions, and method execution times in Java web apps diff --git a/articles/azure-monitor/app/java-2x-collectd.md b/articles/azure-monitor/app/java-2x-collectd.md index b3ee5bbd37bff..a845b810ed2ee 100644 --- a/articles/azure-monitor/app/java-2x-collectd.md +++ b/articles/azure-monitor/app/java-2x-collectd.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 03/14/2019 ms.devlang: java ms.custom: devx-track-java +ms.reviewer: mmcc --- # collectd: Linux performance metrics in Application Insights [Deprecated] diff --git a/articles/azure-monitor/app/java-2x-filter-telemetry.md b/articles/azure-monitor/app/java-2x-filter-telemetry.md index e5630c2ec271e..e30588fe29f76 100644 --- 
a/articles/azure-monitor/app/java-2x-filter-telemetry.md +++ b/articles/azure-monitor/app/java-2x-filter-telemetry.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 3/14/2019 ms.devlang: java ms.custom: devx-track-java +ms.reviewer: mmcc --- # Filter telemetry in your Java web app diff --git a/articles/azure-monitor/app/java-2x-get-started.md b/articles/azure-monitor/app/java-2x-get-started.md index d9c981d106a3f..25cc03acd18f9 100644 --- a/articles/azure-monitor/app/java-2x-get-started.md +++ b/articles/azure-monitor/app/java-2x-get-started.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 11/22/2020 ms.devlang: java ms.custom: devx-track-java +ms.reviewer: mmcc --- # Get started with Application Insights in a Java web project diff --git a/articles/azure-monitor/app/java-2x-micrometer.md b/articles/azure-monitor/app/java-2x-micrometer.md index 0a812958343c4..64ebefc02b01c 100644 --- a/articles/azure-monitor/app/java-2x-micrometer.md +++ b/articles/azure-monitor/app/java-2x-micrometer.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.devlang: java ms.custom: devx-track-java ms.date: 11/01/2018 +ms.reviewer: mmcc --- # How to use Micrometer with Azure Application Insights Java SDK (not recommended) diff --git a/articles/azure-monitor/app/java-2x-trace-logs.md b/articles/azure-monitor/app/java-2x-trace-logs.md index 10a49ea130d73..9d833bfc3f051 100644 --- a/articles/azure-monitor/app/java-2x-trace-logs.md +++ b/articles/azure-monitor/app/java-2x-trace-logs.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 05/18/2019 ms.devlang: java ms.custom: devx-track-java +ms.reviewer: mmcc --- # Explore Java trace logs in Application Insights diff --git a/articles/azure-monitor/app/java-2x-troubleshoot.md b/articles/azure-monitor/app/java-2x-troubleshoot.md index d9860b5ee1b77..1789be6c971be 100644 --- a/articles/azure-monitor/app/java-2x-troubleshoot.md +++ b/articles/azure-monitor/app/java-2x-troubleshoot.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 03/14/2019 ms.devlang: java ms.custom: devx-track-java +ms.reviewer: mmcc --- # Troubleshooting and Q and A for Application Insights for Java SDK diff --git a/articles/azure-monitor/app/java-jmx-metrics-configuration.md b/articles/azure-monitor/app/java-jmx-metrics-configuration.md index 8327c9bb4fd36..c10462866586b 100644 --- a/articles/azure-monitor/app/java-jmx-metrics-configuration.md +++ b/articles/azure-monitor/app/java-jmx-metrics-configuration.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 03/16/2021 ms.devlang: java ms.custom: devx-track-java +ms.reviewer: mmcc --- # Configuring JMX metrics diff --git a/articles/azure-monitor/app/java-on-premises.md b/articles/azure-monitor/app/java-on-premises.md index 24b7a51dad2ba..25fdd57efbd88 100644 --- a/articles/azure-monitor/app/java-on-premises.md +++ b/articles/azure-monitor/app/java-on-premises.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.devlang: java ms.custom: devx-track-java ms.date: 04/16/2020 +ms.reviewer: abinetabate --- # Java codeless application monitoring on-premises - Azure Monitor Application Insights diff --git a/articles/azure-monitor/app/java-standalone-arguments.md b/articles/azure-monitor/app/java-standalone-arguments.md index 1c88dcd7e4768..73ab769c0bd90 100644 --- a/articles/azure-monitor/app/java-standalone-arguments.md +++ b/articles/azure-monitor/app/java-standalone-arguments.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 04/16/2020 ms.devlang: java ms.custom: devx-track-java +ms.reviewer: mmcc --- # Tips for updating your JVM args - Azure Monitor Application Insights for Java diff 
--git a/articles/azure-monitor/app/java-standalone-config.md b/articles/azure-monitor/app/java-standalone-config.md index 6738864e73f54..2e34338a111df 100644 --- a/articles/azure-monitor/app/java-standalone-config.md +++ b/articles/azure-monitor/app/java-standalone-config.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 11/04/2020 ms.devlang: java ms.custom: devx-track-java +ms.reviewer: mmcc --- # Configuration options - Azure Monitor Application Insights for Java diff --git a/articles/azure-monitor/app/java-standalone-sampling-overrides.md b/articles/azure-monitor/app/java-standalone-sampling-overrides.md index 7b28e1c3d35d4..7af9f74f7e530 100644 --- a/articles/azure-monitor/app/java-standalone-sampling-overrides.md +++ b/articles/azure-monitor/app/java-standalone-sampling-overrides.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 03/22/2021 ms.devlang: java ms.custom: devx-track-java +ms.reviewer: mmcc --- # Sampling overrides (preview) - Azure Monitor Application Insights for Java diff --git a/articles/azure-monitor/app/java-standalone-telemetry-processors-examples.md b/articles/azure-monitor/app/java-standalone-telemetry-processors-examples.md index f70f33f387c99..095f2b7d267e4 100644 --- a/articles/azure-monitor/app/java-standalone-telemetry-processors-examples.md +++ b/articles/azure-monitor/app/java-standalone-telemetry-processors-examples.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 12/29/2020 ms.devlang: java ms.custom: devx-track-java +ms.reviewer: mmcc --- # Telemetry processor examples - Azure Monitor Application Insights for Java diff --git a/articles/azure-monitor/app/java-standalone-telemetry-processors.md b/articles/azure-monitor/app/java-standalone-telemetry-processors.md index ede8c574f8d6e..bffa28bde2e50 100644 --- a/articles/azure-monitor/app/java-standalone-telemetry-processors.md +++ b/articles/azure-monitor/app/java-standalone-telemetry-processors.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 10/29/2020 ms.devlang: java ms.custom: devx-track-java +ms.reviewer: mmcc --- # Telemetry processors (preview) - Azure Monitor Application Insights for Java diff --git a/articles/azure-monitor/app/java-standalone-troubleshoot.md b/articles/azure-monitor/app/java-standalone-troubleshoot.md index b1d4fe8d8acfa..3e7754676483d 100644 --- a/articles/azure-monitor/app/java-standalone-troubleshoot.md +++ b/articles/azure-monitor/app/java-standalone-troubleshoot.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 11/30/2020 ms.devlang: java ms.custom: devx-track-java +ms.reviewer: mmcc --- # Troubleshooting guide: Azure Monitor Application Insights for Java diff --git a/articles/azure-monitor/app/java-standalone-upgrade-from-2x.md b/articles/azure-monitor/app/java-standalone-upgrade-from-2x.md index 6d94a9fddbfc3..01126fdb95bfc 100644 --- a/articles/azure-monitor/app/java-standalone-upgrade-from-2x.md +++ b/articles/azure-monitor/app/java-standalone-upgrade-from-2x.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 11/25/2020 ms.devlang: java ms.custom: devx-track-java +ms.reviewer: mmcc --- # Upgrading from Application Insights Java 2.x SDK diff --git a/articles/azure-monitor/app/javascript-angular-plugin.md b/articles/azure-monitor/app/javascript-angular-plugin.md index 431b4910bf820..6e2fed0419480 100644 --- a/articles/azure-monitor/app/javascript-angular-plugin.md +++ b/articles/azure-monitor/app/javascript-angular-plugin.md @@ -7,6 +7,7 @@ ms.tgt_pltfrm: ibiza ms.topic: conceptual ms.date: 10/07/2020 ms.devlang: javascript +ms.reviewer: mmcc --- # Angular plugin for Application 
Insights JavaScript SDK diff --git a/articles/azure-monitor/app/javascript-click-analytics-plugin.md b/articles/azure-monitor/app/javascript-click-analytics-plugin.md index 3907a91a562ff..e2c2abd3b6453 100644 --- a/articles/azure-monitor/app/javascript-click-analytics-plugin.md +++ b/articles/azure-monitor/app/javascript-click-analytics-plugin.md @@ -7,6 +7,7 @@ ms.tgt_pltfrm: ibiza ms.topic: conceptual ms.date: 01/14/2021 ms.devlang: javascript +ms.reviewer: mmcc --- # Click Analytics Auto-collection plugin for Application Insights JavaScript SDK diff --git a/articles/azure-monitor/app/javascript-react-native-plugin.md b/articles/azure-monitor/app/javascript-react-native-plugin.md index 4bef45cf934ae..c6c1e7f388589 100644 --- a/articles/azure-monitor/app/javascript-react-native-plugin.md +++ b/articles/azure-monitor/app/javascript-react-native-plugin.md @@ -7,6 +7,7 @@ ms.tgt_pltfrm: ibiza ms.topic: conceptual ms.date: 08/06/2020 ms.devlang: javascript +ms.reviewer: mmcc --- # React Native plugin for Application Insights JavaScript SDK diff --git a/articles/azure-monitor/app/javascript-react-plugin.md b/articles/azure-monitor/app/javascript-react-plugin.md index c01615080a798..043ea977328f6 100644 --- a/articles/azure-monitor/app/javascript-react-plugin.md +++ b/articles/azure-monitor/app/javascript-react-plugin.md @@ -7,6 +7,7 @@ ms.tgt_pltfrm: ibiza ms.topic: conceptual ms.date: 07/28/2020 ms.devlang: javascript +ms.reviewer: mmcc --- # React plugin for Application Insights JavaScript SDK diff --git a/articles/azure-monitor/app/javascript-sdk-load-failure.md b/articles/azure-monitor/app/javascript-sdk-load-failure.md index ae01ba4a0fd68..d745efdaeb730 100644 --- a/articles/azure-monitor/app/javascript-sdk-load-failure.md +++ b/articles/azure-monitor/app/javascript-sdk-load-failure.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 06/05/2020 ms.devlang: javascript ms.custom: devx-track-js +ms.reviewer: mmcc --- # Troubleshooting SDK load failure for JavaScript web apps diff --git a/articles/azure-monitor/app/javascript.md b/articles/azure-monitor/app/javascript.md index a8a0de567b4cb..a2f6b390a89d8 100644 --- a/articles/azure-monitor/app/javascript.md +++ b/articles/azure-monitor/app/javascript.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 08/06/2020 ms.devlang: javascript ms.custom: devx-track-js +ms.reviewer: mmcc --- # Application Insights for web pages diff --git a/articles/azure-monitor/app/kubernetes-codeless.md b/articles/azure-monitor/app/kubernetes-codeless.md index 577641959acf8..13e508dd457f8 100644 --- a/articles/azure-monitor/app/kubernetes-codeless.md +++ b/articles/azure-monitor/app/kubernetes-codeless.md @@ -3,6 +3,7 @@ title: Monitor applications on Azure Kubernetes Service (AKS) with Application I description: Azure Monitor seamlessly integrates with your application running on Kubernetes, and allows you to spot the problems with your apps in no time. ms.topic: conceptual ms.date: 05/13/2020 +ms.reviewer: abinetabate --- # Zero instrumentation application monitoring for Kubernetes - Azure Monitor Application Insights diff --git a/articles/azure-monitor/app/legacy-pricing.md b/articles/azure-monitor/app/legacy-pricing.md index 26f2b12950160..4a148d94fdc84 100644 --- a/articles/azure-monitor/app/legacy-pricing.md +++ b/articles/azure-monitor/app/legacy-pricing.md @@ -3,6 +3,7 @@ title: Application Insights legacy enterprise (per node) pricing tier description: Describes the legacy pricing tier for Application Insights. 
ms.topic: conceptual ms.date: 02/18/2022 +ms.reviewer: dalek --- # Application Insights legacy enterprise (per node) pricing tier diff --git a/articles/azure-monitor/app/live-stream.md b/articles/azure-monitor/app/live-stream.md index 586c9aaf32da7..e423397a64b6f 100644 --- a/articles/azure-monitor/app/live-stream.md +++ b/articles/azure-monitor/app/live-stream.md @@ -2,7 +2,7 @@ title: Diagnose with Live Metrics Stream - Azure Application Insights description: Monitor your web app in real time with custom metrics, and diagnose issues with a live feed of failures, traces, and events. ms.topic: conceptual -ms.date: 10/12/2021 +ms.date: 05/31/2022 ms.reviewer: sdash ms.devlang: csharp --- @@ -16,7 +16,7 @@ Monitor your live, in-production web application by using Live Metrics Stream (a With Live Metrics Stream, you can: -* Validate a fix while it is released, by watching performance and failure counts. +* Validate a fix while it's released, by watching performance and failure counts. * Watch the effect of test loads, and diagnose issues live. * Focus on particular test sessions or filter out known issues, by selecting and filtering the metrics you want to watch. * Get exception traces as they happen. @@ -52,7 +52,7 @@ Live Metrics are currently supported for ASP.NET, ASP.NET Core, Azure Functions, ### Enable LiveMetrics using code for any .NET application -Even though LiveMetrics is enabled by default when onboarding using recommended instructions for .NET Applications, the following shows how to setup Live Metrics +Even though LiveMetrics is enabled by default when onboarding using recommended instructions for .NET Applications, the following shows how to set up Live Metrics manually. 1. Install the NuGet package [Microsoft.ApplicationInsights.PerfCounterCollector](https://www.nuget.org/packages/Microsoft.ApplicationInsights.PerfCounterCollector) @@ -114,7 +114,7 @@ namespace LiveMetricsDemo } ``` -While the above sample is for a console app, the same code can be used in any .NET applications. If any other TelemetryModules are enabled which auto-collects telemetry, it is important to ensure the same configuration used for initializing those modules is used for Live Metrics module as well. +While the above sample is for a console app, the same code can be used in any .NET applications. If any other TelemetryModules are enabled which auto-collects telemetry, it's important to ensure the same configuration used for initializing those modules is used for Live Metrics module as well. ## How does Live Metrics Stream differ from Metrics Explorer and Analytics? @@ -123,7 +123,7 @@ While the above sample is for a console app, the same code can be used in any .N |**Latency**|Data displayed within one second|Aggregated over minutes| |**No retention**|Data persists while it's on the chart, and is then discarded|[Data retained for 90 days](./data-retention-privacy.md#how-long-is-the-data-kept)| |**On demand**|Data is only streamed while the Live Metrics pane is open |Data is sent whenever the SDK is installed and enabled| -|**Free**|There is no charge for Live Stream data|Subject to [pricing](../logs/cost-logs.md#application-insights-billing) +|**Free**|There's no charge for Live Stream data|Subject to [pricing](../logs/cost-logs.md#application-insights-billing) |**Sampling**|All selected metrics and counters are transmitted. Failures and stack traces are sampled. |Events may be [sampled](./api-filtering-sampling.md)| |**Control channel**|Filter control signals are sent to the SDK. 
We recommend you secure this channel.|Communication is one way, to the portal| @@ -131,7 +131,7 @@ While the above sample is for a console app, the same code can be used in any .N (Available with ASP.NET, ASP.NET Core, and Azure Functions (v2).) -You can monitor custom KPI live by applying arbitrary filters on any Application Insights telemetry from the portal. Click the filter control that shows when you mouse-over any of the charts. The following chart is plotting a custom Request count KPI with filters on URL and Duration attributes. Validate your filters with the Stream Preview section that shows a live feed of telemetry that matches the criteria you have specified at any point in time. +You can monitor custom KPI live by applying arbitrary filters on any Application Insights telemetry from the portal. Select the filter control that shows when you mouse-over any of the charts. The following chart is plotting a custom Request count KPI with filters on URL and Duration attributes. Validate your filters with the Stream Preview section that shows a live feed of telemetry that matches the criteria you've specified at any point in time. ![Filter request rate](./media/live-stream/filter-request.png) @@ -144,11 +144,11 @@ In addition to Application Insights telemetry, you can also monitor any Windows Live metrics are aggregated at two points: locally on each server, and then across all servers. You can change the default at either by selecting other options in the respective drop-downs. ## Sample Telemetry: Custom Live Diagnostic Events -By default, the live feed of events shows samples of failed requests and dependency calls, exceptions, events, and traces. Click the filter icon to see the applied criteria at any point in time. +By default, the live feed of events shows samples of failed requests and dependency calls, exceptions, events, and traces. Select the filter icon to see the applied criteria at any point in time. ![Filter button](./media/live-stream/filter.png) -As with metrics, you can specify any arbitrary criteria to any of the Application Insights telemetry types. In this example, we are selecting specific request failures, and events. +As with metrics, you can specify any arbitrary criteria to any of the Application Insights telemetry types. In this example, we're selecting specific request failures, and events. ![Query Builder](./media/live-stream/query-builder.png) @@ -237,9 +237,9 @@ For Azure Function Apps (v2), securing the channel with an API key can be accomp Create an API key from within your Application Insights resource and go to **Settings > Configuration** for your Function App. Select **New application setting** and enter a name of `APPINSIGHTS_QUICKPULSEAUTHAPIKEY` and a value that corresponds to your API key. -However, if you recognize and trust all the connected servers, you can try the custom filters without the authenticated channel. This option is available for six months. This override is required once every new session, or when a new server comes online. +Securing the control channel is not necessary if you recognize and trust all the connected servers. This option is made available so that you can try custom filters without having to set up an authenticated channel. If you choose this option you will have to authorize the connected servers once every new session or when a new server comes online. We strongly discourage the use of unsecured channels and will disable this option 6 months after you start using it. 
To use custom filters without a secure channel simply click on any of the filter icons and authorize the connected servers. The “Authorize connected servers” dialog displays the date (highlighted below) after which this option will be disabled. -![Live Metrics Auth options](./media/live-stream/live-stream-auth.png) +:::image type="content" source="media/live-stream/live-stream-auth.png" alt-text="Screenshot displaying the authorize connected servers dialog." lightbox="media/live-stream/live-stream-auth.png"::: > [!NOTE] > We strongly recommend that you set up the authenticated channel before entering potentially sensitive information like CustomerID in the filter criteria. diff --git a/articles/azure-monitor/app/media/azure-web-apps-net-core/auto-instrumentation-status.png b/articles/azure-monitor/app/media/azure-web-apps-net-core/auto-instrumentation-status.png new file mode 100644 index 0000000000000..6faa3856af38b Binary files /dev/null and b/articles/azure-monitor/app/media/azure-web-apps-net-core/auto-instrumentation-status.png differ diff --git a/articles/azure-monitor/app/media/live-stream/live-stream-auth.png b/articles/azure-monitor/app/media/live-stream/live-stream-auth.png index afe6bedcb7179..2b54a2e52f743 100644 Binary files a/articles/azure-monitor/app/media/live-stream/live-stream-auth.png and b/articles/azure-monitor/app/media/live-stream/live-stream-auth.png differ diff --git a/articles/azure-monitor/app/media/profiler-aspnetcore-linux/view-traces.PNG b/articles/azure-monitor/app/media/profiler-aspnetcore-linux/view-traces.PNG deleted file mode 100644 index 78d4e8e265bcd..0000000000000 Binary files a/articles/azure-monitor/app/media/profiler-aspnetcore-linux/view-traces.PNG and /dev/null differ diff --git a/articles/azure-monitor/app/media/profiler-troubleshooting/profiler-search-telemetry.png b/articles/azure-monitor/app/media/profiler-troubleshooting/profiler-search-telemetry.png deleted file mode 100644 index a8fe600f2a474..0000000000000 Binary files a/articles/azure-monitor/app/media/profiler-troubleshooting/profiler-search-telemetry.png and /dev/null differ diff --git a/articles/azure-monitor/app/media/profiler/AppInsights-AppServices.png b/articles/azure-monitor/app/media/profiler/AppInsights-AppServices.png deleted file mode 100644 index 85412e947cf7e..0000000000000 Binary files a/articles/azure-monitor/app/media/profiler/AppInsights-AppServices.png and /dev/null differ diff --git a/articles/azure-monitor/app/media/profiler/Enablement_UI.png b/articles/azure-monitor/app/media/profiler/Enablement_UI.png deleted file mode 100644 index b0251798e3757..0000000000000 Binary files a/articles/azure-monitor/app/media/profiler/Enablement_UI.png and /dev/null differ diff --git a/articles/azure-monitor/app/media/profiler/disable-profiler-webjob.png b/articles/azure-monitor/app/media/profiler/disable-profiler-webjob.png deleted file mode 100644 index 2744185f45b4d..0000000000000 Binary files a/articles/azure-monitor/app/media/profiler/disable-profiler-webjob.png and /dev/null differ diff --git a/articles/azure-monitor/app/media/profiler/profiler-app-setting.png b/articles/azure-monitor/app/media/profiler/profiler-app-setting.png deleted file mode 100644 index 523272154c365..0000000000000 Binary files a/articles/azure-monitor/app/media/profiler/profiler-app-setting.png and /dev/null differ diff --git a/articles/azure-monitor/app/migrate-from-instrumentation-keys-to-connection-strings.md b/articles/azure-monitor/app/migrate-from-instrumentation-keys-to-connection-strings.md index 
02591409b47b7..a040d5010118d 100644 --- a/articles/azure-monitor/app/migrate-from-instrumentation-keys-to-connection-strings.md +++ b/articles/azure-monitor/app/migrate-from-instrumentation-keys-to-connection-strings.md @@ -3,6 +3,7 @@ title: Migrate from Application Insights instrumentation keys to connection stri description: Learn the steps required to upgrade from Azure Monitor Application Insights instrumentation keys to connection strings ms.topic: conceptual ms.date: 02/14/2022 +ms.reviewer: cogoodson --- # Migrate from Application Insights instrumentation keys to connection strings diff --git a/articles/azure-monitor/app/monitor-functions.md b/articles/azure-monitor/app/monitor-functions.md index 95e951c613a72..94c67293ebc44 100644 --- a/articles/azure-monitor/app/monitor-functions.md +++ b/articles/azure-monitor/app/monitor-functions.md @@ -3,6 +3,7 @@ title: Monitor applications running on Azure Functions with Application Insights description: Azure Monitor seamlessly integrates with your application running on Azure Functions, and allows you to monitor the performance and spot the problems with your apps in no time. ms.topic: conceptual ms.date: 08/27/2021 +ms.reviewer: abinetabate --- # Monitoring Azure Functions with Azure Monitor Application Insights diff --git a/articles/azure-monitor/app/nodejs.md b/articles/azure-monitor/app/nodejs.md index 01b8dd4df2859..c7ffd0946cce5 100644 --- a/articles/azure-monitor/app/nodejs.md +++ b/articles/azure-monitor/app/nodejs.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 10/12/2021 ms.devlang: javascript ms.custom: devx-track-js +ms.reviewer: mmcc --- # Monitor your Node.js services and apps with Application Insights diff --git a/articles/azure-monitor/app/opencensus-python-request.md b/articles/azure-monitor/app/opencensus-python-request.md index b74d796eeb6f6..d22b54ae3fc60 100644 --- a/articles/azure-monitor/app/opencensus-python-request.md +++ b/articles/azure-monitor/app/opencensus-python-request.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 10/15/2019 ms.devlang: python ms.custom: devx-track-python +ms.reviewer: mmcc --- # Track incoming requests with OpenCensus Python diff --git a/articles/azure-monitor/app/opencensus-python.md b/articles/azure-monitor/app/opencensus-python.md index a1d5202005ea5..e482556b0c294 100644 --- a/articles/azure-monitor/app/opencensus-python.md +++ b/articles/azure-monitor/app/opencensus-python.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 10/12/2021 ms.devlang: python ms.custom: devx-track-python +ms.reviewer: mmcc --- # Set up Azure Monitor for your Python application @@ -348,7 +349,7 @@ OpenCensus.stats supports 4 aggregation methods but provides partial support for main() ``` -1. The exporter sends metric data to Azure Monitor at a fixed interval. The default is every 15 seconds. We're tracking a single metric, so this metric data, with whatever value and time stamp it contains, is sent every interval. The value is cumulative, can only increase and resets to 0 on restart. You can find the data under `customMetrics`, but `customMetrics` properties valueCount, valueSum, valueMin, valueMax, and valueStdDev are not effectively used. +1. The exporter sends metric data to Azure Monitor at a fixed interval. The default is every 15 seconds. To modify the export interval, pass in `export_interval` as a parameter in seconds to `new_metrics_exporter()`. We're tracking a single metric, so this metric data, with whatever value and time stamp it contains, is sent every interval. 
The value is cumulative, can only increase and resets to 0 on restart. You can find the data under `customMetrics`, but `customMetrics` properties valueCount, valueSum, valueMin, valueMax, and valueStdDev are not effectively used. ### Setting custom dimensions in metrics diff --git a/articles/azure-monitor/app/opentelemetry-enable.md b/articles/azure-monitor/app/opentelemetry-enable.md index 333e0994388f8..15914616710f4 100644 --- a/articles/azure-monitor/app/opentelemetry-enable.md +++ b/articles/azure-monitor/app/opentelemetry-enable.md @@ -4,11 +4,14 @@ description: This article provides guidance on how to enable Azure Monitor on ap ms.topic: conceptual ms.date: 10/11/2021 ms.devlang: csharp, javascript, python +ms.reviewer: mmcc --- # Enable Azure Monitor OpenTelemetry Exporter for .NET, Node.js, and Python applications (preview) -This article describes how to enable and configure the OpenTelemetry-based Azure Monitor Preview offering. After you finish the instructions in this article, you'll be able to send OpenTelemetry traces to Azure Monitor Application Insights. To learn more about OpenTelemetry, see the [OpenTelemetry overview](opentelemetry-overview.md) or [OpenTelemetry FAQ](/azure/azure-monitor/faq#opentelemetry). +The Azure Monitor OpenTelemetry Exporter is a component that sends traces (and eventually all application telemetry) to Azure Monitor Application Insights. To learn more about OpenTelemetry concepts, see the [OpenTelemetry overview](opentelemetry-overview.md) or [OpenTelemetry FAQ](/azure/azure-monitor/faq#opentelemetry). + +This article describes how to enable and configure the OpenTelemetry-based Azure Monitor Preview offering. After you finish the instructions in this article, you'll be able to send OpenTelemetry traces to Azure Monitor Application Insights. > [!IMPORTANT] > Azure Monitor OpenTelemetry Exporter for .NET, Node.js, and Python applications is currently in preview. diff --git a/articles/azure-monitor/app/opentelemetry-overview.md b/articles/azure-monitor/app/opentelemetry-overview.md index d01dec20efe85..b9d7ea42da056 100644 --- a/articles/azure-monitor/app/opentelemetry-overview.md +++ b/articles/azure-monitor/app/opentelemetry-overview.md @@ -3,6 +3,7 @@ title: OpenTelemetry with Azure Monitor overview description: Provides an overview of how to use OpenTelemetry with Azure Monitor. ms.topic: conceptual ms.date: 10/11/2021 +ms.reviewer: mmcc --- # OpenTelemetry overview @@ -20,7 +21,7 @@ Telemetry, the data collected to observe your application, can be broken into th Initially the OpenTelemetry community took on Distributed Tracing. Metrics and Logs are still in progress. A complete observability story includes all three pillars, but currently our [Azure Monitor OpenTelemetry-based exporter **preview** offerings for .NET, Python, and JavaScript](opentelemetry-enable.md) **only include Distributed Tracing**. -There are several sources that explain the three pillars in detail including the [OpenTelemetry community website](https://opentelemetry.io/docs/concepts/data-sources/), [OpenTelemetry Specifications](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/overview.md), and [Distributed Systems Observability](https://www.oreilly.com/library/view/distributed-systems-observability/9781492033431/ch04.html) by Cindy Sridharan. 
+There are several sources that explain the three pillars in detail including the [OpenTelemetry community website](https://opentelemetry.io/docs/concepts/data-collection/), [OpenTelemetry Specifications](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/overview.md), and [Distributed Systems Observability](https://www.oreilly.com/library/view/distributed-systems-observability/9781492033431/ch04.html) by Cindy Sridharan. In the following sections, we'll cover some telemetry collection basics. diff --git a/articles/azure-monitor/app/performance-counters.md b/articles/azure-monitor/app/performance-counters.md index 18ebe41acb867..022437855f5dc 100644 --- a/articles/azure-monitor/app/performance-counters.md +++ b/articles/azure-monitor/app/performance-counters.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 12/13/2018 ms.devlang: csharp ms.custom: devx-track-csharp +ms.reviewer: rijolly --- # System performance counters in Application Insights diff --git a/articles/azure-monitor/app/platforms.md b/articles/azure-monitor/app/platforms.md index 34a988de95f30..64b7ffb96c2ed 100644 --- a/articles/azure-monitor/app/platforms.md +++ b/articles/azure-monitor/app/platforms.md @@ -3,7 +3,7 @@ title: 'Application Insights: languages, platforms, and integrations | Microsoft description: Languages, platforms, and integrations available for Application Insights ms.topic: conceptual ms.date: 10/29/2021 -ms.reviewer: olegan +ms.reviewer: mmcc --- # Supported languages diff --git a/articles/azure-monitor/app/powershell-azure-diagnostics.md b/articles/azure-monitor/app/powershell-azure-diagnostics.md index 5a60a708634fa..15d4272d105cc 100644 --- a/articles/azure-monitor/app/powershell-azure-diagnostics.md +++ b/articles/azure-monitor/app/powershell-azure-diagnostics.md @@ -4,6 +4,7 @@ description: Automate configuring Azure Diagnostics to pipe data to Application ms.topic: conceptual ms.date: 08/06/2019 ms.custom: devx-track-azurepowershell +ms.reviewer: cogoodson --- # Using PowerShell to set up Application Insights for Azure Cloud Services diff --git a/articles/azure-monitor/app/powershell.md b/articles/azure-monitor/app/powershell.md index 10fcc4a82045f..d5d38342d6a8b 100644 --- a/articles/azure-monitor/app/powershell.md +++ b/articles/azure-monitor/app/powershell.md @@ -4,6 +4,7 @@ description: Automate creating and managing resources, alerts, and availability ms.topic: conceptual ms.date: 05/02/2020 ms.custom: devx-track-azurepowershell +ms.reviewer: vitalyg --- # Manage Application Insights resources using PowerShell @@ -157,7 +158,8 @@ Create a new .json file - let's call it `template1.json` in this example.
Copy t "tags": {}, "properties": { "ApplicationId": "[parameters('appName')]", - "retentionInDays": "[parameters('retentionInDays')]" + "retentionInDays": "[parameters('retentionInDays')]", + "ImmediatePurgeDataOn30Days": "[parameters('ImmediatePurgeDataOn30Days')]" }, "dependsOn": [] }, diff --git a/articles/azure-monitor/app/pre-aggregated-metrics-log-metrics.md b/articles/azure-monitor/app/pre-aggregated-metrics-log-metrics.md index 70c3609a437d3..f9f651a2ea508 100644 --- a/articles/azure-monitor/app/pre-aggregated-metrics-log-metrics.md +++ b/articles/azure-monitor/app/pre-aggregated-metrics-log-metrics.md @@ -3,6 +3,7 @@ title: Log-based and pre-aggregated metrics in Azure Application Insights | Micr description: Why to use log-based versus pre-aggregated metrics in Azure Application Insights ms.topic: conceptual ms.date: 09/18/2018 +ms.reviewer: vitalyg --- # Log-based and pre-aggregated metrics in Application Insights diff --git a/articles/azure-monitor/app/proactive-application-security-detection-pack.md b/articles/azure-monitor/app/proactive-application-security-detection-pack.md index 8b8c61a8130f5..f9adc2587f67a 100644 --- a/articles/azure-monitor/app/proactive-application-security-detection-pack.md +++ b/articles/azure-monitor/app/proactive-application-security-detection-pack.md @@ -3,6 +3,7 @@ title: Security detection Pack with Azure Application Insights description: Monitor application with Azure Application Insights and smart detection for potential security issues. ms.topic: conceptual ms.date: 12/12/2017 +ms.reviewer: yagil --- # Application security detection pack (preview) diff --git a/articles/azure-monitor/app/proactive-arm-config.md b/articles/azure-monitor/app/proactive-arm-config.md index 70fba69d8de66..6b7b306c3d5b2 100644 --- a/articles/azure-monitor/app/proactive-arm-config.md +++ b/articles/azure-monitor/app/proactive-arm-config.md @@ -3,6 +3,7 @@ title: Smart detection rule settings - Azure Application Insights description: Automate management and configuration of Azure Application Insights smart detection rules with Azure Resource Manager Templates ms.topic: conceptual ms.date: 02/14/2021 +ms.reviewer: yagil --- # Manage Application Insights smart detection rules using Azure Resource Manager templates diff --git a/articles/azure-monitor/app/proactive-cloud-services.md b/articles/azure-monitor/app/proactive-cloud-services.md index 68fa4f97150e6..bd2d0d135b9e4 100644 --- a/articles/azure-monitor/app/proactive-cloud-services.md +++ b/articles/azure-monitor/app/proactive-cloud-services.md @@ -3,7 +3,7 @@ title: Alert on issues in Azure Cloud Services using the Azure Diagnostics integ description: Monitor for issues like startup failures, crashes, and role recycle loops in Azure Cloud Services with Azure Application Insights ms.topic: conceptual ms.date: 06/07/2018 -ms.reviewer: harelbr +ms.reviewer: yagil --- # Alert on issues in Azure Cloud Services using the Azure diagnostics integration with Azure Application Insights diff --git a/articles/azure-monitor/app/proactive-diagnostics.md b/articles/azure-monitor/app/proactive-diagnostics.md index 4c083a9ef2500..d60f04cc80b93 100644 --- a/articles/azure-monitor/app/proactive-diagnostics.md +++ b/articles/azure-monitor/app/proactive-diagnostics.md @@ -3,6 +3,7 @@ title: Smart detection in Azure Application Insights | Microsoft Docs description: Application Insights performs automatic deep analysis of your app telemetry and warns you of potential problems. 
ms.topic: conceptual ms.date: 02/07/2019 +ms.reviewer: yagil --- # Smart detection in Application Insights diff --git a/articles/azure-monitor/app/proactive-email-notification.md b/articles/azure-monitor/app/proactive-email-notification.md index 1dbe9e40bfe86..786ff246ca99c 100644 --- a/articles/azure-monitor/app/proactive-email-notification.md +++ b/articles/azure-monitor/app/proactive-email-notification.md @@ -3,6 +3,7 @@ title: Smart Detection notification change - Azure Application Insights description: Change to the default notification recipients from Smart Detection. Smart Detection lets you monitor application traces with Azure Application Insights for unusual patterns in trace telemetry. ms.topic: conceptual ms.date: 02/14/2021 +ms.reviewer: yagil --- # Smart Detection e-mail notification change diff --git a/articles/azure-monitor/app/proactive-exception-volume.md b/articles/azure-monitor/app/proactive-exception-volume.md index 0167f4187b13f..62038fd4f3562 100644 --- a/articles/azure-monitor/app/proactive-exception-volume.md +++ b/articles/azure-monitor/app/proactive-exception-volume.md @@ -3,6 +3,7 @@ title: Abnormal rise in exception volume - Azure Application Insights description: Monitor application exceptions with smart detection in Azure Application Insights for unusual patterns in exception volume. ms.topic: conceptual ms.date: 12/08/2017 +ms.reviewer: yagil --- # Abnormal rise in exception volume (preview) diff --git a/articles/azure-monitor/app/proactive-failure-diagnostics.md b/articles/azure-monitor/app/proactive-failure-diagnostics.md index 1633bf61f4176..060d04528bf2b 100644 --- a/articles/azure-monitor/app/proactive-failure-diagnostics.md +++ b/articles/azure-monitor/app/proactive-failure-diagnostics.md @@ -1,5 +1,5 @@ --- -title: Smart Detection - failure anomalies, in Application Insights | Microsoft Docs +title: Smart Detection of Failure Anomalies in Application Insights | Microsoft Docs description: Alerts you to unusual changes in the rate of failed requests to your web app, and provides diagnostic analysis. No configuration is needed. ms.topic: conceptual ms.date: 12/18/2018 @@ -11,7 +11,7 @@ ms.reviewer: yalavi This feature works for any web app, hosted in the cloud or on your own servers, that generate application request or dependency data. For example, if you have a worker role that calls [TrackRequest()](./api-custom-events-metrics.md#trackrequest) or [TrackDependency()](./api-custom-events-metrics.md#trackdependency). -After setting up [Application Insights for your project](./app-insights-overview.md), and if your app generates a certain minimum amount of data, Smart Detection of failure anomalies takes 24 hours to learn the normal behavior of your app, before it is switched on and can send alerts. +After setting up [Application Insights for your project](./app-insights-overview.md), and if your app generates a certain minimum amount of data, Smart Detection of Failure Anomalies takes 24 hours to learn the normal behavior of your app, before it is switched on and can send alerts. Here's a sample alert: @@ -74,7 +74,15 @@ Click the alert to configure it. :::image type="content" source="./media/proactive-failure-diagnostics/032.png" alt-text="Rule configuration screen." lightbox="./media/proactive-failure-diagnostics/032.png"::: -Notice that you can disable or delete a Failure Anomalies alert rule, but you can't create another one on the same Application Insights resource. 
+## Delete alerts + +You can disable or delete a Failure Anomalies alert rule, but once deleted you can't create another one for the same Application Insights resource. + +Notice that if you delete an Application Insights resource, the associated Failure Anomalies alert rule doesn't get deleted automatically. You can do so manually on the Alert rules page or with the following Azure CLI command: + +```azurecli +az resource delete --ids +``` ## Example of Failure Anomalies alert webhook payload @@ -403,11 +411,11 @@ Click **Alerts** in the Application Insights resource page to get to the most re :::image type="content" source="./media/proactive-failure-diagnostics/070.png" alt-text="Alerts summary." lightbox="./media/proactive-failure-diagnostics/070.png"::: ## What's the difference ... -Smart Detection of failure anomalies complements other similar but distinct features of Application Insights. +Smart Detection of Failure Anomalies complements other similar but distinct features of Application Insights. -* [metric alerts](../alerts/alerts-log.md) are set by you and can monitor a wide range of metrics such as CPU occupancy, request rates, page load times, and so on. You can use them to warn you, for example, if you need to add more resources. By contrast, Smart Detection of failure anomalies covers a small range of critical metrics (currently only failed request rate), designed to notify you in near real-time manner once your web app's failed request rate increases compared to web app's normal behavior. Unlike metric alerts, Smart Detection automatically sets and updates thresholds in response changes in the behavior. Smart Detection also starts the diagnostic work for you, saving you time in resolving issues. +* [metric alerts](../alerts/alerts-log.md) are set by you and can monitor a wide range of metrics such as CPU occupancy, request rates, page load times, and so on. You can use them to warn you, for example, if you need to add more resources. By contrast, Smart Detection of Failure Anomalies covers a small range of critical metrics (currently only failed request rate), designed to notify you in a near real-time manner once your web app's failed request rate increases compared to the web app's normal behavior. Unlike metric alerts, Smart Detection automatically sets and updates thresholds in response to changes in the behavior. Smart Detection also starts the diagnostic work for you, saving you time in resolving issues. -* [Smart Detection of performance anomalies](proactive-performance-diagnostics.md) also uses machine intelligence to discover unusual patterns in your metrics, and no configuration by you is required.
But unlike Smart Detection of Failure Anomalies, the purpose of Smart Detection of performance anomalies is to find segments of your usage manifold that might be badly served - for example, by specific pages on a specific type of browser. The analysis is performed daily, and if any result is found, it's likely to be much less urgent than an alert. By contrast, the analysis for Failure Anomalies is performed continuously on incoming application data, and you will be notified within minutes if server failure rates are greater than expected. ## If you receive a Smart Detection alert *Why have I received this alert?* @@ -448,4 +456,4 @@ These diagnostic tools help you inspect the data from your app: Smart detections are automatic. But maybe you'd like to set up some more alerts? * [Manually configured metric alerts](../alerts/alerts-log.md) -* [Availability web tests](./monitor-web-app-availability.md) \ No newline at end of file +* [Availability web tests](./monitor-web-app-availability.md) diff --git a/articles/azure-monitor/app/proactive-performance-diagnostics.md b/articles/azure-monitor/app/proactive-performance-diagnostics.md index 5c5729bb1bcbf..20a0fd8d961c2 100644 --- a/articles/azure-monitor/app/proactive-performance-diagnostics.md +++ b/articles/azure-monitor/app/proactive-performance-diagnostics.md @@ -8,7 +8,7 @@ ms.date: 05/04/2017 # Smart detection - Performance Anomalies >[!NOTE] ->You can migrate your Application Insight resources to alerts-bases smart detection (preview). The migration creates alert rules for the different smart detection modules. Once created, you can manage and configure these rules just like any other Azure Monitor alert rules. You can also configure action groups for these rules, thus enabling multiple methods of taking actions or triggering notification on new detections. +>You can migrate your Application Insight resources to alerts-based smart detection (preview). The migration creates alert rules for the different smart detection modules. Once created, you can manage and configure these rules just like any other Azure Monitor alert rules. You can also configure action groups for these rules, thus enabling multiple methods of taking actions or triggering notification on new detections. > > For more information on the migration process, see [Smart Detection Alerts migration](../alerts/alerts-smart-detections-migration.md). diff --git a/articles/azure-monitor/app/proactive-potential-memory-leak.md b/articles/azure-monitor/app/proactive-potential-memory-leak.md index 933a52a076a80..089ab562ab328 100644 --- a/articles/azure-monitor/app/proactive-potential-memory-leak.md +++ b/articles/azure-monitor/app/proactive-potential-memory-leak.md @@ -3,6 +3,7 @@ title: Detect memory leak - Azure Application Insights smart detection description: Monitor applications with Azure Application Insights for potential memory leaks. ms.topic: conceptual ms.date: 12/12/2017 +ms.reviewer: yagil --- # Memory leak detection (preview) diff --git a/articles/azure-monitor/app/proactive-trace-severity.md b/articles/azure-monitor/app/proactive-trace-severity.md index 4e2ee74feb37d..6bce7c1181eb7 100644 --- a/articles/azure-monitor/app/proactive-trace-severity.md +++ b/articles/azure-monitor/app/proactive-trace-severity.md @@ -3,6 +3,7 @@ title: Degradation in trace severity ratio - Azure Application Insights description: Monitor application traces with Azure Application Insights for unusual patterns in trace telemetry with smart detection. 
ms.topic: conceptual ms.date: 11/27/2017 +ms.reviewer: yagil --- # Degradation in trace severity ratio (preview) diff --git a/articles/azure-monitor/app/profiler.md b/articles/azure-monitor/app/profiler.md deleted file mode 100644 index 66c12af6fb982..0000000000000 --- a/articles/azure-monitor/app/profiler.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Profile live Azure App Service apps with Application Insights | Microsoft Docs -description: Profile live apps on Azure App Service with Application Insights Profiler. -ms.topic: conceptual -ms.date: 08/06/2018 ---- - -# Profile live Azure App Service apps with Application Insights - -You can run Profiler on ASP.NET and ASP.NET Core apps that are running on Azure App Service using Basic service tier or higher. Enabling Profiler on Linux is currently only possible via [this method](profiler-aspnetcore-linux.md). - -## Enable Profiler for your app -To enable Profiler for an app, follow the instructions below. If you're running a different type of Azure service, here are instructions for enabling Profiler on other supported platforms: -* [Cloud Services](./profiler-cloudservice.md?toc=%2fazure%2fazure-monitor%2ftoc.json) -* [Service Fabric Applications](./profiler-servicefabric.md?toc=%2fazure%2fazure-monitor%2ftoc.json) -* [Virtual Machines](./profiler-vm.md?toc=%2fazure%2fazure-monitor%2ftoc.json) - -Application Insights Profiler is pre-installed as part of the App Services runtime. The steps below will show you how to enable it for your App Service. Follow these steps even if you've included the App Insights SDK in your application at build time. - -> [!NOTE] -> Codeless installation of Application Insights Profiler follows the .NET Core support policy. -> For more information about supported runtimes, see [.NET Core Support Policy](https://dotnet.microsoft.com/platform/support/policy/dotnet-core). - -1. Navigate to the Azure control panel for your App Service. -1. Enable "Always On" setting for your app service. You can find this setting under **Settings**, **Configuration** page (see screenshot in the next step), and select the **General settings** tab. -1. Navigate to **Settings > Application Insights** page. - - ![Enable App Insights on App Services portal](./media/profiler/AppInsights-AppServices.png) - -1. Either follow the instructions on the pane to create a new resource or select an existing App Insights resource to monitor your app. Also make sure the Profiler is **On**. If your Application Insights resource is in a different subscription from your App Service, you can't use this page to configure Application Insights. You can still do it manually though by creating the necessary app settings manually. [The next section contains instructions for manually enabling Profiler.](#enable-profiler-manually-or-with-azure-resource-manager) - - ![Add App Insights site extension][Enablement UI] - -1. Profiler is now enabled using an App Services App Setting. - - ![App Setting for Profiler][profiler-app-setting] - -## Enable Profiler manually or with Azure Resource Manager -Application Insights Profiler can be enabled by creating app settings for your Azure App Service. The page with the options shown above creates these app settings for you. But you can automate the creation of these settings using a template or other means. These settings will also work if your Application Insights resource is in a different subscription from your Azure App Service. 
-Here are the settings needed to enable the profiler: - -|App Setting | Value | -|---------------|----------| -|APPINSIGHTS_INSTRUMENTATIONKEY | iKey for your Application Insights resource | -|APPINSIGHTS_PROFILERFEATURE_VERSION | 1.0.0 | -|DiagnosticServices_EXTENSION_VERSION | ~3 | - - -You can set these values using [Azure Resource Manager Templates](./azure-web-apps-net-core.md#app-service-application-settings-with-azure-resource-manager), [Azure PowerShell](/powershell/module/az.websites/set-azwebapp), [Azure CLI](/cli/azure/webapp/config/appsettings). - -## Enable Profiler for other clouds - -Currently the only regions that require endpoint modifications are [Azure Government](../../azure-government/compare-azure-government-global-azure.md#application-insights) and [Azure China](/azure/china/resources-developer-guide). - -|App Setting | US Government Cloud | China Cloud | -|---------------|---------------------|-------------| -|ApplicationInsightsProfilerEndpoint | `https://profiler.monitor.azure.us` | `https://profiler.monitor.azure.cn` | -|ApplicationInsightsEndpoint | `https://dc.applicationinsights.us` | `https://dc.applicationinsights.azure.cn` | - -## Enable Azure Active Directory authentication for profile ingestion - -Application Insights Profiler supports Azure AD authentication for profiles ingestion. This means, for all profiles of your application to be ingested, your application must be authenticated and provide the required application settings to the Profiler agent. - -As of today, Profiler only supports Azure AD authentication when you reference and configure Azure AD using the Application Insights SDK in your application. - -Below you can find all the steps required to enable Azure AD for profiles ingestion: -1. Create and add the managed identity you want to use to authenticate against your Application Insights resource to your App Service. - - a. For System-Assigned Managed identity, see the following [documentation](../../app-service/overview-managed-identity.md?tabs=portal%2chttp#add-a-system-assigned-identity) - - b. For User-Assigned Managed identity, see the following [documentation](../../app-service/overview-managed-identity.md?tabs=portal%2chttp#add-a-user-assigned-identity) - -2. Configure and enable Azure AD in your Application Insights resource. For more information, see the following [documentation](./azure-ad-authentication.md?tabs=net#configuring-and-enabling-azure-ad-based-authentication) -3. Add the following application setting, used to let Profiler agent know which managed identity to use: - -For System-Assigned Identity: - -|App Setting | Value | -|---------------|----------| -|APPLICATIONINSIGHTS_AUTHENTICATION_STRING | Authorization=AAD | - -For User-Assigned Identity: - -|App Setting | Value | -|---------------|----------| -|APPLICATIONINSIGHTS_AUTHENTICATION_STRING | Authorization=AAD;ClientId={Client id of the User-Assigned Identity} | - -## Disable Profiler - -To stop or restart Profiler for an individual app's instance, on the left sidebar, select **WebJobs** and stop the webjob named `ApplicationInsightsProfiler3`. - - ![Disable Profiler for a web job][disable-profiler-webjob] - -We recommend that you have Profiler enabled on all your apps to discover any performance issues as early as possible. - -Profiler's files can be deleted when using WebDeploy to deploy changes to your web application. You can prevent the deletion by excluding the App_Data folder from being deleted during deployment. 
- - -## Next steps - -* [Working with Application Insights in Visual Studio](./visual-studio.md) - -[Enablement UI]: ./media/profiler/Enablement_UI.png -[profiler-app-setting]:./media/profiler/profiler-app-setting.png -[disable-profiler-webjob]: ./media/profiler/disable-profiler-webjob.png \ No newline at end of file diff --git a/articles/azure-monitor/app/remove-application-insights.md b/articles/azure-monitor/app/remove-application-insights.md index cbed4ae6440a5..0c10de47e1ce7 100644 --- a/articles/azure-monitor/app/remove-application-insights.md +++ b/articles/azure-monitor/app/remove-application-insights.md @@ -3,6 +3,7 @@ title: Remove Application Insights in Visual Studio - Azure Monitor description: How to remove Application Insights SDK for ASP.NET and ASP.NET Core in Visual Studio. ms.topic: conceptual ms.date: 04/06/2020 +ms.reviewer: cithomas --- # How to remove Application Insights in Visual Studio diff --git a/articles/azure-monitor/app/resource-manager-app-resource.md b/articles/azure-monitor/app/resource-manager-app-resource.md index cc9b9d30804ae..f0db356a63b7c 100644 --- a/articles/azure-monitor/app/resource-manager-app-resource.md +++ b/articles/azure-monitor/app/resource-manager-app-resource.md @@ -4,6 +4,7 @@ description: Sample Azure Resource Manager templates to deploy Application Insig ms.topic: sample ms.date: 04/27/2022 ms.custom: ignite-fall-2021 +ms.reviewer: vitalyg --- # Resource Manager template samples for creating Application Insights resources diff --git a/articles/azure-monitor/app/resource-manager-web-app.md b/articles/azure-monitor/app/resource-manager-web-app.md index d4de23fb0ac29..f9b78fc5b03d3 100644 --- a/articles/azure-monitor/app/resource-manager-web-app.md +++ b/articles/azure-monitor/app/resource-manager-web-app.md @@ -4,6 +4,7 @@ description: Sample Azure Resource Manager templates to deploy an Azure App Serv ms.topic: sample ms.custom: devx-track-dotnet ms.date: 04/27/2022 +ms.reviewer: vitalyg --- # Resource Manager template samples for creating Azure App Services web apps with Application Insights monitoring diff --git a/articles/azure-monitor/app/resources-roles-access-control.md b/articles/azure-monitor/app/resources-roles-access-control.md index aa8d6e3f19222..1ecc289033346 100644 --- a/articles/azure-monitor/app/resources-roles-access-control.md +++ b/articles/azure-monitor/app/resources-roles-access-control.md @@ -4,6 +4,7 @@ description: Owners, contributors and readers of your organization's insights. ms.topic: conceptual ms.date: 02/14/2019 ms.custom: devx-track-azurepowershell +ms.reviewer: jogrima --- # Resources, roles, and access control in Application Insights diff --git a/articles/azure-monitor/app/sampling.md b/articles/azure-monitor/app/sampling.md index c16843621236c..70146a1058255 100644 --- a/articles/azure-monitor/app/sampling.md +++ b/articles/azure-monitor/app/sampling.md @@ -3,8 +3,8 @@ title: Telemetry sampling in Azure Application Insights | Microsoft Docs description: How to keep the volume of telemetry under control. ms.topic: conceptual ms.date: 08/26/2021 -ms.reviewer: vitalyg ms.custom: fasttrack-edit +ms.reviewer: mmcc --- # Sampling in Application Insights @@ -104,11 +104,11 @@ In [`ApplicationInsights.config`](./configuration-with-applicationinsights-confi The amount of telemetry to sample when the app has just started. Don't reduce this value while you're debugging. -* `Trace;Exception` +* `type;type` A semi-colon delimited list of types that you do not want to be subject to sampling. 
Recognized types are: `Dependency`, `Event`, `Exception`, `PageView`, `Request`, `Trace`. All telemetry of the specified types is transmitted; the types that are not specified will be sampled. -* `Request;Dependency` +* `type;type` A semi-colon delimited list of types that you do want to subject to sampling. Recognized types are: `Dependency`, `Event`, `Exception`, `PageView`, `Request`, `Trace`. The specified types will be sampled; all telemetry of the other types will always be transmitted. diff --git a/articles/azure-monitor/app/sdk-connection-string.md b/articles/azure-monitor/app/sdk-connection-string.md index d3d4de110a305..7eb46548c6198 100644 --- a/articles/azure-monitor/app/sdk-connection-string.md +++ b/articles/azure-monitor/app/sdk-connection-string.md @@ -4,6 +4,7 @@ description: How to use connection strings. ms.topic: conceptual ms.date: 04/13/2022 ms.custom: "devx-track-js, devx-track-csharp" +ms.reviewer: cogoodson --- # Connection strings diff --git a/articles/azure-monitor/app/sdk-support-guidance.md b/articles/azure-monitor/app/sdk-support-guidance.md new file mode 100644 index 0000000000000..12b48ae5f6042 --- /dev/null +++ b/articles/azure-monitor/app/sdk-support-guidance.md @@ -0,0 +1,36 @@ +--- +title: Application Insights SDK support guidance +description: Support guidance for Application Insights legacy and preview SDKs +services: azure-monitor +ms.topic: conceptual +ms.date: 03/24/2022 +ms.reviewer: vgorbenko +--- + +# Application Insights SDK support guidance + +Microsoft announces feature deprecations or breaking changes at least three years in advance and strives to provide a seamless process for migration to the replacement experience. + +The [Microsoft Azure SDK lifecycle policy](https://docs.microsoft.com/lifecycle/faq/azure) is followed when features are enhanced in a new SDK or before an SDK is designated as legacy. Microsoft strives to retain legacy SDK functionality, but newer features may not be available with older versions. + +> [!NOTE] +> Diagnostic tools often provide better insight into the root cause of a problem when the latest stable SDK version is used. + +Support engineers are expected to provide SDK update guidance according to the following table, referencing the current SDK version in use and any alternatives. + +|Current SDK version in use |Alternative version available |Update policy for support | +|---------|---------|---------| +|Stable and less than one year old | Newer supported stable version | **UPDATE RECOMMENDED** | +|Stable and more than one year old | Newer supported stable version | **UPDATE REQUIRED** | +|Unsupported ([support policy](https://docs.microsoft.com/lifecycle/faq/azure)) | Any supported version | **UPDATE REQUIRED** | +|Preview | Stable version | **UPDATE REQUIRED** | +|Preview | Older stable version | **UPDATE RECOMMENDED** | +|Preview | Newer preview version, no older stable version | **UPDATE RECOMMENDED** | + +> [!TIP] +> Switching to [auto-instrumentation](codeless-overview.md) eliminates the need for manual SDK updates. + +> [!WARNING] +> Only commercially reasonable support is provided for Preview versions of the SDK. If a support incident requires escalation to development for further guidance, customers will be asked to use a fully supported SDK version to continue support. Commercially reasonable support does not include an option to engage Microsoft product development resources; technical workarounds may be limited or not possible. 
+ +To see the current version of Application Insights SDKs and previous versions' release dates, reference the [release notes](release-notes.md). \ No newline at end of file diff --git a/articles/azure-monitor/app/separate-resources.md b/articles/azure-monitor/app/separate-resources.md index 3e394d8799575..b581c91592186 100644 --- a/articles/azure-monitor/app/separate-resources.md +++ b/articles/azure-monitor/app/separate-resources.md @@ -3,6 +3,7 @@ title: How to design your Application Insights deployment - One vs many resource description: Direct telemetry to different resources for development, test, and production stamps. ms.topic: conceptual ms.date: 05/11/2020 +ms.reviewer: rijolly --- # How many Application Insights resources should I deploy diff --git a/articles/azure-monitor/app/sharepoint.md b/articles/azure-monitor/app/sharepoint.md index 908cfb0253832..538fbf166c92a 100644 --- a/articles/azure-monitor/app/sharepoint.md +++ b/articles/azure-monitor/app/sharepoint.md @@ -3,6 +3,7 @@ title: Monitor a SharePoint site with Application Insights description: Start monitoring a new application with a new instrumentation key ms.topic: conceptual ms.date: 09/08/2020 +ms.reviewer: newylie --- # Monitor a SharePoint site with Application Insights diff --git a/articles/azure-monitor/app/sla-report.md b/articles/azure-monitor/app/sla-report.md index 90d7773c27d66..64740a843bc1f 100644 --- a/articles/azure-monitor/app/sla-report.md +++ b/articles/azure-monitor/app/sla-report.md @@ -3,6 +3,7 @@ title: Downtime, SLA, and outage workbook - Application Insights description: Calculate and report SLA for Web Test through a single pane of glass across your Application Insights resources and Azure subscriptions. ms.topic: conceptual ms.date: 05/4/2021 +ms.reviewer: casocha --- # Downtime, SLA, and outages workbook diff --git a/articles/azure-monitor/app/snapshot-collector-release-notes.md b/articles/azure-monitor/app/snapshot-collector-release-notes.md index 570bc21e35250..89c8b0fce2f7f 100644 --- a/articles/azure-monitor/app/snapshot-collector-release-notes.md +++ b/articles/azure-monitor/app/snapshot-collector-release-notes.md @@ -3,6 +3,7 @@ title: Release Notes for Microsoft.ApplicationInsights.SnapshotCollector NuGet p description: Release notes for the Microsoft.ApplicationInsights.SnapshotCollector NuGet package used by the Application Insights Snapshot Debugger.
ms.topic: conceptual ms.date: 11/10/2020 +ms.reviewer: pharring --- # Release notes for Microsoft.ApplicationInsights.SnapshotCollector diff --git a/articles/azure-monitor/app/snapshot-debugger-function-app.md b/articles/azure-monitor/app/snapshot-debugger-function-app.md index 6ed59d4fdd8f4..cc530fa052fa5 100644 --- a/articles/azure-monitor/app/snapshot-debugger-function-app.md +++ b/articles/azure-monitor/app/snapshot-debugger-function-app.md @@ -3,6 +3,7 @@ title: Enable Snapshot Debugger for .NET and .NET Core apps in Azure Functions | description: Enable Snapshot Debugger for .NET and .NET Core apps in Azure Functions ms.topic: conceptual ms.date: 12/18/2020 +ms.reviewer: jogrima --- # Enable Snapshot Debugger for .NET and .NET Core apps in Azure Functions diff --git a/articles/azure-monitor/app/snapshot-debugger-troubleshoot.md b/articles/azure-monitor/app/snapshot-debugger-troubleshoot.md index 4b621c5961c11..5339f1a8503df 100644 --- a/articles/azure-monitor/app/snapshot-debugger-troubleshoot.md +++ b/articles/azure-monitor/app/snapshot-debugger-troubleshoot.md @@ -3,6 +3,7 @@ title: Troubleshoot Azure Application Insights Snapshot Debugger description: This article presents troubleshooting steps and information to help developers enable and use Application Insights Snapshot Debugger. ms.topic: conceptual ms.date: 03/07/2019 +ms.reviewer: jogrima --- # Troubleshoot problems enabling Application Insights Snapshot Debugger or viewing snapshots diff --git a/articles/azure-monitor/app/snapshot-debugger-upgrade.md b/articles/azure-monitor/app/snapshot-debugger-upgrade.md index 919647b681ca2..a34ac93c5e17c 100644 --- a/articles/azure-monitor/app/snapshot-debugger-upgrade.md +++ b/articles/azure-monitor/app/snapshot-debugger-upgrade.md @@ -3,6 +3,7 @@ title: Upgrading Azure Application Insights Snapshot Debugger description: How to upgrade Snapshot Debugger for .NET apps to the latest version on Azure App Services, or via Nuget packages ms.topic: conceptual ms.date: 03/28/2019 +ms.reviewer: pharring --- # Upgrading the Snapshot Debugger diff --git a/articles/azure-monitor/app/snapshot-debugger-vm.md b/articles/azure-monitor/app/snapshot-debugger-vm.md index 99138a7ca794f..046b33bc961b3 100644 --- a/articles/azure-monitor/app/snapshot-debugger-vm.md +++ b/articles/azure-monitor/app/snapshot-debugger-vm.md @@ -3,6 +3,7 @@ title: Enable Snapshot Debugger for .NET apps in Azure Service Fabric, Cloud Ser description: Enable Snapshot Debugger for .NET apps in Azure Service Fabric, Cloud Service, and Virtual Machines ms.topic: conceptual ms.date: 03/07/2019 +ms.reviewer: jogrima --- # Enable Snapshot Debugger for .NET apps in Azure Service Fabric, Cloud Service, and Virtual Machines diff --git a/articles/azure-monitor/app/snapshot-debugger.md b/articles/azure-monitor/app/snapshot-debugger.md index ddd27344eaa89..23ebdd87abd54 100644 --- a/articles/azure-monitor/app/snapshot-debugger.md +++ b/articles/azure-monitor/app/snapshot-debugger.md @@ -4,7 +4,7 @@ description: Debug snapshots are automatically collected when exceptions are thr ms.topic: conceptual ms.custom: devx-track-dotnet ms.date: 10/12/2021 -ms.reviewer: cweining +ms.reviewer: saars --- # Debug snapshots on exceptions in .NET apps diff --git a/articles/azure-monitor/app/source-map-support.md b/articles/azure-monitor/app/source-map-support.md index 8066a7d8e1b5f..260cc96a64567 100644 --- a/articles/azure-monitor/app/source-map-support.md +++ b/articles/azure-monitor/app/source-map-support.md @@ -4,6 +4,7 @@ description: Learn how to 
upload source maps to your own storage account Blob co ms.topic: conceptual ms.date: 06/23/2020 ms.custom: devx-track-js +ms.reviewer: mmcc --- # Source map support for JavaScript applications diff --git a/articles/azure-monitor/app/standard-metrics.md b/articles/azure-monitor/app/standard-metrics.md index 6e50b7337a0fb..354e48e00606a 100644 --- a/articles/azure-monitor/app/standard-metrics.md +++ b/articles/azure-monitor/app/standard-metrics.md @@ -4,6 +4,7 @@ description: This article lists Azure Application Insights metrics with supporte services: azure-monitor ms.topic: reference ms.date: 07/03/2019 +ms.reviewer: vitalyg --- # Application Insights standard metrics diff --git a/articles/azure-monitor/app/statsbeat.md b/articles/azure-monitor/app/statsbeat.md index 3f1e23496953f..e57fd4dc78ec9 100644 --- a/articles/azure-monitor/app/statsbeat.md +++ b/articles/azure-monitor/app/statsbeat.md @@ -4,6 +4,7 @@ description: Statistics about Application Insights SDKs and Auto-Instrumentation ms.topic: conceptual ms.date: 09/20/2021 ms.custom: references_regions +ms.reviewer: heya --- # Statsbeat in Azure Application Insights diff --git a/articles/azure-monitor/app/status-monitor-v2-get-started.md b/articles/azure-monitor/app/status-monitor-v2-get-started.md index 81f6f646a98bc..bc4af2c5caf43 100644 --- a/articles/azure-monitor/app/status-monitor-v2-get-started.md +++ b/articles/azure-monitor/app/status-monitor-v2-get-started.md @@ -4,6 +4,7 @@ description: A quickstart guide for Application Insights Agent. Monitor website ms.topic: conceptual ms.date: 01/22/2021 ms.custom: devx-track-azurepowershell +ms.reviewer: abinetabate --- # Get started with Azure Monitor Application Insights Agent for on-premises servers diff --git a/articles/azure-monitor/app/status-monitor-v2-overview.md b/articles/azure-monitor/app/status-monitor-v2-overview.md index f2813266ab9ff..649a094265c34 100644 --- a/articles/azure-monitor/app/status-monitor-v2-overview.md +++ b/articles/azure-monitor/app/status-monitor-v2-overview.md @@ -3,6 +3,7 @@ title: Azure Application Insights Agent overview | Microsoft Docs description: An overview of Application Insights Agent. Monitor website performance without redeploying the website. Works with ASP.NET web apps hosted on-premises, in VMs, or on Azure. ms.topic: conceptual ms.date: 09/16/2019 +ms.reviewer: abinetabate --- # Deploy Azure Monitor Application Insights Agent for on-premises servers diff --git a/articles/azure-monitor/app/status-monitor-v2-troubleshoot.md b/articles/azure-monitor/app/status-monitor-v2-troubleshoot.md index 3ae85f88377a6..f2772c4345257 100644 --- a/articles/azure-monitor/app/status-monitor-v2-troubleshoot.md +++ b/articles/azure-monitor/app/status-monitor-v2-troubleshoot.md @@ -3,6 +3,7 @@ title: Azure Application Insights Agent troubleshooting and known issues | Micro description: The known issues of Application Insights Agent and troubleshooting examples. Monitor website performance without redeploying the website. Works with ASP.NET web apps hosted on-premises, in VMs, or on Azure.
ms.topic: conceptual ms.date: 04/23/2019 +ms.reviewer: abinetabate --- # Troubleshooting Application Insights Agent (formerly named Status Monitor v2) diff --git a/articles/azure-monitor/app/telemetry-channels.md b/articles/azure-monitor/app/telemetry-channels.md index 5fe37c1e6e3b6..2f9652ad11a5a 100644 --- a/articles/azure-monitor/app/telemetry-channels.md +++ b/articles/azure-monitor/app/telemetry-channels.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 05/14/2019 ms.devlang: csharp ms.custom: devx-track-csharp +ms.reviewer: casocha --- # Telemetry channels in Application Insights diff --git a/articles/azure-monitor/app/troubleshoot-availability.md b/articles/azure-monitor/app/troubleshoot-availability.md index cd5d44c4101bd..1761c6dccba41 100644 --- a/articles/azure-monitor/app/troubleshoot-availability.md +++ b/articles/azure-monitor/app/troubleshoot-availability.md @@ -3,7 +3,7 @@ title: Troubleshoot your Azure Application Insights availability tests description: Troubleshoot web tests in Azure Application Insights. Get alerts if a website becomes unavailable or responds slowly. ms.topic: conceptual ms.date: 02/14/2021 -ms.reviewer: sdash +ms.reviewer: casocha --- # Troubleshooting diff --git a/articles/azure-monitor/app/tutorial-alert.md b/articles/azure-monitor/app/tutorial-alert.md index 5ce24d1496dbe..79a7985b8b3e9 100644 --- a/articles/azure-monitor/app/tutorial-alert.md +++ b/articles/azure-monitor/app/tutorial-alert.md @@ -4,6 +4,7 @@ description: Tutorial to send alerts in response to errors in your application u ms.topic: tutorial ms.date: 04/10/2019 ms.custom: mvc +ms.reviewer: vitalyg --- # Monitor and alert on application health with Azure Application Insights diff --git a/articles/azure-monitor/app/tutorial-app-dashboards.md b/articles/azure-monitor/app/tutorial-app-dashboards.md index 43c25bb2440d8..c6476e7669ab1 100644 --- a/articles/azure-monitor/app/tutorial-app-dashboards.md +++ b/articles/azure-monitor/app/tutorial-app-dashboards.md @@ -4,6 +4,7 @@ description: Tutorial to create custom KPI dashboards using Azure Application In ms.topic: tutorial ms.date: 09/30/2020 ms.custom: mvc, contperf-fy21q1 +ms.reviewer: vitalyg --- # Create custom KPI dashboards using Azure Application Insights diff --git a/articles/azure-monitor/app/tutorial-performance.md b/articles/azure-monitor/app/tutorial-performance.md index 6df121b133807..f8db4e3a21963 100644 --- a/articles/azure-monitor/app/tutorial-performance.md +++ b/articles/azure-monitor/app/tutorial-performance.md @@ -4,6 +4,7 @@ description: Tutorial to find and diagnose performance issues in your applicatio ms.topic: tutorial ms.date: 06/15/2020 ms.custom: mvc +ms.reviewer: vitalyg --- # Find and diagnose performance issues with Azure Application Insights @@ -25,7 +26,7 @@ To complete this tutorial: - ASP.NET and web development - Azure development - Deploy a .NET application to Azure and [enable the Application Insights SDK](../app/asp-net.md). -- [Enable the Application Insights profiler](../app/profiler.md#installation) for your application. +- [Enable the Application Insights profiler](../app/profiler.md) for your application. ## Log in to Azure Log in to the Azure portal at [https://portal.azure.com](https://portal.azure.com). 
diff --git a/articles/azure-monitor/app/tutorial-runtime-exceptions.md b/articles/azure-monitor/app/tutorial-runtime-exceptions.md index 4a2878a7a9b8a..1ae939630a8e6 100644 --- a/articles/azure-monitor/app/tutorial-runtime-exceptions.md +++ b/articles/azure-monitor/app/tutorial-runtime-exceptions.md @@ -4,6 +4,7 @@ description: Tutorial to find and diagnose run-time exceptions in your applicati ms.topic: tutorial ms.date: 09/19/2017 ms.custom: mvc +ms.reviewer: vitalyg --- # Find and diagnose run-time exceptions with Azure Application Insights diff --git a/articles/azure-monitor/app/tutorial-users.md b/articles/azure-monitor/app/tutorial-users.md index fba152c1e80c4..29aa7fb5b5258 100644 --- a/articles/azure-monitor/app/tutorial-users.md +++ b/articles/azure-monitor/app/tutorial-users.md @@ -4,6 +4,7 @@ description: Tutorial on using Application Insights to understand how customers ms.topic: tutorial ms.date: 07/30/2021 ms.custom: mvc +ms.reviewer: vitalyg --- # Use Azure Application Insights to understand how customers are using your application diff --git a/articles/azure-monitor/app/usage-flows.md b/articles/azure-monitor/app/usage-flows.md index bd1c0d49b7658..123dbc266795c 100644 --- a/articles/azure-monitor/app/usage-flows.md +++ b/articles/azure-monitor/app/usage-flows.md @@ -3,6 +3,7 @@ title: Application Insights User Flows analyzes navigation flows description: Analyze how users navigate between the pages and features of your web app. ms.topic: conceptual ms.date: 07/30/2021 +ms.reviewer: mmcc --- # Analyze user navigation patterns with User Flows in Application Insights diff --git a/articles/azure-monitor/app/usage-funnels.md b/articles/azure-monitor/app/usage-funnels.md index ae2698480a3ea..f16e2be4b0456 100644 --- a/articles/azure-monitor/app/usage-funnels.md +++ b/articles/azure-monitor/app/usage-funnels.md @@ -3,6 +3,7 @@ title: Application Insights Funnels description: Learn how you can use Funnels to discover how customers are interacting with your application. ms.topic: conceptual ms.date: 07/30/2021 +ms.reviewer: mmcc --- # Discover how customers are using your application with Application Insights Funnels diff --git a/articles/azure-monitor/app/usage-heart.md b/articles/azure-monitor/app/usage-heart.md index 3015a925cc44d..354085866bc4f 100644 --- a/articles/azure-monitor/app/usage-heart.md +++ b/articles/azure-monitor/app/usage-heart.md @@ -3,6 +3,7 @@ title: HEART analytics workbook description: Product teams use the HEART Workbook to measure success across five user-centric dimensions to deliver better software. ms.topic: conceptual ms.date: 11/11/2021 +ms.reviewer: mmccgit --- # Analyzing product usage with HEART diff --git a/articles/azure-monitor/app/usage-overview.md b/articles/azure-monitor/app/usage-overview.md index 27e76ea7e7a0b..36099643c4730 100644 --- a/articles/azure-monitor/app/usage-overview.md +++ b/articles/azure-monitor/app/usage-overview.md @@ -3,6 +3,7 @@ title: Usage analysis with Application Insights | Azure Monitor description: Understand your users and what they do with your app. 
ms.topic: conceptual ms.date: 07/30/2021 +ms.reviewer: mmcc --- # Usage analysis with Application Insights diff --git a/articles/azure-monitor/app/usage-retention.md b/articles/azure-monitor/app/usage-retention.md index f19d4c753ee0b..ea5dfeef7bb22 100644 --- a/articles/azure-monitor/app/usage-retention.md +++ b/articles/azure-monitor/app/usage-retention.md @@ -3,6 +3,7 @@ title: Analyze web app user retention with Application Insights description: How many users return to your app? ms.topic: conceptual ms.date: 07/30/2021 +ms.reviewer: mmcc --- # User retention analysis for web applications with Application Insights diff --git a/articles/azure-monitor/app/usage-segmentation.md b/articles/azure-monitor/app/usage-segmentation.md index 7b542de369ec2..8087baa2060b0 100644 --- a/articles/azure-monitor/app/usage-segmentation.md +++ b/articles/azure-monitor/app/usage-segmentation.md @@ -3,6 +3,7 @@ title: User, session, and event analysis in Application Insights description: Demographic analysis of users of your web app. ms.topic: conceptual ms.date: 07/30/2021 +ms.reviewer: mmcc --- # Users, sessions, and events analysis in Application Insights diff --git a/articles/azure-monitor/app/usage-troubleshoot.md b/articles/azure-monitor/app/usage-troubleshoot.md index 53692809ab6c6..e3c888578478f 100644 --- a/articles/azure-monitor/app/usage-troubleshoot.md +++ b/articles/azure-monitor/app/usage-troubleshoot.md @@ -3,6 +3,7 @@ title: Troubleshoot user analytics tools - Application Insights description: Troubleshooting guide - analyzing site and app usage with Application Insights. ms.topic: conceptual ms.date: 07/30/2021 +ms.reviewer: mmcc --- # Troubleshoot user behavior analytics tools in Application Insights diff --git a/articles/azure-monitor/app/visual-studio-codelens.md b/articles/azure-monitor/app/visual-studio-codelens.md index 56841cd61ef7a..6524f4e5c7dc4 100644 --- a/articles/azure-monitor/app/visual-studio-codelens.md +++ b/articles/azure-monitor/app/visual-studio-codelens.md @@ -4,6 +4,7 @@ description: Quickly access your Application Insights request and exception tele ms.topic: conceptual ms.date: 03/17/2017 ms.custom: vs-azure +ms.reviewer: masoucou --- # Application Insights telemetry in Visual Studio CodeLens diff --git a/articles/azure-monitor/app/visual-studio.md b/articles/azure-monitor/app/visual-studio.md index e473e05279402..7096ed1cad911 100644 --- a/articles/azure-monitor/app/visual-studio.md +++ b/articles/azure-monitor/app/visual-studio.md @@ -4,6 +4,7 @@ description: Web app performance analysis and diagnostics during debugging and i ms.topic: conceptual ms.date: 03/17/2017 ms.custom: vs-azure +ms.reviewer: daviste --- # Debug your applications with Azure Application Insights in Visual Studio diff --git a/articles/azure-monitor/app/web-app-extension-release-notes.md b/articles/azure-monitor/app/web-app-extension-release-notes.md index f2c29e77109cc..35ab9af8be741 100644 --- a/articles/azure-monitor/app/web-app-extension-release-notes.md +++ b/articles/azure-monitor/app/web-app-extension-release-notes.md @@ -3,6 +3,7 @@ title: Release Notes for Azure web app extension - Application Insights description: Releases notes for Azure Web Apps Extension for runtime instrumentation with Application Insights. 
ms.topic: conceptual ms.date: 06/26/2020 +ms.reviewer: rajrang --- # Release notes for Azure Web App extension for Application Insights diff --git a/articles/azure-monitor/app/windows-desktop.md b/articles/azure-monitor/app/windows-desktop.md index dae27de89c916..9bb9e0492b97d 100644 --- a/articles/azure-monitor/app/windows-desktop.md +++ b/articles/azure-monitor/app/windows-desktop.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 06/11/2020 ms.devlang: csharp ms.custom: fasttrack-edit +ms.reviewer: abinetabate --- # Monitoring usage and performance in Classic Windows Desktop apps diff --git a/articles/azure-monitor/app/work-item-integration.md b/articles/azure-monitor/app/work-item-integration.md index 34fd239e6d9a4..f4cb7b9526b95 100644 --- a/articles/azure-monitor/app/work-item-integration.md +++ b/articles/azure-monitor/app/work-item-integration.md @@ -3,6 +3,7 @@ title: Work Item Integration - Application Insights description: Learn how to create work items in GitHub or Azure DevOps with Application Insights data embedded in them. ms.topic: conceptual ms.date: 06/27/2021 +ms.reviewer: casocha --- # Work Item Integration diff --git a/articles/azure-monitor/app/worker-service.md b/articles/azure-monitor/app/worker-service.md index ca2c0dfb1c964..88ecc0e7a2928 100644 --- a/articles/azure-monitor/app/worker-service.md +++ b/articles/azure-monitor/app/worker-service.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.devlang: csharp ms.custom: devx-track-csharp ms.date: 05/12/2022 +ms.reviewer: cithomas --- # Application Insights for Worker Service applications (non-HTTP applications) @@ -247,9 +248,14 @@ Full example is shared [here](https://github.com/microsoft/ApplicationInsights-d IServiceCollection services = new ServiceCollection(); // Being a regular console app, there is no appsettings.json or configuration providers enabled by default. - // Hence connection string and any changes to default logging level must be specified here. + // Hence instrumentation key/ connection string and any changes to default logging level must be specified here. services.AddLogging(loggingBuilder => loggingBuilder.AddFilter("Category", LogLevel.Information)); - services.AddApplicationInsightsTelemetryWorkerService("connection string here"); + services.AddApplicationInsightsTelemetryWorkerService("instrumentation key here"); + + // To pass a connection string + // - aiserviceoptions must be created + // - set connectionstring on it + // - pass it to AddApplicationInsightsTelemetryWorkerService() // Build ServiceProvider. IServiceProvider serviceProvider = services.BuildServiceProvider(); diff --git a/articles/azure-monitor/autoscale/autoscale-best-practices.md b/articles/azure-monitor/autoscale/autoscale-best-practices.md index 9578797f1163d..047382ad75e1f 100644 --- a/articles/azure-monitor/autoscale/autoscale-best-practices.md +++ b/articles/azure-monitor/autoscale/autoscale-best-practices.md @@ -4,6 +4,7 @@ description: Autoscale patterns in Azure for Web Apps, Virtual Machine Scale set ms.topic: conceptual ms.date: 04/22/2022 ms.subservice: autoscale +ms.reviewer: riroloff --- # Best practices for Autoscale Azure Monitor autoscale applies only to [Virtual Machine Scale Sets](https://azure.microsoft.com/services/virtual-machine-scale-sets/), [Cloud Services](https://azure.microsoft.com/services/cloud-services/), [App Service - Web Apps](https://azure.microsoft.com/services/app-service/web/), and [API Management services](../../api-management/api-management-key-concepts.md). 
@@ -150,6 +151,14 @@ You can also use an Activity Log alert to monitor the health of the autoscale en In addition to using activity log alerts, you can also configure email or webhook notifications to get notified for scale actions via the notifications tab on the autoscale setting. +## Send data securely using TLS 1.2 +To ensure the security of data in transit to Azure Monitor, we strongly encourage you to configure the agent to use at least Transport Layer Security (TLS) 1.2. Older versions of TLS/Secure Sockets Layer (SSL) have been found to be vulnerable, and while they still currently work to allow backwards compatibility, they are **not recommended**, and the industry is quickly moving to abandon support for these older protocols. + +The [PCI Security Standards Council](https://www.pcisecuritystandards.org/) has set a deadline of [June 30th, 2018](https://www.pcisecuritystandards.org/pdfs/PCI_SSC_Migrating_from_SSL_and_Early_TLS_Resource_Guide.pdf) to disable older versions of TLS/SSL and upgrade to more secure protocols. Once Azure drops legacy support, if your agents cannot communicate over at least TLS 1.2, you would not be able to send data to Azure Monitor Logs. + +We recommend you do NOT explicitly set your agent to only use TLS 1.2 unless absolutely necessary. Allowing the agent to automatically detect, negotiate, and take advantage of future security standards is preferable. Otherwise, you may miss the added security of the newer standards and possibly experience problems if TLS 1.2 is ever deprecated in favor of those newer standards. + + ## Next Steps - [Create an Activity Log Alert to monitor all autoscale engine operations on your subscription.](https://github.com/Azure/azure-quickstart-templates/tree/master/demos/monitor-autoscale-alert) - [Create an Activity Log Alert to monitor all failed autoscale scale in/scale out operations on your subscription](https://github.com/Azure/azure-quickstart-templates/tree/master/demos/monitor-autoscale-failed-alert) diff --git a/articles/azure-monitor/autoscale/autoscale-common-metrics.md b/articles/azure-monitor/autoscale/autoscale-common-metrics.md index e8fa9d97c0bb8..a7fc5674bc0a7 100644 --- a/articles/azure-monitor/autoscale/autoscale-common-metrics.md +++ b/articles/azure-monitor/autoscale/autoscale-common-metrics.md @@ -5,6 +5,8 @@ ms.topic: conceptual ms.date: 04/22/2022 ms.subservice: autoscale ms.custom: devx-track-azurepowershell +ms.reviewer: riroloff + --- # Azure Monitor autoscaling common metrics diff --git a/articles/azure-monitor/autoscale/autoscale-common-scale-patterns.md b/articles/azure-monitor/autoscale/autoscale-common-scale-patterns.md index d3dca2dd5dffd..344fb91c318d6 100644 --- a/articles/azure-monitor/autoscale/autoscale-common-scale-patterns.md +++ b/articles/azure-monitor/autoscale/autoscale-common-scale-patterns.md @@ -4,6 +4,7 @@ description: Learn some of the common patterns to auto scale your resource in Az ms.topic: conceptual ms.date: 04/22/2022 ms.subservice: autoscale +ms.reviewer: riroloff --- # Overview of common autoscale patterns This article describes some of the common patterns to scale your resource in Azure.
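To complement the TLS 1.2 guidance added above: the intent is to treat TLS 1.2 as a floor rather than a pin. As a generic illustration only (plain Python standard library, not specific to any Azure Monitor agent or SDK), a client can require at least TLS 1.2 while still allowing newer protocol versions to be negotiated:

```python
# Illustrative only: enforce TLS 1.2 as a minimum, not an exclusive version,
# so newer protocols can still be negotiated when both endpoints support them.
import ssl
import urllib.request

context = ssl.create_default_context()
context.minimum_version = ssl.TLSVersion.TLSv1_2  # requires Python 3.7+

with urllib.request.urlopen("https://example.com", context=context) as response:
    print(response.status)
```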
diff --git a/articles/azure-monitor/autoscale/autoscale-custom-metric.md b/articles/azure-monitor/autoscale/autoscale-custom-metric.md index 49850c0d14eb9..6b994904ba7db 100644 --- a/articles/azure-monitor/autoscale/autoscale-custom-metric.md +++ b/articles/azure-monitor/autoscale/autoscale-custom-metric.md @@ -4,6 +4,7 @@ description: Learn how to scale your resource by custom metric in Azure. ms.topic: conceptual ms.date: 05/07/2017 ms.subservice: autoscale +ms.reviewer: riroloff --- # Get started with auto scale by custom metric in Azure This article describes how to scale your resource by a custom metric in Azure portal. diff --git a/articles/azure-monitor/autoscale/autoscale-get-started.md b/articles/azure-monitor/autoscale/autoscale-get-started.md index be15853422dbb..fb3537bf5825d 100644 --- a/articles/azure-monitor/autoscale/autoscale-get-started.md +++ b/articles/azure-monitor/autoscale/autoscale-get-started.md @@ -4,6 +4,7 @@ description: "Learn how to scale your resource web app, cloud service, virtual m ms.topic: conceptual ms.date: 04/05/2022 ms.subservice: autoscale +ms.reviewer: riroloff --- # Get started with Autoscale in Azure This article describes how to set up your Autoscale settings for your resource in the Microsoft Azure portal. diff --git a/articles/azure-monitor/autoscale/autoscale-overview.md b/articles/azure-monitor/autoscale/autoscale-overview.md index c6559521ed7ec..e716d13b025e8 100644 --- a/articles/azure-monitor/autoscale/autoscale-overview.md +++ b/articles/azure-monitor/autoscale/autoscale-overview.md @@ -4,6 +4,7 @@ description: "Autoscale in Microsoft Azure" ms.subservice: autoscale ms.topic: conceptual ms.date: 04/22/2022 +ms.reviewer: riroloff --- diff --git a/articles/azure-monitor/autoscale/autoscale-predictive.md b/articles/azure-monitor/autoscale/autoscale-predictive.md index 011083ca5311a..fd8e386c3b527 100644 --- a/articles/azure-monitor/autoscale/autoscale-predictive.md +++ b/articles/azure-monitor/autoscale/autoscale-predictive.md @@ -5,6 +5,7 @@ ms.topic: conceptual ms.date: 01/24/2022 ms.subservice: autoscale ms.custom: references_regions +ms.reviewer: riroloff --- # Use predictive autoscale to scale out before load demands in virtual machine scale sets (Preview) diff --git a/articles/azure-monitor/autoscale/autoscale-resource-log-schema.md b/articles/azure-monitor/autoscale/autoscale-resource-log-schema.md index 354818df5b16f..b537fbdf9dff7 100644 --- a/articles/azure-monitor/autoscale/autoscale-resource-log-schema.md +++ b/articles/azure-monitor/autoscale/autoscale-resource-log-schema.md @@ -4,6 +4,7 @@ description: Format of logs for monitoring and troubleshooting autoscale actions ms.topic: conceptual ms.date: 11/14/2019 ms.subservice: autoscale +ms.reviewer: riroloff --- # Azure Monitor autoscale actions resource log schema diff --git a/articles/azure-monitor/autoscale/autoscale-troubleshoot.md b/articles/azure-monitor/autoscale/autoscale-troubleshoot.md index 3b5fbba9be5a9..dec22df55ce6f 100644 --- a/articles/azure-monitor/autoscale/autoscale-troubleshoot.md +++ b/articles/azure-monitor/autoscale/autoscale-troubleshoot.md @@ -4,6 +4,7 @@ description: Tracking down problems with Azure Monitor autoscaling used in Servi ms.topic: conceptual ms.date: 11/4/2019 ms.subservice: autoscale +ms.reviewer: riroloff --- diff --git a/articles/azure-monitor/autoscale/autoscale-understanding-settings.md b/articles/azure-monitor/autoscale/autoscale-understanding-settings.md index e988456bd1e60..970902e310361 100644 --- 
a/articles/azure-monitor/autoscale/autoscale-understanding-settings.md +++ b/articles/azure-monitor/autoscale/autoscale-understanding-settings.md @@ -4,6 +4,7 @@ description: "A detailed breakdown of autoscale settings and how they work. Appl ms.topic: conceptual ms.date: 12/18/2017 ms.subservice: autoscale +ms.reviewer: riroloff --- # Understand Autoscale settings Autoscale settings help ensure that you have the right amount of resources running to handle the fluctuating load of your application. You can configure Autoscale settings to be triggered based on metrics that indicate load or performance, or triggered at a scheduled date and time. This article takes a detailed look at the anatomy of an Autoscale setting. The article begins with the schema and properties of a setting, and then walks through the different profile types that can be configured. Finally, the article discusses how the Autoscale feature in Azure evaluates which profile to execute at any given time. diff --git a/articles/azure-monitor/autoscale/autoscale-virtual-machine-scale-sets.md b/articles/azure-monitor/autoscale/autoscale-virtual-machine-scale-sets.md index 59b81649ad8a4..efcfcca4f39ab 100644 --- a/articles/azure-monitor/autoscale/autoscale-virtual-machine-scale-sets.md +++ b/articles/azure-monitor/autoscale/autoscale-virtual-machine-scale-sets.md @@ -7,7 +7,7 @@ ms.topic: conceptual ms.service: virtual-machine-scale-sets ms.subservice: autoscale ms.date: 06/25/2020 -ms.reviewer: jushiman +ms.reviewer: riroloff ms.custom: mimckitt --- diff --git a/articles/azure-monitor/autoscale/autoscale-webhook-email.md b/articles/azure-monitor/autoscale/autoscale-webhook-email.md index b9452edc643d1..abc101ff58eb6 100644 --- a/articles/azure-monitor/autoscale/autoscale-webhook-email.md +++ b/articles/azure-monitor/autoscale/autoscale-webhook-email.md @@ -4,6 +4,7 @@ description: Learn how to use autoscale actions to call web URLs or send email n ms.topic: conceptual ms.date: 04/03/2017 ms.subservice: autoscale +ms.reviewer: riroloff --- # Use autoscale actions to send email and webhook alert notifications in Azure Monitor This article shows you how set up triggers so that you can call specific web URLs or send emails based on autoscale actions in Azure. diff --git a/articles/azure-monitor/autoscale/tutorial-autoscale-performance-schedule.md b/articles/azure-monitor/autoscale/tutorial-autoscale-performance-schedule.md index 25e747bbfd5f0..8e419f337540d 100644 --- a/articles/azure-monitor/autoscale/tutorial-autoscale-performance-schedule.md +++ b/articles/azure-monitor/autoscale/tutorial-autoscale-performance-schedule.md @@ -9,6 +9,7 @@ ms.date: 12/11/2017 ms.author: ancav ms.custom: mvc ms.subservice: autoscale +ms.reviewer: riroloff --- # Create an Autoscale Setting for Azure resources based on performance data or a schedule diff --git a/articles/azure-monitor/azure-monitor-monitoring-reference.md b/articles/azure-monitor/azure-monitor-monitoring-reference.md index 32a87709db210..56c7e71199d01 100644 --- a/articles/azure-monitor/azure-monitor-monitoring-reference.md +++ b/articles/azure-monitor/azure-monitor-monitoring-reference.md @@ -474,4 +474,4 @@ The following schemas are relevant to action groups, which are part of the notif ## See Also - See [Monitoring Azure Azure Monitor](monitor-azure-monitor.md) for a description of what Azure Monitor monitors in itself. -- See [Monitoring Azure resources with Azure Monitor](/azure/azure-monitor/essentials/monitor-azure-resource) for details on monitoring Azure resources. 
+- See [Monitoring Azure resources with Azure Monitor](./essentials/monitor-azure-resource.md) for details on monitoring Azure resources. \ No newline at end of file diff --git a/articles/azure-monitor/best-practices-data-collection.md b/articles/azure-monitor/best-practices-data-collection.md index cf31da9296416..4c4592a85a683 100644 --- a/articles/azure-monitor/best-practices-data-collection.md +++ b/articles/azure-monitor/best-practices-data-collection.md @@ -14,12 +14,15 @@ This article is part of the scenario [Recommendations for configuring Azure Moni > [!IMPORTANT] > The features of Azure Monitor and their configuration will vary depending on your business requirements balanced with the cost of the enabled features. Each step below will identify whether there is potential cost, and you should assess these costs before proceeding. See [Azure Monitor pricing](https://azure.microsoft.com/pricing/details/monitor/) for complete pricing details. -## Create Log Analytics workspace -You require at least one Log Analytics workspace to enable [Azure Monitor Logs](logs/data-platform-logs.md), which is required for collecting such data as logs from Azure resources, collecting data from the guest operating system of Azure virtual machines, and for most Azure Monitor insights. Other services such as Microsoft Sentinel and Microsoft Defender for Cloud also use a Log Analytics workspace and can share the same one that you use for Azure Monitor. You can start with a single workspace to support this monitoring, but see [Designing your Azure Monitor Logs deployment](logs/design-logs-deployment.md) for guidance on when to use multiple workspaces. +## Design Log Analytics workspace architecture +You require at least one Log Analytics workspace to enable [Azure Monitor Logs](logs/data-platform-logs.md), which is required for collecting such data as logs from Azure resources, collecting data from the guest operating system of Azure virtual machines, and for most Azure Monitor insights. Other services such as Microsoft Sentinel and Microsoft Defender for Cloud also use a Log Analytics workspace and can share the same one that you use for Azure Monitor. -There is no cost for creating a Log Analytics workspace, but there is a potential charge once you configure data to be collected into it. See [Azure Monitor Logs pricing details](logs/cost-logs.md) for details. +There is no cost for creating a Log Analytics workspace, but there is a potential charge once you configure data to be collected into it. See [Azure Monitor Logs pricing details](logs/cost-logs.md) for details on how log data is charged. + +See [Create a Log Analytics workspace in the Azure portal](logs/quick-create-workspace.md) to create an initial Log Analytics workspace and [Manage access to Log Analytics workspaces](logs/manage-access.md) to configure access. You can use scalable methods such as Resource Manager templates to configure workspaces, though this is often not required since most environments will require a minimal number. + +Start with a single workspace to support initial monitoring, but see [Design a Log Analytics workspace configuration](logs/workspace-design.md) for guidance on when to use multiple workspaces and how to locate and configure them. -See [Create a Log Analytics workspace in the Azure portal](logs/quick-create-workspace.md) to create an initial Log Analytics workspace. See [Manage access to log data and workspaces in Azure Monitor](logs/manage-access.md) to configure access. 
You can use scalable methods such as Resource Manager templates to configure workspaces though, this is often not required since most environments will require a minimal number. ## Collect data from Azure resources Some monitoring of Azure resources is available automatically with no configuration required, while you must perform configuration steps to collect additional monitoring data. The following table illustrates the configuration steps required to collect all available data from your Azure resources, including at which step data is sent to Azure Monitor Metrics and Azure Monitor Logs. The sections below describe each step in further detail. diff --git a/articles/azure-monitor/change/change-analysis-custom-filters.md b/articles/azure-monitor/change/change-analysis-custom-filters.md index 4c4d75ac224e9..8453da3a2a97a 100644 --- a/articles/azure-monitor/change/change-analysis-custom-filters.md +++ b/articles/azure-monitor/change/change-analysis-custom-filters.md @@ -9,6 +9,7 @@ ms.reviewer: cawa ms.date: 05/09/2022 ms.subservice: change-analysis ms.custom: devx-track-azurepowershell +ms.reviewer: cawa --- # Navigate to a change using custom filters in Change Analysis diff --git a/articles/azure-monitor/change/change-analysis-powershell.md b/articles/azure-monitor/change/change-analysis-powershell.md index dd8f3f1af26b8..4f5d1faa84129 100644 --- a/articles/azure-monitor/change/change-analysis-powershell.md +++ b/articles/azure-monitor/change/change-analysis-powershell.md @@ -9,6 +9,7 @@ ms.devlang: azurepowershell ms.date: 04/11/2022 ms.subservice: change-analysis ms.custom: devx-track-azurepowershell +ms.reviewer: cawa --- # Azure PowerShell for Change Analysis in Azure Monitor (preview) diff --git a/articles/azure-monitor/change/change-analysis-query.md b/articles/azure-monitor/change/change-analysis-query.md index 889a732d4f499..758479ae06895 100644 --- a/articles/azure-monitor/change/change-analysis-query.md +++ b/articles/azure-monitor/change/change-analysis-query.md @@ -8,6 +8,7 @@ ms.contributor: cawa ms.date: 05/12/2022 ms.subservice: change-analysis ms.custom: devx-track-azurepowershell +ms.reviewer: cawa --- # Pin and share a Change Analysis query to the Azure dashboard diff --git a/articles/azure-monitor/change/change-analysis-troubleshoot.md b/articles/azure-monitor/change/change-analysis-troubleshoot.md index 4e0f63b9da756..af5713ea6e0ee 100644 --- a/articles/azure-monitor/change/change-analysis-troubleshoot.md +++ b/articles/azure-monitor/change/change-analysis-troubleshoot.md @@ -8,6 +8,7 @@ ms.contributor: cawa ms.date: 03/21/2022 ms.subservice: change-analysis ms.custom: devx-track-azurepowershell +ms.reviewer: cawa --- # Troubleshoot Azure Monitor's Change Analysis (preview) diff --git a/articles/azure-monitor/change/change-analysis-visualizations.md b/articles/azure-monitor/change/change-analysis-visualizations.md index c1e513b402c03..f2ed20c00f6bb 100644 --- a/articles/azure-monitor/change/change-analysis-visualizations.md +++ b/articles/azure-monitor/change/change-analysis-visualizations.md @@ -8,6 +8,7 @@ ms.contributor: cawa ms.date: 04/18/2022 ms.subservice: change-analysis ms.custom: devx-track-azurepowershell +ms.reviewer: cawa --- # Visualizations for Change Analysis in Azure Monitor (preview) diff --git a/articles/azure-monitor/change/change-analysis.md b/articles/azure-monitor/change/change-analysis.md index a7393b85dde0e..f05c30a31f958 100644 --- a/articles/azure-monitor/change/change-analysis.md +++ b/articles/azure-monitor/change/change-analysis.md @@ -8,6
+8,7 @@ ms.contributor: cawa ms.date: 05/20/2022 ms.subservice: change-analysis ms.custom: devx-track-azurepowershell + --- # Use Change Analysis in Azure Monitor (preview) diff --git a/articles/azure-monitor/cli-samples.md b/articles/azure-monitor/cli-samples.md deleted file mode 100644 index 6eb39c5eb25aa..0000000000000 --- a/articles/azure-monitor/cli-samples.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -title: Azure Monitor CLI samples -description: Sample CLI commands for Azure Monitor features. Azure Monitor is a Microsoft Azure service, which allows you to send alert notifications, call web URLs based on values of configured telemetry data, and autoScale Cloud Services, Virtual Machines, and Web Apps. -ms.topic: sample -author: bwren -ms.author: bwren -ms.date: 05/16/2018 -ms.custom: devx-track-azurecli - ---- - -# Azure Monitor CLI samples -This article shows you sample command-line interface (CLI) commands to help you access Azure Monitor features. Azure Monitor allows you to AutoScale Cloud Services, Virtual Machines, and Web Apps and to send alert notifications or call web URLs based on values of configured telemetry data. - -## Prerequisites - -If you haven't already installed the Azure CLI, follow the instructions for [Install the Azure CLI](/cli/azure/install-azure-cli). You can also use [Azure Cloud Shell](/azure/cloud-shell) to run the CLI as an interactive experience in your browser. See a full reference of all available commands in the [Azure Monitor CLI reference](/cli/azure/monitor). - -## Log in to Azure -The first step is to log in to your Azure account. - -```azurecli -az login -``` - -After running this command, you have to sign in via the instructions on the screen. All commands work in the context of your default subscription. - -List the details of your current subscription. - -```azurecli -az account show -``` - -Change working context to a different subscription. - -```azurecli -az account set -s -``` - -View a list of all supported Azure Monitor commands. - -```azurecli -az monitor -h -``` - -## View activity log - -View a list of activity log events. - -```azurecli -az monitor activity-log list -``` - -View all available options. - -```azurecli -az monitor activity-log list -h -``` - -List logs by a resourceGroup. - -```azurecli -az monitor activity-log list --resource-group -``` - -List logs by caller. - -```azurecli -az monitor activity-log list --caller myname@company.com -``` - -List logs by caller on a resource type, within a date range. - -```azurecli -az monitor activity-log list --resource-provider Microsoft.Web \ - --caller myname@company.com \ - --start-time 2016-03-08T00:00:00Z \ - --end-time 2016-03-16T00:00:00Z -``` - -## Work with alerts -> [!NOTE] -> Only alerts (classic) is supported in CLI at this time. - -### Get alert (classic) rules in a resource group - -```azurecli -az monitor activity-log alert list --resource-group -az monitor activity-log alert show --resource-group --name -``` - -### Create a metric alert (classic) rule - -```azurecli -az monitor alert create --name --resource-group \ - --action email \ - --action webhook \ - --target \ - --condition " {>,>=,<,<=} {avg,min,max,total,last} ##h##m##s" -``` - -### Delete an alert (classic) rule - -```azurecli -az monitor alert delete --name --resource-group -``` - -## Log profiles - -Use the information in this section to work with log profiles. 
- -### Get a log profile - -```azurecli -az monitor log-profiles list -az monitor log-profiles show --name -``` - -### Add a log profile with retention - -```azurecli -az monitor log-profiles create --name --location \ - --locations \ - --categories \ - --days <# days to retain> \ - --enabled true \ - --storage-account-id -``` - -### Add a log profile with retention and EventHub - -```azurecli -az monitor log-profiles create --name --location \ - --locations \ - --categories \ - --days <# days to retain> \ - --enabled true - --storage-account-id - --service-bus-rule-id -``` - -### Remove a log profile - -```azurecli -az monitor log-profiles delete --name -``` - -## Diagnostics - -Use the information in this section to work with diagnostic settings. - -### Get a diagnostic setting - -```azurecli -az monitor diagnostic-settings list --resource -``` - -### Create a diagnostic setting - -```azurecli -az monitor diagnostic-settings create --name \ - --storage-account \ - --resource \ - --logs '[ - { - "category": , - "enabled": true, - "retentionPolicy": { - "days": <# days to retain>, - "enabled": true - } - }]' -``` - -### Delete a diagnostic setting - -```azurecli -az monitor diagnostic-settings delete --name \ - --resource -``` - -## Autoscale - -Use the information in this section to work with autoscale settings. You need to modify these examples. - -### Get autoscale settings for a resource group - -```azurecli -az monitor autoscale list --resource-group -``` - -### Get autoscale settings by name in a resource group - -```azurecli -az monitor autoscale show --name --resource-group -``` - -### Set autoscale settings - -```azurecli -az monitor autoscale create --name --resource-group \ - --count <# instances> \ - --resource -``` diff --git a/articles/azure-monitor/containers/container-insights-agent-config.md b/articles/azure-monitor/containers/container-insights-agent-config.md index 5fb8340f242ec..82beffdc66b75 100644 --- a/articles/azure-monitor/containers/container-insights-agent-config.md +++ b/articles/azure-monitor/containers/container-insights-agent-config.md @@ -3,6 +3,7 @@ title: Configure Container insights agent data collection | Microsoft Docs description: This article describes how you can configure the Container insights agent to control stdout/stderr and environment variables log collection. ms.topic: conceptual ms.date: 10/09/2020 +ms.reviewer: aul --- # Configure agent data collection for Container insights diff --git a/articles/azure-monitor/containers/container-insights-analyze.md b/articles/azure-monitor/containers/container-insights-analyze.md index 7298e7d092cb8..8657ae9a64a99 100644 --- a/articles/azure-monitor/containers/container-insights-analyze.md +++ b/articles/azure-monitor/containers/container-insights-analyze.md @@ -3,6 +3,7 @@ title: Kubernetes monitoring with Container insights | Microsoft Docs description: This article describes how you can view and analyze the performance of a Kubernetes cluster with Container insights. ms.topic: conceptual ms.date: 03/26/2020 +ms.reviewer: aul --- # Monitor your Kubernetes cluster performance with Container insights @@ -15,11 +16,8 @@ For information about how to enable Container insights, see [Onboard Container i Azure Monitor provides a multi-cluster view that shows the health status of all monitored Kubernetes clusters running Linux and Windows Server 2019 deployed across resource groups in your subscriptions. It shows clusters discovered across all environments that aren't monitored by the solution. 
You can immediately understand cluster health, and from here, you can drill down to the node and controller performance page or navigate to see performance charts for the cluster. For AKS clusters that were discovered and identified as unmonitored, you can enable monitoring for them at any time. -The main differences in monitoring a Windows Server cluster with Container insights compared to a Linux cluster are described [here](container-insights-overview.md#what-does-container-insights-provide) in the overview article. +The main differences in monitoring a Windows Server cluster with Container insights compared to a Linux cluster are described in [Features of Container insights](container-insights-overview.md#features-of-container-insights) in the overview article. -## Sign in to the Azure portal - -Sign in to the [Azure portal](https://portal.azure.com). ## Multi-cluster view from Azure Monitor diff --git a/articles/azure-monitor/containers/container-insights-azure-redhat-setup.md b/articles/azure-monitor/containers/container-insights-azure-redhat-setup.md deleted file mode 100644 index 7f53e384ec956..0000000000000 --- a/articles/azure-monitor/containers/container-insights-azure-redhat-setup.md +++ /dev/null @@ -1,250 +0,0 @@ ---- -title: Configure Azure Red Hat OpenShift v3.x with Container insights | Microsoft Docs -description: This article describes how to configure monitoring of a Kubernetes cluster with Azure Monitor hosted on Azure Red Hat OpenShift version 3 and higher. -ms.topic: conceptual -ms.date: 06/30/2020 ---- - -# Configure Azure Red Hat OpenShift v3 with Container insights - ->[!IMPORTANT] -> Azure Red Hat OpenShift 3.11 will be retired June 2022. -> -> As of October 2020 you will no longer be able to create new 3.11 clusters. -> Existing 3.11 clusters will continue to operate until June 2022 but will no be longer supported after that date. -> -> Follow this guide to [create an Azure Red Hat OpenShift 4 cluster](../../openshift/tutorial-create-cluster.md). -> If you have specific questions, [please contact us](mailto:aro-feedback@microsoft.com). - -Container insights provides rich monitoring experience for the Azure Kubernetes Service (AKS) and AKS Engine clusters. This article describes how to enable monitoring of Kubernetes clusters hosted on [Azure Red Hat OpenShift](../../openshift/intro-openshift.md) version 3 and latest supported version of version 3, to achieve a similar monitoring experience. - ->[!NOTE] ->Support for Azure Red Hat OpenShift is a feature in public preview at this time. -> - -Container insights can be enabled for new, or one or more existing deployments of Azure Red Hat OpenShift using the following supported methods: - -- For an existing cluster from the Azure portal or using Azure Resource Manager template. -- For a new cluster using Azure Resource Manager template, or while creating a new cluster using the [Azure CLI](/cli/azure/openshift#az-openshift-create). - -## Supported and unsupported features - -Container insights supports monitoring Azure Red Hat OpenShift as described in the [Overview](container-insights-overview.md) article, except for the following features: - -- Live Data (preview) -- [Collect metrics](container-insights-update-metrics.md) from cluster nodes and pods and storing them in the Azure Monitor metrics database - -## Prerequisites - -- A [Log Analytics workspace](../logs/design-logs-deployment.md).
- - Container insights supports a Log Analytics workspace in the regions listed in Azure [Products by region](https://azure.microsoft.com/global-infrastructure/services/?regions=all&products=monitor). To create your own workspace, it can be created through [Azure Resource Manager](../logs/resource-manager-workspace.md), through [PowerShell](../logs/powershell-workspace-configuration.md?toc=%2fpowershell%2fmodule%2ftoc.json), or in the [Azure portal](../logs/quick-create-workspace.md). - -- To enable and access the features in Container insights, at a minimum you need to be a member of the Azure *Contributor* role in the Azure subscription, and a member of the [*Log Analytics Contributor*](../logs/manage-access.md#manage-access-using-azure-permissions) role of the Log Analytics workspace configured with Container insights. - -- To view the monitoring data, you are a member of the [*Log Analytics reader*](../logs/manage-access.md#manage-access-using-azure-permissions) role permission with the Log Analytics workspace configured with Container insights. - -## Identify your Log Analytics workspace ID - - To integrate with an existing Log Analytics workspace, start by identifying the full resource ID of your Log Analytics workspace. The resource ID of the workspace is required for the parameter `workspaceResourceId` when you enable monitoring using the Azure Resource Manager template method. - -1. List all the subscriptions that you have access to by running the following command: - - ```azurecli - az account list --all -o table - ``` - - The output will look like the following: - - ```azurecli - Name CloudName SubscriptionId State IsDefault - ------------------------------------ ----------- ------------------------------------ ------- ----------- - Microsoft Azure AzureCloud 0fb60ef2-03cc-4290-b595-e71108e8f4ce Enabled True - ``` - -1. Copy the value for **SubscriptionId**. - -1. Switch to the subscription that hosts the Log Analytics workspace by running the following command: - - ```azurecli - az account set -s - ``` - -1. Display the list of workspaces in your subscriptions in the default JSON format by running the following command: - - ``` - az resource list --resource-type Microsoft.OperationalInsights/workspaces -o json - ``` - -1. In the output, find the workspace name, and then copy the full resource ID of that Log Analytics workspace under the field **ID**. - -## Enable for a new cluster using an Azure Resource Manager template - -Perform the following steps to deploy an Azure Red Hat OpenShift cluster with monitoring enabled. Before proceeding, review the tutorial [Create an Azure Red Hat OpenShift cluster](../../openshift/tutorial-create-cluster.md) to understand the dependencies that you need to configure so your environment is set up correctly. - -This method includes two JSON templates. One template specifies the configuration to deploy the cluster with monitoring enabled, and the other contains parameter values that you configure to specify the following: - -- The Azure Red Hat OpenShift cluster resource ID. - -- The resource group the cluster is deployed in. - -- [Azure Active Directory tenant ID](../../openshift/howto-create-tenant.md#create-a-new-azure-ad-tenant) noted after performing the steps to create one or one already created. - -- [Azure Active Directory client application ID](../../openshift/howto-aad-app-configuration.md#create-an-azure-ad-app-registration) noted after performing the steps to create one or one already created. 
- -- [Azure Active Directory Client secret](../../openshift/howto-aad-app-configuration.md#create-a-client-secret) noted after performing the steps to create one or one already created. - -- [Azure AD security group](../../openshift/howto-aad-app-configuration.md#create-an-azure-ad-security-group) noted after performing the steps to create one or one already created. - -- Resource ID of an existing Log Analytics workspace. See [Identify your Log Analytics workspace ID](#identify-your-log-analytics-workspace-id) to learn how to get this information. - -- The number of master nodes to create in the cluster. - -- The number of compute nodes in the agent pool profile. - -- The number of infrastructure nodes in the agent pool profile. - -If you are unfamiliar with the concept of deploying resources by using a template, see: - -- [Deploy resources with Resource Manager templates and Azure PowerShell](../../azure-resource-manager/templates/deploy-powershell.md) - -- [Deploy resources with Resource Manager templates and the Azure CLI](../../azure-resource-manager/templates/deploy-cli.md) - -If you choose to use the Azure CLI, you first need to install and use the CLI locally. You must be running the Azure CLI version 2.0.65 or later. To identify your version, run `az --version`. If you need to install or upgrade the Azure CLI, see [Install the Azure CLI](/cli/azure/install-azure-cli). - -1. Download and save to a local folder, the Azure Resource Manager template and parameter file, to create a cluster with the monitoring add-on using the following commands: - - `curl -LO https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_dev/scripts/onboarding/aro/enable_monitoring_to_new_cluster/newClusterWithMonitoring.json` - - `curl -LO https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_dev/scripts/onboarding/aro/enable_monitoring_to_new_cluster/newClusterWithMonitoringParam.json` - -2. Sign in to Azure - - ```azurecli - az login - ``` - - If you have access to multiple subscriptions, run `az account set -s {subscription ID}` replacing `{subscription ID}` with the subscription you want to use. - -3. Create a resource group for your cluster if you don't already have one. For a list of Azure regions that supports OpenShift on Azure, see [Supported Regions](../../openshift/supported-resources.md#azure-regions). - - ```azurecli - az group create -g -l - ``` - -4. Edit the JSON parameter file **newClusterWithMonitoringParam.json** and update the following values: - - - *location* - - *clusterName* - - *aadTenantId* - - *aadClientId* - - *aadClientSecret* - - *aadCustomerAdminGroupId* - - *workspaceResourceId* - - *masterNodeCount* - - *computeNodeCount* - - *infraNodeCount* - -5. The following step deploys the cluster with monitoring enabled by using the Azure CLI. - - ```azurecli - az deployment group create --resource-group --template-file ./newClusterWithMonitoring.json --parameters @./newClusterWithMonitoringParam.json - ``` - - The output resembles the following: - - ```output - provisioningState : Succeeded - ``` - -## Enable for an existing cluster - -Perform the following steps to enable monitoring of an Azure Red Hat OpenShift cluster deployed in Azure. You can accomplish this from the Azure portal or using the provided templates. - -### From the Azure portal - -1. Sign in to the [Azure portal](https://portal.azure.com). - -2. On the Azure portal menu or from the Home page, select **Azure Monitor**. Under the **Insights** section, select **Containers**. - -3. 
On the **Monitor - containers** page, select **Non-monitored clusters**. - -4. From the list of non-monitored clusters, find the cluster in the list and click **Enable**. You can identify the results in the list by looking for the value **ARO** under the column **CLUSTER TYPE**. - -5. On the **Onboarding to Container insights** page, if you have an existing Log Analytics workspace in the same subscription as the cluster, select it from the drop-down list. - The list preselects the default workspace and location that the cluster is deployed to in the subscription. - - ![Enable monitoring for non-monitored clusters](./media/container-insights-onboard/kubernetes-onboard-brownfield-01.png) - - >[!NOTE] - >If you want to create a new Log Analytics workspace for storing the monitoring data from the cluster, follow the instructions in [Create a Log Analytics workspace](../logs/quick-create-workspace.md). Be sure to create the workspace in the same subscription that the RedHat OpenShift cluster is deployed to. - -After you've enabled monitoring, it might take about 15 minutes before you can view health metrics for the cluster. - -### Enable using an Azure Resource Manager template - -This method includes two JSON templates. One template specifies the configuration to enable monitoring, and the other contains parameter values that you configure to specify the following: - -- The Azure RedHat OpenShift cluster resource ID. - -- The resource group the cluster is deployed in. - -- A Log Analytics workspace. See [Identify your Log Analytics workspace ID](#identify-your-log-analytics-workspace-id) to learn how to get this information. - -If you are unfamiliar with the concept of deploying resources by using a template, see: - -- [Deploy resources with Resource Manager templates and Azure PowerShell](../../azure-resource-manager/templates/deploy-powershell.md) - -- [Deploy resources with Resource Manager templates and the Azure CLI](../../azure-resource-manager/templates/deploy-cli.md) - -If you choose to use the Azure CLI, you first need to install and use the CLI locally. You must be running the Azure CLI version 2.0.65 or later. To identify your version, run `az --version`. If you need to install or upgrade the Azure CLI, see [Install the Azure CLI](/cli/azure/install-azure-cli). - -1. Download the template and parameter file to update your cluster with the monitoring add-on using the following commands: - - `curl -LO https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_dev/scripts/onboarding/aro/enable_monitoring_to_existing_cluster/existingClusterOnboarding.json` - - `curl -LO https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_dev/scripts/onboarding/aro/enable_monitoring_to_existing_cluster/existingClusterParam.json` - -2. Sign in to Azure - - ```azurecli - az login - ``` - - If you have access to multiple subscriptions, run `az account set -s {subscription ID}` replacing `{subscription ID}` with the subscription you want to use. - -3. Specify the subscription of the Azure RedHat OpenShift cluster. - - ```azurecli - az account set --subscription "Subscription Name" - ``` - -4. Run the following command to identify the cluster location and resource ID: - - ```azurecli - az openshift show -g -n - ``` - -5. Edit the JSON parameter file **existingClusterParam.json** and update the values *aroResourceId* and *aroResourceLocation*. The value for **workspaceResourceId** is the full resource ID of your Log Analytics workspace, which includes the workspace name. - -6. 
To deploy with Azure CLI, run the following commands: - - ```azurecli - az deployment group create --resource-group --template-file ./ExistingClusterOnboarding.json --parameters @./existingClusterParam.json - ``` - - The output resembles the following: - - ```output - provisioningState : Succeeded - ``` - -## Next steps - -- With monitoring enabled to collect health and resource utilization of your RedHat OpenShift cluster and workloads running on them, learn [how to use](container-insights-analyze.md) Container insights. - -- By default, the containerized agent collects the stdout/ stderr container logs of all the containers running in all the namespaces except kube-system. To configure container log collection specific to particular namespace or namespaces, review [Container Insights agent configuration](container-insights-agent-config.md) to configure desired data collection settings to your ConfigMap configurations file. - -- To scrape and analyze Prometheus metrics from your cluster, review [Configure Prometheus metrics scraping](container-insights-prometheus-integration.md) - -- To learn how to stop monitoring your cluster with Container insights, see [How to Stop Monitoring Your Azure Red Hat OpenShift cluster](./container-insights-optout-openshift-v3.md). \ No newline at end of file diff --git a/articles/azure-monitor/containers/container-insights-azure-redhat4-setup.md b/articles/azure-monitor/containers/container-insights-azure-redhat4-setup.md deleted file mode 100644 index 34d406ecf98ce..0000000000000 --- a/articles/azure-monitor/containers/container-insights-azure-redhat4-setup.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: Configure Azure Red Hat OpenShift v4.x with Container insights | Microsoft Docs -description: This article describes how to configure monitoring for a Kubernetes cluster with Azure Monitor that's hosted on Azure Red Hat OpenShift version 4 or later. -ms.topic: conceptual -ms.date: 03/05/2021 ---- - -# Configure Azure Red Hat OpenShift v4.x with Container insights - -Container insights provides a rich monitoring experience for Azure Kubernetes Service (AKS) and AKS engine clusters. This article describes how to achieve a similar monitoring experience by enabling monitoring for Kubernetes clusters that are hosted on [Azure Red Hat OpenShift](../../openshift/intro-openshift.md) version 4.x. - ->[!NOTE] -> We are phasing out Container Insights support for Azure Red Hat OpenShift v4.x by May 2022. We recommend customers to migrate Container Insights on Azure Arc enabled Kubernetes, which offers an upgraded experience and 1-click onboarding. For more information, please visit our [documentation](./container-insights-enable-arc-enabled-clusters.md) -> - - ->[!NOTE] ->Support for Azure Red Hat OpenShift is a feature in public preview at this time. -> - -You can enable Container insights for one or more existing deployments of Azure Red Hat OpenShift v4.x by using the supported methods described in this article. - -For an existing cluster, run this [Bash script in the Azure CLI](/cli/azure/openshift#az-openshift-create&preserve-view=true). 
- -## Supported and unsupported features - -Container insights supports monitoring Azure Red Hat OpenShift v4.x as described in [Container insights overview](container-insights-overview.md), except for the following features: - -- Live Data (preview) -- [Collecting metrics](container-insights-update-metrics.md) from cluster nodes and pods and storing them in the Azure Monitor metrics database - -## Prerequisites - -- The Azure CLI version 2.0.72 or later - -- The [Helm 3](https://helm.sh/docs/intro/install/) CLI tool - -- Latest version of [OpenShift CLI](https://docs.openshift.com/container-platform/4.7/cli_reference/openshift_cli/getting-started-cli.html) - -- [Bash version 4](https://www.gnu.org/software/bash/) - -- The [Kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) command-line tool - -- A [Log Analytics workspace](../logs/design-logs-deployment.md). - - Container insights supports a Log Analytics workspace in the regions listed in Azure [Products by region](https://azure.microsoft.com/global-infrastructure/services/?regions=all&products=monitor). To create your own workspace, it can be created through [Azure Resource Manager](../logs/resource-manager-workspace.md), through [PowerShell](../logs/powershell-workspace-configuration.md?toc=%2fpowershell%2fmodule%2ftoc.json), or in the [Azure portal](../logs/quick-create-workspace.md). - -- To enable and access the features in Container insights, you need to have, at minimum, an Azure *Contributor* role in the Azure subscription and a [*Log Analytics Contributor*](../logs/manage-access.md#manage-access-using-azure-permissions) role in the Log Analytics workspace, configured with Container insights. - -- To view the monitoring data, you need to have [*Log Analytics reader*](../logs/manage-access.md#manage-access-using-azure-permissions) role in the Log Analytics workspace, configured with Container insights. - -## Enable monitoring for an existing cluster - -To enable monitoring for an Azure Red Hat OpenShift version 4 or later cluster that's deployed in Azure by using the provided Bash script, do the following: - -1. Sign in to Azure by running the following command: - - ```azurecli - az login - ``` - -1. Download and save to a local folder the script that configures your cluster with the monitoring add-in by running the following command: - - `curl -o enable-monitoring.sh -L https://aka.ms/enable-monitoring-bash-script` - -1. Connect to ARO v4 cluster using the instructions in [Tutorial: Connect to an Azure Red Hat OpenShift 4 cluster](../../openshift/tutorial-connect-cluster.md). - - -### Integrate with an existing workspace - -In this section, you enable monitoring of your cluster using the Bash script you downloaded earlier. To integrate with an existing Log Analytics workspace, start by identifying the full resource ID of your Log Analytics workspace that's required for the `logAnalyticsWorkspaceResourceId` parameter, and then run the command to enable the monitoring add-in against the specified workspace. - -If you don't have a workspace to specify, you can skip to the [Integrate with the default workspace](#integrate-with-the-default-workspace) section and let the script create a new workspace for you. - -1. 
List all the subscriptions that you have access to by running the following command: - - ```azurecli - az account list --all -o table - ``` - - The output will look like the following: - - ```azurecli - Name CloudName SubscriptionId State IsDefault - ------------------------------------ ----------- ------------------------------------ ------- ----------- - Microsoft Azure AzureCloud 0fb60ef2-03cc-4290-b595-e71108e8f4ce Enabled True - ``` - -1. Copy the value for **SubscriptionId**. - -1. Switch to the subscription that hosts the Log Analytics workspace by running the following command: - - ```azurecli - az account set -s - ``` - -1. Display the list of workspaces in your subscriptions in the default JSON format by running the following command: - - ``` - az resource list --resource-type Microsoft.OperationalInsights/workspaces -o json - ``` - -1. In the output, find the workspace name, and then copy the full resource ID of that Log Analytics workspace under the field **ID**. - -1. To enable monitoring, run the following command. Replace the values for the `azureAroV4ClusterResourceId` and `logAnalyticsWorkspaceResourceId` parameters. - - ```bash - export azureAroV4ClusterResourceId="/subscriptions//resourceGroups//providers/Microsoft.RedHatOpenShift/OpenShiftClusters/" - export logAnalyticsWorkspaceResourceId="/subscriptions//resourceGroups//providers/microsoft.operationalinsights/workspaces/" - ``` - - Here is the command you must run once you have populated the variables with Export commands: - - `bash enable-monitoring.sh --resource-id $azureAroV4ClusterResourceId --workspace-id $logAnalyticsWorkspaceResourceId` - -After you've enabled monitoring, it might take about 15 minutes before you can view the health metrics for the cluster. - -### Integrate with the default workspace - -In this section, you enable monitoring for your Azure Red Hat OpenShift v4.x cluster by using the Bash script that you downloaded. - -In this example, you're not required to pre-create or specify an existing workspace. This command simplifies the process for you by creating a default workspace in the default resource group of the cluster subscription, if one doesn't already exist in the region. - -The default workspace that's created is in the format of *DefaultWorkspace-\-\*. - -Replace the value for the `azureAroV4ClusterResourceId` parameter. - -```bash -export azureAroV4ClusterResourceId="/subscriptions//resourceGroups//providers/Microsoft.RedHatOpenShift/OpenShiftClusters/" -``` - -For example: - -`bash enable-monitoring.sh --resource-id $azureAroV4ClusterResourceId - -After you've enabled monitoring, it might take about 15 minutes before you can view health metrics for the cluster. - -### Enable monitoring from the Azure portal - -The multi-cluster view in Container insights highlights your Azure Red Hat OpenShift clusters that don't have monitoring enabled under the **Unmonitored clusters** tab. The **Enable** option next to your cluster doesn't initiate onboarding of monitoring from the portal. You're redirected to this article to enable monitoring manually by following the steps that were outlined earlier in this article. - -1. Sign in to the [Azure portal](https://portal.azure.com). - -1. On the left pane or from the home page, select **Azure Monitor**. - -1. In the **Insights** section, select **Containers**. - -1. On the **Monitor - containers** page, select **Unmonitored clusters**. - -1. In the list of non-monitored clusters, select the cluster, and then select **Enable**. 
- - You can identify the results in the list by looking for the **ARO** value in the **Cluster Type** column. After you select **Enable**, you're redirected to this article. - -## Next steps - -- Now that you've enabled monitoring to collect health and resource utilization of your RedHat OpenShift version 4.x cluster and the workloads that are running on them, learn [how to use](container-insights-analyze.md) Container insights. - -- By default, the containerized agent collects the *stdout* and *stderr* container logs of all the containers that are running in all the namespaces except kube-system. To configure a container log collection that's specific to a particular namespace or namespaces, review [Container Insights agent configuration](container-insights-agent-config.md) to configure the data collection settings you want for your *ConfigMap* configuration file. - -- To scrape and analyze Prometheus metrics from your cluster, review [Configure Prometheus metrics scraping](container-insights-prometheus-integration.md). - -- To learn how to stop monitoring your cluster by using Container insights, see [How to stop monitoring your Azure Red Hat OpenShift cluster](./container-insights-optout-openshift-v3.md). diff --git a/articles/azure-monitor/containers/container-insights-cost.md b/articles/azure-monitor/containers/container-insights-cost.md index c266b4711caf8..ee715ac9dad74 100644 --- a/articles/azure-monitor/containers/container-insights-cost.md +++ b/articles/azure-monitor/containers/container-insights-cost.md @@ -3,16 +3,15 @@ title: Monitoring cost for Container insights | Microsoft Docs description: This article describes the monitoring cost for metrics & inventory data collected by Container insights to help customers manage their usage and associated costs. ms.topic: conceptual ms.date: 05/29/2020 +ms.reviewer: aul --- # Understand monitoring costs for Container insights This article provides pricing guidance for Container insights to help you understand the following: -* How to estimate costs up-front before you enable this Insight - +* How to estimate costs up-front before you enable Container Insights. * How to measure costs after Container insights has been enabled for one or more containers - * How to control the collection of data and make cost reductions Azure Monitor Logs collects, indexes, and stores data generated by your Kubernetes cluster. @@ -25,13 +24,9 @@ The Azure Monitor pricing model is primarily based on the amount of data ingeste The following is a summary of what types of data are collected from a Kubernetes cluster with Container insights that influences cost and can be customized based on your usage: - Stdout, stderr container logs from every monitored container in every Kubernetes namespace in the cluster - - Container environment variables from every monitored container in the cluster - - Completed Kubernetes jobs/pods in the cluster that does not require monitoring - - Active scraping of Prometheus metrics - - [Diagnostic log collection](../../aks/monitor-aks.md#configure-monitoring) of Kubernetes master node logs in your AKS cluster to analyze log data generated by master components such as the *kube-apiserver* and *kube-controller-manager*. 
## What is collected from Kubernetes clusters diff --git a/articles/azure-monitor/containers/container-insights-deployment-hpa-metrics.md b/articles/azure-monitor/containers/container-insights-deployment-hpa-metrics.md index 74e0c75bf0326..565764ef09fa4 100644 --- a/articles/azure-monitor/containers/container-insights-deployment-hpa-metrics.md +++ b/articles/azure-monitor/containers/container-insights-deployment-hpa-metrics.md @@ -3,6 +3,7 @@ title: Deployment & HPA metrics with Container insights | Microsoft Docs description: This article describes what deployment & HPA (Horizontal pod autoscaler) metrics are collected with Container insights. ms.topic: conceptual ms.date: 08/09/2020 +ms.reviewer: aul --- # Deployment & HPA metrics with Container insights diff --git a/articles/azure-monitor/containers/container-insights-enable-aks-policy.md b/articles/azure-monitor/containers/container-insights-enable-aks-policy.md index dac06883a6f93..771baaf755002 100644 --- a/articles/azure-monitor/containers/container-insights-enable-aks-policy.md +++ b/articles/azure-monitor/containers/container-insights-enable-aks-policy.md @@ -3,16 +3,21 @@ title: Enable AKS Monitoring Addon using Azure Policy description: Describes how to enable AKS Monitoring Addon using Azure Custom Policy. ms.topic: conceptual ms.date: 02/04/2021 +ms.reviewer: aul --- # Enable AKS monitoring addon using Azure Policy -This article describes how to enable AKS Monitoring Addon using Azure Custom Policy. Monitoring Addon Custom Policy can be assigned either at subscription or resource group scope. If Azure Log Analytics workspace and AKS cluster are in different subscriptions then the managed identity used by the policy assignment has to have the required role permissions on both the subscriptions or least on the resource of the Log Analytics workspace. Similarly, if the policy is scoped to the resource group, then the managed identity should have the required role permissions on the Log Analytics workspace if the workspace not in the selected resource group scope. +This article describes how to enable AKS Monitoring Addon using Azure Custom Policy. +## Permissions required Monitoring Addon require following roles on the managed identity used by Azure Policy: - [azure-kubernetes-service-contributor-role](../../role-based-access-control/built-in-roles.md#azure-kubernetes-service-contributor-role) - [log-analytics-contributor](../../role-based-access-control/built-in-roles.md#log-analytics-contributor) +Monitoring Addon Custom Policy can be assigned at either the subscription or resource group scope. If the Log Analytics workspace and AKS cluster are in different subscriptions, then the managed identity used by the policy assignment must have the required role permissions on both the subscriptions or on the Log Analytics workspace resource. Similarly, if the policy is scoped to the resource group, then the managed identity should have the required role permissions on the Log Analytics workspace if the workspace is not in the selected resource group scope. 
+ + ## Create and assign policy definition using Azure portal ### Create policy definition diff --git a/articles/azure-monitor/containers/container-insights-enable-arc-enabled-clusters.md b/articles/azure-monitor/containers/container-insights-enable-arc-enabled-clusters.md index 9af6358c430d1..4438257a9125d 100644 --- a/articles/azure-monitor/containers/container-insights-enable-arc-enabled-clusters.md +++ b/articles/azure-monitor/containers/container-insights-enable-arc-enabled-clusters.md @@ -1,10 +1,11 @@ --- -title: "Monitor Azure Arc-enabled Kubernetes clusters" -ms.date: 04/05/2021 +title: Monitor Azure Arc-enabled Kubernetes clusters +ms.date: 05/24/2022 ms.topic: article author: shashankbarsin ms.author: shasb -description: "Collect metrics and logs of Azure Arc-enabled Kubernetes clusters using Azure Monitor" +description: Collect metrics and logs of Azure Arc-enabled Kubernetes clusters using Azure Monitor. +ms.reviewer: aul --- # Azure Monitor Container Insights for Azure Arc-enabled Kubernetes clusters @@ -25,10 +26,10 @@ description: "Collect metrics and logs of Azure Arc-enabled Kubernetes clusters ## Prerequisites -- You've met the pre-requisites listed under the [generic cluster extensions documentation](../../azure-arc/kubernetes/extensions.md#prerequisites). -- A Log Analytics workspace: Azure Monitor Container Insights supports a Log Analytics workspace in the regions listed under Azure [products by region page](https://azure.microsoft.com/global-infrastructure/services/?regions=all&products=monitor). You can create your own workspace through [Azure Resource Manager](../logs/resource-manager-workspace.md), [PowerShell](../logs/powershell-workspace-configuration.md), or [Azure portal](../logs/quick-create-workspace.md). -- You need to have [Contributor](../../role-based-access-control/built-in-roles.md#contributor) role assignment on the Azure subscription containing the Azure Arc-enabled Kubernetes resource. If the Log Analytics workspace is in a different subscription, then [Log Analytics Contributor](../logs/manage-access.md#manage-access-using-azure-permissions) role assignment is needed on the Log Analytics workspace. -- To view the monitoring data, you need to have [Log Analytics Reader](../logs/manage-access.md#manage-access-using-azure-permissions) role assignment on the Log Analytics workspace. +- Pre-requisites listed under the [generic cluster extensions documentation](../../azure-arc/kubernetes/extensions.md#prerequisites). +- Log Analytics workspace. Azure Monitor Container Insights supports a Log Analytics workspace in the regions listed under Azure [products by region page](https://azure.microsoft.com/global-infrastructure/services/?regions=all&products=monitor). You can create your own workspace using [Azure Resource Manager](../logs/resource-manager-workspace.md), [PowerShell](../logs/powershell-workspace-configuration.md), or [Azure portal](../logs/quick-create-workspace.md). +- [Contributor](../../role-based-access-control/built-in-roles.md#contributor) role assignment on the Azure subscription containing the Azure Arc-enabled Kubernetes resource. If the Log Analytics workspace is in a different subscription, then [Log Analytics Contributor](../logs/manage-access.md#azure-rbac) role assignment is needed on the Log Analytics workspace. +- To view the monitoring data, you need to have [Log Analytics Reader](../logs/manage-access.md#azure-rbac) role assignment on the Log Analytics workspace.
- The following endpoints need to be enabled for outbound access in addition to the ones mentioned under [connecting a Kubernetes cluster to Azure Arc](../../azure-arc/kubernetes/quickstart-connect-cluster.md#meet-network-requirements). | Endpoint | Port | diff --git a/articles/azure-monitor/containers/container-insights-enable-existing-clusters.md b/articles/azure-monitor/containers/container-insights-enable-existing-clusters.md index 4ce186b3ed63d..b3dbfab766c14 100644 --- a/articles/azure-monitor/containers/container-insights-enable-existing-clusters.md +++ b/articles/azure-monitor/containers/container-insights-enable-existing-clusters.md @@ -2,27 +2,16 @@ title: Monitor an Azure Kubernetes Service (AKS) cluster deployed | Microsoft Docs description: Learn how to enable monitoring of an Azure Kubernetes Service (AKS) cluster with Container insights already deployed in your subscription. ms.topic: conceptual -ms.date: 09/12/2019 +ms.date: 05/24/2022 ms.custom: devx-track-terraform, devx-track-azurepowershell, devx-track-azurecli +ms.reviewer: aul --- # Enable monitoring of Azure Kubernetes Service (AKS) cluster already deployed - This article describes how to set up Container insights to monitor managed Kubernetes cluster hosted on [Azure Kubernetes Service](../../aks/index.yml) that have already been deployed in your subscription. -You can enable monitoring of an AKS cluster that's already deployed using one of the supported methods: - -* Azure CLI -* [Terraform](#enable-using-terraform) -* [From Azure Monitor](#enable-from-azure-monitor-in-the-portal) or [directly from the AKS cluster](#enable-directly-from-aks-cluster-in-the-portal) in the Azure portal -* With the [provided Azure Resource Manager template](#enable-using-an-azure-resource-manager-template) by using the Azure PowerShell cmdlet `New-AzResourceGroupDeployment` or with Azure CLI. - If you're connecting an existing AKS cluster to an Azure Log Analytics workspace in another subscription, the Microsoft.ContainerService resource provider must be registered in the subscription in which the Log Analytics workspace was created. For more information, see [Register resource provider](../../azure-resource-manager/management/resource-providers-and-types.md#register-resource-provider). -## Sign in to the Azure portal - -Sign in to the [Azure portal](https://portal.azure.com). - ## Enable using Azure CLI The following step enables monitoring of your AKS cluster using Azure CLI. In this example, you are not required to pre-create or specify an existing workspace. This command simplifies the process for you by creating a default workspace in the default resource group of the AKS cluster subscription if one does not already exist in the region. The default workspace created resembles the format of *DefaultWorkspace-\-\*. diff --git a/articles/azure-monitor/containers/container-insights-enable-new-cluster.md b/articles/azure-monitor/containers/container-insights-enable-new-cluster.md index 18cad95f835b3..bad85b46a501a 100644 --- a/articles/azure-monitor/containers/container-insights-enable-new-cluster.md +++ b/articles/azure-monitor/containers/container-insights-enable-new-cluster.md @@ -2,19 +2,16 @@ title: Monitor a new Azure Kubernetes Service (AKS) cluster | Microsoft Docs description: Learn how to enable monitoring for a new Azure Kubernetes Service (AKS) cluster with Container insights subscription. 
ms.topic: conceptual -ms.date: 04/25/2019 +ms.date: 05/24/2022 ms.custom: devx-track-terraform, devx-track-azurecli ms.devlang: azurecli +ms.reviewer: aul --- # Enable monitoring of a new Azure Kubernetes Service (AKS) cluster This article describes how to set up Container insights to monitor managed Kubernetes cluster hosted on [Azure Kubernetes Service](../../aks/index.yml) that you are preparing to deploy in your subscription. -You can enable monitoring of an AKS cluster using one of the supported methods: - -* Azure CLI -* Terraform ## Enable using Azure CLI @@ -26,12 +23,12 @@ To enable monitoring of a new AKS cluster created with Azure CLI, follow the ste ## Enable using Terraform -If you are [deploying a new AKS cluster using Terraform](/azure/developer/terraform/create-k8s-cluster-with-tf-and-aks), you specify the arguments required in the profile [to create a Log Analytics workspace](https://www.terraform.io/docs/providers/azurerm/r/log_analytics_workspace.html) if you do not chose to specify an existing one. +If you are [deploying a new AKS cluster using Terraform](/azure/developer/terraform/create-k8s-cluster-with-tf-and-aks), you specify the arguments required in the profile [to create a Log Analytics workspace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_workspace) if you do not choose to specify an existing one. >[!NOTE] >If you choose to use Terraform, you must be running the Terraform Azure RM Provider version 1.17.0 or above. -To add Container insights to the workspace, see [azurerm_log_analytics_solution](https://www.terraform.io/docs/providers/azurerm/r/log_analytics_solution.html) and complete the profile by including the [**addon_profile**](https://www.terraform.io/docs/providers/azurerm/r/kubernetes_cluster.html#addon_profile) and specify **oms_agent**. +To add Container insights to the workspace, see [azurerm_log_analytics_solution](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_solution) and complete the profile by including the [**addon_profile**](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster) and specify **oms_agent**. After you've enabled monitoring and all configuration tasks are completed successfully, you can monitor the performance of your cluster in either of two ways: diff --git a/articles/azure-monitor/containers/container-insights-gpu-monitoring.md b/articles/azure-monitor/containers/container-insights-gpu-monitoring.md index 885cd8cfabfe8..1abc324fc1955 100644 --- a/articles/azure-monitor/containers/container-insights-gpu-monitoring.md +++ b/articles/azure-monitor/containers/container-insights-gpu-monitoring.md @@ -1,8 +1,9 @@ --- -title: Configure GPU monitoring with Container insights | Microsoft Docs +title: Configure GPU monitoring with Container insights description: This article describes how you can configure monitoring Kubernetes clusters with NVIDIA and AMD GPU enabled nodes with Container insights. 
ms.topic: conceptual -ms.date: 03/27/2020 +ms.date: 05/24/2022 +ms.reviewer: aul --- # Configure GPU monitoring with Container insights diff --git a/articles/azure-monitor/containers/container-insights-hybrid-setup.md b/articles/azure-monitor/containers/container-insights-hybrid-setup.md index fdb683f1cfed3..a301f23e48fe4 100644 --- a/articles/azure-monitor/containers/container-insights-hybrid-setup.md +++ b/articles/azure-monitor/containers/container-insights-hybrid-setup.md @@ -3,6 +3,7 @@ title: Configure Hybrid Kubernetes clusters with Container insights | Microsoft description: This article describes how you can configure Container insights to monitor Kubernetes clusters hosted on Azure Stack or other environment. ms.topic: conceptual ms.date: 06/30/2020 +ms.reviewer: aul --- # Configure hybrid Kubernetes clusters with Container insights @@ -31,9 +32,7 @@ The following configurations are officially supported with Container insights. I Before you start, make sure that you have the following: -- A [Log Analytics workspace](../logs/design-logs-deployment.md). - - Container insights supports a Log Analytics workspace in the regions listed in Azure [Products by region](https://azure.microsoft.com/global-infrastructure/services/?regions=all&products=monitor). To create your own workspace, it can be created through [Azure Resource Manager](../logs/resource-manager-workspace.md), through [PowerShell](../logs/powershell-workspace-configuration.md?toc=%2fpowershell%2fmodule%2ftoc.json), or in the [Azure portal](../logs/quick-create-workspace.md). +- [Log Analytics workspace](../logs/design-logs-deployment.md). Container insights supports a Log Analytics workspace in the regions listed in Azure [Products by region](https://azure.microsoft.com/global-infrastructure/services/?regions=all&products=monitor). To create your own workspace, it can be created through [Azure Resource Manager](../logs/resource-manager-workspace.md), through [PowerShell](../logs/powershell-workspace-configuration.md?toc=%2fpowershell%2fmodule%2ftoc.json), or in the [Azure portal](../logs/quick-create-workspace.md). >[!NOTE] >Enable monitoring of multiple clusters with the same cluster name to same Log Analytics workspace is not supported. Cluster names must be unique. @@ -41,7 +40,7 @@ Before you start, make sure that you have the following: - You are a member of the **Log Analytics contributor role** to enable container monitoring. For more information about how to control access to a Log Analytics workspace, see [Manage access to workspace and log data](../logs/manage-access.md). -- To view the monitoring data, you need to have [*Log Analytics reader*](../logs/manage-access.md#manage-access-using-azure-permissions) role in the Log Analytics workspace, configured with Container insights. +- To view the monitoring data, you need to have [*Log Analytics reader*](../logs/manage-access.md#azure-rbac) role in the Log Analytics workspace, configured with Container insights. - [HELM client](https://helm.sh/docs/using_helm/) to onboard the Container insights chart for the specified Kubernetes cluster. 
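The hybrid-cluster prerequisites above call for a Log Analytics workspace created through Azure Resource Manager, PowerShell, or the portal. As a hedged aside, the Azure CLI offers one more way to create the same resource; the names and region below are placeholders:

```bash
# Create a resource group and a Log Analytics workspace for Container insights.
# Pick a region listed on the Products by region page.
az group create --name MyMonitoringRG --location eastus
az monitor log-analytics workspace create \
  --resource-group MyMonitoringRG \
  --workspace-name MyWorkspace \
  --location eastus
```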
diff --git a/articles/azure-monitor/containers/container-insights-livedata-deployments.md b/articles/azure-monitor/containers/container-insights-livedata-deployments.md index bcda2550fa487..16034f993664d 100644 --- a/articles/azure-monitor/containers/container-insights-livedata-deployments.md +++ b/articles/azure-monitor/containers/container-insights-livedata-deployments.md @@ -4,6 +4,7 @@ description: This article describes the real-time view of Kubernetes Deployments ms.topic: conceptual ms.date: 10/15/2019 ms.custom: references_regions +ms.reviewer: aul --- # How to view Deployments (preview) in real-time diff --git a/articles/azure-monitor/containers/container-insights-livedata-metrics.md b/articles/azure-monitor/containers/container-insights-livedata-metrics.md index 5587286fb9e72..39879333fda47 100644 --- a/articles/azure-monitor/containers/container-insights-livedata-metrics.md +++ b/articles/azure-monitor/containers/container-insights-livedata-metrics.md @@ -1,9 +1,10 @@ --- -title: View metrics in real-time with Container insights | Microsoft Docs +title: View metrics in real-time with Container insights description: This article describes the real-time view of metrics without using kubectl with Container insights. ms.topic: conceptual -ms.date: 10/15/2019 +ms.date: 05/24/2022 ms.custom: references_regions +ms.reviewer: aul --- # How to view metrics in real-time diff --git a/articles/azure-monitor/containers/container-insights-livedata-overview.md b/articles/azure-monitor/containers/container-insights-livedata-overview.md index fc23b04bcb770..7ffffa567c24d 100644 --- a/articles/azure-monitor/containers/container-insights-livedata-overview.md +++ b/articles/azure-monitor/containers/container-insights-livedata-overview.md @@ -1,9 +1,10 @@ --- -title: View Live Data with Container insights | Microsoft Docs +title: View Live Data with Container insights description: This article describes the real-time view of Kubernetes logs, events, and pod metrics without using kubectl in Container insights. ms.topic: conceptual -ms.date: 03/04/2021 +ms.date: 05/24/2022 ms.custom: references_regions +ms.reviewer: aul --- # How to view Kubernetes logs, events, and pod metrics in real-time @@ -12,7 +13,7 @@ Container insights includes the Live Data feature, which is an advanced diagnost This article provides a detailed overview and helps you understand how to use this feature. -For help setting up or troubleshooting the Live Data feature, review our [setup guide](container-insights-livedata-setup.md). This feature directly access the Kubernetes API, and additional information about the authentication model can be found [here](https://kubernetes.io/docs/concepts/overview/kubernetes-api/). +For help setting up or troubleshooting the Live Data feature, review our [setup guide](container-insights-livedata-setup.md). This feature directly accesses the Kubernetes API, and additional information about the authentication model can be found [here](https://kubernetes.io/docs/concepts/overview/kubernetes-api/). ## View AKS resource live logs Use the following procedure to view the live logs for pods, deployments, and replica sets with or without Container insights from the AKS resource view. @@ -75,7 +76,7 @@ The pane title shows the name of the Pod the container is grouped with. ### Filter events -While viewing events, you can additionally limit the results using the **Filter** pill found to the right of the search bar. 
Depending on what resource you have selected, the pill lists a Pod, Namespace, or cluster to chose from. +While viewing events, you can additionally limit the results using the **Filter** pill found to the right of the search bar. Depending on what resource you have selected, the pill lists a Pod, Namespace, or cluster to choose from. ## View metrics @@ -108,7 +109,7 @@ The Live Data feature includes search functionality. In the **Search** field, yo ### Scroll Lock and Pause -To suspend autoscroll and control the behavior of the pane, allowing you to manually scroll through the new data read, you can use the **Scroll** option. To re-enable autoscroll, simply select the **Scroll** option again. You can also pause retrieval of log or event data by selecting the the **Pause** option, and when you are ready to resume, simply select **Play**. +To suspend autoscroll and control the behavior of the pane, allowing you to manually scroll through the new data read, you can use the **Scroll** option. To re-enable autoscroll, simply select the **Scroll** option again. You can also pause retrieval of log or event data by selecting the **Pause** option, and when you are ready to resume, simply select **Play**. ![Live Data console pane pause live view](./media/container-insights-livedata-overview/livedata-pane-scroll-pause-example.png) diff --git a/articles/azure-monitor/containers/container-insights-livedata-setup.md b/articles/azure-monitor/containers/container-insights-livedata-setup.md index 1823c27f3262a..2a6604e80c5bc 100644 --- a/articles/azure-monitor/containers/container-insights-livedata-setup.md +++ b/articles/azure-monitor/containers/container-insights-livedata-setup.md @@ -1,14 +1,15 @@ --- -title: Set up Container insights Live Data (preview) | Microsoft Docs +title: Configure live data in Container insights description: This article describes how to set up the real-time view of container logs (stdout/stderr) and events without using kubectl with Container insights. ms.topic: conceptual -ms.date: 01/08/2020 +ms.date: 05/24/2022 ms.custom: references_regions +ms.reviewer: aul --- -# How to set up the Live Data (preview) feature +# How to configure Live Data in Container insights -To view Live Data (preview) with Container insights from Azure Kubernetes Service (AKS) clusters, you need to configure authentication to grant permission to access to your Kubernetes data. This security configuration allows real-time access to your data through the Kubernetes API directly in the Azure portal. +To view Live Data with Container insights from Azure Kubernetes Service (AKS) clusters, you need to configure authentication to grant permission to access to your Kubernetes data. This security configuration allows real-time access to your data through the Kubernetes API directly in the Azure portal. This feature supports the following methods to control access to the logs, events, and metrics: @@ -19,7 +20,7 @@ This feature supports the following methods to control access to the logs, event These instructions require both administrative access to your Kubernetes cluster, and if configuring to use Azure Active Directory (AD) for user authentication, administrative access to Azure AD. 
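The Live Data setup steps above assume administrative access to the Kubernetes cluster. A minimal sketch of how you might confirm that before applying any role bindings (generic kubectl commands, not specific to this article):

```bash
# Quick check that the current kubectl context has cluster-admin rights.
kubectl auth can-i '*' '*' --all-namespaces

# Confirm which cluster and user the current context points at.
kubectl config current-context
```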
-This article explains how to configure authentication to control access to the Live Data (preview) feature from the cluster: +This article explains how to configure authentication to control access to the Live Data feature from the cluster: - Kubernetes role-based access control (Kubernetes RBAC) enabled AKS cluster - Azure Active Directory integrated AKS cluster. @@ -27,7 +28,7 @@ This article explains how to configure authentication to control access to the L ## Authentication model -The Live Data (preview) features utilizes the Kubernetes API, identical to the `kubectl` command-line tool. The Kubernetes API endpoints utilize a self-signed certificate, which your browser will be unable to validate. This feature utilizes an internal proxy to validate the certificate with the AKS service, ensuring the traffic is trusted. +The Live Data feature utilizes the Kubernetes API, identical to the `kubectl` command-line tool. The Kubernetes API endpoints utilize a self-signed certificate, which your browser will be unable to validate. This feature utilizes an internal proxy to validate the certificate with the AKS service, ensuring the traffic is trusted. The Azure portal prompts you to validate your login credentials for an Azure Active Directory cluster, and redirect you to the client registration setup during cluster creation (and re-configured in this article). This behavior is similar to the authentication process required by `kubectl`. @@ -39,9 +40,9 @@ The Azure portal prompts you to validate your login credentials for an Azure Act ## Using clusterMonitoringUser with Kubernetes RBAC-enabled clusters -To eliminate the need to apply additional configuration changes to allow the Kubernetes user role binding **clusterUser** access to the Live Data (preview) feature after [enabling Kubernetes RBAC](#configure-kubernetes-rbac-authorization) authorization, AKS has added a new Kubernetes cluster role binding called **clusterMonitoringUser**. This cluster role binding has all the necessary permissions out-of-the-box to access the Kubernetes API and the endpoints for utilizing the Live Data (preview) feature. +To eliminate the need to apply additional configuration changes to allow the Kubernetes user role binding **clusterUser** access to the Live Data feature after [enabling Kubernetes RBAC](#configure-kubernetes-rbac-authorization) authorization, AKS has added a new Kubernetes cluster role binding called **clusterMonitoringUser**. This cluster role binding has all the necessary permissions out-of-the-box to access the Kubernetes API and the endpoints for utilizing the Live Data feature. -In order to utilize the Live Data (preview) feature with this new user, you need to be a member of the [Azure Kubernetes Service Cluster User](../../role-based-access-control/built-in-roles.md#azure-kubernetes-service-cluster-user-role) or [Contributor](../../role-based-access-control/built-in-roles.md#contributor) role on the AKS cluster resource. Container insights, when enabled, is configured to authenticate using the clusterMonitoringUser by default. If the clusterMonitoringUser role binding does not exist on a cluster, **clusterUser** is used for authentication instead. Contributor gives you access to the clusterMonitoringUser (if it exists) and Azure Kuberenetes Service Cluster User gives you access to the clusterUser. Any of these two roles give sufficient access to use this feature.
+In order to utilize the Live Data feature with this new user, you need to be a member of the [Azure Kubernetes Service Cluster User](../../role-based-access-control/built-in-roles.md#azure-kubernetes-service-cluster-user-role) or [Contributor](../../role-based-access-control/built-in-roles.md#contributor) role on the AKS cluster resource. Container insights, when enabled, is configured to authenticate using the clusterMonitoringUser by default. If the clusterMonitoringUser role binding does not exist on a cluster, **clusterUser** is used for authentication instead. Contributor gives you access to the clusterMonitoringUser (if it exists) and Azure Kubernetes Service Cluster User gives you access to the clusterUser. Either of these roles gives sufficient access to use this feature. AKS released this new role binding in January 2020, so clusters created before January 2020 do not have it. If you have a cluster that was created before January 2020, the new **clusterMonitoringUser** can be added to an existing cluster by performing a PUT operation on the cluster, or any other operation that results in a PUT on the cluster, such as updating the cluster version. @@ -124,7 +125,7 @@ For more information on advanced security setup in Kubernetes, review the [Kuber ## Grant permission -Each Azure AD account must be granted permission to the appropriate APIs in Kubernetes in order to access the Live Data (preview) feature. The steps to grant the Azure Active Directory account are similar to the steps described in the [Kubernetes RBAC authentication](#configure-kubernetes-rbac-authorization) section. Before applying the yaml configuration template to your cluster, replace **clusterUser** under **ClusterRoleBinding** with the desired user. +Each Azure AD account must be granted permission to the appropriate APIs in Kubernetes in order to access the Live Data feature. The steps to grant the Azure Active Directory account are similar to the steps described in the [Kubernetes RBAC authentication](#configure-kubernetes-rbac-authorization) section. Before applying the yaml configuration template to your cluster, replace **clusterUser** under **ClusterRoleBinding** with the desired user. >[!IMPORTANT] >If the user you grant the Kubernetes RBAC binding for is in the same Azure AD tenant, assign permissions based on the userPrincipalName. If the user is in a different Azure AD tenant, query for and use the objectId property. diff --git a/articles/azure-monitor/containers/container-insights-log-alerts.md b/articles/azure-monitor/containers/container-insights-log-alerts.md index dbaa0cc9b46a2..f0f82c268865c 100644 --- a/articles/azure-monitor/containers/container-insights-log-alerts.md +++ b/articles/azure-monitor/containers/container-insights-log-alerts.md @@ -3,6 +3,7 @@ title: Log alerts from Container insights | Microsoft Docs description: This article describes how to create custom log alerts for memory and CPU utilization from Container insights.
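The clusterMonitoringUser paragraph above requires membership in the Azure Kubernetes Service Cluster User or Contributor role on the AKS cluster resource. A minimal, hedged sketch of granting the built-in role with the Azure CLI; the assignee and scope are placeholders:

```bash
# Grant a user the built-in role that exposes the clusterUser/clusterMonitoringUser
# credentials on an AKS cluster. Assignee and scope below are placeholders.
az role assignment create \
  --assignee "user@contoso.com" \
  --role "Azure Kubernetes Service Cluster User Role" \
  --scope "/subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.ContainerService/managedClusters/<cluster-name>"
```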
ms.topic: conceptual ms.date: 07/29/2021 +ms.reviewer: aul --- diff --git a/articles/azure-monitor/containers/container-insights-log-query.md b/articles/azure-monitor/containers/container-insights-log-query.md index d79504ecd3545..3c5be3eaab3ea 100644 --- a/articles/azure-monitor/containers/container-insights-log-query.md +++ b/articles/azure-monitor/containers/container-insights-log-query.md @@ -3,6 +3,7 @@ title: How to query logs from Container insights description: Container insights collects metrics and log data and this article describes the records and includes sample queries. ms.topic: conceptual ms.date: 07/19/2021 +ms.reviewer: aul --- diff --git a/articles/azure-monitor/containers/container-insights-logging-v2.md b/articles/azure-monitor/containers/container-insights-logging-v2.md index 23cfeea517c24..64ef01e741a54 100644 --- a/articles/azure-monitor/containers/container-insights-logging-v2.md +++ b/articles/azure-monitor/containers/container-insights-logging-v2.md @@ -3,18 +3,18 @@ title: Configure ContainerLogv2 schema (preview) for Container Insights description: Switch your ContainerLog table to the ContainerLogv2 schema author: aul ms.author: bwren -ms.reviewer: bwren ms.subservice: logs ms.custom: event-tier1-build-2022 ms.topic: conceptual ms.date: 05/11/2022 +ms.reviewer: aul --- # Enable ContainerLogV2 schema (preview) -Azure Monitor Container Insights is now in Public Preview of new schema for container logs called ContainerLogV2. As part of this schema, there new fields to make common queries to view AKS (Azure Kubernetes Service) and Azure Arc enabled Kubernetes data. In addition, this schema is compatible as a part of [Basic Logs](../logs/basic-logs-configure.md), which offer a low cost alternative to standard analytics logs. +Azure Monitor Container Insights is now in public preview of a new schema for container logs called ContainerLogV2. As part of this schema, there are new fields to make common queries to view AKS (Azure Kubernetes Service) and Azure Arc enabled Kubernetes data. In addition, this schema is compatible as a part of [Basic Logs](../logs/basic-logs-configure.md), which offer a low-cost alternative to standard analytics logs. > [!NOTE] -> The ContainerLogv2 schema is currently a preview feature, some features may be limited in the Portal experience from Container Insights +> The ContainerLogv2 schema is currently a preview feature. Container Insights does not yet support the "View in Analytics" option; however, the data is still available when queried directly from the [Log Analytics](./container-insights-log-query.md) interface. >[!NOTE] >The new fields are: @@ -39,28 +39,29 @@ Azure Monitor Container Insights is now in Public Preview of new schema for cont 3. Follow the instructions accordingly when configuring an existing ConfigMap or using a new one.
### Configuring an existing ConfigMap -When configuring an existing ConfigMap, we have to append the following section in your existing ConfigMap yaml file: +If your ConfigMap doesn't yet have the "[log_collection_settings.schema]" field, you'll need to append the following section in your existing ConfigMap yaml file: ```yaml [log_collection_settings.schema] - # In the absense of this configmap, default value for containerlog_schema_version is "v1" + # In the absence of this configmap, default value for containerlog_schema_version is "v1" # Supported values for this setting are "v1","v2" - # See documentation for benefits of v2 schema over v1 schema before opting for "v2" schema + # See documentation at https://aka.ms/ContainerLogv2 for benefits of v2 schema over v1 schema before opting for "v2" schema containerlog_schema_version = "v2" ``` ### Configuring a new ConfigMap -1. Download the new ConfigMap from [here](https://aka.ms/container-azm-ms-agentconfig). For new downloaded configmapdefault the value for containerlog_schema_version is "v1" +1. Download the new ConfigMap from [here](https://aka.ms/container-azm-ms-agentconfig). For the newly downloaded configmap, the default value for containerlog_schema_version is "v1" 1. Update the "containerlog_schema_version = "v2"" - ```yaml - [log_collection_settings.schema] - # In the absense of this configmap, default value for containerlog_schema_version is "v1" - # Supported values for this setting are "v1","v2" - # See documentation for benefits of v2 schema over v1 schema before opting for "v2" schema - containerlog_schema_version = "v2" - ``` -1. Once you have finished configuring the configmap Run the following kubectl command: kubectl apply -f `` +```yaml +[log_collection_settings.schema] + # In the absence of this configmap, default value for containerlog_schema_version is "v1" + # Supported values for this setting are "v1","v2" + # See documentation at https://aka.ms/ContainerLogv2 for benefits of v2 schema over v1 schema before opting for "v2" schema + containerlog_schema_version = "v2" +``` + +1. Once you have finished configuring the configmap, run the following kubectl command: kubectl apply -f `` >[!TIP] >Example: kubectl apply -f container-azm-ms-agentconfig.yaml. diff --git a/articles/azure-monitor/containers/container-insights-manage-agent.md b/articles/azure-monitor/containers/container-insights-manage-agent.md index 54a85039a4541..89f26ae6d4aa3 100644 --- a/articles/azure-monitor/containers/container-insights-manage-agent.md +++ b/articles/azure-monitor/containers/container-insights-manage-agent.md @@ -3,7 +3,7 @@ title: How to manage the Container insights agent | Microsoft Docs description: This article describes managing the most common maintenance tasks with the containerized Log Analytics agent used by Container insights. ms.topic: conceptual ms.date: 07/21/2020 - +ms.reviewer: aul --- # How to manage the Container insights agent diff --git a/articles/azure-monitor/containers/container-insights-metric-alerts.md b/articles/azure-monitor/containers/container-insights-metric-alerts.md index 19a002974c13f..7e9b5fc0e435f 100644 --- a/articles/azure-monitor/containers/container-insights-metric-alerts.md +++ b/articles/azure-monitor/containers/container-insights-metric-alerts.md @@ -2,8 +2,8 @@ title: Metric alerts from Container insights description: This article reviews the recommended metric alerts available from Container insights in public preview.
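Following the ConfigMap steps above, a hedged example of applying the edited file and confirming the cluster picked it up. The file name matches the tip above; the configmap name and the kube-system namespace are assumed defaults for the Container insights agent, not values stated in this PR:

```bash
# Apply the edited ConfigMap and confirm the schema setting landed in the cluster.
# "container-azm-ms-agentconfig" and the kube-system namespace are assumed defaults.
kubectl apply -f container-azm-ms-agentconfig.yaml
kubectl get configmap container-azm-ms-agentconfig -n kube-system -o yaml | grep containerlog_schema_version
```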
ms.topic: conceptual -ms.date: 10/28/2020 - +ms.date: 05/24/2022 +ms.reviewer: aul --- # Recommended metric alerts (preview) from Container insights diff --git a/articles/azure-monitor/containers/container-insights-onboard.md b/articles/azure-monitor/containers/container-insights-onboard.md index 893e92e10cb9b..8ad42cf24877a 100644 --- a/articles/azure-monitor/containers/container-insights-onboard.md +++ b/articles/azure-monitor/containers/container-insights-onboard.md @@ -1,15 +1,17 @@ --- -title: Enable Container insights | Microsoft Docs +title: Enable Container insights description: This article describes how to enable and configure Container insights so that you can understand how your container is performing and what performance-related issues have been identified. ms.topic: conceptual -ms.date: 06/30/2020 - +ms.date: 05/24/2022 +ms.reviewer: aul --- # Enable Container insights +This article provides an overview of the requirements and options that are available for configuring Container insights to monitor the performance of workloads that are deployed to Kubernetes environments. You can enable Container insights for a new deployment or for one or more existing deployments of Kubernetes by using a number of supported methods. -This article provides an overview of the options that are available for setting up Container insights to monitor the performance of workloads that are deployed to Kubernetes environments and hosted on: +## Supported configurations +Container insights supports the following environments: - [Azure Kubernetes Service (AKS)](../../aks/index.yml) - [Azure Arc-enabled Kubernetes cluster](../../azure-arc/kubernetes/overview.md) @@ -18,62 +20,40 @@ This article provides an overview of the options that are available for setting - [Azure Red Hat OpenShift](../../openshift/intro-openshift.md) version 4.x - [Red Hat OpenShift](https://docs.openshift.com/container-platform/4.3/welcome/index.html) version 4.x -You can enable Container insights for a new deployment or for one or more existing deployments of Kubernetes by using any of the following supported methods: - -- The Azure portal -- Azure PowerShell -- The Azure CLI -- [Terraform and AKS](/azure/developer/terraform/create-k8s-cluster-with-tf-and-aks) - -For any non-AKS kubernetes cluster, you will need to first connect your cluster to [Azure Arc](../../azure-arc/kubernetes/overview.md) before enabling monitoring. -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] +## Supported Kubernetes versions +The versions of Kubernetes and support policy are the same as those [supported in Azure Kubernetes Service (AKS)](../../aks/supported-kubernetes-versions.md). ## Prerequisites - Before you start, make sure that you've met the following requirements: -> [!IMPORTANT] -> Log Analytics Containerized Linux Agent (replicaset pod) makes API calls to all the Windows nodes on Kubelet Secure Port (10250) within the cluster to collect Node and Container Performance related Metrics. -Kubelet secure port (:10250) should be opened in the cluster's virtual network for both inbound and outbound for Windows Node and container performance related metrics collection to work. -> -> If you have a Kubernetes cluster with Windows nodes, then please review and configure the Network Security Group and Network Policies to make sure the Kubelet secure port (:10250) is opened for both inbound and outbound in cluster's virtual network. 
- +**Log Analytics workspace** Container insights supports a [Log Analytics workspace](../logs/log-analytics-workspace-overview.md) in the regions that are listed in [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?regions=all&products=monitor). For a list of the supported mapping pairs to use for the default workspace, see [Region mappings supported by Container insights](container-insights-region-mapping.md). -- You have a Log Analytics workspace. +You can let the onboarding experience create a default workspace in the default resource group of the AKS cluster subscription. If you already have a workspace, though, you will most likely want to use that one. See [Designing your Azure Monitor Logs deployment](../logs/design-logs-deployment.md) for details. - Container insights supports a Log Analytics workspace in the regions that are listed in [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?regions=all&products=monitor). +An AKS cluster can be attached to a Log Analytics workspace in a different Azure subscription in the same Azure AD Tenant. This cannot currently be done with the Azure portal, but can be done with Azure CLI or Resource Manager template. - You can create a workspace when you enable monitoring for your new AKS cluster, or you can let the onboarding experience create a default workspace in the default resource group of the AKS cluster subscription. - - If you choose to create the workspace yourself, you can create it through: - - [Azure Resource Manager](../logs/resource-manager-workspace.md) - - [PowerShell](../logs/powershell-workspace-configuration.md?toc=%2fpowershell%2fmodule%2ftoc.json) - - [The Azure portal](../logs/quick-create-workspace.md) - - For a list of the supported mapping pairs to use for the default workspace, see [Region mapping for Container insights](container-insights-region-mapping.md). -- You are a member of the *Log Analytics contributor* group for enabling container monitoring. For more information about how to control access to a Log Analytics workspace, see [Manage workspaces](../logs/manage-access.md). +**Permissions** To enable container monitoring, you require the following permissions: -- You are a member of the [*Owner* group](../../role-based-access-control/built-in-roles.md#owner) on the AKS cluster resource. +- Member of the [Log Analytics contributor](../logs/manage-access.md#azure-rbac) role. +- Member of the [*Owner* group](../../role-based-access-control/built-in-roles.md#owner) on any AKS cluster resources. - [!INCLUDE [log-analytics-agent-note](../../../includes/log-analytics-agent-note.md)] +To view monitoring data, you require the following permissions: -- To view the monitoring data, you need to have [*Log Analytics reader*](../logs/manage-access.md#manage-access-using-azure-permissions) role in the Log Analytics workspace, configured with Container insights. +- Member of [Log Analytics reader](../logs/manage-access.md#azure-rbac) role if you aren't already a member of [Log Analytics contributor](../logs/manage-access.md#azure-rbac). -- Prometheus metrics aren't collected by default. Before you [configure the agent](container-insights-prometheus-integration.md) to collect the metrics, it's important to review the [Prometheus documentation](https://prometheus.io/) to understand what data can be scraped and what methods are supported.
-- An AKS cluster can be attached to a Log Analytics workspace in a different Azure subscription in the same Azure AD Tenant. This cannot currently be done with the Azure Portal, but can be done with Azure CLI or Resource Manager template. +**Prometheus** +Prometheus metrics aren't collected by default. Before you [configure the agent](container-insights-prometheus-integration.md) to collect the metrics, it's important to review the [Prometheus documentation](https://prometheus.io/) to understand what data can be scraped and what methods are supported. -## Supported configurations +**Kubelet secure port** +Log Analytics Containerized Linux Agent (replicaset pod) makes API calls to all the Windows nodes on Kubelet Secure Port (10250) within the cluster to collect Node and Container Performance related Metrics. Kubelet secure port (:10250) should be opened in the cluster's virtual network for both inbound and outbound for Windows Node and container performance related metrics collection to work. -Container insights officially supports the following configurations: +If you have a Kubernetes cluster with Windows nodes, then please review and configure the Network Security Group and Network Policies to make sure the Kubelet secure port (:10250) is opened for both inbound and outbound in cluster's virtual network. -- Environments: Azure Red Hat OpenShift, Kubernetes on-premises, and the AKS engine on Azure and Azure Stack. For more information, see [the AKS engine on Azure Stack](/azure-stack/user/azure-stack-kubernetes-aks-engine-overview). -- The versions of Kubernetes and support policy are the same as those [supported in Azure Kubernetes Service (AKS)](../../aks/supported-kubernetes-versions.md). -- We recommend connecting your cluster to [Azure Arc](../../azure-arc/kubernetes/overview.md) and enabling monitoring through Container Insights via Azure Arc. -> [!IMPORTANT] -> Please note that the monitoring add-on is not currently supported for AKS clusters configured with the [HTTP Proxy (preview)](../../aks/http-proxy.md) ## Network firewall requirements @@ -103,39 +83,37 @@ The following table lists the proxy and firewall configuration information for A | `*.oms.opinsights.azure.us` | 443 | OMS onboarding | | `dc.services.visualstudio.com` | 443 | For agent telemetry that uses Azure Public Cloud Application Insights | -## Components +## Agent +Container insights relies on a containerized Log Analytics agent for Linux. This specialized agent collects performance and event data from all nodes in the cluster, and the agent is automatically deployed and registered with the specified Log Analytics workspace during deployment. -Your ability to monitor performance relies on a containerized Log Analytics agent for Linux that's specifically developed for Container insights. This specialized agent collects performance and event data from all nodes in the cluster, and the agent is automatically deployed and registered with the specified Log Analytics workspace during deployment. +The agent version is *microsoft/oms:ciprod04202018* or later, and it's represented by a date in the following format: *mmddyyyy*. When a new version of the agent is released, it's automatically upgraded on your managed Kubernetes clusters that are hosted on Azure Kubernetes Service (AKS). To track which versions are released, see [agent release announcements](https://github.com/microsoft/docker-provider/tree/ci_feature_prod). 
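The Agent section above notes that the agent version string carries a date in the *mmddyyyy* format. A hedged way to check which version is running on a cluster, assuming the default "omsagent" naming used elsewhere in these articles:

```bash
# List the Container insights agent pods and show the image tag that carries the
# version date. Assumes the default "omsagent" pod naming in kube-system.
kubectl get pods -n kube-system | grep omsagent
kubectl get pods -n kube-system -o jsonpath="{range .items[*]}{.metadata.name}{'\t'}{.spec.containers[0].image}{'\n'}{end}" | grep omsagent
```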
-The agent version is microsoft/oms:ciprod04202018 or later, and it's represented by a date in the following format: *mmddyyyy*. >[!NOTE] >With the general availability of Windows Server support for AKS, an AKS cluster with Windows Server nodes has a preview agent installed as a daemonset pod on each individual Windows server node to collect logs and forward it to Log Analytics. For performance metrics, a Linux node that's automatically deployed in the cluster as part of the standard deployment collects and forwards the data to Azure Monitor on behalf all Windows nodes in the cluster. -When a new version of the agent is released, it's automatically upgraded on your managed Kubernetes clusters that are hosted on Azure Kubernetes Service (AKS). To track which versions are released, see [agent release announcements](https://github.com/microsoft/docker-provider/tree/ci_feature_prod). > [!NOTE] -> If you've already deployed an AKS cluster, you've enabled monitoring by using either the Azure CLI or a provided Azure Resource Manager template, as demonstrated later in this article. You can't use `kubectl` to upgrade, delete, redeploy, or deploy the agent. -> -> The template needs to be deployed in the same resource group as the cluster. +> If you've already deployed an AKS cluster and enabled monitoring using either the Azure CLI or a Azure Resource Manager template, you can't use `kubectl` to upgrade, delete, redeploy, or deploy the agent. The template needs to be deployed in the same resource group as the cluster. +## Installation options To enable Container insights, use one of the methods that's described in the following table: -| Deployment state | Method | Description | -|------------------|--------|-------------| -| New Kubernetes cluster | [Create an AKS cluster by using the Azure CLI](../../aks/learn/quick-kubernetes-deploy-cli.md)| You can enable monitoring for a new AKS cluster that you create by using the Azure CLI. | -| | [Create an AKS cluster by using Terraform](container-insights-enable-new-cluster.md#enable-using-terraform)| You can enable monitoring for a new AKS cluster that you create by using the open-source tool Terraform. | -| | [Create an OpenShift cluster by using an Azure Resource Manager template](container-insights-azure-redhat-setup.md#enable-for-a-new-cluster-using-an-azure-resource-manager-template) | You can enable monitoring for a new OpenShift cluster that you create by using a preconfigured Azure Resource Manager template. | -| | [Create an OpenShift cluster by using the Azure CLI](/cli/azure/openshift#az-openshift-create) | You can enable monitoring when you deploy a new OpenShift cluster by using the Azure CLI. | -| Existing AKS cluster | [Enable monitoring of an AKS cluster by using the Azure CLI](container-insights-enable-existing-clusters.md#enable-using-azure-cli) | You can enable monitoring for an AKS cluster that's already deployed by using the Azure CLI. | -| |[Enable for AKS cluster using Terraform](container-insights-enable-existing-clusters.md#enable-using-terraform) | You can enable monitoring for an AKS cluster that's already deployed by using the open-source tool Terraform. | -| | [Enable for AKS cluster from Azure Monitor](container-insights-enable-existing-clusters.md#enable-from-azure-monitor-in-the-portal)| You can enable monitoring for one or more AKS clusters that are already deployed from the multi-cluster page in Azure Monitor. 
| -| | [Enable from AKS cluster](container-insights-enable-existing-clusters.md#enable-directly-from-aks-cluster-in-the-portal)| You can enable monitoring directly from an AKS cluster in the Azure portal. | -| | [Enable for AKS cluster using an Azure Resource Manager template](container-insights-enable-existing-clusters.md#enable-using-an-azure-resource-manager-template)| You can enable monitoring for an AKS cluster by using a preconfigured Azure Resource Manager template. | -| Existing non-AKS Kubernetes cluster | [Enable for non-AKS Kubernetes cluster by using the Azure CLI](container-insights-enable-arc-enabled-clusters.md#create-extension-instance-using-azure-cli). | You can enable monitoring for your Kubernetes clusters that are hosted outside of Azure and enabled with Azure Arc, this includes hybrid, OpenShift, and multi-cloud using Azure CLI. | -| | [Enable for non-AKS Kubernetes cluster using an Azure Resource Manager template](container-insights-enable-arc-enabled-clusters.md#create-extension-instance-using-azure-resource-manager) | You can enable monitoring for your clusters enabled with Arc by using a preconfigured Azure Resource Manager template. | -| | [Enable for non-AKS Kubernetes cluster from Azure Monitor](container-insights-enable-arc-enabled-clusters.md#create-extension-instance-using-azure-portal) | You can enable monitoring for one or more clusters enabled with Arc that are already deployed from the multicluster page in Azure Monitor. | +| Deployment state | Method | +|------------------|--------| +| New Kubernetes cluster | [Enable monitoring for a new AKS cluster using the Azure CLI](../../aks/learn/quick-kubernetes-deploy-cli.md)| +| | [Enable for a new AKS cluster by using the open-source tool Terraform](container-insights-enable-new-cluster.md#enable-using-terraform)| +| | [Enable for a new OpenShift cluster by using an Azure Resource Manager template](container-insights-azure-redhat-setup.md#enable-for-a-new-cluster-using-an-azure-resource-manager-template) | +| | [Enable for a new OpenShift cluster by using the Azure CLI](/azure/openshift/#az-openshift-create) | +| Existing AKS cluster | [Enable monitoring for an existing AKS cluster using the Azure CLI](container-insights-enable-existing-clusters.md#enable-using-azure-cli) | +| |[Enable for an existing AKS cluster using Terraform](container-insights-enable-existing-clusters.md#enable-using-terraform) | +| | [Enable for an existing AKS cluster from Azure Monitor](container-insights-enable-existing-clusters.md#enable-from-azure-monitor-in-the-portal)| +| | [Enable directly from an AKS cluster in the Azure portal](container-insights-enable-existing-clusters.md#enable-directly-from-aks-cluster-in-the-portal)| +| | [Enable for AKS cluster using an Azure Resource Manager template](container-insights-enable-existing-clusters.md#enable-using-an-azure-resource-manager-template)| +| Existing non-AKS Kubernetes cluster | [Enable for non-AKS Kubernetes cluster hosted outside of Azure and enabled with Azure Arc using the Azure CLI](container-insights-enable-arc-enabled-clusters.md#create-extension-instance-using-azure-cli). 
| +| | [Enable for non-AKS Kubernetes cluster hosted outside of Azure and enabled with Azure Arc using a preconfigured Azure Resource Manager template](container-insights-enable-arc-enabled-clusters.md#create-extension-instance-using-azure-resource-manager) | +| | [Enable for non-AKS Kubernetes cluster hosted outside of Azure and enabled with Azure Arc from the multicluster page Azure Monitor](container-insights-enable-arc-enabled-clusters.md#create-extension-instance-using-azure-portal) | ## Next steps +Once you've enabled monitoring, you can begin analyzing the performance of your Kubernetes clusters that are hosted on Azure Kubernetes Service (AKS), Azure Stack, or another environment. To learn how to use Container insights, see [View Kubernetes cluster performance](container-insights-analyze.md). -Now that you've enabled monitoring, you can begin analyzing the performance of your Kubernetes clusters that are hosted on Azure Kubernetes Service (AKS), Azure Stack, or another environment. To learn how to use Container insights, see [View Kubernetes cluster performance](container-insights-analyze.md). diff --git a/articles/azure-monitor/containers/container-insights-optout-hybrid.md b/articles/azure-monitor/containers/container-insights-optout-hybrid.md index 71fcc79971cc6..e925cb9f8a47d 100644 --- a/articles/azure-monitor/containers/container-insights-optout-hybrid.md +++ b/articles/azure-monitor/containers/container-insights-optout-hybrid.md @@ -2,9 +2,9 @@ title: How to stop monitoring your hybrid Kubernetes cluster | Microsoft Docs description: This article describes how you can stop monitoring of your hybrid Kubernetes cluster with Container insights. ms.topic: conceptual -ms.date: 06/16/2020 +ms.date: 05/24/2022 ms.custom: devx-track-azurepowershell - +ms.reviewer: aul --- # How to stop monitoring your hybrid cluster diff --git a/articles/azure-monitor/containers/container-insights-optout-openshift-v3.md b/articles/azure-monitor/containers/container-insights-optout-openshift-v3.md index 7579e889f5d21..da6188f6e6554 100644 --- a/articles/azure-monitor/containers/container-insights-optout-openshift-v3.md +++ b/articles/azure-monitor/containers/container-insights-optout-openshift-v3.md @@ -2,8 +2,9 @@ title: How to stop monitoring your Azure Red Hat OpenShift v3 cluster | Microsoft Docs description: This article describes how you can stop monitoring of your Azure Red Hat OpenShift cluster with Container insights. ms.topic: conceptual -ms.date: 04/24/2020 +ms.date: 05/24/2022 ms.custom: devx-track-azurepowershell +ms.reviewer: aul --- diff --git a/articles/azure-monitor/containers/container-insights-optout-openshift-v4.md b/articles/azure-monitor/containers/container-insights-optout-openshift-v4.md index 3e952dc50f88f..a3b4bb2ae4210 100644 --- a/articles/azure-monitor/containers/container-insights-optout-openshift-v4.md +++ b/articles/azure-monitor/containers/container-insights-optout-openshift-v4.md @@ -2,7 +2,8 @@ title: How to stop monitoring your Azure and Red Hat OpenShift v4 cluster | Microsoft Docs description: This article describes how you can stop monitoring of your Azure Red Hat OpenShift and Red Hat OpenShift version 4 cluster with Container insights. 
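The installation options table above includes enabling Container insights on Azure Arc-enabled Kubernetes clusters with the Azure CLI. A minimal sketch of that extension command, assuming the `k8s-extension` CLI extension is installed; the cluster and resource group names are placeholders:

```bash
# Create the Container insights extension on an Azure Arc-enabled Kubernetes cluster.
# Requires the "k8s-extension" Azure CLI extension; names below are placeholders.
az k8s-extension create \
  --name azuremonitor-containers \
  --cluster-name MyArcCluster \
  --resource-group MyResourceGroup \
  --cluster-type connectedClusters \
  --extension-type Microsoft.AzureMonitor.Containers
```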
ms.topic: conceptual -ms.date: 04/24/2020 +ms.date: 05/24/2022 +ms.reviewer: aul --- diff --git a/articles/azure-monitor/containers/container-insights-optout.md b/articles/azure-monitor/containers/container-insights-optout.md index 6c3440719ac1c..b14e66aee4db1 100644 --- a/articles/azure-monitor/containers/container-insights-optout.md +++ b/articles/azure-monitor/containers/container-insights-optout.md @@ -2,9 +2,10 @@ title: How to Stop Monitoring Your Azure Kubernetes Service cluster | Microsoft Docs description: This article describes how you can discontinue monitoring of your Azure AKS cluster with Container insights. ms.topic: conceptual -ms.date: 08/19/2019 +ms.date: 05/24/2022 ms.custom: devx-track-azurepowershell, devx-track-azurecli ms.devlang: azurecli +ms.reviewer: aul --- diff --git a/articles/azure-monitor/containers/container-insights-overview.md b/articles/azure-monitor/containers/container-insights-overview.md index e1012e3937fb9..c7a321395da3a 100644 --- a/articles/azure-monitor/containers/container-insights-overview.md +++ b/articles/azure-monitor/containers/container-insights-overview.md @@ -2,8 +2,9 @@ title: Overview of Container insights | Microsoft Docs description: This article describes Container insights that monitors AKS Container Insights solution and the value it delivers by monitoring the health of your AKS clusters and Container Instances in Azure. ms.topic: conceptual +ms.custom: references_regions ms.date: 09/08/2020 - +ms.reviewer: aul --- # Container insights overview @@ -14,54 +15,52 @@ Container insights is a feature designed to monitor the performance of container - Self-managed Kubernetes clusters hosted on Azure using [AKS Engine](https://github.com/Azure/aks-engine) - [Azure Container Instances](../../container-instances/container-instances-overview.md) - Self-managed Kubernetes clusters hosted on [Azure Stack](/azure-stack/user/azure-stack-kubernetes-aks-engine-overview) or on-premises -- [Azure Red Hat OpenShift](../../openshift/intro-openshift.md) - [Azure Arc-enabled Kubernetes](../../azure-arc/kubernetes/overview.md) (preview) Container insights supports clusters running the Linux and Windows Server 2019 operating system. The container runtimes it supports are Docker, Moby, and any CRI compatible runtime such as CRI-O and ContainerD. Monitoring your containers is critical, especially when you're running a production cluster, at scale, with multiple applications. -Container insights gives you performance visibility by collecting memory and processor metrics from controllers, nodes, and containers that are available in Kubernetes through the Metrics API. Container logs are also collected. After you enable monitoring from Kubernetes clusters, metrics and logs are automatically collected for you through a containerized version of the Log Analytics agent for Linux. Metrics are written to the metrics store and log data is written to the logs store associated with your [Log Analytics](../logs/log-query-overview.md) workspace. +Container insights gives you performance visibility by collecting memory and processor metrics from controllers, nodes, and containers that are available in Kubernetes through the Metrics API. After you enable monitoring from Kubernetes clusters, metrics and Container logs are automatically collected for you through a containerized version of the Log Analytics agent for Linux. 
Metrics are sent to the [metrics database in Azure Monitor](../essentials/data-platform-metrics.md), and log data is sent to your [Log Analytics workspace](../logs/log-analytics-workspace-overview.md). -![Container insights architecture](./media/container-insights-overview/azmon-containers-architecture-01.png) +:::image type="content" source="media/container-insights-overview/azmon-containers-architecture-01.png" lightbox="media/container-insights-overview/azmon-containers-architecture-01.png" alt-text="Overview diagram of Container insights"::: -## What does Container insights provide? +## Features of Container insights -Container insights delivers a comprehensive monitoring experience using different features of Azure Monitor. These features enable you to understand the performance and health of your Kubernetes cluster running Linux and Windows Server 2019 operating system, and the container workloads. With Container insights you can: +Container insights delivers a comprehensive monitoring experience to understand the performance and health of your Kubernetes cluster and container workloads. -* Identify AKS containers that are running on the node and their average processor and memory utilization. This knowledge can help you identify resource bottlenecks. -* Identify processor and memory utilization of container groups and their containers hosted in Azure Container Instances. -* Identify where the container resides in a controller or a pod. This knowledge can help you view the controller's or pod's overall performance. -* Review the resource utilization of workloads running on the host that are unrelated to the standard processes that support the pod. -* Understand the behavior of the cluster under average and heaviest loads. This knowledge can help you identify capacity needs and determine the maximum load that the cluster can sustain. -* Configure alerts to proactively notify you or record it when CPU and memory utilization on nodes or containers exceed your thresholds, or when a health state change occurs in the cluster at the infrastructure or nodes health rollup. -* Integrate with [Prometheus](https://prometheus.io/docs/introduction/overview/) to view application and workload metrics it collects from nodes and Kubernetes using [queries](container-insights-log-query.md) to create custom alerts, dashboards, and perform detailed analysis. -* Monitor container workloads [deployed to AKS Engine](https://github.com/Azure/aks-engine) on-premises and [AKS Engine on Azure Stack](/azure-stack/user/azure-stack-kubernetes-aks-engine-overview). -* Monitor container workloads [deployed to Azure Red Hat OpenShift](../../openshift/intro-openshift.md). +- Identify resource bottlenecks by identifying AKS containers running on the node and their average processor and memory utilization. +- Identify processor and memory utilization of container groups and their containers hosted in Azure Container Instances. +- View the controller's or pod's overall performance by identifying where the container resides in a controller or a pod. +- Review the resource utilization of workloads running on the host that are unrelated to the standard processes that support the pod. +- Identify capacity needs and determine the maximum load that the cluster can sustain by understanding the behavior of the cluster under average and heaviest loads. 
+- Configure alerts to proactively notify you or record when CPU and memory utilization on nodes or containers exceed your thresholds, or when a health state change occurs in the cluster at the infrastructure or nodes health rollup. +- Integrate with [Prometheus](https://prometheus.io/docs/introduction/overview/) to view application and workload metrics it collects from nodes and Kubernetes using [queries](container-insights-log-query.md) to create custom alerts, dashboards, and perform detailed analysis. +- Monitor container workloads [deployed to AKS Engine](https://github.com/Azure/aks-engine) on-premises and [AKS Engine on Azure Stack](/azure-stack/user/azure-stack-kubernetes-aks-engine-overview). +- Monitor container workloads [deployed to Azure Arc-enabled Kubernetes](../../azure-arc/kubernetes/overview.md). - >[!NOTE] - >Support for Azure Red Hat OpenShift is a feature in public preview at this time. - > -* Monitor container workloads [deployed to Azure Arc-enabled Kubernetes](../../azure-arc/kubernetes/overview.md). -The main differences in monitoring a Windows Server cluster compared to a Linux cluster are the following: +Check out the following video providing an intermediate-level deep dive to help you learn about monitoring your AKS cluster with Container insights. Note that the video refers to *Azure Monitor for Containers*, which is the previous name for *Container insights*. -- Windows doesn't have a Memory RSS metric, and as a result it isn't available for Windows node and containers. The [Working Set](/windows/win32/memory/working-set) metric is available. -- Disk storage capacity information isn't available for Windows nodes. -- Only pod environments are monitored, not Docker environments. -- With the preview release, a maximum of 30 Windows Server containers are supported. This limitation doesn't apply to Linux containers. - -Check out the following video providing an intermediate level deep dive to help you learn about monitoring your AKS cluster with Container insights. +[!VIDEO https://www.youtube.com/embed/XEdwGvS2AwA] -> [!VIDEO https://www.youtube.com/embed/XEdwGvS2AwA] -## How do I access this feature? -You can access Container insights two ways, from Azure Monitor or directly from the selected AKS cluster. From Azure Monitor, you have a global perspective of all the containers deployed, which are monitored and which are not, allowing you to search and filter across your subscriptions and resource groups, and then drill into Container insights from the selected container. Otherwise, you can access the feature directly from a selected AKS container from the AKS page. +## How to access Container insights Access Container insights in the Azure portal from Azure Monitor or directly from the selected AKS cluster. The Azure Monitor menu gives you the global perspective of all the containers deployed and which are monitored, allowing you to search and filter across your subscriptions and resource groups. You can then drill into Container insights from the selected container. Access Container insights for a particular AKS container directly from the AKS page. ![Overview of methods to access Container insights](./media/container-insights-overview/azmon-containers-experience.png) + +## Differences between Windows and Linux clusters The main differences in monitoring a Windows Server cluster compared to a Linux cluster include the following: + +- Windows doesn't have a Memory RSS metric, and as a result it isn't available for Windows nodes and containers.
The [Working Set](/windows/win32/memory/working-set) metric is available. +- Disk storage capacity information isn't available for Windows nodes. +- Only pod environments are monitored, not Docker environments. +- With the preview release, a maximum of 30 Windows Server containers are supported. This limitation doesn't apply to Linux containers. + ## Next steps To begin monitoring your Kubernetes cluster, review [How to enable Container insights](container-insights-onboard.md) to understand the requirements and available methods to enable monitoring. diff --git a/articles/azure-monitor/containers/container-insights-persistent-volumes.md b/articles/azure-monitor/containers/container-insights-persistent-volumes.md index 8fe7b690e5992..4438dd89a000a 100644 --- a/articles/azure-monitor/containers/container-insights-persistent-volumes.md +++ b/articles/azure-monitor/containers/container-insights-persistent-volumes.md @@ -2,7 +2,8 @@ title: Configure PV monitoring with Container insights | Microsoft Docs description: This article describes how you can configure monitoring Kubernetes clusters with persistent volumes with Container insights. ms.topic: conceptual -ms.date: 03/03/2021 +ms.date: 05/24/2022 +ms.reviewer: aul --- # Configure PV monitoring with Container insights diff --git a/articles/azure-monitor/containers/container-insights-prometheus-integration.md b/articles/azure-monitor/containers/container-insights-prometheus-integration.md index 8d612a1ec3f78..7956d389d6045 100644 --- a/articles/azure-monitor/containers/container-insights-prometheus-integration.md +++ b/articles/azure-monitor/containers/container-insights-prometheus-integration.md @@ -3,6 +3,7 @@ title: Configure Container insights Prometheus Integration | Microsoft Docs description: This article describes how you can configure the Container insights agent to scrape metrics from Prometheus with your Kubernetes cluster. ms.topic: conceptual ms.date: 04/22/2020 +ms.reviewer: aul --- # Configure scraping of Prometheus metrics with Container insights diff --git a/articles/azure-monitor/containers/container-insights-region-mapping.md b/articles/azure-monitor/containers/container-insights-region-mapping.md index 13e62117c3af5..7b9c8b5b3023f 100644 --- a/articles/azure-monitor/containers/container-insights-region-mapping.md +++ b/articles/azure-monitor/containers/container-insights-region-mapping.md @@ -2,8 +2,9 @@ title: Container insights region mappings description: Describes the region mappings supported between Container insights, Log Analytics Workspace, and custom metrics. ms.topic: conceptual -ms.date: 09/22/2020 +ms.date: 05/27/2022 ms.custom: references_regions +ms.reviewer: aul --- # Region mappings supported by Container insights @@ -11,7 +12,6 @@ ms.custom: references_regions When enabling Container insights, only certain regions are supported for linking a Log Analytics workspace and an AKS cluster, and collecting custom metrics submitted to Azure Monitor. ## Log Analytics workspace supported mappings - Supported AKS regions are listed in [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?products=kubernetes-service). The Log Analytics workspace must be in the same region except for the regions listed in the following table. Watch [AKS release notes](https://github.com/Azure/AKS/releases) for updates. 
@@ -37,10 +37,9 @@ Supported AKS regions are listed in [Products available by region](https://azure |**Korea** | | |KoreaSouth |KoreaCentral | |**US** | | -|WestCentralUS1|EastUS1| +|WestCentralUS1|EastUS | -1 Due to capacity restraints, the region isn't available when creating new resources. This includes a Log Analytics workspace. However, preexisting linked resources in the region should continue to work. ## Custom metrics supported regions diff --git a/articles/azure-monitor/containers/container-insights-reports.md b/articles/azure-monitor/containers/container-insights-reports.md index 2338dedfa4c1f..12c035a440af6 100644 --- a/articles/azure-monitor/containers/container-insights-reports.md +++ b/articles/azure-monitor/containers/container-insights-reports.md @@ -2,7 +2,8 @@ title: Reports in Container insights description: Describes reports available to analyze data collected by Container insights. ms.topic: conceptual -ms.date: 03/02/2021 +ms.date: 05/24/2022 +ms.reviewer: aul --- # Reports in Container insights diff --git a/articles/azure-monitor/containers/container-insights-transition-hybrid.md b/articles/azure-monitor/containers/container-insights-transition-hybrid.md index 439401180e73a..cadbf5f82a048 100644 --- a/articles/azure-monitor/containers/container-insights-transition-hybrid.md +++ b/articles/azure-monitor/containers/container-insights-transition-hybrid.md @@ -5,6 +5,7 @@ ms.topic: article author: austonli ms.author: aul description: "Learn how to migrate from using script-based hybrid monitoring solutions to Container Insights on Azure Arc-enabled Kubernetes clusters" +ms.reviewer: aul --- # Transition to using Container Insights on Azure Arc-enabled Kubernetes diff --git a/articles/azure-monitor/containers/container-insights-transition-solution.md b/articles/azure-monitor/containers/container-insights-transition-solution.md index d8fa2130cfb4f..1f3afe4655086 100644 --- a/articles/azure-monitor/containers/container-insights-transition-solution.md +++ b/articles/azure-monitor/containers/container-insights-transition-solution.md @@ -1,10 +1,11 @@ --- -title: "Transition from the Container Monitoring Solution to using Container Insights" +title: Transition from the Container Monitoring Solution to using Container Insights ms.date: 1/18/2022 ms.topic: article author: austonli ms.author: aul description: "Learn how to migrate from using the legacy OMS solution to monitoring your containers using Container Insights" +ms.reviewer: aul --- # Transition from the Container Monitoring Solution to using Container Insights diff --git a/articles/azure-monitor/containers/container-insights-troubleshoot.md b/articles/azure-monitor/containers/container-insights-troubleshoot.md index bcac690abcf75..cf5ef2346b8d5 100644 --- a/articles/azure-monitor/containers/container-insights-troubleshoot.md +++ b/articles/azure-monitor/containers/container-insights-troubleshoot.md @@ -2,7 +2,8 @@ title: How to Troubleshoot Container insights | Microsoft Docs description: This article describes how you can troubleshoot and resolve issues with Container insights. ms.topic: conceptual -ms.date: 03/25/2021 +ms.date: 05/24/2022 +ms.reviewer: aul --- @@ -23,8 +24,7 @@ You can also manually grant this role from the Azure portal by performing the fo For detailed steps, see [Assign Azure roles using the Azure portal](../../role-based-access-control/role-assignments-portal.md). 
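+As an alternative to the portal, you can make the same role assignment with the Azure CLI. The following is a minimal sketch; the role name, assignee, and scope are placeholders that you replace with the values called out earlier in this section.
+
+```azurecli
+# Hypothetical example: create a role assignment with the Azure CLI.
+# <role-name>, <user-or-service-principal-object-id>, and <resource-id> are placeholders
+# to replace with the role and resource described in this article.
+az role assignment create \
+  --assignee "<user-or-service-principal-object-id>" \
+  --role "<role-name>" \
+  --scope "<resource-id>"
+```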
## Container insights is enabled but not reporting any information - -If Container insights is successfully enabled and configured, but you cannot view status information or no results are returned from a log query, you diagnose the problem by following these steps: +Use the following steps to diagnose the problem if you can't view status information or if no results are returned from a log query: 1. Check the status of the agent by running the command: @@ -60,7 +60,7 @@ If Container insights is successfully enabled and configured, but you cannot vie omsagent 1 1 1 1 3h ``` -4. Check the status of the pod to verify that it is running using the command: `kubectl get pods --namespace=kube-system` +4. Check the status of the pod to verify that it's running using the command: `kubectl get pods --namespace=kube-system` The output should resemble the following example with a status of *Running* for the omsagent: @@ -82,12 +82,12 @@ The table below summarizes known errors you may encounter while using Container | Error messages | Action | | ---- | --- | | Error Message `No data for selected filters` | It may take some time to establish monitoring data flow for newly created clusters. Allow at least 10 to 15 minutes for data to appear for your cluster. | -| Error Message `Error retrieving data` | While Azure Kubernetes Service cluster is setting up for health and performance monitoring, a connection is established between the cluster and Azure Log Analytics workspace. A Log Analytics workspace is used to store all monitoring data for your cluster. This error may occur when your Log Analytics workspace has been deleted. Check if the workspace was deleted and if it was, you will need to re-enable monitoring of your cluster with Container insights and specify an existing or create a new workspace. To re-enable, you will need to [disable](container-insights-optout.md) monitoring for the cluster and [enable](container-insights-enable-new-cluster.md) Container insights again. | -| `Error retrieving data` after adding Container insights through az aks cli | When enable monitoring using `az aks cli`, Container insights may not be properly deployed. Check whether the solution is deployed. To verify, go to your Log Analytics workspace and see if the solution is available by selecting **Solutions** from the pane on the left-hand side. To resolve this issue, you will need to redeploy the solution by following the instructions on [how to deploy Container insights](container-insights-onboard.md) | +| Error Message `Error retrieving data` | While the Azure Kubernetes Service cluster is being set up for health and performance monitoring, a connection is established between the cluster and the Azure Log Analytics workspace. A Log Analytics workspace is used to store all monitoring data for your cluster. This error may occur when your Log Analytics workspace has been deleted. Check if the workspace was deleted. If it was, you'll need to re-enable monitoring of your cluster with Container insights and either specify an existing workspace or create a new one. To re-enable, you'll need to [disable](container-insights-optout.md) monitoring for the cluster and [enable](container-insights-enable-new-cluster.md) Container insights again. | +| `Error retrieving data` after adding Container insights through az aks cli | When enabling monitoring using `az aks cli`, Container insights may not be properly deployed. Check whether the solution is deployed. 
To verify, go to your Log Analytics workspace and see if the solution is available by selecting **Solutions** from the pane on the left-hand side. To resolve this issue, you'll need to redeploy the solution by following the instructions on [how to deploy Container insights](container-insights-onboard.md) | -To help diagnose the problem, we have provided a [troubleshooting script](https://github.com/microsoft/Docker-Provider/tree/ci_dev/scripts/troubleshoot). +To help diagnose the problem, we've provided a [troubleshooting script](https://github.com/microsoft/Docker-Provider/tree/ci_dev/scripts/troubleshoot). -## Container insights agent ReplicaSet Pods are not scheduled on non-Azure Kubernetes cluster +## Container insights agent ReplicaSet Pods aren't scheduled on non-Azure Kubernetes cluster Container insights agent ReplicaSet Pods has a dependency on the following node selectors on the worker (or agent) nodes for the scheduling: @@ -101,9 +101,9 @@ If your worker nodes don’t have node labels attached, then agent ReplicaSet Po ## Performance charts don't show CPU or memory of nodes and containers on a non-Azure cluster -Container insights agent Pods uses the cAdvisor endpoint on the node agent to gather the performance metrics. Verify the containerized agent on the node is configured to allow `cAdvisor port: 10255` to be opened on all nodes in the cluster to collect performance metrics. +Container insights agent pods use the cAdvisor endpoint on the node agent to gather the performance metrics. Verify the containerized agent on the node is configured to allow `cAdvisor port: 10255` to be opened on all nodes in the cluster to collect performance metrics. -## Non-Azure Kubernetes cluster are not showing in Container insights +## Non-Azure Kubernetes cluster aren't showing in Container insights To view the non-Azure Kubernetes cluster in Container insights, Read access is required on the Log Analytics workspace supporting this Insight and on the Container Insights solution resource **ContainerInsights (*workspace*)**. @@ -116,7 +116,7 @@ To view the non-Azure Kubernetes cluster in Container insights, Read access is r ``` azurecli az role assignment list --assignee "SP/UserassignedMSI for omsagent" --scope "/subscriptions//resourcegroups//providers/Microsoft.ContainerService/managedClusters/" --role "Monitoring Metrics Publisher" ``` - For clusters with MSI, the user assigned client id for omsagent changes every time monitoring is enabled and disabled, so the role assignment should exist on the current msi client id. + For clusters with MSI, the user assigned client ID for omsagent changes every time monitoring is enabled and disabled, so the role assignment should exist on the current msi client ID. 3. For clusters with Azure Active Directory pod identity enabled and using MSI: @@ -154,10 +154,10 @@ To view the non-Azure Kubernetes cluster in Container insights, Read access is r ``` ## Installation of Azure Monitor Containers Extension fail with an error containing “manifests contain a resource that already exists” on Azure Arc Enabled Kubernetes cluster -The error _manifests contain a resource that already exists_ indicates that resources of the Container Insights agent already exist on the Azure Arc Enabled Kubernetes cluster. This indicates that the container insights agent is already installed either through azuremonitor-containers HELM chart or Monitoring Addon if it is AKS Cluster which is connected Azure Arc. 
The solution to this issue, is to clean up the existing resources of container insights agent if it exists and then enable Azure Monitor Containers Extension. +The error _manifests contain a resource that already exists_ indicates that resources of the Container Insights agent already exist on the Azure Arc Enabled Kubernetes cluster. This indicates that the container insights agent is already installed, either through the azuremonitor-containers HELM chart or the Monitoring Addon if it's an AKS cluster that's connected to Azure Arc. The solution to this issue is to clean up the existing resources of the container insights agent if they exist. Then enable the Azure Monitor Containers Extension. ### For non-AKS clusters -1. Against the K8s cluster which is connected to Azure Arc, run below command to verify whether the azmon-containers-release-1 helm chart release exists or not: +1. Against the K8s cluster that's connected to Azure Arc, run the following command to verify whether the azmon-containers-release-1 helm chart release exists: `helm list -A` @@ -166,18 +166,18 @@ The error _manifests contain a resource that already exists_ indicates that reso `helm del azmon-containers-release-1` ### For AKS clusters -1. Run below commands and look for omsagent addon profile to verify the AKS monitoring addon enabled or not: +1. Run the following commands and look for the omsagent addon profile to verify whether the AKS monitoring addon is enabled: ``` az account set -s az aks show -g -n ``` -2. If there is omsagent addon profile config with log analytics workspace resource Id in the output of the above command indicates that, AKS Monitoring addon enabled and which needs to be disabled: +2. If the output includes an omsagent addon profile config with a log analytics workspace resource ID, this indicates that the AKS monitoring addon is enabled and needs to be disabled, as shown in the combined sketch at the end of this section: `az aks disable-addons -a monitoring -g -n ` -If above steps didn’t resolve the installation of Azure Monitor Containers Extension issues, please create a ticket to Microsoft for further investigation. +If the above steps didn't resolve the Azure Monitor Containers Extension installation issues, create a support ticket with Microsoft for further investigation. 
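+For AKS clusters, the verification and cleanup steps above can be combined into a short script. The following is a minimal sketch; the subscription, resource group, and cluster names are placeholders, and the `--query` path is an assumption based on the omsagent addon profile described above.
+
+```azurecli
+# Minimal sketch of the AKS verification and cleanup flow described above.
+az account set -s "<subscription-id>"
+
+# Look for the omsagent addon profile. The query path assumes the profile is
+# exposed as addonProfiles.omsagent on the cluster resource.
+az aks show -g "<resource-group>" -n "<cluster-name>" --query "addonProfiles.omsagent"
+
+# If an omsagent addon profile with a Log Analytics workspace resource ID is returned,
+# disable the monitoring addon before enabling the Azure Monitor Containers Extension.
+az aks disable-addons -a monitoring -g "<resource-group>" -n "<cluster-name>"
+```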
## Next steps diff --git a/articles/azure-monitor/containers/container-insights-update-metrics.md b/articles/azure-monitor/containers/container-insights-update-metrics.md index d068eb008e90d..1729b029e33dc 100644 --- a/articles/azure-monitor/containers/container-insights-update-metrics.md +++ b/articles/azure-monitor/containers/container-insights-update-metrics.md @@ -4,6 +4,7 @@ description: This article describes how you update Container insights to enable ms.topic: conceptual ms.date: 10/09/2020 ms.custom: devx-track-azurecli +ms.reviewer: aul --- diff --git a/articles/azure-monitor/containers/containers.md b/articles/azure-monitor/containers/containers.md index 58fd4c9a75b89..8bd7c22957640 100644 --- a/articles/azure-monitor/containers/containers.md +++ b/articles/azure-monitor/containers/containers.md @@ -5,6 +5,7 @@ ms.topic: conceptual author: bwren ms.author: bwren ms.date: 07/06/2020 +ms.reviewer: aul --- diff --git a/articles/azure-monitor/containers/resource-manager-container-insights.md b/articles/azure-monitor/containers/resource-manager-container-insights.md index f0d764c2bdf8a..bfb4bbbfbd9ae 100644 --- a/articles/azure-monitor/containers/resource-manager-container-insights.md +++ b/articles/azure-monitor/containers/resource-manager-container-insights.md @@ -2,9 +2,9 @@ title: Resource Manager template samples for Container insights description: Sample Azure Resource Manager templates to deploy and configureContainer insights. ms.topic: sample -author: bwren ms.author: bwren ms.date: 05/05/2022 +ms.reviewer: aulgit --- diff --git a/articles/azure-monitor/continuous-monitoring.md b/articles/azure-monitor/continuous-monitoring.md index 621cc9e35b8e6..ee765441c24fe 100644 --- a/articles/azure-monitor/continuous-monitoring.md +++ b/articles/azure-monitor/continuous-monitoring.md @@ -4,7 +4,7 @@ description: Describes specific steps for using Azure Monitor to enable Continuo ms.topic: conceptual author: bwren ms.author: bwren -ms.date: 10/12/2018 +ms.date: 06/07/2022 --- diff --git a/articles/azure-monitor/essentials/activity-log.md b/articles/azure-monitor/essentials/activity-log.md index 44e4d0abb2590..a6ab07f60c166 100644 --- a/articles/azure-monitor/essentials/activity-log.md +++ b/articles/azure-monitor/essentials/activity-log.md @@ -308,9 +308,9 @@ Before using Activity log insights, you'll have to [enable sending logs to your ### How does Activity log insights work? -Activity logs you send to a [Log Analytics workspace](/articles/azure-monitor/logs/log-analytics-workspace-overview.md) are stored in a table called AzureActivity. +Activity logs you send to a [Log Analytics workspace](/azure/azure-monitor/logs/log-analytics-workspace-overview) are stored in a table called AzureActivity. -Activity log insights are a curated [Log Analytics workbook](/articles/azure-monitor/visualize/workbooks-overview.md) with dashboards that visualize the data in the AzureActivity table. For example, which administrators deleted, updated or created resources, and whether the activities failed or succeeded. +Activity log insights are a curated [Log Analytics workbook](/azure/azure-monitor/visualize/workbooks-overview) with dashboards that visualize the data in the AzureActivity table. For example, which administrators deleted, updated or created resources, and whether the activities failed or succeeded. 
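+Because these dashboards are built on the AzureActivity table, you can also query that table directly. The following is a minimal sketch using the Azure CLI; it assumes the `log-analytics` CLI extension is installed, the workspace GUID is a placeholder, and the column names are assumptions to verify against your workspace schema.
+
+```azurecli
+# Minimal sketch: summarize activity log records by category and status.
+# Requires the log-analytics extension: az extension add --name log-analytics
+# <workspace-guid> is a placeholder; CategoryValue and ActivityStatusValue are assumed column names.
+az monitor log-analytics query \
+  --workspace "<workspace-guid>" \
+  --analytics-query "AzureActivity | summarize Count = count() by CategoryValue, ActivityStatusValue | order by Count desc"
+```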
:::image type="content" source="media/activity-log/activity-logs-insights-main-screen.png" lightbox= "media/activity-log/activity-logs-insights-main-screen.png" alt-text="A screenshot showing Azure Activity logs insights dashboards."::: @@ -342,7 +342,7 @@ To view Activity log insights on a resource level: 1. At the top of the **Activity Logs Insights** page, select: 1. A time range for which to view data from the **TimeRange** dropdown. - * **Azure Activity Log Entries** shows the count of Activity log records in each [activity log category](/articles/azure-monitor/essentials/activity-log-schema#categories). + * **Azure Activity Log Entries** shows the count of Activity log records in each [activity log category](/azure/azure-monitor/essentials/activity-log#categories). :::image type="content" source="media/activity-log/activity-logs-insights-category-value.png" lightbox= "media/activity-log/activity-logs-insights-category-value.png" alt-text="Screenshot of Azure Activity Logs by Category Value"::: diff --git a/articles/azure-monitor/essentials/diagnostic-settings.md b/articles/azure-monitor/essentials/diagnostic-settings.md index cecdab36427b2..0466520204cb0 100644 --- a/articles/azure-monitor/essentials/diagnostic-settings.md +++ b/articles/azure-monitor/essentials/diagnostic-settings.md @@ -65,7 +65,7 @@ Platform logs and metrics can be sent to the destinations in the following table | Destination | Description | |:---|:---| -| [Log Analytics workspace](../logs/design-logs-deployment.md) | Metrics are converted to log form. This option may not be available for all resource types. Sending them to the Azure Monitor Logs store (which is searchable via Log Analytics) helps you to integrate them into queries, alerts, and visualizations with existing log data. +| [Log Analytics workspace](../logs/workspace-design.md) | Metrics are converted to log form. This option may not be available for all resource types. Sending them to the Azure Monitor Logs store (which is searchable via Log Analytics) helps you to integrate them into queries, alerts, and visualizations with existing log data. | [Azure storage account](../../storage/blobs/index.yml) | Archiving logs and metrics to an Azure storage account is useful for audit, static analysis, or backup. Compared to Azure Monitor Logs and a Log Analytics workspace, Azure storage is less expensive and logs can be kept there indefinitely. | | [Event Hubs](../../event-hubs/index.yml) | Sending logs and metrics to Event Hubs allows you to stream data to external systems such as third-party SIEMs and other Log Analytics solutions. | | [Azure Monitor partner integrations](../../partner-solutions/overview.md)| Specialized integrations between Azure Monitor and other non-Microsoft monitoring platforms. Useful when you are already using one of the partners. 
| diff --git a/articles/azure-monitor/essentials/metrics-supported.md b/articles/azure-monitor/essentials/metrics-supported.md index be30b52263120..dd1dce36497c7 100644 --- a/articles/azure-monitor/essentials/metrics-supported.md +++ b/articles/azure-monitor/essentials/metrics-supported.md @@ -1720,7 +1720,7 @@ This latest update adds a new column and reorders the metrics to be alphabetical |---|---|---|---|---|---|---| |AddRegion|Yes|Region Added|Count|Count|Region Added|Region| |AutoscaleMaxThroughput|No|Autoscale Max Throughput|Count|Maximum|Autoscale Max Throughput|DatabaseName, CollectionName| -|AvailableStorage|No|(deprecated) Available Storage|Bytes|Total|"Available Storage"will be removed from Azure Monitor at the end of September 2023. Cosmos DB collection storage size is now unlimited. The only restriction is that the storage size for each logical partition key is 20GB. You can enable PartitionKeyStatistics in Diagnostic Log to know the storage consumption for top partition keys. For more info about Cosmos DB storage quota, please check this doc https://docs.microsoft.com/azure/cosmos-db/concepts-limits. After deprecation, the remaining alert rules still defined on the deprecated metric will be automatically disabled post the deprecation date.|CollectionName, DatabaseName, Region| +|AvailableStorage|No|(deprecated) Available Storage|Bytes|Total|"Available Storage"will be removed from Azure Monitor at the end of September 2023. Cosmos DB collection storage size is now unlimited. The only restriction is that the storage size for each logical partition key is 20GB. You can enable PartitionKeyStatistics in Diagnostic Log to know the storage consumption for top partition keys. For more info about Cosmos DB storage quota, see [Azure Cosmos DB service quotas](../../cosmos-db/concepts-limits.md). After deprecation, the remaining alert rules still defined on the deprecated metric will be automatically disabled post the deprecation date.|CollectionName, DatabaseName, Region| |CassandraConnectionClosures|No|Cassandra Connection Closures|Count|Total|Number of Cassandra connections that were closed, reported at a 1 minute granularity|APIType, Region, ClosureReason| |CassandraConnectorAvgReplicationLatency|No|Cassandra Connector Average ReplicationLatency|MilliSeconds|Average|Cassandra Connector Average ReplicationLatency|No Dimensions| |CassandraConnectorReplicationHealthStatus|No|Cassandra Connector Replication Health Status|Count|Count|Cassandra Connector Replication Health Status|NotStarted, ReplicationInProgress, Error| diff --git a/articles/azure-monitor/essentials/resource-logs-schema.md b/articles/azure-monitor/essentials/resource-logs-schema.md index 7d6d3da399924..7ede4b29cc894 100644 --- a/articles/azure-monitor/essentials/resource-logs-schema.md +++ b/articles/azure-monitor/essentials/resource-logs-schema.md @@ -90,7 +90,7 @@ The schema for resource logs varies depending on the resource and log category. 
| Azure Storage | [Blobs](../../storage/blobs/monitor-blob-storage-reference.md#resource-logs-preview), [Files](../../storage/files/storage-files-monitoring-reference.md#resource-logs-preview), [Queues](../../storage/queues/monitor-queue-storage-reference.md#resource-logs-preview), [Tables](../../storage/tables/monitor-table-storage-reference.md#resource-logs-preview) | | Azure Stream Analytics |[Job logs](../../stream-analytics/stream-analytics-job-diagnostic-logs.md) | | Azure Traffic Manager | [Traffic Manager log schema](../../traffic-manager/traffic-manager-diagnostic-logs.md) | -| Azure Video Indexer|[Monitor Azure Video Indexer data reference](/azure/azure-video-indexer/monitor-video-indexer-data-reference)| +| Azure Video Indexer|[Monitor Azure Video Indexer data reference](../../azure-video-indexer/monitor-video-indexer-data-reference.md)| | Azure Virtual Network | Schema not available | | Virtual network gateways | [Logging for Virtual Network Gateways](../../vpn-gateway/troubleshoot-vpn-with-azure-diagnostics.md)| @@ -102,4 +102,4 @@ The schema for resource logs varies depending on the resource and log category. * [Learn more about resource logs](../essentials/platform-logs-overview.md) * [Stream resource logs to Event Hubs](./resource-logs.md#send-to-azure-event-hubs) * [Change resource log diagnostic settings by using the Azure Monitor REST API](/rest/api/monitor/diagnosticsettings) -* [Analyze logs from Azure Storage with Log Analytics](./resource-logs.md#send-to-log-analytics-workspace) +* [Analyze logs from Azure Storage with Log Analytics](./resource-logs.md#send-to-log-analytics-workspace) \ No newline at end of file diff --git a/articles/azure-monitor/faq.yml b/articles/azure-monitor/faq.yml index 1ff6c08c9311d..c6f1ae9facba2 100644 --- a/articles/azure-monitor/faq.yml +++ b/articles/azure-monitor/faq.yml @@ -133,7 +133,7 @@ sections: - question: | What is a Log Analytics workspace? answer: | - All log data collected by Azure Monitor is stored in a Log Analytics workspace. A workspace is essentially a container where log data is collected from various sources. You may have a single Log Analytics workspace for all your monitoring data or may have requirements for multiple workspaces. See [Designing your Azure Monitor Logs deployment](logs/design-logs-deployment.md). + All log data collected by Azure Monitor is stored in a Log Analytics workspace. A workspace is essentially a container where log data is collected from a variety of sources. You may have a single Log Analytics workspace for all your monitoring data or may have requirements for multiple workspaces. See [Design a Log Analytics workspace configuration](logs/workspace-design.md). - question: | Can you move an existing Log Analytics workspace to another Azure subscription? @@ -1179,8 +1179,6 @@ sections: - question: | I don't see some or any data in the performance charts for my VM answer: | - Our performance charts have been updated to use data stored in the *InsightsMetrics* table. To see data in these charts you'll need to upgrade to use the new VM Insights solution. Please refer to our [GA FAQ](vm/vminsights-ga-release-faq.yml) for more information. - If you don't see performance data in the disk table or in some of the performance charts then your performance counters may not be configured in the workspace. To resolve, run the following [PowerShell script](./vm/vminsights-enable-powershell.md). 
- question: | diff --git a/articles/azure-monitor/insights/alert-management-solution.md b/articles/azure-monitor/insights/alert-management-solution.md index 516e27fc0cdda..8932c7fc3ebca 100644 --- a/articles/azure-monitor/insights/alert-management-solution.md +++ b/articles/azure-monitor/insights/alert-management-solution.md @@ -13,14 +13,14 @@ ms.date: 01/02/2022 ![Alert Management icon](media/alert-management-solution/icon.png) > [!CAUTION] -> This solution is no longer in active development and may not work as expected. We suggest you try using [Azure Resource Graph to query Azure Monitor alerts](../alerts/alerts-overview.md#manage-your-alert-instances-programmatically). +> This solution is no longer in active development and may not work as expected. We suggest you try using [Azure Resource Graph to query Azure Monitor alerts](../alerts/alerts-overview.md#manage-your-alerts-programmatically). -The Alert Management solution helps you analyze all of the alerts in your Log Analytics repository. These alerts may have come from a variety of sources including those sources [created by Log Analytics](../alerts/alerts-overview.md) or [imported from Nagios or Zabbix](../vm/monitor-virtual-machine.md). The solution also imports alerts from any [connected System Center Operations Manager management groups](../agents/om-agents.md). +The Alert Management solution helps you analyze all of the alerts in your Log Analytics repository. These alerts may have come from a variety of sources including those sources [created by Log Analytics](../alerts/alerts-types.md#log-alerts) or [imported from Nagios or Zabbix](../vm/monitor-virtual-machine.md). The solution also imports alerts from any [connected System Center Operations Manager management groups](../agents/om-agents.md). ## Prerequisites The solution works with any records in the Log Analytics repository with a type of **Alert**, so you must perform whatever configuration is required to collect these records. -- For Log Analytics alerts, [create alert rules](../alerts/alerts-overview.md) to create alert records directly in the repository. +- For Log Analytics alerts, [create alert rules](../alerts/alerts-log.md) to create alert records directly in the repository. - For Nagios and Zabbix alerts, [configure those servers](../vm/monitor-virtual-machine.md) to send alerts to Log Analytics. - For System Center Operations Manager alerts, [connect your Operations Manager management group to your Log Analytics workspace](../agents/om-agents.md). Any alerts created in System Center Operations Manager are imported into Log Analytics. diff --git a/articles/azure-monitor/insights/azure-cli-application-insights-component.md b/articles/azure-monitor/insights/azure-cli-application-insights-component.md deleted file mode 100644 index 1460eac596875..0000000000000 --- a/articles/azure-monitor/insights/azure-cli-application-insights-component.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -title: Manage Application Insights components in Azure CLI -description: Use this sample code to manage components in Application Insights. This feature is part of Azure Monitor. -ms.topic: sample -author: bwren -ms.author: bwren -ms.date: 09/10/2012 -ms.custom: devx-track-azurecli - ---- - -# Manage Application Insights components by using Azure CLI - -In Azure Monitor, components are independently deployable parts of your distributed or microservices application. Use these Azure CLI commands to manage components in Application Insights. 
- -The examples in this article do the following management tasks: - -- Create a component. -- Connect a component to a webapp. -- Link a component to a storage account with a component. -- Create a continuous export configuration for a component. - -[!INCLUDE [Prepare your Azure CLI environment](../../../includes/azure-cli-prepare-your-environment.md)] - -## Create a component - -If you don't already have a resource group and workspace, create them by using [az group create](/cli/azure/group#az-group-create) and [az monitor log-analytics workspace create](/cli/azure/monitor/log-analytics/workspace#az-monitor-log-analytics-workspace-create): - -```azurecli -az group create --name ContosoAppInsightRG --location eastus2 -az monitor log-analytics workspace create --resource-group ContosoAppInsightRG \ - --workspace-name AppInWorkspace -``` - -To create a component, run the [az monitor app-insights component create](/cli/azure/monitor/app-insights/component#az-monitor-app-insights-component-create) command. The [az monitor app-insights component show](/cli/azure/monitor/app-insights/component#az-monitor-app-insights-component-show) command displays the component. - -```azurecli -az monitor app-insights component create --resource-group ContosoAppInsightRG \ - --app ContosoApp --location eastus2 --kind web --application-type web \ - --retention-time 120 -az monitor app-insights component show --resource-group ContosoAppInsightRG --app ContosoApp -``` - -## Connect a webapp - -This example connects your component to a webapp. You can create a webapp by using the [az appservice plan create](/cli/azure/appservice/plan#az-appservice-plan-create) and [az webapp create](/cli/azure/webapp#az-webapp-create) commands: - -```azurecli -az appservice plan create --resource-group ContosoAppInsightRG --name ContosoAppService -az webapp create --resource-group ContosoAppInsightRG --name ContosoApp \ - --plan ContosoAppService --name ContosoApp8765 -``` - -Run the [az monitor app-insights component connect-webapp](/cli/azure/monitor/app-insights/component#az-monitor-app-insights-component-connect-webapp) command to connect your component to the webapp: - -```azurecli -az monitor app-insights component connect-webapp --resource-group ContosoAppInsightRG \ - --app ContosoApp --web-app ContosoApp8765 --enable-debugger false --enable-profiler false -``` - -You can instead connect to an Azure function by using the [az monitor app-insights component connect-function](/cli/azure/monitor/app-insights/component#az-monitor-app-insights-component-connect-function) command. - -## Link a component to storage - -You can link a component to a storage account. To create a storage account, use the [az storage account create](/cli/azure/storage/account#az-storage-account-create) command: - -```azurecli -az storage account create --resource-group ContosoAppInsightRG \ - --name contosolinkedstorage --location eastus2 --sku Standard_LRS -``` - -To link your component to the storage account, run the [az monitor app-insights component linked-storage link](/cli/azure/monitor/app-insights/component/linked-storage#az-monitor-app-insights-component-linked-storage-link) command. 
You can see the existing links by using the [az monitor app-insights component linked-storage show](/cli/azure/monitor/app-insights/component/linked-storage#az-monitor-app-insights-component-linked-storage-show) command: - - -```azurecli -az monitor app-insights component linked-storage link --resource-group ContosoAppInsightRG \ - --app ContosoApp --storage-account contosolinkedstorage -az monitor app-insights component linked-storage show --resource-group ContosoAppInsightRG \ - --app ContosoApp -``` - -To unlink the storage, run the [az monitor app-insights component linked-storage unlink](/cli/azure/monitor/app-insights/component/linked-storage#az-monitor-app-insights-component-linked-storage-unlink) command: - -```AzureCLI -az monitor app-insights component linked-storage unlink \ - --resource-group ContosoAppInsightRG --app ContosoApp -``` - -## Set up continuous export - -Continuous export saves events from Application Insights portal in a storage container in JSON format. - -> [!NOTE] -> Continuous export is only supported for classic Application Insights resources. [Workspace-based Application Insights resources](../app/create-workspace-resource.md) must use [diagnostic settings](../app/create-workspace-resource.md#export-telemetry). -> - -To create a storage container, run the [az storage container create](/cli/azure/storage/container#az-storage-container-create) command. - -```azurecli -az storage container create --name contosostoragecontainer --account-name contosolinkedstorage \ - --public-access blob -``` - -You need access for the container to be write only. Run the [az storage container policy create](/cli/azure/storage/container/policy#az-storage-container-policy-create) cmdlet: - -```azurecli -az storage container policy create --container-name contosostoragecontainer \ - --account-name contosolinkedstorage --name WAccessPolicy --permissions w -``` - -Create an SAS key by using the [az storage container generate-sas](/cli/azure/storage/container#az-storage-container-generate-sas) command. Be sure to use the `--output tsv` parameter value to save the key without unwanted formatting like quotation marks. For more information, see [Use Azure CLI effectively](/cli/azure/use-cli-effectively). 
- -```azurecli -containersas=$(az storage container generate-sas --name contosostoragecontainer \ - --account-name contosolinkedstorage --permissions w --output tsv) -``` - -To create a continuous export, run the [az monitor app-insights component continues-export create](/cli/azure/monitor/app-insights/component/continues-export#az-monitor-app-insights-component-continues-export-create) command: - -```azurecli -az monitor app-insights component continues-export create --resource-group ContosoAppInsightRG \ - --app ContosoApp --record-types Event --dest-account contosolinkedstorage \ - --dest-container contosostoragecontainer --dest-sub-id 00000000-0000-0000-0000-000000000000 \ - --dest-sas $containersas -``` - -You can delete a configured continuous export by using the [az monitor app-insights component continues-export delete](/cli/azure/monitor/app-insights/component/continues-export#az-monitor-app-insights-component-continues-export-delete) command: - -```azurecli -az monitor app-insights component continues-export list \ - --resource-group ContosoAppInsightRG --app ContosoApp -az monitor app-insights component continues-export delete \ - --resource-group ContosoAppInsightRG --app ContosoApp --id abcdefghijklmnopqrstuvwxyz= -``` - -## Clean up deployment - -If you created a resource group to test these commands, you can remove the resource group and all its contents by using the [az group delete](/cli/azure/group#az-group-delete) command: - -```azurecli -az group delete --name ContosoAppInsightRG -``` - -## Azure CLI commands used in this article - -- [az appservice plan create](/cli/azure/appservice/plan#az-appservice-plan-create) -- [az group create](/cli/azure/group#az-group-create) -- [az group delete](/cli/azure/group#az-group-delete) -- [az monitor app-insights component connect-webapp](/cli/azure/monitor/app-insights/component#az-monitor-app-insights-component-connect-webapp) -- [az monitor app-insights component continues-export create](/cli/azure/monitor/app-insights/component/continues-export#az-monitor-app-insights-component-continues-export-create) -- [az monitor app-insights component continues-export delete](/cli/azure/monitor/app-insights/component/continues-export#az-monitor-app-insights-component-continues-export-delete) -- [az monitor app-insights component continues-export list](/cli/azure/monitor/app-insights/component/continues-export#az-monitor-app-insights-component-continues-export-list) -- [az monitor app-insights component create](/cli/azure/monitor/app-insights/component#az-monitor-app-insights-component-create) -- [az monitor app-insights component linked-storage link](/cli/azure/monitor/app-insights/component/linked-storage#az-monitor-app-insights-component-linked-storage-link) -- [az monitor app-insights component linked-storage unlink](/cli/azure/monitor/app-insights/component/linked-storage#az-monitor-app-insights-component-linked-storage-unlink) -- [az monitor app-insights component show](/cli/azure/monitor/app-insights/component#az-monitor-app-insights-component-show) -- [az monitor log-analytics workspace create](/cli/azure/monitor/log-analytics/workspace#az-monitor-log-analytics-workspace-create) -- [az storage account create](/cli/azure/storage/account#az-storage-account-create) -- [az storage container create](/cli/azure/storage/container#az-storage-container-create) -- [az storage container generate-sas](/cli/azure/storage/container#az-storage-container-generate-sas) -- [az storage container policy 
create](/cli/azure/storage/container/policy#az-storage-container-policy-create) -- [az webapp create](/cli/azure/webapp#az-webapp-create) - -## Next steps - -[Azure Monitor CLI samples](../cli-samples.md) - -[Find and diagnose performance issues](../app/tutorial-performance.md) - -[Monitor and alert on application health](../app/tutorial-alert.md) diff --git a/articles/azure-monitor/insights/data-explorer.md b/articles/azure-monitor/insights/data-explorer.md deleted file mode 100644 index 63a41adbdcafb..0000000000000 --- a/articles/azure-monitor/insights/data-explorer.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -title: Azure Data Explorer Insights| Microsoft Docs -description: This article describes how to use Azure Data Explorer Insights. -services: azure-monitor -ms.topic: conceptual -ms.date: 01/05/2021 - ---- - -# Azure Data Explorer Insights - -Azure Data Explorer Insights provides comprehensive monitoring of your clusters by delivering a unified view of your cluster performance, operations, usage, and failures. - -It offers: - -- **At-scale perspective**. A snapshot view of your clusters' primary metrics helps you track performance of queries, ingestion, and export operations. -- **Drill-down analysis**. You can drill down into a particular Azure Data Explorer cluster to perform detailed analysis. -- **Customization**. You can change which metrics you want to see, modify or set thresholds that align with your limits, and save your own custom workbooks. Charts in a workbook can be pinned to Azure dashboards. - -This article will help you understand how to onboard and use Azure Data Explorer Insights. - -## View from Azure Monitor (at-scale perspective) - -From Azure Monitor, you can view the main performance metrics for the cluster. These metrics include information about queries, ingestion, and export operations from multiple clusters in your subscription. They can help you identify performance problems. - -To view the performance of your clusters across all your subscriptions: - -1. Sign in to the [Azure portal](https://portal.azure.com/). - -2. Select **Monitor** from the left pane. In the **Insights Hub** section, select **Azure Data Explorer Clusters**. - -![Screenshot of selections for viewing the performance of Azure Data Explorer clusters.](./media/data-explorer/insights-hub.png) - -### Overview tab - -On the **Overview** tab for the selected subscription, the table displays interactive metrics for the Azure Data Explorer clusters grouped within the subscription. You can filter results based on the options that you select from the following dropdown lists: - -* **Subscriptions**: Only subscriptions that have Azure Data Explorer clusters are listed. - -* **Azure Data Explorer clusters**: By default, up to five clusters are pre-selected. If you select all or multiple clusters in the scope selector, up to 200 clusters will be returned. - -* **Time Range**: By default, the table displays the last 24 hours of information based on the corresponding selections made. - -The counter tile, under the dropdown list, gives the total number of Azure Data Explorer clusters in the selected subscriptions and shows how many are selected. There are conditional color codings for the columns: **Keep alive**, **CPU**, **Ingestion Utilization**, and **Cache Utilization**. Orange-coded cells have values that are not sustainable for the cluster. 
- -To better understand what each of the metrics represent, we recommend reading through the documentation on [Azure Data Explorer metrics](/azure/data-explorer/using-metrics#cluster-metrics). - -### Query Performance tab - -The **Query Performance** tab shows the query duration, the total number of concurrent queries, and the total number of throttled queries. - -![Screenshot of the Query Performance tab.](./media/data-explorer/query-performance.png) - -### Ingestion Performance tab - -The **Ingestion Performance** tab shows the ingestion latency, succeeded ingestion results, failed ingestion results, ingestion volume, and events processed for event hubs and IoT hubs. - -[![Screenshot of the Ingestion Performance tab.](./media/data-explorer/ingestion-performance.png)](./media/data-explorer/ingestion-performance.png#lightbox) - -### Streaming Ingest Performance tab - -The **Streaming Ingest Performance** tab provides information on the average data rate, average duration, and request rate. - -### Export Performance tab - -The **Export Performance** tab provides information on exported records, lateness, pending count, and utilization percentage for continuous export operations. - -## View from an Azure Data Explorer Cluster resource (drill-down analysis) - -To access Azure Data Explorer Insights directly from an Azure Data Explorer cluster: - -1. In the Azure portal, select **Azure Data Explorer Clusters**. - -2. From the list, choose an Azure Data Explorer cluster. In the monitoring section, select **Insights**. - -You can also access these views by selecting the resource name of an Azure Data Explorer cluster from within the Azure Monitor insights view. - -> [!NOTE] -> Azure Data Explorer Insights combines both logs and metrics to provide a global monitoring solution. The inclusion of logs-based visualizations requires users to [enable diagnostic logging of their Azure Data Explorer cluster and send them to a Log Analytics workspace](/azure/data-explorer/using-diagnostic-logs?tabs=commands-and-queries#enable-diagnostic-logs). The diagnostic logs that should be enabled are **Command**, **Query**, **SucceededIngestion**, **FailedIngestion**, **IngestionBatching**, **TableDetails**, and **TableUsageStatistics**. (Enabling **SucceededIngestion** logs might be costly. Enable them only if you need to monitor successful ingestions.) - -![Screenshot of the button for configuring logs for monitoring.](./media/data-explorer/enable-logs.png) - -### Overview tab - -The **Overview** tab shows: - -- Metrics tiles that highlight the availability and overall status of the cluster for quick health assessment. - -- A summary of active [Azure Advisor recommendations](/azure/data-explorer/azure-advisor) and [resource health](/azure/data-explorer/monitor-with-resource-health) status. - -- Charts that show the top CPU and memory consumers and the number of unique users over time. - -[![Screenshot of the view from an Azure Data Explorer cluster resource.](./media/data-explorer/overview.png)](./media/data-explorer/overview.png#lightbox) - -### Key Metrics tab - -The **Key Metrics** tab shows a unified view of some of the cluster's metrics. They're grouped into general metrics, query-related metrics, ingestion-related metrics, and streaming ingestion-related metrics. 
- -[![Screenshot of graphs on the Key Metrics tab.](./media/data-explorer/key-metrics.png)](./media/data-explorer/key-metrics.png#lightbox) - -### Usage tab - -The **Usage** tab allows users to deep dive into the performance of the cluster's commands and queries. On this tab, you can: - -- See which workload groups, users, and applications are sending the most queries or consuming the most CPU and memory. You can then understand which workloads are submitting the heaviest queries for the cluster to process. -- Identify top workload groups, users, and applications by failed queries. -- Identify recent changes in the number of queries, compared to the historical daily average (over the past 16 days), by workload group, user, and application. -- Identify trends and peaks in the number of queries, memory, and CPU consumption by workload group, user, application, and command type. - -The **Usage** tab includes actions that are performed directly by users. Internal cluster operations are not included in this tab. - -[![Screenshot of the operations view with donut charts related to commands and queries.](./media/data-explorer/usage.png)](./media/data-explorer/usage.png#lightbox) - -[![Screenshot of the operations view with line charts related to queries and memory.](./media/data-explorer/usage-2.png)](./media/data-explorer/usage-2.png#lightbox) - -### Tables tab - -The **Tables** tab shows the latest and historical properties of tables in the cluster. You can see which tables are consuming the most space. You can also track growth history by table size, hot data, and the number of rows over time. - -### Cache tab - -The **Cache** tab allows users to analyze their actual queries' lookback window patterns and compare them to the configured cache policy (for each table). You can identify tables used by the most queries and tables that are not queried at all, and adapt the cache policy accordingly. - -You might get cache policy recommendations on specific tables in Azure Advisor. Currently, cache recommendations are available only from the [main Azure Advisor dashboard](/azure/data-explorer/azure-advisor#use-the-azure-advisor-recommendations). They're based on actual queries' lookback window in the past 30 days and an unoptimized cache policy for at least 95 percent of the queries. - -Cache reduction recommendations in Azure Advisor are available for clusters that are "bounded by data." That means the cluster has low CPU and low ingestion utilization, but because of high data capacity, the cluster can't scale in or scale down. - -[![Screenshot of cache details.](./media/data-explorer/cache-tab.png)](./media/data-explorer/cache-tab.png#lightbox) - -### Cluster Boundaries tab - -The **Cluster Boundaries** tab displays the cluster boundaries based on your usage. On this tab, you can inspect the CPU, ingestion, and cache utilization. These metrics are scored as **Low**, **Medium**, or **High**. These metrics and scores are important when you're deciding on the optimal SKU and instance count for your cluster. They're taken into account in Azure Advisor SKU/size recommendations. - -On this tab, you can select a metric tile and deep dive to understand its trend and how its score is decided. You can also view the Azure Advisor SKU/size recommendation for your cluster. For example, in the following image, you can see that all metrics are scored as **Low**. The cluster receives a cost recommendation that will allow it to scale in/down and save cost. 
- -> [!div class="mx-imgBorder"] -> [![Screenshot of cluster boundaries.](./media/data-explorer/cluster-boundaries.png)](./media/data-explorer/cluster-boundaries.png#lightbox) - -## Pin to an Azure dashboard - -You can pin any one of the metric sections (of the "at-scale" perspective) to an Azure dashboard by selecting the pushpin icon at the upper right of the section. - -![Screenshot of the pin icon selected.](./media/data-explorer/pin.png) - -## Customize Azure Data Explorer Insights - -You can edit the workbook to customize it in support of your data analytics needs: -* Scope the workbook to always select a particular subscription or Azure Data Explorer clusters. -* Change metrics in the grid. -* Change thresholds or color rendering/coding. - -You can begin customizations by selecting the **Customize** button on the top toolbar. - -![Screenshot of the Customize button.](./media/data-explorer/customize.png) - -Customizations are saved to a custom workbook to prevent overwriting the default configuration in a published workbook. Workbooks are saved within a resource group, either in the **My Reports** section that's private to you or in the **Shared Reports** section that's accessible to everyone with access to the resource group. After you save the custom workbook, go to the workbook gallery to open it. - -![Screenshot of the workbook gallery.](./media/data-explorer/gallery.png) - -## Troubleshooting - -For general troubleshooting guidance, see the [Troubleshooting workbook-based insights](troubleshoot-workbooks.md) article. - -The following sections will help you diagnose and troubleshoot of some of the common problems that you might encounter when using Azure Data Explorer Insights. - -### Why don't I see all my subscriptions in the subscription picker? - -Azure Data Explorer Insights shows only subscriptions that contain Azure Data Explorer clusters chosen from the selected subscription filter. You select a subscription filter under **Directory + subscription** in the Azure portal. - -![Screenshot of selecting a subscription filter.](./media/key-vaults-insights-overview/Subscriptions.png) - -### Why don't I see any data for my Azure Data Explorer cluster under the Usage, Tables, or Cache section? - -To view your logs-based data, you need to [enable diagnostic logs](/azure/data-explorer/using-diagnostic-logs?tabs=commands-and-queries#enable-diagnostic-logs) for each Azure Data Explorer cluster that you want to monitor. You can do this under the diagnostic settings for each cluster. You'll need to send your data to a Log Analytics workspace. The diagnostic logs that should be enabled are **Command**, **Query**, **TableDetails**, and **TableUsageStatistics**. - -### I've already enabled logs for my Azure Data Explorer cluster. Why am I still unable to see my data under Commands and Queries? - -Currently, diagnostic logs don't work retroactively. The data will start appearing after actions have been taken in Azure Data Explorer. It might take some time, ranging from hours to a day, depending on how active your Azure Data Explorer cluster is. - -## Next steps - -Learn the scenarios that workbooks are designed to support, how to author new and customize existing reports, and more by reviewing [Create interactive reports with Azure Monitor workbooks](../visualize/workbooks-overview.md). 
diff --git a/articles/azure-monitor/insights/media/data-explorer/cache-tab.png b/articles/azure-monitor/insights/media/data-explorer/cache-tab.png deleted file mode 100644 index 7cb896d07be16..0000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/cache-tab.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/cluster-boundaries.png b/articles/azure-monitor/insights/media/data-explorer/cluster-boundaries.png deleted file mode 100644 index e931424c9d197..0000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/cluster-boundaries.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/customize.png b/articles/azure-monitor/insights/media/data-explorer/customize.png deleted file mode 100644 index c4f22350ee0c4..0000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/customize.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/enable-logs.png b/articles/azure-monitor/insights/media/data-explorer/enable-logs.png deleted file mode 100644 index 91963467abde0..0000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/enable-logs.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/gallery.png b/articles/azure-monitor/insights/media/data-explorer/gallery.png deleted file mode 100644 index 97001779ba81c..0000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/gallery.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/ingestion-performance.png b/articles/azure-monitor/insights/media/data-explorer/ingestion-performance.png deleted file mode 100644 index 74e21901ec752..0000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/ingestion-performance.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/insights-hub.png b/articles/azure-monitor/insights/media/data-explorer/insights-hub.png deleted file mode 100644 index 0168f88cf5213..0000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/insights-hub.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/key-metrics.png b/articles/azure-monitor/insights/media/data-explorer/key-metrics.png deleted file mode 100644 index a79966ce1fd29..0000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/key-metrics.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/overview.png b/articles/azure-monitor/insights/media/data-explorer/overview.png deleted file mode 100644 index 43514597b018f..0000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/overview.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/pin.png b/articles/azure-monitor/insights/media/data-explorer/pin.png deleted file mode 100644 index 72b4d8821b3ee..0000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/pin.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/query-performance.png b/articles/azure-monitor/insights/media/data-explorer/query-performance.png deleted file mode 100644 index 7b5dad7b06c14..0000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/query-performance.png and /dev/null differ diff --git 
a/articles/azure-monitor/insights/media/data-explorer/usage-2.png b/articles/azure-monitor/insights/media/data-explorer/usage-2.png deleted file mode 100644 index 553d209aade9c..0000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/usage-2.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/usage.png b/articles/azure-monitor/insights/media/data-explorer/usage.png deleted file mode 100644 index 7fcb3dc986221..0000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/usage.png and /dev/null differ diff --git a/articles/azure-monitor/insights/solution-targeting.md b/articles/azure-monitor/insights/solution-targeting.md index efae5d3f60a73..bcb57f2c26d08 100644 --- a/articles/azure-monitor/insights/solution-targeting.md +++ b/articles/azure-monitor/insights/solution-targeting.md @@ -4,7 +4,7 @@ description: Targeting monitoring solutions allows you to limit monitoring solut ms.topic: conceptual author: bwren ms.author: bwren -ms.date: 04/27/2017 +ms.date: 06/08/2022 --- diff --git a/articles/azure-monitor/insights/sql-insights-overview.md b/articles/azure-monitor/insights/sql-insights-overview.md index ca75148b6398e..72be4217b74e6 100644 --- a/articles/azure-monitor/insights/sql-insights-overview.md +++ b/articles/azure-monitor/insights/sql-insights-overview.md @@ -13,7 +13,7 @@ ms.date: 04/14/2022 SQL Insights (preview) is a comprehensive solution for monitoring any product in the [Azure SQL family](/azure/azure-sql/index). SQL Insights uses [dynamic management views](/azure/azure-sql/database/monitoring-with-dmvs) to expose the data that you need to monitor health, diagnose problems, and tune performance. SQL Insights performs all monitoring remotely. Monitoring agents on dedicated virtual machines connect to your SQL resources and remotely gather data. The gathered data is stored in [Azure Monitor Logs](../logs/data-platform-logs.md) to enable easy aggregation, filtering, and trend analysis. You can view the collected data from the SQL Insights [workbook template](../visualize/workbooks-overview.md), or you can delve directly into the data by using [log queries](../logs/get-started-queries.md). -The following diagram details the steps taken by information from the database engine and Azure resource logs, and how they can be surfaced. For a more detailed diagram of Azure SQL logging, see [Monitoring and diagnostic telemetry](/azure/azure-sql/database/monitor-tune-overview.md#monitoring-and-diagnostic-telemetry). +The following diagram details the steps taken by information from the database engine and Azure resource logs, and how they can be surfaced. For a more detailed diagram of Azure SQL logging, see [Monitoring and diagnostic telemetry](/azure/azure-sql/database/monitor-tune-overview#monitoring-and-diagnostic-telemetry). :::image type="content" source="media/sql-insights/azure-sql-insights-horizontal-analytics.svg" alt-text="Diagram showing how database engine information and resource logs are surfaced through AzureDiagnostics and Log Analytics."::: @@ -133,4 +133,4 @@ The tables have the following columns: ## Next steps - For frequently asked questions about SQL Insights (preview), see [Frequently asked questions](../faq.yml). 
-- [Monitoring and performance tuning in Azure SQL Database and Azure SQL Managed Instance](/azure/azure-sql/database/monitor-tune-overview) \ No newline at end of file +- [Monitoring and performance tuning in Azure SQL Database and Azure SQL Managed Instance](/azure/azure-sql/database/monitor-tune-overview) diff --git a/articles/azure-monitor/logs/basic-logs-configure.md b/articles/azure-monitor/logs/basic-logs-configure.md index 65132e842ba40..07f2c15680eaa 100644 --- a/articles/azure-monitor/logs/basic-logs-configure.md +++ b/articles/azure-monitor/logs/basic-logs-configure.md @@ -27,6 +27,27 @@ You can currently configure the following tables for Basic Logs: ## Set table configuration + +# [Portal](#tab/portal-1) + +To configure a table for Basic Logs or Analytics Logs in the Azure portal: + +1. From the **Log Analytics workspaces** menu, select **Tables (preview)**. + + The **Tables (preview)** screen lists all of the tables in the workspace. + +1. Select the context menu for the table you want to configure and select **Manage table**. + + :::image type="content" source="media/basic-logs-configure/log-analytics-table-configuration.png" lightbox="media/basic-logs-configure/log-analytics-table-configuration.png" alt-text="Screenshot showing the Manage table button for one of the tables in a workspace."::: + +1. From the **Table plan** dropdown on the table configuration screen, select **Basic** or **Analytics**. + + The **Table plan** dropdown is enabled only for [tables that support Basic Logs](#which-tables-support-basic-logs). + + :::image type="content" source="media/basic-logs-configure/log-analytics-configure-table-plan.png" lightbox="media/basic-logs-configure/log-analytics-configure-table-plan.png" alt-text="Screenshot showing the Table plan dropdown on the table configuration screen."::: + +1. Select **Save**. + # [API](#tab/api-1) To configure a table for Basic Logs or Analytics Logs, call the **Tables - Update** API: @@ -45,12 +66,14 @@ PATCH https://management.azure.com/subscriptions//resourcegroups **Example** -This example configures the `ContainerLog` table for Basic Logs. +This example configures the `ContainerLogV2` table for Basic Logs. + +Container Insights uses ContainerLog by default, to switch to using ContainerLogV2, please follow these [instructions](../containers/container-insights-logging-v2.md) before attempting to convert the table to Basic Logs. 
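If you prefer to script this check, the following is a minimal PowerShell sketch (an illustrative addition, not part of the official article) that reads the table's current plan before you convert it. It assumes the `Invoke-AzRestMethod` cmdlet from the Az PowerShell module and reuses the hypothetical `ContosoSID`, `ContosoRG`, and `ContosoWorkspace` names from the sample request that follows; the `plan`, `retentionInDays`, and `totalRetentionInDays` property names are assumed to match the Tables API responses shown elsewhere in this article.

```powershell
# Minimal sketch, assuming the Az module is installed and you're signed in (Connect-AzAccount).
# ContosoSID, ContosoRG, and ContosoWorkspace are hypothetical placeholders.
$path = "/subscriptions/ContosoSID/resourcegroups/ContosoRG/providers/" +
        "Microsoft.OperationalInsights/workspaces/ContosoWorkspace/tables/ContainerLogV2" +
        "?api-version=2021-12-01-preview"

# Read the table and show its current plan and retention before switching it to Basic Logs.
$table = (Invoke-AzRestMethod -Path $path -Method GET).Content | ConvertFrom-Json
$table.properties | Select-Object plan, retentionInDays, totalRetentionInDays
```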
**Sample request** ```http -PATCH https://management.azure.com/subscriptions/ContosoSID/resourcegroups/ContosoRG/providers/Microsoft.OperationalInsights/workspaces/ContosoWorkspace/tables/ContainerLog?api-version=2021-12-01-preview +PATCH https://management.azure.com/subscriptions/ContosoSID/resourcegroups/ContosoRG/providers/Microsoft.OperationalInsights/workspaces/ContosoWorkspace/tables/ContainerLogV2?api-version=2021-12-01-preview ``` Use this request body to change to Basic Logs: @@ -90,7 +113,7 @@ Status code: 200 "schema": {...} }, "id": "subscriptions/ContosoSID/resourcegroups/ContosoRG/providers/Microsoft.OperationalInsights/workspaces/ContosoWorkspace", - "name": "ContainerLog" + "name": "ContainerLogV2" } ``` @@ -103,21 +126,23 @@ For example: - To set Basic Logs: ```azurecli - az monitor log-analytics workspace table update --subscription ContosoSID --resource-group ContosoRG --workspace-name ContosoWorkspace --name ContainerLog --plan Basic + az monitor log-analytics workspace table update --subscription ContosoSID --resource-group ContosoRG --workspace-name ContosoWorkspace --name ContainerLogV2 --plan Basic ``` - To set Analytics Logs: ```azurecli - az monitor log-analytics workspace table update --subscription ContosoSID --resource-group ContosoRG --workspace-name ContosoWorkspace --name ContainerLog --plan Analytics + az monitor log-analytics workspace table update --subscription ContosoSID --resource-group ContosoRG --workspace-name ContosoWorkspace --name ContainerLogV2 --plan Analytics ``` --- ## Check table configuration -# [Portal](#tab/portal-1) +# [Portal](#tab/portal-2) + +To check table configuration in the Azure portal, you can open the table configuration screen, as described in [Set table configuration](#set-table-configuration). -To check the configuration of a table in the Azure portal: +Alternatively: 1. From the **Azure Monitor** menu, select **Logs** and select your workspace for the [scope](scope.md). See [Log Analytics tutorial](log-analytics-tutorial.md#view-table-information) for a walkthrough. 1. Open the **Tables** tab, which lists all tables in the workspace. @@ -126,7 +151,7 @@ To check the configuration of a table in the Azure portal: ![Screenshot of the Basic Logs table icon in the table list.](./media/basic-logs-configure/table-icon.png#lightbox) - You can also hover over a table name for the table information view. 
This will specify that the table is configured as Basic Logs: + You can also hover over a table name for the table information view, which indicates whether the table is configured as Basic Logs: ![Screenshot of the Basic Logs table indicator in the table details.](./media/basic-logs-configure/table-info.png#lightbox) @@ -151,7 +176,7 @@ GET https://management.azure.com/subscriptions/{subscriptionId}/resourcegroups/{ **Sample Request** ```http -GET https://management.azure.com/subscriptions/ContosoSID/resourcegroups/ContosoRG/providers/Microsoft.OperationalInsights/workspaces/ContosoWorkspace/tables/ContainerLog?api-version=2021-12-01-preview +GET https://management.azure.com/subscriptions/ContosoSID/resourcegroups/ContosoRG/providers/Microsoft.OperationalInsights/workspaces/ContosoWorkspace/tables/ContainerLogV2?api-version=2021-12-01-preview ``` @@ -170,7 +195,7 @@ Status code: 200 "provisioningState": "Succeeded" }, "id": "subscriptions/ContosoSID/resourcegroups/ContosoRG/providers/Microsoft.OperationalInsights/workspaces/ContosoWorkspace", - "name": "ContainerLog" + "name": "ContainerLogV2" } ``` diff --git a/articles/azure-monitor/logs/cost-logs.md b/articles/azure-monitor/logs/cost-logs.md index 50cdfea638182..d0ecb8b45be72 100644 --- a/articles/azure-monitor/logs/cost-logs.md +++ b/articles/azure-monitor/logs/cost-logs.md @@ -4,23 +4,24 @@ description: Cost details for data stored in a Log Analytics workspace in Azure ms.topic: conceptual ms.reviewer: Dale.Koetke ms.date: 03/24/2022 +ms.reviewer: dalek --- # Azure Monitor Logs pricing details The most significant charges for most Azure Monitor implementations will typically be ingestion and retention of data in your Log Analytics workspaces. Several features in Azure Monitor do not have a direct cost but add to the workspace data that's collected. This article describes how data charges are calculated for your Log Analytics workspaces and Application Insights resources and the different configuration options that affect your costs. ## Pricing model -The default pricing for Log Analytics is a Pay-As-You-Go model that's based on ingested data volume and data retention. Each Log Analytics workspace is charged as a separate service and contributes to the bill for your Azure subscription. The amount of data ingestion can be considerable, depending on the following factors: +The default pricing for Log Analytics is a Pay-As-You-Go model that's based on ingested data volume and data retention. Each Log Analytics workspace is charged as a separate service and contributes to the bill for your Azure subscription. [Pricing for Log Analytics](https://azure.microsoft.com/pricing/details/monitor/) is set regionally. The amount of data ingestion can be considerable, depending on the following factors: - The set of management solutions enabled and their configuration - The number and type of monitored resources -- Type of data collected from each monitored resource +- The types of data collected from each monitored resource ## Data size calculation Data volume is measured as the size of the data that will be stored in GB (10^9 bytes). The data size of a single record is calculated from a string representation of the columns that are stored in the Log Analytics workspace for that record, regardless of whether the data is sent from an agent or added during the ingestion process.
This includes any custom columns added by the [custom logs API](custom-logs-overview.md), [ingestion-time transformations](ingestion-time-transformations.md), or [custom fields](custom-fields.md) that are added as data is collected and then stored in the workspace. >[!NOTE] ->The billable data volume calculation is substantially smaller than the size of the entire incoming JSON-packaged event, often less than 50%. It is essential to understand this calculation of billed data size when estimating costs and comparing to other pricing models. +>The billable data volume calculation is substantially smaller than the size of the entire incoming JSON-packaged event, often less than 50% for small events. It is essential to understand this calculation of billed data size when estimating costs and comparing to other pricing models. ### Excluded columns The following [standard columns](log-standard-columns.md) that are common to all tables, are excluded in the calculation of the record size. All other columns stored in Log Analytics are included in the calculation of the record size. @@ -115,7 +116,6 @@ When Microsoft Sentinel is enabled in a Log Analytics workspace, all data collec - [SecurityDetection](/azure/azure-monitor/reference/tables/securitydetection) - [SecurityEvent](/azure/azure-monitor/reference/tables/securityevent) - [WindowsFirewall](/azure/azure-monitor/reference/tables/windowsfirewall) -- [MaliciousIPCommunication](/azure/azure-monitor/reference/tables/maliciousipcommunication) - [LinuxAuditLog](/azure/azure-monitor/reference/tables/linuxauditlog) - [SysmonEvent](/azure/azure-monitor/reference/tables/sysmonevent) - [ProtectionStatus](/azure/azure-monitor/reference/tables/protectionstatus) diff --git a/articles/azure-monitor/logs/cross-workspace-query.md b/articles/azure-monitor/logs/cross-workspace-query.md index 233ec68a22333..2e81b2bbc834f 100644 --- a/articles/azure-monitor/logs/cross-workspace-query.md +++ b/articles/azure-monitor/logs/cross-workspace-query.md @@ -17,7 +17,7 @@ If you manage subscriptions in other Azure Active Directory (Azure AD) tenants t There are two methods to query data that is stored in multiple workspace and apps: 1. Explicitly by specifying the workspace and app details. This technique is detailed in this article. -2. Implicitly using [resource-context queries](./design-logs-deployment.md#access-mode). When you query in the context of a specific resource, resource group or a subscription, the relevant data will be fetched from all workspaces that contains data for these resources. Application Insights data that is stored in apps, will not be fetched. +2. Implicitly using [resource-context queries](manage-access.md#access-mode). When you query in the context of a specific resource, resource group or a subscription, the relevant data will be fetched from all workspaces that contains data for these resources. Application Insights data that is stored in apps, will not be fetched. > [!IMPORTANT] > If you are using a [workspace-based Application Insights resource](../app/create-workspace-resource.md), telemetry is stored in a Log Analytics workspace with all other log data. Use the workspace() expression to write a query that includes applications in multiple workspaces. For multiple applications in the same workspace, you don't need a cross workspace query. 
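As a rough illustration of the explicit method, the following PowerShell sketch (an illustrative addition, not from the original article) runs one query across two workspaces with the `workspace()` expression. It assumes the `Invoke-AzOperationalInsightsQuery` cmdlet from the Az.OperationalInsights module; the workspace GUID and the `contosoretail-it` workspace name are placeholders to replace with your own values.

```powershell
# Minimal sketch: query the current workspace plus a second one named "contosoretail-it".
$kql = @"
union Heartbeat, workspace("contosoretail-it").Heartbeat
| where TimeGenerated > ago(1h)
| summarize heartbeats = count() by Computer
"@

# -WorkspaceId is the workspace (customer) GUID of the workspace the query runs against.
$result = Invoke-AzOperationalInsightsQuery -WorkspaceId "00000000-0000-0000-0000-000000000000" -Query $kql
$result.Results | Format-Table
```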
diff --git a/articles/azure-monitor/logs/customer-managed-keys.md b/articles/azure-monitor/logs/customer-managed-keys.md index e7ee415e5bbc8..73d9ba0ba9334 100644 --- a/articles/azure-monitor/logs/customer-managed-keys.md +++ b/articles/azure-monitor/logs/customer-managed-keys.md @@ -254,10 +254,9 @@ When link your own storage (BYOS) to workspace, the service stores *saved-search * You need to have "write" permissions on your workspace and Storage Account. * Make sure to create your Storage Account in the same region as your Log Analytics workspace is located. * The *saves searches* in storage is considered as service artifacts and their format may change. -* Existing *saves searches* are removed from your workspace. Copy and any *saves searches* that you need before the configuration. You can view your *saved-searches* using [PowerShell](/powershell/module/az.operationalinsights/get-azoperationalinsightssavedsearch). -* Query history isn't supported and you won't be able to see queries that you ran. +* Existing *saves searches* are removed from your workspace. Copy any *saves searches* that you need before this configuration. You can view your *saved-searches* using [PowerShell](/powershell/module/az.operationalinsights/get-azoperationalinsightssavedsearch). +* Query 'history' and 'pin to dashboard' aren't supported when linking Storage Account for queries. * You can link a single Storage Account to a workspace, which can be used for both *saved-searches* and *log alerts* queries. -* Pin to dashboard isn't supported. * Fired log alerts will not contains search results or alert query. You can use [alert dimensions](../alerts/alerts-unified-log.md#split-by-alert-dimensions) to get context in the fired alerts. **Configure BYOS for saved-searches queries** @@ -380,21 +379,15 @@ Customer-Managed key is provided on dedicated cluster and these operations are r ## Limitations and constraints -- The max number of cluster per region and subscription is two. +- A maximum of five active clusters can be created in each region and subscription. -- The maximum number of workspaces that can be linked to a cluster is 1000. +- A maximum number of seven reserved clusters (active or recently deleted) can exist in each region and subscription. -- You can link a workspace to your cluster and then unlink it. The number of workspace link operations on particular workspace is limited to two in a period of 30 days. +- A maximum of 1,000 Log Analytics workspaces can be linked to a cluster. -- Customer-managed key encryption applies to newly ingested data after the configuration time. Data that was ingested prior to the configuration, remains encrypted with Microsoft key. You can query data ingested before and after the Customer-managed key configuration seamlessly. - -- The Azure Key Vault must be configured as recoverable. These properties aren't enabled by default and should be configured using CLI or PowerShell:
                  - - [Soft Delete](../../key-vault/general/soft-delete-overview.md). - - [Purge protection](../../key-vault/general/soft-delete-overview.md#purge-protection) should be turned on to guard against force deletion of the secret, vault even after soft delete. - -- Cluster move to another resource group or subscription isn't supported currently. +- A maximum of two workspace link operations on a particular workspace is allowed in a 30-day period. -- Your Azure Key Vault, cluster and workspaces must be in the same region and in the same Azure Active Directory (Azure AD) tenant, but they can be in different subscriptions. +- Moving a cluster to another resource group or subscription isn't currently supported. - Cluster update should not include both identity and key identifier details in the same operation. In case you need to update both, the update should be in two consecutive operations. @@ -404,9 +397,19 @@ Customer-Managed key is provided on dedicated cluster and these operations are r - If you create a cluster and get an error—"region-name doesn’t support Double Encryption for clusters", you can still create the cluster without Double encryption, by adding `"properties": {"isDoubleEncryptionEnabled": false}` in the REST request body. - Double encryption setting can not be changed after the cluster has been created. - - Setting the cluster's `identity` `type` to `None` also revokes access to your data, but this approach isn't recommended since you can't revert it without contacting support. The recommended way to revoke access to your data is [key revocation](#key-revocation). +- Deleting a linked workspace is permitted while it's linked to a cluster. If you decide to [recover](./delete-workspace.md#recover-workspace) the workspace during the [soft-delete](./delete-workspace.md#soft-delete-behavior) period, it returns to its previous state and remains linked to the cluster. + +- Customer-managed key encryption applies to newly ingested data after the configuration time. Data that was ingested prior to the configuration, remains encrypted with Microsoft key. You can query data ingested before and after the Customer-managed key configuration seamlessly. + +- The Azure Key Vault must be configured as recoverable. These properties aren't enabled by default and should be configured using CLI or PowerShell:
                  + - [Soft Delete](../../key-vault/general/soft-delete-overview.md). + - [Purge protection](../../key-vault/general/soft-delete-overview.md#purge-protection) should be turned on to guard against force deletion of the secret, vault even after soft delete. + +- Your Azure Key Vault, cluster and workspaces must be in the same region and in the same Azure Active Directory (Azure AD) tenant, but they can be in different subscriptions. + +- Setting the cluster's `identity` `type` to `None` also revokes access to your data, but this approach isn't recommended since you can't revert it without contacting support. The recommended way to revoke access to your data is [key revocation](#key-revocation). - - You can't use Customer-managed key with User-assigned managed identity if your Key Vault is in Private-Link (vNet). You can use System-assigned managed identity in this scenario. +- You can't use Customer-managed key with User-assigned managed identity if your Key Vault is in Private-Link (vNet). You can use System-assigned managed identity in this scenario. ## Troubleshooting @@ -472,4 +475,4 @@ Customer-Managed key is provided on dedicated cluster and these operations are r ## Next steps - Learn about [Log Analytics dedicated cluster billing](cost-logs.md#dedicated-clusters) -- Learn about [proper design of Log Analytics workspaces](./design-logs-deployment.md) +- Learn about [proper design of Log Analytics workspaces](./workspace-design.md) diff --git a/articles/azure-monitor/logs/data-collector-api.md b/articles/azure-monitor/logs/data-collector-api.md index f53c6f703fc97..85fd99c4fbaf9 100644 --- a/articles/azure-monitor/logs/data-collector-api.md +++ b/articles/azure-monitor/logs/data-collector-api.md @@ -47,7 +47,7 @@ To use the HTTP Data Collector API, you create a POST request that includes the | Authorization |The authorization signature. Later in the article, you can read about how to create an HMAC-SHA256 header. | | Log-Type |Specify the record type of the data that's being submitted. It can contain only letters, numbers, and the underscore (_) character, and it can't exceed 100 characters. | | x-ms-date |The date that the request was processed, in RFC 7234 format. | -| x-ms-AzureResourceId | The resource ID of the Azure resource that the data should be associated with. It populates the [_ResourceId](./log-standard-columns.md#_resourceid) property and allows the data to be included in [resource-context](./design-logs-deployment.md#access-mode) queries. If this field isn't specified, the data won't be included in resource-context queries. | +| x-ms-AzureResourceId | The resource ID of the Azure resource that the data should be associated with. It populates the [_ResourceId](./log-standard-columns.md#_resourceid) property and allows the data to be included in [resource-context](manage-access.md#access-mode) queries. If this field isn't specified, the data won't be included in resource-context queries. | | time-generated-field | The name of a field in the data that contains the timestamp of the data item. If you specify a field, its contents are used for **TimeGenerated**. If you don't specify this field, the default for **TimeGenerated** is the time that the message is ingested. The contents of the message field should follow the ISO 8601 format YYYY-MM-DDThh:mm:ssZ. 
Note: the Time Generated value cannot be older than 3 days before received time or the row will be dropped.| | | | diff --git a/articles/azure-monitor/logs/data-platform-logs.md b/articles/azure-monitor/logs/data-platform-logs.md index eee5866f85afd..fcb2d3a502573 100644 --- a/articles/azure-monitor/logs/data-platform-logs.md +++ b/articles/azure-monitor/logs/data-platform-logs.md @@ -48,7 +48,7 @@ This configuration will be different depending on the data source. For example: For a complete list of data sources that you can configure to send data to Azure Monitor Logs, see [What is monitored by Azure Monitor?](../monitor-reference.md). ## Log Analytics workspaces -Azure Monitor Logs stores the data that it collects in one or more [Log Analytics workspaces](./design-logs-deployment.md). You must create at least one workspace to use Azure Monitor Logs. See [Log Analytics workspace overview](log-analytics-workspace-overview.md) For a description of Log Analytics workspaces. +Azure Monitor Logs stores the data that it collects in one or more [Log Analytics workspaces](./workspace-design.md). You must create at least one workspace to use Azure Monitor Logs. See [Log Analytics workspace overview](log-analytics-workspace-overview.md) For a description of Log Analytics workspaces. ## Log Analytics Log Analytics is a tool in the Azure portal. Use it to edit and run log queries and interactively analyze their results. You can then use those queries to support other features in Azure Monitor, such as log query alerts and workbooks. Access Log Analytics from the **Logs** option on the Azure Monitor menu or from most other services in the Azure portal. diff --git a/articles/azure-monitor/logs/data-retention-archive.md b/articles/azure-monitor/logs/data-retention-archive.md index 0a4774cc776a6..b16534a341812 100644 --- a/articles/azure-monitor/logs/data-retention-archive.md +++ b/articles/azure-monitor/logs/data-retention-archive.md @@ -23,7 +23,7 @@ During the interactive retention period, data is available for monitoring, troub > The archive feature is currently in public preview and can only be set at the table level, not at the workspace level. ## Configure the default workspace retention policy -You can set the workspace default retention policy in the Azure portal to 30, 31, 60, 90, 120, 180, 270, 365, 550, and 730 days. To set a different policy, use the Resource Manager configuration method described below. If you're on the *free* tier, you need to upgrade to the paid tier to change the data retention period. +You can set the workspace default retention policy in the Azure portal to 30, 31, 60, 90, 120, 180, 270, 365, 550, and 730 days. You can set a different policy for specific tables by [configuring retention and archive policy at the table level](#set-retention-and-archive-policy-by-table). If you're on the *free* tier, you'll need to upgrade to the paid tier to change the data retention period. To set the default workspace retention policy: @@ -37,17 +37,25 @@ To set the default workspace retention policy: ## Set retention and archive policy by table -You can set retention policies for individual tables, except for workspaces in the legacy Free Trial pricing tier, using Azure Resource Manager APIs. You can’t currently configure data retention for individual tables in the Azure portal. +By default, all tables in your workspace inherit the workspace's interactive retention setting and have no archive policy. 
You can modify the retention and archive policies of individual tables, except for workspaces in the legacy Free Trial pricing tier. You can keep data in interactive retention between 4 and 730 days. You can set the archive period for a total retention time of up to 2,555 days (seven years). -Each table is a subresource of the workspace it's in. For example, you can address the `SecurityEvent` table in [Azure Resource Manager](../../azure-resource-manager/management/overview.md) as: +# [Portal](#tab/portal-1) -``` -/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/MyResourceGroupName/providers/Microsoft.OperationalInsights/workspaces/MyWorkspaceName/Tables/SecurityEvent -``` +To set the retention and archive duration for a table in the Azure portal: + +1. From the **Log Analytics workspaces** menu, select **Tables (preview)**. + + The **Tables (preview)** screen lists all of the tables in the workspace. + +1. Select the context menu for the table you want to configure and select **Manage table**. + + :::image type="content" source="media/basic-logs-configure/log-analytics-table-configuration.png" lightbox="media/basic-logs-configure/log-analytics-table-configuration.png" alt-text="Screenshot showing the Manage table button for one of the tables in a workspace."::: -The table name is case-sensitive. +1. Configure the retention and archive duration in **Data retention settings** section of the table configuration screen. + + :::image type="content" source="media/data-retention-configure/log-analytics-configure-table-retention-archive.png" lightbox="media/data-retention-configure/log-analytics-configure-table-retention-archive.png" alt-text="Screenshot showing the data retention settings on the table configuration screen."::: # [API](#tab/api-1) @@ -58,7 +66,7 @@ PATCH https://management.azure.com/subscriptions/{subscriptionId}/resourcegroups ``` > [!NOTE] -> You don't explicitly specify the archive duration in the API call. Instead, you set the total retention, which specifies the retention plus the archive duration. +> You don't explicitly specify the archive duration in the API call. Instead, you set the total retention, which is the sum of the interactive retention plus the archive duration. You can use either PUT or PATCH, with the following difference: @@ -133,6 +141,15 @@ az monitor log-analytics workspace table update --subscription ContosoSID --reso ## Get retention and archive policy by table +# [Portal](#tab/portal-2) + +To view the retention and archive duration for a table in the Azure portal, from the **Log Analytics workspaces** menu, select **Tables (preview)**. + +The **Tables (preview)** screen shows the interactive retention and archive period for all of the tables in the workspace. 
+ +:::image type="content" source="media/data-retention-configure/log-analytics-view-table-retention-archive.png" lightbox="media/data-retention-configure/log-analytics-view-table-retention-archive.png" alt-text="Screenshot showing the Manage table button for one of the tables in a workspace."::: + + # [API](#tab/api-2) To get the retention policy of a particular table (in this example, `SecurityEvent`), call the **Tables - Get** API: diff --git a/articles/azure-monitor/logs/design-logs-deployment.md b/articles/azure-monitor/logs/design-logs-deployment.md deleted file mode 100644 index fdbbedcc6d933..0000000000000 --- a/articles/azure-monitor/logs/design-logs-deployment.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: Designing your Azure Monitor Logs deployment | Microsoft Docs -description: This article describes the considerations and recommendations for customers preparing to deploy a workspace in Azure Monitor. -ms.topic: conceptual -author: guywi-ms -ms.author: guywild -ms.reviewer: meirm -ms.date: 05/04/2022 - ---- - -# Designing your Azure Monitor Logs deployment - -Azure Monitor stores [log](data-platform-logs.md) data in a Log Analytics workspace, which is an Azure resource and a container where data is collected, aggregated, and serves as an administrative boundary. While you can deploy one or more workspaces in your Azure subscription, there are several considerations you should understand in order to ensure your initial deployment is following our guidelines to provide you with a cost effective, manageable, and scalable deployment meeting your organization's needs. - -Data in a workspace is organized into tables, each of which stores different kinds of data and has its own unique set of properties based on the resource generating the data. Most data sources will write to their own tables in a Log Analytics workspace. - -![Example workspace data model](./media/design-logs-deployment/logs-data-model-01.png) - -A Log Analytics workspace provides: - -* A geographic location for data storage. -* Data isolation by granting different users access rights following one of our recommended design strategies. -* Scope for configuration of settings like [pricing tier](cost-logs.md#commitment-tiers), [retention](data-retention-archive.md), and [data capping](daily-cap.md). - -Workspaces are hosted on physical clusters. By default, the system is creating and managing these clusters. Customers that ingest more than 4TB/day are expected to create their own dedicated clusters for their workspaces - it enables them better control and higher ingestion rate. - -This article provides a detailed overview of the design and migration considerations, access control overview, and an understanding of the design implementations we recommend for your IT organization. - - - -## Important considerations for an access control strategy - -Identifying the number of workspaces you need is influenced by one or more of the following requirements: - -* You are a global company and you need log data stored in specific regions for data sovereignty or compliance reasons. -* You are using Azure and you want to avoid outbound data transfer charges by having a workspace in the same region as the Azure resources it manages. -* You manage multiple departments or business groups, and you want each to see their own data, but not data from others. Also, there is no business requirement for a consolidated cross department or business group view. 
- -IT organizations today are modeled following either a centralized, decentralized, or an in-between hybrid of both structures. As a result, the following workspace deployment models have been commonly used to map to one of these organizational structures: - -* **Centralized**: All logs are stored in a central workspace and administered by a single team, with Azure Monitor providing differentiated access per-team. In this scenario, it is easy to manage, search across resources, and cross-correlate logs. The workspace can grow significantly depending on the amount of data collected from multiple resources in your subscription, with additional administrative overhead to maintain access control to different users. This model is known as "hub and spoke". -* **Decentralized**: Each team has their own workspace created in a resource group they own and manage, and log data is segregated per resource. In this scenario, the workspace can be kept secure and access control is consistent with resource access, but it's difficult to cross-correlate logs. Users who need a broad view of many resources cannot analyze the data in a meaningful way. -* **Hybrid**: Security audit compliance requirements further complicate this scenario because many organizations implement both deployment models in parallel. This commonly results in a complex, expensive, and hard-to-maintain configuration with gaps in logs coverage. - -When using the Log Analytics agents to collect data, you need to understand the following in order to plan your agent deployment: - -* To collect data from Windows agents, you can [configure each agent to report to one or more workspaces](./../agents/agent-windows.md), even while it is reporting to a System Center Operations Manager management group. The Windows agent can report up to four workspaces. -* The Linux agent does not support multi-homing and can only report to a single workspace. - -If you are using System Center Operations Manager 2012 R2 or later: - -* Each Operations Manager management group can be [connected to only one workspace](../agents/om-agents.md). -* Linux computers reporting to a management group must be configured to report directly to a Log Analytics workspace. If your Linux computers are already reporting directly to a workspace and you want to monitor them with Operations Manager, follow these steps to [report to an Operations Manager management group](../agents/agent-manage.md#configure-agent-to-report-to-an-operations-manager-management-group). -* You can install the Log Analytics Windows agent on the Windows computer and have it report to both Operations Manager integrated with a workspace, and a different workspace. - -## Access control overview - -With Azure role-based access control (Azure RBAC), you can grant users and groups only the amount of access they need to work with monitoring data in a workspace. This allows you to align with your IT organization operating model using a single workspace to store collected data enabled on all your resources. For example, you grant access to your team responsible for infrastructure services hosted on Azure virtual machines (VMs), and as a result they'll have access to only the logs generated by the VMs. This is following our new resource-context log model. The basis for this model is for every log record emitted by an Azure resource, it is automatically associated with this resource. Logs are forwarded to a central workspace that respects scoping and Azure RBAC based on the resources. 
- -The data a user has access to is determined by a combination of factors that are listed in the following table. Each is described in the sections below. - -| Factor | Description | -|:---|:---| -| [Access mode](#access-mode) | Method the user uses to access the workspace. Defines the scope of the data available and the access control mode that's applied. | -| [Access control mode](#access-control-mode) | Setting on the workspace that defines whether permissions are applied at the workspace or resource level. | -| [Permissions](./manage-access.md) | Permissions applied to individual or groups of users for the workspace or resource. Defines what data the user will have access to. | -| [Table level Azure RBAC](./manage-access.md#table-level-azure-rbac) | Optional granular permissions that apply to all users regardless of their access mode or access control mode. Defines which data types a user can access. | - -## Access mode - -The *access mode* refers to how a user accesses a Log Analytics workspace and defines the scope of data they can access. - -Users have two options for accessing the data: - -* **Workspace-context**: You can view all logs in the workspace you have permission to. Queries in this mode are scoped to all data in all tables in the workspace. This is the access mode used when logs are accessed with the workspace as the scope, such as when you select **Logs** from the **Azure Monitor** menu in the Azure portal. - - ![Log Analytics context from workspace](./media/design-logs-deployment/query-from-workspace.png) - -* **Resource-context**: When you access the workspace for a particular resource, resource group, or subscription, such as when you select **Logs** from a resource menu in the Azure portal, you can view logs for only resources in all tables that you have access to. Queries in this mode are scoped to only data associated with that resource. This mode also enables granular Azure RBAC. - - ![Log Analytics context from resource](./media/design-logs-deployment/query-from-resource.png) - - > [!NOTE] - > Logs are available for resource-context queries only if they were properly associated with the relevant resource. Currently, the following resources have limitations: - > - Computers outside of Azure - Supported for resource-context only via [Azure Arc for Servers](../../azure-arc/servers/index.yml) - > - Service Fabric - > - Application Insights - Supported for resource-context only when using [Workspace-based Application Insights resource](../app/create-workspace-resource.md) - > - > You can test if logs are properly associated with their resource by running a query and inspecting the records you're interested in. If the correct resource ID is in the [_ResourceId](./log-standard-columns.md#_resourceid) property, then data is available to resource-centric queries. - -Azure Monitor automatically determines the right mode depending on the context you perform the log search from. The scope is always presented in the top-left section of Log Analytics. - -### Comparing access modes - -The following table summarizes the access modes: - -| Issue | Workspace-context | Resource-context | -|:---|:---|:---| -| Who is each model intended for? | Central administration. Administrators who need to configure data collection and users who need access to a wide variety of resources. Also currently required for users who need to access logs for resources outside of Azure. | Application teams. Administrators of Azure resources being monitored. | -| What does a user require to view logs? 
| Permissions to the workspace. See **Workspace permissions** in [Manage access using workspace permissions](./manage-access.md#manage-access-using-workspace-permissions). | Read access to the resource. See **Resource permissions** in [Manage access using Azure permissions](./manage-access.md#manage-access-using-azure-permissions). Permissions can be inherited (such as from the containing resource group) or directly assigned to the resource. Permission to the logs for the resource will be automatically assigned. | -| What is the scope of permissions? | Workspace. Users with access to the workspace can query all logs in the workspace from tables that they have permissions to. See [Table access control](./manage-access.md#table-level-azure-rbac) | Azure resource. User can query logs for specific resources, resource groups, or subscription they have access to from any workspace but can't query logs for other resources. | -| How can user access logs? |
                  • Start **Logs** from **Azure Monitor** menu.
                  • Start **Logs** from **Log Analytics workspaces**.
                  • From Azure Monitor [Workbooks](../best-practices-analysis.md#workbooks).
                  |
                  • Start **Logs** from the menu for the Azure resource
                  • Start **Logs** from **Azure Monitor** menu.
                  • Start **Logs** from **Log Analytics workspaces**.
                  • From Azure Monitor [Workbooks](../best-practices-analysis.md#workbooks).
                  | - -## Access control mode - -The *Access control mode* is a setting on each workspace that defines how permissions are determined for the workspace. - -* **Require workspace permissions**: This control mode does not allow granular Azure RBAC. For a user to access the workspace, they must be granted permissions to the workspace or to specific tables. - - If a user accesses the workspace following the workspace-context mode, they have access to all data in any table they've been granted access to. If a user accesses the workspace following the resource-context mode, they have access to only data for that resource in any table they've been granted access to. - - This is the default setting for all workspaces created before March 2019. - -* **Use resource or workspace permissions**: This control mode allows granular Azure RBAC. Users can be granted access to only data associated with resources they can view by assigning Azure `read` permission. - - When a user accesses the workspace in workspace-context mode, workspace permissions apply. When a user accesses the workspace in resource-context mode, only resource permissions are verified, and workspace permissions are ignored. Enable Azure RBAC for a user by removing them from workspace permissions and allowing their resource permissions to be recognized. - - This is the default setting for all workspaces created after March 2019. - - > [!NOTE] - > If a user has only resource permissions to the workspace, they are only able to access the workspace using resource-context mode assuming the workspace access mode is set to **Use resource or workspace permissions**. - -To learn how to change the access control mode in the portal, with PowerShell, or using a Resource Manager template, see [Configure access control mode](./manage-access.md#configure-access-control-mode). - -## Scale and ingestion volume rate limit - -Azure Monitor is a high scale data service that serves thousands of customers sending petabytes of data each month at a growing pace. Workspaces are not limited in their storage space and can grow to petabytes of data. There is no need to split workspaces due to scale. - -To protect and isolate Azure Monitor customers and its backend infrastructure, there is a default ingestion rate limit that is designed to protect from spikes and floods situations. The rate limit default is about **6 GB/minute** and is designed to enable normal ingestion. For more details on ingestion volume limit measurement, see [Azure Monitor service limits](../service-limits.md#data-ingestion-volume-rate). - -Customers that ingest less than 4TB/day will usually not meet these limits. Customers that ingest higher volumes or that have spikes as part of their normal operations shall consider moving to [dedicated clusters](./logs-dedicated-clusters.md) where the ingestion rate limit could be raised. - -When the ingestion rate limit is activated or get to 80% of the threshold, an event is added to the *Operation* table in your workspace. It is recommended to monitor it and create an alert. See more details in [data ingestion volume rate](../service-limits.md#data-ingestion-volume-rate). - - -## Recommendations - -![Resource-context design example](./media/design-logs-deployment/workspace-design-resource-context-01.png) - -This scenario covers a single workspace design in your IT organization's subscription that is not constrained by data sovereignty or regulatory compliance, or needs to map to the regions your resources are deployed within. 
It allows your organization's security and IT admin teams the ability to leverage the improved integration with Azure access management and more secure access control. - -All resources, monitoring solutions, and Insights such as Application Insights and VM insights, supporting infrastructure and applications maintained by the different teams are configured to forward their collected log data to the IT organization's centralized shared workspace. Users on each team are granted access to logs for resources they have been given access to. - -Once you have deployed your workspace architecture, you can enforce this on Azure resources with [Azure Policy](../../governance/policy/overview.md). It provides a way to define policies and ensure compliance with your Azure resources so they send all their resource logs to a particular workspace. For example, with Azure virtual machines or virtual machine scale sets, you can use existing policies that evaluate workspace compliance and report results, or customize to remediate if non-compliant. - -## Workspace consolidation migration strategy - -For customers who have already deployed multiple workspaces and are interested in consolidating to the resource-context access model, we recommend you take an incremental approach to migrate to the recommended access model, and you don't attempt to achieve this quickly or aggressively. Following a phased approach to plan, migrate, validate, and retire following a reasonable timeline will help avoid any unplanned incidents or unexpected impact to your cloud operations. If you do not have a data retention policy for compliance or business reasons, you need to assess the appropriate length of time to retain data in the workspace you are migrating from during the process. While you are reconfiguring resources to report to the shared workspace, you can still analyze the data in the original workspace as necessary. Once the migration is complete, if you're governed to retain data in the original workspace before the end of the retention period, don't delete it. - -While planning your migration to this model, consider the following: - -* Understand what industry regulations and internal policies regarding data retention you must comply with. -* Make sure that your application teams can work within the existing resource-context functionality. -* Identify the access granted to resources for your application teams and test in a development environment before implementing in production. -* Configure the workspace to enable **Use resource or workspace permissions**. -* Remove application teams permission to read and query the workspace. -* Enable and configure any monitoring solutions, Insights such as Container insights and/or Azure Monitor for VMs, your Automation account(s), and management solutions such as Update Management, Start/Stop VMs, etc., that were deployed in the original workspace. - -## Next steps - -To implement the security permissions and controls recommended in this guide, review [manage access to logs](./manage-access.md). 
diff --git a/articles/azure-monitor/logs/log-analytics-workspace-overview.md b/articles/azure-monitor/logs/log-analytics-workspace-overview.md index 8e183a3e9dda0..23653b9540997 100644 --- a/articles/azure-monitor/logs/log-analytics-workspace-overview.md +++ b/articles/azure-monitor/logs/log-analytics-workspace-overview.md @@ -14,7 +14,7 @@ A Log Analytics workspace is a unique environment for log data from Azure Monito You can use a single workspace for all your data collection, or you may create multiple workspaces based on a variety of requirements such as the geographic location of the data, access rights that define which users can access data, and configuration settings such as the pricing tier and data retention. -To create a new workspace, see [Create a Log Analytics workspace in the Azure portal](./quick-create-workspace.md). For considerations on creating multiple workspaces, see [Designing your Azure Monitor Logs deployment](design-logs-deployment.md). +To create a new workspace, see [Create a Log Analytics workspace in the Azure portal](./quick-create-workspace.md). For considerations on creating multiple workspaces, see [Design a Log Analytics workspace configuration](./workspace-design.md). ## Data structure @@ -70,12 +70,12 @@ To access archived data, you must first retrieve data from it in an Analytics Lo ## Permissions -Permission to data in a Log Analytics workspace is defined by the [access control mode](design-logs-deployment.md#access-control-mode), which is a setting on each workspace. Users can either be given explicit access to the workspace using a [built-in or custom role](../roles-permissions-security.md), or you can allow access to data collected for Azure resources to users with access to those resources. +Permission to data in a Log Analytics workspace is defined by the [access control mode](manage-access.md#access-control-mode), which is a setting on each workspace. Users can either be given explicit access to the workspace using a [built-in or custom role](../roles-permissions-security.md), or you can allow access to data collected for Azure resources to users with access to those resources. See [Manage access to log data and workspaces in Azure Monitor](manage-access.md) for details on the different permission options and on configuring permissions. ## Next steps - [Create a new Log Analytics workspace](quick-create-workspace.md) -- See [Designing your Azure Monitor Logs deployment](design-logs-deployment.md) for considerations on creating multiple workspaces. +- See [Design a Log Analytics workspace configuration](workspace-design.md) for considerations on creating multiple workspaces. - [Learn about log queries to retrieve and analyze data from a Log Analytics workspace.](./log-query-overview.md) diff --git a/articles/azure-monitor/logs/logs-dedicated-clusters.md b/articles/azure-monitor/logs/logs-dedicated-clusters.md index ac5c08eba8fc4..8c156f7cd48a0 100644 --- a/articles/azure-monitor/logs/logs-dedicated-clusters.md +++ b/articles/azure-monitor/logs/logs-dedicated-clusters.md @@ -39,16 +39,16 @@ Log Analytics Dedicated Clusters use a commitment tier pricing model of at least Provide the following properties when creating new dedicated cluster: -- **ClusterName**--must be unique per resource group -- **ResourceGroupName**--use central IT resource group since clusters are usually shared by many teams in the organization. For more design considerations, review [Designing your Azure Monitor Logs deployment](../logs/design-logs-deployment.md).
+- **ClusterName**: Must be unique for the resource group. +- **ResourceGroupName**: You should use a central IT resource group because clusters are usually shared by many teams in the organization. For more design considerations, review [Design a Log Analytics workspace configuration](../logs/workspace-design.md). - **Location** -- **SkuCapacity**--the Commitment Tier (formerly called capacity reservations) can be set to 500, 1000, 2000 or 5000 GB/day. For more information on cluster costs, see [Dedicate clusters](./cost-logs.md#dedicated-clusters). +- **SkuCapacity**: The Commitment Tier (formerly called capacity reservations) can be set to 500, 1000, 2000 or 5000 GB/day. For more information on cluster costs, see [Dedicate clusters](./cost-logs.md#dedicated-clusters). The user account that creates the clusters must have the standard Azure resource creation permission: `Microsoft.Resources/deployments/*` and cluster write permission `Microsoft.OperationalInsights/clusters/write` by having in their role assignments this specific action or `Microsoft.OperationalInsights/*` or `*/write`. After you create your cluster resource, you can edit additional properties such as *sku*, *keyVaultProperties, or *billingType*. See more details below. -You can have up to five active clusters per subscription per region. If the cluster is deleted, it is still reserved for 14 days. You can have up to four reserved clusters per subscription per region (active or recently deleted). +You can have up to five active clusters per subscription per region. If the cluster is deleted, it is still reserved for 14 days. You can have up to seven reserved clusters per subscription per region (active or recently deleted). > [!NOTE] > Cluster creation triggers resource allocation and provisioning. This operation can take a few hours to complete. @@ -581,7 +581,7 @@ Authorization: Bearer - A maximum of five active clusters can be created in each region and subscription. -- A maximum number of four reserved clusters (active or recently deleted) can be created in each region and subscription. +- A maximum number of seven reserved clusters (active or recently deleted) can exist in each region and subscription. - A maximum of 1,000 Log Analytics workspaces can be linked to a cluster. @@ -654,4 +654,4 @@ Authorization: Bearer ## Next steps - Learn about [Log Analytics dedicated cluster billing](cost-logs.md#dedicated-clusters) -- Learn about [proper design of Log Analytics workspaces](../logs/design-logs-deployment.md) +- Learn about [proper design of Log Analytics workspaces](../logs/workspace-design.md) diff --git a/articles/azure-monitor/logs/manage-access.md b/articles/azure-monitor/logs/manage-access.md index 88c9f4069f349..039b4f2233eda 100644 --- a/articles/azure-monitor/logs/manage-access.md +++ b/articles/azure-monitor/logs/manage-access.md @@ -1,5 +1,5 @@ --- -title: Manage Log Analytics workspaces in Azure Monitor | Microsoft Docs +title: Manage access to Log Analytics workspaces description: You can manage access to data stored in a Log Analytics workspace in Azure Monitor using resource, workspace, or table-level permissions. This article details how to complete.
ms.topic: conceptual ms.reviewer: MeirMen @@ -8,44 +8,85 @@ ms.custom: devx-track-azurepowershell --- -# Manage access to log data and workspaces in Azure Monitor +# Manage access to Log Analytics workspaces + The data in a Log Analytics workspace that a user can access is determined by a combination of factors including settings on the workspace itself, the user's access to resources sending data to the workspace, and the method that the user accesses the workspace. This article describes how access is managed and how to perform any required configuration. -Azure Monitor stores [log](../logs/data-platform-logs.md) data in a Log Analytics workspace. A workspace is a container that includes data and configuration information. To manage access to log data, you perform various administrative tasks related to your workspace. +## Overview +The factors that define the data a user can access are briefly described in the following table. Each is further described in the sections below. -This article explains how to manage access to logs and to administer the workspaces that contain them, including how to grant access to: +| Factor | Description | +|:---|:---| +| [Access mode](#access-mode) | Method the user uses to access the workspace. Defines the scope of the data available and the access control mode that's applied. | +| [Access control mode](#access-control-mode) | Setting on the workspace that defines whether permissions are applied at the workspace or resource level. | +| [Azure RBAC](#azure-rbac) | Permissions applied to individual or groups of users for the workspace or resource sending data to the workspace. Defines what data the user will have access to. | +| [Table level Azure RBAC](#table-level-azure-rbac) | Optional permissions that defines specific data types in the workspace that a user can access. Apply to all users regardless of their access mode or access control mode. | -* The workspace using workspace permissions. -* Users who need access to log data from specific resources using Azure role-based access control (Azure RBAC) - also known as [resource-context](../logs/design-logs-deployment.md#access-mode) -* Users who need access to log data in a specific table in the workspace using Azure RBAC. -To understand the Logs concepts around Azure RBAC and access strategies, read [designing your Azure Monitor Logs deployment](../logs/design-logs-deployment.md) +## Access mode +The *access mode* refers to how a user accesses a Log Analytics workspace and defines the data they can access during the current session. The mode is determined according to the [scope](scope.md) you select in Log Analytics. -## Configure access control mode +There are two access modes: -You can view the [access control mode](../logs/design-logs-deployment.md) configured on a workspace from the Azure portal or with Azure PowerShell. You can change this setting using one of the following supported methods: +- **Workspace-context**: You can view all logs in the workspace that you have permission to. Queries in this mode are scoped to all data in all tables in the workspace. This is the access mode used when logs are accessed with the workspace as the scope, such as when you select **Logs** from the **Azure Monitor** menu in the Azure portal. 
-* Azure portal + - **Resource-context**: When you access the workspace for a particular resource, resource group, or subscription, such as when you select **Logs** from a resource menu in the Azure portal, you can view logs for only resources in all tables that you have access to. Queries in this mode are scoped to only data associated with that resource. This mode also enables granular Azure RBAC. Workspaces use a resource-context log model where every log record emitted by an Azure resource, is automatically associated with this resource. -* Azure PowerShell + +Records are only available in resource-context queries if they are associated with the relevant resource. You can check this association by running a query and verifying that the [_ResourceId](./log-standard-columns.md#_resourceid) column is populated. -* Azure Resource Manager template +There are known limitations with the following resources: -### From the Azure portal +- Computers outside of Azure. Resource-context is only supported with [Azure Arc for Servers](../../azure-arc/servers/index.yml). +- Application Insights. Supported for resource-context only when using [Workspace-based Application Insights resource](../app/create-workspace-resource.md) +- Service Fabric -You can view the current workspace access control mode on the **Overview** page for the workspace in the **Log Analytics workspace** menu. -![View workspace access control mode](media/manage-access/view-access-control-mode.png) +### Comparing access modes + +The following table summarizes the access modes: + +| Issue | Workspace-context | Resource-context | +|:---|:---|:---| +| Who is each model intended for? | Central administration.
                  Administrators who need to configure data collection and users who need access to a wide variety of resources. Also currently required for users who need to access logs for resources outside of Azure. | Application teams.
                  Administrators of Azure resources being monitored. Allows them to focus on their resource without having to filter out data from other resources. | +| What does a user require to view logs? | Permissions to the workspace.
                  See **Workspace permissions** in [Manage access using workspace permissions](./manage-access.md#azure-rbac). | Read access to the resource.
                  See **Resource permissions** in [Manage access using Azure permissions](./manage-access.md#azure-rbac). Permissions can be inherited from the resource group or subscription or directly assigned to the resource. Permission to the logs for the resource is automatically assigned. The user doesn't require access to the workspace. | +| What is the scope of permissions? | Workspace.
                  Users with access to the workspace can query all logs in the workspace from tables that they have permissions to. See [Table access control](./manage-access.md#table-level-azure-rbac). | Azure resource.
                  Users can query logs for specific resources, resource groups, or subscriptions they have access to in any workspace, but can't query logs for other resources. | +| How can a user access logs? | Start **Logs** from the **Azure Monitor** menu.

                  Start **Logs** from **Log Analytics workspaces**.

                  From Azure Monitor [Workbooks](../best-practices-analysis.md#workbooks). | Start **Logs** from the menu for the Azure resource. The user will have access to data for that resource.

                  Start **Logs** from the **Azure Monitor** menu. The user will have access to data for all resources they have access to.

                  Start **Logs** from **Log Analytics workspaces**. The user will have access to data for all resources they have access to.

                  From Azure Monitor [Workbooks](../best-practices-analysis.md#workbooks). | + +## Access control mode + +The *Access control mode* is a setting on each workspace that defines how permissions are determined for the workspace. + +* **Require workspace permissions**. This control mode does not allow granular Azure RBAC. For a user to access the workspace, they must be [granted permissions to the workspace](#azure-rbac) or to [specific tables](#table-level-azure-rbac). + + If a user accesses the workspace in [workspace-context mode](#access-mode), they have access to all data in any table they've been granted access to. If a user accesses the workspace in [resource-context mode](#access-mode), they have access to only data for that resource in any table they've been granted access to. + + This is the default setting for all workspaces created before March 2019. + +* **Use resource or workspace permissions**. This control mode allows granular Azure RBAC. Users can be granted access to only data associated with resources they can view by assigning Azure `read` permission. + + When a user accesses the workspace in [workspace-context mode](#access-mode), workspace permissions apply. When a user accesses the workspace in [resource-context mode](#access-mode), only resource permissions are verified, and workspace permissions are ignored. Enable Azure RBAC for a user by removing them from workspace permissions and allowing their resource permissions to be recognized. + + This is the default setting for all workspaces created after March 2019. + + > [!NOTE] + > If a user has only resource permissions to the workspace, they are only able to access the workspace using resource-context mode assuming the workspace access mode is set to **Use resource or workspace permissions**. + +### Configure access control mode for a workspace + -1. Sign in to the Azure portal at [https://portal.azure.com](https://portal.azure.com). -1. In the Azure portal, select Log Analytics workspaces > your workspace. +# [Azure portal](#tab/portal) + +View the current workspace access control mode on the **Overview** page for the workspace in the **Log Analytics workspace** menu. + +![View workspace access control mode](media/manage-access/view-access-control-mode.png) -You can change this setting from the **Properties** page of the workspace. Changing the setting will be disabled if you don't have permissions to configure the workspace. +Change this setting from the **Properties** page of the workspace. Changing the setting will be disabled if you don't have permissions to configure the workspace. ![Change workspace access mode](media/manage-access/change-access-control-mode.png) -### Using PowerShell +# [PowerShell](#tab/powershell) -Use the following command to examine the access control mode for all workspaces in the subscription: +Use the following command to view the access control mode for all workspaces in the subscription: ```powershell Get-AzResource -ResourceType Microsoft.OperationalInsights/workspaces -ExpandProperties | foreach {$_.Name + ": " + $_.Properties.features.enableLogAccessUsingOnlyResourcePermissions} @@ -58,13 +99,13 @@ DefaultWorkspace38917: True DefaultWorkspace21532: False ``` -A value of `False` means the workspace is configured with the workspace-context access mode. A value of `True` means the workspace is configured with the resource-context access mode. +A value of `False` means the workspace is configured with *workspace-context* access mode. 
A value of `True` means the workspace is configured with *resource-context* access mode. > [!NOTE] > If a workspace is returned without a boolean value and is blank, this also matches the results of a `False` value. > -Use the following script to set the access control mode for a specific workspace to the resource-context permission: +Use the following script to set the access control mode for a specific workspace to *resource-context* permission: ```powershell $WSName = "my-workspace" @@ -76,7 +117,7 @@ else Set-AzResource -ResourceId $Workspace.ResourceId -Properties $Workspace.Properties -Force ``` -Use the following script to set the access control mode for all workspaces in the subscription to the resource-context permission: +Use the following script to set the access control mode for all workspaces in the subscription to *resource-context* permission: ```powershell Get-AzResource -ResourceType Microsoft.OperationalInsights/workspaces -ExpandProperties | foreach { @@ -88,78 +129,84 @@ Set-AzResource -ResourceId $_.ResourceId -Properties $_.Properties -Force } ``` -### Using a Resource Manager template +# [Resource Manager](#tab/arm) To configure the access mode in an Azure Resource Manager template, set the **enableLogAccessUsingOnlyResourcePermissions** feature flag on the workspace to one of the following values. -* **false**: Set the workspace to workspace-context permissions. This is the default setting if the flag isn't set. -* **true**: Set the workspace to resource-context permissions. +* **false**: Set the workspace to *workspace-context* permissions. This is the default setting if the flag isn't set. +* **true**: Set the workspace to *resource-context* permissions. -## Manage access using workspace permissions - -Each workspace can have multiple accounts associated with it, and each account can have access to multiple workspaces. Access is managed using [Azure role-based access control (Azure RBAC)](../../role-based-access-control/role-assignments-portal.md). +--- -The following activities also require Azure permissions: +## Azure RBAC +Access to a workspace is managed using [Azure role-based access control (Azure RBAC)](../../role-based-access-control/role-assignments-portal.md). To grant access to the Log Analytics workspace using Azure permissions, follow the steps in [assign Azure roles to manage access to your Azure subscription resources](../../role-based-access-control/role-assignments-portal.md). +### Workspace permissions +Each workspace can have multiple accounts associated with it, and each account can have access to multiple workspaces. The following table lists the Azure permissions for different workspace actions: |Action |Azure Permissions Needed |Notes | |-------|-------------------------|------| -| Adding and removing monitoring solutions | `Microsoft.Resources/deployments/*`
                  `Microsoft.OperationalInsights/*`
                  `Microsoft.OperationsManagement/*`
                  `Microsoft.Automation/*`
                  `Microsoft.Resources/deployments/*/write` | These permissions need to be granted at resource group or subscription level. | -| Changing the pricing tier | `Microsoft.OperationalInsights/workspaces/*/write` | | -| Viewing data in the *Backup* and *Site Recovery* solution tiles | Administrator / Co-administrator | Accesses resources deployed using the classic deployment model | -| Creating a workspace in the Azure portal | `Microsoft.Resources/deployments/*`
                  `Microsoft.OperationalInsights/workspaces/*` || -| View workspace basic properties and enter the workspace blade in the portal | `Microsoft.OperationalInsights/workspaces/read` || -| Query logs using any interface | `Microsoft.OperationalInsights/workspaces/query/read` || -| Access all log types using queries | `Microsoft.OperationalInsights/workspaces/query/*/read` || -| Access a specific log table | `Microsoft.OperationalInsights/workspaces/query//read` || -| Read the workspace keys to allow sending logs to this workspace | `Microsoft.OperationalInsights/workspaces/sharedKeys/action` || +| Change the pricing tier | `Microsoft.OperationalInsights/workspaces/*/write` | +| Creating a workspace in the Azure portal | `Microsoft.Resources/deployments/*`
                  `Microsoft.OperationalInsights/workspaces/*` | +| View workspace basic properties and enter the workspace blade in the portal | `Microsoft.OperationalInsights/workspaces/read` | +| Query logs using any interface | `Microsoft.OperationalInsights/workspaces/query/read` | +| Access all log types using queries | `Microsoft.OperationalInsights/workspaces/query/*/read` | +| Access a specific log table | `Microsoft.OperationalInsights/workspaces/query/<table_name>/read` | +| Read the workspace keys to allow sending logs to this workspace | `Microsoft.OperationalInsights/workspaces/sharedKeys/action` | +| Add and remove monitoring solutions | `Microsoft.Resources/deployments/*`
                  `Microsoft.OperationalInsights/*`
                  `Microsoft.OperationsManagement/*`
                  `Microsoft.Automation/*`
                  `Microsoft.Resources/deployments/*/write`

                  These permissions need to be granted at resource group or subscription level. | +| View data in the *Backup* and *Site Recovery* solution tiles | Administrator / Co-administrator

                  Accesses resources deployed using the classic deployment model | + +### Built-in roles +Assign users to these roles to give them access at different scopes: -## Manage access using Azure permissions +* Subscription - Access to all workspaces in the subscription +* Resource Group - Access to all workspace in the resource group +* Resource - Access to only the specified workspace -To grant access to the Log Analytics workspace using Azure permissions, follow the steps in [assign Azure roles to manage access to your Azure subscription resources](../../role-based-access-control/role-assignments-portal.md). For example custom roles, see [Example custom roles](#custom-role-examples) +Create assignments at the resource level (workspace) to assure accurate access control. Use [custom roles](../../role-based-access-control/custom-roles.md) to create roles with the specific permissions needed. -Azure has two built-in user roles for Log Analytics workspaces: +> [!NOTE] +> To add and remove users to a user role, you must to have `Microsoft.Authorization/*/Delete` and `Microsoft.Authorization/*/Write` permission. -* Log Analytics Reader -* Log Analytics Contributor + +#### Log Analytics Reader +Members of the *Log Analytics Reader* role can view all monitoring data and monitoring settings, including the configuration of Azure diagnostics on all Azure resources. Members of the *Log Analytics Reader* role can: -* View and search all monitoring data -* View monitoring settings, including viewing the configuration of Azure diagnostics on all Azure resources. +- View and search all monitoring data +- View monitoring settings, including viewing the configuration of Azure diagnostics on all Azure resources. -The Log Analytics Reader role includes the following Azure actions: +*Log Analytics Reader* includes the following Azure actions: | Type | Permission | Description | | ------- | ---------- | ----------- | -| Action | `*/read` | Ability to view all Azure resources and resource configuration. Includes viewing:
                  Virtual machine extension status
                  Configuration of Azure diagnostics on resources
                  All properties and settings of all resources.
                  For workspaces, it allows full unrestricted permissions to read the workspace settings and perform query on the data. See more granular options above. | -| Action | `Microsoft.OperationalInsights/workspaces/analytics/query/action` | Deprecated, no need to assign them to users. | -| Action | `Microsoft.OperationalInsights/workspaces/search/action` | Deprecated, no need to assign them to users. | +| Action | `*/read` | Ability to view all Azure resources and resource configuration.
                  Includes viewing:
                  - Virtual machine extension status
                  - Configuration of Azure diagnostics on resources
                  - All properties and settings of all resources.

                  For workspaces, allows full unrestricted permissions to read the workspace settings and query data. See more granular options above. | | Action | `Microsoft.Support/*` | Ability to open support cases | |Not Action | `Microsoft.OperationalInsights/workspaces/sharedKeys/read` | Prevents reading of workspace key required to use the data collection API and to install agents. This prevents the user from adding new resources to the workspace | +| Action | `Microsoft.OperationalInsights/workspaces/analytics/query/action` | Deprecated. | +| Action | `Microsoft.OperationalInsights/workspaces/search/action` | Deprecated. | +#### Log Analytics Contributor Members of the *Log Analytics Contributor* role can: -* Includes all the privileges of the *Log Analytics Reader role*, allowing the user to read all monitoring data -* Create and configure Automation accounts -* Add and remove management solutions - - > [!NOTE] - > In order to successfully perform the last two actions, this permission needs to be granted at the resource group or subscription level. +- Read all monitoring data granted by the *Log Analytics Reader role*. +- Edit monitoring settings for Azure resources, including + - Adding the VM extension to VMs + - Configuring Azure diagnostics on all Azure resources +- Create and configure Automation accounts. Permission needs to be granted at the resource group or subscription level. +- Add and remove management solutions. Permission needs to be granted at the resource group or subscription level. +- Read storage account keys +- Configure the collection of logs from Azure Storage -* Read storage account keys -* Configure the collection of logs from Azure Storage -* Edit monitoring settings for Azure resources, including - * Adding the VM extension to VMs - * Configuring Azure diagnostics on all Azure resources -> [!NOTE] -> You can use the ability to add a virtual machine extension to a virtual machine to gain full control over a virtual machine. +> [!WARNING] +> You can use the permission to add a virtual machine extension to a virtual machine to gain full control over a virtual machine. The Log Analytics Contributor role includes the following Azure actions: | Permission | Description | | ---------- | ----------- | -| `*/read` | Ability to view all resources and resource configuration. Includes viewing:
                  Virtual machine extension status
                  Configuration of Azure diagnostics on resources
                  All properties and settings of all resources.
                  For workspaces, it allows full unrestricted permissions to read the workspace setting and perform query on the data. See more granular options above. | +| `*/read` | Ability to view all Azure resources and resource configuration.

                  Includes viewing:
                  - Virtual machine extension status
                  - Configuration of Azure diagnostics on resources
                  - All properties and settings of all resources.

                  For workspaces, allows full unrestricted permissions to read the workspace settings and query data. See more granular options above. | | `Microsoft.Automation/automationAccounts/*` | Ability to create and configure Azure Automation accounts, including adding and editing runbooks | | `Microsoft.ClassicCompute/virtualMachines/extensions/*`
                  `Microsoft.Compute/virtualMachines/extensions/*` | Add, update and remove virtual machine extensions, including the Microsoft Monitoring Agent extension and the OMS Agent for Linux extension | | `Microsoft.ClassicStorage/storageAccounts/listKeys/action`
                  `Microsoft.Storage/storageAccounts/listKeys/action` | View the storage account key. Required to configure Log Analytics to read logs from Azure storage accounts | @@ -170,19 +217,11 @@ The Log Analytics Contributor role includes the following Azure actions: | `Microsoft.Resources/deployments/*` | Create and delete deployments. Required for adding and removing solutions, workspaces, and automation accounts | | `Microsoft.Resources/subscriptions/resourcegroups/deployments/*` | Create and delete deployments. Required for adding and removing solutions, workspaces, and automation accounts | -To add and remove users to a user role, it is necessary to have `Microsoft.Authorization/*/Delete` and `Microsoft.Authorization/*/Write` permission. - -Use these roles to give users access at different scopes: - -* Subscription - Access to all workspaces in the subscription -* Resource Group - Access to all workspace in the resource group -* Resource - Access to only the specified workspace -We recommend performing assignments at the resource level (workspace) to assure accurate access control. Use [custom roles](../../role-based-access-control/custom-roles.md) to create roles with the specific permissions needed. ### Resource permissions -When users query logs from a workspace using resource-context access, they'll have the following permissions on the resource: +When users query logs from a workspace using [resource-context access](#access-mode), they'll have the following permissions on the resource: | Permission | Description | | ---------- | ----------- | @@ -191,61 +230,56 @@ When users query logs from a workspace using resource-context access, they'll ha `/read` permission is usually granted from a role that includes _\*/read or_ _\*_ permissions such as the built-in [Reader](../../role-based-access-control/built-in-roles.md#reader) and [Contributor](../../role-based-access-control/built-in-roles.md#contributor) roles. Custom roles that include specific actions or dedicated built-in roles might not include this permission. -See [Defining per-table access control](#table-level-azure-rbac) below if you want to create different access control for different tables. - -## Custom role examples - -1. To grant a user access to log data from their resources, perform the following: - - * Configure the workspace access control mode to **use workspace or resource permissions** - * Grant users `*/read` or `Microsoft.Insights/logs/*/read` permissions to their resources. If they are already assigned the [Log Analytics Reader](../../role-based-access-control/built-in-roles.md#reader) role on the workspace, it is sufficient. +### Custom role examples +In addition to using the built-in roles for Log Analytics workspace, you can create custom roles to assign more granular permissions. Following are some common examples. -2. To grant a user access to log data from their resources and configure their resources to send logs to the workspace, perform the following: +**Grant a user access to log data from their resources.** - * Configure the workspace access control mode to **use workspace or resource permissions** +- Configure the workspace access control mode to **use workspace or resource permissions** +- Grant users `*/read` or `Microsoft.Insights/logs/*/read` permissions to their resources. If they are already assigned the [Log Analytics Reader](../../role-based-access-control/built-in-roles.md#reader) role on the workspace, it is sufficient. 
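The first example above can also be packaged as an Azure custom role. The following PowerShell sketch is illustrative only: the role name, description, and the subscription ID in **AssignableScopes** are placeholders, and it assumes the Az.Resources module is installed.

```powershell
# Sketch: custom role that grants read access to Azure Monitor log data for
# resources in the assignable scopes (resource-context access).
# Replace the name, description, and subscription ID with your own values.
$roleDefinition = @"
{
  "Name": "Resource Log Reader (example)",
  "IsCustom": true,
  "Description": "Can read Azure Monitor log data for resources in the assignable scopes.",
  "Actions": [
    "Microsoft.Insights/logs/*/read"
  ],
  "NotActions": [],
  "AssignableScopes": [
    "/subscriptions/00000000-0000-0000-0000-000000000000"
  ]
}
"@

$roleDefinition | Out-File -FilePath .\resource-log-reader.json -Encoding utf8
New-AzRoleDefinition -InputFile .\resource-log-reader.json
```

Assign the role at the resource group or subscription that contains the users' resources. The workspace access control mode must still be set to **Use resource or workspace permissions** for resource-context queries to succeed.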
- * Grant users the following permissions on the workspace: `Microsoft.OperationalInsights/workspaces/read` and `Microsoft.OperationalInsights/workspaces/sharedKeys/action`. With these permissions, users cannot perform any workspace-level queries. They can only enumerate the workspace and use it as a destination for diagnostic settings or agent configuration. +**Grant a user access to log data from their resources and configure their resources to send logs to the workspace.** - * Grant users the following permissions to their resources: `Microsoft.Insights/logs/*/read` and `Microsoft.Insights/diagnosticSettings/write`. If they are already assigned the [Log Analytics Contributor](../../role-based-access-control/built-in-roles.md#contributor) role, assigned the Reader role, or granted `*/read` permissions on this resource, it is sufficient. +- Configure the workspace access control mode to **use workspace or resource permissions** +- Grant users the following permissions on the workspace: `Microsoft.OperationalInsights/workspaces/read` and `Microsoft.OperationalInsights/workspaces/sharedKeys/action`. With these permissions, users cannot perform any workspace-level queries. They can only enumerate the workspace and use it as a destination for diagnostic settings or agent configuration. +- Grant users the following permissions to their resources: `Microsoft.Insights/logs/*/read` and `Microsoft.Insights/diagnosticSettings/write`. If they are already assigned the [Log Analytics Contributor](../../role-based-access-control/built-in-roles.md#contributor) role, assigned the Reader role, or granted `*/read` permissions on this resource, it is sufficient. -3. To grant a user access to log data from their resources without being able to read security events and send data, perform the following: +**Grant a user access to log data from their resources without being able to read security events and send data.** - * Configure the workspace access control mode to **use workspace or resource permissions** +- Configure the workspace access control mode to **use workspace or resource permissions** +- Grant users the following permissions to their resources: `Microsoft.Insights/logs/*/read`. +- Add the following NonAction to block users from reading the SecurityEvent type: `Microsoft.Insights/logs/SecurityEvent/read`. The NonAction shall be in the same custom role as the action that provides the read permission (`Microsoft.Insights/logs/*/read`). If the user inherent the read action from another role that is assigned to this resource or to the subscription or resource group, they would be able to read all log types. This is also true if they inherit `*/read`, that exist for example, with the Reader or Contributor role. - * Grant users the following permissions to their resources: `Microsoft.Insights/logs/*/read`. +**Grant a user access to log data from their resources and read all Azure AD sign-in and read Update Management solution log data from the workspace.** - * Add the following NonAction to block users from reading the SecurityEvent type: `Microsoft.Insights/logs/SecurityEvent/read`. The NonAction shall be in the same custom role as the action that provides the read permission (`Microsoft.Insights/logs/*/read`). If the user inherent the read action from another role that is assigned to this resource or to the subscription or resource group, they would be able to read all log types. This is also true if they inherit `*/read`, that exist for example, with the Reader or Contributor role. - -4. 
To grant a user access to log data from their resources and read all Azure AD sign-in and read Update Management solution log data from the workspace, perform the following: - - * Configure the workspace access control mode to **use workspace or resource permissions** - - * Grant users the following permissions on the workspace: - - * `Microsoft.OperationalInsights/workspaces/read` – required so the user can enumerate the workspace and open the workspace blade in the Azure portal - * `Microsoft.OperationalInsights/workspaces/query/read` – required for every user that can execute queries - * `Microsoft.OperationalInsights/workspaces/query/SigninLogs/read` – to be able to read Azure AD sign-in logs - * `Microsoft.OperationalInsights/workspaces/query/Update/read` – to be able to read Update Management solution logs - * `Microsoft.OperationalInsights/workspaces/query/UpdateRunProgress/read` – to be able to read Update Management solution logs - * `Microsoft.OperationalInsights/workspaces/query/UpdateSummary/read` – to be able to read Update management logs - * `Microsoft.OperationalInsights/workspaces/query/Heartbeat/read` – required to be able to use Update Management solution - * `Microsoft.OperationalInsights/workspaces/query/ComputerGroup/read` – required to be able to use Update Management solution - - * Grant users the following permissions to their resources: `*/read`, assigned to the Reader role, or `Microsoft.Insights/logs/*/read`. +- Configure the workspace access control mode to **use workspace or resource permissions** +- Grant users the following permissions on the workspace: + - `Microsoft.OperationalInsights/workspaces/read` – required so the user can enumerate the workspace and open the workspace blade in the Azure portal + - `Microsoft.OperationalInsights/workspaces/query/read` – required for every user that can execute queries + - `Microsoft.OperationalInsights/workspaces/query/SigninLogs/read` – to be able to read Azure AD sign-in logs + - `Microsoft.OperationalInsights/workspaces/query/Update/read` – to be able to read Update Management solution logs + - `Microsoft.OperationalInsights/workspaces/query/UpdateRunProgress/read` – to be able to read Update Management solution logs + - `Microsoft.OperationalInsights/workspaces/query/UpdateSummary/read` – to be able to read Update management logs + - `Microsoft.OperationalInsights/workspaces/query/Heartbeat/read` – required to be able to use Update Management solution + - `Microsoft.OperationalInsights/workspaces/query/ComputerGroup/read` – required to be able to use Update Management solution +- Grant users the following permissions to their resources: `*/read`, assigned to the Reader role, or `Microsoft.Insights/logs/*/read`. ## Table level Azure RBAC +Table level Azure RBAC allows you to define more granular control to data in a Log Analytics workspace by defining specific data types that are accessible only to a specific set of users. + +Implement table access control with [Azure custom roles](../../role-based-access-control/custom-roles.md) to either grant access to specific [tables](../logs/data-platform-logs.md) in the workspace. These roles are applied to workspaces with either workspace-context or resource-context [access control modes](#access-control-mode) regardless of the user's [access mode](#access-mode). -**Table level Azure RBAC** allows you to define more granular control to data in a Log Analytics workspace in addition to the other permissions. 
This control allows you to define specific data types that are accessible only to a specific set of users. +Create a [custom role](../../role-based-access-control/custom-roles.md) with the following actions to define access to a particular table. -You implement table access control with [Azure custom roles](../../role-based-access-control/custom-roles.md) to either grant access to specific [tables](../logs/data-platform-logs.md) in the workspace. These roles are applied to workspaces with either workspace-context or resource-context [access control modes](../logs/design-logs-deployment.md#access-control-mode) regardless of the user's [access mode](../logs/design-logs-deployment.md#access-mode). +* Include the **Actions** section of the role definition. To subtract access from the allowed **Actions**, include it in the **NotActions** section. +* Use `Microsoft.OperationalInsights/workspaces/query/*` to specify all tables. -Create a [custom role](../../role-based-access-control/custom-roles.md) with the following actions to define access to table access control. -* To grant access to a table, include it in the **Actions** section of the role definition. To subtract access from the allowed **Actions**, include it in the **NotActions** section. -* Use Microsoft.OperationalInsights/workspaces/query/* to specify all tables. +### Examples +Following are examples of custom role actions to grant and deny access to specific tables. -For example, to create a role with access to the _Heartbeat_ and _AzureActivity_ tables, create a custom role using the following actions: +**Grant access to the _Heartbeat_ and _AzureActivity_ tables.** ``` "Actions": [ @@ -256,7 +290,7 @@ For example, to create a role with access to the _Heartbeat_ and _AzureActivity_ ], ``` -To create a role with access to only the _SecurityBaseline_ table, create a custom role using the following actions: +**Grant access to only the _SecurityBaseline_ table.** ``` "Actions": [ @@ -265,7 +299,9 @@ To create a role with access to only the _SecurityBaseline_ table, create a cust "Microsoft.OperationalInsights/workspaces/query/SecurityBaseline/read" ], ``` -The examples above define a list of tables that are allowed. This example shows blocked list definition when a user can access all tables but the _SecurityAlert_ table: + + +**Grant access to all tables except the _SecurityAlert_ table.** ``` "Actions": [ @@ -280,9 +316,12 @@ The examples above define a list of tables that are allowed. This example shows ### Custom logs - Custom logs are created from data sources such as custom logs and HTTP Data Collector API. The easiest way to identify the type of log is by checking the tables listed under [Custom Logs in the log schema](./log-analytics-tutorial.md#view-table-information). + Custom logs are tables created from data sources such as [text logs](../agents/data-sources-custom-logs.md) and [HTTP Data Collector API](data-collector-api.md). The easiest way to identify the type of log is by checking the tables listed under [Custom Logs in the log schema](./log-analytics-tutorial.md#view-table-information). - You can't grant access to individual custom logs, but you can grant access to all custom logs. To create a role with access to all custom logs, create a custom role using the following actions: +> [!NOTE] +> Tables created by the [custom logs API](../essentials/../logs/custom-logs-overview.md) does not yet support table level RBAC. + + You can't grant access to individual custom logs tables, but you can grant access to all custom logs. 
To create a role with access to all custom log tables, create a custom role using the following actions: ``` "Actions": [ @@ -291,17 +330,18 @@ The examples above define a list of tables that are allowed. This example shows "Microsoft.OperationalInsights/workspaces/query/Tables.Custom/read" ], ``` -An alternative approach to manage access to custom logs is to assign them to an Azure resource and manage access using the resource-context paradigm. To use this method, you must include the resource ID by specifying it in the [x-ms-AzureResourceId](../logs/data-collector-api.md#request-headers) header when data is ingested to Log Analytics via the [HTTP Data Collector API](../logs/data-collector-api.md). The resource ID must be valid and have access rules applied to it. After the logs are ingested, they are accessible to those with read access to the resource, as explained here. -Sometimes custom logs come from sources that are not directly associated to a specific resource. In this case, create a resource group just to manage access to these logs. The resource group does not incur any cost, but gives you a valid resource ID to control access to the custom logs. For example, if a specific firewall is sending custom logs, create a resource group called "MyFireWallLogs" and make sure that the API requests contain the resource ID of "MyFireWallLogs". The firewall log records are then accessible only to users that were granted access to either MyFireWallLogs or those with full workspace access. +An alternative approach to manage access to custom logs is to assign them to an Azure resource and manage access using resource-context access control.Include the resource ID by specifying it in the [x-ms-AzureResourceId](../logs/data-collector-api.md#request-headers) header when data is ingested to Log Analytics via the [HTTP Data Collector API](../logs/data-collector-api.md). The resource ID must be valid and have access rules applied to it. After the logs are ingested, they are accessible to users with read access to the resource. + +Some custom logs come from sources that are not directly associated to a specific resource. In this case, create a resource group to manage access to these logs. The resource group does not incur any cost, but gives you a valid resource ID to control access to the custom logs. For example, if a specific firewall is sending custom logs, create a resource group called *MyFireWallLogs* and make sure that the API requests contain the resource ID of *MyFireWallLogs*. The firewall log records are then accessible only to users that were granted access to either MyFireWallLogs or those with full workspace access ### Considerations -* If a user is granted global read permission with the standard Reader or Contributor roles that include the _\*/read_ action, it will override the per-table access control and give them access to all log data. -* If a user is granted per-table access but no other permissions, they would be able to access log data from the API but not from the Azure portal. To provide access from the Azure portal, use Log Analytics Reader as its base role. -* Administrators and owners of the subscription will have access to all data types regardless of any other permission settings. -* Workspace owners are treated like any other user for per-table access control. -* We recommend assigning roles to security groups instead of individual users to reduce the number of assignments. This will also help you use existing group management tools to configure and verify access. 
+- If a user is granted global read permission with the standard Reader or Contributor roles that include the _\*/read_ action, it will override the per-table access control and give them access to all log data. +- If a user is granted per-table access but no other permissions, they would be able to access log data from the API but not from the Azure portal. To provide access from the Azure portal, use Log Analytics Reader as its base role. +- Administrators and owners of the subscription will have access to all data types regardless of any other permission settings. +- Workspace owners are treated like any other user for per-table access control. +- Assign roles to security groups instead of individual users to reduce the number of assignments. This will also help you use existing group management tools to configure and verify access. ## Next steps diff --git a/articles/azure-monitor/logs/media/basic-logs-configure/log-analytics-configure-table-plan.png b/articles/azure-monitor/logs/media/basic-logs-configure/log-analytics-configure-table-plan.png new file mode 100644 index 0000000000000..ce52c164a24f3 Binary files /dev/null and b/articles/azure-monitor/logs/media/basic-logs-configure/log-analytics-configure-table-plan.png differ diff --git a/articles/azure-monitor/logs/media/basic-logs-configure/log-analytics-table-configuration.png b/articles/azure-monitor/logs/media/basic-logs-configure/log-analytics-table-configuration.png new file mode 100644 index 0000000000000..aeab8ac15f7d0 Binary files /dev/null and b/articles/azure-monitor/logs/media/basic-logs-configure/log-analytics-table-configuration.png differ diff --git a/articles/azure-monitor/logs/media/data-retention-configure/log-analytics-configure-table-retention-archive.png b/articles/azure-monitor/logs/media/data-retention-configure/log-analytics-configure-table-retention-archive.png new file mode 100644 index 0000000000000..c910cedd170e3 Binary files /dev/null and b/articles/azure-monitor/logs/media/data-retention-configure/log-analytics-configure-table-retention-archive.png differ diff --git a/articles/azure-monitor/logs/media/data-retention-configure/log-analytics-view-table-retention-archive.png b/articles/azure-monitor/logs/media/data-retention-configure/log-analytics-view-table-retention-archive.png new file mode 100644 index 0000000000000..2b2f60108f2c3 Binary files /dev/null and b/articles/azure-monitor/logs/media/data-retention-configure/log-analytics-view-table-retention-archive.png differ diff --git a/articles/azure-monitor/logs/oms-portal-transition.md b/articles/azure-monitor/logs/oms-portal-transition.md index 150fc050f1cb5..d553dff5bb849 100644 --- a/articles/azure-monitor/logs/oms-portal-transition.md +++ b/articles/azure-monitor/logs/oms-portal-transition.md @@ -39,7 +39,7 @@ While most features will continue to work without performing any migration, you Refer to [Common questions for transition from OMS portal to Azure portal for Log Analytics users](../overview.md) for information about how to transition to the Azure portal. ## User access and role migration -Azure portal access management is richer and more powerful than the access management in the OMS Portal. See [Designing your Azure Monitor Logs workspace](../logs/design-logs-deployment.md) for details of access management in Log Analytics. +Azure portal access management is richer and more powerful than the access management in the OMS Portal. 
See [Designing your Azure Monitor Logs workspace](../logs/workspace-design.md) for details of access management in Log Analytics. > [!NOTE] > Previous versions of this article stated that the permissions would automatically be converted from the OMS portal to the Azure portal. This automatic conversion is no longer planned, and you must perform the conversion yourself. diff --git a/articles/azure-monitor/logs/service-providers.md b/articles/azure-monitor/logs/service-providers.md deleted file mode 100644 index 89e418afb95b8..0000000000000 --- a/articles/azure-monitor/logs/service-providers.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: Azure Monitor Logs for Service Providers | Microsoft Docs -description: Azure Monitor Logs can help Managed Service Providers (MSPs), large enterprises, Independent Software Vendors (ISVs) and hosting service providers manage and monitor servers in customer's on-premises or cloud infrastructure. -ms.topic: conceptual -author: MeirMen -ms.author: meirm -ms.date: 02/03/2020 - ---- - -# Azure Monitor Logs for Service Providers - -Log Analytics workspaces in Azure Monitor can help managed service providers (MSPs), large enterprises, independent software vendors (ISVs), and hosting service providers manage and monitor servers in customer's on-premises or cloud infrastructure. - -Large enterprises share many similarities with service providers, particularly when there is a centralized IT team that is responsible for managing IT for many different business units. For simplicity, this document uses the term *service provider* but the same functionality is also available for enterprises and other customers. - -For partners and service providers who are part of the [Cloud Solution Provider (CSP)](https://partner.microsoft.com/membership/cloud-solution-provider) program, Log Analytics in Azure Monitor is one of the Azure services available in Azure CSP subscriptions. - -Log Analytics in Azure Monitor can also be used by a service provider managing customer resources through the Azure delegated resource management capability in [Azure Lighthouse](../../lighthouse/overview.md). - -## Architectures for Service Providers - -Log Analytics workspaces provide a method for the administrator to control the flow and isolation of [log](../logs/data-platform-logs.md) data and create an architecture that addresses its specific business needs. [This article](../logs/design-logs-deployment.md) explains the design, deployment, and migration considerations for a workspace, and the [manage access](../logs/manage-access.md) article discusses how to apply and manage permissions to log data. Service providers have additional considerations. - -There are three possible architectures for service providers regarding Log Analytics workspaces: - -### 1. Distributed - Logs are stored in workspaces located in the customer's tenant - -In this architecture, a workspace is deployed in the customer's tenant that is used for all the logs of that customer. - -There are two ways that service provider administrators can gain access to a Log Analytics workspace in a customer tenant: - -- A customer can add individual users from the service provider as [Azure Active Directory guest users (B2B)](../../active-directory/external-identities/what-is-b2b.md). The service provider administrators will have to sign in to each customer's directory in the Azure portal to be able to access these workspaces. This also requires the customers to manage individual access for each service provider administrator. 
-- For greater scalability and flexibility, service providers can use [Azure Lighthouse](../../lighthouse/overview.md) to access the customer’s tenant. With this method, the service provider administrators are included in an Azure AD user group in the service provider’s tenant, and this group is granted access during the onboarding process for each customer. These administrators can then access each customer’s workspaces from within their own service provider tenant, rather than having to log into each customer’s tenant individually. Accessing your customers’ Log Analytics workspaces resources in this way reduces the work required on the customer side, and can make it easier to gather and analyze data across multiple customers managed by the same service provider via tools such as [Azure Monitor Workbooks](../visualize/workbooks-overview.md). For more info, see [Monitor customer resources at scale](../../lighthouse/how-to/monitor-at-scale.md). - -The advantages of the distributed architecture are: - -* The customer can confirm specific levels of permissions via [Azure delegated resource management](../../lighthouse/concepts/architecture.md), or can manage access to the logs using their own [Azure role-based access control (Azure RBAC)](../../role-based-access-control/overview.md). -* Logs can be collected from all types of resources, not just agent-based VM data. For example, Azure Audit Logs. -* Each customer can have different settings for their workspace such as retention and data capping. -* Isolation between customers for regulatory and compliancy. -* The charge for each workspace will be rolled into the customer's subscription. - -The disadvantages of the distributed architecture are: - -* Centrally visualizing and analyzing data [across customer tenants](cross-workspace-query.md) with tools such as Azure Monitor Workbooks can result in slower experiences, especially when analyzing data across more than 50+ workspaces. -* If customers are not onboarded for Azure delegated resource management, service provider administrators must be provisioned in the customer directory, and it is harder for the service provider to manage a large number of customer tenants at once. - -### 2. Central - Logs are stored in a workspace located in the service provider tenant - -In this architecture, the logs are not stored in the customer's tenants but only in a central location within one of the service provider's subscriptions. The agents that are installed on the customer's VMs are configured to send their logs to this workspace using the workspace ID and secret key. - -The advantages of the centralized architecture are: - -* It is easy to manage a large number of customers and integrate them to various backend systems. -* The service provider has full ownership over the logs and the various artifacts such as functions and saved queries. -* The service provider can perform analytics across all of its customers. - -The disadvantages of the centralized architecture are: - -* This architecture is applicable only for agent-based VM data, it will not cover PaaS, SaaS and Azure fabric data sources. -* It might be hard to separate the data between the customers when they are merged into a single workspace. The only good method to do so is to use the computer's fully qualified domain name (FQDN) or via the Azure subscription ID. -* All data from all customers will be stored in the same region with a single bill and same retention and configuration settings. 
-* Azure fabric and PaaS services such as Azure Diagnostics and Azure Audit Logs requires the workspace to be in the same tenant as the resource, thus they cannot send the logs to the central workspace. -* All VM agents from all customers will be authenticated to the central workspace using the same workspace ID and key. There is no method to block logs from a specific customer without interrupting other customers. - -### 3. Hybrid - Logs are stored in workspace located in the customer's tenant and some of them are pulled to a central location. - -The third architecture mix between the two options. It is based on the first distributed architecture where the logs are local to each customer but using some mechanism to create a central repository of logs. A portion of the logs is pulled into a central location for reporting and analytics. This portion could be small number of data types or a summary of the activity such as daily statistics. - -There are two options to implement logs in a central location: - -1. Central workspace: The service provider can create a workspace in its tenant and use a script that utilizes the [Query API](https://dev.loganalytics.io/) with the [Data Collection API](../logs/data-collector-api.md) to bring the data from the various workspaces to this central location. Another option, other than a script, is to use [Azure Logic Apps](../../logic-apps/logic-apps-overview.md). - -2. Power BI as a central location: Power BI can act as the central location when the various workspaces export data to it using the integration between the Log Analytics workspace and [Power BI](./log-powerbi.md). - -## Next steps - -* Automate creation and configuration of workspaces using [Resource Manager templates](../logs/resource-manager-workspace.md) - -* Automate creation of workspaces using [PowerShell](../logs/powershell-workspace-configuration.md) - -* Use [Alerts](../alerts/alerts-overview.md) to integrate with existing systems - -* Generate summary reports using [Power BI](./log-powerbi.md) - -* Onboard customers to [Azure delegated resource management](../../lighthouse/concepts/architecture.md). \ No newline at end of file diff --git a/articles/azure-monitor/logs/tutorial-custom-logs-api.md b/articles/azure-monitor/logs/tutorial-custom-logs-api.md index 0bcc3dfac7a4b..473997d0172b5 100644 --- a/articles/azure-monitor/logs/tutorial-custom-logs-api.md +++ b/articles/azure-monitor/logs/tutorial-custom-logs-api.md @@ -8,6 +8,8 @@ ms.date: 01/19/2022 # Tutorial: Send custom logs to Azure Monitor Logs using Resource Manager templates (preview) [Custom logs](custom-logs-overview.md) in Azure Monitor allow you to send custom data to tables in a Log Analytics workspace with a REST API. This tutorial walks through configuration of a new table and a sample application to send custom logs to Azure Monitor using Resource Manager templates. +[!INCLUDE [Sign up for preview](../../../includes/azure-monitor-custom-logs-signup.md)] + > [!NOTE] > This tutorial uses Resource Manager templates and REST API to configure custom logs. See [Tutorial: Send custom logs to Azure Monitor Logs using the Azure portal (preview)](tutorial-custom-logs.md) for a similar tutorial using the Azure portal. @@ -26,7 +28,7 @@ In this tutorial, you learn to: ## Prerequisites To complete this tutorial, you need the following: -- Log Analytics workspace where you have at least [contributor rights](manage-access.md#manage-access-using-azure-permissions) . 
+- Log Analytics workspace where you have at least [contributor rights](manage-access.md#azure-rbac) . - [Permissions to create Data Collection Rule objects](../essentials/data-collection-rule-overview.md#permissions) in the workspace. ## Collect workspace details @@ -443,4 +445,4 @@ The cache that drives IntelliSense may take up to 24 hours to update. - [Complete a similar tutorial using the Azure portal.](tutorial-custom-logs.md) - [Read more about custom logs.](custom-logs-overview.md) -- [Learn more about writing transformation queries](../essentials/data-collection-rule-transformations.md) \ No newline at end of file +- [Learn more about writing transformation queries](../essentials/data-collection-rule-transformations.md) diff --git a/articles/azure-monitor/logs/tutorial-custom-logs.md b/articles/azure-monitor/logs/tutorial-custom-logs.md index 1c6ecca24bc41..74987f90e4ecf 100644 --- a/articles/azure-monitor/logs/tutorial-custom-logs.md +++ b/articles/azure-monitor/logs/tutorial-custom-logs.md @@ -8,6 +8,8 @@ ms.date: 01/19/2022 # Tutorial: Send custom logs to Azure Monitor Logs using the Azure portal (preview) [Custom logs](custom-logs-overview.md) in Azure Monitor allow you to send external data to a Log Analytics workspace with a REST API. This tutorial walks through configuration of a new table and a sample application to send custom logs to Azure Monitor. +[!INCLUDE [Sign up for preview](../../../includes/azure-monitor-custom-logs-signup.md)] + > [!NOTE] > This tutorial uses the Azure portal. See [Tutorial: Send custom logs to Azure Monitor Logs using resource manager templates (preview)](tutorial-custom-logs-api.md) for a similar tutorial using resource manager templates. @@ -23,7 +25,7 @@ In this tutorial, you learn to: ## Prerequisites To complete this tutorial, you need the following: -- Log Analytics workspace where you have at least [contributor rights](manage-access.md#manage-access-using-azure-permissions) . +- Log Analytics workspace where you have at least [contributor rights](manage-access.md#azure-rbac) . - [Permissions to create Data Collection Rule objects](../essentials/data-collection-rule-overview.md#permissions) in the workspace. @@ -253,7 +255,7 @@ Instead of directly configuring the schema of the table, the portal allows you t ```kusto source | extend TimeGenerated = todatetime(Time) - | parse RawData.value with + | parse RawData with ClientIP:string ' ' * ' ' * diff --git a/articles/azure-monitor/logs/tutorial-ingestion-time-transformations-api.md b/articles/azure-monitor/logs/tutorial-ingestion-time-transformations-api.md index 8f194fd5766a5..6007b43780455 100644 --- a/articles/azure-monitor/logs/tutorial-ingestion-time-transformations-api.md +++ b/articles/azure-monitor/logs/tutorial-ingestion-time-transformations-api.md @@ -27,8 +27,10 @@ In this tutorial, you learn to: ## Prerequisites To complete this tutorial, you need the following: -- Log Analytics workspace where you have at least [contributor rights](manage-access.md#manage-access-using-azure-permissions) . -- [Permissions to create Data Collection Rule objects](../essentials/data-collection-rule-overview.md#permissions) in the workspace. +- Log Analytics workspace where you have at least [contributor rights](manage-access.md#azure-rbac). +- [Permissions to create Data Collection Rule objects](../essentials/data-collection-rule-overview.md#permissions) in the workspace. +- The table must already have some data. 
+- The table can't be linked to the [workspace's transformation DCR](../essentials/data-collection-rule-overview.md#types-of-data-collection-rules). ## Overview of tutorial diff --git a/articles/azure-monitor/logs/tutorial-ingestion-time-transformations.md b/articles/azure-monitor/logs/tutorial-ingestion-time-transformations.md index b68fd47d77f9a..ed371a98a4c12 100644 --- a/articles/azure-monitor/logs/tutorial-ingestion-time-transformations.md +++ b/articles/azure-monitor/logs/tutorial-ingestion-time-transformations.md @@ -23,8 +23,10 @@ In this tutorial, you learn to: ## Prerequisites To complete this tutorial, you need the following: -- Log Analytics workspace where you have at least [contributor rights](manage-access.md#manage-access-using-azure-permissions) . -- [Permissions to create Data Collection Rule objects](../essentials/data-collection-rule-overview.md#permissions) in the workspace. +- Log Analytics workspace where you have at least [contributor rights](manage-access.md#azure-rbac). +- [Permissions to create Data Collection Rule objects](../essentials/data-collection-rule-overview.md#permissions) in the workspace. +- The table must already have some data. +- The table can't be linked to the [workspace's transformation DCR](../essentials/data-collection-rule-overview.md#types-of-data-collection-rules). ## Overview of tutorial diff --git a/articles/azure-monitor/logs/workspace-design-service-providers.md b/articles/azure-monitor/logs/workspace-design-service-providers.md new file mode 100644 index 0000000000000..5b5a0fce9cbc5 --- /dev/null +++ b/articles/azure-monitor/logs/workspace-design-service-providers.md @@ -0,0 +1,87 @@ +--- +title: Azure Monitor Logs for Service Providers | Microsoft Docs +description: Azure Monitor Logs can help Managed Service Providers (MSPs), large enterprises, Independent Software Vendors (ISVs) and hosting service providers manage and monitor servers in customer's on-premises or cloud infrastructure. +ms.topic: conceptual +author: MeirMen +ms.author: meirm +ms.date: 02/03/2020 + +--- + +# Log Analytics workspace design for service providers + +Log Analytics workspaces in Azure Monitor can help managed service providers (MSPs), large enterprises, independent software vendors (ISVs), and hosting service providers manage and monitor servers in customer's on-premises or cloud infrastructure. + +Large enterprises share many similarities with service providers, particularly when there is a centralized IT team that is responsible for managing IT for many different business units. For simplicity, this document uses the term *service provider* but the same functionality is also available for enterprises and other customers. + +For partners and service providers who are part of the [Cloud Solution Provider (CSP)](https://partner.microsoft.com/membership/cloud-solution-provider) program, Log Analytics in Azure Monitor is one of the Azure services available in Azure CSP subscriptions. + +Log Analytics in Azure Monitor can also be used by a service provider managing customer resources through the Azure delegated resource management capability in [Azure Lighthouse](../../lighthouse/overview.md). + +## Architectures for Service Providers + +Log Analytics workspaces provide a method for the administrator to control the flow and isolation of [log](../logs/data-platform-logs.md) data and create an architecture that addresses its specific business needs. 
[This article](../logs/workspace-design.md) explains the design, deployment, and migration considerations for a workspace, and the [manage access](../logs/manage-access.md) article discusses how to apply and manage permissions to log data. Service providers have additional considerations. + +There are three possible architectures for service providers regarding Log Analytics workspaces: + +### 1. Distributed - Logs are stored in workspaces located in the customer's tenant + +In this architecture, a workspace is deployed in the customer's tenant that is used for all the logs of that customer. + +There are two ways that service provider administrators can gain access to a Log Analytics workspace in a customer tenant: + +- A customer can add individual users from the service provider as [Azure Active Directory guest users (B2B)](../../active-directory/external-identities/what-is-b2b.md). The service provider administrators will have to sign in to each customer's directory in the Azure portal to be able to access these workspaces. This also requires the customers to manage individual access for each service provider administrator. +- For greater scalability and flexibility, service providers can use [Azure Lighthouse](../../lighthouse/overview.md) to access the customer’s tenant. With this method, the service provider administrators are included in an Azure AD user group in the service provider’s tenant, and this group is granted access during the onboarding process for each customer. These administrators can then access each customer’s workspaces from within their own service provider tenant, rather than having to log into each customer’s tenant individually. Accessing your customers’ Log Analytics workspaces resources in this way reduces the work required on the customer side, and can make it easier to gather and analyze data across multiple customers managed by the same service provider via tools such as [Azure Monitor Workbooks](../visualize/workbooks-overview.md). For more info, see [Monitor customer resources at scale](../../lighthouse/how-to/monitor-at-scale.md). + +The advantages of the distributed architecture are: + +* The customer can confirm specific levels of permissions via [Azure delegated resource management](../../lighthouse/concepts/architecture.md), or can manage access to the logs using their own [Azure role-based access control (Azure RBAC)](../../role-based-access-control/overview.md). +* Logs can be collected from all types of resources, not just agent-based VM data. For example, Azure Audit Logs. +* Each customer can have different settings for their workspace such as retention and data capping. +* Isolation between customers for regulatory and compliancy. +* The charge for each workspace will be rolled into the customer's subscription. + +The disadvantages of the distributed architecture are: + +* Centrally visualizing and analyzing data [across customer tenants](cross-workspace-query.md) with tools such as Azure Monitor Workbooks can result in slower experiences, especially when analyzing data across more than 50+ workspaces. +* If customers are not onboarded for Azure delegated resource management, service provider administrators must be provisioned in the customer directory, and it is harder for the service provider to manage a large number of customer tenants at once. + +### 2. 
Central - Logs are stored in a workspace located in the service provider tenant + +In this architecture, the logs are not stored in the customer's tenants but only in a central location within one of the service provider's subscriptions. The agents that are installed on the customer's VMs are configured to send their logs to this workspace using the workspace ID and secret key. + +The advantages of the centralized architecture are: + +* It is easy to manage a large number of customers and integrate them with various backend systems. +* The service provider has full ownership over the logs and the various artifacts such as functions and saved queries. +* The service provider can perform analytics across all of its customers. + +The disadvantages of the centralized architecture are: + +* This architecture is applicable only to agent-based VM data; it will not cover PaaS, SaaS, and Azure fabric data sources. +* It might be hard to separate the data between the customers when they are merged into a single workspace. The only good method to do so is to use the computer's fully qualified domain name (FQDN) or the Azure subscription ID. +* All data from all customers will be stored in the same region with a single bill and the same retention and configuration settings. +* Azure fabric and PaaS services such as Azure Diagnostics and Azure Audit Logs require the workspace to be in the same tenant as the resource, so they cannot send their logs to the central workspace. +* All VM agents from all customers will be authenticated to the central workspace using the same workspace ID and key. There is no method to block logs from a specific customer without interrupting other customers. + +### 3. Hybrid - Logs are stored in workspaces located in the customer's tenant, and some of them are pulled to a central location + +The third architecture is a mix of the two options. It is based on the first distributed architecture, where the logs are local to each customer, but uses some mechanism to create a central repository of logs. A portion of the logs is pulled into a central location for reporting and analytics. This portion could be a small number of data types or a summary of the activity such as daily statistics. + +There are two options to implement logs in a central location: + +1. Central workspace: The service provider can create a workspace in its tenant and use a script that utilizes the [Query API](https://dev.loganalytics.io/) with the [Data Collection API](../logs/data-collector-api.md) to bring the data from the various workspaces to this central location. Another option, other than a script, is to use [Azure Logic Apps](../../logic-apps/logic-apps-overview.md). + +2. Power BI as a central location: Power BI can act as the central location when the various workspaces export data to it using the integration between the Log Analytics workspace and [Power BI](./log-powerbi.md). + +## Next steps + +* Automate creation and configuration of workspaces using [Resource Manager templates](../logs/resource-manager-workspace.md) + +* Automate creation of workspaces using [PowerShell](../logs/powershell-workspace-configuration.md) + +* Use [Alerts](../alerts/alerts-overview.md) to integrate with existing systems + +* Generate summary reports using [Power BI](./log-powerbi.md) + +* Onboard customers to [Azure delegated resource management](../../lighthouse/concepts/architecture.md). 
\ No newline at end of file diff --git a/articles/azure-monitor/logs/workspace-design.md b/articles/azure-monitor/logs/workspace-design.md new file mode 100644 index 0000000000000..e4a2e11696fe6 --- /dev/null +++ b/articles/azure-monitor/logs/workspace-design.md @@ -0,0 +1,190 @@ +--- +title: Design a Log Analytics workspace architecture +description: Describes the considerations and recommendations for customers preparing to deploy a workspace in Azure Monitor. +ms.topic: conceptual +ms.date: 05/25/2022 + +--- + +# Design a Log Analytics workspace architecture +While a single [Log Analytics workspace](log-analytics-workspace-overview.md) may be sufficient for many environments using Azure Monitor and Microsoft Sentinel, many organizations will create multiple workspaces to optimize costs and better meet different business requirements. This article presents a set of criteria for determining whether to use a single workspace or multiple workspaces and the configuration and placement of those workspaces to meet your particular requirements while optimizing your costs. + +> [!NOTE] +> This article includes both Azure Monitor and Microsoft Sentinel since many customers need to consider both in their design, and most of the decision criteria apply to both. If you only use one of these services, then you can simply ignore the other in your evaluation. + +## Design strategy +Your design should always start with a single workspace since this reduces the complexity of managing multiple workspaces and querying data from them. There are no performance limitations from the amount of data in your workspace, and multiple services and data sources can send data to the same workspace. As you identify criteria to create additional workspaces, your design should use the fewest number of workspaces that will match your particular requirements. + +Designing a workspace configuration includes evaluation of multiple criteria, some of which may be in conflict. For example, you may be able to reduce egress charges by creating a separate workspace in each Azure region, but consolidating into a single workspace might allow you to reduce charges even more with a commitment tier. Evaluate each of the criteria below independently and consider your particular requirements and priorities in determining which design will be most effective for your particular environment. + + +## Design criteria +The following table briefly presents the criteria that you should consider in designing your workspace architecture. The sections below describe each of these criteria in full detail. + +| Criteria | Description | +|:---|:---| +| [Segregate operational and security data](#segregate-operational-and-security-data) | Many customers will create separate workspaces for their operational and security data for data ownership and the additional cost from Microsoft Sentinel. In some cases though, you may be able to save cost by consolidating into a single workspace to qualify for a commitment tier. | +| [Azure tenants](#azure-tenants) | If you have multiple Azure tenants, you'll usually create a workspace in each because several data sources can only send monitoring data to a workspace in the same Azure tenant. | +| [Azure regions](#azure-regions) | Each workspace resides in a particular Azure region, and you may have regulatory or compliance requirements to store data in particular locations. 
| +| [Data ownership](#data-ownership) | You may choose to create separate workspaces to define data ownership, for example by subsidiaries or affiliated companies. | +| [Split billing](#split-billing) | By placing workspaces in separate subscriptions, they can be billed to different parties. | +| [Data retention and archive](#data-retention-and-archive) | You can set different retention settings for each table in a workspace, but you need a separate workspace if you require different retention settings for different resources that send data to the same tables. | +| [Commitment tiers](#commitment-tiers) | Commitment tiers allow you to reduce your ingestion cost by committing to a minimum amount of daily data in a single workspace. | +| [Legacy agent limitations](#legacy-agent-limitations) | Legacy virtual machine agents have limitations on the number of workspaces they can connect to. | +| [Data access control](#data-access-control) | Configure access to the workspace and to different tables and data from different resources. | + +### Segregate operational and security data
Most customers who use both Azure Monitor and Microsoft Sentinel will create a dedicated workspace for each to segregate ownership of data between their operational and security teams and also to optimize costs. If Microsoft Sentinel is enabled in a workspace, then all data in that workspace is subject to Sentinel pricing, even if it's operational data collected by Azure Monitor. While a workspace with Sentinel gets 3 months of free data retention instead of 31 days, this will typically result in a higher cost for operational data than keeping it in a workspace without Sentinel. See [Azure Monitor Logs pricing details](cost-logs.md#workspaces-with-microsoft-sentinel). + +The exception is if combining data in the same workspace helps you reach a [commitment tier](#commitment-tiers), which provides a discount to your ingestion charges. For example, consider an organization that has operational data and security data each ingesting about 50 GB per day. Combining the data in the same workspace would allow a commitment tier at 100 GB per day that would provide a 15% discount for Azure Monitor and 50% discount for Sentinel. + +If you create separate workspaces for other criteria, then you'll usually create additional workspace pairs. For example, if you have two Azure tenants, you may create four workspaces - an operational and security workspace in each tenant. + + +- **If you use both Azure Monitor and Microsoft Sentinel**, create a separate workspace for each. Consider combining the two if it helps you reach a commitment tier. + + +### Azure tenants +Most resources can only send monitoring data to a workspace in the same Azure tenant. Virtual machines using the [Azure Monitor agent](../agents/azure-monitor-agent-overview.md) or the [Log Analytics agents](../agents/log-analytics-agent.md) can send data to workspaces in separate Azure tenants, which may be a scenario that you consider as a [service provider](#multiple-tenant-strategies). + +- **If you have a single Azure tenant**, then create a single workspace for that tenant. +- **If you have multiple Azure tenants**, then create a workspace for each tenant. See [Multiple tenant strategies](#multiple-tenant-strategies) for other options including strategies for service providers. 
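+For example, here's a minimal sketch (with assumed resource group, workspace, and region names) of creating a workspace in a given tenant by switching the PowerShell context to that tenant first. Repeat it once per tenant that needs its own workspace.
+
+```powershell
+# Sign in to the target tenant; the workspace is created in that tenant's subscription.
+Connect-AzAccount -TenantId "<tenant-id>"
+
+# Assumed names: adjust the resource group, workspace name, location, and SKU to your environment.
+New-AzResourceGroup -Name "rg-monitoring" -Location "eastus"
+
+New-AzOperationalInsightsWorkspace -ResourceGroupName "rg-monitoring" `
+    -Name "law-contoso-eastus" `
+    -Location "eastus" `
+    -Sku "PerGB2018"
+```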
+ +### Azure regions +Log Analytics workspaces each reside in a [particular Azure region](https://azure.microsoft.com/global-infrastructure/geographies/), and you may have regulatory or compliance purposes for keeping data in a particular region. For example, an international company might locate a workspace in each major geographical region, such as United States and Europe. + +- **If you have requirements for keeping data in a particular geography**, create a separate workspace for each region with such requirements. +- **If you do not have requirements for keeping data in a particular geography**, use a single workspace for all regions. + +You should also consider potential [bandwidth charges](https://azure.microsoft.com/pricing/details/bandwidth/) that may apply when sending data to a workspace from a resource in another region, although these charges are usually minor relative to data ingestion costs for most customers. These charges will typically result from sending data to the workspace from a virtual machine. Monitoring data from other Azure resources using [diagnostic settings](../essentials/diagnostic-settings.md) does not [incur egress charges](../usage-estimated-costs.md#data-transfer-charges). + +Use the [Azure pricing calculator](https://azure.microsoft.com/pricing/calculator) to estimate the cost and determine which regions you actually need. Consider workspaces in multiple regions if bandwidth charges are significant. + + +- **If bandwidth charges are significant enough to justify the additional complexity**, create a separate workspace for each region with virtual machines. +- **If bandwidth charges are not significant enough to justify the additional complexity**, use a single workspace for all regions. + + +### Data ownership +You may have a requirement to segregate data or define boundaries based on ownership. For example, you may have different subsidiaries or affiliated companies that require delineation of their monitoring data. + +- **If you require data segregation**, use a separate workspace for each data owner. +- **If you do not require data segregation**, use a single workspace for all data owners. + +### Split billing +You may need to split billing between different parties or perform charge back to a customer or internal business unit. [Azure Cost Management + Billing](../usage-estimated-costs.md#azure-cost-management--billing) allows you to view charges by workspace. You can also use a log query to view [billable data volume by Azure resource, resource group, or subscription](analyze-usage.md#data-volume-by-azure-resource-resource-group-or-subscription), which may be sufficient for your billing requirements. + +- **If you do not need to split billing or perform charge back**, use a single workspace for all cost owners. +- **If you need to split billing or perform charge back**, consider whether [Azure Cost Management + Billing](../usage-estimated-costs.md#azure-cost-management--billing) or a log query provides granular enough cost reporting for your requirements. If not, use a separate workspace for each cost owner. + +### Data retention and archive +You can configure default [data retention and archive settings](data-retention-archive.md) for a workspace or [configure different settings for each table](data-retention-archive.md#set-retention-and-archive-policy-by-table). You may require different settings for different sets of data in a particular table. 
If this is the case, then you would need to separate that data into different workspaces, each with unique retention settings. + +- **If you can use the same retention and archive settings for all data in each table**, use a single workspace for all resources. +- **If you require different retention and archive settings for different resources in the same table**, use a separate workspace for different resources. + + + +### Commitment tiers +[Commitment tiers](../logs/cost-logs.md#commitment-tiers) provide a discount to your workspace ingestion costs when you commit to a particular amount of daily data. You may choose to consolidate data in a single workspace in order to reach the level of a particular tier. This same volume of data spread across multiple workspaces would not be eligible for the same tier, unless you have a dedicated cluster. + +If you can commit to daily ingestion of at least 500 GB/day, then you should implement a [dedicated cluster](../logs/cost-logs.md#dedicated-clusters), which provides additional functionality and performance. Dedicated clusters also allow you to combine the data from multiple workspaces in the cluster to reach the level of a commitment tier. + +- **If you will ingest at least 500 GB/day across all resources**, create a dedicated cluster and set the appropriate commitment tier. +- **If you will ingest at least 100 GB/day across resources**, consider combining them into a single workspace to take advantage of a commitment tier. + + + +### Legacy agent limitations +While you should avoid sending duplicate data to multiple workspaces because of the additional charges, you may have virtual machines connected to multiple workspaces. The most common scenario is an agent connected to separate workspaces for Azure Monitor and Microsoft Sentinel. + +The [Azure Monitor agent](../agents/azure-monitor-agent-overview.md) and [Log Analytics agent for Windows](../agents/log-analytics-agent.md) can connect to multiple workspaces. The [Log Analytics agent for Linux](../agents/log-analytics-agent.md), however, can only connect to a single workspace. + +- **If you use the Log Analytics agent for Linux**, migrate to the [Azure Monitor agent](../agents/azure-monitor-agent-overview.md) or ensure that your Linux machines only require access to a single workspace. + + +### Data access control +When you grant a user [access to a workspace](manage-access.md#azure-rbac), they have access to all data in that workspace. This is appropriate for a member of a central administration or security team who must access data for all resources. Access to data in the workspace is also determined by resource-context RBAC and table-level RBAC. + +[Resource-context RBAC](manage-access.md#access-mode) +By default, if a user has read access to an Azure resource, they inherit permissions to any of that resource's monitoring data sent to the workspace. This allows users to access information about resources they manage without being granted explicit access to the workspace. If you need to block this access, you can change the [access control mode](manage-access.md#access-control-mode) to require explicit workspace permissions. + +- **If you want users to be able to access data for their resources**, keep the default access control mode of *Use resource or workspace permissions*. +- **If you want to explicitly assign permissions for all users**, change the access control mode to *Require workspace permissions*. 
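+If you need to change the access control mode outside the portal, the following is a minimal Azure PowerShell sketch. It assumes the workspace feature flag is named `enableLogAccessUsingOnlyResourcePermissions` and that setting it to `$false` corresponds to *Require workspace permissions*; verify both assumptions against the [access control mode](manage-access.md#access-control-mode) documentation before relying on this.
+
+```powershell
+# Minimal sketch (assumed resource names): switch a workspace to "Require workspace permissions".
+# Assumption: $false = require workspace permissions, $true = use resource or workspace permissions.
+$workspace = Get-AzResource -ResourceGroupName "rg-monitoring" -Name "law-contoso-eastus" `
+    -ResourceType "Microsoft.OperationalInsights/workspaces" -ExpandProperties
+
+if ($null -eq $workspace.Properties.features.enableLogAccessUsingOnlyResourcePermissions) {
+    $workspace.Properties.features | Add-Member -MemberType NoteProperty `
+        -Name enableLogAccessUsingOnlyResourcePermissions -Value $false -Force
+}
+else {
+    $workspace.Properties.features.enableLogAccessUsingOnlyResourcePermissions = $false
+}
+
+Set-AzResource -ResourceId $workspace.ResourceId -Properties $workspace.Properties -Force
+```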
+ + +[Table-level RBAC](manage-access.md#table-level-azure-rbac) +With table-level RBAC, you can grant or deny access to specific tables in the workspace. This allows you to implement granular permissions required for specific situations in your environment. + +For example, you might grant access to only specific tables collected by Sentinel to an internal auditing team. Or you might deny access to security related tables to resource owners who need operational data related to their resources. + +- **If you don't require granular access control by table**, grant the operations and security team access to their resources and allow resource owners to use resource-context RBAC for their resources. +- **If you require granular access control by table**, grant or deny access to specific tables using table-level RBAC. + + +## Working with multiple workspaces +Since many designs will include multiple workspaces, Azure Monitor and Microsoft Sentinel include features to assist you in analyzing this data across workspaces. For details, see the following: + +- [Create a log query across multiple workspaces and apps in Azure Monitor](cross-workspace-query.md) +- [Extend Microsoft Sentinel across workspaces and tenants](../../sentinel/extend-sentinel-across-workspaces-tenants.md). +## Multiple tenant strategies +Environments with multiple Azure tenants, including service providers (MSPs), independent software vendors (ISVs), and large enterprises, often require a strategy where a central administration team has access to administer workspaces located in other tenants. Each of the tenants may represent separate customers or different business units. + +> [!NOTE] +> For partners and service providers who are part of the [Cloud Solution Provider (CSP) program](https://partner.microsoft.com/membership/cloud-solution-provider), Log Analytics in Azure Monitor is one of the Azure services available in Azure CSP subscriptions. + +There are two basic strategies for this functionality as described below. + +### Distributed architecture +In a distributed architecture, a Log Analytics workspace is created in each Azure tenant. This is the only option you can use if you're monitoring Azure services other than virtual machines. + +There are two options to allow service provider administrators to access the workspaces in the customer tenants. + + +- Use [Azure Lighthouse](../../lighthouse/overview.md) to access each customer tenant. The service provider administrators are included in an Azure AD user group in the service provider’s tenant, and this group is granted access during the onboarding process for each customer. The administrators can then access each customer’s workspaces from within their own service provider tenant, rather than having to log into each customer’s tenant individually. For more information, see [Monitor customer resources at scale](../../lighthouse/how-to/monitor-at-scale.md). + +- Add individual users from the service provider as [Azure Active Directory guest users (B2B)](../../active-directory/external-identities/what-is-b2b.md). The customer tenant administrators manage individual access for each service provider administrator, and the service provider administrators must log in to the directory for each tenant in the Azure portal to be able to access these workspaces. + + +Advantages to this strategy are: + +- Logs can be collected from all types of resources. 
+- The customer can confirm specific levels of permissions with [Azure delegated resource management](../../lighthouse/concepts/architecture.md), or can manage access to the logs using their own [Azure role-based access control (Azure RBAC)](../../role-based-access-control/overview.md). +- Each customer can have different settings for their workspace such as retention and data cap. +- Isolation between customers for regulatory and compliance purposes. +- The charge for each workspace is included in the bill for the customer's subscription. + +Disadvantages to this strategy are: + +- Centrally visualizing and analyzing data across customer tenants with tools such as Azure Monitor Workbooks can result in slower experiences, especially when analyzing data across more than 50 workspaces. +- If customers are not onboarded for Azure delegated resource management, service provider administrators must be provisioned in the customer directory. This makes it more difficult for the service provider to manage a large number of customer tenants at once. + +### Centralized +A single workspace is created in the service provider's subscription. This option can only collect data from customer virtual machines. Agents installed on the virtual machines are configured to send their logs to this central workspace. + +Advantages to this strategy are: + +- Easy to manage a large number of customers. +- Service provider has full ownership over the logs and the various artifacts such as functions and saved queries. +- Service provider can perform analytics across all of its customers. + +Disadvantages to this strategy are: + +- Logs can only be collected from virtual machines with an agent. It will not work with PaaS, SaaS, or Azure fabric data sources. +- It may be difficult to separate data between customers, since their data shares a single workspace. Queries need to use the computer's fully qualified domain name (FQDN) or the Azure subscription ID. +- All data from all customers will be stored in the same region with a single bill and same retention and configuration settings. + + +### Hybrid +In a hybrid model, each tenant has its own workspace, and some mechanism is used to pull data into a central location for reporting and analytics. This data could include a small number of data types or a summary of the activity such as daily statistics. + +There are two options to implement logs in a central location: + +- Central workspace. The service provider creates a workspace in its tenant and uses a script that utilizes the [Query API](api/overview.md) with the [custom logs API](custom-logs-overview.md) to bring the data from the tenant workspaces to this central location. Another option is to use [Azure Logic Apps](../../logic-apps/logic-apps-overview.md) to copy data to the central workspace. A minimal sketch of the script approach follows this list. + +- Power BI. The tenant workspaces export data to Power BI using the integration between the [Log Analytics workspace and Power BI](log-powerbi.md). 
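+Here's a minimal sketch of the script approach for the central workspace option above. It pulls a summary from one tenant workspace with the Query API (through `Invoke-AzOperationalInsightsQuery`) and posts it to the central workspace. Note that this sketch uses the older HTTP Data Collector API rather than the newer custom logs API linked above; the overall pull-and-push pattern is the same. The workspace IDs, shared key, query, and custom log type name are placeholders, and the signature format should be validated against the Data Collector API reference.
+
+```powershell
+# Requires access to the tenant workspace (for example, through Azure Lighthouse or a guest account).
+Connect-AzAccount
+
+$tenantWorkspaceId  = "<tenant-workspace-guid>"        # assumed: a customer (tenant) workspace ID
+$centralWorkspaceId = "<central-workspace-guid>"       # assumed: the central workspace ID
+$centralSharedKey   = "<central-workspace-primary-key>"
+
+# 1. Pull a daily summary from the tenant workspace. The query is only an example.
+$query   = "Heartbeat | summarize Heartbeats = count() by Computer"
+$results = (Invoke-AzOperationalInsightsQuery -WorkspaceId $tenantWorkspaceId -Query $query).Results
+$body    = [Text.Encoding]::UTF8.GetBytes((ConvertTo-Json -InputObject @($results) -Depth 5))
+
+# 2. Build the SharedKey signature expected by the HTTP Data Collector API.
+$date         = [DateTime]::UtcNow.ToString("r")
+$stringToSign = "POST`n$($body.Length)`napplication/json`nx-ms-date:$date`n/api/logs"
+$hmac         = New-Object System.Security.Cryptography.HMACSHA256
+$hmac.Key     = [Convert]::FromBase64String($centralSharedKey)
+$signature    = [Convert]::ToBase64String($hmac.ComputeHash([Text.Encoding]::UTF8.GetBytes($stringToSign)))
+
+# 3. Post the records to the central workspace as a custom log type (assumed name: TenantDailySummary).
+Invoke-RestMethod -Method Post `
+    -Uri "https://$centralWorkspaceId.ods.opinsights.azure.com/api/logs?api-version=2016-04-01" `
+    -ContentType "application/json" `
+    -Headers @{
+        "Authorization" = "SharedKey ${centralWorkspaceId}:${signature}"
+        "Log-Type"      = "TenantDailySummary"
+        "x-ms-date"     = $date
+    } `
+    -Body $body
+```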
+ + +## Next steps + +- [Learn more about designing and configuring data access in a workspace.](manage-access.md) +- [Get sample workspace architectures for Microsoft Sentinel.](../../sentinel/sample-workspace-designs.md) diff --git a/articles/azure-monitor/monitor-reference.md b/articles/azure-monitor/monitor-reference.md index b4325c5988b1c..e9e157795e748 100644 --- a/articles/azure-monitor/monitor-reference.md +++ b/articles/azure-monitor/monitor-reference.md @@ -30,14 +30,14 @@ The table below lists the available curated visualizations and more detailed inf | [Azure Monitor for Azure Cache for Redis (preview)](./insights/redis-cache-insights-overview.md) | GA | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/redisCacheInsights) | Provides a unified, interactive view of overall performance, failures, capacity, and operational health | | [Azure Cosmos DB Insights](./insights/cosmosdb-insights-overview.md) | GA | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/cosmosDBInsights) | Provides a view of the overall performance, failures, capacity, and operational health of all your Azure Cosmos DB resources in a unified interactive experience. | | [Azure Container Insights](/azure/azure-monitor/insights/container-insights-overview) | GA | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/containerInsights) | Monitors the performance of container workloads that are deployed to managed Kubernetes clusters hosted on Azure Kubernetes Service (AKS). It gives you performance visibility by collecting metrics from controllers, nodes, and containers that are available in Kubernetes through the Metrics API. Container logs are also collected. After you enable monitoring from Kubernetes clusters, these metrics and logs are automatically collected for you through a containerized version of the Log Analytics agent for Linux. | -| [Azure Data Explorer insights](./insights/data-explorer.md) | GA | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/adxClusterInsights) | Azure Data Explorer Insights provides comprehensive monitoring of your clusters by delivering a unified view of your cluster performance, operations, usage, and failures. | +| [Azure Data Explorer insights](/azure/data-explorer/data-explorer-insights) | GA | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/adxClusterInsights) | Azure Data Explorer Insights provides comprehensive monitoring of your clusters by delivering a unified view of your cluster performance, operations, usage, and failures. | | [Azure HDInsight (preview)](../hdinsight/log-analytics-migration.md#insights) | Preview | No | An Azure Monitor workbook that collects important performance metrics from your HDInsight cluster and provides the visualizations and dashboards for most common scenarios. Gives a complete view of a single HDInsight cluster including resource utilization and application status| | [Azure IoT Edge](../iot-edge/how-to-explore-curated-visualizations.md) | GA | No | Visualize and explore metrics collected from the IoT Edge device right in the Azure portal using Azure Monitor Workbooks based public templates. The curated workbooks use built-in metrics from the IoT Edge runtime. These views don't need any metrics instrumentation from the workload modules. 
| | [Azure Key Vault Insights (preview)](./insights/key-vault-insights-overview.md) | GA | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/keyvaultsInsights) | Provides comprehensive monitoring of your key vaults by delivering a unified view of your Key Vault requests, performance, failures, and latency. | | [Azure Monitor Application Insights](./app/app-insights-overview.md) | GA | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/applicationsInsights) | Extensible Application Performance Management (APM) service which monitors the availability, performance, and usage of your web applications whether they're hosted in the cloud or on-premises. It leverages the powerful data analysis platform in Azure Monitor to provide you with deep insights into your application's operations. It enables you to diagnose errors without waiting for a user to report them. Application Insights includes connection points to a variety of development tools and integrates with Visual Studio to support your DevOps processes. | | [Azure Monitor Log Analytics Workspace](./logs/log-analytics-workspace-insights-overview.md) | Preview | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/lawsInsights) | Log Analytics Workspace Insights (preview) provides comprehensive monitoring of your workspaces through a unified view of your workspace usage, performance, health, agent, queries, and change log. This article will help you understand how to onboard and use Log Analytics Workspace Insights (preview). | | [Azure Service Bus Insights](../service-bus-messaging/service-bus-insights.md) | Preview | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/serviceBusInsights) | Azure Service Bus insights provide a view of the overall performance, failures, capacity, and operational health of all your Service Bus resources in a unified interactive experience. | - | [Azure SQL insights (preview)](./insights/sql-insights-overview.md) | GA | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/sqlWorkloadInsights) | A comprehensive interface for monitoring any product in the Azure SQL family. SQL insights uses dynamic management views to expose the data you need to monitor health, diagnose problems, and tune performance. Note: If you are just setting up SQL monitoring, use this instead of the SQL Analytics solution. | + | [Azure SQL insights (preview)](./insights/sql-insights-overview.md) | Preview | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/sqlWorkloadInsights) | A comprehensive interface for monitoring any product in the Azure SQL family. SQL insights uses dynamic management views to expose the data you need to monitor health, diagnose problems, and tune performance. Note: If you are just setting up SQL monitoring, use this instead of the SQL Analytics solution. | | [Azure Storage Insights](/azure/azure-monitor/insights/storage-insights-overview) | GA | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/storageInsights) | Provides comprehensive monitoring of your Azure Storage accounts by delivering a unified view of your Azure Storage services performance, capacity, and availability. 
| | [Azure Network Insights](./insights/network-insights-overview.md) | GA | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/networkInsights) | Provides a comprehensive view of health and metrics for all your network resource. The advanced search capability helps you identify resource dependencies, enabling scenarios like identifying resource that are hosting your website, by simply searching for your website name. | | [Azure Monitor for Resource Groups](./insights/resource-group-insights.md) | GA | No | Triage and diagnose any problems your individual resources encounter, while offering context as to the health and performance of the resource group as a whole. | diff --git a/articles/azure-monitor/overview.md b/articles/azure-monitor/overview.md index fee3b2f93708c..c1c5072cab5ed 100644 --- a/articles/azure-monitor/overview.md +++ b/articles/azure-monitor/overview.md @@ -17,7 +17,7 @@ Just a few examples of what you can do with Azure Monitor include: - Detect and diagnose issues across applications and dependencies with [Application Insights](app/app-insights-overview.md). - Correlate infrastructure issues with [VM insights](vm/vminsights-overview.md) and [Container insights](containers/container-insights-overview.md). - Drill into your monitoring data with [Log Analytics](logs/log-query-overview.md) for troubleshooting and deep diagnostics. -- Support operations at scale with [smart alerts](alerts/alerts-smartgroups-overview.md) and [automated actions](alerts/alerts-action-rules.md). +- Support operations at scale with [automated actions](alerts/alerts-action-rules.md). - Create visualizations with Azure [dashboards](visualize/tutorial-logs-dashboards.md) and [workbooks](visualize/workbooks-overview.md). - Collect data from [monitored resources](./monitor-reference.md) using [Azure Monitor Metrics](./essentials/data-platform-metrics.md). - Investigate change data for routine monitoring or for triaging incidents using [Change Analysis](./change/change-analysis.md). diff --git a/articles/azure-monitor/powershell-samples.md b/articles/azure-monitor/powershell-samples.md deleted file mode 100644 index 72192c73bd621..0000000000000 --- a/articles/azure-monitor/powershell-samples.md +++ /dev/null @@ -1,397 +0,0 @@ ---- -title: Azure Monitor PowerShell samples -description: Use PowerShell to access Azure Monitor features such as autoscale, alerts, webhooks and searching Activity logs. -ms.topic: sample -author: bwren -ms.author: bwren -ms.date: 2/14/2018 -ms.custom: devx-track-azurepowershell - ---- - -# Azure Monitor PowerShell samples -This article shows you sample PowerShell commands to help you access Azure Monitor features. - -> [!NOTE] -> Azure Monitor is the new name for what was called "Azure Insights" until Sept 25th, 2016. However, the namespaces and thus the following commands still contain the word *insights*. - -[!INCLUDE [updated-for-az](../../includes/updated-for-az.md)] - -## Set up PowerShell -If you haven't already, set up PowerShell to run on your computer. For more information, see [How to Install and Configure PowerShell](/powershell/azure/). - -## Examples in this article -The examples in the article illustrate how you can use Azure Monitor cmdlets. You can also review the entire list of Azure Monitor PowerShell cmdlets at [Azure Monitor (Insights) Cmdlets](/powershell/module/az.applicationinsights). - -## Sign in and use subscriptions -First, log in to your Azure subscription. 
- -```powershell -Connect-AzAccount -``` - -You'll see a sign in screen. Once you sign in your Account, TenantID, and default Subscription ID are displayed. All the Azure cmdlets work in the context of your default subscription. To view the list of subscriptions you have access to, use the following command: - -```powershell -Get-AzSubscription -``` - -To see your working context (which subscription your commands are run against), use the following command: - -```powershell -Get-AzContext -``` -To change your working context to a different subscription, use the following command: - -```powershell -Set-AzContext -SubscriptionId -``` - - -## Retrieve Activity log -Use the [Get-AzLog](/powershell/module/az.monitor/get-azlog) cmdlet. The following are some common examples. The Activity Log holds the last 90 days of operations. Using dates before this time results in an error message. - -See what the current date/time are to verify what times to use in the commands below: -```powershell -Get-Date -``` - -Get log entries from this time/date to present: - -```powershell -Get-AzLog -StartTime 2019-03-01T10:30 -``` - -Get log entries between a time/date range: - -```powershell -Get-AzLog -StartTime 2019-01-01T10:30 -EndTime 2015-01-01T11:30 -``` - -Get log entries from a specific resource group: - -```powershell -Get-AzLog -ResourceGroup 'myrg1' -``` - -Get log entries from a specific resource provider between a time/date range: - -```powershell -Get-AzLog -ResourceProvider 'Microsoft.Web' -StartTime 2015-01-01T10:30 -EndTime 2015-01-01T11:30 -``` - -Get all log entries with a specific caller: - -```powershell -Get-AzLog -Caller 'myname@company.com' -``` - -The following command retrieves the last 1000 events from the activity log: - -```powershell -Get-AzLog -MaxRecord 1000 -``` - -`Get-AzLog` supports many other parameters. See the `Get-AzLog` reference for more information. - -> [!NOTE] -> `Get-AzLog` only provides 15 days of history. Using the **-MaxRecords** parameter allows you to query the last N events, beyond 15 days. To access events older than 15 days, use the REST API or SDK (C# sample using the SDK). If you do not include **StartTime**, then the default value is **EndTime** minus one hour. If you do not include **EndTime**, then the default value is current time. All times are in UTC. -> -> - -## Retrieve alerts history -To view all alert events, you can query the Azure Resource Manager logs using the following examples. - -```powershell -Get-AzLog -Caller "Microsoft.Insights/alertRules" -DetailedOutput -StartTime 2015-03-01 -``` - -To view the history for a specific alert rule, you can use the `Get-AzAlertHistory` cmdlet, passing in the resource ID of the alert rule. - -```powershell -Get-AzAlertHistory -ResourceId /subscriptions/s1/resourceGroups/rg1/providers/microsoft.insights/alertrules/myalert -StartTime 2016-03-1 -Status Activated -``` - -The `Get-AzAlertHistory` cmdlet supports various parameters. More information, see [Get-AlertHistory](/previous-versions/azure/mt282453(v=azure.100)). - -## Retrieve information on alert rules -All of the following commands act on a Resource Group named "montest". - -View all the properties of the alert rule: - -```powershell -Get-AzAlertRule -Name simpletestCPU -ResourceGroup montest -DetailedOutput -``` - -Retrieve all alerts on a resource group: - -```powershell -Get-AzAlertRule -ResourceGroup montest -``` - -Retrieve all alert rules set for a target resource. For example, all alert rules set on a VM. 
- -```powershell -Get-AzAlertRule -ResourceGroup montest -TargetResourceId /subscriptions/s1/resourceGroups/montest/providers/Microsoft.Compute/virtualMachines/testconfig -``` - -`Get-AzAlertRule` supports other parameters. See [Get-AlertRule](/previous-versions/azure/mt282459(v=azure.100)) for more information. - -## Create metric alerts -You can use the `Add-AlertRule` cmdlet to create, update, or disable an alert rule. - -You can create email and webhook properties using `New-AzAlertRuleEmail` and `New-AzAlertRuleWebhook`, respectively. In the Alert rule cmdlet, assign these properties as actions to the **Actions** property of the Alert Rule. - -The following table describes the parameters and values used to create an alert using a metric. - -| parameter | value | -| --- | --- | -| Name |simpletestdiskwrite | -| Location of this alert rule |East US | -| ResourceGroup |montest | -| TargetResourceId |/subscriptions/s1/resourceGroups/montest/providers/Microsoft.Compute/virtualMachines/testconfig | -| MetricName of the alert that is created |\PhysicalDisk(_Total)\Disk Writes/sec. See the `Get-MetricDefinitions` cmdlet about how to retrieve the exact metric names | -| operator |GreaterThan | -| Threshold value (count/sec in for this metric) |1 | -| WindowSize (hh:mm:ss format) |00:05:00 | -| aggregator (statistic of the metric, which uses Average count, in this case) |Average | -| custom emails (string array) |'foo@example.com','bar@example.com' | -| send email to owners, contributors and readers |-SendToServiceOwners | - -Create an Email action - -```powershell -$actionEmail = New-AzAlertRuleEmail -CustomEmail myname@company.com -``` - -Create a Webhook action - -```powershell -$actionWebhook = New-AzAlertRuleWebhook -ServiceUri https://example.com?token=mytoken -``` - -Create the alert rule on the CPU% metric on a classic VM - -```powershell -Add-AzMetricAlertRule -Name vmcpu_gt_1 -Location "East US" -ResourceGroup myrg1 -TargetResourceId /subscriptions/s1/resourceGroups/myrg1/providers/Microsoft.ClassicCompute/virtualMachines/my_vm1 -MetricName "Percentage CPU" -Operator GreaterThan -Threshold 1 -WindowSize 00:05:00 -TimeAggregationOperator Average -Action $actionEmail, $actionWebhook -Description "alert on CPU > 1%" -``` - -Retrieve the alert rule - -```powershell -Get-AzAlertRule -Name vmcpu_gt_1 -ResourceGroup myrg1 -DetailedOutput -``` - -The Add alert cmdlet also updates the rule if an alert rule already exists for the given properties. To disable an alert rule, include the parameter **-DisableRule**. - -## Get a list of available metrics for alerts -You can use the `Get-AzMetricDefinition` cmdlet to view the list of all metrics for a specific resource. - -```powershell -Get-AzMetricDefinition -ResourceId -``` - -The following example generates a table with the metric Name and the Unit for it. - -```powershell -Get-AzMetricDefinition -ResourceId | Format-Table -Property Name,Unit -``` - -A full list of available options for `Get-AzMetricDefinition` is available at [Get-MetricDefinitions](/previous-versions/azure/mt282458(v=azure.100)). - -## Create and manage Activity Log alerts -You can use the `Set-AzActivityLogAlert` cmdlet to set an Activity Log alert. An Activity Log alert requires that you first define your conditions as a dictionary of conditions, then create an alert that uses those conditions. 
- -```powershell - -$condition1 = New-AzActivityLogAlertCondition -Field 'category' -Equal 'Administrative' -$condition2 = New-AzActivityLogAlertCondition -Field 'operationName' -Equal 'Microsoft.Compute/virtualMachines/write' -$additionalWebhookProperties = New-Object "System.Collections.Generic.Dictionary``2[System.String,System.String]" -$additionalWebhookProperties.Add('customProperty', 'someValue') -$actionGrp1 = New-AzActionGroup -ActionGroupId '/subscriptions//providers/Microsoft.Insights/actiongr1' -WebhookProperty $additionalWebhookProperties -Set-AzActivityLogAlert -Location 'Global' -Name 'alert on VM create' -ResourceGroupName 'myResourceGroup' -Scope '/subscriptions/' -Action $actionGrp1 -Condition $condition1, $condition2 - -``` - -The additional webhook properties are optional. You can get back the contents of an Activity Log Alert using `Get-AzActivityLogAlert`. - -## Create and manage AutoScale settings - -> [!NOTE] -> For Cloud Services (Microsoft.ClassicCompute), autoscale supports a time grain of 5 minutes (PT5M). For the other services autoscale supports a time grain of minimum of 1 minute (PT1M) - -A resource (a Web app, VM, Cloud Service, or Virtual Machine Scale Set) can have only one autoscale setting configured for it. -However, each autoscale setting can have multiple profiles. For example, one for a performance-based scale profile and a second one for a schedule-based profile. Each profile can have multiple rules configured on it. For more information about Autoscale, see [How to Autoscale an Application](../cloud-services/cloud-services-how-to-scale-portal.md). - -Here are the steps to use: - -1. Create rule(s). -2. Create profile(s) mapping the rules that you created previously to the profiles. -3. Optional: Create notifications for autoscale by configuring webhook and email properties. -4. Create an autoscale setting with a name on the target resource by mapping the profiles and notifications that you created in the previous steps. - -The following examples show you how you can create an Autoscale setting for a Virtual Machine Scale Set for a Windows operating system based by using the CPU utilization metric. - -First, create a rule to scale out, with an instance count increase. - -```powershell -$rule1 = New-AzAutoscaleRule -MetricName "Percentage CPU" -MetricResourceId /subscriptions/s1/resourceGroups/big2/providers/Microsoft.Compute/virtualMachineScaleSets/big2 -Operator GreaterThan -MetricStatistic Average -Threshold 60 -TimeGrain 00:01:00 -TimeWindow 00:10:00 -ScaleActionCooldown 00:10:00 -ScaleActionDirection Increase -ScaleActionValue 1 -``` - -Next, create a rule to scale in, with an instance count decrease. - -```powershell -$rule2 = New-AzAutoscaleRule -MetricName "Percentage CPU" -MetricResourceId /subscriptions/s1/resourceGroups/big2/providers/Microsoft.Compute/virtualMachineScaleSets/big2 -Operator GreaterThan -MetricStatistic Average -Threshold 30 -TimeGrain 00:01:00 -TimeWindow 00:10:00 -ScaleActionCooldown 00:10:00 -ScaleActionDirection Decrease -ScaleActionValue 1 -``` - -Then, create a profile for the rules. - -```powershell -$profile1 = New-AzAutoscaleProfile -DefaultCapacity 2 -MaximumCapacity 10 -MinimumCapacity 2 -Rules $rule1,$rule2 -Name "My_Profile" -``` - -Create a webhook property. - -```powershell -$webhook_scale = New-AzAutoscaleWebhook -ServiceUri "https://example.com?mytoken=mytokenvalue" -``` - -Create the notification property for the autoscale setting, including email and the webhook that you created previously. 
- -```powershell -$notification1= New-AzAutoscaleNotification -CustomEmails ashwink@microsoft.com -SendEmailToSubscriptionAdministrators SendEmailToSubscriptionCoAdministrators -Webhooks $webhook_scale -``` - -Finally, create the autoscale setting to add the profile that you created previously. - -```powershell -Add-AzAutoscaleSetting -Location "East US" -Name "MyScaleVMSSSetting" -ResourceGroup big2 -TargetResourceId /subscriptions/s1/resourceGroups/big2/providers/Microsoft.Compute/virtualMachineScaleSets/big2 -AutoscaleProfiles $profile1 -Notifications $notification1 -``` - -For more information about managing Autoscale settings, see [Get-AutoscaleSetting](/previous-versions/azure/mt282461(v=azure.100)). - -## Autoscale history -The following example shows you how you can view recent autoscale and alert events. Use the activity log search to view the autoscale history. - -```powershell -Get-AzLog -Caller "Microsoft.Insights/autoscaleSettings" -DetailedOutput -StartTime 2015-03-01 -``` - -You can use the `Get-AzAutoScaleHistory` cmdlet to retrieve AutoScale history. - -```powershell -Get-AzAutoScaleHistory -ResourceId /subscriptions/s1/resourceGroups/myrg1/providers/microsoft.insights/autoscalesettings/myScaleSetting -StartTime 2016-03-15 -DetailedOutput -``` - -For more information, see [Get-AutoscaleHistory](/previous-versions/azure/mt282464(v=azure.100)). - -### View details for an autoscale setting -You can use the `Get-Autoscalesetting` cmdlet to retrieve more information about the autoscale setting. - -The following example shows details about all autoscale settings in the resource group 'myrg1'. - -```powershell -Get-AzAutoscalesetting -ResourceGroup myrg1 -DetailedOutput -``` - -The following example shows details about all autoscale settings in the resource group 'myrg1' and specifically the autoscale setting named 'MyScaleVMSSSetting'. - -```powershell -Get-AzAutoscalesetting -ResourceGroup myrg1 -Name MyScaleVMSSSetting -DetailedOutput -``` - -### Remove an autoscale setting -You can use the `Remove-Autoscalesetting` cmdlet to delete an autoscale setting. - -```powershell -Remove-AzAutoscalesetting -ResourceGroup myrg1 -Name MyScaleVMSSSetting -``` - -## Manage log profiles for activity log -You can create a *log profile* and export data from your activity log to a storage account and you can configure data retention for it. Optionally, you can also stream the data to your Event Hub. This feature is currently in Preview and you can only create one log profile per subscription. You can use the following cmdlets with your current subscription to create and manage log profiles. You can also choose a particular subscription. Although PowerShell defaults to the current subscription, you can always change that using `Set-AzContext`. You can configure activity log to route data to any storage account or Event Hub within that subscription. Data is written as blob files in JSON format. - -### Get a log profile -To fetch your existing log profiles, use the `Get-AzLogProfile` cmdlet. 
- -### Add a log profile without data retention -```powershell -Add-AzLogProfile -Name my_log_profile_s1 -StorageAccountId /subscriptions/s1/resourceGroups/myrg1/providers/Microsoft.Storage/storageAccounts/my_storage -Location global,westus,eastus,northeurope,westeurope,eastasia,southeastasia,japaneast,japanwest,northcentralus,southcentralus,eastus2,centralus,australiaeast,australiasoutheast,brazilsouth,centralindia,southindia,westindia -``` - -### Remove a log profile -```powershell -Remove-AzLogProfile -name my_log_profile_s1 -``` - -### Add a log profile with data retention -You can specify the **-RetentionInDays** property with the number of days, as a positive integer, where the data is retained. - -```powershell -Add-AzLogProfile -Name my_log_profile_s1 -StorageAccountId /subscriptions/s1/resourceGroups/myrg1/providers/Microsoft.Storage/storageAccounts/my_storage -Location global,westus,eastus,northeurope,westeurope,eastasia,southeastasia,japaneast,japanwest,northcentralus,southcentralus,eastus2,centralus,australiaeast,australiasoutheast,brazilsouth,centralindia,southindia,westindia -RetentionInDays 90 -``` - -### Add log profile with retention and EventHub -In addition to routing your data to storage account, you can also stream it to an Event Hub. In this preview release the storage account configuration is mandatory but Event Hub configuration is optional. - -```powershell -Add-AzLogProfile -Name my_log_profile_s1 -StorageAccountId /subscriptions/s1/resourceGroups/myrg1/providers/Microsoft.Storage/storageAccounts/my_storage -serviceBusRuleId /subscriptions/s1/resourceGroups/Default-ServiceBus-EastUS/providers/Microsoft.ServiceBus/namespaces/mytestSB/authorizationrules/RootManageSharedAccessKey -Location global,westus,eastus,northeurope,westeurope,eastasia,southeastasia,japaneast,japanwest,northcentralus,southcentralus,eastus2,centralus,australiaeast,australiasoutheast,brazilsouth,centralindia,southindia,westindia -RetentionInDays 90 -``` - -## Configure diagnostics logs -Many Azure services provide additional logs and telemetry that can do one or more of the following: - - be configured to save data in your Azure Storage account - - sent to Event Hubs - - sent to a Log Analytics workspace. - -The operation can only be performed at a resource level. The storage account or event hub should be present in the same region as the target resource where the diagnostics setting is configured. 
- -### Get diagnostic setting -```powershell -Get-AzDiagnosticSetting -ResourceId /subscriptions/s1/resourceGroups/myrg1/providers/Microsoft.Logic/workflows/andy0315logicapp -``` - -Disable diagnostic setting - -```powershell -Set-AzDiagnosticSetting -ResourceId /subscriptions/s1/resourceGroups/myrg1/providers/Microsoft.Logic/workflows/andy0315logicapp -StorageAccountId /subscriptions/s1/resourceGroups/Default-Storage-WestUS/providers/Microsoft.Storage/storageAccounts/mystorageaccount -Enable $false -``` - -Enable diagnostic setting without retention - -```powershell -Set-AzDiagnosticSetting -ResourceId /subscriptions/s1/resourceGroups/myrg1/providers/Microsoft.Logic/workflows/andy0315logicapp -StorageAccountId /subscriptions/s1/resourceGroups/Default-Storage-WestUS/providers/Microsoft.Storage/storageAccounts/mystorageaccount -Enable $true -``` - -Enable diagnostic setting with retention - -```powershell -Set-AzDiagnosticSetting -ResourceId /subscriptions/s1/resourceGroups/myrg1/providers/Microsoft.Logic/workflows/andy0315logicapp -StorageAccountId /subscriptions/s1/resourceGroups/Default-Storage-WestUS/providers/Microsoft.Storage/storageAccounts/mystorageaccount -Enable $true -RetentionEnabled $true -RetentionInDays 90 -``` - -Enable diagnostic setting with retention for a specific log category - -```powershell -Set-AzDiagnosticSetting -ResourceId /subscriptions/s1/resourceGroups/insights-integration/providers/Microsoft.Network/networkSecurityGroups/viruela1 -StorageAccountId /subscriptions/s1/resourceGroups/myrg1/providers/Microsoft.Storage/storageAccounts/sakteststorage -Categories NetworkSecurityGroupEvent -Enable $true -RetentionEnabled $true -RetentionInDays 90 -``` - -Enable diagnostic setting for Event Hubs - -```powershell -Set-AzDiagnosticSetting -ResourceId /subscriptions/s1/resourceGroups/insights-integration/providers/Microsoft.Network/networkSecurityGroups/viruela1 -serviceBusRuleId /subscriptions/s1/resourceGroups/Default-ServiceBus-EastUS/providers/Microsoft.ServiceBus/namespaces/mytestSB/authorizationrules/RootManageSharedAccessKey -Enable $true -``` - -Enable diagnostic setting for Log Analytics - -```powershell -Set-AzDiagnosticSetting -ResourceId /subscriptions/s1/resourceGroups/insights-integration/providers/Microsoft.Network/networkSecurityGroups/viruela1 -WorkspaceId /subscriptions/s1/resourceGroups/insights-integration/providers/providers/microsoft.operationalinsights/workspaces/myWorkspace -Enabled $true - -``` - -Note that the WorkspaceId property takes the *resource ID* of the workspace. You can obtain the resource ID of your Log Analytics workspace using the following command: - -```powershell -(Get-AzOperationalInsightsWorkspace).ResourceId - -``` - -These commands can be combined to send data to multiple destinations. 
diff --git a/articles/azure-monitor/app/media/profiler-aspnetcore-linux/create-deployment-credentials.png b/articles/azure-monitor/profiler/media/profiler-aspnetcore-linux/create-deployment-credentials.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-aspnetcore-linux/create-deployment-credentials.png rename to articles/azure-monitor/profiler/media/profiler-aspnetcore-linux/create-deployment-credentials.png diff --git a/articles/azure-monitor/app/media/profiler-aspnetcore-linux/create-linux-appservice.png b/articles/azure-monitor/profiler/media/profiler-aspnetcore-linux/create-linux-app-service.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-aspnetcore-linux/create-linux-appservice.png rename to articles/azure-monitor/profiler/media/profiler-aspnetcore-linux/create-linux-app-service.png diff --git a/articles/azure-monitor/app/media/profiler-aspnetcore-linux/profiler-traces.png b/articles/azure-monitor/profiler/media/profiler-aspnetcore-linux/profiler-traces.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-aspnetcore-linux/profiler-traces.png rename to articles/azure-monitor/profiler/media/profiler-aspnetcore-linux/profiler-traces.png diff --git a/articles/azure-monitor/app/media/profiler-aspnetcore-linux/setup-git-repo.png b/articles/azure-monitor/profiler/media/profiler-aspnetcore-linux/setup-git-repo.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-aspnetcore-linux/setup-git-repo.png rename to articles/azure-monitor/profiler/media/profiler-aspnetcore-linux/setup-git-repo.png diff --git a/articles/azure-monitor/profiler/media/profiler-aspnetcore-linux/view-traces.png b/articles/azure-monitor/profiler/media/profiler-aspnetcore-linux/view-traces.png new file mode 100644 index 0000000000000..55edd0f83f723 Binary files /dev/null and b/articles/azure-monitor/profiler/media/profiler-aspnetcore-linux/view-traces.png differ diff --git a/articles/azure-monitor/app/media/profiler-azure-functions/appinsights-key.png b/articles/azure-monitor/profiler/media/profiler-azure-functions/app-insights-key.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-azure-functions/appinsights-key.png rename to articles/azure-monitor/profiler/media/profiler-azure-functions/app-insights-key.png diff --git a/articles/azure-monitor/app/media/profiler-azure-functions/app-insights-menu.png b/articles/azure-monitor/profiler/media/profiler-azure-functions/app-insights-menu.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-azure-functions/app-insights-menu.png rename to articles/azure-monitor/profiler/media/profiler-azure-functions/app-insights-menu.png diff --git a/articles/azure-monitor/app/media/profiler-azure-functions/app-setting-1.png b/articles/azure-monitor/profiler/media/profiler-azure-functions/app-setting-1.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-azure-functions/app-setting-1.png rename to articles/azure-monitor/profiler/media/profiler-azure-functions/app-setting-1.png diff --git a/articles/azure-monitor/app/media/profiler-azure-functions/app-setting-2.png b/articles/azure-monitor/profiler/media/profiler-azure-functions/app-setting-2.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-azure-functions/app-setting-2.png rename to articles/azure-monitor/profiler/media/profiler-azure-functions/app-setting-2.png diff --git 
a/articles/azure-monitor/app/media/profiler-azure-functions/app-settings-table.png b/articles/azure-monitor/profiler/media/profiler-azure-functions/app-settings-table.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-azure-functions/app-settings-table.png rename to articles/azure-monitor/profiler/media/profiler-azure-functions/app-settings-table.png diff --git a/articles/azure-monitor/app/media/profiler-azure-functions/choose-plan.png b/articles/azure-monitor/profiler/media/profiler-azure-functions/choose-plan.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-azure-functions/choose-plan.png rename to articles/azure-monitor/profiler/media/profiler-azure-functions/choose-plan.png diff --git a/articles/azure-monitor/app/media/profiler-azure-functions/configuration-menu.png b/articles/azure-monitor/profiler/media/profiler-azure-functions/configuration-menu.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-azure-functions/configuration-menu.png rename to articles/azure-monitor/profiler/media/profiler-azure-functions/configuration-menu.png diff --git a/articles/azure-monitor/app/media/profiler-azure-functions/continue-button.png b/articles/azure-monitor/profiler/media/profiler-azure-functions/continue-button.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-azure-functions/continue-button.png rename to articles/azure-monitor/profiler/media/profiler-azure-functions/continue-button.png diff --git a/articles/azure-monitor/app/media/profiler-azure-functions/new-setting-button.png b/articles/azure-monitor/profiler/media/profiler-azure-functions/new-setting-button.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-azure-functions/new-setting-button.png rename to articles/azure-monitor/profiler/media/profiler-azure-functions/new-setting-button.png diff --git a/articles/azure-monitor/app/media/profiler-azure-functions/performance-menu.png b/articles/azure-monitor/profiler/media/profiler-azure-functions/performance-menu.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-azure-functions/performance-menu.png rename to articles/azure-monitor/profiler/media/profiler-azure-functions/performance-menu.png diff --git a/articles/azure-monitor/app/media/profiler-azure-functions/profiler-function-app.png b/articles/azure-monitor/profiler/media/profiler-azure-functions/profiler-function-app.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-azure-functions/profiler-function-app.png rename to articles/azure-monitor/profiler/media/profiler-azure-functions/profiler-function-app.png diff --git a/articles/azure-monitor/app/media/profiler-azure-functions/save-button.png b/articles/azure-monitor/profiler/media/profiler-azure-functions/save-button.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-azure-functions/save-button.png rename to articles/azure-monitor/profiler/media/profiler-azure-functions/save-button.png diff --git a/articles/azure-monitor/app/media/profiler-azure-functions/view-app-insights-data.png b/articles/azure-monitor/profiler/media/profiler-azure-functions/view-app-insights-data.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-azure-functions/view-app-insights-data.png rename to articles/azure-monitor/profiler/media/profiler-azure-functions/view-app-insights-data.png diff --git 
a/articles/azure-monitor/app/media/profiler-bring-your-own-storage/figure-11.png b/articles/azure-monitor/profiler/media/profiler-bring-your-own-storage/figure-11.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-bring-your-own-storage/figure-11.png rename to articles/azure-monitor/profiler/media/profiler-bring-your-own-storage/figure-11.png diff --git a/articles/azure-monitor/app/media/profiler-bring-your-own-storage/figure-20.png b/articles/azure-monitor/profiler/media/profiler-bring-your-own-storage/figure-20.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-bring-your-own-storage/figure-20.png rename to articles/azure-monitor/profiler/media/profiler-bring-your-own-storage/figure-20.png diff --git a/articles/azure-monitor/app/media/profiler-cloudservice/cloud-service-performance.png b/articles/azure-monitor/profiler/media/profiler-cloudservice/cloud-service-performance.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-cloudservice/cloud-service-performance.png rename to articles/azure-monitor/profiler/media/profiler-cloudservice/cloud-service-performance.png diff --git a/articles/azure-monitor/app/media/profiler-cloudservice/diagnostics-file.png b/articles/azure-monitor/profiler/media/profiler-cloudservice/diagnostics-file.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-cloudservice/diagnostics-file.png rename to articles/azure-monitor/profiler/media/profiler-cloudservice/diagnostics-file.png diff --git a/articles/azure-monitor/app/media/profiler-cloudservice/enable-app-insights.png b/articles/azure-monitor/profiler/media/profiler-cloudservice/enable-app-insights.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-cloudservice/enable-app-insights.png rename to articles/azure-monitor/profiler/media/profiler-cloudservice/enable-app-insights.png diff --git a/articles/azure-monitor/app/media/profiler-cloudservice/profile-now.png b/articles/azure-monitor/profiler/media/profiler-cloudservice/profile-now.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-cloudservice/profile-now.png rename to articles/azure-monitor/profiler/media/profiler-cloudservice/profile-now.png diff --git a/articles/azure-monitor/app/media/profiler-cloudservice/select-profiler.png b/articles/azure-monitor/profiler/media/profiler-cloudservice/select-profiler.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-cloudservice/select-profiler.png rename to articles/azure-monitor/profiler/media/profiler-cloudservice/select-profiler.png diff --git a/articles/azure-monitor/app/media/profiler-containerinstances/application-insights-key.png b/articles/azure-monitor/profiler/media/profiler-containerinstances/application-insights-key.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-containerinstances/application-insights-key.png rename to articles/azure-monitor/profiler/media/profiler-containerinstances/application-insights-key.png diff --git a/articles/azure-monitor/app/media/profiler-containerinstances/profiler_traces.png b/articles/azure-monitor/profiler/media/profiler-containerinstances/profiler-traces.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-containerinstances/profiler_traces.png rename to articles/azure-monitor/profiler/media/profiler-containerinstances/profiler-traces.png diff --git 
a/articles/azure-monitor/app/media/profiler-overview/configure-blade-inline.png b/articles/azure-monitor/profiler/media/profiler-overview/configure-blade-inline.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-overview/configure-blade-inline.png rename to articles/azure-monitor/profiler/media/profiler-overview/configure-blade-inline.png diff --git a/articles/azure-monitor/app/media/profiler-overview/configure-blade.png b/articles/azure-monitor/profiler/media/profiler-overview/configure-blade.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-overview/configure-blade.png rename to articles/azure-monitor/profiler/media/profiler-overview/configure-blade.png diff --git a/articles/azure-monitor/app/media/profiler-overview/performance-blade-v2-examples.png b/articles/azure-monitor/profiler/media/profiler-overview/performance-blade-v2-examples.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-overview/performance-blade-v2-examples.png rename to articles/azure-monitor/profiler/media/profiler-overview/performance-blade-v2-examples.png diff --git a/articles/azure-monitor/app/media/profiler-overview/profiler-button-inline.png b/articles/azure-monitor/profiler/media/profiler-overview/profiler-button-inline.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-overview/profiler-button-inline.png rename to articles/azure-monitor/profiler/media/profiler-overview/profiler-button-inline.png diff --git a/articles/azure-monitor/app/media/profiler-overview/profiler-button.png b/articles/azure-monitor/profiler/media/profiler-overview/profiler-button.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-overview/profiler-button.png rename to articles/azure-monitor/profiler/media/profiler-overview/profiler-button.png diff --git a/articles/azure-monitor/app/media/profiler-overview/trace-explorer-inline.png b/articles/azure-monitor/profiler/media/profiler-overview/trace-explorer-inline.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-overview/trace-explorer-inline.png rename to articles/azure-monitor/profiler/media/profiler-overview/trace-explorer-inline.png diff --git a/articles/azure-monitor/app/media/profiler-overview/trace-explorer.png b/articles/azure-monitor/profiler/media/profiler-overview/trace-explorer.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-overview/trace-explorer.png rename to articles/azure-monitor/profiler/media/profiler-overview/trace-explorer.png diff --git a/articles/azure-monitor/app/media/profiler-settings/appsettings-for-profiler-01.png b/articles/azure-monitor/profiler/media/profiler-settings/app-settings-for-profiler-01.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/appsettings-for-profiler-01.png rename to articles/azure-monitor/profiler/media/profiler-settings/app-settings-for-profiler-01.png diff --git a/articles/azure-monitor/app/media/profiler-settings/change-and-save-appinsights-01.png b/articles/azure-monitor/profiler/media/profiler-settings/change-and-save-app-insights-01.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/change-and-save-appinsights-01.png rename to articles/azure-monitor/profiler/media/profiler-settings/change-and-save-app-insights-01.png diff --git a/articles/azure-monitor/app/media/profiler-settings/check-extension-update-01.png 
b/articles/azure-monitor/profiler/media/profiler-settings/check-extension-update-01.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/check-extension-update-01.png rename to articles/azure-monitor/profiler/media/profiler-settings/check-extension-update-01.png diff --git a/articles/azure-monitor/app/media/profiler-settings/configure-blade-inline.png b/articles/azure-monitor/profiler/media/profiler-settings/configure-blade-inline.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/configure-blade-inline.png rename to articles/azure-monitor/profiler/media/profiler-settings/configure-blade-inline.png diff --git a/articles/azure-monitor/app/media/profiler-settings/configure-blade.png b/articles/azure-monitor/profiler/media/profiler-settings/configure-blade.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/configure-blade.png rename to articles/azure-monitor/profiler/media/profiler-settings/configure-blade.png diff --git a/articles/azure-monitor/app/media/profiler-settings/configure-performance-test.png b/articles/azure-monitor/profiler/media/profiler-settings/configure-performance-test.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/configure-performance-test.png rename to articles/azure-monitor/profiler/media/profiler-settings/configure-performance-test.png diff --git a/articles/azure-monitor/app/media/profiler-settings/configure-profiler-inline.png b/articles/azure-monitor/profiler/media/profiler-settings/configure-profiler-inline.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/configure-profiler-inline.png rename to articles/azure-monitor/profiler/media/profiler-settings/configure-profiler-inline.png diff --git a/articles/azure-monitor/app/media/profiler-settings/configure-profiler.png b/articles/azure-monitor/profiler/media/profiler-settings/configure-profiler.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/configure-profiler.png rename to articles/azure-monitor/profiler/media/profiler-settings/configure-profiler.png diff --git a/articles/azure-monitor/app/media/profiler-settings/cpu-memory-trigger-settings.png b/articles/azure-monitor/profiler/media/profiler-settings/cpu-memory-trigger-settings.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/cpu-memory-trigger-settings.png rename to articles/azure-monitor/profiler/media/profiler-settings/cpu-memory-trigger-settings.png diff --git a/articles/azure-monitor/app/media/profiler-settings/enable-app-insights-blade-01.png b/articles/azure-monitor/profiler/media/profiler-settings/enable-app-insights-blade-01.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/enable-app-insights-blade-01.png rename to articles/azure-monitor/profiler/media/profiler-settings/enable-app-insights-blade-01.png diff --git a/articles/azure-monitor/app/media/profiler-settings/load-test-inprogress.png b/articles/azure-monitor/profiler/media/profiler-settings/load-test-in-progress.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/load-test-inprogress.png rename to articles/azure-monitor/profiler/media/profiler-settings/load-test-in-progress.png diff --git a/articles/azure-monitor/app/media/profiler-settings/load-test-queued.png b/articles/azure-monitor/profiler/media/profiler-settings/load-test-queued.png similarity index 100% 
rename from articles/azure-monitor/app/media/profiler-settings/load-test-queued.png rename to articles/azure-monitor/profiler/media/profiler-settings/load-test-queued.png diff --git a/articles/azure-monitor/app/media/profiler-settings/new-performance-test.png b/articles/azure-monitor/profiler/media/profiler-settings/new-performance-test.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/new-performance-test.png rename to articles/azure-monitor/profiler/media/profiler-settings/new-performance-test.png diff --git a/articles/azure-monitor/app/media/profiler-settings/operation-entry-inline.png b/articles/azure-monitor/profiler/media/profiler-settings/operation-entry-inline.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/operation-entry-inline.png rename to articles/azure-monitor/profiler/media/profiler-settings/operation-entry-inline.png diff --git a/articles/azure-monitor/app/media/profiler-settings/operation-entry.png b/articles/azure-monitor/profiler/media/profiler-settings/operation-entry.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/operation-entry.png rename to articles/azure-monitor/profiler/media/profiler-settings/operation-entry.png diff --git a/articles/azure-monitor/app/media/profiler-settings/performance-blade-inline.png b/articles/azure-monitor/profiler/media/profiler-settings/performance-blade-inline.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/performance-blade-inline.png rename to articles/azure-monitor/profiler/media/profiler-settings/performance-blade-inline.png diff --git a/articles/azure-monitor/app/media/profiler-settings/performance-blade.png b/articles/azure-monitor/profiler/media/profiler-settings/performance-blade.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/performance-blade.png rename to articles/azure-monitor/profiler/media/profiler-settings/performance-blade.png diff --git a/articles/azure-monitor/app/media/profiler-settings/profiler-button-inline.png b/articles/azure-monitor/profiler/media/profiler-settings/profiler-button-inline.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/profiler-button-inline.png rename to articles/azure-monitor/profiler/media/profiler-settings/profiler-button-inline.png diff --git a/articles/azure-monitor/app/media/profiler-settings/profiler-button.png b/articles/azure-monitor/profiler/media/profiler-settings/profiler-button.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/profiler-button.png rename to articles/azure-monitor/profiler/media/profiler-settings/profiler-button.png diff --git a/articles/azure-monitor/app/media/profiler-settings/Profiler-on-demand.png b/articles/azure-monitor/profiler/media/profiler-settings/profiler-on-demand.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/Profiler-on-demand.png rename to articles/azure-monitor/profiler/media/profiler-settings/profiler-on-demand.png diff --git a/articles/azure-monitor/app/media/profiler-settings/profiler-timeout.png b/articles/azure-monitor/profiler/media/profiler-settings/profiler-time-out.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/profiler-timeout.png rename to articles/azure-monitor/profiler/media/profiler-settings/profiler-time-out.png diff --git 
a/articles/azure-monitor/app/media/profiler-settings/sampling-trigger-settings.png b/articles/azure-monitor/profiler/media/profiler-settings/sampling-trigger-settings.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/sampling-trigger-settings.png rename to articles/azure-monitor/profiler/media/profiler-settings/sampling-trigger-settings.png diff --git a/articles/azure-monitor/app/media/profiler-settings/CPUTrigger.PNG b/articles/azure-monitor/profiler/media/profiler-settings/trigger-central-p-u.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/CPUTrigger.PNG rename to articles/azure-monitor/profiler/media/profiler-settings/trigger-central-p-u.png diff --git a/articles/azure-monitor/app/media/profiler-settings/update-site-extension-01.png b/articles/azure-monitor/profiler/media/profiler-settings/update-site-extension-01.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-settings/update-site-extension-01.png rename to articles/azure-monitor/profiler/media/profiler-settings/update-site-extension-01.png diff --git a/articles/azure-monitor/profiler/media/profiler-troubleshooting/profiler-search-telemetry.png b/articles/azure-monitor/profiler/media/profiler-troubleshooting/profiler-search-telemetry.png new file mode 100644 index 0000000000000..d4d79992d653a Binary files /dev/null and b/articles/azure-monitor/profiler/media/profiler-troubleshooting/profiler-search-telemetry.png differ diff --git a/articles/azure-monitor/app/media/profiler-troubleshooting/profiler-webjob-log.png b/articles/azure-monitor/profiler/media/profiler-troubleshooting/profiler-web-job-log.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-troubleshooting/profiler-webjob-log.png rename to articles/azure-monitor/profiler/media/profiler-troubleshooting/profiler-web-job-log.png diff --git a/articles/azure-monitor/app/media/profiler-troubleshooting/profiler-webjob.png b/articles/azure-monitor/profiler/media/profiler-troubleshooting/profiler-web-job.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-troubleshooting/profiler-webjob.png rename to articles/azure-monitor/profiler/media/profiler-troubleshooting/profiler-web-job.png diff --git a/articles/azure-monitor/app/media/profiler-vm/azure-resource-explorer.png b/articles/azure-monitor/profiler/media/profiler-vm/azure-resource-explorer.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-vm/azure-resource-explorer.png rename to articles/azure-monitor/profiler/media/profiler-vm/azure-resource-explorer.png diff --git a/articles/azure-monitor/app/media/profiler-vm/resource-explorer-put.png b/articles/azure-monitor/profiler/media/profiler-vm/resource-explorer-put.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-vm/resource-explorer-put.png rename to articles/azure-monitor/profiler/media/profiler-vm/resource-explorer-put.png diff --git a/articles/azure-monitor/app/media/profiler-vm/resource-explorer-sinks-config.png b/articles/azure-monitor/profiler/media/profiler-vm/resource-explorer-sinks-config.png similarity index 100% rename from articles/azure-monitor/app/media/profiler-vm/resource-explorer-sinks-config.png rename to articles/azure-monitor/profiler/media/profiler-vm/resource-explorer-sinks-config.png diff --git a/articles/azure-monitor/app/media/profiler-vm/wad-extension.png b/articles/azure-monitor/profiler/media/profiler-vm/wad-extension.png similarity 
index 100% rename from articles/azure-monitor/app/media/profiler-vm/wad-extension.png rename to articles/azure-monitor/profiler/media/profiler-vm/wad-extension.png diff --git a/articles/azure-monitor/profiler/media/profiler/always-on.png b/articles/azure-monitor/profiler/media/profiler/always-on.png new file mode 100644 index 0000000000000..b2e2ea9d6e714 Binary files /dev/null and b/articles/azure-monitor/profiler/media/profiler/always-on.png differ diff --git a/articles/azure-monitor/profiler/media/profiler/app-insights-menu.png b/articles/azure-monitor/profiler/media/profiler/app-insights-menu.png new file mode 100644 index 0000000000000..7c925fed8707f Binary files /dev/null and b/articles/azure-monitor/profiler/media/profiler/app-insights-menu.png differ diff --git a/articles/azure-monitor/profiler/media/profiler/configuration-menu.png b/articles/azure-monitor/profiler/media/profiler/configuration-menu.png new file mode 100644 index 0000000000000..cb6366adad712 Binary files /dev/null and b/articles/azure-monitor/profiler/media/profiler/configuration-menu.png differ diff --git a/articles/azure-monitor/profiler/media/profiler/enable-app-insights.png b/articles/azure-monitor/profiler/media/profiler/enable-app-insights.png new file mode 100644 index 0000000000000..8099b8c2e457f Binary files /dev/null and b/articles/azure-monitor/profiler/media/profiler/enable-app-insights.png differ diff --git a/articles/azure-monitor/profiler/media/profiler/enable-profiler.png b/articles/azure-monitor/profiler/media/profiler/enable-profiler.png new file mode 100644 index 0000000000000..4ba20e28db06d Binary files /dev/null and b/articles/azure-monitor/profiler/media/profiler/enable-profiler.png differ diff --git a/articles/azure-monitor/profiler/media/profiler/stop-web-job.png b/articles/azure-monitor/profiler/media/profiler/stop-web-job.png new file mode 100644 index 0000000000000..1c81bdbdb42cf Binary files /dev/null and b/articles/azure-monitor/profiler/media/profiler/stop-web-job.png differ diff --git a/articles/azure-monitor/profiler/media/profiler/web-jobs-menu.png b/articles/azure-monitor/profiler/media/profiler/web-jobs-menu.png new file mode 100644 index 0000000000000..d43a7b72a058f Binary files /dev/null and b/articles/azure-monitor/profiler/media/profiler/web-jobs-menu.png differ diff --git a/articles/azure-monitor/app/profiler-aspnetcore-linux.md b/articles/azure-monitor/profiler/profiler-aspnetcore-linux.md similarity index 92% rename from articles/azure-monitor/app/profiler-aspnetcore-linux.md rename to articles/azure-monitor/profiler/profiler-aspnetcore-linux.md index e1407dd5d734a..2c7b24ca19b53 100644 --- a/articles/azure-monitor/app/profiler-aspnetcore-linux.md +++ b/articles/azure-monitor/profiler/profiler-aspnetcore-linux.md @@ -5,13 +5,14 @@ ms.topic: conceptual ms.devlang: csharp ms.custom: devx-track-csharp ms.date: 02/23/2018 +ms.reviewer: jogrima --- # Profile ASP.NET Core Azure Linux web apps with Application Insights Profiler This feature is currently in preview. -Find out how much time is spent in each method of your live web application when using [Application Insights](./app-insights-overview.md). Application Insights Profiler is now available for ASP.NET Core web apps that are hosted in Linux on Azure App Service. This guide provides step-by-step instructions on how the Profiler traces can be collected for ASP.NET Core Linux web apps. +Find out how much time is spent in each method of your live web application when using [Application Insights](../app/app-insights-overview.md). 
Application Insights Profiler is now available for ASP.NET Core web apps that are hosted in Linux on Azure App Service. This guide provides step-by-step instructions on how the Profiler traces can be collected for ASP.NET Core Linux web apps. After you complete this walkthrough, your app can collect Profiler traces like the traces that are shown in the image. In this example, the Profiler trace indicates that a particular web request is slow because of time spent waiting. The *hot path* in the code that's slowing the app is marked by a flame icon. The **About** method in the **HomeController** section is slowing the web app because the method is calling the **Thread.Sleep** function. @@ -79,7 +80,7 @@ The following instructions apply to all Windows, Linux, and Mac development envi 1. Create the web app environment by using App Service on Linux: - :::image type="content" source="./media/profiler-aspnetcore-linux/create-linux-appservice.png" alt-text="Create the Linux web app"::: + :::image type="content" source="./media/profiler-aspnetcore-linux/create-linux-app-service.png" alt-text="Create the Linux web app"::: 2. Create the deployment credentials: @@ -135,7 +136,7 @@ For more deployment options, see [App Service documentation](../../app-service/i ## Add Application Insights to monitor your web apps -1. [Create an Application Insights resource](./create-new-resource.md). +1. [Create an Application Insights resource](../app/create-new-resource.md). 2. Copy the **iKey** value of the Application Insights resource and set the following settings in your web apps: diff --git a/articles/azure-monitor/app/profiler-azure-functions.md b/articles/azure-monitor/profiler/profiler-azure-functions.md similarity index 87% rename from articles/azure-monitor/app/profiler-azure-functions.md rename to articles/azure-monitor/profiler/profiler-azure-functions.md index 69461de31464e..c309d4315e777 100644 --- a/articles/azure-monitor/app/profiler-azure-functions.md +++ b/articles/azure-monitor/profiler/profiler-azure-functions.md @@ -1,12 +1,10 @@ --- title: Profile Azure Functions app with Application Insights Profiler description: Enable Application Insights Profiler for Azure Functions app. -ms.author: hannahhunter -author: hhunter-ms -ms.reviewer: brwoldey ms.contributor: charles.weininger ms.topic: conceptual ms.date: 05/03/2022 +ms.reviewer: jogrima --- # Profile live Azure Functions app with Application Insights @@ -26,7 +24,7 @@ In this article, you'll use the Azure portal to: :::image type="content" source="./media/profiler-azure-functions/choose-plan.png" alt-text="Screenshot of where to select App Service plan from drop-down in Functions app creation."::: -- Linked to [an Application Insights resource](./create-new-resource.md). Make note of the instrumentation key. +- Linked to [an Application Insights resource](../app/create-new-resource.md). Make note of the instrumentation key. ## App settings for enabling Profiler @@ -45,7 +43,7 @@ From your Functions app overview page in the Azure portal: 1. In the **Application settings** tab, verify the `APPINSIGHTS_INSTRUMENTATIONKEY` setting is included in the settings list. - :::image type="content" source="./media/profiler-azure-functions/appinsights-key.png" alt-text="Screenshot showing the App Insights Instrumentation Key setting in the list."::: + :::image type="content" source="./media/profiler-azure-functions/app-insights-key.png" alt-text="Screenshot showing the App Insights Instrumentation Key setting in the list."::: 1. 
Select **New application setting**. @@ -93,5 +91,5 @@ The app settings now show up in the table: ## Next Steps -- Set these values using [Azure Resource Manager Templates](./azure-web-apps-net-core.md#app-service-application-settings-with-azure-resource-manager), [Azure PowerShell](/powershell/module/az.websites/set-azwebapp), or the [Azure CLI](/cli/azure/webapp/config/appsettings). -- Learn more about [Profiler settings](profiler-settings.md). +- Set these values using [Azure Resource Manager Templates](../app/azure-web-apps-net-core.md#app-service-application-settings-with-azure-resource-manager), [Azure PowerShell](/powershell/module/az.websites/set-azwebapp), or the [Azure CLI](/cli/azure/webapp/config/appsettings). +- Learn more about [Profiler settings](profiler-settings.md). \ No newline at end of file diff --git a/articles/azure-monitor/app/profiler-bring-your-own-storage.md b/articles/azure-monitor/profiler/profiler-bring-your-own-storage.md similarity index 99% rename from articles/azure-monitor/app/profiler-bring-your-own-storage.md rename to articles/azure-monitor/profiler/profiler-bring-your-own-storage.md index 1724845fdb2e2..7da0eeff79d8b 100644 --- a/articles/azure-monitor/app/profiler-bring-your-own-storage.md +++ b/articles/azure-monitor/profiler/profiler-bring-your-own-storage.md @@ -3,6 +3,7 @@ title: Configure BYOS (Bring Your Own Storage) for Profiler & Snapshot Debugger description: Configure BYOS (Bring Your Own Storage) for Profiler & Snapshot Debugger ms.topic: conceptual ms.date: 01/14/2021 +ms.reviewer: jogrima --- # Configure Bring Your Own Storage (BYOS) for Application Insights Profiler and Snapshot Debugger @@ -274,7 +275,7 @@ _Figure 2.0_ For general Profiler troubleshooting, refer to the [Profiler Troubleshoot documentation](profiler-troubleshooting.md). -For general Snapshot Debugger troubleshooting, refer to the [Snapshot Debugger Troubleshoot documentation](snapshot-debugger-troubleshoot.md). +For general Snapshot Debugger troubleshooting, refer to the [Snapshot Debugger Troubleshoot documentation](../app/snapshot-debugger-troubleshoot.md). ## FAQs * If I have Profiler or Snapshot enabled, and then I enabled BYOS, will my data be migrated into my Storage Account? diff --git a/articles/azure-monitor/app/profiler-cloudservice.md b/articles/azure-monitor/profiler/profiler-cloudservice.md similarity index 99% rename from articles/azure-monitor/app/profiler-cloudservice.md rename to articles/azure-monitor/profiler/profiler-cloudservice.md index 82c4df72d6324..dded80d4f8c37 100644 --- a/articles/azure-monitor/app/profiler-cloudservice.md +++ b/articles/azure-monitor/profiler/profiler-cloudservice.md @@ -84,4 +84,4 @@ For more instructions on profiling sessions, see the [Profiler overview](./profi ## Next steps - Learn more about [configuring Profiler](./profiler-settings.md). -- [Troubleshoot Profiler issues](./profiler-troubleshooting.md). \ No newline at end of file +- [Troubleshoot Profiler issues](./profiler-troubleshooting.md). 
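The Functions article above closes by pointing at Resource Manager templates, Azure PowerShell, and the Azure CLI for applying these settings. As a hedged sketch only — the resource names are placeholders, and the setting names and values are taken from the App Service table in the new profiler.md article later in this PR — the CLI route could look like this:

```bash
# Sketch: apply the Profiler app settings to a Functions app with the Azure CLI.
# <function-app-name> and <resource-group> are placeholders, not values from this PR.
az functionapp config appsettings set \
  --name "<function-app-name>" \
  --resource-group "<resource-group>" \
  --settings "APPINSIGHTS_PROFILERFEATURE_VERSION=1.0.0" "DiagnosticServices_EXTENSION_VERSION=~3"
```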
diff --git a/articles/azure-monitor/app/profiler-containers.md b/articles/azure-monitor/profiler/profiler-containers.md similarity index 86% rename from articles/azure-monitor/app/profiler-containers.md rename to articles/azure-monitor/profiler/profiler-containers.md index e066533c9acfd..b47c94a323c3f 100644 --- a/articles/azure-monitor/app/profiler-containers.md +++ b/articles/azure-monitor/profiler/profiler-containers.md @@ -1,11 +1,10 @@ --- title: Profile Azure Containers with Application Insights Profiler description: Enable Application Insights Profiler for Azure Containers. -ms.author: hannahhunter -author: hhunter-ms ms.contributor: charles.weininger ms.topic: conceptual -ms.date: 04/25/2022 +ms.date: 05/26/2022 +ms.reviewer: jogrima --- # Profile live Azure containers with Application Insights @@ -22,7 +21,7 @@ In this article, you'll learn the various ways you can: ## Pre-requisites -- [An Application Insights resource](./create-new-resource.md). Make note of the instrumentation key. +- [An Application Insights resource](../app/create-new-resource.md). Make note of the instrumentation key. - [Docker Desktop](https://www.docker.com/products/docker-desktop/) to build docker images. - [.NET 6 SDK](https://dotnet.microsoft.com/download/dotnet/6.0) installed. @@ -63,6 +62,17 @@ In this article, you'll learn the various ways you can: } ``` +1. Enable Application Insights and Profiler in `Startup.cs`: + + ```csharp + public void ConfigureServices(IServiceCollection services) + { + services.AddApplicationInsightsTelemetry(); // Add this line of code to enable Application Insights. + services.AddServiceProfiler(); // Add this line of code to Enable Profiler + services.AddControllersWithViews(); + } + ``` + ## Pull the latest ASP.NET Core build/runtime images 1. Navigate to the .NET Core 6.0 example directory. @@ -118,7 +128,7 @@ In this article, you'll learn the various ways you can: To hit the endpoint, either: -- Visit [http://localhost:8080/weatherforecast](http://localhost:8080/weatherforecast) in your browser, or +- Visit `http://localhost:8080/weatherforecast` in your browser, or - Use curl: ```terraform @@ -149,7 +159,7 @@ Service Profiler session finished. # A profiling session is complet 1. Open the **Performance** blade in your Application Insights resource. 1. Once the trace process is complete, you will see the Profiler Traces button like it below: - :::image type="content" source="./media/profiler-containerinstances/profiler_traces.png" alt-text="Profile traces in the performance blade"::: + :::image type="content" source="./media/profiler-containerinstances/profiler-traces.png" alt-text="Profile traces in the performance blade"::: @@ -164,4 +174,4 @@ docker rm -f testapp ## Next Steps - Learn more about [Application Insights Profiler](./profiler-overview.md). -- Learn how to enable Profiler in your [ASP.NET Core applications run on Linux](./profiler-aspnetcore-linux.md). \ No newline at end of file +- Learn how to enable Profiler in your [ASP.NET Core applications run on Linux](./profiler-aspnetcore-linux.md). 
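The containers walkthrough above depends on sending traffic to the `weatherforecast` endpoint so a profiling session has requests to capture. A rough local loop, assuming the article's `testapp` image/container name and an 8080:80 port mapping (the build command and port mapping are assumptions beyond what the diff shows):

```bash
# Sketch of the local test flow the containers article describes.
# "testapp" and the endpoint URL come from the article; the port mapping is an assumption.
docker build -t testapp .
docker run -d -p 8080:80 --name testapp testapp

# Send repeated requests so Profiler has traffic to capture during a session.
for i in $(seq 1 100); do
  curl -s http://localhost:8080/weatherforecast > /dev/null
done

# Clean up, as the article does.
docker rm -f testapp
```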
diff --git a/articles/azure-monitor/app/profiler-overview.md b/articles/azure-monitor/profiler/profiler-overview.md similarity index 98% rename from articles/azure-monitor/app/profiler-overview.md rename to articles/azure-monitor/profiler/profiler-overview.md index fc5e8f500f4b9..216148e245602 100644 --- a/articles/azure-monitor/app/profiler-overview.md +++ b/articles/azure-monitor/profiler/profiler-overview.md @@ -1,12 +1,10 @@ --- title: Profile production apps in Azure with Application Insights Profiler description: Identify the hot path in your web server code with a low-footprint profiler -ms.author: hannahhunter -author: hhunter-ms ms.contributor: charles.weininger ms.topic: conceptual -ms.date: 05/11/2022 -ms.reviewer: mbullwin +ms.date: 05/26/2022 +ms.reviewer: jogrima --- # Profile production applications in Azure with Application Insights @@ -165,7 +163,7 @@ For these metrics, you can get a value of greater than 100% by consuming multipl ## Limitations -The default data retention period is five days. The maximum data ingested per day is 10 GB. +The default data retention period is five days. There are no charges for using the Profiler service. To use it, your web app must be hosted in the basic tier of the Web Apps feature of Azure App Service, at minimum. diff --git a/articles/azure-monitor/app/profiler-servicefabric.md b/articles/azure-monitor/profiler/profiler-servicefabric.md similarity index 93% rename from articles/azure-monitor/app/profiler-servicefabric.md rename to articles/azure-monitor/profiler/profiler-servicefabric.md index fae18d47b6f2c..8fc124148dcf2 100644 --- a/articles/azure-monitor/app/profiler-servicefabric.md +++ b/articles/azure-monitor/profiler/profiler-servicefabric.md @@ -4,6 +4,7 @@ description: Enable Profiler for a Service Fabric application ms.topic: conceptual ms.custom: devx-track-dotnet ms.date: 08/06/2018 +ms.reviewer: jogrima --- # Profile live Azure Service Fabric applications with Application Insights @@ -42,14 +43,14 @@ To set up your environment, take the following actions: If your settings are correct, Application Insights Profiler will be installed and enabled when the Azure Diagnostics extension is installed. 1. Add Application Insights to your Service Fabric application. - For Profiler to collect profiles for your requests, your application must be tracking operations with Application Insights. For stateless APIs, you can refer to instructions for [tracking Requests for profiling](profiler-trackrequests.md?toc=/azure/azure-monitor/toc.json). For more information about tracking custom operations in other kinds of apps, see [track custom operations with Application Insights .NET SDK](custom-operations-tracking.md?toc=/azure/azure-monitor/toc.json). + For Profiler to collect profiles for your requests, your application must be tracking operations with Application Insights. For stateless APIs, you can refer to instructions for [tracking Requests for profiling](profiler-trackrequests.md?toc=/azure/azure-monitor/toc.json). For more information about tracking custom operations in other kinds of apps, see [track custom operations with Application Insights .NET SDK](../app/custom-operations-tracking.md). 1. Redeploy your application. ## Next steps -* Generate traffic to your application (for example, launch an [availability test](monitor-web-app-availability.md)). Then, wait 10 to 15 minutes for traces to start to be sent to the Application Insights instance. 
+* Generate traffic to your application (for example, launch an [availability test](../app/monitor-web-app-availability.md)). Then, wait 10 to 15 minutes for traces to start to be sent to the Application Insights instance. * See [Profiler traces](profiler-overview.md?toc=/azure/azure-monitor/toc.json) in the Azure portal. * For help with troubleshooting Profiler issues, see [Profiler troubleshooting](profiler-troubleshooting.md?toc=/azure/azure-monitor/toc.json). diff --git a/articles/azure-monitor/app/profiler-settings.md b/articles/azure-monitor/profiler/profiler-settings.md similarity index 90% rename from articles/azure-monitor/app/profiler-settings.md rename to articles/azure-monitor/profiler/profiler-settings.md index 4755771127fbc..826788cea9221 100644 --- a/articles/azure-monitor/app/profiler-settings.md +++ b/articles/azure-monitor/profiler/profiler-settings.md @@ -1,12 +1,10 @@ --- -title: Use the Azure Application Insights Profiler settings pane | Microsoft Docs -description: See Profiler status and start profiling sessions -ms.author: hannahhunter -author: hhunter-ms +title: Configure Application Insights Profiler | Microsoft Docs +description: Use the Azure Application Insights Profiler settings pane to see Profiler status and start profiling sessions ms.contributor: Charles.Weininger ms.topic: conceptual ms.date: 04/26/2022 -ms.reviewer: mbullwin +ms.reviewer: jogrima --- # Configure Application Insights Profiler @@ -48,7 +46,7 @@ Recent profiling sessions | Displays information about past profiling sessions, ## Profile Now Select **Profile Now** to start a profiling session on demand. When you click this link, all profiler agents that are sending data to this Application Insights instance will start to capture a profile. After 5 to 10 minutes, the profile session will show in the list below. -To manually trigger a profiler session, you'll need, at minimum, *write* access on your role for the Application Insights component. In most cases, you get write access automatically. If you're having issues, you'll need the "Application Insights Component Contributor" subscription scope role added. [See more about role access control with Azure Monitoring](./resources-roles-access-control.md). +To manually trigger a profiler session, you'll need, at minimum, *write* access on your role for the Application Insights component. In most cases, you get write access automatically. If you're having issues, you'll need the "Application Insights Component Contributor" subscription scope role added. [See more about role access control with Azure Monitoring](../app/resources-roles-access-control.md). 
## Trigger Settings @@ -97,17 +95,17 @@ Memory % | Percentage of memory that was being used while the profiler was runni ## Next steps [Enable Profiler and view traces](profiler-overview.md?toc=/azure/azure-monitor/toc.json) -[profiler-on-demand]: ./media/profiler-settings/Profiler-on-demand.png +[profiler-on-demand]: ./media/profiler-settings/profiler-on-demand.png [performance-blade]: ./media/profiler-settings/performance-blade.png [configure-profiler-page]: ./media/profiler-settings/configureBlade.png -[trigger-settings-flyout]: ./media/profiler-settings/CPUTrigger.png +[trigger-settings-flyout]: ./media/profiler-settings/trigger-central-p-u.png [create-performance-test]: ./media/profiler-settings/new-performance-test.png [configure-performance-test]: ./media/profiler-settings/configure-performance-test.png [load-test-queued]: ./media/profiler-settings/load-test-queued.png -[load-test-in-progress]: ./media/profiler-settings/load-test-inprogress.png +[load-test-in-progress]: ./media/profiler-settings/load-test-in-progress.png [enable-app-insights]: ./media/profiler-settings/enable-app-insights-blade-01.png [update-site-extension]: ./media/profiler-settings/update-site-extension-01.png -[change-and-save-appinsights]: ./media/profiler-settings/change-and-save-appinsights-01.png -[app-settings-for-profiler]: ./media/profiler-settings/appsettings-for-profiler-01.png +[change-and-save-appinsights]: ./media/profiler-settings/change-and-save-app-insights-01.png +[app-settings-for-profiler]: ./media/profiler-settings/app-settings-for-profiler-01.png [check-for-extension-update]: ./media/profiler-settings/check-extension-update-01.png -[profiler-timeout]: ./media/profiler-settings/profiler-timeout.png +[profiler-timeout]: ./media/profiler-settings/profiler-time-out.png diff --git a/articles/azure-monitor/app/profiler-trackrequests.md b/articles/azure-monitor/profiler/profiler-trackrequests.md similarity index 99% rename from articles/azure-monitor/app/profiler-trackrequests.md rename to articles/azure-monitor/profiler/profiler-trackrequests.md index 2e10a4bc79b41..501c775f63b39 100644 --- a/articles/azure-monitor/app/profiler-trackrequests.md +++ b/articles/azure-monitor/profiler/profiler-trackrequests.md @@ -4,6 +4,7 @@ description: Write code to track requests with Application Insights so you can g ms.topic: conceptual ms.custom: devx-track-csharp ms.date: 08/06/2018 +ms.reviewer: jogrima --- # Write code to track requests with Application Insights diff --git a/articles/azure-monitor/app/profiler-troubleshooting.md b/articles/azure-monitor/profiler/profiler-troubleshooting.md similarity index 97% rename from articles/azure-monitor/app/profiler-troubleshooting.md rename to articles/azure-monitor/profiler/profiler-troubleshooting.md index 03ee05f03e3c2..bb954b4463147 100644 --- a/articles/azure-monitor/app/profiler-troubleshooting.md +++ b/articles/azure-monitor/profiler/profiler-troubleshooting.md @@ -3,6 +3,7 @@ title: Troubleshoot problems with Azure Application Insights Profiler description: This article presents troubleshooting steps and information to help developers enable and use Application Insights Profiler. 
ms.topic: conceptual ms.date: 08/06/2018 +ms.reviewer: jogrima --- # Troubleshoot problems enabling or viewing Application Insights Profiler @@ -116,7 +117,7 @@ You can use the Kudu management site for App Service to get the base url of this It will end like this: `https:///DiagnosticServices` It will display a Status Page similar like the below: -![Diagnostic Services Status Page](./media/diagnostic-services-site-extension/status-page.png) +![Diagnostic Services Status Page](../app/media/diagnostic-services-site-extension/status-page.png) ### Manual installation @@ -223,5 +224,5 @@ The IPs used by Application Insights Profiler are included in the Azure Monitor [profiler-search-telemetry]:./media/profiler-troubleshooting/Profiler-Search-Telemetry.png -[profiler-webjob]:./media/profiler-troubleshooting/Profiler-webjob.png -[profiler-webjob-log]:./media/profiler-troubleshooting/Profiler-webjob-log.png \ No newline at end of file +[profiler-webjob]:./media/profiler-troubleshooting/profiler-web-job.png +[profiler-webjob-log]:./media/profiler-troubleshooting/profiler-web-job-log.png \ No newline at end of file diff --git a/articles/azure-monitor/app/profiler-vm.md b/articles/azure-monitor/profiler/profiler-vm.md similarity index 97% rename from articles/azure-monitor/app/profiler-vm.md rename to articles/azure-monitor/profiler/profiler-vm.md index 107c6eaa19290..c4e646b4808ee 100644 --- a/articles/azure-monitor/app/profiler-vm.md +++ b/articles/azure-monitor/profiler/profiler-vm.md @@ -3,6 +3,7 @@ title: Profile web apps on an Azure VM - Application Insights Profiler description: Profile web apps on an Azure VM by using Application Insights Profiler. ms.topic: conceptual ms.date: 11/08/2019 +ms.reviewer: jogrima --- # Profile web apps running on an Azure virtual machine or a virtual machine scale set by using Application Insights Profiler @@ -17,7 +18,7 @@ You can also deploy Azure Application Insights Profiler on these services: ## Deploy Profiler on a virtual machine or a virtual machine scale set This article shows you how to get Application Insights Profiler running on your Azure virtual machine (VM) or Azure virtual machine scale set. Profiler is installed with the Azure Diagnostics extension for VMs. Configure the extension to run Profiler, and build the Application Insights SDK into your application. -1. Add the Application Insights SDK to your [ASP.NET application](./asp-net.md). +1. Add the Application Insights SDK to your [ASP.NET application](../app/asp-net.md). To view profiles for your requests, you must send request telemetry to Application Insights. @@ -103,7 +104,7 @@ We have no plan to support Application Insights Profiler for on-premises servers ## Next steps -- Generate traffic to your application (for example, launch an [availability test](monitor-web-app-availability.md)). Then, wait 10 to 15 minutes for traces to start to be sent to the Application Insights instance. +- Generate traffic to your application (for example, launch an [availability test](../app/monitor-web-app-availability.md)). Then, wait 10 to 15 minutes for traces to start to be sent to the Application Insights instance. - See [Profiler traces](profiler-overview.md?toc=/azure/azure-monitor/toc.json) in the Azure portal. - For help with troubleshooting Profiler issues, see [Profiler troubleshooting](profiler-troubleshooting.md?toc=/azure/azure-monitor/toc.json). 
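The troubleshooting article above points readers at a Diagnostic Services status page served from the App Service Kudu site. A quick hedged check, assuming the standard `*.scm.azurewebsites.net` Kudu host name (the host is a placeholder, and the SCM site prompts for your deployment or Azure AD credentials):

```bash
# Sketch: fetch the Diagnostic Services status page the troubleshooting article describes.
curl "https://<your-app-name>.scm.azurewebsites.net/DiagnosticServices"
```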
diff --git a/articles/azure-monitor/profiler/profiler.md b/articles/azure-monitor/profiler/profiler.md new file mode 100644 index 0000000000000..126f7d0163fb7 --- /dev/null +++ b/articles/azure-monitor/profiler/profiler.md @@ -0,0 +1,134 @@ +--- +title: Enable Profiler for Azure App Service apps | Microsoft Docs +description: Profile live apps on Azure App Service with Application Insights Profiler. +ms.topic: conceptual +ms.date: 05/11/2022 +ms.reviewer: jogrima +--- + +# Enable Profiler for Azure App Service apps + +Application Insights Profiler is pre-installed as part of the App Services runtime. You can run Profiler on ASP.NET and ASP.NET Core apps running on Azure App Service using the Basic service tier or higher. Follow these steps even if you've included the App Insights SDK in your application at build time. + +To enable Profiler on Linux, walk through the [ASP.NET Core Azure Linux web apps instructions](profiler-aspnetcore-linux.md). + +> [!NOTE] +> Codeless installation of Application Insights Profiler follows the .NET Core support policy. +> For more information about supported runtimes, see [.NET Core Support Policy](https://dotnet.microsoft.com/platform/support/policy/dotnet-core). + + +## Pre-requisites + +- An [Azure App Service ASP.NET/ASP.NET Core app](../../app-service/quickstart-dotnetcore.md). +- An [Application Insights resource](../app/create-new-resource.md) connected to your App Service app. + +## Verify "Always On" setting is enabled + +1. In the Azure portal, navigate to your App Service. +1. Under **Settings** in the left side menu, select **Configuration**. + + :::image type="content" source="./media/profiler/configuration-menu.png" alt-text="Screenshot of selecting Configuration from the left side menu."::: + +1. Select the **General settings** tab. +1. Verify **Always On** > **On** is selected. + + :::image type="content" source="./media/profiler/always-on.png" alt-text="Screenshot of the General settings tab on the Configuration pane, showing Always On enabled."::: + +1. Select **Save** if you've made changes. + +## Enable Application Insights and Profiler + +1. Under **Settings** in the left side menu, select **Application Insights**. + + :::image type="content" source="./media/profiler/app-insights-menu.png" alt-text="Screenshot of selecting Application Insights from the left side menu."::: + +1. Under **Application Insights**, select **Enable**. +1. Verify you've connected an Application Insights resource to your app. + + :::image type="content" source="./media/profiler/enable-app-insights.png" alt-text="Screenshot of enabling App Insights on your app."::: + +1. Scroll down and select the **.NET** or **.NET Core** tab, depending on your app. +1. Verify **Collection Level** > **Recommended** is selected. +1. Under **Profiler**, select **On**. + - If you chose the **Basic** collection level earlier, the Profiler setting is disabled. +1. Select **Apply**, then **Yes** to confirm. + + :::image type="content" source="./media/profiler/enable-profiler.png" alt-text="Screenshot of enabling Profiler on your app."::: + +## Enable Profiler using app settings + +If your Application Insights resource is in a different subscription from your App Service, you'll need to enable Profiler manually by creating app settings for your Azure App Service. You can automate the creation of these settings using a template or other means. 
The following settings are needed to enable Profiler: + +|App Setting | Value | +|---------------|----------| +|APPINSIGHTS_INSTRUMENTATIONKEY | iKey for your Application Insights resource | +|APPINSIGHTS_PROFILERFEATURE_VERSION | 1.0.0 | +|DiagnosticServices_EXTENSION_VERSION | ~3 | + +Set these values using: +- [Azure Resource Manager Templates](../app/azure-web-apps-net-core.md#app-service-application-settings-with-azure-resource-manager) +- [Azure PowerShell](/powershell/module/az.websites/set-azwebapp) +- [Azure CLI](/cli/azure/webapp/config/appsettings) + +## Enable Profiler for other clouds + +Currently, the only regions that require endpoint modifications are [Azure Government](../../azure-government/compare-azure-government-global-azure.md#application-insights) and [Azure China](/azure/china/resources-developer-guide). + +|App Setting | US Government Cloud | China Cloud | +|---------------|---------------------|-------------| +|ApplicationInsightsProfilerEndpoint | `https://profiler.monitor.azure.us` | `https://profiler.monitor.azure.cn` | +|ApplicationInsightsEndpoint | `https://dc.applicationinsights.us` | `https://dc.applicationinsights.azure.cn` | + +## Enable Azure Active Directory authentication for profile ingestion + +Application Insights Profiler supports Azure AD authentication for profile ingestion. For all profiles of your application to be ingested, your application must be authenticated and provide the required application settings to the Profiler agent. + +Profiler only supports Azure AD authentication when you reference and configure Azure AD using the Application Insights SDK in your application. + +To enable Azure AD for profile ingestion: + +1. Create and add a managed identity to your App Service to authenticate against your Application Insights resource. + + a. [System-Assigned Managed identity documentation](../../app-service/overview-managed-identity.md?tabs=portal%2chttp#add-a-system-assigned-identity) + + b. [User-Assigned Managed identity documentation](../../app-service/overview-managed-identity.md?tabs=portal%2chttp#add-a-user-assigned-identity) + +1. [Configure and enable Azure AD](../app/azure-ad-authentication.md?tabs=net#configuring-and-enabling-azure-ad-based-authentication) in your Application Insights resource. + +1. Add the following application setting to let the Profiler agent know which managed identity to use: + + For System-Assigned Identity: + + |App Setting | Value | + |---------------|----------| + |APPLICATIONINSIGHTS_AUTHENTICATION_STRING | Authorization=AAD | + + For User-Assigned Identity: + + |App Setting | Value | + |---------------|----------| + |APPLICATIONINSIGHTS_AUTHENTICATION_STRING | Authorization=AAD;ClientId={Client id of the User-Assigned Identity} | + +## Disable Profiler + +To stop or restart Profiler for an individual app's instance: + +1. Under **Settings** in the left side menu, select **WebJobs**. + + :::image type="content" source="./media/profiler/web-jobs-menu.png" alt-text="Screenshot of selecting web jobs from the left side menu."::: + +1. Select the webjob named `ApplicationInsightsProfiler3`. + +1. Select **Stop** from the top menu. + + :::image type="content" source="./media/profiler/stop-web-job.png" alt-text="Screenshot of selecting stop for stopping the webjob."::: + +1. Select **Yes** to confirm. + +We recommend that you have Profiler enabled on all your apps to discover any performance issues as early as possible. + +Profiler's files can be deleted when using WebDeploy to deploy changes to your web application. 
You can prevent the deletion by excluding the App_Data folder from being deleted during deployment. + +## Next steps + +* [Working with Application Insights in Visual Studio](../app/visual-studio.md) diff --git a/articles/azure-monitor/service-limits.md b/articles/azure-monitor/service-limits.md index b004ce0c2f28e..4149a8cd180b3 100644 --- a/articles/azure-monitor/service-limits.md +++ b/articles/azure-monitor/service-limits.md @@ -47,9 +47,9 @@ This article lists limits in different areas of Azure Monitor. ## Application Insights -[!INCLUDE [monitoring-limits](../../includes/azure-monitor-limits-app-insights.md)] +[!INCLUDE [monitoring-limits](../../includes/application-insights-limits.md)] ## Next Steps - [Azure Monitor pricing](https://azure.microsoft.com/pricing/details/monitor/) -- [Monitoring usage and estimated costs in Azure Monitor](./usage-estimated-costs.md) +- [Monitoring usage and estimated costs in Azure Monitor](./usage-estimated-costs.md) \ No newline at end of file diff --git a/articles/azure-monitor/terminology.md b/articles/azure-monitor/terminology.md index 1a59f56e76219..cc00cf96901db 100644 --- a/articles/azure-monitor/terminology.md +++ b/articles/azure-monitor/terminology.md @@ -4,7 +4,7 @@ description: Describes recent terminology changes made to Azure monitoring servi ms.topic: conceptual author: bwren ms.author: bwren -ms.date: 10/08/2019 +ms.date: 06/07/2022 --- diff --git a/articles/azure-monitor/toc.yml b/articles/azure-monitor/toc.yml index d9bf0d785cfce..0b2e44249e8d7 100644 --- a/articles/azure-monitor/toc.yml +++ b/articles/azure-monitor/toc.yml @@ -91,8 +91,6 @@ items: href: app/resource-manager-web-app.md - name: Azure Functions href: app/resource-manager-function-app.md - - name: Manage with Azure CLI - href: insights/azure-cli-application-insights-component.md - name: Container insights href: containers/resource-manager-container-insights.md - name: Data collection rules @@ -117,12 +115,8 @@ items: href: agents/data-collection-rule-sample-agent.md - name: Custom logs href: logs/data-collection-rule-sample-custom-logs.md - - name: Azure CLI - href: cli-samples.md - name: Monitor Logs in Azure CLI href: logs\azure-cli-log-analytics-workspace-sample.md - - name: Azure PowerShell - href: powershell-samples.md - name: Azure Resource Graph queries href: ./resource-graph-samples.md - name: Azure CLI metric alerts @@ -142,21 +136,6 @@ items: - name: Logs displayName: Azure Monitor Logs, Logs, Log Analytics, log query, log queries, query, queries href: logs/data-platform-logs.md - - name: Workspaces - items: - - name: Overview - href: logs/log-analytics-workspace-overview.md - - name: Design a workspace deployment - href: logs/design-logs-deployment.md - - name: Design for service providers - href: logs/service-providers.md - - name: Dedicated clusters - href: logs/logs-dedicated-clusters.md - - name: Availability zones - href: logs/availability-zones.md - - name: Log data ingestion time - displayName: latency - href: logs/data-ingestion-time.md - name: Usage and cost items: - name: View and estimate charges @@ -173,8 +152,6 @@ items: href: roles-permissions-security.md - name: Log data href: logs/data-security.md - - name: Azure AD Authentication for Logs - href: logs/azure-ad-authentication-logs.md - name: Customer-managed keys href: logs/customer-managed-keys.md - name: Private Link networking @@ -195,12 +172,8 @@ items: items: - name: Overview href: alerts/alerts-overview.md - - name: Metric alerts - href: alerts/alerts-metric-overview.md - - name: Log 
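The new profiler.md article above lists the app settings that enable Profiler when the Application Insights resource lives in a different subscription, plus the Azure AD authentication string for managed identities. A hedged Azure CLI sketch that applies those documented values (app name, resource group, and instrumentation key are placeholders):

```bash
# Sketch: apply the app settings from the new profiler.md article to an App Service app.
az webapp config appsettings set \
  --name "<app-name>" \
  --resource-group "<resource-group>" \
  --settings "APPINSIGHTS_INSTRUMENTATIONKEY=<instrumentation-key>" \
             "APPINSIGHTS_PROFILERFEATURE_VERSION=1.0.0" \
             "DiagnosticServices_EXTENSION_VERSION=~3"

# If the app authenticates with a system-assigned managed identity,
# the article also calls for the authentication string setting:
az webapp config appsettings set \
  --name "<app-name>" \
  --resource-group "<resource-group>" \
  --settings "APPLICATIONINSIGHTS_AUTHENTICATION_STRING=Authorization=AAD"
```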
alerts - href: alerts/alerts-unified-log.md - - name: Activity log alerts - href: alerts/activity-log-alerts.md + - name: Alert Types + href: alerts/alerts-types.md - name: Partner integrations href: partners.md - name: Security @@ -298,6 +271,8 @@ items: href: logs/daily-cap.md - name: Configure Basic Logs href: logs/basic-logs-configure.md + - name: Use Azure AD authentication + href: logs/azure-ad-authentication-logs.md - name: Archive and restore items: - name: Set retention and archive policy @@ -306,12 +281,23 @@ items: href: logs/search-jobs.md - name: Restore logs href: logs/restore.md + - name: Design + items: + - name: Design a workspace architecture + href: logs/workspace-design.md + - name: Dedicated clusters + href: logs/logs-dedicated-clusters.md + - name: Availability zones + href: logs/availability-zones.md - name: Monitor items: - name: Monitor a workspace href: logs/monitor-workspace.md - name: Analyze usage and cost href: logs/analyze-usage.md + - name: Log data ingestion time + displayName: latency + href: logs/data-ingestion-time.md - name: Move and delete items: - name: Move a workspace @@ -730,8 +716,8 @@ items: href: alerts/action-groups-logic-app.md - name: Alert user interface items: - - name: Manage alert instances - href: alerts/alerts-managing-alert-instances.md + - name: View and manage alert instances + href: alerts/alerts-page.md - name: Troubleshoot alerts href: alerts/alerts-troubleshoot.md - name: Alerts and region moves @@ -914,6 +900,8 @@ items: href: app/api-custom-events-metrics.md - name: Remove Application Insights href: app/remove-application-insights.md + - name: SDK support policy + href: app/sdk-support-guidance.md - name: OpenTelemetry-based instrumentation items: - name: Overview @@ -1014,33 +1002,6 @@ items: href: app/standard-metrics.md - name: Transaction search href: app/diagnostic-search.md - - name: Profiler - items: - - name: Overview - displayName: profiling, debugging, code performance, perf - href: app/profiler-overview.md - - name: Enable Profiler for an App Service - href: app/profiler.md - - name: Enable Profiler for an Azure Functions app - href: app/profiler-azure-functions.md - - name: Enable Profiler for a Cloud Service - href: app/profiler-cloudservice.md - - name: Enable Profiler for a Service Fabric Application - href: app/profiler-servicefabric.md - - name: Enable Profiler for an Azure VM - href: app/profiler-vm.md - - name: Enable Profiler for Linux App services (preview) - href: app/profiler-aspnetcore-linux.md - - name: Enable Profiler for a Container - href: app/profiler-containers.md - - name: Profiler Settings - href: app/profiler-settings.md - - name: Track Requests for Profiling - href: app/profiler-trackrequests.md - - name: Configure BYOS (Bring Your Own Storage) - href: app/profiler-bring-your-own-storage.md - - name: Profiler Troubleshooting - href: app/profiler-troubleshooting.md - name: Snapshot Debugger items: - name: Overview @@ -1055,7 +1016,7 @@ items: - name: Upgrade Snapshot Debugger href: app/snapshot-debugger-upgrade.md - name: Configure BYOS (Bring Your Own Storage) - href: app/profiler-bring-your-own-storage.md + href: profiler/profiler-bring-your-own-storage.md - name: Snapshot Debugger troubleshooting displayName: troubleshoot, troubleshooting href: app/snapshot-debugger-troubleshoot.md @@ -1182,6 +1143,37 @@ items: href: app/sharepoint.md - name: Windows desktop href: app/windows-desktop.md + - name: Profiler + items: + - name: Overview + displayName: profiling, debugging, code performance, perf 
+ href: profiler/profiler-overview.md + - name: Enable Profiler + items: + - name: App Service + href: profiler/profiler.md + - name: Azure Functions app + href: profiler/profiler-azure-functions.md + - name: Cloud Service + href: profiler/profiler-cloudservice.md + - name: Service Fabric Application + href: profiler/profiler-servicefabric.md + - name: Azure VM + href: profiler/profiler-vm.md + - name: Linux App services (preview) + href: profiler/profiler-aspnetcore-linux.md + - name: ASP.NET Core Container + href: profiler/profiler-containers.md + - name: Configure + items: + - name: Configure Profiler + href: profiler/profiler-settings.md + - name: Configure BYOS (Bring Your Own Storage) + href: profiler/profiler-bring-your-own-storage.md + - name: Track Requests + href: profiler/profiler-trackrequests.md + - name: Troubleshooting + href: profiler/profiler-troubleshooting.md - name: Change Analysis items: - name: Overview @@ -1200,8 +1192,6 @@ items: items: - name: Overview href: vm/vminsights-overview.md - - name: General Availability FAQ - href: vm/vminsights-ga-release-faq.yml - name: FAQ href: /azure/azure-monitor/faq#vm-insights - name: Configure workspace @@ -1243,7 +1233,7 @@ items: - name: Troubleshoot href: vm/vminsights-health-troubleshoot.md - name: Analyze data with log queries - href: vm/vminsights-log-search.md + href: vm/vminsights-log-query.md - name: Monitor changes href: vm/vminsights-change-analysis.md - name: Visualize data with workbooks @@ -1258,38 +1248,38 @@ items: href: containers/container-insights-overview.md - name: FAQ href: /azure/azure-monitor/faq#container-insights - - name: Enable monitoring + - name: Enable + displayName: container insights items: - - name: Enable monitoring overview + - name: Overview + displayName: container insights href: containers/container-insights-onboard.md - - name: Enable for new AKS cluster + - name: New AKS cluster href: containers/container-insights-enable-new-cluster.md - - name: Enable for existing AKS cluster + - name: Existing AKS cluster href: containers/container-insights-enable-existing-clusters.md - - name: Enable for Azure Arc-enabled cluster + - name: Azure Arc-enabled cluster href: containers/container-insights-enable-arc-enabled-clusters.md - - name: Enable for hybrid cluster + - name: Hybrid cluster href: containers/container-insights-hybrid-setup.md - - name: Enable for Azure Red Hat OpenShift v3 - href: containers/container-insights-azure-redhat-setup.md - - name: Enable for Azure Red Hat OpenShift v4 - href: containers/container-insights-azure-redhat4-setup.md - name: Enable with Azure Policy href: containers/container-insights-enable-aks-policy.md - - name: Region mappings + - name: Supported region mappings href: containers/container-insights-region-mapping.md + - name: Migrate from Azure Red Hat OpenShift v4.x + href: containers/container-insights-transition-hybrid.md + - name: Configure + items: - name: Agent management href: containers/container-insights-manage-agent.md - - name: Configure Prometheus metric scraping - href: containers/container-insights-prometheus-integration.md - - name: Configure agent collection settings + - name: Agent collection settings href: containers/container-insights-agent-config.md - - name: Update to enable metrics + - name: Prometheus metric scraping + href: containers/container-insights-prometheus-integration.md + - name: Enable metrics href: containers/container-insights-update-metrics.md - name: Enable ContainerLogv2 (preview) href: 
containers/container-insights-logging-v2.md - - name: Migration from Azure Red Hat OpenShift v4.x to Azure Arc enabled cluster - href: containers/container-insights-transition-hybrid.md - name: Monitor items: - name: Monitor performance @@ -1343,7 +1333,7 @@ items: - name: Azure Cosmos DB href: insights/cosmosdb-insights-overview.md - name: Azure Data Explorer - href: insights/data-explorer.md + href: /azure/data-explorer/data-explorer-insights - name: Log Analytics workspace href: logs/log-analytics-workspace-insights-overview.md - name: Networks diff --git a/articles/azure-monitor/usage-estimated-costs.md b/articles/azure-monitor/usage-estimated-costs.md index 8354d5089616b..5f8483806ad13 100644 --- a/articles/azure-monitor/usage-estimated-costs.md +++ b/articles/azure-monitor/usage-estimated-costs.md @@ -20,8 +20,8 @@ Several other features don't have a direct cost, but you instead pay for the ing |:---|:---| | Logs | Ingestion, retention, and export of data in Log Analytics workspaces and legacy Application insights resources. This will typically be the bulk of Azure Monitor charges for most customers. There is no charge for querying this data except in the case of [Basic Logs](logs/basic-logs-configure.md) or [Archived Logs](logs/data-retention-archive.md).

                  Charges for Logs can vary significantly on the configuration that you choose. See [Azure Monitor Logs pricing details](logs/cost-logs.md) for details on how charges for Logs data are calculated and the different pricing tiers available. | | Platform Logs | Processing of [diagnostic and auditing information](essentials/resource-logs.md) is charged for [certain services](essentials/resource-logs-categories.md#costs) when sent to destinations other than a Log Analytics workspace. There's no direct charge when this data is sent to a Log Analytics workspace, but there is a charge for the workspace data ingestion and collection. | -| Metrics | There is no charge for [standard metrics](essentials/metrics-supported.md) collected from Azure resources. There is a cost for cost for collecting [custom metrics](essentials/metrics-custom-overview.md) and for retrieving metrics from the [REST API](essentials/rest-api-walkthrough.md#retrieve-metric-values). | -| Alerts | Charged based on the type and number of [signals](alerts/alerts-overview.md#what-you-can-alert-on) used by the alert rule, its frequency, and the type of [notification](alerts/action-groups.md) used in response. For [log alerts](alerts/alerts-unified-log.md) configured for [at scale monitoring](alerts/alerts-unified-log.md#split-by-alert-dimensions), the cost will also depend on the number of time series created by the dimensions resulting from your query. | +| Metrics | There is no charge for [standard metrics](essentials/metrics-supported.md) collected from Azure resources. There is a cost for collecting [custom metrics](essentials/metrics-custom-overview.md) and for retrieving metrics from the [REST API](essentials/rest-api-walkthrough.md#retrieve-metric-values). | +| Alerts | Charged based on the type and number of signals used by the alert rule, its frequency, and the type of [notification](alerts/action-groups.md) used in response. For [Log alerts](alerts/alerts-types.md#log-alerts) configured for [at scale monitoring](alerts/alerts-types.md#splitting-by-dimensions-in-log-alert-rules), the cost will also depend on the number of time series created by the dimensions resulting from your query. | | Web tests | There is a cost for [standard web tests](app/availability-standard-tests.md) and [multi-step web tests](app/availability-multistep.md) in Application Insights. Multi-step web tests have been deprecated. ## Data transfer charges @@ -136,7 +136,7 @@ To view data allocation benefits from sources such as [Microsoft Defender for Se Customers who purchased Microsoft Operations Management Suite E1 and E2 are eligible for per-node data ingestion entitlements for Log Analytics and Application Insights. Each Application Insights node includes up to 200 MB of data ingested per day (separate from Log Analytics data ingestion), with 90-day data retention at no extra cost. -To receive these entitlements for Log Analytics workspaces or Application Insights resources in a subscription, they must be use the Per-Node (OMS) pricing tier. This entitlement isn't visible in the estimated costs shown in the Usage and estimated cost pane. +To receive these entitlements for Log Analytics workspaces or Application Insights resources in a subscription, they must use the Per-Node (OMS) pricing tier. This entitlement isn't visible in the estimated costs shown in the Usage and estimated cost pane. 
Depending on the number of nodes of the suite that your organization purchased, moving some subscriptions into a Per GB (pay-as-you-go) pricing tier might be advantageous, but this requires careful consideration. diff --git a/articles/azure-monitor/visualize/view-designer.md b/articles/azure-monitor/visualize/view-designer.md index f36b783953fa2..89cd26dd51f0d 100644 --- a/articles/azure-monitor/visualize/view-designer.md +++ b/articles/azure-monitor/visualize/view-designer.md @@ -36,7 +36,7 @@ The views that you create with View Designer contain the elements that are descr | Visualization parts | Present a visualization of data in the Log Analytics workspace based on one or more [log queries](../logs/log-query-overview.md). Most parts include a header, which provides a high-level visualization, and a list, which displays the top results. Each part type provides a different visualization of the records in the Log Analytics workspace. You select elements in the part to perform a log query that provides detailed records. | ## Required permissions -You require at least [contributor level permissions](../logs/manage-access.md#manage-access-using-azure-permissions) in the Log Analytics workspace to create or modify views. If you don't have this permission, then the View Designer option won't be displayed in the menu. +You require at least [contributor level permissions](../logs/manage-access.md#azure-rbac) in the Log Analytics workspace to create or modify views. If you don't have this permission, then the View Designer option won't be displayed in the menu. ## Work with an existing view diff --git a/articles/azure-monitor/visualize/workbooks-chart-visualizations.md b/articles/azure-monitor/visualize/workbooks-chart-visualizations.md index 75f7e5df44abe..3132b90d8d36e 100644 --- a/articles/azure-monitor/visualize/workbooks-chart-visualizations.md +++ b/articles/azure-monitor/visualize/workbooks-chart-visualizations.md @@ -102,7 +102,7 @@ requests | summarize Request = count() by bin(timestamp, 1h), RequestName = name ``` -Even though the underlying result set is different. All a user has to do is set the visualization to area, line, bar, or time and Workbooks will take care of the rest. +Even though the queries return results in different formats, when a user sets the visualization to area, line, bar, or time, Workbooks understands how to handle the data to create the visualization. [![Screenshot of a log line chart made from a make-series query](./media/workbooks-chart-visualizations/log-chart-line-make-series.png)](./media/workbooks-chart-visualizations/log-chart-line-make-series.png#lightbox) @@ -221,4 +221,4 @@ The series setting tab lets you adjust the labels and colors shown for series in ## Next steps - Learn how to create a [tile in workbooks](workbooks-tile-visualizations.md). -- Learn how to create [interactive workbooks](workbooks-interactive.md). \ No newline at end of file +- Learn how to create [interactive workbooks](workbooks-interactive.md). 
diff --git a/articles/azure-monitor/vm/media/vminsights-workbooks/workbook-gallery-01.png b/articles/azure-monitor/vm/media/vminsights-workbooks/workbook-gallery-01.png index 8aff71f5e2885..8a65b528b1b9a 100644 Binary files a/articles/azure-monitor/vm/media/vminsights-workbooks/workbook-gallery-01.png and b/articles/azure-monitor/vm/media/vminsights-workbooks/workbook-gallery-01.png differ diff --git a/articles/azure-monitor/vm/monitor-virtual-machine-alerts.md b/articles/azure-monitor/vm/monitor-virtual-machine-alerts.md index 36f287f49bd53..28f7ddd599963 100644 --- a/articles/azure-monitor/vm/monitor-virtual-machine-alerts.md +++ b/articles/azure-monitor/vm/monitor-virtual-machine-alerts.md @@ -6,6 +6,7 @@ ms.topic: conceptual author: bwren ms.author: bwren ms.date: 06/21/2021 +ms.reviewer: Xema Pathak --- @@ -83,7 +84,7 @@ Use a rule with the following query. ```kusto Heartbeat -| summarize TimeGenerated=max(TimeGenerated) by Computer +| summarize TimeGenerated=max(TimeGenerated) by Computer, _ResourceId | extend Duration = datetime_diff('minute',now(),TimeGenerated) | summarize AggregatedValue = min(Duration) by Computer, bin(TimeGenerated,5m), _ResourceId ``` diff --git a/articles/azure-monitor/vm/monitor-virtual-machine-analyze.md b/articles/azure-monitor/vm/monitor-virtual-machine-analyze.md index 95e026e2ff426..4fe577f4a8550 100644 --- a/articles/azure-monitor/vm/monitor-virtual-machine-analyze.md +++ b/articles/azure-monitor/vm/monitor-virtual-machine-analyze.md @@ -6,6 +6,7 @@ ms.topic: conceptual author: bwren ms.author: bwren ms.date: 06/21/2021 +ms.reviewer: Xema Pathak --- diff --git a/articles/azure-monitor/vm/monitor-virtual-machine-configure.md b/articles/azure-monitor/vm/monitor-virtual-machine-configure.md index 17cba6b883179..dc384dffc6c51 100644 --- a/articles/azure-monitor/vm/monitor-virtual-machine-configure.md +++ b/articles/azure-monitor/vm/monitor-virtual-machine-configure.md @@ -6,6 +6,7 @@ ms.topic: conceptual author: bwren ms.author: bwren ms.date: 06/21/2021 +ms.reviewer: Xema Pathak --- @@ -42,7 +43,7 @@ You require at least one Log Analytics workspace to support VM insights and to c Many environments use a single workspace for all their virtual machines and other Azure resources they monitor. You can even share a workspace used by [Microsoft Defender for Cloud and Microsoft Sentinel](monitor-virtual-machine-security.md), although many customers choose to segregate their availability and performance telemetry from security data. If you're getting started with Azure Monitor, start with a single workspace and consider creating more workspaces as your requirements evolve. -For complete details on logic that you should consider for designing a workspace configuration, see [Designing your Azure Monitor Logs deployment](../logs/design-logs-deployment.md). +For complete details on logic that you should consider for designing a workspace configuration, see [Design a Log Analytics workspace configuration](../logs/workspace-design.md). ### Multihoming agents Multihoming refers to a virtual machine that connects to multiple workspaces. Typically, there's little reason to multihome agents for Azure Monitor alone. Having an agent send data to multiple workspaces most likely creates duplicate data in each workspace, which increases your overall cost. You can combine data from multiple workspaces by using [cross-workspace queries](../logs/cross-workspace-query.md) and [workbooks](../visualizations/../visualize/workbooks-overview.md).
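+
+As an illustration of combining data across workspaces, the following sketch uses the Log Analytics `workspace()` expression in a cross-workspace query. The workspace names `ContosoWorkspace1` and `ContosoWorkspace2` are placeholders rather than names taken from this article.
+
+```kusto
+// Combine heartbeat data from two workspaces and report the latest heartbeat per computer.
+// Replace the placeholder workspace names with your own workspaces.
+union
+    workspace("ContosoWorkspace1").Heartbeat,
+    workspace("ContosoWorkspace2").Heartbeat
+| summarize LastHeartbeat = max(TimeGenerated) by Computer
+```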
diff --git a/articles/azure-monitor/vm/monitor-virtual-machine-security.md b/articles/azure-monitor/vm/monitor-virtual-machine-security.md index 826811e9dd50e..d4fa4469a4b73 100644 --- a/articles/azure-monitor/vm/monitor-virtual-machine-security.md +++ b/articles/azure-monitor/vm/monitor-virtual-machine-security.md @@ -6,6 +6,7 @@ ms.topic: conceptual author: bwren ms.author: bwren ms.date: 06/21/2021 +ms.reviewer: Xema Pathak --- diff --git a/articles/azure-monitor/vm/monitor-virtual-machine-workloads.md b/articles/azure-monitor/vm/monitor-virtual-machine-workloads.md index 96573ffea7b1b..ffc6d9d3e6d9a 100644 --- a/articles/azure-monitor/vm/monitor-virtual-machine-workloads.md +++ b/articles/azure-monitor/vm/monitor-virtual-machine-workloads.md @@ -6,6 +6,7 @@ ms.topic: conceptual author: bwren ms.author: bwren ms.date: 06/21/2021 +ms.reviewer: Xema Pathak --- diff --git a/articles/azure-monitor/vm/monitor-virtual-machine.md b/articles/azure-monitor/vm/monitor-virtual-machine.md index a5e91b320c225..daa9b074fd6ce 100644 --- a/articles/azure-monitor/vm/monitor-virtual-machine.md +++ b/articles/azure-monitor/vm/monitor-virtual-machine.md @@ -6,6 +6,7 @@ ms.topic: conceptual author: bwren ms.author: bwren ms.date: 06/02/2021 +ms.reviewer: Xema Pathak --- diff --git a/articles/azure-monitor/vm/resource-manager-vminsights.md b/articles/azure-monitor/vm/resource-manager-vminsights.md index c5d52307c08c7..54e6def53d2fd 100644 --- a/articles/azure-monitor/vm/resource-manager-vminsights.md +++ b/articles/azure-monitor/vm/resource-manager-vminsights.md @@ -5,6 +5,7 @@ ms.topic: sample author: bwren ms.author: bwren ms.date: 05/18/2020 +ms.reviewer: Xema Pathak --- diff --git a/articles/azure-monitor/vm/service-map-scom.md b/articles/azure-monitor/vm/service-map-scom.md index 538a7a7b7d58d..5c95797c5bed6 100644 --- a/articles/azure-monitor/vm/service-map-scom.md +++ b/articles/azure-monitor/vm/service-map-scom.md @@ -5,6 +5,7 @@ ms.topic: conceptual author: bwren ms.author: bwren ms.date: 07/12/2019 +ms.reviewer: Xema Pathak --- diff --git a/articles/azure-monitor/vm/service-map.md b/articles/azure-monitor/vm/service-map.md index bda13ea36b93c..79ddf42a11619 100644 --- a/articles/azure-monitor/vm/service-map.md +++ b/articles/azure-monitor/vm/service-map.md @@ -5,6 +5,7 @@ ms.topic: conceptual author: bwren ms.author: bwren ms.date: 07/24/2019 +ms.reviewer: Xema Pathak --- diff --git a/articles/azure-monitor/vm/tutorial-monitor-vm-alert.md b/articles/azure-monitor/vm/tutorial-monitor-vm-alert.md index 03f348f565975..103c7be23b5ff 100644 --- a/articles/azure-monitor/vm/tutorial-monitor-vm-alert.md +++ b/articles/azure-monitor/vm/tutorial-monitor-vm-alert.md @@ -5,6 +5,7 @@ ms.service: azure-monitor ms.topic: article ms.custom: subject-monitoring ms.date: 11/04/2021 +ms.reviewer: Xema Pathak --- # Tutorial: Create alert when Azure virtual machine is unavailable diff --git a/articles/azure-monitor/vm/tutorial-monitor-vm-enable.md b/articles/azure-monitor/vm/tutorial-monitor-vm-enable.md index 181b4405d3f5d..0c5c11c5c7e67 100644 --- a/articles/azure-monitor/vm/tutorial-monitor-vm-enable.md +++ b/articles/azure-monitor/vm/tutorial-monitor-vm-enable.md @@ -5,6 +5,7 @@ ms.service: azure-monitor ms.topic: article ms.custom: subject-monitoring ms.date: 11/04/2021 +ms.reviewer: Xema Pathak --- # Tutorial: Enable monitoring for Azure virtual machine diff --git a/articles/azure-monitor/vm/tutorial-monitor-vm-guest.md b/articles/azure-monitor/vm/tutorial-monitor-vm-guest.md index 64ef66b10fa81..760516e7ece49 
100644 --- a/articles/azure-monitor/vm/tutorial-monitor-vm-guest.md +++ b/articles/azure-monitor/vm/tutorial-monitor-vm-guest.md @@ -5,6 +5,7 @@ ms.service: azure-monitor ms.topic: article ms.custom: subject-monitoring ms.date: 11/08/2021 +ms.reviewer: Xema Pathak --- # Tutorial: Collect guest logs and metrics from Azure virtual machine diff --git a/articles/azure-monitor/vm/vminsights-configure-workspace.md b/articles/azure-monitor/vm/vminsights-configure-workspace.md index 130afaf6504fb..0fd17d02d4c06 100644 --- a/articles/azure-monitor/vm/vminsights-configure-workspace.md +++ b/articles/azure-monitor/vm/vminsights-configure-workspace.md @@ -5,7 +5,7 @@ ms.topic: conceptual ms.custom: references_regions, devx-track-azurepowershell author: bwren ms.author: bwren -ms.date: 12/22/2020 +ms.date: 06/07/2022 --- @@ -30,7 +30,7 @@ Access Log Analytics workspaces in the Azure portal from the **Log Analytics wor [![Log Anlytics workspaces](media/vminsights-configure-workspace/log-analytics-workspaces.png)](media/vminsights-configure-workspace/log-analytics-workspaces.png#lightbox) -You can create a new Log Analytics workspace using any of the following methods. See [Designing your Azure Monitor Logs deployment](../logs/design-logs-deployment.md) for guidance on determining the number of workspaces you should use in your environment and how to design their access strategy. +You can create a new Log Analytics workspace using any of the following methods. See [Design a Log Analytics workspace configuration](../logs/workspace-design.md) for guidance on determining the number of workspaces you should use in your environment and how to design their access strategy. * [Azure portal](../logs/quick-create-workspace.md) @@ -46,7 +46,7 @@ VM insights supports a Log Analytics workspace in any of the [regions supported >You can monitor Azure VMs in any region. The VMs themselves aren't limited to the regions supported by the Log Analytics workspace. ## Azure role-based access control -To enable and access the features in VM insights, you must have the [Log Analytics contributor role](../logs/manage-access.md#manage-access-using-azure-permissions) in the workspace. To view performance, health, and map data, you must have the [monitoring reader role](../roles-permissions-security.md#built-in-monitoring-roles) for the Azure VM. For more information about how to control access to a Log Analytics workspace, see [Manage workspaces](../logs/manage-access.md). +To enable and access the features in VM insights, you must have the [Log Analytics contributor role](../logs/manage-access.md#azure-rbac) in the workspace. To view performance, health, and map data, you must have the [monitoring reader role](../roles-permissions-security.md#built-in-monitoring-roles) for the Azure VM. For more information about how to control access to a Log Analytics workspace, see [Manage workspaces](../logs/manage-access.md). ## Add VMInsights solution to workspace Before a Log Analytics workspace can be used with VM insights, it must have the *VMInsights* solution installed. The methods for configuring the workspace are described in the following sections.
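+
+After the solution is added and machines are onboarded, one way to confirm that VM insights data is arriving in the workspace is a log query along the lines of the following sketch; the one-hour time range is only an example.
+
+```kusto
+// Count VM insights performance records received per computer during the last hour.
+InsightsMetrics
+| where Origin == "vm.azm.ms"
+| where TimeGenerated > ago(1h)
+| summarize Records = count() by Computer
+| sort by Records desc
+```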
diff --git a/articles/azure-monitor/vm/vminsights-enable-hybrid.md b/articles/azure-monitor/vm/vminsights-enable-hybrid.md index 44c0ec27dd68b..39e37f248af6b 100644 --- a/articles/azure-monitor/vm/vminsights-enable-hybrid.md +++ b/articles/azure-monitor/vm/vminsights-enable-hybrid.md @@ -4,7 +4,7 @@ description: This article describes how you enable VM insights for a hybrid clou ms.topic: conceptual author: bwren ms.author: bwren -ms.date: 07/27/2020 +ms.date: 06/08/2022 --- diff --git a/articles/azure-monitor/vm/vminsights-enable-overview.md b/articles/azure-monitor/vm/vminsights-enable-overview.md index 93c2705ce7bc9..a7eb8bc25a5e3 100644 --- a/articles/azure-monitor/vm/vminsights-enable-overview.md +++ b/articles/azure-monitor/vm/vminsights-enable-overview.md @@ -4,7 +4,7 @@ description: Learn how to deploy and configure VM insights. Find out the system ms.topic: conceptual author: bwren ms.author: bwren -ms.date: 12/22/2020 +ms.date: 06/08/2022 ms.custom: references_regions --- diff --git a/articles/azure-monitor/vm/vminsights-enable-policy.md b/articles/azure-monitor/vm/vminsights-enable-policy.md index ef5140777db33..2a5429528c52f 100644 --- a/articles/azure-monitor/vm/vminsights-enable-policy.md +++ b/articles/azure-monitor/vm/vminsights-enable-policy.md @@ -4,7 +4,7 @@ description: Describes how you enable VM insights for multiple Azure virtual mac ms.topic: conceptual author: bwren ms.author: bwren -ms.date: 07/27/2020 +ms.date: 06/08/2022 --- diff --git a/articles/azure-monitor/vm/vminsights-enable-portal.md b/articles/azure-monitor/vm/vminsights-enable-portal.md index 0f4f96624a7ba..2ae2cbff4fcd5 100644 --- a/articles/azure-monitor/vm/vminsights-enable-portal.md +++ b/articles/azure-monitor/vm/vminsights-enable-portal.md @@ -4,7 +4,7 @@ description: Learn how to enable VM insights on a single Azure virtual machine o ms.topic: conceptual author: bwren ms.author: bwren -ms.date: 07/27/2020 +ms.date: 06/08/2022 --- diff --git a/articles/azure-monitor/vm/vminsights-enable-powershell.md b/articles/azure-monitor/vm/vminsights-enable-powershell.md index 5e31d583c0ead..6b305fc5d9096 100644 --- a/articles/azure-monitor/vm/vminsights-enable-powershell.md +++ b/articles/azure-monitor/vm/vminsights-enable-powershell.md @@ -4,7 +4,7 @@ description: Describes how to enable VM insights for Azure virtual machines or v ms.topic: conceptual author: bwren ms.author: bwren -ms.date: 07/27/2020 +ms.date: 06/08/2022 --- diff --git a/articles/azure-monitor/vm/vminsights-enable-resource-manager.md b/articles/azure-monitor/vm/vminsights-enable-resource-manager.md index ccc4ff964bdc2..3db561340ba71 100644 --- a/articles/azure-monitor/vm/vminsights-enable-resource-manager.md +++ b/articles/azure-monitor/vm/vminsights-enable-resource-manager.md @@ -4,7 +4,7 @@ description: This article describes how you enable VM insights for one or more A ms.topic: conceptual author: bwren ms.author: bwren -ms.date: 07/27/2020 +ms.date: 06/08/2022 --- diff --git a/articles/azure-monitor/vm/vminsights-ga-release-faq.yml b/articles/azure-monitor/vm/vminsights-ga-release-faq.yml deleted file mode 100644 index e6d87f8809efb..0000000000000 --- a/articles/azure-monitor/vm/vminsights-ga-release-faq.yml +++ /dev/null @@ -1,132 +0,0 @@ -### YamlMime:FAQ -metadata: - title: 'VM insights (GA) frequently asked questions | Microsoft Docs' - description: 'VM insights is a solution in Azure that combines health and performance monitoring of the Azure VM operating system, as well as automatically discovering application components and 
dependencies with other resources and maps the communication between them. This article answers common questions about the GA release.' - ms.topic: faq - author: bwren - ms.author: bwren - ms.date: 01/31/2020 - ms.service: azure-monitor -title: VM insights Generally Available (GA) Frequently Asked Questions -summary: This General Availability FAQ covers changes that were made in Q4 2019 and Q1 2020 as we prepared for GA. - - -sections: - - name: Ignored - questions: - - question: | - Updates for VM insights - answer: | - We released a new version of VM insights in January 2020 ahead of our GA announcement. Customers enabling VM insights will now receive the GA version, but existing customers using the version of VM insights from Q4 2019 and earlier will be prompted to upgrade. This FAQ offers guidance to perform an upgrade at scale if you have large deployments across multiple workspaces. - - - With this upgrade, VM insights performance data is stored in the same *InsightsMetrics* table as [Container insights](../containers/container-insights-overview.md), which makes it easier for you to query the two data sets. Also, you are able to store more diverse data sets that we could not store in the table previously used. - - Our performance views are now using the data we store in the *InsightsMetrics* table. If you have not yet upgraded to use the latest VMInsights solution on your workspace, your charts will no longer display information. You can upgrade from our **Get Started** page as described below. - - - - question: | - What is changing? - answer: | - We have released a new solution, named VMInsights, that includes more capabilities for data collection along with a new location for storing this data in your Log Analytics workspace. - - In the past, we enabled the ServiceMap solution on your workspace and setup performance counters in your Log Analytics workspace to send the data to the *Perf* table. This new solution sends the data to a table named *InsightsMetrics* that is also used by Container insights. This table schema allows us to store more metrics and service data sets that are not compatible with the *Perf* table format. - - We have updated our Performance charts to use the data we store in the *InsightsMetrics* table. You can upgrade to use the *InsightsMetrics* table from our **Get Started** page as described below. - - - - question: | - How do I upgrade? - answer: | - When a Log Analytics workspace is upgraded to the latest version of Azure Monitor to VMs, it will upgrade the dependency agent on each of the VMs attached to that workspace. Each VM requiring upgrade will be identified in the **Get Started** tab in VM insights in the Azure portal. When you choose to upgrade a VM, it will upgrade the workspace for that VM along with any other VMs attached to that workspace. You can select a single VM or multiple VMs, resource groups, or subscriptions. - - Use the following command to upgrade a workspace using PowerShell: - - ```PowerShell - Set-AzOperationalInsightsIntelligencePack -ResourceGroupName -WorkspaceName -IntelligencePackName "VMInsights" -Enabled $True - ``` - - - question: | - What should I do about the Performance counters in my workspace if I install the VMInsights solution? - answer: | - The previous method of enabling VM insights used performance counters in your workspace. The current version stores this data in a table named `InsightsMetrics`. You may choose to disable these performance counters in your workspace if you no longer need to use them. 
- - >[!NOTE] - >If you have Alert Rules that reference these counters in the `Perf` table, you need to update them to reference new data stored in the `InsightsMetrics` table. Refer to our documentation for example log queries that you can use that refer to this table. - > - - If you decide to keep the performance counters enabled, you will be billed for the data ingested and stored in the `Perf` table based on [Log Analytics pricing](https://azure.microsoft.com/pricing/details/monitor/). - - - question: | - How will this change affect my alert rules? - answer: | - If you have created [Log alerts](../alerts/alerts-unified-log.md) that query the `Perf` table targeting performance counters that were enabled in the workspace, you should update these rules to refer to the `InsightsMetrics` table instead. This guidance also applies to any log search rules using `ServiceMapComputer_CL` and `ServiceMapProcess_CL`, because those data sets are moving to `VMComputer` and `VMProcess` tables. - - We will update this FAQ and our documentation to include example log search alert rules for the data sets we collect. - - - question: | - How will this change affect my bill? - answer: | - Billing is still based on data ingested and retained in your Log Analytics workspace. - - The machine level performance data that we collect is the same, is of a similar size to the data we stored in the `Perf` table, and will cost approximately the same amount. - - - question: | - What if I only want to use Service Map? - answer: | - That is fine. You will see prompts in the Azure portal when viewing VM insights about the upcoming update. Once released, you will receive a prompt requesting that you update to the new version. If you prefer to only use the [Maps](vminsights-maps.md) feature, you can choose not to upgrade and continue to use the Maps feature in VM insights and the Service Map solution accessed from your workspace or dashboard tile. - - If you chose to manually enable the performance counters in your workspace, then you may be able to see data in some of our performance charts viewed from Azure Monitor. Once the new solution is released we will update our performance charts to query the data stored in the `InsightsMetrics` table. If you would like to see data from that table in these charts, you will need to upgrade to the new version of VM insights. - - The changes to move data from `ServiceMapComputer_CL` and `ServiceMapProcess_CL` will affect both Service Map and VM insights, so you still need to plan for this update. - - If you chose to not upgrade to the **VMInsights** solution, we will continue to provide legacy versions of our performance workbooks that refer to data in the `Perf` table. - - - question: | - Will the Service Map data sets also be stored in InsightsMetrics? - answer: | - The data sets will not be duplicated if you use both solutions. Both offerings share the data sets that will be stored in `VMComputer` (formerly ServiceMapComputer_CL), `VMProcess` (formerly ServiceMapProcess_CL), `VMConnection`, and `VMBoundPort` tables to store the map data sets that we collect. - - The `InsightsMetrics` table will store VM, process, and service data sets that we collect and will only be populated if you are using VM insights and the VM Insights solution. The Service Map solution will not collect or store data in the `InsightsMetrics` table. - - - question: | - Will I be double charged if I have the Service Map and VMInsights solutions in my workspace? 
- answer: | - No, the two solutions share the map data sets that we store in `VMComputer` (formerly ServiceMapComputer_CL), `VMProcess` (formerly ServiceMapProcess_CL), `VMConnection`, and `VMBoundPort`. You will not be double charged for this data if you have both solutions in your workspace. - - - question: | - If I remove either the Service Map or VMInsights solution, will it remove my data? - answer: | - No, the two solutions share the map data sets that we store in `VMComputer` (formerly ServiceMapComputer_CL), `VMProcess` (formerly ServiceMapProcess_CL), `VMConnection`, and `VMBoundPort`. If you remove one of the solutions, these data sets notice that there is still a solution in place that uses the data and it remains in the Log Analytics workspace. You need to remove both solutions from your workspace in order for the data to be removed from it. - - - question: | - Health feature is in limited public preview - answer: | - We have received a lot of great feedback from customers about our VM Health feature set. There is asignificant interest around this feature and excitement over its potential for supporting monitoring workflows. We are planning to make a series of changes to add functionality and address the feedback we have received. - - To minimize impact of these changes to new customers, we have moved this feature into a **limited public preview**. This update happened in October 2019. - - We plan to re-launch this Health feature in 2020, after VM insights is in GA. - - - question: | - How do existing customers access the Health feature? - answer: | - Existing customers that are using the Health feature will continue to have access to it, but it will not be offered to new customers. - - To access the feature, you can add the following feature flag `feature.vmhealth=true` to the Azure portal URL [https://portal.azure.com](https://portal.azure.com). Example `https://portal.azure.com/?feature.vmhealth=true`. - - You can also use this short url, which sets the feature flag automatically: [https://aka.ms/vmhealthpreview](https://aka.ms/vmhealthpreview). - - As an existing customer, you can continue to use the Health feature on VMs that are connected to an existing workspace setup with the health functionality. - - - question: | - I use VM Health now with one environment and would like to deploy it to a new one - answer: | - If you are an existing customer that is using the Health feature and want to use it for a new roll-out, contact us at vminsights@microsoft.com to request instructions. - - -additionalContent: | - - ## Next steps - - To understand the requirements and methods that help you monitor your virtual machines, review [Deploy VM insights](./vminsights-enable-overview.md). diff --git a/articles/azure-monitor/vm/vminsights-health-alerts.md b/articles/azure-monitor/vm/vminsights-health-alerts.md index 98c5b2c5fd84b..5365b64e1eec1 100644 --- a/articles/azure-monitor/vm/vminsights-health-alerts.md +++ b/articles/azure-monitor/vm/vminsights-health-alerts.md @@ -33,7 +33,7 @@ An [Azure alert](../alerts/alerts-overview.md) will be created for each virtual If an alert is already in **Fired** state when the virtual machine state changes, then a second alert won't be created, but the severity of the same alert will be changed to match the state of the virtual machine. For example, if the virtual machine changes to **Critical** state when a **Warning** alert was already in **Fired** state, that alert's severity will be changed to **Sev1**. 
If the virtual machine changes to a **Warning** state when a **Sev1** alert was already in **Fired** state, that alert's severity will be changed to **Sev2**. If the virtual machine moves back to a **Healthy** state, then the alert will be resolved with severity changed to **Sev4**. ## Viewing alerts -View alerts created by VM insights guest health with other [alerts in the Azure portal](../alerts/alerts-overview.md#alerts-experience). You can select **Alerts** from the **Azure Monitor** menu to view alerts for all monitored resources, or select **Alerts** from a virtual machine's menu to view alerts for just that virtual machine. +View alerts created by VM insights guest health with other [alerts in the Azure portal](../alerts/alerts-page.md). You can select **Alerts** from the **Azure Monitor** menu to view alerts for all monitored resources, or select **Alerts** from a virtual machine's menu to view alerts for just that virtual machine. ## Alert properties diff --git a/articles/azure-monitor/vm/vminsights-log-query.md b/articles/azure-monitor/vm/vminsights-log-query.md new file mode 100644 index 0000000000000..761381fe1794c --- /dev/null +++ b/articles/azure-monitor/vm/vminsights-log-query.md @@ -0,0 +1,472 @@ +--- +title: How to Query Logs from VM insights +description: The VM insights solution collects metrics and log data, and this article describes the records and includes sample queries. +ms.topic: conceptual +author: bwren +ms.author: bwren +ms.date: 06/08/2022 +--- + +# How to query logs from VM insights + +VM insights collects performance and connection metrics, computer and process inventory data, and health state information and forwards it to the Log Analytics workspace in Azure Monitor. This data is available for [query](../logs/log-query-overview.md) in Azure Monitor. You can apply this data to scenarios that include migration planning, capacity analysis, discovery, and on-demand performance troubleshooting. + +## Map records + +One record is generated per hour for each unique computer and process, in addition to the records that are generated when a process or computer starts or is added to VM insights. The fields and values in the ServiceMapComputer_CL events map to fields of the Machine resource in the ServiceMap Azure Resource Manager API. The fields and values in the ServiceMapProcess_CL events map to the fields of the Process resource in the ServiceMap Azure Resource Manager API. The ResourceName_s field matches the name field in the corresponding Resource Manager resource. + +There are internally generated properties you can use to identify unique processes and computers: + +- Computer: Use *ResourceId* or *ResourceName_s* to uniquely identify a computer within a Log Analytics workspace. +- Process: Use *ResourceId* to uniquely identify a process within a Log Analytics workspace. *ResourceName_s* is unique within the context of the machine on which the process is running (MachineResourceName_s). + +Because multiple records can exist for a specified process and computer in a specified time range, queries can return more than one record for the same computer or process. To include only the most recent record, add `| summarize arg_max(TimeGenerated, *) by ResourceId` to the query. + +### Connections and ports + +The Connection Metrics feature introduces two new tables in Azure Monitor logs - VMConnection and VMBoundPort. These tables provide information about the connections for a machine (inbound and outbound), as well as the server ports that are open/active on them.
ConnectionMetrics are also exposed via APIs that provide the means to obtain a specific metric during a time window. TCP connections resulting from *accepting* on a listening socket are inbound, while those created by *connecting* to a given IP and port are outbound. The direction of a connection is represented by the Direction property, which can be set to either **inbound** or **outbound**. + +Records in these tables are generated from data reported by the Dependency Agent. Every record represents an observation over a 1-minute time interval. The TimeGenerated property indicates the start of the time interval. Each record contains information to identify the respective entity, that is, connection or port, as well as metrics associated with that entity. Currently, only network activity that occurs using TCP over IPv4 is reported. + +#### Common fields and conventions + +The following fields and conventions apply to both VMConnection and VMBoundPort: + +- Computer: Fully-qualified domain name of reporting machine +- AgentId: The unique identifier for a machine with the Log Analytics agent +- Machine: Name of the Azure Resource Manager resource for the machine exposed by ServiceMap. It is of the form *m-{GUID}*, where *GUID* is the same GUID as AgentId +- Process: Name of the Azure Resource Manager resource for the process exposed by ServiceMap. It is of the form *p-{hex string}*. Process is unique within a machine scope and to generate a unique process ID across machines, combine Machine and Process fields. +- ProcessName: Executable name of the reporting process. +- All IP addresses are strings in IPv4 canonical format, for example *13.107.3.160* + +To manage cost and complexity, connection records do not represent individual physical network connections. Multiple physical network connections are grouped into a logical connection, which is then reflected in the respective table. That is, records in the *VMConnection* table represent a logical grouping and not the individual physical connections that are being observed. Physical network connections sharing the same values for the following attributes during a given one-minute interval are aggregated into a single logical record in *VMConnection*. + +| Property | Description | +|:--|:--| +|Direction |Direction of the connection, value is *inbound* or *outbound* | +|Machine |The computer FQDN | +|Process |Identity of process or groups of processes, initiating/accepting the connection | +|SourceIp |IP address of the source | +|DestinationIp |IP address of the destination | +|DestinationPort |Port number of the destination | +|Protocol |Protocol used for the connection. Value is *tcp*. | + +To account for the impact of grouping, information about the number of grouped physical connections is provided in the following properties of the record: + +| Property | Description | +|:--|:--| +|LinksEstablished |The number of physical network connections that have been established during the reporting time window | +|LinksTerminated |The number of physical network connections that have been terminated during the reporting time window | +|LinksFailed |The number of physical network connections that have failed during the reporting time window. This information is currently available only for outbound connections.
| +|LinksLive |The number of physical network connections that were open at the end of the reporting time window| + +#### Metrics + +In addition to connection count metrics, information about the volume of data sent and received on a given logical connection or network port is also included in the following properties of the record: + +| Property | Description | +|:--|:--| +|BytesSent |Total number of bytes that have been sent during the reporting time window | +|BytesReceived |Total number of bytes that have been received during the reporting time window | +|Responses |The number of responses observed during the reporting time window. | +|ResponseTimeMax |The largest response time (milliseconds) observed during the reporting time window. If no value, the property is blank.| +|ResponseTimeMin |The smallest response time (milliseconds) observed during the reporting time window. If no value, the property is blank.| +|ResponseTimeSum |The sum of all response times (milliseconds) observed during the reporting time window. If no value, the property is blank.| + +The third type of data being reported is response time: how long a caller spends waiting for a request sent over a connection to be processed and responded to by the remote endpoint. The response time reported is an estimation of the true response time of the underlying application protocol. It is computed using heuristics based on the observation of the flow of data between the source and destination end of a physical network connection. Conceptually, it is the difference between the time the last byte of a request leaves the sender, and the time when the last byte of the response arrives back to it. These two timestamps are used to delineate request and response events on a given physical connection. The difference between them represents the response time of a single request. + +In this first release of this feature, our algorithm is an approximation that may work with varying degrees of success depending on the actual application protocol used for a given network connection. For example, the current approach works well for request-response based protocols such as HTTP(S), but does not work with one-way or message queue-based protocols. + +Here are some important points to consider: + +1. If a process accepts connections on the same IP address but over multiple network interfaces, a separate record for each interface will be reported. +2. Records with wildcard IP will contain no activity. They are included to represent the fact that a port on the machine is open to inbound traffic. +3. To reduce verbosity and data volume, records with wildcard IP will be omitted when there is a matching record (for the same process, port, and protocol) with a specific IP address. When a wildcard IP record is omitted, the IsWildcardBind property of the record with the specific IP address will be set to *True* to indicate that the port is exposed over every interface of the reporting machine. +4. Ports that are bound only on a specific interface have IsWildcardBind set to *False*. + +#### Naming and Classification + +For convenience, the IP address of the remote end of a connection is included in the RemoteIp property. For inbound connections, RemoteIp is the same as SourceIp, while for outbound connections, it is the same as DestinationIp. The RemoteDnsCanonicalNames property represents the DNS canonical names reported by the machine for RemoteIp. The RemoteDnsQuestions property represents the DNS questions reported by the machine for RemoteIp.
The RemoteClassification property is reserved for future use. + +#### Geolocation + +*VMConnection* also includes geolocation information for the remote end of each connection in the following properties of the record: + +| Property | Description | +|:--|:--| +|RemoteCountry |The name of the country/region hosting RemoteIp. For example, *United States* | +|RemoteLatitude |The geolocation latitude. For example, *47.68* | +|RemoteLongitude |The geolocation longitude. For example, *-122.12* | + +#### Malicious IP + +Every RemoteIp property in the *VMConnection* table is checked against a set of IPs with known malicious activity. If the RemoteIp is identified as malicious, the following properties of the record are populated (they are empty when the IP is not considered malicious): + +| Property | Description | +|:--|:--| +|MaliciousIp |The RemoteIp address | +|IndicatorThreatType |Threat indicator detected is one of the following values, *Botnet*, *C2*, *CryptoMining*, *Darknet*, *DDos*, *MaliciousUrl*, *Malware*, *Phishing*, *Proxy*, *PUA*, *Watchlist*. | +|Description |Description of the observed threat. | +|TLPLevel |Traffic Light Protocol (TLP) Level is one of the defined values, *White*, *Green*, *Amber*, *Red*. | +|Confidence |Values are *0 – 100*. | +|Severity |Values are *0 – 5*, where *5* is the most severe and *0* is not severe at all. Default value is *3*. | +|FirstReportedDateTime |The first time the provider reported the indicator. | +|LastReportedDateTime |The last time the indicator was seen by Interflow. | +|IsActive |Indicates indicators are deactivated with *True* or *False* value. | +|ReportReferenceLink |Links to reports related to a given observable. | +|AdditionalInformation |Provides additional information, if applicable, about the observed threat. | + +### Ports + +Ports on a machine that actively accept incoming traffic or could potentially accept traffic, but are idle during the reporting time window, are written to the VMBoundPort table. + +Every record in VMBoundPort is identified by the following fields: + +| Property | Description | +|:--|:--| +|Process | Identity of process (or groups of processes) with which the port is associated.| +|Ip | Port IP address (can be wildcard IP, *0.0.0.0*) | +|Port |The Port number | +|Protocol | The protocol. Example, *tcp* or *udp* (only *tcp* is currently supported).| + +The identity of a port is derived from the above fields and is stored in the PortId property. This property can be used to quickly find records for a specific port across time. + +#### Metrics + +Port records include metrics representing the connections associated with them. Currently, the following metrics are reported (the details for each metric are described in the previous section): + +- BytesSent and BytesReceived +- LinksEstablished, LinksTerminated, LinksLive +- ResponseTime, ResponseTimeMin, ResponseTimeMax, ResponseTimeSum + +Here are some important points to consider: + +- If a process accepts connections on the same IP address but over multiple network interfaces, a separate record for each interface will be reported. +- Records with wildcard IP will contain no activity. They are included to represent the fact that a port on the machine is open to inbound traffic. +- To reduce verbosity and data volume, records with wildcard IP will be omitted when there is a matching record (for the same process, port, and protocol) with a specific IP address.
When a wildcard IP record is omitted, the *IsWildcardBind* property for the record with the specific IP address, will be set to *True*. This indicates the port is exposed over every interface of the reporting machine. +- Ports that are bound only on a specific interface have IsWildcardBind set to *False*. + +### VMComputer records + +Records with a type of *VMComputer* have inventory data for servers with the Dependency agent. These records have the properties in the following table: + +| Property | Description | +|:--|:--| +|TenantId | The unique identifier for the workspace | +|SourceSystem | *Insights* | +|TimeGenerated | Timestamp of the record (UTC) | +|Computer | The computer FQDN | +|AgentId | The unique ID of the Log Analytics agent | +|Machine | Name of the Azure Resource Manager resource for the machine exposed by ServiceMap. It is of the form *m-{GUID}*, where *GUID* is the same GUID as AgentId. | +|DisplayName | Display name | +|FullDisplayName | Full display name | +|HostName | The name of machine without domain name | +|BootTime | The machine boot time (UTC) | +|TimeZone | The normalized time zone | +|VirtualizationState | *virtual*, *hypervisor*, *physical* | +|Ipv4Addresses | Array of IPv4 addresses | +|Ipv4SubnetMasks | Array of IPv4 subnet masks (in the same order as Ipv4Addresses). | +|Ipv4DefaultGateways | Array of IPv4 gateways | +|Ipv6Addresses | Array of IPv6 addresses | +|MacAddresses | Array of MAC addresses | +|DnsNames | Array of DNS names associated with the machine. | +|DependencyAgentVersion | The version of the Dependency agent running on the machine. | +|OperatingSystemFamily | *Linux*, *Windows* | +|OperatingSystemFullName | The full name of the operating system | +|PhysicalMemoryMB | The physical memory in megabytes | +|Cpus | The number of processors | +|CpuSpeed | The CPU speed in MHz | +|VirtualMachineType | *hyperv*, *vmware*, *xen* | +|VirtualMachineNativeId | The VM ID as assigned by its hypervisor | +|VirtualMachineNativeName | The name of the VM | +|VirtualMachineHypervisorId | The unique identifier of the hypervisor hosting the VM | +|HypervisorType | *hyperv* | +|HypervisorId | The unique ID of the hypervisor | +|HostingProvider | *azure* | +|_ResourceId | The unique identifier for an Azure resource | +|AzureSubscriptionId | A globally unique identifier that identifies your subscription | +|AzureResourceGroup | The name of the Azure resource group the machine is a member of. 
| +|AzureResourceName | The name of the Azure resource | +|AzureLocation | The location of the Azure resource | +|AzureUpdateDomain | The name of the Azure update domain | +|AzureFaultDomain | The name of the Azure fault domain | +|AzureVmId | The unique identifier of the Azure virtual machine | +|AzureSize | The size of the Azure VM | +|AzureImagePublisher | The name of the Azure VM publisher | +|AzureImageOffering | The name of the Azure VM offer type | +|AzureImageSku | The SKU of the Azure VM image | +|AzureImageVersion | The version of the Azure VM image | +|AzureCloudServiceName | The name of the Azure cloud service | +|AzureCloudServiceDeployment | Deployment ID for the Cloud Service | +|AzureCloudServiceRoleName | Cloud Service role name | +|AzureCloudServiceRoleType | Cloud Service role type: *worker* or *web* | +|AzureCloudServiceInstanceId | Cloud Service role instance ID | +|AzureVmScaleSetName | The name of the virtual machine scale set | +|AzureVmScaleSetDeployment | Virtual machine scale set deployment ID | +|AzureVmScaleSetResourceId | The unique identifier of the virtual machine scale set resource.| +|AzureVmScaleSetInstanceId | The unique identifier of the virtual machine scale set | +|AzureServiceFabricClusterId | The unique identifier of the Azure Service Fabric cluster | +|AzureServiceFabricClusterName | The name of the Azure Service Fabric cluster | + +### VMProcess records + +Records with a type of *VMProcess* have inventory data for TCP-connected processes on servers with the Dependency agent. These records have the properties in the following table: + +| Property | Description | +|:--|:--| +|TenantId | The unique identifier for the workspace | +|SourceSystem | *Insights* | +|TimeGenerated | Timestamp of the record (UTC) | +|Computer | The computer FQDN | +|AgentId | The unique ID of the Log Analytics agent | +|Machine | Name of the Azure Resource Manager resource for the machine exposed by ServiceMap. It is of the form *m-{GUID}*, where *GUID* is the same GUID as AgentId. | +|Process | The unique identifier of the Service Map process. It is in the form of *p-{GUID}*. | +|ExecutableName | The name of the process executable | +|DisplayName | Process display name | +|Role | Process role: *webserver*, *appServer*, *databaseServer*, *ldapServer*, *smbServer* | +|Group | Process group name. Processes in the same group are logically related, e.g., part of the same product or system component.
| +|StartTime | The process pool start time | +|FirstPid | The first PID in the process pool | +|Description | The process description | +|CompanyName | The name of the company | +|InternalName | The internal name | +|ProductName | The name of the product | +|ProductVersion | The version of the product | +|FileVersion | The version of the file | +|ExecutablePath |The path of the executable | +|CommandLine | The command line | +|WorkingDirectory | The working directory | +|Services | An array of services under which the process is executing | +|UserName | The account under which the process is executing | +|UserDomain | The domain under which the process is executing | +|_ResourceId | The unique identifier for a process within the workspace | + + +## Sample map queries + +### List all known machines + +```kusto +VMComputer | summarize arg_max(TimeGenerated, *) by _ResourceId +``` + +### When was the VM last rebooted + +```kusto +let Today = now(); VMComputer | extend DaysSinceBoot = Today - BootTime | summarize by Computer, DaysSinceBoot, BootTime | sort by BootTime asc +``` + +### Summary of Azure VMs by image, location, and SKU + +```kusto +VMComputer | where AzureLocation != "" | summarize by Computer, AzureImageOffering, AzureLocation, AzureImageSku +``` + +### List the physical memory capacity of all managed computers + +```kusto +VMComputer | summarize arg_max(TimeGenerated, *) by _ResourceId | project PhysicalMemoryMB, Computer +``` + +### List computer name, DNS, IP, and OS + +```kusto +VMComputer | summarize arg_max(TimeGenerated, *) by _ResourceId | project Computer, OperatingSystemFullName, DnsNames, Ipv4Addresses +``` + +### Find all processes with "sql" in the command line + +```kusto +VMProcess | where CommandLine contains_cs "sql" | summarize arg_max(TimeGenerated, *) by _ResourceId +``` + +### Find a machine (most recent record) by resource name + +```kusto +search in (VMComputer) "m-4b9c93f9-bc37-46df-b43c-899ba829e07b" | summarize arg_max(TimeGenerated, *) by _ResourceId +``` + +### Find a machine (most recent record) by IP address + +```kusto +search in (VMComputer) "10.229.243.232" | summarize arg_max(TimeGenerated, *) by _ResourceId +``` + +### List all known processes on a specified machine + +```kusto +VMProcess | where Machine == "m-559dbcd8-3130-454d-8d1d-f624e57961bc" | summarize arg_max(TimeGenerated, *) by _ResourceId +``` + +### List all computers running SQL Server + +```kusto +VMComputer | where AzureResourceName in ((search in (VMProcess) "*sql*" | distinct Machine)) | distinct Computer +``` + +### List all unique product versions of curl in my datacenter + +```kusto +VMProcess | where ExecutableName == "curl" | distinct ProductVersion +``` + +### Create a computer group of all computers running CentOS + +```kusto +VMComputer | where OperatingSystemFullName contains_cs "CentOS" | distinct Computer +``` + +### Bytes sent and received trends + +```kusto +VMConnection | summarize sum(BytesSent), sum(BytesReceived) by bin(TimeGenerated,1hr), Computer | order by Computer desc | render timechart +``` + +### Which Azure VMs are transmitting the most bytes + +```kusto +VMConnection | join kind=fullouter(VMComputer) on $left.Computer == $right.Computer | summarize count(BytesSent) by Computer, AzureVMSize | sort by count_BytesSent desc +``` + +### Link status trends + +```kusto +VMConnection | where TimeGenerated >= ago(24hr) | where Computer == "acme-demo" | summarize dcount(LinksEstablished), dcount(LinksLive), dcount(LinksFailed), dcount(LinksTerminated) by 
bin(TimeGenerated, 1h) | render timechart +``` + +### Connection failures trend + +```kusto +VMConnection | where Computer == "acme-demo" | extend bythehour = datetime_part("hour", TimeGenerated) | project bythehour, LinksFailed | summarize failCount = count() by bythehour | sort by bythehour asc | render timechart +``` + +### Bound Ports + +```kusto +VMBoundPort +| where TimeGenerated >= ago(24hr) +| where Computer == 'admdemo-appsvr' +| distinct Port, ProcessName +``` + +### Number of open ports across machines + +```kusto +VMBoundPort +| where Ip != "127.0.0.1" +| summarize by Computer, Machine, Port, Protocol +| summarize OpenPorts=count() by Computer, Machine +| order by OpenPorts desc +``` + +### Score processes in your workspace by the number of ports they have open + +```kusto +VMBoundPort +| where Ip != "127.0.0.1" +| summarize by ProcessName, Port, Protocol +| summarize OpenPorts=count() by ProcessName +| order by OpenPorts desc +``` + +### Aggregate behavior for each port + +This query can then be used to score ports by activity, e.g., ports with most inbound/outbound traffic, ports with most connections +```kusto +// +VMBoundPort +| where Ip != "127.0.0.1" +| summarize BytesSent=sum(BytesSent), BytesReceived=sum(BytesReceived), LinksEstablished=sum(LinksEstablished), LinksTerminated=sum(LinksTerminated), arg_max(TimeGenerated, LinksLive) by Machine, Computer, ProcessName, Ip, Port, IsWildcardBind +| project-away TimeGenerated +| order by Machine, Computer, Port, Ip, ProcessName +``` + +### Summarize the outbound connections from a group of machines + +```kusto +// the machines of interest +let machines = datatable(m: string) ["m-82412a7a-6a32-45a9-a8d6-538354224a25"]; +// map of ip to monitored machine in the environment +let ips=materialize(VMComputer +| summarize ips=makeset(todynamic(Ipv4Addresses)) by MonitoredMachine=AzureResourceName +| mvexpand ips to typeof(string)); +// all connections to/from the machines of interest +let out=materialize(VMConnection +| where Machine in (machines) +| summarize arg_max(TimeGenerated, *) by ConnectionId); +// connections to localhost augmented with RemoteMachine +let local=out +| where RemoteIp startswith "127." +| project ConnectionId, Direction, Machine, Process, ProcessName, SourceIp, DestinationIp, DestinationPort, Protocol, RemoteIp, RemoteMachine=Machine; +// connections not to localhost augmented with RemoteMachine +let remote=materialize(out +| where RemoteIp !startswith "127." +| join kind=leftouter (ips) on $left.RemoteIp == $right.ips +| summarize by ConnectionId, Direction, Machine, Process, ProcessName, SourceIp, DestinationIp, DestinationPort, Protocol, RemoteIp, RemoteMachine=MonitoredMachine); +// the remote machines to/from which we have connections +let remoteMachines = remote | summarize by RemoteMachine; +// all augmented connections +(local) +| union (remote) +//Take all outbound records but only inbound records that come from either //unmonitored machines or monitored machines not in the set for which we are computing dependencies. 
+| where Direction == 'outbound' or (Direction == 'inbound' and RemoteMachine !in (machines)) +| summarize by ConnectionId, Direction, Machine, Process, ProcessName, SourceIp, DestinationIp, DestinationPort, Protocol, RemoteIp, RemoteMachine +// identify the remote port +| extend RemotePort=iff(Direction == 'outbound', DestinationPort, 0) +// construct the join key we'll use to find a matching port +| extend JoinKey=strcat_delim(':', RemoteMachine, RemoteIp, RemotePort, Protocol) +// find a matching port +| join kind=leftouter (VMBoundPort +| where Machine in (remoteMachines) +| summarize arg_max(TimeGenerated, *) by PortId +| extend JoinKey=strcat_delim(':', Machine, Ip, Port, Protocol)) on JoinKey +// aggregate the remote information +| summarize Remote=makeset(iff(isempty(RemoteMachine), todynamic('{}'), pack('Machine', RemoteMachine, 'Process', Process1, 'ProcessName', ProcessName1))) by ConnectionId, Direction, Machine, Process, ProcessName, SourceIp, DestinationIp, DestinationPort, Protocol +``` + +## Performance records +Records with a type of *InsightsMetrics* have performance data from the guest operating system of the virtual machine. These records have the properties in the following table: + + +| Property | Description | +|:--|:--| +|TenantId | Unique identifier for the workspace | +|SourceSystem | *Insights* | +|TimeGenerated | Time the value was collected (UTC) | +|Computer | The computer FQDN | +|Origin | *vm.azm.ms* | +|Namespace | Category of the performance counter | +|Name | Name of the performance counter | +|Val | Collected value | +|Tags | Related details about the record. See the table below for tags used with different record types. | +|AgentId | Unique identifier for each computer's agent | +|Type | *InsightsMetrics* | +|_ResourceId_ | Resource ID of the virtual machine | + +The performance counters currently collected into the *InsightsMetrics* table are listed in the following table: + +| Namespace | Name | Description | Unit | Tags | +|:---|:---|:---|:---|:---| +| Computer | Heartbeat | Computer Heartbeat | | | +| Memory | AvailableMB | Memory Available Bytes | Megabytes | memorySizeMB - Total memory size| +| Network | WriteBytesPerSecond | Network Write Bytes Per Second | BytesPerSecond | NetworkDeviceId - Id of the device
                  bytes - Total sent bytes | +| Network | ReadBytesPerSecond | Network Read Bytes Per Second | BytesPerSecond | networkDeviceId - Id of the device
                  bytes - Total received bytes | +| Processor | UtilizationPercentage | Processor Utilization Percentage | Percent | totalCpus - Total CPUs | +| LogicalDisk | WritesPerSecond | Logical Disk Writes Per Second | CountPerSecond | mountId - Mount ID of the device | +| LogicalDisk | WriteLatencyMs | Logical Disk Write Latency Millisecond | MilliSeconds | mountId - Mount ID of the device | +| LogicalDisk | WriteBytesPerSecond | Logical Disk Write Bytes Per Second | BytesPerSecond | mountId - Mount ID of the device | +| LogicalDisk | TransfersPerSecond | Logical Disk Transfers Per Second | CountPerSecond | mountId - Mount ID of the device | +| LogicalDisk | TransferLatencyMs | Logical Disk Transfer Latency Millisecond | MilliSeconds | mountId - Mount ID of the device | +| LogicalDisk | ReadsPerSecond | Logical Disk Reads Per Second | CountPerSecond | mountId - Mount ID of the device | +| LogicalDisk | ReadLatencyMs | Logical Disk Read Latency Millisecond | MilliSeconds | mountId - Mount ID of the device | +| LogicalDisk | ReadBytesPerSecond | Logical Disk Read Bytes Per Second | BytesPerSecond | mountId - Mount ID of the device | +| LogicalDisk | FreeSpacePercentage | Logical Disk Free Space Percentage | Percent | mountId - Mount ID of the device | +| LogicalDisk | FreeSpaceMB | Logical Disk Free Space Bytes | Megabytes | mountId - Mount ID of the device
                  diskSizeMB - Total disk size | +| LogicalDisk | BytesPerSecond | Logical Disk Bytes Per Second | BytesPerSecond | mountId - Mount ID of the device | + + +## Next steps + +* If you are new to writing log queries in Azure Monitor, review [how to use Log Analytics](../logs/log-analytics-tutorial.md) in the Azure portal to write log queries. + +* Learn about [writing search queries](../logs/get-started-queries.md). \ No newline at end of file diff --git a/articles/azure-monitor/vm/vminsights-log-search.md b/articles/azure-monitor/vm/vminsights-log-search.md deleted file mode 100644 index e68631df144af..0000000000000 --- a/articles/azure-monitor/vm/vminsights-log-search.md +++ /dev/null @@ -1,473 +0,0 @@ ---- -title: How to Query Logs from VM insights -description: VM insights solution collects metrics and log data to and this article describes the records and includes sample queries. -ms.topic: conceptual -author: bwren -ms.author: bwren -ms.date: 03/12/2020 - ---- - -# How to query logs from VM insights - -VM insights collects performance and connection metrics, computer and process inventory data, and health state information and forwards it to the Log Analytics workspace in Azure Monitor. This data is available for [query](../logs/log-query-overview.md) in Azure Monitor. You can apply this data to scenarios that include migration planning, capacity analysis, discovery, and on-demand performance troubleshooting. - -## Map records - -One record is generated per hour for each unique computer and process, in addition to the records that are generated when a process or computer starts or is added to VM insights. The fields and values in the ServiceMapComputer_CL events map to fields of the Machine resource in the ServiceMap Azure Resource Manager API. The fields and values in the ServiceMapProcess_CL events map to the fields of the Process resource in the ServiceMap Azure Resource Manager API. The ResourceName_s field matches the name field in the corresponding Resource Manager resource. - -There are internally generated properties you can use to identify unique processes and computers: - -- Computer: Use *ResourceId* or *ResourceName_s* to uniquely identify a computer within a Log Analytics workspace. -- Process: Use *ResourceId* to uniquely identify a process within a Log Analytics workspace. *ResourceName_s* is unique within the context of the machine on which the process is running (MachineResourceName_s) - -Because multiple records can exist for a specified process and computer in a specified time range, queries can return more than one record for the same computer or process. To include only the most recent record, add `| summarize arg_max(TimeGenerated, *) by ResourceId` to the query. - -### Connections and ports - -The Connection Metrics feature introduces two new tables in Azure Monitor logs - VMConnection and VMBoundPort. These tables provide information about the connections for a machine (inbound and outbound), as well as the server ports that are open/active on them. ConnectionMetrics are also exposed via APIs that provide the means to obtain a specific metric during a time window. TCP connections resulting from *accepting* on a listening socket are inbound, while those created by *connecting* to a given IP and port are outbound. The direction of a connection is represented by the Direction property, which can be set to either **inbound** or **outbound**. - -Records in these tables are generated from data reported by the Dependency Agent. 
Every record represents an observation over a 1-minute time interval. The TimeGenerated property indicates the start of the time interval. Each record contains information to identify the respective entity, that is, connection or port, as well as metrics associated with that entity. Currently, only network activity that occurs using TCP over IPv4 is reported. - -#### Common fields and conventions - -The following fields and conventions apply to both VMConnection and VMBoundPort: - -- Computer: Fully-qualified domain name of reporting machine -- AgentId: The unique identifier for a machine with the Log Analytics agent -- Machine: Name of the Azure Resource Manager resource for the machine exposed by ServiceMap. It is of the form *m-{GUID}*, where *GUID* is the same GUID as AgentId -- Process: Name of the Azure Resource Manager resource for the process exposed by ServiceMap. It is of the form *p-{hex string}*. Process is unique within a machine scope and to generate a unique process ID across machines, combine Machine and Process fields. -- ProcessName: Executable name of the reporting process. -- All IP addresses are strings in IPv4 canonical format, for example *13.107.3.160* - -To manage cost and complexity, connection records do not represent individual physical network connections. Multiple physical network connections are grouped into a logical connection, which is then reflected in the respective table. Meaning, records in *VMConnection* table represent a logical grouping and not the individual physical connections that are being observed. Physical network connection sharing the same value for the following attributes during a given one-minute interval, are aggregated into a single logical record in *VMConnection*. - -| Property | Description | -|:--|:--| -|Direction |Direction of the connection, value is *inbound* or *outbound* | -|Machine |The computer FQDN | -|Process |Identity of process or groups of processes, initiating/accepting the connection | -|SourceIp |IP address of the source | -|DestinationIp |IP address of the destination | -|DestinationPort |Port number of the destination | -|Protocol |Protocol used for the connection. Values is *tcp*. | - -To account for the impact of grouping, information about the number of grouped physical connections is provided in the following properties of the record: - -| Property | Description | -|:--|:--| -|LinksEstablished |The number of physical network connections that have been established during the reporting time window | -|LinksTerminated |The number of physical network connections that have been terminated during the reporting time window | -|LinksFailed |The number of physical network connections that have failed during the reporting time window. This information is currently available only for outbound connections. | -|LinksLive |The number of physical network connections that were open at the end of the reporting time window| - -#### Metrics - -In addition to connection count metrics, information about the volume of data sent and received on a given logical connection or network port are also included in the following properties of the record: - -| Property | Description | -|:--|:--| -|BytesSent |Total number of bytes that have been sent during the reporting time window | -|BytesReceived |Total number of bytes that have been received during the reporting time window | -|Responses |The number of responses observed during the reporting time window. 
-|ResponseTimeMax |The largest response time (milliseconds) observed during the reporting time window. If no value, the property is blank.| -|ResponseTimeMin |The smallest response time (milliseconds) observed during the reporting time window. If no value, the property is blank.| -|ResponseTimeSum |The sum of all response times (milliseconds) observed during the reporting time window. If no value, the property is blank.| - -The third type of data being reported is response time - how long does a caller spend waiting for a request sent over a connection to be processed and responded to by the remote endpoint. The response time reported is an estimation of the true response time of the underlying application protocol. It is computed using heuristics based on the observation of the flow of data between the source and destination end of a physical network connection. Conceptually, it is the difference between the time the last byte of a request leaves the sender, and the time when the last byte of the response arrives back to it. These two timestamps are used to delineate request and response events on a given physical connection. The difference between them represents the response time of a single request. - -In this first release of this feature, our algorithm is an approximation that may work with varying degree of success depending on the actual application protocol used for a given network connection. For example, the current approach works well for request-response based protocols such as HTTP(S), but does not work with one-way or message queue-based protocols. - -Here are some important points to consider: - -1. If a process accepts connections on the same IP address but over multiple network interfaces, a separate record for each interface will be reported. -2. Records with wildcard IP will contain no activity. They are included to represent the fact that a port on the machine is open to inbound traffic. -3. To reduce verbosity and data volume, records with wildcard IP will be omitted when there is a matching record (for the same process, port, and protocol) with a specific IP address. When a wildcard IP record is omitted, the IsWildcardBind record property with the specific IP address, will be set to "True" to indicate that the port is exposed over every interface of the reporting machine. -4. Ports that are bound only on a specific interface have IsWildcardBind set to *False*. - -#### Naming and Classification - -For convenience, the IP address of the remote end of a connection is included in the RemoteIp property. For inbound connections, RemoteIp is the same as SourceIp, while for outbound connections, it is the same as DestinationIp. The RemoteDnsCanonicalNames property represents the DNS canonical names reported by the machine for RemoteIp. The RemoteDnsQuestions property represents the DNS questions reported by the machine for RemoteIp. The RemoveClassification property is reserved for future use. - -#### Geolocation - -*VMConnection* also includes geolocation information for the remote end of each connection record in the following properties of the record: - -| Property | Description | -|:--|:--| -|RemoteCountry |The name of the country/region hosting RemoteIp. For example, *United States* | -|RemoteLatitude |The geolocation latitude. For example, *47.68* | -|RemoteLongitude |The geolocation longitude. For example, *-122.12* | - -#### Malicious IP - -Every RemoteIp property in *VMConnection* table is checked against a set of IPs with known malicious activity. 
If the RemoteIp is identified as malicious the following properties will be populated (they are empty, when the IP is not considered malicious) in the following properties of the record: - -| Property | Description | -|:--|:--| -|MaliciousIp |The RemoteIp address | -|IndicatorThreadType |Threat indicator detected is one of the following values, *Botnet*, *C2*, *CryptoMining*, *Darknet*, *DDos*, *MaliciousUrl*, *Malware*, *Phishing*, *Proxy*, *PUA*, *Watchlist*. | -|Description |Description of the observed threat. | -|TLPLevel |Traffic Light Protocol (TLP) Level is one of the defined values, *White*, *Green*, *Amber*, *Red*. | -|Confidence |Values are *0 – 100*. | -|Severity |Values are *0 – 5*, where *5* is the most severe and *0* is not severe at all. Default value is *3*. | -|FirstReportedDateTime |The first time the provider reported the indicator. | -|LastReportedDateTime |The last time the indicator was seen by Interflow. | -|IsActive |Indicates indicators are deactivated with *True* or *False* value. | -|ReportReferenceLink |Links to reports related to a given observable. | -|AdditionalInformation |Provides additional information, if applicable, about the observed threat. | - -### Ports - -Ports on a machine that actively accept incoming traffic or could potentially accept traffic, but are idle during the reporting time window, are written to the VMBoundPort table. - -Every record in VMBoundPort is identified by the following fields: - -| Property | Description | -|:--|:--| -|Process | Identity of process (or groups of processes) with which the port is associated with.| -|Ip | Port IP address (can be wildcard IP, *0.0.0.0*) | -|Port |The Port number | -|Protocol | The protocol. Example, *tcp* or *udp* (only *tcp* is currently supported).| - -The identity a port is derived from the above five fields and is stored in the PortId property. This property can be used to quickly find records for a specific port across time. - -#### Metrics - -Port records include metrics representing the connections associated with them. Currently, the following metrics are reported (the details for each metric are described in the previous section): - -- BytesSent and BytesReceived -- LinksEstablished, LinksTerminated, LinksLive -- ResposeTime, ResponseTimeMin, ResponseTimeMax, ResponseTimeSum - -Here are some important points to consider: - -- If a process accepts connections on the same IP address but over multiple network interfaces, a separate record for each interface will be reported. -- Records with wildcard IP will contain no activity. They are included to represent the fact that a port on the machine is open to inbound traffic. -- To reduce verbosity and data volume, records with wildcard IP will be omitted when there is a matching record (for the same process, port, and protocol) with a specific IP address. When a wildcard IP record is omitted, the *IsWildcardBind* property for the record with the specific IP address, will be set to *True*. This indicates the port is exposed over every interface of the reporting machine. -- Ports that are bound only on a specific interface have IsWildcardBind set to *False*. - -### VMComputer records - -Records with a type of *VMComputer* have inventory data for servers with the Dependency agent. 
These records have the properties in the following table: - -| Property | Description | -|:--|:--| -|TenantId | The unique identifier for the workspace | -|SourceSystem | *Insights* | -|TimeGenerated | Timestamp of the record (UTC) | -|Computer | The computer FQDN | -|AgentId | The unique ID of the Log Analytics agent | -|Machine | Name of the Azure Resource Manager resource for the machine exposed by ServiceMap. It is of the form *m-{GUID}*, where *GUID* is the same GUID as AgentId. | -|DisplayName | Display name | -|FullDisplayName | Full display name | -|HostName | The name of machine without domain name | -|BootTime | The machine boot time (UTC) | -|TimeZone | The normalized time zone | -|VirtualizationState | *virtual*, *hypervisor*, *physical* | -|Ipv4Addresses | Array of IPv4 addresses | -|Ipv4SubnetMasks | Array of IPv4 subnet masks (in the same order as Ipv4Addresses). | -|Ipv4DefaultGateways | Array of IPv4 gateways | -|Ipv6Addresses | Array of IPv6 addresses | -|MacAddresses | Array of MAC addresses | -|DnsNames | Array of DNS names associated with the machine. | -|DependencyAgentVersion | The version of the Dependency agent running on the machine. | -|OperatingSystemFamily | *Linux*, *Windows* | -|OperatingSystemFullName | The full name of the operating system | -|PhysicalMemoryMB | The physical memory in megabytes | -|Cpus | The number of processors | -|CpuSpeed | The CPU speed in MHz | -|VirtualMachineType | *hyperv*, *vmware*, *xen* | -|VirtualMachineNativeId | The VM ID as assigned by its hypervisor | -|VirtualMachineNativeName | The name of the VM | -|VirtualMachineHypervisorId | The unique identifier of the hypervisor hosting the VM | -|HypervisorType | *hyperv* | -|HypervisorId | The unique ID of the hypervisor | -|HostingProvider | *azure* | -|_ResourceId | The unique identifier for an Azure resource | -|AzureSubscriptionId | A globally unique identifier that identifies your subscription | -|AzureResourceGroup | The name of the Azure resource group the machine is a member of. 
| -|AzureResourceName | The name of the Azure resource | -|AzureLocation | The location of the Azure resource | -|AzureUpdateDomain | The name of the Azure update domain | -|AzureFaultDomain | The name of the Azure fault domain | -|AzureVmId | The unique identifier of the Azure virtual machine | -|AzureSize | The size of the Azure VM | -|AzureImagePublisher | The name of the Azure VM publisher | -|AzureImageOffering | The name of the Azure VM offer type | -|AzureImageSku | The SKU of the Azure VM image | -|AzureImageVersion | The version of the Azure VM image | -|AzureCloudServiceName | The name of the Azure cloud service | -|AzureCloudServiceDeployment | Deployment ID for the Cloud Service | -|AzureCloudServiceRoleName | Cloud Service role name | -|AzureCloudServiceRoleType | Cloud Service role type: *worker* or *web* | -|AzureCloudServiceInstanceId | Cloud Service role instance ID | -|AzureVmScaleSetName | The name of the virtual machine scale set | -|AzureVmScaleSetDeployment | Virtual machine scale set deployment ID | -|AzureVmScaleSetResourceId | The unique identifier of the virtual machine scale set resource.| -|AzureVmScaleSetInstanceId | The unique identifier of the virtual machine scale set | -|AzureServiceFabricClusterId | The unique identifer of the Azure Service Fabric cluster | -|AzureServiceFabricClusterName | The name of the Azure Service Fabric cluster | - -### VMProcess records - -Records with a type of *VMProcess* have inventory data for TCP-connected processes on servers with the Dependency agent. These records have the properties in the following table: - -| Property | Description | -|:--|:--| -|TenantId | The unique identifier for the workspace | -|SourceSystem | *Insights* | -|TimeGenerated | Timestamp of the record (UTC) | -|Computer | The computer FQDN | -|AgentId | The unique ID of the Log Analytics agent | -|Machine | Name of the Azure Resource Manager resource for the machine exposed by ServiceMap. It is of the form *m-{GUID}*, where *GUID* is the same GUID as AgentId. | -|Process | The unique identifier of the Service Map process. It is in the form of *p-{GUID}*. -|ExecutableName | The name of the process executable | -|DisplayName | Process display name | -|Role | Process role: *webserver*, *appServer*, *databaseServer*, *ldapServer*, *smbServer* | -|Group | Process group name. Processes in the same group are logically related, e.g., part of the same product or system component. 
| -|StartTime | The process pool start time | -|FirstPid | The first PID in the process pool | -|Description | The process description | -|CompanyName | The name of the company | -|InternalName | The internal name | -|ProductName | The name of the product | -|ProductVersion | The version of the product | -|FileVersion | The version of the file | -|ExecutablePath |The path of the executable | -|CommandLine | The command line | -|WorkingDirectory | The working directory | -|Services | An array of services under which the process is executing | -|UserName | The account under which the process is executing | -|UserDomain | The domain under which the process is executing | -|_ResourceId | The unique identifier for a process within the workspace | - - -## Sample map queries - -### List all known machines - -```kusto -VMComputer | summarize arg_max(TimeGenerated, *) by _ResourceId -``` - -### When was the VM last rebooted - -```kusto -let Today = now(); VMComputer | extend DaysSinceBoot = Today - BootTime | summarize by Computer, DaysSinceBoot, BootTime | sort by BootTime asc -``` - -### Summary of Azure VMs by image, location, and SKU - -```kusto -VMComputer | where AzureLocation != "" | summarize by Computer, AzureImageOffering, AzureLocation, AzureImageSku -``` - -### List the physical memory capacity of all managed computers - -```kusto -VMComputer | summarize arg_max(TimeGenerated, *) by _ResourceId | project PhysicalMemoryMB, Computer -``` - -### List computer name, DNS, IP, and OS - -```kusto -VMComputer | summarize arg_max(TimeGenerated, *) by _ResourceId | project Computer, OperatingSystemFullName, DnsNames, Ipv4Addresses -``` - -### Find all processes with "sql" in the command line - -```kusto -VMProcess | where CommandLine contains_cs "sql" | summarize arg_max(TimeGenerated, *) by _ResourceId -``` - -### Find a machine (most recent record) by resource name - -```kusto -search in (VMComputer) "m-4b9c93f9-bc37-46df-b43c-899ba829e07b" | summarize arg_max(TimeGenerated, *) by _ResourceId -``` - -### Find a machine (most recent record) by IP address - -```kusto -search in (VMComputer) "10.229.243.232" | summarize arg_max(TimeGenerated, *) by _ResourceId -``` - -### List all known processes on a specified machine - -```kusto -VMProcess | where Machine == "m-559dbcd8-3130-454d-8d1d-f624e57961bc" | summarize arg_max(TimeGenerated, *) by _ResourceId -``` - -### List all computers running SQL Server - -```kusto -VMComputer | where AzureResourceName in ((search in (VMProcess) "*sql*" | distinct Machine)) | distinct Computer -``` - -### List all unique product versions of curl in my datacenter - -```kusto -VMProcess | where ExecutableName == "curl" | distinct ProductVersion -``` - -### Create a computer group of all computers running CentOS - -```kusto -VMComputer | where OperatingSystemFullName contains_cs "CentOS" | distinct Computer -``` - -### Bytes sent and received trends - -```kusto -VMConnection | summarize sum(BytesSent), sum(BytesReceived) by bin(TimeGenerated,1hr), Computer | order by Computer desc | render timechart -``` - -### Which Azure VMs are transmitting the most bytes - -```kusto -VMConnection | join kind=fullouter(VMComputer) on $left.Computer == $right.Computer | summarize count(BytesSent) by Computer, AzureVMSize | sort by count_BytesSent desc -``` - -### Link status trends - -```kusto -VMConnection | where TimeGenerated >= ago(24hr) | where Computer == "acme-demo" | summarize dcount(LinksEstablished), dcount(LinksLive), dcount(LinksFailed), dcount(LinksTerminated) by 
bin(TimeGenerated, 1h) | render timechart -``` - -### Connection failures trend - -```kusto -VMConnection | where Computer == "acme-demo" | extend bythehour = datetime_part("hour", TimeGenerated) | project bythehour, LinksFailed | summarize failCount = count() by bythehour | sort by bythehour asc | render timechart -``` - -### Bound Ports - -```kusto -VMBoundPort -| where TimeGenerated >= ago(24hr) -| where Computer == 'admdemo-appsvr' -| distinct Port, ProcessName -``` - -### Number of open ports across machines - -```kusto -VMBoundPort -| where Ip != "127.0.0.1" -| summarize by Computer, Machine, Port, Protocol -| summarize OpenPorts=count() by Computer, Machine -| order by OpenPorts desc -``` - -### Score processes in your workspace by the number of ports they have open - -```kusto -VMBoundPort -| where Ip != "127.0.0.1" -| summarize by ProcessName, Port, Protocol -| summarize OpenPorts=count() by ProcessName -| order by OpenPorts desc -``` - -### Aggregate behavior for each port - -This query can then be used to score ports by activity, e.g., ports with most inbound/outbound traffic, ports with most connections -```kusto -// -VMBoundPort -| where Ip != "127.0.0.1" -| summarize BytesSent=sum(BytesSent), BytesReceived=sum(BytesReceived), LinksEstablished=sum(LinksEstablished), LinksTerminated=sum(LinksTerminated), arg_max(TimeGenerated, LinksLive) by Machine, Computer, ProcessName, Ip, Port, IsWildcardBind -| project-away TimeGenerated -| order by Machine, Computer, Port, Ip, ProcessName -``` - -### Summarize the outbound connections from a group of machines - -```kusto -// the machines of interest -let machines = datatable(m: string) ["m-82412a7a-6a32-45a9-a8d6-538354224a25"]; -// map of ip to monitored machine in the environment -let ips=materialize(VMComputer -| summarize ips=makeset(todynamic(Ipv4Addresses)) by MonitoredMachine=AzureResourceName -| mvexpand ips to typeof(string)); -// all connections to/from the machines of interest -let out=materialize(VMConnection -| where Machine in (machines) -| summarize arg_max(TimeGenerated, *) by ConnectionId); -// connections to localhost augmented with RemoteMachine -let local=out -| where RemoteIp startswith "127." -| project ConnectionId, Direction, Machine, Process, ProcessName, SourceIp, DestinationIp, DestinationPort, Protocol, RemoteIp, RemoteMachine=Machine; -// connections not to localhost augmented with RemoteMachine -let remote=materialize(out -| where RemoteIp !startswith "127." -| join kind=leftouter (ips) on $left.RemoteIp == $right.ips -| summarize by ConnectionId, Direction, Machine, Process, ProcessName, SourceIp, DestinationIp, DestinationPort, Protocol, RemoteIp, RemoteMachine=MonitoredMachine); -// the remote machines to/from which we have connections -let remoteMachines = remote | summarize by RemoteMachine; -// all augmented connections -(local) -| union (remote) -//Take all outbound records but only inbound records that come from either //unmonitored machines or monitored machines not in the set for which we are computing dependencies. 
-| where Direction == 'outbound' or (Direction == 'inbound' and RemoteMachine !in (machines)) -| summarize by ConnectionId, Direction, Machine, Process, ProcessName, SourceIp, DestinationIp, DestinationPort, Protocol, RemoteIp, RemoteMachine -// identify the remote port -| extend RemotePort=iff(Direction == 'outbound', DestinationPort, 0) -// construct the join key we'll use to find a matching port -| extend JoinKey=strcat_delim(':', RemoteMachine, RemoteIp, RemotePort, Protocol) -// find a matching port -| join kind=leftouter (VMBoundPort -| where Machine in (remoteMachines) -| summarize arg_max(TimeGenerated, *) by PortId -| extend JoinKey=strcat_delim(':', Machine, Ip, Port, Protocol)) on JoinKey -// aggregate the remote information -| summarize Remote=makeset(iff(isempty(RemoteMachine), todynamic('{}'), pack('Machine', RemoteMachine, 'Process', Process1, 'ProcessName', ProcessName1))) by ConnectionId, Direction, Machine, Process, ProcessName, SourceIp, DestinationIp, DestinationPort, Protocol -``` - -## Performance records -Records with a type of *InsightsMetrics* have performance data from the guest operating system of the virtual machine. These records have the properties in the following table: - - -| Property | Description | -|:--|:--| -|TenantId | Unique identifier for the workspace | -|SourceSystem | *Insights* | -|TimeGenerated | Time the value was collected (UTC) | -|Computer | The computer FQDN | -|Origin | *vm.azm.ms* | -|Namespace | Category of the performance counter | -|Name | Name of the performance counter | -|Val | Collected value | -|Tags | Related details about the record. See the table below for tags used with different record types. | -|AgentId | Unique identifier for each computer's agent | -|Type | *InsightsMetrics* | -|_ResourceId_ | Resource ID of the virtual machine | - -The performance counters currently collected into the *InsightsMetrics* table are listed in the following table: - -| Namespace | Name | Description | Unit | Tags | -|:---|:---|:---|:---|:---| -| Computer | Heartbeat | Computer Heartbeat | | | -| Memory | AvailableMB | Memory Available Bytes | Megabytes | memorySizeMB - Total memory size| -| Network | WriteBytesPerSecond | Network Write Bytes Per Second | BytesPerSecond | NetworkDeviceId - Id of the device
                  bytes - Total sent bytes | -| Network | ReadBytesPerSecond | Network Read Bytes Per Second | BytesPerSecond | networkDeviceId - Id of the device
                  bytes - Total received bytes | -| Processor | UtilizationPercentage | Processor Utilization Percentage | Percent | totalCpus - Total CPUs | -| LogicalDisk | WritesPerSecond | Logical Disk Writes Per Second | CountPerSecond | mountId - Mount ID of the device | -| LogicalDisk | WriteLatencyMs | Logical Disk Write Latency Millisecond | MilliSeconds | mountId - Mount ID of the device | -| LogicalDisk | WriteBytesPerSecond | Logical Disk Write Bytes Per Second | BytesPerSecond | mountId - Mount ID of the device | -| LogicalDisk | TransfersPerSecond | Logical Disk Transfers Per Second | CountPerSecond | mountId - Mount ID of the device | -| LogicalDisk | TransferLatencyMs | Logical Disk Transfer Latency Millisecond | MilliSeconds | mountId - Mount ID of the device | -| LogicalDisk | ReadsPerSecond | Logical Disk Reads Per Second | CountPerSecond | mountId - Mount ID of the device | -| LogicalDisk | ReadLatencyMs | Logical Disk Read Latency Millisecond | MilliSeconds | mountId - Mount ID of the device | -| LogicalDisk | ReadBytesPerSecond | Logical Disk Read Bytes Per Second | BytesPerSecond | mountId - Mount ID of the device | -| LogicalDisk | FreeSpacePercentage | Logical Disk Free Space Percentage | Percent | mountId - Mount ID of the device | -| LogicalDisk | FreeSpaceMB | Logical Disk Free Space Bytes | Megabytes | mountId - Mount ID of the device
                  diskSizeMB - Total disk size | -| LogicalDisk | BytesPerSecond | Logical Disk Bytes Per Second | BytesPerSecond | mountId - Mount ID of the device | - - -## Next steps - -* If you are new to writing log queries in Azure Monitor, review [how to use Log Analytics](../logs/log-analytics-tutorial.md) in the Azure portal to write log queries. - -* Learn about [writing search queries](../logs/get-started-queries.md). \ No newline at end of file diff --git a/articles/azure-monitor/vm/vminsights-maps.md b/articles/azure-monitor/vm/vminsights-maps.md index afb4b9b13cb00..03f8410f862ab 100644 --- a/articles/azure-monitor/vm/vminsights-maps.md +++ b/articles/azure-monitor/vm/vminsights-maps.md @@ -4,7 +4,7 @@ description: Map is a feature of VM insights. It automatically discovers applica ms.topic: conceptual author: bwren ms.author: bwren -ms.date: 03/20/2020 +ms.date: 06/08/2022 --- diff --git a/articles/azure-monitor/vm/vminsights-optout.md b/articles/azure-monitor/vm/vminsights-optout.md index ff07535362248..7fd203c28c6db 100644 --- a/articles/azure-monitor/vm/vminsights-optout.md +++ b/articles/azure-monitor/vm/vminsights-optout.md @@ -4,7 +4,7 @@ description: This article describes how to stop monitoring your virtual machines ms.topic: conceptual author: bwren ms.author: bwren -ms.date: 03/12/2020 +ms.date: 06/08/2022 --- diff --git a/articles/azure-monitor/vm/vminsights-overview.md b/articles/azure-monitor/vm/vminsights-overview.md index 6a16c1fbb4b8a..fd00be6a5dc87 100644 --- a/articles/azure-monitor/vm/vminsights-overview.md +++ b/articles/azure-monitor/vm/vminsights-overview.md @@ -4,14 +4,16 @@ description: Overview of VM insights, which monitors the health and performance ms.topic: conceptual author: bwren ms.author: bwren -ms.date: 07/22/2020 - +ms.date: 06/08/2022 --- # Overview of VM insights VM insights monitors the performance and health of your virtual machines and virtual machine scale sets, including their running processes and dependencies on other resources. It can help deliver predictable performance and availability of vital applications by identifying performance bottlenecks and network issues and can also help you understand whether an issue is related to other dependencies. +> [!NOTE] +> VM insights does not currently support [Azure Monitor agent](../agents/azure-monitor-agent-overview.md). You can + VM insights supports Windows and Linux operating systems on the following machines: - Azure virtual machines diff --git a/articles/azure-monitor/vm/vminsights-performance.md b/articles/azure-monitor/vm/vminsights-performance.md index 1ef13df7eaf3d..5e3a29716f859 100644 --- a/articles/azure-monitor/vm/vminsights-performance.md +++ b/articles/azure-monitor/vm/vminsights-performance.md @@ -4,7 +4,7 @@ description: Performance is a feature of the VM insights that automatically disc ms.topic: conceptual author: bwren ms.author: bwren -ms.date: 05/31/2020 +ms.date: 06/08/2022 --- diff --git a/articles/azure-monitor/vm/vminsights-troubleshoot.md b/articles/azure-monitor/vm/vminsights-troubleshoot.md index 9548b04803d4f..f19af0dd7e046 100644 --- a/articles/azure-monitor/vm/vminsights-troubleshoot.md +++ b/articles/azure-monitor/vm/vminsights-troubleshoot.md @@ -4,7 +4,7 @@ description: Troubleshoot VM insights installation. 
ms.topic: conceptual author: bwren ms.author: bwren -ms.date: 03/15/2021 +ms.date: 06/08/2022 ms.custom: references_regions --- diff --git a/articles/azure-monitor/vm/vminsights-workbooks.md b/articles/azure-monitor/vm/vminsights-workbooks.md index 26bbc524c061c..09fa7155d0d90 100644 --- a/articles/azure-monitor/vm/vminsights-workbooks.md +++ b/articles/azure-monitor/vm/vminsights-workbooks.md @@ -4,7 +4,7 @@ description: Simplify complex reporting with predefined and custom parameterized ms.topic: conceptual author: bwren ms.author: bwren -ms.date: 03/12/2020 +ms.date: 05/27/2022 --- @@ -41,29 +41,25 @@ VM insights includes the following workbooks. You can use these workbooks or use | Failed Connections | Display the count of failed connections on your monitored VMs, the failure trend, and if the percentage of failures is increasing over time. | | Security and Audit | An analysis of your TCP/IP traffic that reports on overall connections, malicious connections, where the IP endpoints reside globally. To enable all features, you will need to enable Security Detection. | | TCP Traffic | A ranked report for your monitored VMs and their sent, received, and total network traffic in a grid and displayed as a trend line. | -| Traffic Comparison | This workbooks lets you compare network traffic trends for a single machine or a group of machines. | +| Traffic Comparison | This workbook lets you compare network traffic trends for a single machine or a group of machines. | ## Creating a new workbook A workbook is made up of sections consisting of independently editable charts, tables, text, and input controls. To better understand workbooks, let's start by opening a template and walk through creating a custom workbook. -1. Sign in to the [Azure portal](https://portal.azure.com). +1. Go to the **Monitor** menu in the Azure portal. -2. Select **Virtual Machines**. +2. Select a virtual machine. -3. From the list, select a VM. +3. On the VM insights page, select **Performance** or **Maps** tab and then select **View Workbooks** from the link on the page. From the drop-down list, select **Go to Gallery**. -4. On the VM page, in the **Monitoring** section, select **Insights**. - -5. On the VM insights page, select **Performance** or **Maps** tab and then select **View Workbooks** from the link on the page. From the drop-down list, select **Go to Gallery**. - - ![Screenshot of workbook drop-down list](media/vminsights-workbooks/workbook-dropdown-gallery-01.png) + :::image type="content" source="media/vminsights-workbooks/workbook-dropdown-gallery-01.png" lightbox="media/vminsights-workbooks/workbook-dropdown-gallery-01.png" alt-text="Screenshot of workbook drop-down list in V M insights."::: This launches the workbook gallery with a number of prebuilt workbooks to help you get started. 7. Create a new workbook by selecting **New**. - ![Screenshot of workbook gallery](media/vminsights-workbooks/workbook-gallery-01.png) +:::image type="content" source="media/vminsights-workbooks/workbook-gallery-01.png" lightbox="media/vminsights-workbooks/workbook-gallery-01.png" alt-text="Screenshot of workbook gallery in V M insights."::: ## Editing workbook sections diff --git a/articles/azure-monitor/whats-new.md b/articles/azure-monitor/whats-new.md index 50ce16a09bd60..e39788cbc8b1d 100644 --- a/articles/azure-monitor/whats-new.md +++ b/articles/azure-monitor/whats-new.md @@ -510,7 +510,7 @@ This article lists significant changes to Azure Monitor documentation. 
**Updated articles** -- [Azure Data Explorer Insights](insights/data-explorer.md) +- [Azure Data Explorer Insights](/azure/data-explorer/data-explorer-insights) - [Agent Health solution in Azure Monitor](insights/solution-agenthealth.md) - [Monitoring solutions in Azure Monitor](insights/solutions.md) - [Monitor your SQL deployments with SQL Insights (preview)](insights/sql-insights-overview.md) diff --git a/articles/azure-netapp-files/TOC.yml index 1806379e293c3..95013e5253b24 100644 --- a/articles/azure-netapp-files/TOC.yml +++ b/articles/azure-netapp-files/TOC.yml @@ -31,6 +31,8 @@ href: monitor-azure-netapp-files.md - name: Metrics for Azure NetApp Files href: azure-netapp-files-metrics.md + - name: Azure Policy definitions for Azure NetApp Files + href: azure-policy-definitions.md - name: Storage service add-ons href: storage-service-add-ons.md - name: Cost model for Azure NetApp Files @@ -69,7 +71,7 @@ href: performance-linux-nfs-read-ahead.md - name: SMB performance best practices href: azure-netapp-files-smb-performance.md - - name: Azure virtual machine SKUs best practices + - name: Azure virtual machine SKUs' best practices href: performance-virtual-machine-sku.md - name: Application volume groups items: @@ -153,7 +155,7 @@ href: create-active-directory-connections.md - name: Modify Active Directory connections href: modify-active-directory-connections.md - - name: Enable ADDS LDAP authentication for NFS volumes + - name: Enable AD DS LDAP authentication for NFS volumes href: configure-ldap-over-tls.md - name: Manage capacity pools items: @@ -173,7 +175,7 @@ href: azure-netapp-files-configure-nfsv41-domain.md - name: Configure NFSv4.1 Kerberos encryption href: configure-kerberos-encryption.md - - name: Configure ADDS LDAP with extended groups for NFS + - name: Configure AD DS LDAP with extended groups for NFS href: configure-ldap-extended-groups.md - name: Configure an NFS client for Azure NetApp Files href: configure-nfs-clients.md @@ -343,6 +345,8 @@ href: https://azure.microsoft.com/support/legal/sla/netapp/ - name: Videos href: azure-netapp-files-videos.md + - name: Select an Azure data store for your application + href: https://docs.microsoft.com/azure/architecture/guide/technology-choices/data-store-decision-tree - name: Compare access with NFS to Azure Blob Storage, Azure Files, and Azure NetApp Files href: ../storage/common/nfs-comparison.md?toc=%2fazure%2fazure-netapp-files%2ftoc.json - name: Azure Files and Azure NetApp Files comparison diff --git a/articles/azure-netapp-files/azacsnap-installation.md index 8e5e54cd894a2..110f918d657ff 100644 --- a/articles/azure-netapp-files/azacsnap-installation.md +++ b/articles/azure-netapp-files/azacsnap-installation.md @@ -12,7 +12,7 @@ ms.service: azure-netapp-files ms.workload: storage ms.tgt_pltfrm: na ms.topic: how-to -ms.date: 02/05/2022 +ms.date: 06/01/2022 ms.author: phjensen --- @@ -215,6 +215,9 @@ This section explains how to enable communication with storage. Ensure the stora # [SAP HANA](#tab/sap-hana) +> [!IMPORTANT] +> If deploying to a centralized virtual machine, it must have the SAP HANA client installed and set up so that the AzAcSnap user can run `hdbsql` and `hdbuserstore` commands. The SAP HANA client can be downloaded from https://tools.hana.ondemand.com/#hanatools. + The snapshot tools communicate with SAP HANA and need a user with appropriate permissions to initiate and release the database save-point. 
The following example shows the setup of the SAP HANA v2 user and the `hdbuserstore` for communication to the SAP HANA database. diff --git a/articles/azure-netapp-files/azacsnap-preview.md index 7a1d7d969c265..8a479fe7ea7c5 100644 --- a/articles/azure-netapp-files/azacsnap-preview.md +++ b/articles/azure-netapp-files/azacsnap-preview.md @@ -12,7 +12,7 @@ ms.service: azure-netapp-files ms.workload: storage ms.tgt_pltfrm: na ms.topic: reference -ms.date: 03/07/2022 +ms.date: 06/01/2022 ms.author: phjensen --- @@ -341,7 +341,7 @@ The following example commands set up a user (AZACSNAP) in the Oracle database, 1. Copy the ZIP file to the target system (for example, the centralized virtual machine running AzAcSnap). - > [!NOTE] + > [!IMPORTANT] > If deploying to a centralized virtual machine, then it will need to have the Oracle instant client installed and set up so the AzAcSnap user can > run `sqlplus` commands. The Oracle Instant Client can downloaded from https://www.oracle.com/database/technologies/instant-client/linux-x86-64-downloads.html. > In order for SQL\*Plus to run correctly, download both the required package (for example, Basic Light Package) and the optional SQL\*Plus tools package. diff --git a/articles/azure-netapp-files/azacsnap-release-notes.md index de71c9ab049fb..49e83acdee75f 100644 --- a/articles/azure-netapp-files/azacsnap-release-notes.md +++ b/articles/azure-netapp-files/azacsnap-release-notes.md @@ -12,7 +12,7 @@ ms.service: azure-netapp-files ms.workload: storage ms.tgt_pltfrm: na ms.topic: conceptual -ms.date: 03/08/2022 +ms.date: 05/24/2022 ms.author: phjensen --- @@ -20,6 +20,23 @@ ms.author: phjensen This page lists major changes made to AzAcSnap to provide new functionality or resolve defects. +## May-2022 + +### AzAcSnap v5.0.3 (Build: 20220524.14204) - Patch update to v5.0.2 + +AzAcSnap v5.0.3 (Build: 20220524.14204) is provided as a patch update to the v5.0 branch with the following fix: + +- Fix for handling delimited identifiers when querying SAP HANA. This issue only impacted SAP HANA in an HSR-HA configuration where a Secondary node is configured with 'logreplay_readaccess'; it has been resolved. + +Download the [latest release](https://aka.ms/azacsnapinstaller) of the installer and review how to [get started](azacsnap-get-started.md). + +### AzAcSnap v5.1 Preview (Build: 20220524.15550) + +AzAcSnap v5.1 Preview (Build: 20220524.15550) is an updated build to extend the preview expiry date by 90 days. This update contains the fix for handling delimited identifiers when querying SAP HANA as provided in v5.0.3. + +Read about the [AzAcSnap Preview](azacsnap-preview.md). +Download the [latest release of the Preview installer](https://aka.ms/azacsnap-preview-installer). + ## Mar-2022 ### AzAcSnap v5.1 Preview (Build: 20220302.81795) @@ -29,11 +46,6 @@ AzAcSnap v5.1 Preview (Build: 20220302.81795) has been released with the followi - Azure Key Vault support for securely storing the Service Principal. - A new option for `-c backup --volume` which has the `all` parameter value. -Details of these new features are in the AzAcSnap Preview documentation. - -Read about the new features and how to use the [AzAcSnap Preview](azacsnap-preview.md). -Download the [latest release of the Preview installer](https://aka.ms/azacsnap-preview-installer). 
- ## Feb-2022 ### AzAcSnap v5.1 Preview (Build: 20220220.55340) @@ -69,8 +81,6 @@ AzAcSnap v5.0.2 (Build: 20210827.19086) is provided as a patch update to the v5. - Fix the installer's check for the location of the hdbuserstore. The installer would check for the existence of an incorrect source directory for the hdbuserstore for the user running the install - this is fixed to check for `~/.hdb`. This fix is applicable to systems (for example, Azure Large Instance) where the hdbuserstore was pre-configured for the `root` user before installing `azacsnap`. - Installer now shows the version it will install/extract (if the installer is run without any arguments). -Download the [latest release](https://aka.ms/azacsnapinstaller) of the installer and review how to [get started](azacsnap-get-started.md). - ## May-2021 ### AzAcSnap v5.0.1 (Build: 20210524.14837) - Patch update to v5.0 diff --git a/articles/azure-netapp-files/azure-netapp-files-solution-architectures.md b/articles/azure-netapp-files/azure-netapp-files-solution-architectures.md index 8af49d6401613..751909e8192fb 100644 --- a/articles/azure-netapp-files/azure-netapp-files-solution-architectures.md +++ b/articles/azure-netapp-files/azure-netapp-files-solution-architectures.md @@ -107,7 +107,7 @@ This section provides references to SAP on Azure solutions. ### SAP AnyDB -* [SAP System on Oracle Database on Azure - Azure Architecture Center](/azure/architecture/example-scenario/apps/sap-on-oracle) +* [SAP System on Oracle Database on Azure - Azure Architecture Center](/azure/architecture/example-scenario/apps/sap-production) * [Oracle Azure Virtual Machines DBMS deployment for SAP workload - Azure Virtual Machines](../virtual-machines/workloads/sap/dbms_guide_oracle.md#oracle-configuration-guidelines-for-sap-installations-in-azure-vms-on-linux) * [Deploy SAP AnyDB (Oracle 19c) with Azure NetApp Files](https://techcommunity.microsoft.com/t5/running-sap-applications-on-the/deploy-sap-anydb-oracle-19c-with-azure-netapp-files/ba-p/2064043) * [Manual Recovery Guide for SAP Oracle 19c on Azure VMs from Azure NetApp Files snapshot with AzAcSnap](https://techcommunity.microsoft.com/t5/running-sap-applications-on-the/manual-recovery-guide-for-sap-oracle-19c-on-azure-vms-from-azure/ba-p/3242408) diff --git a/articles/azure-netapp-files/azure-policy-definitions.md b/articles/azure-netapp-files/azure-policy-definitions.md new file mode 100644 index 0000000000000..30cff1c5b292a --- /dev/null +++ b/articles/azure-netapp-files/azure-policy-definitions.md @@ -0,0 +1,59 @@ +--- +title: Azure Policy definitions for Azure NetApp Files | Microsoft Docs +description: Describes the Azure Policy custom definitions and built-in definitions that you can use with Azure NetApp Files. +services: azure-netapp-files +documentationcenter: '' +author: b-hchen +manager: '' +editor: '' + +ms.assetid: +ms.service: azure-netapp-files +ms.workload: storage +ms.tgt_pltfrm: na +ms.devlang: na +ms.topic: conceptual +ms.date: 06/02/2022 +ms.author: anfdocs +--- +# Azure Policy definitions for Azure NetApp Files + +[Azure Policy](../governance/policy/overview.md) helps to enforce organizational standards and to assess compliance at-scale. Through its compliance dashboard, it provides an aggregated view to evaluate the overall state of the environment, with the ability to drill down to the per-resource, per-policy granularity. It also helps to bring your resources to compliance through bulk remediation for existing resources and automatic remediation for new resources. 
+ +Common use cases for Azure Policy include implementing governance for resource consistency, regulatory compliance, security, cost, and management. Policy definitions for these common use cases are already available in your Azure environment as built-ins to help you get started. + +The process of [creating and implementing a policy in Azure Policy](../governance/policy/tutorials/create-and-manage.md) begins with creating a (built-in or custom) [policy definition](../governance/policy/overview.md#policy-definition). Every policy definition has conditions under which it's enforced. It also has a defined [***effect***](../governance/policy/concepts/effects.md) that takes place if the conditions are met. Azure NetApp Files is supported with both Azure Policy custom and built-in policy definitions. + +## Custom policy definitions + +Azure NetApp Files supports Azure Policy. You can integrate Azure NetApp Files with Azure Policy through [creating custom policy definitions](../governance/policy/tutorials/create-custom-policy-definition.md). You can find examples in [Enforce Snapshot Policies with Azure Policy](https://anfcommunity.com/2021/08/30/enforce-snapshot-policies-with-azure-policy/) and [Azure Policy now available for Azure NetApp Files](https://anfcommunity.com/2021/04/19/azure-policy-now-available-for-azure-netapp-files/). + +## Built-in policy definitions + +The Azure Policy built-in definitions for Azure NetApp Files enable organization admins to restrict creation of unsecure volumes or audit existing volumes. Each policy definition in Azure Policy has a single *effect*. That effect determines what happens when the policy rule is evaluated to match. + +The following effects of Azure Policy can be used with Azure NetApp Files: + +* *Deny* creation of non-compliant volumes +* *Audit* existing volumes for compliance +* *Disable* a policy definition + +The following Azure Policy built-in definitions are available for use with Azure NetApp Files: + +* *Azure NetApp Files volumes should not use NFSv3 protocol type.* + This policy definition disallows the use of the NFSv3 protocol type to prevent unsecure access to volumes. NFSv4.1 or NFSv4.1 with Kerberos protocol should be used to access NFS volumes to ensure data integrity and encryption. + +* *Azure NetApp Files volumes of type NFSv4.1 should use Kerberos data encryption.* + This policy definition allows only the use of Kerberos privacy (`krb5p`) security mode to ensure that data is encrypted. + +* *Azure NetApp Files volumes of type NFSv4.1 should use Kerberos data integrity or data privacy.* + This policy definition ensures that either Kerberos integrity (`krb5i`) or Kerberos privacy (`krb5p`) is selected to ensure data integrity and data privacy. + +* *Azure NetApp Files SMB volumes should use SMB3 encryption.* + This policy definition disallows the creation of SMB volumes without SMB3 encryption to ensure data integrity and data privacy. + +To learn how to assign a policy to resources and view compliance report, see [Assign the Policy](../storage/common/transport-layer-security-configure-minimum-version.md#assign-the-policy). 
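+ +As a quick way to check the effect of an assignment, you can also query policy compliance results with Azure Resource Graph, which uses the Kusto query language. The following query is only a minimal sketch and is not part of the built-in definitions; it assumes the Azure Resource Graph `policyresources` table and the `complianceState`, `policyAssignmentName`, and `resourceType` properties of policy state records, so adjust those names if they differ in your environment. + +```kusto +// Sketch only: count Azure NetApp Files volumes by policy compliance state (table and property names are assumptions). +policyresources +| where type =~ "microsoft.policyinsights/policystates" +| where tostring(properties.resourceType) =~ "microsoft.netapp/netappaccounts/capacitypools/volumes" +| summarize Volumes = count() by ComplianceState = tostring(properties.complianceState), Assignment = tostring(properties.policyAssignmentName) +``` + +You can run such a query in Azure Resource Graph Explorer in the Azure portal to get a per-assignment view of compliant and non-compliant Azure NetApp Files volumes.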
+ +## Next steps + +* [Azure Policy documentation](/azure/governance/policy/) diff --git a/articles/azure-netapp-files/configure-ldap-extended-groups.md b/articles/azure-netapp-files/configure-ldap-extended-groups.md index 741da3e818bd9..497299426d11a 100644 --- a/articles/azure-netapp-files/configure-ldap-extended-groups.md +++ b/articles/azure-netapp-files/configure-ldap-extended-groups.md @@ -12,7 +12,7 @@ ms.service: azure-netapp-files ms.workload: storage ms.tgt_pltfrm: na ms.topic: how-to -ms.date: 03/15/2022 +ms.date: 05/27/2022 ms.author: anfdocs --- # Enable Active Directory Domain Services (ADDS) LDAP authentication for NFS volumes @@ -24,7 +24,7 @@ Azure NetApp Files supports fetching of extended groups from the LDAP name servi When it’s determined that LDAP will be used for operations such as name lookup and fetching extended groups, the following process occurs: 1. Azure NetApp Files uses an LDAP client configuration to make a connection attempt to the ADDS/AADDS LDAP server that is specified in the [Azure NetApp Files AD configuration](create-active-directory-connections.md). -1. If the TCP connection over the defined ADDS/AADDS LDAP service port is successful, then the Azure NetApp Files LDAP client attempts to “bind” (log in) to the ADDS/AADDS LDAP server (domain controller) by using the defined credentials in the LDAP client configuration. +1. If the TCP connection over the defined ADDS/AADDS LDAP service port is successful, then the Azure NetApp Files LDAP client attempts to “bind” (sign in) to the ADDS/AADDS LDAP server (domain controller) by using the defined credentials in the LDAP client configuration. 1. If the bind is successful, then the Azure NetApp Files LDAP client uses the RFC 2307bis LDAP schema to make an LDAP search query to the ADDS/AADDS LDAP server (domain controller). The following information is passed to the server in the query: * [Base/user DN](configure-ldap-extended-groups.md#ldap-search-scope) (to narrow search scope) @@ -98,7 +98,7 @@ The following information is passed to the server in the query: ![Screenshot that shows Create a Volume page with LDAP option.](../media/azure-netapp-files/create-nfs-ldap.png) 7. Optional - You can enable local NFS client users not present on the Windows LDAP server to access an NFS volume that has LDAP with extended groups enabled. To do so, enable the **Allow local NFS users with LDAP** option as follows: - 1. Click **Active Directory connections**. On an existing Active Directory connection, click the context menu (the three dots `…`), and select **Edit**. + 1. Select **Active Directory connections**. On an existing Active Directory connection, select the context menu (the three dots `…`), and select **Edit**. 2. On the **Edit Active Directory settings** window that appears, select the **Allow local NFS users with LDAP** option. 
![Screenshot that shows the Allow local NFS users with LDAP option](../media/azure-netapp-files/allow-local-nfs-users-with-ldap.png) @@ -119,5 +119,6 @@ The following information is passed to the server in the query: * [Create an NFS volume for Azure NetApp Files](azure-netapp-files-create-volumes.md) * [Create and manage Active Directory connections](create-active-directory-connections.md) * [Configure NFSv4.1 domain](azure-netapp-files-configure-nfsv41-domain.md#configure-nfsv41-domain) +* [Configure an NFS client for Azure NetApp Files](configure-nfs-clients.md) * [Troubleshoot volume errors for Azure NetApp Files](troubleshoot-volumes.md) * [Modify Active Directory connections for Azure NetApp Files](modify-active-directory-connections.md) diff --git a/articles/azure-netapp-files/configure-nfs-clients.md b/articles/azure-netapp-files/configure-nfs-clients.md index 70a6bd334bee0..d5eb98e6f8089 100644 --- a/articles/azure-netapp-files/configure-nfs-clients.md +++ b/articles/azure-netapp-files/configure-nfs-clients.md @@ -12,12 +12,12 @@ ms.service: azure-netapp-files ms.workload: storage ms.tgt_pltfrm: na ms.topic: how-to -ms.date: 09/22/2021 +ms.date: 05/27/2022 ms.author: anfdocs --- # Configure an NFS client for Azure NetApp Files -The NFS client configuration described in this article is part of the setup when you [configure NFSv4.1 Kerberos encryption](configure-kerberos-encryption.md) or [create a dual-protocol volume](create-volumes-dual-protocol.md). A wide variety of Linux distributions are available to use with Azure NetApp Files. This article describes configurations for two of the more commonly used environments: RHEL 8 and Ubuntu 18.04. +The NFS client configuration described in this article is part of the setup when you [configure NFSv4.1 Kerberos encryption](configure-kerberos-encryption.md), [create a dual-protocol volume](create-volumes-dual-protocol.md), or [configure NFSv3/NFSv4.1 with LDAP](configure-ldap-extended-groups.md). A wide variety of Linux distributions are available to use with Azure NetApp Files. This article describes configurations for two of the more commonly used environments: RHEL 8 and Ubuntu 18.04. ## Requirements and considerations diff --git a/articles/azure-netapp-files/create-active-directory-connections.md b/articles/azure-netapp-files/create-active-directory-connections.md index e6d9635963b7c..d8956a0b0ca4a 100644 --- a/articles/azure-netapp-files/create-active-directory-connections.md +++ b/articles/azure-netapp-files/create-active-directory-connections.md @@ -12,7 +12,7 @@ ms.service: azure-netapp-files ms.workload: storage ms.tgt_pltfrm: na ms.topic: how-to -ms.date: 04/29/2022 +ms.date: 05/24/2022 ms.author: anfdocs --- # Create and manage Active Directory connections for Azure NetApp Files @@ -198,30 +198,11 @@ This setting is configured in the **Active Directory Connections** under **NetAp ![Active Directory AES encryption](../media/azure-netapp-files/active-directory-aes-encryption.png) - * **LDAP Signing** + * **LDAP Signing** Select this checkbox to enable LDAP signing. This functionality enables secure LDAP lookups between the Azure NetApp Files service and the user-specified [Active Directory Domain Services domain controllers](/windows/win32/ad/active-directory-domain-services). For more information, see [ADV190023 | Microsoft Guidance for Enabling LDAP Channel Binding and LDAP Signing](https://portal.msrc.microsoft.com/en-us/security-guidance/advisory/ADV190023). 
![Active Directory LDAP signing](../media/azure-netapp-files/active-directory-ldap-signing.png) - The **LDAP Signing** feature is currently in preview. If this is your first time using this feature, register the feature before using it: - - ```azurepowershell-interactive - Register-AzProviderFeature -ProviderNamespace Microsoft.NetApp -FeatureName ANFLdapSigning - ``` - - Check the status of the feature registration: - - > [!NOTE] - > The **RegistrationState** may be in the `Registering` state for up to 60 minutes before changing to`Registered`. Wait until the status is `Registered` before continuing. - - ```azurepowershell-interactive - Get-AzProviderFeature -ProviderNamespace Microsoft.NetApp -FeatureName ANFLdapSigning - ``` - - You can also use [Azure CLI commands](/cli/azure/feature) `az feature register` and `az feature show` to register the feature and display the registration status. - - - * **LDAP over TLS** See [Enable Active Directory Domain Services (AD DS) LDAP authentication for NFS volumes](configure-ldap-over-tls.md) for information about this option. diff --git a/articles/azure-netapp-files/faq-integration.md b/articles/azure-netapp-files/faq-integration.md index cbfb2d1d2cd79..3e457668ea9bb 100644 --- a/articles/azure-netapp-files/faq-integration.md +++ b/articles/azure-netapp-files/faq-integration.md @@ -6,7 +6,7 @@ ms.workload: storage ms.topic: conceptual author: b-hchen ms.author: anfdocs -ms.date: 10/11/2021 +ms.date: 06/02/2022 --- # Integration FAQs for Azure NetApp Files @@ -20,11 +20,6 @@ You can mount Azure NetApp Files NFS volumes on AVS Windows VMs or Linux VMs. Yo Using Azure NetApp Files NFS or SMB volumes with AVS for *Guest OS mounts* is supported in [all AVS and ANF enabled regions](https://azure.microsoft.com/global-infrastructure/services/?products=azure-vmware,netapp). -## Does Azure NetApp Files work with Azure Policy? - -Yes. Azure NetApp Files is a first-party service. It fully adheres to Azure Resource Provider standards. As such, Azure NetApp Files can be integrated into Azure Policy via *custom policy definitions*. For information about how to implement custom policies for Azure NetApp Files, see -[Azure Policy now available for Azure NetApp Files](https://techcommunity.microsoft.com/t5/azure/azure-policy-now-available-for-azure-netapp-files/m-p/2282258) on Microsoft Tech Community. - ## Which Unicode Character Encoding is supported by Azure NetApp Files for the creation and display of file and directory names? Azure NetApp Files only supports file and directory names that are encoded with the UTF-8 Unicode Character Encoding format for both NFS and SMB volumes. diff --git a/articles/azure-netapp-files/snapshots-manage-policy.md b/articles/azure-netapp-files/snapshots-manage-policy.md index 9df053af3245c..133aefaca209d 100644 --- a/articles/azure-netapp-files/snapshots-manage-policy.md +++ b/articles/azure-netapp-files/snapshots-manage-policy.md @@ -12,7 +12,7 @@ ms.service: azure-netapp-files ms.workload: storage ms.tgt_pltfrm: na ms.topic: how-to -ms.date: 01/05/2022 +ms.date: 05/25/2022 ms.author: anfdocs --- @@ -24,13 +24,13 @@ ms.author: anfdocs A snapshot policy enables you to specify the snapshot creation frequency in hourly, daily, weekly, or monthly cycles. You also need to specify the maximum number of snapshots to retain for the volume. -1. From the NetApp Account view, click **Snapshot policy**. +1. From the NetApp Account view, select **Snapshot policy**. 
![Screenshot that shows how to navigate to Snapshot Policy.](../media/azure-netapp-files/snapshot-policy-navigation.png) 2. In the Snapshot Policy window, set Policy State to **Enabled**. -3. Click the **Hourly**, **Daily**, **Weekly**, or **Monthly** tab to create hourly, daily, weekly, or monthly snapshot policies. Specify the **Number of snapshots to keep**. +3. Select the **Hourly**, **Daily**, **Weekly**, or **Monthly** tab to create hourly, daily, weekly, or monthly snapshot policies. Specify the **Number of snapshots to keep**. > [!IMPORTANT] > For *monthly* snapshot policy definition, be sure to specify a day that will work for all intended months. If you intend for the monthly snapshot configuration to work for all months in the year, pick a day of the month between 1 and 28. For example, if you specify `31` (day of the month), the monthly snapshot configuration is skipped for the months that have less than 31 days. @@ -53,7 +53,7 @@ A snapshot policy enables you to specify the snapshot creation frequency in hour ![Screenshot that shows the monthly snapshot policy.](../media/azure-netapp-files/snapshot-policy-monthly.png) -4. Click **Save**. +4. Select **Save**. If you need to create additional snapshot policies, repeat Step 3. The policies you created appear in the Snapshot policy page. @@ -70,33 +70,37 @@ You cannot apply a snapshot policy to a destination volume in cross-region repli ![Screenshot that shows the Volumes right-click menu.](../media/azure-netapp-files/volume-right-cick-menu.png) -2. In the Edit window, under **Snapshot policy**, select a policy to use for the volume. Click **OK** to apply the policy. +2. In the Edit window, under **Snapshot policy**, select a policy to use for the volume. Select **OK** to apply the policy. ![Screenshot that shows the Snapshot policy menu.](../media/azure-netapp-files/snapshot-policy-edit.png) ## Modify a snapshot policy -You can modify an existing snapshot policy to change the policy state, snapshot frequency (hourly, daily, weekly, or monthly), or number of snapshots to keep. +You can modify an existing snapshot policy to change the policy state, snapshot frequency (hourly, daily, weekly, or monthly), or number of snapshots to keep. + +When you modify a snapshot policy or disable its schedule, snapshots created under the old schedule are not deleted or overwritten by the new schedule. If you proceed with the update, you will have to delete the old snapshots manually. -1. From the NetApp Account view, click **Snapshot policy**. +1. From the NetApp Account view, select **Snapshot policy**. 2. Right-click the snapshot policy you want to modify, then select **Edit**. ![Screenshot that shows the Snapshot policy right-click menu.](../media/azure-netapp-files/snapshot-policy-right-click-menu.png) -3. Make the changes in the Snapshot Policy window that appears, then click **Save**. +3. Make the changes in the Snapshot Policy window that appears, then select **Save**. + +4. You will receive a prompt asking you to confirm that you want to update the Snapshot Policy. Select **Yes** to confirm your choice. ## Delete a snapshot policy You can delete a snapshot policy that you no longer want to keep. -1. From the NetApp Account view, click **Snapshot policy**. +1. From the NetApp Account view, select **Snapshot policy**. 2. Right-click the snapshot policy you want to modify, then select **Delete**. ![Screenshot that shows the Delete menu item.](../media/azure-netapp-files/snapshot-policy-right-click-menu.png) -3. 
Click **Yes** to confirm that you want to delete the snapshot policy. +3. Select **Yes** to confirm that you want to delete the snapshot policy. ![Screenshot that shows snapshot policy delete confirmation.](../media/azure-netapp-files/snapshot-policy-delete-confirm.png) diff --git a/articles/azure-netapp-files/whats-new.md b/articles/azure-netapp-files/whats-new.md index 90d1e52a4742f..02b923e3f0587 100644 --- a/articles/azure-netapp-files/whats-new.md +++ b/articles/azure-netapp-files/whats-new.md @@ -12,7 +12,7 @@ ms.service: azure-netapp-files ms.workload: storage ms.tgt_pltfrm: na ms.topic: overview -ms.date: 05/18/2022 +ms.date: 06/07/2022 ms.author: anfdocs --- @@ -20,8 +20,24 @@ ms.author: anfdocs Azure NetApp Files is updated regularly. This article provides a summary about the latest new features and enhancements. +## June 2022 + +* [Azure NetApp Files datastores for Azure VMware Solution](../azure-vmware/attach-azure-netapp-files-to-azure-vmware-solution-hosts.md) (Preview) + + [Azure NetApp Files datastores for Azure VMware Solution](https://azure.microsoft.com/blog/power-your-file-storageintensive-workloads-with-azure-vmware-solution) is now in public preview. This new integration between Azure VMware Solution and Azure NetApp Files will enable you to [create datastores via the Azure VMware Solution resource provider with Azure NetApp Files NFS volumes](../azure-vmware/attach-azure-netapp-files-to-azure-vmware-solution-hosts.md) and mount the datastores on your private cloud clusters of choice. Along with the integration of Azure disk pools for Azure VMware Solution, this will provide more choice to scale storage needs independently of compute resources. For your storage-intensive workloads running on Azure VMware Solution, the integration with Azure NetApp Files helps to easily scale storage capacity and performance beyond the limits of native vSAN built on top of the AVS nodes and lower your overall total cost of ownership. + + Regional Coverage: Australia East, Australia Southeast, Brazil South, Canada Central, Canada East, Central US, East US, France Central, Germany West Central, Japan West, North Central US, North Europe, South Central US, Southeast Asia, Switzerland West, UK South, UK West, West US. Regional coverage will expand as the preview progresses. + +* [Azure Policy built-in definitions for Azure NetApp Files](azure-policy-definitions.md#built-in-policy-definitions) + + Azure Policy helps to enforce organizational standards and assess compliance at scale. Through its compliance dashboard, it provides an aggregated view to evaluate the overall state of the environment, with the ability to drill down to the per-resource, per-policy granularity. It also helps to bring your resources to compliance through bulk remediation for existing resources and automatic remediation for new resources. Azure NetApp Files already supports Azure Policy via custom policy definitions. Azure NetApp Files now also provides built-in policy definitions to enable organization admins to restrict creation of unsecure NFS volumes or audit existing volumes more easily. ## May 2022 +* [LDAP signing](create-active-directory-connections.md#ldap-signing) now generally available (GA) + + The LDAP signing feature is now generally available. You no longer need to register the feature before using it. 
+ + * [SMB Continuous Availability (CA) shares support for Citrix App Layering](enable-continuous-availability-existing-smb.md) (Preview) [Citrix App Layering](https://docs.citrix.com/en-us/citrix-app-layering/4.html) radically reduces the time it takes to manage Windows applications and images. App Layering separates the management of your OS and apps from your infrastructure. You can install each app and OS patch once, update the associated templates, and redeploy your images. You can publish layered images as open standard virtual disks, usable in any environment. App Layering can be used to provide dynamic access application layer virtual disks stored on SMB shared networked storage, including Azure NetApp Files. To enhance App Layering resiliency to events of storage service maintenance, Azure NetApp Files has extended support for [SMB Transparent Failover via SMB Continuous Availability (CA) shares on Azure NetApp Files](azure-netapp-files-create-volumes-smb.md#continuous-availability) for App Layering virtual disks. For more information, see [Azure NetApp Files Azure Virtual Desktop Infrastructure solutions | Citrix](azure-netapp-files-solution-architectures.md#citrix). diff --git a/articles/azure-percept/azure-percept-devkit-software-release-notes.md b/articles/azure-percept/azure-percept-devkit-software-release-notes.md index cf4d6de20fb42..aae5f0b23713c 100644 --- a/articles/azure-percept/azure-percept-devkit-software-release-notes.md +++ b/articles/azure-percept/azure-percept-devkit-software-release-notes.md @@ -15,6 +15,11 @@ This page provides information of changes and fixes for each Azure Percept DK OS To download the update images, refer to [Azure Percept DK software releases for USB cable update](./software-releases-usb-cable-updates.md) or [Azure Percept DK software releases for OTA update](./software-releases-over-the-air-updates.md). +## May (2205) Release + +- Operating System + - Latest security updates on BIND, Node.js, Cyrus SASL, libxml2, and OpenSSL packages. + ## March (2203) Release - Operating System diff --git a/articles/azure-percept/overview-ai-models.md b/articles/azure-percept/overview-ai-models.md index 60782947da64b..83bd210c84a44 100644 --- a/articles/azure-percept/overview-ai-models.md +++ b/articles/azure-percept/overview-ai-models.md @@ -26,7 +26,7 @@ With pre-trained models, no coding or training data collection is required. Simp ## Reference solutions -A [people counting reference solution](https://github.com/microsoft/Azure-Percept-Reference-Solutions/tree/main/people-detection-app) is also available. This reference solution is an open-source AI application providing edge-based people counting with user-defined zone entry/exit events. Video and AI output from the on-premise edge device is egressed to [Azure Data Lake](https://azure.microsoft.com/solutions/data-lake/), with the user interface running as an Azure website. AI inferencing is provided by an open-source AI model for people detection. +A people counting reference solution is also available. This reference solution is an open-source AI application providing edge-based people counting with user-defined zone entry/exit events. Video and AI output from the on-premises edge device is egressed to [Azure Data Lake](https://azure.microsoft.com/solutions/data-lake/), with the user interface running as an Azure website. AI inferencing is provided by an open-source AI model for people detection. 
:::image type="content" source="./media/overview-ai-models/people-detector.gif" alt-text="Spatial analytics pre-built solution gif."::: diff --git a/articles/azure-percept/software-releases-usb-cable-updates.md b/articles/azure-percept/software-releases-usb-cable-updates.md index fc2ea5ab8e6e4..7b4ea5e7e2d05 100644 --- a/articles/azure-percept/software-releases-usb-cable-updates.md +++ b/articles/azure-percept/software-releases-usb-cable-updates.md @@ -23,7 +23,7 @@ This page provides information and download links for all the dev kit OS/firmwar ## Latest releases - **Latest service release** -March Service Release (2203): [Azure-Percept-DK-1.0.20220310.1223-public_preview_1.0.zip]() +May Service Release (2205): [Azure-Percept-DK-1.0.20220511.1756-public_preview_1.0.zip]() - **Latest major update or known stable version** Feature Update (2104): [Azure-Percept-DK-1.0.20210409.2055.zip](https://download.microsoft.com/download/6/4/d/64d53e60-f702-432d-a446-007920a4612c/Azure-Percept-DK-1.0.20210409.2055.zip) @@ -31,6 +31,7 @@ Feature Update (2104): [Azure-Percept-DK-1.0.20210409.2055.zip](https://download |Release|Download Links|Note| |---|---|:---:| +|May Service Release (2205)|[Azure-Percept-DK-1.0.20220511.1756-public_preview_1.0.zip]()|| |March Service Release (2203)|[Azure-Percept-DK-1.0.20220310.1223-public_preview_1.0.zip]()|| |February Service Release (2202)|[Azure-Percept-DK-1.0.20220209.1156-public_preview_1.0.zip]()|| |January Service Release (2201)|[Azure-Percept-DK-1.0.20220112.1519-public_preview_1.0.zip]()|| diff --git a/articles/azure-percept/voice-control-your-inventory-then-visualize-with-power-bi-dashboard.md b/articles/azure-percept/voice-control-your-inventory-then-visualize-with-power-bi-dashboard.md index a7ba7e62d24c5..b6cbec6b79d1f 100644 --- a/articles/azure-percept/voice-control-your-inventory-then-visualize-with-power-bi-dashboard.md +++ b/articles/azure-percept/voice-control-your-inventory-then-visualize-with-power-bi-dashboard.md @@ -38,7 +38,7 @@ In this tutorial, you learn how to: - The [Azure Functions Core Tools](https://github.com/MicrosoftDocs/azure-docs/blob/master/articles/azure-functions/functions-run-local.md) version 3.x. - The [Python extension](https://marketplace.visualstudio.com/items?itemName=ms-python.python) for Visual Studio Code. - The [Azure Functions extension](https://marketplace.visualstudio.com/items?itemName=ms-azuretools.vscode-azurefunctions) for Visual Studio Code. -- Create an [Azure SQL server](https://github.com/MicrosoftDocs/azure-docs/blob/master/articles/azure-sql/database/single-database-create-quickstart.md) +- Create an [Azure SQL server](/azure/azure-sql/database/single-database-create-quickstart) ## Software architecture diff --git a/articles/azure-portal/get-subscription-tenant-id.md b/articles/azure-portal/get-subscription-tenant-id.md index a4e7f328b6c64..b7be78e6363bc 100644 --- a/articles/azure-portal/get-subscription-tenant-id.md +++ b/articles/azure-portal/get-subscription-tenant-id.md @@ -21,7 +21,7 @@ Follow these steps to retrieve the ID for a subscription in the Azure portal. 1. Copy the **Subscription ID**. You can paste this value into a text document or other location. > [!TIP] -> You can also list your subscriptions and view their IDs programmatically by using [Get-AzSubscription](/powershell/module/az.accounts/get-azsubscription?view=latest) (Azure PowerShell) or [az account list](/cli/azure/account?view=azure-cli-latest) (Azure CLI). 
+> You can also list your subscriptions and view their IDs programmatically by using [Get-AzSubscription](/powershell/module/az.accounts/get-azsubscription?view=latest&preserve-view=true) (Azure PowerShell) or [az account list](/cli/azure/account?view=azure-cli-latest&preserve-view=true) (Azure CLI). ## Find your Azure AD tenant diff --git a/articles/azure-portal/set-preferences.md b/articles/azure-portal/set-preferences.md index 4a165495c795a..df32f4fa936cf 100644 --- a/articles/azure-portal/set-preferences.md +++ b/articles/azure-portal/set-preferences.md @@ -1,7 +1,7 @@ --- title: Manage Azure portal settings and preferences description: Change Azure portal settings such as default subscription/directory, timeouts, menu mode, contrast, theme, notifications, language/region and more. -ms.date: 03/23/2022 +ms.date: 06/01/2022 ms.topic: how-to --- @@ -61,7 +61,7 @@ To create a new filter, select **Create a filter**. You can create up to ten fil Each filter must have a unique name that is between 8 and 50 characters long and contains only letters, numbers, and hyphens. -:::image type="content" source="media/set-preferences/azure-portal-settings-filtering-create.png" alt-text="Screenshot showing the Create a filter options."::: +:::image type="content" source="media/set-preferences/azure-portal-settings-filtering-create.png" alt-text="Screenshot showing options for Create a filter."::: After you've named your filter, enter at least one condition. In the **Filter type** field, select either **Subscription name**, **Subscription ID**, or **Subscription state**. Then select an operator and enter a value to filter on. @@ -97,6 +97,12 @@ The theme that you choose affects the background and font colors that appear in Alternatively, you can choose a theme from the **High contrast theme** section. These themes can make the Azure portal easier to read, especially if you have a visual impairment. Selecting either the white or black high-contrast theme will override any other theme selections. +### Focus navigation + +Choose whether or not to enable focus navigation. + +If enabled, only one screen at a time will be visible as you step through a process in the portal. If disabled, as you move through the steps of a process, you'll be able to move between them through a horizontal scroll bar. + ### Startup page Choose one of the following options for the page you'll see when you first sign in to the Azure portal. @@ -109,7 +115,7 @@ Choose one of the following options for the page you'll see when you first sign Choose one of the following options for the directory to work in when you first sign in to the Azure portal. - **Sign in to your last visited directory**: When you sign in to the Azure portal, you'll start in whichever directory you'd been working in last time. -- **Select a directory**: Choose this option to select one of your directory. You'll start in that directory every time you sign in to the Azure portal, even if you had been working in a different directory last time. +- **Select a directory**: Choose this option to select one of your directories. You'll start in that directory every time you sign in to the Azure portal, even if you had been working in a different directory last time. 
:::image type="content" source="media/set-preferences/azure-portal-settings-startup-views.png" alt-text="Screenshot showing the Startup section of Appearance + startup views."::: @@ -210,7 +216,7 @@ To confirm that the inactivity timeout policy is set correctly, select **Notific ### Enable or disable pop-up notifications -Notifications are system messages related to your current session. They provide information such as showing your current credit balance, confirming your last action, or letting you know when resources you created become . When pop-up notifications are turned on, the messages briefly display in the top corner of your screen. +Notifications are system messages related to your current session. They provide information such as showing your current credit balance, confirming your last action, or letting you know when resources you created become available. When pop-up notifications are turned on, the messages briefly display in the top corner of your screen. To enable or disable pop-up notifications, select or clear **Enable pop-up notifications**. diff --git a/articles/azure-portal/supportability/how-to-create-azure-support-request.md b/articles/azure-portal/supportability/how-to-create-azure-support-request.md index 4963f3fbee1fe..c2d6a202046b3 100644 --- a/articles/azure-portal/supportability/how-to-create-azure-support-request.md +++ b/articles/azure-portal/supportability/how-to-create-azure-support-request.md @@ -3,7 +3,7 @@ title: How to create an Azure support request description: Customers who need assistance can use the Azure portal to find self-service solutions and to create and manage support requests. ms.topic: how-to ms.custom: support-help-page -ms.date: 02/01/2022 +ms.date: 06/02/2022 --- # Create an Azure support request @@ -14,7 +14,6 @@ Azure enables you to create and manage support requests, also known as support t > The Azure portal URL is specific to the Azure cloud where your organization is deployed. > >- Azure portal for commercial use is: [https://portal.azure.com](https://portal.azure.com) ->- Azure portal for Germany is: [https://portal.microsoftazure.de](https://portal.microsoftazure.de) >- Azure portal for the United States government is: [https://portal.azure.us](https://portal.azure.us) Azure provides unlimited support for subscription management, which includes billing, quota adjustments, and account transfers. For technical support, you need a support plan. For more information, see [Compare support plans](https://azure.microsoft.com/support/plans). @@ -25,7 +24,7 @@ You can get to **Help + support** in the Azure portal. It's available from the A ### Azure role-based access control -To create a support request, you must have the [Owner](../../role-based-access-control/built-in-roles.md#owner), [Contributor](../../role-based-access-control/built-in-roles.md#contributor), [Support Request Contributor](../../role-based-access-control/built-in-roles.md#support-request-contributor) role, or a custom role with [Microsoft.Support/*](../../role-based-access-control/resource-provider-operations.md#microsoftsupport), at the subscription level. +You must have the appropriate access to a subscription before you can create a support request for it. 
This means you must have the [Owner](../../role-based-access-control/built-in-roles.md#owner), [Contributor](../../role-based-access-control/built-in-roles.md#contributor), or [Support Request Contributor](../../role-based-access-control/built-in-roles.md#support-request-contributor) role, or a custom role with [Microsoft.Support/*](../../role-based-access-control/resource-provider-operations.md#microsoftsupport), at the subscription level. To create a support request without a subscription, for example an Azure Active Directory scenario, you must be an [Admin](../../active-directory/roles/permissions-reference.md). @@ -63,7 +62,9 @@ We'll walk you through some steps to gather information about your problem and h The first step of the support request process is to select an issue type. You'll then be prompted for more information, which can vary depending on what type of issue you selected. If you select **Technical**, you'll need to specify the service that your issue relates to. Depending on the service, you'll see additional options for **Problem type** and **Problem subtype**. > [!IMPORTANT] -> In most cases, you'll need to specify a subscription. Choose the subscription where you are experiencing the problem. The support engineer assigned to your case will be able to access the subscription you specify here. You can tell them about additional subscriptions in your description (or by [sending a message](how-to-manage-azure-support-request.md#send-a-message) later), but the support engineer will only be able to work on [subscriptions to which you have access](#azure-role-based-access-control). +> In most cases, you'll need to specify a subscription. Be sure to choose the subscription where you are experiencing the problem. The support engineer assigned to your case will only be able to access resources in the subscription you specify. The access requirement serves as a point of confirmation that the support engineer is sharing information with the right audience, which is a key factor for ensuring the security and privacy of customer data. For details on how Azure treats customer data, see [Data Privacy in the Trusted Cloud](https://azure.microsoft.com/overview/trusted-cloud/privacy/). +> +> If the issue applies to multiple subscriptions, you can mention additional subscriptions in your description, or by [sending a message](how-to-manage-azure-support-request.md#send-a-message) later. However, the support engineer will only be able to work on [subscriptions to which you have access](#azure-role-based-access-control). If you don't have the required access for a subscription, we won't be able to work on it as part of your request. :::image type="content" source="media/how-to-create-azure-support-request/basics2lower.png" alt-text="Screenshot of the Problem description step of the support request process."::: diff --git a/articles/azure-relay/ip-firewall-virtual-networks.md b/articles/azure-relay/ip-firewall-virtual-networks.md index 283c78eb2d390..9f368a4fa240f 100644 --- a/articles/azure-relay/ip-firewall-virtual-networks.md +++ b/articles/azure-relay/ip-firewall-virtual-networks.md @@ -11,10 +11,6 @@ By default, Relay namespaces are accessible from internet as long as the request This feature is helpful in scenarios in which Azure Relay should be only accessible from certain well-known sites. Firewall rules enable you to configure rules to accept traffic originating from specific IPv4 addresses. 
For example, if you use Relay with [Azure Express Route](../expressroute/expressroute-faqs.md#supported-services), you can create a **firewall rule** to allow traffic from only your on-premises infrastructure IP addresses. -> [!IMPORTANT] -> This feature is currently in preview. - - ## Enable IP firewall rules The IP firewall rules are applied at the namespace level. Therefore, the rules apply to all connections from clients using any supported protocol. Any connection attempt from an IP address that does not match an allowed IP rule on the namespace is rejected as unauthorized. The response does not mention the IP rule. IP filter rules are applied in order, and the first rule that matches the IP address determines the accept or reject action. diff --git a/articles/azure-resource-manager/bicep/bicep-functions-resource.md b/articles/azure-resource-manager/bicep/bicep-functions-resource.md index 9a6c0f152f0b0..f4b7ded05caa1 100644 --- a/articles/azure-resource-manager/bicep/bicep-functions-resource.md +++ b/articles/azure-resource-manager/bicep/bicep-functions-resource.md @@ -271,7 +271,7 @@ The possible uses of `list*` are shown in the following table. | Microsoft.ApiManagement/service/namedValues | [listValue](/rest/api/apimanagement/current-ga/named-value/list-value) | | Microsoft.ApiManagement/service/openidConnectProviders | [listSecrets](/rest/api/apimanagement/current-ga/openid-connect-provider/list-secrets) | | Microsoft.ApiManagement/service/subscriptions | [listSecrets](/rest/api/apimanagement/current-ga/subscription/list-secrets) | -| Microsoft.AppConfiguration/configurationStores | [ListKeys](/rest/api/appconfiguration/configurationstores/listkeys) | +| Microsoft.AppConfiguration/configurationStores | [ListKeys](/rest/api/appconfiguration/stable/configuration-stores/list-keys) | | Microsoft.AppPlatform/Spring | [listTestKeys](/rest/api/azurespringapps/services/list-test-keys) | | Microsoft.Automation/automationAccounts | [listKeys](/rest/api/automation/keys/listbyautomationaccount) | | Microsoft.Batch/batchAccounts | [listkeys](/rest/api/batchmanagement/batchaccount/getkeys) | diff --git a/articles/azure-resource-manager/bicep/toc.yml b/articles/azure-resource-manager/bicep/toc.yml index 7e5dddf9ec7e5..8d087cc588e97 100644 --- a/articles/azure-resource-manager/bicep/toc.yml +++ b/articles/azure-resource-manager/bicep/toc.yml @@ -69,10 +69,16 @@ href: ../../hdinsight/kafka/apache-kafka-quickstart-bicep.md?toc=/azure/azure-resource-manager/bicep/toc.json - name: HDInsight - Spark href: ../../hdinsight/spark/apache-spark-jupyter-spark-use-bicep.md?toc=/azure/azure-resource-manager/bicep/toc.json + - name: Stream Analytics + href: ../../stream-analytics/quick-create-bicep.md?toc=/azure/azure-resource-manager/bicep/toc.json + - name: Azure Synapse Analytics - Dedicated SQL pool + href: ../../synapse-analytics/sql-data-warehouse/quickstart-bicep.md?toc=/azure/azure-resource-manager/bicep/toc.json - name: Compute items: - name: Batch href: ../../batch/quick-create-bicep.md?toc=/azure/azure-resource-manager/bicep/toc.json + - name: Functions + href: ../../azure-functions/functions-create-first-function-bicep.md?toc=/azure/azure-resource-manager/bicep/toc.json - name: Linux virtual machine href: ../../virtual-machines/linux/quick-create-bicep.md?toc=/azure/azure-resource-manager/bicep/toc.json - name: Windows virtual machine @@ -83,6 +89,8 @@ href: ../../container-instances/container-instances-quickstart-bicep.md?toc=/azure/azure-resource-manager/bicep/toc.json - name: Databases items: + 
- name: Cache for Redis + href: ../../azure-cache-for-redis/cache-redis-cache-bicep-provision.md?toc=/azure/azure-resource-manager/bicep/toc.json - name: Cosmos DB href: ../../cosmos-db/sql/quick-create-bicep.md?toc=/azure/azure-resource-manager/bicep/toc.json - name: Database for MariaDB @@ -95,6 +103,8 @@ href: ../../dms/create-dms-bicep.md?toc=/azure/azure-resource-manager/bicep/toc.json - name: SQL Database href: /azure/azure-sql/database/single-database-create-bicep-quickstart + - name: SQL Managed Instance + href: /azure/azure-sql/managed-instance/create-bicep-quickstart - name: DevOps items: - name: App Configuration @@ -155,6 +165,10 @@ items: - name: API Management href: ../../api-management/quickstart-bicep.md?toc=/azure/azure-resource-manager/bicep/toc.json + - name: Notification Hubs + href: ../../notification-hubs/create-notification-hub-bicep.md?toc=/azure/azure-resource-manager/bicep/toc.json + - name: Redis Web App + href: ../../azure-cache-for-redis/cache-web-app-bicep-with-redis-cache-provision.md?toc=/azure/azure-resource-manager/bicep/toc.json - name: SignalR Service href: ../../azure-signalr/signalr-quickstart-azure-signalr-service-bicep.md?toc=/azure/azure-resource-manager/bicep/toc.json - name: Concepts diff --git a/articles/azure-resource-manager/custom-providers/tutorial-custom-providers-create.md b/articles/azure-resource-manager/custom-providers/tutorial-custom-providers-create.md index 9865725e43016..2b6f0cfe18f51 100644 --- a/articles/azure-resource-manager/custom-providers/tutorial-custom-providers-create.md +++ b/articles/azure-resource-manager/custom-providers/tutorial-custom-providers-create.md @@ -3,7 +3,7 @@ title: Create and use a custom provider description: This tutorial shows how to create and use an Azure Custom Provider. Use custom providers to change workflows on Azure. author: jjbfour ms.topic: tutorial -ms.date: 06/19/2019 +ms.date: 05/06/2022 ms.author: jobreen ms.custom: devx-track-azurecli --- @@ -70,7 +70,7 @@ You can deploy the previous custom provider by using an Azure Resource Manager t ```JSON { - "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "$schema": "http://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "resources": [ { @@ -101,11 +101,11 @@ You can deploy the previous custom provider by using an Azure Resource Manager t ## Use custom actions and resources -After you create a custom provider, you can use the new Azure APIs. The following tabs explain how to call and use a custom provider. +After you create a custom provider, you can use the new Azure APIs. The following sections explain how to call and use a custom provider. ### Custom actions -# [Azure CLI](#tab/azure-cli) +#### Azure CLI > [!NOTE] > You must replace the `{subscriptionId}` and `{resourceGroupName}` placeholders with the subscription and resource group of where you deployed the custom provider. @@ -121,15 +121,9 @@ az resource invoke-action --action myCustomAction \ Parameter | Required | Description ---|---|--- -*action* | Yes | The name of the action defined in the custom provider -*ids* | Yes | The resource ID of the custom provider -*request-body* | No | The request body that will be sent to the endpoint - -# [Template](#tab/template) - -None. - ---- +*action* | Yes | The name of the action defined in the custom provider. +*ids* | Yes | The resource ID of the custom provider. +*request-body* | No | The request body that will be sent to the endpoint. 
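As a usage sketch, a call with the placeholders filled in might look like the following. The subscription ID, resource group, custom provider name, and request body are illustrative values only, not values taken from this tutorial's deployment.

```azurecli-interactive
# Invoke the custom action; the resource ID segments and the JSON payload are placeholders.
az resource invoke-action --action myCustomAction \
                          --ids "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myTestResourceGroup/providers/Microsoft.CustomProviders/resourceProviders/myCustomProvider" \
                          --request-body '{"hello": "world"}'
```

If the endpoint behind the custom provider simply echoes requests, as the sample function app built earlier in this tutorial series does, the command returns the same JSON body that was sent.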
### Custom resources @@ -184,7 +178,7 @@ A sample Resource Manager template: ```JSON { - "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "$schema": "http://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "resources": [ { @@ -215,5 +209,5 @@ Parameter | Required | Description In this article, you learned about custom providers. For more information, see: -- [How to: Adding custom actions to Azure REST API](./custom-providers-action-endpoint-how-to.md) -- [How to: Adding custom resources to Azure REST API](./custom-providers-resources-endpoint-how-to.md) +- [How to: Add custom actions to Azure REST API](./custom-providers-action-endpoint-how-to.md) +- [How to: Add custom resources to Azure REST API](./custom-providers-resources-endpoint-how-to.md) diff --git a/articles/azure-resource-manager/custom-providers/tutorial-custom-providers-function-authoring.md b/articles/azure-resource-manager/custom-providers/tutorial-custom-providers-function-authoring.md index 6928d99f269fb..20518bd5ba8bd 100644 --- a/articles/azure-resource-manager/custom-providers/tutorial-custom-providers-function-authoring.md +++ b/articles/azure-resource-manager/custom-providers/tutorial-custom-providers-function-authoring.md @@ -3,7 +3,7 @@ title: Author a RESTful endpoint description: This tutorial shows how to author a RESTful endpoint for custom providers. It details how to handle requests and responses for the supported RESTful HTTP methods. author: jjbfour ms.topic: tutorial -ms.date: 01/13/2021 +ms.date: 05/06/2022 ms.author: jobreen --- @@ -24,7 +24,7 @@ In this tutorial, you update the function app to work as a RESTful endpoint for - **POST**: Trigger an action - **GET (collection)**: List all existing resources - For this tutorial, you use Azure Table storage. But any database or storage service can work. + For this tutorial, you use Azure Table storage, but any database or storage service works. ## Partition custom resources in storage @@ -36,7 +36,7 @@ The following example shows an `x-ms-customproviders-requestpath` header for a c X-MS-CustomProviders-RequestPath: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CustomProviders/resourceProviders/{resourceProviderName}/{myResourceType}/{myResourceName} ``` -Based on the example's `x-ms-customproviders-requestpath` header, you can create the *partitionKey* and *rowKey* parameters for your storage as shown in the following table: +Based on the `x-ms-customproviders-requestpath` header, you can create the *partitionKey* and *rowKey* parameters for your storage as shown in the following table: Parameter | Template | Description ---|---|--- @@ -60,6 +60,7 @@ public class CustomResource : ITableEntity public ETag ETag { get; set; } } ``` + **CustomResource** is a simple, generic class that accepts any input data. It's based on **ITableEntity**, which is used to store data. The **CustomResource** class implements all properties from interface **ITableEntity**: **timestamp**, **eTag**, **partitionKey**, and **rowKey**. ## Support custom provider RESTful methods @@ -93,7 +94,7 @@ public static async Task TriggerCustomAction(HttpRequestMes } ``` -The **TriggerCustomAction** method accepts an incoming request and simply echoes back the response with a status code. +The **TriggerCustomAction** method accepts an incoming request and echoes back the response with a status code. 
### Create a custom resource @@ -134,7 +135,7 @@ public static async Task CreateCustomResource(HttpRequestMe } ``` -The **CreateCustomResource** method updates the incoming request to include the Azure-specific fields **id**, **name**, and **type**. These fields are top-level properties used by services across Azure. They let the custom provider interoperate with other services like Azure Policy, Azure Resource Manager Templates, and Azure Activity Log. +The **CreateCustomResource** method updates the incoming request to include the Azure-specific fields **id**, **name**, and **type**. These fields are top-level properties used by services across Azure. They let the custom provider interoperate with other services like Azure Policy, Azure Resource Manager templates, and Azure Activity Log. Property | Example | Description ---|---|--- diff --git a/articles/azure-resource-manager/custom-providers/tutorial-custom-providers-function-setup.md b/articles/azure-resource-manager/custom-providers/tutorial-custom-providers-function-setup.md index f7da7ebc0e41d..048221056604d 100644 --- a/articles/azure-resource-manager/custom-providers/tutorial-custom-providers-function-setup.md +++ b/articles/azure-resource-manager/custom-providers/tutorial-custom-providers-function-setup.md @@ -3,11 +3,11 @@ title: Set up Azure Functions description: This tutorial goes over how to create a function app in Azure Functions and set it up to work with Azure Custom Providers. author: jjbfour ms.topic: tutorial -ms.date: 06/19/2019 +ms.date: 05/06/2022 ms.author: jobreen --- -# Set up Azure Functions for Azure Custom Providers +# Set up Azure Functions for custom providers A custom provider is a contract between Azure and an endpoint. With custom providers, you can change workflows in Azure. This tutorial shows how to set up a function app in Azure Functions to work as a custom provider endpoint. @@ -16,7 +16,7 @@ A custom provider is a contract between Azure and an endpoint. With custom provi > [!NOTE] > In this tutorial, you create a simple service endpoint that uses a function app in Azure Functions. However, a custom provider can use any publicly accessible endpoint. Alternatives include Azure Logic Apps, Azure API Management, and the Web Apps feature of Azure App Service. -To start this tutorial, you should first follow the tutorial [Create your first function app in the Azure portal](../../azure-functions/functions-get-started.md). That tutorial creates a .NET core webhook function that can be modified in the Azure portal. It is also the foundation for the current tutorial. +To start this tutorial, you should first follow the tutorial [Create your first function app in the Azure portal](../../azure-functions/functions-get-started.md). That tutorial creates a .NET core webhook function that can be modified in the Azure portal. It's also the foundation for the current tutorial. ## Install Azure Table storage bindings @@ -26,8 +26,8 @@ To install the Azure Table storage bindings: 1. Select **+ New Input**. 1. Select **Azure Table Storage**. 1. Install the Microsoft.Azure.WebJobs.Extensions.Storage extension if it isn't already installed. -1. In the **Table parameter name** box, enter **tableStorage**. -1. In the **Table name** box, enter **myCustomResources**. +1. In the **Table parameter name** box, enter *tableStorage*. +1. In the **Table name** box, enter *myCustomResources*. 1. Select **Save** to save the updated input parameter. 
![Custom provider overview showing table bindings](./media/create-custom-provider/azure-functions-table-bindings.png) @@ -44,7 +44,7 @@ To set up the Azure function to include the custom provider RESTful request meth ## Add Azure Resource Manager NuGet packages > [!NOTE] -> If your C# project file is missing from the project directory, you can add it manually. Or it will appear after the Microsoft.Azure.WebJobs.Extensions.Storage extension is installed on the function app. +> If your C# project file is missing from the project directory, you can add it manually, or it will appear after the Microsoft.Azure.WebJobs.Extensions.Storage extension is installed on the function app. Next, update the C# project file to include helpful NuGet libraries. These libraries make it easier to parse incoming requests from custom providers. Follow the steps to [add extensions from the portal](../../azure-functions/functions-bindings-register.md) and update the C# project file to include the following package references: @@ -72,6 +72,6 @@ The following XML element is an example C# project file: ## Next steps -In this tutorial, you set up a function app in Azure Functions to work as an Azure custom provider endpoint. +In this tutorial, you set up a function app in Azure Functions to work as an Azure Custom Provider endpoint. To learn how to author a RESTful custom provider endpoint, see [Tutorial: Authoring a RESTful custom provider endpoint](./tutorial-custom-providers-function-authoring.md). diff --git a/articles/azure-resource-manager/custom-providers/tutorial-resource-onboarding.md b/articles/azure-resource-manager/custom-providers/tutorial-resource-onboarding.md index 2b5de01b926a7..6f4dd6df5046f 100644 --- a/articles/azure-resource-manager/custom-providers/tutorial-resource-onboarding.md +++ b/articles/azure-resource-manager/custom-providers/tutorial-resource-onboarding.md @@ -1,36 +1,36 @@ --- -title: Tutorial - resource onboarding +title: Extend resources with custom providers description: Resource onboarding through custom providers allows you to manipulate and extend existing Azure resources. ms.topic: tutorial ms.author: jobreen author: jjbfour -ms.date: 09/17/2019 +ms.date: 05/06/2022 --- -# Tutorial: Resource onboarding with Azure Custom Providers +# Extend resources with custom providers -In this tutorial, you'll deploy to Azure a custom resource provider that extends the Azure Resource Manager API with the Microsoft.CustomProviders/associations resource type. The tutorial shows how to extend existing resources that are outside the resource group where the custom provider instance is located. In this tutorial, the custom resource provider is powered by an Azure logic app, but you can use any public API endpoint. +In this tutorial, you deploy a custom resource provider to Azure that extends the Azure Resource Manager API with the Microsoft.CustomProviders/associations resource type. The tutorial shows how to extend existing resources that are outside the resource group where the custom provider instance is located. In this tutorial, the custom resource provider is powered by an Azure logic app, but you can use any public API endpoint. ## Prerequisites -To complete this tutorial, you need to know: +To complete this tutorial, make sure you review the following: * The capabilities of [Azure Custom Providers](overview.md). * Basic information about [resource onboarding with custom providers](concepts-resource-onboarding.md). 
## Get started with resource onboarding -In this tutorial, there are two pieces that need to be deployed: the custom provider and the association. To make the process easier, you can optionally use a single template that deploys both. +In this tutorial, there are two pieces that need to be deployed: **the custom provider** and **the association**. To make the process easier, you can optionally use a single template that deploys both. The template will use these resources: -* Microsoft.CustomProviders/resourceProviders -* Microsoft.Logic/workflows -* Microsoft.CustomProviders/associations +* [Microsoft.CustomProviders/resourceProviders](/azure/templates/microsoft.customproviders/resourceproviders) +* [Microsoft.Logic/workflows](/azure/templates/microsoft.logic/workflows) +* [Microsoft.CustomProviders/associations](/azure/templates/microsoft.customproviders/associations) ```json { - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": { "location": { @@ -76,7 +76,7 @@ The template will use these resources: "resources": [ { "type": "Microsoft.Resources/deployments", - "apiVersion": "2017-05-10", + "apiVersion": "2021-04-01", "condition": "[empty(parameters('customResourceProviderId'))]", "name": "customProviderInfrastructureTemplate", "properties": { @@ -93,7 +93,7 @@ The template will use these resources: "resources": [ { "type": "Microsoft.Logic/workflows", - "apiVersion": "2017-07-01", + "apiVersion": "2019-05-01", "name": "[parameters('logicAppName')]", "location": "[parameters('location')]", "properties": { @@ -160,7 +160,7 @@ The template will use these resources: "name": "associations", "mode": "Secure", "routingType": "Webhook,Cache,Extension", - "endpoint": "[[listCallbackURL(concat(resourceId('Microsoft.Logic/workflows', parameters('logicAppName')), '/triggers/CustomProviderWebhook'), '2017-07-01').value]" + "endpoint": "[[listCallbackURL(concat(resourceId('Microsoft.Logic/workflows', parameters('logicAppName')), '/triggers/CustomProviderWebhook'), '2019-05-01').value]" } ] } @@ -202,7 +202,7 @@ The template will use these resources: The first part of the template deploys the custom provider infrastructure. This infrastructure defines the effect of the associations resource. If you're not familiar with custom providers, see [Custom provider basics](overview.md). -Let's deploy the custom provider infrastructure. Either copy, save, and deploy the preceding template, or follow along and deploy the infrastructure by using the Azure portal. +Let's deploy the custom provider infrastructure. Either copy, save, and deploy the preceding template, or follow along and deploy the infrastructure using the Azure portal. 1. Go to the [Azure portal](https://portal.azure.com). @@ -214,7 +214,7 @@ Let's deploy the custom provider infrastructure. Either copy, save, and deploy t ![Select Add](media/tutorial-resource-onboarding/templatesadd.png) -4. Under **General**, enter a **Name** and **Description** for the new template: +4. Under **General**, enter a *Name* and *Description* for the new template: ![Template name and description](media/tutorial-resource-onboarding/templatesdescription.png) @@ -258,13 +258,13 @@ Let's deploy the custom provider infrastructure. Either copy, save, and deploy t After you have the custom provider infrastructure set up, you can easily deploy more associations. 
The resource group for additional associations doesn't have to be the same as the resource group where you deployed the custom provider infrastructure. To create an association, you need to have Microsoft.CustomProviders/resourceproviders/write permissions on the specified Custom Resource Provider ID. -1. Go to the custom provider **Microsoft.CustomProviders/resourceProviders** resource in the resource group of the previous deployment. You'll need to select the **Show hidden types** check box: +1. Go to the custom provider **Microsoft.CustomProviders/resourceProviders** resource in the resource group of the previous deployment. You need to select the **Show hidden types** check box: ![Go to the resource](media/tutorial-resource-onboarding/showhidden.png) 2. Copy the Resource ID property of the custom provider. -3. Search for **templates** in **All Services** or by using the main search box: +3. Search for *templates* in **All Services** or by using the main search box: ![Search for templates](media/tutorial-resource-onboarding/templates.png) @@ -278,9 +278,11 @@ After you have the custom provider infrastructure set up, you can easily deploy ![New associations resource](media/tutorial-resource-onboarding/createdassociationresource.png) -If you want, you can go back to the logic app **Run history** and see that another call was made to the logic app. You can update the logic app to augment additional functionality for each created association. +You can go back to the logic app **Run history** and see that another call was made to the logic app. You can update the logic app to add functionality for each created association. -## Getting help +## Next steps -If you have questions about Azure Custom Providers, try asking them on [Stack Overflow](https://stackoverflow.com/questions/tagged/azure-custom-providers). A similar question might have already been answered, so check first before posting. Add the tag `azure-custom-providers` to get a fast response! +In this article, you deployed a custom resource provider to Azure that extends the Azure Resource Manager API with the Microsoft.CustomProviders/associations resource type. To continue learning about custom providers, see: +* [Deploy associations for a custom provider using Azure Policy](./concepts-built-in-policy.md) +* [Azure Custom Providers resource onboarding overview](./concepts-resource-onboarding.md) diff --git a/articles/azure-resource-manager/management/extension-resource-types.md b/articles/azure-resource-manager/management/extension-resource-types.md index 2554a358c328c..1b0d0d7ae7edf 100644 --- a/articles/azure-resource-manager/management/extension-resource-types.md +++ b/articles/azure-resource-manager/management/extension-resource-types.md @@ -2,7 +2,7 @@ title: Extension resource types description: Lists the Azure resource types are used to extend the capabilities of other resource types. 
ms.topic: conceptual -ms.date: 04/20/2022 +ms.date: 06/03/2022 --- # Resource types that extend capabilities of other resources @@ -71,8 +71,6 @@ An extension resource is a resource that adds to another resource's capabilities * artifactSetDefinitions * artifactSetSnapshots -* chaosProviderConfigurations -* chaosTargets * targets ## Microsoft.Consumption @@ -131,12 +129,6 @@ An extension resource is a resource that adds to another resource's capabilities * backupInstances -## Microsoft.Diagnostics - -* apollo -* insights -* solutions - ## Microsoft.EventGrid * eventSubscriptions @@ -176,6 +168,7 @@ An extension resource is a resource that adds to another resource's capabilities ## Microsoft.KubernetesConfiguration * extensions +* extensionTypes * fluxConfigurations * namespaces * sourceControlConfigurations @@ -239,6 +232,7 @@ An extension resource is a resource that adds to another resource's capabilities * Compliances * dataCollectionAgents * deviceSecurityGroups +* governanceRules * InformationProtectionPolicies * insights * jitPolicies @@ -255,6 +249,7 @@ An extension resource is a resource that adds to another resource's capabilities * automationRules * bookmarks * cases +* dataConnectorDefinitions * dataConnectors * enrichment * entities @@ -264,6 +259,7 @@ An extension resource is a resource that adds to another resource's capabilities * metadata * MitreCoverageRecords * onboardingStates +* overview * securityMLAnalyticsSettings * settings * sourceControls diff --git a/articles/azure-resource-manager/management/overview.md b/articles/azure-resource-manager/management/overview.md index 50ef80189a856..5391befe2123f 100644 --- a/articles/azure-resource-manager/management/overview.md +++ b/articles/azure-resource-manager/management/overview.md @@ -2,7 +2,7 @@ title: Azure Resource Manager overview description: Describes how to use Azure Resource Manager for deployment, management, and access control of resources on Azure. ms.topic: overview -ms.date: 02/03/2022 +ms.date: 05/26/2022 ms.custom: contperf-fy21q1,contperf-fy21q3-portal --- # What is Azure Resource Manager? @@ -13,7 +13,7 @@ To learn about Azure Resource Manager templates (ARM templates), see the [ARM te ## Consistent management layer -When a user sends a request from any of the Azure tools, APIs, or SDKs, Resource Manager receives the request. It authenticates and authorizes the request. Resource Manager sends the request to the Azure service, which takes the requested action. Because all requests are handled through the same API, you see consistent results and capabilities in all the different tools. +When you send a request through any of the Azure APIs, tools, or SDKs, Resource Manager receives the request. It authenticates and authorizes the request before forwarding it to the appropriate Azure service. Because all requests are handled through the same API, you see consistent results and capabilities in all the different tools. The following image shows the role Azure Resource Manager plays in handling Azure requests. @@ -32,6 +32,8 @@ If you're new to Azure Resource Manager, there are some terms you might not be f * **ARM template** - A JavaScript Object Notation (JSON) file that defines one or more resources to deploy to a resource group, subscription, management group, or tenant. The template can be used to deploy the resources consistently and repeatedly. See [Template deployment overview](../templates/overview.md). * **Bicep file** - A file for declaratively deploying Azure resources. 
Bicep is a language that's been designed to provide the best authoring experience for infrastructure as code solutions in Azure. See [Bicep overview](../bicep/overview.md). +For more definitions of Azure terminology, see [Azure fundamental concepts](/azure/cloud-adoption-framework/ready/considerations/fundamental-concepts). + ## The benefits of using Resource Manager With Resource Manager, you can: diff --git a/articles/azure-resource-manager/management/resource-name-rules.md b/articles/azure-resource-manager/management/resource-name-rules.md index 2ad1f243e10c3..e6d547e7ac769 100644 --- a/articles/azure-resource-manager/management/resource-name-rules.md +++ b/articles/azure-resource-manager/management/resource-name-rules.md @@ -4,7 +4,7 @@ description: Shows the rules and restrictions for naming Azure resources. ms.topic: conceptual author: tfitzmac ms.author: tomfitz -ms.date: 05/17/2022 +ms.date: 05/25/2022 --- # Naming rules and restrictions for Azure resources @@ -95,7 +95,7 @@ In the following tables, the term alphanumeric refers to: > | locks | scope of assignment | 1-90 | Alphanumerics, periods, underscores, hyphens, and parenthesis.

<br><br>Can't end in period. |
> | policyAssignments | scope of assignment | 1-128 display name<br><br>1-64 resource name<br><br>1-24 resource name at management group scope | Display name can contain any characters.<br><br>Resource name can't use:<br>`<>*%&:\?.+/` or control characters.<br><br>Can't end with period or space. |
> | policyDefinitions | scope of definition | 1-128 display name<br><br>1-64 resource name | Display name can contain any characters.<br><br>Resource name can't use:<br>`<>*%&:\?.+/` or control characters.<br><br>Can't end with period or space. |
-> | policySetDefinitions | scope of definition | 1-128 display name<br><br>1-64 resource name<br><br>1-24 resource name at management group scope | Display name can contain any characters.<br><br>Resource name can't use:<br>`<>*%&:\?.+/` or control characters.<br><br>Can't end with period or space. |
+> | policySetDefinitions | scope of definition | 1-128 display name<br><br>1-64 resource name<br><br>1-64 resource name at management group scope | Display name can contain any characters.<br><br>Resource name can't use:<br>`<>*%&:\?.+/` or control characters.<br><br>Can't end with period or space. |
> | roleAssignments | tenant | 36 | Must be a globally unique identifier (GUID). |
> | roleDefinitions | tenant | 36 | Must be a globally unique identifier (GUID). |
@@ -537,7 +537,7 @@ In the following tables, the term alphanumeric refers to:
> [!div class="mx-tableFixed"]
> | Entity | Scope | Length | Valid Characters |
> | --- | --- | --- | --- |
-> | mediaservices | resource group | 3-24 | Lowercase letters and numbers. |
+> | mediaservices | Azure region | 3-24 | Lowercase letters and numbers. |
> | mediaservices / liveEvents | Media service | 1-32 | Alphanumerics and hyphens.<br><br>Start with alphanumeric. |
> | mediaservices / liveEvents / liveOutputs | Live event | 1-256 | Alphanumerics and hyphens.<br><br>Start with alphanumeric. |
> | mediaservices / streamingEndpoints | Media service | 1-24 | Alphanumerics and hyphens.<br><br>Start with alphanumeric. |
@@ -818,14 +818,15 @@ In the following tables, the term alphanumeric refers to:
> | --- | --- | --- | --- |
> | certificates | resource group | 1-260 | Can't use:<br>`/`<br><br>Can't end with space or period. |
> | serverfarms | resource group | 1-40 | Alphanumeric, hyphens and Unicode characters that can be mapped to Punycode |
-> | sites / functions / slots | global or per domain. See note below. | 2-60 | Alphanumeric, hyphens and Unicode characters that can be mapped to Punycode<br><br>Can't start or end with hyphen. |
+> | sites | global or per domain. See note below. | 2-60 | Alphanumeric, hyphens and Unicode characters that can be mapped to Punycode<br><br>
                  Can't start or end with hyphen. | +> | sites / slots | site | 2-59 | Alphanumeric, hyphens and Unicode characters that can be mapped to Punycode | > [!NOTE] > A web site must have a globally unique URL. When you create a web site that uses a hosting plan, the URL is `http://.azurewebsites.net`. The app name must be globally unique. When you create a web site that uses an App Service Environment, the app name must be unique within the [domain for the App Service Environment](../../app-service/environment/using-an-ase.md#app-access). For both cases, the URL of the site is globally unique. > > Azure Functions has the same naming rules and restrictions as Microsoft.Web/sites. When generating the host ID, the function app name is truncated to 32 characters. This can cause host ID collision when a shared storage account is used. For more information, see [Host ID considerations](../../azure-functions/storage-considerations.md#host-id-considerations). > -> Unicode characters are parsed to Punycode using the following method: https://docs.microsoft.com/dotnet/api/system.globalization.idnmapping.getascii +> Unicode characters are parsed to Punycode using the [IdnMapping.GetAscii method](/dotnet/api/system.globalization.idnmapping.getascii) ## Next steps diff --git a/articles/azure-resource-manager/management/resources-without-resource-group-limit.md b/articles/azure-resource-manager/management/resources-without-resource-group-limit.md index d4fdf4de25b0e..05262858b8268 100644 --- a/articles/azure-resource-manager/management/resources-without-resource-group-limit.md +++ b/articles/azure-resource-manager/management/resources-without-resource-group-limit.md @@ -2,7 +2,7 @@ title: Resources without 800 count limit description: Lists the Azure resource types that can have more than 800 instances in a resource group. ms.topic: conceptual -ms.date: 04/20/2022 +ms.date: 06/03/2022 --- # Resources not limited to 800 instances per resource group @@ -47,7 +47,9 @@ Some resources have a limit on the number instances per region. This limit is di ## Microsoft.ContainerInstance +* containerGroupProfiles * containerGroups +* containerScaleSets ## Microsoft.ContainerRegistry diff --git a/articles/azure-resource-manager/management/tag-resources.md b/articles/azure-resource-manager/management/tag-resources.md index a19ef772af156..ecb785439da8c 100644 --- a/articles/azure-resource-manager/management/tag-resources.md +++ b/articles/azure-resource-manager/management/tag-resources.md @@ -2,13 +2,15 @@ title: Tag resources, resource groups, and subscriptions for logical organization description: Shows how to apply tags to organize Azure resources for billing and managing. ms.topic: conceptual -ms.date: 05/03/2022 +ms.date: 05/25/2022 ms.custom: devx-track-azurecli, devx-track-azurepowershell --- # Use tags to organize your Azure resources and management hierarchy -You apply tags to your Azure resources, resource groups, and subscriptions to logically organize them by values that make sense for your organization. Each tag consists of a name and a value pair. For example, you can apply the name _Environment_ and the value _Production_ to all the resources in production. +Tags are metadata elements that you apply to your Azure resources. They're key-value pairs that help you identify resources based on settings that are relevant to your organization. If you want to track the deployment environment for your resources, add a key named Environment. 
To identify the resources deployed to production, give them a value of Production. Fully formed, the key-value pair becomes, Environment = Production. + +You can apply tags to your Azure resources, resource groups, and subscriptions. For recommendations on how to implement a tagging strategy, see [Resource naming and tagging decision guide](/azure/cloud-adoption-framework/decision-guides/resource-tagging/?toc=/azure/azure-resource-manager/management/toc.json). @@ -18,7 +20,7 @@ Resource tags support all cost-accruing services. To ensure that cost-accruing s > Tags are stored as plain text. Never add sensitive values to tags. Sensitive values could be exposed through many methods, including cost reports, commands that return existing tag definitions, deployment histories, exported templates, and monitoring logs. > [!IMPORTANT] -> Tag names are case-insensitive for operations. A tag with a tag name, regardless of casing, is updated or retrieved. However, the resource provider might keep the casing you provide for the tag name. You'll see that casing in cost reports. +> Tag names are case-insensitive for operations. A tag with a tag name, regardless of the casing, is updated or retrieved. However, the resource provider might keep the casing you provide for the tag name. You'll see that casing in cost reports. > > Tag values are case-sensitive. @@ -28,17 +30,17 @@ Resource tags support all cost-accruing services. To ensure that cost-accruing s There are two ways to get the required access to tag resources. -- You can have write access to the `Microsoft.Resources/tags` resource type. This access lets you tag any resource, even if you don't have access to the resource itself. The [Tag Contributor](../../role-based-access-control/built-in-roles.md#tag-contributor) role grants this access. Currently, the tag contributor role can't apply tags to resources or resource groups through the portal. It can apply tags to subscriptions through the portal. It supports all tag operations through PowerShell and REST API. +- You can have write access to the `Microsoft.Resources/tags` resource type. This access lets you tag any resource, even if you don't have access to the resource itself. The [Tag Contributor](../../role-based-access-control/built-in-roles.md#tag-contributor) role grants this access. The tag contributor role, for example, can't apply tags to resources or resource groups through the portal. It can, however, apply tags to subscriptions through the portal. It supports all tag operations through Azure PowerShell and REST API. -- You can have write access to the resource itself. The [Contributor](../../role-based-access-control/built-in-roles.md#contributor) role grants the required access to apply tags to any entity. To apply tags to only one resource type, use the contributor role for that resource. For example, to apply tags to virtual machines, use the [Virtual Machine Contributor](../../role-based-access-control/built-in-roles.md#virtual-machine-contributor). +- You can have write access to the resource itself. The [Contributor](../../role-based-access-control/built-in-roles.md#contributor) role grants the required access to apply tags to any entity. To apply tags to only one resource type, use the contributor role for that resource. To apply tags to virtual machines, for example, use the [Virtual Machine Contributor](../../role-based-access-control/built-in-roles.md#virtual-machine-contributor). 
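As a minimal sketch of granting that access — assuming a hypothetical user object ID, subscription ID, and resource group name that you'd replace with your own — a Tag Contributor assignment at resource group scope might look like the following:

```azurepowershell-interactive
# Hypothetical values; replace the object ID, subscription ID, and resource group with your own.
New-AzRoleAssignment `
  -ObjectId "aaaaaaaa-0000-1111-2222-bbbbbbbbbbbb" `
  -RoleDefinitionName "Tag Contributor" `
  -Scope "/subscriptions/<subscription-id>/resourceGroups/demoGroup"
```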
## PowerShell ### Apply tags -Azure PowerShell offers two commands for applying tags: [New-AzTag](/powershell/module/az.resources/new-aztag) and [Update-AzTag](/powershell/module/az.resources/update-aztag). You must have the `Az.Resources` module 1.12.0 or later. You can check your version with `Get-InstalledModule -Name Az.Resources`. You can install that module or [install Azure PowerShell](/powershell/azure/install-az-ps) 3.6.1 or later. +Azure PowerShell offers two commands to apply tags: [New-AzTag](/powershell/module/az.resources/new-aztag) and [Update-AzTag](/powershell/module/az.resources/update-aztag). You need to have the `Az.Resources` module 1.12.0 version or later. You can check your version with `Get-InstalledModule -Name Az.Resources`. You can install that module or [install Azure PowerShell](/powershell/azure/install-az-ps) version 3.6.1 or later. -The `New-AzTag` replaces all tags on the resource, resource group, or subscription. When calling the command, pass in the resource ID of the entity you wish to tag. +The `New-AzTag` replaces all tags on the resource, resource group, or subscription. When you call the command, pass the resource ID of the entity you want to tag. The following example applies a set of tags to a storage account: @@ -58,7 +60,7 @@ Properties : Status Normal ``` -If you run the command again but this time with different tags, notice that the earlier tags are removed. +If you run the command again, but this time with different tags, notice that the earlier tags disappear. ```azurepowershell-interactive $tags = @{"Team"="Compliance"; "Environment"="Production"} @@ -80,7 +82,7 @@ $tags = @{"Dept"="Finance"; "Status"="Normal"} Update-AzTag -ResourceId $resource.id -Tag $tags -Operation Merge ``` -Notice that the two new tags were added to the two existing tags. +Notice that the existing tags grow with the addition of the two new tags. ```output Properties : @@ -92,7 +94,7 @@ Properties : Environment Production ``` -Each tag name can have only one value. If you provide a new value for a tag, the old value is replaced even if you use the merge operation. The following example changes the `Status` tag from _Normal_ to _Green_. +Each tag name can have only one value. If you provide a new value for a tag, it replaces the old value even if you use the merge operation. The following example changes the `Status` tag from _Normal_ to _Green_. ```azurepowershell-interactive $tags = @{"Status"="Green"} @@ -109,7 +111,7 @@ Properties : Environment Production ``` -When you set the `-Operation` parameter to `Replace`, the existing tags are replaced by the new set of tags. +When you set the `-Operation` parameter to `Replace`, the new set of tags replaces the existing tags. ```azurepowershell-interactive $tags = @{"Project"="ECommerce"; "CostCenter"="00123"; "Team"="Web"} @@ -127,7 +129,7 @@ Properties : Project ECommerce ``` -The same commands also work with resource groups or subscriptions. You pass in the identifier for the resource group or subscription you want to tag. +The same commands also work with resource groups or subscriptions. Pass them in the identifier of the resource group or subscription you want to tag. To add a new set of tags to a resource group, use: @@ -170,7 +172,7 @@ $resource | ForEach-Object { Update-AzTag -Tag @{ "Dept"="IT"; "Environment"="Te ### List tags -To get the tags for a resource, resource group, or subscription, use the [Get-AzTag](/powershell/module/az.resources/get-aztag) command and pass in the resource ID for the entity. 
+To get the tags for a resource, resource group, or subscription, use the [Get-AzTag](/powershell/module/az.resources/get-aztag) command and pass the resource ID of the entity. To see the tags for a resource, use: @@ -215,7 +217,7 @@ To get resource groups that have a specific tag name and value, use: ### Remove tags -To remove specific tags, use `Update-AzTag` and set `-Operation` to `Delete`. Pass in the tags you want to delete. +To remove specific tags, use `Update-AzTag` and set `-Operation` to `Delete`. Pass the resource IDs of the tags you want to delete. ```azurepowershell-interactive $removeTags = @{"Project"="ECommerce"; "Team"="Web"} @@ -242,9 +244,9 @@ Remove-AzTag -ResourceId "/subscriptions/$subscription" ### Apply tags -Azure CLI offers two commands for applying tags: [az tag create](/cli/azure/tag#az-tag-create) and [az tag update](/cli/azure/tag#az-tag-update). You must have Azure CLI 2.10.0 or later. You can check your version with `az version`. To update or install, see [Install the Azure CLI](/cli/azure/install-azure-cli). +Azure CLI offers two commands to apply tags: [az tag create](/cli/azure/tag#az-tag-create) and [az tag update](/cli/azure/tag#az-tag-update). You need to have the Azure CLI 2.10.0 version or later. You can check your version with `az version`. To update or install it, see [Install the Azure CLI](/cli/azure/install-azure-cli). -The `az tag create` replaces all tags on the resource, resource group, or subscription. When calling the command, pass in the resource ID of the entity you wish to tag. +The `az tag create` replaces all tags on the resource, resource group, or subscription. When you call the command, pass the resource ID of the entity you want to tag. The following example applies a set of tags to a storage account: @@ -264,7 +266,7 @@ When the command completes, notice that the resource has two tags. }, ``` -If you run the command again but this time with different tags, notice that the earlier tags are removed. +If you run the command again, but this time with different tags, notice that the earlier tags disappear. ```azurecli-interactive az tag create --resource-id $resource --tags Team=Compliance Environment=Production @@ -285,7 +287,7 @@ To add tags to a resource that already has tags, use `az tag update`. Set the `- az tag update --resource-id $resource --operation Merge --tags Dept=Finance Status=Normal ``` -Notice that the two new tags were added to the two existing tags. +Notice that the existing tags grow with the addition of the two new tags. ```output "properties": { @@ -298,7 +300,7 @@ Notice that the two new tags were added to the two existing tags. }, ``` -Each tag name can have only one value. If you provide a new value for a tag, the old value is replaced even if you use the merge operation. The following example changes the `Status` tag from _Normal_ to _Green_. +Each tag name can have only one value. If you provide a new value for a tag, the new tag replaces the old value, even if you use the merge operation. The following example changes the `Status` tag from _Normal_ to _Green_. ```azurecli-interactive az tag update --resource-id $resource --operation Merge --tags Status=Green @@ -315,7 +317,7 @@ az tag update --resource-id $resource --operation Merge --tags Status=Green }, ``` -When you set the `--operation` parameter to `Replace`, the existing tags are replaced by the new set of tags. +When you set the `--operation` parameter to `Replace`, the new set of tags replaces the existing tags. 
```azurecli-interactive az tag update --resource-id $resource --operation Replace --tags Project=ECommerce CostCenter=00123 Team=Web @@ -333,7 +335,7 @@ Only the new tags remain on the resource. }, ``` -The same commands also work with resource groups or subscriptions. You pass in the identifier for the resource group or subscription you want to tag. +The same commands also work with resource groups or subscriptions. Pass them in the identifier of the resource group or subscription you want to tag. To add a new set of tags to a resource group, use: @@ -363,7 +365,7 @@ az tag update --resource-id /subscriptions/$sub --operation Merge --tags Team="W ### List tags -To get the tags for a resource, resource group, or subscription, use the [az tag list](/cli/azure/tag#az-tag-list) command and pass in the resource ID for the entity. +To get the tags for a resource, resource group, or subscription, use the [az tag list](/cli/azure/tag#az-tag-list) command and pass the resource ID of the entity. To see the tags for a resource, use: @@ -408,13 +410,13 @@ az group list --tag Dept=Finance ### Remove tags -To remove specific tags, use `az tag update` and set `--operation` to `Delete`. Pass in the tags you want to delete. +To remove specific tags, use `az tag update` and set `--operation` to `Delete`. Pass the resource ID of the tags you want to delete. ```azurecli-interactive az tag update --resource-id $resource --operation Delete --tags Project=ECommerce Team=Web ``` -The specified tags are removed. +You've removed the specified tags. ```output "properties": { @@ -432,7 +434,7 @@ az tag delete --resource-id $resource ### Handling spaces -If your tag names or values include spaces, enclose them in double quotes. +If your tag names or values include spaces, enclose them in quotation marks. ```azurecli-interactive az tag update --resource-id $group --operation Merge --tags "Cost Center"=Finance-1222 Location="West US" @@ -440,7 +442,7 @@ az tag update --resource-id $group --operation Merge --tags "Cost Center"=Financ ## ARM templates -You can tag resources, resource groups, and subscriptions during deployment with an Azure Resource Manager template (ARM template). +You can tag resources, resource groups, and subscriptions during deployment with an ARM template. > [!NOTE] > The tags you apply through an ARM template or Bicep file overwrite any existing tags. @@ -511,7 +513,7 @@ resource stgAccount 'Microsoft.Storage/storageAccounts@2021-04-01' = { ### Apply an object -You can define an object parameter that stores several tags, and apply that object to the tag element. This approach provides more flexibility than the previous example because the object can have different properties. Each property in the object becomes a separate tag for the resource. The following example has a parameter named `tagValues` that is applied to the tag element. +You can define an object parameter that stores several tags and apply that object to the tag element. This approach provides more flexibility than the previous example because the object can have different properties. Each property in the object becomes a separate tag for the resource. The following example has a parameter named `tagValues` that's applied to the tag element. # [JSON](#tab/json) @@ -628,7 +630,7 @@ resource stgAccount 'Microsoft.Storage/storageAccounts@2021-04-01' = { ### Apply tags from resource group -To apply tags from a resource group to a resource, use the [resourceGroup()](../templates/template-functions-resource.md#resourcegroup) function. 
When getting the tag value, use the `tags[tag-name]` syntax instead of the `tags.tag-name` syntax, because some characters aren't parsed correctly in the dot notation. +To apply tags from a resource group to a resource, use the [resourceGroup()](../templates/template-functions-resource.md#resourcegroup) function. When you get the tag value, use the `tags[tag-name]` syntax instead of the `tags.tag-name` syntax, because some characters aren't parsed correctly in the dot notation. # [JSON](#tab/json) @@ -685,7 +687,7 @@ resource stgAccount 'Microsoft.Storage/storageAccounts@2021-04-01' = { ### Apply tags to resource groups or subscriptions -You can add tags to a resource group or subscription by deploying the `Microsoft.Resources/tags` resource type. The tags are applied to the target resource group or subscription for the deployment. Each time you deploy the template you replace any tags there were previously applied. +You can add tags to a resource group or subscription by deploying the `Microsoft.Resources/tags` resource type. You can apply the tags to the target resource group or subscription you want to deploy. Each time you deploy the template you replace any previous tags. # [JSON](#tab/json) @@ -736,7 +738,7 @@ resource applyTags 'Microsoft.Resources/tags@2021-04-01' = { --- -To apply the tags to a resource group, use either PowerShell or Azure CLI. Deploy to the resource group that you want to tag. +To apply the tags to a resource group, use either Azure PowerShell or Azure CLI. Deploy to the resource group that you want to tag. ```azurepowershell-interactive New-AzResourceGroupDeployment -ResourceGroupName exampleGroup -TemplateFile https://raw.githubusercontent.com/Azure/azure-docs-json-samples/master/azure-resource-manager/tags.json @@ -825,7 +827,7 @@ To work with tags through the Azure REST API, use: ## SDKs -For samples of applying tags with SDKs, see: +For examples of applying tags with SDKs, see: * [.NET](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/resourcemanager/Azure.ResourceManager/samples/Sample2_ManagingResourceGroups.md) * [Java](https://github.com/Azure-Samples/resources-java-manage-resource-group/blob/master/src/main/java/com/azure/resourcemanager/resources/samples/ManageResourceGroup.java) @@ -834,13 +836,13 @@ For samples of applying tags with SDKs, see: ## Inherit tags -Tags applied to the resource group or subscription aren't inherited by the resources. To apply tags from a subscription or resource group to the resources, see [Azure Policies - tags](tag-policies.md). +Resources don't inherit the tags you apply to a resource group or a subscription. To apply tags from a subscription or resource group to the resources, see [Azure Policies - tags](tag-policies.md). ## Tags and billing -You can use tags to group your billing data. For example, if you're running multiple VMs for different organizations, use the tags to group usage by cost center. You can also use tags to categorize costs by runtime environment, such as the billing usage for VMs running in the production environment. +You can use tags to group your billing data. If you're running multiple VMs for different organizations, for example, use the tags to group usage by cost center. You can also use tags to categorize costs by runtime environment, such as the billing usage for VMs running in the production environment. -You can retrieve information about tags by downloading the usage file, a comma-separated values (CSV) file available from the Azure portal. 
For more information, see [Download or view your Azure billing invoice and daily usage data](../../cost-management-billing/manage/download-azure-invoice-daily-usage-date.md). For services that support tags with billing, the tags appear in the **Tags** column. +You can retrieve information about tags by downloading the usage file available from the Azure portal. For more information, see [Download or view your Azure billing invoice and daily usage data](../../cost-management-billing/manage/download-azure-invoice-daily-usage-date.md). For services that support tags with billing, the tags appear in the **Tags** column. For REST API operations, see [Azure Billing REST API Reference](/rest/api/billing/). @@ -849,14 +851,14 @@ For REST API operations, see [Azure Billing REST API Reference](/rest/api/billin The following limitations apply to tags: * Not all resource types support tags. To determine if you can apply a tag to a resource type, see [Tag support for Azure resources](tag-support.md). -* Each resource, resource group, and subscription can have a maximum of 50 tag name/value pairs. If you need to apply more tags than the maximum allowed number, use a JSON string for the tag value. The JSON string can contain many values that are applied to a single tag name. A resource group or subscription can contain many resources that each have 50 tag name/value pairs. -* The tag name is limited to 512 characters, and the tag value is limited to 256 characters. For storage accounts, the tag name is limited to 128 characters, and the tag value is limited to 256 characters. -* Tags can't be applied to classic resources such as Cloud Services. -* Azure IP Groups and Azure Firewall Policies don't support PATCH operations, which means they don't support updating tags through the portal. Instead, use the update commands for those resources. For example, you can update tags for an IP group with the [az network ip-group update](/cli/azure/network/ip-group#az-network-ip-group-update) command. +* Each resource, resource group, and subscription can have a maximum of 50 tag name-value pairs. If you need to apply more tags than the maximum allowed number, use a JSON string for the tag value. The JSON string can contain many of the values that you apply to a single tag name. A resource group or subscription can contain many resources that each have 50 tag name-value pairs. +* The tag name has a limit of 512 characters and the tag value has a limit of 256 characters. For storage accounts, the tag name has a limit of 128 characters and the tag value has a limit of 256 characters. +* Classic resources such as Cloud Services don't support tags. +* Azure IP Groups and Azure Firewall Policies don't support PATCH operations. PATCH API method operations, therefore, can't update tags through the portal. Instead, you can use the update commands for those resources. You can update tags for an IP group, for example, with the [az network ip-group update](/cli/azure/network/ip-group#az-network-ip-group-update) command. * Tag names can't contain these characters: `<`, `>`, `%`, `&`, `\`, `?`, `/` > [!NOTE] - > * Azure DNS zones don't support the use of spaces in the tag or a tag that starts with a number. Azure DNS tag names do not support special and unicode characters. The value can contain all characters. + > * Azure Domain Name System (DNS) zones don't support the use of spaces in the tag or a tag that starts with a number. Azure DNS tag names don't support special and unicode characters. 
The value can contain all characters. > > * Traffic Manager doesn't support the use of spaces, `#` or `:` in the tag name. The tag name can't start with a number. > @@ -864,9 +866,8 @@ The following limitations apply to tags: > > * The following Azure resources only support 15 tags: > * Azure Automation - > * Azure CDN + > * Azure Content Delivery Network (CDN) > * Azure DNS (Zone and A records) - > * Azure Private DNS (Zone, A records, and virtual network link) ## Next steps diff --git a/articles/azure-resource-manager/management/tag-support.md b/articles/azure-resource-manager/management/tag-support.md index 83fb46f8d032b..554b084dfb9ac 100644 --- a/articles/azure-resource-manager/management/tag-support.md +++ b/articles/azure-resource-manager/management/tag-support.md @@ -2,7 +2,7 @@ title: Tag support for resources description: Shows which Azure resource types support tags. Provides details for all Azure services. ms.topic: conceptual -ms.date: 05/13/2022 +ms.date: 06/03/2022 --- # Tag support for Azure resources @@ -247,6 +247,7 @@ To get the same data as a file of comma-separated values, download [tag-support. > | ------------- | ----------- | ----------- | > | accounts | Yes | Yes | > | accounts / datapools | No | No | +> | workspaces | Yes | Yes | ## Microsoft.AutonomousSystems @@ -302,7 +303,7 @@ To get the same data as a file of comma-separated values, download [tag-support. > | DataControllers | Yes | Yes | > | DataControllers / ActiveDirectoryConnectors | No | No | > | PostgresInstances | Yes | Yes | -> | sqlManagedInstances | Yes | Yes | +> | SqlManagedInstances | Yes | Yes | > | SqlServerInstances | Yes | Yes | ## Microsoft.AzureCIS @@ -366,7 +367,10 @@ To get the same data as a file of comma-separated values, download [tag-support. > | clusters | Yes | Yes | > | clusters / arcSettings | No | No | > | clusters / arcSettings / extensions | No | No | -> | galleryimages | Yes | Yes | +> | clusters / offers | No | No | +> | clusters / publishers | No | No | +> | clusters / publishers / offers | No | No | +> | galleryImages | Yes | Yes | > | networkinterfaces | Yes | Yes | > | virtualharddisks | Yes | Yes | > | virtualmachines | Yes | Yes | @@ -395,6 +399,7 @@ To get the same data as a file of comma-separated values, download [tag-support. > | ------------- | ----------- | ----------- | > | batchAccounts | Yes | Yes | > | batchAccounts / certificates | No | No | +> | batchAccounts / detectors | No | No | > | batchAccounts / pools | No | No | ## Microsoft.Billing @@ -639,6 +644,7 @@ To get the same data as a file of comma-separated values, download [tag-support. > | profiles / endpoints / origins | No | No | > | profiles / origingroups | No | No | > | profiles / origingroups / origins | No | No | +> | profiles / policies | No | No | > | profiles / rulesets | No | No | > | profiles / rulesets / rules | No | No | > | profiles / secrets | No | No | @@ -672,9 +678,6 @@ To get the same data as a file of comma-separated values, download [tag-support. > | ------------- | ----------- | ----------- | > | artifactSetDefinitions | No | No | > | artifactSetSnapshots | No | No | -> | chaosExperiments | Yes | Yes | -> | chaosProviderConfigurations | No | No | -> | chaosTargets | No | No | > | experiments | Yes | Yes | > | targets | No | No | @@ -959,7 +962,9 @@ To get the same data as a file of comma-separated values, download [tag-support. 
> [!div class="mx-tableFixed"] > | Resource type | Supports tags | Tag in cost report | > | ------------- | ----------- | ----------- | +> | containerGroupProfiles | Yes | Yes | > | containerGroups | Yes | Yes | +> | containerScaleSets | Yes | Yes | > | serviceAssociationLinks | No | No | ## Microsoft.ContainerRegistry @@ -1088,6 +1093,8 @@ To get the same data as a file of comma-separated values, download [tag-support. > | Resource type | Supports tags | Tag in cost report | > | ------------- | ----------- | ----------- | > | grafana | Yes | Yes | +> | grafana / privateEndpointConnections | No | No | +> | grafana / privateLinkResources | No | No | ## Microsoft.DataBox @@ -1396,16 +1403,6 @@ To get the same data as a file of comma-separated values, download [tag-support. > | labs / virtualMachines | Yes | Yes | > | schedules | Yes | Yes | -## Microsoft.Diagnostics - -> [!div class="mx-tableFixed"] -> | Resource type | Supports tags | Tag in cost report | -> | ------------- | ----------- | ----------- | -> | apollo | No | No | -> | azureKB | No | No | -> | insights | No | No | -> | solutions | No | No | - ## Microsoft.DigitalTwins > [!div class="mx-tableFixed"] @@ -1687,37 +1684,6 @@ To get the same data as a file of comma-separated values, download [tag-support. > | ------------- | ----------- | ----------- | > | jobs | Yes | Yes | -## Microsoft.IndustryDataLifecycle - -> [!div class="mx-tableFixed"] -> | Resource type | Supports tags | Tag in cost report | -> | ------------- | ----------- | ----------- | -> | baseModels | Yes | Yes | -> | baseModels / entities | No | No | -> | baseModels / relationships | No | No | -> | builtInModels | No | No | -> | builtInModels / entities | No | No | -> | builtInModels / relationships | No | No | -> | collaborativeInvitations | No | No | -> | custodianCollaboratives | Yes | Yes | -> | custodianCollaboratives / collaborativeImage | No | No | -> | custodianCollaboratives / dataModels | No | No | -> | custodianCollaboratives / dataModels / mergePipelines | No | No | -> | custodianCollaboratives / invitations | No | No | -> | custodianCollaboratives / invitations / termsOfUseDocuments | No | No | -> | custodianCollaboratives / receivedDataPackages | No | No | -> | custodianCollaboratives / termsOfUseDocuments | No | No | -> | dataConsumerCollaboratives | Yes | Yes | -> | dataproviders | No | No | -> | derivedModels | Yes | Yes | -> | derivedModels / entities | No | No | -> | derivedModels / relationships | No | No | -> | generateMappingTemplate | No | No | -> | memberCollaboratives | Yes | Yes | -> | memberCollaboratives / sharedDataPackages | No | No | -> | modelMappings | Yes | Yes | -> | pipelineSets | Yes | Yes | - ## microsoft.insights > [!div class="mx-tableFixed"] @@ -1869,6 +1835,7 @@ To get the same data as a file of comma-separated values, download [tag-support. > | Resource type | Supports tags | Tag in cost report | > | ------------- | ----------- | ----------- | > | extensions | No | No | +> | extensionTypes | No | No | > | fluxConfigurations | No | No | > | namespaces | No | No | > | privateLinkScopes | Yes | Yes | @@ -1963,9 +1930,11 @@ To get the same data as a file of comma-separated values, download [tag-support. 
> | workspaces / components / versions | No | No | > | workspaces / computes | No | No | > | workspaces / data | No | No | +> | workspaces / data / versions | No | No | > | workspaces / datasets | No | No | > | workspaces / datastores | No | No | > | workspaces / environments | No | No | +> | workspaces / environments / versions | No | No | > | workspaces / eventGridFilters | No | No | > | workspaces / jobs | No | No | > | workspaces / labelingJobs | No | No | @@ -2048,10 +2017,14 @@ To get the same data as a file of comma-separated values, download [tag-support. > | privateStoreClient | No | No | > | privateStores | No | No | > | privateStores / AdminRequestApprovals | No | No | +> | privateStores / anyExistingOffersInTheCollections | No | No | > | privateStores / billingAccounts | No | No | > | privateStores / bulkCollectionsAction | No | No | > | privateStores / collections | No | No | +> | privateStores / collections / approveAllItems | No | No | +> | privateStores / collections / disableApproveAllItems | No | No | > | privateStores / collections / offers | No | No | +> | privateStores / collections / offers / upsertOfferWithMultiContext | No | No | > | privateStores / collections / transferOffers | No | No | > | privateStores / collectionsToSubscriptionsMapping | No | No | > | privateStores / fetchAllSubscriptionsInTenant | No | No | @@ -2060,6 +2033,7 @@ To get the same data as a file of comma-separated values, download [tag-support. > | privateStores / queryApprovedPlans | No | No | > | privateStores / queryNotificationsState | No | No | > | privateStores / queryOffers | No | No | +> | privateStores / queryUserOffers | No | No | > | privateStores / RequestApprovals | No | No | > | privateStores / requestApprovals / query | No | No | > | privateStores / requestApprovals / withdrawPlan | No | No | @@ -2232,6 +2206,7 @@ To get the same data as a file of comma-separated values, download [tag-support. > | frontdoorWebApplicationFirewallPolicies | Yes, but limited (see [note below](#network-limitations)) | Yes | > | getDnsResourceReference | No | No | > | internalNotify | No | No | +> | internalPublicIpAddresses | No | No | > | ipGroups | Yes | Yes | > | loadBalancers | Yes | Yes | > | localNetworkGateways | Yes | Yes | @@ -2307,6 +2282,9 @@ To get the same data as a file of comma-separated values, download [tag-support. > | bareMetalMachines | Yes | Yes | > | clusterManagers | Yes | Yes | > | clusters | Yes | Yes | +> | hybridAksClusters | Yes | Yes | +> | hybridAksManagementDomains | Yes | Yes | +> | hybridAksVirtualMachines | Yes | Yes | > | rackManifests | Yes | Yes | > | racks | Yes | Yes | > | virtualMachines | Yes | Yes | @@ -2318,6 +2296,7 @@ To get the same data as a file of comma-separated values, download [tag-support. > | Resource type | Supports tags | Tag in cost report | > | ------------- | ----------- | ----------- | > | azureTrafficCollectors | Yes | Yes | +> | azureTrafficCollectors / collectorPolicies | Yes | Yes | > | meshVpns | Yes | Yes | > | meshVpns / connectionPolicies | Yes | Yes | > | meshVpns / privateEndpointConnectionProxies | No | No | @@ -2410,11 +2389,12 @@ To get the same data as a file of comma-separated values, download [tag-support. 
> [!div class="mx-tableFixed"] > | Resource type | Supports tags | Tag in cost report | > | ------------- | ----------- | ----------- | -> | playeraccountpools | Yes | Yes | +> | playerAccountPools | Yes | Yes | > | titles | Yes | Yes | > | titles / segments | No | No | -> | titles / titledatakeyvalues | No | No | -> | titles / titleinternaldatakeyvalues | No | No | +> | titles / titleDataSets | No | No | +> | titles / titleInternalDataKeyValues | No | No | +> | titles / titleInternalDataSets | No | No | ## Microsoft.PolicyInsights @@ -2620,12 +2600,12 @@ To get the same data as a file of comma-separated values, download [tag-support. > [!div class="mx-tableFixed"] > | Resource type | Supports tags | Tag in cost report | > | ------------- | ----------- | ----------- | -> | availabilitysets | Yes | Yes | +> | AvailabilitySets | Yes | Yes | > | Clouds | Yes | Yes | > | VirtualMachines | Yes | Yes | > | VirtualMachineTemplates | Yes | Yes | > | VirtualNetworks | Yes | Yes | -> | vmmservers | Yes | Yes | +> | VMMServers | Yes | Yes | > | VMMServers / InventoryItems | No | No | ## Microsoft.Search @@ -2682,6 +2662,7 @@ To get the same data as a file of comma-separated values, download [tag-support. > | MdeOnboardings | No | No | > | policies | No | No | > | pricings | No | No | +> | query | No | No | > | regulatoryComplianceStandards | No | No | > | regulatoryComplianceStandards / regulatoryComplianceControls | No | No | > | regulatoryComplianceStandards / regulatoryComplianceControls / regulatoryComplianceAssessments | No | No | @@ -2703,6 +2684,7 @@ To get the same data as a file of comma-separated values, download [tag-support. > | subAssessments | No | No | > | tasks | No | No | > | topologies | No | No | +> | vmScanners | Yes | Yes | > | workspaceSettings | No | No | ## Microsoft.SecurityDetonation @@ -2731,6 +2713,7 @@ To get the same data as a file of comma-separated values, download [tag-support. > | automationRules | No | No | > | bookmarks | No | No | > | cases | No | No | +> | dataConnectorDefinitions | No | No | > | dataConnectors | No | No | > | enrichment | No | No | > | entities | No | No | @@ -2740,6 +2723,7 @@ To get the same data as a file of comma-separated values, download [tag-support. > | metadata | No | No | > | MitreCoverageRecords | No | No | > | onboardingStates | No | No | +> | overview | No | No | > | securityMLAnalyticsSettings | No | No | > | settings | No | No | > | sourceControls | No | No | @@ -2969,7 +2953,9 @@ To get the same data as a file of comma-separated values, download [tag-support. > | storageAccounts / queueServices | No | No | > | storageAccounts / services | No | No | > | storageAccounts / services / metricDefinitions | No | No | +> | storageAccounts / storageTaskAssignments | No | No | > | storageAccounts / tableServices | No | No | +> | storageTasks | Yes | Yes | > | usages | No | No | ## Microsoft.StorageCache @@ -3198,7 +3184,9 @@ To get the same data as a file of comma-separated values, download [tag-support. 
> | sourceControls | No | No | > | staticSites | Yes | Yes | > | staticSites / builds | No | No | +> | staticSites / builds / linkedBackends | No | No | > | staticSites / builds / userProvidedFunctionApps | No | No | +> | staticSites / linkedBackends | No | No | > | staticSites / userProvidedFunctionApps | No | No | > | validate | No | No | > | verifyHostingEnvironmentVnet | No | No | diff --git a/articles/azure-resource-manager/templates/best-practices.md b/articles/azure-resource-manager/templates/best-practices.md index b3122ac56e071..012947d0951fb 100644 --- a/articles/azure-resource-manager/templates/best-practices.md +++ b/articles/azure-resource-manager/templates/best-practices.md @@ -2,7 +2,7 @@ title: Best practices for templates description: Describes recommended approaches for authoring Azure Resource Manager templates (ARM templates). Offers suggestions to avoid common problems when using templates. ms.topic: conceptual -ms.date: 04/23/2021 +ms.date: 05/26/2022 --- # ARM template best practices @@ -10,13 +10,13 @@ This article shows you how to use recommended practices when constructing your A ## Template limits -Limit the size of your template to 4 MB. The 4-MB limit applies to the final state of the template after it has been expanded with iterative resource definitions, and values for variables and parameters. The parameter file is also limited to 4 MB. You may get an error with a template or parameter file of less than 4 MB, if the total size of the request is too large. For more information about how to simplify your template to avoid a large request, see [Resolve errors for job size exceeded](error-job-size-exceeded.md). +Limit the size of your template to 4 MB. The 4-MB limit applies to the final state of the template after it has been expanded with iterative resource definitions, and values for variables and parameters. The parameter file is also limited to 4 MB. You may get an error with a template or parameter file of less than 4 MB if the total size of the request is too large. For more information about how to simplify your template to avoid a large request, see [Resolve errors for job size exceeded](error-job-size-exceeded.md). You're also limited to: * 256 parameters * 256 variables -* 800 resources (including copy count) +* 800 resources (including [copy count](copy-resources.md)) * 64 output values * 24,576 characters in a template expression @@ -164,7 +164,7 @@ When deciding what [dependencies](./resource-dependency.md) to set, use the foll * Set a child resource as dependent on its parent resource. -* Resources with the [condition element](conditional-resource-deployment.md) set to false are automatically removed from the dependency order. Set the dependencies as if the resource is always deployed. +* Resources with the [condition element](conditional-resource-deployment.md) set to `false` are automatically removed from the dependency order. Set the dependencies as if the resource is always deployed. * Let dependencies cascade without setting them explicitly. For example, your virtual machine depends on a virtual network interface, and the virtual network interface depends on a virtual network and public IP addresses. Therefore, the virtual machine is deployed after all three resources, but don't explicitly set the virtual machine as dependent on all three resources. This approach clarifies the dependency order and makes it easier to change the template later. 
@@ -222,15 +222,14 @@ The following information can be helpful when you work with [resources](./syntax } ``` -* Assign public IP addresses to a virtual machine only when an application requires it. To connect to a virtual machine (VM) for debugging, or for management or administrative purposes, use inbound NAT rules, a virtual network gateway, or a jumpbox. +* Assign public IP addresses to a virtual machine only when an application requires it. To connect to a virtual machine for administrative purposes, use inbound NAT rules, a virtual network gateway, or a jumpbox. For more information about connecting to virtual machines, see: - * [Run VMs for an N-tier architecture in Azure](/azure/architecture/reference-architectures/n-tier/n-tier-sql-server) - * [Set up WinRM access for VMs in Azure Resource Manager](../../virtual-machines/windows/winrm.md) - * [Allow external access to your VM by using the Azure portal](../../virtual-machines/windows/nsg-quickstart-portal.md) - * [Allow external access to your VM by using PowerShell](../../virtual-machines/windows/nsg-quickstart-powershell.md) - * [Allow external access to your Linux VM by using Azure CLI](../../virtual-machines/linux/nsg-quickstart.md) + * [What is Azure Bastion?](../../bastion/bastion-overview.md) + * [How to connect and sign on to an Azure virtual machine running Windows](../../virtual-machines/windows/connect-logon.md) + * [Setting up WinRM access for Virtual Machines in Azure Resource Manager](../../virtual-machines/windows/winrm.md) + * [Connect to a Linux VM](../../virtual-machines/linux-vm-connect.md) * The `domainNameLabel` property for public IP addresses must be unique. The `domainNameLabel` value must be between 3 and 63 characters long, and follow the rules specified by this regular expression: `^[a-z][a-z0-9-]{1,61}[a-z0-9]$`. Because the `uniqueString` function generates a string that is 13 characters long, the `dnsPrefixString` parameter is limited to 50 characters. diff --git a/articles/azure-resource-manager/templates/deploy-what-if.md b/articles/azure-resource-manager/templates/deploy-what-if.md index ac7107e1dbfc6..9e84ddf56fd1e 100644 --- a/articles/azure-resource-manager/templates/deploy-what-if.md +++ b/articles/azure-resource-manager/templates/deploy-what-if.md @@ -390,6 +390,7 @@ You can use the what-if operation through the Azure SDKs. ## Next steps +- [ARM Deployment Insights](https://marketplace.visualstudio.com/items?itemName=AuthorityPartnersInc.arm-deployment-insights) extension provides an easy way to integrate the what-if operation in your Azure DevOps pipeline. - To use the what-if operation in a pipeline, see [Test ARM templates with What-If in a pipeline](https://4bes.nl/2021/03/06/test-arm-templates-with-what-if/). - If you notice incorrect results from the what-if operation, please report the issues at [https://aka.ms/whatifissues](https://aka.ms/whatifissues). - For a Microsoft Learn module that covers using what if, see [Preview changes and validate Azure resources by using what-if and the ARM template test toolkit](/learn/modules/arm-template-test/). 
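Before wiring what-if into a pipeline, you can also run it locally. The following is a minimal sketch using Azure PowerShell, assuming an existing resource group named `demoGroup` and a local template file `azuredeploy.json` (both hypothetical):

```azurepowershell-interactive
# Preview the changes the deployment would make without applying them.
New-AzResourceGroupDeployment `
  -ResourceGroupName demoGroup `
  -TemplateFile ./azuredeploy.json `
  -WhatIf
```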
diff --git a/articles/azure-resource-manager/templates/deployment-complete-mode-deletion.md b/articles/azure-resource-manager/templates/deployment-complete-mode-deletion.md index ab383f9ba34de..685df22515e01 100644 --- a/articles/azure-resource-manager/templates/deployment-complete-mode-deletion.md +++ b/articles/azure-resource-manager/templates/deployment-complete-mode-deletion.md @@ -2,7 +2,7 @@ title: Complete mode deletion description: Shows how resource types handle complete mode deletion in Azure Resource Manager templates. ms.topic: conceptual -ms.date: 04/20/2022 +ms.date: 06/03/2022 --- # Deletion of Azure resources for complete mode deployments @@ -248,6 +248,7 @@ The resources are listed by resource provider namespace. To match a resource pro > | ------------- | ----------- | > | accounts | Yes | > | accounts / datapools | No | +> | workspaces | Yes | ## Microsoft.AutonomousSystems @@ -303,7 +304,7 @@ The resources are listed by resource provider namespace. To match a resource pro > | DataControllers | Yes | > | DataControllers / ActiveDirectoryConnectors | No | > | PostgresInstances | Yes | -> | sqlManagedInstances | Yes | +> | SqlManagedInstances | Yes | > | SqlServerInstances | Yes | ## Microsoft.AzureCIS @@ -367,7 +368,10 @@ The resources are listed by resource provider namespace. To match a resource pro > | clusters | Yes | > | clusters / arcSettings | No | > | clusters / arcSettings / extensions | No | -> | galleryimages | Yes | +> | clusters / offers | No | +> | clusters / publishers | No | +> | clusters / publishers / offers | No | +> | galleryImages | Yes | > | networkinterfaces | Yes | > | virtualharddisks | Yes | > | virtualmachines | Yes | @@ -396,6 +400,7 @@ The resources are listed by resource provider namespace. To match a resource pro > | ------------- | ----------- | > | batchAccounts | Yes | > | batchAccounts / certificates | No | +> | batchAccounts / detectors | No | > | batchAccounts / pools | No | ## Microsoft.Billing @@ -640,6 +645,7 @@ The resources are listed by resource provider namespace. To match a resource pro > | profiles / endpoints / origins | No | > | profiles / origingroups | No | > | profiles / origingroups / origins | No | +> | profiles / policies | No | > | profiles / rulesets | No | > | profiles / rulesets / rules | No | > | profiles / secrets | No | @@ -673,9 +679,6 @@ The resources are listed by resource provider namespace. To match a resource pro > | ------------- | ----------- | > | artifactSetDefinitions | No | > | artifactSetSnapshots | No | -> | chaosExperiments | Yes | -> | chaosProviderConfigurations | No | -> | chaosTargets | No | > | experiments | Yes | > | targets | No | @@ -956,7 +959,9 @@ The resources are listed by resource provider namespace. To match a resource pro > [!div class="mx-tableFixed"] > | Resource type | Complete mode deletion | > | ------------- | ----------- | +> | containerGroupProfiles | Yes | > | containerGroups | Yes | +> | containerScaleSets | Yes | > | serviceAssociationLinks | No | ## Microsoft.ContainerRegistry @@ -1085,6 +1090,8 @@ The resources are listed by resource provider namespace. To match a resource pro > | Resource type | Complete mode deletion | > | ------------- | ----------- | > | grafana | Yes | +> | grafana / privateEndpointConnections | No | +> | grafana / privateLinkResources | No | ## Microsoft.DataBox @@ -1390,16 +1397,6 @@ The resources are listed by resource provider namespace. 
To match a resource pro > | labs / virtualMachines | Yes | > | schedules | Yes | -## Microsoft.Diagnostics - -> [!div class="mx-tableFixed"] -> | Resource type | Complete mode deletion | -> | ------------- | ----------- | -> | apollo | No | -> | azureKB | No | -> | insights | No | -> | solutions | No | - ## Microsoft.DigitalTwins > [!div class="mx-tableFixed"] @@ -1681,37 +1678,6 @@ The resources are listed by resource provider namespace. To match a resource pro > | ------------- | ----------- | > | jobs | Yes | -## Microsoft.IndustryDataLifecycle - -> [!div class="mx-tableFixed"] -> | Resource type | Complete mode deletion | -> | ------------- | ----------- | -> | baseModels | Yes | -> | baseModels / entities | No | -> | baseModels / relationships | No | -> | builtInModels | No | -> | builtInModels / entities | No | -> | builtInModels / relationships | No | -> | collaborativeInvitations | No | -> | custodianCollaboratives | Yes | -> | custodianCollaboratives / collaborativeImage | No | -> | custodianCollaboratives / dataModels | No | -> | custodianCollaboratives / dataModels / mergePipelines | No | -> | custodianCollaboratives / invitations | No | -> | custodianCollaboratives / invitations / termsOfUseDocuments | No | -> | custodianCollaboratives / receivedDataPackages | No | -> | custodianCollaboratives / termsOfUseDocuments | No | -> | dataConsumerCollaboratives | Yes | -> | dataproviders | No | -> | derivedModels | Yes | -> | derivedModels / entities | No | -> | derivedModels / relationships | No | -> | generateMappingTemplate | No | -> | memberCollaboratives | Yes | -> | memberCollaboratives / sharedDataPackages | No | -> | modelMappings | Yes | -> | pipelineSets | Yes | - ## microsoft.insights > [!div class="mx-tableFixed"] @@ -1863,6 +1829,7 @@ The resources are listed by resource provider namespace. To match a resource pro > | Resource type | Complete mode deletion | > | ------------- | ----------- | > | extensions | No | +> | extensionTypes | No | > | fluxConfigurations | No | > | namespaces | No | > | privateLinkScopes | Yes | @@ -1957,9 +1924,11 @@ The resources are listed by resource provider namespace. To match a resource pro > | workspaces / components / versions | No | > | workspaces / computes | No | > | workspaces / data | No | +> | workspaces / data / versions | No | > | workspaces / datasets | No | > | workspaces / datastores | No | > | workspaces / environments | No | +> | workspaces / environments / versions | No | > | workspaces / eventGridFilters | No | > | workspaces / jobs | No | > | workspaces / labelingJobs | No | @@ -2039,10 +2008,14 @@ The resources are listed by resource provider namespace. To match a resource pro > | privateStoreClient | No | > | privateStores | No | > | privateStores / AdminRequestApprovals | No | +> | privateStores / anyExistingOffersInTheCollections | No | > | privateStores / billingAccounts | No | > | privateStores / bulkCollectionsAction | No | > | privateStores / collections | No | +> | privateStores / collections / approveAllItems | No | +> | privateStores / collections / disableApproveAllItems | No | > | privateStores / collections / offers | No | +> | privateStores / collections / offers / upsertOfferWithMultiContext | No | > | privateStores / collections / transferOffers | No | > | privateStores / collectionsToSubscriptionsMapping | No | > | privateStores / fetchAllSubscriptionsInTenant | No | @@ -2051,6 +2024,7 @@ The resources are listed by resource provider namespace. 
To match a resource pro > | privateStores / queryApprovedPlans | No | > | privateStores / queryNotificationsState | No | > | privateStores / queryOffers | No | +> | privateStores / queryUserOffers | No | > | privateStores / RequestApprovals | No | > | privateStores / requestApprovals / query | No | > | privateStores / requestApprovals / withdrawPlan | No | @@ -2223,6 +2197,7 @@ The resources are listed by resource provider namespace. To match a resource pro > | frontdoorWebApplicationFirewallPolicies | Yes | > | getDnsResourceReference | No | > | internalNotify | No | +> | internalPublicIpAddresses | No | > | ipGroups | Yes | > | loadBalancers | Yes | > | localNetworkGateways | Yes | @@ -2288,6 +2263,9 @@ The resources are listed by resource provider namespace. To match a resource pro > | bareMetalMachines | Yes | > | clusterManagers | Yes | > | clusters | Yes | +> | hybridAksClusters | Yes | +> | hybridAksManagementDomains | Yes | +> | hybridAksVirtualMachines | Yes | > | rackManifests | Yes | > | racks | Yes | > | virtualMachines | Yes | @@ -2299,6 +2277,7 @@ The resources are listed by resource provider namespace. To match a resource pro > | Resource type | Complete mode deletion | > | ------------- | ----------- | > | azureTrafficCollectors | Yes | +> | azureTrafficCollectors / collectorPolicies | Yes | > | meshVpns | Yes | > | meshVpns / connectionPolicies | Yes | > | meshVpns / privateEndpointConnectionProxies | No | @@ -2391,11 +2370,12 @@ The resources are listed by resource provider namespace. To match a resource pro > [!div class="mx-tableFixed"] > | Resource type | Complete mode deletion | > | ------------- | ----------- | -> | playeraccountpools | Yes | +> | playerAccountPools | Yes | > | titles | Yes | > | titles / segments | No | -> | titles / titledatakeyvalues | No | -> | titles / titleinternaldatakeyvalues | No | +> | titles / titleDataSets | No | +> | titles / titleInternalDataKeyValues | No | +> | titles / titleInternalDataSets | No | ## Microsoft.PolicyInsights @@ -2601,12 +2581,12 @@ The resources are listed by resource provider namespace. To match a resource pro > [!div class="mx-tableFixed"] > | Resource type | Complete mode deletion | > | ------------- | ----------- | -> | availabilitysets | Yes | +> | AvailabilitySets | Yes | > | Clouds | Yes | > | VirtualMachines | Yes | > | VirtualMachineTemplates | Yes | > | VirtualNetworks | Yes | -> | vmmservers | Yes | +> | VMMServers | Yes | > | VMMServers / InventoryItems | No | ## Microsoft.Search @@ -2663,6 +2643,7 @@ The resources are listed by resource provider namespace. To match a resource pro > | MdeOnboardings | No | > | policies | No | > | pricings | No | +> | query | No | > | regulatoryComplianceStandards | No | > | regulatoryComplianceStandards / regulatoryComplianceControls | No | > | regulatoryComplianceStandards / regulatoryComplianceControls / regulatoryComplianceAssessments | No | @@ -2684,6 +2665,7 @@ The resources are listed by resource provider namespace. To match a resource pro > | subAssessments | No | > | tasks | No | > | topologies | No | +> | vmScanners | Yes | > | workspaceSettings | No | ## Microsoft.SecurityDetonation @@ -2712,6 +2694,7 @@ The resources are listed by resource provider namespace. To match a resource pro > | automationRules | No | > | bookmarks | No | > | cases | No | +> | dataConnectorDefinitions | No | > | dataConnectors | No | > | enrichment | No | > | entities | No | @@ -2721,6 +2704,7 @@ The resources are listed by resource provider namespace. 
To match a resource pro > | metadata | No | > | MitreCoverageRecords | No | > | onboardingStates | No | +> | overview | No | > | securityMLAnalyticsSettings | No | > | settings | No | > | sourceControls | No | @@ -2944,7 +2928,9 @@ The resources are listed by resource provider namespace. To match a resource pro > | storageAccounts / queueServices | No | > | storageAccounts / services | No | > | storageAccounts / services / metricDefinitions | No | +> | storageAccounts / storageTaskAssignments | No | > | storageAccounts / tableServices | No | +> | storageTasks | Yes | > | usages | No | ## Microsoft.StorageCache @@ -3170,7 +3156,9 @@ The resources are listed by resource provider namespace. To match a resource pro > | sourceControls | No | > | staticSites | Yes | > | staticSites / builds | No | +> | staticSites / builds / linkedBackends | No | > | staticSites / builds / userProvidedFunctionApps | No | +> | staticSites / linkedBackends | No | > | staticSites / userProvidedFunctionApps | No | > | validate | No | > | verifyHostingEnvironmentVnet | No | diff --git a/articles/azure-resource-manager/templates/frequently-asked-questions.yml b/articles/azure-resource-manager/templates/frequently-asked-questions.yml index 9ea9f5505c46b..be9b1ca817f22 100644 --- a/articles/azure-resource-manager/templates/frequently-asked-questions.yml +++ b/articles/azure-resource-manager/templates/frequently-asked-questions.yml @@ -200,7 +200,7 @@ sections: Yes. For an explanation of how to use template and pipelines, see [Tutorial: Continuous integration of ARM templates with Azure Pipelines](deployment-tutorial-pipeline.md) and [Integrate ARM templates with Azure Pipelines](add-template-to-azure-pipelines.md). - question: | - Can I use GitHub actions to deploy a template? + Can I use GitHub Actions to deploy a template? answer: | Yes, see [Deploy ARM templates by using GitHub Actions](deploy-github-actions.md). diff --git a/articles/azure-resource-manager/templates/overview.md b/articles/azure-resource-manager/templates/overview.md index 09a59e7e96792..e05485f55f3db 100644 --- a/articles/azure-resource-manager/templates/overview.md +++ b/articles/azure-resource-manager/templates/overview.md @@ -2,7 +2,7 @@ title: Templates overview description: Describes the benefits using Azure Resource Manager templates (ARM templates) for deployment of resources. ms.topic: conceptual -ms.date: 12/01/2021 +ms.date: 05/26/2022 --- # What are ARM templates? diff --git a/articles/azure-resource-manager/templates/template-functions-resource.md b/articles/azure-resource-manager/templates/template-functions-resource.md index 7283e28700b83..ce9a3bc0c9224 100644 --- a/articles/azure-resource-manager/templates/template-functions-resource.md +++ b/articles/azure-resource-manager/templates/template-functions-resource.md @@ -135,7 +135,7 @@ The possible uses of `list*` are shown in the following table. 
| Microsoft.ApiManagement/service/namedValues | [listValue](/rest/api/apimanagement/current-ga/named-value/list-value) | | Microsoft.ApiManagement/service/openidConnectProviders | [listSecrets](/rest/api/apimanagement/current-ga/openid-connect-provider/list-secrets) | | Microsoft.ApiManagement/service/subscriptions | [listSecrets](/rest/api/apimanagement/current-ga/subscription/list-secrets) | -| Microsoft.AppConfiguration/configurationStores | [ListKeys](/rest/api/appconfiguration/configurationstores/listkeys) | +| Microsoft.AppConfiguration/configurationStores | [ListKeys](/rest/api/appconfiguration/stable/configuration-stores/list-keys) | | Microsoft.AppPlatform/Spring | [listTestKeys](/rest/api/azurespringapps/services/list-test-keys) | | Microsoft.Automation/automationAccounts | [listKeys](/rest/api/automation/keys/listbyautomationaccount) | | Microsoft.Batch/batchAccounts | [listKeys](/rest/api/batchmanagement/batchaccount/getkeys) | diff --git a/articles/azure-resource-manager/templates/template-spec-convert.md b/articles/azure-resource-manager/templates/template-spec-convert.md index a384410d44c81..1b51895147643 100644 --- a/articles/azure-resource-manager/templates/template-spec-convert.md +++ b/articles/azure-resource-manager/templates/template-spec-convert.md @@ -2,7 +2,7 @@ title: Convert portal template to template spec description: Describes how to convert an existing template in the Azure portal gallery to a template specs. ms.topic: conceptual -ms.date: 02/04/2021 +ms.date: 05/25/2022 ms.author: tomfitz author: tfitzmac --- @@ -12,6 +12,10 @@ The Azure portal provides a way to store Azure Resource Manager templates (ARM t To see if you have any templates to convert, view the [template gallery in the portal](https://portal.azure.com/#blade/HubsExtension/BrowseResourceBlade/resourceType/Microsoft.Gallery%2Fmyareas%2Fgalleryitems). These templates have the resource type `Microsoft.Gallery/myareas/galleryitems`. +## Deprecation of portal feature + +**The template gallery in the portal is being deprecated on March 31, 2025**. To continue using a template in the template gallery, you need to migrate it to a template spec. Use one of the methods shown in this article to migrate the template. + ## Convert with PowerShell script To simplify converting templates in the template gallery, use a PowerShell script from the Azure Quickstart Templates repo. When you run the script, you can either create a new template spec for each template or download a template that creates the template spec. The script doesn't delete the template from the template gallery. 
diff --git a/articles/azure-resource-manager/templates/toc.yml b/articles/azure-resource-manager/templates/toc.yml index 8dc46a1bcfd9a..7075b4066ae4b 100644 --- a/articles/azure-resource-manager/templates/toc.yml +++ b/articles/azure-resource-manager/templates/toc.yml @@ -111,8 +111,6 @@ href: ../../hdinsight/interactive-query/quickstart-resource-manager-template.md?toc=/azure/azure-resource-manager/templates/toc.json - name: HDInsight - Kafka href: ../../hdinsight/kafka/apache-kafka-quickstart-resource-manager-template.md?toc=/azure/azure-resource-manager/templates/toc.json - - name: HDInsight - ML Services - href: ../../hdinsight/r-server/quickstart-resource-manager-template.md?toc=/azure/azure-resource-manager/templates/toc.json - name: HDInsight - Spark href: ../../hdinsight/spark/apache-spark-jupyter-spark-sql.md?toc=/azure/azure-resource-manager/templates/toc.json - name: Stream Analytics diff --git a/articles/azure-signalr/concept-connection-string.md b/articles/azure-signalr/concept-connection-string.md index cae3ba14cc29a..d947f07a8d229 100644 --- a/articles/azure-signalr/concept-connection-string.md +++ b/articles/azure-signalr/concept-connection-string.md @@ -7,6 +7,7 @@ ms.topic: conceptual ms.date: 03/25/2022 ms.author: kenchen --- + # Connection string in Azure SignalR Service Connection string is an important concept that contains information about how to connect to SignalR service. In this article, you'll learn the basics of connection string and how to configure it in your application. @@ -15,54 +16,136 @@ Connection string is an important concept that contains information about how to When an application needs to connect to Azure SignalR Service, it will need the following information: -* The HTTP endpoint of the SignalR service instance -* How to authenticate with the service endpoint +- The HTTP endpoint of the SignalR service instance +- How to authenticate with the service endpoint + +Connection string contains such information. + +## What connection string looks like + +A connection string consists of a series of key/value pairs separated by semicolons(;) and we use an equal sign(=) to connect each key and its value. Keys aren't case sensitive. -Connection string contains such information. To see how a connection string looks like, you can open a SignalR service resource in Azure portal and go to "Keys" tab. You'll see two connection strings (primary and secondary) in the following format: +For example, a typical connection string may look like this: ``` Endpoint=https://.service.signalr.net;AccessKey=;Version=1.0; ``` -> [!NOTE] -> Besides portal, you can also use Azure CLI to get the connection string: -> -> ```bash -> az signalr key list -g -n -> ``` - You can see in the connection string, there are two main information: -* `Endpoint=https://.service.signalr.net` is the endpoint URL of the resource -* `AccessKey=` is the key to authenticate with the service. When access key is specified in connection string, SignalR service SDK will use it to generate a token that can be validated by the service. +- `Endpoint=https://.service.signalr.net` is the endpoint URL of the resource +- `AccessKey=` is the key to authenticate with the service. When access key is specified in connection string, SignalR service SDK will use it to generate a token that can be validated by the service. 
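If it helps to see that format in code, the following C# sketch (an illustration only, not the service SDK's own parser) splits a connection string into case-insensitive key/value pairs. The `contoso` endpoint name and the `<access-key>` placeholder are made-up values for the example:

```cs
// Sketch only: split a connection string into case-insensitive key/value pairs.
// The endpoint name "contoso" and "<access-key>" are placeholder values.
using System;
using System.Collections.Generic;

var connectionString =
    "Endpoint=https://contoso.service.signalr.net;AccessKey=<access-key>;Version=1.0;";

var settings = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase);

foreach (var pair in connectionString.Split(';', StringSplitOptions.RemoveEmptyEntries))
{
    // Split on the first '=' only; the value (for example, a base64 access key)
    // may itself contain '=' characters.
    var separator = pair.IndexOf('=');
    if (separator > 0)
    {
        settings[pair[..separator].Trim()] = pair[(separator + 1)..].Trim();
    }
}

Console.WriteLine(settings["endpoint"]); // keys aren't case sensitive
```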
->[!NOTE] -> For more information about how access tokens are generated and validated, see this [article](https://github.com/Azure/azure-signalr/blob/dev/docs/rest-api.md#authenticate-via-azure-signalr-service-accesskey). +The following table lists all the valid names for key/value pairs in the connection string. + +| key | Description | Required | Default value | Example value | +| -------------- | ----------- | -------- | ------------- | ------------- | +| Endpoint | The URI of your ASRS instance. | Y | N/A | https://foo.service.signalr.net | +| Port | The port that your ASRS instance is listening on. | N | 80/443, depends on the endpoint URI scheme | 8080 | +| Version | The version of the given connection string. | N | 1.0 | 1.0 | +| ClientEndpoint | The URI of your reverse proxy, like App Gateway or API Management | N | null | https://foo.bar | +| AuthType | The auth type. AccessKey is used to authorize requests by default. **Case insensitive** | N | null | azure, azure.msi, azure.app | + +### Use AccessKey + +The local auth method is used when `AuthType` isn't set. + +| key | Description | Required | Default value | Example value | +| --------- | ----------- | -------- | ------------- | ------------- | +| AccessKey | The key string in base64 format for building access tokens. | Y | null | ABCDEFGHIJKLMNOPQRSTUVWEXYZ0123456789+=/ | + +### Use Azure Active Directory + +The Azure AD auth method is used when `AuthType` is set to `azure`, `azure.app` or `azure.msi`. + +| key | Description | Required | Default value | Example value | +| -------------- | ----------- | -------- | ------------- | ------------- | +| ClientId | A GUID that represents an Azure application or an Azure identity. | N | null | `00000000-0000-0000-0000-000000000000` | +| TenantId | A GUID that represents an organization in Azure Active Directory. | N | null | `00000000-0000-0000-0000-000000000000` | +| ClientSecret | The password of an Azure application instance. | N | null | `***********************.****************` | +| ClientCertPath | The absolute path of a cert file to an Azure application instance. | N | null | `/usr/local/cert/app.cert` | + +A different `TokenCredential` is used to generate Azure AD tokens depending on the parameters you give. + +- `type=azure` + + [DefaultAzureCredential](/dotnet/api/azure.identity.defaultazurecredential) will be used. + + ``` + Endpoint=xxx;AuthType=azure + ``` + +- `type=azure.msi` + + 1. User-assigned managed identity will be used if `clientId` has been given in the connection string. + + ``` + Endpoint=xxx;AuthType=azure.msi;ClientId=00000000-0000-0000-0000-000000000000 + ``` + + - [ManagedIdentityCredential(clientId)](/dotnet/api/azure.identity.managedidentitycredential) will be used. + + 2. Otherwise system-assigned managed identity will be used. + + ``` + Endpoint=xxx;AuthType=azure.msi; + ``` + + - [ManagedIdentityCredential()](/dotnet/api/azure.identity.managedidentitycredential) will be used. + + +- `type=azure.app` + + `clientId` and `tenantId` are required to use an [Azure AD application with service principal](/azure/active-directory/develop/howto-create-service-principal-portal). + + 1.
[ClientSecretCredential(tenantId, clientId, clientSecret)](/dotnet/api/azure.identity.clientsecretcredential) will be used if `clientSecret` is given. + ``` + Endpoint=xxx;AuthType=azure.app;ClientId=00000000-0000-0000-0000-000000000000;TenantId=00000000-0000-0000-0000-000000000000;clientSecret=****** + ``` + + 2. [ClientCertificateCredential(tenantId, clientId, clientCertPath)](/dotnet/api/azure.identity.clientcertificatecredential) will be used if `clientCertPath` is given. + ``` + Endpoint=xxx;AuthType=azure.app;ClientId=00000000-0000-0000-0000-000000000000;TenantId=00000000-0000-0000-0000-000000000000;clientCertPath=/path/to/cert + ``` + +## How to get my connection strings -## Other authentication types +### From Azure portal -Besides access key, SignalR service also supports other types of authentication methods in connection string. +Open your SignalR service resource in Azure portal and go to `Keys` tab. -### Azure Active Directory Application +You'll see two connection strings (primary and secondary) in the following format: + +> Endpoint=https://.service.signalr.net;AccessKey=;Version=1.0; + +### From Azure CLI + +You can also use Azure CLI to get the connection string: + +```bash +az signalr key list -g -n +``` + +### For using Azure AD application You can use [Azure AD application](../active-directory/develop/app-objects-and-service-principals.md) to connect to SignalR service. As long as the application has the right permission to access SignalR service, no access key is needed. -To use Azure AD authentication, you need to remove `AccessKey` from connection string and add `AuthType=aad`. You also need to specify the credentials of your Azure AD application, including client ID, client secret and tenant ID. The connection string will look as follows: +To use Azure AD authentication, you need to remove `AccessKey` from connection string and add `AuthType=azure.app`. You also need to specify the credentials of your Azure AD application, including client ID, client secret and tenant ID. The connection string will look as follows: ``` -Endpoint=https://.service.signalr.net;AuthType=aad;ClientId=;ClientSecret=;TenantId=;Version=1.0; +Endpoint=https://.service.signalr.net;AuthType=azure.app;ClientId=;ClientSecret=;TenantId=;Version=1.0; ``` For more information about how to authenticate using Azure AD application, see this [article](signalr-howto-authorize-application.md). -### Managed identity +### For using Managed identity You can also use [managed identity](../active-directory/managed-identities-azure-resources/overview.md) to authenticate with SignalR service. -There are two types of managed identities, to use system assigned identity, you just need to add `AuthType=aad` to the connection string: +There are two types of managed identities. To use system-assigned identity, you just need to add `AuthType=azure.msi` to the connection string: ``` -Endpoint=https://.service.signalr.net;AuthType=aad;Version=1.0; +Endpoint=https://.service.signalr.net;AuthType=azure.msi;Version=1.0; ``` SignalR service SDK will automatically use the identity of your app server. @@ -70,7 +153,7 @@
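To see the `AuthType`-to-credential mapping described above in one place, here's a hedged C# sketch using the [Azure.Identity](/dotnet/api/azure.identity) library. It only illustrates the mapping; the SignalR service SDK constructs the credential for you from the connection string, so you don't normally write this code yourself. The `settings` dictionary is assumed to hold the parsed, case-insensitive connection string key/value pairs:

```cs
// Illustration only -- not the SignalR SDK's code. Maps the AuthType values
// described above onto Azure.Identity credential types.
using System.Collections.Generic;
using Azure.Core;
using Azure.Identity;

static TokenCredential? CreateCredential(IReadOnlyDictionary<string, string> settings)
{
    settings.TryGetValue("AuthType", out var authType);
    settings.TryGetValue("ClientId", out var clientId);

    return authType?.ToLowerInvariant() switch
    {
        // azure: DefaultAzureCredential
        "azure" => (TokenCredential)new DefaultAzureCredential(),

        // azure.msi: user-assigned identity when ClientId is given, otherwise system-assigned
        "azure.msi" => string.IsNullOrEmpty(clientId)
            ? new ManagedIdentityCredential()
            : new ManagedIdentityCredential(clientId),

        // azure.app: client secret or client certificate, depending on what is supplied
        "azure.app" when settings.ContainsKey("ClientSecret") =>
            new ClientSecretCredential(settings["TenantId"], clientId, settings["ClientSecret"]),
        "azure.app" when settings.ContainsKey("ClientCertPath") =>
            new ClientCertificateCredential(settings["TenantId"], clientId, settings["ClientCertPath"]),

        // No AuthType (or an unknown one): fall back to AccessKey-based local auth
        _ => null
    };
}
```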
To use user assigned identity, you also need to specify the client ID of the managed identity: ``` -Endpoint=https://.service.signalr.net;AuthType=aad;ClientId=;Version=1.0; +Endpoint=https://.service.signalr.net;AuthType=azure.msi;ClientId=;Version=1.0; ``` For more information about how to configure managed identity, see this [article](signalr-howto-authorize-managed-identity.md). @@ -78,11 +161,29 @@ For more information about how to configure managed identity, see this [article] > [!NOTE] > It's highly recommended to use Azure AD to authenticate with SignalR service as it's a more secure way comparing to using access key. If you don't use access key authentication at all, consider to completely disable it (go to Azure portal -> Keys -> Access Key -> Disable). If you still use access key, it's highly recommended to rotate them regularly (more information can be found [here](signalr-howto-key-rotation.md)). +### Use connection string generator + +It may be cumbersome and error-prone to build connection strings manually. + +To avoid making mistakes, we built a tool to help you generate connection string with Azure AD identities like `clientId`, `tenantId`, etc. + +To use connection string generator, open your SignalR resource in Azure portal, go to `Connection strings` tab: + +:::image type="content" source="media/concept-connection-string/generator.png" alt-text="Screenshot showing connection string generator of SignalR service in Azure portal."::: + +In this page you can choose different authentication types (access key, managed identity or Azure AD application) and input information like client endpoint, client ID, client secret, etc. Then connection string will be automatically generated. You can copy and use it in your application. + +> [!NOTE] +> Everything you input on this page won't be saved after you leave the page (since they're only client side information), so please copy and save it in a secure place for your application to use. + +> [!NOTE] +> For more information about how access tokens are generated and validated, see this [article](https://github.com/Azure/azure-signalr/blob/dev/docs/rest-api.md#authenticate-via-azure-signalr-service-accesskey). + ## Client and server endpoints Connection string contains the HTTP endpoint for app server to connect to SignalR service. This is also the endpoint server will return to clients in negotiate response, so client can also connect to the service. -But in some applications there may be an additional component in front of SignalR service and all client connections need to go through that component first (to gain additional benefits like network security, [Azure Application Gateway](../application-gateway/overview.md) is a common service that provides such functionality). +But in some applications there may be an extra component in front of SignalR service and all client connections need to go through that component first (to gain extra benefits like network security, [Azure Application Gateway](../application-gateway/overview.md) is a common service that provides such functionality). In such case, the client will need to connect to an endpoint different than SignalR service. 
Instead of manually replace the endpoint at client side, you can add `ClientEndpoint` to connecting string: @@ -101,19 +202,6 @@ Similarly, when server wants to make [server connections](signalr-concept-intern Endpoint=https://.service.signalr.net;AccessKey=;ServerEndpoint=https://;Version=1.0; ``` -## Use connection string generator - -It may be cumbersome and error-prone to compose connection string manually. In Azure portal, there is a tool to help you generate connection string with additional information like client endpoint and auth type. - -To use connection string generator, open the SignalR resource in Azure portal, go to "Connection strings" tab: - -:::image type="content" source="media/concept-connection-string/generator.png" alt-text="Screenshot showing connection string generator of SignalR service in Azure portal."::: - -In this page you can choose different authentication types (access key, managed identity or Azure AD application) and input information like client endpoint, client ID, client secret, etc. Then connection string will be automatically generated. You can copy and use it in your application. - -> [!NOTE] -> Everything you input in this page won't be saved after you leave the page (since they're only client side information), so please copy and save it in a secure place for your application to use. - ## Configure connection string in your application There are two ways to configure connection string in your application. @@ -126,10 +214,10 @@ services.AddSignalR().AddAzureSignalR(""); Or you can call `AddAzureSignalR()` without any arguments, then service SDK will read the connection string from a config named `Azure:SignalR:ConnectionString` in your [config providers](/dotnet/core/extensions/configuration-providers). -In a local development environment, the config is usually stored in file (appsettings.json or secrets.json) or environment variables, so you can use one of the following ways to configure connection string: +In a local development environment, the config is stored in file (appsettings.json or secrets.json) or environment variables, so you can use one of the following ways to configure connection string: -* Use .NET secret manager (`dotnet user-secrets set Azure:SignalR:ConnectionString ""`) -* Set connection string to environment variable named `Azure__SignalR__ConnectionString` (colon needs to replaced with double underscore in [environment variable config provider](/dotnet/core/extensions/configuration-providers#environment-variable-configuration-provider)). +- Use .NET secret manager (`dotnet user-secrets set Azure:SignalR:ConnectionString ""`) +- Set connection string to environment variable named `Azure__SignalR__ConnectionString` (colon needs to replaced with double underscore in [environment variable config provider](/dotnet/core/extensions/configuration-providers#environment-variable-configuration-provider)). In production environment, you can use other Azure services to manage config/secrets like Azure [Key Vault](../key-vault/general/overview.md) and [App Configuration](../azure-app-configuration/overview.md). See their documentation to learn how to set up config provider for those services. @@ -138,38 +226,38 @@ In production environment, you can use other Azure services to manage config/sec ### Configure multiple connection strings -Azure SignalR Service also allows server to connect to multiple service endpoints at the same time, so it can handle more connections which are beyond one service instance's limit. 
Also if one service instance is down, other service instances can be used as backup. For more information about how to use multiple instances, see this [article](signalr-howto-scale-multi-instances.md). +Azure SignalR Service also allows server to connect to multiple service endpoints at the same time, so it can handle more connections, which are beyond one service instance's limit. Also if one service instance is down, other service instances can be used as backup. For more information about how to use multiple instances, see this [article](signalr-howto-scale-multi-instances.md). There are also two ways to configure multiple instances: -* Through code +- Through code - ```cs - services.AddSignalR().AddAzureSignalR(options => - { - options.Endpoints = new ServiceEndpoint[] - { - new ServiceEndpoint("", name: "name_a"), - new ServiceEndpoint("", name: "name_b", type: EndpointType.Primary), - new ServiceEndpoint("", name: "name_c", type: EndpointType.Secondary), - }; - }); - ``` + ```cs + services.AddSignalR().AddAzureSignalR(options => + { + options.Endpoints = new ServiceEndpoint[] + { + new ServiceEndpoint("", name: "name_a"), + new ServiceEndpoint("", name: "name_b", type: EndpointType.Primary), + new ServiceEndpoint("", name: "name_c", type: EndpointType.Secondary), + }; + }); + ``` - You can assign a name and type to each service endpoint so you can distinguish them later. + You can assign a name and type to each service endpoint so you can distinguish them later. -* Through config +- Through config - You can use any supported config provider (secret manager, environment variables, key vault, etc.) to store connection strings. Take secret manager as an example: + You can use any supported config provider (secret manager, environment variables, key vault, etc.) to store connection strings. Take secret manager as an example: - ```bash - dotnet user-secrets set Azure:SignalR:ConnectionString:name_a - dotnet user-secrets set Azure:SignalR:ConnectionString:name_b:primary - dotnet user-secrets set Azure:SignalR:ConnectionString:name_c:secondary - ``` + ```bash + dotnet user-secrets set Azure:SignalR:ConnectionString:name_a + dotnet user-secrets set Azure:SignalR:ConnectionString:name_b:primary + dotnet user-secrets set Azure:SignalR:ConnectionString:name_c:secondary + ``` - You can also assign name and type to each endpoint, by using a different config name in the following format: + You can also assign name and type to each endpoint, by using a different config name in the following format: - ``` - Azure:SignalR:ConnectionString:: - ``` \ No newline at end of file + ``` + Azure:SignalR:ConnectionString:: + ``` diff --git a/articles/azure-signalr/index.yml b/articles/azure-signalr/index.yml index f217820c35df0..6e7343a2e2a6b 100644 --- a/articles/azure-signalr/index.yml +++ b/articles/azure-signalr/index.yml @@ -10,7 +10,7 @@ metadata: ms.topic: landing-page author: vicancy ms.author: lianwei - ms.date: 03/11/2020 + ms.date: 05/22/2022 # linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | sample | tutorial | video | whats-new @@ -22,6 +22,16 @@ landingContent: links: - text: What is Azure SignalR Service? 
url: signalr-overview.md + - linkListType: concept + links: + - text: Service mode + url: concept-service-mode.md + - text: Messages and connections + url: signalr-concept-messages-and-connections.md + - text: Service internals + url: signalr-concept-internals.md + - text: Azure SignalR service FAQ + url: signalr-resource-faq.yml # Card - title: Get started @@ -40,10 +50,12 @@ url: signalr-quickstart-azure-functions-java.md - text: Azure Functions - Python url: signalr-quickstart-azure-functions-python.md + - text: Broadcast real-time messages from console app + url: signalr-quickstart-rest-api.md + - linkListType: deploy + links: - text: Azure SignalR Service deployment - ARM template url: signalr-quickstart-azure-signalr-service-arm-template.md - - text: REST API - url: signalr-quickstart-rest-api.md # Card - title: Build Azure SignalR Service apps @@ -54,9 +66,85 @@ url: signalr-concept-authenticate-oauth.md - text: Build a Serverless Real-time App with Authentication url: signalr-tutorial-authenticate-azure-functions.md + - text: Build a Blazor server chat app + url: signalr-tutorial-build-blazor-server-chat-app.md - linkListType: sample links: - text: Azure SignalR Code Samples url: https://github.com/aspnet/AzureSignalR-samples + + # Card + - title: High availability + linkLists: + - linkListType: concept + links: + - text: Performance consideration + url: signalr-concept-performance.md + - linkListType: how-to-guide + links: + - text: Resiliency and disaster recovery + url: signalr-concept-disaster-recovery.md + - text: Autoscale + url: signalr-howto-scale-autoscale.md + - text: Server graceful shutdown + url: server-graceful-shutdown.md + - text: Availability Zones + url: availability-zones.md + + # Card + - title: Serverless + linkLists: + - linkListType: concept + links: + - text: Serverless scenarios + url: signalr-concept-azure-functions.md + - linkListType: how-to-guide + links: + - text: Develop and configure with Azure Functions + url: signalr-concept-serverless-development-config.md + - text: Upstream settings + url: concept-upstream.md + - linkListType: reference + links: + - text: Azure Functions bindings + url: https://docs.microsoft.com/azure/azure-functions/functions-bindings-signalr-service + + # Card + - title: Common tasks + linkLists: + - linkListType: concept + links: + - text: Metrics + url: concept-metrics.md + - linkListType: how-to-guide + links: + - text: Use private endpoints + url: howto-private-endpoints.md + - text: Manage network access + url: howto-network-access-control.md + - text: Custom domain + url: howto-custom-domain.md + - text: Managed identities + url: howto-use-managed-identity.md + - text: Availability Zones + url: availability-zones.md + + # Card + - title: More resources + linkLists: + - linkListType: concept + links: + - text: Choose between Azure SignalR service and Azure Web PubSub service + url: signalr-resource-faq.yml#how-do-i-choose-between-azure-signalr-service-and-azure-web-pubsub-service- + - linkListType: how-to-guide + links: + - text: Troubleshooting guide + url: signalr-howto-troubleshoot-guide.md + - linkListType: reference + links: + - text: REST API + url: https://docs.microsoft.com/rest/api/signalr/ - text: Azure CLI - url: signalr-reference-cli.md \ No newline at end of file + url: https://docs.microsoft.com/cli/azure/signalr?view=azure-cli-latest + - text: ASP.NET Core SignalR + url: https://docs.microsoft.com/aspnet/core/signalr/introduction diff --git
a/articles/azure-signalr/media/signalr-concept-performance/server-load.png b/articles/azure-signalr/media/signalr-concept-performance/server-load.png new file mode 100644 index 0000000000000..7baacf9d37c0f Binary files /dev/null and b/articles/azure-signalr/media/signalr-concept-performance/server-load.png differ diff --git a/articles/azure-signalr/signalr-concept-performance.md b/articles/azure-signalr/signalr-concept-performance.md index 3328037c3eece..3a2eb072137da 100644 --- a/articles/azure-signalr/signalr-concept-performance.md +++ b/articles/azure-signalr/signalr-concept-performance.md @@ -13,6 +13,19 @@ One of the key benefits of using Azure SignalR Service is the ease of scaling Si In this guide, we'll introduce the factors that affect SignalR application performance. We'll describe typical performance in different use-case scenarios. In the end, we'll introduce the environment and tools that you can use to generate a performance report. +## Quick evaluation using metrics +Before going through the factors that impact performance, let's first introduce an easy way to monitor the pressure of your service. There's a metric called **Server Load** in the portal. + +![Screenshot of the Server Load metric of Azure SignalR in the portal. The metric shows Server Load at about 8 percent usage.](./media/signalr-concept-performance/server-load.png "Server Load") + +It shows the computing pressure of your SignalR service. You can test your own scenario and check this metric to decide whether to scale up. The latency inside SignalR service remains low if the Server Load is below 70%. + +> [!NOTE] +> If you are using unit 50 or unit 100 **and** your scenario is mainly sending to small groups (group size <100) or to a single connection, you need to check [sending to small group](#small-group) or [sending to connection](#send-to-connection) for reference. In those scenarios there is a large routing cost that isn't included in the Server Load. + +Below are detailed concepts for evaluating performance. + ## Term definitions *Inbound*: The incoming message to Azure SignalR Service. diff --git a/articles/azure-signalr/signalr-howto-diagnostic-logs.md b/articles/azure-signalr/signalr-howto-diagnostic-logs.md index b1c322d46a11b..efee03781e623 100644 --- a/articles/azure-signalr/signalr-howto-diagnostic-logs.md +++ b/articles/azure-signalr/signalr-howto-diagnostic-logs.md @@ -213,9 +213,9 @@ If you find that you can't establish SignalR client connections to Azure SignalR When encountering message related problem, you can take advantage of messaging logs to troubleshoot. Firstly, [enable resource logs](#enable-resource-logs) in service, logs for server and client. > [!NOTE]
@@ -239,7 +239,7 @@ For **collect all** collecting behavior: SignalR service only trace messages in direction **from server to client via SignalR service**. The tracing ID will be generated in server, the message will carry the tracing ID to SignalR service. > [!NOTE] -> If you want to trace message and [send messages from outside a hub](https://docs.microsoft.com/aspnet/core/signalr/hubcontext) in your app server, you need to enable **collect all** collecting behavior to collect message logs for the messages which are not originated from diagnostic clients. +> If you want to trace message and [send messages from outside a hub](/aspnet/core/signalr/hubcontext) in your app server, you need to enable **collect all** collecting behavior to collect message logs for the messages which are not originated from diagnostic clients. > Diagnostic clients works for both **collect all** and **collect partially** collecting behaviors. It has higher priority to collect logs. For more information, see [diagnostic client section](#diagnostic-client). By checking the sign in server and service side, you can easily find out whether the message is sent from server, arrives at SignalR service, and leaves from SignalR service. Basically, by checking if the *received* and *sent* message are matched or not based on message tracing ID, you can tell whether the message loss issue is in server or SignalR service in this direction. For more information, see the [details](#message-flow-detail-for-path3) below. diff --git a/articles/azure-signalr/signalr-howto-move-across-regions.md b/articles/azure-signalr/signalr-howto-move-across-regions.md index 18c6f414323e7..1b98c008cdd4e 100644 --- a/articles/azure-signalr/signalr-howto-move-across-regions.md +++ b/articles/azure-signalr/signalr-howto-move-across-regions.md @@ -1,47 +1,50 @@ --- -title: Move an Azure SignalR resource to another region | Microsoft Docs -description: Shows you how to move an Azure SignalR resource to another region. +title: Move an Azure SignalR resource to another region +description: Learn how to use an Azure Resource Manager template to export the configuration of an Azure SignalR resource to a different Azure region. author: vicancy ms.service: signalr ms.topic: how-to -ms.date: 12/22/2021 +ms.date: 05/23/2022 ms.author: lianwei -ms.custom: subject-moving-resources +ms.custom: +- subject-moving-resources +- kr2b-contr-experiment --- # Move an Azure SignalR resource to another region -There are various scenarios in which you'd want to move your existing SignalR resource from one region to another. **Azure SignalR resource are region specific and can't be moved from one region to another.** You can however, use an Azure Resource Manager template to export the existing configuration of an Azure SignalR resource, modify the parameters to match the destination region, and then create a copy of your SignalR resource in another region. For more information on Resource Manager and templates, see [Quickstart: Create and deploy Azure Resource Manager templates by using the Azure portal](../azure-resource-manager/templates/quickstart-create-templates-use-the-portal.md). +Azure SignalR resources are region specific and can't be moved from one region to another. There are, however, scenarios where you might want to move your existing SignalR resource to another region. -## Prerequisites - -- Ensure that the service and features that your are using are supported in the target region. 
+You can use an Azure Resource Manager template to export the existing configuration of an Azure SignalR resource, modify the parameters to match the destination region, and then create a copy of your SignalR resource in another region. For more information on Resource Manager and templates, see [Quickstart: Create and deploy Azure Resource Manager templates by using the Azure portal](../azure-resource-manager/templates/quickstart-create-templates-use-the-portal.md). -- Verify that your Azure subscription allows you to create SignalR resource in the target region that's used. Contact support to enable the required quota. +## Prerequisites +- Ensure that the service and features that you're using are supported in the target region. +- Verify that your Azure subscription allows you to create SignalR resource in the target region that's used. +- Contact support to enable the required quota. - For preview features, ensure that your subscription is allowlisted for the target region. -## Prepare and move +## Prepare and move your SignalR resource To get started, export, and then modify a Resource Manager template. -### Export the template and deploy from the Portal +### Export the template and deploy from the Azure portal The following steps show how to prepare the SignalR resource move using a Resource Manager template, and move it to the target region using the portal. -1. Sign in to the [Azure portal](https://portal.azure.com) > **Resource Groups**. +1. Sign in to the [Azure portal](https://portal.azure.com). -2. Locate the Resource Group that contains the source SignalR resource and click on it. +1. Select **Resource Groups**. Locate the resource group that contains the source SignalR resource and select it. -3. Select > **Automation** > **Export template**. +1. Under **Automation**, select **Export template**. -4. Choose **Deploy** in the **Export template** blade. +1. Select **Deploy**. -5. Click **TEMPLATE** > **Edit parameters** to open the **parameters.json** file in the online editor. +1. Select **TEMPLATE** > **Edit parameters** to open the *parameters.json* file in the online editor. -6. To edit the parameter of the SignalR resource name, change the **value** property under **parameters**: +1. To edit the parameter of the SignalR resource name, change the `value` property under `parameters`: ```json { @@ -55,13 +58,13 @@ The following steps show how to prepare the SignalR resource move using a Resour } ``` -7. Change the value in the editor to a name of your choice for the target SignalR resource. Ensure you enclose the name in quotes. +1. Change the value in the editor to a name of your choice for the target SignalR resource. Ensure you enclose the name in quotes. -8. Click **Save** in the editor. +1. Select **Save** in the editor. -9. Click **TEMPLATE** > **Edit template** to open the **template.json** file in the online editor. +1. Select **TEMPLATE** > **Edit template** to open the *template.json* file in the online editor. -10. To edit the target region, change the **location** property under **resources** in the online editor: +1. To edit the target region, change the `location` property under `resources` in the online editor: ```json "resources": [ @@ -77,20 +80,19 @@ The following steps show how to prepare the SignalR resource move using a Resour ``` -11. To obtain region location codes, see [Azure SignalR Locations](https://azure.microsoft.com/global-infrastructure/services/?products=signalr-service). 
The code for a region is the region name with no spaces, **Central US** = **centralus**. - -12. You can also change other parameters in the template if you choose, and are optional depending on your requirements. +1. To obtain region location codes, see [Azure SignalR Locations](https://azure.microsoft.com/global-infrastructure/services/?products=signalr-service). The code for a region is the region name with no spaces, **Central US** = **centralus**. -13. Click **Save** in the online editor. +1. You can also change other parameters in the template if you choose, and are optional depending on your requirements. -14. Click **BASICS** > **Subscription** to choose the subscription where the target resource will be deployed. +1. Select **Save** in the online editor. -15. Click **BASICS** > **Resource group** to choose the resource group where the target resource will be deployed. You can click **Create new** to create a new resource group for the target resource. Ensure the name isn't the same as the source resource group of the existing resource. +1. Select **BASICS** > **Subscription** to choose the subscription where the target resource will be deployed. -16. Verify **BASICS** > **Location** is set to the target location where you wish for the resource to be deployed. +1. Select **BASICS** > **Resource group** to choose the resource group where the target resource will be deployed. You can select **Create new** to create a new resource group for the target resource. Ensure the name isn't the same as the source resource group of the existing resource. -17. Click the **Review + create** button to deploy the target Azure SignalR resource. +1. Verify **BASICS** > **Location** is set to the target location where you wish for the resource to be deployed. +1. Select **Review + create** to deploy the target Azure SignalR resource. ### Export the template and deploy using Azure PowerShell @@ -102,14 +104,14 @@ To export a template by using PowerShell: Connect-AzAccount ``` -2. If your identity is associated with more than one subscription, then set your active subscription to subscription of the SignalR resource that you want to move. +1. If your identity is associated with more than one subscription, then set your active subscription to subscription of the SignalR resource that you want to move. ```azurepowershell-interactive $context = Get-AzSubscription -SubscriptionId Set-AzContext $context ``` -3. Export the template of your source SignalR resource. These commands save a json template to your current directory. +1. Export the template of your source SignalR resource. These commands save a JSON template to your current directory. ```azurepowershell-interactive $resource = Get-AzResource ` @@ -122,14 +124,14 @@ To export a template by using PowerShell: -IncludeParameterDefaultValue ``` -4. The file downloaded will be named after the resource group the resource was exported from. Locate the file that was exported from the command named **\.json** and open it in an editor of your choice: - +1. The file downloaded will be named after the resource group the resource was exported from. Locate the file that was exported from the command named *\.json* and open it in an editor of your choice: + ```azurepowershell notepad .json ``` -5. To edit the parameter of the SignalR resource name, change the property **defaultValue** of the source SignalR resource name to the name of your target SignalR resource, ensure the name is in quotes: - +1. 
To edit the parameter of the SignalR resource name, change the property `defaultValue` of the source SignalR resource name to the name of your target SignalR resource. Ensure the name is in quotes: + ```json { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", @@ -143,7 +145,7 @@ To export a template by using PowerShell: } ``` -6. To edit the target region where the SignalR resource will be moved, change the **location** property under resources: +1. To edit the target region where the SignalR resource will be moved, change the `location` property under `resources`: ```json "resources": [ @@ -158,49 +160,46 @@ To export a template by using PowerShell: ] ``` -7. To obtain region location codes, see [Azure SignalR Locations](https://azure.microsoft.com/global-infrastructure/services/?products=signalr-service). The code for a region is the region name with no spaces, **Central US** = **centralus**. +1. To obtain region location codes, see [Azure SignalR Locations](https://azure.microsoft.com/global-infrastructure/services/?products=signalr-service). The code for a region is the region name with no spaces, **Central US** = **centralus**. + + You can also change other parameters in the template if you choose, depending on your requirements. -8. You can also change other parameters in the template if you choose, and are optional depending on your requirements. +1. Save the *\.json* file. -9. Save the **\.json** file. +1. Create a resource group in the target region for the target SignalR resource to be deployed using [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup). -10. Create a resource group in the target region for the target SignalR resource to be deployed using [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup). - ```azurepowershell-interactive New-AzResourceGroup -Name -location ``` -11. Deploy the edited **\.json** file to the resource group created in the previous step using [New-AzResourceGroupDeployment](/powershell/module/az.resources/new-azresourcegroupdeployment): +1. Deploy the edited *\.json* file to the resource group created in the previous step using [New-AzResourceGroupDeployment](/powershell/module/az.resources/new-azresourcegroupdeployment): ```azurepowershell-interactive New-AzResourceGroupDeployment -ResourceGroupName -TemplateFile .json ``` -12. To verify the resources were created in the target region, use [Get-AzResourceGroup](/powershell/module/az.resources/get-azresourcegroup) and [Get-AzSignalR](/powershell/module/az.signalr/get-azsignalr): - - ```azurepowershell-interactive - Get-AzResourceGroup -Name - ``` +1. To verify that the resources were created in the target region, use [Get-AzResourceGroup](/powershell/module/az.resources/get-azresourcegroup) and [Get-AzSignalR](/powershell/module/az.signalr/get-azsignalr): ```azurepowershell-interactive + Get-AzResourceGroup -Name Get-AzSignalR -Name -ResourceGroupName ``` -## Discard - -After the deployment, if you wish to start over or discard the SignalR resource in the target, delete the resource group that was created in the target and the moved SignalR resource will be deleted. To do so, select the resource group from your dashboard in the portal and select **Delete** at the top of the overview page. 
Alternatively you can use [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup): - -```azurepowershell-interactive -Remove-AzResourceGroup -Name -``` +> [!NOTE] +> +> After the deployment, if you wish to start over or discard the SignalR resource in the target, delete the resource group that was created in the target, which deletes the moved SignalR resource. To do so, select the resource group from your dashboard in the portal and select **Delete** at the top of the overview page. Alternatively you can use [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup): +> +> ```azurepowershell-interactive +> Remove-AzResourceGroup -Name +> ``` -## Clean up +## Clean up source region To commit the changes and complete the move of the SignalR resource, delete the source SignalR resource or resource group. To do so, select the SignalR resource or resource group from your dashboard in the portal and select **Delete** at the top of each page. ## Next steps -In this tutorial, you moved an Azure SignalR resource from one region to another and cleaned up the source resources. To learn more about moving resources between regions and disaster recovery in Azure, refer to: +In this tutorial, you moved an Azure SignalR resource from one region to another and cleaned up the source resources. To learn more about moving resources between regions and disaster recovery in Azure, see: - [Move resources to a new resource group or subscription](../azure-resource-manager/management/move-resource-group-and-subscription.md) - [Move Azure VMs to another region](../site-recovery/azure-to-azure-tutorial-migrate.md) diff --git a/articles/azure-signalr/signalr-howto-scale-multi-instances.md b/articles/azure-signalr/signalr-howto-scale-multi-instances.md index 9cfe53b578283..a492295f0d0f0 100644 --- a/articles/azure-signalr/signalr-howto-scale-multi-instances.md +++ b/articles/azure-signalr/signalr-howto-scale-multi-instances.md @@ -234,7 +234,7 @@ private class CustomRouter : EndpointRouterDecorator ## Dynamic Scale ServiceEndpoints -From SDK version 1.5.0, we're enabling dynamic scale ServiceEndpoints for ASP.NET Core version first. So you don't have to restart app server when you need to add/remove a ServiceEndpoint. As ASP.NET Core is supporting default configuration like `appsettings.json` with `reloadOnChange: true`, you don't need to change a code and it's supported by nature. And if you'd like to add some customized configuration and work with hot-reload, please refer to [this](/aspnet/core/fundamentals/configuration/?view=aspnetcore-3.1). +From SDK version 1.5.0, we're enabling dynamic scale ServiceEndpoints for ASP.NET Core version first. So you don't have to restart app server when you need to add/remove a ServiceEndpoint. As ASP.NET Core is supporting default configuration like `appsettings.json` with `reloadOnChange: true`, you don't need to change a code and it's supported by nature. And if you'd like to add some customized configuration and work with hot-reload, please refer to [this](/aspnet/core/fundamentals/configuration/?view=aspnetcore-3.1&preserve-view=true). 
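As a rough illustration of that hot-reload setup (a sketch assuming an ASP.NET Core project on .NET 6 minimal hosting with the Microsoft.Azure.SignalR package, and a hypothetical `signalr-endpoints.json` file holding the `Azure:SignalR:ConnectionString:<name>:<type>` entries), you might register the configuration source with `reloadOnChange: true` so ServiceEndpoints can be added or removed without restarting the app server:

```cs
// Sketch: register an extra JSON config file with hot reload enabled so that
// ServiceEndpoint changes are picked up without restarting the app server.
// "signalr-endpoints.json" is a hypothetical file name used for illustration.
var builder = WebApplication.CreateBuilder(args);

builder.Configuration.AddJsonFile(
    "signalr-endpoints.json", optional: true, reloadOnChange: true);

// Without arguments, the SDK reads endpoints from configuration
// (Azure:SignalR:ConnectionString and Azure:SignalR:ConnectionString:<name>:<type>).
builder.Services.AddSignalR().AddAzureSignalR();

var app = builder.Build();
app.Run();
```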
> [!NOTE] > diff --git a/articles/azure-signalr/signalr-quickstart-dotnet-core.md b/articles/azure-signalr/signalr-quickstart-dotnet-core.md index 1f76bf8531fd6..357c028b4682d 100644 --- a/articles/azure-signalr/signalr-quickstart-dotnet-core.md +++ b/articles/azure-signalr/signalr-quickstart-dotnet-core.md @@ -22,6 +22,14 @@ The code for this tutorial is available for download in the [AzureSignalR-sample [!INCLUDE [quickstarts-free-trial-note](../../includes/quickstarts-free-trial-note-dotnet.md)] +Ready to start? + +> [!div class="nextstepaction"] +> [Step by step build](#prerequisites) + +> [!div class="nextstepaction"] +> [Try chat demo now](https://asrs-simplechat-live-demo.azurewebsites.net/) + ## Prerequisites * Install the [.NET Core SDK](https://dotnet.microsoft.com/download). diff --git a/articles/azure-signalr/signalr-resource-faq.yml b/articles/azure-signalr/signalr-resource-faq.yml index bed3f3d7d68bc..a66c8d059c3c0 100644 --- a/articles/azure-signalr/signalr-resource-faq.yml +++ b/articles/azure-signalr/signalr-resource-faq.yml @@ -95,3 +95,20 @@ sections: Where does my data reside? answer: | Azure SignalR Service works as a data processor service. It won't store any customer content, and data residency is included by design. If you use Azure SignalR Service together with other Azure services, like Azure Storage for diagnostics, see [this white paper](https://azure.microsoft.com/resources/achieving-compliant-data-residency-and-security-with-azure/) for guidance about how to keep data residency in Azure regions. + + - question: | + How do I choose between Azure SignalR Service and Azure Web PubSub service? + answer: | + Both [Azure SignalR Service](https://azure.microsoft.com/services/signalr-service) and [Azure Web PubSub service](https://azure.microsoft.com/services/web-pubsub) help customers build real-time web applications easily with large scale and high availability and enable customers to focus on their business logic instead of managing the messaging infrastructure. In general, you may choose Azure SignalR Service if you already use SignalR library to build real-time applications. Instead, if you're looking for a generic solution to build real-time application based on WebSocket and publish-subscribe pattern, you may choose Azure Web PubSub service. The Azure Web PubSub service is not a replacement for Azure SignalR Service and vice-versa; they target different scenarios. The following guidance will help you decide which service to use for your scenario. + + Azure SignalR Service is more suitable if: + + - You're already using ASP.NET or ASP.NET Core SignalR, primarily using .NET or need to integrate with .NET ecosystem (like Blazor). + - There's a SignalR client available for your platform. + - You need an established protocol that supports a wide variety of calling patterns (RPC and streaming), transports (WebSocket, server sent events, and long polling) and with a client that manages the connection lifetime on your behalf. + + Azure Web PubSub service is more suitable for situations where: + + - You need to build real-time applications based on WebSocket technology or publish-subscribe over WebSocket. + - You want to build your own subprotocol or use existing advanced protocols over WebSocket (for example, MQTT, AMQP over WebSocket). + - You're looking for a lightweight server, for example, sending messages to client without going through the configured backend. 
diff --git a/articles/azure-signalr/signalr-tutorial-build-blazor-server-chat-app.md b/articles/azure-signalr/signalr-tutorial-build-blazor-server-chat-app.md index e9d6f51abf725..5b39b28249a64 100644 --- a/articles/azure-signalr/signalr-tutorial-build-blazor-server-chat-app.md +++ b/articles/azure-signalr/signalr-tutorial-build-blazor-server-chat-app.md @@ -4,7 +4,7 @@ description: In this tutorial, you learn how to build and modify a Blazor Server author: vicancy ms.service: signalr ms.topic: tutorial -ms.date: 09/09/2020 +ms.date: 05/22/2022 ms.author: lianwei ms.devlang: csharp --- @@ -19,6 +19,14 @@ This tutorial shows you how to build and modify a Blazor Server app. You'll lear > * Quick-deploy to Azure App Service in Visual Studio. > * Migrate from local SignalR to Azure SignalR Service. +Ready to start? + +> [!div class="nextstepaction"] +> [Step by step build](#prerequisites) + +> [!div class="nextstepaction"] +> [Try Blazor demo now](https://asrs-blazorchat-live-demo.azurewebsites.net/chatroom) + ## Prerequisites * Install [.NET Core 3.0 SDK](https://dotnet.microsoft.com/download/dotnet-core/3.0) (Version >= 3.0.100) diff --git a/articles/azure-video-analyzer/video-analyzer-docs/cloud/faq.yml b/articles/azure-video-analyzer/video-analyzer-docs/cloud/faq.yml index 63907437b7133..5caa023a42cf6 100644 --- a/articles/azure-video-analyzer/video-analyzer-docs/cloud/faq.yml +++ b/articles/azure-video-analyzer/video-analyzer-docs/cloud/faq.yml @@ -90,7 +90,7 @@ sections: answer: | **How is Azure Video Analyzer billed?** - For billing details, see [Video Analyzer pricing](https://azure.microsoft.com/pricing/details/video-analyzer/). + For billing details, see [Video Analyzer pricing](/azure/azure-video-analyzer/video-analyzer-docs/). additionalContent: | diff --git a/articles/azure-video-analyzer/video-analyzer-docs/edge/deploy-iot-edge-linux-on-windows.md b/articles/azure-video-analyzer/video-analyzer-docs/edge/deploy-iot-edge-linux-on-windows.md index f1b55a2d27d92..0dc81aa2be2f7 100644 --- a/articles/azure-video-analyzer/video-analyzer-docs/edge/deploy-iot-edge-linux-on-windows.md +++ b/articles/azure-video-analyzer/video-analyzer-docs/edge/deploy-iot-edge-linux-on-windows.md @@ -93,7 +93,7 @@ The following depicts the overall flow of the document and in 5 simple steps you ## Next steps * Try motion detection along with recording relevant videos in the Cloud. Follow the steps from the [detect motion and record video clips](detect-motion-record-video-edge-devices.md) quickstart. -* Use our [VS Code extension](https://marketplace.visualstudio.com/items?itemName=ms-azuretools.live-video-analytics-edge) to view additional pipelines. +* Use our [VS Code extension](https://marketplace.visualstudio.com/vscode) to view additional pipelines. * Use an [IP camera](https://en.wikipedia.org/wiki/IP_camera) that supports RTSP instead of using the RTSP simulator. You can find IP cameras that support RTSP on the [ONVIF conformant products](https://www.onvif.org/conformant-products/) page. Look for devices that conform with profiles G, S, or T. * Run [AI on Live Video](analyze-live-video-use-your-model-http.md#overview) (you can skip the prerequisite setup as it has already been done above). 
diff --git a/articles/azure-video-analyzer/video-analyzer-docs/edge/faq.yml b/articles/azure-video-analyzer/video-analyzer-docs/edge/faq.yml index 1fa5c6fe9eaba..dc1b850b6cb58 100644 --- a/articles/azure-video-analyzer/video-analyzer-docs/edge/faq.yml +++ b/articles/azure-video-analyzer/video-analyzer-docs/edge/faq.yml @@ -224,7 +224,7 @@ sections: answer: | **How is Video Analyzer billed?** - For billing details, see [Video Analyzer pricing](https://azure.microsoft.com/pricing/details/video-analyzer/). + For billing details, see [Video Analyzer pricing](/azure/azure-video-analyzer/video-analyzer-docs/). additionalContent: | diff --git a/articles/azure-video-analyzer/video-analyzer-docs/index.yml b/articles/azure-video-analyzer/video-analyzer-docs/index.yml index 6710be5f5dcfa..089ce4b1c0492 100644 --- a/articles/azure-video-analyzer/video-analyzer-docs/index.yml +++ b/articles/azure-video-analyzer/video-analyzer-docs/index.yml @@ -130,7 +130,7 @@ landingContent: - text: Release notes url: release-notes.md - text: Pricing - url: https://azure.microsoft.com/pricing/details/video-analyzer/ + url: /azure/azure-video-analyzer/video-analyzer-docs/ - text: Edge module FAQ url: edge/faq.yml - text: Service FAQ diff --git a/articles/azure-video-analyzer/video-analyzer-docs/toc.yml b/articles/azure-video-analyzer/video-analyzer-docs/toc.yml index e097ae808145d..8104e4cd89952 100644 --- a/articles/azure-video-analyzer/video-analyzer-docs/toc.yml +++ b/articles/azure-video-analyzer/video-analyzer-docs/toc.yml @@ -227,7 +227,7 @@ - name: Service FAQ href: cloud/faq.yml - name: Pricing - href: https://azure.microsoft.com/pricing/details/video-analyzer/ + href: https://azure.microsoft.com/pricing/ - name: Quotas and limitations href: quotas-limitations.md - name: Terraform resource diff --git a/articles/azure-video-indexer/audio-effects-detection.md b/articles/azure-video-indexer/audio-effects-detection.md index f75106fe67654..af2146e7a88cd 100644 --- a/articles/azure-video-indexer/audio-effects-detection.md +++ b/articles/azure-video-indexer/audio-effects-detection.md @@ -18,7 +18,7 @@ Some scenarios where this feature is useful: ## Supported audio categories -**Audio effect detection** can detect and classify 7 different categories. In the next table, you can find the different categories split in to the different presets, divided to **Standard** and **Advanced**. For more information, see [pricing](https://azure.microsoft.com/pricing/details/azure/media-services/). +**Audio effect detection** can detect and classify 7 different categories. In the next table, you can find the different categories split in to the different presets, divided to **Standard** and **Advanced**. For more information, see [pricing](https://azure.microsoft.com/pricing/details/media-services/). |Indexing type |Standard indexing| Advanced indexing| |---|---|---| diff --git a/articles/azure-video-indexer/compare-video-indexer-with-media-services-presets.md b/articles/azure-video-indexer/compare-video-indexer-with-media-services-presets.md index 375f33604b8c3..82c87b6dec8c1 100644 --- a/articles/azure-video-indexer/compare-video-indexer-with-media-services-presets.md +++ b/articles/azure-video-indexer/compare-video-indexer-with-media-services-presets.md @@ -9,7 +9,7 @@ ms.author: juliako # Compare Azure Media Services v3 presets and Azure Video Indexer -This article compares the capabilities of **Azure Video Indexer (formerly Video Indexer) APIs** and **Media Services v3 APIs**. 
+This article compares the capabilities of **Azure Video Indexer APIs** and **Media Services v3 APIs**. Currently, there is an overlap between features offered by the [Azure Video Indexer APIs](https://api-portal.videoindexer.ai/) and the [Media Services v3 APIs](https://github.com/Azure/azure-rest-api-specs/blob/master/specification/mediaservices/resource-manager/Microsoft.Media/stable/2018-07-01/Encoding.json). The following table offers the current guideline for understanding the differences and similarities. @@ -19,7 +19,7 @@ Currently, there is an overlap between features offered by the [Azure Video Inde |---|---|---| |Media Insights|[Enhanced](video-indexer-output-json-v2.md) |[Fundamentals](/azure/media-services/latest/analyze-video-audio-files-concept)| |Experiences|See the full list of supported features:
                  [Overview](video-indexer-overview.md)|Returns video insights only| -|Billing|[Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/#analytics)|[Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/#analytics)| +|Billing|[Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/#analytics) |[Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/#analytics) | |Compliance|For the most current compliance updates, visit [Azure Compliance Offerings.pdf](https://gallery.technet.microsoft.com/Overview-of-Azure-c1be3942/file/178110/23/Microsoft%20Azure%20Compliance%20Offerings.pdf) and search for "Azure Video Indexer" to see if it complies with a certificate of interest.|For the most current compliance updates, visit [Azure Compliance Offerings.pdf](https://gallery.technet.microsoft.com/Overview-of-Azure-c1be3942/file/178110/23/Microsoft%20Azure%20Compliance%20Offerings.pdf) and search for "Media Services" to see if it complies with a certificate of interest.| |Free Trial|East US|Not available| |Region availability|See [Cognitive Services availability by region](https://azure.microsoft.com/global-infrastructure/services/?products=cognitive-services)|See [Media Services availability by region](https://azure.microsoft.com/global-infrastructure/services/?products=media-services).| diff --git a/articles/azure-video-indexer/concepts-overview.md b/articles/azure-video-indexer/concepts-overview.md index ee680d7a7fd60..47ac3dfd58ff8 100644 --- a/articles/azure-video-indexer/concepts-overview.md +++ b/articles/azure-video-indexer/concepts-overview.md @@ -42,10 +42,6 @@ The confidence score indicates the confidence in an insight. It is a number betw Use textual and visual content moderation models to keep your users safe from inappropriate content and validate that the content you publish matches your organization's values. You can automatically block certain videos or alert your users about the content. For more information, see [Insights: visual and textual content moderation](video-indexer-output-json-v2.md#visualcontentmoderation). -## Blocks - -Blocks are meant to make it easier to go through the data. For example, block might be broken down based on when speakers change or there is a long pause. - ## Project and editor The [Azure Video Indexer](https://www.videoindexer.ai/) website enables you to use your video's deep insights to: find the right media content, locate the parts that you’re interested in, and use the results to create an entirely new project. Once created, the project can be rendered and downloaded from Azure Video Indexer and be used in your own editing applications or downstream workflows. diff --git a/articles/azure-video-indexer/connect-to-azure.md b/articles/azure-video-indexer/connect-to-azure.md index ef2e82847795c..f453e192d0422 100644 --- a/articles/azure-video-indexer/connect-to-azure.md +++ b/articles/azure-video-indexer/connect-to-azure.md @@ -16,10 +16,11 @@ When creating an Azure Video Indexer account, you can choose a free trial accoun 1. [Azure Video Indexer portal](https://aka.ms/vi-portal-link) 2. [Azure portal](https://portal.azure.com/#home) - 3. 
[QuickStart ARM template](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/ARM-Samples/Create-Account) To read more on how to create a **new ARM-Based** Azure Video Indexer account, read this [article](create-video-analyzer-for-media-account.md) +For more details, see [pricing](https://azure.microsoft.com/pricing/details/video-indexer/). + ## How to create classic accounts This article shows how to create an Azure Video Indexer classic account. The topic provides steps for connecting to Azure using the automatic (default) flow. It also shows how to connect to Azure manually (advanced). @@ -66,11 +67,11 @@ The article also covers [Linking an Azure Video Indexer account to Azure Governm If the connection to Azure failed, you can attempt to troubleshoot the problem by connecting manually. > [!NOTE] -> It's mandatory to have the following three accounts in the same region: the Azure Video Indexer account that you're connecting with the Media Services account, as well as the Azure storage account connected to the same Media Services account. +> It's mandatory to have the following three accounts in the same region: the Azure Video Indexer account that you're connecting with the Media Services account, as well as the Azure storage account connected to the same Media Services account. When you create an Azure Video Indexer account and connect it to Media Services, the media and metadata files are stored in the Azure storage account associated with that Media Services account. ### Create and configure a Media Services account -1. Use the [Azure](https://portal.azure.com/) portal to create an Azure Media Services account, as described in [Create an account](/azure/azure/media-services/previous/media-services-portal-create-account). +1. Use the [Azure](https://portal.azure.com/) portal to create an Azure Media Services account, as described in [Create an account](/azure/media-services/previous/media-services-portal-create-account). Make sure the Media Services account was created with the classic APIs. @@ -88,10 +89,10 @@ If the connection to Azure failed, you can attempt to troubleshoot the problem b In the new Media Services account, select **Streaming endpoints**. Then select the streaming endpoint and press start. :::image type="content" alt-text="Screenshot that shows how to specify streaming endpoints." source="./media/create-account/create-ams-account-se.png"::: -4. For Azure Video Indexer to authenticate with Media Services API, an AD app needs to be created. The following steps guide you through the Azure AD authentication process described in [Get started with Azure AD authentication by using the Azure portal](/azure/azure/media-services/previous/media-services-portal-get-started-with-aad): +4. For Azure Video Indexer to authenticate with Media Services API, an AD app needs to be created. The following steps guide you through the Azure AD authentication process described in [Get started with Azure AD authentication by using the Azure portal](/azure/media-services/previous/media-services-portal-get-started-with-aad): 1. In the new Media Services account, select **API access**. - 2. Select [Service principal authentication method](/azure/azure/media-services/previous/media-services-portal-get-started-with-aad). + 2. Select [Service principal authentication method](/azure/media-services/previous/media-services-portal-get-started-with-aad). 3. 
Get the client ID and client secret After you select **Settings**->**Keys**, add **Description**, press **Save**, and the key value gets populated. diff --git a/articles/azure-video-indexer/considerations-when-use-at-scale.md b/articles/azure-video-indexer/considerations-when-use-at-scale.md index aee846af7b7a2..d29470add6ad7 100644 --- a/articles/azure-video-indexer/considerations-when-use-at-scale.md +++ b/articles/azure-video-indexer/considerations-when-use-at-scale.md @@ -43,7 +43,7 @@ To see an example of how to upload videos using URL, check out [this example](up ## Automatic Scaling of Media Reserved Units -Starting August 1st 2021, Azure Video Indexer enabled [Reserved Units](/azure/azure/media-services/latest/concept-media-reserved-units)(MRUs) auto scaling by [Azure Media Services](/azure/azure/media-services/latest/media-services-overview) (AMS), as a result you do not need to manage them through Azure Video Indexer. That will allow price optimization, e.g. price reduction in many cases, based on your business needs as it is being auto scaled. +Starting August 1st 2021, Azure Video Indexer enabled [Reserved Units](/azure/media-services/latest/concept-media-reserved-units)(MRUs) auto scaling by [Azure Media Services](/azure/media-services/latest/media-services-overview) (AMS), as a result you do not need to manage them through Azure Video Indexer. That will allow price optimization, e.g. price reduction in many cases, based on your business needs as it is being auto scaled. ## Respect throttling diff --git a/articles/azure-video-indexer/customize-person-model-with-website.md b/articles/azure-video-indexer/customize-person-model-with-website.md index 9cffa583c3449..2a240ca20bc0a 100644 --- a/articles/azure-video-indexer/customize-person-model-with-website.md +++ b/articles/azure-video-indexer/customize-person-model-with-website.md @@ -5,7 +5,7 @@ services: azure-video-analyzer author: Juliako manager: femila ms.topic: article -ms.date: 12/16/2020 +ms.date: 05/31/2022 ms.author: juliako --- @@ -34,7 +34,7 @@ You can use the Azure Video Indexer website to edit faces that were detected in ## Create a new Person model 1. Select the **+ Add model** button on the right. -1. Enter the name of the model. You can now add new people and faces to the new Person model. +1. Enter the name of the model and select the check button to save the new model created. You can now add new people and faces to the new Person model. 1. Select the list menu button and choose **+ Add person**. > [!div class="mx-imgBorder"] @@ -77,7 +77,7 @@ You can delete any Person model that you created in your account. However, you c ## Manage existing people in a Person model -To look at the contents of any of your Person models, select the arrow next to the name of the Person model. The drop-down shows you all of the people in that particular Person model. If you select the list menu button next to each of the people, you see manage, rename, and delete options. +To look at the contents of any of your Person models, select the arrow next to the name of the Person model. Then you can view all of the people in that particular Person model. If you select the list menu button next to each of the people, you see manage, rename, and delete options. ![Screenshot shows a contextual menu with options to Manage, Rename, and Delete.](./media/customize-face-model/manage-people.png) @@ -108,7 +108,7 @@ You can add more faces to the person by selecting **Add images**. Select the image you wish to delete and click **Delete**. 
-#### Rename and delete the person +#### Rename and delete a person You can use the manage pane to rename the person and to delete the person from the Person model. @@ -174,6 +174,10 @@ To delete a detected face in your video, go to the Insights pane and select the The person, if they had been named, will also continue to exist in the Person model that was used to index the video from which you deleted the face unless you specifically delete the person from the Person model. +## Optimize the ability of your model to recognize a person + +To optimize your model's ability to recognize a person, upload as many different images as possible, from different angles. To get optimal results, use high-resolution images. + ## Next steps [Customize Person model using APIs](customize-person-model-with-api.md) diff --git a/articles/azure-video-indexer/deploy-with-arm-template.md b/articles/azure-video-indexer/deploy-with-arm-template.md index dd8443ced8e0b..99ac42e2435b6 100644 --- a/articles/azure-video-indexer/deploy-with-arm-template.md +++ b/articles/azure-video-indexer/deploy-with-arm-template.md @@ -2,7 +2,7 @@ title: Deploy Azure Video Indexer with ARM template description: In this tutorial you will create an Azure Video Indexer account by using Azure Resource Manager (ARM) template. ms.topic: tutorial -ms.date: 12/01/2021 +ms.date: 05/23/2022 ms.author: juliako --- @@ -20,7 +20,7 @@ The resource will be deployed to your subscription and will create the Azure Vid ## Prerequisites -* An Azure Media Services (AMS) account. You can create one for free through the [Create AMS Account](/azure/azure/media-services/latest/account-create-how-to). +* An Azure Media Services (AMS) account. You can create one for free through the [Create AMS Account](/azure/media-services/latest/account-create-how-to). ## Deploy the sample @@ -28,13 +28,13 @@ The resource will be deployed to your subscription and will create the Azure Vid ### Option 1: Click the "Deploy To Azure Button", and fill in the missing parameters -[![Deploy to Azure](https://aka.ms/deploytoazurebutton)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure-Samples%2Fmedia-services-video-indexer%2Fmaster%2FARM-Samples%2FCreate-Account%2Favam.template.json) +[![Deploy to Azure](https://aka.ms/deploytoazurebutton)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure-Samples%2Fmedia-services-video-indexer%2Fmaster%2FARM-Quick-Start%2Favam.template.json) ---- ### Option 2 : Deploy using PowerShell Script -1. Open The [template file](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/ARM-Samples/Create-Account/avam.template.json) file and inspect its content. +1. Open the [template file](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/ARM-Quick-Start/avam.template.json) and inspect its content. 2. Fill in the required parameters (see below) 3. Run the Following PowerShell commands: @@ -52,7 +52,7 @@ The resource will be deployed to your subscription and will create the Azure Vid ``` > [!NOTE] -> If you would like to work with bicep format, inspect the [bicep file](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/ARM-Samples/Create-Account/avam.template.bicep) on this repo. +> If you would like to work with the Bicep format, inspect the [bicep file](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/ARM-Quick-Start/avam.template.bicep) in this repo.
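For Option 2, a minimal end-to-end deployment could look like the following PowerShell sketch. The resource group name, template path, and the parameter names in the hashtable are placeholders and assumptions rather than values from the tutorial; use the parameters documented in the **Parameters** section that follows.

```powershell
# Sign in and select the subscription that contains your Media Services account.
Connect-AzAccount
Set-AzContext -Subscription "<subscription-id>"

# Create (or reuse) a resource group for the Azure Video Indexer account.
# The name and location below are placeholders.
New-AzResourceGroup -Name "rg-video-indexer" -Location "eastus" -Force

# Deploy the downloaded avam.template.json.
# The parameter names in this hashtable are illustrative; use the parameters
# documented in the Parameters section of this article.
New-AzResourceGroupDeployment `
    -ResourceGroupName "rg-video-indexer" `
    -TemplateFile ".\avam.template.json" `
    -TemplateParameterObject @{
        name                          = "my-video-indexer-account"
        mediaServiceAccountResourceId = "<media-services-account-resource-id>"
    }
```

Passing parameters inline with `-TemplateParameterObject` avoids editing the template; `-TemplateParameterFile` works the same way if you prefer a parameter file.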
## Parameters @@ -94,7 +94,7 @@ The resource will be deployed to your subscription and will create the Azure Vid If you're new to Azure Video Indexer, see: -* [Azure Video Indexer Documentation](/azure/azure-video-indexer) +* [Azure Video Indexer Documentation](./index.yml) * [Azure Video Indexer Developer Portal](https://api-portal.videoindexer.ai/) * After completing this tutorial, head to other Azure Video Indexer samples, described on [README.md](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/README.md) @@ -106,4 +106,4 @@ If you're new to template deployment, see: ## Next steps -[Connect an existing classic paid Azure Video Indexer account to ARM-based account](connect-classic-account-to-arm.md) \ No newline at end of file +[Connect an existing classic paid Azure Video Indexer account to ARM-based account](connect-classic-account-to-arm.md) diff --git a/articles/azure-video-indexer/edit-transcript-lines-portal.md b/articles/azure-video-indexer/edit-transcript-lines-portal.md new file mode 100644 index 0000000000000..48e3418e8f09a --- /dev/null +++ b/articles/azure-video-indexer/edit-transcript-lines-portal.md @@ -0,0 +1,51 @@ +--- +title: Insert or remove transcript lines in Azure Video Indexer portal +description: This article explains how to insert or remove a transcript line in the Azure Video Indexer portal. +ms.author: itnorman +ms.topic: how-to +ms.date: 05/03/2022 +--- + +# Insert or remove transcript lines in the Azure Video Indexer portal + +This article explains how to insert or remove a transcript line in the Azure Video Indexer portal. + +## Add new line to the transcript timeline + +While in the edit mode, hover between two transcription lines. Where there's a gap between the **ending time** of one **transcript line** and the beginning of the following transcript line, you'll see the **add new transcription line** option. + +:::image type="content" alt-text="Screenshot of how to add new transcription." source="./media/edit-transcript-lines-portal/add-new-transcription-line.png"::: + +After you select **add new transcription line**, you can add the new text and the time stamp for the new line. Enter the text, choose the time stamp for the new line, and select **save**. The default timestamp is the gap between the previous and next transcript lines. + +:::image type="content" alt-text="Screenshot of a new transcript time stamp line." source="./media/edit-transcript-lines-portal/transcript-time-stamp.png"::: + +If there isn’t an option to add a new line, you can adjust the end/start time of the relevant transcript lines to fit a new line in your desired place. + +Choose an existing line in the transcript, select the **three dots** icon, select **edit**, and change the time stamp accordingly. + +> [!NOTE] +> New lines will not appear as part of the **From transcript edits** in the **Content model customization** under languages. +> +> While using the API, when adding a new line, **Speaker name** can be added using free text. For example, *Speaker 1* can now become *Adam*. + +## Edit existing line + +While in the edit mode, select the **three dots** icon. The enhanced editing options now include not only the text but also the timestamp, with millisecond accuracy. + +## Delete line + +Lines can now be deleted through the same **three dots** icon. + +## Example of how and when to use this feature + +To consolidate two lines that you believe should appear as one: + +1. Go to line number 2 and select **edit**. +1. Copy the text. +1. Delete the line. +1. Go to line 1, select **edit**, paste the text, and save. + +## Next steps + +To update transcript lines and text by using the API, visit the [Azure Video Indexer Developer portal](https://aka.ms/avam-dev-portal). diff --git a/articles/azure-video-indexer/faq.yml b/articles/azure-video-indexer/faq.yml index 33e0fef9e0ba7..ca89c4eb9ed07 100644 --- a/articles/azure-video-indexer/faq.yml +++ b/articles/azure-video-indexer/faq.yml @@ -2,11 +2,11 @@ metadata: title: Frequently asked questions about Azure Video Indexer - Azure description: This article gives answers to frequently asked questions about Azure Video Indexer. - services: azure-video-analyzer + services: azure-video-indexer author: Juliako manager: femila ms.topic: faq - ms.date: 05/25/2021 + ms.date: 06/01/2022 ms.author: juliako title: Azure Video Indexer frequently asked questions summary: This article answers frequently asked questions about Azure Video Indexer. @@ -37,7 +37,7 @@ sections: answer: | Azure Video Indexer includes a free trial offering that provides you with 600 minutes in the web-based interface and 2,400 minutes via the API. You can [login to the Azure Video Indexer web-based interface](https://www.videoindexer.ai/) and try it for yourself using any web identity and without having to set up an Azure Subscription. Follow [this easy introduction lab](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/IntroToVideoIndexer.md) to get better idea of how to use Azure Video Indexer. - To index videos and audio flies at scale, you can connect Azure Video Indexer to a paid Microsoft Azure subscription. You can find more information on pricing on the [pricing](https://azure.microsoft.com/pricing/details/cognitive-services/video-indexer/) page. + To index videos and audio files at scale, you can connect Azure Video Indexer to a paid Microsoft Azure subscription. You can find more information on pricing on the [pricing](https://azure.microsoft.com/pricing/details/video-indexer/) page. You can find more information on getting started in [Get started](video-indexer-get-started.md). @@ -57,6 +57,10 @@ sections: In the Azure Video Indexer web-based portal, you can upload a media file using the file upload dialog or by pointing to a URL that directly hosts the source file (see [example](https://nimbuscdn-nimbuspm.streaming.mediaservices.windows.net/2b533311-b215-4409-80af-529c3e853622/Ignite-short.mp4)). Any URL that hosts the media content using an iFrame or embed code will not work (see [example](https://www.videoindexer.ai/accounts/7e1282e8-083c-46ab-8c20-84cae3dc289d/videos/5cfa29e152/?t=4.11)). For more information, please see this [how-to guide](./upload-index-videos.md). + + - question: How many files can I stitch and render in a project? + answer: | + In Azure Video Indexer you can create a project and add multiple files to stitch and render as a new file. The number of source files is set to 10 in the portal UI and 100 in the API. This limit is set by the Azure Media Services Media Encoder Standard API, on which Azure Video Indexer depends. - question: How long does it take Azure Video Indexer to extract insights from media? answer: | @@ -95,7 +99,7 @@ sections: - question: What is the SLA for Azure Video Indexer? answer: | - Azure Media Service’s SLA covers Azure Video Indexer and can be found on the [SLA](https://azure.microsoft.com/support/legal/sla/azure/media-services/v1_2/) page. The SLA only applies to Azure Video Indexer paid accounts and does not apply to the free trial.
+ Azure Media Service’s SLA covers Azure Video Indexer and can be found on the [SLA](https://azure.microsoft.com/support/legal/sla/media-services/v1_2/) page. The SLA only applies to Azure Video Indexer paid accounts and does not apply to the free trial. - name: Privacy Questions questions: @@ -141,6 +145,8 @@ sections: answer: | When uploading a video to Azure Video Indexer, an automatic content analysis is done by algorithms and models in order to make sure no inappropriate content will be presented publicly. If a video is found to be suspicious as containing explicit content, it will not be possible to set it as public. However, the account members can still access it as a private video (view it, download the insights and extracted artifacts, and perform other operations available to account members). + [!INCLUDE [artifacts](./includes/artifacts.md)] + In order to set the video for public access, you can either: * Build your own interface layer (such as app or website) and use it to interact with the Azure Video Indexer service. This way the video remains private in our portal and your users can interact with it through your interface. For example, you can still get the insights or allow viewing of the video in your own interface. @@ -190,7 +196,7 @@ sections: questions: - question: How much does Azure Video Indexer cost? answer: | - Azure Video Indexer uses a simple pay-as-you-go pricing model based on the duration of the content input that you index. Additional charges may apply for encoding, streaming, storage, network usage, and media reserved units. For more information, see the [pricing](https://azure.microsoft.com/pricing/details/cognitive-services/video-indexer/) page. + Azure Video Indexer uses a simple pay-as-you-go pricing model based on the duration of the content input that you index. Additional charges may apply for encoding, streaming, storage, network usage, and media reserved units. For more information, see the [pricing](/azure/azure-video-indexer/) page. - question: When am I billed for using Azure Video Indexer? answer: When sending a video to be indexed, the user will define the indexing to be video analysis, audio analysis or both. This will determine which SKUs will be charged. If there is a critical level error during processing, an error code will be returned as a response. In such a case, no billing occurs. A critical error can be caused by a bug in our code or a critical failure in an internal dependency the service has. Errors such as wrong identification or insight extraction are not considered as critical and a response is returned. In any case where a valid (non-error code) response is returned, billing occurs. diff --git a/articles/azure-video-indexer/includes/artifacts.md b/articles/azure-video-indexer/includes/artifacts.md new file mode 100644 index 0000000000000..9f93ca3aacb14 --- /dev/null +++ b/articles/azure-video-indexer/includes/artifacts.md @@ -0,0 +1,10 @@ +--- +author: Juliako +ms.topic: include +ms.service: azure-video-indexer +ms.date: 06/02/2022 +ms.author: juliako +--- + +> [!WARNING] +> We do not recommend that you use data directly from the artifacts folder for production purposes. Artifacts are intermediate outputs of the indexing process. They are essentially raw outputs of the various AI engines that analyze the videos; the artifacts schema may change over time. 
It is recommended that you use the [Get Video Index](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Video-Index) API, as described in [Get insights and artifacts produced by the API](../video-indexer-output-json-v2.md#get-insights-produced-by-the-api). diff --git a/articles/azure-video-indexer/includes/insights.md b/articles/azure-video-indexer/includes/insights.md new file mode 100644 index 0000000000000..21f74382ebec2 --- /dev/null +++ b/articles/azure-video-indexer/includes/insights.md @@ -0,0 +1,10 @@ +--- +author: Juliako +ms.topic: include +ms.service: azure-video-indexer +ms.date: 06/02/2022 +ms.author: juliako +--- + +> [!TIP] +> The JSON output produced by the website or API contains `Insights` and `SummarizedInsights` elements. We highly recommend using `Insights` and not using `SummarizedInsights` (which is present for backward compatibility). diff --git a/articles/azure-video-indexer/includes/regulation.md b/articles/azure-video-indexer/includes/regulation.md index 991c7fe77cb37..d836f1ad93f8c 100644 --- a/articles/azure-video-indexer/includes/regulation.md +++ b/articles/azure-video-indexer/includes/regulation.md @@ -6,4 +6,4 @@ ms.author: juliako --- > [!Warning] -> On June 11, 2020, Microsoft announced that it will not sell facial recognition technology to police departments in the United States until strong regulation, grounded in human rights, has been enacted. As such, customers may not use facial recognition features or functionality included in Azure Services, such as Face or Azure Video Indexer, if a customers is, or is allowing use of such services by or for, a police department in the United States. +> On June 11, 2020, Microsoft announced that it will not sell facial recognition technology to police departments in the United States until strong regulation, grounded in human rights, has been enacted. As such, customers may not use facial recognition features or functionality included in Azure Services, such as Face or Azure Video Indexer, if a customer is, or is allowing use of such services by or for, a police department in the United States. diff --git a/articles/azure-video-indexer/manage-account-connected-to-azure.md b/articles/azure-video-indexer/manage-account-connected-to-azure.md index 56deb8a7884da..def37b0b37352 100644 --- a/articles/azure-video-indexer/manage-account-connected-to-azure.md +++ b/articles/azure-video-indexer/manage-account-connected-to-azure.md @@ -64,7 +64,7 @@ If your account needs some adjustments, you see relevant errors and warnings abo * Media reserved units - You must allocate Media Reserved Units on your Media Service resource in order to index videos. For optimal indexing performance, it's recommended to allocate at least 10 S3 Reserved Units. For pricing information, see the FAQ section of the [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/) page. + You must allocate Media Reserved Units on your Media Service resource in order to index videos. For optimal indexing performance, it's recommended to allocate at least 10 S3 Reserved Units. For pricing information, see the FAQ section of the [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/) page. 
## Next steps diff --git a/articles/azure-video-indexer/media/edit-transcript-lines-portal/add-new-transcription-line.png b/articles/azure-video-indexer/media/edit-transcript-lines-portal/add-new-transcription-line.png new file mode 100644 index 0000000000000..fb68f334f82c2 Binary files /dev/null and b/articles/azure-video-indexer/media/edit-transcript-lines-portal/add-new-transcription-line.png differ diff --git a/articles/azure-video-indexer/media/edit-transcript-lines-portal/transcript-time-stamp.png b/articles/azure-video-indexer/media/edit-transcript-lines-portal/transcript-time-stamp.png new file mode 100644 index 0000000000000..790d8ae72cdf8 Binary files /dev/null and b/articles/azure-video-indexer/media/edit-transcript-lines-portal/transcript-time-stamp.png differ diff --git a/articles/azure-video-indexer/monitor-video-indexer-data-reference.md b/articles/azure-video-indexer/monitor-video-indexer-data-reference.md index b5bc077244d93..cd057eebcb0e1 100644 --- a/articles/azure-video-indexer/monitor-video-indexer-data-reference.md +++ b/articles/azure-video-indexer/monitor-video-indexer-data-reference.md @@ -41,7 +41,7 @@ Azure Video Indexer currently does not support any monitoring on metrics. --------------**OPTION 2 EXAMPLE** ------------- - @@ -163,7 +163,7 @@ This section refers to all of the Azure Monitor Logs Kusto tables relevant to Az @@ -229,7 +229,7 @@ The following table lists the operations related to Azure Video Indexer that may -For more information on the schema of Activity Log entries, see [Activity Log schema](/azure/azure-monitor/essentials/activity-log-schema). +For more information on the schema of Activity Log entries, see [Activity Log schema](../azure-monitor/essentials/activity-log-schema.md). ## Schemas @@ -268,5 +268,5 @@ The following schemas are in use by Azure Video Indexer ## Next steps -- See [Monitoring Azure Azure Video Indexer](monitor-video-indexer.md) for a description of monitoring Azure Video Indexer. -- See [Monitoring Azure resources with Azure Monitor](/azure/azure-monitor/essentials/monitor-azure-resource) for details on monitoring Azure resources. +- See [Monitoring Azure Video Indexer](monitor-video-indexer.md) for a description of monitoring Azure Video Indexer. +- See [Monitoring Azure resources with Azure Monitor](../azure-monitor/essentials/monitor-azure-resource.md) for details on monitoring Azure resources. \ No newline at end of file diff --git a/articles/azure-video-indexer/monitor-video-indexer.md b/articles/azure-video-indexer/monitor-video-indexer.md index 6147e8934bbeb..f76efef9c7bec 100644 --- a/articles/azure-video-indexer/monitor-video-indexer.md +++ b/articles/azure-video-indexer/monitor-video-indexer.md @@ -25,7 +25,7 @@ Keep the headings in this order. When you have critical applications and business processes relying on Azure resources, you want to monitor those resources for their availability, performance, and operation. -This article describes the monitoring data generated by Azure Video Indexer. Azure Video Indexer uses [Azure Monitor](/azure/azure-monitor/overview). If you are unfamiliar with the features of Azure Monitor common to all Azure services that use it, read [Monitoring Azure resources with Azure Monitor](/azure/azure-monitor/essentials/monitor-azure-resource). +This article describes the monitoring data generated by Azure Video Indexer. Azure Video Indexer uses [Azure Monitor](../azure-monitor/overview.md). 
If you are unfamiliar with the features of Azure Monitor common to all Azure services that use it, read [Monitoring Azure resources with Azure Monitor](../azure-monitor/essentials/monitor-azure-resource.md). @@ -51,7 +51,7 @@ Some services in Azure have a special focused pre-built monitoring dashboard in ## Monitoring data -Azure Video Indexer collects the same kinds of monitoring data as other Azure resources that are described in [Monitoring data from Azure resources](/azure/azure-monitor/essentials/monitor-azure-resource#monitoring-data-from-Azure-resources). +Azure Video Indexer collects the same kinds of monitoring data as other Azure resources that are described in [Monitoring data from Azure resources](../azure-monitor/essentials/monitor-azure-resource.md#monitoring-data-from-azure-resources). See [Monitoring *Azure Video Indexer* data reference](monitor-video-indexer-data-reference.md) for detailed information on the metrics and logs metrics created by Azure Video Indexer. @@ -86,7 +86,7 @@ Currently Azure Video Indexer does not support monitoring of metrics. - @@ -106,9 +106,9 @@ If you don't support resource logs, say so. Some services may be only onboarded Data in Azure Monitor Logs is stored in tables where each table has its own set of unique properties. -All resource logs in Azure Monitor have the same fields followed by service-specific fields. The common schema is outlined in [Azure Monitor resource log schema](/azure/azure-monitor/essentials/resource-logs-schema) The schema for Azure Video Indexer resource logs is found in the [Azure Video Indexer Data Reference](monitor-video-indexer-data-reference.md#schemas) +All resource logs in Azure Monitor have the same fields followed by service-specific fields. The common schema is outlined in [Azure Monitor resource log schema](../azure-monitor/essentials/resource-logs-schema.md) The schema for Azure Video Indexer resource logs is found in the [Azure Video Indexer Data Reference](monitor-video-indexer-data-reference.md#schemas) -The [Activity log](/azure/azure-monitor/essentials/activity-log) is a type of platform sign-in Azure that provides insight into subscription-level events. You can view it independently or route it to Azure Monitor Logs, where you can do much more complex queries using Log Analytics. +The [Activity log](../azure-monitor/essentials/activity-log.md) is a type of platform sign-in Azure that provides insight into subscription-level events. You can view it independently or route it to Azure Monitor Logs, where you can do much more complex queries using Log Analytics. For a list of the types of resource logs collected for Azure Video Indexer, see [Monitoring Azure Video Indexer data reference](monitor-video-indexer-data-reference.md#resource-logs) @@ -122,7 +122,7 @@ For a list of the tables used by Azure Monitor Logs and queryable by Log Analyti > [!IMPORTANT] -> When you select **Logs** from the Azure Video Indexer account menu, Log Analytics is opened with the query scope set to the current Azure Video Indexer account. This means that log queries will only include data from that resource. If you want to run a query that includes data from other Azure Video Indexer account or data from other Azure services, select **Logs** from the **Azure Monitor** menu. See [Log query scope and time range in Azure Monitor Log Analytics](/azure/azure-monitor/logs/scope) for details. 
+> When you select **Logs** from the Azure Video Indexer account menu, Log Analytics is opened with the query scope set to the current Azure Video Indexer account. This means that log queries will only include data from that resource. If you want to run a query that includes data from other Azure Video Indexer account or data from other Azure services, select **Logs** from the **Azure Monitor** menu. See [Log query scope and time range in Azure Monitor Log Analytics](../azure-monitor/logs/scope.md) for details. @@ -151,10 +151,10 @@ VIAudit This information is the BIGGEST request we get in Azure Monitor so do not avoid it long term. People don't know what to monitor for best results. Be prescriptive --> -Azure Monitor alerts proactively notify you when important conditions are found in your monitoring data. They allow you to identify and address issues in your system before your customers notice them. You can set alerts on [metrics](/azure/azure-monitor/alerts/alerts-metric-overview), [logs](/azure/azure-monitor/alerts/alerts-unified-log), and the [activity log](/azure/azure-monitor/alerts/activity-log-alerts). Different types of alerts have benefits and drawbacks. +Azure Monitor alerts proactively notify you when important conditions are found in your monitoring data. They allow you to identify and address issues in your system before your customers notice them. You can set alerts on [metrics](../azure-monitor/alerts/alerts-metric-overview.md), [logs](../azure-monitor/alerts/alerts-unified-log.md), and the [activity log](../azure-monitor/alerts/activity-log-alerts.md). Different types of alerts have benefits and drawbacks. - The following table lists common and recommended alert rules for Azure Video Indexer. @@ -176,4 +176,4 @@ VIAudit - See [Monitoring Azure Video Indexer data reference](monitor-video-indexer-data-reference.md) for a reference of the metrics, logs, and other important values created by Azure Video Indexer account. -- See [Monitoring Azure resources with Azure Monitor](/azure/azure-monitor/essentials/monitor-azure-resource) for details on monitoring Azure resources. +- See [Monitoring Azure resources with Azure Monitor](../azure-monitor/essentials/monitor-azure-resource.md) for details on monitoring Azure resources. diff --git a/articles/azure-video-indexer/network-security.md b/articles/azure-video-indexer/network-security.md index 2fa208e0aaea9..1611725c345b4 100644 --- a/articles/azure-video-indexer/network-security.md +++ b/articles/azure-video-indexer/network-security.md @@ -1,6 +1,6 @@ --- title: How to enable network security -description: This article gives an overview of the Azure Video Indexer (formerly Video Analyzer for Media) network security options. +description: This article gives an overview of the Azure Video Indexer network security options. ms.topic: article ms.date: 04/11/2022 ms.author: juliako @@ -8,7 +8,7 @@ ms.author: juliako # NSG service tags for Azure Video Indexer -Azure Video Indexer (formerly Video Analyzer for Media) is a service hosted on Azure. In some architecture cases the service needs to interact with other services in order to index video files (that is, a Storage Account) or when a customer orchestrates indexing jobs against our API endpoint using their own service hosted on Azure (i.e AKS, Web Apps, Logic Apps, Functions). Customers who would like to limit access to their resources on a network level can use [Network Security Groups with Service Tags](https://docs.microsoft.com/azure/virtual-network/service-tags-overview). 
A service tag represents a group of IP address prefixes from a given Azure service, in this case Azure Video Indexer. Microsoft manages the address prefixes grouped by the service tag and automatically updates the service tag as addresses change in our backend, minimizing the complexity of frequent updates to network security rules by the customer. +Azure Video Indexer is a service hosted on Azure. In some architecture cases the service needs to interact with other services in order to index video files (that is, a Storage Account) or when a customer orchestrates indexing jobs against our API endpoint using their own service hosted on Azure (i.e AKS, Web Apps, Logic Apps, Functions). Customers who would like to limit access to their resources on a network level can use [Network Security Groups with Service Tags](/azure/virtual-network/service-tags-overview). A service tag represents a group of IP address prefixes from a given Azure service, in this case Azure Video Indexer. Microsoft manages the address prefixes grouped by the service tag and automatically updates the service tag as addresses change in our backend, minimizing the complexity of frequent updates to network security rules by the customer. ## Get started with service tags @@ -34,7 +34,7 @@ This tag contains the IP addresses of Azure Video Indexer services for all regio ## Using Azure CLI -You can also use Azure CLI to create a new or update an existing NSG rule and add the **AzureVideoAnalyzerForMedia** service tag using the `--source-address-prefixes`. For a full list of CLI commands and parameters see [az network nsg](https://docs.microsoft.com/cli/azure/network/nsg/rule?view=azure-cli-latest) +You can also use Azure CLI to create a new or update an existing NSG rule and add the **AzureVideoAnalyzerForMedia** service tag using the `--source-address-prefixes`. For a full list of CLI commands and parameters see [az network nsg](/cli/azure/network/nsg/rule?view=azure-cli-latest&preserve-view=true) Example of a security rule using service tags. For more details, visit https://aka.ms/servicetags diff --git a/articles/azure-video-indexer/odrv-download.md b/articles/azure-video-indexer/odrv-download.md index 14b4394d46bfe..523bf5e75877a 100644 --- a/articles/azure-video-indexer/odrv-download.md +++ b/articles/azure-video-indexer/odrv-download.md @@ -7,11 +7,11 @@ ms.date: 12/17/2021 # Index your videos stored on OneDrive -This article shows how to index videos stored on OneDrive by using the Azure Video Indexer (formerly Azure Azure Video Indexer) website. +This article shows how to index videos stored on OneDrive by using the Azure Video Indexer website. ## Supported file formats -For a list of file formats that you can use with Azure Video Indexer, see [Standard Encoder formats and codecs](/azure/azure/media-services/latest/encode-media-encoder-standard-formats-reference). +For a list of file formats that you can use with Azure Video Indexer, see [Standard Encoder formats and codecs](/azure/media-services/latest/encode-media-encoder-standard-formats-reference). ## Index a video by using the website @@ -91,7 +91,7 @@ Use this parameter to define an AI bundle that you want to apply on your audio o Azure Video Indexer covers up to two tracks of audio. If the file has more audio tracks, they're treated as one track. If you want to index the tracks separately, you need to extract the relevant audio file and index it as `AudioOnly`. -Price depends on the selected indexing option. 
For more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/). +Price depends on the selected indexing option. For more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/). #### priority @@ -108,7 +108,7 @@ When you're using the [Upload Video](https://api-portal.videoindexer.ai/api-deta After the indexing and encoding jobs are done, the video is published so you can also stream your video. The streaming endpoint from which you want to stream the video must be in the **Running** state. For `SingleBitrate`, the standard encoder cost will apply for the output. If the video height is greater than or equal to 720, Azure Video Indexer encodes it as 1280 x 720. Otherwise, it's encoded as 640 x 468. -The default setting is [content-aware encoding](/azure/azure/media-services/latest/encode-content-aware-concept). +The default setting is [content-aware encoding](/azure/media-services/latest/encode-content-aware-concept). If you only want to index your video and not encode it, set `streamingPreset` to `NoStreaming`. @@ -118,6 +118,9 @@ This parameter specifies the URL of the video or audio file to be indexed. If th ### Code sample +> [!NOTE] +> The following sample is intended for Classic accounts only and isn't compatible with ARM accounts. For an updated sample for ARM, see [this ARM sample repo](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/ApiUsage/ArmBased/Program.cs). + The following C# code snippets demonstrate the usage of all the Azure Video Indexer APIs together. ### [Classic account](#tab/With-classic-account/) diff --git a/articles/azure-video-indexer/release-notes.md b/articles/azure-video-indexer/release-notes.md index 6e0ccead7ae4b..9f80cecf6b057 100644 --- a/articles/azure-video-indexer/release-notes.md +++ b/articles/azure-video-indexer/release-notes.md @@ -45,7 +45,7 @@ var uploadRequestResult = await client.PostAsync($"{apiUrl}/{accountInfo.Loc ### Line breaking in transcripts -Improved line break logic to better split transcript into sentences. New editing capabilities are now available through the Azure Video Indexer portal, such as adding a new line and editing the line’s timestamp. +Improved line break logic to better split transcript into sentences. New editing capabilities are now available through the Azure Video Indexer portal, such as adding a new line and editing the line’s timestamp. For more information, see [Insert or remove transcript lines](edit-transcript-lines-portal.md). ### Azure Monitor integration @@ -100,8 +100,7 @@ Azure Video Indexer website is now supporting account management based on ARM in ### Leverage open-source code to create ARM based account -Added new code samples including HTTP calls to use Azure Video Indexer create, read, update and delete (CRUD) ARM API for solution developers. See [this sample](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/ARM-Samples/Create-Account -). +Added new code samples including HTTP calls to use Azure Video Indexer create, read, update and delete (CRUD) ARM API for solution developers. See [this sample](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/ARM-Quick-Start). 
## January 2022 @@ -182,7 +181,7 @@ Fixed bugs related to CSS, theming and accessibility: ### Automatic Scaling of Media Reserved Units -Starting August 1st 2021, Azure Video Indexer enabled [Media Reserved Units (MRUs)](/azure/azure/media-services/latest/concept-media-reserved-units) auto scaling by [Azure Media Services](/azure/azure/media-services/latest/media-services-overview), as a result you do not need to manage them through Azure Video Indexer. That will allow price optimization, for example price reduction in many cases, based on your business needs as it is being auto scaled. +Starting August 1st 2021, Azure Video Indexer enabled [Media Reserved Units (MRUs)](/azure/media-services/latest/concept-media-reserved-units) auto scaling by [Azure Media Services](/azure/media-services/latest/media-services-overview), as a result you do not need to manage them through Azure Video Indexer. That will allow price optimization, for example price reduction in many cases, based on your business needs as it is being auto scaled. ## June 2021 @@ -249,7 +248,7 @@ You can now see the detected acoustic events in the closed captions file. The fi ### Audio analysis -Audio analysis is available now in additional new bundle of audio features at different price point. The new **Basic Audio** analysis preset provides a low-cost option to only extract speech transcription, translation and format output captions and subtitles. The **Basic Audio** preset will produce two separate meters on your bill, including a line for transcription and a separate line for caption and subtitle formatting. More information on the pricing, see the [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/) page. +Audio analysis is available now in additional new bundle of audio features at different price point. The new **Basic Audio** analysis preset provides a low-cost option to only extract speech transcription, translation and format output captions and subtitles. The **Basic Audio** preset will produce two separate meters on your bill, including a line for transcription and a separate line for caption and subtitle formatting. More information on the pricing, see the [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/) page. The newly added bundle is available when indexing or re-indexing your file by choosing the **Advanced option** -> **Basic Audio** preset (under the **Video + audio indexing** drop-down box). diff --git a/articles/azure-video-indexer/scenes-shots-keyframes.md b/articles/azure-video-indexer/scenes-shots-keyframes.md index 02bc622bd4842..8f6e7d9265d60 100644 --- a/articles/azure-video-indexer/scenes-shots-keyframes.md +++ b/articles/azure-video-indexer/scenes-shots-keyframes.md @@ -2,7 +2,7 @@ title: Azure Video Indexer scenes, shots, and keyframes description: This topic gives an overview of the Azure Video Indexer scenes, shots, and keyframes. ms.topic: how-to -ms.date: 07/05/2019 +ms.date: 06/07/2022 ms.author: juliako --- @@ -35,11 +35,11 @@ To extract high-resolution keyframes for your video, you must first upload and i #### With the Azure Video Indexer website -To extract keyframes using the Azure Video Indexer website, upload and index your video. Once the indexing job is complete, click on the **Download** button and select **Artifacts (ZIP)**. This will download the artifacts folder to your computer. +To extract keyframes using the Azure Video Indexer website, upload and index your video. 
Once the indexing job is complete, click on the **Download** button and select **Artifacts (ZIP)**. This will download the artifacts folder to your computer (make sure to view the warning regarding artifacts below). Unzip and open the folder. In the *_KeyframeThumbnail* folder, and you will find all of the keyframes that were extracted from your video. ![Screenshot that shows the "Download" drop-down with "Artifacts" selected.](./media/scenes-shots-keyframes/extracting-keyframes2.png) -Unzip and open the folder. In the *_KeyframeThumbnail* folder, and you will find all of the keyframes that were extracted from your video. +[!INCLUDE [artifacts](./includes/artifacts.md)] #### With the Azure Video Indexer API diff --git a/articles/azure-video-indexer/toc.yml b/articles/azure-video-indexer/toc.yml index dc2e0e7a801ca..6253ac5408c2d 100644 --- a/articles/azure-video-indexer/toc.yml +++ b/articles/azure-video-indexer/toc.yml @@ -94,6 +94,8 @@ href: network-security.md - name: Disaster recovery href: video-indexer-disaster-recovery.md + - name: Insert or remove transcript lines + href: edit-transcript-lines-portal.md - name: Customize content models items: - name: Animated characters @@ -131,7 +133,7 @@ - name: Azure Roadmap href: https://azure.microsoft.com/roadmap/?category=web-mobile - name: Pricing - href: https://azure.microsoft.com/pricing/details/azure/media-services/ + href: https://azure.microsoft.com/pricing/details/video-indexer/ - name: Regional availability href: https://azure.microsoft.com/global-infrastructure/services/ - name: Regions diff --git a/articles/azure-video-indexer/upload-index-videos.md b/articles/azure-video-indexer/upload-index-videos.md index bbc492580b690..2cdedd3d83fd8 100644 --- a/articles/azure-video-indexer/upload-index-videos.md +++ b/articles/azure-video-indexer/upload-index-videos.md @@ -15,17 +15,19 @@ When you're creating an Azure Video Indexer account, you choose between: - A free trial account. Azure Video Indexer provides up to 600 minutes of free indexing to website users and up to 2,400 minutes of free indexing to API users. - A paid option where you're not limited by a quota. You create an Azure Video Indexer account that's [connected to your Azure subscription and an Azure Media Services account](connect-to-azure.md). You pay for indexed minutes. -For more information about account types, see [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/). +For more information about account types, see [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/). + +After you upload and index a video, you can use [Azure Video Indexer website](video-indexer-view-edit.md) or [Azure Video Indexer Developer Portal](video-indexer-use-apis.md) to see the insights of the video (see [Examine the Azure Video Indexer output](video-indexer-output-json-v2.md)). When you're uploading videos by using the API, you have the following options: * Upload your video from a URL (preferred). * Send the video file as a byte array in the request body. -* Use existing an Azure Media Services asset by providing the [asset ID](/azure/azure/media-services/latest/assets-concept). This option is supported in paid accounts only. +* Use existing an Azure Media Services asset by providing the [asset ID](/azure/media-services/latest/assets-concept). This option is supported in paid accounts only. 
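As a minimal sketch of the URL option above, the following PowerShell call posts a video URL to the Upload Video operation of the [Azure Video Indexer Developer Portal](https://api-portal.videoindexer.ai/) API. The location, account ID, access token, and video URL are placeholders, and only a few query parameters are shown; the priority and streaming preset parameters described later in this article can be appended in the same way.

```powershell
# Placeholders -- replace with values for your Azure Video Indexer account.
$location    = "trial"                   # trial account; use the Azure region for a paid account
$accountId   = "<account-id>"
$accessToken = "<account-access-token>"  # obtained from the account access token operation
$videoUrl    = [uri]::EscapeDataString("https://example.com/videos/my-video.mp4")

# Upload (and index) a video directly from its URL.
$uri = "https://api.videoindexer.ai/$location/Accounts/$accountId/Videos" +
       "?name=my-video&privacy=Private&videoUrl=$videoUrl&accessToken=$accessToken"

$response = Invoke-RestMethod -Method Post -Uri $uri

# The response includes the ID of the new video, which is used in later API calls.
$response.id
```

For a trial account, `location` is `trial`; for a paid account, it's the Azure region where the account was created.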
## Supported file formats -For a list of file formats that you can use with Azure Video Indexer, see [Standard Encoder formats and codecs](/azure/azure/media-services/latest/encode-media-encoder-standard-formats-reference). +For a list of file formats that you can use with Azure Video Indexer, see [Standard Encoder formats and codecs](/azure/media-services/latest/encode-media-encoder-standard-formats-reference). ## Storage of video files @@ -89,7 +91,7 @@ Use this parameter to define an AI bundle that you want to apply on your audio o Azure Video Indexer covers up to two tracks of audio. If the file has more audio tracks, they're treated as one track. If you want to index the tracks separately, you need to extract the relevant audio file and index it as `AudioOnly`. -Price depends on the selected indexing option. For more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/). +Price depends on the selected indexing option. For more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/). #### priority @@ -106,7 +108,7 @@ When you're using the [Upload Video](https://api-portal.videoindexer.ai/api-deta After the indexing and encoding jobs are done, the video is published so you can also stream your video. The streaming endpoint from which you want to stream the video must be in the **Running** state. For `SingleBitrate`, the standard encoder cost will apply for the output. If the video height is greater than or equal to 720, Azure Video Indexer encodes it as 1280 x 720. Otherwise, it's encoded as 640 x 468. -The default setting is [content-aware encoding](/azure/azure/media-services/latest/encode-content-aware-concept). +The default setting is [content-aware encoding](/azure/media-services/latest/encode-content-aware-concept). If you only want to index your video and not encode it, set `streamingPreset` to `NoStreaming`. diff --git a/articles/azure-video-indexer/video-indexer-get-started.md b/articles/azure-video-indexer/video-indexer-get-started.md index 60e565eecbe6e..0cb23060c1304 100644 --- a/articles/azure-video-indexer/video-indexer-get-started.md +++ b/articles/azure-video-indexer/video-indexer-get-started.md @@ -11,7 +11,7 @@ ms.custom: mode-other This getting started quickstart shows how to sign in to the Azure Video Indexer website and how to upload your first video. -When creating an Azure Video Indexer account, you can choose a free trial account (where you get a certain number of free indexing minutes) or a paid option (where you aren't limited by the quota). With free trial, Azure Video Indexer provides up to 600 minutes of free indexing to website users and up to 2400 minutes of free indexing to API users. With paid option, you create an Azure Video Indexer account that is [connected to your Azure subscription and an Azure Media Services account](connect-to-azure.md). You pay for minutes indexed, for more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/). +When creating an Azure Video Indexer account, you can choose a free trial account (where you get a certain number of free indexing minutes) or a paid option (where you aren't limited by the quota). With free trial, Azure Video Indexer provides up to 600 minutes of free indexing to website users and up to 2400 minutes of free indexing to API users. 
With paid option, you create an Azure Video Indexer account that is [connected to your Azure subscription and an Azure Media Services account](connect-to-azure.md). You pay for minutes indexed, for more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/). ## Sign up for Azure Video Indexer @@ -38,27 +38,29 @@ See the [input container/file formats](/azure/media-services/latest/encode-media > [!div class="mx-imgBorder"] > :::image type="content" source="./media/video-indexer-get-started/video-indexer-upload.png" alt-text="Upload"::: -1. Once your video has been uploaded, Azure Video Indexer starts indexing and analyzing the video. You see the progress. +1. Once your video has been uploaded, Azure Video Indexer starts indexing and analyzing the video. As a result a JSON output with insights is produced. + + You see the progress. > [!div class="mx-imgBorder"] - > :::image type="content" source="./media/video-indexer-get-started/progress.png" alt-text="Progress of the upload"::: + > :::image type="content" source="./media/video-indexer-get-started/progress.png" alt-text="Progress of the upload"::: + + The produced JSON output contains `Insights` and `SummarizedInsights` elements. We highly recommend using `Insights` and not using `SummarizedInsights` (which is present for backward compatibility). 1. Once Azure Video Indexer is done analyzing, you'll get an email with a link to your video and a short description of what was found in your video. For example: people, spoken and written words, topics, and named entities. 1. You can later find your video in the library list and perform different operations. For example: search, reindex, edit. > [!div class="mx-imgBorder"] > :::image type="content" source="./media/video-indexer-get-started/uploaded.png" alt-text="Uploaded the upload"::: -## Supported browsers +After you upload and index a video, you can continue using [Azure Video Indexer website](video-indexer-view-edit.md) or [Azure Video Indexer Developer Portal](video-indexer-use-apis.md) to see the insights of the video (see [Examine the Azure Video Indexer output](video-indexer-output-json-v2.md)). -For more information, see [supported browsers](video-indexer-overview.md#supported-browsers). +For more details, see [Upload and index videos](upload-index-videos.md). -## See also +To start using the APIs, see [use APIs](video-indexer-use-apis.md) -See [Upload and index videos](upload-index-videos.md) for more details. - -After you upload and index a video, you can start using [Azure Video Indexer website](video-indexer-view-edit.md) or [Azure Video Indexer Developer Portal](video-indexer-use-apis.md) to see the insights of the video. +## Supported browsers -[Start using APIs](video-indexer-use-apis.md) +For more information, see [supported browsers](video-indexer-overview.md#supported-browsers). ## Next steps diff --git a/articles/azure-video-indexer/video-indexer-output-json-v2.md b/articles/azure-video-indexer/video-indexer-output-json-v2.md index 918e74fc0d389..5a504fea43696 100644 --- a/articles/azure-video-indexer/video-indexer-output-json-v2.md +++ b/articles/azure-video-indexer/video-indexer-output-json-v2.md @@ -5,7 +5,7 @@ services: azure-video-analyzer author: Juliako manager: femila ms.topic: article -ms.date: 11/16/2020 +ms.date: 05/19/2022 ms.author: juliako --- @@ -13,35 +13,60 @@ ms.author: juliako When a video is indexed, Azure Video Indexer produces the JSON content that contains details of the specified video insights. 
The insights include transcripts, optical character recognition elements (OCRs), faces, topics, blocks, and similar details. Each insight type includes instances of time ranges that show when the insight appears in the video. -You can visually examine the video's summarized insights by pressing the **Play** button on the video on the [Azure Video Indexer](https://www.videoindexer.ai/) website. - -You can also use the Get Video Index API. If the response status is `OK`, you get a detailed JSON output as the response content. +To visually examine the video's insights, press the **Play** button on the video on the [Azure Video Indexer](https://www.videoindexer.ai/) website. ![Screenshot of the Insights tab in Azure Video Indexer.](./media/video-indexer-output-json/video-indexer-summarized-insights.png) +When indexing with an API and the response status is OK, you get a detailed JSON output as the response content. When calling the [Get Video Index](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Video-Index) API, we recommend passing `&includeSummarizedInsights=false`. + +[!INCLUDE [insights](./includes/insights.md)] + This article examines the Azure Video Indexer output (JSON content). For information about what features and insights are available to you, see [Azure Video Indexer insights](video-indexer-overview.md#video-insights). > [!NOTE] > All the access tokens in Azure Video Indexer expire in one hour. -## Get the insights +## Get the insights using the website To get insights produced on the website or the Azure portal: 1. Browse to the [Azure Video Indexer](https://www.videoindexer.ai/) website and sign in. 1. Find a video whose output you want to examine. 1. Press **Play**. -1. Select the **Insights** tab to get summarized insights. Or select the **Timeline** tab to filter the relevant insights. -1. Download artifacts and what's in them. +1. Choose the **Insights** tab. +2. Select which insights you want to view (under the **View** drop-down). +3. Go to the **Timeline** tab to see timestamped transcript lines. +4. Select **Download** > **Insights (JSON)** to get the insights output file. +5. If you want to download artifacts, beware of the following: + + [!INCLUDE [artifacts](./includes/artifacts.md)] For more information, see [View and edit video insights](video-indexer-view-edit.md). -To get insights produced by the API: +## Get insights produced by the API + +To retrieve the JSON file (OCR, face, keyframe, etc.) or an artifact type, call the [Get Video Index API](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Video-Index). + +This API returns a URL only with a link to the specific resource type you request. An additional GET request must be made to this URL for the specific artifact. The file types for each artifact type vary depending on the artifact: -- To retrieve the JSON file, call the [Get Video Index API](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Video-Index). -- If you're interested in specific artifacts, call the [Get Video Artifact Download URL API](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Video-Artifact-Download-Url). +### JSON - In the API call, specify the requested artifact type (for example, OCR, face, or keyframe). 
+* OCR +* Faces +* VisualContentModeration +* LanguageDetection +* MultiLanguageDetection +* Metadata +* Emotions +* TextualContentModeration +* AudioEffects +* ObservedPeople +* Labels + +### Zip file containing JPG images + +* KeyframesThumbnails +* FacesThumbnails ## Root elements of the insights @@ -59,7 +84,7 @@ To get insights produced by the API: |`isEditable`|Indicates whether the current user is authorized to edit the playlist.| |`isBase`|Indicates whether the playlist is a base playlist (a video) or a playlist made of other videos (derived).| |`durationInSeconds`|The total duration of the playlist.| -|`summarizedInsights`|Contains one [summarized insight](#summarizedinsights). +|`summarizedInsights`|Contains one [summarized insight](#summary-of-the-insights). |`videos`|A list of [videos](#videos) that construct the playlist.
                  If this playlist is constructed of time ranges of other videos (derived), the videos in this list will contain only data from the included time ranges.| ```json @@ -81,10 +106,13 @@ To get insights produced by the API: } ``` -## summarizedInsights +## Summary of the insights This section shows a summary of the insights. +> [!TIP] +> The produced JSON output contains `Insights` and `SummarizedInsights` elements. We highly recommend using `Insights` and not using `SummarizedInsights` (which is present for backward compatibility). + |Attribute | Description| |---|---| |`name`|The name of the video. For example: `Azure Monitor`.| @@ -168,7 +196,7 @@ A face might have an ID, a name, a thumbnail, other metadata, and a list of its |`transcript`|The [transcript](#transcript) insight.| |`ocr`|The [OCR](#ocr) insight.| |`keywords`|The [keywords](#keywords) insight.| -|`blocks`|Might contain one or more [blocks](#blocks).| +|`transcripts`|Might contain one or more [transcript](#transcript).| |`faces/animatedCharacters`|The [faces/animatedCharacters](#facesanimatedcharacters) insight.| |`labels`|The [labels](#labels) insight.| |`shots`|The [shots](#shots) insight.| @@ -202,13 +230,6 @@ Example: } ``` -#### blocks - -Attribute | Description ----|--- -`id`|The ID of the block.| -`instances`|A list of time ranges for this block.| - #### transcript |Name|Description| @@ -664,7 +685,7 @@ Sentiments are aggregated by their `sentimentType` field (`Positive`, `Neutral`, #### visualContentModeration -The `visualContentModeration` block contains time ranges that Azure Video Indexer found to potentially have adult content. If `visualContentModeration` is empty, no adult content was identified. +The `visualContentModeration` transcript contains time ranges that Azure Video Indexer found to potentially have adult content. If `visualContentModeration` is empty, no adult content was identified. Videos that contain adult or racy content might be available for private view only. Users have the option to submit a request for a human review of the content. In that case, the `IsAdult` attribute will contain the result of the human review. @@ -879,7 +900,7 @@ Azure Video Indexer makes an inference of main topics from transcripts. When pos } ] }, -` ` ` + ``` ## Next steps diff --git a/articles/azure-video-indexer/video-indexer-overview.md b/articles/azure-video-indexer/video-indexer-overview.md index 750965a146ac8..f1940158e5716 100644 --- a/articles/azure-video-indexer/video-indexer-overview.md +++ b/articles/azure-video-indexer/video-indexer-overview.md @@ -2,7 +2,7 @@ title: What is Azure Video Indexer? description: This article gives an overview of the Azure Video Indexer service. ms.topic: overview -ms.date: 02/15/2022 +ms.date: 06/09/2022 ms.author: juliako --- @@ -10,17 +10,18 @@ ms.author: juliako [!INCLUDE [regulation](./includes/regulation.md)] -Azure Video Indexer is a cloud application, part of Azure Applied AI Services, built on Azure Media Services and Azure Cognitive Services (such as the Face, Translator, Computer Vision, and Speech). It enables you to extract the insights from your videos using Azure Video Indexer video and audio models. - -To start extracting insights with Azure Video Indexer, you need to create an account and upload videos. When you upload your videos to Azure Video Indexer, it analyses both visuals and audio by running different AI models. As Azure Video Indexer analyzes your video, the insights that are extracted by the AI models. 
+> [!NOTE] +> The service is now rebranded from Azure Video Analyzer for Media to **Azure Video Indexer**. Click [here](https://vi.microsoft.com) to read more. -When you create an Azure Video Indexer account and connect it to Media Services, the media and metadata files are stored in the Azure storage account associated with that Media Services account. For more information, see [Create an Azure Video Indexer account connected to Azure](connect-to-azure.md). +Azure Video Indexer is a cloud application, part of Azure Applied AI Services, built on Azure Media Services and Azure Cognitive Services (such as the Face, Translator, Computer Vision, and Speech). It enables you to extract the insights from your videos using Azure Video Indexer video and audio models. -The following diagram is an illustration and not a technical explanation of how Azure Video Indexer works in the backend. +Azure Video Indexer analyzes the video and audio content by running 30+ AI models, generating rich insights. Below is an illustration of the audio and video analysis performed by Azure Video Indexer in the background. > [!div class="mx-imgBorder"] > :::image type="content" source="./media/video-indexer-overview/model-chart.png" alt-text="Azure Video Indexer flow diagram"::: +To start extracting insights with Azure Video Indexer, you need to [create an account](connect-to-azure.md) and upload videos, see the [how can i get started](#how-can-i-get-started-with-azure-video-indexer) section below. + ## Compliance, Privacy and Security As an important reminder, you must comply with all applicable laws in your use of Azure Video Indexer, and you may not use Azure Video Indexer or any Azure service in a manner that violates the rights of others, or that may be harmful to others. @@ -34,7 +35,7 @@ To learn about compliance, privacy and security in Azure Video Indexer please vi Azure Video Indexer's insights can be applied to many scenarios, among them are: * *Deep search*: Use the insights extracted from the video to enhance the search experience across a video library. For example, indexing spoken words and faces can enable the search experience of finding moments in a video where a person spoke certain words or when two people were seen together. Search based on such insights from videos is applicable to news agencies, educational institutes, broadcasters, entertainment content owners, enterprise LOB apps, and in general to any industry that has a video library that users need to search against. -* *Content creation*: Create trailers, highlight reels, social media content, or news clips based on the insights Azure Video Indexer extracts from your content. Keyframes, scenes markers, and timestamps for the people and label appearances make the creation process much smoother and easier, and allows you to get to the parts of the video you need for the content you're creating. +* *Content creation*: Create trailers, highlight reels, social media content, or news clips based on the insights Azure Video Indexer extracts from your content. Keyframes, scenes markers, and timestamps of the people and label appearances make the creation process smoother and easier, enabling you to easily get to the parts of the video you need when creating content. 
* *Accessibility*: Whether you want to make your content available for people with disabilities or if you want your content to be distributed to different regions using different languages, you can use the transcription and translation provided by Azure Video Indexer in multiple languages. * *Monetization*: Azure Video Indexer can help increase the value of videos. For example, industries that rely on ad revenue (news media, social media, and so on) can deliver relevant ads by using the extracted insights as additional signals to the ad server. * *Content moderation*: Use textual and visual content moderation models to keep your users safe from inappropriate content and validate that the content you publish matches your organization's values. You can automatically block certain videos or alert your users about the content. @@ -76,7 +77,6 @@ The following list shows the insights you can retrieve from your videos using Az * **Speaker enumeration**: Maps and understands which speaker spoke which words and when. Sixteen speakers can be detected in a single audio-file. * **Speaker statistics**: Provides statistics for speakers' speech ratios. * **Textual content moderation**: Detects explicit text in the audio transcript. -* **Audio effects** (preview): Detects the following audio effects in the non-speech segments of the content: Gunshot, Glass shatter, Alarm, Siren, Explosion, Dog Bark, Screaming, Laughter, Crowd reactions (cheering, clapping, and booing) and Silence. Note: the full set of events is available only when choosing ‘Advanced Audio Analysis’ in upload preset, otherwise only ‘Silence’ and ‘Crowd reaction’ will be available. * **Emotion detection**: Identifies emotions based on speech (what's being said) and voice tonality (how it's being said). The emotion could be joy, sadness, anger, or fear. * **Translation**: Creates translations of the audio transcript to 54 different languages. * **Audio effects detection** (preview): Detects the following audio effects in the non-speech segments of the content: alarm or siren, dog barking, crowd reactions (cheering, clapping, and booing), gunshot or explosion, laughter, breaking glass, and silence. @@ -133,6 +133,7 @@ The following list shows the supported browsers that you can use for the Azure V You're ready to get started with Azure Video Indexer. For more information, see the following articles: +- [Pricing](https://azure.microsoft.com/pricing/details/video-indexer/) - [Get started with the Azure Video Indexer website](video-indexer-get-started.md). - [Process content with Azure Video Indexer REST API](video-indexer-use-apis.md). - [Embed visual widgets in your application](video-indexer-embed-widgets.md). diff --git a/articles/azure-video-indexer/video-indexer-use-apis.md b/articles/azure-video-indexer/video-indexer-use-apis.md index e2470bd3647d4..5aa9353310379 100644 --- a/articles/azure-video-indexer/video-indexer-use-apis.md +++ b/articles/azure-video-indexer/video-indexer-use-apis.md @@ -1,7 +1,7 @@ --- title: Use the Azure Video Indexer API description: This article describes how to get started with Azure Video Indexer API. -ms.date: 01/07/2021 +ms.date: 06/01/2022 ms.topic: tutorial ms.custom: devx-track-csharp --- @@ -10,7 +10,7 @@ ms.custom: devx-track-csharp Azure Video Indexer consolidates various audio and video artificial intelligence (AI) technologies offered by Microsoft into one integrated service, making development simpler. 
The APIs are designed to enable developers to focus on consuming Media AI technologies without worrying about scale, global reach, availability, and reliability of cloud platforms. You can use the API to upload your files, get detailed video insights, get URLs of embeddable insight and player widgets, and more. -When creating an Azure Video Indexer account, you can choose a free trial account (where you get a certain number of free indexing minutes) or a paid option (where you're not limited by the quota). With a free trial, Azure Video Indexer provides up to 600 minutes of free indexing to website users and up to 2400 minutes of free indexing to API users. With a paid option, you create an Azure Video Indexer account that's [connected to your Azure subscription and an Azure Media Services account](connect-to-azure.md). You pay for minutes indexed, for more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/). +When creating an Azure Video Indexer account, you can choose a free trial account (where you get a certain number of free indexing minutes) or a paid option (where you're not limited by the quota). With a free trial, Azure Video Indexer provides up to 600 minutes of free indexing to website users and up to 2400 minutes of free indexing to API users. With a paid option, you create an Azure Video Indexer account that's [connected to your Azure subscription and an Azure Media Services account](connect-to-azure.md). You pay for minutes indexed, for more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/). This article shows how the developers can take advantage of the [Azure Video Indexer API](https://api-portal.videoindexer.ai/). @@ -107,6 +107,9 @@ This section lists some recommendations when using Azure Video Indexer API. The following C# code snippet demonstrates the usage of all the Azure Video Indexer APIs together. +> [!NOTE] +> The following sample is intended for Classic accounts only and not compatible with ARM accounts. For an updated sample for ARM please see [this ARM sample repo](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/ApiUsage/ArmBased/Program.cs). + ```csharp var apiUrl = "https://api.videoindexer.ai"; var accountId = "..."; diff --git a/articles/azure-video-indexer/video-indexer-view-edit.md b/articles/azure-video-indexer/video-indexer-view-edit.md index d129e1bbae376..543d1b2593283 100644 --- a/articles/azure-video-indexer/video-indexer-view-edit.md +++ b/articles/azure-video-indexer/video-indexer-view-edit.md @@ -1,11 +1,10 @@ --- title: View and edit Azure Video Indexer insights description: This article demonstrates how to view and edit Azure Video Indexer insights. -services: azure-video-analyzer author: Juliako manager: femila ms.topic: article -ms.date: 05/15/2019 +ms.date: 06/07/2022 ms.author: juliako --- @@ -17,16 +16,25 @@ This topic shows you how to view and edit the Azure Video Indexer insights of a 2. Find a video from which you want to create your Azure Video Indexer insights. For more information, see [Find exact moments within videos](video-indexer-search.md). 3. Press **Play**. - The page shows the video's summarized insights. + The page shows the video's insights. ![Insights](./media/video-indexer-view-edit/video-indexer-summarized-insights.png) - -4. View the summarized insights of the video. +4. View the insights of the video. Summarized insights show an aggregated view of the data: faces, keywords, sentiments. 
For example, you can see the faces of people and the time ranges each face appears in and the % of the time it is shown. +    [!INCLUDE [insights](./includes/insights.md)] + +    Select the **Timeline** tab to see transcripts with timelines and other information that you can choose from the **View** drop-down. + The player and the insights are synchronized. For example, if you click a keyword or the transcript line, the player brings you to that moment in the video. You can achieve the player/insights view and synchronization in your application. For more information, see [Embed Azure Indexer widgets into your application](video-indexer-embed-widgets.md). + If you want to download artifact files, beware of the following: + + [!INCLUDE [artifacts](./includes/artifacts.md)] + + For more information, see [Insights output](video-indexer-output-json-v2.md). + ## Next steps [Use your videos' deep insights](use-editor-create-project.md) diff --git a/articles/azure-vmware/attach-azure-netapp-files-to-azure-vmware-solution-hosts.md b/articles/azure-vmware/attach-azure-netapp-files-to-azure-vmware-solution-hosts.md new file mode 100644 index 0000000000000..0ba1d01e9baa0 --- /dev/null +++ b/articles/azure-vmware/attach-azure-netapp-files-to-azure-vmware-solution-hosts.md @@ -0,0 +1,208 @@ +--- +title: Attach Azure NetApp Files datastores to Azure VMware Solution hosts (Preview) +description: Learn how to create Azure NetApp Files-based NFS datastores for Azure VMware Solution hosts. +ms.topic: how-to +ms.service: azure-vmware +ms.date: 05/10/2022 +ms.custom: references_regions +--- + +# Attach Azure NetApp Files datastores to Azure VMware Solution hosts (Preview) + +[Azure NetApp Files](/azure/azure-netapp-files/azure-netapp-files-introduction?) is an enterprise-class, high-performance, metered file storage service. The service supports the most demanding enterprise file-workloads in the cloud: databases, SAP, and high-performance computing applications, with no code changes. For more information on Azure NetApp Files, see [Azure NetApp Files](https://docs.microsoft.com/azure/azure-netapp-files/) documentation. + +[Azure VMware Solution](/azure/azure-vmware/introduction) supports attaching Network File System (NFS) datastores as a persistent storage option. You can create NFS datastores with Azure NetApp Files volumes and attach them to clusters of your choice. You can also create virtual machines (VMs) for optimal cost and performance. + +> [!IMPORTANT] +> Azure NetApp Files datastores for Azure VMware Solution hosts is currently in public preview. This version is provided without a service-level agreement and is not recommended for production workloads. Some features may not be supported or may have constrained capabilities. For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). + +By using NFS datastores backed by Azure NetApp Files, you can expand your storage instead of scaling the clusters. You can also use Azure NetApp Files volumes to replicate data from on-premises or primary VMware environments for the secondary site. + +Create your Azure VMware Solution and create Azure NetApp Files NFS volumes in the virtual network connected to it using an ExpressRoute. Ensure there's connectivity from the private cloud to the NFS volumes created. Use those volumes to create NFS datastores and attach the datastores to clusters of your choice in a private cloud.
As a native integration, no other permissions configured via vSphere are needed. + +The following diagram demonstrates a typical architecture of Azure NetApp Files backed NFS datastores attached to an Azure VMware Solution private cloud via ExpressRoute. + +:::image type="content" source="media/attach-netapp-files-to-cloud/architecture-netapp-files-nfs-datastores.png" alt-text="Diagram shows the architecture of Azure NetApp Files backed NFS datastores attached to an Azure VMware Solution private cloud." lightbox="media/attach-netapp-files-to-cloud/architecture-netapp-files-nfs-datastores.png"::: + +## Prerequisites + +Before you begin the prerequisites, review the [Performance best practices](#performance-best-practices) section to learn about optimal performance of NFS datastores on Azure NetApp Files volumes. + +1. [Deploy Azure VMware Solution](https://docs.microsoft.com/azure/azure-vmware/deploy-azure-vmware-solution?) private cloud in a configured virtual network. For more information, see [Network planning checklist](/azure/azure-vmware/tutorial-network-checklist) and [Configure networking for your VMware private cloud](https://review.docs.microsoft.com/azure/azure-vmware/tutorial-configure-networking?). +1. Create an [NFSv3 volume for Azure NetApp Files](/azure/azure-netapp-files/azure-netapp-files-create-volumes) in the same virtual network as the Azure VMware Solution private cloud. + 1. Verify connectivity from the private cloud to Azure NetApp Files volume by pinging the attached target IP. + 2. Verify the subscription is registered to the `ANFAvsDataStore` feature in the `Microsoft.NetApp` namespace. If the subscription isn't registered, register it now. + + `az feature register --name "ANFAvsDataStore" --namespace "Microsoft.NetApp"` + + `az feature show --name "ANFAvsDataStore" --namespace "Microsoft.NetApp" --query properties.state` + 1. Based on your performance requirements, select the correct service level needed for the Azure NetApp Files capacity pool. For optimal performance, it's recommended to use the Ultra tier. Select option **Azure VMware Solution Datastore** listed under the **Protocol** section. + 1. Create a volume with **Standard** [network features](/azure/azure-netapp-files/configure-network-features) if available for ExpressRoute FastPath connectivity. + 1. Under the **Protocol** section, select **Azure VMware Solution Datastore** to indicate the volume is created to use as a datastore for Azure VMware Solution private cloud. + 1. If you're using [export policies](/azure/azure-netapp-files/azure-netapp-files-configure-export-policy) to control access to Azure NetApp Files volumes, enable the Azure VMware private cloud IP range, not individual host IPs. Faulty hosts in a private cloud could get replaced so if the IP isn't enabled, connectivity to datastore will be impacted. + +## Supported regions + +Azure VMware Solution currently supports the following regions: + +**America** : East US, West US, Central US, South Central US, North Central US, Canada East, Canada Central . + +**Europe** : North Europe, UK West, UK South, France Central, Switzerland West, Germany West Central. + +**Asia** : Southeast Asia, Japan West. + +**Australia** : Australia East, Australia Southeast. + +**Brazil** : Brazil South. + +The list of supported regions will expand as the preview progresses. + +## Performance best practices + +There are some important best practices to follow for optimal performance of NFS datastores on Azure NetApp Files volumes. 
+
+- Create Azure NetApp Files volumes using **Standard** network features to enable optimized connectivity from Azure VMware Solution private cloud via ExpressRoute FastPath connectivity.
+- For optimized performance, choose **UltraPerformance** gateway and enable [ExpressRoute FastPath](/azure/expressroute/expressroute-howto-linkvnet-arm#configure-expressroute-fastpath) from a private cloud to Azure NetApp Files volumes virtual network. View more detailed information on gateway SKUs at [About ExpressRoute virtual network gateways](/azure/expressroute/expressroute-about-virtual-network-gateways).
+- Based on your performance requirements, select the correct service level needed for the Azure NetApp Files capacity pool. For best performance, it's recommended to use the Ultra tier.
+- Create multiple datastores of 4-TB size for better performance. The default limit is 8 but it can be increased up to a maximum of 256 by submitting a support ticket. To submit a support ticket, go to [Create an Azure support request](/azure/azure-portal/supportability/how-to-create-azure-support-request).
+- Work with your Microsoft representative to ensure that the Azure VMware Solution private cloud and the Azure NetApp Files volumes are deployed within the same [Availability Zone](https://docs.microsoft.com/azure/availability-zones/az-overview#availability-zones).
+
+## Attach an Azure NetApp Files volume to your private cloud
+
+### [Portal](#tab/azure-portal)
+
+To attach an Azure NetApp Files volume to your private cloud using the Azure portal, follow these steps:
+
+1. Sign in to the Azure portal.
+1. Select **Subscriptions** to see a list of subscriptions.
+1. From the list, select the subscription you want to use.
+1. Under Settings, select **Resource providers**.
+1. Search for **Microsoft.AVS** and select it.
+1. Select **Register**.
+1. Under **Settings**, select **Preview features**.
+    1. Verify you're registered for both the `CloudSanExperience` and `AnfDatastoreExperience` features.
+1. Navigate to your Azure VMware Solution.
+Under **Manage**, select **Storage (preview)**.
+1. Select **Connect Azure NetApp Files volume**.
+1. In **Connect Azure NetApp Files volume**, select the **Subscription**, **NetApp account**, **Capacity pool**, and **Volume** to be attached as a datastore.
+
+    :::image type="content" source="media/attach-netapp-files-to-cloud/connect-netapp-files-portal-experience-1.png" alt-text="Image shows the navigation to Connect Azure NetApp Files volume pop-up window." lightbox="media/attach-netapp-files-to-cloud/connect-netapp-files-portal-experience-1.png":::
+
+1. Verify the protocol is NFS. You'll need to verify the virtual network and subnet to ensure connectivity to the Azure VMware Solution private cloud.
+1. Under **Associated cluster**, select the **Client cluster** to associate the NFS volume as a datastore.
+1. Under **Data store**, create a personalized name for your **Datastore name**.
+    1. When the datastore is created, you should see all of your datastores in the **Storage (preview)**.
+    2. You'll also notice that the NFS datastores are added in vCenter.
+
+
+### [Azure CLI](#tab/azure-cli)
+
+To attach an Azure NetApp Files volume to your private cloud using Azure CLI, follow these steps:
+
+1. Verify the subscription is registered to `CloudSanExperience` feature in the **Microsoft.AVS** namespace. If it's not already registered, then register it.
+
+    `az feature show --name "CloudSanExperience" --namespace "Microsoft.AVS"`
+
+    `az feature register --name "CloudSanExperience" --namespace "Microsoft.AVS"`
+1. The registration should take approximately 15 minutes to complete. You can also check the status.
+
+    `az feature show --name "CloudSanExperience" --namespace "Microsoft.AVS" --query properties.state`
+1. If the registration is stuck in an intermediate state for longer than 15 minutes, unregister, then re-register the flag.
+
+    `az feature unregister --name "CloudSanExperience" --namespace "Microsoft.AVS"`
+
+    `az feature register --name "CloudSanExperience" --namespace "Microsoft.AVS"`
+1. Verify the subscription is registered to `AnfDatastoreExperience` feature in the **Microsoft.AVS** namespace. If it's not already registered, then register it.
+
+    `az feature register --name "AnfDatastoreExperience" --namespace "Microsoft.AVS"`
+
+    `az feature show --name "AnfDatastoreExperience" --namespace "Microsoft.AVS" --query properties.state`
+1. Verify the VMware extension is installed. If the extension is already installed, verify you're using the latest version of the Azure CLI extension. If an older version is installed, update the extension.
+
+    `az extension show --name vmware`
+
+    `az extension list-versions -n vmware`
+
+    `az extension update --name vmware`
+1. If the VMware extension isn't already installed, install it.
+
+    `az extension add --name vmware`
+1. Create a datastore using an existing ANF volume in an Azure VMware Solution private cloud cluster.
+
+    `az vmware datastore netapp-volume create --name MyDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud --volume-id /subscriptions//resourceGroups//providers/Microsoft.NetApp/netAppAccounts//capacityPools//volumes/`
+1. If needed, you can display the help on the datastores.
+
+    `az vmware datastore -h`
+1. Show the details of an ANF-based datastore in a private cloud cluster.
+
+    `az vmware datastore show --name ANFDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud`
+1. List all of the datastores in a private cloud cluster.
+
+    `az vmware datastore list --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud`
+
+---
+
+## Disconnect an Azure NetApp Files-based datastore from your private cloud
+
+You can use the instructions provided to disconnect an Azure NetApp Files-based (ANF) datastore using either the Azure portal or the Azure CLI. There's no maintenance window required for this operation. The disconnect action only disconnects the ANF volume as a datastore; it doesn't delete the data or the ANF volume.
+
+**Disconnect an ANF datastore using the Azure Portal**
+
+1. Select the datastore you want to disconnect from.
+1. Right-click on the datastore and select **disconnect**.
+
+**Disconnect an ANF datastore using Azure CLI**
+
+    `az vmware datastore delete --name ANFDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud`
+
+## Next steps
+
+Now that you've attached a datastore on an Azure NetApp Files-based NFS volume to your Azure VMware Solution hosts, you can create your VMs. Use the following resources to learn more.
+
+- [Service levels for Azure NetApp Files](/azure/azure-netapp-files/azure-netapp-files-service-levels)
+- Datastore protection using [Azure NetApp Files snapshots](/azure/azure-netapp-files/snapshots-introduction)
+- [About ExpressRoute virtual network gateways](https://docs.microsoft.com/azure/expressroute/expressroute-about-virtual-network-gateways)
+- [Understand Azure NetApp Files backup](/azure/azure-netapp-files/backup-introduction)
+- [Guidelines for Azure NetApp Files network planning](https://docs.microsoft.com/azure/azure-netapp-files/azure-netapp-files-network-topologies)
+
+## FAQs
+
+- **Are there any special permissions required to create the datastore with the Azure NetApp Files volume and attach it onto the clusters in a private cloud?**
+
+    No other special permissions are needed. The datastore creation and attachment is implemented via the Azure VMware Solution control plane.
+
+- **Which NFS versions are supported?**
+
+    NFSv3 is supported for datastores on Azure NetApp Files.
+
+- **Should Azure NetApp Files be in the same subscription as the private cloud?**
+
+    It's recommended to create the Azure NetApp Files volumes for the datastores in the same VNet that has connectivity to the private cloud.
+
+- **How many datastores are we supporting with Azure VMware Solution?**
+
+    The default limit is 8 but it can be increased up to a maximum of 256 by submitting a support ticket. To submit a support ticket, go to [Create an Azure support request](/azure/azure-portal/supportability/how-to-create-azure-support-request).
+
+- **What latencies and bandwidth can be expected from the datastores backed by Azure NetApp Files?**
+
+    We're currently validating and working on benchmarking. For now, follow the [Performance best practices](#performance-best-practices) outlined in this article.
+
+- **What are my options for backup and recovery?**
+
+    Azure NetApp Files (ANF) supports [snapshots](/azure/azure-netapp-files/azure-netapp-files-manage-snapshots) of datastores for quick checkpoints for near-term recovery or quick clones. ANF backup lets you offload your ANF snapshots to Azure storage. This feature is available in public preview. This technology copies and stores only the changed blocks relative to previously offloaded snapshots in an efficient format. This ability decreases Recovery Point Objective (RPO) and Recovery Time Objective (RTO) while lowering the backup data transfer burden on the Azure VMware Solution service.
+
+- **How do I monitor storage usage?**
+
+    Use [Metrics for Azure NetApp Files](/azure/azure-netapp-files/azure-netapp-files-metrics) to monitor storage and performance usage for the Datastore volume and to set alerts.
+
+- **What metrics are available for monitoring?**
+
+    Usage and performance metrics are available for monitoring the Datastore volume. Replication metrics are also available for an ANF datastore that can be replicated to another region using Cross Regional Replication. For more information about metrics, see [Metrics for Azure NetApp Files](/azure/azure-netapp-files/azure-netapp-files-metrics).
+
+- **What happens if a new node is added to the cluster, or an existing node is removed from the cluster?**
+
+    When you add a new node to the cluster, it will automatically gain access to the datastore. Removing an existing node from the cluster won't affect the datastore.
+ +- **How are the datastores charged, is there an additional charge?** + + Azure NetApp Files NFS volumes that are used as datastores will be billed following the [capacity pool based billing model](/azure/azure-netapp-files/azure-netapp-files-cost-model). Billing will depend on the service level. There's no extra charge for using Azure NetApp Files NFS volumes as datastores. diff --git a/articles/azure-vmware/attach-disk-pools-to-azure-vmware-solution-hosts.md b/articles/azure-vmware/attach-disk-pools-to-azure-vmware-solution-hosts.md index 6dc53596dd8a2..ce20f4505504a 100644 --- a/articles/azure-vmware/attach-disk-pools-to-azure-vmware-solution-hosts.md +++ b/articles/azure-vmware/attach-disk-pools-to-azure-vmware-solution-hosts.md @@ -2,6 +2,7 @@ title: Attach Azure disk pools to Azure VMware Solution hosts (Preview) description: Learn how to attach an Azure disk pool surfaced through an iSCSI target as the VMware vSphere datastore of an Azure VMware Solution private cloud. Once the datastore is configured, you can create volumes on it and consume them from your Azure VMware Solution private cloud. ms.topic: how-to +ms.service: azure-vmware ms.date: 11/02/2021 #Customer intent: As an Azure service administrator, I want to scale my AVS hosts using disk pools instead of scaling clusters. So that I can use block storage for active working sets and tier less frequently accessed data from vSAN to disks. I can also replicate data from on-premises or primary VMware vSphere environment to disk storage for the secondary site. ms.custom: ignite-fall-2021, devx-track-azurecli diff --git a/articles/azure-vmware/azure-security-integration.md b/articles/azure-vmware/azure-security-integration.md index 8848810c57f1c..f985665b41271 100644 --- a/articles/azure-vmware/azure-security-integration.md +++ b/articles/azure-vmware/azure-security-integration.md @@ -2,6 +2,7 @@ title: Integrate Microsoft Defender for Cloud with Azure VMware Solution description: Learn how to protect your Azure VMware Solution VMs with Azure's native security tools from the workload protection dashboard. ms.topic: how-to +ms.service: azure-vmware ms.date: 06/14/2021 --- diff --git a/articles/azure-vmware/azure-vmware-solution-citrix.md b/articles/azure-vmware/azure-vmware-solution-citrix.md index 7a8d00855e6c5..c5f129ab6fa0e 100644 --- a/articles/azure-vmware/azure-vmware-solution-citrix.md +++ b/articles/azure-vmware/azure-vmware-solution-citrix.md @@ -2,6 +2,7 @@ title: Deploy Citrix on Azure VMware Solution description: Learn how to deploy VMware Citrix on Azure VMware Solution. ms.topic: how-to +ms.service: azure-vmware ms.date: 11/02/2021 ms.custom: ignite-fall-2021 --- diff --git a/articles/azure-vmware/azure-vmware-solution-horizon.md b/articles/azure-vmware/azure-vmware-solution-horizon.md index 3ec10a880bd9a..4d46a82e05a5a 100644 --- a/articles/azure-vmware/azure-vmware-solution-horizon.md +++ b/articles/azure-vmware/azure-vmware-solution-horizon.md @@ -2,7 +2,8 @@ title: Deploy Horizon on Azure VMware Solution description: Learn how to deploy VMware Horizon on Azure VMware Solution. ms.topic: how-to -ms.date: 09/29/2020 +ms.service: azure-vmware +ms.date: 04/11/2022 --- @@ -29,12 +30,12 @@ Horizon 2006 and later versions on the Horizon 8 release line supports both on-p ## Deploy Horizon in a hybrid cloud -You can deploy Horizon in a hybrid cloud environment by using Horizon Cloud Pod Architecture (CPA) to interconnect on-premises and Azure datacenters. 
CPA scales up your deployment, builds a hybrid cloud, and provides redundancy for Business Continuity and Disaster Recovery. For more information, see [Expanding Existing Horizon 7 Environments](https://techzone.vmware.com/resource/business-continuity-vmware-horizon#_Toc41650874). +You can deploy Horizon in a hybrid cloud environment by using Horizon Cloud Pod Architecture (CPA) to interconnect on-premises and Azure data centers. CPA scales up your deployment, builds a hybrid cloud, and provides redundancy for Business Continuity and Disaster Recovery. For more information, see [Expanding Existing Horizon 7 Environments](https://techzone.vmware.com/resource/business-continuity-vmware-horizon#_Toc41650874). >[!IMPORTANT] >CPA is not a stretched deployment; each Horizon pod is distinct, and all Connection Servers that belong to each of the individual pods are required to be located in a single location and run on the same broadcast domain from a network perspective. -Like on-premises or private datacenter, you can deploy Horizon in an Azure VMware Solution private cloud. We'll discuss key differences in deploying Horizon on-premises and Azure VMware Solution in the following sections. +Like on-premises or private data centers, you can deploy Horizon in an Azure VMware Solution private cloud. We'll discuss key differences in deploying Horizon on-premises and Azure VMware Solution in the following sections. The _Azure private cloud_ is conceptually the same as the _VMware SDDC_, a term typically used in Horizon documentation. The rest of this document uses both terms interchangeably. @@ -43,11 +44,11 @@ The Horizon Cloud Connector is required for Horizon on Azure VMware Solution to >[!IMPORTANT] >Horizon Control Plane support for Horizon on Azure VMware Solution is not yet available. Be sure to download the VHD version of Horizon Cloud Connector. -## vCenter Cloud Admin role +## vCenter Server Cloud Admin role -Since Azure VMware Solution is an SDDC service and Azure manages the lifecycle of the SDDC on Azure VMware Solution, the vCenter permission model on Azure VMware Solution is limited by design. +Since Azure VMware Solution is an SDDC service and Azure manages the lifecycle of the SDDC on Azure VMware Solution, the vCenter Server permission model on Azure VMware Solution is limited by design. -Customers are required to use the Cloud Admin role, which has a limited set of vCenter permissions. The Horizon product was modified to work with the Cloud Admin role on Azure VMware Solution, specifically: +Customers are required to use the Cloud Admin role, which has a limited set of vCenter Server permissions. The Horizon product was modified to work with the Cloud Admin role on Azure VMware Solution, specifically: * Instant clone provisioning was modified to run on Azure VMware Solution. @@ -63,9 +64,9 @@ Customers are required to use the Cloud Admin role, which has a limited set of v ## Horizon on Azure VMware Solution deployment architecture -A typical Horizon architecture design uses a pod and block strategy. A block is a single vCenter, while multiple blocks combined make a pod. A Horizon pod is a unit of organization determined by Horizon scalability limits. Each Horizon pod has a separate management portal, and so a standard design practice is to minimize the number of pods. +A typical Horizon architecture design uses a pod and block strategy. A block is a single vCenter Server, while multiple blocks combined make a pod. 
A Horizon pod is a unit of organization determined by Horizon scalability limits. Each Horizon pod has a separate management portal, and so a standard design practice is to minimize the number of pods. -Every cloud has its own network connectivity scheme. Combined with VMware SDDC networking / NSX Edge, the Azure VMware Solution network connectivity presents unique requirements for deploying Horizon that is different from on-premises. +Every cloud has its own network connectivity scheme. Combined with VMware SDDC networking / NSX-T Data Center, the Azure VMware Solution network connectivity presents unique requirements for deploying Horizon that is different from on-premises. Each Azure private cloud and SDDC can handle 4,000 desktop or application sessions, assuming: @@ -106,27 +107,27 @@ You connect your AD domain controller in Azure Virtual Network with your on-prem A variation on the basic example might be to support connectivity for on-premises resources. For example, users access desktops and generate virtual desktop application traffic or connect to an on-premises Horizon pod using CPA. -The diagram shows how to support connectivity for on-premises resources. To connect to your corporate network to the Azure Virtual Network, you'll need an ExpressRoute circuit. You'll also need to connect your corporate network with each of the private cloud and SDDCs using ExpressRoute Global Reach. It allows the connectivity from the SDDC to the ExpressRoute circuit and on-premises resources. +The diagram shows how to support connectivity for on-premises resources. To connect to your corporate network to the Azure Virtual Network, you'll need an ExpressRoute circuit. You'll also need to connect your corporate network with each of the private cloud and SDDCs using ExpressRoute Global Reach. It allows the connectivity from the SDDC to the ExpressRoute circuit and on-premises resources. :::image type="content" source="media/vmware-horizon/connect-corporate-network-azure-virtual-network.png" alt-text="Diagram showing the connection of a corporate network to an Azure Virtual Network." border="false"::: ### Multiple Horizon pods on Azure VMware Solution across multiple regions -Another scenario is scaling Horizon across multiple pods. In this scenario, you deploy two Horizon pods in two different regions and federate them using CPA. It's similar to the network configuration in the previous example, but with some additional cross-regional links. +Another scenario is scaling Horizon across multiple pods. In this scenario, you deploy two Horizon pods in two different regions and federate them using CPA. It's similar to the network configuration in the previous example, but with some additional cross-regional links. You'll connect the Azure Virtual Network in each region to the private clouds/SDDCs in the other region. It allows Horizon connection servers part of the CPA federation to connect to all desktops under management. Adding extra private clouds/SDDCs to this configuration would allow you to scale to 24,000 sessions overall.  -The same principles apply if you deploy two Horizon pods in the same region. Make sure to deploy the second Horizon pod in a *separate Azure Virtual Network*. Just like the single pod example, you can connect your corporate network and on-premises pod to this multi-pod/region example using ExpressRoute and Global Reach. +The same principles apply if you deploy two Horizon pods in the same region. Make sure to deploy the second Horizon pod in a *separate Azure Virtual Network*. 
Just like the single pod example, you can connect your corporate network and on-premises pod to this multi-pod/region example using ExpressRoute and Global Reach. :::image type="content" source="media/vmware-horizon/multiple-horizon-pod-azure-vmware-solution.png" alt-text=" Diagram showing multiple Horizon pods on Azure VMware Solution across multiple regions." border="false"::: ## Size Azure VMware Solution hosts for Horizon deployments -Horizon's sizing methodology on a host running in Azure VMware Solution is simpler than Horizon on-premises. That's because the Azure VMware Solution host is standardized. Exact host sizing helps determine the number of hosts needed to support your VDI requirements. It's central to determining the cost-per-desktop. +Horizon's sizing methodology on a host running in Azure VMware Solution is simpler than Horizon on-premises. That's because the Azure VMware Solution host is standardized. Exact host sizing helps determine the number of hosts needed to support your VDI requirements. It's central to determining the cost-per-desktop. ### Sizing tables -Specific vCPU/vRAM requirements for Horizon virtual desktops depend on the customer’s specific workload profile. Work with your MSFT and VMware sales team to help determine your vCPU/vRAM requirements for your virtual desktops. +Specific vCPU/vRAM requirements for Horizon virtual desktops depend on the customer’s specific workload profile. Work with your MSFT and VMware sales team to help determine your vCPU/vRAM requirements for your virtual desktops. | vCPU per VM | vRAM per VM (GB) | Instance | 100 VMs | 200 VMs | 300 VMs | 400 VMs | 500 VMs | 600 VMs | 700 VMs | 800 VMs | 900 VMs | 1000 VMs | 2000 VMs | 3000 VMs | 4000 VMs | 5000 VMs | 6000 VMs | 6400 VMs | |:-----------:|:----------------:|:--------:|:-------:|:-------:|:-------:|:-------:|:-------:|:-------:|:-------:|:-------:|:-------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:| diff --git a/articles/azure-vmware/azure-vmware-solution-platform-updates.md b/articles/azure-vmware/azure-vmware-solution-platform-updates.md index 4b8c56de0f13a..fbeedd0c1253d 100644 --- a/articles/azure-vmware/azure-vmware-solution-platform-updates.md +++ b/articles/azure-vmware/azure-vmware-solution-platform-updates.md @@ -3,6 +3,7 @@ title: Platform updates for Azure VMware Solution description: Learn about the platform updates to Azure VMware Solution. ms.topic: reference ms.custom: references_regions +ms.service: azure-vmware ms.date: 12/22/2021 --- @@ -10,6 +11,21 @@ ms.date: 12/22/2021 Azure VMware Solution will apply important updates starting in March 2021. You'll receive a notification through Azure Service Health that includes the timeline of the maintenance. For more information, see [Host maintenance and lifecycle management](concepts-private-clouds-clusters.md#host-maintenance-and-lifecycle-management). + +## June 7, 2022 + +All new Azure VMware Solution private clouds in regions (East US2, Canada Central, North Europe, and Japan East), are now deployed in with VMware vCenter Server version 7.0 Update 3c and ESXi version 7.0 Update 3c. + +Any existing private clouds in the above mentioned regions will also be upgraded to these versions. 
For more information, please see [VMware ESXi 7.0 Update 3c Release Notes](https://docs.vmware.com/VMware-vSphere/7.0/rn/vsphere-esxi-70u3c-release-notes.html) and [VMware vCenter Server 7.0 Update 3c Release Notes](https://docs.vmware.com/VMware-vSphere/7.0/rn/vsphere-vcenter-server-70u3c-release-notes.html). + +## May 23, 2022 + +All new Azure VMware Solution private clouds in regions (Germany West Central, Australia East, Central US and UK West), are now deployed with VMware vCenter Server version 7.0 Update 3c and ESXi version 7.0 Update 3c. + +Any existing private clouds in the previously mentioned regions will be upgraded to those versions. For more information, please see [VMware ESXi 7.0 Update 3c Release Notes](https://docs.vmware.com/en/VMware-vSphere/7.0/rn/vsphere-esxi-70u3c-release-notes.html) and [VMware vCenter Server 7.0 Update 3c Release Notes](https://docs.vmware.com/en/VMware-vSphere/7.0/rn/vsphere-vcenter-server-70u3c-release-notes.html). + +You'll receive a notification through Azure Service Health that includes the timeline of the upgrade. You can reschedule an upgrade as needed. This notification also provides details on the upgraded component, its effect on workloads, private cloud access, and other Azure services. + ## May 9, 2022 All new Azure VMware Solution private clouds in regions (France Central, Brazil South, Japan West, Australia Southeast, Canada East, East Asia, and Southeast Asia), are now deployed with VMware vCenter Server version 7.0 Update 3c and ESXi version 7.0 Update 3c. diff --git a/articles/azure-vmware/backup-azure-vmware-solution-virtual-machines.md b/articles/azure-vmware/backup-azure-vmware-solution-virtual-machines.md index 09251a622de1f..37237d16bdb62 100644 --- a/articles/azure-vmware/backup-azure-vmware-solution-virtual-machines.md +++ b/articles/azure-vmware/backup-azure-vmware-solution-virtual-machines.md @@ -2,6 +2,7 @@ title: Back up Azure VMware Solution VMs with Azure Backup Server description: Configure your Azure VMware Solution environment to back up virtual machines by using Azure Backup Server. ms.topic: how-to +ms.service: azure-vmware ms.date: 04/06/2022 --- diff --git a/articles/azure-vmware/bitnami-appliances-deployment.md b/articles/azure-vmware/bitnami-appliances-deployment.md index e4371fd93395a..c12597d457978 100644 --- a/articles/azure-vmware/bitnami-appliances-deployment.md +++ b/articles/azure-vmware/bitnami-appliances-deployment.md @@ -2,8 +2,8 @@ title: Deploy Bitnami virtual appliances description: Learn about the virtual appliances packed by Bitnami to deploy in your Azure VMware Solution private cloud. ms.topic: how-to -ms.date: 09/15/2021 - +ms.service: azure-vmware +ms.date: 04/11/2022 --- # Bitnami appliance deployment @@ -54,11 +54,11 @@ In this article, you'll learn how to install and configure the following virtual -## Step 2. Access the local vCenter of your private cloud +## Step 2. Access the local vCenter Server of your private cloud 1. Sign in to the [Azure portal](https://portal.azure.com), select your private cloud, and then **Manage** > **Identity**. -1. Copy the vCenter URL, username, and password. You'll use them to access your virtual machine (VM). +1. Copy the vCenter Server URL, username, and password. You'll use them to access your virtual machine (VM). 1. Select **Overview**, select the VM, and then connect to it through RDP. If you need help with connecting, see [connect to a virtual machine](../virtual-machines/windows/connect-logon.md#connect-to-the-virtual-machine) for details. 
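
The Bitnami procedure above copies the vCenter Server URL and credentials from the portal by hand. As a scripted alternative, the same details can be read with the Azure CLI. The following is a minimal illustrative sketch rather than part of the documented procedure; `MyResourceGroup` and `MyPrivateCloud` are placeholder names, and it assumes the `vmware` CLI extension is available.

```azurecli
# Add the Azure VMware Solution extension if it isn't installed yet.
az extension add --name vmware

# Show the private cloud endpoints, including the vCenter Server URL.
az vmware private-cloud show \
    --resource-group MyResourceGroup \
    --name MyPrivateCloud \
    --query endpoints

# List the cloudadmin credentials used to sign in to vCenter Server.
az vmware private-cloud list-admin-credentials \
    --resource-group MyResourceGroup \
    --private-cloud MyPrivateCloud
```
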
@@ -71,7 +71,7 @@ In this article, you'll learn how to install and configure the following virtual -## Step 3. Install the Bitnami OVA/OVF file in vCenter +## Step 3. Install the Bitnami OVA/OVF file in vCenter Server 1. Right-click the cluster that you want to install the LAMP virtual appliance and select **Deploy OVF Template**. @@ -97,7 +97,7 @@ In this article, you'll learn how to install and configure the following virtual 1. After the installation finishes, under **Actions**, select **Power on** to turn on the appliance. -1. From the vCenter console, select **Launch Web Console** and sign in to the Bitnami virtual appliance. Check the [Bitnami virtual appliance support documentation](https://docs.bitnami.com/vmware-marketplace/faq/get-started/find-credentials/) for the default username and password. +1. From the vCenter Server console, select **Launch Web Console** and sign in to the Bitnami virtual appliance. Check the [Bitnami virtual appliance support documentation](https://docs.bitnami.com/vmware-marketplace/faq/get-started/find-credentials/) for the default username and password. >[!NOTE] >You can change the default password to a more secure one. For more information, see ... diff --git a/articles/azure-vmware/concepts-api-management.md b/articles/azure-vmware/concepts-api-management.md index 45546ebefc854..d780679e4815f 100644 --- a/articles/azure-vmware/concepts-api-management.md +++ b/articles/azure-vmware/concepts-api-management.md @@ -2,6 +2,7 @@ title: Concepts - API Management description: Learn how API Management protects APIs running on Azure VMware Solution virtual machines (VMs) ms.topic: conceptual +ms.service: azure-vmware ms.date: 04/28/2021 --- diff --git a/articles/azure-vmware/concepts-design-public-internet-access.md b/articles/azure-vmware/concepts-design-public-internet-access.md new file mode 100644 index 0000000000000..48a496bb9ebfc --- /dev/null +++ b/articles/azure-vmware/concepts-design-public-internet-access.md @@ -0,0 +1,75 @@ +--- +title: Concept - Internet connectivity design considerations (Preview) +description: Options for Azure VMware Solution Internet Connectivity. +ms.topic: conceptual +ms.service: azure-vmware +ms.date: 5/12/2022 +--- + +# Internet connectivity design considerations (Preview) + +There are three primary patterns for creating outbound access to the Internet from Azure VMware Solution and to enable inbound Internet access to resources on your Azure VMware Solution private cloud. + +- [Internet Service hosted in Azure](#internet-service-hosted-in-azure) +- [Azure VMware Solution Managed SNAT](#azure-vmware-solution-managed-snat) +- [Public IP to NSX edge](#public-ip-to-nsx-edge) + +Your requirements for security controls, visibility, capacity, and operations drive the selection of the appropriate method for delivery of Internet access to the Azure VMware Solution private cloud. + +## Internet Service hosted in Azure + +There are multiple ways to generate a default route in Azure and send it towards your Azure VMware Solution private cloud or on-premise. The options are as follows: + +- An Azure firewall in a Virtual WAN Hub. +- A third-party Network Virtual Appliance in a Virtual WAN Hub Spoke Virtual Network. +- A third-party Network Virtual Appliance in a Native Azure Virtual Network using Azure Route Server. +- A default route from on-premises transferred to Azure VMware Solution over Global Reach. 
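
As an illustration of the third option above (a third-party Network Virtual Appliance with Azure Route Server), the following sketch shows how the Route Server side might be provisioned with the Azure CLI. The names, ASN, and NVA IP address (`MyRouteServer`, `65001`, `10.0.1.4`) are placeholders, and the NVA itself must be configured separately to advertise `0.0.0.0/0` over BGP so the default route can propagate toward the private cloud over the ExpressRoute connection.

```azurecli
# Create the Route Server in the dedicated RouteServerSubnet of the virtual network.
az network routeserver create \
    --name MyRouteServer \
    --resource-group MyResourceGroup \
    --hosted-subnet "<RouteServerSubnet resource ID>" \
    --public-ip-address MyRouteServerPublicIP

# Peer the Route Server with the NVA that will advertise the default route.
az network routeserver peering create \
    --routeserver MyRouteServer \
    --resource-group MyResourceGroup \
    --name MyNvaPeering \
    --peer-asn 65001 \
    --peer-ip 10.0.1.4

# Allow routes learned from the NVA to be exchanged with the ExpressRoute gateway.
az network routeserver update \
    --name MyRouteServer \
    --resource-group MyResourceGroup \
    --allow-b2b-traffic true
```
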
+ +Use any of these patterns to provide an outbound SNAT service with the ability to control what sources are allowed out, to view the connection logs, and for some services, do further traffic inspection. + +The same service can also consume an Azure Public IP and create an inbound DNAT from the Internet towards targets in Azure VMware Solution. + +An environment can also be built that utilizes multiple paths for Internet traffic. One for outbound SNAT (for example, a third-party security NVA), and another for inbound DNAT (like a third party Load balancer NVA using SNAT pools for return traffic). + +## Azure VMware Solution Managed SNAT + +A Managed SNAT service provides a simple method for outbound internet access from an Azure VMware Solution private cloud. Features of this service include the following. + +- Easily enabled – select the radio button on the Internet Connectivity tab and all workload networks will have immediate outbound access to the Internet through a SNAT gateway. +- No control over SNAT rules, all sources that reach the SNAT service are allowed. +- No visibility into connection logs. +- Two Public IPs are used and rotated to support up to 128k simultaneous outbound connections. +- No inbound DNAT capability is available with the Azure VMware Solution Managed SNAT. + +## Public IP to NSX edge + +This option brings an allocated Azure Public IP directly to the NSX Edge for consumption. It allows the Azure VMware Solution private cloud to directly consume and apply public network addresses in NSX as required. These addresses are used for the following types of connections: +- Outbound SNAT +- Inbound DNAT +- Load balancing using VMware AVI third-party Network Virtual Appliances +- Applications directly connected to a workload VM interface. + +This option also lets you configure the public address on a third-party Network Virtual Appliance to create a DMZ within the Azure VMware Solution private cloud. + +Features include: + + - Scale – the soft limit of 64 public IPs can be increased by request to 1000s of Public IPs allocated if required by an application. + - Flexibility – A Public IP can be applied anywhere in the NSX ecosystem. It can be used to provide SNAT or DNAT, on load balancers like VMware’s AVI, or third-party Network Virtual Appliances. It can also be used on third-party Network Virtual Security Appliances on VMware segments or on directly on VMs. + - Regionality – the Public IP to the NSX Edge is unique to the local SDDC. For “multi private cloud in distributed regions,” with local exit to Internet intentions, it’s much easier to direct traffic locally versus trying to control default route propagation for a security or SNAT service hosted in Azure. If you've two or more Azure VMware Solution private clouds connected with a Public IP configured, they can both have a local exit. + +## Considerations for selecting an option + +The option that you select depends on the following factors: + +- To add an Azure VMware private cloud to a security inspection point provisioned in Azure native that inspects all Internet traffic from Azure native endpoints, use an Azure native construct and leak a default route from Azure to your Azure VMware Solution private cloud. +- If you need to run a third-party Network Virtual Appliance to conform to existing standards for security inspection or streamlined opex, you have two options. You can run your Public IP in Azure native with the default route method or run it in Azure VMware Solution using Public IP to NSX edge. 
+- There are scale limits on how many Public IPs can be allocated to a Network Virtual Appliance running in native Azure or provisioned on Azure Firewall. The Public IP to NSX edge option allows for much higher allocations (1000s versus 100s). +- Use a Public IP to the NSX for a localized exit to the Internet from each private cloud in its local region. Using multiple Azure VMware Solution private clouds in several Azure regions that need to communicate with each other and the Internet, it can be challenging to match an Azure VMware Solution private cloud with a security service in Azure. The difficulty is due to the way a default route from Azure works. + +## Next Steps + +[Enable Managed SNAT for Azure VMware Solution Workloads](enable-managed-snat-for-workloads.md) + +[Enable Public IP to the NSX Edge for Azure VMware Solution (Preview)](enable-public-ip-nsx-edge.md) + +[Disable Internet access or enable a default route](disable-internet-access.md) \ No newline at end of file diff --git a/articles/azure-vmware/concepts-hub-and-spoke.md b/articles/azure-vmware/concepts-hub-and-spoke.md index 542eaa3cf45a6..12a60d0b39366 100644 --- a/articles/azure-vmware/concepts-hub-and-spoke.md +++ b/articles/azure-vmware/concepts-hub-and-spoke.md @@ -2,6 +2,7 @@ title: Concept - Integrate an Azure VMware Solution deployment in a hub and spoke architecture description: Learn about integrating an Azure VMware Solution deployment in a hub and spoke architecture on Azure. ms.topic: conceptual +ms.service: azure-vmware ms.date: 10/26/2020 --- @@ -37,7 +38,7 @@ The architecture has the following main components: - **ExpressRoute gateway:** Enables the communication between Azure VMware Solution private cloud, shared services on Hub virtual network, and workloads running on Spoke virtual networks. -- **ExpressRoute Global Reach:** Enables the connectivity between on-premises and Azure VMware Solution private cloud. The connectivity between Azure VMware Solution and the Azure fabric is through ExpressRoute Global Reach only. You can't select any option beyond ExpressRoute Fast Path. ExpressRoute Direct isn't supported. +- **ExpressRoute Global Reach:** Enables the connectivity between on-premises and Azure VMware Solution private cloud. The connectivity between Azure VMware Solution and the Azure fabric is through ExpressRoute Global Reach only. - **S2S VPN considerations:** For Azure VMware Solution production deployments, Azure S2S VPN isn't supported due to network requirements for VMware HCX. However, you can use it for a PoC deployment. diff --git a/articles/azure-vmware/concepts-identity.md b/articles/azure-vmware/concepts-identity.md index 276a597375576..05e6245ea9428 100644 --- a/articles/azure-vmware/concepts-identity.md +++ b/articles/azure-vmware/concepts-identity.md @@ -2,29 +2,28 @@ title: Concepts - Identity and access description: Learn about the identity and access concepts of Azure VMware Solution ms.topic: conceptual -ms.date: 07/29/2021 +ms.service: azure-vmware +ms.date: 06/06/2022 --- # Azure VMware Solution identity concepts -Azure VMware Solution private clouds are provisioned with a vCenter Server and NSX-T Manager. You'll use vCenter to manage virtual machine (VM) workloads and NSX-T Manager to manage and extend the private cloud. The CloudAdmin role is used for vCenter Server and restricted administrator rights for NSX-T Manager. +Azure VMware Solution private clouds are provisioned with a vCenter Server and NSX-T Manager. 
You'll use vCenter to manage virtual machine (VM) workloads and NSX-T Manager to manage and extend the private cloud. The CloudAdmin role is used for vCenter Server and the administrator role (with restricted permissions) is used for NSX-T Manager. ## vCenter Server access and identity [!INCLUDE [vcenter-access-identity-description](includes/vcenter-access-identity-description.md)] > [!IMPORTANT] -> Azure VMware Solution offers custom roles on vCenter Server but currently doesn't offer them on the Azure VMware Solution portal. For more information, see the [Create custom roles on vCenter Server](#create-custom-roles-on-vcenter-server) section later in this article. +> Azure VMware Solution offers custom roles on vCenter Server but currently doesn't offer them on the Azure VMware Solution portal. For more information, see the [Create custom roles on vCenter Server](#create-custom-roles-on-vcenter-server) section later in this article. ### View the vCenter privileges You can view the privileges granted to the Azure VMware Solution CloudAdmin role on your Azure VMware Solution private cloud vCenter. -1. Sign in to the vSphere Client and go to **Menu** > **Administration**. - +1. Sign into the vSphere Client and go to **Menu** > **Administration**. 1. Under **Access Control**, select **Roles**. - -1. From the list of roles, select **CloudAdmin** and then select **Privileges**. +1. From the list of roles, select **CloudAdmin** and then select **Privileges**. :::image type="content" source="media/concepts/role-based-access-control-cloudadmin-privileges.png" alt-text="Screenshot showing the roles and privileges for CloudAdmin in the vSphere Client."::: @@ -53,9 +52,9 @@ The CloudAdmin role in Azure VMware Solution has the following privileges on vCe ### Create custom roles on vCenter Server -Azure VMware Solution supports the use of custom roles with equal or lesser privileges than the CloudAdmin role. +Azure VMware Solution supports the use of custom roles with equal or lesser privileges than the CloudAdmin role. -You'll use the CloudAdmin role to create, modify, or delete custom roles with privileges lesser than or equal to their current role. You can create roles with privileges greater than CloudAdmin, but you can't assign the role to any users or groups or delete the role. +You'll use the CloudAdmin role to create, modify, or delete custom roles with privileges lesser than or equal to their current role. You can create roles with privileges greater than CloudAdmin. You can't assign the role to any users or groups or delete the role. To prevent creating roles that can't be assigned or deleted, clone the CloudAdmin role as the basis for creating new custom roles. @@ -66,14 +65,13 @@ To prevent creating roles that can't be assigned or deleted, clone the CloudAdmi 1. Select the **CloudAdmin** role and select the **Clone role action** icon. - >[!NOTE] + >[!NOTE] >Don't clone the **Administrator** role because you can't use it. Also, the custom role created can't be deleted by cloudadmin\@vsphere.local. 1. Provide the name you want for the cloned role. 1. Add or remove privileges for the role and select **OK**. The cloned role is visible in the **Roles** list. - #### Apply a custom role 1. Navigate to the object that requires the added permission. For example, to apply permission to a folder, navigate to **Menu** > **VMs and Templates** > **Folder Name**. 
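If you manage vSphere configuration from scripts rather than the vSphere Client, the same clone-and-restrict pattern can be sketched with the open-source `govc` CLI. This is an illustrative aside rather than part of the procedure above, and the endpoint, credentials, role name, privilege IDs, principal, and folder path are all placeholder assumptions.

```bash
# Placeholders: point govc at the private cloud vCenter Server with the cloudadmin credentials.
export GOVC_URL='https://10.10.0.2'
export GOVC_USERNAME='cloudadmin@vsphere.local'
export GOVC_PASSWORD='<password>'

# Inspect the privileges CloudAdmin holds, then create a narrower role from a subset of them.
govc role.ls CloudAdmin
govc role.create FolderOperator Folder.Create Folder.Delete Folder.Move Folder.Rename

# Assign the new role to a group on a specific VM folder.
govc permissions.set -principal 'vm-operators@example.com' -role FolderOperator '/SDDC-Datacenter/vm/Workloads'
```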
@@ -90,12 +88,59 @@ To prevent creating roles that can't be assigned or deleted, clone the CloudAdmi ## NSX-T Manager access and identity ->[!NOTE] ->NSX-T [!INCLUDE [nsxt-version](includes/nsxt-version.md)] is currently supported for all new private clouds. +When a private cloud is provisioned using Azure portal, Software Defined Data Center (SDDC) management components like vCenter and NSX-T Manager are provisioned for customers. + +Microsoft is responsible for the lifecycle management of NSX-T appliances like NSX-T Managers and NSX-T Edges. They're responsible for bootstrapping network configuration, like creating the Tier-0 gateway. + +You're responsible for NSX-T software-defined networking (SDN) configuration, for example: + +- Network segments +- Other Tier-1 gateways +- Distributed firewall rules +- Stateful services like gateway firewall +- Load balancer on Tier-1 gateways + +You can access NSX-T Manager using the built-in local user "admin" assigned to **Enterprise admin** role that gives full privileges to a user to manage NSX-T. While Microsoft manages the lifecycle of NSX-T, certain operations aren't allowed by a user. Operations not allowed include editing the configuration of host and edge transport nodes or starting an upgrade. For new users, Azure VMware Solution deploys them with a specific set of permissions needed by that user. The purpose is to provide a clear separation of control between the Azure VMware Solution control plane configuration and Azure VMware Solution private cloud user. + +For new private cloud deployments (in US West and Australia East) starting **June 2022**, NSX-T access will be provided with a built-in local user `cloudadmin` with a specific set of permissions to use only NSX-T functionality for workloads. The new **cloudadmin** user role will be rolled out in other regions in phases. + +> [!NOTE] +> Admin access to NSX-T will not be provided to users for private cloud deployments created after **June 2022**. + +### NSX-T cloud admin user permissions + +The following permissions are assigned to the **cloudadmin** user in Azure VMware Solution NSX-T. + +| Category | Type | Operation | Permission | +|-----------------|-----------------------|----------------------------------------------------------------------|------------------------------------------------------------------| +| Networking | Connectivity | Tier-0 Gateways
                  Tier-1 Gateways
                  Segments | Read-only
                  Full Access
                  Full Access | +| Networking | Network Services | VPN
                  NAT
                  Load Balancing
                  Forwarding Policy
                  Statistics | Full Access
                  Full Access
                  Full Access
                  Read-only
                  Full Access | +| Networking | IP Management | DNS
                  DHCP
                  IP Address Pools | Full Access
                  Full Access
                  Full Access | +| Networking | Profiles | | Full Access | +| Security | East West Security | Distributed Firewall
                  Distributed IDS and IPS
                  Identity Firewall | Full Access
                  Full Access
                  Full Access | +| Security | North South Security | Gateway Firewall
                  URL Analysis | Full Access
                  Full Access | +| Security | Network Introspection | | Read-only | +| Security | Endpoint Protection | | Read-only | +| Security | Settings | | Full Access | +| Inventory | | | Full Access | +| Troubleshooting | IPFIX | | Full Access | +| Troubleshooting | Port Mirroring | | Full Access | +| Troubleshooting | Traceflow | | Full Access | +| System | Configuration
                  Settings
                  Settings
                  Settings | Identity firewall
                  Users and Roles
                  Certificate Management
                  User Interface Settings | Full Access
                  Full Access
                  Full Access
                  Full Access | +| System | All other | | Read-only | + + +You can view the permissions granted to the Azure VMware Solution CloudAdmin role using the following steps: + +1. Log in to the NSX-T Manager. +1. Navigate to **Systems** > **Users and Roles** and locate **User Role Assignment**. +1. The **Roles** column for the CloudAdmin user provides information on the NSX role-based access control (RBAC) roles assigned. +1. Select the the **Roles** tab to view specific permissions associated with each of the NSX RBAC roles. +1. To view **Permissions**, expand the **CloudAdmin** role and select a category like, Networking or Security. -Use the *admin* account to access NSX-T Manager. It has full privileges and lets you create and manage Tier-1 (T1) gateways, segments (logical switches), and all services. In addition, the privileges give you access to the NSX-T Tier-0 (T0) gateway. A change to the T0 gateway could result in degraded network performance or no private cloud access. Open a support request in the Azure portal to request any changes to your NSX-T T0 gateway. +> [!NOTE] +> The current Azure VMware Solution with **NSX-T admin user** will eventually switch from **admin** user to **cloudadmin** user. You'll receive a notification through Azure Service Health that includes the timeline of this change so you can change the NSX-T credentials you've used for the other integration. - ## Next steps Now that you've covered Azure VMware Solution access and identity concepts, you may want to learn about: diff --git a/articles/azure-vmware/concepts-network-design-considerations.md b/articles/azure-vmware/concepts-network-design-considerations.md index 934e2b048ff08..896626a15e6d7 100644 --- a/articles/azure-vmware/concepts-network-design-considerations.md +++ b/articles/azure-vmware/concepts-network-design-considerations.md @@ -2,6 +2,7 @@ title: Concepts - Network design considerations description: Learn about network design considerations for Azure VMware Solution ms.topic: conceptual +ms.service: azure-vmware ms.date: 03/04/2022 --- diff --git a/articles/azure-vmware/concepts-networking.md b/articles/azure-vmware/concepts-networking.md index 420577eb3caa8..a9c0166c49136 100644 --- a/articles/azure-vmware/concepts-networking.md +++ b/articles/azure-vmware/concepts-networking.md @@ -2,6 +2,7 @@ title: Concepts - Network interconnectivity description: Learn about key aspects and use cases of networking and interconnectivity in Azure VMware Solution. ms.topic: conceptual +ms.service: azure-vmware ms.date: 06/28/2021 --- diff --git a/articles/azure-vmware/concepts-private-clouds-clusters.md b/articles/azure-vmware/concepts-private-clouds-clusters.md index e636ca1de902a..d9a6924cc8070 100644 --- a/articles/azure-vmware/concepts-private-clouds-clusters.md +++ b/articles/azure-vmware/concepts-private-clouds-clusters.md @@ -2,6 +2,7 @@ title: Concepts - Private clouds and clusters description: Learn about the key capabilities of Azure VMware Solution software-defined data centers and VMware vSphere clusters. ms.topic: conceptual +ms.service: azure-vmware ms.date: 08/25/2021 --- diff --git a/articles/azure-vmware/concepts-run-command.md b/articles/azure-vmware/concepts-run-command.md index 1836e389e3509..fcebd4f5d49e2 100644 --- a/articles/azure-vmware/concepts-run-command.md +++ b/articles/azure-vmware/concepts-run-command.md @@ -2,6 +2,7 @@ title: Concepts - Run command in Azure VMware Solution (Preview) description: Learn about using run commands in Azure VMware Solution. 
ms.topic: conceptual +ms.service: azure-vmware ms.date: 09/17/2021 --- diff --git a/articles/azure-vmware/concepts-security-recommendations.md b/articles/azure-vmware/concepts-security-recommendations.md index 2fafefe545e76..633cbb5336459 100644 --- a/articles/azure-vmware/concepts-security-recommendations.md +++ b/articles/azure-vmware/concepts-security-recommendations.md @@ -2,6 +2,7 @@ title: Concepts - Security recommendations for Azure VMware Solution description: Learn about tips and best practices to help protect Azure VMware Solution deployments from vulnerabilities and malicious actors. ms.topic: conceptual +ms.service: azure-vmware ms.date: 01/10/2022 --- diff --git a/articles/azure-vmware/concepts-storage.md b/articles/azure-vmware/concepts-storage.md index f1409af05fdfb..565e75549222d 100644 --- a/articles/azure-vmware/concepts-storage.md +++ b/articles/azure-vmware/concepts-storage.md @@ -3,6 +3,7 @@ title: Concepts - Storage description: Learn about storage capacity, storage policies, fault tolerance, and storage integration in Azure VMware Solution private clouds. ms.topic: conceptual ms.custom: contperf-fy21q4 +ms.service: azure-vmware ms.date: 05/02/2022 --- diff --git a/articles/azure-vmware/configure-alerts-for-azure-vmware-solution.md b/articles/azure-vmware/configure-alerts-for-azure-vmware-solution.md index ef8061e88372e..8a9c06c0c804c 100644 --- a/articles/azure-vmware/configure-alerts-for-azure-vmware-solution.md +++ b/articles/azure-vmware/configure-alerts-for-azure-vmware-solution.md @@ -2,6 +2,7 @@ title: Configure alerts and work with metrics in Azure VMware Solution description: Learn how to use alerts to receive notifications. Also learn how to work with metrics to gain deeper insights into your Azure VMware Solution private cloud. ms.topic: how-to +ms.service: azure-vmware ms.date: 07/23/2021 --- diff --git a/articles/azure-vmware/configure-dhcp-azure-vmware-solution.md b/articles/azure-vmware/configure-dhcp-azure-vmware-solution.md index 8f44c0aeed6fa..fa0a2f68a446a 100644 --- a/articles/azure-vmware/configure-dhcp-azure-vmware-solution.md +++ b/articles/azure-vmware/configure-dhcp-azure-vmware-solution.md @@ -3,6 +3,7 @@ title: Configure DHCP for Azure VMware Solution description: Learn how to configure DHCP by using either NSX-T Manager to host a DHCP server or use a third-party external DHCP server. ms.topic: how-to ms.custom: contperf-fy21q2, contperf-fy22q1 +ms.service: azure-vmware ms.date: 04/08/2022 # Customer intent: As an Azure service administrator, I want to configure DHCP by using either NSX-T Manager to host a DHCP server or use a third-party external DHCP server. diff --git a/articles/azure-vmware/configure-dns-azure-vmware-solution.md b/articles/azure-vmware/configure-dns-azure-vmware-solution.md index 1dfac66963ec4..c611fec2d549c 100644 --- a/articles/azure-vmware/configure-dns-azure-vmware-solution.md +++ b/articles/azure-vmware/configure-dns-azure-vmware-solution.md @@ -3,6 +3,7 @@ title: Configure DNS forwarder for Azure VMware Solution description: Learn how to configure DNS forwarder for Azure VMware Solution using the Azure portal. 
ms.topic: how-to ms.custom: contperf-fy22q1 +ms.service: azure-vmware ms.date: 04/11/2022 #Customer intent: As an Azure service administrator, I want to diff --git a/articles/azure-vmware/configure-github-enterprise-server.md b/articles/azure-vmware/configure-github-enterprise-server.md index 78e1196c30ca4..86097e22039c0 100644 --- a/articles/azure-vmware/configure-github-enterprise-server.md +++ b/articles/azure-vmware/configure-github-enterprise-server.md @@ -2,6 +2,7 @@ title: Configure GitHub Enterprise Server on Azure VMware Solution description: Learn how to Set up GitHub Enterprise Server on your Azure VMware Solution private cloud. ms.topic: how-to +ms.service: azure-vmware ms.date: 07/07/2021 --- diff --git a/articles/azure-vmware/configure-hcx-network-extension-high-availability.md b/articles/azure-vmware/configure-hcx-network-extension-high-availability.md index 59d85aea66e6e..d2b45d622c812 100644 --- a/articles/azure-vmware/configure-hcx-network-extension-high-availability.md +++ b/articles/azure-vmware/configure-hcx-network-extension-high-availability.md @@ -2,6 +2,7 @@ title: Configure HCX network extension high availability description: Learn how to configure HCX network extension high availability ms.topic: how-to +ms.service: azure-vmware ms.date: 05/06/2022 --- diff --git a/articles/azure-vmware/configure-hcx-network-extension.md b/articles/azure-vmware/configure-hcx-network-extension.md index caf0df262a505..42d7ab9af8e46 100644 --- a/articles/azure-vmware/configure-hcx-network-extension.md +++ b/articles/azure-vmware/configure-hcx-network-extension.md @@ -2,6 +2,7 @@ title: Create an HCX network extension description: Learn how to extend any networks from your on-premises environment to Azure VMware Solution. ms.topic: how-to +ms.service: azure-vmware ms.date: 09/07/2021 --- diff --git a/articles/azure-vmware/configure-identity-source-vcenter.md b/articles/azure-vmware/configure-identity-source-vcenter.md index 3a7dda21d73d8..9b49945a44440 100644 --- a/articles/azure-vmware/configure-identity-source-vcenter.md +++ b/articles/azure-vmware/configure-identity-source-vcenter.md @@ -2,10 +2,8 @@ title: Configure external identity source for vCenter Server description: Learn how to configure Active Directory over LDAP or LDAPS for vCenter Server as an external identity source. ms.topic: how-to +ms.service: azure-vmware ms.date: 04/22/2022 - - - --- # Configure external identity source for vCenter Server diff --git a/articles/azure-vmware/configure-l2-stretched-vmware-hcx-networks.md b/articles/azure-vmware/configure-l2-stretched-vmware-hcx-networks.md index f541e4684da8f..dedf3fc101f08 100644 --- a/articles/azure-vmware/configure-l2-stretched-vmware-hcx-networks.md +++ b/articles/azure-vmware/configure-l2-stretched-vmware-hcx-networks.md @@ -3,6 +3,7 @@ title: Configure DHCP on L2 stretched VMware HCX networks description: Learn how to send DHCP requests from your Azure VMware Solution VMs to a non-NSX-T DHCP server. ms.topic: how-to ms.custom: contperf-fy22q1 +ms.service: azure-vmware ms.date: 04/11/2022 # Customer intent: As an Azure service administrator, I want to configure DHCP on L2 stretched VMware HCX networks to send DHCP requests from my Azure VMware Solution VMs to a non-NSX-T DHCP server. 
diff --git a/articles/azure-vmware/configure-nsx-network-components-azure-portal.md b/articles/azure-vmware/configure-nsx-network-components-azure-portal.md index 42b2112cd9617..e1c598c5a8875 100644 --- a/articles/azure-vmware/configure-nsx-network-components-azure-portal.md +++ b/articles/azure-vmware/configure-nsx-network-components-azure-portal.md @@ -2,6 +2,7 @@ title: Configure NSX-T Data Center network components using Azure VMware Solution description: Learn how to use the Azure VMware Solution to configure NSX-T Data Center network segments. ms.topic: reference +ms.service: azure-vmware ms.date: 04/11/2022 # Customer intent: As an Azure service administrator, I want to configure NSX-T Data Center network components using a simplified view of NSX-T Data Center operations a VMware administrator needs daily. The simplified view is targeted at users unfamiliar with NSX-T Manager. diff --git a/articles/azure-vmware/configure-port-mirroring-azure-vmware-solution.md b/articles/azure-vmware/configure-port-mirroring-azure-vmware-solution.md index 1cc61ff260a2b..1f4cf91408dc3 100644 --- a/articles/azure-vmware/configure-port-mirroring-azure-vmware-solution.md +++ b/articles/azure-vmware/configure-port-mirroring-azure-vmware-solution.md @@ -3,6 +3,7 @@ title: Configure port mirroring for Azure VMware Solution description: Learn how to configure port mirroring to monitor network traffic that involves forwarding a copy of each packet from one network switch port to another. ms.topic: how-to ms.custom: contperf-fy22q1 +ms.service: azure-vmware ms.date: 04/11/2022 # Customer intent: As an Azure service administrator, I want to configure port mirroring to monitor network traffic that involves forwarding a copy of each packet from one network switch port to another. diff --git a/articles/azure-vmware/configure-site-to-site-vpn-gateway.md b/articles/azure-vmware/configure-site-to-site-vpn-gateway.md index 9feef620df913..10a84493b74a7 100644 --- a/articles/azure-vmware/configure-site-to-site-vpn-gateway.md +++ b/articles/azure-vmware/configure-site-to-site-vpn-gateway.md @@ -3,6 +3,7 @@ title: Configure a site-to-site VPN in vWAN for Azure VMware Solution description: Learn how to establish a VPN (IPsec IKEv1 and IKEv2) site-to-site tunnel into Azure VMware Solutions. ms.topic: how-to ms.custom: contperf-fy22q1 +ms.service: azure-vmware ms.date: 04/11/2022 --- diff --git a/articles/azure-vmware/configure-storage-policy.md b/articles/azure-vmware/configure-storage-policy.md index 9b60561dcf22a..be5b7bb7808da 100644 --- a/articles/azure-vmware/configure-storage-policy.md +++ b/articles/azure-vmware/configure-storage-policy.md @@ -2,6 +2,7 @@ title: Configure storage policy description: Learn how to configure storage policy for your Azure VMware Solution virtual machines. ms.topic: how-to +ms.service: azure-vmware ms.date: 04/11/2022 #Customer intent: As an Azure service administrator, I want set the VMware vSAN storage policies to determine how storage is allocated to the VM. diff --git a/articles/azure-vmware/configure-vmware-hcx.md b/articles/azure-vmware/configure-vmware-hcx.md index 52626ec0d2679..db03350c0c2e7 100644 --- a/articles/azure-vmware/configure-vmware-hcx.md +++ b/articles/azure-vmware/configure-vmware-hcx.md @@ -2,6 +2,7 @@ title: Configure VMware HCX in Azure VMware Solution description: Configure the on-premises VMware HCX Connector for your Azure VMware Solution private cloud. 
ms.topic: tutorial +ms.service: azure-vmware ms.date: 09/07/2021 --- diff --git a/articles/azure-vmware/configure-vmware-syslogs.md b/articles/azure-vmware/configure-vmware-syslogs.md index 308b91d815fb1..246f415c630b6 100644 --- a/articles/azure-vmware/configure-vmware-syslogs.md +++ b/articles/azure-vmware/configure-vmware-syslogs.md @@ -2,6 +2,7 @@ title: Configure VMware syslogs for Azure VMware Solution description: Learn how to configure diagnostic settings to collect VMware syslogs for your Azure VMware Solution private cloud. ms.topic: how-to +ms.service: azure-vmware ms.date: 04/11/2022 #Customer intent: As an Azure service administrator, I want to collect VMware syslogs and store it in my storage account so that I can view the vCenter Server logs and analyze for any diagnostic purposes. @@ -13,6 +14,15 @@ ms.date: 04/11/2022 Diagnostic settings are used to configure streaming export of platform logs and metrics for a resource to the destination of your choice. You can create up to five different diagnostic settings to send different logs and metrics to independent destinations. In this article, you'll configure a diagnostic setting to collect VMware syslogs for your Azure VMware Solution private cloud. You'll store the syslog to a storage account to view the vCenter Server logs and analyze for diagnostic purposes. + >[!IMPORTANT] + >The **VMware syslogs** contains the following logs: + > - Distributed Firewall Logs + >- NSX-T Manager Logs + >- Gateway Firewall Logs + >- ESXi Logs + >- vCenter Logs + >- NSX Edge Logs + ## Prerequisites diff --git a/articles/azure-vmware/configure-windows-server-failover-cluster.md b/articles/azure-vmware/configure-windows-server-failover-cluster.md index 017dbf8f259e5..bd649a5a7dd95 100644 --- a/articles/azure-vmware/configure-windows-server-failover-cluster.md +++ b/articles/azure-vmware/configure-windows-server-failover-cluster.md @@ -2,6 +2,7 @@ title: Configure Windows Server Failover Cluster on Azure VMware Solution vSAN description: Learn how to configure Windows Server Failover Cluster (WSFC) on Azure VMware Solution vSAN with native shared disks. ms.topic: how-to +ms.service: azure-vmware ms.date: 04/11/2022 --- diff --git a/articles/azure-vmware/connect-multiple-private-clouds-same-region.md b/articles/azure-vmware/connect-multiple-private-clouds-same-region.md index 47b27fcb665b7..a3c244e946cd5 100644 --- a/articles/azure-vmware/connect-multiple-private-clouds-same-region.md +++ b/articles/azure-vmware/connect-multiple-private-clouds-same-region.md @@ -2,6 +2,7 @@ title: Connect multiple Azure VMware Solution private clouds in the same region description: Learn how to create a network connection between two or more Azure VMware Solution private clouds located in the same region. ms.topic: how-to +ms.service: azure-vmware ms.date: 09/20/2021 #Customer intent: As an Azure service administrator, I want create a network connection between two or more Azure VMware Solution private clouds located in the same region. diff --git a/articles/azure-vmware/create-placement-policy.md b/articles/azure-vmware/create-placement-policy.md index 54005c698dde4..577c98a87e8f7 100644 --- a/articles/azure-vmware/create-placement-policy.md +++ b/articles/azure-vmware/create-placement-policy.md @@ -2,6 +2,7 @@ title: Create placement policy description: Learn how to create a placement policy in Azure VMware Solution to control the placement of virtual machines (VMs) on hosts within a cluster through the Azure portal. 
ms.topic: how-to +ms.service: azure-vmware ms.date: 04/07/2022 #Customer intent: As an Azure service administrator, I want to control the placement of virtual machines on hosts within a cluster in my private cloud. diff --git a/articles/azure-vmware/deploy-arc-for-azure-vmware-solution.md b/articles/azure-vmware/deploy-arc-for-azure-vmware-solution.md index 8080604706262..0457a7b13c08a 100644 --- a/articles/azure-vmware/deploy-arc-for-azure-vmware-solution.md +++ b/articles/azure-vmware/deploy-arc-for-azure-vmware-solution.md @@ -2,27 +2,28 @@ title: Deploy Arc for Azure VMware Solution (Preview) description: Learn how to set up and enable Arc for your Azure VMware Solution private cloud. ms.topic: how-to -ms.date: 01/31/2022 +ms.service: azure-vmware +ms.date: 04/11/2022 ms.custom: references_regions --- # Deploy Arc for Azure VMware Solution (Preview) -In this article, you'll learn how to deploy Arc for Azure VMware Solution. Once you've set up the components needed for this public preview, you'll be ready to execute operations in Azure VMware Solution vCenter from the Azure portal. Operations are related to Create, Read, Update, and Delete (CRUD) virtual machines (VMs) in an Arc-enabled Azure VMware Solution private cloud. Users can also enable guest management and install Azure extensions once the private cloud is Arc-enabled. +In this article, you'll learn how to deploy Arc for Azure VMware Solution. Once you've set up the components needed for this public preview, you'll be ready to execute operations in Azure VMware Solution vCenter Server from the Azure portal. Operations are related to Create, Read, Update, and Delete (CRUD) virtual machines (VMs) in an Arc-enabled Azure VMware Solution private cloud. Users can also enable guest management and install Azure extensions once the private cloud is Arc-enabled. Before you begin checking off the prerequisites, verify the following actions have been done: - You deployed an Azure VMware Solution private cluster. - You have a connection to the Azure VMware Solution private cloud through your on-prem environment or your native Azure Virtual Network. -- There should be an isolated NSX-T segment for deploying the Arc for Azure VMware Solution Open Virtualization Appliance (OVA). If an isolated NSX-T segment doesn't exist, one will be created. +- There should be an isolated NSX-T Data Center segment for deploying the Arc for Azure VMware Solution Open Virtualization Appliance (OVA). If an isolated NSX-T Data Center segment doesn't exist, one will be created. ## Prerequisites The following items are needed to ensure you're set up to begin the onboarding process to deploy Arc for Azure VMware Solution (Preview). - A jump box virtual machine (VM) with network access to the Azure VMware Solution vCenter. - - From the jump-box VM, verify you have access to [vCenter and NSX-T portals](./tutorial-configure-networking.md). + - From the jump-box VM, verify you have access to [vCenter Server and NSX-T Manager portals](./tutorial-configure-networking.md). - Verify that your Azure subscription has been enabled or you have connectivity to Azure end points, mentioned in the [Appendices](#appendices). - Resource group in the subscription where you have owner or contributor role. - A minimum of three free non-overlapping IPs addresses. @@ -34,7 +35,7 @@ The following items are needed to ensure you're set up to begin the onboarding p > [!NOTE] > Only the default port of 443 is supported. If you use a different port, Appliance VM creation will fail. 
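Part of verifying that your subscription is ready is confirming that the Arc for Azure VMware Solution preview feature is registered. A minimal Azure CLI sketch, assuming you're already signed in to the target subscription, might look like the following:

```bash
# Register the preview feature for Arc on Azure VMware Solution, then wait for it to report "Registered".
az feature register --namespace Microsoft.AVS --name AzureArcForAVS
az feature show --namespace Microsoft.AVS --name AzureArcForAVS --query properties.state --output tsv

# Re-register the resource provider so the newly registered feature takes effect.
az provider register --namespace Microsoft.AVS
```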
-At this point, you should have already deployed an Azure VMware Solution private cluster. You need to have a connection from your on-prem environment or your native Azure Virtual Network to the Azure VMware Solution private cloud. +At this point, you should have already deployed an Azure VMware Solution private cloud. You need to have a connection from your on-prem environment or your native Azure Virtual Network to the Azure VMware Solution private cloud. For Network planning and setup, use the [Network planning checklist - Azure VMware Solution | Microsoft Docs](./tutorial-network-checklist.md) @@ -64,7 +65,7 @@ az feature show --name AzureArcForAVS --namespace Microsoft.AVS Use the following steps to guide you through the process to onboard in Arc for Azure VMware Solution (Preview). -1. Sign into the jumpbox VM and extract the contents from the compressed file from the following [location](https://github.com/Azure/ArcOnAVS/releases/tag/v2.0.0). The extracted file contains the scripts to install the preview software. +1. Sign into the jumpbox VM and extract the contents from the compressed file from the following [location](https://github.com/Azure/ArcOnAVS/releases/latest). The extracted file contains the scripts to install the preview software. 1. Open the 'config_avs.json' file and populate all the variables. **Config JSON** @@ -143,8 +144,8 @@ Use the following steps to guide you through the process to onboard in Arc for A When Arc appliance is successfully deployed on your private cloud, you can do the following actions. - View the status from within the private cloud under **Operations > Azure Arc**, located in the left navigation. -- View the VMware infrastructure resources from the private cloud left navigation under **Private cloud** then select **Azure Arc vCenter resources**. -- Discover your VMware infrastructure resources and project them to Azure using the same browser experience, **Private cloud > Arc vCenter resources > Virtual Machines**. +- View the VMware vSphere infrastructure resources from the private cloud left navigation under **Private cloud** then select **Azure Arc vCenter resources**. +- Discover your VMware vSphere infrastructure resources and project them to Azure using the same browser experience, **Private cloud > Arc vCenter resources > Virtual Machines**. - Similar to VMs, customers can enable networks, templates, resource pools, and data-stores in Azure. After you've enabled VMs to be managed from Azure, you can install guest management and do the following actions. @@ -153,7 +154,7 @@ After you've enabled VMs to be managed from Azure, you can install guest managem - To enable guest management, customers will be required to use admin credentials - VMtools should already be running on the VM > [!NOTE] -> Azure VMware Solution vCenter will be available in global search but will NOT be available in the list of vCenters for Arc for VMware. +> Azure VMware Solution vCenter Server will be available in global search but will NOT be available in the list of vCenter Servers for Arc for VMware. - Customers can view the list of VM extensions available in public preview. 
- Change tracking @@ -169,25 +170,25 @@ When the script has run successfully, you can check the status to see if Azure A :::image type="content" source="media/deploy-arc-for-azure-vmware-solution/arc-private-cloud-configured.png" alt-text="Image showing navigation to Azure Arc state to verify it's configured."lightbox="media/deploy-arc-for-azure-vmware-solution/arc-private-cloud-configured.png"::: -**Arc enabled VMware resources** +**Arc enabled VMware vSphere resources** After the private cloud is Arc-enabled, vCenter resources should appear under **Virtual machines**. - From the left navigation, under **Azure Arc VMware resources (preview)**, locate **Virtual machines**. -- Choose **Virtual machines** to view the vCenter resources. +- Choose **Virtual machines** to view the vCenter Server resources. ### Manage access to VMware resources through Azure Role-Based Access Control -After your Azure VMware Solution vCenter resources have been enabled for access through Azure, there's one final step in setting up a self-service experience for your teams. You'll need to provide your teams with access to: compute, storage, networking, and other vCenter resources used to configure VMs. +After your Azure VMware Solution vCenter resources have been enabled for access through Azure, there's one final step in setting up a self-service experience for your teams. You'll need to provide your teams with access to: compute, storage, networking, and other vCenter Server resources used to configure VMs. -This section will demonstrate how to use custom roles to manage granular access to VMware resources through Azure. +This section will demonstrate how to use custom roles to manage granular access to VMware vSphere resources through Azure. #### Arc-enabled VMware vSphere custom roles Three custom roles are provided to meet your Role-based access control (RBAC) requirements. These roles can be applied to a whole subscription, resource group, or a single resource. -- Azure Arc VMware Administrator role -- Azure Arc VMware Private Cloud User role -- Azure Arc VMware VM Contributor role +- Azure Arc VMware vSphere Administrator role +- Azure Arc VMware vSphere Private Cloud User role +- Azure Arc VMware vSphere VM Contributor role The first role is for an Administrator. The other two roles apply to anyone who needs to deploy or manage a VM. @@ -211,7 +212,7 @@ We recommend assigning this role at the subscription level or resource group you 1. Navigate to the Azure portal. 1. Locate the subscription, resource group, or the resource at the scope you want to provide for the custom role. -1. Find the Arc-enabled Azure VMware Solution vCenter resources. +1. Find the Arc-enabled Azure VMware Solution vCenter Server resources. 1. Navigate to the resource group and select the **Show hidden types** checkbox. 1. Search for "Azure VMware Solution". 1. Select **Access control (IAM)** in the table of contents located on the left navigation. @@ -225,7 +226,7 @@ We recommend assigning this role at the subscription level or resource group you ## Create Arc-enabled Azure VMware Solution virtual machine -This section shows users how to create a virtual machine (VM) on VMware vCenter using Azure Arc. Before you begin, check the following prerequisite list to ensure you're set up and ready to create an Arc-enabled Azure VMware Solution VM. +This section shows users how to create a virtual machine (VM) on VMware vCenter Server using Azure Arc. 
Before you begin, check the following prerequisite list to ensure you're set up and ready to create an Arc-enabled Azure VMware Solution VM. ### Prerequisites @@ -255,7 +256,7 @@ Near the top of the **Virtual machines** page, you'll find five tabs labeled: ** 1. The connectivity method defaults to **Public endpoint**. Create a **Username**, **Password**, and **Confirm password**. **Disks** - - You can opt to change the disks configured in the template, add more disks, or update existing disks. These disks will be created on the default datastore per the VMware vCenter storage policies. + - You can opt to change the disks configured in the template, add more disks, or update existing disks. These disks will be created on the default datastore per the VMware vCenter Server storage policies. - You can change the network interfaces configured in the template, add Network interface cards (NICs), or update existing NICs. You can also change the network that the NIC will be attached to provided you have permissions to the network resource. **Networking** @@ -270,12 +271,12 @@ Near the top of the **Virtual machines** page, you'll find five tabs labeled: ** ## Enable guest management and extension installation -The guest management must be enabled on the VMware virtual machine (VM) before you can install an extension. Use the following prerequisite steps to enable guest management. +The guest management must be enabled on the VMware vSphere virtual machine (VM) before you can install an extension. Use the following prerequisite steps to enable guest management. **Prerequisite** 1. Navigate to [Azure portal](https://ms.portal.azure.com/). -1. Locate the VMware VM you want to check for guest management and install extensions on, select the name of the VM. +1. Locate the VMware vSphere VM you want to check for guest management and install extensions on, select the name of the VM. 1. Select **Configuration** from the left navigation for a VMware VM. 1. Verify **Enable guest management** has been checked. @@ -283,7 +284,7 @@ The guest management must be enabled on the VMware virtual machine (VM) before y > The following conditions are necessary to enable guest management on a VM. - The machine must be running a [Supported operating system](../azure-arc/servers/agent-overview.md). -- The machine needs to connect through the firewall to communicate over the Internet. Make sure the [URLs](../azure-arc/servers/agent-overview.md) listed aren't blocked. +- The machine needs to connect through the firewall to communicate over the internet. Make sure the [URLs](../azure-arc/servers/agent-overview.md) listed aren't blocked. - The machine can't be behind a proxy, it's not supported yet. - If you're using Linux VM, the account must not prompt to sign in on pseudo commands. @@ -311,7 +312,7 @@ When the extension installation steps are completed, they trigger deployment and Use the following guide to change your Arc appliance credential once you've changed your SDDC credentials. -Use the **`Set Credential`** command to update the provider credentials for appliance resource. When **cloud admin** credentials are updated, use the following steps to update the credentials in the appliance store. +Use the **`Set Credential`** command to update the provider credentials for appliance resource. When **cloudadmin** credentials are updated, use the following steps to update the credentials in the appliance store. 1. Log into the jumpbox VM from where onboarding was performed. Change the directory to **onboarding directory**. 
1. Run the following command for Windows-based jumpbox VM. @@ -342,7 +343,7 @@ The following command invokes the set credential for the specified appliance res Use the following steps to perform a manual upgrade for Arc appliance virtual machine (VM). -1. Log into vCenter. +1. Log into vCenter Server. 1. Locate the Arc appliance VM, which should be in the resource pool that was configured during onboarding. 1. Power off the VM. 1. Delete the VM. @@ -355,7 +356,7 @@ Use the following steps to perform a manual upgrade for Arc appliance virtual ma ## Off board from Azure Arc-enabled Azure VMware Solution -This section demonstrates how to remove your VMware virtual machines (VMs) from Azure management services. +This section demonstrates how to remove your VMware vSphere virtual machines (VMs) from Azure management services. If you've enabled guest management on your Arc-enabled Azure VMware Solution VMs and onboarded them to Azure management services by installing VM extensions on them, you'll need to uninstall the extensions to prevent continued billing. For example, if you installed an MMA extension to collect and send logs to an Azure Log Analytics workspace, you'll need to uninstall that extension. You'll also need to uninstall the Azure Connected Machine agent to avoid any problems installing the agent in future. @@ -382,23 +383,23 @@ To avoid problems onboarding the same VM to **Guest management**, we recommend y ## Remove Arc-enabled Azure VMware Solution vSphere resources from Azure -When you activate Arc-enabled Azure VMware Solution resources in Azure, a representation is created for them in Azure. Before you can delete the vCenter resource in Azure, you'll need to delete all of the Azure resource representations you created for your vSphere resources. To delete the Azure resource representations you created, do the following steps: +When you activate Arc-enabled Azure VMware Solution resources in Azure, a representation is created for them in Azure. Before you can delete the vCenter Server resource in Azure, you'll need to delete all of the Azure resource representations you created for your vSphere resources. To delete the Azure resource representations you created, do the following steps: 1. Go to the Azure portal. -1. Choose **Virtual machines** from Arc-enabled VMware resources in the private cloud. +1. Choose **Virtual machines** from Arc-enabled VMware vSphere resources in the private cloud. 1. Select all the VMs that have an Azure Enabled value as **Yes**. -1. Select **Remove from Azure**. This step will start deployment and remove these resources from Azure. The resources will remain in your vCenter. +1. Select **Remove from Azure**. This step will start deployment and remove these resources from Azure. The resources will remain in your vCenter Server. 1. Repeat steps 2, 3 and 4 for **Resourcespools/clusters/hosts**, **Templates**, **Networks**, and **Datastores**. 1. When the deletion completes, select **Overview**. 1. Note the Custom location and the Azure Arc Resource bridge resources in the Essentials section. 1. Select **Remove from Azure** to remove the vCenter resource from Azure. -1. Go to vCenter resource in Azure and delete it. +1. Go to vCenter Server resource in Azure and delete it. 1. Go to the Custom location resource and select **Delete**. 1. Go to the Azure Arc Resource bridge resources and select **Delete**. At this point, all of your Arc-enabled VMware vSphere resources have been removed from Azure. 
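The portal flow above is the documented path. As a hedged alternative for the final two deletions, the custom location and the Azure Arc Resource bridge can also be removed with the Azure CLI; the resource group and custom location names below are placeholders, and the commands assume the `customlocation` CLI extension is installed.

```bash
# Placeholder: the resource group that holds the Arc artifacts created during onboarding.
RG=avs-arc-rg

# Delete the custom location created during onboarding.
az customlocation delete --name avs-custom-location --resource-group $RG

# Find and delete the Azure Arc Resource bridge (appliance) resource by its type.
APPLIANCE_ID=$(az resource list --resource-group $RG \
  --resource-type Microsoft.ResourceConnector/appliances --query '[0].id' --output tsv)
az resource delete --ids "$APPLIANCE_ID"
```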
-## Delete Arc resources from vCenter +## Delete Arc resources from vCenter Server For the final step, you'll need to delete the resource bridge VM and the VM template that were created during the onboarding process. Once that step is done, Arc won't work on the Azure VMware Solution SDDC. When you delete Arc resources from vCenter, it won't affect the Azure VMware Solution private cloud for the customer. @@ -492,4 +493,4 @@ Appendix 1 shows proxy URLs required by the Azure Arc-enabled private cloud. The **Additional URL resources** - [Google Container Registry](http://gcr.io/) -- [Red Hat Quay.io](http://quay.io/) \ No newline at end of file +- [Red Hat Quay.io](http://quay.io/) diff --git a/articles/azure-vmware/deploy-azure-vmware-solution.md b/articles/azure-vmware/deploy-azure-vmware-solution.md index a63b42e42f5f5..e2a5526e0b9f1 100644 --- a/articles/azure-vmware/deploy-azure-vmware-solution.md +++ b/articles/azure-vmware/deploy-azure-vmware-solution.md @@ -3,6 +3,7 @@ title: Deploy and configure Azure VMware Solution description: Learn how to use the information gathered in the planning stage to deploy and configure the Azure VMware Solution private cloud. ms.topic: tutorial ms.custom: contperf-fy22q1, devx-track-azurecli +ms.service: azure-vmware ms.date: 07/28/2021 --- diff --git a/articles/azure-vmware/deploy-disaster-recovery-using-jetstream.md b/articles/azure-vmware/deploy-disaster-recovery-using-jetstream.md index 7cfe146ee453a..269ed8a0c7e5d 100644 --- a/articles/azure-vmware/deploy-disaster-recovery-using-jetstream.md +++ b/articles/azure-vmware/deploy-disaster-recovery-using-jetstream.md @@ -2,6 +2,7 @@ title: Deploy disaster recovery using JetStream DR description: Learn how to implement JetStream DR for your Azure VMware Solution private cloud and on-premises VMware workloads. ms.topic: how-to +ms.service: azure-vmware ms.date: 04/11/2022 ms.custom: references_regions --- @@ -233,7 +234,7 @@ Once JetStream DR MSA and JetStream VIB are installed on the Azure VMware Soluti 1. [Select the VMs](https://www.jetstreamsoft.com/portal/jetstream-knowledge-base/select-vms-for-protection/) you want to protect and then [start VM protection](https://www.jetstreamsoft.com/portal/jetstream-knowledge-base/start-vm-protection/). -For remaining configuration steps for JetStream DR, such as creating a failover runbook, invoking failover to the DR site, and invoking failback to the primary site, see the [JetStream Admin Guide documentation](https://www.jetstreamsoft.com/portal/jetstream-article-categories/product-manual/). +For remaining configuration steps for JetStream DR, such as creating a failover runbook, invoking failover to the DR site, and invoking failback to the primary site, see the [JetStream Admin Guide documentation](https://docs.delphix.com/docs51/delphix-jet-stream/jet-stream-admin-guide). ## Disable JetStream DR on an Azure VMware Solution cluster diff --git a/articles/azure-vmware/deploy-disaster-recovery-using-vmware-hcx.md b/articles/azure-vmware/deploy-disaster-recovery-using-vmware-hcx.md index 4d0c7a83a949d..dedb1617077f5 100644 --- a/articles/azure-vmware/deploy-disaster-recovery-using-vmware-hcx.md +++ b/articles/azure-vmware/deploy-disaster-recovery-using-vmware-hcx.md @@ -2,6 +2,7 @@ title: Deploy disaster recovery using VMware HCX description: Learn how to deploy disaster recovery of your virtual machines (VMs) with VMware HCX Disaster Recovery. Also learn how to use Azure VMware Solution as the recovery or target site. 
ms.topic: how-to +ms.service: azure-vmware ms.date: 06/10/2021 --- diff --git a/articles/azure-vmware/deploy-traffic-manager-balance-workloads.md b/articles/azure-vmware/deploy-traffic-manager-balance-workloads.md index 2ab39ce9399bc..a3c13bef251ef 100644 --- a/articles/azure-vmware/deploy-traffic-manager-balance-workloads.md +++ b/articles/azure-vmware/deploy-traffic-manager-balance-workloads.md @@ -2,6 +2,7 @@ title: Deploy Traffic Manager to balance Azure VMware Solution workloads description: Learn how to integrate Traffic Manager with Azure VMware Solution to balance application workloads across multiple endpoints in different regions. ms.topic: how-to +ms.service: azure-vmware ms.date: 02/08/2021 --- diff --git a/articles/azure-vmware/deploy-vm-content-library.md b/articles/azure-vmware/deploy-vm-content-library.md index 41113a9960d24..3bf9b8b54051d 100644 --- a/articles/azure-vmware/deploy-vm-content-library.md +++ b/articles/azure-vmware/deploy-vm-content-library.md @@ -2,7 +2,8 @@ title: Create a content library to deploy VMs in Azure VMware Solution description: Create a content library to deploy a VM in an Azure VMware Solution private cloud. ms.topic: how-to -ms.date: 06/28/2021 +ms.service: azure-vmware +ms.date: 04/11/2022 --- # Create a content library to deploy VMs in Azure VMware Solution @@ -13,7 +14,7 @@ In this article, you'll create a content library in the vSphere Client and then ## Prerequisites -An NSX-T segment and a managed DHCP service are required to complete this tutorial. For more information, see [Configure DHCP for Azure VMware Solution](configure-dhcp-azure-vmware-solution.md). +An NSX-T Data Center segment and a managed DHCP service are required to complete this tutorial. For more information, see [Configure DHCP for Azure VMware Solution](configure-dhcp-azure-vmware-solution.md). ## Create a content library @@ -25,7 +26,7 @@ An NSX-T segment and a managed DHCP service are required to complete this tutori :::image type="content" source="media/content-library/create-new-content-library.png" alt-text="Screenshot showing how to create a new content library in vSphere."::: -1. Provide a name and confirm the IP address of the vCenter server and select **Next**. +1. Provide a name and confirm the IP address of the vCenter Server and select **Next**. :::image type="content" source="media/content-library/new-content-library-step-1.png" alt-text="Screenshot showing the name and vCenter Server IP for the new content library."::: diff --git a/articles/azure-vmware/deploy-zerto-disaster-recovery.md b/articles/azure-vmware/deploy-zerto-disaster-recovery.md index 7d01fc90ce478..79b9f381eac4d 100644 --- a/articles/azure-vmware/deploy-zerto-disaster-recovery.md +++ b/articles/azure-vmware/deploy-zerto-disaster-recovery.md @@ -2,6 +2,7 @@ title: Deploy Zerto disaster recovery on Azure VMware Solution (Initial Availability) description: Learn how to implement Zerto disaster recovery for on-premises VMware or Azure VMware Solution virtual machines. ms.topic: how-to +ms.service: azure-vmware ms.date: 10/25/2021 --- @@ -123,7 +124,7 @@ You can reuse pre-existing Zerto product licenses for Azure VMware Solution envi ### How is Zerto supported? -Zerto disaster recovery is a solution that is sold and supported by Zerto. For any support issue with Zerto disaster recovery, always contact [Zerto support](https://www.zerto.com/company/support-and-service/support/). +Zerto disaster recovery is a solution that is sold and supported by Zerto. 
For any support issue with Zerto disaster recovery, always contact [Zerto support](https://www.zerto.com/support-and-services/). Zerto and Microsoft support teams will engage each other as needed to troubleshoot Zerto disaster recovery issues on Azure VMware Solution. diff --git a/articles/azure-vmware/disable-internet-access.md b/articles/azure-vmware/disable-internet-access.md new file mode 100644 index 0000000000000..7b9d7512e536e --- /dev/null +++ b/articles/azure-vmware/disable-internet-access.md @@ -0,0 +1,36 @@ +--- +title: Disable internet access or enable a default route +description: This article explains how to disable internet access for Azure VMware Solution and enable default route for Azure VMware Solution. +ms.topic: how-to +ms.service: azure-vmware +ms.date: 05/12/2022 +--- + +# Disable internet access or enable a default route +In this article, you'll learn how to disable Internet access or enable a default route for your Azure VMware Solution private cloud. There are multiple ways to set up a default route. You can use a Virtual WAN hub, Network Virtual Appliance in a Virtual Network, or use a default route from on-premise. If you don't set up a default route, there will be no Internet access to your Azure VMware Solution private cloud. + +With a default route setup, you can achieve the following tasks: +- Disable Internet access to your Azure VMware Solution private cloud. + + > [!Note] + > Ensure that a default route is not advertised from on-premises or Azure as that will override this setup. + +- Enable Internet access by generating a default route from Azure Firewall or third-party Network Virtual Appliance. +## Prerequisites +- If Internet access is required, a default route must be advertised from an Azure Firewall, Network Virtual Appliance or Virtual WAN Hub. +- Azure VMware Solution private cloud. +## Disable Internet access or enable a default route in the Azure portal +1. Log in to the Azure portal. +1. Search for **Azure VMware Solution** and select it. +1. Locate and select your Azure VMware Solution private cloud. +1. On the left navigation, under **Workload networking**, select **Internet connectivity**. +1. Select the **Don't connect or connect using default route from Azure** button and select **Save**. +If you don't have a default route from on-premises or from Azure, you have successfully disabled Internet connectivity to your Azure VMware Solution private cloud. + +## Next steps + +[Internet connectivity design considerations (Preview)](concepts-design-public-internet-access.md) + +[Enable Managed SNAT for Azure VMware Solution Workloads](enable-managed-snat-for-workloads.md) + +[Enable Public IP to the NSX Edge for Azure VMware Solution](enable-public-ip-nsx-edge.md) diff --git a/articles/azure-vmware/disaster-recovery-using-vmware-site-recovery-manager.md b/articles/azure-vmware/disaster-recovery-using-vmware-site-recovery-manager.md index ed911e6666def..76bb918b135ed 100644 --- a/articles/azure-vmware/disaster-recovery-using-vmware-site-recovery-manager.md +++ b/articles/azure-vmware/disaster-recovery-using-vmware-site-recovery-manager.md @@ -2,7 +2,8 @@ title: Deploy disaster recovery with VMware Site Recovery Manager description: Deploy disaster recovery with VMware Site Recovery Manager (SRM) in your Azure VMware Solution private cloud. 
ms.topic: how-to -ms.date: 10/04/2021 +ms.service: azure-vmware +ms.date: 04/11/2022 --- # Deploy disaster recovery with VMware Site Recovery Manager @@ -20,7 +21,7 @@ In this article, you'll implement disaster recovery for on-premises VMware virtu SRM helps you plan, test, and run the recovery of VMs between a protected vCenter Server site and a recovery vCenter Server site. You can use SRM with Azure VMware Solution with the following two DR scenarios: -- On-premise VMware to Azure VMware Solution private cloud disaster recovery +- On-premises VMware to Azure VMware Solution private cloud disaster recovery - Primary Azure VMware Solution to Secondary Azure VMware Solution private cloud disaster recovery The diagram shows the deployment of the primary Azure VMware Solution to secondary Azure VMware Solution scenario. @@ -49,13 +50,13 @@ You can use SRM to implement different types of recovery, such as: ## Deployment workflow -The workflow diagram shows the Primary Azure VMware Solution to secondary workflow. In addition, it shows steps to take within the Azure portal and the VMware environments of Azure VMware Solution to achieve the end-to-end protection of VMs. +The workflow diagram shows the Primary Azure VMware Solution to secondary workflow. In addition, it shows steps to take within the Azure portal and the VMware vSphere environments of Azure VMware Solution to achieve the end-to-end protection of VMs. :::image type="content" source="media/vmware-srm-vsphere-replication/site-recovery-manager-workflow.png" alt-text="Diagram showing the deployment workflow for VMware Site Recovery Manager on Azure VMware Solution." border="false"::: ## Prerequisites -Make sure you've explicitly provided the remote user the VRM administrator and SRM administrator roles in the remote vCenter. +Make sure you've explicitly provided the remote user the VRM administrator and SRM administrator roles in the remote vCenter Server. ### Scenario: On-premises to Azure VMware Solution @@ -81,7 +82,7 @@ Make sure you've explicitly provided the remote user the VRM administrator and S ## Install SRM in Azure VMware Solution -1. In your on-premises datacenter, install VMware SRM and vSphere. +1. In your on-premises datacenter, install VMware SRM and vSphere Replication. >[!NOTE] >Use the [Two-site Topology with one vCenter Server instance per PSC](https://docs.vmware.com/en/Site-Recovery-Manager/8.4/com.vmware.srm.install_config.doc/GUID-F474543A-88C5-4030-BB86-F7CC51DADE22.html) deployment model. Also, make sure that the [required vSphere Replication Network ports](https://kb.VMware.com/s/article/2087769) are opened. @@ -124,11 +125,11 @@ After the SRM appliance installs successfully, you'll need to install the vSpher :::image type="content" source="media/vmware-srm-vsphere-replication/vsphere-replication-3.png" alt-text="Screenshot showing that both SRM and the replication appliance are installed."::: -## Configure site pairing in vCenter +## Configure site pairing in vCenter Server -After installing VMware SRM and vSphere Replication, you need to complete the configuration and site pairing in vCenter. +After installing VMware SRM and vSphere Replication, you need to complete the configuration and site pairing in vCenter Server. -1. Sign in to vCenter as cloudadmin@vsphere.local. +1. Sign in to vCenter Server as cloudadmin@vsphere.local. 1. Navigate to **Site Recovery**, check the status of both vSphere Replication and VMware SRM, and then select **OPEN Site Recovery** to launch the client. 
@@ -142,13 +143,13 @@ After installing VMware SRM and vSphere Replication, you need to complete the co 1. Enter the remote site details, and then select **NEXT**. >[!NOTE] - >An Azure VMware Solution private cloud operates with an embedded Platform Services Controller (PSC), so only one local vCenter can be selected. If the remote vCenter is using an embedded Platform Service Controller (PSC), use the vCenter's FQDN (or its IP address) and port to specify the PSC. + >An Azure VMware Solution private cloud operates with an embedded Platform Services Controller (PSC), so only one local vCenter can be selected. If the remote vCenter Server is using an embedded Platform Service Controller (PSC), use the vCenter Server's FQDN (or its IP address) and port to specify the PSC. > - >The remote user must have sufficient permissions to perform the pairings. An easy way to ensure this is to give that user the VRM administrator and SRM administrator roles in the remote vCenter. For a remote Azure VMware Solution private cloud, cloudadmin is configured with those roles. + >The remote user must have sufficient permissions to perform the pairings. An easy way to ensure this is to give that user the VRM administrator and SRM administrator roles in the remote vCenter Server. For a remote Azure VMware Solution private cloud, cloudadmin is configured with those roles. :::image type="content" source="media/vmware-srm-vsphere-replication/pair-the-sites-specify-details.png" alt-text="Screenshot showing the Site details for the new site pair." border="true" lightbox="media/vmware-srm-vsphere-replication/pair-the-sites-specify-details.png"::: -1. Select **CONNECT** to accept the certificate for the remote vCenter. +1. Select **CONNECT** to accept the certificate for the remote vCenter Server. At this point, the client should discover the VRM and SRM appliances on both sides as services to pair. @@ -156,9 +157,9 @@ After installing VMware SRM and vSphere Replication, you need to complete the co :::image type="content" source="media/vmware-srm-vsphere-replication/pair-the-sites-new-site.png" alt-text="Screenshot showing the vCenter Server and services details for the new site pair." border="true" lightbox="media/vmware-srm-vsphere-replication/pair-the-sites-new-site.png"::: -1. Select **CONNECT** to accept the certificates for the remote VMware SRM and the remote vCenter (again). +1. Select **CONNECT** to accept the certificates for the remote VMware SRM and the remote vCenter Server (again). -1. Select **CONNECT** to accept the certificates for the local VMware SRM and the local vCenter. +1. Select **CONNECT** to accept the certificates for the local VMware SRM and the local vCenter Server. 1. Review the settings and then select **FINISH**. @@ -169,7 +170,7 @@ After installing VMware SRM and vSphere Replication, you need to complete the co >[!NOTE] >The SR client sometimes takes a long time to refresh. If an operation seems to take too long or appears "stuck", select the refresh icon on the menu bar. -1. Select **VIEW DETAILS** to open the panel for remote site pairing, which opens a dialog to sign in to the remote vCenter. +1. Select **VIEW DETAILS** to open the panel for remote site pairing, which opens a dialog to sign in to the remote vCenter Server. :::image type="content" source="media/vmware-srm-vsphere-replication/view-details-remote-pairing.png" alt-text="Screenshot showing the new site pair details for Site Recovery Manager and vSphere Replication." 
border="true" lightbox="media/vmware-srm-vsphere-replication/view-details-remote-pairing.png"::: @@ -178,7 +179,7 @@ After installing VMware SRM and vSphere Replication, you need to complete the co For pairing, the login, which is often a different user, is a one-time action to establish pairing. The SR client requires this login every time the client is launched to work with the pairing. >[!NOTE] - >The user with sufficient permissions should have **VRM administrator** and **SRM administrator** roles given to them in the remote vCenter. The user should also have access to the remote vCenter inventory, like folders and datastores. For a remote Azure VMware Solution private cloud, the cloudadmin user has the appropriate permissions and access. + >The user with sufficient permissions should have **VRM administrator** and **SRM administrator** roles given to them in the remote vCenter Server. The user should also have access to the remote vCenter Server inventory, like folders and datastores. For a remote Azure VMware Solution private cloud, the cloudadmin user has the appropriate permissions and access. :::image type="content" source="media/vmware-srm-vsphere-replication/sign-into-remote-vcenter.png" alt-text="Screenshot showing the vCenter Server credentials." border="true"::: @@ -288,7 +289,7 @@ If you no longer require SRM, you must uninstall it in a clean manner. Before yo ## Support -VMware SRM is a Disaster Recovery solution from VMware. +VMware Site Recovery Manager (SRM) is a Disaster Recovery solution from VMware. Microsoft only supports install/uninstall of SRM and vSphere Replication Manager and scale up/down of vSphere Replication appliances within Azure VMware Solution. diff --git a/articles/azure-vmware/ecosystem-app-monitoring-solutions.md b/articles/azure-vmware/ecosystem-app-monitoring-solutions.md index e7f0e46bf0eb0..426096ffda904 100644 --- a/articles/azure-vmware/ecosystem-app-monitoring-solutions.md +++ b/articles/azure-vmware/ecosystem-app-monitoring-solutions.md @@ -2,6 +2,7 @@ title: Application performance monitoring and troubleshooting solutions for Azure VMware Solution description: Learn about leading application monitoring and troubleshooting solutions for your Azure VMware Solution private cloud. ms.topic: how-to +ms.service: azure-vmware ms.date: 04/11/2022 --- diff --git a/articles/azure-vmware/ecosystem-back-up-vms.md b/articles/azure-vmware/ecosystem-back-up-vms.md index f5574a0812ac5..d94373d360b09 100644 --- a/articles/azure-vmware/ecosystem-back-up-vms.md +++ b/articles/azure-vmware/ecosystem-back-up-vms.md @@ -2,6 +2,7 @@ title: Backup solutions for Azure VMware Solution virtual machines description: Learn about leading backup and restore solutions for your Azure VMware Solution virtual machines. ms.topic: how-to +ms.service: azure-vmware ms.date: 04/21/2021 --- diff --git a/articles/azure-vmware/ecosystem-disaster-recovery-vms.md b/articles/azure-vmware/ecosystem-disaster-recovery-vms.md index a1a20a0b255d8..09c63d818056d 100644 --- a/articles/azure-vmware/ecosystem-disaster-recovery-vms.md +++ b/articles/azure-vmware/ecosystem-disaster-recovery-vms.md @@ -2,8 +2,10 @@ title: Disaster recovery solutions for Azure VMware Solution virtual machines description: Learn about leading disaster recovery solutions for your Azure VMware Solution private cloud. 
ms.topic: how-to +ms.service: azure-vmware ms.date: 11/29/2021 --- + # Disaster recovery solutions for Azure VMware Solution virtual machines (VMs) One of the most important aspects of any Azure VMware Solution deployment is disaster recovery, which can be achieved by creating disaster recovery plans between different Azure VMware Solution regions or between Azure and an on-premises vSphere environment. diff --git a/articles/azure-vmware/ecosystem-migration-vms.md b/articles/azure-vmware/ecosystem-migration-vms.md index d38d1037ec258..ea93ddcafa5ff 100644 --- a/articles/azure-vmware/ecosystem-migration-vms.md +++ b/articles/azure-vmware/ecosystem-migration-vms.md @@ -2,6 +2,7 @@ title: Migration solutions for Azure VMware Solution virtual machines description: Learn about leading migration solutions for your Azure VMware Solution virtual machines. ms.topic: reference +ms.service: azure-vmware ms.date: 03/22/2021 --- diff --git a/articles/azure-vmware/ecosystem-os-vms.md b/articles/azure-vmware/ecosystem-os-vms.md index 871219d4a43d5..7540cb23b6e5d 100644 --- a/articles/azure-vmware/ecosystem-os-vms.md +++ b/articles/azure-vmware/ecosystem-os-vms.md @@ -2,6 +2,7 @@ title: Operating system support for Azure VMware Solution virtual machines description: Learn about operating system support for your Azure VMware Solution virtual machines. ms.topic: how-to +ms.service: azure-vmware ms.date: 04/11/2022 --- diff --git a/articles/azure-vmware/ecosystem-security-solutions.md b/articles/azure-vmware/ecosystem-security-solutions.md index 28abe937fc1d7..aebfc08c25aa5 100644 --- a/articles/azure-vmware/ecosystem-security-solutions.md +++ b/articles/azure-vmware/ecosystem-security-solutions.md @@ -2,8 +2,10 @@ title: Security solutions for Azure VMware Solution description: Learn about leading security solutions for your Azure VMware Solution private cloud. ms.topic: how-to +ms.service: azure-vmware ms.date: 04/11/2022 --- + # Security solutions for Azure VMware Solution A fundamental part of Azure VMware Solution is security. It allows customers to run their VMware-based workloads in a safe and trustable environment. diff --git a/articles/azure-vmware/enable-managed-snat-for-workloads.md b/articles/azure-vmware/enable-managed-snat-for-workloads.md new file mode 100644 index 0000000000000..de73315b878cd --- /dev/null +++ b/articles/azure-vmware/enable-managed-snat-for-workloads.md @@ -0,0 +1,41 @@ +--- +title: Enable Managed SNAT for Azure VMware Solution Workloads +description: This article explains how to enable Managed SNAT for Azure VMware Solution Workloads. +ms.topic: how-to +ms.service: azure-vmware +ms.date: 05/12/2022 +--- + +# Enable Managed SNAT for Azure VMware Solution workloads + +In this article, you'll learn how to enable Azure VMware Solution’s Managed Source NAT (SNAT) for outbound Internet connectivity. A SNAT service translates from RFC1918 space to the public Internet for simple outbound Internet access. The SNAT service won't work when you have a default route from Azure. + +With this capability, you: + +- Have a basic SNAT service with outbound Internet connectivity from your Azure VMware Solution private cloud. +- Have no control of outbound SNAT rules. +- Are unable to view connection logs. +- Have a limit of 128,000 concurrent connections.
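If you prefer to script this change rather than use the portal steps that follow, the Managed SNAT option corresponds to the `internet` property on the `Microsoft.AVS/privateClouds` resource, so it can also be toggled through the Azure Resource Manager API. The following sketch uses `az rest`; the resource group name, private cloud name, and API version shown are placeholders and assumptions, so verify them against the current Microsoft.AVS REST API reference before relying on it.

```azurecli-interactive
# Hedged sketch: enable outbound Internet (Managed SNAT) for a private cloud.
# "myResourceGroup", "myPrivateCloud", and the api-version are assumed placeholders.
subscriptionId=$(az account show --query id --output tsv)
resourceId="/subscriptions/$subscriptionId/resourceGroups/myResourceGroup/providers/Microsoft.AVS/privateClouds/myPrivateCloud"

# Set the internet connectivity option to Enabled (Managed SNAT).
az rest --method patch \
    --url "https://management.azure.com${resourceId}?api-version=2021-12-01" \
    --headers "Content-Type=application/json" \
    --body '{"properties": {"internet": "Enabled"}}'

# Confirm the current setting afterwards.
az rest --method get \
    --url "https://management.azure.com${resourceId}?api-version=2021-12-01" \
    --query "properties.internet"
```

Setting the same property to `Disabled` should turn the Managed SNAT service off again; the portal steps later in this article achieve the equivalent result interactively.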
+ +## Prerequisites +- Azure VMware Solution private cloud +- DNS Server configured on the NSX-T Data Center + +## Reference architecture +The architecture shows Internet access to and from your Azure VMware Solution private cloud using a Public IP directly to the NSX Edge. +:::image type="content" source="media/public-ip-nsx-edge/architecture-internet-access-avs-public-ip.png" alt-text="Diagram that shows architecture of Internet access to and from your Azure VMware Solution Private Cloud using a Public IP directly to the NSX Edge." border="false" lightbox="media/public-ip-usage/architecture-internet-access-avs-public-ip.png"::: + +## Configure Outbound Internet access using Managed SNAT in the Azure portal + +1. Log in to the Azure portal and then search for and select **Azure VMware Solution**. +2. Select the Azure VMware Solution private cloud. +1. In the left navigation, under **Workload Networking**, select **Internet Connectivity**. +4. Select the **Connect using SNAT** button and select **Save**. + You have successfully enabled outbound Internet access for your Azure VMware Solution private cloud using our Managed SNAT service. + +## Next steps +[Internet connectivity design considerations (Preview)](concepts-design-public-internet-access.md) + +[Enable Public IP to the NSX Edge for Azure VMware Solution (Preview)](enable-public-ip-nsx-edge.md) + +[Disable Internet access or enable a default route](disable-internet-access.md) diff --git a/articles/azure-vmware/enable-public-internet-access.md b/articles/azure-vmware/enable-public-internet-access.md index 10ea7a8ef8a8d..409aa82af67f9 100644 --- a/articles/azure-vmware/enable-public-internet-access.md +++ b/articles/azure-vmware/enable-public-internet-access.md @@ -2,8 +2,10 @@ title: Enable public internet for Azure VMware Solution workloads description: This article explains how to use the public IP functionality in Azure Virtual WAN. ms.topic: how-to +ms.service: azure-vmware ms.date: 06/25/2021 --- + # Enable public internet for Azure VMware Solution workloads Public IP is a feature in Azure VMware Solution connectivity. It makes resources, such as web servers, virtual machines (VMs), and hosts accessible through a public network. @@ -54,8 +56,6 @@ In this scenario, you'll publish the IIS webserver to the internet. Use the publ 1. Select the Azure VMware Solution private cloud. - :::image type="content" source="media/public-ip-usage/avs-private-cloud-resource.png" alt-text="Screenshot of the Azure VMware Solution private cloud." lightbox="media/public-ip-usage/avs-private-cloud-resource.png"::: - 1. Under **Manage**, select **Connectivity**. :::image type="content" source="media/public-ip-usage/avs-private-cloud-manage-menu.png" alt-text="Screenshot of the Connectivity section." lightbox="media/public-ip-usage/avs-private-cloud-manage-menu.png"::: @@ -142,7 +142,7 @@ Once all components are deployed, you can see them in the added Resource group. 1. Select a hub from the list and select **Add**. - :::image type="content" source="media/public-ip-usage/secure-hubs-with-azure-firewall-polcy.png" alt-text="Screenshot that shows the selected hubs that will be converted to Secured Virtual Hubs." lightbox="media/public-ip-usage/secure-hubs-with-azure-firewall-polcy.png"::: + :::image type="content" source="media/public-ip-usage/secure-hubs-with-azure-firewall-policy.png" alt-text="Screenshot that shows the selected hubs that will be converted to Secured Virtual Hubs."
lightbox="media/public-ip-usage/secure-hubs-with-azure-firewall-policy.png"::: 1. Select **Next: Tags**. diff --git a/articles/azure-vmware/enable-public-ip-nsx-edge.md b/articles/azure-vmware/enable-public-ip-nsx-edge.md new file mode 100644 index 0000000000000..d28a564e618b1 --- /dev/null +++ b/articles/azure-vmware/enable-public-ip-nsx-edge.md @@ -0,0 +1,127 @@ +--- +title: Enable Public IP to the NSX Edge for Azure VMware Solution (Preview) +description: This article explains how to enable internet access for your Azure VMware Solution. +ms.topic: how-to +ms.service: azure-vmware +ms.date: 05/12/2022 +--- + +# Enable Public IP to the NSX Edge for Azure VMware Solution (Preview) + +In this article, you'll learn how to enable Public IP to the NSX Edge for your Azure VMware Solution. + +>[!TIP] +>Before you enable Internet access to your Azure VMware Solution, review the [Internet connectivity design considerations](concepts-design-public-internet-access.md). + +Public IP to the NSX Edge is a feature in Azure VMware Solution that enables inbound and outbound internet access for your Azure VMware Solution environment. The Public IP is configured in Azure VMware Solution through the Azure portal and the NSX-T Data center interface within your Azure VMware Solution private cloud. +With this capability, you have the following features: +- A cohesive and simplified experience for reserving and using a Public IP down to the NSX Edge. +- The ability to receive up to 1000 or more Public IPs, enabling Internet access at scale. +- Inbound and outbound internet access for your workload VMs. +- DDoS Security protection against network traffic in and out of the Internet. +- HCX Migration support over the Public Internet. + +## Reference architecture +The architecture shows Internet access to and from your Azure VMware Solution private cloud using a Public IP directly to the NSX Edge. +:::image type="content" source="media/public-ip-nsx-edge/architecture-internet-access-avs-public-ip.png" alt-text="Diagram that shows architecture of Internet access to and from your Azure VMware Solution Private Cloud using a Public IP directly to the NSX Edge." border="false" lightbox="media/public-ip-nsx-edge/architecture-internet-access-avs-public-ip.png"::: + +## Configure a Public IP in the Azure portal +1. Log on to the Azure portal. +1. Search for and select Azure VMware Solution. +2. Select the Azure VMware Solution private cloud. +1. In the left navigation, under **Workload Networking**, select **Internet connectivity**. +4. Select the **Connect using Public IP down to the NSX-T Edge** button. + +>[!TIP] +>Before selecting a Public IP, ensure you understand the implications to your existing environment. For more information, see [Internet connectivity design considerations](concepts-design-public-internet-access.md). + +5. Select **Public IP**. + :::image type="content" source="media/public-ip-nsx-edge/public-ip-internet-connectivity.png" alt-text="Diagram that shows how to select public IP to the NSX Edge"::: +6. Enter the **Public IP name** and select a subnet size from the **Address space** dropdown and select **Configure**. +7. This Public IP should be configured within 20 minutes and will show the subnet. + :::image type="content" source="media/public-ip-nsx-edge/public-ip-subnet-internet-connectivity.png" alt-text="Diagram that shows Internet connectivity in Azure VMware Solution."::: +1. If you don't see the subnet, refresh the list. If the refresh fails, try the configuration again. + +9. 
After configuring the Public IP, select the **Connect using the Public IP down to the NSX-T Edge** checkbox to disable all other Internet options. +10. Select **Save**. + +You have successfully enabled Internet connectivity for your Azure VMware Solution private cloud and reserved a Microsoft-allocated Public IP. You can now configure this Public IP down to the NSX Edge for your workloads. The NSX-T Data Center is used for all VM communication. + +There are three options for configuring your reserved Public IP down to the NSX Edge: Outbound Internet Access for VMs, Inbound Internet Access for VMs, and Gateway Firewall used to Filter Traffic to VMs at T1 Gateways. + +### Outbound Internet access for VMs + +A Source Network Address Translation (SNAT) service with Port Address Translation (PAT) allows many VMs to share a single SNAT service. This means you can provide outbound Internet connectivity for many VMs. + +**Add rule** +1. From your Azure VMware Solution private cloud, select **VMware credentials**. +2. Locate your NSX-T URL and credentials. +3. Log in to **VMware NSX-T**. +4. Navigate to **NAT Rules**. +5. Select the T1 Router. +1. Select **ADD NAT RULE**. + +**Configure rule** + +1. Enter a name. +1. Select **SNAT**. +1. Optionally, enter a source, such as a subnet to SNAT, or a destination. +1. Enter the translated IP. This IP is from the range of Public IPs you reserved from the Azure VMware Solution Portal. +1. Optionally give the rule a higher priority number. This prioritization will move the rule further down the rule list to ensure more specific rules are matched first. +1. Select **SAVE**. + +Logging can be enabled by way of the logging slider. For more information on NSX-T NAT configuration and options, see the +[NSX-T NAT Administration Guide](https://docs.vmware.com/en/VMware-NSX-T-Data-Center/3.1/administration/GUID-7AD2C384-4303-4D6C-A44A-DEF45AA18A92.html). +### Inbound Internet Access for VMs +A Destination Network Address Translation (DNAT) service is used to expose a VM on a specific Public IP address and/or a specific port. This service provides inbound internet access to your workload VMs. + +**Log in to VMware NSX-T** +1. From your Azure VMware Solution private cloud, select **VMware credentials**. +2. Locate your NSX-T URL and credentials. +3. Log in to **VMware NSX-T**. + +**Configure the DNAT rule** + 1. Name the rule. + 1. Select **DNAT** as the action. + 1. Enter the reserved Public IP in the destination match. This IP is from the range of Public IPs reserved from the Azure VMware Solution Portal. + 1. Enter the VM Private IP in the translated IP. + 1. Select **SAVE**. + 1. Optionally, configure the Translated Port or source IP for more specific matches. + +The VM is now exposed to the internet on the specific Public IP and/or specific ports. + +### Gateway Firewall used to filter traffic to VMs at T1 Gateways + +You can provide security protection for your network traffic in and out of the public Internet through your Gateway Firewall. +1. From your Azure VMware Solution private cloud, select **VMware credentials**. +2. Locate your NSX-T URL and credentials. +3. Log in to **VMware NSX-T**. +4. From the NSX-T home screen, select **Gateway Policies**. +5. Select **Gateway Specific Rules**, choose the T1 Gateway, and select **ADD POLICY**. +6. Select **New Policy** and enter a policy name. +7. Select the Policy and select **ADD RULE**. +8. Configure the rule. + + 1. Select **New Rule**. + 1.
Enter a descriptive name. + 1. Configure the source, destination, services, and action. + +1. Select **Match External Address** to apply firewall rules to the external address of a NAT rule. + +For example, the following rule is set to Match External Address, and this setting will allow SSH traffic inbound to the Public IP. :::image type="content" source="media/public-ip-nsx-edge/gateway-specific-rules-match-external-connectivity.png" alt-text="Screenshot showing inbound Internet connectivity to a Public IP." lightbox="media/public-ip-nsx-edge/gateway-specific-rules-match-external-connectivity-expanded.png"::: + +If **Match Internal Address** was specified, the destination would be the internal or private IP address of the VM. +For more information on the NSX-T Gateway Firewall, see the [NSX-T Gateway Firewall Administration Guide](https://docs.vmware.com/en/VMware-NSX-T-Data-Center/3.1/administration/GUID-A52E1A6F-F27D-41D9-9493-E3A75EC35481.html). +The Distributed Firewall could be used to filter traffic to VMs. This feature is outside the scope of this document. For more information, see the [NSX-T Distributed Firewall Administration Guide](https://docs.vmware.com/en/VMware-NSX-T-Data-Center/3.1/administration/GUID-6AB240DB-949C-4E95-A9A7-4AC6EF5E3036.html). + +To enable this feature for your subscription, register the `PIPOnNSXEnabled` flag and follow these steps to [set up the preview feature in your Azure subscription](https://docs.microsoft.com/azure/azure-resource-manager/management/preview-features?tabs=azure-portal). + + +## Next steps +[Internet connectivity design considerations (Preview)](concepts-design-public-internet-access.md) + +[Enable Managed SNAT for Azure VMware Solution Workloads (Preview)](enable-managed-snat-for-workloads.md) + +[Disable Internet access or enable a default route](disable-internet-access.md) + diff --git a/articles/azure-vmware/faq.yml b/articles/azure-vmware/faq.yml index 42c05ba7f91a0..e71ea6da377e2 100644 --- a/articles/azure-vmware/faq.yml +++ b/articles/azure-vmware/faq.yml @@ -6,7 +6,8 @@ metadata: ms.service: azure-vmware ms.custom: contperf-fy21q4 ms.date: 09/29/2021 -title: Common questions about Azure VMware Solution + +title: Common questions about Azure VMware Solution summary: This article answers commonly asked questions about Azure VMware Solution. sections: diff --git a/articles/azure-vmware/fix-deployment-failures.md b/articles/azure-vmware/fix-deployment-failures.md index 6c2585a9912ca..1ff99c478b09b 100644 --- a/articles/azure-vmware/fix-deployment-failures.md +++ b/articles/azure-vmware/fix-deployment-failures.md @@ -2,6 +2,7 @@ title: Support for Azure VMware Solution deployment or provisioning failure description: Get information from your Azure VMware Solution private cloud to file a service request for an Azure VMware Solution deployment or provisioning failure. ms.topic: how-to +ms.service: azure-vmware ms.date: 10/28/2020 --- diff --git a/articles/azure-vmware/includes/add-network-segment-steps.md b/articles/azure-vmware/includes/add-network-segment-steps.md index b705ae6013df9..bbfb81a4d27e1 100644 --- a/articles/azure-vmware/includes/add-network-segment-steps.md +++ b/articles/azure-vmware/includes/add-network-segment-steps.md @@ -2,10 +2,10 @@ title: Add an NSX-T network segment description: Steps to add an NSX-T network segment for Azure VMware Solution in NSX-T Manager.
ms.topic: include +ms.service: azure-vmware ms.date: 03/13/2021 author: suzizuber ms.author: v-szuber -ms.service: azure-vmware --- diff --git a/articles/azure-vmware/includes/azure-vmware-solution-networking-description.md b/articles/azure-vmware/includes/azure-vmware-solution-networking-description.md index 4bfcbf4b049e9..48e176b85d195 100644 --- a/articles/azure-vmware/includes/azure-vmware-solution-networking-description.md +++ b/articles/azure-vmware/includes/azure-vmware-solution-networking-description.md @@ -2,10 +2,10 @@ title: Azure VMware Solution networking and connectivity description: Azure VMware Solution networking and connectivity description. ms.topic: include +ms.service: azure-vmware ms.date: 08/10/2021 author: suzizuber ms.author: v-szuber -ms.service: azure-vmware --- diff --git a/articles/azure-vmware/includes/azure-vmware-solutions-limits.md b/articles/azure-vmware/includes/azure-vmware-solutions-limits.md index e1be3faf2693a..9e81196dddaee 100644 --- a/articles/azure-vmware/includes/azure-vmware-solutions-limits.md +++ b/articles/azure-vmware/includes/azure-vmware-solutions-limits.md @@ -2,10 +2,10 @@ title: Azure VMware Solution limits description: Azure VMware Solution limitations. ms.topic: include +ms.service: azure-vmware ms.date: 09/02/2021 author: suzizuber ms.author: v-szuber -ms.service: azure-vmware --- diff --git a/articles/azure-vmware/includes/connect-expressroute-vnet.md b/articles/azure-vmware/includes/connect-expressroute-vnet.md index 2ff9c4e70dcfd..00a604f01ccc3 100644 --- a/articles/azure-vmware/includes/connect-expressroute-vnet.md +++ b/articles/azure-vmware/includes/connect-expressroute-vnet.md @@ -2,10 +2,10 @@ title: Connect ExpressRoute to the virtual network gateway description: Steps to connect ExpressRoute to the virtual network gateway. ms.topic: include +ms.service: azure-vmware ms.date: 12/08/2020 author: suzizuber ms.author: v-szuber -ms.service: azure-vmware --- diff --git a/articles/azure-vmware/includes/create-nsxt-segment-azure-portal-steps.md b/articles/azure-vmware/includes/create-nsxt-segment-azure-portal-steps.md index 16eab9958fbfa..8b46b396e5777 100644 --- a/articles/azure-vmware/includes/create-nsxt-segment-azure-portal-steps.md +++ b/articles/azure-vmware/includes/create-nsxt-segment-azure-portal-steps.md @@ -2,10 +2,10 @@ title: Add an NSX-T network segment using Azure VMware Solution description: Steps to add an NSX-T network segment for Azure VMware Solution in the Azure portal. ms.topic: include +ms.service: azure-vmware ms.date: 07/16/2021 author: suzizuber ms.author: v-szuber -ms.service: azure-vmware --- diff --git a/articles/azure-vmware/includes/create-private-cloud-azure-portal-steps.md b/articles/azure-vmware/includes/create-private-cloud-azure-portal-steps.md index 5c30503b7beea..30429aed1c802 100644 --- a/articles/azure-vmware/includes/create-private-cloud-azure-portal-steps.md +++ b/articles/azure-vmware/includes/create-private-cloud-azure-portal-steps.md @@ -2,10 +2,10 @@ title: Create an Azure VMware Solution private cloud description: Steps to create an Azure VMware Solution private cloud using the Azure portal. ms.topic: include -ms.date: 08/05/2021 +ms.service: azure-vmware +ms.date: 05/31/2022 author: suzizuber ms.author: v-szuber -ms.service: azure-vmware --- @@ -17,12 +17,14 @@ You can create an Azure VMware Solution private cloud using the Azure portal or 1. Sign in to the [Azure portal](https://portal.azure.com). -1. Select **Create a new resource**. +1. Select **Create a resource**. -1. 
In the **Search the Marketplace** text box, type `Azure VMware Solution` and select it from the results. +1. In the **Search services and marketplace** text box, type `Azure VMware Solution` and select it from the search results. 1. On the **Azure VMware Solution** window, select **Create**. +1. If you need more hosts, [request a host quota increase](https://docs.microsoft.com/azure/azure-vmware/request-host-quota-azure-vmware-solution?WT.mc_id=Portal-VMCP). + 1. On the **Basics** tab, enter values for the fields and then select **Review + Create**. >[!TIP] @@ -35,7 +37,7 @@ You can create an Azure VMware Solution private cloud using the Azure portal or | **Resource name** | Provide the name of your Azure VMware Solution private cloud. | | **Location** | Select a location, such as **east us**. It's the *region* you defined during the planning phase. | | **Size of host** | Select **AV36**. | - | **Number of hosts** | Number of hosts allocated for the private cloud cluster. The default value is 3, which you can increase or decrease after deployment. | + | **Number of hosts** | Number of hosts allocated for the private cloud cluster. The default value is 3, which you can increase or decrease after deployment. If these hosts aren't listed as available, contact support to [request a quota increase](https://docs.microsoft.com/azure/azure-vmware/request-host-quota-azure-vmware-solution?WT.mc_id=Portal-VMCP). You can also select the "If you need more hosts, request a quota increase" link in the Azure portal. | | **Address block for private cloud** | Provide an IP address block for the private cloud. The CIDR represents the private cloud management network and is used for the cluster management services, such as vCenter Server and NSX-T Manager. Use /22 address space, for example, 10.175.0.0/22. The address should be unique and not overlap with other Azure Virtual Networks and with on-premises networks. | diff --git a/articles/azure-vmware/includes/customer-communications.md b/articles/azure-vmware/includes/customer-communications.md index 92112d22f4ea0..222fe0413f820 100644 --- a/articles/azure-vmware/includes/customer-communications.md +++ b/articles/azure-vmware/includes/customer-communications.md @@ -2,10 +2,10 @@ title: Azure VMware Solution customer communications description: Azure VMware Solution customer communications description. ms.topic: include +ms.service: azure-vmware ms.date: 03/24/2021 author: suzizuber ms.author: v-szuber -ms.service: azure-vmware --- diff --git a/articles/azure-vmware/includes/dhcp-dns-in-azure-vmware-solution-description.md b/articles/azure-vmware/includes/dhcp-dns-in-azure-vmware-solution-description.md index 148f2a3c0b035..0e42f59297168 100644 --- a/articles/azure-vmware/includes/dhcp-dns-in-azure-vmware-solution-description.md +++ b/articles/azure-vmware/includes/dhcp-dns-in-azure-vmware-solution-description.md @@ -2,10 +2,10 @@ title: DHCP and DNS in Azure VMware Solution description description: Azure VMware Solution DHCP and DNS description.
ms.topic: include +ms.service: azure-vmware ms.date: 05/28/2021 author: suzizuber ms.author: v-szuber -ms.service: azure-vmware --- diff --git a/articles/azure-vmware/includes/disk-capabilities-of-the-host.md b/articles/azure-vmware/includes/disk-capabilities-of-the-host.md index f659bf19e3d99..e8d5f49528187 100644 --- a/articles/azure-vmware/includes/disk-capabilities-of-the-host.md +++ b/articles/azure-vmware/includes/disk-capabilities-of-the-host.md @@ -2,10 +2,10 @@ title: Disk capabilities of the hosts description: Hosts used to build or scale clusters come from an isolated pool of hosts. ms.topic: include +ms.service: azure-vmware ms.date: 04/23/2021 author: suzizuber ms.author: v-szuber -ms.service: azure-vmware --- diff --git a/articles/azure-vmware/includes/disk-pool-planning-note.md b/articles/azure-vmware/includes/disk-pool-planning-note.md index 3ee55557ea33a..1ccdcbaf58469 100644 --- a/articles/azure-vmware/includes/disk-pool-planning-note.md +++ b/articles/azure-vmware/includes/disk-pool-planning-note.md @@ -2,10 +2,10 @@ title: Disk pool planning note for vNet description: Important note about the importance of deploying a vNet closer to Azure VMware Solution hosts. ms.topic: include +ms.service: azure-vmware ms.date: 07/14/2021 author: suzizuber ms.author: v-szuber -ms.service: azure-vmware # used in: # articles\azure-vmware\attach-disk-pools-to-azure-vmware-solution.md diff --git a/articles/azure-vmware/includes/expressroute-global-reach.md b/articles/azure-vmware/includes/expressroute-global-reach.md index 25d446704969b..1a2a2a235c273 100644 --- a/articles/azure-vmware/includes/expressroute-global-reach.md +++ b/articles/azure-vmware/includes/expressroute-global-reach.md @@ -2,10 +2,10 @@ title: Azure VMware Solution networking and connectivity description: Azure VMware Solution networking and connectivity description. ms.topic: include +ms.service: azure-vmware ms.date: 08/10/2021 author: suzizuber ms.author: v-szuber -ms.service: azure-vmware --- diff --git a/articles/azure-vmware/includes/nsxt-version.md b/articles/azure-vmware/includes/nsxt-version.md index ca668ab784525..1053a52439b4e 100644 --- a/articles/azure-vmware/includes/nsxt-version.md +++ b/articles/azure-vmware/includes/nsxt-version.md @@ -2,10 +2,10 @@ title: NSX-T version description: NSX-T version ms.topic: include +ms.service: azure-vmware ms.date: 08/09/2021 author: suzizuber ms.author: v-szuber -ms.service: azure-vmware --- 3.1.2 diff --git a/articles/azure-vmware/includes/register-resource-provider-steps.md b/articles/azure-vmware/includes/register-resource-provider-steps.md index b1f344b61f78e..8a425811eb1cc 100644 --- a/articles/azure-vmware/includes/register-resource-provider-steps.md +++ b/articles/azure-vmware/includes/register-resource-provider-steps.md @@ -2,10 +2,10 @@ title: Register the Azure VMware Solution resource provider description: Steps to register the Azure VMware Solution resource provider. ms.topic: include +ms.service: azure-vmware ms.date: 02/17/2021 author: suzizuber ms.author: v-szuber -ms.service: azure-vmware --- diff --git a/articles/azure-vmware/includes/request-authorization-key.md b/articles/azure-vmware/includes/request-authorization-key.md index 6fccc2d108fe2..ca7b24778c8f4 100644 --- a/articles/azure-vmware/includes/request-authorization-key.md +++ b/articles/azure-vmware/includes/request-authorization-key.md @@ -2,10 +2,10 @@ title: Request authorization key for ExpressRoute description: Steps to request an authorization key for ExpressRoute. 
ms.topic: include +ms.service: azure-vmware ms.date: 03/15/2021 author: suzizuber ms.author: v-szuber -ms.service: azure-vmware --- diff --git a/articles/azure-vmware/includes/vcenter-access-identity-description.md b/articles/azure-vmware/includes/vcenter-access-identity-description.md index 099884808c023..8e8e5709e0376 100644 --- a/articles/azure-vmware/includes/vcenter-access-identity-description.md +++ b/articles/azure-vmware/includes/vcenter-access-identity-description.md @@ -2,6 +2,7 @@ title: vCenter Server access and identity description description: vCenter Server has a built-in local user called cloudadmin and is assigned to the CloudAdmin role. ms.topic: include +ms.service: azure-vmware ms.date: 04/07/2022 --- diff --git a/articles/azure-vmware/includes/vmware-software-update-frequency.md b/articles/azure-vmware/includes/vmware-software-update-frequency.md index 945de79650eb4..dc43869639762 100644 --- a/articles/azure-vmware/includes/vmware-software-update-frequency.md +++ b/articles/azure-vmware/includes/vmware-software-update-frequency.md @@ -2,10 +2,10 @@ title: VMware software update frequency description: Supported VMware software update frequency for Azure VMware Solution. ms.topic: include +ms.service: azure-vmware ms.date: 08/24/2021 author: suzizuber ms.author: v-szuber -ms.service: azure-vmware --- diff --git a/articles/azure-vmware/includes/vmware-software-versions.md b/articles/azure-vmware/includes/vmware-software-versions.md index fa8a1fd9214f8..4c57c197ee6b6 100644 --- a/articles/azure-vmware/includes/vmware-software-versions.md +++ b/articles/azure-vmware/includes/vmware-software-versions.md @@ -2,26 +2,26 @@ title: VMware software versions description: Supported VMware software versions for Azure VMware Solution. ms.topic: include -ms.date: 07/20/2021 +ms.service: azure-vmware +ms.date: 06/02/2022 author: suzizuber ms.author: v-szuber -ms.service: azure-vmware --- -The VMware software versions used in new deployments of Azure VMware Solution private clouds clusters are: +The VMware solution software versions used in new deployments of Azure VMware Solution private cloud clusters are: | Software | Version | | :--- | :---: | -| vCenter | 6.7 U3p | -| ESXi | 6.7 P05 | -| vSAN | 6.7 P05 | -| HCX | 4.2.2 | -| NSX-T
                  **NOTE:** NSX-T is the only supported version of NSX. | [[!INCLUDE [nsxt-version](nsxt-version.md)]](https://docs.vmware.com/en/VMware-NSX-T-Data-Center/3.1/rn/VMware-NSX-T-Data-Center-312-Release-Notes.html) | - +| vCenter Server | 7.0 U3c | +| ESXi | 7.0 U3c | +| vSAN | 7.0 U3c | +| vSAN on-disk format | 10 | +| HCX | 4.2.2 | +| NSX-T Data Center
                  **NOTE:** NSX-T Data Center is the only supported version of NSX Data Center. | [[!INCLUDE [nsxt-version](nsxt-version.md)]](https://docs.vmware.com/en/VMware-NSX-T-Data-Center/3.1/rn/VMware-NSX-T-Data-Center-312-Release-Notes.html) | -The currently running software version is applied to new clusters added to an existing private cloud. For more information, see the [VMware software version requirements](https://docs.vmware.com/en/VMware-HCX/4.1/hcx-user-guide/GUID-54E5293B-8707-4D29-BFE8-EE63539CC49B.html). +The currently running software version is applied to new clusters added to an existing private cloud. For more information, see the [VMware software version requirements](https://docs.vmware.com/en/VMware-HCX/4.1/hcx-user-guide/GUID-54E5293B-8707-4D29-BFE8-EE63539CC49B.html) and [Understanding vSAN on-disk format versions and compatibility](https://kb.vmware.com/s/article/2148493). diff --git a/articles/azure-vmware/index.yml b/articles/azure-vmware/index.yml index e2c9083526fc8..60f2be1568a78 100644 --- a/articles/azure-vmware/index.yml +++ b/articles/azure-vmware/index.yml @@ -1,16 +1,16 @@ ### YamlMime:Landing title: Azure VMware Solution documentation -summary: Learn how to use Azure VMware Solution to deploy a VMware private cloud to Azure +summary: Learn how to use Azure VMware Solution to deploy a VMware Software-Defined Data Center (SDDC) private cloud to Azure metadata: title: Azure VMware Solution documentation - description: Learn how to use Azure VMware Solution to deploy a VMware private cloud to Azure + description: Learn how to use Azure VMware Solution to deploy a VMware Software-Defined Data Center (SDDC) private cloud to Azure ms.service: azure-vmware ms.topic: landing-page author: suzizuber ms.author: v-szuber - ms.date: 11/12/2021 + ms.date: 04/22/2022 # card 1 @@ -33,6 +33,8 @@ landingContent: url: concepts-api-management.md - text: Hub and spoke url: concepts-hub-and-spoke.md + - text: Internet connectivity design considerations + url: concepts-design-public-internet-access.md - text: Network design considerations url: concepts-network-design-considerations.md - text: Networking and interconnectivity @@ -164,6 +166,8 @@ landingContent: url: configure-alerts-for-azure-vmware-solution.md - text: Attach disk pools to Azure VMware Solution hosts url: attach-disk-pools-to-azure-vmware-solution-hosts.md + - text: Attach Azure NetApp Files datastores to Azure VMware Solution hosts + url: attach-azure-netapp-files-to-azure-vmware-solution-hosts.md - text: Attach Azure NetApp Files to Azure VMware Solution VMs url: netapp-files-with-azure-vmware-solution.md diff --git a/articles/azure-vmware/install-vmware-hcx.md b/articles/azure-vmware/install-vmware-hcx.md index 215f11936c566..ea9468e27babb 100644 --- a/articles/azure-vmware/install-vmware-hcx.md +++ b/articles/azure-vmware/install-vmware-hcx.md @@ -2,6 +2,7 @@ title: Install VMware HCX in Azure VMware Solution description: Install VMware HCX in your Azure VMware Solution private cloud. 
ms.topic: how-to +ms.service: azure-vmware ms.date: 03/29/2022 --- diff --git a/articles/azure-vmware/integrate-azure-native-services.md b/articles/azure-vmware/integrate-azure-native-services.md index e378ca9e41c88..1c288a3587a83 100644 --- a/articles/azure-vmware/integrate-azure-native-services.md +++ b/articles/azure-vmware/integrate-azure-native-services.md @@ -2,6 +2,7 @@ title: Monitor and protect VMs with Azure native services description: Learn how to integrate and deploy Microsoft Azure native tools to monitor and manage your Azure VMware Solution workloads. ms.topic: how-to +ms.service: azure-vmware ms.date: 08/15/2021 --- @@ -60,7 +61,7 @@ You can configure the Log Analytics workspace with Microsoft Sentinel for alert If you are new to Azure or unfamiliar with any of the services previously mentioned, review the following articles: - [Automation account authentication overview](../automation/automation-security-overview.md) -- [Designing your Azure Monitor Logs deployment](../azure-monitor/logs/design-logs-deployment.md) and [Azure Monitor](../azure-monitor/overview.md) +- [Designing your Azure Monitor Logs deployment](../azure-monitor/logs/workspace-design.md) and [Azure Monitor](../azure-monitor/overview.md) - [Planning](../security-center/security-center-planning-and-operations-guide.md) and [Supported platforms](../security-center/security-center-os-coverage.md) for Microsoft Defender for Cloud - [Enable Azure Monitor for VMs overview](../azure-monitor/vm/vminsights-enable-overview.md) - [What is Azure Arc enabled servers?](../azure-arc/servers/overview.md) and [What is Azure Arc enabled Kubernetes?](../azure-arc/kubernetes/overview.md) @@ -133,7 +134,7 @@ Can collect data from different [sources to monitor and analyze](../azure-monito Monitor guest operating system performance to discover and map application dependencies for Azure VMware Solution or on-premises VMs. Your Log Analytics workspace in Azure Monitor enables log collection and performance counter collection using the Log Analytics agent or extensions. -1. [Design your Azure Monitor Logs deployment](../azure-monitor/logs/design-logs-deployment.md) +1. [Design your Azure Monitor Logs deployment](../azure-monitor/logs/workspace-design.md) 1. [Enable Azure Monitor for VMs overview](../azure-monitor/vm/vminsights-enable-overview.md) diff --git a/articles/azure-vmware/introduction.md b/articles/azure-vmware/introduction.md index 1d45ae4bd4dd7..c4e5c47a6e438 100644 --- a/articles/azure-vmware/introduction.md +++ b/articles/azure-vmware/introduction.md @@ -2,6 +2,7 @@ title: Introduction description: Learn the features and benefits of Azure VMware Solution to deploy and manage VMware-based workloads in Azure. Azure VMware Solution SLA guarantees that Azure VMware management tools (vCenter Server and NSX Manager) will be available at least 99.9% of the time. 
ms.topic: overview +ms.service: azure-vmware ms.date: 04/20/2021 --- diff --git a/articles/azure-vmware/media/attach-netapp-files-to-cloud/architecture-netapp-files-nfs-datastores.png b/articles/azure-vmware/media/attach-netapp-files-to-cloud/architecture-netapp-files-nfs-datastores.png new file mode 100644 index 0000000000000..fd1fba0f6c79c Binary files /dev/null and b/articles/azure-vmware/media/attach-netapp-files-to-cloud/architecture-netapp-files-nfs-datastores.png differ diff --git a/articles/azure-vmware/media/attach-netapp-files-to-cloud/connect-netapp-files-portal-experience-1.png b/articles/azure-vmware/media/attach-netapp-files-to-cloud/connect-netapp-files-portal-experience-1.png new file mode 100644 index 0000000000000..eb1aac5e86b10 Binary files /dev/null and b/articles/azure-vmware/media/attach-netapp-files-to-cloud/connect-netapp-files-portal-experience-1.png differ diff --git a/articles/azure-vmware/media/public-ip-nsx-edge/architecture-internet-access-avs-public-ip.png b/articles/azure-vmware/media/public-ip-nsx-edge/architecture-internet-access-avs-public-ip.png new file mode 100644 index 0000000000000..6d7e1136d8fa0 Binary files /dev/null and b/articles/azure-vmware/media/public-ip-nsx-edge/architecture-internet-access-avs-public-ip.png differ diff --git a/articles/azure-vmware/media/public-ip-nsx-edge/gateway-specific-rules-match-external-connectivity-expanded.png b/articles/azure-vmware/media/public-ip-nsx-edge/gateway-specific-rules-match-external-connectivity-expanded.png new file mode 100644 index 0000000000000..0ba110c260ee9 Binary files /dev/null and b/articles/azure-vmware/media/public-ip-nsx-edge/gateway-specific-rules-match-external-connectivity-expanded.png differ diff --git a/articles/azure-vmware/media/public-ip-nsx-edge/gateway-specific-rules-match-external-connectivity.png b/articles/azure-vmware/media/public-ip-nsx-edge/gateway-specific-rules-match-external-connectivity.png new file mode 100644 index 0000000000000..9d6f6e3d8f6b3 Binary files /dev/null and b/articles/azure-vmware/media/public-ip-nsx-edge/gateway-specific-rules-match-external-connectivity.png differ diff --git a/articles/azure-vmware/media/public-ip-nsx-edge/public-ip-internet-connectivity.png b/articles/azure-vmware/media/public-ip-nsx-edge/public-ip-internet-connectivity.png new file mode 100644 index 0000000000000..481ed65ea1936 Binary files /dev/null and b/articles/azure-vmware/media/public-ip-nsx-edge/public-ip-internet-connectivity.png differ diff --git a/articles/azure-vmware/media/public-ip-nsx-edge/public-ip-subnet-internet-connectivity.png b/articles/azure-vmware/media/public-ip-nsx-edge/public-ip-subnet-internet-connectivity.png new file mode 100644 index 0000000000000..e8f0d25335d37 Binary files /dev/null and b/articles/azure-vmware/media/public-ip-nsx-edge/public-ip-subnet-internet-connectivity.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/add-number-of-ip-addresses-required.png b/articles/azure-vmware/media/public-ip-usage/add-number-of-ip-addresses-required.png index fc0e44054ff86..e86b61d46f9e2 100644 Binary files a/articles/azure-vmware/media/public-ip-usage/add-number-of-ip-addresses-required.png and b/articles/azure-vmware/media/public-ip-usage/add-number-of-ip-addresses-required.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/architecture-internet-access-avs-public-ip.png b/articles/azure-vmware/media/public-ip-usage/architecture-internet-access-avs-public-ip.png new file mode 100644 index 0000000000000..6d7e1136d8fa0 Binary 
files /dev/null and b/articles/azure-vmware/media/public-ip-usage/architecture-internet-access-avs-public-ip.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/avs-private-cloud-resource.png b/articles/azure-vmware/media/public-ip-usage/avs-private-cloud-resource.png deleted file mode 100644 index 7f2297b167e71..0000000000000 Binary files a/articles/azure-vmware/media/public-ip-usage/avs-private-cloud-resource.png and /dev/null differ diff --git a/articles/azure-vmware/media/public-ip-usage/private-cloud-internet-connectivity.png b/articles/azure-vmware/media/public-ip-usage/private-cloud-internet-connectivity.png new file mode 100644 index 0000000000000..b1e75f9d01a95 Binary files /dev/null and b/articles/azure-vmware/media/public-ip-usage/private-cloud-internet-connectivity.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/private-cloud-save-snat-internet-connectivity.png b/articles/azure-vmware/media/public-ip-usage/private-cloud-save-snat-internet-connectivity.png new file mode 100644 index 0000000000000..ebf97e4048854 Binary files /dev/null and b/articles/azure-vmware/media/public-ip-usage/private-cloud-save-snat-internet-connectivity.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/private-cloud-workload-internet-connectivity.png b/articles/azure-vmware/media/public-ip-usage/private-cloud-workload-internet-connectivity.png new file mode 100644 index 0000000000000..214e9b7997e68 Binary files /dev/null and b/articles/azure-vmware/media/public-ip-usage/private-cloud-workload-internet-connectivity.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/public-ip-architecture-diagram.png b/articles/azure-vmware/media/public-ip-usage/public-ip-architecture-diagram.png index 20d8be67a5f77..6d271395f28e9 100644 Binary files a/articles/azure-vmware/media/public-ip-usage/public-ip-architecture-diagram.png and b/articles/azure-vmware/media/public-ip-usage/public-ip-architecture-diagram.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/public-ip-block-internet-connectivity.png b/articles/azure-vmware/media/public-ip-usage/public-ip-block-internet-connectivity.png new file mode 100644 index 0000000000000..f08ea165a9c4e Binary files /dev/null and b/articles/azure-vmware/media/public-ip-usage/public-ip-block-internet-connectivity.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/public-ip-subnet-internet-connectivity.png b/articles/azure-vmware/media/public-ip-usage/public-ip-subnet-internet-connectivity.png new file mode 100644 index 0000000000000..e8f0d25335d37 Binary files /dev/null and b/articles/azure-vmware/media/public-ip-usage/public-ip-subnet-internet-connectivity.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/secure-hubs-with-azure-firewall-polcy.png b/articles/azure-vmware/media/public-ip-usage/secure-hubs-with-azure-firewall-polcy.png deleted file mode 100644 index 1ecaf653624d1..0000000000000 Binary files a/articles/azure-vmware/media/public-ip-usage/secure-hubs-with-azure-firewall-polcy.png and /dev/null differ diff --git a/articles/azure-vmware/media/public-ip-usage/secure-hubs-with-azure-firewall-policy.png b/articles/azure-vmware/media/public-ip-usage/secure-hubs-with-azure-firewall-policy.png new file mode 100644 index 0000000000000..d5075c32cdcc1 Binary files /dev/null and b/articles/azure-vmware/media/public-ip-usage/secure-hubs-with-azure-firewall-policy.png differ diff --git 
a/articles/azure-vmware/media/public-ip-usage/virtual-hub-page-public-ip-configuration.png b/articles/azure-vmware/media/public-ip-usage/virtual-hub-page-public-ip-configuration.png index 39bce924a644e..dafc16f5ab4a4 100644 Binary files a/articles/azure-vmware/media/public-ip-usage/virtual-hub-page-public-ip-configuration.png and b/articles/azure-vmware/media/public-ip-usage/virtual-hub-page-public-ip-configuration.png differ diff --git a/articles/azure-vmware/media/tutorial-create-private-cloud/create-private-cloud.png b/articles/azure-vmware/media/tutorial-create-private-cloud/create-private-cloud.png index 1031e9a8bcb74..f9546d782c2a1 100644 Binary files a/articles/azure-vmware/media/tutorial-create-private-cloud/create-private-cloud.png and b/articles/azure-vmware/media/tutorial-create-private-cloud/create-private-cloud.png differ diff --git a/articles/azure-vmware/move-azure-vmware-solution-across-regions.md b/articles/azure-vmware/move-azure-vmware-solution-across-regions.md index a509da9591ac7..99f90c27d18e0 100644 --- a/articles/azure-vmware/move-azure-vmware-solution-across-regions.md +++ b/articles/azure-vmware/move-azure-vmware-solution-across-regions.md @@ -3,7 +3,8 @@ title: Move Azure VMware Solution resources across regions description: This article describes how to move Azure VMware Solution resources from one Azure region to another. ms.custom: subject-moving-resources ms.topic: how-to -ms.date: 06/01/2021 +ms.service: azure-vmware +ms.date: 04/11/2022 # Customer intent: As an Azure service administrator, I want to move my Azure VMware Solution resources from Azure Region A to Azure Region B. --- @@ -67,9 +68,9 @@ Before you can move the source configuration, you'll need to [deploy the target ### Back up the source configuration -Back up the Azure VMware Solution (source) configuration that includes VC, NSX-T, and firewall policies and rules. +Back up the Azure VMware Solution (source) configuration that includes vCenter Server, NSX-T Data Center, and firewall policies and rules. -- **Compute:** Export existing inventory configuration. For Inventory backup, you can use RVtool (an open-source app). +- **Compute:** Export existing inventory configuration. For Inventory backup, you can use RVtools (an open-source app). - **Network and firewall policies and rules:** On the Azure VMware Solution target, create the same network segments as the source environment. @@ -133,17 +134,17 @@ Now that you have the ExpressRoute circuit IDs and authorization keys for both e After you establish connectivity, you'll create a VMware HCX site pairing between the private clouds to facilitate the migration of your VMs. You can connect or pair the VMware HCX Cloud Manager in Azure VMware Solution with the VMware HCX Connector in your data center. -1. Sign in to your source's vCenter, and under **Home**, select **HCX**. +1. Sign in to your source's vCenter Server, and under **Home**, select **HCX**. 1. Under **Infrastructure**, select **Site Pairing** and select the **Connect To Remote Site** option (in the middle of the screen). -1. Enter the Azure VMware Solution HCX Cloud Manager URL or IP address you noted earlier `https://x.x.x.9`, the Azure VMware Solution cloudadmin\@vsphere.local username, and the password. Then select **Connect**. +1. Enter the Azure VMware Solution HCX Cloud Manager URL or IP address you noted earlier `https://x.x.x.9`, the Azure VMware Solution cloudadmin@vsphere.local username, and the password. Then select **Connect**. 
> [!NOTE] > To successfully establish a site pair: > * Your VMware HCX Connector must be able to route to your HCX Cloud Manager IP over port 443. > - > * Use the same password that you used to sign in to vCenter. You defined this password on the initial deployment screen. + > * Use the same password that you used to sign in to vCenter Server. You defined this password on the initial deployment screen. You'll see a screen showing that your VMware HCX Cloud Manager in Azure VMware Solution and your on-premises VMware HCX Connector are connected (paired). @@ -227,11 +228,11 @@ In this section, you'll migrate the: In this step, you'll copy the source's vSphere configuration and move it to the target environment. -1. From the source's vCenter, use the same resource pool configuration and [create the same resource pool configuration](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.resmgmt.doc/GUID-0F6C6709-A5DA-4D38-BE08-6CB1002DD13D.html#example-creating-resource-pools-4) on the target's vCenter. +1. From the source's vCenter Server, use the same resource pool configuration and [create the same resource pool configuration](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.resmgmt.doc/GUID-0F6C6709-A5DA-4D38-BE08-6CB1002DD13D.html#example-creating-resource-pools-4) on the target's vCenter Server. -2. From the source's vCenter, use the same VM folder name and [create the same VM folder](https://docs.vmware.com/en/VMware-Validated-Design/6.1/sddc-deployment-of-cloud-operations-and-automation-in-the-first-region/GUID-9D935BBC-1228-4F9D-A61D-B86C504E469C.html) on the target's vCenter under **Folders**. +2. From the source's vCenter Server, use the same VM folder name and [create the same VM folder](https://docs.vmware.com/en/VMware-Validated-Design/6.1/sddc-deployment-of-cloud-operations-and-automation-in-the-first-region/GUID-9D935BBC-1228-4F9D-A61D-B86C504E469C.html) on the target's vCenter Server under **Folders**. -3. Use VMware HCX to migrate all VM templates from the source's vCenter to the target's vCenter. +3. Use VMware HCX to migrate all VM templates from the source's vCenter Server to the target's vCenter. 1. From the source, convert the existing templates to VMs and then migrate them to the target. @@ -239,7 +240,7 @@ In this step, you'll copy the source's vSphere configuration and move it to the 4. From the source environment, use the same VM Tags name and [create them on the target's vCenter](https://docs.vmware.com/en/VMware-vSphere/6.7/com.vmware.vsphere.vcenterhost.doc/GUID-05323758-1EBF-406F-99B6-B1A33E893453.html). -5. From the source's vCenter Content Library, use the subscribed library option to copy the ISO, OVF, OVA, and VM Templates to the target content library: +5. From the source's vCenter Server Content Library, use the subscribed library option to copy the ISO, OVF, OVA, and VM Templates to the target content library: 1. If the content library isn't already published, select the **Enable publishing** option. @@ -250,14 +251,14 @@ In this step, you'll copy the source's vSphere configuration and move it to the 4. Select **Sync Now**. -### Configure the target NSX-T environment +### Configure the target NSX-T Data Center environment -In this step, you'll use the source NSX-T configuration to configure the target NSX-T environment. +In this step, you'll use the source NSX-T Data Center configuration to configure the target NSX-T environment. 
>[!NOTE] ->You'll have multiple features configured on the source NSX-T, so you must copy or read from the source NXS-T and recreate it in the target private cloud. Use L2 Extension to keep same IP address and Mac Address of the VM while migrating Source to target AVS Private Cloud to avoid downtime due to IP change and related configuration. +>You'll have multiple features configured on the source NSX-T Data Center, so you must copy or read from the source NSX-T Data Center and recreate it in the target private cloud. Use L2 Extension to keep same IP address and Mac Address of the VM while migrating Source to target AVS Private Cloud to avoid downtime due to IP change and related configuration. -1. [Configure NSX network components](tutorial-nsx-t-network-segment.md) required in the target environment under default Tier-1 gateway. +1. [Configure NSX-T Data Center network components](tutorial-nsx-t-network-segment.md) required in the target environment under default Tier-1 gateway. 1. [Create the security group configuration](https://docs.vmware.com/en/VMware-NSX-T-Data-Center/3.1/administration/GUID-41CC06DF-1CD4-4233-B43E-492A9A3AD5F6.html). @@ -271,7 +272,7 @@ In this step, you'll use the source NSX-T configuration to configure the target 1. [Configure DNS forwarder](configure-dns-azure-vmware-solution.md). -1. [Configure a new Tier-1 gateway (other than default)](https://docs.vmware.com/en/VMware-NSX-T-Data-Center/3.1/administration/GUID-A6042263-374F-4292-892E-BC86876325A4.html). This configuration is based on the NSX-T configured on the source. +1. [Configure a new Tier-1 gateway (other than default)](https://docs.vmware.com/en/VMware-NSX-T-Data-Center/3.1/administration/GUID-A6042263-374F-4292-892E-BC86876325A4.html). This configuration is based on the NSX-T Data Center configured on the source. ### Migrate the VMs from the source diff --git a/articles/azure-vmware/move-ea-csp-subscriptions.md b/articles/azure-vmware/move-ea-csp-subscriptions.md index 06cf49956d8a3..ee73c83c132a5 100644 --- a/articles/azure-vmware/move-ea-csp-subscriptions.md +++ b/articles/azure-vmware/move-ea-csp-subscriptions.md @@ -3,6 +3,7 @@ title: Move Azure VMware Solution subscription to another subscription description: This article describes how to move Azure VMware Solution subscription to another subscription. You might move your resources for various reasons, such as billing. ms.custom: subject-moving-resources ms.topic: how-to +ms.service: azure-vmware ms.date: 04/26/2021 # Customer intent: As an Azure service administrator, I want to move my Azure VMware Solution subscription to another subscription. diff --git a/articles/azure-vmware/netapp-files-with-azure-vmware-solution.md b/articles/azure-vmware/netapp-files-with-azure-vmware-solution.md index 8c3b75f0e2840..e28ccd64fc666 100644 --- a/articles/azure-vmware/netapp-files-with-azure-vmware-solution.md +++ b/articles/azure-vmware/netapp-files-with-azure-vmware-solution.md @@ -2,6 +2,7 @@ title: Attach Azure NetApp Files to Azure VMware Solution VMs description: Use Azure NetApp Files with Azure VMware Solution VMs to migrate and sync data across on-premises servers, Azure VMware Solution VMs, and cloud infrastructures. 
ms.topic: how-to +ms.service: azure-vmware ms.date: 05/10/2022 --- diff --git a/articles/azure-vmware/plan-private-cloud-deployment.md b/articles/azure-vmware/plan-private-cloud-deployment.md index 92edd0f5bf0c7..ed2173fbeadb6 100644 --- a/articles/azure-vmware/plan-private-cloud-deployment.md +++ b/articles/azure-vmware/plan-private-cloud-deployment.md @@ -3,6 +3,7 @@ title: Plan the Azure VMware Solution deployment description: Learn how to plan your Azure VMware Solution deployment. ms.topic: tutorial ms.custom: contperf-fy21q4 +ms.service: azure-vmware ms.date: 09/27/2021 --- diff --git a/articles/azure-vmware/protect-azure-vmware-solution-with-application-gateway.md b/articles/azure-vmware/protect-azure-vmware-solution-with-application-gateway.md index 7580c8d266ee0..27ca50f953bd7 100644 --- a/articles/azure-vmware/protect-azure-vmware-solution-with-application-gateway.md +++ b/articles/azure-vmware/protect-azure-vmware-solution-with-application-gateway.md @@ -2,6 +2,7 @@ title: Protect web apps on Azure VMware Solution with Azure Application Gateway description: Configure Azure Application Gateway to securely expose your web apps running on Azure VMware Solution. ms.topic: how-to +ms.service: azure-vmware ms.date: 02/10/2021 --- diff --git a/articles/azure-vmware/request-host-quota-azure-vmware-solution.md b/articles/azure-vmware/request-host-quota-azure-vmware-solution.md index c5137c5b63f3f..575d53b4aaed7 100644 --- a/articles/azure-vmware/request-host-quota-azure-vmware-solution.md +++ b/articles/azure-vmware/request-host-quota-azure-vmware-solution.md @@ -3,6 +3,7 @@ title: Request host quota for Azure VMware Solution description: Learn how to request host quota/capacity for Azure VMware Solution. You can also request more hosts in an existing Azure VMware Solution private cloud. ms.topic: how-to ms.custom: contperf-fy21q3 +ms.service: azure-vmware ms.date: 09/27/2021 #Customer intent: As an Azure service admin, I want to request hosts for either a new private cloud deployment or I want to have more hosts allocated in an existing private cloud. diff --git a/articles/azure-vmware/reserved-instance.md b/articles/azure-vmware/reserved-instance.md index fd5a11bac5c7b..bc9492567a0a9 100644 --- a/articles/azure-vmware/reserved-instance.md +++ b/articles/azure-vmware/reserved-instance.md @@ -2,6 +2,7 @@ title: Reserved instances of Azure VMware Solution description: Learn how to buy a reserved instance for Azure VMware Solution. The reserved instance covers only the compute part of your usage and includes software licensing costs. ms.topic: how-to +ms.service: azure-vmware ms.date: 05/13/2021 --- diff --git a/articles/azure-vmware/rotate-cloudadmin-credentials.md b/articles/azure-vmware/rotate-cloudadmin-credentials.md index a1a43c8a45c76..6a69029dc7e76 100644 --- a/articles/azure-vmware/rotate-cloudadmin-credentials.md +++ b/articles/azure-vmware/rotate-cloudadmin-credentials.md @@ -2,9 +2,10 @@ title: Rotate the cloudadmin credentials for Azure VMware Solution description: Learn how to rotate the vCenter Server credentials for your Azure VMware Solution private cloud. ms.topic: how-to -ms.date: 09/10/2021 +ms.service: azure-vmware +ms.date: 04/11/2022 -#Customer intent: As an Azure service administrator, I want to rotate my cloudadmin credentials so that the HCX Connector has the latest vCenter CloudAdmin credentials. 
+#Customer intent: As an Azure service administrator, I want to rotate my cloudadmin credentials so that the HCX Connector has the latest vCenter Server CloudAdmin credentials. --- @@ -16,17 +17,17 @@ ms.date: 09/10/2021 In this article, you'll rotate the cloudadmin credentials (vCenter Server *CloudAdmin* credentials) for your Azure VMware Solution private cloud. Although the password for this account doesn't expire, you can generate a new one at any time. >[!CAUTION] ->If you use your cloudadmin credentials to connect services to vCenter in your private cloud, those connections will stop working once you rotate your password. Those connections will also lock out the cloudadmin account unless you stop those services before rotating the password. +>If you use your cloudadmin credentials to connect services to vCenter Server in your private cloud, those connections will stop working once you rotate your password. Those connections will also lock out the cloudadmin account unless you stop those services before rotating the password. ## Prerequisites -Consider and determine which services connect to vCenter as *cloudadmin@vsphere.local* before you rotate the password. These services may include VMware services such as HCX, vRealize Orchestrator, vRealize Operations Manager, VMware Horizon, or other third-party tools used for monitoring or provisioning. +Consider and determine which services connect to vCenter Server as *cloudadmin@vsphere.local* before you rotate the password. These services may include VMware services such as HCX, vRealize Orchestrator, vRealize Operations Manager, VMware Horizon, or other third-party tools used for monitoring or provisioning. -One way to determine which services authenticate to vCenter with the cloudadmin user is to inspect vSphere events using the vSphere Client for your private cloud. After you identify such services, and before rotating the password, you must stop these services. Otherwise, the services won't work after you rotate the password. You'll also experience temporary locks on your vCenter CloudAdmin account, as these services continuously attempt to authenticate using a cached version of the old credentials. +One way to determine which services authenticate to vCenter Server with the cloudadmin user is to inspect vSphere events using the vSphere Client for your private cloud. After you identify such services, and before rotating the password, you must stop these services. Otherwise, the services won't work after you rotate the password. You'll also experience temporary locks on your vCenter Server CloudAdmin account, as these services continuously attempt to authenticate using a cached version of the old credentials. Instead of using the cloudadmin user to connect services to vCenter, we recommend individual accounts for each service. For more information about setting up separate accounts for connected services, see [Access and Identity Concepts](./concepts-identity.md). -## Reset your vCenter credentials +## Reset your vCenter Server credentials ### [Portal](#tab/azure-portal) @@ -34,7 +35,7 @@ Instead of using the cloudadmin user to connect services to vCenter, we recommen 1. Select **Generate new password**. - :::image type="content" source="media/rotate-cloudadmin-credentials/reset-vcenter-credentials-1.png" alt-text="Screenshot showing the vCenter credentials and a way to copy them or generate a new password." 
lightbox="media/rotate-cloudadmin-credentials/reset-vcenter-credentials-1.png"::: + :::image type="content" source="media/rotate-cloudadmin-credentials/reset-vcenter-credentials-1.png" alt-text="Screenshot showing the vCenter Server credentials and a way to copy them or generate a new password." lightbox="media/rotate-cloudadmin-credentials/reset-vcenter-credentials-1.png"::: 1. Select the confirmation checkbox and then select **Generate password**. @@ -71,12 +72,12 @@ To begin using Azure CLI: 3. Select the correct connection to Azure VMware Solution and select **Edit Connection**. -4. Provide the new vCenter user credentials and select **Edit**, which saves the credentials. Save should show successful. +4. Provide the new vCenter Server user credentials and select **Edit**, which saves the credentials. Save should show successful. ## Next steps -Now that you've covered resetting your vCenter credentials for Azure VMware Solution, you may want to learn about: +Now that you've covered resetting your vCenter Server credentials for Azure VMware Solution, you may want to learn about: - [Integrating Azure native services in Azure VMware Solution](integrate-azure-native-services.md) - [Deploying disaster recovery for Azure VMware Solution workloads using VMware HCX](deploy-disaster-recovery-using-vmware-hcx.md) diff --git a/articles/azure-vmware/set-up-backup-server-for-azure-vmware-solution.md b/articles/azure-vmware/set-up-backup-server-for-azure-vmware-solution.md index f4498b9365103..d2caca1e95d78 100644 --- a/articles/azure-vmware/set-up-backup-server-for-azure-vmware-solution.md +++ b/articles/azure-vmware/set-up-backup-server-for-azure-vmware-solution.md @@ -2,6 +2,7 @@ title: Set up Azure Backup Server for Azure VMware Solution description: Set up your Azure VMware Solution environment to back up virtual machines using Azure Backup Server. 
ms.topic: how-to +ms.service: azure-vmware ms.date: 04/06/2022 --- diff --git a/articles/azure-vmware/toc.yml b/articles/azure-vmware/toc.yml index 217dd4c1516f0..790d774e4893d 100644 --- a/articles/azure-vmware/toc.yml +++ b/articles/azure-vmware/toc.yml @@ -44,6 +44,8 @@ href: concepts-api-management.md - name: Hub and spoke href: concepts-hub-and-spoke.md + - name: Internet connectivity design considerations + href: concepts-design-public-internet-access.md - name: Network design considerations href: concepts-network-design-considerations.md - name: Networking and interconnectivity @@ -72,6 +74,8 @@ href: configure-alerts-for-azure-vmware-solution.md - name: Attach disk pools to Azure VMware Solution hosts href: attach-disk-pools-to-azure-vmware-solution-hosts.md + - name: Attach Azure NetApp Files datastores to Azure VMware Solution hosts + href: attach-azure-netapp-files-to-azure-vmware-solution-hosts.md - name: Attach Azure NetApp Files to Azure VMware Solution VMs href: netapp-files-with-azure-vmware-solution.md - name: Backup with Azure Backup Server @@ -104,6 +108,16 @@ href: configure-identity-source-vcenter.md - name: Configure GitHub Enterprise Server href: configure-github-enterprise-server.md + - name: Configure Internet connectivity + items: + - name: Enable Managed SNAT for Azure VMware Solution Workloads + href: enable-managed-snat-for-workloads.md + - name: Enable Public IP to the NSX Edge for Azure VMware Solution + href: enable-public-ip-nsx-edge.md + - name: Disable Internet access or enable a default route + href: disable-internet-access.md + - name: Enable public internet access + href: enable-public-internet-access.md - name: Configure networking items: - name: Configure DHCP server or relay @@ -120,8 +134,6 @@ href: configure-port-mirroring-azure-vmware-solution.md - name: Configure a site-to-site VPN in vWAN href: configure-site-to-site-vpn-gateway.md - - name: Enable public internet access - href: enable-public-internet-access.md - name: HCX Mobility Optimized Networking (MON) guidance href: vmware-hcx-mon-guidance.md - name: Configure storage policies diff --git a/articles/azure-vmware/tutorial-access-private-cloud.md b/articles/azure-vmware/tutorial-access-private-cloud.md index 768a1da870bf6..bcdaf1792e438 100644 --- a/articles/azure-vmware/tutorial-access-private-cloud.md +++ b/articles/azure-vmware/tutorial-access-private-cloud.md @@ -2,6 +2,7 @@ title: Tutorial - Access your private cloud description: Learn how to access an Azure VMware Solution private cloud ms.topic: tutorial +ms.service: azure-vmware ms.date: 08/13/2021 --- diff --git a/articles/azure-vmware/tutorial-configure-networking.md b/articles/azure-vmware/tutorial-configure-networking.md index f67db24e49d58..d46739358d0ad 100644 --- a/articles/azure-vmware/tutorial-configure-networking.md +++ b/articles/azure-vmware/tutorial-configure-networking.md @@ -3,7 +3,8 @@ title: Tutorial - Configure networking for your VMware private cloud in Azure description: Learn to create and configure the networking needed to deploy your private cloud in Azure ms.topic: tutorial ms.custom: contperf-fy22q1 -ms.date: 07/30/2021 +ms.service: azure-vmware +ms.date: 05/31/2022 --- @@ -29,7 +30,7 @@ In this tutorial, you learn how to: ## Connect with the Azure vNet connect feature -You can use the **Azure vNet connect** feature to use an existing vNet or create a new vNet to connect to Azure VMware Solution. 
+You can use the **Azure vNet connect** feature to use an existing vNet or create a new vNet to connect to Azure VMware Solution. **Azure vNet connect** is a function that configures vNet connectivity; it doesn't record configuration state. Browse the Azure portal to check which settings have been configured. >[!NOTE] >Address space in the vNet cannot overlap with the Azure VMware Solution private cloud CIDR. @@ -42,6 +43,7 @@ Before selecting an existing vNet, there are specific requirements that must be 1. In the same region as Azure VMware Solution private cloud. 1. In the same resource group as Azure VMware Solution private cloud. 1. vNet must contain an address space that doesn't overlap with Azure VMware Solution. +1. Validate that the solution design is within [Azure VMware Solution limits](https://docs.microsoft.com/azure/azure-resource-manager/management/azure-subscription-service-limits). ### Select an existing vNet diff --git a/articles/azure-vmware/tutorial-create-private-cloud.md b/articles/azure-vmware/tutorial-create-private-cloud.md index 9ebdba199cfd9..817ed85bb2089 100644 --- a/articles/azure-vmware/tutorial-create-private-cloud.md +++ b/articles/azure-vmware/tutorial-create-private-cloud.md @@ -2,6 +2,7 @@ title: Tutorial - Deploy an Azure VMware Solution private cloud description: Learn how to create and deploy an Azure VMware Solution private cloud ms.topic: tutorial +ms.service: azure-vmware ms.date: 09/29/2021 --- diff --git a/articles/azure-vmware/tutorial-delete-private-cloud.md b/articles/azure-vmware/tutorial-delete-private-cloud.md index 05001138ecba5..27a71014c2673 100644 --- a/articles/azure-vmware/tutorial-delete-private-cloud.md +++ b/articles/azure-vmware/tutorial-delete-private-cloud.md @@ -2,6 +2,7 @@ title: Tutorial - Delete an Azure VMware Solution private cloud description: Learn how to delete an Azure VMware Solution private cloud that you no longer need. ms.topic: tutorial +ms.service: azure-vmware ms.date: 03/13/2021 --- diff --git a/articles/azure-vmware/tutorial-expressroute-global-reach-private-cloud.md b/articles/azure-vmware/tutorial-expressroute-global-reach-private-cloud.md index b3bb060324f58..b7cf0158dd27d 100644 --- a/articles/azure-vmware/tutorial-expressroute-global-reach-private-cloud.md +++ b/articles/azure-vmware/tutorial-expressroute-global-reach-private-cloud.md @@ -3,6 +3,7 @@ title: Peer on-premises environments to Azure VMware Solution description: Learn how to create ExpressRoute Global Reach peering to a private cloud in Azure VMware Solution. ms.topic: tutorial ms.custom: contperf-fy21q4, contperf-fy22q1 +ms.service: azure-vmware ms.date: 07/28/2021 --- diff --git a/articles/azure-vmware/tutorial-network-checklist.md b/articles/azure-vmware/tutorial-network-checklist.md index 088d25465d812..025f3c854d031 100644 --- a/articles/azure-vmware/tutorial-network-checklist.md +++ b/articles/azure-vmware/tutorial-network-checklist.md @@ -2,6 +2,7 @@ title: Tutorial - Network planning checklist description: Learn about the network requirements for network connectivity and network ports on Azure VMware Solution.
ms.topic: tutorial +ms.service: azure-vmware ms.date: 07/01/2021 --- diff --git a/articles/azure-vmware/tutorial-nsx-t-network-segment.md b/articles/azure-vmware/tutorial-nsx-t-network-segment.md index 70891d82a3052..0c325ca9ee2a4 100644 --- a/articles/azure-vmware/tutorial-nsx-t-network-segment.md +++ b/articles/azure-vmware/tutorial-nsx-t-network-segment.md @@ -3,6 +3,7 @@ title: Tutorial - Add a network segment in Azure VMware Solution description: Learn how to add a network segment to use for virtual machines (VMs) in vCenter Server. ms.topic: tutorial ms.custom: contperf-fy22q1 +ms.service: azure-vmware ms.date: 07/16/2021 --- diff --git a/articles/azure-vmware/tutorial-scale-private-cloud.md b/articles/azure-vmware/tutorial-scale-private-cloud.md index 0a1c9833802ee..85c1d26e0eeaf 100644 --- a/articles/azure-vmware/tutorial-scale-private-cloud.md +++ b/articles/azure-vmware/tutorial-scale-private-cloud.md @@ -2,6 +2,7 @@ title: Tutorial - Scale clusters in a private cloud description: In this tutorial, you use the Azure portal to scale an Azure VMware Solution private cloud. ms.topic: tutorial +ms.service: azure-vmware ms.date: 08/03/2021 #Customer intent: As a VMware administrator, I want to learn how to scale an Azure VMware Solution private cloud in the Azure portal. diff --git a/articles/azure-vmware/vmware-hcx-mon-guidance.md b/articles/azure-vmware/vmware-hcx-mon-guidance.md index 80cb03b2398fe..bf109864549bb 100644 --- a/articles/azure-vmware/vmware-hcx-mon-guidance.md +++ b/articles/azure-vmware/vmware-hcx-mon-guidance.md @@ -2,6 +2,7 @@ title: VMware HCX Mobility Optimized Networking (MON) guidance description: Learn about Azure VMware Solution-specific use cases for Mobility Optimized Networking (MON). ms.topic: reference +ms.service: azure-vmware ms.date: 04/11/2022 --- diff --git a/articles/azure-vmware/vrealize-operations-for-azure-vmware-solution.md b/articles/azure-vmware/vrealize-operations-for-azure-vmware-solution.md index 7589d28aa87db..51622dec4c8d1 100644 --- a/articles/azure-vmware/vrealize-operations-for-azure-vmware-solution.md +++ b/articles/azure-vmware/vrealize-operations-for-azure-vmware-solution.md @@ -2,13 +2,14 @@ title: Configure vRealize Operations for Azure VMware Solution description: Learn how to set up vRealize Operations for your Azure VMware Solution private cloud. ms.topic: how-to -ms.date: 01/26/2021 +ms.service: azure-vmware +ms.date: 04/11/2022 --- # Configure vRealize Operations for Azure VMware Solution -vRealize Operations Manager is an operations management platform that allows VMware infrastructure administrators to monitor system resources. These system resources could be application-level or infrastructure level (both physical and virtual) objects. Most VMware administrators have used vRealize Operations to monitor and manage the VMware private cloud components – vCenter, ESXi, NSX-T, vSAN, and VMware HCX. Each provisioned Azure VMware Solution private cloud includes a dedicated vCenter, NSX-T, vSAN, and HCX deployment. +vRealize Operations is an operations management platform that allows VMware infrastructure administrators to monitor system resources. These system resources could be application-level or infrastructure level (both physical and virtual) objects. Most VMware administrators have used vRealize Operations to monitor and manage the VMware private cloud components – vCenter Server, ESXi, NSX-T Data Center, vSAN, and VMware HCX. 
Each provisioned Azure VMware Solution private cloud includes a dedicated vCenter Server, NSX-T Data Center, vSAN, and HCX deployment. Thoroughly review [Before you begin](#before-you-begin) and [Prerequisites](#prerequisites) first. Then, we'll walk you through the two typical deployment topologies: @@ -30,11 +31,11 @@ Thoroughly review [Before you begin](#before-you-begin) and [Prerequisites](#pre ## On-premises vRealize Operations managing Azure VMware Solution deployment -Most customers have an existing on-premise deployment of vRealize Operations to manage one or more on-premise vCenters domains. When they provision an Azure VMware Solution private cloud, they connect their on-premises environment with their private cloud using an Azure ExpressRoute or a Layer 3 VPN solution. +Most customers have an existing on-premise deployment of vRealize Operations to manage one or more on-premises vCenter Server domains. When they provision an Azure VMware Solution private cloud, they connect their on-premises environment with their private cloud using an Azure ExpressRoute or a Layer 3 VPN solution. :::image type="content" source="media/vrealize-operations-manager/vrealize-operations-deployment-option-1.png" alt-text="Diagram showing the on-premises vRealize Operations managing Azure VMware Solution deployment." border="false"::: -To extend the vRealize Operations capabilities to the Azure VMware Solution private cloud, you create an adapter [instance for the private cloud resources](https://docs.vmware.com/en/vRealize-Operations-Manager/8.1/com.vmware.vcom.config.doc/GUID-640AD750-301E-4D36-8293-1BFEB67E2600.html). It collects data from the Azure VMware Solution private cloud and brings it into on-premises vRealize Operations. The on-premises vRealize Operations Manager instance can directly connect to the vCenter and NSX-T manager on Azure VMware Solution. Optionally, you can deploy a vRealize Operations Remote Collector on the Azure VMware Solution private cloud. The collector compresses and encrypts the data collected from the private cloud before it's sent over the ExpressRoute or VPN network to the vRealize Operations Manager running on-premise. +To extend the vRealize Operations capabilities to the Azure VMware Solution private cloud, you create an adapter [instance for the private cloud resources](https://docs.vmware.com/en/vRealize-Operations-Manager/8.1/com.vmware.vcom.config.doc/GUID-640AD750-301E-4D36-8293-1BFEB67E2600.html). It collects data from the Azure VMware Solution private cloud and brings it into on-premises vRealize Operations. The on-premises vRealize Operations Manager instance can directly connect to the vCenter Server and NSX-T Manager on Azure VMware Solution. Optionally, you can deploy a vRealize Operations Remote Collector on the Azure VMware Solution private cloud. The collector compresses and encrypts the data collected from the private cloud before it's sent over the ExpressRoute or VPN network to the vRealize Operations Manager running on-premise. > [!TIP] > Refer to the [VMware documentation](https://docs.vmware.com/en/vRealize-Operations-Manager/8.1/com.vmware.vcom.vapp.doc/GUID-7FFC61A0-7562-465C-A0DC-46D092533984.html) for step-by-step guide for installing vRealize Operations Manager. 
@@ -50,13 +51,13 @@ Another option is to deploy an instance of vRealize Operations Manager on a vSph :::image type="content" source="media/vrealize-operations-manager/vrealize-operations-deployment-option-2.png" alt-text="Diagram showing the vRealize Operations running on Azure VMware Solution." border="false"::: -Once the instance has been deployed, you can configure vRealize Operations to collect data from vCenter, ESXi, NSX-T, vSAN, and HCX. +Once the instance has been deployed, you can configure vRealize Operations to collect data from vCenter Server, ESXi, NSX-T Data Center, vSAN, and HCX. ## Known limitations -- The **cloudadmin\@vsphere.local** user in Azure VMware Solution has [limited privileges](concepts-identity.md). Virtual machines (VMs) on Azure VMware Solution doesn't support in-guest memory collection using VMware tools. Active and consumed memory utilization continues to work in this case. +- The **cloudadmin@vsphere.local** user in Azure VMware Solution has [limited privileges](concepts-identity.md). Virtual machines (VMs) on Azure VMware Solution doesn't support in-guest memory collection using VMware tools. Active and consumed memory utilization continues to work in this case. - Workload optimization for host-based business intent doesn't work because Azure VMware Solutions manage cluster configurations, including DRS settings. - Workload optimization for the cross-cluster placement within the SDDC using the cluster-based business intent is fully supported with vRealize Operations Manager 8.0 and onwards. However, workload optimization isn't aware of resource pools and places the VMs at the cluster level. A user can manually correct it in the Azure VMware Solution vCenter Server interface. - You can't sign in to vRealize Operations Manager using your Azure VMware Solution vCenter Server credentials. @@ -66,11 +67,11 @@ When you connect the Azure VMware Solution vCenter to vRealize Operations Manage :::image type="content" source="./media/vrealize-operations-manager/warning-adapter-instance-creation-succeeded.png" alt-text="Screenshot showing a Warning message that states the adapter instance was created successfully."::: -The warning occurs because the **cloudadmin\@vsphere.local** user in Azure VMware Solution doesn't have sufficient privileges to do all vCenter Server actions required for registration. However, the privileges are sufficient for the adapter instance to do data collection, as seen below: +The warning occurs because the **cloudadmin@vsphere.local** user in Azure VMware Solution doesn't have sufficient privileges to do all vCenter Server actions required for registration. However, the privileges are sufficient for the adapter instance to do data collection, as seen below: :::image type="content" source="./media/vrealize-operations-manager/adapter-instance-to-perform-data-collection.png" alt-text="Screenshot showing the adapter instance to collect data."::: -For more information, see [Privileges Required for Configuring a vCenter Adapter Instance](https://docs.vmware.com/en/vRealize-Operations-Manager/8.1/com.vmware.vcom.core.doc/GUID-3BFFC92A-9902-4CF2-945E-EA453733B426.html). +For more information, see [Privileges Required for Configuring a vCenter Server Adapter Instance](https://docs.vmware.com/en/vRealize-Operations-Manager/8.1/com.vmware.vcom.core.doc/GUID-3BFFC92A-9902-4CF2-945E-EA453733B426.html). 
diff --git a/articles/azure-web-pubsub/concept-metrics.md b/articles/azure-web-pubsub/concept-metrics.md index 34fe90dcb99ce..e41f171384da3 100644 --- a/articles/azure-web-pubsub/concept-metrics.md +++ b/articles/azure-web-pubsub/concept-metrics.md @@ -23,6 +23,7 @@ Metrics provide the running info of the service. The available metrics are: |Connection Quota Utilization|Percent|Max / Avg|The percentage of connection connected relative to connection quota.|No Dimensions| |Inbound Traffic|Bytes|Sum|The inbound traffic of service|No Dimensions| |Outbound Traffic|Bytes|Sum|The outbound traffic of service|No Dimensions| +|Server Load|Percent|Max / Avg|The percentage of server load|No Dimensions| ### Understand Dimensions diff --git a/articles/azure-web-pubsub/concept-performance.md b/articles/azure-web-pubsub/concept-performance.md index 0cb029a78da8a..67c63503bcc82 100644 --- a/articles/azure-web-pubsub/concept-performance.md +++ b/articles/azure-web-pubsub/concept-performance.md @@ -14,6 +14,18 @@ One of the key benefits of using Azure Web PubSub Service is the ease of scaling In this guide, we'll introduce the factors that affect Web PubSub upstream application performance. We'll describe typical performance in different use-case scenarios. +## Quick evaluation using metrics + Before going through the factors that impact performance, let's first introduce an easy way to monitor the pressure on your service. There's a metric called **Server Load** in the Azure portal. + + ![Screenshot of the Server Load metric of Azure Web PubSub in the Azure portal. The metric shows Server Load at about 8 percent usage.](./media/concept-performance/server-load.png "Server Load") + + + It shows the computing pressure on your Azure Web PubSub service. You can test your own scenario and check this metric to decide whether to scale up. The latency inside the Azure Web PubSub service remains low if Server Load is below 70%. + +> [!NOTE] +> If you're using unit 50 or unit 100 **and** your scenario is mainly sending to small groups (group size <100), check [sending to small group](#small-group) for reference. In those scenarios, there's a large routing cost that isn't included in Server Load. + + Below are detailed concepts for evaluating performance. ## Term definitions *Inbound*: The incoming message to Azure Web PubSub Service. @@ -183,4 +195,4 @@ The bandwidth limit is the same as that for **send to big group**. ## Next steps -[!INCLUDE [next step](includes/include-next-step.md)] \ No newline at end of file +[!INCLUDE [next step](includes/include-next-step.md)] diff --git a/articles/azure-web-pubsub/includes/cli-awps-creation.md b/articles/azure-web-pubsub/includes/cli-awps-creation.md index 34d3a9a435fa3..d199a1c49b7e4 100644 --- a/articles/azure-web-pubsub/includes/cli-awps-creation.md +++ b/articles/azure-web-pubsub/includes/cli-awps-creation.md @@ -6,17 +6,17 @@ ms.date: 08/06/2021 ms.author: lianwei --- -Use the Azure CLI [az webpubsub create](/cli/azure/webpubsub#az-webpubsub-create) command to create a Web PubSub in the resource group from the previous step, providing the following information: +Run [az extension add](/cli/azure/extension#az-extension-add) to install or upgrade the *webpubsub* extension to the current version.
-- Resource name: A string of 3 to 24 characters that can contain only numbers (0-9), letters (a-z, A-Z), and hyphens (-) +```azurecli-interactive +az extension add --upgrade --name webpubsub +``` + +Use the Azure CLI [az webpubsub create](/cli/azure/webpubsub#az-webpubsub-create) command to create a Web PubSub in the resource group you've created. The following command creates a _Free_ Web PubSub resource under resource group _myResourceGroup_ in _EastUS_: > [!Important] > Each Web PubSub resource must have a unique name. Replace <your-unique-resource-name> with the name of your Web PubSub in the following examples. -- Resource group name: **myResourceGroup**. -- The location: **EastUS**. -- Sku: **Free_F1** - ```azurecli-interactive az webpubsub create --name "" --resource-group "myResourceGroup" --location "EastUS" --sku Free_F1 ``` diff --git a/articles/azure-web-pubsub/includes/cli-delete-resources.md b/articles/azure-web-pubsub/includes/cli-delete-resources.md index 6e5a829970a13..aefcfccee3d36 100644 --- a/articles/azure-web-pubsub/includes/cli-delete-resources.md +++ b/articles/azure-web-pubsub/includes/cli-delete-resources.md @@ -6,7 +6,7 @@ ms.date: 08/06/2021 ms.author: lianwei --- -Other quickstarts and tutorials in this collection build upon this quickstart. If you plan to continue on to work with subsequent quickstarts and tutorials, you may wish to leave these resources in place. +If you plan to continue on to work with subsequent quickstarts and tutorials, you may wish to leave these resources in place. When no longer needed, you can use the Azure CLI [az group delete](/cli/azure/group) command to remove the resource group and all related resources: diff --git a/articles/azure-web-pubsub/index.yml b/articles/azure-web-pubsub/index.yml index 3b0b4b5c924fc..b604a4a0ce3ee 100644 --- a/articles/azure-web-pubsub/index.yml +++ b/articles/azure-web-pubsub/index.yml @@ -10,7 +10,7 @@ metadata: ms.topic: landing-page author: yjin81 ms.author: yajin1 - ms.date: 01/27/2021 + ms.date: 05/22/2022 # linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | sample | tutorial | video | whats-new @@ -24,33 +24,41 @@ landingContent: links: - text: What is Azure Web PubSub service? 
url: overview.md - - text: Key concepts in Azure Web PubSub service + - linkListType: concept + links: + - text: About hubs, groups, and connections url: key-concepts.md + - text: Client protocols + url: concept-client-protocols.md + - text: Billing model + url: concept-billing-model.md + - text: Event handler + url: howto-develop-eventhandler.md # Card - title: Get started linkLists: - linkListType: quickstart links: - - text: Connect to the instance - url: quickstart-cli-try.md - - text: Start with a simple chatroom serverless application - url: quickstart-serverless.md - - text: Serverless Development with Azure Functions - url: tutorial-serverless-notification.md - - linkListType: how-to-guide + - text: Create from Azure portal + url: howto-develop-create-instance.md + - text: Create from Azure CLI + url: quickstart-cli-create.md + - linkListType: tutorial links: - text: Build a chat app url: tutorial-build-chat.md - - text: Client streaming using service-supported subprotocol + - text: Build a notification app with Azure Functions + url: tutorial-serverless-notification.md + - text: Build a simple chatroom serverless application + url: quickstart-serverless.md + - text: Client streaming using subprotocol url: tutorial-subprotocol.md - - text: Authentication and permissions - url: tutorial-permission.md - + # Card - title: Live Demos linkLists: - - linkListType: quickstart + - linkListType: sample links: - text: Whiteboard url: https://azure.github.io/azure-webpubsub/demos/whiteboard @@ -58,4 +66,55 @@ landingContent: url: https://azure.github.io/azure-webpubsub/demos/chat - text: Code streaming url: https://azure.github.io/azure-webpubsub/demos/code-streaming - \ No newline at end of file + - text: Score board + url: https://azure.github.io/azure-webpubsub/demos/scoreboard + + # Card + - title: Common tasks + linkLists: + - linkListType: tutorial + links: + - text: Authentication and permissions + url: tutorial-permission.md + - linkListType: how-to-guide + links: + - text: Use private endpoints + url: howto-secure-private-endpoints.md + - text: Secure network access + url: howto-secure-network-access-control.md + - text: Audit compliance with Azure Policy + url: howto-monitor-azure-policy.md + + # Card + - title: Advanced usage + linkLists: + - linkListType: concept + links: + - text: High availability + url: concept-disaster-recovery.md + - text: Performance consideration + url: concept-performance.md + - linkListType: how-to-guide + links: + - text: Create reliable Websocket clients + url: howto-develop-reliable-clients.md + - text: Troubleshoot with resource logs + url: howto-troubleshoot-resource-logs.md + - text: Debug event handler locally + url: howto-local-debug-event-handler.md + + # Card + - title: More resources + linkLists: + - linkListType: concept + links: + - text: Choose between Azure Web PubSub service and Azure SignalR service + url: https://docs.microsoft.com/azure/azure-web-pubsub/resource-faq#how-do-i-choose-between-azure-signalr-service-and-azure-web-pubsub-service + - linkListType: reference + links: + - text: REST API + url: https://docs.microsoft.com/rest/api/webpubsub/ + - text: Azure CLI + url: https://docs.microsoft.com/cli/azure/service-page/azure%20web%20pubsub?view=azure-cli-latest + - text: Azure Functions bindings + url: reference-functions-bindings.md diff --git a/articles/azure-web-pubsub/media/concept-performance/server-load.png b/articles/azure-web-pubsub/media/concept-performance/server-load.png new file mode 100644 index 
0000000000000..dab23ee300f19 Binary files /dev/null and b/articles/azure-web-pubsub/media/concept-performance/server-load.png differ diff --git a/articles/azure-web-pubsub/media/tutorial-serverless-iot/iot-devices-sample.png b/articles/azure-web-pubsub/media/tutorial-serverless-iot/iot-devices-sample.png new file mode 100644 index 0000000000000..e9ff20bd3ddea Binary files /dev/null and b/articles/azure-web-pubsub/media/tutorial-serverless-iot/iot-devices-sample.png differ diff --git a/articles/azure-web-pubsub/media/tutorial-serverless-static-web-app/tutorial-serverless-static-web-app.png b/articles/azure-web-pubsub/media/tutorial-serverless-static-web-app/tutorial-serverless-static-web-app.png new file mode 100644 index 0000000000000..67ac8e88d07f4 Binary files /dev/null and b/articles/azure-web-pubsub/media/tutorial-serverless-static-web-app/tutorial-serverless-static-web-app.png differ diff --git a/articles/azure-web-pubsub/overview.md b/articles/azure-web-pubsub/overview.md index cc938f3d427b5..32785387a5818 100644 --- a/articles/azure-web-pubsub/overview.md +++ b/articles/azure-web-pubsub/overview.md @@ -8,7 +8,7 @@ ms.topic: overview ms.date: 11/08/2021 --- -# What is Azure Web PubSub service? +# What is Azure Web PubSub service? The Azure Web PubSub Service helps you build real-time messaging web applications using WebSockets and the publish-subscribe pattern easily. This real-time functionality allows publishing content updates between server and connected clients (for example a single page web application or mobile application). The clients do not need to poll the latest updates, or submit new HTTP requests for updates. @@ -59,6 +59,14 @@ There are many different ways to program with Azure Web PubSub service, as some - **Use provided SDKs to manage the WebSocket connections in self-host app servers** - Azure Web PubSub service provides SDKs in C#, JavaScript, Java and Python to manage the WebSocket connections easily, including broadcast messages to the connections, add connections to some groups, or close the connections, etc. - **Send messages from server to clients via REST API** - Azure Web PubSub service provides REST API to enable applications to post messages to clients connected, in any REST capable programming languages. +## Quick start + +> [!div class="nextstepaction"] +> [Play with chat demo](https://azure.github.io/azure-webpubsub/demos/chat) + +> [!div class="nextstepaction"] +> [Build a chat app](tutorial-build-chat.md) + ## Next steps [!INCLUDE [next step](includes/include-next-step.md)] diff --git a/articles/azure-web-pubsub/quickstart-use-sdk.md b/articles/azure-web-pubsub/quickstart-use-sdk.md index 47c44c1eb2663..7a161c63032af 100644 --- a/articles/azure-web-pubsub/quickstart-use-sdk.md +++ b/articles/azure-web-pubsub/quickstart-use-sdk.md @@ -139,7 +139,7 @@ Now let's use Azure Web PubSub SDK to publish a message to the connected client. 
console.log('Usage: node publish '); return 1; } - const hub = "pubsub"; + const hub = "myHub1"; let service = new WebPubSubServiceClient(process.env.WebPubSubConnectionString, hub); // by default it uses `application/json`, specify contentType as `text/plain` if you want plain-text service.sendToAll(process.argv[2], { contentType: "text/plain" }); diff --git a/articles/azure-web-pubsub/reference-rest-api-data-plane.md b/articles/azure-web-pubsub/reference-rest-api-data-plane.md new file mode 100644 index 0000000000000..f9fc5b7a0254d --- /dev/null +++ b/articles/azure-web-pubsub/reference-rest-api-data-plane.md @@ -0,0 +1,65 @@ +--- +title: Azure Web PubSub service data plane REST API reference overview +description: Describes the REST APIs Azure Web PubSub supports to manage the WebSocket connections and send messages to them. +author: vicancy +ms.author: lianwei +ms.service: azure-web-pubsub +ms.topic: reference +ms.date: 06/09/2022 +--- + +# Azure Web PubSub service data plane REST API reference + +![Diagram showing the Web PubSub service workflow.](./media/concept-service-internals/workflow.png) + +As illustrated by the above workflow graph, and also detailed workflow described in [internals](./concept-service-internals.md), your app server can send messages to clients or to manage the connected clients using REST APIs exposed by Web PubSub service. This article describes the REST APIs in detail. + +## Using REST API + +### Authenticate via Azure Web PubSub Service AccessKey + +In each HTTP request, an authorization header with a [JSON Web Token (JWT)](https://en.wikipedia.org/wiki/JSON_Web_Token) is required to authenticate with Azure Web PubSub Service. + + +#### Signing Algorithm and Signature + +`HS256`, namely HMAC-SHA256, is used as the signing algorithm. + +You should use the `AccessKey` in Azure Web PubSub Service instance's connection string to sign the generated JWT token. + +#### Claims + +Below claims are required to be included in the JWT token. + +Claim Type | Is Required | Description +---|---|--- +`aud` | true | Should be the **SAME** as your HTTP request url, trailing slash and query parameters not included. For example, a broadcast request's audience looks like: `https://example.webpubsub.azure.com/api/hubs/myhub`. +`exp` | true | Epoch time when this token will be expired. + +A pseudo code in JS: +```js +const bearerToken = jwt.sign({}, connectionString.accessKey, { + audience: request.url, + expiresIn: "1h", + algorithm: "HS256", + }); +``` + +### Authenticate via Azure Active Directory Token (Azure AD Token) + +Like using `AccessKey`, a [JSON Web Token (JWT)](https://en.wikipedia.org/wiki/JSON_Web_Token) is also required to authenticate the HTTP request. + +**The difference is**, in this scenario, JWT Token is generated by Azure Active Directory. + +[Learn how to generate Azure AD Tokens](/azure/active-directory/develop/reference-v2-libraries) + +You could also use **Role Based Access Control (RBAC)** to authorize the request from your server to Azure Web PubSub Service. + +[Learn how to configure Role Based Access Control roles for your resource](./howto-authorize-from-application.md#add-role-assignments-on-azure-portal) + +## APIs + +| Operation Group | Description | +|-----------------|-------------| +|[Service Status](/rest/api/webpubsub/dataplane/health-api)| Provides operations to check the service status | +|[Hub Operations](/rest/api/webpubsub/dataplane/web-pub-sub)| Provides operations to manage the connections and send messages to them. 
| \ No newline at end of file diff --git a/articles/azure-web-pubsub/reference-server-sdk-js.md b/articles/azure-web-pubsub/reference-server-sdk-js.md index 149cdc66ad57e..2375af7526176 100644 --- a/articles/azure-web-pubsub/reference-server-sdk-js.md +++ b/articles/azure-web-pubsub/reference-server-sdk-js.md @@ -222,7 +222,7 @@ When a WebSocket connection connects, the Web PubSub service transforms the conn [Source code](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/web-pubsub/web-pubsub-express) | [Package (NPM)](https://www.npmjs.com/package/@azure/web-pubsub-express) | -[API reference documentation](/javascript/api/overview/azure/web-pubsub-express-readme?view=azure-node-latest) | +[API reference documentation](/javascript/api/overview/azure/web-pubsub-express-readme?view=azure-node-latest&preserve-view=true) | [Product documentation](./index.yml) | [Samples][samples_ref] diff --git a/articles/azure-web-pubsub/reference-server-sdk-python.md b/articles/azure-web-pubsub/reference-server-sdk-python.md index 28d784b0aea92..e3b027b84e24a 100644 --- a/articles/azure-web-pubsub/reference-server-sdk-python.md +++ b/articles/azure-web-pubsub/reference-server-sdk-python.md @@ -1,18 +1,19 @@ --- title: Reference - Python server SDK for Azure Web PubSub -description: This reference describes the Python server SDK for the Azure Web PubSub service. +description: Learn about the Python server SDK for the Azure Web PubSub service. You can use this library in your app server to manage the WebSocket client connections. author: vicancy ms.author: lianwei ms.service: azure-web-pubsub -ms.topic: conceptual -ms.date: 11/08/2021 +ms.topic: how-to +ms.custom: kr2b-contr-experiment +ms.date: 05/23/2022 --- # Azure Web PubSub service client library for Python [Azure Web PubSub Service](./index.yml) is an Azure-managed service that helps developers easily build web applications with real-time features and publish-subscribe pattern. Any scenario that requires real-time publish-subscribe messaging between server and clients or among clients can use Azure Web PubSub service. Traditional real-time features that often require polling from server or submitting HTTP requests can also use Azure Web PubSub service. -You can use this library in your app server side to manage the WebSocket client connections, as shown in below diagram: +You can use this library in your app server side to manage the WebSocket client connections, as shown in following diagram: ![The overflow diagram shows the overflow of using the service client library.](media/sdk-reference/service-client-overflow.png) @@ -21,31 +22,29 @@ Use this library to: - Send messages to hubs and groups. - Send messages to particular users and connections. - Organize users and connections into groups. -- Close connections -- Grant, revoke, and check permissions for an existing connection +- Close connections. +- Grant, revoke, and check permissions for an existing connection. -[Source code](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/webpubsub/azure-messaging-webpubsubservice) | [Package (Pypi)][package] | [API reference documentation](/python/api/overview/azure/messaging-webpubsubservice-readme) | [Product documentation][webpubsubservice_docs] +## Prerequisites -> [!IMPORTANT] -> Azure SDK Python packages support for Python 2.7 is ending 01 January 2022. For more information and questions, please refer to https://github.com/Azure/azure-sdk-for-python/issues/20691. +- Python 3.6 or later is required to use this package. 
+- You need an [Azure subscription][azure_sub] and an [Azure WebPubSub service instance][webpubsubservice_docs] to use this package. +- An existing Azure Web PubSub service instance. -## Getting started +> [!IMPORTANT] +> Azure SDK Python packages support for Python 2.7 is ending 01 January 2022. For more information, see [Azure SDK Python packages support](https://github.com/Azure/azure-sdk-for-python/issues/20691). -### Prerequisites +## Install the package -- Python 2.7, or 3.6 or later is required to use this package. -- You need an [Azure subscription][azure_sub] and a [Azure WebPubSub service instance][webpubsubservice_docs] to use this package. -- An existing Azure Web PubSub service instance. - -### 1. Install the package +Use this command to install the package: ```bash python -m pip install azure-messaging-webpubsubservice ``` -### 2. Create and authenticate a WebPubSubServiceClient +## Create and authenticate a WebPubSubServiceClient -You can authenticate the `WebPubSubServiceClient` using [connection string][connection_string]: +You can authenticate the `WebPubSubServiceClient` using a [connection string][connection_string]: ```python >>> from azure.messaging.webpubsubservice import WebPubSubServiceClient @@ -53,7 +52,7 @@ You can authenticate the `WebPubSubServiceClient` using [connection string][conn >>> service = WebPubSubServiceClient.from_connection_string(connection_string='', hub='hub') ``` -Or using the service endpoint and the access key: +Or use the service endpoint and the access key: ```python >>> from azure.messaging.webpubsubservice import WebPubSubServiceClient @@ -62,11 +61,11 @@ Or using the service endpoint and the access key: >>> service = WebPubSubServiceClient(endpoint='', hub='hub', credential=AzureKeyCredential("")) ``` -Or using [Azure Active Directory][aad_doc]: +Or use [Azure Active Directory][aad_doc] (Azure AD): -1. [pip][pip] install [`azure-identity`][azure_identity_pip] -2. Follow the document to [enable AAD authentication on your Webpubsub resource][aad_doc] -3. Update code to use [DefaultAzureCredential][default_azure_credential] +1. [pip][pip] install [`azure-identity`][azure_identity_pip]. +2. [Enable Azure AD authentication on your Webpubsub resource][aad_doc]. +3. Update code to use [DefaultAzureCredential][default_azure_credential]. ```python >>> from azure.messaging.webpubsubservice import WebPubSubServiceClient @@ -88,7 +87,7 @@ Or using [Azure Active Directory][aad_doc]: }) ``` -The WebSocket client will receive JSON serialized text: `{"from": "user1", "data": "Hello world"}`. +The WebSocket client receives JSON serialized text: `{"from": "user1", "data": "Hello world"}`. ### Broadcast messages in plain-text format @@ -98,7 +97,7 @@ The WebSocket client will receive JSON serialized text: `{"from": "user1", "data >>> service.send_to_all(message = 'Hello world', content_type='text/plain') ``` -The WebSocket client will receive text: `Hello world`. +The WebSocket client receives text: `Hello world`. ### Broadcast messages in binary format @@ -109,14 +108,12 @@ The WebSocket client will receive text: `Hello world`. >>> service.send_to_all(message=io.StringIO('Hello World'), content_type='application/octet-stream') ``` -The WebSocket client will receive binary text: `b'Hello world'`. - -## Troubleshooting +The WebSocket client receives binary text: `b'Hello world'`. -### Logging +## Logging This SDK uses Python standard logging library. -You can configure logging print out debugging information to the stdout or anywhere you want. 
+You can configure logging to print debugging information to the `stdout` or anywhere you want. ```python import sys @@ -139,35 +136,30 @@ credential = DefaultAzureCredential() service = WebPubSubServiceClient(endpoint=endpoint, hub='hub', credential=credential, logging_enable=True) ``` -Similarly, `logging_enable` can enable detailed logging for a single call, -even when it isn't enabled for the WebPubSubServiceClient: +Similarly, `logging_enable` can enable detailed logging for a single call, even when it isn't enabled for the `WebPubSubServiceClient`: ```python result = service.send_to_all(..., logging_enable=True) ``` -Http request and response details are printed to stdout with this logging config. +HTTP request and response details are printed to `stdout` with this logging configuration. ## Next steps -Check [more samples here][samples]. +- [Source code](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/webpubsub/azure-messaging-webpubsubservice) +- [Package (Pypi)][package] +- [API reference documentation](/python/api/overview/azure/messaging-webpubsubservice-readme) +- [Product documentation][webpubsubservice_docs] + +For more samples, see [Azure Web PubSub service client library for Python Samples][samples]. ## Contributing -This project welcomes contributions and suggestions. Most contributions require -you to agree to a Contributor License Agreement (CLA) declaring that you have -the right to, and actually do, grant us the rights to use your contribution. -For details, visit https://cla.microsoft.com. +This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For more information, see [Contributor License Agreement](https://cla.microsoft.com). -When you submit a pull request, a CLA-bot will automatically determine whether -you need to provide a CLA and decorate the PR appropriately (e.g., label, -comment). Simply follow the instructions provided by the bot. You will only -need to do this once across all repos using our CLA. +When you submit a pull request, a CLA-bot automatically determines whether you need to provide a CLA and decorate the PR appropriately, for example, "label", "comment". Follow the instructions provided by the bot. You only need to do this action once across all repos using our CLA. -This project has adopted the -[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, -see the Code of Conduct FAQ or contact opencode@microsoft.com with any -additional questions or comments. +This project has adopted the Microsoft Open Source Code of Conduct. For more information, see [Code of Conduct][code_of_conduct] FAQ or contact [Open Source Conduct Team](mailto:opencode@microsoft.com) with questions or comments. 
[webpubsubservice_docs]: ./index.yml diff --git a/articles/azure-web-pubsub/toc.yml b/articles/azure-web-pubsub/toc.yml index a53febf4a2162..274cac21df35c 100644 --- a/articles/azure-web-pubsub/toc.yml +++ b/articles/azure-web-pubsub/toc.yml @@ -33,10 +33,14 @@ - name: Serverless development expanded: true items: - - name: Build a notification app with Azure Functions + - name: Build a notification app by using Azure Functions href: tutorial-serverless-notification.md - name: Build a real-time chat app with client authentication href: quickstart-serverless.md + - name: Build a real-time chat app by using an Azure Static Web App + href: tutorial-serverless-static-web-app.md + - name: Visualize IoT data by using an Azure Function + href: tutorial-serverless-iot.md - name: Publish and subscribe messages href: tutorial-pub-sub-messages.md - name: Build a chat app @@ -138,8 +142,10 @@ href: reference-cloud-events.md - name: Functions trigger and bindings href: reference-functions-bindings.md - - name: Server SDKs + - name: Data plane REST API and SDKs items: + - name: REST API + href: reference-rest-api-data-plane.md - name: Server SDK - C# href: reference-server-sdk-csharp.md - name: Server SDK - Java @@ -148,7 +154,7 @@ href: reference-server-sdk-js.md - name: Server SDK - Python href: reference-server-sdk-python.md - - name: REST API + - name: Control plane href: /rest/api/webpubsub/ - name: Resources diff --git a/articles/azure-web-pubsub/tutorial-serverless-iot.md b/articles/azure-web-pubsub/tutorial-serverless-iot.md new file mode 100644 index 0000000000000..7b060fdcfe2e1 --- /dev/null +++ b/articles/azure-web-pubsub/tutorial-serverless-iot.md @@ -0,0 +1,484 @@ +--- +title: Tutorial - Visualize IoT device data from IoT Hub using Azure Web PubSub service and Azure Functions +description: A tutorial to walk through how to use Azure Web PubSub service and Azure Functions to monitor device data from IoT Hub. +author: vicancy +ms.author: lianwei +ms.service: azure-web-pubsub +ms.topic: tutorial +ms.date: 06/01/2022 +--- + +# Tutorial: Visualize IoT device data from IoT Hub using Azure Web PubSub service and Azure Functions + +In this tutorial, you learn how to use Azure Web PubSub service and Azure Functions to build a serverless application with real-time data visualization from IoT Hub. + +In this tutorial, you learn how to: + +> [!div class="checklist"] +> * Build a serverless data visualization app +> * Work together with Web PubSub function input and output bindings and Azure IoT hub +> * Run the sample functions locally + +## Prerequisites + +# [JavaScript](#tab/javascript) + +* A code editor, such as [Visual Studio Code](https://code.visualstudio.com/) + +* [Node.js](https://nodejs.org/en/download/), version 10.x. + > [!NOTE] + > For more information about the supported versions of Node.js, see [Azure Functions runtime versions documentation](../azure-functions/functions-versions.md#languages). + +* [Azure Functions Core Tools](https://github.com/Azure/azure-functions-core-tools#installing) (v3 or higher preferred) to run Azure Function apps locally and deploy to Azure. + +* The [Azure CLI](/cli/azure) to manage Azure resources. + +--- + +[!INCLUDE [quickstarts-free-trial-note](../../includes/quickstarts-free-trial-note.md)] + +[!INCLUDE [iot-hub-include-create-hub](../../includes/iot-hub-include-create-hub-quickstart.md)] + +## Create a Web PubSub instance +If you already have a Web PubSub instance in your Azure subscription, you can skip this section. 
+ +[!INCLUDE [create-instance-cli](includes/cli-awps-creation.md)] + + +## Create and run the functions locally + +1. Make sure you have [Azure Functions Core Tools](https://github.com/Azure/azure-functions-core-tools#installing) installed. And then create an empty directory for the project. Run command under this working directory. + + # [JavaScript](#tab/javascript) + ```bash + func init --worker-runtime javascript + ``` + --- + +2. Update `host.json`'s `extensionBundle` to version larger than _3.3.0_ which contains Web PubSub support. + +```json +{ + "version": "2.0", + "extensionBundle": { + "id": "Microsoft.Azure.Functions.ExtensionBundle", + "version": "[3.3.*, 4.0.0)" + } +} +``` + +3. Create an `index` function to read and host a static web page for clients. + ```bash + func new -n index -t HttpTrigger + ``` + # [JavaScript](#tab/javascript) + - Update `index/index.js` with following code that serve the html content as a static site. + ```js + var fs = require("fs"); + var path = require("path"); + + module.exports = function (context, req) { + let index = path.join( + context.executionContext.functionDirectory, + "index.html" + ); + fs.readFile(index, "utf8", function (err, data) { + if (err) { + console.log(err); + context.done(err); + return; + } + context.res = { + status: 200, + headers: { + "Content-Type": "text/html", + }, + body: data, + }; + context.done(); + }); + }; + + ``` + +4. Create this _index.html_ file under the same folder as file _index.js_: + + ```html + + + + + + + + + + + + + Temperature Real-time Data + + + +

+    <!-- The index.html markup is not recoverable from this extract; the visible text includes the page title "Temperature Real-time Data" and a "0 devices" connection counter. -->
                  + + + + ``` + +5. Create a `negotiate` function to help clients get service connection url with access token. + ```bash + func new -n negotiate -t HttpTrigger + ``` + # [JavaScript](#tab/javascript) + - Update `negotiate/function.json` to include input binding [`WebPubSubConnection`](reference-functions-bindings.md#input-binding), with the following json codes. + ```json + { + "bindings": [ + { + "authLevel": "anonymous", + "type": "httpTrigger", + "direction": "in", + "name": "req" + }, + { + "type": "http", + "direction": "out", + "name": "res" + }, + { + "type": "webPubSubConnection", + "name": "connection", + "hub": "%hubName%", + "direction": "in" + } + ] + } + ``` + - Update `negotiate/index.js` and to return the `connection` binding which contains the generated token. + ```js + module.exports = function (context, req, connection) { + // Add your own auth logic here + context.res = { body: connection }; + context.done(); + }; + ``` + +6. Create a `messagehandler` function to generate notifications with template `"IoT Hub (Event Hub)"`. + ```bash + func new --template "IoT Hub (Event Hub)" --name messagehandler + ``` + # [JavaScript](#tab/javascript) + - Update _messagehandler/function.json_ to add [Web PubSub output binding](reference-functions-bindings.md#output-binding) with the following json code. Please note that we use variable `%hubName%` as the hub name for both IoT eventHubName and Web PubSub hub. + ```json + { + "bindings": [ + { + "type": "eventHubTrigger", + "name": "IoTHubMessages", + "direction": "in", + "eventHubName": "%hubName%", + "connection": "IOTHUBConnectionString", + "cardinality": "many", + "consumerGroup": "$Default", + "dataType": "string" + }, + { + "type": "webPubSub", + "name": "actions", + "hub": "%hubName%", + "direction": "out" + } + ] + } + ``` + - Update `messagehandler/index.js` with the following code. It sends every message from IoT hub to every client connected to Web PubSub service using Web PubSub output bindings. + ```js + module.exports = function (context, IoTHubMessages) { + IoTHubMessages.forEach((message) => { + const deviceMessage = JSON.parse(message); + context.log(`Processed message: ${message}`); + context.bindings.actions = { + actionName: "sendToAll", + data: JSON.stringify({ + IotData: deviceMessage, + MessageDate: deviceMessage.date || new Date().toISOString(), + DeviceId: deviceMessage.deviceId, + }), + }; + }); + + context.done(); + }; + ``` + +7. Update the Function settings + + 1. Add `hubName` setting and replace `{YourIoTHubName}` with the hub name you used when creating your IoT Hub: + + ```bash + func settings add hubName "{YourIoTHubName}" + ``` + + 2. Get the **Service Connection String** for IoT Hub using below CLI command: + + ```azcli + az iot hub connection-string show --policy-name service --hub-name {YourIoTHubName} --output table --default-eventhub + ``` + + And set `IOTHubConnectionString` using below command, replacing `` with the value: + + ```bash + func settings add IOTHubConnectionString "" + ``` + + 3. 
+    3. Get the **Connection String** for Web PubSub by using the following CLI command:
+
+        ```azcli
+        az webpubsub key show --name "<your-unique-resource-name>" --resource-group "<your-resource-group>" --query primaryConnectionString
+        ```
+
+        Then set `WebPubSubConnectionString` by using the following command, replacing `<webpubsub-connection-string>` with the value:
+
+        ```bash
+        func settings add WebPubSubConnectionString "<webpubsub-connection-string>"
+        ```
+
+    > [!NOTE]
+    > The `IoT Hub (Event Hub)` function trigger used in the sample has a dependency on Azure Storage, but you can use the local storage emulator when the function is running locally. If you get an error such as `There was an error performing a read operation on the Blob Storage Secret Repository. Please ensure the 'AzureWebJobsStorage' connection string is valid.`, download and enable the [Storage Emulator](../storage/common/storage-use-emulator.md).
+
+8. Run the function locally.
+
+    Now you can run your local function with the following command.
+
+    ```bash
+    func start
+    ```
+
+    Check the running logs, and then visit your locally hosted static page at `http://localhost:7071/api/index`.
+
+## Run the device to send data
+
+### Register a device
+
+A device must be registered with your IoT hub before it can connect.
+
+If you already have a device registered in your IoT hub, you can skip this section.
+
+1. Run the [az iot hub device-identity create](/cli/azure/iot/hub/device-identity#az-iot-hub-device-identity-create) command in Azure Cloud Shell to create the device identity.
+
+    **YourIoTHubName**: Replace this placeholder below with the name you chose for your IoT hub.
+
+    ```azurecli-interactive
+    az iot hub device-identity create --hub-name {YourIoTHubName} --device-id simDevice
+    ```
+
+2. Run the [az iot hub device-identity connection-string show](/cli/azure/iot/hub/device-identity/connection-string#az-iot-hub-device-identity-connection-string-show) command in Azure Cloud Shell to get the _device connection string_ for the device you just registered:
+
+    **YourIoTHubName**: Replace this placeholder below with the name you chose for your IoT hub.
+
+    ```azurecli-interactive
+    az iot hub device-identity connection-string show --hub-name {YourIoTHubName} --device-id simDevice --output table
+    ```
+
+    Make a note of the device connection string, which looks like:
+
+    `HostName={YourIoTHubName}.azure-devices.net;DeviceId=simDevice;SharedAccessKey={YourSharedAccessKey}`
+
+- For quickest results, simulate temperature data using the [Raspberry Pi Azure IoT Online Simulator](https://azure-samples.github.io/raspberry-pi-web-simulator/#Getstarted). Paste in the **device connection string**, and select the **Run** button.
+
+- If you have a physical Raspberry Pi and BME280 sensor, you can measure and report real temperature and humidity values by following the [Connect Raspberry Pi to Azure IoT Hub (Node.js)](/azure/iot-hub/iot-hub-raspberry-pi-kit-node-get-started) tutorial.
+
+## Run the visualization website
+
+Open the function host index page at `http://localhost:7071/api/index` to view the real-time dashboard. Register multiple devices, and the dashboard updates to show data from all of them in real time. Open multiple browsers, and every page is updated in real time.
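+
+Each dashboard update is driven by a message that the `messagehandler` function broadcasts to all connected clients. A representative payload (the exact fields and values depend on what your device or the simulator sends) looks like the following:
+
+```json
+{
+  "IotData": {
+    "messageId": 12,
+    "deviceId": "Raspberry Pi Web Client",
+    "temperature": 27.6,
+    "humidity": 63.5
+  },
+  "MessageDate": "2022-06-01T09:30:00.000Z",
+  "DeviceId": "Raspberry Pi Web Client"
+}
+```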
+
+:::image type="content" source="media/tutorial-serverless-iot/iot-devices-sample.png" alt-text="Screenshot of multiple devices data visualization using Web PubSub service.":::
+
+## Clean up resources
+
+[!INCLUDE [quickstarts-free-trial-note](./includes/cli-delete-resources.md)]
+
+## Next steps
+
+In this tutorial, you learned how to build a serverless application that visualizes real-time data from IoT devices. Now, you can start to build your own application.
+
+> [!div class="nextstepaction"]
+> [Tutorial: Create a simple chatroom with Azure Web PubSub](https://azure.github.io/azure-webpubsub/getting-started/create-a-chat-app/js-handle-events)
+
+> [!div class="nextstepaction"]
+> [Azure Web PubSub bindings for Azure Functions](https://azure.github.io/azure-webpubsub/references/functions-bindings)
+
+> [!div class="nextstepaction"]
+> [Explore more Azure Web PubSub samples](https://github.com/Azure/azure-webpubsub/tree/main/samples)
diff --git a/articles/azure-web-pubsub/tutorial-serverless-static-web-app.md b/articles/azure-web-pubsub/tutorial-serverless-static-web-app.md
new file mode 100644
index 0000000000000..2a2f7d1fa855c
--- /dev/null
+++ b/articles/azure-web-pubsub/tutorial-serverless-static-web-app.md
@@ -0,0 +1,212 @@
+---
+title: Tutorial - Create a serverless chat app with Azure Web PubSub service and Azure Static Web Apps
+description: A tutorial about how to use Azure Web PubSub service and Azure Static Web Apps to build a serverless chat application.
+author: JialinXin
+ms.author: jixin
+ms.service: azure-web-pubsub
+ms.topic: tutorial
+ms.date: 06/03/2022
+---
+
+# Tutorial: Create a serverless chat app with Azure Web PubSub service and Azure Static Web Apps
+
+Azure Web PubSub service helps you build real-time messaging web applications using WebSockets. By using Azure Static Web Apps, you can automatically build and deploy full-stack web apps to Azure from a code repository. In this tutorial, you'll learn how to use Web PubSub service and Static Web Apps to build a serverless, real-time chat room messaging application.
+
+In this tutorial, you'll learn how to:
+
+> [!div class="checklist"]
+> * Build a serverless chat app
+> * Work with Web PubSub function input and output bindings
+> * Work with Static Web Apps
+
+## Overview
+
+:::image type="content" source="media/tutorial-serverless-static-web-app/tutorial-serverless-static-web-app.png" alt-text="Diagram showing how Azure Web PubSub works with Azure Static Web Apps." border="false":::
+
+GitHub or Azure Repos provide source control for Static Web Apps. Azure monitors the repo branch you select, and every time there's a code change to the source repo, a new build of your web app is automatically run and deployed to Azure. Continuous delivery is provided by GitHub Actions and Azure Pipelines. Static Web Apps detects the new build and presents it to the end user.
+
+The sample chat room application provided with this tutorial has the following workflow.
+
+1. When a user signs in to the app, the Azure Functions `login` API will be triggered to generate a Web PubSub service client connection URL.
+1. When the client initializes the connection request to Web PubSub, the service sends a system `connect` event that triggers the Functions `connect` API to authenticate the user.
+1. When a client sends a message to Azure Web PubSub service, the service will respond with a user `message` event and the Functions `message` API will be triggered to broadcast the message to all the connected clients.
+1. 
The Functions `validate` API is triggered periodically for [CloudEvents Abuse Protection](https://github.com/cloudevents/spec/blob/v1.0/http-webhook.md#4-abuse-protection) when the events in Azure Web PubSub are configured with predefined parameter `{event}`, that is, https://$STATIC_WEB_APP/api/{event}. + +> [!NOTE] +> The Functions APIs `connect` and `message` are triggered when Azure Web PubSub service is configured with these two events. + +## Prerequisites + +* A [GitHub](https://github.com/) account. +* An [Azure](https://portal.azure.com/) account. If you don't have an Azure subscription, create an [Azure free account](https://azure.microsoft.com/free/?ref=microsoft.com&utm_source=microsoft.com&utm_medium=docs&utm_campaign=visualstudio) before you begin. +* [Azure CLI](/cli/azure/install-azure-cli) (version 2.29.0 or higher) or [Azure Cloud Shell](../cloud-shell/quickstart.md) to manage Azure resources. + +## Create a Web PubSub resource + +1. Sign in to the Azure CLI by using the following command. + + ```azurecli-interactive + az login + ``` + +1. Create a resource group. + + ```azurecli-interactive + az group create \ + --name my-awps-swa-group \ + --location "eastus2" + ``` + +1. Create a Web PubSub resource. + + ```azurecli-interactive + az webpubsub create \ + --name my-awps-swa \ + --resource-group my-awps-swa-group \ + --location "eastus2" \ + --sku Free_F1 + ``` + +1. Get and hold the access key for later use. + + ```azurecli-interactive + az webpubsub key show \ + --name my-awps-swa \ + --resource-group my-awps-swa-group + ``` + + ```azurecli-interactive + AWPS_ACCESS_KEY= + ``` + + Replace the placeholder `` with the value for `primaryConnectionString` from the previous step. + +## Create a repository + +This article uses a GitHub template repository to make it easy for you to get started. The template features a starter app that you will deploy to Azure Static Web Apps. + +1. Go to [https://github.com/Azure/awps-swa-sample/generate](https://github.com/login?return_to=/Azure/awps-swa-sample/generate) to create a new repo for this tutorial. +1. Select yourself as **Owner** and name your repository **my-awps-swa-app**. +1. You can create a **Public** or **Private** repo according to your preference. Both work for the tutorial. +1. Select **Create repository from template**. + +## Create a static web app + +Now that the repository is created, you can create a static web app from the Azure CLI. + +1. Create a variable to hold your GitHub user name. + + ```azurecli-interactive + GITHUB_USER_NAME= + ``` + + Replace the placeholder `` with your GitHub user name. + +1. Create a new static web app from your repository. When you run this command, the CLI starts a GitHub interactive sign-in. Follow the message to complete authorization. + + ```azurecli-interactive + az staticwebapp create \ + --name my-awps-swa-app \ + --resource-group my-awps-swa-group \ + --source https://github.com/$GITHUB_USER_NAME/my-awps-swa-app \ + --location "eastus2" \ + --branch main \ + --app-location "src" \ + --api-location "api" \ + --login-with-github + ``` + + > [!IMPORTANT] + > The URL passed to the `--source` parameter must not include the `.git` suffix. + +1. Go to **https://github.com/login/device**. + +1. Enter the user code as displayed your console's message. + +1. Select **Continue**. + +1. Select **Authorize AzureAppServiceCLI**. + +1. Configure the static web app settings. 
+
+    ```azurecli-interactive
+    az staticwebapp appsettings set \
+      -n my-awps-swa-app \
+      --setting-names WebPubSubConnectionString=$AWPS_ACCESS_KEY WebPubSubHub=sample_swa
+    ```
+
+## View the website
+
+There are two aspects to deploying a static app: the first creates the underlying Azure resources that make up your app; the second is a GitHub Actions workflow that builds and publishes your application.
+
+Before you can navigate to your new static site, the deployment build must first finish running.
+
+1. Return to your console window and run the following command to list the URLs associated with your app.
+
+    ```azurecli-interactive
+    az staticwebapp show \
+      --name my-awps-swa-app \
+      --query "repositoryUrl"
+    ```
+
+    The output of this command returns the URL to your GitHub repository.
+
+1. Copy the **repository URL** and paste it into the browser.
+
+1. Select the **Actions** tab.
+
+    At this point, Azure is creating the resources to support your static web app. Wait until the icon next to the running workflow turns into a check mark with green background ✅. This operation may take a few minutes to complete.
+
+    Once the success icon appears, the workflow is complete and you can return to your console window.
+
+1. Run the following command to query for your website's URL.
+
+    ```azurecli-interactive
+    az staticwebapp show \
+      --name my-awps-swa-app \
+      --query "defaultHostname"
+    ```
+
+    Save the URL; you'll use it to set up the Web PubSub event handler.
+
+    ```azurecli-interactive
+    STATIC_WEB_APP=<your-static-web-app-hostname>
+    ```
+
+## Configure the Web PubSub event handler
+
+You're almost done. The last step is to configure Web PubSub to route client requests to your function APIs.
+
+1. Run the following command to configure Web PubSub service events. It maps functions under the `api` folder in your repo to the Web PubSub event handler.
+
+    ```azurecli-interactive
+    az webpubsub hub create \
+      -n "my-awps-swa" \
+      -g "my-awps-swa-group" \
+      --hub-name "sample_swa" \
+      --event-handler url-template=https://$STATIC_WEB_APP/api/{event} user-event-pattern="*" \
+      --event-handler url-template=https://$STATIC_WEB_APP/api/{event} system-event="connect"
+    ```
+
+Now you're ready to try your website. Open the URL you saved in `STATIC_WEB_APP` in a browser and select **Continue** to start chatting with your friends.
+
+## Clean up resources
+
+If you're not going to continue to use this application, you can delete the resource group and the static web app by running the following command.
+
+```azurecli-interactive
+az group delete --name my-awps-swa-group
+```
+
+## Next steps
+
+In this tutorial, you learned how to run a serverless chat application. Now, you can start to build your own application.
+
+> [!div class="nextstepaction"]
+> [Tutorial: Client streaming using subprotocol](tutorial-subprotocol.md)
+
+> [!div class="nextstepaction"]
+> [Azure Web PubSub bindings for Azure Functions](reference-functions-bindings.md)
+
+> [!div class="nextstepaction"]
+> [Explore more Azure Web PubSub samples](https://github.com/Azure/azure-webpubsub/tree/main/samples)
diff --git a/articles/backup/archive-tier-support.md b/articles/backup/archive-tier-support.md
index 17b68e762d822..2b2d0cc505408 100644
--- a/articles/backup/archive-tier-support.md
+++ b/articles/backup/archive-tier-support.md
@@ -2,7 +2,7 @@
 title: Azure Backup - Archive tier overview
 description: Learn about Archive tier support for Azure Backup.
 ms.topic: overview
-ms.date: 05/12/2022
+ms.date: 06/06/2022
 ms.custom: references_regions
 author: v-amallick
 ms.service: backup
@@ -26,6 +26,8 @@ Archive tier supports the following workloads:
 | Azure Virtual Machines | Only monthly and yearly recovery points. Daily and weekly recovery points aren't supported. <br><br> Age >= 3 months in Vault-standard tier <br><br> Retention left >= 6 months. <br><br> No active daily and weekly dependencies. |
 | SQL Server in Azure Virtual Machines <br><br> SAP HANA in Azure Virtual Machines | Only full recovery points. Logs and differentials aren't supported. <br><br> Age >= 45 days in Vault-standard tier. <br><br> Retention left >= 6 months. <br><br> No dependencies. |
+
+A recovery point becomes archivable only if all the above conditions are met.
 
 >[!Note]
 >- Archive tier support for Azure Virtual Machines, SQL Servers in Azure VMs and SAP HANA in Azure VM is now generally available in multiple regions. For the detailed list of supported regions, see the [support matrix](#support-matrix).
 >- Archive tier support for Azure Virtual Machines for the remaining regions is in limited public preview. To sign up for limited public preview, fill [this form](https://forms.office.com/Pages/ResponsePage.aspx?id=v4j5cvGGr0GRqy180BHbR463S33c54tEiJLEM6Enqb9UNU5CVTlLVFlGUkNXWVlMNlRPM1lJWUxLRy4u).
@@ -80,7 +82,7 @@ If you delete recovery points that haven't stayed in archive for a minimum of 18
 
 Stop protection and delete data deletes all recovery points. For recovery points in archive that haven't stayed for a duration of 180 days in archive tier, deletion of recovery points leads to early deletion cost.
 
-## Archive Tier pricing
+## Archive tier pricing
 
 You can view the Archive tier pricing from our [pricing page](azure-backup-pricing.md).
 
diff --git a/articles/backup/backup-azure-database-postgresql-troubleshoot.md b/articles/backup/backup-azure-database-postgresql-troubleshoot.md
index b8904cedb3852..2e48b1ba9fd5d 100644
--- a/articles/backup/backup-azure-database-postgresql-troubleshoot.md
+++ b/articles/backup/backup-azure-database-postgresql-troubleshoot.md
@@ -2,7 +2,7 @@
 title: Troubleshoot Azure Database for PostgreSQL backup
 description: Troubleshooting information for backing up Azure Database for PostgreSQL.
 ms.topic: troubleshooting
-ms.date: 01/24/2022
+ms.date: 06/07/2022
 author: v-amallick
 ms.service: backup
 ms.author: v-amallick
@@ -94,6 +94,47 @@ Establish network line of sight by enabling the **Allow access to Azure services
 
 ![Screenshot showing how to search for vault name.](./media/backup-azure-database-postgresql/search-for-vault-name.png)
 
+## UserErrorDBUserAuthFailed
+
+The Azure Backup service uses the credentials mentioned in the key-vault to access the database as a database user. The relevant key vault and the secret are [provided during configuration of backup](backup-azure-database-postgresql.md#configure-backup-on-azure-postgresql-databases). Ensure that the credentials stored as part of the secret value in the key vault are valid. Ensure that the specified database user has login access.
+
+## UserErrorInvalidSecret
+
+The Azure Backup service uses the credentials mentioned in the key-vault to access the database as a database user. The relevant key vault and the secret are [provided during configuration of backup](backup-azure-database-postgresql.md#configure-backup-on-azure-postgresql-databases). Ensure that the specified secret name is present in the key vault.
+
+## UserErrorMissingDBPermissions
+
+The Azure Backup service uses the credentials mentioned in the key-vault to access the database as a database user. The relevant key vault and the secret are [provided during configuration of backup](backup-azure-database-postgresql.md#configure-backup-on-azure-postgresql-databases). Grant appropriate permissions to the relevant backup or the database user to perform this operation on the database.
+
+## UserErrorSecretValueInUnsupportedFormat
+
+The Azure Backup service uses the credentials mentioned in the key-vault to access the database as a database user.
The relevant key vault and the secret are [provided during configuration of backup](backup-azure-database-postgresql.md#configure-backup-on-azure-postgresql-databases). However the secret value is not in a format supported by Azure Backup. Check the supported format as documented [here](backup-azure-database-postgresql.md#create-secrets-in-the-key-vault). + +## UserErrorInvalidSecretStore + +The Azure Backup service uses the credentials mentioned in the key-vault to access the database as a database user. The relevant key vault and the secret are [provided during configuration of backup](backup-azure-database-postgresql.md#configure-backup-on-azure-postgresql-databases). Ensure that the given key vault exists and the backup service is given access as documented [here](backup-azure-database-postgresql-overview.md#set-of-permissions-needed-for-azure-postgresql-database-backup). + +## UserErrorMissingPermissionsOnSecretStore + +The Azure Backup service uses the credentials mentioned in the key-vault to access the database as a database user. The relevant key vault and the secret are [provided during configuration of backup](backup-azure-database-postgresql.md#configure-backup-on-azure-postgresql-databases). Ensure that backup vault's MSI is given access to key vault as documented [here](backup-azure-database-postgresql-overview.md#set-of-permissions-needed-for-azure-postgresql-database-backup). + +## UserErrorSSLDisabled + +SSL needs to be enabled for connections to the server. + +## UserErrorDBNotFound + +Ensure that the database and the relevant server exist. + +## UserErrorDatabaseNameAlreadyInUse + +The name given for the restored database already exists and hence the restore operation failed. Retry the restore operation with a different name. + +## UserErrorServerConnectionClosed + +The operation failed because the server closed the connection unexpectedly. Retry the operation and if the error still persists, please contact Microsoft Support. + + ## Next steps [About Azure Database for PostgreSQL backup](backup-azure-database-postgresql-overview.md) diff --git a/articles/backup/backup-azure-database-postgresql.md b/articles/backup/backup-azure-database-postgresql.md index 0608b619c962c..eb8fea1696ddf 100644 --- a/articles/backup/backup-azure-database-postgresql.md +++ b/articles/backup/backup-azure-database-postgresql.md @@ -2,7 +2,7 @@ title: Back up Azure Database for PostgreSQL description: Learn about Azure Database for PostgreSQL backup with long-term retention ms.topic: conceptual -ms.date: 02/25/2022 +ms.date: 06/07/2022 author: v-amallick ms.service: backup ms.author: v-amallick @@ -10,7 +10,7 @@ ms.author: v-amallick # Azure Database for PostgreSQL backup with long-term retention -This article describes how to back up Azure Database for PostgreSQL server. Before you begin, review the [supported configurations, feature considerations and known limitations](https://docs.microsoft.com/azure/backup/backup-azure-database-postgresql-support-matrix) +This article describes how to back up Azure Database for PostgreSQL server. Before you begin, review the [supported configurations, feature considerations and known limitations](./backup-azure-database-postgresql-support-matrix.md) ## Configure backup on Azure PostgreSQL databases @@ -31,7 +31,8 @@ You can configure backup on multiple databases across multiple Azure PostgreSQL 1. 
**Select Azure PostgreSQL databases to back up**: Choose one of the Azure PostgreSQL servers across subscriptions if they're in the same region as that of the vault. Expand the arrow to see the list of databases within a server. >[!Note] - >You don't need to back up the databases *azure_maintenance* and *azure_sys*. Additionally, you can't back up a database already backed-up to a Backup vault. + >- You don't need to back up the databases *azure_maintenance* and *azure_sys*. Additionally, you can't back up a database already backed-up to a Backup vault. + >- Backup of Azure PostgreSQL servers with Private endpoint enabled is currently not supported. :::image type="content" source="./media/backup-azure-database-postgresql/select-azure-postgresql-databases-to-back-up-inline.png" alt-text="Screenshot showing the option to select an Azure PostgreSQL database." lightbox="./media/backup-azure-database-postgresql/select-azure-postgresql-databases-to-back-up-expanded.png"::: @@ -183,4 +184,4 @@ Azure Backup service creates a job for scheduled backups or if you trigger on-de ## Next steps -[Troubleshoot PostgreSQL database backup by using Azure Backup](backup-azure-database-postgresql-troubleshoot.md) +[Troubleshoot PostgreSQL database backup by using Azure Backup](backup-azure-database-postgresql-troubleshoot.md) \ No newline at end of file diff --git a/articles/backup/backup-azure-manage-vms.md b/articles/backup/backup-azure-manage-vms.md index c6a9b48085436..12c979721eb64 100644 --- a/articles/backup/backup-azure-manage-vms.md +++ b/articles/backup/backup-azure-manage-vms.md @@ -2,7 +2,10 @@ title: Manage and monitor Azure VM backups description: Learn how to manage and monitor Azure VM backups by using the Azure Backup service. ms.topic: conceptual -ms.date: 09/17/2021 +ms.date: 06/03/2022 +author: v-amallick +ms.service: backup +ms.author: v-amallick --- # Manage Azure VM backups with Azure Backup service @@ -17,7 +20,7 @@ In the Azure portal, the Recovery Services vault dashboard provides access to va You can manage backups by using the dashboard and by drilling down to individual VMs. To begin machine backups, open the vault on the dashboard: -![Full dashboard view with slider](./media/backup-azure-manage-vms/bottom-slider.png) +:::image type="content" source="./media/backup-azure-manage-vms/bottom-slider-inline.png" alt-text="Screenshot showing the full dashboard view with slider." lightbox="./media/backup-azure-manage-vms/bottom-slider-expanded.png"::: [!INCLUDE [backup-center.md](../../includes/backup-center.md)] @@ -28,30 +31,30 @@ To view VMs on the vault dashboard: 1. Sign in to the [Azure portal](https://portal.azure.com/). 1. On the left menu, select **All services**. - ![Select All services](./media/backup-azure-manage-vms/select-all-services.png) + :::image type="content" source="./media/backup-azure-manage-vms/select-all-services.png" alt-text="Screenshot showing to select All services."::: 1. In the **All services** dialog box, enter *Recovery Services*. The list of resources filters according to your input. In the list of resources, select **Recovery Services vaults**. - ![Enter and choose Recovery Services vaults](./media/backup-azure-manage-vms/all-services.png) + :::image type="content" source="./media/backup-azure-manage-vms/all-services.png" alt-text="Screenshot showing to enter and choose Recovery Services vaults."::: The list of Recovery Services vaults in the subscription appears. 1. 
For ease of use, select the pin icon next to your vault name and select **Pin to dashboard**. 1. Open the vault dashboard. - ![Open the vault dashboard and Settings pane](./media/backup-azure-manage-vms/full-view-rs-vault.png) + :::image type="content" source="./media/backup-azure-manage-vms/full-view-rs-vault-inline.png" alt-text="Screenshot showing to open the vault dashboard and Settings pane." lightbox="./media/backup-azure-manage-vms/full-view-rs-vault-expanded.png"::: 1. On the **Backup Items** tile, select **Azure Virtual Machine**. - ![Open the Backup Items tile](./media/backup-azure-manage-vms/azure-virtual-machine.png) + :::image type="content" source="./media/backup-azure-manage-vms/azure-virtual-machine-inline.png" alt-text="Screenshot showing to open the Backup Items tile." lightbox="./media/backup-azure-manage-vms/azure-virtual-machine-expanded.png"::: 1. On the **Backup Items** pane, you can view the list of protected VMs. In this example, the vault protects one virtual machine: *myVMR1*. - ![View the Backup Items pane](./media/backup-azure-manage-vms/backup-items-blade-select-item.png) + :::image type="content" source="./media/backup-azure-manage-vms/backup-items-blade-select-item-inline.png" alt-text="Screenshot showing to view the Backup Items pane." lightbox="./media/backup-azure-manage-vms/backup-items-blade-select-item-expanded.png"::: 1. From the vault item's dashboard, you can modify backup policies, run an on-demand backup, stop or resume protection of VMs, delete backup data, view restore points, and run a restore. - ![The Backup Items dashboard and the Settings pane](./media/backup-azure-manage-vms/item-dashboard-settings.png) + :::image type="content" source="./media/backup-azure-manage-vms/item-dashboard-settings-inline.png" alt-text="Screenshot showing the Backup Items dashboard and the Settings pane." lightbox="./media/backup-azure-manage-vms/item-dashboard-settings-expanded.png"::: ## Manage backup policy for a VM @@ -70,17 +73,17 @@ To manage a backup policy: 1. Sign in to the [Azure portal](https://portal.azure.com/). Open the vault dashboard. 2. On the **Backup Items** tile, select **Azure Virtual Machine**. - ![Open the Backup Items tile](./media/backup-azure-manage-vms/azure-virtual-machine.png) + :::image type="content" source="./media/backup-azure-manage-vms/azure-virtual-machine-inline.png" alt-text="Screenshot showing to open the Backup Items tile." lightbox="./media/backup-azure-manage-vms/azure-virtual-machine-expanded.png"::: 3. On the **Backup Items** pane, you can view the list of protected VMs and last backup status with latest restore points time. - ![View the Backup Items pane](./media/backup-azure-manage-vms/backup-items-blade-select-item.png) + :::image type="content" source="./media/backup-azure-manage-vms/backup-items-blade-select-item-inline.png" alt-text="Screenshot showing to view the Backup Items pane." lightbox="./media/backup-azure-manage-vms/backup-items-blade-select-item-expanded.png"::: 4. From the vault item's dashboard, you can select a backup policy. - * To switch policies, select a different policy and then select **Save**. The new policy is immediately applied to the vault. + To switch policies, select a different policy and then select **Save**. The new policy is immediately applied to the vault. 
- ![Choose a backup policy](./media/backup-azure-manage-vms/backup-policy-create-new.png) + :::image type="content" source="./media/backup-azure-manage-vms/backup-policy-create-new-inline.png" alt-text="Screenshot showing to choose a backup policy." lightbox="./media/backup-azure-manage-vms/backup-policy-create-new-expanded.png"::: ## Run an on-demand backup @@ -97,13 +100,13 @@ To trigger an on-demand backup: 1. On the [vault item dashboard](#view-vms-on-the-dashboard), under **Protected Item**, select **Backup Item**. - ![The Backup now option](./media/backup-azure-manage-vms/backup-now-button.png) + :::image type="content" source="./media/backup-azure-manage-vms/backup-now-button.png" alt-text="Screenshot showing the Backup now option."::: 2. From **Backup Management Type**, select **Azure Virtual Machine**. The **Backup Item (Azure Virtual Machine)** pane appears. 3. Select a VM and select **Backup Now** to create an on-demand backup. The **Backup Now** pane appears. 4. In the **Retain Backup Till** field, specify a date for the backup to be retained. - ![The Backup Now calendar](./media/backup-azure-manage-vms/backup-now-check.png) + :::image type="content" source="./media/backup-azure-manage-vms/backup-now-check.png" alt-text="Screenshot showing the Backup Now calendar."::: 5. Select **OK** to run the backup job. @@ -127,7 +130,7 @@ To stop protection and retain data of a VM: 1. On the [vault item's dashboard](#view-vms-on-the-dashboard), select **Stop backup**. 2. Choose **Retain Backup Data**, and confirm your selection as needed. Add a comment if you want. If you aren't sure of the item's name, hover over the exclamation mark to view the name. - ![Retain Backup data](./media/backup-azure-manage-vms/retain-backup-data.png) + :::image type="content" source="./media/backup-azure-manage-vms/retain-backup-data.png" alt-text="Screenshot showing to retain Backup data."::: A notification lets you know that the backup jobs have been stopped. @@ -142,7 +145,7 @@ To stop protection and delete data of a VM: 1. On the [vault item's dashboard](#view-vms-on-the-dashboard), select **Stop backup**. 2. Choose **Delete Backup Data**, and confirm your selection as needed. Enter the name of the backup item and add a comment if you want. - ![Delete backup data](./media/backup-azure-manage-vms/delete-backup-data.png) + :::image type="content" source="./media/backup-azure-manage-vms/delete-backup-data.png" alt-text="Screenshot showing to delete backup data."::: > [!NOTE] > After completing the delete operation the backed up data will be retained for 14 days in the [soft deleted state](./soft-delete-virtual-machines.md).
                  In addition, you can also [enable or disable soft delete](./backup-azure-security-feature-cloud.md#enabling-and-disabling-soft-delete). @@ -158,7 +161,7 @@ To resume protection for a VM: 2. Follow the steps in [Manage backup policies](#manage-backup-policy-for-a-vm) to assign the policy for the VM. You don't need to choose the VM's initial protection policy. 3. After you apply the backup policy to the VM, you see the following message: - ![Message indicating a successfully protected VM](./media/backup-azure-manage-vms/success-message.png) + :::image type="content" source="./media/backup-azure-manage-vms/success-message.png" alt-text="Screenshot showing message indicating a successfully protected VM."::: ## Delete backup data @@ -166,16 +169,16 @@ There are two ways to delete a VM's backup data: * From the vault item dashboard, select Stop backup and follow the instructions for [Stop protection and delete backup data](#stop-protection-and-delete-backup-data) option. - ![Select Stop backup](./media/backup-azure-manage-vms/stop-backup-button.png) + :::image type="content" source="./media/backup-azure-manage-vms/stop-backup-button.png" alt-text="Screenshot showing to select Stop backup."::: * From the vault item dashboard, select Delete backup data. This option is enabled if you had chosen to [Stop protection and retain backup data](#stop-protection-and-retain-backup-data) option during stop VM protection. - ![Select Delete backup](./media/backup-azure-manage-vms/delete-backup-button.png) + :::image type="content" source="./media/backup-azure-manage-vms/delete-backup-button.png" alt-text="Screenshot showing to select Delete backup."::: * On the [vault item dashboard](#view-vms-on-the-dashboard), select **Delete backup data**. * Type the name of the backup item to confirm that you want to delete the recovery points. - ![Delete backup data](./media/backup-azure-manage-vms/delete-backup-data.png) + :::image type="content" source="./media/backup-azure-manage-vms/delete-backup-data.png" alt-text="Screenshot showing to delete backup data."::: * To delete the backup data for the item, select **Delete**. A notification message lets you know that the backup data has been deleted. diff --git a/articles/backup/backup-azure-mars-troubleshoot.md b/articles/backup/backup-azure-mars-troubleshoot.md index feffa932bec05..585c4ad04d289 100644 --- a/articles/backup/backup-azure-mars-troubleshoot.md +++ b/articles/backup/backup-azure-mars-troubleshoot.md @@ -2,7 +2,7 @@ title: Troubleshoot the Azure Backup agent description: In this article, learn how to troubleshoot the installation and registration of the Azure Backup agent. ms.topic: troubleshooting -ms.date: 04/05/2022 +ms.date: 05/31/2022 author: v-amallick ms.service: backup ms.author: v-amallick @@ -103,7 +103,7 @@ We recommend that you check the following before you start troubleshooting Micro - You can use [Add Exclusion rules to existing policy](./backup-azure-manage-mars.md#add-exclusion-rules-to-existing-policy) to exclude unsupported, missing, or deleted files from your backup policy to ensure successful backups. -- Avoid deleting and recreating protected folders with the same names in the top-level folder. Doing so could result in the backup completing with warnings with the error *A critical inconsistency was detected, therefore changes cannot be replicated.* If you need to delete and recreate folders, then consider doing so in subfolders under the protected top-level folder. 
+- Avoid deleting and recreating protected folders with the same names in the top-level folder. Doing so could result in the backup completing with warnings with the error: *A critical inconsistency was detected, therefore changes cannot be replicated.* If you need to delete and recreate folders, then consider doing so in subfolders under the protected top-level folder. ## Failed to set the encryption key for secure backups @@ -121,7 +121,7 @@ We recommend that you check the following before you start troubleshooting Micro | Error | Possible causes | Recommended actions | |---------|---------|---------| -|
                  Error 34506. The encryption passphrase stored on this computer is not correctly configured. |
                • The scratch folder is located on a volume that doesn't have enough space.
                • The scratch folder has been incorrectly moved.
                • The OnlineBackup.KEK file is missing. |
                • Upgrade to the [latest version](https://aka.ms/azurebackup_agent) of the MARS Agent.
                • Move the scratch folder or cache location to a volume with free space that's between 5% and 10% of the total size of the backup data. To correctly move the cache location, refer to the steps in [Common questions about backing up files and folders](./backup-azure-file-folder-backup-faq.yml).
                • Ensure that the OnlineBackup.KEK file is present.
                  *The default location for the scratch folder or the cache path is C:\Program Files\Microsoft Azure Recovery Services Agent\Scratch*. | +|
                  Error 34506. The encryption passphrase stored on this computer is not correctly configured. |
                • The scratch folder is located on a volume that doesn't have enough space.
                • The scratch folder has been incorrectly moved.
                • The OnlineBackup.KEK file is missing. |
                • Upgrade to the [latest version](https://aka.ms/azurebackup_agent) of the MARS Agent.
                • Move the scratch folder or cache location to a volume with free space that's between 5% and 10% of the total size of the backup data. To correctly move the cache location, refer to the steps in [Common questions about backing up files and folders](./backup-azure-file-folder-backup-faq.yml).
                • Ensure that the OnlineBackup.KEK file is present.
                  *The default location for the scratch folder or the cache path is C:\Program Files\Microsoft Azure Recovery Services Agent\Scratch*.
                • If you've recently moved your scratch folder, ensure that the path of your scratch folder location matches the values of the registry key entries shown below:

                  **Registry path**: `HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows Azure Backup\Config`
                  **Registry Key**: ScratchLocation
                  **Value**: *New cache folder location*

                  **Registry path**: `HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows Azure Backup\Config\CloudBackupProvider`
                  **Registry Key**: ScratchLocation
                  **Value**: *New cache folder location* | ## Backups don't run according to schedule diff --git a/articles/backup/backup-azure-monitor-alert-faq.yml b/articles/backup/backup-azure-monitor-alert-faq.yml index eb29a92d69d68..ee45bb07a49a4 100644 --- a/articles/backup/backup-azure-monitor-alert-faq.yml +++ b/articles/backup/backup-azure-monitor-alert-faq.yml @@ -35,7 +35,7 @@ sections: answer: | As per backup alerts: - - **Alert rule**: Refers to a user-created rule that specifies the condition on which an alert should be fired. [Learn more](../azure-monitor/alerts/alerts-overview.md#overview) + - **Alert rule**: Refers to a user-created rule that specifies the condition on which an alert should be fired. [Learn more](../azure-monitor/alerts/alerts-overview.md) - **Alert processing rule (earlier called Action rule)**: Refers to a user-created rule that specifies the notification channels a particular fired alert should be routed to. You can also use alert processing rules to suppress notifications for a period of time. [Learn more](../azure-monitor/alerts/alerts-action-rules.md?tabs=portal) - **Action group**: Refers to the notification channel (such as email, ITSM endpoint, logic app, webhook, and so on) that a fired alert can be routed to. [Learn more](../azure-monitor/alerts/action-groups.md) diff --git a/articles/backup/backup-azure-sap-hana-database.md b/articles/backup/backup-azure-sap-hana-database.md index 1c15b5bfbeb6a..9924cc778ae73 100644 --- a/articles/backup/backup-azure-sap-hana-database.md +++ b/articles/backup/backup-azure-sap-hana-database.md @@ -2,7 +2,7 @@ title: Back up an SAP HANA database to Azure with Azure Backup description: In this article, learn how to back up an SAP HANA database to Azure virtual machines with the Azure Backup service. ms.topic: conceptual -ms.date: 04/28/2022 +ms.date: 06/01/2022 author: v-amallick ms.service: backup ms.author: v-amallick @@ -40,7 +40,7 @@ The following table lists the various alternatives you can use for establishing | Private endpoints | Allow backups over private IPs inside the virtual network

                  Provide granular control on the network and vault side | Incurs standard private endpoint [costs](https://azure.microsoft.com/pricing/details/private-link/) | | NSG service tags | Easier to manage as range changes are automatically merged

                  No additional costs | Can be used with NSGs only

                  Provides access to the entire service | | Azure Firewall FQDN tags | Easier to manage since the required FQDNs are automatically managed | Can be used with Azure Firewall only | -| Allow access to service FQDNs/IPs | No additional costs

                  Works with all network security appliances and firewalls | A broad set of IPs or FQDNs may be required to be accessed | +| Allow access to service FQDNs/IPs | No additional costs.

                  Works with all network security appliances and firewalls.

                  You can also use service endpoints for *Storage* and *Azure Active Directory*. However, for Azure Backup, you need to assign the access to the corresponding IPs/FQDNs. | A broad set of IPs or FQDNs may be required to be accessed. | | [Virtual Network Service Endpoint](../virtual-network/virtual-network-service-endpoints-overview.md) | Can be used for Azure Storage (= Recovery Services vault).

                  Provides large benefit to optimize performance of data plane traffic. | Can’t be used for Azure AD, Azure Backup service. | | Network Virtual Appliance | Can be used for Azure Storage, Azure AD, Azure Backup service.

                  **Data plane**
                  • Azure Storage: `*.blob.core.windows.net`, `*.queue.core.windows.net`, `*.blob.storage.azure.net`


                  **Management plane**
                  • Azure AD: Allow access to FQDNs mentioned in sections 56 and 59 of [Microsoft 365 Common and Office Online](/microsoft-365/enterprise/urls-and-ip-address-ranges?view=o365-worldwide&preserve-view=true#microsoft-365-common-and-office-online).
                  • Azure Backup service: `.backup.windowsazure.com`

                  Learn more about [Azure Firewall service tags](../firewall/fqdn-tags.md). | Adds overhead to data plane traffic and decrease throughput/performance. | diff --git a/articles/backup/backup-azure-vm-backup-faq.yml b/articles/backup/backup-azure-vm-backup-faq.yml index a1dcc8749de8d..1cd02a7395b22 100644 --- a/articles/backup/backup-azure-vm-backup-faq.yml +++ b/articles/backup/backup-azure-vm-backup-faq.yml @@ -4,7 +4,7 @@ metadata: description: In this article, discover answers to common questions about backing up Azure VMs with the Azure Backup service. ms.topic: faq ms.service: backup - ms.date: 05/06/2022 + ms.date: 05/27/2022 author: v-amallick ms.author: v-amallick @@ -26,7 +26,7 @@ sections: To improve backup performance see, [backup best practices](./backup-azure-vms-introduction.md#best-practices); [Backup considerations](./backup-azure-vms-introduction.md#backup-and-restore-considerations) and [Backup Performance](./backup-azure-vms-introduction.md#backup-performance) - Although the total backup time for incremental backups is less than 24 hours, that might not be the case for the first backup. + Although the total backup time for incremental backups is less than 24 hours that might not be the case for the first backup. - question: Is the backup cost included in the VM cost? answer: | @@ -125,7 +125,7 @@ sections: - question: Does Azure Backup interfere with application performance? answer: | - Creating a VM Snapshot takes few minutes, and there will be a very minimal interference on application performance at this stage. But, data transfer to a vault takes a couple of hours; so we recommend to schedule backups during off business hours. Learn more about [best practices for backup and restore](./backup-azure-vms-introduction.md#backup-and-restore-considerations). + Creating a VM Snapshot takes few minutes, and there will be a very minimal interference on application performance at this stage. But, data transfer to a vault takes a couple of hours; so we recommend scheduling backups during off business hours. Learn more about [best practices for backup and restore](./backup-azure-vms-introduction.md#backup-and-restore-considerations). - question: Will a new disk added to VM be backed up automatically? answer: | @@ -140,6 +140,10 @@ sections: answer: | Yes, you can do this when *Transfer data to vault* phase is in progress. + - question: Does Azure Backup take backup of keys for ADE encrypted VMs and restore it along with the restored disk? + answer: | + Azure Backup backs up encryption keys and secrets of the backup data. Generally, the keys are not restored in the Key vault, but Azure Backup allows restoring the keys during the loss of keys. + - name: Restore questions: - question: How do I decide whether to restore disks only or a full VM? @@ -217,6 +221,10 @@ sections: answer: | Yes, you can delete these files once the restoration process is complete. By default, Azure Backup retains these files for future use. + - question: How do I run restore operation for Cross Region Restore (CRR) of ADE encrypted VMs? + answer: | + The encrypted keys are not expected to be present in the target region as part of Cross Regions Restore (CRR). Therefore, you need to restore the encrypted keys and secrets using the restored file. When the restore is complete, you can create Azure encrypted VM using restored disks. + - name: Manage VM backups questions: - question: What happens if I modify a backup policy? 
@@ -262,7 +270,7 @@ sections: One way to view the retention settings for your backups, is to navigate to the backup item [dashboard](./backup-azure-manage-vms.md#view-vms-on-the-dashboard) for your VM, in the Azure portal. Selecting the link to its backup policy helps you view the retention duration of all the daily, weekly, monthly and yearly retention points associated with the VM. - You can also use [Backup Explorer](./monitor-azure-backup-with-backup-explorer.md) to view the retention settings for all your VMs within a single pane of glass. Navigate to Backup Explorer from any Recovery Services vault, go to the **Backup Items** tab and select the Advanced View to see detailed retention information for each VM. + You can also use [Backup Explorer](./monitor-azure-backup-with-backup-explorer.md) to view the retention settings for all your VMs within a single pane of glass. Go to Backup Explorer from any Recovery Services vault, go to the **Backup Items** tab and select the Advanced View to see detailed retention information for each VM. - question: When the snapshot is moved from a storage account to a vault, how is encryption in the transit managed? answer: Azure VM Backup uses [HTTPS communication for encryption in transit](guidance-best-practices.md#encryption-of-data-in-transit-and-at-rest). The data transfer uses Azure fabric (and not public endpoints), which do not need Internet access for VM backup. @@ -305,6 +313,10 @@ sections: answer: | This error appears when you try to start a VM after creating an Azure VM from a non-Marketplace image or swap the OS disk of a VM with a non-Marketplace image, and then the VM deployment fails. To resolve this issue, remove the plan information from the VM. + - question: How do I manage key rotations? How to ensure which key is used during backup and if it’s present to be used with the restored VM? + answer: | + Azure Backup backs up the secrets and KEK data of the key version during backup, and restores the same. However, booting ADE VMs with older version keys is also possible. + diff --git a/articles/backup/backup-azure-vms-enhanced-policy.md b/articles/backup/backup-azure-vms-enhanced-policy.md index 1611ddd7cb85e..e4d4caf108f83 100644 --- a/articles/backup/backup-azure-vms-enhanced-policy.md +++ b/articles/backup/backup-azure-vms-enhanced-policy.md @@ -2,7 +2,7 @@ title: Back up Azure VMs with Enhanced policy description: Learn how to configure Enhanced policy to back up VMs. ms.topic: how-to -ms.date: 05/06/2022 +ms.date: 06/08/2022 ms.reviewer: geg author: v-amallick ms.service: backup @@ -21,7 +21,7 @@ Azure Backup now supports _Enhanced policy_ that's needed to support new Azure o You must enable backup of Trusted Launch VM through enhanced policy only. Enhanced policy provides the following features: - Supports *Multiple Backups Per Day* (in preview). -- Instant Restore tier is zonally redundant using Zone-redundant storage (ZRS) resiliency. See the [pricing details for Enhanced policy storage here](https://azure.microsoft.com/pricing/details/managed-disks/). +- Instant Restore tier is zonally redundant using Zone-redundant storage (ZRS) resiliency. See the [pricing details for Managed Disk Snapshots](https://azure.microsoft.com/pricing/details/managed-disks/). 
:::image type="content" source="./media/backup-azure-vms-enhanced-policy/enhanced-backup-policy-settings.png" alt-text="Screenshot showing the enhanced backup policy options.":::
 
diff --git a/articles/backup/backup-instant-restore-capability.md b/articles/backup/backup-instant-restore-capability.md
index 9ff88e2afade6..7e19da141f844 100644
--- a/articles/backup/backup-instant-restore-capability.md
+++ b/articles/backup/backup-instant-restore-capability.md
@@ -115,6 +115,11 @@ In a scenario where a retention policy is set as “1”, you can find two snapshots
 - You clean up snapshots, which are past retention.
 - The garbage collector (GC) in the backend is under heavy load.
 
+> [!NOTE]
+> Azure Backup manages backups automatically. Azure Backup retains old snapshots because they're needed to maintain the consistency of the backup. If you delete a snapshot manually, you might encounter backup consistency problems.
+> If there are errors in your backup history, stop backup with the retain-data option and then resume the backup.
+> Consider creating a **backup strategy** if you have a particular scenario (for example, a virtual machine with multiple disks that requires oversize space). Create one backup for the **VM with the OS disk**, and a separate backup for **the other disks**.
+
 ### I don’t need Instant Restore functionality. Can it be disabled?
 
 Instant restore feature is enabled for everyone and can't be disabled. You can reduce the snapshot retention to a minimum of one day.
diff --git a/articles/backup/backup-rbac-rs-vault.md b/articles/backup/backup-rbac-rs-vault.md
index 52d358a335a6e..dd5c2eee12883 100644
--- a/articles/backup/backup-rbac-rs-vault.md
+++ b/articles/backup/backup-rbac-rs-vault.md
@@ -104,6 +104,48 @@ The following table captures the Backup management actions and corresponding Azu
 
 >[!Note]
 >If you've contributor access at the resource group level and want to configure backup from file share blade, ensure to get *microsoft.recoveryservices/Locations/backupStatus/action* permission at the subscription level. To do so, create a [*custom role*](../role-based-access-control/custom-roles-portal.md#start-from-scratch) and assign this permission.
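+
+For example, a minimal custom role that grants only this permission could look like the following sketch (the role name and subscription ID are illustrative placeholders; adjust them for your environment):
+
+```json
+{
+  "Name": "Backup Status Checker (custom)",
+  "IsCustom": true,
+  "Description": "Can check backup status at the subscription level.",
+  "Actions": [
+    "microsoft.recoveryservices/Locations/backupStatus/action"
+  ],
+  "NotActions": [],
+  "AssignableScopes": [
+    "/subscriptions/<subscription-id>"
+  ]
+}
+```
+
+Save the definition to a file (for example, `backup-status-checker.json`), and then create and assign the role with the Azure CLI:
+
+```azurecli
+az role definition create --role-definition backup-status-checker.json
+az role assignment create \
+  --assignee "<user-object-id-or-email>" \
+  --role "Backup Status Checker (custom)" \
+  --scope "/subscriptions/<subscription-id>"
+```
+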
+### Minimum role requirements for Azure disk backup + +| Management Operation | Minimum Azure role required | Scope Required | Alternative | +| --- | --- | --- | --- | +| Validate before configuring backup | Backup Operator | Backup vault | | +| | Disk Backup Reader | Disk to be backed up| | +| Enable backup from backup vault | Backup Operator | Backup vault | | +| | Disk Backup Reader | Disk to be backed up | In addition, the backup vault MSI should be given [these permissions](./disk-backup-faq.yml) | +| On demand backup of disk | Backup Operator | Backup vault | | +| Validate before restoring a disk | Backup Operator | Backup vault | | +| | Disk Restore Operator | Resource group where disks will be restored to | | +| Restoring a disk | Backup Operator | Backup vault | | +| | Disk Restore Operator | Resource group where disks will be restored to | In addition, the backup vault MSI should be given [these permissions](./disk-backup-faq.yml) | + +### Minimum role requirements for Azure blob backup + +| Management Operation | Minimum Azure role required | Scope Required | Alternative | +| --- | --- | --- | --- | +| Validate before configuring backup | Backup Operator | Backup vault | | +| | Storage account backup contributor | Storage account containing the blob | | +| Enable backup from backup vault | Backup Operator | Backup vault | | +| | Storage account backup contributor | Storage account containing the blob | In addition, the backup vault MSI should be given [these permissions](./blob-backup-configure-manage.md#grant-permissions-to-the-backup-vault-on-storage-accounts) | +| On demand backup of blob | Backup Operator | Backup vault | | +| Validate before restoring a blob | Backup Operator | Backup vault | | +| | Storage account backup contributor | Storage account containing the blob | | +| Restoring a blob | Backup Operator | Backup vault | | +| | Storage account backup contributor | Storage account containing the blob | In addition, the backup vault MSI should be given [these permissions](./blob-backup-configure-manage.md#grant-permissions-to-the-backup-vault-on-storage-accounts) | + +### Minimum role requirements for Azure database for PostGreSQL server backup + +| Management Operation | Minimum Azure role required | Scope Required | Alternative | +| --- | --- | --- | --- | +| Validate before configuring backup | Backup Operator | Backup vault | | +| | Reader | Azure PostGreSQL server | | +| Enable backup from backup vault | Backup Operator | Backup vault | | +| | Contributor | Azure PostGreSQL server | Alternatively, instead of a built-in-role, you can consider a custom role which has the following permissions: Microsoft.DBforPostgreSQL/servers/write Microsoft.DBforPostgreSQL/servers/read In addition, the backup vault MSI should be given [these permissions](./backup-azure-database-postgresql-overview.md#set-of-permissions-needed-for-azure-postgresql-database-backup) | +| On demand backup of PostGreSQL server | Backup Operator | Backup vault | | +| Validate before restoring a server | Backup Operator | Backup vault | | +| | Contributor | Target Azure PostGreSQL server | Alternatively, instead of a built-in-role, you can consider a custom role which has the following permissions: Microsoft.DBforPostgreSQL/servers/write Microsoft.DBforPostgreSQL/servers/read +| Restoring a server | Backup Operator | Backup vault | | +| | Contributor | Target Azure PostGreSQL server | Alternatively, instead of a built-in-role, you can consider a custom role which has the following permissions: 
Microsoft.DBforPostgreSQL/servers/write Microsoft.DBforPostgreSQL/servers/read In addition, the backup vault MSI should be given [these permissions](./backup-azure-database-postgresql-overview.md#set-of-permissions-needed-for-azure-postgresql-database-restore) | + ## Next steps * [Azure role-based access control (Azure RBAC)](../role-based-access-control/role-assignments-portal.md): Get started with Azure RBAC in the Azure portal. @@ -111,4 +153,4 @@ The following table captures the Backup management actions and corresponding Azu * [PowerShell](../role-based-access-control/role-assignments-powershell.md) * [Azure CLI](../role-based-access-control/role-assignments-cli.md) * [REST API](../role-based-access-control/role-assignments-rest.md) -* [Azure role-based access control troubleshooting](../role-based-access-control/troubleshooting.md): Get suggestions for fixing common issues. +* [Azure role-based access control troubleshooting](../role-based-access-control/troubleshooting.md): Get suggestions for fixing common issues. \ No newline at end of file diff --git a/articles/backup/backup-sql-server-database-azure-vms.md b/articles/backup/backup-sql-server-database-azure-vms.md index b9444bf118046..c0a4d6e5a1c95 100644 --- a/articles/backup/backup-sql-server-database-azure-vms.md +++ b/articles/backup/backup-sql-server-database-azure-vms.md @@ -2,7 +2,7 @@ title: Back up multiple SQL Server VMs from the vault description: In this article, learn how to back up SQL Server databases on Azure virtual machines with Azure Backup from the Recovery Services vault ms.topic: conceptual -ms.date: 04/28/2022 +ms.date: 06/01/2022 author: v-amallick ms.service: backup ms.author: v-amallick @@ -48,7 +48,7 @@ The following table lists the various alternatives you can use for establishing | Private endpoints | Allow backups over private IPs inside the virtual network

                  Provide granular control on the network and vault side | Incurs standard private endpoint [costs](https://azure.microsoft.com/pricing/details/private-link/) | | NSG service tags | Easier to manage as range changes are automatically merged

                  No additional costs | Can be used with NSGs only

                  Provides access to the entire service | | Azure Firewall FQDN tags | Easier to manage since the required FQDNs are automatically managed | Can be used with Azure Firewall only | -| Allow access to service FQDNs/IPs | No additional costs

                  Works with all network security appliances and firewalls | A broad set of IPs or FQDNs may be required to be accessed | +| Allow access to service FQDNs/IPs | No additional costs.

                  Works with all network security appliances and firewalls.

                  You can also use service endpoints for *Storage* and *Azure Active Directory*. However, for Azure Backup, you need to assign the access to the corresponding IPs/FQDNs. | A broad set of IPs or FQDNs may be required to be accessed. | | Use an HTTP proxy | Single point of internet access to VMs | Additional costs to run a VM with the proxy software | The following sections provide more details around using these options. diff --git a/articles/backup/backup-support-matrix-mars-agent.md b/articles/backup/backup-support-matrix-mars-agent.md index a0e03999b9c9d..16a3b6556e4e0 100644 --- a/articles/backup/backup-support-matrix-mars-agent.md +++ b/articles/backup/backup-support-matrix-mars-agent.md @@ -61,6 +61,7 @@ The operating systems must be 64 bit and should be running the latest services p **Operating system** | **Files/folders** | **System state** | **Software/Module requirements** --- | --- | --- | --- +Windows 11 (Enterprise, Pro, Home) | Yes | No | Check the corresponding server version for software/module requirements Windows 10 (Enterprise, Pro, Home) | Yes | No | Check the corresponding server version for software/module requirements Windows Server 2022 (Standard, Datacenter, Essentials) | Yes | Yes | Check the corresponding server version for software/module requirements Windows 8.1 (Enterprise, Pro)| Yes |No | Check the corresponding server version for software/module requirements diff --git a/articles/backup/media/backup-azure-manage-vms/azure-virtual-machine.png b/articles/backup/media/backup-azure-manage-vms/azure-virtual-machine-expanded.png similarity index 100% rename from articles/backup/media/backup-azure-manage-vms/azure-virtual-machine.png rename to articles/backup/media/backup-azure-manage-vms/azure-virtual-machine-expanded.png diff --git a/articles/backup/media/backup-azure-manage-vms/azure-virtual-machine-inline.png b/articles/backup/media/backup-azure-manage-vms/azure-virtual-machine-inline.png new file mode 100644 index 0000000000000..f296942289dd4 Binary files /dev/null and b/articles/backup/media/backup-azure-manage-vms/azure-virtual-machine-inline.png differ diff --git a/articles/backup/media/backup-azure-manage-vms/backup-items-blade-select-item.png b/articles/backup/media/backup-azure-manage-vms/backup-items-blade-select-item-expanded.png similarity index 100% rename from articles/backup/media/backup-azure-manage-vms/backup-items-blade-select-item.png rename to articles/backup/media/backup-azure-manage-vms/backup-items-blade-select-item-expanded.png diff --git a/articles/backup/media/backup-azure-manage-vms/backup-items-blade-select-item-inline.png b/articles/backup/media/backup-azure-manage-vms/backup-items-blade-select-item-inline.png new file mode 100644 index 0000000000000..3eb8d4e07b293 Binary files /dev/null and b/articles/backup/media/backup-azure-manage-vms/backup-items-blade-select-item-inline.png differ diff --git a/articles/backup/media/backup-azure-manage-vms/backup-policy-create-new.png b/articles/backup/media/backup-azure-manage-vms/backup-policy-create-new-expanded.png similarity index 100% rename from articles/backup/media/backup-azure-manage-vms/backup-policy-create-new.png rename to articles/backup/media/backup-azure-manage-vms/backup-policy-create-new-expanded.png diff --git a/articles/backup/media/backup-azure-manage-vms/backup-policy-create-new-inline.png b/articles/backup/media/backup-azure-manage-vms/backup-policy-create-new-inline.png new file mode 100644 index 0000000000000..26348085548a2 Binary files /dev/null and 
b/articles/backup/media/backup-azure-manage-vms/backup-policy-create-new-inline.png differ diff --git a/articles/backup/media/backup-azure-manage-vms/bottom-slider.png b/articles/backup/media/backup-azure-manage-vms/bottom-slider-expanded.png similarity index 100% rename from articles/backup/media/backup-azure-manage-vms/bottom-slider.png rename to articles/backup/media/backup-azure-manage-vms/bottom-slider-expanded.png diff --git a/articles/backup/media/backup-azure-manage-vms/bottom-slider-inline.png b/articles/backup/media/backup-azure-manage-vms/bottom-slider-inline.png new file mode 100644 index 0000000000000..fc2a45d80237e Binary files /dev/null and b/articles/backup/media/backup-azure-manage-vms/bottom-slider-inline.png differ diff --git a/articles/backup/media/backup-azure-manage-vms/full-view-rs-vault.png b/articles/backup/media/backup-azure-manage-vms/full-view-rs-vault-expanded.png similarity index 100% rename from articles/backup/media/backup-azure-manage-vms/full-view-rs-vault.png rename to articles/backup/media/backup-azure-manage-vms/full-view-rs-vault-expanded.png diff --git a/articles/backup/media/backup-azure-manage-vms/full-view-rs-vault-inline.png b/articles/backup/media/backup-azure-manage-vms/full-view-rs-vault-inline.png new file mode 100644 index 0000000000000..507dfc494f9b6 Binary files /dev/null and b/articles/backup/media/backup-azure-manage-vms/full-view-rs-vault-inline.png differ diff --git a/articles/backup/media/backup-azure-manage-vms/item-dashboard-settings.png b/articles/backup/media/backup-azure-manage-vms/item-dashboard-settings-expanded.png similarity index 100% rename from articles/backup/media/backup-azure-manage-vms/item-dashboard-settings.png rename to articles/backup/media/backup-azure-manage-vms/item-dashboard-settings-expanded.png diff --git a/articles/backup/media/backup-azure-manage-vms/item-dashboard-settings-inline.png b/articles/backup/media/backup-azure-manage-vms/item-dashboard-settings-inline.png new file mode 100644 index 0000000000000..d35027409558b Binary files /dev/null and b/articles/backup/media/backup-azure-manage-vms/item-dashboard-settings-inline.png differ diff --git a/articles/backup/multi-user-authorization-concept.md b/articles/backup/multi-user-authorization-concept.md index 870409f6fec4e..9d1f40bc3251c 100644 --- a/articles/backup/multi-user-authorization-concept.md +++ b/articles/backup/multi-user-authorization-concept.md @@ -2,7 +2,7 @@ title: Multi-user authorization using Resource Guard description: An overview of Multi-user authorization using Resource Guard. 
ms.topic: conceptual -ms.date: 05/05/2022 +ms.date: 06/08/2022 author: v-amallick ms.service: backup ms.author: v-amallick @@ -26,9 +26,9 @@ The following table lists the operations defined as critical operations and can --- | --- Disable soft delete | Mandatory Disable MUA protection | Mandatory -Modify backup policy | Optional: Can be excluded -Modify protection | Optional: Can be excluded -Stop protection | Optional: Can be excluded +Modify backup policy (reduced retention) | Optional: Can be excluded +Modify protection (reduced retention) | Optional: Can be excluded +Stop protection with delete data | Optional: Can be excluded Change MARS security PIN | Optional: Can be excluded ### Concepts and process diff --git a/articles/backup/whats-new.md b/articles/backup/whats-new.md index d4db278c79aef..88b72f97835a5 100644 --- a/articles/backup/whats-new.md +++ b/articles/backup/whats-new.md @@ -2,7 +2,7 @@ title: What's new in Azure Backup description: Learn about new features in Azure Backup. ms.topic: conceptual -ms.date: 05/24/2022 +ms.date: 05/26/2022 author: v-amallick ms.service: backup ms.author: v-amallick @@ -17,7 +17,6 @@ You can learn more about the new releases by bookmarking this page or by [subscr ## Updates summary - May 2022 - - [Multi-user authorization using Resource Guard is now generally available](#multi-user-authorization-using-resource-guard-is-now-generally-available) - [Archive tier support for Azure Virtual Machines is now generally available](#archive-tier-support-for-azure-virtual-machines-is-now-generally-available) - February 2022 - [Multiple backups per day for Azure Files is now generally available](#multiple-backups-per-day-for-azure-files-is-now-generally-available) @@ -41,12 +40,6 @@ You can learn more about the new releases by bookmarking this page or by [subscr - February 2021 - [Backup for Azure Blobs (in preview)](#backup-for-azure-blobs-in-preview) -## Multi-user authorization using Resource Guard is now generally available - -Azure Backup now supports multi-user authorization (MUA) that allows you to add an additional layer of protection to critical operations on your Recovery Services vaults. For MUA, Azure Backup uses the Azure resource, Resource Guard, to ensure critical operations are performed only with applicable authorization. - -For more information, see [how to protect Recovery Services vault and manage critical operations with MUA](multi-user-authorization.md). - ## Archive tier support for Azure Virtual Machines is now generally available Azure Backup now supports the movement of recovery points to the Vault-archive tier for Azure Virtual Machines from the Azure portal. This allows you to move the archivable/recommended recovery points (corresponding to a backup item) to the Vault-archive tier at one go. diff --git a/articles/baremetal-infrastructure/concepts-baremetal-infrastructure-overview.md b/articles/baremetal-infrastructure/concepts-baremetal-infrastructure-overview.md index d1641dc113c34..3ba702e0120a4 100644 --- a/articles/baremetal-infrastructure/concepts-baremetal-infrastructure-overview.md +++ b/articles/baremetal-infrastructure/concepts-baremetal-infrastructure-overview.md @@ -32,7 +32,7 @@ BareMetal Infrastructure offers these benefits: - Certified hardware for specialized workloads - SAP (Refer to [SAP Note #1928533](https://launchpad.support.sap.com/#/notes/1928533). You'll need an SAP account for access.) 
- - Oracle (Refer to [Oracle document ID #948372.1](https://support.oracle.com/epmos/faces/DocumentDisplay?_afrLoop=52088246571495&id=948372.1&_adf.ctrl-state=kwnkj1hzm_52). You'll need an Oracle account for access.) + - Oracle (You'll need an Oracle account for access.) - Non-hypervised BareMetal instance, single tenant ownership - Low latency between Azure hosted application VMs to BareMetal instances (0.35 ms) - All Flash SSD and NVMe diff --git a/articles/bastion/bastion-connect-vm-scale-set.md b/articles/bastion/bastion-connect-vm-scale-set.md index 870d2c2da1b74..65b5da4c4bf65 100644 --- a/articles/bastion/bastion-connect-vm-scale-set.md +++ b/articles/bastion/bastion-connect-vm-scale-set.md @@ -1,39 +1,43 @@ --- -title: 'Connect to a Windows virtual machine scale set using Azure Bastion' +title: 'Connect to a virtual machine scale set using Azure Bastion' description: Learn how to connect to an Azure virtual machine scale set using Azure Bastion. -services: bastion author: cherylmc - ms.service: bastion ms.topic: how-to -ms.date: 09/20/2021 +ms.date: 05/24/2022 ms.author: cherylmc --- # Connect to a virtual machine scale set using Azure Bastion -This article shows you how to securely and seamlessly RDP to your Windows virtual machine scale set instance in an Azure virtual network using Azure Bastion. You can connect to a virtual machine scale set instance directly from the Azure portal. When using Azure Bastion, VMs don't require a client, agent, or additional software. For more information about Azure Bastion, see the [Overview](bastion-overview.md). +This article shows you how to securely and seamlessly connect to your virtual machine scale set instance in an Azure virtual network directly from the Azure portal using Azure Bastion. When you use Azure Bastion, VMs don't require a client, agent, or additional software. For more information about Azure Bastion, see the [Overview](bastion-overview.md). For more information about virtual machine scale sets, see [What are virtual machine scale sets?](../virtual-machine-scale-sets/overview.md) ## Prerequisites -Make sure that you have set up an Azure Bastion host for the virtual network in which the virtual machine scale set resides. For more information, see [Create an Azure Bastion host](./tutorial-create-host-portal.md). Once the Bastion service is provisioned and deployed in your virtual network, you can use it to connect to a virtual machine scale set instance in this virtual network. Bastion assumes that you are using RDP to connect to a Windows virtual machine scale set, and SSH to connect to your Linux virtual machine scale set. For information about connection to a Linux VM, see [Connect to a VM - Linux](bastion-connect-vm-ssh-linux.md). +Make sure that you have set up an Azure Bastion host for the virtual network in which the virtual machine scale set resides. For more information, see [Create an Azure Bastion host](tutorial-create-host-portal.md). Once the Bastion service is provisioned and deployed in your virtual network, you can use it to connect to a virtual machine scale set instance in this virtual network. + +## Connect -## Connect using RDP +This section shows you the basic steps to connect to your virtual machine scale set. 1. Open the [Azure portal](https://portal.azure.com). Go to the virtual machine scale set that you want to connect to. - ![navigate](./media/bastion-connect-vm-scale-set/1.png) -2. Go to the virtual machine scale set instance that you want to connect to, then select **Connect**. 
When using an RDP connection, the virtual machine scale set should be a Windows virtual machine scale set. + :::image type="content" source="./media/bastion-connect-vm-scale-set/select-scale-set.png" alt-text="Screenshot shows virtual machine scale sets." lightbox="./media/bastion-connect-vm-scale-set/select-scale-set.png"::: + +1. Go to the virtual machine scale set instance that you want to connect to. + + :::image type="content" source="./media/bastion-connect-vm-scale-set/select-instance.png" alt-text="Screenshot shows virtual machine scale set instances." lightbox="./media/bastion-connect-vm-scale-set/select-instance.png"::: + +1. Select **Connect** at the top of the page, then choose **Bastion** from the dropdown. + + :::image type="content" source="./media/bastion-connect-vm-scale-set/select-connect.png" alt-text="Screenshot shows select the connect button and choose Bastion from the dropdown." lightbox="./media/bastion-connect-vm-scale-set/select-connect.png"::: - ![virtual machine scale set](./media/bastion-connect-vm-scale-set/2.png) -3. After you select **Connect**, a side bar appears that has three tabs – RDP, SSH, and Bastion. Select the **Bastion** tab from the side bar. If you didn't provision Bastion for the virtual network, you can select the link to configure Bastion. For configuration instructions, see [Configure Bastion](./tutorial-create-host-portal.md). +1. On the **Bastion** page, fill in the required settings. The settings you can select depend on the virtual machine to which you're connecting, and the [Bastion SKU](configuration-settings.md#skus) tier that you're using. The Standard SKU gives you more connection options than the Basic SKU. For more information about settings, see [Bastion configuration settings](configuration-settings.md). - ![Bastion tab](./media/bastion-connect-vm-scale-set/3.png) -4. On the Bastion tab, enter the username and password for your virtual machine scale set, then select **Connect**. + :::image type="content" source="./media/bastion-connect-vm-scale-set/connection-settings.png" alt-text="Screenshot shows connection settings options with Open in new browser tab selected." lightbox="./media/bastion-connect-vm-scale-set/connection-settings.png"::: - ![connect](./media/bastion-connect-vm-scale-set/4.png) -5. The RDP connection to this virtual machine via Bastion will open directly in the Azure portal (over HTML5) using port 443 and the Bastion service. +1. After filling in the values on the Bastion page, select **Connect** to connect to the instance. ## Next steps diff --git a/articles/bastion/connect-ip-address.md b/articles/bastion/connect-ip-address.md index 7642b5e3d39de..5048eea466246 100644 --- a/articles/bastion/connect-ip-address.md +++ b/articles/bastion/connect-ip-address.md @@ -53,7 +53,7 @@ Before you begin these steps, verify that you have the following environment set 1. To connect to a VM using a specified private IP address, you make the connection from Bastion to the VM, not directly from the VM page. On your Bastion page, select **Connect** to open the Connect page. -1. On the Bastion **Connect** page, for **Hostname**, enter the private IP address of the target VM. +1. On the Bastion **Connect** page, for **IP address**, enter the private IP address of the target VM. :::image type="content" source="./media/connect-ip-address/ip-address.png" alt-text="Screenshot of the Connect using Azure Bastion page." 
lightbox="./media/connect-ip-address/ip-address.png"::: diff --git a/articles/bastion/connect-native-client-windows.md b/articles/bastion/connect-native-client-windows.md index 0f628c443b13a..a47f638d5114d 100644 --- a/articles/bastion/connect-native-client-windows.md +++ b/articles/bastion/connect-native-client-windows.md @@ -159,7 +159,7 @@ Use the example that corresponds to the type of target VM to which you want to c If you’re signing in to an Azure AD login-enabled VM, use the following command. For more information, see [Azure Linux VMs and Azure AD](../active-directory/devices/howto-vm-sign-in-azure-ad-linux.md). ```azurecli - az network bastion ssh --name "" --resource-group "" --target-resource-id "" --auth-type "AAD" + az network bastion ssh --name "" --resource-group "" --target-resource-id "" --auth-type "AAD" ``` **SSH:** @@ -167,7 +167,7 @@ Use the example that corresponds to the type of target VM to which you want to c The extension can be installed by running, ```az extension add --name ssh```. To sign in using an SSH key pair, use the following example. ```azurecli - az network bastion ssh --name "" --resource-group "" --target-resource-id "" --auth-type "ssh-key" --username "" --ssh-key "" + az network bastion ssh --name "" --resource-group "" --target-resource-id "" --auth-type "ssh-key" --username "" --ssh-key "" ``` **Username/password:** @@ -175,7 +175,7 @@ Use the example that corresponds to the type of target VM to which you want to c If you’re signing in using a local username and password, use the following command. You’ll then be prompted for the password for the target VM. ```azurecli - az network bastion ssh --name "" --resource-group "" --target-resource-id "" --auth-type "password" --username "" + az network bastion ssh --name "" --resource-group "" --target-resource-id "" --auth-type "password" --username "" ``` 1. Once you sign in to your target VM, the native client on your computer will open up with your VM session; **MSTSC** for RDP sessions, and **SSH CLI extension (az ssh)** for SSH sessions. @@ -197,7 +197,7 @@ This connection supports file upload from the local computer to the target VM. F 1. Open the tunnel to your target VM using the following command. ```azurecli - az network bastion tunnel --name "" --resource-group "" --target-resource-id "" --resource-port "" --port "" + az network bastion tunnel --name "" --resource-group "" --target-resource-id "" --resource-port "" --port "" ``` 1. Connect to your target VM using SSH or RDP, the native client of your choice, and the local machine port you specified in Step 2. 
diff --git a/articles/bastion/media/bastion-connect-vm-scale-set/1.png b/articles/bastion/media/bastion-connect-vm-scale-set/1.png deleted file mode 100644 index cd96ea25cd274..0000000000000 Binary files a/articles/bastion/media/bastion-connect-vm-scale-set/1.png and /dev/null differ diff --git a/articles/bastion/media/bastion-connect-vm-scale-set/2.png b/articles/bastion/media/bastion-connect-vm-scale-set/2.png deleted file mode 100644 index 28d2e4dac1dca..0000000000000 Binary files a/articles/bastion/media/bastion-connect-vm-scale-set/2.png and /dev/null differ diff --git a/articles/bastion/media/bastion-connect-vm-scale-set/3.png b/articles/bastion/media/bastion-connect-vm-scale-set/3.png deleted file mode 100644 index af538376a1db4..0000000000000 Binary files a/articles/bastion/media/bastion-connect-vm-scale-set/3.png and /dev/null differ diff --git a/articles/bastion/media/bastion-connect-vm-scale-set/4.png b/articles/bastion/media/bastion-connect-vm-scale-set/4.png deleted file mode 100644 index b069d16af085c..0000000000000 Binary files a/articles/bastion/media/bastion-connect-vm-scale-set/4.png and /dev/null differ diff --git a/articles/bastion/media/bastion-connect-vm-scale-set/connection-settings.png b/articles/bastion/media/bastion-connect-vm-scale-set/connection-settings.png new file mode 100644 index 0000000000000..60664a3a26e35 Binary files /dev/null and b/articles/bastion/media/bastion-connect-vm-scale-set/connection-settings.png differ diff --git a/articles/bastion/media/bastion-connect-vm-scale-set/select-connect.png b/articles/bastion/media/bastion-connect-vm-scale-set/select-connect.png new file mode 100644 index 0000000000000..0f24e67001f13 Binary files /dev/null and b/articles/bastion/media/bastion-connect-vm-scale-set/select-connect.png differ diff --git a/articles/bastion/media/bastion-connect-vm-scale-set/select-instance.png b/articles/bastion/media/bastion-connect-vm-scale-set/select-instance.png new file mode 100644 index 0000000000000..4112a1ac640d4 Binary files /dev/null and b/articles/bastion/media/bastion-connect-vm-scale-set/select-instance.png differ diff --git a/articles/bastion/media/bastion-connect-vm-scale-set/select-scale-set.png b/articles/bastion/media/bastion-connect-vm-scale-set/select-scale-set.png new file mode 100644 index 0000000000000..6aefe75cf730e Binary files /dev/null and b/articles/bastion/media/bastion-connect-vm-scale-set/select-scale-set.png differ diff --git a/articles/bastion/media/quickstart-host-portal/connect-vm.png b/articles/bastion/media/quickstart-host-portal/connect-vm.png index 7e6a9aab924cb..bc3e6ddbfdcad 100644 Binary files a/articles/bastion/media/quickstart-host-portal/connect-vm.png and b/articles/bastion/media/quickstart-host-portal/connect-vm.png differ diff --git a/articles/bastion/media/quickstart-host-portal/creating-bastion.png b/articles/bastion/media/quickstart-host-portal/creating-bastion.png index 332dd169ad901..a71a625b9fb7d 100644 Binary files a/articles/bastion/media/quickstart-host-portal/creating-bastion.png and b/articles/bastion/media/quickstart-host-portal/creating-bastion.png differ diff --git a/articles/bastion/media/quickstart-host-portal/deploy-bastion.png b/articles/bastion/media/quickstart-host-portal/deploy-bastion.png index 87bb9a8e1e9fa..0c5c2f3416dff 100644 Binary files a/articles/bastion/media/quickstart-host-portal/deploy-bastion.png and b/articles/bastion/media/quickstart-host-portal/deploy-bastion.png differ diff --git a/articles/bastion/quickstart-host-portal.md 
b/articles/bastion/quickstart-host-portal.md index 768ae3837599f..e3a989f30fdc1 100644 --- a/articles/bastion/quickstart-host-portal.md +++ b/articles/bastion/quickstart-host-portal.md @@ -6,7 +6,7 @@ services: bastion author: cherylmc ms.service: bastion ms.topic: quickstart -ms.date: 02/25/2022 +ms.date: 06/05/2022 ms.author: cherylmc ms.custom: ignite-fall-2021, mode-other #Customer intent: As someone with a networking background, I want to connect to a virtual machine securely via RDP/SSH using a private IP address through my browser. @@ -81,7 +81,7 @@ In this quickstart, you deploy Bastion from your virtual machine settings in the 1. Sign in to the [Azure portal](https://portal.azure.com). 1. In the portal, go to the VM to which you want to connect. The values from the virtual network in which this VM resides will be used to create the Bastion deployment. -1. Select **Bastion** in the left menu. You can view some of the values that will be used when creating the bastion host for your virtual network. Select **Deploy Bastion**. +1. Select **Bastion** in the left menu. You can view some of the values that will be used when creating the bastion host for your virtual network. Select **Create Azure Bastion using defaults**. :::image type="content" source="./media/quickstart-host-portal/deploy-bastion.png" alt-text="Screenshot of Deploy Bastion." lightbox="./media/quickstart-host-portal/deploy-bastion.png"::: 1. Bastion begins deploying. This can take around 10 minutes to complete. diff --git a/articles/batch/TOC.yml b/articles/batch/TOC.yml index 613b50e4e6d9a..d9f7878445057 100644 --- a/articles/batch/TOC.yml +++ b/articles/batch/TOC.yml @@ -84,7 +84,7 @@ displayName: compute node, application package, scaling, schedule, os, configuration href: nodes-and-pools.md - name: Jobs and tasks - displayName: Batch job, start task, environment + displayName: Batch job, start task, environment href: jobs-and-tasks.md - name: Files and directories displayName: Batch file, working directory @@ -113,8 +113,12 @@ href: batch-management-dotnet.md - name: Get cost analysis and set budgets href: budget.md + - name: Configure public network access with Batch accounts + href: public-network-access.md - name: Use private endpoints with Batch accounts href: private-connectivity.md + - name: Manage private endpoint connections with Batch accounts + href: manage-private-endpoint-connections.md - name: Configure customer-managed keys href: batch-customer-managed-key.md - name: Move between regions @@ -170,6 +174,9 @@ - name: Create a pool without public IP addresses displayName: private href: batch-pool-no-public-ip-address.md + - name: Create a simplified node communication pool without public IP addresses + displayName: private + href: simplified-node-communication-pool-no-public-ip.md - name: Create a pool with ephemeral OS disk nodes href: create-pool-ephemeral-os-disk.md - name: Use extensions with pools @@ -253,7 +260,7 @@ href: batch-js-get-started.md - name: Run workloads items: - - name: MPI + - name: MPI displayName: multi-instance, message passing interface href: batch-mpi.md - name: Container workloads diff --git a/articles/batch/batch-account-create-portal.md b/articles/batch/batch-account-create-portal.md index c6bf4f7e42600..2936d2e732682 100644 --- a/articles/batch/batch-account-create-portal.md +++ b/articles/batch/batch-account-create-portal.md @@ -39,7 +39,11 @@ For background about Batch accounts and scenarios, see [Batch service workflow a :::image type="content" 
source="media/batch-account-create-portal/storage_account.png" alt-text="Screenshot of the options when creating a storage account."::: -1. If desired, select **Advanced** to specify **Identity type**, **Public network access** or **Pool allocation mode**. For most scenarios, the default options are fine. +1. If desired, select **Advanced** to specify **Identity type**, **Pool allocation mode** or **Authentication mode**. For most scenarios, the default options are fine. + +1. If desired, select **Networking** to configure [public network access](public-network-access.md) with your Batch account. + + :::image type="content" source="media/batch-account-create-portal/batch-account-networking.png" alt-text="Screenshot of the networking options when creating a Batch account."::: 1. Select **Review + create**, then select **Create** to create the account. @@ -50,7 +54,7 @@ Once the account has been created, select the account to access its settings and > [!NOTE] > The name of the Batch account is its ID and can't be changed. If you need to change the name of a Batch account, you'll need to delete the account and create a new one with the intended name. -:::image type="content" source="media/batch-account-create-portal/batch_blade.png" alt-text="Screenshot of the Batch account page in the Azure portal."::: +:::image type="content" source="media/batch-account-create-portal/batch-blade.png" alt-text="Screenshot of the Batch account page in the Azure portal."::: When you develop an application with the [Batch APIs](batch-apis-tools.md#azure-accounts-for-batch-development), you need an account URL and key to access your Batch resources. (Batch also supports Azure Active Directory authentication.) To view the Batch account access information, select **Keys**. 
@@ -127,7 +131,7 @@ Make sure to set the following parameters based on your Batch pool's configurati For example: ```powershell -Get-AzMarketplaceTerms -Publisher 'microsoft-azure-batch' -Product 'ubuntu-server-container' -Name '20-04-lts' | Set-AzMarketplaceTerms -Accept +Get-AzMarketplaceTerms -Publisher 'microsoft-azure-batch' -Product 'ubuntu-server-container' -Name '20-04-lts' | Set-AzMarketplaceTerms -Accept ``` diff --git a/articles/batch/batch-cli-templates.md b/articles/batch/batch-cli-templates.md index 645a07b92e2be..0dcff7bec60db 100644 --- a/articles/batch/batch-cli-templates.md +++ b/articles/batch/batch-cli-templates.md @@ -89,10 +89,10 @@ The following is an example of a template that creates a pool of Linux VMs with "imageReference": { "publisher": "Canonical", "offer": "UbuntuServer", - "sku": "16.04-LTS", + "sku": "18.04-LTS", "version": "latest" }, - "nodeAgentSKUId": "batch.node.ubuntu 16.04" + "nodeAgentSKUId": "batch.node.ubuntu 18.04" }, "vmSize": "STANDARD_D3_V2", "targetDedicatedNodes": "[parameters('nodeCount')]", diff --git a/articles/batch/batch-parallel-node-tasks.md b/articles/batch/batch-parallel-node-tasks.md index 5b8725d80f0fa..6a88608e8e3b2 100644 --- a/articles/batch/batch-parallel-node-tasks.md +++ b/articles/batch/batch-parallel-node-tasks.md @@ -145,7 +145,7 @@ For more information on adding pools by using the REST API, see [Add a pool to a "offer": "ubuntuserver", "sku": "18.04-lts" }, - "nodeAgentSKUId": "batch.node.ubuntu 16.04" + "nodeAgentSKUId": "batch.node.ubuntu 18.04" }, "targetDedicatedComputeNodes":2, "taskSlotsPerNode":4, diff --git a/articles/batch/batch-pool-no-public-ip-address.md b/articles/batch/batch-pool-no-public-ip-address.md index 8ae990eb670e7..cae2a24133c63 100644 --- a/articles/batch/batch-pool-no-public-ip-address.md +++ b/articles/batch/batch-pool-no-public-ip-address.md @@ -6,16 +6,17 @@ ms.date: 01/11/2022 ms.custom: references_regions --- -# Create an Azure Batch pool without public IP addresses (preview) +# Create a Batch pool without public IP addresses (preview) > [!IMPORTANT] -> Support for pools without public IP addresses in Azure Batch is currently in public preview for the following regions: France Central, East Asia, West Central US, South Central US, West US 2, East US, North Europe, East US 2, Central US, West Europe, North Central US, West US, Australia East, Japan East, Japan West. -> This preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. -> For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). +> - Support for pools without public IP addresses in Azure Batch is currently in public preview for the following regions: France Central, East Asia, West Central US, South Central US, West US 2, East US, North Europe, East US 2, Central US, West Europe, North Central US, West US, Australia East, Japan East, Japan West. +> - This preview version will be replaced by [Simplified node communication pool without public IP addresses](simplified-node-communication-pool-no-public-ip.md). +> - This preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. 
+> - For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). When you create an Azure Batch pool, you can provision the virtual machine configuration pool without a public IP address. This article explains how to set up a Batch pool without public IP addresses. -## Why use a pool without public IP Addresses? +## Why use a pool without public IP addresses? By default, all the compute nodes in an Azure Batch virtual machine configuration pool are assigned a public IP address. This address is used by the Batch service to schedule tasks and for communication with compute nodes, including outbound access to the internet. @@ -59,7 +60,7 @@ To restrict access to these nodes and reduce the discoverability of these nodes ## Use the Batch REST API to create a pool without public IP addresses -The example below shows how to use the [Azure Batch REST API](/rest/api/batchservice/pool/add) to create a pool that uses public IP addresses. +The example below shows how to use the [Batch Service REST API](/rest/api/batchservice/pool/add) to create a pool that uses public IP addresses. ### REST API URI @@ -78,9 +79,9 @@ client-request-id: 00000000-0000-0000-0000-000000000000 "imageReference": { "publisher": "Canonical", "offer": "UbuntuServer", - "sku": "16.040-LTS" + "sku": "18.04-lts" }, - "nodeAgentSKUId": "batch.node.ubuntu 16.04" + "nodeAgentSKUId": "batch.node.ubuntu 18.04" } "networkConfiguration": { "subnetId": "/subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", @@ -98,11 +99,11 @@ client-request-id: 00000000-0000-0000-0000-000000000000 "enableAutoScale": false, "enableInterNodeCommunication": true, "metadata": [ - { - "name": "myproperty", - "value": "myvalue" - } - ] + { + "name": "myproperty", + "value": "myvalue" + } + ] } ``` diff --git a/articles/batch/create-pool-availability-zones.md b/articles/batch/create-pool-availability-zones.md index b0d7f1c669299..21fa9483461ae 100644 --- a/articles/batch/create-pool-availability-zones.md +++ b/articles/batch/create-pool-availability-zones.md @@ -58,12 +58,12 @@ Request body "imageReference": { "publisher": "Canonical", "offer": "UbuntuServer", - "sku": "16.040-LTS" + "sku": "18.04-lts" }, "nodePlacementConfiguration": { "policy": "Zonal" } - "nodeAgentSKUId": "batch.node.ubuntu 16.04" + "nodeAgentSKUId": "batch.node.ubuntu 18.04" }, "resizeTimeout": "PT15M", "targetDedicatedNodes": 5, @@ -78,4 +78,4 @@ Request body - Learn about the [Batch service workflow and primary resources](batch-service-workflow-features.md) such as pools, nodes, jobs, and tasks. - Learn about [creating a pool in a subnet of an Azure virtual network](batch-virtual-network.md). -- Learn about [creating an Azure Batch pool without public IP addresses](./batch-pool-no-public-ip-address.md). +- Learn about [creating an Azure Batch pool without public IP addresses](./simplified-node-communication-pool-no-public-ip.md). diff --git a/articles/batch/create-pool-ephemeral-os-disk.md b/articles/batch/create-pool-ephemeral-os-disk.md index 531c3d01a2d1a..dbbfa7f7871f7 100644 --- a/articles/batch/create-pool-ephemeral-os-disk.md +++ b/articles/batch/create-pool-ephemeral-os-disk.md @@ -26,7 +26,7 @@ For Batch workloads, the main benefits of using ephemeral OS disks are reduced c To determine whether a VM series supports ephemeral OS disks, check the documentation for each VM instance. 
For example, the [Ddv4 and Ddsv4-series](../virtual-machines/ddv4-ddsv4-series.md) supports ephemeral OS disks. -Alternately, you can programmatically query to check the 'EphemeralOSDiskSupported' capability. An example PowerShell cmdlet to query this capability is provided in the [ephemeral OS disk frequently asked questions](../virtual-machines/ephemeral-os-disks.md#frequently-asked-questions). +Alternately, you can programmatically query to check the 'EphemeralOSDiskSupported' capability. An example PowerShell cmdlet to query this capability is provided in the [ephemeral OS disk frequently asked questions](../virtual-machines/ephemeral-os-disks-faq.md). ## Create a pool that uses ephemeral OS disks diff --git a/articles/batch/create-pool-public-ip.md b/articles/batch/create-pool-public-ip.md index 5817d9a05837f..28551d33a0861 100644 --- a/articles/batch/create-pool-public-ip.md +++ b/articles/batch/create-pool-public-ip.md @@ -7,11 +7,11 @@ ms.date: 12/20/2021 # Create an Azure Batch pool with specified public IP addresses -In Azure Batch, you can [create a Batch pool in a subnet of an Azure virtual network (VNet)](batch-virtual-network.md). Virtual machines (VMs) in the Batch pool are accessible through public IP addresses that Batch creates. These public IP addresses can change over the lifetime of the pool. If the IP addresses aren't refreshed, your network settings might become outdated. +In Azure Batch, you can [create a Batch pool in a subnet of an Azure virtual network (VNet)](batch-virtual-network.md). Virtual machines (VMs) in the Batch pool are accessible through public IP addresses that Batch creates. These public IP addresses can change over the lifetime of the pool. If the IP addresses aren't refreshed, your network settings might become outdated. -You can create a list of static public IP addresses to use with the VMs in your pool instead. In some cases, you might need to control the list of public IP addresses to make sure they don't change unexpectedly. For example, you might be working with an external service, such as a database, which restricts access to specific IP addresses. +You can create a list of static public IP addresses to use with the VMs in your pool instead. In some cases, you might need to control the list of public IP addresses to make sure they don't change unexpectedly. For example, you might be working with an external service, such as a database, which restricts access to specific IP addresses. -For information about creating pools without public IP addresses, read [Create an Azure Batch pool without public IP addresses](./batch-pool-no-public-ip-address.md). +For information about creating pools without public IP addresses, read [Create an Azure Batch pool without public IP addresses](./simplified-node-communication-pool-no-public-ip.md). ## Prerequisites @@ -27,7 +27,7 @@ For information about creating pools without public IP addresses, read [Create a Create one or more public IP addresses through one of these methods: - Use the [Azure portal](../virtual-network/ip-services/virtual-network-public-ip-address.md#create-a-public-ip-address) - Use the [Azure Command-Line Interface (Azure CLI)](/cli/azure/network/public-ip#az-network-public-ip-create) -- Use [Azure PowerShell](/powershell/module/az.network/new-azpublicipaddress). +- Use [Azure PowerShell](/powershell/module/az.network/new-azpublicipaddress). 
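For example, a minimal Azure CLI sketch that creates one address satisfying the requirements listed next could look like this; all names and the region are placeholders:

```azurecli
# Create a Standard SKU, statically assigned public IP address with a DNS label (placeholder values).
az network public-ip create \
    --resource-group MyResourceGroup \
    --name MyPoolPublicIP1 \
    --location eastus \
    --sku Standard \
    --allocation-method Static \
    --dns-name mypoolpublicip1
```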
Make sure your public IP addresses meet the following requirements: @@ -35,11 +35,11 @@ Make sure your public IP addresses meet the following requirements: - Set the **IP address assignment** to **Static**. - Set the **SKU** to **Standard**. - Specify a DNS name. -- Make sure no other resources use these public IP addresses, or the pool might experience allocation failures. Only use these public IP addresses for the VM configuration pools. +- Make sure no other resources use these public IP addresses, or the pool might experience allocation failures. Only use these public IP addresses for the VM configuration pools. - Make sure that no security policies or resource locks restrict user access to the public IP address. -- Create enough public IP addresses for the pool to accommodate the number of target VMs. - - This number must equal at least the sum of the **targetDedicatedNodes** and **targetLowPriorityNodes** properties of the pool. - - If you don't create enough IP addresses, the pool partially allocates the compute nodes, and a resize error happens. +- Create enough public IP addresses for the pool to accommodate the number of target VMs. + - This number must equal at least the sum of the **targetDedicatedNodes** and **targetLowPriorityNodes** properties of the pool. + - If you don't create enough IP addresses, the pool partially allocates the compute nodes, and a resize error happens. - Currently, Batch uses one public IP address for every 100 VMs. - Also create a buffer of public IP addresses. A buffer helps Batch with internal optimization for scaling down. A buffer also allows quicker scaling up after an unsuccessful scale up or scale down. We recommend adding one of the following amounts of buffer IP addresses; choose whichever number is greater. - Add at least one more IP address. @@ -69,9 +69,9 @@ Request body: "imageReference": { "publisher": "Canonical", "offer": "UbuntuServer", - "sku": "16.04.0-LTS" + "sku": "18.04-LTS" }, - "nodeAgentSKUId": "batch.node.ubuntu 16.04" + "nodeAgentSKUId": "batch.node.ubuntu 18.04" }, "networkConfiguration": { "subnetId": "/subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", diff --git a/articles/batch/manage-private-endpoint-connections.md b/articles/batch/manage-private-endpoint-connections.md new file mode 100644 index 0000000000000..6dff863f0ef43 --- /dev/null +++ b/articles/batch/manage-private-endpoint-connections.md @@ -0,0 +1,70 @@ +--- +title: Manage private endpoint connections with Azure Batch accounts +description: Learn how to manage private endpoint connections with Azure Batch accounts, including how to list, approve, reject, and remove them. +ms.topic: how-to +ms.date: 05/26/2022 +--- + +# Manage private endpoint connections with Azure Batch accounts + +You can query and manage all existing private endpoint connections for your Batch account. Supported management operations include: + +- Approve a pending connection. +- Reject a connection (either in pending or approved state). +- Remove a connection, which removes the connection from the Batch account and moves the associated private endpoint resource to the Disconnected state. + +## Azure portal + +1. Go to your Batch account in the Azure portal. +1. In **Settings**, select **Networking** and go to the **Private Access** tab. +1. Select the private connection, then perform the Approve/Reject/Remove operation.
+ + :::image type="content" source="media/private-connectivity/manage-private-connections.png" alt-text="Screenshot of managing private endpoint connections."::: + +## Az PowerShell module + +Examples using Az PowerShell module [`Az.Network`](/powershell/module/az.network#networking): + +```PowerShell +$accountResourceId = "/subscriptions//resourceGroups//providers/Microsoft.Batch/batchAccounts/" +$pecResourceId = "$accountResourceId/privateEndpointConnections/" + +# List all private endpoint connections for Batch account +Get-AzPrivateEndpointConnection -PrivateLinkResourceId $accountResourceId + +# Show the specified private endpoint connection +Get-AzPrivateEndpointConnection -ResourceId $pecResourceId + +# Approve connection +Approve-AzPrivateEndpointConnection -Description "Approved!" -ResourceId $pecResourceId + +# Reject connection +Deny-AzPrivateEndpointConnection -Description "Rejected!" -ResourceId $pecResourceId + +# Remove connection +Remove-AzPrivateEndpointConnection -ResourceId $pecResourceId +``` + +## Azure CLI + +Examples using Azure CLI ([`az network private-endpoint`](/cli/azure/network/private-endpoint)): + +```sh +accountResourceId="/subscriptions//resourceGroups//providers/Microsoft.Batch/batchAccounts/" +pecResourceId="$accountResourceId/privateEndpointConnections/" + +# List all private endpoint connections for Batch account +az network private-endpoint-connection list --id $accountResourceId + +# Show the specified private endpoint connection +az network private-endpoint-connection show --id $pecResourceId + +# Approve connection +az network private-endpoint-connection approve --description "Approved!" --id $pecResourceId + +# Reject connection +az network private-endpoint-connection reject --description "Rejected!" --id $pecResourceId + +# Remove connection +az network private-endpoint-connection delete --id $pecResourceId +``` diff --git a/articles/batch/media/batch-account-create-portal/batch-account-keys.png b/articles/batch/media/batch-account-create-portal/batch-account-keys.png index d0f7ccb082fd1..da757fe2af7eb 100644 Binary files a/articles/batch/media/batch-account-create-portal/batch-account-keys.png and b/articles/batch/media/batch-account-create-portal/batch-account-keys.png differ diff --git a/articles/batch/media/batch-account-create-portal/batch-account-networking.png b/articles/batch/media/batch-account-create-portal/batch-account-networking.png new file mode 100644 index 0000000000000..37a3a34e5bc6a Binary files /dev/null and b/articles/batch/media/batch-account-create-portal/batch-account-networking.png differ diff --git a/articles/batch/media/batch-account-create-portal/batch-account-portal.png b/articles/batch/media/batch-account-create-portal/batch-account-portal.png index cae6cb6d39621..d8a810e1e300b 100644 Binary files a/articles/batch/media/batch-account-create-portal/batch-account-portal.png and b/articles/batch/media/batch-account-create-portal/batch-account-portal.png differ diff --git a/articles/batch/media/batch-account-create-portal/batch-blade.png b/articles/batch/media/batch-account-create-portal/batch-blade.png new file mode 100644 index 0000000000000..6f8674861d9d3 Binary files /dev/null and b/articles/batch/media/batch-account-create-portal/batch-blade.png differ diff --git a/articles/batch/media/batch-account-create-portal/batch_blade.png b/articles/batch/media/batch-account-create-portal/batch_blade.png deleted file mode 100644 index a3d6e05e22b5b..0000000000000 Binary files 
a/articles/batch/media/batch-account-create-portal/batch_blade.png and /dev/null differ diff --git a/articles/batch/media/private-connectivity/access-private.png b/articles/batch/media/private-connectivity/access-private.png index dd3e9bf98d31e..b9f83b3be430d 100644 Binary files a/articles/batch/media/private-connectivity/access-private.png and b/articles/batch/media/private-connectivity/access-private.png differ diff --git a/articles/batch/media/private-connectivity/create-private-endpoint-basics.png b/articles/batch/media/private-connectivity/create-private-endpoint-basics.png new file mode 100644 index 0000000000000..f0d498eca0fcd Binary files /dev/null and b/articles/batch/media/private-connectivity/create-private-endpoint-basics.png differ diff --git a/articles/batch/media/private-connectivity/create-private-endpoint.png b/articles/batch/media/private-connectivity/create-private-endpoint.png index 63b8f7a69a222..5c04de8bbe42f 100644 Binary files a/articles/batch/media/private-connectivity/create-private-endpoint.png and b/articles/batch/media/private-connectivity/create-private-endpoint.png differ diff --git a/articles/batch/media/private-connectivity/manage-private-connections.png b/articles/batch/media/private-connectivity/manage-private-connections.png new file mode 100644 index 0000000000000..dbb01d7f4ad16 Binary files /dev/null and b/articles/batch/media/private-connectivity/manage-private-connections.png differ diff --git a/articles/batch/media/private-connectivity/private-endpoint-connections.png b/articles/batch/media/private-connectivity/private-endpoint-connections.png index b0d04b009a789..ba5770c84156c 100644 Binary files a/articles/batch/media/private-connectivity/private-endpoint-connections.png and b/articles/batch/media/private-connectivity/private-endpoint-connections.png differ diff --git a/articles/batch/media/public-access/batch-account-endpoints.png b/articles/batch/media/public-access/batch-account-endpoints.png new file mode 100644 index 0000000000000..5f3718c275615 Binary files /dev/null and b/articles/batch/media/public-access/batch-account-endpoints.png differ diff --git a/articles/batch/media/public-access/configure-public-access.png b/articles/batch/media/public-access/configure-public-access.png new file mode 100644 index 0000000000000..e6f2b826ca0b3 Binary files /dev/null and b/articles/batch/media/public-access/configure-public-access.png differ diff --git a/articles/batch/private-connectivity.md b/articles/batch/private-connectivity.md index c7888e9c6506d..2bb634962c9fc 100644 --- a/articles/batch/private-connectivity.md +++ b/articles/batch/private-connectivity.md @@ -1,111 +1,122 @@ --- title: Use private endpoints with Azure Batch accounts -description: Learn how to connect privately to an Azure Batch account by using private endpoints. +description: Learn how to connect privately to an Azure Batch account by using private endpoints. ms.topic: how-to -ms.date: 08/03/2021 +ms.date: 05/26/2022 ms.custom: references_regions --- # Use private endpoints with Azure Batch accounts -By default, [Azure Batch accounts](accounts.md) have a public endpoint and are publicly accessible. The Batch service offers the ability to create private Batch accounts, disabling the public network access. +By default, [Azure Batch accounts](accounts.md) have public endpoints and are publicly accessible. The Batch service offers the ability to create private endpoint for Batch accounts, allowing private network access to the Batch service. 
By using [Azure Private Link](../private-link/private-link-overview.md), you can connect to an Azure Batch account via a [private endpoint](../private-link/private-endpoint-overview.md). The private endpoint is a set of private IP addresses in a subnet within your virtual network. You can then limit access to an Azure Batch account over private IP addresses. Private Link allows users to access an Azure Batch account from within the virtual network or from any peered virtual network. Resources mapped to Private Link are also accessible on-premises over private peering through VPN or [Azure ExpressRoute](../expressroute/expressroute-introduction.md). You can connect to an Azure Batch account configured with Private Link by using the [automatic or manual approval method](../private-link/private-endpoint-overview.md#access-to-a-private-link-resource-using-approval-workflow). -> [!IMPORTANT] -> Support for private connectivity in Azure Batch is currently available for all regions except Germany Central and Germany Northeast. +This article describes the steps to create a private endpoint to access Batch account endpoints. + +## Private endpoint sub-resources supported for Batch account + +Batch account resource has two endpoints supported to access with private endpoints: + +- Account endpoint (sub-resource: **batchAccount**): this is the endpoint for [Batch Service REST API](/rest/api/batchservice/) (data plane), for example managing pools, compute nodes, jobs, tasks, etc. -This article describes the steps to create a private Batch account and access it using a private endpoint. +- Node management endpoint (sub-resource: **nodeManagement**): used by Batch pool nodes to access Batch node management service. This is only applicable when using [simplified compute node communication](simplified-compute-node-communication.md). This feature is in preview. + +> [!IMPORTANT] +> - This preview sub-resource is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. +> - For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). ## Azure portal -Use the following steps to create a private Batch account using the Azure portal: - -1. From the **Create a resource** pane, choose **Batch Service** and then select **Create**. -2. Enter the subscription, resource group, region and Batch account name in the **Basics** tab, then select **Next: Advanced**. -3. In the **Advanced** tab, set **Public network access** to **Disabled**. -4. In **Settings**, select **Private endpoint connections** and then select **+ Private endpoint**. - :::image type="content" source="media/private-connectivity/private-endpoint-connections.png" alt-text="Private endpoint connections"::: -5. In the **Basics** pane, enter or select the subscription, resource group, private endpoint resource name and region details, then select **Next: Resource**. -6. In the **Resource** pane, set the **Resource type** to **Microsoft.Batch/batchAccounts**. Select the private Batch account you want to access, then select **Next: Configuration**. - :::image type="content" source="media/private-connectivity/create-private-endpoint.png" alt-text="Create a private endpoint - Resource pane"::: -7. In the **Configuration** pane, enter or select this information: - - **Virtual network**: Select your virtual network. - - **Subnet**: Select your subnet. 
- - **Integrate with private DNS zone**: Select **Yes**. To connect privately with your private endpoint, you need a DNS record. We recommend that you integrate your private endpoint with a private DNS zone. You can also use your own DNS servers or create DNS records by using the host files on your virtual machines. - - **Private DNS Zone**: Select privatelink.\.batch.azure.com. The private DNS zone is determined automatically. You can't change it by using the Azure portal. -8. Select **Review + create**, then wait for Azure to validate your configuration. -9. When you see the **Validation passed** message, select **Create**. - -After the private endpoint is provisioned, you can access the Batch account from VMs in the same virtual network using the private endpoint. +Use the following steps to create a private endpoint with your Batch account using the Azure portal: + +1. Go to your Batch account in the Azure portal. +2. In **Settings**, select **Networking** and go to the tab **Private Access**. Then, select **+ Private endpoint**. + :::image type="content" source="media/private-connectivity/private-endpoint-connections.png" alt-text="Screenshot of private endpoint connections."::: +3. In the **Basics** pane, enter or select the subscription, resource group, private endpoint resource name and region details, then select **Next: Resource**. + :::image type="content" source="media/private-connectivity/create-private-endpoint-basics.png" alt-text="Screenshot of creating a private endpoint - Basics pane."::: +4. In the **Resource** pane, set the **Resource type** to **Microsoft.Batch/batchAccounts**. Select the Batch account you want to access, select the target sub-resource, then select **Next: Configuration**. + :::image type="content" source="media/private-connectivity/create-private-endpoint.png" alt-text="Screenshot of creating a private endpoint - Resource pane."::: +5. In the **Configuration** pane, enter or select this information: + - For **Virtual network**, select your virtual network. + - For **Subnet**, select your subnet. + - For **Private IP configuration**, select the default **Dynamically allocate IP address**. + - For **Integrate with private DNS zone**, select **Yes**. To connect privately with your private endpoint, you need a DNS record. We recommend that you integrate your private endpoint with a private DNS zone. You can also use your own DNS servers or create DNS records by using the host files on your virtual machines. + - For **Private DNS Zone**, select **privatelink.batch.azure.com**. The private DNS zone is determined automatically. You can't change this setting by using the Azure portal. > [!IMPORTANT] -> Performing operations outside of the virtual network where the private endpoint is provisioned will result in an "AuthorizationFailure" message in the Azure Portal. +> If you have existing private endpoints created with previous private DNS zone `privatelink..batch.azure.com`, please follow [Migration with existing Batch account private endpoints](#migration-with-existing-batch-account-private-endpoints). + +6. Select **Review + create**, then wait for Azure to validate your configuration. +7. When you see the **Validation passed** message, select **Create**. -To view the IP address from the Azure portal: +> [!NOTE] +> You can also create the private endpoint from **Private Link Center** in Azure portal, or create a new resource by searching **private endpoint**. 
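The portal steps above can also be approximated from the command line. The following Azure CLI sketch isn't part of the article itself; every name and the subscription ID are placeholders, and it creates a private endpoint for the **batchAccount** sub-resource in an existing virtual network and subnet:

```azurecli
# Create a private endpoint targeting the batchAccount sub-resource (placeholder names throughout).
# Older CLI versions may expose this flag as --group-ids instead of --group-id.
az network private-endpoint create \
    --resource-group MyResourceGroup \
    --name mybatchaccount-pe \
    --vnet-name MyVnet \
    --subnet MySubnet \
    --private-connection-resource-id "/subscriptions/<subscription-id>/resourceGroups/MyResourceGroup/providers/Microsoft.Batch/batchAccounts/mybatchaccount" \
    --group-id batchAccount \
    --connection-name mybatchaccount-pe-connection
```

Note that this command doesn't handle DNS integration; you would still create or link the `privatelink.batch.azure.com` private DNS zone as described above.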
+ +## Use the private endpoint + +After the private endpoint is provisioned, you can access the Batch account from within the same virtual network using the private endpoint. + +- Private endpoint for **batchAccount**: can access Batch account data plane to manage pools/jobs/tasks. + +- Private endpoint for **nodeManagement**: Batch pool's compute nodes can connect to and be managed by Batch node management service. + +> [!IMPORTANT] +> If [public network access](public-network-access.md) is disabled with Batch account, performing account operations (for example pools, jobs) outside of the virtual network where the private endpoint is provisioned will result in an "AuthorizationFailure" message for Batch account in the Azure Portal. + +To view the IP addresses for the private endpoint from the Azure portal: 1. Select **All resources**. 2. Search for the private endpoint that you created earlier. -3. Select the **Overview** tab to see the DNS settings and IP addresses. +3. Select the **DNS Configuration** tab to see the DNS settings and IP addresses. :::image type="content" source="media/private-connectivity/access-private.png" alt-text="Private endpoint DNS settings and IP addresses"::: -## Azure Resource Manager template - -When [creating a Batch account by using Azure Resource Manager template](quick-create-template.md), modify the template to set **publicNetworkAccess** to **Disabled** as shown below. - -```json -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - "batchAccountName": { - "type": "string", - }, - "location": { - "type": "string", - } - }, - "resources": [ - { - "name": "[parameters('batchAccountName')]", - "type": "Microsoft.Batch/batchAccounts", - "apiVersion": "2020-03-01-preview", - "location": "[parameters('location')]", - "dependsOn": [] - "properties": { - "poolAllocationMode": "BatchService" - "publicNetworkAccess": "Disabled" - } - } - ] -} -``` - ## Configure DNS zones Use a [private DNS zone](../dns/private-dns-privatednszone.md) within the subnet where you've created the private endpoint. Configure the endpoints so that each private IP address is mapped to a DNS entry. When you're creating the private endpoint, you can integrate it with a [private DNS zone](../dns/private-dns-privatednszone.md) in Azure. If you choose to instead use a [custom domain](../dns/dns-custom-domain.md), you must configure it to add DNS records for all private IP addresses reserved for the private endpoint. +## Migration with existing Batch account private endpoints + +With the introduction of the new private endpoint sub-resource `nodeManagement` for Batch node management endpoint, the default private DNS zone for Batch account is simplified from `privatelink..batch.azure.com` to `privatelink.batch.azure.com`. The existing private endpoints for sub-resource `batchAccount` will continue to work, and no action is needed. + +However, if you have existing `batchAccount` private endpoints that are enabled with automatic private DNS integration using previous private DNS zone, extra configuration is needed for the new `batchAccount` private endpoint to create in the same virtual network: + +- If you don't need the previous private endpoint anymore, delete the private endpoint. Also unlink the previous private DNS zone from your virtual network. No more configuration is needed for the new private endpoint. + +- Otherwise, after the new private endpoint is created: + + 1. 
make sure the automatic private DNS integration has a DNS A record created in the new private DNS zone `privatelink.batch.azure.com`. For example, `myaccount. A `. + + 1. Go to previous private DNS zone `privatelink..batch.azure.com`. + + 1. Manually add a DNS CNAME record. For example, `myaccount CNAME => myaccount..privatelink.batch.azure.com`. + +> [!IMPORTANT] +> This manual mitigation is only needed when you create a new **batchAccount** private endpoint with private DNS integration in the same virtual network which has existing private endpoints. + ## Pricing For details on costs related to private endpoints, see [Azure Private Link pricing](https://azure.microsoft.com/pricing/details/private-link/). ## Current limitations and best practices -When creating your private Batch account, keep in mind the following: +When creating a private endpoint with your Batch account, keep in mind the following: -- Private endpoint resources must be created in the same subscription as the Batch account. -- To delete the private connection, you must delete the private endpoint resource. -- Once a Batch account is created with public network access, you can't change it to private access only. -- DNS records in the private DNS zone are not removed automatically when you delete a private endpoint or when you remove a region from the Batch account. You must manually remove the DNS records before adding a new private endpoint linked to this private DNS zone. If you don't clean up the DNS records, unexpected data plane issues might happen, such as data outages to regions added after private endpoint removal or region removal. +- Private endpoint resources with the sub-resource **batchAccount** must be created in the same subscription as the Batch account. +- Resource movement is not supported for private endpoints with Batch accounts. +- If a Batch account resource is moved to a different resource group or subscription, the private endpoints can still work, but the association to the Batch account breaks. If you delete the private endpoint resource, its associated private endpoint connection still exists in your Batch account. You can manually remove connection from your Batch account. +- To delete the private connection, either delete the private endpoint resource, or delete the private connection in the Batch account (this action disconnects the related private endpoint resource). +- DNS records in the private DNS zone are not removed automatically when you delete a private endpoint connection from the Batch account. You must manually remove the DNS records before adding a new private endpoint linked to this private DNS zone. If you don't clean up the DNS records, unexpected access issues might happen. ## Next steps - Learn how to [create Batch pools in virtual networks](batch-virtual-network.md). -- Learn how to [create Batch pools without public IP addresses](batch-pool-no-public-ip-address.md) -- Learn how to [create Batch pools with specified public IP addresses](create-pool-public-ip.md). +- Learn how to [create Batch pools without public IP addresses](simplified-node-communication-pool-no-public-ip.md). +- Learn how to [configure public network access for Batch accounts](public-network-access.md). +- Learn how to [manage private endpoint connections for Batch accounts](manage-private-endpoint-connections.md). - Learn about [Azure Private Link](../private-link/private-link-overview.md). 
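As a rough sketch of the manual CNAME mitigation described in the migration section above — the account name, region, and resource group are placeholders, and the exact record names depend on your account:

```azurecli
# In the previous region-scoped private DNS zone, point the account name at the record in the new zone (placeholders).
az network private-dns record-set cname set-record \
    --resource-group MyResourceGroup \
    --zone-name "privatelink.<region>.batch.azure.com" \
    --record-set-name myaccount \
    --cname "myaccount.<region>.privatelink.batch.azure.com"
```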
diff --git a/articles/batch/public-network-access.md b/articles/batch/public-network-access.md
new file mode 100644
index 0000000000000..f974f1c5f2b25
--- /dev/null
+++ b/articles/batch/public-network-access.md
@@ -0,0 +1,68 @@
+---
+title: Configure public network access with Azure Batch accounts
+description: Learn how to configure public network access with Azure Batch accounts, for example enable, disable, or manage network rules for public network access.
+ms.topic: how-to
+ms.date: 05/26/2022
+---
+
+# Configure public network access with Azure Batch accounts
+
+By default, [Azure Batch accounts](accounts.md) have public endpoints and are publicly accessible. This article shows how to configure your Batch account to allow access from only specific public IP addresses or IP address ranges.
+
+IP network rules are configured on the public endpoints. IP network rules don't apply to private endpoints configured with [Private Link](private-connectivity.md).
+
+Each endpoint supports a maximum of 200 IP network rules.
+
+## Batch account public endpoints
+
+Batch accounts have two public endpoints:
+
+- The *Account endpoint* is the endpoint for [Batch Service REST API](/rest/api/batchservice/) (data plane). Use this endpoint for managing pools, compute nodes, jobs, tasks, etc.
+- The *Node management endpoint* is used by Batch pool nodes to access the Batch node management service. This endpoint is only applicable when using [simplified compute node communication](simplified-compute-node-communication.md).
+
+You can check both endpoints in account properties when you query the Batch account with [Batch Management REST API](/rest/api/batchmanagement/batch-account/get). You can also check them in the overview for your Batch account in the Azure portal:
+
+    :::image type="content" source="media/public-access/batch-account-endpoints.png" alt-text="Screenshot of Batch account endpoints.":::
+
+You can configure public network access to Batch account endpoints with the following options:
+
+- **All networks**: allow public network access with no restriction.
+- **Selected networks**: allow public network access only from allowed network rules.
+- **Disabled**: disable public network access; private endpoints are then required to access Batch account endpoints.
+
+## Access from selected public networks
+
+1. In the portal, navigate to your Batch account.
+1. Under **Settings**, select **Networking**.
+1. On the **Public access** tab, select to allow public access from **Selected networks**.
+1. Under access for each endpoint, enter public IP addresses or address ranges in CIDR notation, one at a time. :::image type="content" source="media/public-access/configure-public-access.png" alt-text="Screenshot of public access with Batch account.":::
+1. Select **Save**.
+
+> [!NOTE]
+> After adding a rule, it takes a few minutes for the rule to take effect.
+
+> [!TIP]
+> To configure IP network rules for the node management endpoint, you'll need to know the public IP addresses or address ranges used by the Batch pool's internet outbound access. These can typically be determined for Batch pools created in a [virtual network](batch-virtual-network.md) or with [specified public IP addresses](create-pool-public-ip.md).
+
+## Disable public network access
+
+Optionally, disable public network access to Batch account endpoints. Disabling the public network access overrides all IP network rule configurations.
For example, you might want to disable public access to a Batch account secured in a virtual network using [Private Link](private-connectivity.md). + +1. In the portal, navigate to your Batch account and select **Settings > Networking**. +1. On the **Public access** tab, select **Disabled**. +1. Select **Save**. + +## Restore public network access + +To re-enable the public network access, update the networking settings to allow public access. Enabling the public access overrides all IP network rule configurations, and will allow access from any IP addresses. + +1. In the portal, navigate to your Batch account and select **Settings > Networking**. +1. On the **Public access** tab, select **All networks**. +1. Select **Save**. + +## Next steps + +- Learn how to [use private endpoints with Batch accounts](private-connectivity.md). +- Learn how to [use simplified compute node communication](simplified-compute-node-communication.md). +- Learn more about [creating pools in a virtual network](batch-virtual-network.md). diff --git a/articles/batch/quick-create-cli.md b/articles/batch/quick-create-cli.md index 88abcc8b1abcd..898908d654d5d 100644 --- a/articles/batch/quick-create-cli.md +++ b/articles/batch/quick-create-cli.md @@ -46,7 +46,7 @@ az storage account create \ Create a Batch account with the [az batch account create](/cli/azure/batch/account#az-batch-account-create) command. You need an account to create compute resources (pools of compute nodes) and Batch jobs. -The following example creates a Batch account named *mybatchaccount* in *QuickstartBatch-rg*, and links the storage account you created. +The following example creates a Batch account named *mybatchaccount* in *QuickstartBatch-rg*, and links the storage account you created. ```azurecli-interactive az batch account create \ @@ -67,14 +67,14 @@ az batch account login \ ## Create a pool of compute nodes -Now that you have a Batch account, create a sample pool of Linux compute nodes using the [az batch pool create](/cli/azure/batch/pool#az-batch-pool-create) command. The following example creates a pool named *mypool* of two *Standard_A1_v2* nodes running Ubuntu 16.04 LTS. The suggested node size offers a good balance of performance versus cost for this quick example. - +Now that you have a Batch account, create a sample pool of Linux compute nodes using the [az batch pool create](/cli/azure/batch/pool#az-batch-pool-create) command. The following example creates a pool named *mypool* of two *Standard_A1_v2* nodes running Ubuntu 18.04 LTS. The suggested node size offers a good balance of performance versus cost for this quick example. + ```azurecli-interactive az batch pool create \ --id mypool --vm-size Standard_A1_v2 \ --target-dedicated-nodes 2 \ - --image canonical:ubuntuserver:16.04-LTS \ - --node-agent-sku-id "batch.node.ubuntu 16.04" + --image canonical:ubuntuserver:18.04-LTS \ + --node-agent-sku-id "batch.node.ubuntu 18.04" ``` Batch creates the pool immediately, but it takes a few minutes to allocate and start the compute nodes. During this time, the pool is in the `resizing` state. To see the status of the pool, run the [az batch pool show](/cli/azure/batch/pool#az-batch-pool-show) command. This command shows all the properties of the pool, and you can query for specific properties. 
The following command gets the allocation state of the pool: diff --git a/articles/batch/scripts/batch-cli-sample-add-application.md b/articles/batch/scripts/batch-cli-sample-add-application.md index 3782ccc17acd8..37699a727ad00 100644 --- a/articles/batch/scripts/batch-cli-sample-add-application.md +++ b/articles/batch/scripts/batch-cli-sample-add-application.md @@ -2,33 +2,61 @@ title: Azure CLI Script Example - Add an Application in Batch | Microsoft Docs description: Learn how to add an application for use with an Azure Batch pool or a task using the Azure CLI. ms.topic: sample -ms.date: 09/17/2021 +ms.date: 05/24/2022 ms.custom: devx-track-azurecli, seo-azure-cli keywords: batch, azure cli samples, azure cli code samples, azure cli script samples --- # CLI example: Add an application to an Azure Batch account -This script demonstrates how to add an application for use with an Azure Batch pool or task. To set up an application to add to your Batch account, package your executable, together with any dependencies, into a zip file. +This script demonstrates how to add an application for use with an Azure Batch pool or task. To set up an application to add to your Batch account, package your executable, together with any dependencies, into a zip file. + +[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] [!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] - - This tutorial requires version 2.0.20 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. +## Sample script + +[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../includes/cli-launch-cloud-shell-sign-in.md)] + +### Create batch account and new application + +:::code language="azurecli" source="~/azure_cli_scripts/batch/add-application/add-application.sh" id="FullScript"::: + +### Create batch application package -## Example script +An application can reference multiple application executable packages of different versions. The executables and any dependencies need to be zipped up for the package. Once uploaded, the CLI attempts to activate the package so that it's ready for use. -[!code-azurecli-interactive[main](../../../cli_scripts/batch/add-application/add-application.sh "Add Application")] +```azurecli +az batch application package create \ + --resource-group $resourceGroup \ + --name $batchAccount \ + --application-name "MyApplication" \ + --package-file my-application-exe.zip \ + --version-name 1.0 +``` + +### Update the application + +Update the application to assign the newly added application package as the default version. + +```azurecli +az batch application set \ + --resource-group $resourceGroup \ + --name $batchAccount \ + --application-name "MyApplication" \ + --default-version 1.0 +``` -## Clean up deployment +## Clean up resources -Run the following command to remove the -resource group and all resources associated with it. +[!INCLUDE [cli-clean-up-resources.md](../../../includes/cli-clean-up-resources.md)] -```azurecli-interactive -az group delete --name myResourceGroup +```azurecli +az group delete --name $resourceGroup ``` -## Script explanation +## Sample reference This script uses the following commands. Each command in the table links to command-specific documentation. 
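In addition to the commands in that table, the following sketch shows one way a pool could consume the application package created above. The pool ID, VM size, and image are illustrative assumptions (they aren't part of the sample script), and the command assumes you've already run `az batch account login` for the account.

```azurecli
# Create a pool whose nodes download and install version 1.0 of MyApplication.
az batch pool create \
    --id mypool \
    --vm-size Standard_A1_v2 \
    --target-dedicated-nodes 1 \
    --image canonical:ubuntuserver:18.04-LTS \
    --node-agent-sku-id "batch.node.ubuntu 18.04" \
    --application-package-references "MyApplication#1.0"
```

On each node, Batch exposes the downloaded package location through an `AZ_BATCH_APP_PACKAGE_*` environment variable (the exact name varies by operating system), which tasks can use to locate the executable.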
diff --git a/articles/batch/scripts/batch-cli-sample-create-account.md b/articles/batch/scripts/batch-cli-sample-create-account.md index 7f8139f10aa9a..135b192d2e549 100644 --- a/articles/batch/scripts/batch-cli-sample-create-account.md +++ b/articles/batch/scripts/batch-cli-sample-create-account.md @@ -1,8 +1,8 @@ --- title: Azure CLI Script Example - Create Batch account - Batch service | Microsoft Docs -description: Learn how to create a Batch account in Batch service mode with this Azure CLI script example. This also script shows how to query or update various properties of the account. +description: Learn how to create a Batch account in Batch service mode with this Azure CLI script example. This script also shows how to query or update various properties of the account. ms.topic: sample -ms.date: 09/17/2021 +ms.date: 05/24/2022 ms.custom: devx-track-azurecli, seo-azure-cli keywords: batch, azure cli samples, azure cli code samples, azure cli script samples --- @@ -10,27 +10,29 @@ keywords: batch, azure cli samples, azure cli code samples, azure cli script sam # CLI example: Create a Batch account in Batch service mode This script creates an Azure Batch account in Batch service mode and shows how to query or update various properties of the account. When you create a Batch account in the default Batch service mode, its compute nodes are assigned internally by the Batch -service. Allocated compute nodes are subject to a separate vCPU (core) quota and the account can be -authenticated either via shared key credentials or an Azure Active Directory token. +service. Allocated compute nodes are subject to a separate vCPU (core) quota and the account can be authenticated either via shared key credentials or an Azure Active Directory token. + +[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] [!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] -- This tutorial requires version 2.0.20 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. +## Sample script + +[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../includes/cli-launch-cloud-shell-sign-in.md)] -## Example script +### Run the script -[!code-azurecli-interactive[main](../../../cli_scripts/batch/create-account/create-account.sh "Create Account")] +:::code language="azurecli" source="~/azure_cli_scripts/batch/create-account/create-account.sh" id="FullScript"::: -## Clean up deployment +## Clean up resources -Run the following command to remove the -resource group and all resources associated with it. +[!INCLUDE [cli-clean-up-resources.md](../../../includes/cli-clean-up-resources.md)] -```azurecli-interactive -az group delete --name myResourceGroup +```azurecli +az group delete --name $resourceGroup ``` -## Script explanation +## Sample reference This script uses the following commands. Each command in the table links to command-specific documentation. 
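To complement that table, here's a small sketch of the kind of query and authentication calls the article describes. The account and resource group names are placeholders, and the commands assume the account has already been created by the sample script.

```azurecli
# Query properties of the Batch account, such as the account endpoint and pool allocation mode.
az batch account show \
    --name mybatchaccount \
    --resource-group myResourceGroup

# Authenticate later data-plane commands (pools, jobs, tasks) with shared key credentials;
# omit --shared-key-auth to use an Azure Active Directory token instead.
az batch account login \
    --name mybatchaccount \
    --resource-group myResourceGroup \
    --shared-key-auth
```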
diff --git a/articles/batch/scripts/batch-cli-sample-create-user-subscription-account.md b/articles/batch/scripts/batch-cli-sample-create-user-subscription-account.md index ccfa51a25e88a..f46fd5510342a 100644 --- a/articles/batch/scripts/batch-cli-sample-create-user-subscription-account.md +++ b/articles/batch/scripts/batch-cli-sample-create-user-subscription-account.md @@ -2,7 +2,7 @@ title: Azure CLI Script Example - Create Batch account - user subscription | Microsoft Docs description: Learn how to create an Azure Batch account in user subscription mode. This account allocates compute nodes into your subscription. ms.topic: sample -ms.date: 09/17/2021 +ms.date: 05/24/2022 ms.custom: devx-track-azurecli, seo-azure-cli keywords: batch, azure cli samples, azure cli examples, azure cli code samples --- @@ -11,23 +11,27 @@ keywords: batch, azure cli samples, azure cli examples, azure cli code samples This script creates an Azure Batch account in user subscription mode. An account that allocates compute nodes into your subscription must be authenticated via an Azure Active Directory token. The compute nodes allocated count toward your subscription's vCPU (core) quota. +[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] + [!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] -- This tutorial requires version 2.0.20 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. +## Sample script + +[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../includes/cli-launch-cloud-shell-sign-in.md)] -## Example script +### Run the script -[!code-azurecli-interactive[main](../../../cli_scripts/batch/create-account/create-account-user-subscription.sh "Create Account using user subscription")] +:::code language="azurecli" source="~/azure_cli_scripts/batch/create-account/create-account-user-subscription.sh" id="FullScript"::: -## Clean up deployment +## Clean up resources -Run the following command to remove the resource group and all resources associated with it. +[!INCLUDE [cli-clean-up-resources.md](../../../includes/cli-clean-up-resources.md)] -```azurecli-interactive -az group delete --name myResourceGroup +```azurecli +az group delete --name $resourceGroup ``` -## Script explanation +## Sample reference This script uses the following commands. Each command in the table links to command-specific documentation. diff --git a/articles/batch/scripts/batch-cli-sample-manage-linux-pool.md b/articles/batch/scripts/batch-cli-sample-manage-linux-pool.md index 61050c82a197e..8c5d4dffa087d 100644 --- a/articles/batch/scripts/batch-cli-sample-manage-linux-pool.md +++ b/articles/batch/scripts/batch-cli-sample-manage-linux-pool.md @@ -2,7 +2,7 @@ title: Azure CLI Script Example - Linux Pool in Batch | Microsoft Docs description: Learn the commands available in the Azure CLI to create and manage a pool of Linux compute nodes in Azure Batch. ms.topic: sample -ms.date: 09/17/2021 +ms.date: 05/24/2022 ms.custom: devx-track-azurecli, seo-azure-cli keywords: linux, azure cli samples, azure cli code samples, azure cli script samples --- @@ -11,24 +11,48 @@ keywords: linux, azure cli samples, azure cli code samples, azure cli script sam This script demonstrates some of the commands available in the Azure CLI to create and manage a pool of Linux compute nodes in Azure Batch. 
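As a hedged illustration (assuming the `mypool-linux` pool from the sample script exists and you've already run `az batch account login`), you can list the pool's compute nodes and their states before rebooting or deleting them:

```azurecli
# Show the ID and current state of each compute node in the pool.
az batch node list \
    --pool-id mypool-linux \
    --query "[].{id:id, state:state}" \
    --output table
```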
+[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] + [!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] -- This tutorial requires version 2.0.20 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. +## Sample script + +[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../includes/cli-launch-cloud-shell-sign-in.md)] + +### To create a Linux pool in Azure Batch + +:::code language="azurecli" source="~/azure_cli_scripts/batch/manage-pool/manage-pool-linux.sh" id="FullScript"::: + +### To reboot a batch node -## Example script +If a particular node in the pool is having issues, it can be rebooted or reimaged. The ID of the node can be retrieved with the list command above. A typical node ID is in the format `tvm-xxxxxxxxxx_1-`. -[!code-azurecli-interactive[main](../../../cli_scripts/batch/manage-pool/manage-pool-linux.sh "Manage Linux Virtual Machine Pool")] +```azurecli +az batch node reboot \ + --pool-id mypool-linux \ + --node-id tvm-123_1-20170316t000000z +``` + +### To delete a batch node + +One or more compute nodes can be deleted from the pool, and any work already assigned to it can be re-allocated to another node. + +```azurecli +az batch node delete \ + --pool-id mypool-linux \ + --node-list tvm-123_1-20170316t000000z tvm-123_2-20170316t000000z \ + --node-deallocation-option requeue +``` -## Clean up deployment +## Clean up resources -Run the following command to remove the -resource group and all resources associated with it. +[!INCLUDE [cli-clean-up-resources.md](../../../includes/cli-clean-up-resources.md)] -```azurecli-interactive -az group delete --name myResourceGroup +```azurecli +az group delete --name $resourceGroup ``` -## Script explanation +## Sample reference This script uses the following commands. Each command in the table links to command-specific documentation. diff --git a/articles/batch/scripts/batch-cli-sample-manage-windows-pool.md b/articles/batch/scripts/batch-cli-sample-manage-windows-pool.md index 056dd5502980f..5ec90314f674f 100644 --- a/articles/batch/scripts/batch-cli-sample-manage-windows-pool.md +++ b/articles/batch/scripts/batch-cli-sample-manage-windows-pool.md @@ -2,7 +2,7 @@ title: Azure CLI Script Example - Windows Pool in Batch | Microsoft Docs description: Learn some of the commands available in the Azure CLI to create and manage a pool of Windows compute nodes in Azure Batch. ms.topic: sample -ms.date: 09/17/2021 +ms.date: 05/24/2022 ms.custom: devx-track-azurecli, seo-azure-cli keywords: windows pool, azure cli samples, azure cli code samples, azure cli script samples --- @@ -10,27 +10,29 @@ keywords: windows pool, azure cli samples, azure cli code samples, azure cli scr # CLI example: Create and manage a Windows pool in Azure Batch This script demonstrates some of the commands available in the Azure CLI to create and -manage a pool of Windows compute nodes in Azure Batch. A Windows pool can be configured in two ways, with either a Cloud Services configuration -or a Virtual Machine configuration. This example shows how to create a Windows pool with the Cloud Services configuration. +manage a pool of Windows compute nodes in Azure Batch. A Windows pool can be configured in two ways, with either a Cloud Services configuration or a Virtual Machine configuration. This example shows how to create a Windows pool with the Cloud Services configuration. 
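Because Cloud Services configuration is deprecated elsewhere in these docs in favor of Virtual Machine configuration, here's a minimal sketch of an equivalent pool using Virtual Machine configuration. The pool ID, VM size, and image are assumptions for illustration and aren't part of the sample script.

```azurecli
# Create a Windows pool using Virtual Machine configuration instead of Cloud Services.
az batch pool create \
    --id mypool-windows-vm \
    --vm-size Standard_A1_v2 \
    --target-dedicated-nodes 2 \
    --image MicrosoftWindowsServer:WindowsServer:2019-Datacenter \
    --node-agent-sku-id "batch.node.windows amd64"
```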
+ +[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] [!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] -- This tutorial requires version 2.0.20 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. +## Sample script + +[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../includes/cli-launch-cloud-shell-sign-in.md)] -## Example script +### Run the script -[!code-azurecli-interactive[main](../../../cli_scripts/batch/manage-pool/manage-pool-windows.sh "Manage Windows Cloud Services Pool")] +:::code language="azurecli" source="~/azure_cli_scripts/batch/manage-pool/manage-pool-windows.sh" id="FullScript"::: -## Clean up deployment +## Clean up resources -Run the following command to remove the -resource group and all resources associated with it. +[!INCLUDE [cli-clean-up-resources.md](../../../includes/cli-clean-up-resources.md)] -```azurecli-interactive -az group delete --name myResourceGroup +```azurecli +az group delete --name $resourceGroup ``` -## Script explanation +## Sample reference This script uses the following commands. Each command in the table links to command-specific documentation. diff --git a/articles/batch/scripts/batch-cli-sample-run-job.md b/articles/batch/scripts/batch-cli-sample-run-job.md index 8f23e47135dd6..1d1c4d7ce585d 100644 --- a/articles/batch/scripts/batch-cli-sample-run-job.md +++ b/articles/batch/scripts/batch-cli-sample-run-job.md @@ -2,34 +2,70 @@ title: Azure CLI Script Example - Run a Batch job | Microsoft Docs description: Learn how to create a Batch job and add a series of tasks to the job using the Azure CLI. This article also shows how to monitor a job and its tasks. ms.topic: sample -ms.date: 09/17/2021 +ms.date: 05/24/2022 ms.custom: devx-track-azurecli, seo-azure-cli keywords: batch, batch job, monitor job, azure cli samples, azure cli code samples, azure cli script samples --- # CLI example: Run a job and tasks with Azure Batch -This script creates a Batch job and adds a series of tasks to the job. It also demonstrates -how to monitor a job and its tasks. +This script creates a Batch job and adds a series of tasks to the job. It also demonstrates how to monitor a job and its tasks. + +[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] [!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] -- This tutorial requires version 2.0.20 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. +## Sample script + +[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../includes/cli-launch-cloud-shell-sign-in.md)] + +### Create a Batch account in Batch service mode + +:::code language="azurecli" source="~/azure_cli_scripts/batch/run-job/run-job.sh" id="FullScript"::: + +### To add many tasks at once + +To add many tasks at once, specify the tasks in a JSON file, and pass it to the command. For format, see https://github.com/Azure/azure-docs-cli-python-samples/blob/master/batch/run-job/tasks.json. Provide the absolute path to the JSON file. For an example JSON file, see https://github.com/Azure-Samples/azure-cli-samples/blob/master/batch/run-job/tasks.json. + +```azurecli +az batch task create \ + --job-id myjob \ + --json-file tasks.json +``` + +### To update the job + +Update the job so that it is automatically marked as completed once all the tasks are finished. 
+ +```azurecli +az batch job set \ +--job-id myjob \ +--on-all-tasks-complete terminatejob +``` + +### To monitor the status of the job + +```azurecli +az batch job show --job-id myjob +``` -## Example script +### To monitor the status of a task -[!code-azurecli-interactive[main](../../../cli_scripts/batch/run-job/run-job.sh "Run Job")] +```azurecli +az batch task show \ + --job-id myjob \ + --task-id task1 +``` -## Clean up deployment +## Clean up resources -Run the following command to remove the -resource group and all resources associated with it. +[!INCLUDE [cli-clean-up-resources.md](../../../includes/cli-clean-up-resources.md)] -```azurecli-interactive -az group delete --name myResourceGroup +```azurecli +az group delete --name $resourceGroup ``` -## Script explanation +## Sample reference This script uses the following commands. Each command in the table links to command-specific documentation. diff --git a/articles/batch/security-best-practices.md b/articles/batch/security-best-practices.md index c14a25380d0bd..12a2dfe5d8589 100644 --- a/articles/batch/security-best-practices.md +++ b/articles/batch/security-best-practices.md @@ -13,7 +13,7 @@ By default, Azure Batch accounts have a public endpoint and are publicly accessi :::image type="content" source="media/security-best-practices/typical-environment.png" alt-text="Diagram showing a typical Batch environment."::: -Many features are available to help you create a more secure Azure Batch deployment. You can restrict access to nodes and reduce the discoverability of the nodes from the internet by [provisioning the pool without public IP addresses](batch-pool-no-public-ip-address.md). The compute nodes can securely communicate with other virtual machines or with an on-premises network by [provisioning the pool in a subnet of an Azure virtual network](batch-virtual-network.md). And you can enable [private access from virtual networks](private-connectivity.md) from a service powered by Azure Private Link. +Many features are available to help you create a more secure Azure Batch deployment. You can restrict access to nodes and reduce the discoverability of the nodes from the internet by [provisioning the pool without public IP addresses](simplified-node-communication-pool-no-public-ip.md). The compute nodes can securely communicate with other virtual machines or with an on-premises network by [provisioning the pool in a subnet of an Azure virtual network](batch-virtual-network.md). And you can enable [private access from virtual networks](private-connectivity.md) from a service powered by Azure Private Link. :::image type="content" source="media/security-best-practices/secure-environment.png" alt-text="Diagram showing a more secure Batch environment."::: @@ -58,9 +58,9 @@ Batch management operations via Azure Resource Manager are encrypted using HTTPS The Batch service communicates with a Batch node agent that runs on each node in the pool. For example, the service instructs the node agent to run a task, stop a task, or get the files for a task. Communication with the node agent is enabled by one or more load balancers, the number of which depends on the number of nodes in a pool. The load balancer forwards the communication to the desired node, with each node being addressed by a unique port number. By default, load balancers have public IP addresses associated with them. You can also remotely access pool nodes via RDP or SSH (this access is enabled by default, with communication via load balancers). 
-### Restricting access to Batch endpoints +### Restricting access to Batch endpoints -Several capabilities are available to limit access to the various Batch endpoints, especially when the solution uses a virtual network. +Several capabilities are available to limit access to the various Batch endpoints, especially when the solution uses a virtual network. #### Use private endpoints @@ -99,7 +99,7 @@ By default, all the compute nodes in an Azure Batch virtual machine configuratio To restrict access to these nodes and reduce the discoverability of these nodes from the internet, you can provision the pool without public IP addresses. -For more information, see [Create a pool without public IP addresses](batch-pool-no-public-ip-address.md). +For more information, see [Create a pool without public IP addresses](simplified-node-communication-pool-no-public-ip.md). #### Limit remote access to pool nodes @@ -108,7 +108,7 @@ By default, Batch allows a node user with network connectivity to connect extern To limit remote access to nodes, use one of the following methods: - Configure the [PoolEndpointConfiguration](/rest/api/batchservice/pool/add#poolendpointconfiguration) to deny access. The appropriate network security group (NSG) will be associated with the pool. -- Create your pool [without public IP addresses](batch-pool-no-public-ip-address.md). By default, these pools can't be accessed outside of the VNet. +- Create your pool [without public IP addresses](simplified-node-communication-pool-no-public-ip.md). By default, these pools can't be accessed outside of the VNet. - Associate an NSG with the VNet to deny access to the RDP or SSH ports. - Don't create any users on the node. Without any node users, remote access won't be possible. diff --git a/articles/batch/simplified-compute-node-communication.md b/articles/batch/simplified-compute-node-communication.md index ca4db45fbb255..2e9a157137135 100644 --- a/articles/batch/simplified-compute-node-communication.md +++ b/articles/batch/simplified-compute-node-communication.md @@ -1,8 +1,9 @@ --- -title: Use simplified compute node communication -description: Learn how the Azure Batch service is simplifying the way Batch pool infrastructure is managed and how to opt in or out of the . +title: Use simplified compute node communication +description: Learn how the Azure Batch service is simplifying the way Batch pool infrastructure is managed and how to opt in or out of the feature. ms.topic: how-to -ms.date: 10/21/2021 +ms.date: 06/02/2022 +ms.custom: references_regions --- # Use simplified compute node communication @@ -15,7 +16,17 @@ This document describes forthcoming changes with how the Azure Batch service com > Support for simplified compute node communication in Azure Batch is currently in public preview. This preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. > For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). -Opting in is not required at this time. However, in the future, using simplified compute node communication will be required for all Batch accounts. At that time, an official retirement notice will be provided, with an opportunity to migrate your Batch pools before that happens. +Opting in isn't required at this time. 
However, in the future, using simplified compute node communication will be required for all Batch accounts. At that time, an official retirement notice will be provided, with an opportunity to migrate your Batch pools before that happens. + +## Supported regions + +Simplified compute node communication in Azure Batch is currently available for the following regions: + +- Public: Central US EUAP, East US 2 EUAP, West Central US, North Central US, South Central US, East US, East US 2, West US 2, West US, Central US, West US 3, East Asia, South East Asia, Australia East, Australia Southeast, Brazil Southeast, Brazil South, Canada Central, Canada East, North Europe, West Europe, Central India, Japan East, Japan West, Korea Central, Korea South, Switzerland North, UK West, UK South, UAE North, France Central, Germany West Central, Norway East, South Africa North. + +- Government: USGov Arizona, USGov Virginia, USGov Texas. + +- China: China North 3. ## Compute node communication changes @@ -38,7 +49,7 @@ With the new model, Batch pools in accounts that use simplified compute node com - Outbound: - Destination port 443 over TCP to BatchNodeManagement.*region* -Outbound requirements for a Batch account can be discovered using the [List Outbound Network Dependencies Endpoints API](/rest/api/batchmanagement/batch-account/list-outbound-network-dependencies-endpoints). This API will report the base set of dependencies, depending upon the Batch account pool communication model. User-specific workloads may need additional rules such as opening traffic to other Azure resources (such as Azure Storage for Application Packages, Azure Container Registry, etc.) or endpoints like the Microsoft package repository for virtual file system mounting functionality. +Outbound requirements for a Batch account can be discovered using the [List Outbound Network Dependencies Endpoints API](/rest/api/batchmanagement/batch-account/list-outbound-network-dependencies-endpoints). This API will report the base set of dependencies, depending upon the Batch account pool communication model. User-specific workloads may need additional rules such as opening traffic to other Azure resources (such as Azure Storage for Application Packages, Azure Container Registry, etc.) or endpoints like the Microsoft package repository for virtual file system mounting functionality. ## Benefits of the new model @@ -48,11 +59,11 @@ Simplified compute node communication helps reduce security risks by removing th The new model also provides more fine-grained data exfiltration control, since outbound communication to Storage.*region* is no longer required. You can explicitly lock down outbound communication to Azure Storage if required for your workflow (such as AppPackage storage accounts, other storage accounts for resource files or output files, or other similar scenarios). -Even if your workloads are not currently impacted by the changes (as described in the next section), you may still want to [opt in to use simplified compute node communication](#opt-your-batch-account-in-or-out-of-simplified-compute-node-communication) now. This will ensure your Batch workloads are ready for any future improvements enabled by this model. +Even if your workloads aren't currently impacted by the changes (as described in the next section), you may still want to [opt in to use simplified compute node communication](#opt-your-batch-account-in-or-out-of-simplified-compute-node-communication) now. 
This will ensure your Batch workloads are ready for any future improvements enabled by this model. ## Scope of impact -In many cases, this new communication model will not directly affect your Batch workloads. However, simplified compute node communication will have an impact for the following cases: +In many cases, this new communication model won't directly affect your Batch workloads. However, simplified compute node communication will have an impact for the following cases: - Users who specify a Virtual Network as part of creating a Batch pool and do one or both of the following: - Explicitly disable outbound network traffic rules that are incompatible with simplified compute node communication. @@ -63,7 +74,7 @@ If either of these cases applies to you, and you would like to opt in to the pre ### Required network configuration changes -For impacted users, the following set of steps are required to migrate to the new communication model: +For impacted users, the following set of steps is required to migrate to the new communication model: 1. Ensure your networking configuration as applicable to Batch pools (NSGs, UDRs, firewalls, etc.) includes a union of the models (that is, the network rules prior to simplified compute node communication and after). At a minimum, these rules would be: - Inbound: @@ -71,7 +82,7 @@ For impacted users, the following set of steps are required to migrate to the ne - Outbound: - Destination port 443 over TCP to Storage.*region* - Destination port 443 over TCP to BatchNodeManagement.*region* -1. If you have any additional inbound or outbound scenarios required by your workflow, you will need to ensure that your rules reflect these requirements. +1. If you have any additional inbound or outbound scenarios required by your workflow, you'll need to ensure that your rules reflect these requirements. 1. [Opt in to simplified compute node communication](#opt-your-batch-account-in-or-out-of-simplified-compute-node-communication) as described below. 1. Use one of the following options to update your workloads to use the new communication model. Whichever method you use, keep in mind that pools without public IP addresses are unaffected and can't currently use simplified compute node communication. Please see the [Current limitations](#current-limitations) section. 1. Create new pools and validate that the new pools are working correctly. Migrate your workload to the new pools and delete any earlier pools. @@ -109,22 +120,22 @@ Use the following options when creating your request. 1. For **Problem type**, select **Batch Accounts**. 1. For **Problem subtype**, select **Other issues with Batch Accounts**. 1. Select **Next**, then select **Next** again to go to the **Additional details** page. -1. In **Additional details**, you can optionally specify that you want to enable all of the Batch accounts in your subscription, or across multiple subscription. If you do so, be sure to include the subscription IDs here. +1. In **Additional details**, you can optionally specify that you want to enable all of the Batch accounts in your subscription, or across multiple subscriptions. If you do so, be sure to include the subscription IDs here. 1. Make any other required selections on the page, then select **Next**. 1. Review your request details, then select **Create** to submit your support request. -After your request has been submitted, you will be notified once the account has been opted in (or out). 
+After your request has been submitted, you'll be notified once the account has been opted in (or out). ## Current limitations The following are known limitations for accounts that opt in to simplified compute node communication: -- [Creating pools without public IP addresses](batch-pool-no-public-ip-address.md) isn't currently supported for accounts which have opted in. -- Previously created pools without public IP addresses won't use simplified compute node communication, even if the Batch account has opted in. -- [Private Batch accounts](private-connectivity.md) can opt in to simplified compute node communication, but Batch pools created by these Batch accounts must have public IP addresses in order to use simplified compute node communication. +- Limited migration support for previously created pools without public IP addresses ([V1 preview](batch-pool-no-public-ip-address.md)). They can only be migrated if created in a [virtual network](batch-virtual-network.md), otherwise they won't use simplified compute node communication, even if the Batch account has opted in. - Cloud Service Configuration pools are currently not supported for simplified compute node communication and are generally deprecated. We recommend using Virtual Machine Configuration for your Batch pools. For more information, see [Migrate Batch pool configuration from Cloud Services to Virtual Machine](batch-pool-cloud-service-to-virtual-machine-configuration.md). ## Next steps +- Learn how to [use private endpoints with Batch accounts](private-connectivity.md). - Learn more about [pools in virtual networks](batch-virtual-network.md). -- Learn how to [create a pool pool with specified public IP addresses](create-pool-public-ip.md). +- Learn how to [create a pool with specified public IP addresses](create-pool-public-ip.md). +- Learn how to [create a pool without public IP addresses](simplified-node-communication-pool-no-public-ip.md). diff --git a/articles/batch/simplified-node-communication-pool-no-public-ip.md b/articles/batch/simplified-node-communication-pool-no-public-ip.md new file mode 100644 index 0000000000000..cebabc72af6d9 --- /dev/null +++ b/articles/batch/simplified-node-communication-pool-no-public-ip.md @@ -0,0 +1,142 @@ +--- +title: Create a simplified node communication pool without public IP addresses (preview) +description: Learn how to create an Azure Batch simplified node communication pool without public IP addresses. +ms.topic: how-to +ms.date: 05/26/2022 +ms.custom: references_regions +--- + +# Create a simplified node communication pool without public IP addresses (preview) + +> [!NOTE] +> This replaces the previous preview version of [Azure Batch pool without public IP addresses](batch-pool-no-public-ip-address.md). This new version requires [using simplified compute node communication](simplified-compute-node-communication.md). + +> [!IMPORTANT] +> - Support for pools without public IP addresses in Azure Batch is currently in public preview for [selected regions](simplified-compute-node-communication.md#supported-regions). +> - This preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. +> - For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). 
+ +When you create an Azure Batch pool, you can provision the virtual machine (VM) configuration pool without a public IP address. This article explains how to set up a Batch pool without public IP addresses. + +## Why use a pool without public IP addresses? + +By default, all the compute nodes in an Azure Batch VM configuration pool are assigned a public IP address. This address is used by the Batch service to support outbound access to the internet, as well inbound access to compute nodes from the internet. + +To restrict access to these nodes and reduce the discoverability of these nodes from the internet, you can provision the pool without public IP addresses. + +## Prerequisites + +> [!IMPORTANT] +> The prerequisites have changed from the previous version of this preview. Make sure to review each item for changes before proceeding. + +- Use simplified compute node communication. For more information, see [Use simplified compute node communication](simplified-compute-node-communication.md). + +- The Batch client API must use Azure Active Directory (AD) authentication. Azure Batch support for Azure AD is documented in [Authenticate Batch service solutions with Active Directory](batch-aad-auth.md). + +- Create your pool in an [Azure virtual network (VNet)](batch-virtual-network.md), follow these requirements and configurations. To prepare a VNet with one or more subnets in advance, you can use the Azure portal, Azure PowerShell, the Azure Command-Line Interface (Azure CLI), or other methods. + + - The VNet must be in the same subscription and region as the Batch account you use to create your pool. + + - The subnet specified for the pool must have enough unassigned IP addresses to accommodate the number of VMs targeted for the pool; that is, the sum of the `targetDedicatedNodes` and `targetLowPriorityNodes` properties of the pool. If the subnet doesn't have enough unassigned IP addresses, the pool partially allocates the compute nodes, and a resize error occurs. + + - If you plan to use a [private endpoint with Batch accounts](private-connectivity.md), you must disable private endpoint network policies. Run the following Azure CLI command: + + `az network vnet subnet update --vnet-name -n --resource-group --disable-private-endpoint-network-policies` + +- Enable outbound access for Batch node management. A pool with no public IP addresses doesn't have internet outbound access enabled by default. To allow compute nodes to access the Batch node management service (see [Use simplified compute node communication](simplified-compute-node-communication.md)) either: + + - Use `nodeManagement` [private endpoint with Batch accounts](private-connectivity.md). This is the preferred method. + + - Alternatively, provide your own internet outbound access support (see [Outbound access to the internet](#outbound-access-to-the-internet)). + +## Current limitations + +1. Pools without public IP addresses must use Virtual Machine Configuration and not Cloud Services Configuration. +1. [Custom endpoint configuration](pool-endpoint-configuration.md) for Batch compute nodes doesn't work with pools without public IP addresses. +1. Because there are no public IP addresses, you can't [use your own specified public IP addresses](create-pool-public-ip.md) with this type of pool. + +## Create a pool without public IP addresses in the Azure portal + +1. Navigate to your Batch account in the Azure portal. +1. In the **Settings** window on the left, select **Pools**. +1. In the **Pools** window, select **Add**. +1. 
On the **Add Pool** window, select the option you intend to use from the **Image Type** dropdown.
+1. Select the correct **Publisher/Offer/Sku** of your image.
+1. Specify the remaining required settings, including the **Node size**, **Target dedicated nodes**, and **Target Spot/low-priority nodes**, as well as any desired optional settings.
+1. Select a virtual network and subnet you wish to use. This virtual network must be in the same location as the pool you are creating.
+1. In **IP address provisioning type**, select **NoPublicIPAddresses**.
+
+![Screenshot of the Add pool screen with NoPublicIPAddresses selected.](./media/batch-pool-no-public-ip-address/create-pool-without-public-ip-address.png)
+
+## Use the Batch REST API to create a pool without public IP addresses
+
+The example below shows how to use the [Batch Service REST API](/rest/api/batchservice/pool/add) to create a pool without public IP addresses.
+
+### REST API URI
+
+```http
+POST {batchURL}/pools?api-version=2020-03-01.11.0
+client-request-id: 00000000-0000-0000-0000-000000000000
+```
+
+### Request body
+
+```json
+"pool": {
+  "id": "pool2",
+  "vmSize": "standard_a1",
+  "virtualMachineConfiguration": {
+    "imageReference": {
+      "publisher": "Canonical",
+      "offer": "UbuntuServer",
+      "sku": "18.04-lts"
+    },
+    "nodeAgentSKUId": "batch.node.ubuntu 18.04"
+  },
+  "networkConfiguration": {
+    "subnetId": "/subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/",
+    "publicIPAddressConfiguration": {
+      "provision": "NoPublicIPAddresses"
+    }
+  },
+  "resizeTimeout": "PT15M",
+  "targetDedicatedNodes": 5,
+  "targetLowPriorityNodes": 0,
+  "taskSlotsPerNode": 3,
+  "taskSchedulingPolicy": {
+    "nodeFillType": "spread"
+  },
+  "enableAutoScale": false,
+  "enableInterNodeCommunication": true,
+  "metadata": [
+    {
+      "name": "myproperty",
+      "value": "myvalue"
+    }
+  ]
+}
+```
+
+## Outbound access to the internet
+
+In a pool without public IP addresses, your virtual machines won't be able to access the public internet unless you configure your network setup appropriately, such as by using [virtual network NAT](../virtual-network/nat-gateway/nat-overview.md). Note that NAT only allows outbound access to the internet from the virtual machines in the virtual network. Batch-created compute nodes won't be publicly accessible, since they don't have public IP addresses associated.
+
+Another way to provide outbound connectivity is to use a user-defined route (UDR). This lets you route traffic to a proxy machine that has public internet access, for example [Azure Firewall](../firewall/overview.md).
+
+> [!IMPORTANT]
+> There is no extra network resource (load balancer, network security group) created for simplified node communication pools without public IP addresses. Since the compute nodes in the pool are not bound to any load balancer, Azure may provide [Default Outbound Access](../virtual-network/ip-services/default-outbound-access.md). However, Default Outbound Access is not suitable for production workloads, so it is strongly recommended to bring your own internet outbound access.
+
+## Migration from previous preview version of No Public IP pools
+
+For existing pools that use the [previous preview version of Azure Batch No Public IP pool](batch-pool-no-public-ip-address.md), it's only possible to migrate pools created in a [virtual network](batch-virtual-network.md). To migrate the pool, follow the [opt-in process for simplified node communication](simplified-compute-node-communication.md):
+
+1.
Opt in to use simplified node communication. +1. Create a [private endpoint for Batch node management](private-connectivity.md) in the virtual network. +1. Scale down the pool to zero nodes. +1. Scale out the pool again. The pool is then automatically migrated to the new version of the preview. + +## Next steps + +- Learn how to [use simplified compute node communication](simplified-compute-node-communication.md). +- Learn more about [creating pools in a virtual network](batch-virtual-network.md). +- Learn how to [use private endpoints with Batch accounts](private-connectivity.md). diff --git a/articles/cdn/cdn-custom-ssl.md b/articles/cdn/cdn-custom-ssl.md index abc52979887ef..09b052ef83f8a 100644 --- a/articles/cdn/cdn-custom-ssl.md +++ b/articles/cdn/cdn-custom-ssl.md @@ -5,7 +5,7 @@ services: cdn author: duongau ms.service: azure-cdn ms.topic: tutorial -ms.date: 12/06/2021 +ms.date: 06/06/2022 ms.author: duau ms.custom: mvc #Customer intent: As a website owner, I want to enable HTTPS on the custom domain of my CDN endpoint so that my users can use my custom domain to access my content securely. @@ -149,9 +149,9 @@ Grant Azure CDN permission to access the certificates (secrets) in your Azure Ke :::image type="content" source="./media/cdn-custom-ssl/cdn-access-policy-settings.png" alt-text="Select service principal of Azure CDN" border="true"::: -4. Select **Certificate permissions**. Select the check boxes for **Get** and **List** to allow CDN permissions to get and list the certificates. +4. Select **Certificate permissions**. Select the check box for **Get** to allow CDN permissions to get the certificates. -5. Select **Secret permissions**. Select the check boxes for **Get** and **List** to allow CDN permissions to get and list the secrets: +5. Select **Secret permissions**. Select the check box for **Get** to allow CDN permissions to get the secrets: :::image type="content" source="./media/cdn-custom-ssl/cdn-vault-permissions.png" alt-text="Select permissions for CDN to keyvault" border="true"::: diff --git a/articles/cdn/cdn-manage-expiration-of-blob-content.md b/articles/cdn/cdn-manage-expiration-of-blob-content.md index fb0ea0572c04c..4307897a511d7 100644 --- a/articles/cdn/cdn-manage-expiration-of-blob-content.md +++ b/articles/cdn/cdn-manage-expiration-of-blob-content.md @@ -3,7 +3,7 @@ title: Manage expiration of Azure Blob storage titleSuffix: Azure Content Delivery Network description: Learn about the options for controlling time-to-live for blobs in Azure CDN caching. services: cdn -documentationcenter: '' +documentationcenter: author: zhangmanling manager: erikre editor: '' @@ -112,7 +112,7 @@ $blob.ICloudBlob.SetProperties() > ## Setting Cache-Control headers by using .NET -To specify a blob's `Cache-Control` header by using .NET code, use the [Azure Storage Client Library for .NET](../storage/blobs/storage-quickstart-blobs-dotnet.md) to set the [BlobHttpHeaders.CacheControl](/dotnet/api/azure.storage.blobs.models.blobhttpheaders.cachecontrol?view=azure-dotnet) property. +To specify a blob's `Cache-Control` header by using .NET code, use the [Azure Storage Client Library for .NET](../storage/blobs/storage-quickstart-blobs-dotnet.md) to set the [BlobHttpHeaders.CacheControl](/dotnet/api/azure.storage.blobs.models.blobhttpheaders.cachecontrol?view=azure-dotnet&preserve-view=true) property. 
For example: diff --git a/articles/cdn/media/cdn-custom-ssl/cdn-vault-permissions.png b/articles/cdn/media/cdn-custom-ssl/cdn-vault-permissions.png index 985551d61fbcb..a3d53a3d81ad4 100644 Binary files a/articles/cdn/media/cdn-custom-ssl/cdn-vault-permissions.png and b/articles/cdn/media/cdn-custom-ssl/cdn-vault-permissions.png differ diff --git a/articles/cdn/scripts/cli/cdn-azure-cli-create-endpoint.md b/articles/cdn/scripts/cli/cdn-azure-cli-create-endpoint.md index f38a200605423..c5deeac65b2ea 100644 --- a/articles/cdn/scripts/cli/cdn-azure-cli-create-endpoint.md +++ b/articles/cdn/scripts/cli/cdn-azure-cli-create-endpoint.md @@ -7,8 +7,8 @@ manager: danielgi ms.date: 03/09/2021 ms.topic: sample ms.service: azure-cdn -ms.devlang: azurecli -ms.custom: devx-track-azurecli +ms.devlang: azurecli +ms.tool: azure-cli --- # Create an Azure CDN profile and endpoint using the Azure CLI diff --git a/articles/certification/TOC.yml b/articles/certification/TOC.yml index e9da57482017c..a41d08a23c156 100644 --- a/articles/certification/TOC.yml +++ b/articles/certification/TOC.yml @@ -44,7 +44,7 @@ items: - name: How to edit a published device href: how-to-edit-published-device.md - - name: How to certify a indirectly connected device + - name: How to certify an indirectly connected device href: how-to-indirectly-connected-devices.md - name: How to test your IoT Plug and Play device with CLI href: how-to-test-pnp.md diff --git a/articles/certification/how-to-indirectly-connected-devices.md b/articles/certification/how-to-indirectly-connected-devices.md index 697ffe9a7d46b..ec74d83740b3e 100644 --- a/articles/certification/how-to-indirectly-connected-devices.md +++ b/articles/certification/how-to-indirectly-connected-devices.md @@ -1,86 +1,106 @@ --- # Mandatory fields. -title: Certifing device bundles and indirectly connected devices +title: Certify bundled or indirectly connected devices titleSuffix: Azure Certified -description: See how to submit an indirectly connected device for certification. +description: Learn how to submit a bundled or indirectly connected device for Azure Certified Device certification. See how to configure dependencies and components. author: cbroad ms.author: cbroad # Microsoft employees only -ms.date: 02/23/2021 +ms.date: 06/07/2022 ms.topic: how-to ms.service: certification - +ms.custom: kr2b-contr-experiment # Optional fields. Don't forget to remove # if you need a field. -# ms.custom: can-be-multiple-comma-separated # ms.reviewer: MSFT-alias-of-reviewer # manager: MSFT-alias-of-manager-or-PM-counterpart --- # Device bundles and indirectly connected devices -To support devices that interact with Azure through a device, SaaS or PaaS offerings, our submission portal (https://certify.azure.com/), and device catalog (https://devicecatalog.azure.com) enable concepts of bundling and dependencies to promote and enable these device combinations access to our Azure Certified Device program. +Many devices interact with Azure indirectly. Some communicate through another device, such as a gateway. Others connect through software as a service (SaaS) or platform as a service (PaaS) offerings. + +The [submission portal](https://certify.azure.com/) and [device catalog](https://devicecatalog.azure.com) offer support for indirectly connected devices: + +- By listing dependencies in the portal, you can specify that your device needs another device or service to connect to Azure. +- By adding components, you can indicate that your device is part of a bundle. 
+ +This functionality gives indirectly connected devices access to the Azure Certified Device program. -Depending on your product line and services offered, your situation may require a combination of these steps: +Depending on your product line and the services that you offer or use, your situation might require a combination of dependencies and bundling. The Azure Edge Certification Portal provides a way for you to list dependencies and additional components. +:::image type="content" source="./media/indirect-connected-device/picture-1.png" alt-text="Screenshot of the Azure Edge Certification Portal. On the Create a certify project page, the Dependencies tab is open."::: -![Create project dependencies](./media/indirect-connected-device/picture-1.png ) ## Sensors and indirect devices -Many sensors require a device to connect to Azure. In addition, you may have multiple compatible devices that will work with the sensor device. **To accommodate these scenarios, you must first certify the device(s) before certifying the sensor that will pass information through them.** -Example matrix of submission combinations -![Submission example](./media/indirect-connected-device/picture-2.png ) +Many sensors require a device to connect to Azure. In addition, you might have multiple compatible devices that work with the sensor. **To accommodate these scenarios, certify the devices before you certify the sensor that passes information through them.** + +The following matrix provides some examples of submission combinations: + +:::image type="content" source="./media/indirect-connected-device/picture-2.png" alt-text="Sensor and gateway icons and a table that lists submissions. The table ordering shows that gateways are submitted before sensors that depend on them."::: + +To certify a sensor that requires a separate device: + +1. Go to the [Azure Certified Device portal](https://certify.azure.com) to certify the device and publish it to the Azure Certified Device catalog. If you have multiple, compatible pass-through devices, as in the earlier example, submit them separately for certification and catalog publication. -To certify your sensor, which requires a separate device: -1. First, [certify the device](https://certify.azure.com) and publish to the Azure Certified Device Catalog - - If you have multiple, compatible passthrough devices (as in the example above), Submit them separately for certification and publish to the catalog as well -2. With the sensor connected through the device, submit the sensor for certification - * In the “Dependencies” tab of the “Device details” section, set the following values - * Dependency type = “Hardware gateway” - * Dependency URL = “URL link to the device on the device catalog” - * Used during testing = “Yes” - * Add any Customer-facing comments that should be provided to a user who sees the product description in the device catalog. (example: “Series 100 devices are required for sensors to connect to Azure”) +1. With the sensor connected through the device, submit the sensor for certification. In the **Dependencies** tab of the **Device details** section, set the following values: -3. If you have more devices you would like added as optional for this device, you can select “+ Add additional dependency”. Then follow the same guidance and note that it was not used during testing. In the Customer-facing comments, ensure your customers are aware that other devices are associated with this sensor are available (as an alternative to the device that was used during testing). 
+ - **Dependency type**: Select **Hardware gateway**. + - **Dependency URL**: Enter the URL of the device in the device catalog. + - **Used during testing**: Select **Yes**. + - **Customer-facing comments**: Enter any comments that you'd like to provide to a user who sees the product description in the device catalog. For example, you might enter **Series 100 devices are required for sensors to connect to Azure**. -![Alt text](./media/indirect-connected-device/picture-3.png "Hardware dependency type") +1. If you'd like to add more devices as optional for this device: + + 1. Select **Add additional dependency**. + 1. Enter **Dependency type** and **Dependency URL** values. + 1. For **Used during testing**, select **No**. + 1. For **Customer-facing comments**, enter a comment that informs your customers that other devices are available as alternatives to the device that was used during testing. + +:::image type="content" source="./media/indirect-connected-device/picture-3.png" alt-text="Screenshot of the Dependencies tab in the portal. The Dependency type, Dependency U R L, and Used during testing fields are called out."::: ## PaaS and SaaS offerings -As part of your product portfolio, you may have devices that you certify, but your device also requires other services from your company or other third-party companies. To add this dependency, follow these steps: -1. Start the submission process for your device -2. In the “Dependencies” tab, set the following values - - Dependency type = “Software service” - - Service name = “[your product name]” - - Dependency URL = “URL link to a product page that describes the service” - - Add any customer facing comments that should be provided to a user who sees the product description in the Azure Certified Device Catalog -3. If you have other software, services or hardware dependencies you would like added as optional for this device, you can select “+ Add additional dependency” and follow the same guidance. -![Software dependency type](./media/indirect-connected-device/picture-4.png ) +As part of your product portfolio, you might certify a device that requires services from your company or third-party companies. To add this type of dependency: + +1. Go to the [Azure Certified Device portal](https://certify.azure.com) and start the submission process for your device. + +1. In the **Dependencies** tab, enter the following values: + + - **Dependency type**: Select **Software service**. + - **Service name**: Enter the name of your product. + - **Dependency URL**: Enter the URL of a product page that describes the service. + - **Customer-facing comments**: Enter any comments that you'd like to provide to a user who sees the product description in the Azure Certified Device catalog. + +1. If you have other software, services, or hardware dependencies that you'd like to add as optional for this device, select **Add additional dependency** and enter the required information. + +:::image type="content" source="./media/indirect-connected-device/picture-4.png" alt-text="Screenshot of the Dependencies tab in the portal. The Dependency type, Service name, and Dependency U R L fields are called out."::: ## Bundled products -Bundled product listings are simply the successful certification of a device with another components that will be sold as part of the bundle in one product listing. 
You have the ability to submit a device that includes extra components such as a temperature sensor and a camera sensor (#1) or you could submit a touch sensor that includes a passthrough device (#2). Through the “Component” feature, you have the ability to add multiple components to your listing. -If you intend to do this, you format the product listing image to indicate this product comes with other components. In addition, if your bundle requires additional services to certify, you will need to identify those through the services dependency. -Example matrix of bundled products +With bundled product listings, a device is successfully certified in the Azure Certified Device program with other components. The device and the components are then sold together under one product listing. + +The following matrix provides some examples of bundled products. You can submit a device that includes extra components such as a temperature sensor and a camera sensor, as in submission example 1. You can also submit a touch sensor that includes a pass-through device, as in submission example 2. -![Bundle submission example](./media/indirect-connected-device/picture-5.png ) +:::image type="content" source="./media/indirect-connected-device/picture-5.png" alt-text="Sensor and gateway icons and a table that lists submissions and their bundled components. Sensors and gateways are listed as devices and components."::: -For a more detailed description on how to use the component functionality in the Azure Certified Device portal, see our [help documentation](./how-to-using-the-components-feature.md). +Use the component feature to add multiple components to your listing. Format the product listing image to indicate that your product comes with other components. If your bundle requires additional services for certification, identify those services through service dependencies. -If a device is a passthrough device with a separate sensor in the same product, create one component to reflect the passthrough device, and another component to reflect the sensor. Components can be added to your project in the Product details tab of the Device details section: +For a more detailed description of how to use the component functionality in the Azure Certified Device portal, see [Add components on the portal](./how-to-using-the-components-feature.md). -![Adding components](./media/indirect-connected-device/picture-6.png ) +If a device is a pass-through device with a separate sensor in the same product, create one component to reflect the pass-through device, and another component to reflect the sensor. As the following screenshot shows, you can add components to your project in the **Product details** tab of the **Device details** section: -For the passthrough device, set the Component type as a Customer Ready Product, and fill in the other fields as relevant for your product. Example: +:::image type="content" source="./media/indirect-connected-device/picture-6.png" alt-text="Screenshot of the Device details page. The Product details tab is open, and the Add a component button is called out."::: -![Component details](./media/indirect-connected-device/picture-7.png ) +Configure the pass-through device first. For **Component type**, select **Customer Ready Product**. Enter the other values, as relevant for your product. The following screenshot provides an example: -For the sensor, add a second component, setting the Component type as Peripheral and Attachment method as Discrete. 
Example: +:::image type="content" source="./media/indirect-connected-device/picture-7.png" alt-text="Screenshot that shows input fields. The General tab is open. For Component type, Customer Ready Product is selected."::: -![Second component details](./media/indirect-connected-device/picture-8.png ) +For the sensor, add a second component. For **Component type**, select **Peripheral**. For **Attachment method**, select **Discrete**. The following screenshot provides an example: -Once the Sensor component has been created, Edit the details, navigate to the Sensors tab, and then add the sensor details. Example: +:::image type="content" source="./media/indirect-connected-device/picture-8.png" alt-text="Screenshot that shows input fields. The General tab is open. For Component type, Peripheral is selected. For Attachment method, Discrete is selected."::: -![Sensor details](./media/indirect-connected-device/picture-9.png ) +After you've created the sensor component, enter its information. Then go to the **Sensors** tab and enter detailed sensor information, as the following screenshot shows. -Complete your projects details and Submit your device for certification as normal. +:::image type="content" source="./media/indirect-connected-device/picture-9.png" alt-text="Screenshot that shows the Sensors tab. Values are visible in the Supported sensor type, Included with device, and Sensor details fields."::: +Complete the rest of your project's details, and then submit your device for certification as usual. diff --git a/articles/certification/how-to-test-pnp.md b/articles/certification/how-to-test-pnp.md index 322c499bd9d25..ea140032a4a2d 100644 --- a/articles/certification/how-to-test-pnp.md +++ b/articles/certification/how-to-test-pnp.md @@ -42,12 +42,12 @@ To meet the certification requirements, your device must: ## Test with the Azure IoT Extension CLI -The [Azure IoT CLI extension](/cli/azure/ext/azure-iot/iot/product?view=azure-cli-latest) lets you validate that the device implementation matches the model before you submit the device for certification through the Azure Certified Device portal. +The [Azure IoT CLI extension](/cli/azure/ext/azure-iot/iot/product?view=azure-cli-latest&preserve-view=true) lets you validate that the device implementation matches the model before you submit the device for certification through the Azure Certified Device portal. The following steps show you how to prepare for and run the certification tests using the CLI: ### Install the Azure IoT extension for the Azure CLI -Install the [Azure CLI](/cli/azure/install-azure-cli) and review the installation instructions to set up the [Azure CLI](/cli/azure/iot?view=azure-cli-latest) in your environment. +Install the [Azure CLI](/cli/azure/install-azure-cli) and review the installation instructions to set up the [Azure CLI](/cli/azure/iot?view=azure-cli-latest&preserve-view=true) in your environment. To install the Azure IoT Extension, run the following command: @@ -55,7 +55,7 @@ To install the Azure IoT Extension, run the following command: az extension add --name azure-iot ``` -To learn more, see [Azure CLI for Azure IoT](/cli/azure/iot/product?view=azure-cli-latest). +To learn more, see [Azure CLI for Azure IoT](/cli/azure/iot/product?view=azure-cli-latest&preserve-view=true). 
### Create a new product test diff --git a/articles/chaos-studio/TOC.yml b/articles/chaos-studio/TOC.yml index b4bda0b5d7eec..be23b8ef2a15c 100644 --- a/articles/chaos-studio/TOC.yml +++ b/articles/chaos-studio/TOC.yml @@ -75,7 +75,7 @@ - name: REST API documentation href: /rest/api/chaosstudio/ - name: Python SDK - href: /python/api/azure-mgmt-chaos/?view=azure-python-preview + href: /python/api/azure-mgmt-chaos/ - name: .NET SDK href: https://www.nuget.org/packages/Microsoft.Azure.Management.Chaos/0.9.15-preview - name: Resources diff --git a/articles/chaos-studio/chaos-studio-fault-library.md b/articles/chaos-studio/chaos-studio-fault-library.md index 4e19406ce272a..ea6326e46aea9 100644 --- a/articles/chaos-studio/chaos-studio-fault-library.md +++ b/articles/chaos-studio/chaos-studio-fault-library.md @@ -47,7 +47,7 @@ The following faults are available for use today. Visit the [Fault Providers](./ | Capability Name | CPUPressure-1.0 | | Target type | Microsoft-Agent | | Supported OS Types | Windows, Linux | -| Description | Add CPU pressure up to the specified value on the VM where this fault is injected for the duration of the fault action. The artificial CPU pressure is removed at the end of the duration or if the experiment is canceled. | +| Description | Add CPU pressure up to the specified value on the VM where this fault is injected for the duration of the fault action. The artificial CPU pressure is removed at the end of the duration or if the experiment is canceled. On Windows, the "% Processor Utility" performance counter is read at fault start to determine the current CPU percentage, which is subtracted from the pressureLevel defined in the fault so that % Processor Utility reaches approximately the pressureLevel defined in the fault parameters. | | Prerequisites | **Linux:** Running the fault on a Linux VM requires the **stress-ng** utility to be installed. You can install it using the package manager for your Linux distro,
                  APT Command to install stress-ng: *sudo apt-get update && sudo apt-get -y install unzip && sudo apt-get -y install stress-ng*
                  YUM Command to install stress-ng: *sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && sudo yum -y install stress-ng* | | | **Windows:** None. | | Urn | urn:csci:microsoft:agent:cpuPressure/1.0 | diff --git a/articles/chaos-studio/chaos-studio-overview.md b/articles/chaos-studio/chaos-studio-overview.md index a383736622594..8e7384322c3bc 100644 --- a/articles/chaos-studio/chaos-studio-overview.md +++ b/articles/chaos-studio/chaos-studio-overview.md @@ -1,54 +1,71 @@ --- -title: What is Azure Chaos Studio? -description: Understand Azure Chaos Studio, an Azure service that helps you to measure, understand, and build application and service resilience to real world incidents using chaos engineering to inject faults against your service then monitor how the service responds to disruptions. +title: What is Azure Chaos Studio (Preview)? +description: Measure, understand, and build resilience to incidents by using chaos engineering to inject faults and monitor how your application responds. services: chaos-studio author: johnkemnetz ms.topic: overview -ms.date: 11/11/2021 +ms.date: 05/27/2022 ms.author: johnkem ms.service: chaos-studio -ms.custom: template-overview,ignite-fall-2021 +ms.custom: template-overview,ignite-fall-2021, kr2b-contr-experiment --- -# What is Azure Chaos Studio Preview? +# What is Azure Chaos Studio (Preview)? -Azure Chaos Studio is a managed service for improving resilience by injecting faults into your Azure applications. Running controlled fault injection experiments against your applications, a practice known as chaos engineering, helps you to measure, understand, and improve resilience against real-world incidents, such as a region outages or application failures causing high CPU utilization on a VM. +[Azure Chaos Studio](https://azure.microsoft.com/services/chaos-studio) is a managed service that uses chaos engineering to help you measure, understand, and improve your cloud application and service resilience. Chaos engineering is a methodology by which you inject real-world faults into your application to run controlled fault injection experiments. + +Resilience is the capability of a system to handle and recover from disruptions. Application disruptions can cause errors and failures that can adversely affect your business or mission. Whether you're developing, migrating, or operating Azure applications, it's important to validate and improve your application's resilience. + +Chaos Studio helps you avoid negative consequences by validating that your application responds effectively to disruptions and failures. You can use Chaos Studio to test resilience against real-world incidents, like outages or high CPU utilization on virtual machines (VMs). + +The following video provides more background about Azure Chaos Studio: > [!VIDEO https://aka.ms/docs/player?id=29017ee4-bdfa-491e-acfe-8876e93c505b] -## Why should I use Chaos Studio? +## Chaos Studio scenarios + +You can use chaos engineering for various resilience validation scenarios that span the service development and operations lifecycle. There are two types of scenarios: + +- *Shift right* scenarios use a production or pre-production environment. Usually, you do shift right scenarios with real customer traffic or simulated load. +- *Shift left* scenarios can use a development or shared test environment. You can do shift left scenarios without any real customer traffic. 
-Whether you are developing a new application that will be hosted on Azure, migrating an existing application to Azure, or operating an application that already runs on Azure, it is important to validate and improve your application's resilience. Resilience is the capability of a system to handle and recover from disruptions. Disruptions in your application's availability can result in errors and failures for users, which in turn can have negative consequences on your business or mission. +You can use Chaos Studio for the following common chaos engineering scenarios: -When running an application in the cloud, avoiding these negative consequences requires you to validate that your application responds effectively to disruptions that could be caused by a service you depend on, disruptions caused by a failure in the service itself, or even disruptions to incident response tooling and processes. Chaos experimentation enables you to test that your cloud-hosted application is resilient to failures. +- Reproduce an incident that affected your application, to better understand the failure. Ensure that post-incident repairs prevent the incident from recurring. +- Prepare for a major event or season with "game day" load, scale, performance, and resilience validation. +- Do business continuity and disaster recovery (BCDR) drills to ensure that your application can recover quickly and preserve critical data in a disaster. +- Run high availability (HA) drills to test application resilience against region outages, network configuration errors, high stress events, or noisy neighbor issues. +- Develop application performance benchmarks. +- Plan capacity needs for production environments. +- Run stress tests or load tests. +- Ensure that services migrated from an on-premises or other cloud environment remain resilient to known failures. +- Build confidence in services built on cloud-native architectures. +- Validate that live site tooling, observability data, and on-call processes still work in unexpected conditions. -## When would I use Chaos Studio? +For many of these scenarios, you first build resilience using ad-hoc chaos experiments. Then, you continuously validate that new deployments won't regress resilience, by running chaos experiments as deployment gates in your continuous integration/continuous deployment (CI/CD) pipelines. -Chaos engineering can be used for a wide variety of resilience validation scenarios. These scenarios span the entire service development and operation lifecycle and can be categorized as either *shift right,* wherein the scenario is best validated in a production or pre-production environment, or *shift left,* wherein the scenario could be validated in a development environment or shared test environment. Typically shift right scenarios should be done with real customer traffic or simulated load whereas shift left scenarios can be done without any real customer traffic. Some common scenarios where chaos engineering can be applied are: -* Reproducing an incident that impacted your application to better understand the failure mode or ensure that post-incident repair items will prevent the incident from recurring. -* Running "game days" - load, scale, performance, and resilience validation of a service in preparation for a major user event or season. -* Performing business continuity / disaster recovery (BCDR) drills to ensure that if your application were impacted by a major disaster it could recover quickly and critical data is preserved. 
-* Running high availability drills to test application resilience against specific failures such as region outages, network configuration errors, high stress events, or noisy neighbor issues. -* Developing application performance benchmarks. -* Planning capacity needs for production environments. -* Running stress tests or load tests. -* Ensuring services migrated from an on-premises or other cloud environment remain resilient to known failures. -* Building confidence in services built on cloud-native architectures. -* Validating that live site tooling, observability data, and on-call processes work as expected under unexpected conditions. +## How Chaos Studio works -For many of these scenarios, you first build resilience using ad-hoc chaos experiments then continuously validate that new deployments won't regress resilience using chaos experiments as a deployment gate in your CI/CD pipeline. +With Chaos Studio, you can orchestrate safe, controlled fault injection on your Azure resources. Chaos experiments are the core of Chaos Studio. A chaos experiment describes the faults to run and the resources to run against. You can organize faults to run in parallel or sequence, depending on your needs. -## How does Chaos Studio work? +Chaos Studio supports two types of faults: -Chaos Studio enables you to orchestrate fault injection on your Azure resources in a safe and controlled way. At the core of Chaos Studio is chaos experiment. A chaos experiment is an Azure resource that describes the faults that should be run and the resources those faults should be run against. Faults can be organized to run in parallel or sequentially, depending on your needs. Chaos Studio supports two types of faults - *service-direct* faults, which run directly against an Azure resource without any installation or instrumentation (for example, rebooting an Azure Cache for Redis cluster or adding network latency to AKS pods), and *agent-based* faults, which run in virtual machines or virtual machine scale sets to perform in-guest failures (for example, applying virtual memory pressure or killing a process). Each fault has specific parameters you can control, like which process to kill or how much memory pressure to generate. +- *Service-direct* faults run directly against an Azure resource, without any installation or instrumentation. Examples include rebooting an Azure Cache for Redis cluster, or adding network latency to Azure Kubernetes Service (AKS) pods. +- *Agent-based* faults run in VMs or virtual machine scale sets to do in-guest failures. Examples include applying virtual memory pressure or killing a process. -When you build a chaos experiment, you define one or more *steps* that execute sequentially, each step containing one or more *branches* that run in parallel within the step, and each branch containing one or more *actions* such as injecting a fault or waiting for a certain duration. Finally, you organize the resources (*targets*) that each fault will be run against into groups called selectors so that you can easily reference a group of resources in each action. +Each fault has specific parameters you can configure, like which process to kill or how much memory pressure to generate. + +When you build a chaos experiment, you define one or more *steps* that execute sequentially. Each step contains one or more *branches* that run in parallel within the step. Each branch contains one or more *actions*, such as injecting a fault or waiting for a certain duration. 
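To make the experiment layout concrete, here's a rough Python sketch of how steps, branches, actions, and the selectors described in the next paragraph nest inside an experiment definition. The fault URN and the `pressureLevel` parameter come from the fault library entry earlier in this change; the remaining property names and values (`selectorId`, `duration`, the `continuous` action type, the placeholder target ID) are assumptions for illustration only, not a verified Chaos Studio schema, so check the REST API reference for the exact format.

```python
# Illustrative sketch of how steps, branches, actions, and selectors nest.
# The fault URN and pressureLevel come from the fault library table; the other
# property names are assumptions, not a verified Chaos Studio schema.
import json

experiment = {
    "selectors": [
        {
            "type": "List",
            "id": "Selector1",
            # Placeholder target resource ID.
            "targets": [{"type": "ChaosTarget", "id": "<target-resource-id>"}],
        }
    ],
    "steps": [
        {
            "name": "Step 1",
            "branches": [
                {
                    "name": "Branch 1",
                    "actions": [
                        {
                            "type": "continuous",
                            "name": "urn:csci:microsoft:agent:cpuPressure/1.0",
                            "duration": "PT10M",
                            "parameters": [{"key": "pressureLevel", "value": "95"}],
                            "selectorId": "Selector1",
                        }
                    ],
                }
            ],
        }
    ],
}

print(json.dumps(experiment, indent=2))
```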
+ +You organize resource *targets* to run faults against into groups called *selectors*, so you can easily reference a group of resources in each action. + +The following diagram shows the layout of a chaos experiment in Chaos Studio: ![Diagram showing the layout of a chaos experiment.](images/chaos-experiment.png) -A chaos experiment is an Azure resource that lives in a subscription and resource group. You can use the Azure portal or the Chaos Studio REST API to create, update, start, cancel, and view the status of an experiment. +A chaos experiment is an Azure resource in a subscription and resource group. You can use the Azure portal or the [Chaos Studio REST API](/rest/api/chaosstudio) to create, update, start, cancel, and view the status of experiments. ## Next steps -Get started creating and running chaos experiments to improve application resilience with Chaos Studio using the links below. + - [Create and run your first experiment](chaos-studio-tutorial-service-direct-portal.md) - [Learn more about chaos engineering](chaos-studio-chaos-engineering-overview.md) diff --git a/articles/cloud-services-extended-support/available-sizes.md b/articles/cloud-services-extended-support/available-sizes.md index c58e82524fb54..8c8c2357febb6 100644 --- a/articles/cloud-services-extended-support/available-sizes.md +++ b/articles/cloud-services-extended-support/available-sizes.md @@ -26,9 +26,11 @@ This article describes the available virtual machine sizes for Cloud Services (e |[G](../virtual-machines/sizes-previous-gen.md?bc=%2fazure%2fvirtual-machines%2flinux%2fbreadcrumb%2ftoc.json&toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#g-series) | 180-240* | |[H](../virtual-machines/h-series.md) | 290 - 300* | + >[!NOTE] > ACUs marked with a * use Intel® Turbo technology to increase CPU frequency and provide a performance boost. The amount of the boost can vary based on the VM size, workload, and other workloads running on the same host. + ## Configure sizes for Cloud Services (extended support) You can specify the virtual machine size of a role instance as part of the service model in the service definition file. The size of the role determines the number of CPU cores, memory capacity and the local file system size. @@ -39,6 +41,8 @@ For example, setting the web role instance size to `Standard_D2`: ``` +>[!IMPORTANT] +> Microsoft Azure has introduced newer generations of high-performance computing (HPC), general purpose, and memory-optimized virtual machines (VMs). For this reason, we recommend that you migrate workloads from the original H-series and H-series Promo VMs to our newer offerings by August 31, 2022. Azure [HC](../virtual-machines/hc-series.md), [HBv2](../virtual-machines/hbv2-series.md), [HBv3](../virtual-machines/hbv3-series.md), [Dv4](../virtual-machines/dv4-dsv4-series.md), [Dav4](../virtual-machines/dav4-dasv4-series.md), [Ev4](../virtual-machines/ev4-esv4-series.md), and [Eav4](../virtual-machines/eav4-easv4-series.md) VMs have greater memory bandwidth, improved networking capabilities, and better cost and performance across various HPC workloads. 
## Change the size of an existing role diff --git a/articles/cloud-services-extended-support/cloud-services-model-and-package.md b/articles/cloud-services-extended-support/cloud-services-model-and-package.md index df9111e958b94..5d66f53df9b88 100644 --- a/articles/cloud-services-extended-support/cloud-services-model-and-package.md +++ b/articles/cloud-services-extended-support/cloud-services-model-and-package.md @@ -20,11 +20,6 @@ Once the cloud service is running in Azure, you can reconfigure it through the * * I want to know more about the [ServiceDefinition.csdef](#csdef) and [ServiceConfig.cscfg](#cscfg) files. * I already know about that, give me [some examples](#next-steps) on what I can configure. * I want to create the [ServicePackage.cspkg](#cspkg). -* I am using Visual Studio and I want to... - * [Create a cloud service][vs_create] - * [Reconfigure an existing cloud service][vs_reconfigure] - * [Deploy a Cloud Service project][vs_deploy] - * [Remote desktop into a cloud service instance][remotedesktop] diff --git a/articles/cloud-services-extended-support/deploy-prerequisite.md b/articles/cloud-services-extended-support/deploy-prerequisite.md index 23565f69125d7..a86117a5c9709 100644 --- a/articles/cloud-services-extended-support/deploy-prerequisite.md +++ b/articles/cloud-services-extended-support/deploy-prerequisite.md @@ -20,6 +20,9 @@ To ensure a successful Cloud Services (extended support) deployment review the b Cloud Service (extended support) deployments must be in a virtual network. Virtual network can be created through [Azure portal](../virtual-network/quick-create-portal.md), [PowerShell](../virtual-network/quick-create-powershell.md), [Azure CLI](../virtual-network/quick-create-cli.md) or [ARM Template](../virtual-network/quick-create-template.md). The virtual network and subnets must also be referenced in the Service Configuration (.cscfg) under the [NetworkConfiguration](schema-cscfg-networkconfiguration.md) section. For a virtual networks belonging to the same resource group as the cloud service, referencing only the virtual network name in the Service Configuration (.cscfg) file is sufficient. If the virtual network and cloud service are in two different resource groups, then the complete Azure Resource Manager ID of the virtual network needs to be specified in the Service Configuration (.cscfg) file. + +> [!NOTE] +> A virtual network and cloud service located in different resource groups aren't supported in Visual Studio 2019. For deployments in that scenario, consider using an ARM template or the Azure portal instead. #### Virtual Network located in same resource group ```xml diff --git a/articles/cloud-services-extended-support/in-place-migration-technical-details.md b/articles/cloud-services-extended-support/in-place-migration-technical-details.md index ebec330a22e78..3f97d34f0adb1 100644 --- a/articles/cloud-services-extended-support/in-place-migration-technical-details.md +++ b/articles/cloud-services-extended-support/in-place-migration-technical-details.md @@ -87,7 +87,7 @@ These are top scenarios involving combinations of resources, features and Cloud | Migration of empty Cloud Service (Cloud Service with no deployment) | Not supported. | | Migration of deployment containing the remote desktop plugin and the remote desktop extensions | Option 1: Remove the remote desktop plugin before migration. This requires changes to deployment files. The migration will then go through.

                  Option 2: Remove remote desktop extension and migrate the deployment. Post-migration, remove the plugin and install the extension. This requires changes to deployment files.

                  Remove the plugin and extension before migration. [Plugins are not recommended](./deploy-prerequisite.md#required-service-definition-file-csdef-updates) for use on Cloud Services (extended support).| | Virtual networks with both PaaS and IaaS deployment |Not Supported

                  Move either the PaaS or IaaS deployments into a different virtual network. This will cause downtime. | -Cloud Service deployments using legacy role sizes (such as Small or ExtraLarge). | The migration will complete, but the role sizes will be updated to use modern role sizes. There is no change in cost or SKU properties and virtual machine will not be rebooted for this change. Update all deployment artifacts to reference these new modern role sizes. For more information, see [Available VM sizes](available-sizes.md)| +Cloud Service deployments using legacy role sizes (such as Small or ExtraLarge). | The role sizes need to be updated to modern role sizes before migration. Update all deployment artifacts to reference the new sizes. For more information, see [Available VM sizes](available-sizes.md).| | Migration of Cloud Service to different virtual network | Not supported

                  1. Move the deployment to a different classic virtual network before migration. This will cause downtime.
                  2. Migrate the new virtual network to Azure Resource Manager.

                  Or

                  1. Migrate the virtual network to Azure Resource Manager
                  2. Move the Cloud Service to a new virtual network. This will cause downtime. | | Cloud Service in a virtual network but does not have an explicit subnet assigned | Not supported. Mitigation involves moving the role into a subnet, which requires a role restart (downtime) | @@ -130,4 +130,4 @@ As part of migration, the resource names are changed, and few Cloud Services fea Validate is designed to be quick. Prepare is longest running and takes some time depending on total number of role instances being migrated. Abort and commit can also take time but will take less time compared to prepare. All operations will time out after 24 hrs. ## Next steps -For assistance migrating your Cloud Services (classic) deployment to Cloud Services (extended support) see our [Support and troubleshooting](support-help.md) landing page. \ No newline at end of file +For assistance migrating your Cloud Services (classic) deployment to Cloud Services (extended support) see our [Support and troubleshooting](support-help.md) landing page. diff --git a/articles/cloud-services-extended-support/override-sku.md b/articles/cloud-services-extended-support/override-sku.md index 145585c794dcd..ff811cebf2e4a 100644 --- a/articles/cloud-services-extended-support/override-sku.md +++ b/articles/cloud-services-extended-support/override-sku.md @@ -33,7 +33,7 @@ Setting the **allowModelOverride** property to `true` here will update the cloud "packageUrl": "[parameters('packageSasUri')]", "configurationUrl": "[parameters('configurationSasUri')]", "upgradeMode": "[parameters('upgradeMode')]", - “allowModelOverride” : true, + "allowModelOverride": true, "roleProfile": { "roles": [ { diff --git a/articles/cloud-services-extended-support/swap-cloud-service.md b/articles/cloud-services-extended-support/swap-cloud-service.md index 14971ead4a266..32b20b3e26756 100644 --- a/articles/cloud-services-extended-support/swap-cloud-service.md +++ b/articles/cloud-services-extended-support/swap-cloud-service.md @@ -58,7 +58,7 @@ To save compute costs, you can delete one of the cloud services (designated as a ## REST API -To use the [REST API](/rest/api/compute/load-balancers/swap-public-ip-addresses) to swap to a new cloud services deployment in Azure Cloud Services (extended support), use the following command and JSON configuration: +To use the [REST API](/rest/api/load-balancer/load-balancers/swap-public-ip-addresses) to swap to a new cloud services deployment in Azure Cloud Services (extended support), use the following command and JSON configuration: ```http POST https://management.azure.com/subscriptions/subid/providers/Microsoft.Network/locations/westus/setLoadBalancerFrontendPublicIpAddresses?api-version=2021-02-01 diff --git a/articles/cloud-services/cloud-services-dotnet-get-started.md b/articles/cloud-services/cloud-services-dotnet-get-started.md index 8f383b248e153..7ff7caf4bbf0e 100644 --- a/articles/cloud-services/cloud-services-dotnet-get-started.md +++ b/articles/cloud-services/cloud-services-dotnet-get-started.md @@ -28,7 +28,7 @@ The application is an advertising bulletin board. Users create an ad by entering The application uses the [queue-centric work pattern](https://www.asp.net/aspnet/overview/developing-apps-with-windows-azure/building-real-world-cloud-apps-with-windows-azure/queue-centric-work-pattern) to off-load the CPU-intensive work of creating thumbnails to a back-end process. 
## Alternative architecture: App Service and WebJobs -This tutorial shows how to run both front-end and back-end in an Azure cloud service. An alternative is to run the front-end in [Azure App Service](../app-service/index.yml) and use the [WebJobs](https://go.microsoft.com/fwlink/?LinkId=390226) feature for the back-end. For a tutorial that uses WebJobs, see [Get Started with the Azure WebJobs SDK](https://github.com/Azure/azure-webjobs-sdk/wiki). For information about how to choose the services that best fit your scenario, see [Azure App Service, Cloud Services, and virtual machines comparison](/azure/architecture/guide/technology-choices/compute-decision-tree). +This tutorial shows how to run both front-end and back-end in an Azure cloud service. An alternative is to run the front-end in [Azure App Service](../app-service/index.yml) and use the [WebJobs](/azure/app-service/webjobs-create) feature for the back-end. For a tutorial that uses WebJobs, see [Get Started with the Azure WebJobs SDK](https://github.com/Azure/azure-webjobs-sdk/wiki). For information about how to choose the services that best fit your scenario, see [Azure App Service, Cloud Services, and virtual machines comparison](/azure/architecture/guide/technology-choices/compute-decision-tree). ## What you'll learn * How to enable your machine for Azure development by installing the Azure SDK. diff --git a/articles/cloud-services/cloud-services-guestos-msrc-releases.md b/articles/cloud-services/cloud-services-guestos-msrc-releases.md index d4543d43d5163..3d073ad507e58 100644 --- a/articles/cloud-services/cloud-services-guestos-msrc-releases.md +++ b/articles/cloud-services/cloud-services-guestos-msrc-releases.md @@ -11,43 +11,40 @@ ms.service: cloud-services ms.topic: article ms.tgt_pltfrm: na ms.workload: tbd -ms.date: 5/11/2022 +ms.date: 5/26/2022 ms.author: gunnarc --- # Azure Guest OS The following tables show the Microsoft Security Response Center (MSRC) updates applied to the Azure Guest OS. Search this article to determine if a particular update applies to the Guest OS you are using. Updates always carry forward for the particular [family][family-explain] they were introduced in. ->[!NOTE] - ->The May Guest OS is currently being rolled out to Cloud Service VMs that are configured for automatic updates. When the rollout is complete, this version will be made available for manual updates through the Azure portal and configuration files. The following patches are included in the May Guest OS. This list is subject to change. ## May 2022 Guest OS | Product Category | Parent KB Article | Vulnerability Description | Guest OS | Date First Introduced | | --- | --- | --- | --- | --- | -| Rel 22-05 | [5013941] | Latest Cumulative Update(LCU) | 6.44 | May 10, 2022 | -| Rel 22-05 | [5011486] | IE Cumulative Updates | 2.123, 3.110, 4.103 | Mar 8, 2022 | -| Rel 22-05 | [5013944] | Latest Cumulative Update(LCU) | 7.12 | May 10, 2022 | -| Rel 22-05 | [5013952] | Latest Cumulative Update(LCU) | 5.68 | May 10, 2022 | -| Rel 22-05 | [5013637] | .NET Framework 3.5 Security and Quality Rollup | 2.123 | May 10, 2022 | -| Rel 22-05 | [5012141] | .NET Framework 4.5.2 Security and Quality Rollup | 2.123 | Apr 12, 2022 | -| Rel 22-05 | [5013638] | .NET Framework 3.5 Security and Quality Rollup | 4.103 | May 10, 2022 | -| Rel 22-05 | [5012142] | .NET Framework 4.5.2 Security and Quality Rollup | 4.103 | Apr 12, 2022 | -| Rel 22-05 | [5013635] | .NET Framework 3.5 Security and Quality Rollup | 3.110 | May 10, 2022 | -| Rel 22-05 | [5012140] | . 
NET Framework 4.5.2 Security and Quality Rollup | 3.110 | Apr 12, 2022 | -| Rel 22-05 | [5013641] | . NET Framework 3.5 and 4.7.2 Cumulative Update | 6.44 | May 10, 2022 | -| Rel 22-05 | [5013630] | .NET Framework 4.8 Security and Quality Rollup | 7.12 | May 10, 2022 | -| Rel 22-05 | [5014012] | Monthly Rollup | 2.123 | May 10, 2022 | -| Rel 22-05 | [5014017] | Monthly Rollup | 3.110 | May 10, 2022 | -| Rel 22-05 | [5014011] | Monthly Rollup | 4.103 | May 10, 2022 | -| Rel 22-05 | [5014027] | Servicing Stack update | 3.110 | May 10, 2022 | -| Rel 22-05 | [5014025] | Servicing Stack update | 4.103 | May 10, 2022 | -| Rel 22-05 | [4578013] | Standalone Security Update | 4.103 | Aug 19, 2020 | -| Rel 22-05 | [5014026] | Servicing Stack update | 5.68 | May 10, 2022 | -| Rel 22-05 | [5011649] | Servicing Stack update | 2.123 | Mar 8, 2022 | -| Rel 22-05 | [4494175] | Microcode | 5.68 | Sep 1, 2020 | -| Rel 22-05 | [4494174] | Microcode | 6.44 | Sep 1, 2020 | +| Rel 22-05 | [5013941] | Latest Cumulative Update(LCU) | [6.44] | May 10, 2022 | +| Rel 22-05 | [5011486] | IE Cumulative Updates | [2.123], [3.110], [4.103] | Mar 8, 2022 | +| Rel 22-05 | [5013944] | Latest Cumulative Update(LCU) | [7.12] | May 10, 2022 | +| Rel 22-05 | [5013952] | Latest Cumulative Update(LCU) | [5.68] | May 10, 2022 | +| Rel 22-05 | [5013637] | .NET Framework 3.5 Security and Quality Rollup | [2.123] | May 10, 2022 | +| Rel 22-05 | [5012141] | .NET Framework 4.5.2 Security and Quality Rollup | [2.123] | Apr 12, 2022 | +| Rel 22-05 | [5013638] | .NET Framework 3.5 Security and Quality Rollup | [4.103] | May 10, 2022 | +| Rel 22-05 | [5012142] | .NET Framework 4.5.2 Security and Quality Rollup | [4.103] | Apr 12, 2022 | +| Rel 22-05 | [5013635] | .NET Framework 3.5 Security and Quality Rollup | [3.110] | May 10, 2022 | +| Rel 22-05 | [5012140] | . NET Framework 4.5.2 Security and Quality Rollup | [3.110] | Apr 12, 2022 | +| Rel 22-05 | [5013641] | . 
NET Framework 3.5 and 4.7.2 Cumulative Update | [6.44] | May 10, 2022 | +| Rel 22-05 | [5013630] | .NET Framework 4.8 Security and Quality Rollup | [7.12] | May 10, 2022 | +| Rel 22-05 | [5014012] | Monthly Rollup | [2.123] | May 10, 2022 | +| Rel 22-05 | [5014017] | Monthly Rollup | [3.110] | May 10, 2022 | +| Rel 22-05 | [5014011] | Monthly Rollup | [4.103] | May 10, 2022 | +| Rel 22-05 | [5014027] | Servicing Stack update | [3.110] | May 10, 2022 | +| Rel 22-05 | [5014025] | Servicing Stack update | [4.103] | May 10, 2022 | +| Rel 22-05 | [4578013] | Standalone Security Update | [4.103] | Aug 19, 2020 | +| Rel 22-05 | [5014026] | Servicing Stack update | [5.68] | May 10, 2022 | +| Rel 22-05 | [5011649] | Servicing Stack update | [2.123] | Mar 8, 2022 | +| Rel 22-05 | [4494175] | Microcode | [5.68] | Sep 1, 2020 | +| Rel 22-05 | [4494174] | Microcode | [6.44] | Sep 1, 2020 | [5013941]: https://support.microsoft.com/kb/5013941 [5011486]: https://support.microsoft.com/kb/5011486 @@ -71,6 +68,12 @@ The following tables show the Microsoft Security Response Center (MSRC) updates [5011649]: https://support.microsoft.com/kb/5011649 [4494175]: https://support.microsoft.com/kb/4494175 [4494174]: https://support.microsoft.com/kb/4494174 +[2.123]: ./cloud-services-guestos-update-matrix.md#family-2-releases +[3.110]: ./cloud-services-guestos-update-matrix.md#family-3-releases +[4.103]: ./cloud-services-guestos-update-matrix.md#family-4-releases +[5.68]: ./cloud-services-guestos-update-matrix.md#family-5-releases +[6.44]: ./cloud-services-guestos-update-matrix.md#family-6-releases +[7.12]: ./cloud-services-guestos-update-matrix.md#family-7-releases ## April 2022 Guest OS diff --git a/articles/cloud-services/cloud-services-guestos-update-matrix.md b/articles/cloud-services/cloud-services-guestos-update-matrix.md index 3e7ebef46fd0c..2f44a7b06f97a 100644 --- a/articles/cloud-services/cloud-services-guestos-update-matrix.md +++ b/articles/cloud-services/cloud-services-guestos-update-matrix.md @@ -11,7 +11,7 @@ ms.service: cloud-services ms.topic: article ms.tgt_pltfrm: na ms.workload: tbd -ms.date: 4/30/2022 +ms.date: 5/26/2022 ms.author: gunnarc --- # Azure Guest OS releases and SDK compatibility matrix @@ -37,6 +37,9 @@ Unsure about how to update your Guest OS? Check [this][cloud updates] out. ## News updates +###### **May 26, 2022** +The May Guest OS has released. + ###### **April 30, 2022** The April Guest OS has released. @@ -179,8 +182,9 @@ The September Guest OS has released. | Configuration string | Release date | Disable date | | --- | --- | --- | +| WA-GUEST-OS-7.12_202205-01 | May 26, 2022 | Post 7.14 | | WA-GUEST-OS-7.11_202204-01 | April 30, 2022 | Post 7.13 | -| WA-GUEST-OS-7.10_202203-01 | March 19, 2022 | Post 7.12 | +|~~WA-GUEST-OS-7.10_202203-01~| March 19, 2022 | May 26, 2022 | |~~WA-GUEST-OS-7.9_202202-01~~| March 2, 2022 | April 30, 2022 | |~~WA-GUEST-OS-7.8_202201-02~~| February 11, 2022 | March 19, 2022 | |~~WA-GUEST-OS-7.6_202112-01~~| January 10, 2022 | March 2, 2022 | @@ -200,8 +204,9 @@ The September Guest OS has released. 
| Configuration string | Release date | Disable date | | --- | --- | --- | +| WA-GUEST-OS-6.44_202205-01 | May 26, 2022 | Post 6.46 | | WA-GUEST-OS-6.43_202204-01 | April 30, 2022 | Post 6.45 | -| WA-GUEST-OS-6.42_202203-01 | March 19, 2022 | Post 6.44 | +|~~WA-GUEST-OS-6.42_202203-01~| March 19, 2022 | May 26, 2022 | |~~WA-GUEST-OS-6.41_202202-01~~| March 2, 2022 | April 30, 2022 | |~~WA-GUEST-OS-6.40_202201-02~~| February 11, 2022 | March 19, 2022 | |~~WA-GUEST-OS-6.38_202112-01~~| January 10, 2022 | March 2, 2022 | @@ -255,8 +260,9 @@ The September Guest OS has released. | Configuration string | Release date | Disable date | | --- | --- | --- | +| WA-GUEST-OS-5.68_202205-01 | May 26, 2022 | Post 5.70 | | WA-GUEST-OS-5.67_202204-01 | April 30, 2022 | Post 5.69 | -| WA-GUEST-OS-5.66_202203-01 | March 19, 2022 | Post 5.68 | +|~~WA-GUEST-OS-5.66_202203-01~~| March 19, 2022 | May 26, 2022 | |~~WA-GUEST-OS-5.65_202202-01~~| March 2, 2022 | April 30, 2022 | |~~WA-GUEST-OS-5.64_202201-02~~| February 11, 2022 | March 19, 2022 | |~~WA-GUEST-OS-5.62_202112-01~~| January 10, 2022 | March 2, 2022 | @@ -307,8 +313,9 @@ The September Guest OS has released. | Configuration string | Release date | Disable date | | --- | --- | --- | +| WA-GUEST-OS-4.103_202205-01 | May 26, 2022 | Post 4.105 | | WA-GUEST-OS-4.102_202204-01 | April 30, 2022 | Post 4.104 | -| WA-GUEST-OS-4.101_202203-01 | March 19, 2022 | Post 4.103 | +|~~WA-GUEST-OS-4.101_202203-01~| March 19, 2022 | May 26, 2022 | |~~WA-GUEST-OS-4.100_202202-01~~| March 2, 2022 | April 30, 2022 | |~~WA-GUEST-OS-4.99_202201-02~~| February 11 , 2022 | March 19, 2022 | |~~WA-GUEST-OS-4.97_202112-01~~| January 10 , 2022 | March 2, 2022 | @@ -359,8 +366,9 @@ The September Guest OS has released. | Configuration string | Release date | Disable date | | --- | --- | --- | +| WA-GUEST-OS-3.110_202205-01 | May 26, 2022 | Post 3.112 | | WA-GUEST-OS-3.109_202204-01 | April 30, 2022 | Post 3.111 | -| WA-GUEST-OS-3.108_202203-01 | March 19, 2022 | Post 3.110 | +|~~WA-GUEST-OS-3.108_202203-01~~| March 19, 2022 | May 26, 2022 | |~~WA-GUEST-OS-3.107_202202-01~~| March 2, 2022 | April 30, 2022 | |~~WA-GUEST-OS-3.106_202201-02~~| February 11, 2022 | March 19, 2022 | |~~WA-GUEST-OS-3.104_202112-01~~| January 10, 2022 | March 2, 2022| @@ -411,8 +419,9 @@ The September Guest OS has released. | Configuration string | Release date | Disable date | | --- | --- | --- | +| WA-GUEST-OS-2.123_202205-01 | May 26, 2022 | Post 2.125 | | WA-GUEST-OS-2.122_202204-01 | April 30, 2022 | Post 2.124 | -| WA-GUEST-OS-2.121_202203-01 | March 19, 2022 | Post 2.123 | +|~~WA-GUEST-OS-2.121_202203-01~~| March 19, 2022 | May 26, 2022 | |~~WA-GUEST-OS-2.120_202202-01~~| March 2, 2022 | April 30, 2022 | |~~WA-GUEST-OS-2.119_202201-02~~| February 11, 2022 | March 19, 2022 | |~~WA-GUEST-OS-2.117_202112-01~~| January 10, 2022 | March 2, 2022 | diff --git a/articles/cloud-services/cloud-services-nodejs-chat-app-socketio.md b/articles/cloud-services/cloud-services-nodejs-chat-app-socketio.md index 5385151136590..ddd215fc8470e 100644 --- a/articles/cloud-services/cloud-services-nodejs-chat-app-socketio.md +++ b/articles/cloud-services/cloud-services-nodejs-chat-app-socketio.md @@ -166,14 +166,14 @@ Azure emulator: > [!IMPORTANT] > Be sure to use a unique name, otherwise the publish process will fail. After the deployment has completed, the browser will open and navigate to the deployed service. 
> - > If you receive an error stating that the provided subscription name doesn't exist in the imported publish profile, you must download and import the publishing profile for your subscription before deploying to Azure. See the **Deploying the Application to Azure** section of [Build and deploy a Node.js application to an Azure Cloud Service](https://azure.microsoft.com/develop/nodejs/tutorials/getting-started/) + > If you receive an error stating that the provided subscription name doesn't exist in the imported publish profile, you must download and import the publishing profile for your subscription before deploying to Azure. See the **Deploying the Application to Azure** section of [Build and deploy a Node.js application to an Azure Cloud Service](/azure/cloud-services/cloud-services-nodejs-develop-deploy-app) > > ![A browser window displaying the service hosted on Azure][completed-app] > [!NOTE] - > If you receive an error stating that the provided subscription name doesn't exist in the imported publish profile, you must download and import the publishing profile for your subscription before deploying to Azure. See the **Deploying the Application to Azure** section of [Build and deploy a Node.js application to an Azure Cloud Service](https://azure.microsoft.com/develop/nodejs/tutorials/getting-started/) + > If you receive an error stating that the provided subscription name doesn't exist in the imported publish profile, you must download and import the publishing profile for your subscription before deploying to Azure. See the **Deploying the Application to Azure** section of [Build and deploy a Node.js application to an Azure Cloud Service](/azure/cloud-services/cloud-services-nodejs-develop-deploy-app) > > diff --git a/articles/cloud-services/cloud-services-python-how-to-use-service-management.md b/articles/cloud-services/cloud-services-python-how-to-use-service-management.md index d00545d36b776..277e228ba4b4c 100644 --- a/articles/cloud-services/cloud-services-python-how-to-use-service-management.md +++ b/articles/cloud-services/cloud-services-python-how-to-use-service-management.md @@ -25,7 +25,7 @@ To use the Service Management API, you need to [create an Azure account](https:/ The Azure SDK for Python wraps the [Service Management API][svc-mgmt-rest-api], which is a REST API. All API operations are performed over TLS and mutually authenticated by using X.509 v3 certificates. The management service can be accessed from within a service running in Azure. It also can be accessed directly over the Internet from any application that can send an HTTPS request and receive an HTTPS response. ## Installation -All the features described in this article are available in the `azure-servicemanagement-legacy` package, which you can install by using pip. For more information about installation (for example, if you're new to Python), see [Install Python and the Azure SDK](/azure/developer/python/azure-sdk-install). +All the features described in this article are available in the `azure-servicemanagement-legacy` package, which you can install by using pip. For more information about installation (for example, if you're new to Python), see [Install Python and the Azure SDK](/azure/developer/python/sdk/azure-sdk-install). ## Connect to service management To connect to the service management endpoint, you need your Azure subscription ID and a valid management certificate. You can obtain your subscription ID through the [Azure portal][management-portal]. 
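As a rough sketch of what this looks like in code (assuming the legacy `azure-servicemanagement-legacy` package and its `ServiceManagementService` client), the following snippet connects with a subscription ID and a management certificate file and then lists the subscription's cloud services. The subscription ID and certificate path are placeholders; verify the client and method names against the package version you install.

```python
# Minimal sketch, assuming the legacy azure-servicemanagement-legacy package.
# The subscription ID and certificate path are placeholders.
from azure.servicemanagement import ServiceManagementService

subscription_id = "<your-subscription-id>"
certificate_path = "/path/to/management-certificate.pem"

sms = ServiceManagementService(subscription_id, certificate_path)

# List the cloud services (hosted services) visible to this subscription.
for service in sms.list_hosted_services():
    print(service.service_name)
```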
@@ -468,4 +468,4 @@ For more information, see the [Python Developer Center](https://azure.microsoft. [svc-mgmt-rest-api]: /previous-versions/azure/ee460799(v=azure.100) -[cloud service]:/azure/cloud-services/ \ No newline at end of file +[cloud service]:/azure/cloud-services/ diff --git a/articles/cloud-services/cloud-services-sizes-specs.md b/articles/cloud-services/cloud-services-sizes-specs.md index 013086cea4238..ea4ef09be2c09 100644 --- a/articles/cloud-services/cloud-services-sizes-specs.md +++ b/articles/cloud-services/cloud-services-sizes-specs.md @@ -187,6 +187,24 @@ In addition to the substantial CPU power, the H-series offers diverse options fo \*RDMA capable +>[!IMPORTANT] +> Microsoft Azure has introduced newer generations of high-performance computing (HPC), general purpose, and memory-optimized virtual machines (VMs). For this reason, we recommend that you migrate workloads from the original H-series and H-series Promo VMs to our newer offerings by August 31, 2022. Azure [HC](../virtual-machines/hc-series.md), [HBv2](../virtual-machines/hbv2-series.md), [HBv3](../virtual-machines/hbv3-series.md), [Dv4](../virtual-machines/dv4-dsv4-series.md), [Dav4](../virtual-machines/dav4-dasv4-series.md), [Ev4](../virtual-machines/ev4-esv4-series.md), and [Eav4](../virtual-machines/eav4-easv4-series.md) VMs have greater memory bandwidth, improved networking capabilities, and better cost and performance across various HPC workloads. + + On August 31, 2022, we're retiring the following H-series Azure VM sizes: + +- H8 +- H8m +- H16 +- H16r +- H16m +- H16mr +- H8 Promo +- H8m Promo +- H16 Promo +- H16r Promo +- H16m Promo +- H16mr Promo + ## Configure sizes for Cloud Services You can specify the Virtual Machine size of a role instance as part of the service model described by the [service definition file](cloud-services-model-and-package.md#csdef). The size of the role determines the number of CPU cores, the memory capacity, and the local file system size that is allocated to a running instance. Choose the role size based on your application's resource requirement. diff --git a/articles/cloud-shell/example-terraform-bash.md b/articles/cloud-shell/example-terraform-bash.md index 2926e9bd6b8ae..b78b0745112ba 100644 --- a/articles/cloud-shell/example-terraform-bash.md +++ b/articles/cloud-shell/example-terraform-bash.md @@ -13,6 +13,7 @@ ms.tgt_pltfrm: vm-linux ms.topic: article ms.date: 11/15/2017 ms.author: tarcher +ms.tool: terraform ms.custom: devx-track-terraform --- diff --git a/articles/cognitive-services/Anomaly-Detector/How-to/multivariate-how-to.md b/articles/cognitive-services/Anomaly-Detector/How-to/multivariate-how-to.md index 91a880f375eb8..3d26af2805153 100644 --- a/articles/cognitive-services/Anomaly-Detector/How-to/multivariate-how-to.md +++ b/articles/cognitive-services/Anomaly-Detector/How-to/multivariate-how-to.md @@ -7,8 +7,8 @@ author: mrbullwinkle manager: nitinme ms.service: cognitive-services ms.subservice: anomaly-detector -ms.topic: how-to -ms.date: 01/18/2022 +ms.topic: conceptual +ms.date: 06/07/2022 ms.author: mbullwin --- @@ -23,7 +23,7 @@ The following are the basic steps needed to use MVAD: 1. Get model status. 1. Detect anomalies during the inference process with the trained MVAD model. -To test out this feature, try this SDK [Notebook](https://github.com/Azure-Samples/AnomalyDetector/blob/master/ipython-notebook/API%20Sample/Multivariate%20API%20Demo%20Notebook.ipynb). 
+To test out this feature, try this SDK [Notebook](https://github.com/Azure-Samples/AnomalyDetector/blob/master/ipython-notebook/API%20Sample/Multivariate%20API%20Demo%20Notebook.ipynb). For more instructions on how to run a jupyter notebook, please refer to [Install and Run a Jupyter Notebook](https://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/install.html#). ## Multivariate Anomaly Detector APIs overview @@ -357,10 +357,10 @@ The response contains the result status, variable information, inference paramet * Error code `InsufficientHistoricalData`. This usually happens only with the first few timestamps because the model inferences data in a window-based manner and it needs historical data to make a decision. For the first few timestamps, there is insufficient historical data, so inference cannot be performed on them. In this case, the error message can be ignored. * `"isAnomaly": false` indicates the current timestamp is not an anomaly. - * `severity ` indicates the relative severity of the anomaly and for normal data it is always 0. + * `severity` indicates the relative severity of the anomaly and for normal data it is always 0. * `score` is the raw output of the model on which the model makes a decision, which could be non-zero even for normal data points. * `"isAnomaly": true` indicates an anomaly at the current timestamp. - * `severity ` indicates the relative severity of the anomaly and for abnormal data it is always greater than 0. + * `severity` indicates the relative severity of the anomaly and for abnormal data it is always greater than 0. * `score` is the raw output of the model on which the model makes a decision. `severity` is a derived value from `score`. Every data point has a `score`. * `contributors` is a list containing the contribution score of each variable. Higher contribution scores indicate higher possibility of the root cause. This list is often used for interpreting anomalies and diagnosing the root causes. 
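As a small illustration of how these fields fit together, the following Python sketch inspects one timestamp's result and prints the most likely root-cause variables. The `result` dictionary is a hypothetical stand-in shaped like the fields described above, not a literal API response, so adapt the key names to the actual payload you receive.

```python
# Hypothetical per-timestamp result, shaped like the fields described above.
result = {
    "timestamp": "2021-01-02T12:00:00Z",
    "value": {
        "isAnomaly": True,
        "severity": 0.62,
        "score": 0.45,
        "contributors": [
            {"variable": "variable_1", "contributionScore": 0.51},
            {"variable": "variable_2", "contributionScore": 0.29},
            {"variable": "variable_3", "contributionScore": 0.20},
        ],
    },
}

value = result["value"]
if value["isAnomaly"]:
    # Higher contribution scores point at the more likely root-cause variables.
    top = sorted(value["contributors"],
                 key=lambda c: c["contributionScore"], reverse=True)[:2]
    print(f"{result['timestamp']}: severity={value['severity']}, "
          f"likely causes: {[c['variable'] for c in top]}")
```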
@@ -397,7 +397,7 @@ A sample request looks like following format, this case is detecting last two ti "2021-01-01T00:00:00Z", "2021-01-01T00:01:00Z", "2021-01-01T00:02:00Z" - //more variables + //more timestamps ], "values": [ 0.4551378545933972, @@ -412,7 +412,7 @@ A sample request looks like following format, this case is detecting last two ti "2021-01-01T00:00:00Z", "2021-01-01T00:01:00Z", "2021-01-01T00:02:00Z" - //more variables + //more timestamps ], "values": [ 0.9617871613964145, @@ -427,7 +427,7 @@ A sample request looks like following format, this case is detecting last two ti "2021-01-01T00:00:00Z", "2021-01-01T00:01:00Z", "2021-01-01T00:02:00Z" - //more variables + //more timestamps ], "values": [ 0.4030756879437628, @@ -541,5 +541,5 @@ See the following example of a JSON response: ## Next steps -* [What is the Multivariate Anomaly Detector API?](../overview-multivariate.md) -* [Join us to get more supports!](https://aka.ms/adadvisorsjoin) +* [Best practices for using the Multivariate Anomaly Detector API](../concepts/best-practices-multivariate.md) +* [Join us to get more supports!](https://aka.ms/adadvisorsjoin) \ No newline at end of file diff --git a/articles/cognitive-services/Anomaly-Detector/concepts/best-practices-multivariate.md b/articles/cognitive-services/Anomaly-Detector/concepts/best-practices-multivariate.md index 26efa012b3cb6..23bc4a1d6425e 100644 --- a/articles/cognitive-services/Anomaly-Detector/concepts/best-practices-multivariate.md +++ b/articles/cognitive-services/Anomaly-Detector/concepts/best-practices-multivariate.md @@ -1,5 +1,5 @@ --- -title: Best practices for using the Anomaly Detector Multivariate API +title: Best practices for using the Multivariate Anomaly Detector API titleSuffix: Azure Cognitive Services description: Best practices for using the Anomaly Detector Multivariate API's to apply anomaly detection to your time series data. services: cognitive-services @@ -8,12 +8,12 @@ manager: nitinme ms.service: cognitive-services ms.subservice: anomaly-detector ms.topic: conceptual -ms.date: 04/01/2021 +ms.date: 06/07/2022 ms.author: mbullwin keywords: anomaly detection, machine learning, algorithms --- -# Best practices for using the Anomaly Detector multivariate API +# Best practices for using the Multivariate Anomaly Detector API This article will provide guidance around recommended practices to follow when using the multivariate Anomaly Detector (MVAD) APIs. In this tutorial, you'll: @@ -35,25 +35,25 @@ Follow the instructions in this section to avoid errors while using MVAD. If you ## Data engineering -Now you're able to run the your code with MVAD APIs without any error. What could be done to improve your model accuracy? +Now you're able to run your code with MVAD APIs without any error. What could be done to improve your model accuracy? ### Data quality -* As the model learns normal patterns from historical data, the training data should represent the **overall normal** state of the system. It is hard for the model to learn these types of patterns if the training data is full of anomalies. An empirical threshold of abnormal rate is **1%** and below for good accuracy. -* In general, the **missing value ratio of training data should be under 20%**. Too much missing data may end up with automatically filled values (usually linear values or constant values) being learnt as normal patterns. That may result in real (not missing) data points being detected as anomalies. - However, there are cases when a high missing ratio is acceptable. 
For example, if you have two variables (time series) in a group using `Outer` mode to align their timestamps. One of them has one-minute granularity, the other one has hourly granularity. Then the hourly variable by nature has at least 59 / 60 = 98.33% missing data points. In such cases, it's fine to fill the hourly variable using the only value available (not missing) if it typically does not fluctuate too much. +* As the model learns normal patterns from historical data, the training data should represent the **overall normal** state of the system. It's hard for the model to learn these types of patterns if the training data is full of anomalies. As an empirical threshold, keep the abnormal rate at or below **1%** for good accuracy. +* In general, the **missing value ratio of training data should be under 20%**. Too much missing data may cause automatically filled values (usually linear or constant values) to be learned as normal patterns. That may result in real (not missing) data points being detected as anomalies. + ### Data quantity * The underlying model of MVAD has millions of parameters. It needs a minimum number of data points to learn an optimal set of parameters. The empirical rule is that you need to provide **15,000 or more data points (timestamps) per variable** to train the model for good accuracy. In general, the more training data, the better the accuracy. However, if you're not able to accrue that much data, we still encourage you to experiment with less data and see if the compromised accuracy is still acceptable. * Every time you call the inference API, you need to ensure that the source data file contains just enough data points. That is normally `slidingWindow` plus the number of data points that **really** need inference results. For example, in a streaming case where you want to run inference on **ONE** new timestamp at a time, the data file could contain only the leading `slidingWindow` plus **ONE** data point; then you could move on and create another zip file with the same number of data points (`slidingWindow` + 1) but moving ONE step to the "right" side and submit it for another inference job. - Anything beyond that or "before" the leading sliding window will not impact the inference result at all and may only cause performance downgrade.Anything below that may lead to an `NotEnoughInput` error. + Anything beyond that or "before" the leading sliding window won't impact the inference result at all and may only degrade performance. Anything below that may lead to a `NotEnoughInput` error. ### Timestamp round-up -In a group of variables (time series), each variable may be collected from an independent source. The timestamps of different variables may be inconsistent with each other and with the known frequencies. Here is a simple example. +In a group of variables (time series), each variable may be collected from an independent source. The timestamps of different variables may be inconsistent with each other and with the known frequencies. Here's a simple example. *Variable-1* @@ -75,7 +75,7 @@ In a group of variables (time series), each variable may be collected from an in | 12:01:34 | 1.7 | | 12:02:04 | 2.0 | -We have two variables collected from two sensors which send one data point every 30 seconds. However, the sensors are not sending data points at a strict even frequency, but sometimes earlier and sometimes later.
Because MVAD will take into consideration correlations between different variables, timestamps must be properly aligned so that the metrics can correctly reflect the condition of the system. In the above example, timestamps of variable 1 and variable 2 must be properly 'rounded' to their frequency before alignment. +We have two variables collected from two sensors that send one data point every 30 seconds. However, the sensors aren't sending data points at a strictly even frequency, but sometimes earlier and sometimes later. Because MVAD will take into consideration correlations between different variables, timestamps must be properly aligned so that the metrics can correctly reflect the condition of the system. In the above example, timestamps of variable 1 and variable 2 must be properly 'rounded' to their frequency before alignment. Let's see what happens if they're not pre-processed. If we set `alignMode` to be `Outer` (which means union of two sets), the merged table will be @@ -92,7 +92,7 @@ Let's see what happens if they're not pre-processed. If we set `alignMode` to be | 12:02:04 | `nan` | 2.0 | | 12:02:08 | 1.3 | `nan` | -`nan` indicates missing values. Obviously, the merged table is not what you might have expected. Variable 1 and variable 2 interleave, and the MVAD model cannot extract information about correlations between them. If we set `alignMode` to `Inner`, the merged table will be empty as there is no common timestamp in variable 1 and variable 2. +`nan` indicates missing values. Obviously, the merged table isn't what you might have expected. Variable 1 and variable 2 interleave, and the MVAD model can't extract information about correlations between them. If we set `alignMode` to `Inner`, the merged table will be empty as there's no common timestamp in variable 1 and variable 2. Therefore, the timestamps of variable 1 and variable 2 should be pre-processed (rounded to the nearest 30-second timestamps) and the new time series are @@ -128,19 +128,46 @@ Now the merged table is more reasonable. Values of different variables at close timestamps are well aligned, and the MVAD model can now extract correlation information. +### Limitations + +There are some limitations in both the training and inference APIs. Be aware of these limitations to avoid errors. + +#### General limitations +* Sliding window: 28-2880 timestamps; the default is 300. For periodic data, set the length of 2-4 cycles as the sliding window. +* API calls: At most 20 API calls per minute. +* Variable numbers: For training and asynchronous inference, at most 301 variables. +#### Training limitations +* Timestamps: At most 1,000,000. Too few timestamps may decrease model quality. We recommend using more than 15,000 timestamps. +* Granularity: The minimum granularity is `per_second`. + +#### Asynchronous inference limitations +* Timestamps: At most 20,000, at least 1 sliding window length. +#### Synchronous inference limitations +* Timestamps: At most 2880, at least 1 sliding window length. +* Detecting timestamps: From 1 to 10. + +## Model quality + +### How to deal with false positives and false negatives in real scenarios? +We provide `severity`, which indicates the significance of anomalies. False positives may be filtered out by setting a threshold on `severity`. Sometimes too many false positives may appear when there are pattern shifts in the inference data. In such cases, a model may need to be retrained on new data.
If the training data contains too many anomalies, there could be false negatives in the detection results. This is because the model learns patterns from the training data and anomalies may bring bias to the model. Thus, proper data cleaning may help reduce false negatives. + +### How to estimate which model is best to use according to training loss and validation loss? +Generally speaking, it's hard to decide which model is the best without a labeled dataset. However, we can use the training and validation losses to make a rough estimate and discard bad models. First, we need to observe whether training losses converge. Divergent losses often indicate poor quality of the model. Second, loss values may help identify whether underfitting or overfitting occurs. Models that are underfitting or overfitting may not have the desired performance. Third, although the definition of the loss function doesn't reflect the detection performance directly, loss values may be an auxiliary tool to estimate model quality. A low loss value is a necessary condition for a good model, so we may discard models with high loss values. + + ## Common pitfalls Apart from the [error code table](./troubleshoot.md), we've learned some common pitfalls from customers like you while using MVAD APIs. This table will help you avoid these issues. | Pitfall | Consequence | Explanation and solution | | --------- | ----- | ----- | -| Timestamps in training data and/or inference data were not rounded up to align with the respective data frequency of each variable. | The timestamps of the inference results are not as expected: either too few timestamps or too many timestamps. | Please refer to [Timestamp round-up](#timestamp-round-up). | +| Timestamps in training data and/or inference data weren't rounded up to align with the respective data frequency of each variable. | The timestamps of the inference results aren't as expected: either too few timestamps or too many timestamps. | Please refer to [Timestamp round-up](#timestamp-round-up). | | Too many anomalous data points in the training data | Model accuracy is impacted negatively because it treats anomalous data points as normal patterns during training. | Empirically, keeping the abnormal rate at or below **1%** will help. | | Too little training data | Model accuracy is compromised. | Empirically, training an MVAD model requires 15,000 or more data points (timestamps) per variable to maintain good accuracy.| -| Taking all data points with `isAnomaly`=`true` as anomalies | Too many false positives | You should use both `isAnomaly` and `severity` (or `score`) to sift out anomalies that are not severe and (optionally) use grouping to check the duration of the anomalies to suppress random noises. Please refer to the [FAQ](#faq) section below for the difference between `severity` and `score`. | +| Taking all data points with `isAnomaly`=`true` as anomalies | Too many false positives | You should use both `isAnomaly` and `severity` (or `score`) to sift out anomalies that aren't severe and (optionally) use grouping to check the duration of the anomalies to suppress random noise. Please refer to the [FAQ](#faq) section below for the difference between `severity` and `score`. | | Sub-folders are zipped into the data file for training or inference. | The CSV data files inside sub-folders are ignored during training and/or inference. | No sub-folders are allowed in the zip file. Please refer to [Folder structure](#folder-structure) for details.
| | Too much data in the inference data file: for example, compressing all historical data in the inference data zip file | You may not see any errors but you'll experience degraded performance when you try to upload the zip file to Azure Blob as well as when you try to run inference. | Please refer to [Data quantity](#data-quantity) for details. | -| Creating Anomaly Detector resources on Azure regions that don't support MVAD yet and calling MVAD APIs | You will get a "resource not found" error while calling the MVAD APIs. | During preview stage, MVAD is available on limited regions only. Please bookmark [What's new in Anomaly Detector](../whats-new.md) to keep up to date with MVAD region roll-outs. You could also file a GitHub issue or contact us at AnomalyDetector@microsoft.com to request for specific regions. | +| Creating Anomaly Detector resources in Azure regions that don't support MVAD yet and calling MVAD APIs | You'll get a "resource not found" error while calling the MVAD APIs. | During the preview stage, MVAD is available in limited regions only. Please bookmark [What's new in Anomaly Detector](../whats-new.md) to keep up to date with MVAD region roll-outs. You could also file a GitHub issue or contact us at AnomalyDetector@microsoft.com to request specific regions. | ## FAQ @@ -148,26 +175,27 @@ Apart from the [error code table](./troubleshoot.md), we've learned from custome ### How does MVAD's sliding window work? Let's use two examples to learn how MVAD's sliding window works. Suppose you have set `slidingWindow` = 1,440, and your input data is at one-minute granularity. -* **Streaming scenario**: You want to predict whether the ONE data point at "2021-01-02T00:00:00Z" is anomalous. Your `startTime` and `endTime` will be the same value ("2021-01-02T00:00:00Z"). Your inference data source, however, must contain at least 1,440 + 1 timestamps. Because, MVAD will take the leading data before the target data point ("2021-01-02T00:00:00Z") to decide whether the target is an anomaly. The length of the needed leading data is `slidingWindow` or 1,440 in this case. 1,440 = 60 * 24, so your input data must start from at latest "2021-01-01T00:00:00Z". +* **Streaming scenario**: You want to predict whether the ONE data point at "2021-01-02T00:00:00Z" is anomalous. Your `startTime` and `endTime` will be the same value ("2021-01-02T00:00:00Z"). Your inference data source, however, must contain at least 1,440 + 1 timestamps, because MVAD takes the leading data before the target data point ("2021-01-02T00:00:00Z") to decide whether the target is an anomaly. The length of the needed leading data is `slidingWindow`, or 1,440 in this case. 1,440 = 60 * 24, so your input data must start no later than "2021-01-01T00:00:00Z".
Therefore, your inference data source must contain data starting from `startTime` - `slidingWindow` and ideally contains in total of size `slidingWindow` + (`endTime` - `startTime`). -### Why only accepting zip files for training and inference? +### Why does the service only accept zip files for training and inference when sending data asynchronously? -We use zip files because in batch scenarios, we expect the size of both training and inference data would be very large and cannot be put in the HTTP request body. This allows users to perform batch inference on historical data either for model validation or data analysis. +We use zip files because in batch scenarios, we expect the size of both training and inference data would be very large and can't be put in the HTTP request body. This allows users to perform batch inference on historical data either for model validation or data analysis. However, this might be somewhat inconvenient for streaming inference and for high frequency data. We have a plan to add a new API specifically designed for streaming inference that users can pass data in the request body. ### What's the difference between `severity` and `score`? -Normally we recommend you use `severity` as the filter to sift out 'anomalies' that are not so important to your business. Depending on your scenario and data pattern, those anomalies that are less important often have relatively lower `severity` values or standalone (discontinuous) high `severity` values like random spikes. +Normally we recommend you to use `severity` as the filter to sift out 'anomalies' that aren't so important to your business. Depending on your scenario and data pattern, those anomalies that are less important often have relatively lower `severity` values or standalone (discontinuous) high `severity` values like random spikes. In cases where you've found a need of more sophisticated rules than thresholds against `severity` or duration of continuous high `severity` values, you may want to use `score` to build more powerful filters. Understanding how MVAD is using `score` to determine anomalies may help: -We consider whether a data point is anomalous from both global and local perspective. If `score` at a timestamp is higher than a certain threshold, then the timestamp is marked as an anomaly. If `score` is lower than the threshold but is relatively higher in a segment, it is also marked as an anomaly. +We consider whether a data point is anomalous from both global and local perspective. If `score` at a timestamp is higher than a certain threshold, then the timestamp is marked as an anomaly. If `score` is lower than the threshold but is relatively higher in a segment, it's also marked as an anomaly. + ## Next steps * [Quickstarts: Use the Anomaly Detector multivariate client library](../quickstarts/client-libraries-multivariate.md). 
-* [Learn about the underlying algorithms that power Anomaly Detector Multivariate](https://arxiv.org/abs/2009.02040) +* [Learn about the underlying algorithms that power Anomaly Detector Multivariate](https://arxiv.org/abs/2009.02040) \ No newline at end of file diff --git a/articles/cognitive-services/Anomaly-Detector/index.yml b/articles/cognitive-services/Anomaly-Detector/index.yml index 50a15404bf947..2aefe1d61ee45 100644 --- a/articles/cognitive-services/Anomaly-Detector/index.yml +++ b/articles/cognitive-services/Anomaly-Detector/index.yml @@ -13,52 +13,63 @@ metadata: ms.service: cognitive-services ms.subservice: anomaly-detector ms.topic: landing-page - ms.date: 05/12/2021 + ms.date: 02/10/2022 ms.author: mbullwin + +# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new + + landingContent: -- title: About the Anomaly Detector APIs + +# Card 1 +- title: About Azure Cognitive Services Anomaly Detector linkLists: - linkListType: overview links: - - text: What is Anomaly Detector (univariate)? + - text: What is Anomaly Detector? url: overview.md - - text: What is Anomaly Detector (multivariate)? - url: overview-multivariate.md - - text: What's new in Anomaly Detector? + - linkListType: whats-new + links: + - text: 🆕What's new in Anomaly Detector? url: whats-new.md - linkListType: get-started links: - - text: Demo web application (univariate & multivariate) + - text: Demo page of Anomaly Detector url: https://aka.ms/adDemo + - text: Sample Notebook + url: https://github.com/Azure-Samples/AnomalyDetector/tree/master/ipython-notebook - linkListType: video links: - - text: Introducing the Anomaly Detector API (univariate) + - text: Introducing the Univariate Anomaly Detector API url: /shows/AI-Show/Introducing-Azure-Anomaly-Detector?WT.mc_id=ai-c9-niner - - text: Introducing the new multivariate capabilities + - text: Introducing the Multivariate Anomaly Detector API url: /shows/AI-Show/New-to-Anomaly-Detector-Multivariate-Capabilities - text: More videos ... url: whats-new.md#videos - linkListType: reference links: - - text: Introducing Azure Anomaly Detector API (univariate) - url: https://techcommunity.microsoft.com/t5/AI-Customer-Engineering-Team/Introducing-Azure-Anomaly-Detector-API/ba-p/490162 - - text: Introducing Multivariate Anomaly Detection - url: https://techcommunity.microsoft.com/t5/azure-ai/introducing-multivariate-anomaly-detection/ba-p/2260679 - - text: More technical articles ... + - text: Support and help options + url: ../cognitive-services-support-options.md?context=/azure/cognitive-services/anomaly-detector/context/context + - text: Join the Anomaly Detector community for better support on Microsoft Teams + url: https://aka.ms/adadvisorsjoin + - text: Technical articles about Anomaly Detector url: whats-new.md#technical-articles - -- title: "Detect anomalies in your data" +# Card 2 - MVAD + +- title: "Multivariate Anomaly Detector" linkLists: - - linkListType: quickstart + - linkListType: overview links: - - text: Using C# (univariate) - url: ../anomaly-detector/quickstarts/client-libraries.md?pivots=programming-language-csharp - - text: Using JavaScript (univariate) - url: ../anomaly-detector/quickstarts/client-libraries.md?pivots=programming-language-javascript - - text: Using Python (univariate) - url: ../anomaly-detector/quickstarts/client-libraries.md?pivots=programming-language-python + - text: What is Multivariate Anomaly Detector? 
+ url: overview-multivariate.md + - text: Introducing Multivariate Anomaly Detection + url: https://techcommunity.microsoft.com/t5/azure-ai/introducing-multivariate-anomaly-detection/ba-p/2260679 + - linkListType: how-to-guide + links: + - text: 🆕Use Multivariate Anomaly Detector on your time series data + url: How-to/multivariate-how-to.md - linkListType: quickstart links: - text: Using C# (multivariate) @@ -69,19 +80,49 @@ landingContent: url: ../anomaly-detector/quickstarts/client-libraries-multivariate.md?pivots=programming-language-python - text: Using Java (multivariate) url: ../anomaly-detector/quickstarts/client-libraries-multivariate.md?pivots=programming-language-java + + +# Card 3 - UVAD + +- title: "Univariate Anomaly Detector" + linkLists: + - linkListType: overview + links: + - text: What is Univariate Anomaly Detector? + url: overview.md + - text: Introducing Azure Anomaly Detector API (univariate) + url: https://techcommunity.microsoft.com/t5/AI-Customer-Engineering-Team/Introducing-Azure-Anomaly-Detector-API/ba-p/490162 + - linkListType: how-to-guide + links: + - text: Adjust anomaly detection modes and parameters for your data (univariate) + url: how-to/identify-anomalies.md + - linkListType: quickstart + links: + - text: Using C# (univariate) + url: ../anomaly-detector/quickstarts/client-libraries.md?pivots=programming-language-csharp + - text: Using JavaScript (univariate) + url: ../anomaly-detector/quickstarts/client-libraries.md?pivots=programming-language-javascript + - text: Using Python (univariate) + url: ../anomaly-detector/quickstarts/client-libraries.md?pivots=programming-language-python + + + + +# Card 4 - tutorial + +- title: Tutorials + linkLists: - linkListType: tutorial links: - - text: Learn Multivariate Anomaly Detection in one hour - url: tutorials/learn-multivariate-anomaly-detection.md - - text: Visualize (univariate) anomalies using batch detection and Power BI - url: tutorials/batch-anomaly-detection-powerbi.md + - text: Visualize (univariate) anomalies using batch detection and Power BI + url: tutorials/batch-anomaly-detection-powerbi.md + +# Card 5 - title: Optimize your detection results linkLists: - linkListType: concept links: - - text: Adjust anomaly detection modes and parameters for your data (univariate) - url: how-to/identify-anomalies.md - text: Use best practices to optimize your anomaly detection results (univariate) url: concepts/anomaly-detection-best-practices.md - text: Use best practices to optimize your anomaly detection results (multivariate) @@ -91,6 +132,8 @@ landingContent: - text: Anomaly Detector best practices (univariate) url: /shows/AI-Show/Anomaly-Detector-v10-Best-Practices +# Card 6 + - title: Use Docker containers linkLists: - linkListType: how-to-guide @@ -107,20 +150,9 @@ landingContent: links: - text: Bring Anomaly Detector on-premises with containers support (univariate) url: /shows/AI-Show/Bring-Anomaly-Detector-on-premise-with-containers-support - -- title: Help and feedback - linkLists: - - linkListType: learn - links: - - text: Join the Anomaly Detector Advisors group on Microsoft Teams - url: https://aka.ms/adadvisorsjoin - - - linkListType: reference - links: - - text: Error codes (multivariate) - url: concepts/troubleshoot.md - - text: Support and help options - url: ../cognitive-services-support-options.md?context=/azure/cognitive-services/anomaly-detector/context/context + + +# Card 7 - title: Reference linkLists: @@ -151,4 +183,4 @@ landingContent: - text: Java SDK (multivariate) url: 
/java/api/com.azure.ai.anomalydetector - text: Node.js SDK (multivariate) - url: /javascript/api/overview/azure/ai-anomaly-detector-readme + url: /javascript/api/overview/azure/ai-anomaly-detector-readme \ No newline at end of file diff --git a/articles/cognitive-services/Anomaly-Detector/whats-new.md b/articles/cognitive-services/Anomaly-Detector/whats-new.md index c76f1c44c6377..4dbf9b9a93cbf 100644 --- a/articles/cognitive-services/Anomaly-Detector/whats-new.md +++ b/articles/cognitive-services/Anomaly-Detector/whats-new.md @@ -4,7 +4,7 @@ description: This article is regularly updated with news about the Azure Cogniti ms.service: cognitive-services ms.subservice: anomaly-detector ms.topic: overview -ms.date: 01/16/2022 +ms.date: 06/03/2022 --- # What's new in Anomaly Detector @@ -15,11 +15,23 @@ We've also added links to some user-generated content. Those items will be marke ## Release notes +### May 2022 + +* New blog released: [Detect anomalies in equipment with Multivariate Anomaly Detector in Azure Databricks](https://techcommunity.microsoft.com/t5/ai-cognitive-services-blog/detect-anomalies-in-equipment-with-anomaly-detector-in-azure/ba-p/3390688). + +### April 2022 +* Univariate Anomaly Detector is now integrated in Azure Data Explorer(ADX). Check out this [announcement blog post](https://techcommunity.microsoft.com/t5/ai-cognitive-services-blog/announcing-univariate-anomaly-detector-in-azure-data-explorer/ba-p/3285400) to learn more! + +### March 2022 +* Anomaly Detector (univariate) available in Sweden Central. + +### February 2022 +* **Multivariate Anomaly Detector API has been integrated with Synapse.** Check out this [blog](https://techcommunity.microsoft.com/t5/ai-cognitive-services-blog/announcing-multivariate-anomaly-detector-in-synapseml/ba-p/3122486) to learn more! + ### January 2022 * **Multivariate Anomaly Detector API v1.1-preview.1 public preview on 1/18.** In this version, Multivariate Anomaly Detector supports synchronous API for inference and added new fields in API output interpreting the correlation change of variables. * Univariate Anomaly Detector added new fields in API output. - ### November 2021 * Multivariate Anomaly Detector available in six more regions: UAE North, France Central, North Central US, Switzerland North, South Africa North, Jio India West. Now in total 26 regions are supported. diff --git a/articles/cognitive-services/Face/build-enrollment-app.md b/articles/cognitive-services/Computer-vision/Tutorials/build-enrollment-app.md similarity index 90% rename from articles/cognitive-services/Face/build-enrollment-app.md rename to articles/cognitive-services/Computer-vision/Tutorials/build-enrollment-app.md index 43bde1c05174b..d1c73cd00df94 100644 --- a/articles/cognitive-services/Face/build-enrollment-app.md +++ b/articles/cognitive-services/Computer-vision/Tutorials/build-enrollment-app.md @@ -59,7 +59,7 @@ Now that you have set up the sample app, you can tailor it to your own needs. For example, you may want to add situation-specific information on your consent page: > [!div class="mx-imgBorder"] -> ![app consent page](./media/enrollment-app/1-consent-1.jpg) +> ![app consent page](../media/enrollment-app/1-consent-1.jpg) Many face recognition issues are caused by low-quality reference images. 
Some factors that can degrade model performance are: * Face size (faces that are distant from the camera) @@ -72,20 +72,20 @@ The service provides image quality checks to help you make the choice of whether > [!div class="mx-imgBorder"] -> ![app image capture instruction page](./media/enrollment-app/4-instruction.jpg) +> ![app image capture instruction page](../media/enrollment-app/4-instruction.jpg) Notice the app also offers functionality for deleting the user's information and the option to re-add. > [!div class="mx-imgBorder"] -> ![profile management page](./media/enrollment-app/10-manage-2.jpg) +> ![profile management page](../media/enrollment-app/10-manage-2.jpg) -To extend the app's functionality to cover the full experience, read the [overview](enrollment-overview.md) for additional features to implement and best practices. +To extend the app's functionality to cover the full experience, read the [overview](../enrollment-overview.md) for additional features to implement and best practices. ## Deploy the app #### [Android](#tab/android) -First, make sure that your app is ready for production deployment: remove any keys or secrets from the app code and make sure you have followed the [security best practices](../cognitive-services-security.md?tabs=command-line%2ccsharp). +First, make sure that your app is ready for production deployment: remove any keys or secrets from the app code and make sure you have followed the [security best practices](../../cognitive-services-security.md?tabs=command-line%2ccsharp). When you're ready to release your app for production, you'll generate a release-ready APK file, which is the package file format for Android apps. This APK file must be signed with a private key. With this release build, you can begin distributing the app to your devices directly. @@ -95,7 +95,7 @@ Once you've created a signed APK, see the created a Computer Vision resource and obtained a key and endpoint URL. If you're using a client SDK, you'll also need to authenticate a client object. If you haven't done these steps, follow the [quickstart](../quickstarts-sdk/image-analysis-client-library.md) to get started. - -## Submit data to the service - -The code in this guide uses remote images referenced by URL. You may want to try different images on your own to see the full capability of the Image Analysis features. - -#### [REST](#tab/rest) - -When analyzing a local image, you put the binary image data in the HTTP request body. For a remote image, you specify the image's URL by formatting the request body like this: `{"url":"http://example.com/images/test.jpg"}`. - -#### [C#](#tab/csharp) - -In your main class, save a reference to the URL of the image you want to analyze. - -[!code-csharp[](~/cognitive-services-quickstart-code/dotnet/ComputerVision/ImageAnalysisQuickstart.cs?name=snippet_analyze_url)] - -#### [Java](#tab/java) - -In your main class, save a reference to the URL of the image you want to analyze. - -[!code-java[](~/cognitive-services-quickstart-code/java/ComputerVision/src/main/java/ImageAnalysisQuickstart.java?name=snippet_urlimage)] - -#### [JavaScript](#tab/javascript) - -In your main function, save a reference to the URL of the image you want to analyze. - -[!code-javascript[](~/cognitive-services-quickstart-code/javascript/ComputerVision/ImageAnalysisQuickstart.js?name=snippet_describe_image)] - -#### [Python](#tab/python) - -Save a reference to the URL of the image you want to analyze. 
- -[!code-python[](~/cognitive-services-quickstart-code/python/ComputerVision/ImageAnalysisQuickstart.py?name=snippet_remoteimage)] - ---- - - -## Determine how to process the data - -### Select visual features - -The Analyze API gives you access to all of the service's image analysis features. Choose which operations to do based on your own use case. See the [overview](../overview.md) for a description of each feature. The examples below add all of the available visual features, but for practical usage you'll likely only need one or two. - -#### [REST](#tab/rest) - -You can specify which features you want to use by setting the URL query parameters of the [Analyze API](https://westus.dev.cognitive.microsoft.com/docs/services/computer-vision-v3-2/operations/56f91f2e778daf14a499f21b). A parameter can have multiple values, separated by commas. Each feature you specify will require more computation time, so only specify what you need. - -|URL parameter | Value | Description| -|---|---|--| -|`visualFeatures`|`Adult` | detects if the image is pornographic in nature (depicts nudity or a sex act), or is gory (depicts extreme violence or blood). Sexually suggestive content ("racy" content) is also detected.| -|`visualFeatures`|`Brands` | detects various brands within an image, including the approximate location. The Brands argument is only available in English.| -|`visualFeatures`|`Categories` | categorizes image content according to a taxonomy defined in documentation. This value is the default value of `visualFeatures`.| -|`visualFeatures`|`Color` | determines the accent color, dominant color, and whether an image is black&white.| -|`visualFeatures`|`Description` | describes the image content with a complete sentence in supported languages.| -|`visualFeatures`|`Faces` | detects if faces are present. If present, generate coordinates, gender and age.| -|`visualFeatures`|`ImageType` | detects if image is clip art or a line drawing.| -|`visualFeatures`|`Objects` | detects various objects within an image, including the approximate location. The Objects argument is only available in English.| -|`visualFeatures`|`Tags` | tags the image with a detailed list of words related to the image content.| -|`details`| `Celebrities` | identifies celebrities if detected in the image.| -|`details`|`Landmarks` |identifies landmarks if detected in the image.| - -A populated URL might look like this: - -`https://{endpoint}/vision/v2.1/analyze?visualFeatures=Description,Tags&details=Celebrities` - -#### [C#](#tab/csharp) - -Define your new method for image analysis. Add the code below, which specifies visual features you'd like to extract in your analysis. See the **[VisualFeatureTypes](/dotnet/api/microsoft.azure.cognitiveservices.vision.computervision.models.visualfeaturetypes)** enum for a complete list. - -[!code-csharp[](~/cognitive-services-quickstart-code/dotnet/ComputerVision/ImageAnalysisQuickstart.cs?name=snippet_visualfeatures)] - - -#### [Java](#tab/java) - -Specify which visual features you'd like to extract in your analysis. See the [VisualFeatureTypes](/java/api/com.microsoft.azure.cognitiveservices.vision.computervision.models.visualfeaturetypes) enum for a complete list. - -[!code-java[](~/cognitive-services-quickstart-code/java/ComputerVision/src/main/java/ImageAnalysisQuickstart.java?name=snippet_features_remote)] - -#### [JavaScript](#tab/javascript) - -Specify which visual features you'd like to extract in your analysis. 
See the [VisualFeatureTypes](/javascript/api/@azure/cognitiveservices-computervision/visualfeaturetypes?view=azure-node-latest) enum for a complete list. - -[!code-javascript[](~/cognitive-services-quickstart-code/javascript/ComputerVision/ImageAnalysisQuickstart.js?name=snippet_features_remote)] - -#### [Python](#tab/python) - -Specify which visual features you'd like to extract in your analysis. See the [VisualFeatureTypes](/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.models.visualfeaturetypes?view=azure-python) enum for a complete list. - -[!code-python[](~/cognitive-services-quickstart-code/python/ComputerVision/ImageAnalysisQuickstart.py?name=snippet_features_remote)] - - ---- - - -### Specify languages - -You can also specify the language of the returned data. - -#### [REST](#tab/rest) - -The following URL query parameter specifies the language. The default value is `en`. - -|URL parameter | Value | Description| -|---|---|--| -|`language`|`en` | English| -|`language`|`es` | Spanish| -|`language`|`ja` | Japanese| -|`language`|`pt` | Portuguese| -|`language`|`zh` | Simplified Chinese| - -A populated URL might look like this: - -`https://{endpoint}/vision/v2.1/analyze?visualFeatures=Description,Tags&details=Celebrities&language=en` - -#### [C#](#tab/csharp) - -Use the *language* parameter of [AnalyzeImageAsync](/dotnet/api/microsoft.azure.cognitiveservices.vision.computervision.computervisionclientextensions.analyzeimageasync?view=azure-dotnet#microsoft-azure-cognitiveservices-vision-computervision-computervisionclientextensions-analyzeimageasync(microsoft-azure-cognitiveservices-vision-computervision-icomputervisionclient-system-string-system-collections-generic-ilist((system-nullable((microsoft-azure-cognitiveservices-vision-computervision-models-visualfeaturetypes))))-system-collections-generic-ilist((system-nullable((microsoft-azure-cognitiveservices-vision-computervision-models-details))))-system-string-system-collections-generic-ilist((system-nullable((microsoft-azure-cognitiveservices-vision-computervision-models-descriptionexclude))))-system-string-system-threading-cancellationtoken)) call to specify a language. A method call that specifies a language might look like the following. - -```csharp -ImageAnalysis results = await client.AnalyzeImageAsync(imageUrl, visualFeatures: features, language: "en"); -``` - -#### [Java](#tab/java) - -Use the [AnalyzeImageOptionalParameter](/java/api/com.microsoft.azure.cognitiveservices.vision.computervision.models.analyzeimageoptionalparameter) input in your Analyze call to specify a language. A method call that specifies a language might look like the following. - - -```java -ImageAnalysis analysis = compVisClient.computerVision().analyzeImage().withUrl(pathToRemoteImage) - .withVisualFeatures(featuresToExtractFromLocalImage) - .language("en") - .execute(); -``` - -#### [JavaScript](#tab/javascript) - -Use the **language** property of the [ComputerVisionClientAnalyzeImageOptionalParams](/javascript/api/@azure/cognitiveservices-computervision/computervisionclientanalyzeimageoptionalparams) input in your Analyze call to specify a language. A method call that specifies a language might look like the following. 
- -```javascript -const result = (await computerVisionClient.analyzeImage(imageURL,{visualFeatures: features, language: 'en'})); -``` - -#### [Python](#tab/python) - -Use the *language* parameter of your [analyze_image](/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.operations.computervisionclientoperationsmixin?view=azure-python#azure-cognitiveservices-vision-computervision-operations-computervisionclientoperationsmixin-analyze-image) call to specify a language. A method call that specifies a language might look like the following. - -```python -results_remote = computervision_client.analyze_image(remote_image_url , remote_image_features, remote_image_details, 'en') -``` - ---- - - -## Get results from the service - -This section shows you how to parse the results of the API call. It includes the API call itself. - -> [!NOTE] -> **Scoped API calls** -> -> Some of the features in Image Analysis can be called directly as well as through the Analyze API call. For example, you can do a scoped analysis of only image tags by making a request to `https://{endpoint}/vision/v3.2/tag` (or to the corresponding method in the SDK). See the [reference documentation](https://westus.dev.cognitive.microsoft.com/docs/services/computer-vision-v3-2/operations/56f91f2e778daf14a499f21b) for other features that can be called separately. - -#### [REST](#tab/rest) - -The service returns a `200` HTTP response, and the body contains the returned data in the form of a JSON string. The following text is an example of a JSON response. - -```json -{ - "tags":[ - { - "name":"outdoor", - "score":0.976 - }, - { - "name":"bird", - "score":0.95 - } - ], - "description":{ - "tags":[ - "outdoor", - "bird" - ], - "captions":[ - { - "text":"partridge in a pear tree", - "confidence":0.96 - } - ] - } -} -``` - -See the following table for explanations of the fields in this example: - -Field | Type | Content -------|------|------| -Tags | `object` | The top-level object for an array of tags. -tags[].Name | `string` | The keyword from the tags classifier. -tags[].Score | `number` | The confidence score, between 0 and 1. -description | `object` | The top-level object for an image description. -description.tags[] | `string` | The list of tags. If there is insufficient confidence in the ability to produce a caption, the tags might be the only information available to the caller. -description.captions[].text | `string` | A phrase describing the image. -description.captions[].confidence | `number` | The confidence score for the phrase. - -### Error codes - -See the following list of possible errors and their causes: - -* 400 - * `InvalidImageUrl` - Image URL is badly formatted or not accessible. - * `InvalidImageFormat` - Input data is not a valid image. - * `InvalidImageSize` - Input image is too large. - * `NotSupportedVisualFeature` - Specified feature type isn't valid. - * `NotSupportedImage` - Unsupported image, for example child pornography. - * `InvalidDetails` - Unsupported `detail` parameter value. - * `NotSupportedLanguage` - The requested operation isn't supported in the language specified. - * `BadArgument` - More details are provided in the error message. -* 415 - Unsupported media type error. 
The Content-Type isn't in the allowed types: - * For an image URL, Content-Type should be `application/json` - * For a binary image data, Content-Type should be `application/octet-stream` or `multipart/form-data` -* 500 - * `FailedToProcess` - * `Timeout` - Image processing timed out. - * `InternalServerError` - - -#### [C#](#tab/csharp) - -The following code calls the Image Analysis API and prints the results to the console. - -[!code-csharp[](~/cognitive-services-quickstart-code/dotnet/ComputerVision/ImageAnalysisQuickstart.cs?name=snippet_analyze)] - -#### [Java](#tab/java) - -The following code calls the Image Analysis API and prints the results to the console. - -[!code-java[](~/cognitive-services-quickstart-code/java/ComputerVision/src/main/java/ImageAnalysisQuickstart.java?name=snippet_analyze)] - -#### [JavaScript](#tab/javascript) - -The following code calls the Image Analysis API and prints the results to the console. - -[!code-javascript[](~/cognitive-services-quickstart-code/javascript/ComputerVision/ImageAnalysisQuickstart.js?name=snippet_analyze)] - -#### [Python](#tab/python) - -The following code calls the Image Analysis API and prints the results to the console. - -[!code-python[](~/cognitive-services-quickstart-code/python/ComputerVision/ImageAnalysisQuickstart.py?name=snippet_analyze)] - - ---- - -> [!TIP] -> While working with Computer Vision, you might encounter transient failures caused by [rate limits](https://azure.microsoft.com/pricing/details/cognitive-services/computer-vision/) enforced by the service, or other transient problems like network outages. For information about handling these types of failures, see [Retry pattern](/azure/architecture/patterns/retry) in the Cloud Design Patterns guide, and the related [Circuit Breaker pattern](/azure/architecture/patterns/circuit-breaker). - - -## Next steps - -* Explore the [concept articles](../concept-object-detection.md) to learn more about each feature. -* See the [API reference](https://westus.dev.cognitive.microsoft.com/docs/services/computer-vision-v3-2/operations/56f91f2e778daf14a499f21b) to learn more about the API functionality. \ No newline at end of file diff --git a/articles/cognitive-services/Computer-vision/computer-vision-how-to-install-containers.md b/articles/cognitive-services/Computer-vision/computer-vision-how-to-install-containers.md index de0e20a580565..931c239b03ab9 100644 --- a/articles/cognitive-services/Computer-vision/computer-vision-how-to-install-containers.md +++ b/articles/cognitive-services/Computer-vision/computer-vision-how-to-install-containers.md @@ -20,7 +20,7 @@ keywords: on-premises, OCR, Docker, container Containers enable you to run the Computer Vision APIs in your own environment. Containers are great for specific security and data governance requirements. In this article you'll learn how to download, install, and run Computer Vision containers. -The *Read* OCR container allows you to extract printed and handwritten text from images and documents with support for JPEG, PNG, BMP, PDF, and TIFF file formats. For more information, see the [Read API how-to guide](Vision-API-How-to-Topics/call-read-api.md). +The *Read* OCR container allows you to extract printed and handwritten text from images and documents with support for JPEG, PNG, BMP, PDF, and TIFF file formats. For more information, see the [Read API how-to guide](how-to/call-read-api.md). 
## What's new The `3.2-model-2022-04-30` GA version of the Read container is available with support for [164 languages and other enhancements](./whats-new.md#may-2022). If you are an existing customer, please follow the [download instructions](#docker-pull-for-the-read-ocr-container) to get started. diff --git a/articles/cognitive-services/Computer-vision/concept-detecting-faces.md b/articles/cognitive-services/Computer-vision/concept-detecting-faces.md index 4af6101316e34..490a79c0a4b8d 100644 --- a/articles/cognitive-services/Computer-vision/concept-detecting-faces.md +++ b/articles/cognitive-services/Computer-vision/concept-detecting-faces.md @@ -19,7 +19,7 @@ ms.custom: seodec18 Computer Vision can detect human faces within an image and generate rectangle coordinates for each detected face. > [!NOTE] -> This feature is also offered by the Azure [Face](../face/index.yml) service. Use this alternative for more detailed face analysis, including face identification and head pose detection. +> This feature is also offered by the Azure [Face](./index-identity.yml) service. Use this alternative for more detailed face analysis, including face identification and head pose detection. ## Face detection examples diff --git a/articles/cognitive-services/Computer-vision/concept-face-detection.md b/articles/cognitive-services/Computer-vision/concept-face-detection.md new file mode 100644 index 0000000000000..163e08204226f --- /dev/null +++ b/articles/cognitive-services/Computer-vision/concept-face-detection.md @@ -0,0 +1,109 @@ +--- +title: "Face detection and attributes concepts" +titleSuffix: Azure Cognitive Services +description: Learn more about face detection; face detection is the action of locating human faces in an image and optionally returning different kinds of face-related data. +services: cognitive-services +author: PatrickFarley +manager: nitinme + +ms.service: cognitive-services +ms.subservice: face-api +ms.topic: conceptual +ms.date: 10/27/2021 +ms.author: pafarley +--- + +# Face detection and attributes + +This article explains the concepts of face detection and face attribute data. Face detection is the process of locating human faces in an image and optionally returning different kinds of face-related data. + +You use the [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) API to detect faces in an image. To get started using the REST API or a client SDK, follow a [quickstart](./quickstarts-sdk/identity-client-library.md). Or, for a more in-depth guide, see [Call the detect API](./how-to/identity-detect-faces.md). + +## Face rectangle + +Each detected face corresponds to a `faceRectangle` field in the response. This is a set of pixel coordinates for the left, top, width, and height of the detected face. Using these coordinates, you can get the location and size of the face. In the API response, faces are listed in size order from largest to smallest. + +## Face ID + +The face ID is a unique identifier string for each detected face in an image. You can request a face ID in your [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) API call. + +## Face landmarks + +Face landmarks are a set of easy-to-find points on a face, such as the pupils or the tip of the nose. By default, there are 27 predefined landmark points. 
The following figure shows all 27 points: + +![A face diagram with all 27 landmarks labeled](./media/landmarks.1.jpg) + +The coordinates of the points are returned in units of pixels. + +The Detection_03 model currently has the most accurate landmark detection. The eye and pupil landmarks it returns are precise enough to enable gaze tracking of the face. + +## Attributes + +Attributes are a set of features that can optionally be detected by the [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) API. The following attributes can be detected: + +* **Accessories**. Whether the given face has accessories. This attribute returns possible accessories including headwear, glasses, and mask, with confidence score between zero and one for each accessory. +* **Age**. The estimated age in years of a particular face. +* **Blur**. The blurriness of the face in the image. This attribute returns a value between zero and one and an informal rating of low, medium, or high. +* **Emotion**. A list of emotions with their detection confidence for the given face. Confidence scores are normalized, and the scores across all emotions add up to one. The emotions returned are happiness, sadness, neutral, anger, contempt, disgust, surprise, and fear. +* **Exposure**. The exposure of the face in the image. This attribute returns a value between zero and one and an informal rating of underExposure, goodExposure, or overExposure. +* **Facial hair**. The estimated facial hair presence and the length for the given face. +* **Gender**. The estimated gender of the given face. Possible values are male, female, and genderless. +* **Glasses**. Whether the given face has eyeglasses. Possible values are NoGlasses, ReadingGlasses, Sunglasses, and Swimming Goggles. +* **Hair**. The hair type of the face. This attribute shows whether the hair is visible, whether baldness is detected, and what hair colors are detected. +* **Head pose**. The face's orientation in 3D space. This attribute is described by the roll, yaw, and pitch angles in degrees, which are defined according to the [right-hand rule](https://en.wikipedia.org/wiki/Right-hand_rule). The order of three angles is roll-yaw-pitch, and each angle's value range is from -180 degrees to 180 degrees. 3D orientation of the face is estimated by the roll, yaw, and pitch angles in order. See the following diagram for angle mappings: + + ![A head with the pitch, roll, and yaw axes labeled](./media/headpose.1.jpg) + + For more details on how to use these values, see the [Head pose how-to guide](./how-to/use-headpose.md). +* **Makeup**. Whether the face has makeup. This attribute returns a Boolean value for eyeMakeup and lipMakeup. +* **Mask**. Whether the face is wearing a mask. This attribute returns a possible mask type, and a Boolean value to indicate whether nose and mouth are covered. +* **Noise**. The visual noise detected in the face image. This attribute returns a value between zero and one and an informal rating of low, medium, or high. +* **Occlusion**. Whether there are objects blocking parts of the face. This attribute returns a Boolean value for eyeOccluded, foreheadOccluded, and mouthOccluded. +* **Smile**. The smile expression of the given face. This value is between zero for no smile and one for a clear smile. +* **QualityForRecognition** The overall image quality regarding whether the image being used in the detection is of sufficient quality to attempt face recognition on. 
The value is an informal rating of low, medium, or high. Only "high" quality images are recommended for person enrollment, and quality at or above "medium" is recommended for identification scenarios. + >[!NOTE] + > The availability of each attribute depends on the detection model specified. The QualityForRecognition attribute also depends on the recognition model, as it is currently only available when using a combination of detection model detection_01 or detection_03, and recognition model recognition_03 or recognition_04. + +> [!IMPORTANT] +> Face attributes are predicted through the use of statistical algorithms. They might not always be accurate. Use caution when you make decisions based on attribute data. + +## Input data + +Use the following tips to make sure that your input images give the most accurate detection results: + +* The supported input image formats are JPEG, PNG, GIF (the first frame), BMP. +* The image file size should be no larger than 6 MB. +* The minimum detectable face size is 36 x 36 pixels in an image that is no larger than 1920 x 1080 pixels. Images larger than 1920 x 1080 pixels have a proportionally larger minimum face size. Reducing the face size might cause some faces not to be detected, even if they are larger than the minimum detectable face size. +* The maximum detectable face size is 4096 x 4096 pixels. +* Faces outside the size range of 36 x 36 to 4096 x 4096 pixels will not be detected. +* Some faces might not be recognized because of technical challenges, such as: + * Images with extreme lighting, for example, severe backlighting. + * Obstructions that block one or both eyes. + * Differences in hair type or facial hair. + * Changes in facial appearance because of age. + * Extreme facial expressions. + +### Input data with orientation information + +Some input images in JPEG format might contain orientation information in Exchangeable image file format (Exif) metadata. If Exif orientation is available, images will be automatically rotated to the correct orientation before sending for face detection. The face rectangle, landmarks, and head pose for each detected face will be estimated based on the rotated image. + +To properly display the face rectangle and landmarks, you need to make sure the image is rotated correctly. Most image visualization tools will auto-rotate the image according to its Exif orientation by default. For other tools, you might need to apply the rotation using your own code. The following examples show a face rectangle on a rotated image (left) and a non-rotated image (right). + +![Two face images with and without rotation](./media/image-rotation.png) + +### Video input + +If you're detecting faces from a video feed, you may be able to improve performance by adjusting certain settings on your video camera: + +* **Smoothing**: Many video cameras apply a smoothing effect. You should turn this off if you can because it creates a blur between frames and reduces clarity. +* **Shutter Speed**: A faster shutter speed reduces the amount of motion between frames and makes each frame clearer. We recommend shutter speeds of 1/60 second or faster. +* **Shutter Angle**: Some cameras specify shutter angle instead of shutter speed. You should use a lower shutter angle if possible. This will result in clearer video frames. + + >[!NOTE] + > A camera with a lower shutter angle will receive less light in each frame, so the image will be darker. You'll need to determine the right level to use.
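To see how the attributes above are requested in practice, here's a minimal REST sketch in Python using the `requests` package. It isn't taken from the quickstart; the endpoint, key, and image URL are placeholders, and the attribute and model choices simply follow the QualityForRecognition note above.

```python
# A minimal REST sketch using the requests package; it isn't taken from the
# quickstart. The endpoint, key, and image URL are placeholders, and the
# attribute/model choices follow the QualityForRecognition note above.
import requests

ENDPOINT = "https://<your-resource-name>.cognitiveservices.azure.com"  # placeholder
KEY = "<your-key>"  # placeholder


def detect_faces(image_url: str):
    response = requests.post(
        f"{ENDPOINT}/face/v1.0/detect",
        params={
            # detection_03 with recognition_03/04 is required for qualityForRecognition
            "detectionModel": "detection_03",
            "recognitionModel": "recognition_04",
            "returnFaceId": "false",
            "returnFaceAttributes": "qualityForRecognition",
        },
        headers={"Ocp-Apim-Subscription-Key": KEY, "Content-Type": "application/json"},
        json={"url": image_url},
    )
    response.raise_for_status()
    return response.json()  # one entry per detected face


for face in detect_faces("https://example.com/photo.jpg"):  # placeholder image URL
    rect = face["faceRectangle"]
    quality = face["faceAttributes"]["qualityForRecognition"]
    print(f"Face at ({rect['left']}, {rect['top']}): qualityForRecognition = {quality}")
```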
+ +## Next steps + +Now that you're familiar with face detection concepts, learn how to write a script that detects faces in a given image. + +* [Call the detect API](./how-to/identity-detect-faces.md) diff --git a/articles/cognitive-services/Computer-vision/concept-face-recognition.md b/articles/cognitive-services/Computer-vision/concept-face-recognition.md new file mode 100644 index 0000000000000..d1c7d197a00bd --- /dev/null +++ b/articles/cognitive-services/Computer-vision/concept-face-recognition.md @@ -0,0 +1,72 @@ +--- +title: "Face recognition concepts" +titleSuffix: Azure Cognitive Services +description: Learn the concept of Face recognition, its related operations, and the underlying data structures. +services: cognitive-services +author: PatrickFarley +manager: nitinme + +ms.service: cognitive-services +ms.subservice: face-api +ms.topic: conceptual +ms.date: 10/27/2021 +ms.author: pafarley +--- + +# Face recognition concepts + +This article explains the concept of Face recognition, its related operations, and the underlying data structures. Broadly, Face recognition refers to the method of verifying or identifying an individual by their face. + +Verification is one-to-one matching that takes two faces and returns whether they are the same face, and identification is one-to-many matching that takes a single face as input and returns a set of matching candidates. Face recognition is important in implementing the identity verification scenario, which enterprises and apps can use to verify that a (remote) user is who they claim to be. + +## Related data structures + +The recognition operations use mainly the following data structures. These objects are stored in the cloud and can be referenced by their ID strings. ID strings are always unique within a subscription, but name fields may be duplicated. + +|Name|Description| +|:--|:--| +|DetectedFace| This single face representation is retrieved by the [face detection](./how-to/identity-detect-faces.md) operation. Its ID expires 24 hours after it's created.| +|PersistedFace| When DetectedFace objects are added to a group, such as FaceList or Person, they become PersistedFace objects. They can be [retrieved](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039524c) at any time and don't expire.| +|[FaceList](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039524b) or [LargeFaceList](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/5a157b68d2de3616c086f2cc)| This data structure is an assorted list of PersistedFace objects. A FaceList has a unique ID, a name string, and optionally a user data string.| +|[Person](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523c)| This data structure is a list of PersistedFace objects that belong to the same person. It has a unique ID, a name string, and optionally a user data string.| +|[PersonGroup](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395244) or [LargePersonGroup](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599acdee6ac60f11b48b5a9d)| This data structure is an assorted list of Person objects. It has a unique ID, a name string, and optionally a user data string. 
A PersonGroup must be [trained](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395249) before it can be used in recognition operations.| +|PersonDirectory | This data structure is like **LargePersonGroup** but offers additional storage capacity and other added features. For more information, see [Use the PersonDirectory structure](./how-to/use-persondirectory.md). + +## Recognition operations + +This section details how the underlying operations use the above data structures to identify and verify a face. + +### PersonGroup creation and training + +You need to create a [PersonGroup](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395244) or [LargePersonGroup](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599acdee6ac60f11b48b5a9d) to store the set of people to match against. PersonGroups hold [Person](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523c) objects, which each represent an individual person and hold a set of face data belonging to that person. + +The [Train](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395249) operation prepares the data set to be used in face data comparisons. + +### Identification + +The [Identify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239) operation takes one or several source face IDs (from a DetectedFace or PersistedFace object) and a PersonGroup or LargePersonGroup. It returns a list of the Person objects that each source face might belong to. Returned Person objects are wrapped as Candidate objects, which have a prediction confidence value. + +### Verification + +The [Verify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a) operation takes a single face ID (from a DetectedFace or PersistedFace object) and a Person object. It determines whether the face belongs to that same person. Verification is one-to-one matching and can be used as a final check on the results from the Identify API call. However, you can optionally pass in the PersonGroup to which the candidate Person belongs to improve the API performance. + +## Input data + +Use the following tips to ensure that your input images give the most accurate recognition results: + +* The supported input image formats are JPEG, PNG, GIF (the first frame), BMP. +* Image file size should be no larger than 6 MB. +* When you create Person objects, use photos that feature different kinds of angles and lighting. +* Some faces might not be recognized because of technical challenges, such as: + * Images with extreme lighting, for example, severe backlighting. + * Obstructions that block one or both eyes. + * Differences in hair type or facial hair. + * Changes in facial appearance because of age. + * Extreme facial expressions. +* You can utilize the qualityForRecognition attribute in the [face detection](./how-to/identity-detect-faces.md) operation when using applicable detection models as a general guideline of whether the image is likely of sufficient quality to attempt face recognition on. Only "high" quality images are recommended for person enrollment and quality at or above "medium" is recommended for identification scenarios. 
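To make the identification flow concrete, the following C# sketch ties these operations together. It's an illustration only: it assumes an authenticated `IFaceClient` named `client`, an already-trained PersonGroup whose ID is stored in `personGroupId`, and a hypothetical probe image URL.

```csharp
using System;
using System.Linq;
using Microsoft.Azure.CognitiveServices.Vision.Face;
using Microsoft.Azure.CognitiveServices.Vision.Face.Models;

// Detect the face(s) in the probe image. The recognition model used here should
// match the model the PersonGroup was created with.
var detectedFaces = await client.Face.DetectWithUrlAsync(
    "https://example.com/unknown-visitor.jpg",   // hypothetical image URL
    recognitionModel: "recognition_04",
    detectionModel: "detection_03");

var faceIds = detectedFaces.Select(f => f.FaceId.Value).ToList();

// Identify returns Candidate objects (with confidence scores) for each probe face.
var identifyResults = await client.Face.IdentifyAsync(faceIds, personGroupId);
foreach (var result in identifyResults)
{
    foreach (var candidate in result.Candidates)
    {
        Console.WriteLine(
            $"Face {result.FaceId} may match person {candidate.PersonId} " +
            $"(confidence {candidate.Confidence:0.00}).");
    }
}
```

How you act on the returned confidence values is application-specific; a verification scenario would instead pass a single face ID and a Person object to the Verify operation.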
+ +## Next steps + +Now that you're familiar with face recognition concepts, Write a script that identifies faces against a trained PersonGroup. + +* [Face client library quickstart](./quickstarts-sdk/identity-client-library.md) \ No newline at end of file diff --git a/articles/cognitive-services/Face/enrollment-overview.md b/articles/cognitive-services/Computer-vision/enrollment-overview.md similarity index 97% rename from articles/cognitive-services/Face/enrollment-overview.md rename to articles/cognitive-services/Computer-vision/enrollment-overview.md index 812ffb0b7bb32..5c5ffca8c9794 100644 --- a/articles/cognitive-services/Face/enrollment-overview.md +++ b/articles/cognitive-services/Computer-vision/enrollment-overview.md @@ -46,4 +46,4 @@ Before you design an enrollment flow, think about how the application you're bui ## Next steps -Follow the [Build an enrollment app](build-enrollment-app.md) guide to get started with a sample enrollment app. Then customize it or write your own app to suit the needs of your product. \ No newline at end of file +Follow the [Build an enrollment app](Tutorials/build-enrollment-app.md) guide to get started with a sample enrollment app. Then customize it or write your own app to suit the needs of your product. \ No newline at end of file diff --git a/articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-add-faces.md b/articles/cognitive-services/Computer-vision/how-to/add-faces.md similarity index 100% rename from articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-add-faces.md rename to articles/cognitive-services/Computer-vision/how-to/add-faces.md diff --git a/articles/cognitive-services/Computer-vision/Vision-API-How-to-Topics/HowtoAnalyzeVideo_Vision.md b/articles/cognitive-services/Computer-vision/how-to/analyze-video.md similarity index 100% rename from articles/cognitive-services/Computer-vision/Vision-API-How-to-Topics/HowtoAnalyzeVideo_Vision.md rename to articles/cognitive-services/Computer-vision/how-to/analyze-video.md diff --git a/articles/cognitive-services/Computer-vision/how-to/call-analyze-image.md b/articles/cognitive-services/Computer-vision/how-to/call-analyze-image.md new file mode 100644 index 0000000000000..6f37fd4e94ef2 --- /dev/null +++ b/articles/cognitive-services/Computer-vision/how-to/call-analyze-image.md @@ -0,0 +1,280 @@ +--- +title: Call the Image Analysis API +titleSuffix: Azure Cognitive Services +description: Learn how to call the Image Analysis API and configure its behavior. +services: cognitive-services +manager: nitinme + +ms.service: cognitive-services +ms.subservice: computer-vision +ms.topic: how-to +ms.date: 04/11/2022 +ms.custom: "seodec18" +--- + +# Call the Image Analysis API + +This article demonstrates how to call the Image Analysis API to return information about an image's visual features. It also shows you how to parse the returned information using the client SDKs or REST API. + +This guide assumes you have already created a Computer Vision resource and obtained a key and endpoint URL. If you're using a client SDK, you'll also need to authenticate a client object. If you haven't done these steps, follow the [quickstart](../quickstarts-sdk/image-analysis-client-library.md) to get started. + +## Submit data to the service + +The code in this guide uses remote images referenced by URL. You may want to try different images on your own to see the full capability of the Image Analysis features. 
+ +#### [REST](#tab/rest) + +When analyzing a local image, you put the binary image data in the HTTP request body. For a remote image, you specify the image's URL by formatting the request body like this: `{"url":"http://example.com/images/test.jpg"}`. + +#### [C#](#tab/csharp) + +In your main class, save a reference to the URL of the image you want to analyze. + +[!code-csharp[](~/cognitive-services-quickstart-code/dotnet/ComputerVision/ImageAnalysisQuickstart.cs?name=snippet_analyze_url)] + +#### [Java](#tab/java) + +In your main class, save a reference to the URL of the image you want to analyze. + +[!code-java[](~/cognitive-services-quickstart-code/java/ComputerVision/src/main/java/ImageAnalysisQuickstart.java?name=snippet_urlimage)] + +#### [JavaScript](#tab/javascript) + +In your main function, save a reference to the URL of the image you want to analyze. + +[!code-javascript[](~/cognitive-services-quickstart-code/javascript/ComputerVision/ImageAnalysisQuickstart.js?name=snippet_describe_image)] + +#### [Python](#tab/python) + +Save a reference to the URL of the image you want to analyze. + +[!code-python[](~/cognitive-services-quickstart-code/python/ComputerVision/ImageAnalysisQuickstart.py?name=snippet_remoteimage)] + +--- + + +## Determine how to process the data + +### Select visual features + +The Analyze API gives you access to all of the service's image analysis features. Choose which operations to do based on your own use case. See the [overview](../overview.md) for a description of each feature. The examples below add all of the available visual features, but for practical usage you'll likely only need one or two. + +#### [REST](#tab/rest) + +You can specify which features you want to use by setting the URL query parameters of the [Analyze API](https://westus.dev.cognitive.microsoft.com/docs/services/computer-vision-v3-2/operations/56f91f2e778daf14a499f21b). A parameter can have multiple values, separated by commas. Each feature you specify will require more computation time, so only specify what you need. + +|URL parameter | Value | Description| +|---|---|--| +|`visualFeatures`|`Adult` | detects if the image is pornographic in nature (depicts nudity or a sex act), or is gory (depicts extreme violence or blood). Sexually suggestive content ("racy" content) is also detected.| +|`visualFeatures`|`Brands` | detects various brands within an image, including the approximate location. The Brands argument is only available in English.| +|`visualFeatures`|`Categories` | categorizes image content according to a taxonomy defined in documentation. This value is the default value of `visualFeatures`.| +|`visualFeatures`|`Color` | determines the accent color, dominant color, and whether an image is black&white.| +|`visualFeatures`|`Description` | describes the image content with a complete sentence in supported languages.| +|`visualFeatures`|`Faces` | detects if faces are present. If present, generate coordinates, gender and age.| +|`visualFeatures`|`ImageType` | detects if image is clip art or a line drawing.| +|`visualFeatures`|`Objects` | detects various objects within an image, including the approximate location. 
The Objects argument is only available in English.| +|`visualFeatures`|`Tags` | tags the image with a detailed list of words related to the image content.| +|`details`| `Celebrities` | identifies celebrities if detected in the image.| +|`details`|`Landmarks` |identifies landmarks if detected in the image.| + +A populated URL might look like this: + +`https://{endpoint}/vision/v2.1/analyze?visualFeatures=Description,Tags&details=Celebrities` + +#### [C#](#tab/csharp) + +Define your new method for image analysis. Add the code below, which specifies visual features you'd like to extract in your analysis. See the **[VisualFeatureTypes](/dotnet/api/microsoft.azure.cognitiveservices.vision.computervision.models.visualfeaturetypes)** enum for a complete list. + +[!code-csharp[](~/cognitive-services-quickstart-code/dotnet/ComputerVision/ImageAnalysisQuickstart.cs?name=snippet_visualfeatures)] + + +#### [Java](#tab/java) + +Specify which visual features you'd like to extract in your analysis. See the [VisualFeatureTypes](/java/api/com.microsoft.azure.cognitiveservices.vision.computervision.models.visualfeaturetypes) enum for a complete list. + +[!code-java[](~/cognitive-services-quickstart-code/java/ComputerVision/src/main/java/ImageAnalysisQuickstart.java?name=snippet_features_remote)] + +#### [JavaScript](#tab/javascript) + +Specify which visual features you'd like to extract in your analysis. See the [VisualFeatureTypes](/javascript/api/@azure/cognitiveservices-computervision/visualfeaturetypes) enum for a complete list. + +[!code-javascript[](~/cognitive-services-quickstart-code/javascript/ComputerVision/ImageAnalysisQuickstart.js?name=snippet_features_remote)] + +#### [Python](#tab/python) + +Specify which visual features you'd like to extract in your analysis. See the [VisualFeatureTypes](/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.models.visualfeaturetypes) enum for a complete list. + +[!code-python[](~/cognitive-services-quickstart-code/python/ComputerVision/ImageAnalysisQuickstart.py?name=snippet_features_remote)] + + +--- + + +### Specify languages + +You can also specify the language of the returned data. + +#### [REST](#tab/rest) + +The following URL query parameter specifies the language. The default value is `en`. 
+ +|URL parameter | Value | Description| +|---|---|--| +|`language`|`en` | English| +|`language`|`es` | Spanish| +|`language`|`ja` | Japanese| +|`language`|`pt` | Portuguese| +|`language`|`zh` | Simplified Chinese| + +A populated URL might look like this: + +`https://{endpoint}/vision/v2.1/analyze?visualFeatures=Description,Tags&details=Celebrities&language=en` + +#### [C#](#tab/csharp) + +Use the *language* parameter of [AnalyzeImageAsync](/dotnet/api/microsoft.azure.cognitiveservices.vision.computervision.computervisionclientextensions.analyzeimageasync#microsoft-azure-cognitiveservices-vision-computervision-computervisionclientextensions-analyzeimageasync(microsoft-azure-cognitiveservices-vision-computervision-icomputervisionclient-system-string-system-collections-generic-ilist((system-nullable((microsoft-azure-cognitiveservices-vision-computervision-models-visualfeaturetypes))))-system-collections-generic-ilist((system-nullable((microsoft-azure-cognitiveservices-vision-computervision-models-details))))-system-string-system-collections-generic-ilist((system-nullable((microsoft-azure-cognitiveservices-vision-computervision-models-descriptionexclude))))-system-string-system-threading-cancellationtoken)) call to specify a language. A method call that specifies a language might look like the following. + +```csharp +ImageAnalysis results = await client.AnalyzeImageAsync(imageUrl, visualFeatures: features, language: "en"); +``` + +#### [Java](#tab/java) + +Use the [AnalyzeImageOptionalParameter](/java/api/com.microsoft.azure.cognitiveservices.vision.computervision.models.analyzeimageoptionalparameter) input in your Analyze call to specify a language. A method call that specifies a language might look like the following. + + +```java +ImageAnalysis analysis = compVisClient.computerVision().analyzeImage().withUrl(pathToRemoteImage) + .withVisualFeatures(featuresToExtractFromLocalImage) + .language("en") + .execute(); +``` + +#### [JavaScript](#tab/javascript) + +Use the **language** property of the [ComputerVisionClientAnalyzeImageOptionalParams](/javascript/api/@azure/cognitiveservices-computervision/computervisionclientanalyzeimageoptionalparams) input in your Analyze call to specify a language. A method call that specifies a language might look like the following. + +```javascript +const result = (await computerVisionClient.analyzeImage(imageURL,{visualFeatures: features, language: 'en'})); +``` + +#### [Python](#tab/python) + +Use the *language* parameter of your [analyze_image](/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.operations.computervisionclientoperationsmixin#azure-cognitiveservices-vision-computervision-operations-computervisionclientoperationsmixin-analyze-image) call to specify a language. A method call that specifies a language might look like the following. + +```python +results_remote = computervision_client.analyze_image(remote_image_url , remote_image_features, remote_image_details, 'en') +``` + +--- + + +## Get results from the service + +This section shows you how to parse the results of the API call. It includes the API call itself. + +> [!NOTE] +> **Scoped API calls** +> +> Some of the features in Image Analysis can be called directly as well as through the Analyze API call. For example, you can do a scoped analysis of only image tags by making a request to `https://{endpoint}/vision/v3.2/tag` (or to the corresponding method in the SDK). 
See the [reference documentation](https://westus.dev.cognitive.microsoft.com/docs/services/computer-vision-v3-2/operations/56f91f2e778daf14a499f21b) for other features that can be called separately. + +#### [REST](#tab/rest) + +The service returns a `200` HTTP response, and the body contains the returned data in the form of a JSON string. The following text is an example of a JSON response. + +```json +{ + "tags":[ + { + "name":"outdoor", + "score":0.976 + }, + { + "name":"bird", + "score":0.95 + } + ], + "description":{ + "tags":[ + "outdoor", + "bird" + ], + "captions":[ + { + "text":"partridge in a pear tree", + "confidence":0.96 + } + ] + } +} +``` + +See the following table for explanations of the fields in this example: + +Field | Type | Content +------|------|------| +Tags | `object` | The top-level object for an array of tags. +tags[].Name | `string` | The keyword from the tags classifier. +tags[].Score | `number` | The confidence score, between 0 and 1. +description | `object` | The top-level object for an image description. +description.tags[] | `string` | The list of tags. If there is insufficient confidence in the ability to produce a caption, the tags might be the only information available to the caller. +description.captions[].text | `string` | A phrase describing the image. +description.captions[].confidence | `number` | The confidence score for the phrase. + +### Error codes + +See the following list of possible errors and their causes: + +* 400 + * `InvalidImageUrl` - Image URL is badly formatted or not accessible. + * `InvalidImageFormat` - Input data is not a valid image. + * `InvalidImageSize` - Input image is too large. + * `NotSupportedVisualFeature` - Specified feature type isn't valid. + * `NotSupportedImage` - Unsupported image, for example child pornography. + * `InvalidDetails` - Unsupported `detail` parameter value. + * `NotSupportedLanguage` - The requested operation isn't supported in the language specified. + * `BadArgument` - More details are provided in the error message. +* 415 - Unsupported media type error. The Content-Type isn't in the allowed types: + * For an image URL, Content-Type should be `application/json` + * For a binary image data, Content-Type should be `application/octet-stream` or `multipart/form-data` +* 500 + * `FailedToProcess` + * `Timeout` - Image processing timed out. + * `InternalServerError` + + +#### [C#](#tab/csharp) + +The following code calls the Image Analysis API and prints the results to the console. + +[!code-csharp[](~/cognitive-services-quickstart-code/dotnet/ComputerVision/ImageAnalysisQuickstart.cs?name=snippet_analyze)] + +#### [Java](#tab/java) + +The following code calls the Image Analysis API and prints the results to the console. + +[!code-java[](~/cognitive-services-quickstart-code/java/ComputerVision/src/main/java/ImageAnalysisQuickstart.java?name=snippet_analyze)] + +#### [JavaScript](#tab/javascript) + +The following code calls the Image Analysis API and prints the results to the console. + +[!code-javascript[](~/cognitive-services-quickstart-code/javascript/ComputerVision/ImageAnalysisQuickstart.js?name=snippet_analyze)] + +#### [Python](#tab/python) + +The following code calls the Image Analysis API and prints the results to the console. 
+ +[!code-python[](~/cognitive-services-quickstart-code/python/ComputerVision/ImageAnalysisQuickstart.py?name=snippet_analyze)] + + +--- + +> [!TIP] +> While working with Computer Vision, you might encounter transient failures caused by [rate limits](https://azure.microsoft.com/pricing/details/cognitive-services/computer-vision/) enforced by the service, or other transient problems like network outages. For information about handling these types of failures, see [Retry pattern](/azure/architecture/patterns/retry) in the Cloud Design Patterns guide, and the related [Circuit Breaker pattern](/azure/architecture/patterns/circuit-breaker). + + +## Next steps + +* Explore the [concept articles](../concept-object-detection.md) to learn more about each feature. +* See the [API reference](https://westus.dev.cognitive.microsoft.com/docs/services/computer-vision-v3-2/operations/56f91f2e778daf14a499f21b) to learn more about the API functionality. \ No newline at end of file diff --git a/articles/cognitive-services/Computer-vision/Vision-API-How-to-Topics/call-read-api.md b/articles/cognitive-services/Computer-vision/how-to/call-read-api.md similarity index 100% rename from articles/cognitive-services/Computer-vision/Vision-API-How-to-Topics/call-read-api.md rename to articles/cognitive-services/Computer-vision/how-to/call-read-api.md diff --git a/articles/cognitive-services/Face/Face-API-How-to-Topics/find-similar-faces.md b/articles/cognitive-services/Computer-vision/how-to/find-similar-faces.md similarity index 97% rename from articles/cognitive-services/Face/Face-API-How-to-Topics/find-similar-faces.md rename to articles/cognitive-services/Computer-vision/how-to/find-similar-faces.md index da32fb84eb324..fd9b626eee095 100644 --- a/articles/cognitive-services/Face/Face-API-How-to-Topics/find-similar-faces.md +++ b/articles/cognitive-services/Computer-vision/how-to/find-similar-faces.md @@ -18,14 +18,14 @@ ms.custom: The Find Similar operation does face matching between a target face and a set of candidate faces, finding a smaller set of faces that look similar to the target face. This is useful for doing a face search by image. -This guide demonstrates how to use the Find Similar feature in the different language SDKs. The following sample code assumes you have already authenticated a Face client object. For details on how to do this, follow a [quickstart](../Quickstarts/client-libraries.md). +This guide demonstrates how to use the Find Similar feature in the different language SDKs. The following sample code assumes you have already authenticated a Face client object. For details on how to do this, follow a [quickstart](../quickstarts-sdk/identity-client-library.md). ## Set up sample URL This guide uses remote images that are accessed by URL. Save a reference to the following URL string. All of the images accessed in this guide are located at this URL path. 
``` -"https://csdx.blob.core.windows.net/resources/Face/Images/" +"https://csdx.blob.core.windows.net/resources/Face/media/" ``` ## Detect faces for comparison diff --git a/articles/cognitive-services/Computer-vision/how-to/identity-analyze-video.md b/articles/cognitive-services/Computer-vision/how-to/identity-analyze-video.md new file mode 100644 index 0000000000000..226932294093c --- /dev/null +++ b/articles/cognitive-services/Computer-vision/how-to/identity-analyze-video.md @@ -0,0 +1,177 @@ +--- +title: "Example: Real-time video analysis - Face" +titleSuffix: Azure Cognitive Services +description: Use the Face service to perform near-real-time analysis on frames taken from a live video stream. +services: cognitive-services +author: SteveMSFT +manager: nitinme + +ms.service: cognitive-services +ms.subservice: face-api +ms.topic: how-to +ms.date: 03/01/2018 +ms.author: sbowles +ms.devlang: csharp +ms.custom: devx-track-csharp +--- + +# Example: How to Analyze Videos in Real-time + +This guide will demonstrate how to perform near-real-time analysis on frames taken from a live video stream. The basic components in such a system are: + +- Acquire frames from a video source +- Select which frames to analyze +- Submit these frames to the API +- Consume each analysis result that is returned from the API call + +These samples are written in C# and the code can be found on GitHub here: [https://github.com/Microsoft/Cognitive-Samples-VideoFrameAnalysis](https://github.com/Microsoft/Cognitive-Samples-VideoFrameAnalysis/). + +## The Approach + +There are multiple ways to solve the problem of running near-real-time analysis on video streams. We will start by outlining three approaches in increasing levels of sophistication. + +### A Simple Approach + +The simplest design for a near-real-time analysis system is an infinite loop, where each iteration grabs a frame, analyzes it, and then consumes the result: + +```csharp +while (true) +{ + Frame f = GrabFrame(); + if (ShouldAnalyze(f)) + { + AnalysisResult r = await Analyze(f); + ConsumeResult(r); + } +} +``` + +If our analysis consisted of a lightweight client-side algorithm, this approach would be suitable. However, when analysis happens in the cloud, the latency involved means that an API call might take several seconds. During this time, we are not capturing images, and our thread is essentially doing nothing. Our maximum frame-rate is limited by the latency of the API calls. + +### Parallelizing API Calls + +While a simple single-threaded loop makes sense for a lightweight client-side algorithm, it doesn't fit well with the latency involved in cloud API calls. The solution to this problem is to allow the long-running API calls to execute in parallel with the frame-grabbing. In C#, we could achieve this using Task-based parallelism, for example: + +```csharp +while (true) +{ + Frame f = GrabFrame(); + if (ShouldAnalyze(f)) + { + var t = Task.Run(async () => + { + AnalysisResult r = await Analyze(f); + ConsumeResult(r); + } + } +} +``` + +This code launches each analysis in a separate Task, which can run in the background while we continue grabbing new frames. With this method we avoid blocking the main thread while waiting for an API call to return, but we have lost some of the guarantees that the simple version provided. Multiple API calls might occur in parallel, and the results might get returned in the wrong order. 
This could also cause multiple threads to enter the ConsumeResult() function simultaneously, which could be dangerous if the function is not thread-safe. Finally, this simple code does not keep track of the Tasks that get created, so exceptions will silently disappear. Therefore, the final step is to add a "consumer" thread that will track the analysis tasks, raise exceptions, kill long-running tasks, and ensure that the results get consumed in the correct order. + +### A Producer-Consumer Design + +In our final "producer-consumer" system, we have a producer thread that looks similar to our previous infinite loop. However, instead of consuming analysis results as soon as they are available, the producer simply puts the tasks into a queue to keep track of them. + +```csharp +// Queue that will contain the API call tasks. +var taskQueue = new BlockingCollection<Task<ResultWrapper>>(); + +// Producer thread. +while (true) +{ + // Grab a frame. + Frame f = GrabFrame(); + + // Decide whether to analyze the frame. + if (ShouldAnalyze(f)) + { + // Start a task that will run in parallel with this thread. + var analysisTask = Task.Run(async () => + { + // Put the frame, and the result/exception into a wrapper object. + var output = new ResultWrapper(f); + try + { + output.Analysis = await Analyze(f); + } + catch (Exception e) + { + output.Exception = e; + } + return output; + }); + + // Push the task onto the queue. + taskQueue.Add(analysisTask); + } +} +``` + +We also have a consumer thread that takes tasks off the queue, waits for them to finish, and either displays the result or raises the exception that was thrown. By using the queue, we can guarantee that results get consumed one at a time, in the correct order, without limiting the maximum frame-rate of the system. + +```csharp +// Consumer thread. +while (true) +{ + // Get the oldest task. + Task<ResultWrapper> analysisTask = taskQueue.Take(); + + // Await until the task is completed. + var output = await analysisTask; + + // Consume the exception or result. + if (output.Exception != null) + { + throw output.Exception; + } + else + { + ConsumeResult(output.Analysis); + } +} +``` + +## Implementing the Solution + +### Getting Started + +To get your app up and running as quickly as possible, you will use a flexible implementation of the system described above. To access the code, go to [https://github.com/Microsoft/Cognitive-Samples-VideoFrameAnalysis](https://github.com/Microsoft/Cognitive-Samples-VideoFrameAnalysis). + +The library contains the class FrameGrabber, which implements the producer-consumer system discussed above to process video frames from a webcam. The user can specify the exact form of the API call, and the class uses events to let the calling code know when a new frame is acquired or a new analysis result is available. + +To illustrate some of the possibilities, there are two sample apps that use the library. The first is a simple console app, and a simplified version of it is reproduced below. It grabs frames from the default webcam, and submits them to the Face service for face detection. + +:::code language="csharp" source="~/cognitive-services-quickstart-code/dotnet/Face/sdk/analyze.cs"::: + +The second sample app is a bit more interesting, and allows you to choose which API to call on the video frames. On the left-hand side, the app shows a preview of the live video; on the right-hand side, it shows the most recent API result overlaid on the corresponding frame.
+ +In most modes, there will be a visible delay between the live video on the left, and the visualized analysis on the right. This delay is the time taken to make the API call. One exception is the "EmotionsWithClientFaceDetect" mode, which performs face detection locally on the client computer using OpenCV, before submitting any images to Cognitive Services. This way, we can visualize the detected face immediately and then update the emotions once the API call returns. This is an example of a "hybrid" approach, where the client can perform some simple processing, and Cognitive Services APIs can augment this with more advanced analysis when necessary. + +![HowToAnalyzeVideo](../../Video/Images/FramebyFrame.jpg) + +### Integrating into your codebase + +To get started with this sample, follow these steps: + +1. Create an [Azure account](https://azure.microsoft.com/free/cognitive-services/). If you already have one, you can skip to the next step. +2. Create resources for Computer Vision and Face in the Azure portal to get your key and endpoint. Make sure to select the free tier (F0) during setup. + - [Computer Vision](https://portal.azure.com/#create/Microsoft.CognitiveServicesComputerVision) + - [Face](https://portal.azure.com/#create/Microsoft.CognitiveServicesFace) + After the resources are deployed, click **Go to resource** to collect your key and endpoint for each resource. +3. Clone the [Cognitive-Samples-VideoFrameAnalysis](https://github.com/Microsoft/Cognitive-Samples-VideoFrameAnalysis/) GitHub repo. +4. Open the sample in Visual Studio, and build and run the sample applications: + - For BasicConsoleSample, the Face key is hard-coded directly in [BasicConsoleSample/Program.cs](https://github.com/Microsoft/Cognitive-Samples-VideoFrameAnalysis/blob/master/Windows/BasicConsoleSample/Program.cs). + - For LiveCameraSample, the keys should be entered into the Settings pane of the app. They will be persisted across sessions as user data. + + +When you're ready to integrate, **reference the VideoFrameAnalyzer library from your own projects.** + +## Summary + +In this guide, you learned how to run near-real-time analysis on live video streams using the Face, Computer Vision, and Emotion APIs, and how to use our sample code to get started. + +Feel free to provide feedback and suggestions in the [GitHub repository](https://github.com/Microsoft/Cognitive-Samples-VideoFrameAnalysis/) or, for broader API feedback, on our [UserVoice](https://feedback.azure.com/d365community/forum/09041fae-0b25-ec11-b6e6-000d3a4f0858) site. + +## Related Topics +- [Call the detect API](identity-detect-faces.md) diff --git a/articles/cognitive-services/Computer-vision/how-to/identity-detect-faces.md b/articles/cognitive-services/Computer-vision/how-to/identity-detect-faces.md new file mode 100644 index 0000000000000..32be425f7bcaa --- /dev/null +++ b/articles/cognitive-services/Computer-vision/how-to/identity-detect-faces.md @@ -0,0 +1,92 @@ +--- +title: "Call the Detect API - Face" +titleSuffix: Azure Cognitive Services +description: This guide demonstrates how to use face detection to extract attributes like age, emotion, or head pose from a given image. 
+services: cognitive-services +author: PatrickFarley +manager: nitinme + +ms.service: cognitive-services +ms.subservice: face-api +ms.topic: how-to +ms.date: 08/04/2021 +ms.author: pafarley +ms.devlang: csharp +ms.custom: devx-track-csharp +--- + +# Call the Detect API + +This guide demonstrates how to use the face detection API to extract attributes like age, emotion, or head pose from a given image. You'll learn the different ways to configure the behavior of this API to meet your needs. + +The code snippets in this guide are written in C# by using the Azure Cognitive Services Face client library. The same functionality is available through the [REST API](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236). + + +## Setup + +This guide assumes that you already constructed a [FaceClient](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.faceclient) object, named `faceClient`, with a Face key and endpoint URL. For instructions on how to set up this feature, follow one of the quickstarts. + +## Submit data to the service + +To find faces and get their locations in an image, call the [DetectWithUrlAsync](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.faceoperationsextensions.detectwithurlasync) or [DetectWithStreamAsync](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.faceoperationsextensions.detectwithstreamasync) method. **DetectWithUrlAsync** takes a URL string as input, and **DetectWithStreamAsync** takes the raw byte stream of an image as input. + +:::code language="csharp" source="~/cognitive-services-quickstart-code/dotnet/Face/sdk/detect.cs" id="basic1"::: + +You can query the returned [DetectedFace](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.models.detectedface) objects for their unique IDs and a rectangle that gives the pixel coordinates of the face. This way, you can tell which face ID maps to which face in the original image. + +:::code language="csharp" source="~/cognitive-services-quickstart-code/dotnet/Face/sdk/detect.cs" id="basic2"::: + +For information on how to parse the location and dimensions of the face, see [FaceRectangle](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.models.facerectangle). Usually, this rectangle contains the eyes, eyebrows, nose, and mouth. The top of head, ears, and chin aren't necessarily included. To use the face rectangle to crop a complete head or get a mid-shot portrait, you should expand the rectangle in each direction. + +## Determine how to process the data + +This guide focuses on the specifics of the Detect call, such as what arguments you can pass and what you can do with the returned data. We recommend that you query for only the features you need. Each operation takes more time to complete. + +### Get face landmarks + +[Face landmarks](../concept-face-detection.md#face-landmarks) are a set of easy-to-find points on a face, such as the pupils or the tip of the nose. To get face landmark data, set the _detectionModel_ parameter to `DetectionModel.Detection01` and the _returnFaceLandmarks_ parameter to `true`. + +:::code language="csharp" source="~/cognitive-services-quickstart-code/dotnet/Face/sdk/detect.cs" id="landmarks1"::: + +### Get face attributes + +Besides face rectangles and landmarks, the face detection API can analyze several conceptual attributes of a face. For a full list, see the [Face attributes](../concept-face-detection.md#attributes) conceptual section. 
+ +To analyze face attributes, set the _detectionModel_ parameter to `DetectionModel.Detection01` and the _returnFaceAttributes_ parameter to a list of [FaceAttributeType Enum](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.models.faceattributetype) values. + +:::code language="csharp" source="~/cognitive-services-quickstart-code/dotnet/Face/sdk/detect.cs" id="attributes1"::: + + +## Get results from the service + +### Face landmark results + +The following code demonstrates how you might retrieve the locations of the nose and pupils: + +:::code language="csharp" source="~/cognitive-services-quickstart-code/dotnet/Face/sdk/detect.cs" id="landmarks2"::: + +You also can use face landmark data to accurately calculate the direction of the face. For example, you can define the rotation of the face as a vector from the center of the mouth to the center of the eyes. The following code calculates this vector: + +:::code language="csharp" source="~/cognitive-services-quickstart-code/dotnet/Face/sdk/detect.cs" id="direction"::: + +When you know the direction of the face, you can rotate the rectangular face frame to align it more properly. To crop faces in an image, you can programmatically rotate the image so the faces always appear upright. + + +### Face attribute results + +The following code shows how you might retrieve the face attribute data that you requested in the original call. + +:::code language="csharp" source="~/cognitive-services-quickstart-code/dotnet/Face/sdk/detect.cs" id="attributes2"::: + +To learn more about each of the attributes, see the [Face detection and attributes](../concept-face-detection.md) conceptual guide. + +## Next steps + +In this guide, you learned how to use the various functionalities of face detection and analysis. Next, integrate these features into an app to add face data from users. + +- [Tutorial: Add users to a Face service](../enrollment-overview.md) + +## Related articles + +- [Reference documentation (REST)](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) +- [Reference documentation (.NET SDK)](/dotnet/api/overview/azure/cognitiveservices/face-readme) \ No newline at end of file diff --git a/articles/cognitive-services/Computer-vision/how-to/migrate-face-data.md b/articles/cognitive-services/Computer-vision/how-to/migrate-face-data.md new file mode 100644 index 0000000000000..348797b5511c5 --- /dev/null +++ b/articles/cognitive-services/Computer-vision/how-to/migrate-face-data.md @@ -0,0 +1,239 @@ +--- +title: "Migrate your face data across subscriptions - Face" +titleSuffix: Azure Cognitive Services +description: This guide shows you how to migrate your stored face data from one Face subscription to another. +services: cognitive-services +author: nitinme +manager: nitinme + +ms.service: cognitive-services +ms.subservice: computer-vision +ms.topic: how-to +ms.date: 02/22/2021 +ms.author: nitinme +ms.devlang: csharp +ms.custom: [devx-track-csharp, cogserv-non-critical-vision] +--- + +# Migrate your face data to a different Face subscription + +This guide shows you how to move face data, such as a saved PersonGroup object with faces, to a different Azure Cognitive Services Face subscription. To move the data, you use the Snapshot feature. This way you avoid having to repeatedly build and train a PersonGroup or FaceList object when you move or expand your operations. 
For example, perhaps you created a PersonGroup object with a free subscription and now want to migrate it to your paid subscription. Or you might need to sync face data across subscriptions in different regions for a large enterprise operation. + +This same migration strategy also applies to LargePersonGroup and LargeFaceList objects. If you aren't familiar with the concepts in this guide, see their definitions in the [Face recognition concepts](../concept-face-recognition.md) guide. This guide uses the Face .NET client library with C#. + +> [!WARNING] +> The Snapshot feature might move your data outside the geographic region you originally selected. Data might move to West US, West Europe, and Southeast Asia regions. + +## Prerequisites + +You need the following items: + +- Two Face keys, one with the existing data and one to migrate to. To subscribe to the Face service and get your key, follow the instructions in [Create a Cognitive Services account](../../cognitive-services-apis-create-account.md). +- The Face subscription ID string that corresponds to the target subscription. To find it, select **Overview** in the Azure portal. +- Any edition of [Visual Studio 2015 or 2017](https://www.visualstudio.com/downloads/). + +## Create the Visual Studio project + +This guide uses a simple console app to run the face data migration. For a full implementation, see the [Face snapshot sample](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/tree/master/app-samples/FaceApiSnapshotSample/FaceApiSnapshotSample) on GitHub. + +1. In Visual Studio, create a new Console app .NET Framework project. Name it **FaceApiSnapshotSample**. +1. Get the required NuGet packages. Right-click your project in the Solution Explorer, and select **Manage NuGet Packages**. Select the **Browse** tab, and select **Include prerelease**. Find and install the following package: + - [Microsoft.Azure.CognitiveServices.Vision.Face 2.3.0-preview](https://www.nuget.org/packages/Microsoft.Azure.CognitiveServices.Vision.Face/2.2.0-preview) + +## Create face clients + +In the **Main** method in *Program.cs*, create two [FaceClient](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.faceclient) instances for your source and target subscriptions. This example uses a Face subscription in the East Asia region as the source and a West US subscription as the target, and demonstrates how to migrate data from one Azure region to another. + +[!INCLUDE [subdomains-note](../../../../includes/cognitive-services-custom-subdomains-note.md)] + +```csharp +var FaceClientEastAsia = new FaceClient(new ApiKeyServiceClientCredentials("<East Asia subscription key>")) + { + Endpoint = "https://southeastasia.api.cognitive.microsoft.com/" + }; + +var FaceClientWestUS = new FaceClient(new ApiKeyServiceClientCredentials("<West US subscription key>")) + { + Endpoint = "https://westus.api.cognitive.microsoft.com/" + }; +``` + +Fill in the key values and endpoint URLs for your source and target subscriptions. + + +## Prepare a PersonGroup for migration + +You need the ID of the PersonGroup in your source subscription to migrate it to the target subscription. Use the [PersonGroupOperationsExtensions.ListAsync](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.persongroupoperationsextensions.listasync) method to retrieve a list of your PersonGroup objects.
Then get the [PersonGroup.PersonGroupId](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.models.persongroup.persongroupid#Microsoft_Azure_CognitiveServices_Vision_Face_Models_PersonGroup_PersonGroupId) property. This process looks different based on what PersonGroup objects you have. In this guide, the source PersonGroup ID is stored in `personGroupId`. + +> [!NOTE] +> The [sample code](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/tree/master/app-samples/FaceApiSnapshotSample/FaceApiSnapshotSample) creates and trains a new PersonGroup to migrate. In most cases, you should already have a PersonGroup to use. + +## Take a snapshot of a PersonGroup + +A snapshot is temporary remote storage for certain Face data types. It functions as a kind of clipboard to copy data from one subscription to another. First, you take a snapshot of the data in the source subscription. Then you apply it to a new data object in the target subscription. + +Use the source subscription's FaceClient instance to take a snapshot of the PersonGroup. Use [TakeAsync](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.snapshotoperationsextensions.takeasync) with the PersonGroup ID and the target subscription's ID. If you have multiple target subscriptions, add them as array entries in the third parameter. + +```csharp +var takeSnapshotResult = await FaceClientEastAsia.Snapshot.TakeAsync( + SnapshotObjectType.PersonGroup, + personGroupId, + new[] { "<West US subscription ID>" /* Put other IDs here, if multiple target subscriptions wanted */ }); +``` + +> [!NOTE] +> The process of taking and applying snapshots doesn't disrupt any regular calls to the source or target PersonGroups or FaceLists. Don't make simultaneous calls that change the source object, such as [FaceList management calls](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.facelistoperations) or the [PersonGroup Train](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.persongroupoperations) call, for example. The snapshot operation might run before or after those operations or might encounter errors. + +## Retrieve the snapshot ID + +The method used to take snapshots is asynchronous, so you must wait for its completion. Snapshot operations can't be canceled. In this code, the `WaitForOperation` method monitors the asynchronous call. It checks the status every 100 ms. After the operation finishes, retrieve an operation ID by parsing the `OperationLocation` field. + +```csharp +var takeOperationId = Guid.Parse(takeSnapshotResult.OperationLocation.Split('/')[2]); +var operationStatus = await WaitForOperation(FaceClientEastAsia, takeOperationId); +``` + +A typical `OperationLocation` value looks like this: + +```csharp +"/operations/a63a3bdd-a1db-4d05-87b8-dbad6850062a" +``` + +The `WaitForOperation` helper method is here: + +```csharp +/// <summary> +/// Waits for the take/apply operation to complete and returns the final operation status. +/// </summary> +/// <returns>The final operation status.</returns> +private static async Task<OperationStatus> WaitForOperation(IFaceClient client, Guid operationId) +{ + OperationStatus operationStatus = null; + do + { + if (operationStatus != null) + { + Thread.Sleep(TimeSpan.FromMilliseconds(100)); + } + + // Get the status of the operation.
+ operationStatus = await client.Snapshot.GetOperationStatusAsync(operationId); + + Console.WriteLine($"Operation Status: {operationStatus.Status}"); + } + while (operationStatus.Status != OperationStatusType.Succeeded + && operationStatus.Status != OperationStatusType.Failed); + + return operationStatus; +} +``` + +After the operation status shows `Succeeded`, get the snapshot ID by parsing the `ResourceLocation` field of the returned OperationStatus instance. + +```csharp +var snapshotId = Guid.Parse(operationStatus.ResourceLocation.Split('/')[2]); +``` + +A typical `resourceLocation` value looks like this: + +```csharp +"/snapshots/e58b3f08-1e8b-4165-81df-aa9858f233dc" +``` + +## Apply a snapshot to a target subscription + +Next, create the new PersonGroup in the target subscription by using a randomly generated ID. Then use the target subscription's FaceClient instance to apply the snapshot to this PersonGroup. Pass in the snapshot ID and the new PersonGroup ID. + +```csharp +var newPersonGroupId = Guid.NewGuid().ToString(); +var applySnapshotResult = await FaceClientWestUS.Snapshot.ApplyAsync(snapshotId, newPersonGroupId); +``` + + +> [!NOTE] +> A Snapshot object is valid for only 48 hours. Only take a snapshot if you intend to use it for data migration soon after. + +A snapshot apply request returns another operation ID. To get this ID, parse the `OperationLocation` field of the returned applySnapshotResult instance. + +```csharp +var applyOperationId = Guid.Parse(applySnapshotResult.OperationLocation.Split('/')[2]); +``` + +The snapshot application process is also asynchronous, so again use `WaitForOperation` to wait for it to finish. + +```csharp +operationStatus = await WaitForOperation(FaceClientWestUS, applyOperationId); +``` + +## Test the data migration + +After you apply the snapshot, the new PersonGroup in the target subscription populates with the original face data. By default, training results are also copied. The new PersonGroup is ready for face identification calls without needing retraining. + +To test the data migration, run the following operations and compare the results they print to the console: + +```csharp +await DisplayPersonGroup(FaceClientEastAsia, personGroupId); +await IdentifyInPersonGroup(FaceClientEastAsia, personGroupId); + +await DisplayPersonGroup(FaceClientWestUS, newPersonGroupId); +// No need to retrain the PersonGroup before identification, +// training results are copied by snapshot as well. +await IdentifyInPersonGroup(FaceClientWestUS, newPersonGroupId); +``` + +Use the following helper methods: + +```csharp +private static async Task DisplayPersonGroup(IFaceClient client, string personGroupId) +{ + var personGroup = await client.PersonGroup.GetAsync(personGroupId); + Console.WriteLine("PersonGroup:"); + Console.WriteLine(JsonConvert.SerializeObject(personGroup)); + + // List persons. 
+ var persons = await client.PersonGroupPerson.ListAsync(personGroupId); + + foreach (var person in persons) + { + Console.WriteLine(JsonConvert.SerializeObject(person)); + } + + Console.WriteLine(); +} +``` + +```csharp +private static async Task IdentifyInPersonGroup(IFaceClient client, string personGroupId) +{ + using (var fileStream = new FileStream("data\\PersonGroup\\Daughter\\Daughter1.jpg", FileMode.Open, FileAccess.Read)) + { + var detectedFaces = await client.Face.DetectWithStreamAsync(fileStream); + + var result = await client.Face.IdentifyAsync(detectedFaces.Select(face => face.FaceId.Value).ToList(), personGroupId); + Console.WriteLine("Test identify against PersonGroup"); + Console.WriteLine(JsonConvert.SerializeObject(result)); + Console.WriteLine(); + } +} +``` + +Now you can use the new PersonGroup in the target subscription. + +To update the target PersonGroup again in the future, create a new PersonGroup to receive the snapshot. To do this, follow the steps in this guide. A single PersonGroup object can have a snapshot applied to it only one time. + +## Clean up resources + +After you finish migrating face data, manually delete the snapshot object. + +```csharp +await FaceClientEastAsia.Snapshot.DeleteAsync(snapshotId); +``` + +## Next steps + +Next, see the relevant API reference documentation, explore a sample app that uses the Snapshot feature, or follow a how-to guide to start using the other API operations mentioned here: + +- [Snapshot reference documentation (.NET SDK)](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.snapshotoperations) +- [Face snapshot sample](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/tree/master/app-samples/FaceApiSnapshotSample/FaceApiSnapshotSample) +- [Add faces](add-faces.md) +- [Call the detect API](identity-detect-faces.md) diff --git a/articles/cognitive-services/Computer-vision/how-to/mitigate-latency.md b/articles/cognitive-services/Computer-vision/how-to/mitigate-latency.md new file mode 100644 index 0000000000000..1829e77e7afc1 --- /dev/null +++ b/articles/cognitive-services/Computer-vision/how-to/mitigate-latency.md @@ -0,0 +1,98 @@ +--- +title: How to mitigate latency when using the Face service +titleSuffix: Azure Cognitive Services +description: Learn how to mitigate latency when using the Face service. +services: cognitive-services +author: PatrickFarley +manager: nitinme +ms.service: cognitive-services +ms.topic: how-to +ms.date: 1/5/2021 +ms.author: pafarley +ms.devlang: csharp +ms.custom: cogserv-non-critical-vision +--- + +# How to: mitigate latency when using the Face service + +You may encounter latency when using the Face service. Latency refers to any kind of delay that occurs when communicating over a network. In general, possible causes of latency include: +- The physical distance each packet must travel from source to destination. +- Problems with the transmission medium. +- Errors in routers or switches along the transmission path. +- The time required by antivirus applications, firewalls, and other security mechanisms to inspect packets. +- Malfunctions in client or server applications. + +This article talks about possible causes of latency specific to using the Azure Cognitive Services, and how you can mitigate these causes. + +> [!NOTE] +> Azure Cognitive Services does not provide any Service Level Agreement (SLA) regarding latency. 
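One general safeguard, discussed with the mitigations later in this article, is to give each call a client-side time budget and retry if it's exceeded. The following C# sketch only illustrates that pattern; the class and method names, timeout value, and retry policy are assumptions for illustration, not features of the Face SDK.

```csharp
using System;
using System.Collections.Generic;
using System.Threading.Tasks;
using Microsoft.Azure.CognitiveServices.Vision.Face;
using Microsoft.Azure.CognitiveServices.Vision.Face.Models;

public static class FaceCallHelpers
{
    // Hypothetical helper: give each detection call a fixed time budget and retry on timeout.
    public static async Task<IList<DetectedFace>> DetectWithTimeoutAsync(
        IFaceClient client, string imageUrl, TimeSpan timeout, int maxAttempts = 2)
    {
        for (int attempt = 1; attempt <= maxAttempts; attempt++)
        {
            var detectTask = client.Face.DetectWithUrlAsync(imageUrl);
            var first = await Task.WhenAny(detectTask, Task.Delay(timeout));
            if (first == detectTask)
            {
                return await detectTask;   // completed within the time budget
            }

            // The slow call is abandoned rather than canceled; log and try again.
            Console.WriteLine($"Attempt {attempt} exceeded {timeout.TotalSeconds} s; retrying...");
        }

        throw new TimeoutException("Face detection didn't complete within the allotted time.");
    }
}
```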
+ +## Possible causes of latency + +### Slow connection between the Cognitive Service and a remote URL + +Some Azure services provide methods that obtain data from a remote URL that you provide. For example, when you call the [DetectWithUrlAsync method](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.faceoperationsextensions.detectwithurlasync#Microsoft_Azure_CognitiveServices_Vision_Face_FaceOperationsExtensions_DetectWithUrlAsync_Microsoft_Azure_CognitiveServices_Vision_Face_IFaceOperations_System_String_System_Nullable_System_Boolean__System_Nullable_System_Boolean__System_Collections_Generic_IList_System_Nullable_Microsoft_Azure_CognitiveServices_Vision_Face_Models_FaceAttributeType___System_String_System_Nullable_System_Boolean__System_String_System_Threading_CancellationToken_) of the Face service, you can specify the URL of an image in which the service tries to detect faces. + +```csharp +var faces = await client.Face.DetectWithUrlAsync("https://www.biography.com/.image/t_share/MTQ1MzAyNzYzOTgxNTE0NTEz/john-f-kennedy---mini-biography.jpg"); +``` + +The Face service must then download the image from the remote server. If the connection from the Face service to the remote server is slow, that will affect the response time of the Detect method. + +To mitigate this situation, consider [storing the image in Azure Premium Blob Storage](../../../storage/blobs/storage-upload-process-images.md?tabs=dotnet). For example: + +``` csharp +var faces = await client.Face.DetectWithUrlAsync("https://csdx.blob.core.windows.net/resources/Face/Images/Family1-Daughter1.jpg"); +``` + +### Large upload size + +Some Azure services provide methods that obtain data from a file that you upload. For example, when you call the [DetectWithStreamAsync method](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.faceoperationsextensions.detectwithstreamasync#Microsoft_Azure_CognitiveServices_Vision_Face_FaceOperationsExtensions_DetectWithStreamAsync_Microsoft_Azure_CognitiveServices_Vision_Face_IFaceOperations_System_IO_Stream_System_Nullable_System_Boolean__System_Nullable_System_Boolean__System_Collections_Generic_IList_System_Nullable_Microsoft_Azure_CognitiveServices_Vision_Face_Models_FaceAttributeType___System_String_System_Nullable_System_Boolean__System_String_System_Threading_CancellationToken_) of the Face service, you can upload an image in which the service tries to detect faces. + +```csharp +using FileStream fs = File.OpenRead(@"C:\images\face.jpg"); +System.Collections.Generic.IList faces = await client.Face.DetectWithStreamAsync(fs, detectionModel: DetectionModel.Detection02); +``` + +If the file to upload is large, that will impact the response time of the `DetectWithStreamAsync` method, for the following reasons: +- It takes longer to upload the file. +- It takes the service longer to process the file, in proportion to the file size. + +Mitigations: +- Consider [storing the image in Azure Premium Blob Storage](../../../storage/blobs/storage-upload-process-images.md?tabs=dotnet). For example: +``` csharp +var faces = await client.Face.DetectWithUrlAsync("https://csdx.blob.core.windows.net/resources/Face/Images/Family1-Daughter1.jpg"); +``` +- Consider uploading a smaller file. + - See the guidelines regarding [input data for face detection](../concept-face-detection.md#input-data) and [input data for face recognition](../concept-face-recognition.md#input-data). 
+ - For face detection, when using detection model `DetectionModel.Detection01`, reducing the image file size will increase processing speed. When you use detection model `DetectionModel.Detection02`, reducing the image file size will only increase processing speed if the image file is smaller than 1920x1080. + - For face recognition, reducing the face size to 200x200 pixels doesn't affect the accuracy of the recognition model. + - The performance of the `DetectWithUrlAsync` and `DetectWithStreamAsync` methods also depends on how many faces are in an image. The Face service can return up to 100 faces for an image. Faces are ranked by face rectangle size from large to small. + - If you need to call multiple service methods, consider calling them in parallel if your application design allows for it. For example, if you need to detect faces in two images to perform a face comparison: +```csharp +var faces_1 = client.Face.DetectWithUrlAsync("https://www.biography.com/.image/t_share/MTQ1MzAyNzYzOTgxNTE0NTEz/john-f-kennedy---mini-biography.jpg"); +var faces_2 = client.Face.DetectWithUrlAsync("https://www.biography.com/.image/t_share/MTQ1NDY3OTIxMzExNzM3NjE3/john-f-kennedy---debating-richard-nixon.jpg"); +Task.WaitAll (new Task>[] { faces_1, faces_2 }); +IEnumerable results = faces_1.Result.Concat (faces_2.Result); +``` + +### Slow connection between your compute resource and the Face service + +If your computer has a slow connection to the Face service, this will affect the response time of service methods. + +Mitigations: +- When you create your Face subscription, make sure to choose the region closest to where your application is hosted. +- If you need to call multiple service methods, consider calling them in parallel if your application design allows for it. See the previous section for an example. +- If longer latencies affect the user experience, choose a timeout threshold (for example, maximum 5 seconds) before retrying the API call. + +## Next steps + +In this guide, you learned how to mitigate latency when using the Face service. Next, learn how to scale up from existing PersonGroup and FaceList objects to LargePersonGroup and LargeFaceList objects, respectively. + +> [!div class="nextstepaction"] +> [Example: Use the large-scale feature](use-large-scale.md) + +## Related topics + +- [Reference documentation (REST)](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) +- [Reference documentation (.NET SDK)](/dotnet/api/overview/azure/cognitiveservices/face-readme) diff --git a/articles/cognitive-services/Face/Face-API-How-to-Topics/specify-detection-model.md b/articles/cognitive-services/Computer-vision/how-to/specify-detection-model.md similarity index 94% rename from articles/cognitive-services/Face/Face-API-How-to-Topics/specify-detection-model.md rename to articles/cognitive-services/Computer-vision/how-to/specify-detection-model.md index a63432c80ed43..6997dd67ef8a0 100644 --- a/articles/cognitive-services/Face/Face-API-How-to-Topics/specify-detection-model.md +++ b/articles/cognitive-services/Computer-vision/how-to/specify-detection-model.md @@ -29,12 +29,12 @@ If you aren't sure whether you should use the latest model, skip to the [Evaluat You should be familiar with the concept of AI face detection. 
If you aren't, see the face detection conceptual guide or how-to guide: -* [Face detection concepts](../concepts/face-detection.md) -* [Call the detect API](HowtoDetectFacesinImage.md) +* [Face detection concepts](../concept-face-detection.md) +* [Call the detect API](identity-detect-faces.md) ## Detect faces with specified model -Face detection finds the bounding-box locations of human faces and identifies their visual landmarks. It extracts the face's features and stores them for later use in [recognition](../concepts/face-recognition.md) operations. +Face detection finds the bounding-box locations of human faces and identifies their visual landmarks. It extracts the face's features and stores them for later use in [recognition](../concept-face-recognition.md) operations. When you use the [Face - Detect] API, you can assign the model version with the `detectionModel` parameter. The available values are: @@ -108,8 +108,8 @@ The best way to compare the performances of the detection models is to use them In this article, you learned how to specify the detection model to use with different Face APIs. Next, follow a quickstart to get started with face detection and analysis. -* [Face .NET SDK](../quickstarts/client-libraries.md?pivots=programming-language-csharp%253fpivots%253dprogramming-language-csharp) -* [Face Python SDK](../quickstarts/client-libraries.md?pivots=programming-language-python%253fpivots%253dprogramming-language-python) +* [Face .NET SDK](../quickstarts-sdk/identity-client-library.md?pivots=programming-language-csharp%253fpivots%253dprogramming-language-csharp) +* [Face Python SDK](../quickstarts-sdk/identity-client-library.md?pivots=programming-language-python%253fpivots%253dprogramming-language-python) [Face - Detect]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d [Face - Find Similar]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237 diff --git a/articles/cognitive-services/Computer-vision/how-to/specify-recognition-model.md b/articles/cognitive-services/Computer-vision/how-to/specify-recognition-model.md new file mode 100644 index 0000000000000..645cf5052a129 --- /dev/null +++ b/articles/cognitive-services/Computer-vision/how-to/specify-recognition-model.md @@ -0,0 +1,131 @@ +--- +title: How to specify a recognition model - Face +titleSuffix: Azure Cognitive Services +description: This article will show you how to choose which recognition model to use with your Azure Face application. +services: cognitive-services +author: longli0 +manager: nitinme +ms.service: cognitive-services +ms.subservice: computer-vision +ms.topic: how-to +ms.date: 03/05/2021 +ms.author: longl +ms.devlang: csharp +ms.custom: devx-track-csharp +--- + +# Specify a face recognition model + +This guide shows you how to specify a face recognition model for face detection, identification and similarity search using the Azure Face service. + +The Face service uses machine learning models to perform operations on human faces in images. We continue to improve the accuracy of our models based on customer feedback and advances in research, and we deliver these improvements as model updates. Developers can specify which version of the face recognition model they'd like to use. They can choose the model that best fits their use case. + +The Azure Face service has four recognition models available. 
The models _recognition_01_ (published 2017), _recognition_02_ (published 2019), and _recognition_03_ (published 2020) are continually supported to ensure backwards compatibility for customers using FaceLists or **PersonGroup**s created with these models. A **FaceList** or **PersonGroup** will always use the recognition model it was created with, and new faces will become associated with this model when they're added. This can't be changed after creation and customers will need to use the corresponding recognition model with the corresponding **FaceList** or **PersonGroup**. + +You can move to later recognition models at your own convenience; however, you'll need to create new FaceLists and PersonGroups with the recognition model of your choice. + +The _recognition_04_ model (published 2021) is the most accurate model currently available. If you're a new customer, we recommend using this model. _Recognition_04_ will provide improved accuracy for both similarity comparisons and person-matching comparisons. _Recognition_04_ improves recognition for enrolled users wearing face covers (surgical masks, N95 masks, cloth masks). Now you can build safe and seamless user experiences that use the latest _detection_03_ model to detect whether an enrolled user is wearing a face cover. Then you can use the latest _recognition_04_ model to recognize their identity. Each model operates independently of the others, and a confidence threshold set for one model isn't meant to be compared across the other recognition models. + +Read on to learn how to specify a selected model in different Face operations while avoiding model conflicts. If you're an advanced user and would like to determine whether you should switch to the latest model, skip to the [Evaluate different models](#evaluate-different-models) section. You can evaluate the new model and compare results using your current data set. + + +## Prerequisites + +You should be familiar with the concepts of AI face detection and identification. If you aren't, see these guides first: + +* [Face detection concepts](../concept-face-detection.md) +* [Face recognition concepts](../concept-face-recognition.md) +* [Call the detect API](identity-detect-faces.md) + +## Detect faces with specified model + +Face detection identifies the visual landmarks of human faces and finds their bounding-box locations. It also extracts the face's features and stores them for use in identification. All of this information forms the representation of one face. + +The recognition model is used when the face features are extracted, so you can specify a model version when performing the Detect operation. + +When using the [Face - Detect] API, assign the model version with the `recognitionModel` parameter. The available values are: +* recognition_01 +* recognition_02 +* recognition_03 +* recognition_04 + + +Optionally, you can specify the _returnRecognitionModel_ parameter (default **false**) to indicate whether _recognitionModel_ should be returned in response. So, a request URL for the [Face - Detect] REST API will look like this: + +`https://westus.api.cognitive.microsoft.com/face/v1.0/detect[?returnFaceId][&returnFaceLandmarks][&returnFaceAttributes][&recognitionModel][&returnRecognitionModel]&subscription-key=` + +If you're using the client library, you can assign the value for `recognitionModel` by passing a string representing the version. If you leave it unassigned, a default model version of `recognition_01` will be used. 
See the following code example for the .NET client library. + +```csharp +string imageUrl = "https://news.microsoft.com/ceo/assets/photos/06_web.jpg"; +var faces = await faceClient.Face.DetectWithUrlAsync(imageUrl, true, true, recognitionModel: "recognition_01", returnRecognitionModel: true); +``` + +## Identify faces with specified model + +The Face service can extract face data from an image and associate it with a **Person** object (through the [Add face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b) API call, for example), and multiple **Person** objects can be stored together in a **PersonGroup**. Then, a new face can be compared against a **PersonGroup** (with the [Face - Identify] call), and the matching person within that group can be identified. + +A **PersonGroup** should have one unique recognition model for all of the **Person**s, and you can specify this using the `recognitionModel` parameter when you create the group ([PersonGroup - Create] or [LargePersonGroup - Create]). If you don't specify this parameter, the original `recognition_01` model is used. A group will always use the recognition model it was created with, and new faces will become associated with this model when they're added to it. This can't be changed after a group's creation. To see what model a **PersonGroup** is configured with, use the [PersonGroup - Get] API with the _returnRecognitionModel_ parameter set as **true**. + +See the following code example for the .NET client library. + +```csharp +// Create an empty PersonGroup with "recognition_04" model +string personGroupId = "mypersongroupid"; +await faceClient.PersonGroup.CreateAsync(personGroupId, "My Person Group Name", recognitionModel: "recognition_04"); +``` + +In this code, a **PersonGroup** with ID `mypersongroupid` is created, and it's set up to use the _recognition_04_ model to extract face features. + +Correspondingly, you need to specify which model to use when detecting faces to compare against this **PersonGroup** (through the [Face - Detect] API). The model you use should always be consistent with the **PersonGroup**'s configuration; otherwise, the operation will fail due to incompatible models. + +There is no change in the [Face - Identify] API; you only need to specify the model version in detection. + +## Find similar faces with specified model + +You can also specify a recognition model for similarity search. You can assign the model version with `recognitionModel` when creating the **FaceList** with [FaceList - Create] API or [LargeFaceList - Create]. If you don't specify this parameter, the `recognition_01` model is used by default. A **FaceList** will always use the recognition model it was created with, and new faces will become associated with this model when they're added to the list; you can't change this after creation. To see what model a **FaceList** is configured with, use the [FaceList - Get] API with the _returnRecognitionModel_ parameter set as **true**. + +See the following code example for the .NET client library. + +```csharp +await faceClient.FaceList.CreateAsync(faceListId, "My face collection", recognitionModel: "recognition_04"); +``` + +This code creates a **FaceList** called `My face collection`, using the _recognition_04_ model for feature extraction. When you search this **FaceList** for similar faces to a new detected face, that face must have been detected ([Face - Detect]) using the _recognition_04_ model. 
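For example, a consistent detect-and-search pair might look like the following sketch. It assumes the `faceClient` and `faceListId` from the snippet above; the image URL is only a placeholder.

```csharp
// Detect the query face with the same recognition model that the FaceList was created with.
IList<DetectedFace> detectedFaces = await faceClient.Face.DetectWithUrlAsync(
    "https://example.com/query-face.jpg",   // placeholder image URL
    recognitionModel: "recognition_04");

// Search the FaceList for faces that are similar to the first detected face.
IList<SimilarFace> similarFaces = await faceClient.Face.FindSimilarAsync(
    detectedFaces[0].FaceId.Value,
    faceListId: faceListId);
```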
As in the previous section, the model needs to be consistent. + +There is no change in the [Face - Find Similar] API; you only specify the model version in detection. + +## Verify faces with specified model + +The [Face - Verify] API checks whether two faces belong to the same person. There is no change in the Verify API with regard to recognition models, but you can only compare faces that were detected with the same model. + +## Evaluate different models + +If you'd like to compare the performances of different recognition models on your own data, you'll need to: +1. Create four PersonGroups using _recognition_01_, _recognition_02_, _recognition_03_, and _recognition_04_ respectively. +1. Use your image data to detect faces and register them to **Person**s within these four **PersonGroup**s. +1. Train your PersonGroups using the PersonGroup - Train API. +1. Test with Face - Identify on all four **PersonGroup**s and compare the results. + + +If you normally specify a confidence threshold (a value between zero and one that determines how confident the model must be to identify a face), you may need to use different thresholds for different models. A threshold for one model isn't meant to be shared to another and won't necessarily produce the same results. + +## Next steps + +In this article, you learned how to specify the recognition model to use with different Face service APIs. Next, follow a quickstart to get started with face detection. + +* [Face .NET SDK](../quickstarts-sdk/identity-client-library.md?pivots=programming-language-csharp%253fpivots%253dprogramming-language-csharp) +* [Face Python SDK](../quickstarts-sdk/identity-client-library.md?pivots=programming-language-python%253fpivots%253dprogramming-language-python) + +[Face - Detect]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d +[Face - Find Similar]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237 +[Face - Identify]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239 +[Face - Verify]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a +[PersonGroup - Create]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395244 +[PersonGroup - Get]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395246 +[PersonGroup Person - Add Face]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b +[PersonGroup - Train]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395249 +[LargePersonGroup - Create]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599acdee6ac60f11b48b5a9d +[FaceList - Create]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039524b +[FaceList - Get]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039524c +[LargeFaceList - Create]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/5a157b68d2de3616c086f2cc diff --git a/articles/cognitive-services/Computer-vision/how-to/use-headpose.md 
b/articles/cognitive-services/Computer-vision/how-to/use-headpose.md new file mode 100644 index 0000000000000..1b7551144f4a4 --- /dev/null +++ b/articles/cognitive-services/Computer-vision/how-to/use-headpose.md @@ -0,0 +1,134 @@ +--- +title: Use the HeadPose attribute +titleSuffix: Azure Cognitive Services +description: Learn how to use the HeadPose attribute to automatically rotate the face rectangle or detect head gestures in a video feed. +author: PatrickFarley +manager: nitinme + +ms.service: cognitive-services +ms.subservice: face-api +ms.topic: how-to +ms.date: 02/23/2021 +ms.author: pafarley +ms.devlang: csharp +ms.custom: devx-track-csharp +--- + +# Use the HeadPose attribute + +In this guide, you'll see how you can use the HeadPose attribute of a detected face to enable some key scenarios. + +## Rotate the face rectangle + +The face rectangle, returned with every detected face, marks the location and size of the face in the image. By default, the rectangle is always aligned with the image (its sides are vertical and horizontal); this can be inefficient for framing angled faces. In situations where you want to programmatically crop faces in an image, it's better to be able to rotate the rectangle to crop. + +The [Cognitive Services Face WPF (Windows Presentation Foundation)](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/tree/master/app-samples/Cognitive-Services-Face-WPF) sample app uses the HeadPose attribute to rotate its detected face rectangles. + +### Explore the sample code + +You can programmatically rotate the face rectangle by using the HeadPose attribute. If you specify this attribute when detecting faces (see [Call the detect API](identity-detect-faces.md)), you will be able to query it later. The following method from the [Cognitive Services Face WPF](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/tree/master/app-samples/Cognitive-Services-Face-WPF) app takes a list of **DetectedFace** objects and returns a list of **[Face](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/blob/master/app-samples/Cognitive-Services-Face-WPF/Sample-WPF/Controls/Face.cs)** objects. **Face** here is a custom class that stores face data, including the updated rectangle coordinates. New values are calculated for **top**, **left**, **width**, and **height**, and a new field **FaceAngle** specifies the rotation. + +```csharp +/// <summary> +/// Calculate the rendering face rectangle +/// </summary> +/// <param name="faces">Detected face from service</param> +/// <param name="maxSize">Image rendering size</param> +/// <param name="imageInfo">Image width and height</param> +/// <returns>Face structure for rendering</returns> +public static IEnumerable<Face> CalculateFaceRectangleForRendering(IList<DetectedFace> faces, int maxSize, Tuple<int, int> imageInfo) +{ + var imageWidth = imageInfo.Item1; + var imageHeight = imageInfo.Item2; + var ratio = (float)imageWidth / imageHeight; + int uiWidth = 0; + int uiHeight = 0; + if (ratio > 1.0) + { + uiWidth = maxSize; + uiHeight = (int)(maxSize / ratio); + } + else + { + uiHeight = maxSize; + uiWidth = (int)(ratio * uiHeight); + } + + var uiXOffset = (maxSize - uiWidth) / 2; + var uiYOffset = (maxSize - uiHeight) / 2; + var scale = (float)uiWidth / imageWidth; + + foreach (var face in faces) + { + var left = (int)(face.FaceRectangle.Left * scale + uiXOffset); + var top = (int)(face.FaceRectangle.Top * scale + uiYOffset); + + // Angle of face rectangles, default value is 0 (not rotated). + double faceAngle = 0; + + // If head pose attributes have been obtained, re-calculate the left & top (X & Y) positions.
+ if (face.FaceAttributes?.HeadPose != null) + { + // Head pose's roll value acts directly as the face angle. + faceAngle = face.FaceAttributes.HeadPose.Roll; + var angleToPi = Math.Abs((faceAngle / 180) * Math.PI); + + // _____ | / \ | + // |____| => |/ /| + // | \ / | + // Re-calculate the face rectangle's left & top (X & Y) positions. + var newLeft = face.FaceRectangle.Left + + face.FaceRectangle.Width / 2 - + (face.FaceRectangle.Width * Math.Sin(angleToPi) + face.FaceRectangle.Height * Math.Cos(angleToPi)) / 2; + + var newTop = face.FaceRectangle.Top + + face.FaceRectangle.Height / 2 - + (face.FaceRectangle.Height * Math.Sin(angleToPi) + face.FaceRectangle.Width * Math.Cos(angleToPi)) / 2; + + left = (int)(newLeft * scale + uiXOffset); + top = (int)(newTop * scale + uiYOffset); + } + + yield return new Face() + { + FaceId = face.FaceId?.ToString(), + Left = left, + Top = top, + OriginalLeft = (int)(face.FaceRectangle.Left * scale + uiXOffset), + OriginalTop = (int)(face.FaceRectangle.Top * scale + uiYOffset), + Height = (int)(face.FaceRectangle.Height * scale), + Width = (int)(face.FaceRectangle.Width * scale), + FaceAngle = faceAngle, + }; + } +} +``` + +### Display the updated rectangle + +From here, you can use the returned **Face** objects in your display. The following lines from [FaceDetectionPage.xaml](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/blob/master/app-samples/Cognitive-Services-Face-WPF/Sample-WPF/Controls/FaceDetectionPage.xaml) show how the new rectangle is rendered from this data: + +```xaml + + + + + + + +``` + +## Detect head gestures + +You can detect head gestures like nodding and head shaking by tracking HeadPose changes in real time. You can use this feature as a custom liveness detector. + +Liveness detection is the task of determining that a subject is a real person and not an image or video representation. A head gesture detector could serve as one way to help verify liveness, especially as opposed to an image representation of a person. + +> [!CAUTION] +> To detect head gestures in real time, you'll need to call the Face API at a high rate (more than once per second). If you have a free-tier (f0) subscription, this will not be possible. If you have a paid-tier subscription, make sure you've calculated the costs of making rapid API calls for head gesture detection. + +See the [Face HeadPose Sample](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/tree/master/app-samples/FaceAPIHeadPoseSample) on GitHub for a working example of head gesture detection. + +## Next steps + +See the [Cognitive Services Face WPF](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/tree/master/app-samples/Cognitive-Services-Face-WPF) app on GitHub for a working example of rotated face rectangles. Or, see the [Face HeadPose Sample](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/tree/master/app-samples) app, which tracks the HeadPose attribute in real time to detect head movements. 
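As a rough illustration of the gesture idea, the following sketch keeps a short sliding window of HeadPose readings and reports a nod when the pitch swings widely and a head shake when the yaw does. The class, window size, and threshold are illustrative assumptions, not part of the linked samples; in practice you would feed it the `Pitch` and `Yaw` values returned for each video frame.

```csharp
using System;
using System.Collections.Generic;
using System.Linq;

// Watches recent HeadPose readings and reports a nod (large pitch swing)
// or a head shake (large yaw swing) within a short time window.
public class HeadGestureDetector
{
    private readonly Queue<(DateTime Time, double Pitch, double Yaw)> samples =
        new Queue<(DateTime Time, double Pitch, double Yaw)>();
    private static readonly TimeSpan Window = TimeSpan.FromSeconds(2); // illustrative window
    private const double SwingThresholdDegrees = 15;                   // illustrative threshold

    // Call once per analyzed frame with the face's HeadPose pitch and yaw (in degrees).
    public string AddSample(double pitch, double yaw)
    {
        var now = DateTime.UtcNow;
        samples.Enqueue((now, pitch, yaw));
        while (samples.Count > 0 && now - samples.Peek().Time > Window)
        {
            samples.Dequeue();
        }

        double pitchSwing = samples.Max(s => s.Pitch) - samples.Min(s => s.Pitch);
        double yawSwing = samples.Max(s => s.Yaw) - samples.Min(s => s.Yaw);

        if (pitchSwing > SwingThresholdDegrees && pitchSwing >= yawSwing) return "nod";
        if (yawSwing > SwingThresholdDegrees) return "shake";
        return null;
    }
}
```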
\ No newline at end of file diff --git a/articles/cognitive-services/Computer-vision/how-to/use-large-scale.md b/articles/cognitive-services/Computer-vision/how-to/use-large-scale.md new file mode 100644 index 0000000000000..62eb1a69a47d2 --- /dev/null +++ b/articles/cognitive-services/Computer-vision/how-to/use-large-scale.md @@ -0,0 +1,276 @@ +--- +title: "Example: Use the Large-Scale feature - Face" +titleSuffix: Azure Cognitive Services +description: This guide is an article on how to scale up from existing PersonGroup and FaceList objects to LargePersonGroup and LargeFaceList objects. +services: cognitive-services +author: SteveMSFT +manager: nitinme + +ms.service: cognitive-services +ms.subservice: face-api +ms.topic: how-to +ms.date: 05/01/2019 +ms.author: sbowles +ms.devlang: csharp +ms.custom: devx-track-csharp +--- + +# Example: Use the large-scale feature + +This guide is an advanced article on how to scale up from existing PersonGroup and FaceList objects to LargePersonGroup and LargeFaceList objects, respectively. This guide demonstrates the migration process. It assumes a basic familiarity with PersonGroup and FaceList objects, the [Train](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599ae2d16ac60f11b48b5aa4) operation, and the face recognition functions. To learn more about these subjects, see the [face recognition](../concept-face-recognition.md) conceptual guide. + +LargePersonGroup and LargeFaceList are collectively referred to as large-scale operations. LargePersonGroup can contain up to 1 million persons, each with a maximum of 248 faces. LargeFaceList can contain up to 1 million faces. The large-scale operations are similar to the conventional PersonGroup and FaceList but have some differences because of the new architecture. + +The samples are written in C# by using the Azure Cognitive Services Face client library. + +> [!NOTE] +> To enable good Face search performance for Identification and FindSimilar at large scale, introduce a Train operation to preprocess the LargeFaceList and LargePersonGroup. The training time varies from seconds to about half an hour based on the actual capacity. During the training period, it's possible to perform Identification and FindSimilar if a successful training operation was completed before. The drawback is that newly added persons and faces don't appear in the results until a new post-migration training operation is completed. + +## Step 1: Initialize the client object + +When you use the Face client library, the key and subscription endpoint are passed in through the constructor of the FaceClient class. For example: + +```csharp +string SubscriptionKey = ""; +// Use your own subscription endpoint corresponding to the key. +string SubscriptionEndpoint = "https://westus.api.cognitive.microsoft.com"; +private readonly IFaceClient faceClient = new FaceClient( + new ApiKeyServiceClientCredentials(SubscriptionKey), + new System.Net.Http.DelegatingHandler[] { }); +faceClient.Endpoint = SubscriptionEndpoint; +``` + +To get the key with its corresponding endpoint, go to the Azure Marketplace from the Azure portal. +For more information, see [Subscriptions](https://azure.microsoft.com/services/cognitive-services/directory/vision/). + +## Step 2: Code migration + +This section focuses on how to migrate PersonGroup or FaceList implementation to LargePersonGroup or LargeFaceList.
Although LargePersonGroup or LargeFaceList differs from PersonGroup or FaceList in design and internal implementation, the API interfaces are similar for backward compatibility. + +Data migration isn't supported. You re-create the LargePersonGroup or LargeFaceList instead. + +### Migrate a PersonGroup to a LargePersonGroup + +Migration from a PersonGroup to a LargePersonGroup is simple. They share exactly the same group-level operations. + +For PersonGroup- or person-related implementation, it's necessary to change only the API paths or SDK class/module to LargePersonGroup and LargePersonGroup Person. + +Add all of the faces and persons from the PersonGroup to the new LargePersonGroup. For more information, see [Add faces](add-faces.md). + +### Migrate a FaceList to a LargeFaceList + +| FaceList APIs | LargeFaceList APIs | +|:---:|:---:| +| Create | Create | +| Delete | Delete | +| Get | Get | +| List | List | +| Update | Update | +| - | Train | +| - | Get Training Status | + +The preceding table is a comparison of list-level operations between FaceList and LargeFaceList. As is shown, LargeFaceList comes with new operations, Train and Get Training Status, when compared with FaceList. Training the LargeFaceList is a precondition of the +[FindSimilar](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237) operation. Training isn't required for FaceList. The following snippet is a helper function to wait for the training of a LargeFaceList: + +```csharp +/// <summary> +/// Helper function to train LargeFaceList and wait for it to finish. +/// </summary> +/// <remarks> +/// The time interval can be adjusted considering the following factors: +/// - The training time, which depends on the capacity of the LargeFaceList. +/// - The acceptable latency for getting the training status. +/// - The call frequency and cost. +/// +/// Estimated training time for LargeFaceList at different scales: +/// - 1,000 faces cost about 1 to 2 seconds. +/// - 10,000 faces cost about 5 to 10 seconds. +/// - 100,000 faces cost about 1 to 2 minutes. +/// - 1,000,000 faces cost about 10 to 30 minutes. +/// </remarks> +/// <param name="largeFaceListId">The Id of the LargeFaceList for training.</param> +/// <param name="timeIntervalInMilliseconds">The time interval for getting training status in milliseconds.</param> +/// <returns>A task that waits for the LargeFaceList training to finish.</returns> +private static async Task TrainLargeFaceList( + string largeFaceListId, + int timeIntervalInMilliseconds = 1000) +{ + // Trigger a train call. + await faceClient.LargeFaceList.TrainAsync(largeFaceListId); + + // Wait for training to finish. + while (true) + { + await Task.Delay(timeIntervalInMilliseconds); + var status = await faceClient.LargeFaceList.GetTrainingStatusAsync(largeFaceListId); + + if (status.Status == TrainingStatusType.Running) + { + continue; + } + else if (status.Status == TrainingStatusType.Succeeded) + { + break; + } + else + { + throw new Exception("The train operation failed!"); + } + } +} +``` + +Previously, a typical use of FaceList with added faces and FindSimilar looked like the following: + +```csharp +// Create a FaceList. +const string FaceListId = "myfacelistid_001"; +const string FaceListName = "MyFaceListDisplayName"; +const string ImageDir = @"/path/to/FaceList/images"; +faceClient.FaceList.CreateAsync(FaceListId, FaceListName).Wait(); + +// Add Faces to the FaceList. +Parallel.ForEach( + Directory.GetFiles(ImageDir, "*.jpg"), + async imagePath => + { + using (Stream stream = File.OpenRead(imagePath)) + { + await faceClient.FaceList.AddFaceFromStreamAsync(FaceListId, stream); + } + }); + +// Perform FindSimilar.
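+// Detect the faces in the query image, then search the FaceList for faces similar to each detected face. +// Each FindSimilar call returns candidate faces with confidence scores.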
+const string QueryImagePath = @"/path/to/query/image"; +var results = new List<IList<SimilarFace>>(); +using (Stream stream = File.OpenRead(QueryImagePath)) +{ + var faces = await faceClient.Face.DetectWithStreamAsync(stream); + foreach (var face in faces) + { + results.Add(await faceClient.Face.FindSimilarAsync(face.FaceId.Value, FaceListId, maxNumOfCandidatesReturned: 20)); + } +} +``` + +When migrating it to LargeFaceList, it becomes the following: + +```csharp +// Create a LargeFaceList. +const string LargeFaceListId = "mylargefacelistid_001"; +const string LargeFaceListName = "MyLargeFaceListDisplayName"; +const string ImageDir = @"/path/to/FaceList/images"; +faceClient.LargeFaceList.CreateAsync(LargeFaceListId, LargeFaceListName).Wait(); + +// Add Faces to the LargeFaceList. +Parallel.ForEach( + Directory.GetFiles(ImageDir, "*.jpg"), + async imagePath => + { + using (Stream stream = File.OpenRead(imagePath)) + { + await faceClient.LargeFaceList.AddFaceFromStreamAsync(LargeFaceListId, stream); + } + }); + +// Train() is a new operation for LargeFaceList. +// You must call it before FindSimilarAsync() to ensure that the newly added faces are searchable. +await TrainLargeFaceList(LargeFaceListId); + +// Perform FindSimilar. +const string QueryImagePath = @"/path/to/query/image"; +var results = new List<IList<SimilarFace>>(); +using (Stream stream = File.OpenRead(QueryImagePath)) +{ + var faces = await faceClient.Face.DetectWithStreamAsync(stream); + foreach (var face in faces) + { + results.Add(await faceClient.Face.FindSimilarAsync(face.FaceId.Value, largeFaceListId: LargeFaceListId)); + } +} +``` + +As previously shown, the data management and the FindSimilar part are almost the same. The only exception is that a fresh preprocessing Train operation must complete in the LargeFaceList before FindSimilar works. + +## Step 3: Train suggestions + +Although the Train operation speeds up [FindSimilar](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237) +and [Identification](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239), the training time increases, especially at large scale. The estimated training time at different scales is listed in the following table. + +| Scale for faces or persons | Estimated training time | +|:---:|:---:| +| 1,000 | 1-2 sec | +| 10,000 | 5-10 sec | +| 100,000 | 1-2 min | +| 1,000,000 | 10-30 min | + +To better utilize the large-scale feature, we recommend the following strategies. + +### Step 3.1: Customize time interval + +As shown in `TrainLargeFaceList()`, there's a time interval in milliseconds that delays each iteration of the training status polling loop. For a LargeFaceList with more faces, using a larger interval reduces the call count and cost. Customize the time interval according to the expected capacity of the LargeFaceList. + +The same strategy also applies to LargePersonGroup. For example, when you train a LargePersonGroup with 1 million persons, `timeIntervalInMilliseconds` might be 60,000, which is a 1-minute interval. + +### Step 3.2: Small-scale buffer + +Persons or faces in a LargePersonGroup or a LargeFaceList are searchable only after being trained. In a dynamic scenario, new persons or faces are constantly added and must be immediately searchable, yet training might take longer than desired. + +To mitigate this problem, use an extra small-scale LargePersonGroup or LargeFaceList as a buffer only for the newly added entries.
This buffer takes a shorter time to train because of the smaller size. The immediate search capability on this temporary buffer should work. Use this buffer in combination with training on the master LargePersonGroup or LargeFaceList by running the master training on a sparser interval. Examples are in the middle of the night and daily. + +An example workflow: + +1. Create a master LargePersonGroup or LargeFaceList, which is the master collection. Create a buffer LargePersonGroup or LargeFaceList, which is the buffer collection. The buffer collection is only for newly added persons or faces. +1. Add new persons or faces to both the master collection and the buffer collection. +1. Only train the buffer collection with a short time interval to ensure that the newly added entries take effect. +1. Call Identification or FindSimilar against both the master collection and the buffer collection. Merge the results. +1. When the buffer collection size increases to a threshold or at a system idle time, create a new buffer collection. Trigger the Train operation on the master collection. +1. Delete the old buffer collection after the Train operation finishes on the master collection. + +### Step 3.3: Standalone training + +If a relatively long latency is acceptable, it isn't necessary to trigger the Train operation right after you add new data. Instead, the Train operation can be split from the main logic and triggered regularly. This strategy is suitable for dynamic scenarios with acceptable latency. It can be applied to static scenarios to further reduce the Train frequency. + +Suppose there's a `TrainLargePersonGroup` function similar to `TrainLargeFaceList`. A typical implementation of the standalone training on a LargePersonGroup by invoking the [`Timer`](/dotnet/api/system.timers.timer) class in `System.Timers` is: + +```csharp +private static void Main() +{ + // Create a LargePersonGroup. + const string LargePersonGroupId = "mylargepersongroupid_001"; + const string LargePersonGroupName = "MyLargePersonGroupDisplayName"; + faceClient.LargePersonGroup.CreateAsync(LargePersonGroupId, LargePersonGroupName).Wait(); + + // Set up standalone training at regular intervals. + const int TimeIntervalForStatus = 1000 * 60; // 1-minute interval for getting training status. + const double TimeIntervalForTrain = 1000 * 60 * 60; // 1-hour interval for training. + var trainTimer = new Timer(TimeIntervalForTrain); + trainTimer.Elapsed += (sender, args) => TrainTimerOnElapsed(LargePersonGroupId, TimeIntervalForStatus); + trainTimer.AutoReset = true; + trainTimer.Enabled = true; + + // Other operations like creating persons, adding faces, and identification, except for Train. + // ... +} + +private static void TrainTimerOnElapsed(string largePersonGroupId, int timeIntervalInMilliseconds) +{ + TrainLargePersonGroup(largePersonGroupId, timeIntervalInMilliseconds).Wait(); +} +``` + +For more information about data management and identification-related implementations, see [Add faces](add-faces.md). + +## Summary + +In this guide, you learned how to migrate the existing PersonGroup or FaceList code, not data, to the LargePersonGroup or LargeFaceList: + +- LargePersonGroup and LargeFaceList work similar to PersonGroup or FaceList, except that the Train operation is required by LargeFaceList. +- Take the proper Train strategy to dynamic data update for large-scale data sets. 
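The standalone-training example above assumes a `TrainLargePersonGroup` helper. A minimal sketch of that helper, mirroring `TrainLargeFaceList` and using the same .NET client library, might look like this:

```csharp
private static async Task TrainLargePersonGroup(
    string largePersonGroupId,
    int timeIntervalInMilliseconds = 1000)
{
    // Trigger a train call.
    await faceClient.LargePersonGroup.TrainAsync(largePersonGroupId);

    // Poll the training status until the operation completes.
    while (true)
    {
        await Task.Delay(timeIntervalInMilliseconds);
        var status = await faceClient.LargePersonGroup.GetTrainingStatusAsync(largePersonGroupId);

        if (status.Status == TrainingStatusType.Running)
        {
            continue;
        }
        else if (status.Status == TrainingStatusType.Succeeded)
        {
            break;
        }
        else
        {
            throw new Exception("The train operation failed!");
        }
    }
}
```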
+ +## Next steps + +Follow a how-to guide to learn how to add faces to a PersonGroup or write a script to do the Identify operation on a PersonGroup. + +- [Add faces](add-faces.md) +- [Face client library quickstart](../quickstarts-sdk/identity-client-library.md) diff --git a/articles/cognitive-services/Face/Face-API-How-to-Topics/use-persondirectory.md b/articles/cognitive-services/Computer-vision/how-to/use-persondirectory.md similarity index 100% rename from articles/cognitive-services/Face/Face-API-How-to-Topics/use-persondirectory.md rename to articles/cognitive-services/Computer-vision/how-to/use-persondirectory.md diff --git a/articles/cognitive-services/Face/APIReference.md b/articles/cognitive-services/Computer-vision/identity-api-reference.md similarity index 100% rename from articles/cognitive-services/Face/APIReference.md rename to articles/cognitive-services/Computer-vision/identity-api-reference.md diff --git a/articles/cognitive-services/Face/encrypt-data-at-rest.md b/articles/cognitive-services/Computer-vision/identity-encrypt-data-at-rest.md similarity index 100% rename from articles/cognitive-services/Face/encrypt-data-at-rest.md rename to articles/cognitive-services/Computer-vision/identity-encrypt-data-at-rest.md diff --git a/articles/cognitive-services/Computer-vision/includes/create-face-resource.md b/articles/cognitive-services/Computer-vision/includes/create-face-resource.md new file mode 100644 index 0000000000000..370a6b2b079a4 --- /dev/null +++ b/articles/cognitive-services/Computer-vision/includes/create-face-resource.md @@ -0,0 +1,33 @@ +--- +title: Container support +titleSuffix: Azure Cognitive Services +services: cognitive-services +author: aahill +manager: nitinme +ms.service: cognitive-services +ms.topic: include +ms.date: 7/5/2019 +ms.author: aahi +--- + +## Create a Face resource + +1. Sign into the [Azure portal](https://portal.azure.com) +1. Select [Create **Face**](https://portal.azure.com/#create/Microsoft.CognitiveServicesFace) resource +1. Enter all required settings: + + |Setting|Value| + |--|--| + |Name|Desired name (2-64 characters)| + |Subscription|Select appropriate subscription| + |Location|Select any nearby and available location| + |Pricing Tier|`F0` - the minimal pricing tier| + |Resource Group|Select an available resource group| + +1. Select **Create** and wait for the resource to be created. After it's created, navigate to the resource page +1. Collect configured `endpoint` and an API key: + + |Resource Tab in Portal|Setting|Value| + |--|--|--| + |**Overview**|Endpoint|Copy the endpoint. It looks similar to `https://face.cognitiveservices.azure.com/face/v1.0`| + |**Keys**|API Key|Copy one of the two keys. It's a 32 alphanumeric-character string with no spaces or dashes, `xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx`.| diff --git a/articles/cognitive-services/Computer-vision/includes/curl-quickstart.md b/articles/cognitive-services/Computer-vision/includes/curl-quickstart.md index fbe536f584e76..9811c5b3b9cac 100644 --- a/articles/cognitive-services/Computer-vision/includes/curl-quickstart.md +++ b/articles/cognitive-services/Computer-vision/includes/curl-quickstart.md @@ -53,7 +53,7 @@ curl -v -X POST "https://westcentralus.api.cognitive.microsoft.com/vision/v3.2/r The response will include an `Operation-Location` header, whose value is a unique URL. You use this URL to query the results of the Read operation. The URL expires in 48 hours. 
### Optionally, specify the model version -As an optional step, see [How to specify the model version](../Vision-API-How-to-Topics/call-read-api.md#determine-how-to-process-the-data-optional). For example, to explicitly specify the latest GA model, use `model-version=2022-04-30` as the parameter. Skipping the parameter or using `model-version=latest` automatically uses the most recent GA model. +As an optional step, see [How to specify the model version](../how-to/call-read-api.md#determine-how-to-process-the-data-optional). For example, to explicitly specify the latest GA model, use `model-version=2022-04-30` as the parameter. Skipping the parameter or using `model-version=latest` automatically uses the most recent GA model. ```bash curl -v -X POST "https://westcentralus.api.cognitive.microsoft.com/vision/v3.2/read/analyze?model-version=2022-04-30" -H "Content-Type: application/json" -H "Ocp-Apim-Subscription-Key: " --data-ascii "{\"url\":\"https://upload.wikimedia.org/wikipedia/commons/thumb/a/af/Atomist_quote_from_Democritus.png/338px-Atomist_quote_from_Democritus.png\"}" ``` @@ -140,6 +140,6 @@ A successful response is returned in JSON. The sample application parses and dis In this quickstart, you learned how to call the Read REST API. Next, learn more about the Read API features. > [!div class="nextstepaction"] ->[Call the Read API](../Vision-API-How-to-Topics/call-read-api.md) +>[Call the Read API](../how-to/call-read-api.md) * [OCR overview](../overview-ocr.md) diff --git a/articles/cognitive-services/Computer-vision/includes/identity-curl-quickstart.md b/articles/cognitive-services/Computer-vision/includes/identity-curl-quickstart.md new file mode 100644 index 0000000000000..874943b4d4833 --- /dev/null +++ b/articles/cognitive-services/Computer-vision/includes/identity-curl-quickstart.md @@ -0,0 +1,87 @@ +--- +title: "Face REST API quickstart" +description: Use the Face REST API with cURL to detect and analyze faces. +services: cognitive-services +author: PatrickFarley +manager: nitinme +ms.service: cognitive-services +ms.subservice: face-api +ms.topic: include +ms.date: 12/06/2020 +ms.author: pafarley +--- + +Get started with facial recognition using the Face REST API. The Face service provides you with access to advanced algorithms for detecting and recognizing human faces in images. + +> [!NOTE] +> This quickstart uses cURL commands to call the REST API. You can also call the REST API using a programming language. Complex scenarios like face identification are easier to implement using a language SDK. See the GitHub samples for examples in [C#](https://github.com/Azure-Samples/cognitive-services-quickstart-code/tree/master/dotnet/Face/rest), [Python](https://github.com/Azure-Samples/cognitive-services-quickstart-code/tree/master/python/Face/rest), [Java](https://github.com/Azure-Samples/cognitive-services-quickstart-code/tree/master/java/Face/rest), [JavaScript](https://github.com/Azure-Samples/cognitive-services-quickstart-code/tree/master/javascript/Face/rest), and [Go](https://github.com/Azure-Samples/cognitive-services-quickstart-code/tree/master/go/Face/rest). + +## Prerequisites + +* Azure subscription - [Create one for free](https://azure.microsoft.com/free/cognitive-services/) +* [!INCLUDE [contributor-requirement](../../includes/quickstarts/contributor-requirement.md)] +* Once you have your Azure subscription, create a Face resource in the Azure portal to get your key and endpoint. After it deploys, click **Go to resource**. 
+ * You'll need the key and endpoint from the resource you create to connect your application to the Face API. You'll paste your key and endpoint into the code below later in the quickstart. + * You can use the free pricing tier (`F0`) to try the service, and upgrade later to a paid tier for production. +* [PowerShell version 6.0+](/powershell/scripting/install/installing-powershell-core-on-windows), or a similar command-line application. + + +## Identify faces + +1. First, call the Detect API on the source face. This is the face that we'll try to identify from the larger group. Copy the following command to a text editor, insert your own key, and then copy it into a shell window and run it. + + :::code source="~/cognitive-services-quickstart-code/curl/face/detect.sh" ID="identify_detect"::: + + Save the returned face ID string to a temporary location. You'll use it again at the end. + +1. Next you'll need to create a **LargePersonGroup**. This object will store the aggregated face data of several persons. Run the following command, inserting your own key. Optionally, change the group's name and metadata in the request body. + + :::code source="~/cognitive-services-quickstart-code/curl/face/detect.sh" ID="identify_create_persongroup"::: + + Save the returned ID of the created group to a temporary location. + +1. Next, you'll create **Person** objects that belong to the group. Run the following command, inserting your own key and the ID of the **LargePersonGroup** from the previous step. This command creates a **Person** named "Family1-Dad". + + :::code source="~/cognitive-services-quickstart-code/curl/face/detect.sh" ID="identify_create_person"::: + + After you run this command, run it again with different input data to create more **Person** objects: "Family1-Mom", "Family1-Son", "Family1-Daughter", "Family2-Lady", and "Family2-Man". + + Save the IDs of each **Person** created; it's important to keep track of which person name has which ID. + +1. Next you'll need to detect new faces and associate them with the **Person** objects that exist. The following command detects a face from the image *Family1-Dad.jpg* and adds it to the corresponding person. You need to specify the `personId` as the ID that was returned when you created the "Family1-Dad" **Person** object. The image name corresponds to the name of the created **Person**. Also enter the **LargePersonGroup** ID and your key in the appropriate fields. + + :::code source="~/cognitive-services-quickstart-code/curl/face/detect.sh" ID="identify_add_face"::: + + Then, run the above command again with a different source image and target **Person**. The images available are: *Family1-Dad1.jpg*, *Family1-Dad2.jpg* *Family1-Mom1.jpg*, *Family1-Mom2.jpg*, *Family1-Son1.jpg*, *Family1-Son2.jpg*, *Family1-Daughter1.jpg*, *Family1-Daughter2.jpg*, *Family2-Lady1.jpg*, *Family2-Lady2.jpg*, *Family2-Man1.jpg*, and *Family2-Man2.jpg*. Be sure that the **Person** whose ID you specify in the API call matches the name of the image file in the request body. + + At the end of this step, you should have multiple **Person** objects that each have one or more corresponding faces, detected directly from the provided images. + +1. Next, train the **LargePersonGroup** with the current face data. The training operation teaches the model how to associate facial features, sometimes aggregated from multiple source images, to each single person. Insert the **LargePersonGroup** ID and your key before running the command. 
+ + :::code source="~/cognitive-services-quickstart-code/curl/face/detect.sh" ID="identify_train"::: + +1. Now you're ready to call the Identify API, using the source face ID from the first step and the **LargePersonGroup** ID. Insert these values into the appropriate fields in the request body, and insert your key. + + :::code source="~/cognitive-services-quickstart-code/curl/face/detect.sh" ID="identify_identify"::: + + The response should give you a **Person** ID indicating the person identified with the source face. It should be the ID that corresponds to the "Family1-Dad" person, because the source face is of that person. + +## Clean up resources + +To delete the **LargePersonGroup** you created in this exercise, run the LargePersonGroup - Delete call. + +:::code source="~/cognitive-services-quickstart-code/curl/face/detect.sh" ID="identify_delete"::: + +If you want to clean up and remove a Cognitive Services subscription, you can delete the resource or resource group. Deleting the resource group also deletes any other resources associated with it. + +* [Portal](../../cognitive-services-apis-create-account.md#clean-up-resources) +* [Azure CLI](../../cognitive-services-apis-create-account-cli.md#clean-up-resources) + +## Next steps + +In this quickstart, you learned how to use the Face REST API to do basic facial recognition tasks. Next, learn about the different face detection models and how to specify the right model for your use case. + +> [!div class="nextstepaction"] +> [Specify a face detection model version](../how-to/specify-detection-model.md) + +* [What is the Face service?](../overview-identity.md) \ No newline at end of file diff --git a/articles/cognitive-services/Computer-vision/includes/image-analysis-curl-quickstart.md b/articles/cognitive-services/Computer-vision/includes/image-analysis-curl-quickstart.md index 852df6593f2ec..1c47dbcd4f118 100644 --- a/articles/cognitive-services/Computer-vision/includes/image-analysis-curl-quickstart.md +++ b/articles/cognitive-services/Computer-vision/includes/image-analysis-curl-quickstart.md @@ -139,6 +139,6 @@ A successful response writes the thumbnail image to the file specified in ` [!div class="nextstepaction"] ->[Call the Analyze API](../Vision-API-How-to-Topics/HowToCallVisionAPI.md) +>[Call the Analyze API](../how-to/call-analyze-image.md) * [Image Analysis overview](../overview-image-analysis.md) diff --git a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/csharp-sdk.md b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/csharp-sdk.md index dcd006f98ef37..01477d55bed26 100644 --- a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/csharp-sdk.md +++ b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/csharp-sdk.md @@ -88,7 +88,7 @@ Use the OCR client library to read printed and handwritten text from a remote im > [!IMPORTANT] > Remember to remove the key from your code when you're done, and never post it publicly. For production, consider using a secure way of storing and accessing your credentials. For example, [Azure key vault](../../../../key-vault/general/overview.md). -1. As an optional step, see [How to specify the model version](../../Vision-API-How-to-Topics/call-read-api.md#determine-how-to-process-the-data-optional). For example, to explicitly specify the latest GA model, edit the `ReadAsync` call as shown. Skipping the parameter or using `"latest"` automatically uses the most recent GA model. +1. 
As an optional step, see [How to specify the model version](../../how-to/call-read-api.md#determine-how-to-process-the-data-optional). For example, to explicitly specify the latest GA model, edit the `ReadAsync` call as shown. Skipping the parameter or using `"latest"` automatically uses the most recent GA model. ```csharp // Read text from URL with a specific model version @@ -150,7 +150,7 @@ If you want to clean up and remove a Cognitive Services subscription, you can de In this quickstart, you learned how to install the OCR client library and use the Read API. Next, learn more about the Read API features. > [!div class="nextstepaction"] ->[Call the Read API](../../Vision-API-How-to-Topics/call-read-api.md) +>[Call the Read API](../../how-to/call-read-api.md) * [OCR overview](../../overview-ocr.md) * The source code for this sample can be found on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/dotnet/ComputerVision/ComputerVisionQuickstart.cs). diff --git a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/identity-csharp-sdk.md b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/identity-csharp-sdk.md new file mode 100644 index 0000000000000..c5ad09e7e5bad --- /dev/null +++ b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/identity-csharp-sdk.md @@ -0,0 +1,162 @@ +--- +title: "Face .NET client library quickstart" +description: Use the Face client library for .NET to detect and identify faces (facial recognition search). +services: cognitive-services +author: PatrickFarley +manager: nitinme +ms.service: cognitive-services +ms.subservice: face-api +ms.topic: include +ms.date: 05/03/2022 +ms.author: pafarley +--- + +Get started with facial recognition using the Face client library for .NET. The Face service provides you with access to advanced algorithms for detecting and recognizing human faces in images. Follow these steps to install the package and try out the example code for basic face identification using remote images. + +[Reference documentation](/dotnet/api/overview/azure/cognitiveservices/face-readme) | [Library source code](https://github.com/Azure/azure-sdk-for-net/tree/master/sdk/cognitiveservices/Vision.Face) | [Package (NuGet)](https://www.nuget.org/packages/Microsoft.Azure.CognitiveServices.Vision.Face/2.7.0-preview.1) | [Samples](/samples/browse/?products=azure&term=face) + +## Prerequisites + +* Azure subscription - [Create one for free](https://azure.microsoft.com/free/cognitive-services/) +* The [Visual Studio IDE](https://visualstudio.microsoft.com/vs/) or current version of [.NET Core](https://dotnet.microsoft.com/download/dotnet-core). +* [!INCLUDE [contributor-requirement](../../../includes/quickstarts/contributor-requirement.md)] +* Once you have your Azure subscription, create a Face resource in the Azure portal to get your key and endpoint. After it deploys, click **Go to resource**. + * You will need the key and endpoint from the resource you create to connect your application to the Face API. You'll paste your key and endpoint into the code below later in the quickstart. + * You can use the free pricing tier (`F0`) to try the service, and upgrade later to a paid tier for production. + +## Identify faces + +1. Create a new C# application + + #### [Visual Studio IDE](#tab/visual-studio) + + Using Visual Studio, create a new .NET Core application. 
+ + ### Install the client library + + Once you've created a new project, install the client library by right-clicking on the project solution in the **Solution Explorer** and selecting **Manage NuGet Packages**. In the package manager that opens select **Browse**, check **Include prerelease**, and search for `Microsoft.Azure.CognitiveServices.Vision.Face`. Select version `2.7.0-preview.1`, and then **Install**. + + #### [CLI](#tab/cli) + + In a console window (such as cmd, PowerShell, or Bash), use the `dotnet new` command to create a new console app with the name `face-quickstart`. This command creates a simple "Hello World" C# project with a single source file: *program.cs*. + + ```console + dotnet new console -n face-quickstart + ``` + + Change your directory to the newly created app folder. You can build the application with: + + ```console + dotnet build + ``` + + The build output should contain no warnings or errors. + + ```console + ... + Build succeeded. + 0 Warning(s) + 0 Error(s) + ... + ``` + + ### Install the client library + + Within the application directory, install the Face client library for .NET with the following command: + + ```console + dotnet add package Microsoft.Azure.CognitiveServices.Vision.Face --version 2.7.0-preview.1 + ``` + + --- +1. Add the following code into the *Program.cs* file. + + [!code-csharp[](~/cognitive-services-quickstart-code/dotnet/Face/FaceQuickstart-single.cs?name=snippet_single)] + + +1. Enter your key and endpoint in the corresponding fields. + + > [!IMPORTANT] + > Go to the Azure portal. If the Face resource you created in the **Prerequisites** section deployed successfully, click the **Go to Resource** button under **Next Steps**. You can find your key and endpoint in the resource's **key and endpoint** page, under **resource management**. + + > [!IMPORTANT] + > Remember to remove the key from your code when you're done, and never post it publicly. For production, consider using a secure way of storing and accessing your credentials. See the Cognitive Services [security](../../../cognitive-services-security.md) article for more information. + +1. Run the application + + #### [Visual Studio IDE](#tab/visual-studio) + + Run the application by clicking the **Debug** button at the top of the IDE window. + + #### [CLI](#tab/cli) + + Run the application from your application directory with the `dotnet run` command. + + ```dotnet + dotnet run + ``` + + --- + +## Output + +```console +========IDENTIFY FACES======== + +Create a person group (3972c063-71b3-4328-8579-6d190ee76f99). +Create a person group person 'Family1-Dad'. +Add face to the person group person(Family1-Dad) from image `Family1-Dad1.jpg` +Add face to the person group person(Family1-Dad) from image `Family1-Dad2.jpg` +Create a person group person 'Family1-Mom'. +Add face to the person group person(Family1-Mom) from image `Family1-Mom1.jpg` +Add face to the person group person(Family1-Mom) from image `Family1-Mom2.jpg` +Create a person group person 'Family1-Son'. +Add face to the person group person(Family1-Son) from image `Family1-Son1.jpg` +Add face to the person group person(Family1-Son) from image `Family1-Son2.jpg` +Create a person group person 'Family1-Daughter'. +Create a person group person 'Family2-Lady'. +Add face to the person group person(Family2-Lady) from image `Family2-Lady1.jpg` +Add face to the person group person(Family2-Lady) from image `Family2-Lady2.jpg` +Create a person group person 'Family2-Man'. 
+Add face to the person group person(Family2-Man) from image `Family2-Man1.jpg` +Add face to the person group person(Family2-Man) from image `Family2-Man2.jpg` + +Train person group 3972c063-71b3-4328-8579-6d190ee76f99. +Training status: Succeeded. + +4 face(s) with 4 having sufficient quality for recognition detected from image `identification1.jpg` +Person 'Family1-Dad' is identified for face in: identification1.jpg - 994bfd7a-0d8f-4fae-a5a6-c524664cbee7, confidence: 0.96725. +Person 'Family1-Mom' is identified for face in: identification1.jpg - 0c9da7b9-a628-429d-97ff-cebe7c638fb5, confidence: 0.96921. +No person is identified for face in: identification1.jpg - a881259c-e811-4f7e-a35e-a453e95ca18f, +Person 'Family1-Son' is identified for face in: identification1.jpg - 53772235-8193-46eb-bdfc-1ebc25ea062e, confidence: 0.92886. + +End of quickstart. +``` + +> [!TIP] +> The Face API runs on a set of pre-built models that are static by nature (the model's performance will not regress or improve as the service is run). The results that the model produces might change if Microsoft updates the model's backend without migrating to an entirely new model version. To take advantage of a newer version of a model, you can retrain your **PersonGroup**, specifying the newer model as a parameter with the same enrollment images. + +## Clean up resources + +If you want to clean up and remove a Cognitive Services subscription, you can delete the resource or resource group. Deleting the resource group also deletes any other resources associated with it. + +* [Portal](../../../cognitive-services-apis-create-account.md#clean-up-resources) +* [Azure CLI](../../../cognitive-services-apis-create-account-cli.md#clean-up-resources) + +To delete the **PersonGroup** you created in this quickstart, run the following code in your program: + +[!code-csharp[](~/cognitive-services-quickstart-code/dotnet/Face/FaceQuickstart.cs?name=snippet_persongroup_delete)] + +Define the deletion method with the following code: + +[!code-csharp[](~/cognitive-services-quickstart-code/dotnet/Face/FaceQuickstart.cs?name=snippet_deletepersongroup)] + +## Next steps + +In this quickstart, you learned how to use the Face client library for .NET to do basic face identification. Next, learn about the different face detection models and how to specify the right model for your use case. + +> [!div class="nextstepaction"] +> [Specify a face detection model version](../../how-to/specify-detection-model.md) + +* [What is the Face service?](../../overview.md) +* More extensive sample code can be found on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/dotnet/Face/FaceQuickstart.cs). \ No newline at end of file diff --git a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/identity-go-sdk.md b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/identity-go-sdk.md new file mode 100644 index 0000000000000..90404dcf68ea7 --- /dev/null +++ b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/identity-go-sdk.md @@ -0,0 +1,252 @@ +--- +title: "Face Go client library quickstart" +description: Use the Face client library for Go to detect and identify faces (facial recognition search). +services: cognitive-services +author: PatrickFarley +manager: nitinme +ms.service: cognitive-services +ms.subservice: face-api +ms.topic: include +ms.date: 10/26/2020 +ms.author: pafarley +--- +Get started with facial recognition using the Face client library for Go. 
Follow these steps to install the package and try out the example code for basic tasks. The Face service provides you with access to advanced algorithms for detecting and recognizing human faces in images.
+
+Use the Face service client library for Go to:
+
+* [Detect and analyze faces](#detect-and-analyze-faces)
+* [Identify a face](#identify-a-face)
+* [Verify faces](#verify-faces)
+
+[Reference documentation](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face) | [Library source code](https://github.com/Azure/azure-sdk-for-go/tree/master/services/cognitiveservices/v1.0/face) | [SDK download](https://github.com/Azure/azure-sdk-for-go)
+
+## Prerequisites
+
+* The latest version of [Go](https://go.dev/dl/)
+* Azure subscription - [Create one for free](https://azure.microsoft.com/free/cognitive-services/)
+* [!INCLUDE [contributor-requirement](../../../includes/quickstarts/contributor-requirement.md)]
+* Once you have your Azure subscription, create a Face resource in the Azure portal to get your key and endpoint. After it deploys, click **Go to resource**.
+    * You will need the key and endpoint from the resource you create to connect your application to the Face API. You'll paste your key and endpoint into the code below later in the quickstart.
+    * You can use the free pricing tier (`F0`) to try the service, and upgrade later to a paid tier for production.
+* After you get a key and endpoint, [create environment variables](../../../cognitive-services-apis-create-account.md#configure-an-environment-variable-for-authentication) for the key and endpoint, named `FACE_SUBSCRIPTION_KEY` and `FACE_ENDPOINT`, respectively.
+
+## Setting up
+
+### Create a Go project directory
+
+In a console window (cmd, PowerShell, Terminal, Bash), create a new workspace for your Go project, named `my-app`, and navigate to it.
+
+```bash
+mkdir -p my-app/{src,bin,pkg}
+cd my-app
+```
+
+Your workspace will contain three folders:
+
+* **src** - This directory will contain source code and packages. Any packages installed with the `go get` command will be in this folder.
+* **pkg** - This directory will contain the compiled Go package objects. These files all have a `.a` extension.
+* **bin** - This directory will contain the binary executable files that are created when you run `go install`.
+
+> [!TIP]
+> To learn more about the structure of a Go workspace, see the [Go language documentation](https://go.dev/doc/code.html#Workspaces). This guide includes information for setting `$GOPATH` and `$GOROOT`.
+
+### Install the client library for Go
+
+Next, install the client library for Go:
+
+```bash
+go get -u github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face
+```
+
+Or, if you use dep, run the following within your repo:
+
+```bash
+dep ensure -add github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face
+```
+
+### Create a Go application
+
+Next, create a file in the **src** directory named `sample-app.go`:
+
+```bash
+cd src
+touch sample-app.go
+```
+
+Open `sample-app.go` in your preferred IDE or text editor. Then add the package name and import the following libraries:
+
+[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_imports)]
+
+Next, you'll begin adding code to carry out different Face service operations.
+
+## Object model
+
+The following classes and interfaces handle some of the major features of the Face service Go client library.
+ +|Name|Description| +|---|---| +|[BaseClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#BaseClient) | This class represents your authorization to use the Face service, and you need it for all Face functionality. You instantiate it with your subscription information, and you use it to produce instances of other classes. | +|[Client](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#Client)|This class handles the basic detection and recognition tasks that you can do with human faces. | +|[DetectedFace](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#DetectedFace)|This class represents all of the data that was detected from a single face in an image. You can use it to retrieve detailed information about the face.| +|[ListClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#ListClient)|This class manages the cloud-stored **FaceList** constructs, which store an assorted set of faces. | +|[PersonGroupPersonClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#PersonGroupPersonClient)| This class manages the cloud-stored **Person** constructs, which store a set of faces that belong to a single person.| +|[PersonGroupClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#PersonGroupClient)| This class manages the cloud-stored **PersonGroup** constructs, which store a set of assorted **Person** objects. | +|[SnapshotClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#SnapshotClient)|This class manages the Snapshot functionality. You can use it to temporarily save all of your cloud-based Face data and migrate that data to a new Azure subscription. | + +## Code examples + +These code samples show you how to complete basic tasks using the Face service client library for Go: + +* [Authenticate the client](#authenticate-the-client) +* [Detect and analyze faces](#detect-and-analyze-faces) +* [Identify a face](#identify-a-face) +* [Verify faces](#verify-faces) + +## Authenticate the client + +> [!NOTE] +> This quickstart assumes you've [created environment variables](../../../cognitive-services-apis-create-account.md#configure-an-environment-variable-for-authentication) for your Face key and endpoint, named `FACE_SUBSCRIPTION_KEY` and `FACE_ENDPOINT` respectively. + +Create a **main** function and add the following code to it to instantiate a client with your endpoint and key. You create a **[CognitiveServicesAuthorizer](https://godoc.org/github.com/Azure/go-autorest/autorest#CognitiveServicesAuthorizer)** object with your key, and use it with your endpoint to create a **[Client](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#Client)** object. This code also instantiates a context object, which is needed for the creation of client objects. It also defines a remote location where some of the sample images in this quickstart are found. + +[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_main_client)] + + +## Detect and analyze faces + +Face detection is required as a first step in Face Analysis and Identity Verification. This section shows how to return the extra face attribute data. If you only want to detect faces for face identification or verification, skip to the later sections. 
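+
+The client library's detection methods are wrappers over the Face REST API's `detect` operation. As an optional, standalone illustration (it isn't part of the quickstart sample), the following minimal sketch calls the v1.0 `detect` endpoint directly using only the Go standard library. It assumes the `FACE_SUBSCRIPTION_KEY` and `FACE_ENDPOINT` environment variables described in the prerequisites; the image URL is only an example and can be any publicly reachable image that contains a face.
+
+```go
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"os"
+	"strings"
+)
+
+func main() {
+	// Key and endpoint come from the environment variables created in the prerequisites.
+	key := os.Getenv("FACE_SUBSCRIPTION_KEY")
+	endpoint := strings.TrimSuffix(os.Getenv("FACE_ENDPOINT"), "/")
+
+	// Any publicly reachable image that contains a face works here; this URL is only an example.
+	body, err := json.Marshal(map[string]string{
+		"url": "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Dad1.jpg",
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Call the v1.0 detect operation and ask only for face IDs, using the default detection model.
+	url := endpoint + "/face/v1.0/detect?returnFaceId=true&detectionModel=detection_01"
+	req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(body))
+	if err != nil {
+		log.Fatal(err)
+	}
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("Ocp-Apim-Subscription-Key", key)
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer resp.Body.Close()
+
+	// The service returns a JSON array with one element per detected face.
+	raw, err := io.ReadAll(resp.Body)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(resp.Status)
+	fmt.Println(string(raw))
+}
+```
+
+The service responds with a JSON array containing one element per detected face; the client library surfaces the same data as **DetectedFace** objects. The rest of this section uses the SDK rather than raw REST calls.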
+ + +Add the following code in your **main** method. This code defines a remote sample image and specifies which face features to extract from the image. It also specifies which AI model to use to extract data from the detected face(s). See [Specify a recognition model](../../how-to/specify-recognition-model.md) for information on these options. Finally, the **[DetectWithURL](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#Client.DetectWithURL)** method does the face detection operation on the image and saves the results in program memory. + +[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_detect)] + +> [!TIP] +> You can also detect faces in a local image. See the [Client](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#Client) methods such as **DetectWithStream**. + +### Display detected face data + +The next block of code takes the first element in the array of **[DetectedFace](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#DetectedFace)** objects and prints its attributes to the console. If you used an image with multiple faces, you should iterate through the array instead. + +[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_detect_display)] + + + + + +## Identify a face + +The Identify operation takes an image of a person (or multiple people) and looks to find the identity of each face in the image (facial recognition search). It compares each detected face to a **PersonGroup**, a database of different **Person** objects whose facial features are known. + +### Get Person images + +To step through this scenario, you need to save the following images to the root directory of your project: https://github.com/Azure-Samples/cognitive-services-sample-data-files/tree/master/Face/images. + +This group of images contains three sets of single-face images that correspond to three different people. The code will define three **PersonGroup Person** objects and associate them with image files that start with `woman`, `man`, and `child`. + +### Create a PersonGroup + +Once you've downloaded your images, add the following code to the bottom of your **main** method. This code authenticates a **[PersonGroupClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#PersonGroupClient)** object and then uses it to define a new **PersonGroup**. + +[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_pg_setup)] + +### Create PersonGroup Persons + +The next block of code authenticates a **[PersonGroupPersonClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#PersonGroupPersonClient)** and uses it to define three new **PersonGroup Person** objects. These objects each represent a single person in the set of images. + +[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_pgp_setup)] + +### Assign faces to Persons + +The following code sorts the images by their prefix, detects faces, and assigns the faces to each respective **PersonGroup Person** object, based on the image file name. + +[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_pgp_assign)] + +> [!TIP] +> You can also create a **PersonGroup** from remote images referenced by URL. 
See the [PersonGroupPersonClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#PersonGroupPersonClient) methods such as **AddFaceFromURL**. + +### Train the PersonGroup + +Once you've assigned faces, you train the **PersonGroup** so it can identify the visual features associated with each of its **Person** objects. The following code calls the asynchronous **train** method and polls the result, printing the status to the console. + +[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_pg_train)] + +> [!TIP] +> The Face API runs on a set of pre-built models that are static by nature (the model's performance will not regress or improve as the service is run). The results that the model produces might change if Microsoft updates the model's backend without migrating to an entirely new model version. To take advantage of a newer version of a model, you can retrain your **PersonGroup**, specifying the newer model as a parameter with the same enrollment images. + +### Get a test image + +The following code looks in the root of your project for an image _test-image-person-group.jpg_ and loads it into program memory. You can find this image in the same repo as the images used to create the **PersonGroup**: https://github.com/Azure-Samples/cognitive-services-sample-data-files/tree/master/Face/images. + +[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_id_source_get)] + +### Detect source faces in test image + +The next code block does ordinary face detection on the test image to retrieve all of the faces and save them to an array. + +[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_id_source_detect)] + +### Identify faces from source image + +The **[Identify](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#Client.Identify)** method takes the array of detected faces and compares them to the given **PersonGroup** (defined and trained in the earlier section). If it can match a detected face to a **Person** in the group, it saves the result. + +[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_id)] + +This code then prints detailed match results to the console. + +[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_id_print)] + + +### Verify faces + +The Verify operation takes a face ID and either another face ID or a **Person** object and determines whether they belong to the same person. Verification can be used to double-check the face match returned by the Identify operation. + +The following code detects faces in two source images and then verifies each of them against a face detected from a target image. + +### Get test images + +The following code blocks declare variables that will point to the target and source images for the verification operation. + +[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_ver_images)] + +### Detect faces for verification + +The following code detects faces in the source and target images and saves them to variables. 
+
+[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_ver_detect_source)]
+
+[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_ver_detect_target)]
+
+### Get verification results
+
+The following code compares each of the source images to the target image and prints a message indicating whether they belong to the same person.
+
+[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_ver)]
+
+
+## Run the application
+
+Run your face recognition app from the application directory with the `go run` command.
+
+```bash
+go run sample-app.go
+```
+
+## Clean up resources
+
+If you want to clean up and remove a Cognitive Services subscription, you can delete the resource or resource group. Deleting the resource group also deletes any other resources associated with it.
+
+* [Portal](../../../cognitive-services-apis-create-account.md#clean-up-resources)
+* [Azure CLI](../../../cognitive-services-apis-create-account-cli.md#clean-up-resources)
+
+If you created a **PersonGroup** in this quickstart and you want to delete it, call the **[Delete](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#PersonGroupClient.Delete)** method.
+
+## Next steps
+
+In this quickstart, you learned how to use the Face client library for Go to do basic facial recognition tasks. Next, learn about the different face detection models and how to specify the right model for your use case.
+
+> [!div class="nextstepaction"]
+> [Specify a face detection model version](../../how-to/specify-detection-model.md)
+
+* [What is the Face service?](../../overview.md)
+* The source code for this sample can be found on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/go/Face/FaceQuickstart.go).
diff --git a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/identity-javascript-sdk.md b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/identity-javascript-sdk.md
new file mode 100644
index 0000000000000..bdeac9a53890e
--- /dev/null
+++ b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/identity-javascript-sdk.md
@@ -0,0 +1,121 @@
+---
+title: "Face JavaScript client library quickstart"
+description: Use the Face client library for JavaScript to detect and identify faces (facial recognition search).
+services: cognitive-services
+author: PatrickFarley
+manager: nitinme
+ms.service: cognitive-services
+ms.subservice: face-api
+ms.topic: include
+ms.date: 05/03/2022
+ms.author: pafarley
+---
+
+## Quickstart: Face client library for JavaScript
+
+Get started with facial recognition using the Face client library for JavaScript. Follow these steps to install the package and try out the example code for basic face identification using remote images. The Face service provides you with access to advanced algorithms for detecting and recognizing human faces in images.
+ +[Reference documentation](/javascript/api/overview/azure/cognitiveservices/face) | [Library source code](https://github.com/Azure/azure-sdk-for-js/tree/master/sdk/cognitiveservices/cognitiveservices-face) | [Package (npm)](https://www.npmjs.com/package/@azure/cognitiveservices-face) | [Samples](/samples/browse/?products=azure&term=face&languages=javascript) + +## Prerequisites + +* Azure subscription - [Create one for free](https://azure.microsoft.com/free/cognitive-services/) +* The latest version of [Node.js](https://nodejs.org/en/) +* [!INCLUDE [contributor-requirement](../../../includes/quickstarts/contributor-requirement.md)] +* Once you have your Azure subscription, [Create a Face resource](https://portal.azure.com/#create/Microsoft.CognitiveServicesFace) in the Azure portal to get your key and endpoint. After it deploys, click **Go to resource**. + * You will need the key and endpoint from the resource you create to connect your application to the Face API. You'll paste your key and endpoint into the code below later in the quickstart. + * You can use the free pricing tier (`F0`) to try the service, and upgrade later to a paid tier for production. + +## Identify faces + +1. Create a new Node.js application + + In a console window (such as cmd, PowerShell, or Bash), create a new directory for your app, and navigate to it. + + ```console + mkdir myapp && cd myapp + ``` + + Run the `npm init` command to create a node application with a `package.json` file. + + ```console + npm init + ``` + +1. Install the `ms-rest-azure` and `azure-cognitiveservices-face` NPM packages: + + ```console + npm install @azure/cognitiveservices-face @azure/ms-rest-js uuid + ``` + + Your app's `package.json` file will be updated with the dependencies. + +1. Create a file named `index.js`, open it in a text editor, and paste in the following code: + + :::code language="js" source="~/cognitive-services-quickstart-code/javascript/Face/sdk_quickstart-single.js" id="snippet_single"::: + + +1. Enter your key and endpoint into the corresponding fields. + + > [!IMPORTANT] + > Go to the Azure portal. If the Face resource you created in the **Prerequisites** section deployed successfully, click the **Go to Resource** button under **Next Steps**. You can find your key and endpoint in the resource's **key and endpoint** page, under **resource management**. + + > [!IMPORTANT] + > Remember to remove the key from your code when you're done, and never post it publicly. For production, consider using a secure way of storing and accessing your credentials. See the Cognitive Services [security](../../../cognitive-services-security.md) article for more information. + +1. Run the application with the `node` command on your quickstart file. + + ```console + node index.js + ``` + +## Output + +```console +========IDENTIFY FACES======== + +Creating a person group with ID: c08484e0-044b-4610-8b7e-c957584e5d2d +Adding faces to person group... +Create a persongroup person: Family1-Dad. +Create a persongroup person: Family1-Mom. +Create a persongroup person: Family2-Lady. +Create a persongroup person: Family1-Son. +Create a persongroup person: Family1-Daughter. +Create a persongroup person: Family2-Man. +Add face to the person group person: (Family1-Son) from image: Family1-Son2.jpg. +Add face to the person group person: (Family1-Dad) from image: Family1-Dad2.jpg. +Add face to the person group person: (Family1-Mom) from image: Family1-Mom1.jpg. +Add face to the person group person: (Family2-Man) from image: Family2-Man1.jpg. 
+Add face to the person group person: (Family1-Son) from image: Family1-Son1.jpg. +Add face to the person group person: (Family2-Lady) from image: Family2-Lady2.jpg. +Add face to the person group person: (Family1-Mom) from image: Family1-Mom2.jpg. +Add face to the person group person: (Family1-Dad) from image: Family1-Dad1.jpg. +Add face to the person group person: (Family2-Man) from image: Family2-Man2.jpg. +Add face to the person group person: (Family2-Lady) from image: Family2-Lady1.jpg. +Done adding faces to person group. + +Training person group: c08484e0-044b-4610-8b7e-c957584e5d2d. +Waiting 10 seconds... +Training status: succeeded. + +Person: Family1-Mom is identified for face in: identification1.jpg with ID: b7f7f542-c338-4a40-ad52-e61772bc6e14. Confidence: 0.96921. +Person: Family1-Son is identified for face in: identification1.jpg with ID: 600dc1b4-b2c4-4516-87de-edbbdd8d7632. Confidence: 0.92886. +Person: Family1-Dad is identified for face in: identification1.jpg with ID: e83b494f-9ad2-473f-9d86-3de79c01e345. Confidence: 0.96725. +``` + +## Clean up resources + +If you want to clean up and remove a Cognitive Services subscription, you can delete the resource or resource group. Deleting the resource group also deletes any other resources associated with it. + +* [Portal](../../../cognitive-services-apis-create-account.md#clean-up-resources) +* [Azure CLI](../../../cognitive-services-apis-create-account-cli.md#clean-up-resources) + +## Next steps + +In this quickstart, you learned how to use the Face client library for JavaScript to do basic face identification. Next, learn about the different face detection models and how to specify the right model for your use case. + +> [!div class="nextstepaction"] +> [Specify a face detection model version](../../how-to/specify-detection-model.md) + +* [What is the Face service?](../../overview.md) +* More extensive sample code can be found on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/javascript/Face/sdk_quickstart.js). diff --git a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/identity-python-sdk.md b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/identity-python-sdk.md new file mode 100644 index 0000000000000..0432d074cac08 --- /dev/null +++ b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/identity-python-sdk.md @@ -0,0 +1,80 @@ +--- +title: "Face Python client library quickstart" +description: Use the Face client library for Python to detect faces and identify faces (facial recognition search). +services: cognitive-services +author: PatrickFarley +manager: nitinme +ms.service: cognitive-services +ms.subservice: face-api +ms.topic: include +ms.date: 05/03/2022 +ms.author: pafarley +--- + +Get started with facial recognition using the Face client library for Python. Follow these steps to install the package and try out the example code for basic tasks. The Face service provides you with access to advanced algorithms for detecting and recognizing human faces in images. Follow these steps to install the package and try out the example code for basic face identification using remote images. 
+ +[Reference documentation](/python/api/overview/azure/cognitiveservices/face-readme) | [Library source code](https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/cognitiveservices/azure-cognitiveservices-vision-face) | [Package (PiPy)](https://pypi.org/project/azure-cognitiveservices-vision-face/) | [Samples](/samples/browse/?products=azure&term=face) + +## Prerequisites + +* Azure subscription - [Create one for free](https://azure.microsoft.com/free/cognitive-services/) +* [Python 3.x](https://www.python.org/) + * Your Python installation should include [pip](https://pip.pypa.io/en/stable/). You can check if you have pip installed by running `pip --version` on the command line. Get pip by installing the latest version of Python. +* [!INCLUDE [contributor-requirement](../../../includes/quickstarts/contributor-requirement.md)] +* Once you have your Azure subscription, create a Face resource in the Azure portal to get your key and endpoint. After it deploys, click **Go to resource**. + * You will need the key and endpoint from the resource you create to connect your application to the Face API. You'll paste your key and endpoint into the code below later in the quickstart. + * You can use the free pricing tier (`F0`) to try the service, and upgrade later to a paid tier for production. + +## Identify faces + +1. Install the client library + + After installing Python, you can install the client library with: + + ```console + pip install --upgrade azure-cognitiveservices-vision-face + ``` + +1. Create a new Python application + + Create a new Python script—*quickstart-file.py*, for example. Then open it in your preferred editor or IDE and paste in the following code. + + [!code-python[](~/cognitive-services-quickstart-code/python/Face/FaceQuickstart-single.py?name=snippet_single)] + +1. Enter your key and endpoint into the corresponding fields. + + > [!IMPORTANT] + > Go to the Azure portal. If the Face resource you created in the **Prerequisites** section deployed successfully, click the **Go to Resource** button under **Next Steps**. You can find your key and endpoint in the resource's **key and endpoint** page, under **resource management**. + + > [!IMPORTANT] + > Remember to remove the key from your code when you're done, and never post it publicly. For production, consider using a secure way of storing and accessing your credentials. See the Cognitive Services [security](../../../cognitive-services-security.md) article for more information. + +1. Run your face recognition app from the application directory with the `python` command. + + ```console + python quickstart-file.py + ``` + + > [!TIP] + > The Face API runs on a set of pre-built models that are static by nature (the model's performance will not regress or improve as the service is run). The results that the model produces might change if Microsoft updates the model's backend without migrating to an entirely new model version. To take advantage of a newer version of a model, you can retrain your **PersonGroup**, specifying the newer model as a parameter with the same enrollment images. + +## Clean up resources + +If you want to clean up and remove a Cognitive Services subscription, you can delete the resource or resource group. Deleting the resource group also deletes any other resources associated with it. 
+ +* [Portal](../../../cognitive-services-apis-create-account.md#clean-up-resources) +* [Azure CLI](../../../cognitive-services-apis-create-account-cli.md#clean-up-resources) + +To delete the **PersonGroup** you created in this quickstart, run the following code in your script: + +[!code-python[](~/cognitive-services-quickstart-code/python/Face/FaceQuickstart.py?name=snippet_deletegroup)] + +## Next steps + +In this quickstart, you learned how to use the Face client library for Python to do basic face identification. Next, learn about the different face detection models and how to specify the right model for your use case. + +> [!div class="nextstepaction"] +> [Specify a face detection model version](../../how-to/specify-detection-model.md) + +* [What is the Face service?](../../overview.md) +* More extensive sample code can be found on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/python/Face/FaceQuickstart.py). diff --git a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/image-analysis-csharp-sdk.md b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/image-analysis-csharp-sdk.md index beed9149f661f..33660c5058b7a 100644 --- a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/image-analysis-csharp-sdk.md +++ b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/image-analysis-csharp-sdk.md @@ -21,7 +21,7 @@ Use the Image Analysis client library for C# to analyze an image for content tag > You can also analyze a local image. See the [ComputerVisionClient](/dotnet/api/microsoft.azure.cognitiveservices.vision.computervision.computervisionclient) methods, such as **AnalyzeImageInStreamAsync**. Or, see the sample code on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/dotnet/ComputerVision/ImageAnalysisQuickstart.cs) for scenarios involving local images. > [!TIP] -> The Analyze API can do many different operations other than generate image tags. See the [Image Analysis how-to guide](../../Vision-API-How-to-Topics/HowToCallVisionAPI.md) for examples that showcase all of the available features. +> The Analyze API can do many different operations other than generate image tags. See the [Image Analysis how-to guide](../../how-to/call-analyze-image.md) for examples that showcase all of the available features. ## Prerequisites @@ -146,7 +146,7 @@ In this quickstart, you learned how to install the Image Analysis client library > [!div class="nextstepaction"] ->[Call the Analyze API](../../Vision-API-How-to-Topics/HowToCallVisionAPI.md) +>[Call the Analyze API](../../how-to/call-analyze-image.md) * [Image Analysis overview](../../overview-image-analysis.md) * The source code for this sample can be found on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/dotnet/ComputerVision/ImageAnalysisQuickstart.cs). diff --git a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/image-analysis-java-sdk.md b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/image-analysis-java-sdk.md index e26179300600f..2d9f2339f53ed 100644 --- a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/image-analysis-java-sdk.md +++ b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/image-analysis-java-sdk.md @@ -20,7 +20,7 @@ Use the Image Analysis client library to analyze a remote image for tags, text d > You can also analyze a local. 
See the [ComputerVision](/java/api/com.microsoft.azure.cognitiveservices.vision.computervision.computervision) methods, such as **AnalyzeImage**. Or, see the sample code on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/java/ComputerVision/src/main/java/ImageAnalysisQuickstart.java) for scenarios involving remote images. > [!TIP] -> The Analyze API can do many different operations other than generate image tags. See the [Image Analysis how-to guide](../../Vision-API-How-to-Topics/HowToCallVisionAPI.md) for examples that showcase all of the available features. +> The Analyze API can do many different operations other than generate image tags. See the [Image Analysis how-to guide](../../how-to/call-analyze-image.md) for examples that showcase all of the available features. [Reference documentation](/java/api/overview/azure/cognitiveservices/client/computervision) | [Library source code](https://github.com/Azure/azure-sdk-for-java/tree/master/sdk/cognitiveservices/ms-azure-cs-computervision) |[Artifact (Maven)](https://search.maven.org/artifact/com.microsoft.azure.cognitiveservices/azure-cognitiveservices-computervision) | [Samples](https://azure.microsoft.com/resources/samples/?service=cognitive-services&term=vision&sort=0) @@ -121,7 +121,7 @@ If you want to clean up and remove a Cognitive Services subscription, you can de In this quickstart, you learned how to install the Image Analysis client library and make basic image analysis calls. Next, learn more about the Analyze API features. > [!div class="nextstepaction"] ->[Call the Analyze API](../../Vision-API-How-to-Topics/HowToCallVisionAPI.md) +>[Call the Analyze API](../../how-to/call-analyze-image.md) * [Image Analysis overview](../../overview-image-analysis.md) * The source code for this sample can be found on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/java/ComputerVision/src/main/java/ImageAnalysisQuickstart.java). diff --git a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/image-analysis-node-sdk.md b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/image-analysis-node-sdk.md index 0d2e09d0afae2..4432c03e30f54 100644 --- a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/image-analysis-node-sdk.md +++ b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/image-analysis-node-sdk.md @@ -21,7 +21,7 @@ Use the Image Analysis client library for JavaScript to analyze a remote image f > You can also analyze a local image. See the [ComputerVisionClient](/javascript/api/@azure/cognitiveservices-computervision/computervisionclient) methods, such as **describeImageInStream**. Or, see the sample code on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/javascript/ComputerVision/ImageAnalysisQuickstart.js) for scenarios involving local images. > [!TIP] -> The Analyze API can do many different operations other than generate image tags. See the [Image Analysis how-to guide](../../Vision-API-How-to-Topics/HowToCallVisionAPI.md) for examples that showcase all of the available features. +> The Analyze API can do many different operations other than generate image tags. See the [Image Analysis how-to guide](../../how-to/call-analyze-image.md) for examples that showcase all of the available features. 
[Reference documentation](/javascript/api/@azure/cognitiveservices-computervision/) | [Library source code](https://github.com/Azure/azure-sdk-for-js/tree/master/sdk/cognitiveservices/cognitiveservices-computervision) | [Package (npm)](https://www.npmjs.com/package/@azure/cognitiveservices-computervision) | [Samples](https://azure.microsoft.com/resources/samples/?service=cognitive-services&term=vision&sort=0) @@ -112,7 +112,7 @@ If you want to clean up and remove a Cognitive Services subscription, you can de In this quickstart, you learned how to install the Image Analysis client library and make basic image analysis calls. Next, learn more about the Analyze API features. > [!div class="nextstepaction"] ->[Call the Analyze API](../../Vision-API-How-to-Topics/HowToCallVisionAPI.md) +>[Call the Analyze API](../../how-to/call-analyze-image.md) * [Image Analysis overview](../../overview-image-analysis.md) * The source code for this sample can be found on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/javascript/ComputerVision/ImageAnalysisQuickstart.js). diff --git a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/image-analysis-python-sdk.md b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/image-analysis-python-sdk.md index 323b1a1cc7dc0..8ef19b44ded47 100644 --- a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/image-analysis-python-sdk.md +++ b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/image-analysis-python-sdk.md @@ -19,7 +19,7 @@ Use the Image Analysis client library for Python to analyze a remote image for c > You can also analyze a local image. See the [ComputerVisionClientOperationsMixin](/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.operations.computervisionclientoperationsmixin) methods, such as **analyze_image_in_stream**. Or, see the sample code on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/python/ComputerVision/ImageAnalysisQuickstart.py) for scenarios involving local images. > [!TIP] -> The Analyze API can do many different operations other than generate image tags. See the [Image Analysis how-to guide](../../Vision-API-How-to-Topics/HowToCallVisionAPI.md) for examples that showcase all of the available features. +> The Analyze API can do many different operations other than generate image tags. See the [Image Analysis how-to guide](../../how-to/call-analyze-image.md) for examples that showcase all of the available features. [Reference documentation](/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision) | [Library source code](https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision) | [Package (PiPy)](https://pypi.org/project/azure-cognitiveservices-vision-computervision/) | [Samples](https://azure.microsoft.com/resources/samples/?service=cognitive-services&term=vision&sort=0) @@ -109,7 +109,7 @@ If you want to clean up and remove a Cognitive Services subscription, you can de In this quickstart, you learned how to install the Image Analysis client library and make basic image analysis calls. Next, learn more about the Analyze API features. 
> [!div class="nextstepaction"] ->[Call the Analyze API](../../Vision-API-How-to-Topics/HowToCallVisionAPI.md) +>[Call the Analyze API](../../how-to/call-analyze-image.md) * [Image Analysis overview](../../overview-image-analysis.md) * The source code for this sample can be found on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/python/ComputerVision/ImageAnalysisQuickstart.py). diff --git a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/java-sdk.md b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/java-sdk.md index 8f55c4eaaf433..6ebcd3f83d45d 100644 --- a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/java-sdk.md +++ b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/java-sdk.md @@ -123,7 +123,7 @@ If you want to clean up and remove a Cognitive Services subscription, you can de In this quickstart, you learned how to install the OCR client library and use the Read API. Next, learn more about the Read API features. > [!div class="nextstepaction"] ->[Call the Read API](../../Vision-API-How-to-Topics/call-read-api.md) +>[Call the Read API](../../how-to/call-read-api.md) * [OCR overview](../../overview-ocr.md) * The source code for this sample can be found on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/java/ComputerVision/src/main/java/ComputerVisionQuickstart.java). diff --git a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/node-sdk.md b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/node-sdk.md index 64713ffff9d40..e610ce9ba811b 100644 --- a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/node-sdk.md +++ b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/node-sdk.md @@ -78,7 +78,7 @@ Use the Optical character recognition client library to read printed and handwri > [!IMPORTANT] > Remember to remove the key from your code when you're done, and never post it publicly. For production, consider using a secure way of storing and accessing your credentials. For example, [Azure key vault](../../../../key-vault/general/overview.md). -1. As an optional step, see [How to specify the model version](../../Vision-API-How-to-Topics/call-read-api.md#determine-how-to-process-the-data-optional). For example, to explicitly specify the latest GA model, edit the `read` statement as shown. Skipping the parameter or using `"latest"` automatically uses the most recent GA model. +1. As an optional step, see [How to specify the model version](../../how-to/call-read-api.md#determine-how-to-process-the-data-optional). For example, to explicitly specify the latest GA model, edit the `read` statement as shown. Skipping the parameter or using `"latest"` automatically uses the most recent GA model. ```JS let result = await client.read(url,{modelVersion:"2022-04-30"}); @@ -129,7 +129,7 @@ If you want to clean up and remove a Cognitive Services subscription, you can de In this quickstart, you learned how to install the OCR client library and use the Read API. Next, learn more about the Read API features. 
> [!div class="nextstepaction"] ->[Call the Read API](../../Vision-API-How-to-Topics/call-read-api.md) +>[Call the Read API](../../how-to/call-read-api.md) * [OCR overview](../../overview-ocr.md) * The source code for this sample can be found on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/javascript/ComputerVision/ComputerVisionQuickstart.js). diff --git a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/python-sdk.md b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/python-sdk.md index 08b61b0286eb8..b0d080ba013f3 100644 --- a/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/python-sdk.md +++ b/articles/cognitive-services/Computer-vision/includes/quickstarts-sdk/python-sdk.md @@ -63,7 +63,7 @@ Use the OCR client library to read printed and handwritten text from a remote im > [!IMPORTANT] > Remember to remove the key from your code when you're done, and never post it publicly. For production, consider using a secure way of storing and accessing your credentials. For example, [Azure key vault](../../../../key-vault/general/overview.md). -1. As an optional step, see [How to specify the model version](../../Vision-API-How-to-Topics/call-read-api.md#determine-how-to-process-the-data-optional). For example, to explicitly specify the latest GA model, edit the `read` statement as shown. Skipping the parameter or using `"latest"` automatically uses the most recent GA model. +1. As an optional step, see [How to specify the model version](../../how-to/call-read-api.md#determine-how-to-process-the-data-optional). For example, to explicitly specify the latest GA model, edit the `read` statement as shown. Skipping the parameter or using `"latest"` automatically uses the most recent GA model. ```python # Call API with URL and raw response (allows you to get the operation location) @@ -103,7 +103,7 @@ If you want to clean up and remove a Cognitive Services subscription, you can de In this quickstart, you learned how to install the OCR client library and use the Read API. Next, learn more about the Read API features. > [!div class="nextstepaction"] ->[Call the Read API](../../Vision-API-How-to-Topics/call-read-api.md) +>[Call the Read API](../../how-to/call-read-api.md) * [OCR overview](../../overview-ocr.md) * The source code for this sample can be found on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/python/ComputerVision/ComputerVisionQuickstart.py). diff --git a/articles/cognitive-services/Computer-vision/index-identity.yml b/articles/cognitive-services/Computer-vision/index-identity.yml new file mode 100644 index 0000000000000..b5485bb11f5ff --- /dev/null +++ b/articles/cognitive-services/Computer-vision/index-identity.yml @@ -0,0 +1,105 @@ +### YamlMime:Landing + +title: Face documentation # < 60 characters +summary: "The Azure Face service provides AI algorithms that detect, recognize, and analyze human faces in images. Facial recognition software is important in many different scenarios, such as identity verification, touchless access control, and face blurring for privacy. \n\nOn June 11, 2020, Microsoft announced that it will not sell facial recognition technology to police departments in the United States until strong regulation, grounded in human rights, has been enacted. 
As such, customers may not use facial recognition features or features included in Azure Services, such as Face or Azure Video Analyzer for Media, if a customer is, or is allowing use of such services by or for, a police department in the United States." +metadata: + title: Face documentation - Quickstarts, tutorials, API reference - Azure Cognitive Services | Microsoft Docs + description: The cloud-based Azure Face service provides developers with access to advanced face algorithms. Face algorithms enable face attribute detection and face recognition for comparison and identification. Learn how to analyze content in different ways with quickstarts, tutorials, and samples. + services: cognitive-services + ms.service: cognitive-services + ms.subservice: computer-vision + ms.topic: landing-page + author: PatrickFarley + ms.author: pafarley + ms.date: 05/25/2022 + + +# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new + +landingContent: +# Cards and links should be based on top customer tasks or top subjects +# Start card title with a verb + # Card + - title: About the Face service + linkLists: + - linkListType: overview + links: + - text: What is the Face service? + url: overview-identity.md + - linkListType: whats-new + links: + - text: What's new in Face service? + url: whats-new.md + + - title: Face recognition + linkLists: + - linkListType: concept + links: + - text: Identity verification + url: concept-face-recognition.md + - linkListType: quickstart + links: + - text: Identify faces using a client library SDK or REST API + url: ./quickstarts-sdk/identity-client-library.md?pivots=programming-language-csharp%253fpivots%253dprogramming-language-csharp + - linkListType: how-to-guide + links: + - text: Add faces to a group + url: how-to/add-faces.md + - text: Use the large-scale feature + url: how-to/use-large-scale.md + - text: Use the PersonDirectory structure + url: how-to/use-persondirectory.md + - text: Specify a face recognition model version + url: how-to/specify-recognition-model.md + + - title: Face detection and analysis + linkLists: + - linkListType: concept + links: + - text: Face detection and attributes + url: concept-face-detection.md + - linkListType: quickstart + links: + - text: Detect faces using a client library SDK or REST API + url: quickstarts-sdk/identity-client-library.md?pivots=programming-language-csharp + - linkListType: how-to-guide + links: + - text: Detect and analyze faces + url: how-to/identity-detect-faces.md + - text: Specify a face detection model version + url: how-to/specify-detection-model.md + + - title: Face rectangles + linkLists: + - linkListType: how-to-guide + links: + - text: Detect and analyze faces + url: how-to/identity-detect-faces.md + - text: Use the HeadPose attribute + url: how-to/use-headpose.md + + - title: Reference + linkLists: + - linkListType: reference + links: + - text: REST API + url: /rest/api/face/ + - text: .NET SDK + url: /dotnet/api/overview/azure/cognitiveservices/face-readme + - text: Python SDK + url: /python/api/overview/azure/cognitiveservices/face-readme + - text: Java SDK + url: /java/api/overview/azure/cognitiveservices/client/faceapi + - text: Node.js SDK + url: /javascript/api/overview/azure/cognitiveservices/face + - text: Azure PowerShell + url: /powershell/module/az.cognitiveservices/#cognitive_services + - text: Azure Command-Line Interface (CLI) + url: /cli/azure/cognitiveservices + + - title: Help 
and feedback + linkLists: + - linkListType: reference + links: + - text: Support and help options + url: ../cognitive-services-support-options.md?context=/azure/cognitive-services/face/context/context \ No newline at end of file diff --git a/articles/cognitive-services/Computer-vision/index-image-analysis.yml b/articles/cognitive-services/Computer-vision/index-image-analysis.yml index 12d26ea5f0074..d8ee1c19a7f72 100644 --- a/articles/cognitive-services/Computer-vision/index-image-analysis.yml +++ b/articles/cognitive-services/Computer-vision/index-image-analysis.yml @@ -54,7 +54,7 @@ landingContent: - linkListType: how-to-guide links: - text: Call the Image Analysis API - url: Vision-API-How-to-Topics/HowToCallVisionAPI.md + url: how-to/call-analyze-image.md - linkListType: tutorial links: - text: Generate metadata for images diff --git a/articles/cognitive-services/Computer-vision/index-ocr.yml b/articles/cognitive-services/Computer-vision/index-ocr.yml index fbbf91c0a55ca..72c670853ba04 100644 --- a/articles/cognitive-services/Computer-vision/index-ocr.yml +++ b/articles/cognitive-services/Computer-vision/index-ocr.yml @@ -31,7 +31,7 @@ landingContent: - linkListType: how-to-guide links: - text: Call the Read API - url: Vision-API-How-to-Topics/call-read-api.md + url: how-to/call-read-api.md - text: Upgrade from Read 2.x to Read 3.x url: upgrade-api-versions.md - title: Use the Read OCR container diff --git a/articles/cognitive-services/Computer-vision/index.yml b/articles/cognitive-services/Computer-vision/index.yml index 650f5ca32a376..2690007ce84c0 100644 --- a/articles/cognitive-services/Computer-vision/index.yml +++ b/articles/cognitive-services/Computer-vision/index.yml @@ -39,7 +39,7 @@ conceptualContent: url: quickstarts-sdk/client-library.md - itemType: how-to-guide text: Call the Read API - url: Vision-API-How-to-Topics/call-read-api.md + url: how-to/call-read-api.md - itemType: deploy text: Use the Read OCR container url: computer-vision-how-to-install-containers.md @@ -75,6 +75,32 @@ conceptualContent: footerLink: text: More url: index-image-analysis.yml + - title: Face + links: + - itemType: overview + text: About Face service + url: overview-identity.md + - itemType: quickstart + text: Get started with Azure Identity + url: quickstarts-sdk/identity-client-library.md + - itemType: how-to-guide + text: Detect faces in an image + url: how-to/identity-detect-faces.md + - itemType: concept + text: Face detection + url: concept-face-detection.md + - itemType: concept + text: Face recognition + url: concept-face-recognition.md + - itemType: tutorial + text: Add users to a Face service + url: Tutorials/build-enrollment-app.md + - itemType: reference + text: Face API reference + url: /rest/api/face/ + footerLink: + text: More + url: index-identity.yml - title: Spatial Analysis links: - itemType: overview diff --git a/articles/cognitive-services/Computer-vision/intro-to-spatial-analysis-public-preview.md b/articles/cognitive-services/Computer-vision/intro-to-spatial-analysis-public-preview.md index 37b9dad3a6dea..f62c1d81e7777 100644 --- a/articles/cognitive-services/Computer-vision/intro-to-spatial-analysis-public-preview.md +++ b/articles/cognitive-services/Computer-vision/intro-to-spatial-analysis-public-preview.md @@ -19,7 +19,7 @@ You can use Computer Vision Spatial Analysis to ingest streaming video from came diff --git a/articles/cognitive-services/Computer-vision/language-support.md b/articles/cognitive-services/Computer-vision/language-support.md index 
9ef229e882fbd..49be9f6ebc928 100644 --- a/articles/cognitive-services/Computer-vision/language-support.md +++ b/articles/cognitive-services/Computer-vision/language-support.md @@ -25,7 +25,7 @@ The Computer Vision [Read API](./overview-ocr.md#read-api) supports many languag > > `Read` OCR's deep-learning-based universal models extract all multi-lingual text in your documents, including text lines with mixed languages, and do not require specifying a language code. Do not provide the language code as the parameter unless you are sure about the language and want to force the service to apply only the relevant model. Otherwise, the service may return incomplete and incorrect text. -See [How to specify the `Read` model](./Vision-API-How-to-Topics/call-read-api.md#determine-how-to-process-the-data-optional) to use the new languages. +See [How to specify the `Read` model](./how-to/call-read-api.md#determine-how-to-process-the-data-optional) to use the new languages. ### Handwritten text @@ -161,8 +161,8 @@ Some features of the [Analyze - Image](https://westcentralus.dev.cognitive.micro |Japanese |`ja`|✅ | ✅| ✅|||||| |✅|✅| |Kazakh |`kk`| | ✅| |||||| ||| |Korean |`ko`| | ✅| |||||| ||| -|Lithuanian |`It`| | ✅| |||||| ||| -|Latvian |`Iv`| | ✅| |||||| ||| +|Lithuanian |`lt`| | ✅| |||||| ||| +|Latvian |`lv`| | ✅| |||||| ||| |Macedonian |`mk`| | ✅| |||||| ||| |Malay Malaysia |`ms`| | ✅| |||||| ||| |Norwegian (Bokmal) |`nb`| | ✅| |||||| ||| @@ -170,7 +170,8 @@ Some features of the [Analyze - Image](https://westcentralus.dev.cognitive.micro |Polish |`pl`| | ✅| |||||| ||| |Dari |`prs`| | ✅| |||||| ||| | Portuguese-Brazil|`pt-BR`| | ✅| |||||| ||| -| Portuguese-Portugal |`pt`/`pt-PT`|✅ | ✅| ✅|||||| |✅|✅| +| Portuguese-Portugal |`pt`|✅ | ✅| ✅|||||| |✅|✅| +| Portuguese-Portugal |`pt-PT`| | ✅| |||||| ||| |Romanian |`ro`| | ✅| |||||| ||| |Russian |`ru`| | ✅| |||||| ||| |Slovak |`sk`| | ✅| |||||| ||| @@ -182,5 +183,6 @@ Some features of the [Analyze - Image](https://westcentralus.dev.cognitive.micro |Turkish |`tr`| | ✅| |||||| ||| |Ukrainian |`uk`| | ✅| |||||| ||| |Vietnamese |`vi`| | ✅| |||||| ||| -|Chinese Simplified |`zh`/ `zh-Hans`|✅ | ✅| ✅|||||| |✅|✅| +|Chinese Simplified |`zh`|✅ | ✅| ✅|||||| |✅|✅| +|Chinese Simplified |`zh-Hans`| | ✅| |||||| ||| |Chinese Traditional |`zh-Hant`| | ✅| |||||| ||| diff --git a/articles/cognitive-services/Face/Images/Face.detection.jpg b/articles/cognitive-services/Computer-vision/media/Face.detection.jpg similarity index 100% rename from articles/cognitive-services/Face/Images/Face.detection.jpg rename to articles/cognitive-services/Computer-vision/media/Face.detection.jpg diff --git a/articles/cognitive-services/Face/Images/FaceFindSimilar.Candidates.jpg b/articles/cognitive-services/Computer-vision/media/FaceFindSimilar.Candidates.jpg similarity index 100% rename from articles/cognitive-services/Face/Images/FaceFindSimilar.Candidates.jpg rename to articles/cognitive-services/Computer-vision/media/FaceFindSimilar.Candidates.jpg diff --git a/articles/cognitive-services/Face/Images/FaceFindSimilar.QueryFace.jpg b/articles/cognitive-services/Computer-vision/media/FaceFindSimilar.QueryFace.jpg similarity index 100% rename from articles/cognitive-services/Face/Images/FaceFindSimilar.QueryFace.jpg rename to articles/cognitive-services/Computer-vision/media/FaceFindSimilar.QueryFace.jpg diff --git a/articles/cognitive-services/Face/Images/android_getstarted1.1.PNG b/articles/cognitive-services/Computer-vision/media/android_getstarted1.1.PNG similarity index 100% rename from 
articles/cognitive-services/Face/Images/android_getstarted1.1.PNG rename to articles/cognitive-services/Computer-vision/media/android_getstarted1.1.PNG diff --git a/articles/cognitive-services/Face/Images/android_getstarted2.1.PNG b/articles/cognitive-services/Computer-vision/media/android_getstarted2.1.PNG similarity index 100% rename from articles/cognitive-services/Face/Images/android_getstarted2.1.PNG rename to articles/cognitive-services/Computer-vision/media/android_getstarted2.1.PNG diff --git a/articles/cognitive-services/Face/media/enrollment-app/0-welcome.jpg b/articles/cognitive-services/Computer-vision/media/enrollment-app/0-welcome.jpg similarity index 100% rename from articles/cognitive-services/Face/media/enrollment-app/0-welcome.jpg rename to articles/cognitive-services/Computer-vision/media/enrollment-app/0-welcome.jpg diff --git a/articles/cognitive-services/Face/media/enrollment-app/1-consent-1.jpg b/articles/cognitive-services/Computer-vision/media/enrollment-app/1-consent-1.jpg similarity index 100% rename from articles/cognitive-services/Face/media/enrollment-app/1-consent-1.jpg rename to articles/cognitive-services/Computer-vision/media/enrollment-app/1-consent-1.jpg diff --git a/articles/cognitive-services/Face/media/enrollment-app/10-manage-2.jpg b/articles/cognitive-services/Computer-vision/media/enrollment-app/10-manage-2.jpg similarity index 100% rename from articles/cognitive-services/Face/media/enrollment-app/10-manage-2.jpg rename to articles/cognitive-services/Computer-vision/media/enrollment-app/10-manage-2.jpg diff --git a/articles/cognitive-services/Face/media/enrollment-app/2-consent-2.jpg b/articles/cognitive-services/Computer-vision/media/enrollment-app/2-consent-2.jpg similarity index 100% rename from articles/cognitive-services/Face/media/enrollment-app/2-consent-2.jpg rename to articles/cognitive-services/Computer-vision/media/enrollment-app/2-consent-2.jpg diff --git a/articles/cognitive-services/Face/media/enrollment-app/3-signIn.jpg b/articles/cognitive-services/Computer-vision/media/enrollment-app/3-signIn.jpg similarity index 100% rename from articles/cognitive-services/Face/media/enrollment-app/3-signIn.jpg rename to articles/cognitive-services/Computer-vision/media/enrollment-app/3-signIn.jpg diff --git a/articles/cognitive-services/Face/media/enrollment-app/4-instruction.jpg b/articles/cognitive-services/Computer-vision/media/enrollment-app/4-instruction.jpg similarity index 100% rename from articles/cognitive-services/Face/media/enrollment-app/4-instruction.jpg rename to articles/cognitive-services/Computer-vision/media/enrollment-app/4-instruction.jpg diff --git a/articles/cognitive-services/Face/media/enrollment-app/5-enrolling.jpg b/articles/cognitive-services/Computer-vision/media/enrollment-app/5-enrolling.jpg similarity index 100% rename from articles/cognitive-services/Face/media/enrollment-app/5-enrolling.jpg rename to articles/cognitive-services/Computer-vision/media/enrollment-app/5-enrolling.jpg diff --git a/articles/cognitive-services/Face/media/enrollment-app/6-enrollsuccess.jpg b/articles/cognitive-services/Computer-vision/media/enrollment-app/6-enrollsuccess.jpg similarity index 100% rename from articles/cognitive-services/Face/media/enrollment-app/6-enrollsuccess.jpg rename to articles/cognitive-services/Computer-vision/media/enrollment-app/6-enrollsuccess.jpg diff --git a/articles/cognitive-services/Face/media/enrollment-app/7-receipt-1.jpg b/articles/cognitive-services/Computer-vision/media/enrollment-app/7-receipt-1.jpg 
similarity index 100% rename from articles/cognitive-services/Face/media/enrollment-app/7-receipt-1.jpg rename to articles/cognitive-services/Computer-vision/media/enrollment-app/7-receipt-1.jpg diff --git a/articles/cognitive-services/Face/media/enrollment-app/8-receipt-2.jpg b/articles/cognitive-services/Computer-vision/media/enrollment-app/8-receipt-2.jpg similarity index 100% rename from articles/cognitive-services/Face/media/enrollment-app/8-receipt-2.jpg rename to articles/cognitive-services/Computer-vision/media/enrollment-app/8-receipt-2.jpg diff --git a/articles/cognitive-services/Face/media/enrollment-app/9-manage-1.jpg b/articles/cognitive-services/Computer-vision/media/enrollment-app/9-manage-1.jpg similarity index 100% rename from articles/cognitive-services/Face/media/enrollment-app/9-manage-1.jpg rename to articles/cognitive-services/Computer-vision/media/enrollment-app/9-manage-1.jpg diff --git a/articles/cognitive-services/Face/Images/face-detect-javascript.png b/articles/cognitive-services/Computer-vision/media/face-detect-javascript.png similarity index 100% rename from articles/cognitive-services/Face/Images/face-detect-javascript.png rename to articles/cognitive-services/Computer-vision/media/face-detect-javascript.png diff --git a/articles/cognitive-services/Face/Images/face-rectangle-result.png b/articles/cognitive-services/Computer-vision/media/face-rectangle-result.png similarity index 100% rename from articles/cognitive-services/Face/Images/face-rectangle-result.png rename to articles/cognitive-services/Computer-vision/media/face-rectangle-result.png diff --git a/articles/cognitive-services/Face/Images/face-tut-java-gradle.png b/articles/cognitive-services/Computer-vision/media/face-tut-java-gradle.png similarity index 100% rename from articles/cognitive-services/Face/Images/face-tut-java-gradle.png rename to articles/cognitive-services/Computer-vision/media/face-tut-java-gradle.png diff --git a/articles/cognitive-services/Face/Images/getting-started-cs-detected.png b/articles/cognitive-services/Computer-vision/media/getting-started-cs-detected.png similarity index 100% rename from articles/cognitive-services/Face/Images/getting-started-cs-detected.png rename to articles/cognitive-services/Computer-vision/media/getting-started-cs-detected.png diff --git a/articles/cognitive-services/Face/Images/getting-started-cs-ui.png b/articles/cognitive-services/Computer-vision/media/getting-started-cs-ui.png similarity index 100% rename from articles/cognitive-services/Face/Images/getting-started-cs-ui.png rename to articles/cognitive-services/Computer-vision/media/getting-started-cs-ui.png diff --git a/articles/cognitive-services/Face/Images/group.image.1.jpg b/articles/cognitive-services/Computer-vision/media/group.image.1.jpg similarity index 100% rename from articles/cognitive-services/Face/Images/group.image.1.jpg rename to articles/cognitive-services/Computer-vision/media/group.image.1.jpg diff --git a/articles/cognitive-services/Face/Images/headpose.1.jpg b/articles/cognitive-services/Computer-vision/media/headpose.1.jpg similarity index 100% rename from articles/cognitive-services/Face/Images/headpose.1.jpg rename to articles/cognitive-services/Computer-vision/media/headpose.1.jpg diff --git a/articles/cognitive-services/Face/Images/identificationResult.1.jpg b/articles/cognitive-services/Computer-vision/media/identificationResult.1.jpg similarity index 100% rename from articles/cognitive-services/Face/Images/identificationResult.1.jpg rename to 
articles/cognitive-services/Computer-vision/media/identificationResult.1.jpg diff --git a/articles/cognitive-services/Face/Images/image-rotation.png b/articles/cognitive-services/Computer-vision/media/image-rotation.png similarity index 100% rename from articles/cognitive-services/Face/Images/image-rotation.png rename to articles/cognitive-services/Computer-vision/media/image-rotation.png diff --git a/articles/cognitive-services/Face/Images/labelled-faces-python.png b/articles/cognitive-services/Computer-vision/media/labelled-faces-python.png similarity index 100% rename from articles/cognitive-services/Face/Images/labelled-faces-python.png rename to articles/cognitive-services/Computer-vision/media/labelled-faces-python.png diff --git a/articles/cognitive-services/Face/Images/landmarks.1.jpg b/articles/cognitive-services/Computer-vision/media/landmarks.1.jpg similarity index 100% rename from articles/cognitive-services/Face/Images/landmarks.1.jpg rename to articles/cognitive-services/Computer-vision/media/landmarks.1.jpg diff --git a/articles/cognitive-services/Face/media/overview/banner.png b/articles/cognitive-services/Computer-vision/media/overview/banner.png similarity index 100% rename from articles/cognitive-services/Face/media/overview/banner.png rename to articles/cognitive-services/Computer-vision/media/overview/banner.png diff --git a/articles/cognitive-services/Face/media/overview/scenarios.png b/articles/cognitive-services/Computer-vision/media/overview/scenarios.png similarity index 100% rename from articles/cognitive-services/Face/media/overview/scenarios.png rename to articles/cognitive-services/Computer-vision/media/overview/scenarios.png diff --git a/articles/cognitive-services/Face/Images/person.group.clare.jpg b/articles/cognitive-services/Computer-vision/media/person.group.clare.jpg similarity index 100% rename from articles/cognitive-services/Face/Images/person.group.clare.jpg rename to articles/cognitive-services/Computer-vision/media/person.group.clare.jpg diff --git a/articles/cognitive-services/Face/media/quickstarts/detection-1.jpg b/articles/cognitive-services/Computer-vision/media/quickstarts/detection-1.jpg similarity index 100% rename from articles/cognitive-services/Face/media/quickstarts/detection-1.jpg rename to articles/cognitive-services/Computer-vision/media/quickstarts/detection-1.jpg diff --git a/articles/cognitive-services/Face/media/quickstarts/detection-5.jpg b/articles/cognitive-services/Computer-vision/media/quickstarts/detection-5.jpg similarity index 100% rename from articles/cognitive-services/Face/media/quickstarts/detection-5.jpg rename to articles/cognitive-services/Computer-vision/media/quickstarts/detection-5.jpg diff --git a/articles/cognitive-services/Face/media/quickstarts/detection-6.jpg b/articles/cognitive-services/Computer-vision/media/quickstarts/detection-6.jpg similarity index 100% rename from articles/cognitive-services/Face/media/quickstarts/detection-6.jpg rename to articles/cognitive-services/Computer-vision/media/quickstarts/detection-6.jpg diff --git a/articles/cognitive-services/Face/media/quickstarts/family-1-dad-1.jpg b/articles/cognitive-services/Computer-vision/media/quickstarts/family-1-dad-1.jpg similarity index 100% rename from articles/cognitive-services/Face/media/quickstarts/family-1-dad-1.jpg rename to articles/cognitive-services/Computer-vision/media/quickstarts/family-1-dad-1.jpg diff --git a/articles/cognitive-services/Face/media/quickstarts/find-similar.jpg 
b/articles/cognitive-services/Computer-vision/media/quickstarts/find-similar.jpg similarity index 100% rename from articles/cognitive-services/Face/media/quickstarts/find-similar.jpg rename to articles/cognitive-services/Computer-vision/media/quickstarts/find-similar.jpg diff --git a/articles/cognitive-services/Face/media/quickstarts/lillian-gish.jpg b/articles/cognitive-services/Computer-vision/media/quickstarts/lillian-gish.jpg similarity index 100% rename from articles/cognitive-services/Face/media/quickstarts/lillian-gish.jpg rename to articles/cognitive-services/Computer-vision/media/quickstarts/lillian-gish.jpg diff --git a/articles/cognitive-services/Computer-vision/overview-identity.md b/articles/cognitive-services/Computer-vision/overview-identity.md new file mode 100644 index 0000000000000..f442a591f6ba7 --- /dev/null +++ b/articles/cognitive-services/Computer-vision/overview-identity.md @@ -0,0 +1,105 @@ +--- +title: What is the Azure Face service? +titleSuffix: Azure Cognitive Services +description: The Azure Face service provides AI algorithms that you use to detect, recognize, and analyze human faces in images. +author: PatrickFarley +manager: nitinme + +ms.service: cognitive-services +ms.subservice: face-api +ms.topic: overview +ms.date: 02/28/2022 +ms.author: pafarley +ms.custom: cog-serv-seo-aug-2020 +keywords: facial recognition, facial recognition software, facial analysis, face matching, face recognition app, face search by image, facial recognition search +#Customer intent: As the developer of an app that deals with images of humans, I want to learn what the Face service does so I can determine if I should use its features. +--- + +# What is the Azure Face service? + +> [!WARNING] +> On June 11, 2020, Microsoft announced that it will not sell facial recognition technology to police departments in the United States until strong regulation, grounded in human rights, has been enacted. As such, customers may not use facial recognition features or functionality included in Azure Services, such as Face or Video Indexer, if a customer is, or is allowing use of such services by or for, a police department in the United States. When you create a new Face resource, you must acknowledge and agree in the Azure Portal that you will not use the service by or for a police department in the United States and that you have reviewed the Responsible AI documentation and will use this service in accordance with it. + +The Azure Face service provides AI algorithms that detect, recognize, and analyze human faces in images. Facial recognition software is important in many different scenarios, such as identity verification, touchless access control, and face blurring for privacy. + +This documentation contains the following types of articles: +* The [quickstarts](./quickstarts-sdk/identity-client-library.md) are step-by-step instructions that let you make calls to the service and get results in a short period of time. +* The [how-to guides](./how-to/identity-detect-faces.md) contain instructions for using the service in more specific or customized ways. +* The [conceptual articles](./concept-face-detection.md) provide in-depth explanations of the service's functionality and features. +* The [tutorials](./enrollment-overview.md) are longer guides that show you how to use this service as a component in broader business solutions. 
+ +## Example use cases + +**Identity verification**: Verify someone's identity against a government-issued ID card like a passport or driver's license or other enrollment image. You can use this verification to grant access to digital or physical services or recover an account. Specific access scenarios include opening a new account, verifying a worker, or administering an online assessment. Identity verification can be done once when a person is onboarded, and repeated when they access a digital or physical service. + +**Touchless access control**: Compared to today’s methods like cards or tickets, opt-in face identification enables an enhanced access control experience while reducing the hygiene and security risks from card sharing, loss, or theft. Facial recognition assists the check-in process with a human in the loop for check-ins in airports, stadiums, theme parks, buildings, reception kiosks at offices, hospitals, gyms, clubs, or schools. + +**Face redaction**: Redact or blur detected faces of people recorded in a video to protect their privacy. + + +## Face detection and analysis + +Face detection is required as a first step in all the other scenarios. The Detect API detects human faces in an image and returns the rectangle coordinates of their locations. It also returns a unique ID that represents the stored face data. This is used in later operations to identify or verify faces. + +Optionally, face detection can extract a set of face-related attributes, such as head pose, age, emotion, facial hair, and glasses. These attributes are general predictions, not actual classifications. Some attributes are useful to ensure that your application is getting high-quality face data when users add themselves to a Face service. For example, your application could advise users to take off their sunglasses if they're wearing sunglasses. + +> [!NOTE] +> The face detection feature is also available through the [Computer Vision service](../computer-vision/overview.md). However, if you want to use other Face operations like Identify, Verify, Find Similar, or Face grouping, you should use this service instead. + +For more information on face detection and analysis, see the [Face detection](concept-face-detection.md) concepts article. Also see the [Detect API](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) reference documentation. + + +## Identity verification + +Modern enterprises and apps can use the Face identification and Face verification operations to verify that a user is who they claim to be. + +### Identification + +Face identification can address "one-to-many" matching of one face in an image to a set of faces in a secure repository. Match candidates are returned based on how closely their face data matches the query face. This scenario is used in granting building or airport access to a certain group of people or verifying the user of a device. + +The following image shows an example of a database named `"myfriends"`. Each group can contain up to 1 million different person objects. Each person object can have up to 248 faces registered. + +![A grid with three columns for different people, each with three rows of face images](./media/person.group.clare.jpg) + +After you create and train a group, you can do identification against the group with a new detected face. If the face is identified as a person in the group, the person object is returned. 
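+
+The following is a minimal sketch of the detect-then-identify flow described above. It assumes the Microsoft.Azure.CognitiveServices.Vision.Face .NET client library and an already created and trained person group; the key, endpoint, group ID, and image URL are placeholders, and exact method signatures can vary by SDK version.
+
+```csharp
+using System;
+using System.Linq;
+using System.Threading.Tasks;
+using Microsoft.Azure.CognitiveServices.Vision.Face;
+using Microsoft.Azure.CognitiveServices.Vision.Face.Models;
+
+class IdentifyExample
+{
+    // Placeholder values: substitute your own key, endpoint, trained group ID, and image URL.
+    const string Key = "<your-face-resource-key>";
+    const string Endpoint = "https://<your-resource-name>.cognitiveservices.azure.com";
+    const string PersonGroupId = "myfriends";
+    const string ImageUrl = "https://example.com/group-photo.jpg";
+
+    static async Task Main()
+    {
+        IFaceClient client = new FaceClient(new ApiKeyServiceClientCredentials(Key)) { Endpoint = Endpoint };
+
+        // Detect faces in the query image. The returned face IDs are the temporary
+        // identifiers that later operations such as Identify and Verify consume.
+        var detectedFaces = await client.Face.DetectWithUrlAsync(
+            ImageUrl,
+            returnFaceId: true,
+            recognitionModel: "recognition_04",
+            detectionModel: "detection_03");
+
+        var faceIds = detectedFaces
+            .Where(face => face.FaceId.HasValue)
+            .Select(face => face.FaceId.Value)
+            .ToList();
+
+        // Identify each detected face against the trained person group.
+        var results = await client.Face.IdentifyAsync(faceIds, PersonGroupId);
+        foreach (var result in results)
+        {
+            var best = result.Candidates.FirstOrDefault();
+            Console.WriteLine(best == null
+                ? $"Face {result.FaceId}: no match."
+                : $"Face {result.FaceId}: person {best.PersonId} (confidence {best.Confidence:0.00})");
+        }
+    }
+}
+```
+
+In a real application you would first create the group, add each person's faces, and train it (or use the newer **PersonDirectory** structure, which doesn't require Train calls); see the how-to guides for the current steps.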
+ +### Verification + +The verification operation answers the question, "Do these two faces belong to the same person?". + +Verification is also a "one-to-one" matching of a face in an image to a single face from a secure repository or photo to verify that they're the same individual. Verification can be used for Identity Verification, such as a banking app that enables users to open a credit account remotely by taking a new picture of themselves and sending it with a picture of their photo ID. + +For more information about identity verification, see the [Facial recognition](concept-face-recognition.md) concepts guide or the [Identify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239) and [Verify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a) API reference documentation. + + +## Find similar faces + +The Find Similar operation does face matching between a target face and a set of candidate faces, finding a smaller set of faces that look similar to the target face. This is useful for doing a face search by image. + +The service supports two working modes, **matchPerson** and **matchFace**. The **matchPerson** mode returns similar faces after filtering for the same person by using the [Verify API](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a). The **matchFace** mode ignores the same-person filter. It returns a list of similar candidate faces that may or may not belong to the same person. + +The following example shows the target face: + +![A woman smiling](./media/FaceFindSimilar.QueryFace.jpg) + +And these images are the candidate faces: + +![Five images of people smiling. Images A and B show the same person.](./media/FaceFindSimilar.Candidates.jpg) + +To find four similar faces, the **matchPerson** mode returns A and B, which show the same person as the target face. The **matchFace** mode returns A, B, C, and D, which is exactly four candidates, even if some aren't the same person as the target or have low similarity. For more information, see the [Facial recognition](concept-face-recognition.md) concepts guide or the [Find Similar API](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237) reference documentation. + +## Group faces + +The Group operation divides a set of unknown faces into several smaller groups based on similarity. Each group is a disjoint proper subset of the original set of faces. It also returns a single "messyGroup" array that contains the face IDs for which no similarities were found. + +All of the faces in a returned group are likely to belong to the same person, but there can be several different groups for a single person. Those groups are differentiated by another factor, such as expression, for example. For more information, see the [Facial recognition](concept-face-recognition.md) concepts guide or the [Group API](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395238) reference documentation. + +## Data privacy and security + +As with all of the Cognitive Services resources, developers who use the Face service must be aware of Microsoft's policies on customer data. For more information, see the [Cognitive Services page](https://www.microsoft.com/trustcenter/cloudservices/cognitiveservices) on the Microsoft Trust Center. 
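+
+To make the **matchPerson** and **matchFace** modes described above concrete, here is a minimal sketch that assumes the same hypothetical .NET client setup as the identification sketch earlier on this page. The target and candidate face IDs are assumed to come from earlier Detect calls, and the method and enum names (`FindSimilarAsync`, `FindSimilarMatchMode`) may differ slightly between SDK versions.
+
+```csharp
+using System;
+using System.Collections.Generic;
+using System.Threading.Tasks;
+using Microsoft.Azure.CognitiveServices.Vision.Face;
+using Microsoft.Azure.CognitiveServices.Vision.Face.Models;
+
+static class FindSimilarExample
+{
+    // targetFaceId and candidateFaceIds are assumed to come from earlier Detect calls,
+    // and client is an authenticated FaceClient (see the identification sketch above).
+    public static async Task CompareModesAsync(
+        IFaceClient client, Guid targetFaceId, IList<Guid?> candidateFaceIds)
+    {
+        // matchPerson (the default) filters candidates to those verified as the same person.
+        IList<SimilarFace> samePerson = await client.Face.FindSimilarAsync(
+            targetFaceId, faceIds: candidateFaceIds, mode: FindSimilarMatchMode.MatchPerson);
+
+        // matchFace skips the same-person filter and ranks candidates purely by similarity.
+        IList<SimilarFace> lookAlikes = await client.Face.FindSimilarAsync(
+            targetFaceId, faceIds: candidateFaceIds,
+            maxNumOfCandidatesReturned: 4, mode: FindSimilarMatchMode.MatchFace);
+
+        Console.WriteLine($"matchPerson: {samePerson.Count} faces, matchFace: {lookAlikes.Count} faces");
+        foreach (SimilarFace match in lookAlikes)
+        {
+            Console.WriteLine($"  candidate {match.FaceId} confidence {match.Confidence:0.00}");
+        }
+    }
+}
+```
+
+Because **matchFace** ignores the same-person filter, it can return up to the requested number of candidates even when some have low similarity, which mirrors the four-candidate example above.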
+ +## Next steps + +Follow a quickstart to code the basic components of a face recognition app in the language of your choice. + +- [Client library quickstart](quickstarts-sdk/identity-client-library.md). diff --git a/articles/cognitive-services/Computer-vision/overview-image-analysis.md b/articles/cognitive-services/Computer-vision/overview-image-analysis.md index 311958a1134c8..a590978878096 100644 --- a/articles/cognitive-services/Computer-vision/overview-image-analysis.md +++ b/articles/cognitive-services/Computer-vision/overview-image-analysis.md @@ -25,7 +25,7 @@ You can use Image Analysis through a client library SDK or by calling the [REST This documentation contains the following types of articles: * The [quickstarts](./quickstarts-sdk/image-analysis-client-library.md) are step-by-step instructions that let you make calls to the service and get results in a short period of time. -* The [how-to guides](./Vision-API-How-to-Topics/HowToCallVisionAPI.md) contain instructions for using the service in more specific or customized ways. +* The [how-to guides](./how-to/call-analyze-image.md) contain instructions for using the service in more specific or customized ways. * The [conceptual articles](concept-tagging-images.md) provide in-depth explanations of the service's functionality and features. * The [tutorials](./tutorials/storage-lab-tutorial.md) are longer guides that show you how to use this service as a component in broader business solutions. @@ -64,7 +64,7 @@ Generate a description of an entire image in human-readable language, using comp ### Detect faces -Detect faces in an image and provide information about each detected face. Computer Vision returns the coordinates, rectangle, gender, and age for each detected face.
                  Computer Vision provides a subset of the [Face](../face/index.yml) service functionality. You can use the Face service for more detailed analysis, such as facial identification and pose detection. [Detect faces](concept-detecting-faces.md) +Detect faces in an image and provide information about each detected face. Computer Vision returns the coordinates, rectangle, gender, and age for each detected face.
                  Computer Vision provides a subset of the [Face](./index-identity.yml) service functionality. You can use the Face service for more detailed analysis, such as facial identification and pose detection. [Detect faces](concept-detecting-faces.md) ### Detect image types diff --git a/articles/cognitive-services/Computer-vision/overview-ocr.md b/articles/cognitive-services/Computer-vision/overview-ocr.md index 78e496edae566..6bec780494823 100644 --- a/articles/cognitive-services/Computer-vision/overview-ocr.md +++ b/articles/cognitive-services/Computer-vision/overview-ocr.md @@ -23,8 +23,8 @@ Optical character recognition (OCR) allows you to extract printed or handwritten This documentation contains the following types of articles: * The [quickstarts](./quickstarts-sdk/client-library.md) are step-by-step instructions that let you make calls to the service and get results in a short period of time. -* The [how-to guides](./Vision-API-How-to-Topics/call-read-api.md) contain instructions for using the service in more specific or customized ways. - ## Read API @@ -49,7 +49,7 @@ OCR for print text includes support for English, French, German, Italian, Portug OCR for handwritten text includes support for English, Chinese Simplified, French, German, Italian, Japanese, Korean, Portuguese, Spanish languages. -See [How to specify the model version](./Vision-API-How-to-Topics/call-read-api.md#determine-how-to-process-the-data-optional) to use the preview languages and features. Refer to the full list of [OCR-supported languages](./language-support.md#optical-character-recognition-ocr). +See [How to specify the model version](./how-to/call-read-api.md#determine-how-to-process-the-data-optional) to use the preview languages and features. Refer to the full list of [OCR-supported languages](./language-support.md#optical-character-recognition-ocr). ## Key features @@ -65,7 +65,7 @@ The Read API includes the following features. * Handwriting classification for text lines (Latin only) * Available as Distroless Docker container for on-premises deployment -Learn [how to use the OCR features](./vision-api-how-to-topics/call-read-api.md). +Learn [how to use the OCR features](./how-to/call-read-api.md). ## Use the cloud API or deploy on-premises The Read 3.x cloud APIs are the preferred option for most customers because of ease of integration and fast productivity out of the box. Azure and the Computer Vision service handle scale, performance, data security, and compliance needs while you focus on meeting your customers' needs. diff --git a/articles/cognitive-services/Computer-vision/overview.md b/articles/cognitive-services/Computer-vision/overview.md index 24f1478443d42..e241f9b0f8ac2 100644 --- a/articles/cognitive-services/Computer-vision/overview.md +++ b/articles/cognitive-services/Computer-vision/overview.md @@ -24,6 +24,7 @@ Azure's Computer Vision service gives you access to advanced algorithms that pro |---|---| | [Optical Character Recognition (OCR)](overview-ocr.md)|The Optical Character Recognition (OCR) service extracts text from images. You can use the new Read API to extract printed and handwritten text from photos and documents. It uses deep-learning-based models and works with text on a variety of surfaces and backgrounds. These include business documents, invoices, receipts, posters, business cards, letters, and whiteboards. The OCR APIs support extracting printed text in [several languages](./language-support.md). 
Follow the [OCR quickstart](quickstarts-sdk/client-library.md) to get started.| |[Image Analysis](overview-image-analysis.md)| The Image Analysis service extracts many visual features from images, such as objects, faces, adult content, and auto-generated text descriptions. Follow the [Image Analysis quickstart](quickstarts-sdk/image-analysis-client-library.md) to get started.| +| [Face](overview-identity.md) | The Face service provides AI algorithms that detect, recognize, and analyze human faces in images. Facial recognition software is important in many different scenarios, such as identity verification, touchless access control, and face blurring for privacy. Follow the [Face quickstart](quickstarts-sdk/identity-client-library.md) to get started. | | [Spatial Analysis](intro-to-spatial-analysis-public-preview.md)| The Spatial Analysis service analyzes the presence and movement of people on a video feed and produces events that other systems can respond to. Install the [Spatial Analysis container](spatial-analysis-container.md) to get started.| ## Computer Vision for digital asset management diff --git a/articles/cognitive-services/Computer-vision/quickstarts-sdk/identity-client-library.md b/articles/cognitive-services/Computer-vision/quickstarts-sdk/identity-client-library.md new file mode 100644 index 0000000000000..6da117cca77a5 --- /dev/null +++ b/articles/cognitive-services/Computer-vision/quickstarts-sdk/identity-client-library.md @@ -0,0 +1,43 @@ +--- +title: 'Quickstart: Use the Face client library' +titleSuffix: Azure Cognitive Services +description: The Face API offers client libraries that makes it easy to detect, find similar, identify, verify and more. +services: cognitive-services +author: PatrickFarley +manager: nitinme +zone_pivot_groups: programming-languages-set-face +ms.service: cognitive-services +ms.subservice: face-api +ms.topic: quickstart +ms.date: 09/27/2021 +ms.author: pafarley +ms.devlang: csharp, golang, javascript, python +ms.custom: devx-track-python, devx-track-csharp, cog-serv-seo-aug-2020, mode-api +keywords: face search by image, facial recognition search, facial recognition, face recognition app +--- + +# Quickstart: Use the Face client library + +::: zone pivot="programming-language-csharp" + +[!INCLUDE [C# quickstart](../includes/quickstarts-sdk/identity-csharp-sdk.md)] + +::: zone-end + +::: zone pivot="programming-language-javascript" + +[!INCLUDE [JavaScript quickstart](../includes/quickstarts-sdk/identity-javascript-sdk.md)] + +::: zone-end + +::: zone pivot="programming-language-python" + +[!INCLUDE [Python quickstart](../includes/quickstarts-sdk/identity-python-sdk.md)] + +::: zone-end + +::: zone pivot="programming-language-rest-api" + +[!INCLUDE [cURL quickstart](../includes/identity-curl-quickstart.md)] + +::: zone-end diff --git a/articles/cognitive-services/Computer-vision/toc.yml b/articles/cognitive-services/Computer-vision/toc.yml index e646b64278748..eb08bec31756c 100644 --- a/articles/cognitive-services/Computer-vision/toc.yml +++ b/articles/cognitive-services/Computer-vision/toc.yml @@ -37,7 +37,7 @@ items: - name: How-to guides items: - name: Call the Read API - href: Vision-API-How-to-Topics/call-read-api.md + href: how-to/call-read-api.md - name: Upgrade from Read 2.x to Read 3.x href: upgrade-api-versions.md - name: Use the Read container @@ -95,9 +95,9 @@ items: - name: How-to guides items: - name: Call the Image Analysis API - href: Vision-API-How-to-Topics/HowToCallVisionAPI.md + href: how-to/call-analyze-image.md - name: 
Analyze videos in real time - href: Vision-API-How-to-Topics/HowtoAnalyzeVideo_Vision.md + href: how-to/analyze-video.md - name: Concepts items: - name: Object detection @@ -150,6 +150,66 @@ items: href: /cli/azure/cognitiveservices#az_cognitiveservices_list - name: Azure PowerShell href: /powershell/module/azurerm.cognitiveservices/ +- name: Face + items: + - name: Face overview + href: overview-identity.md + - name: Face quickstart + href: quickstarts-sdk/identity-client-library.md + - name: Samples + href: /samples/browse/?products=azure&term=face + - name: How-to guides + items: + - name: Detect and analyze faces + href: how-to/identity-detect-faces.md + - name: Find similar faces + href: how-to/find-similar-faces.md + - name: Specify a face detection model version + href: how-to/specify-detection-model.md + - name: Specify a face recognition model version + href: how-to/specify-recognition-model.md + - name: Add faces to a group + href: how-to/add-faces.md + - name: Use the large-scale feature + href: how-to/use-large-scale.md + - name: Use the PersonDirectory structure + href: how-to/use-persondirectory.md + - name: Use the HeadPose attribute + href: how-to/use-headpose.md + - name: Mitigate latency when using the Face service + href: how-to/mitigate-latency.md + - name: Analyze videos in real time + href: how-to/identity-analyze-video.md + - name: Migrate face data + href: how-to/migrate-face-data.md + - name: Use customer-managed keys + href: identity-encrypt-data-at-rest.md + - name: Concepts + items: + - name: Face detection and analysis + href: concept-face-detection.md + - name: Face recognition + href: concept-face-recognition.md + - name: Tutorials + items: + - name: Add users to a Face service + items: + - name: Best practices for enrolling users + href: enrollment-overview.md + - name: Build a React app to enroll users + href: Tutorials/build-enrollment-app.md + - name: Reference + items: + - name: Face REST API + href: identity-api-reference.md + - name: .NET + href: /dotnet/api/overview/azure/cognitiveservices/face-readme + - name: Java + href: /java/api/overview/azure/cognitiveservices/client/faceapi + - name: Node.js + href: /javascript/api/overview/azure/cognitiveservices/face + - name: Python + href: /python/api/overview/azure/cognitiveservices/face-readme - name: Spatial Analysis items: - name: Spatial Analysis overview diff --git a/articles/cognitive-services/Computer-vision/whats-new.md b/articles/cognitive-services/Computer-vision/whats-new.md index 8d0a74714a296..4315ee12782e6 100644 --- a/articles/cognitive-services/Computer-vision/whats-new.md +++ b/articles/cognitive-services/Computer-vision/whats-new.md @@ -1,20 +1,20 @@ --- title: What's new in Computer Vision? titleSuffix: Azure Cognitive Services -description: This article contains news about Computer Vision. +description: Stay up to date on recent releases and updates to Azure Computer Vision. services: cognitive-services author: PatrickFarley manager: nitinme ms.service: cognitive-services ms.subservice: computer-vision ms.topic: overview -ms.date: 05/02/2022 +ms.date: 05/25/2022 ms.author: pafarley --- # What's new in Computer Vision -Learn what's new in the service. These items may be release notes, videos, blog posts, and other types of information. Bookmark this page to stay up to date with the service. +Learn what's new in the service. These items may be release notes, videos, blog posts, and other types of information. 
Bookmark this page to stay up to date with new features, enhancements, fixes, and documentation updates. ## May 2022 @@ -30,7 +30,7 @@ Computer Vision's [OCR (Read) API](overview-ocr.md) latest model with [164 suppo * Performance and latency improvements. * Available as [cloud service](overview-ocr.md#read-api) and [Docker container](computer-vision-how-to-install-containers.md). -See the [OCR how-to guide](Vision-API-How-to-Topics/call-read-api.md#determine-how-to-process-the-data-optional) to learn how to use the GA model. +See the [OCR how-to guide](how-to/call-read-api.md#determine-how-to-process-the-data-optional) to learn how to use the GA model. > [!div class="nextstepaction"] > [Get Started with the Read API](./quickstarts-sdk/client-library.md) @@ -46,11 +46,14 @@ Computer Vision's [OCR (Read) API](overview-ocr.md) expands [supported languages * Enhancements including better support for extracting handwritten dates, amounts, names, and single character boxes. * General performance and AI quality improvements -See the [OCR how-to guide](Vision-API-How-to-Topics/call-read-api.md#determine-how-to-process-the-data-optional) to learn how to use the new preview features. +See the [OCR how-to guide](how-to/call-read-api.md#determine-how-to-process-the-data-optional) to learn how to use the new preview features. > [!div class="nextstepaction"] > [Get Started with the Read API](./quickstarts-sdk/client-library.md) +### New Quality Attribute in Detection_01 and Detection_03 +* To help system builders and their customers capture high quality images which are necessary for high quality outputs from Face API, we’re introducing a new quality attribute **QualityForRecognition** to help decide whether an image is of sufficient quality to attempt face recognition. The value is an informal rating of low, medium, or high. The new attribute is only available when using any combinations of detection models `detection_01` or `detection_03`, and recognition models `recognition_03` or `recognition_04`. Only "high" quality images are recommended for person enrollment and quality above "medium" is recommended for identification scenarios. To learn more about the new quality attribute, see [Face detection and attributes](concept-face-detection.md) and see how to use it with [QuickStart](./quickstarts-sdk/identity-client-library.md?pivots=programming-language-csharp&tabs=visual-studio). + ## September 2021 @@ -63,7 +66,7 @@ Computer Vision's [OCR (Read) API](overview-ocr.md) expands [supported languages * Enhancements for processing digital PDFs and Machine Readable Zone (MRZ) text in identity documents. * General performance and AI quality improvements -See the [OCR how-to guide](Vision-API-How-to-Topics/call-read-api.md#determine-how-to-process-the-data-optional) to learn how to use the new preview features. +See the [OCR how-to guide](how-to/call-read-api.md#determine-how-to-process-the-data-optional) to learn how to use the new preview features. > [!div class="nextstepaction"] > [Get Started with the Read API](./quickstarts-sdk/client-library.md) @@ -74,6 +77,13 @@ See the [OCR how-to guide](Vision-API-How-to-Topics/call-read-api.md#determine-h The [latest version (v3.2)](https://westus.dev.cognitive.microsoft.com/docs/services/computer-vision-v3-2/operations/56f91f2e778daf14a499f200) of the Image tagger now supports tags in 50 languages. See the [language support](language-support.md) page for more information. 
+## July 2021 + +### New HeadPose and Landmarks improvements for Detection_03 + +* The Detection_03 model has been updated to support facial landmarks. +* The landmarks feature in Detection_03 is much more precise, especially in the eyeball landmarks which are crucial for gaze tracking. + ## May 2021 ### Spatial Analysis container update @@ -92,14 +102,19 @@ A new version of the [Spatial Analysis container](spatial-analysis-container.md) The Computer Vision API v3.2 is now generally available with the following updates: -* Improved image tagging model: analyzes visual content and generates relevant tags based on objects, actions, and content displayed in the image. This model is available through the [Tag Image API](https://westus.dev.cognitive.microsoft.com/docs/services/computer-vision-v3-2/operations/56f91f2e778daf14a499f200). See the Image Analysis [how-to guide](./vision-api-how-to-topics/howtocallvisionapi.md) and [overview](./overview-image-analysis.md) to learn more. -* Updated content moderation model: detects presence of adult content and provides flags to filter images containing adult, racy, and gory visual content. This model is available through the [Analyze API](https://westus.dev.cognitive.microsoft.com/docs/services/computer-vision-v3-2/operations/56f91f2e778daf14a499f21b). See the Image Analysis [how-to guide](./vision-api-how-to-topics/howtocallvisionapi.md) and [overview](./overview-image-analysis.md) to learn more. +* Improved image tagging model: analyzes visual content and generates relevant tags based on objects, actions, and content displayed in the image. This model is available through the [Tag Image API](https://westus.dev.cognitive.microsoft.com/docs/services/computer-vision-v3-2/operations/56f91f2e778daf14a499f200). See the Image Analysis [how-to guide](./how-to/call-analyze-image.md) and [overview](./overview-image-analysis.md) to learn more. +* Updated content moderation model: detects presence of adult content and provides flags to filter images containing adult, racy, and gory visual content. This model is available through the [Analyze API](https://westus.dev.cognitive.microsoft.com/docs/services/computer-vision-v3-2/operations/56f91f2e778daf14a499f21b). See the Image Analysis [how-to guide](./how-to/call-analyze-image.md) and [overview](./overview-image-analysis.md) to learn more. * [OCR (Read) available for 73 languages](./language-support.md#optical-character-recognition-ocr) including Simplified and Traditional Chinese, Japanese, Korean, and Latin languages. * [OCR (Read)](./overview-ocr.md) also available as a [Distroless container](./computer-vision-how-to-install-containers.md?tabs=version-3-2) for on-premise deployment. > [!div class="nextstepaction"] > [See Computer Vision v3.2 GA](https://westus.dev.cognitive.microsoft.com/docs/services/computer-vision-v3-2/operations/5d986960601faab4bf452005) +### PersonDirectory data structure + +* In order to perform face recognition operations such as Identify and Find Similar, Face API customers need to create an assorted list of **Person** objects. The new **PersonDirectory** is a data structure that contains unique IDs, optional name strings, and optional user metadata strings for each **Person** identity added to the directory. Currently, the Face API offers the **LargePersonGroup** structure which has similar functionality but is limited to 1 million identities. The **PersonDirectory** structure can scale up to 75 million identities. 
+* Another major difference between **PersonDirectory** and previous data structures is that you'll no longer need to make any Train calls after adding faces to a **Person** object—the update process happens automatically. For more details see [Use the PersonDirectory structure](how-to/use-persondirectory.md). + ## March 2021 ### Computer Vision 3.2 Public Preview update @@ -121,11 +136,19 @@ The Computer Vision Read API v3.2 public preview, available as cloud service and * Extract text only for selected pages for a multi-page document. * Available as a [Distroless container](./computer-vision-how-to-install-containers.md?tabs=version-3-2) for on-premise deployment. -See the [Read API how-to guide](Vision-API-How-to-Topics/call-read-api.md) to learn more. +See the [Read API how-to guide](how-to/call-read-api.md) to learn more. > [!div class="nextstepaction"] > [Use the Read API v3.2 Public Preview](https://westus.dev.cognitive.microsoft.com/docs/services/computer-vision-v3-2/operations/5d986960601faab4bf452005) + +### New Face API detection model +* The new Detection 03 model is the most accurate detection model currently available. If you're a new customer, we recommend using this model. Detection 03 improves both recall and precision on smaller faces found within images (64x64 pixels). Additional improvements include an overall reduction in false positives and improved detection on rotated face orientations. Combining Detection 03 with the new Recognition 04 model will provide improved recognition accuracy as well. See [Specify a face detection model](./how-to/specify-detection-model.md) for more details. +### New detectable Face attributes +* The `faceMask` attribute is available with the latest Detection 03 model, along with the additional attribute `"noseAndMouthCovered"` which detects whether the face mask is worn as intended, covering both the nose and mouth. To use the latest mask detection capability, users need to specify the detection model in the API request: assign the model version with the _detectionModel_ parameter to `detection_03`. See [Specify a face detection model](./how-to/specify-detection-model.md) for more details. +### New Face API Recognition Model +* The new Recognition 04 model is the most accurate recognition model currently available. If you're a new customer, we recommend using this model for verification and identification. It improves upon the accuracy of Recognition 03, including improved recognition for users wearing face covers (surgical masks, N95 masks, cloth masks). Note that we recommend against enrolling images of users wearing face covers as this will lower recognition quality. Now customers can build safe and seamless user experiences that detect whether a user is wearing a face cover with the latest Detection 03 model, and recognize them with the latest Recognition 04 model. See [Specify a face recognition model](./how-to/specify-recognition-model.md) for more details. + ## January 2021 ### Spatial Analysis container update @@ -142,6 +165,17 @@ A new version of the [Spatial Analysis container](spatial-analysis-container.md) * Added support for auto recalibration (by default disabled) via the `enable_recalibration` parameter, please refer to [Spatial Analysis operations](./spatial-analysis-operations.md) for details * Camera calibration parameters to the `DETECTOR_NODE_CONFIG`. Refer to [Spatial Analysis operations](./spatial-analysis-operations.md) for details.
+### Mitigate latency +* The Face team published a new article detailing potential causes of latency when using the service and possible mitigation strategies. See [Mitigate latency when using the Face service](./how-to/mitigate-latency.md). + +## December 2020 +### Customer configuration for Face ID storage +* While the Face Service does not store customer images, the extracted face feature(s) will be stored on server. The Face ID is an identifier of the face feature and will be used in [Face - Identify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239), [Face - Verify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a), and [Face - Find Similar](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237). The stored face features will expire and be deleted 24 hours after the original detection call. Customers can now determine the length of time these Face IDs are cached. The maximum value is still up to 24 hours, but a minimum value of 60 seconds can now be set. The new time ranges for Face IDs being cached is any value between 60 seconds and 24 hours. More details can be found in the [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) API reference (the *faceIdTimeToLive* parameter). + +## November 2020 +### Sample Face enrollment app +* The team published a sample Face enrollment app to demonstrate best practices for establishing meaningful consent and creating high-accuracy face recognition systems through high-quality enrollments. The open-source sample can be found in the [Build an enrollment app](Tutorials/build-enrollment-app.md) guide and on [GitHub](https://github.com/Azure-Samples/cognitive-services-FaceAPIEnrollmentSample), ready for developers to deploy or customize. + ## October 2020 ### Computer Vision API v3.1 GA @@ -164,11 +198,15 @@ The Computer Vision Read API v3.1 public preview adds these capabilities: * This preview version of the Read API supports English, Dutch, French, German, Italian, Japanese, Portuguese, Simplified Chinese, and Spanish languages. -See the [Read API how-to guide](Vision-API-How-to-Topics/call-read-api.md) to learn more. +See the [Read API how-to guide](how-to/call-read-api.md) to learn more. > [!div class="nextstepaction"] > [Learn more about Read API v3.1 Public Preview 2](https://westus2.dev.cognitive.microsoft.com/docs/services/computer-vision-v3-1-preview-2/operations/5d986960601faab4bf452005) +## August 2020 +### Customer-managed encryption of data at rest +* The Face service automatically encrypts your data when persisting it to the cloud. The Face service encryption protects your data to help you meet your organizational security and compliance commitments. By default, your subscription uses Microsoft-managed encryption keys. There is also a new option to manage your subscription with your own keys called customer-managed keys (CMK). More details can be found at [Customer-managed keys](./identity-encrypt-data-at-rest.md). + ## July 2020 ### Read API v3.1 Public Preview with OCR for Simplified Chinese @@ -177,7 +215,7 @@ The Computer Vision Read API v3.1 public preview adds support for Simplified Chi * This preview version of the Read API supports English, Dutch, French, German, Italian, Portuguese, Simplified Chinese, and Spanish languages. 
-See the [Read API how-to guide](Vision-API-How-to-Topics/call-read-api.md) to learn more. +See the [Read API how-to guide](how-to/call-read-api.md) to learn more. > [!div class="nextstepaction"] > [Learn more about Read API v3.1 Public Preview 1](https://westus.dev.cognitive.microsoft.com/docs/services/computer-vision-v3-1-preview-1/operations/5d986960601faab4bf452005) @@ -193,6 +231,10 @@ Computer Vision API v3.0 entered General Availability, with updates to the Read See the [OCR overview](overview-ocr.md) to learn more. +## April 2020 +### New Face API Recognition Model +* The new recognition 03 model is the most accurate model currently available. If you're a new customer, we recommend using this model. Recognition 03 will provide improved accuracy for both similarity comparisons and person-matching comparisons. More details can be found at [Specify a face recognition model](./how-to/specify-recognition-model.md). + ## March 2020 * TLS 1.2 is now enforced for all HTTP requests to this service. For more information, see [Azure Cognitive Services security](../cognitive-services-security.md). @@ -210,6 +252,87 @@ You now can use version 3.0 of the Read API to extract printed or handwritten te Follow an [Extract text quickstart](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/dotnet/ComputerVision/REST/CSharp-hand-text.md?tabs=version-3) to get starting using the 3.0 API. + +## June 2019 + +### New Face API detection model +* The new Detection 02 model features improved accuracy on small, side-view, occluded, and blurry faces. Use it through [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), [FaceList - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395250), [LargeFaceList - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/5a158c10d2de3616c086f2d3), [PersonGroup Person - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b) and [LargePersonGroup Person - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599adf2a3a7b9412a4d53f42) by specifying the new face detection model name `detection_02` in `detectionModel` parameter. More details in [How to specify a detection model](how-to/specify-detection-model.md). + +## April 2019 + +### Improved attribute accuracy +* Improved overall accuracy of the `age` and `headPose` attributes. The `headPose` attribute is also updated with the `pitch` value enabled now. Use these attributes by specifying them in the `returnFaceAttributes` parameter of [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) `returnFaceAttributes` parameter. 
+### Improved processing speeds +* Improved speeds of [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), [FaceList - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395250), [LargeFaceList - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/5a158c10d2de3616c086f2d3), [PersonGroup Person - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b) and [LargePersonGroup Person - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599adf2a3a7b9412a4d53f42) operations. + +## March 2019 + +### New Face API recognition model +* The Recognition 02 model has improved accuracy. Use it through [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), [FaceList - Create](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039524b), [LargeFaceList - Create](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/5a157b68d2de3616c086f2cc), [PersonGroup - Create](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395244) and [LargePersonGroup - Create](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599acdee6ac60f11b48b5a9d) by specifying the new face recognition model name `recognition_02` in `recognitionModel` parameter. More details in [How to specify a recognition model](how-to/specify-recognition-model.md). + +## January 2019 + +### Face Snapshot feature +* This feature allows the service to support data migration across subscriptions: [Snapshot](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/snapshot-get). More details in [How to Migrate your face data to a different Face subscription](how-to/migrate-face-data.md). + +## October 2018 + +### API messages +* Refined description for `status`, `createdDateTime`, `lastActionDateTime`, and `lastSuccessfulTrainingDateTime` in [PersonGroup - Get Training Status](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395247), [LargePersonGroup - Get Training Status](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599ae32c6ac60f11b48b5aa5), and [LargeFaceList - Get Training Status](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/5a1582f8d2de3616c086f2cf). + +## May 2018 + +### Improved attribute accuracy +* Improved `gender` attribute significantly and also improved `age`, `glasses`, `facialHair`, `hair`, `makeup` attributes. Use them through [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) `returnFaceAttributes` parameter. 
+### Increased file size limit +* Increased input image file size limit from 4 MB to 6 MB in [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), [FaceList - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395250), [LargeFaceList - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/5a158c10d2de3616c086f2d3), [PersonGroup Person - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b) and [LargePersonGroup Person - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599adf2a3a7b9412a4d53f42). + +## March 2018 + +### New data structure +* [LargeFaceList](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/5a157b68d2de3616c086f2cc) and [LargePersonGroup](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599acdee6ac60f11b48b5a9d). More details in [How to use the large-scale feature](how-to/use-large-scale.md). +* Increased [Face - Identify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239) `maxNumOfCandidatesReturned` parameter from [1, 5] to [1, 100] and default to 10. + +## May 2017 + +### New detectable Face attributes +* Added `hair`, `makeup`, `accessory`, `occlusion`, `blur`, `exposure`, and `noise` attributes in [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) `returnFaceAttributes` parameter. +* Supported 10K persons in a PersonGroup and [Face - Identify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239). +* Supported pagination in [PersonGroup Person - List](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395241) with optional parameters: `start` and `top`. +* Supported concurrency in adding/deleting faces against different FaceLists and different persons in PersonGroup. + +## March 2017 + +### New detectable Face attribute +* Added `emotion` attribute in [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) `returnFaceAttributes` parameter. +### Fixed issues +* Face could not be re-detected with rectangle returned from [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) as `targetFace` in [FaceList - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395250) and [PersonGroup Person - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b). +* The detectable face size is set to ensure it is strictly between 36x36 to 4096x4096 pixels. 
+ +## November 2016 +### New subscription tier +* Added Face Storage Standard subscription to store additional persisted faces when using [PersonGroup Person - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b) or [FaceList - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395250) for identification or similarity matching. The stored images are charged at $0.5 per 1000 faces and this rate is prorated on a daily basis. Free tier subscriptions continue to be limited to 1,000 total persons. + +## October 2016 +### API messages +* Changed the error message of more than one face in the `targetFace` from 'There are more than one face in the image' to 'There is more than one face in the image' in [FaceList - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395250) and [PersonGroup Person - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b). + +## July 2016 +### New features +* Supported Face to Person object authentication in [Face - Verify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a). +* Added optional `mode` parameter enabling selection of two working modes: `matchPerson` and `matchFace` in [Face - Find Similar](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237) and default is `matchPerson`. +* Added optional `confidenceThreshold` parameter for user to set the threshold of whether one face belongs to a Person object in [Face - Identify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239). +* Added optional `start` and `top` parameters in [PersonGroup - List](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395248) to enable user to specify the start point and the total PersonGroups number to list. + +## V1.0 changes from V0 + +* Updated service root endpoint from ```https://westus.api.cognitive.microsoft.com/face/v0/``` to ```https://westus.api.cognitive.microsoft.com/face/v1.0/```. Changes applied to: + [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), [Face - Identify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239), [Face - Find Similar](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237) and [Face - Group](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395238). +* Updated the minimal detectable face size to 36x36 pixels. Faces smaller than 36x36 pixels will not be detected. +* Deprecated the PersonGroup and Person data in Face V0. Those data cannot be accessed with the Face V1.0 service. +* Deprecated the V0 endpoint of Face API on June 30, 2016. 
+ + ## Cognitive Service updates [Azure update announcements for Cognitive Services](https://azure.microsoft.com/updates/?product=cognitive-services) diff --git a/articles/cognitive-services/Custom-Vision-Service/faq.yml b/articles/cognitive-services/Custom-Vision-Service/faq.yml index 2e5dc7d0c1f84..ef3cd71c01d43 100644 --- a/articles/cognitive-services/Custom-Vision-Service/faq.yml +++ b/articles/cognitive-services/Custom-Vision-Service/faq.yml @@ -49,7 +49,7 @@ sections: - question: | Once I've trained a Custom Vision model, can I manage/deploy the same model to different regions? answer: | - We don't have a use case for publishing a model to a different region, but we do offer the ability to export/import a project into different regions. See [Copy and back up Custom Vision projects](https://docs.microsoft.com/azure/cognitive-services/custom-vision-service/copy-move-projects). + We don't have a use case for publishing a model to a different region, but we do offer the ability to export/import a project into different regions. See [Copy and back up Custom Vision projects](./copy-move-projects.md). - question: | What is the difference between Custom Vision and AutoML? answer: | @@ -64,12 +64,12 @@ sections: - question: | What is the difference between the free and standard pricing tiers? answer: | - See the [Limits and quotas](https://docs.microsoft.com/azure/cognitive-services/custom-vision-service/limits-and-quotas) page. + See the [Limits and quotas](./limits-and-quotas.md) page. - question: | How can users be added to a Cognitive Services multi-service account to collaborate on a Custom Vision project in the web portal? answer: | - You can use Azure RBAC roles to give specific users access to collaborate on a custom vision portal project. See the [Role-based access control docs](https://docs.microsoft.com/azure/cognitive-services/custom-vision-service/role-based-access-control) + You can use Azure RBAC roles to give specific users access to collaborate on a custom vision portal project. See the [Role-based access control docs](./role-based-access-control.md) - question: | Can training images be exported with the tags that were added in the Custom Vision portal? answer: | @@ -88,5 +88,4 @@ sections: - question: | How can I write logs on this service? answer: | - Use [Diagnostic logging](https://docs.microsoft.com/azure/cognitive-services/diagnostic-logging). - + Use [Diagnostic logging](../diagnostic-logging.md). \ No newline at end of file diff --git a/articles/cognitive-services/Encryption/cognitive-services-encryption-keys-portal.md b/articles/cognitive-services/Encryption/cognitive-services-encryption-keys-portal.md index 7070b6322f948..867e745bd44c1 100644 --- a/articles/cognitive-services/Encryption/cognitive-services-encryption-keys-portal.md +++ b/articles/cognitive-services/Encryption/cognitive-services-encryption-keys-portal.md @@ -26,6 +26,7 @@ The process to enable Customer-Managed Keys with Azure Key Vault for Cognitive S * [Language Understanding service encryption of data at rest](../LUIS/encrypt-data-at-rest.md) * [QnA Maker encryption of data at rest](../QnAMaker/encrypt-data-at-rest.md) * [Translator encryption of data at rest](../translator/encrypt-data-at-rest.md) +* [Language service encryption of data at rest](../language-service/concepts/encryption-data-at-rest.md) ## Speech @@ -39,4 +40,4 @@ The process to enable Customer-Managed Keys with Azure Key Vault for Cognitive S ## Next steps * [What is Azure Key Vault](../../key-vault/general/overview.md)? 
-* [Cognitive Services Customer-Managed Key Request Form](https://aka.ms/cogsvc-cmk) \ No newline at end of file +* [Cognitive Services Customer-Managed Key Request Form](https://aka.ms/cogsvc-cmk) diff --git a/articles/cognitive-services/Face/Face-API-How-to-Topics/HowtoAnalyzeVideo_Face.md b/articles/cognitive-services/Face/Face-API-How-to-Topics/HowtoAnalyzeVideo_Face.md deleted file mode 100644 index a42a917644805..0000000000000 --- a/articles/cognitive-services/Face/Face-API-How-to-Topics/HowtoAnalyzeVideo_Face.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: "Example: Real-time video analysis - Face" -titleSuffix: Azure Cognitive Services -description: Use the Face service to perform near-real-time analysis on frames taken from a live video stream. -services: cognitive-services -author: SteveMSFT -manager: nitinme - -ms.service: cognitive-services -ms.subservice: face-api -ms.topic: how-to -ms.date: 03/01/2018 -ms.author: sbowles -ms.devlang: csharp -ms.custom: devx-track-csharp ---- - -# Example: How to Analyze Videos in Real-time - -This guide will demonstrate how to perform near-real-time analysis on frames taken from a live video stream. The basic components in such a system are: - -- Acquire frames from a video source -- Select which frames to analyze -- Submit these frames to the API -- Consume each analysis result that is returned from the API call - -These samples are written in C# and the code can be found on GitHub here: [https://github.com/Microsoft/Cognitive-Samples-VideoFrameAnalysis](https://github.com/Microsoft/Cognitive-Samples-VideoFrameAnalysis/). - -## The Approach - -There are multiple ways to solve the problem of running near-real-time analysis on video streams. We will start by outlining three approaches in increasing levels of sophistication. - -### A Simple Approach - -The simplest design for a near-real-time analysis system is an infinite loop, where each iteration grabs a frame, analyzes it, and then consumes the result: - -```csharp -while (true) -{ - Frame f = GrabFrame(); - if (ShouldAnalyze(f)) - { - AnalysisResult r = await Analyze(f); - ConsumeResult(r); - } -} -``` - -If our analysis consisted of a lightweight client-side algorithm, this approach would be suitable. However, when analysis happens in the cloud, the latency involved means that an API call might take several seconds. During this time, we are not capturing images, and our thread is essentially doing nothing. Our maximum frame-rate is limited by the latency of the API calls. - -### Parallelizing API Calls - -While a simple single-threaded loop makes sense for a lightweight client-side algorithm, it doesn't fit well with the latency involved in cloud API calls. The solution to this problem is to allow the long-running API calls to execute in parallel with the frame-grabbing. In C#, we could achieve this using Task-based parallelism, for example: - -```csharp -while (true) -{ - Frame f = GrabFrame(); - if (ShouldAnalyze(f)) - { - var t = Task.Run(async () => - { - AnalysisResult r = await Analyze(f); - ConsumeResult(r); - } - } -} -``` - -This code launches each analysis in a separate Task, which can run in the background while we continue grabbing new frames. With this method we avoid blocking the main thread while waiting for an API call to return, but we have lost some of the guarantees that the simple version provided. Multiple API calls might occur in parallel, and the results might get returned in the wrong order. 
This could also cause multiple threads to enter the ConsumeResult() function simultaneously, which could be dangerous, if the function is not thread-safe. Finally, this simple code does not keep track of the Tasks that get created, so exceptions will silently disappear. Therefore, the final step is to add a "consumer" thread that will track the analysis tasks, raise exceptions, kill long-running tasks, and ensure that the results get consumed in the correct order. - -### A Producer-Consumer Design - -In our final "producer-consumer" system, we have a producer thread that looks similar to our previous infinite loop. However, instead of consuming analysis results as soon as they are available, the producer simply puts the tasks into a queue to keep track of them. - -```csharp -// Queue that will contain the API call tasks. -var taskQueue = new BlockingCollection>(); - -// Producer thread. -while (true) -{ - // Grab a frame. - Frame f = GrabFrame(); - - // Decide whether to analyze the frame. - if (ShouldAnalyze(f)) - { - // Start a task that will run in parallel with this thread. - var analysisTask = Task.Run(async () => - { - // Put the frame, and the result/exception into a wrapper object. - var output = new ResultWrapper(f); - try - { - output.Analysis = await Analyze(f); - } - catch (Exception e) - { - output.Exception = e; - } - return output; - } - - // Push the task onto the queue. - taskQueue.Add(analysisTask); - } -} -``` - -We also have a consumer thread that takes tasks off the queue, waits for them to finish, and either displays the result or raises the exception that was thrown. By using the queue, we can guarantee that results get consumed one at a time, in the correct order, without limiting the maximum frame-rate of the system. - -```csharp -// Consumer thread. -while (true) -{ - // Get the oldest task. - Task analysisTask = taskQueue.Take(); - - // Await until the task is completed. - var output = await analysisTask; - - // Consume the exception or result. - if (output.Exception != null) - { - throw output.Exception; - } - else - { - ConsumeResult(output.Analysis); - } -} -``` - -## Implementing the Solution - -### Getting Started - -To get your app up and running as quickly as possible, you will use a flexible implementation of the system described above. To access the code, go to [https://github.com/Microsoft/Cognitive-Samples-VideoFrameAnalysis](https://github.com/Microsoft/Cognitive-Samples-VideoFrameAnalysis). - -The library contains the class FrameGrabber, which implements the producer-consumer system discussed above to process video frames from a webcam. The user can specify the exact form of the API call, and the class uses events to let the calling code know when a new frame is acquired or a new analysis result is available. - -To illustrate some of the possibilities, there are two sample apps that use the library. The first is a simple console app, and a simplified version of it is reproduced below. It grabs frames from the default webcam, and submits them to the Face service for face detection. - -:::code language="csharp" source="~/cognitive-services-quickstart-code/dotnet/Face/sdk/analyze.cs"::: - -The second sample app is a bit more interesting, and allows you to choose which API to call on the video frames. On the left-hand side, the app shows a preview of the live video, on the right-hand side it shows the most recent API result overlaid on the corresponding frame. 
- -In most modes, there will be a visible delay between the live video on the left, and the visualized analysis on the right. This delay is the time taken to make the API call. One exception is the "EmotionsWithClientFaceDetect" mode, which performs face detection locally on the client computer using OpenCV, before submitting any images to Cognitive Services. This way, we can visualize the detected face immediately and then update the emotions once the API call returns. This is an example of a "hybrid" approach, where the client can perform some simple processing, and Cognitive Services APIs can augment this with more advanced analysis when necessary. - -![HowToAnalyzeVideo](../../Video/Images/FramebyFrame.jpg) - -### Integrating into your codebase - -To get started with this sample, follow these steps: - -1. Create an [Azure account](https://azure.microsoft.com/free/cognitive-services/). If you already have one, you can skip to the next step. -2. Create resources for Computer Vision and Face in the Azure portal to get your key and endpoint. Make sure to select the free tier (F0) during setup. - - [Computer Vision](https://portal.azure.com/#create/Microsoft.CognitiveServicesComputerVision) - - [Face](https://portal.azure.com/#create/Microsoft.CognitiveServicesFace) - After the resources are deployed, click **Go to resource** to collect your key and endpoint for each resource. -3. Clone the [Cognitive-Samples-VideoFrameAnalysis](https://github.com/Microsoft/Cognitive-Samples-VideoFrameAnalysis/) GitHub repo. -4. Open the sample in Visual Studio, and build and run the sample applications: - - For BasicConsoleSample, the Face key is hard-coded directly in [BasicConsoleSample/Program.cs](https://github.com/Microsoft/Cognitive-Samples-VideoFrameAnalysis/blob/master/Windows/BasicConsoleSample/Program.cs). - - For LiveCameraSample, the keys should be entered into the Settings pane of the app. They will be persisted across sessions as user data. - - -When you're ready to integrate, **reference the VideoFrameAnalyzer library from your own projects.** - -## Summary - -In this guide, you learned how to run near-real-time analysis on live video streams using the Face, Computer Vision, and Emotion APIs, and how to use our sample code to get started. - -Feel free to provide feedback and suggestions in the [GitHub repository](https://github.com/Microsoft/Cognitive-Samples-VideoFrameAnalysis/) or, for broader API feedback, on our [UserVoice](https://feedback.azure.com/d365community/forum/09041fae-0b25-ec11-b6e6-000d3a4f0858) site. - -## Related Topics -- [Call the detect API](HowtoDetectFacesinImage.md) diff --git a/articles/cognitive-services/Face/Face-API-How-to-Topics/HowtoDetectFacesinImage.md b/articles/cognitive-services/Face/Face-API-How-to-Topics/HowtoDetectFacesinImage.md deleted file mode 100644 index 06aa48cbd63f1..0000000000000 --- a/articles/cognitive-services/Face/Face-API-How-to-Topics/HowtoDetectFacesinImage.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: "Call the Detect API - Face" -titleSuffix: Azure Cognitive Services -description: This guide demonstrates how to use face detection to extract attributes like age, emotion, or head pose from a given image. 
-services: cognitive-services -author: PatrickFarley -manager: nitinme - -ms.service: cognitive-services -ms.subservice: face-api -ms.topic: how-to -ms.date: 08/04/2021 -ms.author: pafarley -ms.devlang: csharp -ms.custom: devx-track-csharp ---- - -# Call the Detect API - -This guide demonstrates how to use the face detection API to extract attributes like age, emotion, or head pose from a given image. You'll learn the different ways to configure the behavior of this API to meet your needs. - -The code snippets in this guide are written in C# by using the Azure Cognitive Services Face client library. The same functionality is available through the [REST API](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236). - - -## Setup - -This guide assumes that you already constructed a [FaceClient](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.faceclient) object, named `faceClient`, with a Face key and endpoint URL. For instructions on how to set up this feature, follow one of the quickstarts. - -## Submit data to the service - -To find faces and get their locations in an image, call the [DetectWithUrlAsync](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.faceoperationsextensions.detectwithurlasync) or [DetectWithStreamAsync](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.faceoperationsextensions.detectwithstreamasync) method. **DetectWithUrlAsync** takes a URL string as input, and **DetectWithStreamAsync** takes the raw byte stream of an image as input. - -:::code language="csharp" source="~/cognitive-services-quickstart-code/dotnet/Face/sdk/detect.cs" id="basic1"::: - -You can query the returned [DetectedFace](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.models.detectedface) objects for their unique IDs and a rectangle that gives the pixel coordinates of the face. This way, you can tell which face ID maps to which face in the original image. - -:::code language="csharp" source="~/cognitive-services-quickstart-code/dotnet/Face/sdk/detect.cs" id="basic2"::: - -For information on how to parse the location and dimensions of the face, see [FaceRectangle](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.models.facerectangle). Usually, this rectangle contains the eyes, eyebrows, nose, and mouth. The top of head, ears, and chin aren't necessarily included. To use the face rectangle to crop a complete head or get a mid-shot portrait, you should expand the rectangle in each direction. - -## Determine how to process the data - -This guide focuses on the specifics of the Detect call, such as what arguments you can pass and what you can do with the returned data. We recommend that you query for only the features you need. Each operation takes more time to complete. - -### Get face landmarks - -[Face landmarks](../concepts/face-detection.md#face-landmarks) are a set of easy-to-find points on a face, such as the pupils or the tip of the nose. To get face landmark data, set the _detectionModel_ parameter to `DetectionModel.Detection01` and the _returnFaceLandmarks_ parameter to `true`. - -:::code language="csharp" source="~/cognitive-services-quickstart-code/dotnet/Face/sdk/detect.cs" id="landmarks1"::: - -### Get face attributes - -Besides face rectangles and landmarks, the face detection API can analyze several conceptual attributes of a face. For a full list, see the [Face attributes](../concepts/face-detection.md#attributes) conceptual section. 
- -To analyze face attributes, set the _detectionModel_ parameter to `DetectionModel.Detection01` and the _returnFaceAttributes_ parameter to a list of [FaceAttributeType Enum](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.models.faceattributetype) values. - -:::code language="csharp" source="~/cognitive-services-quickstart-code/dotnet/Face/sdk/detect.cs" id="attributes1"::: - - -## Get results from the service - -### Face landmark results - -The following code demonstrates how you might retrieve the locations of the nose and pupils: - -:::code language="csharp" source="~/cognitive-services-quickstart-code/dotnet/Face/sdk/detect.cs" id="landmarks2"::: - -You also can use face landmark data to accurately calculate the direction of the face. For example, you can define the rotation of the face as a vector from the center of the mouth to the center of the eyes. The following code calculates this vector: - -:::code language="csharp" source="~/cognitive-services-quickstart-code/dotnet/Face/sdk/detect.cs" id="direction"::: - -When you know the direction of the face, you can rotate the rectangular face frame to align it more properly. To crop faces in an image, you can programmatically rotate the image so the faces always appear upright. - - -### Face attribute results - -The following code shows how you might retrieve the face attribute data that you requested in the original call. - -:::code language="csharp" source="~/cognitive-services-quickstart-code/dotnet/Face/sdk/detect.cs" id="attributes2"::: - -To learn more about each of the attributes, see the [Face detection and attributes](../concepts/face-detection.md) conceptual guide. - -## Next steps - -In this guide, you learned how to use the various functionalities of face detection and analysis. Next, integrate these features into an app to add face data from users. - -- [Tutorial: Add users to a Face service](../enrollment-overview.md) - -## Related articles - -- [Reference documentation (REST)](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) -- [Reference documentation (.NET SDK)](/dotnet/api/overview/azure/cognitiveservices/face-readme) \ No newline at end of file diff --git a/articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-migrate-face-data.md b/articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-migrate-face-data.md deleted file mode 100644 index 8b3abeca1a7b9..0000000000000 --- a/articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-migrate-face-data.md +++ /dev/null @@ -1,239 +0,0 @@ ---- -title: "Migrate your face data across subscriptions - Face" -titleSuffix: Azure Cognitive Services -description: This guide shows you how to migrate your stored face data from one Face subscription to another. -services: cognitive-services -author: nitinme -manager: nitinme - -ms.service: cognitive-services -ms.subservice: face-api -ms.topic: how-to -ms.date: 02/22/2021 -ms.author: nitinme -ms.devlang: csharp -ms.custom: [devx-track-csharp, cogserv-non-critical-vision] ---- - -# Migrate your face data to a different Face subscription - -This guide shows you how to move face data, such as a saved PersonGroup object with faces, to a different Azure Cognitive Services Face subscription. To move the data, you use the Snapshot feature. This way you avoid having to repeatedly build and train a PersonGroup or FaceList object when you move or expand your operations. 
For example, perhaps you created a PersonGroup object with a free subscription and now want to migrate it to your paid subscription. Or you might need to sync face data across subscriptions in different regions for a large enterprise operation. - -This same migration strategy also applies to LargePersonGroup and LargeFaceList objects. If you aren't familiar with the concepts in this guide, see their definitions in the [Face recognition concepts](../concepts/face-recognition.md) guide. This guide uses the Face .NET client library with C#. - -> [!WARNING] -> The Snapshot feature might move your data outside the geographic region you originally selected. Data might move to West US, West Europe, and Southeast Asia regions. - -## Prerequisites - -You need the following items: - -- Two Face keys, one with the existing data and one to migrate to. To subscribe to the Face service and get your key, follow the instructions in [Create a Cognitive Services account](../../cognitive-services-apis-create-account.md). -- The Face subscription ID string that corresponds to the target subscription. To find it, select **Overview** in the Azure portal. -- Any edition of [Visual Studio 2015 or 2017](https://www.visualstudio.com/downloads/). - -## Create the Visual Studio project - -This guide uses a simple console app to run the face data migration. For a full implementation, see the [Face snapshot sample](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/tree/master/app-samples/FaceApiSnapshotSample/FaceApiSnapshotSample) on GitHub. - -1. In Visual Studio, create a new Console app .NET Framework project. Name it **FaceApiSnapshotSample**. -1. Get the required NuGet packages. Right-click your project in the Solution Explorer, and select **Manage NuGet Packages**. Select the **Browse** tab, and select **Include prerelease**. Find and install the following package: - - [Microsoft.Azure.CognitiveServices.Vision.Face 2.3.0-preview](https://www.nuget.org/packages/Microsoft.Azure.CognitiveServices.Vision.Face/2.2.0-preview) - -## Create face clients - -In the **Main** method in *Program.cs*, create two [FaceClient](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.faceclient) instances for your source and target subscriptions. This example uses a Face subscription in the East Asia region as the source and a West US subscription as the target. This example demonstrates how to migrate data from one Azure region to another. - -[!INCLUDE [subdomains-note](../../../../includes/cognitive-services-custom-subdomains-note.md)] - -```csharp -var FaceClientEastAsia = new FaceClient(new ApiKeyServiceClientCredentials("")) - { - Endpoint = "https://southeastasia.api.cognitive.microsoft.com/>" - }; - -var FaceClientWestUS = new FaceClient(new ApiKeyServiceClientCredentials("")) - { - Endpoint = "https://westus.api.cognitive.microsoft.com/" - }; -``` - -Fill in the key values and endpoint URLs for your source and target subscriptions. - - -## Prepare a PersonGroup for migration - -You need the ID of the PersonGroup in your source subscription to migrate it to the target subscription. Use the [PersonGroupOperationsExtensions.ListAsync](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.persongroupoperationsextensions.listasync) method to retrieve a list of your PersonGroup objects. 
Then get the [PersonGroup.PersonGroupId](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.models.persongroup.persongroupid#Microsoft_Azure_CognitiveServices_Vision_Face_Models_PersonGroup_PersonGroupId) property. This process looks different based on what PersonGroup objects you have. In this guide, the source PersonGroup ID is stored in `personGroupId`. - -> [!NOTE] -> The [sample code](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/tree/master/app-samples/FaceApiSnapshotSample/FaceApiSnapshotSample) creates and trains a new PersonGroup to migrate. In most cases, you should already have a PersonGroup to use. - -## Take a snapshot of a PersonGroup - -A snapshot is temporary remote storage for certain Face data types. It functions as a kind of clipboard to copy data from one subscription to another. First, you take a snapshot of the data in the source subscription. Then you apply it to a new data object in the target subscription. - -Use the source subscription's FaceClient instance to take a snapshot of the PersonGroup. Use [TakeAsync](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.snapshotoperationsextensions.takeasync) with the PersonGroup ID and the target subscription's ID. If you have multiple target subscriptions, add them as array entries in the third parameter. - -```csharp -var takeSnapshotResult = await FaceClientEastAsia.Snapshot.TakeAsync( - SnapshotObjectType.PersonGroup, - personGroupId, - new[] { "" /* Put other IDs here, if multiple target subscriptions wanted */ }); -``` - -> [!NOTE] -> The process of taking and applying snapshots doesn't disrupt any regular calls to the source or target PersonGroups or FaceLists. Don't make simultaneous calls that change the source object, such as [FaceList management calls](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.facelistoperations) or the [PersonGroup Train](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.persongroupoperations) call, for example. The snapshot operation might run before or after those operations or might encounter errors. - -## Retrieve the snapshot ID - -The method used to take snapshots is asynchronous, so you must wait for its completion. Snapshot operations can't be canceled. In this code, the `WaitForOperation` method monitors the asynchronous call. It checks the status every 100 ms. After the operation finishes, retrieve an operation ID by parsing the `OperationLocation` field. - -```csharp -var takeOperationId = Guid.Parse(takeSnapshotResult.OperationLocation.Split('/')[2]); -var operationStatus = await WaitForOperation(FaceClientEastAsia, takeOperationId); -``` - -A typical `OperationLocation` value looks like this: - -```csharp -"/operations/a63a3bdd-a1db-4d05-87b8-dbad6850062a" -``` - -The `WaitForOperation` helper method is here: - -```csharp -/// -/// Waits for the take/apply operation to complete and returns the final operation status. -/// -/// The final operation status. -private static async Task WaitForOperation(IFaceClient client, Guid operationId) -{ - OperationStatus operationStatus = null; - do - { - if (operationStatus != null) - { - Thread.Sleep(TimeSpan.FromMilliseconds(100)); - } - - // Get the status of the operation. 
- operationStatus = await client.Snapshot.GetOperationStatusAsync(operationId); - - Console.WriteLine($"Operation Status: {operationStatus.Status}"); - } - while (operationStatus.Status != OperationStatusType.Succeeded - && operationStatus.Status != OperationStatusType.Failed); - - return operationStatus; -} -``` - -After the operation status shows `Succeeded`, get the snapshot ID by parsing the `ResourceLocation` field of the returned OperationStatus instance. - -```csharp -var snapshotId = Guid.Parse(operationStatus.ResourceLocation.Split('/')[2]); -``` - -A typical `resourceLocation` value looks like this: - -```csharp -"/snapshots/e58b3f08-1e8b-4165-81df-aa9858f233dc" -``` - -## Apply a snapshot to a target subscription - -Next, create the new PersonGroup in the target subscription by using a randomly generated ID. Then use the target subscription's FaceClient instance to apply the snapshot to this PersonGroup. Pass in the snapshot ID and the new PersonGroup ID. - -```csharp -var newPersonGroupId = Guid.NewGuid().ToString(); -var applySnapshotResult = await FaceClientWestUS.Snapshot.ApplyAsync(snapshotId, newPersonGroupId); -``` - - -> [!NOTE] -> A Snapshot object is valid for only 48 hours. Only take a snapshot if you intend to use it for data migration soon after. - -A snapshot apply request returns another operation ID. To get this ID, parse the `OperationLocation` field of the returned applySnapshotResult instance. - -```csharp -var applyOperationId = Guid.Parse(applySnapshotResult.OperationLocation.Split('/')[2]); -``` - -The snapshot application process is also asynchronous, so again use `WaitForOperation` to wait for it to finish. - -```csharp -operationStatus = await WaitForOperation(FaceClientWestUS, applyOperationId); -``` - -## Test the data migration - -After you apply the snapshot, the new PersonGroup in the target subscription populates with the original face data. By default, training results are also copied. The new PersonGroup is ready for face identification calls without needing retraining. - -To test the data migration, run the following operations and compare the results they print to the console: - -```csharp -await DisplayPersonGroup(FaceClientEastAsia, personGroupId); -await IdentifyInPersonGroup(FaceClientEastAsia, personGroupId); - -await DisplayPersonGroup(FaceClientWestUS, newPersonGroupId); -// No need to retrain the PersonGroup before identification, -// training results are copied by snapshot as well. -await IdentifyInPersonGroup(FaceClientWestUS, newPersonGroupId); -``` - -Use the following helper methods: - -```csharp -private static async Task DisplayPersonGroup(IFaceClient client, string personGroupId) -{ - var personGroup = await client.PersonGroup.GetAsync(personGroupId); - Console.WriteLine("PersonGroup:"); - Console.WriteLine(JsonConvert.SerializeObject(personGroup)); - - // List persons. 
- var persons = await client.PersonGroupPerson.ListAsync(personGroupId); - - foreach (var person in persons) - { - Console.WriteLine(JsonConvert.SerializeObject(person)); - } - - Console.WriteLine(); -} -``` - -```csharp -private static async Task IdentifyInPersonGroup(IFaceClient client, string personGroupId) -{ - using (var fileStream = new FileStream("data\\PersonGroup\\Daughter\\Daughter1.jpg", FileMode.Open, FileAccess.Read)) - { - var detectedFaces = await client.Face.DetectWithStreamAsync(fileStream); - - var result = await client.Face.IdentifyAsync(detectedFaces.Select(face => face.FaceId.Value).ToList(), personGroupId); - Console.WriteLine("Test identify against PersonGroup"); - Console.WriteLine(JsonConvert.SerializeObject(result)); - Console.WriteLine(); - } -} -``` - -Now you can use the new PersonGroup in the target subscription. - -To update the target PersonGroup again in the future, create a new PersonGroup to receive the snapshot. To do this, follow the steps in this guide. A single PersonGroup object can have a snapshot applied to it only one time. - -## Clean up resources - -After you finish migrating face data, manually delete the snapshot object. - -```csharp -await FaceClientEastAsia.Snapshot.DeleteAsync(snapshotId); -``` - -## Next steps - -Next, see the relevant API reference documentation, explore a sample app that uses the Snapshot feature, or follow a how-to guide to start using the other API operations mentioned here: - -- [Snapshot reference documentation (.NET SDK)](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.snapshotoperations) -- [Face snapshot sample](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/tree/master/app-samples/FaceApiSnapshotSample/FaceApiSnapshotSample) -- [Add faces](how-to-add-faces.md) -- [Call the detect API](HowtoDetectFacesinImage.md) diff --git a/articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-mitigate-latency.md b/articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-mitigate-latency.md deleted file mode 100644 index 5326f20beb5d7..0000000000000 --- a/articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-mitigate-latency.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: How to mitigate latency when using the Face service -titleSuffix: Azure Cognitive Services -description: Learn how to mitigate latency when using the Face service. -services: cognitive-services -author: PatrickFarley -manager: nitinme -ms.service: cognitive-services -ms.topic: how-to -ms.date: 1/5/2021 -ms.author: pafarley -ms.devlang: csharp -ms.custom: cogserv-non-critical-vision ---- - -# How to: mitigate latency when using the Face service - -You may encounter latency when using the Face service. Latency refers to any kind of delay that occurs when communicating over a network. In general, possible causes of latency include: -- The physical distance each packet must travel from source to destination. -- Problems with the transmission medium. -- Errors in routers or switches along the transmission path. -- The time required by antivirus applications, firewalls, and other security mechanisms to inspect packets. -- Malfunctions in client or server applications. - -This topic talks about possible causes of latency specific to using the Azure Cognitive Services, and how you can mitigate these causes. - -> [!NOTE] -> Azure Cognitive Services do not provide any Service Level Agreement (SLA) regarding latency. 
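Before working through the individual causes below, it helps to measure the round-trip time of a representative call so you can tell whether a given mitigation actually helps. The following is a minimal sketch, assuming an already-constructed `IFaceClient` (see the quickstarts); the sample image URL is the same publicly hosted blob used elsewhere in this article.

```csharp
using System;
using System.Diagnostics;
using System.Threading.Tasks;
using Microsoft.Azure.CognitiveServices.Vision.Face;

static class LatencyProbe
{
    public static async Task MeasureDetectAsync(IFaceClient client)
    {
        const string imageUrl = "https://csdx.blob.core.windows.net/resources/Face/Images/Family1-Daughter1.jpg";

        // Time a single detection call end to end.
        var stopwatch = Stopwatch.StartNew();
        var faces = await client.Face.DetectWithUrlAsync(imageUrl);
        stopwatch.Stop();

        Console.WriteLine($"Detected {faces.Count} face(s) in {stopwatch.ElapsedMilliseconds} ms.");
    }
}
```

Comparing this number before and after each change (remote URL versus blob storage, large versus small uploads, sequential versus parallel calls) makes it easier to see which mitigation matters for your workload.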
- -## Possible causes of latency - -### Slow connection between the Cognitive Service and a remote URL - -Some Azure Cognitive Services provide methods that obtain data from a remote URL that you provide. For example, when you call the [DetectWithUrlAsync method](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.faceoperationsextensions.detectwithurlasync#Microsoft_Azure_CognitiveServices_Vision_Face_FaceOperationsExtensions_DetectWithUrlAsync_Microsoft_Azure_CognitiveServices_Vision_Face_IFaceOperations_System_String_System_Nullable_System_Boolean__System_Nullable_System_Boolean__System_Collections_Generic_IList_System_Nullable_Microsoft_Azure_CognitiveServices_Vision_Face_Models_FaceAttributeType___System_String_System_Nullable_System_Boolean__System_String_System_Threading_CancellationToken_) of the Face service, you can specify the URL of an image in which the service tries to detect faces. - -```csharp -var faces = await client.Face.DetectWithUrlAsync("https://www.biography.com/.image/t_share/MTQ1MzAyNzYzOTgxNTE0NTEz/john-f-kennedy---mini-biography.jpg"); -``` - -The Face service must then download the image from the remote server. If the connection from the Face service to the remote server is slow, that will impact the response time of the Detect method. - -To mitigate this, consider [storing the image in Azure Premium Blob Storage](../../../storage/blobs/storage-upload-process-images.md?tabs=dotnet). For example: - -``` csharp -var faces = await client.Face.DetectWithUrlAsync("https://csdx.blob.core.windows.net/resources/Face/Images/Family1-Daughter1.jpg"); -``` - -### Large upload size - -Some Azure Cognitive Services provide methods that obtain data from a file that you upload. For example, when you call the [DetectWithStreamAsync method](/dotnet/api/microsoft.azure.cognitiveservices.vision.face.faceoperationsextensions.detectwithstreamasync#Microsoft_Azure_CognitiveServices_Vision_Face_FaceOperationsExtensions_DetectWithStreamAsync_Microsoft_Azure_CognitiveServices_Vision_Face_IFaceOperations_System_IO_Stream_System_Nullable_System_Boolean__System_Nullable_System_Boolean__System_Collections_Generic_IList_System_Nullable_Microsoft_Azure_CognitiveServices_Vision_Face_Models_FaceAttributeType___System_String_System_Nullable_System_Boolean__System_String_System_Threading_CancellationToken_) of the Face service, you can upload an image in which the service tries to detect faces. - -```csharp -using FileStream fs = File.OpenRead(@"C:\images\face.jpg"); -System.Collections.Generic.IList faces = await client.Face.DetectWithStreamAsync(fs, detectionModel: DetectionModel.Detection02); -``` - -If the file to upload is large, that will impact the response time of the `DetectWithStreamAsync` method, for the following reasons: -- It takes longer to upload the file. -- It takes the service longer to process the file, in proportion to the file size. - -Mitigations: -- Consider [storing the image in Azure Premium Blob Storage](../../../storage/blobs/storage-upload-process-images.md?tabs=dotnet). For example: -``` csharp -var faces = await client.Face.DetectWithUrlAsync("https://csdx.blob.core.windows.net/resources/Face/Images/Family1-Daughter1.jpg"); -``` -- Consider uploading a smaller file. - - See the guidelines regarding [input data for face detection](../concepts/face-detection.md#input-data) and [input data for face recognition](../concepts/face-recognition.md#input-data). 
- - For face detection, when using detection model `DetectionModel.Detection01`, reducing the image file size will increase processing speed. When using detection model `DetectionModel.Detection02`, reducing the image file size will only increase processing speed if the image file is smaller than 1920x1080. - - For face recognition, reducing the face size to 200x200 pixels does not affect the accuracy of the recognition model. - - The performance of the `DetectWithUrlAsync` and `DetectWithStreamAsync` methods also depends on how many faces are in an image. The Face service can return up to 100 faces for an image. Faces are ranked by face rectangle size from large to small. - - If you need to call multiple service methods, consider calling them in parallel if your application design allows for it. For example, if you need to detect faces in two images to perform a face comparison: -```csharp -var faces_1 = client.Face.DetectWithUrlAsync("https://www.biography.com/.image/t_share/MTQ1MzAyNzYzOTgxNTE0NTEz/john-f-kennedy---mini-biography.jpg"); -var faces_2 = client.Face.DetectWithUrlAsync("https://www.biography.com/.image/t_share/MTQ1NDY3OTIxMzExNzM3NjE3/john-f-kennedy---debating-richard-nixon.jpg"); -Task.WaitAll (new Task>[] { faces_1, faces_2 }); -IEnumerable results = faces_1.Result.Concat (faces_2.Result); -``` - -### Slow connection between your compute resource and the Face service - -If your computer has a slow connection to the Face service, that will impact the response time of service methods. - -Mitigations: -- When you create your Face subscription, make sure to choose the region closest to where your application is hosted. -- If you need to call multiple service methods, consider calling them in parallel if your application design allows for it. See the previous section for an example. -- If longer latencies impact the user experience, choose a timeout threshold (e.g. maximum 5s) before retrying the API call. - -## Next steps - -In this guide, you learned how to mitigate latency when using the Face service. Next, learn how to scale up from existing PersonGroup and FaceList objects to LargePersonGroup and LargeFaceList objects, respectively. - -> [!div class="nextstepaction"] -> [Example: Use the large-scale feature](how-to-use-large-scale.md) - -## Related topics - -- [Reference documentation (REST)](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) -- [Reference documentation (.NET SDK)](/dotnet/api/overview/azure/cognitiveservices/face-readme) diff --git a/articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-use-headpose.md b/articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-use-headpose.md deleted file mode 100644 index bab89525169e6..0000000000000 --- a/articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-use-headpose.md +++ /dev/null @@ -1,134 +0,0 @@ ---- -title: Use the HeadPose attribute -titleSuffix: Azure Cognitive Services -description: Learn how to use the HeadPose attribute to automatically rotate the face rectangle or detect head gestures in a video feed. -author: PatrickFarley -manager: nitinme - -ms.service: cognitive-services -ms.subservice: face-api -ms.topic: how-to -ms.date: 02/23/2021 -ms.author: pafarley -ms.devlang: csharp -ms.custom: devx-track-csharp ---- - -# Use the HeadPose attribute - -In this guide, you'll see how you can use the HeadPose attribute of a detected face to enable some key scenarios. 
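Both scenarios in this guide assume that the HeadPose attribute was requested at detection time. A minimal sketch of such a request, assuming a configured `IFaceClient` and a hypothetical image URL, might look like the following (depending on your SDK version, the attribute list parameter may expect `FaceAttributeType` or nullable `FaceAttributeType?` elements):

```csharp
using System;
using System.Collections.Generic;
using System.Threading.Tasks;
using Microsoft.Azure.CognitiveServices.Vision.Face;
using Microsoft.Azure.CognitiveServices.Vision.Face.Models;

static class HeadPoseDetection
{
    // Detects faces and prints the head pose angles for each one.
    public static async Task PrintHeadPosesAsync(IFaceClient faceClient, string imageUrl)
    {
        // Request the HeadPose attribute so it can be queried on each detected face later.
        var faces = await faceClient.Face.DetectWithUrlAsync(
            imageUrl,
            returnFaceAttributes: new List<FaceAttributeType> { FaceAttributeType.HeadPose });

        foreach (var face in faces)
        {
            var pose = face.FaceAttributes.HeadPose;
            Console.WriteLine($"Pitch: {pose.Pitch}, Roll: {pose.Roll}, Yaw: {pose.Yaw}");
        }
    }
}
```

The rest of this guide uses the `Roll` value of this attribute to rotate face rectangles, and tracks changes across all three angles to detect head gestures.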
- -## Rotate the face rectangle - -The face rectangle, returned with every detected face, marks the location and size of the face in the image. By default, the rectangle is always aligned with the image (its sides are vertical and horizontal); this can be inefficient for framing angled faces. In situations where you want to programmatically crop faces in an image, it's better to be able to rotate the rectangle to crop. - -The [Cognitive Services Face WPF (Windows Presentation Foundation)](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/tree/master/app-samples/Cognitive-Services-Face-WPF) sample app uses the HeadPose attribute to rotate its detected face rectangles. - -### Explore the sample code - -You can programmatically rotate the face rectangle by using the HeadPose attribute. If you specify this attribute when detecting faces (see [Call the detect API](HowtoDetectFacesinImage.md)), you will be able to query it later. The following method from the [Cognitive Services Face WPF](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/tree/master/app-samples/Cognitive-Services-Face-WPF) app takes a list of **DetectedFace** objects and returns a list of **[Face](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/blob/master/app-samples/Cognitive-Services-Face-WPF/Sample-WPF/Controls/Face.cs)** objects. **Face** here is a custom class that stores face data, including the updated rectangle coordinates. New values are calculated for **top**, **left**, **width**, and **height**, and a new field **FaceAngle** specifies the rotation. - -```csharp -/// -/// Calculate the rendering face rectangle -/// -/// Detected face from service -/// Image rendering size -/// Image width and height -/// Face structure for rendering -public static IEnumerable CalculateFaceRectangleForRendering(IList faces, int maxSize, Tuple imageInfo) -{ - var imageWidth = imageInfo.Item1; - var imageHeight = imageInfo.Item2; - var ratio = (float)imageWidth / imageHeight; - int uiWidth = 0; - int uiHeight = 0; - if (ratio > 1.0) - { - uiWidth = maxSize; - uiHeight = (int)(maxSize / ratio); - } - else - { - uiHeight = maxSize; - uiWidth = (int)(ratio * uiHeight); - } - - var uiXOffset = (maxSize - uiWidth) / 2; - var uiYOffset = (maxSize - uiHeight) / 2; - var scale = (float)uiWidth / imageWidth; - - foreach (var face in faces) - { - var left = (int)(face.FaceRectangle.Left * scale + uiXOffset); - var top = (int)(face.FaceRectangle.Top * scale + uiYOffset); - - // Angle of face rectangles, default value is 0 (not rotated). - double faceAngle = 0; - - // If head pose attributes have been obtained, re-calculate the left & top (X & Y) positions. - if (face.FaceAttributes?.HeadPose != null) - { - // Head pose's roll value acts directly as the face angle. - faceAngle = face.FaceAttributes.HeadPose.Roll; - var angleToPi = Math.Abs((faceAngle / 180) * Math.PI); - - // _____ | / \ | - // |____| => |/ /| - // | \ / | - // Re-calculate the face rectangle's left & top (X & Y) positions. 
- var newLeft = face.FaceRectangle.Left + - face.FaceRectangle.Width / 2 - - (face.FaceRectangle.Width * Math.Sin(angleToPi) + face.FaceRectangle.Height * Math.Cos(angleToPi)) / 2; - - var newTop = face.FaceRectangle.Top + - face.FaceRectangle.Height / 2 - - (face.FaceRectangle.Height * Math.Sin(angleToPi) + face.FaceRectangle.Width * Math.Cos(angleToPi)) / 2; - - left = (int)(newLeft * scale + uiXOffset); - top = (int)(newTop * scale + uiYOffset); - } - - yield return new Face() - { - FaceId = face.FaceId?.ToString(), - Left = left, - Top = top, - OriginalLeft = (int)(face.FaceRectangle.Left * scale + uiXOffset), - OriginalTop = (int)(face.FaceRectangle.Top * scale + uiYOffset), - Height = (int)(face.FaceRectangle.Height * scale), - Width = (int)(face.FaceRectangle.Width * scale), - FaceAngle = faceAngle, - }; - } -} -``` - -### Display the updated rectangle - -From here, you can use the returned **Face** objects in your display. The following lines from [FaceDetectionPage.xaml](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/blob/master/app-samples/Cognitive-Services-Face-WPF/Sample-WPF/Controls/FaceDetectionPage.xaml) show how the new rectangle is rendered from this data: - -```xaml - - - - - - - -``` - -## Detect head gestures - -You can detect head gestures like nodding and head shaking by tracking HeadPose changes in real time. You can use this feature as a custom liveness detector. - -Liveness detection is the task of determining that a subject is a real person and not an image or video representation. A head gesture detector could serve as one way to help verify liveness, especially as opposed to an image representation of a person. - -> [!CAUTION] -> To detect head gestures in real time, you'll need to call the Face API at a high rate (more than once per second). If you have a free-tier (f0) subscription, this will not be possible. If you have a paid-tier subscription, make sure you've calculated the costs of making rapid API calls for head gesture detection. - -See the [Face HeadPose Sample](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/tree/master/app-samples/FaceAPIHeadPoseSample) on GitHub for a working example of head gesture detection. - -## Next steps - -See the [Cognitive Services Face WPF](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/tree/master/app-samples/Cognitive-Services-Face-WPF) app on GitHub for a working example of rotated face rectangles. Or, see the [Face HeadPose Sample](https://github.com/Azure-Samples/cognitive-services-dotnet-sdk-samples/tree/master/app-samples) app, which tracks the HeadPose attribute in real time to detect head movements. \ No newline at end of file diff --git a/articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-use-large-scale.md b/articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-use-large-scale.md deleted file mode 100644 index 1298dda8ec116..0000000000000 --- a/articles/cognitive-services/Face/Face-API-How-to-Topics/how-to-use-large-scale.md +++ /dev/null @@ -1,276 +0,0 @@ ---- -title: "Example: Use the Large-Scale feature - Face" -titleSuffix: Azure Cognitive Services -description: This guide is an article on how to scale up from existing PersonGroup and FaceList objects to LargePersonGroup and LargeFaceList objects. 
-services: cognitive-services -author: SteveMSFT -manager: nitinme - -ms.service: cognitive-services -ms.subservice: face-api -ms.topic: how-to -ms.date: 05/01/2019 -ms.author: sbowles -ms.devlang: csharp -ms.custom: devx-track-csharp ---- - -# Example: Use the large-scale feature - -This guide is an advanced article on how to scale up from existing PersonGroup and FaceList objects to LargePersonGroup and LargeFaceList objects, respectively. This guide demonstrates the migration process. It assumes a basic familiarity with PersonGroup and FaceList objects, the [Train](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599ae2d16ac60f11b48b5aa4) operation, and the face recognition functions. To learn more about these subjects, see the [face recognition](../concepts/face-recognition.md) conceptual guide. - -LargePersonGroup and LargeFaceList are collectively referred to as large-scale operations. LargePersonGroup can contain up to 1 million persons, each with a maximum of 248 faces. LargeFaceList can contain up to 1 million faces. The large-scale operations are similar to the conventional PersonGroup and FaceList but have some differences because of the new architecture. - -The samples are written in C# by using the Azure Cognitive Services Face client library. - -> [!NOTE] -> To enable Face search performance for Identification and FindSimilar in large scale, introduce a Train operation to preprocess the LargeFaceList and LargePersonGroup. The training time varies from seconds to about half an hour based on the actual capacity. During the training period, it's possible to perform Identification and FindSimilar if a successful training operating was done before. The drawback is that the new added persons and faces don't appear in the result until a new post migration to large-scale training is completed. - -## Step 1: Initialize the client object - -When you use the Face client library, the key and subscription endpoint are passed in through the constructor of the FaceClient class. For example: - -```csharp -string SubscriptionKey = ""; -// Use your own subscription endpoint corresponding to the key. -string SubscriptionEndpoint = "https://westus.api.cognitive.microsoft.com"; -private readonly IFaceClient faceClient = new FaceClient( - new ApiKeyServiceClientCredentials(subscriptionKey), - new System.Net.Http.DelegatingHandler[] { }); -faceClient.Endpoint = SubscriptionEndpoint -``` - -To get the key with its corresponding endpoint, go to the Azure Marketplace from the Azure portal. -For more information, see [Subscriptions](https://azure.microsoft.com/services/cognitive-services/directory/vision/). - -## Step 2: Code migration - -This section focuses on how to migrate PersonGroup or FaceList implementation to LargePersonGroup or LargeFaceList. Although LargePersonGroup or LargeFaceList differs from PersonGroup or FaceList in design and internal implementation, the API interfaces are similar for backward compatibility. - -Data migration isn't supported. You re-create the LargePersonGroup or LargeFaceList instead. - -### Migrate a PersonGroup to a LargePersonGroup - -Migration from a PersonGroup to a LargePersonGroup is simple. They share exactly the same group-level operations. - -For PersonGroup- or person-related implementation, it's necessary to change only the API paths or SDK class/module to LargePersonGroup and LargePersonGroup Person. - -Add all of the faces and persons from the PersonGroup to the new LargePersonGroup. 
For more information, see [Add faces](how-to-add-faces.md). - -### Migrate a FaceList to a LargeFaceList - -| FaceList APIs | LargeFaceList APIs | -|:---:|:---:| -| Create | Create | -| Delete | Delete | -| Get | Get | -| List | List | -| Update | Update | -| - | Train | -| - | Get Training Status | - -The preceding table is a comparison of list-level operations between FaceList and LargeFaceList. As is shown, LargeFaceList comes with new operations, Train and Get Training Status, when compared with FaceList. Training the LargeFaceList is a precondition of the -[FindSimilar](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237) operation. Training isn't required for FaceList. The following snippet is a helper function to wait for the training of a LargeFaceList: - -```csharp -/// -/// Helper function to train LargeFaceList and wait for finish. -/// -/// -/// The time interval can be adjusted considering the following factors: -/// - The training time which depends on the capacity of the LargeFaceList. -/// - The acceptable latency for getting the training status. -/// - The call frequency and cost. -/// -/// Estimated training time for LargeFaceList in different scale: -/// - 1,000 faces cost about 1 to 2 seconds. -/// - 10,000 faces cost about 5 to 10 seconds. -/// - 100,000 faces cost about 1 to 2 minutes. -/// - 1,000,000 faces cost about 10 to 30 minutes. -/// -/// The Id of the LargeFaceList for training. -/// The time interval for getting training status in milliseconds. -/// A task of waiting for LargeFaceList training finish. -private static async Task TrainLargeFaceList( - string largeFaceListId, - int timeIntervalInMilliseconds = 1000) -{ - // Trigger a train call. - await FaceClient.LargeTrainLargeFaceListAsync(largeFaceListId); - - // Wait for training finish. - while (true) - { - Task.Delay(timeIntervalInMilliseconds).Wait(); - var status = await faceClient.LargeFaceList.TrainAsync(largeFaceListId); - - if (status.Status == Status.Running) - { - continue; - } - else if (status.Status == Status.Succeeded) - { - break; - } - else - { - throw new Exception("The train operation is failed!"); - } - } -} -``` - -Previously, a typical use of FaceList with added faces and FindSimilar looked like the following: - -```csharp -// Create a FaceList. -const string FaceListId = "myfacelistid_001"; -const string FaceListName = "MyFaceListDisplayName"; -const string ImageDir = @"/path/to/FaceList/images"; -faceClient.FaceList.CreateAsync(FaceListId, FaceListName).Wait(); - -// Add Faces to the FaceList. -Parallel.ForEach( - Directory.GetFiles(ImageDir, "*.jpg"), - async imagePath => - { - using (Stream stream = File.OpenRead(imagePath)) - { - await faceClient.FaceList.AddFaceFromStreamAsync(FaceListId, stream); - } - }); - -// Perform FindSimilar. -const string QueryImagePath = @"/path/to/query/image"; -var results = new List(); -using (Stream stream = File.OpenRead(QueryImagePath)) -{ - var faces = faceClient.Face.DetectWithStreamAsync(stream).Result; - foreach (var face in faces) - { - results.Add(await faceClient.Face.FindSimilarAsync(face.FaceId, FaceListId, 20)); - } -} -``` - -When migrating it to LargeFaceList, it becomes the following: - -```csharp -// Create a LargeFaceList. 
-const string LargeFaceListId = "mylargefacelistid_001"; -const string LargeFaceListName = "MyLargeFaceListDisplayName"; -const string ImageDir = @"/path/to/FaceList/images"; -faceClient.LargeFaceList.CreateAsync(LargeFaceListId, LargeFaceListName).Wait(); - -// Add Faces to the LargeFaceList. -Parallel.ForEach( - Directory.GetFiles(ImageDir, "*.jpg"), - async imagePath => - { - using (Stream stream = File.OpenRead(imagePath)) - { - await faceClient.LargeFaceList.AddFaceFromStreamAsync(LargeFaceListId, stream); - } - }); - -// Train() is newly added operation for LargeFaceList. -// Must call it before FindSimilarAsync() to ensure the newly added faces searchable. -await TrainLargeFaceList(LargeFaceListId); - -// Perform FindSimilar. -const string QueryImagePath = @"/path/to/query/image"; -var results = new List(); -using (Stream stream = File.OpenRead(QueryImagePath)) -{ - var faces = faceClient.Face.DetectWithStreamAsync(stream).Result; - foreach (var face in faces) - { - results.Add(await faceClient.Face.FindSimilarAsync(face.FaceId, largeFaceListId: LargeFaceListId)); - } -} -``` - -As previously shown, the data management and the FindSimilar part are almost the same. The only exception is that a fresh preprocessing Train operation must complete in the LargeFaceList before FindSimilar works. - -## Step 3: Train suggestions - -Although the Train operation speeds up [FindSimilar](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237) -and [Identification](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239), the training time suffers, especially when coming to large scale. The estimated training time in different scales is listed in the following table. - -| Scale for faces or persons | Estimated training time | -|:---:|:---:| -| 1,000 | 1-2 sec | -| 10,000 | 5-10 sec | -| 100,000 | 1-2 min | -| 1,000,000 | 10-30 min | - -To better utilize the large-scale feature, we recommend the following strategies. - -### Step 3.1: Customize time interval - -As is shown in `TrainLargeFaceList()`, there's a time interval in milliseconds to delay the infinite training status checking process. For LargeFaceList with more faces, using a larger interval reduces the call counts and cost. Customize the time interval according to the expected capacity of the LargeFaceList. - -The same strategy also applies to LargePersonGroup. For example, when you train a LargePersonGroup with 1 million persons, `timeIntervalInMilliseconds` might be 60,000, which is a 1-minute interval. - -### Step 3.2: Small-scale buffer - -Persons or faces in a LargePersonGroup or a LargeFaceList are searchable only after being trained. In a dynamic scenario, new persons or faces are constantly added and must be immediately searchable, yet training might take longer than desired. - -To mitigate this problem, use an extra small-scale LargePersonGroup or LargeFaceList as a buffer only for the newly added entries. This buffer takes a shorter time to train because of the smaller size. The immediate search capability on this temporary buffer should work. Use this buffer in combination with training on the master LargePersonGroup or LargeFaceList by running the master training on a sparser interval. Examples are in the middle of the night and daily. - -An example workflow: - -1. Create a master LargePersonGroup or LargeFaceList, which is the master collection. 
Create a buffer LargePersonGroup or LargeFaceList, which is the buffer collection. The buffer collection is only for newly added persons or faces. -1. Add new persons or faces to both the master collection and the buffer collection. -1. Only train the buffer collection with a short time interval to ensure that the newly added entries take effect. -1. Call Identification or FindSimilar against both the master collection and the buffer collection. Merge the results. -1. When the buffer collection size increases to a threshold or at a system idle time, create a new buffer collection. Trigger the Train operation on the master collection. -1. Delete the old buffer collection after the Train operation finishes on the master collection. - -### Step 3.3: Standalone training - -If a relatively long latency is acceptable, it isn't necessary to trigger the Train operation right after you add new data. Instead, the Train operation can be split from the main logic and triggered regularly. This strategy is suitable for dynamic scenarios with acceptable latency. It can be applied to static scenarios to further reduce the Train frequency. - -Suppose there's a `TrainLargePersonGroup` function similar to `TrainLargeFaceList`. A typical implementation of the standalone training on a LargePersonGroup by invoking the [`Timer`](/dotnet/api/system.timers.timer) class in `System.Timers` is: - -```csharp -private static void Main() -{ - // Create a LargePersonGroup. - const string LargePersonGroupId = "mylargepersongroupid_001"; - const string LargePersonGroupName = "MyLargePersonGroupDisplayName"; - faceClient.LargePersonGroup.CreateAsync(LargePersonGroupId, LargePersonGroupName).Wait(); - - // Set up standalone training at regular intervals. - const int TimeIntervalForStatus = 1000 * 60; // 1-minute interval for getting training status. - const double TimeIntervalForTrain = 1000 * 60 * 60; // 1-hour interval for training. - var trainTimer = new Timer(TimeIntervalForTrain); - trainTimer.Elapsed += (sender, args) => TrainTimerOnElapsed(LargePersonGroupId, TimeIntervalForStatus); - trainTimer.AutoReset = true; - trainTimer.Enabled = true; - - // Other operations like creating persons, adding faces, and identification, except for Train. - // ... -} - -private static void TrainTimerOnElapsed(string largePersonGroupId, int timeIntervalInMilliseconds) -{ - TrainLargePersonGroup(largePersonGroupId, timeIntervalInMilliseconds).Wait(); -} -``` - -For more information about data management and identification-related implementations, see [Add faces](how-to-add-faces.md). - -## Summary - -In this guide, you learned how to migrate the existing PersonGroup or FaceList code, not data, to the LargePersonGroup or LargeFaceList: - -- LargePersonGroup and LargeFaceList work similar to PersonGroup or FaceList, except that the Train operation is required by LargeFaceList. -- Take the proper Train strategy to dynamic data update for large-scale data sets. - -## Next steps - -Follow a how-to guide to learn how to add faces to a PersonGroup or write a script to do the Identify operation on a PersonGroup. 
- -- [Add faces](how-to-add-faces.md) -- [Face client library quickstart](../Quickstarts/client-libraries.md) diff --git a/articles/cognitive-services/Face/Face-API-How-to-Topics/specify-recognition-model.md b/articles/cognitive-services/Face/Face-API-How-to-Topics/specify-recognition-model.md deleted file mode 100644 index 94bb8e1a2b0f2..0000000000000 --- a/articles/cognitive-services/Face/Face-API-How-to-Topics/specify-recognition-model.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: How to specify a recognition model - Face -titleSuffix: Azure Cognitive Services -description: This article will show you how to choose which recognition model to use with your Azure Face application. -services: cognitive-services -author: longli0 -manager: nitinme -ms.service: cognitive-services -ms.subservice: face-api -ms.topic: how-to -ms.date: 03/05/2021 -ms.author: longl -ms.devlang: csharp -ms.custom: devx-track-csharp ---- - -# Specify a face recognition model - -This guide shows you how to specify a face recognition model for face detection, identification and similarity search using the Azure Face service. - -The Face service uses machine learning models to perform operations on human faces in images. We continue to improve the accuracy of our models based on customer feedback and advances in research, and we deliver these improvements as model updates. Developers have the option to specify which version of the face recognition model they'd like to use; they can choose the model that best fits their use case. - -The Azure Face service has four recognition models available. The models _recognition_01_ (published 2017), _recognition_02_ (published 2019), and _recognition_03_ (published 2020) are continually supported to ensure backwards compatibility for customers using FaceLists or **PersonGroup**s created with these models. A **FaceList** or **Persongroup** will always use the recognition model it was created with, and new faces will become associated with this model when they are added. This cannot be changed after creation and customers will need to use the corresponding recognition model with the corresponding **FaceList** or **PersonGroup**. - -You can move to later recognition models at your own convenience; however, you will need to create new FaceLists and PersonGroups with the recognition model of your choice. - -The _recognition_04_ model (published 2021) is the most accurate model currently available. If you're a new customer, we recommend using this model. _Recognition_04_ will provide improved accuracy for both similarity comparisons and person-matching comparisons. _Recognition_04_ improves recognition for enrolled users wearing face covers (surgical masks, N95 masks, cloth masks). Now you can build safe and seamless user experiences that use the latest _detection_03_ model to detect whether an enrolled user is wearing a face cover, and then recognize who they are with the latest _recognition_04_ model. Note that each model operates independently of the others, and a confidence threshold set for one model is not meant to be compared across the other recognition models. - -Read on to learn how to specify a selected model in different Face operations while avoiding model conflicts. If you are an advanced user and would like to determine whether you should switch to the latest model, skip to the [Evaluate different models](#evaluate-different-models) section to evaluate the new model and compare results using your current data set. 
- - -## Prerequisites - -You should be familiar with the concepts of AI face detection and identification. If you aren't, see these guides first: - -* [Face detection concepts](../concepts/face-detection.md) -* [Face recognition concepts](../concepts/face-recognition.md) -* [Call the detect API](HowtoDetectFacesinImage.md) - -## Detect faces with specified model - -Face detection identifies the visual landmarks of human faces and finds their bounding-box locations. It also extracts the face's features and stores them for use in identification. All of this information forms the representation of one face. - -The recognition model is used when the face features are extracted, so you can specify a model version when performing the Detect operation. - -When using the [Face - Detect] API, assign the model version with the `recognitionModel` parameter. The available values are: -* recognition_01 -* recognition_02 -* recognition_03 -* recognition_04 - - -Optionally, you can specify the _returnRecognitionModel_ parameter (default **false**) to indicate whether _recognitionModel_ should be returned in response. So, a request URL for the [Face - Detect] REST API will look like this: - -`https://westus.api.cognitive.microsoft.com/face/v1.0/detect[?returnFaceId][&returnFaceLandmarks][&returnFaceAttributes][&recognitionModel][&returnRecognitionModel]&subscription-key=` - -If you are using the client library, you can assign the value for `recognitionModel` by passing a string representing the version. If you leave it unassigned, a default model version of `recognition_01` will be used. See the following code example for the .NET client library. - -```csharp -string imageUrl = "https://news.microsoft.com/ceo/assets/photos/06_web.jpg"; -var faces = await faceClient.Face.DetectWithUrlAsync(imageUrl, true, true, recognitionModel: "recognition_01", returnRecognitionModel: true); -``` - -## Identify faces with specified model - -The Face service can extract face data from an image and associate it with a **Person** object (through the [Add face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b) API call, for example), and multiple **Person** objects can be stored together in a **PersonGroup**. Then, a new face can be compared against a **PersonGroup** (with the [Face - Identify] call), and the matching person within that group can be identified. - -A **PersonGroup** should have one unique recognition model for all of the **Person**s, and you can specify this using the `recognitionModel` parameter when you create the group ([PersonGroup - Create] or [LargePersonGroup - Create]). If you do not specify this parameter, the original `recognition_01` model is used. A group will always use the recognition model it was created with, and new faces will become associated with this model when they are added to it; this cannot be changed after a group's creation. To see what model a **PersonGroup** is configured with, use the [PersonGroup - Get] API with the _returnRecognitionModel_ parameter set as **true**. - -See the following code example for the .NET client library. - -```csharp -// Create an empty PersonGroup with "recognition_04" model -string personGroupId = "mypersongroupid"; -await faceClient.PersonGroup.CreateAsync(personGroupId, "My Person Group Name", recognitionModel: "recognition_04"); -``` - -In this code, a **PersonGroup** with ID `mypersongroupid` is created, and it is set up to use the _recognition_04_ model to extract face features. 
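As an illustrative continuation of the snippet above (not part of the original article), a **Person** could then be created in this group and a face registered for them before training. The person name and image URL below are placeholders; `faceClient` and `personGroupId` come from the preceding example.

```csharp
// Hypothetical follow-up: add a Person to the recognition_04 group,
// register one face image for them, then train the group.
var person = await faceClient.PersonGroupPerson.CreateAsync(personGroupId, "Anna");

await faceClient.PersonGroupPerson.AddFaceFromUrlAsync(
    personGroupId,
    person.PersonId,
    "https://example.com/anna-enrollment-photo.jpg");

// The group must be trained before Identify calls can use it.
await faceClient.PersonGroup.TrainAsync(personGroupId);
```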
- -Correspondingly, you need to specify which model to use when detecting faces to compare against this **PersonGroup** (through the [Face - Detect] API). The model you use should always be consistent with the **PersonGroup**'s configuration; otherwise, the operation will fail due to incompatible models. - -There is no change in the [Face - Identify] API; you only need to specify the model version in detection. - -## Find similar faces with specified model - -You can also specify a recognition model for similarity search. You can assign the model version with `recognitionModel` when creating the **FaceList** with [FaceList - Create] API or [LargeFaceList - Create]. If you do not specify this parameter, the `recognition_01` model is used by default. A **FaceList** will always use the recognition model it was created with, and new faces will become associated with this model when they are added to the list; you cannot change this after creation. To see what model a **FaceList** is configured with, use the [FaceList - Get] API with the _returnRecognitionModel_ parameter set as **true**. - -See the following code example for the .NET client library. - -```csharp -await faceClient.FaceList.CreateAsync(faceListId, "My face collection", recognitionModel: "recognition_04"); -``` - -This code creates a **FaceList** called `My face collection`, using the _recognition_04_ model for feature extraction. When you search this **FaceList** for similar faces to a new detected face, that face must have been detected ([Face - Detect]) using the _recognition_04_ model. As in the previous section, the model needs to be consistent. - -There is no change in the [Face - Find Similar] API; you only specify the model version in detection. - -## Verify faces with specified model - -The [Face - Verify] API checks whether two faces belong to the same person. There is no change in the Verify API with regard to recognition models, but you can only compare faces that were detected with the same model. - -## Evaluate different models - -If you'd like to compare the performances of different recognition models on your own data, you will need to: -1. Create four PersonGroups using _recognition_01_, _recognition_02_, _recognition_03_, and _recognition_04_ respectively. -1. Use your image data to detect faces and register them to **Person**s within these four **PersonGroup**s. -1. Train your PersonGroups using the PersonGroup - Train API. -1. Test with Face - Identify on all four **PersonGroup**s and compare the results. - - -If you normally specify a confidence threshold (a value between zero and one that determines how confident the model must be to identify a face), you may need to use different thresholds for different models. A threshold for one model is not meant to be shared to another and will not necessarily produce the same results. - -## Next steps - -In this article, you learned how to specify the recognition model to use with different Face service APIs. Next, follow a quickstart to get started with face detection. 
- -* [Face .NET SDK](../quickstarts/client-libraries.md?pivots=programming-language-csharp%253fpivots%253dprogramming-language-csharp) -* [Face Python SDK](../quickstarts/client-libraries.md?pivots=programming-language-python%253fpivots%253dprogramming-language-python) - -[Face - Detect]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d -[Face - Find Similar]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237 -[Face - Identify]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239 -[Face - Verify]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a -[PersonGroup - Create]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395244 -[PersonGroup - Get]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395246 -[PersonGroup Person - Add Face]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b -[PersonGroup - Train]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395249 -[LargePersonGroup - Create]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599acdee6ac60f11b48b5a9d -[FaceList - Create]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039524b -[FaceList - Get]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039524c -[LargeFaceList - Create]: https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/5a157b68d2de3616c086f2cc diff --git a/articles/cognitive-services/Face/Overview.md b/articles/cognitive-services/Face/Overview.md deleted file mode 100644 index 46a4b73826e02..0000000000000 --- a/articles/cognitive-services/Face/Overview.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: What is the Azure Face service? -titleSuffix: Azure Cognitive Services -description: The Azure Face service provides AI algorithms that you use to detect, recognize, and analyze human faces in images. -author: PatrickFarley -manager: nitinme - -ms.service: cognitive-services -ms.subservice: face-api -ms.topic: overview -ms.date: 02/28/2022 -ms.author: pafarley -ms.custom: cog-serv-seo-aug-2020 -keywords: facial recognition, facial recognition software, facial analysis, face matching, face recognition app, face search by image, facial recognition search -#Customer intent: As the developer of an app that deals with images of humans, I want to learn what the Face service does so I can determine if I should use its features. ---- - -# What is the Azure Face service? - -> [!WARNING] -> On June 11, 2020, Microsoft announced that it will not sell facial recognition technology to police departments in the United States until strong regulation, grounded in human rights, has been enacted. As such, customers may not use facial recognition features or functionality included in Azure Services, such as Face or Video Indexer, if a customer is, or is allowing use of such services by or for, a police department in the United States. 
When you create a new Face resource, you must acknowledge and agree in the Azure Portal that you will not use the service by or for a police department in the United States and that you have reviewed the Responsible AI documentation and will use this service in accordance with it. - -The Azure Face service provides AI algorithms that detect, recognize, and analyze human faces in images. Facial recognition software is important in many different scenarios, such as identity verification, touchless access control, and face blurring for privacy. - -This documentation contains the following types of articles: -* The [quickstarts](./Quickstarts/client-libraries.md) are step-by-step instructions that let you make calls to the service and get results in a short period of time. -* The [how-to guides](./Face-API-How-to-Topics/HowtoDetectFacesinImage.md) contain instructions for using the service in more specific or customized ways. -* The [conceptual articles](./concepts/face-detection.md) provide in-depth explanations of the service's functionality and features. -* The [tutorials](./enrollment-overview.md) are longer guides that show you how to use this service as a component in broader business solutions. - -## Example use cases - -**Identity verification**: Verify someone's identity against a government-issued ID card like a passport or driver's license or other enrollment image. You can use this verification to grant access to digital or physical services or recover an account. Specific access scenarios include opening a new account, verifying a worker, or administering an online assessment. Identity verification can be done once when a person is onboarded, and repeated when they access a digital or physical service. - -**Touchless access control**: Compared to today’s methods like cards or tickets, opt-in face identification enables an enhanced access control experience while reducing the hygiene and security risks from card sharing, loss, or theft. Facial recognition assists the check-in process with a human in the loop for check-ins in airports, stadiums, theme parks, buildings, reception kiosks at offices, hospitals, gyms, clubs, or schools. - -**Face redaction**: Redact or blur detected faces of people recorded in a video to protect their privacy. - - -## Face detection and analysis - -Face detection is required as a first step in all the other scenarios. The Detect API detects human faces in an image and returns the rectangle coordinates of their locations. It also returns a unique ID that represents the stored face data. This is used in later operations to identify or verify faces. - -Optionally, face detection can extract a set of face-related attributes, such as head pose, age, emotion, facial hair, and glasses. These attributes are general predictions, not actual classifications. Some attributes are useful to ensure that your application is getting high-quality face data when users add themselves to a Face service. For example, your application could advise users to take off their sunglasses if they're wearing sunglasses. - -> [!NOTE] -> The face detection feature is also available through the [Computer Vision service](../computer-vision/overview.md). However, if you want to use other Face operations like Identify, Verify, Find Similar, or Face grouping, you should use this service instead. - -For more information on face detection and analysis, see the [Face detection](concepts/face-detection.md) concepts article. 
Also see the [Detect API](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) reference documentation. - - -## Identity verification - -Modern enterprises and apps can use the Face identification and Face verification operations to verify that a user is who they claim to be. - -### Identification - -Face identification can address "one-to-many" matching of one face in an image to a set of faces in a secure repository. Match candidates are returned based on how closely their face data matches the query face. This scenario is used in granting building or airport access to a certain group of people or verifying the user of a device. - -The following image shows an example of a database named `"myfriends"`. Each group can contain up to 1 million different person objects. Each person object can have up to 248 faces registered. - -![A grid with three columns for different people, each with three rows of face images](./Images/person.group.clare.jpg) - -After you create and train a group, you can do identification against the group with a new detected face. If the face is identified as a person in the group, the person object is returned. - -### Verification - -The verification operation answers the question, "Do these two faces belong to the same person?". - -Verification is also a "one-to-one" matching of a face in an image to a single face from a secure repository or photo to verify that they're the same individual. Verification can be used for Identity Verification, such as a banking app that enables users to open a credit account remotely by taking a new picture of themselves and sending it with a picture of their photo ID. - -For more information about identity verification, see the [Facial recognition](concepts/face-recognition.md) concepts guide or the [Identify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239) and [Verify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a) API reference documentation. - - -## Find similar faces - -The Find Similar operation does face matching between a target face and a set of candidate faces, finding a smaller set of faces that look similar to the target face. This is useful for doing a face search by image. - -The service supports two working modes, **matchPerson** and **matchFace**. The **matchPerson** mode returns similar faces after filtering for the same person by using the [Verify API](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a). The **matchFace** mode ignores the same-person filter. It returns a list of similar candidate faces that may or may not belong to the same person. - -The following example shows the target face: - -![A woman smiling](./Images/FaceFindSimilar.QueryFace.jpg) - -And these images are the candidate faces: - -![Five images of people smiling. Images A and B show the same person.](./Images/FaceFindSimilar.Candidates.jpg) - -To find four similar faces, the **matchPerson** mode returns A and B, which show the same person as the target face. The **matchFace** mode returns A, B, C, and D, which is exactly four candidates, even if some aren't the same person as the target or have low similarity. 
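As a rough sketch of how the two modes could be selected with the .NET client library used elsewhere in this documentation set (this snippet is not part of the original article), the face IDs below are placeholders that would come from earlier Detect calls on the target and candidate images:

```csharp
// Sketch only: choose the Find Similar working mode.
// faceClient and the face IDs (from prior Detect calls) are placeholders.
using System;
using System.Collections.Generic;
using Microsoft.Azure.CognitiveServices.Vision.Face.Models;

Guid targetFaceId = faceIdOfTarget;
IList<Guid?> candidateFaceIds = new List<Guid?>
{
    faceIdA, faceIdB, faceIdC, faceIdD, faceIdE
};

// matchPerson (default): returns only candidates judged to be the same person.
var samePersonMatches = await faceClient.Face.FindSimilarAsync(
    targetFaceId,
    faceIds: candidateFaceIds,
    mode: FindSimilarMatchMode.MatchPerson);

// matchFace: returns visually similar candidates, same person or not.
var similarFaceMatches = await faceClient.Face.FindSimilarAsync(
    targetFaceId,
    faceIds: candidateFaceIds,
    maxNumOfCandidatesReturned: 4,
    mode: FindSimilarMatchMode.MatchFace);
```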
For more information, see the [Facial recognition](concepts/face-recognition.md) concepts guide or the [Find Similar API](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237) reference documentation. - -## Group faces - -The Group operation divides a set of unknown faces into several smaller groups based on similarity. Each group is a disjoint proper subset of the original set of faces. It also returns a single "messyGroup" array that contains the face IDs for which no similarities were found. - -All of the faces in a returned group are likely to belong to the same person, but there can be several different groups for a single person. Those groups are differentiated by another factor, such as expression, for example. For more information, see the [Facial recognition](concepts/face-recognition.md) concepts guide or the [Group API](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395238) reference documentation. - -## Data privacy and security - -As with all of the Cognitive Services resources, developers who use the Face service must be aware of Microsoft's policies on customer data. For more information, see the [Cognitive Services page](https://www.microsoft.com/trustcenter/cloudservices/cognitiveservices) on the Microsoft Trust Center. - -## Next steps - -Follow a quickstart to code the basic components of a face recognition app in the language of your choice. - -- [Client library quickstart](quickstarts/client-libraries.md). diff --git a/articles/cognitive-services/Face/QuickStarts/client-libraries.md b/articles/cognitive-services/Face/QuickStarts/client-libraries.md deleted file mode 100644 index a1bedb0d3ef96..0000000000000 --- a/articles/cognitive-services/Face/QuickStarts/client-libraries.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: 'Quickstart: Use the Face client library' -titleSuffix: Azure Cognitive Services -description: The Face API offers client libraries that makes it easy to detect, find similar, identify, verify and more. -services: cognitive-services -author: PatrickFarley -manager: nitinme -zone_pivot_groups: programming-languages-set-face -ms.service: cognitive-services -ms.subservice: face-api -ms.topic: quickstart -ms.date: 09/27/2021 -ms.author: pafarley -ms.devlang: csharp, golang, javascript, python -ms.custom: devx-track-python, devx-track-csharp, cog-serv-seo-aug-2020, mode-api -keywords: face search by image, facial recognition search, facial recognition, face recognition app ---- - -# Quickstart: Use the Face client library - -::: zone pivot="programming-language-csharp" - -[!INCLUDE [C# quickstart](../includes/quickstarts/csharp-sdk.md)] - -::: zone-end - -::: zone pivot="programming-language-javascript" - -[!INCLUDE [JavaScript quickstart](../includes/quickstarts/javascript-sdk.md)] - -::: zone-end - -::: zone pivot="programming-language-python" - -[!INCLUDE [Python quickstart](../includes/quickstarts/python-sdk.md)] - -::: zone-end - -::: zone pivot="programming-language-rest-api" - -[!INCLUDE [cURL quickstart](../includes/quickstarts/rest-api.md)] - -::: zone-end diff --git a/articles/cognitive-services/Face/ReleaseNotes.md b/articles/cognitive-services/Face/ReleaseNotes.md deleted file mode 100644 index ae01c8d70dfb0..0000000000000 --- a/articles/cognitive-services/Face/ReleaseNotes.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: What's new in Azure Face service? 
-titleSuffix: Azure Cognitive Services -description: Stay up to date on recent releases and updates to the Azure Face service. -services: cognitive-services -author: PatrickFarley -manager: nitinme - -ms.service: cognitive-services -ms.subservice: face-api -ms.topic: overview -ms.date: 09/27/2021 -ms.author: pafarley -ms.custom: contperf-fy21q3, contperf-fy22q1 ---- - -# What's new in Azure Face service? - -The Azure Face service is updated on an ongoing basis. Use this article to stay up to date with new features, enhancements, fixes, and documentation updates. - -## February 2022 - -### New Quality Attribute in Detection_01 and Detection_03 -* To help system builders and their customers capture high quality images which are necessary for high quality outputs from Face API, we’re introducing a new quality attribute **QualityForRecognition** to help decide whether an image is of sufficient quality to attempt face recognition. The value is an informal rating of low, medium, or high. The new attribute is only available when using any combinations of detection models `detection_01` or `detection_03`, and recognition models `recognition_03` or `recognition_04`. Only "high" quality images are recommended for person enrollment and quality above "medium" is recommended for identification scenarios. To learn more about the new quality attribute, see [Face detection and attributes](concepts/face-detection.md) and see how to use it with [QuickStart](./quickstarts/client-libraries.md?pivots=programming-language-csharp&tabs=visual-studio). - - -## July 2021 - -### New HeadPose and Landmarks improvements for Detection_03 - -* The Detection_03 model has been updated to support facial landmarks. -* The landmarks feature in Detection_03 is much more precise, especially in the eyeball landmarks which are crucial for gaze tracking. - - -## April 2021 - -### PersonDirectory data structure - -* In order to perform face recognition operations such as Identify and Find Similar, Face API customers need to create an assorted list of **Person** objects. The new **PersonDirectory** is a data structure that contains unique IDs, optional name strings, and optional user metadata strings for each **Person** identity added to the directory. Currently, the Face API offers the **LargePersonGroup** structure which has similar functionality but is limited to 1 million identities. The **PersonDirectory** structure can scale up to 75 million identities. -* Another major difference between **PersonDirectory** and previous data structures is that you'll no longer need to make any Train calls after adding faces to a **Person** object—the update process happens automatically. For more details see [Use the PersonDirectory structure](Face-API-How-to-Topics/use-persondirectory.md). - - -## February 2021 - -### New Face API detection model -* The new Detection 03 model is the most accurate detection model currently available. If you're a new a customer, we recommend using this model. Detection 03 improves both recall and precision on smaller faces found within images (64x64 pixels). Additional improvements include an overall reduction in false positives and improved detection on rotated face orientations. Combining Detection 03 with the new Recognition 04 model will provide improved recognition accuracy as well. See [Specify a face detection model](./face-api-how-to-topics/specify-detection-model.md) for more details. 
-### New detectable Face attributes -* The `faceMask` attribute is available with the latest Detection 03 model, along with the additional attribute `"noseAndMouthCovered"` which detects whether the face mask is worn as intended, covering both the nose and mouth. To use the latest mask detection capability, users need to specify the detection model in the API request: assign the model version with the _detectionModel_ parameter to `detection_03`. See [Specify a face detection model](./face-api-how-to-topics/specify-detection-model.md) for more details. -### New Face API Recognition Model -* The new Recognition 04 model is the most accurate recognition model currently available. If you're a new customer, we recommend using this model for verification and identification. It improves upon the accuracy of Recognition 03, including improved recognition for users wearing face covers (surgical masks, N95 masks, cloth masks). Note that we recommend against enrolling images of users wearing face covers as this will lower recognition quality. Now customers can build safe and seamless user experiences that detect whether a user is wearing a face cover with the latest Detection 03 model, and recognize them with the latest Recognition 04 model. See [Specify a face recognition model](./face-api-how-to-topics/specify-recognition-model.md) for more details. - - -## January 2021 -### Mitigate latency -* The Face team published a new article detailing potential causes of latency when using the service and possible mitigation strategies. See [Mitigate latency when using the Face service](./face-api-how-to-topics/how-to-mitigate-latency.md). - -## December 2020 -### Customer configuration for Face ID storage -* While the Face Service does not store customer images, the extracted face feature(s) will be stored on server. The Face ID is an identifier of the face feature and will be used in [Face - Identify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239), [Face - Verify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a), and [Face - Find Similar](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237). The stored face features will expire and be deleted 24 hours after the original detection call. Customers can now determine the length of time these Face IDs are cached. The maximum value is still up to 24 hours, but a minimum value of 60 seconds can now be set. The new time ranges for Face IDs being cached is any value between 60 seconds and 24 hours. More details can be found in the [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) API reference (the *faceIdTimeToLive* parameter). - -## November 2020 -### Sample Face enrollment app -* The team published a sample Face enrollment app to demonstrate best practices for establishing meaningful consent and creating high-accuracy face recognition systems through high-quality enrollments. The open-source sample can be found in the [Build an enrollment app](build-enrollment-app.md) guide and on [GitHub](https://github.com/Azure-Samples/cognitive-services-FaceAPIEnrollmentSample), ready for developers to deploy or customize. - -## August 2020 -### Customer-managed encryption of data at rest -* The Face service automatically encrypts your data when persisting it to the cloud. 
The Face service encryption protects your data to help you meet your organizational security and compliance commitments. By default, your subscription uses Microsoft-managed encryption keys. There is also a new option to manage your subscription with your own keys called customer-managed keys (CMK). More details can be found at [Customer-managed keys](./encrypt-data-at-rest.md). - -## April 2020 -### New Face API Recognition Model -* The new recognition 03 model is the most accurate model currently available. If you're a new customer, we recommend using this model. Recognition 03 will provide improved accuracy for both similarity comparisons and person-matching comparisons. More details can be found at [Specify a face recognition model](./face-api-how-to-topics/specify-recognition-model.md). - -## June 2019 - -### New Face API detection model -* The new Detection 02 model features improved accuracy on small, side-view, occluded, and blurry faces. Use it through [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), [FaceList - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395250), [LargeFaceList - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/5a158c10d2de3616c086f2d3), [PersonGroup Person - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b) and [LargePersonGroup Person - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599adf2a3a7b9412a4d53f42) by specifying the new face detection model name `detection_02` in `detectionModel` parameter. More details in [How to specify a detection model](Face-API-How-to-Topics/specify-detection-model.md). - -## April 2019 - -### Improved attribute accuracy -* Improved overall accuracy of the `age` and `headPose` attributes. The `headPose` attribute is also updated with the `pitch` value enabled now. Use these attributes by specifying them in the `returnFaceAttributes` parameter of [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) `returnFaceAttributes` parameter. -### Improved processing speeds -* Improved speeds of [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), [FaceList - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395250), [LargeFaceList - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/5a158c10d2de3616c086f2d3), [PersonGroup Person - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b) and [LargePersonGroup Person - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599adf2a3a7b9412a4d53f42) operations. - -## March 2019 - -### New Face API recognition model -* The Recognition 02 model has improved accuracy. 
Use it through [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), [FaceList - Create](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039524b), [LargeFaceList - Create](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/5a157b68d2de3616c086f2cc), [PersonGroup - Create](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395244) and [LargePersonGroup - Create](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599acdee6ac60f11b48b5a9d) by specifying the new face recognition model name `recognition_02` in `recognitionModel` parameter. More details in [How to specify a recognition model](Face-API-How-to-Topics/specify-recognition-model.md). - -## January 2019 - -### Face Snapshot feature -* This feature allows the service to support data migration across subscriptions: [Snapshot](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/snapshot-get). More details in [How to Migrate your face data to a different Face subscription](Face-API-How-to-Topics/how-to-migrate-face-data.md). - -## October 2018 - -### API messages -* Refined description for `status`, `createdDateTime`, `lastActionDateTime`, and `lastSuccessfulTrainingDateTime` in [PersonGroup - Get Training Status](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395247), [LargePersonGroup - Get Training Status](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599ae32c6ac60f11b48b5aa5), and [LargeFaceList - Get Training Status](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/5a1582f8d2de3616c086f2cf). - -## May 2018 - -### Improved attribute accuracy -* Improved `gender` attribute significantly and also improved `age`, `glasses`, `facialHair`, `hair`, `makeup` attributes. Use them through [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) `returnFaceAttributes` parameter. -### Increased file size limit -* Increased input image file size limit from 4 MB to 6 MB in [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), [FaceList - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395250), [LargeFaceList - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/5a158c10d2de3616c086f2d3), [PersonGroup Person - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b) and [LargePersonGroup Person - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599adf2a3a7b9412a4d53f42). - -## March 2018 - -### New data structure -* [LargeFaceList](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/5a157b68d2de3616c086f2cc) and [LargePersonGroup](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599acdee6ac60f11b48b5a9d). 
More details in [How to use the large-scale feature](Face-API-How-to-Topics/how-to-use-large-scale.md). -* Increased [Face - Identify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239) `maxNumOfCandidatesReturned` parameter from [1, 5] to [1, 100] and default to 10. - -## May 2017 - -### New detectable Face attributes -* Added `hair`, `makeup`, `accessory`, `occlusion`, `blur`, `exposure`, and `noise` attributes in [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) `returnFaceAttributes` parameter. -* Supported 10K persons in a PersonGroup and [Face - Identify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239). -* Supported pagination in [PersonGroup Person - List](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395241) with optional parameters: `start` and `top`. -* Supported concurrency in adding/deleting faces against different FaceLists and different persons in PersonGroup. - -## March 2017 - -### New detectable Face attribute -* Added `emotion` attribute in [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) `returnFaceAttributes` parameter. -### Fixed issues -* Face could not be re-detected with rectangle returned from [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) as `targetFace` in [FaceList - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395250) and [PersonGroup Person - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b). -* The detectable face size is set to ensure it is strictly between 36x36 to 4096x4096 pixels. - -## November 2016 -### New subscription tier -* Added Face Storage Standard subscription to store additional persisted faces when using [PersonGroup Person - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b) or [FaceList - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395250) for identification or similarity matching. The stored images are charged at $0.5 per 1000 faces and this rate is prorated on a daily basis. Free tier subscriptions continue to be limited to 1,000 total persons. - -## October 2016 -### API messages -* Changed the error message of more than one face in the `targetFace` from 'There are more than one face in the image' to 'There is more than one face in the image' in [FaceList - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395250) and [PersonGroup Person - Add Face](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b). - -## July 2016 -### New features -* Supported Face to Person object authentication in [Face - Verify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a). 
-* Added optional `mode` parameter enabling selection of two working modes: `matchPerson` and `matchFace` in [Face - Find Similar](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237) and default is `matchPerson`. -* Added optional `confidenceThreshold` parameter for user to set the threshold of whether one face belongs to a Person object in [Face - Identify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239). -* Added optional `start` and `top` parameters in [PersonGroup - List](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395248) to enable user to specify the start point and the total PersonGroups number to list. - -## V1.0 changes from V0 - -* Updated service root endpoint from ```https://westus.api.cognitive.microsoft.com/face/v0/``` to ```https://westus.api.cognitive.microsoft.com/face/v1.0/```. Changes applied to: - [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), [Face - Identify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239), [Face - Find Similar](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237) and [Face - Group](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395238). -* Updated the minimal detectable face size to 36x36 pixels. Faces smaller than 36x36 pixels will not be detected. -* Deprecated the PersonGroup and Person data in Face V0. Those data cannot be accessed with the Face V1.0 service. -* Deprecated the V0 endpoint of Face API on June 30, 2016. diff --git a/articles/cognitive-services/Face/breadcrumb/toc.yml b/articles/cognitive-services/Face/breadcrumb/toc.yml deleted file mode 100644 index c0cd44f8feaad..0000000000000 --- a/articles/cognitive-services/Face/breadcrumb/toc.yml +++ /dev/null @@ -1,11 +0,0 @@ -- name: Azure - tocHref: /azure/ - topicHref: /azure/index - items: - - name: Cognitive Services - tocHref: /azure/cognitive-services/ - topicHref: /azure/cognitive-services/index - items: - - name: Face - tocHref: /azure/cognitive-services/ - topicHref: /azure/cognitive-services/face/index \ No newline at end of file diff --git a/articles/cognitive-services/Face/concepts/face-detection.md b/articles/cognitive-services/Face/concepts/face-detection.md deleted file mode 100644 index 912e7e9f0e683..0000000000000 --- a/articles/cognitive-services/Face/concepts/face-detection.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: "Face detection and attributes concepts" -titleSuffix: Azure Cognitive Services -description: Learn more about face detection; face detection is the action of locating human faces in an image and optionally returning different kinds of face-related data. -services: cognitive-services -author: PatrickFarley -manager: nitinme - -ms.service: cognitive-services -ms.subservice: face-api -ms.topic: conceptual -ms.date: 10/27/2021 -ms.author: pafarley ---- - -# Face detection and attributes - -This article explains the concepts of face detection and face attribute data. Face detection is the process of locating human faces in an image and optionally returning different kinds of face-related data. 
- -You use the [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) API to detect faces in an image. To get started using the REST API or a client SDK, follow a [quickstart](../Quickstarts/client-libraries.md). Or, for a more in-depth guide, see [Call the detect API](../Face-API-How-to-Topics/HowtoDetectFacesinImage.md). - -## Face rectangle - -Each detected face corresponds to a `faceRectangle` field in the response. This is a set of pixel coordinates for the left, top, width, and height of the detected face. Using these coordinates, you can get the location and size of the face. In the API response, faces are listed in size order from largest to smallest. - -## Face ID - -The face ID is a unique identifier string for each detected face in an image. You can request a face ID in your [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) API call. - -## Face landmarks - -Face landmarks are a set of easy-to-find points on a face, such as the pupils or the tip of the nose. By default, there are 27 predefined landmark points. The following figure shows all 27 points: - -![A face diagram with all 27 landmarks labeled](../Images/landmarks.1.jpg) - -The coordinates of the points are returned in units of pixels. - -The Detection_03 model currently has the most accurate landmark detection. The eye and pupil landmarks it returns are precise enough to enable gaze tracking of the face. - -## Attributes - -Attributes are a set of features that can optionally be detected by the [Face - Detect](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236) API. The following attributes can be detected: - -* **Accessories**. Whether the given face has accessories. This attribute returns possible accessories including headwear, glasses, and mask, with confidence score between zero and one for each accessory. -* **Age**. The estimated age in years of a particular face. -* **Blur**. The blurriness of the face in the image. This attribute returns a value between zero and one and an informal rating of low, medium, or high. -* **Emotion**. A list of emotions with their detection confidence for the given face. Confidence scores are normalized, and the scores across all emotions add up to one. The emotions returned are happiness, sadness, neutral, anger, contempt, disgust, surprise, and fear. -* **Exposure**. The exposure of the face in the image. This attribute returns a value between zero and one and an informal rating of underExposure, goodExposure, or overExposure. -* **Facial hair**. The estimated facial hair presence and the length for the given face. -* **Gender**. The estimated gender of the given face. Possible values are male, female, and genderless. -* **Glasses**. Whether the given face has eyeglasses. Possible values are NoGlasses, ReadingGlasses, Sunglasses, and Swimming Goggles. -* **Hair**. The hair type of the face. This attribute shows whether the hair is visible, whether baldness is detected, and what hair colors are detected. -* **Head pose**. The face's orientation in 3D space. This attribute is described by the roll, yaw, and pitch angles in degrees, which are defined according to the [right-hand rule](https://en.wikipedia.org/wiki/Right-hand_rule). The order of three angles is roll-yaw-pitch, and each angle's value range is from -180 degrees to 180 degrees. 
3D orientation of the face is estimated by the roll, yaw, and pitch angles in order. See the following diagram for angle mappings: - - ![A head with the pitch, roll, and yaw axes labeled](../Images/headpose.1.jpg) - - For more details on how to use these values, see the [Head pose how-to guide](../Face-API-How-to-Topics/how-to-use-headpose.md). -* **Makeup**. Whether the face has makeup. This attribute returns a Boolean value for eyeMakeup and lipMakeup. -* **Mask**. Whether the face is wearing a mask. This attribute returns a possible mask type, and a Boolean value to indicate whether nose and mouth are covered. -* **Noise**. The visual noise detected in the face image. This attribute returns a value between zero and one and an informal rating of low, medium, or high. -* **Occlusion**. Whether there are objects blocking parts of the face. This attribute returns a Boolean value for eyeOccluded, foreheadOccluded, and mouthOccluded. -* **Smile**. The smile expression of the given face. This value is between zero for no smile and one for a clear smile. -* **QualityForRecognition** The overall image quality regarding whether the image being used in the detection is of sufficient quality to attempt face recognition on. The value is an informal rating of low, medium, or high. Only "high" quality images are recommended for person enrollment, and quality at or above "medium" is recommended for identification scenarios. - >[!NOTE] - > The availability of each attribute depends on the detection model specified. QualityForRecognition attribute also depends on the recognition model, as it is currently only available when using a combination of detection model detection_01 or detection_03, and recognition model recognition_03 or recognition_04. - -> [!IMPORTANT] -> Face attributes are predicted through the use of statistical algorithms. They might not always be accurate. Use caution when you make decisions based on attribute data. - -## Input data - -Use the following tips to make sure that your input images give the most accurate detection results: - -* The supported input image formats are JPEG, PNG, GIF (the first frame), BMP. -* The image file size should be no larger than 6 MB. -* The minimum detectable face size is 36 x 36 pixels in an image that is no larger than 1920 x 1080 pixels. Images with larger than 1920 x 1080 pixels have a proportionally larger minimum face size. Reducing the face size might cause some faces not to be detected, even if they are larger than the minimum detectable face size. -* The maximum detectable face size is 4096 x 4096 pixels. -* Faces outside the size range of 36 x 36 to 4096 x 4096 pixels will not be detected. -* Some faces might not be recognized because of technical challenges, such as: - * Images with extreme lighting, for example, severe backlighting. - * Obstructions that block one or both eyes. - * Differences in hair type or facial hair. - * Changes in facial appearance because of age. - * Extreme facial expressions. - -### Input data with orientation information: - -Some input images with JPEG format might contain orientation information in Exchangeable image file format (Exif) metadata. If Exif orientation is available, images will be automatically rotated to the correct orientation before sending for face detection. The face rectangle, landmarks, and head pose for each detected face will be estimated based on the rotated image. - -To properly display the face rectangle and landmarks, you need to make sure the image is rotated correctly. 
Most of image visualization tools will auto-rotate the image according to its Exif orientation by default. For other tools, you might need to apply the rotation using your own code. The following examples show a face rectangle on a rotated image (left) and a non-rotated image (right). - -![Two face images with and without rotation](../Images/image-rotation.png) - -### Video input - -If you're detecting faces from a video feed, you may be able to improve performance by adjusting certain settings on your video camera: - -* **Smoothing**: Many video cameras apply a smoothing effect. You should turn this off if you can because it creates a blur between frames and reduces clarity. -* **Shutter Speed**: A faster shutter speed reduces the amount of motion between frames and makes each frame clearer. We recommend shutter speeds of 1/60 second or faster. -* **Shutter Angle**: Some cameras specify shutter angle instead of shutter speed. You should use a lower shutter angle if possible. This will result in clearer video frames. - - >[!NOTE] - > A camera with a lower shutter angle will receive less light in each frame, so the image will be darker. You'll need to determine the right level to use. - -## Next steps - -Now that you're familiar with face detection concepts, learn how to write a script that detects faces in a given image. - -* [Call the detect API](../Face-API-How-to-Topics/HowtoDetectFacesinImage.md) diff --git a/articles/cognitive-services/Face/concepts/face-recognition.md b/articles/cognitive-services/Face/concepts/face-recognition.md deleted file mode 100644 index e41351f92e108..0000000000000 --- a/articles/cognitive-services/Face/concepts/face-recognition.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: "Face recognition concepts" -titleSuffix: Azure Cognitive Services -description: Learn the concept of Face recognition, its related operations, and the underlying data structures. -services: cognitive-services -author: PatrickFarley -manager: nitinme - -ms.service: cognitive-services -ms.subservice: face-api -ms.topic: conceptual -ms.date: 10/27/2021 -ms.author: pafarley ---- - -# Face recognition concepts - -This article explains the concept of Face recognition, its related operations, and the underlying data structures. Broadly, Face recognition refers to the method of verifying or identifying an individual by their face. - -Verification is one-to-one matching that takes two faces and returns whether they are the same face, and identification is one-to-many matching that takes a single face as input and returns a set of matching candidates. Face recognition is important in implementing the identity verification scenario, which enterprises and apps can use to verify that a (remote) user is who they claim to be. - -## Related data structures - -The recognition operations use mainly the following data structures. These objects are stored in the cloud and can be referenced by their ID strings. ID strings are always unique within a subscription, but name fields may be duplicated. - -|Name|Description| -|:--|:--| -|DetectedFace| This single face representation is retrieved by the [face detection](../Face-API-How-to-Topics/HowtoDetectFacesinImage.md) operation. Its ID expires 24 hours after it's created.| -|PersistedFace| When DetectedFace objects are added to a group, such as FaceList or Person, they become PersistedFace objects. 
They can be [retrieved](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039524c) at any time and don't expire.| -|[FaceList](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039524b) or [LargeFaceList](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/5a157b68d2de3616c086f2cc)| This data structure is an assorted list of PersistedFace objects. A FaceList has a unique ID, a name string, and optionally a user data string.| -|[Person](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523c)| This data structure is a list of PersistedFace objects that belong to the same person. It has a unique ID, a name string, and optionally a user data string.| -|[PersonGroup](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395244) or [LargePersonGroup](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599acdee6ac60f11b48b5a9d)| This data structure is an assorted list of Person objects. It has a unique ID, a name string, and optionally a user data string. A PersonGroup must be [trained](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395249) before it can be used in recognition operations.| -|PersonDirectory | This data structure is like **LargePersonGroup** but offers additional storage capacity and other added features. For more information, see [Use the PersonDirectory structure](../Face-API-How-to-Topics/use-persondirectory.md). - -## Recognition operations - -This section details how the underlying operations use the above data structures to identify and verify a face. - -### PersonGroup creation and training - -You need to create a [PersonGroup](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395244) or [LargePersonGroup](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/599acdee6ac60f11b48b5a9d) to store the set of people to match against. PersonGroups hold [Person](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523c) objects, which each represent an individual person and hold a set of face data belonging to that person. - -The [Train](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395249) operation prepares the data set to be used in face data comparisons. - -### Identification - -The [Identify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239) operation takes one or several source face IDs (from a DetectedFace or PersistedFace object) and a PersonGroup or LargePersonGroup. It returns a list of the Person objects that each source face might belong to. Returned Person objects are wrapped as Candidate objects, which have a prediction confidence value. - -### Verification - -The [Verify](https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a) operation takes a single face ID (from a DetectedFace or PersistedFace object) and a Person object. It determines whether the face belongs to that same person. 
Verification is one-to-one matching and can be used as a final check on the results from the Identify API call. However, you can optionally pass in the PersonGroup to which the candidate Person belongs to improve the API performance. - -## Input data - -Use the following tips to ensure that your input images give the most accurate recognition results: - -* The supported input image formats are JPEG, PNG, GIF (the first frame), BMP. -* Image file size should be no larger than 6 MB. -* When you create Person objects, use photos that feature different kinds of angles and lighting. -* Some faces might not be recognized because of technical challenges, such as: - * Images with extreme lighting, for example, severe backlighting. - * Obstructions that block one or both eyes. - * Differences in hair type or facial hair. - * Changes in facial appearance because of age. - * Extreme facial expressions. -* You can utilize the qualityForRecognition attribute in the [face detection](../Face-API-How-to-Topics/HowtoDetectFacesinImage.md) operation when using applicable detection models as a general guideline of whether the image is likely of sufficient quality to attempt face recognition on. Only "high" quality images are recommended for person enrollment and quality at or above "medium" is recommended for identification scenarios. - -## Next steps - -Now that you're familiar with face recognition concepts, Write a script that identifies faces against a trained PersonGroup. - -* [Face client library quickstart](../Quickstarts/client-libraries.md) \ No newline at end of file diff --git a/articles/cognitive-services/Face/context/context.yml b/articles/cognitive-services/Face/context/context.yml deleted file mode 100644 index ca7f24c8d40c8..0000000000000 --- a/articles/cognitive-services/Face/context/context.yml +++ /dev/null @@ -1,4 +0,0 @@ -### YamlMime: ContextObject -brand: azure -breadcrumb_path: ../breadcrumb/toc.yml -toc_rel: ../toc.yml \ No newline at end of file diff --git a/articles/cognitive-services/Face/includes/create-face-resource.md b/articles/cognitive-services/Face/includes/create-face-resource.md deleted file mode 100644 index f67003da42f0c..0000000000000 --- a/articles/cognitive-services/Face/includes/create-face-resource.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Container support -titleSuffix: Azure Cognitive Services -services: cognitive-services -author: aahill -manager: nitinme -ms.service: cognitive-services -ms.topic: include -ms.date: 7/5/2019 -ms.author: aahi ---- - -## Create an Face resource - -1. Sign into the [Azure portal](https://portal.azure.com) -1. Click [Create **Face**](https://portal.azure.com/#create/Microsoft.CognitiveServicesFace) resource -1. Enter all required settings: - - |Setting|Value| - |--|--| - |Name|Desired name (2-64 characters)| - |Subscription|Select appropriate subscription| - |Location|Select any nearby and available location| - |Pricing Tier|`F0` - the minimal pricing tier| - |Resource Group|Select an available resource group| - -1. Click **Create** and wait for the resource to be created. After it is created, navigate to the resource page -1. Collect configured `endpoint` and an API key: - - |Resource Tab in Portal|Setting|Value| - |--|--|--| - |**Overview**|Endpoint|Copy the endpoint. It looks similar to `https://face.cognitiveservices.azure.com/face/v1.0`| - |**Keys**|API Key|Copy 1 of the two keys. 
It is a 32 alphanumeric-character string with no spaces or dashes, `xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx`.| diff --git a/articles/cognitive-services/Face/includes/quickstarts/csharp-sdk.md b/articles/cognitive-services/Face/includes/quickstarts/csharp-sdk.md deleted file mode 100644 index f5045a57b7e03..0000000000000 --- a/articles/cognitive-services/Face/includes/quickstarts/csharp-sdk.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: "Face .NET client library quickstart" -description: Use the Face client library for .NET to detect and identify faces (facial recognition search). -services: cognitive-services -author: PatrickFarley -manager: nitinme -ms.service: cognitive-services -ms.subservice: face-api -ms.topic: include -ms.date: 05/03/2022 -ms.author: pafarley ---- - -Get started with facial recognition using the Face client library for .NET. The Face service provides you with access to advanced algorithms for detecting and recognizing human faces in images. Follow these steps to install the package and try out the example code for basic face identification using remote images. - -[Reference documentation](/dotnet/api/overview/azure/cognitiveservices/face-readme) | [Library source code](https://github.com/Azure/azure-sdk-for-net/tree/master/sdk/cognitiveservices/Vision.Face) | [Package (NuGet)](https://www.nuget.org/packages/Microsoft.Azure.CognitiveServices.Vision.Face/2.7.0-preview.1) | [Samples](/samples/browse/?products=azure&term=face) - -## Prerequisites - -* Azure subscription - [Create one for free](https://azure.microsoft.com/free/cognitive-services/) -* The [Visual Studio IDE](https://visualstudio.microsoft.com/vs/) or current version of [.NET Core](https://dotnet.microsoft.com/download/dotnet-core). -* [!INCLUDE [contributor-requirement](../../../includes/quickstarts/contributor-requirement.md)] -* Once you have your Azure subscription, create a Face resource in the Azure portal to get your key and endpoint. After it deploys, click **Go to resource**. - * You will need the key and endpoint from the resource you create to connect your application to the Face API. You'll paste your key and endpoint into the code below later in the quickstart. - * You can use the free pricing tier (`F0`) to try the service, and upgrade later to a paid tier for production. - -## Identify faces - -1. Create a new C# application - - #### [Visual Studio IDE](#tab/visual-studio) - - Using Visual Studio, create a new .NET Core application. - - ### Install the client library - - Once you've created a new project, install the client library by right-clicking on the project solution in the **Solution Explorer** and selecting **Manage NuGet Packages**. In the package manager that opens select **Browse**, check **Include prerelease**, and search for `Microsoft.Azure.CognitiveServices.Vision.Face`. Select version `2.7.0-preview.1`, and then **Install**. - - #### [CLI](#tab/cli) - - In a console window (such as cmd, PowerShell, or Bash), use the `dotnet new` command to create a new console app with the name `face-quickstart`. This command creates a simple "Hello World" C# project with a single source file: *program.cs*. - - ```console - dotnet new console -n face-quickstart - ``` - - Change your directory to the newly created app folder. You can build the application with: - - ```console - dotnet build - ``` - - The build output should contain no warnings or errors. - - ```console - ... - Build succeeded. - 0 Warning(s) - 0 Error(s) - ... 
- ``` - - ### Install the client library - - Within the application directory, install the Face client library for .NET with the following command: - - ```console - dotnet add package Microsoft.Azure.CognitiveServices.Vision.Face --version 2.7.0-preview.1 - ``` - - --- -1. Add the following code into the *Program.cs* file. - - [!code-csharp[](~/cognitive-services-quickstart-code/dotnet/Face/FaceQuickstart-single.cs?name=snippet_single)] - - -1. Enter your key and endpoint in the corresponding fields. - - > [!IMPORTANT] - > Go to the Azure portal. If the Face resource you created in the **Prerequisites** section deployed successfully, click the **Go to Resource** button under **Next Steps**. You can find your key and endpoint in the resource's **key and endpoint** page, under **resource management**. - - > [!IMPORTANT] - > Remember to remove the key from your code when you're done, and never post it publicly. For production, consider using a secure way of storing and accessing your credentials. See the Cognitive Services [security](../../../cognitive-services-security.md) article for more information. - -1. Run the application - - #### [Visual Studio IDE](#tab/visual-studio) - - Run the application by clicking the **Debug** button at the top of the IDE window. - - #### [CLI](#tab/cli) - - Run the application from your application directory with the `dotnet run` command. - - ```dotnet - dotnet run - ``` - - --- - -## Output - -```console -========IDENTIFY FACES======== - -Create a person group (3972c063-71b3-4328-8579-6d190ee76f99). -Create a person group person 'Family1-Dad'. -Add face to the person group person(Family1-Dad) from image `Family1-Dad1.jpg` -Add face to the person group person(Family1-Dad) from image `Family1-Dad2.jpg` -Create a person group person 'Family1-Mom'. -Add face to the person group person(Family1-Mom) from image `Family1-Mom1.jpg` -Add face to the person group person(Family1-Mom) from image `Family1-Mom2.jpg` -Create a person group person 'Family1-Son'. -Add face to the person group person(Family1-Son) from image `Family1-Son1.jpg` -Add face to the person group person(Family1-Son) from image `Family1-Son2.jpg` -Create a person group person 'Family1-Daughter'. -Create a person group person 'Family2-Lady'. -Add face to the person group person(Family2-Lady) from image `Family2-Lady1.jpg` -Add face to the person group person(Family2-Lady) from image `Family2-Lady2.jpg` -Create a person group person 'Family2-Man'. -Add face to the person group person(Family2-Man) from image `Family2-Man1.jpg` -Add face to the person group person(Family2-Man) from image `Family2-Man2.jpg` - -Train person group 3972c063-71b3-4328-8579-6d190ee76f99. -Training status: Succeeded. - -4 face(s) with 4 having sufficient quality for recognition detected from image `identification1.jpg` -Person 'Family1-Dad' is identified for face in: identification1.jpg - 994bfd7a-0d8f-4fae-a5a6-c524664cbee7, confidence: 0.96725. -Person 'Family1-Mom' is identified for face in: identification1.jpg - 0c9da7b9-a628-429d-97ff-cebe7c638fb5, confidence: 0.96921. -No person is identified for face in: identification1.jpg - a881259c-e811-4f7e-a35e-a453e95ca18f, -Person 'Family1-Son' is identified for face in: identification1.jpg - 53772235-8193-46eb-bdfc-1ebc25ea062e, confidence: 0.92886. - -End of quickstart. -``` - -> [!TIP] -> The Face API runs on a set of pre-built models that are static by nature (the model's performance will not regress or improve as the service is run). 
The results that the model produces might change if Microsoft updates the model's backend without migrating to an entirely new model version. To take advantage of a newer version of a model, you can retrain your **PersonGroup**, specifying the newer model as a parameter with the same enrollment images. - -## Clean up resources - -If you want to clean up and remove a Cognitive Services subscription, you can delete the resource or resource group. Deleting the resource group also deletes any other resources associated with it. - -* [Portal](../../../cognitive-services-apis-create-account.md#clean-up-resources) -* [Azure CLI](../../../cognitive-services-apis-create-account-cli.md#clean-up-resources) - -To delete the **PersonGroup** you created in this quickstart, run the following code in your program: - -[!code-csharp[](~/cognitive-services-quickstart-code/dotnet/Face/FaceQuickstart.cs?name=snippet_persongroup_delete)] - -Define the deletion method with the following code: - -[!code-csharp[](~/cognitive-services-quickstart-code/dotnet/Face/FaceQuickstart.cs?name=snippet_deletepersongroup)] - -## Next steps - -In this quickstart, you learned how to use the Face client library for .NET to do basic face identification. Next, learn about the different face detection models and how to specify the right model for your use case. - -> [!div class="nextstepaction"] -> [Specify a face detection model version](../../Face-API-How-to-Topics/specify-detection-model.md) - -* [What is the Face service?](../../overview.md) -* More extensive sample code can be found on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/dotnet/Face/FaceQuickstart.cs). \ No newline at end of file diff --git a/articles/cognitive-services/Face/includes/quickstarts/go-sdk.md b/articles/cognitive-services/Face/includes/quickstarts/go-sdk.md deleted file mode 100644 index 5c8fc26c91868..0000000000000 --- a/articles/cognitive-services/Face/includes/quickstarts/go-sdk.md +++ /dev/null @@ -1,252 +0,0 @@ ---- -title: "Face Go client library quickstart" -description: Use the Face client library for Go to detect and identify faces (facial recognition search). -services: cognitive-services -author: PatrickFarley -manager: nitinme -ms.service: cognitive-services -ms.subservice: face-api -ms.topic: include -ms.date: 10/26/2020 -ms.author: pafarley ---- -Get started with facial recognition using the Face client library for Go. Follow these steps to install the package and try out the example code for basic tasks. The Face service provides you with access to advanced algorithms for detecting and recognizing human faces in images. - -Use the Face service client library for Go to: - -* [Detect and analyze faces](#detect-and-analyze-faces) -* [Identify a face](#identify-a-face) -* [Verify faces](#verify-faces) - -[Reference documentation](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face) | [Library source code](https://github.com/Azure/azure-sdk-for-go/tree/master/services/cognitiveservices/v1.0/face) | [SDK download](https://github.com/Azure/azure-sdk-for-go) - -## Prerequisites - -* The latest version of [Go](https://go.dev/dl/) -* Azure subscription - [Create one for free](https://azure.microsoft.com/free/cognitive-services/) -* [!INCLUDE [contributor-requirement](../../../includes/quickstarts/contributor-requirement.md)] -* Once you have your Azure subscription, create a Face resource in the Azure portal to get your key and endpoint. 
After it deploys, click **Go to resource**. - * You will need the key and endpoint from the resource you create to connect your application to the Face API. You'll paste your key and endpoint into the code below later in the quickstart. - * You can use the free pricing tier (`F0`) to try the service, and upgrade later to a paid tier for production. -* After you get a key and endpoint, [create environment variables](../../../cognitive-services-apis-create-account.md#configure-an-environment-variable-for-authentication) for the key and endpoint, named `FACE_SUBSCRIPTION_KEY` and `FACE_ENDPOINT`, respectively. - -## Setting up - -### Create a Go project directory - -In a console window (cmd, PowerShell, Terminal, Bash), create a new workspace for your Go project, named `my-app`, and navigate to it. - -``` -mkdir -p my-app/{src, bin, pkg} -cd my-app -``` - -Your workspace will contain three folders: - -* **src** - This directory will contain source code and packages. Any packages installed with the `go get` command will be in this folder. -* **pkg** - This directory will contain the compiled Go package objects. These files all have a `.a` extension. -* **bin** - This directory will contain the binary executable files that are created when you run `go install`. - -> [!TIP] -> To learn more about the structure of a Go workspace, see the [Go language documentation](https://go.dev/doc/code.html#Workspaces). This guide includes information for setting `$GOPATH` and `$GOROOT`. - -### Install the client library for Go - -Next, install the client library for Go: - -```bash -go get -u github.com/Azure/azure-sdk-for-go/tree/master/services/cognitiveservices/v1.0/face -``` - -or if you use dep, within your repo run: - -```bash -dep ensure -add https://github.com/Azure/azure-sdk-for-go/tree/master/services/cognitiveservices/v1.0/face -``` - -### Create a Go application - -Next, create a file in the **src** directory named `sample-app.go`: - -```bash -cd src -touch sample-app.go -``` - -Open `sample-app.go` in your preferred IDE or text editor. Then add the package name and import the following libraries: - -[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_imports)] - -Next, you'll begin adding code to carry out different Face service operations. - -## Object model - -The following classes and interfaces handle some of the major features of the Face service Go client library. - -|Name|Description| -|---|---| -|[BaseClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#BaseClient) | This class represents your authorization to use the Face service, and you need it for all Face functionality. You instantiate it with your subscription information, and you use it to produce instances of other classes. | -|[Client](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#Client)|This class handles the basic detection and recognition tasks that you can do with human faces. | -|[DetectedFace](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#DetectedFace)|This class represents all of the data that was detected from a single face in an image. You can use it to retrieve detailed information about the face.| -|[ListClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#ListClient)|This class manages the cloud-stored **FaceList** constructs, which store an assorted set of faces. 
| -|[PersonGroupPersonClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#PersonGroupPersonClient)| This class manages the cloud-stored **Person** constructs, which store a set of faces that belong to a single person.| -|[PersonGroupClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#PersonGroupClient)| This class manages the cloud-stored **PersonGroup** constructs, which store a set of assorted **Person** objects. | -|[SnapshotClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#SnapshotClient)|This class manages the Snapshot functionality. You can use it to temporarily save all of your cloud-based Face data and migrate that data to a new Azure subscription. | - -## Code examples - -These code samples show you how to complete basic tasks using the Face service client library for Go: - -* [Authenticate the client](#authenticate-the-client) -* [Detect and analyze faces](#detect-and-analyze-faces) -* [Identify a face](#identify-a-face) -* [Verify faces](#verify-faces) - -## Authenticate the client - -> [!NOTE] -> This quickstart assumes you've [created environment variables](../../../cognitive-services-apis-create-account.md#configure-an-environment-variable-for-authentication) for your Face key and endpoint, named `FACE_SUBSCRIPTION_KEY` and `FACE_ENDPOINT` respectively. - -Create a **main** function and add the following code to it to instantiate a client with your endpoint and key. You create a **[CognitiveServicesAuthorizer](https://godoc.org/github.com/Azure/go-autorest/autorest#CognitiveServicesAuthorizer)** object with your key, and use it with your endpoint to create a **[Client](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#Client)** object. This code also instantiates a context object, which is needed for the creation of client objects. It also defines a remote location where some of the sample images in this quickstart are found. - -[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_main_client)] - - -## Detect and analyze faces - -Face detection is required as a first step in Face Analysis and Identity Verification. This section shows how to return the extra face attribute data. If you only want to detect faces for face identification or verification, skip to the later sections. - - -Add the following code in your **main** method. This code defines a remote sample image and specifies which face features to extract from the image. It also specifies which AI model to use to extract data from the detected face(s). See [Specify a recognition model](../../Face-API-How-to-Topics/specify-recognition-model.md) for information on these options. Finally, the **[DetectWithURL](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#Client.DetectWithURL)** method does the face detection operation on the image and saves the results in program memory. - -[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_detect)] - -> [!TIP] -> You can also detect faces in a local image. See the [Client](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#Client) methods such as **DetectWithStream**. 
- -### Display detected face data - -The next block of code takes the first element in the array of **[DetectedFace](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#DetectedFace)** objects and prints its attributes to the console. If you used an image with multiple faces, you should iterate through the array instead. - -[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_detect_display)] - - - - - -## Identify a face - -The Identify operation takes an image of a person (or multiple people) and looks to find the identity of each face in the image (facial recognition search). It compares each detected face to a **PersonGroup**, a database of different **Person** objects whose facial features are known. - -### Get Person images - -To step through this scenario, you need to save the following images to the root directory of your project: https://github.com/Azure-Samples/cognitive-services-sample-data-files/tree/master/Face/images. - -This group of images contains three sets of single-face images that correspond to three different people. The code will define three **PersonGroup Person** objects and associate them with image files that start with `woman`, `man`, and `child`. - -### Create a PersonGroup - -Once you've downloaded your images, add the following code to the bottom of your **main** method. This code authenticates a **[PersonGroupClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#PersonGroupClient)** object and then uses it to define a new **PersonGroup**. - -[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_pg_setup)] - -### Create PersonGroup Persons - -The next block of code authenticates a **[PersonGroupPersonClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#PersonGroupPersonClient)** and uses it to define three new **PersonGroup Person** objects. These objects each represent a single person in the set of images. - -[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_pgp_setup)] - -### Assign faces to Persons - -The following code sorts the images by their prefix, detects faces, and assigns the faces to each respective **PersonGroup Person** object, based on the image file name. - -[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_pgp_assign)] - -> [!TIP] -> You can also create a **PersonGroup** from remote images referenced by URL. See the [PersonGroupPersonClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#PersonGroupPersonClient) methods such as **AddFaceFromURL**. - -### Train the PersonGroup - -Once you've assigned faces, you train the **PersonGroup** so it can identify the visual features associated with each of its **Person** objects. The following code calls the asynchronous **train** method and polls the result, printing the status to the console. - -[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_pg_train)] - -> [!TIP] -> The Face API runs on a set of pre-built models that are static by nature (the model's performance will not regress or improve as the service is run). The results that the model produces might change if Microsoft updates the model's backend without migrating to an entirely new model version. 
To take advantage of a newer version of a model, you can retrain your **PersonGroup**, specifying the newer model as a parameter with the same enrollment images. - -### Get a test image - -The following code looks in the root of your project for an image _test-image-person-group.jpg_ and loads it into program memory. You can find this image in the same repo as the images used to create the **PersonGroup**: https://github.com/Azure-Samples/cognitive-services-sample-data-files/tree/master/Face/images. - -[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_id_source_get)] - -### Detect source faces in test image - -The next code block does ordinary face detection on the test image to retrieve all of the faces and save them to an array. - -[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_id_source_detect)] - -### Identify faces from source image - -The **[Identify](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#Client.Identify)** method takes the array of detected faces and compares them to the given **PersonGroup** (defined and trained in the earlier section). If it can match a detected face to a **Person** in the group, it saves the result. - -[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_id)] - -This code then prints detailed match results to the console. - -[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_id_print)] - - -### Verify faces - -The Verify operation takes a face ID and either another face ID or a **Person** object and determines whether they belong to the same person. Verification can be used to double-check the face match returned by the Identify operation. - -The following code detects faces in two source images and then verifies each of them against a face detected from a target image. - -### Get test images - -The following code blocks declare variables that will point to the target and source images for the verification operation. - -[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_ver_images)] - -### Detect faces for verification - -The following code detects faces in the source and target images and saves them to variables. - -[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_ver_detect_source)] - -[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_ver_detect_target)] - -### Get verification results - -The following code compares each of the source images to the target image and prints a message indicating whether they belong to the same person. - -[!code-go[](~/cognitive-services-quickstart-code/go/Face/FaceQuickstart.go?name=snippet_ver)] - - -## Run the application - -Run your face recognition app from the application directory with the `go run ` command. - -```bash -go run sample-app.go -``` - -## Clean up resources - -If you want to clean up and remove a Cognitive Services subscription, you can delete the resource or resource group. Deleting the resource group also deletes any other resources associated with it. 
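In addition to removing the resource itself, you may want to delete the **PersonGroup** created during this quickstart. The **Delete** method mentioned below does this through the SDK; the equivalent REST request is sketched here, with the endpoint, key, and group ID as placeholders.

```bash
# Placeholders: substitute your own endpoint, key, and the ID of the PersonGroup you created.
curl -X DELETE "https://<your-resource-name>.cognitiveservices.azure.com/face/v1.0/persongroups/<person-group-id>" \
  -H "Ocp-Apim-Subscription-Key: <your-face-key>"
```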
- -* [Portal](../../../cognitive-services-apis-create-account.md#clean-up-resources) -* [Azure CLI](../../../cognitive-services-apis-create-account-cli.md#clean-up-resources) - -If you created a **PersonGroup** in this quickstart and you want to delete it, call the **[Delete](https://godoc.org/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/face#PersonGroupClient.Delete)** method. - -## Next steps - -In this quickstart, you learned how to use the Face client library for Go to do basic facial recognition tasks. Next, learn about the different face detection models and how to specify the right model for your use case. - -> [!div class="nextstepaction"] -> [Specify a face detection model version](../../Face-API-How-to-Topics/specify-detection-model.md) - -* [What is the Face service?](../../overview.md) -* The source code for this sample can be found on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/go/Face/FaceQuickstart.go). diff --git a/articles/cognitive-services/Face/includes/quickstarts/javascript-sdk.md b/articles/cognitive-services/Face/includes/quickstarts/javascript-sdk.md deleted file mode 100644 index 689e638976d67..0000000000000 --- a/articles/cognitive-services/Face/includes/quickstarts/javascript-sdk.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -title: "Face JavaScript client library quickstart" -description: Use the Face client library for JavaScript to detect and identify faces (facial recognition search). -services: cognitive-services -author: PatrickFarley -manager: nitinme -ms.service: cognitive-services -ms.subservice: face-api -ms.topic: include -ms.date: 05/03/2022 -ms.author: pafarley ---- - -## Quickstart: Face client library for JavaScript - -Get started with facial recognition using the Face client library for JavaScript. The Face service provides you with access to advanced algorithms for detecting and recognizing human faces in images. Follow these steps to install the package and try out the example code for basic face identification using remote images. - -[Reference documentation](/javascript/api/overview/azure/cognitiveservices/face) | [Library source code](https://github.com/Azure/azure-sdk-for-js/tree/master/sdk/cognitiveservices/cognitiveservices-face) | [Package (npm)](https://www.npmjs.com/package/@azure/cognitiveservices-face) | [Samples](/samples/browse/?products=azure&term=face&languages=javascript) - -## Prerequisites - -* Azure subscription - [Create one for free](https://azure.microsoft.com/free/cognitive-services/) -* The latest version of [Node.js](https://nodejs.org/en/) -* [!INCLUDE [contributor-requirement](../../../includes/quickstarts/contributor-requirement.md)] -* Once you have your Azure subscription, [Create a Face resource](https://portal.azure.com/#create/Microsoft.CognitiveServicesFace) in the Azure portal to get your key and endpoint. After it deploys, click **Go to resource**. - * You will need the key and endpoint from the resource you create to connect your application to the Face API. You'll paste your key and endpoint into the code below later in the quickstart. - * You can use the free pricing tier (`F0`) to try the service, and upgrade later to a paid tier for production. - -## Identify faces - -1. Create a new Node.js application - - In a console window (such as cmd, PowerShell, or Bash), create a new directory for your app, and navigate to it.
- - ```console - mkdir myapp && cd myapp - ``` - - Run the `npm init` command to create a node application with a `package.json` file. - - ```console - npm init - ``` - -1. Install the `ms-rest-azure` and `azure-cognitiveservices-face` NPM packages: - - ```console - npm install @azure/cognitiveservices-face @azure/ms-rest-js uuid - ``` - - Your app's `package.json` file will be updated with the dependencies. - -1. Create a file named `index.js`, open it in a text editor, and paste in the following code: - - :::code language="js" source="~/cognitive-services-quickstart-code/javascript/Face/sdk_quickstart-single.js" id="snippet_single"::: - - -1. Enter your key and endpoint into the corresponding fields. - - > [!IMPORTANT] - > Go to the Azure portal. If the Face resource you created in the **Prerequisites** section deployed successfully, click the **Go to Resource** button under **Next Steps**. You can find your key and endpoint in the resource's **key and endpoint** page, under **resource management**. - - > [!IMPORTANT] - > Remember to remove the key from your code when you're done, and never post it publicly. For production, consider using a secure way of storing and accessing your credentials. See the Cognitive Services [security](../../../cognitive-services-security.md) article for more information. - -1. Run the application with the `node` command on your quickstart file. - - ```console - node index.js - ``` - -## Output - -```console -========IDENTIFY FACES======== - -Creating a person group with ID: c08484e0-044b-4610-8b7e-c957584e5d2d -Adding faces to person group... -Create a persongroup person: Family1-Dad. -Create a persongroup person: Family1-Mom. -Create a persongroup person: Family2-Lady. -Create a persongroup person: Family1-Son. -Create a persongroup person: Family1-Daughter. -Create a persongroup person: Family2-Man. -Add face to the person group person: (Family1-Son) from image: Family1-Son2.jpg. -Add face to the person group person: (Family1-Dad) from image: Family1-Dad2.jpg. -Add face to the person group person: (Family1-Mom) from image: Family1-Mom1.jpg. -Add face to the person group person: (Family2-Man) from image: Family2-Man1.jpg. -Add face to the person group person: (Family1-Son) from image: Family1-Son1.jpg. -Add face to the person group person: (Family2-Lady) from image: Family2-Lady2.jpg. -Add face to the person group person: (Family1-Mom) from image: Family1-Mom2.jpg. -Add face to the person group person: (Family1-Dad) from image: Family1-Dad1.jpg. -Add face to the person group person: (Family2-Man) from image: Family2-Man2.jpg. -Add face to the person group person: (Family2-Lady) from image: Family2-Lady1.jpg. -Done adding faces to person group. - -Training person group: c08484e0-044b-4610-8b7e-c957584e5d2d. -Waiting 10 seconds... -Training status: succeeded. - -Person: Family1-Mom is identified for face in: identification1.jpg with ID: b7f7f542-c338-4a40-ad52-e61772bc6e14. Confidence: 0.96921. -Person: Family1-Son is identified for face in: identification1.jpg with ID: 600dc1b4-b2c4-4516-87de-edbbdd8d7632. Confidence: 0.92886. -Person: Family1-Dad is identified for face in: identification1.jpg with ID: e83b494f-9ad2-473f-9d86-3de79c01e345. Confidence: 0.96725. -``` - -## Clean up resources - -If you want to clean up and remove a Cognitive Services subscription, you can delete the resource or resource group. Deleting the resource group also deletes any other resources associated with it. 
- -* [Portal](../../../cognitive-services-apis-create-account.md#clean-up-resources) -* [Azure CLI](../../../cognitive-services-apis-create-account-cli.md#clean-up-resources) - -## Next steps - -In this quickstart, you learned how to use the Face client library for JavaScript to do basic face identification. Next, learn about the different face detection models and how to specify the right model for your use case. - -> [!div class="nextstepaction"] -> [Specify a face detection model version](../../Face-API-How-to-Topics/specify-detection-model.md) - -* [What is the Face service?](../../overview.md) -* More extensive sample code can be found on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/javascript/Face/sdk_quickstart.js). diff --git a/articles/cognitive-services/Face/includes/quickstarts/python-sdk.md b/articles/cognitive-services/Face/includes/quickstarts/python-sdk.md deleted file mode 100644 index c15843c49ca3d..0000000000000 --- a/articles/cognitive-services/Face/includes/quickstarts/python-sdk.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: "Face Python client library quickstart" -description: Use the Face client library for Python to detect faces and identify faces (facial recognition search). -services: cognitive-services -author: PatrickFarley -manager: nitinme -ms.service: cognitive-services -ms.subservice: face-api -ms.topic: include -ms.date: 05/03/2022 -ms.author: pafarley ---- - -Get started with facial recognition using the Face client library for Python. Follow these steps to install the package and try out the example code for basic tasks. The Face service provides you with access to advanced algorithms for detecting and recognizing human faces in images. Follow these steps to install the package and try out the example code for basic face identification using remote images. - -[Reference documentation](/python/api/overview/azure/cognitiveservices/face-readme) | [Library source code](https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/cognitiveservices/azure-cognitiveservices-vision-face) | [Package (PiPy)](https://pypi.org/project/azure-cognitiveservices-vision-face/) | [Samples](/samples/browse/?products=azure&term=face) - -## Prerequisites - -* Azure subscription - [Create one for free](https://azure.microsoft.com/free/cognitive-services/) -* [Python 3.x](https://www.python.org/) - * Your Python installation should include [pip](https://pip.pypa.io/en/stable/). You can check if you have pip installed by running `pip --version` on the command line. Get pip by installing the latest version of Python. -* [!INCLUDE [contributor-requirement](../../../includes/quickstarts/contributor-requirement.md)] -* Once you have your Azure subscription, create a Face resource in the Azure portal to get your key and endpoint. After it deploys, click **Go to resource**. - * You will need the key and endpoint from the resource you create to connect your application to the Face API. You'll paste your key and endpoint into the code below later in the quickstart. - * You can use the free pricing tier (`F0`) to try the service, and upgrade later to a paid tier for production. - -## Identify faces - -1. Install the client library - - After installing Python, you can install the client library with: - - ```console - pip install --upgrade azure-cognitiveservices-vision-face - ``` - -1. Create a new Python application - - Create a new Python script—*quickstart-file.py*, for example. 
Then open it in your preferred editor or IDE and paste in the following code. - - [!code-python[](~/cognitive-services-quickstart-code/python/Face/FaceQuickstart-single.py?name=snippet_single)] - -1. Enter your key and endpoint into the corresponding fields. - - > [!IMPORTANT] - > Go to the Azure portal. If the Face resource you created in the **Prerequisites** section deployed successfully, click the **Go to Resource** button under **Next Steps**. You can find your key and endpoint in the resource's **key and endpoint** page, under **resource management**. - - > [!IMPORTANT] - > Remember to remove the key from your code when you're done, and never post it publicly. For production, consider using a secure way of storing and accessing your credentials. See the Cognitive Services [security](../../../cognitive-services-security.md) article for more information. - -1. Run your face recognition app from the application directory with the `python` command. - - ```console - python quickstart-file.py - ``` - - > [!TIP] - > The Face API runs on a set of pre-built models that are static by nature (the model's performance will not regress or improve as the service is run). The results that the model produces might change if Microsoft updates the model's backend without migrating to an entirely new model version. To take advantage of a newer version of a model, you can retrain your **PersonGroup**, specifying the newer model as a parameter with the same enrollment images. - -## Clean up resources - -If you want to clean up and remove a Cognitive Services subscription, you can delete the resource or resource group. Deleting the resource group also deletes any other resources associated with it. - -* [Portal](../../../cognitive-services-apis-create-account.md#clean-up-resources) -* [Azure CLI](../../../cognitive-services-apis-create-account-cli.md#clean-up-resources) - -To delete the **PersonGroup** you created in this quickstart, run the following code in your script: - -[!code-python[](~/cognitive-services-quickstart-code/python/Face/FaceQuickstart.py?name=snippet_deletegroup)] - -## Next steps - -In this quickstart, you learned how to use the Face client library for Python to do basic face identification. Next, learn about the different face detection models and how to specify the right model for your use case. - -> [!div class="nextstepaction"] -> [Specify a face detection model version](../../Face-API-How-to-Topics/specify-detection-model.md) - -* [What is the Face service?](../../overview.md) -* More extensive sample code can be found on [GitHub](https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/python/Face/FaceQuickstart.py). diff --git a/articles/cognitive-services/Face/includes/quickstarts/rest-api.md b/articles/cognitive-services/Face/includes/quickstarts/rest-api.md deleted file mode 100644 index 5b2aac8d7d5be..0000000000000 --- a/articles/cognitive-services/Face/includes/quickstarts/rest-api.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: "Face REST API quickstart" -description: Use the Face REST API with cURL to detect and analyze faces. -services: cognitive-services -author: PatrickFarley -manager: nitinme -ms.service: cognitive-services -ms.subservice: face-api -ms.topic: include -ms.date: 12/06/2020 -ms.author: pafarley ---- - -Get started with facial recognition using the Face REST API. The Face service provides you with access to advanced algorithms for detecting and recognizing human faces in images. 
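Before working through the steps, it can help to see the shape of a single Face REST call. The following sketch sends a basic face detection request with cURL; the endpoint, key, and image URL are placeholders.

```bash
# Placeholders: substitute your own resource endpoint, key, and an image URL that contains a face.
curl -X POST "https://<your-resource-name>.cognitiveservices.azure.com/face/v1.0/detect?returnFaceId=true" \
  -H "Ocp-Apim-Subscription-Key: <your-face-key>" \
  -H "Content-Type: application/json" \
  -d '{"url": "<image-url>"}'
```

The response is a JSON array with one entry per detected face; the `faceId` value in each entry is what the Identify step later in this quickstart consumes.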
- -> [!NOTE] -> This quickstart uses cURL commands to call the REST API. You can also call the REST API using a programming language. Complex scenarios like face identification are easier to implement using a language SDK. See the GitHub samples for examples in [C#](https://github.com/Azure-Samples/cognitive-services-quickstart-code/tree/master/dotnet/Face/rest), [Python](https://github.com/Azure-Samples/cognitive-services-quickstart-code/tree/master/python/Face/rest), [Java](https://github.com/Azure-Samples/cognitive-services-quickstart-code/tree/master/java/Face/rest), [JavaScript](https://github.com/Azure-Samples/cognitive-services-quickstart-code/tree/master/javascript/Face/rest), and [Go](https://github.com/Azure-Samples/cognitive-services-quickstart-code/tree/master/go/Face/rest). - -## Prerequisites - -* Azure subscription - [Create one for free](https://azure.microsoft.com/free/cognitive-services/) -* [!INCLUDE [contributor-requirement](../../../includes/quickstarts/contributor-requirement.md)] -* Once you have your Azure subscription, create a Face resource in the Azure portal to get your key and endpoint. After it deploys, click **Go to resource**. - * You'll need the key and endpoint from the resource you create to connect your application to the Face API. You'll paste your key and endpoint into the code below later in the quickstart. - * You can use the free pricing tier (`F0`) to try the service, and upgrade later to a paid tier for production. -* [PowerShell version 6.0+](/powershell/scripting/install/installing-powershell-core-on-windows), or a similar command-line application. - - -## Identify faces - -1. First, call the Detect API on the source face. This is the face that we'll try to identify from the larger group. Copy the following command to a text editor, insert your own key, and then copy it into a shell window and run it. - - :::code source="~/cognitive-services-quickstart-code/curl/face/detect.sh" ID="identify_detect"::: - - Save the returned face ID string to a temporary location. You'll use it again at the end. - -1. Next you'll need to create a **LargePersonGroup**. This object will store the aggregated face data of several persons. Run the following command, inserting your own key. Optionally, change the group's name and metadata in the request body. - - :::code source="~/cognitive-services-quickstart-code/curl/face/detect.sh" ID="identify_create_persongroup"::: - - Save the returned ID of the created group to a temporary location. - -1. Next, you'll create **Person** objects that belong to the group. Run the following command, inserting your own key and the ID of the **LargePersonGroup** from the previous step. This command creates a **Person** named "Family1-Dad". - - :::code source="~/cognitive-services-quickstart-code/curl/face/detect.sh" ID="identify_create_person"::: - - After you run this command, run it again with different input data to create more **Person** objects: "Family1-Mom", "Family1-Son", "Family1-Daughter", "Family2-Lady", and "Family2-Man". - - Save the IDs of each **Person** created; it's important to keep track of which person name has which ID. - -1. Next you'll need to detect new faces and associate them with the **Person** objects that exist. The following command detects a face from the image *Family1-Dad.jpg* and adds it to the corresponding person. You need to specify the `personId` as the ID that was returned when you created the "Family1-Dad" **Person** object. The image name corresponds to the name of the created **Person**. 
Also enter the **LargePersonGroup** ID and your key in the appropriate fields. - - :::code source="~/cognitive-services-quickstart-code/curl/face/detect.sh" ID="identify_add_face"::: - - Then, run the above command again with a different source image and target **Person**. The images available are: *Family1-Dad1.jpg*, *Family1-Dad2.jpg* *Family1-Mom1.jpg*, *Family1-Mom2.jpg*, *Family1-Son1.jpg*, *Family1-Son2.jpg*, *Family1-Daughter1.jpg*, *Family1-Daughter2.jpg*, *Family2-Lady1.jpg*, *Family2-Lady2.jpg*, *Family2-Man1.jpg*, and *Family2-Man2.jpg*. Be sure that the **Person** whose ID you specify in the API call matches the name of the image file in the request body. - - At the end of this step, you should have multiple **Person** objects that each have one or more corresponding faces, detected directly from the provided images. - -1. Next, train the **LargePersonGroup** with the current face data. The training operation teaches the model how to associate facial features, sometimes aggregated from multiple source images, to each single person. Insert the **LargePersonGroup** ID and your key before running the command. - - :::code source="~/cognitive-services-quickstart-code/curl/face/detect.sh" ID="identify_train"::: - -1. Now you're ready to call the Identify API, using the source face ID from the first step and the **LargePersonGroup** ID. Insert these values into the appropriate fields in the request body, and insert your key. - - :::code source="~/cognitive-services-quickstart-code/curl/face/detect.sh" ID="identify_identify"::: - - The response should give you a **Person** ID indicating the person identified with the source face. It should be the ID that corresponds to the "Family1-Dad" person, because the source face is of that person. - -## Clean up resources - -To delete the **LargePersonGroup** you created in this exercise, run the LargePersonGroup - Delete call. - -:::code source="~/cognitive-services-quickstart-code/curl/face/detect.sh" ID="identify_delete"::: - -If you want to clean up and remove a Cognitive Services subscription, you can delete the resource or resource group. Deleting the resource group also deletes any other resources associated with it. - -* [Portal](../../../cognitive-services-apis-create-account.md#clean-up-resources) -* [Azure CLI](../../../cognitive-services-apis-create-account-cli.md#clean-up-resources) - -## Next steps - -In this quickstart, you learned how to use the Face REST API to do basic facial recognition tasks. Next, learn about the different face detection models and how to specify the right model for your use case. - -> [!div class="nextstepaction"] -> [Specify a face detection model version](../../Face-API-How-to-Topics/specify-detection-model.md) - -* [What is the Face service?](../../overview.md) \ No newline at end of file diff --git a/articles/cognitive-services/Face/index.yml b/articles/cognitive-services/Face/index.yml deleted file mode 100644 index f8854276a71c0..0000000000000 --- a/articles/cognitive-services/Face/index.yml +++ /dev/null @@ -1,105 +0,0 @@ -### YamlMime:Landing - -title: Face documentation # < 60 characters -summary: "The Azure Face service provides AI algorithms that detect, recognize, and analyze human faces in images. Facial recognition software is important in many different scenarios, such as identity verification, touchless access control, and face blurring for privacy. 
\n\nOn June 11, 2020, Microsoft announced that it will not sell facial recognition technology to police departments in the United States until strong regulation, grounded in human rights, has been enacted. As such, customers may not use facial recognition features or features included in Azure Services, such as Face or Azure Video Analyzer for Media, if a customer is, or is allowing use of such services by or for, a police department in the United States." -metadata: - title: Face documentation - Quickstarts, tutorials, API reference - Azure Cognitive Services | Microsoft Docs - description: The cloud-based Azure Face service provides developers with access to advanced face algorithms. Face algorithms enable face attribute detection and face recognition for comparison and identification. Learn how to analyze content in different ways with quickstarts, tutorials, and samples. - services: cognitive-services - ms.service: cognitive-services - ms.subservice: face-api - ms.topic: landing-page - author: PatrickFarley - ms.author: pafarley - ms.date: 10/27/2021 - - -# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new - -landingContent: -# Cards and links should be based on top customer tasks or top subjects -# Start card title with a verb - # Card - - title: About the Face service - linkLists: - - linkListType: overview - links: - - text: What is the Face service? - url: Overview.md - - linkListType: whats-new - links: - - text: What's new in Face service? - url: releasenotes.md - - - title: Face recognition - linkLists: - - linkListType: concept - links: - - text: Identity verification - url: concepts/face-recognition.md - - linkListType: quickstart - links: - - text: Identify faces using a client library SDK or REST API - url: ./quickstarts/client-libraries.md?pivots=programming-language-csharp%253fpivots%253dprogramming-language-csharp - - linkListType: how-to-guide - links: - - text: Add faces to a group - url: Face-API-How-to-Topics/how-to-add-faces.md - - text: Use the large-scale feature - url: Face-API-How-to-Topics/how-to-use-large-scale.md - - text: Use the PersonDirectory structure - url: Face-API-How-to-Topics/use-persondirectory.md - - text: Specify a face recognition model version - url: Face-API-How-to-Topics/specify-recognition-model.md - - - title: Face detection and analysis - linkLists: - - linkListType: concept - links: - - text: Face detection and attributes - url: concepts/face-detection.md - - linkListType: quickstart - links: - - text: Detect faces using a client library SDK or REST API - url: quickstarts/client-libraries.md?pivots=programming-language-csharp - - linkListType: how-to-guide - links: - - text: Detect and analyze faces - url: Face-API-How-to-Topics/howtodetectfacesinimage.md - - text: Specify a face detection model version - url: Face-API-How-to-Topics/specify-detection-model.md - - - title: Face rectangles - linkLists: - - linkListType: how-to-guide - links: - - text: Detect and analyze faces - url: Face-API-How-to-Topics/howtodetectfacesinimage.md - - text: Use the HeadPose attribute - url: Face-API-How-to-Topics/how-to-use-headpose.md - - - title: Reference - linkLists: - - linkListType: reference - links: - - text: REST API - url: /rest/api/face/ - - text: .NET SDK - url: /dotnet/api/overview/azure/cognitiveservices/face-readme - - text: Python SDK - url: /python/api/overview/azure/cognitiveservices/face-readme - - text: Java SDK - url: 
/java/api/overview/azure/cognitiveservices/client/faceapi - - text: Node.js SDK - url: /javascript/api/overview/azure/cognitiveservices/face - - text: Azure PowerShell - url: /powershell/module/az.cognitiveservices/#cognitive_services - - text: Azure Command-Line Interface (CLI) - url: /cli/azure/cognitiveservices - - - title: Help and feedback - linkLists: - - linkListType: reference - links: - - text: Support and help options - url: ../cognitive-services-support-options.md?context=/azure/cognitive-services/face/context/context diff --git a/articles/cognitive-services/Face/media/index/logo_Csharp.svg b/articles/cognitive-services/Face/media/index/logo_Csharp.svg deleted file mode 100644 index 8e43620336283..0000000000000 --- a/articles/cognitive-services/Face/media/index/logo_Csharp.svg +++ /dev/null @@ -1 +0,0 @@ -logo_Csharp \ No newline at end of file diff --git a/articles/cognitive-services/Face/media/index/logo_go.svg b/articles/cognitive-services/Face/media/index/logo_go.svg deleted file mode 100644 index 55a5071714970..0000000000000 --- a/articles/cognitive-services/Face/media/index/logo_go.svg +++ /dev/null @@ -1,60 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/articles/cognitive-services/Face/media/index/logo_java.svg b/articles/cognitive-services/Face/media/index/logo_java.svg deleted file mode 100644 index 4d87bef869dbb..0000000000000 --- a/articles/cognitive-services/Face/media/index/logo_java.svg +++ /dev/null @@ -1 +0,0 @@ -logo_java \ No newline at end of file diff --git a/articles/cognitive-services/Face/media/index/logo_nodejs.svg b/articles/cognitive-services/Face/media/index/logo_nodejs.svg deleted file mode 100644 index 2deab0dd1a47a..0000000000000 --- a/articles/cognitive-services/Face/media/index/logo_nodejs.svg +++ /dev/null @@ -1 +0,0 @@ -logo_nodejs \ No newline at end of file diff --git a/articles/cognitive-services/Face/media/index/logo_python.svg b/articles/cognitive-services/Face/media/index/logo_python.svg deleted file mode 100644 index 7292d8814c42b..0000000000000 --- a/articles/cognitive-services/Face/media/index/logo_python.svg +++ /dev/null @@ -1 +0,0 @@ -logo_python \ No newline at end of file diff --git a/articles/cognitive-services/Face/toc.yml b/articles/cognitive-services/Face/toc.yml deleted file mode 100644 index b23b4a7724972..0000000000000 --- a/articles/cognitive-services/Face/toc.yml +++ /dev/null @@ -1,88 +0,0 @@ -items: -- name: Face Documentation - href: index.yml -- name: Overview - expanded: true - items: - - name: What is the Face service? - href: Overview.md - - name: Pricing - href: https://azure.microsoft.com/pricing/details/cognitive-services/face-api - - name: What's new? 
- href: releasenotes.md -- name: Quickstart - href: Quickstarts/client-libraries.md -- name: Samples - href: /samples/browse/?products=azure&term=face -- name: How-to guides - items: - - name: Detect and analyze faces - href: Face-API-How-to-Topics/HowtoDetectFacesinImage.md - - name: Find similar faces - href: Face-API-How-to-Topics/find-similar-faces.md - - name: Specify a face detection model version - href: Face-API-How-to-Topics/specify-detection-model.md - - name: Specify a face recognition model version - href: Face-API-How-to-Topics/specify-recognition-model.md - - name: Add faces to a group - href: Face-API-How-to-Topics/how-to-add-faces.md - - name: Use the large-scale feature - href: Face-API-How-to-Topics/how-to-use-large-scale.md - - name: Use the PersonDirectory structure - href: Face-API-How-to-Topics/use-persondirectory.md - - name: Use the HeadPose attribute - href: Face-API-How-to-Topics/how-to-use-headpose.md - - name: Mitigate latency when using the Face service - href: Face-API-How-to-Topics/how-to-mitigate-latency.md - - name: Analyze videos in real time - href: Face-API-How-to-Topics/HowtoAnalyzeVideo_Face.md - - name: Migrate face data - href: Face-API-How-to-Topics/how-to-migrate-face-data.md - - name: Enterprise readiness - items: - - name: Set up Virtual Networks - href: ../cognitive-services-virtual-networks.md?context=/azure/cognitive-services/face/context/context - - name: Use customer-managed keys - href: encrypt-data-at-rest.md - - name: Use Azure AD authentication - href: ../authentication.md?context=/azure/cognitive-services/face/context/context -- name: Concepts - items: - - name: Face detection and analysis - href: concepts/face-detection.md - - name: Face recognition - href: concepts/face-recognition.md -- name: Tutorials - items: - - name: Add users to a Face service - items: - - name: Best practices for enrolling users - href: enrollment-overview.md - - name: Build a React app to enroll users - href: build-enrollment-app.md -- name: Reference - items: - - name: Face REST API - href: APIReference.md - - name: .NET - href: /dotnet/api/overview/azure/cognitiveservices/face-readme - - name: Java - href: /java/api/overview/azure/cognitiveservices/client/faceapi - - name: Node.js - href: /javascript/api/overview/azure/cognitiveservices/face - - name: Python - href: /python/api/overview/azure/cognitiveservices/face-readme -- name: Resources - items: - - name: Enterprise readiness - items: - - name: Region support - href: https://azure.microsoft.com/global-infrastructure/services/?products=cognitive-services - - name: Compliance and certification - href: https://azure.microsoft.com/support/legal/cognitive-services-compliance-and-privacy/ - - name: Learn modules - href: /learn/modules/detect-analyze-faces/ - - name: Support and help options - href: ../cognitive-services-support-options.md?context=/azure/cognitive-services/face/context/context - - name: Azure updates - href: https://azure.microsoft.com/updates/?product=cognitive-services diff --git a/articles/cognitive-services/LUIS/concepts/entities.md b/articles/cognitive-services/LUIS/concepts/entities.md index 05faa5a566628..350f5eb3be877 100644 --- a/articles/cognitive-services/LUIS/concepts/entities.md +++ b/articles/cognitive-services/LUIS/concepts/entities.md @@ -133,7 +133,7 @@ You can use entities as a signal for an intent. For example, the presence of a c | Example utterance | Entity | Intent | |--|--|--| -| Book me a _fight to New York_. | City | Book Flight | +| Book me a _flight to New York_. 
| City | Book Flight | | Book me the _main conference room_. | Room | Reserve Room | ## Entities as Feature for entities diff --git a/articles/cognitive-services/QnAMaker/Overview/language-support.md b/articles/cognitive-services/QnAMaker/Overview/language-support.md index 40cf417503c28..ea44e071f6f78 100644 --- a/articles/cognitive-services/QnAMaker/Overview/language-support.md +++ b/articles/cognitive-services/QnAMaker/Overview/language-support.md @@ -32,7 +32,7 @@ Consider the following: ## Supporting multiple languages in one QnA Maker resource -This functionality is not supported in our current Generally Available (GA) stable release. Check out [question answering](https://docs.microsoft.com/azure/cognitive-services/language-service/question-answering/overview) to test out this functionality. +This functionality is not supported in our current Generally Available (GA) stable release. Check out [question answering](../../language-service/question-answering/overview.md) to test out this functionality. ## Supporting multiple languages in one knowledge base @@ -129,4 +129,4 @@ This additional ranking is an internal working of the QnA Maker's ranker. ## Next steps > [!div class="nextstepaction"] -> [Language selection](../index.yml) +> [Language selection](../index.yml) \ No newline at end of file diff --git a/articles/cognitive-services/QnAMaker/limits.md b/articles/cognitive-services/QnAMaker/limits.md index 70056781437ad..a12b2898158b8 100644 --- a/articles/cognitive-services/QnAMaker/limits.md +++ b/articles/cognitive-services/QnAMaker/limits.md @@ -124,8 +124,8 @@ These represent the limits when Prebuilt API is used to *Generate response* or c > Support for unstructured file/content and is available only in question answering. ## Alterations limits -[Alterations](https://docs.microsoft.com/rest/api/cognitiveservices/qnamaker/alterations/replace) do not allow these special characters: ',', '?', ':', ';', '\"', '\'', '(', ')', '{', '}', '[', ']', '-', '+', '.', '/', '!', '*', '-', '_', '@', '#' +[Alterations](/rest/api/cognitiveservices/qnamaker/alterations/replace) do not allow these special characters: ',', '?', ':', ';', '\"', '\'', '(', ')', '{', '}', '[', ']', '-', '+', '.', '/', '!', '*', '-', '_', '@', '#' ## Next steps -Learn when and how to change [service pricing tiers](How-To/set-up-qnamaker-service-azure.md#upgrade-qna-maker-sku). +Learn when and how to change [service pricing tiers](How-To/set-up-qnamaker-service-azure.md#upgrade-qna-maker-sku). \ No newline at end of file diff --git a/articles/cognitive-services/Speech-Service/captioning-concepts.md b/articles/cognitive-services/Speech-Service/captioning-concepts.md index d1df98893dc6b..19dc671fc8603 100644 --- a/articles/cognitive-services/Speech-Service/captioning-concepts.md +++ b/articles/cognitive-services/Speech-Service/captioning-concepts.md @@ -8,14 +8,16 @@ manager: nitinme ms.service: cognitive-services ms.subservice: speech-service ms.topic: conceptual -ms.date: 04/12/2022 +ms.date: 06/02/2022 ms.author: eur zone_pivot_groups: programming-languages-speech-sdk-cli --- # Captioning with speech to text -In this guide, you learn how to create captions with speech to text. Concepts include how to synchronize captions with your input audio, apply profanity filters, get partial results, apply customizations, and identify spoken languages for multilingual scenarios. This guide covers captioning for speech, but doesn't include speaker ID or sound effects such as bells ringing. 
+In this guide, you learn how to create captions with speech to text. Captioning is the process of converting the audio content of a television broadcast, webcast, film, video, live event, or other production into text, and then displaying the text on a screen, monitor, or other visual display system. + +Concepts include how to synchronize captions with your input audio, apply profanity filters, get partial results, apply customizations, and identify spoken languages for multilingual scenarios. This guide covers captioning for speech, but doesn't include speaker ID or sound effects such as bells ringing. Here are some common captioning scenarios: - Online courses and instructional videos @@ -30,7 +32,7 @@ The following are aspects to consider when using captioning: * Consider output formats such as SRT (SubRip Text) and WebVTT (Web Video Text Tracks). These can be loaded onto most video players such as VLC, automatically adding the captions on to your video. > [!TIP] -> Try the [Azure Video Indexer](/azure/azure-video-indexer/video-indexer-overview) as a demonstration of how you can get captions for videos that you upload. +> Try the [Azure Video Indexer](../../azure-video-indexer/video-indexer-overview.md) as a demonstration of how you can get captions for videos that you upload. Captioning can accompany real time or pre-recorded speech. Whether you're showing captions in real time or with a recording, you can use the [Speech SDK](speech-sdk.md) or [Speech CLI](spx-overview.md) to recognize speech and get transcriptions. You can also use the [Batch transcription API](batch-transcription.md) for pre-recorded video. diff --git a/articles/cognitive-services/Speech-Service/custom-neural-voice.md b/articles/cognitive-services/Speech-Service/custom-neural-voice.md index f0f8fbbb79ca7..b7527d4b31e7d 100644 --- a/articles/cognitive-services/Speech-Service/custom-neural-voice.md +++ b/articles/cognitive-services/Speech-Service/custom-neural-voice.md @@ -33,7 +33,7 @@ Next, the phoneme sequence goes into the neural acoustic model to predict acoust Neural text-to-speech voice models are trained by using deep neural networks based on the recording samples of human voices. For more information, see [this Microsoft blog post](https://techcommunity.microsoft.com/t5/azure-ai/neural-text-to-speech-extends-support-to-15-more-languages-with/ba-p/1505911). To learn more about how a neural vocoder is trained, see [this Microsoft blog post](https://techcommunity.microsoft.com/t5/azure-ai/azure-neural-tts-upgraded-with-hifinet-achieving-higher-audio/ba-p/1847860). -You can adapt the neural text-to-speech engine to fit your needs. To create a custom neural voice, use [Speech Studio](https://speech.microsoft.com/customvoice) to upload the recorded audio and corresponding scripts, train the model, and deploy the voice to a custom endpoint. Custom Neural Voice can use text provided by the user to convert text into speech in real time, or generate audio content offline with text input. You can do this by using the [REST API](./rest-text-to-speech.md), the [Speech SDK](./get-started-text-to-speech.md), or the [web portal](https://speech.microsoft.com/audiocontentcreation). +You can adapt the neural text-to-speech engine to fit your needs. To create a custom neural voice, use [Speech Studio](https://aka.ms/speechstudio/customvoice) to upload the recorded audio and corresponding scripts, train the model, and deploy the voice to a custom endpoint. 
Custom Neural Voice can use text provided by the user to convert text into speech in real time, or generate audio content offline with text input. You can do this by using the [REST API](./rest-text-to-speech.md), the [Speech SDK](./get-started-text-to-speech.md), or the [web portal](https://speech.microsoft.com/audiocontentcreation). ## Custom Neural Voice project types diff --git a/articles/cognitive-services/Speech-Service/custom-speech-overview.md b/articles/cognitive-services/Speech-Service/custom-speech-overview.md index e55e35d7a3dc4..9dbcbd868f516 100644 --- a/articles/cognitive-services/Speech-Service/custom-speech-overview.md +++ b/articles/cognitive-services/Speech-Service/custom-speech-overview.md @@ -17,11 +17,9 @@ ms.custom: contperf-fy21q2, references_regions With Custom Speech, you can evaluate and improve the Microsoft speech-to-text accuracy for your applications and products. -Out of the box, speech to text utilizes a Universal Language Model as a base model that is trained with Microsoft-owned data and reflects commonly used spoken language. This base model is pre-trained with dialects and phonetics representing a variety of common domains. As a result, consuming the base model requires no additional configuration and works very well in most scenarios. When you make a speech recognition request, the current base model for each [supported language](language-support.md) is used by default. +Out of the box, speech to text utilizes a Universal Language Model as a base model that is trained with Microsoft-owned data and reflects commonly used spoken language. The base model is pre-trained with dialects and phonetics representing a variety of common domains. When you make a speech recognition request, the most recent base model for each [supported language](language-support.md) is used by default. The base model works very well in most speech recognition scenarios. -A custom model can be used to augment the base model to improve recognition of domain-specific vocabulary specific to the application by providing text data to train the model. It can also be used to improve recognition based for the specific audio conditions of the application by providing audio data with reference transcriptions - -For more information, see [Choose a model for Custom Speech](how-to-custom-speech-choose-model.md). +A custom model can be used to augment the base model to improve recognition of domain-specific vocabulary specific to the application by providing text data to train the model. It can also be used to improve recognition based for the specific audio conditions of the application by providing audio data with reference transcriptions. ## How does it work? @@ -31,17 +29,17 @@ With Custom Speech, you can upload your own data, test and train a custom model, Here's more information about the sequence of steps shown in the previous diagram: -1. [Choose a model](how-to-custom-speech-choose-model.md) and create a Custom Speech project. Use a Speech resource that you create in the Azure portal. +1. [Create a project](how-to-custom-speech-create-project.md) and choose a model. Use a Speech resource that you create in the Azure portal. 1. [Upload test data](./how-to-custom-speech-upload-data.md). Upload test data to evaluate the Microsoft speech-to-text offering for your applications, tools, and products. -1. [Test recognition quality](how-to-custom-speech-inspect-data.md). 
Use the [Speech Studio](https://speech.microsoft.com/customspeech) to play back uploaded audio and inspect the speech recognition quality of your test data. +1. [Test recognition quality](how-to-custom-speech-inspect-data.md). Use the [Speech Studio](https://aka.ms/speechstudio/customspeech) to play back uploaded audio and inspect the speech recognition quality of your test data. 1. [Test model quantitatively](how-to-custom-speech-evaluate-data.md). Evaluate and improve the accuracy of the speech-to-text model. The Speech service provides a quantitative word error rate (WER), which you can use to determine if additional training is required. 1. [Train a model](how-to-custom-speech-train-model.md). Provide written transcripts and related text, along with the corresponding audio data. Testing a model before and after training is optional but recommended. -1. [Deploy a model](how-to-custom-speech-deploy-model.md). Once you're satisfied with the test results, deploy the model to a custom endpoint. +1. [Deploy a model](how-to-custom-speech-deploy-model.md). Once you're satisfied with the test results, deploy the model to a custom endpoint. With the exception of [batch transcription](batch-transcription.md), you must deploy a custom endpoint to use a Custom Speech model. -If you will train a custom model with audio data, choose a Speech resource [region](regions.md#speech-to-text-pronunciation-assessment-text-to-speech-and-translation) with dedicated hardware for training audio data. In regions with dedicated hardware for Custom Speech training, the Speech service will use up to 20 hours of your audio training data, and can process about 10 hours of data per day. In other regions, the Speech service uses up to 8 hours of your audio data, and can process about 1 hour of data per day. After a model is trained, you can copy it to a Speech resource that's in another region as needed for deployment. +If you will train a custom model with audio data, choose a Speech resource [region](regions.md#speech-to-text-pronunciation-assessment-text-to-speech-and-translation) with dedicated hardware for training audio data. In regions with dedicated hardware for Custom Speech training, the Speech service will use up to 20 hours of your audio training data, and can process about 10 hours of data per day. In other regions, the Speech service uses up to 8 hours of your audio data, and can process about 1 hour of data per day. After a model is trained, you can copy it to a Speech resource in another region as needed. 
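
Once a model is deployed, client applications reference the custom endpoint by its endpoint ID rather than the default base model. The following is a minimal sketch of that step using the Speech SDK for Python; the subscription key, region, endpoint ID, and `sample.wav` file are placeholder assumptions for illustration, not values taken from this article.

```python
import azure.cognitiveservices.speech as speechsdk

# Placeholder values: use your Speech resource key and region, and the endpoint ID
# shown on the Deploy models page in Speech Studio after deployment succeeds.
speech_config = speechsdk.SpeechConfig(subscription="YourSubscriptionKey", region="YourServiceRegion")
speech_config.endpoint_id = "YourEndpointId"  # route recognition to the custom endpoint

# Recognize speech from a local WAV file (assumed to exist) instead of the default microphone.
audio_config = speechsdk.audio.AudioConfig(filename="sample.wav")
recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)

result = recognizer.recognize_once()
print(result.text)
```

If the `endpoint_id` assignment is omitted, the same code falls back to the latest base model for the configured locale.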
## Next steps -* [Choose a model](how-to-custom-speech-choose-model.md) +* [Create a project](how-to-custom-speech-create-project.md) * [Upload test data](./how-to-custom-speech-upload-data.md) * [Train a model](how-to-custom-speech-train-model.md) diff --git a/articles/cognitive-services/Speech-Service/how-to-custom-commands-integrate-remote-skills.md b/articles/cognitive-services/Speech-Service/how-to-custom-commands-integrate-remote-skills.md deleted file mode 100644 index 24a636f711c94..0000000000000 --- a/articles/cognitive-services/Speech-Service/how-to-custom-commands-integrate-remote-skills.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: 'How To: Export Custom Commands application as a remote skill - Speech service' -titleSuffix: Azure Cognitive Services -description: In this article, you learn how to export Custom Commands application as a skill -services: cognitive-services -author: eric-urban -manager: nitinme -ms.service: cognitive-services -ms.subservice: speech-service -ms.topic: how-to -ms.date: 09/30/2020 -ms.author: eur -ms.custom: cogserv-non-critical-speech ---- - -# Export Custom Commands application as a remote skill - -In this article, you will learn how to export a Custom Commands application as a remote skill. - -## Prerequisites -> [!div class="checklist"] -> * [Understanding of Bot Framework Skill](/azure/bot-service/skills-conceptual) -> * [Understanding of Skill Manifest](https://aka.ms/speech/cc-skill-manifest) -> * [How to invoke a skill from a Bot Framework Bot](/azure/bot-service/skills-about-skill-consumers) -> * An exisiting Custom Commands application. In case you don't have any Custom Commands application, try out with - [Quickstart: Create a voice assistant using Custom Commands](quickstart-custom-commands-application.md) - -## Custom Commands as remote skills -* Bot Framework Skills are re-usable conversational skill building-blocks covering conversational use-cases enabling you to add extensive functionality to a Bot within minutes. To read more on this, go to [Bot Framework Skill](https://microsoft.github.io/botframework-solutions/overview/skills/). -* A Custom Commands application can be exported as a skill. This skill can then be invoked over the remote skills protocol from a Bot Framework bot. - -## Configure an application to be exposed as a remote skill - -### Application level settings -1. In the left panel, select **Settings** > **Remote skills**. -1. Set **Remote skills enabled** toggle to on. - -### Authentication to skills -1. If you want to enable authentication, add Microsoft Application Ids of the Bot Framework Bots you want to configure to call the custom commands application. - > [!div class="mx-imgBorder"] - > ![Add a MSA id to skill](media/custom-commands/skill-add-msa-id.png) - -1. If you have at least one entry added to the list, authentication will be enabled on the application, and only the allowed bots will be able to call the application. -> [!TIP] -> To disable authentication, delete all the Microsoft Application Ids from the allowed list. - - ### Enable/disable commands to be exposed as skills - -You have the option to choose which commands you want to export over Remote Skills. - -1. To expose a command over skills, select **Enable a new command** under the **Enable commands for skills**. -1. From the dropdown, select the command you intend to add. -1. Select **Save**. 
- -### Configure triggering utterances for commands -Custom Commands uses the example sentences which are configured for the commands in order to generate the skills triggering utterances. These **triggering utterances** will be used to generate the **dispatcher** section [**skill manifest**](https://microsoft.github.io/botframework-solutions/skills/handbook/manifest/). - -As an author, you might want to control which of your **example sentences** are used to generate the triggering utterances for skills. -1. By default, all the **Triggering examples** from a command will be included the manifest file. -1. If you want to explicitly eliminate any one example, select **Edit** icon on the command from **Enabled commands for skills** section. - > [!div class="mx-imgBorder"] - > ![Edit an enabled command for skill](media/custom-commands/skill-edit-enabled-command.png) - -1. Next, on the example sentences you want to omit, **right click** > **Disable Example Sentence**. - > [!div class="mx-imgBorder"] - > ![Disable examples](media/custom-commands/skill-disable-example-sentences.png) - -1. Select **Save**. -1. You will notice that you can't add a new example in this window. If there's a need to do so, proceed to the exit out of the settings section and select the relevant command from **Commands** accordion. At this point, you can add the new entry in the **Example sentences** section. This change will be automatically reflected in the remote skills settings value for the command. - -> [!IMPORTANT] -> In case your existing example sentences have references to **String > Catalog** data-type, those sentences will be automatically omitted from the skills triggering utterances list. - -## Download skill manifest -1. After, you have **published** your application, you can download the skill manifest file. -1. Use the skill manifest to configure your Bot Framework consumer bot to call in to the Custom Commands skill. -> [!IMPORTANT] -> You must **publish** your Custom Commands application in order to download the skill manifest.
                  -> Additionally, if you made **any changes** to the application, you need to publish the application again for the latest changes to be reflected in the manifest file. - -> [!NOTE] -> If you face any issues with publishing the application and the error directs to skills triggering utterances, please re-check the configuration for **Enabled commands for skills**. Each of the exposed commands must have at least one valid triggering utterance. - - -## Next steps - -> [!div class="nextstepaction"] -> [Update a command from the client](./how-to-custom-commands-update-command-from-client.md) diff --git a/articles/cognitive-services/Speech-Service/how-to-custom-speech-choose-model.md b/articles/cognitive-services/Speech-Service/how-to-custom-speech-choose-model.md deleted file mode 100644 index 16061c3b979f9..0000000000000 --- a/articles/cognitive-services/Speech-Service/how-to-custom-speech-choose-model.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Choose a model for Custom Speech - Speech service -titleSuffix: Azure Cognitive Services -description: Learn about how to choose a model for Custom Speech. -services: cognitive-services -author: eric-urban -manager: nitinme -ms.service: cognitive-services -ms.subservice: speech-service -ms.topic: how-to -ms.date: 05/08/2022 -ms.author: eur -ms.custom: contperf-fy21q2, references_regions ---- - -# Choose a model for Custom Speech - -Custom Speech models are created by customizing a base model with data from your particular customer scenario. Once you create a custom model, the speech recognition accuracy and quality will remain consistent, even when a new base model is released. - -Base models are updated periodically to improve accuracy and quality. We recommend that if you use base models, use the latest default base models. But with Custom Speech you can take a snapshot of a particular base model without training it. In this case, "custom" means that speech recognition is pinned to a base model from a particular point in time. - -New base models are released periodically to improve accuracy and quality. We recommend that you chose the latest base model when creating your custom model. If a required customization capability is only available with an older model, then you can choose an older base model. - -> [!NOTE] -> The name of the base model corresponds to the date when it was released in YYYYMMDD format. The customization capabilities of the base model are listed in parenthesis after the model name in Speech Studio - -A model deployed to an endpoint using Custom Speech is fixed until you decide to update it. You can also choose to deploy a base model without training, which means that base model is fixed. This allows you to lock in the behavior of a specific model until you decide to use a newer model. - -Whether you train your own model or use a snapshot of a base model, you can use the model for a limited time. For more information, see [Model and endpoint lifecycle](./how-to-custom-speech-model-and-endpoint-lifecycle.md). - -## Choose your model - -There are a few approaches to using speech-to-text models: -- The base model provides accurate speech recognition out of the box for a range of [scenarios](overview.md#speech-scenarios). -- A custom model augments the base model to include domain-specific vocabulary shared across all areas of the custom domain. -- Multiple custom models can be used when the custom domain has multiple areas, each with a specific vocabulary. 
- -One recommended way to see if the base model will suffice is to analyze the transcription produced from the base model and compare it with a human-generated transcript for the same audio. You can use the Speech Studio, Speech CLI, or REST API to compare the transcripts and obtain a [word error rate (WER)](how-to-custom-speech-evaluate-data.md#evaluate-word-error-rate) score. If there are multiple incorrect word substitutions when evaluating the results, then training a custom model to recognize those words is recommended. - -Multiple models are recommended if the vocabulary varies across the domain areas. For instance, Olympic commentators report on various events, each associated with its own vernacular. Because each Olympic event vocabulary differs significantly from others, building a custom model specific to an event increases accuracy by limiting the utterance data relative to that particular event. As a result, the model doesn’t need to sift through unrelated data to make a match. Regardless, training still requires a decent variety of training data. Include audio from various commentators who have different accents, gender, age, etcetera. - -## Create a Custom Speech project - -Custom Speech projects contain models, training and testing datasets, and deployment endpoints. Each project is specific to a country or language. For example, you might create a project for English in the United States. - -1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech). -1. Select the subscription and Speech resource to work with. -1. Select **Custom speech** > **Create a new project**. -1. Follow the instructions provided by the wizard to create your project. - -Select the new project by name or select **Go to project**. You will see these menu items in the left panel: **Speech datasets**, **Train custom models**, **Test models**, and **Deploy models**. - -If you want to use a base model right away, you can skip the training and testing steps. See [Deploy a Custom Speech model](how-to-custom-speech-deploy-model.md) to start using a base or custom model. - -## Next steps - -* [Training and testing datasets](./how-to-custom-speech-test-and-train.md) -* [Test model quantitatively](how-to-custom-speech-evaluate-data.md) -* [Train a model](how-to-custom-speech-train-model.md) diff --git a/articles/cognitive-services/Speech-Service/how-to-custom-speech-create-project.md b/articles/cognitive-services/Speech-Service/how-to-custom-speech-create-project.md new file mode 100644 index 0000000000000..060297c740d20 --- /dev/null +++ b/articles/cognitive-services/Speech-Service/how-to-custom-speech-create-project.md @@ -0,0 +1,152 @@ +--- +title: Create a Custom Speech project - Speech service +titleSuffix: Azure Cognitive Services +description: Learn about how to create a project for Custom Speech. +services: cognitive-services +author: eric-urban +manager: nitinme +ms.service: cognitive-services +ms.subservice: speech-service +ms.topic: how-to +ms.date: 05/22/2022 +ms.author: eur +zone_pivot_groups: speech-studio-cli-rest +--- + +# Create a Custom Speech project + +Custom Speech projects contain models, training and testing datasets, and deployment endpoints. Each project is specific to a [locale](language-support.md). For example, you might create a project for English in the United States. + +## Create a project + +::: zone pivot="speech-studio" + +To create a Custom Speech project, follow these steps: + +1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech). +1. 
Select the subscription and Speech resource to work with. +1. Select **Custom speech** > **Create a new project**. +1. Follow the instructions provided by the wizard to create your project. + +Select the new project by name or select **Go to project**. You will see these menu items in the left panel: **Speech datasets**, **Train custom models**, **Test models**, and **Deploy models**. + +::: zone-end + +::: zone pivot="speech-cli" + +To create a project, use the `spx csr project create` command. Construct the request parameters according to the following instructions: + +- Set the required `language` parameter. The locale of the project and the contained datasets should be the same. The locale can't be changed later. The Speech CLI `language` parameter corresponds to the `locale` property in the JSON request and response. +- Set the required `name` parameter. This is the name that will be displayed in the Speech Studio. The Speech CLI `name` parameter corresponds to the `displayName` property in the JSON request and response. + +Here's an example Speech CLI command that creates a project: + +```azurecli-interactive +spx csr project create --name "My Project" --description "My Project Description" --language "en-US" +``` + +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/1cdfa276-0f9d-425b-a942-5f2be93017ed", + "links": { + "evaluations": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/1cdfa276-0f9d-425b-a942-5f2be93017ed/evaluations", + "datasets": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/1cdfa276-0f9d-425b-a942-5f2be93017ed/datasets", + "models": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/1cdfa276-0f9d-425b-a942-5f2be93017ed/models", + "endpoints": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/1cdfa276-0f9d-425b-a942-5f2be93017ed/endpoints", + "transcriptions": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/1cdfa276-0f9d-425b-a942-5f2be93017ed/transcriptions" + }, + "properties": { + "datasetCount": 0, + "evaluationCount": 0, + "modelCount": 0, + "transcriptionCount": 0, + "endpointCount": 0 + }, + "createdDateTime": "2022-05-17T22:15:18Z", + "locale": "en-US", + "displayName": "My Project", + "description": "My Project Description" +} +``` + +The top-level `self` property in the response body is the project's URI. Use this URI to get details about the project's evaluations, datasets, models, endpoints, and transcriptions. You also use this URI to update or delete a project. + +For Speech CLI help with projects, run the following command: + +```azurecli-interactive +spx help csr project +``` + +::: zone-end + +::: zone pivot="rest-api" + +To create a project, use the [CreateProject](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/CreateProject) operation of the [Speech-to-text REST API v3.0](rest-speech-to-text.md). Construct the request body according to the following instructions: + +- Set the required `locale` property. This should be the locale of the contained datasets. The locale can't be changed later. +- Set the required `displayName` property. This is the project name that will be displayed in the Speech Studio. 
+ +Make an HTTP POST request using the URI as shown in the following [CreateProject](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/CreateProject) example. Replace `YourSubscriptionKey` with your Speech resource key, replace `YourServiceRegion` with your Speech resource region, and set the request body properties as previously described. + +```azurecli-interactive +curl -v -X POST -H "Ocp-Apim-Subscription-Key: YourSubscriptionKey" -H "Content-Type: application/json" -d '{ + "displayName": "My Project", + "description": "My Project Description", + "locale": "en-US" +} ' "https://YourServiceRegion.api.cognitive.microsoft.com/speechtotext/v3.0/projects" +``` + +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/1cdfa276-0f9d-425b-a942-5f2be93017ed", + "links": { + "evaluations": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/1cdfa276-0f9d-425b-a942-5f2be93017ed/evaluations", + "datasets": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/1cdfa276-0f9d-425b-a942-5f2be93017ed/datasets", + "models": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/1cdfa276-0f9d-425b-a942-5f2be93017ed/models", + "endpoints": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/1cdfa276-0f9d-425b-a942-5f2be93017ed/endpoints", + "transcriptions": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/1cdfa276-0f9d-425b-a942-5f2be93017ed/transcriptions" + }, + "properties": { + "datasetCount": 0, + "evaluationCount": 0, + "modelCount": 0, + "transcriptionCount": 0, + "endpointCount": 0 + }, + "createdDateTime": "2022-05-17T22:15:18Z", + "locale": "en-US", + "displayName": "My Project", + "description": "My Project Description" +} +``` + +The top-level `self` property in the response body is the project's URI. Use this URI to [get](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetProject) details about the project's evaluations, datasets, models, endpoints, and transcriptions. You also use this URI to [update](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/UpdateProject) or [delete](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/DeleteProject) a project. + +::: zone-end + +## Choose your model + +There are a few approaches to using Custom Speech models: +- The base model provides accurate speech recognition out of the box for a range of [scenarios](overview.md#speech-scenarios). Base models are updated periodically to improve accuracy and quality. We recommend that if you use base models, use the latest default base models. If a required customization capability is only available with an older model, then you can choose an older base model. +- A custom model augments the base model to include domain-specific vocabulary shared across all areas of the custom domain. +- Multiple custom models can be used when the custom domain has multiple areas, each with a specific vocabulary. + +One recommended way to see if the base model will suffice is to analyze the transcription produced from the base model and compare it with a human-generated transcript for the same audio. You can compare the transcripts and obtain a [word error rate (WER)](how-to-custom-speech-evaluate-data.md#evaluate-word-error-rate) score. 
If the WER score is high, training a custom model to recognize the incorrectly identified words is recommended. + +Multiple models are recommended if the vocabulary varies across the domain areas. For instance, Olympic commentators report on various events, each associated with its own vernacular. Because each Olympic event vocabulary differs significantly from others, building a custom model specific to an event increases accuracy by limiting the utterance data relative to that particular event. As a result, the model doesn't need to sift through unrelated data to make a match. Regardless, training still requires a decent variety of training data. Include audio from various commentators who have different accents, gender, age, etcetera. + +## Model stability and lifecycle + +A base model or custom model deployed to an endpoint using Custom Speech is fixed until you decide to update it. The speech recognition accuracy and quality will remain consistent, even when a new base model is released. This allows you to lock in the behavior of a specific model until you decide to use a newer model. + +Whether you train your own model or use a snapshot of a base model, you can use the model for a limited time. For more information, see [Model and endpoint lifecycle](./how-to-custom-speech-model-and-endpoint-lifecycle.md). + +## Next steps + +* [Training and testing datasets](./how-to-custom-speech-test-and-train.md) +* [Test model quantitatively](how-to-custom-speech-evaluate-data.md) +* [Train a model](how-to-custom-speech-train-model.md) diff --git a/articles/cognitive-services/Speech-Service/how-to-custom-speech-deploy-model.md b/articles/cognitive-services/Speech-Service/how-to-custom-speech-deploy-model.md index 2efe9ecf94083..aee61cc7189a0 100644 --- a/articles/cognitive-services/Speech-Service/how-to-custom-speech-deploy-model.md +++ b/articles/cognitive-services/Speech-Service/how-to-custom-speech-deploy-model.md @@ -10,20 +10,25 @@ ms.subservice: speech-service ms.topic: how-to ms.date: 05/08/2022 ms.author: eur +zone_pivot_groups: speech-studio-cli-rest --- # Deploy a Custom Speech model -In this article, you'll learn how to deploy an endpoint for a Custom Speech model. A custom endpoint that you deploy is required to use a Custom Speech model. +In this article, you'll learn how to deploy an endpoint for a Custom Speech model. With the exception of [batch transcription](batch-transcription.md), you must deploy a custom endpoint to use a Custom Speech model. + +You can deploy an endpoint for a base or custom model, and then [update](#change-model-and-redeploy-endpoint) the endpoint later to use a better trained model. > [!NOTE] -> You can deploy an endpoint to use a base model without training or testing. For example, you might want to quickly create a custom endpoint to start testing your application. The endpoint can be [updated](#change-model-and-redeploy-endpoint) later to use a trained and tested model. +> Endpoints used by `F0` Speech resources are deleted after seven days. ## Add a deployment endpoint +::: zone pivot="speech-studio" + To create a custom endpoint, follow these steps: -1. Sign in to the [Speech Studio](https://speech.microsoft.com/customspeech). +1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech). 1. Select **Custom Speech** > Your project name > **Deploy models**. If this is your first endpoint, you'll notice that there are no endpoints listed in the table. After you create an endpoint, you use this page to track each deployed endpoint. 
@@ -39,41 +44,344 @@ To create a custom endpoint, follow these steps: :::image type="content" source="./media/custom-speech/custom-speech-deploy-model.png" alt-text="Screenshot of the New endpoint page that shows the checkbox to enable logging."::: 1. Select **Add** to save and deploy the endpoint. - - > [!NOTE] - > Endpoints used by `F0` Speech resources are deleted after seven days. On the main **Deploy models** page, details about the new endpoint are displayed in a table, such as name, description, status, and expiration date. It can take up to 30 minutes to instantiate a new endpoint that uses your custom models. When the status of the deployment changes to **Succeeded**, the endpoint is ready to use. -Note the model expiration date, and update the endpoint's model before that date to ensure uninterrupted service. For more information, see [Model and endpoint lifecycle](./how-to-custom-speech-model-and-endpoint-lifecycle.md). +> [!IMPORTANT] +> Take note of the model expiration date. This is the last date that you can use your custom model for speech recognition. For more information, see [Model and endpoint lifecycle](./how-to-custom-speech-model-and-endpoint-lifecycle.md). Select the endpoint link to view information specific to it, such as the endpoint key, endpoint URL, and sample code. +::: zone-end + +::: zone pivot="speech-cli" + +To create an endpoint and deploy a model, use the `spx csr endpoint create` command. Construct the request parameters according to the following instructions: + +- Set the `project` parameter to the ID of an existing project. This is recommended so that you can also view and manage the endpoint in Speech Studio. You can run the `spx csr project list` command to get available projects. +- Set the required `model` parameter to the ID of the model that you want deployed to the endpoint. +- Set the required `language` parameter. The endpoint locale must match the locale of the model. The locale can't be changed later. The Speech CLI `language` parameter corresponds to the `locale` property in the JSON request and response. +- Set the required `name` parameter. This is the name that will be displayed in the Speech Studio. The Speech CLI `name` parameter corresponds to the `displayName` property in the JSON request and response. +- Optionally, you can set the `logging` parameter. Set this to `enabled` to enable audio and diagnostic [logging](#view-logging-data) of the endpoint's traffic. The default is `false`. 
+ +Here's an example Speech CLI command to create an endpoint and deploy a model: + +```azurecli-interactive +spx csr endpoint create --project YourProjectId --model YourModelId --name "My Endpoint" --description "My Endpoint Description" --language "en-US" +``` + +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/endpoints/98375aaa-40c2-42c4-b65c-f76734fc7790", + "model": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/ae8d1643-53e4-4554-be4c-221dcfb471c5" + }, + "links": { + "logs": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/endpoints/98375aaa-40c2-42c4-b65c-f76734fc7790/files/logs", + "restInteractive": "https://eastus.stt.speech.microsoft.com/speech/recognition/interactive/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "restConversation": "https://eastus.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "restDictation": "https://eastus.stt.speech.microsoft.com/speech/recognition/dictation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "webSocketInteractive": "wss://eastus.stt.speech.microsoft.com/speech/recognition/interactive/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "webSocketConversation": "wss://eastus.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "webSocketDictation": "wss://eastus.stt.speech.microsoft.com/speech/recognition/dictation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/d40f2eb8-1abf-4f72-9008-a5ae8add82a4" + }, + "properties": { + "loggingEnabled": true + }, + "lastActionDateTime": "2022-05-19T15:27:51Z", + "status": "NotStarted", + "createdDateTime": "2022-05-19T15:27:51Z", + "locale": "en-US", + "displayName": "My Endpoint", + "description": "My Endpoint Description" +} +``` + +The top-level `self` property in the response body is the endpoint's URI. Use this URI to get details about the endpoint's project, model, and logs. You also use this URI to update the endpoint. + +For Speech CLI help with endpoints, run the following command: + +```azurecli-interactive +spx help csr endpoint +``` + +::: zone-end + +::: zone pivot="rest-api" + +To create an endpoint and deploy a model, use the [CreateEndpoint](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/CreateEndpoint) operation of the [Speech-to-text REST API v3.0](rest-speech-to-text.md). Construct the request body according to the following instructions: + +- Set the `project` property to the URI of an existing project. This is recommended so that you can also view and manage the endpoint in Speech Studio. You can make a [GetProjects](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetProjects) request to get available projects. +- Set the required `model` property to the URI of the model that you want deployed to the endpoint. +- Set the required `locale` property. The endpoint locale must match the locale of the model. The locale can't be changed later. +- Set the required `displayName` property. This is the name that will be displayed in the Speech Studio. +- Optionally, you can set the `loggingEnabled` property within `properties`. 
Set this to `true` to enable audio and diagnostic [logging](#view-logging-data) of the endpoint's traffic. The default is `false`. + +Make an HTTP POST request using the URI as shown in the following [CreateEndpoint](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/CreateEndpoint) example. Replace `YourSubscriptionKey` with your Speech resource key, replace `YourServiceRegion` with your Speech resource region, and set the request body properties as previously described. + +```azurecli-interactive +curl -v -X POST -H "Ocp-Apim-Subscription-Key: YourSubscriptionKey" -H "Content-Type: application/json" -d '{ + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/d40f2eb8-1abf-4f72-9008-a5ae8add82a4" + }, + "properties": { + "loggingEnabled": true + }, + "displayName": "My Endpoint", + "description": "My Endpoint Description", + "model": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/ae8d1643-53e4-4554-be4c-221dcfb471c5" + }, + "locale": "en-US", +}' "https://YourServiceRegion.api.cognitive.microsoft.com/speechtotext/v3.0/endpoints" +``` + +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/endpoints/98375aaa-40c2-42c4-b65c-f76734fc7790", + "model": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/ae8d1643-53e4-4554-be4c-221dcfb471c5" + }, + "links": { + "logs": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/endpoints/98375aaa-40c2-42c4-b65c-f76734fc7790/files/logs", + "restInteractive": "https://eastus.stt.speech.microsoft.com/speech/recognition/interactive/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "restConversation": "https://eastus.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "restDictation": "https://eastus.stt.speech.microsoft.com/speech/recognition/dictation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "webSocketInteractive": "wss://eastus.stt.speech.microsoft.com/speech/recognition/interactive/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "webSocketConversation": "wss://eastus.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "webSocketDictation": "wss://eastus.stt.speech.microsoft.com/speech/recognition/dictation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/d40f2eb8-1abf-4f72-9008-a5ae8add82a4" + }, + "properties": { + "loggingEnabled": true + }, + "lastActionDateTime": "2022-05-19T15:27:51Z", + "status": "NotStarted", + "createdDateTime": "2022-05-19T15:27:51Z", + "locale": "en-US", + "displayName": "My Endpoint", + "description": "My Endpoint Description" +} +``` + +The top-level `self` property in the response body is the endpoint's URI. Use this URI to [get](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetEndpoint) details about the endpoint's project, model, and logs. You also use this URI to [update](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/UpdateEndpoint) or [delete](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/DeleteEndpoint) the endpoint. 
+ +::: zone-end + ## Change model and redeploy endpoint An endpoint can be updated to use another model that was created by the same Speech resource. As previously mentioned, you must update the endpoint's model before the [model expires](./how-to-custom-speech-model-and-endpoint-lifecycle.md). +::: zone pivot="speech-studio" + To use a new model and redeploy the custom endpoint: -1. Sign in to the [Speech Studio](https://speech.microsoft.com/customspeech). +1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech). 1. Select **Custom Speech** > Your project name > **Deploy models**. 1. Select the link to an endpoint by name, and then select **Change model**. 1. Select the new model that you want the endpoint to use. 1. Select **Done** to save and redeploy the endpoint. +::: zone-end + +::: zone pivot="speech-cli" + +To redeploy the custom endpoint with a new model, use the `spx csr model update` command. Construct the request parameters according to the following instructions: + +- Set the required `endpoint` parameter to the ID of the endpoint that you want deployed. +- Set the required `model` parameter to the ID of the model that you want deployed to the endpoint. + +Here's an example Speech CLI command that redeploys the custom endpoint with a new model: + +```azurecli-interactive +spx csr endpoint update --endpoint YourEndpointId --model YourModelId +``` + +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/endpoints/98375aaa-40c2-42c4-b65c-f76734fc7790", + "model": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/1e47c19d-12ca-4ba5-b177-9e04bd72cf98" + }, + "links": { + "logs": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/endpoints/98375aaa-40c2-42c4-b65c-f76734fc7790/files/logs", + "restInteractive": "https://eastus.stt.speech.microsoft.com/speech/recognition/interactive/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "restConversation": "https://eastus.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "restDictation": "https://eastus.stt.speech.microsoft.com/speech/recognition/dictation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "webSocketInteractive": "wss://eastus.stt.speech.microsoft.com/speech/recognition/interactive/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "webSocketConversation": "wss://eastus.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "webSocketDictation": "wss://eastus.stt.speech.microsoft.com/speech/recognition/dictation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/639d5280-8995-40cc-9329-051fd0fddd46" + }, + "properties": { + "loggingEnabled": true + }, + "lastActionDateTime": "2022-05-19T23:01:34Z", + "status": "NotStarted", + "createdDateTime": "2022-05-19T15:41:27Z", + "locale": "en-US", + "displayName": "My Endpoint", + "description": "My Updated Endpoint Description" +} +``` + +For Speech CLI help with endpoints, run the following command: + +```azurecli-interactive +spx help csr endpoint +``` + +::: zone-end + +::: zone pivot="rest-api" + +To redeploy the custom endpoint with a new model, use the 
[UpdateEndpoint](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/UpdateEndpoint) operation of the [Speech-to-text REST API v3.0](rest-speech-to-text.md). Construct the request body according to the following instructions: + +- Set the `model` property to the URI of the model that you want deployed to the endpoint. + +Make an HTTP PATCH request using the URI as shown in the following example. Replace `YourSubscriptionKey` with your Speech resource key, replace `YourServiceRegion` with your Speech resource region, replace `YourEndpointId` with your endpoint ID, and set the request body properties as previously described. + +```azurecli-interactive +curl -v -X PATCH -H "Ocp-Apim-Subscription-Key: YourSubscriptionKey" -H "Content-Type: application/json" -d '{ + "model": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/1e47c19d-12ca-4ba5-b177-9e04bd72cf98" + } +}' "https://YourServiceRegion.api.cognitive.microsoft.com/speechtotext/v3.0/endpoints/YourEndpointId" +``` + +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/endpoints/98375aaa-40c2-42c4-b65c-f76734fc7790", + "model": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/1e47c19d-12ca-4ba5-b177-9e04bd72cf98" + }, + "links": { + "logs": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/endpoints/98375aaa-40c2-42c4-b65c-f76734fc7790/files/logs", + "restInteractive": "https://eastus.stt.speech.microsoft.com/speech/recognition/interactive/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "restConversation": "https://eastus.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "restDictation": "https://eastus.stt.speech.microsoft.com/speech/recognition/dictation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "webSocketInteractive": "wss://eastus.stt.speech.microsoft.com/speech/recognition/interactive/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "webSocketConversation": "wss://eastus.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "webSocketDictation": "wss://eastus.stt.speech.microsoft.com/speech/recognition/dictation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/639d5280-8995-40cc-9329-051fd0fddd46" + }, + "properties": { + "loggingEnabled": true + }, + "lastActionDateTime": "2022-05-19T23:01:34Z", + "status": "NotStarted", + "createdDateTime": "2022-05-19T15:41:27Z", + "locale": "en-US", + "displayName": "My Endpoint", + "description": "My Updated Endpoint Description" +} +``` + +::: zone-end + The redeployment takes several minutes to complete. In the meantime, your endpoint will use the previous model without interruption of service. ## View logging data Logging data is available for export if you configured it while creating the endpoint. +::: zone pivot="speech-studio" + To download the endpoint logs: -1. Sign in to the [Speech Studio](https://speech.microsoft.com/customspeech). +1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech). 1. Select **Custom Speech** > Your project name > **Deploy models**. 1. Select the link by endpoint name. 1. Under **Content logging**, select **Download log**. 
+::: zone-end + +::: zone pivot="speech-cli" + +To gets logs for an endpoint, use the `spx csr endpoint list` command. Construct the request parameters according to the following instructions: + +- Set the required `endpoint` parameter to the ID of the endpoint that you want to get logs. + +Here's an example Speech CLI command that gets logs for an endpoint: + +```azurecli-interactive +spx csr endpoint list --endpoint YourEndpointId +``` + +The location of each log file with more details are returned in the response body. + +::: zone-end + +::: zone pivot="rest-api" + +To get logs for an endpoint, start by using the [GetEndpoint](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetEndpoint) operation of the [Speech-to-text REST API v3.0](rest-speech-to-text.md). + +Make an HTTP GET request using the URI as shown in the following example. Replace `YourEndpointId` with your endpoint ID, replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region. + +```azurecli-interactive +curl -v -X GET "https://YourServiceRegion.api.cognitive.microsoft.com/speechtotext/v3.0/endpoints/YourEndpointId" -H "Ocp-Apim-Subscription-Key: YourSubscriptionKey" +``` + +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/endpoints/98375aaa-40c2-42c4-b65c-f76734fc7790", + "model": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/1e47c19d-12ca-4ba5-b177-9e04bd72cf98" + }, + "links": { + "logs": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/endpoints/98375aaa-40c2-42c4-b65c-f76734fc7790/files/logs", + "restInteractive": "https://eastus.stt.speech.microsoft.com/speech/recognition/interactive/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "restConversation": "https://eastus.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "restDictation": "https://eastus.stt.speech.microsoft.com/speech/recognition/dictation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "webSocketInteractive": "wss://eastus.stt.speech.microsoft.com/speech/recognition/interactive/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "webSocketConversation": "wss://eastus.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790", + "webSocketDictation": "wss://eastus.stt.speech.microsoft.com/speech/recognition/dictation/cognitiveservices/v1?cid=98375aaa-40c2-42c4-b65c-f76734fc7790" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/2f78cdb7-58ac-4bd9-9bc6-170e31483b26" + }, + "properties": { + "loggingEnabled": true + }, + "lastActionDateTime": "2022-05-19T23:41:05Z", + "status": "Succeeded", + "createdDateTime": "2022-05-19T23:41:05Z", + "locale": "en-US", + "displayName": "My Endpoint", + "description": "My Updated Endpoint Description" +} +``` + +Make an HTTP GET request using the "logs" URI from the previous response body. Replace `YourEndpointId` with your endpoint ID, replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region. 
+ + +```curl +curl -v -X GET "https://YourServiceRegion.api.cognitive.microsoft.com/speechtotext/v3.0/endpoints/YourEndpointId/files/logs" -H "Ocp-Apim-Subscription-Key: YourSubscriptionKey" +``` + +The location of each log file with more details are returned in the response body. + +::: zone-end + Logging data is available on Microsoft-owned storage for 30 days, after which it will be removed. If your own storage account is linked to the Cognitive Services subscription, the logging data won't be automatically deleted. ## Next steps diff --git a/articles/cognitive-services/Speech-Service/how-to-custom-speech-evaluate-data.md b/articles/cognitive-services/Speech-Service/how-to-custom-speech-evaluate-data.md index 22eef84155f6f..2e15c363c08ce 100644 --- a/articles/cognitive-services/Speech-Service/how-to-custom-speech-evaluate-data.md +++ b/articles/cognitive-services/Speech-Service/how-to-custom-speech-evaluate-data.md @@ -11,19 +11,26 @@ ms.topic: how-to ms.date: 05/08/2022 ms.author: eur ms.custom: ignite-fall-2021 +zone_pivot_groups: speech-studio-cli-rest +show_latex: true +no-loc: [$$, '\times', '\over'] --- # Test accuracy of a Custom Speech model -In this article, you learn how to quantitatively measure and improve the accuracy of the Microsoft speech-to-text model or your own custom models. [Audio + human-labeled transcript](how-to-custom-speech-test-and-train.md#audio--human-labeled-transcript-data-for-training-or-testing) data is required to test accuracy, and 30 minutes to 5 hours of representative audio should be provided. +In this article, you learn how to quantitatively measure and improve the accuracy of the Microsoft speech-to-text model or your own custom models. [Audio + human-labeled transcript](how-to-custom-speech-test-and-train.md#audio--human-labeled-transcript-data-for-training-or-testing) data is required to test accuracy. You should provide from 30 minutes to 5 hours of representative audio. + +[!INCLUDE [service-pricing-advisory](includes/service-pricing-advisory.md)] ## Create a test -You can test the accuracy of your custom model by creating a test. A test requires a collection of audio files and their corresponding transcriptions. You can compare a custom model's accuracy a Microsoft speech-to-text base model or another custom model. +You can test the accuracy of your custom model by creating a test. A test requires a collection of audio files and their corresponding transcriptions. You can compare a custom model's accuracy a Microsoft speech-to-text base model or another custom model. After you [get](#get-test-results) the test results, [evaluate](#evaluate-word-error-rate) the word error rate (WER) compared to speech recognition results. + +::: zone pivot="speech-studio" Follow these steps to create a test: -1. Sign in to the [Speech Studio](https://speech.microsoft.com/customspeech). +1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech). 1. Select **Custom Speech** > Your project name > **Test models**. 1. Select **Create new test**. 1. Select **Evaluate accuracy** > **Next**. @@ -36,19 +43,355 @@ Follow these steps to create a test: 1. Enter the test name and description, and then select **Next**. 1. Review the test details, and then select **Save and close**. -After your test has been successfully created, you can compare the [word error rate (WER)](#evaluate-word-error-rate) and recognition results side by side. 
-## Side-by-side comparison +::: zone-end -After the test is complete, as indicated by the status change to *Succeeded*, you'll find a WER number for both models included in your test. Select the test name to view the test details page. This page lists all the utterances in your dataset and the recognition results of the two models, alongside the transcription from the submitted dataset. +::: zone pivot="speech-cli" -To inspect the side-by-side comparison, you can toggle various error types, including insertion, deletion, and substitution. By listening to the audio and comparing recognition results in each column, which display the human-labeled transcription and the results for two speech-to-text models, you can decide which model meets your needs and determine where additional training and improvements are required. +To create a test, use the `spx csr evaluation create` command. Construct the request parameters according to the following instructions: -## Evaluate word error rate +- Set the `project` parameter to the ID of an existing project. This is recommended so that you can also view the test in Speech Studio. You can run the `spx csr project list` command to get available projects. +- Set the required `model1` parameter to the ID of a model that you want to test. +- Set the required `model2` parameter to the ID of another model that you want to test. If you don't want to compare two models, use the same model for both `model1` and `model2`. +- Set the required `dataset` parameter to the ID of a dataset that you want to use for the test. +- Set the `language` parameter, otherwise the Speech CLI will set "en-US" by default. This should be the locale of the dataset contents. The locale can't be changed later. The Speech CLI `language` parameter corresponds to the `locale` property in the JSON request and response. +- Set the required `name` parameter. This is the name that will be displayed in the Speech Studio. The Speech CLI `name` parameter corresponds to the `displayName` property in the JSON request and response. 
+ +Here's an example Speech CLI command that creates a test: + +```azurecli-interactive +spx csr evaluation create --project 9f8c4cbb-f9a5-4ec1-8bb0-53cfa9221226 --dataset be378d9d-a9d7-4d4a-820a-e0432e8678c7 --model1 ff43e922-e3e6-4bf0-8473-55c08fd68048 --model2 1aae1070-7972-47e9-a977-87e3b05c457d --name "My Evaluation" --description "My Evaluation Description" +``` + +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/8bfe6b05-f093-4ab4-be7d-180374b751ca", + "model1": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/ff43e922-e3e6-4bf0-8473-55c08fd68048" + }, + "model2": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/1aae1070-7972-47e9-a977-87e3b05c457d" + }, + "dataset": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/be378d9d-a9d7-4d4a-820a-e0432e8678c7" + }, + "transcription2": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/transcriptions/6eaf6a15-6076-466a-83d4-a30dba78ca63" + }, + "transcription1": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/transcriptions/0c5b1630-fadf-444d-827f-d6da9c0cf0c3" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/9f8c4cbb-f9a5-4ec1-8bb0-53cfa9221226" + }, + "links": { + "files": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/8bfe6b05-f093-4ab4-be7d-180374b751ca/files" + }, + "properties": { + "wordErrorRate2": -1.0, + "wordErrorRate1": -1.0, + "sentenceErrorRate2": -1.0, + "sentenceCount2": -1, + "wordCount2": -1, + "correctWordCount2": -1, + "wordSubstitutionCount2": -1, + "wordDeletionCount2": -1, + "wordInsertionCount2": -1, + "sentenceErrorRate1": -1.0, + "sentenceCount1": -1, + "wordCount1": -1, + "correctWordCount1": -1, + "wordSubstitutionCount1": -1, + "wordDeletionCount1": -1, + "wordInsertionCount1": -1 + }, + "lastActionDateTime": "2022-05-20T16:42:43Z", + "status": "NotStarted", + "createdDateTime": "2022-05-20T16:42:43Z", + "locale": "en-US", + "displayName": "My Evaluation", + "description": "My Evaluation Description" +} +``` + +The top-level `self` property in the response body is the evaluation's URI. Use this URI to get details about the project and test results. You also use this URI to update or delete the evaluation. + +For Speech CLI help with evaluations, run the following command: + +```azurecli-interactive +spx help csr evaluation +``` + +::: zone-end + +::: zone pivot="rest-api" + +To create a test, use the [CreateEvaluation](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/CreateEvaluation) operation of the [Speech-to-text REST API v3.0](rest-speech-to-text.md). Construct the request body according to the following instructions: + +- Set the `project` property to the URI of an existing project. This is recommended so that you can also view the test in Speech Studio. You can make a [GetProjects](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetProjects) request to get available projects. +- Set the `testingKind` property to `Evaluation` within `customProperties`. If you don't specify `Evaluation`, the test is treated as a quality inspection test. 
Whether the `testingKind` property is set to `Evaluation` or `Inspection`, or not set, you can access the accuracy scores via the API, but not in the Speech Studio. +- Set the required `model1` property to the URI of a model that you want to test. +- Set the required `model2` property to the URI of another model that you want to test. If you don't want to compare two models, use the same model for both `model1` and `model2`. +- Set the required `dataset` property to the URI of a dataset that you want to use for the test. +- Set the required `locale` property. This should be the locale of the dataset contents. The locale can't be changed later. +- Set the required `displayName` property. This is the name that will be displayed in the Speech Studio. + +Make an HTTP POST request using the URI as shown in the following example. Replace `YourSubscriptionKey` with your Speech resource key, replace `YourServiceRegion` with your Speech resource region, and set the request body properties as previously described. + +```azurecli-interactive +curl -v -X POST -H "Ocp-Apim-Subscription-Key: YourSubscriptionKey" -H "Content-Type: application/json" -d '{ + "model1": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/ff43e922-e3e6-4bf0-8473-55c08fd68048" + }, + "model2": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/1aae1070-7972-47e9-a977-87e3b05c457d" + }, + "dataset": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/be378d9d-a9d7-4d4a-820a-e0432e8678c7" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/9f8c4cbb-f9a5-4ec1-8bb0-53cfa9221226" + }, + "displayName": "My Evaluation", + "description": "My Evaluation Description", + "customProperties": { + "testingKind": "Evaluation" + }, + "locale": "en-US" +}' "https://YourServiceRegion.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations" +``` + +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/8bfe6b05-f093-4ab4-be7d-180374b751ca", + "model1": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/ff43e922-e3e6-4bf0-8473-55c08fd68048" + }, + "model2": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/1aae1070-7972-47e9-a977-87e3b05c457d" + }, + "dataset": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/be378d9d-a9d7-4d4a-820a-e0432e8678c7" + }, + "transcription2": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/transcriptions/6eaf6a15-6076-466a-83d4-a30dba78ca63" + }, + "transcription1": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/transcriptions/0c5b1630-fadf-444d-827f-d6da9c0cf0c3" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/9f8c4cbb-f9a5-4ec1-8bb0-53cfa9221226" + }, + "links": { + "files": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/8bfe6b05-f093-4ab4-be7d-180374b751ca/files" + }, + "properties": { + "wordErrorRate2": -1.0, + "wordErrorRate1": -1.0, + "sentenceErrorRate2": -1.0, + "sentenceCount2": -1, + "wordCount2": -1, + "correctWordCount2": -1, + "wordSubstitutionCount2": -1, + "wordDeletionCount2": -1, + "wordInsertionCount2": -1, + "sentenceErrorRate1": -1.0, + "sentenceCount1": -1, + "wordCount1": -1, + "correctWordCount1": -1, + 
"wordSubstitutionCount1": -1, + "wordDeletionCount1": -1, + "wordInsertionCount1": -1 + }, + "lastActionDateTime": "2022-05-20T16:42:43Z", + "status": "NotStarted", + "createdDateTime": "2022-05-20T16:42:43Z", + "locale": "en-US", + "displayName": "My Evaluation", + "description": "My Evaluation Description", + "customProperties": { + "testingKind": "Evaluation" + } +} +``` + +The top-level `self` property in the response body is the evaluation's URI. Use this URI to [get](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetEvaluation) details about the evaluation's project and test results. You also use this URI to [update](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/UpdateEvaluation) or [delete](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/DeleteEvaluation) the evaluation. + +::: zone-end + +## Get test results + +You should get the test results and [evaluate](#evaluate-word-error-rate) the word error rate (WER) compared to speech recognition results. + +::: zone pivot="speech-studio" + +Follow these steps to get test results: + +1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech). +1. Select **Custom Speech** > Your project name > **Test models**. +1. Select the link by test name. +1. After the test is complete, as indicated by the status set to *Succeeded*, you should see results that include the WER number for each tested model. + +This page lists all the utterances in your dataset and the recognition results, alongside the transcription from the submitted dataset. You can toggle various error types, including insertion, deletion, and substitution. By listening to the audio and comparing recognition results in each column, you can decide which model meets your needs and determine where additional training and improvements are required. -The industry standard for measuring model accuracy is [word error rate (WER)](https://en.wikipedia.org/wiki/Word_error_rate). WER counts the number of incorrect words identified during recognition, divides the sum by the total number of words provided in the human-labeled transcript (shown in the following formula as N), and then multiplies that quotient by 100 to calculate the error rate as a percentage. +::: zone-end -![Screenshot showing the WER formula.](./media/custom-speech/custom-speech-wer-formula.png) +::: zone pivot="speech-cli" + +To get test results, use the `spx csr evaluation status` command. Construct the request parameters according to the following instructions: + +- Set the required `evaluation` parameter to the ID of the evaluation that you want to get test results. + +Here's an example Speech CLI command that gets test results: + +```azurecli-interactive +spx csr evaluation status --evaluation 8bfe6b05-f093-4ab4-be7d-180374b751ca +``` + +The word error rates and more details are returned in the response body. 
+ +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/8bfe6b05-f093-4ab4-be7d-180374b751ca", + "model1": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/ff43e922-e3e6-4bf0-8473-55c08fd68048" + }, + "model2": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/1aae1070-7972-47e9-a977-87e3b05c457d" + }, + "dataset": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/be378d9d-a9d7-4d4a-820a-e0432e8678c7" + }, + "transcription2": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/transcriptions/6eaf6a15-6076-466a-83d4-a30dba78ca63" + }, + "transcription1": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/transcriptions/0c5b1630-fadf-444d-827f-d6da9c0cf0c3" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/9f8c4cbb-f9a5-4ec1-8bb0-53cfa9221226" + }, + "links": { + "files": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/8bfe6b05-f093-4ab4-be7d-180374b751ca/files" + }, + "properties": { + "wordErrorRate2": 4.62, + "wordErrorRate1": 4.6, + "sentenceErrorRate2": 66.7, + "sentenceCount2": 3, + "wordCount2": 173, + "correctWordCount2": 166, + "wordSubstitutionCount2": 7, + "wordDeletionCount2": 0, + "wordInsertionCount2": 1, + "sentenceErrorRate1": 66.7, + "sentenceCount1": 3, + "wordCount1": 174, + "correctWordCount1": 166, + "wordSubstitutionCount1": 7, + "wordDeletionCount1": 1, + "wordInsertionCount1": 0 + }, + "lastActionDateTime": "2022-05-20T16:42:56Z", + "status": "Succeeded", + "createdDateTime": "2022-05-20T16:42:43Z", + "locale": "en-US", + "displayName": "My Evaluation", + "description": "My Evaluation Description", + "customProperties": { + "testingKind": "Evaluation" + } +} +``` + +For Speech CLI help with evaluations, run the following command: + +```azurecli-interactive +spx help csr evaluation +``` + +::: zone-end + +::: zone pivot="rest-api" + +To get test results, start by using the [GetEvaluation](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetEvaluation) operation of the [Speech-to-text REST API v3.0](rest-speech-to-text.md). + +Make an HTTP GET request using the URI as shown in the following example. Replace `YourEvaluationId` with your evaluation ID, replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region. + +```azurecli-interactive +curl -v -X GET "https://YourServiceRegion.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/YourEvaluationId" -H "Ocp-Apim-Subscription-Key: YourSubscriptionKey" +``` + +The word error rates and more details are returned in the response body. 
+ +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/8bfe6b05-f093-4ab4-be7d-180374b751ca", + "model1": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/ff43e922-e3e6-4bf0-8473-55c08fd68048" + }, + "model2": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/1aae1070-7972-47e9-a977-87e3b05c457d" + }, + "dataset": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/be378d9d-a9d7-4d4a-820a-e0432e8678c7" + }, + "transcription2": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/transcriptions/6eaf6a15-6076-466a-83d4-a30dba78ca63" + }, + "transcription1": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/transcriptions/0c5b1630-fadf-444d-827f-d6da9c0cf0c3" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/9f8c4cbb-f9a5-4ec1-8bb0-53cfa9221226" + }, + "links": { + "files": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/8bfe6b05-f093-4ab4-be7d-180374b751ca/files" + }, + "properties": { + "wordErrorRate2": 4.62, + "wordErrorRate1": 4.6, + "sentenceErrorRate2": 66.7, + "sentenceCount2": 3, + "wordCount2": 173, + "correctWordCount2": 166, + "wordSubstitutionCount2": 7, + "wordDeletionCount2": 0, + "wordInsertionCount2": 1, + "sentenceErrorRate1": 66.7, + "sentenceCount1": 3, + "wordCount1": 174, + "correctWordCount1": 166, + "wordSubstitutionCount1": 7, + "wordDeletionCount1": 1, + "wordInsertionCount1": 0 + }, + "lastActionDateTime": "2022-05-20T16:42:56Z", + "status": "Succeeded", + "createdDateTime": "2022-05-20T16:42:43Z", + "locale": "en-US", + "displayName": "My Evaluation", + "description": "My Evaluation Description", + "customProperties": { + "testingKind": "Evaluation" + } +} +``` + +::: zone-end + + +## Evaluate word error rate + +The industry standard for measuring model accuracy is [word error rate (WER)](https://en.wikipedia.org/wiki/Word_error_rate). WER counts the number of incorrect words identified during recognition, and divides the sum by the total number of words provided in the human-labeled transcript (N). Incorrectly identified words fall into three categories: @@ -56,10 +399,23 @@ Incorrectly identified words fall into three categories: * Deletion (D): Words that are undetected in the hypothesis transcript * Substitution (S): Words that were substituted between reference and hypothesis -Here's an example: +In the Speech Studio, the quotient is multiplied by 100 and shown as a percentage. The Speech CLI and REST API results aren't multiplied by 100. + +$$ +WER = {{I+D+S}\over N} \times 100 +$$ + +Here's an example that shows incorrectly identified words, when compared to the human-labeled transcript: ![Screenshot showing an example of incorrectly identified words.](./media/custom-speech/custom-speech-dis-words.png) +The speech recognition result erred as follows: +* Insertion (I): Added the word "a" +* Deletion (D): Deleted the word "are" +* Substitution (S): Substituted the word "Jones" for "John" + +The word error rate from the previous example is 60%. + If you want to replicate WER measurements locally, you can use the sclite tool from the [NIST Scoring Toolkit (SCTK)](https://github.com/usnistgov/SCTK). 
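+
+Whether you compute WER locally or read it from the test results, the calculation is the same. As a quick check of the previous example, with one insertion, one deletion, and one substitution, and assuming the human-labeled reference transcript contains five words (an assumption made here only for illustration), the formula reproduces the 60% figure:
+
+$$
+WER = {{1+1+1}\over 5} \times 100 = 60
+$$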
## Resolve errors and improve WER diff --git a/articles/cognitive-services/Speech-Service/how-to-custom-speech-inspect-data.md b/articles/cognitive-services/Speech-Service/how-to-custom-speech-inspect-data.md index 481da866edfca..da858be42fcfe 100644 --- a/articles/cognitive-services/Speech-Service/how-to-custom-speech-inspect-data.md +++ b/articles/cognitive-services/Speech-Service/how-to-custom-speech-inspect-data.md @@ -10,17 +10,21 @@ ms.subservice: speech-service ms.topic: how-to ms.date: 05/08/2022 ms.author: eur +zone_pivot_groups: speech-studio-cli-rest --- # Test recognition quality of a Custom Speech model You can inspect the recognition quality of a Custom Speech model in the [Speech Studio](https://aka.ms/speechstudio/customspeech). You can play back uploaded audio and determine if the provided recognition result is correct. After a test has been successfully created, you can see how a model transcribed the audio dataset, or compare results from two models side by side. -> [!TIP] -> You can also use the [online transcription editor](how-to-custom-speech-transcription-editor.md) to create and refine labeled audio datasets. +Side-by-side model testing is useful to validate which speech recognition model is best for an application. For an objective measure of accuracy, which requires transcription datasets input, see [Test model quantitatively](how-to-custom-speech-evaluate-data.md). + +[!INCLUDE [service-pricing-advisory](includes/service-pricing-advisory.md)] ## Create a test +::: zone pivot="speech-studio" + Follow these instructions to create a test: 1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech). @@ -29,23 +33,389 @@ Follow these instructions to create a test: 1. Select **Inspect quality (Audio-only data)** > **Next**. 1. Choose an audio dataset that you'd like to use for testing, and then select **Next**. If there aren't any datasets available, cancel the setup, and then go to the **Speech datasets** menu to [upload datasets](how-to-custom-speech-upload-data.md). - :::image type="content" source="media/custom-speech/custom-speech-choose-test-data.png" alt-text="Review your keyword"::: + :::image type="content" source="media/custom-speech/custom-speech-choose-test-data.png" alt-text="Screenshot of choosing a dataset dialog"::: 1. Choose one or two models to evaluate and compare accuracy. 1. Enter the test name and description, and then select **Next**. 1. Review your settings, and then select **Save and close**. -[!INCLUDE [service-pricing-advisory](includes/service-pricing-advisory.md)] +::: zone-end + +::: zone pivot="speech-cli" + +To create a test, use the `spx csr evaluation create` command. Construct the request parameters according to the following instructions: + +- Set the `project` parameter to the ID of an existing project. This is recommended so that you can also view the test in Speech Studio. You can run the `spx csr project list` command to get available projects. +- Set the required `model1` parameter to the ID of a model that you want to test. +- Set the required `model2` parameter to the ID of another model that you want to test. If you don't want to compare two models, use the same model for both `model1` and `model2`. +- Set the required `dataset` parameter to the ID of a dataset that you want to use for the test. +- Set the `language` parameter, otherwise the Speech CLI will set "en-US" by default. This should be the locale of the dataset contents. The locale can't be changed later. 
The Speech CLI `language` parameter corresponds to the `locale` property in the JSON request and response. +- Set the required `name` parameter. This is the name that will be displayed in the Speech Studio. The Speech CLI `name` parameter corresponds to the `displayName` property in the JSON request and response. + +Here's an example Speech CLI command that creates a test: + +```azurecli-interactive +spx csr evaluation create --project 9f8c4cbb-f9a5-4ec1-8bb0-53cfa9221226 --dataset be378d9d-a9d7-4d4a-820a-e0432e8678c7 --model1 ff43e922-e3e6-4bf0-8473-55c08fd68048 --model2 1aae1070-7972-47e9-a977-87e3b05c457d --name "My Inspection" --description "My Inspection Description" +``` + +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/8bfe6b05-f093-4ab4-be7d-180374b751ca", + "model1": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/ff43e922-e3e6-4bf0-8473-55c08fd68048" + }, + "model2": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/1aae1070-7972-47e9-a977-87e3b05c457d" + }, + "dataset": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/be378d9d-a9d7-4d4a-820a-e0432e8678c7" + }, + "transcription2": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/transcriptions/6eaf6a15-6076-466a-83d4-a30dba78ca63" + }, + "transcription1": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/transcriptions/0c5b1630-fadf-444d-827f-d6da9c0cf0c3" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/9f8c4cbb-f9a5-4ec1-8bb0-53cfa9221226" + }, + "links": { + "files": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/8bfe6b05-f093-4ab4-be7d-180374b751ca/files" + }, + "properties": { + "wordErrorRate2": -1.0, + "wordErrorRate1": -1.0, + "sentenceErrorRate2": -1.0, + "sentenceCount2": -1, + "wordCount2": -1, + "correctWordCount2": -1, + "wordSubstitutionCount2": -1, + "wordDeletionCount2": -1, + "wordInsertionCount2": -1, + "sentenceErrorRate1": -1.0, + "sentenceCount1": -1, + "wordCount1": -1, + "correctWordCount1": -1, + "wordSubstitutionCount1": -1, + "wordDeletionCount1": -1, + "wordInsertionCount1": -1 + }, + "lastActionDateTime": "2022-05-20T16:42:43Z", + "status": "NotStarted", + "createdDateTime": "2022-05-20T16:42:43Z", + "locale": "en-US", + "displayName": "My Inspection", + "description": "My Inspection Description" +} +``` + +The top-level `self` property in the response body is the evaluation's URI. Use this URI to get details about the project and test results. You also use this URI to update or delete the evaluation. + +For Speech CLI help with evaluations, run the following command: + +```azurecli-interactive +spx help csr evaluation +``` + +::: zone-end + +::: zone pivot="rest-api" + +To create a test, use the [CreateEvaluation](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/CreateEvaluation) operation of the [Speech-to-text REST API v3.0](rest-speech-to-text.md). Construct the request body according to the following instructions: + +- Set the `project` property to the URI of an existing project. This is recommended so that you can also view the test in Speech Studio. You can make a [GetProjects](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetProjects) request to get available projects. 
+- Set the required `model1` property to the URI of a model that you want to test. +- Set the required `model2` property to the URI of another model that you want to test. If you don't want to compare two models, use the same model for both `model1` and `model2`. +- Set the required `dataset` property to the URI of a dataset that you want to use for the test. +- Set the required `locale` property. This should be the locale of the dataset contents. The locale can't be changed later. +- Set the required `displayName` property. This is the name that will be displayed in the Speech Studio. + +Make an HTTP POST request using the URI as shown in the following example. Replace `YourSubscriptionKey` with your Speech resource key, replace `YourServiceRegion` with your Speech resource region, and set the request body properties as previously described. + +```azurecli-interactive +curl -v -X POST -H "Ocp-Apim-Subscription-Key: YourSubscriptionKey" -H "Content-Type: application/json" -d '{ + "model1": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/ff43e922-e3e6-4bf0-8473-55c08fd68048" + }, + "model2": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/1aae1070-7972-47e9-a977-87e3b05c457d" + }, + "dataset": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/be378d9d-a9d7-4d4a-820a-e0432e8678c7" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/9f8c4cbb-f9a5-4ec1-8bb0-53cfa9221226" + }, + "displayName": "My Inspection", + "description": "My Inspection Description", + "locale": "en-US" +}' "https://YourServiceRegion.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations" +``` + +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/8bfe6b05-f093-4ab4-be7d-180374b751ca", + "model1": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/ff43e922-e3e6-4bf0-8473-55c08fd68048" + }, + "model2": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/1aae1070-7972-47e9-a977-87e3b05c457d" + }, + "dataset": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/be378d9d-a9d7-4d4a-820a-e0432e8678c7" + }, + "transcription2": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/transcriptions/6eaf6a15-6076-466a-83d4-a30dba78ca63" + }, + "transcription1": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/transcriptions/0c5b1630-fadf-444d-827f-d6da9c0cf0c3" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/9f8c4cbb-f9a5-4ec1-8bb0-53cfa9221226" + }, + "links": { + "files": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/8bfe6b05-f093-4ab4-be7d-180374b751ca/files" + }, + "properties": { + "wordErrorRate2": -1.0, + "wordErrorRate1": -1.0, + "sentenceErrorRate2": -1.0, + "sentenceCount2": -1, + "wordCount2": -1, + "correctWordCount2": -1, + "wordSubstitutionCount2": -1, + "wordDeletionCount2": -1, + "wordInsertionCount2": -1, + "sentenceErrorRate1": -1.0, + "sentenceCount1": -1, + "wordCount1": -1, + "correctWordCount1": -1, + "wordSubstitutionCount1": -1, + "wordDeletionCount1": -1, + "wordInsertionCount1": -1 + }, + "lastActionDateTime": "2022-05-20T16:42:43Z", + "status": "NotStarted", + "createdDateTime": "2022-05-20T16:42:43Z", + "locale": "en-US", + 
"displayName": "My Inspection", + "description": "My Inspection Description" +} +``` + +The top-level `self` property in the response body is the evaluation's URI. Use this URI to [get](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetEvaluation) details about the evaluation's project and test results. You also use this URI to [update](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/UpdateEvaluation) or [delete](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/DeleteEvaluation) the evaluation. + +::: zone-end + + +## Get test results + +You should get the test results and [inspect](#compare-transcription-with-audio) the audio datasets compared to transcription results for each model. + +::: zone pivot="speech-studio" + +Follow these steps to get test results: + +1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech). +1. Select **Custom Speech** > Your project name > **Test models**. +1. Select the link by test name. +1. After the test is complete, as indicated by the status set to *Succeeded*, you should see results that include the WER number for each tested model. + +This page lists all the utterances in your dataset and the recognition results, alongside the transcription from the submitted dataset. You can toggle various error types, including insertion, deletion, and substitution. By listening to the audio and comparing recognition results in each column, you can decide which model meets your needs and determine where additional training and improvements are required. + +::: zone-end + +::: zone pivot="speech-cli" + +To get test results, use the `spx csr evaluation status` command. Construct the request parameters according to the following instructions: + +- Set the required `evaluation` parameter to the ID of the evaluation that you want to get test results. + +Here's an example Speech CLI command that gets test results: + +```azurecli-interactive +spx csr evaluation status --evaluation 8bfe6b05-f093-4ab4-be7d-180374b751ca +``` + +The models, audio dataset, transcriptions, and more details are returned in the response body. 
+ +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/8bfe6b05-f093-4ab4-be7d-180374b751ca", + "model1": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/ff43e922-e3e6-4bf0-8473-55c08fd68048" + }, + "model2": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/1aae1070-7972-47e9-a977-87e3b05c457d" + }, + "dataset": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/be378d9d-a9d7-4d4a-820a-e0432e8678c7" + }, + "transcription2": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/transcriptions/6eaf6a15-6076-466a-83d4-a30dba78ca63" + }, + "transcription1": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/transcriptions/0c5b1630-fadf-444d-827f-d6da9c0cf0c3" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/9f8c4cbb-f9a5-4ec1-8bb0-53cfa9221226" + }, + "links": { + "files": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/8bfe6b05-f093-4ab4-be7d-180374b751ca/files" + }, + "properties": { + "wordErrorRate2": 4.62, + "wordErrorRate1": 4.6, + "sentenceErrorRate2": 66.7, + "sentenceCount2": 3, + "wordCount2": 173, + "correctWordCount2": 166, + "wordSubstitutionCount2": 7, + "wordDeletionCount2": 0, + "wordInsertionCount2": 1, + "sentenceErrorRate1": 66.7, + "sentenceCount1": 3, + "wordCount1": 174, + "correctWordCount1": 166, + "wordSubstitutionCount1": 7, + "wordDeletionCount1": 1, + "wordInsertionCount1": 0 + }, + "lastActionDateTime": "2022-05-20T16:42:56Z", + "status": "Succeeded", + "createdDateTime": "2022-05-20T16:42:43Z", + "locale": "en-US", + "displayName": "My Inspection", + "description": "My Inspection Description" +} +``` + +For Speech CLI help with evaluations, run the following command: + +```azurecli-interactive +spx help csr evaluation +``` + +::: zone-end + +::: zone pivot="rest-api" + +To get test results, start by using the [GetEvaluation](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetEvaluation) operation of the [Speech-to-text REST API v3.0](rest-speech-to-text.md). + +Make an HTTP GET request using the URI as shown in the following example. Replace `YourEvaluationId` with your evaluation ID, replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region. + +```azurecli-interactive +curl -v -X GET "https://YourServiceRegion.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/YourEvaluationId" -H "Ocp-Apim-Subscription-Key: YourSubscriptionKey" +``` + +The models, audio dataset, transcriptions, and more details are returned in the response body. 
+ +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/8bfe6b05-f093-4ab4-be7d-180374b751ca", + "model1": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/ff43e922-e3e6-4bf0-8473-55c08fd68048" + }, + "model2": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/1aae1070-7972-47e9-a977-87e3b05c457d" + }, + "dataset": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/be378d9d-a9d7-4d4a-820a-e0432e8678c7" + }, + "transcription2": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/transcriptions/6eaf6a15-6076-466a-83d4-a30dba78ca63" + }, + "transcription1": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/transcriptions/0c5b1630-fadf-444d-827f-d6da9c0cf0c3" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/9f8c4cbb-f9a5-4ec1-8bb0-53cfa9221226" + }, + "links": { + "files": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/8bfe6b05-f093-4ab4-be7d-180374b751ca/files" + }, + "properties": { + "wordErrorRate2": 4.62, + "wordErrorRate1": 4.6, + "sentenceErrorRate2": 66.7, + "sentenceCount2": 3, + "wordCount2": 173, + "correctWordCount2": 166, + "wordSubstitutionCount2": 7, + "wordDeletionCount2": 0, + "wordInsertionCount2": 1, + "sentenceErrorRate1": 66.7, + "sentenceCount1": 3, + "wordCount1": 174, + "correctWordCount1": 166, + "wordSubstitutionCount1": 7, + "wordDeletionCount1": 1, + "wordInsertionCount1": 0 + }, + "lastActionDateTime": "2022-05-20T16:42:56Z", + "status": "Succeeded", + "createdDateTime": "2022-05-20T16:42:43Z", + "locale": "en-US", + "displayName": "My Inspection", + "description": "My Inspection Description" +} +``` + +::: zone-end + +## Compare transcription with audio + +You can inspect the transcription output by each model tested, against the audio input dataset. If you included two models in the test, you can compare their transcription quality side by side. + +::: zone pivot="speech-studio" + +To review the quality of transcriptions: + +1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech). +1. Select **Custom Speech** > Your project name > **Test models**. +1. Select the link by test name. +1. Play an audio file while the reading the corresponding transcription by a model. + +If the test dataset included multiple audio files, you'll see multiple rows in the table. If you included two models in the test, transcriptions are shown in side-by-side columns. Transcription differences between models are shown in blue text font. + +:::image type="content" source="media/custom-speech/custom-speech-inspect-compare.png" alt-text="Screenshot of comparing transcriptions by two models"::: + +::: zone-end + +::: zone pivot="speech-cli" + +The audio test dataset, transcriptions, and models tested are returned in the [test results](#get-test-results). If only one model was tested, the `model1` value will match `model2`, and the `transcription1` value will match `transcription2`. + +To review the quality of transcriptions: +1. Download the audio test dataset, unless you already have a copy. +1. Download the output transcriptions. +1. Play an audio file while the reading the corresponding transcription by a model. + +If you're comparing quality between two models, pay particular attention to differences between each model's transcriptions. 
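+
+If you want to script these downloads, one approach is to request the evaluation's `files` link that's returned in the [test results](#get-test-results). The following sketch calls the Speech-to-text REST API v3.0 directly with curl, using the evaluation ID from the earlier example, and it assumes that each file entry in the response includes a downloadable content URL. Replace `YourSubscriptionKey` and `YourServiceRegion` with your own values.
+
+```azurecli-interactive
+# List the files (such as transcription results) that the evaluation produced.
+curl -v -X GET "https://YourServiceRegion.api.cognitive.microsoft.com/speechtotext/v3.0/evaluations/8bfe6b05-f093-4ab4-be7d-180374b751ca/files" -H "Ocp-Apim-Subscription-Key: YourSubscriptionKey"
+```
+
+You can then download each listed file from the URL in its entry and compare it with the corresponding audio.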
+
+::: zone-end
+::: zone pivot="rest-api"
 
-## Side-by-side model comparisons
+The audio test dataset, transcriptions, and models tested are returned in the [test results](#get-test-results). If only one model was tested, the `model1` value will match `model2`, and the `transcription1` value will match `transcription2`.
 
-When the test status is *Succeeded*, select the test item name to see details of the test. This detail page lists all the utterances in your dataset, and shows the recognition results of the two models you are comparing.
+To review the quality of transcriptions:
+1. Download the audio test dataset, unless you already have a copy.
+1. Download the output transcriptions.
+1. Play an audio file while reading the corresponding transcription by a model.
 
-To help inspect the side-by-side comparison, you can toggle various error types including insertion, deletion, and substitution. By listening to the audio and comparing recognition results in each column (showing human-labeled transcription and the results of two speech-to-text models), you can decide which model meets your needs and where improvements are needed.
+If you're comparing quality between two models, pay particular attention to differences between each model's transcriptions.
 
-Side-by-side model testing is useful to validate which speech recognition model is best for an application. For an objective measure of accuracy, requiring transcribed audio, see [Test model quantitatively](how-to-custom-speech-evaluate-data.md).
+::: zone-end
 
 ## Next steps
 
diff --git a/articles/cognitive-services/Speech-Service/how-to-custom-speech-model-and-endpoint-lifecycle.md b/articles/cognitive-services/Speech-Service/how-to-custom-speech-model-and-endpoint-lifecycle.md
index 2f35cc08e9d77..275def1e4aab2 100644
--- a/articles/cognitive-services/Speech-Service/how-to-custom-speech-model-and-endpoint-lifecycle.md
+++ b/articles/cognitive-services/Speech-Service/how-to-custom-speech-model-and-endpoint-lifecycle.md
@@ -10,15 +10,14 @@ ms.subservice: speech-service
 ms.topic: conceptual
 ms.date: 05/08/2022
 ms.author: heikora
+zone_pivot_groups: speech-studio-cli-rest
 ---
 
 # Custom Speech model lifecycle
 
-Speech recognition models that are provided by Microsoft are referred to as base models. When you make a speech recognition request, the current base model for each [supported language](language-support.md) is used by default. Base models are updated periodically to improve accuracy and quality.
+You can use a Custom Speech model for some time after it's deployed to your custom endpoint. But when new base models are made available, the older models expire. You must periodically recreate and train your custom model from the latest base model to take advantage of the improved accuracy and quality.
 
-You can use a custom model for some time after it's trained and deployed. You must periodically recreate and train your custom model from the latest base model to take advantage of the improved accuracy and quality.
-
-Some key terms related to the model lifecycle include:
+Here are some key terms related to the model lifecycle:
 
 * **Training**: Taking a base model and customizing it to your domain/scenario by using text data and/or audio data. In some contexts such as the REST API properties, training is also referred to as **adaptation**.
 * **Transcription**: Using a model and performing speech recognition (decoding audio into text).
@@ -26,7 +25,7 @@ Some key terms related to the model lifecycle include: ## Expiration timeline -When new models are made available, the older models are retired. Here are timelines for model adaptation and transcription expiration: +Here are timelines for model adaptation and transcription expiration: - Training is available for one year after the quarter when the base model was created by Microsoft. - Transcription with a base model is available for two years after the quarter when the base model was created by Microsoft. @@ -34,39 +33,55 @@ When new models are made available, the older models are retired. Here are timel In this context, quarters end on January 15th, April 15th, July 15th, and October 15th. -## What happens when models expire and how to update them +## What to do when a model expires + +When a custom model or base model expires, it is no longer available for transcription. You can change the model that is used by your custom speech endpoint without downtime. + +|Transcription route |Expired model result |Recommendation | +|---------|---------|---------| +|Custom endpoint|Speech recognition requests will fall back to the most recent base model for the same [locale](language-support.md). You will get results, but recognition might not accurately transcribe your domain data. |Update the endpoint's model as described in the [Deploy a Custom Speech model](how-to-custom-speech-deploy-model.md) guide. | +|Batch transcription |[Batch transcription](batch-transcription.md) requests for expired models will fail with a 4xx error. |In each [CreateTranscription](https://westus2.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/CreateTranscription) REST API request body, set the `model` property to a base model or custom model that hasn't yet expired. Otherwise don't include the `model` property to always use the latest base model. | + + +## Get base model expiration dates + +::: zone pivot="speech-studio" -When a custom model or base model expires, typically speech recognition requests will fall back to the most recent base model for the same language. In this case, your implementation won't break, but recognition might not accurately transcribe your domain data. +The last date that you could use the base model for training was shown when you created the custom model. For more information, see [Train a Custom Speech model](how-to-custom-speech-train-model.md). -You can change the model that is used by your custom speech endpoint without downtime: - - In the Speech Studio, go to your Custom Speech project and select **Deploy models**. Select the endpoint name to see its details, and then select **Change model**. Choose a new model and select **Done**. - - Update the endpoint's model property via the [`UpdateEndpoint`](https://westus2.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/UpdateEndpoint) REST API. +Follow these instructions to get the transcription expiration date for a base model: -[Batch transcription](batch-transcription.md) requests for retired models will fail with a 4xx error. In the [`CreateTranscription`](https://westus2.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/CreateTranscription) REST API request body, update the `model` parameter to use a base model or custom model that hasn't yet retired. Otherwise you can remove the `model` entry from the JSON to always use the latest base model. +1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech). +1. 
Select **Custom Speech** > Your project name > **Deploy models**. +1. The expiration date for the model is shown in the **Expiration** column. This is the last date that you can use the model for transcription. -## Find out when a model expires -You can get the adaptation and transcription expiration dates for a model via the Speech Studio and REST API. + :::image type="content" source="media/custom-speech/custom-speech-model-expiration.png" alt-text="Screenshot of the deploy models page that shows the transcription expiration date."::: -### Model expiration dates via Speech Studio -Here's an example adaptation expiration date shown on the train new model dialog: -:::image type="content" source="media/custom-speech/custom-speech-adaptation-end-date.png" alt-text="Screenshot of the train new model dialog that shows the adaptation expiration date."::: +::: zone-end -Here's an example transcription expiration date shown on the deployment detail page: +::: zone pivot="speech-cli" -:::image type="content" source="media/custom-speech/custom-speech-deploy-details.png" alt-text="Screenshot of the train new model dialog that shows the transcription expiration date."::: +To get the training and transcription expiration dates for a base model, use the `spx csr model status` command. Construct the request parameters according to the following instructions: -### Model expiration dates via REST API -You can also check the expiration dates via the [`GetBaseModel`](https://westus2.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetBaseModel) and [`GetModel`](https://westus2.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetModel) REST API. The `deprecationDates` property in the JSON response includes the adaptation and transcription expiration dates for each model +- Set the `url` parameter to the URI of the base model that you want to get. You can run the `spx csr list --base` command to get available base models for all locales. -Here's an example base model retrieved via [`GetBaseModel`](https://westus2.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetBaseModel): +Here's an example Speech CLI command to get the training and transcription expiration dates for a base model: + +```azurecli-interactive +spx csr model status --model https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/b0bbc1e0-78d5-468b-9b7c-a5a43b2bb83f +``` + +In the response, take note of the date in the `adaptationDateTime` property. This is the last date that you can use the base model for training. Also take note of the date in the `transcriptionDateTime` property. This is the last date that you can use the base model for transcription. + +You should receive a response body in the following format: ```json { - "self": "https://westus2.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/e065c68b-21d3-4b28-ae61-eb4c7e797789", + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/1aae1070-7972-47e9-a977-87e3b05c457d", "datasets": [], "links": { - "manifest": "https://westus2.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/e065c68b-21d3-4b28-ae61-eb4c7e797789/manifest" + "manifest": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/1aae1070-7972-47e9-a977-87e3b05c457d/manifest" }, "properties": { "deprecationDates": { @@ -74,53 +89,191 @@ Here's an example base model retrieved via [`GetBaseModel`](https://westus2.dev. 
"transcriptionDateTime": "2024-01-15T00:00:00Z" } }, - "lastActionDateTime": "2021-10-29T07:19:01Z", + "lastActionDateTime": "2022-05-06T10:52:02Z", "status": "Succeeded", - "createdDateTime": "2021-10-29T06:58:14Z", + "createdDateTime": "2021-10-13T00:00:00Z", "locale": "en-US", - "displayName": "20211012 (CLM public preview)", + "displayName": "20210831 + Audio file adaptation", "description": "en-US base model" } ``` -Here's an example custom model retrieved via [`GetModel`](https://westus2.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetBaseModel). The custom model was trained from the previously mentioned base model (`e065c68b-21d3-4b28-ae61-eb4c7e797789`): +For Speech CLI help with models, run the following command: + +```azurecli-interactive +spx help csr model +``` + +::: zone-end + +::: zone pivot="rest-api" + +To get the training and transcription expiration dates for a base model, use the [GetBaseModel](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetBaseModel) operation of the [Speech-to-text REST API v3.0](rest-speech-to-text.md). You can make a [GetBaseModels](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetBaseModels) request to get available base models for all locales. + +Make an HTTP GET request using the model URI as shown in the following example. Replace `BaseModelId` with your model ID, replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region. + +```azurecli-interactive +curl -v -X GET "https://YourServiceRegion.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/BaseModelId" -H "Ocp-Apim-Subscription-Key: YourSubscriptionKey" +``` + +In the response, take note of the date in the `adaptationDateTime` property. This is the last date that you can use the base model for training. Also take note of the date in the `transcriptionDateTime` property. This is the last date that you can use the base model for transcription. + +You should receive a response body in the following format: ```json { - "self": "https://westus2.api.cognitive.microsoft.com/speechtotext/v3.0/models/{custom-model-id}", + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/1aae1070-7972-47e9-a977-87e3b05c457d", + "datasets": [], + "links": { + "manifest": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/1aae1070-7972-47e9-a977-87e3b05c457d/manifest" + }, + "properties": { + "deprecationDates": { + "adaptationDateTime": "2023-01-15T00:00:00Z", + "transcriptionDateTime": "2024-01-15T00:00:00Z" + } + }, + "lastActionDateTime": "2022-05-06T10:52:02Z", + "status": "Succeeded", + "createdDateTime": "2021-10-13T00:00:00Z", + "locale": "en-US", + "displayName": "20210831 + Audio file adaptation", + "description": "en-US base model" +} +``` + +::: zone-end + + +## Get custom model expiration dates + +::: zone pivot="speech-studio" + +Follow these instructions to get the transcription expiration date for a custom model: + +1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech). +1. Select **Custom Speech** > Your project name > **Train custom models**. +1. The expiration date the custom model is shown in the **Expiration** column. This is the last date that you can use the custom model for transcription. Base models are not shown on the **Train custom models** page. 
+ + :::image type="content" source="media/custom-speech/custom-speech-custom-model-expiration.png" alt-text="Screenshot of the train custom models page that shows the transcription expiration date."::: + +You can also follow these instructions to get the transcription expiration date for a custom model: + +1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech). +1. Select **Custom Speech** > Your project name > **Deploy models**. +1. The expiration date for the model is shown in the **Expiration** column. This is the last date that you can use the model for transcription. + + :::image type="content" source="media/custom-speech/custom-speech-model-expiration.png" alt-text="Screenshot of the deploy models page that shows the transcription expiration date."::: + + +::: zone-end + +::: zone pivot="speech-cli" + +To get the transcription expiration date for your custom model, use the `spx csr model status` command. Construct the request parameters according to the following instructions: + +- Set the `url` parameter to the URI of the model that you want to get. Replace `YourModelId` with your model ID and replace `YourServiceRegion` with your Speech resource region. + +Here's an example Speech CLI command to get the transcription expiration date for your custom model: + +```azurecli-interactive +spx csr model status --model https://YourServiceRegion.api.cognitive.microsoft.com/speechtotext/v3.0/models/YourModelId +``` + +In the response, take note of the date in the `transcriptionDateTime` property. This is the last date that you can use your custom model for transcription. The `adaptationDateTime` property is not applicable, since custom models are not used to train other custom models. + +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/86c4ebd7-d70d-4f67-9ccc-84609504ffc7", "baseModel": { - "self": "https://westus2.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/e065c68b-21d3-4b28-ae61-eb4c7e797789" + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/1aae1070-7972-47e9-a977-87e3b05c457d" }, "datasets": [ { - "self": "https://westus2.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/f1a72db2-1e89-496d-859f-f1af7a363bb5" + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/69e46263-ab10-4ab4-abbe-62e370104d95" } ], "links": { - "manifest": "https://westus2.api.cognitive.microsoft.com/speechtotext/v3.0/models/{custom-model-id}/manifest", - "copyTo": "https://westus2.api.cognitive.microsoft.com/speechtotext/v3.0/models/{custom-model-id}/copyto" + "manifest": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/86c4ebd7-d70d-4f67-9ccc-84609504ffc7/manifest", + "copyTo": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/86c4ebd7-d70d-4f67-9ccc-84609504ffc7/copyto" }, "project": { - "self": "https://westus2.api.cognitive.microsoft.com/speechtotext/v3.0/projects/ee3b1c83-c194-490c-bdb1-b6b1a6be6f59" + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/5d25e60a-7f4a-4816-afd9-783bb8daccfc" }, "properties": { "deprecationDates": { "adaptationDateTime": "2023-01-15T00:00:00Z", - "transcriptionDateTime": "2024-04-15T00:00:00Z" + "transcriptionDateTime": "2024-07-15T00:00:00Z" } }, - "lastActionDateTime": "2022-02-27T13:03:54Z", + "lastActionDateTime": "2022-05-21T13:21:01Z", "status": "Succeeded", - "createdDateTime": "2022-02-27T13:03:46Z", + 
"createdDateTime": "2022-05-22T16:37:01Z", "locale": "en-US", - "displayName": "Custom model A", - "description": "My first custom model", - "customProperties": { - "PortalAPIVersion": "3" - } + "displayName": "My Model", + "description": "My Model Description" } ``` +For Speech CLI help with models, run the following command: + +```azurecli-interactive +spx help csr model +``` + +::: zone-end + +::: zone pivot="rest-api" + +To get the transcription expiration date for your custom model, use the [GetModel](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetModel) operation of the [Speech-to-text REST API v3.0](rest-speech-to-text.md). + +Make an HTTP GET request using the model URI as shown in the following example. Replace `YourModelId` with your model ID, replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region. + +```azurecli-interactive +curl -v -X GET "https://YourServiceRegion.api.cognitive.microsoft.com/speechtotext/v3.0/models/YourModelId" -H "Ocp-Apim-Subscription-Key: YourSubscriptionKey" +``` + +In the response, take note of the date in the `transcriptionDateTime` property. This is the last date that you can use your custom model for transcription. The `adaptationDateTime` property is not applicable, since custom models are not used to train other custom models. + +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/86c4ebd7-d70d-4f67-9ccc-84609504ffc7", + "baseModel": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/1aae1070-7972-47e9-a977-87e3b05c457d" + }, + "datasets": [ + { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/69e46263-ab10-4ab4-abbe-62e370104d95" + } + ], + "links": { + "manifest": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/86c4ebd7-d70d-4f67-9ccc-84609504ffc7/manifest", + "copyTo": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/86c4ebd7-d70d-4f67-9ccc-84609504ffc7/copyto" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/5d25e60a-7f4a-4816-afd9-783bb8daccfc" + }, + "properties": { + "deprecationDates": { + "adaptationDateTime": "2023-01-15T00:00:00Z", + "transcriptionDateTime": "2024-07-15T00:00:00Z" + } + }, + "lastActionDateTime": "2022-05-21T13:21:01Z", + "status": "Succeeded", + "createdDateTime": "2022-05-22T16:37:01Z", + "locale": "en-US", + "displayName": "My Model", + "description": "My Model Description" +} +``` + +::: zone-end + ## Next steps - [Train a model](how-to-custom-speech-train-model.md) diff --git a/articles/cognitive-services/Speech-Service/how-to-custom-speech-train-model.md b/articles/cognitive-services/Speech-Service/how-to-custom-speech-train-model.md index 0333f3923dbbb..72ac272cf8714 100644 --- a/articles/cognitive-services/Speech-Service/how-to-custom-speech-train-model.md +++ b/articles/cognitive-services/Speech-Service/how-to-custom-speech-train-model.md @@ -11,27 +11,32 @@ ms.topic: how-to ms.date: 05/08/2022 ms.author: eur ms.custom: ignite-fall-2021 +zone_pivot_groups: speech-studio-cli-rest --- # Train a Custom Speech model -In this article, you'll learn how to train a Custom Speech model to improve recognition accuracy from the Microsoft base model. Training a model is typically an iterative process. 
You will first select a base model that is the starting point for a new model. You train a model with [datasets](./how-to-custom-speech-test-and-train.md) that can include text and audio, and then you test and refine the model with more data.
+In this article, you'll learn how to train a custom model to improve recognition accuracy from the Microsoft base model. The speech recognition accuracy and quality of a Custom Speech model will remain consistent, even when a new base model is released.
 
-You can use a custom model for some time after it's trained and [deployed](how-to-custom-speech-deploy-model.md). You must periodically recreate and adapt your custom model from the latest base model to take advantage of the improved accuracy and quality. For more information, see [Model and endpoint lifecycle](./how-to-custom-speech-model-and-endpoint-lifecycle.md).
+Training a model is typically an iterative process. You will first select a base model that is the starting point for a new model. You train a model with [datasets](./how-to-custom-speech-test-and-train.md) that can include text and audio, and then you test. If the recognition quality or accuracy doesn't meet your requirements, you can create a new model with additional or modified training data, and then test again.
+
+You can use a custom model for a limited time after it's trained. You must periodically recreate and adapt your custom model from the latest base model to take advantage of the improved accuracy and quality. For more information, see [Model and endpoint lifecycle](./how-to-custom-speech-model-and-endpoint-lifecycle.md).
 
 > [!NOTE]
 > You pay to use Custom Speech models, but you are not charged for training a model.
 
-## Train the model
+If you plan to train a model with audio data, use a Speech resource in a [region](regions.md#speech-to-text-pronunciation-assessment-text-to-speech-and-translation) with dedicated hardware for training. After a model is trained, you can [copy it to a Speech resource](#copy-a-model) in another region as needed.
+
+## Create a model
 
-If you plan to train a model with audio data, use a Speech resource in a [region](regions.md#speech-to-text-pronunciation-assessment-text-to-speech-and-translation) with dedicated hardware for training.
+::: zone pivot="speech-studio"
 
 After you've uploaded [training datasets](./how-to-custom-speech-test-and-train.md), follow these instructions to start training your model:
 
-1. Sign in to the [Speech Studio](https://speech.microsoft.com/customspeech).
+1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech).
 1. Select **Custom Speech** > Your project name > **Train custom models**.
 1. Select **Train a new model**.
-1. On the **Select a baseline model** page, select a base model, and then select **Next**. If you aren't sure, select the most recent model from the top of the list.
+1. On the **Select a baseline model** page, select a base model, and then select **Next**. If you aren't sure, select the most recent model from the top of the list. The name of the base model corresponds to the date when it was released in YYYYMMDD format. The customization capabilities of the base model are listed in parentheses after the model name in Speech Studio.
 
 > [!IMPORTANT]
 > Take note of the **Expiration for adaptation** date. This is the last date that you can use the base model for training. For more information, see [Model and endpoint lifecycle](./how-to-custom-speech-model-and-endpoint-lifecycle.md).
@@ -40,11 +45,311 @@ After you've uploaded [training datasets](./how-to-custom-speech-test-and-train. 1. Enter a name and description for your custom model, and then select **Next**. 1. Optionally, check the **Add test in the next step** box. If you skip this step, you can run the same tests later. For more information, see [Test recognition quality](how-to-custom-speech-inspect-data.md) and [Test model quantitatively](how-to-custom-speech-evaluate-data.md). 1. Select **Save and close** to kick off the build for your custom model. +1. Return to the **Train custom models** page. + + > [!IMPORTANT] + > Take note of the **Expiration** date. This is the last date that you can use your custom model for speech recognition. For more information, see [Model and endpoint lifecycle](./how-to-custom-speech-model-and-endpoint-lifecycle.md). + +::: zone-end + +::: zone pivot="speech-cli" + +To create a model with datasets for training, use the `spx csr model create` command. Construct the request parameters according to the following instructions: + +- Set the `project` parameter to the ID of an existing project. This is recommended so that you can also view and manage the model in Speech Studio. You can run the `spx csr project list` command to get available projects. +- Set the required `dataset` parameter to the ID of a dataset that you want used for training. To specify multiple datasets, set the `datasets` (plural) parameter and separate the IDs with a semicolon. +- Set the required `language` parameter. The dataset locale must match the locale of the project. The locale can't be changed later. The Speech CLI `language` parameter corresponds to the `locale` property in the JSON request and response. +- Set the required `name` parameter. This is the name that will be displayed in the Speech Studio. The Speech CLI `name` parameter corresponds to the `displayName` property in the JSON request and response. +- Optionally, you can set the `baseModel` parameter. If you don't specify the `baseModel`, the default base model for the locale is used. + +Here's an example Speech CLI command that creates a model with datasets for training: + +```azurecli-interactive +spx csr model create --project YourProjectId --name "My Model" --description "My Model Description" --dataset YourDatasetId --language "en-US" +``` +> [!NOTE] +> In this example, the `baseModel` isn't set, so the default base model for the locale is used. The base model URI is returned in the response. + +You should receive a response body in the following format: -On the main **Train custom models** page, details about the new model are displayed in a table, such as name, description, status (*Processing*, *Succeeded*, or *Failed*), and expiration date. 
+```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/86c4ebd7-d70d-4f67-9ccc-84609504ffc7", + "baseModel": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/1aae1070-7972-47e9-a977-87e3b05c457d" + }, + "datasets": [ + { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/69e46263-ab10-4ab4-abbe-62e370104d95" + } + ], + "links": { + "manifest": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/86c4ebd7-d70d-4f67-9ccc-84609504ffc7/manifest", + "copyTo": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/86c4ebd7-d70d-4f67-9ccc-84609504ffc7/copyto" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/5d25e60a-7f4a-4816-afd9-783bb8daccfc" + }, + "properties": { + "deprecationDates": { + "adaptationDateTime": "2023-01-15T00:00:00Z", + "transcriptionDateTime": "2024-07-15T00:00:00Z" + } + }, + "lastActionDateTime": "2022-05-21T13:21:01Z", + "status": "NotStarted", + "createdDateTime": "2022-05-21T13:21:01Z", + "locale": "en-US", + "displayName": "My Model", + "description": "My Model Description" +} +``` > [!IMPORTANT] -> Take note of the date in the **Expiration** column. This is the last date that you can use your custom model for speech recognition. For more information, see [Model and endpoint lifecycle](./how-to-custom-speech-model-and-endpoint-lifecycle.md). +> Take note of the date in the `adaptationDateTime` property. This is the last date that you can use the base model for training. For more information, see [Model and endpoint lifecycle](./how-to-custom-speech-model-and-endpoint-lifecycle.md). +> +> Take note of the date in the `transcriptionDateTime` property. This is the last date that you can use your custom model for speech recognition. For more information, see [Model and endpoint lifecycle](./how-to-custom-speech-model-and-endpoint-lifecycle.md). + +The top-level `self` property in the response body is the model's URI. Use this URI to get details about the model's project, manifest, and deprecation dates. You also use this URI to update or delete a model. + +For Speech CLI help with models, run the following command: + +```azurecli-interactive +spx help csr model +``` + +::: zone-end + +::: zone pivot="rest-api" + +To create a model with datasets for training, use the [CreateModel](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/CreateModel) operation of the [Speech-to-text REST API v3.0](rest-speech-to-text.md). Construct the request body according to the following instructions: + +- Set the `project` property to the URI of an existing project. This is recommended so that you can also view and manage the model in Speech Studio. You can make a [GetProjects](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetProjects) request to get available projects. +- Set the required `datasets` property to the URI of the datasets that you want used for training. +- Set the required `locale` property. The model locale must match the locale of the project and base model. The locale can't be changed later. +- Set the required `displayName` property. This is the name that will be displayed in the Speech Studio. +- Optionally, you can set the `baseModel` property. If you don't specify the `baseModel`, the default base model for the locale is used. + +Make an HTTP POST request using the URI as shown in the following example. 
Replace `YourSubscriptionKey` with your Speech resource key, replace `YourServiceRegion` with your Speech resource region, and set the request body properties as previously described. + +```azurecli-interactive +curl -v -X POST -H "Ocp-Apim-Subscription-Key: YourSubscriptionKey" -H "Content-Type: application/json" -d '{ + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/5d25e60a-7f4a-4816-afd9-783bb8daccfc" + }, + "displayName": "My Model", + "description": "My Model Description", + "baseModel": null, + "datasets": [ + { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/69e46263-ab10-4ab4-abbe-62e370104d95" + } + ], + "locale": "en-US" +}' "https://YourServiceRegion.api.cognitive.microsoft.com/speechtotext/v3.0/models" +``` + +> [!NOTE] +> In this example, the `baseModel` isn't set, so the default base model for the locale is used. The base model URI is returned in the response. + +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/86c4ebd7-d70d-4f67-9ccc-84609504ffc7", + "baseModel": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/1aae1070-7972-47e9-a977-87e3b05c457d" + }, + "datasets": [ + { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/69e46263-ab10-4ab4-abbe-62e370104d95" + } + ], + "links": { + "manifest": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/86c4ebd7-d70d-4f67-9ccc-84609504ffc7/manifest", + "copyTo": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/models/86c4ebd7-d70d-4f67-9ccc-84609504ffc7/copyto" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/5d25e60a-7f4a-4816-afd9-783bb8daccfc" + }, + "properties": { + "deprecationDates": { + "adaptationDateTime": "2023-01-15T00:00:00Z", + "transcriptionDateTime": "2024-07-15T00:00:00Z" + } + }, + "lastActionDateTime": "2022-05-21T13:21:01Z", + "status": "NotStarted", + "createdDateTime": "2022-05-21T13:21:01Z", + "locale": "en-US", + "displayName": "My Model", + "description": "My Model Description" +} +``` + +> [!IMPORTANT] +> Take note of the date in the `adaptationDateTime` property. This is the last date that you can use the base model for training. For more information, see [Model and endpoint lifecycle](./how-to-custom-speech-model-and-endpoint-lifecycle.md). +> +> Take note of the date in the `transcriptionDateTime` property. This is the last date that you can use your custom model for speech recognition. For more information, see [Model and endpoint lifecycle](./how-to-custom-speech-model-and-endpoint-lifecycle.md). + +The top-level `self` property in the response body is the model's URI. Use this URI to [get](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetModel) details about the model's project, manifest, and deprecation dates. You also use this URI to [update](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/UpdateModel) or [delete](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/DeleteModel) the model. + +::: zone-end + + +## Copy a model + +You can copy a model to another project that uses the same locale. 
For example, after a model is trained with audio data in a [region](regions.md#speech-to-text-pronunciation-assessment-text-to-speech-and-translation) with dedicated hardware for training, you can copy it to a Speech resource in another region as needed. + +::: zone pivot="speech-studio" + +Follow these instructions to copy a model to a project in another region: + +1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech). +1. Select **Custom Speech** > Your project name > **Train custom models**. +1. Select **Copy to**. +1. On the **Copy speech model** page, select a target region where you want to copy the model. + :::image type="content" source="./media/custom-speech/custom-speech-copy-to-zoom.png" alt-text="Screenshot of a phrase list applied in Speech Studio." lightbox="./media/custom-speech/custom-speech-copy-to-full.png"::: +1. Select a Speech resource in the target region, or create a new Speech resource. +1. Select a project where you want to copy the model, or create a new project. +1. Select **Copy**. + +After the model is successfully copied, you'll be notified and can view it in the target project. + +::: zone-end + +::: zone pivot="speech-cli" + +Copying a model directly to a project in another region is not supported with the Speech CLI. You can copy a model to a project in another region using the [Speech Studio](https://aka.ms/speechstudio/customspeech) or [Speech-to-text REST API v3.0](rest-speech-to-text.md). + +::: zone-end + +::: zone pivot="rest-api" + +To copy a model to another Speech resource, use the [CopyModel](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/CopyModel) operation of the [Speech-to-text REST API v3.0](rest-speech-to-text.md). Construct the request body according to the following instructions: + +- Set the required `targetSubscriptionKey` property to the key of the destination Speech resource. + +Make an HTTP POST request using the URI as shown in the following example. Use the region and URI of the model you want to copy from. Replace `YourModelId` with the model ID, replace `YourSubscriptionKey` with your Speech resource key, replace `YourServiceRegion` with your Speech resource region, and set the request body properties as previously described. + +```azurecli-interactive +curl -v -X POST -H "Ocp-Apim-Subscription-Key: YourSubscriptionKey" -H "Content-Type: application/json" -d '{ + "targetSubscriptionKey": "ModelDestinationSpeechResourceKey" +} ' "https://YourServiceRegion.api.cognitive.microsoft.com/speechtotext/v3.0/models/YourModelId/copyto" +``` + +> [!NOTE] +> Only the `targetSubscriptionKey` property in the request body has information about the destination Speech resource. 
+ +You should receive a response body in the following format: + +```json +{ + "self": "https://westus2.api.cognitive.microsoft.com/speechtotext/v3.0/models/9df35ddb-edf9-4e91-8d1a-576d09aabdae", + "baseModel": { + "self": "https://westus2.api.cognitive.microsoft.com/speechtotext/v3.0/models/base/eb5450a7-3ca2-461a-b2d7-ddbb3ad96540" + }, + "links": { + "manifest": "https://westus2.api.cognitive.microsoft.com/speechtotext/v3.0/models/9df35ddb-edf9-4e91-8d1a-576d09aabdae/manifest", + "copyTo": "https://westus2.api.cognitive.microsoft.com/speechtotext/v3.0/models/9df35ddb-edf9-4e91-8d1a-576d09aabdae/copyto" + }, + "properties": { + "deprecationDates": { + "adaptationDateTime": "2023-01-15T00:00:00Z", + "transcriptionDateTime": "2024-07-15T00:00:00Z" + } + }, + "lastActionDateTime": "2022-05-22T23:15:27Z", + "status": "NotStarted", + "createdDateTime": "2022-05-22T23:15:27Z", + "locale": "en-US", + "displayName": "My Model", + "description": "My Model Description", + "customProperties": { + "PortalAPIVersion": "3", + "Purpose": "", + "VadKind": "None", + "ModelClass": "None", + "UsesHalide": "False", + "IsDynamicGrammarSupported": "False" + } +} +``` + +::: zone-end + + +## Connect a model + +Models might have been copied from one project by using the Speech CLI or REST API without being connected to another project. Connecting a model is a matter of updating the model with a reference to the project. + +::: zone pivot="speech-studio" + +If you're prompted in Speech Studio, you can connect these models to a project by selecting the **Connect** button. + +:::image type="content" source="./media/custom-speech/custom-speech-connect-model.png" alt-text="Screenshot of the connect training page that shows models that can be connected to the current project."::: + +::: zone-end + +::: zone pivot="speech-cli" + +To connect a model to a project, use the `spx csr model update` command. Construct the request parameters according to the following instructions: + +- Set the `project` parameter to the ID of an existing project. This is recommended so that you can also view and manage the model in Speech Studio. You can run the `spx csr project list` command to get available projects. +- Set the required `model` parameter to the ID of the model that you want to connect to the project. + +Here's an example Speech CLI command that connects a model to a project: + +```azurecli-interactive +spx csr model update --model YourModelId --project YourProjectId +``` + +You should receive a response body in the following format: + +```json +{ + "project": { + "self": "https://westus2.api.cognitive.microsoft.com/speechtotext/v3.0/projects/e6ffdefd-9517-45a9-a89c-7b5028ed0e56" + } +} +``` + +For Speech CLI help with models, run the following command: + +```azurecli-interactive +spx help csr model +``` + +::: zone-end + +::: zone pivot="rest-api" + +To connect a new model to a project of the Speech resource where the model was copied, use the [UpdateModel](https://westus2.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/UpdateModel) operation of the [Speech-to-text REST API v3.0](rest-speech-to-text.md). Construct the request body according to the following instructions: + +- Set the required `project` property to the URI of an existing project. This is recommended so that you can also view and manage the model in Speech Studio. You can make a [GetProjects](https://westus2.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetProjects) request to get available projects. 
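+
+For example, here's a minimal sketch of a [GetProjects](https://westus2.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetProjects) request that you can use to look up the project URI. Replace `YourSubscriptionKey` with your Speech resource key and `YourServiceRegion` with your Speech resource region; the `self` property of each project in the response is the URI that you use in the request body.
+
+```azurecli-interactive
+# List the projects for the Speech resource so that you can copy a project URI (self) into the request body.
+curl -v -X GET "https://YourServiceRegion.api.cognitive.microsoft.com/speechtotext/v3.0/projects" -H "Ocp-Apim-Subscription-Key: YourSubscriptionKey"
+```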
+ +Make an HTTP PATCH request using the URI as shown in the following example. Use the URI of the new model. You can get the new model ID from the `self` property of the [CopyModel](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/CopyModel) response body. Replace `YourModelId` with the ID of the copied model, replace `YourSubscriptionKey` with your Speech resource key, replace `YourServiceRegion` with your Speech resource region, and set the request body properties as previously described. + +```azurecli-interactive +curl -v -X PATCH -H "Ocp-Apim-Subscription-Key: YourSubscriptionKey" -H "Content-Type: application/json" -d '{ + "project": { + "self": "https://westus2.api.cognitive.microsoft.com/speechtotext/v3.0/projects/e6ffdefd-9517-45a9-a89c-7b5028ed0e56" + } +}' "https://YourServiceRegion.api.cognitive.microsoft.com/speechtotext/v3.0/models/YourModelId" +``` + +You should receive a response body in the following format: + +```json +{ + "project": { + "self": "https://westus2.api.cognitive.microsoft.com/speechtotext/v3.0/projects/e6ffdefd-9517-45a9-a89c-7b5028ed0e56" + } +} +``` + +::: zone-end + ## Next steps diff --git a/articles/cognitive-services/Speech-Service/how-to-custom-speech-transcription-editor.md b/articles/cognitive-services/Speech-Service/how-to-custom-speech-transcription-editor.md index 19f22e3248131..93a6007926cda 100644 --- a/articles/cognitive-services/Speech-Service/how-to-custom-speech-transcription-editor.md +++ b/articles/cognitive-services/Speech-Service/how-to-custom-speech-transcription-editor.md @@ -31,7 +31,7 @@ Datasets in the **Training and testing dataset** tab can't be updated. You can i To import a dataset to the Editor, follow these steps: -1. Sign in to the [Speech Studio](https://speech.microsoft.com/customspeech). +1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech). 1. Select **Custom Speech** > Your project name > **Speech datasets** > **Editor**. 1. Select **Import data** 1. Select datasets. You can select audio data only, audio + human-labeled data, or both. For audio-only data, you can use the default models to automatically generate machine transcription after importing to the editor. @@ -47,7 +47,7 @@ Once a dataset has been imported to the Editor, you can start editing the datase To edit a dataset's transcription in the Editor, follow these steps: -1. Sign in to the [Speech Studio](https://speech.microsoft.com/customspeech). +1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech). 1. Select **Custom Speech** > Your project name > **Speech datasets** > **Editor**. 1. Select the link to a dataset by name. 1. From the **Audio + text files** table, select the link to an audio file by name. @@ -63,7 +63,7 @@ Datasets in the Editor can be exported to the **Training and testing dataset** t To export datasets from the Editor, follow these steps: -1. Sign in to the [Speech Studio](https://speech.microsoft.com/customspeech). +1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech). 1. Select **Custom Speech** > Your project name > **Speech datasets** > **Editor**. 1. Select the link to a dataset by name. 1. Select one or more rows from the **Audio + text files** table. 
diff --git a/articles/cognitive-services/Speech-Service/how-to-custom-speech-upload-data.md b/articles/cognitive-services/Speech-Service/how-to-custom-speech-upload-data.md index 03376f1b89bcd..81ccc6a5ccdb1 100644 --- a/articles/cognitive-services/Speech-Service/how-to-custom-speech-upload-data.md +++ b/articles/cognitive-services/Speech-Service/how-to-custom-speech-upload-data.md @@ -10,17 +10,23 @@ ms.subservice: speech-service ms.topic: how-to ms.date: 05/08/2022 ms.author: eur +zone_pivot_groups: speech-studio-cli-rest --- # Upload training and testing datasets for Custom Speech You need audio or text data for testing the accuracy of Microsoft speech recognition or training your custom models. For information about the data types supported for testing or training your model, see [Training and testing datasets](how-to-custom-speech-test-and-train.md). -## Upload datasets in Speech Studio +> [!TIP] +> You can also use the [online transcription editor](how-to-custom-speech-transcription-editor.md) to create and refine labeled audio datasets. + +## Upload datasets + +::: zone pivot="speech-studio" To upload your own datasets in Speech Studio, follow these steps: -1. Sign in to the [Speech Studio](https://speech.microsoft.com/customspeech). +1. Sign in to the [Speech Studio](https://aka.ms/speechstudio/customspeech). 1. Select **Custom Speech** > Your project name > **Speech datasets** > **Upload data**. 1. Select the **Training data** or **Testing data** tab. 1. Select a dataset type, and then select **Next**. @@ -30,29 +36,121 @@ To upload your own datasets in Speech Studio, follow these steps: After your dataset is uploaded, go to the **Train custom models** page to [train a custom model](how-to-custom-speech-train-model.md) -### Upload datasets via REST API +::: zone-end + +::: zone pivot="speech-cli" + +[!INCLUDE [Map CLI and API kind to Speech Studio options](includes/how-to/custom-speech/cli-api-kind.md)] + +To create a dataset and connect it to an existing project, use the `spx csr dataset create` command. Construct the request parameters according to the following instructions: -You can use [Speech-to-text REST API v3.0](rest-speech-to-text.md) to upload a dataset by using the [CreateDataset](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/CreateDataset) request. +- Set the `project` parameter to the ID of an existing project. This is recommended so that you can also view and manage the dataset in Speech Studio. You can run the `spx csr project list` command to get available projects. +- Set the required `kind` parameter. The possible set of values for dataset kind are: Language, Acoustic, Pronunciation, and AudioFiles. +- Set the required `contentUrl` parameter. This is the location of the dataset. +- Set the required `language` parameter. The dataset locale must match the locale of the project. The locale can't be changed later. The Speech CLI `language` parameter corresponds to the `locale` property in the JSON request and response. +- Set the required `name` parameter. This is the name that will be displayed in the Speech Studio. The Speech CLI `name` parameter corresponds to the `displayName` property in the JSON request and response. 
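+
+If you don't have the project ID handy, you can list the projects for your Speech resource first. This is a minimal sketch that only runs the `spx csr project list` command mentioned above; the exact output fields can vary by Speech CLI version.
+
+```azurecli-interactive
+# List existing projects so that you can copy the project ID to pass to --project.
+spx csr project list
+```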
-To connect the dataset to an existing project, fill out the request body according to the following format: +Here's an example Speech CLI command that creates a dataset and connects it to an existing project: + +```azurecli-interactive +spx csr dataset create --kind "Acoustic" --name "My Acoustic Dataset" --description "My Acoustic Dataset Description" --project YourProjectId --content YourContentUrl --language "en-US" +``` + +You should receive a response body in the following format: ```json { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/e0ea620b-e8c3-4a26-acb2-95fd0cbc625c", "kind": "Acoustic", "contentUrl": "https://contoso.com/mydatasetlocation", + "links": { + "files": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/e0ea620b-e8c3-4a26-acb2-95fd0cbc625c/files" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/70ccbffc-cafb-4301-aa9f-ef658559d96e" + }, + "properties": { + "acceptedLineCount": 0, + "rejectedLineCount": 0 + }, + "lastActionDateTime": "2022-05-20T14:07:11Z", + "status": "NotStarted", + "createdDateTime": "2022-05-20T14:07:11Z", "locale": "en-US", - "displayName": "My speech dataset name", - "description": "My speech dataset description", + "displayName": "My Acoustic Dataset", + "description": "My Acoustic Dataset Description" +} +``` + +The top-level `self` property in the response body is the dataset's URI. Use this URI to get details about the dataset's project and files. You also use this URI to update or delete a dataset. + +For Speech CLI help with datasets, run the following command: + +```azurecli-interactive +spx help csr dataset +``` + +::: zone-end + +::: zone pivot="rest-api" + +[!INCLUDE [Map CLI and API kind to Speech Studio options](includes/how-to/custom-speech/cli-api-kind.md)] + +To create a dataset and connect it to an existing project, use the [CreateDataset](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/CreateDataset) operation of the [Speech-to-text REST API v3.0](rest-speech-to-text.md). Construct the request body according to the following instructions: + +- Set the `project` property to the URI of an existing project. This is recommended so that you can also view and manage the dataset in Speech Studio. You can make a [GetProjects](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetProjects) request to get available projects. +- Set the required `kind` property. The possible set of values for dataset kind are: Language, Acoustic, Pronunciation, and AudioFiles. +- Set the required `contentUrl` property. This is the location of the dataset. +- Set the required `locale` property. The dataset locale must match the locale of the project. The locale can't be changed later. +- Set the required `displayName` property. This is the name that will be displayed in the Speech Studio. + +Make an HTTP POST request using the URI as shown in the following example. Replace `YourSubscriptionKey` with your Speech resource key, replace `YourServiceRegion` with your Speech resource region, and set the request body properties as previously described. 
+ +```azurecli-interactive +curl -v -X POST -H "Ocp-Apim-Subscription-Key: YourSubscriptionKey" -H "Content-Type: application/json" -d '{ + "kind": "Acoustic", + "displayName": "My Acoustic Dataset", + "description": "My Acoustic Dataset Description", "project": { - "self": "https://westeurope.api.cognitive.microsoft.com/speechtotext/v3.0/projects/c1c643ae-7da5-4e38-9853-e56e840efcb2" - } + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/70ccbffc-cafb-4301-aa9f-ef658559d96e" + }, + "contentUrl": "https://contoso.com/mydatasetlocation", + "locale": "en-US", +}' "https://YourServiceRegion.api.cognitive.microsoft.com/speechtotext/v3.0/datasets" +``` + +You should receive a response body in the following format: + +```json +{ + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/e0ea620b-e8c3-4a26-acb2-95fd0cbc625c", + "kind": "Acoustic", + "contentUrl": "https://contoso.com/mydatasetlocation", + "links": { + "files": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/datasets/e0ea620b-e8c3-4a26-acb2-95fd0cbc625c/files" + }, + "project": { + "self": "https://eastus.api.cognitive.microsoft.com/speechtotext/v3.0/projects/70ccbffc-cafb-4301-aa9f-ef658559d96e" + }, + "properties": { + "acceptedLineCount": 0, + "rejectedLineCount": 0 + }, + "lastActionDateTime": "2022-05-20T14:07:11Z", + "status": "NotStarted", + "createdDateTime": "2022-05-20T14:07:11Z", + "locale": "en-US", + "displayName": "My Acoustic Dataset", + "description": "My Acoustic Dataset Description" } ``` -You can get a list of existing project URLs that can be used in the `project` element by using the [GetProjects](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetProjects) request. +The top-level `self` property in the response body is the dataset's URI. Use this URI to [get](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/GetDataset) details about the dataset's project and files. You also use this URI to [update](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/UpdateDataset) or [delete](https://eastus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/DeleteDataset) the dataset. + +::: zone-end -> [!NOTE] -> Connecting a dataset to a Custom Speech project isn't required to train and test a custom model using the REST API or Speech CLI. But if the dataset is not connected to any project, you won't be able to train or test a model in the [Speech Studio](https://aka.ms/speechstudio/customspeech). +> [!IMPORTANT] +> Connecting a dataset to a Custom Speech project isn't required to train and test a custom model using the REST API or Speech CLI. But if the dataset is not connected to any project, you can't select it for training or testing in the [Speech Studio](https://aka.ms/speechstudio/customspeech). ## Next steps diff --git a/articles/cognitive-services/Speech-Service/how-to-custom-voice-create-voice.md b/articles/cognitive-services/Speech-Service/how-to-custom-voice-create-voice.md index fde40ba1d02a0..60bd1ef83eccc 100644 --- a/articles/cognitive-services/Speech-Service/how-to-custom-voice-create-voice.md +++ b/articles/cognitive-services/Speech-Service/how-to-custom-voice-create-voice.md @@ -96,7 +96,7 @@ The issues are divided into three types. Refer to the following tables to check **Auto-rejected** -Data with these errors will not be used for training. 
Imported data with errors will be ignored, so you don't need to delete them. You can resubmit the corrected data for training. +Data with these errors won't be used for training. Imported data with errors will be ignored, so you don't need to delete them. You can resubmit the corrected data for training. | Category | Name | Description | | --------- | ----------- | --------------------------- | @@ -147,9 +147,11 @@ Unresolved errors listed in the next table affect the quality of training, but d After you validate your data files, you can use them to build your Custom Neural Voice model. -1. On the **Train model** tab, select **Train model** to create a voice model with the data you've uploaded. +1. On the **Train model** tab, select **Train a new model** to create a voice model with the data you've uploaded. -1. Select the neural training method for your model and target language. By default, your voice model is trained in the same language of your training data. You can also select to create a secondary language for your voice model. For more information, see [language support for Custom Neural Voice](language-support.md#custom-neural-voice). Also see information about [pricing](https://azure.microsoft.com/pricing/details/cognitive-services/speech-services/) for neural training. +1. Select the neural training method for your model and target language. + + By default, your voice model is trained in the same language of your training data. You can also select to create a secondary language for your voice model. For more information, see [language support for Custom Neural Voice](language-support.md#custom-neural-voice). Also see information about [pricing](https://azure.microsoft.com/pricing/details/cognitive-services/speech-services/) for neural training. 1. Choose the data you want to use for training, and specify a speaker file. @@ -157,7 +159,7 @@ After you validate your data files, you can use them to build your Custom Neural >- To create a custom neural voice, select at least 300 utterances. >- To train a neural voice, you must specify a voice talent profile. This profile must provide the audio consent file of the voice talent, acknowledging to use his or her speech data to train a custom neural voice model. Custom Neural Voice is available with limited access. Make sure you understand the [responsible AI requirements](/legal/cognitive-services/speech-service/custom-neural-voice/limited-access-custom-neural-voice?context=%2fazure%2fcognitive-services%2fspeech-service%2fcontext%2fcontext) and [apply the access](https://aka.ms/customneural). -1. Choose your test script. Each training generates 100 sample audio files automatically, to help you test the model with a default script. You can also provide your own test script. The test script must exclude the filenames (the ID of each utterance). Otherwise, these IDs are spoken. Here's an example of how the utterances are organized in one .txt file: +1. Choose your test script. Each training generates 100 sample audio files automatically, to help you test the model with a default script. You can also provide your own test script, including up to 100 utterances. The test script must exclude the filenames (the ID of each utterance). Otherwise, these IDs are spoken. Here's an example of how the utterances are organized in one .txt file: ``` This is the waistline, and it's falling. 
@@ -180,18 +182,25 @@ After you validate your data files, you can use them to build your Custom Neural > [!NOTE] > Duplicate audio names will be removed from the training. Make sure the data you select don't contain the same audio names across multiple .zip files. - The **Train model** table displays a new entry that corresponds to this newly created model. The table also displays the status: processing, succeeded, or failed. The status reflects the process of converting your data to a voice model, as shown in this table: + The **Train model** table displays a new entry that corresponds to this newly created model. + + When the model is training, you can select **Cancel training** to cancel training of your voice model. You're not charged for this canceled training. + + :::image type="content" source="media/custom-voice/cnv-cancel-training.png" alt-text="Screenshot that shows how to cancel training for a model."::: + + The table displays the status: processing, succeeded, failed, and canceled. The status reflects the process of converting your data to a voice model, as shown in this table: | State | Meaning | | ----- | ------- | | Processing | Your voice model is being created. | | Succeeded | Your voice model has been created and can be deployed. | | Failed | Your voice model has failed in training. The cause of the failure might be, for example, unseen data problems or network issues. | + | Canceled | The training for your voice model was canceled. | Training duration varies depending on how much data you're training. It takes about 40 compute hours on average to train a custom neural voice. > [!NOTE] - > Standard subscription (S0) users can train three voices simultaneously. If you reach the limit, wait until at least one of your voice models finishes training, and then try again. + > Standard subscription (S0) users can train four voices simultaneously. If you reach the limit, wait until at least one of your voice models finishes training, and then try again. 1. After you finish training the model successfully, you can review the model details. @@ -204,10 +213,36 @@ The quality of the voice depends on many factors, such as: - The accuracy of the transcript file. - How well the recorded voice in the training data matches the personality of the designed voice for your intended use case. +### Rename your model + +If you want to rename the model you built, you can select **Clone model** to create a clone of the model with a new name in the current project. + +:::image type="content" source="media/custom-voice/cnv-clone-model.png" alt-text="Screenshot of selecting the Clone model button."::: + +Enter the new name in the **Clone voice model** window, and then select **Submit**. The text 'Neural' will be automatically added as a suffix to your new model name. + +:::image type="content" source="media/custom-voice/cnv-clone-model-rename.png" alt-text="Screenshot of cloning a model with a new name."::: + +### Test your voice model + +After you've trained your voice model, you can test the model on the model details page. Select **DefaultTests** under **Testing** to listen to the sample audios. The default test samples include 100 sample audios generated automatically during training to help you test the model. In addition to these 100 audios provided by default, your own test script (at most 100 utterances) provided during training is also added to the **DefaultTests** set. You're not charged for the testing with **DefaultTests**. 
+ +:::image type="content" source="media/custom-voice/cnv-model-default-test.png" alt-text="Screenshot of selecting DefaultTests under Testing."::: + +If you want to further test your model with your own scripts, select **Add test scripts** to upload a test script. + +:::image type="content" source="media/custom-voice/cnv-model-add-testscripts.png" alt-text="Screenshot of adding model test scripts."::: + +Before uploading a test script, check the [test script requirements](#train-your-custom-neural-voice-model). You'll be charged for the additional testing with batch synthesis, based on the number of billable characters. See the [pricing page](https://azure.microsoft.com/pricing/details/cognitive-services/speech-services/). + +In the **Add test scripts** window, select **Browse for a file** to choose your script, and then select **Add** to upload it. + +:::image type="content" source="media/custom-voice/cnv-model-upload-testscripts.png" alt-text="Screenshot of uploading model test scripts."::: + For more information, [learn more about the capabilities and limits of this feature, and the best practice to improve your model quality](/legal/cognitive-services/speech-service/custom-neural-voice/characteristics-and-limitations-custom-neural-voice?context=%2fazure%2fcognitive-services%2fspeech-service%2fcontext%2fcontext). > [!NOTE] -> Custom Neural Voice training is only available in the three regions: East US, Southeast Asia, and UK South. But you can easily copy a neural voice model from the three regions to other regions. For more information, see the [regions for Custom Neural Voice](regions.md#text-to-speech). +> Custom Neural Voice training is only available in some regions. But you can easily copy a neural voice model from these regions to other regions. For more information, see the [regions for Custom Neural Voice](regions.md#text-to-speech). ## Next steps diff --git a/articles/cognitive-services/Speech-Service/how-to-custom-voice.md b/articles/cognitive-services/Speech-Service/how-to-custom-voice.md index 8c422dd5796ee..69f2f8c5d1bb5 100644 --- a/articles/cognitive-services/Speech-Service/how-to-custom-voice.md +++ b/articles/cognitive-services/Speech-Service/how-to-custom-voice.md @@ -28,7 +28,7 @@ A Speech service subscription is required before you can use Custom Neural Voice Once you've created an Azure account and a Speech service subscription, you'll need to sign in to Speech Studio and connect your subscription. 1. Get your Speech service subscription key from the Azure portal. -1. Sign in to [Speech Studio](https://speech.microsoft.com), and then select **Custom Voice**. +1. Sign in to [Speech Studio](https://aka.ms/speechstudio/customvoice), and then select **Custom Voice**. 1. Select your subscription and create a speech project. 1. If you want to switch to another Speech subscription, select the **cog** icon at the top. @@ -41,7 +41,7 @@ Content like data, models, tests, and endpoints are organized into projects in S To create a custom voice project: -1. Sign in to [Speech Studio](https://speech.microsoft.com). +1. Sign in to [Speech Studio](https://aka.ms/speechstudio/customvoice). 1. Select **Text-to-Speech** > **Custom Voice** > **Create project**. See [Custom Neural Voice project types](custom-neural-voice.md#custom-neural-voice-project-types) for information about capabilities, requirements, and differences between Custom Neural Voice Pro and Custom Neural Voice Lite projects. 
@@ -68,7 +68,7 @@ After the recordings are ready, follow [Prepare training data](how-to-custom-voi ### Training -After you've prepared the training data, go to [Speech Studio](https://aka.ms/custom-voice) to create your custom neural voice. Select at least 300 utterances to create a custom neural voice. A series of data quality checks are automatically performed when you upload them. To build high-quality voice models, you should fix any errors and submit again. +After you've prepared the training data, go to [Speech Studio](https://aka.ms/speechstudio/customvoice) to create your custom neural voice. Select at least 300 utterances to create a custom neural voice. A series of data quality checks are automatically performed when you upload them. To build high-quality voice models, you should fix any errors and submit again. ### Testing diff --git a/articles/cognitive-services/Speech-Service/how-to-migrate-to-custom-neural-voice.md b/articles/cognitive-services/Speech-Service/how-to-migrate-to-custom-neural-voice.md index 1da06a149c4fc..246bf50007041 100644 --- a/articles/cognitive-services/Speech-Service/how-to-migrate-to-custom-neural-voice.md +++ b/articles/cognitive-services/Speech-Service/how-to-migrate-to-custom-neural-voice.md @@ -34,7 +34,7 @@ Before you can migrate to custom neural voice, your [application](https://aka.ms > Even without an Azure account, you can listen to voice samples in [Speech Studio](https://aka.ms/customvoice) and determine the right voice for your business needs. 1. Learn more about our [policy on the limit access](/legal/cognitive-services/speech-service/custom-neural-voice/limited-access-custom-neural-voice?context=%2fazure%2fcognitive-services%2fspeech-service%2fcontext%2fcontext) and then [apply here](https://aka.ms/customneural). -2. Once your application is approved, you will be provided with the access to the "neural" training feature. Make sure you log in to [Speech Studio](https://speech.microsoft.com) using the same Azure subscription that you provide in your application. +2. Once your application is approved, you will be provided with the access to the "neural" training feature. Make sure you log in to [Speech Studio](https://aka.ms/speechstudio/customvoice) using the same Azure subscription that you provide in your application. > [!IMPORTANT] > To train a neural voice, you must create a voice talent profile with an audio file recorded by the voice talent consenting to the usage of their speech data to train a custom voice model. When preparing your recording script, make sure you include the statement sentence. You can find the statement in multiple languages [here](https://github.com/Azure-Samples/Cognitive-Speech-TTS/blob/master/CustomVoice/script/verbal-statement-all-locales.txt). The language of the verbal statement must be the same as your recording. You need to upload this audio file to the Speech Studio as shown below to create a voice talent profile, which is used to verify against your training data when you create a voice model. Read more about the [voice talent verification](/legal/cognitive-services/speech-service/custom-neural-voice/data-privacy-security-custom-neural-voice?context=%2fazure%2fcognitive-services%2fspeech-service%2fcontext%2fcontext) here. 
diff --git a/articles/cognitive-services/Speech-Service/how-to-specify-source-language.md b/articles/cognitive-services/Speech-Service/how-to-specify-source-language.md deleted file mode 100644 index 2f86416194378..0000000000000 --- a/articles/cognitive-services/Speech-Service/how-to-specify-source-language.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: Specify source language for speech to text -titleSuffix: Azure Cognitive Services -description: The Speech SDK allows you to specify the source language when you convert speech to text. This article describes how to use the FromConfig and SourceLanguageConfig methods to let the Speech service know the source language and provide a custom model target. -services: cognitive-services -author: susanhu -manager: nitinme -ms.service: cognitive-services -ms.subservice: speech-service -ms.topic: how-to -ms.date: 05/19/2020 -ms.author: qiohu -zone_pivot_groups: programming-languages-set-two -ms.devlang: cpp, csharp, java, javascript, objective-c, python -ms.custom: "devx-track-js, devx-track-csharp" ---- - -# Specify source language for speech-to-text - -In this article, you'll learn how to specify the source language for an audio input passed to the Speech SDK for speech recognition. The example code that's provided specifies a custom speech model for improved recognition. - -::: zone pivot="programming-language-csharp" - -## Specify source language in C# - -In the following example, the source language is provided explicitly as a parameter by using the `SpeechRecognizer` construct: - -```csharp -var recognizer = new SpeechRecognizer(speechConfig, "de-DE", audioConfig); -``` - -In the following example, the source language is provided by using `SourceLanguageConfig`. Then, `sourceLanguageConfig` is passed as a parameter to the `SpeechRecognizer` construct. - -```csharp -var sourceLanguageConfig = SourceLanguageConfig.FromLanguage("de-DE"); -var recognizer = new SpeechRecognizer(speechConfig, sourceLanguageConfig, audioConfig); -``` - -In the following example, the source language and custom endpoint are provided by using `SourceLanguageConfig`. Then, `sourceLanguageConfig` is passed as a parameter to the `SpeechRecognizer` construct. - -```csharp -var sourceLanguageConfig = SourceLanguageConfig.FromLanguage("de-DE", "The Endpoint ID for your custom model."); -var recognizer = new SpeechRecognizer(speechConfig, sourceLanguageConfig, audioConfig); -``` - ->[!Note] -> The `SpeechRecognitionLanguage` and `EndpointId` set methods are deprecated from the `SpeechConfig` class in C#. The use of these methods is discouraged. Don't use them when you create a `SpeechRecognizer` construct. - -::: zone-end - -::: zone pivot="programming-language-cpp" - -## Specify source language in C++ - -In the following example, the source language is provided explicitly as a parameter by using the `FromConfig` method. - -```C++ -auto recognizer = SpeechRecognizer::FromConfig(speechConfig, "de-DE", audioConfig); -``` - -In the following example, the source language is provided by using `SourceLanguageConfig`. Then, `sourceLanguageConfig` is passed as a parameter to `FromConfig` when you create the `recognizer` construct. - -```C++ -auto sourceLanguageConfig = SourceLanguageConfig::FromLanguage("de-DE"); -auto recognizer = SpeechRecognizer::FromConfig(speechConfig, sourceLanguageConfig, audioConfig); -``` - -In the following example, the source language and custom endpoint are provided by using `SourceLanguageConfig`. 
Then, `sourceLanguageConfig` is passed as a parameter to `FromConfig` when you create the `recognizer` construct. - -```C++ -auto sourceLanguageConfig = SourceLanguageConfig::FromLanguage("de-DE", "The Endpoint ID for your custom model."); -auto recognizer = SpeechRecognizer::FromConfig(speechConfig, sourceLanguageConfig, audioConfig); -``` - ->[!Note] -> `SetSpeechRecognitionLanguage` and `SetEndpointId` are deprecated methods from the `SpeechConfig` class in C++ and Java. The use of these methods is discouraged. Don't use them when you create a `SpeechRecognizer` construct. - -::: zone-end - -::: zone pivot="programming-language-java" - -## Specify source language in Java - -In the following example, the source language is provided explicitly when you create a new `SpeechRecognizer` construct. - -```Java -SpeechRecognizer recognizer = new SpeechRecognizer(speechConfig, "de-DE", audioConfig); -``` - -In the following example, the source language is provided by using `SourceLanguageConfig`. Then, `sourceLanguageConfig` is passed as a parameter when you create a new `SpeechRecognizer` construct. - -```Java -SourceLanguageConfig sourceLanguageConfig = SourceLanguageConfig.fromLanguage("de-DE"); -SpeechRecognizer recognizer = new SpeechRecognizer(speechConfig, sourceLanguageConfig, audioConfig); -``` - -In the following example, the source language and custom endpoint are provided by using `SourceLanguageConfig`. Then, `sourceLanguageConfig` is passed as a parameter when you create a new `SpeechRecognizer` construct. - -```Java -SourceLanguageConfig sourceLanguageConfig = SourceLanguageConfig.fromLanguage("de-DE", "The Endpoint ID for your custom model."); -SpeechRecognizer recognizer = new SpeechRecognizer(speechConfig, sourceLanguageConfig, audioConfig); -``` - ->[!Note] -> `setSpeechRecognitionLanguage` and `setEndpointId` are deprecated methods from the `SpeechConfig` class in C++ and Java. The use of these methods is discouraged. Don't use them when you create a `SpeechRecognizer` construct. - -::: zone-end - -::: zone pivot="programming-language-python" - -## Specify source language in Python - -In the following example, the source language is provided explicitly as a parameter by using the `SpeechRecognizer` construct. - -```Python -speech_recognizer = speechsdk.SpeechRecognizer( - speech_config=speech_config, language="de-DE", audio_config=audio_config) -``` - -In the following example, the source language is provided by using `SourceLanguageConfig`. Then, `SourceLanguageConfig` is passed as a parameter to the `SpeechRecognizer` construct. - -```Python -source_language_config = speechsdk.languageconfig.SourceLanguageConfig("de-DE") -speech_recognizer = speechsdk.SpeechRecognizer( - speech_config=speech_config, source_language_config=source_language_config, audio_config=audio_config) -``` - -In the following example, the source language and custom endpoint are provided by using `SourceLanguageConfig`. Then, `SourceLanguageConfig` is passed as a parameter to the `SpeechRecognizer` construct. - -```Python -source_language_config = speechsdk.languageconfig.SourceLanguageConfig("de-DE", "The Endpoint ID for your custom model.") -speech_recognizer = speechsdk.SpeechRecognizer( - speech_config=speech_config, source_language_config=source_language_config, audio_config=audio_config) -``` - ->[!Note] -> The `speech_recognition_language` and `endpoint_id` properties are deprecated from the `SpeechConfig` class in Python. The use of these properties is discouraged. 
Don't use them when you create a `SpeechRecognizer` construct. - -::: zone-end - -::: zone pivot="programming-language-more" - -## Specify source language in JavaScript - -The first step is to create a `SpeechConfig` construct: - -```Javascript -var speechConfig = sdk.SpeechConfig.fromSubscription("YourSubscriptionkey", "YourRegion"); -``` - -Next, specify the source language of your audio with `speechRecognitionLanguage`: - -```Javascript -speechConfig.speechRecognitionLanguage = "de-DE"; -``` - -If you're using a custom model for recognition, you can specify the endpoint with `endpointId`: - -```Javascript -speechConfig.endpointId = "The Endpoint ID for your custom model."; -``` - -## Specify source language in Objective-C - -In the following example, the source language is provided explicitly as a parameter by using the `SPXSpeechRecognizer` construct. - -```Objective-C -SPXSpeechRecognizer* speechRecognizer = \ - [[SPXSpeechRecognizer alloc] initWithSpeechConfiguration:speechConfig language:@"de-DE" audioConfiguration:audioConfig]; -``` - -In the following example, the source language is provided by using `SPXSourceLanguageConfiguration`. Then, `SPXSourceLanguageConfiguration` is passed as a parameter to the `SPXSpeechRecognizer` construct. - -```Objective-C -SPXSourceLanguageConfiguration* sourceLanguageConfig = [[SPXSourceLanguageConfiguration alloc]init:@"de-DE"]; -SPXSpeechRecognizer* speechRecognizer = [[SPXSpeechRecognizer alloc] initWithSpeechConfiguration:speechConfig - sourceLanguageConfiguration:sourceLanguageConfig - audioConfiguration:audioConfig]; -``` - -In the following example, the source language and custom endpoint are provided by using `SPXSourceLanguageConfiguration`. Then, `SPXSourceLanguageConfiguration` is passed as a parameter to the `SPXSpeechRecognizer` construct. - -```Objective-C -SPXSourceLanguageConfiguration* sourceLanguageConfig = \ - [[SPXSourceLanguageConfiguration alloc]initWithLanguage:@"de-DE" - endpointId:@"The Endpoint ID for your custom model."]; -SPXSpeechRecognizer* speechRecognizer = [[SPXSpeechRecognizer alloc] initWithSpeechConfiguration:speechConfig - sourceLanguageConfiguration:sourceLanguageConfig - audioConfiguration:audioConfig]; -``` - ->[!Note] -> The `speechRecognitionLanguage` and `endpointId` properties are deprecated from the `SPXSpeechConfiguration` class in Objective-C. The use of these properties is discouraged. Don't use them when you create a `SPXSpeechRecognizer` construct. - -::: zone-end - -## Next steps - -- [Language support](language-support.md) diff --git a/articles/cognitive-services/Speech-Service/improve-accuracy-phrase-list.md b/articles/cognitive-services/Speech-Service/improve-accuracy-phrase-list.md index 8e9637c48af37..f1530369273ea 100644 --- a/articles/cognitive-services/Speech-Service/improve-accuracy-phrase-list.md +++ b/articles/cognitive-services/Speech-Service/improve-accuracy-phrase-list.md @@ -50,8 +50,7 @@ Now try Speech Studio to see how phrase list can improve recognition accuracy. > [!NOTE] > You may be prompted to select your Azure subscription and Speech resource, and then acknowledge billing for your region. -1. Sign in to [Speech Studio](https://speech.microsoft.com/). -1. Select **Real-time Speech-to-text**. +1. Go to **Real-time Speech-to-text** in [Speech Studio](https://aka.ms/speechstudio/speechtotexttool). 1. You test speech recognition by uploading an audio file or recording audio with a microphone. 
For example, select **record audio with a microphone** and then say "Hi Rehaan, this is Jessie from Contoso bank. " Then select the red button to stop recording. 1. You should see the transcription result in the **Test results** text box. If "Rehaan", "Jessie", or "Contoso" were recognized incorrectly, you can add the terms to a phrase list in the next step. 1. Select **Show advanced options** and turn on **Phrase list**. diff --git a/articles/cognitive-services/Speech-Service/includes/cognitive-services-speech-service-endpoints-text-to-speech.md b/articles/cognitive-services/Speech-Service/includes/cognitive-services-speech-service-endpoints-text-to-speech.md index 84c403208c57d..891e8fa007e47 100644 --- a/articles/cognitive-services/Speech-Service/includes/cognitive-services-speech-service-endpoints-text-to-speech.md +++ b/articles/cognitive-services/Speech-Service/includes/cognitive-services-speech-service-endpoints-text-to-speech.md @@ -53,40 +53,40 @@ Use this table to determine *availability of neural voices* by region or endpoin If you've created a custom neural voice font, use the endpoint that you've created. You can also use the following endpoints. Replace `{deploymentId}` with the deployment ID for your neural voice model. -| Region | Endpoint | -|--------|----------| -| Australia East | `https://australiaeast.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| Brazil South | `https://brazilsouth.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| Canada Central | `https://canadacentral.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| Central US | `https://centralus.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| East Asia | `https://eastasia.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| East US | `https://eastus.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| East US 2 | `https://eastus2.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| France Central | `https://francecentral.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| Germany West Central | `https://germanywestcentral.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| India Central | `https://centralindia.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| Japan East | `https://japaneast.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| Japan West | `https://japanwest.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| Jio India West | `https://jioindiawest.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| Korea Central | `https://koreacentral.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| North Central US | `https://northcentralus.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| North Europe | `https://northeurope.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| Norway East| `https://norwayeast.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| South Africa North | `https://southafricanorth.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| South Central US | 
`https://southcentralus.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| Southeast Asia | `https://southeastasia.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| Switzerland North | `https://switzerlandnorth.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| Switzerland West | `https://switzerlandwest.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| UAE North | `https://uaenorth.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}`| -| UK South | `https://uksouth.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| West Central US | `https://westcentralus.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| West Europe | `https://westeurope.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| West US | `https://westus.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| West US 2 | `https://westus2.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | -| West US 3 | `https://westus3.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| Region | Training |Deployment |Endpoint | +|--------|----------|----------|----------| +| Australia East |Yes|Yes| `https://australiaeast.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| Brazil South | No |Yes| `https://brazilsouth.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| Canada Central | No |Yes|`https://canadacentral.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| Central US | No |Yes| `https://centralus.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| East Asia | No |Yes| `https://eastasia.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| East US |Yes| Yes | `https://eastus.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| East US 2 |Yes| Yes |`https://eastus2.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| France Central | No |Yes| `https://francecentral.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| Germany West Central | No |Yes| `https://germanywestcentral.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| India Central |Yes| Yes | `https://centralindia.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| Japan East |Yes| Yes | `https://japaneast.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| Japan West | No |Yes| `https://japanwest.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| Jio India West | No |Yes| `https://jioindiawest.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| Korea Central |Yes|Yes| `https://koreacentral.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| North Central US | No |Yes| `https://northcentralus.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| North Europe |Yes|Yes| `https://northeurope.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| Norway East| No |Yes| `https://norwayeast.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| South Africa North | No |Yes| 
`https://southafricanorth.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| South Central US |Yes|Yes| `https://southcentralus.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| Southeast Asia |Yes|Yes| `https://southeastasia.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| Switzerland North | No |Yes| `https://switzerlandnorth.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| Switzerland West | No |Yes| `https://switzerlandwest.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| UAE North | No |Yes| `https://uaenorth.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}`| +| UK South |Yes| Yes | `https://uksouth.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| West Central US | No |Yes| `https://westcentralus.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| West Europe |Yes|Yes| `https://westeurope.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| West US |Yes|Yes| `https://westus.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| West US 2 |Yes|Yes| `https://westus2.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` | +| West US 3 | No |Yes| `https://westus3.voice.speech.microsoft.com/cognitiveservices/v1?deploymentId={deploymentId}` |
> [!NOTE] -> The preceding regions are available for neural voice model hosting and real-time synthesis. Custom neural voice training is available in only these three regions: East US, Southeast Asia, and UK South. But users can easily copy a neural voice model from the three regions to other regions in the preceding list. +> The preceding regions are available for neural voice model hosting and real-time synthesis. Custom neural voice training is available only in the regions where the **Training** column shows Yes. You can easily copy a neural voice model from a training region to any other region in the preceding list.
### Long Audio API
diff --git a/articles/cognitive-services/Speech-Service/includes/how-to/custom-domain.md b/articles/cognitive-services/Speech-Service/includes/how-to/custom-domain.md index 89b31ccb90e40..e9124de84b774 100644 --- a/articles/cognitive-services/Speech-Service/includes/how-to/custom-domain.md +++ b/articles/cognitive-services/Speech-Service/includes/how-to/custom-domain.md @@ -13,7 +13,7 @@ Follow these steps to create a [custom subdomain name for Cognitive Services](.. > [!CAUTION] > When you turn on a custom domain name, the operation is [not reversible](../../../cognitive-services-custom-subdomains.md#can-i-change-a-custom-domain-name). The only way to go back to the [regional name](../../../cognitive-services-custom-subdomains.md#is-there-a-list-of-regional-endpoints) is to create a new Speech resource. > -> If your Speech resource has a lot of associated custom models and projects created via [Speech Studio](https://speech.microsoft.com/), we strongly recommend trying the configuration with a test resource before you modify the resource used in production. +> If your Speech resource has a lot of associated custom models and projects created via [Speech Studio](https://aka.ms/speechstudio/customspeech), we strongly recommend trying the configuration with a test resource before you modify the resource used in production.
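One way to act on that recommendation is to enable the custom domain on a disposable test resource first and confirm that the new endpoint answers before you touch production. The following is a minimal illustrative sketch, not part of the original steps: it assumes a hypothetical custom domain name, the `<name>.cognitiveservices.azure.com` token endpoint pattern, and the `requests` package.

```python
import requests

# Hypothetical placeholders; substitute your test resource's custom domain name and key.
custom_domain = "my-speech-test-resource"
subscription_key = "YourSubscriptionKey"

# Custom domain resources expose the token endpoint on <name>.cognitiveservices.azure.com.
token_url = f"https://{custom_domain}.cognitiveservices.azure.com/sts/v1.0/issueToken"

response = requests.post(token_url, headers={"Ocp-Apim-Subscription-Key": subscription_key})
response.raise_for_status()  # a 4xx/5xx here means the custom domain isn't active yet or the key is wrong
print("Custom domain endpoint issued a token; the test resource is reachable.")
```

If the request succeeds, the custom domain is active, and you can repeat the steps in the following tabs on the production resource.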
# [Azure portal](#tab/portal) diff --git a/articles/cognitive-services/Speech-Service/includes/how-to/custom-speech/cli-api-kind.md b/articles/cognitive-services/Speech-Service/includes/how-to/custom-speech/cli-api-kind.md new file mode 100644 index 0000000000000..1757ae1776fa9 --- /dev/null +++ b/articles/cognitive-services/Speech-Service/includes/how-to/custom-speech/cli-api-kind.md @@ -0,0 +1,22 @@ +--- +author: eric-urban +ms.service: cognitive-services +ms.subservice: speech-service +ms.topic: include +ms.date: 05/25/2022 +ms.author: eur +--- + +With the [Speech CLI](~/articles/cognitive-services/speech-service/spx-overview.md) and [Speech-to-text REST API v3.0](~/articles/cognitive-services/speech-service/rest-speech-to-text.md), unlike the Speech Studio, you don't choose whether a dataset is for testing or training at the time of upload. You specify how a dataset is used when you [train a model](~/articles/cognitive-services/speech-service/how-to-custom-speech-train-model.md) or [run a test](~/articles/cognitive-services/speech-service/how-to-custom-speech-evaluate-data.md). + +Although you don't indicate whether the dataset is for testing or training, you must specify the dataset kind. The dataset kind is used to determine which type of dataset is created. In some cases, a dataset kind is only used for testing or training, but you shouldn't take a dependency on that. The Speech CLI and REST API `kind` values correspond to the options in the Speech Studio as described in the following table: + +|CLI and API kind |Speech Studio options | +|---------|---------| +|Acoustic |Training data: Audio + human-labeled transcript<br/>Testing data: Transcript (automatic audio synthesis)<br/>Testing data: Audio + human-labeled transcript | +|AudioFiles |Testing data: Audio | +|Language |Training data: Plain text | +|Pronunciation |Training data: Pronunciation | + +> [!NOTE] +> Training datasets in structured text (Markdown) format aren't supported by the [Speech CLI](~/articles/cognitive-services/speech-service/spx-overview.md) or the [Speech-to-text REST API v3.0](~/articles/cognitive-services/speech-service/rest-speech-to-text.md). \ No newline at end of file
diff --git a/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/cpp.md b/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/cpp.md index 7724f1272eaee..cf951e8f471d1 100644 --- a/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/cpp.md +++ b/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/cpp.md @@ -203,3 +203,12 @@ config->SetSpeechRecognitionLanguage("de-DE"); [`SetSpeechRecognitionLanguage`](/cpp/cognitive-services/speech/speechconfig#setspeechrecognitionlanguage) is a parameter that takes a string as an argument. You can provide any value in the [list of supported locales/languages](../../../language-support.md). +## Use a custom endpoint + +With [Custom Speech](../../../custom-speech-overview.md), you can upload your own data, test and train a custom model, compare accuracy between models, and deploy a model to a custom endpoint. The following example shows how to set a custom endpoint. + +```cpp +auto speechConfig = SpeechConfig::FromSubscription("YourSubscriptionKey", "YourServiceRegion"); +speechConfig->SetEndpointId("YourEndpointId"); +auto speechRecognizer = SpeechRecognizer::FromConfig(speechConfig); +``` \ No newline at end of file
diff --git a/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/csharp.md b/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/csharp.md index 39a65151170ce..4700691a86677 100644 --- a/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/csharp.md +++ b/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/csharp.md @@ -278,3 +278,13 @@ speechConfig.SpeechRecognitionLanguage = "it-IT"; The [`SpeechRecognitionLanguage`](/dotnet/api/microsoft.cognitiveservices.speech.speechconfig.speechrecognitionlanguage) property expects a language-locale format string. You can provide any value in the **Locale** column in the [list of supported locales/languages](../../../language-support.md). + +## Use a custom endpoint + +With [Custom Speech](../../../custom-speech-overview.md), you can upload your own data, test and train a custom model, compare accuracy between models, and deploy a model to a custom endpoint. The following example shows how to set a custom endpoint. 
+ +```csharp +var speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion"); +speechConfig.EndpointId = "YourEndpointId"; +var speechRecognizer = new SpeechRecognizer(speechConfig); +``` diff --git a/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/java.md b/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/java.md index c1472d5720944..9e3bf29041dd1 100644 --- a/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/java.md +++ b/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/java.md @@ -221,3 +221,12 @@ config.setSpeechRecognitionLanguage("fr-FR"); [`setSpeechRecognitionLanguage`](/java/api/com.microsoft.cognitiveservices.speech.speechconfig.setspeechrecognitionlanguage) is a parameter that takes a string as an argument. You can provide any value in the [list of supported locales/languages](../../../language-support.md). +## Use a custom endpoint + +With [Custom Speech](../../../custom-speech-overview.md), you can upload your own data, test and train a custom model, compare accuracy between models, and deploy a model to a custom endpoint. The following example shows how to set a custom endpoint. + +```java +SpeechConfig speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion"); +speechConfig.setEndpointId("YourEndpointId"); +SpeechRecognizer speechRecognizer = new SpeechRecognizer(speechConfig); +``` diff --git a/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/javascript.md b/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/javascript.md index 4f54981b8e76d..dee3d7e9d7a25 100644 --- a/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/javascript.md +++ b/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/javascript.md @@ -201,3 +201,12 @@ speechConfig.speechRecognitionLanguage = "it-IT"; The [`speechRecognitionLanguage`](/javascript/api/microsoft-cognitiveservices-speech-sdk/speechconfig#speechrecognitionlanguage) property expects a language-locale format string. You can provide any value in the **Locale** column in the [list of supported locales/languages](../../../language-support.md). +## Use a custom endpoint + +With [Custom Speech](../../../custom-speech-overview.md), you can upload your own data, test and train a custom model, compare accuracy between models, and deploy a model to a custom endpoint. The following example shows how to set a custom endpoint. 
+ +```javascript +var speechConfig = SpeechSDK.SpeechConfig.fromSubscription("YourSubscriptionKey", "YourServiceRegion"); +speechConfig.endpointId = "YourEndpointId"; +var speechRecognizer = new SpeechSDK.SpeechRecognizer(speechConfig); +``` diff --git a/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/objectivec.md b/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/objectivec.md index dbc58dcea3194..28e0b70f5ea81 100644 --- a/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/objectivec.md +++ b/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/objectivec.md @@ -19,3 +19,13 @@ The [Azure-Samples/cognitive-services-speech-sdk](https://github.com/Azure-Sampl * [Additional samples for Objective-C on iOS](https://github.com/Azure-Samples/cognitive-services-speech-sdk/tree/master/samples/objective-c/ios) For more information, see the [Speech SDK for Objective-C reference](/objectivec/cognitive-services/speech/). + +## Use a custom endpoint + +With [Custom Speech](../../../custom-speech-overview.md), you can upload your own data, test and train a custom model, compare accuracy between models, and deploy a model to a custom endpoint. The following example shows how to set a custom endpoint. + +```Objective-C +SPXSpeechConfiguration *speechConfig = [[SPXSpeechConfiguration alloc] initWithSubscription:"YourSubscriptionKey" region:"YourServiceRegion"]; +speechConfig.endpointId = "YourEndpointId"; +SPXSpeechRecognizer* speechRecognizer = [[SPXSpeechRecognizer alloc] init:speechConfig]; +``` diff --git a/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/python.md b/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/python.md index 9f951a528ed62..7374e2f045926 100644 --- a/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/python.md +++ b/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/python.md @@ -168,3 +168,12 @@ speech_config.speech_recognition_language="de-DE" [`speech_recognition_language`](/python/api/azure-cognitiveservices-speech/azure.cognitiveservices.speech.speechconfig#speech-recognition-language) is a parameter that takes a string as an argument. You can provide any value in the [list of supported locales/languages](../../../language-support.md). +## Use a custom endpoint + +With [Custom Speech](../../../custom-speech-overview.md), you can upload your own data, test and train a custom model, compare accuracy between models, and deploy a model to a custom endpoint. The following example shows how to set a custom endpoint. 
+ +```python +speech_config = speechsdk.SpeechConfig(subscription="YourSubscriptionKey", region="YourServiceRegion") +speech_config.endpoint_id = "YourEndpointId" +speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config) +``` diff --git a/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/swift.md b/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/swift.md index 53904c5d667d9..3f2b48432d020 100644 --- a/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/swift.md +++ b/articles/cognitive-services/Speech-Service/includes/how-to/recognize-speech/swift.md @@ -18,3 +18,13 @@ The [Azure-Samples/cognitive-services-speech-sdk](https://github.com/Azure-Sampl * [Recognize speech in Swift on iOS](https://github.com/Azure-Samples/cognitive-services-speech-sdk/tree/master/quickstart/swift/ios/from-microphone) For more information, see the [Speech SDK for Swift reference](/objectivec/cognitive-services/speech/). + +## Use a custom endpoint + +With [Custom Speech](../../../custom-speech-overview.md), you can upload your own data, test and train a custom model, compare accuracy between models, and deploy a model to a custom endpoint. The following example shows how to set a custom endpoint. + +```swift +let speechConfig = SPXSpeechConfiguration(subscription: "YourSubscriptionKey", region: "YourServiceRegion"); +speechConfig.endpointId = "YourEndpointId"; +let speechRecognizer = SPXSpeechRecognizer(speechConfiguration: speechConfig); +``` diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/cpp.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/cpp.md index 42a1dda2aa2fe..9b668810964d6 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/cpp.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/cpp.md @@ -30,18 +30,21 @@ You must also install [GStreamer](~/articles/cognitive-services/speech-service/h Follow these steps to create a new console application and install the Speech SDK. -1. Download or copy the [scenarios/cpp/windows/captioning/](https://github.com/Azure-Samples/cognitive-services-speech-sdk/tree/master/scenarios/go/captioning/) sample files from GitHub into a local directory. -1. Open `captioning.sln` in Visual Studio. +1. Download or copy the scenarios/cpp/windows/captioning/ sample files from GitHub into a local directory. +1. Open the `captioning.sln` solution file in Visual Studio. 1. Install the Speech SDK in your project with the NuGet package manager. ```powershell Install-Package Microsoft.CognitiveServices.Speech ``` -1. Make sure the compiler is set to **ISO C++17 Standard (/std:c++17)** at **Project** > **Properties** > **General** > **C++ Language Standard**. -1. Enter your preferred command line arguments at **Project** > **Properties** > **Debugging** > **Command Arguments**. See [usage and arguments](#usage-and-arguments) for the available options. Here is an example. Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region: +1. Open **Project** > **Properties** > **General**. Set **Configuration** to `All configurations`. Set **C++ Language Standard** to `ISO C++17 Standard (/std:c++17)`. +1. Open **Build** > **Configuration Manager**. + - On a 64-bit Windows installation, set **Active solution platform** to `x64`. 
+ - On a 32-bit Windows installation, set **Active solution platform** to `x86`. +1. Open **Project** > **Properties** > **Debugging**. Enter your preferred command line arguments at **Command Arguments**. See [usage and arguments](#usage-and-arguments) for the available options. Here is an example: ``` --key YourSubscriptionKey --region YourServiceRegion --input c:\caption\caption.this.mp4 --format any --output c:\caption\caption.output.txt - --srt --recognizing --threshold 5 --profanity mask --phrases "Contoso;Jessie;Rehaan" ``` - Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region. Make sure that the specified arguments for `--input` file and `--output` path exist. Otherwise you must change the path. + Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource [region](~/articles/cognitive-services/speech-service/regions.md), such as `westus` or `northeurope`. Make sure that the paths specified by `--input` and `--output` are valid. Otherwise you must change the paths. 1. Build and run the console application. The output file with complete captions is written to `c:\caption\caption.output.txt`. Intermediate results are shown in the console: ```console 00:00:00,180 --> 00:00:01,600 diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/csharp.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/csharp.md index 0ed0f3aee40de..071a6773fa858 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/csharp.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/csharp.md @@ -37,7 +37,7 @@ Follow these steps to create a new console application and install the Speech SD ```dotnetcli dotnet add package Microsoft.CognitiveServices.Speech ``` -1. Copy the [scenarios/csharp/dotnetcore/captioning/](https://github.com/Azure-Samples/cognitive-services-speech-sdk/tree/master/scenarios/csharp/dotnetcore/captioning/) sample files from GitHub into your project directory. Overwrite the local copy of `Program.cs` with the file that you copy from GitHub. +1. Copy the scenarios/csharp/dotnetcore/captioning/ sample files from GitHub into your project directory. Overwrite the local copy of `Program.cs` with the file that you copy from GitHub. 1. Build the project with the .NET CLI. ```dotnetcli dotnet build @@ -46,7 +46,7 @@ Follow these steps to create a new console application and install the Speech SD ```dotnetcli dotnet run --key YourSubscriptionKey --region YourServiceRegion --input c:\caption\caption.this.mp4 --format any --output c:\caption\caption.output.txt - --srt --recognizing --threshold 5 --profanity mask --phrases "Contoso;Jessie;Rehaan" ``` - Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region. Make sure that the specified arguments for `--input` file and `--output` path exist. Otherwise you must change the path. + Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource [region](~/articles/cognitive-services/speech-service/regions.md), such as `westus` or `northeurope`. Make sure that the paths specified by `--input` and `--output` are valid. Otherwise you must change the paths. The output file with complete captions is written to `c:\caption\caption.output.txt`. 
Intermediate results are shown in the console: ```console diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/go.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/go.md index 04bb8987ea0dc..0e8853eb9e90f 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/go.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/go.md @@ -30,7 +30,7 @@ You must also install [GStreamer](~/articles/cognitive-services/speech-service/h Follow these steps to create a new GO module and install the Speech SDK. -1. Download or copy the [scenarios/go/captioning/](https://github.com/Azure-Samples/cognitive-services-speech-sdk/tree/master/scenarios/go/captioning/) sample files from GitHub into a local directory. +1. Download or copy the scenarios/go/captioning/ sample files from GitHub into a local directory. 1. Open a command prompt in the same directory as `captioning.go`. 1. Run the following commands to create a `go.mod` file that links to the Speech SDK components hosted on GitHub: ```console @@ -45,7 +45,7 @@ Follow these steps to create a new GO module and install the Speech SDK. ```console go run captioning --key YourSubscriptionKey --region YourServiceRegion --input caption.this.mp4 --format any --output caption.output.txt - --srt --recognizing --threshold 5 --profanity mask --phrases "Contoso;Jessie;Rehaan" ``` - Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region. Make sure that the specified arguments for `--input` file and `--output` path exist. Otherwise you must change the path. + Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource [region](~/articles/cognitive-services/speech-service/regions.md), such as `westus` or `northeurope`. Make sure that the paths specified by `--input` and `--output` are valid. Otherwise you must change the paths. The output file with complete captions is written to `caption.output.txt`. Intermediate results are shown in the console: ```console diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/java.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/java.md index c9303b7fafde3..8520fdde5639c 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/java.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/java.md @@ -71,7 +71,7 @@ Before you can do anything, you need to install the Speech SDK. The sample in th Follow these steps to create a new console application and install the Speech SDK. -1. Copy the [scenarios/java/jre/console/captioning/](https://github.com/Azure-Samples/cognitive-services-speech-sdk/tree/master/scenarios/java/jre/console/captioning/) sample files from GitHub into your project directory. +1. Copy the scenarios/java/jre/captioning/ sample files from GitHub into your project directory. 1. Open a command prompt and run this command to compile the project files. 
```console javac Captioning.java -cp ".;target\dependency\*" @@ -80,7 +80,7 @@ Follow these steps to create a new console application and install the Speech SD ```console java -cp ".;target\dependency\*" Captioning --key YourSubscriptionKey --region YourServiceRegion --input c:\caption\caption.this.mp4 --format any --output c:\caption\caption.output.txt - --srt --recognizing --threshold 5 --profanity mask --phrases "Contoso;Jessie;Rehaan" ``` - Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region. Make sure that the specified arguments for `--input` file and `--output` path exist. Otherwise you must change the path. + Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource [region](~/articles/cognitive-services/speech-service/regions.md), such as `westus` or `northeurope`. Make sure that the paths specified by `--input` and `--output` are valid. Otherwise you must change the paths. The output file with complete captions is written to `c:\caption\caption.output.txt`. Intermediate results are shown in the console: ```console diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/javascript.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/javascript.md index b4b0721172d1f..f719fe6e74775 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/javascript.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/javascript.md @@ -28,7 +28,7 @@ Before you can do anything, you need to install the Speech SDK for JavaScript. I Follow these steps to create a new console application and install the Speech SDK. -1. Copy the [scenarios/javascript/node/captioning/](https://github.com/Azure-Samples/cognitive-services-speech-sdk/tree/master/scenarios/javascript/node/captioning/) sample files from GitHub into your project directory. +1. Copy the scenarios/javascript/node/captioning/ sample files from GitHub into your project directory. 1. Open a command prompt in the same directory as `Captioning.js`. 1. Install the Speech SDK for JavaScript: ```console @@ -38,7 +38,7 @@ Follow these steps to create a new console application and install the Speech SD ```console node captioning.js --key YourSubscriptionKey --region YourServiceRegion --input c:\caption\caption.this.wav --output c:\caption\caption.output.txt - --srt --recognizing --threshold 5 --profanity mask --phrases "Contoso;Jessie;Rehaan" ``` - Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region. Make sure that the specified arguments for `--input` file and `--output` path exist. Otherwise you must change the path. + Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource [region](~/articles/cognitive-services/speech-service/regions.md), such as `westus` or `northeurope`. Make sure that the paths specified by `--input` and `--output` are valid. Otherwise you must change the paths. > [!NOTE] > The Speech SDK for JavaScript does not support [compressed input audio](~/articles/cognitive-services/speech-service/how-to-use-codec-compressed-audio-input-streams.md). You must use a WAV file as shown in the example. 
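Because the JavaScript sample accepts only WAV input, you may need to convert a compressed file first. The following is a minimal sketch, not part of the original quickstart: it assumes `ffmpeg` is installed and on your PATH, and the file names are placeholders.

```python
import subprocess

# Convert a compressed MP4 into a 16 kHz, mono, 16-bit PCM WAV file for the JavaScript captioning sample.
# Assumes ffmpeg is installed and on PATH; file names are placeholders.
subprocess.run(
    [
        "ffmpeg",
        "-i", "caption.this.mp4",  # compressed input
        "-ar", "16000",            # 16 kHz sample rate
        "-ac", "1",                # mono
        "-c:a", "pcm_s16le",       # 16-bit PCM audio
        "caption.this.wav",
    ],
    check=True,
)
```

After the conversion, pass the resulting `.wav` file to `--input` as shown above.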
diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/python.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/python.md index ef9260ab70b48..ba74e9d57fad2 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/python.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/python.md @@ -33,7 +33,7 @@ The Speech SDK for Python is available as a [Python Package Index (PyPI) module] Follow these steps to create a new console application. -1. Download or copy the [scenarios/python/console/captioning/](https://github.com/Azure-Samples/cognitive-services-speech-sdk/tree/master/scenarios/python/console/captioning/) sample files from GitHub into a local directory. +1. Download or copy the scenarios/python/console/captioning/ sample files from GitHub into a local directory. 1. Open a command prompt in the same directory as `captioning.py`. 1. Run this command to install the Speech SDK: ```console @@ -43,7 +43,7 @@ Follow these steps to create a new console application. ```console python captioning.py --key YourSubscriptionKey --region YourServiceRegion --input caption.this.mp4 --format any --output caption.output.txt - --srt --recognizing --threshold 5 --profanity mask --phrases "Contoso;Jessie;Rehaan" ``` - Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region. Make sure that the specified arguments for `--input` file and `--output` path exist. Otherwise you must change the path. + Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource [region](~/articles/cognitive-services/speech-service/regions.md), such as `westus` or `northeurope`. Make sure that the paths specified by `--input` and `--output` are valid. Otherwise you must change the paths. The output file with complete captions is written to `caption.output.txt`. Intermediate results are shown in the console: ```console diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/usage-arguments.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/usage-arguments.md index 23eed7987660c..21809c51a91b4 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/usage-arguments.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/usage-arguments.md @@ -9,12 +9,12 @@ ms.author: eur Connection options include: - `--key`: Your Speech resource key. -- `--region REGION`: Your Speech resource region. Examples: `westus`, `eastus` +- `--region REGION`: Your Speech resource region. Examples: `westus`, `northeurope` Input options include: - `--input FILE`: Input audio from file. The default input is the microphone. -- `--format FORMAT`: Use compressed audio format. Valid only with `--file`. Valid values are `alaw`, `any`, `flac`, `mp3`, `mulaw`, and `ogg_opus`. The default value is `any`. This option is not available with the JavaScript captioning sample. For compressed audio files such as MP4, install GStreamer and see [How to use compressed input audio](~/articles/cognitive-services/speech-service/how-to-use-codec-compressed-audio-input-streams.md). +- `--format FORMAT`: Use compressed audio format. Valid only with `--file`. Valid values are `alaw`, `any`, `flac`, `mp3`, `mulaw`, and `ogg_opus`. The default value is `any`. To use a `wav` file, don't specify the format. 
This option is not available with the JavaScript captioning sample. For compressed audio files such as MP4, install GStreamer and see [How to use compressed input audio](~/articles/cognitive-services/speech-service/how-to-use-codec-compressed-audio-input-streams.md). Language options include: diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/cpp/linux.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/cpp/linux.md index 5b28f7fffc48d..1bd237ad4156b 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/cpp/linux.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/cpp/linux.md @@ -29,7 +29,7 @@ Before you get started, make sure to: 1. Replace the string `whatstheweatherlike.wav` with your own filename. > [!NOTE] -> The Speech SDK will default to recognizing using en-us for the language, see [Specify source language for speech to text](../../../../how-to-specify-source-language.md) for information on choosing the source language. +> The Speech SDK will default to recognizing using en-us for the language, see [How to recognize speech](../../../../how-to-recognize-speech.md) for information on choosing the source language. ## Build the app diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/cpp/macos.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/cpp/macos.md index efbdde1953144..fba80b3d01300 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/cpp/macos.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/cpp/macos.md @@ -29,7 +29,7 @@ Before you get started, make sure to: 1. Replace the string `whatstheweatherlike.wav` with your own filename. > [!NOTE] -> The Speech SDK will default to recognizing using en-us for the language, see [Specify source language for speech to text](../../../../how-to-specify-source-language.md) for information on choosing the source language. +> The Speech SDK will default to recognizing using en-us for the language, see [How to recognize speech](../../../../how-to-recognize-speech.md) for information on choosing the source language. ## Build the app diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/cpp/windows.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/cpp/windows.md index cdda9f84032cb..802f6af4d25f4 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/cpp/windows.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/cpp/windows.md @@ -33,7 +33,7 @@ Before you get started, make sure to: 1. From the menu bar, choose **File** > **Save All**. > [!NOTE] -> The Speech SDK will default to recognizing using en-us for the language, see [Specify source language for speech to text](../../../../how-to-specify-source-language.md) for information on choosing the source language. +> The Speech SDK will default to recognizing using en-us for the language, see [How to recognize speech](../../../../how-to-recognize-speech.md) for information on choosing the source language. 
## Build and run the application diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/csharp/dotnet.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/csharp/dotnet.md index ffc540626d0fb..1d6427aa41134 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/csharp/dotnet.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/csharp/dotnet.md @@ -61,7 +61,7 @@ Before you can initialize a `SpeechRecognizer` object, you need to create a conf > [!NOTE] > This sample uses the `FromSubscription()` method to build the `SpeechConfig`. For a full list of available methods, see [SpeechConfig Class](/dotnet/api/microsoft.cognitiveservices.speech.speechconfig). -> The Speech SDK will default to recognizing using en-us for the language, see [Specify source language for speech to text](../../../../how-to-specify-source-language.md) for information on choosing the source language. +> The Speech SDK will default to recognizing using en-us for the language, see [How to recognize speech](../../../../how-to-recognize-speech.md) for information on choosing the source language. ```csharp // Replace with your own subscription key and region identifier from here: https://aka.ms/speech/sdkregion diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/java/jre.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/java/jre.md index 67f50c4030ec5..4367a60ed74ff 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/java/jre.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/java/jre.md @@ -111,7 +111,7 @@ ms.author: eur 1. Save changes to the project. > [!NOTE] -> The Speech SDK will default to recognizing using en-us for the language, see [Specify source language for speech to text](../../../../how-to-specify-source-language.md) for information on choosing the source language. +> The Speech SDK will default to recognizing using en-us for the language, see [How to recognize speech](../../../../how-to-recognize-speech.md) for information on choosing the source language. ## Build and run the app diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/javascript/browser.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/javascript/browser.md index 6912af9cee587..e8681a1e4f59c 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/javascript/browser.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/javascript/browser.md @@ -140,7 +140,7 @@ Now we'll add some basic UI for input boxes, reference the Speech SDK's JavaScri Before you can initialize a `SpeechRecognizer` object, you need to create a configuration that uses your subscription key and subscription region. Insert this code in the `startRecognizeOnceAsyncButton.addEventListener()` method. > [!NOTE] -> The Speech SDK will default to recognizing using en-us for the language, see [Specify source language for speech to text](../../../../how-to-specify-source-language.md) for information on choosing the source language. +> The Speech SDK will default to recognizing using en-us for the language, see [How to recognize speech](../../../../how-to-recognize-speech.md) for information on choosing the source language. 
```JavaScript diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/javascript/nodejs.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/javascript/nodejs.md index 485a2dedded1e..20fd64fc27fab 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/javascript/nodejs.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/javascript/nodejs.md @@ -62,7 +62,7 @@ For NodeJS the Speech SDK doesn't natively support file access directly, so we'l Before you can initialize a `SpeechRecognizer` object, you need to create a configuration that uses your subscription key and subscription region. Insert this code next. > [!NOTE] -> The Speech SDK will default to recognizing using en-us for the language, see [Specify source language for speech to text](../../../../how-to-specify-source-language.md) for information on choosing the source language. +> The Speech SDK will default to recognizing using en-us for the language, see [How to recognize speech](../../../../how-to-recognize-speech.md) for information on choosing the source language. ```javascript // now create the audio-config pointing to our stream and diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/python/python.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/python/python.md index 5ed45e9825838..dd2b3dba60df8 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/python/python.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-file/python/python.md @@ -40,7 +40,7 @@ Or you can download this quickstart tutorial as a [Jupyter](https://jupyter.org) ### Sample code > [!NOTE] -> The Speech SDK will default to recognizing using en-us for the language, see [Specify source language for speech to text](../../../../how-to-specify-source-language.md) for information on choosing the source language. +> The Speech SDK will default to recognizing using en-us for the language, see [How to recognize speech](../../../../how-to-recognize-speech.md) for information on choosing the source language. ```python import azure.cognitiveservices.speech as speechsdk diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-microphone/code-explanation.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-microphone/code-explanation.md index 6a5f9439e1ecc..c4f5674f636c7 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-microphone/code-explanation.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-microphone/code-explanation.md @@ -11,4 +11,4 @@ The Speech resource subscription key and region are required to create a speech The recognizer instance exposes multiple ways to recognize speech. In this example, speech is recognized once. This functionality lets the Speech service know that you're sending a single phrase for recognition, and that once the phrase is identified to stop recognizing speech. Once the result is yielded, the code will write the recognition reason to the console. > [!TIP] -> The Speech SDK will default to recognizing using `en-us` for the language, see [Specify source language for speech to text](../../../how-to-specify-source-language.md) for information on choosing the source language. 
+> The Speech SDK will default to recognizing using `en-us` for the language, see [How to recognize speech](../../../how-to-recognize-speech.md) for information on choosing the source language. diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-microphone/go/go.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-microphone/go/go.md index aa0741c745ad0..932fc9d8eedf8 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/from-microphone/go/go.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/from-microphone/go/go.md @@ -111,7 +111,7 @@ You're now set up to build your project and test speech recognition using the Sp > [!NOTE] -> The Speech SDK will default to recognizing using en-us for the language, see [Specify source language for speech to text](../../../../how-to-specify-source-language.md) for information on choosing the source language. +> The Speech SDK will default to recognizing using en-us for the language, see [How to recognize speech](../../../../how-to-recognize-speech.md) for information on choosing the source language. ## Next steps diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/cpp.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/cpp.md index fe9497073e547..c4da4b1015a2b 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/cpp.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/cpp.md @@ -52,7 +52,7 @@ Insert this code in the `recognizeIntent()` method. Make sure you update these v This sample uses the `FromSubscription()` method to build the `SpeechConfig`. For a full list of available methods, see [SpeechConfig Class](/cpp/cognitive-services/speech/speechconfig). -The Speech SDK will default to recognizing using en-us for the language, see [Specify source language for speech to text](../../../how-to-specify-source-language.md) for information on choosing the source language. +The Speech SDK will default to recognizing using en-us for the language, see [How to recognize speech](../../../how-to-recognize-speech.md) for information on choosing the source language. ## Initialize an IntentRecognizer diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/csharp.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/csharp.md index cbe98e2335f9f..1df3856e9bb4c 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/csharp.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/csharp.md @@ -52,7 +52,7 @@ Insert this code in the `RecognizeIntentAsync()` method. Make sure you update th This sample uses the `FromSubscription()` method to build the `SpeechConfig`. For a full list of available methods, see [SpeechConfig Class](/dotnet/api/microsoft.cognitiveservices.speech.speechconfig). -The Speech SDK will default to recognizing using en-us for the language, see [Specify source language for speech to text](../../../how-to-specify-source-language.md) for information on choosing the source language. +The Speech SDK will default to recognizing using en-us for the language, see [How to recognize speech](../../../how-to-recognize-speech.md) for information on choosing the source language. 
## Initialize an IntentRecognizer diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/java.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/java.md index d95b610525667..cb5d2d916367e 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/java.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/java.md @@ -49,7 +49,7 @@ Insert this code in the try / catch block in `main()`. Make sure you update thes This sample uses the `FromSubscription()` method to build the `SpeechConfig`. For a full list of available methods, see [SpeechConfig Class](/dotnet/api/microsoft.cognitiveservices.speech.speechconfig). -The Speech SDK will default to recognizing using en-us for the language, see [Specify source language for speech to text](../../../how-to-specify-source-language.md) for information on choosing the source language. +The Speech SDK will default to recognizing using en-us for the language, see [How to recognize speech](../../../how-to-recognize-speech.md) for information on choosing the source language. ## Initialize an IntentRecognizer diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/javascript.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/javascript.md index 04c6d9f37d0eb..744980cbf60c9 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/javascript.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/javascript.md @@ -147,7 +147,7 @@ Now we'll add some basic UI for input boxes, reference the Speech SDK's JavaScri Before you can initialize a `SpeechRecognizer` object, you need to create a configuration that uses your subscription key and subscription region. Insert this code in the `startRecognizeOnceAsyncButton.addEventListener()` method. > [!NOTE] -> The Speech SDK will default to recognizing using en-us for the language, see [Specify source language for speech to text](../../../how-to-specify-source-language.md) for information on choosing the source language. +> The Speech SDK will default to recognizing using en-us for the language, see [How to recognize speech](../../../how-to-recognize-speech.md) for information on choosing the source language. ```JavaScript diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/python.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/python.md index 535be4619c718..1874c8d83f57b 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/python.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/intent-recognition/python.md @@ -48,7 +48,7 @@ Insert this code in `quickstart.py`. Make sure you update these values: This sample constructs the `SpeechConfig` object using LUIS key and region. For a full list of available methods, see [SpeechConfig Class](/python/api/azure-cognitiveservices-speech/azure.cognitiveservices.speech.speechconfig). -The Speech SDK will default to recognizing using en-us for the language, see [Specify source language for speech to text](../../../how-to-specify-source-language.md) for information on choosing the source language. 
+The Speech SDK will default to recognizing using en-us for the language, see [How to recognize speech](../../../how-to-recognize-speech.md) for information on choosing the source language. ## Initialize an IntentRecognizer diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/cli.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/cli.md index 96f40934d16dd..e4bfea3f0b10b 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/cli.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/cli.md @@ -12,16 +12,10 @@ ms.author: eur [!INCLUDE [Prerequisites](../../common/azure-prerequisites.md)] -> [!div class="nextstepaction"] -> I ran into an issue - ## Set up the environment [!INCLUDE [SPX Setup](../../spx-setup-quick.md)] -> [!div class="nextstepaction"] -> I ran into an issue - ## Recognize speech from a microphone Run the following command to start speech recognition from a microphone: @@ -37,9 +31,6 @@ Connection CONNECTED... RECOGNIZED: I'm excited to try speech to text. ``` -> [!div class="nextstepaction"] -> I ran into an issue - Now that you've transcribed speech to text, here are some suggested modifications to try out: - To recognize speech from an audio file, use `--file` instead of `--microphone`. For compressed audio files such as MP4, install GStreamer and use `--format`. For more information, see [How to use compressed input audio](~/articles/cognitive-services/speech-service/how-to-use-codec-compressed-audio-input-streams.md). # [Terminal](#tab/terminal) diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/cpp.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/cpp.md index 6bb1a0344211d..fa61aad0dca71 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/cpp.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/cpp.md @@ -14,15 +14,9 @@ ms.author: eur [!INCLUDE [Prerequisites](../../common/azure-prerequisites.md)] -> [!div class="nextstepaction"] -> I ran into an issue - ## Set up the environment The Speech SDK is available as a [NuGet package](https://www.nuget.org/packages/Microsoft.CognitiveServices.Speech) and implements .NET Standard 2.0. You install the Speech SDK in the next section of this article, but first check the [platform-specific installation instructions](../../../quickstarts/setup-platform.md?pivots=programming-language-cpp) for any more requirements. -> [!div class="nextstepaction"] -> I ran into an issue - ## Recognize speech from a microphone Follow these steps to create a new console application and install the Speech SDK. @@ -92,9 +86,6 @@ Speak into your microphone. RECOGNIZED: Text=I'm excited to try speech to text. ``` -> [!div class="nextstepaction"] -> I ran into an issue - Here are some additional considerations: - This example uses the `RecognizeOnceAsync` operation to transcribe utterances of up to 30 seconds, or until silence is detected. For information about continuous recognition for longer audio, including multi-lingual conversations, see [How to recognize speech](~/articles/cognitive-services/speech-service/how-to-recognize-speech.md). 
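To give a feel for the continuous pattern that article describes, here's a minimal sketch using the Python Speech SDK; the C++ API follows the same shape with `StartContinuousRecognitionAsync`, and the key and region values are placeholders.

```python
import time
import azure.cognitiveservices.speech as speechsdk

# Placeholder key and region; use your own Speech resource values.
speech_config = speechsdk.SpeechConfig(subscription="YourSubscriptionKey", region="YourServiceRegion")
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)

done = False

def stop(evt):
    """Signal the main loop to exit when the session stops or is canceled."""
    global done
    done = True

# Print each final phrase as it's recognized, instead of stopping after one utterance.
speech_recognizer.recognized.connect(lambda evt: print("RECOGNIZED:", evt.result.text))
speech_recognizer.session_stopped.connect(stop)
speech_recognizer.canceled.connect(stop)

speech_recognizer.start_continuous_recognition()
while not done:
    time.sleep(0.5)
speech_recognizer.stop_continuous_recognition()
```

The same event-driven pattern applies in the other SDK languages.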
diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/csharp.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/csharp.md index 323d2b867a5a3..546eecb874d03 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/csharp.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/csharp.md @@ -14,15 +14,9 @@ ms.author: eur [!INCLUDE [Prerequisites](../../common/azure-prerequisites.md)] -> [!div class="nextstepaction"] -> I ran into an issue - ## Set up the environment The Speech SDK is available as a [NuGet package](https://www.nuget.org/packages/Microsoft.CognitiveServices.Speech) and implements .NET Standard 2.0. You install the Speech SDK in the next section of this article, but first check the [platform-specific installation instructions](../../../quickstarts/setup-platform.md?pivots=programming-language-csharp) for any more requirements. -> [!div class="nextstepaction"] -> I ran into an issue - ## Recognize speech from a microphone Follow these steps to create a new console application and install the Speech SDK. @@ -104,9 +98,6 @@ Speak into your microphone. RECOGNIZED: Text=I'm excited to try speech to text. ``` -> [!div class="nextstepaction"] -> I ran into an issue - Here are some additional considerations: - This example uses the `RecognizeOnceAsync` operation to transcribe utterances of up to 30 seconds, or until silence is detected. For information about continuous recognition for longer audio, including multi-lingual conversations, see [How to recognize speech](~/articles/cognitive-services/speech-service/how-to-recognize-speech.md). diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/go.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/go.md index bbe72ef76f008..b80a2a8fc9d45 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/go.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/go.md @@ -14,16 +14,10 @@ ms.author: eur [!INCLUDE [Prerequisites](../../common/azure-prerequisites.md)] -> [!div class="nextstepaction"] -> I ran into an issue - ## Set up the environment Install the [Speech SDK for Go](../../../quickstarts/setup-platform.md?pivots=programming-language-go&tabs=dotnet%252cwindows%252cjre%252cbrowser). Check the [platform-specific installation instructions](../../../quickstarts/setup-platform.md?pivots=programming-language-go) for any more requirements. -> [!div class="nextstepaction"] -> I ran into an issue - ## Recognize speech from a microphone Follow these steps to create a new GO module. 
@@ -118,9 +112,6 @@ go build go run speech-recognition ``` -> [!div class="nextstepaction"] -> I ran into an issue - ## Clean up resources [!INCLUDE [Delete resource](../../common/delete-resource.md)] diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/intro.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/intro.md index 7726893e7b21c..bc280360df140 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/intro.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/intro.md @@ -9,4 +9,4 @@ ms.author: eur In this quickstart, you run an application to recognize and transcribe human speech (often called speech-to-text). > [!TIP] -> To try the Speech service without writing any code, create a project in [Speech Studio](~/articles/cognitive-services/speech-service/speech-studio-overview.md). +> To try the Speech service without writing any code, create a project in [Speech Studio](https://aka.ms/speechstudio/speechtotexttool). diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/java.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/java.md index 5b570c70ac3be..5e267bf15b1f9 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/java.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/java.md @@ -14,16 +14,10 @@ ms.author: eur [!INCLUDE [Prerequisites](../../common/azure-prerequisites.md)] -> [!div class="nextstepaction"] -> I ran into an issue - ## Set up the environment Before you can do anything, you need to install the Speech SDK. The sample in this quickstart works with the [Java Runtime](~/articles/cognitive-services/speech-service/quickstarts/setup-platform.md?pivots=programming-language-java&tabs=jre). -> [!div class="nextstepaction"] -> I ran into an issue - ## Recognize speech from a microphone Follow these steps to create a new console application for speech recognition. @@ -94,9 +88,6 @@ Speak into your microphone. RECOGNIZED: Text=I'm excited to try speech to text. ``` -> [!div class="nextstepaction"] -> I ran into an issue - Here are some additional considerations: - This example uses the `RecognizeOnceAsync` operation to transcribe utterances of up to 30 seconds, or until silence is detected. For information about continuous recognition for longer audio, including multi-lingual conversations, see [How to recognize speech](~/articles/cognitive-services/speech-service/how-to-recognize-speech.md). diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/javascript.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/javascript.md index d5c24d8e3ed44..d98eb792872ca 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/javascript.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/javascript.md @@ -14,17 +14,10 @@ ms.author: eur [!INCLUDE [Prerequisites](../../common/azure-prerequisites.md)] -> [!div class="nextstepaction"] -> I ran into an issue - ## Set up the environment Before you can do anything, you need to install the Speech SDK for JavaScript. If you just want the package name to install, run `npm install microsoft-cognitiveservices-speech-sdk`. 
For guided installation instructions, see [Set up the development environment](../../../quickstarts/setup-platform.md?pivots=programming-language-javascript). -> [!div class="nextstepaction"] -> I ran into an issue - - ## Recognize speech from a file Follow these steps to create a new console application for speech recognition. @@ -87,9 +80,6 @@ The speech from the audio file should be output as text: RECOGNIZED: Text=I'm excited to try speech to text. ``` -> [!div class="nextstepaction"] -> I ran into an issue - This example uses the `recognizeOnceAsync` operation to transcribe utterances of up to 30 seconds, or until silence is detected. For information about continuous recognition for longer audio, including multi-lingual conversations, see [How to recognize speech](~/articles/cognitive-services/speech-service/how-to-recognize-speech.md). > [!NOTE] diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/objectivec.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/objectivec.md index 9f7915d9ba8a3..92178d288e55b 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/objectivec.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/objectivec.md @@ -14,19 +14,12 @@ ms.author: eur [!INCLUDE [Prerequisites](../../common/azure-prerequisites.md)] -> [!div class="nextstepaction"] -> I ran into an issue - ## Set up the environment The Speech SDK for Objective-C is distributed as a framework bundle. The framework supports both Objective-C and Swift on both iOS and macOS. The Speech SDK can be used in Xcode projects as a [CocoaPod](https://cocoapods.org/), or downloaded directly [here](https://aka.ms/csspeech/macosbinary) and linked manually. This guide uses a CocoaPod. Install the CocoaPod dependency manager as described in its [installation instructions](https://guides.cocoapods.org/using/getting-started.html). -> [!div class="nextstepaction"] -> I ran into an issue - - ## Recognize speech from a microphone Follow these steps to recognize speech in a macOS application. @@ -74,9 +67,6 @@ Follow these steps to recognize speech in a macOS application. After you select the button in the app and say a few words, you should see the text you have spoken on the lower part of the screen. When you run the app for the first time, you should be prompted to give the app access to your computer's microphone. -> [!div class="nextstepaction"] -> I ran into an issue - Here are some additional considerations: - This example uses the `recognizeOnce` operation to transcribe utterances of up to 30 seconds, or until silence is detected. For information about continuous recognition for longer audio, including multi-lingual conversations, see [How to recognize speech](~/articles/cognitive-services/speech-service/how-to-recognize-speech.md). 
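The C#, Go, Java, JavaScript, and Objective-C quickstarts above all describe the same single-shot pattern: `RecognizeOnceAsync` (or its per-language equivalent) transcribes one utterance of up to 30 seconds, or until silence is detected. As a rough illustration of that pattern, here is a minimal Python sketch; the `SPEECH_KEY` and `SPEECH_REGION` environment variable names are assumptions, not values taken from these quickstarts.

```python
import os
import azure.cognitiveservices.speech as speechsdk

speech_config = speechsdk.SpeechConfig(
    subscription=os.environ["SPEECH_KEY"], region=os.environ["SPEECH_REGION"])
speech_config.speech_recognition_language = "en-US"

# Capture from the default microphone; pass filename="YourAudioFile.wav" instead to read a file.
audio_config = speechsdk.audio.AudioConfig(use_default_microphone=True)
recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)

# Single-shot recognition: returns after one utterance (up to about 30 seconds) or on silence.
result = recognizer.recognize_once_async().get()
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
    print(f"RECOGNIZED: Text={result.text}")
elif result.reason == speechsdk.ResultReason.NoMatch:
    print("NOMATCH: Speech could not be recognized.")
elif result.reason == speechsdk.ResultReason.Canceled:
    print(f"CANCELED: {result.cancellation_details.reason}")
```

Single-shot recognition keeps the samples short; as the quickstarts note, continuous recognition is the better fit for longer or multi-lingual audio.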
diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/python.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/python.md index 90c3cb94f7c11..e85800a10cf76 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/python.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/python.md @@ -14,9 +14,6 @@ ms.author: eur [!INCLUDE [Prerequisites](../../common/azure-prerequisites.md)] -> [!div class="nextstepaction"] -> I ran into an issue - ## Set up the environment The Speech SDK for Python is available as a [Python Package Index (PyPI) module](https://pypi.org/project/azure-cognitiveservices-speech/). The Speech SDK for Python is compatible with Windows, Linux, and macOS. @@ -25,9 +22,6 @@ The Speech SDK for Python is available as a [Python Package Index (PyPI) module] Install a version of [Python from 3.7 to 3.10](https://www.python.org/downloads/). First check the [platform-specific installation instructions](../../../quickstarts/setup-platform.md?pivots=programming-language-python) for any more requirements. -> [!div class="nextstepaction"] -> I ran into an issue - ## Recognize speech from a microphone Follow these steps to create a new console application. @@ -81,9 +75,6 @@ Speak into your microphone. RECOGNIZED: Text=I'm excited to try speech to text. ``` -> [!div class="nextstepaction"] -> I ran into an issue - Here are some additional considerations: - This example uses the `recognize_once_async` operation to transcribe utterances of up to 30 seconds, or until silence is detected. For information about continuous recognition for longer audio, including multi-lingual conversations, see [How to recognize speech](~/articles/cognitive-services/speech-service/how-to-recognize-speech.md). diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/rest.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/rest.md index 67640ef72ab56..83a8e2c29604c 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/rest.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/rest.md @@ -14,9 +14,6 @@ ms.author: eur [!INCLUDE [Prerequisites](../../common/azure-prerequisites.md)] -> [!div class="nextstepaction"] -> I ran into an issue - ## Recognize speech from a file At a command prompt, run the following cURL command. Insert the following values into the command. Replace `YourSubscriptionKey` with your Speech resource key, replace `YourServiceRegion` with your Speech resource region, and replace `YourAudioFile.wav` with the path and name of your audio file. @@ -46,10 +43,6 @@ You should receive a response similar to what is shown here. The `DisplayText` s For more information, see [speech-to-text REST API for short audio](../../../rest-speech-to-text-short.md). 
-> [!div class="nextstepaction"] -> I ran into an issue - - ## Clean up resources [!INCLUDE [Delete resource](../../common/delete-resource.md)] diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/swift.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/swift.md index fb664df8e6a8b..30c2ad3110bfd 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/swift.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/speech-to-text-basics/swift.md @@ -14,19 +14,12 @@ ms.author: eur [!INCLUDE [Prerequisites](../../common/azure-prerequisites.md)] -> [!div class="nextstepaction"] -> I ran into an issue - ## Set up the environment The Speech SDK for Swift is distributed as a framework bundle. The framework supports both Objective-C and Swift on both iOS and macOS. The Speech SDK can be used in Xcode projects as a [CocoaPod](https://cocoapods.org/), or downloaded directly [here](https://aka.ms/csspeech/macosbinary) and linked manually. This guide uses a CocoaPod. Install the CocoaPod dependency manager as described in its [installation instructions](https://guides.cocoapods.org/using/getting-started.html). -> [!div class="nextstepaction"] -> I ran into an issue - - ## Recognize speech from a microphone Follow these steps to recognize speech in a macOS application. @@ -128,9 +121,6 @@ Follow these steps to recognize speech in a macOS application. After you select the button in the app and say a few words, you should see the text you have spoken on the lower part of the screen. When you run the app for the first time, you should be prompted to give the app access to your computer's microphone. -> [!div class="nextstepaction"] -> I ran into an issue - This example uses the `recognizeOnce` operation to transcribe utterances of up to 30 seconds, or until silence is detected. For information about continuous recognition for longer audio, including multi-lingual conversations, see [How to recognize speech](~/articles/cognitive-services/speech-service/how-to-recognize-speech.md). ## Clean up resources diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/text-to-speech-basics/intro.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/text-to-speech-basics/intro.md index 5569ed5ab1b0b..9e55dfb83c6f2 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/text-to-speech-basics/intro.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/text-to-speech-basics/intro.md @@ -9,4 +9,4 @@ ms.author: eur In this quickstart, you run an application that does text to speech synthesis. > [!TIP] -> To try the Speech service without writing any code, create a project in [Speech Studio](~/articles/cognitive-services/speech-service/speech-studio-overview.md). +> To try the Speech service without writing any code, create a project in [Speech Studio](https://aka.ms/speechstudio/voicegallery). 
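The text to speech quickstart intro runs an application that synthesizes speech from text. A minimal Python sketch of that flow, assuming the same `SPEECH_KEY` and `SPEECH_REGION` environment variables and using `en-US-JennyNeural` purely as an example prebuilt neural voice:

```python
import os
import azure.cognitiveservices.speech as speechsdk

speech_config = speechsdk.SpeechConfig(
    subscription=os.environ["SPEECH_KEY"], region=os.environ["SPEECH_REGION"])
# Any prebuilt neural voice name from the language support list works here.
speech_config.speech_synthesis_voice_name = "en-US-JennyNeural"

# Play through the default speaker; use AudioOutputConfig(filename="output.wav") to write a file.
audio_config = speechsdk.audio.AudioOutputConfig(use_default_speaker=True)
synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=audio_config)

result = synthesizer.speak_text_async("I'm excited to try text to speech.").get()
if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
    print("Speech synthesized to the default speaker.")
elif result.reason == speechsdk.ResultReason.Canceled:
    print(f"CANCELED: {result.cancellation_details.reason}")
```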
diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/voice-assistants/go/go.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/voice-assistants/go/go.md index fa53807ae9dca..422b06e677d2f 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/voice-assistants/go/go.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/voice-assistants/go/go.md @@ -117,7 +117,7 @@ You're now set up to build your project and test your custom voice assistant usi > [!NOTE] -> The Speech SDK will default to recognizing using en-us for the language, see [Specify source language for speech to text](../../../../how-to-specify-source-language.md) for information on choosing the source language. +> The Speech SDK will default to recognizing using en-us for the language, see [How to recognize speech](../../../../how-to-recognize-speech.md) for information on choosing the source language. ## Next steps diff --git a/articles/cognitive-services/Speech-Service/includes/release-notes/release-notes-cli.md b/articles/cognitive-services/Speech-Service/includes/release-notes/release-notes-cli.md index 57e17124d5a6c..819e0eaac104a 100644 --- a/articles/cognitive-services/Speech-Service/includes/release-notes/release-notes-cli.md +++ b/articles/cognitive-services/Speech-Service/includes/release-notes/release-notes-cli.md @@ -6,6 +6,17 @@ ms.date: 01/08/2022 ms.author: eur --- +### Speech CLI 1.22.0: June 2022 release + +Uses Speech SDK 1.22.0. + +#### New features + +- Added the `spx init` command to guide users through Speech resource key creation without going to the Azure portal. +- Speech Docker containers now include the Azure CLI, so the `spx init` command works out of the box. +- Added timestamp as an event output option to make SPX more useful when calculating latencies. + + ### Speech CLI 1.21.0: April 2022 release Uses Speech SDK 1.21.0. diff --git a/articles/cognitive-services/Speech-Service/includes/release-notes/release-notes-sdk.md b/articles/cognitive-services/Speech-Service/includes/release-notes/release-notes-sdk.md index 999247dcedd0b..4bd60076b6f4f 100644 --- a/articles/cognitive-services/Speech-Service/includes/release-notes/release-notes-sdk.md +++ b/articles/cognitive-services/Speech-Service/includes/release-notes/release-notes-sdk.md @@ -6,6 +6,29 @@ ms.date: 02/22/2022 ms.author: eur --- +### Speech SDK 1.22.0: June 2022 release + +#### New features + +- **Java**: IntentRecognitionResult API for getEntities(), applyLanguageModels(), and recognizeOnceAsync(text) added to support the simple pattern matching engine. +- **Unity**: Added support for Mac M1 (Apple Silicon) for Unity package ([GitHub issue](https://github.com/Azure-Samples/cognitive-services-speech-sdk/issues/1465)) +- **C#**: Added support for x86_64 for Xamarin Android ([GitHub issue](https://github.com/Azure-Samples/cognitive-services-speech-sdk/issues/1457)) +- **C#**: .NET framework minimum version updated to v4.6.2 for SDK C# package as v4.6.1 has retired (see [Microsoft .NET Framework Component Lifecycle Policy](/lifecycle/products/microsoft-net-framework)) +- **Linux**: Added support for Debian 11 and Ubuntu 22.04 LTS. Ubuntu 22.04 LTS requires manual installation of libssl1.1 either as a binary package from [here](http://security.ubuntu.com/ubuntu/pool/main/o/openssl) (e.g. libssl1.1_1.1.1l-1ubuntu1.3_amd64.deb or newer for x64), or by compiling from sources.
+ +#### Bug fixes + +- **UWP**: OpenSSL dependency removed from UWP libraries and replaced with WinRT websocket and HTTP APIs to meet security compliance and smaller binary footprint. +- **Mac**: Fixed "MicrosoftCognitiveServicesSpeech Module Not Found" issue when using Swift projects targeting MacOS platform +- **Windows, Mac**: Fixed a platform-specific issue where audio sources that were configured via properties to stream at a real-time rate sometimes fell behind and eventually exceeded capacity + +#### Samples ([GitHub](https://github.com/Azure-Samples/cognitive-services-speech-sdk)) + +- **C#**: .NET framework samples updated to use v4.6.2 +- **Unity**: Virtual-assistant sample fixed for Android and UWP +- **Unity**: Unity samples updated for Unity 2020 LTS version + + ### Speech SDK 1.21.0: April 2022 release #### New features @@ -513,7 +536,7 @@ Stay healthy! - Keyword recognition support added for Android `.aar` package and added support for x86 and x64 flavors. - Objective-C: `SendMessage` and `SetMessageProperty` methods added to `Connection` object. See documentation [here](/objectivec/cognitive-services/speech/spxconnection). - TTS C++ api now supports `std::wstring` as synthesis text input, removing the need to convert a wstring to string before passing it to the SDK. See details [here](/cpp/cognitive-services/speech/speechsynthesizer#speaktextasync). -- C#: [Language ID](../../how-to-automatic-language-detection.md?pivots=programming-language-csharp) and [source language config](../../how-to-specify-source-language.md?pivots=programming-language-csharp) are now available. +- C#: [Language ID](../../how-to-automatic-language-detection.md?pivots=programming-language-csharp) and [source language config](../../how-to-recognize-speech.md) are now available. - JavaScript: Added a feature to `Connection` object to pass through custom messages from the Speech service as callback `receivedServiceMessage`. - JavaScript: Added support for `FromHost API` to ease use with on-prem containers and sovereign clouds. See documentation [here](../../speech-container-howto.md). - JavaScript: We now honor `NODE_TLS_REJECT_UNAUTHORIZED` thanks to a contribution from [orgads](https://github.com/orgads). See details [here](https://github.com/microsoft/cognitive-services-speech-sdk-js/pull/75). diff --git a/articles/cognitive-services/Speech-Service/includes/release-notes/release-notes-tts.md b/articles/cognitive-services/Speech-Service/includes/release-notes/release-notes-tts.md index 8e01e9302ad85..b99ac0c989b90 100644 --- a/articles/cognitive-services/Speech-Service/includes/release-notes/release-notes-tts.md +++ b/articles/cognitive-services/Speech-Service/includes/release-notes/release-notes-tts.md @@ -10,11 +10,15 @@ ms.author: eur #### Prebuilt Neural TTS Voice * Released 5 new voices in public preview with multiple styles to enrich the variety in American English. See [full language and voice list](../../language-support.md#prebuilt-neural-voices). - * Support these new styles `Angry`, `Excited`, `Friendly`, `Hopeful`, `Sad`, `Shouting`, `Unfriendly`, `Terrified` and `Whispering` in public preview for `en-US-AriaNeural`. * Support these new styles `Angry`, `Cheerful`, `Excited`, `Friendly`, `Hopeful`, `Sad`, `Shouting`, `Unfriendly`, `Terrified` and `Whispering` in public preview for `en-US-GuyNeural`, `en-US-JennyNeural`. * Support these new styles `Excited`, `Friendly`, `Hopeful`, `Shouting`, `Unfriendly`, `Terrified` and `Whispering` in public preview for `en-US-SaraNeural`. 
See [voice styles and roles](../../language-support.md#voice-styles-and-roles). * Released disconnected containers for prebuilt neural TTS voices in public preview. See [use Docker containers in disconnected environments](../../../containers/disconnected-containers.md). +* Released new voices `zh-CN-YunjianNeural`, `zh-CN-YunhaoNeural`, and `zh-CN-YunfengNeural` in public preview. See [full language and voice list](../../language-support.md#prebuilt-neural-voices). +* Support 2 new styles `sports-commentary`, `sports-commentary-excited` in public preview for `zh-CN-YunjianNeural`. See [voice styles and roles](../../language-support.md#voice-styles-and-roles). +* Support 1 new style `advertisement-upbeat` in public preview for `zh-CN-YunhaoNeural`. See [voice styles and roles](../../language-support.md#voice-styles-and-roles). +* The `cheerful` and `sad` styles for `fr-FR-DeniseNeural` are generally available in all regions. +* SSML updated to support MathML elements for en-US and en-AU voices. Learn more at [speech synthesis markup](../../speech-synthesis-markup.md#supported-mathml-elements). ### 2022-March release diff --git a/articles/cognitive-services/Speech-Service/includes/service-pricing-advisory.md b/articles/cognitive-services/Speech-Service/includes/service-pricing-advisory.md index c17fb42e8db0e..1a4c8196f5994 100644 --- a/articles/cognitive-services/Speech-Service/includes/service-pricing-advisory.md +++ b/articles/cognitive-services/Speech-Service/includes/service-pricing-advisory.md @@ -11,5 +11,5 @@ ms.date: 12/02/2019 ms.author: eur --- -> [!NOTE] -> When testing, the system will perform a transcription. This is important to keep in mind, as pricing varies per service offering and subscription level. Always refer to the official Azure Cognitive Services pricing - Speech service for [the latest details](https://azure.microsoft.com/pricing/details/cognitive-services/speech-services). +> [!IMPORTANT] +> When testing, the system will perform a transcription. This is important to keep in mind, as pricing varies per service offering and subscription level. Always refer to the official Azure Cognitive Services pricing for [the latest details](https://azure.microsoft.com/pricing/details/cognitive-services/speech-services). \ No newline at end of file diff --git a/articles/cognitive-services/Speech-Service/index-speech-to-text.yml b/articles/cognitive-services/Speech-Service/index-speech-to-text.yml index 108738334fde3..db39a111d9843 100644 --- a/articles/cognitive-services/Speech-Service/index-speech-to-text.yml +++ b/articles/cognitive-services/Speech-Service/index-speech-to-text.yml @@ -36,8 +36,6 @@ landingContent: links: - text: Choose speech recognition mode url: ./get-started-speech-to-text.md - - text: Change speech recognition source language - url: how-to-specify-source-language.md - text: Improve accuracy with Custom Speech url: ./custom-speech-overview.md - text: Use compressed audio input formats diff --git a/articles/cognitive-services/Speech-Service/language-support.md b/articles/cognitive-services/Speech-Service/language-support.md index 0af3eb80bdeae..d1beb79312d7a 100644 --- a/articles/cognitive-services/Speech-Service/language-support.md +++ b/articles/cognitive-services/Speech-Service/language-support.md @@ -726,47 +726,50 @@ The following neural voices are in public preview. 
| Language | Locale | Gender | Voice name | Style support | |----------------------------------|---------|--------|----------------------------------------|---------------| -| English (United Kingdom) | `en-GB` | Female | `en-GB-AbbiNeural` New | General | -| English (United Kingdom) | `en-GB` | Female | `en-GB-BellaNeural` New | General | -| English (United Kingdom) | `en-GB` | Female | `en-GB-HollieNeural` New | General | -| English (United Kingdom) | `en-GB` | Female | `en-GB-OliviaNeural` New | General | -| English (United Kingdom) | `en-GB` | Female | `en-GB-MaisieNeural` New | General, child voice | -| English (United Kingdom) | `en-GB` | Male | `en-GB-AlfieNeural` New | General | -| English (United Kingdom) | `en-GB` | Male | `en-GB-ElliotNeural` New | General | -| English (United Kingdom) | `en-GB` | Male | `en-GB-EthanNeural` New | General | -| English (United Kingdom) | `en-GB` | Male | `en-GB-NoahNeural` New | General | -| English (United Kingdom) | `en-GB` | Male | `en-GB-OliverNeural` New | General | -| English (United Kingdom) | `en-GB` | Male | `en-GB-ThomasNeural` New | General | -| English (United States) | `en-US` | Male | `en-US-DavisNeural` | General, multiple voice styles available [using SSML](speech-synthesis-markup.md#adjust-speaking-styles) | -| English (United States) | `en-US` | Female | `en-US-JaneNeural` | General, multiple voice styles available [using SSML](speech-synthesis-markup.md#adjust-speaking-styles) | -| English (United States) | `en-US` | Male | `en-US-JasonNeural` | General, multiple voice styles available [using SSML](speech-synthesis-markup.md#adjust-speaking-styles) | -| English (United States) | `en-US` | Female | `en-US-NancyNeural` | General, multiple voice styles available [using SSML](speech-synthesis-markup.md#adjust-speaking-styles) | -| English (United States) | `en-US` | Male | `en-US-TonyNeural` | General, multiple voice styles available [using SSML](speech-synthesis-markup.md#adjust-speaking-styles) | -| French (France) | `fr-FR` | Female | `fr-FR-BrigitteNeural` New | General | -| French (France) | `fr-FR` | Female | `fr-FR-CelesteNeural` New | General | -| French (France) | `fr-FR` | Female | `fr-FR-CoralieNeural` New | General | -| French (France) | `fr-FR` | Female | `fr-FR-JacquelineNeural` New | General | -| French (France) | `fr-FR` | Female | `fr-FR-JosephineNeural` New | General | -| French (France) | `fr-FR` | Female | `fr-FR-YvetteNeural` New | General | -| French (France) | `fr-FR` | Female | `fr-FR-EloiseNeural` New | General, child voice | -| French (France) | `fr-FR` | Male | `fr-FR-AlainNeural` New | General | -| French (France) | `fr-FR` | Male | `fr-FR-ClaudeNeural` New | General | -| French (France) | `fr-FR` | Male | `fr-FR-JeromeNeural` New | General | -| French (France) | `fr-FR` | Male | `fr-FR-MauriceNeural` New | General | -| French (France) | `fr-FR` | Male | `fr-FR-YvesNeural` New | General | -| German (Germany) | `de-DE` | Female | `de-DE-AmalaNeural` New | General | -| German (Germany) | `de-DE` | Female | `de-DE-ElkeNeural` New | General | -| German (Germany) | `de-DE` | Female | `de-DE-KlarissaNeural` New | General | -| German (Germany) | `de-DE` | Female | `de-DE-LouisaNeural` New | General | -| German (Germany) | `de-DE` | Female | `de-DE-MajaNeural` New | General | -| German (Germany) | `de-DE` | Female | `de-DE-TanjaNeural` New | General | -| German (Germany) | `de-DE` | Female | `de-DE-GiselaNeural` New | General, child voice | -| German (Germany) | `de-DE` | Male | `de-DE-BerndNeural` New | General 
| -| German (Germany) | `de-DE` | Male | `de-DE-ChristophNeural` New | General | -| German (Germany) | `de-DE` | Male | `de-DE-KasperNeural` New | General | -| German (Germany) | `de-DE` | Male | `de-DE-KillianNeural` New | General | -| German (Germany) | `de-DE` | Male | `de-DE-KlausNeural` New | General | -| German (Germany) | `de-DE` | Male | `de-DE-RalfNeural` New | General | +| Chinese (Mandarin, Simplified) | `zh-CN` | Male | `zh-CN-YunjianNeural` New | Optimized for broadcasting sports events, 2 new styles available [using SSML](speech-synthesis-markup.md#adjust-speaking-styles) | +| Chinese (Mandarin, Simplified) | `zh-CN` | Male | `zh-CN-YunhaoNeural` New | Optimized for promoting a product or service, 1 new style available [using SSML](speech-synthesis-markup.md#adjust-speaking-styles) | +| Chinese (Mandarin, Simplified) | `zh-CN` | Male | `zh-CN-YunfengNeural` New | General, multiple styles available [using SSML](speech-synthesis-markup.md#adjust-speaking-styles) | +| English (United Kingdom) | `en-GB` | Female | `en-GB-AbbiNeural` | General | +| English (United Kingdom) | `en-GB` | Female | `en-GB-BellaNeural` | General | +| English (United Kingdom) | `en-GB` | Female | `en-GB-HollieNeural` | General | +| English (United Kingdom) | `en-GB` | Female | `en-GB-OliviaNeural` | General | +| English (United Kingdom) | `en-GB` | Female | `en-GB-MaisieNeural` | General, child voice | +| English (United Kingdom) | `en-GB` | Male | `en-GB-AlfieNeural` | General | +| English (United Kingdom) | `en-GB` | Male | `en-GB-ElliotNeural` | General | +| English (United Kingdom) | `en-GB` | Male | `en-GB-EthanNeural` | General | +| English (United Kingdom) | `en-GB` | Male | `en-GB-NoahNeural` | General | +| English (United Kingdom) | `en-GB` | Male | `en-GB-OliverNeural` | General | +| English (United Kingdom) | `en-GB` | Male | `en-GB-ThomasNeural` | General | +| English (United States) | `en-US` | Male | `en-US-DavisNeural` New | General, multiple voice styles available [using SSML](speech-synthesis-markup.md#adjust-speaking-styles) | +| English (United States) | `en-US` | Female | `en-US-JaneNeural` New | General, multiple voice styles available [using SSML](speech-synthesis-markup.md#adjust-speaking-styles) | +| English (United States) | `en-US` | Male | `en-US-JasonNeural` New | General, multiple voice styles available [using SSML](speech-synthesis-markup.md#adjust-speaking-styles) | +| English (United States) | `en-US` | Female | `en-US-NancyNeural` New | General, multiple voice styles available [using SSML](speech-synthesis-markup.md#adjust-speaking-styles) | +| English (United States) | `en-US` | Male | `en-US-TonyNeural` New | General, multiple voice styles available [using SSML](speech-synthesis-markup.md#adjust-speaking-styles) | +| French (France) | `fr-FR` | Female | `fr-FR-BrigitteNeural` | General | +| French (France) | `fr-FR` | Female | `fr-FR-CelesteNeural` | General | +| French (France) | `fr-FR` | Female | `fr-FR-CoralieNeural` | General | +| French (France) | `fr-FR` | Female | `fr-FR-JacquelineNeural` | General | +| French (France) | `fr-FR` | Female | `fr-FR-JosephineNeural` | General | +| French (France) | `fr-FR` | Female | `fr-FR-YvetteNeural` | General | +| French (France) | `fr-FR` | Female | `fr-FR-EloiseNeural` | General, child voice | +| French (France) | `fr-FR` | Male | `fr-FR-AlainNeural` | General | +| French (France) | `fr-FR` | Male | `fr-FR-ClaudeNeural` | General | +| French (France) | `fr-FR` | Male | `fr-FR-JeromeNeural` | General | +| 
French (France) | `fr-FR` | Male | `fr-FR-MauriceNeural` | General | +| French (France) | `fr-FR` | Male | `fr-FR-YvesNeural` | General | +| German (Germany) | `de-DE` | Female | `de-DE-AmalaNeural` | General | +| German (Germany) | `de-DE` | Female | `de-DE-ElkeNeural` | General | +| German (Germany) | `de-DE` | Female | `de-DE-KlarissaNeural` | General | +| German (Germany) | `de-DE` | Female | `de-DE-LouisaNeural` | General | +| German (Germany) | `de-DE` | Female | `de-DE-MajaNeural` | General | +| German (Germany) | `de-DE` | Female | `de-DE-TanjaNeural` | General | +| German (Germany) | `de-DE` | Female | `de-DE-GiselaNeural` | General, child voice | +| German (Germany) | `de-DE` | Male | `de-DE-BerndNeural` | General | +| German (Germany) | `de-DE` | Male | `de-DE-ChristophNeural` | General | +| German (Germany) | `de-DE` | Male | `de-DE-KasperNeural` | General | +| German (Germany) | `de-DE` | Male | `de-DE-KillianNeural` | General | +| German (Germany) | `de-DE` | Male | `de-DE-KlausNeural` | General | +| German (Germany) | `de-DE` | Male | `de-DE-RalfNeural` | General | ### Voice styles and roles @@ -782,88 +785,93 @@ Use the following table to determine supported styles and roles for each neural |Voice|Styles|Style degree|Roles| |-----|-----|-----|-----| |en-US-AriaNeural|`angry`, `chat`, `cheerful`, `customerservice`, `empathetic`, `excited`, `friendly`, `hopeful`, `narration-professional`, `newscast-casual`, `newscast-formal`, `sad`, `shouting`, `terrified`, `unfriendly`, `whispering`||| -|en-US-DavisNeural|`angry`, `chat`, `cheerful`, `excited`, `friendly`, `hopeful`, `sad`, `shouting`, `terrified`, `unfriendly`, `whispering`||| +|en-US-DavisNeural Public preview|`angry`, `chat`, `cheerful`, `excited`, `friendly`, `hopeful`, `sad`, `shouting`, `terrified`, `unfriendly`, `whispering`||| |en-US-GuyNeural|`angry`, `cheerful`, `excited`, `friendly`, `hopeful`, `newscast`, `sad`, `shouting`, `terrified`, `unfriendly`, `whispering`||| -|en-US-JaneNeural|`angry`, `cheerful`, `excited`, `friendly`, `hopeful`, `sad`, `shouting`, `terrified`, `unfriendly`, `whispering`||| -|en-US-JasonNeural|`angry`, `cheerful`, `excited`, `friendly`, `hopeful`, `sad`, `shouting`, `terrified`, `unfriendly`, `whispering`||| +|en-US-JaneNeural Public preview|`angry`, `cheerful`, `excited`, `friendly`, `hopeful`, `sad`, `shouting`, `terrified`, `unfriendly`, `whispering`||| +|en-US-JasonNeural Public preview|`angry`, `cheerful`, `excited`, `friendly`, `hopeful`, `sad`, `shouting`, `terrified`, `unfriendly`, `whispering`||| |en-US-JennyNeural|`angry`, `assistant`, `chat`, `cheerful`,`customerservice`, `excited`, `friendly`, `hopeful`, `newscast`, `sad`, `shouting`, `terrified`, , `unfriendly`, `whispering`||| -|en-US-NancyNeural|`angry`, `cheerful`, `excited`, `friendly`, `hopeful`, `sad`, `shouting`, `terrified`, `unfriendly`, `whispering`||| +|en-US-NancyNeural Public preview|`angry`, `cheerful`, `excited`, `friendly`, `hopeful`, `sad`, `shouting`, `terrified`, `unfriendly`, `whispering`||| |en-US-SaraNeural|`angry`, `cheerful`, `excited`, `friendly`, `hopeful`, `sad`, `shouting`, `terrified`, `unfriendly`, `whispering`||| -|en-US-TonyNeural|`angry`, `cheerful`, `excited`, `friendly`, `hopeful`, `sad`, `shouting`, `terrified`, `unfriendly`, `whispering`||| -|fr-FR-DeniseNeural |`cheerful` Public preview, `sad`Public preview||| +|en-US-TonyNeural Public preview|`angry`, `cheerful`, `excited`, `friendly`, `hopeful`, `sad`, `shouting`, `terrified`, `unfriendly`, `whispering`||| +|fr-FR-DeniseNeural 
|`cheerful`, `sad`||| |ja-JP-NanamiNeural|`chat`, `cheerful`, `customerservice`||| |pt-BR-FranciscaNeural|`calm`||| |zh-CN-XiaohanNeural|`affectionate`, `angry`, `calm`, `cheerful`, `disgruntled`, `embarrassed`, `fearful`, `gentle`, `sad`, `serious`|Supported|| |zh-CN-XiaomoNeural|`affectionate`, `angry`, `calm`, `cheerful`, `depressed`, `disgruntled`, `embarrassed`, `envious`, `fearful`, `gentle`, `sad`, `serious`|Supported|Supported| |zh-CN-XiaoruiNeural|`angry`, `calm`, `fearful`, `sad`|Supported|| |zh-CN-XiaoshuangNeural|`chat`|Supported|| -|zh-CN-XiaoxiaoNeural|`affectionate`, `angry`, `assistant`, `calm`, `chat`, `cheerful`, `customerservice`, `disgruntled`, `fearful`, `gentle`, `lyrical`, `newscast`, `sad`, `serious`|Supported|| +|zh-CN-XiaoxiaoNeural|`affectionate`, `angry`, `assistant`, `calm`, `chat`, `cheerful`, `customerservice`, `disgruntled`, `fearful`, `gentle`, `lyrical`, `newscast`, `poetry-reading`, `sad`, `serious`|Supported|| |zh-CN-XiaoxuanNeural|`angry`, `calm`, `cheerful`, `depressed`, `disgruntled`, `fearful`, `gentle`, `serious`|Supported|Supported| |zh-CN-YunxiNeural|`angry`, `assistant`, `cheerful`, `depressed`, `disgruntled`, `embarrassed`, `fearful`, `narration-relaxed`, `sad`, `serious`|Supported|Supported| |zh-CN-YunyangNeural|`customerservice`, `narration-professional`, `newscast-casual`|Supported|| |zh-CN-YunyeNeural|`angry`, `calm`, `cheerful`, `disgruntled`, `embarrassed`, `fearful`, `sad`, `serious`|Supported|Supported| +|zh-CN-YunjianNeural Public preview|`narration-relaxed`, `sports-commentary` Public preview, `sports-commentary-excited` Public preview|Supported|| +|zh-CN-YunhaoNeural Public preview|`general`, `advertisement-upbeat` Public preview|Supported|| +|zh-CN-YunfengNeural Public preview|`calm`, `angry`, ` disgruntled`, `cheerful`, `fearful`, `sad`, `serious`, `depressed`|Supported|| ### Custom Neural Voice Custom Neural Voice lets you create synthetic voices that are rich in speaking styles. You can create a unique brand voice in multiple languages and styles by using a small set of recording data. -Select the right locale that matches the training data you have to train a custom neural voice model. For example, if the recording data you have is spoken in English with a British accent, select `en-GB`. - -With the cross-lingual feature (preview), you can transfer your custom neural voice model to speak a second language. For example, with the `zh-CN` data, you can create a voice that speaks `en-AU` or any of the languages marked "Yes" in the Cross-lingual column in the following table. 
- -| Language | Locale | Cross-lingual (preview) | -|--|--|--| -| Arabic (Egypt) | `ar-EG` | No | -| Arabic (Saudi Arabia) | `ar-SA` | No | -| Bulgarian (Bulgaria) | `bg-BG` | No | -| Catalan (Spain) | `ca-ES` | No | -| Chinese (Cantonese, Traditional) | `zh-HK` | No | -| Chinese (Mandarin, Simplified) | `zh-CN` | Yes | -| Chinese (Mandarin, Simplified), English bilingual | `zh-CN` bilingual | Yes | -| Chinese (Taiwanese Mandarin) | `zh-TW` | No | -| Croatian (Croatia) | `hr-HR` | No | -| Czech (Czech) | `cs-CZ` | No | -| Danish (Denmark) | `da-DK` | No | -| Dutch (Netherlands) | `nl-NL` | No | -| English (Australia) | `en-AU` | Yes | -| English (Canada) | `en-CA` | No | -| English (India) | `en-IN` | No | -| English (Ireland) | `en-IE` | No | -| English (United Kingdom) | `en-GB` | Yes | -| English (United States) | `en-US` | Yes | -| Finnish (Finland) | `fi-FI` | No | -| French (Canada) | `fr-CA` | Yes | -| French (France) | `fr-FR` | Yes | -| French (Switzerland) | `fr-CH` | No | -| German (Austria) | `de-AT` | No | -| German (Germany) | `de-DE` | Yes | -| German (Switzerland) | `de-CH` | No | -| Greek (Greece) | `el-GR` | No | -| Hebrew (Israel) | `he-IL` | No | -| Hindi (India) | `hi-IN` | No | -| Hungarian (Hungary) | `hu-HU` | No | -| Indonesian (Indonesia) | `id-ID` | No | -| Italian (Italy) | `it-IT` | Yes | -| Japanese (Japan) | `ja-JP` | Yes | -| Korean (Korea) | `ko-KR` | Yes | -| Malay (Malaysia) | `ms-MY` | No | -| Norwegian (Bokmål, Norway) | `nb-NO` | No | -| Polish (Poland) | `pl-PL` | No | -| Portuguese (Brazil) | `pt-BR` | Yes | -| Portuguese (Portugal) | `pt-PT` | No | -| Romanian (Romania) | `ro-RO` | No | -| Russian (Russia) | `ru-RU` | Yes | -| Slovak (Slovakia) | `sk-SK` | No | -| Slovenian (Slovenia) | `sl-SI` | No | -| Spanish (Mexico) | `es-MX` | Yes | -| Spanish (Spain) | `es-ES` | Yes | -| Swedish (Sweden) | `sv-SE` | No | -| Tamil (India) | `ta-IN` | No | -| Telugu (India) | `te-IN` | No | -| Thai (Thailand) | `th-TH` | No | -| Turkish (Turkey) | `tr-TR` | No | -| Vietnamese (Vietnam) | `vi-VN` | No | +Select the right locale that matches your training data to train a custom neural voice model. For example, if the recording data is spoken in English with a British accent, select `en-GB`. + +With the cross-lingual feature (preview), you can transfer your custom neural voice model to speak a second language. For example, with the `zh-CN` data, you can create a voice that speaks `en-AU` or any of the languages marked with "Yes" in the Cross-lingual column in the following table. + +There are two Custom Neural Voice (CNV) project types: CNV Pro and CNV Lite (preview). In the following table, all the languages are supported by CNV Pro, and the languages marked with "Yes" in the Custom Neural Voice Lite column are supported by CNV Lite. 
+ +| Language | Locale | Cross-lingual (preview) |Custom Neural Voice Lite (preview)| +|--|--|--|--| +| Arabic (Egypt) | `ar-EG` | No |No| +| Arabic (Saudi Arabia) | `ar-SA` | No |No| +| Bulgarian (Bulgaria) | `bg-BG` | No |No| +| Catalan (Spain) | `ca-ES` | No |No| +| Chinese (Cantonese, Traditional) | `zh-HK` | No |No| +| Chinese (Mandarin, Simplified) | `zh-CN` | Yes |Yes| +| Chinese (Mandarin, Simplified), English bilingual | `zh-CN` bilingual | Yes |No| +| Chinese (Taiwanese Mandarin) | `zh-TW` | No |No| +| Croatian (Croatia) | `hr-HR` | No |No| +| Czech (Czech) | `cs-CZ` | No |No| +| Danish (Denmark) | `da-DK` | No |No| +| Dutch (Netherlands) | `nl-NL` | No |No| +| English (Australia) | `en-AU` | Yes |No| +| English (Canada) | `en-CA` | No |Yes| +| English (India) | `en-IN` | No |No| +| English (Ireland) | `en-IE` | No |No| +| English (United Kingdom) | `en-GB` | Yes |Yes| +| English (United States) | `en-US` | Yes |Yes| +| Finnish (Finland) | `fi-FI` | No |No| +| French (Canada) | `fr-CA` | Yes |No| +| French (France) | `fr-FR` | Yes |Yes| +| French (Switzerland) | `fr-CH` | No |No| +| German (Austria) | `de-AT` | No |No| +| German (Germany) | `de-DE` | Yes |Yes| +| German (Switzerland) | `de-CH` | No |No| +| Greek (Greece) | `el-GR` | No |No| +| Hebrew (Israel) | `he-IL` | No |No| +| Hindi (India) | `hi-IN` | No |No| +| Hungarian (Hungary) | `hu-HU` | No |No| +| Indonesian (Indonesia) | `id-ID` | No |No| +| Italian (Italy) | `it-IT` | Yes |Yes| +| Japanese (Japan) | `ja-JP` | Yes |No| +| Korean (Korea) | `ko-KR` | Yes |Yes| +| Malay (Malaysia) | `ms-MY` | No |No| +| Norwegian (Bokmål, Norway) | `nb-NO` | No |No| +| Polish (Poland) | `pl-PL` | No |No| +| Portuguese (Brazil) | `pt-BR` | Yes |Yes| +| Portuguese (Portugal) | `pt-PT` | No |No| +| Romanian (Romania) | `ro-RO` | No |No| +| Russian (Russia) | `ru-RU` | Yes |No| +| Slovak (Slovakia) | `sk-SK` | No |No| +| Slovenian (Slovenia) | `sl-SI` | No |No| +| Spanish (Mexico) | `es-MX` | Yes |Yes| +| Spanish (Spain) | `es-ES` | Yes |No| +| Swedish (Sweden) | `sv-SE` | No |No| +| Tamil (India) | `ta-IN` | No |No | +| Telugu (India) | `te-IN` | No |No | +| Thai (Thailand) | `th-TH` | No |No | +| Turkish (Turkey) | `tr-TR` | No |No| +| Vietnamese (Vietnam) | `vi-VN` | No |No| ## Language identification @@ -917,6 +925,7 @@ The following table lists the released languages and public preview languages. 
|English (United Kingdom)|`en-GB`Public preview | |English (United States)|`en-US`General available| |French (France)|`fr-FR`Public preview | +|German (Germany)|`de-DE`Public preview | |Spanish (Spain)|`es-ES`Public preview | > [!NOTE] diff --git a/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-connect-model.png b/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-connect-model.png new file mode 100644 index 0000000000000..1bac53a27dc24 Binary files /dev/null and b/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-connect-model.png differ diff --git a/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-copy-to-full.png b/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-copy-to-full.png new file mode 100644 index 0000000000000..5d5913a2a228a Binary files /dev/null and b/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-copy-to-full.png differ diff --git a/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-copy-to-zoom.png b/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-copy-to-zoom.png new file mode 100644 index 0000000000000..54e783d389b20 Binary files /dev/null and b/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-copy-to-zoom.png differ diff --git a/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-custom-model-expiration.png b/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-custom-model-expiration.png new file mode 100644 index 0000000000000..fc53531a59bd6 Binary files /dev/null and b/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-custom-model-expiration.png differ diff --git a/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-deploy-details.png b/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-deploy-details.png deleted file mode 100644 index 0de73912fd8af..0000000000000 Binary files a/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-deploy-details.png and /dev/null differ diff --git a/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-inspect-compare.png b/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-inspect-compare.png new file mode 100644 index 0000000000000..61c7fec07e8b1 Binary files /dev/null and b/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-inspect-compare.png differ diff --git a/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-model-expiration.png b/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-model-expiration.png new file mode 100644 index 0000000000000..e65d9512e9262 Binary files /dev/null and b/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-model-expiration.png differ diff --git a/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-wer-formula.png b/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-wer-formula.png deleted file mode 100644 index 7f30cfe035906..0000000000000 Binary files a/articles/cognitive-services/Speech-Service/media/custom-speech/custom-speech-wer-formula.png and /dev/null differ diff --git a/articles/cognitive-services/Speech-Service/media/custom-voice/cnv-cancel-training.png 
b/articles/cognitive-services/Speech-Service/media/custom-voice/cnv-cancel-training.png new file mode 100644 index 0000000000000..8de7bc179fb97 Binary files /dev/null and b/articles/cognitive-services/Speech-Service/media/custom-voice/cnv-cancel-training.png differ diff --git a/articles/cognitive-services/Speech-Service/media/custom-voice/cnv-clone-model-rename.png b/articles/cognitive-services/Speech-Service/media/custom-voice/cnv-clone-model-rename.png new file mode 100644 index 0000000000000..b61a04df53f58 Binary files /dev/null and b/articles/cognitive-services/Speech-Service/media/custom-voice/cnv-clone-model-rename.png differ diff --git a/articles/cognitive-services/Speech-Service/media/custom-voice/cnv-clone-model.png b/articles/cognitive-services/Speech-Service/media/custom-voice/cnv-clone-model.png new file mode 100644 index 0000000000000..c79a9d7896bc0 Binary files /dev/null and b/articles/cognitive-services/Speech-Service/media/custom-voice/cnv-clone-model.png differ diff --git a/articles/cognitive-services/Speech-Service/media/custom-voice/cnv-model-add-testscripts.png b/articles/cognitive-services/Speech-Service/media/custom-voice/cnv-model-add-testscripts.png new file mode 100644 index 0000000000000..b3f31507974bb Binary files /dev/null and b/articles/cognitive-services/Speech-Service/media/custom-voice/cnv-model-add-testscripts.png differ diff --git a/articles/cognitive-services/Speech-Service/media/custom-voice/cnv-model-default-test.png b/articles/cognitive-services/Speech-Service/media/custom-voice/cnv-model-default-test.png new file mode 100644 index 0000000000000..1545601f33d3b Binary files /dev/null and b/articles/cognitive-services/Speech-Service/media/custom-voice/cnv-model-default-test.png differ diff --git a/articles/cognitive-services/Speech-Service/media/custom-voice/cnv-model-upload-testscripts.png b/articles/cognitive-services/Speech-Service/media/custom-voice/cnv-model-upload-testscripts.png new file mode 100644 index 0000000000000..4079c865e3cee Binary files /dev/null and b/articles/cognitive-services/Speech-Service/media/custom-voice/cnv-model-upload-testscripts.png differ diff --git a/articles/cognitive-services/Speech-Service/quickstart-custom-commands-application.md b/articles/cognitive-services/Speech-Service/quickstart-custom-commands-application.md index d508a47f4b1cb..c83c961c24706 100644 --- a/articles/cognitive-services/Speech-Service/quickstart-custom-commands-application.md +++ b/articles/cognitive-services/Speech-Service/quickstart-custom-commands-application.md @@ -30,7 +30,7 @@ At this time, Custom Commands supports speech subscriptions created in regions t ## Go to the Speech Studio for Custom Commands -1. In a web browser, go to [Speech Studio](https://speech.microsoft.com/). +1. In a web browser, go to [Speech Studio](https://aka.ms/speechstudio/customcommands). 1. Enter your credentials to sign in to the portal. The default view is your list of Speech subscriptions. diff --git a/articles/cognitive-services/Speech-Service/regions.md b/articles/cognitive-services/Speech-Service/regions.md index af5be83289f52..3016a8c3d35c0 100644 --- a/articles/cognitive-services/Speech-Service/regions.md +++ b/articles/cognitive-services/Speech-Service/regions.md @@ -15,7 +15,7 @@ ms.custom: references_regions, ignite-fall-2021 # Speech service supported regions -The Speech service allows your application to convert audio to text, perform speech translation, and convert text to speech. 
The service is available in multiple regions with unique endpoints for the Speech SDK and REST APIs. You can perform custom configurations to your speech experience, for all regions, at the [Speech Studio](https://speech.microsoft.com). +The Speech service allows your application to convert audio to text, perform speech translation, and convert text to speech. The service is available in multiple regions with unique endpoints for the Speech SDK and REST APIs. You can perform custom configurations to your speech experience, for all regions, at the [Speech Studio](https://aka.ms/speechstudio/). Keep in mind the following points: @@ -36,10 +36,10 @@ The Speech service is available in these regions for speech-to-text, pronunciati [!INCLUDE [](../../../includes/cognitive-services-speech-service-region-identifier.md)] -If you plan to train a custom model with audio data, use one of the regions with dedicated hardware for faster training. You can use the [REST API](https://centralus.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/CopyModelToSubscription) to copy the fully trained model to another region later. +If you plan to train a custom model with audio data, use one of the regions with dedicated hardware for faster training. Then you can use the [Speech-to-text REST API v3.0](rest-speech-to-text.md) to [copy the trained model](how-to-custom-speech-train-model.md#copy-a-model) to another region. > [!TIP] -> For pronunciation assessment feature, `en-US` and `en-GB` are available in all regions listed above, `zh-CN` is available in East Asia and Southeast Asia regions, `es-ES` and `fr-FR` are available in West Europe region, and `en-AU` is available in Australia East region. +> For pronunciation assessment, `en-US` and `en-GB` are available in all regions listed above, `zh-CN` is available in East Asia and Southeast Asia regions, `es-ES` and `fr-FR` are available in West Europe region, and `en-AU` is available in Australia East region. ### Intent recognition diff --git a/articles/cognitive-services/Speech-Service/releasenotes.md b/articles/cognitive-services/Speech-Service/releasenotes.md index e895adc92dc5b..d37fa64afaf6c 100644 --- a/articles/cognitive-services/Speech-Service/releasenotes.md +++ b/articles/cognitive-services/Speech-Service/releasenotes.md @@ -19,10 +19,9 @@ See below for information about changes to Speech services and resources. ## What's new? -* Speech SDK 1.21.0 and Speech CLI 1.21.0 were released in April 2022. See details below. +* Speech SDK 1.22.0 and Speech CLI 1.22.0 were released in June 2022. See details below. * Custom speech-to-text container v3.1.0 released in March 2022, with support to get display models. * TTS Service March 2022, public preview of Cheerful and Sad styles with fr-FR-DeniseNeural. -* TTS Service February 2022, public preview of Custom Neural Voice Lite, extended CNV language support to 49 locales. 
## Release notes diff --git a/articles/cognitive-services/Speech-Service/resiliency-and-recovery-plan.md b/articles/cognitive-services/Speech-Service/resiliency-and-recovery-plan.md index 5cf8335a054f6..0e326c1875dbd 100644 --- a/articles/cognitive-services/Speech-Service/resiliency-and-recovery-plan.md +++ b/articles/cognitive-services/Speech-Service/resiliency-and-recovery-plan.md @@ -34,11 +34,11 @@ Datasets for customer-created data assets, such as customized speech models, cus While some customers use our default endpoints to transcribe audio or standard voices for speech synthesis, other customers create assets for customization. -These assets are backed up regularly and automatically by the repositories themselves, so **no data loss will occur** if a region becomes unavailable. However, you must take steps to ensure service continuity in the event of a region outage. +These assets are backed up regularly and automatically by the repositories themselves, so **no data loss will occur** if a region becomes unavailable. However, you must take steps to ensure service continuity if there's a region outage. ## How to monitor service availability -If you use our default endpoints, you should configure your client code to monitor for errors, and if errors persist, be prepared to re-direct to another region of your choice where you have a service subscription. +If you use the default endpoints, you should configure your client code to monitor for errors. If errors persist, be prepared to redirect to another region where you have a service subscription. Follow these steps to configure your client to monitor for errors: @@ -47,26 +47,26 @@ Follow these steps to configure your client to monitor for errors: 3. From Azure portal, create Speech Service resources for each region. - If you have set a specific quota, you may also consider setting the same quota in the backup regions. See details in [Speech service Quotas and Limits](./speech-services-quotas-and-limits.md). -4. Note that each region has its own STS token service. For the primary region and any backup regions your client configuration file needs to know the: +4. Each region has its own STS token service. For the primary region and any backup regions your client configuration file needs to know the: - Regional Speech service endpoints - [Regional subscription key and the region code](./rest-speech-to-text.md) -5. Configure your code to monitor for connectivity errors (typically connection timeouts and service unavailability errors). Here is sample code in C#: [GitHub: Adding Sample for showing a possible candidate for switching regions](https://github.com/Azure-Samples/cognitive-services-speech-sdk/blob/fa6428a0837779cbeae172688e0286625e340942/samples/csharp/sharedcontent/console/speech_recognition_samples.cs#L965). +5. Configure your code to monitor for connectivity errors (typically connection timeouts and service unavailability errors). Here's sample code in C#: [GitHub: Adding Sample for showing a possible candidate for switching regions](https://github.com/Azure-Samples/cognitive-services-speech-sdk/blob/fa6428a0837779cbeae172688e0286625e340942/samples/csharp/sharedcontent/console/speech_recognition_samples.cs#L965). 1. Since networks experience transient errors, for single connectivity issue occurrences, the suggestion is to retry. 2. For persistence redirect traffic to the new STS token service and Speech service endpoint. 
(For Text-to-Speech, reference sample code: [GitHub: TTS public voice switching region](https://github.com/Azure-Samples/cognitive-services-speech-sdk/blob/master/samples/csharp/sharedcontent/console/speech_synthesis_samples.cs#L880). -The recovery from regional failures for this usage type can be instantaneous and at a very low cost. All that is required is the development of this functionality on the client side. The data loss that will incur assuming no backup of the audio stream will be minimal. +The recovery from regional failures for this usage type can be instantaneous and at a low cost. All that is required is the development of this functionality on the client side. The data loss incurred, assuming no backup of the audio stream, will be minimal. ## Custom endpoint recovery -Data assets, models or deployments in one region cannot be made visible or accessible in any other region. +Data assets, models, or deployments in one region can't be made visible or accessible in any other region. You should create Speech Service resources in both a main and a secondary region by following the same steps as used for default endpoints. ### Custom Speech -Custom Speech Service does not support automatic failover. We suggest the following steps to prepare for manual or automatic failover implemented in your client code. In these steps you replicate custom models in a secondary region. With this preparation, your client code can switch to a secondary region when the primary region fails. +Custom Speech Service doesn't support automatic failover. We suggest the following steps to prepare for manual or automatic failover implemented in your client code. In these steps, you replicate custom models in a secondary region. With this preparation, your client code can switch to a secondary region when the primary region fails. 1. Create your custom model in one main region (Primary). 2. Run the [Model Copy API](https://eastus2.dev.cognitive.microsoft.com/docs/services/speech-to-text-api-v3-0/operations/CopyModelToSubscription) to replicate the custom model to all prepared regions (Secondary). @@ -74,11 +74,11 @@ Custom Speech Service does not support automatic failover. We suggest the follow - If you have set a specific quota, also consider setting the same quota in the backup regions. See details in [Speech service Quotas and Limits](./speech-services-quotas-and-limits.md). 4. Configure your client to fail over on persistent errors as with the default endpoints usage. -Your client code can monitor availability of your deployed models in your primary region, and redirect their audio traffic to the secondary region when the primary fails. If you do not require real-time failover, you can still follow these steps to prepare for a manual failover. +Your client code can monitor availability of your deployed models in your primary region, and redirect their audio traffic to the secondary region when the primary fails. If you don't require real-time failover, you can still follow these steps to prepare for a manual failover. #### Offline failover -If you do not require real-time failover you can decide to import your data, create and deploy your models in the secondary region at a later time with the understanding that these tasks will take time to complete. +If you don't require real-time failover, you can decide to import your data, create and deploy your models in the secondary region at a later time with the understanding that these tasks will take time to complete.
#### Failover time requirements @@ -91,11 +91,11 @@ This section provides general guidance about timing. The times were recorded to - Model copy API call: **10 mins** - Client code reconfiguration and deployment: **Depending on the client system** -It is nonetheless advisable to create keys for a primary and secondary region for production models with real-time requirements. +It's nonetheless advisable to create keys for a primary and secondary region for production models with real-time requirements. ### Custom Voice -Custom Voice does not support automatic failover. Handle real-time synthesis failures with these two options. +Custom Voice doesn't support automatic failover. Handle real-time synthesis failures with these two options. **Option 1: Fail over to public voice in the same region.** @@ -106,15 +106,15 @@ Check the [public voices available](./language-support.md#prebuilt-neural-voices **Option 2: Fail over to custom voice on another region.** 1. Create and deploy your custom voice in one main region (primary). -2. Copy your custom voice model to another region (the secondary region) in [Speech Studio](https://speech.microsoft.com). +2. Copy your custom voice model to another region (the secondary region) in [Speech Studio](https://aka.ms/speechstudio/). 3. Go to Speech Studio and switch to the Speech resource in the secondary region. Load the copied model and create a new endpoint. - Voice model deployment usually finishes **in 3 minutes**. - - Note: additional endpoint is subjective to additional charges. [Check the pricing for model hosting here](https://azure.microsoft.com/pricing/details/cognitive-services/speech-services/). + - Each endpoint is subject to extra charges. [Check the pricing for model hosting here](https://azure.microsoft.com/pricing/details/cognitive-services/speech-services/). 4. Configure your client to fail over to the secondary region. See sample code in C#: [GitHub: custom voice failover to secondary region](https://github.com/Azure-Samples/cognitive-services-speech-sdk/blob/master/samples/csharp/sharedcontent/console/speech_synthesis_samples.cs#L920). ### Speaker Recognition -Speaker Recognition uses [Azure paired regions](../../availability-zones/cross-region-replication-azure.md) to automatically failover operations. Speaker enrollments and voice signatures are backed up regularly to prevent data loss and to be used in case of an outage. +Speaker Recognition uses [Azure paired regions](../../availability-zones/cross-region-replication-azure.md) to automatically fail over operations. Speaker enrollments and voice signatures are backed up regularly to prevent data loss and to be used if there's an outage. -During an outage, Speaker Recognition service will automatically failover to a paired region and use the backed up data to continue processing requests until the main region is back online. \ No newline at end of file +During an outage, Speaker Recognition service will automatically fail over to a paired region and use the backed-up data to continue processing requests until the main region is back online. 
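The failover guidance above comes down to monitoring for persistent connectivity errors and redirecting traffic to a secondary region that holds the replicated assets. A simplified Python sketch of that loop; the region names, key variable names, and one-attempt-per-region policy are assumptions rather than recommendations from the article:

```python
import os
import azure.cognitiveservices.speech as speechsdk

# Primary region first, then the secondary region holding the replicated assets.
REGIONS = [
    ("westus", os.environ["SPEECH_KEY_PRIMARY"]),
    ("eastus", os.environ["SPEECH_KEY_SECONDARY"]),
]

def recognize_with_failover(wav_path: str) -> str:
    last_error = None
    for region, key in REGIONS:
        config = speechsdk.SpeechConfig(subscription=key, region=region)
        audio = speechsdk.audio.AudioConfig(filename=wav_path)
        recognizer = speechsdk.SpeechRecognizer(speech_config=config, audio_config=audio)
        result = recognizer.recognize_once_async().get()
        if result.reason == speechsdk.ResultReason.RecognizedSpeech:
            return result.text
        if result.reason == speechsdk.ResultReason.Canceled:
            # Timeouts and service-unavailable errors surface as cancellations;
            # record the details and try the next configured region.
            last_error = result.cancellation_details.error_details
    raise RuntimeError(f"Recognition failed in all regions; last error: {last_error}")
```

In practice you would also retry transient errors within a region before failing over, in line with the guidance above on single connectivity issues.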
\ No newline at end of file diff --git a/articles/cognitive-services/Speech-Service/speech-container-howto.md b/articles/cognitive-services/Speech-Service/speech-container-howto.md index 750b97e8e170f..ad8a3782a859e 100644 --- a/articles/cognitive-services/Speech-Service/speech-container-howto.md +++ b/articles/cognitive-services/Speech-Service/speech-container-howto.md @@ -333,7 +333,7 @@ If you have multiple phrases to add, call `.addPhrase()` for each phrase to add # [Custom speech-to-text](#tab/cstt) -The custom speech-to-text container relies on a Custom Speech model. The custom model has to have been [trained](how-to-custom-speech-train-model.md) by using the [Speech Studio](https://speech.microsoft.com/customspeech). +The custom speech-to-text container relies on a Custom Speech model. The custom model has to have been [trained](how-to-custom-speech-train-model.md) by using the [Speech Studio](https://aka.ms/speechstudio/customspeech). The custom speech **Model ID** is required to run the container. For more information about how to get the model ID, see [Custom Speech model lifecycle](how-to-custom-speech-model-and-endpoint-lifecycle.md). diff --git a/articles/cognitive-services/Speech-Service/speech-services-quotas-and-limits.md b/articles/cognitive-services/Speech-Service/speech-services-quotas-and-limits.md index ca27f08a9bb71..7a240f9da6714 100644 --- a/articles/cognitive-services/Speech-Service/speech-services-quotas-and-limits.md +++ b/articles/cognitive-services/Speech-Service/speech-services-quotas-and-limits.md @@ -111,7 +111,7 @@ In the following tables, the parameters without the **Adjustable** row aren't ad 3 For the free (F0) pricing tier, see also the monthly allowances at the [pricing page](https://azure.microsoft.com/pricing/details/cognitive-services/speech-services/).
                  4 See [additional explanations](#detailed-description-quota-adjustment-and-best-practices) and [best practices](#general-best-practices-to-mitigate-throttling-during-autoscaling).
                  -5 See [additional explanations](#detailed-description-quota-adjustment-and-best-practices), [best practices](#general-best-practices-to-mitigate-throttling-during-autoscaling), and [adjustment instructions](#text-to-speech-increase-concurrent-request-limit-for-custom-neural-voices).
                  +5 See [additional explanations](#detailed-description-quota-adjustment-and-best-practices), [best practices](#general-best-practices-to-mitigate-throttling-during-autoscaling), and [adjustment instructions](#text-to-speech-increase-concurrent-request-limit).
                  ## Detailed description, quota adjustment, and best practices @@ -166,7 +166,7 @@ How to get information for the base model: How to get information for the custom model: -1. Go to the [Speech Studio](https://speech.microsoft.com/) portal. +1. Go to the [Speech Studio](https://aka.ms/speechstudio/customspeech) portal. 1. Sign in if necessary, and go to **Custom Speech**. 1. Select your project, and go to **Deployment**. 1. Select the required endpoint. @@ -204,9 +204,9 @@ Suppose that a Speech service resource has the concurrent request limit set to 3 Generally, it's a very good idea to test the workload and the workload patterns before going to production. -### Text-to-speech: increase concurrent request limit for custom neural voices +### Text-to-speech: increase concurrent request limit -By default, the number of concurrent requests for Custom Neural Voice endpoints is limited to 10. For the standard pricing tier, you can increase this amount. Before submitting the request, ensure that you're familiar with the material discussed earlier in this article, such as the best practices to mitigate throttling. +For the standard pricing tier, you can increase this amount. Before submitting the request, ensure that you're familiar with the material discussed earlier in this article, such as the best practices to mitigate throttling. Increasing the limit of concurrent requests doesn't directly affect your costs. Speech service uses a payment model that requires that you pay only for what you use. The limit defines how high the service can scale before it starts throttle your requests. @@ -219,7 +219,7 @@ You aren't able to see the existing value of the concurrent request limit parame To create an increase request, you provide your deployment region and the custom endpoint ID. To get it, perform the following actions: -1. Go to the [Speech Studio](https://speech.microsoft.com/) portal. +1. Go to the [Speech Studio](https://aka.ms/speechstudio/customvoice) portal. 1. Sign in if necessary, and go to **Custom Voice**. 1. Select your project, and go to **Deployment**. 1. Select the required endpoint. diff --git a/articles/cognitive-services/Speech-Service/speech-studio-overview.md b/articles/cognitive-services/Speech-Service/speech-studio-overview.md index 1c452139129bc..27b5749f5459d 100644 --- a/articles/cognitive-services/Speech-Service/speech-studio-overview.md +++ b/articles/cognitive-services/Speech-Service/speech-studio-overview.md @@ -14,27 +14,27 @@ ms.author: eur # What is Speech Studio? -[Speech Studio](https://speech.microsoft.com) is a set of UI-based tools for building and integrating features from Azure Cognitive Services Speech service in your applications. You create projects in Speech Studio by using a no-code approach, and then reference those assets in your applications by using the [Speech SDK](speech-sdk.md), the [Speech CLI](spx-overview.md), or the REST APIs. +[Speech Studio](https://aka.ms/speechstudio/) is a set of UI-based tools for building and integrating features from Azure Cognitive Services Speech service in your applications. You create projects in Speech Studio by using a no-code approach, and then reference those assets in your applications by using the [Speech SDK](speech-sdk.md), the [Speech CLI](spx-overview.md), or the REST APIs. 
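+For example, an asset such as a Custom Voice deployment created in Speech Studio can be referenced from application code through the Speech SDK. The following sketch assumes the Speech SDK for Python; the key, region, deployment ID, and voice name are placeholders, and error handling is kept to a minimum:
+
+```python
+import azure.cognitiveservices.speech as speechsdk
+
+speech_config = speechsdk.SpeechConfig(subscription="<your-key>", region="<your-region>")
+# Reference the deployment created in Speech Studio (both values are placeholders).
+speech_config.endpoint_id = "<your-custom-voice-deployment-id>"
+speech_config.speech_synthesis_voice_name = "<YourCustomVoiceName>"
+
+# audio_config=None keeps the synthesized audio in memory instead of playing it.
+synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=None)
+result = synthesizer.speak_text_async("Hello from my custom voice.").get()
+if result.reason != speechsdk.ResultReason.SynthesizingAudioCompleted:
+    print(f"Synthesis did not complete: {result.reason}")
+```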
## Speech Studio features In Speech Studio, the following Speech service features are available as project types: -* **Real-time speech-to-text**: Quickly test speech-to-text by dragging audio files here without having to use any code. This is a demo tool for seeing how speech-to-text works on your audio samples. To explore the full functionality, see [What is speech-to-text?](speech-to-text.md). +* [Real-time speech-to-text](https://aka.ms/speechstudio/speechtotexttool): Quickly test speech-to-text by dragging audio files here without having to use any code. This is a demo tool for seeing how speech-to-text works on your audio samples. To explore the full functionality, see [What is speech-to-text?](speech-to-text.md). -* **Custom Speech**: Create speech recognition models that are tailored to specific vocabulary sets and styles of speaking. In contrast to the base speech recognition model, Custom Speech models become part of your unique competitive advantage because they're not publicly accessible. To get started with uploading sample audio to create a Custom Speech model, see [Upload training and testing datasets](how-to-custom-speech-upload-data.md). +* [Custom Speech](https://aka.ms/speechstudio/customspeech): Create speech recognition models that are tailored to specific vocabulary sets and styles of speaking. In contrast to the base speech recognition model, Custom Speech models become part of your unique competitive advantage because they're not publicly accessible. To get started with uploading sample audio to create a Custom Speech model, see [Upload training and testing datasets](how-to-custom-speech-upload-data.md). -* **Pronunciation assessment**: Evaluate speech pronunciation and give speakers feedback on the accuracy and fluency of spoken audio. Speech Studio provides a sandbox for testing this feature quickly, without code. To use the feature with the Speech SDK in your applications, see the [Pronunciation assessment](how-to-pronunciation-assessment.md) article. +* [Pronunciation assessment](https://aka.ms/speechstudio/pronunciationassessment): Evaluate speech pronunciation and give speakers feedback on the accuracy and fluency of spoken audio. Speech Studio provides a sandbox for testing this feature quickly, without code. To use the feature with the Speech SDK in your applications, see the [Pronunciation assessment](how-to-pronunciation-assessment.md) article. -* **Voice Gallery**: Build apps and services that speak naturally. Choose from more than 170 voices in over 70 languages and variants. Bring your scenarios to life with highly expressive and human-like neural voices. +* [Voice Gallery](https://aka.ms/speechstudio/voicegallery): Build apps and services that speak naturally. Choose from more than 170 voices in over 70 languages and variants. Bring your scenarios to life with highly expressive and human-like neural voices. -* **Custom Voice**: Create custom, one-of-a-kind voices for text-to-speech. You supply audio files and create matching transcriptions in Speech Studio, and then use the custom voices in your applications. To create and use custom voices via endpoints, see [Create and use your voice model](how-to-custom-voice-create-voice.md). +* [Custom Voice](https://aka.ms/speechstudio/customvoice): Create custom, one-of-a-kind voices for text-to-speech. You supply audio files and create matching transcriptions in Speech Studio, and then use the custom voices in your applications. 
To create and use custom voices via endpoints, see [Create and use your voice model](how-to-custom-voice-create-voice.md). -* **Audio Content Creation**: Build highly natural audio content for a variety of scenarios, such as audiobooks, news broadcasts, video narrations, and chat bots, with the easy-to-use [Audio Content Creation](how-to-audio-content-creation.md) tool. With Speech Studio, you can export these audio files to use in your applications. +* [Audio Content Creation](https://aka.ms/speechstudio/audiocontentcreation): Build highly natural audio content for a variety of scenarios, such as audiobooks, news broadcasts, video narrations, and chat bots, with the easy-to-use [Audio Content Creation](how-to-audio-content-creation.md) tool. With Speech Studio, you can export these audio files to use in your applications. -* **Custom Keyword**: A custom keyword is a word or short phrase that you can use to voice-activate a product. You create a custom keyword in Speech Studio, and then generate a binary file to [use with the Speech SDK](custom-keyword-basics.md) in your applications. +* [Custom Keyword](https://aka.ms/speechstudio/customkeyword): A custom keyword is a word or short phrase that you can use to voice-activate a product. You create a custom keyword in Speech Studio, and then generate a binary file to [use with the Speech SDK](custom-keyword-basics.md) in your applications. -* **Custom Commands**: Easily build rich, voice-command apps that are optimized for voice-first interaction experiences. Custom Commands provides a code-free authoring experience in Speech Studio, an automatic hosting model, and relatively lower complexity. The feature helps you focus on building the best solution for your voice-command scenarios. For more information, see the [Develop Custom Commands applications](how-to-develop-custom-commands-application.md) guide. Also see [Integrate with a client application by using the Speech SDK](how-to-custom-commands-setup-speech-sdk.md). +* [Custom Commands](https://aka.ms/speechstudio/customcommands): Easily build rich, voice-command apps that are optimized for voice-first interaction experiences. Custom Commands provides a code-free authoring experience in Speech Studio, an automatic hosting model, and relatively lower complexity. The feature helps you focus on building the best solution for your voice-command scenarios. For more information, see the [Develop Custom Commands applications](how-to-develop-custom-commands-application.md) guide. Also see [Integrate with a client application by using the Speech SDK](how-to-custom-commands-setup-speech-sdk.md). ## Next steps diff --git a/articles/cognitive-services/Speech-Service/speech-synthesis-markup.md b/articles/cognitive-services/Speech-Service/speech-synthesis-markup.md index c8d840fe46b0f..b98f7ce57a6f2 100644 --- a/articles/cognitive-services/Speech-Service/speech-synthesis-markup.md +++ b/articles/cognitive-services/Speech-Service/speech-synthesis-markup.md @@ -158,6 +158,7 @@ The following table has descriptions of each supported style. |Style|Description| |-----------|-------------| +|`style="advertisement-upbeat"`|Expresses an excited and high-energy tone for promoting a product or service.| |`style="affectionate"`|Expresses a warm and affectionate tone, with higher pitch and vocal energy. The speaker is in a state of attracting the attention of the listener. 
The personality of the speaker is often endearing in nature.| |`style="angry"`|Expresses an angry and annoyed tone.| |`style="assistant"`|Expresses a warm and relaxed tone for digital assistants.| @@ -181,9 +182,12 @@ The following table has descriptions of each supported style. |`style="newscast"`|Expresses a formal and professional tone for narrating news.| |`style="newscast-casual"`|Expresses a versatile and casual tone for general news delivery.| |`style="newscast-formal"`|Expresses a formal, confident, and authoritative tone for news delivery.| +|`style="poetry-reading"`|Expresses an emotional and rhythmic tone while reading a poem.| |`style="sad"`|Expresses a sorrowful tone.| |`style="serious"`|Expresses a strict and commanding tone. Speaker often sounds stiffer and much less relaxed with firm cadence.| |`style="shouting"`|Speaks like from a far distant or outside and to make self be clearly heard| +|`style="sports-commentary"`|Expresses a relaxed and interesting tone for broadcasting a sports event.| +|`style="sports-commentary-excited"`|Expresses an intensive and energetic tone for broadcasting exciting moments in a sports event.| |`style="whispering"`|Speaks very softly and make a quiet and gentle sound| |`style="terrified"`|Expresses a very scared tone, with faster pace and a shakier voice. It sounds like the speaker is in an unsteady and frantic status.| |`style="unfriendly"`|Expresses a cold and indifferent tone.| diff --git a/articles/cognitive-services/Speech-Service/toc.yml b/articles/cognitive-services/Speech-Service/toc.yml index 04be6cf0dcaa9..72b6922204795 100644 --- a/articles/cognitive-services/Speech-Service/toc.yml +++ b/articles/cognitive-services/Speech-Service/toc.yml @@ -56,8 +56,6 @@ items: href: get-started-speech-to-text.md - name: How to recognize speech href: how-to-recognize-speech.md - - name: Set recognition source language - href: how-to-specify-source-language.md - name: Get speech recognition results href: get-speech-recognition-results.md - name: How to use batch transcription @@ -68,8 +66,8 @@ items: href: custom-speech-overview.md - name: Create a Custom Speech model items: - - name: Choose a model - href: how-to-custom-speech-choose-model.md + - name: Create a project + href: how-to-custom-speech-create-project.md - name: Upload training and testing datasets href: how-to-custom-speech-upload-data.md - name: Test recognition quality diff --git a/articles/cognitive-services/Speech-Service/voice-assistants.md b/articles/cognitive-services/Speech-Service/voice-assistants.md index e3a983ffd9288..94130cd4d0a69 100644 --- a/articles/cognitive-services/Speech-Service/voice-assistants.md +++ b/articles/cognitive-services/Speech-Service/voice-assistants.md @@ -26,7 +26,7 @@ The first step in creating a voice assistant is to decide what you want it to do | If you want... | Consider using... | Examples | |-------------------|------------------|----------------| |Open-ended conversation with robust skills integration and full deployment control | Azure Bot Service bot with [Direct Line Speech](direct-line-speech.md) channel |
                  • "I need to go to Seattle"
                  • "What kind of pizza can I order?"
                  -|Voice-command or simple task-oriented conversations with simplified authoring and hosting | [Custom Commands](custom-commands.md) |
                  • "Turn on the overhead light"
                  • "Make it 5 degrees warmer"
                  • More examples at [Speech Studio](https://speech.microsoft.com/customcommands)
                  +|Voice-command or simple task-oriented conversations with simplified authoring and hosting | [Custom Commands](custom-commands.md) |
                  • "Turn on the overhead light"
                  • "Make it 5 degrees warmer"
                  • More examples at [Speech Studio](https://aka.ms/speechstudio/customcommands)
                  If you aren't yet sure what you want your assistant to do, we recommend [Direct Line Speech](direct-line-speech.md) as the best option. It offers integration with a rich set of tools and authoring aids, such as the [Virtual Assistant solution and enterprise template](/azure/bot-service/bot-builder-enterprise-template-overview) and the [QnA Maker service](../qnamaker/overview/overview.md), to build on common patterns and use your existing knowledge sources. diff --git a/articles/cognitive-services/Translator/custom-translator/training-and-model.md b/articles/cognitive-services/Translator/custom-translator/training-and-model.md index 89f2716616eaa..d9fc5a18ca72f 100644 --- a/articles/cognitive-services/Translator/custom-translator/training-and-model.md +++ b/articles/cognitive-services/Translator/custom-translator/training-and-model.md @@ -1,5 +1,5 @@ --- -title: "Legacy: What are trainings and models? - Custom Translator" +title: "Legacy: What are trainings and modeling? - Custom Translator" titleSuffix: Azure Cognitive Services description: A model is the system, which provides translation for a specific language pair. The outcome of a successful training is a model. To train a model, three mutually exclusive data sets are required training dataset, tuning dataset, and testing dataset. author: laujan @@ -12,7 +12,7 @@ ms.author: lajanuar #Customer intent: As a Custom Translator user, I want to concept of a model and training, so that I can efficiently use training, tuning and testing datasets the helps me build a translation model. --- -# What are trainings and models? +# What are training and modeling? A model is the system, which provides translation for a specific language pair. The outcome of a successful training is a model. To train a model, three mutually exclusive document types are required: training, tuning, and testing. Dictionary document type can also be provided. For more information, _see_ [Sentence alignment](./sentence-alignment.md#suggested-minimum-number-of-sentences). @@ -48,4 +48,4 @@ The test data should include parallel documents where the target language senten You don't need more than 2,500 sentences as the testing data. When you let the system choose the testing set automatically, it will use a random subset of sentences from your bilingual training documents, and exclude these sentences from the training material itself. -You can view the custom translations of the testing set, and compare them to the translations provided in your testing set, by navigating to the test tab within a model. \ No newline at end of file +You can view the custom translations of the testing set, and compare them to the translations provided in your testing set, by navigating to the test tab within a model. diff --git a/articles/cognitive-services/Translator/document-translation/create-sas-tokens.md b/articles/cognitive-services/Translator/document-translation/create-sas-tokens.md index 72673ed42d0a3..7fe27e9af35ff 100644 --- a/articles/cognitive-services/Translator/document-translation/create-sas-tokens.md +++ b/articles/cognitive-services/Translator/document-translation/create-sas-tokens.md @@ -1,27 +1,97 @@ --- -title: Create shared access signature (SAS) tokens for containers and blobs with Microsoft Storage Explorer +title: Create shared access signature (SAS) tokens for storage containers and blobs description: How to create Shared Access Signature tokens (SAS) for containers and blobs with Microsoft Storage Explorer and the Azure portal. 
ms.topic: how-to manager: nitinme ms.author: lajanuar author: laujan -ms.date: 04/26/2022 +ms.date: 05/27/2022 --- # Create SAS tokens for your storage containers -In this article, you'll learn how to create shared access signature (SAS) tokens using the Azure Storage Explorer or the Azure portal. A SAS token provides secure, delegated access to resources in your Azure storage account. +In this article, you'll learn how to create user delegation, shared access signature (SAS) tokens, using the Azure portal or Azure Storage Explorer. User delegation SAS tokens are secured with Azure AD credentials. SAS tokens provide secure, delegated access to resources in your Azure storage account. -## Create your SAS tokens with Azure Storage Explorer +At a high level, here's how SAS tokens work: -### Prerequisites +* Your application submits the SAS token to Azure Storage as part of a REST API request. -* You'll need a [**Azure Storage Explorer**](../../../vs-azure-tools-storage-manage-with-storage-explorer.md) app installed in your Windows, macOS, or Linux development environment. Azure Storage Explorer is a free tool that enables you to easily manage your Azure cloud storage resources. -* After the Azure Storage Explorer app is installed, [connect it the storage account](../../../vs-azure-tools-storage-manage-with-storage-explorer.md?tabs=windows#connect-to-a-storage-account-or-service) you're using for Document Translation. +* If the storage service verifies that the SAS is valid, the request is authorized. -### Create your tokens +* If the SAS token is deemed invalid, the request is declined and the error code 403 (Forbidden) is returned. -### [SAS tokens for containers](#tab/Containers) +Azure Blob Storage offers three resource types: + +* **Storage** accounts provide a unique namespace in Azure for your data. +* **Data storage containers** are located in storage accounts and organize sets of blobs (files, text, or images). +* **Blobs** are located in containers and store text and binary data such as files, text, and images. + +> [!IMPORTANT] +> +> * SAS tokens are used to grant permissions to storage resources, and should be protected in the same manner as an account key. +> +> * Operations that use SAS tokens should be performed only over an HTTPS connection, and SAS URIs should only be distributed on a secure connection such as HTTPS. + +## Prerequisites + +To get started, you'll need the following resources: + +* An active [Azure account](https://azure.microsoft.com/free/cognitive-services/). If you don't have one, you can [create a free account](https://azure.microsoft.com/free/). + +* A [Translator](https://ms.portal.azure.com/#create/Microsoft.CognitiveServicesTextTranslation) resource. + +* A **standard performance** [Azure Blob Storage account](https://portal.azure.com/#create/Microsoft.StorageAccount-ARM). You'll create containers to store and organize your files within your storage account. If you don't know how to create an Azure storage account with a storage container, follow these quickstarts: + + * [Create a storage account](../../../storage/common/storage-account-create.md). When you create your storage account, select **Standard** performance in the **Instance details** > **Performance** field. + * [Create a container](../../../storage/blobs/storage-quickstart-blobs-portal.md#create-a-container). When you create your container, set **Public access level** to **Container** (anonymous read access for containers and files) in the **New Container** window. 
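+If you prefer to script this setup instead of using the portal or Storage Explorer steps that follow, SAS tokens can also be generated programmatically. The following sketch uses the `azure-storage-blob` Python library and an account-key (service) SAS rather than the user delegation SAS described in the next section; the account name, account key, and container names are placeholders:
+
+```python
+from datetime import datetime, timedelta
+
+from azure.storage.blob import ContainerSasPermissions, generate_container_sas
+
+ACCOUNT_NAME = "<your-storage-account>"   # placeholder
+ACCOUNT_KEY = "<your-account-key>"        # placeholder
+
+def container_sas(container_name: str, *, read: bool, write: bool) -> str:
+    """Return a SAS query string; source containers need read/list, targets need write/list."""
+    return generate_container_sas(
+        account_name=ACCOUNT_NAME,
+        container_name=container_name,
+        account_key=ACCOUNT_KEY,
+        permission=ContainerSasPermissions(read=read, write=write, list=True),
+        expiry=datetime.utcnow() + timedelta(hours=48),  # matches the portal's default duration
+    )
+
+source_sas = container_sas("source-en", read=True, write=False)
+target_sas = container_sas("target", read=False, write=True)
+source_url = f"https://{ACCOUNT_NAME}.blob.core.windows.net/source-en?{source_sas}"
+```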
+ +## Create SAS tokens in the Azure portal + + + +Go to the [Azure portal](https://portal.azure.com/#home) and navigate to your container or a specific file as follows and continue with the steps below: + +| Create SAS token for a container| Create SAS token for a specific file| +|:-----:|:-----:| +**Your storage account** → **containers** → **your container** |**Your storage account** → **containers** → **your container**→ **your file** | + +1. Right-click the container or file and select **Generate SAS** from the drop-down menu. + +1. Select **Signing method** → **User delegation key**. + +1. Define **Permissions** by checking and/or clearing the appropriate check box: + + * Your **source** container or file must have designated **read** and **list** access. + + * Your **target** container or file must have designated **write** and **list** access. + +1. Specify the signed key **Start** and **Expiry** times. + + * When you create a shared access signature (SAS), the default duration is 48 hours. After 48 hours, you'll need to create a new token. + * Consider setting a longer duration period for the time you'll be using your storage account for Translator Service operations. + * The value for the expiry time is a maximum of seven days from the creation of the SAS token. + +1. The **Allowed IP addresses** field is optional and specifies an IP address or a range of IP addresses from which to accept requests. If the request IP address doesn't match the IP address or address range specified on the SAS token, it won't be authorized. + +1. The **Allowed protocols** field is optional and specifies the protocol permitted for a request made with the SAS. The default value is HTTPS. + +1. Review then select **Generate SAS token and URL**. + +1. The **Blob SAS token** query string and **Blob SAS URL** will be displayed in the lower area of window. + +1. **Copy and paste the Blob SAS token and URL values in a secure location. They'll only be displayed once and cannot be retrieved once the window is closed.** + +1. To [construct a SAS URL](#use-your-sas-url-to-grant-access), append the SAS token (URI) to the URL for a storage service. + +## Create SAS tokens with Azure Storage Explorer + +Azure Storage Explorer is a free standalone app that enables you to easily manage your Azure cloud storage resources from your desktop. + +* You'll need the [**Azure Storage Explorer**](../../../vs-azure-tools-storage-manage-with-storage-explorer.md) app installed in your Windows, macOS, or Linux development environment. + +* After the Azure Storage Explorer app is installed, [connect it to the storage account](../../../vs-azure-tools-storage-manage-with-storage-explorer.md?tabs=windows#connect-to-a-storage-account-or-service) you're using for Document Translation. Follow the steps below to create tokens for a storage container or specific blob file: + +### [SAS tokens for storage containers](#tab/Containers) 1. Open the Azure Storage Explorer app on your local machine and navigate to your connected **Storage Accounts**. 1. Expand the Storage Accounts node and select **Blob Containers**. @@ -34,74 +104,72 @@ In this article, you'll learn how to create shared access signature (SAS) tokens * Define your container **Permissions** by checking and/or clearing the appropriate check box. * Review and select **Create**. -1. A new window will appear with the **Container** name, **URI**, and **Query string** for your container. +1. 
A new window will appear with the **Container** name, **URI**, and **Query string** for your container. 1. **Copy and paste the container, URI, and query string values in a secure location. They'll only be displayed once and can't be retrieved once the window is closed.** -1. To construct a SAS URL, append the SAS token (URI) to the URL for a storage service. +1. To [construct a SAS URL](#use-your-sas-url-to-grant-access), append the SAS token (URI) to the URL for a storage service. -### [SAS tokens for blobs](#tab/blobs) +### [SAS tokens for specific blob file](#tab/blobs) 1. Open the Azure Storage Explorer app on your local machine and navigate to your connected **Storage Accounts**. 1. Expand your storage node and select **Blob Containers**. 1. Expand the Blob Containers node and select a **container** node to display the contents in the main window. -1. Select the blob where you wish to delegate SAS access and right-click to display the options menu. +1. Select the file where you wish to delegate SAS access and right-click to display the options menu. 1. Select **Get Shared Access Signature...** from options menu. 1. In the **Shared Access Signature** window, make the following selections: * Select your **Access policy** (the default is none). * Specify the signed key **Start** and **Expiry** date and time. A short lifespan is recommended because, once generated, a SAS can't be revoked. * Select the **Time zone** for the Start and Expiry date and time (default is Local). * Define your container **Permissions** by checking and/or clearing the appropriate check box. + * Your **source** container or file must have designated **read** and **list** access. + * Your **target** container or file must have designated **write** and **list** access. + * Select **key1** or **key2**. * Review and select **Create**. -1. A new window will appear with the **Blob** name, **URI**, and **Query string** for your blob. + +1. A new window will appear with the **Blob** name, **URI**, and **Query string** for your blob. 1. **Copy and paste the blob, URI, and query string values in a secure location. They will only be displayed once and cannot be retrieved once the window is closed.** -1. To construct a SAS URL, append the SAS token (URI) to the URL for a storage service. +1. To [construct a SAS URL](#use-your-sas-url-to-grant-access), append the SAS token (URI) to the URL for a storage service. --- -## Create SAS tokens for blobs in the Azure portal +### Use your SAS URL to grant access - -### Prerequisites +The SAS URL includes a special set of [query parameters](/rest/api/storageservices/create-user-delegation-sas#assign-permissions-with-rbac). Those parameters indicate how the resources may be accessed by the client. -To get started, you'll need: +You can include your SAS URL with REST API requests in two ways: -* An active [**Azure account**](https://azure.microsoft.com/free/cognitive-services/). If you don't have one, you can [**create a free account**](https://azure.microsoft.com/free/). -* A [**Translator**](https://portal.azure.com/#create/Microsoft) service resource (**not** a Cognitive Services multi-service resource. *See* [Create a new Azure resource](../../cognitive-services-apis-create-account.md#create-a-new-azure-cognitive-services-resource). -* An [**Azure Blob Storage account**](https://portal.azure.com/#create/Microsoft.StorageAccount-ARM). You will create containers to store and organize your blob data within your storage account. 
+* Use the **SAS URL** as your sourceURL and targetURL values. -### Create your tokens +* Append the **SAS query string** to your existing sourceURL and targetURL values. -Go to the [Azure portal](https://portal.azure.com/#home) and navigate as follows: +Here is a sample REST API request: - **Your storage account** → **containers** → **your container** → **your blob** +```json +{ + "inputs": [ + { + "storageType": "File", + "source": { + "sourceUrl": "https://my.blob.core.windows.net/source-en/source-english.docx?sv=2019-12-12&st=2021-01-26T18%3A30%3A20Z&se=2021-02-05T18%3A30%3A00Z&sr=c&sp=rl&sig=d7PZKyQsIeE6xb%2B1M4Yb56I%2FEEKoNIF65D%2Fs0IFsYcE%3D" + }, + "targets": [ + { + "targetUrl": "https://my.blob.core.windows.net/target/try/Target-Spanish.docx?sv=2019-12-12&st=2021-01-26T18%3A31%3A11Z&se=2021-02-05T18%3A31%3A00Z&sr=c&sp=wl&sig=AgddSzXLXwHKpGHr7wALt2DGQJHCzNFF%2F3L94JHAWZM%3D", + "language": "es" + }, + { + "targetUrl": "https://my.blob.core.windows.net/target/try/Target-German.docx?sv=2019-12-12&st=2021-01-26T18%3A31%3A11Z&se=2021-02-05T18%3A31%3A00Z&sr=c&sp=wl&sig=AgddSzXLXwHKpGHr7wALt2DGQJHCzNFF%2F3L94JHAWZM%3D", + "language": "de" + } + ] + } + ] +} +``` -1. Select **Generate SAS** from the menu near the top of the page. - -1. Select **Signing method** → **User delegation key**. - -1. Define **Permissions** by checking and/or clearing the appropriate check box. - -1. Specify the signed key **Start** and **Expiry** times. - -1. The **Allowed IP addresses** field is optional and specifies an IP address or a range of IP addresses from which to accept requests. If the request IP address doesn't match the IP address or address range specified on the SAS token, it won't be authorized. - -1. The **Allowed protocols** field is optional and specifies the protocol permitted for a request made with the SAS. The default value is HTTPS. - -1. Review then select **Generate SAS token and URL**. - -1. The **Blob SAS token** query string and **Blob SAS URL** will be displayed in the lower area of window. - -1. **Copy and paste the Blob SAS token and URL values in a secure location. They'll only be displayed once and cannot be retrieved once the window is closed.** - -1. To construct a SAS URL, append the SAS token (URI) to the URL for a storage service. - -## Learn more - -* [Create SAS tokens for blobs or containers programmatically](../../../storage/blobs/sas-service-create.md) -* [Permissions for a directory, container, or blob](/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob) +That's it! You've learned how to create SAS tokens to authorize how clients access your data. 
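+If you're calling the service from code, the same request can be submitted with a few lines of Python using the `requests` library. In this sketch, the resource endpoint, key, and the `/translator/text/batch/v1.0/batches` route are assumptions based on the Document Translation REST API at the time of writing, so check the current API reference before relying on them:
+
+```python
+import requests
+
+ENDPOINT = "https://<your-translator-resource>.cognitiveservices.azure.com"  # placeholder
+KEY = "<your-translator-key>"                                                # placeholder
+
+# Append your SAS query strings, or use the full SAS URLs you copied earlier.
+source_url = "https://my.blob.core.windows.net/source-en" + "?" + "<source-sas-token>"
+target_url = "https://my.blob.core.windows.net/target" + "?" + "<target-sas-token>"
+
+payload = {
+    "inputs": [
+        {
+            "source": {"sourceUrl": source_url},
+            "targets": [{"targetUrl": target_url, "language": "es"}],
+        }
+    ]
+}
+
+response = requests.post(
+    f"{ENDPOINT}/translator/text/batch/v1.0/batches",
+    headers={"Ocp-Apim-Subscription-Key": KEY},
+    json=payload,
+)
+response.raise_for_status()
+# A 202 response includes an Operation-Location header for polling the job status.
+print(response.headers.get("Operation-Location"))
+```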
## Next steps > [!div class="nextstepaction"] > [Get Started with Document Translation](get-started-with-document-translation.md) > -> diff --git a/articles/cognitive-services/Translator/document-translation/overview.md b/articles/cognitive-services/Translator/document-translation/overview.md index c9846b633341f..9ef39b8d33ef8 100644 --- a/articles/cognitive-services/Translator/document-translation/overview.md +++ b/articles/cognitive-services/Translator/document-translation/overview.md @@ -76,6 +76,16 @@ The following document file types are supported by Document Translation: |Tab Separated Values/TAB|tsv/tab| A tab-delimited raw-data file used by spreadsheet programs.| |Text|txt| An unformatted text document.| +### Legacy file types + +Source file types will be preserved during the document translation with the following **exceptions**: + +| Source file extension | Translated file extension| +| --- | --- | +| .doc, .odt, .rtf, | .docx | +| .xls, .ods | .xlsx | +| .ppt, .odp | .pptx | + ## Supported glossary formats The following glossary file types are supported by Document Translation: diff --git a/articles/cognitive-services/Translator/how-to-create-translator-resource.md b/articles/cognitive-services/Translator/how-to-create-translator-resource.md index 9d98c96040db3..ba436dc15a204 100644 --- a/articles/cognitive-services/Translator/how-to-create-translator-resource.md +++ b/articles/cognitive-services/Translator/how-to-create-translator-resource.md @@ -20,31 +20,16 @@ In this article, you'll learn how to create a Translator resource in the Azure p To get started, you'll need an active [**Azure account**](https://azure.microsoft.com/free/cognitive-services/). If you don't have one, you can [**create a free 12-month subscription**](https://azure.microsoft.com/free/). -## Translator resource types +## Create your resource The Translator service can be accessed through two different resource types: -* **Single-service** resource types enable access to a single service API key and endpoint. - -* **Multi-service** resource types enable access to multiple Cognitive Services using a single API key and endpoint. The Cognitive Services resource is currently available for the following services: - * Language ([Translator](../translator/translator-overview.md), [Language Understanding (LUIS)](../luis/what-is-luis.md), [Language service](../text-analytics/overview.md)) - * Vision ([Computer Vision](../computer-vision/overview.md)), ([Face](../face/overview.md)) - * Decision ([Content Moderator](../content-moderator/overview.md)) - -## Create your resource - -* Navigate directly to the [**Create Translator**](https://portal.azure.com/#create/Microsoft.CognitiveServicesTextTranslation) page in the Azure portal to complete your project details. +* [**Single-service**](https://portal.azure.com/#create/Microsoft.CognitiveServicesTextTranslation) resource types enable access to a single service API key and endpoint. -* Navigate directly to the [**Create Cognitive Services**](https://portal.azure.com/#create/Microsoft.CognitiveServicesAllInOne) page in the Azure portal to complete your project details. +* [**Multi-service**](https://portal.azure.com/#create/Microsoft.CognitiveServicesAllInOne) resource types enable access to multiple Cognitive Services using a single API key and endpoint. The Cognitive Services resource is currently available for the following services: ->[!TIP] ->If you prefer, you can start on the Azure Portal home page to begin the **Create** process as follows: -> -> 1. 
Navigate to the [**Azure Portal**](https://portal.azure.com/#home) home page. -> 1. Select ➕**Create a resource** from the Azure services menu. ->1. In the **Search the Marketplace** search box, enter and select **Translator** (single-service resource) or **Cognitive Services** (multi-service resource). *See* [Choose your resource type](#create-your-resource), above. -> 1. Select **Create** and you will be taken to the project details page. ->

                  +> [!TIP] +> Create a Cognitive Services resource if you plan to access multiple cognitive services under a single endpoint/key. For Translator Service access only, create a Translator single-service resource. Please note that you'll need a single-service resource if you intend to use [Azure Active Directory authentication](../../active-directory/authentication/overview-authentication.md). ## Complete your project and instance details diff --git a/articles/cognitive-services/Translator/language-support.md b/articles/cognitive-services/Translator/language-support.md index 4c440b93d52da..9d82944ab18dd 100644 --- a/articles/cognitive-services/Translator/language-support.md +++ b/articles/cognitive-services/Translator/language-support.md @@ -28,24 +28,22 @@ ms.author: lajanuar > [!NOTE] > Language code `pt` will default to `pt-br`, Portuguese (Brazil). -> -> ☼ Indicates the language is not available for scanned PDF document translation. -|Language | Language code | ☼ Cloud – Text Translation and Document Translation | Containers – Text Translation|Custom Translator|Auto Language Detection|Dictionary +|Language | Language code | Cloud – Text Translation and Document Translation | Containers – Text Translation|Custom Translator|Auto Language Detection|Dictionary |:-|:-:|:-:|:-:|:-:|:-:|:-:| | Afrikaans | `af` |✔|✔|✔|✔|✔| | Albanian | `sq` |✔|✔||✔|| -| Amharic ☼ | `am` |✔|✔|||| +| Amharic | `am` |✔|✔|||| | Arabic | `ar` |✔|✔|✔|✔|✔| -| Armenian ☼ | `hy` |✔|✔||✔|| -| Assamese ☼ | `as` |✔|✔|✔||| +| Armenian | `hy` |✔|✔||✔|| +| Assamese | `as` |✔|✔|✔||| | Azerbaijani (Latin) | `az` |✔|✔|||| -| Bangla ☼ | `bn` |✔|✔|✔||✔| -| Bashkir ☼ | `ba` |✔||||| +| Bangla | `bn` |✔|✔|✔||✔| +| Bashkir | `ba` |✔||||| | Basque | `eu` |✔||||| | Bosnian (Latin) | `bs` |✔|✔|✔||✔| | Bulgarian | `bg` |✔|✔|✔|✔|✔| -| Cantonese (Traditional) ☼ | `yue` |✔|✔|||| +| Cantonese (Traditional) | `yue` |✔|✔|||| | Catalan | `ca` |✔|✔|✔|✔|✔| | Chinese (Literary) | `lzh` |✔||||| | Chinese Simplified | `zh-Hans` |✔|✔|✔|✔|✔| @@ -54,7 +52,7 @@ ms.author: lajanuar | Czech | `cs` |✔|✔|✔|✔|✔| | Danish | `da` |✔|✔|✔|✔|✔| | Dari | `prs` |✔|✔|||| -| Divehi ☼ | `dv` |✔|||✔|| +| Divehi | `dv` |✔|||✔|| | Dutch | `nl` |✔|✔|✔|✔|✔| | English | `en` |✔|✔|✔|✔|✔| | Estonian | `et` |✔|✔|✔|✔|| @@ -65,55 +63,55 @@ ms.author: lajanuar | French | `fr` |✔|✔|✔|✔|✔| | French (Canada) | `fr-ca` |✔|✔|||| | Galician | `gl` |✔||||| -| Georgian ☼ | `ka` |✔|||✔|| +| Georgian | `ka` |✔|||✔|| | German | `de` |✔|✔|✔|✔|✔| -| Greek ☼ | `el` |✔|✔|✔|✔|✔| -| Gujarati ☼ | `gu` |✔|✔|✔|✔|| +| Greek | `el` |✔|✔|✔|✔|✔| +| Gujarati | `gu` |✔|✔|✔|✔|| | Haitian Creole | `ht` |✔|✔||✔|✔| -| Hebrew ☼ | `he` |✔|✔|✔|✔|✔| +| Hebrew | `he` |✔|✔|✔|✔|✔| | Hindi | `hi` |✔|✔|✔|✔|✔| | Hmong Daw (Latin) | `mww` |✔|✔|||✔| | Hungarian | `hu` |✔|✔|✔|✔|✔| | Icelandic | `is` |✔|✔|✔|✔|✔| | Indonesian | `id` |✔|✔|✔|✔|✔| -| Inuinnaqtun ☼ | `ikt` |✔||||| -| Inuktitut ☼ | `iu` |✔|✔|✔|✔|| +| Inuinnaqtun | `ikt` |✔||||| +| Inuktitut | `iu` |✔|✔|✔|✔|| | Inuktitut (Latin) | `iu-Latn` |✔||||| | Irish | `ga` |✔|✔|✔|✔|| | Italian | `it` |✔|✔|✔|✔|✔| | Japanese | `ja` |✔|✔|✔|✔|✔| -| Kannada ☼ | `kn` |✔|✔|✔||| +| Kannada | `kn` |✔|✔|✔||| | Kazakh | `kk` |✔|✔|||| -| Khmer ☼ | `km` |✔|✔||✔|| +| Khmer | `km` |✔|✔||✔|| | Klingon | `tlh-Latn` |✔| ||✔|✔| -| Klingon (plqaD) ☼ | `tlh-Piqd` |✔| ||✔|| +| Klingon (plqaD) | `tlh-Piqd` |✔| ||✔|| | Korean | `ko` |✔|✔|✔|✔|✔| | Kurdish (Central) | `ku` |✔|✔||✔|| -| Kurdish (Northern) ☼ | `kmr` |✔|✔|||| +| Kurdish (Northern) | `kmr` |✔|✔|||| | Kyrgyz (Cyrillic) | 
`ky` |✔||||| -| Lao ☼ | `lo` |✔|✔||✔|| -| Latvian ☼| `lv` |✔|✔|✔|✔|✔| +| Lao | `lo` |✔|✔||✔|| +| Latvian | `lv` |✔|✔|✔|✔|✔| | Lithuanian | `lt` |✔|✔|✔|✔|✔| -| Macedonian ☼ | `mk` |✔|||✔|| -| Malagasy ☼ | `mg` |✔|✔|✔||| +| Macedonian | `mk` |✔|||✔|| +| Malagasy | `mg` |✔|✔|✔||| | Malay (Latin) | `ms` |✔|✔|✔|✔|✔| -| Malayalam ☼ | `ml` |✔|✔|✔||| +| Malayalam | `ml` |✔|✔|✔||| | Maltese | `mt` |✔|✔|✔|✔|✔| | Maori | `mi` |✔|✔|✔||| | Marathi | `mr` |✔|✔|✔||| -| Mongolian (Cyrillic) ☼| `mn-Cyrl` |✔||||| -| Mongolian (Traditional) ☼ | `mn-Mong` |✔|||✔|| -| Myanmar ☼ | `my` |✔|✔||✔|| +| Mongolian (Cyrillic) | `mn-Cyrl` |✔||||| +| Mongolian (Traditional) | `mn-Mong` |✔|||✔|| +| Myanmar | `my` |✔|✔||✔|| | Nepali | `ne` |✔|✔|||| | Norwegian | `nb` |✔|✔|✔|✔|✔| -| Odia ☼ | `or` |✔|✔|✔||| +| Odia | `or` |✔|✔|✔||| | Pashto | `ps` |✔|✔||✔|| | Persian | `fa` |✔|✔|✔|✔|✔| | Polish | `pl` |✔|✔|✔|✔|✔| | Portuguese (Brazil) | `pt` |✔|✔|✔|✔|✔| | Portuguese (Portugal) | `pt-pt` |✔|✔|||| | Punjabi | `pa` |✔|✔|✔||| -| Queretaro Otomi ☼ | `otq` |✔|✔|||| +| Queretaro Otomi | `otq` |✔|✔|||| | Romanian | `ro` |✔|✔|✔|✔|✔| | Russian | `ru` |✔|✔|✔|✔|✔| | Samoan (Latin) | `sm` |✔| |✔||| @@ -125,13 +123,13 @@ ms.author: lajanuar | Spanish | `es` |✔|✔|✔|✔|✔| | Swahili (Latin) | `sw` |✔|✔|✔|✔|✔| | Swedish | `sv` |✔|✔|✔|✔|✔| -| Tahitian ☼ | `ty` |✔| |✔|✔|| -| Tamil ☼ | `ta` |✔|✔|✔||✔| +| Tahitian | `ty` |✔| |✔|✔|| +| Tamil | `ta` |✔|✔|✔||✔| | Tatar (Latin) | `tt` |✔||||| -| Telugu ☼ | `te` |✔|✔|✔||| -| Thai ☼ | `th` |✔| |✔|✔|✔| -| Tibetan ☼ | `bo` |✔|||| -| Tigrinya ☼ | `ti` |✔|✔|||| +| Telugu | `te` |✔|✔|✔||| +| Thai | `th` |✔| |✔|✔|✔| +| Tibetan | `bo` |✔|||| +| Tigrinya | `ti` |✔|✔|||| | Tongan | `to` |✔|✔|✔||| | Turkish | `tr` |✔|✔|✔|✔|✔| | Turkmen (Latin) | `tk` |✔|||| @@ -140,7 +138,7 @@ ms.author: lajanuar | Urdu | `ur` |✔|✔|✔|✔|✔| | Uyghur (Arabic) | `ug` |✔|||| | Uzbek (Latin | `uz` |✔|||✔|| -| Vietnamese ☼ | `vi` |✔|✔|✔|✔|✔| +| Vietnamese | `vi` |✔|✔|✔|✔|✔| | Welsh | `cy` |✔|✔|✔|✔|✔| | Yucatec Maya | `yua` |✔|✔||✔|| | Zulu | `zu` |✔||||| diff --git a/articles/cognitive-services/Translator/toc.yml b/articles/cognitive-services/Translator/toc.yml index 8e373bfaf27c3..4511a116a984b 100644 --- a/articles/cognitive-services/Translator/toc.yml +++ b/articles/cognitive-services/Translator/toc.yml @@ -209,7 +209,7 @@ items: href: custom-translator/sentence-alignment.md - name: Data filtering href: custom-translator/data-filtering.md - - name: Training and model + - name: Training and modeling href: custom-translator/training-and-model.md - name: BLEU score href: custom-translator/what-is-bleu-score.md diff --git a/articles/cognitive-services/big-data/cognitive-services-for-big-data.md b/articles/cognitive-services/big-data/cognitive-services-for-big-data.md index 949fe76ba013d..33fc9e25f9301 100644 --- a/articles/cognitive-services/big-data/cognitive-services-for-big-data.md +++ b/articles/cognitive-services/big-data/cognitive-services-for-big-data.md @@ -31,7 +31,7 @@ Cognitive Services for Big Data can use services from any region in the world, a |Service Name|Service Description| |:-----------|:------------------| |[Computer Vision](../computer-vision/index.yml "Computer Vision")| The Computer Vision service provides you with access to advanced algorithms for processing images and returning information. | -|[Face](../face/index.yml "Face")| The Face service provides access to advanced face algorithms, enabling face attribute detection and recognition. 
| +|[Face](../computer-vision/index-identity.yml "Face")| The Face service provides access to advanced face algorithms, enabling face attribute detection and recognition. | ### Speech diff --git a/articles/cognitive-services/cognitive-services-and-machine-learning.md b/articles/cognitive-services/cognitive-services-and-machine-learning.md index 6409fce4c5b77..4b4a008813545 100644 --- a/articles/cognitive-services/cognitive-services-and-machine-learning.md +++ b/articles/cognitive-services/cognitive-services-and-machine-learning.md @@ -10,7 +10,7 @@ ms.date: 10/28/2021 --- # Cognitive Services and machine learning -Cognitive Services provides machine learning capabilities to solve general problems such as analyzing text for emotional sentiment or analyzing images to recognize objects or faces. You don't need special machine learning or data science knowledge to use these services. +Cognitive Services provides machine learning capabilities to solve general problems such as analyzing text for emotional sentiment or analyzing images to recognize objects or faces. You don't need special machine learning or data science knowledge to use these services. [Cognitive Services](./what-are-cognitive-services.md) is a group of services, each supporting different, generalized prediction capabilities. The services are divided into different categories to help you find the right service. @@ -25,9 +25,9 @@ Cognitive Services provides machine learning capabilities to solve general probl Use Cognitive Services when you: * Can use a generalized solution. -* Access solution from a programming REST API or SDK. +* Access solution from a programming REST API or SDK. -Use another machine-learning solution when you: +Use other machine-learning solutions when you: * Need to choose the algorithm and need to train on very specific data. @@ -50,7 +50,7 @@ Both have the end-goal of applying artificial intelligence (AI) to enhance busin Generally, the audiences are different: * Cognitive Services are for developers without machine-learning experience. -* Azure Machine Learning is tailored for data scientists. +* Azure Machine Learning is tailored for data scientists. ## How is a Cognitive Service different from machine learning? diff --git a/articles/cognitive-services/cognitive-services-apis-create-account-cli.md b/articles/cognitive-services/cognitive-services-apis-create-account-cli.md index a752053f0289d..0da7734302831 100644 --- a/articles/cognitive-services/cognitive-services-apis-create-account-cli.md +++ b/articles/cognitive-services/cognitive-services-apis-create-account-cli.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services keywords: cognitive services, cognitive intelligence, cognitive solutions, ai services ms.topic: quickstart -ms.date: 03/02/2022 +ms.date: 06/06/2022 ms.author: aahi ms.custom: mode-api, devx-track-azurecli ms.devlang: azurecli @@ -16,13 +16,11 @@ ms.devlang: azurecli # Quickstart: Create a Cognitive Services resource using the Azure CLI -Use this quickstart to get started with Azure Cognitive Services using [Azure Command-Line Interface (CLI)](/cli/azure/install-azure-cli) commands. +Use this quickstart to create a Cognitive Services resource using [Azure Command-Line Interface (CLI)](/cli/azure/install-azure-cli) commands. After creating the resource, use the keys and endpoint generated for you to authenticate your applications. 
-Azure Cognitive Services are cloud-based services with REST APIs, and client library SDKs available to help developers build cognitive intelligence into applications without having direct artificial intelligence (AI) or data science skills or knowledge. Azure Cognitive Services enables developers to easily add cognitive features into their applications with cognitive solutions that can see, hear, speak, understand, and even begin to reason. +Azure Cognitive Services is a cloud-based service with REST APIs, and client library SDKs available to help developers build cognitive intelligence into applications without having direct artificial intelligence (AI) or data science skills or knowledge. Azure Cognitive Services enables developers to easily add cognitive features into their applications with cognitive solutions that can see, hear, speak, understand, and even begin to reason. -Cognitive Services are represented by Azure [resources](../azure-resource-manager/management/manage-resources-portal.md) that you create in your Azure subscription. After creating the resource, Use the keys and endpoint generated for you to authenticate your applications. - -In this quickstart, you'll learn how to sign up for Azure Cognitive Services and create an account that has a single-service or multi-service subscription via the [Azure CLI](/cli/azure/install-azure-cli). These services are represented by Azure [resources](../azure-resource-manager/management/manage-resources-portal.md), which enable you to connect to one or more of the Azure Cognitive Services APIs. +## Types of Cognitive Services resource [!INCLUDE [cognitive-services-subscription-types](../../includes/cognitive-services-subscription-types.md)] @@ -45,7 +43,7 @@ You can also use the green **Try It** button to run these commands in your brows ## Create a new Azure Cognitive Services resource group -Before creating a Cognitive Services resource, you must have an Azure resource group to contain the resource. When you create a new resource, you have the option to either create a new resource group, or use an existing one. This article shows how to create a new resource group. +Before creating a Cognitive Services resource, you must have an Azure resource group to contain the resource. When you create a new resource, you can either create a new resource group, or use an existing one. This article shows how to create a new resource group. ### Choose your resource group location @@ -75,7 +73,7 @@ az group create \ ### Choose a cognitive service and pricing tier -When creating a new resource, you will need to know the "kind" of service you want to use, along with the [pricing tier](https://azure.microsoft.com/pricing/details/cognitive-services/) (or sku) you want. You will use this and other information as parameters when creating the resource. +When creating a new resource, you'll need to know the "kind" of service you want to use, along with the [pricing tier](https://azure.microsoft.com/pricing/details/cognitive-services/) (or sku) you want. You'll use this and other information as parameters when creating the resource. [!INCLUDE [cognitive-services-subscription-types](../../includes/cognitive-services-subscription-types.md)] @@ -90,7 +88,7 @@ az cognitiveservices account list-kinds ### Add a new resource to your resource group -To create and subscribe to a new Cognitive Services resource, use the [az cognitiveservices account create](/cli/azure/cognitiveservices/account#az-cognitiveservices-account-create) command. 
This command adds a new billable resource to the resource group created earlier. When creating your new resource, you will need to know the "kind" of service you want to use, along with its pricing tier (or sku) and an Azure location: +To create and subscribe to a new Cognitive Services resource, use the [az cognitiveservices account create](/cli/azure/cognitiveservices/account#az-cognitiveservices-account-create) command. This command adds a new billable resource to the resource group created earlier. When creating your new resource, you'll need to know the "kind" of service you want to use, along with its pricing tier (or sku) and an Azure location: You can create an F0 (free) resource for Anomaly Detector, named `anomaly-detector-resource` with the command below. diff --git a/articles/cognitive-services/cognitive-services-apis-create-account.md b/articles/cognitive-services/cognitive-services-apis-create-account.md index 0a33766c2749e..eeba3444f8cda 100644 --- a/articles/cognitive-services/cognitive-services-apis-create-account.md +++ b/articles/cognitive-services/cognitive-services-apis-create-account.md @@ -8,16 +8,18 @@ manager: nitinme keywords: cognitive services, cognitive intelligence, cognitive solutions, ai services ms.service: cognitive-services ms.topic: conceptual -ms.date: 03/03/2022 +ms.date: 06/06/2022 ms.author: aahi --- # Quickstart: Create a Cognitive Services resource using the Azure portal -Use this quickstart to start using Azure Cognitive Services. After creating a Cognitive Service resource in the Azure portal, you'll get an endpoint and a key for authenticating your applications. +Use this quickstart to create a Cognitive Services resource. After you create a Cognitive Service resource in the Azure portal , you'll get an endpoint and a key for authenticating your applications. Azure Cognitive Services are cloud-based services with REST APIs, and client library SDKs available to help developers build cognitive intelligence into applications without having direct artificial intelligence (AI) or data science skills or knowledge. Azure Cognitive Services enables developers to easily add cognitive features into their applications with cognitive solutions that can see, hear, speak, understand, and even begin to reason. +## Types of Cognitive Services resource + [!INCLUDE [cognitive-services-subscription-types](../../includes/cognitive-services-subscription-types.md)] ## Prerequisites @@ -25,7 +27,6 @@ Azure Cognitive Services are cloud-based services with REST APIs, and client lib * A valid Azure subscription - [Create one for free](https://azure.microsoft.com/free/cognitive-services/). * [!INCLUDE [contributor-requirement](./includes/quickstarts/contributor-requirement.md)] - ## Create a new Azure Cognitive Services resource ### [Multi-service](#tab/multiservice) @@ -110,10 +111,11 @@ The multi-service resource is named **Cognitive Services** in the portal. The mu :::image type="content" source="media/cognitive-services-apis-create-account/cognitive-services-resource-deployed.png" alt-text="Get resource keys screen"::: 1. From the quickstart pane that opens, you can access the resource endpoint and manage keys. - + [!INCLUDE [cognitive-services-environment-variables](../../includes/cognitive-services-environment-variables.md)] @@ -122,8 +124,9 @@ The multi-service resource is named **Cognitive Services** in the portal. The mu If you want to clean up and remove a Cognitive Services subscription, you can delete the resource or resource group. 
Deleting the resource group also deletes any other resources contained in the group. 1. In the Azure portal, expand the menu on the left side to open the menu of services, and choose **Resource Groups** to display the list of your resource groups. -1. Locate the resource group containing the resource to be deleted -1. Right-click on the resource group listing. Select **Delete resource group**, and confirm. +1. Locate the resource group containing the resource to be deleted. +1. If you want to delete the entire resource group, select the resource group name. On the next page, Select **Delete resource group**, and confirm. +1. If you want to delete only the Cognitive Service resource, select the resource group to see all the resources within it. On the next page, select the resource that you want to delete, click the ellipsis menu for that row, and select **Delete**. If you need to recover a deleted resource, see [Recover deleted Cognitive Services resources](manage-resources.md). diff --git a/articles/cognitive-services/cognitive-services-security.md b/articles/cognitive-services/cognitive-services-security.md index 0f2c79dec4bed..d41c112da2cb0 100644 --- a/articles/cognitive-services/cognitive-services-security.md +++ b/articles/cognitive-services/cognitive-services-security.md @@ -197,6 +197,10 @@ NSString* value = Customer Lockbox is available for this service: * Translator +* Conversational language understanding +* Custom text classification +* Custom named entity recognition +* Orchestration workflow For the following services, Microsoft engineers will not access any customer data in the E0 tier: diff --git a/articles/cognitive-services/cognitive-services-virtual-networks.md b/articles/cognitive-services/cognitive-services-virtual-networks.md index 12faa208ae137..bf214bf772ae6 100644 --- a/articles/cognitive-services/cognitive-services-virtual-networks.md +++ b/articles/cognitive-services/cognitive-services-virtual-networks.md @@ -52,7 +52,7 @@ Virtual networks (VNETs) are supported in [regions where Cognitive Services are > [!NOTE] -> If you're using LUIS or Speech Services, the **CognitiveServicesManagement** tag only enables you use the service using the SDK or REST API. To access and use LUIS portal and/or Speech Studio from a virtual network, you will need to use the following tags: +> If you're using LUIS, Speech Services, or Language services, the **CognitiveServicesManagement** tag only enables you use the service using the SDK or REST API. 
To access and use the LUIS portal, Speech Studio, or Language Studio from a virtual network, you will need to use the following tags: > * **AzureActiveDirectory** > * **AzureFrontDoor.Frontend** > * **AzureResourceManager** diff --git a/articles/cognitive-services/containers/includes/create-container-instances-resource-from-azure-cli.md b/articles/cognitive-services/containers/includes/create-container-instances-resource-from-azure-cli.md index 359baf82f0fec..7170c955acff4 100644 --- a/articles/cognitive-services/containers/includes/create-container-instances-resource-from-azure-cli.md +++ b/articles/cognitive-services/containers/includes/create-container-instances-resource-from-azure-cli.md @@ -9,6 +9,7 @@ ms.service: cognitive-services ms.topic: include ms.date: 04/01/2020 ms.author: aahi +ms.tool: azure-cli --- ## Create an Azure Container Instance resource from the Azure CLI diff --git a/articles/cognitive-services/create-account-bicep.md b/articles/cognitive-services/create-account-bicep.md index f7bdb20fc7b90..cf6bbe30e8503 100644 --- a/articles/cognitive-services/create-account-bicep.md +++ b/articles/cognitive-services/create-account-bicep.md @@ -13,18 +13,20 @@ ms.custom: subject-armqs, mode-arm # Quickstart: Create a Cognitive Services resource using Bicep -This quickstart describes how to use Bicep to create Cognitive Services. +Follow this quickstart to create a Cognitive Services resource using Bicep. Azure Cognitive Services are cloud-based services with REST APIs, and client library SDKs available to help developers build cognitive intelligence into applications without having direct artificial intelligence (AI) or data science skills or knowledge. Azure Cognitive Services enables developers to easily add cognitive features into their applications with cognitive solutions that can see, hear, speak, understand, and even begin to reason. -Create a resource using Bicep. This multi-service resource lets you: +[!INCLUDE [About Bicep](../../includes/resource-manager-quickstart-bicep-introduction.md)] + +## Things to consider + +Using Bicep to create a Cognitive Service resource lets you create a multi-service resource. This enables you to: * Access multiple Azure Cognitive Services with a single key and endpoint. * Consolidate billing from the services you use. * [!INCLUDE [terms-azure-portal](./includes/quickstarts/terms-azure-portal.md)] -[!INCLUDE [About Bicep](../../includes/resource-manager-quickstart-bicep-introduction.md)] - ## Prerequisites * If you don't have an Azure subscription, [create one for free](https://azure.microsoft.com/free/cognitive-services).
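Once the prerequisites are in place, a Bicep file is typically deployed to a resource group with the Azure CLI. The commands below are only a sketch; `main.bicep`, the resource group name, and the location are placeholders, and the quickstart's own deployment steps (not shown in this diff) are the authoritative version.

```azurecli
# Create a resource group to deploy into (placeholder name and location)
az group create --name my-cognitive-rg --location eastus

# Deploy the Bicep file to that resource group
az deployment group create --resource-group my-cognitive-rg --template-file main.bicep
```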
diff --git a/articles/cognitive-services/includes/register-resource-subscription.md b/articles/cognitive-services/includes/register-resource-subscription.md index 5858297b9699e..b2d74681446f2 100644 --- a/articles/cognitive-services/includes/register-resource-subscription.md +++ b/articles/cognitive-services/includes/register-resource-subscription.md @@ -2,8 +2,8 @@ title: include file description: include file ms.topic: include -ms.date: 07/27/2020 +ms.date: 05/25/2022 --- > [!Tip] -> If your subscription doesn't allow you to create a Cognitive Service resource, you may need to enable that ability of the [Azure resource provider](../../azure-resource-manager/management/resource-providers-and-types.md#azure-portal) with the [Azure portal](../../azure-resource-manager/management/resource-providers-and-types.md#azure-powershell), [PowerShell command](../../azure-resource-manager/management/resource-providers-and-types.md#azure-powershell) or an [Azure CLI command](../../azure-resource-manager/management/resource-providers-and-types.md#azure-cli). If you are not the subscription owner, ask the _Subscription Owner_ or someone with a role of _admin_ to complete the registration for you or ask for the /register/action privileges granted to your account. \ No newline at end of file +> If your subscription doesn't allow you to create a Cognitive Service resource, you may need to enable that ability of the [Azure resource provider](../../azure-resource-manager/management/resource-providers-and-types.md#register-resource-provider) with the [Azure portal](../../azure-resource-manager/management/resource-providers-and-types.md#azure-portal), [PowerShell command](../../azure-resource-manager/management/resource-providers-and-types.md#azure-powershell) or an [Azure CLI command](../../azure-resource-manager/management/resource-providers-and-types.md#azure-cli). If you are not the subscription owner, ask the _Subscription Owner_ or someone with a role of _admin_ to complete the registration for you or ask for the /register/action privileges granted to your account. \ No newline at end of file diff --git a/articles/cognitive-services/index.yml b/articles/cognitive-services/index.yml index 102bc91e96092..0c215622b54ef 100644 --- a/articles/cognitive-services/index.yml +++ b/articles/cognitive-services/index.yml @@ -55,9 +55,9 @@ conceptualContent: - url: ./custom-vision-service/index.yml itemType: overview text: Custom Vision - - url: ./face/index.yml + - url: ./computer-vision/index-identity.yml itemType: overview - text: Face + text: Identity - title: Language summary: Allow your apps to process natural language with pre-built scripts, evaluate sentiment and learn how to recognize what users want. links: diff --git a/articles/cognitive-services/language-service/concepts/data-limits.md b/articles/cognitive-services/language-service/concepts/data-limits.md index 22250eaeaf4a7..e1a48d546d1af 100644 --- a/articles/cognitive-services/language-service/concepts/data-limits.md +++ b/articles/cognitive-services/language-service/concepts/data-limits.md @@ -9,7 +9,7 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: conceptual -ms.date: 02/25/2022 +ms.date: 06/02/2022 ms.author: aahi --- @@ -82,7 +82,7 @@ Exceeding the following document limits will generate an HTTP 400 error code. 
| Key Phrase Extraction | 10 | | Named Entity Recognition (NER) | 5 | | Personally Identifying Information (PII) detection | 5 | -| Text summarization | 25 | +| Document summarization | 25 | | Entity Linking | 5 | | Text Analytics for health | 10 for the web-based API, 1000 for the container. | diff --git a/articles/cognitive-services/language-service/concepts/encryption-data-at-rest.md b/articles/cognitive-services/language-service/concepts/encryption-data-at-rest.md new file mode 100644 index 0000000000000..3c87343ec684a --- /dev/null +++ b/articles/cognitive-services/language-service/concepts/encryption-data-at-rest.md @@ -0,0 +1,72 @@ +--- +title: Language service encryption of data at rest +description: Learn how the Language service encrypts your data when it's persisted to the cloud. +titleSuffix: Azure Cognitive Services +author: aahill +manager: nitinme +ms.service: cognitive-services +ms.topic: conceptual +ms.date: 05/24/2022 +ms.author: aahi +#Customer intent: As a user of the Language service, I want to learn how encryption at rest works. +--- + +# Language services encryption of data at rest + +The Language services automatically encrypt your data when it is persisted to the cloud. The Language services encryption protects your data and helps you meet your organizational security and compliance commitments. + +## About Cognitive Services encryption + +Data is encrypted and decrypted using [FIPS 140-2](https://en.wikipedia.org/wiki/FIPS_140-2) compliant [256-bit AES](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard) encryption. Encryption and decryption are transparent, meaning encryption and access are managed for you. Your data is secure by default and you don't need to modify your code or applications to take advantage of encryption. + +## About encryption key management + +By default, your subscription uses Microsoft-managed encryption keys. There is also the option to manage your subscription with your own keys called customer-managed keys (CMK). CMK offers greater flexibility to create, rotate, disable, and revoke access controls. You can also audit the encryption keys used to protect your data. + +## Customer-managed keys with Azure Key Vault + +There is also an option to manage your subscription with your own keys. Customer-managed keys (CMK), also known as Bring your own key (BYOK), offer greater flexibility to create, rotate, disable, and revoke access controls. You can also audit the encryption keys used to protect your data. + +You must use Azure Key Vault to store your customer-managed keys. You can either create your own keys and store them in a key vault, or you can use the Azure Key Vault APIs to generate keys. The Cognitive Services resource and the key vault must be in the same region and in the same Azure Active Directory (Azure AD) tenant, but they can be in different subscriptions. For more information about Azure Key Vault, see [What is Azure Key Vault?](../../../key-vault/general/overview.md). + +### Customer-managed keys for Language services + +To request the ability to use customer-managed keys, fill out and submit the [Language Service Customer-Managed Key Request Form](https://aka.ms/cogsvc-cmk). It will take approximately 3-5 business days to hear back on the status of your request. Depending on demand, you may be placed in a queue and approved as space becomes available. Once approved for using CMK with Language services, you'll need to create a new Language resource from the Azure portal. 
+ + ### Enable customer-managed keys + +A new Cognitive Services resource is always encrypted using Microsoft-managed keys. It's not possible to enable customer-managed keys at the time that the resource is created. Customer-managed keys are stored in Azure Key Vault, and the key vault must be provisioned with access policies that grant key permissions to the managed identity that is associated with the Cognitive Services resource. The managed identity is available only after the resource is created using the Pricing Tier for CMK. + +To learn how to use customer-managed keys with Azure Key Vault for Cognitive Services encryption, see: + +- [Configure customer-managed keys with Key Vault for Cognitive Services encryption from the Azure portal](../../encryption/cognitive-services-encryption-keys-portal.md) + +Enabling customer-managed keys will also enable a system-assigned managed identity, a feature of Azure AD. Once the system-assigned managed identity is enabled, this resource will be registered with Azure Active Directory. After being registered, the managed identity will be given access to the Key Vault selected during customer-managed key setup. You can learn more about [Managed Identities](../../../active-directory/managed-identities-azure-resources/overview.md). + +> [!IMPORTANT] +> If you disable system-assigned managed identities, access to the key vault will be removed and any data encrypted with the customer keys will no longer be accessible. Any features that depend on this data will stop working. + +> [!IMPORTANT] +> Managed identities do not currently support cross-directory scenarios. When you configure customer-managed keys in the Azure portal, a managed identity is automatically assigned under the covers. If you subsequently move the subscription, resource group, or resource from one Azure AD directory to another, the managed identity associated with the resource is not transferred to the new tenant, so customer-managed keys may no longer work. For more information, see **Transferring a subscription between Azure AD directories** in [FAQs and known issues with managed identities for Azure resources](../../../active-directory/managed-identities-azure-resources/known-issues.md#transferring-a-subscription-between-azure-ad-directories). + +### Store customer-managed keys in Azure Key Vault + +To enable customer-managed keys, you must use an Azure Key Vault to store your keys. You must enable both the **Soft Delete** and **Do Not Purge** properties on the key vault. + +Only RSA keys of size 2048 are supported with Cognitive Services encryption. For more information about keys, see **Key Vault keys** in [About Azure Key Vault keys, secrets and certificates](../../../key-vault/general/about-keys-secrets-certificates.md). + +### Rotate customer-managed keys + +You can rotate a customer-managed key in Azure Key Vault according to your compliance policies. When the key is rotated, you must update the Cognitive Services resource to use the new key URI. To learn how to update the resource to use a new version of the key in the Azure portal, see the section titled **Update the key version** in [Configure customer-managed keys for Cognitive Services by using the Azure portal](../../encryption/cognitive-services-encryption-keys-portal.md). + +Rotating the key does not trigger re-encryption of data in the resource. There is no further action required from the user. + +### Revoke access to customer-managed keys + +To revoke access to customer-managed keys, use PowerShell or Azure CLI.
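As one hedged example, assuming the resource's managed identity is the principal granted key permissions, removing its access policy from the key vault with the Azure CLI revokes that access; the vault name and object ID below are placeholders.

```azurecli
# Remove the access policy that grants the managed identity key permissions (placeholder values)
az keyvault delete-policy --name my-key-vault --object-id 00000000-0000-0000-0000-000000000000
```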
For more information, see [Azure Key Vault PowerShell](/powershell/module/az.keyvault//) or [Azure Key Vault CLI](/cli/azure/keyvault). Revoking access effectively blocks access to all data in the Cognitive Services resource, as the encryption key is inaccessible to Cognitive Services. + +## Next steps + +* [Language Service Customer-Managed Key Request Form](https://aka.ms/cogsvc-cmk) +* [Learn more about Azure Key Vault](../../../key-vault/general/overview.md) \ No newline at end of file diff --git a/articles/cognitive-services/language-service/concepts/model-lifecycle.md b/articles/cognitive-services/language-service/concepts/model-lifecycle.md index db56a37fca3d2..1ac568f08ec0f 100644 --- a/articles/cognitive-services/language-service/concepts/model-lifecycle.md +++ b/articles/cognitive-services/language-service/concepts/model-lifecycle.md @@ -9,7 +9,7 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: conceptual -ms.date: 05/09/2022 +ms.date: 05/25/2022 ms.author: aahi --- @@ -44,6 +44,8 @@ For asynchronous endpoints, use the `model-version` property in the request body The model-version used in your API request will be included in the response object. +> [!NOTE] +> If you are using a model version that is not listed in the table, it has been subject to the expiration policy. Use the table below to find which model versions are supported by each feature: @@ -59,8 +61,8 @@ Use the table below to find which model versions are supported by each feature: | Question answering | `2021-10-01` | `2021-10-01` | | | Text Analytics for health | `2021-05-15`, `2022-03-01` | `2022-03-01` | | | Key phrase extraction | `2021-06-01` | `2021-06-01` | | -| Text summarization | `2021-08-01` | `2021-08-01` | | - +| Document summarization (preview) | `2021-08-01` | | `2021-08-01` | +| Conversation summarization (preview) | `2022-05-15-preview` | | `2022-05-15-preview` | ## Custom features
cognitive-services ms.subservice: language-service ms.topic: conceptual -ms.date: 12/03/2021 +ms.date: 05/27/2022 ms.author: aahi --- @@ -18,11 +18,12 @@ The Language service enables you to send API requests asynchronously, using eith Currently, the following features are available to be used asynchronously: * Entity linking -* Extractive summarization +* Document summarization +* Conversation summarization * Key phrase extraction * Language detection * Named Entity Recognition (NER) -* Personally Identifiable Information (PII) detection +* Customer content detection * Sentiment analysis and opinion mining * Text Analytics for health diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/concepts/entity-components.md b/articles/cognitive-services/language-service/conversational-language-understanding/concepts/entity-components.md index 3868e8c35e7ed..0630f14b5c053 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/concepts/entity-components.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/concepts/entity-components.md @@ -31,6 +31,8 @@ The learned component uses the entity tags you label your utterances with to tra The list component represents a fixed, closed set of related words along with their synonyms. The component performs an exact text match against the list of values you provide as synonyms. Each synonym belongs to a "list key", which can be used as the normalized, standard value for the synonym that will return in the output if the list component is matched. List keys are **not** used for matching. +In multilingual projects, you can specify a different set of synonyms for each language. While using the prediction API, you can specify the language in the input request, which will only match the synonyms associated to that language. + :::image type="content" source="../media/list-component.png" alt-text="A screenshot showing an example of list components for entities." lightbox="../media/list-component.png"::: @@ -46,7 +48,7 @@ The prebuilt component allows you to select from a library of common types such When multiple components are defined for an entity, their predictions may overlap. When an overlap occurs, each entity's final prediction is determined by one of the following options. -### Combine option +### Combine components Combine components as one entity when they overlap by taking the union of all the components. @@ -56,22 +58,22 @@ Use this to combine all components when they overlap. When components are combin Suppose you have an entity called Software that has a list component, which contains “Proseware OS” as an entry. In your utterance data, you have “I want to buy Proseware OS 9” with “Proseware OS 9” tagged as Software: -:::image type="content" source="../media/require-exact-overlap-example-1.svg" alt-text="A screenshot showing an example of exact overlap results for components." lightbox="../media/require-exact-overlap-example-1.svg"::: +:::image type="content" source="../media/union-overlap-example-1.svg" alt-text="A screenshot showing a learned and list entity overlapped." lightbox="../media/union-overlap-example-1.svg"::: By using combine components, the entity will return with the full context as “Proseware OS 9” along with the key from the list component: -:::image type="content" source="../media/require-exact-overlap-example-1.svg" alt-text="A screenshot showing an example of exact overlap results for components." 
lightbox="../media/require-exact-overlap-example-1.svg"::: +:::image type="content" source="../media/union-overlap-example-1-part-2.svg" alt-text="A screenshot showing the result of a combined component." lightbox="../media/union-overlap-example-1-part-2.svg"::: Suppose you had the same utterance but only “OS 9” was predicted by the learned component: -:::image type="content" source="../media/require-exact-overlap-example-1.svg" alt-text="A screenshot showing an example of exact overlap results for components." lightbox="../media/require-exact-overlap-example-1.svg"::: +:::image type="content" source="../media/union-overlap-example-2.svg" alt-text="A screenshot showing an utterance with O S 9 predicted by the learned component." lightbox="../media/union-overlap-example-2.svg"::: With combine components, the entity will still return as “Proseware OS 9” with the key from the list component: -:::image type="content" source="../media/require-exact-overlap-example-1.svg" alt-text="A screenshot showing an example of exact overlap results for components." lightbox="../media/require-exact-overlap-example-1.svg"::: +:::image type="content" source="../media/union-overlap-example-2-part-2.svg" alt-text="A screenshot showing the returned software entity." lightbox="../media/union-overlap-example-2-part-2.svg"::: -### Do not combine option +### Do not combine components Each overlapping component will return as a separate instance of the entity. Apply your own logic after prediction with this option. @@ -79,11 +81,25 @@ Each overlapping component will return as a separate instance of the entity. App Suppose you have an entity called Software that has a list component, which contains “Proseware Desktop” as an entry. In your utterance data, you have “I want to buy Proseware Desktop Pro” with “Proseware Desktop Pro” tagged as Software: -:::image type="content" source="../media/require-exact-overlap-example-1.svg" alt-text="A screenshot showing an example of exact overlap results for components." lightbox="../media/require-exact-overlap-example-1.svg"::: +:::image type="content" source="../media/separated-overlap-example-1.svg" alt-text="A screenshot showing an example of a learned and list entity overlapped." lightbox="../media/separated-overlap-example-1.svg"::: When you do not combine components, the entity will return twice: -:::image type="content" source="../media/require-exact-overlap-example-1.svg" alt-text="A screenshot showing an example of exact overlap results for components." lightbox="../media/require-exact-overlap-example-1.svg"::: +:::image type="content" source="../media/separated-overlap-example-1-part-2.svg" alt-text="A screenshot showing the entity returned twice." lightbox="../media/separated-overlap-example-1-part-2.svg"::: + + +> [!NOTE] +> During public preview of the service, there were 4 available options: **Longest overlap**, **Exact overlap**, **Union overlap**, and **Return all separately**. **Longest overlap** and **exact overlap** are deprecated and will only be supported for projects that previously had those options selected. **Union overlap** has been renamed to **Combine components**, while **Return all separately** has been renamed to **Do not combine components**. + +## How to use components and options + +Components give you the flexibility to define your entity in more than one way. When you combine components, you make sure that each component is represented and you reduce the number of entities returned in your predictions. 
+ +A common practice is to extend a prebuilt component with a list of values that the prebuilt might not support. For example, if you have an **Organization** entity, which has a _General.Organization_ prebuilt component added to it, the entity may not predict all the organizations specific to your domain. You can use a list component to extend the values of the Organization entity and thereby extend the prebuilt component with your own organizations. + +Other times you may be interested in extracting an entity through context, such as a **Product** in a retail project. You would add labels for the learned component of the product entity so the model learns _where_ a product appears, based on its position within the sentence. You may also have a list of products that you already know beforehand that you'd like to always extract. Combining both components in one entity allows you to get both options for the entity. + +When you do not combine components, you simply allow every component to act as an independent entity extractor. One way of using this option is to separate the entities extracted by a list component from the ones extracted through the learned or prebuilt components, so that you can handle and treat them differently. ## Next steps diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/faq.md b/articles/cognitive-services/language-service/conversational-language-understanding/faq.md index fc80815c11902..9d35f9783ad79 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/faq.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/faq.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: quickstart -ms.date: 05/23/2022 +ms.date: 05/31/2022 ms.author: aahi ms.custom: ignite-fall-2021, mode-other --- @@ -21,9 +21,14 @@ Use this article to quickly get the answers to common questions about conversati See the [quickstart](./quickstart.md) to quickly create your first project, or the [how-to article](./how-to/create-project.md) for more details. -## How do I connect conversation language projects to other service applications? -See the [orchestration workflow documentation](../orchestration-workflow/overview.md) for more information. +## Can I use more than one conversational language understanding project together? + +Yes, using orchestration workflow. See the [orchestration workflow documentation](../orchestration-workflow/overview.md) for more information. + +## What is the difference between LUIS and conversational language understanding? + +Conversational language understanding is the next generation of LUIS. ## Training is taking a long time, is this expected? @@ -61,13 +66,31 @@ Yes, you can [import any LUIS application](./concepts/backwards-compatibility.md No, the service only supports JSON format. You can go to LUIS, import the `.LU` file and export it as a JSON file. +## Can I use conversational language understanding with custom question answering? + +Yes, you can use [orchestration workflow](../orchestration-workflow/overview.md) to orchestrate between different conversational language understanding and [question answering](../question-answering/overview.md) projects. Start by creating orchestration workflow projects, then connect your conversational language understanding and custom question answering projects. To perform this action, make sure that your projects are under the same Language resource.
+ +## How do I handle out of scope or domain utterances that aren't relevant to my intents? + +Add any out-of-scope utterances to the [none intent](./concepts/none-intent.md). + +## How do I control the none intent? + +You can control the none intent threshold from the UI through the project settings, by changing the none intent threshold value. The values can be between 0.0 and 1.0. Also, you can change this threshold from the APIs by changing the *confidenceThreshold* parameter in the settings object. Learn more about the [none intent](./concepts/none-intent.md#none-score-threshold). + ## Is there any SDK support? -Yes, only for predictions, and [samples are available](https://aka.ms/cluSampleCode). There is currently no authoring support for the SDK. +Yes, only for predictions, and samples are available for [Python](https://aka.ms/sdk-samples-conversation-python) and [C#](https://aka.ms/sdk-sample-conversation-dot-net). There is currently no authoring support for the SDK. + +## What are the training modes? + + -## Can I connect to Orchestration workflow projects? +|Training mode | Description | Language availability | Pricing | +|---------|---------|---------|---------| +|Standard training | Faster training times for quicker model iteration. | Can only train projects in English. | Included in your [pricing tier](https://azure.microsoft.com/pricing/details/cognitive-services/language-service/). | +|Advanced training | Slower training times using fine-tuned neural network transformer models. | Can train [multilingual projects](language-support.md#multi-lingual-option). | May incur [additional charges](https://azure.microsoft.com/pricing/details/cognitive-services/language-service/). | -Yes, you can connect your CLU project in orchestration workflow. All you need is to make sure that both projects are under the same Language resource +See [training modes](how-to/train-model.md#training-modes) for more information. ## Are there APIs for this feature? diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/glossary.md b/articles/cognitive-services/language-service/conversational-language-understanding/glossary.md index b25fff1aeff4c..041145de67da9 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/glossary.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/glossary.md @@ -21,7 +21,7 @@ Use this article to learn about some of the definitions and terms you may encoun Entities are words in utterances that describe information used to fulfill or identify an intent. If your entity is complex and you would like your model to identify specific parts, you can break your model into subentities. For example, you might want your model to predict an address, but also the subentities of street, city, state, and zipcode. ## F1 score -The F1 score is a function of Precision and Recall. It's needed when you seek a balance between [precision](#precision) and [recall](#recall]. +The F1 score is a function of Precision and Recall. It's needed when you seek a balance between [precision](#precision) and [recall](#recall). ## Intent An intent represents a task or action the user wants to perform. It's a purpose or goal expressed in a user's input, such as booking a flight, or paying a bill.
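For reference, the F1 score mentioned in the glossary above is the harmonic mean of precision and recall:

$$
F_1 = \frac{2 \cdot \text{precision} \cdot \text{recall}}{\text{precision} + \text{recall}}
$$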
diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/how-to/build-schema.md b/articles/cognitive-services/language-service/conversational-language-understanding/how-to/build-schema.md index 8cd146049f46a..71862d7376fc4 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/how-to/build-schema.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/how-to/build-schema.md @@ -43,7 +43,7 @@ They might create an intent to represent each of these actions. They might also To build a project schema within [Language Studio](https://aka.ms/languageStudio): -1. Select **Build schema** from the left side menu. +1. Select **Schema definition** from the left side menu. 2. From the top pivots, you can change the view to be **Intents** or **Entities**. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/how-to/call-api.md b/articles/cognitive-services/language-service/conversational-language-understanding/how-to/call-api.md index aaf42234384ba..f454fead71bd7 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/how-to/call-api.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/how-to/call-api.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: how-to -ms.date: 05/13/2022 +ms.date: 06/03/2022 ms.author: aahi ms.custom: language-service-clu, ignite-fall-2021 --- @@ -20,7 +20,7 @@ You can query the deployment programmatically through the [prediction API](https ## Test deployed model -You can use the Language Studio to submit an utterance, get predictions and visualize the results. +You can use Language Studio to submit an utterance, get predictions and visualize the results. [!INCLUDE [Test model](../includes/language-studio/test-model.md)] @@ -68,8 +68,8 @@ You can also use the client libraries provided by the Azure SDK to send requests |Language |Package version | |---------|---------| - |.NET | [5.2.0-beta.2](https://www.nuget.org/packages/Azure.AI.TextAnalytics/5.2.0-beta.2) | - |Python | [5.2.0b2](https://pypi.org/project/azure-ai-textanalytics/5.2.0b2/) | + |.NET | [1.0.0-beta.3 ](https://www.nuget.org/packages/Azure.AI.Language.Conversations/1.0.0-beta.3) | + |Python | [1.1.0b1](https://pypi.org/project/azure-ai-language-conversations/) | 4. After you've installed the client library, use the following samples on GitHub to start calling the API. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/how-to/create-project.md b/articles/cognitive-services/language-service/conversational-language-understanding/how-to/create-project.md index 3a223c645a4b1..6d9ec21834cd5 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/how-to/create-project.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/how-to/create-project.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: how-to -ms.date: 05/13/2022 +ms.date: 06/03/2022 ms.author: aahi ms.custom: language-service-clu, ignite-fall-2021 --- @@ -34,7 +34,7 @@ Before you start using CLU, you will need an Azure Language resource. 
[!INCLUDE [create a new resource from the Azure portal](../includes/resource-creation-azure-portal.md)] -[!INCLUDE [create a new resource from the Language Studio](../includes/resource-creation-language-studio.md)] +[!INCLUDE [create a new resource from Language Studio](../includes/resource-creation-language-studio.md)] ## Sign in to Language Studio diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/how-to/tag-utterances.md b/articles/cognitive-services/language-service/conversational-language-understanding/how-to/tag-utterances.md index 0262e5405ddd3..b5eeadc6c7d1a 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/how-to/tag-utterances.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/how-to/tag-utterances.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: how-to -ms.date: 04/26/2022 +ms.date: 06/03/2022 ms.author: aahi ms.custom: language-service-clu, ignite-fall-2021 --- @@ -87,11 +87,11 @@ Use the following steps to label your utterances: * *Unique utterances per labeled entity* where each utterance is counted if it contains at least one labeled instance of this entity. * *Utterances per intent* where you can view count of utterances per intent. -:::image type="content" source="../media/label-distribution.png" alt-text="A screenshot showing entity distribution in the Language Studio." lightbox="../media/label-distribution.png"::: +:::image type="content" source="../media/label-distribution.png" alt-text="A screenshot showing entity distribution in Language Studio." lightbox="../media/label-distribution.png"::: > [!NOTE] - > list and prebuilt components are not shown in the tag utterances page, and all labels here only apply to the **learned component**. + > list and prebuilt components are not shown in the data labeling page, and all labels here only apply to the **learned component**. To remove a label: 1. From within your utterance, select the entity you want to remove a label from. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/how-to/view-model-evaluation.md b/articles/cognitive-services/language-service/conversational-language-understanding/how-to/view-model-evaluation.md index 6a1978f8a2d92..e14168fd48900 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/how-to/view-model-evaluation.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/how-to/view-model-evaluation.md @@ -33,6 +33,9 @@ See the [project development lifecycle](../overview.md#project-development-lifec ### [Language studio](#tab/Language-studio) +> [!Note] +> The results here are for the machine learning entity component only. + In the **view model details** page, you'll be able to see all your models, with their current training status, and the date they were last trained. 
[!INCLUDE [Model performance](../includes/language-studio/model-performance.md)] diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/delete-model.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/delete-model.md index 5d36ed8b51055..a86367f82a337 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/delete-model.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/delete-model.md @@ -12,7 +12,7 @@ ms.author: aahi To delete your model from within the [Language Studio](https://aka.ms/LanguageStudio): -1. Select **View model details** from the left side menu. +1. Select **Model performance** from the left side menu. 2. Click on the **model name** you want to delete and click **Delete** from the top menu. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/deploy-model.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/deploy-model.md index 9e89615901325..4c3f82d6ea90c 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/deploy-model.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/deploy-model.md @@ -12,9 +12,9 @@ ms.author: aahi To deploy your model from within the [Language Studio](https://aka.ms/LanguageStudio): -1. Select **Deploy model** from the left side menu. +1. Select **Deploying a model** from the left side menu. -2. Click on **Start deployment job** to start a new deployment job. +2. Click on **Add deployment** to start a new deployment job. :::image type="content" source="../../media/add-deployment-model.png" alt-text="A screenshot showing the model deployment button in Language Studio." lightbox="../../media/add-deployment-model.png"::: diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/get-prediction-url.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/get-prediction-url.md index fae7a0e0ee196..18f0ed8fa0a40 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/get-prediction-url.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/get-prediction-url.md @@ -15,12 +15,10 @@ ms.custom: language-service-clu 1. After the deployment job is completed successfully, select the deployment you want to use and from the top menu click on **Get prediction URL**. - + :::image type="content" source="../../media/prediction-url.png" alt-text="A screenshot showing the prediction URL in Language Studio." lightbox="../../media/prediction-url.png"::: 2. In the window that appears, copy the sample request URL and body into your command line. 3. Replace `` with the actual text you want to send to extract intents and entities from. - - 4. Submit the `POST` cURL request in your terminal or command prompt. You'll receive a 202 response with the API results if the request was successful. 
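As a rough sketch of what the copied prediction request can look like once the placeholders are filled in, the cURL call below sends a single utterance to a deployed project. The endpoint, key, project name (`EmailApp`), deployment name (`production`), body field names, and API version shown here are assumptions; use the exact URL and body that Language Studio generates for you.

```bash
curl -X POST "https://<your-resource>.cognitiveservices.azure.com/language/:analyze-conversations?api-version=2022-05-01" \
  -H "Ocp-Apim-Subscription-Key: <your-key>" \
  -H "Content-Type: application/json" \
  -d '{
    "kind": "Conversation",
    "analysisInput": {
      "conversationItem": { "id": "1", "participantId": "1", "text": "Read my latest email" }
    },
    "parameters": { "projectName": "EmailApp", "deploymentName": "production" }
  }'
```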
diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/import-project.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/import-project.md index fb9c689e72da3..4f739377d997c 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/import-project.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/import-project.md @@ -20,5 +20,5 @@ ms.author: aahi :::image type="content" source="../../media/projects-page.png" alt-text="A screenshot showing the conversation project page in Language Studio." lightbox="../../media/projects-page.png"::: -3. In the window that appears, upload the JSON file you want to import. Make sure that your file follows the [supported JSON format](). +3. In the window that appears, upload the JSON file you want to import. Make sure that your file follows the [supported JSON format](../../concepts/data-formats.md). diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/model-performance.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/model-performance.md index 4f0db66ccbb1a..443463af7d894 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/model-performance.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/model-performance.md @@ -11,7 +11,7 @@ ms.author: aahi 1. Go to your project page in [Language Studio](https://aka.ms/languageStudio). -2. Select **View model details** from the menu on the left side of the screen. +2. Select **Model performance** from the menu on the left side of the screen. 3. In this page you can only view the successfully trained models, F1 score of each model and [model expiration date](../../../concepts/model-lifecycle.md#expiration-timeline). You can click on the model name for more details about its performance. @@ -23,4 +23,4 @@ ms.author: aahi > [!NOTE] > If you don't see any of the intents or entities you have in your model displayed here, it is because they weren't in any of the utterances that were used for the test set. - \ No newline at end of file + diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/test-model.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/test-model.md index 6988ac8eab870..86a6728258e2f 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/test-model.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/test-model.md @@ -5,14 +5,14 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: include -ms.date: 05/12/2022 +ms.date: 06/03/2022 ms.author: aahi --- ## Test the model -To test your model from the Language studio +To test your model from Language studio 1. Select **Test model** from the left side menu. 
diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/train-model.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/train-model.md index c45270a6c5930..ec5ee03d9acac 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/train-model.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/train-model.md @@ -24,9 +24,9 @@ To start training your model from within the [Language Studio](https://aka.ms/la 6. Click on the **Train** button. -5. Click on the Training Job ID from the list, a side pane will appear where you can check **Training progress** and **Job status** and other details for this job. + :::image type="content" source="../../media/train-model.png" alt-text="A screenshot showing the training page in Language Studio." lightbox="../../media/train-model.png"::: - +5. Click on the Training Job ID from the list, a side pane will appear where you can check **Training progress** and **Job status** and other details for this job. > [!NOTE] > * Only successfully completed training jobs will generate models. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/quickstarts/language-studio.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/quickstarts/language-studio.md index 7d3ad3de8f5d7..be746c05912e1 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/quickstarts/language-studio.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/quickstarts/language-studio.md @@ -5,7 +5,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: include -ms.date: 05/16/2022 +ms.date: 06/03/2022 ms.author: aahi ms.custom: ignite-fall-2021 --- @@ -22,15 +22,15 @@ ms.custom: ignite-fall-2021 Once you have a Language resource created, create a conversational language understanding project. A project is a work area for building your custom ML models based on your data. Your project can only be accessed by you and others who have access to the Language resource being used. -For this quickstart, you can download [this sample project]() and import it. This project can predict the intended commands from user input, such as: reading emails, deleting emails, and attaching a document to an email. +For this quickstart, you can download [this sample project](https://go.microsoft.com/fwlink/?linkid=2196152) and import it. This project can predict the intended commands from user input, such as: reading emails, deleting emails, and attaching a document to an email. [!INCLUDE [Import project](../language-studio/import-project.md)] -Once the upload is complete, you will land on **Build schema** page. For this quickstart, the schema is already built, and utterances are already tagged with intents and entities. +Once the upload is complete, you will land on **Build schema** page. For this quickstart, the schema is already built, and utterances are already labeled with intents and entities. ## Train your model -Typically, after you create a project, you should [build schema]() and [tag utterances](). For this quickstart, we already imported a ready project with built schema and tagged utterances. 
+Typically, after you create a project, you should [build schema](../../how-to/build-schema.md) and [label utterances](../../how-to/tag-utterances.md). For this quickstart, we already imported a ready project with built schema and labeled utterances. To train a model, you need to start a training job. The output of a successful training job is your trained model. @@ -38,7 +38,7 @@ To train a model, you need to start a training job. The output of a successful t ## Deploy your model -Generally after training a model you would review its evaluation details. In this quickstart, you will just deploy your model, and make it available for you to try in the Language studio, or you can call the [prediction API](https://aka.ms/clu-apis). +Generally after training a model you would review its evaluation details. In this quickstart, you will just deploy your model, and make it available for you to try in Language studio, or you can call the [prediction API](https://aka.ms/clu-apis). [!INCLUDE [Deploy model](../language-studio/deploy-model.md)] diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/quickstarts/rest-api.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/quickstarts/rest-api.md index 30de64140aa91..4dbf048a91f5c 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/quickstarts/rest-api.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/quickstarts/rest-api.md @@ -14,8 +14,6 @@ ms.custom: ignite-fall-2021 * Azure subscription - [Create one for free](https://azure.microsoft.com/free/cognitive-services). -## Create a Language resource from Azure portal - [!INCLUDE [create a new resource from the Azure portal](../resource-creation-azure-portal.md)] ## Get your resource keys and endpoint diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/cancel-training.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/cancel-training.md index aedf441bdf3e4..993074985b056 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/cancel-training.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/cancel-training.md @@ -16,15 +16,15 @@ Create a **POST** request using the following URL, headers, and JSON body to can Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}/:cancel?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}/:cancel?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{JOB-ID} | This is the training job ID| |`XXXXX-XXXXX-XXXX-XX| -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. 
Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{JOB-ID}` | This is the training job ID. |`XXXXX-XXXXX-XXXX-XX`| +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers @@ -34,5 +34,5 @@ Use the following header to authenticate your request. |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a 204 response indicating success, which means your training job has been canceled. +Once you send your API request, you will receive a 202 response indicating success, which means your training job has been canceled. A successful call results with an `Operation-Location` header used to check the status of the job. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/create-project.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/create-project.md index 7e4e7520fffdd..b0436f1cf2539 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/create-project.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/create-project.md @@ -23,7 +23,7 @@ Use the following URL when creating your API request. Replace the placeholder va |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers @@ -39,14 +39,11 @@ Use the following sample JSON as your body. 
```json { - "projectKind": "conversation", - "settings": { - "confidenceThreshold": 0 - }, "projectName": "{PROJECT-NAME}", - "multilingual": true, + "language": "{LANGUAGE-CODE}", + "projectKind": "Conversation", "description": "Project description", - "language": "{LANGUAGE-CODE}" + "multilingual": true } ``` diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-deployment.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-deployment.md index a04b0b40850b4..cb1c2f7b5b12f 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-deployment.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-deployment.md @@ -9,15 +9,13 @@ ms.date: 05/16/2022 ms.author: aahi --- - - Create a **DELETE** request using the following URL, headers, and JSON body to delete a conversational language understanding deployment. ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{projectName}/deployments/{deploymentName}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{projectName}/deployments/{deploymentName}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -25,7 +23,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{DEPLOYMENT-NAME}` | The name for your deployment name. This value is case-sensitive. | `staging` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-model.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-model.md index 7f576ca3e5bdd..e2b6d98981c2a 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-model.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-model.md @@ -17,7 +17,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{projectName}/models/{trainedModelLabel}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{projectName}/models/{trainedModelLabel}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -25,7 +25,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d |`{YOUR-ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. 
| `myProject` | |`{trainedModelLabel}` | The name for your model name. This value is case-sensitive. | `model1` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -36,4 +36,4 @@ Use the following header to authenticate your request. |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a `202` response indicating success, which means your model has been deleted. +Once you send your API request, you will receive a `204` response indicating success, which means your model has been deleted. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-project.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-project.md index 34a8bb73cc9a9..a41c4f9e5ca9f 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-project.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-project.md @@ -22,7 +22,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/deploy-model.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/deploy-model.md index a5fb5835bb126..b2f77f3eb3982 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/deploy-model.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/deploy-model.md @@ -15,7 +15,7 @@ Create a **PUT** request using the following URL, headers, and JSON body to star #### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -23,7 +23,7 @@ Create a **PUT** request using the following URL, headers, and JSON body to star |`{ENDPOINT}` | The endpoint for authenticating your API request. 
| `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{DEPLOYMENT-NAME}` | The name for your deployment. This value is case-sensitive. | `staging` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | #### Headers @@ -49,7 +49,7 @@ Use the following header to authenticate your request. Once you send your API request, you will receive a `202` response indicating success. In the response headers, extract the `operation-location` value. It will be formatted like this: ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} ``` You can use this URL to get the deployment job status. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/export-project.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/export-project.md index 8040ddc127a98..c84c384e3872d 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/export-project.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/export-project.md @@ -16,14 +16,14 @@ Create a **POST** request using the following URL, headers, and JSON body to exp Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/:export?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/:export?stringIndexType=Utf16CodeUnit&api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-05-01` | ### Headers @@ -37,7 +37,7 @@ Use the following header to authenticate your request. Once you send your API request, you will receive a `202` response indicating success. In the response headers, extract the `operation-location` value. 
It will be formatted like this: ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} ``` `JOB-ID` is used to identify your request, since this operation is asynchronous. Use this URL to get the exported project JSON, using the same authentication method. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-deployment-status.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-deployment-status.md index 0af1708e93f56..f722f463c7841 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-deployment-status.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-deployment-status.md @@ -14,7 +14,7 @@ Use the following **GET** request to get the status of your deployment job. Repl ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -23,7 +23,7 @@ Use the following **GET** request to get the status of your deployment job. Repl |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{DEPLOYMENT-NAME}` | The name for your deployment. This value is case-sensitive. | `staging` | |`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received from the API in response to your model deployment request. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-export-status.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-export-status.md index 12ce15fd89fe9..91c530af03267 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-export-status.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-export-status.md @@ -12,7 +12,7 @@ ms.author: aahi Use the following **GET** request to query the status of your export job. You can use the URL you received from the previous step, or replace the placeholder values below with your own values. 
```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/export/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/export/jobs/{JOB-ID}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -20,7 +20,7 @@ Use the following **GET** request to query the status of your export job. You ca |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your export job status. This is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | #### Headers @@ -34,18 +34,12 @@ Use the following header to authenticate your request. ```json { - "resultUrl": "{RESULT-URL}", - "jobId": "string", - "createdDateTime": "2021-10-19T23:24:41.572Z", - "lastUpdatedDateTime": "2021-10-19T23:24:41.572Z", - "expirationDateTime": "2021-10-19T23:24:41.572Z", - "status": "unknown", - "errors": [ - { - "code": "unknown", - "message": "string" - } - ] + "resultUrl": "{Endpoint}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/export/jobs/xxxxxx-xxxxx-xxxxx-xx/result?api-version={API-VERSION}", + "jobId": "xxxx-xxxxx-xxxxx-xxx", + "createdDateTime": "2022-04-18T15:23:07Z", + "lastUpdatedDateTime": "2022-04-18T15:23:08Z", + "expirationDateTime": "2022-04-25T15:23:07Z", + "status": "succeeded" } ``` diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-import-status.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-import-status.md index 842e32d705d0b..e9bd93f4cde87 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-import-status.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-import-status.md @@ -14,7 +14,7 @@ ms.custom: language-service-clu Use the following **GET** request to query the status of your import job. You can use the URL you received from the previous step, or replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/export/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/import/jobs/{JOB-ID}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -22,7 +22,7 @@ Use the following **GET** request to query the status of your import job. You ca |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your export job status. This is in the `location` header value you received in the previous step. 
| `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | #### Headers @@ -39,31 +39,11 @@ Once you send the request, you will get the following response. Keep polling thi ```json { - "jobId": "string", - "createdDateTime": "2022-04-25T10:54:07.950Z", - "lastUpdatedDateTime": "2022-04-25T10:54:07.950Z", - "expirationDateTime": "2022-04-25T10:54:07.950Z", - "status": "unknown", - "warnings": [ - { - "code": "InvalidRequest", - "message": "string", - "target": "string", - "details": [ - "string" - ] - } - ], - "errors": [ - { - "code": "InvalidRequest", - "message": "string", - "target": "string", - "details": [ - "string" - ] - } - ] + "jobId": "xxxxx-xxxxx-xxxx-xxxxx", + "createdDateTime": "2022-04-18T15:17:20Z", + "lastUpdatedDateTime": "2022-04-18T15:17:22Z", + "expirationDateTime": "2022-04-25T15:17:20Z", + "status": "succeeded" } ``` diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-project-details.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-project-details.md index fd6e2c47da6e3..6fdf64effa2b7 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-project-details.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-project-details.md @@ -12,14 +12,14 @@ ms.author: aahi Use the following **GET** request to get your project details. You can use the URL you received from the previous step, or replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | #### Headers @@ -32,16 +32,15 @@ Use the following header to authenticate your request. 
#### Response body ```json - { - "createdDateTime": "2021-10-19T23:24:41.572Z", - "lastModifiedDateTime": "2021-10-19T23:24:41.572Z", - "lastTrainedDateTime": "2021-10-19T23:24:41.572Z", - "lastDeployedDateTime": "2021-10-19T23:24:41.572Z", - "type": "conversation", - "name": "myProject", - "multiLingual": true, - "description": "string", - "language": "en-us", - "settings": {} - } +{ + "createdDateTime": "2022-04-18T13:53:03Z", + "lastModifiedDateTime": "2022-04-18T13:53:03Z", + "lastTrainedDateTime": "2022-04-18T14:14:28Z", + "lastDeployedDateTime": "2022-04-18T14:49:01Z", + "projectKind": "Conversation", + "projectName": "{PROJECT-NAME}", + "multilingual": true, + "description": "This is a sample conversation project.", + "language": "{LANGUAGE-CODE}" +} ``` diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-training-status.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-training-status.md index 062e0ef42ae4c..094f9bf18ef50 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-training-status.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-training-status.md @@ -14,7 +14,7 @@ Use the following **GET** request to get the status of your model's training pro ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -22,7 +22,7 @@ Use the following **GET** request to get the status of your model's training pro |`{YOUR-ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | |`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received when submitted your training job. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers @@ -43,18 +43,21 @@ Once you send the request, you will get the following response. 
Keep polling thi "modelLabel": "{MODEL-LABEL}", "trainingConfigVersion": "{TRAINING-CONFIG-VERSION}", "trainingMode": "{TRAINING-MODE}", + "estimatedEndDateTime": "2022-04-18T15:47:58.8190649Z", "trainingStatus": { - "percentComplete": 2, - "startDateTime": "{START-TIME}", - "status": "{STATUS}" + "percentComplete": 3, + "startDateTime": "2022-04-18T15:45:06.8190649Z", + "status": "running" }, - "evaluationStatus": { "percentComplete": 0, "status": "notStarted" }, - "estimatedEndDateTime": "{ESTIMATED-END-TIME}" + "evaluationStatus": { + "percentComplete": 0, + "status": "notStarted" + } }, - "jobId": "{JOB-ID}", - "createdDateTime": "{CREATED-TIME}", - "lastUpdatedDateTime": "{UPDATED-TIME}", - "expirationDateTime": "{EXPIRATION-TIME}", + "jobId": "xxxxx-xxxxx-xxxx-xxxxx-xxxx", + "createdDateTime": "2022-04-18T15:44:44Z", + "lastUpdatedDateTime": "2022-04-18T15:45:48Z", + "expirationDateTime": "2022-04-25T15:44:44Z", "status": "running" } ``` @@ -72,5 +75,3 @@ Once you send the request, you will get the following response. Keep polling thi |`lastUpdatedDateTime`| Training job last updated date and time | `2022-04-14T10:23:45Z`| |`expirationDateTime`| Training job expiration date and time | `2022-04-14T10:22:42Z`| - - diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/import-luis-project.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/import-luis-project.md index a64d6f4649448..4489dc9e13f53 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/import-luis-project.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/import-luis-project.md @@ -24,7 +24,7 @@ Use the following URL when creating your API request. Replace the placeholder va |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-05-01` | ### Headers @@ -41,7 +41,7 @@ Use the following sample JSON as your body. ```json { "api-version":"{API-VERSION}" , - "stringIndexType": "Utf16CodeUnit", + "stringIndexType": "Utf16CodeUnit", "metadata": { "projectKind": "conversation", "settings": { @@ -53,6 +53,7 @@ Use the following sample JSON as your body. "language": "{LANGUAGE-CODE}" }, "assets": { + "projectKind": "luis", "intents": [ { "category": "Read" @@ -95,7 +96,7 @@ Use the following sample JSON as your body. |Key |Placeholder |Value | Example | |---------|---------|----------|--| -| `api-version` | `{API-VERSION}` | The version of the API you are calling. The version used here must be the same API version in the URL. | `2022-03-01-preview` | +| `api-version` | `{API-VERSION}` | The version of the API you are calling. The version used here must be the same API version in the URL. | `2022-05-01` | | `projectName` | `{PROJECT-NAME}` | The name of your project. 
This value is case-sensitive. | `EmailApp` | | `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the utterances used in your project. If your project is a multilingual project, choose the [language code](../../language-support.md) of the majority of the utterances. |`en-us`| | `multilingual` | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any [supported language](../../language-support.md); not necessarily a language included in your training documents. | `true`| diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/import-project.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/import-project.md index b2a2980b9c2bc..d14cb442a80a9 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/import-project.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/import-project.md @@ -24,7 +24,7 @@ Use the following URL when creating your API request. Replace the placeholder va |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you're calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you're calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -40,51 +40,51 @@ Use the following sample JSON as your body. ```json { - "api-version":"{API-VERSION}" , - "stringIndexType": "Utf16CodeUnit", - "metadata": { - "projectKind": "conversation", - "settings": { - "confidenceThreshold": 0.7 - }, - "projectName": "{PROJECT-NAME}", - "multilingual": true, - "description": "Trying out CLU", - "language": "{LANGUAGE-CODE}" + "projectFileVersion": "{API-VERSION}", + "stringIndexType": "Utf16CodeUnit", + "metadata": { + "projectKind": "Conversation", + "settings": { + "confidenceThreshold": 0.7 }, + "projectName": "{PROJECT-NAME}", + "multilingual": true, + "description": "Trying out CLU", + "language": "{LANGUAGE-CODE}" + }, "assets": { + "projectKind": "Conversation", "intents": [ { - "category": "Read" + "category": "intent1" }, { - "category": "Delete" + "category": "intent2" } ], "entities": [ { - "category": "Sender" + "category": "entity1" } ], "utterances": [ { - "text": "Open Blake's email", - "language": "{LANGUAGE-CODE}", + "text": "text1", "dataset": "{DATASET}", - "intent": "Read", + "intent": "intent1", "entities": [ { - "category": "Sender", + "category": "entity1", "offset": 5, "length": 5 } ] }, { - "text": "Delete last email", + "text": "text2", "language": "{LANGUAGE-CODE}", "dataset": "{DATASET}", - "intent": "Attach", + "intent": "intent2", "entities": [] } ] @@ -95,7 +95,7 @@ Use the following sample JSON as your body. |Key |Placeholder |Value | Example | |---------|---------|----------|--| -| api-version | `{API-VERSION}` | The version of the API you're calling. 
The version used here must be the same API model version in the URL. See the [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) article to learn more. | `2022-03-01-preview` | +| api-version | `{API-VERSION}` | The version of the API you're calling. The version used here must be the same API model version in the URL. See the [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) article to learn more. | `2022-05-01` | | `projectName` | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `EmailApp` | | `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the utterances used in your project. If your project is a multilingual project, choose the [language code](../../language-support.md) of the majority of the utterances. |`en-us`| | `multilingual` | `true`| A boolean value that enables you to have documents in multiple languages in your dataset. When your model is deployed, you can query the model in any [supported language](../../language-support.md#multi-lingual-option). This includes languages that aren't included in your training documents. | `true`| diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/model-evaluation.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/model-evaluation.md index a60d0dbe8309c..cb0f248fad3ec 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/model-evaluation.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/model-evaluation.md @@ -9,15 +9,13 @@ ms.date: 05/16/2022 ms.author: aahi --- - - Create a **GET** request using the following URL, headers, and JSON body to get the trained model evaluation summary. ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{projectName}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{projectName}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -25,7 +23,7 @@ Create a **GET** request using the following URL, headers, and JSON body to get |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{trainedModelLabel}` | The name for your trained model. This value is case-sensitive. | `Model1` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -35,7 +33,7 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. 
Used for authenticating your API requests.| -|`Content-Type` | application/json | + ### Response Body diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/project-details.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/project-details.md index 37c22cef2ee7d..2c0bba856388d 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/project-details.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/project-details.md @@ -21,7 +21,7 @@ To get your project details, submit a **GET** request using the following URL an |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -37,20 +37,16 @@ Once you send the request, you will get the following response. ```json { - "createdDateTime": "{CREATED-TIME}", - "lastModifiedDateTime": "{CREATED-TIME}", - "lastTrainedDateTime": "{CREATED-TIME}", - "lastDeployedDateTime": "{CREATED-TIME}", - "projectKind": "conversation", - "settings": { - "confidenceThreshold": 0 - }, + "createdDateTime": "2022-04-18T13:53:03Z", + "lastModifiedDateTime": "2022-04-18T13:53:03Z", + "lastTrainedDateTime": "2022-04-18T14:14:28Z", + "lastDeployedDateTime": "2022-04-18T14:49:01Z", + "projectKind": "Conversation", "projectName": "{PROJECT-NAME}", "multilingual": true, - "description": "string", + "description": "This is a sample conversation project.", "language": "{LANGUAGE-CODE}" } - ``` -Once you send your API request, you will receive a `202` response indicating success and JSON response body with your project details. +Once you send your API request, you will receive a `200` response indicating success and JSON response body with your project details. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/query-model.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/query-model.md index 4be4d2bbf46fa..9aeb8bb5e1669 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/query-model.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/query-model.md @@ -21,7 +21,7 @@ Create a **POST** request using the following URL, headers, and JSON body to sta |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). 
| `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -36,20 +36,19 @@ Use the following header to authenticate your request. ```json { - "kind": "CustomConversation", - "analysisInput": { - "conversationItem": { - "participantId":"{JOB-NAME}", - "id":"{JOB-NAME}", - "modality":"text", - "text":"{TEST-UTTERANCE}", - "language":"{LANGUAGE-CODE}", - } - }, - "parameters": { - "projectName": "{PROJECT-NAME}", - "deploymentName": "{DEPLOYMENT-NAME}" - } + "kind": "Conversation", + "analysisInput": { + "conversationItem": { + "id": "1", + "participantId": "1", + "text": "Text 1" + } + }, + "parameters": { + "projectName": "{PROJECT-NAME}", + "deploymentName": "{DEPLOYMENT-NAME}", + "stringIndexType": "TextElement_V8" + } } ``` @@ -59,8 +58,6 @@ Use the following header to authenticate your request. | `participantId` | `{JOB-NAME}` | | `"MyJobName` | | `id` | `{JOB-NAME}` | | `"MyJobName` | | `text` | `{TEST-UTTERANCE}` | The utterance that you want to predict its intent and extract entities from. | `"Read Matt's email` | -| `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the utterance submitted. Learn more about supported language codes [here](../../language-support.md) |`en-us`| -| `id` | `{JOB-NAME}` | | `"MyJobName` | | `projectName` | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | | `deploymentName` | `{DEPLOYMENT-NAME}` | The name of your deployment. This value is case-sensitive. | `staging` | @@ -70,33 +67,37 @@ Once you send the request, you will get the following response for the predictio ```json { - "kind": "CustomConversationResult", - "results": { - "query": "Read Matt's email", - "prediction": { - "projectKind": "conversation", - "topIntent": "Read", - "intents": [ - { - "category": "Read", - "confidenceScore": 0.9403077 - }, - { - "category": "Delete", - "confidenceScore": 0.016843017 - }, - ], - "entities": [ - { - "category": "SenderName", - "text": "Matt", - "offset": 5, - "length": 4, - "confidenceScore": 1 - } - ] + "kind": "ConversationResult", + "result": { + "query": "Text1", + "prediction": { + "topIntent": "inten1", + "projectKind": "Conversation", + "intents": [ + { + "category": "intent1", + "confidenceScore": 1 + }, + { + "category": "intent2", + "confidenceScore": 0 + }, + { + "category": "intent3", + "confidenceScore": 0 + } + ], + "entities": [ + { + "category": "entity1", + "text": 'text1", + "offset": 29, + "length": 12, + "confidenceScore": 1 } + ] } + } } ``` diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/swap-deployment.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/swap-deployment.md index 3f1920cd1cea6..05b799eee97ca 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/swap-deployment.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/swap-deployment.md @@ -15,14 +15,14 @@ Create a **POST** request using the following URL, headers, and JSON body to sta ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/deployments:swap?api-version={API-VERSION} 
+{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/deployments:swap?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -32,7 +32,6 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | ### Request Body @@ -44,10 +43,9 @@ Use the following header to authenticate your request. ``` -|Key| value| Example| -|--|--|--| -|`firstDeploymentName` | The name for your first deployment. This value is case-sensitive. | `production` | -|`secondDeploymentName` | The name for your second deployment. This value is case-sensitive. | `staging` | - +|Key|Placeholder| Value| Example| +|--|--|--|--| +|firstDeploymentName |`{FIRST-DEPLOYMENT-NAME}`| The name for your first deployment. This value is case-sensitive. | `production` | +|secondDeploymentName | `{SECOND-DEPLOYMENT-NAME}`|The name for your second deployment. This value is case-sensitive. | `staging` | Once you send your API request, you will receive a `202` response indicating success. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/train-model.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/train-model.md index bd8462fba7c0e..a47c8d085244a 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/train-model.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/train-model.md @@ -16,14 +16,14 @@ Create a **POST** request using the following URL, headers, and JSON body to sub Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/:train?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/:train?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. 
| `2022-05-01` | ### Headers @@ -32,7 +32,7 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | + ### Request body @@ -41,12 +41,12 @@ Use the following object in your request. The model will be named `MyModel` once ```json { "modelLabel": "{MODEL-NAME}", - "trainingConfigVersion": "{CONFIG-VERSION}", "trainingMode": "{TRAINING-MODE}", + "trainingConfigVersion": "{CONFIG-VERSION}", "evaluationOptions": { "kind": "percentage", - "trainingSplitPercentage": 0, - "testingSplitPercentage": 0 + "testingSplitPercentage": 20, + "trainingSplitPercentage": 80 } } ``` @@ -65,6 +65,6 @@ Use the following object in your request. The model will be named `MyModel` once Once you send your API request, you will receive a `202` response indicating success. In the response headers, extract the `operation-location` value. It will be formatted like this: ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}?api-version={API-VERSION} ``` You can use this URL to get the training job status. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/language-support.md b/articles/cognitive-services/language-service/conversational-language-understanding/language-support.md index 35309f037c2b3..d773cc83797d3 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/language-support.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/language-support.md @@ -19,6 +19,9 @@ Use this article to learn about the languages currently supported by CLU feature ## Multi-lingual option +> [!TIP] +> See [How to train a model](how-to/train-model.md#training-modes) for information on which training mode you should use for multilingual projects. + With conversational language understanding, you can train a model in one language and use to predict intents and entities from utterances in another language. This feature is powerful because it helps save time and effort. Instead of building separate projects for every language, you can handle multi-lingual dataset in one project. Your dataset doesn't have to be entirely in the same language but you should enable the multi-lingual option for your project while creating or later in project settings. If you notice your model performing poorly in certain languages during the evaluation process, consider adding more data in these languages to your training set. You can train your project entirely with English utterances, and query it in: French, German, Mandarin, Japanese, Korean, and others. Conversational language understanding makes it easy for you to scale your projects to multiple languages by using multilingual technology to train your models. 
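
As a rough illustration of the multi-lingual option, the sketch below sends a French utterance to a project that was trained only on English utterances. This is a minimal sketch rather than an official sample: the resource endpoint, key, project name, and deployment name are placeholders you must supply, the runtime URL path is an assumption on my part, and the request body simply mirrors the prediction request shown in the query-model reference above — verify both against the API version you are using.

```python
# Minimal sketch (not an official sample): query an English-trained CLU
# deployment with a French utterance. Endpoint, key, project, and deployment
# values are placeholders; the URL path is assumed from the prediction API
# reference, and the body mirrors the documented prediction request.
import requests

endpoint = "https://<YOUR-RESOURCE>.cognitiveservices.azure.com"  # placeholder
url = f"{endpoint}/language/:analyze-conversations"               # assumed runtime path
params = {"api-version": "2022-05-01"}
headers = {
    "Ocp-Apim-Subscription-Key": "<YOUR-RESOURCE-KEY>",  # placeholder
    "Content-Type": "application/json",
}
body = {
    "kind": "Conversation",
    "analysisInput": {
        "conversationItem": {
            "id": "1",
            "participantId": "1",
            "text": "Lis le dernier e-mail de Matt",  # French query against an English-trained project
        }
    },
    "parameters": {
        "projectName": "<PROJECT-NAME>",        # placeholder
        "deploymentName": "<DEPLOYMENT-NAME>",  # placeholder
        "stringIndexType": "TextElement_V8",
    },
}

response = requests.post(url, params=params, headers=headers, json=body)
prediction = response.json()["result"]["prediction"]
print(prediction["topIntent"])
```

If the multilingual option wasn't enabled when the project was created, enable it in project settings before relying on cross-lingual queries; otherwise confidence scores for unseen languages may be noticeably lower.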
diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/media/prediction-url.png b/articles/cognitive-services/language-service/conversational-language-understanding/media/prediction-url.png
index 41a9ed122d4dc..2e388bf621a8c 100644
Binary files a/articles/cognitive-services/language-service/conversational-language-understanding/media/prediction-url.png and b/articles/cognitive-services/language-service/conversational-language-understanding/media/prediction-url.png differ
diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/media/select-custom-clu.png b/articles/cognitive-services/language-service/conversational-language-understanding/media/select-custom-clu.png
index abcc27ea11be0..28c97fea76da8 100644
Binary files a/articles/cognitive-services/language-service/conversational-language-understanding/media/select-custom-clu.png and b/articles/cognitive-services/language-service/conversational-language-understanding/media/select-custom-clu.png differ
diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/media/separated-overlap-example-1-part-2.svg b/articles/cognitive-services/language-service/conversational-language-understanding/media/separated-overlap-example-1-part-2.svg
new file mode 100644
index 0000000000000..5827f8f2d111c
--- /dev/null
+++ b/articles/cognitive-services/language-service/conversational-language-understanding/media/separated-overlap-example-1-part-2.svg
@@ -0,0 +1,22 @@
diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/media/separated-overlap-example-1.svg b/articles/cognitive-services/language-service/conversational-language-understanding/media/separated-overlap-example-1.svg
new file mode 100644
index 0000000000000..010871a296d5a
--- /dev/null
+++ b/articles/cognitive-services/language-service/conversational-language-understanding/media/separated-overlap-example-1.svg
@@ -0,0 +1,14 @@
diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/media/train-model-tutorial.png b/articles/cognitive-services/language-service/conversational-language-understanding/media/train-model-tutorial.png
deleted file mode 100644
index 2bd94dacdb8e4..0000000000000
Binary files a/articles/cognitive-services/language-service/conversational-language-understanding/media/train-model-tutorial.png and /dev/null differ
diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/media/union-overlap-example-1-part-2.svg b/articles/cognitive-services/language-service/conversational-language-understanding/media/union-overlap-example-1-part-2.svg
new file mode 100644
index 0000000000000..a122d9d98ec18
--- /dev/null
+++ b/articles/cognitive-services/language-service/conversational-language-understanding/media/union-overlap-example-1-part-2.svg
@@ -0,0 +1,10 @@
diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/media/union-overlap-example-1.svg b/articles/cognitive-services/language-service/conversational-language-understanding/media/union-overlap-example-1.svg
new file mode 100644
index 0000000000000..45bbcdb16be27
--- /dev/null
+++ b/articles/cognitive-services/language-service/conversational-language-understanding/media/union-overlap-example-1.svg
@@ -0,0 +1,13 @@
diff --git 
a/articles/cognitive-services/language-service/conversational-language-understanding/media/union-overlap-example-2-part-2.svg b/articles/cognitive-services/language-service/conversational-language-understanding/media/union-overlap-example-2-part-2.svg new file mode 100644 index 0000000000000..c70b137cb6bd9 --- /dev/null +++ b/articles/cognitive-services/language-service/conversational-language-understanding/media/union-overlap-example-2-part-2.svg @@ -0,0 +1,10 @@ + + + + + + + + + diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/media/union-overlap-example-2.svg b/articles/cognitive-services/language-service/conversational-language-understanding/media/union-overlap-example-2.svg new file mode 100644 index 0000000000000..2d6a0595eaaba --- /dev/null +++ b/articles/cognitive-services/language-service/conversational-language-understanding/media/union-overlap-example-2.svg @@ -0,0 +1,14 @@ + + + + + + + + + + + + + diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/overview.md b/articles/cognitive-services/language-service/conversational-language-understanding/overview.md index e605fbbbe099d..d1b3884d34d5a 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/overview.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/overview.md @@ -17,7 +17,7 @@ ms.custom: language-service-clu, ignite-fall-2021 Conversational language understanding is one of the custom features offered by [Azure Cognitive Service for Language](../overview.md). It is a cloud-based API service that applies machine-learning intelligence to enable you to build natural language understanding component to be used in an end-to-end conversational application. -Conversational language understanding (CLU) enables users to build custom natural language understanding models to predict the overall intention of an incoming utterance and extract important information from it. CLU only provides the intelligence to understand the input text for the client application and doesn't perform any actions. By creating a CLU project, developers can iteratively tag utterances, train and evaluate model performance before making it available for consumption. The quality of the tagged data greatly impacts model performance. To simplify building and customizing your model, the service offers a custom web portal that can be accessed through the [Language studio](https://aka.ms/languageStudio). You can easily get started with the service by following the steps in this [quickstart](quickstart.md). +Conversational language understanding (CLU) enables users to build custom natural language understanding models to predict the overall intention of an incoming utterance and extract important information from it. CLU only provides the intelligence to understand the input text for the client application and doesn't perform any actions. By creating a CLU project, developers can iteratively label utterances, train and evaluate model performance before making it available for consumption. The quality of the labeled data greatly impacts model performance. To simplify building and customizing your model, the service offers a custom web portal that can be accessed through the [Language studio](https://aka.ms/languageStudio). You can easily get started with the service by following the steps in this [quickstart](quickstart.md). 
This documentation contains the following article types: @@ -57,9 +57,9 @@ Follow these steps to get the most out of your model: 1. **Build schema**: Know your data and define the actions and relevant information that needs to be recognized from user's input utterances. In this step you create the [intents](glossary.md#intent) that you want to assign to user's utterances, and the relevant [entities](glossary.md#entity) you want extracted. -2. **Tag data**: The quality of data tagging is a key factor in determining model performance. +2. **Label data**: The quality of data labeling is a key factor in determining model performance. -3. **Train model**: Your model starts learning from your tagged data. +3. **Train model**: Your model starts learning from your labeled data. 4. **View model evaluation details**: View the evaluation details for your model to determine how well it performs when introduced to new data. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/tutorials/bot-framework.md b/articles/cognitive-services/language-service/conversational-language-understanding/tutorials/bot-framework.md index bafb6f422a29d..abb7198593cfe 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/tutorials/bot-framework.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/tutorials/bot-framework.md @@ -9,7 +9,7 @@ ms.reviewer: cahann, hazemelh ms.service: cognitive-services ms.subservice: language-service ms.topic: tutorial -ms.date: 05/17/2022 +ms.date: 05/25/2022 --- # Integrate conversational language understanding with Bot Framework @@ -23,23 +23,22 @@ This tutorial will explain how to integrate your own conversational language und - Create a [Language resource](https://portal.azure.com/#create/Microsoft.CognitiveServicesTextAnalytics) in the Azure portal to get your key and endpoint. After it deploys, select **Go to resource**. - You will need the key and endpoint from the resource you create to connect your bot to the API. You'll paste your key and endpoint into the code below later in the tutorial. -- Download the **Core Bot** for CLU [sample in C#](https://aka.ms/clu-botframework-overview). - - Clone the entire Bot Framework Samples repository to get access to this sample project. - +- Download the **CoreBotWithCLU** [sample](https://aka.ms/clu-botframework-overview). + - Clone the entire samples repository to get access to this solution. ## Import a project in conversational language understanding -1. Copy the [FlightBooking.json](https://aka.ms/clu-botframework-json) file in the **Core Bot** for CLU sample. +1. Download the [FlightBooking.json](https://aka.ms/clu-botframework-json) file in the **Core Bot with CLU** sample, in the _Cognitive Models_ folder. 2. Sign into the [Language Studio](https://language.cognitive.azure.com/) and select your Language resource. 3. Navigate to [Conversational Language Understanding](https://language.cognitive.azure.com/clu/projects) and click on the service. This will route you the projects page. Click the Import button next to the Create New Project button. Import the FlightBooking.json file with the project name as **FlightBooking**. This will automatically import the CLU project with all the intents, entities, and utterances. :::image type="content" source="../media/import.png" alt-text="A screenshot showing where to import a J son file." lightbox="../media/import.png"::: -4. 
Once the project is loaded, click on **Training** on the left. Press on Start a training job, provide the model name **v1** and press Train. All other settings such as **Standard Training** and the evaluation settings can be left as is. +4. Once the project is loaded, click on **Training jobs** on the left. Press on Start a training job, provide the model name **v1** and press Train. All other settings such as **Standard Training** and the evaluation settings can be left as is. - :::image type="content" source="../media/train-model-tutorial.png" alt-text="A screenshot of the training page in C L U." lightbox="../media/train-model-tutorial.png"::: + :::image type="content" source="../media/train-model.png" alt-text="A screenshot of the training page in C L U." lightbox="../media/train-model.png"::: -5. Once training is complete, click to **Deployments** on the left. Click on Add Deployment and create a new deployment with the name **Testing**, and assign model **v1** to the deployment. +5. Once training is complete, click to **Deploying a model** on the left. Click on Add Deployment and create a new deployment with the name **Testing**, and assign model **v1** to the deployment. :::image type="content" source="../media/deploy-model-tutorial.png" alt-text="A screenshot of the deployment page within the deploy model screen in C L U." lightbox="../media/deploy-model-tutorial.png"::: @@ -52,7 +51,7 @@ In the **Core Bot** sample, update your [appsettings.json](https://aka.ms/clu-bo - The _CluProjectName_ is **FlightBooking**. - The _CluDeploymentName_ is **Testing** - The _CluAPIKey_ can be either of the keys in the **Keys and Endpoint** section for your Language resource in the [Azure portal](https://portal.azure.com). You can also copy your key from the Project Settings tab in CLU. -- The _CluAPIHostName_ is the endpoint found in the **Keys and Endpoint** section for your Language resource in the Azure portal. Note the format should be ```.cognitiveservices.azure.com``` without `https://` +- The _CluAPIHostName_ is the endpoint found in the **Keys and Endpoint** section for your Language resource in the Azure portal. Note the format should be ```.cognitiveservices.azure.com``` without `https://`. ```json { @@ -67,7 +66,7 @@ In the **Core Bot** sample, update your [appsettings.json](https://aka.ms/clu-bo ## Identify integration points -In the Core Bot sample, under the CLU folder, you can check out the **FlightBookingRecognizer.cs** file. Here is where the CLU API call to the deployed endpoint is made to retrieve the CLU prediction for intents and entities. +In the Core Bot sample, you can check out the **FlightBookingRecognizer.cs** file. Here is where the CLU API call to the deployed endpoint is made to retrieve the CLU prediction for intents and entities. ```csharp public FlightBookingRecognizer(IConfiguration configuration) @@ -91,7 +90,7 @@ In the Core Bot sample, under the CLU folder, you can check out the **FlightBook ``` -Under the folder Dialogs folder, find the **MainDialog** which uses the following to make a CLU prediction. +Under the Dialogs folder, find the **MainDialog** which uses the following to make a CLU prediction. 
```csharp var cluResult = await _cluRecognizer.RecognizeAsync(stepContext.Context, cancellationToken); @@ -136,7 +135,7 @@ Run the sample locally on your machine **OR** run the bot from a terminal or fro ### Run the bot from a terminal -From a terminal, navigate to `samples/csharp_dotnetcore/90.core-bot-with-clu/90.core-bot-with-clu` +From a terminal, navigate to the `cognitive-service-language-samples/CoreBotWithCLU` folder. Then run the following command @@ -149,8 +148,8 @@ dotnet run 1. Launch Visual Studio 1. From the top navigation menu, select **File**, **Open**, then **Project/Solution** -1. Navigate to the `samples/csharp_dotnetcore/90.core-bot-with-clu/90.core-bot-with-clu` folder -1. Select the `CoreBotWithCLU.csproj` file +1. Navigate to the `cognitive-service-language-samples/CoreBotWithCLU` folder +1. Select the `CoreBotCLU.csproj` file 1. Press `F5` to run the project diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/concepts/data-formats.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/concepts/data-formats.md index 80207d290aa55..25a4bd243d816 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/concepts/data-formats.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/concepts/data-formats.md @@ -8,14 +8,14 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: conceptual -ms.date: 05/06/2022 +ms.date: 06/03/2022 ms.author: aahi ms.custom: language-service-custom-ner, ignite-fall-2021, event-tier1-build-2022 --- # Accepted custom NER data formats -If you are trying to [import your data](../how-to/create-project.md#import-project) into custom NER, it has to follow a specific format. If you don't have data to import, you can [create your project](../how-to/create-project.md) and use the Language Studio to [label your documents](../how-to/tag-data.md). +If you are trying to [import your data](../how-to/create-project.md#import-project) into custom NER, it has to follow a specific format. If you don't have data to import, you can [create your project](../how-to/create-project.md) and use Language Studio to [label your documents](../how-to/tag-data.md). 
## Labels file format @@ -23,62 +23,78 @@ Your Labels file should be in the `json` format below to be used in [importing]( ```json { + "projectFileVersion": "2022-05-01", + "stringIndexType": "Utf16CodeUnit", + "metadata": { + "projectKind": "CustomEntityRecognition", + "storageInputContainerName": "{CONTAINER-NAME}", + "projectName": "{PROJECT-NAME}", + "multilingual": false, + "description": "Project-description", + "language": "en-us" + }, + "assets": { "entities": [ - { - "category": "Entity1" - }, - { - "category": "Entity2" - } + { + "category": "Entity1" + }, + { + "category": "Entity2" + } ], "documents": [ - { - "location": "{DOCUMENT-NAME}", - "language": "{LANGUAGE-CODE}", - "dataset": "{DATASET}", - "entities": [ - { - "regionOffset": 0, - "regionLength": 500, - "labels": [ - { - "category": "Entity1", - "offset": 25, - "length": 10 - }, - { - "category": "Entity2", - "offset": 120, - "length": 8 - } - ] - } + { + "location": "{DOCUMENT-NAME}", + "language": "{LANGUAGE-CODE}", + "dataset": "{DATASET}", + "entities": [ + { + "regionOffset": 0, + "regionLength": 500, + "labels": [ + { + "category": "Entity1", + "offset": 25, + "length": 10 + }, + { + "category": "Entity2", + "offset": 120, + "length": 8 + } ] - }, - { - "location": "{DOCUMENT-NAME}", - "language": "{LANGUAGE-CODE}", - "dataset": "{DATASET}", - "entities": [ - { - "regionOffset": 0, - "regionLength": 100, - "labels": [ - { - "category": "Entity2", - "offset": 20, - "length": 5 - } - ] - } + } + ] + }, + { + "location": "{DOCUMENT-NAME}", + "language": "{LANGUAGE-CODE}", + "dataset": "{DATASET}", + "entities": [ + { + "regionOffset": 0, + "regionLength": 100, + "labels": [ + { + "category": "Entity2", + "offset": 20, + "length": 5 + } ] - } + } + ] + } ] + } } + ``` |Key |Placeholder |Value | Example | |---------|---------|----------|--| +| `multilingual` | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language (not necessarily included in your training documents). See [language support](../language-support.md#multi-lingual-option) to learn more about multilingual support. | `true`| +|`projectName`|`{PROJECT-NAME}`|Project name|`myproject`| +| storageInputContainerName|`{CONTAINER-NAME}`|Container name|`mycontainer`| | `entities` | | Array containing all the entity types you have in the project. These are the entity types that will be extracted from your documents into.| | | `documents` | | Array containing all the documents in your project and list of the entities labeled within each document. | [] | | `location` | `{DOCUMENT-NAME}` | The location of the documents in the storage container. 
Since all the documents are in the root of the container this should be the document name.|`doc1.txt`| diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/concepts/evaluation-metrics.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/concepts/evaluation-metrics.md index 0e60a4704e2f7..42520a4047eed 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/concepts/evaluation-metrics.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/concepts/evaluation-metrics.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: conceptual -ms.date: 05/06/2022 +ms.date: 05/24/2022 ms.author: aahi ms.custom: language-service-custom-ner, ignite-fall-2021, event-tier1-build-2022 --- @@ -133,5 +133,5 @@ Similarly, ## Next steps -* [View a model's evaluation in Language Studio](../how-to/view-model-evaluation.md) +* [View a model's performance in Language Studio](../how-to/view-model-evaluation.md) * [Train a model](../how-to/train-model.md) diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/glossary.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/glossary.md index 30da5f4a24d1b..d42b48b358588 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/glossary.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/glossary.md @@ -30,7 +30,7 @@ For example, in the sentence "*John borrowed 25,000 USD from Fred.*" the entitie | Loan Amount | *25,000 USD* | ## F1 score -The F1 score is a function of Precision and Recall. It's needed when you seek a balance between [precision](#precision) and [recall](#recall]. +The F1 score is a function of Precision and Recall. It's needed when you seek a balance between [precision](#precision) and [recall](#recall). ## Model diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/call-api.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/call-api.md index ce4770538c4c5..7c887cd6b089e 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/call-api.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/call-api.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: how-to -ms.date: 05/09/2022 +ms.date: 06/03/2022 ms.author: aahi ms.devlang: csharp, python ms.custom: language-service-custom-ner, event-tier1-build-2022 @@ -17,11 +17,11 @@ ms.custom: language-service-custom-ner, event-tier1-build-2022 # Query deployment to extract entities After the deployment is added successfully, you can query the deployment to extract entities from your text based on the model you assigned to the deployment. -You can query the deployment programmatically using the [Prediction API](https://aka.ms/ct-runtime-swagger) or through the [Client libraries (Azure SDK)](#get-task-results). +You can query the deployment programmatically using the [Prediction API](https://aka.ms/ct-runtime-api) or through the [Client libraries (Azure SDK)](#get-task-results). ## Test deployed model -You can use the Language Studio to submit the custom entity recognition task and visualize the results. 
+You can use Language Studio to submit the custom entity recognition task and visualize the results. [!INCLUDE [Test model](../includes/language-studio/test-model.md)] @@ -57,10 +57,10 @@ First you will need to get your resource key and endpoint: |Language |Package version | |---------|---------| - |.NET | [5.2.0-beta.2](https://www.nuget.org/packages/Azure.AI.TextAnalytics/5.2.0-beta.2) | - |Java | [5.2.0-beta.2](https://mvnrepository.com/artifact/com.azure/azure-ai-textanalytics/5.2.0-beta.2) | - |JavaScript | [5.2.0-beta.2](https://www.npmjs.com/package/@azure/ai-text-analytics/v/5.2.0-beta.2) | - |Python | [5.2.0b2](https://pypi.org/project/azure-ai-textanalytics/5.2.0b2/) | + |.NET | [5.2.0-beta.3](https://www.nuget.org/packages/Azure.AI.TextAnalytics/5.2.0-beta.3) | + |Java | [5.2.0-beta.3](https://mvnrepository.com/artifact/com.azure/azure-ai-textanalytics/5.2.0-beta.3) | + |JavaScript | [6.0.0-beta.1](https://www.npmjs.com/package/@azure/ai-text-analytics/v/6.0.0-beta.1) | + |Python | [5.2.0b4](https://pypi.org/project/azure-ai-textanalytics/5.2.0b4/) | 4. After you've installed the client library, use the following samples on GitHub to start calling the API. @@ -80,4 +80,7 @@ First you will need to get your resource key and endpoint: ## Next steps -* [Custom NER overview](../overview.md) +* [Enrich a Cognitive Search index tutorial](../tutorials/cognitive-search.md) + + + diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/create-project.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/create-project.md index 2e4a8484a7fc9..fa17db55f964b 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/create-project.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/create-project.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: how-to -ms.date: 05/06/2022 +ms.date: 06/03/2022 ms.author: aahi ms.custom: language-service-custom-ner, references_regions, ignite-fall-2021, event-tier1-build-2022 --- @@ -27,7 +27,7 @@ Before you start using custom NER, you will need: Before you start using custom NER, you will need an Azure Language resource. It is recommended to create your Language resource and connect a storage account to it in the Azure portal. Creating a resource in the Azure portal lets you create an Azure storage account at the same time, with all of the required permissions pre-configured. You can also read further in the article to learn how to use a pre-existing resource, and configure it to work with custom named entity recognition. -You also will need an Azure storage account where you will upload your `.txt` files that will be used to train a model to extract entities. +You also will need an Azure storage account where you will upload your `.txt` documents that will be used to train a model to extract entities. > [!NOTE] > * You need to have an **owner** role assigned on the resource group to create a Language resource. @@ -41,9 +41,12 @@ You can create a resource in the following ways: * Language Studio * PowerShell +> [!Note] +> You shouldn't move the storage account to a different resource group or subscription once it's linked with the Language resource. 
+ [!INCLUDE [create a new resource from the Azure portal](../includes/resource-creation-azure-portal.md)] -[!INCLUDE [create a new resource from the Language Studio](../includes/language-studio/resource-creation-language-studio.md)] +[!INCLUDE [create a new resource from Language Studio](../includes/language-studio/resource-creation-language-studio.md)] [!INCLUDE [create a new resource with Azure PowerShell](../includes/resource-creation-powershell.md)] @@ -100,7 +103,7 @@ If you have already labeled data, you can use it to get started with the service ### [Language Studio](#tab/language-studio) -[!INCLUDE [Delete project using the Language studio](../includes/language-studio/delete-project.md)] +[!INCLUDE [Delete project using Language studio](../includes/language-studio/delete-project.md)] ### [Rest APIs](#tab/rest-api) @@ -112,4 +115,4 @@ If you have already labeled data, you can use it to get started with the service * You should have an idea of the [project schema](design-schema.md) you will use to label your data. -* After your project is created, you can start [tagging your data](tag-data.md), which will inform your entity extraction model how to interpret text, and is used for training and evaluation. +* After your project is created, you can start [labeling your data](tag-data.md), which will inform your entity extraction model how to interpret text, and is used for training and evaluation. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/design-schema.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/design-schema.md index 43fc100909e4e..7ee892774fa5a 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/design-schema.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/design-schema.md @@ -29,13 +29,13 @@ The schema defines the entity types/categories that you need your model to extra * Avoid entity types ambiguity. - **Ambiguity** happens when entity types you select are similar to each other. The more ambiguous your schema the more tagged data you will need to differentiate between different entity types. + **Ambiguity** happens when entity types you select are similar to each other. The more ambiguous your schema the more labeled data you will need to differentiate between different entity types. For example, if you are extracting data from a legal contract, to extract "Name of first party" and "Name of second party" you will need to add more examples to overcome ambiguity since the names of both parties look similar. Avoid ambiguity as it saves time, effort, and yields better results. * Avoid complex entities. Complex entities can be difficult to pick out precisely from text, consider breaking it down into multiple entities. - For example, extracting "Address" would be challenging if it's not broken down to smaller entities. There are so many variations of how addresses appear, it would take large number of tagged entities to teach the model to extract an address, as a whole, without breaking it down. However, if you replace "Address" with "Street Name", "PO Box", "City", "State" and "Zip", the model will require fewer tags per entity. + For example, extracting "Address" would be challenging if it's not broken down to smaller entities. 
There are so many variations of how addresses appear, it would take a large number of labeled entities to teach the model to extract an address, as a whole, without breaking it down. However, if you replace "Address" with "Street Name", "PO Box", "City", "State" and "Zip", the model will require fewer labels per entity. ## Data selection @@ -61,10 +61,14 @@ As a prerequisite for creating a project, your training data needs to be uploade * [Create and upload documents from Azure](../../../../storage/blobs/storage-quickstart-blobs-portal.md#create-a-container) * [Create and upload documents using Azure Storage Explorer](../../../../vs-azure-tools-storage-explorer-blobs.md) -You can only use `.txt` documents. If your data is in other format, you can use [CLUtils parse command](https://github.com/microsoft/CognitiveServicesLanguageUtilities/blob/main/CustomTextAnalytics.CLUtils/Solution/CogSLanguageUtilities.ViewLayer.CliCommands/Commands/ParseCommand/README.md) to change your file format. +You can only use `.txt` documents. If your data is in another format, you can use [CLUtils parse command](https://github.com/microsoft/CognitiveServicesLanguageUtilities/blob/main/CustomTextAnalytics.CLUtils/Solution/CogSLanguageUtilities.ViewLayer.CliCommands/Commands/ParseCommand/README.md) to change your document format. -You can upload an annotated dataset, or you can upload an unannotated one and [tag your data](../how-to/tag-data.md) in Language studio. +You can upload an annotated dataset, or you can upload an unannotated one and [label your data](../how-to/tag-data.md) in Language studio. +## Test set + +When defining the testing set, make sure to include example documents that are not present in the training set. Defining the testing set is an important step in calculating the [model performance](view-model-evaluation.md#model-details). Also, make sure that the testing set includes documents that represent all entities used in your project. + ## Next steps If you haven't already, create a custom NER project. If it's your first time using custom NER, consider following the [quickstart](../quickstart.md) to create an example project. You can also see the [how-to article](../how-to/create-project.md) for more details on what you need to create a project. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/improve-model.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/improve-model.md index 5aa83798d5b8a..9031459753463 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/improve-model.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/improve-model.md @@ -15,13 +15,13 @@ ms.custom: language-service-custom-ner, ignite-fall-2021, event-tier1-build-2022 # Improve model performance -In some cases, the model is expected to extract entities that are inconsistent with your tagged ones. In this page you can observe these inconsistencies and decide on the needed changes needed to improve your model performance. +In some cases, the model is expected to extract entities that are inconsistent with your labeled ones. In this page you can observe these inconsistencies and decide on the changes needed to improve your model performance. ## Prerequisites * A successfully [created project](create-project.md) with a configured Azure blob storage account * Text data that [has been uploaded](design-schema.md#data-preparation) to your storage account.
-* [Tagged data](tag-data.md) +* [Labeled data](tag-data.md) * A [successfully trained model](train-model.md) * Reviewed the [model evaluation details](view-model-evaluation.md) to determine how your model is performing. * Familiarized yourself with the [evaluation metrics](../concepts/evaluation-metrics.md). @@ -31,7 +31,7 @@ See the [project development lifecycle](../overview.md#project-development-lifec ## Review test set predictions -After you have viewed your [model's evaluation](view-model-evaluation.md), you'll have formed an idea on your model performance. In this page, you can view how your model performs vs how it's expected to perform. You can view predicted and tagged entities side by side for each document in your test set. You can review entities that were extracted differently than they were originally tagged. +After you have viewed your [model's evaluation](view-model-evaluation.md), you'll have formed an idea on your model performance. In this page, you can view how your model performs vs how it's expected to perform. You can view predicted and labeled entities side by side for each document in your test set. You can review entities that were extracted differently than they were originally labeled. To review inconsistent predictions in the [test set](train-model.md) from within the [Language Studio](https://aka.ms/LanguageStudio): @@ -42,15 +42,15 @@ To review inconsistent predictions in the [test set](train-model.md) from within 3. For easier analysis, you can toggle **Show incorrect predictions only** to view entities that were incorrectly predicted only. You should see all documents that include entities that were incorrectly predicted. -5. You can expand each document to see more details about predicted and tagged entities. +5. You can expand each document to see more details about predicted and labeled entities. Use the following information to help guide model improvements. - * If entity `X` is constantly identified as entity `Y`, it means that there is ambiguity between these entity types and you need to reconsider your schema. Learn more about [data selection and schema design](design-schema.md#schema-design). Another solution is to consider tagging more instances of these entities, to help the model improve and differentiate between them. + * If entity `X` is constantly identified as entity `Y`, it means that there is ambiguity between these entity types and you need to reconsider your schema. Learn more about [data selection and schema design](design-schema.md#schema-design). Another solution is to consider labeling more instances of these entities, to help the model improve and differentiate between them. * If a complex entity is repeatedly not predicted, consider [breaking it down to simpler entities](design-schema.md#schema-design) for easier extraction. - * If an entity is predicted while it was not tagged in your data, this means to you need to review your tags. Be sure that all instances of an entity are properly tagged in all documents. + * If an entity is predicted while it was not labeled in your data, this means you need to review your labels. Be sure that all instances of an entity are properly labeled in all documents. :::image type="content" source="../media/review-predictions.png" alt-text="A screenshot showing model predictions in Language Studio."
lightbox="../media/review-predictions.png"::: diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/tag-data.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/tag-data.md index 7cb4cc52e42ec..74d68bbc82531 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/tag-data.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/tag-data.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: how-to -ms.date: 05/09/2022 +ms.date: 05/24/2022 ms.author: aahi ms.custom: language-service-custom-ner, ignite-fall-2021, event-tier1-build-2022 --- @@ -30,7 +30,7 @@ See the [project development lifecycle](../overview.md#project-development-lifec ## Data labeling guidelines -After [preparing your data, designing your schema](design-schema.md) and [creating your project](create-project.md), you will need to label your data. Labeling your data is important so your model knows which words will be associated with the entity types you need to extract. When you label your data in [Language Studio](https://aka.ms/languageStudio) (or import labeled data), these labels will be stored in the JSON file in your storage container that you have connected to this project. +After [preparing your data, designing your schema](design-schema.md) and [creating your project](create-project.md), you will need to label your data. Labeling your data is important so your model knows which words will be associated with the entity types you need to extract. When you label your data in [Language Studio](https://aka.ms/languageStudio) (or import labeled data), these labels will be stored in the JSON document in your storage container that you have connected to this project. As you label your data, keep in mind: @@ -39,8 +39,8 @@ As you label your data, keep in mind: * The precision, consistency and completeness of your labeled data are key factors to determining model performance. * **Label precisely**: Label each entity to its right type always. Only include what you want extracted, avoid unnecessary data in your labels. - * **Label consistently**: The same entity should have the same label across all the files. - * **Label completely**: Label all the instances of the entity in all your files. You can use the [auto-labeling feature](use-autotagging.md) to ensure complete labeling. + * **Label consistently**: The same entity should have the same label across all the documents. + * **Label completely**: Label all the instances of the entity in all your documents. You can use the [auto-labeling feature](use-autotagging.md) to ensure complete labeling. > [!NOTE] > There is no fixed number of labels that can guarantee your model will perform the best. Model performance is dependent on possible ambiguity in your [schema](design-schema.md), and the quality of your labeled data. Nevertheless, we recommend having around 50 labeled instances per entity type. @@ -56,10 +56,10 @@ Use the following steps to label your data: >[!TIP] - > You can use the filters in top menu to view the unlabeled files so that you can start labeling them. + > You can use the filters in top menu to view the unlabeled documents so that you can start labeling them. > You can also use the filters to view the documents that are labeled with a specific entity type. -3. 
Change to a single file view from the left side in the top menu or select a specific file to start labeling. You can find a list of all `.txt` files available in your project to the left. You can use the **Back** and **Next** button from the bottom of the page to navigate through your documents. +3. Change to a single document view from the left side in the top menu or select a specific document to start labeling. You can find a list of all `.txt` documents available in your project to the left. You can use the **Back** and **Next** button from the bottom of the page to navigate through your documents. > [!NOTE] > If you enabled multiple languages for your project, you will find a **Language** dropdown in the top menu, which lets you select the language of each document. @@ -77,18 +77,18 @@ Use the following steps to label your data: The below screenshot shows labeling using a brush. - :::image type="content" source="../media/tag-options.png" alt-text="A screenshot showing the tagging options offered in Custom NER." lightbox="../media/tag-options.png"::: + :::image type="content" source="../media/tag-options.png" alt-text="A screenshot showing the labeling options offered in Custom NER." lightbox="../media/tag-options.png"::: 6. In the right side pane under the **Labels** pivot you can find all the entity types in your project and the count of labeled instances per each. -6. In the bottom section of the right side pane you can add the current file you are viewing to the training set or the testing set. By default all the documents are added to your training set. Learn more about [training and testing sets](train-model.md#data-splitting) and how they are used for model training and evaluation. +6. In the bottom section of the right side pane you can add the current document you are viewing to the training set or the testing set. By default all the documents are added to your training set. Learn more about [training and testing sets](train-model.md#data-splitting) and how they are used for model training and evaluation. > [!TIP] > If you are planning on using **Automatic** data splitting, use the default option of assigning all the documents into your training set. 7. Under the **Distribution** pivot you can view the distribution across training and testing sets. You have two options for viewing: - * *Files with at least one label* where each document is counted if it contains at least one labeled instance of this entity. - * *Total instances throughout files* where you can view count of all labeled instances of a specific entity type. + * *Total instances* where you can view count of all labeled instances of a specific entity type. + * *documents with at least one label* where each document is counted if it contains at least one labeled instance of this entity. 7. When you're labeling, your changes will be synced periodically, if they have not been saved yet you will find a warning at the top of your page. If you want to save manually, click on **Save labels** button at the bottom of the page. 
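The labels that Language Studio writes to the JSON file in your storage container locate each entity by character `offset` and `length`, counted in UTF-16 code units (the `stringIndexType` of `Utf16CodeUnit` shown in the labels file format earlier in this diff). As a rough illustration of how such a pair maps back to text, consider the C# sketch below; the sentence is reused from the glossary example in these docs, and the specific category, offset, and length values are made up for illustration rather than taken from any real project.

```csharp
using System;

class LabelSpanExample
{
    static void Main()
    {
        // Example sentence reused from the glossary section of these docs.
        string documentText = "John borrowed 25,000 USD from Fred.";

        // A hypothetical label as it might appear in the labels JSON:
        // { "category": "Loan Amount", "offset": 14, "length": 10 }
        string category = "Loan Amount";
        int offset = 14;
        int length = 10;

        // C# strings are indexed in UTF-16 code units, which lines up with the
        // Utf16CodeUnit string index type used by the labels file.
        string labeledSpan = documentText.Substring(offset, length);
        Console.WriteLine($"{category}: \"{labeledSpan}\"");   // Loan Amount: "25,000 USD"
    }
}
```

Keeping offsets in the same UTF-16 unit as the service avoids off-by-one errors when documents contain characters outside the Basic Multilingual Plane.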
diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/train-model.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/train-model.md index 6d41d8c967028..25e2452c4807d 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/train-model.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/train-model.md @@ -15,13 +15,12 @@ ms.custom: language-service-custom-ner, ignite-fall-2021, event-tier1-build-2022 # Train your custom named entity recognition model -Training is the process where the model learns from your [labeled data](tag-data.md). After training is completed, you will be able to [view model performance](view-model-evaluation.md) to determine if you need to [improve your model](improve-model.md). +Training is the process where the model learns from your [labeled data](tag-data.md). After training is completed, you'll be able to [view model performance](view-model-evaluation.md) to determine if you need to [improve your model](improve-model.md). To train a model, you start a training job and only successfully completed jobs create a model. Training jobs expire after seven days, which means you won't be able to retrieve the job details after this time. If your training job completed successfully and a model was created, the model won't be affected. You can only have one training job running at a time, and you can't start other jobs in the same project. The training times can be anywhere from a few minutes when dealing with few documents, up to several hours depending on the dataset size and the complexity of your schema. -Model evaluation is triggered automatically after training is completed successfully. The evaluation process starts by using the trained model to extract user defined entities from documents in the testing set, and compares them with the provided data labels (which establishes a baseline of truth). The results are returned so you can review the [model’s performance](view-model-evaluation.md). ## Prerequisites @@ -41,7 +40,7 @@ It's recommended to make sure that all your entities are adequately represented Custom NER supports two methods for data splitting: -* **Automatically splitting the testing set from training data**:The system will split your tagged data between the training and testing sets, according to the percentages you choose. The recommended percentage split is 80% for training and 20% for testing. +* **Automatically splitting the testing set from training data**:The system will split your labeled data between the training and testing sets, according to the percentages you choose. The recommended percentage split is 80% for training and 20% for testing. > [!NOTE] > If you choose the **Automatically splitting the testing set from training data** option, only the data assigned to training set will be split according to the percentages provided. @@ -82,4 +81,4 @@ Training could take sometime depending on the size of your training data and com ## Next steps -After training is completed, you will be able to [view model performance](view-model-evaluation.md) to optionally [improve your model](improve-model.md) if needed. Once you're satisfied with your model, you can deploy it, making it available to use for [extracting entities](call-api.md) from text. 
+After training is completed, you'll be able to [view model performance](view-model-evaluation.md) to optionally [improve your model](improve-model.md) if needed. Once you're satisfied with your model, you can deploy it, making it available to use for [extracting entities](call-api.md) from text. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/use-autotagging.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/use-autotagging.md index cc700b79be206..6174c04b00d20 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/use-autotagging.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/use-autotagging.md @@ -13,73 +13,73 @@ ms.date: 05/09/2022 ms.author: aahi --- -# How to use autotagging +# How to use auto-labeling -[Tagging process](tag-data.md) is an important part of preparing your dataset. Since this process requires a lot of time and effort, you can use the autotagging feature to automatically tag your entities. With autotagging, you can start tagging a few of your files, train a model, then create an autotagging job to produce tagged entities on your behalf, automatically. This feature can save you the time and effort of manually tagging your entities. +[Labeling process](tag-data.md) is an important part of preparing your dataset. Since this process requires a lot of time and effort, you can use the auto-labeling feature to automatically label your entities. With auto-labeling, you can start labeling a few of your documents, train a model, then create an auto-labeling job to produce labeled entities on your behalf, automatically. This feature can save you the time and effort of manually labeling your entities. ## Prerequisites -Before you can use autotagging, you must have a [trained model](train-model.md). +Before you can use auto-labeling, you must have a [trained model](train-model.md). -## Trigger an autotagging job +## Trigger an auto-labeling job -When you trigger an autotagging job, there's a monthly limit of 5,000 text records per month, per resource. This means the same limit will apply on all projects within the same resource. +When you trigger an auto-labeling job, there's a limit of 5,000 text records per month, per resource. This means the same limit will apply to all projects within the same resource. > [!TIP] -> A text record is calculated as the ceiling of (Number of characters in a file / 1,000). For example, if a file has 8921 characters, the number of text records is: > > `ceil(8921/1000) = ceil(8.921)`, which is 9 text records. +> A text record is calculated as the ceiling of (Number of characters in a document / 1,000). For example, if a document has 8921 characters, the number of text records is: > > `ceil(8921/1000) = ceil(8.921)`, which is 9 text records. -1. From the left navigation menu, select **Autotag data**. -2. Select **Trigger Autotag** to start an autotagging job +1. From the left navigation menu, select **Data auto-labeling**. +2. Select **Trigger Auto-label** to start an auto-labeling job. :::image type="content" source="../media/trigger-autotag.png" alt-text="A screenshot showing how to trigger an autotag job." lightbox="../media/trigger-autotag.png"::: -3. Choose a trained model. It's recommended to check the model performance before using it for autotagging. +3. Choose a trained model. It's recommended to check the model performance before using it for auto-labeling.
:::image type="content" source="../media/choose-model.png" alt-text="A screenshot showing how to choose trained model for autotagging." lightbox="../media/choose-model.png"::: -4. Choose the entities you want to be included in the autotagging job. By default, all entities are selected. You can see the total tags, precision and recall of each entity. It's recommended to include entities that perform well to ensure the quality of the automatically tagged entities. +4. Choose the entities you want to be included in the auto-labeling job. By default, all entities are selected. You can see the total labels, precision and recall of each entity. It's recommended to include entities that perform well to ensure the quality of the automatically labeled entities. :::image type="content" source="../media/choose-entities.png" alt-text="A screenshot showing which entities to be included in autotag job." lightbox="../media/choose-entities.png"::: -5. Choose the files you want to be automatically tagged. You'll see the number of text records of each file. When you select one or more files, you should see the number of texts records selected. It's recommended to choose the untagged files from the filter. +5. Choose the documents you want to be automatically labeled. You'll see the number of text records of each document. When you select one or more documents, you should see the number of texts records selected. It's recommended to choose the unlabeled documents from the filter. > [!NOTE] - > * If an entity was automatically tagged, but has a user defined tag, only the user defined tag will be used and be visible. - > * You can view the files by clicking on the file name. + > * If an entity was automatically labeled, but has a user defined label, only the user defined label will be used and be visible. + > * You can view the documents by clicking on the document name. - :::image type="content" source="../media/choose-files.png" alt-text="A screenshot showing which files to be included in the autotag job." lightbox="../media/choose-files.png"::: + :::image type="content" source="../media/choose-files.png" alt-text="A screenshot showing which documents to be included in the autotag job." lightbox="../media/choose-files.png"::: -6. Select **Autotag** to trigger the autotagging job. -You should see the model used, number of files included in the autotag job, number of text records and entities to be automatically tagged. Autotag jobs can take anywhere from a few seconds to a few minutes, depending on the number of files you included. +6. Select **Autolabel** to trigger the auto-labeling job. +You should see the model used, number of documents included in the auto-labeling job, number of text records and entities to be automatically labeled. Auto-labeling jobs can take anywhere from a few seconds to a few minutes, depending on the number of documents you included. :::image type="content" source="../media/review-autotag.png" alt-text="A screenshot showing the review screen for an autotag job." lightbox="../media/review-autotag.png"::: -## Review the tagged files +## Review the auto labeled documents -When the autotag job is complete, you can see the output files in the **Tag data** page of Language Studio. Select **Review files with autotags** to view the files with the **Auto tagged** filter applied. +When the auto-labeling job is complete, you can see the output documents in the **Data labeling** page of Language Studio. 
Select **Review documents with autolabels** to view the documents with the **Auto labeled** filter applied. -:::image type="content" source="../media/open-autotag-files.png" alt-text="A screenshot showing the autotagged files, and an autotagged job ID." lightbox="../media/open-autotag-files.png"::: +:::image type="content" source="../media/open-autotag-files.png" alt-text="A screenshot showing the auto-labeled documents." lightbox="../media/open-autotag-files.png"::: -Entities that have been automatically tagged will appear with a dotted line. These entities will have two selectors (a checkmark and an "X") that will let you accept or reject the automatic tag. +Entities that have been automatically labeled will appear with a dotted line. These entities will have two selectors (a checkmark and an "X") that will let you accept or reject the automatic label. -Once an entity is accepted, the dotted line will change to solid line, and this tag will be included in any further model training and be a user defined tag. +Once an entity is accepted, the dotted line will change to a solid line, and this label will be included in any further model training and be a user defined label. -Alternatively, you can accept or reject all automatically tagged entities within the file, using **Accept all** or **Reject all** in the top right corner of the screen. +Alternatively, you can accept or reject all automatically labeled entities within the document, using **Accept all** or **Reject all** in the top right corner of the screen. -After you accept or reject the tagged entities, select **Save tags** to apply the changes. +After you accept or reject the labeled entities, select **Save labels** to apply the changes. > [!NOTE] -> * We recommend validating automatically tagged entities before accepting them. -> * All tags that were not accepted will be deleted when you train your model. +> * We recommend validating automatically labeled entities before accepting them. +> * All labels that were not accepted will be deleted when you train your model. -:::image type="content" source="../media/accept-reject-entities.png" alt-text="A screenshot showing how to accept and reject autotagged entities." lightbox="../media/accept-reject-entities.png"::: +:::image type="content" source="../media/accept-reject-entities.png" alt-text="A screenshot showing how to accept and reject auto-labeled entities." lightbox="../media/accept-reject-entities.png"::: ## Next steps -* Learn more about [tagging your data](tag-data.md). +* Learn more about [labeling your data](tag-data.md).
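As a side note on the text record limit described in the auto-labeling section above, the calculation is just the documented ceiling formula. The C# sketch below applies it; the helper name is illustrative and isn't part of any SDK.

```csharp
using System;

class TextRecordExample
{
    // A text record is the ceiling of (characters in the document / 1,000),
    // per the tip in the auto-labeling section above.
    static int CountTextRecords(string documentText) =>
        (int)Math.Ceiling(documentText.Length / 1000.0);

    static void Main()
    {
        // A document with 8,921 characters counts as ceil(8.921) = 9 text records.
        string document = new string('a', 8921);
        Console.WriteLine(CountTextRecords(document)); // 9
    }
}
```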
diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/view-model-evaluation.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/view-model-evaluation.md index 7c747570febd6..35e3824f0b98c 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/view-model-evaluation.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/view-model-evaluation.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: how-to -ms.date: 05/09/2022 +ms.date: 05/24/2022 ms.author: aahi ms.custom: language-service-custom-ner, ignite-fall-2021, event-tier1-build-2022 --- @@ -19,7 +19,7 @@ ms.custom: language-service-custom-ner, ignite-fall-2021, event-tier1-build-2022 After your model has finished training, you can view the model performance and see the extracted entities for the documents in the test set. > [!NOTE] -> Using the **Automatically split the testing set from training data** option may result in different model evaluation result every time you [train a new model](train-model.md), as the test set is selected randomly from the data. To make sure that the evaulation is calcualted on the same test set every time you train a model, make sure to use the **Use a manual split of training and testing data** option when starting a training job and define your **Test** documents when [tagging data](tag-data.md). +> Using the **Automatically split the testing set from training data** option may result in different model evaluation results every time you [train a new model](train-model.md), as the test set is selected randomly from the data. To make sure that the evaluation is calculated on the same test set every time you train a model, make sure to use the **Use a manual split of training and testing data** option when starting a training job and define your **Test** documents when [labeling data](tag-data.md). ## Prerequisites Before viewing model evaluation, you need: * A successfully [created project](create-project.md) with a configured Azure blob storage account. * Text data that [has been uploaded](design-schema.md#data-preparation) to your storage account. -* [Tagged data](tag-data.md) +* [Labeled data](tag-data.md) * A [successfully trained model](train-model.md) See the [project development lifecycle](../overview.md#project-development-lifecycle) for more information. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/cancel-training.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/cancel-training.md index fe564261ffe5f..26eceb3ae9422 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/cancel-training.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/cancel-training.md @@ -6,8 +6,8 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: include -ms.date: 05/05/2022 +ms.date: 05/24/2022 ms.author: aahi --- -To cancel a training job from within [Language Studio](https://aka.ms/languageStudio), go to the **Train model** page. Select the training job you want to cancel and click on **Cancel** from the top menu.
+To cancel a training job from within [Language Studio](https://aka.ms/languageStudio), go to the **Training jobs** page. Select the training job you want to cancel and click on **Cancel** from the top menu. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/create-project.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/create-project.md index 331f1ff94c52f..1ef3beafda93b 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/create-project.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/create-project.md @@ -6,13 +6,13 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: include -ms.date: 05/05/2022 +ms.date: 06/03/2022 ms.author: aahi --- 1. Sign into the [Language Studio](https://aka.ms/languageStudio). A window will appear to let you select your subscription and Language resource. Select the Language resource you created in the above step. -2. Under the **Entity extraction** section of Language Studio, select **Custom named entity recognition**. +2. Under the **Extract information** section of Language Studio, select **Custom named entity recognition**. :::image type="content" source="../../media/select-custom-ner.png" alt-text="A screenshot showing the location of custom NER in the Language Studio landing page." lightbox="../../media/select-custom-ner.png"::: diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/delete-deployment.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/delete-deployment.md index 990ea888892bb..f950e22e05509 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/delete-deployment.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/delete-deployment.md @@ -7,8 +7,8 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: include -ms.date: 05/09/2022 +ms.date: 05/24/2022 ms.author: aahi --- -To delete a deployment from within [Language Studio](https://aka.ms/laguageStudio), go to the **Deploy model** page. Select the deployment you want to delete and click on **Delete deployment** from the top menu. +To delete a deployment from within [Language Studio](https://aka.ms/laguageStudio), go to the **Deploying a model** page. Select the deployment you want to delete and click on **Delete deployment** from the top menu. 
diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/delete-model.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/delete-model.md index dbf82d8c12157..7b1b193061e25 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/delete-model.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/delete-model.md @@ -6,14 +6,14 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: include -ms.date: 04/25/2022 +ms.date: 05/24/2022 ms.author: aahi --- To delete your model from within the [Language Studio](https://aka.ms/LanguageStudio): -1. Select **View model details** from the left side menu. +1. Select **Model performance** from the left side menu. 2. Click on the **model name** you want to delete and click **Delete** from the top menu. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/delete-project.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/delete-project.md index d8cd6bd5d4d93..81ee806355da3 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/delete-project.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/delete-project.md @@ -5,9 +5,9 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: include -ms.date: 02/02/2022 +ms.date: 05/24/2022 ms.author: aahi ms.custom: language-service-custom-classification, event-tier1-build-2022 --- -When you don't need your project anymore, you can delete your project using [Language Studio](https://aka.ms/custom-extraction). Select **Custom named entity recognition (NER)** in the left navigation menu, select project you want to delete and click on **Delete** from the top menu. +When you don't need your project anymore, you can delete your project using [Language Studio](https://aka.ms/custom-extraction). Select **Custom named entity recognition (NER)** from the top, select project you want to delete and click on **Delete** from the top menu. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/deploy-model.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/deploy-model.md index aaaefe03313ae..1b7f6e00e63ae 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/deploy-model.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/deploy-model.md @@ -12,9 +12,9 @@ ms.author: aahi To deploy your model from within the [Language Studio](https://aka.ms/LanguageStudio): -1. Select **Deploy model** from the left side menu. +1. Select **Deploying a model** from the left side menu. -2. Click on **Start deployment job** to start a new deployment job. +2. Click on **Add deployment** to start a new deployment job. 
:::image type="content" source="../../media/deploy-model.png" alt-text="A screenshot showing the deployment button" lightbox="../../media/deploy-model.png"::: diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/model-evaluation.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/model-evaluation.md index e87f7e488e37b..5689ba6b91a53 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/model-evaluation.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/model-evaluation.md @@ -12,7 +12,7 @@ ms.author: aahi 1. Go to your project page in [Language Studio](https://aka.ms/languageStudio). -2. Select **View model details** from the menu on the left side of the screen. +2. Select **Model performance** from the menu on the left side of the screen. 3. In this page you can only view the successfully trained models, F1 score of each model and [model expiry date](../../../concepts/model-lifecycle.md#custom-features). You can click on the model name for more details about its performance. @@ -22,7 +22,7 @@ ms.author: aahi 5. The [confusion matrix](../../concepts/evaluation-metrics.md#confusion-matrix) for the model is located under **Test set confusion matrix**. - :::image type="content" source="../../media/model-details.png" alt-text="A screenshot of the model performance metrics in Language Studio." lightbox="../../media/model-details.png"::: + :::image type="content" source="../../media/confusion-matrix.png" alt-text="A screenshot of a confusion matrix in Language Studio." lightbox="../../media/confusion-matrix.png"::: > [!NOTE] -> Entities that are neither tagged nor predicted in the test set will not be part of the displayed results. +> Entities that are neither labeled nor predicted in the test set will not be part of the displayed results. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/project-details.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/project-details.md index b10f7a1662c65..3c9ccc530510d 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/project-details.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/project-details.md @@ -11,7 +11,7 @@ ms.date: 05/10/2022 ms.author: aahi --- -1. Go to your project settings page in [Language Studio](https://aka.ms/languageStudio). +1. Go to your **project settings** page in [Language Studio](https://aka.ms/languageStudio). 2. You can see project details. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/swap-deployment.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/swap-deployment.md index 1db708ebc471b..4d4eacb2eda84 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/swap-deployment.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/swap-deployment.md @@ -13,8 +13,6 @@ ms.author: aahi To swap deployments from within [Language Studio](https://aka.ms/laguageStudio): -1. 
In the **Deploy model** page, select the two deployments you want to swap and click on **Swap deployments** from the top menu. +1. In the **Deploying a model** page, select the two deployments you want to swap and click on **Swap deployments** from the top menu. 2. From the window that appears, select the names of the deployments you want to swap. - - diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/test-model.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/test-model.md index 29a60115841d9..28f69ba5cac16 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/test-model.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/test-model.md @@ -11,7 +11,7 @@ ms.custom: language-service-custom-classification, event-tier1-build-2022 --- To test your deployed models from within the [Language Studio](https://aka.ms/LanguageStudio): -1. Select **Test model** from the left side menu. +1. Select **Testing deployments** from the left side menu. 2. Select the model you want to test. You can only test models that are assigned to deployments. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/train-model.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/train-model.md index eaff19a252f82..52309daa25a6d 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/train-model.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/language-studio/train-model.md @@ -12,7 +12,7 @@ ms.author: aahi To start training your model from within the [Language Studio](https://aka.ms/LanguageStudio): -1. Select **Train model** from the left side menu. +1. Select **Training jobs** from the left side menu. 2. Select **Start a training job** from the top menu. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/quickstarts/language-studio.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/quickstarts/language-studio.md index b6e6d49a8aeae..69608df641019 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/quickstarts/language-studio.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/quickstarts/language-studio.md @@ -6,7 +6,7 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: include -ms.date: 05/05/2022 +ms.date: 06/02/2022 ms.author: aahi --- @@ -19,7 +19,7 @@ ms.author: aahi Before you can use custom NER, you’ll need to create an Azure Language resource, which will give you the credentials that you need to create a project and start training a model. You’ll also need an Azure storage account, where you can upload your dataset that will be used in building your model. > [!IMPORTANT] -> To get started quickly, we recommend creating a new Azure Language resource using the steps provided in this article, which will let you create the Language, and create and/or connect a storage account at the same time, which is easier than doing it later. 
+> To get started quickly, we recommend creating a new Azure Language resource using the steps provided in this article, which will let you create the Language resource, and create and/or connect a storage account at the same time, which is easier than doing it later. > > If you have a pre-existing resource that you'd like to use, you will need to connect it to storage account. See [guidance to using a pre-existing resource](../../how-to/create-project.md#using-a-pre-existing-language-resource) for information. @@ -43,7 +43,7 @@ Typically after you create a project, you go ahead and start [tagging the docume ## Deploy your model -Generally after training a model you would review it's [evaluation details](../../how-to/view-model-evaluation.md) and [make improvements](../../how-to/improve-model.md) if necessary. In this quickstart, you will just deploy your model, and make it available for you to try in the Language studio, or you can call the [prediction API](https://aka.ms/ct-runtime-swagger). +Generally after training a model you would review it's [evaluation details](../../how-to/view-model-evaluation.md) and [make improvements](../../how-to/improve-model.md) if necessary. In this quickstart, you will just deploy your model, and make it available for you to try in Language studio, or you can call the [prediction API](https://aka.ms/ct-runtime-swagger). [!INCLUDE [Deploy a model using Language Studio](../language-studio/deploy-model.md)] diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/quickstarts/rest-api.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/quickstarts/rest-api.md index 122d27ac1021d..117de325b1674 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/quickstarts/rest-api.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/quickstarts/rest-api.md @@ -5,7 +5,7 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: include -ms.date: 05/06/2022 +ms.date: 06/03/2022 ms.author: aahi --- @@ -18,7 +18,7 @@ ms.author: aahi Before you can use custom NER, you’ll need to create an Azure Language resource, which will give you the credentials that you need to create a project and start training a model. You’ll also need an Azure storage account, where you can upload your dataset that will be used in building your model. > [!IMPORTANT] -> To get started quickly, we recommend creating a new Azure Language resource using the steps provided in this article, which will let you create the Language, and create and/or connect a storage account at the same time, which is easier than doing it later. +> To get started quickly, we recommend creating a new Azure Language resource using the steps provided in this article, which will let you create the Language resource, and create and/or connect a storage account at the same time, which is easier than doing it later. > > If you have a pre-existing resource that you'd like to use, you will need to connect it to storage account. See [create project](../../how-to/create-project.md#using-a-pre-existing-language-resource) for information. @@ -64,7 +64,7 @@ Training could take sometime between 10 and 30 minutes for this sample dataset. 
## Deploy your model -Generally after training a model you would review it's [evaluation details](../../how-to/view-model-evaluation.md) and [make improvements](../../how-to/improve-model.md) if necessary. In this quickstart, you will just deploy your model, and make it available for you to try in the Language studio, or you can call the [prediction API](https://aka.ms/ct-runtime-swagger). +Generally after training a model you would review it's [evaluation details](../../how-to/view-model-evaluation.md) and [make improvements](../../how-to/improve-model.md) if necessary. In this quickstart, you will just deploy your model, and make it available for you to try in Language Studio, or you can call the [prediction API](https://aka.ms/ct-runtime-swagger). ### Start deployment job diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/resource-creation-azure-portal.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/resource-creation-azure-portal.md index 38cb4538e218e..aafe53acd78db 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/resource-creation-azure-portal.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/resource-creation-azure-portal.md @@ -6,7 +6,7 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: include -ms.date: 05/05/2022 +ms.date: 06/02/2022 ms.author: aahi --- @@ -14,24 +14,18 @@ ms.author: aahi 1. Go to the [Azure portal](https://portal.azure.com/#create/Microsoft.CognitiveServicesTextAnalytics) to create a new Azure Language resource. -2. Click on **Create a new resource** - -3. In the window that appears, search for **Language service** - -4. Click **Create** - -5. In the window that appears, select **Custom text classification & custom named entity recognition** from the custom features. Click **Continue to create your resource**. +1. In the window that appears, select **Custom text classification & custom named entity recognition (preview)** from the custom features. Click **Continue to create your resource** at the bottom of the screen. :::image type="content" source="../media/select-custom-feature-azure-portal.png" alt-text="A screenshot showing custom text classification & custom named entity recognition in the Azure portal." lightbox="../media/select-custom-feature-azure-portal.png"::: -6. Create a Language resource with following details. +1. Create a Language resource with following details. |Instance detail | Description | |---------|---------| - |Location | The [location](../service-limits.md#regional-availability) of your Language resource. | - |Pricing tier | The [pricing tier](../service-limits.md#language-resource-limits) for your Language resource. | + |Location | The [location](../service-limits.md#regional-availability) of your Language resource. You can use "West US 2" for this quickstart. | + |Pricing tier | The [pricing tier](../service-limits.md#language-resource-limits) for your Language resource. You can use the Free (F0) tier for this quickstart. | -7. In the **Custom text classification & custom named entity recognition** section, select an existing storage account or select **Create a new storage account**. These values are to help you get started, and not necessarily the [storage account values](/azure/storage/common/storage-account-overview) you’ll want to use in production environments. 
To avoid latency during building your project connect to storage accounts in the same region as your Language resource. +1. In the **Custom text classification & custom named entity recognition** section, select an existing storage account or select **New storage account**. These values are to help you get started, and not necessarily the [storage account values](/azure/storage/common/storage-account-overview) you’ll want to use in production environments. To avoid latency during building your project connect to storage accounts in the same region as your Language resource. |Storage account value |Recommended value | |---------|---------| @@ -39,3 +33,5 @@ ms.author: aahi | Account kind| Storage (general purpose v1) | | Performance | Standard | | Replication | Locally redundant storage (LRS) | + +1. Make sure the **Responsible AI Notice** is checked. Select **Review + create** at the bottom of the page, then select **Create**. \ No newline at end of file diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/resource-creation-powershell.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/resource-creation-powershell.md index dcca4887e3381..7b331b1289376 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/resource-creation-powershell.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/resource-creation-powershell.md @@ -37,4 +37,4 @@ New-AzResourceGroupDeployment -Name ExampleDeployment -ResourceGroupName Example -TemplateParameterFile ``` -See the ARM template documentation for information on [deploying templates](/azure/azure-resource-manager/templates/deploy-powershell#parameter-files) and [parameter files](/azure/azure-resource-manager/templates/parameter-files#parameter-file). +See the ARM template documentation for information on [deploying templates](../../../../azure-resource-manager/templates/deploy-powershell.md#parameter-files) and [parameter files](../../../../azure-resource-manager/templates/parameter-files.md#parameter-file). \ No newline at end of file diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/cancel-training.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/cancel-training.md index 934f1b4eee820..455ae61beaaff 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/cancel-training.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/cancel-training.md @@ -9,23 +9,22 @@ ms.date: 05/06/2022 ms.author: aahi --- - -Create a **POST** request using the following URL, headers, and JSON body to cancel a training job. +Create a **POST** request by using the following URL, headers, and JSON body to cancel a training job. ### Request URL Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}/:cancel?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}/:cancel?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. 
| `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{JOB-ID} | This is the training job ID| |`XXXXX-XXXXX-XXXX-XX| -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{JOB-ID}` | This value is the training job ID.| `XXXXX-XXXXX-XXXX-XX`| +|`{API-VERSION}` | The version of the API you're calling. The value referenced is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -35,4 +34,4 @@ Use the following header to authenticate your request. |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a 204 response indicating success, which means your training job has been canceled. +After you send your API request, you'll receive a 202 response with an `Operation-Location` header used to check the status of the job. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/create-project.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/create-project.md index 0d153efe901f3..96dfe10babb8d 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/create-project.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/create-project.md @@ -8,21 +8,27 @@ ms.topic: include ms.date: 04/06/2022 ms.author: aahi --- +To start creating a custom named entity recognition model, you need to create a project. Creating a project will let you label data, train, evaluate, improve, and deploy your models. > [!NOTE] -> The project name is case sensitive for all operations. +> The project name is case-sensitive for all operations. -Create a **POST** request using the following URL, headers, and JSON body to create your project and import the tags file. +Create a **PATCH** request using the following URL, headers, and JSON body to create your project. -Use the following URL to create a project and import your tags file. Replace the placeholder values below with your own values. +### Request URL + +Use the following URL to create a project. Replace the placeholder values below with your own values. ```rest -{YOUR-ENDPOINT}/language/analyze-text/projects/{projectName}/:import?api-version=2021-11-01-preview +{Endpoint}/language/authoring/analyze-text/projects/{projectName}?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| -|`{YOUR-ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | +|`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | +|`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | + ### Headers @@ -34,91 +40,34 @@ Use the following header to authenticate your request. 
### Body -Use the following JSON in your request. Replace the placeholder values below with your own values. Use the tags file available in the [sample data](https://github.com/Azure-Samples/cognitive-services-sample-data-files) tab +Use the following JSON in your request. Replace the placeholder values below with your own values. ```json { - "api-version": "2021-11-01-preview", - "metadata": { - "name": "MyProject", - "multiLingual": true, - "description": "Trying out custom NER", - "modelType": "Extraction", - "language": "string", - "storageInputContainerName": "YOUR-CONTAINER-NAME", - "settings": {} - }, - "assets": { - "extractors": [ - { - "name": "Entity1" - }, - { - "name": "Entity2" - } - ], - "documents": [ - { - "location": "doc1.txt", - "language": "en-us", - "dataset": "Train", - "extractors": [ - { - "regionOffset": 0, - "regionLength": 500, - "labels": [ - { - "extractorName": "Entity1", - "offset": 25, - "length": 10 - }, - { - "extractorName": "Entity2", - "offset": 120, - "length": 8 - } - ] - } - ] - }, - { - "location": "doc2.txt", - "language": "en-us", - "dataset": "Test", - "extractors": [ - { - "regionOffset": 0, - "regionLength": 100, - "labels": [ - { - "extractorName": "Entity2", - "offset": 20, - "length": 5 - } - ] - } - ] - } - ] - } + "projectName": "{PROJECT-NAME}", + "language": "{LANGUAGE-CODE}", + "projectKind": "CustomEntityRecognition", + "description": "Project description", + "multilingual": "True", + "storageInputContainerName": "{CONTAINER-NAME}" } + ``` -For the metadata key: -|Key |Value | Example | -|---------|---------|---------| -| `modelType` | Your Model type. | Extraction | -|`storageInputContainerName` | The name of your Azure blob storage container. | `myContainer` | +|Key |Placeholder|Value | Example | +|---------|---------|---------|--| +| projectName | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | +| language | `{LANGUAGE-CODE}` | A string specifying the language code for the documents used in your project. If your project is a multilingual project, choose the language code of the majority of the documents. See [language support](../../language-support.md) to learn more about supported language codes. |`en-us`| +| projectKind | `CustomEntityRecognition` | Your project kind. | `CustomEntityRecognition` | +| multilingual | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language (not necessarily included in your training documents. See [language support](../../language-support.md#multi-lingual-option) to learn more about multilingual support. | `true`| +| storageInputContainerName | `{CONTAINER-NAME` | The name of your Azure storage container where you have uploaded your documents. | `myContainer` | -For the documents key: -|Key |Value | Example | -|---------|---------|---------| -| `location` | Document name on the blob store. | `doc2.txt` | -|`language` | The language of the document. | `en-us` | -|`dataset` | Optional field to specify the dataset which this document will belong to. | `Train` or `Test` | -This request will return an error if: +This request will return a 201 response, which means that the project is created. + +This request will return an error if: * The selected resource doesn't have proper permission for the storage account. 
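For illustration only, a minimal Python sketch of sending the create-project request described above with the `requests` library might look like the following. The endpoint, key, language code, and container name are hypothetical placeholders rather than values from this article, and error handling is omitted.

```python
# Illustrative sketch only. The endpoint, key, language code, and container
# name below are hypothetical placeholders, not values from this article.
import requests

endpoint = "https://<your-resource>.cognitiveservices.azure.com"
key = "<your-resource-key>"
project_name = "myProject"

body = {
    "projectName": project_name,
    "language": "en-us",
    "projectKind": "CustomEntityRecognition",
    "description": "Project description",
    "multilingual": True,
    "storageInputContainerName": "myContainer",
}

# PATCH the authoring endpoint to create (or update) the project.
response = requests.patch(
    f"{endpoint}/language/authoring/analyze-text/projects/{project_name}",
    params={"api-version": "2022-05-01"},
    headers={"Ocp-Apim-Subscription-Key": key},
    json=body,
)
print(response.status_code)  # 201 indicates the project was created
```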
+ diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-deployment.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-deployment.md index c7e5444c999d0..e6ed2ba22f600 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-deployment.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-deployment.md @@ -16,7 +16,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d ### Request URL ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/deployments/{deploymentName}?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{PROJECT-NAME}/deployments/{deploymentName}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -24,8 +24,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{DEPLOYMENT-NAME}` | The name for your deployment name. This value is case-sensitive. | `prod` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#api-versions) | `2022-03-01-preview` | - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | ### Headers @@ -34,7 +33,8 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | -Once you send your API request, you will receive a `202` response indicating success, which means your deployment has been deleted. +Once you send your API request, you will receive a `202` response indicating success, which means your deployment has been deleted. A successful call results with an `Operation-Location` header used to check the status of the job. + + diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-model.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-model.md index 701eb26e050aa..adba4130cc771 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-model.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-model.md @@ -10,15 +10,13 @@ ms.date: 05/09/2022 ms.author: aahi --- - - -Create a **DELETE** request using the following URL, headers, and JSON body to delete a model. +Create a **DELETE** request using the following URL, headers, and JSON body to delete a trained model. 
### Request URL ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -26,8 +24,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{trainedModelLabel}` | The name for your model name. This value is case-sensitive. | `model1` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#api-versions) | `2022-03-01-preview` | - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | ### Headers @@ -38,4 +35,4 @@ Use the following header to authenticate your request. |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a `202` response indicating success, which means your model has been deleted. +Once you send your API request, you will receive a `204` response indicating success, which means your trained model has been deleted. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-project.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-project.md index 093b037c5a56c..b6014ca7afe49 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-project.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-project.md @@ -12,15 +12,14 @@ ms.author: aahi When you no longer need your project, you can delete it with the following **DELETE** request. Replace the placeholder values with your own values. ```rest -{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{projectName}?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | ### Headers @@ -31,4 +30,4 @@ Use the following header to authenticate your request. 
|Ocp-Apim-Subscription-Key| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a `202` response indicating success, which means your project has been deleted. +Once you send your API request, you will receive a `202` response indicating success, which means your project has been deleted. A successful call results with an Operation-Location header used to check the status of the job. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/deploy-model.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/deploy-model.md index f0555a0c5d8f6..ff56bb64c7c81 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/deploy-model.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/deploy-model.md @@ -13,7 +13,7 @@ ms.author: aahi Submit a **PUT** request using the following URL, headers, and JSON body to submit a deployment job. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{projectName}/deployments/{deploymentName}?api-version={API-VERSION} ``` | Placeholder |Value | Example | @@ -21,8 +21,7 @@ Submit a **PUT** request using the following URL, headers, and JSON body to subm | `{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | | `{DEPLOYMENT-NAME}` | The name of your deployment. This value is case-sensitive. | `staging` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | #### Headers @@ -46,10 +45,10 @@ Use the following JSON in the body of your request. Use the name of the model yo |---------|---------|-----|----| | trainedModelLabel | `{MODEL-NAME}` | The model name that will be assigned to your deployment. You can only assign successfully trained models. This value is case-sensitive. | `myModel` | -Once you send your API request, you’ll receive a `202` response indicating that the job was submitted correctly. In the response headers, extract the `location` value. It will be formatted like this: +Once you send your API request, you’ll receive a `202` response indicating that the job was submitted correctly. In the response headers, extract the `operation-location` value. It will be formatted like this: ```rest {ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} ``` -`{JOB-ID}` is used to identify your request, since this operation is asynchronous. You can use this URL to get the deployment status. +`{JOB-ID}` is used to identify your request, since this operation is asynchronous. 
You can use this URL to get the deployment status. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/export-project.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/export-project.md index 05b717ab973e1..e8de2da47b6dc 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/export-project.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/export-project.md @@ -17,15 +17,14 @@ Create a **POST** request using the following URL, headers, and JSON body to exp Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/:export?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/:export?stringIndexType=Utf16CodeUnit&api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `MyProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#api-versions) | `2022-03-01-preview` | - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is the latest [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-05-01` | ### Headers @@ -51,4 +50,4 @@ Once you send your API request, you’ll receive a `202` response indicating tha {ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/export/jobs/{JOB-ID}?api-version={API-VERSION} ``` -{JOB-ID} is used to identify your request, since this operation is asynchronous. You’ll use this URL to get the export job status. +`{JOB-ID}` is used to identify your request, since this operation is asynchronous. You’ll use this URL to get the export job status. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-deployment-status.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-deployment-status.md index 2dce6782b23d5..1f03f702088fa 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-deployment-status.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-deployment-status.md @@ -21,9 +21,8 @@ Use the following **GET** request to query the status of the deployment job. You | `{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | | `{DEPLOYMENT-NAME}` | The name of your deployment. This value is case-sensitive. | `staging` | -|`{JOB-ID}` | The ID for locating your model's training status. This value is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. 
Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | - +|`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | #### Headers @@ -36,7 +35,7 @@ Use the following header to authenticate your request. ### Response Body -Once you send the request, you will get the following response. Keep polling this endpoint until the **status** parameter changes to "succeeded". +Once you send the request, you will get the following response. Keep polling this endpoint until the **status** parameter changes to "succeeded". You should get a `200` code to indicate the success of the request. ```json { @@ -47,3 +46,4 @@ Once you send the request, you will get the following response. Keep polling thi "status":"running" } ``` + diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-export-status.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-export-status.md index fcb9fb3b49f79..08a21e709d2f6 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-export-status.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-export-status.md @@ -22,8 +22,7 @@ Use the following **GET** request to get the status of exporting your project as |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#api-versions) | `2022-03-01-preview` | - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | ### Headers @@ -39,9 +38,9 @@ Use the following header to authenticate your request. 
{ "resultUrl": "{RESULT-URL}", "jobId": "string", - "createdDateTime": "2021-10-19T23:24:41.572Z", - "lastUpdatedDateTime": "2021-10-19T23:24:41.572Z", - "expirationDateTime": "2021-10-19T23:24:41.572Z", + "createdDateTime": "2021-10-19T23:24:41.572Z", + "lastUpdatedDateTime": "2021-10-19T23:24:41.572Z", + "expirationDateTime": "2021-10-19T23:24:41.572Z", "status": "unknown", "errors": [ { diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-import-status.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-import-status.md index 43573fb361d31..33d6875e4411a 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-import-status.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-import-status.md @@ -22,8 +22,7 @@ Use the following **GET** request to get the status of your importing your proje |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your model's training status. This value is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | #### Headers @@ -32,3 +31,4 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| + diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-project-details.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-project-details.md index f7397748659b2..04977c1f1ea65 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-project-details.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-project-details.md @@ -19,9 +19,7 @@ Use the following **GET** request to get your project details. Replace the place |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#api-versions) | `2022-03-01-preview` | - - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. 
See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | #### Headers @@ -39,12 +37,12 @@ Use the following header to authenticate your request. "lastModifiedDateTime": "2021-10-19T23:24:41.572Z", "lastTrainedDateTime": "2021-10-19T23:24:41.572Z", "lastDeployedDateTime": "2021-10-19T23:24:41.572Z", - "modelType": "{MODEL-TYPE}", + "projectKind": "CustomEntityRecognition", "storageInputContainerName": "{CONTAINER-NAME}", - "name": "myProject", - "multiLingual": true, - "description": "string", - "language": "en-us", - "settings": {} + "projectName": "{PROJECT-NAME}", + "multilingual": false, + "description": "Project description", + "language": "{LANGUAGE-CODE}" } ``` + diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-results.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-results.md index 0ea16acbc6bf5..79c066f5a91aa 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-results.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-results.md @@ -13,8 +13,14 @@ ms.author: aahi Use the following **GET** request to query the status/results of the custom entity recognition task. ```rest -{ENDPOINT}/text/analytics/v3.2-preview.2/analyze/jobs/{JOB-ID} +{ENDPOINT}/language/analyze-text/jobs/{JOB-ID}?api-version={API-VERSION} ``` + +|Placeholder |Value | Example | +|---------|---------|---------| +|`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. 
| `2022-05-01` | + #### Headers |Key|Value| @@ -27,69 +33,62 @@ The response will be a JSON document with the following parameters ```json { - "createdDateTime": "2021-05-19T14:32:25.578Z", - "displayName": "MyJobName", - "expirationDateTime": "2021-05-19T14:32:25.578Z", - "jobId": "3fa85f64-5717-4562-b3fc-2c963f66afa6", - "lastUpdateDateTime": "2021-05-19T14:32:25.578Z", - "status": "completed", - "errors": [], - "tasks": { - "details": { - "name": "{JOB-NAME}", - "lastUpdateDateTime": "2021-03-29T19:50:23Z", - "status": "completed" - }, - "completed": 1, - "failed": 0, - "inProgress": 0, - "total": 1, - "tasks": { - "customEntityRecognitionTasks": [ - { - "lastUpdateDateTime": "2021-05-19T14:32:25.579Z", - "name": "{JOB-NAME}", - "status": "completed", - "results": { - "documents": [ - { - "id": "{DOC-ID}", - "entities": [ - { - "text": "Government", - "category": "restaurant_name", - "offset": 23, - "length": 10, - "confidenceScore": 0.0551877357 - } - ], - "warnings": [] - }, - { - "id": "{DOC-ID}", - "entities": [ - { - "text": "David Schmidt", - "category": "artist", - "offset": 0, - "length": 13, - "confidenceScore": 0.8022353 - } - ], - "warnings": [] - } - ], - "errors": [], - "statistics": { - "documentsCount":0, - "validDocumentsCount":0, - "erroneousDocumentsCount":0, - "transactionsCount":0 + "createdDateTime": "2021-05-19T14:32:25.578Z", + "displayName": "MyJobName", + "expirationDateTime": "2021-05-19T14:32:25.578Z", + "jobId": "xxxx-xxxx-xxxxx-xxxxx", + "lastUpdateDateTime": "2021-05-19T14:32:25.578Z", + "status": "succeeded", + "tasks": { + "completed": 1, + "failed": 0, + "inProgress": 0, + "total": 1, + "items": [ + { + "kind": "EntityRecognitionLROResults", + "taskName": "Recognize Entities", + "lastUpdateDateTime": "2020-10-01T15:01:03Z", + "status": "succeeded", + "results": { + "documents": [ + { + "entities": [ + { + "category": "Event", + "confidenceScore": 0.61, + "length": 4, + "offset": 18, + "text": "trip" + }, + { + "category": "Location", + "confidenceScore": 0.82, + "length": 7, + "offset": 26, + "subcategory": "GPE", + "text": "Seattle" + }, + { + "category": "DateTime", + "confidenceScore": 0.8, + "length": 9, + "offset": 34, + "subcategory": "DateRange", + "text": "last week" } - } - } - ] + ], + "id": "1", + "warnings": [] + } + ], + "errors": [], + "modelVersion": "2020-04-01" } - } + } + ] + } +} + ``` diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-training-status.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-training-status.md index 875c47f47ae81..5df144cb68478 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-training-status.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-training-status.md @@ -22,8 +22,7 @@ Use the following **GET** request to get the status of your model's training pro |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your model's training status. This value is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. 
Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | #### Headers @@ -39,39 +38,25 @@ Once you send the request, you’ll get the following response. ```json { - "jobs": [ - { - "result": { - "trainedModelLabel": "{MODEL-NAME}", - "trainingConfigVersion": "string", - "trainStatus": { - "percentComplete": 0, - "elapsedTime": "string" - }, - "evaluationStatus": { - "percentComplete": 0, - "elapsedTime": "string" - } - }, - "jobId": "string", - "createdDateTime": "2022-04-12T12:13:28.771Z", - "lastUpdatedDateTime": "2022-04-12T12:13:28.771Z", - "expirationDateTime": "2022-04-12T12:13:28.771Z", - "status": "unknown", - "warnings": [ - { - "code": "unknown", - "message": "string" - } - ], - "errors": [ - { - "code": "unknown", - "message": "string" - } - ] + "result": { + "modelLabel": "{MODEL-NAME}", + "trainingConfigVersion": "{CONFIG-VERSION}", + "estimatedEndDateTime": "2022-04-18T15:47:58.8190649Z", + "trainingStatus": { + "percentComplete": 3, + "startDateTime": "2022-04-18T15:45:06.8190649Z", + "status": "running" + }, + "evaluationStatus": { + "percentComplete": 0, + "status": "notStarted" } - ] + }, + "jobId": "{JOB-ID}", + "createdDateTime": "2022-04-18T15:44:44Z", + "lastUpdatedDateTime": "2022-04-18T15:45:48Z", + "expirationDateTime": "2022-04-25T15:44:44Z", + "status": "running" } ``` diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/import-project.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/import-project.md index 0e35074ac24df..707e3cc8b6019 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/import-project.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/import-project.md @@ -10,18 +10,19 @@ ms.date: 05/05/2022 ms.author: aahi --- -Submit a **POST** request using the following URL, headers, and JSON body to import your tags file. Make sure that your tags file follow the [accepted tags file format](../../concepts/data-formats.md). +Submit a **POST** request using the following URL, headers, and JSON body to import your labels file. Make sure that your labels file follow the [accepted format](../../concepts/data-formats.md). + +If a project with the same name already exists, the data of that project is replaced. ```rest -{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/:import?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{projectName}/:import?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). 
| `2022-03-01-preview` | - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | ### Headers @@ -31,16 +32,18 @@ Use the following header to authenticate your request. |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| + ### Body Use the following JSON in your request. Replace the placeholder values below with your own values. + ```json { - "api-version": "{API-VERSION}", + "projectFileVersion": "{API-VERSION}", "stringIndexType": "Utf16CodeUnit", "metadata": { "projectName": "{PROJECT-NAME}", - "projectKind": "customNamedEntityRecognition", + "projectKind": "CustomEntityRecognition", "description": "Trying out custom NER", "language": "{LANGUAGE-CODE}", "multilingual": true, @@ -48,6 +51,7 @@ Use the following JSON in your request. Replace the placeholder values below wit "settings": {} }, "assets": { + "projectKind": "CustomEntityRecognition", "entities": [ { "category": "Entity1" @@ -107,7 +111,7 @@ Use the following JSON in your request. Replace the placeholder values below wit |---------|---------|----------|--| | `api-version` | `{API-VERSION}` | The version of the API you are calling. The version used here must be the same API version in the URL. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | | `projectName` | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | -| `projectKind` | `customNamedEntityRecognition` | Your project kind. | `customNamedEntityRecognition` | +| `projectKind` | `CustomEntityRecognition` | Your project kind. | `CustomEntityRecognition` | | `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the documents used in your project. If your project is a multilingual project, choose the [language code](../../language-support.md) of the majority of the documents. |`en-us`| | `multilingual` | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language (not necessarily included in your training documents. See [language support](../../language-support.md#multi-lingual-option) for information on multilingual support. | `true`| | `storageInputContainerName` | {CONTAINER-NAME} | The name of your Azure storage container where you have uploaded your documents. | `myContainer` | @@ -117,7 +121,7 @@ Use the following JSON in your request. Replace the placeholder values below wit | `dataset` | `{DATASET}` | The test set to which this file will go to when split before training. See [How to train a model](../../how-to/train-model.md#data-splitting) for more information on how your data is split. Possible values for this field are `Train` and `Test`. |`Train`| -Once you send your API request, you’ll receive a `202` response indicating that the job was submitted correctly. In the response headers, extract the `location` value. It will be formatted like this: +Once you send your API request, you’ll receive a `202` response indicating that the job was submitted correctly. In the response headers, extract the `operation-location` value. 
It will be formatted like this: ```rest {ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/import/jobs/{JOB-ID}?api-version={API-VERSION} @@ -127,7 +131,7 @@ Once you send your API request, you’ll receive a `202` response indicating tha Possible error scenarios for this request: -* The selected resource doesn't have proper permission for the storage account. Learn more about [required permissions](../../how-to/create-project.md#create-a-language-resource) for storage account. +* The selected resource doesn't have [proper permissions](../../how-to/create-project.md#using-a-pre-existing-language-resource) for the storage account. * The `storageInputContainerName` specified doesn't exist. * Invalid language code is used, or if the language code type isn't string. -* `multilingual` value is string and not boolean. +* `multilingual` value is a string and not a boolean. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/model-evaluation.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/model-evaluation.md index 9ff034fd24ed6..ddd828c655450 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/model-evaluation.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/model-evaluation.md @@ -18,7 +18,7 @@ Submit a **GET** request using the following URL, headers, and JSON body to get ### Request URL ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -26,7 +26,7 @@ Submit a **GET** request using the following URL, headers, and JSON body to get |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{trainedModelLabel}` | The name for your trained model. This value is case-sensitive. | `Model1` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#api-versions) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | ### Headers @@ -43,8 +43,8 @@ Once you send the request, you will get the following response. 
```json { - "projectKind": "customNamedEntityRecognition", - "customNamedEntityRecognitionEvaluation": { + "projectKind": "CustomEntityRecognition", + "customEntityRecognitionEvaluation": { "confusionMatrix": { "additionalProp1": { "additionalProp1": { diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/project-details.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/project-details.md index 188a9f0ad7a9c..dfa0b591e591c 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/project-details.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/project-details.md @@ -11,8 +11,7 @@ ms.date: 05/06/2022 ms.author: aahi --- - -To get custom named entity recognition project details, submit a **GET** request using the following URL and headers. Replace the placeholder values with your own values. +Use the following **GET** request to get your project details. Replace the placeholder values below with your own values. ```rest {ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}?api-version={API-VERSION} @@ -22,41 +21,39 @@ To get custom named entity recognition project details, submit a **GET** request |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | -### Headers +#### Headers Use the following header to authenticate your request. |Key|Value| |--|--| -|Ocp-Apim-Subscription-Key| The key to your resource. Used for authenticating your API requests.| +|`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -### Response Body +#### Response body -Once you send the request, you will get the following response. ```json -{ - "createdDateTime": "2022-04-23T13:39:09.384Z", - "lastModifiedDateTime": "2022-04-23T13:39:09.384Z", - "lastTrainedDateTime": "2022-04-23T13:39:09.384Z", - "lastDeployedDateTime": "2022-04-23T13:39:09.384Z", - "projectKind": "customNamedEntityRecognition", - "storageInputContainerName": "string", - "settings": {}, - "projectName": "string", - "multilingual": true, - "description": "string", - "language": "string" -} - + { + "createdDateTime": "2021-10-19T23:24:41.572Z", + "lastModifiedDateTime": "2021-10-19T23:24:41.572Z", + "lastTrainedDateTime": "2021-10-19T23:24:41.572Z", + "lastDeployedDateTime": "2021-10-19T23:24:41.572Z", + "projectKind": "CustomEntityRecognition", + "storageInputContainerName": "{CONTAINER-NAME}", + "projectName": "{PROJECT-NAME}", + "multilingual": false, + "description": "Project description", + "language": "{LANGUAGE-CODE}" + } ``` + |Value | Placeholder | Description | Example | |---------|---------|---------|---------| -| `projectKind` | `customNamedEntityRecognition` | Your project kind. 
| `customNamedEntityRecognition` |
+| `projectKind` | `CustomEntityRecognition` | Your project kind. | `CustomEntityRecognition` |
| `storageInputContainerName` | `{CONTAINER-NAME}` | The name of your Azure storage container where you have uploaded your documents. | `myContainer` |
| `projectName` | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` |
| `multilingual` | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language (not necessarily included in your training documents). For more information about multilingual support, see [Language support](../../language-support.md#multi-lingual-option). | `true`|
| `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the documents used in your project. If your project is a multilingual project, choose the [language code](../../language-support.md) of the majority of the documents. |`en-us`|

-Once you send your API request, you will receive a `202` response indicating success and JSON response body with your project details.
+Once you send your API request, you will receive a `200` response indicating success and a JSON response body with your project details.
diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/submit-task.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/submit-task.md
index acc461c28d753..6045a864e54d1 100644
--- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/submit-task.md
+++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/submit-task.md
@@ -10,12 +10,17 @@ ms.date: 05/05/2022
ms.author: aahi
---

-Use this **POST** request to submit an entity extraction task.
+Use this **POST** request to start an entity extraction task.

```rest
-{ENDPOINT}/text/analytics/v3.2-preview.2/analyze
+{ENDPOINT}/language/analyze-text/jobs?api-version={API-VERSION}
```
+
+|Placeholder |Value | Example |
+|---------|---------|---------|
+|`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` |
+|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` |
+
#### Headers

|Key|Value|
@@ -26,45 +31,46 @@ Use this **POST** request to submit an entity extraction task.
```json { - "displayName": "{JOB-NAME}", - "analysisInput": { - "documents": [ - { - "id": "{DOC-ID}", - "language": "{LANGUAGE-CODE}", - "text": "{DOC-TEXT}" - }, - { - "id": "{DOC-ID}", - "language": "{LANGUAGE-CODE}", - "text": "{DOC-TEXT}" - } - ] - }, - "tasks": { - "customEntityRecognitionTasks": [ - { - "parameters": { - "project-name": "`{PROJECT-NAME}`", - "deployment-name": "`{DEPLOYMENT-NAME}`" - } - } - ] + "displayName": "Extracting entities", + "analysisInput": { + "documents": [ + { + "id": "1", + "language": "{LANGUAGE-CODE}", + "text": "Text1" + }, + { + "id": "2", + "language": "{LANGUAGE-CODE}", + "text": "Text2" + } + ] + }, + "tasks": [ + { + "kind": "CustomEntityRecognition", + "taskName": "Entity Recognition", + "parameters": { + "projectName": "{PROJECT-NAME}", + "deploymentName": "{DEPLOYMENT-NAME}" + } } + ] } ``` + |Key |Placeholder |Value | Example | |---------|---------|----------|--| | `displayName` | `{JOB-NAME}` | Your job name. | `MyJobName` | | `documents` | [{},{}] | List of documents to run tasks on. | `[{},{}]` | | `id` | `{DOC-ID}` | Document name or ID. | `doc1`| -| `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the document. In case this key is not specified, the service will assume the default language of the project that was selected during project creation. See [language support](../../language-support.md) to learn more about supported language codes. |`en-us`| +| `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the document. If this key isn't specified, the service will assume the default language of the project that was selected during project creation. See [language support](../../language-support.md) for a list of supported language codes. |`en-us`| | `text` | `{DOC-TEXT}` | Document task to run the tasks on. | `Lorem ipsum dolor sit amet` | -|`tasks`|`[]`| List of tasks we want to perform.|`[]`| -| |customEntityRecognitionTasks|Task identifer for task we want to perform. | | -|`parameters`|`[]`|List of parameters to pass to task|`[]`| +|`tasks`| | List of tasks we want to perform.|`[]`| +| `taskName`|`CustomEntityRecognition`|The task name|CustomEntityRecognition| +|`parameters`| |List of parameters to pass to the task.| | | `project-name` |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | | `deployment-name` |`{DEPLOYMENT-NAME}` | The name of your deployment. This value is case-sensitive. | `prod` | @@ -75,7 +81,7 @@ You will receive a 202 response indicating that your task has been submitted suc `operation-location` is formatted like this: ```rest -{ENDPOINT}/text/analytics/v3.2-preview.2/analyze/jobs/{JOB-ID} +{ENDPOINT}/language/analyze-text/jobs/{JOB-ID}?api-version={API-VERSION} ``` You can use this URL to query the task completion status and get the results when task is completed. 
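As a rough illustration of the submit-and-poll flow described above, the following minimal Python sketch uses the `requests` library. The endpoint, key, project name, deployment name, and document text are hypothetical placeholders, not values taken from this article.

```python
# Minimal sketch of submitting an entity extraction job and polling its status.
# The endpoint, key, project, deployment, and document text are placeholders.
import requests

endpoint = "https://<your-resource>.cognitiveservices.azure.com"
key = "<your-resource-key>"
headers = {"Ocp-Apim-Subscription-Key": key}

body = {
    "displayName": "Extracting entities",
    "analysisInput": {
        "documents": [
            {"id": "1", "language": "en-us", "text": "Example document text."}
        ]
    },
    "tasks": [
        {
            "kind": "CustomEntityRecognition",
            "taskName": "Entity Recognition",
            "parameters": {
                "projectName": "myProject",
                "deploymentName": "prod",
            },
        }
    ],
}

response = requests.post(
    f"{endpoint}/language/analyze-text/jobs",
    params={"api-version": "2022-05-01"},
    headers=headers,
    json=body,
)
# A 202 response carries the job URL in the operation-location header.
job_url = response.headers["operation-location"]

# Query the job URL until its status is no longer "running".
status = requests.get(job_url, headers=headers).json()
print(status["status"])
```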
diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/swap-deployment.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/swap-deployment.md index 2447a4ecc3f9b..3b8ed42317b65 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/swap-deployment.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/swap-deployment.md @@ -25,7 +25,7 @@ Create a **POST** request using the following URL, headers, and JSON body to sta |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#api-versions) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-05-01` | ### Headers @@ -35,9 +35,9 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | -### Request body + +### Request Body ```json { @@ -47,10 +47,10 @@ Use the following header to authenticate your request. ``` -|Key| value| Example| -|--|--|--| -|firstDeploymentName | The name for your first deployment. This value is case-sensitive. | `production` | -|secondDeploymentName | The name for your second deployment. This value is case-sensitive. | `staging` | +|Key|Placeholder| Value| Example| +|--|--|--|--| +|firstDeploymentName |`{FIRST-DEPLOYMENT-NAME}`| The name for your first deployment. This value is case-sensitive. | `production` | +|secondDeploymentName | `{SECOND-DEPLOYMENT-NAME}`|The name for your second deployment. This value is case-sensitive. | `staging` | Once you send your API request, you will receive a `202` response indicating success. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/train-model.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/train-model.md index 42b01d6d8eaa2..2c4ae74a9be44 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/train-model.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/train-model.md @@ -20,7 +20,7 @@ Submit a **POST** request using the following URL, headers, and JSON body to sub |---------|---------|---------| | `{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. 
See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | #### Headers @@ -34,27 +34,27 @@ Use the following header to authenticate your request. Use the following JSON in your request body. The model will be given the `{MODEL-NAME}` once training is complete. Only successful training jobs will produce models. + ```json { "modelLabel": "{MODEL-NAME}", "trainingConfigVersion": "{CONFIG-VERSION}", "evaluationOptions": { - "kind": "percentage", + "kind": "percentage", "trainingSplitPercentage": 80, "testingSplitPercentage": 20 } } - ``` |Key |Placeholder |Value | Example | |---------|---------|-----|----| -| `modelLabel` | `{MODEL-NAME}` | The model name that will be assigned to your model once trained successfully. | `myModel` | -| `trainingConfigVersion` | `{CONFIG-VERSION}` | This is the [model version](../../../concepts/model-lifecycle.md) that will be used to train the model. | `2022-05-01` | -| `evaluationOptions` | `{}` | Option to split your data across training and testing sets. | `{}` | -| `kind` | `percentage` | Split methods. Possible Values are `percentage` or `manual`. See [How to train a model](../../how-to/train-model.md#data-splitting) for more information on how your data is split. |`percentage`| -| `trainingSplitPercentage` | `80`| Percentage of your tagged data to be included in the training set. Recommended value is `80`. | `80`| -| `testingSplitPercentage` | `20` | Percentage of your tagged data to be included in the testing set. Recommended value is `20`. | `20` | +| modelLabel | `{MODEL-NAME}` | The model name that will be assigned to your model once trained successfully. | `myModel` | +| trainingConfigVersion | `{CONFIG-VERSION}` | This is the [model version](../../../concepts/model-lifecycle.md) that will be used to train the model. | `2022-05-01`| +| evaluationOptions | | Option to split your data across training and testing sets. | `{}` | +| kind | `percentage` | Split methods. Possible values are `percentage` or `manual`. See [How to train a model](../../how-to/train-model.md#data-splitting) for more information. |`percentage`| +| trainingSplitPercentage | `80`| Percentage of your tagged data to be included in the training set. Recommended value is `80`. | `80`| +| testingSplitPercentage | `20` | Percentage of your tagged data to be included in the testing set. Recommended value is `20`. | `20` | > [!NOTE] > The `trainingSplitPercentage` and `testingSplitPercentage` are only required if `Kind` is set to `percentage` and the sum of both percentages should be equal to 100.
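To make the note above concrete, here is a hedged Python sketch that builds this request body, checks that the two split percentages add up to 100, and submits the training job. The endpoint, key, project name, and the authoring `:train` URL pattern are assumptions for illustration, not values taken from this article.

```python
import requests

# Hypothetical placeholders -- substitute your own resource values.
endpoint = "https://your-resource.cognitiveservices.azure.com"
key = "your-resource-key"
project_name = "myProject"
api_version = "2022-05-01"

body = {
    "modelLabel": "myModel",
    "trainingConfigVersion": "2022-05-01",
    "evaluationOptions": {
        "kind": "percentage",
        "trainingSplitPercentage": 80,
        "testingSplitPercentage": 20,
    },
}

# Per the note above, the two percentages must add up to 100 when kind is "percentage".
split = body["evaluationOptions"]
assert split["trainingSplitPercentage"] + split["testingSplitPercentage"] == 100

# Assumed authoring URL pattern for submitting the training job.
url = f"{endpoint}/language/authoring/analyze-text/projects/{project_name}/:train"
response = requests.post(
    url,
    params={"api-version": api_version},
    headers={"Ocp-Apim-Subscription-Key": key},
    json=body,
)
print(response.status_code, response.headers.get("operation-location"))
```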
diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/media/confusion-matrix-example.png b/articles/cognitive-services/language-service/custom-named-entity-recognition/media/confusion-matrix-example.png index e0ac8297da15c..161b669bd5c2a 100644 Binary files a/articles/cognitive-services/language-service/custom-named-entity-recognition/media/confusion-matrix-example.png and b/articles/cognitive-services/language-service/custom-named-entity-recognition/media/confusion-matrix-example.png differ diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/media/review-predictions.png b/articles/cognitive-services/language-service/custom-named-entity-recognition/media/review-predictions.png index 1f8942a8cfb1e..3e98d419233ec 100644 Binary files a/articles/cognitive-services/language-service/custom-named-entity-recognition/media/review-predictions.png and b/articles/cognitive-services/language-service/custom-named-entity-recognition/media/review-predictions.png differ diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/media/tag-options.png b/articles/cognitive-services/language-service/custom-named-entity-recognition/media/tag-options.png index a7586d561e163..b74d4e1ec7df5 100644 Binary files a/articles/cognitive-services/language-service/custom-named-entity-recognition/media/tag-options.png and b/articles/cognitive-services/language-service/custom-named-entity-recognition/media/tag-options.png differ diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/overview.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/overview.md index c896d39813463..32adc1d2e345b 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/overview.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/overview.md @@ -13,11 +13,11 @@ ms.author: aahi ms.custom: language-service-custom-ner, ignite-fall-2021, event-tier1-build-2022 --- -# What is custom named entity recognition (preview)? +# What is custom named entity recognition? Custom NER is one of the custom features offered by [Azure Cognitive Service for Language](../overview.md). It is a cloud-based API service that applies machine-learning intelligence to enable you to build custom models for custom named entity recognition tasks. -Custom NER enables users to build custom AI models to extract domain-specific entities from unstructured text, such as contracts or financial documents. By creating a Custom NER project, developers can iteratively tag data, train, evaluate, and improve model performance before making it available for consumption. The quality of the tagged data greatly impacts model performance. To simplify building and customizing your model, the service offers a custom web portal that can be accessed through the [Language studio](https://aka.ms/languageStudio). You can easily get started with the service by following the steps in this [quickstart](quickstart.md). +Custom NER enables users to build custom AI models to extract domain-specific entities from unstructured text, such as contracts or financial documents. By creating a Custom NER project, developers can iteratively label data, train, evaluate, and improve model performance before making it available for consumption. The quality of the labeled data greatly impacts model performance. 
To simplify building and customizing your model, the service offers a custom web portal that can be accessed through the [Language studio](https://aka.ms/languageStudio). You can easily get started with the service by following the steps in this [quickstart](quickstart.md). This documentation contains the following article types: @@ -49,12 +49,12 @@ Using custom NER typically involves several different steps. 1. **Define your schema**: Know your data and identify the [entities](glossary.md#entity) you want extracted. Avoid ambiguity. -2. **Tag your data**: Tagging data is a key factor in determining model performance. Tag precisely, consistently and completely. - 1. **Tag precisely**: Tag each entity to its right type always. Only include what you want extracted, avoid unnecessary data in your tag. - 2. **Tag consistently**: The same entity should have the same tag across all the files. - 3. **Tag completely**: Tag all the instances of the entity in all your files. +2. **Label your data**: Labeling data is a key factor in determining model performance. Label precisely, consistently and completely. + 1. **Label precisely**: Label each entity to its right type always. Only include what you want extracted, avoid unnecessary data in your labels. + 2. **Label consistently**: The same entity should have the same label across all the files. + 3. **Label completely**: Label all the instances of the entity in all your files. -3. **Train model**: Your model starts learning from your tagged data. +3. **Train model**: Your model starts learning from your labeled data. 4. **View the model evaluation details**: After training is completed, view the model's evaluation details and its performance. @@ -79,7 +79,7 @@ As you use custom NER, see the following reference documentation and samples for ## Responsible AI -An AI system includes not only the technology, but also the people who will use it, the people who will be affected by it, and the environment in which it is deployed. Read the [transparency note for custom NER]() to learn about responsible AI use and deployment in your systems. You can also see the following articles for more information: +An AI system includes not only the technology, but also the people who will use it, the people who will be affected by it, and the environment in which it is deployed. Read the [transparency note for custom NER](/legal/cognitive-services/language-service/cner-transparency-note?context=/azure/cognitive-services/language-service/context/context) to learn about responsible AI use and deployment in your systems. 
You can also see the following articles for more information: [!INCLUDE [Responsible AI links](../includes/overview-responsible-ai-links.md)] diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/quickstart.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/quickstart.md index 6bdb6a2183b23..d1a04eb2a2eae 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/quickstart.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/quickstart.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: quickstart -ms.date: 04/25/2022 +ms.date: 06/02/2022 ms.author: aahi ms.custom: language-service-custom-ner, ignite-fall-2021, mode-other, event-tier1-build-2022 zone_pivot_groups: usage-custom-language-features @@ -18,7 +18,11 @@ zone_pivot_groups: usage-custom-language-features Use this article to get started with creating a custom NER project where you can train custom models for custom entity recognition. A model is an object that's trained to do a certain task. For this system, the models extract named entities. Models are trained by learning from tagged data. -In this article, we use the Language studio to demonstrate key concepts of custom Named Entity Recognition (NER). As an example we’ll build a custom NER model to extract relevant entities from loan agreements. +In this article, we use Language Studio to demonstrate key concepts of custom Named Entity Recognition (NER). As an example we’ll build a custom NER model to extract relevant entities from loan agreements, such as the: +* Date of the agreement +* Borrower's name, address, city and state +* Lender's name, address, city and state +* Loan and interest amounts ::: zone pivot="language-studio" diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/service-limits.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/service-limits.md index 3a54be878e766..9ea6ab30f2a53 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/service-limits.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/service-limits.md @@ -47,9 +47,7 @@ Custom named entity recognition is only available in some Azure regions. To use * West Europe * North Europe * UK south -* Southeast Asia * Australia East -* Sweden Central ## API limits diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/tutorials/cognitive-search.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/tutorials/cognitive-search.md index ad22b41781d5f..c6d2870147476 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/tutorials/cognitive-search.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/tutorials/cognitive-search.md @@ -143,9 +143,9 @@ Generally after training a model you would review its [evaluation details](../ho 6. Get your custom NER project secrets - 1. You’ll need your **project-name**, project names are case-sensitive. + 1. You will need your **project-name**, project names are case-sensitive. Project names can be found in **project settings** page. - 2. You’ll also need the **deployment-name**. + 2. You will also need the **deployment-name**. Deployment names can be found in **Deploying a model** page. 
### Run the indexer command diff --git a/articles/cognitive-services/language-service/custom-text-classification/concepts/data-formats.md b/articles/cognitive-services/language-service/custom-text-classification/concepts/data-formats.md index 4898a737cb8d4..d3bdceb69fd4c 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/concepts/data-formats.md +++ b/articles/cognitive-services/language-service/custom-text-classification/concepts/data-formats.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: conceptual -ms.date: 05/04/2022 +ms.date: 05/24/2022 ms.author: aahi ms.custom: language-service-custom-classification, ignite-fall-2021, event-tier1-build-2022 --- @@ -25,34 +25,49 @@ Your Labels file should be in the `json` format below. This will enable you to [ ```json { - "classes": [ + "projectFileVersion": "2022-05-01", + "stringIndexType": "Utf16CodeUnit", + "metadata": { + "projectKind": "CustomMultiLabelClassification", + "storageInputContainerName": "{CONTAINER-NAME}", + "projectName": "{PROJECT-NAME}", + "multilingual": false, + "description": "Project-description", + "language": "en-us" + }, + "assets": { + "projectKind": "CustomMultiLabelClassification", + "classes": [ { - "category": "Class1" + "category": "Class1" }, { - "category": "Class2" + "category": "Class2" } - ], - "documents": [ - { - "location": "{DOCUMENT-NAME}", - "language": "{LANGUAGE-CODE}", - "dataset": "{DATASET}", - "classes": [ - { - "category": "Class1" - }, - { - "category": "Class2" - } - ] - } - ] -} + ], + "documents": [ + { + "location": "{DOCUMENT-NAME}", + "language": "{LANGUAGE-CODE}", + "dataset": "{DATASET}", + "classes": [ + { + "category": "Class1" + }, + { + "category": "Class2" + } + ] + } + ] + } ``` |Key |Placeholder |Value | Example | |---------|---------|----------|--| +| multilingual | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language (not necessarily included in your training documents). See [language support](../language-support.md#multi-lingual-option) to learn more about multilingual support. | `true`| +|projectName|`{PROJECT-NAME}`|Project name|myproject| +| storageInputContainerName|`{CONTAINER-NAME}`|Container name|`mycontainer`| | classes | [] | Array containing all the classes you have in the project. These are the classes you want to classify your documents into.| [] | | documents | [] | Array containing all the documents in your project and the classes labeled for this document. | [] | | location | `{DOCUMENT-NAME}` | The location of the documents in the storage container. Since all the documents are in the root of the container, this value should be the document name.|`doc1.txt`| @@ -63,36 +78,53 @@ Your Labels file should be in the `json` format below. 
This will enable you to [ ```json { - "classes": [ - { - "category": "Class1" - }, - { - "category": "Class2" - } - ], - "documents": [ - { - "location": "{DOCUMENT-NAME}", - "language": "{LANGUAGE-CODE}", - "dataset": "{DATASET}", - "class": { - "category": "Class2" - } - }, - { - "location": "{DOCUMENT-NAME}", - "language": "{LANGUAGE-CODE}", - "dataset": "{DATASET}", - "class": { - "category": "Class1" - } - } - ] -} + + "projectFileVersion": "2022-05-01", + "stringIndexType": "Utf16CodeUnit", + "metadata": { + "projectKind": "CustomSingleLabelClassification", + "storageInputContainerName": "{CONTAINER-NAME}", + "settings": {}, + "projectName": "{PROJECT-NAME}", + "multilingual": false, + "description": "Project-description", + "language": "en-us" + }, + "assets": { + "projectKind": "CustomSingleLabelClassification", + "classes": [ + { + "category": "Class1" + }, + { + "category": "Class2" + } + ], + "documents": [ + { + "location": "{DOCUMENT-NAME}", + "language": "{LANGUAGE-CODE}", + "dataset": "{DATASET}", + "class": { + "category": "Class2" + } + }, + { + "location": "{DOCUMENT-NAME}", + "language": "{LANGUAGE-CODE}", + "dataset": "{DATASET}", + "class": { + "category": "Class1" + } + } + ] + } ``` |Key |Placeholder |Value | Example | |---------|---------|----------|--| +|projectName|`{PROJECT-NAME}`|Project name|myproject| +| storageInputContainerName|`{CONTAINER-NAME}`|Container name|`mycontainer`| +| multilingual | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language (not necessarily included in your training documents). See [language support](../language-support.md#multi-lingual-option) to learn more about multilingual support. | `true`| | classes | [] | Array containing all the classes you have in the project. These are the classes you want to classify your documents into.| [] | | documents | [] | Array containing all the documents in your project and which class this document belongs to. | [] | | location | `{DOCUMENT-NAME}` | The location of the documents in the storage container. 
Since all the documents are in the root of the container this should be the document name.|`doc1.txt`| diff --git a/articles/cognitive-services/language-service/custom-text-classification/concepts/evaluation-metrics.md b/articles/cognitive-services/language-service/custom-text-classification/concepts/evaluation-metrics.md index 7a734081d8e59..8df7c2f6654aa 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/concepts/evaluation-metrics.md +++ b/articles/cognitive-services/language-service/custom-text-classification/concepts/evaluation-metrics.md @@ -139,5 +139,5 @@ Similarly, ## Next steps -* [View a model's evaluation in Language Studio](../how-to/view-model-evaluation.md) +* [View a model's performance in Language Studio](../how-to/view-model-evaluation.md) * [Train a model](../how-to/train-model.md) diff --git a/articles/cognitive-services/language-service/custom-text-classification/how-to/call-api.md b/articles/cognitive-services/language-service/custom-text-classification/how-to/call-api.md index f889a7e5b72df..2dc9a4496afc3 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/how-to/call-api.md +++ b/articles/cognitive-services/language-service/custom-text-classification/how-to/call-api.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: how-to -ms.date: 03/15/2022 +ms.date: 06/03/2022 ms.author: aahi ms.devlang: csharp, python ms.custom: language-service-clu, ignite-fall-2021, event-tier1-build-2022 @@ -17,11 +17,11 @@ ms.custom: language-service-clu, ignite-fall-2021, event-tier1-build-2022 # Query deployment to classify text After the deployment is added successfully, you can query the deployment to classify text based on the model you assigned to the deployment. -You can query the deployment programmatically [Prediction API](https://aka.ms/ct-runtime-swagger) or through the [client libraries (Azure SDK)](#get-task-results). +You can query the deployment programmatically [Prediction API](https://aka.ms/ct-runtime-api) or through the [client libraries (Azure SDK)](#get-task-results). ## Test deployed model -You can use the Language Studio to submit the custom text classification task and visualize the results. +You can use Language Studio to submit the custom text classification task and visualize the results. [!INCLUDE [Test model](../includes/language-studio/test-model.md)] @@ -61,10 +61,10 @@ First you will need to get your resource key and endpoint: |Language |Package version | |---------|---------| - |.NET | [5.2.0-beta.2](https://www.nuget.org/packages/Azure.AI.TextAnalytics/5.2.0-beta.2) | - |Java | [5.2.0-beta.2](https://mvnrepository.com/artifact/com.azure/azure-ai-textanalytics/5.2.0-beta.2) | - |JavaScript | [5.2.0-beta.2](https://www.npmjs.com/package/@azure/ai-text-analytics/v/5.2.0-beta.2) | - |Python | [5.2.0b2](https://pypi.org/project/azure-ai-textanalytics/5.2.0b2/) | + |.NET | [5.2.0-beta.3](https://www.nuget.org/packages/Azure.AI.TextAnalytics/5.2.0-beta.3) | + |Java | [5.2.0-beta.3](https://mvnrepository.com/artifact/com.azure/azure-ai-textanalytics/5.2.0-beta.3) | + |JavaScript | [6.0.0-beta.1](https://www.npmjs.com/package/@azure/ai-text-analytics/v/6.0.0-beta.1) | + |Python | [5.2.0b4](https://pypi.org/project/azure-ai-textanalytics/5.2.0b4/) | 4. After you've installed the client library, use the following samples on GitHub to start calling the API. 
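If you want to see the raw request shape before opening the GitHub samples, the following Python sketch calls the prediction REST API directly instead of the client libraries. It is illustrative only: the resource endpoint, key, project, and deployment names are placeholders, and the `CustomSingleLabelClassification` task kind and field layout are assumed to mirror the analyze-text job format used elsewhere in these articles.

```python
import requests

# Hypothetical placeholders -- use your own resource, project, and deployment names.
endpoint = "https://your-resource.cognitiveservices.azure.com"
key = "your-resource-key"

body = {
    "displayName": "Classifying documents",
    "analysisInput": {
        "documents": [
            {"id": "1", "language": "en-us", "text": "Text to classify"}
        ]
    },
    "tasks": [
        {
            "kind": "CustomSingleLabelClassification",
            "taskName": "Single label classification",
            "parameters": {
                "projectName": "myProject",
                "deploymentName": "prod",
            },
        }
    ],
}

response = requests.post(
    f"{endpoint}/language/analyze-text/jobs",
    params={"api-version": "2022-05-01"},
    headers={"Ocp-Apim-Subscription-Key": key},
    json=body,
)
# A 202 response means the job was accepted; poll the operation-location URL for results.
print(response.status_code, response.headers.get("operation-location"))
```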
diff --git a/articles/cognitive-services/language-service/custom-text-classification/how-to/create-project.md b/articles/cognitive-services/language-service/custom-text-classification/how-to/create-project.md index 4dbe41481c956..9c214e85f80da 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/how-to/create-project.md +++ b/articles/cognitive-services/language-service/custom-text-classification/how-to/create-project.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: how-to -ms.date: 05/06/2022 +ms.date: 06/03/2022 ms.author: aahi ms.custom: language-service-custom-classification, references_regions, ignite-fall-2021, event-tier1-build-2022 --- @@ -27,7 +27,7 @@ Before you start using custom text classification, you will need: Before you start using custom text classification, you will need an Azure Language resource. It is recommended to create your Language resource and connect a storage account to it in the Azure portal. Creating a resource in the Azure portal lets you create an Azure storage account at the same time, with all of the required permissions pre-configured. You can also read further in the article to learn how to use a pre-existing resource, and configure it to work with custom text classification. -You also will need an Azure storage account where you will upload your `.txt` files that will be used to train a model to classify text. +You also will need an Azure storage account where you will upload your `.txt` documents that will be used to train a model to classify text. > [!NOTE] > * You need to have an **owner** role assigned on the resource group to create a Language resource. @@ -35,13 +35,17 @@ You also will need an Azure storage account where you will upload your `.txt` fi ## Create Language resource and connect storage account + +> [!Note] +> You shouldn't move the storage account to a different resource group or subscription once it's linked with the Language resource. 
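As an aside (not one of the original steps), once a container is connected you could push your local `.txt` training documents into it with the `azure-storage-blob` package, as in the hedged Python sketch below; the connection string, container name, and folder path are placeholders.

```python
from pathlib import Path

from azure.storage.blob import BlobServiceClient

# Hypothetical placeholders -- use your storage account's connection string
# and the container you connected to the Language resource.
connection_string = "DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...;EndpointSuffix=core.windows.net"
container_name = "mycontainer"

service = BlobServiceClient.from_connection_string(connection_string)
container = service.get_container_client(container_name)

# Upload every .txt document from a local folder to the root of the container.
for path in Path("training-documents").glob("*.txt"):
    with path.open("rb") as data:
        container.upload_blob(name=path.name, data=data, overwrite=True)
        print(f"Uploaded {path.name}")
```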
+ ### [Using the Azure portal](#tab/azure-portal) [!INCLUDE [create a new resource from the Azure portal](../includes/resource-creation-azure-portal.md)] ### [Using Language Studio](#tab/language-studio) -[!INCLUDE [create a new resource from the Language Studio](../includes/language-studio/resource-creation-language-studio.md)] +[!INCLUDE [create a new resource from Language Studio](../includes/language-studio/resource-creation-language-studio.md)] ### [Using Azure PowerShell](#tab/azure-powershell) @@ -103,7 +107,7 @@ If you have already labeled data, you can use it to get started with the service ### [Language Studio](#tab/studio) -[!INCLUDE [Delete project using the Language studio](../includes/language-studio/delete-project.md)] +[!INCLUDE [Delete project using Language Studio](../includes/language-studio/delete-project.md)] ### [Rest APIs](#tab/apis) diff --git a/articles/cognitive-services/language-service/custom-text-classification/how-to/design-schema.md b/articles/cognitive-services/language-service/custom-text-classification/how-to/design-schema.md index cdb49f8a3605a..bb1f823acea84 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/how-to/design-schema.md +++ b/articles/cognitive-services/language-service/custom-text-classification/how-to/design-schema.md @@ -25,11 +25,11 @@ The schema defines the classes that you need your model to classify your text in For example, if you are classifying support tickets, you might need the following classes: *login issue*, *hardware issue*, *connectivity issue*, and *new equipment request*. -* **Avoid ambiguity in classes**: Ambiguity arises when the classes you specify share similar meaning to one another. The more ambiguous your schema is, the more tagged data you may need to differentiate between different classes. +* **Avoid ambiguity in classes**: Ambiguity arises when the classes you specify share similar meaning to one another. The more ambiguous your schema is, the more labeled data you may need to differentiate between different classes. - For example, if you are classifying food recipes, they may be similar to an extent. To differentiate between *dessert recipe* and *main dish recipe*, you may need to tag more examples to help your model distinguish between the two classes. Avoiding ambiguity saves time and yields better results. + For example, if you are classifying food recipes, they may be similar to an extent. To differentiate between *dessert recipe* and *main dish recipe*, you may need to label more examples to help your model distinguish between the two classes. Avoiding ambiguity saves time and yields better results. -* **Out of scope data**: When using your model in production, consider adding an *out of scope* class to your schema if you expect documents that don't belong to any of your classes. Then add a few documents to your dataset to be tagged as *out of scope*. The model can learn to recognize irrelevant documents, and predict their tags accordingly. +* **Out of scope data**: When using your model in production, consider adding an *out of scope* class to your schema if you expect documents that don't belong to any of your classes. Then add a few documents to your dataset to be labeled as *out of scope*. The model can learn to recognize irrelevant documents, and predict their labels accordingly. ## Data selection @@ -58,8 +58,11 @@ As a prerequisite for creating a custom text classification project, your traini You can only use `.txt`. documents for custom text. 
If your data is in other format, you can use [CLUtils parse command](https://github.com/microsoft/CognitiveServicesLanguageUtilities/blob/main/CustomTextAnalytics.CLUtils/Solution/CogSLanguageUtilities.ViewLayer.CliCommands/Commands/ParseCommand/README.md) to change your file format. - You can upload an annotated dataset, or you can upload an unannotated one and [tag your data](../how-to/tag-data.md) in Language studio. + You can upload an annotated dataset, or you can upload an unannotated one and [label your data](../how-to/tag-data.md) in Language studio. +## Test set + +When defining the testing set, make sure to include example documents that are not present in the training set. Defining the testing set is an important step to calculate the [model performance](view-model-evaluation.md#model-details). Also, make sure that the testing set include documents that represent all classes used in your project. ## Next steps diff --git a/articles/cognitive-services/language-service/custom-text-classification/how-to/improve-model.md b/articles/cognitive-services/language-service/custom-text-classification/how-to/improve-model.md index e38d97880a123..86fa448807359 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/how-to/improve-model.md +++ b/articles/cognitive-services/language-service/custom-text-classification/how-to/improve-model.md @@ -15,7 +15,7 @@ ms.custom: language-service-custom-classification, ignite-fall-2021, event-tier1 # Improve custom text classification model performance -In some cases, the model is expected to make predictions that are inconsistent with your tagged classes. Use this article to learn how to observe these inconsistencies and decide on the needed changes needed to improve your model performance. +In some cases, the model is expected to make predictions that are inconsistent with your labeled classes. Use this article to learn how to observe these inconsistencies and decide on the needed changes needed to improve your model performance. ## Prerequisites @@ -24,7 +24,7 @@ To optionally improve a model, you'll need to have: * [A custom text classification project](create-project.md) with a configured Azure blob storage account, * Text data that has [been uploaded](design-schema.md#data-preparation) to your storage account. -* [Tagged data](tag-data.md) to successfully [train a model](train-model.md). +* [Labeled data](tag-data.md) to successfully [train a model](train-model.md). * Reviewed the [model evaluation details](view-model-evaluation.md) to determine how your model is performing. * Familiarized yourself with the [evaluation metrics](../concepts/evaluation-metrics.md). @@ -32,7 +32,7 @@ See the [project development lifecycle](../overview.md#project-development-lifec ## Review test set predictions -After you have viewed your [model's evaluation](view-model-evaluation.md), you'll have formed an idea on your model performance. In this page, you can view how your model performs vs how it's expected to perform. You can view predicted and tagged classes side by side for each document in your test set. You can review documents that were predicted differently than they were originally tagged. +After you have viewed your [model's evaluation](view-model-evaluation.md), you'll have formed an idea on your model performance. In this page, you can view how your model performs vs how it's expected to perform. You can view predicted and labeled classes side by side for each document in your test set. 
You can review documents that were predicted differently than they were originally labeled. To review inconsistent predictions in the [test set](train-model.md#data-splitting) from within the [Language Studio](https://aka.ms/LanguageStudio): @@ -45,7 +45,11 @@ To review inconsistent predictions in the [test set](train-model.md#data-splitti Use the following information to help guide model improvements. -* If a file that should belong to class `X` is constantly classified as class `Y`, it means that there is ambiguity between these classes and you need to reconsider your schema. Learn more about [data selection and schema design](design-schema.md#schema-design). Another solution is to consider adding more data to these classes, to help the model improve and differentiate between them. +* If a file that should belong to class `X` is constantly classified as class `Y`, it means that there is ambiguity between these classes and you need to reconsider your schema. Learn more about [data selection and schema design](design-schema.md#schema-design). + +* Another solution is to consider adding more data to these classes, to help the model improve and differentiate between them. + +* Consider adding more data, to help the model differentiate between different classes. :::image type="content" source="../media/review-validation-set.png" alt-text="A screenshot showing model predictions in Language Studio." lightbox="../media/review-validation-set.png"::: diff --git a/articles/cognitive-services/language-service/custom-text-classification/how-to/tag-data.md b/articles/cognitive-services/language-service/custom-text-classification/how-to/tag-data.md index f7f8d2450b06c..3e0c95f6e8aeb 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/how-to/tag-data.md +++ b/articles/cognitive-services/language-service/custom-text-classification/how-to/tag-data.md @@ -58,8 +58,6 @@ Use the following steps to label your data: 4. In the right side pane, **Add class** to your project so you can start labeling your data with them. - :::image type="content" source="../media/tag-1.png" alt-text="A screenshot showing the data tagging screen" lightbox="../media/tag-1.png"::: - 5. Start labeling your files. # [Multi label classification](#tab/multi-classification) @@ -83,7 +81,9 @@ Use the following steps to label your data: > [!TIP] > If you are planning on using **Automatic** data spliting use the default option of assigning all the documents into your training set. -8. Under the **Distribution** pivot you can view the distribution of your labeled documents across training and testing sets. You can learn more about the training testing sets and how they are used [here](train-model.md#data-splitting). +8. Under the **Distribution** pivot you can view the distribution across training and testing sets. You have two options for viewing: + * *Total instances* where you can view count of all labeled instances of a specific class. + * *documents with at least one label* where each document is counted if it contains at least one labeled instance of this class. 9. While you're labeling, your changes will be synced periodically, if they have not been saved yet you will find a warning at the top of your page. If you want to save manually, click on **Save labels** button at the bottom of the page. 
diff --git a/articles/cognitive-services/language-service/custom-text-classification/how-to/train-model.md b/articles/cognitive-services/language-service/custom-text-classification/how-to/train-model.md index 0f53cc283c613..97c78465b54ef 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/how-to/train-model.md +++ b/articles/cognitive-services/language-service/custom-text-classification/how-to/train-model.md @@ -21,7 +21,7 @@ To train a model, start a training job. Only successfully completed jobs create The training times can be anywhere from a few minutes when dealing with few documents, up to several hours depending on the dataset size and the complexity of your schema. -Model evaluation is triggered automatically after training is completed successfully. The evaluation process starts by using the trained model to predict user defined classes for documents in the test set, and compares them with the provided data tags (which establishes a baseline of truth). The results are returned so you can review the [model’s performance](view-model-evaluation.md). + ## Prerequisites @@ -43,7 +43,7 @@ It is recommended to make sure that all your classes are adequately represented Custom text classification supports two methods for data splitting: -* **Automatically splitting the testing set from training data**: The system will split your tagged data between the training and testing sets, according to the percentages you choose. The recommended percentage split is 80% for training and 20% for testing. +* **Automatically splitting the testing set from training data**: The system will split your labeled data between the training and testing sets, according to the percentages you choose. The recommended percentage split is 80% for training and 20% for testing. > [!NOTE] > If you choose the **Automatically splitting the testing set from training data** option, only the data assigned to training set will be split according to the percentages provided. diff --git a/articles/cognitive-services/language-service/custom-text-classification/how-to/view-model-evaluation.md b/articles/cognitive-services/language-service/custom-text-classification/how-to/view-model-evaluation.md index aa501a2cab118..6c0850454b864 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/how-to/view-model-evaluation.md +++ b/articles/cognitive-services/language-service/custom-text-classification/how-to/view-model-evaluation.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: how-to -ms.date: 05/05/2022 +ms.date: 05/24/2022 ms.author: aahi ms.custom: language-service-custom-classification, ignite-fall-2021, event-tier1-build-2022 --- @@ -18,7 +18,7 @@ ms.custom: language-service-custom-classification, ignite-fall-2021, event-tier1 After your model has finished training, you can view the model performance and see the predicted classes for the documents in the test set. > [!NOTE] -> Using the **Automatically split the testing set from training data** option may result in different model evaluation result every time you [train a new model](train-model.md), as the test set is selected randomly from the data. To make sure that the evaluation is calculated on the same test set every time you train a model, make sure to use the **Use a manual split of training and testing data** option when starting a training job and define your **Test** documents when [tagging data](tag-data.md). 
+> Using the **Automatically split the testing set from training data** option may result in different model evaluation result every time you [train a new model](train-model.md), as the test set is selected randomly from the data. To make sure that the evaluation is calculated on the same test set every time you train a model, make sure to use the **Use a manual split of training and testing data** option when starting a training job and define your **Test** documents when [labeling data](tag-data.md). ## Prerequisites @@ -26,7 +26,7 @@ Before viewing model evaluation you need: * [A custom text classification project](create-project.md) with a configured Azure blob storage account. * Text data that has [been uploaded](design-schema.md#data-preparation) to your storage account. -* [Tagged data](tag-data.md) +* [Labeled data](tag-data.md) * A successfully [trained model](train-model.md) See the [project development lifecycle](../overview.md#project-development-lifecycle) for more information. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/cancel-training.md b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/cancel-training.md index 0ec189982cde8..a1c822ebacafe 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/cancel-training.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/cancel-training.md @@ -10,4 +10,4 @@ ms.author: aahi --- -To cancel a training job in [Language Studio](https://aka.ms/laguageStudio), go to the **Train model** page. Select the training job you want to cancel, and click on **Cancel** from the top menu. +To cancel a training job in [Language Studio](https://aka.ms/laguageStudio), go to the **Training jobs** page. Select the training job you want to cancel, and click on **Cancel** from the top menu. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/create-project.md b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/create-project.md index 8aaf34c719648..f5fffcc7dc317 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/create-project.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/create-project.md @@ -4,7 +4,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: include -ms.date: 04/22/2022 +ms.date: 05/24/2022 ms.author: aahi ms.custom: ignite-fall-2021, event-tier1-build-2022 --- @@ -16,7 +16,7 @@ ms.custom: ignite-fall-2021, event-tier1-build-2022 :::image type="content" source="../../media/select-custom-text-classification.png" alt-text="A screenshot showing the location of custom text classification in the Language Studio landing page." lightbox="../../media/select-custom-text-classification.png"::: -3. Select **Create new project** from the top menu in your projects page. Creating a project will let you tag data, train, evaluate, improve, and deploy your models. +3. Select **Create new project** from the top menu in your projects page. Creating a project will let you label data, train, evaluate, improve, and deploy your models. :::image type="content" source="../../media/create-project.png" alt-text="A screenshot of the project creation page." 
lightbox="../../media/create-project.png"::: @@ -34,7 +34,7 @@ ms.custom: ignite-fall-2021, event-tier1-build-2022 :::image type="content" source="../../media/project-types.png" alt-text="A screenshot of the available custom classification project types." lightbox="../../media/project-types.png"::: -5. Enter the project information, including a name, description, and the language of the files in your project. You won’t be able to change the name of your project later. Click **Next**. +5. Enter the project information, including a name, description, and the language of the documents in your project. You won’t be able to change the name of your project later. Click **Next**. >[!TIP] > Your dataset doesn't have to be entirely in the same language. You can have multiple documents, each with different supported languages. If your dataset contains documents of different languages or if you expect text from different languages during runtime, select **enable multi-lingual dataset** option when you enter the basic information for your project. This option can be enabled later from the **Project settings** page. @@ -42,6 +42,6 @@ ms.custom: ignite-fall-2021, event-tier1-build-2022 6. Select the container where you have uploaded your dataset. >[!Note] - > If you have already labeled your data make sure it follows the [supported format](../../concepts/data-formats.md) and click on **Yes, my files are already labels and I have formatted JSON labels file** and select the labels file from the drop-down menu below. Click **Next**. + > If you have already labeled your data make sure it follows the [supported format](../../concepts/data-formats.md) and click on **Yes, my documents are already labeled and I have formatted JSON labels file** and select the labels file from the drop-down menu below. Click **Next**. 7. Review the data you entered and select **Create Project**. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/delete-deployment.md b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/delete-deployment.md index 747fafc9c393d..7cfcea7426a18 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/delete-deployment.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/delete-deployment.md @@ -5,8 +5,8 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: include -ms.date: 05/04/2022 +ms.date: 05/24/2022 ms.author: aahi --- -To delete a deployment from within [Language Studio](https://aka.ms/laguageStudio), go to the **Deploy model** page. Select the deployment you want to delete and click on **Delete deployment** from the top menu. +To delete a deployment from within [Language Studio](https://aka.ms/laguageStudio), go to the **Deploying a model** page. Select the deployment you want to delete and click on **Delete deployment** from the top menu. 
diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/delete-model.md b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/delete-model.md index 093635a347ca4..7b1b193061e25 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/delete-model.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/delete-model.md @@ -6,14 +6,14 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: include -ms.date: 05/05/2022 +ms.date: 05/24/2022 ms.author: aahi --- To delete your model from within the [Language Studio](https://aka.ms/LanguageStudio): -1. Select **View model details** from the left side menu. +1. Select **Model performance** from the left side menu. 2. Click on the **model name** you want to delete and click **Delete** from the top menu. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/delete-project.md b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/delete-project.md index b2a7d07612a1c..fca1980ecc16d 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/delete-project.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/delete-project.md @@ -5,8 +5,8 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: include -ms.date: 04/22/2022 +ms.date: 05/24/2022 ms.author: aahi --- -When you don't need your project anymore, you can delete your project using [Language Studio](https://aka.ms/custom-classification). Select **Custom text classification** in the left navigation menu, then select the project you want to delete. Click on **Delete** from the top menu to delete the project. +When you don't need your project anymore, you can delete your project using [Language Studio](https://aka.ms/custom-classification). Select **Custom text classification** in the top, then select the project you want to delete. Click on **Delete** from the top menu to delete the project. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/deploy-model.md b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/deploy-model.md index a83bc0a7ab14d..0fdaec477d431 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/deploy-model.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/deploy-model.md @@ -5,16 +5,16 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: include -ms.date: 04/22/2022 +ms.date: 05/24/2022 ms.author: aahi --- To deploy your model from within the [Language Studio](https://aka.ms/LanguageStudio): -1. Select **Deploy model** from the left side menu. +1. Select **Deploying a model** from the left side menu. -2. Click on **Start deployment job** to start a new deployment job. +2. Click on **Add deployment** to start a new deployment job. 
:::image type="content" source="../../media/deploy-model.png" alt-text="A screenshot showing the deployment button" lightbox="../../media/deploy-model.png"::: diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/import-project.md b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/import-project.md index cee1bc7532ec4..ab049c497649b 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/import-project.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/import-project.md @@ -7,7 +7,7 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: include -ms.date: 05/06/2022 +ms.date: 05/24/2022 ms.author: aahi --- @@ -18,7 +18,7 @@ ms.author: aahi :::image type="content" source="../../media/select-custom-text-classification.png" alt-text="A screenshot showing the location of custom text classification in the Language Studio landing page." lightbox="../../media/select-custom-text-classification.png"::: -3. Select **Create new project** from the top menu in your projects page. Creating a project will let you tag data, train, evaluate, improve, and deploy your models. +3. Select **Create new project** from the top menu in your projects page. Creating a project will let you label data, train, evaluate, improve, and deploy your models. :::image type="content" source="../../media/create-project.png" alt-text="A screenshot of the project creation page." lightbox="../../media/create-project.png"::: @@ -36,14 +36,14 @@ ms.author: aahi :::image type="content" source="../../media/project-types.png" alt-text="A screenshot of the available custom classification project types." lightbox="../../media/project-types.png"::: -5. Enter the project information, including a name, description, and the language of the files in your project. You won’t be able to change the name of your project later. Click **Next**. +5. Enter the project information, including a name, description, and the language of the documents in your project. You won’t be able to change the name of your project later. Click **Next**. >[!TIP] > Your dataset doesn't have to be entirely in the same language. You can have multiple documents, each with different supported languages. If your dataset contains documents of different languages or if you expect text from different languages during runtime, select **enable multi-lingual dataset** option when you enter the basic information for your project. This option can be enabled later from the **Project settings** page. 6. Select the container where you have uploaded your dataset. -7. Click on **Yes, my files are already labeled and I have formatted JSON labels file** and select the labels file from the drop-down menu below to import your JSON tags file. Make sure it follows the [supported format](../../concepts/data-formats.md). +7. Click on **Yes, my documents are already labeled and I have formatted JSON labels file** and select the labels file from the drop-down menu below to import your JSON labels file. Make sure it follows the [supported format](../../concepts/data-formats.md). 7. Click **Next**. 
diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/model-evaluation.md b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/model-evaluation.md index c5ddb24b7e797..a95f634f7ac4f 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/model-evaluation.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/model-evaluation.md @@ -11,7 +11,7 @@ ms.author: aahi 1. Go to your project page in [Language Studio](https://aka.ms/languageStudio). -2. Select **View model details** from the menu on the left side of the screen. +2. Select **Model performance** from the menu on the left side of the screen. 3. In this page you can only view the successfully trained models, F1 score for each model and [model expiration date](../../../concepts/model-lifecycle.md). You can click on the model name for more details about its performance. @@ -20,5 +20,5 @@ ms.author: aahi :::image type="content" source="../../media/model-details-2.png" alt-text="A screenshot of the model performance metrics in Language Studio." lightbox="../../media/model-details-2.png"::: > [!NOTE] - > Classes that are neither tagged nor predicted in the test set will not be part of the displayed results. + > Classes that are neither labeled nor predicted in the test set will not be part of the displayed results. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/project-details.md b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/project-details.md index b035dae4c977d..439f665979186 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/project-details.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/project-details.md @@ -7,11 +7,11 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: include -ms.date: 05/06/2022 +ms.date: 05/24/2022 ms.author: aahi --- -1. Go to your project settings page in [Language Studio](https://aka.ms/languageStudio). +1. Go to your **project settings** page in [Language Studio](https://aka.ms/languageStudio). 2. You can see project details. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/swap-deployment.md b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/swap-deployment.md index b0c5e66770801..193f40232bc8b 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/swap-deployment.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/swap-deployment.md @@ -13,7 +13,7 @@ ms.author: aahi To swap deployments from within [Language Studio](https://aka.ms/laguageStudio) -1. In the **Deploy model** page, select the two deployments you want to swap and click on **Swap deployments** from the top menu. +1. In **Deploying a model** page, select the two deployments you want to swap and click on **Swap deployments** from the top menu. 2. From the window that appears, select the names of the deployments you want to swap. 
diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/test-model.md b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/test-model.md index b938863c39bc7..bb31b6e43ec0b 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/test-model.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/test-model.md @@ -5,14 +5,14 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: include -ms.date: 04/22/2022 +ms.date: 05/24/2022 ms.author: aahi ms.custom: language-service-custom-classification, event-tier1-build-2022 --- To test your deployed models within [Language Studio](https://aka.ms/LanguageStudio): -1. Select **Test model** from the menu on the left side of the screen. +1. Select **Testing deployments** from the menu on the left side of the screen. 2. Select the model you want to test. You can only test models that are assigned to deployments. @@ -20,7 +20,7 @@ To test your deployed models within [Language Studio](https://aka.ms/LanguageStu 4. Select the deployment you want to query/test from the dropdown. -5. Enter the text you want to submit in the request, or upload a `.txt` file to use. +5. Enter the text you want to submit in the request, or upload a `.txt` document to use. 6. Click on **Run the test** from the top menu. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/train-model.md b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/train-model.md index 366395f21565b..85b35f0631759 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/train-model.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/language-studio/train-model.md @@ -5,14 +5,14 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: include -ms.date: 04/22/2022 +ms.date: 05/24/2022 ms.author: aahi ms.custom: language-service-custom-classification, event-tier1-build-2022 --- To start training your model from within the [Language Studio](https://aka.ms/LanguageStudio): -1. Select **Train model** from the left side menu. +1. Select **Training jobs** from the left side menu. 2. Select **Start a training job** from the top menu. 
diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/quickstarts/language-studio.md b/articles/cognitive-services/language-service/custom-text-classification/includes/quickstarts/language-studio.md index 1db57777a80b7..9dcd900a48455 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/quickstarts/language-studio.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/quickstarts/language-studio.md @@ -5,7 +5,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: include -ms.date: 04/14/2022 +ms.date: 06/03/2022 ms.author: aahi ms.custom: language-service-custom-classification, ignite-fall-2021, event-tier1-build-2022 --- @@ -19,7 +19,7 @@ ms.custom: language-service-custom-classification, ignite-fall-2021, event-tier1 Before you can use custom text classification, you’ll need to create an Azure Language resource, which will give you the credentials that you need to create a project and start training a model. You’ll also need an Azure storage account, where you can upload your dataset that will be used in building your model. > [!IMPORTANT] -> To get started quickly, we recommend creating a new Azure Language resource using the steps provided in this article, which will let you create the Language, and create and/or connect a storage account at the same time, which is easier than doing it later. +> To get started quickly, we recommend creating a new Azure Language resource using the steps provided in this article, which will let you create the Language resource, and create and/or connect a storage account at the same time, which is easier than doing it later. > > If you have a [pre-existing resource](../../how-to/create-project.md#using-a-pre-existing-language-resource) that you'd like to use, you will need to connect it to storage account. @@ -37,13 +37,13 @@ Once your resource and storage container are configured, create a new custom tex ## Train your model -Typically after you create a project, you go ahead and start [tagging the documents](../../how-to/tag-data.md) you have in the container connected to your project. For this quickstart, you have imported a sample tagged dataset and initialized your project with the sample JSON tags file. +Typically after you create a project, you go ahead and start [labeling the documents](../../how-to/tag-data.md) you have in the container connected to your project. For this quickstart, you have imported a sample labeled dataset and initialized your project with the sample JSON labels file. [!INCLUDE [Train a model using Language Studio](../language-studio/train-model.md)] ## Deploy your model -Generally after training a model you would review it's [evaluation details](../../how-to/view-model-evaluation.md) and [make improvements](../../how-to/improve-model.md) if necessary. In this quickstart, you will just deploy your model, and make it available for you to try in the Language studio, or you can call the [prediction API](https://aka.ms/ct-runtime-swagger). +Generally after training a model you would review it's [evaluation details](../../how-to/view-model-evaluation.md) and [make improvements](../../how-to/improve-model.md) if necessary. In this quickstart, you will just deploy your model, and make it available for you to try in Language Studio, or you can call the [prediction API](https://aka.ms/ct-runtime-swagger). 
[!INCLUDE [Deploy a model using Language Studio](../language-studio/deploy-model.md)] diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/quickstarts/rest-api.md b/articles/cognitive-services/language-service/custom-text-classification/includes/quickstarts/rest-api.md index 622e34025ca94..0a74d31aa628a 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/quickstarts/rest-api.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/quickstarts/rest-api.md @@ -4,7 +4,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: include -ms.date: 05/06/2022 +ms.date: 06/03/2022 ms.author: aahi ms.custom: language-service-custom-classification, ignite-fall-2021, event-tier1-build-2022 --- @@ -18,7 +18,7 @@ ms.custom: language-service-custom-classification, ignite-fall-2021, event-tier1 Before you can use custom text classification, you’ll need to create an Azure Language resource, which will give you the credentials that you need to create a project and start training a model. You’ll also need an Azure storage account, where you can upload your dataset that will be used in building your model. > [!IMPORTANT] -> To get started quickly, we recommend creating a new Azure Language resource using the steps provided in this article, which will let you create the Language, and create and/or connect a storage account at the same time, which is easier than doing it later. +> To get started quickly, we recommend creating a new Azure Language resource using the steps provided in this article, which will let you create the Language resource, and create and/or connect a storage account at the same time, which is easier than doing it later. > > If you have a [pre-existing resource](../../how-to/create-project.md#using-a-pre-existing-language-resource) that you'd like to use, you will need to connect it to storage account. @@ -63,7 +63,7 @@ Training could take sometime between 10 and 30 minutes. You can use the followin ## Deploy your model -Generally after training a model you would review it's [evaluation details](../../how-to/view-model-evaluation.md) and [make improvements](../../how-to/improve-model.md) if necessary. In this quickstart, you will just deploy your model, and make it available for you to try in the Language studio, or you can call the [prediction API](https://aka.ms/ct-runtime-swagger). +Generally after training a model you would review it's [evaluation details](../../how-to/view-model-evaluation.md) and [make improvements](../../how-to/improve-model.md) if necessary. In this quickstart, you will just deploy your model, and make it available for you to try in Language Studio, or you can call the [prediction API](https://aka.ms/ct-runtime-swagger). 
### Submit deployment job diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/resource-creation-azure-portal.md b/articles/cognitive-services/language-service/custom-text-classification/includes/resource-creation-azure-portal.md index 9c61583055480..17b1fbe932430 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/resource-creation-azure-portal.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/resource-creation-azure-portal.md @@ -5,7 +5,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: include -ms.date: 05/09/2022 +ms.date: 06/02/2022 ms.author: aahi ms.custom: language-service-custom-classification, event-tier1-build-2022 --- @@ -14,25 +14,19 @@ ms.custom: language-service-custom-classification, event-tier1-build-2022 1. Go to the [Azure portal](https://portal.azure.com/#create/Microsoft.CognitiveServicesTextAnalytics) to create a new Azure Language resource. -2. Click on **Create a new resource** - -3. In the window that appears, search for **Language service** - -4. Click **Create** - -5. In the window that appears, select **Custom text classification & custom named entity recognition** from the custom features. Click **Continue to create your resource**. +1. In the window that appears, select **Custom text classification & custom named entity recognition (preview)** from the custom features. Click **Continue to create your resource** at the bottom of the screen. :::image type="content" source="../media/select-custom-feature-azure-portal.png" alt-text="A screenshot showing the selection option for custom text classification and custom named entity recognition in Azure portal." lightbox="../media/select-custom-feature-azure-portal.png"::: -6. Create a Language resource with following details. +1. Create a Language resource with following details. |Instance detail |Required value | |---------|---------| - |Location | Learn more about [supported regions](../service-limits.md#regional-availability). | - |Pricing tier | Learn more about [supported pricing tiers](../service-limits.md#pricing-tiers). | + |Region | One of the [supported regions](../service-limits.md#regional-availability). You can use "West US 2" for this quickstart. | + |Pricing tier | One of the [supported pricing tiers](../service-limits.md#pricing-tiers). You can use the Free (F0) tier for this quickstart. | -7. In the **Custom text classification & custom named entity recognition** section, select an existing storage account or select **Create a new storage account**. Note that these values are to help you get started, and not necessarily the [storage account values](/azure/storage/common/storage-account-overview) you’ll want to use in production environments. To avoid latency during building your project connect to storage accounts in the same region as your Language resource. +1. In the **Custom text classification & custom named entity recognition** section, select an existing storage account or select **New storage account**. Note that these values are to help you get started, and not necessarily the [storage account values](/azure/storage/common/storage-account-overview) you’ll want to use in production environments. To avoid latency during building your project connect to storage accounts in the same region as your Language resource. 
|Storage account value |Recommended value | |---------|---------| @@ -40,3 +34,5 @@ ms.custom: language-service-custom-classification, event-tier1-build-2022 | Account kind| Storage (general purpose v1) | | Performance | Standard | | Replication | Locally redundant storage (LRS) | + +1. Make sure the **Responsible AI Notice** is checked. Select **Review + create** at the bottom of the page, then select **Create**. \ No newline at end of file diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/cancel-training.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/cancel-training.md index 6f17a67f9b76b..96fa6a13bc243 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/cancel-training.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/cancel-training.md @@ -5,27 +5,27 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: include -ms.date: 05/05/2022 +ms.date: 05/24/2022 ms.author: aahi --- -Create a **POST** request using the following URL, headers, and JSON body to cancel a training job. +Create a **POST** request by using the following URL, headers, and JSON body to cancel a training job. ### Request URL Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}/:cancel?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}/:cancel?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{JOB-ID} | This is the training job ID| |`XXXXX-XXXXX-XXXX-XX| -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{JOB-ID}` | This value is the training job ID.| `XXXXX-XXXXX-XXXX-XX`| +|`{API-VERSION}` | The version of the API you're calling. The value referenced is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -35,4 +35,4 @@ Use the following header to authenticate your request. |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a 204 response indicating success, which means your training job has been canceled. +After you send your API request, you'll receive a 202 response with an `Operation-Location` header used to check the status of the job.
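For readers who prefer to script this step, the following Python sketch sends the cancel request described above with the `requests` library. It uses only the URL, query parameter, and `Ocp-Apim-Subscription-Key` header shown in the tables; the endpoint, key, project name, and job ID values are placeholders you substitute with your own.

```python
# Minimal sketch: cancel a training job (all values below are placeholders, not real resources).
import requests

ENDPOINT = "https://<your-resource>.cognitiveservices.azure.com"
KEY = "<your-resource-key>"
PROJECT_NAME = "EmailApp"        # case-sensitive project name
JOB_ID = "XXXXX-XXXXX-XXXX-XX"   # ID of the training job to cancel
API_VERSION = "2022-05-01"

url = (
    f"{ENDPOINT}/language/authoring/analyze-text/projects/"
    f"{PROJECT_NAME}/train/jobs/{JOB_ID}/:cancel"
)
headers = {"Ocp-Apim-Subscription-Key": KEY}

# The cancel call takes no request body; a 202 status means the cancellation was accepted.
response = requests.post(url, params={"api-version": API_VERSION}, headers=headers)
print(response.status_code)                        # expect 202
print(response.headers.get("Operation-Location"))  # poll this URL to check the job status
```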
diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/create-project.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/create-project.md index f8ded084ec6b5..b7490b169ec73 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/create-project.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/create-project.md @@ -4,27 +4,30 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: include -ms.date: 04/05/2022 +ms.date: 05/24/2022 ms.author: aahi --- -To start creating a custom text classification model, you need to create a project. Creating a project will let you tag data, train, evaluate, improve, and deploy your models. +To start creating a custom text classification model, you need to create a project. Creating a project will let you label data, train, evaluate, improve, and deploy your models. > [!NOTE] > The project name is case-sensitive for all operations. -Create a **POST** request using the following URL, headers, and JSON body to create your project and import the tags file. +Create a **PATCH** request using the following URL, headers, and JSON body to create your project. ### Request URL -Use the following URL to create a project and import your tags file. Replace the placeholder values below with your own values. +Use the following URL to create a project. Replace the placeholder values below with your own values. ```rest -{YOUR-ENDPOINT}/language/analyze-text/projects/{projectName}/:import?api-version=2021-11-01-preview +{Endpoint}/language/authoring/analyze-text/projects/{projectName}?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| -|`{YOUR-ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | +|`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | +|`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | + ### Headers @@ -38,54 +41,54 @@ Use the following header to authenticate your request. Use the following JSON in your request. Replace the placeholder values below with your own values. 
+# [Multi label classification](#tab/multi-classification) + ```json { - "api-version": "2021-11-01-preview", - "metadata": { - "name": "MyProject", - "multiLingual": true, - "description": "Trying out custom text classification", - "modelType": "multiClassification", - "language": "string", - "storageInputContainerName": "YOUR-CONTAINER-NAME", - "settings": {} - }, - "assets": { - "classifiers": [ - { - "name": "Class1" - } - ], - "documents": [ - { - "location": "doc1.txt", - "language": "en-us", - "dataset": "Train", - "classifiers": [ - { - "classifierName": "Class1" - } - ] - } - ] - } + "projectName": "{PROJECT-NAME}", + "language": "{LANGUAGE-CODE}", + "projectKind": "customMultiLabelClassification", + "description": "Project description", + "multilingual": "True", + "storageInputContainerName": "{CONTAINER-NAME}" } + ``` -For the metadata key: -|Key |Value | Example | -|---------|---------|---------| -| `modelType ` | Your Model type, for single label classification use `singleClassification`. | multiClassification | -|`storageInputContainerName` | The name of your Azure blob storage container. | `myContainer` | +|Key |Placeholder|Value | Example | +|---------|---------|---------|--| +| projectName | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | +| language | `{LANGUAGE-CODE}` | A string specifying the language code for the documents used in your project. If your project is a multilingual project, choose the language code of the majority of the documents. See [language support](../../language-support.md) to learn more about supported language codes. |`en-us`| +| projectKind | `customMultiLabelClassification` | Your project kind. | `customMultiLabelClassification` | +| multilingual | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language (not necessarily included in your training documents. See [language support](../../language-support.md#multi-lingual-option) to learn more about multilingual support. | `true`| +| storageInputContainerName | `{CONTAINER-NAME}` | The name of your Azure storage container where you have uploaded your documents. | `myContainer` | -For the documents key: +--- -|Key |Value | Example | -|---------|---------|---------| -| `location` | Document name on the blob store. | `doc2.txt` | -|`language` | The language of the document. | `en-us` | -|`dataset` | Optional field to specify the dataset which this document will belong to. | `Train` or `Test` | +# [Single label classification](#tab/single-classification) -This request will return an error if: +```json +{ + "projectName": "{PROJECT-NAME}", + "language": "{LANGUAGE-CODE}", + "projectKind": "customSingleLabelClassification", + "description": "Project description", + "multilingual": "True", + "storageInputContainerName": "{CONTAINER-NAME}" +} +``` +|Key |Placeholder|Value | Example | +|---------|---------|---------|--| +| projectName | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | +| language | `{LANGUAGE-CODE}` | A string specifying the language code for the documents used in your project. If your project is a multilingual project, choose the language code of the majority of the documents. See [language support](../../language-support.md) to learn more about supported language codes. |`en-us`| +| projectKind | `customSingleLabelClassification` | Your project kind. 
| `customSingleLabelClassification` | +| multilingual | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language (not necessarily included in your training documents. See [language support](../../language-support.md#multi-lingual-option) to learn more about multilingual support. | `true`| +| storageInputContainerName | `{CONTAINER-NAME}` | The name of your Azure storage container where you have uploaded your documents. | `myContainer` | + +--- -* The selected resource doesn't have proper permission for the storage account. \ No newline at end of file +This request will return a 201 response, which means that the project is created. + + +This request will return an error if: +* The selected resource doesn't have proper permission for the storage account. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-deployment.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-deployment.md index 5ad5b4254419e..6a7835dfcae03 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-deployment.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-deployment.md @@ -18,7 +18,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d ### Request URL ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/deployments/{deploymentName}?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{PROJECT-NAME}/deployments/{deploymentName}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -26,7 +26,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{DEPLOYMENT-NAME}` | The name for your deployment name. This value is case-sensitive. | `prod` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers @@ -37,4 +37,6 @@ Use the following header to authenticate your request. |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a `202` response indicating success, which means your deployment has been deleted. +Once you send your API request, you will receive a `202` response indicating success, which means your deployment has been deleted. A successful call results with an `Operation-Location` header used to check the status of the job. 
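As a companion to the request description above, here is a minimal Python sketch of the same **DELETE** call using the `requests` library. The URL and header mirror the tables in this include; the endpoint, key, project name, and deployment name are placeholder assumptions.

```python
# Minimal sketch: delete a deployment (placeholder values; substitute your own).
import requests

ENDPOINT = "https://<your-resource>.cognitiveservices.azure.com"
KEY = "<your-resource-key>"
PROJECT_NAME = "myProject"    # case-sensitive
DEPLOYMENT_NAME = "prod"      # case-sensitive
API_VERSION = "2022-05-01"

url = (
    f"{ENDPOINT}/language/authoring/analyze-text/projects/"
    f"{PROJECT_NAME}/deployments/{DEPLOYMENT_NAME}"
)
headers = {"Ocp-Apim-Subscription-Key": KEY}

response = requests.delete(url, params={"api-version": API_VERSION}, headers=headers)
print(response.status_code)                        # expect 202 on success
print(response.headers.get("Operation-Location"))  # job-status URL returned on success
```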
+ + diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-model.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-model.md index d05a551424a85..9edc29f982347 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-model.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-model.md @@ -12,13 +12,13 @@ ms.author: aahi -Create a **DELETE** request using the following URL, headers, and JSON body to delete a model. +Create a **DELETE** request using the following URL, headers, and JSON body to delete a trained model. ### Request URL ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -26,7 +26,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{trainedModelLabel}` | The name for your model name. This value is case-sensitive. | `model1` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers @@ -37,4 +37,4 @@ Use the following header to authenticate your request. |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a `202` response indicating success, which means your model has been deleted. +Once you send your API request, you will receive a `204` response indicating success, which means your trained model has been deleted. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-project.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-project.md index 99589d0a44566..974394d4a0e3e 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-project.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-project.md @@ -12,14 +12,14 @@ ms.author: aahi When you no longer need your project, you can delete it with the following **DELETE** request. Replace the placeholder values with your own values. ```rest -{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{projectName}?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. 
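Several calls in this section (deleting a project or deployment, importing, exporting, and training) return a `202` response whose `Operation-Location` header points at an asynchronous job. The sketch below shows one plausible way to poll such a URL with Python's `requests` library until the job finishes; the exact set of `status` values is an assumption based on the job responses shown elsewhere in these includes ("notStarted", "running", "succeeded", and so on).

```python
# Sketch: poll an Operation-Location URL returned by one of the asynchronous authoring jobs.
# The URL already embeds the project name, job ID, and api-version, so only the key is needed.
import time
import requests

operation_location = "<value of the Operation-Location response header>"
headers = {"Ocp-Apim-Subscription-Key": "<your-resource-key>"}

while True:
    job = requests.get(operation_location, headers=headers).json()
    status = job.get("status")
    print(status)
    if status not in ("notStarted", "running"):  # assumed terminal states: succeeded, failed, cancelled
        break
    time.sleep(5)  # wait a few seconds between polls
```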
| `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers @@ -30,4 +30,4 @@ Use the following header to authenticate your request. |Ocp-Apim-Subscription-Key| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a `202` response indicating success, which means your project has been deleted. +Once you send your API request, you will receive a `202` response indicating success, which means your project has been deleted. A successful call results with an `Operation-Location` header used to check the status of the job. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/deploy-model.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/deploy-model.md index d7dc85af40e51..c374499d7a431 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/deploy-model.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/deploy-model.md @@ -13,7 +13,7 @@ ms.author: aahi Submit a **PUT** request using the following URL, headers, and JSON body to submit a deployment job. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{projectName}/deployments/{deploymentName}?api-version={API-VERSION} ``` | Placeholder |Value | Example | @@ -21,7 +21,7 @@ Submit a **PUT** request using the following URL, headers, and JSON body to subm | `{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | | `{DEPLOYMENT-NAME}` | The name of your deployment. This value is case-sensitive. | `staging` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | #### Headers @@ -45,7 +45,7 @@ Use the following JSON in the body of your request. Use the name of the model yo |---------|---------|-----|----| | trainedModelLabel | `{MODEL-NAME}` | The model name that will be assigned to your deployment. You can only assign successfully trained models. This value is case-sensitive. | `myModel` | -Once you send your API request, you’ll receive a `202` response indicating that the job was submitted correctly. In the response headers, extract the `location` value. 
It will be formatted like this: +Once you send your API request, you’ll receive a `202` response indicating that the job was submitted correctly. In the response headers, extract the `operation-location` value. It will be formatted like this: ```rest {ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/export-project.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/export-project.md index 9caaf50e801fc..60144a9f98c45 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/export-project.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/export-project.md @@ -6,7 +6,7 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: include -ms.date: 05/05/2022 +ms.date: 05/24/2022 ms.author: aahi --- @@ -18,14 +18,14 @@ Create a **POST** request using the following URL, headers, and JSON body to exp Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/:export?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/:export?stringIndexType=Utf16CodeUnit&api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `MyProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is the latest [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is the latest [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-05-01` | ### Headers @@ -51,4 +51,4 @@ Once you send your API request, you’ll receive a `202` response indicating tha {ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/export/jobs/{JOB-ID}?api-version={API-VERSION} ``` -{JOB-ID} is used to identify your request, since this operation is asynchronous. You’ll use this URL to get the export job status. +`{JOB-ID}` is used to identify your request, since this operation is asynchronous. You’ll use this URL to get the export job status. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-deployment-status.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-deployment-status.md index 2b6d73dbe80f2..2802c682324d5 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-deployment-status.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-deployment-status.md @@ -22,7 +22,7 @@ Use the following **GET** request to query the status of the deployment job. You | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | | `{DEPLOYMENT-NAME}` | The name of your deployment. This value is case-sensitive. 
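To make the export flow concrete, here is a short Python sketch of the **POST** request above, again with placeholder values. It assumes the job URL comes back in the `Operation-Location` header (with `location` as a fallback), matching the pattern used by the other asynchronous jobs in this section.

```python
# Sketch: start an export job and capture the job URL that contains {JOB-ID}.
import requests

ENDPOINT = "https://<your-resource>.cognitiveservices.azure.com"
KEY = "<your-resource-key>"
PROJECT_NAME = "MyProject"   # case-sensitive
API_VERSION = "2022-05-01"

url = f"{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT_NAME}/:export"
params = {"stringIndexType": "Utf16CodeUnit", "api-version": API_VERSION}
headers = {"Ocp-Apim-Subscription-Key": KEY}

response = requests.post(url, params=params, headers=headers)
print(response.status_code)  # expect 202

# The job URL ends with /export/jobs/{JOB-ID}?api-version=...; use it to get the export job status.
job_url = response.headers.get("Operation-Location") or response.headers.get("location")
print(job_url)
```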
| `staging` | |`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | #### Headers @@ -35,7 +35,7 @@ Use the following header to authenticate your request. ### Response Body -Once you send the request, you will get the following response. Keep polling this endpoint until the **status** parameter changes to "succeeded". +Once you send the request, you will get the following response. Keep polling this endpoint until the **status** parameter changes to "succeeded". You should get a `200` code to indicate the success of the request. ```json { diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-export-status.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-export-status.md index 0a42cbeeb725b..2d1041f06e8f7 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-export-status.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-export-status.md @@ -22,7 +22,7 @@ Use the following **GET** request to get the status of exporting your project as |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers @@ -38,9 +38,9 @@ Use the following header to authenticate your request. 
{ "resultUrl": "{RESULT-URL}", "jobId": "string", - "createdDateTime": "2021-10-19T23:24:41.572Z", - "lastUpdatedDateTime": "2021-10-19T23:24:41.572Z", - "expirationDateTime": "2021-10-19T23:24:41.572Z", + "createdDateTime": "2021-10-19T23:24:41.572Z", + "lastUpdatedDateTime": "2021-10-19T23:24:41.572Z", + "expirationDateTime": "2021-10-19T23:24:41.572Z", "status": "unknown", "errors": [ { diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-import-status.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-import-status.md index 87a0fe43a83e0..7f7f544fb05fa 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-import-status.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-import-status.md @@ -22,7 +22,7 @@ Use the following **GET** request to get the status of your importing your proje |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your model's training status. This value is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | #### Headers diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-project-details.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-project-details.md index f74204df65b8d..faab888e356b4 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-project-details.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-project-details.md @@ -20,7 +20,7 @@ Use the following **GET** request to get your project details. Replace the place |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | #### Headers @@ -38,12 +38,13 @@ Use the following header to authenticate your request. 
"lastModifiedDateTime": "2021-10-19T23:24:41.572Z", "lastTrainedDateTime": "2021-10-19T23:24:41.572Z", "lastDeployedDateTime": "2021-10-19T23:24:41.572Z", - "modelType": "{MODEL-TYPE}", + "projectKind": "customMultiLabelClassification", "storageInputContainerName": "{CONTAINER-NAME}", - "name": "myProject", - "multiLingual": true, - "description": "string", - "language": "en-us", - "settings": {} + "projectName": "{PROJECT-NAME}", + "multilingual": false, + "description": "Project description", + "language": "{LANGUAGE-CODE}" } ``` +Once you send your API request, you will receive a `200` response indicating success and JSON response body with your project details. + diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-results.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-results.md index 7ab2b71b81aef..21112c68c66bd 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-results.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-results.md @@ -9,10 +9,15 @@ ms.date: 05/04/2022 ms.author: aahi --- +Use the following **GET** request to query the status/results of the text classification task. -Use the following **GET** request to query the status/results of the custom classification task. You can use the endpoint you received from the previous step. - -`{ENDPOINT}/text/analytics/v3.2-preview.2/analyze/jobs/`. +```rest +{ENDPOINT}/language/analyze-text/jobs/{JOB-ID}?api-version={API-VERSION} +``` +|Placeholder |Value | Example | +|---------|---------|---------| +|`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) for more information on other available API versions. | `2022-05-01` | #### Headers @@ -28,67 +33,44 @@ The response will be a JSON document with the following parameters. 
```json { - "createdDateTime": "2021-05-19T14:32:25.578Z", - "displayName": "{JOB-NAME}", - "expirationDateTime": "2021-05-19T14:32:25.578Z", - "jobId": "3fa85f64-5717-4562-b3fc-2c963f66afa6", - "lastUpdateDateTime": "2021-05-19T14:32:25.578Z", - "status": "completed", - "errors": [], - "tasks": { - "details": { - "name": "{JOB-NAME}", - "lastUpdateDateTime": "2021-03-29T19:50:23Z", - "status": "completed" - }, - "completed": 1, - "failed": 0, - "inProgress": 0, - "total": 1, - "tasks": { - "customMultiClassificationTasks": [ - { - "lastUpdateDateTime": "2021-05-19T14:32:25.579Z", - "name": "{JOB-NAME}", - "status": "completed", - "results": { - "documents": [ - { - "id": "{DOC-ID}", - "classes": [ - { - "category": "Class_1", - "confidenceScore": 0.0551877357 - } - ], - "warnings": [] - }, - { - "id": "{DOC-ID}", - "classes": [ - { - "category": "Class_1", - "confidenceScore": 0.0551877357 - }, - { - "category": "Class_2", - "confidenceScore": 0.0551877357 - } - ], - "warnings": [] - } - ], - "errors": [], - "statistics": { - "documentsCount":0, - "erroneousDocumentsCount":0, - "transactionsCount":0 - } - } - } - ] + "createdDateTime": "2021-05-19T14:32:25.578Z", + "displayName": "MyJobName", + "expirationDateTime": "2021-05-19T14:32:25.578Z", + "jobId": "xxxx-xxxxxx-xxxxx-xxxx", + "lastUpdateDateTime": "2021-05-19T14:32:25.578Z", + "status": "succeeded", + "tasks": { + "completed": 1, + "failed": 0, + "inProgress": 0, + "total": 1, + "items": [ + { + "kind": "customMultiClassificationTasks", + "taskName": "Classify documents", + "lastUpdateDateTime": "2020-10-01T15:01:03Z", + "status": "succeeded", + "results": { + "documents": [ + { + "id": "{DOC-ID}", + "classes": [ + { + "category": "Class_1", + "confidenceScore": 0.0551877357 + } + ], + "warnings": [] + } + ], + "errors": [], + "modelVersion": "2020-04-01" } - } + } + ] + } +} + ``` # [Single label classification](#tab/single-classification) @@ -96,63 +78,44 @@ The response will be a JSON document with the following parameters. 
```json { - "createdDateTime": "2021-05-19T14:32:25.578Z", - "displayName": "{JOB-NAME}", - "expirationDateTime": "2021-05-19T14:32:25.578Z", - "jobId": "3fa85f64-5717-4562-b3fc-2c963f66afa6", - "lastUpdateDateTime": "2021-05-19T14:32:25.578Z", - "status": "completed", - "errors": [], - "tasks": { - "details": { - "name": "{JOB-NAME}", - "lastUpdateDateTime": "2021-03-29T19:50:23Z", - "status": "completed" - }, - "completed": 1, - "failed": 0, - "inProgress": 0, - "total": 1, - "tasks": { - "customSingleClassificationTasks": [ - { - "lastUpdateDateTime": "2021-05-19T14:32:25.579Z", - "name": "{JOB-NAME}", - "status": "completed", - "results": { - "documents": [ - { - "id": "{DOC-ID}", - "classes": [ - { - "category": "Class_1", - "confidenceScore": 0.0551877357 - } - ], - "warnings": [] - }, - { - "id": "{DOC-ID}", - "classes": [ - { - "category": "Class_2", - "confidenceScore": 0.0551877357 - } - ], - "warnings": [] - } - ], - "errors": [], - "statistics": { - "documentsCount":0, - "erroneousDocumentsCount":0, - "transactionsCount":0 - } - } - } - ] + "createdDateTime": "2021-05-19T14:32:25.578Z", + "displayName": "MyJobName", + "expirationDateTime": "2021-05-19T14:32:25.578Z", + "jobId": "xxxx-xxxxxx-xxxxx-xxxx", + "lastUpdateDateTime": "2021-05-19T14:32:25.578Z", + "status": "succeeded", + "tasks": { + "completed": 1, + "failed": 0, + "inProgress": 0, + "total": 1, + "items": [ + { + "kind": "customSingleClassificationTasks", + "taskName": "Classify documents", + "lastUpdateDateTime": "2020-10-01T15:01:03Z", + "status": "succeeded", + "results": { + "documents": [ + { + "id": "{DOC-ID}", + "classes": [ + { + "category": "Class_1", + "confidenceScore": 0.0551877357 + } + ], + "warnings": [] + } + ], + "errors": [], + "modelVersion": "2020-04-01" } - } + } + ] + } +} + ``` --- diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-training-status.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-training-status.md index d67f8776f9d47..43407cf354ff8 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-training-status.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-training-status.md @@ -22,7 +22,7 @@ Use the following **GET** request to get the status of your model's training pro |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your model's training status. This value is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | #### Headers @@ -38,39 +38,25 @@ Once you send the request, you’ll get the following response. 
```json { - "jobs": [ - { - "result": { - "trainedModelLabel": "{MODEL-NAME}", - "trainingConfigVersion": "string", - "trainStatus": { - "percentComplete": 0, - "elapsedTime": "string" - }, - "evaluationStatus": { - "percentComplete": 0, - "elapsedTime": "string" - } - }, - "jobId": "string", - "createdDateTime": "2022-04-12T12:13:28.771Z", - "lastUpdatedDateTime": "2022-04-12T12:13:28.771Z", - "expirationDateTime": "2022-04-12T12:13:28.771Z", - "status": "unknown", - "warnings": [ - { - "code": "unknown", - "message": "string" - } - ], - "errors": [ - { - "code": "unknown", - "message": "string" - } - ] + "result": { + "modelLabel": "{MODEL-NAME}", + "trainingConfigVersion": "{CONFIG-VERSION}", + "estimatedEndDateTime": "2022-04-18T15:47:58.8190649Z", + "trainingStatus": { + "percentComplete": 3, + "startDateTime": "2022-04-18T15:45:06.8190649Z", + "status": "running" + }, + "evaluationStatus": { + "percentComplete": 0, + "status": "notStarted" } - ] + }, + "jobId": "{JOB-ID}", + "createdDateTime": "2022-04-18T15:44:44Z", + "lastUpdatedDateTime": "2022-04-18T15:45:48Z", + "expirationDateTime": "2022-04-25T15:44:44Z", + "status": "running" } ``` diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/import-project.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/import-project.md index c66396d220451..4d72c9a68e43e 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/import-project.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/import-project.md @@ -5,22 +5,23 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: include -ms.date: 05/04/2022 +ms.date: 05/24/2022 ms.author: aahi --- -Submit a **POST** request using the following URL, headers, and JSON body to import your tags file. Make sure that your tags file follow the [accepted tags file format](../../concepts/data-formats.md). +Submit a **POST** request using the following URL, headers, and JSON body to import your labels file. Make sure that your labels file follow the [accepted format](../../concepts/data-formats.md). +If a project with the same name already exists, the data of that project is replaced. ```rest -{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/:import?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{projectName}/:import?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers @@ -39,83 +40,87 @@ Use the following JSON in your request. 
Replace the placeholder values below wit ```json { - "api-version": "{API-VERSION}", - "stringIndexType": "Utf16CodeUnit", - "metadata": { - "projectName": "{PROJECT-NAME}", - "projectKind": "customMultiLabelClassification", - "description": "Trying out custom multi label text classification", + "projectFileVersion": "{API-VERSION}", + "stringIndexType": "Utf16CodeUnit", + "metadata": { + "projectName": "{PROJECT-NAME}", + "storageInputContainerName": "{CONTAINER-NAME}", + "projectKind": "customMultiLabelClassification", + "description": "Trying out custom multi label text classification", + "language": "{LANGUAGE-CODE}", + "multilingual": true, + "settings": {} + }, + "assets": { + "projectKind": "customMultiLabelClassification", + "classes": [ + { + "category": "Class1" + }, + { + "category": "Class2" + } + ], + "documents": [ + { + "location": "{DOCUMENT-NAME}", "language": "{LANGUAGE-CODE}", - "multilingual": true, - "storageInputContainerName": "{CONTAINER-NAME}", - "settings": {} - }, - "assets": { + "dataset": "{DATASET}", "classes": [ - { - "category": "Class1" - }, - { - "category": "Class2" - } - ], - "documents": [ - { - "location": "{DOCUMENT-NAME}", - "language": "{LANGUAGE-CODE}", - "dataset": "{DATASET}", - "classes": [ - { - "category": "Class1" - }, - { - "category": "Class2" - } - ] - }, - { - "location": "{DOCUMENT-NAME}", - "language": "{LANGUAGE-CODE}", - "dataset": "{DATASET}", - "classes": [ - { - "category": "Class2" - } - ] - } - } + { + "category": "Class1" + }, + { + "category": "Class2" + } + ] + }, + { + "location": "{DOCUMENT-NAME}", + "language": "{LANGUAGE-CODE}", + "dataset": "{DATASET}", + "classes": [ + { + "category": "Class2" + } + ] + } + ] + } } + ``` |Key |Placeholder |Value | Example | |---------|---------|----------|--| -| api-version | `{API-VERSION}` | The version of the API you are calling. The version used here must be the same API version in the URL. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +| api-version | `{API-VERSION}` | The version of the API you are calling. The version used here must be the same API version in the URL. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | | projectName | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | | projectKind | `customMultiLabelClassification` | Your project kind. | `customMultiLabelClassification` | | language | `{LANGUAGE-CODE}` | A string specifying the language code for the documents used in your project. If your project is a multilingual project, choose the language code of the majority of the documents. See [language support](../../language-support.md#multi-lingual-option) to learn more about multilingual support. |`en-us`| | multilingual | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language (not necessarily included in your training documents. See [language support](../../language-support.md#multi-lingual-option) to learn more about multilingual support. | `true`| -| storageInputContainerName | {CONTAINER-NAME} | The name of your Azure storage container where you have uploaded your documents. 
| `myContainer` | +| storageInputContainerName | `{CONTAINER-NAME}` | The name of your Azure storage container where you have uploaded your documents. | `myContainer` | | classes | [] | Array containing all the classes you have in the project. These are the classes you want to classify your documents into.| [] | | documents | [] | Array containing all the documents in your project and what the classes labeled for this document. | [] | | location | `{DOCUMENT-NAME}` | The location of the documents in the storage container. Since all the documents are in the root of the container this should be the document name.|`doc1.txt`| -| dataset | `{DATASET}` | The test set to which this file will go to when split before training. See [How to train a model](../../how-to/train-model.md#data-splitting) for more information on data splitting. Possible values for this field are `Train` and `Test`. |`Train`| +| dataset | `{DATASET}` | The test set to which this document will go to when split before training. See [How to train a model](../../how-to/train-model.md#data-splitting) for more information on data splitting. Possible values for this field are `Train` and `Test`. |`Train`| # [Single label classification](#tab/single-classification) ```json { - "api-version": "{API-VERSION}", - "stringIndexType": "Utf16CodeUnit", - "metadata": { - "projectName": "{PROJECT-NAME}", - "projectKind": "customSingleLabelClassification", - "description": "Trying out custom single label text classification", - "language": "{LANGUAGE-CODE}", - "multilingual": true, - "storageInputContainerName": "{CONTAINER-NAME}", - "settings": {} - }, - "assets": { + "projectFileVersion": "{API-VERSION}", + "stringIndexType": "Utf16CodeUnit", + "metadata": { + "projectName": "{PROJECT-NAME}", + "storageInputContainerName": "{CONTAINER-NAME}", + "projectKind": "customSingleLabelClassification", + "description": "Trying out custom multi label text classification", + "language": "{LANGUAGE-CODE}", + "multilingual": true, + "settings": {} + }, + "assets": { + "projectKind": "customSingleLabelClassification", "classes": [ { "category": "Class1" @@ -147,20 +152,20 @@ Use the following JSON in your request. Replace the placeholder values below wit ``` |Key |Placeholder |Value | Example | |---------|---------|----------|--| -| api-version | `{API-VERSION}` | The version of the API you are calling. The version used here must be the same API version in the URL. | `2022-03-01-preview` | +| api-version | `{API-VERSION}` | The version of the API you are calling. The version used here must be the same API version in the URL. | `2022-05-01` | | projectName | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | | projectKind | `customSingleLabelClassification` | Your project kind. | `customSingleLabelClassification` | | language | `{LANGUAGE-CODE}` | A string specifying the language code for the documents used in your project. If your project is a multilingual project, choose the language code of the majority of the documents. See [language support](../../language-support.md) to learn more about supported language codes. |`en-us`| | multilingual | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language (not necessarily included in your training documents. See [language support](../../language-support.md#multi-lingual-option) to learn more about multilingual support. 
| `true`| -| storageInputContainerName | {CONTAINER-NAME} | The name of your Azure storage container where you have uploaded your documents. | `myContainer` | +| storageInputContainerName | `{CONTAINER-NAME}` | The name of your Azure storage container where you have uploaded your documents. | `myContainer` | | classes | [] | Array containing all the classes you have in the project. These are the classes you want to classify your documents into.| [] | | documents | [] | Array containing all the documents in your project and which class this document belongs to. | [] | | location | `{DOCUMENT-NAME}` | The location of the documents in the storage container. Since all the documents are in the root of the container this should be the document name.|`doc1.txt`| -| dataset | `{DATASET}` | The test set to which this file will go to when split before training. See [How to train a model](../../how-to/train-model.md#data-splitting) to learn more about data splitting. Possible values for this field are `Train` and `Test`. |`Train`| +| dataset | `{DATASET}` | The test set to which this document will go to when split before training. See [How to train a model](../../how-to/train-model.md#data-splitting) to learn more about data splitting. Possible values for this field are `Train` and `Test`. |`Train`| --- -Once you send your API request, you’ll receive a `202` response indicating that the job was submitted correctly. In the response headers, extract the `location` value. It will be formatted like this: +Once you send your API request, you’ll receive a `202` response indicating that the job was submitted correctly. In the response headers, extract the `operation-location` value. It will be formatted like this: ```rest {ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/import/jobs/{JOB-ID}?api-version={API-VERSION} diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/model-evaluation-multi-label.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/model-evaluation-multi-label.md index d1a15c4adc396..67ab2d2c75542 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/model-evaluation-multi-label.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/model-evaluation-multi-label.md @@ -18,7 +18,7 @@ Submit a **GET** request using the following URL, headers, and JSON body to get ### Request URL ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -26,7 +26,7 @@ Submit a **GET** request using the following URL, headers, and JSON body to get |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{trainedModelLabel}` | The name for your trained model. This value is case-sensitive. | `Model1` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. 
Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/model-evaluation-single-label.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/model-evaluation-single-label.md index 31e7ac26266c4..fa54e558fd7c6 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/model-evaluation-single-label.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/model-evaluation-single-label.md @@ -18,7 +18,7 @@ Submit a **GET** request using the following URL, headers, and JSON body to get ### Request URL ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -26,7 +26,7 @@ Submit a **GET** request using the following URL, headers, and JSON body to get |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{trainedModelLabel}` | The name for your trained model. This value is case-sensitive. | `Model1` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/project-details.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/project-details.md index 718a6e7484cd7..175eb4f35c9f1 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/project-details.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/project-details.md @@ -22,7 +22,7 @@ To get custom text classification project details, submit a **GET** request usin |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you're calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you're calling. 
The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -42,12 +42,11 @@ Once you send the request, you will get the following response. "lastTrainedDateTime": "2022-04-23T13:39:09.384Z", "lastDeployedDateTime": "2022-04-23T13:39:09.384Z", "projectKind": "customSingleLabelClassification", - "storageInputContainerName": "string", - "settings": {}, - "projectName": "string", + "storageInputContainerName": "{CONTAINER-NAME}", + "projectName": "{PROJECT-NAME}", "multilingual": true, - "description": "string", - "language": "string" + "description": "Project description", + "language": "{LANGUAGE-CODE}" } ``` @@ -60,4 +59,4 @@ Once you send the request, you will get the following response. | `multilingual` | | A boolean value that enables you to have documents in multiple languages in your dataset. When your model is deployed, you can query the model in any supported language (not necessarily included in your training documents. For more information on multilingual support, see [language support](../../language-support.md#multi-lingual-option). | `true`| | `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the documents used in your project. If your project is a multilingual project, choose the language code of the majority of the documents. See [language support](../../language-support.md) to learn more about supported language codes. |`en-us`| -Once you send your API request, you'll receive a `202` response indicating success and JSON response body with your project details. +Once you send your API request, you'll receive a `200` response indicating success and JSON response body with your project details. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/submit-task.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/submit-task.md index 3d5d86c1c8dd8..8e50d3d056a8a 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/submit-task.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/submit-task.md @@ -12,9 +12,14 @@ ms.author: aahi Use this **POST** request to start a text classification task. ```rest -{ENDPOINT}/text/analytics/v3.2-preview.2/analyze +{ENDPOINT}/language/analyze-text/jobs?api-version={API-VERSION} ``` +|Placeholder |Value | Example | +|---------|---------|---------| +|`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | + #### Headers |Key|Value| @@ -27,35 +32,34 @@ Use this **POST** request to start a text classification task. 
```json { - "displayName": "{JOB-NAME}", - "analysisInput": { - "documents": [ - { - "id": "{DOC-ID}", - "language": "{LANGUAGE-CODE}", - "text": "{DOC-TEXT}" - }, - { - "id": "{DOC-ID}", - "language": "{LANGUAGE-CODE}", - "text": "{DOC-TEXT}" - } - ] - }, - "tasks": { - "customMultiLabelClassificationTasks": [ - { - "parameters": { - "project-name": "{PROJECT-NAME}", - "deployment-name": "{DEPLOYMENT-NAME}" - } - } - ] + "displayName": "Classifying documents", + "analysisInput": { + "documents": [ + { + "id": "1", + "language": "{LANGUAGE-CODE}", + "text": "Text1" + }, + { + "id": "2", + "language": "{LANGUAGE-CODE}", + "text": "Text2" + } + ] + }, + "tasks": [ + { + "kind": "CustomMultiLabelClassification", + "taskName": "Multi Label Classification", + "parameters": { + "projectName": "{PROJECT-NAME}", + "deploymentName": "{DEPLOYMENT-NAME}" + } } + ] } ``` - |Key |Placeholder |Value | Example | |---------|---------|----------|--| | `displayName` | `{JOB-NAME}` | Your job name. | `MyJobName` | @@ -64,7 +68,7 @@ Use this **POST** request to start a text classification task. | `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the document. If this key isn't specified, the service will assume the default language of the project that was selected during project creation. See [language support](../../language-support.md) for a list of supported language codes. |`en-us`| | `text` | `{DOC-TEXT}` | Document task to run the tasks on. | `Lorem ipsum dolor sit amet` | |`tasks`| | List of tasks we want to perform.|`[]`| -| `customMultiLabelClassificationTasks` | |Task identifier for task we want to perform. | | +| `taskName`|CustomMultiLabelClassification|The task name|CustomMultiLabelClassification| |`parameters`| |List of parameters to pass to the task.| | | `project-name` |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | | `deployment-name` |`{DEPLOYMENT-NAME}` | The name of your deployment. This value is case-sensitive. | `prod` | @@ -73,31 +77,31 @@ Use this **POST** request to start a text classification task. ```json { - "displayName": "{JOB-NAME}", - "analysisInput": { - "documents": [ - { - "id": "{DOC-ID}", - "language": "{LANGUAGE-CODE}", - "text": "{DOC-TEXT}" - }, - { - "id": "{DOC-ID}", - "language": "{LANGUAGE-CODE}", - "text": "{DOC-TEXT}" - } - ] - }, - "tasks": { - "customSingleLabelClassificationTasks": [ - { - "parameters": { - "project-name": "`{PROJECT-NAME}`", - "deployment-name": "`{DEPLOYMENT-NAME}`" - } - } - ] + "displayName": "Classifying documents", + "analysisInput": { + "documents": [ + { + "id": "1", + "language": "{LANGUAGE-CODE}", + "text": "Text1" + }, + { + "id": "2", + "language": "{LANGUAGE-CODE}", + "text": "Text2" + } + ] + }, + "tasks": [ + { + "kind": "CustomSingleLabelClassification", + "taskName": "Single Classification Label", + "parameters": { + "projectName": "{PROJECT-NAME}", + "deploymentName": "{DEPLOYMENT-NAME}" + } } + ] } ``` @@ -108,8 +112,8 @@ Use this **POST** request to start a text classification task. | `id` | `{DOC-ID}` | Document name or ID. | `doc1`| | `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the document. If this key isn't specified, the service will assume the default language of the project that was selected during project creation. See [language support](../../language-support.md) for a list of supported language codes. |`en-us`| | `text` | `{DOC-TEXT}` | Document task to run the tasks on. 
| `Lorem ipsum dolor sit amet` | -|`tasks`| | List of tasks we want to perform.| | -| `customSingleLabelClassificationTasks` ||Task identifier for task we want to perform. | | +| `taskName`|CustomSingleLabelClassification|The task name|CustomSingleLabelClassification| +|`tasks`|[] | Array of tasks we want to perform.|[] | |`parameters`| |List of parameters to pass to the task.| | | `project-name` |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | | `deployment-name` |`{DEPLOYMENT-NAME}` | The name of your deployment. This value is case-sensitive. | `prod` | @@ -121,6 +125,6 @@ Use this **POST** request to start a text classification task. You will receive a 202 response indicating success. In the response **headers**, extract `operation-location`. `operation-location` is formatted like this: - `{YOUR-ENDPOINT}/text/analytics/v3.2-preview.2/analyze/jobs/` +`{ENDPOINT}/language/analyze-text/jobs/{JOB-ID}?api-version={API-VERSION}` You can use this URL to query the task completion status and get the results when task is completed. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/swap-deployment.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/swap-deployment.md index e7a188cb13992..958254031cedc 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/swap-deployment.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/swap-deployment.md @@ -11,7 +11,6 @@ ms.custom: ignite-fall-2021, event-tier1-build-2022 --- - Create a **POST** request using the following URL, headers, and JSON body to start a swap deployments job. @@ -25,7 +24,7 @@ Create a **POST** request using the following URL, headers, and JSON body to sta |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-05-01` | ### Headers @@ -35,7 +34,7 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | + ### Request Body @@ -47,10 +46,10 @@ Use the following header to authenticate your request. ``` -|Key| Value| Example| -|--|--|--| -|firstDeploymentName | The name for your first deployment. This value is case-sensitive. | `production` | -|secondDeploymentName | The name for your second deployment. This value is case-sensitive. | `staging` | +|Key|Placeholder| Value| Example| +|--|--|--|--| +|firstDeploymentName |`{FIRST-DEPLOYMENT-NAME}`| The name for your first deployment. This value is case-sensitive. | `production` | +|secondDeploymentName | `{SECOND-DEPLOYMENT-NAME}`|The name for your second deployment. This value is case-sensitive. 
| `staging` | Once you send your API request, you will receive a `202` response indicating success. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/text-classification-task.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/text-classification-task.md index 0fd7417666716..0a51d0a10871b 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/text-classification-task.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/text-classification-task.md @@ -4,16 +4,22 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: include -ms.date: 05/04/2022 +ms.date: 05/24/2022 ms.author: aahi --- > [!NOTE] > Project names are case sensitive. -Use this **POST** request to start an entity extraction task. Replace `{projectName}` with the project name where you have the model you want to use. +Use this **POST** request to text classification task. Replace `{projectName}` with the project name where you have the model you want to use. + +`{ENDPOINT}/language/:analyze-text?api-version={API-VERSION}` + +|Placeholder |Value | Example | +|---------|---------|---------| +|`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | -`{YOUR-ENDPOINT}/text/analytics/v3.2-preview.2/analyze` #### Headers @@ -25,30 +31,22 @@ Use this **POST** request to start an entity extraction task. Replace `{projectN ```json { - "displayName": "MyJobName", - "analysisInput": { - "documents": [ - { - "id": "doc1", - "text": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc tempus, felis sed vehicula lobortis, lectus ligula facilisis quam, quis aliquet lectus diam id erat. Vivamus eu semper tellus. Integer placerat sem vel eros iaculis dictum. Sed vel congue urna." - }, - { - "id": "doc2", - "text": "Mauris dui dui, ultricies vel ligula ultricies, elementum viverra odio. Donec tempor odio nunc, quis fermentum lorem egestas commodo. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos." - } - ] - }, - "tasks": { - "customMultiClassificationTasks": [ - { - "parameters": { - "project-name": "MyProject", - "deployment-name": "MyDeploymentName" - "stringIndexType": "TextElements_v8" - } - } - ] - } + "kind": "customMultiClassificationTasks", + "parameters": { + "modelVersion": "{CONFIG-VERSION}" + }, + "analysisInput": { + "documents": [ + { + "id": "doc1", + "text": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc tempus, felis sed vehicula lobortis, lectus ligula facilisis quam, quis aliquet lectus diam id erat. Vivamus eu semper tellus. Integer placerat sem vel eros iaculis dictum. Sed vel congue urna." + }, + { + "id": "doc2", + "text": "Mauris dui dui, ultricies vel ligula ultricies, elementum viverra odio. Donec tempor odio nunc, quis fermentum lorem egestas commodo. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos." + } + ] + } } ``` @@ -68,9 +66,9 @@ Replace the text of the document with movie summaries to classify. 
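For illustration, the request documented above can also be sent from a short script. The following Python sketch mirrors the URL, headers, and body shown in this section; the endpoint, key, model version, and document text are placeholders you supply yourself, and the snippet is not a verified client for the service.

```python
# Sketch: send the custom text classification request documented above.
# All angle-bracket values are placeholders for your own resource details.
import requests

endpoint = "https://<your-resource>.cognitiveservices.azure.com"
api_version = "2022-05-01"
key = "<your-resource-key>"

url = f"{endpoint}/language/:analyze-text?api-version={api_version}"
headers = {
    "Ocp-Apim-Subscription-Key": key,
    "Content-Type": "application/json",
}
body = {
    "kind": "customMultiClassificationTasks",
    "parameters": {"modelVersion": "<config-version>"},
    "analysisInput": {
        "documents": [
            {"id": "doc1", "text": "First movie summary to classify."},
            {"id": "doc2", "text": "Second movie summary to classify."},
        ]
    },
}

response = requests.post(url, headers=headers, json=body)
print(response.status_code)
# If the call is accepted as an asynchronous job, the headers carry the URL to poll.
print(response.headers.get("operation-location"))
```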
#### Response -You will receive a 202 response indicating success. In the response **headers**, extract `operation-location`. +You will receive a 200 response indicating success. In the response **headers**, extract `operation-location`. `operation-location` is formatted like this: - `{YOUR-ENDPOINT}/text/analytics/v3.2-preview.2/analyze/jobs/` + `{ENDPOINT}/text/analytics/v3.2-preview.2/analyze/jobs/` -You will use this endpoint to get the custom text classification task results. \ No newline at end of file +You will use this endpoint to get the custom text classification task results. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/train-model.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/train-model.md index 9744eae72b86c..1c221236baef0 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/train-model.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/train-model.md @@ -19,7 +19,7 @@ Submit a **POST** request using the following URL, headers, and JSON body to sub |---------|---------|---------| | `{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | #### Headers @@ -48,7 +48,7 @@ Use the following JSON in your request body. The model will be given the `{MODEL |Key |Placeholder |Value | Example | |---------|---------|-----|----| | modelLabel | `{MODEL-NAME}` | The model name that will be assigned to your model once trained successfully. | `myModel` | -| trainingConfigVersion | `{CONFIG-VERSION}` | This is the [model version](../../../concepts/model-lifecycle.md) that will be used to train the model. | `2022-05-01` | +| trainingConfigVersion | `{CONFIG-VERSION}` | This is the [model version](../../../concepts/model-lifecycle.md) that will be used to train the model. | `2022-05-01`| | evaluationOptions | | Option to split your data across training and testing sets. | `{}` | | kind | `percentage` | Split methods. Possible values are `percentage` or `manual`. See [How to train a model](../../how-to/train-model.md#data-splitting) for more information. |`percentage`| | trainingSplitPercentage | `80`| Percentage of your tagged data to be included in the training set. Recommended value is `80`. 
| `80`| diff --git a/articles/cognitive-services/language-service/custom-text-classification/media/multiple.png b/articles/cognitive-services/language-service/custom-text-classification/media/multiple.png index d473ac7f9dd29..ece8f39c728a5 100644 Binary files a/articles/cognitive-services/language-service/custom-text-classification/media/multiple.png and b/articles/cognitive-services/language-service/custom-text-classification/media/multiple.png differ diff --git a/articles/cognitive-services/language-service/custom-text-classification/media/review-validation-set.png b/articles/cognitive-services/language-service/custom-text-classification/media/review-validation-set.png index ef3396e5e8dba..ebb615bf433a0 100644 Binary files a/articles/cognitive-services/language-service/custom-text-classification/media/review-validation-set.png and b/articles/cognitive-services/language-service/custom-text-classification/media/review-validation-set.png differ diff --git a/articles/cognitive-services/language-service/custom-text-classification/media/single.png b/articles/cognitive-services/language-service/custom-text-classification/media/single.png index 25638db4aae9c..ee77d4a320063 100644 Binary files a/articles/cognitive-services/language-service/custom-text-classification/media/single.png and b/articles/cognitive-services/language-service/custom-text-classification/media/single.png differ diff --git a/articles/cognitive-services/language-service/custom-text-classification/media/tag-1.png b/articles/cognitive-services/language-service/custom-text-classification/media/tag-1.png deleted file mode 100644 index c5e822aa4cbf4..0000000000000 Binary files a/articles/cognitive-services/language-service/custom-text-classification/media/tag-1.png and /dev/null differ diff --git a/articles/cognitive-services/language-service/custom-text-classification/overview.md b/articles/cognitive-services/language-service/custom-text-classification/overview.md index ffb495d026f11..80bc5a148e021 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/overview.md +++ b/articles/cognitive-services/language-service/custom-text-classification/overview.md @@ -8,16 +8,16 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: overview -ms.date: 05/06/2022 +ms.date: 05/24/2022 ms.author: aahi ms.custom: language-service-custom-classification, ignite-fall-2021, event-tier1-build-2022 --- -# What is custom text classification (preview)? +# What is custom text classification? Custom text classification is one of the custom features offered by [Azure Cognitive Service for Language](../overview.md). It is a cloud-based API service that applies machine-learning intelligence to enable you to build custom models for text classification tasks. -Custom text classification enables users to build custom AI models to classify text into custom classes pre-defined by the user. By creating a custom text classification project, developers can iteratively tag data, train, evaluate, and improve model performance before making it available for consumption. The quality of the tagged data greatly impacts model performance. To simplify building and customizing your model, the service offers a custom web portal that can be accessed through the [Language studio](https://aka.ms/languageStudio). You can easily get started with the service by following the steps in this [quickstart](quickstart.md). 
+Custom text classification enables users to build custom AI models to classify text into custom classes pre-defined by the user. By creating a custom text classification project, developers can iteratively label data, train, evaluate, and improve model performance before making it available for consumption. The quality of the labeled data greatly impacts model performance. To simplify building and customizing your model, the service offers a custom web portal that can be accessed through the [Language studio](https://aka.ms/languageStudio). You can easily get started with the service by following the steps in this [quickstart](quickstart.md). Custom text classification supports two types of projects: @@ -52,9 +52,9 @@ Follow these steps to get the most out of your model: 1. **Define schema**: Know your data and identify the [classes](glossary.md#class) you want differentiate between, avoid ambiguity. -2. **Tag data**: The quality of data tagging is a key factor in determining model performance. Documents that belong to the same class should always have the same class, if you have a document that can fall into two classes use **Multi label classification** projects. Avoid class ambiguity, make sure that your classes are clearly separable from each other, especially with single label classification projects. +2. **Label data**: The quality of data labeling is a key factor in determining model performance. Documents that belong to the same class should always have the same class, if you have a document that can fall into two classes use **Multi label classification** projects. Avoid class ambiguity, make sure that your classes are clearly separable from each other, especially with single label classification projects. -3. **Train model**: Your model starts learning from your tagged data. +3. **Train model**: Your model starts learning from your labeled data. 4. **View model evaluation details**: View the evaluation details for your model to determine how well it performs when introduced to new data. @@ -79,7 +79,7 @@ As you use custom text classification, see the following reference documentation ## Responsible AI -An AI system includes not only the technology, but also the people who will use it, the people who will be affected by it, and the environment in which it is deployed. Read the [transparency note for custom text classification]() to learn about responsible AI use and deployment in your systems. You can also see the following articles for more information: +An AI system includes not only the technology, but also the people who will use it, the people who will be affected by it, and the environment in which it is deployed. Read the [transparency note for custom text classification](/legal/cognitive-services/language-service/ctc-transparency-note?context=/azure/cognitive-services/language-service/context/context) to learn about responsible AI use and deployment in your systems. 
You can also see the following articles for more information: [!INCLUDE [Responsible AI links](../includes/overview-responsible-ai-links.md)] diff --git a/articles/cognitive-services/language-service/custom-text-classification/quickstart.md b/articles/cognitive-services/language-service/custom-text-classification/quickstart.md index 28dc3dcd478e7..d31a10be4d612 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/quickstart.md +++ b/articles/cognitive-services/language-service/custom-text-classification/quickstart.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: quickstart -ms.date: 05/06/2022 +ms.date: 06/02/2022 ms.author: aahi ms.custom: language-service-custom-classification, ignite-fall-2021, mode-other zone_pivot_groups: usage-custom-language-features diff --git a/articles/cognitive-services/language-service/custom-text-classification/service-limits.md b/articles/cognitive-services/language-service/custom-text-classification/service-limits.md index 3447bcc96bb4b..b65a2bf538f80 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/service-limits.md +++ b/articles/cognitive-services/language-service/custom-text-classification/service-limits.md @@ -51,9 +51,8 @@ Custom text classification is only available in some Azure regions. To use custo * West Europe * North Europe * UK south -* Southeast Asia * Australia East -* Sweden Central + ## API limits diff --git a/articles/cognitive-services/language-service/custom-text-classification/tutorials/cognitive-search.md b/articles/cognitive-services/language-service/custom-text-classification/tutorials/cognitive-search.md index d9a6e99b8b487..8e709a15bcd36 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/tutorials/cognitive-search.md +++ b/articles/cognitive-services/language-service/custom-text-classification/tutorials/cognitive-search.md @@ -8,12 +8,12 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: tutorial -ms.date: 04/14/2022 +ms.date: 06/03/2022 ms.author: aahi ms.custom: --- -# Tutorial: Enrich Cognitive search index with custom classes from your data +# Tutorial: Enrich Cognitive Search index with custom classes from your data With the abundance of electronic documents within the enterprise, the problem of search through them becomes a tiring and expensive task. [Azure Cognitive Search](../../../../search/search-create-service-portal.md) helps with searching through your files based on their indices. Custom text classification helps in enriching the indexing of these files by classifying them into your custom classes. @@ -51,7 +51,7 @@ Typically after you create a project, you go ahead and start [tagging the docume ## Deploy your model -Generally after training a model you would review it's [evaluation details](../how-to/view-model-evaluation.md) and [make improvements](../how-to/improve-model.md) if necessary. In this quickstart, you will just deploy your model, and make it available for you to try in the Language studio, or you can call the [prediction API](https://aka.ms/ct-runtime-swagger). +Generally after training a model you would review it's [evaluation details](../how-to/view-model-evaluation.md) and [make improvements](../how-to/improve-model.md) if necessary. 
In this quickstart, you will just deploy your model, and make it available for you to try in Language Studio, or you can call the [prediction API](https://aka.ms/ct-runtime-swagger). [!INCLUDE [Deploy a model using Language Studio](../includes/language-studio/deploy-model.md)] @@ -92,7 +92,7 @@ Training could take sometime between 10 and 30 minutes for this sample dataset. ## Deploy your model -Generally after training a model you would review it's [evaluation details](../how-to/view-model-evaluation.md) and [make improvements](../how-to/improve-model.md) if necessary. In this quickstart, you will just deploy your model, and make it available for you to try in the Language studio, or you can call the [prediction API](https://aka.ms/ct-runtime-swagger). +Generally after training a model you would review its [evaluation details](../how-to/view-model-evaluation.md) and [make improvements](../how-to/improve-model.md) if necessary. In this quickstart, you will just deploy your model, and make it available for you to try in Language Studio, or you can call the [prediction API](https://aka.ms/ct-runtime-swagger). ### Submit deployment job @@ -143,9 +143,9 @@ Generally after training a model you would review it's [evaluation details](../h 6. Get your custom text classification project secrets - 1. You will need your **project-name**, project names are case-sensitive. + 1. You will need your **project-name**. Project names are case-sensitive and can be found on the **project settings** page. - 2. You will also need the **deployment-name**. + 2. You will also need the **deployment-name**. Deployment names can be found on the **Deploying a model** page. ### Run the indexer command diff --git a/articles/cognitive-services/language-service/key-phrase-extraction/tutorials/integrate-power-bi.md b/articles/cognitive-services/language-service/key-phrase-extraction/tutorials/integrate-power-bi.md index 93e9f810c2b1e..4fd5e788dcd1d 100644 --- a/articles/cognitive-services/language-service/key-phrase-extraction/tutorials/integrate-power-bi.md +++ b/articles/cognitive-services/language-service/key-phrase-extraction/tutorials/integrate-power-bi.md @@ -8,9 +8,9 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: tutorial -ms.date: 11/02/2021 +ms.date: 05/27/2022 ms.author: aahi -ms.custom: language-service-key-phrase, ignite-fall-2021 +ms.custom: language-service-key-phrase, ignite-fall-2021, cogserv-non-critical-language --- # Tutorial: Extract key phrases from text stored in Power BI diff --git a/articles/cognitive-services/language-service/named-entity-recognition/tutorials/extract-excel-information.md b/articles/cognitive-services/language-service/named-entity-recognition/tutorials/extract-excel-information.md index 762994dd3425b..616a07c0919d7 100644 --- a/articles/cognitive-services/language-service/named-entity-recognition/tutorials/extract-excel-information.md +++ b/articles/cognitive-services/language-service/named-entity-recognition/tutorials/extract-excel-information.md @@ -10,7 +10,7 @@ ms.subservice: language-service ms.topic: tutorial ms.date: 11/02/2021 ms.author: aahi -ms.custom: language-service-ner, ignite-fall-2021 +ms.custom: language-service-ner, ignite-fall-2021, cogserv-non-critical-language --- # Extract information in Excel using Named Entity Recognition(NER) and Power Automate diff --git a/articles/cognitive-services/language-service/orchestration-workflow/concepts/none-intent.md
b/articles/cognitive-services/language-service/orchestration-workflow/concepts/none-intent.md index 7c0b92b6525f2..0e76055bf9304 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/concepts/none-intent.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/concepts/none-intent.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: conceptual -ms.date: 05/19/2022 +ms.date: 06/03/2022 ms.author: aahi ms.custom: language-service-orchestration ms.reviewer: haelhamm @@ -30,7 +30,7 @@ The score should be set according to your own observations of prediction scores, When you export a project's JSON file, the None score threshold is defined in the _**"settings"**_ parameter of the JSON as the _**"confidenceThreshold"**_, which accepts a decimal value between 0.0 and 1.0. -The default score for Orchestration Workflow projects is set at **0.5** when creating new project in the language studio. +The default score for Orchestration Workflow projects is set at **0.5** when creating new project in Language Studio. > [!NOTE] > During model evaluation of your test set, the None score threshold is not applied. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/how-to/build-schema.md b/articles/cognitive-services/language-service/orchestration-workflow/how-to/build-schema.md index ff98945f077de..979f5c1e9c49f 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/how-to/build-schema.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/how-to/build-schema.md @@ -34,7 +34,7 @@ Consider the following guidelines and recommendations for your project: To build a project schema within [Language Studio](https://aka.ms/languageStudio): -1. Select **Build schema** from the left side menu. +1. Select **Schema definition** from the left side menu. 2. To create an intent, select **Add** from the top menu. You will be prompted to type in a name for the intent. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/how-to/call-api.md b/articles/cognitive-services/language-service/orchestration-workflow/how-to/call-api.md index 8d1a5bc6866ef..de8b280a02b43 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/how-to/call-api.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/how-to/call-api.md @@ -21,7 +21,7 @@ You can query the deployment programmatically [Prediction API](https://aka.ms/ct ## Test deployed model -You can use the Language Studio to submit an utterance, get predictions and visualize the results. +You can use Language Studio to submit an utterance, get predictions and visualize the results. [!INCLUDE [Test model](../includes/language-studio/test-model.md)] @@ -69,8 +69,8 @@ You can also use the client libraries provided by the Azure SDK to send requests |Language |Package version | |---------|---------| - |.NET | [5.2.0-beta.2](https://www.nuget.org/packages/Azure.AI.TextAnalytics/5.2.0-beta.2) | - |Python | [5.2.0b2](https://pypi.org/project/azure-ai-textanalytics/5.2.0b2/) | + |.NET | [1.0.0-beta.3 ](https://www.nuget.org/packages/Azure.AI.Language.Conversations/1.0.0-beta.3) | + |Python | [1.1.0b1](https://pypi.org/project/azure-ai-language-conversations/) | 4. After you've installed the client library, use the following samples on GitHub to start calling the API. 
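As an illustration of the Python package listed above, a minimal client could be constructed as follows. The endpoint and key are placeholders, and the analysis methods differ between preview releases of the package, so use the GitHub samples mentioned in the previous step for complete prediction requests against your deployed orchestration workflow project.

```python
# Minimal sketch: authenticate a conversations client for an orchestration workflow project.
# <your-resource> and <your-resource-key> are placeholders for your own Language resource.
from azure.core.credentials import AzureKeyCredential
from azure.ai.language.conversations import ConversationAnalysisClient

endpoint = "https://<your-resource>.cognitiveservices.azure.com"
credential = AzureKeyCredential("<your-resource-key>")

client = ConversationAnalysisClient(endpoint, credential)
# The prediction call itself varies by package version; follow the linked
# GitHub samples for the exact method and request shape.
```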
diff --git a/articles/cognitive-services/language-service/orchestration-workflow/how-to/create-project.md b/articles/cognitive-services/language-service/orchestration-workflow/how-to/create-project.md index ca57725f61f10..ab1880c8738b0 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/how-to/create-project.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/how-to/create-project.md @@ -36,7 +36,7 @@ Before you start using orchestration workflow, you will need an Azure Language r [!INCLUDE [create a new resource from the Azure portal](../includes/resource-creation-azure-portal.md)] -[!INCLUDE [create a new resource from the Language Studio](../includes/resource-creation-language-studio.md)] +[!INCLUDE [create a new resource from Language Studio](../includes/resource-creation-language-studio.md)] ## Sign in to Language Studio diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/delete-model.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/delete-model.md index 408eccfebc39c..0fa49a96d85e0 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/delete-model.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/delete-model.md @@ -12,7 +12,7 @@ ms.author: aahi To delete your model from within the [Language Studio](https://aka.ms/LanguageStudio): -1. Select **View model details** from the left side menu. +1. Select **Model performance** from the left side menu. 2. Click on the **model name** you want to delete and click **Delete** from the top menu. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/deploy-model.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/deploy-model.md index 6be65efceeed6..4f1f2f89e0293 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/deploy-model.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/deploy-model.md @@ -11,9 +11,9 @@ ms.author: aahi To deploy your model from within the [Language Studio](https://aka.ms/LanguageStudio): -1. Select **Deploy model** from the left side menu. +1. Select **Deploying a model** from the left side menu. -2. Click on **Start deployment job** to start a new deployment job. +2. Click on **Add deployment** to start a new deployment job. :::image type="content" source="../../media/add-deployment-model.png" alt-text="A screenshot showing the model deployment button in Language Studio." lightbox="../../media/add-deployment-model.png"::: diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/model-performance.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/model-performance.md index c8b568bbe68b3..6b4d898a10ff9 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/model-performance.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/model-performance.md @@ -11,7 +11,7 @@ ms.author: aahi 1. Go to your project page in [Language Studio](https://aka.ms/languageStudio). -2. Select **View model details** from the menu on the left side of the screen. +2. 
Select **Model performance** from the menu on the left side of the screen. 3. In this page you can only view the successfully trained models, F1 score for each model and [model expiration date](../../../concepts/model-lifecycle.md#expiration-timeline). You can click on the model name for more details about its performance. @@ -25,4 +25,4 @@ You can click on the model name for more details about its performance. > [!NOTE] > If you don't see any of the intents you have in your model displayed here, it is because they weren't in any of the utterances that were used for the test set. - \ No newline at end of file + diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/test-model.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/test-model.md index 9a1e47e16fc18..ddbaf6779a0f5 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/test-model.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/test-model.md @@ -9,7 +9,7 @@ ms.date: 05/17/2022 ms.author: aahi --- -To test your model from the Language studio +To test your model from Language Studio 1. Select **Testing deployments** from the left side menu. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/quickstarts/language-studio.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/quickstarts/language-studio.md index 7fbb64ff9b750..33e493b1b9fe3 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/quickstarts/language-studio.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/quickstarts/language-studio.md @@ -51,7 +51,7 @@ To train a model, you need to start a training job. The output of a successful t ## Deploy your model -Generally after training a model you would review its evaluation details. In this quickstart, you will just deploy your model, and make it available for you to try in the Language studio, or you can call the [prediction API](https://aka.ms/clu-apis). +Generally after training a model you would review its evaluation details. In this quickstart, you will just deploy your model, and make it available for you to try in Language Studio, or you can call the [prediction API](https://aka.ms/clu-apis). [!INCLUDE [Deploy model](../language-studio/deploy-model.md)] diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/cancel-training.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/cancel-training.md index 9d8902a768a61..8b011763f86e6 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/cancel-training.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/cancel-training.md @@ -16,15 +16,15 @@ Create a **POST** request using the following URL, headers, and JSON body to can Use the following URL when creating your API request. Replace the placeholder values below with your own values. 
```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}/:cancel?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}/:cancel?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{JOB-ID} | This is the training job ID| |`XXXXX-XXXXX-XXXX-XX| -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest [released model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{JOB-ID}` | This is the training job ID |`XXXXX-XXXXX-XXXX-XX`| +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions | `2022-05-01` | ### Headers @@ -34,5 +34,6 @@ Use the following header to authenticate your request. |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a 204 response indicating success, which means your training job has been canceled. +Once you send your API request, you will receive a 202 response indicating success, which means your training job has been canceled. A successful call results with an Operation-Location header used to check the status of the job. + diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/create-project.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/create-project.md index df9daa738319b..113cd2e900171 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/create-project.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/create-project.md @@ -23,7 +23,7 @@ Use the following URL when creating your API request. Replace the placeholder va |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | ### Headers @@ -39,20 +39,15 @@ Use the following sample JSON as your body. 
```json { - "projectKind": "orchestration", - "settings": { - "confidenceThreshold": 0 - }, "projectName": "{PROJECT-NAME}", - "multilingual": true, - "description": "Project description", - "language": "{LANGUAGE-CODE}" -} + "language": "{LANGUAGE-CODE}", + "projectKind": "Orchestration", + "description": "Project description" + } ``` |Key |Placeholder |Value | Example | |---------|---------|----------|--| | `projectName` | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `EmailApp` | | `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the utterances used in your project. If your project is a multilingual project, choose the [language code](../../language-support.md) of the majority of the utterances. |`en-us`| -| `multilingual` | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language, not just ones included in your training documents. See [language support](../../language-support.md#multilingual-options). | `true`| diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-deployment.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-deployment.md index ce42deecb6f18..4617da5da38d1 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-deployment.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-deployment.md @@ -11,14 +11,13 @@ ms.custom: ignite-fall-2021 --- - -Create a **DELETE** request using the following URL, headers, and JSON body to delete an orchestration workflow deployment. +Create a **DELETE** request using the following URL, headers, and JSON body to delete a conversational language understanding deployment. ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{projectName}/deployments/{deploymentName}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{projectName}/deployments/{deploymentName}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -26,7 +25,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{DEPLOYMENT-NAME}` | The name for your deployment name. This value is case-sensitive. | `staging` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -35,7 +34,6 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | Once you send your API request, you will receive a `202` response indicating success, which means your deployment has been deleted. 
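For example, the DELETE request described above could be sent from a script such as the following Python sketch. Only the URL shape and authentication header come from this article; the endpoint, key, project, and deployment names are placeholders for your own values.

```python
# Sketch: delete an orchestration workflow deployment through the authoring REST API.
# Replace the placeholder values with your own resource details.
import requests

endpoint = "https://<your-resource>.cognitiveservices.azure.com"
project_name = "myProject"
deployment_name = "staging"
api_version = "2022-05-01"

url = (
    f"{endpoint}/language/authoring/analyze-conversations/projects/"
    f"{project_name}/deployments/{deployment_name}?api-version={api_version}"
)
headers = {"Ocp-Apim-Subscription-Key": "<your-resource-key>"}

response = requests.delete(url, headers=headers)
print(response.status_code)  # 202 indicates the deletion request was accepted
```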
diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-model.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-model.md index f879b09da2f08..46a3bce2632b2 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-model.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-model.md @@ -18,7 +18,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{projectName}/models/{trainedModelLabel}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{projectName}/models/{trainedModelLabel}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -26,7 +26,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d |`{YOUR-ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{trainedModelLabel}` | The name for your model name. This value is case-sensitive. | `model1` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -37,4 +37,4 @@ Use the following header to authenticate your request. |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a `202` response indicating success, which means your model has been deleted. +Once you send your API request, you will receive a `204` response indicating success, which means your model has been deleted. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-project.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-project.md index debbb7655cfa0..f27ebcad1eac7 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-project.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-project.md @@ -9,20 +9,20 @@ ms.date: 05/17/2022 ms.author: aahi --- -Create a **DELETE** request using the following URL, headers, and JSON body to delete an orchestration workflow project. +Create a **DELETE** request using the following URL, headers, and JSON body to delete a conversational language understanding project. ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. 
The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -31,7 +31,5 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | - Once you send your API request, you will receive a `202` response indicating success, which means your project has been deleted. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/deploy-model.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/deploy-model.md index 1ce4d3e135f51..6940f8c678180 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/deploy-model.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/deploy-model.md @@ -9,15 +9,12 @@ ms.date: 05/17/2022 ms.author: aahi --- - - Create a **PUT** request using the following URL, headers, and JSON body to start deploying an orchestration workflow model. - -### Request URL +#### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -25,36 +22,33 @@ Create a **PUT** request using the following URL, headers, and JSON body to star |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{DEPLOYMENT-NAME}` | The name for your deployment. This value is case-sensitive. | `staging` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | -### Headers +#### Headers Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | -### Request Body +#### Request Body ```json { - "trainedModelLabel":"{MODEL-LABEL}" + "trainedModelLabel": "{MODEL-NAME}", } ``` +|Key |Placeholder |Value | Example | +|---------|---------|-----|----| +| trainedModelLabel | `{MODEL-NAME}` | The model name that will be assigned to your deployment. You can only assign successfully trained models. This value is case-sensitive. | `myModel` | -|Key| value| Example| -|--|--|--| -|`trainedModelLabel` | The name for your trained model. This value is case-sensitive. | `Model1` | - - -Once you send your API request, you will receive a `202` response indicating success. 
In the response headers, extract the `location` value. It will be formatted like this: +Once you send your API request, you will receive a `202` response indicating success. In the response headers, extract the `operation-location` value. It will be formatted like this: ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} ``` -`JOB-ID` is used to identify your request, since this operation is asynchronous. You will use this URL in the next step to get the training status. +You can use this URL to get the deployment job status. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/export-project.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/export-project.md index 701e24aa2248d..27997bdae1771 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/export-project.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/export-project.md @@ -16,14 +16,14 @@ Create a **POST** request using the following URL, headers, and JSON body to exp Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/:export?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/:export?stringIndexType=Utf16CodeUnit&api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -32,15 +32,13 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -| `format` | `clu` | Once you send your API request, you will receive a `202` response indicating success. In the response headers, extract the `operation-location` value. It will be formatted like this: ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} ``` `JOB-ID` is used to identify your request, since this operation is asynchronous. Use this URL to get the exported project JSON, using the same authentication method. 
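For reference, the full export request can be issued with a single curl call like the sketch below. This is illustrative only: the resource name `my-language-resource`, the project name `EmailApp`, the API version, and the key placeholder are assumed values that you would replace with your own.

```bash
# Illustrative sketch only — substitute your own resource name, project name, API version, and key.
curl -i -X POST \
  'https://my-language-resource.cognitiveservices.azure.com/language/authoring/analyze-conversations/projects/EmailApp/:export?stringIndexType=Utf16CodeUnit&api-version=2022-05-01' \
  -H 'Ocp-Apim-Subscription-Key: {YOUR-KEY}'
```

The `-i` flag includes the response headers in the output, so you can copy the `operation-location` value needed to check the export job status.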
- diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-deployment-status.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-deployment-status.md index 273fb55682975..fac7b170d6b97 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-deployment-status.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-deployment-status.md @@ -10,13 +10,12 @@ ms.author: aahi ms.custom: ignite-fall-2021 --- -Use the following **GET** request to query the status of your model's deployment process. You can use the URL you received from the previous step, or replace the placeholder values below with your own values. - +Use the following **GET** request to get the status of your deployment job. Replace the placeholder values below with your own values. ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -24,8 +23,8 @@ Use the following **GET** request to query the status of your model's deployment |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{DEPLOYMENT-NAME}` | The name for your deployment. This value is case-sensitive. | `staging` | -|`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received from the API in response to your model deployment request. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-export-status.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-export-status.md index ff2418e8bba46..813549f4923c9 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-export-status.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-export-status.md @@ -8,11 +8,10 @@ ms.topic: include ms.date: 05/19/2022 ms.author: aahi --- - Use the following **GET** request to query the status of your export job. You can use the URL you received from the previous step, or replace the placeholder values below with your own values. 
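As a reference point, the assembled polling call can look like the following sketch; the URL format and header it uses are broken down in the sections that follow, and every value shown here (resource name, project name, job ID, API version, key) is an assumed placeholder.

```bash
# Illustrative sketch only — the job ID comes from the operation-location header of the export request.
curl -X GET \
  'https://my-language-resource.cognitiveservices.azure.com/language/authoring/analyze-conversations/projects/EmailApp/export/jobs/{JOB-ID}?api-version=2022-05-01' \
  -H 'Ocp-Apim-Subscription-Key: {YOUR-KEY}'
```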
```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/export/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/export/jobs/{JOB-ID}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -20,7 +19,7 @@ Use the following **GET** request to query the status of your export job. You ca |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your export job status. This is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | #### Headers @@ -34,18 +33,12 @@ Use the following header to authenticate your request. ```json { - "resultUrl": "{RESULT-URL}", - "jobId": "string", - "createdDateTime": "2021-10-19T23:24:41.572Z", - "lastUpdatedDateTime": "2021-10-19T23:24:41.572Z", - "expirationDateTime": "2021-10-19T23:24:41.572Z", - "status": "unknown", - "errors": [ - { - "code": "unknown", - "message": "string" - } - ] + "resultUrl": "{Endpoint}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/export/jobs/xxxxxx-xxxxx-xxxxx-xx/result?api-version={API-VERSION}", + "jobId": "xxxx-xxxxx-xxxxx-xxx", + "createdDateTime": "2022-04-18T15:23:07Z", + "lastUpdatedDateTime": "2022-04-18T15:23:08Z", + "expirationDateTime": "2022-04-25T15:23:07Z", + "status": "succeeded" } ``` diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-import-status.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-import-status.md index 8ad93c8ca8a50..ec4f532143875 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-import-status.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-import-status.md @@ -8,11 +8,10 @@ ms.topic: include ms.date: 05/19/2022 ms.author: aahi --- - Use the following **GET** request to query the status of your import job. You can use the URL you received from the previous step, or replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/export/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/import/jobs/{JOB-ID}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -20,7 +19,7 @@ Use the following **GET** request to query the status of your import job. You ca |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your export job status. This is in the `location` header value you received in the previous step. 
| `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | #### Headers @@ -37,31 +36,11 @@ Once you send the request, you will get the following response. Keep polling thi ```json { - "jobId": "string", - "createdDateTime": "2022-04-25T10:54:07.950Z", - "lastUpdatedDateTime": "2022-04-25T10:54:07.950Z", - "expirationDateTime": "2022-04-25T10:54:07.950Z", - "status": "unknown", - "warnings": [ - { - "code": "InvalidRequest", - "message": "string", - "target": "string", - "details": [ - "string" - ] - } - ], - "errors": [ - { - "code": "InvalidRequest", - "message": "string", - "target": "string", - "details": [ - "string" - ] - } - ] + "jobId": "xxxxx-xxxxx-xxxx-xxxxx", + "createdDateTime": "2022-04-18T15:17:20Z", + "lastUpdatedDateTime": "2022-04-18T15:17:22Z", + "expirationDateTime": "2022-04-25T15:17:20Z", + "status": "succeeded" } ``` diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-project-details.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-project-details.md index 56f1f6e554709..15896c6dcff7f 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-project-details.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-project-details.md @@ -8,18 +8,17 @@ ms.topic: include ms.date: 05/19/2022 ms.author: aahi --- - Use the following **GET** request to get your project details. You can use the URL you received from the previous step, or replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}?api-version=2021-11-01-preview +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | #### Headers @@ -32,16 +31,14 @@ Use the following header to authenticate your request. 
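Putting the URL and header together, a complete call might look like the following sketch. The resource name, project name, API version, and key shown are assumed placeholders, not values from your resource.

```bash
# Illustrative sketch only — substitute your own resource name, project name, API version, and key.
curl -X GET \
  'https://my-language-resource.cognitiveservices.azure.com/language/authoring/analyze-conversations/projects/EmailApp?api-version=2022-05-01' \
  -H 'Ocp-Apim-Subscription-Key: {YOUR-KEY}'
```

The JSON you get back has the shape shown in the response body below.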
#### Response body ```json - { - "createdDateTime": "2021-10-19T23:24:41.572Z", - "lastModifiedDateTime": "2021-10-19T23:24:41.572Z", - "lastTrainedDateTime": "2021-10-19T23:24:41.572Z", - "lastDeployedDateTime": "2021-10-19T23:24:41.572Z", - "type": "orchestration", - "name": "myProject", - "multiLingual": true, - "description": "string", - "language": "en-us", - "settings": {} - } +{ + "createdDateTime": "2022-04-18T13:53:03Z", + "lastModifiedDateTime": "2022-04-18T13:53:03Z", + "lastTrainedDateTime": "2022-04-18T14:14:28Z", + "lastDeployedDateTime": "2022-04-18T14:49:01Z", + "projectKind": "Orchestration", + "projectName": "{PROJECT-NAME}", + "description": "This is a sample Orchestration project.", + "language": "{LANGUAGE-CODE}" +} ``` diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-training-status.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-training-status.md index 95d8b721e08b2..c4afc8ab68d16 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-training-status.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-training-status.md @@ -9,22 +9,20 @@ ms.date: 05/17/2022 ms.author: aahi --- - - -Use the following **GET** request to query the status of your model's training process. You can use the URL you received from the previous step, or replace the placeholder values below with your own values. +Use the following **GET** request to get the status of your model's training progress. Replace the placeholder values below with your own values. ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received when you submitted your training job. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | ### Headers @@ -37,26 +35,28 @@ Use the following header to authenticate your request. ### Response Body -Once you send the request, you will get the following response. Keep polling this endpoint until the `status` parameter changes to `succeeded`. +Once you send the request, you will get the following response. 
Keep polling this endpoint until the **status** parameter changes to "succeeded". ```json { "result": { "modelLabel": "{MODEL-LABEL}", - "trainingConfigVersion": "{TRAINING-CONGIF-VERSION}", - "trainingMode": "{TRAINING-MODE}", + "trainingConfigVersion": "{TRAINING-CONFIG-VERSION}", + "estimatedEndDateTime": "2022-04-18T15:47:58.8190649Z", "trainingStatus": { - "percentComplete": 2, - "startDateTime": "{START-TIME}", - "status": "{STATUS}" + "percentComplete": 3, + "startDateTime": "2022-04-18T15:45:06.8190649Z", + "status": "running" }, - "evaluationStatus": { "percentComplete": 0, "status": "notStarted" }, - "estimatedEndDateTime": "{ESTIMATED-END-TIME}" + "evaluationStatus": { + "percentComplete": 0, + "status": "notStarted" + } }, - "jobId": "{JOB-ID}", - "createdDateTime": "{CREATED-TIME}", - "lastUpdatedDateTime": "{UPDATED-TIME}", - "expirationDateTime": "{EXPIRATION-TIME}", + "jobId": "xxxxxx-xxxxx-xxxxxx-xxxxxx", + "createdDateTime": "2022-04-18T15:44:44Z", + "lastUpdatedDateTime": "2022-04-18T15:45:48Z", + "expirationDateTime": "2022-04-25T15:44:44Z", "status": "running" } ``` @@ -64,15 +64,11 @@ Once you send the request, you will get the following response. Keep polling thi |Key |Value | Example | |---------|----------|--| | `modelLabel` |The model name| `Model1` | -| `trainingConfigVersion` | The training configuration version. By default, the latest [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) is used. | `2022-05-01` | -| `trainingMode` | Your training mode.| `standard` | -| `startDateTime` | The time training started. |`2022-04-14T10:23:04.2598544Z`| -| `status` | The status of the training job. | `running`| -| `estimatedEndDateTime` | Estimated time for the training job to finish.| `2022-04-14T10:29:38.2598544Z`| -|`jobId`| Your training job ID.| `xxxxx-xxxx-xxxx-xxxx-xxxxxxxxx`| -|`createdDateTime`| Training job creation date and time. | `2022-04-14T10:22:42Z`| -|`lastUpdatedDateTime`| Training job last updated date and time. | `2022-04-14T10:23:45Z`| -|`expirationDateTime`| Training job expiration date and time. | `2022-04-14T10:22:42Z`| - - - +| `trainingConfigVersion` | The training configuration version. By default, the [latest version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) is used. 
| `2022-05-01` | +| `startDateTime` | The time training started |`2022-04-14T10:23:04.2598544Z`| +| `status` | The status of the training job | `running`| +|`estimatedEndDateTime` | Estimated time for the training job to finish| `2022-04-14T10:29:38.2598544Z`| +|`jobId`| Your training job ID| `xxxxx-xxxx-xxxx-xxxx-xxxxxxxxx`| +|`createdDateTime`| Training job creation date and time | `2022-04-14T10:22:42Z`| +|`lastUpdatedDateTime`| Training job last updated date and time | `2022-04-14T10:23:45Z`| +|`expirationDateTime`| Training job expiration date and time | `2022-04-14T10:22:42Z`| diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/import-project.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/import-project.md index 370f2cae4131c..c4a76c30bb96c 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/import-project.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/import-project.md @@ -9,20 +9,21 @@ ms.date: 05/17/2022 ms.author: aahi --- +Submit a **POST** request using the following URL, headers, and JSON body to import your project. ### Request URL -Create a **POST** request using the following URL, headers, and JSON body to import your project. Use the following URL when creating your API request. Replace the placeholder values below with your own values. +Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/:import?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/:import?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | -|`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | +|`{API-VERSION}` | The version of the API you're calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -31,7 +32,6 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -| `format` | `clu` | ### Body @@ -39,19 +39,19 @@ Use the following sample JSON as your body. 
```json { - "api-version": "{API-VERSION}", + "projectFileVersion": "{API-VERSION}", "stringIndexType": "Utf16CodeUnit", "metadata": { - "projectKind": "orchestration", + "projectKind": "Orchestration", "settings": { "confidenceThreshold": 0 }, "projectName": "{PROJECT-NAME}", - "multilingual": true, "description": "Project description", "language": "{LANGUAGE-CODE}" }, "assets": { + "projectKind": "Orchestration", "intents": [ { "category": "string", diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/model-evaluation.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/model-evaluation.md index e16f3572e35a3..e92664c07adc7 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/model-evaluation.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/model-evaluation.md @@ -9,13 +9,13 @@ ms.date: 05/20/2022 ms.author: aahi --- -Create a **GET** request using the following URL, headers, and JSON body to get trained model evaluation summary. +Create a **GET** request using the following URL, headers, and JSON body to get the trained model evaluation summary. ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{projectName}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{projectName}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -23,7 +23,7 @@ Create a **GET** request using the following URL, headers, and JSON body to get |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{trainedModelLabel}` | The name for your trained model. This value is case-sensitive. | `Model1` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -33,7 +33,6 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | ### Response Body diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/project-details.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/project-details.md index 1097d579b7c56..c9767ff1cfdd5 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/project-details.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/project-details.md @@ -10,9 +10,7 @@ ms.date: 05/20/2022 ms.author: aahi --- - To get an orchestration workflow project's details, submit a **GET** request using the following URL and headers. Replace the placeholder values with your own values. 
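For a quick command-line check, a sketch like the one below can retrieve the project details; the URL and header it assembles are described next, and the resource name, project name, API version, and key shown are assumed placeholders. The optional pipe through `jq` (a commonly available JSON processor, assumed to be installed) simply pulls a single field out of the response and is not part of the API.

```bash
# Illustrative sketch only — placeholders are assumed values; the jq step is optional.
curl -s -X GET \
  'https://my-language-resource.cognitiveservices.azure.com/language/authoring/analyze-conversations/projects/EmailApp?api-version=2022-05-01' \
  -H 'Ocp-Apim-Subscription-Key: {YOUR-KEY}' | jq '.lastTrainedDateTime'
```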
- ```rest {ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}?api-version={API-VERSION} ``` @@ -21,7 +19,7 @@ To get an orchestration workflow project's details, submit a **GET** request usi |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -37,20 +35,15 @@ Once you send the request, you will get the following response. ```json { - "createdDateTime": "{CREATED-TIME}", - "lastModifiedDateTime": "{CREATED-TIME}", - "lastTrainedDateTime": "{CREATED-TIME}", - "lastDeployedDateTime": "{CREATED-TIME}", - "projectKind": "orchestration", - "settings": { - "confidenceThreshold": 0 - }, + "createdDateTime": "2022-04-18T13:53:03Z", + "lastModifiedDateTime": "2022-04-18T13:53:03Z", + "lastTrainedDateTime": "2022-04-18T14:14:28Z", + "lastDeployedDateTime": "2022-04-18T14:49:01Z", + "projectKind": "Orchestration", "projectName": "{PROJECT-NAME}", - "multilingual": true, - "description": "string", + "description": "This is a sample orchestration project.", "language": "{LANGUAGE-CODE}" } - ``` -Once you send your API request, you will receive a `202` response indicating success and JSON response body with your project details. +Once you send your API request, you will receive a `200` response indicating success and JSON response body with your project details. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/query-model.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/query-model.md index 4a2f6b458df0d..043bfbd7a5c87 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/query-model.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/query-model.md @@ -11,19 +11,16 @@ ms.author: aahi Create a **POST** request using the following URL, headers, and JSON body to start testing an orchestration workflow model. - ### Request URL ```rest -{ENDPOINT}/language/:analyze-conversations?projectName={PROJECT-NAME}&deploymentName={DEPLOYMENT-NAME}?api-version={API-VERSION} +{ENDPOINT}/language/:analyze-conversations?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | -|`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{DEPLOYMENT-NAME}` | The name for your deployment. This value is case-sensitive. | `staging` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. 
The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -33,13 +30,38 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | + ### Request Body ```json { - "query":"attach a docx file" + "kind": "Conversation", + "analysisInput": { + "conversationItem": { + "text": "Text1", + "participantId": "1", + "id": "1" + } + }, + "parameters": { + "projectName": "{PROJECT-NAME}", + "deploymentName": "{DEPLOYMENT-NAME}", + "directTarget": "qnaProject", + "targetProjectParameters": { + "qnaProject": { + "targetProjectKind": "QuestionAnswering", + "callingOptions": { + "context": { + "previousUserQuery": "Meet Surface Pro 4", + "previousQnaId": 4 + }, + "top": 1, + "question": "App Service overview" + } + } + } + } } ``` @@ -49,17 +71,38 @@ Once you send the request, you will get the following response for the predictio ```json { - "query": "attach a docx file", - "prediction": { - "topIntent": "Attach", - "projectKind": "workflow", - "intents": [ - { "category": "Attach", "confidenceScore": 0.9998592 }, - { "category": "Read", "confidenceScore": 0.00010551753 }, - { "category": "Delete", "confidenceScore": 3.5209276e-5 } - ] + "kind": "ConversationResult", + "result": { + "query": "App Service overview", + "prediction": { + "projectKind": "Orchestration", + "topIntent": "qnaTargetApp", + "intents": { + "qnaTargetApp": { + "targetProjectKind": "QuestionAnswering", + "confidenceScore": 1, + "result": { + "answers": [ + { + "questions": [ + "App Service overview" + ], + "answer": "The compute resources you use are determined by the *App Service plan* that you run your apps on.", + "confidenceScore": 0.7384000000000001, + "id": 1, + "source": "https://docs.microsoft.com/en-us/azure/app-service/overview", + "metadata": {}, + "dialog": { + "isContextOnly": false, + "prompts": [] + } + } + ] + } + } + } + } } } - ``` diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/swap-deployment.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/swap-deployment.md index 8f0a301e04721..b132c9a22d56c 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/swap-deployment.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/swap-deployment.md @@ -10,21 +10,20 @@ ms.author: aahi --- - Create a **POST** request using the following URL, headers, and JSON body to start a swap deployments job. ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/deployments:swap?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/deployments:swap?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). 
| `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -34,7 +33,6 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | ### Request Body @@ -46,10 +44,9 @@ Use the following header to authenticate your request. ``` -|Key| value| Example| -|--|--|--| -|`firstDeploymentName` | The name for your first deployment. This value is case-sensitive. | `production` | -|`secondDeploymentName` | The name for your second deployment. This value is case-sensitive. | `staging` | - +|Key|Placeholder| Value| Example| +|--|--|--|--| +|firstDeploymentName |`{FIRST-DEPLOYMENT-NAME}`| The name for your first deployment. This value is case-sensitive. | `production` | +|secondDeploymentName | `{SECOND-DEPLOYMENT-NAME}`|The name for your second deployment. This value is case-sensitive. | `staging` | Once you send your API request, you will receive a `202` response indicating success. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/train-model.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/train-model.md index 87375d7b93e11..2918fbffc9f5d 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/train-model.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/train-model.md @@ -8,19 +8,21 @@ ms.date: 05/17/2022 ms.author: aahi --- +Create a **POST** request using the following URL, headers, and JSON body to submit a training job. + ### Request URL -Create a **POST** request using the following URL, headers, and JSON body to start training. Use the following URL when creating your API request. Replace the placeholder values below with your own values. +Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/:train?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/:train?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-05-01` | ### Headers @@ -29,7 +31,7 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | + ### Request body @@ -39,24 +41,27 @@ Use the following object in your request. 
The model will be named `MyModel` once { "modelLabel": "{MODEL-NAME}", "trainingConfigVersion": "{CONFIG-VERSION}", - "trainingMode": "{TRAINING-MODE}", "evaluationOptions": { "kind": "percentage", - "trainingSplitPercentage": 80, - "testingSplitPercentage": 20 + "testingSplitPercentage": 20, + "trainingSplitPercentage": 80 } } ``` |Key |Placeholder|Value | Example | |---------|-----|----|---------| -|`modelLabel` | `{MODEL-NAME}`|Your model name. | `Model1` | -| `trainingConfigVersion` |`{CONFIG-VERSION}`| The [training configuration version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). By default, the latest version is used. | `2022-05-01` | -| `trainingMode` |`{TRAINING-MODE}`| Your training mode. | `advanced` | +|`modelLabel` | `{MODEL-NAME}`|Your Model name. | `Model1` | +| `trainingConfigVersion` |`{CONFIG-VERSION}`| The training configuration model version. By default, the latest [model version](../../../concepts/model-lifecycle.md) is used. | `2022-05-01` | +| `kind` | `percentage` | Split methods. Possible values are `percentage` or `manual`. See [how to train a model](../../how-to/train-model.md#data-splitting) for more information. |`percentage`| +| `trainingSplitPercentage` | `80`| Percentage of your tagged data to be included in the training set. Recommended value is `80`. | `80`| +| `testingSplitPercentage` | `20` | Percentage of your tagged data to be included in the testing set. Recommended value is `20`. | `20` | + + > [!NOTE] + > The `trainingSplitPercentage` and `testingSplitPercentage` are only required if `Kind` is set to `percentage` and the sum of both percentages should be equal to 100. Once you send your API request, you will receive a `202` response indicating success. In the response headers, extract the `operation-location` value. It will be formatted like this: ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}?api-version={API-VERSION} ``` - -`JOB-ID` is used to identify your request, since this operation is asynchronous. You will use this URL in the next step to get the training status. +You can use this URL to get the training job status. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/tutorials/connect-services.md b/articles/cognitive-services/language-service/orchestration-workflow/tutorials/connect-services.md index dcd2f96fb18f5..c914f4a05927e 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/tutorials/connect-services.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/tutorials/connect-services.md @@ -1,5 +1,5 @@ --- -title: Intergate custom question answering and conversational language understanding into orchestration workflows +title: Integrate custom question answering and conversational language understanding with orchestration workflow description: Learn how to connect different projects with orchestration workflow. 
keywords: conversational language understanding, bot framework, bot, language understanding, nlu author: aahill @@ -9,16 +9,16 @@ ms.reviewer: cahann, hazemelh ms.service: cognitive-services ms.subservice: language-service ms.topic: tutorial -ms.date: 05/17/2022 +ms.date: 05/25/2022 --- -# Connect different services with orchestration workflow +# Connect different services with Orchestration workflow Orchestration workflow is a feature that allows you to connect different projects from LUIS, conversational language understanding, and custom question answering in one project. You can then use this project for predictions under one endpoint. The orchestration project makes a prediction on which project should be called and automatically routes the request to that project, and returns with its response. In this tutorial, you will learn how to connect a custom question answering knowledge base with a conversational language understanding project. You will then call the project using the .NET SDK sample for orchestration. -This tutorial will include creating a **chit chat** knowledge base and **email commands** project. Chit chat will deal with common niceties and greetings with static responses. +This tutorial will include creating a **chit chat** knowledge base and **email commands** project. Chit chat will deal with common niceties and greetings with static responses. Email commands will predict among a few simple actions for an email assistant. The tutorial will then teach you to call the Orchestrator using the SDK in a .NET environment using a sample solution. ## Prerequisites @@ -27,12 +27,12 @@ This tutorial will include creating a **chit chat** knowledge base and **email c - You will need the key and endpoint from the resource you create to connect your bot to the API. You'll paste your key and endpoint into the code below later in the tutorial. Copy them from the **Keys and Endpoint** tab in your resource. - When you enable custom question answering, you must select an Azure search resource to connect to. - Make sure the region of your resource is supported by [conversational language understanding](../../conversational-language-understanding/service-limits.md#regional-availability). -- Download the **OrchestrationWorkflowSample** sample in [**.NET**](https://aka.ms/orchestration-sample). +- Download the **OrchestrationWorkflowSample** [sample](https://aka.ms/orchestration-sample). ## Create a custom question answering knowledge base 1. Sign into the [Language Studio](https://language.cognitive.azure.com/) and select your Language resource. -2. Find and select the [custom question answering](https://language.cognitive.azure.com/questionAnswering/projects/) card in the homepage. +2. Find and select the [Custom question answering](https://language.cognitive.azure.com/questionAnswering/projects/) card in the homepage. 3. Click on **Create new project** and add the name **chitchat** with the language _English_ before clicking on **Create project**. 4. When the project loads, click on **Add source** and select _Chit chat_. Select the professional personality for chit chat before @@ -40,38 +40,38 @@ This tutorial will include creating a **chit chat** knowledge base and **email c 5. Go to **Deploy knowledge base** from the left navigation menu and click on **Deploy** and confirm the popup that shows up. -You are now done with deploying your knowledge base for chit chat. You can explore the type of questions and answers to expect in the **Edit knowledge base** tab. 
+You are now done with deploying your knowledge base for chit chat. You can explore the type of questions and answers to expect in the **Edit knowledge base** page. ## Create a conversational language understanding project -1. In Language Studio, go to the [conversational language understanding](https://language.cognitive.azure.com/clu/projects) service. +1. In Language Studio, go to the [Conversational language understanding](https://language.cognitive.azure.com/clu/projects) service. 2. Download the **EmailProject.json** sample file [here](https://aka.ms/clu-sample-json). -3. Click on the arrow next to **Create new project** and select **Import**. Browse to the downloaded EmailProject.json file you downloaded and press Done. +3. Click on the **Import** button. Browse to the EmailProject.json file you downloaded and press Done. :::image type="content" source="../media/import-export.png" alt-text="A screenshot showing where to import a J son file." lightbox="../media/import-export.png"::: -4. Once the project is loaded, click on **Training** on the left. Press on Start a training job, provide the model name **v1** and press Train. All other settings such as **Standard Training** and the evaluation settings can be left as is. +4. Once the project is loaded, click on **Training jobs** on the left. Press on Start a training job, provide the model name **v1** and press Train. :::image type="content" source="../media/train-model.png" alt-text="A screenshot of the training page." lightbox="../media/train-model.png"::: -5. Once training is complete, click to **Deployments** on the left. Click on Add Deployment and create a new deployment with the name **Testing**, and assign model **v1** to the deployment. +5. Once training is complete, click to **Deploying a model** on the left. Click on Add Deployment and create a new deployment with the name **Testing**, and assign model **v1** to the deployment. :::image type="content" source="../media/deploy-model-tutorial.png" alt-text="A screenshot showing the model deployment page." lightbox="../media/deploy-model-tutorial.png"::: -You are now done with deploying a conversational language understanding project for email commands. You can explore the different commands in the **Utterances** page. +You are now done with deploying a conversational language understanding project for email commands. You can explore the different commands in the **Data labeling** page. -## Create an orchestration workflow project +## Create an Orchestration workflow project -1. In Language Studio, go to the [orchestration workflow](https://language.cognitive.azure.com/orchestration/projects) service. +1. In Language Studio, go to the [Orchestration workflow](https://language.cognitive.azure.com/orchestration/projects) service. 2. Click on **Create new project**. Use the name **Orchestrator** and the language _English_ before clicking next then done. -3. Once the project is created, click on **Add** in the **Build schema** page. +3. Once the project is created, click on **Add** in the **Schema definition** page. 4. Select _Yes, I want to connect it to an existing project_. Add the intent name **EmailIntent** and select **Conversational Language Understanding** as the connected service. Select the recently created **EmailProject** project for the project name before clicking on **Add Intent**. :::image type="content" source="../media/connect-intent-tutorial.png" alt-text="A screenshot of the connect intent popup in orchestration workflow." 
lightbox="../media/connect-intent-tutorial.png"::: 5. Add another intent but now select **Question Answering** as the service and select **chitchat** as the project name. -6. Similar to conversational language understanding, go to **Training** and start a new training job with the name **v1** and press Train. -7. Once training is complete, click to **Deployments** on the left. Click on Add deployment and create a new deployment with the name **Testing**, and assign model **v1** to the deployment and press Next. +6. Similar to conversational language understanding, go to **Training jobs** and start a new training job with the name **v1** and press Train. +7. Once training is complete, click to **Deploying a model** on the left. Click on Add deployment and create a new deployment with the name **Testing**, and assign model **v1** to the deployment and press Next. 8. On the next page, select the deployment name **Testing** for the **EmailIntent**. This tells the orchestrator to call the **Testing** deployment in **EmailProject** when it routes to it. Custom question answering projects only have one deployment by default. :::image type="content" source="../media/deployment-orchestrator-tutorial.png" alt-text="A screenshot of the deployment popup for orchestration workflow." lightbox="../media/deployment-orchestrator-tutorial.png"::: @@ -80,27 +80,29 @@ Now your orchestration project is ready to be used. Any incoming request will be ## Call the orchestration project with the Conversations SDK -1. In the downloaded **OrchestrationWorkflowSample** solution, make sure to install all the required packages. In Visual Studio, go to _Tools_, _NuGet Package Manager_ and select _Package Manager Console_ and run the following command. +1. In the downloaded sample, open OrchestrationWorkflowSample.sln in Visual Studio. + +2. In the OrchestrationWorkflowSample solution, make sure to install all the required packages. In Visual Studio, go to _Tools_, _NuGet Package Manager_ and select _Package Manager Console_ and run the following command. ```powershell dotnet add package Azure.AI.Language.Conversations ``` -2. In `Program.cs`, replace `{api-key}` and the placeholder endpoint. Use the key and endpoint for the Language resource you created earlier. You can find them in the **Keys and Endpoint** tab in your Language resource in Azure. +3. In `Program.cs`, replace `{api-key}` and the `{endpoint}` variables. Use the key and endpoint for the Language resource you created earlier. You can find them in the **Keys and Endpoint** tab in your Language resource in Azure. ```csharp -Uri endpoint = new Uri("https://myaccount.api.cognitive.microsoft.com"); +Uri endpoint = new Uri("{endpoint}"); AzureKeyCredential credential = new AzureKeyCredential("{api-key}"); ``` -3. Replace the orchestrationProject parameters to **Orchestrator** and **Testing** as below if they are not set already. +4. Replace the orchestrationProject parameters to **Orchestrator** and **Testing** as below if they are not set already. ```csharp ConversationsProject orchestrationProject = new ConversationsProject("Orchestrator", "Testing"); ``` -4. Run the project or press F5 in Visual Studio. -5. Input a query such as "read the email from matt" or "hello how are you". You'll now observe different responses for each, a conversational language understanding **EmailProject** response from the first, and the answer from the **chitchat** for the second query. +5. Run the project or press F5 in Visual Studio. +6. 
Input a query such as "read the email from matt" or "hello how are you". You'll now observe different responses for each, a conversational language understanding **EmailProject** response from the first query, and the answer from the **chitchat** knowledge base for the second query. **Conversational Language Understanding**: :::image type="content" source="../media/clu-response-orchestration.png" alt-text="A screenshot showing the sample response from conversational language understanding." lightbox="../media/clu-response-orchestration.png"::: diff --git a/articles/cognitive-services/language-service/overview.md b/articles/cognitive-services/language-service/overview.md index c865a89b1ec71..1c4234dc35d9e 100644 --- a/articles/cognitive-services/language-service/overview.md +++ b/articles/cognitive-services/language-service/overview.md @@ -40,10 +40,10 @@ Azure Cognitive Service for Language provides the following features: > | [Custom NER](custom-named-entity-recognition/overview.md) | Build an AI model to extract custom entity categories, using unstructured text that you provide. | * [Language Studio](custom-named-entity-recognition/quickstart.md?pivots=language-studio)
                  * [REST API](custom-named-entity-recognition/quickstart.md?pivots=rest-api) | > | [Analyze sentiment and opinions](sentiment-opinion-mining/overview.md) | This pre-configured feature provides sentiment labels (such as "*negative*", "*neutral*" and "*positive*") for sentences and documents. This feature can additionally provide granular information about the opinions related to words that appear in the text, such as the attributes of products or services. | * [Language Studio](language-studio.md)
                  * [REST API and client-library](sentiment-opinion-mining/quickstart.md)
                  * [Docker container](sentiment-opinion-mining/how-to/use-containers.md) > |[Language detection](language-detection/overview.md) | This pre-configured feature evaluates text, and determines the language it was written in. It returns a language identifier and a score that indicates the strength of the analysis. | * [Language Studio](language-studio.md)
                  * [REST API and client-library](language-detection/quickstart.md)
                  * [Docker container](language-detection/how-to/use-containers.md) | -> |[Custom text classification (preview)](custom-classification/overview.md) | Build an AI model to classify unstructured text into custom classes that you define. | * [Language Studio](custom-classification/quickstart.md?pivots=language-studio)
                  * [REST API](language-detection/quickstart.md?pivots=rest-api) | +> |[Custom text classification](custom-classification/overview.md) | Build an AI model to classify unstructured text into custom classes that you define. | * [Language Studio](custom-classification/quickstart.md?pivots=language-studio)
                  * [REST API](language-detection/quickstart.md?pivots=rest-api) | > | [Document summarization (preview)](summarization/overview.md?tabs=document-summarization) | This pre-configured feature extracts key sentences that collectively convey the essence of a document. | * [Language Studio](language-studio.md)
                  * [REST API and client-library](summarization/quickstart.md) | > | [Conversation summarization (preview)](summarization/overview.md?tabs=conversation-summarization) | This pre-configured feature summarizes issues and summaries in transcripts of customer-service conversations. | * [Language Studio](language-studio.md)
                  * [REST API](summarization/quickstart.md?tabs=rest-api) | -> | [Conversational language understanding (preview)](conversational-language-understanding/overview.md) | Build an AI model to bring the ability to understand natural language into apps, bots, and IoT devices. | * [Language Studio](conversational-language-understanding/quickstart.md) +> | [Conversational language understanding](conversational-language-understanding/overview.md) | Build an AI model to bring the ability to understand natural language into apps, bots, and IoT devices. | * [Language Studio](conversational-language-understanding/quickstart.md) > | [Question answering](question-answering/overview.md) | This pre-configured feature provides answers to questions extracted from text input, using semi-structured content such as: FAQs, manuals, and documents. | * [Language Studio](language-studio.md)
                  * [REST API and client-library](question-answering/quickstart/sdk.md) | > | [Orchestration workflow](orchestration-workflow/overview.md) | Train language models to connect your applications to question answering, conversational language understanding, and LUIS | * [Language Studio](orchestration-workflow/quickstart.md?pivots=language-studio)
                  * [REST API](orchestration-workflow/quickstart.md?pivots=rest-api) | diff --git a/articles/cognitive-services/language-service/personally-identifiable-information/concepts/conversations-entity-categories.md b/articles/cognitive-services/language-service/personally-identifiable-information/concepts/conversations-entity-categories.md index ba026f55000d8..e694f69b815ee 100644 --- a/articles/cognitive-services/language-service/personally-identifiable-information/concepts/conversations-entity-categories.md +++ b/articles/cognitive-services/language-service/personally-identifiable-information/concepts/conversations-entity-categories.md @@ -23,7 +23,7 @@ The PII preview feature includes the ability to detect personal (`PII`) informat The following entity categories are returned when you're sending API requests PII feature. -## Category: Person +## Category: Name This category contains the following entity: @@ -31,7 +31,7 @@ This category contains the following entity: :::column span=""::: **Entity** - Person + Name :::column-end::: :::column span="2"::: @@ -39,7 +39,7 @@ This category contains the following entity: All first, middle, last or full name is considered PII regardless of whether it is the speaker’s name, the agent’s name, someone else’s name or a different version of the speaker’s full name (Chris vs. Christopher). - To get this entity category, add `Person` to the `pii-categories` parameter. `Person` will be returned in the API response if detected. + To get this entity category, add `Name` to the `pii-categories` parameter. `Name` will be returned in the API response if detected. :::column-end::: diff --git a/articles/cognitive-services/language-service/question-answering/concepts/best-practices.md b/articles/cognitive-services/language-service/question-answering/concepts/best-practices.md index 759ca65b2121e..14215ef7c9430 100644 --- a/articles/cognitive-services/language-service/question-answering/concepts/best-practices.md +++ b/articles/cognitive-services/language-service/question-answering/concepts/best-practices.md @@ -6,7 +6,7 @@ ms.subservice: language-service author: jboback ms.author: jboback ms.topic: conceptual -ms.date: 01/26/2022 +ms.date: 06/03/2022 ms.custom: language-service-question-answering --- @@ -190,7 +190,7 @@ Question answering allows users to collaborate on a project/knowledge base. User ## Active learning -[Active learning](../tutorials/active-learning.md) does the best job of suggesting alternative questions when it has a wide range of quality and quantity of user-based queries. It’s important to allow client-applications' user queries to participate in the active learning feedback loop without censorship. Once questions are suggested in the Language Studio portal, you can review and accept or reject those suggestions. +[Active learning](../tutorials/active-learning.md) does the best job of suggesting alternative questions when it has a wide range of quality and quantity of user-based queries. It’s important to allow client-applications' user queries to participate in the active learning feedback loop without censorship. Once questions are suggested in Language Studio, you can review and accept or reject those suggestions. 
## Next steps diff --git a/articles/cognitive-services/language-service/question-answering/concepts/plan.md b/articles/cognitive-services/language-service/question-answering/concepts/plan.md index c026225764e56..152af172d57d7 100644 --- a/articles/cognitive-services/language-service/question-answering/concepts/plan.md +++ b/articles/cognitive-services/language-service/question-answering/concepts/plan.md @@ -6,7 +6,7 @@ ms.subservice: language-service author: jboback ms.author: jboback ms.topic: conceptual -ms.date: 11/02/2021 +ms.date: 06/03/2022 --- # Plan your question answering app @@ -62,7 +62,7 @@ Question answering also supports unstructured content. You can upload a file tha Currently we do not support URLs for unstructured content. -The ingestion process converts supported content types to markdown. All further editing of the *answer* is done with markdown. After you create a knowledge base, you can edit QnA pairs in the Language Studio portal with rich text authoring. +The ingestion process converts supported content types to markdown. All further editing of the *answer* is done with markdown. After you create a knowledge base, you can edit QnA pairs in Language Studio with rich text authoring. ### Data format considerations @@ -104,7 +104,7 @@ Question answering uses _active learning_ to improve your knowledge base by sugg ### Providing a default answer -If your knowledge base doesn't find an answer, it returns the _default answer_. This answer is configurable on the **Settings** page.). +If your knowledge base doesn't find an answer, it returns the _default answer_. This answer is configurable on the **Settings** page. This default answer is different from the Azure bot default answer. You configure the default answer for your Azure bot in the Azure portal as part of configuration settings. It's returned when the score threshold isn't met. diff --git a/articles/cognitive-services/language-service/question-answering/concepts/project-development-lifecycle.md b/articles/cognitive-services/language-service/question-answering/concepts/project-development-lifecycle.md index c3e0d02fbd93f..fe996e34feb15 100644 --- a/articles/cognitive-services/language-service/question-answering/concepts/project-development-lifecycle.md +++ b/articles/cognitive-services/language-service/question-answering/concepts/project-development-lifecycle.md @@ -6,7 +6,7 @@ ms.subservice: language-service author: jboback ms.author: jboback ms.topic: conceptual -ms.date: 11/02/2021 +ms.date: 06/03/2022 --- # Question answering project lifecycle @@ -25,7 +25,7 @@ Learn how to [create a knowledge base](../how-to/create-test-deploy.md). ## Testing and updating your project -The project is ready for testing once it is populated with content, either editorially or through automatic extraction. Interactive testing can be done in the Language Studio portal, in the custom question answering menu through the **Test** panel. You enter common user queries. Then you verify that the responses returned with both the correct response and a sufficient confidence score. +The project is ready for testing once it is populated with content, either editorially or through automatic extraction. Interactive testing can be done in Language Studio, in the custom question answering menu through the **Test** panel. You enter common user queries. Then you verify that the responses returned with both the correct response and a sufficient confidence score. * **To fix low confidence scores**: add alternate questions. 
* **When a query incorrectly returns the [default response](../How-to/change-default-answer.md)**: add new answers to the correct question. @@ -48,7 +48,7 @@ Based on what you learn from your analytics, make appropriate updates to your pr ## Version control for data in your knowledge base -Version control for data is provided through the import/export features on the project page in the question answering section of the Language Studio portal. +Version control for data is provided through the import/export features on the project page in the question answering section of Language Studio. You can back up a project/knowledge base by exporting the project, in either `.tsv` or `.xls` format. Once exported, include this file as part of your regular source control check. @@ -62,7 +62,7 @@ A project/knowledge base has two states: *test* and *published*. ### Test project/knowledge base -The *test knowledge base* is the version currently edited and saved. The test version has been tested for accuracy, and for completeness of responses. Changes made to the test knowledge base don't affect the end user of your application or chat bot. The test knowledge base is known as `test` in the HTTP request. The `test` knowledge is available with the Language Studio's interactive **Test** pane. +The *test knowledge base* is the version currently edited and saved. The test version has been tested for accuracy, and for completeness of responses. Changes made to the test knowledge base don't affect the end user of your application or chat bot. The test knowledge base is known as `test` in the HTTP request. The `test` knowledge is available with Language Studio's interactive **Test** pane. ### Production project/knowledge base diff --git a/articles/cognitive-services/language-service/question-answering/how-to/authoring.md b/articles/cognitive-services/language-service/question-answering/how-to/authoring.md index 76b95091b5d33..1cacafa2fd926 100644 --- a/articles/cognitive-services/language-service/question-answering/how-to/authoring.md +++ b/articles/cognitive-services/language-service/question-answering/how-to/authoring.md @@ -475,7 +475,7 @@ If you try to access the resultUrl directly, you will get a 404 error. You must ```bash curl -X POST -H "Ocp-Apim-Subscription-Key: {API-KEY}" -H "Content-Type: application/json" -d '{ - "ImportJobOptions": {"fileUri": "FILE-URI-PATH"} + "fileUri": "FILE-URI-PATH" }' -i 'https://{ENDPOINT}.api.cognitive.microsoft.com/language/query-knowledgebases/projects/{PROJECT-NAME}/:import?api-version=2021-10-01&format=tsv' ``` diff --git a/articles/cognitive-services/language-service/question-answering/how-to/best-practices.md b/articles/cognitive-services/language-service/question-answering/how-to/best-practices.md new file mode 100644 index 0000000000000..f7388e833b1e7 --- /dev/null +++ b/articles/cognitive-services/language-service/question-answering/how-to/best-practices.md @@ -0,0 +1,98 @@ +--- +title: Project best practices +description: Best practices for Question Answering +ms.service: cognitive-services +ms.subservice: qna-maker +ms.topic: how-to +author: jboback +ms.author: jboback +recommendations: false +ms.date: 06/03/2022 +--- + +# Project best practices + +The following list of QnA pairs will be used to represent a project (knowledge base) to highlight best practices when authoring in custom question answering. + +|Question |Answer | +|-------------------------------------|-------------------------------------------------------| +|I want to buy a car. 
|There are three options for buying a car. | +|I want to purchase software license. |Software licenses can be purchased online at no cost. | +|How to get access to WPA? |WPA can be accessed via the company portal. | +|What is the price of Microsoft stock?|$200. | +|How do I buy Microsoft Services? |Microsoft services can be bought online. | +|I want to sell car. |Please send car pictures and documents. | +|How do I get an identification card? |Apply via company portal to get an identification card.| +|How do I use WPA? |WPA is easy to use with the provided manual. | +|What is the utility of WPA? |WPA provides a secure way to access company resources. | + +## When should you add alternate questions to a QnA? + +- Question answering employs a transformer-based ranker that takes care of user queries that are semantically similar to questions in the knowledge base. For example, consider the following question answer pair: + + **Question: “What is the price of Microsoft Stock?”** + + **Answer: “$200”.** + + The service can return expected responses for semantically similar queries such as: + + "How much is Microsoft stock worth?" + + "How much is Microsoft's share value?" + + "How much does a Microsoft share cost?" + + "What is the market value of Microsoft stock?" + + "What is the market value of a Microsoft share?" + + However, please note that the confidence score with which the system returns the correct response will vary based on the input query and how different it is from the original question answer pair. + +- There are certain scenarios which require the customer to add an alternate question. When a query does not return the correct answer despite it being present in the knowledge base, we advise adding that query as an alternate question to the intended QnA pair. + +## How many alternate questions per QnA is optimal? + +- Users can add up to 10 alternate questions depending on their scenario. Alternate questions beyond the first 10 aren’t considered by our core ranker. However, they are evaluated in the other processing layers resulting in better output overall. All the alternate questions will be considered in the preprocessing step to look for an exact match. + +- Semantic understanding in question answering should be able to take care of similar alternate questions. + +- The return on investment will start diminishing once you exceed 10 questions. Even if you’re adding more than 10 alternate questions, try to make the initial 10 questions as semantically dissimilar as possible so that all intents for the answer are captured by these 10 questions. For the knowledge base above, in QNA #1, adding alternate questions such as "How can I buy a car?", "I wanna buy a car." are not required. Whereas adding alternate questions such as "How to purchase a car.", "What are the options for buying a vehicle?" can be useful. + +## When to add synonyms to a knowledge base + +- Question answering provides the flexibility to use synonyms at the knowledge base level, unlike QnA Maker where synonyms are shared across knowledge bases for the entire service. + +- For better relevance, the customer needs to provide a list of acronyms that the end user intends to use interchangeably. 
For instance, the following is a list of acceptable acronyms: + + MSFT – Microsoft + + ID – Identification + + ETA – Estimated time of Arrival + +- Apart from acronyms, if you think your words are similar in the context of a particular domain and generic language models won’t consider them similar, it’s better to add them as synonyms. For instance, if an auto company producing a car model X receives queries such as "my car’s audio isn’t working" and the knowledge base has questions on "fixing audio for car X", then we need to add "X" and "car" as synonyms. + +- The Transformer-based model already takes care of most of the common synonym cases, for example: Purchase – Buy, Sell – Auction, Price – Value. For example, consider the following QnA pair: Q: "What is the price of Microsoft Stock?" A: "$200". + +If we receive user queries like "Microsoft stock value", "Microsoft share value", "Microsoft stock worth", "Microsoft share worth", "stock value", etc., they should be able to get the correct answer even though these queries have words like share, value, and worth, which are not originally present in the knowledge base. + +## How are lowercase/uppercase characters treated? + +Question answering takes casing into account, but it's intelligent enough to understand when it is to be ignored. You should not see any perceivable difference due to incorrect casing. + +## How are QnAs prioritized for multi-turn questions? + +When a knowledge base has hierarchical relationships (either added manually or via extraction) and the previous response was an answer related to other QnAs, for the next query we give slight preference to all the children QnAs, sibling QnAs, and grandchildren QnAs in that order. Along with any query, the [Question Answering API](/rest/api/cognitiveservices/questionanswering/question-answering/get-answers) expects a "context" object with the property "previousQnAId", which denotes the last top answer. Based on this previous QnA ID, all the related QnAs are boosted. + +## How are accents treated? + +Accents are supported for all major European languages. If the query has an incorrect accent, the confidence score might be slightly different, but the service still returns the relevant answer and takes care of minor errors by leveraging fuzzy search. + +## How is punctuation in a user query treated? + +Punctuation is ignored in the user query before it is sent to the ranking stack. Ideally it should not impact the relevance scores. The punctuation marks that are ignored are as follows: ,?:;\"'(){}[]-+。./!*؟ + +## Next steps + +> [!div class="nextstepaction"] +> [Get started with Question Answering](../quickstart/sdk.md) \ No newline at end of file diff --git a/articles/cognitive-services/language-service/question-answering/how-to/encrypt-data-at-rest.md b/articles/cognitive-services/language-service/question-answering/how-to/encrypt-data-at-rest.md index 851fb33e397b6..486f907463fa7 100644 --- a/articles/cognitive-services/language-service/question-answering/how-to/encrypt-data-at-rest.md +++ b/articles/cognitive-services/language-service/question-answering/how-to/encrypt-data-at-rest.md @@ -7,7 +7,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: conceptual -ms.date: 11/02/2021 +ms.date: 06/03/2022 ms.author: egeaney ms.custom: language-service-question-answering, ignite-fall-2021 --- @@ -49,7 +49,7 @@ Customer-managed keys are available in all Azure Search regions. ## Encryption of data in transit -The language studio portal runs in the user's browser.
Every action triggers a direct call to the respective Cognitive Service API. Hence, question answering is compliant for data in transit. +Language Studio runs in the user's browser. Every action triggers a direct call to the respective Cognitive Service API. Hence, question answering is compliant for data in transit. ## Next steps diff --git a/articles/cognitive-services/language-service/question-answering/how-to/manage-knowledge-base.md b/articles/cognitive-services/language-service/question-answering/how-to/manage-knowledge-base.md index dc0002a37ebad..a1e392d6b2fbf 100644 --- a/articles/cognitive-services/language-service/question-answering/how-to/manage-knowledge-base.md +++ b/articles/cognitive-services/language-service/question-answering/how-to/manage-knowledge-base.md @@ -4,7 +4,7 @@ description: Custom question answering allows you to manage projects by providin ms.service: cognitive-services ms.subservice: language-service ms.topic: how-to -ms.date: 11/02/2021 +ms.date: 06/03/2022 ms.custom: language-service-question-answering, ignite-fall-2021 --- @@ -72,7 +72,7 @@ From the **Edit knowledge base page** you can: ## Delete project -Deleting a project is a permanent operation. It can't be undone. Before deleting a project, you should export the project from the main question answering page within the Language Studio. +Deleting a project is a permanent operation. It can't be undone. Before deleting a project, you should export the project from the main question answering page within Language Studio. If you share your project with collaborators and then later delete it, everyone loses access to the project. diff --git a/articles/cognitive-services/language-service/question-answering/how-to/migrate-qnamaker.md b/articles/cognitive-services/language-service/question-answering/how-to/migrate-qnamaker.md index 629bb01f44e06..53e0769590c49 100644 --- a/articles/cognitive-services/language-service/question-answering/how-to/migrate-qnamaker.md +++ b/articles/cognitive-services/language-service/question-answering/how-to/migrate-qnamaker.md @@ -63,23 +63,23 @@ You can follow the steps below to migrate knowledge bases: > [!div class="mx-imgBorder"] > ![Migrate QnAMaker with red selection box around the knowledge base selection option with a drop-down displaying three knowledge base names](../media/migrate-qnamaker/select-knowledge-bases.png) -8. You can review the knowledge bases you plan to migrate. There could be some validation errors in project names as we follow stricter validation rules for custom question answering projects. +8. You can review the knowledge bases you plan to migrate. There could be some validation errors in project names as we follow stricter validation rules for custom question answering projects. To resolve these errors occurring due to invalid characters, select the checkbox (in red), and then select **Next**. This is a one-click method to replace the problematic characters in the name with the accepted characters. If there's a duplicate, a new unique project name is generated by the system. > [!CAUTION] > If you migrate a knowledge base with the same name as a project that already exists in the target language resource, **the content of the project will be overridden** by the content of the selected knowledge base.
> [!div class="mx-imgBorder"] - > ![Screenshot of an error message starting project names can't contain special characters](../media/migrate-qnamaker/special-characters.png) + > ![Screenshot of an error message stating project names can't contain special characters](../media/migrate-qnamaker/migration-kb-name-validation.png) -9. After resolving any validation errors, select **Next** +9. After resolving the validation errors, select **Start migration** > [!div class="mx-imgBorder"] - > ![Screenshot with special characters removed](../media/migrate-qnamaker/validation-errors.png) + > ![Screenshot with special characters removed](../media/migrate-qnamaker/migration-kb-name-validation-success.png) 10. It will take a few minutes for the migration to occur. Do not cancel the migration while it is in progress. You can navigate to the migrated projects within the [Language Studio](https://language.azure.com/) post migration. > [!div class="mx-imgBorder"] - > ![Screenshot of successfully migrated knowledge bases with information that you can publish by using the Language Studio](../media/migrate-qnamaker/migration-success.png) + > ![Screenshot of successfully migrated knowledge bases with information that you can publish by using Language Studio](../media/migrate-qnamaker/migration-success.png) If any knowledge bases fail to migrate to custom question answering projects, an error will be displayed. The most common migration errors occur when: diff --git a/articles/cognitive-services/language-service/question-answering/how-to/network-isolation.md b/articles/cognitive-services/language-service/question-answering/how-to/network-isolation.md index e9d7193855e39..8753fd0aa6ccf 100644 --- a/articles/cognitive-services/language-service/question-answering/how-to/network-isolation.md +++ b/articles/cognitive-services/language-service/question-answering/how-to/network-isolation.md @@ -55,7 +55,7 @@ This will establish a private endpoint connection between language resource and Follow the steps below to restrict public access to question answering language resources. Protect a Cognitive Services resource from public access by [configuring the virtual network](../../../cognitive-services-virtual-networks.md?tabs=portal). -After restricting access to Cognitive Service resource based on VNet, To browse knowledge bases on the Language Studio portal from your on-premises network or your local browser. +After restricting access to the Cognitive Services resource based on the VNet, use the following steps to browse knowledge bases in Language Studio from your on-premises network or your local browser: - Grant access to [on-premises network](../../../cognitive-services-virtual-networks.md?tabs=portal#configuring-access-from-on-premises-networks). - Grant access to your [local browser/machine](../../../cognitive-services-virtual-networks.md?tabs=portal#managing-ip-network-rules). - Add the **public IP address of the machine under the Firewall** section of the **Networking** tab. By default `portal.azure.com` shows the current browsing machine's public IP (select this entry) and then select **Save**.
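If you prefer to script the same firewall allow-listing instead of using the portal, the Azure CLI exposes network rules for Cognitive Services accounts. The following is a rough sketch only, assuming the `az cognitiveservices account network-rule` command group and placeholder resource names and IP address; confirm the syntax against the current CLI reference for your installed version.

```bash
# Allow a specific public IP address through the Language resource firewall
# (resource group, account name, and IP address are placeholders).
az cognitiveservices account network-rule add \
  --resource-group my-resource-group \
  --name my-language-resource \
  --ip-address 203.0.113.10

# List the rules to confirm the address was added.
az cognitiveservices account network-rule list \
  --resource-group my-resource-group \
  --name my-language-resource
```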
diff --git a/articles/cognitive-services/language-service/question-answering/includes/rest.md b/articles/cognitive-services/language-service/question-answering/includes/rest.md index f1fbeada5b15c..9515b4db124ae 100644 --- a/articles/cognitive-services/language-service/question-answering/includes/rest.md +++ b/articles/cognitive-services/language-service/question-answering/includes/rest.md @@ -14,7 +14,7 @@ ms.custom: ignite-fall-2021 * Azure subscription - [Create one for free](https://azure.microsoft.com/free/cognitive-services) * Question answering, requires a [Language resource](https://portal.azure.com/?quickstart=true#create/Microsoft.CognitiveServicesTextAnalytics) with the custom question answering feature enabled to generate an API key and endpoint. * After your Language resource deploys, select **Go to resource**. You will need the key and endpoint from the resource you create to connect to the API. Paste your key and endpoint into the code below later in the quickstart. -* To create a Language resource with [Azure CLI](/azure/cognitive-services/cognitive-services-apis-create-account-cli) provide the following additional properties during resource creation configure Custom Question Answering with your Language resource `--api-properties qnaAzureSearchEndpointId=/subscriptions//resourceGroups//providers/Microsoft.Search/searchServices/ qnaAzureSearchEndpointKey=` +* To create a Language resource with [Azure CLI](../../../cognitive-services-apis-create-account-cli.md) provide the following additional properties during resource creation configure Custom Question Answering with your Language resource `--api-properties qnaAzureSearchEndpointId=/subscriptions//resourceGroups//providers/Microsoft.Search/searchServices/ qnaAzureSearchEndpointKey=` * An existing knowledge base to query. If you have not setup a knowledge base, you can follow the instructions in the [**Language Studio quickstart**](../quickstart/sdk.md). Or add a knowledge base that uses this [Surface User Guide URL](https://download.microsoft.com/download/7/B/1/7B10C82E-F520-4080-8516-5CF0D803EEE0/surface-book-user-guide-EN.pdf) as a data source. ## Query a knowledge base @@ -151,4 +151,4 @@ This example will return a result of: } ] } -``` +``` \ No newline at end of file diff --git a/articles/cognitive-services/language-service/question-answering/includes/sdk-csharp.md b/articles/cognitive-services/language-service/question-answering/includes/sdk-csharp.md index a0f8babda7940..f650d5c332ec9 100644 --- a/articles/cognitive-services/language-service/question-answering/includes/sdk-csharp.md +++ b/articles/cognitive-services/language-service/question-answering/includes/sdk-csharp.md @@ -26,7 +26,7 @@ Use this quickstart for the question answering client library for .NET to: * The [Visual Studio IDE](https://visualstudio.microsoft.com/vs/) or current version of [.NET Core](https://dotnet.microsoft.com/download/dotnet-core). * Question answering, requires a [Language resource](https://portal.azure.com/?quickstart=true#create/Microsoft.CognitiveServicesTextAnalytics) with the custom question answering feature enabled to generate an API key and endpoint. * After your Language resource deploys, select **Go to resource**. You will need the key and endpoint from the resource you create to connect to the API. Paste your key and endpoint into the code below later in the quickstart. 
-* To create a Language resource with [Azure CLI](/azure/cognitive-services/cognitive-services-apis-create-account-cli) provide the following additional properties during resource creation configure Custom Question Answering with your Language resource `--api-properties qnaAzureSearchEndpointId=/subscriptions//resourceGroups//providers/Microsoft.Search/searchServices/ qnaAzureSearchEndpointKey=` +* To create a Language resource with [Azure CLI](../../../cognitive-services-apis-create-account-cli.md) provide the following additional properties during resource creation configure Custom Question Answering with your Language resource `--api-properties qnaAzureSearchEndpointId=/subscriptions//resourceGroups//providers/Microsoft.Search/searchServices/ qnaAzureSearchEndpointKey=` * An existing knowledge base to query. If you have not setup a knowledge base, you can follow the instructions in the [**Language Studio quickstart**](../quickstart/sdk.md). Or add a knowledge base that uses this [Surface User Guide URL](https://download.microsoft.com/download/7/B/1/7B10C82E-F520-4080-8516-5CF0D803EEE0/surface-book-user-guide-EN.pdf) as a data source. ## Setting up @@ -225,4 +225,4 @@ namespace questionansweringcsharp To run the code above, replace the `Program.cs` with the contents of the script block above and modify the `endpoint` and `credential` variables to correspond to the language resource you created as part of the prerequisites. -In this case, we iterate through all responses and only return the response with the highest confidence score that is greater than 0.9. To understand more about the options available with `GetAnswersFromText`. +In this case, we iterate through all responses and only return the response with the highest confidence score that is greater than 0.9. To understand more about the options available with `GetAnswersFromText`. \ No newline at end of file diff --git a/articles/cognitive-services/language-service/question-answering/includes/sdk-python.md b/articles/cognitive-services/language-service/question-answering/includes/sdk-python.md index b75f50bc008f1..094a18898910d 100644 --- a/articles/cognitive-services/language-service/question-answering/includes/sdk-python.md +++ b/articles/cognitive-services/language-service/question-answering/includes/sdk-python.md @@ -27,7 +27,7 @@ Use this quickstart for the question answering client library for Python to: * [Python 3.x](https://www.python.org/) * Question answering, requires a [Language resource](https://portal.azure.com/?quickstart=true#create/Microsoft.CognitiveServicesTextAnalytics) with the custom question answering feature enabled to generate an API key and endpoint. * After your Language resource deploys, select **Go to resource**. You will need the key and endpoint from the resource you create to connect to the API. Paste your key and endpoint into the code below later in the quickstart. 
-* To create a Language resource with [Azure CLI](/azure/cognitive-services/cognitive-services-apis-create-account-cli) provide the following additional properties during resource creation configure Custom Question Answering with your Language resource `--api-properties qnaAzureSearchEndpointId=/subscriptions//resourceGroups//providers/Microsoft.Search/searchServices/ qnaAzureSearchEndpointKey=` +* To create a Language resource with [Azure CLI](../../../cognitive-services-apis-create-account-cli.md) provide the following additional properties during resource creation configure Custom Question Answering with your Language resource `--api-properties qnaAzureSearchEndpointId=/subscriptions//resourceGroups//providers/Microsoft.Search/searchServices/ qnaAzureSearchEndpointKey=` * An existing knowledge base to query. If you have not setup a knowledge base, you can follow the instructions in the [**Language Studio quickstart**](../quickstart/sdk.md). Or add a knowledge base that uses this [Surface User Guide URL](https://download.microsoft.com/download/7/B/1/7B10C82E-F520-4080-8516-5CF0D803EEE0/surface-book-user-guide-EN.pdf) as a data source. ## Setting up @@ -175,4 +175,4 @@ A: Power and charging. It takes two to four hours to charge the Surface Pro 4 ba Confidence Score: 0.9254655838012695 ``` -In this case, we iterate through all responses and only return the response with the highest confidence score that is greater than 0.9. To understand more about the options available with [get_answers_from_text](https://azuresdkdocs.blob.core.windows.net/$web/python/azure-ai-language-questionanswering/1.0.0/azure.ai.language.questionanswering.html#azure.ai.language.questionanswering.QuestionAnsweringClient.get-answers-from-text), review the [AnswersFromTextOptions parameters](https://azuresdkdocs.blob.core.windows.net/$web/python/azure-ai-language-questionanswering/1.0.0/azure.ai.language.questionanswering.models.html#azure.ai.language.questionanswering.models.AnswersFromTextOptions). +In this case, we iterate through all responses and only return the response with the highest confidence score that is greater than 0.9. To understand more about the options available with [get_answers_from_text](https://azuresdkdocs.blob.core.windows.net/$web/python/azure-ai-language-questionanswering/1.0.0/azure.ai.language.questionanswering.html#azure.ai.language.questionanswering.QuestionAnsweringClient.get-answers-from-text), review the [AnswersFromTextOptions parameters](https://azuresdkdocs.blob.core.windows.net/$web/python/azure-ai-language-questionanswering/1.0.0/azure.ai.language.questionanswering.models.html#azure.ai.language.questionanswering.models.AnswersFromTextOptions). 
\ No newline at end of file diff --git a/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation-success.png b/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation-success.png index 8c672c5ed6078..299f43a02a90d 100644 Binary files a/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation-success.png and b/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation-success.png differ diff --git a/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation.png b/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation.png index 76a97043172b1..d945cf17cf1bb 100644 Binary files a/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation.png and b/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation.png differ diff --git a/articles/cognitive-services/language-service/question-answering/overview.md b/articles/cognitive-services/language-service/question-answering/overview.md index 96eb571d52412..fdaceac558925 100644 --- a/articles/cognitive-services/language-service/question-answering/overview.md +++ b/articles/cognitive-services/language-service/question-answering/overview.md @@ -7,7 +7,7 @@ author: jboback ms.author: jboback recommendations: false ms.topic: overview -ms.date: 11/02/2021 +ms.date: 06/03/2022 keywords: "qna maker, low code chat bots, multi-turn conversations" ms.custom: language-service-question-answering, ignite-fall-2021 --- @@ -63,7 +63,7 @@ Once a question answering knowledge base is published, a client application send ## Build low code chat bots -The language studio portal provides the complete project/knowledge base authoring experience. You can import documents, in their current form, to your knowledge base. These documents (such as an FAQ, product manual, spreadsheet, or web page) are converted into question and answer pairs. Each pair is analyzed for follow-up prompts and connected to other pairs. The final _markdown_ format supports rich presentation including images and links. +Language Studio portal provides the complete project/knowledge base authoring experience. You can import documents, in their current form, to your knowledge base. These documents (such as an FAQ, product manual, spreadsheet, or web page) are converted into question and answer pairs. Each pair is analyzed for follow-up prompts and connected to other pairs. The final _markdown_ format supports rich presentation including images and links. Once your knowledge base is edited, publish the knowledge base to a working [Azure Web App bot](https://azure.microsoft.com/services/bot-service/) without writing any code. Test your bot in the [Azure portal](https://portal.azure.com) or download it and continue development. 
diff --git a/articles/cognitive-services/language-service/question-answering/reference/document-format-guidelines.md b/articles/cognitive-services/language-service/question-answering/reference/document-format-guidelines.md index 777421a5399c7..1ffaa145a4e97 100644 --- a/articles/cognitive-services/language-service/question-answering/reference/document-format-guidelines.md +++ b/articles/cognitive-services/language-service/question-answering/reference/document-format-guidelines.md @@ -155,7 +155,7 @@ Question answering can process semi-structured support web pages, such as web ar ## Import and export knowledge base -**TSV and XLS files**, from exported knowledge bases, can only be used by importing the files from the **Settings** page in the language studio. They cannot be used as data sources during knowledge base creation or from the **+ Add file** or **+ Add URL** feature on the **Settings** page. +**TSV and XLS files**, from exported knowledge bases, can only be used by importing the files from the **Settings** page in Language Studio. They cannot be used as data sources during knowledge base creation or from the **+ Add file** or **+ Add URL** feature on the **Settings** page. When you import the knowledge base through these **TSV and XLS files**, the question answer pairs get added to the editorial source and not the sources from which the question and answers were extracted in the exported knowledge base. diff --git a/articles/cognitive-services/language-service/question-answering/tutorials/adding-synonyms.md b/articles/cognitive-services/language-service/question-answering/tutorials/adding-synonyms.md index a608c7c0e88d1..0a17124ae340e 100644 --- a/articles/cognitive-services/language-service/question-answering/tutorials/adding-synonyms.md +++ b/articles/cognitive-services/language-service/question-answering/tutorials/adding-synonyms.md @@ -26,9 +26,9 @@ Let’s us add the following words and their alterations to improve the results: |Word | Alterations| |--------------|--------------------------------| -| fix problems | `troubleshoot`, `trouble-shoot`| -| whiteboard | `white-board`, `white board` | -| bluetooth | `blue-tooth`, `blue tooth` | +| fix problems | `troubleshoot`, `diagnostic`| +| whiteboard | `white board`, `white canvas` | +| bluetooth | `blue tooth`, `BT` | ```json { @@ -37,21 +37,21 @@ Let’s us add the following words and their alterations to improve the results: "alterations": [ "fix problems", "troubleshoot", - "trouble-shoot", + "diagnostic", ] }, { "alterations": [ "whiteboard", - "white-board", - "white board" + "white board", + "white canvas" ] }, { "alterations": [ "bluetooth", - "blue-tooth", - "blue tooth" + "blue tooth", + "BT" ] } ] @@ -76,12 +76,38 @@ As you can see, when `troubleshoot` was not added as a synonym, we got a low con > [!IMPORTANT] > Synonyms are case insensitive. Synonyms also might not work as expected if you add stop words as synonyms. The list of stop words can be found here: [List of stop words](https://github.com/Azure-Samples/azure-search-sample-data/blob/master/STOPWORDS.md). > For instance, if you add the abbreviation **IT** for Information technology, the system might not be able to recognize Information Technology because **IT** is a stop word and is filtered when a query is processed. -> Synonyms do not allow these special characters: ',', '?', ':', ';', '\"', '\'', '(', ')', '{', '}', '[', ']', '-', '+', '.', '/', '!', '*', '-', '_', '@', '#' ## Notes * Synonyms can be added in any order. 
The ordering is not considered in any computational logic. -* Special characters are not allowed for synonyms. For hyphenated words like "COVID-19", they are treated the same as "COVID 19", and "space" can be used as a term separator. +* Synonyms can only be added to a project/knowledge base that has at least one question and answer pair. * Overlapping synonym words between two sets of alterations may produce unexpected results; it is not recommended to use overlapping sets. +* Special characters are not allowed for synonyms. For hyphenated words like "COVID-19", they are treated the same as "COVID 19", and "space" can be used as a term separator. The following is the list of special characters that are **not allowed**: + +|Special character | Symbol| +|--------------|--------------------------------| +|Comma | ,| +|Question mark | ?| +|Colon| :| +|Semicolon| ;| +|Double quotation mark| \"| +|Single quotation mark| \'| +|Open parenthesis|(| +|Close parenthesis|)| +|Open brace|{| +|Close brace|}| +|Open bracket|[| +|Close bracket|]| +|Hyphen/dash|-| +|Plus sign|+| +|Period|.| +|Forward slash|/| +|Exclamation mark|!| +|Asterisk|\*| +|Underscore|\_| +|At sign|@| +|Hash|#| + ## Next steps diff --git a/articles/cognitive-services/language-service/question-answering/tutorials/bot-service.md b/articles/cognitive-services/language-service/question-answering/tutorials/bot-service.md index 21d5431f9f561..fbaa0b07accdb 100644 --- a/articles/cognitive-services/language-service/question-answering/tutorials/bot-service.md +++ b/articles/cognitive-services/language-service/question-answering/tutorials/bot-service.md @@ -5,7 +5,7 @@ ms.service: cognitive-services ms.subservice: language-service ms.topic: tutorial ms.date: 11/02/2021 -ms.custom: language-service-question-answering, ignite-fall-2021 +ms.custom: language-service-question-answering, ignite-fall-2021, cogserv-non-critical-language --- # Tutorial: Create a FAQ bot @@ -33,7 +33,7 @@ After deploying your project/knowledge base, you can create a bot from the **Dep * When you make changes to the knowledge base and redeploy, you don't need to take further action with the bot. It's already configured to work with the knowledge base, and works with all future changes to the knowledge base. Every time you publish a knowledge base, all the bots connected to it are automatically updated. -1. In the Language Studio portal, on the question answering **Deploy knowledge base** page, select **Create bot**. +1. In Language Studio, on the question answering **Deploy knowledge base** page, select **Create bot**.
> [!div class="mx-imgBorder"] > ![Screenshot of UI with option to create a bot in Azure.](../media/bot-service/create-bot-in-azure.png) diff --git a/articles/cognitive-services/language-service/question-answering/tutorials/multiple-domains.md b/articles/cognitive-services/language-service/question-answering/tutorials/multiple-domains.md index edb4fe4a1c3e3..53e01e412c4cd 100644 --- a/articles/cognitive-services/language-service/question-answering/tutorials/multiple-domains.md +++ b/articles/cognitive-services/language-service/question-answering/tutorials/multiple-domains.md @@ -7,7 +7,7 @@ ms.topic: tutorial author: jboback ms.author: jboback ms.date: 11/02/2021 -ms.custom: language-service-question-answering, ignite-fall-2021 +ms.custom: language-service-question-answering, ignite-fall-2021, cogserv-non-critical-language --- # Add multiple categories to your FAQ bot diff --git a/articles/cognitive-services/language-service/summarization/how-to/conversation-summarization.md b/articles/cognitive-services/language-service/summarization/how-to/conversation-summarization.md index 8770deaedc7c8..62945b2c48af1 100644 --- a/articles/cognitive-services/language-service/summarization/how-to/conversation-summarization.md +++ b/articles/cognitive-services/language-service/summarization/how-to/conversation-summarization.md @@ -16,7 +16,7 @@ ms.custom: language-service-summarization, ignite-fall-2021, event-tier1-build-2 # How to use conversation summarization (preview) > [!IMPORTANT] -> conversation summarization feature is a preview capability provided “AS IS” and “WITH ALL FAULTS.” As such, Conversation Summarization (preview) should not be implemented or deployed in any production use. The customer is solely responsible for any use of conversation summarization. +> The conversation summarization feature is a preview capability provided “AS IS” and “WITH ALL FAULTS.” As such, Conversation Summarization (preview) should not be implemented or deployed in any production use. The customer is solely responsible for any use of conversation summarization. Conversation summarization is designed to summarize text chat logs between customers and customer-service agents. This feature is capable of providing both issues and resolutions present in these logs. @@ -26,17 +26,13 @@ The AI models used by the API are provided by the service, you just have to send The conversation summarization API uses natural language processing techniques to locate key issues and resolutions in text-based chat logs. Conversation summarization will return issues and resolutions found from the text input. -Each returned summary contains: -* A numerical ID for identifying the summary. -* A participant ID (such as *Agent* or *Customer*) for determining which participant of the chat log was associated with the summary. - -There is another feature in Azure Cognitive Service for Language, [document summarization](../overview.md?tabs=document-summarization), that can summarize sentences from large documents. When deciding between document summarization and conversation summarization, consider the following: +There's another feature in Azure Cognitive Service for Language, [document summarization](../overview.md?tabs=document-summarization), that can summarize sentences from large documents. 
When you're deciding between document summarization and conversation summarization, consider the following points: * Extractive summarization returns sentences that collectively represent the most important or relevant information within the original content. * Conversation summarization returns summaries based on full chat logs including a reason for the chat (a problem), and the resolution. For example, a chat log between a customer and a customer service agent. ## Submitting data -You submit documents to the API as strings of text. Analysis is performed upon receipt of the request. Because the API is [asynchronous](../../concepts/use-asynchronously.md), there may be a delay between sending an API request, and receiving the results. For information on the size and number of requests you can send per minute and second, see the data limits below. +You submit documents to the API as strings of text. Analysis is performed upon receipt of the request. Because the API is [asynchronous](../../concepts/use-asynchronously.md), there may be a delay between sending an API request and receiving the results. For information on the size and number of requests you can send per minute and second, see the data limits below. When using this feature, the API results are available for 24 hours from the time the request was ingested, and this is indicated in the response. After this time period, the results are purged and are no longer available for retrieval. @@ -46,7 +42,7 @@ When you submit data to conversation summarization, we recommend sending one cha When you get results from conversation summarization, you can stream the results to an application or save the output to a file on the local system. -The following is an example of content you might submit for summarization. This is only an example, the API can accept much longer input text. See [data limits](../../concepts/data-limits.md) for more information. +The following text is an example of content you might submit for summarization. This is only an example; the API can accept much longer input text. See [data limits](../../concepts/data-limits.md) for more information. **Agent**: "*Hello, how can I help you*?"
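Because the request is asynchronous, a client typically submits the conversation as a job and then polls the job URL that the service returns until the status is `succeeded`. The sketch below illustrates only that submit-and-poll pattern against the preview endpoint; the endpoint, key, and job ID are placeholders, the request body is assumed to be in a local `conversation.json` file, and the `operation-location` response header is an assumption based on how other asynchronous Language APIs report the job URL.

```bash
# Submit the chat log for conversation summarization (preview) as an asynchronous job.
# conversation.json is a placeholder file containing the request body.
curl -i -X POST "https://your-language-endpoint-here/language/analyze-conversations/jobs?api-version=2022-05-15-preview" \
  -H "Content-Type: application/json" \
  -H "Ocp-Apim-Subscription-Key: your-key-here" \
  -d @conversation.json

# The job URL is typically returned in the operation-location response header.
# Poll it until "status" is "succeeded", then read the issue and resolution summaries.
curl -X GET "https://your-language-endpoint-here/language/analyze-conversations/jobs/{JOB-ID}?api-version=2022-05-15-preview" \
  -H "Ocp-Apim-Subscription-Key: your-key-here"
```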
diff --git a/articles/cognitive-services/language-service/summarization/how-to/document-summarization.md b/articles/cognitive-services/language-service/summarization/how-to/document-summarization.md index b27f8cce1e402..b2be9edf00fdd 100644 --- a/articles/cognitive-services/language-service/summarization/how-to/document-summarization.md +++ b/articles/cognitive-services/language-service/summarization/how-to/document-summarization.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: how-to -ms.date: 03/16/2022 +ms.date: 05/26/2022 ms.author: aahi ms.custom: language-service-summarization, ignite-fall-2021 --- @@ -93,4 +93,4 @@ Using the above example, the API might return the following summarized sentences ## See also -* [Document summarization overview](../overview.md) +* [Summarization overview](../overview.md) diff --git a/articles/cognitive-services/language-service/summarization/includes/quickstarts/rest-api.md b/articles/cognitive-services/language-service/summarization/includes/quickstarts/rest-api.md index 1b4cc98c00303..887fc1e3b775a 100644 --- a/articles/cognitive-services/language-service/summarization/includes/quickstarts/rest-api.md +++ b/articles/cognitive-services/language-service/summarization/includes/quickstarts/rest-api.md @@ -177,7 +177,7 @@ The following cURL commands are executed from a BASH shell. Edit these commands [!INCLUDE [REST API quickstart instructions](../../../includes/rest-api-instructions.md)] ```bash -curl -i -X POST https://your-language-endpoint-here/language/analyze-conversations?api-version=2022-05-15-preview \ +curl -i -X POST https://your-language-endpoint-here/language/analyze-conversations/jobs?api-version=2022-05-15-preview \ -H "Content-Type: application/json" \ -H "Ocp-Apim-Subscription-Key: your-key-here" \ -d \ @@ -273,10 +273,10 @@ curl -X GET https://your-language-endpoint-here/language/analyze-conversation ```json { - "jobId": "28261846-59bc-435a-a73a-f47c2feb245e", - "lastUpdatedDateTime": "2022-05-11T23:16:48Z", - "createdDateTime": "2022-05-11T23:16:44Z", - "expirationDateTime": "2022-05-12T23:16:44Z", + "jobId": "738120e1-7987-4d19-af0c-89d277762a2f", + "lastUpdatedDateTime": "2022-05-31T16:52:59Z", + "createdDateTime": "2022-05-31T16:52:51Z", + "expirationDateTime": "2022-06-01T16:52:51Z", "status": "succeeded", "errors": [], "displayName": "Analyze conversations from 123", @@ -289,7 +289,7 @@ curl -X GET https://your-language-endpoint-here/language/analyze-conversation { "kind": "conversationalSummarizationResults", "taskName": "analyze 1", - "lastUpdateDateTime": "2022-05-11T23:16:48.9553011Z", + "lastUpdateDateTime": "2022-05-31T16:52:59.85913Z", "status": "succeeded", "results": { "conversations": [ @@ -298,11 +298,11 @@ curl -X GET https://your-language-endpoint-here/language/analyze-conversation "summaries": [ { "aspect": "issue", - "text": "Customer tried to set up wifi connection for Smart Brew 300 medication machine, but it didn't work" + "text": "Customer tried to set up wifi connection for Smart Brew 300 machine, but it didn't work" }, { "aspect": "resolution", - "text": "Asked customer to try the following steps | Asked customer for the power light | Helped customer to connect to the machine" + "text": "Asked customer to try the following steps | Asked customer for the power light | Checked if the app is prompting to connect to the machine | Transferred the call to a tech support" } ], "warnings": [] diff --git 
a/articles/cognitive-services/language-service/summarization/language-support.md b/articles/cognitive-services/language-service/summarization/language-support.md index 0c0d6aa4093de..8c908b6bc28bd 100644 --- a/articles/cognitive-services/language-service/summarization/language-support.md +++ b/articles/cognitive-services/language-service/summarization/language-support.md @@ -1,5 +1,5 @@ --- -title: Document summarization language support +title: Summarization language support titleSuffix: Azure Cognitive Services description: Learn about which languages are supported by document summarization. services: cognitive-services @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: conceptual -ms.date: 05/11/2022 +ms.date: 06/02/2022 ms.author: aahi ms.custom: language-service-summarization, ignite-fall-2021 --- @@ -36,9 +36,9 @@ Document summarization supports the following languages: | Portuguese (Brazil) | `pt-BR` | 2021-08-01 | | | Portuguese (Portugal) | `pt-PT` | 2021-08-01 | `pt` also accepted | -# [Conversation summarization](#tab/conversation-summarization) +# [Conversation summarization (preview)](#tab/conversation-summarization) -## Languages supported by conversation summarization +## Languages supported by conversation summarization (preview) Conversation summarization supports the following languages: @@ -50,4 +50,4 @@ Conversation summarization supports the following languages: ## Next steps -[Document summarization overview](overview.md) +* [Summarization overview](overview.md) diff --git a/articles/cognitive-services/language-service/summarization/overview.md b/articles/cognitive-services/language-service/summarization/overview.md index 8a788eefd74da..f07e8cf0a9833 100644 --- a/articles/cognitive-services/language-service/summarization/overview.md +++ b/articles/cognitive-services/language-service/summarization/overview.md @@ -1,5 +1,5 @@ --- -title: What is document summarization in Azure Cognitive Service for Language (preview)? +title: What is document and conversation summarization (preview)? titleSuffix: Azure Cognitive Services description: Learn about summarizing text. services: cognitive-services @@ -8,14 +8,14 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: overview -ms.date: 05/06/2022 +ms.date: 06/03/2022 ms.author: aahi ms.custom: language-service-summarization, ignite-fall-2021, event-tier1-build-2022 --- # What is document and conversation summarization (preview)? -Document summarization is one of the features offered by [Azure Cognitive Service for Language](../overview.md), a collection of machine learning and AI algorithms in the cloud for developing intelligent applications that involve written language. Use this article to learn more about this feature, and how to use it in your applications. +Summarization is one of the features offered by [Azure Cognitive Service for Language](../overview.md), a collection of machine learning and AI algorithms in the cloud for developing intelligent applications that involve written language. Use this article to learn more about this feature, and how to use it in your applications. # [Document summarization](#tab/document-summarization) @@ -48,7 +48,7 @@ Document summarization supports the following features: This documentation contains the following article types: * [**Quickstarts**](quickstart.md?pivots=rest-api&tabs=conversation-summarization) are getting-started instructions to guide you through making requests to the service. 
-* [**How-to guides**](how-to/document-summarization.md) contain instructions for using the service in more specific or customized ways. +* [**How-to guides**](how-to/conversation-summarization.md) contain instructions for using the service in more specific or customized ways. Conversation summarization is a broad topic, consisting of several approaches to represent relevant information in text. The conversation summarization feature described in this documentation enables you to use abstractive text summarization to produce a summary of issues and resolutions in transcripts of web chats and service call transcripts between customer-service agents, and your customers. @@ -86,21 +86,21 @@ Conversation summarization feature would simplify the text into the following: |Example summary | Format | Conversation aspect | |---------|----|----| -| Customer wants to use the wifi connection on their Smart Brew 300. They can’t connect it using the Contoso Coffee app. | One or two sentences | issue | -| Checked if the power light is blinking slowly. Tried to do a factory reset. | One or more sentences, generated from multiple lines of the transcript. | resolution | +| Customer wants to use the wifi connection on their Smart Brew 300. But it didn't work. | One or two sentences | issue | +| Checked if the power light is blinking slowly. Checked the Contoso coffee app. It had no prompt. Tried to do a factory reset. | One or more sentences, generated from multiple lines of the transcript. | resolution | --- -## Get started with text summarization +## Get started with summarization # [Document summarization](#tab/document-summarization) -To use this feature, you submit raw unstructured text for analysis and handle the API output in your application. Analysis is performed as-is, with no additional customization to the model used on your data. There are two ways to use text summarization: +To use this feature, you submit raw unstructured text for analysis and handle the API output in your application. Analysis is performed as-is, with no additional customization to the model used on your data. There are two ways to use summarization: |Development option |Description | Links | |---------|---------|---------| -| Language Studio | A web-based platform that enables you to try document summarization without needing writing code. | • [Language Studio website](https://language.cognitive.azure.com/tryout/summarization)
                  • [Quickstart: Use the Language studio](../language-studio.md) | +| Language Studio | A web-based platform that enables you to try document summarization without needing to write code. | • [Language Studio website](https://language.cognitive.azure.com/tryout/summarization)
                  • [Quickstart: Use Language Studio](../language-studio.md) | | REST API or Client library (Azure SDK) | Integrate document summarization into your applications using the REST API, or the client library available in a variety of languages. | • [Quickstart: Use document summarization](quickstart.md) | @@ -119,14 +119,14 @@ To use this feature, you submit raw text for analysis and handle the API output # [Document summarization](#tab/document-summarization) -* Text summarization takes raw unstructured text for analysis. See [Data and service limits](../concepts/data-limits.md) in the how-to guide for more information. -* Text summarization works with a variety of written languages. See [language support](language-support.md) for more information. +* Summarization takes raw unstructured text for analysis. See [Data and service limits](../concepts/data-limits.md) in the how-to guide for more information. +* Summarization works with a variety of written languages. See [language support](language-support.md?tabs=document-summarization) for more information. # [Conversation summarization](#tab/conversation-summarization) * Conversation summarization takes structured text for analysis. See the [data and service limits](../concepts/data-limits.md) for more information. -* Conversation summarization accepts text in English. See [language support](language-support.md) for more information. +* Conversation summarization accepts text in English. See [language support](language-support.md?tabs=conversation-summarization) for more information. --- @@ -144,6 +144,9 @@ As you use document summarization in your applications, see the following refere ## Responsible AI -An AI system includes not only the technology, but also the people who will use it, the people who will be affected by it, and the environment in which it’s deployed. Read the [transparency note for document summarization](/legal/cognitive-services/language-service/transparency-note-extractive-summarization?context=/azure/cognitive-services/language-service/context/context) to learn about responsible AI use and deployment in your systems. You can also see the following articles for more information: +An AI system includes not only the technology, but also the people who will use it, the people who will be affected by it, and the environment in which it’s deployed. Read the [transparency note for summarization](/legal/cognitive-services/language-service/transparency-note-extractive-summarization?context=/azure/cognitive-services/language-service/context/context) to learn about responsible AI use and deployment in your systems. 
You can also see the following articles for more information: -[!INCLUDE [Responsible AI links](../includes/overview-responsible-ai-links.md)] +* [Transparency note for Azure Cognitive Service for Language](/legal/cognitive-services/language-service/transparency-note?context=/azure/cognitive-services/language-service/context/context) +* [Integration and responsible use](/legal/cognitive-services/language-service/guidance-integration-responsible-use-summarization?context=/azure/cognitive-services/language-service/context/context) +* [Characteristics and limitations of summarization](/legal/cognitive-services/language-service/characteristics-and-limitations-summarization?context=/azure/cognitive-services/language-service/context/context) +* [Data, privacy, and security](/legal/cognitive-services/language-service/data-privacy?context=/azure/cognitive-services/language-service/context/context) \ No newline at end of file diff --git a/articles/cognitive-services/language-service/text-analytics-for-health/overview.md b/articles/cognitive-services/language-service/text-analytics-for-health/overview.md index 105199c66cead..bdb3828a80066 100644 --- a/articles/cognitive-services/language-service/text-analytics-for-health/overview.md +++ b/articles/cognitive-services/language-service/text-analytics-for-health/overview.md @@ -40,7 +40,7 @@ To use this feature, you submit raw unstructured text for analysis and handle th |Development option |Description | Links | |---------|---------|---------| -| Language Studio | A web-based platform that enables you to try Text Analytics for health without needing writing code. | • [Language Studio website](https://language.cognitive.azure.com/tryout/healthAnalysis)
                  • [Quickstart: Use the Language studio](../language-studio.md) | +| Language Studio | A web-based platform that enables you to try Text Analytics for health without needing to write code. | • [Language Studio website](https://language.cognitive.azure.com/tryout/healthAnalysis)
                  • [Quickstart: Use Language Studio](../language-studio.md) | | REST API or Client library (Azure SDK) | Integrate Text Analytics for health into your applications using the REST API, or the client library available in a variety of languages. | • [Quickstart: Use Text Analytics for health](quickstart.md) | | Docker container | Use the available Docker container to deploy this feature on-premises, letting you bring the service closer to your data for compliance, security, or other operational reasons. | • [How to deploy on-premises](how-to/use-containers.md) | diff --git a/articles/cognitive-services/language-service/toc.yml b/articles/cognitive-services/language-service/toc.yml index 27b467736d370..c7979c8ee1d55 100644 --- a/articles/cognitive-services/language-service/toc.yml +++ b/articles/cognitive-services/language-service/toc.yml @@ -66,11 +66,11 @@ items: - name: Data selection and schema design href: custom-text-classification/how-to/design-schema.md displayName: Best practices - - name: Tag data + - name: Label data href: custom-text-classification/how-to/tag-data.md - name: Train a model href: custom-text-classification/how-to/train-model.md - - name: View model evaluation + - name: View model performance href: custom-text-classification/how-to/view-model-evaluation.md - name: Improve a model href: custom-text-classification/how-to/improve-model.md @@ -84,17 +84,9 @@ items: items: - name: Evaluation metrics href: custom-text-classification/concepts/evaluation-metrics.md - - name: Accepted data tag formats + - name: Accepted data formats href: custom-text-classification/concepts/data-formats.md displayName: Data representation - - name: Enterprise readiness - items: - - name: Virtual networks - href: ../cognitive-services-virtual-networks.md?context=/azure/cognitive-services/language-service/context/context - - name: Cognitive Services security - href: ../cognitive-services-security.md?context=/azure/cognitive-services/language-service/context/context - - name: Encryption of data at rest - href: ../cognitive-services-security.md?context=/azure/cognitive-services/language-service/context/context - name: Tutorials items: - name: Enrich a Cognitive Search index @@ -106,22 +98,34 @@ items: displayName: Model, Project, Class - name: Service limits href: custom-text-classification/service-limits.md - - name: REST API + - name: Authoring API items: - - name: Authoring API - href: https://westus.dev.cognitive.microsoft.com/docs/services/language-custom-text-authoring-apis-2022-03-01-preview/operations/Projects_TriggerExportProjectJob - - name: Runtime prediction API - href: https://aka.ms/ct-runtime-swagger - - name: SDKs (v3.2-preview.2) + - name: REST API + href: https://aka.ms/ct-authoring-apis + - name: Runtime prediction API items: - - name: .NET + - name: REST API + href: https://aka.ms/ct-runtime-api + - name: SDK + items: + - name: C# href: /dotnet/api/azure.ai.textanalytics?view=azure-dotnet-preview&preserve-view=true - - name: Python - href: /python/api/azure-ai-textanalytics/azure.ai.textanalytics?view=azure-python-preview&preserve-view=true - name: Java href: /java/api/overview/azure/ai-textanalytics-readme?view=azure-java-preview&preserve-view=true - - name: Node.js + - name: JavaScript href: /javascript/api/overview/azure/ai-text-analytics-readme?view=azure-node-preview&preserve-view=true + - name: Python + href: /python/api/overview/azure/ai-textanalytics-readme?view=azure-python-preview&preserve-view=true + - name: Samples + items: + - name: Java + href: 
https://aka.ms/sdk-samples-java + - name: JavaScript + href: https://aka.ms/sdk-samples-java-script + - name: C# + href: https://aka.ms/sdk-samples-dot-net + - name: Python + href: https://aka.ms/sdk-samples-python - name: Custom named entity recognition (NER) items: - name: Overview @@ -152,13 +156,13 @@ items: href: custom-named-entity-recognition/how-to/create-project.md - name: Data selection and schema design href: custom-named-entity-recognition/how-to/design-schema.md - - name: Tag data + - name: Label data href: custom-named-entity-recognition/how-to/tag-data.md - - name: Use autotagging (preview) to automatically tag your data + - name: Auto label your data (preview) href: custom-named-entity-recognition/how-to/use-autotagging.md - name: Train a model href: custom-named-entity-recognition/how-to/train-model.md - - name: View model evaluation + - name: View model performance href: custom-named-entity-recognition/how-to/view-model-evaluation.md - name: Improve a model href: custom-named-entity-recognition/how-to/improve-model.md @@ -176,39 +180,43 @@ items: items: - name: Evaluation metrics href: custom-named-entity-recognition/concepts/evaluation-metrics.md - - name: Accepted data tag formats + - name: Accepted data formats href: custom-named-entity-recognition/concepts/data-formats.md - - name: Enterprise readiness - items: - - name: Virtual networks - href: ../cognitive-services-virtual-networks.md?context=/azure/cognitive-services/language-service/context/context - - name: Cognitive Services security - href: ../cognitive-services-security.md?context=/azure/cognitive-services/language-service/context/context - - name: Encryption of data at rest - href: ../cognitive-services-security.md?context=/azure/cognitive-services/language-service/context/context - name: Reference items: - name: Glossary href: custom-named-entity-recognition/glossary.md - name: Service limits href: custom-named-entity-recognition/service-limits.md - - name: REST API + - name: Authoring API items: - - name: Authoring API - href: https://westus.dev.cognitive.microsoft.com/docs/services/language-custom-text-authoring-apis-2022-03-01-preview/operations/Projects_TriggerExportProjectJob - - name: Runtime prediction API - href: https://aka.ms/ct-runtime-swagger - - name: SDKs (v3.2-preview.2) + - name: REST API + href: https://aka.ms/ct-authoring-apis + - name: Runtime prediction API items: - - name: .NET + - name: REST API + href: https://aka.ms/ct-runtime-api + - name: SDK + items: + - name: C# href: /dotnet/api/azure.ai.textanalytics?view=azure-dotnet-preview&preserve-view=true - - name: Python - href: /python/api/azure-ai-textanalytics/azure.ai.textanalytics?view=azure-python-preview&preserve-view=true - name: Java href: /java/api/overview/azure/ai-textanalytics-readme?view=azure-java-preview&preserve-view=true - - name: Node.js + - name: JavaScript href: /javascript/api/overview/azure/ai-text-analytics-readme?view=azure-node-preview&preserve-view=true -- name: Conversational language understanding (preview) + - name: Python + href: /python/api/overview/azure/ai-textanalytics-readme?view=azure-python-preview&preserve-view=true + - name: Samples + items: + - name: Java + href: https://aka.ms/sdk-samples-java + - name: JavaScript + href: https://aka.ms/sdk-samples-java-script + - name: C# + href: https://aka.ms/sdk-samples-dot-net + - name: Python + href: https://aka.ms/sdk-samples-python +- name: Conversational language understanding items: - name: Overview href: conversational-language-understanding/overview.md 
@@ -224,11 +232,11 @@ items: href: conversational-language-understanding/how-to/create-project.md - name: Build a schema href: conversational-language-understanding/how-to/build-schema.md - - name: Tag utterances + - name: Label utterances href: conversational-language-understanding/how-to/tag-utterances.md - name: Train a model href: conversational-language-understanding/how-to/train-model.md - - name: View model evaluation + - name: View model performance href: conversational-language-understanding/how-to/view-model-evaluation.md - name: Deploy a model href: conversational-language-understanding/how-to/deploy-model.md @@ -238,7 +246,7 @@ items: href: conversational-language-understanding/how-to/fail-over.md - name: Concepts items: - - name: Backwards compatibility + - name: Backwards compatibility with LUIS href: conversational-language-understanding/concepts/backwards-compatibility.md - name: Entity components href: conversational-language-understanding/concepts/entity-components.md @@ -248,16 +256,6 @@ items: href: conversational-language-understanding/concepts/data-formats.md - name: None intent href: conversational-language-understanding/concepts/none-intent.md - - name: Using multiple languages - href: conversational-language-understanding/concepts/multiple-languages.md - - name: Enterprise readiness - items: - - name: Virtual networks - href: ../cognitive-services-virtual-networks.md?context=/azure/cognitive-services/language-service/context/context - - name: Cognitive Services security - href: ../cognitive-services-security.md?context=/azure/cognitive-services/language-service/context/context - - name: Encryption of data at rest - href: ../cognitive-services-security.md?context=/azure/cognitive-services/language-service/context/context - name: Tutorials items: - name: Use Bot Framework @@ -270,18 +268,26 @@ items: href: conversational-language-understanding/service-limits.md - name: Glossary href: conversational-language-understanding/glossary.md - - name: REST API + - name: Authoring API items: - - name: Authoring - href: https://westus.dev.cognitive.microsoft.com/docs/services/language-authoring-clu-apis-2022-03-01-preview/operations/Projects_TriggerImportProjectJob - - name: Runtime API - href: https://aka.ms/clu-apis - - name: SDKs + - name: REST API + href: https://aka.ms/clu-authoring-apis + - name: Runtime prediction API items: - - name: .NET - href: /dotnet/api/azure.ai.language.conversations?view=azure-dotnet-preview&preserve-view=true - - name: Python - href: /python/api/azure-ai-language-conversations/azure.ai.language.conversations?view=azure-python-preview&preserve-view=true + - name: REST API + href: https://aka.ms/clu-runtime-api + - name: SDK + items: + - name: C# + href: /dotnet/api/overview/azure/ai.language.conversations-readme-pre?view=azure-dotnet-preview&preserve-view=true + - name: Python + href: /python/api/overview/azure/ai-language-conversations-readme?view=azure-python-preview&preserve-view=true + - name: Samples + items: + - name: C# + href: https://aka.ms/sdk-sample-conversation-dot-net + - name: Python + href: https://aka.ms/sdk-samples-conversation-python - name: Entity linking items: - name: Overview @@ -529,27 +535,27 @@ items: href: /java/api/overview/azure/ai-textanalytics-readme?preserve-view=true - name: Node.js href: /javascript/api/overview/azure/ai-text-analytics-readme?view=azure-node-latest&preserve-view=true -- name: Orchestration workflow (preview) +- name: Orchestration workflow items: - name: Overview href: 
orchestration-workflow/overview.md - - name: FAQ - href: orchestration-workflow/faq.md - name: Quickstart href: orchestration-workflow/quickstart.md + - name: FAQ + href: orchestration-workflow/faq.md - name: Language support href: orchestration-workflow/language-support.md - name: How-to guides items: - - name: Build schema - href: orchestration-workflow/how-to/build-schema.md - name: Create project href: orchestration-workflow/how-to/create-project.md - - name: Tag utterances + - name: Build schema + href: orchestration-workflow/how-to/build-schema.md + - name: Label utterances href: orchestration-workflow/how-to/tag-utterances.md - name: Train a model href: orchestration-workflow/how-to/train-model.md - - name: View model evaluation + - name: View model performance href: orchestration-workflow/how-to/view-model-evaluation.md - name: Deploy a model href: orchestration-workflow/how-to/deploy-model.md @@ -577,18 +583,26 @@ items: href: orchestration-workflow/service-limits.md - name: Glossary href: orchestration-workflow/glossary.md - - name: REST API + - name: Authoring API items: - - name: Authoring - href: https://westus.dev.cognitive.microsoft.com/docs/services/language-authoring-clu-apis-2022-03-01-preview/operations/Projects_TriggerImportProjectJob - - name: Runtime API - href: https://aka.ms/clu-apis - - name: SDKs + - name: REST API + href: https://aka.ms/clu-authoring-apis + - name: Runtime prediction API items: - - name: .NET - href: /dotnet/api/azure.ai.language.conversations?view=azure-dotnet-preview&preserve-view=true - - name: Python - href: /python/api/azure-ai-language-conversations/azure.ai.language.conversations?view=azure-python-preview&preserve-view=true + - name: REST API + href: https://aka.ms/clu-runtime-api + - name: SDK + items: + - name: C# + href: /dotnet/api/overview/azure/ai.language.conversations-readme-pre?view=azure-dotnet-preview&preserve-view=true + - name: Python + href: /python/api/overview/azure/ai-language-conversations-readme?view=azure-python-preview&preserve-view=true + - name: Samples + items: + - name: C# + href: https://aka.ms/sdk-sample-conversation-dot-net + - name: Python + href: https://aka.ms/sdk-samples-conversation-python - name: Personally Identifying Information (PII) detection items: - name: Overview @@ -612,12 +626,12 @@ items: items: - name: Call PII href: personally-identifiable-information/how-to-call.md - - name: Call PII for Conversation (preview) + - name: Call PII for conversations (preview) href: personally-identifiable-information/how-to-call-for-conversations.md - name: Concepts items: - name: Recognized entity categories - href: personally-identifiable-information/concepts/entity-categories.md + href: personally-identifiable-information/concepts/entity-categories.md - name: Recognized entity categories for conversation href: personally-identifiable-information/concepts/conversations-entity-categories.md - name: Reference @@ -680,7 +694,7 @@ items: href: question-answering/how-to/export-import-refresh.md - name: Encrypt data at rest href: question-answering/how-to/encrypt-data-at-rest.md - - name: Migrate knowledge base + - name: Move knowledge base href: question-answering/how-to/migrate-knowledge-base.md - name: Add chit-chat href: question-answering/how-to/chit-chat.md @@ -698,6 +712,8 @@ items: href: question-answering/how-to/authoring.md - name: Prebuilt API href: question-answering/how-to/prebuilt.md + - name: Project Best practices + href: question-answering/how-to/best-practices.md - name: Troubleshooting href: 
question-answering/how-to/troubleshooting.md - name: Concepts @@ -903,12 +919,14 @@ items: href: summarization/language-support.md - name: Responsible use of AI items: - - name: Transparency note for text summarization + - name: Transparency note for summarization href: /legal/cognitive-services/language-service/transparency-note-extractive-summarization?context=/azure/cognitive-services/language-service/context/context - displayName: Transparency note for text summarization, text summarization transparency, Responsible AI, Responsible use of AI + displayName: Transparency note for summarization, summarization transparency, Responsible AI, Responsible use of AI - name: Integration and responsible use - href: /legal/cognitive-services/language-service/guidance-integration-responsible-use?context=/azure/cognitive-services/language-service/context/context + href: /legal/cognitive-services/language-service/guidance-integration-responsible-use-summarization?context=/azure/cognitive-services/language-service/context/context displayName: Responsible deployment, Responsible use, Responsible integration, AI deployment, AI use + - name: Characteristics and limitations + href: /legal/cognitive-services/language-service/characteristics-and-limitations-summarization?context=/azure/cognitive-services/language-service/context/context - name: Data, privacy, and security href: /legal/cognitive-services/language-service/data-privacy?context=/azure/cognitive-services/language-service/context/context displayName: Data privacy, logging, data retention @@ -940,6 +958,8 @@ items: href: /javascript/api/overview/azure/ai-text-analytics-readme?view=azure-node-preview&preserve-view=true - name: Concepts items: + - name: Data limits + href: concepts/data-limits.md - name: Multilingual and emoji support href: concepts/multilingual-emoji-support.md - name: Migrate from LUIS, QnA Maker, and Text Analytics @@ -948,21 +968,27 @@ items: href: concepts/model-lifecycle.md - name: Send requests asynchronously href: concepts/use-asynchronously.md + - name: Enterprise readiness + items: + - name: Virtual networks + href: ../cognitive-services-virtual-networks.md?context=/azure/cognitive-services/language-service/context/context + - name: Cognitive Services security + href: ../cognitive-services-security.md?context=/azure/cognitive-services/language-service/context/context + - name: Encryption of data at rest + href: concepts/encryption-data-at-rest.md + - name: Region support + href: https://azure.microsoft.com/global-infrastructure/services/?products=cognitive-services + - name: Compliance and certification + href: https://azure.microsoft.com/support/legal/cognitive-services-compliance-and-privacy/ - name: Tutorials items: - name: Use Cognitive Services in canvas apps href: /powerapps/maker/canvas-apps/cognitive-services-api?context=/azure/cognitive-services/language-service/context/context - name: Use Azure Kubernetes Service href: tutorials/use-kubernetes-service.md -- name: Enterprise readiness - items: - - name: Region support - href: https://azure.microsoft.com/global-infrastructure/services/?products=cognitive-services - - name: Compliance and certification - href: https://azure.microsoft.com/support/legal/cognitive-services-compliance-and-privacy/ - name: Resources items: - name: Support and help options href: ../cognitive-services-support-options.md?context=/azure/cognitive-services/language-service/context/context - name: Migrate to the latest version - href: concepts/migrate-language-service-latest.md \ No newline at 
end of file + href: concepts/migrate-language-service-latest.md diff --git a/articles/cognitive-services/language-service/tutorials/use-kubernetes-service.md b/articles/cognitive-services/language-service/tutorials/use-kubernetes-service.md index 22bb67bcedcc6..6018348e99f0c 100644 --- a/articles/cognitive-services/language-service/tutorials/use-kubernetes-service.md +++ b/articles/cognitive-services/language-service/tutorials/use-kubernetes-service.md @@ -8,9 +8,9 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: tutorial -ms.date: 11/02/2021 +ms.date: 05/27/2022 ms.author: aahi -ms.custom: ignite-fall-2021 +ms.custom: ignite-fall-2021, cogserv-non-critical-language --- # Deploy a key phrase extraction container to Azure Kubernetes Service diff --git a/articles/cognitive-services/language-service/whats-new.md b/articles/cognitive-services/language-service/whats-new.md index eca8f47b05e3b..4301dbab0b7eb 100644 --- a/articles/cognitive-services/language-service/whats-new.md +++ b/articles/cognitive-services/language-service/whats-new.md @@ -26,13 +26,15 @@ Azure Cognitive Service for Language is updated on an ongoing basis. To stay up- * The following features are now Generally Available (GA): * Custom text classification * Custom Named Entity Recognition (NER) + * Conversational language understanding + * Orchestration workflow * The following updates for custom text classification, custom Named Entity Recognition (NER), conversational language understanding, and orchestration workflow: * Data splitting controls. * Ability to cancel training jobs. * Custom deployments can be named. You can have up to 10 deployments. * Ability to swap deployments. - * Auto tagging (preview) for custom named entity recognition + * Auto labeling (preview) for custom named entity recognition * Enterprise readiness support * Training modes for conversational language understanding * Updated service limits diff --git a/articles/cognitive-services/language-support.md b/articles/cognitive-services/language-support.md index cdd44db3c7b69..c8af857d3e111 100644 --- a/articles/cognitive-services/language-support.md +++ b/articles/cognitive-services/language-support.md @@ -20,14 +20,14 @@ These Cognitive Services are language agnostic and don't have limitations based * [Anomaly Detector (Preview)](./anomaly-detector/index.yml) * [Custom Vision](./custom-vision-service/index.yml) -* [Face](./face/index.yml) +* [Face](./computer-vision/index-identity.yml) * [Personalizer](./personalizer/index.yml) ## Vision * [Computer Vision](./computer-vision/language-support.md) * [Ink Recognizer (Preview)](/previous-versions/azure/cognitive-services/Ink-Recognizer/language-support) -* [Video Indexer](/azure/azure-video-indexer/language-identification-model.md#guidelines-and-limitations) +* [Video Indexer](../azure-video-indexer/language-identification-model.md#guidelines-and-limitations) ## Language diff --git a/articles/cognitive-services/openai/breadcrumb/toc.yml b/articles/cognitive-services/openai/breadcrumb/toc.yml index e108cc7b30fd1..e24ffb8f093d3 100644 --- a/articles/cognitive-services/openai/breadcrumb/toc.yml +++ b/articles/cognitive-services/openai/breadcrumb/toc.yml @@ -15,7 +15,7 @@ items: - name: Cognitive Services # Original doc set name tocHref: /legal/cognitive-services/openai # Destination doc set route - topicHref: /azure/cognitive-services/overview # Original doc set route + topicHref: /azure/cognitive-services/what-are-cognitive-services # Original doc set route items: 
- name: Azure OpenAI Service # Destination doc set name tocHref: /legal/cognitive-services/openai # Destination doc set route diff --git a/articles/cognitive-services/personalizer/concept-active-inactive-events.md b/articles/cognitive-services/personalizer/concept-active-inactive-events.md index 7975ab4e0723e..2c87b44665f50 100644 --- a/articles/cognitive-services/personalizer/concept-active-inactive-events.md +++ b/articles/cognitive-services/personalizer/concept-active-inactive-events.md @@ -1,8 +1,8 @@ --- title: Active and inactive events - Personalizer description: This article discusses the use of active and inactive events within the Personalizer service. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/concept-active-learning.md b/articles/cognitive-services/personalizer/concept-active-learning.md index 8ff984f975613..39aa46532d925 100644 --- a/articles/cognitive-services/personalizer/concept-active-learning.md +++ b/articles/cognitive-services/personalizer/concept-active-learning.md @@ -1,8 +1,8 @@ --- title: Learning policy - Personalizer description: Learning settings determine the *hyperparameters* of the model training. Two models of the same data that are trained on different learning settings will end up different. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/concept-apprentice-mode.md b/articles/cognitive-services/personalizer/concept-apprentice-mode.md index f22467df7bf73..a969b1b38b5ab 100644 --- a/articles/cognitive-services/personalizer/concept-apprentice-mode.md +++ b/articles/cognitive-services/personalizer/concept-apprentice-mode.md @@ -1,8 +1,8 @@ --- title: Apprentice mode - Personalizer description: Learn how to use apprentice mode to gain confidence in a model without changing any code. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer @@ -12,7 +12,7 @@ ms.date: 05/01/2020 # Use Apprentice mode to train Personalizer without affecting your existing application -Due to the nature of **real-world** Reinforcement Learning, a Personalizer model can only be trained in a production environment. When deploying a new use case, the Personalizer model is not performing efficiently because it takes time for the model to be sufficiently trained. **Apprentice mode** is a learning behavior that eases this situation and allows you to gain confidence in the model – without the developer changing any code. +Due to the nature of **real-world** Reinforcement Learning, a Personalizer model can only be trained in a production environment. When deploying a new use case, the Personalizer model isn't performing efficiently because it takes time for the model to be sufficiently trained. **Apprentice mode** is a learning behavior that eases this situation and allows you to gain confidence in the model – without the developer changing any code. 
[!INCLUDE [Important Blue Box - Apprentice mode pricing tier](./includes/important-apprentice-mode.md)] @@ -30,18 +30,18 @@ Apprentice mode gives you trust in the Personalizer service and its machine lear The two main reasons to use Apprentice mode are: -* Mitigating **Cold Starts**: Apprentice mode helps manage and assess the cost of a "new" model's learning time - when it is not returning the best action and not achieved a satisfactory level of effectiveness of around 60-80%. +* Mitigating **Cold Starts**: Apprentice mode helps manage and assess the cost of a "new" model's learning time - when it isn't returning the best action and not achieved a satisfactory level of effectiveness of around 60-80%. * **Validating Action and Context Features**: Features sent in actions and context may be inadequate or inaccurate - too little, too much, incorrect, or too specific to train Personalizer to attain the ideal effectiveness rate. Use [feature evaluations](concept-feature-evaluation.md) to find and fix issues with features. ## When should you use Apprentice mode? Use Apprentice mode to train Personalizer to improve its effectiveness through the following scenarios while leaving the experience of your users unaffected by Personalizer: -* You are implementing Personalizer in a new use case. -* You have significantly changed the features you send in Context or Actions. -* You have significantly changed when and how you calculate rewards. +* You're implementing Personalizer in a new use case. +* You've significantly changed the features you send in Context or Actions. +* You've significantly changed when and how you calculate rewards. -Apprentice mode is not an effective way of measuring the impact Personalizer is having on reward scores. To measure how effective Personalizer is at choosing the best possible action for each Rank call, use [Offline evaluations](concepts-offline-evaluation.md). +Apprentice mode isn't an effective way of measuring the impact Personalizer is having on reward scores. To measure how effective Personalizer is at choosing the best possible action for each Rank call, use [Offline evaluations](concepts-offline-evaluation.md). ## Who should use Apprentice mode? @@ -51,7 +51,7 @@ Apprentice mode is useful for developers, data scientists and business decision * **Data scientists** can use Apprentice mode to validate that the features are effective to train the Personalizer models, that the reward wait times aren’t too long or short. -* **Business Decision Makers** can use Apprentice mode to assess the potential of Personalizer to improve results (i.e. rewards) compared to existing business logic. This allows them to make a informed decision impacting user experience, where real revenue and user satisfaction are at stake. +* **Business Decision Makers** can use Apprentice mode to assess the potential of Personalizer to improve results (i.e. rewards) compared to existing business logic. This allows them to make an informed decision impacting user experience, where real revenue and user satisfaction are at stake. ## Comparing Behaviors - Apprentice mode and Online mode @@ -61,8 +61,8 @@ Learning when in Apprentice mode differs from Online mode in the following ways. |--|--|--| |Impact on User Experience|You can use existing user behavior to train Personalizer by letting it observe (not affect) what your **default action** would have been and the reward it obtained. 
This means your users’ experience and the business results from them won’t be impacted.|Display top action returned from Rank call to affect user behavior.| |Learning speed|Personalizer will learn more slowly when in Apprentice mode than when learning in Online mode. Apprentice mode can only learn by observing the rewards obtained by your **default action**, which limits the speed of learning, as no exploration can be performed.|Learns faster because it can both exploit the current model and explore for new trends.| -|Learning effectiveness "Ceiling"|Personalizer can approximate, very rarely match, and never exceed the performance of your base business logic (the reward total achieved by the **default action** of each Rank call). This approximation cieling is reduced by exploration. For example, with exploration at 20% it is very unlikely apprentice mode performance will exceed 80%, and 60% is a reasonable target at which to graduate to online mode.|Personalizer should exceed applications baseline, and over time where it stalls you should conduct on offline evaluation and feature evaluation to continue to get improvements to the model. | -|Rank API value for rewardActionId|The users' experience doesn’t get impacted, as _rewardActionId_ is always the first action you send in the Rank request. In other words, the Rank API does nothing visible for your application during Apprentice mode. Reward APIs in your application should not change how it uses the Reward API between one mode and another.|Users' experience will be changed by the _rewardActionId_ that Personalizer chooses for your application. | +|Learning effectiveness "Ceiling"|Personalizer can approximate, very rarely match, and never exceed the performance of your base business logic (the reward total achieved by the **default action** of each Rank call). This approximation ceiling is reduced by exploration. For example, with exploration at 20% it's very unlikely apprentice mode performance will exceed 80%, and 60% is a reasonable target at which to graduate to online mode.|Personalizer should exceed applications baseline, and over time where it stalls you should conduct on offline evaluation and feature evaluation to continue to get improvements to the model. | +|Rank API value for rewardActionId|The users' experience doesn’t get impacted, as _rewardActionId_ is always the first action you send in the Rank request. In other words, the Rank API does nothing visible for your application during Apprentice mode. Reward APIs in your application shouldn't change how it uses the Reward API between one mode and another.|Users' experience will be changed by the _rewardActionId_ that Personalizer chooses for your application. | |Evaluations|Personalizer keeps a comparison of the reward totals that your default business logic is getting, and the reward totals Personalizer would be getting if in Online mode at that point. 
A comparison is available in the Azure portal for that resource|Evaluate Personalizer’s effectiveness by running [Offline evaluations](concepts-offline-evaluation.md), which let you compare the total rewards Personalizer has achieved against the potential rewards of the application’s baseline.| A note about apprentice mode's effectiveness: @@ -76,27 +76,27 @@ Apprentice Mode attempts to train the Personalizer model by attempting to imitat ### Scenarios where Apprentice Mode May Not be Appropriate: #### Editorially chosen Content: -In some scenarios such as news or entertainment, the baseline item could be manually assigned by an editorial team. This means humans are using their knowledge about the broader world, and understanding of what may be appealing content, to choose specific articles or media out of a pool, and flagging them as "preferred" or "hero" articles. Because these editors are not an algorithm, and the factors considered by editors can be nuanced and not included as features of the context and actions, Apprentice mode is unlikely to be able to predict the next baseline action. In these situations you can: +In some scenarios such as news or entertainment, the baseline item could be manually assigned by an editorial team. This means humans are using their knowledge about the broader world, and understanding of what may be appealing content, to choose specific articles or media out of a pool, and flagging them as "preferred" or "hero" articles. Because these editors aren't an algorithm, and the factors considered by editors can be nuanced and not included as features of the context and actions, Apprentice mode is unlikely to be able to predict the next baseline action. In these situations you can: -* Test Personalizer in Online Mode: Apprentice mode not predicting baselines does not imply Personalizer can't achieve as-good or even better results. Consider putting Personalizer in Online Mode for a period of time or in an A/B test if you have the infrastructure, and then run an Offline Evaluation to assess the difference. +* Test Personalizer in Online Mode: Apprentice mode not predicting baselines doesn't imply Personalizer can't achieve as-good or even better results. Consider putting Personalizer in Online Mode for a period of time or in an A/B test if you have the infrastructure, and then run an Offline Evaluation to assess the difference. * Add editorial considerations and recommendations as features: Ask your editors what factors influence their choices, and see if you can add those as features in your context and action. For example, editors in a media company may highlight content while a certain celebrity is in the news: This knowledge could be added as a Context feature. ### Factors that will improve and accelerate Apprentice Mode -If apprentice mode is learning and attaining Matched rewards above zero but seems to be growing slowly (not getting to 60%..80% matched rewards within 2 weeks), it is possible that the challenge is having too little data. Taking the following steps could accelerate the learning. +If apprentice mode is learning and attaining Matched rewards above zero but seems to be growing slowly (not getting to 60% to 80% matched rewards within two weeks), it's possible that the challenge is having too little data. Taking the following steps could accelerate the learning. 1. Adding more events with positive rewards over time: Apprentice mode will perform better in use cases where your application gets more than 100 positive rewards per day. 
For example, if a website rewarding a click has 2% clickthrough, it should be having at least 5,000 visits per day to have noticeable learning. 2. Try a reward score that is simpler and happens more frequently. For example going from "Did users finish reading the article" to "Did users start reading the article". 3. Adding differentiating features: You can do a visual inspection of the actions in a Rank call and their features. Does the baseline action have features that are differentiated from other actions? If they look mostly the same, add more features that will make them less similar. -4. Reducing Actions per Event: Personalizer will use the Explore % setting to discover preferences and trends. When a Rank call has more actions, the chance of an Action being chosen for exploration becomes lower. Reduce the number of actions sent in each Rank call to a smaller number, to less than 10. This can be a temporary adjustement to show that Apprentice Mode has the right data to match rewards. +4. Reducing Actions per Event: Personalizer will use the Explore % setting to discover preferences and trends. When a Rank call has more actions, the chance of an Action being chosen for exploration becomes lower. Reduce the number of actions sent in each Rank call to a smaller number, to less than 10. This can be a temporary adjustment to show that Apprentice Mode has the right data to match rewards. ## Using Apprentice mode to train with historical data If you have a significant amount of historical data, you’d like to use to train Personalizer, you can use Apprentice mode to replay the data through Personalizer. -Set up the Personalizer in Apprentice Mode and create a script that calls Rank with the actions and context features from the historical data. Call the Reward API based on your calculations of the records in this data. You will need approximately 50,000 historical events to see some results but 500,000 is recommended for higher confidence in the results. +Set up the Personalizer in Apprentice Mode and create a script that calls Rank with the actions and context features from the historical data. Call the Reward API based on your calculations of the records in this data. You'll need approximately 50,000 historical events to see some results but 500,000 is recommended for higher confidence in the results. -When training from historical data, it is recommended that the data sent in (features for context and actions, their layout in the JSON used for Rank requests, and the calculation of reward in this training data set), matches the data (features and calculation of reward) available from the existing application. +When training from historical data, it's recommended that the data sent in (features for context and actions, their layout in the JSON used for Rank requests, and the calculation of reward in this training data set), matches the data (features and calculation of reward) available from the existing application. Offline and post-facto data tends to be more incomplete and noisier and differs in format. While training from historical data is possible, the results from doing so may be inconclusive and not a good predictor of how well Personalizer will learn, especially if the features vary between past data and the existing application. 
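A minimal sketch of such a replay script follows. It assumes the Personalizer v1.0 Rank and Reward REST routes, the `Ocp-Apim-Subscription-Key` header, and the JSON field names shown here (verify each against the Personalizer REST reference), and the inline records stand in for your own historical data.

```python
import requests

ENDPOINT = "https://<your-resource>.cognitiveservices.azure.com"  # placeholder
HEADERS = {
    "Ocp-Apim-Subscription-Key": "<your-key>",  # placeholder
    "Content-Type": "application/json",
}

# Illustrative historical records: context features, candidate actions, and the
# reward your own business logic computed for each event. In Apprentice mode the
# first action listed is treated as the default (baseline) action.
historical_events = [
    {
        "event_id": "hist-0001",
        "context": [{"weather": "hot"}, {"dayOfWeek": "Monday"}],
        "actions": [
            {"id": "ice-cream", "features": [{"category": "dessert"}]},
            {"id": "warm-tea", "features": [{"category": "drink"}]},
        ],
        "reward": 1.0,
    },
]

def replay_event(record):
    """Send one historical record through Rank, then report its observed reward."""
    rank_body = {
        "contextFeatures": record["context"],
        "actions": record["actions"],
        "eventId": record["event_id"],
    }
    response = requests.post(
        f"{ENDPOINT}/personalizer/v1.0/rank", headers=HEADERS, json=rank_body
    )
    response.raise_for_status()

    response = requests.post(
        f"{ENDPOINT}/personalizer/v1.0/events/{record['event_id']}/reward",
        headers=HEADERS,
        json={"value": record["reward"]},
    )
    response.raise_for_status()

for record in historical_events:
    replay_event(record)
```

For the roughly 50,000 to 500,000 events recommended above, batch the records and throttle the calls so the replay stays within your resource's transaction limits.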
@@ -104,7 +104,7 @@ Typically for Personalizer, when compared to training with historical data, chan ## Using Apprentice Mode versus A/B Tests -It is only useful to do A/B tests of Personalizer treatments once it has been validated and is learning in Online mode. In Apprentice mode, only the **default action** is used, which means all users would effectively see the control experience. +It's only useful to do A/B tests of Personalizer treatments once it has been validated and is learning in Online mode. In Apprentice mode, only the **default action** is used, which means all users would effectively see the control experience. Even if Personalizer is just the _treatment_, the same challenge is present when validating the data is good for training Personalizer. Apprentice mode could be used instead, with 100% of traffic, and with all users getting the control (unaffected) experience. diff --git a/articles/cognitive-services/personalizer/concept-auto-optimization.md b/articles/cognitive-services/personalizer/concept-auto-optimization.md index af86d3b9c3013..f3f5bd0afab3d 100644 --- a/articles/cognitive-services/personalizer/concept-auto-optimization.md +++ b/articles/cognitive-services/personalizer/concept-auto-optimization.md @@ -1,8 +1,8 @@ --- title: Auto-optimize - Personalizer description: This article provides a conceptual overview of the auto-optimize feature for Azure Personalizer service. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/concept-feature-evaluation.md b/articles/cognitive-services/personalizer/concept-feature-evaluation.md index 7d4b975b0528a..b4276a121579e 100644 --- a/articles/cognitive-services/personalizer/concept-feature-evaluation.md +++ b/articles/cognitive-services/personalizer/concept-feature-evaluation.md @@ -2,8 +2,8 @@ title: Feature evaluation - Personalizer titleSuffix: Azure Cognitive Services description: When you run an Evaluation in your Personalizer resource from the Azure portal, Personalizer provides information about what features of context and actions are influencing the model. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer @@ -18,7 +18,7 @@ When you run an Evaluation in your Personalizer resource from the [Azure portal] This is useful in order to: * Imagine additional features you could use, getting inspiration from what features are more important in the model. -* See what features are not important, and potentially remove them or further analyze what may be affecting usage. +* See what features aren't important, and potentially remove them or further analyze what may be affecting usage. * Provide guidance to editorial or curation teams about new content or products worth bringing into the catalog. * Troubleshoot common problems and mistakes that happen when sending features to Personalizer. @@ -30,7 +30,7 @@ To see feature importance results, you must run an evaluation. The evaluation cr The resulting information about feature importance represents the current Personalizer online model. The evaluation analyzes feature importance of the model saved at the end date of the evaluation period, after undergoing all the training done during the evaluation, with the current online learning policy. 
-The feature importance results do not represent other policies and models tested or created during the evaluation. The evaluation will not include features sent to Personalizer after the end of the evaluation period. +The feature importance results don't represent other policies and models tested or created during the evaluation. The evaluation won't include features sent to Personalizer after the end of the evaluation period. ## How to interpret the feature importance evaluation @@ -38,14 +38,14 @@ Personalizer evaluates features by creating "groups" of features that have simil Information about each Feature includes: -* Whether the feature comes from Context or Actions. -* Feature Key and Value. +* Whether the feature comes from Context or Actions +* Feature Key and Value -For example, an ice cream shop ordering app may see "Context.Weather:Hot" as a very important feature. +For example, an ice cream shop ordering app may see `Context.Weather:Hot` as a very important feature. Personalizer displays correlations of features that, when taken into account together, produce higher rewards. -For example, you may see "Context.Weather:Hot *with* Action.MenuItem:IceCream" as well as "Context.Weather:Cold *with* Action.MenuItem:WarmTea: +For example, you may see `Context.Weather:Hot` *with* `Action.MenuItem:IceCream` as well as `Context.Weather:Cold` *with* `Action.MenuItem:WarmTea:`. ## Actions you can take based on feature evaluation @@ -53,15 +53,15 @@ For example, you may see "Context.Weather:Hot *with* Action.MenuItem:IceCream" a Get inspiration from the more important features in the model. For example, if you see "Context.MobileBattery:Low" in a video mobile app, you may think that connection type may also make customers choose to see one video clip over another, then add features about connectivity type and bandwidth into your app. -### See what features are not important +### See what features aren't important -Potentially remove unimportant features or further analyze what may affect usage. Features may rank low for many reasons. One could be that genuinely the feature doesn't affect user behavior. But it could also mean that the feature is not apparent to the user. +Potentially remove unimportant features or further analyze what may affect usage. Features may rank low for many reasons. One could be that genuinely the feature doesn't affect user behavior. But it could also mean that the feature isn't apparent to the user. For example, a video site could see that "Action.VideoResolution=4k" is a low-importance feature, contradicting user research. The cause could be that the application doesn't even mention or show the video resolution, so users wouldn't change their behavior based on it. ### Provide guidance to editorial or curation teams -Provide guidance about new content or products worth bringing into the catalog. Personalizer is designed to be a tool that augments human insight and teams. One way it does this is by providing information to editorial groups on what is it about products, articles or content that drives behavior. For example, the video application scenario may show that there is an important feature called "Action.VideoEntities.Cat:true", prompting the editorial team to bring in more cat videos. +Provide guidance about new content or products worth bringing into the catalog. Personalizer is designed to be a tool that augments human insight and teams. 
One way it does this is by providing information to editorial groups on what is it about products, articles or content that drives behavior. For example, the video application scenario may show that there's an important feature called "Action.VideoEntities.Cat:true", prompting the editorial team to bring in more cat videos. ### Troubleshoot common problems and mistakes @@ -69,9 +69,9 @@ Common problems and mistakes can be fixed by changing your application code so i Common mistakes when sending features include the following: -* Sending personally identifiable information (PII). PII specific to one individual (such as name, phone number, credit card numbers, IP Addresses) should not be used with Personalizer. If your application needs to track users, use a non-identifying UUID or some other UserID number. In most scenarios this is also problematic. -* With large numbers of users, it is unlikely that each user's interaction will weigh more than all the population's interaction, so sending user IDs (even if non-PII) will probably add more noise than value to the model. -* Sending date-time fields as precise timestamps instead of featurized time values. Having features such as Context.TimeStamp.Day=Monday or "Context.TimeStamp.Hour"="13" is more useful. There will be at most 7 or 24 feature values for each. But "Context.TimeStamp":"1985-04-12T23:20:50.52Z" is so precise that there will be no way to learn from it because it will never happen again. +* Sending personally identifiable information (PII). PII specific to one individual (such as name, phone number, credit card numbers, IP Addresses) shouldn't be used with Personalizer. If your application needs to track users, use a non-identifying UUID or some other UserID number. In most scenarios this is also problematic. +* With large numbers of users, it's unlikely that each user's interaction will weigh more than all the population's interaction, so sending user IDs (even if non-PII) will probably add more noise than value to the model. +* Sending date-time fields as precise timestamps instead of featurized time values. Having features such as Context.TimeStamp.Day=Monday or "Context.TimeStamp.Hour"="13" is more useful. There will be at most 7 or 24 feature values for each. But `"Context.TimeStamp":"1985-04-12T23:20:50.52Z"` is so precise that there will be no way to learn from it because it will never happen again. ## Next steps diff --git a/articles/cognitive-services/personalizer/concept-multi-slot-personalization.md b/articles/cognitive-services/personalizer/concept-multi-slot-personalization.md index d61691339954b..17af3030a4bcf 100644 --- a/articles/cognitive-services/personalizer/concept-multi-slot-personalization.md +++ b/articles/cognitive-services/personalizer/concept-multi-slot-personalization.md @@ -2,8 +2,8 @@ title: Multi-slot personalization description: Learn where and when to use single-slot and multi-slot personalization with the Personalizer Rank and Reward APIs. 
services: cognitive-services -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/concept-rewards.md b/articles/cognitive-services/personalizer/concept-rewards.md index 4db4a09ebffaf..ab6c16a248ebf 100644 --- a/articles/cognitive-services/personalizer/concept-rewards.md +++ b/articles/cognitive-services/personalizer/concept-rewards.md @@ -1,8 +1,8 @@ --- title: Reward score - Personalizer description: The reward score indicates how well the personalization choice, RewardActionID, resulted for the user. The value of the reward score is determined by your business logic, based on observations of user behavior. Personalizer trains its machine learning models by evaluating the rewards. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/concepts-exploration.md b/articles/cognitive-services/personalizer/concepts-exploration.md index 6e8f7e3f92549..d928892b4d45b 100644 --- a/articles/cognitive-services/personalizer/concepts-exploration.md +++ b/articles/cognitive-services/personalizer/concepts-exploration.md @@ -2,8 +2,8 @@ title: Exploration - Personalizer titleSuffix: Azure Cognitive Services description: With exploration, Personalizer is able to continue delivering good results, even as user behavior changes. Choosing an exploration setting is a business decision about the proportion of user interactions to explore with, in order to improve the model. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/concepts-features.md b/articles/cognitive-services/personalizer/concepts-features.md index ab5a748367186..d566514f9a1bb 100644 --- a/articles/cognitive-services/personalizer/concepts-features.md +++ b/articles/cognitive-services/personalizer/concepts-features.md @@ -2,8 +2,8 @@ title: "Features: Action and context - Personalizer" titleSuffix: Azure Cognitive Services description: Personalizer uses features, information about actions and context, to make better ranking suggestions. Features can be very generic, or specific to an item. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/concepts-offline-evaluation.md b/articles/cognitive-services/personalizer/concepts-offline-evaluation.md index d318fdd163b5c..f89a13d2148aa 100644 --- a/articles/cognitive-services/personalizer/concepts-offline-evaluation.md +++ b/articles/cognitive-services/personalizer/concepts-offline-evaluation.md @@ -2,8 +2,8 @@ title: Use the Offline Evaluation method - Personalizer titleSuffix: Azure Cognitive Services description: This article will explain how to use offline evaluation to measure effectiveness of your app and analyze your learning loop. 
-author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/concepts-reinforcement-learning.md b/articles/cognitive-services/personalizer/concepts-reinforcement-learning.md index ef54c8989a64c..6119c5cda6112 100644 --- a/articles/cognitive-services/personalizer/concepts-reinforcement-learning.md +++ b/articles/cognitive-services/personalizer/concepts-reinforcement-learning.md @@ -2,8 +2,8 @@ title: Reinforcement Learning - Personalizer titleSuffix: Azure Cognitive Services description: Personalizer uses information about actions and current context to make better ranking suggestions. The information about these actions and context are attributes or properties that are referred to as features. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer @@ -26,7 +26,7 @@ While there are many subtypes and styles of reinforcement learning, this is how * Your application provides information about each alternative and the context of the user. * Your application computes a _reward score_. -Unlike some approaches to reinforcement learning, Personalizer does not require a simulation to work in. Its learning algorithms are designed to react to an outside world (versus control it) and learn from each data point with an understanding that it is a unique opportunity that cost time and money to create, and that there is a non-zero regret (loss of possible reward) if suboptimal performance happens. +Unlike some approaches to reinforcement learning, Personalizer doesn't require a simulation to work in. Its learning algorithms are designed to react to an outside world (versus control it) and learn from each data point with an understanding that it's a unique opportunity that cost time and money to create, and that there's a non-zero regret (loss of possible reward) if suboptimal performance happens. ## What type of reinforcement learning algorithms does Personalizer use? @@ -41,7 +41,7 @@ The explore/exploit traffic allocation is made randomly following the percentage John Langford coined the name Contextual Bandits (Langford and Zhang [2007]) to describe a tractable subset of reinforcement learning and has worked on a half-dozen papers improving our understanding of how to learn in this paradigm: * Beygelzimer et al. [2011] -* Dudík et al. [2011a,b] +* Dudík et al. [2011a, b] * Agarwal et al. [2014, 2012] * Beygelzimer and Langford [2009] * Li et al. [2010] diff --git a/articles/cognitive-services/personalizer/concepts-scalability-performance.md b/articles/cognitive-services/personalizer/concepts-scalability-performance.md index 746270daa0f67..882e6bc3ee8a9 100644 --- a/articles/cognitive-services/personalizer/concepts-scalability-performance.md +++ b/articles/cognitive-services/personalizer/concepts-scalability-performance.md @@ -2,8 +2,8 @@ title: Scalability and Performance - Personalizer titleSuffix: Azure Cognitive Services description: "High-performance and high-traffic websites and applications have two main factors to consider with Personalizer for scalability and performance: latency and training throughput." -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer @@ -31,16 +31,16 @@ Some applications require low latencies when returning a rank. 
Low latencies are Personalizer works by updating a model that is retrained based on messages sent asynchronously by Personalizer after Rank and Reward APIs. These messages are sent using an Azure EventHub for the application. - It is unlikely most applications will reach the maximum joining and training throughput of Personalizer. While reaching this maximum will not slow down the application, it would imply Event Hub queues are getting filled internally faster than they can be cleaned up. + It's unlikely most applications will reach the maximum joining and training throughput of Personalizer. While reaching this maximum won't slow down the application, it would imply event hub queues are getting filled internally faster than they can be cleaned up. ## How to estimate your throughput requirements * Estimate the average number of bytes per ranking event adding the lengths of the context and action JSON documents. * Divide 20MB/sec by this estimated average bytes. -For example, if your average payload has 500 features and each is an estimated 20 characters, then each event is approximately 10kb. With these estimates, 20,000,000 / 10,000 = 2,000 events/sec, which is about 173 million events/day. +For example, if your average payload has 500 features and each is an estimated 20 characters, then each event is approximately 10 kb. With these estimates, 20,000,000 / 10,000 = 2,000 events/sec, which is about 173 million events/day. -If you are reaching these limits, please contact our support team for architecture advice. +If you're reaching these limits, please contact our support team for architecture advice. ## Next steps diff --git a/articles/cognitive-services/personalizer/encrypt-data-at-rest.md b/articles/cognitive-services/personalizer/encrypt-data-at-rest.md index 9601d5bd9b7e3..0d1229fe7ab0a 100644 --- a/articles/cognitive-services/personalizer/encrypt-data-at-rest.md +++ b/articles/cognitive-services/personalizer/encrypt-data-at-rest.md @@ -2,13 +2,13 @@ title: Personalizer service encryption of data at rest titleSuffix: Azure Cognitive Services description: Microsoft offers Microsoft-managed encryption keys, and also lets you manage your Cognitive Services subscriptions with your own keys, called customer-managed keys (CMK). This article covers data encryption at rest for Personalizer, and how to enable and manage CMK. -author: jeffmend +author: jcodella manager: venkyv ms.service: cognitive-services ms.subservice: personalizer ms.topic: conceptual ms.date: 08/28/2020 -ms.author: jeffme +ms.author: jacodel #Customer intent: As a user of the Personalizer service, I want to learn how encryption at rest works. --- diff --git a/articles/cognitive-services/personalizer/ethics-responsible-use.md b/articles/cognitive-services/personalizer/ethics-responsible-use.md index 2d350945275c3..97e83d404d214 100644 --- a/articles/cognitive-services/personalizer/ethics-responsible-use.md +++ b/articles/cognitive-services/personalizer/ethics-responsible-use.md @@ -2,8 +2,8 @@ title: Ethics and responsible use - Personalizer titleSuffix: Azure Cognitive Services description: These guidelines are aimed at helping you to implement personalization in a way that helps you build trust in your company and service. Be sure to pause to research, learn and deliberate on the impact of the personalization on people's lives. When in doubt, seek guidance. 
-author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/frequently-asked-questions.yml b/articles/cognitive-services/personalizer/frequently-asked-questions.yml index 541cf255e19b3..060fe430b1b25 100644 --- a/articles/cognitive-services/personalizer/frequently-asked-questions.yml +++ b/articles/cognitive-services/personalizer/frequently-asked-questions.yml @@ -6,8 +6,8 @@ metadata: ms.subservice: personalizer ms.topic: faq ms.date: 03/14/2022 - ms.author: jeffme - author: jeffmend + ms.author: jacodel + author: jcodella ms.manager: nitinme title: Personalizer frequently asked questions summary: This article contains answers to frequently asked troubleshooting questions about the Personalizer service. diff --git a/articles/cognitive-services/personalizer/how-personalizer-works.md b/articles/cognitive-services/personalizer/how-personalizer-works.md index 6fe18fa5fe636..d54a423125877 100644 --- a/articles/cognitive-services/personalizer/how-personalizer-works.md +++ b/articles/cognitive-services/personalizer/how-personalizer-works.md @@ -1,8 +1,8 @@ --- title: How Personalizer Works - Personalizer description: The Personalizer _loop_ uses machine learning to build the model that predicts the top action for your content. The model is trained exclusively on your data that you sent to it with the Rank and Reward calls. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/how-to-create-resource.md b/articles/cognitive-services/personalizer/how-to-create-resource.md index 6c4e934269bc3..508c812f6ad27 100644 --- a/articles/cognitive-services/personalizer/how-to-create-resource.md +++ b/articles/cognitive-services/personalizer/how-to-create-resource.md @@ -1,8 +1,8 @@ --- title: Create Personalizer resource description: In this article, learn how to create a personalizer resource in the Azure portal for each feedback loop. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/how-to-learning-behavior.md b/articles/cognitive-services/personalizer/how-to-learning-behavior.md index a38ceb6e5eba8..4fac18120fab6 100644 --- a/articles/cognitive-services/personalizer/how-to-learning-behavior.md +++ b/articles/cognitive-services/personalizer/how-to-learning-behavior.md @@ -1,8 +1,8 @@ --- title: Configure learning behavior description: Apprentice mode gives you confidence in the Personalizer service and its machine learning capabilities, and provides metrics that the service is sent information that can be learned from – without risking online traffic. 
-author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/how-to-manage-model.md b/articles/cognitive-services/personalizer/how-to-manage-model.md index d374688aafed8..2ce699b534526 100644 --- a/articles/cognitive-services/personalizer/how-to-manage-model.md +++ b/articles/cognitive-services/personalizer/how-to-manage-model.md @@ -1,8 +1,8 @@ --- title: Manage model and learning settings - Personalizer description: The machine-learned model and learning settings can be exported for backup in your own source control system. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer @@ -23,15 +23,15 @@ From the Resource management's section for **Model and learning settings**, revi ## Clear data for your learning loop 1. In the Azure portal, for your Personalizer resource, on the **Model and learning settings** page, select **Clear data**. -1. In order to clear all data, and reset the learning loop to the original state, select all 3 check boxes. +1. In order to clear all data, and reset the learning loop to the original state, select all three check boxes. ![In Azure portal, clear data from Personalizer resource.](./media/settings/clear-data-from-personalizer-resource.png) |Value|Purpose| |--|--| - |Logged personalization and reward data.|This logging data is used in offline evaluations. Clear the data if you are resetting your resource.| + |Logged personalization and reward data.|This logging data is used in offline evaluations. Clear the data if you're resetting your resource.| |Reset the Personalizer model.|This model changes on every retraining. This frequency of training is specified in **upload model frequency** on the **Configuration** page. | - |Set the learning policy to default.|If you have changed the learning policy as part of an offline evaluation, this resets to the original learning policy.| + |Set the learning policy to default.|If you've changed the learning policy as part of an offline evaluation, this resets to the original learning policy.| 1. Select **Clear selected data** to begin the clearing process. Status is reported in Azure notifications, in the top-right navigation. diff --git a/articles/cognitive-services/personalizer/how-to-multi-slot.md b/articles/cognitive-services/personalizer/how-to-multi-slot.md index 8cac66c39659f..f7756cb5b6a03 100644 --- a/articles/cognitive-services/personalizer/how-to-multi-slot.md +++ b/articles/cognitive-services/personalizer/how-to-multi-slot.md @@ -2,8 +2,8 @@ title: How to use multi-slot with Personalizer description: Learn how to use multi-slot with Personalizer to improve content recommendations provided by the service. 
services: cognitive-services -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/how-to-offline-evaluation.md b/articles/cognitive-services/personalizer/how-to-offline-evaluation.md index 0cb524a3fbeb4..3b78a6494a9f4 100644 --- a/articles/cognitive-services/personalizer/how-to-offline-evaluation.md +++ b/articles/cognitive-services/personalizer/how-to-offline-evaluation.md @@ -2,8 +2,8 @@ title: How to perform offline evaluation - Personalizer titleSuffix: Azure Cognitive Services description: This article will show you how to use offline evaluation to measure effectiveness of your app and analyze your learning loop. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/how-to-settings.md b/articles/cognitive-services/personalizer/how-to-settings.md index ac5da842807ea..197fd3819a58f 100644 --- a/articles/cognitive-services/personalizer/how-to-settings.md +++ b/articles/cognitive-services/personalizer/how-to-settings.md @@ -1,8 +1,8 @@ --- title: Configure Personalizer description: Service configuration includes how the service treats rewards, how often the service explores, how often the model is retrained, and how much data is stored. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/includes/find-azure-resource-info.md b/articles/cognitive-services/personalizer/includes/find-azure-resource-info.md index edbaa2dc4b1b5..9c30b2aee1a6a 100644 --- a/articles/cognitive-services/personalizer/includes/find-azure-resource-info.md +++ b/articles/cognitive-services/personalizer/includes/find-azure-resource-info.md @@ -2,13 +2,13 @@ title: Find your Personalizer resource endpoint and key titleSuffix: Azure Cognitive Services services: cognitive-services -author: jeffmend +author: jcodella manager: nitinme ms.service: cognitive-services ms.subservice: personalizer ms.topic: include ms.date: 08/25/2019 -ms.author: jeffme +ms.author: jacodel --- > [!IMPORTANT] diff --git a/articles/cognitive-services/personalizer/index.yml b/articles/cognitive-services/personalizer/index.yml index 92a8e6ea70bd2..7c6c04391adb8 100644 --- a/articles/cognitive-services/personalizer/index.yml +++ b/articles/cognitive-services/personalizer/index.yml @@ -6,8 +6,8 @@ summary: Learn how to use Personalizer to allow your application to choose the b metadata: ms.topic: landing-page ms.date: 06/11/2020 - ms.author: jeffme - author: jeffmend + ms.author: jacodel + author: jcodella ms.service: cognitive-services ms.subservice: personalizer services: cognitive-services diff --git a/articles/cognitive-services/personalizer/quickstart-personalizer-sdk.md b/articles/cognitive-services/personalizer/quickstart-personalizer-sdk.md index 69a0bfaf90499..9ae23da6dd6dd 100644 --- a/articles/cognitive-services/personalizer/quickstart-personalizer-sdk.md +++ b/articles/cognitive-services/personalizer/quickstart-personalizer-sdk.md @@ -1,8 +1,8 @@ --- title: "Quickstart: Create and use learning loop with SDK - Personalizer" description: This quickstart shows you how to create and manage your knowledge base using the Personalizer client library. 
-author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/responsible-characteristics-and-limitations.md b/articles/cognitive-services/personalizer/responsible-characteristics-and-limitations.md new file mode 100644 index 0000000000000..3150195e0564f --- /dev/null +++ b/articles/cognitive-services/personalizer/responsible-characteristics-and-limitations.md @@ -0,0 +1,88 @@ +--- +title: Characteristics and limitations of Personalizer +titleSuffix: Azure Cognitive Services +description: Characteristics and limitations of Personalizer +author: jcodella +ms.author: jacodel +manager: nitinme +ms.service: cognitive-services +ms.subservice: personalizer +ms.date: 05/23/2022 +ms.topic: article +--- + + +# Characteristics and limitations of Personalizer + +Azure Personalizer can work in many scenarios. To understand where you can apply Personalizer, make sure the requirements of your scenario meet the [expectations for Personalizer to work](where-can-you-use-personalizer.md#expectations-required-to-use-personalizer). To understand whether Personalizer should be used and how to integrate it into your applications, see [Use Cases for Personalizer](responsible-use-cases.md). You'll find criteria and guidance on choosing use cases, designing features, and reward functions for your uses of Personalizer. + +Before you read this article, it's helpful to understand some background information about [how Personalizer works](how-personalizer-works.md). + + +## Select features for Personalizer + +Personalizing content depends on having useful information about the content and the user. For some applications and industries, some user features can be directly or indirectly considered discriminatory and potentially illegal. See the [Personalizer integration and responsible use guidelines](responsible-guidance-integration.md) on assessing features to use with Personalizer. + + +## Computing rewards for Personalizer + +Personalizer learns to improve action choices based on the reward score provided by your application business logic. +A well-built reward score will act as a short-term proxy to a business goal that's tied to an organization's mission. +For example, rewarding on clicks will make Personalizer seek clicks at the expense of everything else, even if what's clicked is distracting to the user or not tied to a business outcome. +In contrast, a news site might want to set rewards tied to something more meaningful than clicks, such as "Did the user spend enough time to read the content?" or "Did the user click relevant articles or references?" With Personalizer, it's easy to tie metrics closely to rewards. However, you will need to be careful not to confound short-term user engagement with desired outcomes. + + +## Unintended consequences from reward scores + +Even if built with the best intentions reward scores might create unexpected consequences or unintended results because of how Personalizer ranks content. + +Consider the following examples: + +- Rewarding video content personalization on the percentage of the video length watched will probably tend to rank shorter videos higher than longer videos. +- Rewarding social media shares, without sentiment analysis of how it's shared or the content itself, might lead to ranking offensive, unmoderated, or inflammatory content. This type of content tends to incite a lot of engagement but is often damaging. 
+- Rewarding the action on user interface elements that users don't expect to change might interfere with the usability and predictability of the user interface. For example, buttons that change location or purpose without warning might make it harder for certain groups of users to stay productive. + +Implement these best practices: + +- Run offline experiments with your system by using different reward approaches to understand impact and side effects. +- Evaluate your reward functions, and ask yourself how a naïve person might alter its interpretation which may result in unintentional or undesirable outcomes. +- Archive information and assets, such as models, learning policies, and other data, that Personalizer uses to function, so that results can be reproducible. + + +## General guidelines to understand and improve performance + +Because Personalizer is based on Reinforcement Learning and learns from rewards to make better choices over time, performance isn't measured in traditional supervised learning terms used in classifiers, such as precision and recall. The performance of Personalizer is directly measured as the sum of reward scores it receives from your application via the Reward API. + +When you use Personalizer, the product user interface in the Azure portal provides performance information so you can monitor and act on it. The performance can be seen in the following ways: + +- If Personalizer is in Online Learning mode, you can perform [offline evaluations](concepts-offline-evaluation.md). +- If Personalizer is in [Apprentice mode](concept-apprentice-mode.md), you can see the performance metrics (events imitated and rewards imitated) in the Evaluation pane in the Azure portal. + +We recommend you perform frequent offline evaluations to maintain oversight. This task will help you monitor trends and ensure effectiveness. For example, you could decide to temporarily put Personalizer in Apprentice Mode if reward performance has a dip. + +### Personalizer performance estimates shown in Offline Evaluations: Limitations + +We define the "performance" of Personalizer as the total rewards it obtains during use. Personalizer performance estimates shown in Offline Evaluations are computed instead of measured. It is important to understand the limitations of these estimates: + +- The estimates are based on past data, so future performance may vary as the world and your users change. +- The estimates for baseline performance are computed probabilistically. For this reason, the confidence band for the baseline average reward is important. The estimate will get more precise with more events. If you use a smaller number of actions in each Rank call the performance estimate may increase in confidence as there is a higher probability that Personalizer may choose any one of them (including the baseline action) for every event. +- Personalizer constantly trains a model in near real time to improve the actions chosen for each event, and as a result, it will affect the total rewards obtained. The model performance will vary over time, depending on the recent past training data. +- Exploration and action choice are stochastic processes guided by the Personalizer model. The random numbers used for these stochastic processes are seeded from the Event Id. To ensure reproducibility of explore-exploit and other stochastic processes, use the same Event Id. +- Online performance may be capped by [exploration](concepts-exploration.md). 
Lowering exploration settings will limit how much information is harvested to stay on top of changing trends and usage patterns, so the balance depends on each use case. Some use cases merit starting off with higher exploration settings and reducing them over time (e.g., start with 30% and reduce to 10%). + + +### Check existing models that might accidentally bias Personalizer + +Existing recommendations, customer segmentation, and propensity model outputs can be used by your application as inputs to Personalizer. Personalizer learns to disregard features that don't contribute to rewards. Review and evaluate any propensity models to determine if they're good at predicting rewards and contain strong biases that might generate harm as a side effect. For example, look for recommendations that might be based on harmful stereotypes. Consider using tools such as [FairLearn](https://fairlearn.org/) to facilitate the process. + + +## Proactive assessments during your project lifecycle + +Consider creating methods for team members, users, and business owners to report concerns regarding responsible use and a process that prioritizes their resolution. Consider treating tasks for responsible use just like other crosscutting tasks in the application lifecycle, such as tasks related to user experience, security, or DevOps. Tasks related to responsible use and their requirements shouldn’t be afterthoughts. Responsible use should be discussed and implemented throughout the application lifecycle. + + +## Next steps + +- [Responsible use and integration](responsible-guidance-integration.md) +- [Offline evaluations](concepts-offline-evaluation.md) +- [Features for context and actions](concepts-features.md) diff --git a/articles/cognitive-services/personalizer/responsible-data-and-privacy.md b/articles/cognitive-services/personalizer/responsible-data-and-privacy.md new file mode 100644 index 0000000000000..e2edcfeaed085 --- /dev/null +++ b/articles/cognitive-services/personalizer/responsible-data-and-privacy.md @@ -0,0 +1,123 @@ +--- +title: Data and privacy for Personalizer +titleSuffix: Azure Cognitive Services +description: Data and privacy for Personalizer +author: jcodella +ms.author: jacodel +manager: nitinme +ms.service: cognitive-services +ms.subservice: personalizer +ms.date: 05/23/2022 +ms.topic: article +--- + +# Data and privacy for Personalizer + +This article provides information about what data Azure Personalizer uses to work, how it processes that data, and how you can control that data. It assumes basic familiarity with [what Personalizer is](what-is-personalizer.md) and [how Personalizer works](how-personalizer-works.md). Specific terms can be found in Terminology. + + +## What data does Personalizer process? + +Personalizer processes the following types of data: +- **Context features and Action features**: Your application sends information about users, and the products or content to personalize, in aggregated form. This data is sent to Personalizer in each Rank API call in arguments for Context and Actions. You decide what to send to the API and how to aggregate it. The data is expressed as attributes or features. You provide information about your users, such as their device and their environment, as Context features. You shouldn't send features specific to a user like a phone number or email or User IDs. Action features include information about your content and product, such as movie genre or product price. 
For more information, see [Features for Actions and Context](concepts-features.md). +- **Reward information**: A reward score (a number between 0 and 1) ranks how well the user interaction resulting from the personalization choice mapped to a business goal. For example, an event might get a reward of "1" if a recommended article was clicked on. For more information, see [Rewards](concept-rewards.md). + +To understand more about what information you typically use with Personalizer, see [Features are information about Actions and Context](concepts-features.md). + +> [!TIP] +> You decide which features to use, how to aggregate them, and where the information comes from when you call the Personalizer Rank API in your application. You also determine how to create reward scores. To make informed decisions about what information to use with Personalizer, see the [Personalizer responsible use guidelines](responsible-use-cases.md). + + +## How does Personalizer process data? + +The following diagram illustrates how your data is processed. + +![Diagram that shows how Personalizer processes data.](media/how-personalizer-works/personalization-how-it-works.png) + +Personalizer processes data as follows: + +1. Personalizer receives data each time the application calls the Rank API for a personalization event. The data is sent via the arguments for the Context and Actions. + +2. Personalizer uses the information in the Context and Actions, its internal AI models, and service configuration to return the rank response for the ID of the action to use. The contents of the Context and Actions are stored for no more than 48 hours in transient caches with the EventID used or generated in the Rank API. +3. The application then calls the Reward API with one or more reward scores. This information is also stored in transient caches and matched with the Actions and Context information. +4. After the rank and reward information for events is correlated, it's removed from transient caches and placed in more permanent storage. It remains in permanent storage until the number of days specified in the Data Retention setting has gone by, at which time the information is deleted. If you choose not to specify a number of days in the Data Retention setting, this data will be saved as long as the Personalizer Azure Resource is not deleted or until you choose to Clear Data via the UI or APIs. You can change the Data Retention setting at any time. +5. Personalizer continuously trains internal Personalizer AI models specific to this Personalizer loop by using the data in the permanent storage and machine learning configuration parameters in [Learning settings](concept-active-learning.md). +6. Personalizer creates [offline evaluations](concepts-offline-evaluation.md) either automatically or on demand. +Offline evaluations contain a report of rewards obtained by Personalizer models during a past time period. An offline evaluation embeds the models active at the time of their creation, and the learning settings used to create them, as well as a historical aggregate of average reward per event for that time window. Evaluations also include [feature importance](concept-feature-evaluation.md), which is a list of features observed in the time period, and their relative importance in the model.
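+
+As an illustration of the flow above, the following minimal sketch pairs one Rank call with its matching Reward call over the REST API. It isn't part of the numbered steps; the endpoint, key, feature names, and the `v1.0` path are placeholder assumptions to replace with your own resource's values and API version.
+
+```python
+import uuid
+
+import requests
+
+ENDPOINT = "https://<your-resource>.cognitiveservices.azure.com"  # assumption: your Personalizer endpoint
+HEADERS = {"Ocp-Apim-Subscription-Key": "<your-key>", "Content-Type": "application/json"}
+
+event_id = str(uuid.uuid4())
+rank_body = {
+    "eventId": event_id,
+    # Context features: aggregated information about the user and environment (no direct identifiers).
+    "contextFeatures": [{"device": "mobile", "timeOfDay": "evening"}],
+    # Action features: information about each candidate content item.
+    "actions": [
+        {"id": "article-a", "features": [{"topic": "sports", "length": "short"}]},
+        {"id": "article-b", "features": [{"topic": "cooking", "length": "long"}]},
+    ],
+}
+rank = requests.post(f"{ENDPOINT}/personalizer/v1.0/rank", headers=HEADERS, json=rank_body).json()
+chosen_action = rank["rewardActionId"]  # show this action to the user
+
+# After observing how the user reacted, report a reward score between 0 and 1 for the same event ID.
+requests.post(
+    f"{ENDPOINT}/personalizer/v1.0/events/{event_id}/reward",
+    headers=HEADERS,
+    json={"value": 1.0},
+)
+```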
+ + +### Independence of Personalizer loops + +Each Personalizer loop is separate and independent from others, as follows: + +- **No external data augmentation**: Each Personalizer loop only uses the data supplied to it by you via Rank and Reward API calls to train models. Personalizer doesn't use any additional information from any origin, such as other Personalizer loops in your own Azure subscription, Microsoft, third-party sources or subprocessors. +- **No data, model, or information sharing**: A Personalizer loop won't share information about events, features, and models with any other Personalizer loop in your subscription, Microsoft, third parties or subprocessors. + + +## How is data retained and what customer controls are available? + +Personalizer retains different types of data in different ways and provides the following controls for each. + + +### Personalizer rank and reward data + +Personalizer stores the features about Actions and Context sent via rank and reward calls for the number of days specified in configuration under Data Retention. +To control this data retention, you can: + +1. Specify the number of days to retain log storage in the [Azure portal for the Personalizer resource](how-to-settings.md) under **Configuration** > **Data Retention** or via the API. The default **Data Retention** setting is seven days. Personalizer deletes all Rank and Reward data older than this number of days automatically. + +2. Clear data for logged personalization and reward data in the Azure portal under **Model and learning settings** > **Clear data** > **Logged personalization and reward data** or via the API. + +3. Delete the Personalizer loop from your subscription in the Azure portal or via Azure resource management APIs. + +You can't access past data from Rank and Reward API calls in the Personalizer resource directly. If you want to see all the data that's being saved, configure log mirroring to create a copy of this data on an Azure Blob Storage resource you've created and are responsible for managing. + + +### Personalizer transient cache + +Personalizer stores partial data about an event separate from rank and reward calls in transient caches. Events are automatically purged from the transient cache 48 hours from the time the event occurred. + +To delete transient data, you can: + +1. Clear data for logged personalization and reward data in the Azure portal under **Model and learning settings** > **Clear data** or via the API. + +2. Delete the Personalizer loop from your subscription in the Azure portal or via Azure resource management APIs. + + +### Personalizer models and learning settings + +A Personalizer loop trains models with data from Rank and Reward API calls, driven by the hyperparameters and configuration specified in **Model and learning settings** in the Azure portal. Models are volatile. They're constantly changing and being trained on additional data in near real time. Personalizer doesn't automatically save older models and keeps overwriting them with the latest models. For more information, see [How to manage models and learning settings](how-to-manage-model.md). To clear models and learning settings: + +1. Reset them in the Azure portal under **Model and learning settings** > **Clear data** or via the API. + +2. Delete the Personalizer loop from your subscription in the Azure portal or via Azure resource management APIs.
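+
+As a concrete example of the "via the API" option, the following sketch resets the model and the learning policy for a loop. It's illustrative only: the `v1.0` paths are assumptions about the Personalizer REST surface, so verify them against the current API reference before relying on them.
+
+```python
+import requests
+
+ENDPOINT = "https://<your-resource>.cognitiveservices.azure.com"  # assumption: your Personalizer endpoint
+HEADERS = {"Ocp-Apim-Subscription-Key": "<your-key>"}
+
+# Reset the trained model for this loop (the "Reset the Personalizer model" option under Clear data).
+requests.delete(f"{ENDPOINT}/personalizer/v1.0/model", headers=HEADERS)
+
+# Reset the learning policy to its default (the "Set the learning policy to default" option under Clear data).
+requests.delete(f"{ENDPOINT}/personalizer/v1.0/configurations/policy", headers=HEADERS)
+```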
+ + +### Personalizer evaluation reports + +Personalizer also retains the information generated in [offline evaluations](concepts-offline-evaluation.md) for reports. + +To delete offline evaluation reports, you can: + +1. Go to the Personalizer loop under the Azure portal. Go to **Evaluations** and delete the relevant evaluation. + +2. Delete evaluations via the Evaluations API. + +3. Delete the Personalizer loop from your subscription in the Azure portal or via Azure resource management APIs. + + +### Further storage considerations + +- **Customer managed keys**: Customers can configure the service to encrypt data at rest with their own managed keys. This second layer of encryption is on top of Microsoft's own encryption. +- **Geography**: In all cases, the incoming data, models, and evaluations are processed and stored in the same geography where the Personalizer resource was created. + +Also see: + +- [How to manage model and learning settings](how-to-manage-model.md) +- [Configure Personalizer learning loop](how-to-settings.md) + + +## Next steps + +- [See Responsible use guidelines for Personalizer](responsible-use-cases.md). + +To learn more about Microsoft's privacy and security commitments, see the[Microsoft Trust Center](https://www.microsoft.com/trust-center). diff --git a/articles/cognitive-services/personalizer/responsible-guidance-integration.md b/articles/cognitive-services/personalizer/responsible-guidance-integration.md new file mode 100644 index 0000000000000..16c7e3cb1ac6e --- /dev/null +++ b/articles/cognitive-services/personalizer/responsible-guidance-integration.md @@ -0,0 +1,69 @@ +--- +title: Guidance for integration and responsible use of Personalizer +titleSuffix: Azure Cognitive Services +description: Guidance for integration and responsible use of Personalizer +author: jcodella +ms.author: jacodel +manager: nitinme +ms.service: cognitive-services +ms.subservice: personalizer +ms.date: 05/23/2022 +ms.topic: article +--- + + +# Guidance for integration and responsible use of Personalizer + +Microsoft works to help customers responsibly develop and deploy solutions by using Azure Personalizer. Our principled approach upholds personal agency and dignity by considering the AI system's: + +- Fairness, reliability, and safety. +- Privacy and security. +- Inclusiveness. +- Transparency. +- Human accountability. + +These considerations reflect our commitment to developing responsible AI. + + +## General guidelines for integration and responsible use principles + +When you get ready to integrate and responsibly use AI-powered products or features, the following activities will help to set you up for success: + +- Understand what it can do. Fully assess the potential of Personalizer to understand its capabilities and limitations. Understand how it will perform in your particular scenario and context by thoroughly testing it with real-life conditions and data. + +- **Respect an individual's right to privacy**. Only collect data and information from individuals for lawful and justifiable purposes. Only use data and information that you have consent to use for this purpose. + +- **Obtain legal review**. Obtain appropriate legal advice to review Personalizer and how you are using it in your solution, particularly if you will use it in sensitive or high-risk applications. Understand what restrictions you might need to work within and your responsibility to resolve any issues that might come up in the future. + +- **Have a human in the loop**. 
Include human oversight as a consistent pattern area to explore. Ensure constant human oversight of the AI-powered product or feature. Maintain the role of humans in decision making. Make sure you can have real-time human intervention in the solution to prevent harm and manage situations when the AI system doesn’t perform as expected. + +- **Build trust with affected stakeholders**. Communicate the expected benefits and potential risks to affected stakeholders. Help people understand why the data is needed and how the use of the data will lead to their benefit. Describe data handling in an understandable way. + +- **Create a customer feedback loop**. Provide a feedback channel that allows users and individuals to report issues with the service after it's deployed. After you've deployed an AI-powered product or feature, it requires ongoing monitoring and improvement. Be ready to implement any feedback and suggestions for improvement. Establish channels to collect questions and concerns from affected stakeholders. People who might be directly or indirectly affected by the system include employees, visitors, and the general public. + +- **Feedback**: Seek feedback from a diverse sampling of the community during the development and evaluation process (for example, historically marginalized groups, people with disabilities, and service workers). For more information, see Community jury. + +- **User Study**: Any consent or disclosure recommendations should be framed in a user study. Evaluate the first and continuous-use experience with a representative sample of the community to validate that the design choices lead to effective disclosure. Conduct user research with 10-20 community members (affected stakeholders) to evaluate their comprehension of the information and to determine if their expectations are met. + +- **Transparency**: Consider providing users with information about how the content was personalized. For example, you can give your users a button labeled Why These Suggestions? that shows which top features of the user and actions played a role in producing the Personalizer results. + +- **Adversarial use**: consider establishing a process to detect and act on malicious manipulation. There are actors that will take advantage of machine learning and AI systems' ability to learn from their environment. With coordinated attacks, they can artificially fake patterns of behavior that shift the data and AI models toward their goals. If your use of Personalizer could influence important choices, make sure you have the appropriate means to detect and mitigate these types of attacks in place. + + +## Your responsibility + +All guidelines for responsible implementation build on the foundation that developers and businesses using Personalizer are responsible and accountable for the effects of using these algorithms in society. If you're developing an application that your organization will deploy, you should recognize your role and responsibility for its operation and how it affects people. If you're designing an application to be deployed by a third party, come to a shared understanding of who is ultimately responsible for the behavior of the application. Make sure to document that understanding. + + +## Questions and feedback + +Microsoft is continuously upgrading tools and documents to help you act on these responsibilities. 
Our team invites you to [provide feedback to Microsoft](mailto:cogsvcs-RL-feedback@microsoft.com?subject%3DPersonalizer%20Responsible%20Use%20Feedback&body%3D%5BPlease%20share%20any%20question%2C%20idea%20or%20concern%5D) if you believe other tools, product features, and documents would help you implement these guidelines for using Personalizer. + + +## Recommended reading +- See Microsoft's six principles for the responsible development of AI published in the January 2018 book, [The Future Computed](https://news.microsoft.com/futurecomputed/). + + +## Next steps + +Understand how the Personalizer API receives features: [Features: Action and Context](concepts-features.md) diff --git a/articles/cognitive-services/personalizer/responsible-use-cases.md b/articles/cognitive-services/personalizer/responsible-use-cases.md new file mode 100644 index 0000000000000..878fed5b4da1d --- /dev/null +++ b/articles/cognitive-services/personalizer/responsible-use-cases.md @@ -0,0 +1,75 @@ +--- +title: Transparency note for Personalizer +titleSuffix: Azure Cognitive Services +description: Transparency Note for Personalizer +author: jcodella +ms.author: jacodel +manager: nitinme +ms.service: cognitive-services +ms.subservice: personalizer +ms.date: 05/23/2022 +ms.topic: article +--- + +# Use cases for Personalizer + +## What is a Transparency Note? + +An AI system includes not only the technology, but also the people who will use it, the people who will be affected by it, and the environment in which it is deployed. Creating a system that is fit for its intended purpose requires an understanding of how the technology works, its capabilities and limitations, and how to achieve the best performance. + +Microsoft provides *Transparency Notes* to help you understand how our AI technology works. This includes the choices system owners can make that influence system performance and behavior, and the importance of thinking about the whole system, including the technology, the people, and the environment. You can use Transparency Notes when developing or deploying your own system, or share them with the people who will use or be affected by your system. + +Transparency Notes are part of a broader effort at Microsoft to put our AI principles into practice. To find out more, see [Microsoft AI Principles](https:// +www.microsoft.com/ai/responsible-ai). + +## Introduction to Personalizer + +Azure Personalizer is a cloud-based service that helps your applications choose the best content item to show your users. You can use Personalizer to determine what product to suggest to shoppers or to figure out the optimal position for an advertisement. After the content is shown to the user, your application monitors the user's reaction and reports a reward score back to Personalizer. The reward score is used to continuously improve the machine learning model using reinforcement learning. This enhances the ability of Personalizer to select the best content item in subsequent interactions based on the contextual information it receives for each. + +For more information, see: + +- [What is Personalizer?](what-is-personalizer.md) +- [Where can you use Personalizer](where-can-you-use-personalizer.md) +- [How Personalizer works](how-personalizer-works.md) + +## Key terms + +|Term| Definition| +|:-----|:----| +|**Learning Loop** | You create a Personalizer resource, called a learning loop, for every part of your application that can benefit from personalization. 
If you have more than one experience to personalize, create a loop for each. | +|**Online model** | The default [learning behavior](terminology.md#learning-behavior) for Personalizer where your learning loop, uses machine learning to build the model that predicts the **top action** for your content. | +|**Apprentice mode** | A [learning behavior](terminology.md#learning-behavior) that helps warm-start a Personalizer model to train without impacting the applications outcomes and actions. | +|**Rewards**| A measure of how the user responded to the Rank API's returned reward action ID, as a score between 0 to 1. The 0 to 1 value is set by your business logic, based on how the choice helped achieve your business goals of personalization. The learning loop doesn't store this reward as individual user history. | +|**Exploration**| The Personalizer service is exploring when, instead of returning the best action, it chooses a different action for the user. The Personalizer service avoids drift, stagnation, and can adapt to ongoing user behavior by exploring. | + +For more information, and additional key terms, please refer to the [Personalizer Terminology](terminology.md) and [conceptual documentation](how-personalizer-works.md). + +## Example use cases + +Some common customer motivations for using Personalizer are to: + +- **User engagement**: Capture user interest by choosing content to increase clickthrough, or to prioritize the next best action to improve average revenue. Other mechanisms to increase user engagement might include selecting videos or music in a dynamic channel or playlist. +- **Content optimization**: Images can be optimized for a product (such as selecting a movie poster from a set of options) to optimize clickthrough, or the UI layout, colors, images, and blurbs can be optimized on a web page to increase conversion and purchase. +- **Maximize conversions using discounts and coupons**: To get the best balance of margin and conversion choose which discounts the application will provide to users, or decide which product to highlight from the results of a recommendation engine to maximize conversion. +- **Maximize positive behavior change**: Select which wellness tip question to send in a notification, messaging, or SMS push to maximize positive behavior change. +- **Increase productivity** in customer service and technical support by highlighting the most relevant next best actions or the appropriate content when users are looking for documents, manuals, or database items. + +## Considerations when choosing a use case + +- Using a service that learns to personalize content and user interfaces is useful. However, it can also be misapplied if the personalization creates harmful side effects in the real world. Consider how personalization also helps your users achieve their goals. +- Consider what the negative consequences in the real world might be if Personalizer isn't suggesting particular items because the system is trained with a bias to the behavior patterns of the majority of the system users. +- Consider situations where the exploration behavior of Personalizer might cause harm. +- Carefully consider personalizing choices that are consequential or irreversible, and that should not be determined by short-term signals and rewards. +- Don't provide actions to Personalizer that shouldn't be chosen. For example, inappropriate movies should be filtered out of the actions to personalize if making a recommendation for an anonymous or underage user. 
+ +Here are some scenarios where the above guidance will play a role in whether, and how, to apply Personalizer: + +- Avoid using Personalizer for ranking offers on specific loan, financial, and insurance products, where personalization features are regulated, based on data the individuals don't know about, can't obtain, or can't dispute; and choices needing years and information “beyond the click” to truly assess how good recommendations were for the business and the users. +- Carefully consider personalizing highlights of school courses and education institutions where recommendations without enough exploration might propagate biases and reduce users' awareness of other options. +- Avoid using Personalizer to synthesize content algorithmically with the goal of influencing opinions in democracy and civic participation, as it is consequential in the long term, and can be manipulative if the user's goal for the visit is to be informed, not influenced. + + +## Next steps + +* [Characteristics and limitations for Personalizer](responsible-characteristics-and-limitations.md) +* [Where can you use Personalizer?](where-can-you-use-personalizer.md) diff --git a/articles/cognitive-services/personalizer/terminology.md b/articles/cognitive-services/personalizer/terminology.md index ba12a0c928bb9..568e639784901 100644 --- a/articles/cognitive-services/personalizer/terminology.md +++ b/articles/cognitive-services/personalizer/terminology.md @@ -1,8 +1,8 @@ --- title: Terminology - Personalizer description: Personalizer uses terminology from reinforcement learning. These terms are used in the Azure portal and the APIs. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/toc.yml b/articles/cognitive-services/personalizer/toc.yml index 2219d392d8c4b..81b4f63425364 100644 --- a/articles/cognitive-services/personalizer/toc.yml +++ b/articles/cognitive-services/personalizer/toc.yml @@ -20,8 +20,16 @@ href: https://github.com/Azure-Samples/cognitive-services-personalizer-samples - name: Responsible use of AI items: - - name: Ethics & responsible use - href: ethics-responsible-use.md + - name: Transparency notes + items: + - name: Use cases + href: responsible-use-cases.md + - name: Characteristics and limitations + href: responsible-characteristics-and-limitations.md + - name: Data and privacy + href: responsible-data-and-privacy.md + - name: Guidance for integration and responsible use + href: responsible-guidance-integration.md - name: How-to guides items: - name: Create Personalizer Resource diff --git a/articles/cognitive-services/personalizer/tutorial-use-azure-notebook-generate-loop-data.md b/articles/cognitive-services/personalizer/tutorial-use-azure-notebook-generate-loop-data.md index b4437b366c36a..b07575b2e47b8 100644 --- a/articles/cognitive-services/personalizer/tutorial-use-azure-notebook-generate-loop-data.md +++ b/articles/cognitive-services/personalizer/tutorial-use-azure-notebook-generate-loop-data.md @@ -2,8 +2,8 @@ title: "Tutorial: Azure Notebook - Personalizer" titleSuffix: Azure Cognitive Services description: This tutorial simulates a Personalizer loop _system in an Azure Notebook, which suggests which type of coffee a customer should order. The users and their preferences are stored in a user dataset. Information about the coffee is also available and stored in a coffee dataset. 
-author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/tutorial-use-personalizer-chat-bot.md b/articles/cognitive-services/personalizer/tutorial-use-personalizer-chat-bot.md index 47212633fa5e4..40bf80def639e 100644 --- a/articles/cognitive-services/personalizer/tutorial-use-personalizer-chat-bot.md +++ b/articles/cognitive-services/personalizer/tutorial-use-personalizer-chat-bot.md @@ -1,8 +1,8 @@ --- title: Use Personalizer in chat bot - Personalizer description: Customize a C# .NET chat bot with a Personalizer loop to provide the correct content to a user based on actions (with features) and context features. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/tutorial-use-personalizer-web-app.md b/articles/cognitive-services/personalizer/tutorial-use-personalizer-web-app.md index 6228cc7d30533..62f6c6084c2ce 100644 --- a/articles/cognitive-services/personalizer/tutorial-use-personalizer-web-app.md +++ b/articles/cognitive-services/personalizer/tutorial-use-personalizer-web-app.md @@ -1,8 +1,8 @@ --- title: Use web app - Personalizer description: Customize a C# .NET web app with a Personalizer loop to provide the correct content to a user based on actions (with features) and context features. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/what-is-personalizer.md b/articles/cognitive-services/personalizer/what-is-personalizer.md index 6177d70d4e6e2..5983901a766eb 100644 --- a/articles/cognitive-services/personalizer/what-is-personalizer.md +++ b/articles/cognitive-services/personalizer/what-is-personalizer.md @@ -1,8 +1,8 @@ --- title: What is Personalizer? description: Personalizer is a cloud-based service that allows you to choose the best experience to show to your users, learning from their real-time behavior. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/whats-new.md b/articles/cognitive-services/personalizer/whats-new.md index bf84a0d136b1a..6cab04c400e3d 100644 --- a/articles/cognitive-services/personalizer/whats-new.md +++ b/articles/cognitive-services/personalizer/whats-new.md @@ -2,8 +2,8 @@ title: What's new - Personalizer titleSuffix: Azure Cognitive Services description: This article contains news about Personalizer. 
-author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/personalizer/where-can-you-use-personalizer.md b/articles/cognitive-services/personalizer/where-can-you-use-personalizer.md index c21366f3a17b1..85e89c3086154 100644 --- a/articles/cognitive-services/personalizer/where-can-you-use-personalizer.md +++ b/articles/cognitive-services/personalizer/where-can-you-use-personalizer.md @@ -1,8 +1,8 @@ --- title: Where and how to use - Personalizer description: Personalizer can be applied in any situation where your application can select the right item, action, or product to display - in order to make the experience better, achieve better business results, or improve productivity. -author: jeffmend -ms.author: jeffme +author: jcodella +ms.author: jacodel ms.manager: nitinme ms.service: cognitive-services ms.subservice: personalizer diff --git a/articles/cognitive-services/video-indexer/toc.yml b/articles/cognitive-services/video-indexer/toc.yml index e035f09369393..b592ffd8a9815 100644 --- a/articles/cognitive-services/video-indexer/toc.yml +++ b/articles/cognitive-services/video-indexer/toc.yml @@ -2,36 +2,36 @@ - name: Overview items: - name: What is Video Indexer? - href: /azure-video-indexer/video-indexer-overview.md + href: /azure/azure-video-indexer/video-indexer-overview - name: Concepts - href: /azure-video-indexer/video-indexer-overview.md + href: /azure/azure-video-indexer/video-indexer-overview - name: Quickstarts expanded: true items: - name: Get started - href: /azure-video-indexer/video-indexer-get-started.md + href: /azure/azure-video-indexer/video-indexer-get-started - name: How to guides items: - name: Migrate from v1 to v2 - href: /azure-video-indexer/video-indexer-overview.md + href: /azure/azure-video-indexer/video-indexer-overview - name: Connect to Azure - href: /azure-video-indexer/connect-to-azure.md + href: /azure/azure-video-indexer/connect-to-azure - name: Manage account connected to Azure - href: /azure-video-indexer/manage-account-connected-to-azure.md + href: /azure/azure-video-indexer/manage-account-connected-to-azure - name: Use Video Indexer API - href: /azure-video-indexer/video-indexer-use-apis.md + href: /azure/azure-video-indexer/video-indexer-use-apis - name: Use API to upload and index videos - href: /azure-video-indexer/upload-index-videos.md + href: /azure/azure-video-indexer/upload-index-videos - name: Examine Video Indexer output - href: /azure-video-indexer/video-indexer-output-json-v2.md + href: /azure/azure-video-indexer/video-indexer-output-json-v2 - name: Find exact moments within videos - href: /azure-video-indexer/video-indexer-search.md + href: /azure/azure-video-indexer/video-indexer-search - name: View and edit Video Indexer insights - href: /app-service/quickstart-dotnetcore.md?tabs=netframework48 + href: /azure/app-service/quickstart-dotnetcore?tabs=netframework48 - name: Create video insights from existing videos - href: /azure-video-indexer/use-editor-create-project.md + href: /azure/azure-video-indexer/use-editor-create-project - name: Embed widgets into your application - href: /azure-video-indexer/video-indexer-embed-widgets.md + href: /azure/azure-video-indexer/video-indexer-embed-widgets - name: Reference items: - name: Video Indexer API @@ -39,8 +39,8 @@ - name: Resources items: - name: Pricing - href: https://azure.microsoft.com/pricing/details/cognitive-services/video-indexer/ + href: 
https://azure.microsoft.com/pricing/ - name: Stack Overflow href: https://stackoverflow.com/questions/tagged/microsoft-cognitive - name: Azure Roadmap - href: https://azure.microsoft.com/roadmap/?category=ai-machine-learning \ No newline at end of file + href: https://azure.microsoft.com/roadmap/?category=ai-machine-learning diff --git a/articles/cognitive-services/what-are-cognitive-services.md b/articles/cognitive-services/what-are-cognitive-services.md index 34de810f277ec..6ce7f77c12a8e 100644 --- a/articles/cognitive-services/what-are-cognitive-services.md +++ b/articles/cognitive-services/what-are-cognitive-services.md @@ -34,7 +34,7 @@ See the tables below to learn about the services offered within those categories |:-----------|:------------------|--| |[Computer Vision](./computer-vision/index.yml "Computer Vision")|The Computer Vision service provides you with access to advanced cognitive algorithms for processing images and returning information.| [Computer Vision quickstart](./computer-vision/quickstarts-sdk/client-library.md)| |[Custom Vision](./custom-vision-service/index.yml "Custom Vision Service")|The Custom Vision Service lets you build, deploy, and improve your own image classifiers. An image classifier is an AI service that applies labels to images, based on their visual characteristics. | [Custom Vision quickstart](./custom-vision-service/getting-started-build-a-classifier.md)| -|[Face](./face/index.yml "Face")| The Face service provides access to advanced face algorithms, enabling face attribute detection and recognition.| [Face quickstart](./face/quickstarts/client-libraries.md)| +|[Face](./computer-vision/index-identity.yml "Face")| The Face service provides access to advanced face algorithms, enabling face attribute detection and recognition.| [Face quickstart](./face/quickstarts/client-libraries.md)| ## Speech APIs diff --git a/articles/communication-services/concepts/analytics/enable-logging.md b/articles/communication-services/concepts/analytics/enable-logging.md index 5cddf8884a4c6..c58dac910e969 100644 --- a/articles/communication-services/concepts/analytics/enable-logging.md +++ b/articles/communication-services/concepts/analytics/enable-logging.md @@ -54,7 +54,7 @@ You'll also be prompted to select a destination to store the logs. Platform logs | Destination | Description | |:------------|:------------| -| [Log Analytics workspace](../../../azure-monitor/logs/design-logs-deployment.md) | Sending logs and metrics to a Log Analytics workspace allows you to analyze them with other monitoring data collected by Azure Monitor using powerful log queries and also to use other Azure Monitor features such as alerts and visualizations. | +| [Log Analytics workspace](../../../azure-monitor/logs/log-analytics-workspace-overview.md) | Sending logs and metrics to a Log Analytics workspace allows you to analyze them with other monitoring data collected by Azure Monitor using powerful log queries and also to use other Azure Monitor features such as alerts and visualizations. | | [Event Hubs](../../../event-hubs/index.yml) | Sending logs and metrics to Event Hubs allows you to stream data to external systems such as third-party SIEMs and other log analytics solutions. | | [Azure storage account](../../../storage/blobs/index.yml) | Archiving logs and metrics to an Azure storage account is useful for audit, static analysis, or backup. Compared to Azure Monitor Logs and a Log Analytics workspace, Azure storage is less expensive and logs can be kept there indefinitely. 
| diff --git a/articles/communication-services/concepts/analytics/log-analytics.md b/articles/communication-services/concepts/analytics/log-analytics.md index 4197cc9eddcc2..6a9b508a1180a 100644 --- a/articles/communication-services/concepts/analytics/log-analytics.md +++ b/articles/communication-services/concepts/analytics/log-analytics.md @@ -16,7 +16,7 @@ ms.subservice: data ## Overview and access -Before you can take advantage of [Log Analytics](../../../azure-monitor/logs/log-analytics-overview.md) for your Communications Services logs, you must first follow the steps outlined in [Enable logging in Diagnostic Settings](enable-logging.md). Once you have enabled your logs and a [Log Analytics Workspace](../../../azure-monitor/logs/design-logs-deployment.md), you will have access to many helpful [default query packs](../../../azure-monitor/logs/query-packs.md#default-query-pack) that will help you quickly visualize and understand the data available in your logs, which are described below. Through Log Analytics, you also get access to more Communications Services Insights via Azure Monitor Workbooks (see: [Communications Services Insights](insights.md)), the ability to create our own queries and Workbooks, [REST API access](https://dev.loganalytics.io/) to any query. +Before you can take advantage of [Log Analytics](../../../azure-monitor/logs/log-analytics-overview.md) for your Communications Services logs, you must first follow the steps outlined in [Enable logging in Diagnostic Settings](enable-logging.md). Once you have enabled your logs and a [Log Analytics Workspace](../../../azure-monitor/logs/workspace-design.md), you will have access to many helpful [default query packs](../../../azure-monitor/logs/query-packs.md#default-query-pack) that will help you quickly visualize and understand the data available in your logs, which are described below. Through Log Analytics, you also get access to more Communications Services Insights via Azure Monitor Workbooks (see: [Communications Services Insights](insights.md)), the ability to create our own queries and Workbooks, [REST API access](https://dev.loganalytics.io/) to any query. ### Access You can access the queries by starting on your Communications Services resource page, and then clicking on "Logs" in the left navigation within the Monitor section: diff --git a/articles/communication-services/concepts/best-practices.md b/articles/communication-services/concepts/best-practices.md index 57c0762245b33..22df95132becb 100644 --- a/articles/communication-services/concepts/best-practices.md +++ b/articles/communication-services/concepts/best-practices.md @@ -57,10 +57,20 @@ You can request device permissions using the SDK: #### Camera being used by another process - On Windows Chrome and Windows Edge, if you start/join/accept a call with video on and the camera device is being used by another process other than the browser that the web sdk is running on, then the call will be started with audio only and no video. A cameraStartFailed UFD will be raised because the camera failed to start since it was being used by another process. Same applies to turning video on mid-call. You can turn off the camera in the other process so that that process releases the camera device, and then start video again from the call and video will now turn on for the call and remote participants will start seeing your video. -- This is not an issue in MacOS Chrome nor MacOS Safari because the OS will let processes/threads share the camera device. 
+- This is not an issue in macOS Chrome nor macOS Safari because the OS will let processes/threads share the camera device. - On mobile devices, if a ProcessA requests the camera device and it is being used by ProcessB, then ProcessA will overtake the camera device and ProcessB will stop using the camera device - On iOS safari, you cannot have the camera on for multiple call clients within the same tab nor across tabs. When any call client uses the camera, it will overtake the camera from any previous call client that was using it. Previous call client will get a cameraStoppedUnexpectedly UFD. +### Screen sharing +#### Closing out of application does not stop it from being shared +For example, let's say that from Chromium, you screen share the Microsoft Teams application. You then click on the "X" button on the Teams application to close it. The Teams application will not be closed and it will still be running in the background. You will even still see the icon in the bottom right of your desktop bar. Since the Teams application is still running, that means that it is still being screen shared and the remote participant in the call can still see your Teams application being screen shared. In order to stop the application from being screen shared, you will have to right-click its icon on the desktop bar and then click Quit, click the "Stop sharing" button in the browser, or call the SDK's Call.stopScreenSharing() API. + +#### Safari can only do full screen sharing +Safari only allows you to screen share the entire screen, unlike Chromium, which lets you screen share the full screen, a specific desktop app, or a specific browser tab. + +#### Screen sharing permissions on macOS +In order to do screen sharing in macOS Safari or macOS Chrome, screen recording permissions must be granted to the browsers in the OS menu: "System Preferences" -> "Security & Privacy" -> "Screen Recording". + ## Next steps For more information, see the following articles: diff --git a/articles/communication-services/concepts/developer-tools/network-diagnostic.md index 588a0dd822a05..06422b8a5b6da 100644 --- a/articles/communication-services/concepts/developer-tools/network-diagnostic.md +++ b/articles/communication-services/concepts/developer-tools/network-diagnostic.md @@ -15,11 +15,13 @@ ms.service: azure-communication-services [!INCLUDE [Private Preview Disclaimer](../../includes/private-preview-include-section.md)] -The Network Diagnostics Tool enables Azure Communication Services developers to ensure that their device and network conditions are optimal for connecting to the service to ensure a great call experience. The tool can be found at [aka.ms/acsdiagnostics](https://acs-network-diagnostic-tool.azurewebsites.net/). Users can quickly run a test, by pressing the start test button. The tool performs diagnostics on the network, devices, and call quality. The results of the diagnostics are directly provided through the tools UI. No sign-in required to use the tool. +The **Network Diagnostics Tool** enables Azure Communication Services developers to ensure that their device and network conditions are optimal for connecting to the service to ensure a great call experience. The tool can be found at [aka.ms/acsdiagnostics](https://azurecommdiagnostics.net/). Users can quickly run a test by pressing the start test button. The tool performs diagnostics on the network, devices, and call quality.
The results of the diagnostics are directly provided through the tool's UI. No sign-in is required to use the tool. After the test, a GUID is presented, which can be provided to our support team for further help. ![Network Diagnostic Tool home screen](../media/network-diagnostic-tool.png) As part of the diagnostics performed, the user is asked to enable permissions for the tool to access their devices. Next, the user is asked to record their voice, which is then played back using an echo bot to ensure that the microphone is working. Finally, the tool performs a video test. The test uses the camera to detect video and measure the quality for sent and received frames. + +If you are looking to build your own Network Diagnostic Tool or to perform deeper integration of this tool into your application, you can leverage [pre-call diagnostic APIs](../voice-video-calling/pre-call-diagnostics.md) for the calling SDK. ## Performed tests @@ -36,10 +38,15 @@ As part of the diagnostics performed, the user is asked to enable permissions fo ## Privacy When a user runs a network diagnostic, the tool collects and stores service and client telemetry data to verify your network conditions and ensure that they're compatible with Azure Communication Services. The telemetry collected doesn't contain personally identifiable information. The test utilizes both audio and video collected through your device for this verification. The audio and video used for the test aren't stored. + +## Support + +The test provides a **unique identifier** for your test, which you can provide to our support team for further help. For more information, see [help and support options](../../support.md). ## Next Steps +- [Use Pre-Call Diagnostic APIs to build your own tech check](../voice-video-calling/pre-call-diagnostics.md) - [Explore User-Facing Diagnostic APIs](../voice-video-calling/user-facing-diagnostics.md) - [Enable Media Quality Statistics in your application](../voice-video-calling/media-quality-sdk.md) -- [Add Real-Time Inspection tool to your application](./real-time-inspection.md) +- [Debug your application with Monitoring tool](./real-time-inspection.md) - [Consume call logs with Azure Monitor](../analytics/call-logs-azure-monitor.md) diff --git a/articles/communication-services/concepts/developer-tools/real-time-inspection.md index e39fd4a943515..0f6803dce6f31 100644 --- a/articles/communication-services/concepts/developer-tools/real-time-inspection.md +++ b/articles/communication-services/concepts/developer-tools/real-time-inspection.md @@ -1,6 +1,6 @@ --- -title: Developer Tools - Real-Time Inspection for Azure Communication Services -description: Conceptual documentation outlining the capabilities provided by the Real-Time Inspection tool. +title: Developer Tools - Azure Communication Services Communication Monitoring +description: Conceptual documentation outlining the capabilities provided by the Communication Monitoring tool.
author: ddematheu2 manager: chpalm services: azure-communication-services @@ -11,18 +11,18 @@ ms.topic: conceptual ms.service: azure-communication-services --- -# Real-time Inspection Tool for Azure Communication Services +# Azure Communication Services communication monitoring [!INCLUDE [Private Preview Disclaimer](../../includes/private-preview-include-section.md)] -The Real-time Inspection Tool enables Azure Communication Services developers to inspect the state of the `Call` to debug or monitor their solution. For developers building an Azure Communication Services solution, they might need visibility for debugging into general call information such as the `Call ID` or advanced states, such as did a user facing diagnostic fire. The Real-time Inspection Tool provides developers this information and more. It can be easily added to any JavaScript (Web) solution by downloading the npm package `azure/communication-tools`. +The Azure Communication Services communication monitoring tool enables developers to inspect the state of the `Call` to debug or monitor their solution. For developers building an Azure Communication Services solution, they might need visibility for debugging into general call information such as the `Call ID` or advanced states, such as did a user facing diagnostic fire. The communication monitoring tool provides developers this information and more. It can be easily added to any JavaScript (Web) solution by downloading the npm package `@azure/communication-monitoring`. >[!NOTE] ->Find the open-source repository for the tool [here](https://github.com/Azure/communication-inspection). +>Find the open-source repository for the tool [here](https://github.com/Azure/communication-monitoring). ## Capabilities -The Real-time Inspection Tool provides developers three categories of information that can be used for debugging purposes: +The Communication Monitoring tool provides developers three categories of information that can be used for debugging purposes: | Category | Descriptions | |--------------------------------|-----------------------------------| @@ -32,65 +32,73 @@ The Real-time Inspection Tool provides developers three categories of informatio Data collected by the tool is only kept locally and temporarily. It can be downloaded from within the interface. -Real-time Inspection Tool is compatible with the same browsers as the Calling SDK [here](../voice-video-calling/calling-sdk-features.md?msclkid=f9cf66e6a6de11ec977ae3f6d266ba8d#javascript-calling-sdk-support-by-os-and-browser). +Communication Monitoring is compatible with the same browsers as the Calling SDK [here](../voice-video-calling/calling-sdk-features.md?msclkid=f9cf66e6a6de11ec977ae3f6d266ba8d#javascript-calling-sdk-support-by-os-and-browser). -## Get started with Real-time Inspection Tool +## Get started with Communication Monitoring -The tool can be accessed through an npm package `azure/communication-inspection`. The package contains the `InspectionTool` object that can be attached to a `Call`. The Call Inspector requires an `HTMLDivElement` as part of its constructor on which it will be rendered. The `HTMLDivElement` will dictate the size of the Call Inspector. +The tool can be accessed through an npm package `@azure/communication-monitoring`. The package contains the `CommunicationMonitoring` object that can be attached to a `Call`. 
Instructions on how to initialize the required `CallClient` and `CallAgent` objects can be found [here](https://docs.microsoft.com/azure/communication-services/how-tos/calling-sdk/manage-calls?pivots=platform-web#initialize-required-objects). `CommunicationMonitoring` also requires an `HTMLDivElement` as part of its constructor on which it will be rendered. The `HTMLDivElement` will dictate the size of the rendered panel. -### Installing Real-time Inspection Tool +### Installing Communication Monitoring ```bash -npm i @azure/communication-inspection +npm i @azure/communication-monitoring ``` -### Initialize Real-time Inspection Tool +### Initialize Communication Monitoring ```javascript -import { CallClient, CallAgent } from "@azure/communication-calling"; -import { InspectionTool } from "@azure/communication-tools"; +import { CallAgent, CallClient } from '@azure/communication-calling' +import { CommunicationMonitoring } from '@azure/communication-monitoring' -const callClient = new callClient(); -const callAgent = await callClient.createCallAgent({INSERT TOKEN CREDENTIAL}); -const call = callAgent.startCall({INSERT CALL INFORMATION}); +const selectedDiv = document.getElementById('selectedDiv') -const inspectionTool = new InspectionTool(call, {HTMLDivElement}); +const options = { + callClient: {INSERT CALL CLIENT OBJECT}, + callAgent: {INSERT CALL AGENT OBJECT}, + divElement: selectedDiv, +} + +const communicationMonitoring = new CommunicationMonitoring(options) ``` ## Usage -`start`: enable the `InspectionTool` to start reading data from the call object and storing it locally for visualization. +`start`: enable the `CommunicationMonitoring` instance to start reading data from the call object and storing it locally for visualization. ```javascript -inspectionTool.start() +communicationMonitoring.start() ``` -`stop`: disable the `InspectionTool` from reading data from the call object. +`stop`: disable the `CommunicationMonitoring` instance from reading data from the call object. ```javascript -inspectionTool.stop() +communicationMonitoring.stop() ``` -`open`: Open the `InspectionTool` in the UI. +`open`: Open the `CommunicationMonitoring` instance in the UI. ```javascript -inspectionTool.open() +communicationMonitoring.open() ``` -`close`: Dismiss the `InspectionTool` in the UI. +`close`: Dismiss the `CommunicationMonitoring` instance in the UI. ```javascript -inspectionTool.close() +communicationMonitoring.close() ``` +## Download logs + +The tool lets you download the captured logs using the `Download logs` button on the top right. The tool will generate a compressed log file that can be provided to our customer support team for debugging.
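For orientation, here is a minimal end-to-end sketch that ties the monitoring tool to a call setup. It is illustrative only: it assumes an ES module context where top-level `await` is available, a hypothetical `<USER_ACCESS_TOKEN>` placeholder, and the `callClient`/`callAgent`/`divElement` option names shown in the snippet above; refer to the linked initialization guide for the authoritative setup.

```javascript
import { CallClient } from '@azure/communication-calling'
import { AzureCommunicationTokenCredential } from '@azure/communication-common'
import { CommunicationMonitoring } from '@azure/communication-monitoring'

// Initialize the calling objects that the monitoring panel reads from.
const callClient = new CallClient()
const tokenCredential = new AzureCommunicationTokenCredential('<USER_ACCESS_TOKEN>')
const callAgent = await callClient.createCallAgent(tokenCredential)

// The div that hosts the panel; its size dictates the size of the rendered panel.
const selectedDiv = document.getElementById('selectedDiv')

const communicationMonitoring = new CommunicationMonitoring({
  callClient: callClient,
  callAgent: callAgent,
  divElement: selectedDiv,
})

// Begin collecting call data locally and show the panel.
communicationMonitoring.start()
communicationMonitoring.open()
```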
+ ## Next Steps - [Explore User-Facing Diagnostic APIs](../voice-video-calling/user-facing-diagnostics.md) diff --git a/articles/communication-services/concepts/email/email-authentication-best-practice.md b/articles/communication-services/concepts/email/email-authentication-best-practice.md index 0416512c2a79b..d1ef54eba7223 100644 --- a/articles/communication-services/concepts/email/email-authentication-best-practice.md +++ b/articles/communication-services/concepts/email/email-authentication-best-practice.md @@ -58,9 +58,9 @@ A DMARC policy record allows a domain to announce that their email uses authenti ## Next steps -* [Best practices for implementing DMARC](https://docs.microsoft.com/microsoft-365/security/office-365-security/use-dmarc-to-validate-email?view=o365-worldwide#best-practices-for-implementing-dmarc-in-microsoft-365&preserve-view=true) +* [Best practices for implementing DMARC](/microsoft-365/security/office-365-security/use-dmarc-to-validate-email?preserve-view=true&view=o365-worldwide#best-practices-for-implementing-dmarc-in-microsoft-365) -* [Troubleshoot your DMARC implementation](https://docs.microsoft.com/microsoft-365/security/office-365-security/use-dmarc-to-validate-email?view=o365-worldwide#troubleshooting-your-dmarc-implementation&preserve-view=true) +* [Troubleshoot your DMARC implementation](/microsoft-365/security/office-365-security/use-dmarc-to-validate-email?preserve-view=true&view=o365-worldwide#troubleshooting-your-dmarc-implementation) * [Email domains and sender authentication for Azure Communication Services](./email-domain-and-sender-authentication.md) @@ -72,4 +72,4 @@ The following documents may be interesting to you: - Familiarize yourself with the [Email client library](../email/sdk-features.md) - How to send emails with custom verified domains?[Add custom domains](../../quickstarts/email/add-custom-verified-domains.md) -- How to send emails with Azure Managed Domains?[Add Azure Managed domains](../../quickstarts/email/add-azure-managed-domains.md) +- How to send emails with Azure Managed Domains?[Add Azure Managed domains](../../quickstarts/email/add-azure-managed-domains.md) \ No newline at end of file diff --git a/articles/communication-services/concepts/government.md b/articles/communication-services/concepts/government.md new file mode 100644 index 0000000000000..5aa3095f569e2 --- /dev/null +++ b/articles/communication-services/concepts/government.md @@ -0,0 +1,25 @@ +--- +title: Azure Communication Services in Azure Government +description: Learn about using Azure Communication Services in US Government regions +author: harazi +manager: mharbut +services: azure-communication-services + +ms.date: 06/02/2022 +ms.author: harazi +ms.topic: conceptual +ms.service: azure-communication-services +--- + + + + +# Azure Communication Services for US Government + + +Azure Communication Services can be used within [Azure Government](https://azure.microsoft.com/global-infrastructure/government/) to provide compliance with US government requirements for cloud services. In addition to enjoying the features and capabilities of Messaging, Voice and Video calling, developers benefit from the following features that are unique to Azure Government: +- Your personal data is logically segregated from customer content in the commercial Azure cloud. +- Your resource’s customer content is stored within the United States. +- Access to your organization's customer content is restricted to screened Microsoft personnel. 
+ +You can find more information about the Office 365 Government – GCC High offering for US Government customers at [Office 365 Government plans](https://products.office.com/government/compare-office-365-government-plans). Please see [eligibility requirements](https://azure.microsoft.com/global-infrastructure/government/how-to-buy/) for Azure Government. diff --git a/articles/communication-services/concepts/includes/sms-tollfree-pricing.md b/articles/communication-services/concepts/includes/sms-tollfree-pricing.md index 1173063a4b434..9c40146d5c3d3 100644 --- a/articles/communication-services/concepts/includes/sms-tollfree-pricing.md +++ b/articles/communication-services/concepts/includes/sms-tollfree-pricing.md @@ -13,7 +13,7 @@ ms.custom: include file ms.author: prakulka --- >[!Important] ->Toll-free availability is currently restricted to Azure subscriptions that have a billing address in the United States. +>In most cases, customers with Azure subscriptions locations that match the country of the Number offer will be able to buy the Number. However, US and Canada numbers may be purchased by customers with Azure subscription locations in other countries. Please see [here](../numbers/sub-eligibility-number-capability.md) for details on in-country and cross-country purchases. The Toll-free SMS service requires provisioning a toll-free number through the Azure portal. Once a toll-free number is provisioned, pay-as-you-go pricing applies to the leasing fee, and the usage fee. The leasing fee, and the usage fee is determined by the short code type, location of the short code, and the destination. @@ -22,21 +22,29 @@ The Toll-free SMS service requires provisioning a toll-free number through the A ### Leasing Fee Fees for toll-free leasing are charged after provisioning and then recur on a month-to-month basis: -|Number type |Monthly fee | -|--------------|-----------| -|Toll-free (United States) |$2/mo| +|Country |Number type |Monthly fee| +|--------|-----------|------------| +|United States|Toll-free |$2/mo| +|Canada| Toll-free |$2/mo| ### Usage Fee -SMS offers pay-as-you-go pricing. The price is a per-message segment charge based on the destination of the message. Messages can be sent by toll-free phone numbers to phone numbers located within the United States. +SMS offers pay-as-you-go pricing. The price is a per-message segment* charge based on the destination of the message. Messages can be sent by toll-free phone numbers to phone numbers located within the United States, Canada, and Puerto Rico. The following prices include required communications taxes and fees: -|Message Type |Usage Fee | -|-----------|------------| -|Send messages (per message segment*) |$0.0075 | -|Receive messages (per message segment*) |$0.0075 | +|Country| Send Message | Receive Message| +|-----------|---------|--------------| +|United States| $0.0075 | $0.0075| +|Canada | $0.0075 | $0.0075| *Please see our guide on [SMS character limits](../sms/sms-faq.md#what-is-the-sms-character-limit) to learn more about message segments. ## Carrier surcharge -A standard carrier surcharge of $0.0025/sent message segment and $0.0010/received message segment is also applicable. A carrier surcharge is subject to change. See our guide on [Carrier surcharges](https://github.com/Azure/Communication/blob/master/sms-carrier-surcharge.md) for details. +A standard carrier surcharge is applicable to messages exchanged via toll-free numbers. A carrier surcharge is a per-message segment* charge and is subject to change. 
Carrier surcharge is calculated based on the destination of the message for sent messages and based on the sender of the message for received messages. See our guide on [Carrier surcharges](https://github.com/Azure/Communication/blob/master/sms-carrier-surcharge.md) for details. See our pricing example [here](../pricing.md#pricing-example-11-sms-sending) to see how SMS prices are calculated. + +|Country| Send Message | Receive Message| +|-----------|---------|--------------| +|United States| $0.0025 | $0.0010| +|Canada | $0.0085 | NA| + +*Please see our guide on [SMS character limits](../sms/sms-faq.md#what-is-the-sms-character-limit) to learn more about message segments. diff --git a/articles/communication-services/concepts/interop/teams-user-calling.md index dab6d228c6f22..9b3a134206dfd 100644 --- a/articles/communication-services/concepts/interop/teams-user-calling.md +++ b/articles/communication-services/concepts/interop/teams-user-calling.md @@ -61,6 +61,7 @@ The following list presents the set of features, which are currently available i | | Place a group call with PSTN participants | ✔️ | | | Promote a one-to-one call with a PSTN participant into a group call | ✔️ | | | Dial-out from a group call as a PSTN participant | ✔️ | +| | Support for early media | ❌ | | General | Test your mic, speaker, and camera with an audio testing service (available by calling 8:echo123) | ✔️ | | Device Management | Ask for permission to use audio and/or video | ✔️ | | | Get camera list | ✔️ | diff --git a/articles/communication-services/concepts/known-issues.md index a9a35eafddc3b..07975d9017166 100644 --- a/articles/communication-services/concepts/known-issues.md +++ b/articles/communication-services/concepts/known-issues.md @@ -52,6 +52,8 @@ Occasionally, microphone or camera devices won't be released on time, and that c Incoming video streams won't stop rendering if the user is on iOS 15.2+ and is using SDK version 1.4.1-beta.1+; the unmute/start video steps will still be required to re-start outgoing audio and video. +For iOS 15.4+, audio and video should be able to recover automatically in most cases. In some edge cases, the application must call an API to 'unmute' (which can be the result of a user action) to recover the outgoing audio. + ### iOS with Safari crashes and refreshes the page if a user tries to switch from front camera to back camera. Azure Communication Services Calling SDK version 1.2.3-beta.1 introduced a bug that affects all of the calls made from iOS Safari. The problem occurs when a user tries to switch the camera video stream from front to back. Switching cameras results in the Safari browser crashing and reloading the page. diff --git a/articles/communication-services/concepts/numbers/number-types.md index 0916c676fd556..5262c66de4437 100644 --- a/articles/communication-services/concepts/numbers/number-types.md +++ b/articles/communication-services/concepts/numbers/number-types.md @@ -18,6 +18,8 @@ Azure Communication Services allows you to use phone numbers to make voice calls ## Available options +[!INCLUDE [Regional Availability Notice](../../includes/regional-availability-include.md)] + Azure Communication Services offers three types of Numbers: Toll-Free, Local, and Short Codes.
- **To send or receive an SMS**, choose a Toll-Free Number or a Short Code @@ -27,7 +29,7 @@ The table below summarizes these number types with supported capabilities: | Type | Example | Send SMS | Receive SMS | Make Calls | Receive Calls | Typical Use Case | Restrictions | | :-------------------------------------------------------------------- | :---------------- | :------: | :---------: | :--------: | :-----------: | :------------------------------------------- | :------------- | -| [Toll-Free](../../quickstarts/telephony/get-phone-number.md) | +1 (8AB) XYZ PQRS | Yes | Yes | Yes | Yes | Receive calls on IVR bots, SMS Notifications | SMS in US only | +| [Toll-Free](../../quickstarts/telephony/get-phone-number.md) | +1 (8AB) XYZ PQRS | Yes | Yes | Yes | Yes | Receive calls on IVR bots, SMS Notifications | SMS in US and CA only | | [Local (Geographic)](../../quickstarts/telephony/get-phone-number.md) | +1 (ABC) XYZ PQRS | No | No | Yes | Yes | Geography Specific Number | Calling Only | | [Short-Codes](../../quickstarts/sms/apply-for-short-code.md) | ABC-XYZ | Yes | Yes | No | No | High-velocity SMS | SMS only | diff --git a/articles/communication-services/concepts/numbers/sub-eligibility-number-capability.md b/articles/communication-services/concepts/numbers/sub-eligibility-number-capability.md index 9a5f501bdae62..e739855bd9d1c 100644 --- a/articles/communication-services/concepts/numbers/sub-eligibility-number-capability.md +++ b/articles/communication-services/concepts/numbers/sub-eligibility-number-capability.md @@ -10,6 +10,7 @@ ms.author: sadas ms.date: 03/04/2022 ms.topic: conceptual ms.service: azure-communication-services +ms.custom: references_regions --- # Subscription eligibility and number capabilities @@ -22,10 +23,12 @@ To acquire a phone number, you need to be on a paid Azure subscription. Phone nu Additional details on eligible subscription types are as follows: -| Number Type | Eligible Azure Agreement Type | -| :------------------------------- | :------------------------------------------------------------------------------------------------------- | -| Toll-Free and Local (Geographic) | Modern Customer Agreement (Field and Customer Led), Modern Partner Agreement (CSP), Enterprise Agreement | -| Short-Codes | Modern Customer Agreement (Field Led) and Enterprise Agreement Only | +| Number Type | Eligible Azure Agreement Type | +| :------------------------------- | :-------------------------------------------------------------------------------------------------------- | +| Toll-Free and Local (Geographic) | Modern Customer Agreement (Field and Customer Led), Modern Partner Agreement (CSP), Enterprise Agreement* | +| Short-Codes | Modern Customer Agreement (Field Led) and Enterprise Agreement Only | + +\* Allowing the purchase of Italian phone numbers for CSP and LSP customers is planned only for General Availability launch. 
## Number capabilities @@ -38,39 +41,90 @@ The tables below summarize current availability: | Number | Type | Send SMS | Receive SMS | Make Calls | Receive Calls | | :---------------- | :---------- | :------------------- | :------------------- | :------------------- | :--------------------- | | USA & Puerto Rico | Toll-Free | General Availability | General Availability | General Availability | General Availability\* | -| USA & Puerto Rico | Local | Not Available | Not Available | General Availability | General Availability\* | -| USA | Short-Codes | Public Preview | Public Preview\* | Not Available | Not Available | +| USA & Puerto Rico | Local | - | - | General Availability | General Availability\* | +| USA | Short-Codes | Public Preview | Public Preview\* | - | - | \* Available through Azure Bot Framework and Dynamics only ## Customers with UK Azure billing addresses +| Number | Type | Send SMS | Receive SMS | Make Calls | Receive Calls | +| :----------------- | :------------- | :------------------- | :------------------- | :--------------- | :--------------- | +| UK | Toll-Free | - | - | Public Preview | Public Preview\* | +| UK | Local | - | - | Public Preview | Public Preview\* | +| USA & Puerto Rico | Toll-Free | General Availability | General Availability | Public Preview | Public Preview\* | +| USA & Puerto Rico | Local | - | - | Public Preview | Public Preview\* | +| Canada | Toll-Free | Public Preview | Public Preview | Public Preview | Public Preview\* | +| Canada | Local | - | - | Public Preview | Public Preview\* | + +\* Available through Azure Bot Framework and Dynamics only + +## Customers with Ireland Azure billing addresses + +| Number | Type | Send SMS | Receive SMS | Make Calls | Receive Calls | +| :---------------- | :-------- | :------------------- | :------------------- | :------------------- | :--------------- | +| Ireland | Toll-Free | - | - | Public Preview | Public Preview\* | +| Ireland | Local | - | - | Public Preview | Public Preview\* | +| USA & Puerto Rico | Toll-Free | General Availability | General Availability | Public Preview | Public Preview\* | +| USA & Puerto Rico | Local | - | - | Public Preview | Public Preview\* | +| Canada | Toll-Free | Public Preview | Public Preview | Public Preview | Public Preview\* | +| Canada | Local | - | - | Public Preview | Public Preview\* | +| UK | Toll-Free | - | - | Public Preview | Public Preview\* | +| UK | Local | - | - | Public Preview | Public Preview\* | + + +\* Available through Azure Bot Framework and Dynamics only + +## Customers with Denmark Azure billing addresses + | Number | Type | Send SMS | Receive SMS | Make Calls | Receive Calls | | :---------------- | :-------- | :------------------- | :------------------- | :------------- | :--------------- | -| UK | Toll-Free | Not Available | Not Available | Public Preview | Public Preview\* | -| UK | Local | Not Available | Not Available | Public Preview | Public Preview\* | +| Denmark | Toll-Free | - | - | Public Preview | Public Preview\* | +| Denmark | Local | - | - | Public Preview | Public Preview\* | | USA & Puerto Rico | Toll-Free | General Availability | General Availability | Public Preview | Public Preview\* | -| USA & Puerto Rico | Local | Not Available | Not Available | Public Preview | Public Preview\* | +| USA & Puerto Rico | Local | - | - | Public Preview | Public Preview\* | +| Canada | Toll-Free | Public Preview | Public Preview | Public Preview | Public Preview\* | +| Canada | Local | - | - | Public Preview | Public Preview\* | +| UK | Toll-Free 
| - | - | Public Preview | Public Preview\* | +| UK | Local | - | - | Public Preview | Public Preview\* | \* Available through Azure Bot Framework and Dynamics only -## Customers with Ireland Azure billing addresses +## Customers with Canada Azure billing addresses + +| Number | Type | Send SMS | Receive SMS | Make Calls | Receive Calls | +| :---------------- | :-------- | :------------------- | :------------------- | :------------- | :--------------- | +| Canada | Toll-Free | Public Preview | Public Preview | Public Preview | Public Preview\* | +| Canada | Local | - | - | Public Preview | Public Preview\* | +| USA & Puerto Rico | Toll-Free | General Availability | General Availability | Public Preview | Public Preview\* | +| USA & Puerto Rico | Local | - | - | Public Preview | Public Preview\* | +| UK | Toll-Free | - | - | Public Preview | Public Preview\* | +| UK | Local | - | - | Public Preview | Public Preview\* | -| Number | Type | Send SMS | Receive SMS | Make Calls | Receive Calls | -| :---------------- | :-------- | :------------------- | :------------------- | :------------------- | :--------------------- | -| USA & Puerto Rico | Toll-Free | General Availability | General Availability | General Availability | General Availability\* | -| USA & Puerto Rico | Local | Not Available | Not Available | General Availability | General Availability\* | \* Available through Azure Bot Framework and Dynamics only -## Customers with Denmark Azure Billing Addresses +## Customers with Italy Azure billing addresses | Number | Type | Send SMS | Receive SMS | Make Calls | Receive Calls | | :------ | :-------- | :------------ | :------------ | :------------- | :--------------- | -| Denmark | Toll-Free | Not Available | Not Available | Public Preview | Public Preview\* | -| Denmark | Local | Not Available | Not Available | Public Preview | Public Preview\* | +| Italy | Toll-Free** | - | - | Public Preview | Public Preview\* | +| Italy | Local** | - | - | Public Preview | Public Preview\* | + +\* Available through Azure Bot Framework and Dynamics only + +\** Allowing the purchase of Italian phone numbers for CSP and LSP customers is planned only for General Availability launch. + +## Customers with Sweden Azure billing addresses + +| Number | Type | Send SMS | Receive SMS | Make Calls | Receive Calls | +| :---------------- | :-------- | :------------------- | :------------------- | :------------- | :--------------- | +| Sweden | Toll-Free | - | - | Public Preview | Public Preview\* | +| Sweden | Local | - | - | Public Preview | Public Preview\* | +| Canada | Toll-Free | Public Preview | Public Preview | Public Preview | Public Preview\* | +| Canada | Local | - | - | Public Preview | Public Preview\* | | USA & Puerto Rico | Toll-Free | General Availability | General Availability | Public Preview | Public Preview\* | -| USA & Puerto Rico | Local | Not Available | Not Available | Public Preview | Public Preview\* | +| USA & Puerto Rico | Local | - | - | Public Preview | Public Preview\* | \* Available through Azure Bot Framework and Dynamics only diff --git a/articles/communication-services/concepts/pricing.md b/articles/communication-services/concepts/pricing.md index d7e5db7a6882a..ccc9483f8aba3 100644 --- a/articles/communication-services/concepts/pricing.md +++ b/articles/communication-services/concepts/pricing.md @@ -127,7 +127,7 @@ Alice has ordered a product from Contoso and struggles to set it up. 
Alice calls - One participant (Bob) x 30 minutes x $0.004 per participant per minute = $0.12 [both video and audio are charged at the same rate] - One participant (Charlie) x 25 minutes x $0.000 per participant per minute = $0.0*. -*Charlie's participation is covered by her Teams license. +*Charlie's participation is covered by his Teams license. **Total cost of the visit**: - Teams cost for a user joining using the Communication Services JavaScript SDK: 25 minutes from Teams minute pool @@ -190,13 +190,47 @@ Rose sees the messages and starts chatting. In the meanwhile Casey gets a call a - Number of messages sent (20 + 30 + 18 + 30 + 25 + 35) x $0.0008 = $0.1264 -## SMS (Short Messaging Service) and Telephony +## SMS (Short Messaging Service) -Please refer to the following links for details on SMS and Telephony pricing +Azure Communication Services allows for adding SMS messaging capabilities to your applications. You can embed the experience into your applications using JavaScript, Java, Python, or .NET SDKs. Refer to our [full list of available SDKs](./sdk-options.md). -- [SMS Pricing Details](./sms-pricing.md) -- [PSTN Pricing Details](./pstn-pricing.md) +### Pricing + +The SMS usage price is a per-message segment charge based on the destination of the message. The carrier surcharge is calculated based on the destination of the message for sent messages and based on the sender of the message for received messages. Please refer to the [SMS Pricing Page](./sms-pricing.md) for pricing details. + +### Pricing example: 1:1 SMS sending + +Contoso is a healthcare company with clinics in US and Canada. Contoso has a Patient Appointment Reminder application that sends out SMS appointment reminders to patients regarding upcoming appointments. + +- The application sends appointment reminders to 20 US patients and 30 Canada patients using a US toll-free number. +- Message length of the reminder message is 150 chars < 1 message segment*. Hence, total sent messages are 20 message segments for US and 30 message segments for CA. + +**Cost calculations** + +- US - 20 message segments x $0.0075 per sent message segment + 20 message segments x $0.0025 carrier surcharge per sent message segment = $0.20 +- CA - 30 message segments x $0.0075 per sent message segment + 30 message segments x $0.0085 carrier surcharge per sent message segment = $0.48 + +**Total cost for the appointment reminders for 20 US patients and 30 CA patients**: $0.20 + $0.48 = $0.68 +### Pricing example: 1:1 SMS receiving + +Contoso is a healthcare company with clinics in US and Canada. Contoso has a Patient Appointment Reminder application that sends out SMS appointment reminders to patients regarding upcoming appointments. Patients can respond to the messages with "Reschedule" and include their date/time preference to reschedule their appointments. + +- The application sends appointment reminders to 20 US patients and 30 Canada patients using a CA toll-free number. +- 6 US patients and 4 CA patients respond back to reschedule their appointments. Contoso receives 10 SMS responses in total. +- Message length of the reschedule messages is less than 1 message segment*. Hence, total messages received are 6 message segments for US and 4 message segments for CA. 
+ +**Cost calculations** + +- US - 6 message segments x $0.0075 per received message segment + 6 message segments x $0.0010 carrier surcharge per received message segment = $0.051 +- CA - 4 message segments x $0.0075 per received message segment = $0.03 + +**Total cost for receiving patient responses from 6 US patients and 4 CA patients**: $0.051 + $0.03 = $0.081 + +## Telephony +Please refer to the following links for details on Telephony pricing + +- [PSTN Pricing Details](./pstn-pricing.md) ## Next Steps Get started with Azure Communication Services: diff --git a/articles/communication-services/concepts/pstn-pricing.md b/articles/communication-services/concepts/pstn-pricing.md index b4a8ee9c9ce2f..97608ec9d057f 100644 --- a/articles/communication-services/concepts/pstn-pricing.md +++ b/articles/communication-services/concepts/pstn-pricing.md @@ -8,7 +8,7 @@ ms.date: 1/28/2022 ms.topic: conceptual ms.service: azure-communication-services --- -# Telephony (PSTN) Pricing +# Telephony (PSTN) pricing > [!IMPORTANT] > Number Retention and Portability: Phone numbers that are assigned to you during any preview program may need to be returned to Microsoft if you do not meet regulatory requirements before General Availability. During private preview and public preview, telephone numbers are not eligible for porting. [Details on offers in Public Preview / GA](../concepts/numbers/sub-eligibility-number-capability.md) @@ -19,15 +19,15 @@ In most cases, customers with Azure subscriptions locations that match the count All prices shown below are in USD. -## United States Telephony Offers +## United States telephony offers -### Phone Number Leasing Charges +### Phone number leasing charges |Number type |Monthly fee | |--------------|-----------| |Geographic |USD 1.00/mo | |Toll-Free |USD 2.00/mo | -### Usage Charges +### Usage charges |Number type |To make calls* |To receive calls| |--------------|-----------|------------| |Geographic |Starting at USD 0.0130/min |USD 0.0085/min | @@ -35,15 +35,15 @@ All prices shown below are in USD. \* For destination-specific pricing for making outbound calls, please refer to details [here](https://github.com/Azure/Communication/blob/master/pricing/communication-services-pstn-rates.csv) -## United Kingdom Telephony Offers +## United Kingdom telephony offers -### Phone Number Leasing Charges +### Phone number leasing charges |Number type |Monthly fee | |--------------|-----------| |Geographic |USD 1.00/mo | |Toll-Free |USD 2.00/mo | -### Usage Charges +### Usage charges |Number type |To make calls* |To receive calls| |--------------|-----------|------------| |Geographic |Starting at USD 0.0150/min |USD 0.0090/min | @@ -51,15 +51,15 @@ All prices shown below are in USD. \* For destination-specific pricing for making outbound calls, please refer to details [here](https://github.com/Azure/Communication/blob/master/pricing/communication-services-pstn-rates.csv) -## Denmark Telephony Offers +## Denmark telephony offers -### Phone Number Leasing Charges +### Phone number leasing charges |Number type |Monthly fee | |--------------|-----------| |Geographic |USD 0.82/mo | |Toll-Free |USD 25.00/mo | -### Usage Charges +### Usage charges |Number type |To make calls* |To receive calls| |--------------|-----------|------------| |Geographic |Starting at USD 0.0190/min |USD 0.0100/min | @@ -67,6 +67,70 @@ All prices shown below are in USD. 
\* For destination-specific pricing for making outbound calls, please refer to details [here](https://github.com/Azure/Communication/blob/master/pricing/communication-services-pstn-rates.csv) +## Canada telephony offers + +### Phone number leasing charges +|Number type |Monthly fee | +|--------------|-----------| +|Geographic |USD 1.00/mo | +|Toll-Free |USD 2.00/mo | + +### Usage charges +|Number type |To make calls* |To receive calls| +|--------------|-----------|------------| +|Geographic |Starting at USD 0.0130/min |USD 0.0085/min | +|Toll-free |Starting at USD 0.0130/min |USD 0.0220/min | + +\* For destination-specific pricing for making outbound calls, please refer to details [here](https://github.com/Azure/Communication/blob/master/pricing/communication-services-pstn-rates.csv) + +## Ireland telephony offers + +### Phone number leasing charges +|Number type |Monthly fee | +|--------------|-----------| +|Geographic |USD 1.50/mo | +|Toll-Free |USD 19.88/mo | + +### Usage charges +|Number type |To make calls* |To receive calls| +|--------------|-----------|------------| +|Geographic |Starting at USD 0.0160/min |USD 0.0100/min | +|Toll-free |Starting at USD 0.0160/min |Starting at USD 0.0448/min | + +\* For destination-specific pricing for making outbound calls, please refer to details [here](https://github.com/Azure/Communication/blob/master/pricing/communication-services-pstn-rates.csv) + +## Italy telephony offers + +### Phone number leasing charges +|Number type |Monthly fee | +|--------------|-----------| +|Geographic |USD 2.92/mo | +|Toll-Free |USD 23.39/mo | + +### Usage charges +|Number type |To make calls* |To receive calls| +|--------------|-----------|------------| +|Geographic |Starting at USD 0.0160/min |USD 0.0100/min | +|Toll-free |Starting at USD 0.0160/min |USD 0.3415/min | + +\* For destination-specific pricing for making outbound calls, please refer to details [here](https://github.com/Azure/Communication/blob/master/pricing/communication-services-pstn-rates.csv) + +## Sweden telephony offers + +### Phone number leasing charges +|Number type |Monthly fee | +|--------------|-----------| +|Geographic |USD 1.00/mo | +|Toll-Free |USD 21.05/mo | + +### Usage charges +|Number type |To make calls* |To receive calls| +|--------------|-----------|------------| +|Geographic |Starting at USD 0.0160/min |USD 0.0080/min | +|Toll-free |Starting at USD 0.0160/min |USD 0.1138/min | + +\* For destination-specific pricing for making outbound calls, please refer to details [here](https://github.com/Azure/Communication/blob/master/pricing/communication-services-pstn-rates.csv) + *** Note: Pricing for all countries is subject to change as pricing is market-based and depends on third-party suppliers of telephony services. Additionally, pricing may include requisite taxes and fees. diff --git a/articles/communication-services/concepts/reference.md b/articles/communication-services/concepts/reference.md index 0a2a88df41a2e..c141b29e164f2 100644 --- a/articles/communication-services/concepts/reference.md +++ b/articles/communication-services/concepts/reference.md @@ -24,6 +24,7 @@ For each area, we have external pages to track and review our SDKs. 
You can cons | Calling | [npm](https://www.npmjs.com/package/@azure/communication-calling) | - | - | - | [GitHub](https://github.com/Azure/Communication/releases) ([docs](/objectivec/communication-services/calling/)) | [Maven](https://search.maven.org/artifact/com.azure.android/azure-communication-calling/) | - | | Chat | [npm](https://www.npmjs.com/package/@azure/communication-chat) | [NuGet](https://www.nuget.org/packages/Azure.Communication.Chat) | [PyPi](https://pypi.org/project/azure-communication-chat/) | [Maven](https://search.maven.org/search?q=a:azure-communication-chat) | [GitHub](https://github.com/Azure/azure-sdk-for-ios/releases) | [Maven](https://search.maven.org/search?q=a:azure-communication-chat) | - | | Common | [npm](https://www.npmjs.com/package/@azure/communication-common) | [NuGet](https://www.nuget.org/packages/Azure.Communication.Common/) | N/A | [Maven](https://search.maven.org/search?q=a:azure-communication-common) | [GitHub](https://github.com/Azure/azure-sdk-for-ios/releases) | [Maven](https://search.maven.org/artifact/com.azure.android/azure-communication-common) | - | +| Email | [npm](https://www.npmjs.com/package/@azure/communication-email) | [NuGet](https://www.nuget.org/packages/Azure.Communication.Email) | - | - | - | - | - | | Identity | [npm](https://www.npmjs.com/package/@azure/communication-identity) | [NuGet](https://www.nuget.org/packages/Azure.Communication.Identity) | [PyPi](https://pypi.org/project/azure-communication-identity/) | [Maven](https://search.maven.org/search?q=a:azure-communication-identity) | - | - | - | | Network Traversal | [npm](https://www.npmjs.com/package/@azure/communication-network-traversal) | [NuGet](https://www.nuget.org/packages/Azure.Communication.NetworkTraversal) | [PyPi](https://pypi.org/project/azure-communication-networktraversal/) | [Maven](https://search.maven.org/search?q=a:azure-communication-networktraversal) | - | - | - | | Phone numbers | [npm](https://www.npmjs.com/package/@azure/communication-phone-numbers) | [NuGet](https://www.nuget.org/packages/Azure.Communication.phonenumbers) | [PyPi](https://pypi.org/project/azure-communication-phonenumbers/) | [Maven](https://search.maven.org/search?q=a:azure-communication-phonenumbers) | - | - | - | diff --git a/articles/communication-services/concepts/sms-pricing.md b/articles/communication-services/concepts/sms-pricing.md index 0272bca370a94..961f475c5ec91 100644 --- a/articles/communication-services/concepts/sms-pricing.md +++ b/articles/communication-services/concepts/sms-pricing.md @@ -12,7 +12,7 @@ zone_pivot_groups: acs-tollfree-shortcode # SMS Pricing > [!IMPORTANT] -> SMS messages can be sent to and received from United States phone numbers. Phone numbers located in other geographies are not yet supported by Communication Services SMS. +> SMS messages can be sent to and received from United States and Canada phone numbers. Phone numbers located in other geographies are not yet supported by Communication Services SMS. 
::: zone pivot="tollfree" [!INCLUDE [Toll-Free](./includes/sms-tollfree-pricing.md)] @@ -30,7 +30,7 @@ In this quickstart, you learned how to send SMS messages using Azure Communicati > [Learn more about SMS](../concepts/sms/concepts.md) The following documents may be interesting to you: -- Familiarize yourself with the [SMS SDK](../concepts/sms/sdk-features.md) +- Familiarize yourself with one of the [SMS SDKs](../concepts/sms/sdk-features.md) - Get an SMS capable [phone number](../quickstarts/telephony/get-phone-number.md) - Get a [short code](../quickstarts/sms/apply-for-short-code.md) - [Phone number types in Azure Communication Services](../concepts/telephony/plan-solution.md) diff --git a/articles/communication-services/concepts/sms/messaging-policy.md b/articles/communication-services/concepts/sms/messaging-policy.md index d15c574483f20..2c3795c32fab4 100644 --- a/articles/communication-services/concepts/sms/messaging-policy.md +++ b/articles/communication-services/concepts/sms/messaging-policy.md @@ -13,7 +13,7 @@ ms.service: azure-communication-services --- # Azure Communication Services Messaging Policy -Azure Communication Services is transforming the way our customers engage with their clients by building rich, custom communication experiences that take advantage of the same enterprise-grade services that back Microsoft Teams and Skype. Integrate SMS messaging functionality into your communications solutions to reach your customers anytime and anywhere they need support. You just need to keep in mind a few messaging requirements and industry standards to get started. +Azure Communication Services is transforming the way our customers engage with their clients by building rich, custom communication experiences that take advantage of the same enterprise-grade services that back Microsoft Teams, Skype, and Exchange. You can easily integrate SMS and email messaging functionality into your communications solutions to reach your customers anytime and anywhere they need support. You just need to keep in mind a few messaging requirements and industry standards to get started. We know that messaging requirements can seem daunting to learn, but they're as easy as remembering “COMS”: @@ -30,15 +30,15 @@ We developed this messaging policy to help you satisfy regulatory requirements a ### What is consent? -Consent is an agreement between you and the message recipient that allows you to send automated messages to them. You must obtain consent before sending the first message, and you should make clear to the recipient that they're agreeing to receive messages from you. This procedure is known as receiving “prior express consent” from the individual you intend to message. +Consent is an agreement between you and the message recipient that allows you to send application to person (A2P) messages to them. You must obtain consent before sending the first message, and you should make clear to the recipient that they're agreeing to receive messages from you. This procedure is known as receiving "prior express consent" from the individual you intend to message. -The messages that you send must be the same type of messages that the recipient agreed to receive and should only be sent to the number that the recipient provided to you. If you intend to send informational messages, such as appointment reminders or alerts, then consent can be either written or oral. 
If you intend to send promotional messages, such as sales or marketing messages that promote a product or service, then consent must be written. +The messages that you send must be the same type of messages that the recipient agreed to receive and should only be sent to the number or email address that the recipient provided to you. If you intend to send informational messages, such as appointment reminders or alerts, then consent can be either written or oral. If you intend to send promotional messages, such as sales or marketing messages that promote a product or service, then consent must be written. ### How do you obtain consent? Consent can be obtained in a variety of ways, such as: -- When a user enters their telephone number into a website, +- When a user enters their telephone number or email address into a website, - When a user initiates a text message exchange, or - When a user sends a sign-up keyword to your phone number. @@ -49,7 +49,7 @@ Regardless of how consent is obtained, you and your customers must ensure that t - Provide a “Call to Action” before obtaining consent. You and your customers should provide potential message recipients with a “call to action” that invites them to opt-in to your messaging program. The call to action should include, at a minimum: (1) the identity of the message sender, (2) clear opt-in instructions, (3) opt-out instructions, and (4) any associated messaging fees. - Consent isn't transferable or assignable. Any consent that an individual provides to you cannot be transferred or sold to an unaffiliated third party. If you collect an individual’s consent for a third party, then you must clearly identify the third party to the individual. You must also state that the consent you obtained applies only to communications from the third party. -- Consent is limited in purpose. An individual who provides their number for a particular purpose consents to receive communications only for that specific purpose and from that specific message sender. Before obtaining consent, you should clearly notify the intended message recipient if you'll send recurring messages or messages from an affiliate. +- Consent is limited in purpose. An individual who provides their number or an email address for a particular purpose consents to receive communications only for that specific purpose and from that specific message sender. Before obtaining consent, you should clearly notify the intended message recipient if you'll send recurring messages or messages from an affiliate. ### Consent best practices: @@ -57,7 +57,7 @@ In addition to the messaging requirements discussed above, you may want to imple - Detailed “Call to Action” information. To ensure that you obtain appropriate consent, provide - The name or description of your messaging program or product - - The number(s) from which recipients will receive messages, and + - The number(s) or email address(es) from which recipients will receive messages, and - Any applicable terms and conditions before an individual opts-in to receiving messages from you. - Accurate records of consent. You should retain records of any consent that an individual provides to you for at least four years. Records of consent can include: - Timestamps @@ -104,16 +104,24 @@ Message recipients may revoke consent and opt-out of receiving future messages t Ensure that message recipients can opt-out of future messages at any time. You must also offer multiple opt-out options. 
After a message recipient opts-out, you should not send additional messages unless the individual provides renewed consent. -One of the most common opt-out mechanisms is to include a “STOP” keyword in the initial message of every new conversation. Be prepared to remove customers that reply with a lowercase “stop” or other common keywords, such as “unsubscribe” or “cancel.” After an individual revokes consent, you should remove them from all recurring messaging campaigns unless they expressly elect to continue receiving messages from a particular program. +One of the most common opt-out mechanisms in SMS applications is to include a “STOP” keyword in the initial message of every new conversation. Be prepared to remove customers that reply with a lowercase “stop” or other common keywords, such as “unsubscribe” or “cancel.” + +For email, the most common mechanism is to embed an unsubscribe link in every email sent to the customer. If the customer selects the unsubscribe link, you should be prepared to remove that customer's email address(es) from your communication list. + +After an individual revokes consent, you should remove them from all recurring messaging campaigns unless they expressly elect to continue receiving messages from a particular program. ### Opt-out best practices: -In addition to keywords, other common opt-out mechanisms include providing customers with a designated opt-out e-mail address, the phone number of customer support staff, or a link to unsubscribe on your webpage. +In addition to keywords, other common opt-out mechanisms include providing customers with a designated opt-out e-mail address, the phone number of customer support staff, or a link to unsubscribe embedded in an email message you sent or available on your webpage. -### How we handle opt-out requests: +### How we handle opt-out requests for SMS If an individual requests to opt-out of future messages on an Azure Communication Services toll-free number, then all further traffic from that number will be automatically stopped. However, you must still ensure that you do not send additional messages for that messaging campaign from new or different numbers. If you have separately obtained express consent for a different messaging campaign, then you may continue to send messages from a different number for that campaign. Check out our FAQ section to learn more on [Opt-out handling](https://github.com/MicrosoftDocs/azure-docs/blob/master/articles/communication-services/concepts/sms/sms-faq.md#how-can-i-receive-messages-using-azure-communication-services)
+Spoofing is the act of causing a misleading or inaccurate originating number or email address to display on a message recipient’s device. We strongly discourage you and any service provider that you use from sending spoofed messages. Spoofing shields the identity of the message sender and prevents message recipients from easily opting out of unwanted communications. We also require that you abide by all applicable spoofing laws. ## Final thoughts diff --git a/articles/communication-services/concepts/sms/sms-faq.md b/articles/communication-services/concepts/sms/sms-faq.md index 88a52a99765d4..aa7a3b8f7e29a 100644 --- a/articles/communication-services/concepts/sms/sms-faq.md +++ b/articles/communication-services/concepts/sms/sms-faq.md @@ -108,7 +108,7 @@ Rate Limits for SMS: ## Carrier Fees ### What are the carrier fees for SMS? -In July 2021, US carriers started charging an added fee for SMS messages sent and/or received from toll-free numbers and short codes. Carrier fees for SMS are charged per message segment based on the destination. Azure Communication Services charges a standard carrier fee per message segment. Carrier fees are subject to change by mobile carriers. Please refer to [SMS pricing](../sms-pricing.md) for more details. +US and CA carriers charge an added fee for SMS messages sent and/or received from toll-free numbers and short codes. The carrier surcharge is calculated based on the destination of the message for sent messages and based on the sender of the message for received messages. Azure Communication Services charges a standard carrier fee per message segment. Carrier fees are subject to change by mobile carriers. Please refer to [SMS pricing](../sms-pricing.md) for more details. ### When will we come to know of changes to these surcharges? As with similar Azure services, customers will be notified at least 30 days prior to the implementation of any price changes. These charges will be reflected on our SMS pricing page along with the effective dates. diff --git a/articles/communication-services/concepts/telephony/direct-routing-infrastructure.md b/articles/communication-services/concepts/telephony/direct-routing-infrastructure.md index 566486fbeeb4d..f530a68ae7401 100644 --- a/articles/communication-services/concepts/telephony/direct-routing-infrastructure.md +++ b/articles/communication-services/concepts/telephony/direct-routing-infrastructure.md @@ -103,11 +103,11 @@ The SBC makes a DNS query to resolve sip.pstnhub.microsoft.com. Based on the SBC ## Media traffic: IP and Port ranges -The media traffic flows to and from a separate service called Media Processor. At the moment of publishing, Media Processor for Communication Services can use any Azure IP address. -Download [the full list of addresses](https://www.microsoft.com/download/details.aspx?id=56519). +The media traffic flows to and from a separate service in the Microsoft Cloud called Media Processor. The IP address range for media traffic: +- `20.202.0.0/16 (IP addresses from 20.202.0.1 to 20.202.255.254)` -### Port range -The port range of the Media Processors is shown in the following table: +### Port ranges +The port ranges of the Media Processors are shown in the following table: |Traffic|From|To|Source port|Destination port| |:--- |:--- |:--- |:--- |:--- | @@ -120,7 +120,7 @@ The port range of the Media Processors is shown in the following table: ## Media traffic: Media processors geography -The media traffic flows via components called media processors. 
Media processors are placed in the same datacenters as SIP proxies: +Media Processors are placed in the same datacenters as SIP proxies: - NOAM (US South Central, two in US West and US East datacenters) - Europe (UK South, France Central, Amsterdam and Dublin datacenters) - Asia (Singapore datacenter) diff --git a/articles/communication-services/concepts/telephony/direct-routing-provisioning.md b/articles/communication-services/concepts/telephony/direct-routing-provisioning.md index 6de9600fc3374..9b3e6c8e7f494 100644 --- a/articles/communication-services/concepts/telephony/direct-routing-provisioning.md +++ b/articles/communication-services/concepts/telephony/direct-routing-provisioning.md @@ -1,18 +1,19 @@ --- -title: Azure direct routing provisioning and configuration - Azure Communication Services -description: Learn how to add a Session Border Controller and configure voice routing for Azure Communication Services direct routing +title: Use direct routing to connect existing telephony service +description: Learn how to add a Session Border Controller and configure voice routing for Azure Communication Services direct routing. author: boris-bazilevskiy manager: nmurav services: azure-communication-services ms.author: bobazile -ms.date: 06/30/2021 +ms.date: 05/26/2022 ms.topic: conceptual ms.service: azure-communication-services ms.subservice: pstn +ms.custom: kr2b-contr-experiment --- -# Session Border Controllers and voice routing +# Use direct routing to connect to existing telephony service Azure Communication Services direct routing enables you to connect your existing telephony infrastructure to Azure. The article lists the high-level steps required for connecting a supported Session Border Controller (SBC) to direct routing and how voice routing works for the enabled Communication resource. [!INCLUDE [Public Preview](../../includes/public-preview-include-document.md)] @@ -24,22 +25,34 @@ For information about whether Azure Communication Services direct routing is the ### Configure using Azure portal 1. In the left navigation, select Direct routing under Voice Calling - PSTN and then select Configure from the Session Border Controller tab. -1. Enter a fully qualified domain name and signaling port for the SBC. - -- SBC certificate must match the name; wildcard certificates are supported. -- The *.onmicrosoft.com domain can’t be used for the FQDN of the SBC. -For the full list of requirements, refer to [Azure direct routing infrastructure requirements](./direct-routing-infrastructure.md). - :::image type="content" source="../media/direct-routing-provisioning/add-session-border-controller.png" alt-text="Adding Session Border Controller."::: -- When you're done, select Next. -If everything set up correctly, you should see exchange of OPTIONS messages between Microsoft and your Session Border Controller, user your SBC monitoring/logs to validate the connection. +2. Enter a fully qualified domain name and signaling port for the SBC. + - SBC certificate must match the name; wildcard certificates are supported. + - The *.onmicrosoft.com domain can’t be used for the FQDN of the SBC. + + For the full list of requirements, refer to [Azure direct routing infrastructure requirements](./direct-routing-infrastructure.md). + + :::image type="content" source="../media/direct-routing-provisioning/add-session-border-controller.png" alt-text="Screenshot of Adding Session Border Controller."::: + +3. When you're done, select Next. 
+ + If everything is set up correctly, you should see an exchange of OPTIONS messages between Microsoft and your Session Border Controller. Use your SBC monitoring/logs to validate the connection. ## Voice routing considerations -Azure Communication Services direct routing has a routing mechanism that allows a call to be sent to a specific Session Border Controller (SBC) based on the called number pattern. -When you add a direct routing configuration to a resource, all calls made from this resource’s instances (identities) will try a direct routing trunk first. The routing is based on a dialed number and a match in voice routes configured for the resource. If there's a match, the call goes through the direct routing trunk. If there's no match, the next step is to process the `alternateCallerId` parameter of the `callAgent.startCall` method. If the resource is enabled for Voice Calling (PSTN) and has at least one number purchased from Microsoft, the `alternateCallerId` is checked. If the `alternateCallerId` matches one of a purchased number for the resource, the call is routed through the Voice Calling (PSTN) using Microsoft infrastructure. If `alternateCallerId` parameter doesn't match any of the purchased numbers, the call will fail. The diagram below demonstrates the Azure Communication Services voice routing logic. +Azure Communication Services direct routing has a routing mechanism that allows a call to be sent to a specific SBC based on the called number pattern. + +When you add a direct routing configuration to a resource, all calls made from this resource’s instances (identities) will try a direct routing trunk first. The routing is based on a dialed number and a match in voice routes configured for the resource. + +- If there's a match, the call goes through the direct routing trunk. +- If there's no match, the next step is to process the `alternateCallerId` parameter of the `callAgent.startCall` method. +- If the resource is enabled for Voice Calling (PSTN) and has at least one number purchased from Microsoft, the `alternateCallerId` is checked. +- If the `alternateCallerId` matches a purchased number for the resource, the call is routed through the Voice Calling (PSTN) using Microsoft infrastructure. +- If `alternateCallerId` parameter doesn't match any of the purchased numbers, the call will fail. + +The diagram below demonstrates the Azure Communication Services voice routing logic. -:::image type="content" source="../media/direct-routing-provisioning/voice-routing-diagram.png" alt-text="Communication Services outgoing voice routing."::: +:::image type="content" source="../media/direct-routing-provisioning/voice-routing-diagram.png" alt-text="Diagram of outgoing voice routing flowchart."::: ## Voice routing examples The following examples display voice routing in a call flow. @@ -66,9 +79,9 @@ If you created one voice route with a pattern `^\+1(425|206)(\d{7})$` and added ### Configure using Azure portal -:::image type="content" source="../media/direct-routing-provisioning/voice-routing-configuration.png" alt-text="Communication Services outgoing voice routing configuration."::: +:::image type="content" source="../media/direct-routing-provisioning/voice-routing-configuration.png" alt-text="Screenshot of outgoing voice routing configuration."::: -Give your Voice Route a name, specify the number pattern using regular expressions, and select SBC for that pattern. +Give your voice route a name, specify the number pattern using regular expressions, and select SBC for that pattern. 
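Before saving a route, it can help to sanity-check a candidate pattern against a few dialed numbers. The short script below is an illustrative sketch only; the pattern and phone numbers are hypothetical examples in the spirit of the voice routing examples in this article, not required values, and the script doesn't reflect how the service evaluates routes internally.

```javascript
// Illustrative sketch: test a candidate voice route pattern against sample dialed numbers.
// The pattern and numbers are hypothetical examples.
const routePattern = /^\+1(425|206)(\d{7})$/;

for (const dialed of ["+14251234567", "+12061234567", "+16041234567"]) {
  console.log(`${dialed} matches route: ${routePattern.test(dialed)}`);
}
```

Only the first two numbers match, so only calls dialed to those prefixes would be sent to the SBC selected for this route.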
Here are some examples of basic regular expressions: - `^\+\d+$` - matches a telephone number with one or more digits that start with a plus - `^+1(\d[10])$` - matches a telephone number with a ten digits after a `+1` @@ -83,7 +96,7 @@ You can select multiple SBCs for a single pattern. In such a case, the routing a ### Delete using Azure portal -#### To delete a Voice Route: +#### To delete a voice route: 1. In the left navigation, go to Direct routing under Voice Calling - PSTN and then select the Voice Routes tab. 1. Select route or routes you want to delete using a checkbox. 1. Select Remove. diff --git a/articles/communication-services/concepts/ui-library/includes/mobile-ui-library.md b/articles/communication-services/concepts/ui-library/includes/mobile-ui-library.md index 94f1f488a5e6e..1bb49ebeac525 100644 --- a/articles/communication-services/concepts/ui-library/includes/mobile-ui-library.md +++ b/articles/communication-services/concepts/ui-library/includes/mobile-ui-library.md @@ -5,64 +5,67 @@ description: In this document, introduce the Mobile UI Library author: jorgegarc ms.author: jorgegarc -ms.date: 09/14/2021 +ms.date: 5/27/2022 ms.topic: include ms.service: azure-communication-services +ms.custom: kr2b-contr-experiment --- [!INCLUDE [Public Preview Notice](../../../includes/public-preview-include.md)] -- **Composites.** - These components are turn-key solutions that implement common communication scenarios. You can quickly add video calling experience to your applications. Composites are open-source higher-order components this developer can take advantage of the reduced development time and engineering complexity. +*Composites* are turn-key solutions that implement common communication scenarios. You can add video calling experiences to your applications. Composites are open-source higher-order components that developers can take advantage of to reduce development time and engineering complexity. ## Composites overview -| Composite | Use Cases | -| --------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| [CallComposite](../../../quickstarts/ui-library/get-started-composites.md) | Calling experience that allows users to start or join a call. Inside the experience users can configure their devices, participate in the call with video, and see other participants, including those ones with video turned on. For Teams Interop, lobby functionality in included so users can wait to be admitted. | +| Composite | Use Cases | +| :-------- | :-------- | +| [CallComposite](../../../quickstarts/ui-library/get-started-composites.md) | Calling experience that allows users to start or join a call. Inside the experience, users can configure their devices, participate in the call with video, and see other participants, including those ones with video turned on. For Teams interoperability, `CallComposite` includes lobby functionality so that users can wait to be admitted. | -## Scenarios +## Composites scenarios ### Joining a video/audio call -Users can join easily over the call using the *Teams meeting URL* or they can set up an Azure Communication Services Call to a simpler and great experience, just like the Teams application. 
Adding the capability for the user to be part of extensive live video or audio call, without losing the experience of the simplicity and focusing in what really matters. +Users can join a call using the *Teams meeting URL* or they can set up an Azure Communication Services Call. This approach offers a simpler experience, just like the Teams application. ### Pre-call experience -As a participant of the calls, you can provide a name and set up a default configuration for audio and video devices, and you're ready to jump into the call. +As a participant of the call, you can provide a name and set up a default configuration for audio and video devices. Then you're ready to jump into the call. -:::image type="content" source="../../media/mobile-ui/teams-meet.png" alt-text="Pre-meeting experience."::: +:::image type="content" source="../../media/mobile-ui/teams-meet.png" alt-text="Screenshot shows the pre-meeting experience, a page with a message for the participant."::: ### Call experience -The calling composite provide an end two end experience, optimizing development time, and focusing into a clean layout. +The calling composite provides an end-to-end experience, optimizes development time, and focuses on clean layout. -:::image type="content" source="../../media/mobile-ui/calling-composite.png" alt-text="Meeting experience."::: +:::image type="content" source="../../media/mobile-ui/calling-composite.png" alt-text="Screenshot shows the meeting experience, with icons or video of participants."::: -**The calling experience provides all these capabilities in one single composite component, providing a clear path without complex code which leads to faster development time.** +The calling experience provides all these capabilities in one composite component, providing a clear path without complex code, which leads to faster development time. ### Quality and security -Mobile Composites are initialized using [Azure Communication Services access tokens](../../../quickstarts/access-tokens.md). +Mobile composites are initialized using [Azure Communication Services access tokens](../../../quickstarts/access-tokens.md). ### More details -If you need more details about mobile composites, please visit [use cases site](../ui-library-use-cases.md) to discover more. +If you need more details about mobile composites, see [use cases](../ui-library-use-cases.md). -## What UI Artifact is Best for my Project? +## What UI artifact is best for my project? -Understanding these requirements will help you choose the right client library: +These requirements help you choose the right client library: -- **How much customization do you desire?** Azure Communication core client libraries don't have a UX and are designed so you can build whatever UX you want. UI Library components provide UI assets at the cost of reduced customization. +- **How much customization do you desire?** Azure Communication Services core client libraries don't have a UX and are designed so you can build whatever UX you want. UI Library components provide UI assets at the cost of reduced customization. - **What platforms are you targeting?** Different platforms have different capabilities. -Details about the feature availability in the [UI Library is available here](../ui-library-use-cases.md), but key trade-offs are summarized below. 
-| Client library / SDK | Implementation Complexity | Customization Ability | Calling | [Teams Interop](../../teams-interop.md) | -| --------------------- | ------------------------- | --------------------- | ---- | ----------------------------------------------------------------------------------------------- | -| Composite Components | Low | Low | ✔ | ✔ | -| [Core client libraries](../../voice-video-calling/calling-sdk-features.md#detailed-capabilities) | High | High | ✔ | ✔ | +Here are some key trade-offs: + +| Client library / SDK | Implementation complexity | Customization ability | Calling | [Teams interoperability](../../teams-interop.md) | +| :-------------------- | :-----------------------: | :-------------------: | :-----: | :----------------------------------------------: | +| Composite Components | Low | Low | ✔ | ✔ | +| [Core client libraries](../../voice-video-calling/calling-sdk-features.md#detailed-capabilities) | High | High | ✔ | ✔ | + +For more information about feature availability in the UI Library, see [UI Library use cases](../ui-library-use-cases.md). > [!div class="nextstepaction"] > [Quickstart guides](../../../quickstarts/ui-library/get-started-composites.md) diff --git a/articles/communication-services/concepts/voice-video-calling/calling-sdk-features.md b/articles/communication-services/concepts/voice-video-calling/calling-sdk-features.md index 7fed98c431f83..8745a99ef96d7 100644 --- a/articles/communication-services/concepts/voice-video-calling/calling-sdk-features.md +++ b/articles/communication-services/concepts/voice-video-calling/calling-sdk-features.md @@ -61,6 +61,7 @@ The following list presents the set of features which are currently available in | | Place a group call with PSTN participants | ✔️ | ✔️ | ✔️ | ✔️ | | | Promote a one-to-one call with a PSTN participant into a group call | ✔️ | ✔️ | ✔️ | ✔️ | | | Dial-out from a group call as a PSTN participant | ✔️ | ✔️ | ✔️ | ✔️ | +| | Support for early media | ❌ | ✔️ | ✔️ | ✔️ | | General | Test your mic, speaker, and camera with an audio testing service (available by calling 8:echo123) | ✔️ | ✔️ | ✔️ | ✔️ | | Device Management | Ask for permission to use audio and/or video | ✔️ | ✔️ | ✔️ | ✔️ | | | Get camera list | ✔️ | ✔️ | ✔️ | ✔️ | @@ -107,7 +108,7 @@ The maximum call duration is 30 hours, participants that reach the maximum call ## JavaScript Calling SDK support by OS and browser -The following table represents the set of supported browsers which are currently available. **We support the most recent three versions of the browser** unless otherwise indicated. +The following table represents the set of supported browsers which are currently available. **We support the most recent three major versions of the browser (most recent three minor versions for Safari)** unless otherwise indicated. | Platform | Chrome | Safari | Edge (Chromium) | | ------------ | ------ | ------ | -------------- | diff --git a/articles/communication-services/concepts/voice-video-calling/closed-captions.md b/articles/communication-services/concepts/voice-video-calling/closed-captions.md index 37daae907e943..09e0f92702726 100644 --- a/articles/communication-services/concepts/voice-video-calling/closed-captions.md +++ b/articles/communication-services/concepts/voice-video-calling/closed-captions.md @@ -38,7 +38,7 @@ Here are main scenarios where Closed Captions are useful: ## Availability -The private preview will be available on all platforms. 
+Closed Captions are supported in Private Preview, and only in ACS-to-ACS calls, on all platforms:
- Android
- iOS
- Web
diff --git a/articles/communication-services/concepts/voice-video-calling/network-requirements.md b/articles/communication-services/concepts/voice-video-calling/network-requirements.md
index 57801d9c4415e..0456946313d0f 100644
--- a/articles/communication-services/concepts/voice-video-calling/network-requirements.md
+++ b/articles/communication-services/concepts/voice-video-calling/network-requirements.md
@@ -58,9 +58,18 @@ Communication Services connections require internet connectivity to specific por
 | Category | IP ranges or FQDN | Ports |
 | :-- | :-- | :-- |
-| Media traffic | [Range of Azure public cloud IP addresses](https://www.microsoft.com/download/confirmation.aspx?id=56519) | UDP 3478 through 3481, TCP ports 443 |
+| Media traffic | Azure public cloud IP address range 20.202.0.0/16. This range covers the IP addresses used by the Media Processor and the ACS TURN service. | UDP 3478 through 3481, TCP ports 443 |
 | Signaling, telemetry, registration| *.skype.com, *.microsoft.com, *.azure.net, *.azure.com, *.azureedge.net, *.office.com, *.trouter.io | TCP 443, 80 |
+
+The endpoints below should be reachable for U.S. Government GCC High customers only.
+
+| Category | IP ranges or FQDN | Ports |
+| :-- | :-- | :-- |
+| Media traffic | 52.127.88.0/21, 52.238.114.160/32, 52.238.115.146/32, 52.238.117.171/32, 52.238.118.132/32, 52.247.167.192/32, 52.247.169.1/32, 52.247.172.50/32, 52.247.172.103/32, 104.212.44.0/22, 195.134.228.0/22 | UDP 3478 through 3481, TCP ports 443 |
+| Signaling, telemetry, registration| *.gov.teams.microsoft.us, *.infra.gov.skypeforbusiness.us, *.online.gov.skypeforbusiness.us, gov.teams.microsoft.us | TCP 443, 80 |
+
+
 ## Network optimization
 The following tasks are optional and aren't required for rolling out Communication Services. Use this guidance to optimize your network and Communication Services performance or if you know you have some network limitations.
diff --git a/articles/communication-services/how-tos/calling-sdk/browser-support.md b/articles/communication-services/how-tos/calling-sdk/browser-support.md
new file mode 100644
index 0000000000000..8801ef89460d5
--- /dev/null
+++ b/articles/communication-services/how-tos/calling-sdk/browser-support.md
@@ -0,0 +1,52 @@
+---
+title: Verify if a web browser is supported
+titleSuffix: An Azure Communication Services how-to guide
+description: Learn how to get current browser environment details using the Azure Communication Services Calling SDK for JavaScript
+author: sloanster
+ms.author: micahvivion
+ms.service: azure-communication-services
+ms.subservice: calling
+ms.topic: how-to
+ms.date: 06/08/2021
+ms.custom: template-how-to
+
+#Customer intent: As a developer, I can verify that a browser an end user is trying to do a call on is supported by Azure Communication Services.
+
+---
+
+# How to verify if your application is running in a web browser supported by Azure Communication Services
+
+There are many different browsers available in the market today, but not all of them can properly support audio and video calling. To determine whether the browser your application is running on is supported, you can use the `getEnvironmentInfo` method to check for browser support.
+
+A `CallClient` instance is required for this operation.
When you have a `CallClient` instance, you can use the `getEnvironmentInfo` method on the `CallClient` instance to obtain details about the current environment of your app:
+
+
+```javascript
+const callClient = new CallClient(options);
+const environmentInfo = await callClient.getEnvironmentInfo();
+```
+
+The `getEnvironmentInfo` method asynchronously returns an object of type `EnvironmentInfo`.
+
+- The `EnvironmentInfo` type is defined as:
+
+```javascript
+{
+    environment: Environment;
+    isSupportedPlatform: boolean;
+    isSupportedBrowser: boolean;
+    isSupportedBrowserVersion: boolean;
+    isSupportedEnvironment: boolean;
+}
+```
+- The `Environment` type within the `EnvironmentInfo` type is defined as:
+
+```javascript
+{
+    platform: string;
+    browser: string;
+    browserVersion: string;
+}
+```
+
+A supported environment is a combination of an operating system, a browser, and the minimum version required for that browser.
diff --git a/articles/communication-services/how-tos/calling-sdk/includes/manage-video/manage-video-web.md b/articles/communication-services/how-tos/calling-sdk/includes/manage-video/manage-video-web.md
index cd83be7de09f4..9a383886e01e7 100644
--- a/articles/communication-services/how-tos/calling-sdk/includes/manage-video/manage-video-web.md
+++ b/articles/communication-services/how-tos/calling-sdk/includes/manage-video/manage-video-web.md
@@ -284,12 +284,16 @@ const isAvailable: boolean = remoteVideoStream.isAvailable;
 ```
 - `isReceiving`:
-    - ***This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. To use this api please use 'alpha' release of Azure Communication Services Calling Web SDK.***
-    - Will inform the application if remote video stream data is being received. Such cases are:
-        - When the remote mobile participant has their video on and they put the browser app in the background, they will stop sending video stream data until the app is brought back to the foreground.
-        - When the remote participant has their video on and they have bad network connectivity and video is cutting off / lagging
-    - This feature improves the user experience for rendering remote video streams.
-    - You can display a loading spinner over the remote video stream when isReceiving flag changes to false. You don't have to do a loading spinner, you can do anything you desire, but a loading spinner is the most common usage
+    - ***This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. To use this API, use the 1.5.4-beta.1+ release of the Azure Communication Services Calling Web SDK.***
+    - Informs the application whether remote video stream data is being received. Example scenarios:
+        - A remote participant on a mobile browser sends the browser app to the background. The `RemoteVideoStream.isReceiving` flag goes to `false`, and their video appears frozen or as black frames. When they bring the browser back to the foreground, the flag returns to `true` and their video plays normally.
+        - A remote participant, on any platform, has network issues on either side of the call. Their video becomes laggy and low quality, and the `RemoteVideoStream.isReceiving` flag goes to `false`.
+        - A remote participant on macOS/iOS Safari selects "Pause" / "Resume" camera from the address bar. Pausing the camera shows a black or frozen video and sets the `RemoteVideoStream.isReceiving` flag to `false`; resuming the camera sets the flag back to `true`.
+        - A remote participant, on any platform, loses network connectivity. The participant stays in the call for a short while with a frozen or black video frame, and the `RemoteVideoStream.isReceiving` flag goes to `false`. When they reconnect, their audio and video resume and the flag returns to `true`.
+        - A remote participant on a mobile browser terminates or kills the browser. Because they were on mobile, the participant stays in the call for a short while with frozen video, and the `RemoteVideoStream.isReceiving` flag goes to `false`. Eventually the service removes the participant from the call, and they appear as disconnected.
+        - A remote participant on a mobile browser locks the device, and the `RemoteVideoStream.isReceiving` flag goes to `false`. When they unlock the device and navigate back to the ACS call, the flag returns to `true`. The same behavior applies when a remote participant on desktop locks or sleeps the machine.
+    - This feature improves the user experience for rendering remote video streams.
+    - You can display a loading spinner over the remote video stream when the `isReceiving` flag changes to `false`. A loading spinner isn't required; you can handle the change however you like, but a spinner is the most common usage for a better user experience.
 ```js
 const isReceiving: boolean = remoteVideoStream.isReceiving;
 ```
diff --git a/articles/communication-services/how-tos/cte-calling-sdk/includes/manage-calls/manage-calls-web.md b/articles/communication-services/how-tos/cte-calling-sdk/includes/manage-calls/manage-calls-web.md
index dfe09030548b2..bd69cfb6ec38f 100644
--- a/articles/communication-services/how-tos/cte-calling-sdk/includes/manage-calls/manage-calls-web.md
+++ b/articles/communication-services/how-tos/cte-calling-sdk/includes/manage-calls/manage-calls-web.md
@@ -296,10 +296,10 @@ Providing a chat ID is mandatory for making calls and adding participants to an
 1. Create a chat thread between Alice and Bob, record `threadId`
 1. Alice calls Bob using `startCall` method on `callAgent` and specifies the `threadId`
-1. Add Charlie to chat thread with `threadId` using [Chat Graph API to add member](/graph/api/chat-post-members?tabs=http&view=graph-rest-1.0)
+1. Add Charlie to chat thread with `threadId` using [Chat Graph API to add member](/graph/api/chat-post-members?tabs=http&view=graph-rest-1.0&preserve-view=true)
 1. Alice adds Charlie to the call using `addParticipant` method on `call` and specifies the `threadId`
 1. Alice removes Charlie from the call using `removeParticipant` method on `call` and specifies the `threadId`
-1. Remove Charlie from chat thread with `threadId` using [Chat Graph API to remove member](/graph/api/chat-delete-members?tabs=http&view=graph-rest-1.0)
+1.
Remove Charlie from chat thread with `threadId` using [Chat Graph API to remove member](/graph/api/chat-delete-members?tabs=http&view=graph-rest-1.0&preserve-view=true) If Teams user stops call recording, the recording is placed into chat associated with the thread. Provided chat ID impacts the experience of Teams users in Teams clients. diff --git a/articles/communication-services/how-tos/ui-library-sdk/includes/data-model/android.md b/articles/communication-services/how-tos/ui-library-sdk/includes/data-model/android.md index 28e18c0986b6a..5df68fa3b795f 100644 --- a/articles/communication-services/how-tos/ui-library-sdk/includes/data-model/android.md +++ b/articles/communication-services/how-tos/ui-library-sdk/includes/data-model/android.md @@ -31,17 +31,17 @@ To use the `LocalSettings`, pass the instance of `ParticipantViewData` and injec #### [Kotlin](#tab/kotlin) ```kotlin -val viewData = ParticipantViewData("user_name", bitmap) +val viewData = ParticipantViewData("user_name") // bitmap is optional val localSettings = LocalSettings(viewData) -callComposite.launch(callLauncherActivity, groupCallOptions, localSettings) +callComposite.launch(this, options, localSettings) ``` #### [Java](#tab/java) ```java -ParticipantViewData viewData = new ParticipantViewData("user_name", bitmap); +ParticipantViewData viewData = new ParticipantViewData("user_name", bitmap); // bitmap is optional LocalSettings localSettings = new LocalSettings(viewData); -callComposite.launch(callLauncherActivity, groupCallOptions, localSettings); +callComposite.launch(this, options, localSettings); ``` ----- @@ -64,7 +64,7 @@ callComposite.setOnRemoteParticipantJoinedHandler { remoteParticipantJoinedEvent remoteParticipantJoinedEvent.identifiers.forEach { identifier -> // get displayName, bitmap for identifier callComposite.setRemoteParticipantViewData(identifier, - ParticipantViewData("display_name", bitmap)) + ParticipantViewData("display_name")) } } ``` @@ -72,12 +72,13 @@ callComposite.setOnRemoteParticipantJoinedHandler { remoteParticipantJoinedEvent #### [Java](#tab/java) ```java - for (CommunicationIdentifier identifier: participantJoinedEvent.getIdentifiers()) { + callComposite.setOnRemoteParticipantJoinedHandler( (remoteParticipantJoinedEvent) -> { + for (CommunicationIdentifier identifier: remoteParticipantJoinedEvent.getIdentifiers()) { // get displayName, bitmap for identifier - callComposite.setRemoteParticipantViewData(identifier, - new ParticipantViewData("display_name", bitmap)) + callComposite.setRemoteParticipantViewData(identifier, + new ParticipantViewData("display_name")); } - }); + }); ``` ----- diff --git a/articles/communication-services/includes/phone-number-special-order.md b/articles/communication-services/includes/phone-number-special-order.md index a4f1021534c07..4151b4e4ff543 100644 --- a/articles/communication-services/includes/phone-number-special-order.md +++ b/articles/communication-services/includes/phone-number-special-order.md @@ -8,5 +8,4 @@ ms.custom: references_regions --- > [!IMPORTANT] -> For high-volume orders or in the event that your desired phone number is unavailable, complete **[this form](https://github.com/Azure/Communication/blob/master/Forms/ACS%20-%20Bulk%20Number%20Acquisition.docx)** and email it to acstnrequest@microsoft.com with a subject line beginning with "Azure Communication Services Number Request:". 
- +> For high-volume orders or in the event that your desired phone number is unavailable, please visit **[this page](https://github.com/Azure/Communication/blob/master/special-order-numbers.md)** for further assistance. \ No newline at end of file diff --git a/articles/communication-services/includes/regional-availability-include.md b/articles/communication-services/includes/regional-availability-include.md index 1539e8e28a7f7..b845b0f0335dd 100644 --- a/articles/communication-services/includes/regional-availability-include.md +++ b/articles/communication-services/includes/regional-availability-include.md @@ -8,4 +8,4 @@ ms.custom: references_regions --- > [!IMPORTANT] -> The capabilities available (PSTN/SMS, Inbound/Outbound) depend on the country that you're operating within (your Azure billing address location), your use case, and the phone number type that you've selected. These capabilities vary by country due to regulatory requirements. For more information, visit the [Phone number types](../concepts/numbers/number-types.md) documentation. +> The capabilities available (PSTN/SMS, Inbound/Outbound) depend on the country that you're operating within (your Azure billing address location), your use case, and the phone number type that you've selected. These capabilities vary by country due to regulatory requirements. For more information, visit the [Subscription eligibility](../concepts/numbers/sub-eligibility-number-capability.md) documentation. diff --git a/articles/communication-services/index.yml b/articles/communication-services/index.yml index 2bc102ecaff42..bb53b94574796 100644 --- a/articles/communication-services/index.yml +++ b/articles/communication-services/index.yml @@ -115,6 +115,32 @@ landingContent: url: concepts/chat/concepts.md - text: Chat SDK overview url: concepts/chat/sdk-features.md + - title: Email + linkLists: + - linkListType: quickstart + links: + - text: Create a Email Communication Services resource + url: quickstarts/email/create-email-communication-resource.md + - text: Add Azure Managed domain + url: quickstarts/email/add-azure-managed-domains.md + - text: Add Custom domain + url: quickstarts/email/add-custom-verified-domains.md + - text: Connect domain to send Email + url: quickstarts/email/connect-email-communication-resource.md + - text: Send an Email + url: quickstarts/email/send-email.md + - linkListType: concept + links: + - text: Email concepts + url: concepts/email/email-overview.md + - text: What is Email Communication Service + url: concepts/email/prepare-email-communication-resource.md + - text: Email domains and sender authentication + url: concepts/email/email-domain-and-sender-authentication.md + - text: Best practices for sender authentication support + url: concepts/email/email-authentication-best-practice.md + - text: Email client library + url: concepts/email/sdk-features.md - title: Samples linkLists: - linkListType: get-started diff --git a/articles/communication-services/quickstarts/email/create-email-communication-resource.md b/articles/communication-services/quickstarts/email/create-email-communication-resource.md index 39e624410cf5a..d47969ea88a1f 100644 --- a/articles/communication-services/quickstarts/email/create-email-communication-resource.md +++ b/articles/communication-services/quickstarts/email/create-email-communication-resource.md @@ -16,7 +16,7 @@ ms.custom: private_preview, event-tier1-build-2022 [!INCLUDE [Public Preview Notice](../../includes/public-preview-include.md)] -Get started with Email by provisioning your 
first Email Communication Services resource. Communication services resources can be provisioned through the [Azure portal](https://portal.azure.com) or with the .NET management client library. The management client library and the Azure portal allow you to create, configure, update and delete your resources and interface with [Azure Resource Manager](../../../azure-resource-manager/management/overview.md), Azure's deployment and management service. All functionality available in the client libraries is available in the Azure portal. +Get started with Email by provisioning your first Email Communication Services resource. Communication services resources can be provisioned through the [Azure portal](https://portal.azure.com/) or with the .NET management client library. The management client library and the Azure portal allow you to create, configure, update and delete your resources and interface with [Azure Resource Manager](../../../azure-resource-manager/management/overview.md), Azure's deployment and management service. All functionality available in the client libraries is available in the Azure portal. ## Create the Email Communications Service resource using portal diff --git a/articles/communication-services/quickstarts/email/includes/send-email-js.md b/articles/communication-services/quickstarts/email/includes/send-email-js.md index 1d1b4ae55392e..efda25eef0261 100644 --- a/articles/communication-services/quickstarts/email/includes/send-email-js.md +++ b/articles/communication-services/quickstarts/email/includes/send-email-js.md @@ -11,7 +11,7 @@ ms.service: azure-communication-services ms.custom: private_preview, event-tier1-build-2022 --- -Get started with Azure Communication Services by using the Communication Services C# Email client library to send Email messages. +Get started with Azure Communication Services by using the Communication Services JS Email client library to send Email messages. Completing this quick start incurs a small cost of a few USD cents or less in your Azure account. @@ -42,7 +42,7 @@ Run `npm init -y` to create a **package.json** file with default settings. npm init -y ``` -Use a text editor to create a file called **send-email.js** in the project root directory. You'll add all the source code for this quickstart to this file in the following sections. +Use a text editor to create a file called **send-email.js** in the project root directory. Change the "main" property in **package.json** to "send-email.js". You'll add all the source code for this quickstart to this file in the following sections. ### Install the package Use the `npm install` command to install the Azure Communication Services Email client library for JavaScript. @@ -59,27 +59,30 @@ The following classes and interfaces handle some of the major features of the Az | Name | Description | | --------------------| -----------------------------------------------------------------------------------------------------------------------------------------------------| -| EmailAddress | This class contains an email address and an option for a display name. | -| EmailAttachment | This class creates an email attachment by accepting a unique ID, email attachment type, and a string of content bytes. | +| EmailAddress | This interface contains an email address and an option for a display name. | +| EmailAttachment | This interface creates an email attachment by accepting a unique ID, email attachment type, and a string of content bytes. | | EmailClient | This class is needed for all email functionality. 
You instantiate it with your connection string and use it to send email messages. | -| EmailClientOptions | This class can be added to the EmailClient instantiation to target a specific API version. | -| EmailContent | This class contains the subject and the body of the email message. The importance can also be set within the EmailContent class. | -| EmailCustomHeader | This class allows for the addition of a name and value pair for a custom header. | -| EmailMessage | This class combines the sender, content, and recipients. Custom headers, attachments, and reply-to email addresses can optionally be added, as well. | -| EmailRecipients | This class holds lists of EmailAddress objects for recipients of the email message, including optional lists for CC & BCC recipients. | -| SendStatusResult | This class holds lists of status of the email message delivery. +| EmailClientOptions | This interface can be added to the EmailClient instantiation to target a specific API version. | +| EmailContent | This interface contains the subject, plaintext, and html of the email message. | +| EmailCustomHeader | This interface allows for the addition of a name and value pair for a custom header. | +| EmailMessage | This interface combines the sender, content, and recipients. Custom headers, importance, attachments, and reply-to email addresses can optionally be added, as well. | +| EmailRecipients | This interface holds lists of EmailAddress objects for recipients of the email message, including optional lists for CC & BCC recipients. | +| SendStatusResult | This interface holds the messageId and status of the email message delivery. ## Authenticate the client - Import the **EmailClient** from the client library and instantiate it with your connection string. The code below retrieves the connection string for the resource from an environment variable named `COMMUNICATION_SERVICES_CONNECTION_STRING`. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). +Import the **EmailClient** from the client library and instantiate it with your connection string. + +The code below retrieves the connection string for the resource from an environment variable named `COMMUNICATION_SERVICES_CONNECTION_STRING` using the dotenv package. Use the `npm install` command to install the dotenv package. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). + +```console +npm install dotenv +``` Add the following code to **send-email.js**: ```javascript -const { EmailRestApiClient } = require("@azure/communication-email"); -const communication_common = require("@azure/communication-common"); -const core_http = require("@azure/core-http"); -const uuid = require("uuid"); +const { EmailClient } = require("@azure/communication-email"); require("dotenv").config(); // This code demonstrates how to fetch your connection string @@ -93,7 +96,7 @@ To send an Email message, you need to - Add Recipients - Construct your email message with your Sender information you get your MailFrom address from your verified domain. 
- Include your Email Content and Recipients and include attachments if any -- Calling the SendEmail method: +- Calling the send method: Replace with your domain details and modify the content, recipient details as required @@ -101,37 +104,23 @@ Replace with your domain details and modify the content, recipient details as re async function main() { try { - const { url, credential } = communication_common.parseClientArguments(connectionString); - const options = {}; - options.userAgentOptions = {}; - options.userAgentOptions.userAgentPrefix = `azsdk-js-communication-email/1.0.0`; - const authPolicy = communication_common.createCommunicationAuthPolicy(credential); - const pipeline = core_http.createPipelineFromOptions(options, authPolicy); - this.api = new EmailRestApiClient(url, pipeline); + var client = new EmailClient(connectionString); //send mail - const unique_id = uuid.v4(); - const repeatabilityFirstSent = new Date().toUTCString(); const emailMessage = { sender: "", content: { subject: "Welcome to Azure Communication Service Email.", - body: { - plainText: "" - }, + plainText: "" }, recipients: { - toRecipients: [ + to: [ { - email: "emailalias@emaildomain.com>", + email: "", }, ], }, }; - var response = await this.api.email.sendEmail( - unique_id, - repeatabilityFirstSent, - emailMessage - ); + var response = await client.send(emailMessage); } catch (e) { console.log(e); } @@ -140,11 +129,10 @@ main(); ``` ## Getting MessageId to track email delivery -To track the status of email delivery, you need to get the MessageId back from response and track the status. If there's no MessageId retry the request. +To track the status of email delivery, you need to get the MessageId back from response and track the status. If there's no MessageId, retry the request. ```javascript - // check mail status, wait for 5 seconds, check for 60 seconds. - const messageId = response._response.parsedHeaders.xMsRequestId; + const messageId = response.messageId; if (messageId === null) { console.log("Message Id not found."); return; @@ -154,13 +142,12 @@ To track the status of email delivery, you need to get the MessageId back from r ## Getting status on email delivery To get the delivery status of email call GetMessageStatus API with MessageId ```javascript - - const context = this; + // check mail status, wait for 5 seconds, check for 60 seconds. let counter = 0; const statusInterval = setInterval(async function () { counter++; try { - const response = await context.api.email.getSendStatus(messageId); + const response = await client.getSendStatus(messageId); if (response) { console.log(`Email status for ${messageId}: ${response.status}`); if (response.status.toLowerCase() !== "queued" || counter > 12) { @@ -176,14 +163,9 @@ To get the delivery status of email call GetMessageStatus API with MessageId | Status Name | Description | | --------------------| -----------------------------------------------------------------------------------------------------------------------------------------------------| -| None | An email with this messageId couldn't be found. | | Queued | The email has been placed in the queue for delivery. | | OutForDelivery | The email is currently en route to its recipient(s). | -| InternalError | An error occurred internally during the delivery of this message. Try again. | | Dropped | The email message was dropped before the delivery could be successfully completed. | -| InvalidEmailAddress | The sender and/or recipient email address(es) is/are not valid. 
| -| InvalidAttachments | The content bytes string for the attachment isn't valid. | -| InvalidSenderDomain | The sender's email address domain isn't valid. | ## Run the code @@ -194,4 +176,4 @@ node ./send-email.js ``` ## Sample code -You can download the sample app from [GitHub](https://github.com/moirf/communication-services-javascript-quickstarts/tree/main/send-email) +You can download the sample app from [GitHub](https://github.com/Azure-Samples/communication-services-javascript-quickstarts/tree/main/send-email) diff --git a/articles/communication-services/quickstarts/email/includes/send-email-net.md b/articles/communication-services/quickstarts/email/includes/send-email-net.md index 8b5c5906e212d..abadac851aae7 100644 --- a/articles/communication-services/quickstarts/email/includes/send-email-net.md +++ b/articles/communication-services/quickstarts/email/includes/send-email-net.md @@ -47,7 +47,7 @@ dotnet build While still in the application directory, install the Azure Communication Services Email client library for .NET package by using the `dotnet add package` command. ```console -dotnet add package Azure.Communication.Email --version 1.0.0 +dotnet add package Azure.Communication.Email --prerelease ``` Open **Program.cs** and replace the existing code with the following @@ -62,6 +62,7 @@ using System.Threading.Tasks; using Azure; using Azure.Communication.Email; +using Azure.Communication.Email.Models; namespace SendEmail { @@ -90,17 +91,17 @@ The following classes and interfaces handle some of the major features of the Az | EmailCustomHeader | This class allows for the addition of a name and value pair for a custom header. | | EmailMessage | This class combines the sender, content, and recipients. Custom headers, attachments, and reply-to email addresses can optionally be added, as well. | | EmailRecipients | This class holds lists of EmailAddress objects for recipients of the email message, including optional lists for CC & BCC recipients. | -| SendStatusResult | This class holds lists of status of the email message delivery . | +| SendStatusResult | This class holds lists of status of the email message delivery. | ## Authenticate the client - Open **Program.cs** in a text editor and replace the body of the `Main` method with code to initialize an `EmailClient` with your connection string. The code below retrieves the connection string for the resource from an environment variable named `COMMUNICATION_SERVICES_CONNECTION_STRING`. Learn how to [manage you resource's connection string](../../create-communication-resource.md#store-your-connection-string). + Open **Program.cs** in a text editor and replace the body of the `Main` method with code to initialize an `EmailClient` with your connection string. The code below retrieves the connection string for the resource from an environment variable named `COMMUNICATION_SERVICES_CONNECTION_STRING`. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). ```csharp // This code demonstrates how to fetch your connection string // from an environment variable. string connectionString = Environment.GetEnvironmentVariable("COMMUNICATION_SERVICES_CONNECTION_STRING"); - +EmailClient emailClient = new EmailClient(connectionString); ``` ## Send an email message @@ -109,20 +110,19 @@ To send an Email message, you need to - Add Recipients - Construct your email message with your Sender information you get your MailFrom address from your verified domain. 
- Include your Email Content and Recipients and include attachments if any -- Calling the SendEmail method. Add this code to the end of `Main` method in **Program.cs**: +- Calling the Send method. Add this code to the end of `Main` method in **Program.cs**: Replace with your domain details and modify the content, recipient details as required ```csharp //Replace with your domain and modify the content, recipient details as required -EmailContent emailContent = new EmailContent(); -emailContent.Subject = "Welcome to Azure Communication Service Email."; -emailContent.PlainText = "This email meessage is sent from Azure Communication Service Email using .NET SDK."; -List emailAddresses = new List { new EmailAddress("emailalias@emaildomain.com") { DisplayName = "Friendly Display Name" }}; +EmailContent emailContent = new EmailContent("Welcome to Azure Communication Service Email APIs."); +emailContent.PlainText = "This email message is sent from Azure Communication Service Email using .NET SDK."; +List emailAddresses = new List { new EmailAddress("emailalias@contoso.com") { DisplayName = "Friendly Display Name" }}; EmailRecipients emailRecipients = new EmailRecipients(emailAddresses); EmailMessage emailMessage = new EmailMessage("donotreply@xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.azurecomm.net", emailContent, emailRecipients); -var response = emailClient.Send(emailMessage, Guid.NewGuid(), DateTime.Now); +SendEmailResult emailResult = emailClient.Send(emailMessage,CancellationToken.None); ``` ## Getting MessageId to track email delivery @@ -130,37 +130,25 @@ var response = emailClient.Send(emailMessage, Guid.NewGuid(), DateTime.Now); To track the status of email delivery, you need to get the MessageId back from response and track the status. If there's no MessageId retry the request. 
```csharp -string messageId = string.Empty; -if (!response.IsError) -{ - if (!response.Headers.TryGetValue("x-ms-request-id", out messageId)) - { - Console.WriteLine("MessageId not found"); - return; - } - else - { - Console.WriteLine($"MessageId = {messageId}"); - } -} + Console.WriteLine($"MessageId = {emailResult.MessageId}"); ``` ## Getting status on email delivery To get the delivery status of email call GetMessageStatus API with MessageId ```csharp Response messageStatus = null; -messageStatus = emailClient.GetSendStatus(messageId); +messageStatus = emailClient.GetSendStatus(emailResult.MessageId); Console.WriteLine($"MessageStatus = {messageStatus.Value.Status}"); TimeSpan duration = TimeSpan.FromMinutes(3); long start = DateTime.Now.Ticks; do { - messageStatus = emailClient.GetSendStatus(messageId); - if (messageStatus.Value.Status != SendStatus.Queued ) + messageStatus = emailClient.GetSendStatus(emailResult.MessageId); + if (messageStatus.Value.Status != SendStatus.Queued) { Console.WriteLine($"MessageStatus = {messageStatus.Value.Status}"); break; } - Thread.Sleep(60000); + Thread.Sleep(10000); Console.WriteLine($"..."); } while (DateTime.Now.Ticks - start < duration.Ticks); @@ -187,4 +175,4 @@ dotnet run ## Sample code -You can download the sample app from [GitHub](https://github.com/Azure-Samples/communication-services-dotnet-quickstarts/tree/main/send-email) +You can download the sample app from [GitHub](https://github.com/Azure-Samples/communication-services-dotnet-quickstarts/tree/main/SendEmail) diff --git a/articles/communication-services/quickstarts/manage-teams-identity.md b/articles/communication-services/quickstarts/manage-teams-identity.md index 1db03e2f33fd2..496207166322b 100644 --- a/articles/communication-services/quickstarts/manage-teams-identity.md +++ b/articles/communication-services/quickstarts/manage-teams-identity.md @@ -15,6 +15,8 @@ ms.custom: mode-other --- # Quickstart: Set up and manage access tokens for Teams users +[!INCLUDE [Public Preview](../../communication-services/includes/public-preview-include-document.md)] + In this quickstart, you'll build a .NET console application to authenticate a Microsoft 365 user by using the Microsoft Authentication Library (MSAL) and retrieving a Microsoft Azure Active Directory (Azure AD) user token. You'll then exchange that token for an access token of Teams user with the Azure Communication Services Identity SDK. The access token for Teams user can then be used by the Communication Services Calling SDK to build a custom Teams endpoint. > [!NOTE] diff --git a/articles/communication-services/quickstarts/sms/handle-sms-events.md b/articles/communication-services/quickstarts/sms/handle-sms-events.md index c40827908402b..206c73d17f5c2 100644 --- a/articles/communication-services/quickstarts/sms/handle-sms-events.md +++ b/articles/communication-services/quickstarts/sms/handle-sms-events.md @@ -1,100 +1,110 @@ --- -title: Quickstart - Handle SMS events for Delivery Reports and Inbound Messages -titleSuffix: An Azure Communication Services quickstart -description: Learn how to handle SMS events using Azure Communication Services. +title: Quickstart - Handle SMS and delivery report events +titleSuffix: Azure Communication Services +description: "In this quickstart, you'll learn how to handle Azure Communication Services events. See how to create, receive, and subscribe to SMS and delivery report events." 
author: probableprime manager: chpalm services: azure-communication-services ms.author: rifox -ms.date: 06/30/2021 +ms.date: 05/25/2022 ms.topic: quickstart ms.service: azure-communication-services ms.subservice: sms -ms.custom: mode-other +ms.custom: + - mode-other + - kr2b-contr-experiment --- -# Quickstart: Handle SMS events for Delivery Reports and Inbound Messages +# Quickstart: Handle SMS and delivery report events + +Get started with Azure Communication Services by using Azure Event Grid to handle Communication Services SMS events. After subscribing to SMS events such as inbound messages and delivery reports, you generate and receive these events. Completing this quickstart incurs a small cost of a few USD cents or less in your Azure account. [!INCLUDE [Regional Availability Notice](../../includes/regional-availability-include.md)] -Get started with Azure Communication Services by using Azure Event Grid to handle Communication Services SMS events. +## Prerequisites -## About Azure Event Grid +- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). +- A Communication Services resource. For detailed information, see [Create an Azure Communication Services resource](../create-communication-resource.md). +- An SMS-enabled telephone number. [Get a phone number](../telephony/get-phone-number.md). -[Azure Event Grid](../../../event-grid/overview.md) is a cloud-based eventing service. In this article, you'll learn how to subscribe to events for [communication service events](../../../event-grid/event-schema-communication-services.md), and trigger an event to view the result. Typically, you send events to an endpoint that processes the event data and takes actions. In this article, we'll send the events to a web app that collects and displays the messages. +## About Event Grid -## Prerequisites -- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). -- An Azure Communication Service resource. Further details can be found in the [Create an Azure Communication Services resource](../create-communication-resource.md) quickstart. -- An SMS enabled telephone number. [Get a phone number](../telephony/get-phone-number.md). +[Event Grid](../../../event-grid/overview.md) is a cloud-based eventing service. In this article, you'll learn how to subscribe to [communication service events](../../../event-grid/event-schema-communication-services.md), and trigger an event to view the result. Typically, you send events to an endpoint that processes the event data and takes actions. In this article, we'll send the events to a web app that collects and displays the messages. + +## Set up the environment + +To set up the environment that we'll use to generate and receive events, take the steps in the following sections. + +### Register an Event Grid resource provider -## Setting up +If you haven't previously used Event Grid in your Azure subscription, you might need to register your Event Grid resource provider. To register the provider, follow these steps: -### Enable Event Grid resource provider +1. Go to the Azure portal. +1. On the left menu, select **Subscriptions**. +1. Select the subscription that you use for Event Grid. +1. On the left menu, under **Settings**, select **Resource providers**. +1. Find **Microsoft.EventGrid**. +1. If your resource provider isn't registered, select **Register**. 
-If you haven't previously used Event Grid in your Azure subscription, you may need to register the Event Grid resource provider following the steps below: +It might take a moment for the registration to finish. Select **Refresh** to update the status. When **Registered** appears under **Status**, you're ready to continue. -In the Azure portal: +### Deploy the Event Grid viewer -1. Select **Subscriptions** on the left menu. -2. Select the subscription you're using for Event Grid. -3. On the left menu, under **Settings**, select **Resource providers**. -4. Find **Microsoft.EventGrid**. -5. If not registered, select **Register**. +For this quickstart, we'll use an Event Grid viewer to view events in near-real time. The viewer provides the user with the experience of a real-time feed. Also, the payload of each event should be available for inspection. -It may take a moment for the registration to finish. Select **Refresh** to update the status. When **Status** is **Registered**, you're ready to continue. +To set up the viewer, follow the steps in [Azure Event Grid Viewer](/samples/azure-samples/azure-event-grid-viewer/azure-event-grid-viewer/). -### Event Grid Viewer deployment +## Subscribe to SMS events by using web hooks -For this quickstart, we will use the [Azure Event Grid Viewer Sample](/samples/azure-samples/azure-event-grid-viewer/azure-event-grid-viewer/) to view events in near-real time. This will provide the user with the experience of a real-time feed. In addition, the payload of each event should be available for inspection as well. +You can subscribe to specific events to provide Event Grid with information about where to send the events that you want to track. -## Subscribe to the SMS events using web hooks +1. In the portal, go to the Communication Services resource that you created. -In the portal, navigate to your Azure Communication Services Resource that you created. Inside the Communication Service resource, select **Events** from the left menu of the **Communication Services** page. +1. Inside the Communication Services resource, on the left menu of the **Communication Services** page, select **Events**. -:::image type="content" source="./media/handle-sms-events/select-events.png" alt-text="Screenshot showing selecting the event subscription button within a resource's events page."::: +1. Select **Add Event Subscription**. -Press **Add Event Subscription** to enter the creation wizard. + :::image type="content" source="./media/handle-sms-events/select-events.png" alt-text="Screenshot that shows the Events page of an Azure Communication Services resource. The Event Subscription button is called out."::: -On the **Create Event Subscription** page, Enter a **name** for the event subscription. +1. On the **Create Event Subscription** page, enter a **name** for the event subscription. -You can subscribe to specific events to tell Event Grid which of the SMS events you want to track, and where to send the events. Select the events you'd like to subscribe to from the dropdown menu. For SMS you'll have the option to choose `SMS Received` and `SMS Delivery Report Received`. +1. Under **Event Types**, select the events that you'd like to subscribe to. For SMS, you can choose `SMS Received` and `SMS Delivery Report Received`. -If you're prompted to provide a **System Topic Name**, feel free to provide a unique string. This field has no impact on your experience and is used for internal telemetry purposes. +1. 
If you're prompted to provide a **System Topic Name**, feel free to provide a unique string. This field has no impact on your experience and is used for internal telemetry purposes. -Check out the full list of [events supported by Azure Communication Services](../../../event-grid/event-schema-communication-services.md). + :::image type="content" source="./media/handle-sms-events/select-events-create-eventsub.png" alt-text="Screenshot that shows the Create Event Subscription dialog. Under Event Types, SMS Received and SMS Delivery Report Received are selected."::: -:::image type="content" source="./media/handle-sms-events/select-events-create-eventsub.png" alt-text="Screenshot showing the SMS Received and SMS Delivery Report Received event types being selected."::: +1. For **Endpoint type**, select **Web Hook**. -Select **Web Hook** for **Endpoint type**. + :::image type="content" source="./media/handle-sms-events/select-events-create-linkwebhook.png" alt-text="Screenshot that shows a detail of the Create Event Subscription dialog. In the Endpoint Type list, Web Hook is selected."::: -:::image type="content" source="./media/handle-sms-events/select-events-create-linkwebhook.png" alt-text="Screenshot showing the Endpoint Type field being set to Web Hook."::: +1. For **Endpoint**, select **Select an endpoint**, and then enter the URL of your web app. -For **Endpoint**, click **Select an endpoint**, and enter the URL of your web app. + In this case, we'll use the URL from the [Event Grid viewer](/samples/azure-samples/azure-event-grid-viewer/azure-event-grid-viewer/) that we set up earlier in the quickstart. The URL for the sample has this format: `https://{{site-name}}.azurewebsites.net/api/updates` -In this case, we will use the URL from the [Azure Event Grid Viewer Sample](/samples/azure-samples/azure-event-grid-viewer/azure-event-grid-viewer/) we set up earlier in the quickstart. The URL for the sample will be in the format: `https://{{site-name}}.azurewebsites.net/api/updates` +1. Select **Confirm Selection**. -Then select **Confirm Selection**. + :::image type="content" source="./media/handle-sms-events/select-events-create-selectwebhook-epadd.png" alt-text="Screenshot that shows the Select Web Hook dialog. The Subscriber Endpoint box contains a U R L, and a Confirm Selection button is visible."::: -:::image type="content" source="./media/handle-sms-events/select-events-create-selectwebhook-epadd.png" alt-text="Screenshot showing confirming a Web Hook Endpoint."::: +## View SMS events -## Viewing SMS events +To generate and receive SMS events, take the steps in the following sections. -### Triggering SMS events +### Trigger SMS events -To view event triggers, we must generate events in the first place. +To view event triggers, we need to generate some events. -- `SMS Received` events are generated when the Communication Services phone number receives a text message. To trigger an event, just send a message from your phone to the phone number attached to your Communication Services resource. -- `SMS Delivery Report Received` events are generated when you send an SMS to a user using a Communication Services phone number. To trigger an event, you are required to enable `Delivery Report` in the options of the [sent SMS](../sms/send.md). Try sending a message to your phone with `Delivery Report`. Completing this action incurs a small cost of a few USD cents or less in your Azure account. 
+- `SMS Received` events are generated when the Communication Services phone number receives a text message. To trigger an event, send a message from your phone to the phone number that's attached to your Communication Services resource. +- `SMS Delivery Report Received` events are generated when you send an SMS to a user by using a Communication Services phone number. To trigger an event, you need to turn on the `Delivery Report` option of the [SMS that you send](../sms/send.md). Try sending a message to your phone with `Delivery Report` turned on. Completing this action incurs a small cost of a few USD cents or less in your Azure account. -Check out the full list of [events supported by Azure Communication Services](../../../event-grid/event-schema-communication-services.md). +Check out the full list of [events that Communication Services supports](../../../event-grid/event-schema-communication-services.md). -### Receiving SMS events +### Receive SMS events -Once you complete either action above you will notice that `SMS Received` and `SMS Delivery Report Received` events are sent to your endpoint. These events will show up in the [Azure Event Grid Viewer Sample](/samples/azure-samples/azure-event-grid-viewer/azure-event-grid-viewer/) we set up at the beginning. You can press the eye icon next to the event to see the entire payload. Events will look like this: +After you generate an event, you'll notice that `SMS Received` and `SMS Delivery Report Received` events are sent to your endpoint. These events show up in the [Event Grid viewer](/samples/azure-samples/azure-event-grid-viewer/azure-event-grid-viewer/) that we set up at the beginning of this quickstart. Select the eye icon next to the event to see the entire payload. Events should look similar to the following data: -:::image type="content" source="./media/handle-sms-events/sms-received.png" alt-text="Screenshot showing the Event Grid Schema for an SMS Received Event."::: +:::image type="content" source="./media/handle-sms-events/sms-received.png" alt-text="Screenshot of the Azure Event Grid viewer that shows the Event Grid schema for an SMS received event."::: -:::image type="content" source="./media/handle-sms-events/sms-delivery-report-received.png" alt-text="Screenshot showing the Event Grid Schema for an SMS Delivery Report Event."::: +:::image type="content" source="./media/handle-sms-events/sms-delivery-report-received.png" alt-text="Screenshot of the Azure Event Grid viewer that shows the Event Grid schema for an SMS delivery report event."::: Learn more about the [event schemas and other eventing concepts](../../../event-grid/event-schema-communication-services.md). @@ -109,7 +119,7 @@ In this quickstart, you learned how to consume SMS events. 
You can receive SMS m > [!div class="nextstepaction"] > [Send SMS](../sms/send.md) -You may also want to: +You might also want to: - [Learn about event handling concepts](../../../event-grid/event-schema-communication-services.md) - [Learn about Event Grid](../../../event-grid/overview.md) diff --git a/articles/communication-services/quickstarts/sms/includes/send-sms-java.md b/articles/communication-services/quickstarts/sms/includes/send-sms-java.md index fc28cc58107c2..07e39346a26d6 100644 --- a/articles/communication-services/quickstarts/sms/includes/send-sms-java.md +++ b/articles/communication-services/quickstarts/sms/includes/send-sms-java.md @@ -7,7 +7,7 @@ manager: ankita ms.service: azure-communication-services ms.subservice: azure-communication-services -ms.date: 06/30/2021 +ms.date: 05/25/2022 ms.topic: include ms.custom: include file ms.author: pvicencio @@ -18,32 +18,34 @@ Get started with Azure Communication Services by using the Communication Service Completing this quickstart incurs a small cost of a few USD cents or less in your Azure account. > [!NOTE] -> Find the finalized code for this quickstart on [GitHub](https://github.com/Azure-Samples/communication-services-java-quickstarts/tree/main/send-sms-quickstart) +> Find the finalized code for this quickstart on [GitHub](https://github.com/Azure-Samples/communication-services-java-quickstarts/tree/main/send-sms-quickstart). ## Prerequisites - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). -- [Java Development Kit (JDK)](/java/azure/jdk/) version 8 or above. +- [Java Development Kit (JDK)](/java/azure/jdk/) version 8 or later. - [Apache Maven](https://maven.apache.org/download.cgi). - An active Communication Services resource and connection string. [Create a Communication Services resource](../../create-communication-resource.md). -- An SMS enabled telephone number. [Get a phone number](../../telephony/get-phone-number.md). +- An SMS-enabled telephone number. [Get a phone number](../../telephony/get-phone-number.md). ### Prerequisite check -- In a terminal or command window, run `mvn -v` to check that maven is installed. -- To view the phone numbers associated with your Communication Services resource, sign in to the [Azure portal](https://portal.azure.com/), locate your Communication Services resource and open the **phone numbers** tab from the left navigation pane. +- In a terminal or command window, run `mvn -v` to check that Maven is installed. +- To view the phone numbers that are associated with your Communication Services resource, sign in to the [Azure portal](https://portal.azure.com/) and locate your Communication Services resource. In the navigation pane on the left, select **Phone numbers**. -## Setting up +## Set up the application environment + +To set up an environment for sending messages, take the steps in the following sections. ### Create a new Java application -Open your terminal or command window and navigate to the directory where you would like to create your Java application. Run the command below to generate the Java project from the maven-archetype-quickstart template. +Open your terminal or command window and navigate to the directory where you would like to create your Java application. Run the following command to generate the Java project from the maven-archetype-quickstart template. 
```console mvn archetype:generate -DgroupId=com.communication.quickstart -DartifactId=communication-quickstart -DarchetypeArtifactId=maven-archetype-quickstart -DarchetypeVersion=1.4 -DinteractiveMode=false ``` -The 'generate' goal will create a directory with the same name as the artifactId. Under this directory, the **src/main/java** directory contains the project source code, the **src/test/java directory** contains the test source, and the **pom.xml** file is the project's Project Object Model, or POM. +The `generate` goal creates a directory with the same name as the `artifactId` value. Under this directory, the **src/main/java** directory contains the project source code, the **src/test/java directory** contains the test source, and the **pom.xml** file is the project's Project Object Model (POM). ### Install the package @@ -59,7 +61,7 @@ Open the **pom.xml** file in your text editor. Add the following dependency elem ### Set up the app framework -Open **/src/main/java/com/communication/quickstart/App.java** in a text editor, add import directives and remove the `System.out.println("Hello world!");` statement: +Open **/src/main/java/com/communication/quickstart/App.java** in a text editor, add import directives, and remove the `System.out.println("Hello world!");` statement: ```java package com.communication.quickstart; @@ -74,7 +76,7 @@ public class App { public static void main( String[] args ) { - // Quickstart code goes here + // Quickstart code goes here. } } @@ -86,19 +88,19 @@ The following classes and interfaces handle some of the major features of the Az | Name | Description | | ---------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | -| SmsClientBuilder | This class creates the SmsClient. You provide it with endpoint, credential, and an http client. | +| SmsClientBuilder | This class creates the SmsClient. You provide it with an endpoint, a credential, and an HTTP client. | | SmsClient | This class is needed for all SMS functionality. You use it to send SMS messages. | -| SmsSendOptions | This class provides options to add custom tags and configure delivery reporting. If deliveryReportEnabled is set to true, then an event will be emitted when delivery was successful | +| SmsSendOptions | This class provides options to add custom tags and configure delivery reporting. If deliveryReportEnabled is set to true, an event is emitted when delivery is successful. | | SmsSendResult | This class contains the result from the SMS service. | ## Authenticate the client -Instantiate an `SmsClient` with your connection string. (Credential is the `Key` from the Azure portal. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). In addition, you can initialize the client with any custom HTTP client the implements the `com.azure.core.http.HttpClient` interface. +To authenticate a client, you instantiate an `SmsClient` with your connection string. For the credential, use the `Key` from the Azure portal. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). You can also initialize the client with any custom HTTP client that implements the `com.azure.core.http.HttpClient` interface. 
-Add the following code to the `main` method: +To instantiate a client, add the following code to the `main` method: ```java -// You can find your endpoint and access key from your resource in the Azure portal +// You can get your endpoint and access key from your resource in the Azure portal. String endpoint = "https://.communication.azure.com/"; AzureKeyCredential azureKeyCredential = new AzureKeyCredential(""); @@ -108,9 +110,9 @@ SmsClient smsClient = new SmsClientBuilder() .buildClient(); ``` -You can also provide the entire connection string using the connectionString() function instead of providing the endpoint and access key. +You can also provide the entire connection string by using the `connectionString` function instead of providing the endpoint and access key. ```java -// You can find your connection string from your resource in the Azure portal +// You can get your connection string from your resource in the Azure portal. String connectionString = "endpoint=https://.communication.azure.com/;accesskey="; SmsClient smsClient = new SmsClientBuilder() @@ -120,7 +122,7 @@ SmsClient smsClient = new SmsClientBuilder() ## Send a 1:1 SMS message -To send an SMS message to a single recipient, call the `send` method from the SmsClient with a single recipient phone number. You may also pass in optional parameters to specify whether the delivery report should be enabled and to set custom tags. +To send an SMS message to a single recipient, call the `send` method from the SmsClient with a single recipient phone number. You can also provide optional parameters to specify whether the delivery report should be enabled and to set custom tags. ```java SmsSendResult sendResult = smsClient.send( @@ -133,13 +135,17 @@ System.out.println("Recipient Number: " + sendResult.getTo()); System.out.println("Send Result Successful:" + sendResult.isSuccessful()); ``` -You should replace `` with an SMS enabled phone number associated with your Communication Services resource and `` with a phone number you wish to send a message to. +Make these replacements in the code: + +- Replace `` with an SMS-enabled phone number that's associated with your Communication Services resource. +- Replace `` with a phone number that you'd like to send a message to. > [!WARNING] -> Note that phone numbers should be provided in E.164 international standard format (e.g.: +14255550123). The **From** phone number may be a Short Code as well (e.g.: 23456). +> Provide phone numbers in E.164 international standard format, for example, +14255550123. The value for `` can also be a short code, for example, 23456. ## Send a 1:N SMS message with options -To send an SMS message to a list of recipients, call the `send` method with a list of recipient phone numbers. You may also pass in optional parameters to specify whether the delivery report should be enabled and to set custom tags. + +To send an SMS message to a list of recipients, call the `send` method with a list of recipient phone numbers. You can also provide optional parameters to specify whether the delivery report should be enabled and to set custom tags. ```java SmsSendOptions options = new SmsSendOptions(); options.setDeliveryReportEnabled(true); @@ -159,37 +165,40 @@ for (SmsSendResult result : sendResults) { } ``` -You should replace `` with an SMS enabled phone number associated with your Communication Services resource and `` and `` with phone number(s) you wish to send a message to. 
+Make these replacements in the code: + +- Replace `` with an SMS-enabled phone number that's associated with your Communication Services resource +- Replace `` and `` with phone numbers that you'd like to send a message to. > [!WARNING] -> Note that phone numbers should be provided in E.164 international standard format (e.g.: +14255550123). The **From** phone number may be a Short Code as well (e.g.: 23456). +> Provide phone numbers in E.164 international standard format, for example, +14255550123. The value for `` can also be a short code, for example, 23456. -The `setDeliveryReportEnabled` method is used to configure Delivery Reporting. This is useful for scenarios where you want to emit events when SMS messages are delivered. See the [Handle SMS Events](../handle-sms-events.md) quickstart to configure Delivery Reporting for your SMS messages. +The `setDeliveryReportEnabled` method is used to configure delivery reporting. This functionality is useful when you want to emit events when SMS messages are delivered. See the [Handle SMS Events](../handle-sms-events.md) quickstart to configure delivery reporting for your SMS messages. -The `setTag` method is used to apply a tag to the Delivery Report. +You can use the `setTag` method to apply a tag to the delivery report. ## Run the code -Navigate to the directory containing the **pom.xml** file and compile the project using the `mvn` command. +1. Navigate to the directory that contains the **pom.xml** file and compile the project by using the `mvn` command. -```console + ```console -mvn compile + mvn compile -``` + ``` -Then, build the package. +1. Build the package. -```console + ```console -mvn package + mvn package -``` + ``` -Run the following `mvn` command to execute the app. +1. Run the following `mvn` command to execute the app. -```console + ```console -mvn exec:java -Dexec.mainClass="com.communication.quickstart.App" -Dexec.cleanupDaemonThreads=false + mvn exec:java -Dexec.mainClass="com.communication.quickstart.App" -Dexec.cleanupDaemonThreads=false -``` + ``` diff --git a/articles/communication-services/quickstarts/sms/includes/send-sms-js.md b/articles/communication-services/quickstarts/sms/includes/send-sms-js.md index d8db07ed61cdd..7b7328a392edb 100644 --- a/articles/communication-services/quickstarts/sms/includes/send-sms-js.md +++ b/articles/communication-services/quickstarts/sms/includes/send-sms-js.md @@ -7,7 +7,7 @@ manager: ankita ms.service: azure-communication-services ms.subservice: azure-communication-services -ms.date: 06/30/2021 +ms.date: 05/25/2022 ms.topic: include ms.custom: include file ms.author: bertong @@ -18,37 +18,41 @@ Get started with Azure Communication Services by using the Communication Service Completing this quickstart incurs a small cost of a few USD cents or less in your Azure account. > [!NOTE] -> Find the finalized code for this quickstart on [GitHub](https://github.com/Azure-Samples/communication-services-javascript-quickstarts/tree/main/send-sms) +> Find the finalized code for this quickstart on [GitHub](https://github.com/Azure-Samples/communication-services-javascript-quickstarts/tree/main/send-sms). ## Prerequisites - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). -- [Node.js](https://nodejs.org/) Active LTS and Maintenance LTS versions (8.11.1 and 10.14.1 recommended). +- [Node.js](https://nodejs.org/) Active LTS and Maintenance LTS versions (8.11.1 and 10.14.1 are recommended). 
- An active Communication Services resource and connection string. [Create a Communication Services resource](../../create-communication-resource.md). -- An SMS enabled telephone number. [Get a phone number](../../telephony/get-phone-number.md). +- An SMS-enabled telephone number. [Get a phone number](../../telephony/get-phone-number.md). ### Prerequisite check - In a terminal or command window, run `node --version` to check that Node.js is installed. -- To view the phone numbers associated with your Communication Services resource, sign in to the [Azure portal](https://portal.azure.com/), locate your Communication Services resource and open the **phone numbers** tab from the left navigation pane. +- To view the phone numbers that are associated with your Communication Services resource, sign in to the [Azure portal](https://portal.azure.com/) and locate your Communication Services resource. In the navigation pane on the left, select **Phone numbers**. -## Setting up +## Set up the application environment -### Create a new Node.js Application +To set up an environment for sending messages, take the steps in the following sections. -First, open your terminal or command window, create a new directory for your app, and navigate to it. +### Create a new Node.js application -```console -mkdir sms-quickstart && cd sms-quickstart -``` +1. Open your terminal or command window, and then run the following command to create a new directory for your app and navigate to it. -Run `npm init -y` to create a **package.json** file with default settings. + ```console + mkdir sms-quickstart && cd sms-quickstart + ``` -```console -npm init -y -``` +1. Run the following command to create a **package.json** file with default settings. + + ```console + npm init -y + ``` + +1. Use a text editor to create a file called **send-sms.js** in the project root directory. -Use a text editor to create a file called **send-sms.js** in the project root directory. You'll add all the source code for this quickstart to this file in the following sections. +In the following sections, you'll add all the source code for this quickstart to the **send-sms.js** file that you just created. ### Install the package @@ -67,30 +71,34 @@ The following classes and interfaces handle some of the major features of the Az | Name | Description | | ------------------------------------- | ------------------------------------------------------------ | | SmsClient | This class is needed for all SMS functionality. You instantiate it with your subscription information, and use it to send SMS messages. | -| SmsSendRequest | This interface is the model for building the sms request (eg. configure the to and from phone numbers and the sms content). | -| SmsSendOptions | This interface provides options to configure delivery reporting. If `enableDeliveryReport` is set to `true`, then an event will be emitted when delivery is successful. | +| SmsSendRequest | This interface is the model for building the SMS request. You use it to configure the to and from phone numbers and the SMS content. | +| SmsSendOptions | This interface provides options for configuring delivery reporting. If `enableDeliveryReport` is set to `true`, an event is emitted when delivery is successful. | | SmsSendResult | This class contains the result from the SMS service. | ## Authenticate the client -Import the **SmsClient** from the SDK and instantiate it with your connection string. 
The code below retrieves the connection string for the resource from an environment variable named `COMMUNICATION_SERVICES_CONNECTION_STRING`. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). +To authenticate a client, you import the **SmsClient** from the SDK and instantiate it with your connection string. You can retrieve the connection string for the resource from an environment variable. For instance, the code in this section retrieves the connection string from the `COMMUNICATION_SERVICES_CONNECTION_STRING` environment variable. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). -Create and open a file named **send-sms.js** and add the following code: +To import the client and instantiate it: + +1. Create a file named **send-sms.js**. + +1. Add the following code to **send-sms.js**. ```javascript const { SmsClient } = require('@azure/communication-sms'); -// This code demonstrates how to fetch your connection string +// This code retrieves your connection string // from an environment variable. const connectionString = process.env['COMMUNICATION_SERVICES_CONNECTION_STRING']; -// Instantiate the SMS client +// Instantiate the SMS client. const smsClient = new SmsClient(connectionString); ``` ## Send a 1:N SMS message -To send an SMS message to a list of recipients, call the `send` function from the SmsClient with a list of recipients phone numbers (if you wish to send a message to a single recipient, only include one number in the list). Add this code to the end of **send-sms.js**: +To send an SMS message to a list of recipients, call the `send` function from the SmsClient with a list of recipient phone numbers. If you'd like to send a message to a single recipient, include only one number in the list. Add this code to the end of **send-sms.js**: ```javascript async function main() { @@ -100,8 +108,8 @@ async function main() { message: "Hello World 👋🏻 via SMS" }); - // individual messages can encounter errors during sending - // use the "successful" property to verify + // Individual messages can encounter errors during sending. + // Use the "successful" property to verify the status. for (const sendResult of sendResults) { if (sendResult.successful) { console.log("Success: ", sendResult); @@ -113,14 +121,18 @@ async function main() { main(); ``` -You should replace `` with an SMS-enabled phone number associated with your Communication Services resource and `` and `` with the phone number(s) you wish to send a message to. + +Make these replacements in the code: + +- Replace `` with an SMS-enabled phone number that's associated with your Communication Services resource. +- Replace `` and `` with the phone numbers that you'd like to send a message to. > [!WARNING] -> Note that phone numbers should be provided in E.164 international standard format (e.g.: +14255550123). The **From** phone number may be a Short Code as well (e.g.: 23456). +> Provide phone numbers in E.164 international standard format, for example, +14255550123. The value for `` can also be a short code, for example, 23456. ## Send a 1:N SMS message with options -You may also pass in an options object to specify whether the delivery report should be enabled and to set custom tags. +You can also provide an options object to specify whether the delivery report should be enabled and to set custom tags. 
```javascript @@ -130,13 +142,13 @@ async function main() { to: ["", ""], message: "Weekly Promotion!" }, { - //Optional parameters + // Optional parameters enableDeliveryReport: true, tag: "marketing" }); - // individual messages can encounter errors during sending - // use the "successful" property to verify + // Individual messages can encounter errors during sending. + // Use the "successful" property to verify the status. for (const sendResult of sendResults) { if (sendResult.successful) { console.log("Success: ", sendResult); @@ -149,17 +161,20 @@ async function main() { main(); ``` -You should replace `` with an SMS-enabled phone number associated with your Communication Services resource and `` and `` with phone number(s) you wish to send a message to. +Make these replacements in the code: + +- Replace `` with an SMS-enabled phone number that's associated with your Communication Services resource. +- Replace `` and `` with phone numbers that you'd like to send a message to. > [!WARNING] -> Note that phone numbers should be provided in E.164 international standard format (e.g.: +14255550123). The **From** phone number may be a Short Code as well (e.g.: 23456). +> Provide phone numbers in E.164 international standard format, for example, +14255550123. The value for `` can also be a short code, for example, 23456. -The `enableDeliveryReport` parameter is an optional parameter that you can use to configure Delivery Reporting. This is useful for scenarios where you want to emit events when SMS messages are delivered. See the [Handle SMS Events](../handle-sms-events.md) quickstart to configure Delivery Reporting for your SMS messages. -`tag` is an optional parameter that you can use to apply a tag to the Delivery Report. +The `enableDeliveryReport` parameter is an optional parameter that you can use to configure delivery reporting. This functionality is useful when you want to emit events when SMS messages are delivered. See the [Handle SMS Events](../handle-sms-events.md) quickstart to configure delivery reporting for your SMS messages. +The `tag` parameter is optional. You can use it to apply a tag to the delivery report. ## Run the code -Use the `node` command to run the code you added to the **send-sms.js** file. +Use the `node` command to run the code that you added to the **send-sms.js** file. ```console diff --git a/articles/communication-services/quickstarts/sms/includes/send-sms-net.md b/articles/communication-services/quickstarts/sms/includes/send-sms-net.md index 8bfd2498372df..6a5c43a9ab7b0 100644 --- a/articles/communication-services/quickstarts/sms/includes/send-sms-net.md +++ b/articles/communication-services/quickstarts/sms/includes/send-sms-net.md @@ -7,7 +7,7 @@ manager: rejooyan ms.service: azure-communication-services ms.subservice: azure-communication-services -ms.date: 06/30/2021 +ms.date: 05/25/2022 ms.topic: include ms.custom: include file ms.author: peiliu @@ -18,57 +18,59 @@ Get started with Azure Communication Services by using the Communication Service Completing this quickstart incurs a small cost of a few USD cents or less in your Azure account. > [!NOTE] -> Find the finalized code for this quickstart on [GitHub](https://github.com/Azure-Samples/communication-services-dotnet-quickstarts/tree/main/SendSMS) +> Find the finalized code for this quickstart on [GitHub](https://github.com/Azure-Samples/communication-services-dotnet-quickstarts/tree/main/SendSMS). ## Prerequisites - An Azure account with an active subscription. 
[Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). -- The latest version [.NET Core SDK](https://dotnet.microsoft.com/download/dotnet-core) for your operating system. +- The latest version of [.NET Core SDK](https://dotnet.microsoft.com/download/dotnet-core) for your operating system. - An active Communication Services resource and connection string. [Create a Communication Services resource](../../create-communication-resource.md). -- An SMS enabled telephone number. [Get a phone number](../../telephony/get-phone-number.md). +- An SMS-enabled telephone number. [Get a phone number](../../telephony/get-phone-number.md). ### Prerequisite check - In a terminal or command window, run the `dotnet` command to check that the .NET SDK is installed. -- To view the phone numbers associated with your Communication Services resource, sign in to the [Azure portal](https://portal.azure.com/), locate your Communication Services resource and open the **phone numbers** tab from the left navigation pane. +- To view the phone numbers that are associated with your Communication Services resource, sign in to the [Azure portal](https://portal.azure.com/) and locate your Communication Services resource. In the navigation pane on the left, select **Phone numbers**. -## Setting up +## Set up the application environment + +To set up an environment for sending messages, take the steps in the following sections. ### Create a new C# application -In a console window (such as cmd, PowerShell, or Bash), use the `dotnet new` command to create a new console app with the name `SmsQuickstart`. This command creates a simple "Hello World" C# project with a single source file: **Program.cs**. +1. In a console window, such as cmd, PowerShell, or Bash, use the `dotnet new` command to create a new console app with the name `SmsQuickstart`. This command creates a simple "Hello World" C# project with a single source file, **Program.cs**. -```console -dotnet new console -o SmsQuickstart -``` + ```console + dotnet new console -o SmsQuickstart + ``` -Change your directory to the newly created app folder and use the `dotnet build` command to compile your application. +1. Change your directory to the newly created app folder and use the `dotnet build` command to compile your application. -```console -cd SmsQuickstart -dotnet build -``` + ```console + cd SmsQuickstart + dotnet build + ``` ### Install the package -While still in the application directory, install the Azure Communication Services SMS SDK for .NET package by using the `dotnet add package` command. +1. While still in the application directory, install the Azure Communication Services SMS SDK for .NET package by using the following command. -```console -dotnet add package Azure.Communication.Sms --version 1.0.0 -``` + ```console + dotnet add package Azure.Communication.Sms --version 1.0.0 + ``` -Add a `using` directive to the top of **Program.cs** to include the `Azure.Communication` namespace. +1. Add a `using` directive to the top of **Program.cs** to include the `Azure.Communication` namespace. 
-```csharp + ```csharp -using System; -using System.Collections.Generic; + using System; + using System.Collections.Generic; -using Azure; -using Azure.Communication; -using Azure.Communication.Sms; + using Azure; + using Azure.Communication; + using Azure.Communication.Sms; -``` + ``` ## Object model @@ -77,16 +79,16 @@ The following classes and interfaces handle some of the major features of the Az | Name | Description | | ------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | | SmsClient | This class is needed for all SMS functionality. You instantiate it with your subscription information, and use it to send SMS messages. | -| SmsSendOptions | This class provides options to configure delivery reporting. If enable_delivery_report is set to True, then an event will be emitted when delivery was successful | +| SmsSendOptions | This class provides options for configuring delivery reporting. If enable_delivery_report is set to True, an event is emitted when delivery is successful. | | SmsSendResult | This class contains the result from the SMS service. | ## Authenticate the client - Open **Program.cs** in a text editor and replace the body of the `Main` method with code to initialize an `SmsClient` with your connection string. The code below retrieves the connection string for the resource from an environment variable named `COMMUNICATION_SERVICES_CONNECTION_STRING`. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). +Open **Program.cs** in a text editor and replace the body of the `Main` method with code to initialize an `SmsClient` with your connection string. The following code retrieves the connection string for the resource from an environment variable named `COMMUNICATION_SERVICES_CONNECTION_STRING`. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). ```csharp -// This code demonstrates how to fetch your connection string +// This code retrieves your connection string // from an environment variable. string connectionString = Environment.GetEnvironmentVariable("COMMUNICATION_SERVICES_CONNECTION_STRING"); @@ -95,7 +97,7 @@ SmsClient smsClient = new SmsClient(connectionString); ## Send a 1:1 SMS message -To send an SMS message to a single recipient, call the `Send` or `SendAsync` function from the SmsClient. Add this code to the end of `Main` method in **Program.cs**: +To send an SMS message to a single recipient, call the `Send` or `SendAsync` function from the SmsClient. Add this code to the end of the `Main` method in **Program.cs**: ```csharp SmsSendResult sendResult = smsClient.Send( @@ -106,13 +108,18 @@ SmsSendResult sendResult = smsClient.Send( Console.WriteLine($"Sms id: {sendResult.MessageId}"); ``` -You should replace `` with an SMS-enabled phone number associated with your Communication Services resource and `` with the phone number you wish to send a message to. + +Make these replacements in the code: + +- Replace `` with an SMS-enabled phone number that's associated with your Communication Services resource. +- Replace `` with the phone number that you'd like to send a message to. > [!WARNING] -> Note that phone numbers should be provided in E.164 international standard format (e.g.: +14255550123). The **From** phone number may be a Short Code as well (e.g.: 23456). 
+> Provide phone numbers in E.164 international standard format, for example, +14255550123. The value for `` can also be a short code, for example, 23456. ## Send a 1:N SMS message with options -To send an SMS message to a list of recipients, call the `Send` or `SendAsync` function from the SmsClient with a list of recipient's phone numbers. You may also pass in optional parameters to specify whether the delivery report should be enabled and to set custom tags. + +To send an SMS message to a list of recipients, call the `Send` or `SendAsync` function from the SmsClient with a list of recipient phone numbers. You can also provide optional parameters to specify whether the delivery report should be enabled and to set custom tags. ```csharp Response> response = smsClient.Send( @@ -132,14 +139,17 @@ foreach (SmsSendResult result in results) } ``` -You should replace `` with an SMS-enabled phone number associated with your Communication Services resource and `` and `` with phone number(s) you wish to send a message to. +Make these replacements in the code: + +- Replace `` with an SMS-enabled phone number that's associated with your Communication Services resource. +- Replace `` and `` with phone numbers that you'd like to send a message to. > [!WARNING] -> Note that phone numbers should be provided in E.164 international standard format (e.g.: +14255550123). The **From** phone number may be a Short Code as well (e.g.: 23456). +> Provide phone numbers in E.164 international standard format, for example, +14255550123. The value for `` can also be a short code, for example, 23456. -The `enableDeliveryReport` parameter is an optional parameter that you can use to configure Delivery Reporting. This is useful for scenarios where you want to emit events when SMS messages are delivered. See the [Handle SMS Events](../handle-sms-events.md) quickstart to configure Delivery Reporting for your SMS messages. +The `enableDeliveryReport` parameter is an optional parameter that you can use to configure delivery reporting. This functionality is useful when you want to emit events when SMS messages are delivered. See the [Handle SMS Events](../handle-sms-events.md) quickstart to configure delivery reporting for your SMS messages. -`Tag` is used to apply a tag to the Delivery Report +You can use the `Tag` parameter to apply a tag to the delivery report. ## Run the code @@ -149,6 +159,6 @@ Run the application from your application directory with the `dotnet run` comman dotnet run ``` -## Sample Code +## Sample code -You can download the sample app from [GitHub](https://github.com/Azure-Samples/communication-services-dotnet-quickstarts/tree/main/SendSMS) +You can download the sample app from [GitHub](https://github.com/Azure-Samples/communication-services-dotnet-quickstarts/tree/main/SendSMS). 
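+
+## Handle send failures
+
+The quickstart code doesn't include error handling. The following sketch is an optional addition rather than part of the sample app: it wraps the 1:1 send call in a `try`/`catch` for `RequestFailedException`, the exception type that the Azure SDK for .NET throws when the service rejects a request. The phone number placeholders are illustrative and need the same replacements described earlier.
+
+```csharp
+try
+{
+    // Same call as in the quickstart, wrapped so that service-side failures are reported.
+    SmsSendResult sendResult = smsClient.Send(
+        from: "<from-phone-number>",
+        to: "<to-phone-number>",
+        message: "Hello World via SMS");
+
+    Console.WriteLine($"Sms id: {sendResult.MessageId}");
+}
+catch (RequestFailedException ex)
+{
+    // Reports errors such as an unauthorized sender number or a malformed recipient number.
+    Console.WriteLine($"SMS send failed. Status: {ex.Status}, ErrorCode: {ex.ErrorCode}, Message: {ex.Message}");
+}
+```
+
+`RequestFailedException` is defined in the `Azure` namespace, which the quickstart already imports with its `using` directives.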
diff --git a/articles/communication-services/quickstarts/sms/includes/send-sms-python.md b/articles/communication-services/quickstarts/sms/includes/send-sms-python.md index 197ac29f05b9f..e8e63ec2c03dc 100644 --- a/articles/communication-services/quickstarts/sms/includes/send-sms-python.md +++ b/articles/communication-services/quickstarts/sms/includes/send-sms-python.md @@ -7,7 +7,7 @@ manager: ankita ms.service: azure-communication-services ms.subservice: azure-communication-services -ms.date: 06/30/2021 +ms.date: 05/25/2022 ms.topic: include ms.custom: include file ms.author: lakshmans @@ -18,46 +18,50 @@ Get started with Azure Communication Services by using the Communication Service Completing this quickstart incurs a small cost of a few USD cents or less in your Azure account. > [!NOTE] -> Find the finalized code for this quickstart on [GitHub](https://github.com/Azure-Samples/communication-services-python-quickstarts/tree/main/send-sms-quickstart) +> Find the finalized code for this quickstart on [GitHub](https://github.com/Azure-Samples/communication-services-python-quickstarts/tree/main/send-sms-quickstart). ## Prerequisites - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). - [Python](https://www.python.org/downloads/) 2.7 or 3.6+. - An active Communication Services resource and connection string. [Create a Communication Services resource](../../create-communication-resource.md). -- An SMS enabled telephone number. [Get a phone number](../../telephony/get-phone-number.md). +- An SMS-enabled telephone number. [Get a phone number](../../telephony/get-phone-number.md). ### Prerequisite check - In a terminal or command window, run the `python --version` command to check that Python is installed. -- To view the phone numbers associated with your Communication Services resource, sign in to the [Azure portal](https://portal.azure.com/), locate your Communication Services resource and open the **phone numbers** tab from the left navigation pane. +- To view the phone numbers that are associated with your Communication Services resource, sign in to the [Azure portal](https://portal.azure.com/) and locate your Communication Services resource. In the navigation pane on the left, select **Phone numbers**. -## Setting up +## Set up the application environment + +To set up an environment for sending messages, take the steps in the following sections. ### Create a new Python application -Open your terminal or command window, create a new directory for your app, and navigate to it. +1. Open your terminal or command window. Then use the following command to create a new directory for your app and navigate to it. -```console -mkdir sms-quickstart && cd sms-quickstart -``` + ```console + mkdir sms-quickstart && cd sms-quickstart + ``` -Use a text editor to create a file called **send-sms.py** in the project root directory and add the structure for the program, including basic exception handling. You'll add all the source code for this quickstart to this file in the following sections. +1. Use a text editor to create a file called **send-sms.py** in the project root directory and add the structure for the program, including basic exception handling. 
-```python -import os -from azure.communication.sms import SmsClient + ```python + import os + from azure.communication.sms import SmsClient -try: - # Quickstart code goes here -except Exception as ex: - print('Exception:') - print(ex) -``` + try: + # Quickstart code goes here. + except Exception as ex: + print('Exception:') + print(ex) + ``` + +In the following sections, you'll add all the source code for this quickstart to the **send-sms.py** file that you just created. ### Install the package -While still in the application directory, install the Azure Communication Services SMS SDK for Python package by using the `pip install` command. +While still in the application directory, install the Azure Communication Services SMS SDK for Python package by using the following command. ```console pip install azure-communication-sms @@ -77,18 +81,18 @@ The following classes and interfaces handle some of the major features of the Az Instantiate an **SmsClient** with your connection string. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). ```python -# Create the SmsClient object which will be used to send SMS messages +# Create the SmsClient object that you use to send SMS messages. sms_client = SmsClient.from_connection_string() ``` -For simplicity we are using connection strings in this quickstart, but in production environments we recommend using [service principals](../../../quickstarts/identity/service-principal.md). +For simplicity, this quickstart uses connection strings, but in production environments, we recommend using [service principals](../../../quickstarts/identity/service-principal.md). -## Send a 1:1 SMS Message +## Send a 1:1 SMS message -To send an SMS message to a single recipient, call the ```send``` method from the **SmsClient** with a single recipient phone number. You may also pass in optional parameters to specify whether the delivery report should be enabled and to set custom tags. Add this code to the end of `try` block in **send-sms.py**: +To send an SMS message to a single recipient, call the `send` method from the **SmsClient** with a single recipient phone number. You can also provide optional parameters to specify whether the delivery report should be enabled and to set custom tags. Add this code to the end of the `try` block in **send-sms.py**: ```python -# calling send() with sms values +# Call send() with SMS values. sms_responses = sms_client.send( from_="", to="", @@ -98,18 +102,21 @@ sms_responses = sms_client.send( ``` -You should replace `` with an SMS enabled phone number associated with your communication service and `` with the phone number you wish to send a message to. +Make these replacements in the code: + +- Replace `` with an SMS-enabled phone number that's associated with your communication service. +- Replace `` with the phone number that you'd like to send a message to. > [!WARNING] -> Note that phone numbers should be provided in E.164 international standard format (e.g.: +14255550123). The **From** phone number may be a Short Code as well (e.g.: 23456). +> Provide phone numbers in E.164 international standard format, for example, +14255550123. The value for `` can also be a short code, for example, 23456. -## Send a 1:N SMS Message +## Send a 1:N SMS message -To send an SMS message to a list of recipients, call the ```send``` method from the **SmsClient** with a list of recipient's phone numbers. 
You may also pass in optional parameters to specify whether the delivery report should be enabled and to set custom tags. Add this code to the end of `try` block in **send-sms.py**: +To send an SMS message to a list of recipients, call the `send` method from the **SmsClient** with a list of recipient phone numbers. You can also provide optional parameters to specify whether the delivery report should be enabled and to set custom tags. Add this code to the end of the `try` block in **send-sms.py**: ```python -# calling send() with sms values +# Call send() with SMS values. sms_responses = sms_client.send( from_="", to=["", ""], @@ -119,25 +126,29 @@ sms_responses = sms_client.send( ``` -You should replace `` with an SMS enabled phone number associated with your communication service and `` `` with phone number(s) you wish to send a message to. +Make these replacements in the code: + +- Replace `` with an SMS-enabled phone number that's associated with your communication service. +- Replace `` and `` with phone numbers that you'd like to send a message to. > [!WARNING] -> Note that phone numbers should be provided in E.164 international standard format (e.g.: +14255550123). The **From** phone number may be a Short Code as well (e.g.: 23456). +> Provide phone numbers in E.164 international standard format, for example, +14255550123. The value for `` can also be a short code, for example, 23456. -## Optional Parameters +## Optional parameters -The `enable_delivery_report` parameter is an optional parameter that you can use to configure Delivery Reporting. This is useful for scenarios where you want to emit events when SMS messages are delivered. See the [Handle SMS Events](../handle-sms-events.md) quickstart to configure Delivery Reporting for your SMS messages. +The `enable_delivery_report` parameter is an optional parameter that you can use to configure delivery reporting. This functionality is useful when you want to emit events when SMS messages are delivered. See the [Handle SMS Events](../handle-sms-events.md) quickstart to configure delivery reporting for your SMS messages. -The `tag` parameter is an optional parameter that you can use to apply a tag to the Delivery Report. +The `tag` parameter is an optional parameter that you can use to apply a tag to the delivery report. ## Run the code + Run the application from your application directory with the `python` command. ```console python send-sms.py ``` -The complete Python script should look something like: +The complete Python script should look something like the following code: ```python @@ -145,9 +156,9 @@ import os from azure.communication.sms import SmsClient try: - # Create the SmsClient object which will be used to send SMS messages + # Create the SmsClient object that you use to send SMS messages. sms_client = SmsClient.from_connection_string("") - # calling send() with sms values + # Call send() with SMS values. sms_responses = sms_client.send( from_="", to="", diff --git a/articles/communication-services/quickstarts/sms/send.md b/articles/communication-services/quickstarts/sms/send.md index 41ed5380d2c1c..37341e9840de8 100644 --- a/articles/communication-services/quickstarts/sms/send.md +++ b/articles/communication-services/quickstarts/sms/send.md @@ -1,16 +1,20 @@ --- title: Quickstart - Send an SMS message -titleSuffix: An Azure Communication Services quickstart -description: Learn how to send an SMS message using Azure Communication Services. 
+titleSuffix: Azure Communication Services +description: "In this quickstart, you'll learn how to send an SMS message by using Azure Communication Services. See code examples in C#, JavaScript, Java, and Python." author: probableprime manager: chpalm services: azure-communication-services ms.author: rifox -ms.date: 06/30/2021 +ms.date: 05/25/2022 ms.topic: quickstart ms.service: azure-communication-services ms.subservice: sms -ms.custom: tracking-python, devx-track-js, mode-other +ms.custom: + - tracking-python + - devx-track-js + - mode-other + - kr2b-contr-experiment zone_pivot_groups: acs-js-csharp-java-python --- # Quickstart: Send an SMS message @@ -18,8 +22,8 @@ zone_pivot_groups: acs-js-csharp-java-python [!INCLUDE [Regional Availability Notice](../../includes/regional-availability-include.md)] > [!IMPORTANT] -> SMS messages can be sent to and received from United States phone numbers. Phone numbers located in other geographies are not yet supported by Communication Services SMS. -> For more information, see **[Phone number types](../../concepts/telephony/plan-solution.md)**. +> SMS messages can be sent to and received from United States phone numbers. Phone numbers that are located in other geographies are not yet supported by Azure Communication Services SMS. +> For more information, see [Phone number types](../../concepts/telephony/plan-solution.md).

                  >[!VIDEO https://www.youtube.com/embed/YEyxSZqzF4o] @@ -50,7 +54,7 @@ If you want to clean up and remove a Communication Services subscription, you ca ## Next steps -In this quickstart, you learned how to send SMS messages using Azure Communication Services. +In this quickstart, you learned how to send SMS messages by using Communication Services. > [!div class="nextstepaction"] > [Receive SMS and Delivery Report Events](./handle-sms-events.md) diff --git a/articles/communication-services/quickstarts/ui-library/includes/get-started-call/android.md b/articles/communication-services/quickstarts/ui-library/includes/get-started-call/android.md index 29220da4258ac..3cc302a8a3d02 100644 --- a/articles/communication-services/quickstarts/ui-library/includes/get-started-call/android.md +++ b/articles/communication-services/quickstarts/ui-library/includes/get-started-call/android.md @@ -62,6 +62,7 @@ For `Android Studio (2020.*)` the `repositories` are in `settings.gradle` `depen If you are using old versions of `Android Studio (4.*)` then the `repositories` will be in project level `build.gradle` `allprojects{}`. ```groovy +// dependencyResolutionManagement repositories { ... mavenCentral() @@ -218,7 +219,7 @@ The following classes and interfaces handle some of the major features of the Az | [CallComposite](#create-call-composite) | Composite component that renders a call experience with participant gallery and controls. | | [CallCompositeBuilder](#create-call-composite) | Builder to build CallComposite with options. | | [GroupCallOptions](#group-call) | Passed in CallComposite launch to start group call. | -| [TeamsMeetingOptions](#teams-meeting) | Passed to CallComposite launch to join Teams meeting meeting. | +| [TeamsMeetingOptions](#teams-meeting) | Passed to CallComposite launch to join Teams meeting. | | [ThemeConfiguration](#apply-theme-configuration) | Injected as optional in CallCompositeBuilder to change primary color of composite. | | [LocalizationConfiguration](#apply-localization-configuration) | Injected as optional in CallCompositeBuilder to set language of composite. | @@ -419,7 +420,7 @@ To change the language of composite, create a `LocalizationConfiguration` with ` ```kotlin import com.azure.android.communication.ui.calling.models.LocalizationConfiguration -// LanguageCode.values() provides list of supported languages +// CommunicationUISupportedLocale provides list of supported locale val callComposite: CallComposite = CallCompositeBuilder().localization( LocalizationConfiguration(Locale(CommunicationUISupportedLocale.EN)) @@ -431,13 +432,18 @@ val callComposite: CallComposite = ```java import com.azure.android.communication.ui.calling.models.LocalizationConfiguration; -// LanguageCode.values() provides list of supported languages +// CommunicationUISupportedLocale provides list of supported locale CallComposite callComposite = new CallCompositeBuilder() - .localization(new LocalizationConfiguration(new Locale(CommunicationUISupportedLocale.EN))) + .localization(new LocalizationConfiguration(CommunicationUISupportedLocale.EN)) .build(); ``` +----- +### Additional Features + +The list of [use cases](../../../../concepts/ui-library/ui-library-use-cases.md) has detailed information of additional features. 
+ ----- ### Add notifications into your mobile app diff --git a/articles/communication-services/quickstarts/voice-video-calling/get-started-with-voice-video-calling-custom-teams-client.md b/articles/communication-services/quickstarts/voice-video-calling/get-started-with-voice-video-calling-custom-teams-client.md index 9a296533e6131..59e4044262f00 100644 --- a/articles/communication-services/quickstarts/voice-video-calling/get-started-with-voice-video-calling-custom-teams-client.md +++ b/articles/communication-services/quickstarts/voice-video-calling/get-started-with-voice-video-calling-custom-teams-client.md @@ -13,6 +13,8 @@ ms.custom: mode-other # QuickStart: Add 1:1 video calling to your customized Teams application +[!INCLUDE [Public Preview](../../../communication-services/includes/public-preview-include-document.md)] + [!INCLUDE [Video calling with JavaScript](./includes/custom-teams-endpoint/voice-video-calling-cte-javascript.md)] ## Clean up resources diff --git a/articles/communication-services/quickstarts/voice-video-calling/includes/custom-teams-endpoint/voice-video-calling-cte-javascript.md b/articles/communication-services/quickstarts/voice-video-calling/includes/custom-teams-endpoint/voice-video-calling-cte-javascript.md index 7fdecf044e5fe..c1a13c86953aa 100644 --- a/articles/communication-services/quickstarts/voice-video-calling/includes/custom-teams-endpoint/voice-video-calling-cte-javascript.md +++ b/articles/communication-services/quickstarts/voice-video-calling/includes/custom-teams-endpoint/voice-video-calling-cte-javascript.md @@ -28,10 +28,10 @@ mkdir calling-quickstart && cd calling-quickstart ### Install the package Use the `npm install` command to install the Azure Communication Services Calling SDK for JavaScript. > [!IMPORTANT] -> This quickstart uses the Azure Communication Services Calling SDK version `1.3.2-beta.1`. +> This quickstart uses the Azure Communication Services Calling SDK version `1.5.4-beta.1`. ```console npm install @azure/communication-common --save -npm install @azure/communication-calling@1.3.2-beta.1 --save +npm install @azure/communication-calling@1.5.4-beta.1 --save ``` ### Set up the app framework This quickstart uses webpack to bundle the application assets. Run the following command to install the `webpack`, `webpack-cli` and `webpack-dev-server` npm packages and list them as development dependencies in your `package.json`: @@ -408,4 +408,4 @@ On the first tab, enter the Azure Communication Services user identity of the se From the second tab, select the "Accept Call" button. The call will be answered and connected. Tabs should show the similar result like the following image: :::image type="content" source="../../media/javascript/1-on-1-video-calling-d.png" alt-text="Screenshot is showing two tabs, with ongoing call between two Teams users, each logged in the individual tab." lightbox="../../media/javascript/1-on-1-video-calling-d.png"::: -Both tabs are now successfully in a 1:1 video call. Both users can hear each other's audio and see each other video stream. \ No newline at end of file +Both tabs are now successfully in a 1:1 video call. Both users can hear each other's audio and see each other video stream. 
diff --git a/articles/communication-services/quickstarts/voice-video-calling/includes/get-started/get-started-javascript.md b/articles/communication-services/quickstarts/voice-video-calling/includes/get-started/get-started-javascript.md index 95fdf248d35a7..4d9942510b3e9 100644 --- a/articles/communication-services/quickstarts/voice-video-calling/includes/get-started/get-started-javascript.md +++ b/articles/communication-services/quickstarts/voice-video-calling/includes/get-started/get-started-javascript.md @@ -23,7 +23,7 @@ You can download the sample app from [GitHub](https://github.com/Azure-Samples/c [!INCLUDE [Calling with JavaScript](./get-started-javascript-setup.md)] -Here's the html: +Here's the HTML that we need to add to the `index.html` file we just created: ```html @@ -59,7 +59,7 @@ Here's the html: Hang Up - + ``` diff --git a/articles/communication-services/quickstarts/voice-video-calling/includes/teams-interop/teams-interop-javascript.md b/articles/communication-services/quickstarts/voice-video-calling/includes/teams-interop/teams-interop-javascript.md index bb0c8981a2b7a..8a84b7382b6ad 100644 --- a/articles/communication-services/quickstarts/voice-video-calling/includes/teams-interop/teams-interop-javascript.md +++ b/articles/communication-services/quickstarts/voice-video-calling/includes/teams-interop/teams-interop-javascript.md @@ -44,7 +44,7 @@ The text box will be used to enter the Teams meeting context and the button will Hang Up - + @@ -52,7 +52,7 @@ The text box will be used to enter the Teams meeting context and the button will ## Enable the Teams UI controls -Replace content of client.js file with following snippet. +Replace the content of the app.js file with the following snippet. ```javascript import { CallClient } from "@azure/communication-calling"; @@ -115,13 +115,13 @@ You can also get the required meeting information from the **Join Meeting** URL ## Run the code -Webpack users can use the `webpack-dev-server` to build and run your app. Run the following command to bundle your application host on a local webserver: +Run the following command to bundle your application and host it on a local webserver: ```console -npx webpack-dev-server --entry ./client.js --output bundle.js --debug --devtool inline-source-map +npx parcel index.html ``` -Open your browser and navigate to http://localhost:8080/. You should see the following: +Open your browser and navigate to http://localhost:1234/. You should see the following: :::image type="content" source="../../media/javascript/acs-join-teams-meeting-quickstart.PNG" alt-text="Screenshot of the completed JavaScript Application."::: diff --git a/articles/communication-services/quickstarts/voice-video-calling/includes/video-calling/video-calling-javascript.md b/articles/communication-services/quickstarts/voice-video-calling/includes/video-calling/video-calling-javascript.md index cf3d0465d9c7f..5572b3ea82916 100644 --- a/articles/communication-services/quickstarts/voice-video-calling/includes/video-calling/video-calling-javascript.md +++ b/articles/communication-services/quickstarts/voice-video-calling/includes/video-calling/video-calling-javascript.md @@ -14,7 +14,7 @@ If you'd like to skip ahead to the end, you can download this quickstart as a sa ## Prerequisites - Obtain an Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F).
-- [Node.js](https://nodejs.org/en/) Active LTS and Maintenance LTS versions (8.11.1 and 10.14.1) +- [Node.js](https://nodejs.org/en/) Active LTS and Maintenance LTS versions - Create an active Communication Services resource. [Create a Communication Services resource](../../../create-communication-resource.md?pivots=platform-azp&tabs=windows). - Create a User Access Token to instantiate the call client. [Learn how to create and manage user access tokens](../../../access-tokens.md?pivots=programming-language-csharp). diff --git a/articles/communication-services/toc.yml b/articles/communication-services/toc.yml index 1fc4edf08a3fc..84d0c45ae7500 100644 --- a/articles/communication-services/toc.yml +++ b/articles/communication-services/toc.yml @@ -96,6 +96,8 @@ items: items: - name: Virtual visit scenarios href: tutorials/virtual-visits.md + - name: Virtual event scenarios + href: tutorials/events-playbook.md - name: Use Postman to send SMS messages href: tutorials/postman-tutorial.md - name: Sign an HTTP request with HMAC using C# @@ -105,7 +107,7 @@ items: - name: Prepare a Node.js web app for Calling href: tutorials/building-app-start.md - name: Export SDK telemetry to Application Insights - href: quickstarts/telemetry-application-insights.md + href: quickstarts/telemetry-application-insights.md - name: Add file sharing to your application with UI Library href: tutorials/file-sharing-tutorial.md - name: Concepts @@ -153,7 +155,7 @@ items: href: concepts/telephony/emergency-calling-concept.md - name: Direct routing infrastructure requirements href: concepts/telephony/direct-routing-infrastructure.md - - name: Pair Session Border Controllers + - name: Connect to existing telephony href: concepts/telephony/direct-routing-provisioning.md - name: Certified Session Border Controllers href: concepts/telephony/certified-session-border-controllers.md @@ -252,8 +254,12 @@ items: href: concepts/network-traversal.md - name: Data residency and user privacy href: concepts/privacy.md + - name: Azure Government + href: concepts/government.md - name: Pricing href: concepts/pricing.md + - name: Email Pricing + href: concepts/email-pricing.md - name: SMS Pricing href: concepts/sms-pricing.md - name: PSTN Pricing @@ -290,6 +296,8 @@ items: href: how-tos/calling-sdk/teams-interoperability.md - name: Subscribe to SDK events href: how-tos/calling-sdk/events.md + - name: Check if user is running supported browser + href: how-tos/calling-sdk/browser-support.md - name: Using the UI Library items: - name: Setup Localization @@ -323,7 +331,7 @@ items: - name: Calling (JavaScript) href: /javascript/api/azure-communication-services/@azure/communication-calling/ - name: Calling (Android/Java) - href: /java/api/com.azure.android.communication.calling?view=communication-services-java-android + href: /java/api/com.azure.android.communication.calling - name: Calling (iOS) href: /objectivec/communication-services/calling/ - name: Chat libraries @@ -371,11 +379,11 @@ items: - name: UI Library items: - name: UI Library (JavaScript) - href: /javascript/api/@azure/communication-react/?view=azure-node-latest + href: /javascript/api/@azure/communication-react/ - name: UI Library (iOS) - href: https://htmlpreview.github.io/?https://github.com/Azure/azure-sdk-for-ios/blob/communication/CommunicationUIJazzy/build/jazzy/AzureCommunicationUI/index.html + href: https://azure.github.io/azure-sdk-for-ios/AzureCommunicationUICalling/index.html - name: UI Library (Android) - href: 
https://azure.github.io/azure-sdk-for-android/azure-communication-mobileui/index.html + href: https://azure.github.io/azure-sdk-for-android/azure-communication-mobileui/com/azure/android/communication/ui/calling/package-summary.html - name: Resources items: diff --git a/articles/communication-services/tutorials/events-playbook.md b/articles/communication-services/tutorials/events-playbook.md new file mode 100644 index 0000000000000..25c0bf7909ecd --- /dev/null +++ b/articles/communication-services/tutorials/events-playbook.md @@ -0,0 +1,122 @@ +--- +title: Build a custom event management platform with Microsoft Teams, Graph and Azure Communication Services +titleSuffix: An Azure Communication Services tutorial +description: Learn how to use Microsoft Teams, Graph and Azure Communication Services to build a custom event management platform. +author: ddematheu2 +manager: chpalm +services: azure-communication-services + +ms.author: dademath +ms.date: 03/31/2022 +ms.topic: tutorial +ms.service: azure-communication-services +ms.subservice: teams-interop +--- + +# Build a custom event management platform with Microsoft Teams, Graph and Azure Communication Services + +The goal of this document is to reduce the time it takes for Event Management Platforms to apply the power of Microsoft Teams Webinars through integration with Graph APIs and the ACS UI Library. The target audience is developers and decision makers. To achieve the goal, this document provides the following two functions: 1) an aid to help event management platforms quickly decide what level of integration would be right for them, and 2) a step-by-step end-to-end QuickStart to speed up implementation. + +## What are virtual events and event management platforms? + +Microsoft empowers event platforms to integrate event capabilities using [Microsoft Teams](/microsoftteams/quick-start-meetings-live-events), [Graph](/graph/api/application-post-onlinemeetings?tabs=http&view=graph-rest-beta) and [Azure Communication Services](../overview.md). Virtual Events are a communication modality where event organizers schedule and configure a virtual environment for event presenters and participants to engage with content through voice, video, and chat. Event management platforms enable users to configure events and attendees to participate in those events within their platform, applying in-platform capabilities and gamification. Learn more about [Teams Meetings, Webinars and Live Events](/microsoftteams/quick-start-meetings-live-events), which are used throughout this article to enable virtual event scenarios. + +## What are the building blocks of an event management platform? + +Event platforms require three core building blocks to deliver a virtual event experience. + +### 1. Event Scheduling and Management + +To get started, event organizers must schedule and configure the event. This process creates the virtual container that event attendees and presenters will enter to interact. As part of configuration, organizers might choose to add registration requirements for the event. Microsoft provides two patterns for organizers to create events: + +- Teams Client (Web or Desktop): Organizers can directly create events using their Teams client, where they can choose a time and place, configure registration, and send the event to a list of attendees. + +- Microsoft Graph: Programmatically, event platforms can schedule and configure a Teams event on behalf of a user by using their Microsoft 365 license. + +### 2.
Attendee experience + +Event attendees are presented with an experience that enables them to attend, participate, and engage with an event’s content. This experience might include capabilities like watching content, sharing their camera stream, asking questions, responding to polls, and more. Microsoft provides two options for attendees to consume events powered by Teams and Azure Communication Services: + +- Teams Client (Web or Desktop): Attendees can directly join events by using a Teams client and a provided join link. They get access to the full Teams experience. + +- Azure Communication Services: Attendees can join events through a custom client powered by [Azure Communication Services](../overview.md) using [Teams Interoperability](../concepts/join-teams-meeting.md). This client can be directly embedded into an Event Platform so that attendees never need to leave the experience. This experience can be built from the ground up using Azure Communication Services SDKs for [calling](../quickstarts/voice-video-calling/get-started-teams-interop.md?pivots=platform-web) and [chat](../quickstarts/chat/meeting-interop.md?pivots=platform-web) or by applying our low-code [UI Library](../quickstarts/ui-library/get-started-composites.md?pivots=platform-web&tabs=kotlin). + +### 3. Host & Organizer experience + +Event hosts and organizers require the ability to present content, manage attendees (mute, change roles, etc.), and manage the event (start, end, etc.). + +- Teams Client (Web or Desktop): Presenters can join using the fully fledged Teams client for web or mobile. The Teams client provides presenters with a full set of capabilities to deliver their content. Learn more about [presenter capabilities for Teams](https://support.microsoft.com/office/present-in-a-live-event-in-teams-d58fc9db-ff5b-4633-afb3-b4b2ddef6c0a). + +## Building a custom solution for event management with Azure Communication Services and Microsoft Graph + +Throughout the rest of this tutorial, we will focus on how to use Azure Communication Services and Microsoft Graph to build a custom event management platform. We will be using the sample architecture below. Based on that architecture, we will focus on setting up the scheduling and registration flows and on embedding the attendee experience directly in the event platform so attendees can join the event from there. + +:::image type="content" source="./media/event-management-platform-architecture.svg" alt-text="Diagram showing sample architecture for event management platform"::: + +## Leveraging Microsoft Graph to schedule events and register attendees + +Microsoft Graph enables event management platforms to empower organizers to schedule and manage their events directly through the event management platform. For attendees, event management platforms can build custom registration flows right on their platform that register the attendee for the event and generate unique credentials for them to join the Teams-hosted event. + +>[!NOTE] +>Each required Graph API has different required scopes. Ensure that your application has the correct scopes to access the data. + +### Scheduling registration-enabled events with Microsoft Graph + +1. Authorize the application to use Graph APIs on behalf of a service account. This authorization is required so that the application can use credentials to interact with your tenant to schedule events and register attendees. + + 1. Create an account that will own the meetings and is branded appropriately.
This is the account that will create the events and receive notifications for them. We recommend not using a personal production account, given the overhead it might incur in the form of reminders. + + 1. As part of the application setup, the service account is used to sign in to the solution once. With this permission, the application can retrieve and store an access token on behalf of the service account that will own the meetings. Your application will need to store the tokens generated from the login and place them in a secure location such as a key vault. The application will need to store both the access token and the refresh token. Learn more about [auth tokens](../../active-directory/develop/access-tokens.md) and [refresh tokens](../../active-directory/develop/refresh-tokens.md). + + 1. The application will require "on behalf of" permissions with the [offline scope](../../active-directory/develop/v2-permissions-and-consent.md#offline_access) to act on behalf of the service account for the purpose of creating meetings. Individual Graph APIs require different scopes; learn more in the links below as we introduce the required APIs. + + 1. Refresh tokens can be revoked in the event of a breach or account termination. + + >[!NOTE] + >Authorization is required both by developers for testing and by organizers who will be using your event platform to set up their events. + +2. The organizer signs in to the Contoso platform to create an event and generate a registration URL. To enable these capabilities, developers should use: + + 1. The [Create Calendar Event API](/graph/api/user-post-events?tabs=http&view=graph-rest-1.0) to POST the new event to be created. The returned Event object will contain the join URL required for the next step. Set the following parameters: `isonlinemeeting: true` and `onlineMeetingProvider: "teamsForBusiness"`. Set a time zone for the event by using the `Prefer` header. + + 1. Next, use the [Create Online Meeting API](/graph/api/application-post-onlinemeetings?tabs=http&view=graph-rest-beta) to `GET` the online meeting information using the join URL generated in the step above. The `OnlineMeeting` object will contain the `meetingId` required for the registration steps. + + 1. By using these APIs, developers create a calendar event that shows up in the Organizer’s calendar and the Teams online meeting that attendees will join. + +>[!NOTE] +>There is a known issue with duplicate calendar entries for organizers when using the Calendar and Online Meeting APIs. + +3. To enable registration for an event, Contoso can send a POST request to the [External Meeting Registration API](/graph/api/resources/externalmeetingregistration?view=graph-rest-beta). The API requires Contoso to pass in the `meetingId` of the `OnlineMeeting` created above. Registration is optional. You can set options on who can register. + +### Register attendees with Microsoft Graph + +Event management platforms can use a custom registration flow to register attendees. This flow is powered by the [External Meeting Registrant API](/graph/api/externalmeetingregistrant-post?tabs=http&view=graph-rest-beta). By using the API, Contoso will receive a unique `Teams Join URL` for each attendee. This URL is used as part of the attendee experience, through either Teams or Azure Communication Services, to have the attendee join the meeting.
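To make the flow above concrete, the following is a rough JavaScript sketch of those three Graph calls: create the calendar event with a Teams online meeting, look up the online meeting by its join URL to get the `meetingId`, and register an attendee. It assumes an access token for the service account has already been acquired and stored as described earlier. The endpoint paths, property names, and payload shapes are assumptions based on the linked API references (the online meeting lookup and registrant calls are on the beta endpoint), so verify them against those docs before relying on this sketch.

```javascript
const GRAPH = "https://graph.microsoft.com";

// Small wrapper around fetch that adds the bearer token and JSON headers.
async function graphFetch(url, accessToken, options = {}) {
  const response = await fetch(url, {
    ...options,
    headers: {
      "Authorization": `Bearer ${accessToken}`,
      "Content-Type": "application/json",
      ...(options.headers || {})
    }
  });
  if (!response.ok) {
    throw new Error(`Graph call failed: ${response.status} ${await response.text()}`);
  }
  return response.json();
}

// 1. Create the calendar event with a Teams online meeting attached.
function createEvent(accessToken) {
  return graphFetch(`${GRAPH}/v1.0/me/events`, accessToken, {
    method: "POST",
    headers: { "Prefer": 'outlook.timezone="Pacific Standard Time"' },
    body: JSON.stringify({
      subject: "Contoso launch event", // illustrative values
      start: { dateTime: "2022-07-12T17:00:00", timeZone: "Pacific Standard Time" },
      end:   { dateTime: "2022-07-12T18:00:00", timeZone: "Pacific Standard Time" },
      isOnlineMeeting: true,
      onlineMeetingProvider: "teamsForBusiness"
    })
  });
}

// 2. Look up the online meeting by the join URL returned on the event,
//    to obtain the meetingId needed for registration.
async function getOnlineMeeting(accessToken, joinWebUrl) {
  const filter = encodeURIComponent(`JoinWebUrl eq '${joinWebUrl}'`);
  const page = await graphFetch(`${GRAPH}/beta/me/onlineMeetings?$filter=${filter}`, accessToken);
  return page.value[0];
}

// 3. Register an attendee; Graph returns a unique join URL for that registrant.
function registerAttendee(accessToken, meetingId, registrant) {
  return graphFetch(`${GRAPH}/beta/me/onlineMeetings/${meetingId}/registration/registrants`, accessToken, {
    method: "POST",
    body: JSON.stringify(registrant) // for example { firstName, lastName, email }
  });
}
```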
+ +### Communicate with your attendees using Azure Communication Services + +Through Azure Communication Services, developers can use SMS and Email capabilities to send reminders to attendees for the event they have registered for. Communication can also include event confirmations as well as information for joining and participating. +- [SMS capabilities](../quickstarts/sms/send.md) enable you to send text messages to your attendees. +- [Email capabilities](../quickstarts/email/send-email.md) support direct communication to your attendees using custom domains. + +### Leverage Azure Communication Services to build a custom attendee experience + +>[!NOTE] +> There are limitations when using Azure Communication Services as part of a Teams Webinar experience. For more details, visit our [documentation](../concepts/join-teams-meeting.md#limitations-and-known-issues). + +The attendee experience can be directly embedded into an application or platform using [Azure Communication Services](../overview.md) so that your attendees never need to leave your platform. It provides low-level calling and chat SDKs which support [interoperability with Teams Events](../concepts/teams-interop.md), as well as a turn-key UI Library which can be used to reduce development time and easily embed communications. Azure Communication Services enables developers to have flexibility with the type of solution they need. Review [limitations](../concepts/join-teams-meeting.md#limitations-and-known-issues) of using Azure Communication Services for webinar scenarios. + +1. To start, developers can leverage Microsoft Graph APIs to retrieve the join URL. This URL is provided uniquely per attendee during [registration](/graph/api/externalmeetingregistrant-post?tabs=http&view=graph-rest-beta). Alternatively, it can be [requested for a given meeting](/graph/api/onlinemeeting-get?tabs=http&view=graph-rest-beta). + +2. Before developers dive into using [Azure Communication Services](../overview.md), they must [create a resource](../quickstarts/create-communication-resource.md?pivots=platform-azp&tabs=windows). + +3. Once a resource is created, developers must [generate access tokens](../quickstarts/access-tokens.md?pivots=programming-language-javascript) for attendees to access Azure Communication Services. We recommend using a [trusted service architecture](../concepts/client-and-server-architecture.md). + +4. Developers can leverage the [headless SDKs](../concepts/teams-interop.md) or the [UI Library](https://azure.github.io/communication-ui-library/) with the join URL to join the Teams meeting through [Teams Interoperability](../concepts/teams-interop.md). Details are in the table below: + +|Headless SDKs | UI Library | |----------------------------------------|---------------------------------------| | Developers can leverage the [calling](../quickstarts/voice-video-calling/get-started-teams-interop.md?pivots=platform-javascript) and [chat](../quickstarts/chat/meeting-interop.md?pivots=platform-javascript) SDKs to join a Teams meeting with your custom client | Developers can choose between the [call + chat](https://azure.github.io/communication-ui-library/?path=/docs/composites-meeting-basicexample--basic-example) or pure [call](https://azure.github.io/communication-ui-library/?path=/docs/composites-call-basicexample--basic-example) and [chat](https://azure.github.io/communication-ui-library/?path=/docs/composites-chat-basicexample--basic-example) composites to build their experience.
Alternatively, developers can leverage [composable components](https://azure.github.io/communication-ui-library/?path=/docs/quickstarts-uicomponents--page) to build a custom Teams interop experience.| + + +>[!NOTE] +>Azure Communication Services is a consumption-based service billed through Azure. For more information on pricing, visit our resources. \ No newline at end of file diff --git a/articles/communication-services/tutorials/file-sharing-tutorial.md b/articles/communication-services/tutorials/file-sharing-tutorial.md index d207747681af5..2cd15cefd41b2 100644 --- a/articles/communication-services/tutorials/file-sharing-tutorial.md +++ b/articles/communication-services/tutorials/file-sharing-tutorial.md @@ -15,6 +15,8 @@ ms.subservice: chat # Enable file sharing using UI Library and Azure Blob Storage +[!INCLUDE [Public Preview Notice](../includes/public-preview-include.md)] + In this tutorial, we'll be configuring the Azure Communication Services UI Library Chat Composite to enable file sharing. The UI Library Chat Composite provides a set of rich components and UI controls that can be used to enable file sharing. We will be leveraging Azure Blob Storage to enable the storage of the files that are shared through the chat thread. >[!IMPORTANT] @@ -52,7 +54,7 @@ The diagram below shows a typical flow of a file sharing scenario for both uploa ## Setup File Storage using Azure Blob -You can follow the tutorial [Upload file to Azure Blob Storage with an Azure Function](https://docs.microsoft.com/azure/developer/javascript/how-to/with-web-app/azure-function-file-upload) to write the backend code required for file sharing. +You can follow the tutorial [Upload file to Azure Blob Storage with an Azure Function](/azure/developer/javascript/how-to/with-web-app/azure-function-file-upload) to write the backend code required for file sharing. Once implemented, you can call this Azure Function inside the `uploadHandler` function to upload files to Azure Blob Storage, as shown in the sketch below. For the remainder of the tutorial, we will assume you have generated the function by using the Azure Blob Storage tutorial linked above.
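The tutorial itself does not include this snippet, but as a rough sketch under those assumptions, an `uploadHandler` implementation could forward each selected file to the Azure Function like this. The function URL and the response shape (`{ url }`) are assumptions about the function you generated from the linked tutorial, and the exact handler signature expected by the UI Library should be taken from the sections that follow.

```javascript
// Placeholder: HTTP endpoint of the Azure Function generated from the linked tutorial.
const UPLOAD_FUNCTION_URL = "https://<your-function-app>.azurewebsites.net/api/upload";

// POST a browser File to the Azure Function, which writes it to Azure Blob
// Storage and (in this sketch) responds with the blob URL of the uploaded file.
async function uploadFileToAzureBlob(file) {
  const form = new FormData();
  form.append("file", file, file.name);

  const response = await fetch(UPLOAD_FUNCTION_URL, { method: "POST", body: form });
  if (!response.ok) {
    throw new Error(`Upload failed with status ${response.status}`);
  }

  // Assumed response shape: { "url": "<blob URL>" }
  const { url } = await response.json();
  return url;
}
```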
@@ -330,4 +332,4 @@ You may also want to: - [Add chat to your app](../quickstarts/chat/get-started.md) - [Creating user access tokens](../quickstarts/access-tokens.md) - [Learn about client and server architecture](../concepts/client-and-server-architecture.md) -- [Learn about authentication](../concepts/authentication.md) +- [Learn about authentication](../concepts/authentication.md) \ No newline at end of file diff --git a/articles/communication-services/tutorials/media/event-management-platform-architecture.svg b/articles/communication-services/tutorials/media/event-management-platform-architecture.svg new file mode 100644 index 0000000000000..97c877c73b592 --- /dev/null +++ b/articles/communication-services/tutorials/media/event-management-platform-architecture.svg @@ -0,0 +1,2064 @@ + [SVG diagram markup (2,064 added lines) omitted. Recoverable labels: User, Client Application, Azure Communication Services, Presenter, Organizer, Teams, M365 Calendar (Graph), Microsoft Graph; flows: Register, Schedule Event, Start Event, Join Event, Email reminder, Voice, Video & Text Communication.] diff --git a/articles/communication-services/tutorials/media/sample-builder/azure-complete-deployment.png b/articles/communication-services/tutorials/media/virtual-visits/azure-complete-deployment.png similarity index 100% rename from articles/communication-services/tutorials/media/sample-builder/azure-complete-deployment.png rename to articles/communication-services/tutorials/media/virtual-visits/azure-complete-deployment.png diff --git a/articles/communication-services/tutorials/media/sample-builder/azure-resource-final.png b/articles/communication-services/tutorials/media/virtual-visits/azure-resource-final.png similarity index 100% rename from articles/communication-services/tutorials/media/sample-builder/azure-resource-final.png rename to articles/communication-services/tutorials/media/virtual-visits/azure-resource-final.png diff --git a/articles/communication-services/tutorials/media/sample-builder/azure-resources.png b/articles/communication-services/tutorials/media/virtual-visits/azure-resources.png similarity index 100% rename from articles/communication-services/tutorials/media/sample-builder/azure-resources.png rename to articles/communication-services/tutorials/media/virtual-visits/azure-resources.png diff --git a/articles/communication-services/tutorials/media/virtual-visits/bookings-acs-app-integration-url.png b/articles/communication-services/tutorials/media/virtual-visits/bookings-acs-app-integration-url.png new file mode 100644 index 0000000000000..897fdef5d64f9 Binary files /dev/null and b/articles/communication-services/tutorials/media/virtual-visits/bookings-acs-app-integration-url.png differ diff --git a/articles/communication-services/tutorials/media/virtual-visits/bookings-services-online-meeting.png b/articles/communication-services/tutorials/media/virtual-visits/bookings-services-online-meeting.png new file mode 100644 index 0000000000000..d122c7e42f2c4 Binary files /dev/null and b/articles/communication-services/tutorials/media/virtual-visits/bookings-services-online-meeting.png differ diff --git a/articles/communication-services/tutorials/media/virtual-visits/bookings-services.png b/articles/communication-services/tutorials/media/virtual-visits/bookings-services.png new file mode 100644 index 0000000000000..8131a1982a0bb Binary files /dev/null and b/articles/communication-services/tutorials/media/virtual-visits/bookings-services.png differ diff --git a/articles/communication-services/tutorials/media/sample-builder/bookings-url.png b/articles/communication-services/tutorials/media/virtual-visits/bookings-url.png similarity index 100% rename from articles/communication-services/tutorials/media/sample-builder/bookings-url.png rename to articles/communication-services/tutorials/media/virtual-visits/bookings-url.png diff --git a/articles/communication-services/tutorials/media/sample-builder/sample-builder-arm.png b/articles/communication-services/tutorials/media/virtual-visits/sample-builder-arm.png similarity index 100% rename from articles/communication-services/tutorials/media/sample-builder/sample-builder-arm.png rename to articles/communication-services/tutorials/media/virtual-visits/sample-builder-arm.png diff --git a/articles/communication-services/tutorials/media/sample-builder/sample-builder-landing.png b/articles/communication-services/tutorials/media/virtual-visits/sample-builder-landing.png similarity index 100% rename from articles/communication-services/tutorials/media/sample-builder/sample-builder-landing.png rename to articles/communication-services/tutorials/media/virtual-visits/sample-builder-landing.png diff --git a/articles/communication-services/tutorials/media/sample-builder/sample-builder-start.png b/articles/communication-services/tutorials/media/virtual-visits/sample-builder-start.png similarity index 100% rename from articles/communication-services/tutorials/media/sample-builder/sample-builder-start.png rename to articles/communication-services/tutorials/media/virtual-visits/sample-builder-start.png diff --git a/articles/communication-services/tutorials/media/sample-builder/virtual-visit-arch.svg b/articles/communication-services/tutorials/media/virtual-visits/virtual-visit-arch.svg similarity index 98% rename from articles/communication-services/tutorials/media/sample-builder/virtual-visit-arch.svg rename to articles/communication-services/tutorials/media/virtual-visits/virtual-visit-arch.svg index 61b62b6372732..1b40351c4af7e 100644 --- a/articles/communication-services/tutorials/media/sample-builder/virtual-visit-arch.svg +++ b/articles/communication-services/tutorials/media/virtual-visits/virtual-visit-arch.svg @@ -1,598 +1,598 @@ [Whole-file SVG rewrite (598 lines removed and re-added) omitted. Recoverable labels: User, Client Application, Azure Communication Services, Teams, Microsoft 365 Calendar (Graph), Microsoft 365 Booking; flows: Schedule Visit & Receive Reminder, Start Meeting, Join Meeting, Voice, Video & Text Communication.] diff --git a/articles/communication-services/tutorials/media/sample-builder/virtual-visit-options.svg b/articles/communication-services/tutorials/media/virtual-visits/virtual-visit-options.svg similarity index 98% rename from articles/communication-services/tutorials/media/sample-builder/virtual-visit-options.svg rename to articles/communication-services/tutorials/media/virtual-visits/virtual-visit-options.svg index a1f1fba751860..52ea4667f0524 100644 --- a/articles/communication-services/tutorials/media/sample-builder/virtual-visit-options.svg +++ b/articles/communication-services/tutorials/media/virtual-visits/virtual-visit-options.svg @@ -1,999 +1,999 @@ [Whole-file SVG rewrite (999 lines removed and re-added) omitted. Recoverable labels: External User, Hosting Employee, Custom Azure App, Teams; columns: Microsoft 365, Microsoft 365 & Azure Hybrid, Azure Custom.] diff --git a/articles/communication-services/tutorials/virtual-visits.md b/articles/communication-services/tutorials/virtual-visits.md index 3d1de17d3c41b..27dab4634f56a 100644 --- a/articles/communication-services/tutorials/virtual-visits.md +++ b/articles/communication-services/tutorials/virtual-visits.md @@ -6,7 +6,7 @@ manager: chpalm services: azure-communication-services ms.author: chpalm -ms.date: 01/10/2022 +ms.date: 05/24/2022 ms.topic: tutorial ms.service: azure-communication-services ms.custom: event-tier1-build-2022 @@ -29,7 +29,7 @@ Azure and Teams are interoperable. This interoperability gives organizations cho - **Microsoft 365 + Azure hybrid.** Combine Microsoft 365 Teams and Bookings with a custom Azure application for the consumer experience. Organizations take advantage of Microsoft 365's employee familiarity but customize and embed the consumer visit experience in their own application. - **Azure custom.** Build the entire solution on Azure primitives: the business experience, the consumer experience, and scheduling systems. -![Diagram of virtual visit implementation options](./media/sample-builder/virtual-visit-options.svg) +![Diagram of virtual visit implementation options](./media/virtual-visits/virtual-visit-options.svg) These three **implementation options** are columns in the table below, while each row provides a **use case** and the **enabling technologies**. @@ -49,7 +49,7 @@ There are other ways to customize and combine Microsoft tools to deliver a virtu ## Extend Microsoft 365 with Azure The rest of this tutorial focuses on Microsoft 365 and Azure hybrid solutions. These hybrid configurations are popular because they combine employee familiarity of Microsoft 365 with the ability to customize the consumer experience. They’re also a good launching point to understanding more complex and customized architectures. The diagram below shows user steps for a virtual visit: -![High-level architecture of a hybrid virtual visits solution](./media/sample-builder/virtual-visit-arch.svg) +![High-level architecture of a hybrid virtual visits solution](./media/virtual-visits/virtual-visit-arch.svg) 1. Consumer schedules the visit using Microsoft 365 Bookings. 2. Consumer gets a visit reminder through SMS and Email. 3. Provider joins the visit using Microsoft Teams. @@ -63,40 +63,55 @@ In this section we’re going to use a Sample Builder tool to deploy a Microsoft This sample takes advantage of the Microsoft 365 Bookings app to power the consumer scheduling experience and create meetings for providers. Thus the first step is creating a Bookings calendar and getting the Booking page URL from https://outlook.office.com/bookings/calendar.
-![Booking configuration experience](./media/sample-builder/bookings-url.png) +![Screenshot of Booking configuration experience](./media/virtual-visits/bookings-url.png) + +Make sure the online meeting option is enabled for the calendar by going to https://outlook.office.com/bookings/services. + +![Screenshot of Booking services configuration experience](./media/virtual-visits/bookings-services.png) + +Then make sure "Add online meeting" is enabled. + +![Screenshot of Booking services online meeting configuration experience](./media/virtual-visits/bookings-services-online-meeting.png) + ### Step 2 – Sample Builder Use the Sample Builder to customize the consumer experience. You can reach the Sample Builder using this [link](https://aka.ms/acs-sample-builder), or by navigating to the page within the Azure Communication Services resource in the Azure portal. Step through the Sample Builder wizard and configure whether Chat or Screen Sharing should be enabled. Change themes and text to match your application. You can preview your configuration live from the page in both Desktop and Mobile browser form-factors. -[ ![Sample builder start page](./media/sample-builder/sample-builder-start.png)](./media/sample-builder/sample-builder-start.png#lightbox) +[ ![Screenshot of Sample builder start page](./media/virtual-visits/sample-builder-start.png)](./media/virtual-visits/sample-builder-start.png#lightbox) ### Step 3 - Deploy At the end of the Sample Builder wizard, you can **Deploy to Azure** or download the code as a zip. The sample builder code is publicly available on [GitHub](https://github.com/Azure-Samples/communication-services-virtual-visits-js). -[ ![Sample builder deployment page](./media/sample-builder/sample-builder-landing.png)](./media/sample-builder/sample-builder-landing.png#lightbox) +[ ![Screenshot of Sample builder deployment page](./media/virtual-visits/sample-builder-landing.png)](./media/virtual-visits/sample-builder-landing.png#lightbox) The deployment launches an Azure Resource Manager (ARM) template that deploys the themed application you configured. -![Sample builder arm template](./media/sample-builder/sample-builder-arm.png) +![Screenshot of Sample builder arm template](./media/virtual-visits/sample-builder-arm.png) After walking through the ARM template, you can **Go to resource group**. -![Screenshot of a completed Azure Resource Manager Template](./media/sample-builder/azure-complete-deployment.png) +![Screenshot of a completed Azure Resource Manager Template](./media/virtual-visits/azure-complete-deployment.png) ### Step 4 - Test The Sample Builder creates three resources in the selected Azure subscription. The **App Service** is the consumer front end, powered by Azure Communication Services. -![produced azure resources in azure portal](./media/sample-builder/azure-resources.png) +![Screenshot of produced azure resources in azure portal](./media/virtual-visits/azure-resources.png) + +Opening the App Service’s URL and navigating to `https:///VISIT` allows you to try out the consumer experience and join a Teams meeting. `https:///BOOK` embeds the Booking experience for consumer scheduling. + +![Screenshot of final view of azure app service](./media/virtual-visits/azure-resource-final.png) + +### Step 5 - Set deployed app URL in Bookings -Opening the App Service’s URL and navigating to `https:///VISITS` allows you to try out the consumer experience and join a Teams meeting. `https:///BOOK` embeds the Booking experience for consumer scheduling.
+ Copy your application URL into your calendar's Business information settings by going to https://outlook.office.com/bookings/businessinformation. + -![final view of azure app service](./media/sample-builder/azure-resource-final.png) +![Screenshot of final view of bookings business information](./media/virtual-visits/bookings-acs-app-integration-url.png) ## Going to production The Sample Builder gives you the basics of a Microsoft 365 and Azure virtual visit: consumer scheduling via Bookings, consumer joins via custom app, and the provider joins via Teams. However, there are several things to consider as you take this scenario to production. ### Launching patterns -Consumers want to jump directly to the virtual visit from the scheduling reminders they receive from Bookings. In Bookings, you can provide a URL prefix that will be used in reminders. If your prefix is `https:///VISITS`, Bookings will point users to `https:///VISITS?=.` +Consumers want to jump directly to the virtual visit from the scheduling reminders they receive from Bookings. In Bookings, you can provide a URL prefix that will be used in reminders. If your prefix is `https:///VISIT`, Bookings will point users to `https:///VISIT?MEETINGURL=.` ### Integrate into your existing app The app service generated by the Sample Builder is a stand-alone artifact, designed for desktop and mobile browsers. However, you may already have a website or mobile application and need to migrate these experiences to that existing codebase. The code generated by the Sample Builder should help, but you can also use: diff --git a/articles/confidential-computing/confidential-containers.md b/articles/confidential-computing/confidential-containers.md index ab92a2020fc29..bac8d3c9d347b 100644 --- a/articles/confidential-computing/confidential-containers.md +++ b/articles/confidential-computing/confidential-containers.md @@ -37,7 +37,7 @@ You can enable confidential containers in Azure Partners and Open Source Softwar ### Fortanix -[Fortanix](https://www.fortanix.com/) has portal and Command Line Interface (CLI) experiences to convert their containerized applications to SGX-capable confidential containers. You don't need to modify or recompile the application. Fortanix provides the flexibility to run and manage a broad set of applications. You can use existing applications, new enclave-native applications, and pre-packaged applications. Start with Fortanix's [Enclave Manager](https://em.fortanix.com/) UI or [REST APIs](https://www.fortanix.com/api/em/). Create confidential containers using the Fortanix's [quickstart guide for AKS](https://hubs.li/Q017JnNt0). +[Fortanix](https://www.fortanix.com/) has portal and Command Line Interface (CLI) experiences to convert your containerized applications to SGX-capable confidential containers. You don't need to modify or recompile the application. Fortanix provides the flexibility to run and manage a broad set of applications. You can use existing applications, new enclave-native applications, and pre-packaged applications. Start with Fortanix's [Enclave Manager](https://em.fortanix.com/) UI or [REST APIs](https://www.fortanix.com/api/). Create confidential containers using Fortanix's [quickstart guide for AKS](https://hubs.li/Q017JnNt0).
![Diagram of Fortanix deployment process, showing steps to move applications to confidential containers and deploy.](./media/confidential-containers/fortanix-confidential-containers-flow.png) @@ -68,7 +68,7 @@ Get started with a sample Redis Cache and Python Custom Application [here](https [Gramine](https://grapheneproject.io/) is a lightweight guest OS, designed to run a single Linux application with minimal host requirements. Gramine can run applications in an isolated environment. There's tooling support for converting existing Docker container applications to Gramine Shielded Containers (GSCs). -For more information, see the Gramine's [sample application and deployment on AKS](https://graphene.readthedocs.io/en/latest/cloud-deployment.html#azure-kubernetes-service-aks) +For more information, see Gramine's [sample application and deployment on AKS](https://github.com/gramineproject/contrib/tree/master/Examples/aks-attestation) ### Occlum @@ -98,4 +98,4 @@ Do you have questions about your implementation? Do you want to become an enable - [Deploy AKS cluster with Intel SGX Confidential VM Nodes](./confidential-enclave-nodes-aks-get-started.md) - [Microsoft Azure Attestation](../attestation/overview.md) - [Intel SGX Confidential Virtual Machines](virtual-machine-solutions-sgx.md) -- [Azure Kubernetes Service (AKS)](../aks/intro-kubernetes.md) \ No newline at end of file +- [Azure Kubernetes Service (AKS)](../aks/intro-kubernetes.md) diff --git a/articles/confidential-computing/confidential-nodes-aks-addon.md b/articles/confidential-computing/confidential-nodes-aks-addon.md index 3e4818f865f03..e9a6b5e854670 100644 --- a/articles/confidential-computing/confidential-nodes-aks-addon.md +++ b/articles/confidential-computing/confidential-nodes-aks-addon.md @@ -21,7 +21,7 @@ The SGX Device plugin implements the Kubernetes device plugin interface for Encl ## PSW with SGX quote helper -Enclave applications that do remote attestation need to generate a quote. The quote provides cryptographic proof of the identity and the state of the application, along with the enclave's host environment. Quote generation relies on certain trusted software components from Intel, which are part of the SGX Platform Software Components (PSW/DCAP). This PSW is packaged as a daemon set that runs per node. You can use the PSW when requesting attestation quote from enclave apps. Using the AKS provided service helps better maintain the compatibility between the PSW and other SW components in the host. Read the feature details below. +Enclave applications that do remote attestation need to generate a quote. The quote provides cryptographic proof of the identity and the state of the application, along with the enclave's host environment. Quote generation relies on certain trusted software components from Intel, which are part of the SGX Platform Software Components (PSW/DCAP). This PSW is packaged as a daemon set that runs per node. You can use the PSW when requesting an attestation quote from enclave apps. Using the AKS provided service helps better maintain the compatibility between the PSW and other SW components in the host. Read the feature details below. [Enclave applications](confidential-computing-enclaves.md) that do remote attestation require a generated quote. This quote provides cryptographic proof of the application's identity, state, and running environment. The generation requires trusted software components that are part of Intel's PSW.
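If the confcom add-on isn't enabled on your cluster yet, one way to turn it on, together with the quote helper described in this section, is with the Azure CLI. The following is only a sketch that assumes a cluster with DCsv2/DCsv3 nodes; the cluster and resource group names are placeholders:

```azurecli
# Enable the confcom add-on and the SGX quote helper on an existing AKS cluster (placeholder names).
az aks enable-addons --addons confcom --enable-sgxquotehelper --name MyAKSCluster --resource-group MyResourceGroup
```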
@@ -30,13 +30,13 @@ Enclave applications that do remote attestation need to generate a quote. The qu > [!NOTE] > This feature is only required for DCsv2/DCsv3 VMs that use specialized Intel SGX hardware. -Intel supports two attestation modes to run the quote generation. For how to choose which type, see the [attestation type differences](#attestation-type-differences). +Intel supports two attestation modes to run the quote generation. For how to choose which type, see the [attestation type differences](#attestation-type-differences). - **in-proc**: hosts the trusted software components inside the enclave application process. This method is useful when you are performing local attestation (between 2 enclave apps in a single VM node) - **out-of-proc**: hosts the trusted software components outside of the enclave application. This is a preferred method when performing remote attestation. -SGX applications built using Open Enclave SDK by default use in-proc attestation mode. SGX-based applications allow out-of-proc and require extra hosting. These applications expose the required components such as Architectural Enclave Service Manager (AESM), external to the application. +SGX applications built using Open Enclave SDK use in-proc attestation mode by default. SGX-based applications allow out-of-proc and require extra hosting. These applications expose the required components such as Architectural Enclave Service Manager (AESM), external to the application. It's highly recommended to use this feature. This feature enhances uptime for your enclave apps during Intel Platform updates or DCAP driver updates. @@ -44,9 +44,9 @@ It's highly recommended to use this feature. This feature enhances uptime for yo No updates are required for quote generation components of PSW for each containerized application. -With out-of-proc, container owners don’t need to manage updates within their container. Container owners instead rely on the provided interface that invokes the centralized service outside of the container. The provider update sand manages this service. +With out-of-proc, container owners don’t need to manage updates within their container. Container owners instead rely on the provided interface that invokes the centralized service outside of the container. -For out-of-proc, there's not a concern of failures because of out-of-date PSW components. The quote generation involves the trusted SW components - Quoting Enclave (QE) & Provisioning Certificate Enclave (PCE), which are part of the trusted computing base (TCB). These SW components must be up to date to maintain the attestation requirements. The provider manages the updates to these components. Customers never have to deal with attestation failures because of out-of-date trusted SW components within their container. +For out-of-proc, there's no concern of failures because of out-of-date PSW components. The quote generation involves the trusted SW components - Quoting Enclave (QE) & Provisioning Certificate Enclave (PCE), which are part of the trusted computing base (TCB). These SW components must be up to date to maintain the attestation requirements. The provider manages the updates to these components. Customers never have to deal with attestation failures because of out-of-date trusted SW components within their container. Out-of-proc better uses EPC memory. In in-proc attestation mode, each enclave application instantiates the copy of QE and PCE for remote attestation.
With out-of-proc, the container doesn't host those enclaves, and doesn’t consume enclave memory from the container quota. @@ -60,21 +60,26 @@ The out-of-proc attestation model works for confidential workloads. The quote re ![Diagram of quote requestor and quote generation interface.](./media/confidential-nodes-out-of-proc-attestation/aesmmanager.png) -The abstract model applies to confidential workload scenarios. This model uses already available AESM service. AESM is containerized and deployed as a daemon set across the Kubernetes cluster. Kubernetes guarantees a single instance of an AESM service container, wrapped in a pod, to be deployed on each agent node. The new SGX Quote daemon set has a dependency on the `sgx-device-plugin` daemon set, since the AESM service container would request EPC memory from `sgx-device-plugin` for launching QE and PCE enclaves. +The abstract model applies to confidential workload scenarios. This model uses the already available AESM service. AESM is containerized and deployed as a daemon set across the Kubernetes cluster. Kubernetes guarantees a single instance of an AESM service container, wrapped in a pod, to be deployed on each agent node. The new SGX Quote daemon set has a dependency on the `sgx-device-plugin` daemon set, since the AESM service container would request EPC memory from `sgx-device-plugin` for launching QE and PCE enclaves. Each container needs to opt in to use out-of-proc quote generation by setting the environment variable `SGX_AESM_ADDR=1` during creation. The container also must include the package `libsgx-quote-ex`, which directs the request to default Unix domain socket An application can still use the in-proc attestation as before. However, you can't simultaneously use both in-proc and out-of-proc within an application. The out-of-proc infrastructure is available by default and consumes resources. > [!NOTE] -> If you are using a Intel SGX wrapper software(OSS/ISV) to run you unmodified containers the attestation interaction with hardware is typically handled for your higher level apps. Please refer to the attestation implementation per provider. +> If you are using an Intel SGX wrapper software (OSS/ISV) to run your unmodified containers, the attestation interaction with hardware is typically handled for your higher-level apps. Please refer to the attestation implementation per provider. ### Sample implementation -The below docker file is a sample for an Open Enclave-based application. Set the `SGX_AESM_ADDR=1` environment variable in the Docker file. Or, set the variable in the deployment file. Follow this sample for the Docker file and deployment YAML details. +By default, this service isn't enabled for your AKS cluster with the "confcom" add-on. Update the add-on with the following command: + +```azurecli +az aks addon update --addon confcom --name "YourAKSClusterName" --resource-group "YourResourceGroup" --enable-sgxquotehelper +``` +Once the service is up, use the following Docker sample for an Open Enclave-based application to validate the flow. Set the `SGX_AESM_ADDR=1` environment variable in the Docker file. Or, set the variable in the deployment file. Follow this sample for the Docker file and deployment YAML details. > [!Note] -> The **libsgx-quote-ex** package from Intel needs to be packaged in the application container for out-of-proc attestation to work properly. +> The **libsgx-quote-ex** package from Intel needs to be packaged in the application container for out-of-proc attestation to work properly.
The instructions below have the details. ```yaml # Refer to Intel_SGX_Installation_Guide_Linux for detail @@ -104,7 +109,7 @@ RUN apt-get update && apt-get install -y \ WORKDIR /opt/openenclave/share/openenclave/samples/remote_attestation RUN . /opt/openenclave/share/openenclave/openenclaverc \ && make build -# this sets the flag for out of proc attestation mode. alternatively you can set this flag on the deployment files +# this sets the flag for out of proc attestation mode, alternatively you can set this flag on the deployment files ENV SGX_AESM_ADDR=1 CMD make run @@ -138,6 +143,9 @@ spec: path: /var/run/aesmd ``` +The deployment should succeed and allow your apps to perform remote attestation using the SGX Quote Helper service. + + ## Next Steps - [Set up Confidential Nodes (DCsv2/DCsv3-Series) on AKS](./confidential-enclave-nodes-aks-get-started.md) diff --git a/articles/confidential-ledger/create-client-certificate.md b/articles/confidential-ledger/create-client-certificate.md index 3e3d2089e2b8d..9e0d2bd52f87e 100755 --- a/articles/confidential-ledger/create-client-certificate.md +++ b/articles/confidential-ledger/create-client-certificate.md @@ -17,7 +17,7 @@ You will need a certificate in PEM format. You can create more than one certific ## OpenSSL -We recommending using OpenSSL to generate certificates. If you have git installed, you can run OpenSSL in the git shell. Otherwise, you can install OpenSSL for your OS. +We recommend using OpenSSL to generate certificates. If you have git installed, you can run OpenSSL in the git shell. Otherwise, you can install OpenSSL for your OS. - **Windows**: Install [chocolatey for Windows](https://chocolatey.org/install), open a PowerShell terminal windows in admin mode, and run `choco install openssl`. Alternatively, you can install OpenSSL for Windows directly from [here](http://gnuwin32.sourceforge.net/packages/openssl.htm). - **Linux**: Run `sudo apt-get install openssl` @@ -31,4 +31,4 @@ openssl req -new -key "privkey_name.pem" -x509 -nodes -days 365 -out "cert.pem" ## Next steps -- [Overview of Microsoft Azure confidential ledger](overview.md) \ No newline at end of file +- [Overview of Microsoft Azure confidential ledger](overview.md) diff --git a/articles/connectors/connectors-create-api-azureblobstorage.md b/articles/connectors/connectors-create-api-azureblobstorage.md index 63e8aa195eb9b..e4f50bf008a83 100644 --- a/articles/connectors/connectors-create-api-azureblobstorage.md +++ b/articles/connectors/connectors-create-api-azureblobstorage.md @@ -5,7 +5,7 @@ services: logic-apps ms.suite: integration ms.reviewer: estfan, azla ms.topic: how-to -ms.date: 04/18/2022 +ms.date: 05/28/2022 tags: connectors --- @@ -15,11 +15,6 @@ From your workflow in Azure Logic Apps, you can access and manage files stored a You can connect to Blob Storage from both **Logic App (Consumption)** and **Logic App (Standard)** resource types. You can use the connector with logic app workflows in multi-tenant Azure Logic Apps, single-tenant Azure Logic Apps, and the integration service environment (ISE). With **Logic App (Standard)**, you can use either the *built-in* **Azure Blob** operations or the **Azure Blob Storage** managed connector operations. -> [!IMPORTANT] -> A logic app workflow can't directly access a storage account behind a firewall if they're both in the same region. -> As a workaround, your logic app and storage account can be in different regions. 
For more information about enabling -> access from Azure Logic Apps to storage accounts behind firewalls, review the [Access storage accounts behind firewalls](#access-storage-accounts-behind-firewalls) section later in this topic. - ## Prerequisites - An Azure account and subscription. If you don't have an Azure subscription, [sign up for a free Azure account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). @@ -42,7 +37,7 @@ You can connect to Blob Storage from both **Logic App (Consumption)** and **Logi ## Connector reference -For more technical details about this connector, such as triggers, actions, and limits, review the [connector's reference page](/connectors/azureblobconnector/). If you don't want to use the Blob operations, you can use the [use HTTP trigger or action along with a a managed identity for blob operations instead](#access-blob-storage-with-managed-identities). +For more technical details about this connector, such as triggers, actions, and limits, review the [connector's reference page](/connectors/azureblobconnector/). @@ -116,7 +111,7 @@ To add a Blob trigger to a logic app workflow in single-tenant Azure Logic Apps, | Task | Path syntax | |------|-------------| - | Check the root folder for a newly added blob. | **<*container-name*>** | + | Check the root folder and its nested subfolders for a newly added blob. | **<*container-name*>** | | Check the root folder for changes to a specific blob. | **<*container-name*>/<*blob-name*>.<*blob-extension*>** | | Check the root folder for changes to any blobs with the same extension, for example, **.txt**. | **<*container-name*>/{name}.txt**

                  **Important**: Make sure that you use **{name}** as a literal. | | Check the root folder for changes to any blobs with names starting with a specific string, for example, **Sample-**. | **<*container-name*>/Sample-{name}**

                  **Important**: Make sure that you use **{name}** as a literal. | @@ -156,7 +151,7 @@ To add a Blob action to a logic app workflow in multi-tenant Azure Logic Apps, f This example starts with the [**Recurrence** trigger](connectors-native-recurrence.md). -1. Under the trigger or action where you want to add the Blob action, select **New step** or **Add an action**, if between steps. +1. Under the trigger or action where you want to add the Blob action, select **New step** or **Add an action**, if between steps. This example uses the built-in Azure Blob action. 1. Under the designer search box, make sure that **All** is selected. In the search box, enter **Azure blob**. Select the Blob action that you want to use. @@ -298,7 +293,7 @@ You can add network security to an Azure storage account by [restricting access - To access storage accounts behind firewalls using the Azure Blob Storage managed connector in Consumption, Standard, and ISE-based logic apps, review the following documentation: - - [Access storage accounts with managed identities](#access-blob-storage-with-managed-identities) + - [Access storage accounts in same region with managed identities](#access-blob-storage-in-same-region-with-managed-identities) - [Access storage accounts in other regions](#access-storage-accounts-in-other-regions) @@ -350,7 +345,7 @@ To add your outbound IP addresses to the storage account firewall, follow these You don't have to create a private endpoint. You can just permit traffic through the ISE outbound IPs on the storage account. -### Access Blob Storage with managed identities +### Access Blob Storage in same region with managed identities To connect to Azure Blob Storage in any region, you can use [managed identities for authentication](../active-directory/managed-identities-azure-resources/overview.md). You can create an exception that gives Microsoft trusted services, such as a managed identity, access to your storage account through a firewall. diff --git a/articles/connectors/connectors-create-api-informix.md b/articles/connectors/connectors-create-api-informix.md index 6b4f6721176c2..e04a8dbc5c55b 100644 --- a/articles/connectors/connectors-create-api-informix.md +++ b/articles/connectors/connectors-create-api-informix.md @@ -3,8 +3,8 @@ title: Connect to IBM Informix database description: Automate tasks and workflows that manage resources stored in IBM Informix by using Azure Logic Apps services: logic-apps ms.suite: integration -author: ChristopherHouser -ms.author: daberry +author: mamccrea +ms.author: mamccrea ms.reviewer: estfan, azla ms.topic: how-to ms.date: 01/07/2020 diff --git a/articles/connectors/connectors-create-api-sqlazure.md b/articles/connectors/connectors-create-api-sqlazure.md index c093bcf261b25..3ba8ab0214a48 100644 --- a/articles/connectors/connectors-create-api-sqlazure.md +++ b/articles/connectors/connectors-create-api-sqlazure.md @@ -1,30 +1,47 @@ --- title: Connect to SQL databases -description: Automate workflows for SQL databases on premises or in the cloud with Azure Logic Apps. +description: Connect to SQL databases from workflows in Azure Logic Apps. services: logic-apps ms.suite: integration ms.reviewer: estfan, azla ms.topic: how-to -ms.date: 04/18/2022 +ms.date: 06/08/2022 tags: connectors --- -# Connect to a SQL database from Azure Logic Apps +# Connect to a SQL database from workflows in Azure Logic Apps -This article shows how to access your SQL database with the SQL Server connector in Azure Logic Apps. 
You can then create automated workflows that are triggered by events in your SQL database or other systems and manage your SQL data and resources. +This article shows how to access your SQL database from a workflow in Azure Logic Apps with the SQL Server connector. You can then create automated workflows that run when triggered by events in your SQL database or in other systems and run actions to manage your SQL data and resources. -For example, you can use actions that get, insert, and delete data along with running SQL queries and stored procedures. You can create workflow that checks for new records in a non-SQL database, does some processing work, creates new records in your SQL database using the results, and sends email alerts about the new records in your SQL database. +For example, your workflow can run actions that get, insert, and delete data or that can run SQL queries and stored procedures. Your workflow can check for new records in a non-SQL database, do some processing work, use the results to create new records in your SQL database, and send email alerts about the new records. - The SQL Server connector supports the following SQL editions: +If you're new to Azure Logic Apps, review the following get started documentation: + +* [What is Azure Logic Apps](../logic-apps/logic-apps-overview.md) +* [Quickstart: Create your first logic app workflow](../logic-apps/quickstart-create-first-logic-app-workflow.md) + +## Supported SQL editions + +The SQL Server connector supports the following SQL editions: * [SQL Server](/sql/sql-server/sql-server-technical-documentation) * [Azure SQL Database](/azure/azure-sql/database/sql-database-paas-overview) * [Azure SQL Managed Instance](/azure/azure-sql/managed-instance/sql-managed-instance-paas-overview) -If you're new to Azure Logic Apps, review the following documentation: +## Connector technical reference -* [What is Azure Logic Apps](../logic-apps/logic-apps-overview.md) -* [Quickstart: Create your first logic app workflow](../logic-apps/quickstart-create-first-logic-app-workflow.md) +The SQL Server connector has different versions, based on [logic app type and host environment](../logic-apps/logic-apps-overview.md#resource-environment-differences). + +| Logic app | Environment | Connector version | +|-----------|-------------|-------------------| +| **Consumption** | Multi-tenant Azure Logic Apps | [Managed connector - Standard class](managed.md). For operations, limits, and other information, review the [SQL Server managed connector reference](/connectors/sql). | +| **Consumption** | Integration service environment (ISE) | [Managed connector - Standard class](managed.md) and ISE version. For operations, managed connector limits, and other information, review the [SQL Server managed connector reference](/connectors/sql). For ISE-versioned limits, review the [ISE message limits](../logic-apps/logic-apps-limits-and-config.md#message-size-limits), not the managed connector's message limits. | +| **Standard** | Single-tenant Azure Logic Apps and App Service Environment v3 (Windows plans only) | [Managed connector - Standard class](managed.md) and [built-in connector](built-in.md), which is [service provider based](../logic-apps/custom-connector-overview.md#service-provider-interface-implementation). For managed connector operations, limits, and other information, review the [SQL Server managed connector reference](/connectors/sql/).

                  The built-in connector differs in the following ways:

                  - The built-in version has no triggers.

                  - The built-in version has a single **Execute Query** action. This action can directly access Azure virtual networks with a connection string and doesn't need the on-premises data gateway.

                  For built-in connector operations, limits, and other information, review the [SQL Server built-in connector reference](#built-in-connector-operations). | +|||| + +## Limitations + +For more information, review the [SQL Server managed connector reference](/connectors/sql/) or the [SQL Server built-in connector reference](#built-in-connector-operations). ## Prerequisites @@ -32,7 +49,7 @@ If you're new to Azure Logic Apps, review the following documentation: * [SQL Server database](/sql/relational-databases/databases/create-a-database), [Azure SQL Database](/azure/azure-sql/database/single-database-create-quickstart), or [SQL Managed Instance](/azure/azure-sql/managed-instance/instance-create-quickstart). - The SQL connector requires that your tables contain data so that SQL connector operations can return results when called. For example, if you use Azure SQL Database, you can use the included sample databases to try the SQL connector operations. + The SQL Server connector requires that your tables contain data so that the connector operations can return results when called. For example, if you use Azure SQL Database, you can use the included sample databases to try the SQL Server connector operations. * The information required to create a SQL database connection, such as your SQL server and database names. If you're using Windows Authentication or SQL Server Authentication to authenticate access, you also need your user name and password. You can usually find this information in the connection string. @@ -53,37 +70,23 @@ If you're new to Azure Logic Apps, review the following documentation: `Server={your-server-address};Database={your-database-name};User Id={your-user-name};Password={your-password};` -* The logic app workflow where you want to access your SQL database. If you want to start your workflow with a SQL Server trigger operation, you have to start with a blank workflow. +* The logic app workflow where you want to access your SQL database. To start your workflow with a SQL Server trigger, you have to start with a blank workflow. To use a SQL Server action, start your workflow with any trigger. -* To connect to an on-premises SQL server, the following extra requirements apply based on whether you have a Consumption logic app workflow, either in multi-tenant Azure Logic Apps or an [integration service environment (ISE)](../logic-apps/connect-virtual-network-vnet-isolated-environment-overview.md), or if you have a Standard logic app workflow in [single-tenant Azure Logic Apps](../logic-apps/single-tenant-overview-compare.md). +* To connect to an on-premises SQL server, the following extra requirements apply, based on whether you have a Consumption or Standard logic app workflow. * Consumption logic app workflow * In multi-tenant Azure Logic Apps, you need the [on-premises data gateway](../logic-apps/logic-apps-gateway-install.md) installed on a local computer and a [data gateway resource that's already created in Azure](../logic-apps/logic-apps-gateway-connection.md). - * In an ISE, when you have non-Windows or SQL Server Authentication connections, you don't need the on-premises data gateway and can use the ISE-versioned SQL Server connector. For Windows Authentication and SQL Server Authentication, you still have to use the [on-premises data gateway](../logic-apps/logic-apps-gateway-install.md) and a [data gateway resource that's already created in Azure](../logic-apps/logic-apps-gateway-connection.md). 
Also, the ISE-versioned SQL Server connector doesn't support Windows authentication, so you have to use the non-ISE SQL Server connector. + * In an ISE, you don't need the on-premises data gateway for SQL Server Authentication and non-Windows Authentication connections, and you can use the ISE-versioned SQL Server connector. For Windows Authentication, you need the [on-premises data gateway](../logic-apps/logic-apps-gateway-install.md) on a local computer and a [data gateway resource that's already created in Azure](../logic-apps/logic-apps-gateway-connection.md). The ISE-version connector doesn't support Windows Authentication, so you have to use the regular SQL Server managed connector. * Standard logic app workflow - In single-tenant Azure Logic Apps, you can use the built-in SQL Server connector, which requires a connection string. If you want to use the managed SQL Server connector, you need follow the same requirements as a Consumption logic app workflow in multi-tenant Azure Logic Apps. - -## Connector technical reference + You can use the SQL Server built-in connector, which requires a connection string. To use the SQL Server managed connector, follow the same requirements as a Consumption logic app workflow in multi-tenant Azure Logic Apps. -This connector is available for logic app workflows in multi-tenant Azure Logic Apps, ISEs, and single-tenant Azure Logic Apps. - -* For Consumption logic app workflows in multi-tenant Azure Logic Apps, this connector is available only as a managed connector. For more information, review the [managed SQL Server connector operations](/connectors/sql). - -* For Consumption logic app workflows in an ISE, this connector is available as a managed connector and as an ISE connector that's designed to run in an ISE. For more information, review the [managed SQL Server connector operations](/connectors/sql). - -* For Standard logic app workflows in single-tenant Azure Logic Apps, this connector is available as a managed connector and as a built-in connector that's designed to run in the same process as the single-tenant Azure Logic Apps runtime. However, the built-in version differs in the following ways: - - * The built-in SQL Server connector has no triggers. - - * The built-in SQL Server connector has only one operation: **Execute Query** - -For the managed SQL Server connector technical information, such as trigger and action operations, limits, and known issues, review the [SQL Server connector's reference page](/connectors/sql/), which is generated from the Swagger description. +For other connector requirements, review [SQL Server managed connector reference](/connectors/sql/). @@ -97,69 +100,73 @@ The following steps use the Azure portal, but with the appropriate Azure Logic A ### [Consumption](#tab/consumption) -1. In the Azure portal, open your blank logic app workflow in the designer. +1. In the [Azure portal](https://portal.azure.com), open your blank logic app workflow in the designer. -1. Find and select the [managed SQL Server connector trigger](/connectors/sql) that you want to use. +1. Find and select the [SQL Server trigger](/connectors/sql) that you want to use. - 1. Under the designer search box, select **All**. + 1. On the designer, under the search box, select **All**. - 1. In the designer search box, enter **sql server**. + 1. In the search box, enter **sql server**. - 1. From the triggers list, select the SQL trigger that you want. This example continues with the trigger named **When an item is created**. + 1. 
From the triggers list, select the SQL trigger that you want. - ![Screenshot showing the Azure portal, workflow designer for Consumption logic app, search box with "sql server", and the "When an item is created" trigger selected.](./media/connectors-create-api-sqlazure/select-sql-server-trigger-consumption.png) + This example continues with the trigger named **When an item is created**. -1. If you're connecting to your SQL database for the first time, you're prompted to [create your SQL database connection now](#create-connection). After you create this connection, you can continue with the next step. + ![Screenshot showing the Azure portal, Consumption logic app workflow designer, search box with "sql server", and "When an item is created" trigger selected.](./media/connectors-create-api-sqlazure/select-sql-server-trigger-consumption.png) -1. In the trigger, specify the interval and frequency for how often the trigger checks the table. +1. Provide the [information for your connection](#create-connection). When you're done, select **Create**. + +1. After the trigger information box appears, specify the interval and frequency for how often the trigger checks the table. 1. To add other properties available for this trigger, open the **Add new parameter** list and select those properties. This trigger returns only one row from the selected table, and nothing else. To perform other tasks, continue by adding either a [SQL Server connector action](#add-sql-action) or [another action](../connectors/apis-list.md) that performs the next task that you want in your logic app workflow. - For example, to view the data in this row, you can add other actions that create a file that includes the fields from the returned row, and then send email alerts. To learn about other available actions for this connector, see the [connector's reference page](/connectors/sql/). - -1. On the designer toolbar, select **Save**. + For example, to view the data in this row, you can add other actions that create a file that includes the fields from the returned row, and then send email alerts. To learn about other available actions for this connector, see the [SQL Server managed connector reference](/connectors/sql/). - Although this step automatically enables and publishes your logic app live in Azure, the only action that your logic app currently takes is to check your database based on your specified interval and frequency. +1. When you're done, save your workflow. ### [Standard](#tab/standard) -In Standard logic app workflows, only the managed SQL Server connector has triggers. The built-in SQL Server connector doesn't have any triggers. +In Standard logic app workflows, only the SQL Server managed connector has triggers. The SQL Server built-in connector doesn't have any triggers. -1. In the Azure portal, open your blank logic app workflow in the designer. +1. In the [Azure portal](https://portal.azure.com), open your blank logic app workflow in the designer. -1. Find and select the [managed SQL Server connector trigger](/connectors/sql) that you want to use. +1. Find and select the [SQL Server trigger](/connectors/sql) that you want to use. - 1. Under the designer search box, select **Azure**. + 1. On the designer, select **Choose an operation**. - 1. In the designer search box, enter **sql server**. + 1. Under the **Choose an operation** search box, select **Azure**. - 1. From the triggers list, select the SQL trigger that you want. This example continues with the trigger named **When an item is created**. + 1. 
In the search box, enter **sql server**. - ![Screenshot showing the Azure portal, workflow designer for Standard logic app, search box with "sql server", and the "When an item is created" trigger selected.](./media/connectors-create-api-sqlazure/select-sql-server-trigger-standard.png) + 1. From the triggers list, select the SQL trigger that you want. -1. If you're connecting to your SQL database for the first time, you're prompted to [create your SQL database connection now](#create-connection). After you create this connection, you can continue with the next step. + This example continues with the trigger named **When an item is created**. -1. In the trigger, specify the interval and frequency for how often the trigger checks the table. + ![Screenshot showing Azure portal, Standard logic app workflow designer, search box with "sql server", and "When an item is created" trigger selected.](./media/connectors-create-api-sqlazure/select-sql-server-trigger-standard.png) -1. To add other properties available for this trigger, open the **Add new parameter** list and select those properties. +1. Provide the [information for your connection](#create-connection). When you're done, select **Create**. - This trigger returns only one row from the selected table, and nothing else. To perform other tasks, continue by adding either a [SQL connector action](#add-sql-action) or [another action](../connectors/apis-list.md) that performs the next task that you want in your logic app workflow. +1. After the trigger information box appears, specify the interval and frequency for how often the trigger checks the table. - For example, to view the data in this row, you can add other actions that create a file that includes the fields from the returned row, and then send email alerts. To learn about other available actions for this connector, see the [connector's reference page](/connectors/sql/). +1. To add other properties available for this trigger, open the **Add new parameter** list and select those properties. + + This trigger returns only one row from the selected table, and nothing else. To perform other tasks, continue by adding either a [SQL Server connector action](#add-sql-action) or [another action](../connectors/apis-list.md) that performs the next task that you want in your logic app workflow. -1. On the designer toolbar, select **Save**. + For example, to view the data in this row, you can add other actions that create a file that includes the fields from the returned row, and then send email alerts. To learn about other available actions for this connector, see the [SQL Server managed connector reference](/connectors/sql/). - Although this step automatically enables and publishes your logic app live in Azure, the only action that your logic app currently takes is to check your database based on your specified interval and frequency. +1. When you're done, save your workflow. --- +When you save your workflow, this step automatically publishes your updates to your deployed logic app, which is live in Azure. With only a trigger, your workflow just checks the SQL database based on your specified schedule. You have to [add an action](#add-sql-action) that responds to the trigger. 
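The trigger steps prompt you for connection values such as the SQL server name and database name, and the Standard built-in connector expects a connection string. If you need to look these values up, the Azure CLI is one option. The following is only a sketch; the server, database, and resource group names are placeholders:

```azurecli
# Show the logical server's fully qualified domain name (placeholder names).
az sql server show --name my-sql-server --resource-group MyResourceGroup --query fullyQualifiedDomainName --output tsv

# List the databases on that server.
az sql db list --server my-sql-server --resource-group MyResourceGroup --query "[].name" --output table

# Generate an ADO.NET-style connection string template for a database.
az sql db show-connection-string --client ado.net --server my-sql-server --name my-database
```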
+ ## Trigger recurrence shift and drift (daylight saving time) -Recurring connection-based triggers where you need to create a connection first, such as the managed SQL Server trigger, differ from built-in triggers that run natively in Azure Logic Apps, such as the [Recurrence trigger](../connectors/connectors-native-recurrence.md). For recurring connection-based triggers, the recurrence schedule isn't the only driver that controls execution, and the time zone only determines the initial start time. Subsequent runs depend on the recurrence schedule, the last trigger execution, *and* other factors that might cause run times to drift or produce unexpected behavior. For example, unexpected behavior can include failure to maintain the specified schedule when daylight saving time (DST) starts and ends. +Recurring connection-based triggers where you need to create a connection first, such as the SQL Server managed connector trigger, differ from built-in triggers that run natively in Azure Logic Apps, such as the [Recurrence trigger](../connectors/connectors-native-recurrence.md). For recurring connection-based triggers, the recurrence schedule isn't the only driver that controls execution, and the time zone only determines the initial start time. Subsequent runs depend on the recurrence schedule, the last trigger execution, *and* other factors that might cause run times to drift or produce unexpected behavior. For example, unexpected behavior can include failure to maintain the specified schedule when daylight saving time (DST) starts and ends. To make sure that the recurrence time doesn't shift when DST takes effect, manually adjust the recurrence. That way, your workflow continues to run at the expected or specified start time. Otherwise, the start time shifts one hour forward when DST starts and one hour backward when DST ends. For more information, see [Recurrence for connection-based triggers](../connectors/apis-list.md#recurrence-for-connection-based-triggers). @@ -169,31 +176,35 @@ To make sure that the recurrence time doesn't shift when DST takes effect, manua The following steps use the Azure portal, but with the appropriate Azure Logic Apps extension, you can also use Visual Studio to edit Consumption logic app workflows or Visual Studio Code to the following tools to edit logic app workflows: -* Consumption logic app workflow: Visual Studio or Visual Studio Code +* Consumption logic app workflows: [Visual Studio](../logic-apps/quickstart-create-logic-apps-with-visual-studio.md) or [Visual Studio Code](../logic-apps/quickstart-create-logic-apps-visual-studio-code.md) -* Standard logic app workflows: Visual Studio Code +* Standard logic app workflows: [Visual Studio Code](../logic-apps/create-single-tenant-workflows-visual-studio-code.md) In this example, the logic app workflow starts with the [Recurrence trigger](../connectors/connectors-native-recurrence.md), and calls an action that gets a row from a SQL database. ### [Consumption](#tab/consumption) -1. In the Azure portal, open your logic app workflow in the designer. +1. In the [Azure portal](https://portal.azure.com), open your logic app workflow in the designer. -1. Find and select the [managed SQL Server connector action](/connectors/sql) that you want to use. This example continues with the action named **Get row**. +1. Find and select the [SQL Server action](/connectors/sql) that you want to use. + + This example continues with the action named **Get row**. 1. 
Under the trigger or action where you want to add the SQL action, select **New step**. Or, to add an action between existing steps, move your mouse over the connecting arrow. Select the plus sign (**+**) that appears, and then select **Add an action**. - 1. In the **Choose an operation** box, under the designer search box, select **All**. + 1. Under the **Choose an operation** search box, select **All**. + + 1. In the search box, enter **sql server**. - 1. In the designer search box, enter **sql server**. + 1. From the actions list, select the SQL Server action that you want. - 1. From the actions list, select the SQL Server action that you want. This example uses the **Get row** action, which gets a single record. + This example uses the **Get row** action, which gets a single record. ![Screenshot showing the Azure portal, workflow designer for Consumption logic app, the search box with "sql server", and "Get row" selected in the "Actions" list.](./media/connectors-create-api-sqlazure/select-sql-get-row-action-consumption.png) -1. If you're connecting to your SQL database for the first time, you're prompted to [create your SQL database connection now](#create-connection). After you create this connection, you can continue with the next step. +1. Provide the [information for your connection](#create-connection). When you're done, select **Create**. 1. If you haven't already provided the SQL server name and database name, provide those values. Otherwise, from the **Table name** list, select the table that you want to use. In the **Row id** property, enter the ID for the record that you want. @@ -201,31 +212,31 @@ In this example, the logic app workflow starts with the [Recurrence trigger](../ ![Screenshot showing Consumption workflow designer and the "Get row" action with the example "Table name" property value and empty row ID.](./media/connectors-create-api-sqlazure/specify-table-row-id-consumption.png) - This action returns only one row from the selected table, and nothing else. To view the data in this row, add other actions, for example, those that create a file that includes the fields from the returned row, and store that file in a cloud storage account. To learn about other available actions for this connector, see the [connector's reference page](/connectors/sql/). + This action returns only one row from the selected table, and nothing else. To view the data in this row, add other actions. For example, such actions might create a file, include the fields from the returned row, and store the file in a cloud storage account. To learn about other available actions for this connector, see the [connector's reference page](/connectors/sql/). -1. When you're done, on the designer toolbar, select **Save**. +1. When you're done, save your workflow. ### [Standard](#tab/standard) -1. In the Azure portal, open your logic app workflow in the designer. +1. In the [Azure portal](https://portal.azure.com), open your logic app workflow in the designer. -1. Find and select the SQL Server connector action that you want to use. +1. Find and select the SQL Server action that you want to use. - 1. Under the trigger or action where you want to add the SQL Server action, select **New step**. + 1. Under the trigger or action where you want to add the SQL Server action, select the plus sign (**+**), and then select **Add an action**. - Or, to add an action between existing steps, move your mouse over the connecting arrow. Select the plus sign (**+**) that appears, and then select **Add an action**. 
+ Or, to add an action between existing steps, select the plus sign (**+**) on the connecting arrow, and then select **Add an action**. - 1. In the **Choose an operation** box, under the designer search box, select either of the following options: + 1. Under the **Choose an operation** search box, select either of the following options: - * **Built-in** when you want to use built-in SQL Server actions such as **Execute Query** + * **Built-in** when you want to use SQL Server built-in actions such as **Execute Query** ![Screenshot showing the Azure portal, workflow designer for Standard logic app, and designer search box with "Built-in" selected underneath.](./media/connectors-create-api-sqlazure/select-built-in-category-standard.png) - * **Azure** when you want to use [managed SQL Server connector actions](/connectors/sql) such as **Get row** + * **Azure** when you want to use [SQL Server managed connector actions](/connectors/sql) such as **Get row** ![Screenshot showing the Azure portal, workflow designer for Standard logic app, and designer search box with "Azure" selected underneath.](./media/connectors-create-api-sqlazure/select-azure-category-standard.png) - 1. In the designer search box, enter **sql server**. + 1. In the search box, enter **sql server**. 1. From the actions list, select the SQL Server action that you want. @@ -241,7 +252,7 @@ In this example, the logic app workflow starts with the [Recurrence trigger](../ ![Screenshot showing the designer search box with "sql server" and "Azure" selected underneath with the "Get row" action selected in the "Actions" list.](./media/connectors-create-api-sqlazure/select-sql-get-row-action-standard.png) -1. If you're connecting to your SQL database for the first time, you're prompted to [create your SQL database connection now](#create-connection). After you create this connection, you can continue with the next step. +1. Provide the [information for your connection](#create-connection). When you're done, select **Create**. 1. If you haven't already provided the SQL server name and database name, provide those values. Otherwise, from the **Table name** list, select the table that you want to use. In the **Row id** property, enter the ID for the record that you want. @@ -249,9 +260,9 @@ In this example, the logic app workflow starts with the [Recurrence trigger](../ ![Screenshot showing Standard workflow designer and "Get row" action with the example "Table name" property value and empty row ID.](./media/connectors-create-api-sqlazure/specify-table-row-id-standard.png) - This action returns only one row from the selected table, and nothing else. To view the data in this row, add other actions, for example, those that create a file that includes the fields from the returned row, and store that file in a cloud storage account. To learn about other available actions for this connector, see the [connector's reference page](/connectors/sql/). + This action returns only one row from the selected table, and nothing else. To view the data in this row, add other actions. For example, such actions might create a file, include the fields from the returned row, and store the file in a cloud storage account. To learn about other available actions for this connector, see the [connector's reference page](/connectors/sql/). -1. When you're done, on the designer toolbar, select **Save**. +1. When you're done, save your workflow. 
--- @@ -261,7 +272,7 @@ In this example, the logic app workflow starts with the [Recurrence trigger](../ [!INCLUDE [Create connection general intro](../../includes/connectors-create-connection-general-intro.md)] -After you provide this information, continue with these steps: +After you provide this information, continue with the following steps based on your target database: * [Connect to cloud-based Azure SQL Database or SQL Managed Instance](#connect-azure-sql-db) * [Connect to on-premises SQL Server](#connect-sql-server) @@ -272,7 +283,7 @@ After you provide this information, continue with these steps: To access a SQL Managed Instance without using the on-premises data gateway or integration service environment, you have to [set up the public endpoint on the SQL Managed Instance](/azure/azure-sql/managed-instance/public-endpoint-configure). The public endpoint uses port 3342, so make sure that you specify this port number when you create the connection from your logic app. -The first time that you add either a [SQL Server trigger](#add-sql-trigger) or [SQL Server action](#add-sql-action), and you haven't previously created a connection to your database, you're prompted to complete these steps: +In the connection information box, complete the following steps: 1. For **Connection name**, provide a name to use for your connection. @@ -280,12 +291,12 @@ The first time that you add either a [SQL Server trigger](#add-sql-trigger) or [ | Authentication | Description | |----------------|-------------| - | **Service principal (Azure AD application)** | - Available only for the managed SQL Server connector.

                  - Requires an Azure AD application and service principal. For more information, see [Create an Azure AD application and service principal that can access resources using the Azure portal](../active-directory/develop/howto-create-service-principal-portal.md). | - | **Logic Apps Managed Identity** | - Available only for the managed SQL Server connector and ISE SQL Server connector.

                  - Requires the following items:

                  --- A valid managed identity that's [enabled on your logic app resource](../logic-apps/create-managed-service-identity.md) and has access to your database.

                  --- **SQL DB Contributor** role access to the SQL Server resource

                  --- **Contributor** access to the resource group that includes the SQL Server resource.

                  For more information, see [SQL - Server-Level Roles](/sql/relational-databases/security/authentication-access/server-level-roles). | - | [**Azure AD Integrated**](/azure/azure-sql/database/authentication-aad-overview) | - Available only for the managed SQL Server connector and ISE SQL Server connector.

                  - Requires a valid managed identity in Azure Active Directory (Azure AD) that's [enabled on your logic app resource](../logic-apps/create-managed-service-identity.md) and has access to your database. For more information, see these topics:

                  - [Azure SQL Security Overview - Authentication](/azure/azure-sql/database/security-overview#authentication)
                  - [Authorize database access to Azure SQL - Authentication and authorization](/azure/azure-sql/database/logins-create-manage#authentication-and-authorization)
                  - [Azure SQL - Azure AD Integrated authentication](/azure/azure-sql/database/authentication-aad-overview) | - | [**SQL Server Authentication**](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-sql-server-authentication) | - Available only for the managed SQL Server connector and ISE SQL Server connector.

                  - Requires the following items:

                  --- A data gateway resource that's previously created in Azure for your connection, regardless whether your logic app is in multi-tenant Azure Logic Apps or an ISE.

                  --- A valid user name and strong password that are created and stored in your SQL Server database. For more information, see the following topics:

                  - [Azure SQL Security Overview - Authentication](/azure/azure-sql/database/security-overview#authentication)
                  - [Authorize database access to Azure SQL - Authentication and authorization](/azure/azure-sql/database/logins-create-manage#authentication-and-authorization) | + | **Service principal (Azure AD application)** | - Supported with the SQL Server managed connector.

                  - Requires an Azure AD application and service principal. For more information, see [Create an Azure AD application and service principal that can access resources using the Azure portal](../active-directory/develop/howto-create-service-principal-portal.md). | + | **Logic Apps Managed Identity** | - Supported with the SQL Server managed connector and ISE-versioned connector.

                  - Requires the following items:

                  --- A valid managed identity that's [enabled on your logic app resource](../logic-apps/create-managed-service-identity.md) and has access to your database.

                  --- **SQL DB Contributor** role access to the SQL Server resource

                  --- **Contributor** access to the resource group that includes the SQL Server resource.

                  For more information, see [SQL - Server-Level Roles](/sql/relational-databases/security/authentication-access/server-level-roles). | + | [**Azure AD Integrated**](/azure/azure-sql/database/authentication-aad-overview) | - Supported with the SQL Server managed connector and ISE-versioned connector.

                  - Requires a valid managed identity in Azure Active Directory (Azure AD) that's [enabled on your logic app resource](../logic-apps/create-managed-service-identity.md) and has access to your database. For more information, see these topics:

                  - [Azure SQL Security Overview - Authentication](/azure/azure-sql/database/security-overview#authentication)
                  - [Authorize database access to Azure SQL - Authentication and authorization](/azure/azure-sql/database/logins-create-manage#authentication-and-authorization)
                  - [Azure SQL - Azure AD Integrated authentication](/azure/azure-sql/database/authentication-aad-overview) | + | [**SQL Server Authentication**](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-sql-server-authentication) | - Supported with the SQL Server managed connector and ISE-versioned connector.

                  - Requires the following items:

                  --- A data gateway resource that's previously created in Azure for your connection, regardless whether your logic app is in multi-tenant Azure Logic Apps or an ISE.

                  --- A valid user name and strong password that are created and stored in your SQL Server database. For more information, see the following topics:

                  - [Azure SQL Security Overview - Authentication](/azure/azure-sql/database/security-overview#authentication)
                  - [Authorize database access to Azure SQL - Authentication and authorization](/azure/azure-sql/database/logins-create-manage#authentication-and-authorization) | - This connection and authentication information box looks similar to the following example, which selects **Azure AD Integrated**: + The following examples show how the connection information box might appear if you select **Azure AD Integrated** authentication. * Consumption logic app workflows @@ -332,7 +343,7 @@ The first time that you add either a [SQL Server trigger](#add-sql-trigger) or [ ### Connect to on-premises SQL Server -The first time that you add either a [SQL trigger](#add-sql-trigger) or [SQL action](#add-sql-action), and you haven't previously created a connection to your database, you're prompted to complete these steps: +In the connection information box, complete the following steps: 1. For connections to your on-premises SQL server that require the on-premises data gateway, make sure that you've [completed these prerequisites](#multi-tenant-or-ise). @@ -342,8 +353,8 @@ The first time that you add either a [SQL trigger](#add-sql-trigger) or [SQL act | Authentication | Description | |----------------|-------------| - | [**SQL Server Authentication**](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-sql-server-authentication) | - Available only for the managed SQL Server connector and ISE SQL Server connector.

                  - Requires the following items:

                  --- A data gateway resource that's previously created in Azure for your connection, regardless whether your logic app is in multi-tenant Azure Logic Apps or an ISE.

                  --- A valid user name and strong password that are created and stored in your SQL Server.

                  For more information, see [SQL Server Authentication](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-sql-server-authentication). | - | [**Windows Authentication**](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-windows-authentication) | - Available only for the managed SQL Server connector.

                  - Requires the following items:

                  --- A data gateway resource that's previously created in Azure for your connection, regardless whether your logic app is in multi-tenant Azure Logic Apps or an ISE.

                  --- A valid Windows user name and password to confirm your identity through your Windows account.

                  For more information, see [Windows Authentication](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-windows-authentication). | + | [**SQL Server Authentication**](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-sql-server-authentication) | - Supported with the SQL Server managed connector and ISE-versioned connector.

                  - Requires the following items:

                  --- A data gateway resource that's previously created in Azure for your connection, regardless of whether your logic app is in multi-tenant Azure Logic Apps or an ISE.

                  --- A valid user name and strong password that are created and stored in your SQL Server.

                  For more information, see [SQL Server Authentication](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-sql-server-authentication). | + | [**Windows Authentication**](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-windows-authentication) | - Supported with the SQL Server managed connector.

                  - Requires the following items:

                  --- A data gateway resource that's previously created in Azure for your connection, regardless of whether your logic app is in multi-tenant Azure Logic Apps or an ISE.

                  --- A valid Windows user name and password to confirm your identity through your Windows account.

                  For more information, see [Windows Authentication](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-windows-authentication). | ||| 1. Select or provide the following values for your SQL database: @@ -366,7 +377,7 @@ The first time that you add either a [SQL trigger](#add-sql-trigger) or [SQL act > * `User ID={your-user-name}` > * `Password={your-password}` - This connection and authentication information box looks similar to the following example, which selects **Windows Authentication**: + The following examples show how the connection information box might appear if you select **Windows** authentication. * Consumption logic app workflows @@ -384,18 +395,19 @@ The first time that you add either a [SQL trigger](#add-sql-trigger) or [SQL act ## Handle bulk data -Sometimes, you have to work with result sets so large that the connector doesn't return all the results at the same time, or you want better control over the size and structure for your result sets. Here's some ways that you can handle such large result sets: +Sometimes, you work with result sets so large that the connector doesn't return all the results at the same time. Or, you want better control over the size and structure for your result sets. The following list includes some ways that you can handle such large result sets: * To help you manage results as smaller sets, turn on *pagination*. For more information, see [Get bulk data, records, and items by using pagination](../logic-apps/logic-apps-exceed-default-page-size-with-pagination.md). For more information, see [SQL Pagination for bulk data transfer with Logic Apps](https://social.technet.microsoft.com/wiki/contents/articles/40060.sql-pagination-for-bulk-data-transfer-with-logic-apps.aspx). -* Create a [*stored procedure*](/sql/relational-databases/stored-procedures/stored-procedures-database-engine) that organizes the results the way that you want. The SQL connector provides many backend features that you can access by using Azure Logic Apps so that you can more easily automate business tasks that work with SQL database tables. +* Create a [*stored procedure*](/sql/relational-databases/stored-procedures/stored-procedures-database-engine) that organizes the results the way that you want. The SQL Server connector provides many backend features that you can access by using Azure Logic Apps so that you can more easily automate business tasks that work with SQL database tables. When a SQL action gets or inserts multiple rows, your logic app workflow can iterate through these rows by using an [*until loop*](../logic-apps/logic-apps-control-flow-loops.md#until-loop) within these [limits](../logic-apps/logic-apps-limits-and-config.md). However, when your logic app has to work with record sets so large, for example, thousands or millions of rows, that you want to minimize the costs resulting from calls to the database. To organize the results in the way that you want, you can create a stored procedure that runs in your SQL instance and uses the **SELECT - ORDER BY** statement. This solution gives you more control over the size and structure of your results. Your logic app calls the stored procedure by using the SQL Server connector's **Execute stored procedure** action. For more information, see [SELECT - ORDER BY Clause](/sql/t-sql/queries/select-order-by-clause-transact-sql). > [!NOTE] - > The SQL connector has a stored procedure timeout limit that's [less than 2-minutes](/connectors/sql/#known-issues-and-limitations). 
+ > + > The SQL Server connector has a stored procedure timeout limit that's [less than 2 minutes](/connectors/sql/#known-issues-and-limitations). > Some stored procedures might take longer than this limit to complete, causing a `504 Timeout` error. You can work around this problem > by using a SQL completion trigger, native SQL pass-through query, a state table, and server-side jobs. > @@ -404,7 +416,7 @@ Sometimes, you have to work with result sets so large that the connector doesn't > [SQL Server on premises](/sql/sql-server/sql-server-technical-documentation) > and [SQL Managed Instance](/azure/azure-sql/managed-instance/sql-managed-instance-paas-overview), > you can use the [SQL Server Agent](/sql/ssms/agent/sql-server-agent). To learn more, see - > [Handle long-running stored procedure timeouts in the SQL connector for Azure Logic Apps](../logic-apps/handle-long-running-stored-procedures-sql-connector.md). + > [Handle long-running stored procedure timeouts in the SQL Server connector for Azure Logic Apps](../logic-apps/handle-long-running-stored-procedures-sql-connector.md). ### Handle dynamic bulk data @@ -414,7 +426,7 @@ When you call a stored procedure by using the SQL Server connector, the returned 1. View the output format by performing a test run. Copy and save your sample output. -1. In the designer, under the action where you call the stored procedure, select **New step**. +1. In the designer, under the action where you call the stored procedure, add a new action. 1. In the **Choose an operation** box, find and select the action named [**Parse JSON**](../logic-apps/logic-apps-perform-data-operations.md#parse-json-action). @@ -423,20 +435,54 @@ When you call a stored procedure by using the SQL Server connector, the returned 1. In the **Enter or paste a sample JSON payload** box, paste your sample output, and select **Done**. > [!NOTE] - > If you get an error that Logic Apps can't generate a schema, check that your sample output's syntax is correctly formatted. - > If you still can't generate the schema, in the **Schema** box, manually enter the schema. + > + > If you get an error that Azure Logic Apps can't generate a schema, + > check that your sample output's syntax is correctly formatted. + > If you still can't generate the schema, in the **Schema** box, + > manually enter the schema. -1. On the designer toolbar, select **Save**. +1. When you're done, save your workflow. 1. To reference the JSON content properties, click inside the edit boxes where you want to reference those properties so that the dynamic content list appears. In the list, under the [**Parse JSON**](../logic-apps/logic-apps-perform-data-operations.md#parse-json-action) heading, select the data tokens for the JSON content properties that you want. + + +## Built-in connector operations + + +### Actions + +The SQL Server built-in connector has a single action. + +#### Execute Query + +Operation ID: `executeQuery` + +Runs a query against a SQL database. + +##### Parameters + +| Name | Key | Required | Type | Description | +|------|-----|----------|------|-------------| +| **Query** | `query` | True | Dynamic | The body for your query | +| **Query Parameters** | `queryParameters` | False | Objects | The parameters for your query | +|||||| + +##### Returns + +The outputs from this operation are dynamic. 
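The following sketch shows how a call to this **Execute Query** action might appear in a Standard workflow's underlying JSON definition. This is an illustration only, not generated output: the action name, the connection name `sql`, and the parameterized `SELECT` statement are assumptions, and the exact definition that the designer produces can differ.

```json
"Execute_query": {
    "type": "ServiceProvider",
    "inputs": {
        "parameters": {
            "query": "SELECT * FROM SalesLT.Customer WHERE CustomerID = @CustomerId",
            "queryParameters": {
                "CustomerId": 10
            }
        },
        "serviceProviderConfiguration": {
            "connectionName": "sql",
            "operationId": "executeQuery",
            "serviceProviderId": "/serviceProviders/sql"
        }
    },
    "runAfter": {}
}
```

Because the outputs are dynamic, downstream actions typically reference the results through the dynamic content list or a **Parse JSON** action, as described earlier for stored procedure results.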
+ +## Built-in connector app settings + +The SQL Server built-in connector includes app settings on your Standard logic app resource that control various thresholds for performance, throughput, capacity, and so on. For example, you can change the default timeout value for connector operations. For more information, review [Reference for app settings - local.settings.json](../logic-apps/edit-app-settings-host-settings.md#reference-local-settings-json). + ## Troubleshoot problems ### Connection problems -Connection problems can commonly happen, so to troubleshoot and resolve these kinds of issues, review [Solving connectivity errors to SQL Server](https://support.microsoft.com/help/4009936/solving-connectivity-errors-to-sql-server). Here are some examples: +Connection problems can commonly happen, so to troubleshoot and resolve these kinds of issues, review [Solving connectivity errors to SQL Server](https://support.microsoft.com/help/4009936/solving-connectivity-errors-to-sql-server). The following list provides some examples: * **A network-related or instance-specific error occurred while establishing a connection to SQL Server. The server was not found or was not accessible. Verify that the instance name is correct and that SQL Server is configured to allow remote connections.** @@ -446,4 +492,4 @@ Connection problems can commonly happen, so to troubleshoot and resolve these ki ## Next steps -* Learn about other [connectors for Azure Logic Apps](../connectors/apis-list.md) +* Learn about other [managed connectors for Azure Logic Apps](../connectors/apis-list.md) diff --git a/articles/connectors/connectors-native-http.md b/articles/connectors/connectors-native-http.md index 441739afafdf4..a65dfbda347a5 100644 --- a/articles/connectors/connectors-native-http.md +++ b/articles/connectors/connectors-native-http.md @@ -5,7 +5,7 @@ services: logic-apps ms.suite: integration ms.reviewer: estfan, azla ms.topic: how-to -ms.date: 09/13/2021 +ms.date: 05/31/2022 tags: connectors --- @@ -97,7 +97,7 @@ This built-in action makes an HTTP call to the specified URL for an endpoint and ## Trigger and action outputs -Here is more information about the outputs from an HTTP trigger or action, which returns this information: +Here's more information about the outputs from an HTTP trigger or action, which returns this information: | Property | Type | Description | |----------|------|-------------| @@ -123,7 +123,7 @@ Here is more information about the outputs from an HTTP trigger or action, which If you have a **Logic App (Standard)** resource in single-tenant Azure Logic Apps, and you want to use an HTTP operation with any of the following authentication types, make sure to complete the extra setup steps for the corresponding authentication type. Otherwise, the call fails. -* [TLS/SSL certificate](#tls-ssl-certificate-authentication): Add the app setting, `WEBSITE_LOAD_ROOT_CERTIFICATES`, and provide the thumbprint for your thumbprint for your TLS/SSL certificate. +* [TLS/SSL certificate](#tls-ssl-certificate-authentication): Add the app setting, `WEBSITE_LOAD_ROOT_CERTIFICATES`, and set the value to the thumbprint for your TLS/SSL certificate. * [Client certificate or Azure Active Directory Open Authentication (Azure AD OAuth) with the "Certificate" credential type](#client-certificate-authentication): Add the app setting, `WEBSITE_LOAD_USER_PROFILE`, and set the value to `1`. 
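As a minimal sketch only, the two app settings described above might appear as follows in the **Values** section of a Standard logic app project's **local.settings.json** file, or as application settings on the deployed logic app resource. The thumbprint value is a placeholder that you replace with your own certificate's thumbprint:

```json
{
  "IsEncrypted": false,
  "Values": {
    "WEBSITE_LOAD_ROOT_CERTIFICATES": "<tls-ssl-certificate-thumbprint>",
    "WEBSITE_LOAD_USER_PROFILE": "1"
  }
}
```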
@@ -215,7 +215,7 @@ For example, suppose you have a logic app that sends an HTTP POST request for an ![Multipart form data](./media/connectors-native-http/http-action-multipart.png) -Here is the same example that shows the HTTP action's JSON definition in the underlying workflow definition: +Here's the same example that shows the HTTP action's JSON definition in the underlying workflow definition: ```json "HTTP_action": { @@ -308,7 +308,7 @@ HTTP requests have a [timeout limit](../logic-apps/logic-apps-limits-and-config. To specify the number of seconds between retry attempts, you can add the `Retry-After` header to the HTTP action response. For example, if the target endpoint returns the `429 - Too many requests` status code, you can specify a longer interval between retries. The `Retry-After` header also works with the `202 - Accepted` status code. -Here is the same example that shows the HTTP action response that contains `Retry-After`: +Here's the same example that shows the HTTP action response that contains `Retry-After`: ```json { @@ -319,6 +319,9 @@ Here is the same example that shows the HTTP action response that contains `Retr } ``` +## Pagination support + +Sometimes, the target service responds by returning the results one page at a time. If the response specifies the next page with the **nextLink** or **@odata.nextLink** property, you can turn on the **Pagination** setting on the HTTP action. This setting causes the HTTP action to automatically follow these links and get the next page. However, if the response specifies the next page with any other tag, you might have to add a loop to your workflow. Make this loop follow that tag and manually get each page until the tag is null. ## Disable checking location headers diff --git a/articles/connectors/connectors-native-recurrence.md b/articles/connectors/connectors-native-recurrence.md index 55534914a6dec..cf39be847b793 100644 --- a/articles/connectors/connectors-native-recurrence.md +++ b/articles/connectors/connectors-native-recurrence.md @@ -1,111 +1,163 @@ --- -title: Schedule recurring tasks and workflows -description: Schedule and run recurring automated tasks and workflows with the Recurrence trigger in Azure Logic Apps. +title: Schedule and run recurring workflows +description: Schedule and run recurring workflows with the generic Recurrence trigger in Azure Logic Apps. services: logic-apps ms.suite: integration ms.reviewer: estfan, azla ms.topic: how-to -ms.date: 01/24/2022 +ms.date: 06/01/2022 --- -# Create, schedule, and run recurring tasks and workflows with the Recurrence trigger in Azure Logic Apps +# Schedule and run recurring workflows with the Recurrence trigger in Azure Logic Apps -To regularly run tasks, processes, or jobs on specific schedule, you can start your logic app workflow with the built-in **Recurrence** trigger, which runs natively in Azure Logic Apps. You can set a date and time as well as a time zone for starting the workflow and a recurrence for repeating that workflow. If the trigger misses recurrences for any reason, for example, due to disruptions or disabled workflows, this trigger doesn't process the missed recurrences but restarts recurrences at the next scheduled interval. For more information about the built-in Schedule triggers and actions, see [Schedule and run recurring automated, tasks, and workflows with Azure Logic Apps](../logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md). 
+To start and run your workflow on a schedule, you can use the generic Recurrence trigger as the first step. You can set a date, time, and time zone for starting the workflow and a recurrence for repeating that workflow. The following list includes some patterns that this trigger supports along with more advanced recurrences and complex schedules: -Here are some patterns that this trigger supports along with more advanced recurrences and complex schedules: +* Run at a specific date and time, then repeat every *n* number of seconds, minutes, hours, days, weeks, or months. * Run immediately and repeat every *n* number of seconds, minutes, hours, days, weeks, or months. -* Start at a specific date and time, then run and repeat every *n* number of seconds, minutes, hours, days, weeks, or months. +* Run immediately and repeat daily at one or more specific times, such as 8:00 AM and 5:00 PM. -* Run and repeat at one or more times each day, for example, at 8:00 AM and 5:00 PM. +* Run immediately and repeat weekly on specific days, such as Saturday and Sunday. -* Run and repeat each week, but only for specific days, such as Saturday and Sunday. +* Run immediately and repeat weekly on specific days and times, such as Monday through Friday at 8:00 AM and 5:00 PM. -* Run and repeat each week, but only for specific days and times, such as Monday through Friday at 8:00 AM and 5:00 PM. +> [!NOTE] +> +> To start and run your workflow only once in the future, use workflow template named +> **Scheduler: Run Once Jobs**. This template uses the Request trigger and HTTP action, +> rather than the Recurrence trigger, which doesn't support this recurrence pattern. +> For more information, see [Run jobs one time only](../logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md#run-once). -For differences between this trigger and the Sliding Window trigger or for more information about scheduling recurring workflows, see [Schedule and run recurring automated tasks, processes, and workflows with Azure Logic Apps](../logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md). +The Recurrence trigger isn't associated with any specific service, so you can use the trigger with almost any workflow, such as [Consumption logic app workflows and Standard logic app *stateful* workflows](../logic-apps/logic-apps-overview.md#resource-environment-differences). This trigger is currently unavailable for [Standard logic app *stateless* workflows](../logic-apps/logic-apps-overview.md#resource-environment-differences). -> [!TIP] -> If you want to trigger your logic app and run only one time in the future, see -> [Run jobs one time only](../logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md#run-once). +The Recurrence trigger is part of the built-in Schedule connector and runs natively on the Azure Logic Apps runtime. For more information about the built-in Schedule triggers and actions, see [Schedule and run recurring automated, tasks, and workflows with Azure Logic Apps](../logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md). ## Prerequisites * An Azure account and subscription. If you don't have a subscription, [sign up for a free Azure account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). -* Basic knowledge about [logic apps](../logic-apps/logic-apps-overview.md). If you're new to logic apps, learn [how to create your first logic app](../logic-apps/quickstart-create-first-logic-app-workflow.md). 
+* Basic knowledge about [logic app workflows](../logic-apps/logic-apps-overview.md). If you're new to logic apps, learn [how to create your first logic app workflow](../logic-apps/quickstart-create-first-logic-app-workflow.md). + + ## Add the Recurrence trigger -1. Sign in to the [Azure portal](https://portal.azure.com). Create a blank logic app. +1. In the [Azure portal](https://portal.azure.com), create a blank logic app and workflow. + + > [!NOTE] + > + > If you created a Standard logic app workflow, make sure to create a *stateful* workflow. + > The Recurrence trigger is currently unavailable for stateless workflows. + +1. In the designer, follow the corresponding steps, based on whether your logic app workflow is [Consumption or Standard](../logic-apps/logic-apps-overview.md#resource-environment-differences). + + **Consumption** + + 1. On the designer, under the search box, select **Built-in**. + 1. In the search box, enter **recurrence**. + 1. From the triggers list, select the trigger named **Recurrence**. -1. After Logic App Designer appears, in the search box, enter `recurrence` as your filter. From the triggers list, select this trigger as the first step in your logic app workflow: **Recurrence** + ![Screenshot for Consumption logic app workflow designer with "Recurrence" trigger selected.](./media/connectors-native-recurrence/add-recurrence-trigger-consumption.png) - ![Select "Recurrence" trigger](./media/connectors-native-recurrence/add-recurrence-trigger.png) + **Standard** -1. Set the interval and frequency for the recurrence. In this example, set these properties to run your workflow every week. + 1. On the designer, select **Choose operation**. + 1. On the **Add a trigger** pane, under the search box, select **Built-in**. + 1. In the search box, enter **recurrence**. + 1. From the triggers list, select the trigger named **Recurrence**. - ![Set interval and frequency](./media/connectors-native-recurrence/recurrence-trigger-details.png) + ![Screenshot for Standard logic app workflow designer with "Recurrence" trigger selected.](./media/connectors-native-recurrence/add-recurrence-trigger-standard.png) + +1. Set the interval and frequency for the recurrence. In this example, set these properties to run your workflow every week, for example: + + **Consumption** + + ![Screenshot for Consumption workflow designer with "Recurrence" trigger interval and frequency.](./media/connectors-native-recurrence/recurrence-trigger-details-consumption.png) + + **Standard** + + ![Screenshot for Standard workflow designer with "Recurrence" trigger interval and frequency.](./media/connectors-native-recurrence/recurrence-trigger-details-standard.png) | Property | JSON name | Required | Type | Description | |----------|-----------|----------|------|-------------| - | **Interval** | `interval` | Yes | Integer | A positive integer that describes how often the workflow runs based on the frequency. Here are the minimum and maximum intervals:

                  - Month: 1-16 months
                  - Week: 1-71 weeks
                  - Day: 1-500 days
                  - Hour: 1-12,000 hours
                  - Minute: 1-72,000 minutes
                  - Second: 1-9,999,999 seconds

                  For example, if the interval is 6, and the frequency is "Month", then the recurrence is every 6 months. | + | **Interval** | `interval` | Yes | Integer | A positive integer that describes how often the workflow runs based on the frequency. Here are the minimum and maximum intervals:

                  - Month: 1-16 months
                  - Week: 1-71 weeks
                  - Day: 1-500 days
                  - Hour: 1-12,000 hours
                  - Minute: 1-72,000 minutes
                  - Second: 1-9,999,999 seconds

                  For example, if the interval is 6, and the frequency is "Month", then the recurrence is every 6 months. | | **Frequency** | `frequency` | Yes | String | The unit of time for the recurrence: **Second**, **Minute**, **Hour**, **Day**, **Week**, or **Month** | |||||| > [!IMPORTANT] - > If you use the **Day** or **Week** frequency and specify a future date and time, make sure that you set up the recurrence in advance: + > If you use the **Day**, **Week**, or **Month** frequency, and you specify a future date and time, + > make sure that you set up the recurrence in advance. Otherwise, the workflow might skip the first recurrence. > > * **Day**: Set up the daily recurrence at least 24 hours in advance. > > * **Week**: Set up the weekly recurrence at least 7 days in advance. > - > Otherwise, the workflow might skip the first recurrence. + > * **Month**: Set up the monthly recurrence at least one month in advance. > - > If a recurrence doesn't specify a specific [start date and time](../logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md#start-time), the first recurrence runs immediately - > when you save or deploy the logic app, despite your trigger's recurrence setup. To avoid this behavior, provide a start - > date and time for when you want the first recurrence to run. + > If a recurrence doesn't specify a specific [start date and time](../logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md#start-time), + > the first recurrence runs immediately when you save or deploy the logic app, despite your trigger's recurrence setup. To avoid this behavior, + > provide a start date and time for when you want the first recurrence to run. > > If a recurrence doesn't specify any other advanced scheduling options such as specific times to run future recurrences, > those recurrences are based on the last run time. As a result, the start times for those recurrences might drift due to > factors such as latency during storage calls. To make sure that your logic app doesn't miss a recurrence, especially when - > the frequency is in days or longer, try these options: + > the frequency is in days or longer, try the following options: > - > * Provide a start date and time for the recurrence plus the specific times when to run subsequent recurrences by using the properties - > named **At these hours** and **At these minutes**, which are available only for the **Day** and **Week** frequencies. + > * Provide a start date and time for the recurrence and the specific times to run subsequent recurrences. You can use the + > properties named **At these hours** and **At these minutes**, which are available only for the **Day** and **Week** frequencies. > - > * Use the [Sliding Window trigger](../connectors/connectors-native-sliding-window.md), rather than the Recurrence trigger. + > * For Consumption logic app workflows, use the [Sliding Window trigger](../connectors/connectors-native-sliding-window.md), + > rather than the Recurrence trigger. 1. To set advanced scheduling options, open the **Add new parameter** list. Any options that you select appear on the trigger after selection. 
- ![Advanced scheduling options](./media/connectors-native-recurrence/recurrence-trigger-more-options-details.png) + **Consumption** + + ![Screenshot for Consumption workflow designer and "Recurrence" trigger with advanced scheduling options.](./media/connectors-native-recurrence/recurrence-trigger-advanced-consumption.png) + + **Standard** + + ![Screenshot for Standard workflow designer and "Recurrence" trigger with advanced scheduling options.](./media/connectors-native-recurrence/recurrence-trigger-advanced-standard.png) | Property | JSON name | Required | Type | Description | |----------|-----------|----------|------|-------------| | **Time zone** | `timeZone` | No | String | Applies only when you specify a start time because this trigger doesn't accept [UTC offset](https://en.wikipedia.org/wiki/UTC_offset). Select the time zone that you want to apply. | - | **Start time** | `startTime` | No | String | Provide a start date and time, which has a maximum of 49 years in the future and must follow the [ISO 8601 date time specification](https://en.wikipedia.org/wiki/ISO_8601#Combined_date_and_time_representations) in [UTC date time format](https://en.wikipedia.org/wiki/Coordinated_Universal_Time), but without a [UTC offset](https://en.wikipedia.org/wiki/UTC_offset):

                  YYYY-MM-DDThh:mm:ss if you select a time zone

                  -or-

                  YYYY-MM-DDThh:mm:ssZ if you don't select a time zone

                  So for example, if you want September 18, 2020 at 2:00 PM, then specify "2020-09-18T14:00:00" and select a time zone such as Pacific Standard Time. Or, specify "2020-09-18T14:00:00Z" without a time zone.

                  **Important:** If you don't select a time zone, you must add the letter "Z" at the end without any spaces. This "Z" refers to the equivalent [nautical time](https://en.wikipedia.org/wiki/Nautical_time). If you select a time zone value, you don't need to add a "Z" to the end of your **Start time** value. If you do, Logic Apps ignores the time zone value because the "Z" signifies a UTC time format.

                  For simple schedules, the start time is the first occurrence, while for complex schedules, the trigger doesn't fire any sooner than the start time. [*What are the ways that I can use the start date and time?*](../logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md#start-time) | + | **Start time** | `startTime` | No | String | Provide a start date and time, which has a maximum of 49 years in the future and must follow the [ISO 8601 date time specification](https://en.wikipedia.org/wiki/ISO_8601#Combined_date_and_time_representations) in [UTC date time format](https://en.wikipedia.org/wiki/Coordinated_Universal_Time), but without a [UTC offset](https://en.wikipedia.org/wiki/UTC_offset):

                  YYYY-MM-DDThh:mm:ss if you select a time zone

                  -or-

                  YYYY-MM-DDThh:mm:ssZ if you don't select a time zone

                  For example, if you want September 18, 2020 at 2:00 PM, specify "2020-09-18T14:00:00" and select a time zone such as Pacific Standard Time. Or, specify "2020-09-18T14:00:00Z" without a time zone.

                  **Important:** If you don't select a time zone, you must add the letter "Z" at the end without any spaces. This "Z" refers to the equivalent [nautical time](https://en.wikipedia.org/wiki/Nautical_time). If you select a time zone value, you don't need to add a "Z" to the end of your **Start time** value. If you do, Logic Apps ignores the time zone value because the "Z" signifies a UTC time format.

                  For simple schedules, the start time is the first occurrence, while for complex schedules, the trigger doesn't fire any sooner than the start time. [*What are the ways that I can use the start date and time?*](../logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md#start-time) | | **On these days** | `weekDays` | No | String or string array | If you select "Week", you can select one or more days when you want to run the workflow: **Monday**, **Tuesday**, **Wednesday**, **Thursday**, **Friday**, **Saturday**, and **Sunday** | - | **At these hours** | `hours` | No | Integer or integer array | If you select "Day" or "Week", you can select one or more integers from 0 to 23 as the hours of the day for when you want to run the workflow.

                  For example, if you specify "10", "12" and "14", you get 10 AM, 12 PM, and 2 PM for the hours of the day, but the minutes of the day are calculated based on when the recurrence starts. To set specific minutes of the day, for example, 10:00 AM, 12:00 PM, and 2:00 PM, specify those values by using the property named **At these minutes**. | - | **At these minutes** | `minutes` | No | Integer or integer array | If you select "Day" or "Week", you can select one or more integers from 0 to 59 as the minutes of the hour when you want to run the workflow.

                  For example, you can specify "30" as the minute mark and using the previous example for hours of the day, you get 10:30 AM, 12:30 PM, and 2:30 PM.

                  **Note**: Sometimes, the timestamp for the triggered run might vary up to 1 minute from the scheduled time. If you need to pass the timestamp exactly as scheduled to subsequent actions, you can use template expressions to change the timestamp accordingly. For more information, see [Date and time functions for expressions](../logic-apps/workflow-definition-language-functions-reference.md#date-time-functions). | + | **At these hours** | `hours` | No | Integer or integer array | If you select "Day" or "Week", you can select one or more integers from 0 to 23 as the hours of the day for when you want to run the workflow.

                  For example, if you specify "10", "12" and "14", you get 10 AM, 12 PM, and 2 PM for the hours of the day, but the minutes of the day are calculated based on when the recurrence starts. To set specific minutes of the day, for example, 10:00 AM, 12:00 PM, and 2:00 PM, specify those values by using the property named **At these minutes**. | + | **At these minutes** | `minutes` | No | Integer or integer array | If you select "Day" or "Week", you can select one or more integers from 0 to 59 as the minutes of the hour when you want to run the workflow.

                  For example, if you specify "30" as the minute mark and use the previous example for hours of the day, you get 10:30 AM, 12:30 PM, and 2:30 PM.

                  **Note**: Sometimes, the timestamp for the triggered run might vary up to 1 minute from the scheduled time. If you need to pass the timestamp exactly as scheduled to subsequent actions, you can use template expressions to change the timestamp accordingly. For more information, see [Date and time functions for expressions](../logic-apps/workflow-definition-language-functions-reference.md#date-time-functions). | ||||| - For example, suppose that today is Friday, September 4, 2020. The following Recurrence trigger doesn't fire *any sooner* than the start date and time, which is Friday, September 18, 2020 at 8:00 AM PST. However, the recurrence schedule is set for 10:30 AM, 12:30 PM, and 2:30 PM on Mondays only. So the first time that the trigger fires and creates a logic app workflow instance is on Monday at 10:30 AM. To learn more about how start times work, see these [start time examples](../logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md#start-time). + For example, suppose that today is Friday, September 4, 2020. The following Recurrence trigger doesn't fire *any sooner* than the specified start date and time, which is Friday, September 18, 2020 at 8:00 AM Pacific Time. However, the recurrence schedule is set for 10:30 AM, 12:30 PM, and 2:30 PM on Mondays only. The first time that the trigger fires and creates a workflow instance is on Monday at 10:30 AM. To learn more about how start times work, see these [start time examples](../logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md#start-time). Future runs happen at 12:30 PM and 2:30 PM on the same day. Each recurrence creates their own workflow instance. After that, the entire schedule repeats all over again next Monday. [*What are some other example occurrences?*](../logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md#example-recurrences) - ![Advanced scheduling example](./media/connectors-native-recurrence/recurrence-trigger-advanced-schedule-options.png) - > [!NOTE] + > > The trigger shows a preview for your specified recurrence only when you select "Day" or "Week" as the frequency. -1. Now build your remaining workflow with other actions. For more actions that you can add, see [Connectors for Azure Logic Apps](../connectors/apis-list.md). + **Consumption** + + ![Screenshot showing Consumption workflow and "Recurrence" trigger with advanced scheduling example.](./media/connectors-native-recurrence/recurrence-trigger-advanced-example-consumption.png) + + **Standard** + + ![Screenshot showing Standard workflow and "Recurrence" trigger with advanced scheduling example.](./media/connectors-native-recurrence/recurrence-trigger-advanced-example-standard.png) + +1. Now continue building your workflow with other actions. For more actions that you can add, see [Connectors for Azure Logic Apps](../connectors/apis-list.md). ## Workflow definition - Recurrence -In your logic app's underlying workflow definition, which uses JSON, you can view the [Recurrence trigger definition](../logic-apps/logic-apps-workflow-actions-triggers.md#recurrence-trigger) with the options that you chose. To view this definition, on the designer toolbar, choose **Code view**. To return to the designer, choose on the designer toolbar, **Designer**. 
+You can view how the [Recurrence trigger definition](../logic-apps/logic-apps-workflow-actions-triggers.md#recurrence-trigger) appears with your chosen options by reviewing the underlying JSON definition for your workflow in Consumption logic apps and Standard logic apps (stateful only). + +Based on whether your logic app is Consumption or Standard, choose one of the following options: -This example shows how a Recurrence trigger definition might look in an underlying workflow definition: +* **Consumption**: On the designer toolbar, select **Code view**. To return to the designer, on the code view editor toolbar, select **Designer**. + +* **Standard**: On the workflow menu, select **Code view**. To return to the designer, on the workflow menu, select **Designer**. + +The following example shows how a Recurrence trigger definition might appear in the workflow's underlying JSON definition: ``` json "triggers": { @@ -159,7 +211,6 @@ To schedule jobs, Azure Logic Apps puts the message for processing into the queu Otherwise, if you don't select a time zone, daylight saving time (DST) events might affect when triggers run. For example, the start time shifts one hour forward when DST starts and one hour backward when DST ends. However, some time windows might cause problems when the time shifts. For more information and examples, see [Recurrence for daylight saving time and standard time](../logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md#daylight-saving-standard-time). - ## Next steps * [Pause workflows with delay actions](../connectors/connectors-native-delay.md) diff --git a/articles/connectors/managed.md b/articles/connectors/managed.md index bf05ff259b5da..f2e45fa3bf55f 100644 --- a/articles/connectors/managed.md +++ b/articles/connectors/managed.md @@ -584,7 +584,7 @@ For more information, see these topics: [youtube-icon]: ./media/apis-list/youtube.png -[apache-impala-doc]: /connectors/azureimpala/ "Connect to your Impala database to read data from tables" +[apache-impala-doc]: /connectors/impala/ "Connect to your Impala database to read data from tables" [azure-automation-doc]: /connectors/azureautomation/ "Create and manage automation jobs for your cloud and on-premises infrastructure" [azure-blob-storage-doc]: ./connectors-create-api-azureblobstorage.md "Manage files in your blob container with Azure blob storage connector" [azure-cosmos-db-doc]: ./connectors-create-api-cosmos-db.md "Connect to Azure Cosmos DB so that you can access and manage Azure Cosmos DB documents" @@ -646,4 +646,4 @@ For more information, see these topics: [x12-encode-doc]: ../logic-apps/logic-apps-enterprise-integration-X12-encode.md "Encode messages that use the X12 protocol" -[gateway-doc]: ../logic-apps/logic-apps-gateway-connection.md "Connect to data sources on-premises from logic apps with on-premises data gateway" \ No newline at end of file +[gateway-doc]: ../logic-apps/logic-apps-gateway-connection.md "Connect to data sources on-premises from logic apps with on-premises data gateway" diff --git a/articles/connectors/media/connectors-native-recurrence/add-recurrence-trigger-consumption.png b/articles/connectors/media/connectors-native-recurrence/add-recurrence-trigger-consumption.png new file mode 100644 index 0000000000000..3113605de3071 Binary files /dev/null and b/articles/connectors/media/connectors-native-recurrence/add-recurrence-trigger-consumption.png differ diff --git a/articles/connectors/media/connectors-native-recurrence/add-recurrence-trigger-standard.png 
b/articles/connectors/media/connectors-native-recurrence/add-recurrence-trigger-standard.png new file mode 100644 index 0000000000000..d027131015f38 Binary files /dev/null and b/articles/connectors/media/connectors-native-recurrence/add-recurrence-trigger-standard.png differ diff --git a/articles/connectors/media/connectors-native-recurrence/add-recurrence-trigger.png b/articles/connectors/media/connectors-native-recurrence/add-recurrence-trigger.png deleted file mode 100644 index d83e8bd6ac437..0000000000000 Binary files a/articles/connectors/media/connectors-native-recurrence/add-recurrence-trigger.png and /dev/null differ diff --git a/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-advanced-consumption.png b/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-advanced-consumption.png new file mode 100644 index 0000000000000..6006597ac6f58 Binary files /dev/null and b/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-advanced-consumption.png differ diff --git a/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-advanced-schedule-options.png b/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-advanced-example-consumption.png similarity index 100% rename from articles/connectors/media/connectors-native-recurrence/recurrence-trigger-advanced-schedule-options.png rename to articles/connectors/media/connectors-native-recurrence/recurrence-trigger-advanced-example-consumption.png diff --git a/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-advanced-example-standard.png b/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-advanced-example-standard.png new file mode 100644 index 0000000000000..90e9bef91ca97 Binary files /dev/null and b/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-advanced-example-standard.png differ diff --git a/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-advanced-standard.png b/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-advanced-standard.png new file mode 100644 index 0000000000000..a8435ef6b1b27 Binary files /dev/null and b/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-advanced-standard.png differ diff --git a/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-details-consumption.png b/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-details-consumption.png new file mode 100644 index 0000000000000..d99d097dd2f99 Binary files /dev/null and b/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-details-consumption.png differ diff --git a/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-details-standard.png b/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-details-standard.png new file mode 100644 index 0000000000000..bc8aefcb5bd8e Binary files /dev/null and b/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-details-standard.png differ diff --git a/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-details.png b/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-details.png deleted file mode 100644 index 5edb02afdc740..0000000000000 Binary files a/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-details.png and /dev/null differ diff --git 
a/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-more-options-details.png b/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-more-options-details.png deleted file mode 100644 index 938d59989eb4b..0000000000000 Binary files a/articles/connectors/media/connectors-native-recurrence/recurrence-trigger-more-options-details.png and /dev/null differ diff --git a/articles/container-apps/TOC.yml b/articles/container-apps/TOC.yml index be5c4ce2da9b2..eec335a562de4 100644 --- a/articles/container-apps/TOC.yml +++ b/articles/container-apps/TOC.yml @@ -114,10 +114,12 @@ href: background-processing.md - name: Reference items: - - name: ARM API specification + - name: ARM and YAML specifications href: azure-resource-manager-api-spec.md - name: Azure CLI reference href: /cli/azure/containerapp?view=azure-cli-latest&preserve-view=true + - name: REST API reference + href: /rest/api/containerapps/ - name: Resources items: - name: Samples diff --git a/articles/container-apps/azure-resource-manager-api-spec.md b/articles/container-apps/azure-resource-manager-api-spec.md index 59b61a78bb461..b0757e82a443c 100644 --- a/articles/container-apps/azure-resource-manager-api-spec.md +++ b/articles/container-apps/azure-resource-manager-api-spec.md @@ -5,7 +5,7 @@ services: container-apps author: craigshoemaker ms.service: container-apps ms.topic: reference -ms.date: 05/13/2022 +ms.date: 05/26/2022 ms.author: cshoe ms.custom: ignite-fall-2021, event-tier1-build-2022 --- @@ -230,7 +230,7 @@ The following example ARM template deploys a container app. "name": "[parameters('containerappName')]", "location": "[parameters('location')]", "identity": { - "type": "None" + "type": "None" }, "properties": { "managedEnvironmentId": "[resourceId('Microsoft.App/managedEnvironments', parameters('environment_name'))]", @@ -290,43 +290,44 @@ The following example ARM template deploys a container app. 
"cpu": 0.5, "memory": "1Gi" }, - "probes":[ + "probes": [ { - "type":"liveness", - "httpGet":{ - "path":"/health", - "port":8080, - "httpHeaders":[ - { - "name":"Custom-Header", - "value":"liveness probe" - }] - }, - "initialDelaySeconds":7, - "periodSeconds":3 + "type": "liveness", + "httpGet": { + "path": "/health", + "port": 8080, + "httpHeaders": [ + { + "name": "Custom-Header", + "value": "liveness probe" + } + ] + }, + "initialDelaySeconds": 7, + "periodSeconds": 3 }, { - "type":"readiness", - "tcpSocket": - { - "port": 8081 - }, - "initialDelaySeconds": 10, - "periodSeconds": 3 + "type": "readiness", + "tcpSocket": { + "port": 8081 + }, + "initialDelaySeconds": 10, + "periodSeconds": 3 }, { - "type": "startup", - "httpGet": { - "path": "/startup", - "port": 8080, - "httpHeaders": [ - { - "name": "Custom-Header", - "value": "startup probe" - }] - }, - "initialDelaySeconds": 3, - "periodSeconds": 3 + "type": "startup", + "httpGet": { + "path": "/startup", + "port": 8080, + "httpHeaders": [ + { + "name": "Custom-Header", + "value": "startup probe" + } + ] + }, + "initialDelaySeconds": 3, + "periodSeconds": 3 } ], "volumeMounts": [ @@ -421,25 +422,25 @@ properties: probes: - type: liveness httpGet: - - path: "/health" - port: 8080 - httpHeaders: - - name: "Custom-Header" - value: "liveness probe" - initialDelaySeconds: 7 - periodSeconds: 3 + path: "/health" + port: 8080 + httpHeaders: + - name: "Custom-Header" + value: "liveness probe" + initialDelaySeconds: 7 + periodSeconds: 3 - type: readiness tcpSocket: - - port: 8081 + port: 8081 initialDelaySeconds: 10 periodSeconds: 3 - type: startup httpGet: - - path: "/startup" - port: 8080 - httpHeaders: - - name: "Custom-Header" - value: "startup probe" + path: "/startup" + port: 8080 + httpHeaders: + - name: "Custom-Header" + value: "startup probe" initialDelaySeconds: 3 periodSeconds: 3 scale: diff --git a/articles/container-apps/communicate-between-microservices.md b/articles/container-apps/communicate-between-microservices.md index 746dda49975dc..89cc7a27775e4 100644 --- a/articles/container-apps/communicate-between-microservices.md +++ b/articles/container-apps/communicate-between-microservices.md @@ -10,7 +10,7 @@ ms.author: cshoe zone_pivot_groups: container-apps-image-build-type --- -# Tutorial: Communication between microservices in Azure Container Apps Preview +# Tutorial: Communication between microservices in Azure Container Apps Azure Container Apps exposes each container app through a domain name if [ingress](ingress.md) is enabled. Ingress endpoints for container apps within an external environment can be either publicly accessible or only available to other container apps in the same [environment](environment.md). @@ -124,7 +124,7 @@ Output from the `az acr build` command shows the upload progress of the source c ::: zone pivot="docker-local" -1. The following command builds a container image for the album UI and tags it with the fully qualified name of the ACR log in server. The `.` at the end of the command represents the docker build context, meaning this command should be run within the *src* folder where the Dockerfile is located. +1. The following command builds a container image for the album UI and tags it with the fully qualified name of the ACR login server. The `.` at the end of the command represents the docker build context, meaning this command should be run within the *src* folder where the Dockerfile is located. 
# [Bash](#tab/bash) diff --git a/articles/container-apps/compare-options.md b/articles/container-apps/compare-options.md index 46ef674cf5168..ae648de090c80 100644 --- a/articles/container-apps/compare-options.md +++ b/articles/container-apps/compare-options.md @@ -2,11 +2,11 @@ title: 'Comparing Container Apps with other Azure container options' description: Understand when to use Azure Container Apps and how it compares to other container options including Azure Container Instances, Azure App Service, Azure Functions, and Azure Kubernetes Service. services: container-apps -author: jeffhollan +author: craigshoemaker ms.service: container-apps ms.topic: quickstart ms.date: 11/03/2021 -ms.author: jehollan +ms.author: cshoe ms.custom: ignite-fall-2021, mode-other, event-tier1-build-2022 --- diff --git a/articles/container-apps/containers.md b/articles/container-apps/containers.md index 61fc1fa32523a..8e1442439c6eb 100644 --- a/articles/container-apps/containers.md +++ b/articles/container-apps/containers.md @@ -5,7 +5,7 @@ services: container-apps author: craigshoemaker ms.service: container-apps ms.topic: conceptual -ms.date: 05/12/2022 +ms.date: 06/02/2022 ms.author: cshoe ms.custom: ignite-fall-2021, event-tier1-build-2022 --- @@ -32,7 +32,8 @@ Features include: ## Configuration -Below is an example of the `containers` array in the [`properties.template`](azure-resource-manager-api-spec.md#propertiestemplate) section of a container app resource template. The excerpt shows the available configuration options when setting up a container. + +The following is an example of the `containers` array in the [`properties.template`](azure-resource-manager-api-spec.md#propertiestemplate) section of a container app resource template. The excerpt shows the available configuration options when setting up a container. ```json "containers": [ @@ -114,7 +115,6 @@ Below is an example of the `containers` array in the [`properties.template`](azu | `volumeMounts` | An array of volume mount definitions. | You can define a temporary volume or multiple permanent storage volumes for your container. For more information about storage volumes, see [Use storage mounts in Azure Container Apps](storage-mounts.md).| | `probes`| An array of health probes enabled in the container. | This feature is based on Kubernetes health probes. For more information about probes settings, see [Health probes in Azure Container Apps](health-probes.md).| - When allocating resources, the total amount of CPUs and memory requested for all the containers in a container app must add up to one of the following combinations. | vCPUs (cores) | Memory | @@ -162,7 +162,7 @@ To use a container registry, you define the required fields in `registries` arra } ``` -With the registry information setup, the saved credentials can be used to pull a container image from the private registry when your app is deployed. +With the registry information set up, the saved credentials can be used to pull a container image from the private registry when your app is deployed. The following example shows how to configure Azure Container Registry credentials in a container app. @@ -188,6 +188,70 @@ The following example shows how to configure Azure Container Registry credential } ``` +### Managed identity with Azure Container Registry + +You can use an Azure managed identity to authenticate with Azure Container Registry instead of using a username and password. 
To use a managed identity: + +- Assign a system-assigned or user-assigned managed identity to your container app. +- Specify the managed identity you want to use for each registry. + +When assigning a managed identity to a registry, use the managed identity resource ID for a user-assigned identity, or "system" for the system-assigned identity. For more information about using managed identities, see [Managed identities in Azure Container Apps Preview](managed-identity.md). + +```json +{ + "identity": { + "type": "SystemAssigned,UserAssigned", + "userAssignedIdentities": { + "<IDENTITY_RESOURCE_ID>": {} + } + }, + "properties": { + "configuration": { + "registries": [ + { + "server": "myacr1.azurecr.io", + "identity": "<IDENTITY_RESOURCE_ID>" + }, + { + "server": "myacr2.azurecr.io", + "identity": "system" + }] + } + ... + } +} +``` + +The managed identity must have `AcrPull` access to the Azure Container Registry. For more information about assigning Azure Container Registry permissions to managed identities, see [Authenticate with managed identity](../container-registry/container-registry-authentication-managed-identity.md). + +#### Configure a user-assigned managed identity + +To configure a user-assigned managed identity: + +1. Create the user-assigned identity if it doesn't exist. +1. Give the user-assigned identity `AcrPull` permission to your private repository. +1. Add the identity to your container app configuration as shown above. + +For more information about configuring user-assigned identities, see [Add a user-assigned identity](managed-identity.md#add-a-user-assigned-identity). + + +#### Configure a system-assigned managed identity + +System-assigned identities are created at the time your container app is created, and therefore, won't have `AcrPull` access to your Azure Container Registry. As a result, the image can't be pulled from your private registry when your app is first deployed. + +To configure a system-assigned identity, you must use one of the following methods. + +- **Option 1**: Use a public registry for the initial deployment: + 1. Create your container app using a public image and a system-assigned identity. + 1. Give the new system-assigned identity `AcrPull` access to your private Azure Container Registry. + 1. Update your container app, replacing the public image with the image from your private Azure Container Registry. +- **Option 2**: Restart your app after assigning permissions: + 1. Create your container app using a private image and a system-assigned identity. (The deployment will result in a failure to pull the image.) + 1. Give the new system-assigned identity `AcrPull` access to your private Azure Container Registry. + 1. Restart your container app revision. + +For more information about configuring system-assigned identities, see [Add a system-assigned identity](managed-identity.md#add-a-system-assigned-identity).
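As an illustrative sketch only, granting `AcrPull` to the identity typically looks like the following Azure CLI commands. The resource names are placeholders, and the point at which you run these commands depends on which of the options above you follow:

```azurecli
# Get the principal ID of the container app's system-assigned identity.
PRINCIPAL_ID=$(az containerapp show \
  --name <CONTAINER_APP_NAME> \
  --resource-group <RESOURCE_GROUP_NAME> \
  --query identity.principalId \
  --output tsv)

# Grant that identity AcrPull on the container registry.
az role assignment create \
  --assignee $PRINCIPAL_ID \
  --role AcrPull \
  --scope $(az acr show --name <REGISTRY_NAME> --query id --output tsv)
```

For a user-assigned identity, you would instead pass that identity's principal ID (for example, from `az identity show`) as the `--assignee` value.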
+ ## Limitations Azure Container Apps has the following limitations: diff --git a/articles/container-apps/custom-domains-certificates.md b/articles/container-apps/custom-domains-certificates.md index 9202ec21f6dc1..5f6c8085bd1f2 100644 --- a/articles/container-apps/custom-domains-certificates.md +++ b/articles/container-apps/custom-domains-certificates.md @@ -5,7 +5,7 @@ services: container-apps author: craigshoemaker ms.service: container-apps ms.topic: how-to -ms.date: 05/15/2022 +ms.date: 06/07/2022 ms.author: cshoe --- @@ -16,14 +16,22 @@ Azure Container Apps allows you to bind one or more custom domains to a containe - Every domain name must be associated with a domain certificate. - Certificates are applied to the container app environment and are bound to individual container apps. You must have role-based access to the environment to add certificates. - [SNI domain certificates](https://wikipedia.org/wiki/Server_Name_Indication) are required. +- Ingress must be enabled for the container app ## Add a custom domain and certificate -> [!NOTE] -> If you are using a new certificate, you must have an existing [SNI domain certificate](https://wikipedia.org/wiki/Server_Name_Indication) file available to upload to Azure. +> [!IMPORTANT] +> If you are using a new certificate, you must have an existing [SNI domain certificate](https://wikipedia.org/wiki/Server_Name_Indication) file available to upload to Azure. 1. Navigate to your container app in the [Azure portal](https://portal.azure.com) +1. Verify that your app has ingress enabled by selecting **Ingress** in the *Settings* section. If ingress is not enabled, enable it with these steps: + + 1. Set *HTTP Ingress* to **Enabled**. + 1. Select the desired *Ingress traffic* setting. + 1. Enter the *Target port*. + 1. Select **Save**. + 1. Under the *Settings* section, select **Custom domains**. 1. Select the **Add custom domain** button. diff --git a/articles/container-apps/dapr-overview.md b/articles/container-apps/dapr-overview.md index 3321dfcbaa58c..e2efffaead30e 100644 --- a/articles/container-apps/dapr-overview.md +++ b/articles/container-apps/dapr-overview.md @@ -6,12 +6,12 @@ author: hhunter-ms ms.service: container-apps ms.custom: event-tier1-build-2022 ms.topic: conceptual -ms.date: 05/10/2022 +ms.date: 06/07/2022 --- # Dapr integration with Azure Container Apps -The Distributed Application Runtime ([Dapr][dapr-concepts]) is a set of incrementally adoptable APIs that simplify the authoring of distributed, microservice-based applications. For example, Dapr provides capabilities for enabling application intercommunication, whether through messaging via pub/sub or reliable and secure service-to-service calls. Once enabled in Container Apps, Dapr exposes its HTTP and gRPC APIs via a sidecar: a process that runs in tandem with each of your Container Apps. +The Distributed Application Runtime ([Dapr][dapr-concepts]) is a set of incrementally adoptable APIs that simplify the authoring of distributed, microservice-based applications. For example, Dapr provides capabilities for enabling application intercommunication, whether through messaging via pub/sub or reliable and secure service-to-service calls. Once Dapr is enabled in Container Apps, it exposes its HTTP and gRPC APIs via a sidecar: a process that runs in tandem with each of your Container Apps. 
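As a purely illustrative example of the sidecar pattern just described, application code reaches Dapr over localhost rather than calling other services directly, and the sidecar handles discovery, retries, and security. The app ID `checkout` and method `orders` below are invented for illustration, and port 3500 is the Container Apps default noted later in this article:

```bash
# Call the 'orders' method on the Dapr-enabled container app whose app ID is 'checkout',
# going through the local Dapr sidecar's HTTP API (default port 3500 in Container Apps).
curl -X POST http://localhost:3500/v1.0/invoke/checkout/method/orders \
  -H "Content-Type: application/json" \
  -d '{"orderId": 42}'
```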
Dapr APIs, also referred to as building blocks, are built on best practice industry standards, that: @@ -39,22 +39,49 @@ The following Pub/sub example demonstrates how Dapr works alongside your contain | Label | Dapr settings | Description | | ----- | ------------- | ----------- | -| 1 | Container Apps with Dapr enabled | Dapr is enabled at the container app level by configuring Dapr settings. Dapr settings exist at the app-level, meaning they apply across revisions. | +| 1 | Container Apps with Dapr enabled | Dapr is enabled at the container app level by configuring Dapr settings. Dapr settings apply across all revisions of a given container app. | | 2 | Dapr sidecar | Fully managed Dapr APIs are exposed to your container app via the Dapr sidecar. These APIs are available through HTTP and gRPC protocols. By default, the sidecar runs on port 3500 in Container Apps. | -| 3 | Dapr component | Dapr components can be shared by multiple container apps. Using scopes, the Dapr sidecar will determine which components to load for a given container app at runtime. | +| 3 | Dapr component | Dapr components can be shared by multiple container apps. The Dapr sidecar uses scopes to determine which components to load for a given container app at runtime. | ### Enable Dapr -You can define the Dapr configuration for a container app through the Azure CLI or using Infrastructure as Code templates like bicep or ARM. With the following settings, you enable Dapr on your app: +You can define the Dapr configuration for a container app through the Azure CLI or using Infrastructure as Code templates like a bicep or an Azure Resource Manager (ARM) template. You can enable Dapr in your app with the following settings: -| Field | Description | -| ----- | ----------- | -| `--enable-dapr` / `enabled` | Enables Dapr on the container app. | -| `--dapr-app-port` / `appPort` | Identifies which port your application is listening. | -| `--dapr-app-protocol` / `appProtocol` | Tells Dapr which protocol your application is using. Valid options are `http` or `grpc`. Default is `http`. | -| `--dapr-app-id` / `appId` | The unique ID of the application. Used for service discovery, state encapsulation, and the pub/sub consumer ID. | +| CLI Parameter | Template field | Description | +| ----- | ----------- | ----------- | +| `--enable-dapr` | `dapr.enabled` | Enables Dapr on the container app. | +| `--dapr-app-port` | `dapr.appPort` | Identifies which port your application is listening. | +| `--dapr-app-protocol` | `dapr.appProtocol` | Tells Dapr which protocol your application is using. Valid options are `http` or `grpc`. Default is `http`. | +| `--dapr-app-id` | `dapr.appId` | The unique ID of the application. Used for service discovery, state encapsulation, and the pub/sub consumer ID. | -Since Dapr settings are considered application-scope changes, new revisions aren't created when you change Dapr settings. However, when changing a Dapr setting, the container app instance and revisions are automatically restarted. +The following example shows how to define a Dapr configuration in a template by adding the Dapr configuration to the `properties.configuration` section of your container apps resource declaration. 
+ +# [Bicep](#tab/bicep1) + +```bicep + dapr: { + enabled: true + appId: 'nodeapp' + appProtocol: 'http' + appPort: 3000 + } +``` + +# [ARM](#tab/arm1) + +```json + "dapr": { + "enabled": true, + "appId": "nodeapp", + "appProtocol": "http", + "appPort": 3000 + } + +``` + +--- + +Since Dapr settings are considered application-scope changes, new revisions aren't created when you change Dapr settings. However, when changing Dapr settings, the container app revisions and replicas are automatically restarted. ### Configure Dapr components @@ -65,14 +92,14 @@ Once Dapr is enabled on your container app, you're able to plug in and use the [ - Can be easily modified to point to any one of the component implementations. - Can reference secure configuration values using Container Apps secrets. -Based on your needs, you can "plug in" certain Dapr component types like state stores, pub/sub brokers, and more. In the examples below, you will find the various schemas available for defining a Dapr component in Azure Container Apps. The Container Apps manifests differ sightly from the Dapr OSS manifests in order to simplify the component creation experience. +Based on your needs, you can "plug in" certain Dapr component types like state stores, pub/sub brokers, and more. In the examples below, you'll find the various schemas available for defining a Dapr component in Azure Container Apps. The Container Apps manifests differ slightly from the Dapr OSS manifests in order to simplify the component creation experience. > [!NOTE] > By default, all Dapr-enabled container apps within the same environment will load the full set of deployed components. By adding scopes to a component, you tell the Dapr sidecars for each respective container app which components to load at runtime. Using scopes is recommended for production workloads. # [YAML](#tab/yaml) -When defining a Dapr component via YAML, you will pass your component manifest into the Azure CLI. When configuring multiple components, you will need to create a separate YAML file and run the Azure CLI command for each component. +When defining a Dapr component via YAML, you'll pass your component manifest into the Azure CLI. When configuring multiple components, you'll need to create a separate YAML file and run the Azure CLI command for each component. For example, deploy a `pubsub.yaml` component using the following command: @@ -80,7 +107,7 @@ For example, deploy a `pubsub.yaml` component using the following command: az containerapp env dapr-component set --name ENVIRONMENT_NAME --resource-group RESOURCE_GROUP_NAME --dapr-component-name pubsub --yaml "./pubsub.yaml" ``` -The `pubsub.yaml` spec will be scoped to the dapr-enabled container apps with app ids `publisher-app` and `subscriber-app`. +The `pubsub.yaml` spec will be scoped to the dapr-enabled container apps with app IDs `publisher-app` and `subscriber-app`. ```yaml # pubsub.yaml for Azure Service Bus component @@ -103,7 +130,7 @@ The `pubsub.yaml` spec will be scoped to the dapr-enabled container apps with ap This resource defines a Dapr component called `dapr-pubsub` via Bicep. The Dapr component is defined as a child resource of your Container Apps environment. To define multiple components, you can add a `daprComponent` resource for each Dapr component.
-The `dapr-pubsub` component is scoped to the Dapr-enabled container apps with app ids `publisher-app` and `subscriber-app`: +The `dapr-pubsub` component is scoped to the Dapr-enabled container apps with app IDs `publisher-app` and `subscriber-app`: ```bicep resource daprComponent 'daprComponents@2022-03-01' = { @@ -136,7 +163,7 @@ resource daprComponent 'daprComponents@2022-03-01' = { A Dapr component is defined as a child resource of your Container Apps environment. To define multiple components, you can add a `daprComponent` resource for each Dapr component. -This resource defines a Dapr component called `dapr-pubsub` via ARM. The `dapr-pubsub` component will be scoped to the Dapr-enabled container apps with app ids `publisher-app` and `subscriber-app`: +This resource defines a Dapr component called `dapr-pubsub` via ARM. The `dapr-pubsub` component will be scoped to the Dapr-enabled container apps with app IDs `publisher-app` and `subscriber-app`: ```json { @@ -191,6 +218,12 @@ scopes: - subscriber-app ``` +## Current supported Dapr version + +Azure Container Apps supports Dapr version 1.7.3. + +Version upgrades are handled transparently by Azure Container Apps. You can find the current version via the Azure portal and the CLI. + ## Limitations ### Unsupported Dapr capabilities diff --git a/articles/container-apps/deploy-visual-studio.md b/articles/container-apps/deploy-visual-studio.md index 5ba89ee0aba8e..b66568214fe7b 100644 --- a/articles/container-apps/deploy-visual-studio.md +++ b/articles/container-apps/deploy-visual-studio.md @@ -19,7 +19,7 @@ In this tutorial, you'll deploy a containerized ASP.NET Core 6.0 application to ## Prerequisites - An Azure account with an active subscription is required. If you don't already have one, you can [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). -- Visual Studio 2022 3 or higher, available as a [free download](https://visualstudio.microsoft.com/vs/preview/). +- Visual Studio 2022 version 17.2 or higher, available as a [free download](https://visualstudio.microsoft.com). - [Docker Desktop](https://hub.docker.com/editions/community/docker-ce-desktop-windows) for Windows. Visual Studio uses Docker Desktop for various containerization features. ## Create the project diff --git a/articles/container-apps/disaster-recovery.md b/articles/container-apps/disaster-recovery.md index 347a1c7d3ceb8..b8e52fac3ac84 100644 --- a/articles/container-apps/disaster-recovery.md +++ b/articles/container-apps/disaster-recovery.md @@ -19,7 +19,7 @@ In the unlikely event of a full region outage, you have the option of using one - **Manual recovery**: Manually deploy to a new region, or wait for the region to recover, and then manually redeploy all environments and apps. -- **Resilient recovery**: First, deploy your container apps in advance to multiple regions. Next, use Azure Front Door or Azure Traffic Manager to handle incoming requests, pointing traffic to your primary region. Then, should an outage occur, you can redirect traffic away from the affected region. See [Cross-region replication in Azure](/azure/availability-zones/cross-region-replication-azure) for more information. +- **Resilient recovery**: First, deploy your container apps in advance to multiple regions. Next, use Azure Front Door or Azure Traffic Manager to handle incoming requests, pointing traffic to your primary region. Then, should an outage occur, you can redirect traffic away from the affected region. 
See [Cross-region replication in Azure](../availability-zones/cross-region-replication-azure.md) for more information. > [!NOTE] > Regardless of which strategy you choose, make sure your deployment configuration files are in source control so you can easily redeploy if necessary. @@ -27,4 +27,4 @@ In the unlikely event of a full region outage, you have the option of using one Additionally, the following resources can help you create your own disaster recovery plan: - [Failure and disaster recovery for Azure applications](/azure/architecture/reliability/disaster-recovery) -- [Azure resiliency technical guidance](/azure/architecture/checklist/resiliency-per-service) +- [Azure resiliency technical guidance](/azure/architecture/checklist/resiliency-per-service) \ No newline at end of file diff --git a/articles/container-apps/firewall-integration.md b/articles/container-apps/firewall-integration.md index 6d41af67f5cc6..cb64b589f1375 100644 --- a/articles/container-apps/firewall-integration.md +++ b/articles/container-apps/firewall-integration.md @@ -14,7 +14,7 @@ ms.author: jennylaw Firewall settings Network Security Groups (NSGs) needed to configure virtual networks closely resemble the settings required by Kubernetes. -Some outbound dependencies of Azure Kubernetes Service (AKS) clusters rely exclusively on fully qualified domain names (FQDN), therefore securing an AKS cluster purely with NSGs isn't possible. Refer to [Control egress traffic for cluster nodes in Azure Kubernetes Service](/azure/aks/limit-egress-traffic) for details. +Some outbound dependencies of Azure Kubernetes Service (AKS) clusters rely exclusively on fully qualified domain names (FQDN), therefore securing an AKS cluster purely with NSGs isn't possible. Refer to [Control egress traffic for cluster nodes in Azure Kubernetes Service](../aks/limit-egress-traffic.md) for details. * You can lock down a network via NSGs with more restrictive rules than the default NSG rules. * To fully secure a cluster, use a combination of NSGs and a firewall. @@ -65,4 +65,4 @@ As the following rules require allowing all IPs, use a Firewall solution to lock | `dc.services.visualstudio.com` | HTTPS | `443` | This endpoint is used for metrics and monitoring using Azure Monitor. | | `*.ods.opinsights.azure.com` | HTTPS | `443` | This endpoint is used by Azure Monitor for ingesting log analytics data. | | `*.oms.opinsights.azure.com` | HTTPS | `443` | This endpoint is used by `omsagent`, which is used to authenticate the log analytics service. | -| `*.monitoring.azure.com` | HTTPS | `443` | This endpoint is used to send metrics data to Azure Monitor. | +| `*.monitoring.azure.com` | HTTPS | `443` | This endpoint is used to send metrics data to Azure Monitor. 
| \ No newline at end of file diff --git a/articles/container-apps/get-started-existing-container-image.md b/articles/container-apps/get-started-existing-container-image.md index d2848e3523405..f39eae7b1f319 100644 --- a/articles/container-apps/get-started-existing-container-image.md +++ b/articles/container-apps/get-started-existing-container-image.md @@ -189,7 +189,8 @@ az monitor log-analytics query \ ```powershell $LOG_ANALYTICS_WORKSPACE_CLIENT_ID=(az containerapp env show --name $CONTAINERAPPS_ENVIRONMENT --resource-group $RESOURCE_GROUP --query properties.appLogsConfiguration.logAnalyticsConfiguration.customerId --out tsv) -az monitor log-analytics query \ + +az monitor log-analytics query ` --workspace $LOG_ANALYTICS_WORKSPACE_CLIENT_ID ` --analytics-query "ContainerAppConsoleLogs_CL | where ContainerAppName_s == 'my-container-app' | project ContainerAppName_s, Log_s, TimeGenerated" ` --out table diff --git a/articles/container-apps/managed-identity.md b/articles/container-apps/managed-identity.md index 18fc0101a2dd7..969e5c40025a0 100644 --- a/articles/container-apps/managed-identity.md +++ b/articles/container-apps/managed-identity.md @@ -6,7 +6,7 @@ author: cebundy ms.service: container-apps ms.custom: event-tier1-build-2022 ms.topic: how-to -ms.date: 04/11/2022 +ms.date: 06/02/2022 ms.author: v-bcatherine --- @@ -29,6 +29,8 @@ With managed identities: - You can use role-based access control to grant specific permissions to a managed identity. - System-assigned identities are automatically created and managed. They're deleted when your container app is deleted. - You can add and delete user-assigned identities and assign them to multiple resources. They're independent of your container app's life cycle. +- You can use managed identity to [authenticate with a private Azure Container Registry](containers.md#container-registries) without a username and password to pull containers for your Container App. + ### Common use cases @@ -44,11 +46,7 @@ User-assigned identities are ideal for workloads that: ## Limitations -The identity is only available within a running container, which means you can't use a managed identity to: - -- Pull an image from Azure Container Registry -- Define scaling rules or Dapr configuration - - To access resources that require a connection string or key, such as storage resources, you'll still need to include the connection string or key in the `secretRef` of the scaling rule. +The identity is only available within a running container, which means you can't use a managed identity in scaling rules or Dapr configuration. To access resources that require a connection string or key, such as storage resources, you'll still need to include the connection string or key in the `secretRef` of the scaling rule. ## Configure managed identities @@ -269,11 +267,11 @@ A container app with a managed identity exposes the identity endpoint by definin - IDENTITY_ENDPOINT - local URL from which your container app can request tokens. - IDENTITY_HEADER - a header used to help mitigate server-side request forgery (SSRF) attacks. The value is rotated by the platform. -To get a token for a resource, make an HTTP GET request to this endpoint, including the following parameters: +To get a token for a resource, make an HTTP GET request to the endpoint, including the following parameters: | Parameter name | In | Description| |---------|---------|---------| -| resource | Query | The Azure AD resource URI of the resource for which a token should be obtained. 
This could be one of the [Azure services that support Azure AD authentication](../active-directory/managed-identities-azure-resources/services-support-managed-identities.md#azure-services-that-support-azure-ad-authentication) or any other resource URI. | +| resource | Query | The Azure AD resource URI of the resource for which a token should be obtained. The resource could be one of the [Azure services that support Azure AD authentication](../active-directory/managed-identities-azure-resources/services-support-managed-identities.md#azure-services-that-support-azure-ad-authentication) or any other resource URI. | | api-version | Query | The version of the token API to be used. Use "2019-08-01" or later. | | X-IDENTITY-HEADER | Header | The value of the `IDENTITY_HEADER` environment variable. This header mitigates server-side request forgery (SSRF) attacks. | | client_id | Query | (Optional) The client ID of the user-assigned identity to be used. Can't be used on a request that includes `principal_id`, `mi_res_id`, or `object_id`. If all ID parameters (`client_id`, `principal_id`, `object_id`, and `mi_res_id`) are omitted, the system-assigned identity is used.| diff --git a/articles/container-apps/microservices-dapr-azure-resource-manager.md b/articles/container-apps/microservices-dapr-azure-resource-manager.md index 3f5377bcba6c5..b842c5253cc83 100644 --- a/articles/container-apps/microservices-dapr-azure-resource-manager.md +++ b/articles/container-apps/microservices-dapr-azure-resource-manager.md @@ -19,7 +19,7 @@ You learn how to: > [!div class="checklist"] > * Create an Azure Blob Storage for use as a Dapr state store -> * Deploy a container apps environment to host container apps +> * Deploy a Container Apps environment to host container apps > * Deploy two dapr-enabled container apps: one that produces orders and one that consumes orders and stores them > * Verify the interaction between the two microservices. @@ -203,19 +203,22 @@ New-AzStorageAccount -ResourceGroupName $RESOURCE_GROUP ` --- -Once your Azure Blob Storage account is created, the following values are needed for subsequent steps in this tutorial. +Once your Azure Blob Storage account is created, you'll create a template where these storage parameters will use environment variable values. The values are passed in via the `parameters` argument when you deploy your apps with the `az deployment group create` command. -- `storage_account_name` is the value of the `STORAGE_ACCOUNT` variable. +- `storage_account_name` uses the value of the `STORAGE_ACCOUNT` variable. -- `storage_container_name` is the value of the `STORAGE_ACCOUNT_CONTAINER` variable. - -Dapr creates a container with this name when it doesn't already exist in your Azure Storage account. +- `storage_container_name` uses the value of the `STORAGE_ACCOUNT_CONTAINER` variable. Dapr creates a container with this name when it doesn't already exist in your Azure Storage account. ::: zone pivot="container-apps-arm" ### Create Azure Resource Manager (ARM) template -Create an ARM template to deploy a Container Apps environment including the associated Log Analytics workspace and Application Insights resource for distributed tracing, a dapr component for the state store and the two dapr-enabled container apps. 
+Create an ARM template to deploy a Container Apps environment that includes: + +* the associated Log Analytics workspace +* the Application Insights resource for distributed tracing +* a dapr component for the state store +* the two dapr-enabled container apps Save the following file as _hello-world.json_: @@ -340,7 +343,7 @@ Save the following file as _hello-world.json_: "managedEnvironmentId": "[resourceId('Microsoft.App/managedEnvironments/', parameters('environment_name'))]", "configuration": { "ingress": { - "external": true, + "external": false, "targetPort": 3000 }, "dapr": { @@ -355,6 +358,12 @@ Save the following file as _hello-world.json_: { "image": "dapriosamples/hello-k8s-node:latest", "name": "hello-k8s-node", + "env": [ + { + "name": "APP_PORT", + "value": "3000" + } + ], "resources": { "cpu": 0.5, "memory": "1.0Gi" @@ -413,7 +422,12 @@ Save the following file as _hello-world.json_: ### Create Azure Bicep templates -Create a bicep template to deploy a Container Apps environment including the associated Log Analytics workspace and Application Insights resource for distributed tracing, a dapr component for the state store and the two dapr-enabled container apps. +Create a bicep template to deploy a Container Apps environment that includes: + +* the associated Log Analytics workspace +* the Application Insights resource for distributed tracing +* a dapr component for the state store +* the two dapr-enabled container apps Save the following file as _hello-world.bicep_: @@ -504,7 +518,7 @@ resource nodeapp 'Microsoft.App/containerApps@2022-03-01' = { managedEnvironmentId: environment.id configuration: { ingress: { - external: true + external: false targetPort: 3000 } dapr: { @@ -519,8 +533,14 @@ resource nodeapp 'Microsoft.App/containerApps@2022-03-01' = { { image: 'dapriosamples/hello-k8s-node:latest' name: 'hello-k8s-node' + env: [ + { + name: 'APP_PORT' + value: '3000' + } + ] resources: { - cpu: '0.5' + cpu: json('0.5') memory: '1.0Gi' } } @@ -550,7 +570,7 @@ resource pythonapp 'Microsoft.App/containerApps@2022-03-01' = { image: 'dapriosamples/hello-k8s-python:latest' name: 'hello-k8s-python' resources: { - cpu: '0.5' + cpu: json('0.5') memory: '1.0Gi' } } @@ -652,7 +672,7 @@ New-AzResourceGroupDeployment ` This command deploys: -- the container apps environment and associated Log Analytics workspace for hosting the hello world dapr solution +- the Container Apps environment and associated Log Analytics workspace for hosting the hello world dapr solution - an Application Insights instance for Dapr distributed tracing - the `nodeapp` app server running on `targetPort: 3000` with dapr enabled and configured using: `"appId": "nodeapp"` and `"appPort": 3000` - the `daprComponents` object of `"type": "state.azure.blobstorage"` scoped for use by the `nodeapp` for storing state @@ -726,7 +746,7 @@ nodeapp Got a new order! Order ID: 63 PrimaryResult 2021-10-22 ## Clean up resources -Once you are done, run the following command to delete your resource group along with all the resources you created in this tutorial. +Once you're done, run the following command to delete your resource group along with all the resources you created in this tutorial. 
# [Bash](#tab/bash) diff --git a/articles/container-apps/microservices-dapr.md b/articles/container-apps/microservices-dapr.md index 4538fbe19a05f..1cd0ab0ebdb6e 100644 --- a/articles/container-apps/microservices-dapr.md +++ b/articles/container-apps/microservices-dapr.md @@ -116,11 +116,13 @@ az storage account create \ # [PowerShell](#tab/powershell) -```powershell -New-AzStorageAccount -ResourceGroupName $RESOURCE_GROUP ` - -Name $STORAGE_ACCOUNT ` - -Location $LOCATION ` - -SkuName Standard_RAGRS +```azurecli +az storage account create ` + --name $STORAGE_ACCOUNT ` + --resource-group $RESOURCE_GROUP ` + --location "$LOCATION" ` + --sku Standard_RAGRS ` + --kind StorageV2 ``` --- @@ -135,11 +137,10 @@ STORAGE_ACCOUNT_KEY=`az storage account keys list --resource-group $RESOURCE_GRO # [PowerShell](#tab/powershell) -```powershell -$STORAGE_ACCOUNT_KEY=(Get-AzStorageAccountKey -ResourceGroupName $RESOURCE_GROUP -AccountName $STORAGE_ACCOUNT)| Where-Object -Property KeyName -Contains 'key1' | Select-Object -ExpandProperty Value +```azurecli +$STORAGE_ACCOUNT_KEY=(az storage account keys list --resource-group $RESOURCE_GROUP --account-name $STORAGE_ACCOUNT --query '[0].value' --out tsv) ``` - --- ### Configure the state store component @@ -196,7 +197,7 @@ az containerapp env dapr-component set \ # [PowerShell](#tab/powershell) -```powershell +```azurecli az containerapp env dapr-component set ` --name $CONTAINERAPPS_ENVIRONMENT --resource-group $RESOURCE_GROUP ` --dapr-component-name statestore ` @@ -205,7 +206,7 @@ az containerapp env dapr-component set ` --- -Your state store is configured using the Dapr component described in *statestore.yaml*. The component is scoped to a container app named `nodeapp` and is not available to other container apps. +Your state store is configured using the Dapr component described in *statestore.yaml*. The component is scoped to a container app named `nodeapp` and isn't available to other container apps. ## Deploy the service application (HTTP web server) @@ -218,12 +219,13 @@ az containerapp create \ --environment $CONTAINERAPPS_ENVIRONMENT \ --image dapriosamples/hello-k8s-node:latest \ --target-port 3000 \ - --ingress 'external' \ + --ingress 'internal' \ --min-replicas 1 \ --max-replicas 1 \ --enable-dapr \ + --dapr-app-id nodeapp \ --dapr-app-port 3000 \ - --dapr-app-id nodeapp + --env-vars 'APP_PORT=3000' ``` # [PowerShell](#tab/powershell) @@ -235,12 +237,13 @@ az containerapp create ` --environment $CONTAINERAPPS_ENVIRONMENT ` --image dapriosamples/hello-k8s-node:latest ` --target-port 3000 ` - --ingress 'external' ` + --ingress 'internal' ` --min-replicas 1 ` --max-replicas 1 ` --enable-dapr ` + --dapr-app-id nodeapp ` --dapr-app-port 3000 ` - --dapr-app-id nodeapp + --env-vars 'APP_PORT=3000' ``` --- @@ -248,7 +251,7 @@ az containerapp create ` This command deploys: * the service (Node) app server on `--target-port 3000` (the app port) -* its accompanying Dapr sidecar configured with `--dapr-app-id nodeapp` and `--dapr-app-port 3000` for service discovery and invocation +* its accompanying Dapr sidecar configured with `--dapr-app-id nodeapp` and `--dapr-app-port 3000'` for service discovery and invocation ## Deploy the client application (headless client) @@ -284,7 +287,7 @@ az containerapp create ` --- -This command deploys `pythonapp` that also runs with a Dapr sidecar that is used to look up and securely call the Dapr sidecar for `nodeapp`. 
As this app is headless there is no `--target-port` to start a server, nor is there a need to enable ingress. +This command deploys `pythonapp` that also runs with a Dapr sidecar that is used to look up and securely call the Dapr sidecar for `nodeapp`. As this app is headless there's no `--target-port` to start a server, nor is there a need to enable ingress. ## Verify the result @@ -308,7 +311,7 @@ You can confirm that the services are working correctly by viewing data in your ### View Logs -Data logged via a container app are stored in the `ContainerAppConsoleLogs_CL` custom table in the Log Analytics workspace. You can view logs through the Azure portal or with the CLI. Wait a few minutes for the analytics to arrive for the first time before you are able to query the logged data. +Data logged via a container app are stored in the `ContainerAppConsoleLogs_CL` custom table in the Log Analytics workspace. You can view logs through the Azure portal or with the CLI. Wait a few minutes for the analytics to arrive for the first time before you're able to query the logged data. Use the following CLI command to view logs on the command line. @@ -325,11 +328,14 @@ az monitor log-analytics query \ # [PowerShell](#tab/powershell) -```powershell -$LOG_ANALYTICS_WORKSPACE_CLIENT_ID=(az containerapp env show --name $CONTAINERAPPS_ENVIRONMENT --resource-group $RESOURCE_GROUP --query properties.appLogsConfiguration.logAnalyticsConfiguration.customerId --out tsv) +```azurecli +$LOG_ANALYTICS_WORKSPACE_CLIENT_ID=` +(az containerapp env show --name $CONTAINERAPPS_ENVIRONMENT --resource-group $RESOURCE_GROUP --query properties.appLogsConfiguration.logAnalyticsConfiguration.customerId --out tsv) -$queryResults = Invoke-AzOperationalInsightsQuery -WorkspaceId $LOG_ANALYTICS_WORKSPACE_CLIENT_ID -Query "ContainerAppConsoleLogs_CL | where ContainerAppName_s == 'nodeapp' and (Log_s contains 'persisted' or Log_s contains 'order') | project ContainerAppName_s, Log_s, TimeGenerated | take 5" -$queryResults.Results +az monitor log-analytics query ` + --workspace $LOG_ANALYTICS_WORKSPACE_CLIENT_ID ` + --analytics-query "ContainerAppConsoleLogs_CL | where ContainerAppName_s == 'nodeapp' and (Log_s contains 'persisted' or Log_s contains 'order') | project ContainerAppName_s, Log_s, TimeGenerated | take 5" ` + --out table ``` --- @@ -348,7 +354,7 @@ nodeapp Got a new order! Order ID: 63 PrimaryResult 2021-10-22 ## Clean up resources -Once you are done, run the following command to delete your resource group along with all the resources you created in this tutorial. +Once you're done, run the following command to delete your resource group along with all the resources you created in this tutorial. # [Bash](#tab/bash) @@ -359,8 +365,9 @@ az group delete \ # [PowerShell](#tab/powershell) -```powershell -Remove-AzResourceGroup -Name $RESOURCE_GROUP -Force +```azurecli +az group delete ` + --resource-group $RESOURCE_GROUP ``` --- diff --git a/articles/container-apps/networking.md b/articles/container-apps/networking.md index f552667a33f20..4a1dc7ad8cdaa 100644 --- a/articles/container-apps/networking.md +++ b/articles/container-apps/networking.md @@ -11,7 +11,7 @@ ms.author: cshoe # Networking architecture in Azure Container Apps -Azure Container Apps run in the context of an [environment](environment.md), which is supported by a virtual network (VNET). When you create an environment, you can provide a custom VNET, otherwise a VNET is automatically generated for you. 
Generated VNETs are inaccessible to you as they're created in Microsoft's tenent. To take full control over your VNET, provide an existing VNET to Container Apps as you create your environment. +Azure Container Apps run in the context of an [environment](environment.md), which is supported by a virtual network (VNET). When you create an environment, you can provide a custom VNET, otherwise a VNET is automatically generated for you. Generated VNETs are inaccessible to you as they're created in Microsoft's tenant. To take full control over your VNET, provide an existing VNET to Container Apps as you create your environment. The following articles feature step-by-step instructions for creating Container Apps environments with different accessibility levels. @@ -36,7 +36,7 @@ As you create a custom VNET, keep in mind the following situations: - Each [revision](revisions.md) is assigned an IP address in the subnet. - You can restrict inbound requests to the environment exclusively to the VNET by deploying the environment as [internal](vnet-custom-internal.md). -As you begin to design the network around your container app, refer to [Plan virtual networks](/azure/virtual-network/virtual-network-vnet-plan-design-arm) for important concerns surrounding running virtual networks on Azure. +As you begin to design the network around your container app, refer to [Plan virtual networks](../virtual-network/virtual-network-vnet-plan-design-arm.md) for important concerns surrounding running virtual networks on Azure. :::image type="content" source="media/networking/azure-container-apps-virtual-network.png" alt-text="Diagram of how Azure Container Apps environments use an existing V NET, or you can provide your own."::: @@ -97,7 +97,7 @@ Once you're satisfied with the latest revision, you can lock traffic to that rev #### Update existing revision -Consider a situation where you have a known good revision that's serving 100% of your traffic, but you want to issue and update to your app. You can deploy and test new revisions using their direct endpoints without affecting the main revision serving the app. +Consider a situation where you have a known good revision that's serving 100% of your traffic, but you want to issue an update to your app. You can deploy and test new revisions using their direct endpoints without affecting the main revision serving the app. Once you're satisfied with the updated revision, you can shift a portion of traffic to the new revision for testing and verification. @@ -197,4 +197,4 @@ When you deploy an internal or an external environment into your own network, a ## Next steps - [Deploy with an external environment](vnet-custom.md) -- [Deploy with an internal environment](vnet-custom-internal.md) +- [Deploy with an internal environment](vnet-custom-internal.md) \ No newline at end of file diff --git a/articles/container-apps/quickstart-code-to-cloud.md b/articles/container-apps/quickstart-code-to-cloud.md index 95941a3d6bc28..6121bd17a9dcc 100644 --- a/articles/container-apps/quickstart-code-to-cloud.md +++ b/articles/container-apps/quickstart-code-to-cloud.md @@ -28,7 +28,7 @@ To complete this project, you'll need the following items: | Requirement | Instructions | |--|--| -| Azure account | If you don't have one, [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). You need the *Contributor* or *Owner* permission on the Azure subscription to proceed.

                  Refer to [Assign Azure roles using the Azure portal](/azure/role-based-access-control/role-assignments-portal?tabs=current) for details. | +| Azure account | If you don't have one, [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). You need the *Contributor* or *Owner* permission on the Azure subscription to proceed.

                  Refer to [Assign Azure roles using the Azure portal](../role-based-access-control/role-assignments-portal.md?tabs=current) for details. | | GitHub Account | Sign up for [free](https://github.com/join). | | git | [Install git](https://git-scm.com/downloads) | | Azure CLI | Install the [Azure CLI](/cli/azure/install-azure-cli).| @@ -39,7 +39,7 @@ To complete this project, you'll need the following items: | Requirement | Instructions | |--|--| -| Azure account | If you don't have one, [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). You need the *Contributor* or *Owner* permission on the Azure subscription to proceed. Refer to [Assign Azure roles using the Azure portal](/azure/role-based-access-control/role-assignments-portal?tabs=current) for details. | +| Azure account | If you don't have one, [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). You need the *Contributor* or *Owner* permission on the Azure subscription to proceed. Refer to [Assign Azure roles using the Azure portal](../role-based-access-control/role-assignments-portal.md?tabs=current) for details. | | GitHub Account | Sign up for [free](https://github.com/join). | | git | [Install git](https://git-scm.com/downloads) | | Azure CLI | Install the [Azure CLI](/cli/azure/install-azure-cli).| @@ -166,7 +166,7 @@ az acr create ` ## Build your application -With [ACR tasks](/azure/container-registry/container-registry-tasks-overview), you can build and push the docker image for the album API without installing Docker locally. +With [ACR tasks](../container-registry/container-registry-tasks-overview.md), you can build and push the docker image for the album API without installing Docker locally. ### Build the container with ACR diff --git a/articles/container-apps/quickstart-portal.md b/articles/container-apps/quickstart-portal.md index 278dab8ebcfdc..19a646c5a2bd4 100644 --- a/articles/container-apps/quickstart-portal.md +++ b/articles/container-apps/quickstart-portal.md @@ -18,7 +18,7 @@ In this quickstart, you create a secure Container Apps environment and deploy yo ## Prerequisites -An Azure account with an active subscription is required. If you don't already have one, you can [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). +An Azure account with an active subscription is required. If you don't already have one, you can [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). Also, please make sure to have the Resource Provider "Microsoft.App" registered. ## Setup diff --git a/articles/container-apps/revisions-manage.md b/articles/container-apps/revisions-manage.md index 7a51bcbe97eac..a56ab6c5597e9 100644 --- a/articles/container-apps/revisions-manage.md +++ b/articles/container-apps/revisions-manage.md @@ -1,22 +1,57 @@ --- title: Manage revisions in Azure Container Apps -description: Manage revisions and traffic splitting in Azure Container Apps. +description: Manage revisions and traffic splitting in Azure Container Apps. services: container-apps author: craigshoemaker ms.service: container-apps ms.topic: conceptual -ms.date: 11/02/2021 +ms.date: 06/07/2022 ms.author: cshoe -ms.custom: ignite-fall-2021, event-tier1-build-2022 --- -# Manage revisions Azure Container Apps +# Manage revisions in Azure Container Apps -Supporting multiple revisions in Azure Container Apps allows you to manage the versioning and amount of [traffic sent to each revision](#traffic-splitting). 
Use the following commands to control of how your container app manages revisions. +Supporting multiple revisions in Azure Container Apps allows you to manage the versioning of your container app. With this feature, you can activate and deactivate revisions, and control the amount of [traffic sent to each revision](#traffic-splitting). To learn more about revisions, see [Revisions in Azure Container Apps](revisions.md). -## List -List all revisions associated with your container app with `az containerapp revision list`. + +This article describes the commands to manage your container app's revisions. For more information about Container Apps commands, see [`az containerapp`](/cli/azure/containerapp). For more information about commands to manage revisions, see [`az containerapp revision`](/cli/azure/containerapp/revision). + + +## Updating your container app + +To update a container app, use the `az containerapp update` command. With this command you can modify environment variables, compute resources, scale parameters, and deploy a different image. If your container app update includes [revision-scope changes](revisions.md#revision-scope-changes), a new revision will be generated. + +You may also use a YAML file to define these and other configuration options and parameters. For more information regarding this command, see [`az containerapp update`](/cli/azure/containerapp#az-containerapp-update). + +This example updates the container image. (Replace the \<placeholders\> with your values.) + +# [Bash](#tab/bash) + +```azurecli +az containerapp update \ + --name <APPLICATION_NAME> \ + --resource-group <RESOURCE_GROUP_NAME> \ + --image <IMAGE_NAME> +``` + +# [PowerShell](#tab/powershell) + +```azurecli +az containerapp update ` + --name <APPLICATION_NAME> ` + --resource-group <RESOURCE_GROUP_NAME> ` + --image <IMAGE_NAME> +``` + +--- + +You can also update your container app with the [Revision copy](#revision-copy) command. + +## Revision list + +List all revisions associated with your container app with `az containerapp revision list`. For more information about this command, see [`az containerapp revision list`](/cli/azure/containerapp/revision#az-containerapp-revision-list). # [Bash](#tab/bash) @@ -38,18 +73,17 @@ az containerapp revision list ` --- -As you interact with this example, replace the placeholders surrounded by `<>` with your values. +## Revision show -## Show +Show details about a specific revision by using `az containerapp revision show`. For more information about this command, see [`az containerapp revision show`](/cli/azure/containerapp/revision#az-containerapp-revision-show). -Show details about a specific revision by using `az containerapp revision show`. +Example: (Replace the \<placeholders\> with your values.) # [Bash](#tab/bash) ```azurecli az containerapp revision show \ --name \ - --app \ --resource-group ``` @@ -58,50 +92,52 @@ az containerapp revision show \ ```azurecli az containerapp revision show ` --name ` - --app ` --resource-group ``` --- -As you interact with this example, replace the placeholders surrounded by `<>` with your values. +## Revision copy + +To create a new revision based on an existing revision, use the `az containerapp revision copy` command. Container Apps will use the configuration of the existing revision, which you then may modify.
-## Update +With this command, you can modify environment variables, compute resources, scale parameters, and deploy a different image. You may also use a YAML file to define these and other configuration options and parameters. For more information regarding this command, see [`az containerapp revision copy`](/cli/azure/containerapp/revision#az-containerapp-revision-copy). -To update a container app, use `az containerapp update`. +This example copies the latest revision and sets the compute resource parameters. (Replace the \ with your values.) # [Bash](#tab/bash) ```azurecli -az containerapp update \ +az containerapp revision copy \ --name \ --resource-group \ - --image mcr.microsoft.com/azuredocs/containerapps-helloworld + --cpu 0.75 \ + --memory 1.5Gi ``` # [PowerShell](#tab/powershell) ```azurecli -az containerapp update ` +az containerapp revision copy ` --name ` --resource-group ` - --image mcr.microsoft.com/azuredocs/containerapps-helloworld + --cpu 0.75 ` + --memory 1.5Gi ``` --- -As you interact with this example, replace the placeholders surrounded by `<>` with your values. +## Revision activate -## Activate +Activate a revision by using `az containerapp revision activate`. For more information about this command, see [`az containerapp revision activate`](/cli/azure/containerapp/revision#az-containerapp-revision-activate). -Activate a revision by using `az containerapp revision activate`. +Example: (Replace the \ with your values.) # [Bash](#tab/bash) ```azurecli az containerapp revision activate \ --revision \ - --name \ --resource-group ``` @@ -110,24 +146,22 @@ az containerapp revision activate \ ```poweshell az containerapp revision activate ` --revision ` - --name ` --resource-group ``` --- -As you interact with this example, replace the placeholders surrounded by `<>` with your values. +## Revision deactivate -## Deactivate +Deactivate revisions that are no longer in use with `az containerapp revision deactivate`. Deactivation stops all running replicas of a revision. For more information, see [`az containerapp revision deactivate`](/cli/azure/containerapp/revision#az-containerapp-revision-deactivate). -Deactivate revisions that are no longer in use with `az container app revision deactivate`. Deactivation stops all running replicas of a revision. +Example: (Replace the \ with your values.) # [Bash](#tab/bash) ```azurecli az containerapp revision deactivate \ --revision \ - --name \ --resource-group ``` @@ -136,24 +170,24 @@ az containerapp revision deactivate \ ```azurecli az containerapp revision deactivate ` --revision ` - --name ` --resource-group ``` --- -As you interact with this example, replace the placeholders surrounded by `<>` with your values. +## Revision restart + +This command restarts a revision. For more information about this command, see [`az containerapp revision restart`](/cli/azure/containerapp/revision#az-containerapp-revision-restart). -## Restart +When you modify secrets in your container app, you'll need to restart the active revisions so they can access the secrets. -All existing container apps revisions will not have access to this secret until they are restarted +Example: (Replace the \ with your values.) 
# [Bash](#tab/bash) ```azurecli az containerapp revision restart \ --revision \ - --name \ --resource-group ``` @@ -162,46 +196,106 @@ az containerapp revision restart \ ```azurecli az containerapp revision restart ` --revision ` - --name ` --resource-group ``` --- -As you interact with this example, replace the placeholders surrounded by `<>` with your values. +## Revision set mode -## Set active revision mode +The revision mode controls whether only a single revision or multiple revisions of your container app can be simultaneously active. To set your container app to support [single revision mode](revisions.md#single-revision-mode) or [multiple revision mode](revisions.md#multiple-revision-mode), use the `az containerapp revision set-mode` command. -Configure whether or not your container app supports multiple active revisions. +The default setting is *single revision mode*. For more information about this command, see [`az containerapp revision set-mode`](/cli/azure/containerapp/revision#az-containerapp-revision-set-mode). -The `activeRevisionsMode` property accepts two values: +The mode values are `single` or `multiple`. Changing the revision mode doesn't create a new revision. -- `multiple`: Configures the container app to allow more than one active revision. +Example: (Replace the \ with your values.) -- `single`: Automatically deactivates all other revisions when a revision is activated. Enabling `single` mode makes it so that when you create a revision-scope change and a new revision is created, any other revisions are automatically deactivated. +# [Bash](#tab/bash) -```json -{ - ... - "resources": [ - { - ... - "properties": { - "configuration": { - "activeRevisionsMode": "multiple" - } - } - }] -} +```azurecli +az containerapp revision set-mode \ + --name \ + --resource-group \ + --mode single +``` + +# [PowerShell](#tab/powershell) + +```azurecli +az containerapp revision set-mode ` + --name ` + --resource-group ` + --mode single +``` + +--- + +## Revision labels + +Labels provide a unique URL that you can use to direct traffic to a revision. You can move a label between revisions to reroute traffic directed to the label's URL to a different revision. For more information about revision labels, see [Revision Labels](revisions.md#revision-labels). + +You can add and remove a label from a revision. For more information about the label commands, see [`az containerapp revision label`](/cli/azure/containerapp/revision/label) + +### Revision label add + +To add a label to a revision, use the [`az containerapp revision label add`](/cli/azure/containerapp/revision/label#az-containerapp-revision-label-add) command. + +You can only assign a label to one revision at a time, and a revision can only be assigned one label. If the revision you specify has a label, the add command will replace the existing label. + +This example adds a label to a revision: (Replace the \ with your values.) + +# [Bash](#tab/bash) + +```azurecli +az containerapp revision label add \ + --revision \ + --resource-group \ + --label +``` + +# [PowerShell](#tab/powershell) + +```azurecli +az containerapp revision set-mode ` + --revision ` + --resource-group ` + --mode ``` -The following configuration fragment shows how to set the `activeRevisionsMode` property. Changes made to this property require the context of the container app's full ARM template. 
+--- + +### Revision label remove + +To remove a label from a revision, use the [`az containerapp revision label remove`](/cli/azure/containerapp/revision/label#az-containerapp-revision-label-remove) command. + +This example removes a label to a revision: (Replace the \ with your values.) + +# [Bash](#tab/bash) + +```azurecli +az containerapp revision label add \ + --revision \ + --resource-group \ + --label +``` + +# [PowerShell](#tab/powershell) + +```azurecli +az containerapp revision set-mode ` + --revision ` + --resource-group ` + --mode +``` + +--- ## Traffic splitting Applied by assigning percentage values, you can decide how to balance traffic among different revisions. Traffic splitting rules are assigned by setting weights to different revisions. -The following example shows how to split traffic between three revisions. +The following example shows how to split traffic between three revisions. ```json { @@ -233,11 +327,11 @@ Each revision gets traffic based on the following rules: - 30% of the requests go to REVISION2 - 20% of the requests go to the latest revision -The sum total of all revision weights must equal 100. +The sum of all revision weights must equal 100. -In this example, replace the `` placeholders with revision names in your container app. You access revision names via the [list](#list) command. +In this example, replace the `` placeholders with revision names in your container app. You access revision names via the [revision list](#revision-list) command. ## Next steps -> [!div class="nextstepaction"] -> [Get started](get-started.md) +* [Revisions in Azure Container Apps](revisions.md) +* [Application lifecycle management in Azure Container Apps](application-lifecycle-management.md) diff --git a/articles/container-apps/scale-app.md b/articles/container-apps/scale-app.md index 1df7504a84565..beee78717c185 100644 --- a/articles/container-apps/scale-app.md +++ b/articles/container-apps/scale-app.md @@ -20,18 +20,18 @@ There are two scale properties that apply to all rules in your container app: | Scale property | Description | Default value | Min value | Max value | |---|---|---|---|---| -| `minReplicas` | Minimum number of replicas running for your container app. | 0 | 0 | 10 | -| `maxReplicas` | Maximum number of replicas running for your container app. | n/a | 1 | 10 | +| `minReplicas` | Minimum number of replicas running for your container app. | 0 | 0 | 30 | +| `maxReplicas` | Maximum number of replicas running for your container app. | 10 | 1 | 30 | - If your container app scales to zero, then you aren't billed. - Individual scale rules are defined in the `rules` array. - If you want to ensure that an instance of your application is always running, set `minReplicas` to 1 or higher. - Replicas not processing, but that remain in memory are billed in the "idle charge" category. -- Changes to scaling rules are a [revision-scope](overview.md) change. -- When using non-HTTP event scale rules, setting the `properties.configuration.activeRevisionsMode` property of the container app to `single` is recommended. - - - +- Changes to scaling rules are a [revision-scope](revisions.md#revision-scope-changes) change. +- It's recommended to set the `properties.configuration.activeRevisionsMode` property of the container app to `single`, when using non-HTTP event scale rules. +- Container Apps implements the KEDA ScaledObject with the following default settings. 
+ - pollingInterval: 30 seconds + - cooldownPeriod: 300 seconds ## Scale triggers @@ -47,7 +47,7 @@ With an HTTP scaling rule, you have control over the threshold that determines w | Scale property | Description | Default value | Min value | Max value | |---|---|---|---|---| -| `concurrentRequests`| Once the number of requests exceeds this then another replica is added. Replicas will continue to be added up to the `maxReplicas` amount as the number of concurrent requests increase. | 10 | 1 | n/a | +| `concurrentRequests`| When the number of requests exceeds this value, then another replica is added. Replicas will continue to be added up to the `maxReplicas` amount as the number of concurrent requests increase. | 10 | 1 | n/a | In the following example, the container app scales out up to five replicas and can scale down to zero. The scaling threshold is set to 100 concurrent requests per second. @@ -99,13 +99,13 @@ In the following example, the container app scales out up to five replicas and c :::image type="content" source="media/scalers/http-scale-rule.png" alt-text="A screenshot showing how to add an h t t p scale rule."::: -1. Select **Create** when you are done. +1. Select **Create** when you're done. :::image type="content" source="media/scalers/create-http-scale-rule.png" alt-text="A screenshot showing the newly created http scale rule."::: ## Event-driven -Container Apps can scale based of a wide variety of event types. Any event supported by [KEDA](https://keda.sh/docs/scalers/), is supported in Container Apps. +Container Apps can scale based of a wide variety of event types. Any event supported by [KEDA](https://keda.sh/docs/scalers/) is supported in Container Apps. Each event type features different properties in the `metadata` section of the KEDA definition. Use these properties to define a scale rule in Container Apps. @@ -132,7 +132,7 @@ The container app scales according to the following behavior: ... "scale": { "minReplicas": "0", - "maxReplicas": "10", + "maxReplicas": "30", "rules": [ { "name": "queue-based-autoscaling", @@ -162,7 +162,7 @@ To create a custom scale trigger, first create a connection string secret to aut 1. Select **Add**, and then enter your secret key/value information. -1. Select **Add** when you are done. +1. Select **Add** when you're done. :::image type="content" source="media/scalers/connection-string.png" alt-text="A screenshot showing how to create a connection string."::: @@ -178,11 +178,11 @@ To create a custom scale trigger, first create a connection string secret to aut :::image type="content" source="media/scalers/add-scale-rule.png" alt-text="A screenshot showing how to add a scale rule."::: -1. Enter a **Rule name**, select **Custom** and enter a **Custom rule type**. Enter your **Secret reference** and **Trigger parameter** and then add your **Metadata** parameters. select **Add** when you are done. +1. Enter a **Rule name**, select **Custom** and enter a **Custom rule type**. Enter your **Secret reference** and **Trigger parameter** and then add your **Metadata** parameters. select **Add** when you're done. :::image type="content" source="media/scalers/custom-scaler.png" alt-text="A screenshot showing how to configure a custom scale rule."::: -1. Select **Create** when you are done. +1. Select **Create** when you're done. > [!NOTE] > In multiple revision mode, adding a new scale trigger creates a new revision of your application but your old revision remains available with the old scale rules. 
Use the **Revision management** page to manage their traffic allocations. @@ -207,7 +207,7 @@ Azure Container Apps supports KEDA ScaledObjects and all of the available [KEDA ... "scale": { "minReplicas": "0", - "maxReplicas": "10", + "maxReplicas": "30", "rules": [ { "name": "", @@ -224,9 +224,9 @@ Azure Container Apps supports KEDA ScaledObjects and all of the available [KEDA } ``` -The following is an example of setting up an [Azure Storage Queue](https://keda.sh/docs/scalers/azure-storage-queue/) scaler that you can configure to auto scale based on Azure Storage Queues. +The following YAML is an example of setting up an [Azure Storage Queue](https://keda.sh/docs/scalers/azure-storage-queue/) scaler that you can configure to auto scale based on Azure Storage Queues. -Below is the KEDA trigger specification for an Azure Storage Queue. To set up a scale rule in Azure Container Apps, you will need the trigger `type` and any other required parameters. You can also add other optional parameters which vary based on the scaler you are using. +Below is the KEDA trigger specification for an Azure Storage Queue. To set up a scale rule in Azure Container Apps, you'll need the trigger `type` and any other required parameters. You can also add other optional parameters, which vary based on the scaler you're using. In this example, you need the `accountName` and the name of the cloud environment that the queue belongs to `cloud` to set up your scaler in Azure Container Apps. @@ -259,7 +259,7 @@ Now your JSON config file should look like this: ... "scale": { "minReplicas": "0", - "maxReplicas": "10", + "maxReplicas": "30", "rules": [ { "name": "queue-trigger", @@ -279,7 +279,7 @@ Now your JSON config file should look like this: ``` > [!NOTE] -> KEDA ScaledJobs are not supported. See [KEDA scaling Jobs](https://keda.sh/docs/concepts/scaling-jobs/#overview) for more details. +> KEDA ScaledJobs are not supported. For more information, see [KEDA Scaling Jobs](https://keda.sh/docs/concepts/scaling-jobs/#overview). ## CPU @@ -359,12 +359,11 @@ The following example shows how to create a memory scaling rule. ## Considerations -- Vertical scaling is not supported. +- Vertical scaling isn't supported. - Replica quantities are a target amount, not a guarantee. - - Even if you set `maxReplicas` to `1`, there is no assurance of thread safety. - -- If you are using [Dapr actors](https://docs.dapr.io/developing-applications/building-blocks/actors/actors-overview/) to manage states, you should keep in mind that scaling to zero is not supported. Dapr uses virtual actors to manage asynchronous calls which means their in-memory representation is not tied to their identity or lifetime. + +- If you're using [Dapr actors](https://docs.dapr.io/developing-applications/building-blocks/actors/actors-overview/) to manage states, you should keep in mind that scaling to zero isn't supported. Dapr uses virtual actors to manage asynchronous calls, which means their in-memory representation isn't tied to their identity or lifetime. ## Next steps diff --git a/articles/container-apps/storage-mounts.md b/articles/container-apps/storage-mounts.md index a46f69a59a9e9..461fbfa243239 100644 --- a/articles/container-apps/storage-mounts.md +++ b/articles/container-apps/storage-mounts.md @@ -20,6 +20,9 @@ A container app has access to different types of storage. A single app can take | [Temporary storage](#temporary-storage) | Temporary storage scoped to an individual replica | Sharing files between containers in a replica. 
For instance, the main app container can write log files that are processed by a sidecar container. | | [Azure Files](#azure-files) | Permanent storage | Writing files to a file share to make data accessible by other systems. | +> [!NOTE] +> The volume mounting features in Azure Container Apps are in preview. + ## Container file system A container can write to its own file system. @@ -160,7 +163,7 @@ See the [ARM template API specification](azure-resource-manager-api-spec.md) for ## Azure Files -You can mount a file share from [Azure Files](/azure/storage/files/) as a volume inside a container. +You can mount a file share from [Azure Files](../storage/files/index.yml) as a volume inside a container. Azure Files storage has the following characteristics: @@ -181,7 +184,7 @@ To enable Azure Files storage in your container, you need to set up your contain | Requirement | Instructions | |--|--| | Azure account | If you don't have one, [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). | -| Azure Storage account | [Create a storage account](/azure/storage/common/storage-account-create?tabs=azure-cli#create-a-storage-account-1). | +| Azure Storage account | [Create a storage account](../storage/common/storage-account-create.md?tabs=azure-cli#create-a-storage-account-1). | | Azure Container Apps environment | [Create a container apps environment](environment.md). | ### Configuration @@ -346,4 +349,4 @@ The following ARM template snippets demonstrate how to add an Azure Files share See the [ARM template API specification](azure-resource-manager-api-spec.md) for a full example. -::: zone-end +::: zone-end \ No newline at end of file diff --git a/articles/container-apps/vnet-custom-internal.md b/articles/container-apps/vnet-custom-internal.md index 12acac5586d5e..a737b16a28117 100644 --- a/articles/container-apps/vnet-custom-internal.md +++ b/articles/container-apps/vnet-custom-internal.md @@ -6,7 +6,7 @@ author: craigshoemaker ms.service: container-apps ms.custom: event-tier1-build-2022 ms.topic: how-to -ms.date: 5/16/2022 +ms.date: 06/07/2022 ms.author: cshoe zone_pivot_groups: azure-cli-or-portal --- @@ -16,7 +16,7 @@ zone_pivot_groups: azure-cli-or-portal The following example shows you how to create a Container Apps environment in an existing virtual network. > [!IMPORTANT] -> In order to ensure the environment deployment within your custom VNET is successful, configure your VNET with an "allow-all" configuration by default. The full list of traffic dependencies required to configure the VNET as "deny-all" is not yet available. For more information, see [Known issues for public preview](https://github.com/microsoft/azure-container-apps/wiki/Known-Issues-for-public-preview). +> Container Apps environments are deployed on a virtual network. This network can be managed or custom (pre-configured by the user beforehand). In either case, the environment has dependencies on services outside of that virtual network. For a list of these dependencies see [Outbound FQDN dependencies](firewall-integration.md#outbound-fqdn-dependencies). ::: zone pivot="azure-portal" @@ -75,7 +75,7 @@ $VNET_NAME="my-custom-vnet" --- -Now create an instance of the virtual network to associate with the Container Apps environment. The virtual network must have two subnets available for the container apps instance. +Now create an instance of the virtual network to associate with the Container Apps environment. The virtual network must have two subnets available for the container app instance. 
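A minimal sketch of creating such a network with two empty subnets follows. The subnet names and address ranges are illustrative placeholders, and the `$RESOURCE_GROUP`, `$VNET_NAME`, and `$LOCATION` variables are assumed to have been defined earlier in the article.

```azurecli
# Create the virtual network that will host the Container Apps environment.
az network vnet create \
  --resource-group $RESOURCE_GROUP \
  --name $VNET_NAME \
  --location $LOCATION \
  --address-prefix 10.0.0.0/16

# Create two empty subnets: one for infrastructure components and one for apps.
az network vnet subnet create \
  --resource-group $RESOURCE_GROUP \
  --vnet-name $VNET_NAME \
  --name infrastructure-subnet \
  --address-prefixes 10.0.0.0/21

az network vnet subnet create \
  --resource-group $RESOURCE_GROUP \
  --vnet-name $VNET_NAME \
  --name app-subnet \
  --address-prefixes 10.0.8.0/21
```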
> [!NOTE] > You can use an existing virtual network, but two empty subnets are required to use with Container Apps. @@ -175,15 +175,15 @@ The following table describes the parameters used in for `containerapp env creat | Parameter | Description | |---|---| -| `name` | Name of the container apps environment. | +| `name` | Name of the Container Apps environment. | | `resource-group` | Name of the resource group. | -| `logs-workspace-id` | The ID of the Log Analytics workspace. | -| `logs-workspace-key` | The Log Analytics client secret. | +| `logs-workspace-id` | (Optional) The ID of an existing the Log Analytics workspace. If omitted, a workspace will be created for you. | +| `logs-workspace-key` | The Log Analytics client secret. Required if using an existing workspace. | | `location` | The Azure location where the environment is to deploy. | | `infrastructure-subnet-resource-id` | Resource ID of a subnet for infrastructure components and user application containers. | -| `internal-only` | Optional parameter that scopes the environment to IP addresses only available the custom VNET. | +| `internal-only` | (Optional) The environment doesn't use a public static IP, only internal IP addresses available in the custom VNET. (Requires an infrastructure subnet resource ID.) | -With your environment created in your custom virtual network, you can deploy container apps into the environment using the `az containerapp create` command. +With your environment created using your custom virtual network, you can deploy container apps into the environment using the `az containerapp create` command. ### Optional configuration @@ -286,7 +286,7 @@ You must either provide values for all three of these properties, or none of the | Parameter | Description | |---|---| | `platform-reserved-cidr` | The address range used internally for environment infrastructure services. Must have a size between `/21` and `/12`. | -| `platform-reserved-dns-ip` | An IP address from the `platform-reserved-cidr` range that is used for the internal DNS server. The address can't be the first address in the range, or the network address. For example, if `platform-reserved-cidr` is set to `10.2.0.0/16`, then `platform-reserved-dns-ip` can't be `10.2.0.0` (this is the network address), or `10.2.0.1` (infrastructure reserves use of this IP). In this case, the first usable IP for the DNS would be `10.2.0.2`. | +| `platform-reserved-dns-ip` | An IP address from the `platform-reserved-cidr` range that is used for the internal DNS server. The address can't be the first address in the range, or the network address. For example, if `platform-reserved-cidr` is set to `10.2.0.0/16`, then `platform-reserved-dns-ip` can't be `10.2.0.0` (the network address), or `10.2.0.1` (infrastructure reserves use of this IP). In this case, the first usable IP for the DNS would be `10.2.0.2`. | | `docker-bridge-cidr` | The address range assigned to the Docker bridge network. This range must have a size between `/28` and `/12`. | - The `platform-reserved-cidr` and `docker-bridge-cidr` address ranges can't conflict with each other, or with the ranges of either provided subnet. Further, make sure these ranges don't conflict with any other address range in the VNET. 
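Putting these optional properties together, a create command might look like the following sketch. The environment name, subnet ID variable, and address ranges are illustrative placeholders; choose ranges that don't overlap your subnets or any other address range in the VNET.

```azurecli
az containerapp env create \
  --name my-internal-environment \
  --resource-group my-container-apps \
  --location canadacentral \
  --infrastructure-subnet-resource-id $INFRASTRUCTURE_SUBNET \
  --internal-only \
  --platform-reserved-cidr 10.1.0.0/16 \
  --platform-reserved-dns-ip 10.1.0.2 \
  --docker-bridge-cidr 10.2.0.1/16
```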
@@ -297,7 +297,7 @@ You must either provide values for all three of these properties, or none of the ## Clean up resources -If you're not going to continue to use this application, you can delete the Azure Container Apps instance and all the associated services by removing the **my-container-apps** resource group. +If you're not going to continue to use this application, you can delete the Azure Container Apps instance and all the associated services by removing the **my-container-apps** resource group. Deleting this resource group will also delete the resource group automatically created by the Container Apps service containing the custom network components. ::: zone pivot="azure-cli" @@ -321,7 +321,7 @@ az group delete ` ## Additional resources -- Refer to [What is Azure Private Endpoint](../private-link/private-endpoint-overview.md) for more details on configuring your private endpoint. +- For more information about configuring your private endpoints, see [What is Azure Private Endpoint](../private-link/private-endpoint-overview.md). - To set up DNS name resolution for internal services, you must [set up your own DNS server](../dns/index.yml). diff --git a/articles/container-apps/vnet-custom.md b/articles/container-apps/vnet-custom.md index 288592dba9c47..e8badfe073d8d 100644 --- a/articles/container-apps/vnet-custom.md +++ b/articles/container-apps/vnet-custom.md @@ -6,7 +6,7 @@ author: craigshoemaker ms.service: container-apps ms.custom: event-tier1-build-2022 ms.topic: how-to -ms.date: 05/16/2022 +ms.date: 06/07/2022 ms.author: cshoe zone_pivot_groups: azure-cli-or-portal --- @@ -16,7 +16,7 @@ zone_pivot_groups: azure-cli-or-portal The following example shows you how to create a Container Apps environment in an existing virtual network. > [!IMPORTANT] -> In order to ensure the environment deployment within your custom VNET is successful, configure your VNET with an "allow-all" configuration by default. The full list of traffic dependencies required to configure the VNET as "deny-all" is not yet available. For more information, see [Known issues for public preview](https://github.com/microsoft/azure-container-apps/wiki/Known-Issues-for-public-preview). +> Container Apps environments are deployed on a virtual network. This network can be managed or custom (pre-configured by the user beforehand). In either case, the environment has dependencies on services outside of that virtual network. For a list of these dependencies see [Outbound FQDN dependencies](firewall-integration.md#outbound-fqdn-dependencies). ::: zone pivot="azure-portal" @@ -173,7 +173,7 @@ The following table describes the parameters used in `containerapp env create`. | Parameter | Description | |---|---| -| `name` | Name of the container apps environment. | +| `name` | Name of the Container Apps environment. | | `resource-group` | Name of the resource group. | | `location` | The Azure location where the environment is to deploy. | | `infrastructure-subnet-resource-id` | Resource ID of a subnet for infrastructure components and user application containers. | @@ -281,7 +281,7 @@ You must either provide values for all three of these properties, or none of the | Parameter | Description | |---|---| | `platform-reserved-cidr` | The address range used internally for environment infrastructure services. Must have a size between `/21` and `/12`. | -| `platform-reserved-dns-ip` | An IP address from the `platform-reserved-cidr` range that is used for the internal DNS server. 
The address can't be the first address in the range, or the network address. For example, if `platform-reserved-cidr` is set to `10.2.0.0/16`, then `platform-reserved-dns-ip` can't be `10.2.0.0` (this is the network address), or `10.2.0.1` (infrastructure reserves use of this IP). In this case, the first usable IP for the DNS would be `10.2.0.2`. | +| `platform-reserved-dns-ip` | An IP address from the `platform-reserved-cidr` range that is used for the internal DNS server. The address can't be the first address in the range, or the network address. For example, if `platform-reserved-cidr` is set to `10.2.0.0/16`, then `platform-reserved-dns-ip` can't be `10.2.0.0` (the network address), or `10.2.0.1` (infrastructure reserves use of this IP). In this case, the first usable IP for the DNS would be `10.2.0.2`. | | `docker-bridge-cidr` | The address range assigned to the Docker bridge network. This range must have a size between `/28` and `/12`. | - The `platform-reserved-cidr` and `docker-bridge-cidr` address ranges can't conflict with each other, or with the ranges of either provided subnet. Further, make sure these ranges don't conflict with any other address range in the VNET. @@ -292,7 +292,7 @@ You must either provide values for all three of these properties, or none of the ## Clean up resources -If you're not going to continue to use this application, you can delete the Azure Container Apps instance and all the associated services by removing the **my-container-apps** resource group. +If you're not going to continue to use this application, you can delete the Azure Container Apps instance and all the associated services by removing the **my-container-apps** resource group. Deleting this resource group will also delete the resource group automatically created by the Container Apps service containing the custom network components. ::: zone pivot="azure-cli" @@ -316,7 +316,8 @@ az group delete ` ## Additional resources -- Refer to [What is Azure Private Endpoint](../private-link/private-endpoint-overview.md) for more details on configuring your private endpoint. +- For more information about configuring your private endpoints, see [What is Azure Private Endpoint](../private-link/private-endpoint-overview.md). + - To set up DNS name resolution for internal services, you must [set up your own DNS server](../dns/index.yml). diff --git a/articles/container-instances/TOC.yml b/articles/container-instances/TOC.yml index 29656eba1639a..41cd904c87e54 100644 --- a/articles/container-instances/TOC.yml +++ b/articles/container-instances/TOC.yml @@ -113,6 +113,8 @@ href: container-instances-egress-ip-address.md - name: Configure container group egress with NAT gateway href: container-instances-nat-gateway.md + - name: Configure custom DNS settings for container group + href: container-instances-custom-dns.md - name: Mount data volumes items: - name: Azure file share diff --git a/articles/container-instances/availability-zones.md b/articles/container-instances/availability-zones.md index 94d30ee12cda2..217d26a59f1eb 100644 --- a/articles/container-instances/availability-zones.md +++ b/articles/container-instances/availability-zones.md @@ -8,7 +8,7 @@ ms.custom: devx-track-js, devx-track-azurecli # Deploy an Azure Container Instances (ACI) container group in an availability zone (preview) -An [availability zone][availability-zone-overview] is a physically separate zone in an Azure region. 
You can use availability zones to protect your containerized applications from an unlikely failure or loss of an entire data center. Three types of Azure services support availability zones: *zonal*, *zone-redundant*, and *always-available* services. You can learn more about these types of services and how they promote resiliency in the [Highly available services section of Azure services that support availability zones](/azure/availability-zones/az-region#highly-available-services). +An [availability zone][availability-zone-overview] is a physically separate zone in an Azure region. You can use availability zones to protect your containerized applications from an unlikely failure or loss of an entire data center. Three types of Azure services support availability zones: *zonal*, *zone-redundant*, and *always-available* services. You can learn more about these types of services and how they promote resiliency in the [Highly available services section of Azure services that support availability zones](../availability-zones/az-region.md#highly-available-services). Azure Container Instances (ACI) supports *zonal* container group deployments, meaning the instance is pinned to a specific, self-selected availability zone. The availability zone is specified at the container group level. Containers within a container group can't have unique availability zones. To change your container group's availability zone, you must delete the container group and create another container group with the new availability zone. diff --git a/articles/container-instances/container-instances-custom-dns.md b/articles/container-instances/container-instances-custom-dns.md new file mode 100644 index 0000000000000..d58b52085b02a --- /dev/null +++ b/articles/container-instances/container-instances-custom-dns.md @@ -0,0 +1,229 @@ +--- +title: Configure custom DNS settings for container group in Azure Container Instances +description: Configure a public or private DNS configuration for a container group +author: tomvcassidy +ms.topic: how-to +ms.service: container-instances +services: container-instances +ms.author: tomcassidy +ms.date: 05/25/2022 +--- + +# Deploy a container group with custom DNS settings + +In [Azure Virtual Network](../virtual-network/virtual-networks-overview.md), you can deploy container groups using the `az container create` command in the Azure CLI. You can also provide advanced configuration settings to the `az container create` command using a YAML configuration file. + +This article demonstrates how to deploy a container group with custom DNS settings using a YAML configuration file. + +For more information on deploying container groups to a virtual network, see the [Deploy in a virtual network article](container-instances-vnet.md). + +> [!IMPORTANT] +> Previously, the process of deploying container groups on virtual networks used [network profiles](/azure/container-instances/container-instances-virtual-network-concepts#network-profile) for configuration. However, network profiles have been retired as of the `2021-07-01` API version. We recommend you use the latest API version, which relies on [subnet IDs](/azure/virtual-network/subnet-delegation-overview) instead. + +## Prerequisites + +* An **active Azure subscription**. If you don't have an active Azure subscription, create a [free account](https://azure.microsoft.com/free) before you begin. + +* **Azure CLI**. The command-line examples in this article use the [Azure CLI](/cli/azure/) and are formatted for the Bash shell. 
You can [install the Azure CLI](/cli/azure/install-azure-cli) locally or use the [Azure Cloud Shell][cloud-shell-bash]. + +* A **resource group** to manage all the resources you use in this how-to guide. We use the example resource group name **ACIResourceGroup** throughout this article. + + ```azurecli-interactive + az group create --name ACIResourceGroup --location westus + ``` + +## Limitations + +For networking scenarios and limitations, see [Virtual network scenarios and resources for Azure Container Instances](container-instances-virtual-network-concepts.md). + +> [!IMPORTANT] +> Container group deployment to a virtual network is available for Linux containers in most regions where Azure Container Instances is available. For details, see [Regions and resource availability](container-instances-region-availability.md). +Examples in this article are formatted for the Bash shell. For PowerShell or command prompt, adjust the line continuation characters accordingly. + +## Create your virtual network + +You'll need a virtual network to deploy a container group with a custom DNS configuration. This virtual network will require a subnet with permissions to create Azure Container Instances resources and a linked private DNS zone to test name resolution. + +This guide uses a virtual network named `aci-vnet`, a subnet named `aci-subnet`, and a private DNS zone named `private.contoso.com`. We use **Azure Private DNS Zones**, which you can learn about in the [Private DNS Overview](../dns/private-dns-overview.md). + +If you have an existing virtual network that meets these criteria, you can skip to [Deploy your container group](#deploy-your-container-group). + +> [!TIP] +> You can modify the following commands with your own information as needed. + +1. Create the virtual network using the [az network vnet create][az-network-vnet-create] command. Enter address prefixes in Classless Inter-Domain Routing (CIDR) format (for example: `10.0.0.0/16`). + + ```azurecli + az network vnet create \ + --name aci-vnet \ + --resource-group ACIResourceGroup \ + --location westus \ + --address-prefix 10.0.0.0/16 + ``` + +1. Create the subnet using the [az network vnet subnet create][az-network-vnet-subnet-create] command. The following command creates a subnet in your virtual network with a delegation that permits it to create container groups. For more information about working with subnets, see the [Add, change, or delete a virtual network subnet](../virtual-network/virtual-network-manage-subnet.md). For more information about subnet delegation, see the [Virtual Network Scenarios and Resources article section on delegated subnets](container-instances-virtual-network-concepts.md#subnet-delegated). + + ```azurecli + az network vnet subnet create \ + --name aci-subnet \ + --resource-group ACIResourceGroup \ + --vnet-name aci-vnet \ + --address-prefixes 10.0.0.0/24 \ + --delegations Microsoft.ContainerInstance/containerGroups + ``` + +1. Record the subnet ID key-value pair from the output of this command. You'll use this in your YAML configuration file later. It will take the form `"id"`: `"/subscriptions//resourceGroups/ACIResourceGroup/providers/Microsoft.Network/virtualNetworks/aci-vnet/subnets/aci-subnet"`. + +1. Create the private DNS Zone using the [az network private-dns zone create][az-network-private-dns-zone-create] command. + + ```azurecli + az network private-dns zone create -g ACIResourceGroup -n private.contoso.com + ``` + +1. 
Link the DNS zone to your virtual network using the [az network private-dns link vnet create][az-network-private-dns-link-vnet-create] command. The DNS server is only required to test name resolution. The `-e` flag enables automatic hostname registration, which is unneeded, so we set it to `false`. + + ```azurecli + az network private-dns link vnet create \ + -g ACIResourceGroup \ + -n aciDNSLink \ + -z private.contoso.com \ + -v aci-vnet \ + -e false + ``` + +Once you've completed the steps above, you should see an output with a final key-value pair that reads `"virtualNetworkLinkState"`: `"Completed"`. + +## Deploy your container group + +> [!NOTE] +> Custom DNS settings are not currently available in the Azure portal for container group deployments. They must be provided with YAML file, Resource Manager template, [REST API](/rest/api/container-instances/containergroups/createorupdate), or an [Azure SDK](https://azure.microsoft.com/downloads/). + +Copy the following YAML into a new file named *custom-dns-deploy-aci.yaml*. Edit the following configurations with your values: + +* `dnsConfig`: DNS settings for your containers within your container group. + * `nameServers`: A list of name servers to be used for DNS lookups. + * `searchDomains`: DNS suffixes to be appended for DNS lookups. +* `ipAddress`: The private IP address settings for the container group. + * `ports`: The ports to open, if any. + * `protocol`: The protocol (TCP or UDP) for the opened port. +* `subnetIDs`: Network settings for the subnet(s) in the virtual network. + * `id`: The full Resource Manager resource ID of the subnet, which you obtained earlier. + +> [!NOTE] +> The DNS config fields aren't automatically queried at this time, so these fields must be explicitly filled out. + +```yaml +apiVersion: '2021-07-01' +location: westus +name: pwsh-vnet-dns +properties: + containers: + - name: pwsh-vnet-dns + properties: + command: + - /bin/bash + - -c + - echo hello; sleep 10000 + environmentVariables: [] + image: mcr.microsoft.com/powershell:latest + ports: + - port: 80 + resources: + requests: + cpu: 1.0 + memoryInGB: 2.0 + dnsConfig: + nameServers: + - 10.0.0.10 # DNS Server 1 + - 10.0.0.11 # DNS Server 2 + searchDomains: contoso.com # DNS search suffix + ipAddress: + type: Private + ports: + - port: 80 + subnetIds: + - id: /subscriptions//resourceGroups/ACIResourceGroup/providers/Microsoft.Network/virtualNetworks/aci-vnet/subnets/aci-subnet + osType: Linux +tags: null +type: Microsoft.ContainerInstance/containerGroups +``` + +Deploy the container group with the [az container create][az-container-create] command, specifying the YAML file name with the `--file` parameter: + +```azurecli +az container create --resource-group ACIResourceGroup \ + --file custom-dns-deploy-aci.yaml +``` + +Once the deployment is complete, run the [az container show][az-container-show] command to display its status. Sample output: + +```azurecli +az container show --resource-group ACIResourceGroup --name pwsh-vnet-dns -o table +``` + +```console +Name ResourceGroup Status Image IP:ports Network CPU/Memory OsType Location +---------------- --------------- -------- ------------------------------------------ ----------- --------- --------------- -------- ---------- +pwsh-vnet-dns ACIResourceGroup Running mcr.microsoft.com/powershell 10.0.0.5:80 Private 1.0 core/2.0 gb Linux westus +``` + +After the status shows `Running`, execute the [az container exec][az-container-exec] command to obtain bash access within the container. 
+ +```azurecli +az container exec --resource-group ACIResourceGroup --name pwsh-vnet-dns --exec-command "/bin/bash" +``` + +Validate that DNS is working as expected from within your container. For example, read the `/etc/resolv.conf` file to ensure it's configured with the DNS settings provided in the YAML file. + +```console +root@wk-caas-81d609b206c541589e11058a6d260b38-90b0aff460a737f346b3b0:/# cat /etc/resolv.conf + +nameserver 10.0.0.10 +nameserver 10.0.0.11 +search contoso.com +``` + +## Clean up resources + +### Delete container instances + +When you're finished with the container instance you created, delete it with the [az container delete][az-container-delete] command: + +```azurecli +az container delete --resource-group ACIResourceGroup --name pwsh-vnet-dns -y +``` + +### Delete network resources + +If you don't plan to use this virtual network again, you can delete it with the [az network vnet delete][az-network-vnet-delete] command: + +```azurecli +az network vnet delete --resource-group ACIResourceGroup --name aci-vnet +``` + +### Delete resource group + +If you don't plan to use this resource group outside of this guide, you can delete it with [az group delete][az-group-delete] command: + +```azurecli +az group delete --name ACIResourceGroup +``` + +Enter `y` when prompted if you're sure you wish to perform the operation. + +## Next steps + +See the Azure quickstart template [Create an Azure container group with VNet](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.containerinstance/aci-vnet), to deploy a container group within a virtual network. + + +[az-network-vnet-create]: /cli/azure/network/vnet#az-network-vnet-create +[az-network-vnet-subnet-create]: /cli/azure/network/vnet/subnet#az-network-vnet-subnet-create +[az-network-private-dns-zone-create]: /cli/azure/network/private-dns/zone#az-network-private-dns-zone-create +[az-network-private-dns-link-vnet-create]: /cli/azure/network/private-dns/link/vnet#az-network-private-dns-link-vnet-create +[az-container-create]: /cli/azure/container#az-container-create +[az-container-show]: /cli/azure/container#az-container-show +[az-container-exec]: /cli/azure/container#az-container-exec +[az-container-delete]: /cli/azure/container#az-container-delete +[az-network-vnet-delete]: /cli/azure/network/vnet#az-network-vnet-delete +[az-group-delete]: /cli/azure/group#az-group-create +[cloud-shell-bash]: /cloud-shell/overview.md diff --git a/articles/container-instances/container-instances-reference-yaml.md b/articles/container-instances/container-instances-reference-yaml.md index 24bc811b5ff6e..d8338bcb079cd 100644 --- a/articles/container-instances/container-instances-reference-yaml.md +++ b/articles/container-instances/container-instances-reference-yaml.md @@ -1,28 +1,30 @@ --- title: YAML reference for container group description: Reference for the YAML file supported by Azure Container Instances to configure a container group -ms.topic: article -ms.date: 11/11/2021 +author: tomvcassidy +ms.topic: reference +ms.service: container-instances +services: container-instances +ms.author: tomcassidy +ms.date: 06/06/2022 --- # YAML reference: Azure Container Instances -This article covers the syntax and properties for the YAML file supported by Azure Container Instances to configure a [container group](container-instances-container-groups.md). Use a YAML file to input the group configuration to the [az container create][az-container-create] command in the Azure CLI. 
+This article covers the syntax and properties for the YAML file supported by Azure Container Instances to configure a [container group](container-instances-container-groups.md). Use a YAML file to input the group configuration to the [az container create][az-container-create] command in the Azure CLI. -A YAML file is a convenient way to configure a container group for reproducible deployments. It is a concise alternative to using a [Resource Manager template](/azure/templates/Microsoft.ContainerInstance/2019-12-01/containerGroups) or the Azure Container Instances SDKs to create or update a container group. +A YAML file is a convenient way to configure a container group for reproducible deployments. It's a concise alternative to using a [Resource Manager template](/azure/templates/Microsoft.ContainerInstance/2019-12-01/containerGroups) or the Azure Container Instances SDKs to create or update a container group. > [!NOTE] -> This reference applies to YAML files for Azure Container Instances REST API version `2021-07-01`. +> This reference applies to YAML files for Azure Container Instances REST API version `2021-10-01`. -## Schema +## Schema The schema for the YAML file follows, including comments to highlight key properties. For a description of the properties in this schema, see the [Property values](#property-values) section. -[!INCLUDE [network profile callout](./includes/network-profile/network-profile-callout.md)] - -```yml +```yaml name: string # Name of the container group -apiVersion: '2021-07-01' +apiVersion: '2021-10-01' location: string tags: {} identity: @@ -159,22 +161,17 @@ properties: # Properties of container group The following tables describe the values you need to set in the schema. - - ### Microsoft.ContainerInstance/containerGroups object | Name | Type | Required | Value | | ---- | ---- | ---- | ---- | | name | string | Yes | The name of the container group. | -| apiVersion | enum | Yes | 2018-10-01 | +| apiVersion | enum | Yes | **2021-10-01 (latest)**, 2021-09-01, 2021-07-01, 2021-03-01, 2020-11-01, 2019-12-01, 2018-10-01, 2018-09-01, 2018-07-01, 2018-06-01, 2018-04-01 | | location | string | No | The resource location. | | tags | object | No | The resource tags. | | identity | object | No | The identity of the container group, if configured. - [ContainerGroupIdentity object](#containergroupidentity-object) | | properties | object | Yes | [ContainerGroupProperties object](#containergroupproperties-object) | - - - ### ContainerGroupIdentity object | Name | Type | Required | Value | @@ -182,9 +179,6 @@ The following tables describe the values you need to set in the schema. | type | enum | No | The type of identity used for the container group. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the container group. - SystemAssigned, UserAssigned, SystemAssigned, UserAssigned, None | | userAssignedIdentities | object | No | The list of user identities associated with the container group. The user identity dictionary key references will be Azure Resource Manager resource IDs in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. | - - - ### ContainerGroupProperties object | Name | Type | Required | Value | @@ -202,9 +196,6 @@ The following tables describe the values you need to set in the schema. 
| encryptionProperties | object | No | The encryption properties for a container group. - [EncryptionProperties object](#encryptionproperties-object) | | initContainers | array | No | The init containers for a container group. - [InitContainerDefinition object](#initcontainerdefinition-object) | - - - ### Container object | Name | Type | Required | Value | @@ -212,9 +203,6 @@ The following tables describe the values you need to set in the schema. | name | string | Yes | The user-provided name of the container instance. | | properties | object | Yes | The properties of the container instance. - [ContainerProperties object](#containerproperties-object) | - - - ### ImageRegistryCredential object | Name | Type | Required | Value | @@ -225,9 +213,6 @@ The following tables describe the values you need to set in the schema. | identity | string | No | The resource ID of the user or system-assigned managed identity used to authenticate. | | identityUrl | string | No | The identity URL for the private registry. | - - - ### IpAddress object | Name | Type | Required | Value | @@ -237,9 +222,6 @@ The following tables describe the values you need to set in the schema. | ip | string | No | The IP exposed to the public internet. | | dnsNameLabel | string | No | The Dns name label for the IP. | - - - ### Volume object | Name | Type | Required | Value | @@ -250,18 +232,12 @@ The following tables describe the values you need to set in the schema. | secret | object | No | The secret volume. | | gitRepo | object | No | The git repo volume. - [GitRepoVolume object](#gitrepovolume-object) | - - - ### ContainerGroupDiagnostics object | Name | Type | Required | Value | | ---- | ---- | ---- | ---- | | logAnalytics | object | No | Container group log analytics information. - [LogAnalytics object](#loganalytics-object) | - - - ### ContainerGroupSubnetIds object | Name | Type | Required | Value | @@ -269,9 +245,6 @@ The following tables describe the values you need to set in the schema. | id | string | Yes | The identifier for a subnet. | | name | string | No | The name of the subnet. | - - - ### DnsConfiguration object | Name | Type | Required | Value | @@ -280,22 +253,20 @@ The following tables describe the values you need to set in the schema. | searchDomains | string | No | The DNS search domains for hostname lookup in the container group. | | options | string | No | The DNS options for the container group. | - ### EncryptionProperties object -| Name | Type | Required | Value | +| Name | Type | Required | Value | | ---- | ---- | ---- | ---- | -| vaultBaseUrl | string | Yes | The keyvault base url. | -| keyName | string | Yes | The encryption key name. | -| keyVersion | string | Yes | The encryption key version. | +| vaultBaseUrl | string | Yes | The keyvault base url. | +| keyName | string | Yes | The encryption key name. | +| keyVersion | string | Yes | The encryption key version. | ### InitContainerDefinition object -| Name | Type | Required | Value | +| Name | Type | Required | Value | | ---- | ---- | ---- | ---- | -| name | string | Yes | The name for the init container. | -| properties | object | Yes | The properties for the init container. - [InitContainerPropertiesDefinition object](#initcontainerpropertiesdefinition-object) - +| name | string | Yes | The name for the init container. | +| properties | object | Yes | The properties for the init container. 
- [InitContainerPropertiesDefinition object](#initcontainerpropertiesdefinition-object) ### ContainerProperties object @@ -310,9 +281,6 @@ The following tables describe the values you need to set in the schema. | livenessProbe | object | No | The liveness probe. - [ContainerProbe object](#containerprobe-object) | | readinessProbe | object | No | The readiness probe. - [ContainerProbe object](#containerprobe-object) | - - - ### Port object | Name | Type | Required | Value | @@ -320,9 +288,6 @@ The following tables describe the values you need to set in the schema. | protocol | enum | No | The protocol associated with the port. - TCP or UDP | | port | integer | Yes | The port number. | - - - ### AzureFileVolume object | Name | Type | Required | Value | @@ -332,9 +297,6 @@ The following tables describe the values you need to set in the schema. | storageAccountName | string | Yes | The name of the storage account that contains the Azure File share. | | storageAccountKey | string | No | The storage account access key used to access the Azure File share. | - - - ### GitRepoVolume object | Name | Type | Required | Value | @@ -343,8 +305,6 @@ The following tables describe the values you need to set in the schema. | repository | string | Yes | Repository URL | | revision | string | No | Commit hash for the specified revision. | - - ### LogAnalytics object | Name | Type | Required | Value | @@ -355,15 +315,14 @@ The following tables describe the values you need to set in the schema. | logType | enum | No | The log type to be used. - ContainerInsights or ContainerInstanceLogs | | metadata | object | No | Metadata for log analytics. | - ### InitContainerPropertiesDefinition object -| Name | Type | Required | Value | +| Name | Type | Required | Value | | ---- | ---- | ---- | ---- | -| image | string | No | The image of the init container. | -| command | array | No | The command to execute within the init container in exec form. - string | -| environmentVariables | array | No |The environment variables to set in the init container. - [EnvironmentVariable object](#environmentvariable-object) -| volumeMounts |array | No | The volume mounts available to the init container. - [VolumeMount object](#volumemount-object) +| image | string | No | The image of the init container. | +| command | array | No | The command to execute within the init container in exec form. - string | +| environmentVariables | array | No |The environment variables to set in the init container. - [EnvironmentVariable object](#environmentvariable-object) +| volumeMounts | array | No | The volume mounts available to the init container. - [VolumeMount object](#volumemount-object) ### ContainerPort object @@ -372,9 +331,6 @@ The following tables describe the values you need to set in the schema. | protocol | enum | No | The protocol associated with the port. - TCP or UDP | | port | integer | Yes | The port number exposed within the container group. | - - - ### EnvironmentVariable object | Name | Type | Required | Value | @@ -383,9 +339,6 @@ The following tables describe the values you need to set in the schema. | value | string | No | The value of the environment variable. | | secureValue | string | No | The value of the secure environment variable. | - - - ### ResourceRequirements object | Name | Type | Required | Value | @@ -393,9 +346,6 @@ The following tables describe the values you need to set in the schema. | requests | object | Yes | The resource requests of this container instance. 
- [ResourceRequests object](#resourcerequests-object) | | limits | object | No | The resource limits of this container instance. - [ResourceLimits object](#resourcelimits-object) | - - - ### VolumeMount object | Name | Type | Required | Value | @@ -404,9 +354,6 @@ The following tables describe the values you need to set in the schema. | mountPath | string | Yes | The path within the container where the volume should be mounted. Must not contain colon (:). | | readOnly | boolean | No | The flag indicating whether the volume mount is read-only. | - - - ### ContainerProbe object | Name | Type | Required | Value | @@ -419,9 +366,6 @@ The following tables describe the values you need to set in the schema. | successThreshold | integer | No | The success threshold. | | timeoutSeconds | integer | No | The timeout seconds. | - - - ### ResourceRequests object | Name | Type | Required | Value | @@ -430,9 +374,6 @@ The following tables describe the values you need to set in the schema. | cpu | number | Yes | The CPU request of this container instance. | | gpu | object | No | The GPU request of this container instance. - [GpuResource object](#gpuresource-object) | - - - ### ResourceLimits object | Name | Type | Required | Value | @@ -441,18 +382,12 @@ The following tables describe the values you need to set in the schema. | cpu | number | No | The CPU limit of this container instance. | | gpu | object | No | The GPU limit of this container instance. - [GpuResource object](#gpuresource-object) | - - - ### ContainerExec object | Name | Type | Required | Value | | ---- | ---- | ---- | ---- | | command | array | No | The commands to execute within the container. - string | - - - ### ContainerHttpGet object | Name | Type | Required | Value | @@ -476,7 +411,6 @@ The following tables describe the values you need to set in the schema. | count | integer | Yes | The count of the GPU resource. | | sku | enum | Yes | The SKU of the GPU resource. - K80, P100, V100 | - ## Next steps See the tutorial [Deploy a multi-container group using a YAML file](container-instances-multi-container-yaml.md). diff --git a/articles/container-instances/container-instances-troubleshooting.md b/articles/container-instances/container-instances-troubleshooting.md index 0db865054dabe..8c6f5f358fcd7 100644 --- a/articles/container-instances/container-instances-troubleshooting.md +++ b/articles/container-instances/container-instances-troubleshooting.md @@ -94,7 +94,7 @@ This error indicates that due to heavy load in the region in which you are attem ## Issues during container group runtime ### Container had an isolated restart without explicit user input -There are two broad categories for why a container group may restart without explicit user input. First, containers may experience restarts caused by an application process crash. The ACI service recommends leveraging observability solutions such as Application Insights SDK, container group metrics, and container group logs to determine why the application experienced issues. Second, customers may experience restarts initiated by the ACI infrastructure due to maintenance events. To increase the availability of your application, run multiple container groups behind an ingress component such as an Application Gateway or Traffic Manager. +There are two broad categories for why a container group may restart without explicit user input. First, containers may experience restarts caused by an application process crash. 
The ACI service recommends leveraging observability solutions such as [Application Insights SDK](../azure-monitor/app/app-insights-overview.md), [container group metrics](container-instances-monitor.md), and [container group logs](container-instances-get-logs.md) to determine why the application experienced issues. Second, customers may experience restarts initiated by the ACI infrastructure due to maintenance events. To increase the availability of your application, run multiple container groups behind an ingress component such as an [Application Gateway](../application-gateway/overview.md) or [Traffic Manager](../traffic-manager/traffic-manager-overview.md). ### Container continually exits and restarts (no long-running process) diff --git a/articles/container-instances/container-instances-using-azure-container-registry.md b/articles/container-instances/container-instances-using-azure-container-registry.md index fa4644cb6da0a..f5846a83e379d 100644 --- a/articles/container-instances/container-instances-using-azure-container-registry.md +++ b/articles/container-instances/container-instances-using-azure-container-registry.md @@ -19,7 +19,7 @@ ms.custom: mvc, devx-track-azurecli ## Limitations -* The [Azure Container Registry](../container-registry/container-registry-vnet.md) must have [Public Access set to 'All Networks'](../container-registry/container-registry-access-selected-networks.md). To use an Azure container registry with Public Access set to 'Select Networks' or 'None', visit [ACI's article for using Managed-Identity based authentication with ACR](/using-azure-container-registry-mi.md). +* The [Azure Container Registry](../container-registry/container-registry-vnet.md) must have [Public Access set to 'All Networks'](../container-registry/container-registry-access-selected-networks.md). To use an Azure container registry with Public Access set to 'Select Networks' or 'None', visit [ACI's article for using Managed-Identity based authentication with ACR](/azure/container-registry/container-registry-authentication-managed-identity). ## Configure registry authentication diff --git a/articles/container-registry/TOC.yml b/articles/container-registry/TOC.yml index 7bd6913227245..e53ac54a69a6b 100644 --- a/articles/container-registry/TOC.yml +++ b/articles/container-registry/TOC.yml @@ -185,7 +185,7 @@ items: - name: Scan with Microsoft Defender for Cloud href: scan-images-defender.md - - name: Scan with GitHub actions + - name: Scan with GitHub Actions href: github-action-scan.md - name: Troubleshoot expanded: false diff --git a/articles/container-registry/buffer-gate-public-content.md b/articles/container-registry/buffer-gate-public-content.md index 65b396bcef684..b3e8414fbd1b8 100644 --- a/articles/container-registry/buffer-gate-public-content.md +++ b/articles/container-registry/buffer-gate-public-content.md @@ -63,15 +63,14 @@ For details, see [Docker Hub authenticated pulls on App Service](https://azure.g To begin managing copies of public images, you can create an Azure container registry if you don't already have one. Create a registry using the [Azure CLI](container-registry-get-started-azure-cli.md), [Azure portal](container-registry-get-started-portal.md), [Azure PowerShell](container-registry-get-started-powershell.md), or other tools. +# [Azure CLI](#tab/azure-cli) + As a recommended one-time step, [import](container-registry-import-images.md) base images and other public content to your Azure container registry. 
The [az acr import](/cli/azure/acr#az-acr-import) command in the Azure CLI supports image import from public registries such as Docker Hub and Microsoft Container Registry and from other private container registries. `az acr import` doesn't require a local Docker installation. You can run it with a local installation of the Azure CLI or directly in Azure Cloud Shell. It supports images of any OS type, multi-architecture images, or OCI artifacts such as Helm charts. Depending on your organization's needs, you can import to a dedicated registry or a repository in a shared registry. -# [Azure CLI](#tab/azure-cli) -Example: - ```azurecli-interactive az acr import \ --name myregistry \ @@ -81,18 +80,28 @@ az acr import \ --password ``` -# [PowerShell](#tab/azure-powershell) -Example: +# [Azure PowerShell](#tab/azure-powershell) + +As a recommended one-time step, [import](container-registry-import-images.md) base images and other public content to your Azure container registry. The [Import-AzContainerRegistryImage](/powershell/module/az.containerregistry/import-azcontainerregistryimage) command in the Azure PowerShell supports image import from public registries such as Docker Hub and Microsoft Container Registry and from other private container registries. + +`Import-AzContainerRegistryImage` doesn't require a local Docker installation. You can run it with a local installation of the Azure PowerShell or directly in Azure Cloud Shell. It supports images of any OS type, multi-architecture images, or OCI artifacts such as Helm charts. + +Depending on your organization's needs, you can import to a dedicated registry or a repository in a shared registry. ```azurepowershell-interactive -Import-AzContainerRegistryImage - -SourceImage library/busybox:latest - -ResourceGroupName $resourceGroupName - -RegistryName $RegistryName - -SourceRegistryUri docker.io - -TargetTag busybox:latest +$Params = @{ + SourceImage = 'library/busybox:latest' + ResourceGroupName = $resourceGroupName + RegistryName = $RegistryName + SourceRegistryUri = 'docker.io' + TargetTag = 'busybox:latest' +} +Import-AzContainerRegistryImage @Params ``` - Credentials are required if the source registry is not available publicly or the admin user is disabled. + +Credentials are required if the source registry is not available publicly or the admin user is disabled. + +--- ## Update image references diff --git a/articles/container-registry/container-registry-check-health.md b/articles/container-registry/container-registry-check-health.md index bc040e32eed55..82a15bfc4f69f 100644 --- a/articles/container-registry/container-registry-check-health.md +++ b/articles/container-registry/container-registry-check-health.md @@ -79,6 +79,10 @@ Fetch refresh token for registry 'myregistry.azurecr.io' : OK Fetch access token for registry 'myregistry.azurecr.io' : OK ``` +## Check if registry is configured with quarantine + +Once you enable a container registry to be quarantined, every image you publish to this repository will be quarantined. Any attempts to access or pull quarantined images will fail with an error. For more information, See [pull the quarantine image](https://github.com/Azure/acr/tree/main/docs/preview/quarantine#pull-the-quarantined-image). + ## Next steps For details about error codes returned by the [az acr check-health][az-acr-check-health] command, see the [Health check error reference](container-registry-health-error-reference.md). 
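The quarantine section above describes the behavior but not how to inspect the setting. One possible check is to look at the registry's policies; this is a sketch, and the `policies.quarantinePolicy` property path is assumed from the Microsoft.ContainerRegistry ARM schema rather than taken from this article.

```azurecli
# Show the quarantine policy status for the registry (expected values: enabled or disabled).
az acr show --name myregistry --query "policies.quarantinePolicy" --output json
```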
diff --git a/articles/container-registry/container-registry-faq.yml b/articles/container-registry/container-registry-faq.yml index 0566faca45f08..04b49af41b449 100644 --- a/articles/container-registry/container-registry-faq.yml +++ b/articles/container-registry/container-registry-faq.yml @@ -465,7 +465,7 @@ sections: Why does my pull or push request fail with disallowed operation? answer: | Here are some scenarios where operations may be disallowed: - * Classic registries are no longer supported. Please upgrade to a supported [service tier](./container-registry-skus.md) using [az acr update](/cli/azure/acr#az_acr_update) or the Azure portal. + * Classic registries are no longer supported. Please upgrade to a supported [service tier](./container-registry-skus.md) using [az acr update](/cli/azure/acr#az-acr-update) or the Azure portal. * The image or repository maybe locked so that it can't be deleted or updated. You can use the [az acr show repository](./container-registry-image-lock.md) command to view current attributes. * Some operations are disallowed if the image is in quarantine. Learn more about [quarantine](https://github.com/Azure/acr/tree/master/docs/preview/quarantine). * Your registry may have reached its [storage limit](container-registry-skus.md#service-tier-features-and-limits). @@ -550,9 +550,9 @@ sections: - question: | How to resolve if the Agent pool creation fails due to timeout issues? answer: | - Set up the correct [firewalls rules](/azure/container-registry/tasks-agent-pools#add-firewall-rules) to the existing network security groups or user-defined routes. After the setup, wait a few minutes for the firewall rules to apply. + Set up the correct [firewalls rules](./tasks-agent-pools.md#add-firewall-rules) to the existing network security groups or user-defined routes. After the setup, wait a few minutes for the firewall rules to apply. additionalContent: | ## Next steps - * [Learn more](container-registry-intro.md) about Azure Container Registry. + * [Learn more](container-registry-intro.md) about Azure Container Registry. \ No newline at end of file diff --git a/articles/container-registry/container-registry-intro.md b/articles/container-registry/container-registry-intro.md index 8ce63b07dedda..37d55f631e8f4 100644 --- a/articles/container-registry/container-registry-intro.md +++ b/articles/container-registry/container-registry-intro.md @@ -1,15 +1,15 @@ --- title: Managed container registries -description: Introduction to the Azure Container Registry service, providing cloud-based, managed, private Docker registries. +description: Introduction to the Azure Container Registry service, providing cloud-based, managed registries. author: stevelas ms.topic: overview ms.date: 02/10/2020 ms.author: stevelas ms.custom: "seodec18, mvc" --- -# Introduction to private Docker container registries in Azure +# Introduction to Container registries in Azure -Azure Container Registry is a managed, private Docker registry service based on the open-source Docker Registry 2.0. Create and maintain Azure container registries to store and manage your private Docker container images and related artifacts. +Azure Container Registry is a managed registry service based on the open-source Docker Registry 2.0. Create and maintain Azure container registries to store and manage your container images and related artifacts. Use Azure container registries with your existing container development and deployment pipelines, or use Azure Container Registry Tasks to build container images in Azure. 
Build on demand, or fully automate builds with triggers such as source code commits and base image updates. diff --git a/articles/container-registry/container-registry-private-link.md b/articles/container-registry/container-registry-private-link.md index e8736fb7feedf..c4dc9a67bf5bd 100644 --- a/articles/container-registry/container-registry-private-link.md +++ b/articles/container-registry/container-registry-private-link.md @@ -332,11 +332,11 @@ az acr update --name $REGISTRY_NAME --public-network-enabled false Consider the following options to execute the `az acr build` successfully. > [!NOTE] -> Once you disable public network [access here](/azure/container-registry/container-registry-private-link#disable-public-access), then `az acr build` commands will no longer work. +> Once you disable public network [access here](#disable-public-access), then `az acr build` commands will no longer work. -1. Assign a [dedicated agent pool.](/azure/container-registry/tasks-agent-pools#Virtual-network-support) -2. If agent pool is not available in the region, add the regional [Azure Container Registry Service Tag IPv4](/azure/virtual-network/service-tags-overview#use-the-service-tag-discovery-api) to the [firewall access rules.](/azure/container-registry/container-registry-firewall-access-rules#allow-access-by-ip-address-range) -3. Create an ACR task with a managed identity, and enable trusted services to [access network restricted ACR.](/azure/container-registry/allow-access-trusted-services#example-acr-tasks) +1. Assign a [dedicated agent pool.](./tasks-agent-pools.md) +2. If agent pool is not available in the region, add the regional [Azure Container Registry Service Tag IPv4](../virtual-network/service-tags-overview.md#use-the-service-tag-discovery-api) to the [firewall access rules.](./container-registry-firewall-access-rules.md#allow-access-by-ip-address-range) +3. Create an ACR task with a managed identity, and enable trusted services to [access network restricted ACR.](./allow-access-trusted-services.md#example-acr-tasks) ## Validate private link connection diff --git a/articles/container-registry/container-registry-support-policies.md b/articles/container-registry/container-registry-support-policies.md index 0e4e3adfa559c..d06e4d5db9feb 100644 --- a/articles/container-registry/container-registry-support-policies.md +++ b/articles/container-registry/container-registry-support-policies.md @@ -18,7 +18,7 @@ This article provides details about Azure Container Registry (ACR) support polic >* [Encrypt using Customer managed keys](container-registry-customer-managed-keys.md) >* [Enable Content trust](container-registry-content-trust.md) >* [Scan Images using Azure Security Center](../defender-for-cloud/defender-for-container-registries-introduction.md) ->* [ACR Tasks](/azure/container-registry/container-registry-tasks-overview) +>* [ACR Tasks](./container-registry-tasks-overview.md) >* [Import container images to ACR](container-registry-import-images.md) >* [Image locking in ACR](container-registry-image-lock.md) >* [Synchronize content with ACR using Connected Registry](intro-connected-registry.md) @@ -68,4 +68,4 @@ This article provides details about Azure Container Registry (ACR) support polic ## Upstream bugs The ACR support will identify the root cause of every issue raised. The team will report all the identified bugs as an [issue in the ACR repository](https://github.com/Azure/acr/issues) with supporting details. 
The engineering team will review and provide a workaround solution, bug fix, or upgrade with a new release timeline. All the bug fixes integrate from upstream. -Customers can watch the issues, bug fixes, add more details, and follow the new releases. +Customers can watch the issues, bug fixes, add more details, and follow the new releases. \ No newline at end of file diff --git a/articles/container-registry/container-registry-tasks-base-images.md b/articles/container-registry/container-registry-tasks-base-images.md index 7cf1dce4d50c9..e060c0d259346 100644 --- a/articles/container-registry/container-registry-tasks-base-images.md +++ b/articles/container-registry/container-registry-tasks-base-images.md @@ -67,7 +67,7 @@ See the following tutorials for scenarios to automate application image builds a * [Automate container image builds when a base image is updated in the same registry](container-registry-tutorial-base-image-update.md) -* [Automate container image builds when a base image is updated in a different registry](container-registry-tutorial-base-image-update.md) +* [Automate container image builds when a base image is updated in a different registry](container-registry-tutorial-private-base-image-update.md) diff --git a/articles/container-registry/tasks-agent-pools.md b/articles/container-registry/tasks-agent-pools.md index a4d29d367bceb..66a795592ccdf 100644 --- a/articles/container-registry/tasks-agent-pools.md +++ b/articles/container-registry/tasks-agent-pools.md @@ -65,6 +65,7 @@ Create an agent pool by using the [az acr agentpool create][az-acr-agentpool-cre ```azurecli az acr agentpool create \ + --registry MyRegistry \ --name myagentpool \ --tier S2 ``` @@ -78,6 +79,7 @@ Scale the pool size up or down with the [az acr agentpool update][az-acr-agentpo ```azurecli az acr agentpool update \ + --registry MyRegistry \ --name myagentpool \ --count 2 ``` @@ -112,6 +114,7 @@ subnetId=$(az network vnet subnet show \ --query id --output tsv) az acr agentpool create \ + --registry MyRegistry \ --name myagentpool \ --tier S2 \ --subnet-id $subnetId @@ -131,6 +134,7 @@ Queue a quick task on the agent pool by using the [az acr build][az-acr-build] c ```azurecli az acr build \ + --registry MyRegistry \ --agent-pool myagentpool \ --image myimage:mytag \ --file Dockerfile \ @@ -143,6 +147,7 @@ For example, create a scheduled task on the agent pool with [az acr task create] ```azurecli az acr task create \ + --registry MyRegistry \ --name mytask \ --agent-pool myagentpool \ --image myimage:mytag \ @@ -156,6 +161,7 @@ To verify task setup, run [az acr task run][az-acr-task-run]: ```azurecli az acr task run \ + --registry MyRegistry \ --name mytask ``` @@ -165,6 +171,7 @@ To find the number of runs currently scheduled on the agent pool, run [az acr ag ```azurecli az acr agentpool show \ + --registry MyRegistry \ --name myagentpool \ --queue-count ``` diff --git a/articles/cosmos-db/.openpublishing.redirection.cosmos-db.json b/articles/cosmos-db/.openpublishing.redirection.cosmos-db.json index b10c325d91560..a29e3a786bd71 100644 --- a/articles/cosmos-db/.openpublishing.redirection.cosmos-db.json +++ b/articles/cosmos-db/.openpublishing.redirection.cosmos-db.json @@ -3625,6 +3625,16 @@ "source_path_from_root": "/articles/cosmos-db/sql/advanced-threat-protection.md", "redirect_url": "/azure/cosmos-db/sql/defender-for-cosmos-db", "redirect_document_id": false - } + }, + { + "source_path_from_root": "/articles/cosmos-db/how-to-container-copy.md", + "redirect_url": "/azure/cosmos-db/", + 
"redirect_document_id": false + }, + { + "source_path_from_root": "/articles/cosmos-db/intra-account-container-copy.md", + "redirect_url": "/azure/cosmos-db/", + "redirect_document_id": false + }, ] } diff --git a/articles/cosmos-db/TOC.yml b/articles/cosmos-db/TOC.yml index 7b65c576eca24..50a99de6180e7 100644 --- a/articles/cosmos-db/TOC.yml +++ b/articles/cosmos-db/TOC.yml @@ -4,6 +4,8 @@ items: - name: Welcome to Azure Cosmos DB href: introduction.md + - name: Try Azure Cosmos DB free + href: try-free.md - name: Choose an API href: choose-api.md - name: NoSQL vs. relational databases @@ -329,6 +331,10 @@ - name: Ternary and coalesce operators displayName: ternary, coalesce, operators href: sql/sql-query-ternary-coalesce-operators.md + - name: Bitwise operators + displayName: bitwise, binary, operators + href: sql/sql-query-bitwise-operators.md + - name: Functions items: - name: User-defined functions @@ -384,6 +390,9 @@ - name: DateTimeAdd displayName: DateTimeAdd, date time add, timestamp, date and time functions, datetime href: sql/sql-query-datetimeadd.md + - name: DateTimeBin + displayName: DateTimeBin, date time bin, date and time functions, datetime + href: sql/sql-query-datetimebin.md - name: DateTimeDiff displayName: DateTimeDiff, date time diff, date and time functions, datetime href: sql/sql-query-datetimediff.md @@ -1694,6 +1703,8 @@ href: ../private-link/tutorial-private-endpoint-cosmosdb-portal.md?bc=%2fazure%2fcosmos-db%2fbreadcrumb%2ftoc.json&toc=%2fazure%2fcosmos-db%2ftoc.json - name: Use Azure Key Vault to access data href: access-secrets-from-keyvault.md + - name: Access Azure Key Vault with managed identity + href: access-key-vault-managed-identity.md - name: Restrict user access to data operations only href: how-to-restrict-user-data.md - name: Configure customer-managed keys @@ -1740,8 +1751,6 @@ href: how-to-alert-on-logical-partition-key-storage-size.md - name: Monitoring data reference href: monitor-cosmos-db-reference.md - - name: Application logging with Logic Apps - href: ../logic-apps/logic-apps-scenario-error-and-exception-handling.md?toc=/azure/cosmos-db/toc.json&bc=/azure/cosmos-db/breadcrumb/toc.json - name: Third-party monitoring solutions href: monitoring-solutions.md - name: Migrate data to Cosmos DB diff --git a/articles/cosmos-db/access-key-vault-managed-identity.md b/articles/cosmos-db/access-key-vault-managed-identity.md new file mode 100644 index 0000000000000..31d5254c55ea2 --- /dev/null +++ b/articles/cosmos-db/access-key-vault-managed-identity.md @@ -0,0 +1,92 @@ +--- +title: Use a managed identity to access Azure Key Vault from Azure Cosmos DB +description: Use managed identity in Azure Cosmos DB to access Azure Key Vault. +author: seesharprun +ms.author: sidandrews +ms.service: cosmos-db +ms.devlang: csharp +ms.topic: how-to +ms.date: 06/01/2022 +ms.reviewer: thweiss +--- + +# Access Azure Key Vault from Azure Cosmos DB using a managed identity +[!INCLUDE[appliesto-all-apis](includes/appliesto-all-apis.md)] + +Azure Cosmos DB may need to read secret/key data from Azure Key Vault. For example, your Azure Cosmos DB may require a customer-managed key stored in Azure Key Vault. To do this, Azure Cosmos DB should be configured with a managed identity, and then an Azure Key Vault access policy should grant the managed identity access. + +## Prerequisites + +- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). +- An existing Azure Cosmos DB SQL API account. 
[Create an Azure Cosmos DB SQL API account](sql/create-cosmosdb-resources-portal.md) +- An existing Azure Key Vault resource. [Create a key vault using the Azure CLI](../key-vault/general/quick-create-cli.md) +- To perform the steps in this article, install the [Azure CLI](/cli/azure/install-azure-cli) and [sign in to Azure](/cli/azure/authenticate-azure-cli). + +## Prerequisite check + +1. In a terminal or command window, store the names of your Azure Key Vault resource, Azure Cosmos DB account and resource group as shell variables named ``keyVaultName``, ``cosmosName``, and ``resourceGroupName``. + + ```azurecli-interactive + # Variable for function app name + keyVaultName="msdocs-keyvault" + + # Variable for Cosmos DB account name + cosmosName="msdocs-cosmos-app" + + # Variable for resource group name + resourceGroupName="msdocs-cosmos-keyvault-identity" + ``` + + > [!NOTE] + > These variables will be re-used in later steps. This example assumes your Azure Cosmos DB account name is ``msdocs-cosmos-app``, your key vault name is ``msdocs-keyvault`` and your resource group name is ``msdocs-cosmos-keyvault-identity``. + + +## Create a system-assigned managed identity in Azure Cosmos DB + +First, create a system-assigned managed identity for the existing Azure Cosmos DB account. + +> [!IMPORTANT] +> This how-to guide assumes that you are using a system-assigned managed identity. Many of the steps are similar when using a user-assigned managed identity. + +1. Run [``az cosmosdb identity assign``](/cli/azure/cosmosdb/identity#az-cosmosdb-identity-assign) to create a new system-assigned managed identity. + + ```azurecli-interactive + az cosmosdb identity assign \ + --resource-group $resourceGroupName \ + --name $cosmosName + ``` + +1. Retrieve the metadata of the system-assigned managed identity using [``az cosmosdb identity show``](/cli/azure/cosmosdb/identity#az-cosmosdb-identity-show), filter to just return the ``principalId`` property using the **query** parameter, and store the result in a shell variable named ``principal``. + + ```azurecli-interactive + principal=$( + az cosmosdb identity show \ + --resource-group $resourceGroupName \ + --name $cosmosName \ + --query principalId \ + --output tsv + ) + + echo $principal + ``` + + > [!NOTE] + > This variable will be re-used in a later step. + +## Create an Azure Key Vault access policy + +In this step, create an access policy in Azure Key Vault using the previously managed identity. + +1. Use the [``az keyvault set-policy``](/cli/azure/keyvault#az-keyvault-set-policy) command to create an access policy in Azure Key Vault that gives the Azure Cosmos DB managed identity permission to access Key Vault. Specifically, the policy will use the **key-permissions** parameters to grant permissions to ``get``, ``list``, and ``import`` keys. + + ```azurecli-interactive + az keyvault set-policy \ + --name $keyVaultName \ + --object-id $principal \ + --key-permissions get list import + ``` + +## Next steps + +* To use customer-managed keys in Azure Key Vault with your Azure Cosmos account, see [configure customer-managed keys](how-to-setup-cmk.md#using-managed-identity) +* To use Azure Key Vault to manage secrets, see [secure credentials](access-secrets-from-keyvault.md). 
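As an optional check, you can confirm from the same shell that the policy was applied to the vault. The following is a minimal sketch, not part of the walkthrough above; it assumes the ``$keyVaultName`` and ``$principal`` variables defined earlier are still set.

```azurecli-interactive
# Optional verification (assumes $keyVaultName and $principal are still set):
# list the key permissions granted to the Cosmos DB managed identity.
az keyvault show \
    --name $keyVaultName \
    --query "properties.accessPolicies[?objectId=='$principal'].permissions.keys" \
    --output json
```

The query should return the ``get``, ``list``, and ``import`` permissions granted in the previous step; an empty result suggests the access policy wasn't created.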
diff --git a/articles/cosmos-db/access-secrets-from-keyvault.md b/articles/cosmos-db/access-secrets-from-keyvault.md index 8233ac12ef820..d5323a0757a5f 100644 --- a/articles/cosmos-db/access-secrets-from-keyvault.md +++ b/articles/cosmos-db/access-secrets-from-keyvault.md @@ -1,22 +1,22 @@ --- title: Use Key Vault to store and access Azure Cosmos DB keys description: Use Azure Key Vault to store and access Azure Cosmos DB connection string, keys, endpoints. -author: ThomasWeiss -ms.author: thweiss +author: seesharprun +ms.author: sidandrews ms.service: cosmos-db ms.devlang: csharp ms.topic: how-to -ms.date: 05/23/2019 -ms.reviewer: sngun +ms.date: 06/01/2022 +ms.reviewer: thweiss --- -# Secure Azure Cosmos keys using Azure Key Vault +# Secure Azure Cosmos credentials using Azure Key Vault [!INCLUDE[appliesto-all-apis](includes/appliesto-all-apis.md)] >[!IMPORTANT] -> The recommended solution to access Azure Cosmos DB keys is to use a [system-assigned managed identity](managed-identity-based-authentication.md). If your service cannot take advantage of managed identities then use the [cert based solution](certificate-based-authentication.md). If both the managed identity solution and cert based solution do not meet your needs, please use the key vault solution below. +> The recommended solution to access Azure Cosmos DB is to use a [system-assigned managed identity](managed-identity-based-authentication.md). If your service cannot take advantage of managed identities then use the [cert based solution](certificate-based-authentication.md). If both the managed identity solution and cert based solution do not meet your needs, please use the key vault solution below. -When using Azure Cosmos DB for your applications, you can access the database, collections, documents by using the endpoint and the key within the app's configuration file. However, it's not safe to put keys and URL directly in the application code because they are available in clear text format to all the users. You want to make sure that the endpoint and keys are available but through a secured mechanism. This is where Azure Key Vault can help you to securely store and manage application secrets. +When using Azure Cosmos DB, you can access the database, collections, documents by using the endpoint and the key within the app's configuration file. However, it's not safe to put keys and URL directly in the application code because they're available in clear text format to all the users. You want to make sure that the endpoint and keys are available but through a secured mechanism. This scenario is where Azure Key Vault can help you to securely store and manage application secrets. The following steps are required to store and read Azure Cosmos DB access keys from Key Vault: @@ -32,8 +32,8 @@ The following steps are required to store and read Azure Cosmos DB access keys f 2. Select **Create a resource > Security > Key Vault**. 3. On the **Create key vault** section provide the following information: * **Name:** Provide a unique name for your Key Vault. - * **Subscription:** Choose the subscription that you will use. - * Under **Resource Group** choose **Create new** and enter a resource group name. + * **Subscription:** Choose the subscription that you'll use. + * Within **Resource Group**, choose **Create new** and enter a resource group name. * In the Location pull-down menu, choose a location. * Leave other options to their defaults. 4. After providing the information above, select **Create**. 
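The same key vault can also be created with the Azure CLI instead of the portal. This is an optional sketch, not part of the original steps; the resource group name, vault name, and location below are placeholders only.

```azurecli
# Placeholder names and location; substitute your own values.
az group create \
    --name my-keyvault-rg \
    --location westus2

az keyvault create \
    --name my-cosmos-keyvault \
    --resource-group my-keyvault-rg \
    --location westus2
```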
@@ -46,30 +46,30 @@ The following steps are required to store and read Azure Cosmos DB access keys f * Provide a **Name** for your secret * Provide the connection string of your Cosmos DB account into the **Value** field. And then select **Create**. - :::image type="content" source="./media/access-secrets-from-keyvault/create-a-secret.png" alt-text="Create a secret"::: + :::image type="content" source="./media/access-secrets-from-keyvault/create-a-secret.png" alt-text="Screenshot of the Create a secret dialog in the Azure portal."::: -4. After the secret is created, open it and copy the **Secret Identifier that is in the following format. You will use this identifier in the next section. +4. After the secret is created, open it and copy the **Secret Identifier that is in the following format. You'll use this identifier in the next section. `https://.vault.azure.net/secrets//` ## Create an Azure web application -1. Create an Azure web application or you can download the app from the [GitHub repository](https://github.com/Azure/azure-cosmos-dotnet-v2/tree/master/Demo/keyvaultdemo). It is a simple MVC application. +1. Create an Azure web application or you can download the app from the [GitHub repository](https://github.com/Azure/azure-cosmos-dotnet-v2/tree/master/Demo/keyvaultdemo). It's a simple MVC application. 2. Unzip the downloaded application and open the **HomeController.cs** file. Update the secret ID in the following line: `var secret = await keyVaultClient.GetSecretAsync("")` 3. **Save** the file, **Build** the solution. -4. Next deploy the application to Azure. Right click on project and choose **publish**. Create a new app service profile (you can name the app WebAppKeyVault1) and select **Publish**. +4. Next deploy the application to Azure. Open the context menu for the project and choose **publish**. Create a new app service profile (you can name the app WebAppKeyVault1) and select **Publish**. -5. Once the application is deployed. From the Azure portal, navigate to web app that you deployed, and turn on the **Managed service identity** of this application. +5. Once the application is deployed from the Azure portal, navigate to web app that you deployed, and turn on the **Managed service identity** of this application. - :::image type="content" source="./media/access-secrets-from-keyvault/turn-on-managed-service-identity.png" alt-text="Managed service identity"::: + :::image type="content" source="./media/access-secrets-from-keyvault/turn-on-managed-service-identity.png" alt-text="Screenshot of the Managed service identity page in the Azure portal."::: -If you will run the application now, you will see the following error, as you have not given any permission to this application in Key Vault. +If you run the application now, you'll see the following error, as you have not given any permission to this application in Key Vault. -:::image type="content" source="./media/access-secrets-from-keyvault/app-deployed-without-access.png" alt-text="App deployed without access"::: +:::image type="content" source="./media/access-secrets-from-keyvault/app-deployed-without-access.png" alt-text="Screenshot of the error message displayed by an app deployed without access."::: ## Register the application & grant permissions to read the Key Vault @@ -89,5 +89,5 @@ Similarly, you can add a user to access the key Vault. You need to add yourself ## Next steps -* To configure a firewall for Azure Cosmos DB see [firewall support](how-to-configure-firewall.md) article. 
+* To configure a firewall for Azure Cosmos DB, see [firewall support](how-to-configure-firewall.md) article. * To configure virtual network service endpoint, see [secure access by using VNet service endpoint](how-to-configure-vnet-service-endpoint.md) article. diff --git a/articles/cosmos-db/account-databases-containers-items.md b/articles/cosmos-db/account-databases-containers-items.md index b43f2d309908b..609798630e992 100644 --- a/articles/cosmos-db/account-databases-containers-items.md +++ b/articles/cosmos-db/account-databases-containers-items.md @@ -1,13 +1,12 @@ --- title: Azure Cosmos DB resource model description: This article describes Azure Cosmos DB resource model which includes the Azure Cosmos account, database, container, and the items. It also covers the hierarchy of these elements in an Azure Cosmos account. -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: conceptual ms.date: 07/12/2021 -ms.reviewer: sngun - --- # Azure Cosmos DB resource model diff --git a/articles/cosmos-db/analytical-store-introduction.md b/articles/cosmos-db/analytical-store-introduction.md index 59a438f2347bd..55bf5a3971de8 100644 --- a/articles/cosmos-db/analytical-store-introduction.md +++ b/articles/cosmos-db/analytical-store-introduction.md @@ -7,7 +7,7 @@ ms.topic: conceptual ms.date: 03/24/2022 ms.author: rosouz ms.custom: seo-nov-2020, devx-track-azurecli -ms.reviewer: wiassaf +ms.reviewer: mjbrown --- # What is Azure Cosmos DB analytical store? diff --git a/articles/cosmos-db/attachments.md b/articles/cosmos-db/attachments.md index d5ac37c1efb9e..6e08e4eb60da3 100644 --- a/articles/cosmos-db/attachments.md +++ b/articles/cosmos-db/attachments.md @@ -7,7 +7,7 @@ ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 08/07/2020 -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Azure Cosmos DB Attachments diff --git a/articles/cosmos-db/audit-restore-continuous.md b/articles/cosmos-db/audit-restore-continuous.md index 5fedbf7e56044..54dbb288b569b 100644 --- a/articles/cosmos-db/audit-restore-continuous.md +++ b/articles/cosmos-db/audit-restore-continuous.md @@ -6,13 +6,13 @@ ms.service: cosmos-db ms.topic: conceptual ms.date: 04/18/2022 ms.author: govindk -ms.reviewer: wiassaf +ms.reviewer: mjbrown --- # Audit the point in time restore action for continuous backup mode in Azure Cosmos DB [!INCLUDE[appliesto-all-apis-except-cassandra](includes/appliesto-all-apis-except-cassandra.md)] -Azure Cosmos DB provides you the list of all the point in time restores for continuous mode that were performed on a Cosmos DB account using [Activity Logs](/azure/azure-monitor/essentials/activity-log). Activity logs can be viewed for any Cosmos DB account from the **Activity Logs** page in the Azure portal. The Activity Log shows all the operations that were triggered on the specific account. When a point in time restore is triggered, it shows up as `Restore Database Account` operation on the source account as well as the target account. The Activity Log for the source account can be used to audit restore events, and the activity logs on the target account can be used to get the updates about the progress of the restore. +Azure Cosmos DB provides you the list of all the point in time restores for continuous mode that were performed on a Cosmos DB account using [Activity Logs](../azure-monitor/essentials/activity-log.md). 
Activity logs can be viewed for any Cosmos DB account from the **Activity Logs** page in the Azure portal. The Activity Log shows all the operations that were triggered on the specific account. When a point in time restore is triggered, it shows up as `Restore Database Account` operation on the source account as well as the target account. The Activity Log for the source account can be used to audit restore events, and the activity logs on the target account can be used to get the updates about the progress of the restore. ## Audit the restores that were triggered on a live database account @@ -32,7 +32,7 @@ For the accounts that were already deleted, there would not be any database acco :::image type="content" source="media/restore-account-continuous-backup/continuous-backup-restore-details-deleted-json.png" alt-text="Azure Cosmos DB restore audit activity log." lightbox="media/restore-account-continuous-backup/continuous-backup-restore-details-deleted-json.png"::: -The activity logs can also be accessed using Azure CLI or Azure PowerShell. For more information on activity logs, review [Azure Activity log - Azure Monitor](/azure/azure-monitor/essentials/activity-log). +The activity logs can also be accessed using Azure CLI or Azure PowerShell. For more information on activity logs, review [Azure Activity log - Azure Monitor](../azure-monitor/essentials/activity-log.md). ## Track the progress of the restore operation @@ -48,4 +48,4 @@ The account status would be *Creating*, but it would have an Activity Log page. * Provision an account with continuous backup by using the [Azure portal](provision-account-continuous-backup.md#provision-portal), [PowerShell](provision-account-continuous-backup.md#provision-powershell), the [Azure CLI](provision-account-continuous-backup.md#provision-cli), or [Azure Resource Manager](provision-account-continuous-backup.md#provision-arm-template). * [Manage permissions](continuous-backup-restore-permissions.md) required to restore data with continuous backup mode. * Learn about the [resource model of continuous backup mode](continuous-backup-restore-resource-model.md). - * Explore the [Frequently asked questions for continuous mode](continuous-backup-restore-frequently-asked-questions.yml). + * Explore the [Frequently asked questions for continuous mode](continuous-backup-restore-frequently-asked-questions.yml). 
\ No newline at end of file diff --git a/articles/cosmos-db/automated-recommendations.md b/articles/cosmos-db/automated-recommendations.md index 96a30adc57d7f..a6a479afb7d8e 100644 --- a/articles/cosmos-db/automated-recommendations.md +++ b/articles/cosmos-db/automated-recommendations.md @@ -6,7 +6,7 @@ ms.author: thweiss ms.service: cosmos-db ms.topic: conceptual ms.date: 08/26/2021 -ms.reviewer: sngun +ms.reviewer: mjbrown --- diff --git a/articles/cosmos-db/bulk-executor-overview.md b/articles/cosmos-db/bulk-executor-overview.md index da17c185c93c0..a5afdf5fc1965 100644 --- a/articles/cosmos-db/bulk-executor-overview.md +++ b/articles/cosmos-db/bulk-executor-overview.md @@ -6,7 +6,7 @@ ms.service: cosmos-db ms.topic: how-to ms.date: 05/28/2019 ms.author: abramees -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Azure Cosmos DB bulk executor library overview diff --git a/articles/cosmos-db/burst-capacity-faq.yml b/articles/cosmos-db/burst-capacity-faq.yml index 77fc67c4ce5ef..d6916e4cb1747 100644 --- a/articles/cosmos-db/burst-capacity-faq.yml +++ b/articles/cosmos-db/burst-capacity-faq.yml @@ -18,12 +18,20 @@ summary: | sections: - name: General questions: + - question: | + How much does it cost to use burst capacity? + answer: | + There's no charge to use burst capacity. - question: | How does burst capacity work with autoscale? answer: | Autoscale and burst capacity are compatible. Autoscale gives you a guaranteed instant 10 times scale range. Burst capacity allows you to take advantage of unused, idle capacity to handle temporary spikes, potentially beyond your autoscale max RU/s. For example, suppose we have an autoscale container with one physical partition that scales between 100 - 1000 RU/s. Without burst capacity, any requests that consume beyond 1000 RU/s would be rate limited. With burst capacity however, the partition can accumulate a maximum of 1000 RU/s of idle capacity each second. Burst capacity allows the partition to burst at a maximum rate of 3000 RU/s for a limited amount of time. - The autoscale max RU/s per physical partition must be less than 3000 RU/s for burst capacity to be applicable. + Accumulation of burst is based on the maximum autoscale RU/s. + + The autoscale maximum RU/s per physical partition must be less than 3000 RU/s for burst capacity to be applicable. + + When burst capacity is used with autoscale, autoscale will use up to the maximum RU/s before using burst capacity. You may see autoscale scale up to max RU/s during spikes of traffic. - question: | What resources can use burst capacity? answer: | @@ -31,7 +39,7 @@ sections: - question: | How can I monitor burst capacity? answer: | - [Azure Monitor metrics](monitor-cosmos-db.md#analyzing-metrics), built-in to Azure Cosmos DB, can filter by the dimension **CapacityType** on the **TotalRequests** and **TotalRequestUnits** metrics. Requests served with burst capacity will have **CapacityType** equal to **BurstCapacity**. + [Azure Monitor metrics](monitor-cosmos-db.md#analyzing-metrics), built-in to Azure Cosmos DB, can filter by the dimension **CapacityType** on the **TotalRequests** and **TotalRequestUnits (preview)** metrics. Requests served with burst capacity will have **CapacityType** equal to **BurstCapacity**. - question: | How can I see which resources have less than 3000 RU/s per physical partition? 
answer: | diff --git a/articles/cosmos-db/burst-capacity.md b/articles/cosmos-db/burst-capacity.md index 2b306e23fc2ff..1f7f21c456e45 100644 --- a/articles/cosmos-db/burst-capacity.md +++ b/articles/cosmos-db/burst-capacity.md @@ -28,25 +28,49 @@ After the 10 seconds is over, the burst capacity has been used up. If the worklo ## Getting started -To get started using burst capacity, enroll in the preview by filing a support ticket in the [Azure portal](https://portal.azure.com). +To get started using burst capacity, enroll in the preview by submitting a request for the **Azure Cosmos DB Burst Capacity** feature via the [**Preview Features** page](../azure-resource-manager/management/preview-features.md) in your Azure Subscription overview page. +- Before submitting your request, verify that your Azure Cosmos DB account(s) meet all the [preview eligibility criteria](#preview-eligibility-criteria). +- The Azure Cosmos DB team will review your request and contact you via email to confirm which account(s) in the subscription you want to enroll in the preview. ## Limitations -### SDK requirements (SQL API only) +### Preview eligibility criteria +To enroll in the preview, your Cosmos account must meet all the following criteria: + - Your Cosmos account is using provisioned throughput (manual or autoscale). Burst capacity doesn't apply to serverless accounts. + - If you're using SQL API, your application must use the Azure Cosmos DB .NET V3 SDK, version 3.27.0 or higher. When burst capacity is enabled on your account, all requests sent from non .NET SDKs, or older .NET SDK versions won't be accepted. + - There are no SDK or driver requirements to use the feature with Cassandra API, Gremlin API, Table API, or API for MongoDB. + - Your Cosmos account isn't using any unsupported connectors + - Azure Data Factory + - Azure Stream Analytics + - Logic Apps + - Azure Functions + - Azure Search -Burst capacity is supported only in the latest preview version of the .NET v3 SDK. When the feature is enabled on your account, you must only use the supported SDK. Requests sent from other SDKs or earlier versions won't be accepted. There are no driver or SDK requirements to use burst capacity with other APIs. +### SDK requirements (SQL and Table API only) +#### SQL API +For SQL API accounts, burst capacity is supported only in the latest version of the .NET v3 SDK. When the feature is enabled on your account, you must only use the supported SDK. Requests sent from other SDKs or earlier versions won't be accepted. There are no driver or SDK requirements to use burst capacity with Gremlin API, Cassandra API, or API for MongoDB. -Find the latest preview version the supported SDK: +Find the latest version of the supported SDK: | SDK | Supported versions | Package manager link | | --- | --- | --- | | **.NET SDK v3** | *>= 3.27.0* | | -Support for other SDKs is planned for the future. +Support for other SQL API SDKs is planned for the future. > [!TIP] > You should ensure that your application has been updated to use a compatible SDK version prior to enrolling in the preview. If you're using the legacy .NET V2 SDK, follow the [.NET SDK v3 migration guide](sql/migrate-dotnet-v3.md). +#### Table API +For Table API accounts, burst capacity is supported only when using the latest version of the Tables SDK. When the feature is enabled on your account, you must only use the supported SDK. Requests sent from other SDKs or earlier versions won't be accepted. 
The legacy SDK with namespace `Microsoft.Azure.CosmosDB.Table` isn't supported. Follow the [migration guide](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/tables/Azure.Data.Tables/MigrationGuide.md) to upgrade to the latest SDK. + +| SDK | Supported versions | Package manager link | +| --- | --- | --- | +| **Azure Tables client library for .NET** | *>= 12.0.0* | | +| **Azure Tables client library for Java** | *>= 12.0.0* | | +| **Azure Tables client library for JavaScript** | *>= 12.0.0* | | +| **Azure Tables client library for Python** | *>= 12.0.0* | | + ### Unsupported connectors If you enroll in the preview, the following connectors will fail. diff --git a/articles/cosmos-db/cassandra/apache-cassandra-consistency-mapping.md b/articles/cosmos-db/cassandra/apache-cassandra-consistency-mapping.md index 1e6f11fdcdf8d..3e5fbfe9c0e1e 100644 --- a/articles/cosmos-db/cassandra/apache-cassandra-consistency-mapping.md +++ b/articles/cosmos-db/cassandra/apache-cassandra-consistency-mapping.md @@ -7,7 +7,7 @@ ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: conceptual ms.date: 03/24/2022 -ms.reviewer: wiassaf +ms.reviewer: mjbrown --- # Apache Cassandra and Azure Cosmos DB Cassandra API consistency levels diff --git a/articles/cosmos-db/cassandra/cassandra-adoption.md b/articles/cosmos-db/cassandra/cassandra-adoption.md index cc7451399353a..d4aa0721d26a7 100644 --- a/articles/cosmos-db/cassandra/cassandra-adoption.md +++ b/articles/cosmos-db/cassandra/cassandra-adoption.md @@ -7,7 +7,7 @@ ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: how-to ms.date: 03/24/2022 -ms.reviewer: wiassaf +ms.reviewer: mjbrown ms.custom: kr2b-contr-experiment --- diff --git a/articles/cosmos-db/cassandra/cassandra-introduction.md b/articles/cosmos-db/cassandra/cassandra-introduction.md index 00c7f7c257d18..714818100088d 100644 --- a/articles/cosmos-db/cassandra/cassandra-introduction.md +++ b/articles/cosmos-db/cassandra/cassandra-introduction.md @@ -3,7 +3,7 @@ title: Introduction to the Azure Cosmos DB Cassandra API description: Learn how you can use Azure Cosmos DB to "lift-and-shift" existing applications and build new applications by using the Cassandra drivers and CQL author: TheovanKraay ms.author: thvankra -ms.reviewer: sngun +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: overview diff --git a/articles/cosmos-db/cassandra/cassandra-partitioning.md b/articles/cosmos-db/cassandra/cassandra-partitioning.md index aae95a3f6cd91..6713f0b161d5d 100644 --- a/articles/cosmos-db/cassandra/cassandra-partitioning.md +++ b/articles/cosmos-db/cassandra/cassandra-partitioning.md @@ -83,7 +83,7 @@ When data is returned, it is sorted by the clustering key, as expected in Apache :::image type="content" source="./media/cassandra-partitioning/select-from-pk.png" alt-text="Screenshot that shows the returned data that is sorted by the clustering key."::: > [!WARNING] -> When querying data, if you want to filter *only* on the partition key value element of a compound primary key (as is the case above), ensure that you *explicitly add a secondary index on the partition key*: +> When querying data in a table that has a compound primary key, if you want to filter on the partition key *and* any other non-indexed fields aside from the clustering key, ensure that you *explicitly add a secondary index on the partition key*: > > ```shell > CREATE INDEX ON uprofile.user (user); diff --git a/articles/cosmos-db/cassandra/cassandra-support.md 
b/articles/cosmos-db/cassandra/cassandra-support.md index af9da5214fe4a..3915fec494e2e 100644 --- a/articles/cosmos-db/cassandra/cassandra-support.md +++ b/articles/cosmos-db/cassandra/cassandra-support.md @@ -3,7 +3,7 @@ title: Apache Cassandra features supported by Azure Cosmos DB Cassandra API description: Learn about the Apache Cassandra feature support in Azure Cosmos DB Cassandra API author: TheovanKraay ms.author: thvankra -ms.reviewer: sngun +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: overview diff --git a/articles/cosmos-db/cassandra/cli-samples.md b/articles/cosmos-db/cassandra/cli-samples.md index 56c689d5a05d0..8605e09a5234f 100644 --- a/articles/cosmos-db/cassandra/cli-samples.md +++ b/articles/cosmos-db/cassandra/cli-samples.md @@ -1,12 +1,13 @@ --- title: Azure CLI Samples for Azure Cosmos DB Cassandra API description: Azure CLI Samples for Azure Cosmos DB Cassandra API -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: sample ms.date: 02/21/2022 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurecli --- diff --git a/articles/cosmos-db/cassandra/connect-spark-configuration.md b/articles/cosmos-db/cassandra/connect-spark-configuration.md index 0f30ad5d246e4..e64eedf641d06 100644 --- a/articles/cosmos-db/cassandra/connect-spark-configuration.md +++ b/articles/cosmos-db/cassandra/connect-spark-configuration.md @@ -3,7 +3,7 @@ title: Working with Azure Cosmos DB Cassandra API from Spark description: This article is the main page for Cosmos DB Cassandra API integration from Spark. author: TheovanKraay ms.author: thvankra -ms.reviewer: sngun +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: how-to diff --git a/articles/cosmos-db/cassandra/create-account-java.md b/articles/cosmos-db/cassandra/create-account-java.md index 5cdd8e4c9355c..656c103a36484 100644 --- a/articles/cosmos-db/cassandra/create-account-java.md +++ b/articles/cosmos-db/cassandra/create-account-java.md @@ -3,7 +3,7 @@ title: 'Tutorial: Build Java app to create Azure Cosmos DB Cassandra API account description: This tutorial shows how to create a Cassandra API account, add a database (also called a keyspace), and add a table to that account by using a Java application. 
author: TheovanKraay ms.author: thvankra -ms.reviewer: sngun +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: tutorial diff --git a/articles/cosmos-db/cassandra/kafka-connect.md b/articles/cosmos-db/cassandra/kafka-connect.md index eb59ee5054a23..df506b9814a52 100644 --- a/articles/cosmos-db/cassandra/kafka-connect.md +++ b/articles/cosmos-db/cassandra/kafka-connect.md @@ -1,12 +1,12 @@ --- title: Integrate Apache Kafka and Azure Cosmos DB Cassandra API using Kafka Connect description: Learn how to ingest data from Kafka to Azure Cosmos DB Cassandra API using DataStax Apache Kafka Connector -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: how-to ms.date: 12/14/2020 -ms.author: jroth +ms.author: sidandrews ms.reviewer: abhishgu --- diff --git a/articles/cosmos-db/cassandra/load-data-table.md b/articles/cosmos-db/cassandra/load-data-table.md index 449f6094be32d..589c01fa3df09 100644 --- a/articles/cosmos-db/cassandra/load-data-table.md +++ b/articles/cosmos-db/cassandra/load-data-table.md @@ -7,7 +7,7 @@ ms.topic: tutorial ms.date: 05/20/2019 author: TheovanKraay ms.author: thvankra -ms.reviewer: sngun +ms.reviewer: mjbrown ms.devlang: java #Customer intent: As a developer, I want to build a Java application to load data to a Cassandra API table in Azure Cosmos DB so that customers can store and manage the key/value data and utilize the global distribution, elastic scaling, multi-region , and other capabilities offered by Azure Cosmos DB. diff --git a/articles/cosmos-db/cassandra/manage-data-go.md b/articles/cosmos-db/cassandra/manage-data-go.md index 71f9c1807a3ba..aed241e0f9db9 100644 --- a/articles/cosmos-db/cassandra/manage-data-go.md +++ b/articles/cosmos-db/cassandra/manage-data-go.md @@ -2,8 +2,9 @@ title: Build a Go app with Azure Cosmos DB Cassandra API using the gocql client description: This quickstart shows how to use a Go client to interact with Azure Cosmos DB Cassandra API ms.service: cosmos-db -author: rothja -ms.author: jroth +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.subservice: cosmosdb-cassandra ms.devlang: golang ms.topic: quickstart diff --git a/articles/cosmos-db/cassandra/manage-with-bicep.md b/articles/cosmos-db/cassandra/manage-with-bicep.md index ea0220bcf4b15..15976992c0ca8 100644 --- a/articles/cosmos-db/cassandra/manage-with-bicep.md +++ b/articles/cosmos-db/cassandra/manage-with-bicep.md @@ -1,12 +1,13 @@ --- title: Create and manage Azure Cosmos DB Cassandra API with Bicep description: Use Bicep to create and configure Azure Cosmos DB Cassandra API. 
-author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: how-to ms.date: 9/13/2021 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Manage Azure Cosmos DB Cassandra API resources using Bicep diff --git a/articles/cosmos-db/cassandra/migrate-data-arcion.md b/articles/cosmos-db/cassandra/migrate-data-arcion.md index b6c02ad4a6f05..932d5b10b7c7b 100644 --- a/articles/cosmos-db/cassandra/migrate-data-arcion.md +++ b/articles/cosmos-db/cassandra/migrate-data-arcion.md @@ -7,7 +7,7 @@ ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: how-to ms.date: 04/04/2022 -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Migrate data from Cassandra to Azure Cosmos DB Cassandra API account using Arcion diff --git a/articles/cosmos-db/cassandra/migrate-data-databricks.md b/articles/cosmos-db/cassandra/migrate-data-databricks.md index 0a85cbd7fd8d8..6b96938b3d569 100644 --- a/articles/cosmos-db/cassandra/migrate-data-databricks.md +++ b/articles/cosmos-db/cassandra/migrate-data-databricks.md @@ -52,6 +52,9 @@ Select **Install**, and then restart the cluster when installation is complete. > [!NOTE] > Make sure that you restart the Databricks cluster after the Cassandra Connector library has been installed. +> [!WARNING] +> The samples shown in this article have been tested with Spark **version 3.0.1** and the corresponding Cassandra Spark Connector **com.datastax.spark:spark-cassandra-connector-assembly_2.12:3.0.0**. Later versions of Spark and/or the Cassandra connector may not function as expected. + ## Create Scala Notebook for migration Create a Scala Notebook in Databricks. Replace your source and target Cassandra configurations with the corresponding credentials, and source and target keyspaces and tables. Then run the following code: diff --git a/articles/cosmos-db/cassandra/migrate-data-striim.md b/articles/cosmos-db/cassandra/migrate-data-striim.md index fe36970914489..6744476c273b1 100644 --- a/articles/cosmos-db/cassandra/migrate-data-striim.md +++ b/articles/cosmos-db/cassandra/migrate-data-striim.md @@ -7,7 +7,7 @@ ms.topic: how-to ms.date: 12/09/2021 author: TheovanKraay ms.author: thvankra -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Migrate data to Azure Cosmos DB Cassandra API account using Striim diff --git a/articles/cosmos-db/cassandra/migrate-data.md b/articles/cosmos-db/cassandra/migrate-data.md index 0edafe9673dac..13f88c724522f 100644 --- a/articles/cosmos-db/cassandra/migrate-data.md +++ b/articles/cosmos-db/cassandra/migrate-data.md @@ -3,7 +3,7 @@ title: 'Migrate your data to a Cassandra API account in Azure Cosmos DB- Tutoria description: In this tutorial, learn how to copy data from Apache Cassandra to a Cassandra API account in Azure Cosmos DB. 
author: TheovanKraay ms.author: thvankra -ms.reviewer: sngun +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: tutorial diff --git a/articles/cosmos-db/cassandra/oracle-migrate-cosmos-db-arcion.md b/articles/cosmos-db/cassandra/oracle-migrate-cosmos-db-arcion.md index d5f694f86ff9a..caf44d1c49059 100644 --- a/articles/cosmos-db/cassandra/oracle-migrate-cosmos-db-arcion.md +++ b/articles/cosmos-db/cassandra/oracle-migrate-cosmos-db-arcion.md @@ -7,7 +7,7 @@ ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: how-to ms.date: 04/04/2022 -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Migrate data from Oracle to Azure Cosmos DB Cassandra API account using Arcion diff --git a/articles/cosmos-db/cassandra/postgres-migrate-cosmos-db-kafka.md b/articles/cosmos-db/cassandra/postgres-migrate-cosmos-db-kafka.md index e14ce03b01e22..ea60ca0a9fec7 100644 --- a/articles/cosmos-db/cassandra/postgres-migrate-cosmos-db-kafka.md +++ b/articles/cosmos-db/cassandra/postgres-migrate-cosmos-db-kafka.md @@ -1,12 +1,12 @@ --- title: Migrate data from PostgreSQL to Azure Cosmos DB Cassandra API account using Apache Kafka description: Learn how to use Kafka Connect to synchronize data from PostgreSQL to Azure Cosmos DB Cassandra API in real time. -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: how-to ms.date: 04/02/2022 -ms.author: jroth +ms.author: sidandrews ms.reviewer: abhishgu --- diff --git a/articles/cosmos-db/cassandra/powershell-samples.md b/articles/cosmos-db/cassandra/powershell-samples.md index 664f592e77995..6ff2d283b6ca8 100644 --- a/articles/cosmos-db/cassandra/powershell-samples.md +++ b/articles/cosmos-db/cassandra/powershell-samples.md @@ -1,12 +1,13 @@ --- title: Azure PowerShell samples for Azure Cosmos DB Cassandra API description: Get the Azure PowerShell samples to perform common tasks in Azure Cosmos DB Cassandra API -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: sample ms.date: 01/20/2021 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Azure PowerShell samples for Azure Cosmos DB Cassandra API diff --git a/articles/cosmos-db/cassandra/query-data.md b/articles/cosmos-db/cassandra/query-data.md index 9b3fa26ded27e..67eae5c9be6b5 100644 --- a/articles/cosmos-db/cassandra/query-data.md +++ b/articles/cosmos-db/cassandra/query-data.md @@ -4,7 +4,7 @@ description: This tutorial shows how to query user data from an Azure Cosmos DB ms.service: cosmos-db author: TheovanKraay ms.author: thvankra -ms.reviewer: sngun +ms.reviewer: mjbrown ms.subservice: cosmosdb-cassandra ms.topic: tutorial ms.date: 09/24/2018 diff --git a/articles/cosmos-db/cassandra/secondary-indexing.md b/articles/cosmos-db/cassandra/secondary-indexing.md index 7a76349553858..352a76365c233 100644 --- a/articles/cosmos-db/cassandra/secondary-indexing.md +++ b/articles/cosmos-db/cassandra/secondary-indexing.md @@ -7,7 +7,7 @@ ms.subservice: cosmosdb-cassandra ms.topic: conceptual ms.date: 09/03/2021 ms.author: thvankra -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Secondary indexing in Azure Cosmos DB Cassandra API @@ -27,7 +27,7 @@ It's not advised to create an index on a frequently updated column. 
It is pruden > - Clustering keys > [!WARNING] -> If you have a [compound primary key](cassandra-partitioning.md#compound-primary-key) in your table, and you want to filter *only* on the partition key value element of the compound primary key, please ensure that you *explicitly add a secondary index on the partition key*. Azure Cosmos DB Cassandra API does not apply indexes to partition keys by default, and the index in this scenario may significantly improve query performance. Review our article on [partitioning](cassandra-partitioning.md) for more information. +> Partition keys are not indexed by default in Cassandra API. If you have a [compound primary key](cassandra-partitioning.md#compound-primary-key) in your table, and you filter either on partition key and clustering key, or just partition key, this will give the desired behaviour. However, if you filter on partition key and any other non-indexed fields aside from the clustering key, this will result in a partition key fan-out - even if the other non-indexed fields have a secondary index. If you have a compound primary key in your table, and you want to filter on both the partition key value element of the compound primary key, plus another field that is not the partition key or clustering key, please ensure that you explicitly add a secondary index on the *partition key*. The index in this scenario should significantly improve query performance, even if the other non-partition key and non-clustering key fields have no index. Review our article on [partitioning](cassandra-partitioning.md) for more information. ## Indexing example diff --git a/articles/cosmos-db/cassandra/spark-aggregation-operations.md b/articles/cosmos-db/cassandra/spark-aggregation-operations.md index 943bee8f055b6..64c876e351384 100644 --- a/articles/cosmos-db/cassandra/spark-aggregation-operations.md +++ b/articles/cosmos-db/cassandra/spark-aggregation-operations.md @@ -3,7 +3,7 @@ title: Aggregate operations on Azure Cosmos DB Cassandra API tables from Spark description: This article covers basic aggregation operations against Azure Cosmos DB Cassandra API tables from Spark author: TheovanKraay ms.author: thvankra -ms.reviewer: sngun +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: how-to diff --git a/articles/cosmos-db/cassandra/spark-create-operations.md b/articles/cosmos-db/cassandra/spark-create-operations.md index 4688987a17d4d..c2402ad40ec34 100644 --- a/articles/cosmos-db/cassandra/spark-create-operations.md +++ b/articles/cosmos-db/cassandra/spark-create-operations.md @@ -3,7 +3,7 @@ title: Create or insert data into Azure Cosmos DB Cassandra API from Spark description: This article details how to insert sample data into Azure Cosmos DB Cassandra API tables author: TheovanKraay ms.author: thvankra -ms.reviewer: sngun +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: how-to @@ -47,7 +47,10 @@ spark.conf.set("spark.cassandra.connection.keep_alive_ms", "600000000") ``` > [!NOTE] -> If you are using Spark 3.0 or higher, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector (see above). You will see that connection related properties are defined within the notebook above. Using the syntax below, connection properties can be defined in this manner without needing to be defined at the cluster level (Spark context initialization). 
+> If you are using Spark 3.0, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector (see above). You will see that connection related properties are defined within the notebook above. Using the syntax below, connection properties can be defined in this manner without needing to be defined at the cluster level (Spark context initialization). + +> [!WARNING] +> The Spark 3 samples shown in this article have been tested with Spark **version 3.0.1** and the corresponding Cassandra Spark Connector **com.datastax.spark:spark-cassandra-connector-assembly_2.12:3.0.0**. Later versions of Spark and/or the Cassandra connector may not function as expected. ## Dataframe API diff --git a/articles/cosmos-db/cassandra/spark-databricks.md b/articles/cosmos-db/cassandra/spark-databricks.md index 76abf81cac264..51252027cd8bb 100644 --- a/articles/cosmos-db/cassandra/spark-databricks.md +++ b/articles/cosmos-db/cassandra/spark-databricks.md @@ -3,7 +3,7 @@ title: Access Azure Cosmos DB Cassandra API from Azure Databricks description: This article covers how to work with Azure Cosmos DB Cassandra API from Azure Databricks. author: TheovanKraay ms.author: thvankra -ms.reviewer: sngun +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: how-to @@ -49,7 +49,10 @@ This article details how to work with Azure Cosmos DB Cassandra API from Spark o * **Azure Cosmos DB Cassandra API-specific library:** - If you are using Spark 2.x, a custom connection factory is required to configure the retry policy from the Cassandra Spark connector to Azure Cosmos DB Cassandra API. Add the `com.microsoft.azure.cosmosdb:azure-cosmos-cassandra-spark-helper:1.2.0`[maven coordinates](https://search.maven.org/artifact/com.microsoft.azure.cosmosdb/azure-cosmos-cassandra-spark-helper/1.2.0/jar) to attach the library to the cluster. > [!NOTE] -> If you are using Spark 3.0 or higher, you do not need to install the Cosmos DB Cassandra API-specific library mentioned above. +> If you are using Spark 3.0, you do not need to install the Cosmos DB Cassandra API-specific library mentioned above. + +> [!WARNING] +> The Spark 3 samples shown in this article have been tested with Spark **version 3.0.1** and the corresponding Cassandra Spark Connector **com.datastax.spark:spark-cassandra-connector-assembly_2.12:3.0.0**. Later versions of Spark and/or the Cassandra connector may not function as expected. ## Sample notebooks diff --git a/articles/cosmos-db/cassandra/spark-ddl-operations.md b/articles/cosmos-db/cassandra/spark-ddl-operations.md index 2ccc90de47b18..18ddc5ac454b6 100644 --- a/articles/cosmos-db/cassandra/spark-ddl-operations.md +++ b/articles/cosmos-db/cassandra/spark-ddl-operations.md @@ -3,7 +3,7 @@ title: DDL operations in Azure Cosmos DB Cassandra API from Spark description: This article details keyspace and table DDL operations against Azure Cosmos DB Cassandra API from Spark. author: TheovanKraay ms.author: thvankra -ms.reviewer: sngun +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: how-to @@ -51,7 +51,10 @@ spark.conf.set("spark.cassandra.connection.keep_alive_ms", "600000000") ``` > [!NOTE] -> If you are using Spark 3.0 or higher, you do not need to install the Cosmos DB helper and connection factory. 
You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector (see above). +> If you are using Spark 3.0, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector (see above). + +> [!WARNING] +> The Spark 3 samples shown in this article have been tested with Spark **version 3.0.1** and the corresponding Cassandra Spark Connector **com.datastax.spark:spark-cassandra-connector-assembly_2.12:3.0.0**. Later versions of Spark and/or the Cassandra connector may not function as expected. ## Keyspace DDL operations diff --git a/articles/cosmos-db/cassandra/spark-delete-operation.md b/articles/cosmos-db/cassandra/spark-delete-operation.md index 7c0624f7e29fb..21b8c9aeec210 100644 --- a/articles/cosmos-db/cassandra/spark-delete-operation.md +++ b/articles/cosmos-db/cassandra/spark-delete-operation.md @@ -3,7 +3,7 @@ title: Delete operations on Azure Cosmos DB Cassandra API from Spark description: This article details how to delete data in tables in Azure Cosmos DB Cassandra API from Spark author: TheovanKraay ms.author: thvankra -ms.reviewer: sngun +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: how-to @@ -47,7 +47,10 @@ spark.conf.set("spark.cassandra.connection.keep_alive_ms", "600000000") ``` > [!NOTE] -> If you are using Spark 3.0 or higher, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector (see above). You will see that connection related properties are defined within the notebook above. Using the syntax below, connection properties can be defined in this manner without needing to be defined at the cluster level (Spark context initialization). However, when using operations that require spark context (for example, `CassandraConnector(sc)` for `delete` as shown below), connection properties need to be defined at the cluster level. +> If you are using Spark 3.0, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector (see above). You will see that connection related properties are defined within the notebook above. Using the syntax below, connection properties can be defined in this manner without needing to be defined at the cluster level (Spark context initialization). However, when using operations that require spark context (for example, `CassandraConnector(sc)` for `delete` as shown below), connection properties need to be defined at the cluster level. + +> [!WARNING] +> The Spark 3 samples shown in this article have been tested with Spark **version 3.0.1** and the corresponding Cassandra Spark Connector **com.datastax.spark:spark-cassandra-connector-assembly_2.12:3.0.0**. Later versions of Spark and/or the Cassandra connector may not function as expected. 
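If you want to reproduce the tested combination outside a Databricks cluster, the sketch below shows one way to pin the connector coordinates named in the warning when launching a local `spark-shell`. It's illustrative only: the account host is a placeholder and the authentication settings are omitted.

```bash
# Sketch: pin the tested Cassandra Spark connector version at launch time.
# Replace <account-name> with your Cassandra API account; auth settings omitted.
spark-shell \
  --packages com.datastax.spark:spark-cassandra-connector-assembly_2.12:3.0.0 \
  --conf spark.cassandra.connection.host=<account-name>.cassandra.cosmos.azure.com \
  --conf spark.cassandra.connection.port=10350 \
  --conf spark.cassandra.connection.ssl.enabled=true
```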
## Sample data generator We will use this code fragment to generate sample data: diff --git a/articles/cosmos-db/cassandra/spark-hdinsight.md b/articles/cosmos-db/cassandra/spark-hdinsight.md index fddb142575633..49b82bbd32557 100644 --- a/articles/cosmos-db/cassandra/spark-hdinsight.md +++ b/articles/cosmos-db/cassandra/spark-hdinsight.md @@ -3,7 +3,7 @@ title: Access Azure Cosmos DB Cassandra API on YARN with HDInsight description: This article covers how to work with Azure Cosmos DB Cassandra API from Spark on YARN with HDInsight. author: TheovanKraay ms.author: thvankra -ms.reviewer: sngun +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: how-to diff --git a/articles/cosmos-db/cassandra/spark-read-operation.md b/articles/cosmos-db/cassandra/spark-read-operation.md index 2cef5683534ac..1508ed2e6b332 100644 --- a/articles/cosmos-db/cassandra/spark-read-operation.md +++ b/articles/cosmos-db/cassandra/spark-read-operation.md @@ -4,7 +4,7 @@ titleSufix: Azure Cosmos DB description: This article describes how to read data from Cassandra API tables in Azure Cosmos DB. author: TheovanKraay ms.author: thvankra -ms.reviewer: sngun +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: how-to @@ -48,7 +48,10 @@ spark.conf.set("spark.cassandra.connection.keep_alive_ms", "600000000") ``` > [!NOTE] -> If you are using Spark 3.0 or higher, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector(see above). You will see that connection related properties are defined within the notebook above. Using the syntax below, connection properties can be defined in this manner without needing to be defined at the cluster level (Spark context initialization). +> If you are using Spark 3.0, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector(see above). You will see that connection related properties are defined within the notebook above. Using the syntax below, connection properties can be defined in this manner without needing to be defined at the cluster level (Spark context initialization). + +> [!WARNING] +> The Spark 3 samples shown in this article have been tested with Spark **version 3.0.1** and the corresponding Cassandra Spark Connector **com.datastax.spark:spark-cassandra-connector-assembly_2.12:3.0.0**. Later versions of Spark and/or the Cassandra connector may not function as expected. ## Dataframe API diff --git a/articles/cosmos-db/cassandra/spark-table-copy-operations.md b/articles/cosmos-db/cassandra/spark-table-copy-operations.md index c514955575553..0d0a13adab14a 100644 --- a/articles/cosmos-db/cassandra/spark-table-copy-operations.md +++ b/articles/cosmos-db/cassandra/spark-table-copy-operations.md @@ -3,7 +3,7 @@ title: Table copy operations on Azure Cosmos DB Cassandra API from Spark description: This article details how to copy data between tables in Azure Cosmos DB Cassandra API author: TheovanKraay ms.author: thvankra -ms.reviewer: sngun +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: how-to @@ -49,6 +49,9 @@ spark.conf.set("spark.cassandra.connection.keep_alive_ms", "600000000") > [!NOTE] > If you are using Spark 3.0 or higher, you do not need to install the Cosmos DB helper and connection factory. 
You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector (see above). You will see that connection related properties are defined within the notebook above. Using the syntax below, connection properties can be defined in this manner without needing to be defined at the cluster level (Spark context initialization). +> [!WARNING] +> The Spark 3 samples shown in this article have been tested with Spark **version 3.0.1** and the corresponding Cassandra Spark Connector **com.datastax.spark:spark-cassandra-connector-assembly_2.12:3.0.0**. Later versions of Spark and/or the Cassandra connector may not function as expected. + ## Insert sample data ```scala val booksDF = Seq( diff --git a/articles/cosmos-db/cassandra/spark-upsert-operations.md b/articles/cosmos-db/cassandra/spark-upsert-operations.md index 1ec329b2e20cf..23ae8a39ff6f0 100644 --- a/articles/cosmos-db/cassandra/spark-upsert-operations.md +++ b/articles/cosmos-db/cassandra/spark-upsert-operations.md @@ -3,7 +3,7 @@ title: Upsert data into Azure Cosmos DB Cassandra API from Spark description: This article details how to upsert into tables in Azure Cosmos DB Cassandra API from Spark author: TheovanKraay ms.author: thvankra -ms.reviewer: sngun +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: how-to @@ -47,7 +47,10 @@ spark.conf.set("spark.cassandra.connection.keep_alive_ms", "600000000") ``` > [!NOTE] -> If you are using Spark 3.0 or higher, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector (see above). You will see that connection related properties are defined within the notebook above. Using the syntax below, connection properties can be defined in this manner without needing to be defined at the cluster level (Spark context initialization). However, when using operations that require spark context (for example, `CassandraConnector(sc)` for `update` as shown below), connection properties need to be defined at the cluster level. +> If you are using Spark 3.0, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector (see above). You will see that connection related properties are defined within the notebook above. Using the syntax below, connection properties can be defined in this manner without needing to be defined at the cluster level (Spark context initialization). However, when using operations that require spark context (for example, `CassandraConnector(sc)` for `update` as shown below), connection properties need to be defined at the cluster level. + +> [!WARNING] +> The Spark 3 samples shown in this article have been tested with Spark **version 3.0.1** and the corresponding Cassandra Spark Connector **com.datastax.spark:spark-cassandra-connector-assembly_2.12:3.0.0**. Later versions of Spark and/or the Cassandra connector may not function as expected. 
## Dataframe API diff --git a/articles/cosmos-db/cassandra/templates-samples.md b/articles/cosmos-db/cassandra/templates-samples.md index 918f01ae44b04..105d11682f6cf 100644 --- a/articles/cosmos-db/cassandra/templates-samples.md +++ b/articles/cosmos-db/cassandra/templates-samples.md @@ -1,12 +1,13 @@ --- title: Resource Manager templates for Azure Cosmos DB Cassandra API description: Use Azure Resource Manager templates to create and configure Azure Cosmos DB Cassandra API. -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: how-to ms.date: 10/14/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Manage Azure Cosmos DB Cassandra API resources using Azure Resource Manager templates diff --git a/articles/cosmos-db/change-feed.md b/articles/cosmos-db/change-feed.md index c7e092a5c884a..9e454cf9486ef 100644 --- a/articles/cosmos-db/change-feed.md +++ b/articles/cosmos-db/change-feed.md @@ -1,12 +1,12 @@ --- title: Working with the change feed support in Azure Cosmos DB description: Use Azure Cosmos DB change feed support to track changes in documents, event-based processing like triggers, and keep caches and analytic systems up-to-date -author: timsander1 -ms.author: tisande +author: seesharprun +ms.author: sidandrews +ms.reviewer: jucocchi ms.service: cosmos-db ms.topic: conceptual ms.date: 06/07/2021 -ms.reviewer: sngun ms.custom: seodec18, "seo-nov-2020" --- # Change feed in Azure Cosmos DB diff --git a/articles/cosmos-db/choose-api.md b/articles/cosmos-db/choose-api.md index 7ce081defc504..4ebd5d761e39f 100644 --- a/articles/cosmos-db/choose-api.md +++ b/articles/cosmos-db/choose-api.md @@ -1,8 +1,9 @@ --- title: Choose an API in Azure Cosmos DB description: Learn how to choose between SQL/Core, MongoDB, Cassandra, Gremlin, and table APIs in Azure Cosmos DB based on your workload requirements. -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: conceptual ms.date: 12/08/2021 @@ -58,7 +59,7 @@ You can use your existing MongoDB apps with API for MongoDB by just changing the This API stores data in column-oriented schema. Apache Cassandra offers a highly distributed, horizontally scaling approach to storing large volumes of data while offering a flexible approach to a column-oriented schema. Cassandra API in Azure Cosmos DB aligns with this philosophy to approaching distributed NoSQL databases. Cassandra API is wire protocol compatible with the Apache Cassandra. You should consider Cassandra API if you want to benefit the elasticity and fully managed nature of Azure Cosmos DB and still use most of the native Apache Cassandra features, tools, and ecosystem. This means on Cassandra API you don't need to manage the OS, Java VM, garbage collector, read/write performance, nodes, clusters, etc. -You can use Apache Cassandra client drivers to connect to the Cassandra API. The Cassandra API enables you to interact with data using the Cassandra Query Language (CQL), and tools like CQL shell, Cassandra client drivers that you're already familiar with. Cassandra API currently only supports OLTP scenarios. Using Cassandra API, you can also use the unique features of Azure Cosmos DB such as change feed. To learn more, see [Cassandra API](cassandra-introduction.md) article. +You can use Apache Cassandra client drivers to connect to the Cassandra API. 
The Cassandra API enables you to interact with data using the Cassandra Query Language (CQL), and tools like CQL shell, Cassandra client drivers that you're already familiar with. Cassandra API currently only supports OLTP scenarios. Using Cassandra API, you can also use the unique features of Azure Cosmos DB such as [change feed](cassandra-change-feed.md). To learn more, see [Cassandra API](cassandra-introduction.md) article. If you're already familiar with Apache Cassandra, but new to Azure Cosmos DB, we recommend our article on [how to adapt to the Cassandra API if you are coming from Apache Cassandra](./cassandra/cassandra-adoption.md). ## Gremlin API diff --git a/articles/cosmos-db/common-cli-samples.md b/articles/cosmos-db/common-cli-samples.md index 1f3f3c01da6d1..23c9e6416028b 100644 --- a/articles/cosmos-db/common-cli-samples.md +++ b/articles/cosmos-db/common-cli-samples.md @@ -5,8 +5,9 @@ ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: sample ms.date: 02/22/2022 -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurecli --- diff --git a/articles/cosmos-db/common-powershell-samples.md b/articles/cosmos-db/common-powershell-samples.md index 63a7f7dd84a70..5a7b251b125f0 100644 --- a/articles/cosmos-db/common-powershell-samples.md +++ b/articles/cosmos-db/common-powershell-samples.md @@ -4,8 +4,9 @@ description: Azure PowerShell Samples common to all Azure Cosmos DB APIs ms.service: cosmos-db ms.topic: sample ms.date: 05/02/2022 -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurecli --- diff --git a/articles/cosmos-db/compliance.md b/articles/cosmos-db/compliance.md index 852f2c936fde7..9d25535663543 100644 --- a/articles/cosmos-db/compliance.md +++ b/articles/cosmos-db/compliance.md @@ -6,7 +6,7 @@ ms.author: thweiss ms.service: cosmos-db ms.topic: conceptual ms.date: 09/11/2021 -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Compliance in Azure Cosmos DB diff --git a/articles/cosmos-db/concepts-limits.md b/articles/cosmos-db/concepts-limits.md index fdb7976846339..5beb64aa6ec10 100644 --- a/articles/cosmos-db/concepts-limits.md +++ b/articles/cosmos-db/concepts-limits.md @@ -1,11 +1,12 @@ --- title: Azure Cosmos DB service quotas description: Azure Cosmos DB service quotas and default limits on different resource types. -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: conceptual -ms.date: 04/27/2022 +ms.date: 05/30/2022 --- # Azure Cosmos DB service quotas @@ -93,9 +94,10 @@ Depending on the current RU/s provisioned and resource settings, each resource c | Maximum RU/s per container | 5,000 | | Maximum storage across all items per (logical) partition | 20 GB | | Maximum number of distinct (logical) partition keys | Unlimited | -| Maximum storage per container (SQL API, Mongo API, Table API, Gremlin API)| 50 GB | -| Maximum storage per container (Cassandra API)| 30 GB | +| Maximum storage per container (SQL API, Mongo API, Table API, Gremlin API)| 1 TB | +| Maximum storage per container (Cassandra API)| 1 TB | +1 Serverless containers up to 1 TB are currently in preview with Azure Cosmos DB. To try the new feature, register the *"Azure Cosmos DB Serverless 1 TB Container Preview"* [preview feature in your Azure subscription](../azure-resource-manager/management/preview-features.md). 
## Control plane operations @@ -104,7 +106,7 @@ You can [provision and manage your Azure Cosmos account](how-to-manage-database- | Resource | Limit | | --- | --- | | Maximum number of accounts per subscription | 50 by default. 1 | -| Maximum number of regional failovers | 1/hour by default. 1 2 | +| Maximum number of regional failovers | 10/hour by default. 1 2 | 1 You can increase these limits by creating an [Azure Support request](create-support-request-quota-increase.md). @@ -163,7 +165,7 @@ An Azure Cosmos item can represent either a document in a collection, a row in a | Maximum level of nesting for embedded objects / arrays | 128 | | Maximum TTL value |2147483647 | -1 Large document sizes up to 16 Mb are currently in preview with Azure Cosmos DB API for MongoDB only. Sign-up for the feature “Azure Cosmos DB API For MongoDB 16MB Document Support” from [Preview Features the Azure portal](./access-previews.md), to try the new feature. +1 Large document sizes up to 16 MB are currently in preview with Azure Cosmos DB API for MongoDB only. Sign up for the feature “Azure Cosmos DB API For MongoDB 16 MB Document Support” from [Preview Features in the Azure portal](./access-previews.md) to try the new feature. There are no restrictions on the item payloads (like number of properties and nesting depth), except for the length restrictions on partition key and ID values, and the overall size restriction of 2 MB. You may have to configure indexing policy for containers with large or complex item structures to reduce RU consumption. See [Modeling items in Cosmos DB](how-to-model-partition-example.md) for a real-world example, and patterns to manage large items. @@ -215,7 +217,7 @@ See the [Autoscale](provision-throughput-autoscale.md#autoscale-limits) article | Current RU/s the system is scaled to | `0.1*Tmax <= T <= Tmax`, based on usage| | Minimum billable RU/s per hour| `0.1 * Tmax`

                  Billing is done on a per-hour basis, where you're billed for the highest RU/s the system scaled to in the hour, or `0.1*Tmax`, whichever is higher. | | Minimum autoscale max RU/s for a container | `MAX(1000, highest max RU/s ever provisioned / 10, current storage in GB * 100)` rounded to nearest 1000 RU/s | -| Minimum autoscale max RU/s for a database | `MAX(1000, highest max RU/s ever provisioned / 10, current storage in GB * 100, 1000 + (MAX(Container count - 25, 0) * 1000))`, rounded to nearest 1000 RU/s.

                  Note if your database has more than 25 containers, the system increments the minimum autoscale max RU/s by 1000 RU/s per additional container. For example, if you have 30 containers, the lowest autoscale maximum RU/s you can set is 6000 RU/s (scales between 600 - 6000 RU/s). +| Minimum autoscale max RU/s for a database | `MAX(1000, highest max RU/s ever provisioned / 10, current storage in GB * 100, 1000 + (MAX(Container count - 25, 0) * 1000))`, rounded to nearest 1000 RU/s.

                  Note if your database has more than 25 containers, the system increments the minimum autoscale max RU/s by 1000 RU/s per extra container. For example, if you have 30 containers, the lowest autoscale maximum RU/s you can set is 6000 RU/s (scales between 600 - 6000 RU/s). ## SQL query limits @@ -291,7 +293,7 @@ Get started with Azure Cosmos DB with one of our quickstarts: * [Get started with Azure Cosmos DB Gremlin API](create-graph-dotnet.md) * [Get started with Azure Cosmos DB Table API](table/create-table-dotnet.md) * Trying to do capacity planning for a migration to Azure Cosmos DB? You can use information about your existing database cluster for capacity planning. - * If all you know is the number of vcores and servers in your existing database cluster, read about [estimating request units using vCores or vCPUs](convert-vcore-to-request-unit.md) + * If all you know is the number of vCores and servers in your existing database cluster, read about [estimating request units using vCores or vCPUs](convert-vcore-to-request-unit.md) * If you know typical request rates for your current database workload, read about [estimating request units using Azure Cosmos DB capacity planner](estimate-ru-with-capacity-planner.md) > [!div class="nextstepaction"] diff --git a/articles/cosmos-db/configure-periodic-backup-restore.md b/articles/cosmos-db/configure-periodic-backup-restore.md index 8f2119aedf27f..1d95b7735c72b 100644 --- a/articles/cosmos-db/configure-periodic-backup-restore.md +++ b/articles/cosmos-db/configure-periodic-backup-restore.md @@ -6,7 +6,7 @@ ms.service: cosmos-db ms.topic: how-to ms.date: 12/09/2021 ms.author: govindk -ms.reviewer: sngun +ms.reviewer: mjbrown --- diff --git a/articles/cosmos-db/conflict-resolution-policies.md b/articles/cosmos-db/conflict-resolution-policies.md index e3ec9def136b8..2c9b4046ce743 100644 --- a/articles/cosmos-db/conflict-resolution-policies.md +++ b/articles/cosmos-db/conflict-resolution-policies.md @@ -1,13 +1,13 @@ --- title: Conflict resolution types and resolution policies in Azure Cosmos DB description: This article describes the conflict categories and conflict resolution policies in Azure Cosmos DB. -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 04/20/2020 -ms.author: mjbrown -ms.reviewer: sngun +ms.author: sidandrews +ms.reviewer: mjbrown --- # Conflict types and resolution policies when using multiple write regions diff --git a/articles/cosmos-db/consistency-levels.md b/articles/cosmos-db/consistency-levels.md index 653c7d98f491a..8ed64cab8e80d 100644 --- a/articles/cosmos-db/consistency-levels.md +++ b/articles/cosmos-db/consistency-levels.md @@ -1,8 +1,9 @@ --- title: Consistency levels in Azure Cosmos DB description: Azure Cosmos DB has five consistency levels to help balance eventual consistency, availability, and latency trade-offs. 
-author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: conceptual ms.date: 02/17/2022 diff --git a/articles/cosmos-db/continuous-backup-restore-frequently-asked-questions.yml b/articles/cosmos-db/continuous-backup-restore-frequently-asked-questions.yml index 32af0008d9e6c..8811e74cf0efa 100644 --- a/articles/cosmos-db/continuous-backup-restore-frequently-asked-questions.yml +++ b/articles/cosmos-db/continuous-backup-restore-frequently-asked-questions.yml @@ -7,7 +7,7 @@ metadata: ms.topic: faq ms.date: 04/14/2022 ms.author: govindk - ms.reviewer: wiassaf + ms.reviewer: mjbrown title: Frequently asked questions about the Azure Cosmos DB continuous backup summary: | [!INCLUDE[appliesto-all-apis-except-cassandra](includes/appliesto-all-apis-except-cassandra.md)] diff --git a/articles/cosmos-db/continuous-backup-restore-introduction.md b/articles/cosmos-db/continuous-backup-restore-introduction.md index 495d0bc692b98..6dc849903ba97 100644 --- a/articles/cosmos-db/continuous-backup-restore-introduction.md +++ b/articles/cosmos-db/continuous-backup-restore-introduction.md @@ -6,7 +6,7 @@ ms.service: cosmos-db ms.topic: conceptual ms.date: 04/06/2022 ms.author: govindk -ms.reviewer: wiassaf +ms.reviewer: mjbrown ms.custom: references_regions, cosmos-db-video --- @@ -102,7 +102,7 @@ For example, if you have 1 TB of data in two regions then: * Restore cost is calculated as (1000 * 0.15) = $150 per restore > [!TIP] -> For more information about measuring the current data usage of your Azure Cosmos DB account, see [Explore Azure Monitor Cosmos DB insights](/azure/azure-monitor/insights/cosmosdb-insights-overview#view-utilization-and-performance-metrics-for-azure-cosmos-db). +> For more information about measuring the current data usage of your Azure Cosmos DB account, see [Explore Azure Monitor Cosmos DB insights](../azure-monitor/insights/cosmosdb-insights-overview.md#view-utilization-and-performance-metrics-for-azure-cosmos-db). ## Customer-managed keys @@ -152,4 +152,4 @@ Currently the point in time restore functionality has the following limitations: * Restore continuous backup account using [Azure portal](restore-account-continuous-backup.md#restore-account-portal), [PowerShell](restore-account-continuous-backup.md#restore-account-powershell), [CLI](restore-account-continuous-backup.md#restore-account-cli), or [Azure Resource Manager](restore-account-continuous-backup.md#restore-arm-template). * [Migrate to an account from periodic backup to continuous backup](migrate-continuous-backup.md). * [Manage permissions](continuous-backup-restore-permissions.md) required to restore data with continuous backup mode. 
-* [Resource model of continuous backup mode](continuous-backup-restore-resource-model.md) +* [Resource model of continuous backup mode](continuous-backup-restore-resource-model.md) \ No newline at end of file diff --git a/articles/cosmos-db/continuous-backup-restore-permissions.md b/articles/cosmos-db/continuous-backup-restore-permissions.md index 7e14f602544ca..db6889f9c262c 100644 --- a/articles/cosmos-db/continuous-backup-restore-permissions.md +++ b/articles/cosmos-db/continuous-backup-restore-permissions.md @@ -6,7 +6,7 @@ ms.service: cosmos-db ms.topic: how-to ms.date: 02/28/2022 ms.author: govindk -ms.reviewer: wiassaf +ms.reviewer: mjbrown ms.custom: subject-rbac-steps --- diff --git a/articles/cosmos-db/continuous-backup-restore-resource-model.md b/articles/cosmos-db/continuous-backup-restore-resource-model.md index 872ea73b2d8df..7b3c6fd1bfa77 100644 --- a/articles/cosmos-db/continuous-backup-restore-resource-model.md +++ b/articles/cosmos-db/continuous-backup-restore-resource-model.md @@ -6,7 +6,7 @@ ms.author: govindk ms.service: cosmos-db ms.topic: conceptual ms.date: 03/02/2022 -ms.reviewer: wiassaf +ms.reviewer: mjbrown --- @@ -38,7 +38,7 @@ The `RestoreParameters` resource contains the restore operation details includin |---------|---------| |restoreMode | The restore mode should be *PointInTime* | |restoreSource | The instanceId of the source account from which the restore will be initiated. | -|restoreTimestampInUtc | Point in time in UTC to which the account should be restored to. | +|restoreTimestampInUtc | Point in time in UTC to restore the account. | |databasesToRestore | List of `DatabaseRestoreResource` objects to specify which databases and containers should be restored. Each resource represents a single database and all the collections under that database, see the [restorable SQL resources](#restorable-sql-resources) section for more details. If this value is empty, then the entire account is restored. | |gremlinDatabasesToRestore | List of `GremlinDatabaseRestoreResource` objects to specify which databases and graphs should be restored. Each resource represents a single database and all the graphs under that database. See the [restorable Gremlin resources](#restorable-graph-resources) section for more details. If this value is empty, then the entire account is restored. | |tablesToRestore | List of `TableRestoreResource` objects to specify which tables should be restored. Each resource represents a table under that database, see the [restorable Table resources](#restorable-table-resources) section for more details. If this value is empty, then the entire account is restored. | @@ -126,7 +126,7 @@ Each resource contains information of a mutation event such as creation and dele | eventTimestamp | The time in UTC when the database is created or deleted. | | ownerId | The name of the SQL database. | | ownerResourceId | The resource ID of the SQL database| -| operationType | The operation type of this database event. Here are the possible values:

                  • Create: database creation event
                  • Delete: database deletion event
                  • Replace: database modification event
                  • SystemOperation: database modification event triggered by the system. This event is not initiated by the user
                  | +| operationType | The operation type of this database event. Here are the possible values:
                  • Create: database creation event
                  • Delete: database deletion event
                  • Replace: database modification event
                  • SystemOperation: database modification event triggered by the system. This event isn't initiated by the user
                  | | database |The properties of the SQL database at the time of the event| To get a list of all database mutations, see [Restorable Sql Databases - List](/rest/api/cosmos-db-resource-provider/2021-04-01-preview/restorable-sql-databases/list) article. @@ -140,7 +140,7 @@ Each resource contains information of a mutation event such as creation and dele | eventTimestamp | The time in UTC when this container event happened.| | ownerId| The name of the SQL container.| | ownerResourceId | The resource ID of the SQL container.| -| operationType | The operation type of this container event. Here are the possible values:
                  • Create: container creation event
                  • Delete: container deletion event
                  • Replace: container modification event
                  • SystemOperation: container modification event triggered by the system. This event is not initiated by the user
                  | +| operationType | The operation type of this container event. Here are the possible values:
                  • Create: container creation event
                  • Delete: container deletion event
                  • Replace: container modification event
                  • SystemOperation: container modification event triggered by the system. This event isn't initiated by the user
                  | | container | The properties of the SQL container at the time of the event.| To get a list of all container mutations under the same database, see [Restorable Sql Containers - List](/rest/api/cosmos-db-resource-provider/2021-04-01-preview/restorable-sql-containers/list) article. @@ -165,7 +165,7 @@ Each resource contains information of a mutation event such as creation and dele |eventTimestamp| The time in UTC when this database event happened.| | ownerId| The name of the MongoDB database. | | ownerResourceId | The resource ID of the MongoDB database. | -| operationType | The operation type of this database event. Here are the possible values:
                  • Create: database creation event
                  • Delete: database deletion event
                  • Replace: database modification event
                  • SystemOperation: database modification event triggered by the system. This event is not initiated by the user
                  | +| operationType | The operation type of this database event. Here are the possible values:
                  • Create: database creation event
                  • Delete: database deletion event
                  • Replace: database modification event
                  • SystemOperation: database modification event triggered by the system. This event isn't initiated by the user
                  | To get a list of all database mutation, see [Restorable Mongodb Databases - List](/rest/api/cosmos-db-resource-provider/2021-04-01-preview/restorable-mongodb-databases/list) article. @@ -178,9 +178,9 @@ Each resource contains information of a mutation event such as creation and dele | eventTimestamp |The time in UTC when this collection event happened. | | ownerId| The name of the MongoDB collection. | | ownerResourceId | The resource ID of the MongoDB collection. | -| operationType |The operation type of this collection event. Here are the possible values:
                  • Create: collection creation event
                  • Delete: collection deletion event
                  • Replace: collection modification event
                  • SystemOperation: collection modification event triggered by the system. This event is not initiated by the user
                  | +| operationType |The operation type of this collection event. Here are the possible values:
                  • Create: collection creation event
                  • Delete: collection deletion event
                  • Replace: collection modification event
                  • SystemOperation: collection modification event triggered by the system. This event isn't initiated by the user
                  | -To get a list of all container mutations under the same database, see [Restorable Mongodb Collections - List](/rest/api/cosmos-db-resource-provider/2021-04-01-preview/restorable-mongodb-collections/list) article. +To get a list of all container mutations under the same database see [Restorable Mongodb Collections - List](/rest/api/cosmos-db-resource-provider/2021-04-01-preview/restorable-mongodb-collections/list) article. ### Restorable MongoDB resources @@ -213,9 +213,9 @@ Each resource contains information about a mutation event, such as a creation an |eventTimestamp| The time in UTC when this database event happened.| | ownerId| The name of the Graph database. | | ownerResourceId | The resource ID of the Graph database. | -| operationType | The operation type of this database event. Here are the possible values:
                  • Create: database creation event
                  • Delete: database deletion event
                  • Replace: database modification event
                  • SystemOperation: database modification event triggered by the system. This event is not initiated by the user.
                  | +| operationType | The operation type of this database event. Here are the possible values:
                  • Create: database creation event
                  • Delete: database deletion event
                  • Replace: database modification event
                  • SystemOperation: database modification event triggered by the system. This event isn't initiated by the user.
                  | -To get a event feed of all mutations on the Gremlin database for the account, see the [Restorable Graph Databases - List]( /rest/api/cosmos-db-resource-provider/2021-11-15-preview/restorable-gremlin-databases/list) article. +To get an event feed of all mutations on the Gremlin database for the account, see the [Restorable Graph Databases - List]( /rest/api/cosmos-db-resource-provider/2021-11-15-preview/restorable-gremlin-databases/list) article. ### Restorable Graphs @@ -226,19 +226,19 @@ Each resource contains information of a mutation event such as creation and dele | eventTimestamp |The time in UTC when this collection event happened. | | ownerId| The name of the Graph collection. | | ownerResourceId | The resource ID of the Graph collection. | -| operationType |The operation type of this collection event. Here are the possible values:
                  • Create: Graph creation event
                  • Delete: Graph deletion event
                  • Replace: Graph modification event
                  • SystemOperation: collection modification event triggered by the system. This event is not initiated by the user.
                  | +| operationType |The operation type of this collection event. Here are the possible values:
                  • Create: Graph creation event
                  • Delete: Graph deletion event
                  • Replace: Graph modification event
                  • SystemOperation: collection modification event triggered by the system. This event isn't initiated by the user.
                  | To get a list of all container mutations under the same database, see graph [Restorable Graphs - List](/rest/api/cosmos-db-resource-provider/2021-11-15-preview/restorable-gremlin-graphs/list) article. ### Restorable Table resources -Lists all the restorable Azure Cosmos DB Tables available for a specific database account at a given time and location. Note the Table API does not specify an explicit database. +Lists all the restorable Azure Cosmos DB Tables available for a specific database account at a given time and location. Note the Table API doesn't specify an explicit database. |Property Name |Description | |---------|---------| | TableNames | The list of Table containers under this account. | -To get a list of Table that exist on the account at the given timestamp and location, see [Restorable Table Resources - List](/rest/api/cosmos-db-resource-provider/2021-11-15-preview/restorable-table-resources/list) article. +To get a list of tables that exist on the account at the given timestamp and location, see [Restorable Table Resources - List](/rest/api/cosmos-db-resource-provider/2021-11-15-preview/restorable-table-resources/list) article. ### Restorable Table @@ -249,7 +249,7 @@ Each resource contains information of a mutation event such as creation and dele |eventTimestamp| The time in UTC when this database event happened.| | ownerId| The name of the Table database. | | ownerResourceId | The resource ID of the Table resource. | -| operationType | The operation type of this Table event. Here are the possible values:
                  • Create: Table creation event
                  • Delete: Table deletion event
                  • Replace: Table modification event
                  • SystemOperation: database modification event triggered by the system. This event is not initiated by the user
                  | +| operationType | The operation type of this Table event. Here are the possible values:
                  • Create: Table creation event
                  • Delete: Table deletion event
                  • Replace: Table modification event
                  • SystemOperation: database modification event triggered by the system. This event isn't initiated by the user
                  | To get a list of all table mutations under the same database, see [Restorable Table - List](/rest/api/cosmos-db-resource-provider/2021-11-15-preview/restorable-tables/list) article. diff --git a/articles/cosmos-db/convert-vcore-to-request-unit.md b/articles/cosmos-db/convert-vcore-to-request-unit.md index 63353676f51dc..459d4511bdf68 100644 --- a/articles/cosmos-db/convert-vcore-to-request-unit.md +++ b/articles/cosmos-db/convert-vcore-to-request-unit.md @@ -1,8 +1,9 @@ --- title: 'Convert the number of vCores or vCPUs in your nonrelational database to Azure Cosmos DB RU/s' description: 'Convert the number of vCores or vCPUs in your nonrelational database to Azure Cosmos DB RU/s' -author: rothja -ms.author: jroth +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: tutorial diff --git a/articles/cosmos-db/cosmos-db-reserved-capacity.md b/articles/cosmos-db/cosmos-db-reserved-capacity.md index 248e4fdc282a9..a8f4e48e57224 100644 --- a/articles/cosmos-db/cosmos-db-reserved-capacity.md +++ b/articles/cosmos-db/cosmos-db-reserved-capacity.md @@ -1,12 +1,12 @@ --- title: Reserved capacity in Azure Cosmos DB to Optimize cost description: Learn how to buy Azure Cosmos DB reserved capacity to save on your compute costs. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.topic: conceptual ms.date: 08/26/2021 -ms.author: tisande -ms.reviewer: sngun +ms.author: sidandrews +ms.reviewer: jucocchi --- # Optimize cost with reserved capacity in Azure Cosmos DB diff --git a/articles/cosmos-db/cosmosdb-migrationchoices.md b/articles/cosmos-db/cosmosdb-migrationchoices.md index c04360496c788..727d157e0dcfb 100644 --- a/articles/cosmos-db/cosmosdb-migrationchoices.md +++ b/articles/cosmos-db/cosmosdb-migrationchoices.md @@ -1,8 +1,9 @@ --- title: Cosmos DB Migration options description: This doc describes the various options to migrate your on-premises or cloud data to Azure Cosmos DB -author: rothja -ms.author: jroth +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: how-to ms.date: 04/02/2022 @@ -39,11 +40,15 @@ The following factors determine the choice of the migration tool: If you need help with capacity planning, consider reading our [guide to estimating RU/s using Azure Cosmos DB capacity planner](estimate-ru-with-capacity-planner.md). * If you are migrating from a vCores- or server-based platform and you need guidance on estimating request units, consider reading our [guide to estimating RU/s based on vCores](estimate-ru-with-capacity-planner.md). +>[!IMPORTANT] +> The [Custom Migration Service using ChangeFeed](https://github.com/Azure-Samples/azure-cosmosdb-live-data-migrator) is an open-source tool for live container migrations that implements change feed and bulk support. However, please note that the user interface application code for this tool is not supported or actively maintained by Microsoft. For Azure Cosmos DB SQL API live container migrations, we recommend using the Spark Connector + Change Feed as illustrated in the [sample](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/cosmos/azure-cosmos-spark_3_2-12/Samples/DatabricksLiveContainerMigration) below. The [Spark Connector for Azure Cosmos DB](./create-sql-api-spark.md) is fully supported by Microsoft. 
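To make the recommendation above more concrete, here is a rough, hypothetical sketch of a live container migration with the Azure Cosmos DB Spark 3 connector and change feed; it is not taken from the linked sample. The endpoint, key, database and container names, and checkpoint path are placeholders, the `spark` session is assumed to exist (as in a Databricks notebook), and option names should be verified against the connector version you actually use.

```scala
// Hypothetical values: replace the endpoint, key, names, and checkpoint path with your own.
val cosmosConfig = Map(
  "spark.cosmos.accountEndpoint" -> "https://<your-account>.documents.azure.com:443/",
  "spark.cosmos.accountKey" -> "<your-account-key>",
  "spark.cosmos.database" -> "<database-name>"
)

// Stream historic documents plus live updates from the source container's change feed.
val changeFeedDF = spark.readStream
  .format("cosmos.oltp.changeFeed")
  .options(cosmosConfig ++ Map(
    "spark.cosmos.container" -> "<source-container>",
    "spark.cosmos.changeFeed.startFrom" -> "Beginning",
    "spark.cosmos.changeFeed.mode" -> "Incremental"
  ))
  .load()

// Continuously write the stream into the target container; the checkpoint lets the job resume.
val migration = changeFeedDF.writeStream
  .format("cosmos.oltp")
  .options(cosmosConfig ++ Map("spark.cosmos.container" -> "<target-container>"))
  .option("checkpointLocation", "/tmp/live-migration-checkpoint")
  .outputMode("append")
  .start()
```

Running this as a structured streaming job with a checkpoint location lets the migration resume after interruptions while it keeps picking up live updates from the source container, as summarized in the table that follows.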
+ |Migration type|Solution|Supported sources|Supported targets|Considerations| |---------|---------|---------|---------|---------| |Offline|[Data Migration Tool](import-data.md)| •JSON/CSV Files
                  •Azure Cosmos DB SQL API
                  •MongoDB
                  •SQL Server
                  •Table Storage
                  •AWS DynamoDB
                  •Azure Blob Storage|•Azure Cosmos DB SQL API
                  •Azure Cosmos DB Tables API
                  •JSON Files |• Easy to set up and supports multiple sources.
                  • Not suitable for large datasets.| |Offline|[Azure Data Factory](../data-factory/connector-azure-cosmos-db.md)| •JSON/CSV Files
                  •Azure Cosmos DB SQL API
                  •Azure Cosmos DB API for MongoDB
                  •MongoDB
                  •SQL Server
                  •Table Storage
                  •Azure Blob Storage

                  See the [Azure Data Factory](../data-factory/connector-overview.md) article for other supported sources.|•Azure Cosmos DB SQL API
                  •Azure Cosmos DB API for MongoDB
                  •JSON Files

                  See the [Azure Data Factory](../data-factory/connector-overview.md) article for other supported targets. |• Easy to set up and supports multiple sources.
                  • Makes use of the Azure Cosmos DB bulk executor library.
                  • Suitable for large datasets.
                  • Lack of checkpointing - It means that if an issue occurs during the course of migration, you need to restart the whole migration process.
                  • Lack of a dead letter queue - It means that a few erroneous files can stop the entire migration process.| |Offline|[Azure Cosmos DB Spark connector](./create-sql-api-spark.md)|Azure Cosmos DB SQL API.

                  You can use other sources with additional connectors from the Spark ecosystem.| Azure Cosmos DB SQL API.

                  You can use other targets with additional connectors from the Spark ecosystem.| • Makes use of the Azure Cosmos DB bulk executor library.
                  • Suitable for large datasets.
                  • Needs a custom Spark setup.
                  • Spark is sensitive to schema inconsistencies and this can be a problem during migration. | +|Online|[Azure Cosmos DB Spark connector + Change Feed](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/cosmos/azure-cosmos-spark_3_2-12/Samples/DatabricksLiveContainerMigration)|Azure Cosmos DB SQL API.

                  Uses Azure Cosmos DB Change Feed to stream all historic data as well as live updates.| Azure Cosmos DB SQL API.

                  You can use other targets with additional connectors from the Spark ecosystem.| • Makes use of the Azure Cosmos DB bulk executor library.
                  • Suitable for large datasets.
                  • Needs a custom Spark setup.
                  • Spark is sensitive to schema inconsistencies and this can be a problem during migration. | |Offline|[Custom tool with Cosmos DB bulk executor library](migrate-cosmosdb-data.md)| The source depends on your custom code | Azure Cosmos DB SQL API| • Provides checkpointing, dead-lettering capabilities which increases migration resiliency.
                  • Suitable for very large datasets (10 TB+).
                  • Requires custom setup of this tool running as an App Service. | |Online|[Cosmos DB Functions + ChangeFeed API](change-feed-functions.md)| Azure Cosmos DB SQL API | Azure Cosmos DB SQL API| • Easy to set up.
                  • Works only if the source is an Azure Cosmos DB container.
                  • Not suitable for large datasets.
                  • Does not capture deletes from the source container. | |Online|[Custom Migration Service using ChangeFeed](https://github.com/Azure-Samples/azure-cosmosdb-live-data-migrator)| Azure Cosmos DB SQL API | Azure Cosmos DB SQL API| • Provides progress tracking.
                  • Works only if the source is an Azure Cosmos DB container.
                  • Works for larger datasets as well.
                  • Requires the user to set up an App Service to host the Change feed processor.
                  • Does not capture deletes from the source container.| @@ -79,7 +84,8 @@ If you need help with capacity planning, consider reading our [guide to estimati |Migration type|Solution|Supported sources|Supported targets|Considerations| |---------|---------|---------|---------|---------| |Offline|[cqlsh COPY command](cassandra/migrate-data.md#migrate-data-by-using-the-cqlsh-copy-command)|CSV Files | Azure Cosmos DB Cassandra API| • Easy to set up.
                  • Not suitable for large datasets.
                  • Works only when the source is a Cassandra table.| -|Offline|[Copy table with Spark](cassandra/migrate-data.md#migrate-data-by-using-spark) | •Apache Cassandra
                  •Azure Cosmos DB Cassandra API| Azure Cosmos DB Cassandra API | • Can make use of Spark capabilities to parallelize transformation and ingestion.
                  • Needs configuration with a custom retry policy to handle throttles.| +|Offline|[Copy table with Spark](cassandra/migrate-data.md#migrate-data-by-using-spark) | •Apache Cassandra
                  | Azure Cosmos DB Cassandra API | • Can make use of Spark capabilities to parallelize transformation and ingestion.
                  • Needs configuration with a custom retry policy to handle throttles.| +|Online|[Dual-write proxy + Spark](cassandra/migrate-data-dual-write-proxy.md)| •Apache Cassandra
                  |•Azure Cosmos DB Cassandra API
                  | • Supports larger datasets, but careful attention required for setup and validation.
                  • Open-source tools, no purchase required.| |Online|[Striim (from Oracle DB/Apache Cassandra)](cassandra/migrate-data-striim.md)| •Oracle
                  •Apache Cassandra

                  See the [Striim website](https://www.striim.com/sources-and-targets/) for other supported sources.|•Azure Cosmos DB SQL API
                  •Azure Cosmos DB Cassandra API

                  See the [Striim website](https://www.striim.com/sources-and-targets/) for other supported targets.| • Works with a large variety of sources like Oracle, DB2, SQL Server.
                  • Easy to build ETL pipelines and provides a dashboard for monitoring.
                  • Supports larger datasets.
                  • Since this is a third-party tool, it needs to be purchased from the marketplace and installed in the user's environment.| |Online|[Arcion (from Oracle DB/Apache Cassandra)](cassandra/oracle-migrate-cosmos-db-arcion.md)|•Oracle
                  •Apache Cassandra

                  See the [Arcion website](https://www.arcion.io/) for other supported sources. |Azure Cosmos DB Cassandra API.

                  See the [Arcion website](https://www.arcion.io/) for other supported targets. | • Supports larger datasets.
                  • Since this is a third-party tool, it needs to be purchased from the marketplace and installed in the user's environment.| @@ -90,7 +96,6 @@ For APIs other than the SQL API, Mongo API and the Cassandra API, there are vari **Table API** * [Data Migration Tool](table/table-import.md#data-migration-tool) -* [AzCopy](table/table-import.md#migrate-data-by-using-azcopy) **Gremlin API** @@ -104,4 +109,4 @@ For APIs other than the SQL API, Mongo API and the Cassandra API, there are vari * If you know typical request rates for your current database workload, read about [estimating request units using Azure Cosmos DB capacity planner](estimate-ru-with-capacity-planner.md) * Learn more by trying out the sample applications consuming the bulk executor library in [.NET](bulk-executor-dot-net.md) and [Java](bulk-executor-java.md). * The bulk executor library is integrated into the Cosmos DB Spark connector, to learn more, see [Azure Cosmos DB Spark connector](./create-sql-api-spark.md) article. -* Contact the Azure Cosmos DB product team by opening a support ticket under the "General Advisory" problem type and "Large (TB+) migrations" problem subtype for additional help with large scale migrations. \ No newline at end of file +* Contact the Azure Cosmos DB product team by opening a support ticket under the "General Advisory" problem type and "Large (TB+) migrations" problem subtype for additional help with large scale migrations. diff --git a/articles/cosmos-db/custom-partitioning-analytical-store.md b/articles/cosmos-db/custom-partitioning-analytical-store.md index 50abc137b46aa..0f222c888e6b0 100644 --- a/articles/cosmos-db/custom-partitioning-analytical-store.md +++ b/articles/cosmos-db/custom-partitioning-analytical-store.md @@ -72,7 +72,7 @@ It is important to note that custom partitioning ensures complete transactional If you configured [managed private endpoints](analytical-store-private-endpoints.md) for your analytical store, to ensure network isolation for partitioned store, we recommend that you also add managed private endpoints for the partitioned store. The partitioned store is primary storage account associated with your Synapse workspace. -Similarly, if you configured [customer-managed keys on analytical store](how-to-setup-cmk.md#is-it-possible-to-use-customer-managed-keys-in-conjunction-with-the-azure-cosmos-db-analytical-store), you must directly enable it on the Synapse workspace primary storage account, which is the partitioned store, as well. +Similarly, if you configured [customer-managed keys on analytical store](how-to-setup-cmk.md#is-it-possible-to-use-customer-managed-keys-with-the-azure-cosmos-db-analytical-store), you must directly enable it on the Synapse workspace primary storage account, which is the partitioned store, as well. ## Partitioning strategies You could use one or more partition keys for your analytical data. 
If you are using multiple partition keys, below are some recommendations on how to partition the data: diff --git a/articles/cosmos-db/data-residency.md b/articles/cosmos-db/data-residency.md index 03a3940053204..c0dd86e760938 100644 --- a/articles/cosmos-db/data-residency.md +++ b/articles/cosmos-db/data-residency.md @@ -6,7 +6,7 @@ ms.service: cosmos-db ms.topic: conceptual ms.date: 04/05/2021 ms.author: govindk -ms.reviewer: sngun +ms.reviewer: mjbrown --- diff --git a/articles/cosmos-db/dedicated-gateway.md b/articles/cosmos-db/dedicated-gateway.md index bea9843697a53..92ffe7b73b87b 100644 --- a/articles/cosmos-db/dedicated-gateway.md +++ b/articles/cosmos-db/dedicated-gateway.md @@ -1,12 +1,13 @@ --- title: Azure Cosmos DB dedicated gateway description: A dedicated gateway is compute that is a front-end to your Azure Cosmos DB account. When you connect to the dedicated gateway, it routes requests and caches data. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 11/08/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # Azure Cosmos DB dedicated gateway - Overview (Preview) @@ -84,7 +85,7 @@ There are many different ways to provision a dedicated gateway: - [Provision a dedicated gateway using the Azure Portal](how-to-configure-integrated-cache.md#provision-a-dedicated-gateway-cluster) - [Use Azure Cosmos DB's REAT API](/rest/api/cosmos-db-resource-provider/2021-04-01-preview/service/create) -- [Azure CLI](/cli/azure/cosmosdb/service#az-cosmosdb-service-create) +- [Azure CLI](/cli/azure/cosmosdb/service?view=azure-cli-latest&preserve-view=true#az-cosmosdb-service-create) - [ARM template](/azure/templates/microsoft.documentdb/databaseaccounts/services?tabs=bicep) - Note: You cannot deprovision a dedicated gateway using ARM templates diff --git a/articles/cosmos-db/distribute-data-globally.md b/articles/cosmos-db/distribute-data-globally.md index 01bd284bdae44..15def2f4ee5aa 100644 --- a/articles/cosmos-db/distribute-data-globally.md +++ b/articles/cosmos-db/distribute-data-globally.md @@ -1,8 +1,9 @@ --- title: Distribute data globally with Azure Cosmos DB description: Learn about planet-scale geo-replication, multi-region writes, failover, and data recovery using global databases from Azure Cosmos DB, a globally distributed, multi-model database service. -author: rothja -ms.author: jroth +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: conceptual ms.date: 01/06/2021 diff --git a/articles/cosmos-db/emulator-command-line-parameters.md b/articles/cosmos-db/emulator-command-line-parameters.md index 85af3bddf38ab..c7722d05657c3 100644 --- a/articles/cosmos-db/emulator-command-line-parameters.md +++ b/articles/cosmos-db/emulator-command-line-parameters.md @@ -3,8 +3,9 @@ title: Command-line and PowerShell reference for Azure Cosmos DB Emulator description: Learn the command-line parameters for Azure Cosmos DB Emulator, how to control the emulator with PowerShell, and how to change the number of containers that you can create within the emulator. 
ms.service: cosmos-db ms.topic: how-to -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.date: 09/17/2020 ms.custom: contperf-fy21q1 --- diff --git a/articles/cosmos-db/faq.yml b/articles/cosmos-db/faq.yml index c1edcd217674c..c2abf1ccb3e59 100644 --- a/articles/cosmos-db/faq.yml +++ b/articles/cosmos-db/faq.yml @@ -2,8 +2,9 @@ metadata: title: "Frequently asked questions on different APIs in Azure Cosmos DB" description: "Get answers to frequently asked questions about Azure Cosmos DB, a globally distributed, multi-model database service. Learn about capacity, performance levels, and scaling." - author: markjbrown - ms.author: mjbrown + author: seesharprun + ms.author: sidandrews + ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: faq ms.date: 09/01/2019 diff --git a/articles/cosmos-db/get-latest-restore-timestamp.md b/articles/cosmos-db/get-latest-restore-timestamp.md index 3e390ab84646a..c4e50032054d9 100644 --- a/articles/cosmos-db/get-latest-restore-timestamp.md +++ b/articles/cosmos-db/get-latest-restore-timestamp.md @@ -7,7 +7,7 @@ ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.date: 04/08/2022 ms.topic: how-to -ms.reviewer: wiassaf +ms.reviewer: mjbrown --- # Get the latest restorable timestamp for continuous backup accounts diff --git a/articles/cosmos-db/global-dist-under-the-hood.md b/articles/cosmos-db/global-dist-under-the-hood.md index a95817271204b..3fd47a3cd8797 100644 --- a/articles/cosmos-db/global-dist-under-the-hood.md +++ b/articles/cosmos-db/global-dist-under-the-hood.md @@ -1,13 +1,12 @@ --- title: Global distribution with Azure Cosmos DB- under the hood description: This article provides technical details relating to global distribution of Azure Cosmos DB -author: rothja +author: seesharprun ms.service: cosmos-db ms.topic: conceptual ms.date: 07/02/2020 -ms.author: jroth -ms.reviewer: sngun - +ms.author: sidandrews +ms.reviewer: mjbrown --- # Global data distribution with Azure Cosmos DB - under the hood diff --git a/articles/cosmos-db/graph/cli-samples.md b/articles/cosmos-db/graph/cli-samples.md index a3ea491ffcdba..d127f2d759c40 100644 --- a/articles/cosmos-db/graph/cli-samples.md +++ b/articles/cosmos-db/graph/cli-samples.md @@ -1,12 +1,13 @@ --- title: Azure CLI Samples for Azure Cosmos DB Gremlin API description: Azure CLI Samples for Azure Cosmos DB Gremlin API -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-graph ms.topic: sample ms.date: 02/21/2022 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurecli --- diff --git a/articles/cosmos-db/graph/create-graph-console.md b/articles/cosmos-db/graph/create-graph-console.md index 5678d953f2104..0962884b89614 100644 --- a/articles/cosmos-db/graph/create-graph-console.md +++ b/articles/cosmos-db/graph/create-graph-console.md @@ -35,7 +35,7 @@ You need to have an Azure subscription to create an Azure Cosmos DB account for [!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] -You also need to install the [Gremlin Console](https://tinkerpop.apache.org/download.html). The **recommended version is v3.4.3** or earlier. (To use Gremlin Console on Windows, you need to install [Java Runtime](https://www.oracle.com/technetwork/java/javase/overview/index.html), minimum requires Java 8 but it is preferable to use Java 11). +You also need to install the [Gremlin Console](https://tinkerpop.apache.org/download.html). 
The **recommended version is v3.4.13**. (To use Gremlin Console on Windows, you need to install [Java Runtime](https://www.oracle.com/technetwork/java/javase/overview/index.html), minimum requires Java 8 but it is preferable to use Java 11). ## Create a database account diff --git a/articles/cosmos-db/graph/create-graph-dotnet.md b/articles/cosmos-db/graph/create-graph-dotnet.md index 2a2820c32a2cf..5c10c5f4b8747 100644 --- a/articles/cosmos-db/graph/create-graph-dotnet.md +++ b/articles/cosmos-db/graph/create-graph-dotnet.md @@ -66,14 +66,14 @@ Now let's clone a Gremlin API app from GitHub, set the connection string, and ru 5. Restore the NuGet packages in the project. The restore operation should include the Gremlin.Net driver, and the Newtonsoft.Json package. -6. You can also install the Gremlin.Net@v3.4.6 driver manually using the NuGet package manager, or the [NuGet command-line utility](/nuget/install-nuget-client-tools): +6. You can also install the Gremlin.Net@v3.4.13 driver manually using the NuGet package manager, or the [NuGet command-line utility](/nuget/install-nuget-client-tools): ```bash - nuget install Gremlin.NET -Version 3.4.6 + nuget install Gremlin.NET -Version 3.4.13 ``` > [!NOTE] -> The Gremlin API currently only [supports Gremlin.Net up to v3.4.6](gremlin-support.md#compatible-client-libraries). If you install the latest version, you'll receive errors when using the service. +> The supported Gremlin.NET driver version for Gremlin API is available [here](gremlin-support.md#compatible-client-libraries). Latest released versions of Gremlin.NET may see incompatibilities, so please check the linked table for compatibility updates. ## Review the code diff --git a/articles/cosmos-db/graph/create-graph-java.md b/articles/cosmos-db/graph/create-graph-java.md index 09d3c69d2775e..341cbd15fd97d 100644 --- a/articles/cosmos-db/graph/create-graph-java.md +++ b/articles/cosmos-db/graph/create-graph-java.md @@ -30,7 +30,7 @@ In this quickstart, you create and manage an Azure Cosmos DB Gremlin (graph) API - [Java Development Kit (JDK) 8](https://www.azul.com/downloads/azure-only/zulu/?&version=java-8-lts&architecture=x86-64-bit&package=jdk). Point your `JAVA_HOME` environment variable to the folder where the JDK is installed. - A [Maven binary archive](https://maven.apache.org/download.cgi). - [Git](https://www.git-scm.com/downloads). 
-- [Gremlin-driver 3.4.0](https://mvnrepository.com/artifact/org.apache.tinkerpop/gremlin-driver/3.4.0), this dependency is mentioned in the quickstart sample's pom.xml +- [Gremlin-driver 3.4.13](https://mvnrepository.com/artifact/org.apache.tinkerpop/gremlin-driver/3.4.13), this dependency is mentioned in the quickstart sample's pom.xml ## Create a database account diff --git a/articles/cosmos-db/graph/graph-modeling-tools.md b/articles/cosmos-db/graph/graph-modeling-tools.md index 8a2dfc6856013..28e93196cca67 100644 --- a/articles/cosmos-db/graph/graph-modeling-tools.md +++ b/articles/cosmos-db/graph/graph-modeling-tools.md @@ -6,7 +6,7 @@ ms.author: mansha ms.service: cosmos-db ms.topic: conceptual ms.date: 05/25/2021 -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Third-party data modeling tools for Azure Cosmos DB graph data diff --git a/articles/cosmos-db/graph/graph-visualization-partners.md b/articles/cosmos-db/graph/graph-visualization-partners.md index 675fd2146a382..fef727f74034f 100644 --- a/articles/cosmos-db/graph/graph-visualization-partners.md +++ b/articles/cosmos-db/graph/graph-visualization-partners.md @@ -108,7 +108,7 @@ With the Graphistry's GPU client/cloud technology, you can do interactive visual Graphlytic is a highly customizable web application for graph visualization and analysis. Users can interactively explore the graph, look for patterns with the Gremlin language, or use filters to find answers to any graph question. Graph rendering is done with the 'Cytoscape.js' library, which allows Graphlytic to render tens of thousands of nodes and hundreds of thousands of relationships at once. -Graphlytic is compatible with Azure Cosmos DB and can be deployed to Azure in minutes. Graphlytic’s UI can be customized and extended in many ways, for instance the default [visualization configuration](https://graphlytic.biz/doc/latest/Visualization_settings.html), [data schema](https://graphlytic.biz/doc/latest/Data_schema.html), [style mappings](https://graphlytic.biz/doc/latest/Style_mappers.html), [virtual properties](https://graphlytic.biz/doc/latest/Virtual_properties.html) in the visualization, or custom implemented [widgets](https://graphlytic.biz/doc/latest/Widgets.html) that can enhance the visualization features with bespoke reports or integrations. +Graphlytic is compatible with Azure Cosmos DB and can be deployed to Azure in minutes. Graphlytic’s UI can be customized and extended in many ways, for instance the default [visualization configuration](https://graphlytic.biz/doc/latest/Visualization_Settings.html), [data schema](https://graphlytic.biz/doc/latest/Data_Schema.html), [style mappings](https://graphlytic.biz/doc/latest/Style_Mappers.html), [virtual properties](https://graphlytic.biz/doc/latest/Virtual_properties.html) in the visualization, or custom implemented [widgets](https://graphlytic.biz/doc/latest/Widgets.html) that can enhance the visualization features with bespoke reports or integrations. 
The following are two example scenarios: diff --git a/articles/cosmos-db/graph/gremlin-support.md b/articles/cosmos-db/graph/gremlin-support.md index 791f14114c391..3be49e43fd9be 100644 --- a/articles/cosmos-db/graph/gremlin-support.md +++ b/articles/cosmos-db/graph/gremlin-support.md @@ -20,15 +20,19 @@ Azure Cosmos DB Graph engine closely follows [Apache TinkerPop](https://tinkerpo The following table shows popular Gremlin drivers that you can use against Azure Cosmos DB: -| Download | Source | Getting Started | Supported connector version | +| Download | Source | Getting Started | Supported/Recommended connector version | | --- | --- | --- | --- | -| [.NET](https://tinkerpop.apache.org/docs/3.4.6/reference/#gremlin-DotNet) | [Gremlin.NET on GitHub](https://github.com/apache/tinkerpop/tree/master/gremlin-dotnet) | [Create Graph using .NET](create-graph-dotnet.md) | 3.4.6 | -| [Java](https://mvnrepository.com/artifact/com.tinkerpop.gremlin/gremlin-java) | [Gremlin JavaDoc](https://tinkerpop.apache.org/javadocs/current/full/) | [Create Graph using Java](create-graph-java.md) | 3.2.0+ | -| [Node.js](https://www.npmjs.com/package/gremlin) | [Gremlin-JavaScript on GitHub](https://github.com/apache/tinkerpop/tree/master/gremlin-javascript) | [Create Graph using Node.js](create-graph-nodejs.md) | 3.3.4+ | -| [Python](https://tinkerpop.apache.org/docs/3.3.1/reference/#gremlin-python) | [Gremlin-Python on GitHub](https://github.com/apache/tinkerpop/tree/master/gremlin-python) | [Create Graph using Python](create-graph-python.md) | 3.2.7 | +| [.NET](https://tinkerpop.apache.org/docs/3.4.13/reference/#gremlin-DotNet) | [Gremlin.NET on GitHub](https://github.com/apache/tinkerpop/tree/master/gremlin-dotnet) | [Create Graph using .NET](create-graph-dotnet.md) | 3.4.13 | +| [Java](https://mvnrepository.com/artifact/com.tinkerpop.gremlin/gremlin-java) | [Gremlin JavaDoc](https://tinkerpop.apache.org/javadocs/current/full/) | [Create Graph using Java](create-graph-java.md) | 3.4.13 | +| [Python](https://tinkerpop.apache.org/docs/3.4.13/reference/#gremlin-python) | [Gremlin-Python on GitHub](https://github.com/apache/tinkerpop/tree/master/gremlin-python) | [Create Graph using Python](create-graph-python.md) | 3.4.13 | +| [Gremlin console](https://tinkerpop.apache.org/download.html) | [TinkerPop docs](https://tinkerpop.apache.org/docs/current/reference/#gremlin-console) | [Create Graph using Gremlin Console](create-graph-console.md) | 3.4.13 | +| [Node.js](https://www.npmjs.com/package/gremlin) | [Gremlin-JavaScript on GitHub](https://github.com/apache/tinkerpop/tree/master/gremlin-javascript) | [Create Graph using Node.js](create-graph-nodejs.md) | 3.4.13 | | [PHP](https://packagist.org/packages/brightzone/gremlin-php) | [Gremlin-PHP on GitHub](https://github.com/PommeVerte/gremlin-php) | [Create Graph using PHP](create-graph-php.md) | 3.1.0 | | [Go Lang](https://github.com/supplyon/gremcos/) | [Go Lang](https://github.com/supplyon/gremcos/) | | This library is built by external contributors. The Azure Cosmos DB team doesn't offer any support or maintain the library. | -| [Gremlin console](https://tinkerpop.apache.org/download.html) | [TinkerPop docs](https://tinkerpop.apache.org/docs/current/reference/#gremlin-console) | [Create Graph using Gremlin Console](create-graph-console.md) | 3.2.0 + | + +> [!NOTE] +> Gremlin client driver versions for __3.5.*__, __3.6.*__ have known compatibility issues, so we recommend using the latest supported 3.4.* driver versions listed above. 
+> This table will be updated when compatibility issues have been addressed for these newer driver versions. ## Supported Graph Objects diff --git a/articles/cosmos-db/graph/manage-with-bicep.md b/articles/cosmos-db/graph/manage-with-bicep.md index c10c90665b870..f3ac9792b3afb 100644 --- a/articles/cosmos-db/graph/manage-with-bicep.md +++ b/articles/cosmos-db/graph/manage-with-bicep.md @@ -1,12 +1,13 @@ --- title: Create and manage Azure Cosmos DB Gremlin API with Bicep description: Use Bicep to create and configure Azure Cosmos DB Gremlin API. -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-graph ms.topic: how-to ms.date: 9/13/2021 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Manage Azure Cosmos DB Gremlin API resources using Bicep diff --git a/articles/cosmos-db/graph/powershell-samples.md b/articles/cosmos-db/graph/powershell-samples.md index 7446edef18cd3..ced0555df7970 100644 --- a/articles/cosmos-db/graph/powershell-samples.md +++ b/articles/cosmos-db/graph/powershell-samples.md @@ -1,12 +1,13 @@ --- title: Azure PowerShell samples for Azure Cosmos DB Gremlin API description: Get the Azure PowerShell samples to perform common tasks in Azure Cosmos DB Gremlin API -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-graph ms.topic: sample ms.date: 01/20/2021 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Azure PowerShell samples for Azure Cosmos DB Gremlin API diff --git a/articles/cosmos-db/graph/resource-manager-template-samples.md b/articles/cosmos-db/graph/resource-manager-template-samples.md index 175b60019dc84..bac542ca22036 100644 --- a/articles/cosmos-db/graph/resource-manager-template-samples.md +++ b/articles/cosmos-db/graph/resource-manager-template-samples.md @@ -1,12 +1,13 @@ --- title: Resource Manager templates for Azure Cosmos DB Gremlin API description: Use Azure Resource Manager templates to create and configure Azure Cosmos DB Gremlin API. -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-graph ms.topic: how-to ms.date: 10/14/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Manage Azure Cosmos DB Gremlin API resources using Azure Resource Manager templates diff --git a/articles/cosmos-db/graph/tutorial-query-graph.md b/articles/cosmos-db/graph/tutorial-query-graph.md index 66e542adf7233..7886b2e676897 100644 --- a/articles/cosmos-db/graph/tutorial-query-graph.md +++ b/articles/cosmos-db/graph/tutorial-query-graph.md @@ -7,7 +7,7 @@ ms.service: cosmos-db ms.subservice: cosmosdb-graph ms.topic: tutorial ms.date: 02/16/2022 -ms.reviewer: sngun +ms.reviewer: mjbrown ms.devlang: csharp ms.custom: devx-track-csharp --- diff --git a/articles/cosmos-db/hierarchical-partition-keys.md b/articles/cosmos-db/hierarchical-partition-keys.md index e467a6b35bc19..781f785cf8ded 100644 --- a/articles/cosmos-db/hierarchical-partition-keys.md +++ b/articles/cosmos-db/hierarchical-partition-keys.md @@ -32,7 +32,8 @@ Queries that specify either the **TenantId**, or both **TenantId** and **UserId* ## Getting started > [!IMPORTANT] -> Working with containers that use hierarchical partition keys is supported only in the preview versions of the .NET v3 and Java v4 SDK. 
You must use the supported SDK to create new containers with hierarchical partition keys and to perform CRUD/query operations on the data +> Working with containers that use hierarchical partition keys is supported only in the preview versions of the .NET v3 and Java v4 SDK. You must use the supported SDK to create new containers with hierarchical partition keys and to perform CRUD/query operations on the data. +> If you would like to use an SDK or connector that isn't currently supported, please file a request on our [community forum](https://feedback.azure.com/d365community/forum/3002b3be-0d25-ec11-b6e6-000d3a4f0858). Find the latest preview version of each supported SDK: @@ -397,7 +398,7 @@ You can test the subpartitioning feature using the latest version of the local e .\CosmosDB.Emulator.exe /EnablePreview ``` -For more information, see [Azure Cosmos DB emulator](/azure/cosmos-db/local-emulator). +For more information, see [Azure Cosmos DB emulator](./local-emulator.md). ## Limitations and known issues @@ -414,4 +415,4 @@ For more information, see [Azure Cosmos DB emulator](/azure/cosmos-db/local-emul * See the FAQ on [hierarchical partition keys.](hierarchical-partition-keys-faq.yml) * Learn more about [partitioning in Azure Cosmos DB.](partitioning-overview.md) -* Learn more about [using Azure Resource Manager templates with Azure Cosmos DB.](/azure/templates/microsoft.documentdb/databaseaccounts) +* Learn more about [using Azure Resource Manager templates with Azure Cosmos DB.](/azure/templates/microsoft.documentdb/databaseaccounts) \ No newline at end of file diff --git a/articles/cosmos-db/high-availability.md b/articles/cosmos-db/high-availability.md index d74c179a1e54c..e8a4e42768ae9 100644 --- a/articles/cosmos-db/high-availability.md +++ b/articles/cosmos-db/high-availability.md @@ -1,13 +1,12 @@ --- title: High availability in Azure Cosmos DB description: This article describes how to build a highly available solution using Cosmos DB -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.topic: conceptual ms.date: 02/24/2022 -ms.author: mjbrown -ms.reviewer: sngun - +ms.author: sidandrews +ms.reviewer: mjbrown --- # Achieve high availability with Cosmos DB diff --git a/articles/cosmos-db/how-pricing-works.md b/articles/cosmos-db/how-pricing-works.md index 57ac09ea58d7a..e2f40d884c0a4 100644 --- a/articles/cosmos-db/how-pricing-works.md +++ b/articles/cosmos-db/how-pricing-works.md @@ -1,13 +1,13 @@ --- title: Pricing model of Azure Cosmos DB description: This article explains the pricing model of Azure Cosmos DB and how it simplifies your cost management and cost planning. -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: conceptual ms.date: 03/24/2022 ms.custom: cosmos-db-video -ms.reviewer: wiassaf --- # Pricing model in Azure Cosmos DB @@ -18,38 +18,38 @@ The pricing model of Azure Cosmos DB simplifies the cost management and planning > > [!VIDEO https://aka.ms/docs.how-pricing-works] -- **Database operations**: The way you get charged for your database operations depends on the type of Azure Cosmos account you are using. +- **Database operations**: The way you get charged for your database operations depends on the type of Azure Cosmos account you're using. - - **Provisioned Throughput**: [Provisioned throughput](set-throughput.md) (also called reserved throughput) provides high performance at any scale. 
You specify the throughput that you need in [Request Units](request-units.md) per second (RU/s), and Azure Cosmos DB dedicates the resources required to provide the configured throughput. You can [provision throughput on either a database or a container](set-throughput.md). Based on your workload needs, you can scale throughput up/down at any time or use [autoscale](provision-throughput-autoscale.md) (although there is a minimum throughput required on a database or a container to guarantee the SLAs). You are billed hourly for the maximum provisioned throughput for a given hour. + - **Provisioned Throughput**: [Provisioned throughput](set-throughput.md) (also called reserved throughput) provides high performance at any scale. You specify the throughput that you need in [Request Units](request-units.md) per second (RU/s), and Azure Cosmos DB dedicates the resources required to provide the configured throughput. You can [provision throughput on either a database or a container](set-throughput.md). Based on your workload needs, you can scale throughput up/down at any time or use [autoscale](provision-throughput-autoscale.md) (although there's a minimum throughput required on a database or a container to guarantee the SLAs). You're billed hourly for the maximum provisioned throughput for a given hour. > [!NOTE] > Because the provisioned throughput model dedicates resources to your container or database, you will be charged for the throughput you have provisioned even if you don't run any workloads. - - **Serverless**: In [serverless](serverless.md) mode, you don't have to provision any throughput when creating resources in your Azure Cosmos account. At the end of your billing period, you get billed for the amount of Request Units that has been consumed by your database operations. + - **Serverless**: In [serverless](serverless.md) mode, you don't have to provision any throughput when creating resources in your Azure Cosmos account. At the end of your billing period, you get billed for the number of Request Units that has been consumed by your database operations. -- **Storage**: You are billed a flat rate for the total amount of storage (in GBs) consumed by your data and indexes for a given hour. Storage is billed on a consumption basis, so you don't have to reserve any storage in advance. You are billed only for the storage you consume. +- **Storage**: You're billed a flat rate for the total amount of storage (in GBs) consumed by your data and indexes for a given hour. Storage is billed on a consumption basis, so you don't have to reserve any storage in advance. You're billed only for the storage you consume. The pricing model in Azure Cosmos DB is consistent across all APIs. For more information, see the [Azure Cosmos DB pricing page](https://azure.microsoft.com/pricing/details/cosmos-db/), [Understanding your Azure Cosmos DB bill](understand-your-bill.md) and [How Azure Cosmos DB pricing model is cost-effective for customers](total-cost-ownership.md). -If you deploy your Azure Cosmos DB account to a non-government region in the US, there is a minimum price for both database and container-based throughput in provisioned throughput mode. There is no minimum price in serverless mode. The pricing varies depending on the region you are using, see the [Azure Cosmos DB pricing page](https://azure.microsoft.com/pricing/details/cosmos-db/) for latest pricing information. 
+If you deploy your Azure Cosmos DB account to a non-government region in the US, there's a minimum price for both database and container-based throughput in provisioned throughput mode. There's no minimum price in serverless mode. The pricing varies depending on the region you're using, see the [Azure Cosmos DB pricing page](https://azure.microsoft.com/pricing/details/cosmos-db/) for latest pricing information. ## Try Azure Cosmos DB for free Azure Cosmos DB offers many options for developers to it for free. These options include: -* **Azure Cosmos DB free tier**: Azure Cosmos DB free tier makes it easy to get started, develop and test your applications, or even run small production workloads for free. When free tier is enabled on an account, you'll get the first 1000 RU/s and 25 GB of storage in the account free, for the lifetime of the account. You can have up to one free tier account per Azure subscription and must opt-in when creating the account. To learn more, see how to [create a free tier account](free-tier.md) article. +* **Azure Cosmos DB free tier**: Azure Cosmos DB free tier makes it easy to get started, develop and test your applications, or even run small production workloads for free. When free tier is enabled on an account, you'll get the first 1000 RU/s and 25 GB of storage in the account free, for the lifetime of the account. You can have up to one free tier account per Azure subscription and must opt in when creating the account. To learn more, see how to [create a free tier account](free-tier.md) article. * **Azure free account**: Azure offers a [free tier](https://azure.microsoft.com/free/) that gives you $200 in Azure credits for the first 30 days and a limited quantity of free services for 12 months. For more information, see [Azure free account](../cost-management-billing/manage/avoid-charges-free-account.md). Azure Cosmos DB is a part of Azure free account. Specifically for Azure Cosmos DB, this free account offers 25-GB storage and 400 RU/s of provisioned throughput for the entire year. * **Try Azure Cosmos DB for free**: Azure Cosmos DB offers a time-limited experience by using try Azure Cosmos DB for free accounts. You can create an Azure Cosmos DB account, create database and collections and run a sample application by using the Quickstarts and tutorials. You can run the sample application without subscribing to an Azure account or using your credit card. [Try Azure Cosmos DB for free](https://azure.microsoft.com/try/cosmosdb/) offers Azure Cosmos DB for one month, with the ability to renew your account any number of times. -* **Azure Cosmos DB emulator**: Azure Cosmos DB emulator provides a local environment that emulates the Azure Cosmos DB service for development purposes. Emulator is offered at no cost and with high fidelity to the cloud service. Using Azure Cosmos DB emulator, you can develop and test your applications locally, without creating an Azure subscription or incurring any costs. You can develop your applications by using the emulator locally before going into production. After you are satisfied with the functionality of the application against the emulator, you can switch to using the Azure Cosmos DB account in the cloud and significantly save on cost. For more information, see [Using Azure Cosmos DB for development and testing](local-emulator.md) for more details. +* **Azure Cosmos DB emulator**: Azure Cosmos DB emulator provides a local environment that emulates the Azure Cosmos DB service for development purposes. 
Emulator is offered at no cost and with high fidelity to the cloud service. Using Azure Cosmos DB emulator, you can develop and test your applications locally, without creating an Azure subscription or incurring any costs. You can develop your applications by using the emulator locally before going into production. After you're satisfied with the functionality of the application against the emulator, you can switch to using the Azure Cosmos DB account in the cloud and significantly save on cost. For more information about dev/test, see [using Azure Cosmos DB for development and testing](local-emulator.md). ## Pricing with reserved capacity -Azure Cosmos DB [reserved capacity](cosmos-db-reserved-capacity.md) helps you save money when using the provisioned throughput mode by pre-paying for Azure Cosmos DB resources for either one year or three years. You can significantly reduce your costs with one-year or three-year upfront commitments and save between 20-65% discounts when compared to the regular pricing. Azure Cosmos DB reserved capacity helps you lower costs by pre-paying for the provisioned throughput (RU/s) for a period of one year or three years and you get a discount on the throughput provisioned. +Azure Cosmos DB [reserved capacity](cosmos-db-reserved-capacity.md) helps you save money when using the provisioned throughput mode by pre-paying for Azure Cosmos DB resources for either one year or three years. You can significantly reduce your costs with one-year or three-year upfront commitments and save between 20-65% discounts when compared to the regular pricing. Azure Cosmos DB reserved capacity helps you lower costs by pre-paying for the provisioned throughput (RU/s) for one year or three years and you get a discount on the throughput provisioned. -Reserved capacity provides a billing discount and does not affect the runtime state of your Azure Cosmos DB resources. Reserved capacity is available consistently to all APIs, which includes MongoDB, Cassandra, SQL, Gremlin, and Azure Tables and all regions worldwide. You can learn more about reserved capacity in [Prepay for Azure Cosmos DB resources with reserved capacity](cosmos-db-reserved-capacity.md) article and buy reserved capacity from the [Azure portal](https://portal.azure.com/). +Reserved capacity provides a billing discount and doesn't affect the runtime state of your Azure Cosmos DB resources. Reserved capacity is available consistently to all APIs, which includes MongoDB, Cassandra, SQL, Gremlin, and Azure Tables and all regions worldwide. You can learn more about reserved capacity in [Prepay for Azure Cosmos DB resources with reserved capacity](cosmos-db-reserved-capacity.md) article and buy reserved capacity from the [Azure portal](https://portal.azure.com/). 
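To make the free tier opt-in described above concrete, here's a minimal Azure CLI sketch that creates an account with the free tier discount applied at creation time. The resource group and account names are placeholders, and the `--enable-free-tier` flag is cited as an assumption from the current CLI; confirm the exact parameter name with `az cosmosdb create --help` for your CLI version.

```azurecli
# Minimal sketch: create an account with the free tier discount (placeholder names).
az cosmosdb create \
    --resource-group myResourceGroup \
    --name mycosmosaccount \
    --enable-free-tier true
```

Keep in mind that only one account per Azure subscription can opt in to the free tier discount, so the discount can't be applied to a second account in the same subscription.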
## Next steps diff --git a/articles/cosmos-db/how-to-configure-integrated-cache.md b/articles/cosmos-db/how-to-configure-integrated-cache.md index 396acb2f2256a..f8eb331bbcbfd 100644 --- a/articles/cosmos-db/how-to-configure-integrated-cache.md +++ b/articles/cosmos-db/how-to-configure-integrated-cache.md @@ -1,12 +1,13 @@ --- title: How to configure the Azure Cosmos DB integrated cache description: Learn how to configure the Azure Cosmos DB integrated cache -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 09/28/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # How to configure the Azure Cosmos DB integrated cache (Preview) diff --git a/articles/cosmos-db/how-to-manage-database-account.md b/articles/cosmos-db/how-to-manage-database-account.md index c303137d84b66..2333c925dabb5 100644 --- a/articles/cosmos-db/how-to-manage-database-account.md +++ b/articles/cosmos-db/how-to-manage-database-account.md @@ -1,12 +1,13 @@ --- title: Learn how to manage database accounts in Azure Cosmos DB description: Learn how to manage Azure Cosmos DB resources by using the Azure portal, PowerShell, CLI, and Azure Resource Manager templates -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 09/13/2021 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Manage an Azure Cosmos account using the Azure portal diff --git a/articles/cosmos-db/how-to-move-regions.md b/articles/cosmos-db/how-to-move-regions.md index 643cfe6354bc4..115ebeb342815 100644 --- a/articles/cosmos-db/how-to-move-regions.md +++ b/articles/cosmos-db/how-to-move-regions.md @@ -1,13 +1,14 @@ --- title: Move an Azure Cosmos DB account to another region description: Learn how to move an Azure Cosmos DB account to another region. -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.custom: subject-moving-resources ms.date: 03/15/2022 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Move an Azure Cosmos DB account to another region diff --git a/articles/cosmos-db/how-to-restrict-user-data.md b/articles/cosmos-db/how-to-restrict-user-data.md index e43327dcf51ac..b4c4f30938b6d 100644 --- a/articles/cosmos-db/how-to-restrict-user-data.md +++ b/articles/cosmos-db/how-to-restrict-user-data.md @@ -1,11 +1,12 @@ --- title: Restrict user access to data operations only with Azure Cosmos DB description: Learn how to restrict access to data operations only with Azure Cosmos DB -author: rothja +author: seesharprun ms.service: cosmos-db ms.topic: how-to ms.date: 12/9/2019 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/how-to-setup-cmk.md b/articles/cosmos-db/how-to-setup-cmk.md index 26f3275cde12b..5799c60b8e1f3 100644 --- a/articles/cosmos-db/how-to-setup-cmk.md +++ b/articles/cosmos-db/how-to-setup-cmk.md @@ -232,7 +232,7 @@ az cosmosdb show \ ## Using a managed identity in the Azure Key Vault access policy -This access policy ensures that your encryption keys can be accessed by your Azure Cosmos DB account. This is done by granting access to a specific Azure Active Directory (AD) identity. Two types of identities are supported: +This access policy ensures that your encryption keys can be accessed by your Azure Cosmos DB account. 
The access policy is implemented by granting access to a specific Azure Active Directory (AD) identity. Two types of identities are supported: - Azure Cosmos DB's first-party identity can be used to grant access to the Azure Cosmos DB service. - Your Azure Cosmos DB account's [managed identity](how-to-setup-managed-identity.md) can be used to grant access to your account specifically. @@ -241,33 +241,33 @@ This access policy ensures that your encryption keys can be accessed by your Azu Because a system-assigned managed identity can only be retrieved after the creation of your account, you still need to initially create your account using the first-party identity, as described [above](#add-access-policy). Then: -1. If this wasn't done during account creation, [enable a system-assigned managed identity](./how-to-setup-managed-identity.md#add-a-system-assigned-identity) on your account and copy the `principalId` that got assigned. +1. If the system-assigned managed identity wasn't configured during account creation, [enable a system-assigned managed identity](./how-to-setup-managed-identity.md#add-a-system-assigned-identity) on your account and copy the `principalId` that got assigned. -1. Add a new access policy to your Azure Key Vault account just as described [above](#add-access-policy), but using the `principalId` you copied at the previous step instead of Azure Cosmos DB's first-party identity. +1. Add a new access policy to your Azure Key Vault account as described [above](#add-access-policy), but using the `principalId` you copied at the previous step instead of Azure Cosmos DB's first-party identity. -1. Update your Azure Cosmos DB account to specify that you want to use the system-assigned managed identity when accessing your encryption keys in Azure Key Vault. You can do this: +1. Update your Azure Cosmos DB account to specify that you want to use the system-assigned managed identity when accessing your encryption keys in Azure Key Vault. You have two options: - - by specifying this property in your account's Azure Resource Manager template: + - Specify the property in your account's Azure Resource Manager template: - ```json - { - "type": " Microsoft.DocumentDB/databaseAccounts", - "properties": { - "defaultIdentity": "SystemAssignedIdentity", + ```json + { + "type": " Microsoft.DocumentDB/databaseAccounts", + "properties": { + "defaultIdentity": "SystemAssignedIdentity", + // ... + }, // ... - }, - // ... - } - ``` - - - by updating your account with the Azure CLI: + } + ``` - ```azurecli - resourceGroupName='myResourceGroup' - accountName='mycosmosaccount' + - Update your account with the Azure CLI: - az cosmosdb update --resource-group $resourceGroupName --name $accountName --default-identity "SystemAssignedIdentity" - ``` + ```azurecli + resourceGroupName='myResourceGroup' + accountName='mycosmosaccount' + + az cosmosdb update --resource-group $resourceGroupName --name $accountName --default-identity "SystemAssignedIdentity" + ``` 1. Optionally, you can then remove the Azure Cosmos DB first-party identity from your Azure Key Vault access policy. @@ -275,42 +275,42 @@ Because a system-assigned managed identity can only be retrieved after the creat 1. When creating the new access policy in your Azure Key Vault account as described [above](#add-access-policy), use the `Object ID` of the managed identity you wish to use instead of Azure Cosmos DB's first-party identity. -1. 
When creating your Azure Cosmos DB account, you must enable the user-assigned managed identity and specify that you want to use this identity when accessing your encryption keys in Azure Key Vault. You can do this: - - - in an Azure Resource Manager template: +1. When creating your Azure Cosmos DB account, you must enable the user-assigned managed identity and specify that you want to use this identity when accessing your encryption keys in Azure Key Vault. Options include: - ```json - { - "type": "Microsoft.DocumentDB/databaseAccounts", - "identity": { - "type": "UserAssigned", - "userAssignedIdentities": { - "": {} - } - }, - // ... - "properties": { - "defaultIdentity": "UserAssignedIdentity=" - "keyVaultKeyUri": "" + - Using an Azure Resource Manager template: + + ```json + { + "type": "Microsoft.DocumentDB/databaseAccounts", + "identity": { + "type": "UserAssigned", + "userAssignedIdentities": { + "": {} + } + }, // ... + "properties": { + "defaultIdentity": "UserAssignedIdentity=" + "keyVaultKeyUri": "" + // ... + } } - } - ``` + ``` - - with the Azure CLI: + - Using the Azure CLI: - ```azurecli - resourceGroupName='myResourceGroup' - accountName='mycosmosaccount' - keyVaultKeyUri = 'https://.vault.azure.net/keys/' - - az cosmosdb create \ - -n $accountName \ - -g $resourceGroupName \ - --key-uri $keyVaultKeyUri - --assign-identity - --default-identity "UserAssignedIdentity=" - ``` + ```azurecli + resourceGroupName='myResourceGroup' + accountName='mycosmosaccount' + keyVaultKeyUri = 'https://.vault.azure.net/keys/' + + az cosmosdb create \ + -n $accountName \ + -g $resourceGroupName \ + --key-uri $keyVaultKeyUri + --assign-identity + --default-identity "UserAssignedIdentity=" + ``` ## Use CMK with continuous backup @@ -366,14 +366,14 @@ When you create a new Azure Cosmos account through an Azure Resource Manager tem ## Customer-managed keys and double encryption -When using customer-managed keys, the data you store in your Azure Cosmos DB account ends up being encrypted twice: +The data you store in your Azure Cosmos DB account when using customer-managed keys ends up being encrypted twice: - Once through the default encryption performed with Microsoft-managed keys. -- Once through the additional encryption performed with customer-managed keys. +- Once through the extra encryption performed with customer-managed keys. -Note that **this only applies to the main Azure Cosmos DB transactional storage**. Some features involve internal replication of your data to a second tier of storage where double encryption isn't provided, even when using customer-managed keys. These features include: +Double encryption only applies to the main Azure Cosmos DB transactional storage. Some features involve internal replication of your data to a second tier of storage where double encryption isn't provided, even with customer-managed keys. 
These features include: -- [Synapse Link](./synapse-link.md) +- [Azure Synapse Link](./synapse-link.md) - [Continuous backups with point-in-time restore](./continuous-backup-restore-introduction.md) ## Key rotation @@ -382,15 +382,15 @@ Rotating the customer-managed key used by your Azure Cosmos account can be done - Create a new version of the key currently used from Azure Key Vault: - :::image type="content" source="./media/how-to-setup-cmk/portal-akv-rot.png" alt-text="Create a new key version"::: + :::image type="content" source="./media/how-to-setup-cmk/portal-akv-rot.png" alt-text="Screenshot of the New Version option in the Versions page of the Azure portal."::: -- Swap the key currently used with a totally different one by updating the key URI on your account. From the Azure portal, go to your Azure Cosmos account and select **Data Encryption** from the left menu: +- Swap the key currently used with a different one by updating the key URI on your account. From the Azure portal, go to your Azure Cosmos account and select **Data Encryption** from the left menu: - :::image type="content" source="./media/how-to-setup-cmk/portal-data-encryption.png" alt-text="The Data Encryption menu entry"::: + :::image type="content" source="./media/how-to-setup-cmk/portal-data-encryption.png" alt-text="Screenshot of the Data Encryption menu option in the Azure portal."::: Then, replace the **Key URI** with the new key you want to use and select **Save**: - :::image type="content" source="./media/how-to-setup-cmk/portal-key-swap.png" alt-text="Update the key URI"::: + :::image type="content" source="./media/how-to-setup-cmk/portal-key-swap.png" alt-text="Screenshot of the Save option in the Key page of the Azure portal."::: Here's how to do achieve the same result in PowerShell: @@ -411,17 +411,17 @@ The previous key or key version can be disabled after the [Azure Key Vault audit ## Error handling -When using customer-managed keys in Azure Cosmos DB, if there are any errors, Azure Cosmos DB returns the error details along with a HTTP sub-status code in the response. You can use this sub-status code to debug the root cause of the issue. See the [HTTP Status Codes for Azure Cosmos DB](/rest/api/cosmos-db/http-status-codes-for-cosmosdb) article to get the list of supported HTTP sub-status codes. +If there are any errors with customer-managed keys in Azure Cosmos DB, Azure Cosmos DB returns the error details along with an HTTP substatus code in the response. You can use the HTTP substatus code to debug the root cause of the issue. See the [HTTP Status Codes for Azure Cosmos DB](/rest/api/cosmos-db/http-status-codes-for-cosmosdb) article to get the list of supported HTTP substatus codes. ## Frequently asked questions -### Is there an additional charge to enable customer-managed keys? +### Are there more charges to enable customer-managed keys? No, there's no charge to enable this feature. -### How do customer-managed keys impact capacity planning? +### How do customer-managed keys influence capacity planning? -When using customer-managed keys, [Request Units](./request-units.md) consumed by your database operations see an increase to reflect the additional processing required to perform encryption and decryption of your data. This may lead to slightly higher utilization of your provisioned capacity. 
Use the table below for guidance: +[Request Units](./request-units.md) consumed by your database operations see an increase to reflect the extra processing required to perform encryption and decryption of your data when using customer-managed keys. The extra RU consumption may lead to slightly higher utilization of your provisioned capacity. Use the table below for guidance: | Operation type | Request Unit increase | |---|---| @@ -445,9 +445,9 @@ All the data stored in your Azure Cosmos account is encrypted with the customer- This feature is currently available only for new accounts. -### Is it possible to use customer-managed keys in conjunction with the Azure Cosmos DB [analytical store](analytical-store-introduction.md)? +### Is it possible to use customer-managed keys with the Azure Cosmos DB [analytical store](analytical-store-introduction.md)? -Yes, Azure Synapse Link only supports configuring customer-managed keys using your Azure Cosmos DB account's managed identity. You must [use your Azure Cosmos DB account's managed identity](#using-managed-identity) in your Azure Key Vault access policy before [enabling Azure Synapse Link](configure-synapse-link.md#enable-synapse-link) on your account. +Yes, Azure Synapse Link only supports configuring customer-managed keys using your Azure Cosmos DB account's managed identity. You must [use your Azure Cosmos DB account's managed identity](#using-managed-identity) in your Azure Key Vault access policy before [enabling Azure Synapse Link](configure-synapse-link.md#enable-synapse-link) on your account. For a how-to guide on how to enable managed identity and use it in an access policy, see [access Azure Key Vault from Azure Cosmos DB using a managed identity](access-key-vault-managed-identity.md). ### Is there a plan to support finer granularity than account-level keys? @@ -466,15 +466,15 @@ You can also programmatically fetch the details of your Azure Cosmos account and Azure Cosmos DB takes [regular and automatic backups](./online-backup-and-restore.md) of the data stored in your account. This operation backs up the encrypted data. The following conditions are necessary to successfully restore a periodic backup: -- The encryption key that you used at the time of the backup is required and must be available in Azure Key Vault. This means that no revocation was made and the version of the key that was used at the time of the backup is still enabled. -- If you [used a system-assigned managed identity in the Azure Key Vault access policy](#to-use-a-system-assigned-managed-identity) of the source account, you must temporarily grant access to the Azure Cosmos DB first-party identity in that access policy as described [here](#add-access-policy) before restoring your data. This is because a system-assigned managed identity is specific to an account and cannot be re-used in the target account. Once the data is fully restored to the target account, you can set your desired identity configuration and remove the first-party identity from the Key Vault access policy. +- The encryption key that you used at the time of the backup is required and must be available in Azure Key Vault. This condition requires that no revocation was made and the version of the key that was used at the time of the backup is still enabled. +- If you [used a system-assigned managed identity in the access policy](#to-use-a-system-assigned-managed-identity), temporarily [grant access to the Azure Cosmos DB first-party identity](#add-access-policy) before restoring your data. 
This requirement exists because a system-assigned managed identity is specific to an account and can't be reused in the target account. Once the data is fully restored to the target account, you can set your desired identity configuration and remove the first-party identity from the Key Vault access policy. ### How do customer-managed keys affect continuous backups? -Azure Cosmos DB gives you the option to configure [continuous backups](./continuous-backup-restore-introduction.md) on your account. With continuous backups, you can restore your data to any point in time within the past 30 days. To use continuous backups on an account where customer-managed keys are enabled, you must [use a user-assigned managed identity](#to-use-a-user-assigned-managed-identity) in the Key Vault access policy; the Azure Cosmos DB first-party identity or a system-assigned managed identity aren't currently supported on accounts using continuous backups. +Azure Cosmos DB gives you the option to configure [continuous backups](./continuous-backup-restore-introduction.md) on your account. With continuous backups, you can restore your data to any point in time within the past 30 days. To use continuous backups on an account where customer-managed keys are enabled, you must [use a user-assigned managed identity](#to-use-a-user-assigned-managed-identity) in the Key Vault access policy. Azure Cosmos DB first-party identities or system-assigned managed identities aren't currently supported on accounts using continuous backups. The following conditions are necessary to successfully perform a point-in-time restore: -- The encryption key that you used at the time of the backup is required and must be available in Azure Key Vault. This means that no revocation was made and the version of the key that was used at the time of the backup is still enabled. +- The encryption key that you used at the time of the backup is required and must be available in Azure Key Vault. This requirement means that no revocation was made and the version of the key that was used at the time of the backup is still enabled. - You must ensure that the user-assigned managed identity originally used on the source account is still declared in the Key Vault access policy. > [!IMPORTANT] diff --git a/articles/cosmos-db/how-to-setup-rbac.md b/articles/cosmos-db/how-to-setup-rbac.md index 54a0b9a664ef2..8436762135c14 100644 --- a/articles/cosmos-db/how-to-setup-rbac.md +++ b/articles/cosmos-db/how-to-setup-rbac.md @@ -6,7 +6,7 @@ ms.service: cosmos-db ms.topic: how-to ms.date: 02/16/2022 ms.author: thweiss -ms.reviewer: wiassaf +ms.reviewer: mjbrown --- # Configure role-based access control with Azure Active Directory for your Azure Cosmos DB account @@ -343,14 +343,14 @@ See [this page](/rest/api/cosmos-db-resource-provider/2021-04-01-preview/sql-res ## Initialize the SDK with Azure AD -To use the Azure Cosmos DB RBAC in your application, you have to update the way you initialize the Azure Cosmos DB SDK. Instead of passing your account's primary key, you have to pass an instance of a `TokenCredential` class. This instance provides the Azure Cosmos DB SDK with the context required to fetch an Azure AD (AAD) token on behalf of the identity you wish to use. +To use the Azure Cosmos DB RBAC in your application, you have to update the way you initialize the Azure Cosmos DB SDK. Instead of passing your account's primary key, you have to pass an instance of a `TokenCredential` class. 
This instance provides the Azure Cosmos DB SDK with the context required to fetch an Azure AD token on behalf of the identity you wish to use. The way you create a `TokenCredential` instance is beyond the scope of this article. There are many ways to create such an instance depending on the type of Azure AD identity you want to use (user principal, service principal, group etc.). Most importantly, your `TokenCredential` instance must resolve to the identity (principal ID) that you've assigned your roles to. You can find examples of creating a `TokenCredential` class: - [In .NET](/dotnet/api/overview/azure/identity-readme#credential-classes) - [In Java](/java/api/overview/azure/identity-readme#credential-classes) - [In JavaScript](/javascript/api/overview/azure/identity-readme#credential-classes) -- [In Python](/python/api/overview/azure/identity-readme#credential-classes) +- [In Python](/python/api/overview/azure/identity-readme?view=azure-python&preserve-view=true#credential-classes) The examples below use a service principal with a `ClientSecretCredential` instance. diff --git a/articles/cosmos-db/import-data.md b/articles/cosmos-db/import-data.md index a4460345a13a2..57f42f992195c 100644 --- a/articles/cosmos-db/import-data.md +++ b/articles/cosmos-db/import-data.md @@ -1,14 +1,15 @@ --- title: 'Tutorial: Database migration tool for Azure Cosmos DB' description: 'Tutorial: Learn how to use the open-source Azure Cosmos DB data migration tools to import data to Azure Cosmos DB from various sources including MongoDB, SQL Server, Table storage, Amazon DynamoDB, CSV, and JSON files. CSV to JSON conversion.' -author: anfeldma-ms +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: tutorial ms.date: 08/26/2021 -ms.author: dech - --- + # Tutorial: Use Data migration tool to migrate your data to Azure Cosmos DB [!INCLUDE[appliesto-sql-api](includes/appliesto-sql-api.md)] @@ -18,7 +19,7 @@ This tutorial provides instructions on using the Azure Cosmos DB Data Migration > The Azure Cosmos DB Data Migration tool is an open source tool designed for small migrations. For larger migrations, view our [guide for ingesting data](cosmosdb-migrationchoices.md). * **[SQL API](./introduction.md)** - You can use any of the source options provided in the Data Migration tool to import data at a small scale. [Learn about migration options for importing data at a large scale](cosmosdb-migrationchoices.md). -* **[Table API](table/introduction.md)** - You can use the Data Migration tool or [AzCopy](table/table-import.md#migrate-data-by-using-azcopy) to import data. For more information, see [Import data for use with the Azure Cosmos DB Table API](table/table-import.md). +* **[Table API](table/introduction.md)** - You can use the Data Migration tool to import data. For more information, see [Import data for use with the Azure Cosmos DB Table API](table/table-import.md). * **[Azure Cosmos DB's API for MongoDB](mongodb/mongodb-introduction.md)** - The Data Migration tool doesn't support Azure Cosmos DB's API for MongoDB either as a source or as a target. If you want to migrate the data in or out of collections in Azure Cosmos DB, refer to [How to migrate MongoDB data to a Cosmos database with Azure Cosmos DB's API for MongoDB](../dms/tutorial-mongodb-cosmos-db.md?toc=%2fazure%2fcosmos-db%2ftoc.json%253ftoc%253d%2fazure%2fcosmos-db%2ftoc.json) for instructions. 
You can still use the Data Migration tool to export data from MongoDB to Azure Cosmos DB SQL API collections for use with the SQL API. * **[Cassandra API](graph-introduction.md)** - The Data Migration tool isn't a supported import tool for Cassandra API accounts. [Learn about migration options for importing data into Cassandra API](cosmosdb-migrationchoices.md#azure-cosmos-db-cassandra-api) * **[Gremlin API](graph-introduction.md)** - The Data Migration tool isn't a supported import tool for Gremlin API accounts at this time. [Learn about migration options for importing data into Gremlin API](cosmosdb-migrationchoices.md#other-apis) @@ -70,7 +71,7 @@ While the import tool includes a graphical user interface (dtui.exe), it can als ### Build from source - The migration tool source code is available on GitHub in [this repository](https://github.com/azure/azure-documentdb-datamigrationtool). You can download and compile the solution locally then run either: + The migration tool source code is available on GitHub in [this repository](https://github.com/Azure/azure-documentdb-datamigrationtool/tree/archive). You can download and compile the solution locally then run either: * **Dtui.exe**: Graphical interface version of the tool * **Dt.exe**: Command-line version of the tool diff --git a/articles/cosmos-db/includes/capacity-planner-modes.md b/articles/cosmos-db/includes/capacity-planner-modes.md index 0afb9c27c1c0d..3bdfe715752f7 100644 --- a/articles/cosmos-db/includes/capacity-planner-modes.md +++ b/articles/cosmos-db/includes/capacity-planner-modes.md @@ -1,9 +1,10 @@ --- - author: SnehaGunda + author: seesharprun ms.service: cosmos-db ms.topic: include ms.date: 04/21/2021 - ms.author: sngun + ms.author: sidandrews +ms.reviewer: mjbrown --- ## Capacity planner modes diff --git a/articles/cosmos-db/includes/cosmos-db-create-azure-service-account.md b/articles/cosmos-db/includes/cosmos-db-create-azure-service-account.md index e73fb1da3aee9..b39c53e89493e 100644 --- a/articles/cosmos-db/includes/cosmos-db-create-azure-service-account.md +++ b/articles/cosmos-db/includes/cosmos-db-create-azure-service-account.md @@ -2,11 +2,12 @@ title: "include file" description: "include file" services: cosmos-db -author: WilliamDAssafMSFT +author: seesharprun ms.service: cosmos-db ms.topic: "include" ms.date: 07/23/2020 -ms.author: wiassaf +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: "include file" --- You can work with tables using the Azure Table storage or the Azure Cosmos DB. To learn more about the differences between table offerings in these two services, see the [Table offerings](../table/introduction.md#table-offerings) article. You'll need to create an account for the service you're going to use. The following sections show how to create both Azure Table storage and the Azure Cosmos DB account, however you can just use one of them. 
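If you prefer the command line to the portal steps in the sections that follow, here's a hedged Azure CLI sketch of the two options. The account names are placeholders, and the parameters shown are assumptions to verify against `az cosmosdb create --help` and `az storage account create --help`.

```azurecli
# Option 1 (sketch): Azure Cosmos DB account with the Table API enabled.
az cosmosdb create \
    --resource-group myResourceGroup \
    --name mycosmos-table-account \
    --capabilities EnableTable

# Option 2 (sketch): classic Azure Table storage via a general-purpose storage account.
az storage account create \
    --resource-group myResourceGroup \
    --name mytablestorage123 \
    --sku Standard_LRS
```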
diff --git a/articles/cosmos-db/includes/cosmos-db-create-collection.md b/articles/cosmos-db/includes/cosmos-db-create-collection.md index 69affc04d756b..c953a4a35813e 100644 --- a/articles/cosmos-db/includes/cosmos-db-create-collection.md +++ b/articles/cosmos-db/includes/cosmos-db-create-collection.md @@ -2,11 +2,12 @@ title: include file description: include file services: cosmos-db - author: SnehaGunda + author: seesharprun ms.service: cosmos-db ms.topic: include ms.date: 04/13/2018 - ms.author: sngun + ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: include file --- diff --git a/articles/cosmos-db/includes/cosmos-db-create-dbaccount-cassandra.md b/articles/cosmos-db/includes/cosmos-db-create-dbaccount-cassandra.md index 6988d01ba401b..d01d17120d739 100644 --- a/articles/cosmos-db/includes/cosmos-db-create-dbaccount-cassandra.md +++ b/articles/cosmos-db/includes/cosmos-db-create-dbaccount-cassandra.md @@ -2,11 +2,12 @@ title: include file description: include file services: cosmos-db - author: SnehaGunda + author: seesharprun ms.service: cosmos-db ms.topic: include ms.date: 07/02/2021 - ms.author: sngun + ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: include file --- diff --git a/articles/cosmos-db/includes/cosmos-db-create-dbaccount-graph.md b/articles/cosmos-db/includes/cosmos-db-create-dbaccount-graph.md index c83237fdb7e0b..778febe53e6f0 100644 --- a/articles/cosmos-db/includes/cosmos-db-create-dbaccount-graph.md +++ b/articles/cosmos-db/includes/cosmos-db-create-dbaccount-graph.md @@ -2,11 +2,12 @@ title: include file description: include file services: cosmos-db - author: SnehaGunda + author: seesharprun ms.service: cosmos-db ms.topic: include ms.date: 07/02/2021 - ms.author: sngun + ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: include file --- diff --git a/articles/cosmos-db/includes/cosmos-db-create-dbaccount-table.md b/articles/cosmos-db/includes/cosmos-db-create-dbaccount-table.md index ad28306624220..4ef8115121505 100644 --- a/articles/cosmos-db/includes/cosmos-db-create-dbaccount-table.md +++ b/articles/cosmos-db/includes/cosmos-db-create-dbaccount-table.md @@ -2,11 +2,12 @@ title: include file description: include file services: cosmos-db - author: SnehaGunda + author: seesharprun ms.service: cosmos-db ms.topic: include ms.date: 07/02/2021 - ms.author: sngun + ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: include file --- diff --git a/articles/cosmos-db/includes/cosmos-db-create-dbaccount.md b/articles/cosmos-db/includes/cosmos-db-create-dbaccount.md index eec8403ad34c1..ddebefde9566e 100644 --- a/articles/cosmos-db/includes/cosmos-db-create-dbaccount.md +++ b/articles/cosmos-db/includes/cosmos-db-create-dbaccount.md @@ -1,6 +1,7 @@ --- - author: SnehaGunda - ms.author: sngun + author: seesharprun + ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: include ms.date: 07/02/2021 diff --git a/articles/cosmos-db/includes/cosmos-db-create-graph.md b/articles/cosmos-db/includes/cosmos-db-create-graph.md index 52062e7c31462..1f7de3d115e2c 100644 --- a/articles/cosmos-db/includes/cosmos-db-create-graph.md +++ b/articles/cosmos-db/includes/cosmos-db-create-graph.md @@ -2,11 +2,12 @@ title: include file description: include file services: cosmos-db - author: SnehaGunda + author: seesharprun ms.service: cosmos-db ms.topic: include ms.date: 04/13/2018 - ms.author: sngun + ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: include file --- diff --git a/articles/cosmos-db/includes/cosmos-db-create-sql-api-add-sample-data.md 
b/articles/cosmos-db/includes/cosmos-db-create-sql-api-add-sample-data.md index 00f56fb79a295..299a43221d335 100644 --- a/articles/cosmos-db/includes/cosmos-db-create-sql-api-add-sample-data.md +++ b/articles/cosmos-db/includes/cosmos-db-create-sql-api-add-sample-data.md @@ -2,11 +2,12 @@ title: include file description: include file services: cosmos-db - author: SnehaGunda + author: seesharprun ms.service: cosmos-db ms.topic: include ms.date: 08/07/2019 - ms.author: sngun + ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: include file --- You can now add data to your new container using Data Explorer. diff --git a/articles/cosmos-db/includes/cosmos-db-create-sql-api-query-data.md b/articles/cosmos-db/includes/cosmos-db-create-sql-api-query-data.md index e556088daaef3..c3617d23fd4b4 100644 --- a/articles/cosmos-db/includes/cosmos-db-create-sql-api-query-data.md +++ b/articles/cosmos-db/includes/cosmos-db-create-sql-api-query-data.md @@ -2,11 +2,12 @@ title: include file description: include file services: cosmos-db - author: SnehaGunda + author: seesharprun ms.service: cosmos-db ms.topic: include ms.date: 04/05/2019 - ms.author: sngun + ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: include file --- You can use queries in Data Explorer to retrieve and filter your data. diff --git a/articles/cosmos-db/includes/cosmos-db-create-table-add-sample-data.md b/articles/cosmos-db/includes/cosmos-db-create-table-add-sample-data.md index e2b2cae129000..dd004785c27ff 100644 --- a/articles/cosmos-db/includes/cosmos-db-create-table-add-sample-data.md +++ b/articles/cosmos-db/includes/cosmos-db-create-table-add-sample-data.md @@ -2,11 +2,12 @@ title: include file description: include file services: cosmos-db - author: SnehaGunda + author: seesharprun ms.service: cosmos-db ms.topic: include ms.date: 04/13/2018 - ms.author: sngun + ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: include file --- You can now add data to your new table using Data Explorer. diff --git a/articles/cosmos-db/includes/cosmos-db-create-table.md b/articles/cosmos-db/includes/cosmos-db-create-table.md index 149abe0d74158..d6da2a8e95377 100644 --- a/articles/cosmos-db/includes/cosmos-db-create-table.md +++ b/articles/cosmos-db/includes/cosmos-db-create-table.md @@ -2,11 +2,12 @@ title: include file description: include file services: cosmos-db - author: SnehaGunda + author: seesharprun ms.service: cosmos-db ms.topic: include ms.date: 04/13/2018 - ms.author: sngun + ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: include file --- diff --git a/articles/cosmos-db/includes/cosmos-db-delete-resource-group.md b/articles/cosmos-db/includes/cosmos-db-delete-resource-group.md index 517eb9bb4d127..1d79a322cda57 100644 --- a/articles/cosmos-db/includes/cosmos-db-delete-resource-group.md +++ b/articles/cosmos-db/includes/cosmos-db-delete-resource-group.md @@ -1,9 +1,10 @@ --- -author: WilliamDAssafMSFT +author: seesharprun ms.service: cosmos-db ms.topic: include ms.date: 01/22/2020 -ms.author: wiassaf +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: seo-java-september2019 --- When you're done with your app and Azure Cosmos DB account, you can delete the Azure resources you created so you don't incur more charges. 
To delete the resources: diff --git a/articles/cosmos-db/includes/cosmos-db-emulator-docdb-api.md b/articles/cosmos-db/includes/cosmos-db-emulator-docdb-api.md index 753d596986a5a..46714510551e7 100644 --- a/articles/cosmos-db/includes/cosmos-db-emulator-docdb-api.md +++ b/articles/cosmos-db/includes/cosmos-db-emulator-docdb-api.md @@ -3,7 +3,7 @@ title: "include file" description: "include file" services: cosmos-db documentationcenter: '' -author: WilliamDAssafMSFT +author: seesharprun manager: kfile editor: '' tags: '' @@ -14,7 +14,8 @@ ms.topic: "include" ms.tgt_pltfrm: na ms.workload: '' ms.date: 04/13/2018 -ms.author: wiassaf +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: "include file" --- diff --git a/articles/cosmos-db/includes/cosmos-db-emulator-mongodb.md b/articles/cosmos-db/includes/cosmos-db-emulator-mongodb.md index beb8b1c8e3c44..758f4897bda61 100644 --- a/articles/cosmos-db/includes/cosmos-db-emulator-mongodb.md +++ b/articles/cosmos-db/includes/cosmos-db-emulator-mongodb.md @@ -1,9 +1,10 @@ --- -author: WilliamDAssafMSFT +author: seesharprun ms.service: cosmos-db ms.topic: include ms.date: 11/09/2018 -ms.author: wiassaf +ms.author: sidandrews +ms.reviewer: mjbrown --- Alternatively, you can [Try Azure Cosmos DB for free](https://azure.microsoft.com/try/cosmosdb/) without an Azure subscription, free of charge and commitments. Or you can use the [Azure Cosmos DB Emulator](../local-emulator.md) for this tutorial with a connection string of: diff --git a/articles/cosmos-db/includes/cosmos-db-keys.md b/articles/cosmos-db/includes/cosmos-db-keys.md index e4af110406e78..f8d506ac80d33 100644 --- a/articles/cosmos-db/includes/cosmos-db-keys.md +++ b/articles/cosmos-db/includes/cosmos-db-keys.md @@ -1,9 +1,10 @@ --- -author: WilliamDAssafMSFT +author: seesharprun ms.service: cosmos-db ms.topic: include ms.date: 11/09/2018 -ms.author: wiassaf +ms.author: sidandrews +ms.reviewer: mjbrown --- Go to the Azure Cosmos DB account page, and select **Keys**. Copy the values to use in the web application you create next. diff --git a/articles/cosmos-db/includes/cosmos-db-sdk-faq.md b/articles/cosmos-db/includes/cosmos-db-sdk-faq.md index a9e8389145ab9..503981352d052 100644 --- a/articles/cosmos-db/includes/cosmos-db-sdk-faq.md +++ b/articles/cosmos-db/includes/cosmos-db-sdk-faq.md @@ -1,9 +1,10 @@ --- -author: WilliamDAssafMSFT +author: seesharprun ms.service: cosmos-db ms.topic: include ms.date: 8/12/2020 -ms.author: wiassaf +ms.author: sidandrews +ms.reviewer: mjbrown --- **How will I be notified of the retiring SDK?** @@ -26,6 +27,7 @@ New features and updates will be added only to the latest minor version of the l We recommend that you upgrade to the latest SDK as early as possible. After an SDK is tagged for retirement, you'll have 12 months to update your application. If you're not able to update by the retirement date, requests sent from the retired versions of the SDK will continue to be served by Azure Cosmos DB, so your running applications will continue to function. But Azure Cosmos DB will no longer make bug fixes, add new features, or provide support to the retired SDK versions. If you have a support plan and require technical support, [contact us](https://portal.azure.com/#blade/Microsoft_Azure_Support/HelpAndSupportBlade/overview) by filing a support ticket. - +**How can I request features be added to an SDK or connector?** +New features are not always added to every SDK or connector immediately. 
If there is a feature not supported that you would like added, please add feedback to our [community forum](https://feedback.azure.com/d365community/forum/3002b3be-0d25-ec11-b6e6-000d3a4f0858). diff --git a/articles/cosmos-db/includes/cosmos-db-tutorial-global-distribution-portal.md b/articles/cosmos-db/includes/cosmos-db-tutorial-global-distribution-portal.md index 61d41ef5fb555..b863f04bcd80d 100644 --- a/articles/cosmos-db/includes/cosmos-db-tutorial-global-distribution-portal.md +++ b/articles/cosmos-db/includes/cosmos-db-tutorial-global-distribution-portal.md @@ -2,8 +2,9 @@ title: Azure Cosmos DB global distribution description: Learn how to replicate data globally with Azure Cosmos DB in the Azure portal services: cosmos-db - author: SnehaGunda - ms.author: sngun + author: seesharprun + ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: include ms.date: 12/26/2018 diff --git a/articles/cosmos-db/includes/cosmos-db-tutorial-review-slas.md b/articles/cosmos-db/includes/cosmos-db-tutorial-review-slas.md index 5935de43ea827..68ebd9549098f 100644 --- a/articles/cosmos-db/includes/cosmos-db-tutorial-review-slas.md +++ b/articles/cosmos-db/includes/cosmos-db-tutorial-review-slas.md @@ -1,9 +1,10 @@ --- -author: WilliamDAssafMSFT +author: seesharprun ms.service: cosmos-db ms.topic: include ms.date: 03/22/2019 -ms.author: wiassaf +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: seo-java-september2019 --- The Azure portal monitors your Cosmos DB account throughput, storage, availability, latency, and consistency. Charts for metrics associated with an [Azure Cosmos DB Service Level Agreement (SLA)](https://azure.microsoft.com/support/legal/sla/cosmos-db/) show the SLA value compared to actual performance. This suite of metrics makes monitoring your SLAs transparent. diff --git a/articles/cosmos-db/index-overview.md b/articles/cosmos-db/index-overview.md index 20556b153ee99..1c86c61b73a36 100644 --- a/articles/cosmos-db/index-overview.md +++ b/articles/cosmos-db/index-overview.md @@ -1,12 +1,13 @@ --- title: Indexing in Azure Cosmos DB description: Understand how indexing works in Azure Cosmos DB, different types of indexes such as Range, Spatial, composite indexes supported. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 08/26/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # Indexing in Azure Cosmos DB - Overview diff --git a/articles/cosmos-db/index-policy.md b/articles/cosmos-db/index-policy.md index d1f878e77f179..1ad038b8446ae 100644 --- a/articles/cosmos-db/index-policy.md +++ b/articles/cosmos-db/index-policy.md @@ -1,12 +1,13 @@ --- title: Azure Cosmos DB indexing policies description: Learn how to configure and change the default indexing policy for automatic indexing and greater performance in Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 12/07/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # Indexing policies in Azure Cosmos DB diff --git a/articles/cosmos-db/index.yml b/articles/cosmos-db/index.yml index ab080477b2397..bd647998d5aa5 100644 --- a/articles/cosmos-db/index.yml +++ b/articles/cosmos-db/index.yml @@ -10,8 +10,9 @@ metadata: ms.service: cosmos-db #Required; service per approved list. service slug assigned to your service by ACOM. 
ms.topic: landing-page # Required ms.collection: collection - author: SnehaGunda #Required; your GitHub user alias, with correct capitalization. - ms.author: sngun #Required; microsoft alias of author; optional team alias. + author: seesharprun #Required; your GitHub user alias, with correct capitalization. + ms.author: sidandrews + ms.reviewer: mjbrown #Required; microsoft alias of author; optional team alias. ms.date: 09/10/2019 #Required; mm/dd/yyyy format. # linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new diff --git a/articles/cosmos-db/integrated-cache-faq.md b/articles/cosmos-db/integrated-cache-faq.md index 98ddcff80d641..b8e5b9c603ecc 100644 --- a/articles/cosmos-db/integrated-cache-faq.md +++ b/articles/cosmos-db/integrated-cache-faq.md @@ -1,12 +1,13 @@ --- title: Azure Cosmos DB integrated cache frequently asked questions description: Frequently asked questions about the Azure Cosmos DB integrated cache. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 09/20/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # Azure Cosmos DB integrated cache frequently asked questions diff --git a/articles/cosmos-db/integrated-cache.md b/articles/cosmos-db/integrated-cache.md index f15a42a031a3d..8f766e2b0e54a 100644 --- a/articles/cosmos-db/integrated-cache.md +++ b/articles/cosmos-db/integrated-cache.md @@ -1,12 +1,13 @@ --- title: Azure Cosmos DB integrated cache description: The Azure Cosmos DB integrated cache is an in-memory cache that helps you ensure manageable costs and low latency as your request volume grows. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 09/28/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # Azure Cosmos DB integrated cache - Overview (Preview) diff --git a/articles/cosmos-db/introduction.md b/articles/cosmos-db/introduction.md index 872d3d00b0f16..194945d9f54be 100644 --- a/articles/cosmos-db/introduction.md +++ b/articles/cosmos-db/introduction.md @@ -1,12 +1,12 @@ --- title: Introduction to Azure Cosmos DB description: Learn about Azure Cosmos DB. This globally distributed multi-model database is built for low latency, elastic scalability, high availability, and offers native support for NoSQL data. -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: overview ms.date: 08/26/2021 -ms.reviewer: wiassaf ms.custom: cosmos-db-video adobe-target: true --- diff --git a/articles/cosmos-db/large-partition-keys.md b/articles/cosmos-db/large-partition-keys.md index a916dbac07c79..ef85d4f2010a0 100644 --- a/articles/cosmos-db/large-partition-keys.md +++ b/articles/cosmos-db/large-partition-keys.md @@ -1,12 +1,13 @@ --- title: Create Azure Cosmos containers with large partition key description: Learn how to create a container in Azure Cosmos DB with large partition key using Azure portal and different SDKs. 
-author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 12/8/2019 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-csharp --- diff --git a/articles/cosmos-db/latest-restore-timestamp-continuous-backup.md b/articles/cosmos-db/latest-restore-timestamp-continuous-backup.md index 5f84f2b188a58..3af6a8fecdfb3 100644 --- a/articles/cosmos-db/latest-restore-timestamp-continuous-backup.md +++ b/articles/cosmos-db/latest-restore-timestamp-continuous-backup.md @@ -7,7 +7,7 @@ ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.date: 04/08/2022 ms.topic: how-to -ms.reviewer: wiassaf +ms.reviewer: mjbrown --- # Latest restorable timestamp for Azure Cosmos DB accounts with continuous backup mode @@ -25,23 +25,23 @@ You can use latest restorable timestamp in the following use cases: * You can get the latest restorable timestamp for a container, database, or an account and use it to trigger the restore. This is the latest timestamp up to which all the data of the specified resource or all its underlying resources has been successfully backed up. -* You can use this API to identify that your data has been successfully backed up before deleting the account. If the timestamp returned by this API is less than the last write timestamp, then it means that there is some data that has not been backed up yet. In such case, you must call this API until the timestamp becomes equal to or greater than the last write timestamp. If an account exists in multiple locations, you must get the latest restorable timestamp in all the locations to make sure that data has been backed up in all regions before deleting the account. +* You can use this API to identify that your data has been successfully backed up before deleting the account. If the timestamp returned by this API is less than the last write timestamp, then it means that there's some data that hasn't been backed up yet. In such a case, you must call this API until the timestamp becomes equal to or greater than the last write timestamp. If an account exists in multiple locations, you must get the latest restorable timestamp in all the locations to make sure that data has been backed up in all regions before deleting the account. * You can use this API to monitor that your data is being backed up on time. This timestamp is generally within a few hundred seconds of the current timestamp, although sometimes it can differ by more. ## Semantics -The latest restorable timestamp for a container is the minimum timestamp upto which all its partitions has taken backup successfully in the given location. This Api calculates the latest restorable timestamp by retrieving the latest backup timestamp for each partition of the given container in given location and returns the minimum of all these timestamps. If the data for all its partitions is backed up and there was no new data written to those partitions, then it will return the maximum of current timestamp and the last data backup timestamp. +The latest restorable timestamp for a container is the minimum timestamp up to which all its partitions have taken a backup successfully in the given location. This API calculates the latest restorable timestamp by retrieving the latest backup timestamp for each partition of the given container in the given location and returns the minimum of all these timestamps.
If the data for all its partitions is backed up and there was no new data written to those partitions, then it will return the maximum of current timestamp and the last data backup timestamp. -If a partition has not taken any backup yet but it has some data to be backed up, then it will return the minimum Unix (epoch) timestamp that is, Jan 1, 1970, midnight UTC (Coordinated Universal Time). In such cases, user must retry until it gives a timestamp greater than epoch timestamp. +If a partition hasn't taken any backup yet but it has some data to be backed up, then it will return the minimum Unix (epoch) timestamp, that is, January 1, 1970, midnight UTC (Coordinated Universal Time). In such cases, the user must retry until the API returns a timestamp greater than the epoch timestamp. ## Latest restorable timestamp calculation -The following example describes the expected outcome of latest restorable timestamp Api in different scenarios. In each scenario, we will discuss about the current log backup state of partition, pending data to be backed up and how it affects the overall latest restorable timestamp calculation for a container. +The following example describes the expected outcome of the latest restorable timestamp API in different scenarios. In each scenario, we'll discuss the current log backup state of the partition, the pending data to be backed up, and how it affects the overall latest restorable timestamp calculation for a container. -Let's say, we have an account which exists in 2 regions (East US and West US). We have a container "cont1" which has 2 partitions (Partition1 and Partition2). If we send a request to get the latest restorable timestamp for this container at timestamp 't3', the overall latest restorable timestamp for this container will be calculated as follows: +Let's say we have an account that exists in two regions (East US and West US). We have a container "cont1" that has two partitions (Partition1 and Partition2). If we send a request to get the latest restorable timestamp for this container at timestamp 't3', the overall latest restorable timestamp for this container will be calculated as follows: -##### Case1: Data for all the partitions has not been backed up yet +##### Case1: Data for all the partitions hasn't been backed up yet *East US Region:* @@ -69,7 +69,7 @@ Let's say, we have an account which exists in 2 regions (East US and West US). W * Partition 2: Last backup time = t3, and all its data is backed up. * Latest restorable timestamp = max (current timestamp, t3, t3) -##### Case3: When one or more partitions has not taken any backup yet +##### Case3: When one or more partitions haven't taken any backup yet *East US Region:* @@ -89,7 +89,7 @@ Yes. This API can be used for account provisioned with continuous backup mode or The log backup data is backed up every 100 seconds. However, in some exceptional cases, backups could be delayed for more than 100 seconds. #### Will restorable timestamp work for deleted accounts? -No. It only applies only to live accounts. You can get the restorable timestamp to trigger the live account restore or monitor that your data is being backed up on time. +No. It applies only to live accounts. You can get the restorable timestamp to trigger the live account restore or monitor that your data is being backed up on time.
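To make the retry guidance above concrete, here is a minimal, illustrative sketch of checking the latest restorable timestamp for a single container from the command line. It assumes a recent Azure CLI version that exposes the ``az cosmosdb sql retrieve-latest-backup-time`` command for continuous-backup accounts; the resource names (``msdocs-rg``, ``msdocs-account``, ``retaildb``, ``cont1``) are placeholders, not values from this article.

```azurecli
# Illustrative sketch: read the latest restorable timestamp for one container
# in one region. Assumes the `retrieve-latest-backup-time` command is available
# in your Azure CLI version; all resource names below are placeholders.
az cosmosdb sql retrieve-latest-backup-time \
    --resource-group msdocs-rg \
    --account-name msdocs-account \
    --database-name retaildb \
    --container-name cont1 \
    --location "East US"

# Compare the returned latest restorable timestamp with your last write
# timestamp, and repeat the call until it is equal to or greater than it.
```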
## Next steps diff --git a/articles/cosmos-db/local-emulator-release-notes.md b/articles/cosmos-db/local-emulator-release-notes.md index f81423d90c08a..28dd85e7d83ce 100644 --- a/articles/cosmos-db/local-emulator-release-notes.md +++ b/articles/cosmos-db/local-emulator-release-notes.md @@ -22,6 +22,12 @@ This article shows the Azure Cosmos DB Emulator released versions and it details ## Release notes +### 2.14.7 (May 9, 2022) + + - This release updates the Azure Cosmos DB Emulator background services to match the latest online functionality of Azure Cosmos DB. In addition to this update, there are a couple of issues that were addressed in this release: + * Update Data Explorer to the latest content and fix a broken link for the quick start sample documentation. + * Add an option to enable the Mongo API version for the Linux Cosmos DB emulator by setting the environment variable "AZURE_COSMOS_EMULATOR_ENABLE_MONGODB_ENDPOINT" in the Docker container settings. Valid settings are: "3.2", "3.6", "4.0" and "4.2" + ### 2.14.6 (March 7, 2022) - This release updates the Azure Cosmos DB Emulator background services to match the latest online functionality of the Azure Cosmos DB. In addition to this update there are couple issues that were addressed in this release: diff --git a/articles/cosmos-db/managed-identity-based-authentication.md b/articles/cosmos-db/managed-identity-based-authentication.md index d5b368b0b4201..c32beba403b45 100644 --- a/articles/cosmos-db/managed-identity-based-authentication.md +++ b/articles/cosmos-db/managed-identity-based-authentication.md @@ -1,220 +1,317 @@ --- -title: How to use a system-assigned managed identity to access Azure Cosmos DB data +title: Use system-assigned managed identities to access Azure Cosmos DB data description: Learn how to configure an Azure Active Directory (Azure AD) system-assigned managed identity (managed service identity) to access keys from Azure Cosmos DB. -author: j-patrick +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to -ms.date: 07/02/2021 -ms.author: justipat -ms.reviewer: sngun +ms.date: 06/01/2022 +ms.author: sidandrews +ms.reviewer: justipat ms.custom: devx-track-csharp, devx-track-azurecli, subject-rbac-steps - --- # Use system-assigned managed identities to access Azure Cosmos DB data -[!INCLUDE[appliesto-sql-api](includes/appliesto-sql-api.md)] - -> [!TIP] -> [Data plane role-based access control (RBAC)](how-to-setup-rbac.md) is now available on Azure Cosmos DB, providing a seamless way to authorize your requests with Azure Active Directory. - -In this article, you'll set up a *robust, key rotation agnostic* solution to access Azure Cosmos DB keys by using [managed identities](../active-directory/managed-identities-azure-resources/services-support-managed-identities.md). The example in this article uses Azure Functions, but you can use any service that supports managed identities. - -You'll learn how to create a function app that can access Azure Cosmos DB data without needing to copy any Azure Cosmos DB keys. The function app will wake up every minute and record the current temperature of an aquarium fish tank. To learn how to set up a timer-triggered function app, see the [Create a function in Azure that is triggered by a timer](../azure-functions/functions-create-scheduled-function.md) article. - -To simplify the scenario, a [Time To Live](./time-to-live.md) setting is already configured to clean up older temperature documents.
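As an aside to the 2.14.7 emulator entry above, enabling the MongoDB endpoint comes down to passing that environment variable when the Linux emulator container starts. The following sketch is illustrative only: the image name and port mappings are assumptions based on the public Linux emulator instructions, and ``4.0`` is just one of the documented values.

```bash
# Illustrative sketch: run the Linux emulator with the MongoDB endpoint enabled
# for wire protocol version 4.0. The image name and port mappings are
# assumptions; adjust them to match your environment.
docker run --detach \
    --publish 8081:8081 \
    --publish 10251-10255:10251-10255 \
    --env AZURE_COSMOS_EMULATOR_ENABLE_MONGODB_ENDPOINT=4.0 \
    mcr.microsoft.com/cosmosdb/linux/azure-cosmos-emulator
```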
- -> [!IMPORTANT] -> Because this approach fetches your account's primary key through the Azure Cosmos DB control plane, it will not work if [a read-only lock has been applied](../azure-resource-manager/management/lock-resources.md) to your account. In this situation, consider using the Azure Cosmos DB [data plane RBAC](how-to-setup-rbac.md) instead. - -## Assign a system-assigned managed identity to a function app - -In this step, you'll assign a system-assigned managed identity to your function app. +[!INCLUDE [appliesto-sql-api](includes/appliesto-sql-api.md)] -1. In the [Azure portal](https://portal.azure.com/), open the **Azure Function** pane and go to your function app. +In this article, you'll set up a *robust, key rotation agnostic* solution to access Azure Cosmos DB keys by using [managed identities](../active-directory/managed-identities-azure-resources/services-support-managed-identities.md) and [data plane role-based access control](how-to-setup-rbac.md). The example in this article uses Azure Functions, but you can use any service that supports managed identities. -1. Open the **Platform features** > **Identity** tab: +You'll learn how to create a function app that can access Azure Cosmos DB data without needing to copy any Azure Cosmos DB keys. The function app will trigger when an HTTP request is made and then list all of the existing databases. - :::image type="content" source="./media/managed-identity-based-authentication/identity-tab-selection.png" alt-text="Screenshot showing Platform features and Identity options for the function app."::: +## Prerequisites -1. On the **Identity** tab, turn **On** the system identity **Status** and select **Save**. The **Identity** pane should look as follows: +- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). +- An existing Azure Cosmos DB SQL API account. [Create an Azure Cosmos DB SQL API account](sql/create-cosmosdb-resources-portal.md) +- An existing Azure Functions function app. [Create your first function in the Azure portal](../azure-functions/functions-create-function-app-portal.md) + - A system-assigned managed identity for the function app. [Add a system-assigned identity](/app-service/overview-managed-identity.md?tabs=cli#add-a-system-assigned-identity) +- [Azure Functions Core Tools](../azure-functions/functions-run-local.md) +- To perform the steps in this article, install the [Azure CLI](/cli/azure/install-azure-cli) and [sign in to Azure](/cli/azure/authenticate-azure-cli). - :::image type="content" source="./media/managed-identity-based-authentication/identity-tab-system-managed-on.png" alt-text="Screenshot showing system identity Status set to On."::: +## Prerequisite check -## Grant access to your Azure Cosmos account - -In this step, you'll assign a role to the function app's system-assigned managed identity. Azure Cosmos DB has multiple built-in roles that you can assign to the managed identity. For this solution, you'll use the following two roles: - -|Built-in role |Description | -|---------|---------| -|[DocumentDB Account Contributor](../role-based-access-control/built-in-roles.md#documentdb-account-contributor)|Can manage Azure Cosmos DB accounts. Allows retrieval of read/write keys. | -|[Cosmos DB Account Reader Role](../role-based-access-control/built-in-roles.md#cosmos-db-account-reader-role)|Can read Azure Cosmos DB account data. Allows retrieval of read keys. 
| - -> [!TIP] -> When you assign roles, assign only the needed access. If your service requires only reading data, then assign the **Cosmos DB Account Reader** role to the managed identity. For more information about the importance of least privilege access, see the [Lower exposure of privileged accounts](../security/fundamentals/identity-management-best-practices.md#lower-exposure-of-privileged-accounts) article. - -In this scenario, the function app will read the temperature of the aquarium, then write back that data to a container in Azure Cosmos DB. Because the function app must write the data, you'll need to assign the **DocumentDB Account Contributor** role. +1. In a terminal or command window, store the names of your Azure Functions function app, Azure Cosmos DB account and resource group as shell variables named ``functionName``, ``cosmosName``, and ``resourceGroupName``. -### Assign the role using Azure portal + ```azurecli-interactive + # Variable for function app name + functionName="msdocs-function-app" + + # Variable for Cosmos DB account name + cosmosName="msdocs-cosmos-app" -1. Sign in to the Azure portal and go to your Azure Cosmos DB account. + # Variable for resource group name + resourceGroupName="msdocs-cosmos-functions-dotnet-identity" + ``` -1. Select **Access control (IAM)**. + > [!NOTE] + > These variables will be re-used in later steps. This example assumes your Azure Cosmos DB account name is ``msdocs-cosmos-app``, your function app name is ``msdocs-function-app`` and your resource group name is ``msdocs-cosmos-functions-dotnet-identity``. -1. Select **Add** > **Add role assignment**. +1. View the function app's properties using the [``az functionapp show``](/cli/azure/functionapp&preserve-view=true#az-functionapp-show) command. - :::image type="content" source="../../includes/role-based-access-control/media/add-role-assignment-menu-generic.png" alt-text="Screenshot that shows Access control (IAM) page with Add role assignment menu open."::: + ```azurecli-interactive + az functionapp show \ + --resource-group $resourceGroupName \ + --name $functionName + ``` -1. On the **Role** tab, select **DocumentDB Account Contributor**. +1. View the properties of the system-assigned managed identity for your function app using [``az webapp identity show``](/cli/azure/webapp/identity#az-webapp-identity-show). -1. On the **Members** tab, select **Managed identity**, and then select **Select members**. + ```azurecli-interactive + az webapp identity show \ + --resource-group $resourceGroupName \ + --name $functionName + ``` -1. Select your Azure subscription. +1. View the Cosmos DB account's properties using [``az cosmosdb show``](/cli/azure/cosmosdb#az-cosmosdb-show). -1. Under **System-assigned managed identity**, select **Function App**, and then select **FishTankTemperatureService**. + ```azurecli-interactive + az cosmosdb show \ + --resource-group $resourceGroupName \ + --name $cosmosName + ``` -1. On the **Review + assign** tab, select **Review + assign** to assign the role. +## Create Cosmos DB SQL API databases -### Assign the role using Azure CLI +In this step, you'll create two databases. -To assign the role by using Azure CLI, open the Azure Cloud Shell and run the following commands: +1. In a terminal or command window, create a new ``products`` database using [``az cosmosdb sql database create``](/cli/azure/cosmosdb/sql/database#az-cosmosdb-sql-database-create). 
-```azurecli-interactive + ```azurecli-interactive + az cosmosdb sql database create \ + --resource-group $resourceGroupName \ + --name products \ + --account-name $cosmosName + ``` -scope=$(az cosmosdb show --name '' --resource-group '' --query id) +1. Create a new ``customers`` database. -principalId=$(az webapp identity show -n '' -g '' --query principalId) + ```azurecli-interactive + az cosmosdb sql database create \ + --resource-group $resourceGroupName \ + --name customers \ + --account-name $cosmosName + ``` -az role assignment create --assignee $principalId --role "DocumentDB Account Contributor" --scope $scope -``` +## Get Cosmos DB SQL API endpoint -## Programmatically access the Azure Cosmos DB keys +In this step, you'll query the document endpoint for the SQL API account. -Now we have a function app that has a system-assigned managed identity with the **DocumentDB Account Contributor** role in the Azure Cosmos DB permissions. The following function app code will get the Azure Cosmos DB keys, create a CosmosClient object, get the temperature of the aquarium, and then save this to Azure Cosmos DB. +1. Use ``az cosmosdb show`` with the **query** parameter set to ``documentEndpoint``. Record the result. You'll use this value in a later step. -This sample uses the [List Keys API](/rest/api/cosmos-db-resource-provider/2021-04-01-preview/database-accounts/list-keys) to access your Azure Cosmos DB account keys. + ```azurecli-interactive + az cosmosdb show \ + --resource-group $resourceGroupName \ + --name $cosmosName \ + --query documentEndpoint -> [!IMPORTANT] -> If you want to [assign the Cosmos DB Account Reader](#grant-access-to-your-azure-cosmos-account) role, you'll need to use the [List Read Only Keys API](/rest/api/cosmos-db-resource-provider/2021-04-01-preview/database-accounts/list-read-only-keys). This will populate just the read-only keys. + cosmosEndpoint=$( + az cosmosdb show \ + --resource-group $resourceGroupName \ + --name $cosmosName \ + --query documentEndpoint \ + --output tsv + ) + + echo $cosmosEndpoint + ``` -The List Keys API returns the `DatabaseAccountListKeysResult` object. This type isn't defined in the C# libraries. The following code shows the implementation of this class: + > [!NOTE] + > This variable will be re-used in a later step. -```csharp -namespace Monitor -{ - public class DatabaseAccountListKeysResult - { - public string primaryMasterKey { get; set; } - public string primaryReadonlyMasterKey { get; set; } - public string secondaryMasterKey { get; set; } - public string secondaryReadonlyMasterKey { get; set; } - } -} -``` - -The example also uses a simple document called "TemperatureRecord," which is defined as follows: +## Grant access to your Azure Cosmos account -```csharp -using System; +In this step, you'll assign a role to the function app's system-assigned managed identity. Azure Cosmos DB has multiple built-in roles that you can assign to the managed identity. For this solution, you'll use the [Cosmos DB Built-in Data Reader](how-to-setup-rbac.md#built-in-role-definitions) role. -namespace Monitor -{ - public class TemperatureRecord +> [!TIP] +> When you assign roles, assign only the needed access. If your service requires only reading data, then assign the **Cosmos DB Built-in Data Reader** role to the managed identity. 
For more information about the importance of least privilege access, see the [Lower exposure of privileged accounts](../security/fundamentals/identity-management-best-practices.md#lower-exposure-of-privileged-accounts) article. + +1. Use ``az cosmosdb show`` with the **query** parameter set to ``id``. Store the result in a shell variable named ``scope``. + + ```azurecli-interactive + scope=$( + az cosmosdb show \ + --resource-group $resourceGroupName \ + --name $cosmosName \ + --query id \ + --output tsv + ) + + echo $scope + ``` + + > [!NOTE] + > This variable will be re-used in a later step. + +1. Use ``az webapp identity show`` with the **query** parameter set to ``principalId``. Store the result in a shell variable named ``principal``. + + ```azurecli-interactive + principal=$( + az webapp identity show \ + --resource-group $resourceGroupName \ + --name $functionName \ + --query principalId \ + --output tsv + ) + + echo $principal + ``` + +1. Create a new JSON object with the configuration of the new custom role. + + ```json { - public string id { get; set; } = Guid.NewGuid().ToString(); - public DateTime RecordTime { get; set; } - public int Temperature { get; set; } + "RoleName": "Read Cosmos Metadata", + "Type": "CustomRole", + "AssignableScopes": ["/"], + "Permissions": [{ + "DataActions": [ + "Microsoft.DocumentDB/databaseAccounts/readMetadata" + ] + }] } -} -``` + ``` -You'll use the [Microsoft.Azure.Services.AppAuthentication](https://www.nuget.org/packages/Microsoft.Azure.Services.AppAuthentication) library to get the system-assigned managed identity token. To learn other ways to get the token and find out more information about the `Microsoft.Azure.Service.AppAuthentication` library, see the [Service-to-service authentication](/dotnet/api/overview/azure/service-to-service-authentication) article. +1. Use [``az role assignment create``](/cli/azure/cosmosdb/sql/role/assignment#az-cosmosdb-sql-role-assignment-create) to assign the ``Cosmos DB Built-in Data Reader`` role to the system-assigned managed identity. + ```azurecli-interactive + az cosmosdb sql role assignment create \ + --resource-group $resourceGroupName \ + --account-name $cosmosName \ + --role-definition-name "Read Cosmos Metadata" \ + --principal-id $principal \ + --scope $scope + ``` -```csharp -using System; -using System.Net.Http; -using System.Net.Http.Headers; -using System.Threading.Tasks; -using Microsoft.Azure.Cosmos; -using Microsoft.Azure.Services.AppAuthentication; -using Microsoft.Azure.WebJobs; -using Microsoft.Extensions.Logging; +## Programmatically access the Azure Cosmos DB keys -namespace Monitor -{ - public static class FishTankTemperatureService +We now have a function app that has a system-assigned managed identity with the **Cosmos DB Built-in Data Reader** role. The following function app will query the Azure Cosmos DB account for a list of databases. + +1. Create a local function project with the ``--dotnet`` parameter in a folder named ``csmsfunc``. Change your shell's directory + + ```azurecli-interactive + func init csmsfunc --dotnet + + cd csmsfunc + ``` + +1. Create a new function with the **template** parameter set to ``httptrigger`` and the **name** set to ``readdatabases``. + + ```azurecli-interactive + func new --template httptrigger --name readdatabases + ``` + +1. Add the [``Azure.Identity``](https://www.nuget.org/packages/Azure.Identity/) and [``Microsoft.Azure.Cosmos``](https://www.nuget.org/packages/Microsoft.Azure.Cosmos/) NuGet package to the .NET project. 
Build the project using [``dotnet build``](/dotnet/core/tools/dotnet-build). + + ```azurecli-interactive + dotnet add package Azure.Identity + + dotnet add package Microsoft.Azure.Cosmos + + dotnet build + ``` + +1. Open the function code in an integrated developer environment (IDE). + + > [!TIP] + > If you are using the Azure CLI locally or in the Azure Cloud Shell, you can open Visual Studio Code. + > + > ```azurecli + > code . + > ``` + > + +1. Replace the code in the **readdatabases.cs** file with this sample function implementation. Save the updated file. + + ```csharp + using System; + using System.Collections.Generic; + using System.Threading.Tasks; + using Azure.Identity; + using Microsoft.AspNetCore.Mvc; + using Microsoft.Azure.Cosmos; + using Microsoft.Azure.WebJobs; + using Microsoft.Azure.WebJobs.Extensions.Http; + using Microsoft.AspNetCore.Http; + using Microsoft.Extensions.Logging; + + namespace csmsfunc { - private static string subscriptionId = - ""; - private static string resourceGroupName = - ""; - private static string accountName = - ""; - private static string cosmosDbEndpoint = - ""; - private static string databaseName = - ""; - private static string containerName = - ""; - - // HttpClient is intended to be instantiated once, rather than per-use. - static readonly HttpClient httpClient = new HttpClient(); - - [FunctionName("FishTankTemperatureService")] - public static async Task Run([TimerTrigger("0 * * * * *")]TimerInfo myTimer, ILogger log) + public static class readdatabases { - log.LogInformation($"Starting temperature monitoring: {DateTime.Now}"); - - // AzureServiceTokenProvider will help us to get the Service Managed token. - var azureServiceTokenProvider = new AzureServiceTokenProvider(); - - // Authenticate to the Azure Resource Manager to get the Service Managed token. - string accessToken = await azureServiceTokenProvider.GetAccessTokenAsync("https://management.azure.com/"); - - // Setup the List Keys API to get the Azure Cosmos DB keys. - string endpoint = $"https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/listKeys?api-version=2019-12-12"; + [FunctionName("readdatabases")] + public static async Task Run( + [HttpTrigger(AuthorizationLevel.Anonymous, "get")] HttpRequest req, + ILogger log) + { + log.LogTrace("Start function"); + + CosmosClient client = new CosmosClient( + accountEndpoint: Environment.GetEnvironmentVariable("COSMOS_ENDPOINT", EnvironmentVariableTarget.Process), + new DefaultAzureCredential() + ); + + using FeedIterator iterator = client.GetDatabaseQueryIterator(); + + List<(string name, string uri)> databases = new(); + while(iterator.HasMoreResults) + { + foreach(DatabaseProperties database in await iterator.ReadNextAsync()) + { + log.LogTrace($"[Database Found]\t{database.Id}"); + databases.Add((database.Id, database.SelfLink)); + } + } + + return new OkObjectResult(databases); + } + } + } + ``` - // Add the access token to request headers. - httpClient.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Bearer", accessToken); +## (Optional) Run the function locally - // Post to the endpoint to get the keys result. - var result = await httpClient.PostAsync(endpoint, new StringContent("")); +In a local environment, the [``DefaultAzureCredential``](/dotnet/api/azure.identity.defaultazurecredential) class will use various local credentials to determine the current identity. 
While running locally isn't required for the how-to, you can develop locally using your own identity or a service principal. - // Get the result back as a DatabaseAccountListKeysResult. - DatabaseAccountListKeysResult keys = await result.Content.ReadFromJsonAsync(); +1. In the **local.settings.json** file, add a new setting named ``COSMOS_ENDPOINT`` in the **Values** object. The value of the setting should be the document endpoint you recorded earlier in this how-to guide. - log.LogInformation("Starting to create the client"); + ```json + ... + "Values": { + ... + "COSMOS_ENDPOINT": "https://msdocs-cosmos-app.documents.azure.com:443/", + ... + } + ... + ``` - CosmosClient client = new CosmosClient(cosmosDbEndpoint, keys.primaryMasterKey); + > [!NOTE] + > This JSON object has been shortened for brevity. This JSON object also includes a sample value that assumes your account name is ``msdocs-cosmos-app``. - log.LogInformation("Client created"); +1. Run the function app - var database = client.GetDatabase(databaseName); - var container = database.GetContainer(containerName); + ```azurecli + func start + ``` - log.LogInformation("Get the temperature."); +## Deploy to Azure - var tempRecord = new TemperatureRecord() { RecordTime = DateTime.UtcNow, Temperature = GetTemperature() }; +Once published, the ``DefaultAzureCredential`` class will use credentials from the environment or a managed identity. For this guide, the system-assigned managed identity will be used as a credential for the [``CosmosClient``](/dotnet/api/microsoft.azure.cosmos.cosmosclient) constructor. - log.LogInformation("Store temperature"); +1. Set the ``COSMOS_ENDPOINT`` setting on the function app already deployed in Azure. - await container.CreateItemAsync(tempRecord); + ```azurecli-interactive + az functionapp config appsettings set \ + --resource-group $resourceGroupName \ + --name $functionName \ + --settings "COSMOS_ENDPOINT=$cosmosEndpoint" + ``` - log.LogInformation($"Ending temperature monitor: {DateTime.Now}"); - } +1. Deploy your function app to Azure by reusing the ``functionName`` shell variable: - private static int GetTemperature() - { - // Fake the temperature sensor for this demo. - Random r = new Random(DateTime.UtcNow.Second); - return r.Next(0, 120); - } - } -} -``` + ```azurecli-interactive + func azure functionapp publish $functionName + ``` -You are now ready to [deploy your function app](../azure-functions/create-first-function-vs-code-csharp.md). +1. [Test your function in the Azure portal](../azure-functions/functions-create-function-app-portal.md#test-the-function). 
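In addition to the portal test linked above, the anonymous HTTP trigger can be exercised directly once the app is published. This is a small illustrative sketch: the hostname reuses the sample app name from this article (``msdocs-function-app``), and the ``/api/readdatabases`` route assumes the default route for an HTTP-triggered function named ``readdatabases``.

```bash
# Illustrative sketch: call the deployed function. The hostname assumes the
# sample app name used earlier in this article and the default /api/<name> route.
curl https://msdocs-function-app.azurewebsites.net/api/readdatabases

# While `func start` is running, the Functions host serves the same function
# locally on port 7071 by default.
curl http://localhost:7071/api/readdatabases
```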
## Next steps diff --git a/articles/cosmos-db/media/managed-identity-based-authentication/identity-tab-selection.png b/articles/cosmos-db/media/managed-identity-based-authentication/identity-tab-selection.png deleted file mode 100644 index 02a3ff2125dd0..0000000000000 Binary files a/articles/cosmos-db/media/managed-identity-based-authentication/identity-tab-selection.png and /dev/null differ diff --git a/articles/cosmos-db/media/managed-identity-based-authentication/identity-tab-system-managed-on.png b/articles/cosmos-db/media/managed-identity-based-authentication/identity-tab-system-managed-on.png deleted file mode 100644 index ddfc588ba4144..0000000000000 Binary files a/articles/cosmos-db/media/managed-identity-based-authentication/identity-tab-system-managed-on.png and /dev/null differ diff --git a/articles/cosmos-db/media/try-free/data-explorer.png b/articles/cosmos-db/media/try-free/data-explorer.png new file mode 100644 index 0000000000000..9e783f6131331 Binary files /dev/null and b/articles/cosmos-db/media/try-free/data-explorer.png differ diff --git a/articles/cosmos-db/media/try-free/migrate-data.png b/articles/cosmos-db/media/try-free/migrate-data.png new file mode 100644 index 0000000000000..d0d5a24134380 Binary files /dev/null and b/articles/cosmos-db/media/try-free/migrate-data.png differ diff --git a/articles/cosmos-db/media/try-free/sign-up-sign-in.png b/articles/cosmos-db/media/try-free/sign-up-sign-in.png new file mode 100644 index 0000000000000..b361a9568df56 Binary files /dev/null and b/articles/cosmos-db/media/try-free/sign-up-sign-in.png differ diff --git a/articles/cosmos-db/media/try-free/try-cosmos-db-page.png b/articles/cosmos-db/media/try-free/try-cosmos-db-page.png new file mode 100644 index 0000000000000..e2b469aee9c6f Binary files /dev/null and b/articles/cosmos-db/media/try-free/try-cosmos-db-page.png differ diff --git a/articles/cosmos-db/media/try-free/upgrade-account.png b/articles/cosmos-db/media/try-free/upgrade-account.png new file mode 100644 index 0000000000000..ef056a138f7a5 Binary files /dev/null and b/articles/cosmos-db/media/try-free/upgrade-account.png differ diff --git a/articles/cosmos-db/merge.md b/articles/cosmos-db/merge.md index 00987208470ae..0ba4b7be43895 100644 --- a/articles/cosmos-db/merge.md +++ b/articles/cosmos-db/merge.md @@ -17,43 +17,51 @@ Merging partitions in Azure Cosmos DB (preview) allows you to reduce the number ## Getting started -To get started using merge, enroll in the preview by filing a support ticket in the [Azure portal](https://portal.azure.com). +To get started using merge, enroll in the preview by submitting a request for the **Azure Cosmos DB Partition Merge** feature via the [**Preview Features** page](../azure-resource-manager/management/preview-features.md) in your Azure Subscription overview page. +- Before submitting your request, verify that your Azure Cosmos DB account(s) meet all the [preview eligibility criteria](#preview-eligibility-criteria). +- The Azure Cosmos DB team will review your request and contact you via email to confirm which account(s) in the subscription you want to enroll in the preview. ### Merging physical partitions -When the parameter `IsDryRun` is set to true, Azure Cosmos DB will run a simulation and return the expected result of the merge, but won't run the merge itself. When set to false, the merge will execute against the resource. 
+ +In PowerShell, when the flag `-WhatIf` is passed in, Azure Cosmos DB will run a simulation and return the expected result of the merge, but won't run the merge itself. When the flag isn't passed in, the merge will execute against the resource. When finished, the command will output the current amount of storage in KB per physical partition post-merge. > [!TIP] > Before running a merge, it's recommended to set your provisioned RU/s (either manual RU/s or autoscale max RU/s) as close as possible to your desired steady state RU/s post-merge, to help ensure the system calculates an efficient partition layout. #### [PowerShell](#tab/azure-powershell) ```azurepowershell +// Add the preview extension +Install-Module -Name Az.CosmosDB -AllowPrerelease -Force + // SQL API -Invoke-AzCosmosDbSqlContainerPartitionMerge ` +Invoke-AzCosmosDBSqlContainerMerge ` -ResourceGroupName "" ` -AccountName "" ` -DatabaseName "" ` - -Name "" - -IsDryRun "" + -Name "" ` + -WhatIf // API for MongoDB -Invoke-AzCosmosDBMongoDBCollectionPartitionMerge ` +Invoke-AzCosmosDBMongoDBCollectionMerge ` -ResourceGroupName "" ` -AccountName "" ` -DatabaseName "" ` - -Name "" - -IsDryRun "" + -Name "" ` + -WhatIf ``` #### [Azure CLI](#tab/azure-cli) ```azurecli +// Add the preview extension +az extension add --name cosmosdb-preview + // SQL API az cosmosdb sql container merge \ --resource-group '' \ --account-name '' \ --database-name '' \ --name '' - --is-dry-run '' // API for MongoDB az cosmosdb mongodb collection merge \ @@ -61,28 +69,56 @@ az cosmosdb mongodb collection merge \ --account-name '' \ --database-name '' \ --name '' - --is-dry-run '' ``` --- +### Monitor merge operations +Partition merge is a long-running operation and there's no SLA on how long it takes to complete. The time depends on the amount of data in the container and the number of physical partitions. It's recommended to allow at least 5-6 hours for merge to complete. + +While partition merge is running on your container, it isn't possible to change the throughput or any container settings (TTL, indexing policy, unique keys, etc.). Wait until the merge operation completes before changing your container settings. + +You can track whether merge is still in progress by checking the **Activity Log** and filtering for the events **Merge the physical partitions of a MongoDB collection** or **Merge the physical partitions of a SQL container**. + ## Limitations -### Account resources and configuration +### Preview eligibility criteria +To enroll in the preview, your Cosmos account must meet all the following criteria: +* Your Cosmos account uses SQL API or API for MongoDB with version >=3.6. +* Your Cosmos account is using provisioned throughput (manual or autoscale). Merge doesn't apply to serverless accounts. + * Currently, merge isn't supported for shared throughput databases. You may enroll an account that has both shared throughput databases and containers with dedicated throughput (manual or autoscale). + * However, only the containers with dedicated throughput will be able to be merged. +* Your Cosmos account is a single-write region account (merge isn't currently supported for multi-region write accounts). 
+* Your Cosmos account doesn't use any of the following features: + * [Point-in-time restore](continuous-backup-restore-introduction.md) + * [Customer-managed keys](how-to-setup-cmk.md) + * [Analytical store](analytical-store-introduction.md) +* Your Cosmos account uses bounded staleness, session, consistent prefix, or eventual consistency (merge isn't currently supported for strong consistency). +* If you're using SQL API, your application must use the Azure Cosmos DB .NET V3 SDK, version 3.27.0 or higher. When merge preview is enabled on your account, all requests sent from non-.NET SDKs or older .NET SDK versions won't be accepted. + * There are no SDK or driver requirements to use the feature with API for MongoDB. +* Your Cosmos account doesn't use any currently unsupported connectors: + * Azure Data Factory + * Azure Stream Analytics + * Logic Apps + * Azure Functions + * Azure Search ### Account resources and configuration +* Merge is only available for SQL API and API for MongoDB accounts. For API for MongoDB accounts, the MongoDB account version must be 3.6 or greater. * Merge is only available for single-region write accounts. Multi-region write account support isn't available. -* Accounts using merge functionality can't also use these features: +* Accounts using merge functionality can't also use these features (if these features are added to a merge-enabled account, resources in the account can no longer be merged): * [Point-in-time restore](continuous-backup-restore-introduction.md) * [Customer-managed keys](how-to-setup-cmk.md) * [Analytical store](analytical-store-introduction.md) * Containers using merge functionality must have their throughput provisioned at the container level. Database-shared throughput support isn't available. -* For API for MongoDB accounts, the MongoDB account version must be 3.6 or greater. +* Merge is only available for accounts using bounded staleness, session, consistent prefix, or eventual consistency. It isn't currently supported for strong consistency. +* After a container has been merged, it isn't possible to read the change feed with a start time. Support for this feature is planned for the future. ### SDK requirements (SQL API only) -Accounts with the merge feature enabled are supported only in the latest preview version of the .NET v3 SDK. When the feature is enabled on your account (regardless of whether you run the merge), you must only use the supported SDK using the account. Requests sent from other SDKs or earlier versions won't be accepted. As long as you're using the supported SDK, your application can continue to run while a merge is ongoing. +Accounts with the merge feature enabled are supported only when you use the latest version of the .NET v3 SDK. When the feature is enabled on your account (regardless of whether you run the merge), you must only use the supported SDK with the account. Requests sent from other SDKs or earlier versions won't be accepted. As long as you're using the supported SDK, your application can continue to run while a merge is ongoing. -Find the latest preview version the supported SDK: +Find the latest version of the supported SDK: | SDK | Supported versions | Package manager link | | --- | --- | --- | @@ -107,6 +143,6 @@ Support for these connectors is planned for the future.
## Next steps -* Learn more about [using Azure CLI with Azure Cosmos DB.](/cli/azure/azure-cli-reference-for-cosmos-db.md) +* Learn more about [using Azure CLI with Azure Cosmos DB.](/cli/azure/azure-cli-reference-for-cosmos-db) * Learn more about [using Azure PowerShell with Azure Cosmos DB.](/powershell/module/az.cosmosdb/) * Learn more about [partitioning in Azure Cosmos DB.](partitioning-overview.md) diff --git a/articles/cosmos-db/migrate-continuous-backup.md b/articles/cosmos-db/migrate-continuous-backup.md index 4fbbc8d6bf4ee..255f1d1914e65 100644 --- a/articles/cosmos-db/migrate-continuous-backup.md +++ b/articles/cosmos-db/migrate-continuous-backup.md @@ -1,26 +1,26 @@ --- title: Migrate an Azure Cosmos DB account from periodic to continuous backup mode -description: Azure Cosmos DB currently supports a one-way migration from periodic to continuous mode and it’s irreversible. After migrating from periodic to continuous mode, you can leverage the benefits of continuous mode. +description: Azure Cosmos DB currently supports a one-way migration from periodic to continuous mode and it’s irreversible. After migrating from periodic to continuous mode, you can apply the benefits of continuous mode. author: kanshiG ms.author: govindk ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.date: 04/08/2022 ms.topic: how-to -ms.reviewer: wiassaf +ms.reviewer: mjbrown --- # Migrate an Azure Cosmos DB account from periodic to continuous backup mode [!INCLUDE[appliesto-all-apis-except-cassandra](includes/appliesto-all-apis-except-cassandra.md)] -Azure Cosmos DB accounts with periodic mode backup policy can be migrated to continuous mode using [Azure portal](#portal), [CLI](#cli), [PowerShell](#powershell), or [Resource Manager templates](#ARM-template). Migration from periodic to continuous mode is a one-way migration and it’s not reversible. After migrating from periodic to continuous mode, you can leverage the benefits of continuous mode. +Azure Cosmos DB accounts with periodic mode backup policy can be migrated to continuous mode using [Azure portal](#portal), [CLI](#cli), [PowerShell](#powershell), or [Resource Manager templates](#ARM-template). Migration from periodic to continuous mode is a one-way migration and it’s not reversible. After migrating from periodic to continuous mode, you can apply the benefits of continuous mode. The following are the key reasons to migrate into continuous mode: * The ability to do self-service restore using Azure portal, CLI, or PowerShell. * The ability to restore at time granularity of the second within the last 30-day window. * The ability to make sure that the backup is consistent across shards or partition key ranges within a period. -* The ability to restore container, database, or the full account when it is deleted or modified. +* The ability to restore container, database, or the full account when it's deleted or modified. * The ability to choose the events on the container, database, or account and decide when to initiate the restore. > [!NOTE] @@ -41,7 +41,7 @@ To perform the migration, you need `Microsoft.DocumentDB/databaseAccounts/write` ## Pricing after migration -After you migrate your account to continuous backup mode, the cost with this mode is different when compared to the periodic backup mode. The continuous mode backup cost is significantly cheaper than periodic mode. To learn more, see the [continuous backup mode pricing](continuous-backup-restore-introduction.md#continuous-backup-pricing) example. 
+After you migrate your account to continuous backup mode, the cost with this mode is different when compared to the periodic backup mode. The continuous mode backup cost is cheaper than periodic mode. To learn more, see the [continuous backup mode pricing](continuous-backup-restore-introduction.md#continuous-backup-pricing) example. ## Migrate using portal @@ -53,7 +53,7 @@ Use the following steps to migrate your account from periodic backup to continuo :::image type="content" source="./media/migrate-continuous-backup/enable-backup-migration.png" alt-text="Migrate to continuous mode using Azure portal" lightbox="./media/migrate-continuous-backup/enable-backup-migration.png"::: -1. When the migration is in progress, the status shows **Pending.** After the it’s complete, the status changes to **On.** Migration time depends on the size of data in your account. +1. When the migration is in progress, the status shows **Pending.** After it’s complete, the status changes to **On.** Migration time depends on the size of data in your account. :::image type="content" source="./media/migrate-continuous-backup/migration-status.png" alt-text="Check the status of migration from Azure portal" lightbox="./media/migrate-continuous-backup/migration-status.png"::: @@ -84,7 +84,7 @@ Install the [latest version of Azure PowerShell](/powershell/azure/install-az-ps * If you already have Azure CLI installed, use `az upgrade` command to upgrade to the latest version. * Alternatively, user can also use Cloud Shell from Azure portal. -1. Log in to your Azure account and run the following command to migrate your account to continuous mode: +1. Sign in to your Azure account and run the following command to migrate your account to continuous mode: ```azurecli-interactive az login @@ -159,7 +159,7 @@ az group deployment create -g --template-file -g --kind MongoDB -- 8. Create a database for users to connect to in the Azure portal. 9. Create an RBAC user with built-in read role. ```powershell -az cosmosdb mongodb user definition create --account-name --resource-group --body {\"Id\":\"testdb.read\",\"UserName\":\"\",\"Password\":\"\",\"DatabaseName\":\"\",\"CustomData\":\"Some_Random_Info\",\"Mechanisms\":\"SCRAM-SHA-256\",\"Roles\":[{\"Role\":\"read\",\"Db\":\"\"}]} +az cosmosdb mongodb user definition create --account-name --resource-group --body {\"Id\":\".\",\"UserName\":\"\",\"Password\":\"\",\"DatabaseName\":\"\",\"CustomData\":\"Some_Random_Info\",\"Mechanisms\":\"SCRAM-SHA-256\",\"Roles\":[{\"Role\":\"read\",\"Db\":\"\"}]} ``` diff --git a/articles/cosmos-db/mongodb/manage-with-bicep.md b/articles/cosmos-db/mongodb/manage-with-bicep.md index e4c313ebd2893..adc8f848e6e2e 100644 --- a/articles/cosmos-db/mongodb/manage-with-bicep.md +++ b/articles/cosmos-db/mongodb/manage-with-bicep.md @@ -1,12 +1,13 @@ --- title: Create and manage MongoDB API for Azure Cosmos DB with Bicep description: Use Bicep to create and configure MongoDB API Azure Cosmos DB API. 
-author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-mongo ms.topic: how-to ms.date: 05/23/2022 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Manage Azure Cosmos DB MongoDB API resources using Bicep diff --git a/articles/cosmos-db/mongodb/powershell-samples.md b/articles/cosmos-db/mongodb/powershell-samples.md index 07ec69617a59a..05ad18b7cb219 100644 --- a/articles/cosmos-db/mongodb/powershell-samples.md +++ b/articles/cosmos-db/mongodb/powershell-samples.md @@ -1,12 +1,13 @@ --- title: Azure PowerShell samples for Azure Cosmos DB API for MongoDB description: Get the Azure PowerShell samples to perform common tasks in Azure Cosmos DB API for MongoDB -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-mongo ms.topic: sample ms.date: 08/26/2021 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Azure PowerShell samples for Azure Cosmos DB API for MongoDB diff --git a/articles/cosmos-db/mongodb/resource-manager-template-samples.md b/articles/cosmos-db/mongodb/resource-manager-template-samples.md index 4c31ebd532861..efbb989316642 100644 --- a/articles/cosmos-db/mongodb/resource-manager-template-samples.md +++ b/articles/cosmos-db/mongodb/resource-manager-template-samples.md @@ -1,12 +1,13 @@ --- title: Resource Manager templates for Azure Cosmos DB API for MongoDB description: Use Azure Resource Manager templates to create and configure Azure Cosmos DB API for MongoDB. -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-mongo ms.topic: how-to ms.date: 05/23/2022 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Manage Azure Cosmos DB MongoDB API resources using Azure Resource Manager templates diff --git a/articles/cosmos-db/mongodb/troubleshoot-query-performance.md b/articles/cosmos-db/mongodb/troubleshoot-query-performance.md index f312a4aff0bb1..b4caec10b5017 100644 --- a/articles/cosmos-db/mongodb/troubleshoot-query-performance.md +++ b/articles/cosmos-db/mongodb/troubleshoot-query-performance.md @@ -7,7 +7,7 @@ ms.subservice: cosmosdb-mongo ms.date: 08/26/2021 author: gahl-levy ms.author: gahllevy -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Troubleshoot query issues when using the Azure Cosmos DB API for MongoDB diff --git a/articles/cosmos-db/mongodb/tutorial-develop-mongodb-react.md b/articles/cosmos-db/mongodb/tutorial-develop-mongodb-react.md index b856fbb960831..c5675c47d99bc 100644 --- a/articles/cosmos-db/mongodb/tutorial-develop-mongodb-react.md +++ b/articles/cosmos-db/mongodb/tutorial-develop-mongodb-react.md @@ -8,7 +8,7 @@ ms.devlang: javascript ms.topic: tutorial ms.date: 08/26/2021 ms.author: jopapa -ms.reviewer: sngun +ms.reviewer: mjbrown ms.custom: devx-track-js --- # Create a MongoDB app with React and Azure Cosmos DB diff --git a/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-1.md b/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-1.md index 6af1aeb6b5a10..442959752fcb7 100644 --- a/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-1.md +++ b/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-1.md @@ -9,7 +9,7 @@ ms.topic: tutorial ms.date: 08/26/2021 ms.author: jopapa ms.custom: seodec18 -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Create an Angular app with Azure Cosmos DB's API for MongoDB [!INCLUDE[appliesto-mongodb-api](../includes/appliesto-mongodb-api.md)] diff --git a/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-2.md 
b/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-2.md index 1b0292dc3f7a9..e70db6270613f 100644 --- a/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-2.md +++ b/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-2.md @@ -9,7 +9,7 @@ ms.topic: tutorial ms.date: 08/26/2021 ms.author: jopapa ms.custom: seodec18 -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Create an Angular app with Azure Cosmos DB's API for MongoDB - Create a Node.js Express app [!INCLUDE[appliesto-mongodb-api](../includes/appliesto-mongodb-api.md)] diff --git a/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-3.md b/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-3.md index 4332d790941a7..c91c6922fe004 100644 --- a/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-3.md +++ b/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-3.md @@ -9,7 +9,7 @@ ms.topic: tutorial ms.date: 08/26/2021 ms.author: jopapa ms.custom: seodec18, devx-track-js -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Create an Angular app with Azure Cosmos DB's API for MongoDB - Build the UI with Angular [!INCLUDE[appliesto-mongodb-api](../includes/appliesto-mongodb-api.md)] diff --git a/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-4.md b/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-4.md index d407c6af6c257..f18d36c28b43e 100644 --- a/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-4.md +++ b/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-4.md @@ -9,7 +9,7 @@ ms.topic: tutorial ms.date: 08/26/2021 ms.author: jopapa ms.custom: seodec18, devx-track-js, devx-track-azurecli -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Create an Angular app with Azure Cosmos DB's API for MongoDB - Create a Cosmos account diff --git a/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-5.md b/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-5.md index 8cfb42fda51e5..937ca1b55d5d2 100644 --- a/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-5.md +++ b/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-5.md @@ -9,7 +9,7 @@ ms.topic: tutorial ms.date: 08/26/2021 ms.author: jopapa ms.custom: seodec18, devx-track-js -ms.reviewer: sngun +ms.reviewer: mjbrown #Customer intent: As a developer, I want to build a Node.js application, so that I can manage the data stored in Cosmos DB. 
--- diff --git a/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-6.md b/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-6.md index 0bab8f73face3..f6053f41a51cc 100644 --- a/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-6.md +++ b/articles/cosmos-db/mongodb/tutorial-develop-nodejs-part-6.md @@ -9,7 +9,7 @@ ms.topic: tutorial ms.date: 08/26/2021 ms.author: jopapa ms.custom: seodec18, devx-track-js -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Create an Angular app with Azure Cosmos DB's API for MongoDB - Add CRUD functions to the app [!INCLUDE[appliesto-mongodb-api](../includes/appliesto-mongodb-api.md)] diff --git a/articles/cosmos-db/mongodb/tutorial-global-distribution-mongodb.md b/articles/cosmos-db/mongodb/tutorial-global-distribution-mongodb.md index 9e52cfec1499a..bf0a2b1bf2c71 100644 --- a/articles/cosmos-db/mongodb/tutorial-global-distribution-mongodb.md +++ b/articles/cosmos-db/mongodb/tutorial-global-distribution-mongodb.md @@ -7,7 +7,7 @@ ms.service: cosmos-db ms.subservice: cosmosdb-mongo ms.topic: tutorial ms.date: 08/26/2021 -ms.reviewer: sngun +ms.reviewer: mjbrown ms.devlang: csharp ms.custom: devx-track-csharp diff --git a/articles/cosmos-db/mongodb/tutorial-mongotools-cosmos-db.md b/articles/cosmos-db/mongodb/tutorial-mongotools-cosmos-db.md index 02aa99db79499..ed8401de03df6 100644 --- a/articles/cosmos-db/mongodb/tutorial-mongotools-cosmos-db.md +++ b/articles/cosmos-db/mongodb/tutorial-mongotools-cosmos-db.md @@ -1,13 +1,13 @@ --- title: Migrate MongoDB offline to Azure Cosmos DB API for MongoDB, using MongoDB native tools description: Learn how MongoDB native tools can be used to migrate small datasets from MongoDB instances to Azure Cosmos DB -author: rothja -ms.author: jroth +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: tutorial ms.date: 08/26/2021 -ms.reviewer: sngun --- # Tutorial: Migrate MongoDB to Azure Cosmos DB's API for MongoDB offline using MongoDB native tools diff --git a/articles/cosmos-db/mongodb/tutorial-query-mongodb.md b/articles/cosmos-db/mongodb/tutorial-query-mongodb.md index 1f4df97eafcb9..90d451e20c02b 100644 --- a/articles/cosmos-db/mongodb/tutorial-query-mongodb.md +++ b/articles/cosmos-db/mongodb/tutorial-query-mongodb.md @@ -7,7 +7,7 @@ ms.service: cosmos-db ms.subservice: cosmosdb-mongo ms.topic: tutorial ms.date: 12/03/2019 -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Query data by using Azure Cosmos DB's API for MongoDB diff --git a/articles/cosmos-db/monitor-cosmos-db.md b/articles/cosmos-db/monitor-cosmos-db.md index c51f54c4e5d25..8efaff0fb3fd1 100644 --- a/articles/cosmos-db/monitor-cosmos-db.md +++ b/articles/cosmos-db/monitor-cosmos-db.md @@ -135,14 +135,14 @@ Azure Cosmos DB stores data in the following tables. ### Sample Kusto queries -Prior to using Log Analytics to issue Kusto queries, you must [enable diagnostic logs for control plane operations](/azure/cosmos-db/audit-control-plane-logs#enable-diagnostic-logs-for-control-plane-operations). When enabling diagnostic logs, you will select between storing your data in a single [AzureDiagnostics table (legacy)](/azure/azure-monitor/essentials/resource-logs#azure-diagnostics-mode) or [resource-specific tables](/azure/azure-monitor/essentials/resource-logs#resource-specific). +Prior to using Log Analytics to issue Kusto queries, you must [enable diagnostic logs for control plane operations](./audit-control-plane-logs.md#enable-diagnostic-logs-for-control-plane-operations). 
When enabling diagnostic logs, you will select between storing your data in a single [AzureDiagnostics table (legacy)](../azure-monitor/essentials/resource-logs.md#azure-diagnostics-mode) or [resource-specific tables](../azure-monitor/essentials/resource-logs.md#resource-specific). When you select **Logs** from the Azure Cosmos DB menu, Log Analytics is opened with the query scope set to the current Azure Cosmos DB account. Log queries will only include data from that resource. > [!IMPORTANT] > If you want to run a query that includes data from other accounts or data from other Azure services, select **Logs** from the **Azure Monitor** menu. For more information, see [Log query scope and time range in Azure Monitor Log Analytics](../azure-monitor/logs/scope.md). -Here are some queries that you can enter into the **Log search** search bar to help you monitor your Azure Cosmos resources. The exact text of the queries will depend on the [collection mode](/azure/azure-monitor/essentials/resource-logs#select-the-collection-mode) you selected when you enabled diagnostics logs. +Here are some queries that you can enter into the **Log search** search bar to help you monitor your Azure Cosmos resources. The exact text of the queries will depend on the [collection mode](../azure-monitor/essentials/resource-logs.md#select-the-collection-mode) you selected when you enabled diagnostics logs. #### [AzureDiagnostics table (legacy)](#tab/azure-diagnostics) @@ -270,4 +270,4 @@ To learn more, see the [Azure monitoring REST API](../azure-monitor/essentials/r ## Next steps * See [Azure Cosmos DB monitoring data reference](monitor-cosmos-db-reference.md) for a reference of the logs and metrics created by Azure Cosmos DB. -* See [Monitoring Azure resources with Azure Monitor](../azure-monitor/essentials/monitor-azure-resource.md) for details on monitoring Azure resources. +* See [Monitoring Azure resources with Azure Monitor](../azure-monitor/essentials/monitor-azure-resource.md) for details on monitoring Azure resources. \ No newline at end of file diff --git a/articles/cosmos-db/online-backup-and-restore.md b/articles/cosmos-db/online-backup-and-restore.md index 88e5479d2ed9b..1d844d0efb0fe 100644 --- a/articles/cosmos-db/online-backup-and-restore.md +++ b/articles/cosmos-db/online-backup-and-restore.md @@ -6,7 +6,7 @@ ms.service: cosmos-db ms.topic: how-to ms.date: 11/15/2021 ms.author: govindk -ms.reviewer: sngun +ms.reviewer: mjbrown --- diff --git a/articles/cosmos-db/optimize-cost-reads-writes.md b/articles/cosmos-db/optimize-cost-reads-writes.md index c38b6c8bb598d..d6f8e406cadaf 100644 --- a/articles/cosmos-db/optimize-cost-reads-writes.md +++ b/articles/cosmos-db/optimize-cost-reads-writes.md @@ -1,8 +1,9 @@ --- title: Optimizing the cost of your requests in Azure Cosmos DB description: This article explains how to optimize costs when issuing requests on Azure Cosmos DB. -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: conceptual ms.date: 08/26/2021 diff --git a/articles/cosmos-db/optimize-cost-regions.md b/articles/cosmos-db/optimize-cost-regions.md index dc45aef1c0291..708b238dd6588 100644 --- a/articles/cosmos-db/optimize-cost-regions.md +++ b/articles/cosmos-db/optimize-cost-regions.md @@ -1,8 +1,9 @@ --- title: Optimize cost for multi-region deployments in Azure Cosmos DB description: This article explains how to manage costs of multi-region deployments in Azure Cosmos DB. 
-author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: conceptual ms.date: 08/26/2021 diff --git a/articles/cosmos-db/optimize-cost-storage.md b/articles/cosmos-db/optimize-cost-storage.md index cdb3980a2e5cb..0121cd7f01615 100644 --- a/articles/cosmos-db/optimize-cost-storage.md +++ b/articles/cosmos-db/optimize-cost-storage.md @@ -1,8 +1,9 @@ --- title: Optimize storage cost in Azure Cosmos DB description: This article explains how to manage storage costs for the data stored in Azure Cosmos DB -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: conceptual ms.date: 08/26/2021 diff --git a/articles/cosmos-db/optimize-cost-throughput.md b/articles/cosmos-db/optimize-cost-throughput.md index 57025e657dfe9..5482f1f782975 100644 --- a/articles/cosmos-db/optimize-cost-throughput.md +++ b/articles/cosmos-db/optimize-cost-throughput.md @@ -1,8 +1,9 @@ --- title: Optimizing throughput cost in Azure Cosmos DB description: This article explains how to optimize throughput costs for the data stored in Azure Cosmos DB. -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: conceptual ms.date: 08/26/2021 diff --git a/articles/cosmos-db/optimize-dev-test.md b/articles/cosmos-db/optimize-dev-test.md index b182155768e7b..89330298164e2 100644 --- a/articles/cosmos-db/optimize-dev-test.md +++ b/articles/cosmos-db/optimize-dev-test.md @@ -1,8 +1,9 @@ --- title: Optimizing for development and testing in Azure Cosmos DB description: This article explains how Azure Cosmos DB offers multiple options for development and testing of the service for free. -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: conceptual ms.date: 08/26/2021 diff --git a/articles/cosmos-db/partitioning-overview.md b/articles/cosmos-db/partitioning-overview.md index 469b4c5abc064..0f5024769b726 100644 --- a/articles/cosmos-db/partitioning-overview.md +++ b/articles/cosmos-db/partitioning-overview.md @@ -7,7 +7,7 @@ ms.service: cosmos-db ms.topic: conceptual ms.date: 03/24/2022 ms.custom: cosmos-db-video -ms.reviewer: wiassaf +ms.reviewer: mjbrown --- # Partitioning and horizontal scaling in Azure Cosmos DB diff --git a/articles/cosmos-db/partners-migration-cosmosdb.md b/articles/cosmos-db/partners-migration-cosmosdb.md index cbbcf94d12e64..1069bf31bb412 100644 --- a/articles/cosmos-db/partners-migration-cosmosdb.md +++ b/articles/cosmos-db/partners-migration-cosmosdb.md @@ -1,8 +1,9 @@ --- title: Migration and application development partners for Azure Cosmos DB description: Lists Microsoft partners with migration solutions that support Azure Cosmos DB. -author: rothja -ms.author: jroth +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: conceptual ms.date: 08/26/2021 diff --git a/articles/cosmos-db/plan-manage-costs.md b/articles/cosmos-db/plan-manage-costs.md index 1b5f036e5ada0..1e24cd69ddee7 100644 --- a/articles/cosmos-db/plan-manage-costs.md +++ b/articles/cosmos-db/plan-manage-costs.md @@ -1,8 +1,9 @@ --- title: Plan and manage costs for Azure Cosmos DB description: Learn how to plan for and manage costs for Azure Cosmos DB by using cost analysis in Azure portal. 
-author: timsander1 -ms.author: tisande +author: seesharprun +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: subject-cost-optimization, ignite-fall-2021 ms.service: cosmos-db ms.topic: conceptual diff --git a/articles/cosmos-db/policy-reference.md b/articles/cosmos-db/policy-reference.md index c354ebd3f933a..63a8cb03c7f58 100644 --- a/articles/cosmos-db/policy-reference.md +++ b/articles/cosmos-db/policy-reference.md @@ -3,8 +3,9 @@ title: Built-in policy definitions for Azure Cosmos DB description: Lists Azure Policy built-in policy definitions for Azure Cosmos DB. These built-in policy definitions provide common approaches to managing your Azure resources. ms.date: 05/11/2022 ms.topic: reference -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.custom: subject-policy-reference --- diff --git a/articles/cosmos-db/policy.md b/articles/cosmos-db/policy.md index 21de79f97e746..736af5a71a1f7 100644 --- a/articles/cosmos-db/policy.md +++ b/articles/cosmos-db/policy.md @@ -1,8 +1,9 @@ --- title: Use Azure Policy to implement governance and controls for Azure Cosmos DB resources description: Learn how to use Azure Policy to implement governance and controls for Azure Cosmos DB resources. -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: conceptual ms.date: 09/23/2020 diff --git a/articles/cosmos-db/provision-account-continuous-backup.md b/articles/cosmos-db/provision-account-continuous-backup.md index 795ed089abf4b..c299f87221422 100644 --- a/articles/cosmos-db/provision-account-continuous-backup.md +++ b/articles/cosmos-db/provision-account-continuous-backup.md @@ -6,7 +6,7 @@ ms.service: cosmos-db ms.topic: how-to ms.date: 04/18/2022 ms.author: govindk -ms.reviewer: wiassaf +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell, devx-track-azurecli ms.devlang: azurecli diff --git a/articles/cosmos-db/relational-nosql.md b/articles/cosmos-db/relational-nosql.md index d549c1d65a3a5..f618e7ab28213 100644 --- a/articles/cosmos-db/relational-nosql.md +++ b/articles/cosmos-db/relational-nosql.md @@ -7,7 +7,7 @@ ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 12/16/2019 -ms.reviewer: sngun +ms.reviewer: mjbrown adobe-target: true --- diff --git a/articles/cosmos-db/request-units.md b/articles/cosmos-db/request-units.md index 917d45d2ac568..595c7683de56b 100644 --- a/articles/cosmos-db/request-units.md +++ b/articles/cosmos-db/request-units.md @@ -1,13 +1,13 @@ --- title: Request Units as a throughput and performance currency in Azure Cosmos DB description: Learn about how to specify and estimate Request Unit requirements in Azure Cosmos DB -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: conceptual ms.date: 03/24/2022 ms.custom: seo-nov-2020, cosmos-db-video -ms.reviewer: wiassaf --- # Request Units in Azure Cosmos DB [!INCLUDE[appliesto-all-apis](includes/appliesto-all-apis.md)] diff --git a/articles/cosmos-db/resource-locks.md b/articles/cosmos-db/resource-locks.md index ed1954f77d96d..62cba40c770a0 100644 --- a/articles/cosmos-db/resource-locks.md +++ b/articles/cosmos-db/resource-locks.md @@ -1,12 +1,13 @@ --- title: Prevent Azure Cosmos DB resources from being deleted or changed description: Use Azure Resource Locks to prevent Azure Cosmos DB resources from being deleted or changed. 
-author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 05/13/2021 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell, devx-track-azurecli ms.devlang: azurecli --- diff --git a/articles/cosmos-db/restore-account-continuous-backup.md b/articles/cosmos-db/restore-account-continuous-backup.md index f49456e030aa4..32fad7c604853 100644 --- a/articles/cosmos-db/restore-account-continuous-backup.md +++ b/articles/cosmos-db/restore-account-continuous-backup.md @@ -6,7 +6,7 @@ ms.service: cosmos-db ms.topic: how-to ms.date: 04/18/2022 ms.author: govindk -ms.reviewer: wiassaf +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell, devx-track-azurecli --- diff --git a/articles/cosmos-db/scaling-provisioned-throughput-best-practices.md b/articles/cosmos-db/scaling-provisioned-throughput-best-practices.md index 502894fae58d6..7ea0db8bd4729 100644 --- a/articles/cosmos-db/scaling-provisioned-throughput-best-practices.md +++ b/articles/cosmos-db/scaling-provisioned-throughput-best-practices.md @@ -7,7 +7,7 @@ ms.subservice: cosmosdb-sql ms.date: 08/20/2021 ms.author: dech ms.topic: troubleshooting -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Best practices for scaling provisioned throughput (RU/s) diff --git a/articles/cosmos-db/scripts/cli/cassandra/autoscale.md b/articles/cosmos-db/scripts/cli/cassandra/autoscale.md index 27c88719ff85c..0eff557837e21 100644 --- a/articles/cosmos-db/scripts/cli/cassandra/autoscale.md +++ b/articles/cosmos-db/scripts/cli/cassandra/autoscale.md @@ -1,8 +1,9 @@ --- title: Azure Cosmos DB Cassandra API keyspace and table with autoscale description: Use Azure CLI to create an Azure Cosmos DB Cassandra API account, keyspace, and table with autoscale. -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: sample @@ -22,7 +23,7 @@ The script in this article creates an Azure Cosmos DB Cassandra API account, key - This script requires Azure CLI version 2.12.1 or later. - - You can run the script in the Bash environment in [Azure Cloud Shell](/azure/cloud-shell/quickstart). When Cloud Shell opens, make sure to select **Bash** in the environment field at the upper left of the shell window. Cloud Shell has the latest version of Azure CLI. + - You can run the script in the Bash environment in [Azure Cloud Shell](../../../../cloud-shell/quickstart.md). When Cloud Shell opens, make sure to select **Bash** in the environment field at the upper left of the shell window. Cloud Shell has the latest version of Azure CLI. 
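> Editorial note on the prerequisite paragraph above (Azure CLI 2.12.1 or later, Bash in Cloud Shell): the same wording recurs across these CLI sample articles. As an illustration only, a reader running the scripts locally could confirm the installed CLI meets the stated minimum as sketched below; `az version` and `az upgrade` are standard CLI commands, but the exact minimum version differs per article.

```azurecli
# Sketch: check the installed Azure CLI version against the article's stated minimum,
# then upgrade in place if needed. Cloud Shell already tracks the latest CLI.
az version --query '"azure-cli"' --output tsv
az upgrade --yes
```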
[![Launch Cloud Shell in a new window](../../../../../includes/media/cloud-shell-try-it/hdi-launch-cloud-shell.png)](https://shell.azure.com) @@ -49,4 +50,4 @@ az group delete --name $resourceGroup ## Next steps -[Azure Cosmos DB CLI documentation](/cli/azure/cosmosdb) +[Azure Cosmos DB CLI documentation](/cli/azure/cosmosdb) \ No newline at end of file diff --git a/articles/cosmos-db/scripts/cli/cassandra/create.md b/articles/cosmos-db/scripts/cli/cassandra/create.md index b4adaf85a7e63..1b3fc110dd624 100644 --- a/articles/cosmos-db/scripts/cli/cassandra/create.md +++ b/articles/cosmos-db/scripts/cli/cassandra/create.md @@ -1,8 +1,9 @@ --- title: Create a Cassandra keyspace and table for Azure Cosmos DB description: Create a Cassandra keyspace and table for Azure Cosmos DB -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/cassandra/lock.md b/articles/cosmos-db/scripts/cli/cassandra/lock.md index 214ed59cc62d0..995384463d852 100644 --- a/articles/cosmos-db/scripts/cli/cassandra/lock.md +++ b/articles/cosmos-db/scripts/cli/cassandra/lock.md @@ -1,8 +1,9 @@ --- title: Create resource lock for a Cassandra keyspace and table for Azure Cosmos DB description: Create resource lock for a Cassandra keyspace and table for Azure Cosmos DB -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/cassandra/serverless.md b/articles/cosmos-db/scripts/cli/cassandra/serverless.md index f1e23f3eff61b..8c40504cb7aa4 100644 --- a/articles/cosmos-db/scripts/cli/cassandra/serverless.md +++ b/articles/cosmos-db/scripts/cli/cassandra/serverless.md @@ -1,8 +1,9 @@ --- title: Create a Cassandra serverless account, keyspace and table for Azure Cosmos DB description: Create a Cassandra serverless account, keyspace and table for Azure Cosmos DB -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/cassandra/throughput.md b/articles/cosmos-db/scripts/cli/cassandra/throughput.md index 3cdcacc36490b..4dd2259ba82cf 100644 --- a/articles/cosmos-db/scripts/cli/cassandra/throughput.md +++ b/articles/cosmos-db/scripts/cli/cassandra/throughput.md @@ -1,8 +1,9 @@ --- title: Perform throughput (RU/s) operations for Azure Cosmos DB Cassandra API resources description: Azure CLI scripts for throughput (RU/s) operations for Azure Cosmos DB Cassandra API resources -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/common/ipfirewall.md b/articles/cosmos-db/scripts/cli/common/ipfirewall.md index e16898adb41be..941313af5e552 100644 --- a/articles/cosmos-db/scripts/cli/common/ipfirewall.md +++ b/articles/cosmos-db/scripts/cli/common/ipfirewall.md @@ -1,8 +1,9 @@ --- title: Create an Azure Cosmos account with IP firewall description: Create an Azure Cosmos account with IP firewall -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: sample ms.date: 02/21/2022 diff --git 
a/articles/cosmos-db/scripts/cli/common/keys.md b/articles/cosmos-db/scripts/cli/common/keys.md index ab70c6beb5e97..bb6e974cbd775 100644 --- a/articles/cosmos-db/scripts/cli/common/keys.md +++ b/articles/cosmos-db/scripts/cli/common/keys.md @@ -1,8 +1,9 @@ --- title: Work with account keys and connection strings for an Azure Cosmos account description: Work with account keys and connection strings for an Azure Cosmos account -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: sample ms.date: 02/21/2022 diff --git a/articles/cosmos-db/scripts/cli/common/regions.md b/articles/cosmos-db/scripts/cli/common/regions.md index d75a4557b0563..363e5c7d4bca7 100644 --- a/articles/cosmos-db/scripts/cli/common/regions.md +++ b/articles/cosmos-db/scripts/cli/common/regions.md @@ -1,8 +1,9 @@ --- title: Add regions, change failover priority, trigger failover for an Azure Cosmos account description: Add regions, change failover priority, trigger failover for an Azure Cosmos account -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: sample ms.date: 02/21/2022 diff --git a/articles/cosmos-db/scripts/cli/common/service-endpoints-ignore-missing-vnet.md b/articles/cosmos-db/scripts/cli/common/service-endpoints-ignore-missing-vnet.md index 208150745ac75..45e3192d7025b 100644 --- a/articles/cosmos-db/scripts/cli/common/service-endpoints-ignore-missing-vnet.md +++ b/articles/cosmos-db/scripts/cli/common/service-endpoints-ignore-missing-vnet.md @@ -1,8 +1,9 @@ --- title: Connect an existing Azure Cosmos account with virtual network service endpoints description: Connect an existing Azure Cosmos account with virtual network service endpoints -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: sample ms.date: 02/21/2022 diff --git a/articles/cosmos-db/scripts/cli/common/service-endpoints.md b/articles/cosmos-db/scripts/cli/common/service-endpoints.md index 32b4e4e0bb0fe..3527c5a30daed 100644 --- a/articles/cosmos-db/scripts/cli/common/service-endpoints.md +++ b/articles/cosmos-db/scripts/cli/common/service-endpoints.md @@ -1,8 +1,9 @@ --- title: Create an Azure Cosmos account with virtual network service endpoints description: Create an Azure Cosmos account with virtual network service endpoints -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: sample ms.date: 02/21/2022 diff --git a/articles/cosmos-db/scripts/cli/gremlin/autoscale.md b/articles/cosmos-db/scripts/cli/gremlin/autoscale.md index 8e43acf9c6fb1..699058fb3b724 100644 --- a/articles/cosmos-db/scripts/cli/gremlin/autoscale.md +++ b/articles/cosmos-db/scripts/cli/gremlin/autoscale.md @@ -1,8 +1,9 @@ --- title: Azure Cosmos DB Gremlin database and graph with autoscale description: Use this Azure CLI script to create an Azure Cosmos DB Gremlin API account, database, and graph with autoscale. -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-graph ms.topic: sample @@ -22,7 +23,7 @@ The script in this article creates an Azure Cosmos DB Gremlin API account, datab - This script requires Azure CLI version 2.30 or later. 
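> Editorial note: several of the retitled samples above concern account keys and connection strings. Purely as a hedged illustration of the operation those articles cover, listing the connection strings for an existing account might look like the following; the account and resource group names are placeholders.

```azurecli
# Sketch: list connection strings for an existing Cosmos DB account (placeholder names).
az cosmosdb keys list \
  --name mycosmosaccount \
  --resource-group myresourcegroup \
  --type connection-strings
```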
- - You can run the script in the Bash environment in [Azure Cloud Shell](/azure/cloud-shell/quickstart). When Cloud Shell opens, make sure to select **Bash** in the environment field at the upper left of the shell window. Cloud Shell has the latest version of Azure CLI. + - You can run the script in the Bash environment in [Azure Cloud Shell](../../../../cloud-shell/quickstart.md). When Cloud Shell opens, make sure to select **Bash** in the environment field at the upper left of the shell window. Cloud Shell has the latest version of Azure CLI. [![Launch Cloud Shell in a new window](../../../../../includes/media/cloud-shell-try-it/hdi-launch-cloud-shell.png)](https://shell.azure.com) @@ -49,4 +50,4 @@ az group delete --name $resourceGroup ## Next steps -[Azure Cosmos DB CLI documentation](/cli/azure/cosmosdb) +[Azure Cosmos DB CLI documentation](/cli/azure/cosmosdb) \ No newline at end of file diff --git a/articles/cosmos-db/scripts/cli/gremlin/create.md b/articles/cosmos-db/scripts/cli/gremlin/create.md index d4d28bce1bc61..40373e825d7c6 100644 --- a/articles/cosmos-db/scripts/cli/gremlin/create.md +++ b/articles/cosmos-db/scripts/cli/gremlin/create.md @@ -1,8 +1,9 @@ --- title: Create a Gremlin database and graph for Azure Cosmos DB description: Create a Gremlin database and graph for Azure Cosmos DB -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-graph ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/gremlin/lock.md b/articles/cosmos-db/scripts/cli/gremlin/lock.md index 05f631f2e1b18..9a98901a0fefc 100644 --- a/articles/cosmos-db/scripts/cli/gremlin/lock.md +++ b/articles/cosmos-db/scripts/cli/gremlin/lock.md @@ -1,8 +1,9 @@ --- title: Create resource lock for a Gremlin database and graph for Azure Cosmos DB description: Create resource lock for a Gremlin database and graph for Azure Cosmos DB -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-graph ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/gremlin/serverless.md b/articles/cosmos-db/scripts/cli/gremlin/serverless.md index 91de3935ad988..cf21cc5c89d95 100644 --- a/articles/cosmos-db/scripts/cli/gremlin/serverless.md +++ b/articles/cosmos-db/scripts/cli/gremlin/serverless.md @@ -1,8 +1,9 @@ --- title: Azure Cosmos DB Gremlin serverless account, database, and graph description: Use this Azure CLI script to create an Azure Cosmos DB Gremlin serverless account, database, and graph. -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-graph ms.topic: sample @@ -22,7 +23,7 @@ The script in this article creates an Azure Cosmos DB Gremlin API serverless acc - This script requires Azure CLI version 2.30 or later. - - You can run the script in the Bash environment in [Azure Cloud Shell](/azure/cloud-shell/quickstart). When Cloud Shell opens, make sure to select **Bash** in the environment field at the upper left of the shell window. Cloud Shell has the latest version of Azure CLI. + - You can run the script in the Bash environment in [Azure Cloud Shell](../../../../cloud-shell/quickstart.md). When Cloud Shell opens, make sure to select **Bash** in the environment field at the upper left of the shell window. Cloud Shell has the latest version of Azure CLI. 
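> Editorial note on the Gremlin serverless sample above, which describes creating a serverless account, database, and graph: a rough sketch of the account-creation step only (not the article's full script) is shown here. The capability names `EnableGremlin` and `EnableServerless` are assumptions to verify, and all resource names are placeholders.

```azurecli
# Sketch: create a Gremlin API account in serverless mode. The article's full script
# also creates the database and graph; names and capability values are placeholders.
az cosmosdb create \
  --name mygremlinserverless \
  --resource-group myresourcegroup \
  --capabilities EnableGremlin EnableServerless
```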
[![Launch Cloud Shell in a new window](../../../../../includes/media/cloud-shell-try-it/hdi-launch-cloud-shell.png)](https://shell.azure.com) @@ -49,4 +50,4 @@ az group delete --name $resourceGroup ## Next steps -[Azure Cosmos DB CLI documentation](/cli/azure/cosmosdb) +[Azure Cosmos DB CLI documentation](/cli/azure/cosmosdb) \ No newline at end of file diff --git a/articles/cosmos-db/scripts/cli/gremlin/throughput.md b/articles/cosmos-db/scripts/cli/gremlin/throughput.md index d7b000aeb02be..c77064787656d 100644 --- a/articles/cosmos-db/scripts/cli/gremlin/throughput.md +++ b/articles/cosmos-db/scripts/cli/gremlin/throughput.md @@ -1,8 +1,9 @@ --- title: Perform throughput (RU/s) operations for Azure Cosmos DB Gremlin API resources description: Azure CLI scripts for throughput (RU/s) operations for Azure Cosmos DB Gremlin API resources -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-graph ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/mongodb/autoscale.md b/articles/cosmos-db/scripts/cli/mongodb/autoscale.md index 16a75a3369a7d..358c201407264 100644 --- a/articles/cosmos-db/scripts/cli/mongodb/autoscale.md +++ b/articles/cosmos-db/scripts/cli/mongodb/autoscale.md @@ -1,8 +1,9 @@ --- title: Create a database with autoscale and shared collections for MongoDB API for Azure Cosmos DB description: Create a database with autoscale and shared collections for MongoDB API for Azure Cosmos DB -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-mongo ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/mongodb/create.md b/articles/cosmos-db/scripts/cli/mongodb/create.md index 194e94df3b5c6..b8fcc2348b6d6 100644 --- a/articles/cosmos-db/scripts/cli/mongodb/create.md +++ b/articles/cosmos-db/scripts/cli/mongodb/create.md @@ -1,8 +1,9 @@ --- title: Create a database and collection for MongoDB API for Azure Cosmos DB description: Create a database and collection for MongoDB API for Azure Cosmos DB -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-mongo ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/mongodb/lock.md b/articles/cosmos-db/scripts/cli/mongodb/lock.md index 05e305ed65fc8..a42632b2233ee 100644 --- a/articles/cosmos-db/scripts/cli/mongodb/lock.md +++ b/articles/cosmos-db/scripts/cli/mongodb/lock.md @@ -1,8 +1,9 @@ --- title: Create resource lock for a database and collection for MongoDB API for Azure Cosmos DB description: Create resource lock for a database and collection for MongoDB API for Azure Cosmos DB -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-mongo ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/mongodb/serverless.md b/articles/cosmos-db/scripts/cli/mongodb/serverless.md index fb1a90fed5276..cd32402f0310f 100644 --- a/articles/cosmos-db/scripts/cli/mongodb/serverless.md +++ b/articles/cosmos-db/scripts/cli/mongodb/serverless.md @@ -1,8 +1,9 @@ --- title: Create a serverless database and collection for MongoDB API for Azure Cosmos DB description: Create a serverless database and collection for MongoDB API for Azure Cosmos DB -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown 
ms.service: cosmos-db ms.subservice: cosmosdb-mongo ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/mongodb/throughput.md b/articles/cosmos-db/scripts/cli/mongodb/throughput.md index 701aa4f98183e..2983f9793a7a6 100644 --- a/articles/cosmos-db/scripts/cli/mongodb/throughput.md +++ b/articles/cosmos-db/scripts/cli/mongodb/throughput.md @@ -1,8 +1,9 @@ --- title: Perform throughput (RU/s) operations for Azure Cosmos DB API for MongoDB resources description: Azure CLI scripts for throughput (RU/s) operations for Azure Cosmos DB API for MongoDB resources -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-mongo ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/sql/autoscale.md b/articles/cosmos-db/scripts/cli/sql/autoscale.md index 07a5e52c009a1..388a696fce3cc 100644 --- a/articles/cosmos-db/scripts/cli/sql/autoscale.md +++ b/articles/cosmos-db/scripts/cli/sql/autoscale.md @@ -1,8 +1,9 @@ --- title: Create a Core (SQL) API database and container with autoscale for Azure Cosmos DB description: Create a Core (SQL) API database and container with autoscale for Azure Cosmos DB -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/sql/create.md b/articles/cosmos-db/scripts/cli/sql/create.md index 17d00630d0b33..a301e56ae33d1 100644 --- a/articles/cosmos-db/scripts/cli/sql/create.md +++ b/articles/cosmos-db/scripts/cli/sql/create.md @@ -1,8 +1,9 @@ --- title: Create a Core (SQL) API database and container for Azure Cosmos DB description: Create a Core (SQL) API database and container for Azure Cosmos DB -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/sql/lock.md b/articles/cosmos-db/scripts/cli/sql/lock.md index 864a4cb578e09..ff96c30305678 100644 --- a/articles/cosmos-db/scripts/cli/sql/lock.md +++ b/articles/cosmos-db/scripts/cli/sql/lock.md @@ -1,8 +1,9 @@ --- title: Create resource lock for a Azure Cosmos DB Core (SQL) API database and container description: Create resource lock for a Azure Cosmos DB Core (SQL) API database and container -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/sql/serverless.md b/articles/cosmos-db/scripts/cli/sql/serverless.md index 2835353fdc966..ad31b4ae95bfd 100644 --- a/articles/cosmos-db/scripts/cli/sql/serverless.md +++ b/articles/cosmos-db/scripts/cli/sql/serverless.md @@ -1,8 +1,9 @@ --- title: Create a Core (SQL) API serverless account, database and container for Azure Cosmos DB description: Create a Core (SQL) API serverless account, database and container for Azure Cosmos DB -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/sql/throughput.md b/articles/cosmos-db/scripts/cli/sql/throughput.md index d5e9d6f220bc1..fa0e4e0f1f82e 100644 --- a/articles/cosmos-db/scripts/cli/sql/throughput.md +++ b/articles/cosmos-db/scripts/cli/sql/throughput.md @@ -1,8 +1,9 @@ --- title: Perform 
throughput (RU/s) operations for Azure Cosmos DB Core (SQL) API resources description: Azure CLI scripts for throughput (RU/s) operations for Azure Cosmos DB Core (SQL) API resources -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/table/autoscale.md b/articles/cosmos-db/scripts/cli/table/autoscale.md index c5bbcaef7c061..3b7f7518358e1 100644 --- a/articles/cosmos-db/scripts/cli/table/autoscale.md +++ b/articles/cosmos-db/scripts/cli/table/autoscale.md @@ -1,8 +1,9 @@ --- title: Create a Table API table with autoscale for Azure Cosmos DB description: Create a Table API table with autoscale for Azure Cosmos DB -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/table/create.md b/articles/cosmos-db/scripts/cli/table/create.md index 34251636505f9..6a4286190a8ba 100644 --- a/articles/cosmos-db/scripts/cli/table/create.md +++ b/articles/cosmos-db/scripts/cli/table/create.md @@ -1,8 +1,9 @@ --- title: Create a Table API table for Azure Cosmos DB description: Create a Table API table for Azure Cosmos DB -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/table/lock.md b/articles/cosmos-db/scripts/cli/table/lock.md index c8b5df1632e7a..3636160efebd1 100644 --- a/articles/cosmos-db/scripts/cli/table/lock.md +++ b/articles/cosmos-db/scripts/cli/table/lock.md @@ -1,8 +1,9 @@ --- title: Create resource lock for a Azure Cosmos DB Table API table description: Create resource lock for a Azure Cosmos DB Table API table -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/table/serverless.md b/articles/cosmos-db/scripts/cli/table/serverless.md index 09633772a0e33..b2875778719bf 100644 --- a/articles/cosmos-db/scripts/cli/table/serverless.md +++ b/articles/cosmos-db/scripts/cli/table/serverless.md @@ -1,8 +1,9 @@ --- title: Create a Table API serverless account and table for Azure Cosmos DB description: Create a Table API serverless account and table for Azure Cosmos DB -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: sample diff --git a/articles/cosmos-db/scripts/cli/table/throughput.md b/articles/cosmos-db/scripts/cli/table/throughput.md index 0bf00e0209dc2..443e3916f7f68 100644 --- a/articles/cosmos-db/scripts/cli/table/throughput.md +++ b/articles/cosmos-db/scripts/cli/table/throughput.md @@ -1,8 +1,9 @@ --- title: Perform throughput (RU/s) operations for Azure Cosmos DB Table API resources description: Azure CLI scripts for throughput (RU/s) operations for Azure Cosmos DB Table API resources -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: sample diff --git a/articles/cosmos-db/scripts/powershell/cassandra/autoscale.md b/articles/cosmos-db/scripts/powershell/cassandra/autoscale.md index 
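> Editorial note: the throughput samples retitled above cover RU/s operations across the APIs. As an illustrative sketch only, a manual throughput update followed by a migration to autoscale for a Core (SQL) API container might look like this; all names are placeholders and a recent CLI is assumed.

```azurecli
# Sketch: update provisioned RU/s on a container, then migrate it to autoscale (placeholder names).
az cosmosdb sql container throughput update \
  --account-name mycosmosaccount --resource-group myresourcegroup \
  --database-name mydatabase --name mycontainer \
  --throughput 1000

az cosmosdb sql container throughput migrate \
  --account-name mycosmosaccount --resource-group myresourcegroup \
  --database-name mydatabase --name mycontainer \
  --throughput-type autoscale
```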
cea31faf70be5..0a8af5efd3dea 100644 --- a/articles/cosmos-db/scripts/powershell/cassandra/autoscale.md +++ b/articles/cosmos-db/scripts/powershell/cassandra/autoscale.md @@ -1,12 +1,13 @@ --- title: PowerShell script to create Azure Cosmos DB Cassandra API keyspace and table with autoscale description: Azure PowerShell script - Azure Cosmos DB create Cassandra API keyspace and table with autoscale -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: sample ms.date: 07/30/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/cassandra/create.md b/articles/cosmos-db/scripts/powershell/cassandra/create.md index c8ba1883a3c6b..e97bf2dd6af1f 100644 --- a/articles/cosmos-db/scripts/powershell/cassandra/create.md +++ b/articles/cosmos-db/scripts/powershell/cassandra/create.md @@ -1,12 +1,13 @@ --- title: PowerShell script to create Azure Cosmos DB Cassandra API keyspace and table description: Azure PowerShell script - Azure Cosmos DB create Cassandra API keyspace and table -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: sample ms.date: 05/13/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/cassandra/list-get.md b/articles/cosmos-db/scripts/powershell/cassandra/list-get.md index 7e026996ddbf3..adba657e17977 100644 --- a/articles/cosmos-db/scripts/powershell/cassandra/list-get.md +++ b/articles/cosmos-db/scripts/powershell/cassandra/list-get.md @@ -1,12 +1,13 @@ --- title: PowerShell script to list and get Azure Cosmos DB Cassandra API resources description: Azure PowerShell script - Azure Cosmos DB list and get operations for Cassandra API -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: sample ms.date: 03/18/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/cassandra/lock.md b/articles/cosmos-db/scripts/powershell/cassandra/lock.md index 531bf7223214c..85fd7396e31e3 100644 --- a/articles/cosmos-db/scripts/powershell/cassandra/lock.md +++ b/articles/cosmos-db/scripts/powershell/cassandra/lock.md @@ -1,8 +1,9 @@ --- title: PowerShell script to create resource lock for Azure Cosmos Cassandra API keyspace and table description: Create resource lock for Azure Cosmos Cassandra API keyspace and table -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: sample diff --git a/articles/cosmos-db/scripts/powershell/cassandra/throughput.md b/articles/cosmos-db/scripts/powershell/cassandra/throughput.md index d61d3a1e3a879..52841531ff333 100644 --- a/articles/cosmos-db/scripts/powershell/cassandra/throughput.md +++ b/articles/cosmos-db/scripts/powershell/cassandra/throughput.md @@ -1,12 +1,13 @@ --- title: PowerShell scripts for throughput (RU/s) operations for Azure Cosmos DB Cassandra API resources description: PowerShell scripts for throughput (RU/s) operations for Azure Cosmos DB Cassandra API resources -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-cassandra ms.topic: sample ms.date: 10/07/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: 
mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/common/account-update.md b/articles/cosmos-db/scripts/powershell/common/account-update.md index 5d509162045e6..e118daba94d8e 100644 --- a/articles/cosmos-db/scripts/powershell/common/account-update.md +++ b/articles/cosmos-db/scripts/powershell/common/account-update.md @@ -1,11 +1,12 @@ --- title: PowerShell script to update the default consistency level on an Azure Cosmos account description: Azure PowerShell script sample - Update default consistency level on an Azure Cosmos DB account using PowerShell -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.topic: sample ms.date: 03/21/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/common/failover-priority-update.md b/articles/cosmos-db/scripts/powershell/common/failover-priority-update.md index 0df51c847df00..235dcb440ff54 100644 --- a/articles/cosmos-db/scripts/powershell/common/failover-priority-update.md +++ b/articles/cosmos-db/scripts/powershell/common/failover-priority-update.md @@ -1,11 +1,12 @@ --- title: PowerShell script to change failover priority for an Azure Cosmos account with single write region description: Azure PowerShell script sample - Change failover priority or trigger failover for an Azure Cosmos account with single write region -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.topic: sample ms.date: 03/18/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/common/firewall-create.md b/articles/cosmos-db/scripts/powershell/common/firewall-create.md index f8c8c09804fc5..935295f09c7e7 100644 --- a/articles/cosmos-db/scripts/powershell/common/firewall-create.md +++ b/articles/cosmos-db/scripts/powershell/common/firewall-create.md @@ -1,11 +1,12 @@ --- title: PowerShell script to create an Azure Cosmos DB account with IP Firewall description: Azure PowerShell script sample - Create an Azure Cosmos DB account with IP Firewall -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.topic: sample ms.date: 03/18/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/common/keys-connection-strings.md b/articles/cosmos-db/scripts/powershell/common/keys-connection-strings.md index 614166ded93ff..009ddf1e17874 100644 --- a/articles/cosmos-db/scripts/powershell/common/keys-connection-strings.md +++ b/articles/cosmos-db/scripts/powershell/common/keys-connection-strings.md @@ -1,11 +1,12 @@ --- title: PowerShell script to get key and connection string operations for an Azure Cosmos DB account description: Azure PowerShell script sample - Account key and connection string operations for an Azure Cosmos DB account -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.topic: sample ms.date: 03/18/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/common/update-region.md b/articles/cosmos-db/scripts/powershell/common/update-region.md index d3d601dc88c1e..239805637b480 100644 --- a/articles/cosmos-db/scripts/powershell/common/update-region.md +++ b/articles/cosmos-db/scripts/powershell/common/update-region.md @@ -1,11 +1,12 @@ --- title: PowerShell 
script to update regions for an Azure Cosmos DB account description: Run this Azure PowerShell script to add regions or change region failover order for an Azure Cosmos DB account. -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.topic: sample ms.date: 05/02/2022 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/gremlin/autoscale.md b/articles/cosmos-db/scripts/powershell/gremlin/autoscale.md index 3693854aaa3dd..b0da0a7c63f6e 100644 --- a/articles/cosmos-db/scripts/powershell/gremlin/autoscale.md +++ b/articles/cosmos-db/scripts/powershell/gremlin/autoscale.md @@ -1,12 +1,13 @@ --- title: PowerShell script to create Azure Cosmos DB Gremlin API database and graph with autoscale description: Azure PowerShell script - Azure Cosmos DB create Gremlin API database and graph with autoscale -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-graph ms.topic: sample ms.date: 07/30/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/gremlin/create.md b/articles/cosmos-db/scripts/powershell/gremlin/create.md index 46e7eeef9be66..f8ab81ab4740c 100644 --- a/articles/cosmos-db/scripts/powershell/gremlin/create.md +++ b/articles/cosmos-db/scripts/powershell/gremlin/create.md @@ -1,12 +1,13 @@ --- title: PowerShell script to create Azure Cosmos DB Gremlin API database and graph description: Azure PowerShell script - Azure Cosmos DB create Gremlin API database and graph -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-graph ms.topic: sample ms.date: 05/13/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/gremlin/list-get.md b/articles/cosmos-db/scripts/powershell/gremlin/list-get.md index c876ccfe262b6..2700365475146 100644 --- a/articles/cosmos-db/scripts/powershell/gremlin/list-get.md +++ b/articles/cosmos-db/scripts/powershell/gremlin/list-get.md @@ -1,12 +1,13 @@ --- title: PowerShell script to list or get Azure Cosmos DB Gremlin API databases and graphs description: Run this Azure PowerShell script to list all or get specific Azure Cosmos DB Gremlin API databases and graphs. 
-author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-graph ms.topic: sample ms.date: 05/02/2022 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/gremlin/lock.md b/articles/cosmos-db/scripts/powershell/gremlin/lock.md index 9d0609be55e62..7a2ca0d511a54 100644 --- a/articles/cosmos-db/scripts/powershell/gremlin/lock.md +++ b/articles/cosmos-db/scripts/powershell/gremlin/lock.md @@ -1,8 +1,9 @@ --- title: PowerShell script to create resource lock for Azure Cosmos Gremlin API database and graph description: Create resource lock for Azure Cosmos Gremlin API database and graph -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-graph ms.topic: sample diff --git a/articles/cosmos-db/scripts/powershell/gremlin/throughput.md b/articles/cosmos-db/scripts/powershell/gremlin/throughput.md index 93046752af611..ab31c024cad22 100644 --- a/articles/cosmos-db/scripts/powershell/gremlin/throughput.md +++ b/articles/cosmos-db/scripts/powershell/gremlin/throughput.md @@ -1,12 +1,13 @@ --- title: PowerShell scripts for throughput (RU/s) operations for Azure Cosmos DB Gremlin API description: PowerShell scripts for throughput (RU/s) operations for Gremlin API -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-graph ms.topic: sample ms.date: 10/07/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/mongodb/autoscale.md b/articles/cosmos-db/scripts/powershell/mongodb/autoscale.md index d440aed4e3132..210d9d13b975f 100644 --- a/articles/cosmos-db/scripts/powershell/mongodb/autoscale.md +++ b/articles/cosmos-db/scripts/powershell/mongodb/autoscale.md @@ -1,12 +1,13 @@ --- title: PowerShell script to create Azure Cosmos MongoDB API database and collection with autoscale description: Azure PowerShell script - create Azure Cosmos MongoDB API database and collection with autoscale -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-mongo ms.topic: sample ms.date: 07/30/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/mongodb/create.md b/articles/cosmos-db/scripts/powershell/mongodb/create.md index 8d4c9afb6268e..acdbb0f6ed80a 100644 --- a/articles/cosmos-db/scripts/powershell/mongodb/create.md +++ b/articles/cosmos-db/scripts/powershell/mongodb/create.md @@ -1,12 +1,13 @@ --- title: PowerShell script to create Azure Cosmos MongoDB API database and collection description: Azure PowerShell script - create Azure Cosmos MongoDB API database and collection -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-mongo ms.topic: sample ms.date: 05/13/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/mongodb/list-get.md b/articles/cosmos-db/scripts/powershell/mongodb/list-get.md index b049ee407bd9d..72852eef9f1e0 100644 --- a/articles/cosmos-db/scripts/powershell/mongodb/list-get.md +++ b/articles/cosmos-db/scripts/powershell/mongodb/list-get.md @@ -1,12 +1,13 @@ --- title: PowerShell script to list and get operations in Azure Cosmos DB's API for MongoDB 
description: Azure PowerShell script - Azure Cosmos DB list and get operations for MongoDB API -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-mongo ms.topic: sample ms.date: 05/01/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/mongodb/lock.md b/articles/cosmos-db/scripts/powershell/mongodb/lock.md index 0602b39dcd462..c976c6e93fecf 100644 --- a/articles/cosmos-db/scripts/powershell/mongodb/lock.md +++ b/articles/cosmos-db/scripts/powershell/mongodb/lock.md @@ -1,8 +1,9 @@ --- title: PowerShell script to create resource lock for Azure Cosmos MongoDB API database and collection description: Create resource lock for Azure Cosmos MongoDB API database and collection -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-mongo ms.topic: sample diff --git a/articles/cosmos-db/scripts/powershell/mongodb/throughput.md b/articles/cosmos-db/scripts/powershell/mongodb/throughput.md index 81a2b4a374ca0..ba89038b7ae86 100644 --- a/articles/cosmos-db/scripts/powershell/mongodb/throughput.md +++ b/articles/cosmos-db/scripts/powershell/mongodb/throughput.md @@ -1,12 +1,13 @@ --- title: PowerShell scripts for throughput (RU/s) operations for Azure Cosmos DBs API for MongoDB description: PowerShell scripts for throughput (RU/s) operations for Azure Cosmos DBs API for MongoDB -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-mongo ms.topic: sample ms.date: 10/07/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/sql/autoscale.md b/articles/cosmos-db/scripts/powershell/sql/autoscale.md index a1d3c4a2b9f02..1d9bfd7ac7446 100644 --- a/articles/cosmos-db/scripts/powershell/sql/autoscale.md +++ b/articles/cosmos-db/scripts/powershell/sql/autoscale.md @@ -1,12 +1,13 @@ --- title: PowerShell script to create Azure Cosmos DB SQL API database and container with autoscale description: Azure PowerShell script - Azure Cosmos DB create SQL API database and container with autoscale -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: sample ms.date: 07/30/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/sql/create-index-none.md b/articles/cosmos-db/scripts/powershell/sql/create-index-none.md index 0c3ec55d71d73..c6a423b260d7e 100644 --- a/articles/cosmos-db/scripts/powershell/sql/create-index-none.md +++ b/articles/cosmos-db/scripts/powershell/sql/create-index-none.md @@ -1,12 +1,13 @@ --- title: PowerShell script to create a container with indexing turned off in an Azure Cosmos DB account description: Azure PowerShell script sample - Create a container with indexing turned off in an Azure Cosmos DB account -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: sample ms.date: 05/13/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/sql/create-large-partition-key.md b/articles/cosmos-db/scripts/powershell/sql/create-large-partition-key.md index 1284391457ab4..ea9ed5118ca0d 100644 --- 
a/articles/cosmos-db/scripts/powershell/sql/create-large-partition-key.md +++ b/articles/cosmos-db/scripts/powershell/sql/create-large-partition-key.md @@ -1,12 +1,13 @@ --- title: PowerShell script to create an Azure Cosmos DB container with a large partition key description: Azure PowerShell script sample - Create a container with a large partition key in an Azure Cosmos DB account -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: sample ms.date: 05/13/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/sql/create.md b/articles/cosmos-db/scripts/powershell/sql/create.md index 9f39a4af8dfa4..0a3dd459a0ef5 100644 --- a/articles/cosmos-db/scripts/powershell/sql/create.md +++ b/articles/cosmos-db/scripts/powershell/sql/create.md @@ -1,12 +1,13 @@ --- title: PowerShell script to create Azure Cosmos DB SQL API database and container description: Azure PowerShell script - Azure Cosmos DB create SQL API database and container -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: sample ms.date: 05/13/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/sql/list-get.md b/articles/cosmos-db/scripts/powershell/sql/list-get.md index 1466c7e181b1c..eb725dccf95f5 100644 --- a/articles/cosmos-db/scripts/powershell/sql/list-get.md +++ b/articles/cosmos-db/scripts/powershell/sql/list-get.md @@ -1,12 +1,13 @@ --- title: PowerShell script to list and get Azure Cosmos DB SQL API resources description: Azure PowerShell script - Azure Cosmos DB list and get operations for SQL API -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: sample ms.date: 03/17/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/sql/lock.md b/articles/cosmos-db/scripts/powershell/sql/lock.md index 3c14db74251e2..962023e37b21a 100644 --- a/articles/cosmos-db/scripts/powershell/sql/lock.md +++ b/articles/cosmos-db/scripts/powershell/sql/lock.md @@ -1,8 +1,9 @@ --- title: PowerShell script to create resource lock for Azure Cosmos SQL API database and container description: Create resource lock for Azure Cosmos SQL API database and container -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: sample diff --git a/articles/cosmos-db/scripts/powershell/sql/throughput.md b/articles/cosmos-db/scripts/powershell/sql/throughput.md index bd871e880cf79..f464f60e8a361 100644 --- a/articles/cosmos-db/scripts/powershell/sql/throughput.md +++ b/articles/cosmos-db/scripts/powershell/sql/throughput.md @@ -1,12 +1,13 @@ --- title: PowerShell scripts for throughput (RU/s) operations for Azure Cosmos DB SQL API database or container description: PowerShell scripts for throughput (RU/s) operations for Azure Cosmos DB SQL API database or container -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: sample ms.date: 10/07/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/table/autoscale.md 
b/articles/cosmos-db/scripts/powershell/table/autoscale.md index b07faa1469be9..57e805e730279 100644 --- a/articles/cosmos-db/scripts/powershell/table/autoscale.md +++ b/articles/cosmos-db/scripts/powershell/table/autoscale.md @@ -1,12 +1,13 @@ --- title: PowerShell script to create a table with autoscale in Azure Cosmos DB Table API description: PowerShell script to create a table with autoscale in Azure Cosmos DB Table API -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: sample ms.date: 07/30/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/table/create.md b/articles/cosmos-db/scripts/powershell/table/create.md index be3174aa1436d..c78fee510bb30 100644 --- a/articles/cosmos-db/scripts/powershell/table/create.md +++ b/articles/cosmos-db/scripts/powershell/table/create.md @@ -1,12 +1,13 @@ --- title: PowerShell script to create a table in Azure Cosmos DB Table API description: Learn how to use a PowerShell script to update the throughput for a database or a container in Azure Cosmos DB Table API -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: sample ms.date: 05/13/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/table/list-get.md b/articles/cosmos-db/scripts/powershell/table/list-get.md index 9af3e84918e75..70e3ca6cb457d 100644 --- a/articles/cosmos-db/scripts/powershell/table/list-get.md +++ b/articles/cosmos-db/scripts/powershell/table/list-get.md @@ -1,12 +1,13 @@ --- title: PowerShell script to list and get Azure Cosmos DB Table API operations description: Azure PowerShell script - Azure Cosmos DB list and get operations for Table API -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: sample ms.date: 07/31/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git a/articles/cosmos-db/scripts/powershell/table/lock.md b/articles/cosmos-db/scripts/powershell/table/lock.md index 3c2933b78aca6..e22020b5f9ed1 100644 --- a/articles/cosmos-db/scripts/powershell/table/lock.md +++ b/articles/cosmos-db/scripts/powershell/table/lock.md @@ -1,8 +1,9 @@ --- title: PowerShell script to create resource lock for Azure Cosmos Table API table description: Create resource lock for Azure Cosmos Table API table -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: sample diff --git a/articles/cosmos-db/scripts/powershell/table/throughput.md b/articles/cosmos-db/scripts/powershell/table/throughput.md index 44d10a1974938..d1e389e7d4e69 100644 --- a/articles/cosmos-db/scripts/powershell/table/throughput.md +++ b/articles/cosmos-db/scripts/powershell/table/throughput.md @@ -1,12 +1,13 @@ --- title: PowerShell scripts for throughput (RU/s) operations for for Azure Cosmos DB Table API description: PowerShell scripts for throughput (RU/s) operations for for Azure Cosmos DB Table API -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: sample ms.date: 10/07/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell --- diff --git 
a/articles/cosmos-db/secure-access-to-data.md b/articles/cosmos-db/secure-access-to-data.md index bc94ecc305367..6588995aaf926 100644 --- a/articles/cosmos-db/secure-access-to-data.md +++ b/articles/cosmos-db/secure-access-to-data.md @@ -6,7 +6,7 @@ ms.author: thweiss ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual -ms.date: 04/06/2022 +ms.date: 05/26/2022 ms.custom: devx-track-csharp, subject-rbac-steps --- # Secure access to data in Azure Cosmos DB @@ -19,7 +19,7 @@ Azure Cosmos DB provides three ways to control access to your data. | Access control type | Characteristics | |---|---| | [Primary/secondary keys](#primary-keys) | Shared secret allowing any management or data operation. It comes in both read-write and read-only variants. | -| [Role-based access control](#rbac) | Fine-grained, role-based permission model using Azure Active Directory (AAD) identities for authentication. | +| [Role-based access control](#rbac) | Fine-grained, role-based permission model using Azure Active Directory (Azure AD) identities for authentication. | | [Resource tokens](#resource-tokens)| Fine-grained permission model based on native Azure Cosmos DB users and permissions. | ## Primary/secondary keys @@ -88,7 +88,7 @@ CosmosClient client = new CosmosClient(endpointUrl, authorizationKey); Azure Cosmos DB exposes a built-in role-based access control (RBAC) system that lets you: -- Authenticate your data requests with an Azure Active Directory (AAD) identity. +- Authenticate your data requests with an Azure Active Directory identity. - Authorize your data requests with a fine-grained, role-based permission model. Azure Cosmos DB RBAC is the ideal access control method in situations where: @@ -140,9 +140,8 @@ For an example of a middle tier service used to generate or broker resource toke Azure Cosmos DB users are associated with a Cosmos database. Each database can contain zero or more Cosmos DB users. The following code sample shows how to create a Cosmos DB user using the [Azure Cosmos DB .NET SDK v3](https://github.com/Azure/azure-cosmos-dotnet-v3/tree/master/Microsoft.Azure.Cosmos.Samples/Usage/UserManagement). ```csharp -//Create a user. -Database database = benchmark.client.GetDatabase("SalesDatabase"); - +// Create a user. +Database database = client.GetDatabase("SalesDatabase"); User user = await database.CreateUserAsync("User 1"); ``` @@ -161,7 +160,7 @@ A permission resource is associated with a user and assigned to a specific resou If you enable the [diagnostic logs on data-plane requests](cosmosdb-monitor-resource-logs.md), the following two properties corresponding to the permission are logged: -* **resourceTokenPermissionId** - This property indicates the resource token permission Id that you have specified. +* **resourceTokenPermissionId** - This property indicates the resource token permission ID that you have specified. * **resourceTokenPermissionMode** - This property indicates the permission mode that you have set when creating the resource token. The permission mode can have values such as "all" or "read". 
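> Editorial note on the access-control hunks above, which distinguish read-write and read-only key variants from Azure AD based RBAC and resource tokens: as a hedged illustration of the key-based path (not part of this patch), retrieving only the read-only keys for an account might look like the following; the account and resource group names are placeholders.

```azurecli
# Sketch: fetch only the read-only keys for an account, suitable for the read-only
# scenarios the article describes (placeholder names).
az cosmosdb keys list \
  --name mycosmosaccount \
  --resource-group myresourcegroup \
  --type read-only-keys
```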
@@ -172,10 +171,10 @@ The following code sample shows how to create a permission resource, read the re ```csharp // Create a permission on a container and specific partition key value Container container = client.GetContainer("SalesDatabase", "OrdersContainer"); -user.CreatePermissionAsync( +await user.CreatePermissionAsync( new PermissionProperties( - id: "permissionUser1Orders", - permissionMode: PermissionMode.All, + id: "permissionUser1Orders", + permissionMode: PermissionMode.All, container: container, resourcePartitionKey: new PartitionKey("012345"))); ``` @@ -185,10 +184,10 @@ user.CreatePermissionAsync( The following code snippet shows how to retrieve the permission associated with the user created above and instantiate a new CosmosClient on behalf of the user, scoped to a single partition key. ```csharp -//Read a permission, create user client session. -PermissionProperties permissionProperties = await user.GetPermission("permissionUser1Orders") +// Read a permission, create user client session. +Permission permission = await user.GetPermission("permissionUser1Orders").ReadAsync(); -CosmosClient client = new CosmosClient(accountEndpoint: "MyEndpoint", authKeyOrResourceToken: permissionProperties.Token); +CosmosClient client = new CosmosClient(accountEndpoint: "MyEndpoint", authKeyOrResourceToken: permission.Resource.Token); ``` ## Differences between RBAC and resource tokens @@ -197,8 +196,8 @@ CosmosClient client = new CosmosClient(accountEndpoint: "MyEndpoint", authKeyOrR |--|--|--| | Authentication | With Azure Active Directory (Azure AD). | Based on the native Azure Cosmos DB users
                  Integrating resource tokens with Azure AD requires extra work to bridge Azure AD identities and Azure Cosmos DB users. | | Authorization | Role-based: role definitions map allowed actions and can be assigned to multiple identities. | Permission-based: for each Azure Cosmos DB user, you need to assign data access permissions. | -| Token scope | An AAD token carries the identity of the requester. This identity is matched against all assigned role definitions to perform authorization. | A resource token carries the permission granted to a specific Azure Cosmos DB user on a specific Azure Cosmos DB resource. Authorization requests on different resources may requires different tokens. | -| Token refresh | The AAD token is automatically refreshed by the Azure Cosmos DB SDKs when it expires. | Resource token refresh is not supported. When a resource token expires, a new one needs to be issued. | +| Token scope | An Azure AD token carries the identity of the requester. This identity is matched against all assigned role definitions to perform authorization. | A resource token carries the permission granted to a specific Azure Cosmos DB user on a specific Azure Cosmos DB resource. Authorization requests on different resources may require different tokens. | +| Token refresh | The Azure AD token is automatically refreshed by the Azure Cosmos DB SDKs when it expires. | Resource token refresh is not supported. When a resource token expires, a new one needs to be issued. | ## Add users and assign roles diff --git a/articles/cosmos-db/security-controls-policy.md b/articles/cosmos-db/security-controls-policy.md index 6a1721930a8d5..7a9590fdf1cb7 100644 --- a/articles/cosmos-db/security-controls-policy.md +++ b/articles/cosmos-db/security-controls-policy.md @@ -3,8 +3,9 @@ title: Azure Policy Regulatory Compliance controls for Azure Cosmos DB description: Lists Azure Policy Regulatory Compliance controls available for Azure Cosmos DB. These built-in policy definitions provide common approaches to managing the compliance of your Azure resources. ms.date: 05/10/2022 ms.topic: sample -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.custom: subject-policy-compliancecontrols --- diff --git a/articles/cosmos-db/serverless.md b/articles/cosmos-db/serverless.md index ad640aaa84972..6e023548785b7 100644 --- a/articles/cosmos-db/serverless.md +++ b/articles/cosmos-db/serverless.md @@ -50,7 +50,7 @@ Any container that is created in a serverless account is a serverless container. - Serverless containers can store a maximum of 50 GB of data and indexes. > [!NOTE] -> Serverless containers up to 1 TB are currently in preview with Azure Cosmos DB. To try the new feature, register the *"Azure Cosmos DB Serverless 1 TB Container Preview"* [preview feature in your Azure subscription](/azure/azure-resource-manager/management/preview-features). +> Serverless containers up to 1 TB are currently in preview with Azure Cosmos DB. To try the new feature, register the *"Azure Cosmos DB Serverless 1 TB Container Preview"* [preview feature in your Azure subscription](../azure-resource-manager/management/preview-features.md). 
## Monitoring your consumption @@ -74,4 +74,4 @@ Get started with serverless with the following articles: - [Request Units in Azure Cosmos DB](request-units.md) - [Choose between provisioned throughput and serverless](throughput-serverless.md) -- [Pricing model in Azure Cosmos DB](how-pricing-works.md) +- [Pricing model in Azure Cosmos DB](how-pricing-works.md) \ No newline at end of file diff --git a/articles/cosmos-db/set-throughput.md b/articles/cosmos-db/set-throughput.md index 493cbefc37d0a..2f2bc42537158 100644 --- a/articles/cosmos-db/set-throughput.md +++ b/articles/cosmos-db/set-throughput.md @@ -1,8 +1,9 @@ --- title: Provision throughput on Azure Cosmos containers and databases description: Learn how to set provisioned throughput for your Azure Cosmos containers and databases. -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: conceptual ms.date: 09/16/2021 diff --git a/articles/cosmos-db/sql/best-practice-dotnet.md b/articles/cosmos-db/sql/best-practice-dotnet.md index 5ddc3e73c5bc5..328cf1d255896 100644 --- a/articles/cosmos-db/sql/best-practice-dotnet.md +++ b/articles/cosmos-db/sql/best-practice-dotnet.md @@ -7,7 +7,7 @@ ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 04/01/2022 ms.author: esarroyo -ms.reviewer: wiassaf +ms.reviewer: mjbrown ms.custom: cosmos-db-video --- @@ -22,30 +22,30 @@ Watch the video below to learn more about using the .NET SDK from a Cosmos DB en > [!VIDEO https://aka.ms/docs.dotnet-best-practices] ## Checklist -|Checked | Topic |Details/Links | +|Checked | Subject |Details/Links | |---------|---------|---------| | | SDK Version | Always using the [latest version](sql-api-sdk-dotnet-standard.md) of the Cosmos DB SDK available for optimal performance. | | | Singleton Client | Use a [single instance](/dotnet/api/microsoft.azure.cosmos.cosmosclient?view=azure-dotnet&preserve-view=true) of `CosmosClient` for the lifetime of your application for [better performance](performance-tips-dotnet-sdk-v3-sql.md#sdk-usage). | -| | Regions | Make sure to run your application in the same [Azure region](../distribute-data-globally.md) as your Azure Cosmos DB account, whenever possible to reduce latency. Enable 2-4 regions and replicate your accounts in multiple regions for [best availability](../distribute-data-globally.md). For production workloads, enable [automatic failover](../how-to-manage-database-account.md#configure-multiple-write-regions). In the absence of this configuration, the account will experience loss of write availability for all the duration of the write region outage, as manual failover will not succeed due to lack of region connectivity. To learn how to add multiple regions using the .NET SDK visit [here](tutorial-global-distribution-sql-api.md) | -| | Availability and Failovers | Set the [ApplicationPreferredRegions](/dotnet/api/microsoft.azure.cosmos.cosmosclientoptions.applicationpreferredregions?view=azure-dotnet&preserve-view=true) or [ApplicationRegion](/dotnet/api/microsoft.azure.cosmos.cosmosclientoptions.applicationregion?view=azure-dotnet&preserve-view=true) in the v3 SDK, and the [PreferredLocations](/dotnet/api/microsoft.azure.documents.client.connectionpolicy.preferredlocations?view=azure-dotnet&preserve-view=true) in the v2 SDK using the [preferred regions list](./tutorial-global-distribution-sql-api.md?tabs=dotnetv3%2capi-async#preferred-locations). 
During failovers, write operations are sent to the current write region and all reads are sent to the first region within your preferred regions list. For more information about regional failover mechanics see the [availability troubleshooting guide](troubleshoot-sdk-availability.md). | -| | CPU | You may run into connectivity/availability issues due to lack of resources on your client machine. Monitor your CPU utilization on nodes running the Azure Cosmos DB client, and scale up/out if usage is very high. | +| | Regions | Make sure to run your application in the same [Azure region](../distribute-data-globally.md) as your Azure Cosmos DB account, whenever possible to reduce latency. Enable 2-4 regions and replicate your accounts in multiple regions for [best availability](../distribute-data-globally.md). For production workloads, enable [automatic failover](../how-to-manage-database-account.md#configure-multiple-write-regions). In the absence of this configuration, the account will experience loss of write availability for all the duration of the write region outage, as manual failover won't succeed due to lack of region connectivity. To learn how to add multiple regions using the .NET SDK visit [here](tutorial-global-distribution-sql-api.md) | +| | Availability and Failovers | Set the [ApplicationPreferredRegions](/dotnet/api/microsoft.azure.cosmos.cosmosclientoptions.applicationpreferredregions?view=azure-dotnet&preserve-view=true) or [ApplicationRegion](/dotnet/api/microsoft.azure.cosmos.cosmosclientoptions.applicationregion?view=azure-dotnet&preserve-view=true) in the v3 SDK, and the [PreferredLocations](/dotnet/api/microsoft.azure.documents.client.connectionpolicy.preferredlocations?view=azure-dotnet&preserve-view=true) in the v2 SDK using the [preferred regions list](./tutorial-global-distribution-sql-api.md?tabs=dotnetv3%2capi-async#preferred-locations). During failovers, write operations are sent to the current write region and all reads are sent to the first region within your preferred regions list. For more information about regional failover mechanics, see the [availability troubleshooting guide](troubleshoot-sdk-availability.md). | +| | CPU | You may run into connectivity/availability issues due to lack of resources on your client machine. Monitor your CPU utilization on nodes running the Azure Cosmos DB client, and scale up/out if usage is high. | | | Hosting | Use [Windows 64-bit host](performance-tips.md#hosting) processing for best performance, whenever possible. | | | Connectivity Modes | Use [Direct mode](sql-sdk-connection-modes.md) for the best performance. For instructions on how to do this, see the [V3 SDK documentation](performance-tips-dotnet-sdk-v3-sql.md#networking) or the [V2 SDK documentation](performance-tips.md#networking).| | | Networking | If using a virtual machine to run your application, enable [Accelerated Networking](../../virtual-network/create-vm-accelerated-networking-powershell.md) on your VM to help with bottlenecks due to high traffic and reduce latency or CPU jitter. You might also want to consider using a higher end Virtual Machine where the max CPU usage is under 70%. | | | Ephemeral Port Exhaustion | For sparse or sporadic connections, we set the [`IdleConnectionTimeout`](/dotnet/api/microsoft.azure.cosmos.cosmosclientoptions.idletcpconnectiontimeout?view=azure-dotnet&preserve-view=true) and [`PortReuseMode`](/dotnet/api/microsoft.azure.cosmos.cosmosclientoptions.portreusemode?view=azure-dotnet&preserve-view=true) to `PrivatePortPool`. 
The `IdleConnectionTimeout` property helps which control the time unused connections are closed. This will reduce the number of unused connections. By default, idle connections are kept open indefinitely. The value set must be greater than or equal to 10 minutes. We recommended values between 20 minutes and 24 hours. The `PortReuseMode` property allows the SDK to use a small pool of ephemeral ports for various Azure Cosmos DB destination endpoints. | | | Use Async/Await | Avoid blocking calls: `Task.Result`, `Task.Wait`, and `Task.GetAwaiter().GetResult()`. The entire call stack is asynchronous in order to benefit from [async/await](/dotnet/csharp/programming-guide/concepts/async/) patterns. Many synchronous blocking calls lead to [Thread Pool starvation](/archive/blogs/vancem/diagnosing-net-core-threadpool-starvation-with-perfview-why-my-service-is-not-saturating-all-cores-or-seems-to-stall) and degraded response times. | | | End-to-End Timeouts | To get end-to-end timeouts, you'll need to use both `RequestTimeout` and `CancellationToken` parameters. For more details on timeouts with Cosmos DB [visit](troubleshoot-dot-net-sdk-request-timeout.md) | -| | Retry Logic | A transient error is an error that has an underlying cause that soon resolves itself. Applications that connect to your database should be built to expect these transient errors. To handle them, implement retry logic in your code instead of surfacing them to users as application errors. The SDK has built-in logic to handle these transient failures on retryable requests like read or query operations. The SDK will not retry on writes for transient failures as writes are not idempotent. The SDK does allow users to configure retry logic for throttles. For details on which errors to retry on [visit](troubleshoot-dot-net-sdk.md#retry-logics) | +| | Retry Logic | A transient error is an error that has an underlying cause that soon resolves itself. Applications that connect to your database should be built to expect these transient errors. To handle them, implement retry logic in your code instead of surfacing them to users as application errors. The SDK has built-in logic to handle these transient failures on retryable requests like read or query operations. The SDK won't retry on writes for transient failures as writes aren't idempotent. The SDK does allow users to configure retry logic for throttles. For details on which errors to retry on [visit](troubleshoot-dot-net-sdk.md#retry-logics) | | | Caching database/collection names | Retrieve the names of your databases and containers from configuration or cache them on start. Calls like `ReadDatabaseAsync` or `ReadDocumentCollectionAsync` and `CreateDatabaseQuery` or `CreateDocumentCollectionQuery` will result in metadata calls to the service, which consume from the system-reserved RU limit. `CreateIfNotExist` should also only be used once for setting up the database. Overall, these operations should be performed infrequently. | | | Bulk Support | In scenarios where you may not need to optimize for latency, we recommend enabling [Bulk support](https://devblogs.microsoft.com/cosmosdb/introducing-bulk-support-in-the-net-sdk/) for dumping large volumes of data. | -| | Parallel Queries | The Cosmos DB SDK supports [running queries in parallel](performance-tips-query-sdk.md?pivots=programming-language-csharp) for better latency and throughput on your queries. We recommend setting the `MaxConcurrency` property within the `QueryRequestsOptions` to the number of partitions you have. 
If you are not aware of the number of partitions, start by using `int.MaxValue` which will give you the best latency. Then decrease the number until it fits the resource restrictions of the environment to avoid high CPU issues. Also, set the `MaxBufferedItemCount` to the expected number of results returned to limit the number of pre-fetched results. | +| | Parallel Queries | The Cosmos DB SDK supports [running queries in parallel](performance-tips-query-sdk.md?pivots=programming-language-csharp) for better latency and throughput on your queries. We recommend setting the `MaxConcurrency` property within the `QueryRequestsOptions` to the number of partitions you have. If you aren't aware of the number of partitions, start by using `int.MaxValue`, which will give you the best latency. Then decrease the number until it fits the resource restrictions of the environment to avoid high CPU issues. Also, set the `MaxBufferedItemCount` to the expected number of results returned to limit the number of pre-fetched results. | | | Performance Testing Backoffs | When performing testing on your application, you should implement backoffs at [`RetryAfter`](performance-tips-dotnet-sdk-v3-sql.md#sdk-usage) intervals. Respecting the backoff helps ensure that you'll spend a minimal amount of time waiting between retries. | | | Indexing | The Azure Cosmos DB indexing policy also allows you to specify which document paths to include or exclude from indexing by using indexing paths (IndexingPolicy.IncludedPaths and IndexingPolicy.ExcludedPaths). Ensure that you exclude unused paths from indexing for faster writes. For a sample on how to create indexes using the SDK [visit](performance-tips-dotnet-sdk-v3-sql.md#indexing-policy) | | | Document Size | The request charge of a specified operation correlates directly to the size of the document. We recommend reducing the size of your documents as operations on large documents cost more than operations on smaller documents. | | | Increase the number of threads/tasks | Because calls to Azure Cosmos DB are made over the network, you might need to vary the degree of concurrency of your requests so that the client application spends minimal time waiting between requests. For example, if you're using the [.NET Task Parallel Library](/dotnet/standard/parallel-programming/task-parallel-library-tpl), create on the order of hundreds of tasks that read from or write to Azure Cosmos DB. | -| | Enabling Query Metrics | For additional logging of your backend query executions, you can enable SQL Query Metrics using our .NET SDK. For instructions on how to collect SQL Query Metrics [visit](profile-sql-api-query.md) | -| | SDK Logging | Use SDK logging to capture additional diagnostics information and troubleshoot latency issues. Log the [diagnostics string](/dotnet/api/microsoft.azure.documents.client.resourceresponsebase.requestdiagnosticsstring?view=azure-dotnet&preserve-view=true) in the V2 SDK or [`Diagnostics`](/dotnet/api/microsoft.azure.cosmos.responsemessage.diagnostics?view=azure-dotnet&preserve-view=true) in v3 SDK for more detailed cosmos diagnostic information for the current request to the service. As an example use case, capture Diagnostics on any exception and on completed operations if the `Diagnostics.ElapsedTime` is greater than a designated threshold value (i.e. if you have an SLA of 10 seconds, then capture diagnostics when `ElapsedTime` > 10 seconds ). It is advised to only use these diagnostics during performance testing. 
| -| | DefaultTraceListener | The DefaultTraceListener poses performance issues on production environments causing high CPU and I/O bottlenecks. Make sure you are using the latest SDK versions or [remove the DefaultTraceListener from your application](performance-tips-dotnet-sdk-v3-sql.md#logging-and-tracing) | +| | Enabling Query Metrics | For more logging of your backend query executions, you can enable SQL Query Metrics using our .NET SDK. For instructions on how to collect SQL Query Metrics [visit](profile-sql-api-query.md) | +| | SDK Logging | Use SDK logging to capture extra diagnostics information and troubleshoot latency issues. Log the [diagnostics string](/dotnet/api/microsoft.azure.documents.client.resourceresponsebase.requestdiagnosticsstring?view=azure-dotnet&preserve-view=true) in the V2 SDK or [`Diagnostics`](/dotnet/api/microsoft.azure.cosmos.responsemessage.diagnostics?view=azure-dotnet&preserve-view=true) in v3 SDK for more detailed cosmos diagnostic information for the current request to the service. As an example use case, capture Diagnostics on any exception and on completed operations if the `Diagnostics.ElapsedTime` is greater than a designated threshold value (that is, if you have an SLA of 10 seconds, then capture diagnostics when `ElapsedTime` > 10 seconds). It's advised to only use these diagnostics during performance testing. | +| | DefaultTraceListener | The DefaultTraceListener poses performance issues on production environments causing high CPU and I/O bottlenecks. Make sure you're using the latest SDK versions or [remove the DefaultTraceListener from your application](performance-tips-dotnet-sdk-v3-sql.md#logging-and-tracing) | ## Best practices when using Gateway mode Increase `System.Net MaxConnections` per host when you use Gateway mode. Azure Cosmos DB requests are made over HTTPS/REST when you use Gateway mode. They're subject to the default connection limit per hostname or IP address. You might need to set `MaxConnections` to a higher value (from 100 through 1,000) so that the client library can use multiple simultaneous connections to Azure Cosmos DB. In .NET SDK 1.8.0 and later, the default value for `ServicePointManager.DefaultConnectionLimit` is 50. To change the value, you can set `Documents.Client.ConnectionPolicy.MaxConnectionLimit` to a higher value. @@ -59,5 +59,5 @@ For a sample application that's used to evaluate Azure Cosmos DB for high-perfor To learn more about designing your application for scale and high performance, see [Partitioning and scaling in Azure Cosmos DB](../partitioning-overview.md). Trying to do capacity planning for a migration to Azure Cosmos DB? You can use information about your existing database cluster for capacity planning. 
-* If all you know is the number of vcores and servers in your existing database cluster, read about [estimating request units using vCores or vCPUs](../convert-vcore-to-request-unit.md) +* If all you know is the number of vCores and servers in your existing database cluster, read about [estimating request units using vCores or vCPUs](../convert-vcore-to-request-unit.md) * If you know typical request rates for your current database workload, read about [estimating request units using Azure Cosmos DB capacity planner](estimate-ru-with-capacity-planner.md) diff --git a/articles/cosmos-db/sql/bicep-samples.md b/articles/cosmos-db/sql/bicep-samples.md index a30da7203d093..f0cc0142ce3b4 100644 --- a/articles/cosmos-db/sql/bicep-samples.md +++ b/articles/cosmos-db/sql/bicep-samples.md @@ -1,12 +1,13 @@ --- title: Bicep samples for Azure Cosmos DB Core (SQL API) description: Use Bicep to create and configure Azure Cosmos DB. -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 09/13/2021 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Bicep for Azure Cosmos DB diff --git a/articles/cosmos-db/sql/bulk-executor-dot-net.md b/articles/cosmos-db/sql/bulk-executor-dot-net.md index fd814dc327bec..1cba299e1c493 100644 --- a/articles/cosmos-db/sql/bulk-executor-dot-net.md +++ b/articles/cosmos-db/sql/bulk-executor-dot-net.md @@ -8,7 +8,7 @@ ms.subservice: cosmosdb-sql ms.devlang: csharp ms.topic: how-to ms.date: 05/02/2020 -ms.reviewer: sngun +ms.reviewer: mjbrown ms.custom: devx-track-csharp --- diff --git a/articles/cosmos-db/sql/bulk-executor-java.md b/articles/cosmos-db/sql/bulk-executor-java.md index 5ee837aa8e2e7..db8eaff8679f4 100644 --- a/articles/cosmos-db/sql/bulk-executor-java.md +++ b/articles/cosmos-db/sql/bulk-executor-java.md @@ -8,7 +8,7 @@ ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: how-to ms.date: 03/07/2022 -ms.reviewer: sngun +ms.reviewer: mjbrown ms.custom: devx-track-java --- diff --git a/articles/cosmos-db/sql/certificate-based-authentication.md b/articles/cosmos-db/sql/certificate-based-authentication.md index 5f211c649d03a..30489042b4763 100644 --- a/articles/cosmos-db/sql/certificate-based-authentication.md +++ b/articles/cosmos-db/sql/certificate-based-authentication.md @@ -1,15 +1,14 @@ --- title: Certificate-based authentication with Azure Cosmos DB and Active Directory description: Learn how to configure an Azure AD identity for certificate-based authentication to access keys from Azure Cosmos DB. 
-author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 06/11/2019 -ms.author: jroth -ms.reviewer: sngun +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurepowershell, subject-rbac-steps - --- # Certificate-based authentication for an Azure AD identity to access keys from an Azure Cosmos DB account diff --git a/articles/cosmos-db/sql/change-feed-design-patterns.md b/articles/cosmos-db/sql/change-feed-design-patterns.md index b603343d9299e..b5e597c6bec50 100644 --- a/articles/cosmos-db/sql/change-feed-design-patterns.md +++ b/articles/cosmos-db/sql/change-feed-design-patterns.md @@ -1,13 +1,13 @@ --- title: Change feed design patterns in Azure Cosmos DB description: Overview of common change feed design patterns -author: timsander1 -ms.author: tisande +author: seesharprun +ms.author: sidandrews +ms.reviewer: jucocchi ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 03/24/2022 -ms.reviewer: wiassaf ms.custom: cosmos-db-video --- # Change feed design patterns in Azure Cosmos DB diff --git a/articles/cosmos-db/sql/change-feed-functions.md b/articles/cosmos-db/sql/change-feed-functions.md index 151aef981053e..cd986fbae536f 100644 --- a/articles/cosmos-db/sql/change-feed-functions.md +++ b/articles/cosmos-db/sql/change-feed-functions.md @@ -1,13 +1,13 @@ --- title: How to use Azure Cosmos DB change feed with Azure Functions description: Use Azure Functions to connect to Azure Cosmos DB change feed. Later you can create reactive Azure functions that are triggered on every new event. -author: timsander1 -ms.author: tisande +author: seesharprun +ms.author: sidandrews +ms.reviewer: jucocchi ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 10/14/2021 -ms.reviewer: sngun --- # Serverless event-based architectures with Azure Cosmos DB and Azure Functions diff --git a/articles/cosmos-db/sql/change-feed-processor.md b/articles/cosmos-db/sql/change-feed-processor.md index 40b16a1f62161..5edb5b33624cd 100644 --- a/articles/cosmos-db/sql/change-feed-processor.md +++ b/articles/cosmos-db/sql/change-feed-processor.md @@ -1,14 +1,14 @@ --- title: Change feed processor in Azure Cosmos DB description: Learn how to use the Azure Cosmos DB change feed processor to read the change feed, the components of the change feed processor -author: timsander1 -ms.author: tisande +author: seesharprun +ms.author: sidandrews +ms.reviewer: jucocchi ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: csharp ms.topic: conceptual ms.date: 04/05/2022 -ms.reviewer: sngun ms.custom: devx-track-csharp --- diff --git a/articles/cosmos-db/sql/change-feed-pull-model.md b/articles/cosmos-db/sql/change-feed-pull-model.md index 10576feb85341..d15f3557ab877 100644 --- a/articles/cosmos-db/sql/change-feed-pull-model.md +++ b/articles/cosmos-db/sql/change-feed-pull-model.md @@ -1,14 +1,14 @@ --- title: Change feed pull model description: Learn how to use the Azure Cosmos DB change feed pull model to read the change feed and the differences between the pull model and Change Feed Processor -author: timsander1 -ms.author: tisande +author: seesharprun +ms.author: sidandrews +ms.reviewer: jucocchi ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: csharp ms.topic: conceptual ms.date: 04/07/2022 -ms.reviewer: sngun --- # Change feed pull model in Azure Cosmos DB diff --git a/articles/cosmos-db/sql/changefeed-ecommerce-solution.md b/articles/cosmos-db/sql/changefeed-ecommerce-solution.md index 
6c2727241752a..7da69d543c38c 100644 --- a/articles/cosmos-db/sql/changefeed-ecommerce-solution.md +++ b/articles/cosmos-db/sql/changefeed-ecommerce-solution.md @@ -1,13 +1,13 @@ --- title: Use Azure Cosmos DB change feed to visualize real-time data analytics description: This article describes how change feed can be used by a retail company to understand user patterns, perform real-time data analysis and visualization -author: timsander1 -ms.author: tisande +author: seesharprun +ms.author: sidandrews +ms.reviewer: jucocchi ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: how-to -ms.reviewer: wiassaf ms.date: 03/24/2022 ms.custom: devx-track-java, cosmos-db-video --- diff --git a/articles/cosmos-db/sql/cli-samples.md b/articles/cosmos-db/sql/cli-samples.md index 73c765bbc284f..370fe89b50e2e 100644 --- a/articles/cosmos-db/sql/cli-samples.md +++ b/articles/cosmos-db/sql/cli-samples.md @@ -1,12 +1,13 @@ --- title: Azure CLI Samples for Azure Cosmos DB | Microsoft Docs description: This article lists several Azure CLI code samples available for interacting with Azure Cosmos DB. View API-specific CLI samples. -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: sample ms.date: 02/21/2022 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurecli, seo-azure-cli keywords: cosmos db, azure cli samples, azure cli code samples, azure cli script samples --- diff --git a/articles/cosmos-db/sql/create-notebook-visualize-data.md b/articles/cosmos-db/sql/create-notebook-visualize-data.md index 0c01c8badc73e..d086217d7d920 100644 --- a/articles/cosmos-db/sql/create-notebook-visualize-data.md +++ b/articles/cosmos-db/sql/create-notebook-visualize-data.md @@ -7,7 +7,7 @@ ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.date: 11/05/2019 ms.author: dech -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Tutorial: Create a notebook in Azure Cosmos DB to analyze and visualize the data diff --git a/articles/cosmos-db/sql/create-sql-api-dotnet-v4.md b/articles/cosmos-db/sql/create-sql-api-dotnet-v4.md index 9720891a8ebc9..b2958bb853b30 100644 --- a/articles/cosmos-db/sql/create-sql-api-dotnet-v4.md +++ b/articles/cosmos-db/sql/create-sql-api-dotnet-v4.md @@ -1,8 +1,9 @@ --- title: Manage Azure Cosmos DB SQL API resources using .NET V4 SDK description: Use this quickstart to build a console app by using the .NET V4 SDK to manage Azure Cosmos DB SQL API account resources. -author: rothja -ms.author: jroth +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: csharp diff --git a/articles/cosmos-db/sql/create-sql-api-dotnet.md b/articles/cosmos-db/sql/create-sql-api-dotnet.md index 0e802a7e40253..b13230ae2ff2b 100644 --- a/articles/cosmos-db/sql/create-sql-api-dotnet.md +++ b/articles/cosmos-db/sql/create-sql-api-dotnet.md @@ -1,8 +1,9 @@ --- title: Quickstart - Build a .NET console app to manage Azure Cosmos DB SQL API resources description: Learn how to build a .NET console app to manage Azure Cosmos DB SQL API account resources in this quickstart. 
-author: rothja -ms.author: jroth +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: csharp diff --git a/articles/cosmos-db/sql/create-sql-api-java-changefeed.md b/articles/cosmos-db/sql/create-sql-api-java-changefeed.md index 4b1a7431a47f4..5820a7e7e3b1a 100644 --- a/articles/cosmos-db/sql/create-sql-api-java-changefeed.md +++ b/articles/cosmos-db/sql/create-sql-api-java-changefeed.md @@ -1,13 +1,14 @@ --- title: Create an end-to-end Azure Cosmos DB Java SDK v4 application sample by using Change Feed description: This guide walks you through a simple Java SQL API application which inserts documents into an Azure Cosmos DB container, while maintaining a materialized view of the container using Change Feed. -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: how-to ms.date: 06/11/2020 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-java --- diff --git a/articles/cosmos-db/sql/create-sql-api-java.md b/articles/cosmos-db/sql/create-sql-api-java.md index 695410b920c30..d46094bc0f55e 100644 --- a/articles/cosmos-db/sql/create-sql-api-java.md +++ b/articles/cosmos-db/sql/create-sql-api-java.md @@ -1,13 +1,14 @@ --- title: Quickstart - Use Java to create a document database using Azure Cosmos DB description: This quickstart presents a Java code sample you can use to connect to and query the Azure Cosmos DB SQL API -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: quickstart ms.date: 08/26/2021 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: seo-java-august2019, seo-java-september2019, devx-track-java, mode-api --- diff --git a/articles/cosmos-db/sql/create-sql-api-nodejs.md b/articles/cosmos-db/sql/create-sql-api-nodejs.md index 65e148530021b..9cff6d80c9f53 100644 --- a/articles/cosmos-db/sql/create-sql-api-nodejs.md +++ b/articles/cosmos-db/sql/create-sql-api-nodejs.md @@ -1,13 +1,14 @@ --- title: Quickstart- Use Node.js to query from Azure Cosmos DB SQL API account description: How to use Node.js to create an app that connects to Azure Cosmos DB SQL API account and queries data. 
-author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: javascript ms.topic: quickstart ms.date: 08/26/2021 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-js, mode-api --- diff --git a/articles/cosmos-db/sql/create-sql-api-spark.md b/articles/cosmos-db/sql/create-sql-api-spark.md index 5ea446b2bbd28..818a37fba6f8f 100644 --- a/articles/cosmos-db/sql/create-sql-api-spark.md +++ b/articles/cosmos-db/sql/create-sql-api-spark.md @@ -1,13 +1,14 @@ --- title: Quickstart - Manage data with Azure Cosmos DB Spark 3 OLTP Connector for SQL API description: This quickstart presents a code sample for the Azure Cosmos DB Spark 3 OLTP Connector for SQL API that you can use to connect to and query data in your Azure Cosmos DB account -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: quickstart ms.date: 03/01/2022 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: seo-java-august2019, seo-java-september2019, devx-track-java, mode-api --- diff --git a/articles/cosmos-db/sql/create-sql-api-spring-data.md b/articles/cosmos-db/sql/create-sql-api-spring-data.md index 0e35afaa37dac..98fc60554b536 100644 --- a/articles/cosmos-db/sql/create-sql-api-spring-data.md +++ b/articles/cosmos-db/sql/create-sql-api-spring-data.md @@ -1,13 +1,14 @@ --- title: Quickstart - Use Spring Data Azure Cosmos DB v3 to create a document database using Azure Cosmos DB description: This quickstart presents a Spring Data Azure Cosmos DB v3 code sample you can use to connect to and query the Azure Cosmos DB SQL API -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: quickstart ms.date: 08/26/2021 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: seo-java-august2019, seo-java-september2019, devx-track-java, mode-api --- diff --git a/articles/cosmos-db/sql/create-support-request-quota-increase.md b/articles/cosmos-db/sql/create-support-request-quota-increase.md index 4f213878bbd70..d1934a2e98b63 100644 --- a/articles/cosmos-db/sql/create-support-request-quota-increase.md +++ b/articles/cosmos-db/sql/create-support-request-quota-increase.md @@ -3,7 +3,7 @@ title: How to request quota increase for Azure Cosmos DB resources description: Learn how to request a quota increase for Azure Cosmos DB resources. You will also learn how to enable a subscription to access a region. author: kanshiG ms.author: govindk -ms.reviewer: sngun +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: how-to ms.date: 04/27/2022 diff --git a/articles/cosmos-db/sql/create-website.md b/articles/cosmos-db/sql/create-website.md index d9aa6f8a864f5..8f3d13293cde9 100644 --- a/articles/cosmos-db/sql/create-website.md +++ b/articles/cosmos-db/sql/create-website.md @@ -1,12 +1,13 @@ --- title: Deploy a web app with a template - Azure Cosmos DB description: Learn how to deploy an Azure Cosmos account, Azure App Service Web Apps, and a sample web application using an Azure Resource Manager template. 
-author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 06/19/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Deploy Azure Cosmos DB and Azure App Service with a web app from GitHub using an Azure Resource Manager Template diff --git a/articles/cosmos-db/sql/database-transactions-optimistic-concurrency.md b/articles/cosmos-db/sql/database-transactions-optimistic-concurrency.md index 228a6d834821c..6afdc98565f34 100644 --- a/articles/cosmos-db/sql/database-transactions-optimistic-concurrency.md +++ b/articles/cosmos-db/sql/database-transactions-optimistic-concurrency.md @@ -1,13 +1,13 @@ --- title: Database transactions and optimistic concurrency control in Azure Cosmos DB description: This article describes database transactions and optimistic concurrency control in Azure Cosmos DB -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 12/04/2019 -ms.reviewer: sngun --- # Transactions and optimistic concurrency control diff --git a/articles/cosmos-db/sql/distribute-throughput-across-partitions-faq.yml b/articles/cosmos-db/sql/distribute-throughput-across-partitions-faq.yml index b090db0349b47..9e509c6bfd1c9 100644 --- a/articles/cosmos-db/sql/distribute-throughput-across-partitions-faq.yml +++ b/articles/cosmos-db/sql/distribute-throughput-across-partitions-faq.yml @@ -21,11 +21,11 @@ sections: - question: | What resources can I use this feature on? answer: | - The feature is only supported for SQL and API for MongoDB accounts and on collections with dedicated throughput (either manual or autoscale). Shared throughput databases aren't supported in the preview. + The feature is only supported for SQL and API for MongoDB accounts and on collections with dedicated throughput (either manual or autoscale). Shared throughput databases aren't supported in the preview. The feature doesn't apply to serverless accounts. - question: | - Which version of the Azure Cosmos DB functionality in Azure PowerShell and Azure CLI supports this feature? + Which version of the Azure Cosmos DB functionality in Azure PowerShell supports this feature? answer: | - The ability to redistribute RU/s across physical partitions is only supported in the latest preview version of Azure PowerShell and Azure CLI. + The ability to redistribute RU/s across physical partitions is only supported in the latest preview version of Azure PowerShell. - question: | What is the maximum number of physical partitions I can change in one request? answer: | @@ -47,7 +47,7 @@ sections: |P1: 4000 RU/s | P1: 1000 RU/s | 2/3 | |P2: 1000 RU/s | P2: 500 RU/s | 1/6 | - - If you increase your RU/s without triggering a split - that is, you scale to a total RU/s <= current partition count * 10,000 RU/s - each physical partition will have RU/s = `MIN(current throughput fraction * new RU/s, 10,000 RU/s)`. Consider an example where the resulting sum of all RU/s across all partitions is less than the total new RU/s of the resource. It's recommended to reset your RU/s to an even distribution and redistribute to ensure that all available RU/s are allocated to a partition. To check if this scenario applies to your resource use Azure Monitor metrics. 
Compare the value of the **ProvisionedThroughput** (when using manual throughput) or **AutoscaleMaxThroughput** (when using autoscale) metric to the value of the **PhysicalPartitionThroughput** metric. If the value of **PhysicalPartitionThroughput** is less than the respective **ProvisionedThroughput** or **AutoscaleMaxThroughput**, then reset your RU/s to an even distribution before redistributing, or lower your resource's throughput to the value of **PhysicalPartitionThroughput**. + - If you increase your RU/s, each physical partition will have RU/s = `MIN(current throughput fraction * new RU/s, 10,000 RU/s)`. The RU/s on a physical partition can never exceed 10,000 RU/s. For example, suppose you have a collection with 6000 RU/s and 3 physical partitions. You scale it up to 12,000 RU/s: @@ -56,5 +56,17 @@ sections: |P0: 1000 RU/s | P0: 2000 RU/s | 1/6 | |P1: 4000 RU/s | P1: 8000 RU/s | 2/3 | |P2: 1000 RU/s | P2: 2000 RU/s | 1/6 | + - question: | + Why am I seeing a discrepancy between the overall RU/s on my container and the sum of the RU/s across all physical partitions? + answer: | + - This discrepancy can happen when you scale up your overall RU/s for any single partition, `(current RU/s per partition * new container RU/s)/(old container RU/s)` is greater than 10,000 RU/s. This discrepancy occurs when you trigger a partition split by increasing RU/s beyond `currentNumberOfPartitions * 10,000 RU/s` or increase RU/s without triggering a partition split. + - It's recommended to redistribute your throughput equally after the scale-up. Otherwise, it's possible that you won't be able to use all the RU/s you've provisioned (and are being billed for). + - To check if this scenario applies to your resource use Azure Monitor metrics. Compare the value of the **ProvisionedThroughput** (when using manual throughput) or **AutoscaleMaxThroughput** (when using autoscale) metric to the value of the **PhysicalPartitionThroughput** metric. If the value of **PhysicalPartitionThroughput** is less than the respective **ProvisionedThroughput** or **AutoscaleMaxThroughput**, then reset your RU/s to an even distribution before redistributing, or lower your resource's throughput to the value of **PhysicalPartitionThroughput**. - - If you increase your RU/s [beyond what the current partition layout can serve](../scaling-provisioned-throughput-best-practices.md), you trigger a split. By design, all physical partitions will default to having the same number of RU/s. After partitions split, the logical partitions that contributed to a hot partition may be on a different physical partition. If necessary, you can redistribute your RU/s on the new layout. + For example, suppose you have a collection with 6000 RU/s and 3 physical partitions. You scale it up to 24,000 RU/s. After the scale-up, the total throughput across all partitions is only 18,000 RU/s. This distribution means that while we're being billed for 24,000 RU/s, we're only able to get 18,000 RU/s of effective throughput. Each partition will get 8000 RU/s, as RU/s are redistributed equally, and we can redistribute RU/s again as needed. We could also choose to lower our overall RU/s to 18,000 RU/s. 
+ + |Before scale-up (6000 RU/s) |After scale up to 24,000 RU/s (effective RU/s = 18,000 RU/s) |Fraction of total RU/s | + |---------|---------|---------| + |P0: 1000 RU/s | P0: 4000 RU/s | 1/6 | + |P1: 4000 RU/s | P1: 10000 RU/s (partition can't exceed 10,000 RU/s) | 2/3 | + |P2: 1000 RU/s | P2: 4000 RU/s | 1/6 | diff --git a/articles/cosmos-db/sql/distribute-throughput-across-partitions.md b/articles/cosmos-db/sql/distribute-throughput-across-partitions.md index 49ce5f65a9e56..f35ca67655f72 100644 --- a/articles/cosmos-db/sql/distribute-throughput-across-partitions.md +++ b/articles/cosmos-db/sql/distribute-throughput-across-partitions.md @@ -15,7 +15,7 @@ ms.date: 05/09/2022 By default, Azure Cosmos DB distributes the provisioned throughput of a database or container equally across all physical partitions. However, scenarios may arise where due to a skew in the workload or choice of partition key, certain logical (and thus physical) partitions need more throughput than others. For these scenarios, Azure Cosmos DB gives you the ability to redistribute your provisioned throughput across physical partitions. Redistributing throughput across partitions helps you achieve better performance without having to configure your overall throughput based on the hottest partition. -The throughput redistributing feature applies to databases and containers using provisioned throughput (manual and autoscale) and doesn't apply to serverless containers. You can change the throughput per physical partition using the Azure Cosmos DB PowerShell or CLI commands. +The throughput redistributing feature applies to databases and containers using provisioned throughput (manual and autoscale) and doesn't apply to serverless containers. You can change the throughput per physical partition using the Azure Cosmos DB PowerShell commands. ## When to use this feature @@ -28,7 +28,10 @@ If you aren't seeing 429 responses and your end to end latency is acceptable, th ## Getting started -To get started using distributed throughput across partitions, enroll in the preview by filing a support ticket in the [Azure portal](https://portal.azure.com). +To get started using distributed throughput across partitions, enroll in the preview by submitting a request for the **Azure Cosmos DB Throughput Redistribution Across Partitions** feature via the [**Preview Features** page](../../azure-resource-manager/management/preview-features.md) in your Azure Subscription overview page. +- Before submitting your request, verify that your Azure Cosmos DB account(s) meet all the [preview eligibility criteria](#preview-eligibility-criteria). +- The Azure Cosmos DB team will review your request and contact you via email to confirm which account(s) in the subscription you want to enroll in the preview. + ## Example scenario @@ -84,9 +87,42 @@ Alternatively, if you haven't changed your throughput per partition before, you Follow the guidance in the article [Best practices for scaling provisioned throughput (RU/s)](../scaling-provisioned-throughput-best-practices.md#step-1-find-the-current-number-of-physical-partitions) to determine the number of physical partitions. +You can also use the PowerShell `Get-AzCosmosDBSqlContainerPerPartitionThroughput` and `Get-AzCosmosDBMongoDBCollectionPerPartitionThroughput` commands to read the current RU/s on each physical partition. 
+ +```powershell +// SQL API +$somePartitions = Get-AzCosmosDBSqlContainerPerPartitionThroughput ` + -ResourceGroupName "" ` + -AccountName "" ` + -DatabaseName "" ` + -Name "" ` + -PhysicalPartitionIds ("", ") + +$allPartitions = Get-AzCosmosDBSqlContainerPerPartitionThroughput ` + -ResourceGroupName "" ` + -AccountName "" ` + -DatabaseName "" ` + -Name "" ` + -AllPartitions + +// API for MongoDB +$somePartitions = Get-AzCosmosDBMongoDBCollectionPerPartitionThroughput ` + -ResourceGroupName "" ` + -AccountName "" ` + -DatabaseName "" ` + -Name "" ` + -PhysicalPartitionIds ("", ", ...) + +$allPartitions = Get-AzCosmosDBMongoDBCollectionPerPartitionThroughput ` + -ResourceGroupName "" ` + -AccountName "" ` + -DatabaseName "" ` + -Name "" ` + -AllPartitions +``` ### Determine RU/s for target partition -Next, let's decide how many RU/s we want to give to our hottest physical partition(s). Let's call this set our target partition(s). The most RU/s any physical partition can have been 10,000 RU/s. +Next, let's decide how many RU/s we want to give to our hottest physical partition(s). Let's call this set our target partition(s). The most RU/s any physical partition can contain is 10,000 RU/s. The right approach depends on your workload requirements. General approaches include: - Increasing the RU/s by a percentage, measure the rate of 429 responses, and repeat until desired throughput is achieved. @@ -97,9 +133,9 @@ The right approach depends on your workload requirements. General approaches inc ### Determine RU/s for source partition -Finally, let's decide how many RU/s we want to keep on our other physical partitions. This selection will determine the partitions that the target physical partition "takes" from. +Finally, let's decide how many RU/s we want to keep on our other physical partitions. This selection will determine the partitions that the target physical partition takes throughput from. -In the PowerShell and Azure CLI APIs, we must specify at least one source partition to redistribute RU/s from. Optionally, we can specify a custom minimum throughput each physical partition should have after the redistribution. If not specified, by default, Azure Cosmos DB will ensure that each physical partition has at least 100 RU/s after the redistribution. +In the PowerShell APIs, we must specify at least one source partition to redistribute RU/s from. We can also specify a custom minimum throughput each physical partition should have after the redistribution. If not specified, by default, Azure Cosmos DB will ensure that each physical partition has at least 100 RU/s after the redistribution. It's recommended to explicitly specify the minimum throughput. The right approach depends on your workload requirements. General approaches include: - Taking RU/s equally from all source partitions (works best when there are <= 10 partitions) @@ -112,7 +148,7 @@ The right approach depends on your workload requirements. General approaches inc ## Step 3: Programatically change the throughput across partitions -You can use the command `AzCosmosDBSqlContainerPerPartitionThroughput` to redistribute throughput. +You can use the PowerShell command `Update-AzCosmosDBSqlContainerPerPartitionThroughput` to redistribute throughput. To understand the below example, let's take an example where we have a container that has 6000 RU/s total (either 6000 manual RU/s or autoscale 6000 RU/s) and 3 physical partitions. 
Based on our analysis, we want a layout where: @@ -124,11 +160,11 @@ We specify partitions 0 and 2 as our source partitions, and specify that after t ```powershell $SourcePhysicalPartitionObjects = @() -$SourcePhysicalPartitionObjects += New-AzCosmosDBSqlPhysicalPartitionThroughputObject -Id 0 -Throughput 1000 -$SourcePhysicalPartitionObjects += New-AzCosmosDBSqlPhysicalPartitionThroughputObject -Id 2 -Throughput 1000 +$SourcePhysicalPartitionObjects += New-AzCosmosDBPhysicalPartitionThroughputObject -Id "0" -Throughput 1000 +$SourcePhysicalPartitionObjects += New-AzCosmosDBPhysicalPartitionThroughputObject -Id "2" -Throughput 1000 $TargetPhysicalPartitionObjects = @() -$TargetPhysicalPartitionObjects += New-AzCosmosDBSqlPhysicalPartitionThroughputObject -Id 1 -Throughput 4000 +$TargetPhysicalPartitionObjects += New-AzCosmosDBPhysicalPartitionThroughputObject -Id "1" -Throughput 4000 // SQL API Update-AzCosmosDBSqlContainerPerPartitionThroughput ` @@ -140,7 +176,7 @@ Update-AzCosmosDBSqlContainerPerPartitionThroughput ` -TargetPhysicalPartitionThroughputObject $TargetPhysicalPartitionObjects // API for MongoDB -Update-AzCosmosDBMongoCollectionPerPartitionThroughput ` +Update-AzCosmosDBMongoDBCollectionPerPartitionThroughput ` -ResourceGroupName "" ` -AccountName "" ` -DatabaseName "" ` @@ -151,6 +187,26 @@ Update-AzCosmosDBMongoCollectionPerPartitionThroughput ` After you've completed the redistribution, you can verify the change by viewing the **PhysicalPartitionThroughput** metric in Azure Monitor. Split by the dimension **PhysicalPartitionId** to see how many RU/s you have per physical partition. +If necessary, you can also reset the RU/s per physical partition so that the RU/s of your container are evenly distributed across all physical partitions. + +```powershell +// SQL API +$resetPartitions = Update-AzCosmosDBSqlContainerPerPartitionThroughput ` + -ResourceGroupName "" ` + -AccountName "" ` + -DatabaseName "" ` + -Name "" ` + -EqualDistributionPolicy + +// API for MongoDB +$resetPartitions = Update-AzCosmosDBMongoDBCollectionPerPartitionThroughput ` + -ResourceGroupName "" ` + -AccountName "" ` + -DatabaseName "" ` + -Name "" ` + -EqualDistributionPolicy +``` + ## Step 4: Verify and monitor your RU/s consumption After you've completed the redistribution, you can verify the change by viewing the **PhysicalPartitionThroughput** metric in Azure Monitor. Split by the dimension **PhysicalPartitionId** to see how many RU/s you have per physical partition. @@ -161,11 +217,24 @@ After the changes, assuming your overall workload hasn't changed, you'll likely ## Limitations -### SDK requirements +### Preview eligibility criteria +To enroll in the preview, your Cosmos account must meet all the following criteria: + - Your Cosmos account is using SQL API or API for MongoDB. + - If you're using API for MongoDB, the version must be >= 3.6. + - Your Cosmos account is using provisioned throughput (manual or autoscale). Distribution of throughput across partitions doesn't apply to serverless accounts. + - If you're using SQL API, your application must use the Azure Cosmos DB .NET V3 SDK, version 3.27.0 or higher. When the ability to redistribute throughput across partitions is enabled on your account, all requests sent from non .NET SDKs or older .NET SDK versions won't be accepted. 
+ - Your Cosmos account isn't using any unsupported connectors: + - Azure Data Factory + - Azure Stream Analytics + - Logic Apps + - Azure Functions + - Azure Search + +### SDK requirements (SQL API only) -Throughput redistribution across partitions is supported only in the latest preview version of the .NET v3 SDK. When the feature is enabled on your account, you must only use the supported SDK. Requests sent from other SDKs or earlier versions won't be accepted. There are no driver or SDK requirements to use this feature for non SQL API accounts. +Throughput redistribution across partitions is supported only with the latest version of the .NET v3 SDK. When the feature is enabled on your account, you must only use the supported SDK. Requests sent from other SDKs or earlier versions won't be accepted. There are no driver or SDK requirements to use this feature for API for MongoDB accounts. -Find the latest preview version the supported SDK: +Find the latest preview version of the supported SDK: | SDK | Supported versions | Package manager link | | --- | --- | --- | @@ -195,4 +264,4 @@ Learn about how to use provisioned throughput with the following articles: * Learn more about [provisioned throughput.](../set-throughput.md) * Learn more about [request units.](../request-units.md) * Need to monitor for hot partitions? See [monitoring request units.](../monitor-normalized-request-units.md#how-to-monitor-for-hot-partitions) -* Want to learn the best practices? See [best practices for scaling provisioned throughput.](../scaling-provisioned-throughput-best-practices.md) +* Want to learn the best practices? See [best practices for scaling provisioned throughput.](../scaling-provisioned-throughput-best-practices.md) \ No newline at end of file diff --git a/articles/cosmos-db/sql/find-request-unit-charge.md b/articles/cosmos-db/sql/find-request-unit-charge.md index 46ada28eb9dca..b3c666457ad97 100644 --- a/articles/cosmos-db/sql/find-request-unit-charge.md +++ b/articles/cosmos-db/sql/find-request-unit-charge.md @@ -1,25 +1,28 @@ --- -title: Find request unit (RU) charge for a SQL query in Azure Cosmos DB -description: Learn how to find the request unit (RU) charge for SQL queries executed against an Azure Cosmos container. You can use the Azure portal, .NET, Java, Python, and Node.js languages to find the RU charge. -author: ThomasWeiss +title: Find request unit charge for a SQL query in Azure Cosmos DB +description: Find the request unit charge for SQL queries against containers created with Azure Cosmos DB, using the Azure portal, .NET, Java, Python, or Node.js. +author: jcocchi ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to -ms.date: 10/14/2020 -ms.author: thweiss +ms.date: 06/02/2022 +ms.author: jucocchi ms.devlang: csharp, java, javascript, python -ms.custom: devx-track-js +ms.custom: +- devx-track-js +- kr2b-contr-experiment --- -# Find the request unit charge for operations executed in Azure Cosmos DB SQL API + +# Find the request unit charge for operations in Azure Cosmos DB SQL API [!INCLUDE[appliesto-sql-api](../includes/appliesto-sql-api.md)] Azure Cosmos DB supports many APIs, such as SQL, MongoDB, Cassandra, Gremlin, and Table. Each API has its own set of database operations. These operations range from simple point reads and writes to complex queries. Each database operation consumes system resources based on the complexity of the operation. 
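The find-request-unit-charge.md article being revised here surfaces request charges through the portal and each SDK. As an illustrative sketch only (placeholder endpoint, key, and item values; database and container names echo earlier samples in this diff), reading the charge with the .NET v3 SDK might look like:

```csharp
using System;
using System.Threading.Tasks;
using Microsoft.Azure.Cosmos;

public static class RequestChargeSample
{
    public static async Task ShowChargesAsync()
    {
        CosmosClient client = new CosmosClient(
            "https://<account>.documents.azure.com:443/", "<primary-key>");
        Container container = client.GetContainer("SalesDatabase", "OrdersContainer");

        // A point read reports the RU charge for that single operation.
        ItemResponse<dynamic> read = await container.ReadItemAsync<dynamic>(
            "order-1", new PartitionKey("012345"));
        Console.WriteLine($"Point read charge: {read.RequestCharge} RU");

        // A query reports an RU charge on every page of results.
        FeedIterator<dynamic> iterator =
            container.GetItemQueryIterator<dynamic>("SELECT * FROM c");
        while (iterator.HasMoreResults)
        {
            FeedResponse<dynamic> page = await iterator.ReadNextAsync();
            Console.WriteLine($"Query page charge: {page.RequestCharge} RU");
        }
    }
}
```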
-The cost of all database operations is normalized by Azure Cosmos DB and is expressed by Request Units (or RUs, for short). Request charge is the request units consumed by all your database operations. You can think of RUs as a performance currency abstracting the system resources such as CPU, IOPS, and memory that are required to perform the database operations supported by Azure Cosmos DB. No matter which API you use to interact with your Azure Cosmos container, costs are always measured by RUs. Whether the database operation is a write, point read, or query, costs are always measured in RUs. To learn more, see the [request units and its considerations](../request-units.md) article. +The cost of all database operations is normalized by Azure Cosmos DB and is expressed by *request units* (RU). *Request charge* is the request units consumed by all your database operations. You can think of RUs as a performance currency abstracting the system resources such as CPU, IOPS, and memory that are required to perform the database operations supported by Azure Cosmos DB. No matter which API you use to interact with your container, costs are always measured in RUs. Whether the database operation is a write, point read, or query, costs are always measured in RUs. To learn more, see [Request Units in Azure Cosmos DB](../request-units.md). -This article presents the different ways you can find the [request unit](../request-units.md) (RU) consumption for any operation executed against a container in Azure Cosmos DB SQL API. If you are using a different API, see [API for MongoDB](../mongodb/find-request-unit-charge-mongodb.md), [Cassandra API](../cassandra/find-request-unit-charge-cassandra.md), [Gremlin API](../graph/find-request-unit-charge-gremlin.md), and [Table API](../table/find-request-unit-charge.md) articles to find the RU/s charge. +This article presents the different ways that you can find the request unit consumption for any operation run against a container in Azure Cosmos DB SQL API. If you're using a different API, see [API for MongoDB](../mongodb/find-request-unit-charge-mongodb.md), [Cassandra API](../cassandra/find-request-unit-charge-cassandra.md), [Gremlin API](../graph/find-request-unit-charge-gremlin.md), and [Table API](../table/find-request-unit-charge.md). -Currently, you can measure this consumption only by using the Azure portal or by inspecting the response sent back from Azure Cosmos DB through one of the SDKs. If you're using the SQL API, you have multiple options for finding the RU consumption for an operation against an Azure Cosmos container. +Currently, you can measure consumption only by using the Azure portal or by inspecting the response sent from Azure Cosmos DB through one of the SDKs. If you're using the SQL API, you have multiple options for finding the request charge for an operation. ## Use the Azure portal @@ -35,7 +38,7 @@ Currently, you can measure this consumption only by using the Azure portal or by 1. Select **Query Stats** to display the actual request charge for the request you executed. 
-:::image type="content" source="../media/find-request-unit-charge/portal-sql-query.png" alt-text="Screenshot of a SQL query request charge in the Azure portal"::: + :::image type="content" source="../media/find-request-unit-charge/portal-sql-query.png" alt-text="Screenshot of a SQL query request charge in the Azure portal."::: ## Use the .NET SDK @@ -169,10 +172,10 @@ For more information, see [Quickstart: Build a Python app by using an Azure Cosm To learn about optimizing your RU consumption, see these articles: -* [Request units and throughput in Azure Cosmos DB](../request-units.md) +* [Request Units in Azure Cosmos DB](../request-units.md) * [Optimize provisioned throughput cost in Azure Cosmos DB](../optimize-cost-throughput.md) * [Optimize query cost in Azure Cosmos DB](../optimize-cost-reads-writes.md) * [Globally scale provisioned throughput](../request-units.md) -* [Provision throughput on containers and databases](../set-throughput.md) +* [Introduction to provisioned throughput in Azure Cosmos DB](../set-throughput.md) * [Provision throughput for a container](how-to-provision-container-throughput.md) -* [Monitor and debug with metrics in Azure Cosmos DB](../use-metrics.md) +* [Monitor and debug with insights in Azure Cosmos DB](../use-metrics.md) diff --git a/articles/cosmos-db/sql/how-to-convert-session-token.md b/articles/cosmos-db/sql/how-to-convert-session-token.md index 2ddc1c6d005bb..770461421299a 100644 --- a/articles/cosmos-db/sql/how-to-convert-session-token.md +++ b/articles/cosmos-db/sql/how-to-convert-session-token.md @@ -19,7 +19,7 @@ This article explains how to convert between different session token formats to > [!NOTE] > By default, the SDK keeps track of the session token automatically and it will use the most recent session token. For more information, please visit [Utilize session tokens](how-to-manage-consistency.md#utilize-session-tokens). The instructions in this article only apply with the following conditions: > * Your Azure Cosmos DB account uses Session consistency. -> * You are managing the session tokens are manually. +> * You are managing the session tokens manually. > * You are using multiple versions of the SDK at the same time. ## Session token formats @@ -112,4 +112,4 @@ Read the following articles: * [Use session tokens to manage consistency in Azure Cosmos DB](how-to-manage-consistency.md#utilize-session-tokens) * [Choose the right consistency level in Azure Cosmos DB](../consistency-levels.md) * [Consistency, availability, and performance tradeoffs in Azure Cosmos DB](../consistency-levels.md) -* [Availability and performance tradeoffs for various consistency levels](../consistency-levels.md) \ No newline at end of file +* [Availability and performance tradeoffs for various consistency levels](../consistency-levels.md) diff --git a/articles/cosmos-db/sql/how-to-create-container.md b/articles/cosmos-db/sql/how-to-create-container.md index dd920fe72b5fb..e64e6fbf0f7c1 100644 --- a/articles/cosmos-db/sql/how-to-create-container.md +++ b/articles/cosmos-db/sql/how-to-create-container.md @@ -1,12 +1,13 @@ --- title: Create a container in Azure Cosmos DB SQL API description: Learn how to create a container in Azure Cosmos DB SQL API by using Azure portal, .NET, Java, Python, Node.js, and other SDKs. 
-author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 01/03/2022 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.devlang: csharp ms.custom: devx-track-csharp, devx-track-azurecli --- diff --git a/articles/cosmos-db/sql/how-to-manage-conflicts.md b/articles/cosmos-db/sql/how-to-manage-conflicts.md index f6d81a936f3e5..a234862fc9539 100644 --- a/articles/cosmos-db/sql/how-to-manage-conflicts.md +++ b/articles/cosmos-db/sql/how-to-manage-conflicts.md @@ -1,12 +1,13 @@ --- title: Manage conflicts between regions in Azure Cosmos DB description: Learn how to manage conflicts in Azure Cosmos DB by creating the last-writer-wins or a custom conflict resolution policy -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 06/11/2020 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.devlang: csharp, java, javascript ms.custom: devx-track-js, devx-track-csharp --- diff --git a/articles/cosmos-db/sql/how-to-manage-consistency.md b/articles/cosmos-db/sql/how-to-manage-consistency.md index 4cc451f1023f1..ffa07f5a8d8b2 100644 --- a/articles/cosmos-db/sql/how-to-manage-consistency.md +++ b/articles/cosmos-db/sql/how-to-manage-consistency.md @@ -1,12 +1,13 @@ --- title: Manage consistency in Azure Cosmos DB description: Learn how to configure and manage consistency levels in Azure Cosmos DB using Azure portal, .NET SDK, Java SDK and various other SDKs -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 02/16/2022 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.devlang: csharp, java, javascript ms.custom: devx-track-js, devx-track-csharp, devx-track-azurecli, devx-track-azurepowershell --- @@ -284,7 +285,7 @@ item = client.ReadItem(doc_link, options) ## Monitor Probabilistically Bounded Staleness (PBS) metric -How eventual is eventual consistency? For the average case, can we offer staleness bounds with respect to version history and time. The [**Probabilistically Bounded Staleness (PBS)**](https://pbs.cs.berkeley.edu/) metric tries to quantify the probability of staleness and shows it as a metric. To view the PBS metric, go to your Azure Cosmos account in the Azure portal. Open the **Metrics** pane, and select the **Consistency** tab. Look at the graph named **Probability of strongly consistent reads based on your workload (see PBS)**. +How eventual is eventual consistency? For the average case, can we offer staleness bounds with respect to version history and time. The [**Probabilistically Bounded Staleness (PBS)**](http://pbs.cs.berkeley.edu/) metric tries to quantify the probability of staleness and shows it as a metric. To view the PBS metric, go to your Azure Cosmos account in the Azure portal. Open the **Metrics** pane, and select the **Consistency** tab. Look at the graph named **Probability of strongly consistent reads based on your workload (see PBS)**. 
:::image type="content" source="./media/how-to-manage-consistency/pbs-metric.png" alt-text="PBS graph in the Azure portal"::: diff --git a/articles/cosmos-db/sql/how-to-manage-indexing-policy.md b/articles/cosmos-db/sql/how-to-manage-indexing-policy.md index e3eedea375957..a59d9508032e1 100644 --- a/articles/cosmos-db/sql/how-to-manage-indexing-policy.md +++ b/articles/cosmos-db/sql/how-to-manage-indexing-policy.md @@ -1,12 +1,13 @@ --- title: Manage indexing policies in Azure Cosmos DB description: Learn how to manage indexing policies, include or exclude a property from indexing, how to define indexing using different Azure Cosmos DB SDKs -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 05/25/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: devx-track-python, devx-track-js, devx-track-csharp, devx-track-azurecli --- diff --git a/articles/cosmos-db/sql/how-to-multi-master.md b/articles/cosmos-db/sql/how-to-multi-master.md index b05fc586200ec..12041197d3236 100644 --- a/articles/cosmos-db/sql/how-to-multi-master.md +++ b/articles/cosmos-db/sql/how-to-multi-master.md @@ -1,12 +1,13 @@ --- title: How to configure multi-region writes in Azure Cosmos DB description: Learn how to configure multi-region writes for your applications by using different SDKs in Azure Cosmos DB. -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 01/06/2021 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-python, devx-track-js, devx-track-csharp, "seo-nov-2020" --- diff --git a/articles/cosmos-db/sql/how-to-provision-container-throughput.md b/articles/cosmos-db/sql/how-to-provision-container-throughput.md index 4c1a34842603b..189a1620fbb29 100644 --- a/articles/cosmos-db/sql/how-to-provision-container-throughput.md +++ b/articles/cosmos-db/sql/how-to-provision-container-throughput.md @@ -1,12 +1,13 @@ --- title: Provision container throughput in Azure Cosmos DB SQL API description: Learn how to provision throughput at the container level in Azure Cosmos DB SQL API using Azure portal, CLI, PowerShell and various other SDKs. -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 10/14/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-js, devx-track-azurecli, devx-track-csharp --- diff --git a/articles/cosmos-db/sql/how-to-provision-database-throughput.md b/articles/cosmos-db/sql/how-to-provision-database-throughput.md index 9b446a8d3a7bd..3b4a3ab93d2b5 100644 --- a/articles/cosmos-db/sql/how-to-provision-database-throughput.md +++ b/articles/cosmos-db/sql/how-to-provision-database-throughput.md @@ -1,12 +1,13 @@ --- title: Provision database throughput in Azure Cosmos DB SQL API description: Learn how to provision throughput at the database level in Azure Cosmos DB SQL API using Azure portal, CLI, PowerShell and various other SDKs. 
-author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 10/15/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurecli, devx-track-csharp --- diff --git a/articles/cosmos-db/sql/how-to-query-container.md b/articles/cosmos-db/sql/how-to-query-container.md index 49477757eb9e5..d85cb92460ef3 100644 --- a/articles/cosmos-db/sql/how-to-query-container.md +++ b/articles/cosmos-db/sql/how-to-query-container.md @@ -1,12 +1,13 @@ --- title: Query containers in Azure Cosmos DB description: Learn how to query containers in Azure Cosmos DB using in-partition and cross-partition queries -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 3/18/2019 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Query an Azure Cosmos container diff --git a/articles/cosmos-db/sql/how-to-use-stored-procedures-triggers-udfs.md b/articles/cosmos-db/sql/how-to-use-stored-procedures-triggers-udfs.md index cbebba834cd09..739f67c2b373f 100644 --- a/articles/cosmos-db/sql/how-to-use-stored-procedures-triggers-udfs.md +++ b/articles/cosmos-db/sql/how-to-use-stored-procedures-triggers-udfs.md @@ -1,12 +1,13 @@ --- title: Register and use stored procedures, triggers, and user-defined functions in Azure Cosmos DB SDKs description: Learn how to register and call stored procedures, triggers, and user-defined functions using the Azure Cosmos DB SDKs -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 11/03/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.devlang: csharp, java, javascript, python ms.custom: devx-track-python, devx-track-js, devx-track-csharp --- diff --git a/articles/cosmos-db/sql/how-to-write-javascript-query-api.md b/articles/cosmos-db/sql/how-to-write-javascript-query-api.md index 0679411d4369c..c6ab111acf451 100644 --- a/articles/cosmos-db/sql/how-to-write-javascript-query-api.md +++ b/articles/cosmos-db/sql/how-to-write-javascript-query-api.md @@ -1,12 +1,13 @@ --- title: Write stored procedures and triggers using the JavaScript query API in Azure Cosmos DB description: Learn how to write stored procedures and triggers using the JavaScript Query API in Azure Cosmos DB -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 05/07/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.devlang: javascript ms.custom: devx-track-js --- diff --git a/articles/cosmos-db/sql/how-to-write-stored-procedures-triggers-udfs.md b/articles/cosmos-db/sql/how-to-write-stored-procedures-triggers-udfs.md index cfe90b0d66c24..89e47100f14ee 100644 --- a/articles/cosmos-db/sql/how-to-write-stored-procedures-triggers-udfs.md +++ b/articles/cosmos-db/sql/how-to-write-stored-procedures-triggers-udfs.md @@ -1,12 +1,13 @@ --- title: Write stored procedures, triggers, and UDFs in Azure Cosmos DB description: Learn how to define stored procedures, triggers, and user-defined functions in Azure Cosmos DB -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 10/05/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.devlang: javascript ms.custom: devx-track-js --- diff --git a/articles/cosmos-db/sql/index-metrics.md b/articles/cosmos-db/sql/index-metrics.md index 5a6bedfb8337e..3bb25b63d6ab5 100644 --- 
a/articles/cosmos-db/sql/index-metrics.md +++ b/articles/cosmos-db/sql/index-metrics.md @@ -1,12 +1,13 @@ --- title: Azure Cosmos DB indexing metrics description: Learn how to obtain and interpret the indexing metrics in Azure Cosmos DB -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 10/25/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # Indexing metrics in Azure Cosmos DB [!INCLUDE[appliesto-sql-api](../includes/appliesto-sql-api.md)] diff --git a/articles/cosmos-db/sql/javascript-query-api.md b/articles/cosmos-db/sql/javascript-query-api.md index fd4d5294aa721..f345cf9ff3f1b 100644 --- a/articles/cosmos-db/sql/javascript-query-api.md +++ b/articles/cosmos-db/sql/javascript-query-api.md @@ -1,13 +1,13 @@ --- title: Work with JavaScript integrated query API in Azure Cosmos DB Stored Procedures and Triggers description: This article introduces the concepts for JavaScript language-integrated query API to create stored procedures and triggers in Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 05/07/2020 -ms.author: tisande -ms.reviewer: sngun +ms.author: sidandrews +ms.reviewer: jucocchi ms.devlang: javascript ms.custom: devx-track-js --- diff --git a/articles/cosmos-db/sql/manage-with-bicep.md b/articles/cosmos-db/sql/manage-with-bicep.md index c0811bae8cc42..63c670039cf17 100644 --- a/articles/cosmos-db/sql/manage-with-bicep.md +++ b/articles/cosmos-db/sql/manage-with-bicep.md @@ -1,12 +1,13 @@ --- title: Create and manage Azure Cosmos DB with Bicep description: Use Bicep to create and configure Azure Cosmos DB for Core (SQL) API -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 02/18/2022 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Manage Azure Cosmos DB Core (SQL) API resources with Bicep diff --git a/articles/cosmos-db/sql/manage-with-cli.md b/articles/cosmos-db/sql/manage-with-cli.md index b2188c92b4f6e..2c0a8223abc0c 100644 --- a/articles/cosmos-db/sql/manage-with-cli.md +++ b/articles/cosmos-db/sql/manage-with-cli.md @@ -1,12 +1,13 @@ --- title: Manage Azure Cosmos DB Core (SQL) API resources using Azure CLI description: Manage Azure Cosmos DB Core (SQL) API resources using Azure CLI. -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 02/18/2022 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Manage Azure Cosmos Core (SQL) API resources using Azure CLI diff --git a/articles/cosmos-db/sql/manage-with-powershell.md b/articles/cosmos-db/sql/manage-with-powershell.md index 2192251e8c81b..f5106cbd0a045 100644 --- a/articles/cosmos-db/sql/manage-with-powershell.md +++ b/articles/cosmos-db/sql/manage-with-powershell.md @@ -1,12 +1,13 @@ --- title: Manage Azure Cosmos DB Core (SQL) API resources using using PowerShell description: Manage Azure Cosmos DB Core (SQL) API resources using using PowerShell. 
-author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 02/18/2022 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: seodec18, devx-track-azurepowershell --- diff --git a/articles/cosmos-db/sql/manage-with-templates.md b/articles/cosmos-db/sql/manage-with-templates.md index 41c0099caa8a3..63e040b6f12b6 100644 --- a/articles/cosmos-db/sql/manage-with-templates.md +++ b/articles/cosmos-db/sql/manage-with-templates.md @@ -1,12 +1,13 @@ --- title: Create and manage Azure Cosmos DB with Resource Manager templates description: Use Azure Resource Manager templates to create and configure Azure Cosmos DB for Core (SQL) API -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 02/18/2022 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Manage Azure Cosmos DB Core (SQL) API resources with Azure Resource Manager templates diff --git a/articles/cosmos-db/sql/migrate-containers-partitioned-to-nonpartitioned.md b/articles/cosmos-db/sql/migrate-containers-partitioned-to-nonpartitioned.md index 009d1465bc68e..7c37c7bcf81ec 100644 --- a/articles/cosmos-db/sql/migrate-containers-partitioned-to-nonpartitioned.md +++ b/articles/cosmos-db/sql/migrate-containers-partitioned-to-nonpartitioned.md @@ -1,12 +1,13 @@ --- title: Migrate non-partitioned Azure Cosmos containers to partitioned containers description: Learn how to migrate all the existing non-partitioned containers into partitioned containers. -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 08/26/2021 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-csharp --- diff --git a/articles/cosmos-db/sql/migrate-data-striim.md b/articles/cosmos-db/sql/migrate-data-striim.md index deaa3d1859f87..e74d0e82821ff 100644 --- a/articles/cosmos-db/sql/migrate-data-striim.md +++ b/articles/cosmos-db/sql/migrate-data-striim.md @@ -7,7 +7,7 @@ ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 12/09/2021 -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Migrate data to Azure Cosmos DB SQL API account using Striim diff --git a/articles/cosmos-db/sql/migrate-dotnet-v3.md b/articles/cosmos-db/sql/migrate-dotnet-v3.md index ce56b835ad89e..2318dde923407 100644 --- a/articles/cosmos-db/sql/migrate-dotnet-v3.md +++ b/articles/cosmos-db/sql/migrate-dotnet-v3.md @@ -6,7 +6,7 @@ ms.author: esarroyo ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to -ms.date: 04/07/2022 +ms.date: 06/01/2022 ms.devlang: csharp --- @@ -88,7 +88,29 @@ The following classes have been replaced on the 3.0 SDK: * `Microsoft.Azure.Documents.Resource` -The Microsoft.Azure.Documents.UriFactory class has been replaced by the fluent design. The fluent design builds URLs internally and allows a single `Container` object to be passed around instead of a `DocumentClient`, `DatabaseName`, and `DocumentCollection`. +The Microsoft.Azure.Documents.UriFactory class has been replaced by the fluent design. 
+ +# [.NET SDK v3](#tab/dotnet-v3) + +```csharp +Container container = client.GetContainer(databaseName, containerName); +ItemResponse<SalesOrder> response = await container.CreateItemAsync( + salesOrder, + new PartitionKey(salesOrder.AccountNumber)); + +``` + +# [.NET SDK v2](#tab/dotnet-v2) + +```csharp +Uri collectionUri = UriFactory.CreateDocumentCollectionUri(databaseName, containerName); +await client.CreateDocumentAsync( + collectionUri, + salesOrder, + new RequestOptions { PartitionKey = new PartitionKey(salesOrder.AccountNumber) }); +``` + +--- Because the .NET v3 SDK allows users to configure a custom serialization engine, there's no direct replacement for the `Document` type. When using Newtonsoft.Json (default serialization engine), `JObject` can be used to achieve the same functionality. When using a different serialization engine, you can use its base json document type (for example, `JsonDocument` for System.Text.Json). The recommendation is to use a C# type that reflects the schema of your items instead of relying on generic types. @@ -133,6 +155,8 @@ The following properties have been removed: The .NET SDK v3 provides a fluent `CosmosClientBuilder` class that replaces the need for the SDK v2 URI Factory. +The fluent design builds URLs internally and allows a single `Container` object to be passed around instead of a `DocumentClient`, `DatabaseName`, and `DocumentCollection`. + The following example creates a new `CosmosClientBuilder` with a strong ConsistencyLevel and a list of preferred locations: ```csharp diff --git a/articles/cosmos-db/sql/migrate-hbase-to-cosmos-db.md b/articles/cosmos-db/sql/migrate-hbase-to-cosmos-db.md index 26daab3cab0ac..608be6f4e1a29 100644 --- a/articles/cosmos-db/sql/migrate-hbase-to-cosmos-db.md +++ b/articles/cosmos-db/sql/migrate-hbase-to-cosmos-db.md @@ -7,7 +7,7 @@ ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 12/07/2021 ms.author: hitakagi -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Migrate data from Apache HBase to Azure Cosmos DB SQL API account diff --git a/articles/cosmos-db/sql/migrate-java-v4-sdk.md b/articles/cosmos-db/sql/migrate-java-v4-sdk.md index ff8bfbd2740d9..6d02d2b028993 100644 --- a/articles/cosmos-db/sql/migrate-java-v4-sdk.md +++ b/articles/cosmos-db/sql/migrate-java-v4-sdk.md @@ -1,15 +1,15 @@ --- title: Migrate your application to use the Azure Cosmos DB Java SDK v4 (com.azure.cosmos) description: Learn how to upgrade your existing Java application from using the older Azure Cosmos DB Java SDKs to the newer Java SDK 4.0 (com.azure.cosmos package)for Core (SQL) API. -author: rothja +author: seesharprun ms.devlang: java ms.custom: devx-track-java -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 08/26/2021 -ms.reviewer: sngun --- # Migrate your application to use the Azure Cosmos DB Java SDK v4 diff --git a/articles/cosmos-db/sql/modeling-data.md b/articles/cosmos-db/sql/modeling-data.md index cc3f2f988b4ef..6852dd403b7a3 100644 --- a/articles/cosmos-db/sql/modeling-data.md +++ b/articles/cosmos-db/sql/modeling-data.md @@ -2,14 +2,14 @@ title: Modeling data in Azure Cosmos DB titleSuffix: Azure Cosmos DB description: Learn about data modeling in NoSQL databases, differences between modeling data in a relational database and a document database.
-author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 03/24/2022 ms.custom: cosmos-db-video -ms.reviewer: wiassaf --- # Data modeling in Azure Cosmos DB [!INCLUDE[appliesto-sql-api](../includes/appliesto-sql-api.md)] @@ -36,7 +36,7 @@ For comparison, let's first see how we might model data in a relational database :::image type="content" source="./media/sql-api-modeling-data/relational-data-model.png" alt-text="Relational database model" border="false"::: -When working with relational databases, the strategy is to normalize all your data. Normalizing your data typically involves taking an entity, such as a person, and breaking it down into discrete components. In the example above, a person may have multiple contact detail records, as well as multiple address records. Contact details can be further broken down by further extracting common fields like a type. The same applies to address, each record can be of type *Home* or *Business*. +The strategy, when working with relational databases, is to normalize all your data. Normalizing your data typically involves taking an entity, such as a person, and breaking it down into discrete components. In the example above, a person may have multiple contact detail records, and multiple address records. Contact details can be further broken down by further extracting common fields like a type. The same applies to address, each record can be of type *Home* or *Business*. The guiding premise when normalizing data is to **avoid storing redundant data** on each record and rather refer to data. In this example, to read a person, with all their contact details and addresses, you need to use JOINS to effectively compose back (or denormalize) your data at run time. @@ -48,7 +48,7 @@ JOIN ContactDetailType cdt ON cdt.Id = cd.TypeId JOIN Address a ON a.PersonId = p.Id ``` -Updating a single person with their contact details and addresses requires write operations across many individual tables. +Write operations across many individual tables are required to update a single person's contact details and addresses. Now let's take a look at how we would model the same data as a self-contained entity in Azure Cosmos DB. @@ -76,7 +76,7 @@ Now let's take a look at how we would model the same data as a self-contained en Using the approach above we've **denormalized** the person record, by **embedding** all the information related to this person, such as their contact details and addresses, into a *single JSON* document. In addition, because we're not confined to a fixed schema we have the flexibility to do things like having contact details of different shapes entirely. -Retrieving a complete person record from the database is now a **single read operation** against a single container and for a single item. Updating a person record, with their contact details and addresses, is also a **single write operation** against a single item. +Retrieving a complete person record from the database is now a **single read operation** against a single container and for a single item. Updating the contact details and addresses of a person record is also a **single write operation** against a single item. By denormalizing data, your application may need to issue fewer queries and updates to complete common operations. @@ -87,7 +87,7 @@ In general, use embedded data models when: * There are **contained** relationships between entities. 
* There are **one-to-few** relationships between entities. * There's embedded data that **changes infrequently**. -* There's embedded data that will not grow **without bound**. +* There's embedded data that won't grow **without bound**. * There's embedded data that is **queried frequently together**. > [!NOTE] @@ -119,7 +119,7 @@ Take this JSON snippet. This might be what a post entity with embedded comments would look like if we were modeling a typical blog, or CMS, system. The problem with this example is that the comments array is **unbounded**, meaning that there's no (practical) limit to the number of comments any single post can have. This may become a problem as the size of the item could grow infinitely large so is a design you should avoid. -As the size of the item grows the ability to transmit the data over the wire as well as reading and updating the item, at scale, will be impacted. +As the size of the item grows the ability to transmit the data over the wire and reading and updating the item, at scale, will be impacted. In this case, it would be better to consider the following data model. @@ -148,8 +148,8 @@ Comment items: ] ``` -This model has a document for each comment with a property that contains the post id. This allows posts to contain any number of comments and can grow efficiently. Users wanting to see more -than the most recent comments would query this container passing the postId which should be the partition key for the comments container. +This model has a document for each comment with a property that contains the post identifier. This allows posts to contain any number of comments and can grow efficiently. Users wanting to see more +than the most recent comments would query this container passing the postId, which should be the partition key for the comments container. Another case where embedding data isn't a good idea is when the embedded data is used often across items and will change frequently. @@ -163,7 +163,7 @@ Take this JSON snippet. "holdings": [ { "numberHeld": 100, - "stock": { "symbol": "zaza", "open": 1, "high": 2, "low": 0.5 } + "stock": { "symbol": "zbzb", "open": 1, "high": 2, "low": 0.5 } }, { "numberHeld": 50, @@ -175,7 +175,7 @@ Take this JSON snippet. This could represent a person's stock portfolio. We have chosen to embed the stock information into each portfolio document. In an environment where related data is changing frequently, like a stock trading application, embedding data that changes frequently is going to mean that you're constantly updating each portfolio document every time a stock is traded. -Stock *zaza* may be traded many hundreds of times in a single day and thousands of users could have *zaza* on their portfolio. With a data model like the above we would have to update many thousands of portfolio documents many times every day leading to a system that won't scale well. +Stock *zbzb* may be traded many hundreds of times in a single day and thousands of users could have *zbzb* on their portfolio. With a data model like the above we would have to update many thousands of portfolio documents many times every day leading to a system that won't scale well. ## Reference data @@ -200,7 +200,7 @@ Person document: Stock documents: { "id": "1", - "symbol": "zaza", + "symbol": "zbzb", "open": 1, "high": 2, "low": 0.5, @@ -227,7 +227,7 @@ An immediate downside to this approach though is if your application is required ### What about foreign keys? 
-Because there's currently no concept of a constraint, foreign-key or otherwise, any inter-document relationships that you have in documents are effectively "weak links" and won't be verified by the database itself. If you want to ensure that the data a document is referring to actually exists, then you need to do this in your application, or through the use of server-side triggers or stored procedures on Azure Cosmos DB. +Because there's currently no concept of a constraint, foreign-key or otherwise, any inter-document relationships that you have in documents are effectively "weak links" and won't be verified by the database itself. If you want to ensure that the data a document is referring to actually exists, then you need to do this in your application, or by using server-side triggers or stored procedures on Azure Cosmos DB. ### When to reference @@ -286,11 +286,11 @@ Book documents: {"id": "1000","name": "Deep Dive into Azure Cosmos DB", "pub-id": "mspress"} ``` -In the above example, we have dropped the unbounded collection on the publisher document. Instead we just have a reference to the publisher on each book document. +In the above example, we've dropped the unbounded collection on the publisher document. Instead we just have a reference to the publisher on each book document. -### How do I model many to many relationships? +### How do I model many-to-many relationships? -In a relational database *many:many* relationships are often modeled with join tables, which just join records from other tables together. +In a relational database *many-to-many* relationships are often modeled with join tables, which just join records from other tables together. :::image type="content" source="./media/sql-api-modeling-data/join-table.png" alt-text="Join tables" border="false"::: @@ -392,11 +392,11 @@ Here we've (mostly) followed the embedded model, where data from other entities If you look at the book document, we can see a few interesting fields when we look at the array of authors. There's an `id` field that is the field we use to refer back to an author document, standard practice in a normalized model, but then we also have `name` and `thumbnailUrl`. We could have stuck with `id` and left the application to get any additional information it needed from the respective author document using the "link", but because our application displays the author's name and a thumbnail picture with every book displayed we can save a round trip to the server per book in a list by denormalizing **some** data from the author. -Sure, if the author's name changed or they wanted to update their photo we'd have to go and update every book they ever published but for our application, based on the assumption that authors don't change their names often, this is an acceptable design decision. +Sure, if the author's name changed or they wanted to update their photo we'd have to update every book they ever published but for our application, based on the assumption that authors don't change their names often, this is an acceptable design decision. In the example, there are **pre-calculated aggregates** values to save expensive processing on a read operation. In the example, some of the data embedded in the author document is data that is calculated at run-time. Every time a new book is published, a book document is created **and** the countOfBooks field is set to a calculated value based on the number of book documents that exist for a particular author. 
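As a concrete illustration of keeping such a pre-calculated field in sync, here's a rough client-side sketch that uses the .NET SDK's transactional batch support rather than the server-side stored procedure approach discussed next. It assumes the book and author documents live in the same container and share a partition key value (the author id here), and the `Author` and `Book` types and property names are purely illustrative.

```csharp
using System;
using System.Threading.Tasks;
using Microsoft.Azure.Cosmos;

// Illustrative shapes; your real documents will differ.
public record Author(string id, string name, int countOfBooks);
public record Book(string id, string name, string authorId);

public static class BookCatalog
{
    public static async Task PublishBookAsync(Container container, Author author, Book newBook)
    {
        // A transactional batch is atomic only within one logical partition,
        // so this sketch assumes the container is partitioned by the author id.
        PartitionKey partitionKey = new PartitionKey(author.id);

        Author updatedAuthor = author with { countOfBooks = author.countOfBooks + 1 };

        TransactionalBatchResponse response = await container
            .CreateTransactionalBatch(partitionKey)
            .CreateItem(newBook)                          // insert the new book document
            .ReplaceItem(updatedAuthor.id, updatedAuthor) // bump the pre-calculated count
            .ExecuteAsync();

        if (!response.IsSuccessStatusCode)
        {
            // If anything fails, neither the insert nor the replace is applied.
            throw new InvalidOperationException($"Publish failed with status {response.StatusCode}");
        }
    }
}
```

The server-side stored procedure approach mentioned below gives the same atomicity (it's also scoped to a single partition key) while keeping the logic next to the data.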
This optimization would be good in read heavy systems where we can afford to do computations on writes in order to optimize reads. -The ability to have a model with pre-calculated fields is made possible because Azure Cosmos DB supports **multi-document transactions**. Many NoSQL stores can't do transactions across documents and therefore advocate design decisions, such as "always embed everything", due to this limitation. With Azure Cosmos DB, you can use server-side triggers, or stored procedures, that insert books and update authors all within an ACID transaction. Now you don't **have** to embed everything into one document just to be sure that your data remains consistent. +The ability to have a model with pre-calculated fields is made possible because Azure Cosmos DB supports **multi-document transactions**. Many NoSQL stores can't do transactions across documents and therefore advocate design decisions, such as "always embed everything", due to this limitation. With Azure Cosmos DB, you can use server-side triggers, or stored procedures that insert books and update authors all within an ACID transaction. Now you don't **have** to embed everything into one document just to be sure that your data remains consistent. ## Distinguish between different document types @@ -432,7 +432,7 @@ Review documents: This integration happens through [Azure Cosmos DB analytical store](../analytical-store-introduction.md), a columnar representation of your transactional data that enables large-scale analytics without any impact to your transactional workloads. This analytical store is suitable for fast, cost-effective queries on large operational data sets, without copying data and impacting the performance of your transactional workloads. When you create a container with analytical store enabled, or when you enable analytical store on an existing container, all transactional inserts, updates, and deletes are synchronized with analytical store in near real time, no Change Feed or ETL jobs are required. -With Synapse Link, you can now directly connect to your Azure Cosmos DB containers from Azure Synapse Analytics and access the analytical store, at no Request Units (RUs) costs. Azure Synapse Analytics currently supports Synapse Link with Synapse Apache Spark and serverless SQL pools. If you have a globally distributed Azure Cosmos DB account, after you enable analytical store for a container, it will be available in all regions for that account. +With Azure Synapse Link, you can now directly connect to your Azure Cosmos DB containers from Azure Synapse Analytics and access the analytical store, at no request unit (RU) cost. Azure Synapse Analytics currently supports Azure Synapse Link with Synapse Apache Spark and serverless SQL pools. If you have a globally distributed Azure Cosmos DB account, after you enable analytical store for a container, it will be available in all regions for that account. ### Analytical store automatic schema inference @@ -454,7 +454,7 @@ Normalization becomes meaningless since with Azure Synapse Link you can join bet * Fewer properties per document. * Data structures with fewer nested levels. -Please note that these last two factors, fewer properties and fewer levels, help in the performance of your analytical queries but also decrease the chances of parts of your data not being represented in the analytical store.
As described in the article on automatic schema inference rules, there are limits to the number of levels and properties that are represented in analytical store. +Note that these last two factors, fewer properties and fewer levels, help in the performance of your analytical queries but also decrease the chances of parts of your data not being represented in the analytical store. As described in the article on automatic schema inference rules, there are limits to the number of levels and properties that are represented in analytical store. Another important factor for normalization is that SQL serverless pools in Azure Synapse support result sets with up to 1000 columns, and exposing nested columns also counts towards that limit. In other words, both analytical store and Synapse SQL serverless pools have a limit of 1000 properties. @@ -464,7 +464,7 @@ But what to do since denormalization is an important data modeling technique for Your Azure Cosmos DB partition key (PK) isn't used in analytical store. And now you can use [analytical store custom partitioning](https://devblogs.microsoft.com/cosmosdb/custom-partitioning-azure-synapse-link/) to copies of analytical store using any PK that you want. Because of this isolation, you can choose a PK for your transactional data with focus on data ingestion and point reads, while cross-partition queries can be done with Azure Synapse Link. Let's see an example: -In a hypothetical global IoT scenario, `device id` is a good PK since all devices have a similar data volume and with that you won't have a hot partition problem. But if you want to analyze the data of more than one device, like "all data from yesterday" or "totals per city", you may have problems since those are cross-partition queries. Those queries can hurt your transactional performance since they use part of your throughput in RUs to run. But with Azure Synapse Link, you can run these analytical queries at no RUs costs. Analytical store columnar format is optimized for analytical queries and Azure Synapse Link leverages this characteristic to allow great performance with Azure Synapse Analytics runtimes. +In a hypothetical global IoT scenario, `device id` is a good PK since all devices have a similar data volume and with that you won't have a hot partition problem. But if you want to analyze the data of more than one device, like "all data from yesterday" or "totals per city", you may have problems since those are cross-partition queries. Those queries can hurt your transactional performance since they use part of your throughput in request units to run. But with Azure Synapse Link, you can run these analytical queries at no request unit cost. Analytical store columnar format is optimized for analytical queries and Azure Synapse Link applies this characteristic to allow great performance with Azure Synapse Analytics runtimes. ### Data types and properties names @@ -508,32 +508,32 @@ Azure Synapse Link allows you to reduce costs from the following perspectives: * Fewer queries running in your transactional database. * A PK optimized for data ingestion and point reads, reducing data footprint, hot partition scenarios, and partitions splits. * Data tiering since [analytical time-to-live (attl)](../analytical-store-introduction.md#analytical-ttl) is independent from transactional time-to-live (tttl). You can keep your transactional data in transactional store for a few days, weeks, months, and keep the data in analytical store for years or for ever.
Analytical store columnar format brings a natural data compression, from 50% up to 90%. And its cost per GB is ~10% of transactional store actual price. For more information about the current backup limitations, see [analytical store overview](../analytical-store-introduction.md). - * No ETL jobs running in your environment, meaning that you don't need to provision RUs for them. + * No ETL jobs running in your environment, meaning that you don't need to provision request units for them. ### Controlled redundancy -This is a great alternative for situations when a data model already exists and can't be changed. And the existing data model doesn't fit well into analytical store due to automatic schema inference rules like the limit of nested levels or the maximum number of properties. If this is your case, you can leverage [Azure Cosmos DB Change Feed](../change-feed.md) to replicate your data into another container, applying the required transformations for a Synapse Link friendly data model. Let's see an example: +This is a great alternative for situations when a data model already exists and can't be changed. And the existing data model doesn't fit well into analytical store due to automatic schema inference rules like the limit of nested levels or the maximum number of properties. If this is your case, you can use [Azure Cosmos DB Change Feed](../change-feed.md) to replicate your data into another container, applying the required transformations for an Azure Synapse Link friendly data model. Let's see an example: #### Scenario Container `CustomersOrdersAndItems` is used to store on-line orders including customer and items details: billing address, delivery address, delivery method, delivery status, items price, etc. Only the first 1000 properties are represented and key information isn't included in analytical store, blocking Azure Synapse Link usage. The container has PBs of records it's not possible to change the application and remodel the data. -Another perspective of the problem is the big data volume. Billions of rows are constantly used by the Analytics Department, what prevents them to use tttl for old data deletion. Maintaining the entire data history in the transactional database because of analytical needs forces them to constantly increase RUs provisioning, impacting costs. Transactional and analytical workloads compete for the same resources at the same time. +Another perspective of the problem is the big data volume. Billions of rows are constantly used by the Analytics Department, which prevents them from using tttl for old data deletion. Maintaining the entire data history in the transactional database because of analytical needs forces them to constantly increase request units provisioning, impacting costs. Transactional and analytical workloads compete for the same resources at the same time. What to do? #### Solution with Change Feed -* The engineering team decided to use Change Feed to populate three new containers: `Customers`, `Orders`, and `Items`. With Change Feed they are normalizing and flattening the data. Unnecessary information is removed from the data model and each container has close to 100 properties, avoiding data loss due to automatic schema inference limits. -* These new containers have analytical store enabled and now the Analytics Department is using Synapse Analytics to read the data, reducing the RUs usage since the analytical queries are happening in Synapse Apache Spark and serverless SQL pools.
-* Container `CustomersOrdersAndItems` now has tttl set to keep data for six months only, which allows for another RUs usage reduction, since there's a minimum of 10 RUs per GB in Azure Cosmos DB. Less data, fewer RUs. +* The engineering team decided to use Change Feed to populate three new containers: `Customers`, `Orders`, and `Items`. With Change Feed they're normalizing and flattening the data. Unnecessary information is removed from the data model and each container has close to 100 properties, avoiding data loss due to automatic schema inference limits. +* These new containers have analytical store enabled and now the Analytics Department is using Synapse Analytics to read the data, reducing the request units usage since the analytical queries are happening in Synapse Apache Spark and serverless SQL pools. +* Container `CustomersOrdersAndItems` now has tttl set to keep data for six months only, which allows for another request units usage reduction, since there's a minimum of 10 request units per GB in Azure Cosmos DB. Less data, fewer request units. ## Takeaways The biggest takeaways from this article are to understand that data modeling in a schema-free world is as important as ever. -Just as there's no single way to represent a piece of data on a screen, there's no single way to model your data. You need to understand your application and how it will produce, consume, and process the data. Then, by applying some of the guidelines presented here you can set about creating a model that addresses the immediate needs of your application. When your applications need to change, you can leverage the flexibility of a schema-free database to embrace that change and evolve your data model easily. +Just as there's no single way to represent a piece of data on a screen, there's no single way to model your data. You need to understand your application and how it will produce, consume, and process the data. Then, by applying some of the guidelines presented here you can set about creating a model that addresses the immediate needs of your application. When your applications need to change, you can use the flexibility of a schema-free database to embrace that change and evolve your data model easily. ## Next steps diff --git a/articles/cosmos-db/sql/odbc-driver.md b/articles/cosmos-db/sql/odbc-driver.md index 708a66050d63e..6807fc23a5b7a 100644 --- a/articles/cosmos-db/sql/odbc-driver.md +++ b/articles/cosmos-db/sql/odbc-driver.md @@ -1,8 +1,9 @@ --- title: Connect to Azure Cosmos DB using BI analytics tools description: Learn how to use the Azure Cosmos DB ODBC driver to create tables and views so that normalized data can be viewed in BI and data analytics software. -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to diff --git a/articles/cosmos-db/sql/performance-testing.md b/articles/cosmos-db/sql/performance-testing.md index d4207a624ff3d..b7390e4c1d627 100644 --- a/articles/cosmos-db/sql/performance-testing.md +++ b/articles/cosmos-db/sql/performance-testing.md @@ -1,8 +1,9 @@ --- title: Performance and scale testing with Azure Cosmos DB description: Learn how to do scale and performance testing with Azure Cosmos DB. You can then evaluate the functionality of Azure Cosmos DB for high-performance application scenarios. 
-author: timsander1 -ms.author: tisande +author: seesharprun +ms.author: sidandrews +ms.reviewer: jucocchi ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to diff --git a/articles/cosmos-db/sql/performance-tips-async-java.md b/articles/cosmos-db/sql/performance-tips-async-java.md index 199a38e1587bd..5c1b58c3ef4ae 100644 --- a/articles/cosmos-db/sql/performance-tips-async-java.md +++ b/articles/cosmos-db/sql/performance-tips-async-java.md @@ -1,13 +1,14 @@ --- title: Performance tips for Azure Cosmos DB Async Java SDK v2 description: Learn client configuration options to improve Azure Cosmos database performance for Async Java SDK v2 -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: how-to ms.date: 05/11/2020 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-java, contperf-fy21q2 --- diff --git a/articles/cosmos-db/sql/performance-tips-java-sdk-v4-sql.md b/articles/cosmos-db/sql/performance-tips-java-sdk-v4-sql.md index daeb063d10630..d06f763e8d19e 100644 --- a/articles/cosmos-db/sql/performance-tips-java-sdk-v4-sql.md +++ b/articles/cosmos-db/sql/performance-tips-java-sdk-v4-sql.md @@ -1,13 +1,14 @@ --- title: Performance tips for Azure Cosmos DB Java SDK v4 description: Learn client configuration options to improve Azure Cosmos database performance for Java SDK v4 -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: how-to ms.date: 04/22/2022 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-java, contperf-fy21q2 --- diff --git a/articles/cosmos-db/sql/performance-tips-java.md b/articles/cosmos-db/sql/performance-tips-java.md index 94e520ca599f2..c50563b2c9c4a 100644 --- a/articles/cosmos-db/sql/performance-tips-java.md +++ b/articles/cosmos-db/sql/performance-tips-java.md @@ -1,13 +1,14 @@ --- title: Performance tips for Azure Cosmos DB Sync Java SDK v2 description: Learn client configuration options to improve Azure Cosmos database performance for Sync Java SDK v2 -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: how-to ms.date: 05/11/2020 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-java --- diff --git a/articles/cosmos-db/sql/powershell-samples.md b/articles/cosmos-db/sql/powershell-samples.md index ec522cfbebd40..4c3d91bc00367 100644 --- a/articles/cosmos-db/sql/powershell-samples.md +++ b/articles/cosmos-db/sql/powershell-samples.md @@ -1,12 +1,13 @@ --- title: Azure PowerShell samples for Azure Cosmos DB Core (SQL) API description: Get the Azure PowerShell samples to perform common tasks in Azure Cosmos DB for Core (SQL) API -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: sample ms.date: 01/20/2021 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Azure PowerShell samples for Azure Cosmos DB Core (SQL) API diff --git a/articles/cosmos-db/sql/query-cheat-sheet.md b/articles/cosmos-db/sql/query-cheat-sheet.md index a6aa6a9eb7356..b72b441ca7f8f 100644 --- a/articles/cosmos-db/sql/query-cheat-sheet.md +++ b/articles/cosmos-db/sql/query-cheat-sheet.md @@ -1,8 +1,9 @@ --- title: Azure Cosmos DB PDF query cheat sheets description: Printable PDF cheat sheets that helps you use Azure Cosmos DB's SQL, MongoDB, Graph, and Table APIs to query your data -author: timsander1 -ms.author: tisande +author: 
seesharprun +ms.author: sidandrews +ms.reviewer: jucocchi ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual diff --git a/articles/cosmos-db/sql/quick-create-template.md b/articles/cosmos-db/sql/quick-create-template.md index 473da11f426b7..dc47834120049 100644 --- a/articles/cosmos-db/sql/quick-create-template.md +++ b/articles/cosmos-db/sql/quick-create-template.md @@ -1,8 +1,9 @@ --- title: Quickstart - Create an Azure Cosmos DB and a container by using Azure Resource Manager template description: Quickstart showing how to an Azure Cosmos database and a container by using Azure Resource Manager template -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown tags: azure-resource-manager ms.service: cosmos-db ms.subservice: cosmosdb-sql diff --git a/articles/cosmos-db/sql/read-change-feed.md b/articles/cosmos-db/sql/read-change-feed.md index f9c026540115f..b1d76621f493e 100644 --- a/articles/cosmos-db/sql/read-change-feed.md +++ b/articles/cosmos-db/sql/read-change-feed.md @@ -1,13 +1,13 @@ --- title: Reading Azure Cosmos DB change feed description: This article describes different options available to read and access change feed in Azure Cosmos DB. -author: timsander1 -ms.author: tisande +author: seesharprun +ms.author: sidandrews +ms.reviewer: jucocchi ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 06/30/2021 -ms.reviewer: sngun --- # Reading Azure Cosmos DB change feed diff --git a/articles/cosmos-db/sql/scale-on-schedule.md b/articles/cosmos-db/sql/scale-on-schedule.md index 8b7425210e2f4..fc394ed5c66b6 100644 --- a/articles/cosmos-db/sql/scale-on-schedule.md +++ b/articles/cosmos-db/sql/scale-on-schedule.md @@ -1,12 +1,13 @@ --- title: Scale Azure Cosmos DB on a schedule by using Azure Functions timer description: Learn how to scale changes in throughput in Azure Cosmos DB using PowerShell and Azure Functions. -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to ms.date: 01/13/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Scale Azure Cosmos DB throughput by using Azure Functions Timer trigger diff --git a/articles/cosmos-db/sql/serverless-computing-database.md b/articles/cosmos-db/sql/serverless-computing-database.md index 50611f48c44ce..91611e5152eaa 100644 --- a/articles/cosmos-db/sql/serverless-computing-database.md +++ b/articles/cosmos-db/sql/serverless-computing-database.md @@ -6,7 +6,7 @@ ms.author: maquaran ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to -ms.reviewer: wiassaf +ms.reviewer: mjbrown ms.date: 05/02/2020 ms.custom: cosmos-db-video --- diff --git a/articles/cosmos-db/sql/sql-api-dotnet-v2sdk-samples.md b/articles/cosmos-db/sql/sql-api-dotnet-v2sdk-samples.md index ce391d2c4acc2..ad9351c9e46e4 100644 --- a/articles/cosmos-db/sql/sql-api-dotnet-v2sdk-samples.md +++ b/articles/cosmos-db/sql/sql-api-dotnet-v2sdk-samples.md @@ -1,8 +1,9 @@ --- title: 'Azure Cosmos DB: .NET examples for the SQL API' description: Find C# .NET examples on GitHub for common tasks using the Azure Cosmos DB SQL API, including CRUD operations. 
-author: StefArroyo -ms.author: esarroyo +author: seesharprun +ms.author: sidandrews +ms.reviewer: esarroyo ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: sample diff --git a/articles/cosmos-db/sql/sql-api-dotnet-v3sdk-samples.md b/articles/cosmos-db/sql/sql-api-dotnet-v3sdk-samples.md index edf775c3388b5..5d71a3a38f367 100644 --- a/articles/cosmos-db/sql/sql-api-dotnet-v3sdk-samples.md +++ b/articles/cosmos-db/sql/sql-api-dotnet-v3sdk-samples.md @@ -1,14 +1,14 @@ --- title: 'Azure Cosmos DB: .NET (Microsoft.Azure.Cosmos) examples for the SQL API' description: Find the C# .NET v3 SDK examples on GitHub for common tasks by using the Azure Cosmos DB SQL API. -author: StefArroyo -ms.author: esarroyo +author: seesharprun +ms.author: sidandrews +ms.reviewer: esarroyo ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: sample ms.date: 05/02/2020 ms.custom: devx-track-dotnet - --- # Azure Cosmos DB .NET v3 SDK (Microsoft.Azure.Cosmos) examples for the SQL API diff --git a/articles/cosmos-db/sql/sql-api-java-application.md b/articles/cosmos-db/sql/sql-api-java-application.md index 6c9e08e39cada..8be1e213e2e60 100644 --- a/articles/cosmos-db/sql/sql-api-java-application.md +++ b/articles/cosmos-db/sql/sql-api-java-application.md @@ -1,15 +1,15 @@ --- title: 'Tutorial: Build a Java web app using Azure Cosmos DB and the SQL API' description: 'Tutorial: This Java web application tutorial shows you how to use the Azure Cosmos DB and the SQL API to store and access data from a Java application hosted on Azure Websites.' -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: tutorial ms.date: 03/29/2022 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-java -ms.reviewer: wiassaf --- # Tutorial: Build a Java web application using Azure Cosmos DB and the SQL API diff --git a/articles/cosmos-db/sql/sql-api-java-sdk-samples.md b/articles/cosmos-db/sql/sql-api-java-sdk-samples.md index 2485262a65b7e..e1ac996a16833 100644 --- a/articles/cosmos-db/sql/sql-api-java-sdk-samples.md +++ b/articles/cosmos-db/sql/sql-api-java-sdk-samples.md @@ -2,14 +2,15 @@ title: 'Azure Cosmos DB SQL API: Java SDK v4 examples' description: Find Java examples on GitHub for common tasks using the Azure Cosmos DB SQL API, including CRUD operations. -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: sample ms.date: 08/26/2021 ms.devlang: java ms.custom: devx-track-java -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown --- # Azure Cosmos DB SQL API: Java SDK v4 examples diff --git a/articles/cosmos-db/sql/sql-api-query-metrics.md b/articles/cosmos-db/sql/sql-api-query-metrics.md index 5bf8d2db83314..90a7cc0531e61 100644 --- a/articles/cosmos-db/sql/sql-api-query-metrics.md +++ b/articles/cosmos-db/sql/sql-api-query-metrics.md @@ -1,8 +1,9 @@ --- title: SQL query metrics for Azure Cosmos DB SQL API description: Learn about how to instrument and debug the SQL query performance of Azure Cosmos DB requests. 
-author: timsander1 -ms.author: tisande +author: seesharprun +ms.author: sidandrews +ms.reviewer: jucocchi ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to diff --git a/articles/cosmos-db/sql/sql-api-sdk-async-java.md b/articles/cosmos-db/sql/sql-api-sdk-async-java.md index f430e35c8eb01..59516f93759fd 100644 --- a/articles/cosmos-db/sql/sql-api-sdk-async-java.md +++ b/articles/cosmos-db/sql/sql-api-sdk-async-java.md @@ -1,13 +1,14 @@ --- title: 'Azure Cosmos DB: SQL Async Java API, SDK & resources' description: Learn all about the SQL Async Java API and SDK including release dates, retirement dates, and changes made between each version of the Azure Cosmos DB SQL Async Java SDK. -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: reference ms.date: 11/11/2021 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-java --- diff --git a/articles/cosmos-db/sql/sql-api-sdk-bulk-executor-dot-net.md b/articles/cosmos-db/sql/sql-api-sdk-bulk-executor-dot-net.md index b2cc9a6095278..09d3dd32d6248 100644 --- a/articles/cosmos-db/sql/sql-api-sdk-bulk-executor-dot-net.md +++ b/articles/cosmos-db/sql/sql-api-sdk-bulk-executor-dot-net.md @@ -1,13 +1,14 @@ --- title: 'Azure Cosmos DB: Bulk executor .NET API, SDK & resources' description: Learn all about the bulk executor .NET API and SDK including release dates, retirement dates, and changes made between each version of the Azure Cosmos DB bulk executor .NET SDK. -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: csharp ms.topic: reference ms.date: 04/06/2021 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown --- # .NET bulk executor library: Download information (Legacy) diff --git a/articles/cosmos-db/sql/sql-api-sdk-bulk-executor-java.md b/articles/cosmos-db/sql/sql-api-sdk-bulk-executor-java.md index bc4affd89b763..a4463794853fd 100644 --- a/articles/cosmos-db/sql/sql-api-sdk-bulk-executor-java.md +++ b/articles/cosmos-db/sql/sql-api-sdk-bulk-executor-java.md @@ -1,13 +1,14 @@ --- title: 'Azure Cosmos DB: Bulk executor Java API, SDK & resources' description: Learn all about the bulk executor Java API and SDK including release dates, retirement dates, and changes made between each version of the Azure Cosmos DB bulk executor Java SDK. -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: reference ms.date: 04/06/2021 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-java --- diff --git a/articles/cosmos-db/sql/sql-api-sdk-dotnet-changefeed.md b/articles/cosmos-db/sql/sql-api-sdk-dotnet-changefeed.md index 030f7521e731b..bbf60eb475408 100644 --- a/articles/cosmos-db/sql/sql-api-sdk-dotnet-changefeed.md +++ b/articles/cosmos-db/sql/sql-api-sdk-dotnet-changefeed.md @@ -1,13 +1,14 @@ --- title: Azure Cosmos DB .NET change feed Processor API, SDK release notes description: Learn all about the Change Feed Processor API and SDK including release dates, retirement dates, and changes made between each version of the .NET Change Feed Processor SDK. 
-author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: csharp ms.topic: reference ms.date: 04/06/2021 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown --- # .NET Change Feed Processor SDK: Download and release notes (Legacy) diff --git a/articles/cosmos-db/sql/sql-api-sdk-dotnet-core.md b/articles/cosmos-db/sql/sql-api-sdk-dotnet-core.md index d7005e4d2da17..00bbee35ef0a5 100644 --- a/articles/cosmos-db/sql/sql-api-sdk-dotnet-core.md +++ b/articles/cosmos-db/sql/sql-api-sdk-dotnet-core.md @@ -1,13 +1,14 @@ --- title: 'Azure Cosmos DB: SQL .NET Core API, SDK & resources' description: Learn all about the SQL .NET Core API and SDK including release dates, retirement dates, and changes made between each version of the Azure Cosmos DB .NET Core SDK. -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: csharp ms.topic: reference ms.date: 04/18/2022 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-dotnet diff --git a/articles/cosmos-db/sql/sql-api-sdk-dotnet-standard.md b/articles/cosmos-db/sql/sql-api-sdk-dotnet-standard.md index dc95c96d21ab9..5e093dc72c65a 100644 --- a/articles/cosmos-db/sql/sql-api-sdk-dotnet-standard.md +++ b/articles/cosmos-db/sql/sql-api-sdk-dotnet-standard.md @@ -1,13 +1,14 @@ --- title: 'Azure Cosmos DB: SQL .NET Standard API, SDK & resources' description: Learn all about the SQL API and .NET SDK including release dates, retirement dates, and changes made between each version of the Azure Cosmos DB .NET SDK. -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: csharp ms.topic: reference ms.date: 03/22/2022 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-dotnet diff --git a/articles/cosmos-db/sql/sql-api-sdk-dotnet.md b/articles/cosmos-db/sql/sql-api-sdk-dotnet.md index 9308a167a888b..4dbfade45baa9 100644 --- a/articles/cosmos-db/sql/sql-api-sdk-dotnet.md +++ b/articles/cosmos-db/sql/sql-api-sdk-dotnet.md @@ -1,13 +1,14 @@ --- title: 'Azure Cosmos DB: SQL .NET API, SDK & resources' description: Learn all about the SQL .NET API and SDK including release dates, retirement dates, and changes made between each version of the Azure Cosmos DB .NET SDK. -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: csharp ms.topic: reference ms.date: 04/18/2022 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-dotnet --- diff --git a/articles/cosmos-db/sql/sql-api-sdk-java-spark-v3.md b/articles/cosmos-db/sql/sql-api-sdk-java-spark-v3.md index ae12f6ec4e910..1501cd10d8b45 100644 --- a/articles/cosmos-db/sql/sql-api-sdk-java-spark-v3.md +++ b/articles/cosmos-db/sql/sql-api-sdk-java-spark-v3.md @@ -1,13 +1,14 @@ --- title: 'Azure Cosmos DB Apache Spark 3 OLTP Connector for SQL API (Preview) release notes and resources' description: Learn about the Azure Cosmos DB Apache Spark 3 OLTP Connector for SQL API, including release dates, retirement dates, and changes made between each version of the Azure Cosmos DB SQL Java SDK. 
-author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: reference ms.date: 11/12/2021 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-java --- diff --git a/articles/cosmos-db/sql/sql-api-sdk-java-spark.md b/articles/cosmos-db/sql/sql-api-sdk-java-spark.md index 0c05f6fb8b684..67d5f32553a9f 100644 --- a/articles/cosmos-db/sql/sql-api-sdk-java-spark.md +++ b/articles/cosmos-db/sql/sql-api-sdk-java-spark.md @@ -1,13 +1,14 @@ --- title: 'Azure Cosmos DB Apache Spark 2 OLTP Connector for SQL API release notes and resources' description: Learn about the Azure Cosmos DB Apache Spark 2 OLTP Connector for SQL API, including release dates, retirement dates, and changes made between each version of the Azure Cosmos DB SQL Async Java SDK. -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: reference ms.date: 04/06/2021 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-java --- diff --git a/articles/cosmos-db/sql/sql-api-sdk-java-spring-v2.md b/articles/cosmos-db/sql/sql-api-sdk-java-spring-v2.md index 69221aa4c7961..bd24cab6a864d 100644 --- a/articles/cosmos-db/sql/sql-api-sdk-java-spring-v2.md +++ b/articles/cosmos-db/sql/sql-api-sdk-java-spring-v2.md @@ -1,13 +1,14 @@ --- title: 'Spring Data Azure Cosmos DB v2 for SQL API release notes and resources' description: Learn about the Spring Data Azure Cosmos DB v2 for SQL API, including release dates, retirement dates, and changes made between each version of the Azure Cosmos DB SQL Async Java SDK. -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: reference ms.date: 04/06/2021 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-java --- diff --git a/articles/cosmos-db/sql/sql-api-sdk-java-spring-v3.md b/articles/cosmos-db/sql/sql-api-sdk-java-spring-v3.md index ad95e035dd99e..674b29182bae4 100644 --- a/articles/cosmos-db/sql/sql-api-sdk-java-spring-v3.md +++ b/articles/cosmos-db/sql/sql-api-sdk-java-spring-v3.md @@ -1,13 +1,14 @@ --- title: 'Spring Data Azure Cosmos DB v3 for SQL API release notes and resources' description: Learn about the Spring Data Azure Cosmos DB v3 for SQL API, including release dates, retirement dates, and changes made between each version of the Azure Cosmos DB SQL Async Java SDK. -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: reference ms.date: 04/06/2021 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-java --- diff --git a/articles/cosmos-db/sql/sql-api-sdk-java-v4.md b/articles/cosmos-db/sql/sql-api-sdk-java-v4.md index a2cae643e1cb3..c519923957c43 100644 --- a/articles/cosmos-db/sql/sql-api-sdk-java-v4.md +++ b/articles/cosmos-db/sql/sql-api-sdk-java-v4.md @@ -1,13 +1,14 @@ --- title: 'Azure Cosmos DB Java SDK v4 for SQL API release notes and resources' description: Learn all about the Azure Cosmos DB Java SDK v4 for SQL API and SDK including release dates, retirement dates, and changes made between each version of the Azure Cosmos DB SQL Async Java SDK. 
-author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: reference ms.date: 04/06/2021 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-java --- diff --git a/articles/cosmos-db/sql/sql-api-sdk-java.md b/articles/cosmos-db/sql/sql-api-sdk-java.md index 4a39df6f8eb60..e56774cb1c9b0 100644 --- a/articles/cosmos-db/sql/sql-api-sdk-java.md +++ b/articles/cosmos-db/sql/sql-api-sdk-java.md @@ -1,13 +1,14 @@ --- title: 'Azure Cosmos DB: SQL Java API, SDK & resources' description: Learn all about the SQL Java API and SDK including release dates, retirement dates, and changes made between each version of the Azure Cosmos DB SQL Java SDK. -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: reference ms.date: 04/06/2021 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-java --- diff --git a/articles/cosmos-db/sql/sql-api-sdk-node.md b/articles/cosmos-db/sql/sql-api-sdk-node.md index 6c88ea43eb8b5..7c386a7a17688 100644 --- a/articles/cosmos-db/sql/sql-api-sdk-node.md +++ b/articles/cosmos-db/sql/sql-api-sdk-node.md @@ -1,13 +1,14 @@ --- title: 'Azure Cosmos DB: SQL Node.js API, SDK & resources' description: Learn all about the SQL Node.js API and SDK including release dates, retirement dates, and changes made between each version of the Azure Cosmos DB Node.js SDK. -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: javascript ms.topic: reference ms.date: 12/09/2021 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-js --- diff --git a/articles/cosmos-db/sql/sql-api-sdk-python.md b/articles/cosmos-db/sql/sql-api-sdk-python.md index 3d9c4060887d4..960b5d42e979b 100644 --- a/articles/cosmos-db/sql/sql-api-sdk-python.md +++ b/articles/cosmos-db/sql/sql-api-sdk-python.md @@ -21,10 +21,11 @@ ms.custom: devx-track-python |**API documentation**|[Python API reference documentation](/python/api/azure-cosmos/azure.cosmos?preserve-view=true&view=azure-python)| |**SDK installation instructions**|[Python SDK installation instructions](https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/cosmos/azure-cosmos)| |**Get started**|[Get started with the Python SDK](create-sql-api-python.md)| +|**Samples**|[Python SDK samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cosmos/azure-cosmos/samples)| |**Current supported platform**|[Python 3.6+](https://www.python.org/downloads/)| > [!IMPORTANT] -> * Versions 4.3.0b2 and higher only support Python 3.6+. Python 2 is not supported. +> * Versions 4.3.0b2 and higher support Async IO operations and only support Python 3.6+. Python 2 is not supported. ## Release history Release history is maintained in the azure-sdk-for-python repo, for detailed list of releases, see the [changelog file](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/cosmos/azure-cosmos/CHANGELOG.md). 
@@ -38,6 +39,7 @@ Microsoft provides notification at least **12 months** in advance of retiring an | Version | Release Date | Retirement Date | | --- | --- | --- | +| 4.3.0 |May 23, 2022 |--- | | 4.2.0 |Oct 09, 2020 |--- | | 4.1.0 |Aug 10, 2020 |--- | | 4.0.0 |May 20, 2020 |--- | diff --git a/articles/cosmos-db/sql/sql-api-spring-data-sdk-samples.md b/articles/cosmos-db/sql/sql-api-spring-data-sdk-samples.md index d29aed5bdca4f..56ace1c72d86b 100644 --- a/articles/cosmos-db/sql/sql-api-spring-data-sdk-samples.md +++ b/articles/cosmos-db/sql/sql-api-spring-data-sdk-samples.md @@ -1,13 +1,14 @@ --- title: 'Azure Cosmos DB SQL API: Spring Data v3 examples' description: Find Spring Data v3 examples on GitHub for common tasks using the Azure Cosmos DB SQL API, including CRUD operations. -author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: sample ms.date: 08/26/2021 ms.custom: devx-track-java -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown --- # Azure Cosmos DB SQL API: Spring Data Azure Cosmos DB v3 examples diff --git a/articles/cosmos-db/sql/sql-query-aggregate-avg.md b/articles/cosmos-db/sql/sql-query-aggregate-avg.md index c67d6922813ea..11830ec7c7d88 100644 --- a/articles/cosmos-db/sql/sql-query-aggregate-avg.md +++ b/articles/cosmos-db/sql/sql-query-aggregate-avg.md @@ -1,12 +1,13 @@ --- title: AVG in Azure Cosmos DB query language description: Learn about the Average (AVG) SQL system function in Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 12/02/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # AVG (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-aggregate-count.md b/articles/cosmos-db/sql/sql-query-aggregate-count.md index be3e0e0a5516c..5abade5183b5a 100644 --- a/articles/cosmos-db/sql/sql-query-aggregate-count.md +++ b/articles/cosmos-db/sql/sql-query-aggregate-count.md @@ -1,12 +1,13 @@ --- title: COUNT in Azure Cosmos DB query language description: Learn about the Count (COUNT) SQL system function in Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 12/02/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # COUNT (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-aggregate-functions.md b/articles/cosmos-db/sql/sql-query-aggregate-functions.md index ba072c5da598b..ae88c5b349b1b 100644 --- a/articles/cosmos-db/sql/sql-query-aggregate-functions.md +++ b/articles/cosmos-db/sql/sql-query-aggregate-functions.md @@ -1,12 +1,13 @@ --- title: Aggregate functions in Azure Cosmos DB description: Learn about SQL aggregate function syntax, types of aggregate functions supported by Azure Cosmos DB. 
-author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 12/02/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # Aggregate functions in Azure Cosmos DB diff --git a/articles/cosmos-db/sql/sql-query-aggregate-max.md b/articles/cosmos-db/sql/sql-query-aggregate-max.md index 6191b08877419..29421aecb440a 100644 --- a/articles/cosmos-db/sql/sql-query-aggregate-max.md +++ b/articles/cosmos-db/sql/sql-query-aggregate-max.md @@ -1,12 +1,13 @@ --- title: MAX in Azure Cosmos DB query language description: Learn about the Max (MAX) SQL system function in Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 12/02/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # MAX (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-aggregate-min.md b/articles/cosmos-db/sql/sql-query-aggregate-min.md index f95e542af0aec..3c28f4092b690 100644 --- a/articles/cosmos-db/sql/sql-query-aggregate-min.md +++ b/articles/cosmos-db/sql/sql-query-aggregate-min.md @@ -1,12 +1,13 @@ --- title: MIN in Azure Cosmos DB query language description: Learn about the Min (MIN) SQL system function in Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 12/02/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # MIN (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-aggregate-sum.md b/articles/cosmos-db/sql/sql-query-aggregate-sum.md index e24e6abe993c6..40b877ce16a8b 100644 --- a/articles/cosmos-db/sql/sql-query-aggregate-sum.md +++ b/articles/cosmos-db/sql/sql-query-aggregate-sum.md @@ -1,12 +1,13 @@ --- title: SUM in Azure Cosmos DB query language description: Learn about the Sum (SUM) SQL system function in Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 12/02/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # SUM (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-bitwise-operators.md b/articles/cosmos-db/sql/sql-query-bitwise-operators.md new file mode 100644 index 0000000000000..8772098d98a4c --- /dev/null +++ b/articles/cosmos-db/sql/sql-query-bitwise-operators.md @@ -0,0 +1,67 @@ +--- +title: Bitwise operators in Azure Cosmos DB +description: Learn about SQL bitwise operators supported by Azure Cosmos DB. +author: seesharprun +ms.author: sidandrews +ms.reviewer: jucocchi +ms.service: cosmos-db +ms.subservice: cosmosdb-sql +ms.topic: conceptual +ms.date: 06/02/2022 +--- + +# Bitwise operators in Azure Cosmos DB +[!INCLUDE[appliesto-sql-api](../includes/appliesto-sql-api.md)] + + +This article details the bitwise operators supported by Azure Cosmos DB. Bitwise operators are useful for constructing JSON result-sets on the fly. The bitwise operators work similarly to higher-level programming languages like C# and JavaScript. For examples of C# bitwise operators, see [Bitwise and shift operators](/dotnet/csharp/language-reference/operators/bitwise-and-shift-operators). + +## Understanding bitwise operations + +The following table shows the explanations and examples of bitwise operations in the SQL API between two values. 
+ +| Operation | Operator | Description | +| --- | --- | --- | +| **Left shift** | ``<<`` | Shift left-hand value *left* by the specified number of bits. | +| **Right shift** | ``>>`` | Shift left-hand value *right* by the specified number of bits. | +| **Zero-fill (unsigned) right shift** | ``>>>`` | Shift left-hand value *right* by the specified number of bits without filling left-most bits. | +| **AND** | ``&`` | Computes bitwise logical AND. | +| **OR** | ``|`` | Computes bitwise logical OR. | +| **XOR** | ``^`` | Computes bitwise logical exclusive OR. | + + +For example, the following query uses each of the bitwise operators and renders a result. + +```sql +SELECT + (100 >> 2) AS rightShift, + (100 << 2) AS leftShift, + (100 >>> 0) AS zeroFillRightShift, + (100 & 1000) AS logicalAnd, + (100 | 1000) AS logicalOr, + (100 ^ 1000) AS logicalExclusiveOr +``` + +The example query returns the following JSON result. + +```json +[ + { + "rightShift": 25, + "leftShift": 400, + "zeroFillRightShift": 100, + "logicalAnd": 96, + "logicalOr": 1004, + "logicalExclusiveOr": 908 + } +] +``` + +> [!IMPORTANT] +> The bitwise operators in the Azure Cosmos DB SQL API follow the same behavior as bitwise operators in JavaScript. JavaScript stores numbers as 64-bit floating point numbers, but all bitwise operations are performed on 32-bit binary numbers. Before a bitwise operation is performed, JavaScript converts numbers to 32-bit signed integers. After the bitwise operation is performed, the result is converted back to a 64-bit JavaScript number. For more information about the bitwise operators in JavaScript, see [JavaScript binary bitwise operators at MDN Web Docs](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Operators#binary_bitwise_operators). + +## Next steps + +- [Azure Cosmos DB .NET samples](https://github.com/Azure/azure-cosmos-dotnet-v3) +- [Keywords](sql-query-keywords.md) +- [SELECT clause](sql-query-select.md) diff --git a/articles/cosmos-db/sql/sql-query-constants.md b/articles/cosmos-db/sql/sql-query-constants.md index aaaadf4219933..89780f7b44274 100644 --- a/articles/cosmos-db/sql/sql-query-constants.md +++ b/articles/cosmos-db/sql/sql-query-constants.md @@ -1,12 +1,13 @@ --- title: SQL constants in Azure Cosmos DB description: Learn about how the SQL query constants in Azure Cosmos DB are used to represent a specific data value -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 05/31/2019 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- diff --git a/articles/cosmos-db/sql/sql-query-date-time-functions.md b/articles/cosmos-db/sql/sql-query-date-time-functions.md index 23c1507adb8fe..b2569047507f3 100644 --- a/articles/cosmos-db/sql/sql-query-date-time-functions.md +++ b/articles/cosmos-db/sql/sql-query-date-time-functions.md @@ -29,6 +29,7 @@ or numeric ticks whose value is the number of 100 nanosecond ticks which have el The following functions allow you to easily manipulate DateTime, timestamp, and tick values: * [DateTimeAdd](sql-query-datetimeadd.md) +* [DateTimeBin](sql-query-datetimebin.md) * [DateTimeDiff](sql-query-datetimediff.md) * [DateTimeFromParts](sql-query-datetimefromparts.md) * [DateTimePart](sql-query-datetimepart.md) diff --git a/articles/cosmos-db/sql/sql-query-datetimeadd.md b/articles/cosmos-db/sql/sql-query-datetimeadd.md index 35fe178d4320f..cccbfcddd05ce 100644 --- a/articles/cosmos-db/sql/sql-query-datetimeadd.md +++ b/articles/cosmos-db/sql/sql-query-datetimeadd.md @@ 
-1,12 +1,13 @@ --- title: DateTimeAdd in Azure Cosmos DB query language description: Learn about SQL system function DateTimeAdd in Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 07/09/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # DateTimeAdd (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-datetimebin.md b/articles/cosmos-db/sql/sql-query-datetimebin.md new file mode 100644 index 0000000000000..3b68e9b9ee85b --- /dev/null +++ b/articles/cosmos-db/sql/sql-query-datetimebin.md @@ -0,0 +1,121 @@ +--- +title: DateTimeBin in Azure Cosmos DB query language +description: Learn about SQL system function DateTimeBin in Azure Cosmos DB. +author: jcocchi +ms.service: cosmos-db +ms.subservice: cosmosdb-sql +ms.topic: conceptual +ms.date: 05/27/2022 +ms.author: jucocchi +ms.custom: query-reference +--- + +# DateTimeBin (Azure Cosmos DB) + [!INCLUDE[appliesto-sql-api](../includes/appliesto-sql-api.md)] + +Returns the nearest multiple of *BinSize* below the specified DateTime given the unit of measurement *DateTimePart* and start value of *BinAtDateTime*. + + +## Syntax + +```sql +DateTimeBin (<DateTime>, <DateTimePart> [,BinSize] [,BinAtDateTime]) +``` + + +## Arguments + +*DateTime* + The string value date and time to be binned. A UTC date and time ISO 8601 string value in the format `YYYY-MM-DDThh:mm:ss.fffffffZ` where: + +|Format|Description| +|-|-| +|YYYY|four-digit year| +|MM|two-digit month (01 = January, etc.)| +|DD|two-digit day of month (01 through 31)| +|T|signifier for beginning of time elements| +|hh|two-digit hour (00 through 23)| +|mm|two-digit minutes (00 through 59)| +|ss|two-digit seconds (00 through 59)| +|.fffffff|seven-digit fractional seconds| +|Z|UTC (Coordinated Universal Time) designator| + +For more information on the ISO 8601 format, see [ISO_8601](https://en.wikipedia.org/wiki/ISO_8601). + +*DateTimePart* + The date time part specifies the units for BinSize. DateTimeBin is Undefined for DayOfWeek, Year, and Month. The finest granularity for binning by Nanosecond is 100 nanosecond ticks; if Nanosecond is specified with a BinSize less than 100, the result is Undefined. This table lists all valid DateTimePart arguments for DateTimeBin: + +| DateTimePart | abbreviations | +| ------------ | -------------------- | +| Day | "day", "dd", "d" | +| Hour | "hour", "hh" | +| Minute | "minute", "mi", "n" | +| Second | "second", "ss", "s" | +| Millisecond | "millisecond", "ms" | +| Microsecond | "microsecond", "mcs" | +| Nanosecond | "nanosecond", "ns" | + +*BinSize* (optional) + Numeric value that specifies the size of bins. If not specified, the default value is one. + + +*BinAtDateTime* (optional) + A UTC date and time ISO 8601 string value in the format `YYYY-MM-DDThh:mm:ss.fffffffZ` that specifies the start date to bin from. Default value is the Unix epoch, ‘1970-01-01T00:00:00.000000Z’. + + +## Return types + +Returns the result of binning the *DateTime* value.
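+
+From application code, a query that uses `DateTimeBin` can be issued like any other SQL API query. The following minimal sketch uses the .NET SDK v3 (Microsoft.Azure.Cosmos); the account endpoint, key, database and container names, and the `eventTime` property are illustrative assumptions rather than values defined in this article.
+
+```csharp
+using System;
+using Microsoft.Azure.Cosmos;
+
+// Illustrative placeholders; replace with your own account and data.
+CosmosClient client = new CosmosClient("<account-endpoint>", "<account-key>");
+Container container = client.GetContainer("SampleDatabase", "SampleContainer");
+
+// Bin each item's eventTime property into one-hour buckets and count the items per bucket.
+QueryDefinition query = new QueryDefinition(
+    "SELECT DateTimeBin(c.eventTime, 'hh') AS hourBucket, COUNT(1) AS total " +
+    "FROM c GROUP BY DateTimeBin(c.eventTime, 'hh')");
+
+FeedIterator<dynamic> iterator = container.GetItemQueryIterator<dynamic>(query);
+while (iterator.HasMoreResults)
+{
+    FeedResponse<dynamic> page = await iterator.ReadNextAsync();
+    foreach (dynamic row in page)
+    {
+        Console.WriteLine($"{row.hourBucket}: {row.total}");
+    }
+}
+```
+
+The projection repeats the same `DateTimeBin` expression that appears in the GROUP BY clause, which is what the SQL API requires for non-aggregate values in a grouped query.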
+ + +## Remarks + +DateTimeBin will return `Undefined` for the following reasons: +- The DateTimePart value specified is invalid +- The BinSize value is zero or negative +- The DateTime or BinAtDateTime isn't a valid ISO 8601 DateTime or precedes the year 1601 (the Windows epoch) + + +## Examples + +The following example bins ‘2021-06-28T17:24:29.2991234Z’ by one hour: + +```sql +SELECT DateTimeBin('2021-06-28T17:24:29.2991234Z', 'hh') AS BinByHour +``` + +```json +[ +    { +        "BinByHour": "2021-06-28T17:00:00.0000000Z" +    } +] +``` + +The following example bins ‘2021-06-28T17:24:29.2991234Z’ given different *BinAtDateTime* values: + +```sql +SELECT  +DateTimeBin('2021-06-28T17:24:29.2991234Z', 'day', 5) AS One_BinByFiveDaysUnixEpochImplicit, +DateTimeBin('2021-06-28T17:24:29.2991234Z', 'day', 5, '1970-01-01T00:00:00.0000000Z') AS Two_BinByFiveDaysUnixEpochExplicit, +DateTimeBin('2021-06-28T17:24:29.2991234Z', 'day', 5, '1601-01-01T00:00:00.0000000Z') AS Three_BinByFiveDaysFromWindowsEpoch, +DateTimeBin('2021-06-28T17:24:29.2991234Z', 'day', 5, '2021-01-01T00:00:00.0000000Z') AS Four_BinByFiveDaysFromYearStart, +DateTimeBin('2021-06-28T17:24:29.2991234Z', 'day', 5, '0001-01-01T00:00:00.0000000Z') AS Five_BinByFiveDaysFromUndefinedYear +``` + +```json +[ +    { +        "One_BinByFiveDaysUnixEpochImplicit": "2021-06-27T00:00:00.0000000Z", +        "Two_BinByFiveDaysUnixEpochExplicit": "2021-06-27T00:00:00.0000000Z", +        "Three_BinByFiveDaysFromWindowsEpoch": "2021-06-28T00:00:00.0000000Z", +        "Four_BinByFiveDaysFromYearStart": "2021-06-25T00:00:00.0000000Z" +    } +] +``` + +## Next steps + +- [Date and time functions Azure Cosmos DB](sql-query-date-time-functions.md) +- [System functions Azure Cosmos DB](sql-query-system-functions.md) +- [Introduction to Azure Cosmos DB](../introduction.md) diff --git a/articles/cosmos-db/sql/sql-query-datetimediff.md b/articles/cosmos-db/sql/sql-query-datetimediff.md index e298b8fa95696..71966cf36bab3 100644 --- a/articles/cosmos-db/sql/sql-query-datetimediff.md +++ b/articles/cosmos-db/sql/sql-query-datetimediff.md @@ -1,12 +1,13 @@ --- title: DateTimeDiff in Azure Cosmos DB query language description: Learn about SQL system function DateTimeDiff in Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 07/09/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # DateTimeDiff (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-datetimefromparts.md b/articles/cosmos-db/sql/sql-query-datetimefromparts.md index 9bfaa76e1fe9a..38ffcdcccf1d6 100644 --- a/articles/cosmos-db/sql/sql-query-datetimefromparts.md +++ b/articles/cosmos-db/sql/sql-query-datetimefromparts.md @@ -1,12 +1,13 @@ --- title: DateTimeFromParts in Azure Cosmos DB query language description: Learn about SQL system function DateTimeFromParts in Azure Cosmos DB. 
-author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 07/09/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # DateTimeFromParts (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-datetimepart.md b/articles/cosmos-db/sql/sql-query-datetimepart.md index 4bf288dcc0069..52cd506550e28 100644 --- a/articles/cosmos-db/sql/sql-query-datetimepart.md +++ b/articles/cosmos-db/sql/sql-query-datetimepart.md @@ -1,12 +1,13 @@ --- title: DateTimePart in Azure Cosmos DB query language description: Learn about SQL system function DateTimePart in Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 08/14/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # DateTimePart (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-datetimetoticks.md b/articles/cosmos-db/sql/sql-query-datetimetoticks.md index 516bfdfcab848..f943a7ca780b0 100644 --- a/articles/cosmos-db/sql/sql-query-datetimetoticks.md +++ b/articles/cosmos-db/sql/sql-query-datetimetoticks.md @@ -1,12 +1,13 @@ --- title: DateTimeToTicks in Azure Cosmos DB query language description: Learn about SQL system function DateTimeToTicks in Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 08/18/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # DateTimeToTicks (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-datetimetotimestamp.md b/articles/cosmos-db/sql/sql-query-datetimetotimestamp.md index 6fd50446ea369..3b1acad88bda8 100644 --- a/articles/cosmos-db/sql/sql-query-datetimetotimestamp.md +++ b/articles/cosmos-db/sql/sql-query-datetimetotimestamp.md @@ -1,12 +1,13 @@ --- title: DateTimeToTimestamp in Azure Cosmos DB query language description: Learn about SQL system function DateTimeToTimestamp in Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 08/18/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # DateTimeToTimestamp (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-equality-comparison-operators.md b/articles/cosmos-db/sql/sql-query-equality-comparison-operators.md index 34307ea3331c5..7812264ceb755 100644 --- a/articles/cosmos-db/sql/sql-query-equality-comparison-operators.md +++ b/articles/cosmos-db/sql/sql-query-equality-comparison-operators.md @@ -1,12 +1,13 @@ --- title: Equality and comparison operators in Azure Cosmos DB description: Learn about SQL equality and comparison operators supported by Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 01/07/2022 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # Equality and comparison operators in Azure Cosmos DB diff --git a/articles/cosmos-db/sql/sql-query-from.md b/articles/cosmos-db/sql/sql-query-from.md index 4c27eba74d05f..5b0cd50084ff0 100644 --- a/articles/cosmos-db/sql/sql-query-from.md +++ b/articles/cosmos-db/sql/sql-query-from.md @@ -1,12 +1,13 @@ --- title: FROM clause in Azure Cosmos DB description: Learn about the SQL syntax, and example for FROM clause for Azure Cosmos DB. 
This article also shows examples to scope results, and get sub items by using the FROM clause. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 05/08/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # FROM clause in Azure Cosmos DB diff --git a/articles/cosmos-db/sql/sql-query-geospatial-index.md b/articles/cosmos-db/sql/sql-query-geospatial-index.md index 7e08c0e009336..ff0a5423d8716 100644 --- a/articles/cosmos-db/sql/sql-query-geospatial-index.md +++ b/articles/cosmos-db/sql/sql-query-geospatial-index.md @@ -1,12 +1,13 @@ --- title: Index geospatial data with Azure Cosmos DB description: Index spatial data with Azure Cosmos DB -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 11/03/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # Index geospatial data with Azure Cosmos DB diff --git a/articles/cosmos-db/sql/sql-query-geospatial-intro.md b/articles/cosmos-db/sql/sql-query-geospatial-intro.md index cbe31145f8b56..25b72b166e3d3 100644 --- a/articles/cosmos-db/sql/sql-query-geospatial-intro.md +++ b/articles/cosmos-db/sql/sql-query-geospatial-intro.md @@ -1,12 +1,13 @@ --- title: Geospatial and GeoJSON location data in Azure Cosmos DB description: Understand how to create spatial objects with Azure Cosmos DB and the SQL API. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 02/17/2022 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: devx-track-js --- # Geospatial and GeoJSON location data in Azure Cosmos DB diff --git a/articles/cosmos-db/sql/sql-query-geospatial-query.md b/articles/cosmos-db/sql/sql-query-geospatial-query.md index 8b2bd30face0c..4d156b8251e67 100644 --- a/articles/cosmos-db/sql/sql-query-geospatial-query.md +++ b/articles/cosmos-db/sql/sql-query-geospatial-query.md @@ -1,12 +1,13 @@ --- title: Querying geospatial data with Azure Cosmos DB description: Querying spatial data with Azure Cosmos DB -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 02/20/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # Querying geospatial data with Azure Cosmos DB diff --git a/articles/cosmos-db/sql/sql-query-getcurrentdatetime.md b/articles/cosmos-db/sql/sql-query-getcurrentdatetime.md index 6d48c835c8eda..27130aec987aa 100644 --- a/articles/cosmos-db/sql/sql-query-getcurrentdatetime.md +++ b/articles/cosmos-db/sql/sql-query-getcurrentdatetime.md @@ -1,12 +1,13 @@ --- title: GetCurrentDateTime in Azure Cosmos DB query language description: Learn about SQL system function GetCurrentDateTime in Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 02/03/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # GetCurrentDateTime (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-getcurrentticks.md b/articles/cosmos-db/sql/sql-query-getcurrentticks.md index fa91c208bd82a..8e77b223dabe2 100644 --- a/articles/cosmos-db/sql/sql-query-getcurrentticks.md +++ b/articles/cosmos-db/sql/sql-query-getcurrentticks.md @@ -1,12 +1,13 @@ --- title: GetCurrentTicks in Azure Cosmos DB query language description: Learn about SQL system function GetCurrentTicks in Azure Cosmos DB. 
-author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 02/03/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # GetCurrentTicks (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-getcurrenttimestamp.md b/articles/cosmos-db/sql/sql-query-getcurrenttimestamp.md index 6a21f337dce2e..ce0f6fb7fc454 100644 --- a/articles/cosmos-db/sql/sql-query-getcurrenttimestamp.md +++ b/articles/cosmos-db/sql/sql-query-getcurrenttimestamp.md @@ -1,12 +1,13 @@ --- title: GetCurrentTimestamp in Azure Cosmos DB query language description: Learn about SQL system function GetCurrentTimestamp in Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 02/03/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # GetCurrentTimestamp (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-getting-started.md b/articles/cosmos-db/sql/sql-query-getting-started.md index 497c3d85d6ca9..8172659af6388 100644 --- a/articles/cosmos-db/sql/sql-query-getting-started.md +++ b/articles/cosmos-db/sql/sql-query-getting-started.md @@ -1,12 +1,13 @@ --- title: Getting started with SQL queries in Azure Cosmos DB description: Learn how to use SQL queries to query data from Azure Cosmos DB. You can upload sample data to a container in Azure Cosmos DB and query it. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 08/26/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # Getting started with SQL queries diff --git a/articles/cosmos-db/sql/sql-query-group-by.md b/articles/cosmos-db/sql/sql-query-group-by.md index 54a6705c9483b..5ed99bd6ff023 100644 --- a/articles/cosmos-db/sql/sql-query-group-by.md +++ b/articles/cosmos-db/sql/sql-query-group-by.md @@ -1,12 +1,13 @@ --- title: GROUP BY clause in Azure Cosmos DB description: Learn about the GROUP BY clause for Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 05/12/2022 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # GROUP BY clause in Azure Cosmos DB diff --git a/articles/cosmos-db/sql/sql-query-join.md b/articles/cosmos-db/sql/sql-query-join.md index 0bc2b234acb83..c3054d65b1e6a 100644 --- a/articles/cosmos-db/sql/sql-query-join.md +++ b/articles/cosmos-db/sql/sql-query-join.md @@ -1,12 +1,13 @@ --- title: SQL JOIN queries for Azure Cosmos DB description: Learn how to JOIN multiple tables in Azure Cosmos DB to query the data -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 08/27/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # Joins in Azure Cosmos DB @@ -215,7 +216,7 @@ The results are: ``` > [!IMPORTANT] -> This example uses mulitple JOIN expressions in a single query. There is a maximum amount of JOINs that can be used in a single query. For more information, see [SQL query limits](/azure/cosmos-db/concepts-limits#sql-query-limits). +> This example uses multiple JOIN expressions in a single query. There is a maximum number of JOINs that can be used in a single query. For more information, see [SQL query limits](../concepts-limits.md#sql-query-limits). 
The following extension of the preceding example performs a double join. You could view the cross product as the following pseudo-code: @@ -302,4 +303,4 @@ For example, consider the earlier query that projected the familyName, child's g - [Getting started](sql-query-getting-started.md) - [Azure Cosmos DB .NET samples](https://github.com/Azure/azure-cosmosdb-dotnet) -- [Subqueries](sql-query-subquery.md) +- [Subqueries](sql-query-subquery.md) \ No newline at end of file diff --git a/articles/cosmos-db/sql/sql-query-keywords.md b/articles/cosmos-db/sql/sql-query-keywords.md index e9c7a91dbee38..184c7308bed74 100644 --- a/articles/cosmos-db/sql/sql-query-keywords.md +++ b/articles/cosmos-db/sql/sql-query-keywords.md @@ -1,12 +1,13 @@ --- title: SQL keywords for Azure Cosmos DB description: Learn about SQL keywords for Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 10/05/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # Keywords in Azure Cosmos DB diff --git a/articles/cosmos-db/sql/sql-query-linq-to-sql.md b/articles/cosmos-db/sql/sql-query-linq-to-sql.md index 91f9099190e46..60ecd3783f785 100644 --- a/articles/cosmos-db/sql/sql-query-linq-to-sql.md +++ b/articles/cosmos-db/sql/sql-query-linq-to-sql.md @@ -1,12 +1,13 @@ --- title: LINQ to SQL translation in Azure Cosmos DB description: Learn the LINQ operators supported and how the LINQ queries are mapped to SQL queries in Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 08/06/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # LINQ to SQL translation diff --git a/articles/cosmos-db/sql/sql-query-logical-operators.md b/articles/cosmos-db/sql/sql-query-logical-operators.md index 3f159007525f1..82eed5675e4f0 100644 --- a/articles/cosmos-db/sql/sql-query-logical-operators.md +++ b/articles/cosmos-db/sql/sql-query-logical-operators.md @@ -1,12 +1,13 @@ --- title: Logical operators in Azure Cosmos DB description: Learn about SQL logical operators supported by Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 01/07/2022 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # Logical operators in Azure Cosmos DB diff --git a/articles/cosmos-db/sql/sql-query-object-array.md b/articles/cosmos-db/sql/sql-query-object-array.md index 6f3863e401dd0..bfb3688e57352 100644 --- a/articles/cosmos-db/sql/sql-query-object-array.md +++ b/articles/cosmos-db/sql/sql-query-object-array.md @@ -1,12 +1,13 @@ --- title: Working with arrays and objects in Azure Cosmos DB description: Learn the SQL syntax to create arrays and objects in Azure Cosmos DB. 
This article also provides some examples to perform operations on array objects -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 02/02/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # Working with arrays and objects in Azure Cosmos DB diff --git a/articles/cosmos-db/sql/sql-query-offset-limit.md b/articles/cosmos-db/sql/sql-query-offset-limit.md index a46678e6d0991..524c3ed78e510 100644 --- a/articles/cosmos-db/sql/sql-query-offset-limit.md +++ b/articles/cosmos-db/sql/sql-query-offset-limit.md @@ -1,12 +1,13 @@ --- title: OFFSET LIMIT clause in Azure Cosmos DB description: Learn how to use the OFFSET LIMIT clause to skip and take some certain values when querying in Azure Cosmos DB -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 07/29/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # OFFSET LIMIT clause in Azure Cosmos DB diff --git a/articles/cosmos-db/sql/sql-query-order-by.md b/articles/cosmos-db/sql/sql-query-order-by.md index 916bfa36b9928..fd0fb47f77b2d 100644 --- a/articles/cosmos-db/sql/sql-query-order-by.md +++ b/articles/cosmos-db/sql/sql-query-order-by.md @@ -1,12 +1,13 @@ --- title: ORDER BY clause in Azure Cosmos DB description: Learn about SQL ORDER BY clause for Azure Cosmos DB. Use SQL as an Azure Cosmos DB JSON query language. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 04/27/2022 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # ORDER BY clause in Azure Cosmos DB diff --git a/articles/cosmos-db/sql/sql-query-pagination.md b/articles/cosmos-db/sql/sql-query-pagination.md index 9d4f00171fa6b..fb0ef8b62ee15 100644 --- a/articles/cosmos-db/sql/sql-query-pagination.md +++ b/articles/cosmos-db/sql/sql-query-pagination.md @@ -1,8 +1,9 @@ --- title: Pagination in Azure Cosmos DB description: Learn about paging concepts and continuation tokens -author: timsander1 -ms.author: tisande +author: seesharprun +ms.author: sidandrews +ms.reviewer: jucocchi ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual diff --git a/articles/cosmos-db/sql/sql-query-parameterized-queries.md b/articles/cosmos-db/sql/sql-query-parameterized-queries.md index a79baaa626a1e..b20badb1ba043 100644 --- a/articles/cosmos-db/sql/sql-query-parameterized-queries.md +++ b/articles/cosmos-db/sql/sql-query-parameterized-queries.md @@ -1,12 +1,13 @@ --- title: Parameterized queries in Azure Cosmos DB description: Learn how SQL parameterized queries provide robust handling and escaping of user input, and prevent accidental exposure of data through SQL injection. 
-author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 07/29/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # Parameterized queries in Azure Cosmos DB [!INCLUDE[appliesto-sql-api](../includes/appliesto-sql-api.md)] diff --git a/articles/cosmos-db/sql/sql-query-regexmatch.md b/articles/cosmos-db/sql/sql-query-regexmatch.md index 4d0e3afc4594b..8acfb8cd5930c 100644 --- a/articles/cosmos-db/sql/sql-query-regexmatch.md +++ b/articles/cosmos-db/sql/sql-query-regexmatch.md @@ -1,12 +1,13 @@ --- title: RegexMatch in Azure Cosmos DB query language description: Learn about the RegexMatch SQL system function in Azure Cosmos DB -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 08/12/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # REGEXMATCH (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-scalar-expressions.md b/articles/cosmos-db/sql/sql-query-scalar-expressions.md index c4fe17b2c8f47..c3079f29677bd 100644 --- a/articles/cosmos-db/sql/sql-query-scalar-expressions.md +++ b/articles/cosmos-db/sql/sql-query-scalar-expressions.md @@ -1,12 +1,13 @@ --- title: Scalar expressions in Azure Cosmos DB SQL queries description: Learn about the scalar expression SQL syntax for Azure Cosmos DB. This article also describes how to combine scalar expressions into complex expressions by using operators. -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 05/17/2019 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Scalar expressions in Azure Cosmos DB SQL queries diff --git a/articles/cosmos-db/sql/sql-query-select.md b/articles/cosmos-db/sql/sql-query-select.md index cc146596b1279..99a51c6625c25 100644 --- a/articles/cosmos-db/sql/sql-query-select.md +++ b/articles/cosmos-db/sql/sql-query-select.md @@ -1,12 +1,13 @@ --- title: SELECT clause in Azure Cosmos DB description: Learn about SQL SELECT clause for Azure Cosmos DB. Use SQL as an Azure Cosmos DB JSON query language. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 05/08/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # SELECT clause in Azure Cosmos DB diff --git a/articles/cosmos-db/sql/sql-query-string-functions.md b/articles/cosmos-db/sql/sql-query-string-functions.md index f10a2d496953b..d551d2d6fd43e 100644 --- a/articles/cosmos-db/sql/sql-query-string-functions.md +++ b/articles/cosmos-db/sql/sql-query-string-functions.md @@ -1,12 +1,13 @@ --- title: String functions in Azure Cosmos DB query language description: Learn about string SQL system functions in Azure Cosmos DB. 
-author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 05/26/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # String functions (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-stringequals.md b/articles/cosmos-db/sql/sql-query-stringequals.md index 243164b95d21f..50be1f4431cc6 100644 --- a/articles/cosmos-db/sql/sql-query-stringequals.md +++ b/articles/cosmos-db/sql/sql-query-stringequals.md @@ -1,12 +1,13 @@ --- title: StringEquals in Azure Cosmos DB query language description: Learn about how the StringEquals SQL system function in Azure Cosmos DB returns a Boolean indicating whether the first string expression matches the second -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 05/20/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # STRINGEQUALS (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-subquery.md b/articles/cosmos-db/sql/sql-query-subquery.md index 12f548dda5c00..8eec4009c9a7b 100644 --- a/articles/cosmos-db/sql/sql-query-subquery.md +++ b/articles/cosmos-db/sql/sql-query-subquery.md @@ -1,12 +1,13 @@ --- title: SQL subqueries for Azure Cosmos DB description: Learn about SQL subqueries and their common use cases and different types of subqueries in Azure Cosmos DB -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 07/30/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # SQL subquery examples for Azure Cosmos DB diff --git a/articles/cosmos-db/sql/sql-query-ternary-coalesce-operators.md b/articles/cosmos-db/sql/sql-query-ternary-coalesce-operators.md index 87770a23d22ae..fff8686b199bc 100644 --- a/articles/cosmos-db/sql/sql-query-ternary-coalesce-operators.md +++ b/articles/cosmos-db/sql/sql-query-ternary-coalesce-operators.md @@ -1,12 +1,13 @@ --- title: Ternary and coalesce operators in Azure Cosmos DB description: Learn about SQL ternary and coalesce operators supported by Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 01/07/2022 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # Ternary and coalesce operators in Azure Cosmos DB diff --git a/articles/cosmos-db/sql/sql-query-tickstodatetime.md b/articles/cosmos-db/sql/sql-query-tickstodatetime.md index 4f241944648fc..4d5ffbadf83d9 100644 --- a/articles/cosmos-db/sql/sql-query-tickstodatetime.md +++ b/articles/cosmos-db/sql/sql-query-tickstodatetime.md @@ -1,12 +1,13 @@ --- title: TicksToDateTime in Azure Cosmos DB query language description: Learn about SQL system function TicksToDateTime in Azure Cosmos DB. 
-author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 08/18/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # TicksToDateTime (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-timestamptodatetime.md b/articles/cosmos-db/sql/sql-query-timestamptodatetime.md index c681b35bde358..b09b0fbab8d1f 100644 --- a/articles/cosmos-db/sql/sql-query-timestamptodatetime.md +++ b/articles/cosmos-db/sql/sql-query-timestamptodatetime.md @@ -1,12 +1,13 @@ --- title: TimestampToDateTime in Azure Cosmos DB query language description: Learn about SQL system function TimestampToDateTime in Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 08/18/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # TimestampToDateTime (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-type-checking-functions.md b/articles/cosmos-db/sql/sql-query-type-checking-functions.md index ab6087967a060..5ee9eb465a343 100644 --- a/articles/cosmos-db/sql/sql-query-type-checking-functions.md +++ b/articles/cosmos-db/sql/sql-query-type-checking-functions.md @@ -1,12 +1,13 @@ --- title: Type checking functions in Azure Cosmos DB query language description: Learn about type checking SQL system functions in Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 05/26/2021 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: query-reference --- # Type checking functions (Azure Cosmos DB) diff --git a/articles/cosmos-db/sql/sql-query-udfs.md b/articles/cosmos-db/sql/sql-query-udfs.md index 22a3ab4174eb6..488c93407edee 100644 --- a/articles/cosmos-db/sql/sql-query-udfs.md +++ b/articles/cosmos-db/sql/sql-query-udfs.md @@ -1,12 +1,13 @@ --- title: User-defined functions (UDFs) in Azure Cosmos DB description: Learn about User-defined functions in Azure Cosmos DB. 
-author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 04/09/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.custom: devx-track-js --- diff --git a/articles/cosmos-db/sql/sql-query-where.md b/articles/cosmos-db/sql/sql-query-where.md index e1a49a28d22eb..6543cbf772e0c 100644 --- a/articles/cosmos-db/sql/sql-query-where.md +++ b/articles/cosmos-db/sql/sql-query-where.md @@ -1,12 +1,13 @@ --- title: WHERE clause in Azure Cosmos DB description: Learn about SQL WHERE clause for Azure Cosmos DB -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 03/06/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # WHERE clause in Azure Cosmos DB diff --git a/articles/cosmos-db/sql/sql-query-working-with-json.md b/articles/cosmos-db/sql/sql-query-working-with-json.md index a4162323bcfe2..a3952d9e19608 100644 --- a/articles/cosmos-db/sql/sql-query-working-with-json.md +++ b/articles/cosmos-db/sql/sql-query-working-with-json.md @@ -1,12 +1,13 @@ --- title: Working with JSON in Azure Cosmos DB description: Learn about to query and access nested JSON properties and use special characters in Azure Cosmos DB -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 09/19/2020 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi --- # Working with JSON in Azure Cosmos DB diff --git a/articles/cosmos-db/sql/sql-sdk-connection-modes.md b/articles/cosmos-db/sql/sql-sdk-connection-modes.md index b9cff59b9a975..34e7b5774218d 100644 --- a/articles/cosmos-db/sql/sql-sdk-connection-modes.md +++ b/articles/cosmos-db/sql/sql-sdk-connection-modes.md @@ -28,7 +28,7 @@ The two available connectivity modes are: * Direct mode - Direct mode supports connectivity through TCP protocol and offers better performance because there are fewer network hops. The application connects directly to the backend replicas. Direct mode is currently only supported on .NET and Java SDK platforms. + Direct mode supports connectivity through TCP protocol, using TLS for initial authentication and encrypting traffic, and offers better performance because there are fewer network hops. The application connects directly to the backend replicas. Direct mode is currently only supported on .NET and Java SDK platforms. :::image type="content" source="./media/performance-tips/connection-policy.png" alt-text="The Azure Cosmos DB connectivity modes" border="false"::: @@ -43,7 +43,7 @@ The following table shows a summary of the connectivity modes available for vari |Connection mode |Supported protocol |Supported SDKs |API/Service port | |---------|---------|---------|---------| |Gateway | HTTPS | All SDKs | SQL (443), MongoDB (10250, 10255, 10256), Table (443), Cassandra (10350), Graph (443)
                  The port 10250 maps to a default Azure Cosmos DB API for MongoDB instance without geo-replication. Whereas the ports 10255 and 10256 map to the instance that has geo-replication. | -|Direct | TCP | .NET SDK Java SDK | When using public/service endpoints: ports in the 10000 through 20000 range
                  When using private endpoints: ports in the 0 through 65535 range | +|Direct | TCP (Encrypted via TLS) | .NET SDK Java SDK | When using public/service endpoints: ports in the 10000 through 20000 range
                  When using private endpoints: ports in the 0 through 65535 range | ## Direct mode connection architecture @@ -96,4 +96,4 @@ For specific SDK platform performance optimizations: * Trying to do capacity planning for a migration to Azure Cosmos DB? You can use information about your existing database cluster for capacity planning. * If all you know is the number of vcores and servers in your existing database cluster, read about [estimating request units using vCores or vCPUs](../convert-vcore-to-request-unit.md) - * If you know typical request rates for your current database workload, read about [estimating request units using Azure Cosmos DB capacity planner](estimate-ru-with-capacity-planner.md) \ No newline at end of file + * If you know typical request rates for your current database workload, read about [estimating request units using Azure Cosmos DB capacity planner](estimate-ru-with-capacity-planner.md) diff --git a/articles/cosmos-db/sql/stored-procedures-triggers-udfs.md b/articles/cosmos-db/sql/stored-procedures-triggers-udfs.md index ae8ef8f95e19d..05a849f77dd3c 100644 --- a/articles/cosmos-db/sql/stored-procedures-triggers-udfs.md +++ b/articles/cosmos-db/sql/stored-procedures-triggers-udfs.md @@ -1,14 +1,13 @@ --- title: Work with stored procedures, triggers, and UDFs in Azure Cosmos DB description: This article introduces the concepts such as stored procedures, triggers, and user-defined functions in Azure Cosmos DB. -author: timsander1 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 08/26/2021 -ms.author: tisande -ms.reviewer: sngun - +ms.author: sidandrews +ms.reviewer: jucocchi --- # Stored procedures, triggers, and user-defined functions diff --git a/articles/cosmos-db/sql/synthetic-partition-keys.md b/articles/cosmos-db/sql/synthetic-partition-keys.md index ac2c085052fa4..6dc08cc7e9ebd 100644 --- a/articles/cosmos-db/sql/synthetic-partition-keys.md +++ b/articles/cosmos-db/sql/synthetic-partition-keys.md @@ -5,8 +5,9 @@ ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 08/26/2021 -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown --- diff --git a/articles/cosmos-db/sql/templates-samples-sql.md b/articles/cosmos-db/sql/templates-samples-sql.md index 9129f8d6df9f9..b1a3f429461c4 100644 --- a/articles/cosmos-db/sql/templates-samples-sql.md +++ b/articles/cosmos-db/sql/templates-samples-sql.md @@ -1,12 +1,13 @@ --- title: Azure Resource Manager templates for Azure Cosmos DB Core (SQL API) description: Use Azure Resource Manager templates to create and configure Azure Cosmos DB. -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 08/26/2021 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Azure Resource Manager templates for Azure Cosmos DB diff --git a/articles/cosmos-db/sql/time-to-live.md b/articles/cosmos-db/sql/time-to-live.md index 9f7ee0f4495a1..d06b195820cb1 100644 --- a/articles/cosmos-db/sql/time-to-live.md +++ b/articles/cosmos-db/sql/time-to-live.md @@ -1,14 +1,13 @@ --- title: Expire data in Azure Cosmos DB with Time to Live description: With TTL, Microsoft Azure Cosmos DB provides the ability to have documents automatically purged from the system after a period of time. 
-author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 09/16/2021 -ms.reviewer: sngun - --- # Time to Live (TTL) in Azure Cosmos DB [!INCLUDE[appliesto-sql-api](../includes/appliesto-sql-api.md)] diff --git a/articles/cosmos-db/sql/troubleshoot-bad-request.md b/articles/cosmos-db/sql/troubleshoot-bad-request.md index 933ec5dc573cf..daafa48100bac 100644 --- a/articles/cosmos-db/sql/troubleshoot-bad-request.md +++ b/articles/cosmos-db/sql/troubleshoot-bad-request.md @@ -7,7 +7,7 @@ ms.subservice: cosmosdb-sql ms.date: 03/07/2022 ms.author: maquaran ms.topic: troubleshooting -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Diagnose and troubleshoot bad request exceptions in Azure Cosmos DB diff --git a/articles/cosmos-db/sql/troubleshoot-changefeed-functions.md b/articles/cosmos-db/sql/troubleshoot-changefeed-functions.md index d5021470235b7..8bd0c6d610ee2 100644 --- a/articles/cosmos-db/sql/troubleshoot-changefeed-functions.md +++ b/articles/cosmos-db/sql/troubleshoot-changefeed-functions.md @@ -7,7 +7,7 @@ ms.subservice: cosmosdb-sql ms.date: 04/14/2022 ms.author: maquaran ms.topic: troubleshooting -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Diagnose and troubleshoot issues when using Azure Functions trigger for Cosmos DB diff --git a/articles/cosmos-db/sql/troubleshoot-dot-net-sdk-request-header-too-large.md b/articles/cosmos-db/sql/troubleshoot-dot-net-sdk-request-header-too-large.md index 4eeb730bc2fab..ed2ddfb91a4a8 100644 --- a/articles/cosmos-db/sql/troubleshoot-dot-net-sdk-request-header-too-large.md +++ b/articles/cosmos-db/sql/troubleshoot-dot-net-sdk-request-header-too-large.md @@ -7,7 +7,7 @@ ms.subservice: cosmosdb-sql ms.date: 09/29/2021 ms.author: jawilley ms.topic: troubleshooting -ms.reviewer: sngun +ms.reviewer: mjbrown ms.custom: devx-track-dotnet --- diff --git a/articles/cosmos-db/sql/troubleshoot-dot-net-sdk-request-timeout.md b/articles/cosmos-db/sql/troubleshoot-dot-net-sdk-request-timeout.md index 434891c44940b..b6f33acdb9ad2 100644 --- a/articles/cosmos-db/sql/troubleshoot-dot-net-sdk-request-timeout.md +++ b/articles/cosmos-db/sql/troubleshoot-dot-net-sdk-request-timeout.md @@ -7,7 +7,7 @@ ms.subservice: cosmosdb-sql ms.date: 02/02/2022 ms.author: jawilley ms.topic: troubleshooting -ms.reviewer: sngun +ms.reviewer: mjbrown ms.custom: devx-track-dotnet --- diff --git a/articles/cosmos-db/sql/troubleshoot-dot-net-sdk-slow-request.md b/articles/cosmos-db/sql/troubleshoot-dot-net-sdk-slow-request.md index 2779fb2691b10..780d98a407c89 100644 --- a/articles/cosmos-db/sql/troubleshoot-dot-net-sdk-slow-request.md +++ b/articles/cosmos-db/sql/troubleshoot-dot-net-sdk-slow-request.md @@ -7,7 +7,7 @@ ms.subservice: cosmosdb-sql ms.date: 03/09/2022 ms.author: jawilley ms.topic: troubleshooting -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Diagnose and troubleshoot slow requests in Azure Cosmos DB .NET SDK @@ -57,24 +57,21 @@ try ItemResponse response = await this.Container.CreateItemAsync(item: testItem); if (response.Diagnostics.GetClientElapsedTime() > ConfigurableSlowRequestTimeSpan) { - // Log the diagnostics and add any additional info necessary to correlate to other logs - Console.Write(response.Diagnostics.ToString()); + // Log the response.Diagnostics.ToString() and add any additional info necessary to correlate to other logs } } catch (CosmosException cosmosException) { - // Log the full exception including the stack trace - 
Console.Write(cosmosException.ToString()); - // The Diagnostics can be logged separately if required. - Console.Write(cosmosException.Diagnostics.ToString()); + // Log the full exception including the stack trace with: cosmosException.ToString() + + // The Diagnostics can be logged separately if required with: cosmosException.Diagnostics.ToString() } // When using Stream APIs ResponseMessage response = await this.Container.CreateItemStreamAsync(partitionKey, stream); if (response.Diagnostics.GetClientElapsedTime() > ConfigurableSlowRequestTimeSpan || !response.IsSuccessStatusCode) { - // Log the diagnostics and add any additional info necessary to correlate to other logs - Console.Write(response.Diagnostics.ToString()); + // Log the diagnostics and add any additional info necessary to correlate to other logs with: response.Diagnostics.ToString() } ``` @@ -202,58 +199,94 @@ Show the time for the different stages of sending and receiving a request in the
* *Transit time is large*, which leads to a networking problem. Compare this number to the `BELatencyInMs`. If `BELatencyInMs` is small, then the time was spent on the network, and not on the Azure Cosmos DB service.
* *Received time is large* might be caused by a thread starvation problem. This is the time between having the response and returning the result.
+### ServiceEndpointStatistics
+Information about a particular backend server. The SDK can open multiple connections to a single backend server depending on the number of pending requests and `MaxConcurrentRequestsPerConnection`.
+
+* `inflightRequests`: The number of pending requests to a backend server (possibly from different partitions). A high number can lead to more traffic and higher latencies.
+* `openConnections`: The total number of connections open to a single backend server. A very high number can indicate SNAT port exhaustion.
+
+### ConnectionStatistics
+Information about the particular connection (new or existing) that the request gets assigned to.
+
+* `waitforConnectionInit`: Indicates that the current request was waiting for a new connection to finish initializing. This leads to higher latencies.
+* `callsPendingReceive`: The number of calls that were pending receive before this call was sent. A high number indicates that many calls were queued ahead of this one, which can lead to higher latencies. A persistently high value points to a head-of-line blocking issue, possibly caused by another request, such as a query or feed operation, that takes a long time to process. Try lowering `CosmosClientOptions.MaxRequestsPerTcpConnection` to increase the number of channels.
+* `LastSentTime`: Time of the last request that was sent to this server. Together with `LastReceivedTime`, it can be used to spot connectivity or endpoint issues. For example, if there are many receive timeouts, the sent time will be much later than the receive time.
+* `lastReceive`: Time of last request that was received from this server +* `lastSendAttempt`: Time of the last send attempt + +### Request and response sizes +* `requestSizeInBytes`: The total size of the request sent to Cosmos DB +* `responseMetadataSizeInBytes`: The size of headers returned from Cosmos DB +* `responseBodySizeInBytes`: The size of content returned from Cosmos DB + ```json "StoreResult": { - "ActivityId": "a3d325c1-f4e9-405b-820c-bab4d329ee4c", - "StatusCode": "Created", + "ActivityId": "bab6ade1-b8de-407f-b89d-fa2138a91284", + "StatusCode": "Ok", "SubStatusCode": "Unknown", - "LSN": 1766, - "PartitionKeyRangeId": "0", - "GlobalCommittedLSN": -1, - "ItemLSN": -1, - "UsingLocalLSN": false, - "QuorumAckedLSN": 1765, - "SessionToken": "-1#1766", - "CurrentWriteQuorum": 1, - "CurrentReplicaSetSize": 1, + "LSN": 453362, + "PartitionKeyRangeId": "1", + "GlobalCommittedLSN": 0, + "ItemLSN": 453358, + "UsingLocalLSN": true, + "QuorumAckedLSN": -1, + "SessionToken": "-1#453362", + "CurrentWriteQuorum": -1, + "CurrentReplicaSetSize": -1, "NumberOfReadRegions": 0, - "IsClientCpuOverloaded": false, "IsValid": true, - "StorePhysicalAddress": "rntbd://127.0.0.1:10253/apps/DocDbApp/services/DocDbServer92/partitions/a4cb49a8-38c8-11e6-8106-8cdcd42c33be/replicas/1p/", - "RequestCharge": 11.05, - "BELatencyInMs": "7.954", - "RntbdRequestStats": [ - { - "EventName": "Created", - "StartTime": "2021-06-15T13:53:10.1302477Z", - "DurationInMicroSec": "6383" - }, - { - "EventName": "ChannelAcquisitionStarted", - "StartTime": "2021-06-15T13:53:10.1366314Z", - "DurationInMicroSec": "96511" - }, - { - "EventName": "Pipelined", - "StartTime": "2021-06-15T13:53:10.2331431Z", - "DurationInMicroSec": "50834" - }, - { - "EventName": "Transit Time", - "StartTime": "2021-06-15T13:53:10.2839774Z", - "DurationInMicroSec": "17677" + "StorePhysicalAddress": "rntbd://127.0.0.1:10253/apps/DocDbApp/services/DocDbServer92/partitions/a4cb49a8-38c8-11e6-8106-8cdcd42c33be/replicas/1s/", + "RequestCharge": 1, + "RetryAfterInMs": null, + "BELatencyInMs": "0.304", + "transportRequestTimeline": { + "requestTimeline": [ + { + "event": "Created", + "startTimeUtc": "2022-05-25T12:03:36.3081190Z", + "durationInMs": 0.0024 + }, + { + "event": "ChannelAcquisitionStarted", + "startTimeUtc": "2022-05-25T12:03:36.3081214Z", + "durationInMs": 0.0132 + }, + { + "event": "Pipelined", + "startTimeUtc": "2022-05-25T12:03:36.3081346Z", + "durationInMs": 0.0865 + }, + { + "event": "Transit Time", + "startTimeUtc": "2022-05-25T12:03:36.3082211Z", + "durationInMs": 1.3324 + }, + { + "event": "Received", + "startTimeUtc": "2022-05-25T12:03:36.3095535Z", + "durationInMs": 12.6128 + }, + { + "event": "Completed", + "startTimeUtc": "2022-05-25T12:03:36.8621663Z", + "durationInMs": 0 + } + ], + "serviceEndpointStats": { + "inflightRequests": 1, + "openConnections": 1 }, - { - "EventName": "Received", - "StartTime": "2021-06-15T13:53:10.3016546Z", - "DurationInMicroSec": "7079" + "connectionStats": { + "waitforConnectionInit": "False", + "callsPendingReceive": 0, + "lastSendAttempt": "2022-05-25T12:03:34.0222760Z", + "lastSend": "2022-05-25T12:03:34.0223280Z", + "lastReceive": "2022-05-25T12:03:34.0257728Z" }, - { - "EventName": "Completed", - "StartTime": "2021-06-15T13:53:10.3087338Z", - "DurationInMicroSec": "0" - } - ], + "requestSizeInBytes": 447, + "responseMetadataSizeInBytes": 438, + "responseBodySizeInBytes": 604 + }, "TransportException": null } ``` @@ -265,4 +298,4 @@ Contact [Azure support](https://aka.ms/azure-support). 
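The `serviceEndpointStats` and `connectionStats` objects added in the diagnostics sample above can also be surfaced in your own telemetry. The following is a minimal sketch only, not part of the SDK API: it assumes the string returned by `response.Diagnostics.ToString()` parses as JSON and that the property names match the sample output shown above (the diagnostics text is informational and can change between SDK versions), and the `DiagnosticsInspector` helper name is invented for illustration.

```csharp
using System;
using System.Text.Json;

// Sketch: walk the diagnostics JSON emitted by response.Diagnostics.ToString()
// and print the connection-level counters (openConnections, inflightRequests,
// callsPendingReceive, ...) wherever they appear in the payload.
static class DiagnosticsInspector
{
    public static void PrintConnectionCounters(string diagnosticsJson)
    {
        using JsonDocument document = JsonDocument.Parse(diagnosticsJson);
        Walk(document.RootElement);
    }

    private static void Walk(JsonElement element)
    {
        switch (element.ValueKind)
        {
            case JsonValueKind.Object:
                foreach (JsonProperty property in element.EnumerateObject())
                {
                    bool isStatsObject =
                        (property.NameEquals("serviceEndpointStats") || property.NameEquals("connectionStats")) &&
                        property.Value.ValueKind == JsonValueKind.Object;

                    if (isStatsObject)
                    {
                        // Replace Console.WriteLine with your own logging pipeline.
                        foreach (JsonProperty counter in property.Value.EnumerateObject())
                        {
                            Console.WriteLine($"{property.Name}.{counter.Name} = {counter.Value}");
                        }
                    }
                    else
                    {
                        Walk(property.Value);
                    }
                }
                break;

            case JsonValueKind.Array:
                foreach (JsonElement item in element.EnumerateArray())
                {
                    Walk(item);
                }
                break;
        }
    }
}
```

A typical call site is right after the elapsed-time check shown earlier, for example `DiagnosticsInspector.PrintConnectionCounters(response.Diagnostics.ToString());`. If the diagnostics text ever fails to parse as JSON, fall back to logging the raw string, as the earlier snippets in this article do.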
## Next steps * [Diagnose and troubleshoot](troubleshoot-dot-net-sdk.md) problems when you use the Azure Cosmos DB .NET SDK. -* Learn about performance guidelines for [.NET v3](performance-tips-dotnet-sdk-v3-sql.md) and [.NET v2](performance-tips.md). \ No newline at end of file +* Learn about performance guidelines for [.NET v3](performance-tips-dotnet-sdk-v3-sql.md) and [.NET v2](performance-tips.md). diff --git a/articles/cosmos-db/sql/troubleshoot-dot-net-sdk.md b/articles/cosmos-db/sql/troubleshoot-dot-net-sdk.md index 7cb64fefa7511..63c8416b687e5 100644 --- a/articles/cosmos-db/sql/troubleshoot-dot-net-sdk.md +++ b/articles/cosmos-db/sql/troubleshoot-dot-net-sdk.md @@ -1,13 +1,13 @@ --- title: Diagnose and troubleshoot issues when using Azure Cosmos DB .NET SDK description: Use features like client-side logging and other third-party tools to identify, diagnose, and troubleshoot Azure Cosmos DB issues when using .NET SDK. -author: rothja +author: seesharprun ms.service: cosmos-db ms.date: 03/05/2021 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.subservice: cosmosdb-sql ms.topic: troubleshooting -ms.reviewer: sngun ms.custom: devx-track-dotnet --- # Diagnose and troubleshoot issues when using Azure Cosmos DB .NET SDK diff --git a/articles/cosmos-db/sql/troubleshoot-forbidden.md b/articles/cosmos-db/sql/troubleshoot-forbidden.md index 1b7a3ed658430..bdf773ff4031c 100644 --- a/articles/cosmos-db/sql/troubleshoot-forbidden.md +++ b/articles/cosmos-db/sql/troubleshoot-forbidden.md @@ -7,7 +7,7 @@ ms.subservice: cosmosdb-sql ms.date: 04/14/2022 ms.author: maquaran ms.topic: troubleshooting -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Diagnose and troubleshoot Azure Cosmos DB forbidden exceptions diff --git a/articles/cosmos-db/sql/troubleshoot-java-async-sdk.md b/articles/cosmos-db/sql/troubleshoot-java-async-sdk.md index df89aaa59bb8a..da9f14e23d428 100644 --- a/articles/cosmos-db/sql/troubleshoot-java-async-sdk.md +++ b/articles/cosmos-db/sql/troubleshoot-java-async-sdk.md @@ -1,14 +1,14 @@ --- title: Diagnose and troubleshoot Azure Cosmos DB Async Java SDK v2 description: Use features like client-side logging and other third-party tools to identify, diagnose, and troubleshoot Azure Cosmos DB issues in Async Java SDK v2. -author: rothja +author: seesharprun ms.service: cosmos-db ms.date: 05/11/2020 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.devlang: java ms.subservice: cosmosdb-sql ms.topic: troubleshooting -ms.reviewer: sngun ms.custom: devx-track-java --- diff --git a/articles/cosmos-db/sql/troubleshoot-not-found.md b/articles/cosmos-db/sql/troubleshoot-not-found.md index 591fd79a0e3bd..1f22cf7d35d98 100644 --- a/articles/cosmos-db/sql/troubleshoot-not-found.md +++ b/articles/cosmos-db/sql/troubleshoot-not-found.md @@ -7,7 +7,7 @@ ms.subservice: cosmosdb-sql ms.date: 05/26/2021 ms.author: jawilley ms.topic: troubleshooting -ms.reviewer: sngun +ms.reviewer: mjbrown ms.custom: ignite-fall-2021 --- diff --git a/articles/cosmos-db/sql/troubleshoot-query-performance.md b/articles/cosmos-db/sql/troubleshoot-query-performance.md index 83fbba2f05405..36c3119404be9 100644 --- a/articles/cosmos-db/sql/troubleshoot-query-performance.md +++ b/articles/cosmos-db/sql/troubleshoot-query-performance.md @@ -1,13 +1,13 @@ --- title: Troubleshoot query issues when using Azure Cosmos DB description: Learn how to identify, diagnose, and troubleshoot Azure Cosmos DB SQL query issues. 
-author: timsander1 +author: seesharprun ms.service: cosmos-db ms.topic: troubleshooting ms.date: 04/04/2022 -ms.author: tisande +ms.author: sidandrews +ms.reviewer: jucocchi ms.subservice: cosmosdb-sql -ms.reviewer: sngun --- # Troubleshoot query issues when using Azure Cosmos DB [!INCLUDE[appliesto-sql-api](../includes/appliesto-sql-api.md)] diff --git a/articles/cosmos-db/sql/troubleshoot-request-rate-too-large.md b/articles/cosmos-db/sql/troubleshoot-request-rate-too-large.md index 14b4c3e962d7f..6c30ae50b6cc6 100644 --- a/articles/cosmos-db/sql/troubleshoot-request-rate-too-large.md +++ b/articles/cosmos-db/sql/troubleshoot-request-rate-too-large.md @@ -7,7 +7,7 @@ ms.subservice: cosmosdb-sql ms.date: 03/03/2022 ms.author: jawilley ms.topic: troubleshooting -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Diagnose and troubleshoot Azure Cosmos DB request rate too large (429) exceptions diff --git a/articles/cosmos-db/sql/troubleshoot-request-timeout-java-sdk-v4-sql.md b/articles/cosmos-db/sql/troubleshoot-request-timeout-java-sdk-v4-sql.md index 84b96c002d40d..a26afbc50e04c 100644 --- a/articles/cosmos-db/sql/troubleshoot-request-timeout-java-sdk-v4-sql.md +++ b/articles/cosmos-db/sql/troubleshoot-request-timeout-java-sdk-v4-sql.md @@ -7,7 +7,7 @@ ms.subservice: cosmosdb-sql ms.date: 10/28/2020 ms.author: kuthapar ms.topic: troubleshooting -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Diagnose and troubleshoot Azure Cosmos DB Java v4 SDK request timeout exceptions diff --git a/articles/cosmos-db/sql/troubleshoot-request-timeout.md b/articles/cosmos-db/sql/troubleshoot-request-timeout.md index 5bc633304c7e1..5bd2d77905d54 100644 --- a/articles/cosmos-db/sql/troubleshoot-request-timeout.md +++ b/articles/cosmos-db/sql/troubleshoot-request-timeout.md @@ -7,7 +7,7 @@ ms.subservice: cosmosdb-sql ms.date: 07/13/2020 ms.author: jawilley ms.topic: troubleshooting -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Diagnose and troubleshoot Azure Cosmos DB request timeout exceptions diff --git a/articles/cosmos-db/sql/troubleshoot-sdk-availability.md b/articles/cosmos-db/sql/troubleshoot-sdk-availability.md index 8bae7a8e3a5d1..c8bf88bbf1ad5 100644 --- a/articles/cosmos-db/sql/troubleshoot-sdk-availability.md +++ b/articles/cosmos-db/sql/troubleshoot-sdk-availability.md @@ -7,7 +7,7 @@ ms.date: 03/28/2022 ms.author: maquaran ms.subservice: cosmosdb-sql ms.topic: troubleshooting -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Diagnose and troubleshoot the availability of Azure Cosmos SDKs in multiregional environments [!INCLUDE[appliesto-sql-api](../includes/appliesto-sql-api.md)] diff --git a/articles/cosmos-db/sql/troubleshoot-service-unavailable-java-sdk-v4-sql.md b/articles/cosmos-db/sql/troubleshoot-service-unavailable-java-sdk-v4-sql.md index 84984be9647e5..f267f69dca13c 100644 --- a/articles/cosmos-db/sql/troubleshoot-service-unavailable-java-sdk-v4-sql.md +++ b/articles/cosmos-db/sql/troubleshoot-service-unavailable-java-sdk-v4-sql.md @@ -7,7 +7,7 @@ ms.subservice: cosmosdb-sql ms.date: 02/03/2022 ms.author: kuthapar ms.topic: troubleshooting -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Diagnose and troubleshoot Azure Cosmos DB Java v4 SDK service unavailable exceptions diff --git a/articles/cosmos-db/sql/troubleshoot-service-unavailable.md b/articles/cosmos-db/sql/troubleshoot-service-unavailable.md index 6131e262b52ba..0b12b32e8fb86 100644 --- a/articles/cosmos-db/sql/troubleshoot-service-unavailable.md +++ b/articles/cosmos-db/sql/troubleshoot-service-unavailable.md @@ -7,7 +7,7 @@ 
ms.subservice: cosmosdb-sql ms.date: 08/06/2020 ms.author: jawilley ms.topic: troubleshooting -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Diagnose and troubleshoot Azure Cosmos DB service unavailable exceptions diff --git a/articles/cosmos-db/sql/troubleshoot-unauthorized.md b/articles/cosmos-db/sql/troubleshoot-unauthorized.md index e2630ec205fd9..ab88a462935a4 100644 --- a/articles/cosmos-db/sql/troubleshoot-unauthorized.md +++ b/articles/cosmos-db/sql/troubleshoot-unauthorized.md @@ -7,7 +7,7 @@ ms.subservice: cosmosdb-sql ms.date: 07/13/2020 ms.author: jawilley ms.topic: troubleshooting -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Diagnose and troubleshoot Azure Cosmos DB unauthorized exceptions diff --git a/articles/cosmos-db/sql/tutorial-global-distribution-sql-api.md b/articles/cosmos-db/sql/tutorial-global-distribution-sql-api.md index 8406ff97dcff1..f1c7106923738 100644 --- a/articles/cosmos-db/sql/tutorial-global-distribution-sql-api.md +++ b/articles/cosmos-db/sql/tutorial-global-distribution-sql-api.md @@ -1,15 +1,14 @@ --- title: 'Tutorial: Azure Cosmos DB global distribution tutorial for the SQL API' description: 'Tutorial: Learn how to set up Azure Cosmos DB global distribution using the SQL API with .NET, Java, Python and various other SDKs' -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: tutorial ms.date: 04/03/2022 -ms.reviewer: sngun ms.custom: devx-track-python, devx-track-js, devx-track-csharp - --- # Tutorial: Set up Azure Cosmos DB global distribution using the SQL API [!INCLUDE[appliesto-sql-api](../includes/appliesto-sql-api.md)] diff --git a/articles/cosmos-db/sql/tutorial-query-sql-api.md b/articles/cosmos-db/sql/tutorial-query-sql-api.md index 9fc7e7edaba4d..0456593a5a115 100644 --- a/articles/cosmos-db/sql/tutorial-query-sql-api.md +++ b/articles/cosmos-db/sql/tutorial-query-sql-api.md @@ -1,14 +1,14 @@ --- title: 'Tutorial: How to query with SQL in Azure Cosmos DB?' description: 'Tutorial: Learn how to query with SQL queries in Azure Cosmos DB using the query playground' -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.custom: tutorial-develop, mvc ms.topic: tutorial ms.date: 08/26/2021 -ms.reviewer: sngun --- # Tutorial: Query Azure Cosmos DB by using the SQL API diff --git a/articles/cosmos-db/sql/tutorial-springboot-azure-kubernetes-service.md b/articles/cosmos-db/sql/tutorial-springboot-azure-kubernetes-service.md index 37e13af134e69..f5ab88cbf23ab 100644 --- a/articles/cosmos-db/sql/tutorial-springboot-azure-kubernetes-service.md +++ b/articles/cosmos-db/sql/tutorial-springboot-azure-kubernetes-service.md @@ -1,13 +1,14 @@ --- title: Tutorial - Spring Boot application with Azure Cosmos DB SQL API and Azure Kubernetes Service description: This tutorial demonstrates how to deploy a Spring Boot application to Azure Kubernetes Service and use it to perform operations on data in an Azure Cosmos DB SQL API account. 
-author: rothja +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.devlang: java ms.topic: quickstart ms.date: 10/01/2021 -ms.author: jroth +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: mode-api, devx-track-azurecli --- diff --git a/articles/cosmos-db/sql/tutorial-sql-api-dotnet-bulk-import.md b/articles/cosmos-db/sql/tutorial-sql-api-dotnet-bulk-import.md index ed34a0ace6082..774eacc90e556 100644 --- a/articles/cosmos-db/sql/tutorial-sql-api-dotnet-bulk-import.md +++ b/articles/cosmos-db/sql/tutorial-sql-api-dotnet-bulk-import.md @@ -7,7 +7,7 @@ ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: tutorial ms.date: 03/25/2022 -ms.reviewer: wiassaf +ms.reviewer: mjbrown ms.devlang: csharp ms.custom: devx-track-csharp, cosmos-db-video --- diff --git a/articles/cosmos-db/sql/working-with-dates.md b/articles/cosmos-db/sql/working-with-dates.md index 6384584300ba5..de21d987f8c8e 100644 --- a/articles/cosmos-db/sql/working-with-dates.md +++ b/articles/cosmos-db/sql/working-with-dates.md @@ -3,8 +3,9 @@ title: Working with dates in Azure Cosmos DB description: Learn how to store, index, and query DataTime objects in Azure Cosmos DB ms.service: cosmos-db ms.subservice: cosmosdb-sql -author: timsander1 -ms.author: tisande +author: seesharprun +ms.author: sidandrews +ms.reviewer: jucocchi ms.topic: conceptual ms.date: 04/03/2020 ms.devlang: csharp diff --git a/articles/cosmos-db/synapse-link.md b/articles/cosmos-db/synapse-link.md index 4f2854106f201..c93683783797b 100644 --- a/articles/cosmos-db/synapse-link.md +++ b/articles/cosmos-db/synapse-link.md @@ -6,7 +6,7 @@ ms.author: rosouz ms.service: cosmos-db ms.topic: conceptual ms.date: 07/12/2021 -ms.reviewer: sngun +ms.reviewer: mjbrown ms.custom: synapse-cosmos-db --- diff --git a/articles/cosmos-db/table/cli-samples.md b/articles/cosmos-db/table/cli-samples.md index f0f128a39e5d5..fd5291b77c5bb 100644 --- a/articles/cosmos-db/table/cli-samples.md +++ b/articles/cosmos-db/table/cli-samples.md @@ -1,12 +1,13 @@ --- title: Azure CLI Samples for Azure Cosmos DB Table API description: Azure CLI Samples for Azure Cosmos DB Table API -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: sample ms.date: 02/21/2022 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-azurecli --- diff --git a/articles/cosmos-db/table/create-table-dotnet.md b/articles/cosmos-db/table/create-table-dotnet.md index 5b08457b4d102..931b02cf53d1f 100644 --- a/articles/cosmos-db/table/create-table-dotnet.md +++ b/articles/cosmos-db/table/create-table-dotnet.md @@ -1,13 +1,14 @@ --- title: 'Quickstart: Table API with .NET - Azure Cosmos DB' description: This quickstart shows how to access the Azure Cosmos DB Table API from a .NET application using the Azure.Data.Tables SDK -author: DavidCBerry13 +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-table ms.devlang: csharp ms.topic: quickstart ms.date: 09/26/2021 -ms.author: daberry +ms.author: sidandrews +ms.reviewer: mjbrown ms.custom: devx-track-csharp, mode-api, devx-track-azurecli --- diff --git a/articles/cosmos-db/table/create-table-java.md b/articles/cosmos-db/table/create-table-java.md index 1540627522f20..aec2f85330b25 100644 --- a/articles/cosmos-db/table/create-table-java.md +++ b/articles/cosmos-db/table/create-table-java.md @@ -61,7 +61,7 @@ Sign in to the [Azure portal](https://portal.azure.com/) and follow these steps ### [Azure CLI](#tab/azure-cli) -Cosmos DB accounts are 
created using the [az Cosmos DB create](/cli/azure/cosmosdb#az_cosmosdb_create) command. You must include the `--capabilities EnableTable` option to enable table storage within your Cosmos DB. As all Azure resource must be contained in a resource group, the following code snippet also creates a resource group for the Cosmos DB account. +Cosmos DB accounts are created using the [az Cosmos DB create](/cli/azure/cosmosdb#az-cosmosdb-create) command. You must include the `--capabilities EnableTable` option to enable table storage within your Cosmos DB. As all Azure resource must be contained in a resource group, the following code snippet also creates a resource group for the Cosmos DB account. Cosmos DB account names must be between 3 and 44 characters in length and may contain only lowercase letters, numbers, and the hyphen (-) character. Cosmos DB account names must also be unique across Azure. @@ -131,7 +131,7 @@ In the [Azure portal](https://portal.azure.com/), complete the following steps t ### [Azure CLI](#tab/azure-cli) -Tables in Cosmos DB are created using the [az Cosmos DB table create](/cli/azure/cosmosdb/table#az_cosmosdb_table_create) command. +Tables in Cosmos DB are created using the [az Cosmos DB table create](/cli/azure/cosmosdb/table#az-cosmosdb-table-create) command. ```azurecli COSMOS_TABLE_NAME='WeatherData' @@ -172,7 +172,7 @@ To access your table(s) in Cosmos DB, your app will need the table connection st ### [Azure CLI](#tab/azure-cli) -To get the primary table storage connection string using Azure CLI, use the [az Cosmos DB keys list](/cli/azure/cosmosdb/keys#az_cosmosdb_keys_list) command with the option `--type connection-strings`. This command uses a [JMESPath query](https://jmespath.org/) to display only the primary table connection string. +To get the primary table storage connection string using Azure CLI, use the [az Cosmos DB keys list](/cli/azure/cosmosdb/keys#az-cosmosdb-keys-list) command with the option `--type connection-strings`. This command uses a [JMESPath query](https://jmespath.org/) to display only the primary table connection string. ```azurecli # This gets the primary Table connection string @@ -638,7 +638,7 @@ public class ExpandableWeatherObject { } ``` -To insert or upsert such an object using the Table API, map the properties of the expandable object into a [TableEntity](/java/api/com.azure.data.tables.tableentity) object and use the [createEntity](/java/api/com.azure.data.tables.tableclient.createentity) or [upsertEntity](/java/api/com.azure.data.tables.tableclient.upsertentity) methods on the [TableClient](/java/api/com.azure.data.tables.tableclient) object as appropriate. +To insert or upsert such an object using the Table API, map the properties of the expandable object into a [TableEntity](/java/api/com.azure.data.tables.models.tableentity) object and use the [createEntity](/java/api/com.azure.data.tables.tableclient.createentity) or [upsertEntity](/java/api/com.azure.data.tables.tableclient.upsertentity) methods on the [TableClient](/java/api/com.azure.data.tables.tableclient) object as appropriate. ```java public void insertExpandableEntity(ExpandableWeatherObject model) { @@ -759,7 +759,7 @@ A resource group can be deleted using the [Azure portal](https://portal.azure.co ### [Azure CLI](#tab/azure-cli) -To delete a resource group using the Azure CLI, use the [az group delete](/cli/azure/group#az_group_delete) command with the name of the resource group to be deleted. 
Deleting a resource group will also remove all Azure resources contained in the resource group. +To delete a resource group using the Azure CLI, use the [az group delete](/cli/azure/group#az-group-delete) command with the name of the resource group to be deleted. Deleting a resource group will also remove all Azure resources contained in the resource group. ```azurecli az group delete --name $RESOURCE_GROUP_NAME @@ -780,4 +780,4 @@ Remove-AzResourceGroup -Name $resourceGroupName In this quickstart, you've learned how to create an Azure Cosmos DB account, create a table using the Data Explorer, and run an app. Now you can query your data using the Tables API. > [!div class="nextstepaction"] -> [Import table data to the Tables API](table-import.md) \ No newline at end of file +> [Import table data to the Tables API](table-import.md) diff --git a/articles/cosmos-db/table/how-to-create-container.md b/articles/cosmos-db/table/how-to-create-container.md index c73b3292be0af..ea007bf2ab383 100644 --- a/articles/cosmos-db/table/how-to-create-container.md +++ b/articles/cosmos-db/table/how-to-create-container.md @@ -1,12 +1,13 @@ --- title: Create a container in Azure Cosmos DB Table API description: Learn how to create a container in Azure Cosmos DB Table API by using Azure portal, .NET, Java, Python, Node.js, and other SDKs. -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: how-to ms.date: 10/16/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Create a container in Azure Cosmos DB Table API diff --git a/articles/cosmos-db/table/how-to-use-java.md b/articles/cosmos-db/table/how-to-use-java.md index 76535e680e999..927222dd86749 100644 --- a/articles/cosmos-db/table/how-to-use-java.md +++ b/articles/cosmos-db/table/how-to-use-java.md @@ -334,7 +334,7 @@ try .buildClient(); // Create a filter condition where the partition key is "Sales". - ListEntitiesOptions options = new ListEntitiesOptions().setFilter(PARTITION_KEY + " eq 'Sales' AND " + ROW_KEY + " lt '0004' AND ROW_KEY + " gt '0001'"); + ListEntitiesOptions options = new ListEntitiesOptions().setFilter(PARTITION_KEY + " eq 'Sales' AND " + ROW_KEY + " lt '0004' AND " + ROW_KEY + " gt '0001'"); // Loop through the results, displaying information about the entities. tableClient.listEntities(options, null, null).forEach(tableEntity -> { @@ -377,7 +377,7 @@ try System.out.println(specificEntity.getPartitionKey() + " " + specificEntity.getRowKey() + "\t" + specificEntity.getProperty("FirstName") + - "\t" + specificEntity.getProperty("LastName")); + "\t" + specificEntity.getProperty("LastName") + "\t" + specificEntity.getProperty("Email") + "\t" + specificEntity.getProperty("PhoneNumber")); } @@ -503,7 +503,7 @@ try .tableName(tableName) .buildClient(); - Delete the entity for partition key 'Sales' and row key '0001' from the table. + // Delete the entity for partition key 'Sales' and row key '0001' from the table. tableClient.deleteEntity("Sales", "0001"); } catch (Exception e) @@ -555,4 +555,4 @@ For more information, visit [Azure for Java developers](/java/azure). 
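The filter fix above is about assembling the OData `$filter` string correctly in the Java sample. As a point of comparison only, here's a hedged sketch of the same partition/row-key range query written with the .NET `Azure.Data.Tables` client; the connection string, table name, and property names are placeholders for illustration and aren't taken from this article.

```csharp
using System;
using Azure.Data.Tables;

// Sketch: query the 'Sales' partition for row keys between '0001' and '0004',
// mirroring the OData filter built in the Java sample above.
TableClient tableClient = new TableClient("<your-connection-string>", "Employees"); // placeholders

string filter = "PartitionKey eq 'Sales' and RowKey gt '0001' and RowKey lt '0004'";

foreach (TableEntity entity in tableClient.Query<TableEntity>(filter))
{
    // Property names are illustrative; use whatever your entities actually store.
    Console.WriteLine($"{entity.PartitionKey}\t{entity.RowKey}\t{entity.GetString("FirstName")}\t{entity.GetString("LastName")}");
}
```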
[Azure Tables client library for Java]: https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/tables/azure-data-tables [Azure Tables client library reference documentation]: https://azure.github.io/azure-sdk-for-java/tables.html [Azure Tables REST API]: ../../storage/tables/table-storage-overview.md -[Azure Tables Team Blog]: https://blogs.msdn.microsoft.com/windowsazurestorage/ \ No newline at end of file +[Azure Tables Team Blog]: https://blogs.msdn.microsoft.com/windowsazurestorage/ diff --git a/articles/cosmos-db/table/how-to-use-python.md b/articles/cosmos-db/table/how-to-use-python.md index 0202f81ca9610..6701c747bc41f 100644 --- a/articles/cosmos-db/table/how-to-use-python.md +++ b/articles/cosmos-db/table/how-to-use-python.md @@ -8,7 +8,7 @@ ms.devlang: python ms.topic: quickstart ms.date: 03/23/2021 ms.author: akshanka -ms.reviewer: sngun +ms.reviewer: mjbrown ms.custom: devx-track-python, mode-api, devx-track-azurecli --- @@ -22,7 +22,7 @@ This quickstart shows how to access the Azure Cosmos DB [Table API](introduction The sample application is written in [Python3.6](https://www.python.org/downloads/), though the principles apply to all Python3.6+ applications. You can use [Visual Studio Code](https://code.visualstudio.com/) as an IDE. -If you don't have an [Azure subscription](/azure/guides/developer/azure-developer-guide#understanding-accounts-subscriptions-and-billing), create a [free account](https://azure.microsoft.com/free/dotnet) before you begin. +If you don't have an [Azure subscription](../../guides/developer/azure-developer-guide.md#understanding-accounts-subscriptions-and-billing), create a [free account](https://azure.microsoft.com/free/dotnet) before you begin. ## Sample application @@ -426,4 +426,4 @@ Remove-AzResourceGroup -Name $resourceGroupName In this quickstart, you've learned how to create an Azure Cosmos DB account, create a table using the Data Explorer, and run an app. Now you can query your data using the Table API. > [!div class="nextstepaction"] -> [Import table data to the Table API](table-import.md) +> [Import table data to the Table API](table-import.md) \ No newline at end of file diff --git a/articles/cosmos-db/table/how-to-use-ruby.md b/articles/cosmos-db/table/how-to-use-ruby.md index 47b6c7716d7dd..341c05428eeb4 100644 --- a/articles/cosmos-db/table/how-to-use-ruby.md +++ b/articles/cosmos-db/table/how-to-use-ruby.md @@ -8,7 +8,7 @@ ms.topic: sample ms.date: 07/23/2020 author: sakash279 ms.author: akshanka -ms.reviewer: sngun +ms.reviewer: mjbrown --- # How to use Azure Table Storage and the Azure Cosmos DB Table API with Ruby [!INCLUDE[appliesto-table-api](../includes/appliesto-table-api.md)] diff --git a/articles/cosmos-db/table/manage-with-bicep.md b/articles/cosmos-db/table/manage-with-bicep.md index 961e9aa382bb3..5532603fcfcb8 100644 --- a/articles/cosmos-db/table/manage-with-bicep.md +++ b/articles/cosmos-db/table/manage-with-bicep.md @@ -1,12 +1,13 @@ --- title: Create and manage Azure Cosmos DB Table API with Bicep description: Use Bicep to create and configure Azure Cosmos DB Table API. 
-author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: how-to ms.date: 09/13/2021 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Manage Azure Cosmos DB Table API resources using Bicep diff --git a/articles/cosmos-db/table/powershell-samples.md b/articles/cosmos-db/table/powershell-samples.md index 7c47992f324c9..ba189c6057ee0 100644 --- a/articles/cosmos-db/table/powershell-samples.md +++ b/articles/cosmos-db/table/powershell-samples.md @@ -1,12 +1,13 @@ --- title: Azure PowerShell samples for Azure Cosmos DB Table API description: Get the Azure PowerShell samples to perform common tasks in Azure Cosmos DB Table API -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: sample ms.date: 01/20/2021 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Azure PowerShell samples for Azure Cosmos DB Table API diff --git a/articles/cosmos-db/table/resource-manager-templates.md b/articles/cosmos-db/table/resource-manager-templates.md index 6375f3bb41c33..7a068905f9de6 100644 --- a/articles/cosmos-db/table/resource-manager-templates.md +++ b/articles/cosmos-db/table/resource-manager-templates.md @@ -1,12 +1,13 @@ --- title: Resource Manager templates for Azure Cosmos DB Table API description: Use Azure Resource Manager templates to create and configure Azure Cosmos DB Table API. -author: markjbrown +author: seesharprun ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: how-to ms.date: 05/19/2020 -ms.author: mjbrown +ms.author: sidandrews +ms.reviewer: mjbrown --- # Manage Azure Cosmos DB Table API resources using Azure Resource Manager templates diff --git a/articles/cosmos-db/table/table-import.md b/articles/cosmos-db/table/table-import.md index 4af982512c3c8..597c6db283f03 100644 --- a/articles/cosmos-db/table/table-import.md +++ b/articles/cosmos-db/table/table-import.md @@ -1,8 +1,9 @@ --- title: Migrate existing data to a Table API account in Azure Cosmos DB description: Learn how to migrate or import on-premises or cloud data to an Azure Table API account in Azure Cosmos DB. -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: tutorial @@ -13,13 +14,8 @@ ms.custom: seodec18 # Migrate your data to an Azure Cosmos DB Table API account [!INCLUDE[appliesto-table-api](../includes/appliesto-table-api.md)] -This tutorial provides instructions on importing data for use with the Azure Cosmos DB [Table API](introduction.md). If you have data stored in Azure Table Storage, you can use either the data migration tool or AzCopy to import your data to the Azure Cosmos DB Table API. +This tutorial provides instructions on importing data for use with the Azure Cosmos DB [Table API](introduction.md). If you have data stored in Azure Table Storage, you can use the **Data migration tool** to import your data to the Azure Cosmos DB Table API. -This tutorial covers the following tasks: - -> [!div class="checklist"] -> * Importing data with the data migration tool -> * Importing data with AzCopy ## Prerequisites @@ -36,7 +32,7 @@ You can use the command-line data migration tool (dt.exe) in Azure Cosmos DB to To migrate table data: -1. Download the migration tool from [GitHub](https://github.com/azure/azure-documentdb-datamigrationtool). +1. Download the migration tool from [GitHub](https://github.com/azure/azure-documentdb-datamigrationtool/tree/archive). 
2. Run `dt.exe` by using the command-line arguments for your scenario. `dt.exe` takes a command in the following format: ```bash @@ -87,19 +83,6 @@ Here's a command-line sample showing how to import from Table Storage to the Tab ```bash dt /s:AzureTable /s.ConnectionString:DefaultEndpointsProtocol=https;AccountName=;AccountKey=;EndpointSuffix=core.windows.net /s.Table: /t:TableAPIBulk /t.ConnectionString:DefaultEndpointsProtocol=https;AccountName=;AccountKey=;TableEndpoint=https://.table.cosmos.azure.com:443 /t.TableName:
                  /t.Overwrite ``` - -## Migrate data by using AzCopy - -You can also use the AzCopy command-line utility to migrate data from Table Storage to the Azure Cosmos DB Table API. To use AzCopy, you first export your data as described in [Export data from Table Storage](/previous-versions/azure/storage/storage-use-azcopy#export-data-from-table-storage). Then, you import the data to Azure Cosmos DB Table API with the following command. You can also import into [Azure Table storage](/previous-versions/azure/storage/storage-use-azcopy#import-data-into-table-storage). - -Refer to the following sample when you're importing into Azure Cosmos DB. Note that the `/Dest` value uses `cosmosdb`, not `core`. - -Example import command: - -```bash -AzCopy /Source:C:\myfolder\ /Dest:https://myaccount.table.cosmosdb.windows.net/mytable1/ /DestKey:key /Manifest:"myaccount_mytable_20140103T112020.manifest" /EntityOperation:InsertOrReplace -``` - ## Next steps Learn how to query data by using the Azure Cosmos DB Table API. diff --git a/articles/cosmos-db/table/table-support.md b/articles/cosmos-db/table/table-support.md index b7748248610e4..d17917baba6be 100644 --- a/articles/cosmos-db/table/table-support.md +++ b/articles/cosmos-db/table/table-support.md @@ -7,7 +7,7 @@ ms.topic: how-to ms.date: 11/03/2021 author: sakash279 ms.author: akshanka -ms.reviewer: sngun +ms.reviewer: mjbrown ms.devlang: cpp, csharp, java, javascript, php, python, ruby --- diff --git a/articles/cosmos-db/table/tutorial-global-distribution-table.md b/articles/cosmos-db/table/tutorial-global-distribution-table.md index b9414a5b1aefc..1108997ce1d33 100644 --- a/articles/cosmos-db/table/tutorial-global-distribution-table.md +++ b/articles/cosmos-db/table/tutorial-global-distribution-table.md @@ -7,7 +7,7 @@ ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: tutorial ms.date: 01/30/2020 -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Set up Azure Cosmos DB global distribution using the Table API [!INCLUDE[appliesto-table-api](../includes/appliesto-table-api.md)] diff --git a/articles/cosmos-db/table/tutorial-query-table.md b/articles/cosmos-db/table/tutorial-query-table.md index d535cb7d15926..ed2445c9a3259 100644 --- a/articles/cosmos-db/table/tutorial-query-table.md +++ b/articles/cosmos-db/table/tutorial-query-table.md @@ -7,7 +7,7 @@ ms.service: cosmos-db ms.subservice: cosmosdb-table ms.topic: tutorial ms.date: 06/05/2020 -ms.reviewer: sngun +ms.reviewer: mjbrown ms.devlang: csharp ms.custom: devx-track-csharp --- diff --git a/articles/cosmos-db/throughput-serverless.md b/articles/cosmos-db/throughput-serverless.md index 2e2bcafec9880..34d4f361d276d 100644 --- a/articles/cosmos-db/throughput-serverless.md +++ b/articles/cosmos-db/throughput-serverless.md @@ -29,7 +29,7 @@ Azure Cosmos DB is available in two different capacity modes: [provisioned throu | Performance | < 10-ms latency for point-reads and writes covered by SLA | < 10-ms latency for point-reads and < 30 ms for writes covered by SLO | | Billing model | Billing is done on a per-hour basis for the RU/s provisioned, regardless of how many RUs were consumed. | Billing is done on a per-hour basis for the number of RUs consumed by your database operations. | -1 Serverless containers up to 1 TB are currently in preview with Azure Cosmos DB. To try the new feature, register the *"Azure Cosmos DB Serverless 1 TB Container Preview"* [preview feature in your Azure subscription](/azure/azure-resource-manager/management/preview-features). 
+1 Serverless containers up to 1 TB are currently in preview with Azure Cosmos DB. To try the new feature, register the *"Azure Cosmos DB Serverless 1 TB Container Preview"* [preview feature in your Azure subscription](../azure-resource-manager/management/preview-features.md). ## Estimating your expected consumption @@ -56,4 +56,4 @@ For more information, see [estimating serverless costs](plan-manage-costs.md#est - Read more about [provisioning throughput on Azure Cosmos DB](set-throughput.md) - Read more about [Azure Cosmos DB serverless](serverless.md) -- Get familiar with the concept of [Request Units](request-units.md) +- Get familiar with the concept of [Request Units](request-units.md) \ No newline at end of file diff --git a/articles/cosmos-db/total-cost-ownership.md b/articles/cosmos-db/total-cost-ownership.md index d5516c6c17c94..551430a4a29be 100644 --- a/articles/cosmos-db/total-cost-ownership.md +++ b/articles/cosmos-db/total-cost-ownership.md @@ -1,12 +1,12 @@ --- title: Total Cost of Ownership (TCO) with Azure Cosmos DB description: This article compares the total cost of ownership of Azure Cosmos DB with IaaS and on-premises databases -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: conceptual ms.date: 08/26/2021 -ms.reviewer: sngun --- # Total Cost of Ownership (TCO) with Azure Cosmos DB diff --git a/articles/cosmos-db/try-free.md b/articles/cosmos-db/try-free.md new file mode 100644 index 0000000000000..17c90d276f2bb --- /dev/null +++ b/articles/cosmos-db/try-free.md @@ -0,0 +1,132 @@ +--- +title: Try Azure Cosmos DB free +description: Try Azure Cosmos DB free of charge. No sign-up or credit card required. It's easy to test your apps, deploy, and run small workloads free for 30 days. Upgrade your account at any time during your trial. +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown +ms.service: cosmos-db +ms.topic: overview +ms.date: 08/26/2021 +--- + +# Try Azure Cosmos DB free +[!INCLUDE[appliesto-all-apis](includes/appliesto-all-apis.md)] + +[Try Azure Cosmos DB](https://aka.ms/trycosmosdb) makes it easy to try out Azure Cosmos DB for free before you commit. There's no credit card required to get started. Your account is free for 30 days. After expiration, a new sandbox account can be created. You can extend beyond 30 days for 24 hours. You can upgrade your active Try Azure Cosmos DB account at any time during the 30 day trial period. If you're using the SQL API, migrate your Try Azure Cosmos DB data to your upgraded account. + +This article walks you through how to create your account, limits, and upgrading your account. This article also walks through how to migrate your data from your Try Azure Cosmos DB sandbox to your own account using the SQL API. + +## Try Azure Cosmos DB limits + +The following table lists the limits for the [Try Azure Cosmos DB](https://aka.ms/trycosmosdb) for Free trial. + +| Resource | Limit | +| --- | --- | +| Duration of the trial | 30 days (a new trial can be requested after expiration) After expiration, the information stored is deleted. Prior to expiration you can upgrade your account and migrate the information stored. 
| 
+| Maximum containers per subscription (SQL, Gremlin, Table API) | 1 |
+| Maximum containers per subscription (MongoDB API) | 3 |
+| Maximum throughput per container | 5,000 RU/s |
+| Maximum throughput per shared-throughput database | 20,000 RU/s |
+| Maximum total storage per account | 10 GB |
+
+Try Azure Cosmos DB supports global distribution in only the Central US, North Europe, and Southeast Asia regions. Azure support tickets can't be created for Try Azure Cosmos DB accounts. However, support is provided for subscribers with existing support plans.
+
+## Create your Try Azure Cosmos DB account
+
+From the [Try Azure Cosmos DB home page](https://aka.ms/trycosmosdb), select an API. Azure Cosmos DB provides five APIs: Core (SQL) and MongoDB for document data, Gremlin for graph data, Azure Table, and Cassandra.
+
+> [!NOTE]
+> Not sure which API will best meet your needs? To learn more about the APIs for Azure Cosmos DB, see [Choose an API in Azure Cosmos DB](choose-api.md).
+
+:::image type="content" source="media/try-free/try-cosmos-db-page.png" lightbox="media/try-free/try-cosmos-db-page.png" alt-text="Screenshot of the API options including Core (SQL), MongoDB, and Cassandra on the Try Azure Cosmos DB page.":::
+
+## Launch a Quick Start
+
+Launch the Quickstart in Data Explorer in the Azure portal to start using Azure Cosmos DB, or get started with our documentation.
+
+* [Core (SQL) API Quickstart](sql/create-cosmosdb-resources-portal.md#add-a-database-and-a-container)
+* [MongoDB API Quickstart](mongodb/create-mongodb-python.md#learn-the-object-model)
+* [Apache Cassandra API](cassandra/cassandra-adoption.md)
+* [Gremlin (Graph) API](graph/create-graph-console.md#add-a-graph)
+* [Azure Table API](table/create-table-dotnet.md)
+
+You can also get started with one of the learning resources in Data Explorer.
+
+:::image type="content" source="media/try-free/data-explorer.png" lightbox="media/try-free/data-explorer.png" alt-text="Screenshot of the Azure Cosmos DB Data Explorer landing page.":::
+
+## Upgrade your account
+
+Your account is free for 30 days. After expiration, a new sandbox account can be created. You can upgrade your active Try Azure Cosmos DB account at any time during the 30-day trial period. Here are the steps to start an upgrade.
+
+1. Select the option to upgrade your current account on the Dashboard page or from the [Try Azure Cosmos DB](https://aka.ms/trycosmosdb) page.
+
+    :::image type="content" source="media/try-free/upgrade-account.png" lightbox="media/try-free/upgrade-account.png" alt-text="Confirmation page for the account upgrade experience.":::
+
+1. Select **Sign up for Azure Account** and create an Azure Cosmos DB account.
+
+After you've signed up for an Azure account, you can migrate your database from Try Azure Cosmos DB to your new Azure account if you're using the SQL API. Here are the steps to migrate.
+
+### Create an Azure Cosmos DB account
+
+[!INCLUDE [cosmos-db-create-dbaccount](includes/cosmos-db-create-dbaccount.md)]
+
+Navigate back to the **Upgrade** page and select **Next** to move on to the third step and move your data.
+
+> [!NOTE]
+> You can have up to one free tier Azure Cosmos DB account per Azure subscription and must opt in when creating the account. If you don't see the option to apply the free tier discount, another account in the subscription has already been enabled with free tier. | 
+:::image type="content" source="media/try-free/sign-up-sign-in.png" lightbox="media/try-free/sign-up-sign-in.png" alt-text="Screenshot of the sign-in/sign-up experience to upgrade your current account.":::
+
+## Migrate your Try Azure Cosmos DB data
+
+If you're using the SQL API, you can migrate your Try Azure Cosmos DB data to your upgraded account. Here’s how to migrate your Try Azure Cosmos DB database to your new Azure Cosmos DB Core (SQL) API account.
+
+### Prerequisites
+
+* Must be using the Azure Cosmos DB Core (SQL) API.
+* Must have an active Try Azure Cosmos DB account and Azure account.
+* Must have an Azure Cosmos DB account using the Core (SQL) API in your Azure account.
+
+### Migrate your data
+
+1. Locate your **Primary Connection string** for the Azure Cosmos DB account you created for your data.
+
+    1. Go to your Azure Cosmos DB account in the Azure portal.
+
+    1. Find the connection string of your new Cosmos DB account within the **Keys** page of your new account.
+
+    :::image type="content" source="media/try-free/migrate-data.png" lightbox="media/try-free/migrate-data.png" alt-text="Screenshot of the Keys page for an Azure Cosmos DB account.":::
+
+1. Insert the connection string of the new Cosmos DB account in the **Upgrade your account** page.
+
+1. Select **Next** to move the data to your account.
+
+1. Provide your email address to be notified when the migration is complete.
+
+## Delete your account
+
+There can be only one free Try Azure Cosmos DB account per Microsoft account. If you want to try a different API, you'll have to delete your existing account and create a new one. Here’s how to delete your account.
+
+1. Go to the [Try Azure Cosmos DB](https://aka.ms/trycosmosdb) page.
+
+1. Select **Delete my account**.
+
+    :::image type="content" source="media/try-free/upgrade-account.png" lightbox="media/try-free/upgrade-account.png" alt-text="Confirmation page for the account upgrade experience.":::
+
+## Next steps
+
+After you create a Try Azure Cosmos DB sandbox account, you can start building apps with Azure Cosmos DB with the following articles:
+
+* Use [SQL API to build a console app using .NET](sql/sql-api-get-started.md) to manage data in Azure Cosmos DB.
+* Use [MongoDB API to build a sample app using Python](mongodb/create-mongodb-python.md) to manage data in Azure Cosmos DB.
+* [Download a notebook from the gallery](publish-notebook-gallery.md#download-a-notebook-from-the-gallery) and analyze your data.
+* Learn more about [understanding your Azure Cosmos DB bill](understand-your-bill.md)
+* Get started with Azure Cosmos DB with one of our quickstarts:
+    * [Get started with Azure Cosmos DB SQL API](sql/create-cosmosdb-resources-portal.md#add-a-database-and-a-container)
+    * [Get started with Azure Cosmos DB API for MongoDB](mongodb/create-mongodb-python.md#learn-the-object-model)
+    * [Get started with Azure Cosmos DB Cassandra API](cassandra/cassandra-adoption.md)
+    * [Get started with Azure Cosmos DB Gremlin API](graph/create-graph-console.md#add-a-graph)
+    * [Get started with Azure Cosmos DB Table API](table/create-table-dotnet.md)
+* Trying to do capacity planning for a migration to Azure Cosmos DB? You can use information about your existing database cluster for [capacity planning](sql/estimate-ru-with-capacity-planner.md).
+* If all you know is the number of vCores and servers in your existing database cluster, see [estimating request units using vCores or vCPUs](convert-vcore-to-request-unit.md).
+* If you know typical request rates for your current database workload, see [estimating request units using Azure Cosmos DB capacity planner](estimate-ru-with-capacity-planner.md). diff --git a/articles/cosmos-db/tutorial-setup-ci-cd.md b/articles/cosmos-db/tutorial-setup-ci-cd.md index 1f21e92436182..c6d58ccd9283a 100644 --- a/articles/cosmos-db/tutorial-setup-ci-cd.md +++ b/articles/cosmos-db/tutorial-setup-ci-cd.md @@ -6,7 +6,7 @@ ms.topic: how-to ms.date: 01/28/2020 ms.author: esarroyo author: StefArroyo -ms.reviewer: sngun +ms.reviewer: mjbrown ms.custom: devx-track-csharp --- # Set up a CI/CD pipeline with the Azure Cosmos DB Emulator build task in Azure DevOps diff --git a/articles/cosmos-db/understand-your-bill.md b/articles/cosmos-db/understand-your-bill.md index 2cdd5fe7b1d2f..72cfff2a0f3cb 100644 --- a/articles/cosmos-db/understand-your-bill.md +++ b/articles/cosmos-db/understand-your-bill.md @@ -1,12 +1,12 @@ --- title: Understanding your Azure Cosmos DB bill description: This article explains how to understand your Azure Cosmos DB bill with some examples. -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.topic: conceptual ms.date: 03/31/2022 -ms.reviewer: sngun --- # Understand your Azure Cosmos DB bill diff --git a/articles/cosmos-db/unique-keys.md b/articles/cosmos-db/unique-keys.md index 2173d76ab145c..5fc526f9b66b6 100644 --- a/articles/cosmos-db/unique-keys.md +++ b/articles/cosmos-db/unique-keys.md @@ -1,13 +1,13 @@ --- title: Use unique keys in Azure Cosmos DB description: Learn how to define and use unique keys for an Azure Cosmos database. This article also describes how unique keys add a layer of data integrity. -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: conceptual ms.date: 08/26/2021 -ms.reviewer: sngun --- # Unique key constraints in Azure Cosmos DB diff --git a/articles/cosmos-db/update-backup-storage-redundancy.md b/articles/cosmos-db/update-backup-storage-redundancy.md index d2be881c4c3f8..4a7fb2c33b566 100644 --- a/articles/cosmos-db/update-backup-storage-redundancy.md +++ b/articles/cosmos-db/update-backup-storage-redundancy.md @@ -6,7 +6,7 @@ ms.service: cosmos-db ms.topic: how-to ms.date: 12/03/2021 ms.author: govindk -ms.reviewer: sngun +ms.reviewer: mjbrown --- diff --git a/articles/cosmos-db/use-cases.md b/articles/cosmos-db/use-cases.md index de054cd689f7e..b4b5c94f08aac 100644 --- a/articles/cosmos-db/use-cases.md +++ b/articles/cosmos-db/use-cases.md @@ -2,8 +2,9 @@ title: Common use cases and scenarios for Azure Cosmos DB description: 'Learn about the top five use cases for Azure Cosmos DB: user generated content, event logging, catalog data, user preferences data, and Internet of Things (IoT).' ms.service: cosmos-db -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.topic: conceptual ms.date: 05/21/2019 ms.custom: cosmos-db-video diff --git a/articles/cosmos-db/use-metrics.md b/articles/cosmos-db/use-metrics.md index e203c1387897b..7b3bdacb02a82 100644 --- a/articles/cosmos-db/use-metrics.md +++ b/articles/cosmos-db/use-metrics.md @@ -3,7 +3,7 @@ title: Monitor and debug with insights in Azure Cosmos DB description: Use metrics in Azure Cosmos DB to debug common issues and monitor the database. 
ms.author: esarroyo author: StefArroyo -ms.reviewer: sngun +ms.reviewer: mjbrown ms.service: cosmos-db ms.subservice: cosmosdb-sql ms.topic: how-to diff --git a/articles/cosmos-db/visualize-qlik-sense.md b/articles/cosmos-db/visualize-qlik-sense.md index c3a5c766d2d65..efff8de1aed65 100644 --- a/articles/cosmos-db/visualize-qlik-sense.md +++ b/articles/cosmos-db/visualize-qlik-sense.md @@ -7,7 +7,7 @@ author: gahl-levy ms.author: gahllevy ms.topic: how-to ms.date: 05/23/2019 -ms.reviewer: sngun +ms.reviewer: mjbrown --- # Connect Qlik Sense to Azure Cosmos DB and visualize your data diff --git a/articles/cosmos-db/whitepapers.md b/articles/cosmos-db/whitepapers.md index fd858aab6ba45..0892addba4fd3 100644 --- a/articles/cosmos-db/whitepapers.md +++ b/articles/cosmos-db/whitepapers.md @@ -3,8 +3,9 @@ title: Whitepapers that describe Azure Cosmos DB concepts description: Get the list of whitepapers for Azure Cosmos DB, these whitepapers describe the concepts in depth. ms.service: cosmos-db ms.subservice: cosmosdb-sql -author: markjbrown -ms.author: mjbrown +author: seesharprun +ms.author: sidandrews +ms.reviewer: mjbrown ms.topic: conceptual ms.date: 05/07/2021 ms.custom: seodec18 diff --git a/articles/cost-management-billing/cost-management-billing-faq.yml b/articles/cost-management-billing/cost-management-billing-faq.yml index 3d53fecbad6a1..1ace45d599355 100644 --- a/articles/cost-management-billing/cost-management-billing-faq.yml +++ b/articles/cost-management-billing/cost-management-billing-faq.yml @@ -1,14 +1,14 @@ ### YamlMime:FAQ metadata: title: Cost Management + Billing frequently asked questions (FAQ) - titleSuffix: Azure Cost Management + Billing + titleSuffix: Microsoft Cost Management description: Frequently asked questions and answers. author: bandersmsft ms.reviewer: adwise ms.service: cost-management-billing ms.subservice: common ms.topic: faq - ms.date: 02/17/2022 + ms.date: 06/07/2022 ms.author: banders title: Cost Management + Billing frequently asked questions (FAQ) summary: | @@ -42,6 +42,14 @@ sections: - question: How do I cancel my Azure subscription? answer: | See [Cancel your Azure subscription](manage/cancel-azure-subscription.md). + - question: When does Azure finalize or close the billing cycle of a closed month? + answer: | + Azure finalizes or closes the current billing period up to 72 hours (three calendar days) after the billing period ends. The following examples illustrate how billing periods could end: + + - Enterprise Agreement (EA) subscriptions – If the billing month ends on March 31, estimated charges are updated up to 72 hours later. In this example, by midnight (UTC) April 4. + - Pay-as-you-go subscriptions – If the billing month ends on May 15, then the estimated charges might get updated up to 72 hours later. In this example, by midnight (UTC) May 19. + + For more information, see [Cost and usage data updates and retention](costs/understand-cost-mgt-data.md#cost-and-usage-data-updates-and-retention). 
- name: Cost Management questions questions: diff --git a/articles/cost-management-billing/cost-management-billing-overview.md b/articles/cost-management-billing/cost-management-billing-overview.md index 42a6e955e53be..751b82a981774 100644 --- a/articles/cost-management-billing/cost-management-billing-overview.md +++ b/articles/cost-management-billing/cost-management-billing-overview.md @@ -1,6 +1,6 @@ --- title: Overview of Cost Management + Billing -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: You use Cost Management + Billing features to conduct billing administrative tasks and manage billing access to costs. You also use the features to monitor and control Azure spending and to optimize Azure resource use. keywords: author: bandersmsft diff --git a/articles/cost-management-billing/costs/assign-access-acm-data.md b/articles/cost-management-billing/costs/assign-access-acm-data.md index 454ee54128b91..183faadc016c3 100644 --- a/articles/cost-management-billing/costs/assign-access-acm-data.md +++ b/articles/cost-management-billing/costs/assign-access-acm-data.md @@ -1,6 +1,6 @@ --- title: Assign access to Cost Management data -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article walks you though assigning permission to Cost Management data for various access scopes. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/aws-integration-manage.md b/articles/cost-management-billing/costs/aws-integration-manage.md index 454ba3c765f83..939813fc988e5 100644 --- a/articles/cost-management-billing/costs/aws-integration-manage.md +++ b/articles/cost-management-billing/costs/aws-integration-manage.md @@ -1,6 +1,6 @@ --- title: Manage AWS costs and usage in Cost Management -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article helps you understand how to use cost analysis and budgets in Cost Management to manage your AWS costs and usage. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/aws-integration-set-up-configure.md b/articles/cost-management-billing/costs/aws-integration-set-up-configure.md index e1aa0bfa205c2..e71c00db3cec8 100644 --- a/articles/cost-management-billing/costs/aws-integration-set-up-configure.md +++ b/articles/cost-management-billing/costs/aws-integration-set-up-configure.md @@ -1,6 +1,6 @@ --- title: Set up AWS integration with Cost Management -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article walks you through setting up and configuring AWS Cost and Usage report integration with Cost Management. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/cost-analysis-built-in-views.md b/articles/cost-management-billing/costs/cost-analysis-built-in-views.md index 39871921ae4ab..665589fc77c1a 100644 --- a/articles/cost-management-billing/costs/cost-analysis-built-in-views.md +++ b/articles/cost-management-billing/costs/cost-analysis-built-in-views.md @@ -1,6 +1,6 @@ --- title: Use built-in views in Cost analysis -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article helps you understand when to use which view, how each one provides unique insights about your costs and recommended next steps to investigate further. 
author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/cost-analysis-common-uses.md b/articles/cost-management-billing/costs/cost-analysis-common-uses.md index 8c82a59e820f5..66f0d7f0c9357 100644 --- a/articles/cost-management-billing/costs/cost-analysis-common-uses.md +++ b/articles/cost-management-billing/costs/cost-analysis-common-uses.md @@ -1,6 +1,6 @@ --- title: Common cost analysis uses in Cost Management -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article explains how you can get results for common cost analysis tasks in Cost Management. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/cost-management-error-codes.md b/articles/cost-management-billing/costs/cost-management-error-codes.md index 26ae22393c963..29a5c5279ea7b 100644 --- a/articles/cost-management-billing/costs/cost-management-error-codes.md +++ b/articles/cost-management-billing/costs/cost-management-error-codes.md @@ -1,6 +1,6 @@ --- title: Troubleshoot common Cost Management errors -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article describes common Cost Management errors and provides information about solutions. author: bandersmsft ms.reviewer: micflan diff --git a/articles/cost-management-billing/costs/cost-mgt-alerts-monitor-usage-spending.md b/articles/cost-management-billing/costs/cost-mgt-alerts-monitor-usage-spending.md index 5a4757d26cf58..e8c5e3ee3f336 100644 --- a/articles/cost-management-billing/costs/cost-mgt-alerts-monitor-usage-spending.md +++ b/articles/cost-management-billing/costs/cost-mgt-alerts-monitor-usage-spending.md @@ -1,10 +1,10 @@ --- title: Monitor usage and spending with cost alerts in Cost Management -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article describes how cost alerts help you monitor usage and spending in Cost Management. author: bandersmsft ms.author: banders -ms.date: 05/12/2022 +ms.date: 06/07/2022 ms.topic: how-to ms.service: cost-management-billing ms.subservice: cost-management @@ -13,7 +13,9 @@ ms.reviewer: adwise # Use cost alerts to monitor usage and spending -This article helps you understand and use Cost Management alerts to monitor your Azure usage and spending. Cost alerts are automatically generated based when Azure resources are consumed. Alerts show all active cost management and billing alerts together in one place. When your consumption reaches a given threshold, alerts are generated by Cost Management. There are three types of cost alerts: budget alerts, credit alerts, and department spending quota alerts. +This article helps you understand and use Cost Management alerts to monitor your Azure usage and spending. Cost alerts are automatically generated when Azure resources are consumed. Alerts show all active cost management and billing alerts together in one place. When your consumption reaches a given threshold, alerts are generated by Cost Management. There are three main types of cost alerts: budget alerts, credit alerts, and department spending quota alerts. + +You can also [create a cost anomaly alert](../understand/analyze-unexpected-charges.md#create-an-anomaly-alert) to automatically get notified when an anomaly is detected.
## Required permissions for alerts @@ -29,6 +31,8 @@ Budget alerts notify you when spending, based on usage or cost, reaches or excee In the Azure portal, budgets are defined by cost. Using the Azure Consumption API, budgets are defined by cost or by consumption usage. Budget alerts support both cost-based and usage-based budgets. Budget alerts are generated automatically whenever the budget alert conditions are met. You can view all cost alerts in the Azure portal. Whenever an alert is generated, it's shown in cost alerts. An alert email is also sent to the people in the alert recipients list of the budget. +If you have an Enterprise Agreement, you can [Create and edit budgets with PowerShell](tutorial-acm-create-budgets.md#create-and-edit-budgets-with-powershell). Customers with a Microsoft Customer Agreement should use the [Budgets REST API](/rest/api/consumption/budgets/create-or-update) to create budgets programmatically. + You can use the Budget API to send email alerts in a different language. For more information, see [Supported locales for budget alert emails](manage-automation.md#supported-locales-for-budget-alert-emails). ## Credit alerts diff --git a/articles/cost-management-billing/costs/cost-mgt-best-practices.md b/articles/cost-management-billing/costs/cost-mgt-best-practices.md index f4d1c139c17fd..276c93688c9b3 100644 --- a/articles/cost-management-billing/costs/cost-mgt-best-practices.md +++ b/articles/cost-management-billing/costs/cost-mgt-best-practices.md @@ -1,6 +1,6 @@ --- title: Optimize your cloud investment with Cost Management -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article helps get the most value out of your cloud investments, reduce your costs, and evaluate where your money is being spent. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/get-started-partners.md b/articles/cost-management-billing/costs/get-started-partners.md index 46707a6fa0e4e..d1499a99a85f9 100644 --- a/articles/cost-management-billing/costs/get-started-partners.md +++ b/articles/cost-management-billing/costs/get-started-partners.md @@ -1,6 +1,6 @@ --- title: Get started with Cost Management for partners -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article explains how partners use Cost Management features and how they enable access for their customers. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/group-filter.md b/articles/cost-management-billing/costs/group-filter.md index bc2367fe58f10..62a0885121ea0 100644 --- a/articles/cost-management-billing/costs/group-filter.md +++ b/articles/cost-management-billing/costs/group-filter.md @@ -1,6 +1,6 @@ --- title: Group and filter options in Cost Management -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article explains how to use group and filter options in Cost Management. 
author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/ingest-azure-usage-at-scale.md b/articles/cost-management-billing/costs/ingest-azure-usage-at-scale.md index 2c49b55b5e646..db9cd874e63ab 100644 --- a/articles/cost-management-billing/costs/ingest-azure-usage-at-scale.md +++ b/articles/cost-management-billing/costs/ingest-azure-usage-at-scale.md @@ -1,6 +1,6 @@ --- title: Retrieve large cost datasets recurringly with exports from Cost Management -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article helps you regularly export large amounts of data with exports from Cost Management. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/media/assign-access-acm-data/add-permissions.png b/articles/cost-management-billing/costs/media/assign-access-acm-data/add-permissions.png deleted file mode 100644 index 6e04db9b08fd4..0000000000000 Binary files a/articles/cost-management-billing/costs/media/assign-access-acm-data/add-permissions.png and /dev/null differ diff --git a/articles/cost-management-billing/costs/media/aws-integration-setup-configure/aws-connector.png b/articles/cost-management-billing/costs/media/aws-integration-setup-configure/aws-connector.png deleted file mode 100644 index e1eb1f1b6e7d0..0000000000000 Binary files a/articles/cost-management-billing/costs/media/aws-integration-setup-configure/aws-connector.png and /dev/null differ diff --git a/articles/cost-management-billing/costs/media/get-started-partners/customer-tenant-view-cost-analysis.png b/articles/cost-management-billing/costs/media/get-started-partners/customer-tenant-view-cost-analysis.png deleted file mode 100644 index 456e5703c102d..0000000000000 Binary files a/articles/cost-management-billing/costs/media/get-started-partners/customer-tenant-view-cost-analysis.png and /dev/null differ diff --git a/articles/cost-management-billing/costs/media/quick-create-budget-template/deploy-to-azure.png b/articles/cost-management-billing/costs/media/quick-create-budget-template/deploy-to-azure.png deleted file mode 100644 index 764a6e256fa88..0000000000000 Binary files a/articles/cost-management-billing/costs/media/quick-create-budget-template/deploy-to-azure.png and /dev/null differ diff --git a/articles/cost-management-billing/costs/media/reporting-get-started/microsoft-azure-consumption-commitment.png b/articles/cost-management-billing/costs/media/reporting-get-started/microsoft-azure-consumption-commitment.png deleted file mode 100644 index d33ce75391e6e..0000000000000 Binary files a/articles/cost-management-billing/costs/media/reporting-get-started/microsoft-azure-consumption-commitment.png and /dev/null differ diff --git a/articles/cost-management-billing/costs/media/reporting-get-started/usage-charges.png b/articles/cost-management-billing/costs/media/reporting-get-started/usage-charges.png deleted file mode 100644 index cdda2793eb26c..0000000000000 Binary files a/articles/cost-management-billing/costs/media/reporting-get-started/usage-charges.png and /dev/null differ diff --git a/articles/cost-management-billing/costs/media/tutorial-acm-create-budgets/deploy-to-azure.png b/articles/cost-management-billing/costs/media/tutorial-acm-create-budgets/deploy-to-azure.png deleted file mode 100644 index 764a6e256fa88..0000000000000 Binary files a/articles/cost-management-billing/costs/media/tutorial-acm-create-budgets/deploy-to-azure.png and /dev/null differ diff --git 
a/articles/cost-management-billing/costs/media/tutorial-acm-create-budgets/manage-action-groups01.png b/articles/cost-management-billing/costs/media/tutorial-acm-create-budgets/manage-action-groups01.png deleted file mode 100644 index a0225d37a6a5e..0000000000000 Binary files a/articles/cost-management-billing/costs/media/tutorial-acm-create-budgets/manage-action-groups01.png and /dev/null differ diff --git a/articles/cost-management-billing/costs/media/tutorial-acm-create-budgets/monthly-budget-alert.png b/articles/cost-management-billing/costs/media/tutorial-acm-create-budgets/monthly-budget-alert.png deleted file mode 100644 index f88320ea7c670..0000000000000 Binary files a/articles/cost-management-billing/costs/media/tutorial-acm-create-budgets/monthly-budget-alert.png and /dev/null differ diff --git a/articles/cost-management-billing/costs/reporting-get-started.md b/articles/cost-management-billing/costs/reporting-get-started.md index 046f9112d40c3..cd9936d616d4a 100644 --- a/articles/cost-management-billing/costs/reporting-get-started.md +++ b/articles/cost-management-billing/costs/reporting-get-started.md @@ -65,5 +65,5 @@ For more information about credits, see [Track Microsoft Customer Agreement Azur - [Explore and analyze costs with cost analysis](quick-acm-cost-analysis.md). - [Analyze Azure costs with the Power BI App](analyze-cost-data-azure-cost-management-power-bi-template-app.md). -- [Connect to Azure Cost Management data in Power BI Desktop](/power-bi/connect-data/desktop-connect-azure-cost-management). +- [Connect to Microsoft Cost Management data in Power BI Desktop](/power-bi/connect-data/desktop-connect-azure-cost-management). - [Create and manage exported data](tutorial-export-acm-data.md). \ No newline at end of file diff --git a/articles/cost-management-billing/costs/save-share-views.md b/articles/cost-management-billing/costs/save-share-views.md index fd307690a69d3..8a513a9039aaa 100644 --- a/articles/cost-management-billing/costs/save-share-views.md +++ b/articles/cost-management-billing/costs/save-share-views.md @@ -1,6 +1,6 @@ --- title: Save and share customized views -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article explains how to save and share a customized view with others. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/tutorial-acm-create-budgets.md b/articles/cost-management-billing/costs/tutorial-acm-create-budgets.md index fb2deac38a44c..cd369252e8287 100644 --- a/articles/cost-management-billing/costs/tutorial-acm-create-budgets.md +++ b/articles/cost-management-billing/costs/tutorial-acm-create-budgets.md @@ -3,7 +3,7 @@ title: Tutorial - Create and manage Azure budgets description: This tutorial helps you plan and account for the costs of Azure services that you consume. author: bandersmsft ms.author: banders -ms.date: 05/13/2022 +ms.date: 06/07/2022 ms.topic: tutorial ms.service: cost-management-billing ms.subservice: cost-management @@ -160,10 +160,10 @@ Budget integration with action groups works for action groups which have enabled ## Create and edit budgets with PowerShell -If you're an EA customer, you can create and edit budgets programmatically using the Azure PowerShell module. +If you're an EA customer, you can create and edit budgets programmatically using the Azure PowerShell module. However, we recommend that you use REST APIs to create and edit budgets because CLI commands might not support the latest version of the APIs. 
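As a rough illustration of the PowerShell path referenced above, an EA budget with an email alert could be created with the Az.Consumption module along the following lines. This is a sketch only: the subscription ID, budget name, amount, dates, and contact address are placeholders, not values taken from the article.

```powershell
# Sketch: create a monthly cost budget on the current subscription with a
# notification that fires when actual cost reaches 90% of the budgeted amount.
Connect-AzAccount
Set-AzContext -Subscription "00000000-0000-0000-0000-000000000000"   # placeholder subscription ID

$firstOfMonth = (Get-Date -Day 1).Date

New-AzConsumptionBudget `
    -Name "MonthlyBudget" `
    -Category Cost `
    -Amount 1000 `
    -TimeGrain Monthly `
    -StartDate $firstOfMonth `
    -EndDate $firstOfMonth.AddYears(1) `
    -ContactEmail "billing-admin@contoso.com" `
    -NotificationKey "Actual90Percent" `
    -NotificationEnabled `
    -NotificationThreshold 90
```

Customers with a Microsoft Customer Agreement would create the equivalent budget through the Budgets REST API noted below rather than through these cmdlets.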
->[!Note] ->Customers with a Microsoft Customer Agreement should use the [Budgets REST API](/rest/api/consumption/budgets/create-or-update) to create budgets programmatically because PowerShell and CLI aren't yet supported. +> [!NOTE] +> Customers with a Microsoft Customer Agreement should use the [Budgets REST API](/rest/api/consumption/budgets/create-or-update) to create budgets programmatically. To download the latest version of Azure PowerShell, run the following command: diff --git a/articles/cost-management-billing/costs/tutorial-export-acm-data.md b/articles/cost-management-billing/costs/tutorial-export-acm-data.md index 389ad752a27ea..42d318210004e 100644 --- a/articles/cost-management-billing/costs/tutorial-export-acm-data.md +++ b/articles/cost-management-billing/costs/tutorial-export-acm-data.md @@ -1,6 +1,6 @@ --- title: Tutorial - Create and manage exported data from Cost Management -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article shows you how you can create and manage exported Cost Management data so that you can use it in external systems. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/understand-cost-mgt-data.md b/articles/cost-management-billing/costs/understand-cost-mgt-data.md index 42f2059a7f9a2..54db5ed4f80f1 100644 --- a/articles/cost-management-billing/costs/understand-cost-mgt-data.md +++ b/articles/cost-management-billing/costs/understand-cost-mgt-data.md @@ -1,6 +1,6 @@ --- title: Understand Cost Management data -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article helps you better understand data that's included in Cost Management and how frequently it's processed, collected, shown, and closed. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/understand-work-scopes.md b/articles/cost-management-billing/costs/understand-work-scopes.md index b74072f6e294a..02c9b039596a7 100644 --- a/articles/cost-management-billing/costs/understand-work-scopes.md +++ b/articles/cost-management-billing/costs/understand-work-scopes.md @@ -1,6 +1,6 @@ --- title: Understand and work with Cost Management scopes -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article helps you understand billing and resource management scopes available in Azure and how to use the scopes in Cost Management and APIs. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/index.yml b/articles/cost-management-billing/index.yml index bc75cce295f07..156372581c278 100644 --- a/articles/cost-management-billing/index.yml +++ b/articles/cost-management-billing/index.yml @@ -4,7 +4,7 @@ title: Cost Management + Billing documentation summary: Cost Management + Billing helps you understand your Azure invoice (bill), manage your billing account and subscriptions, monitor and control Azure spending and optimize resource use. Learn how to analyze costs, create and manage budgets, export data, and review and act on recommendations. metadata: title: Cost Management + Billing - titleSuffix: Azure Cost Management + Billing documentation + titleSuffix: Microsoft Cost Management description: Cost Management + Billing helps you understand your Azure invoice (bill), manage your billing account and subscriptions, monitor and control Azure spending and optimize resource use. 
Learn how to analyze costs, create and manage budgets, export data, and review and act on recommendations. ms.service: cost-management-billing ms.subservice: common diff --git a/articles/cost-management-billing/manage/cancel-azure-subscription.md b/articles/cost-management-billing/manage/cancel-azure-subscription.md index edd170dbee582..e5f8fe1adf74d 100644 --- a/articles/cost-management-billing/manage/cancel-azure-subscription.md +++ b/articles/cost-management-billing/manage/cancel-azure-subscription.md @@ -78,7 +78,7 @@ After you cancel, your services are disabled. That means your virtual machines a After your subscription is canceled, Microsoft waits 30 - 90 days before permanently deleting your data in case you need to access it or you change your mind. We don't charge you for keeping the data. To learn more, see [Microsoft Trust Center - How we manage your data](https://go.microsoft.com/fwLink/p/?LinkID=822930&clcid=0x409). -## Delete free trial or pay-as-you-go subscriptions +## Delete subscriptions If you have a free trial or pay-as-you-go subscription, you don't have to wait 90 days for the subscription to automatically delete. You can delete your subscription *three days* after you cancel it. The **Delete subscription** option isn't available until three days after you cancel your subscription. diff --git a/articles/cost-management-billing/manage/create-customer-subscription.md b/articles/cost-management-billing/manage/create-customer-subscription.md new file mode 100644 index 0000000000000..7500190626c0f --- /dev/null +++ b/articles/cost-management-billing/manage/create-customer-subscription.md @@ -0,0 +1,68 @@ +--- +title: Create a subscription for a partner's customer +titleSuffix: Azure Cost Management + Billing +description: Learn how a Microsoft Partner creates a subscription for a customer in the Azure portal. +author: bandersmsft +ms.reviewer: amberb +ms.service: cost-management-billing +ms.subservice: billing +ms.topic: conceptual +ms.date: 05/25/2022 +ms.author: banders +--- + +# Create a subscription for a partner's customer + +This article helps a Microsoft Partner with a [Microsoft Partner Agreement](https://www.microsoft.com/licensing/news/introducing-microsoft-partner-agreement) create a [Microsoft Customer Agreement](https://azure.microsoft.com/pricing/purchase-options/microsoft-customer-agreement/) subscription for their customer. + +To learn more about billing accounts and identify your billing account type, see [View billing accounts in Azure portal](view-all-accounts.md). + +## Permission required to create Azure subscriptions + +You need the following permissions to create customer subscriptions: + +- Global Admin and Admin Agent role in the CSP partner organization. + +For more information, see [Partner Center - Assign users roles and permissions](/partner-center/permissions-overview). The user needs to sign in to the partner tenant to create Azure subscriptions. + +## Create a subscription as a partner for a customer + +Partners with a Microsoft Partner Agreement use the following steps to create a new Microsoft Azure Plan subscription for their customers. The subscription is created under the partner’s billing account and billing profile. + +1. Sign in to the Azure portal using your Partner Center account. + Make sure you are in your Partner Center directory (tenant), not a customer’s tenant. +1. Navigate to **Cost Management + Billing**. +1. Select the Billing scope for your billing account where the customer account resides. +1. 
In the left menu under **Billing**, select **Customers**. + :::image type="content" source="./media/create-customer-subscription/customers-list.png" alt-text="Screenshot showing the Customers list where you see your list of customers." lightbox="./media/create-customer-subscription/customers-list.png" ::: +1. On the Customers page, select the customer. If you have only one customer, the selection is unavailable. +1. In the left menu, under **Products + services**, select **All billing subscriptions**. +1. On the Azure subscription page, select **+ Add** to create a subscription. Then select the type of subscription to add. For example, **Usage based/ Azure subscription**. + :::image type="content" source="./media/create-customer-subscription/all-billing-subscriptions-add.png" alt-text="Screenshot showing navigation to Add where you create a customer subscription." lightbox="./media/create-customer-subscription/all-billing-subscriptions-add.png" ::: +1. On the Basics tab, enter a subscription name. +1. Select the partner's billing account. +1. Select the partner's billing profile. +1. Select the customer that you're creating the subscription for. +1. If applicable, select a reseller. +1. Next to **Plan**, select **Microsoft Azure Plan for DevTest** if the subscription will be used for development or testing workloads. Otherwise, select **Microsoft Azure Plan**. + :::image type="content" source="./media/create-customer-subscription/create-customer-subscription-basics-tab.png" alt-text="Screenshot showing the Basics tab where you enter basic information about the customer subscription." lightbox="./media/create-customer-subscription/create-customer-subscription-basics-tab.png" ::: +1. Optionally, select the Tags tab and then enter tag pairs for **Name** and **Value**. +1. Select **Review + create**. You should see a message stating `Validation passed`. +1. Verify that the subscription information is correct, then select **Create**. You'll see a notification that the subscription is getting created. + +After the new subscription is created, the customer can see it on the **Subscriptions** page. + +## Create an Azure subscription programmatically + +You can also create subscriptions programmatically. For more information, see [Create Azure subscriptions programmatically](programmatically-create-subscription.md). + +## Need help? Contact us. + +If you have questions or need help, [create a support request](https://go.microsoft.com/fwlink/?linkid=2083458). + +## Next steps + +- [Add or change Azure subscription administrators](add-change-subscription-administrator.md) +- [Move resources to new resource group or subscription](../../azure-resource-manager/management/move-resource-group-and-subscription.md) +- [Create management groups for resource organization and management](../../governance/management-groups/create-management-group-portal.md) +- [Cancel your subscription for Azure](cancel-azure-subscription.md) \ No newline at end of file diff --git a/articles/cost-management-billing/manage/create-enterprise-subscription.md b/articles/cost-management-billing/manage/create-enterprise-subscription.md new file mode 100644 index 0000000000000..24166635fcdd0 --- /dev/null +++ b/articles/cost-management-billing/manage/create-enterprise-subscription.md @@ -0,0 +1,67 @@ +--- +title: Create an Enterprise Agreement subscription +titleSuffix: Azure Cost Management + Billing +description: Learn how to add a new Enterprise Agreement subscription in the Azure portal.
See information about billing account forms and view other available resources. +author: bandersmsft +ms.reviewer: amberb +ms.service: cost-management-billing +ms.subservice: billing +ms.topic: conceptual +ms.date: 05/25/2022 +ms.author: banders +--- + +# Create an Enterprise Agreement subscription + +This article helps you create an [Enterprise Agreement (EA)](https://azure.microsoft.com/pricing/enterprise-agreement/) subscription for yourself or for someone else in your current Azure Active Directory (Azure AD) directory/tenant. You may want another subscription to avoid hitting subscription quota limits, to create separate environments for security, or to isolate data for compliance reasons. + +If you want to create subscriptions for Microsoft Customer Agreements, see [Create a Microsoft Customer Agreement subscription](create-subscription.md). If you're a Microsoft Partner and you want to create a subscription for a customer, see [Create a subscription for a partner's customer](create-customer-subscription.md). Or, if you have a Microsoft Online Service Program (MOSP) billing account, also called pay-as-you-go, you can create subscriptions starting in the [Azure portal](https://portal.azure.com/#blade/Microsoft_Azure_Billing/SubscriptionsBlade) and then you complete the process at https://signup.azure.com/. + +To learn more about billing accounts and identify your billing account type, see [View billing accounts in Azure portal](view-all-accounts.md). + +## Permission required to create Azure subscriptions + +You need the following permissions to create subscriptions for an EA: + +- Account Owner role on the Enterprise Agreement enrollment. For more information, see [Understand Azure Enterprise Agreement administrative roles in Azure](understand-ea-roles.md). + +## Create an EA subscription + +Use the following information to create an EA subscription. + +1. Sign in to the [Azure portal](https://portal.azure.com). +1. Navigate to **Subscriptions** and then select **Add**. + :::image type="content" source="./media/create-enterprise-subscription/subscription-add.png" alt-text="Screenshot showing the Subscription page where you Add a subscription." lightbox="./media/create-enterprise-subscription/subscription-add.png" ::: +1. On the Create a subscription page, on the **Basics** tab, type a **Subscription name**. +1. Select the **Billing account** where the new subscription will get created. +1. Select the **Enrollment account** where the subscription will get created. +1. For the **Offer type**, select **Enterprise Dev/Test** if the subscription will be used for development or testing workloads. Otherwise, select **Microsoft Azure Enterprise**. + :::image type="content" source="./media/create-enterprise-subscription/create-subscription-basics-tab-enterprise-agreement.png" alt-text="Screenshot showing the Basics tab where you enter basic information about the enterprise subscription." lightbox="./media/create-enterprise-subscription/create-subscription-basics-tab-enterprise-agreement.png" ::: +1. Select the **Advanced** tab. +1. Select your **Subscription directory**. It's the Azure Active Directory (Azure AD) where the new subscription will get created. +1. Select a **Management group**. It's the Azure AD management group that the new subscription is associated with. You can only select management groups in the current directory. +1. Select one or more **Subscription owners**. You can select only users or service principals in the selected subscription directory.
You can't select guest directory users. If you select a service principal, enter its App ID. + :::image type="content" source="./media/create-enterprise-subscription/create-subscription-advanced-tab.png" alt-text="Screenshot showing the Advanced tab where you specify the directory, management group, and owner for the EA subscription. " lightbox="./media/create-enterprise-subscription/create-subscription-advanced-tab.png" ::: +1. Select the **Tags** tab. +1. Enter tag pairs for **Name** and **Value**. + :::image type="content" source="./media/create-enterprise-subscription/create-subscription-tags-tab.png" alt-text="Screenshot showing the tags tab where you enter tag and value pairs." lightbox="./media/create-enterprise-subscription/create-subscription-tags-tab.png" ::: +1. Select **Review + create**. You should see a message stating `Validation passed`. +1. Verify that the subscription information is correct, then select **Create**. You'll see a notification that the subscription is getting created. + +After the new subscription is created, the account owner can see it on the **Subscriptions** page. + + +## Create an Azure subscription programmatically + +You can also create subscriptions programmatically. For more information, see [Create Azure subscriptions programmatically](programmatically-create-subscription.md). + +## Need help? Contact us. + +If you have questions or need help, [create a support request](https://go.microsoft.com/fwlink/?linkid=2083458). + +## Next steps + +- [Add or change Azure subscription administrators](add-change-subscription-administrator.md) +- [Move resources to new resource group or subscription](../../azure-resource-manager/management/move-resource-group-and-subscription.md) +- [Create management groups for resource organization and management](../../governance/management-groups/create-management-group-portal.md) +- [Cancel your subscription for Azure](cancel-azure-subscription.md) \ No newline at end of file diff --git a/articles/cost-management-billing/manage/create-subscription-request.md b/articles/cost-management-billing/manage/create-subscription-request.md new file mode 100644 index 0000000000000..28bc914ee25e4 --- /dev/null +++ b/articles/cost-management-billing/manage/create-subscription-request.md @@ -0,0 +1,93 @@ +--- +title: Create a Microsoft Customer Agreement subscription request +titleSuffix: Azure Cost Management + Billing +description: Learn how to create an Azure subscription request in the Azure portal. See information about billing account forms and view other available resources. +author: bandersmsft +ms.reviewer: amberb +ms.service: cost-management-billing +ms.subservice: billing +ms.topic: conceptual +ms.date: 05/25/2022 +ms.author: banders +--- + +# Create a Microsoft Customer Agreement subscription request + +This article helps you create a [Microsoft Customer Agreement](https://azure.microsoft.com/pricing/purchase-options/microsoft-customer-agreement/) subscription for someone else that's in a different Azure Active Directory (Azure AD) directory/tenant. After the request is created, the recipient accepts the subscription request. You may want another subscription to avoid hitting subscription quota limits, to create separate environments for security, or to isolate data for compliance reasons. + +If you instead want to create a subscription for yourself or for someone else in your current Azure Active Directory (Azure AD) directory/tenant, see [Create a Microsoft Customer Agreement subscription](create-subscription.md).
If you want to create subscriptions for Enterprise Agreements, see [Create an EA subscription](create-enterprise-subscription.md). If you're a Microsoft Partner and you want to create a subscription for a customer, see [Create a subscription for a partner's customer](create-customer-subscription.md). Or, if you have a Microsoft Online Service Program (MOSP) billing account, also called pay-as-you-go, you can create subscriptions starting in the [Azure portal](https://portal.azure.com/#blade/Microsoft_Azure_Billing/SubscriptionsBlade) and then you complete the process at https://signup.azure.com/. + +To learn more about billing accounts and identify your billing account type, see [View billing accounts in Azure portal](view-all-accounts.md). + +## Permission required to create Azure subscriptions + +You need one of the following permissions to create a Microsoft Customer Agreement (MCA) subscription request: + +- Owner or contributor role on the invoice section, billing profile or billing account. +- Azure subscription creator role on the invoice section. + +For more information, see [Subscription billing roles and tasks](understand-mca-roles.md#subscription-billing-roles-and-tasks). + +## Create a subscription request + +The subscription creator uses the following procedure to create a subscription request for a person in a different Azure Active Directory (Azure AD). After creation, the request is sent to the subscription acceptor (recipient) by email. + +A link to the subscription request is also created. The creator can manually share the link with the acceptor. + +1. Sign in to the [Azure portal](https://portal.azure.com). +1. Navigate to **Subscriptions** and then select **Add**. + :::image type="content" source="./media/create-subscription-request/subscription-add.png" alt-text="Screenshot showing the Subscription page where you Add a subscription." lightbox="./media/create-subscription-request/subscription-add.png" ::: +1. On the Create a subscription page, on the **Basics** tab, type a **Subscription name**. +1. Select the **Billing account** where the new subscription will get created. +1. Select the **Billing profile** where the subscription will get created. +1. Select the **Invoice section** where the subscription will get created. +1. Next to **Plan**, select **Microsoft Azure Plan for DevTest** if the subscription will be used for development or testing workloads. Otherwise, select **Microsoft Azure Plan**. + :::image type="content" source="./media/create-subscription-request/create-subscription-basics-tab.png" alt-text="Screenshot showing the Basics tab where you enter basic information about the subscription." lightbox="./media/create-subscription-request/create-subscription-basics-tab.png" ::: +1. Select the **Advanced** tab. +1. Select your **Subscription directory**. It's the Azure Active Directory (Azure AD) where the new subscription will get created. +1. The **Management group** option is unavailable because you can only select management groups in the current directory. +1. Select one or more **Subscription owners**. You can select only users or service principals in the selected subscription directory. You can't select guest directory users. If you select a service principal, enter its App ID. + :::image type="content" source="./media/create-subscription-request/create-subscription-advanced-tab-external.png" alt-text="Screenshot showing the Advanced tab where you specify the directory, management group, and owner.
" lightbox="./media/create-subscription-request/create-subscription-advanced-tab-external.png" ::: +1. Select the **Tags** tab. +1. Enter tag pairs for **Name** and **Value**. + :::image type="content" source="./media/create-subscription-request/create-subscription-tags-tab.png" alt-text="Screenshot showing the tags tab where you enter tag and value pairs." lightbox="./media/create-subscription-request/create-subscription-tags-tab.png" ::: +1. Select **Review + create**. You should see a message stating `The subscription will be created once the subscription owner accepts this request in the target directory.` +1. Verify that the subscription information is correct, then select **Request**. You'll see a notification that the request is getting created and sent to the acceptor. + +After the subscription request is sent, the acceptor receives an email with subscription acceptance information and a link where they can accept the new subscription. + +The subscription creator can also view the subscription request details from **Subscriptions** > **View Requests**. There they can open the subscription request to view its details and copy the **Accept ownership URL**. Then they can manually send the link to the subscription acceptor. + +:::image type="content" source="./media/create-subscription-request/view-requests-accept-url.png" alt-text="Screenshot showing the Accept ownership URL that you can copy to manually send to the acceptor." lightbox="./media/create-subscription-request/view-requests-accept-url.png" ::: + +## Accept subscription ownership + +The subscription acceptor receives an email inviting them to accept subscription ownership. Select the **Accept ownership** link to get started. + +:::image type="content" source="./media/create-subscription-request/accept-subscription-ownership-email.png" alt-text="Screenshot showing the email with the Accept Ownership link." lightbox="./media/create-subscription-request/accept-subscription-ownership-email.png" ::: + +Or, the subscription creator might have manually sent the acceptor an **Accept ownership URL** link. The acceptor uses the following steps to review and accept subscription ownership. + +1. In either case above, select the link to open the Accept subscription ownership page in the Azure portal. +1. On the Basics tab, you can optionally change the subscription name. +1. Select the Advanced tab where you can optionally change the Azure AD management group that the new subscription is associated with. You can only select management groups in the current directory. +1. Select the Tags tab to optionally enter tag pairs for **Name** and **Value**. +1. Select the Review + accept tab. You should see a message stating `Validation passed. Click on the Accept button below to initiate subscription creation`. +1. Select **Accept**. You'll see a status message stating that the subscription is being created. Then you'll see another status message stating that the subscription was successfully created. The acceptor becomes the subscription owner. + +After the new subscription is created, the acceptor can see it on the **Subscriptions** page. + +## Create an Azure subscription programmatically + +You can also create subscriptions programmatically. For more information, see [Create Azure subscriptions programmatically](programmatically-create-subscription.md). + +## Need help? Contact us. + +If you have questions or need help, [create a support request](https://go.microsoft.com/fwlink/?linkid=2083458).
+ +## Next steps + +- [Add or change Azure subscription administrators](add-change-subscription-administrator.md) +- [Move resources to new resource group or subscription](../../azure-resource-manager/management/move-resource-group-and-subscription.md) +- [Create management groups for resource organization and management](../../governance/management-groups/create-management-group-portal.md) +- [Cancel your subscription for Azure](cancel-azure-subscription.md) \ No newline at end of file diff --git a/articles/cost-management-billing/manage/create-subscription.md b/articles/cost-management-billing/manage/create-subscription.md index 0612a15123be2..02838141b73fa 100644 --- a/articles/cost-management-billing/manage/create-subscription.md +++ b/articles/cost-management-billing/manage/create-subscription.md @@ -1,102 +1,71 @@ --- -title: Create an additional Azure subscription -description: Learn how to add a new Azure subscription in the Azure portal. See information about billing account forms and view additional available resources. +title: Create a Microsoft Customer Agreement subscription +titleSuffix: Azure Cost Management + Billing +description: Learn how to add a new Microsoft Customer Agreement subscription in the Azure portal. See information about billing account forms and view other available resources. author: bandersmsft ms.reviewer: amberb ms.service: cost-management-billing ms.subservice: billing ms.topic: conceptual -ms.date: 11/11/2021 +ms.date: 05/25/2022 ms.author: banders --- -# Create an additional Azure subscription +# Create a Microsoft Customer Agreement subscription -You can create an additional subscription for your [Enterprise Agreement (EA)](https://azure.microsoft.com/pricing/enterprise-agreement/), [Microsoft Customer Agreement](https://azure.microsoft.com/pricing/purchase-options/microsoft-customer-agreement/) or [Microsoft Partner Agreement](https://www.microsoft.com/licensing/news/introducing-microsoft-partner-agreement) billing account in the Azure portal. You may want an additional subscription to avoid hitting subscription limits, to create separate environments for security, or to isolate data for compliance reasons. +This article helps you create a [Microsoft Customer Agreement](https://azure.microsoft.com/pricing/purchase-options/microsoft-customer-agreement/) subscription for yourself or for someone else in your current Azure Active Directory (Azure AD) directory/tenant. You may want another subscription to avoid hitting subscription quota limits, to create separate environments for security, or to isolate data for compliance reasons. -If you have a Microsoft Online Service Program (MOSP) billing account, you can create additional subscriptions in the [Azure portal](https://portal.azure.com/#blade/Microsoft_Azure_Billing/SubscriptionsBlade). +If you want to create a Microsoft Customer Agreement subscription in a different Azure AD tenant, see [Create an MCA subscription request](create-subscription-request.md). -To learn more about billing accounts and identify the type of your billing account, see [View billing accounts in Azure portal](view-all-accounts.md). +If you want to create subscriptions for Enterprise Agreements, see [Create an EA subscription](create-enterprise-subscription.md). If you're a Microsoft Partner and you want to create a subscription for a customer, see [Create a subscription for a partner's customer](create-customer-subscription.md). 
Or, if you have a Microsoft Online Service Program (MOSP) billing account, also called pay-as-you-go, you can create subscriptions starting in the [Azure portal](https://portal.azure.com/#blade/Microsoft_Azure_Billing/SubscriptionsBlade) and then you complete the process at https://signup.azure.com/. -## Permission required to create Azure subscriptions - -You need the following permissions to create subscriptions: - -|Billing account |Permission | -|---------|---------| -|Enterprise Agreement (EA) | Account Owner role on the Enterprise Agreement enrollment. For more information, see [Understand Azure Enterprise Agreement administrative roles in Azure](understand-ea-roles.md). | -|Microsoft Customer Agreement (MCA) | Owner or contributor role on the invoice section, billing profile or billing account. Or Azure subscription creator role on the invoice section. For more information, see [Subscription billing roles and task](understand-mca-roles.md#subscription-billing-roles-and-tasks). | -|Microsoft Partner Agreement (MPA) | Global Admin and Admin Agent role in the CSP partner organization. To learn more, see [Partner Center - Assign users roles and permissions](/partner-center/permissions-overview). The user needs to sign to partner tenant to create Azure subscriptions. | - -## Create a subscription in the Azure portal - -1. Sign in to the [Azure portal](https://portal.azure.com). -1. Search for **Subscriptions**. - - ![Screenshot that shows search in portal for subscription](./media/create-subscription/billing-search-subscription-portal.png) - -1. Select **Add**. - - ![Screenshot that shows the Add button in Subscriptions view](./media/create-subscription/subscription-add.png) - -1. If you have access to multiple billing accounts, select the billing account for which you want to create the subscription. - -1. Fill the form and select **Create**. The tables below list the fields on the form for each type of billing account. +To learn more about billing accounts and identify your billing account type, see [View billing accounts in Azure portal](view-all-accounts.md). -**Enterprise Agreement** - -|Field |Definition | -|---------|---------| -|Name | The display name that helps you easily identify the subscription in the Azure portal. | -|Offer | Select EA Dev/Test, if you plan to use this subscription for development or testing workloads else use Microsoft Azure Enterprise. DevTest offer must be enabled for your enrollment account to create EA Dev/Test subscriptions.| - -**Microsoft Customer Agreement** - -|Field |Definition | -|---------|---------| -|Billing profile | The charges for your subscription will be billed to the billing profile that you select. If you have access to only one billing profile, the selection will be greyed out. | -|Invoice section | The charges for your subscription will appear on this section of the billing profile's invoice. If you have access to only one invoice section, the selection will be greyed out. | -|Plan | Select Microsoft Azure Plan for DevTest, if you plan to use this subscription for development or testing workloads else use Microsoft Azure Plan. If only one plan is enabled for the billing profile, the selection will be greyed out. | -|Name | The display name that helps you easily identify the subscription in the Azure portal. | - -**Microsoft Partner Agreement** +## Permission required to create Azure subscriptions -|Field |Definition | -|---------|---------| -|Customer | The subscription is created for the customer that you select. 
If you have only one customer, the selection will be greyed out. | -|Reseller | The reseller that will provide services to the customer. This is an optional field, which is only applicable to Indirect providers in the CSP two-tier model. | -|Name | The display name that helps you easily identify the subscription in the Azure portal. | +You need the following permissions to create subscriptions for a Microsoft Customer Agreement (MCA): -## Create a subscription as a partner for a customer +- Owner or contributor role on the invoice section, billing profile or billing account. Or Azure subscription creator role on the invoice section. -Partners with a Microsoft Partner Agreement use the following steps to create a new Microsoft Azure Plan subscription for their customers. The subscription is created under the partner’s billing account and billing profile. +For more information, see [Subscription billing roles and tasks](understand-mca-roles.md#subscription-billing-roles-and-tasks). -1. Sign in to the Azure portal using your Partner Center account. -Make sure you are in your Partner Center directory (tenant), not a customer’s tenant. -1. Navigate to **Cost Management + Billing**. -1. Select the Billing scope for the billing account where the customer account resides. -1. In the left menu under **Billing**, select **Customers**. -1. On the Customers page, select the customer. -1. In the left menu, under **Products + services**, select **Azure Subscriptions**. -1. On the Azure subscription page, select **+ Add** to create a subscription. -1. Enter details about the subscription and when complete, select **Review + create**. +## Create a subscription +Use the following procedure to create a subscription for yourself or for someone in the current Azure Active Directory. When you're done, the new subscription is created immediately. -## Create an additional Azure subscription programmatically +1. Sign in to the [Azure portal](https://portal.azure.com). +1. Navigate to **Subscriptions** and then select **Add**. + :::image type="content" source="./media/create-subscription/subscription-add.png" alt-text="Screenshot showing the Subscription page where you Add a subscription." lightbox="./media/create-subscription/subscription-add.png" ::: +1. On the Create a subscription page, on the **Basics** tab, type a **Subscription name**. +1. Select the **Billing account** where the new subscription will get created. +1. Select the **Billing profile** where the subscription will get created. +1. Select the **Invoice section** where the subscription will get created. +1. Next to **Plan**, select **Microsoft Azure Plan for DevTest** if the subscription will be used for development or testing workloads. Otherwise, select **Microsoft Azure Plan**. + :::image type="content" source="./media/create-subscription/create-subscription-basics-tab.png" alt-text="Screenshot showing the Basics tab where you enter basic information about the subscription." lightbox="./media/create-subscription/create-subscription-basics-tab.png" ::: +1. Select the **Advanced** tab. +1. Select your **Subscription directory**. It's the Azure Active Directory (Azure AD) where the new subscription will get created. +1. Select a **Management group**. It's the Azure AD management group that the new subscription is associated with. You can only select management groups in the current directory. +1. Select one or more **Subscription owners**. You can select only users or service principals in the selected subscription directory.
You can't select guest directory users. If you select a service principal, enter its App ID. + :::image type="content" source="./media/create-subscription/create-subscription-advanced-tab.png" alt-text="Screenshot showing the Advanced tab where you can specify the directory, management group, and owner. " lightbox="./media/create-subscription/create-subscription-advanced-tab.png" ::: +1. Select the **Tags** tab. +1. Enter tag pairs for **Name** and **Value**. + :::image type="content" source="./media/create-subscription/create-subscription-tags-tab.png" alt-text="Screenshot showing the tags tab where you enter tag and value pairs." lightbox="./media/create-subscription/create-subscription-tags-tab.png" ::: +1. Select **Review + create**. You should see a message stating `Validation passed`. +1. Verify that the subscription information is correct, then select **Create**. You'll see a notification that the subscription is getting created. + +After the new subscription is created, the owner of the subscription can see it on the **Subscriptions** page. + +## Create an Azure subscription programmatically + +You can also create subscriptions programmatically. For more information, see [Create Azure subscriptions programmatically](programmatically-create-subscription.md). -You can also create additional subscriptions programmatically. For more information, see: +## Need help? Contact us. -- [Create EA subscriptions programmatically with latest API](programmatically-create-subscription-enterprise-agreement.md) -- [Create MCA subscriptions programmatically with latest API](programmatically-create-subscription-microsoft-customer-agreement.md) -- [Create MPA subscriptions programmatically with latest API](Programmatically-create-subscription-microsoft-customer-agreement.md) +If you have questions or need help, [create a support request](https://go.microsoft.com/fwlink/?linkid=2083458). ## Next steps - [Add or change Azure subscription administrators](add-change-subscription-administrator.md) - [Move resources to new resource group or subscription](../../azure-resource-manager/management/move-resource-group-and-subscription.md) - [Create management groups for resource organization and management](../../governance/management-groups/create-management-group-portal.md) -- [Cancel your subscription for Azure](cancel-azure-subscription.md) - -## Need help? Contact us. - -If you have questions or need help, [create a support request](https://go.microsoft.com/fwlink/?linkid=2083458). \ No newline at end of file +- [Cancel your Azure subscription](cancel-azure-subscription.md) \ No newline at end of file diff --git a/articles/cost-management-billing/manage/direct-ea-azure-usage-charges-invoices.md b/articles/cost-management-billing/manage/direct-ea-azure-usage-charges-invoices.md index da14a7063722b..622dfe0c0c5d0 100644 --- a/articles/cost-management-billing/manage/direct-ea-azure-usage-charges-invoices.md +++ b/articles/cost-management-billing/manage/direct-ea-azure-usage-charges-invoices.md @@ -65,9 +65,9 @@ Enterprise administrators can also view an overall summary of the charges for th ## Download or view your Azure billing invoice -You can download your invoice from the [Azure portal](https://portal.azure.com) or have it sent in email. Invoices are sent to whoever is set up to receive invoices for the enrollment. +An EA administrator can download the invoice from the [Azure portal](https://portal.azure.com) or have it sent in email.
Invoices are sent to whoever is set up to receive invoices for the enrollment. -Only an Enterprise Administrator has permission to view and get the billing invoice. To learn more about getting access to billing information, see [Manage access to Azure billing using roles](manage-billing-access.md). +Only an Enterprise Administrator has permission to view and download the billing invoice. To learn more about getting access to billing information, see [Manage access to Azure billing using roles](manage-billing-access.md). You receive an Azure invoice when any of the following events occur during your billing cycle: diff --git a/articles/cost-management-billing/manage/ea-azure-marketplace.md b/articles/cost-management-billing/manage/ea-azure-marketplace.md index 7c4ce08c88cbb..aed44cb8e7a21 100644 --- a/articles/cost-management-billing/manage/ea-azure-marketplace.md +++ b/articles/cost-management-billing/manage/ea-azure-marketplace.md @@ -6,7 +6,7 @@ ms.reviewer: sapnakeshari ms.service: cost-management-billing ms.subservice: enterprise ms.topic: conceptual -ms.date: 10/21/2021 +ms.date: 06/08/2022 ms.author: banders --- @@ -28,6 +28,9 @@ Some third-party reseller services available on Azure Marketplace now consume yo ### Partners +> [!NOTE] +> The Azure Marketplace price list feature in the EA portal is retired. + LSPs can download an Azure Marketplace price list from the price sheet page in the Azure Enterprise portal. Select the **Marketplace Price list** link in the upper right. Azure Marketplace price list shows all available services and their prices. To download the price list: @@ -76,7 +79,7 @@ The following services are billed hourly under an Enterprise Agreement instead o ### Azure RemoteApp -If you have an Enterprise Agreement, you pay for Azure RemoteApp based on your Enterprise Agreement price level. There aren't additional charges. The standard price includes an initial 40 hours. The unlimited price covers an initial 80 hours. RemoteApp stops emitting usage over 80 hours. +If you have an Enterprise Agreement, you pay for Azure RemoteApp based on your Enterprise Agreement price level. There aren't extra charges. The standard price includes an initial 40 hours. The unlimited price covers an initial 80 hours. RemoteApp stops emitting usage over 80 hours. ## Next steps diff --git a/articles/cost-management-billing/manage/ea-portal-agreements.md b/articles/cost-management-billing/manage/ea-portal-agreements.md index 3d1854b50509b..707d98098e4dd 100644 --- a/articles/cost-management-billing/manage/ea-portal-agreements.md +++ b/articles/cost-management-billing/manage/ea-portal-agreements.md @@ -37,6 +37,8 @@ An enrollment has one of the following status values. Each value determines how - Migrate to the Microsoft Online Subscription Program (MOSP) - Confirm disablement of all services associated with the enrollment +EA credit expires when the EA enrollment ends. + **Expired** - The EA enrollment expires when it reaches the enterprise agreement end date and is opted out of the extended term. Sign a new enrollment contract as soon as possible. Although your service won't be disabled immediately, there's a risk of it getting disabled. As of August 1, 2019, new opt-out forms aren't accepted for Azure commercial customers. Instead, all enrollments go into indefinite extended term. If you want to stop using Azure services, close your subscription in the [Azure portal](https://portal.azure.com). Or, your partner can submit a termination request. 
There's no change for customers with government agreement types. diff --git a/articles/cost-management-billing/manage/ea-portal-enrollment-invoices.md b/articles/cost-management-billing/manage/ea-portal-enrollment-invoices.md index 3a87adb361663..8685cd558aed0 100644 --- a/articles/cost-management-billing/manage/ea-portal-enrollment-invoices.md +++ b/articles/cost-management-billing/manage/ea-portal-enrollment-invoices.md @@ -3,7 +3,7 @@ title: Azure Enterprise enrollment invoices description: This article explains how to manage and act on your Azure Enterprise invoice. author: bandersmsft ms.author: banders -ms.date: 12/03/2021 +ms.date: 05/31/2022 ms.topic: conceptual ms.service: cost-management-billing ms.subservice: enterprise @@ -204,7 +204,7 @@ If an Amendment M503 is signed, you can move any agreement from any frequency to ### Request an invoice copy -To request a copy of your invoice, contact your partner. +If you're an indirect enterprise agreement customer, contact your partner to request a copy of your invoice. ## Credits and adjustments diff --git a/articles/cost-management-billing/manage/ea-transfers.md b/articles/cost-management-billing/manage/ea-transfers.md index 06096529624c4..d1f6211e7a4b1 100644 --- a/articles/cost-management-billing/manage/ea-transfers.md +++ b/articles/cost-management-billing/manage/ea-transfers.md @@ -6,7 +6,7 @@ ms.reviewer: baolcsva ms.service: cost-management-billing ms.subservice: enterprise ms.topic: conceptual -ms.date: 02/24/2022 +ms.date: 05/23/2022 ms.author: banders ms.custom: contperf-fy21q1 --- @@ -25,17 +25,22 @@ Keep the following points in mind when you transfer an enterprise account to a n - Only the accounts specified in the request are transferred. If all accounts are chosen, then they're all transferred. - The source enrollment keeps its status as active or extended. You can continue using the enrollment until it expires. +- You can't change account ownership during a transfer. After the account transfer is complete, the current account owner can change account ownership in the EA portal. Keep in mind that an EA administrator can't change account ownership. ### Prerequisites -When you request an account transfer, provide the following information: +When you request an account transfer with a support request, provide the following information: - The number of the target enrollment, account name, and account owner email of account to transfer - The enrollment number and account to transfer for the source enrollment Other points to keep in mind before an account transfer: -- Approval from an EA Administrator is required for the target and source enrollment. +- Approval from a full EA Administrator, not a read-only EA administrator, is required for the target and source enrollment. + - If you have only UPN (User Principal Name) entities configured as full EA administrators without access to e-mail, you must perform one of the following actions: + - Create a temporary full EA administrator account in the EA portal + — Or — + - Provide EA portal screenshot evidence of a user account associated with the UPN account - You should consider an enrollment transfer if an account transfer doesn't meet your requirements. - Your account transfer moves all services and subscriptions related to the specific accounts. - Your transferred account appears inactive under the source enrollment and appears active under the target enrollment when the transfer is complete. 
@@ -57,16 +62,17 @@ When you request to transfer an entire enterprise enrollment to an enrollment, t - Usage transferred may take up to 72 hours to be reflected in the new enrollment. - If department administrator (DA) or account owner (AO) view charges were enabled on the transferred enrollment, they must be enabled on the new enrollment. - If you're using API reports or Power BI, generate a new API key under your new enrollment. + - For reporting, all APIs use either the old enrollment or the new one, not both. If you need reporting from APIs for the old and new enrollments, you must create your own reports. - All Azure services, subscriptions, accounts, departments, and the entire enrollment structure, including all EA department administrators, transfer to a new target enrollment. - The enrollment status is set to _Transferred_. The transferred enrollment is available for historic usage reporting purposes only. - You can't add roles or subscriptions to a transferred enrollment. Transferred status prevents more usage against the enrollment. - Any remaining Azure Prepayment balance in the agreement is lost, including future terms. -- If the enrollment you're transferring from has reservation purchases, the reservation purchasing fee will remain in the source enrollment. However, all reservation benefits will be transferred across for use in the new enrollment. -- The marketplace one-time purchase fee and any monthly fixed fees already incurred on the old enrollment aren't transferred to the new enrollment. Consumption-based marketplace charges will be transferred. +- If the enrollment you're transferring from has reservation purchases, the historic (past) reservation purchasing fee will remain in the source enrollment. All future purchasing fees transfer to the new enrollment. Additionally, all reservation benefits will be transferred across for use in the new enrollment. +- The historic marketplace one-time purchase fee and any monthly fixed fees already incurred on the old enrollment aren't transferred to the new enrollment. Consumption-based marketplace charges will be transferred. ### Effective transfer date -The effective transfer day can be on or after the start date of the target enrollment. Transfers can only be backdated till the first day of the month in which request is made. +The effective transfer day can be on or after the start date of the target enrollment. Transfers can only be backdated until the first day of the month in which the request is made. Additionally, if individual subscriptions are deleted or transferred in the current month, then the deletion/transfer date becomes the new earliest possible effective transfer date. The source enrollment usage is charged against Azure Prepayment or as overage. Usage that occurs after the effective transfer date is transferred to the new enrollment and charged. @@ -90,9 +96,10 @@ Other points to keep in mind before an enrollment transfer: - Any API keys used in the source enrollment must be regenerated for the target enrollment. - If the source and destination enrollments are on different cloud instances, the transfer will fail. Support personnel can transfer only within the same cloud instance. - For reservations (reserved instances): - - The enrollment or account transfer between different currencies affects monthly reservation purchases. + - The enrollment or account transfer between different currencies affects monthly reservation purchases. The following image illustrates the effects.
+ :::image type="content" source="./media/ea-transfers/cross-currency-reservation-transfer-effects.png" alt-text="Diagram illustrating the effects of cross currency reservation transfers." border="false" lightbox="./media/ea-transfers/cross-currency-reservation-transfer-effects.png"::: - Whenever there's is a currency change during or after an enrollment transfer, reservations paid for monthly are canceled for the source enrollment at the time of next monthly payment for an individual reservation. This cancellation is intentional and affects only the monthly reservation purchases. - - You may have to repurchase the canceled monthly reservations from the source enrollment using the new enrollment in the local or new currency. + - You may have to repurchase the canceled monthly reservations from the source enrollment using the new enrollment in the local or new currency. If you repurchase a reservation, the purchase term (one or three years) is reset. The repurchase doesn't continue under the previous term. ### Auto enrollment transfer diff --git a/articles/cost-management-billing/manage/elevate-access-global-admin.md b/articles/cost-management-billing/manage/elevate-access-global-admin.md index 2757d963ad786..600ec0766e3af 100644 --- a/articles/cost-management-billing/manage/elevate-access-global-admin.md +++ b/articles/cost-management-billing/manage/elevate-access-global-admin.md @@ -1,6 +1,6 @@ --- title: Elevate access to manage billing accounts -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: Describes how to elevate access for a Global Administrator to manage billing accounts using the Azure portal or REST API. author: bandersmsft ms.reviewer: amberb diff --git a/articles/cost-management-billing/manage/grant-access-to-create-subscription.md b/articles/cost-management-billing/manage/grant-access-to-create-subscription.md index 4fe5ca2c6938f..90879b1563fbc 100644 --- a/articles/cost-management-billing/manage/grant-access-to-create-subscription.md +++ b/articles/cost-management-billing/manage/grant-access-to-create-subscription.md @@ -6,7 +6,7 @@ ms.service: cost-management-billing ms.subservice: billing ms.reviewer: andalmia ms.topic: conceptual -ms.date: 02/24/2022 +ms.date: 06/01/2022 ms.author: banders --- @@ -15,7 +15,9 @@ ms.author: banders As an Azure customer with an [Enterprise Agreement (EA)](https://azure.microsoft.com/pricing/enterprise-agreement/), you can give another user or service principal permission to create subscriptions billed to your account. In this article, you learn how to use [Azure role-based access control (Azure RBAC)](../../role-based-access-control/role-assignments-portal.md) to share the ability to create subscriptions, and how to audit subscription creations. You must have the Owner role on the account you wish to share. > [!NOTE] -> This API only works with the [legacy APIs for subscription creation](programmatically-create-subscription-preview.md). Unless you have a specific need to use the legacy APIs, you should use the information for the [latest GA version](programmatically-create-subscription-enterprise-agreement.md) about the latest API version [2019-10-01-preview](/rest/api/billing/2019-10-01-preview/enrollment-account-role-assignments/put). If you're migrating to use the newer APIs, you must grant owner permissions again using [2019-10-01-preview](/rest/api/billing/2019-10-01-preview/enrollment-account-role-assignments/put). 
Your previous configuration that uses the following APIs doesn't automatically convert for use with newer APIs. +> - This API only works with the [legacy APIs for subscription creation](programmatically-create-subscription-preview.md). +> - Unless you have a specific need to use the legacy APIs, use the [latest GA version](programmatically-create-subscription-enterprise-agreement.md) of the subscription creation APIs. **See [Enrollment Account Role Assignments - Put](/rest/api/billing/2019-10-01-preview/enrollment-account-role-assignments/put) to grant permission to create EA subscriptions with the latest API**. +> - If you're migrating to use the newer APIs, you must grant owner permissions again using [2019-10-01-preview](/rest/api/billing/2019-10-01-preview/enrollment-account-role-assignments/put). Your previous configuration that uses the following APIs doesn't automatically convert for use with newer APIs. [!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] diff --git a/articles/cost-management-billing/manage/how-to-create-azure-support-request-ea.md b/articles/cost-management-billing/manage/how-to-create-azure-support-request-ea.md index e87ee7e2b70fc..766d24fb16253 100644 --- a/articles/cost-management-billing/manage/how-to-create-azure-support-request-ea.md +++ b/articles/cost-management-billing/manage/how-to-create-azure-support-request-ea.md @@ -18,7 +18,7 @@ Azure enables you to create and manage support requests, also known as support t > The Azure portal URL is specific to the Azure cloud where your organization is deployed. > >- Azure portal for commercial use is: [https://portal.azure.com](https://portal.azure.com) ->- Azure portal for Germany is: [https://portal.microsoftazure.de](https://portal.microsoftazure.de) +>- Azure portal for Germany is: `https://portal.microsoftazure.de` >- Azure portal for the United States government is: [https://portal.azure.us](https://portal.azure.us) Azure provides unlimited support for subscription management, which includes billing, quota adjustments, and account transfers. You need a support plan for technical support. For more information, see [Compare support plans](https://azure.microsoft.com/support/plans). @@ -118,4 +118,4 @@ Follow these links to learn more: * [Azure support ticket REST API](/rest/api/support) * Engage with us on [Twitter](https://twitter.com/azuresupport) * Get help from your peers in the [Microsoft Q&A question page](/answers/products/azure) -* Learn more in [Azure Support FAQ](https://azure.microsoft.com/support/faq) \ No newline at end of file +* Learn more in [Azure Support FAQ](https://azure.microsoft.com/support/faq) diff --git a/articles/cost-management-billing/manage/manage-tax-information.md b/articles/cost-management-billing/manage/manage-tax-information.md new file mode 100644 index 0000000000000..8ac28a7bebade --- /dev/null +++ b/articles/cost-management-billing/manage/manage-tax-information.md @@ -0,0 +1,137 @@ +--- +title: Update tax details for an Azure billing account +description: This article describes how to update your Azure billing account tax details.
+author: bandersmsft +ms.reviewer: amberb +tags: billing +ms.service: cost-management-billing +ms.subservice: billing +ms.topic: how-to +ms.date: 05/23/2022 +ms.author: banders +ms.custom: references_regions +--- + +# Update tax details for an Azure billing account + +When you buy Azure products and services, the taxes that you pay are determined by one of two things: your sold-to address, or your ship-to/service usage address, if it's different. + +This article helps you review and update the sold to information, ship-to/service usage address, and tax IDs for your Azure billing account. The instructions to update vary by the billing account type. For more information about billing accounts and how to identify your billing account type, see [View billing accounts in Azure portal](view-all-accounts.md). An Azure billing account is separate from your Azure user account and [Microsoft account](https://account.microsoft.com/). + +> [!NOTE] +> When you update the sold to information, ship-to address, and Tax IDs in the Azure portal, the updated values are only used for invoices that are generated in the future. To make changes to an existing invoice, [create a support request](https://go.microsoft.com/fwlink/?linkid=2083458). + +## Update the sold to address + +1. Sign in to the Azure portal using the email address with an owner or a contributor role on the billing account for a Microsoft Customer Agreement (MCA). Or, sign in with an account administrator role for a Microsoft Online Subscription Program (MOSP) billing account. MOSP is also referred to as pay-as-you-go. +1. Search for **Cost Management + Billing**. + ![Screenshot that shows where to search in the Azure portal.](./media/manage-tax-information/search-cmb.png) +1. In the left menu, select **Properties** and then select **Update sold-to**. + :::image type="content" source="./media/manage-tax-information/update-sold-to.png" alt-text="Screenshot showing the properties for an M C A billing account where you modify the sold-to address." lightbox="./media/manage-tax-information/update-sold-to.png" ::: +1. Enter the new address and select **Save**. + > [!NOTE] + > Some accounts require additional verification before their sold-to can be updated. If your account requires manual approval, you are prompted to contact Azure support. + +## Update ship-to address for an MCA billing account + +Customers in Canada, Puerto Rico, and the United States can set the ship-to address for their MCA billing accounts. Each billing profile in their account can have its own ship-to address. To use multiple ship-to addresses, create multiple billing profiles, one for each ship-to address. + +1. Sign in to the Azure portal using the email address with an owner or a contributor role for the billing account or a billing profile for an MCA. +1. Search for **Cost Management + Billing**. +1. In the left menu under **Billing**, select **Billing profiles**. +1. Select a billing profile to update the ship-to address. + :::image type="content" source="./media/manage-tax-information/select-billing-profile.png" alt-text="Screenshot showing the Billing profiles page where you select a billing profile." lightbox="./media/manage-tax-information/select-billing-profile.png" ::: +1. In the left menu under **Settings**, select **Properties**. +1. Select **Update ship-to/service usage address**. + :::image type="content" source="./media/manage-tax-information/update-ship-to-01.png" alt-text="Screenshot showing where to update ship-to/service usage address." 
lightbox="./media/manage-tax-information/update-ship-to-01.png" ::: +1. Enter the new address and then select **Save**. + +## Update ship-to address for a MOSP billing account + +Customers with a Microsoft Online Service Program (MOSP) account, also called pay-as-you-go, can set ship-to address for their billing account. Each subscription in their account can have its own ship-to address. To use multiple ship-to addresses, create multiple subscriptions, one for each ship-to address. + +1. Sign in to the Azure portal using the email address that has account administrator permission on the account. +1. Search for **Subscriptions**. + :::image type="content" source="./media/manage-tax-information/search-subscriptions.png" alt-text="Screenshot showing where to search for Subscriptions in the Azure portal." lightbox="./media/manage-tax-information/search-subscriptions.png" ::: +1. Select a subscription from the list. +1. In the left menu under **Settings**, select **Properties**. +1. Select **Update Address**. + :::image type="content" source="./media/manage-tax-information/update-ship-to-02.png" alt-text="Screenshot that shows where to update the address for the MOSP billing account." lightbox="./media/manage-tax-information/update-ship-to-02.png" ::: +1. Enter the new address and then select **Save**. + +## Add your tax IDs + +In the Azure portal, tax IDs can only be updated for Microsoft Online Service Program (MOSP) or Microsoft Customer Agreement billing accounts that are created through the Azure website. + +Customers in the following countries or regions can add their Tax IDs. + +|Country/region|Country/region| +|---------|---------| +| Armenia | Australia | +| Armenia | Australia | +| Austria | Bahamas | +| Bahrain | Bangladesh | +| Belarus | Belgium | +| Brazil | Bulgaria | +|Cambodia | Cameroon | +|Chile | Colombia | +|Croatia | Cyprus | +|Czech Republic | Denmark | +| Estonia | Fiji | +| Finland | France | +|Georgia | Germany | +|Ghana | Greece | +|Guatemala | Hungary | +|Iceland | Italy | +| India 1 | Indonesia | +|Ireland | Isle of Man | +|Kenya | Korea | +| Latvia | Liechtenstein | +|Lithuania | Luxembourg | +|Malaysia | Malta | +| Mexico | Moldova | +| Monaco | Netherlands | +| New Zealand | Nigeria | +| Oman | Philippines | +| Poland | Portugal | +| Romania | Saudi Arabia | +| Serbia | Slovakia | +| Slovenia | South Africa | +|Spain | Sweden | +|Switzerland | Taiwan | +|Tajikistan | Thailand | +|Turkey | Ukraine | +|United Arab Emirates | United Kingdom | +|Uzbekistan | Vietnam | +|Zimbabwe | | + +1. Sign in to the Azure portal using the email address that has an owner or a contributor role on the billing account for an MCA or an account administrator role for a MOSP billing account. +1. Search for **Cost Management + Billing**. + ![Screenshot that shows where to search for Cost Management + Billing.](./media/manage-tax-information/search-cmb.png) +1. In the left menu under **Settings**, select **Properties**. +1. Select **Manage Tax IDs**. + :::image type="content" source="./media/manage-tax-information/update-tax-id.png" alt-text="Screenshot showing where to update the Tax I D." lightbox="./media/manage-tax-information/update-tax-id.png" ::: +1. Enter new tax IDs and then select **Save**. + > [!NOTE] + > If you don't see the Tax IDs section, Tax IDs are not yet collected for your region. Or, updating Tax IDs in the Azure portal isn't supported for your account. 
+ +1 Follow the instructions in the next section to add your Goods and Services Taxpayer Identification Number (GSTIN). + +## Add your GSTIN for billing accounts in India + +1. Sign in to the Azure portal using the email address that has account administrator permission on the account. +1. Search for **Subscriptions**. +1. Select a subscription from the list. +1. In the left menu, select **Properties**. +1. Select **Update Address**. + :::image type="content" source="./media/manage-tax-information/update-address-india.png" alt-text="Screenshot that shows where to update the tax I D." lightbox="./media/manage-tax-information/update-address-india.png" ::: +1. Enter the new GSTIN and then select **Save**. + :::image type="content" source="./media/manage-tax-information/update-tax-id-india.png" alt-text="Screenshot that shows where to update the G S T I N." lightbox="./media/manage-tax-information/update-tax-id-india.png" ::: + +## Need help? Contact us. + +If you have questions or need help, [create a support request](https://go.microsoft.com/fwlink/?linkid=2083458). + +## Next steps + +- [View your billing accounts](view-all-accounts.md) diff --git a/articles/cost-management-billing/manage/media/add-change-subscription-administrator/add-role.png b/articles/cost-management-billing/manage/media/add-change-subscription-administrator/add-role.png deleted file mode 100644 index 6fd6525a5e53c..0000000000000 Binary files a/articles/cost-management-billing/manage/media/add-change-subscription-administrator/add-role.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/add-change-subscription-administrator/role-assignments.png b/articles/cost-management-billing/manage/media/add-change-subscription-administrator/role-assignments.png deleted file mode 100644 index 85e95083eaf2d..0000000000000 Binary files a/articles/cost-management-billing/manage/media/add-change-subscription-administrator/role-assignments.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/billing-subscription-transfer/billing-select-subscription-to-transfer.png b/articles/cost-management-billing/manage/media/billing-subscription-transfer/billing-select-subscription-to-transfer.png deleted file mode 100644 index 4ed56afd64563..0000000000000 Binary files a/articles/cost-management-billing/manage/media/billing-subscription-transfer/billing-select-subscription-to-transfer.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/cancel-azure-subscription/check-free-service-usage/billing-search-subscriptions.png b/articles/cost-management-billing/manage/media/cancel-azure-subscription/check-free-service-usage/billing-search-subscriptions.png deleted file mode 100644 index 186f121ec32fa..0000000000000 Binary files a/articles/cost-management-billing/manage/media/cancel-azure-subscription/check-free-service-usage/billing-search-subscriptions.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/cancel-azure-subscription/check-free-service-usage/free-services-usage-csv.png b/articles/cost-management-billing/manage/media/cancel-azure-subscription/check-free-service-usage/free-services-usage-csv.png deleted file mode 100644 index b0e211d311eb2..0000000000000 Binary files a/articles/cost-management-billing/manage/media/cancel-azure-subscription/check-free-service-usage/free-services-usage-csv.png and /dev/null differ diff --git 
a/articles/cost-management-billing/manage/media/cancel-azure-subscription/check-free-service-usage/select-free-account-subscription.png b/articles/cost-management-billing/manage/media/cancel-azure-subscription/check-free-service-usage/select-free-account-subscription.png deleted file mode 100644 index 8f5c4066143d0..0000000000000 Binary files a/articles/cost-management-billing/manage/media/cancel-azure-subscription/check-free-service-usage/select-free-account-subscription.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/cancel-azure-subscription/check-free-service-usage/subscription-cost-information.png b/articles/cost-management-billing/manage/media/cancel-azure-subscription/check-free-service-usage/subscription-cost-information.png deleted file mode 100644 index 9d815b3525f96..0000000000000 Binary files a/articles/cost-management-billing/manage/media/cancel-azure-subscription/check-free-service-usage/subscription-cost-information.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/cancel-azure-subscription/check-free-service-usage/subscription-essential-information.png b/articles/cost-management-billing/manage/media/cancel-azure-subscription/check-free-service-usage/subscription-essential-information.png deleted file mode 100644 index 46649bf860ad1..0000000000000 Binary files a/articles/cost-management-billing/manage/media/cancel-azure-subscription/check-free-service-usage/subscription-essential-information.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/cancel-azure-subscription/check-free-service-usage/subscription-usage-free-services.png b/articles/cost-management-billing/manage/media/cancel-azure-subscription/check-free-service-usage/subscription-usage-free-services.png deleted file mode 100644 index b659747d55b06..0000000000000 Binary files a/articles/cost-management-billing/manage/media/cancel-azure-subscription/check-free-service-usage/subscription-usage-free-services.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/change-azure-account-profile/profile.png b/articles/cost-management-billing/manage/media/change-azure-account-profile/profile.png deleted file mode 100644 index 79680c30dad49..0000000000000 Binary files a/articles/cost-management-billing/manage/media/change-azure-account-profile/profile.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/change-credit-card/billing-profile-payment-methods.png b/articles/cost-management-billing/manage/media/change-credit-card/billing-profile-payment-methods.png deleted file mode 100644 index a4ec437e85366..0000000000000 Binary files a/articles/cost-management-billing/manage/media/change-credit-card/billing-profile-payment-methods.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/change-credit-card/billing-profile.png b/articles/cost-management-billing/manage/media/change-credit-card/billing-profile.png deleted file mode 100644 index be67a9ef4aedd..0000000000000 Binary files a/articles/cost-management-billing/manage/media/change-credit-card/billing-profile.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/change-credit-card/customer-led-switch-credit-card.png b/articles/cost-management-billing/manage/media/change-credit-card/customer-led-switch-credit-card.png deleted file mode 100644 index d8fd743c96fda..0000000000000 Binary files 
a/articles/cost-management-billing/manage/media/change-credit-card/customer-led-switch-credit-card.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/change-credit-card/sub-add-new-x.png b/articles/cost-management-billing/manage/media/change-credit-card/sub-add-new-x.png deleted file mode 100644 index dfcd1e8f9c352..0000000000000 Binary files a/articles/cost-management-billing/manage/media/change-credit-card/sub-add-new-x.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/change-credit-card/sub-change-active-x.png b/articles/cost-management-billing/manage/media/change-credit-card/sub-change-active-x.png deleted file mode 100644 index 0e49506addc41..0000000000000 Binary files a/articles/cost-management-billing/manage/media/change-credit-card/sub-change-active-x.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/check-free-service-usage/subscription-usage-free-services.png b/articles/cost-management-billing/manage/media/check-free-service-usage/subscription-usage-free-services.png deleted file mode 100644 index b659747d55b06..0000000000000 Binary files a/articles/cost-management-billing/manage/media/check-free-service-usage/subscription-usage-free-services.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/create-customer-subscription/all-billing-subscriptions-add.png b/articles/cost-management-billing/manage/media/create-customer-subscription/all-billing-subscriptions-add.png new file mode 100644 index 0000000000000..b73b7e98c95b2 Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-customer-subscription/all-billing-subscriptions-add.png differ diff --git a/articles/cost-management-billing/manage/media/create-customer-subscription/create-customer-subscription-basics-tab.png b/articles/cost-management-billing/manage/media/create-customer-subscription/create-customer-subscription-basics-tab.png new file mode 100644 index 0000000000000..cd57daccf9174 Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-customer-subscription/create-customer-subscription-basics-tab.png differ diff --git a/articles/cost-management-billing/manage/media/create-customer-subscription/customers-list.png b/articles/cost-management-billing/manage/media/create-customer-subscription/customers-list.png new file mode 100644 index 0000000000000..b968faeffbdff Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-customer-subscription/customers-list.png differ diff --git a/articles/cost-management-billing/manage/media/create-enterprise-subscription/create-subscription-advanced-tab.png b/articles/cost-management-billing/manage/media/create-enterprise-subscription/create-subscription-advanced-tab.png new file mode 100644 index 0000000000000..85aff05958356 Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-enterprise-subscription/create-subscription-advanced-tab.png differ diff --git a/articles/cost-management-billing/manage/media/create-enterprise-subscription/create-subscription-basics-tab-enterprise-agreement.png b/articles/cost-management-billing/manage/media/create-enterprise-subscription/create-subscription-basics-tab-enterprise-agreement.png new file mode 100644 index 0000000000000..23f8ec0d72803 Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-enterprise-subscription/create-subscription-basics-tab-enterprise-agreement.png differ diff --git 
a/articles/cost-management-billing/manage/media/create-enterprise-subscription/create-subscription-tags-tab.png b/articles/cost-management-billing/manage/media/create-enterprise-subscription/create-subscription-tags-tab.png new file mode 100644 index 0000000000000..4967c06ea19e1 Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-enterprise-subscription/create-subscription-tags-tab.png differ diff --git a/articles/cost-management-billing/manage/media/create-enterprise-subscription/subscription-add.png b/articles/cost-management-billing/manage/media/create-enterprise-subscription/subscription-add.png new file mode 100644 index 0000000000000..109b8445b07e8 Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-enterprise-subscription/subscription-add.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription-request/accept-subscription-ownership-email.png b/articles/cost-management-billing/manage/media/create-subscription-request/accept-subscription-ownership-email.png new file mode 100644 index 0000000000000..95d91fc48eb78 Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-subscription-request/accept-subscription-ownership-email.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription-request/create-subscription-advanced-tab-external.png b/articles/cost-management-billing/manage/media/create-subscription-request/create-subscription-advanced-tab-external.png new file mode 100644 index 0000000000000..ee04fe39962ee Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-subscription-request/create-subscription-advanced-tab-external.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription-request/create-subscription-basics-tab.png b/articles/cost-management-billing/manage/media/create-subscription-request/create-subscription-basics-tab.png new file mode 100644 index 0000000000000..fd2928db34eaf Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-subscription-request/create-subscription-basics-tab.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription-request/create-subscription-tags-tab.png b/articles/cost-management-billing/manage/media/create-subscription-request/create-subscription-tags-tab.png new file mode 100644 index 0000000000000..4967c06ea19e1 Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-subscription-request/create-subscription-tags-tab.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription-request/subscription-add.png b/articles/cost-management-billing/manage/media/create-subscription-request/subscription-add.png new file mode 100644 index 0000000000000..109b8445b07e8 Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-subscription-request/subscription-add.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription-request/view-requests-accept-url.png b/articles/cost-management-billing/manage/media/create-subscription-request/view-requests-accept-url.png new file mode 100644 index 0000000000000..21a2868075fa3 Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-subscription-request/view-requests-accept-url.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription/billing-search-subscription-portal.png 
b/articles/cost-management-billing/manage/media/create-subscription/billing-search-subscription-portal.png deleted file mode 100644 index d3560f3fbee24..0000000000000 Binary files a/articles/cost-management-billing/manage/media/create-subscription/billing-search-subscription-portal.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/create-subscription/create-subscription-advanced-tab.png b/articles/cost-management-billing/manage/media/create-subscription/create-subscription-advanced-tab.png new file mode 100644 index 0000000000000..85aff05958356 Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-subscription/create-subscription-advanced-tab.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription/create-subscription-basics-tab.png b/articles/cost-management-billing/manage/media/create-subscription/create-subscription-basics-tab.png new file mode 100644 index 0000000000000..fd2928db34eaf Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-subscription/create-subscription-basics-tab.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription/create-subscription-tags-tab.png b/articles/cost-management-billing/manage/media/create-subscription/create-subscription-tags-tab.png new file mode 100644 index 0000000000000..4967c06ea19e1 Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-subscription/create-subscription-tags-tab.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription/subscription-add.png b/articles/cost-management-billing/manage/media/create-subscription/subscription-add.png index 27658ca946c4c..109b8445b07e8 100644 Binary files a/articles/cost-management-billing/manage/media/create-subscription/subscription-add.png and b/articles/cost-management-billing/manage/media/create-subscription/subscription-add.png differ diff --git a/articles/cost-management-billing/manage/media/ea-transfers/cross-currency-reservation-transfer-effects.png b/articles/cost-management-billing/manage/media/ea-transfers/cross-currency-reservation-transfer-effects.png new file mode 100644 index 0000000000000..51d106d8869d8 Binary files /dev/null and b/articles/cost-management-billing/manage/media/ea-transfers/cross-currency-reservation-transfer-effects.png differ diff --git a/articles/cost-management-billing/manage/media/manage-billing-access/billing-click-add-role-assignment.png b/articles/cost-management-billing/manage/media/manage-billing-access/billing-click-add-role-assignment.png deleted file mode 100644 index 393f44152cd9f..0000000000000 Binary files a/articles/cost-management-billing/manage/media/manage-billing-access/billing-click-add-role-assignment.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/manage-billing-access/billing-save-role-assignment.png b/articles/cost-management-billing/manage/media/manage-billing-access/billing-save-role-assignment.png deleted file mode 100644 index 3524e000a28be..0000000000000 Binary files a/articles/cost-management-billing/manage/media/manage-billing-access/billing-save-role-assignment.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mca-section-invoice/billing-search-cost-management-billing.png b/articles/cost-management-billing/manage/media/manage-tax-information/search-cmb.png similarity index 100% rename from 
articles/cost-management-billing/manage/media/mca-section-invoice/billing-search-cost-management-billing.png rename to articles/cost-management-billing/manage/media/manage-tax-information/search-cmb.png diff --git a/articles/cost-management-billing/manage/media/manage-tax-information/search-subscriptions.png b/articles/cost-management-billing/manage/media/manage-tax-information/search-subscriptions.png new file mode 100644 index 0000000000000..f60fdba422cc7 Binary files /dev/null and b/articles/cost-management-billing/manage/media/manage-tax-information/search-subscriptions.png differ diff --git a/articles/cost-management-billing/manage/media/manage-tax-information/select-billing-profile.png b/articles/cost-management-billing/manage/media/manage-tax-information/select-billing-profile.png new file mode 100644 index 0000000000000..3b54a998dd69e Binary files /dev/null and b/articles/cost-management-billing/manage/media/manage-tax-information/select-billing-profile.png differ diff --git a/articles/cost-management-billing/manage/media/manage-tax-information/update-address-india.png b/articles/cost-management-billing/manage/media/manage-tax-information/update-address-india.png new file mode 100644 index 0000000000000..5fb66ce9a8f72 Binary files /dev/null and b/articles/cost-management-billing/manage/media/manage-tax-information/update-address-india.png differ diff --git a/articles/cost-management-billing/manage/media/manage-tax-information/update-ship-to-01.png b/articles/cost-management-billing/manage/media/manage-tax-information/update-ship-to-01.png new file mode 100644 index 0000000000000..e1f4b392fe05f Binary files /dev/null and b/articles/cost-management-billing/manage/media/manage-tax-information/update-ship-to-01.png differ diff --git a/articles/cost-management-billing/manage/media/manage-tax-information/update-ship-to-02.png b/articles/cost-management-billing/manage/media/manage-tax-information/update-ship-to-02.png new file mode 100644 index 0000000000000..0d2b29c02f140 Binary files /dev/null and b/articles/cost-management-billing/manage/media/manage-tax-information/update-ship-to-02.png differ diff --git a/articles/cost-management-billing/manage/media/manage-tax-information/update-sold-to.png b/articles/cost-management-billing/manage/media/manage-tax-information/update-sold-to.png new file mode 100644 index 0000000000000..d4f22eb450db6 Binary files /dev/null and b/articles/cost-management-billing/manage/media/manage-tax-information/update-sold-to.png differ diff --git a/articles/cost-management-billing/manage/media/manage-tax-information/update-tax-id-india.png b/articles/cost-management-billing/manage/media/manage-tax-information/update-tax-id-india.png new file mode 100644 index 0000000000000..8ddb223a36261 Binary files /dev/null and b/articles/cost-management-billing/manage/media/manage-tax-information/update-tax-id-india.png differ diff --git a/articles/cost-management-billing/manage/media/manage-tax-information/update-tax-id.png b/articles/cost-management-billing/manage/media/manage-tax-information/update-tax-id.png new file mode 100644 index 0000000000000..03e75683e5149 Binary files /dev/null and b/articles/cost-management-billing/manage/media/manage-tax-information/update-tax-id.png differ diff --git a/articles/cost-management-billing/manage/media/mca-check-azure-credits-balance/billing-mca-credits-list.png b/articles/cost-management-billing/manage/media/mca-check-azure-credits-balance/billing-mca-credits-list.png deleted file mode 100644 index cc4f3c253ad3d..0000000000000 
Binary files a/articles/cost-management-billing/manage/media/mca-check-azure-credits-balance/billing-mca-credits-list.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mca-check-azure-credits-balance/billing-mca-credits-overview.png b/articles/cost-management-billing/manage/media/mca-check-azure-credits-balance/billing-mca-credits-overview.png deleted file mode 100644 index 3b6e64794139b..0000000000000 Binary files a/articles/cost-management-billing/manage/media/mca-check-azure-credits-balance/billing-mca-credits-overview.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mca-check-azure-credits-balance/mca-account-properties.png b/articles/cost-management-billing/manage/media/mca-check-azure-credits-balance/mca-account-properties.png deleted file mode 100644 index 658fe1d6af67e..0000000000000 Binary files a/articles/cost-management-billing/manage/media/mca-check-azure-credits-balance/mca-account-properties.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mca-check-azure-credits-balance/mca-credit-balance-multiple-profiles.png b/articles/cost-management-billing/manage/media/mca-check-azure-credits-balance/mca-credit-balance-multiple-profiles.png deleted file mode 100644 index 77bfb93fbecc4..0000000000000 Binary files a/articles/cost-management-billing/manage/media/mca-check-azure-credits-balance/mca-credit-balance-multiple-profiles.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/list-of-scopes.png b/articles/cost-management-billing/manage/media/mca-request-billing-ownership/list-of-scopes.png deleted file mode 100644 index d5fd90d86b8f6..0000000000000 Binary files a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/list-of-scopes.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/mca-select-invoice-sections.png b/articles/cost-management-billing/manage/media/mca-request-billing-ownership/mca-select-invoice-sections.png deleted file mode 100644 index 4db859cd7bdd8..0000000000000 Binary files a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/mca-select-invoice-sections.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/mca-select-profiles.png b/articles/cost-management-billing/manage/media/mca-request-billing-ownership/mca-select-profiles.png deleted file mode 100644 index 0f030e52eb42c..0000000000000 Binary files a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/mca-select-profiles.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/mca-select-transfer-requests-for-status.png b/articles/cost-management-billing/manage/media/mca-request-billing-ownership/mca-select-transfer-requests-for-status.png deleted file mode 100644 index 32247f59b793b..0000000000000 Binary files a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/mca-select-transfer-requests-for-status.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/mca-select-transfer-requests.png b/articles/cost-management-billing/manage/media/mca-request-billing-ownership/mca-select-transfer-requests.png deleted file mode 100644 index de61b7947a275..0000000000000 Binary files 
a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/mca-select-transfer-requests.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/mca-send-transfer-requests.png b/articles/cost-management-billing/manage/media/mca-request-billing-ownership/mca-send-transfer-requests.png deleted file mode 100644 index bf3781b573f09..0000000000000 Binary files a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/mca-send-transfer-requests.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/mca-transfer-completed.png b/articles/cost-management-billing/manage/media/mca-request-billing-ownership/mca-transfer-completed.png deleted file mode 100644 index 92f0ebf6b13b4..0000000000000 Binary files a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/mca-transfer-completed.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/review-transfer-requests.png b/articles/cost-management-billing/manage/media/mca-request-billing-ownership/review-transfer-requests.png deleted file mode 100644 index b861d31ae5686..0000000000000 Binary files a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/review-transfer-requests.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/transfer-request-sent-pending.png b/articles/cost-management-billing/manage/media/mca-request-billing-ownership/transfer-request-sent-pending.png deleted file mode 100644 index 2da569bd293ae..0000000000000 Binary files a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/transfer-request-sent-pending.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/transfer-requests-new-request.png b/articles/cost-management-billing/manage/media/mca-request-billing-ownership/transfer-requests-new-request.png deleted file mode 100644 index f890fb48c6ed0..0000000000000 Binary files a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/transfer-requests-new-request.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/validate-transfer-request.png b/articles/cost-management-billing/manage/media/mca-request-billing-ownership/validate-transfer-request.png deleted file mode 100644 index fa17c58eada91..0000000000000 Binary files a/articles/cost-management-billing/manage/media/mca-request-billing-ownership/validate-transfer-request.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mca-section-invoice/mca-select-invoice-sections.png b/articles/cost-management-billing/manage/media/mca-section-invoice/mca-select-invoice-sections.png deleted file mode 100644 index 3be4e56ff2090..0000000000000 Binary files a/articles/cost-management-billing/manage/media/mca-section-invoice/mca-select-invoice-sections.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mca-section-invoice/search-subscriptions-zoomed-in.png b/articles/cost-management-billing/manage/media/mca-section-invoice/search-subscriptions-zoomed-in.png deleted file mode 100644 index ccaa1d20c9dde..0000000000000 Binary files a/articles/cost-management-billing/manage/media/mca-section-invoice/search-subscriptions-zoomed-in.png and /dev/null differ diff --git 
a/articles/cost-management-billing/manage/media/microsoft-customer-agreement-setup-account/ea-microsoft-customer-agreement-invite-admins.png b/articles/cost-management-billing/manage/media/microsoft-customer-agreement-setup-account/ea-microsoft-customer-agreement-invite-admins.png deleted file mode 100644 index b61a2b718db0a..0000000000000 Binary files a/articles/cost-management-billing/manage/media/microsoft-customer-agreement-setup-account/ea-microsoft-customer-agreement-invite-admins.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mpa-request-ownership/mpa-review-transfer-request.png b/articles/cost-management-billing/manage/media/mpa-request-ownership/mpa-review-transfer-request.png deleted file mode 100644 index 3280b5e22ba33..0000000000000 Binary files a/articles/cost-management-billing/manage/media/mpa-request-ownership/mpa-review-transfer-request.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mpa-request-ownership/review-transfer-requests.png b/articles/cost-management-billing/manage/media/mpa-request-ownership/review-transfer-requests.png deleted file mode 100644 index 758b735295f2c..0000000000000 Binary files a/articles/cost-management-billing/manage/media/mpa-request-ownership/review-transfer-requests.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/mpa-request-ownership/validate-transfer-request.png b/articles/cost-management-billing/manage/media/mpa-request-ownership/validate-transfer-request.png deleted file mode 100644 index 662027ccced6f..0000000000000 Binary files a/articles/cost-management-billing/manage/media/mpa-request-ownership/validate-transfer-request.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/pay-by-invoice/pay-by-invoice.png b/articles/cost-management-billing/manage/media/pay-by-invoice/pay-by-invoice.png deleted file mode 100644 index b303e925963d8..0000000000000 Binary files a/articles/cost-management-billing/manage/media/pay-by-invoice/pay-by-invoice.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/resolve-past-due-balance/settle-balance-entry-point.png b/articles/cost-management-billing/manage/media/resolve-past-due-balance/settle-balance-entry-point.png deleted file mode 100644 index 074017427c106..0000000000000 Binary files a/articles/cost-management-billing/manage/media/resolve-past-due-balance/settle-balance-entry-point.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/resolve-past-due-balance/settle-balance-screen.png b/articles/cost-management-billing/manage/media/resolve-past-due-balance/settle-balance-screen.png deleted file mode 100644 index 6bdba42f4b04a..0000000000000 Binary files a/articles/cost-management-billing/manage/media/resolve-past-due-balance/settle-balance-screen.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/troubleshoot-azure-sign-up/1.png b/articles/cost-management-billing/manage/media/troubleshoot-azure-sign-up/1.png deleted file mode 100644 index c1515fb97ac7c..0000000000000 Binary files a/articles/cost-management-billing/manage/media/troubleshoot-azure-sign-up/1.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/pay-by-invoice.md b/articles/cost-management-billing/manage/pay-by-invoice.md index 3d69d0667468e..94ecdb1aa1080 100644 --- a/articles/cost-management-billing/manage/pay-by-invoice.md +++ b/articles/cost-management-billing/manage/pay-by-invoice.md 
@@ -7,14 +7,14 @@ tags: billing ms.service: cost-management-billing ms.subservice: billing ms.topic: how-to -ms.date: 04/25/2022 +ms.date: 05/24/2022 ms.author: banders ms.custom: contperf-fy21q2 --- # Pay for your Azure subscription by check or wire transfer -This article applies to customers with a Microsoft Customer Agreement (MCA) and to customers who signed up for Azure through the Azure website (for an Microsoft Online Services Program account also called pay-as-you-go account). If you signed up for Azure through a Microsoft representative, then your default payment method is already be set to *check or wire transfer*. +This article applies to customers with a Microsoft Customer Agreement (MCA) and to customers who signed up for Azure through the Azure website (for a Microsoft Online Services Program account, also called a pay-as-you-go account). If you signed up for Azure through a Microsoft representative, then your default payment method is already set to *check or wire transfer*. If you switch to pay by check or wire transfer, that means you pay your bill within 30 days of the invoice date by check/wire transfer. @@ -79,6 +79,10 @@ If you're not automatically approved, you can submit a request to Azure support - Contact Phone: - Contact Email: - Justification about why you want the check or wire transfer payment option instead of a credit card: + - File upload: Attach legal documentation showing the legal company name and company address. Your information in the Azure portal should match the legal information registered in the legal document. You can provide one of the following examples: + - A certificate of incorporation signed by the company’s legal representatives. + - Any government-issued documents having the company name and address. For example, a tax certification. + - Company registration form signed and issued by the government. - For cores increase, provide the following additional information: - (Old quota) Existing Cores: - (New quota) Requested cores: diff --git a/articles/cost-management-billing/manage/programmatically-create-subscription-enterprise-agreement.md b/articles/cost-management-billing/manage/programmatically-create-subscription-enterprise-agreement.md index 64ba8f4a0d795..8fba07bad01e2 100644 --- a/articles/cost-management-billing/manage/programmatically-create-subscription-enterprise-agreement.md +++ b/articles/cost-management-billing/manage/programmatically-create-subscription-enterprise-agreement.md @@ -5,8 +5,8 @@ author: bandersmsft ms.service: cost-management-billing ms.subservice: billing ms.topic: how-to -ms.date: 05/05/2022 -ms.reviewer: andalmia +ms.date: 06/06/2022 +ms.reviewer: sapnakeshari ms.author: banders ms.custom: devx-track-azurepowershell, devx-track-azurecli --- @@ -26,7 +26,7 @@ When you create an Azure subscription programmatically, that subscription is gov A user must have an Owner role on an Enrollment Account to create a subscription. There are two ways to get the role: * The Enterprise Administrator of your enrollment can [make you an Account Owner](https://ea.azure.com/helpdocs/addNewAccount) (sign in required) which makes you an Owner of the Enrollment Account. -* An existing Owner of the Enrollment Account can [grant you access](/rest/api/billing/2019-10-01-preview/enrollmentaccountroleassignments/put). +* An existing Owner of the Enrollment Account can [grant you access](/rest/api/billing/2019-10-01-preview/enrollmentaccountroleassignments/put).
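As an illustration of what that grant looks like in practice, the following Python sketch calls the 2019-10-01-preview billing role assignments endpoint to give a principal the subscription creator role on an enrollment account. The enrollment numbers, principal IDs, and the role definition GUID shown here are placeholders and assumptions; verify them against the role assignment API reference linked above before relying on the call.

```python
# Minimal sketch (assumptions noted in the text above): assign the enrollment account
# "subscription creator" role to a principal with the 2019-10-01-preview API.
import uuid
import requests

token = "<bearer-token-from-an-enrollment-account-owner>"
billing_account = "1234567"                     # EA enrollment (billing account) number
enrollment_account = "7654321"                  # enrollment account name
principal_id = "<object-id-of-user-or-service-principal>"
principal_tenant_id = "<azure-ad-tenant-id>"

# Assumed GUID for the "Enrollment account subscription creator" role definition;
# confirm it in the role assignment API reference before use.
role_definition_id = (
    f"/providers/Microsoft.Billing/billingAccounts/{billing_account}"
    f"/enrollmentAccounts/{enrollment_account}"
    "/billingRoleDefinitions/24f8edb6-1668-4659-b5e2-40bb5f3a7d7e"
)

assignment_name = str(uuid.uuid4())             # a new GUID names the role assignment
url = (
    "https://management.azure.com/providers/Microsoft.Billing"
    f"/billingAccounts/{billing_account}/enrollmentAccounts/{enrollment_account}"
    f"/billingRoleAssignments/{assignment_name}"
)

response = requests.put(
    url,
    params={"api-version": "2019-10-01-preview"},
    headers={"Authorization": f"Bearer {token}"},
    json={
        "properties": {
            "principalId": principal_id,
            "principalTenantId": principal_tenant_id,
            "roleDefinitionId": role_definition_id,
        }
    },
    timeout=30,
)
response.raise_for_status()
print(response.status_code, response.json())
```

In practice, the same request shape is typically used whether the principal is a user or a service principal; only the `principalId` and `principalTenantId` values change.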
To use a service principal (SPN) to create an EA subscription, an Owner of the Enrollment Account must [grant that service principal the ability to create subscriptions](/rest/api/billing/2019-10-01-preview/enrollmentaccountroleassignments/put). @@ -37,6 +37,7 @@ For more information about the EA role assignment API request, see [Assign roles > [!NOTE] > - Ensure that you use the correct API version to give the enrollment account owner permissions. For this article and for the APIs documented in it, use the [2019-10-01-preview](/rest/api/billing/2019-10-01-preview/enrollmentaccountroleassignments/put) API. > - If you're migrating to use the newer APIs, your previous configuration made with the [2015-07-01 version](grant-access-to-create-subscription.md) doesn't automatically convert for use with the newer APIs. +> - The Enrollment Account information is only visible when the user's role is Account Owner. When a user has multiple roles, the API uses the user's least restrictive role. ## Find accounts you have access to @@ -175,6 +176,8 @@ Using one of the following methods, you'll create a subscription alias name. We An alias is used for simple substitution of a user-defined string instead of the subscription GUID. In other words, you can use it as a shortcut. You can learn more about alias at [Alias - Create](/rest/api/subscription/2020-09-01/alias/create). In the following examples, `sampleAlias` is created but you can use any string you like. +If you have multiple user roles in addition to the Account Owner role, then you must retrieve the account ID from the Azure portal. Then you can use the ID to programmatically create subscriptions. + ### [REST](#tab/rest) Call the PUT API to create a subscription creation request/alias. diff --git a/articles/cost-management-billing/reservations/media/prepare-buy-reservation/prepurchase-schedule.png b/articles/cost-management-billing/reservations/media/prepare-buy-reservation/prepurchase-schedule.png deleted file mode 100644 index 84eda821ea635..0000000000000 Binary files a/articles/cost-management-billing/reservations/media/prepare-buy-reservation/prepurchase-schedule.png and /dev/null differ diff --git a/articles/cost-management-billing/reservations/media/prepare-buy-reservation/purchase-reservation.png b/articles/cost-management-billing/reservations/media/prepare-buy-reservation/purchase-reservation.png deleted file mode 100644 index 128094574a345..0000000000000 Binary files a/articles/cost-management-billing/reservations/media/prepare-buy-reservation/purchase-reservation.png and /dev/null differ diff --git a/articles/cost-management-billing/reservations/media/prepare-buy-reservation/select-product-to-purchase.png b/articles/cost-management-billing/reservations/media/prepare-buy-reservation/select-product-to-purchase.png deleted file mode 100644 index 4bbd590aacb7c..0000000000000 Binary files a/articles/cost-management-billing/reservations/media/prepare-buy-reservation/select-product-to-purchase.png and /dev/null differ diff --git a/articles/cost-management-billing/reservations/media/reserved-instance-purchase-recommendations/peak-savings.png b/articles/cost-management-billing/reservations/media/reserved-instance-purchase-recommendations/peak-savings.png deleted file mode 100644 index 7800f9e7802a4..0000000000000 Binary files a/articles/cost-management-billing/reservations/media/reserved-instance-purchase-recommendations/peak-savings.png and /dev/null differ diff --git 
a/articles/cost-management-billing/reservations/media/troubleshoot-no-eligible-subscriptions/no-owner-access.png b/articles/cost-management-billing/reservations/media/troubleshoot-no-eligible-subscriptions/no-owner-access.png deleted file mode 100644 index ec7b671f47e55..0000000000000 Binary files a/articles/cost-management-billing/reservations/media/troubleshoot-no-eligible-subscriptions/no-owner-access.png and /dev/null differ diff --git a/articles/cost-management-billing/reservations/media/view-reservations/portal-billing-reservation-transaction-results.png b/articles/cost-management-billing/reservations/media/view-reservations/portal-billing-reservation-transaction-results.png deleted file mode 100644 index 7e9db2f1c5a5b..0000000000000 Binary files a/articles/cost-management-billing/reservations/media/view-reservations/portal-billing-reservation-transaction-results.png and /dev/null differ diff --git a/articles/cost-management-billing/reservations/media/view-reservations/portal-cm-billing-search.png b/articles/cost-management-billing/reservations/media/view-reservations/portal-cm-billing-search.png deleted file mode 100644 index 3a14c31018659..0000000000000 Binary files a/articles/cost-management-billing/reservations/media/view-reservations/portal-cm-billing-search.png and /dev/null differ diff --git a/articles/cost-management-billing/reservations/media/view-reservations/portal-reservation-search.png b/articles/cost-management-billing/reservations/media/view-reservations/portal-reservation-search.png deleted file mode 100644 index a8e7f432278d3..0000000000000 Binary files a/articles/cost-management-billing/reservations/media/view-reservations/portal-reservation-search.png and /dev/null differ diff --git a/articles/cost-management-billing/reservations/media/view-reservations/reservation-utilization.png b/articles/cost-management-billing/reservations/media/view-reservations/reservation-utilization.png deleted file mode 100644 index 50f5478e95828..0000000000000 Binary files a/articles/cost-management-billing/reservations/media/view-reservations/reservation-utilization.png and /dev/null differ diff --git a/articles/cost-management-billing/reservations/media/view-reservations/view-reservations.png b/articles/cost-management-billing/reservations/media/view-reservations/view-reservations.png deleted file mode 100644 index b2014a39f9037..0000000000000 Binary files a/articles/cost-management-billing/reservations/media/view-reservations/view-reservations.png and /dev/null differ diff --git a/articles/cost-management-billing/reservations/prepare-buy-reservation.md b/articles/cost-management-billing/reservations/prepare-buy-reservation.md index b8517dbfde679..43e90b41553b9 100644 --- a/articles/cost-management-billing/reservations/prepare-buy-reservation.md +++ b/articles/cost-management-billing/reservations/prepare-buy-reservation.md @@ -30,7 +30,7 @@ You can scope a reservation to a subscription or resource groups. Setting the sc ### Reservation scoping options -You have three options to scope a reservation, depending on your needs: +You have four options to scope a reservation, depending on your needs: - **Single resource group scope** — Applies the reservation discount to the matching resources in the selected resource group only. - **Single subscription scope** — Applies the reservation discount to the matching resources in the selected subscription. 
diff --git a/articles/cost-management-billing/reservations/reservation-amortization.md b/articles/cost-management-billing/reservations/reservation-amortization.md index 0f5f4063d6025..5f984be060282 100644 --- a/articles/cost-management-billing/reservations/reservation-amortization.md +++ b/articles/cost-management-billing/reservations/reservation-amortization.md @@ -1,6 +1,6 @@ --- title: View amortized reservation costs -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article helps you understand what amortized reservation costs are and how to view them in cost analysis. author: bandersmsft ms.reviewer: primittal diff --git a/articles/cost-management-billing/reservations/reservation-renew.md b/articles/cost-management-billing/reservations/reservation-renew.md index 58896c48c15d4..01ef55972ff9c 100644 --- a/articles/cost-management-billing/reservations/reservation-renew.md +++ b/articles/cost-management-billing/reservations/reservation-renew.md @@ -43,7 +43,7 @@ The following conditions are required to renew a reservation: ## Default renewal settings -By default, the renewal inherits all properties from the expiring reservation. A reservation renewal purchase has the same SKU, region, scope, billing subscription, term, and quantity. +By default, the renewal inherits all properties except automatic renewal setting from the expiring reservation. A reservation renewal purchase has the same SKU, region, scope, billing subscription, term, and quantity. However, you can update the renewal reservation purchase quantity to optimize your savings. diff --git a/articles/cost-management-billing/reservations/understand-azure-data-explorer-reservation-charges.md b/articles/cost-management-billing/reservations/understand-azure-data-explorer-reservation-charges.md index 38d8f5e7febf2..5aa9a689464b1 100644 --- a/articles/cost-management-billing/reservations/understand-azure-data-explorer-reservation-charges.md +++ b/articles/cost-management-billing/reservations/understand-azure-data-explorer-reservation-charges.md @@ -1,25 +1,26 @@ --- -title: Understand how the reservation discount is applied to Azure Data Explorer +title: Reservation discount for Azure Data Explorer description: Learn how the reservation discount is applied to Azure Data Explorer markup meter. author: avneraa ms.author: avnera ms.reviewer: orspodek ms.service: data-explorer ms.topic: conceptual -ms.date: 09/15/2021 +ms.date: 05/31/2022 +ms.custom: kr2b-contr-experiment --- -# Understand how the reservation discount is applied to Azure Data Explorer +# How the reservation discount is applied to Azure Data Explorer After you buy an Azure Data Explorer reserved capacity, the reservation discount is automatically applied to Azure Data Explorer resources that match the attributes and quantity of the reservation. A reservation includes the Azure Data Explorer markup charges. It doesn't include compute, networking, storage, or any other Azure resource used to operate Azure Data Explorer cluster. Reservations for these resources should be bought separately. -## How reservation discount is applied +## Reservation discount usage -A reservation discount is on a "*use-it-or-lose-it*" basis. So, if you don't have matching resources for any hour, then you lose a reservation quantity for that hour. You can't carry forward unused reserved hours. +A reservation discount is on a "*use-it-or-lose-it*" basis. 
So, if you don't have matching resources for any hour, then you lose a reservation quantity for that hour. You can't carry forward discounts for unused reserved hours. When you shut down a resource, the reservation discount automatically applies to another matching resource in the specified scope. If no matching resources are found in the specified scope, then the reserved hours are *lost*. -## Reservation discount applied to Azure Data Explorer clusters +## Discount for other resources A reservation discount is applied to Azure Data Explorer markup consumption on an hour-by-hour basis. For Azure Data Explorer resources that don't run the full hour, the reservation discount is automatically applied to other Data Explorer resources that match the reservation attributes. The discount can apply to Azure Data Explorer resources that are running concurrently. If you don't have Azure Data Explorer resources that run for the full hour and that match the reservation attributes, you don't get the full benefit of the reservation discount for that hour. @@ -61,8 +62,8 @@ If you have questions or need help, [create a support request](https://go.micros To learn more about Azure reservations, see the following articles: * [Prepay for Azure Data Explorer compute resources with Azure Azure Data Explorer reserved capacity](/azure/data-explorer/pricing-reserved-capacity) -* [What are reservations for Azure](save-compute-costs-reservations.md) +* [What are reservations for Azure?](save-compute-costs-reservations.md) * [Manage Azure reservations](manage-reserved-vm-instance.md) -* [Understand reservation usage for your Pay-As-You-Go subscription](understand-reserved-instance-usage.md) +* [Understand reservation usage for your pay-as-you-go subscription](understand-reserved-instance-usage.md) * [Understand reservation usage for your Enterprise enrollment](understand-reserved-instance-usage-ea.md) * [Understand reservation usage for CSP subscriptions](/partner-center/azure-reservations) \ No newline at end of file diff --git a/articles/cost-management-billing/reservations/understand-reserved-instance-usage-ea.md b/articles/cost-management-billing/reservations/understand-reserved-instance-usage-ea.md index 4bcaf5412fd02..24790fb9b3229 100644 --- a/articles/cost-management-billing/reservations/understand-reserved-instance-usage-ea.md +++ b/articles/cost-management-billing/reservations/understand-reserved-instance-usage-ea.md @@ -7,7 +7,7 @@ tags: billing ms.service: cost-management-billing ms.subservice: reservations ms.topic: conceptual -ms.date: 05/05/2022 +ms.date: 06/07/2022 ms.author: banders --- @@ -50,12 +50,14 @@ Other information available in Azure usage data has changed: - Term - 12 months or 36 months. - RINormalizationRatio - Available under AdditionalInfo. This is the ratio where the reservation is applied to the usage record. If instance size flexibility is enabled on for your reservation, then it can apply to other sizes. The value shows the ratio that the reservation was applied to for the usage record. -[See field definition](/rest/api/consumption/usagedetails/list#definitions) +For more information, see the Usage details field [Definitions](/rest/api/consumption/usagedetails/list#definitions). ## Get Azure consumption and reservation usage data using API You can get the data using the API or download it from Azure portal. 
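To ground the API option mentioned above, here's a rough sketch of a scope-based Usage Details request; the scope placeholder, the `api-version`, and the `metric=amortizedcost` parameter (which returns amortized reservation costs) are illustrative assumptions rather than values taken from this article:

```http
GET https://management.azure.com/{scope}/providers/Microsoft.Consumption/usageDetails?metric=amortizedcost&api-version=2021-10-01
Authorization: Bearer <access-token>
```

For an EA enrollment, the scope is typically the billing account, for example `providers/Microsoft.Billing/billingAccounts/{enrollmentNumber}`.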
+For information about permissions needed to view and manage reservations, see [Who can manage a reservation by default](view-reservations.md#who-can-manage-a-reservation-by-default). + You call the [Usage Details API](/rest/api/consumption/usagedetails/list) to get the new data. For details about terminology, see [usage terms](../understand/understand-usage.md). Here's an example call to the Usage Details API: diff --git a/articles/cost-management-billing/toc.yml b/articles/cost-management-billing/toc.yml index d0b042461c53e..b81b74f0ba394 100644 --- a/articles/cost-management-billing/toc.yml +++ b/articles/cost-management-billing/toc.yml @@ -154,6 +154,8 @@ href: manage/account-admin-tasks.md - name: Edit billing contact information href: manage/change-azure-account-profile.md + - name: Update tax details + href: manage/manage-tax-information.md - name: Grant access to billing displayName: azure billing administrator, billing administrator azure, billing admin, add billing admin, azure billing administrator role href: manage/manage-billing-access.md @@ -193,8 +195,14 @@ href: manage/azurestudents-subscription-disabled.md - name: Set up and configure AWS integration href: costs/aws-integration-set-up-configure.md - - name: Create additional subscriptions + - name: Create an MCA subscription href: manage/create-subscription.md + - name: Create an MCA subscription request + href: manage/create-subscription-request.md + - name: Create an EA subscription + href: manage/create-enterprise-subscription.md + - name: Create a subscription for a partner's customer + href: manage/create-customer-subscription.md - name: Grant access to create EA subscriptions href: manage/grant-access-to-create-subscription.md - name: Change administrator diff --git a/articles/cost-management-billing/understand/analyze-unexpected-charges.md b/articles/cost-management-billing/understand/analyze-unexpected-charges.md index c4c4f7dfea07c..5e66f23b554ce 100644 --- a/articles/cost-management-billing/understand/analyze-unexpected-charges.md +++ b/articles/cost-management-billing/understand/analyze-unexpected-charges.md @@ -1,13 +1,13 @@ --- title: Identify anomalies and unexpected changes in cost -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: Learn how to identify anomalies and unexpected changes in cost. author: bandersmsft ms.reviewer: micflan ms.service: cost-management-billing ms.subservice: cost-management ms.topic: conceptual -ms.date: 04/02/2022 +ms.date: 05/31/2022 ms.author: banders ms.custom: contperf-fy21q1 --- @@ -16,6 +16,8 @@ ms.custom: contperf-fy21q1 The article helps you identify anomalies and unexpected changes in your cloud costs using Cost Management and Billing. You'll start with anomaly detection for subscriptions in cost analysis to identify any atypical usage patterns based on your cost and usage trends. You'll then learn how to drill into cost information to find and investigate cost spikes and dips. +You can also create an anomaly alert to automatically get notified when an anomaly is detected. + In general, there are three types of changes that you might want to investigate: - New costs—For example, a resource that was started or added such as a virtual machine. New costs often appear as a cost starting from zero. 
@@ -50,6 +52,8 @@ Continuing from the previous example of the anomaly labeled **Daily run rate dow Cost anomalies are evaluated for subscriptions daily and compare the day's total cost to a forecasted total based on the last 60 days to account for common patterns in your recent usage. For example, spikes every Monday. Anomaly detection runs 36 hours after the end of the day (UTC) to ensure a complete data set is available. +The anomaly detection model is a univariate time-series, unsupervised prediction and reconstruction-based model that uses 60 days of historical usage for training, then forecasts expected usage for the day. Anomaly detection forecasting uses a deep learning algorithm called [WaveNet](https://www.deepmind.com/blog/wavenet-a-generative-model-for-raw-audio). Note that this is different from the Cost Management forecast. The total normalized usage is determined to be anomalous if it falls outside the expected range based on a predetermined confidence interval. + Anomaly detection is available to every subscription monitored using the cost analysis preview. To enable anomaly detection for your subscriptions, open the cost analysis preview and select your subscription from the scope selector at the top of the page. You'll see a notification informing you that your subscription is onboarded and you'll start to see your anomaly detection status within 24 hours. ## Manually find unexpected cost changes @@ -106,6 +110,22 @@ If you have an existing policy of [tagging resources](../costs/cost-mgt-best-pra If you've used the preceding strategies and you still don't understand why you received a charge or if you need other help with billing issues, [create a support request](https://go.microsoft.com/fwlink/?linkid=2083458). +## Create an anomaly alert + +You can create an anomaly alert to automatically get notified when an anomaly is detected. All email recipients get notified when a subscription cost anomaly is detected. + +An anomaly alert email includes a summary of changes in resource group count and cost. It also includes the top resource group changes for the day compared to the previous 60 days. And, it has a direct link to the Azure portal so that you can review the cost and investigate further. + +1. Start on a subscription scope. +1. In the left menu, select **Cost alerts**. +1. On the Cost alerts page, select **+ Add** > **Add anomaly alert**. +1. On the Subscribe to emails page, enter required information and then select **Save**. + :::image type="content" source="./media/analyze-unexpected-charges/subscribe-emails.png" alt-text="Screenshot showing the Subscribe to emails page where you enter notification information for an alert." lightbox="./media/analyze-unexpected-charges/subscribe-emails.png" ::: + +Here's an example email generated for an anomaly alert. + +:::image type="content" source="./media/analyze-unexpected-charges/anomaly-alert-email.png" alt-text="Screenshot showing an example anomaly alert email." lightbox="./media/analyze-unexpected-charges/anomaly-alert-email.png" ::: + ## Next steps -- Learn about how to [Optimize your cloud investment with Cost Management](../costs/cost-mgt-best-practices.md). \ No newline at end of file +- Learn about how to [Optimize your cloud investment with Cost Management](../costs/cost-mgt-best-practices.md). 
diff --git a/articles/cost-management-billing/understand/download-azure-invoice.md b/articles/cost-management-billing/understand/download-azure-invoice.md index 0e48aa7af0f95..39c58e0d8d612 100644 --- a/articles/cost-management-billing/understand/download-azure-invoice.md +++ b/articles/cost-management-billing/understand/download-azure-invoice.md @@ -8,17 +8,19 @@ tags: billing ms.service: cost-management-billing ms.subservice: billing ms.topic: conceptual -ms.date: 04/26/2022 +ms.date: 06/08/2022 ms.author: banders --- # View and download your Microsoft Azure invoice -You can download your invoice in the [Azure portal](https://portal.azure.com/) or have it sent in email. If you're an Azure customer with an Enterprise Agreement (EA customer), you can't download your organization's invoice. Instead, invoices are sent to the person set to receive invoices for the enrollment. +You can download your invoice in the [Azure portal](https://portal.azure.com/) or have it sent in email. For Enterprise Agreement (EA) enrollments, invoices are sent to the person set to receive invoices for the enrollment. -## When invoices are generated +If you're an Azure customer with an Enterprise Agreement (EA customer), only an EA administrator can download and view your organization's invoice. Direct EA administrators can [Download or view their Azure billing invoice](../manage/direct-ea-azure-usage-charges-invoices.md#download-or-view-your-azure-billing-invoice). Indirect EA administrators can use the information at [Azure Enterprise enrollment invoices](../manage/ea-portal-enrollment-invoices.md) to download their invoice. -An invoice is generated based on your billing account type. Invoices are created for Microsoft Online Service Program (MOSP) also called pay-as-you-go, Microsoft Customer Agreement (MCA), and Microsoft Partner Agreement (MPA) billing accounts. Invoices are also generated for Enterprise Agreement (EA) billing accounts. However, invoices for EA billing accounts aren't shown in the Azure portal. +## Where invoices are generated + +An invoice is generated based on your billing account type. Invoices are created for Microsoft Online Service Program (MOSP), also called pay-as-you-go, Microsoft Customer Agreement (MCA), and Microsoft Partner Agreement (MPA) billing accounts. Invoices are also generated for Enterprise Agreement (EA) billing accounts. To learn more about billing accounts and identify your billing account type, see [View billing accounts in Azure portal](../manage/view-all-accounts.md). diff --git a/articles/cost-management-billing/understand/mca-overview.md index cfa531879bdbd..a50560cd8a16a 100644 --- a/articles/cost-management-billing/understand/mca-overview.md +++ b/articles/cost-management-billing/understand/mca-overview.md @@ -6,7 +6,7 @@ ms.reviewer: amberbhargava ms.service: cost-management-billing ms.subservice: billing ms.topic: conceptual -ms.date: 09/15/2021 +ms.date: 05/26/2022 ms.author: banders --- @@ -61,7 +61,7 @@ Azure plans determine the pricing and service level agreements for Azure subscri | Plan | Definition | |------------------|-------------| |Microsoft Azure Plan | Allow users to create subscriptions that can run any workloads. | -|Microsoft Azure Plan for Dev/Test | Allow Visual Studio subscribers to create subscriptions that are restricted for development or testing workloads. These subscriptions get benefits such as lower rates and access to exclusive virtual machine images in the Azure portal. 
| +|Microsoft Azure Plan for Dev/Test | Allow Visual Studio subscribers to create subscriptions that are restricted for development or testing workloads. These subscriptions get benefits such as lower rates and access to exclusive virtual machine images in the Azure portal. Azure Plan for DevTest is only available for Microsoft Customer Agreement customers who purchase through a Microsoft Sales representative. | ## Invoice sections diff --git a/articles/cost-management-billing/understand/media/analyze-unexpected-charges/anomaly-alert-email.png b/articles/cost-management-billing/understand/media/analyze-unexpected-charges/anomaly-alert-email.png new file mode 100644 index 0000000000000..ad42ebd8cb611 Binary files /dev/null and b/articles/cost-management-billing/understand/media/analyze-unexpected-charges/anomaly-alert-email.png differ diff --git a/articles/cost-management-billing/understand/media/analyze-unexpected-charges/subscribe-emails.png b/articles/cost-management-billing/understand/media/analyze-unexpected-charges/subscribe-emails.png new file mode 100644 index 0000000000000..0deaf913907f7 Binary files /dev/null and b/articles/cost-management-billing/understand/media/analyze-unexpected-charges/subscribe-emails.png differ diff --git a/articles/cost-management-billing/understand/media/download-azure-daily-usage/open-usage01.png b/articles/cost-management-billing/understand/media/download-azure-daily-usage/open-usage01.png deleted file mode 100644 index 9696172246313..0000000000000 Binary files a/articles/cost-management-billing/understand/media/download-azure-daily-usage/open-usage01.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/download-azure-invoice/cmb-emailinvoice-zoomed-in.png b/articles/cost-management-billing/understand/media/download-azure-invoice/cmb-emailinvoice-zoomed-in.png deleted file mode 100644 index 2f66f38b62d16..0000000000000 Binary files a/articles/cost-management-billing/understand/media/download-azure-invoice/cmb-emailinvoice-zoomed-in.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/download-azure-invoice/download-invoice-subscription-zoomed.png b/articles/cost-management-billing/understand/media/download-azure-invoice/download-invoice-subscription-zoomed.png deleted file mode 100644 index 771c09c66f80f..0000000000000 Binary files a/articles/cost-management-billing/understand/media/download-azure-invoice/download-invoice-subscription-zoomed.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/download-azure-invoice/download-invoice-subscription.png b/articles/cost-management-billing/understand/media/download-azure-invoice/download-invoice-subscription.png deleted file mode 100644 index 87585e3dbbd2f..0000000000000 Binary files a/articles/cost-management-billing/understand/media/download-azure-invoice/download-invoice-subscription.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/download-azure-invoice/downloadinvoice-supportplan.png b/articles/cost-management-billing/understand/media/download-azure-invoice/downloadinvoice-supportplan.png deleted file mode 100644 index 9a2e35b0bce1e..0000000000000 Binary files a/articles/cost-management-billing/understand/media/download-azure-invoice/downloadinvoice-supportplan.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/download-azure-invoice/invoice-article-step02.png 
b/articles/cost-management-billing/understand/media/download-azure-invoice/invoice-article-step02.png deleted file mode 100644 index eb148739cbb91..0000000000000 Binary files a/articles/cost-management-billing/understand/media/download-azure-invoice/invoice-article-step02.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/download-azure-invoice/invoice-article-step03-verify-email.png b/articles/cost-management-billing/understand/media/download-azure-invoice/invoice-article-step03-verify-email.png deleted file mode 100644 index 70f7847e07a4e..0000000000000 Binary files a/articles/cost-management-billing/understand/media/download-azure-invoice/invoice-article-step03-verify-email.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/download-azure-invoice/invoice-article-step03.png b/articles/cost-management-billing/understand/media/download-azure-invoice/invoice-article-step03.png deleted file mode 100644 index 474ce9062f8be..0000000000000 Binary files a/articles/cost-management-billing/understand/media/download-azure-invoice/invoice-article-step03.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/download-azure-invoice/invoice-article-step04.png b/articles/cost-management-billing/understand/media/download-azure-invoice/invoice-article-step04.png deleted file mode 100644 index bcfccfd1b3886..0000000000000 Binary files a/articles/cost-management-billing/understand/media/download-azure-invoice/invoice-article-step04.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billing-profile-download-invoice.png b/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billing-profile-download-invoice.png deleted file mode 100644 index 6decc2a4406e8..0000000000000 Binary files a/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billing-profile-download-invoice.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billing-profile-email-invoice-zoomed.png b/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billing-profile-email-invoice-zoomed.png deleted file mode 100644 index 6c15911a637c5..0000000000000 Binary files a/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billing-profile-email-invoice-zoomed.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billing-profile-invoices-zoomed.png b/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billing-profile-invoices-zoomed.png deleted file mode 100644 index 34d30a42745db..0000000000000 Binary files a/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billing-profile-invoices-zoomed.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billing-profile-invoices.png b/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billing-profile-invoices.png deleted file mode 100644 index b2cadd4afbe3c..0000000000000 Binary files a/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billing-profile-invoices.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billingprofile-email-invoice.png 
b/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billingprofile-email-invoice.png deleted file mode 100644 index 495debacdf887..0000000000000 Binary files a/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billingprofile-email-invoice.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billingprofile-select-emailinvoice-zoomed-in.png b/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billingprofile-select-emailinvoice-zoomed-in.png deleted file mode 100644 index 9dc582cdd4a28..0000000000000 Binary files a/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billingprofile-select-emailinvoice-zoomed-in.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billingprofile-select-emailinvoice.png b/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billingprofile-select-emailinvoice.png deleted file mode 100644 index cb40949261c09..0000000000000 Binary files a/articles/cost-management-billing/understand/media/download-azure-invoice/mca-billingprofile-select-emailinvoice.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/download-azure-invoice/usageandinvoice-subscription.png b/articles/cost-management-billing/understand/media/download-azure-invoice/usageandinvoice-subscription.png deleted file mode 100644 index f7f29e83a4f1f..0000000000000 Binary files a/articles/cost-management-billing/understand/media/download-azure-invoice/usageandinvoice-subscription.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/pay-bill/billing-account-overview.png b/articles/cost-management-billing/understand/media/pay-bill/billing-account-overview.png deleted file mode 100644 index dba221307a07c..0000000000000 Binary files a/articles/cost-management-billing/understand/media/pay-bill/billing-account-overview.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/pay-bill/paynow-button-overview.png b/articles/cost-management-billing/understand/media/pay-bill/paynow-button-overview.png deleted file mode 100644 index deb2e94df0a6c..0000000000000 Binary files a/articles/cost-management-billing/understand/media/pay-bill/paynow-button-overview.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/understand-azure-marketplace-charges/change-payment.png b/articles/cost-management-billing/understand/media/understand-azure-marketplace-charges/change-payment.png deleted file mode 100644 index b9cbc3054a98c..0000000000000 Binary files a/articles/cost-management-billing/understand/media/understand-azure-marketplace-charges/change-payment.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/understand-azure-marketplace-charges/edit-info.png b/articles/cost-management-billing/understand/media/understand-azure-marketplace-charges/edit-info.png deleted file mode 100644 index b865f52d6abe1..0000000000000 Binary files a/articles/cost-management-billing/understand/media/understand-azure-marketplace-charges/edit-info.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/understand-azure-marketplace-charges/invoice-tabs.png b/articles/cost-management-billing/understand/media/understand-azure-marketplace-charges/invoice-tabs.png deleted file mode 100644 index 
542332884dfca..0000000000000 Binary files a/articles/cost-management-billing/understand/media/understand-azure-marketplace-charges/invoice-tabs.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/understand-azure-marketplace-charges/select-ext-service.png b/articles/cost-management-billing/understand/media/understand-azure-marketplace-charges/select-ext-service.png deleted file mode 100644 index 3e52bf736c37a..0000000000000 Binary files a/articles/cost-management-billing/understand/media/understand-azure-marketplace-charges/select-ext-service.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/media/understand-azure-marketplace-charges/select-marketplace.png b/articles/cost-management-billing/understand/media/understand-azure-marketplace-charges/select-marketplace.png deleted file mode 100644 index 91b82b1aec706..0000000000000 Binary files a/articles/cost-management-billing/understand/media/understand-azure-marketplace-charges/select-marketplace.png and /dev/null differ diff --git a/articles/cost-management-billing/understand/pay-bill.md b/articles/cost-management-billing/understand/pay-bill.md index 06fc4f803f290..2c15c0fe4f83b 100644 --- a/articles/cost-management-billing/understand/pay-bill.md +++ b/articles/cost-management-billing/understand/pay-bill.md @@ -28,11 +28,15 @@ If you have Azure credits, they automatically apply to your invoice each billing ## Reserve Bank of India -**The Reserve Bank of India has issued new regulations.** +**The Reserve Bank of India has issued new directives.** -On 1 October 2021, automatic payments in India may block some credit card transactions, especially transactions exceeding 5,000 INR. Because of this you may need to make payments manually in the Azure portal. These regulations won't affect the total amount you will be charged for your Azure usage. +On 1 October 2021, automatic payments in India may block some credit card transactions, especially transactions exceeding 5,000 INR. Because of this, you may need to make payments manually in the Azure portal. This directive will not affect the total amount you will be charged for your Azure usage. -[Learn more about the Reserve Bank of India regulation for recurring payments](https://www.rbi.org.in/Scripts/NotificationUser.aspx?Id=11668&Mode=0) +[Learn more about the Reserve Bank of India directive: Processing of e-mandate on cards for recurring transactions](https://www.rbi.org.in/Scripts/NotificationUser.aspx?Id=11668&Mode=0) + +On 1 July 2022, Microsoft and other online merchants will no longer store credit card information. To comply with this directive, Microsoft will remove all stored card details from Microsoft Azure. To avoid service interruption, you will need to add a payment method and make a one-time payment for all invoices. 
+ +[Learn about the Reserve Bank of India directive: Restriction on storage of actual card data](https://www.rbi.org.in/Scripts/NotificationUser.aspx?Id=12211) ## Pay by default payment method diff --git a/articles/data-factory/TOC.yml b/articles/data-factory/TOC.yml index 5bf67534e97f4..ac1bfb73458e9 100644 --- a/articles/data-factory/TOC.yml +++ b/articles/data-factory/TOC.yml @@ -347,6 +347,8 @@ items: href: connector-amazon-simple-storage-service.md - name: Amazon S3 Compatible Storage href: connector-amazon-s3-compatible-storage.md + - name: Asana + href: connector-asana.md - name: Avro format href: format-avro.md - name: Azure Blob Storage @@ -820,6 +822,10 @@ items: href: data-factory-private-link.md - name: Azure security baseline href: /security/benchmark/azure/baselines/data-factory-security-baseline?toc=/azure/data-factory/TOC.json + - name: Settings + items: + - name: Manage Azure Data Factory settings and preferences + href: how-to-manage-settings.md - name: Monitor and manage items: - name: Monitor visually @@ -1199,7 +1205,7 @@ items: - name: Ask a question - Stack Overflow href: https://stackoverflow.com/questions/tagged/azure-data-factory - name: Request a feature - href: /answers/topics/azure-data-factory.html + href: https://feedback.azure.com/d365community/forum/1219ec2d-6c26-ec11-b6e6-000d3a4f032c - name: Pricing href: https://azure.microsoft.com/pricing/details/data-factory/ - name: Availability by region diff --git a/articles/data-factory/ci-cd-github-troubleshoot-guide.md b/articles/data-factory/ci-cd-github-troubleshoot-guide.md index fcb67122d52f9..ad2990e8d4c68 100644 --- a/articles/data-factory/ci-cd-github-troubleshoot-guide.md +++ b/articles/data-factory/ci-cd-github-troubleshoot-guide.md @@ -178,7 +178,7 @@ While publishing ADF resources, the azure pipeline triggers twice or more instea #### Cause -Azure DevOps has the 20 MB Rest API limit. When the ARM template exceeds this size, ADF internally splits the template file into multiple files with linked templates to solve this issue. As a side effect, this split could result in customer's triggers being run more than once. +Azure DevOps has a 20 MB REST API limit. When the ARM template exceeds this size, ADF internally splits the template file into multiple files with linked templates to solve this issue. As a side effect, this split could result in customer's triggers being run more than once. #### Resolution diff --git a/articles/data-factory/concepts-data-flow-udf.md b/articles/data-factory/concepts-data-flow-udf.md index cce9de9ea99b8..8513d1b2f78a2 100644 --- a/articles/data-factory/concepts-data-flow-udf.md +++ b/articles/data-factory/concepts-data-flow-udf.md @@ -19,7 +19,7 @@ ms.date: 04/20/2022 A user defined function is a customized expression you can define to be able to reuse logic across multiple mapping data flows. User defined functions live in a collection called a data flow library to be able to easily group up common sets of customized functions. -Whenever you find yourself building the same logic in an expression in across multiple mapping data flows this would be a good opportunity to turn that into a user defined function. +Whenever you find yourself building the same logic in an expression across multiple mapping data flows, it's a good opportunity to turn that logic into a user defined function. > [!IMPORTANT] > User defined functions and mapping data flow libraries are currently in public preview. 
diff --git a/articles/data-factory/connector-asana.md b/articles/data-factory/connector-asana.md new file mode 100644 index 0000000000000..5ac33af17fd60 --- /dev/null +++ b/articles/data-factory/connector-asana.md @@ -0,0 +1,112 @@ +--- +title: Transform data in Asana (Preview) +titleSuffix: Azure Data Factory & Azure Synapse +description: Learn how to transform data in Asana (Preview) by using Data Factory or Azure Synapse Analytics. +ms.author: jianleishen +author: jianleishen +ms.service: data-factory +ms.subservice: data-movement +ms.topic: conceptual +ms.custom: synapse +ms.date: 05/20/2022 +--- + +# Transform data in Asana (Preview) using Azure Data Factory or Synapse Analytics + +[!INCLUDE[appliesto-adf-asa-md](includes/appliesto-adf-asa-md.md)] + +This article outlines how to use Data Flow to transform data in Asana (Preview). To learn more, read the introductory article for [Azure Data Factory](introduction.md) or [Azure Synapse Analytics](../synapse-analytics/overview-what-is.md). + +> [!IMPORTANT] +> This connector is currently in preview. You can try it out and give us feedback. If you want to take a dependency on preview connectors in your solution, please contact [Azure support](https://azure.microsoft.com/support/). + +## Supported capabilities + +This Asana connector is supported for the following activities: + +- [Mapping data flow](concepts-data-flow-overview.md) + +## Create an Asana linked service using UI + +Use the following steps to create an Asana linked service in the Azure portal UI. + +1. Browse to the Manage tab in your Azure Data Factory or Synapse workspace and select Linked Services, then select New: + + # [Azure Data Factory](#tab/data-factory) + + :::image type="content" source="media/doc-common-process/new-linked-service.png" alt-text="Screenshot of creating a new linked service with Azure Data Factory U I."::: + + # [Azure Synapse](#tab/synapse-analytics) + + :::image type="content" source="media/doc-common-process/new-linked-service-synapse.png" alt-text="Screenshot of creating a new linked service with Azure Synapse U I."::: + +2. Search for Asana (Preview) and select the Asana (Preview) connector. + + :::image type="content" source="media/connector-asana/asana-connector.png" alt-text="Screenshot showing selecting Asana connector."::: + +3. Configure the service details, test the connection, and create the new linked service. + + :::image type="content" source="media/connector-asana/configure-asana-linked-service.png" alt-text="Screenshot of configuration for Asana linked service."::: + +## Connector configuration details + +The following sections provide information about properties that are used to define Data Factory and Synapse pipeline entities specific to Asana. + +## Linked service properties + +The following properties are supported for the Asana linked service: + +| Property | Description | Required | +|:--- |:--- |:--- | +| type | The type property must be set to **Asana**. |Yes | +| apiToken | Specify an API token for the Asana. Mark this field as **SecureString** to store it securely. Or, you can [reference a secret stored in Azure Key Vault](store-credentials-in-key-vault.md). |Yes | + +**Example:** + +```json +{ + "name": "AsanaLinkedService", + "properties": { + "type": "Asana", + "typeProperties": { + "apiToken": { + "type": "SecureString", + "value": "" + } + } + } +} +``` + +## Mapping data flow properties + +When transforming data in mapping data flow, you can read tables from Asana. 
For more information, see the [source transformation](data-flow-source.md) in mapping data flows. You can only use an [inline dataset](data-flow-source.md#inline-datasets) as source type. + +### Source transformation + +The below table lists the properties supported by Asana source. You can edit these properties in the **Source options** tab. + +| Name | Description | Required | Allowed values | Data flow script property | +| ---- | ----------- | -------- | -------------- | ---------------- | +| Workspace | The ID of the workspace in Asana. | Yes | String | workspaceId | +| Entity | The ID of the entity in Asana.| Yes | String | entityId | +| Entity Type | The type of the entity in Asana. | Yes | `teams`
                  `portfolios`
                  `projects` | entityType | + + +#### Asana source script examples + +When you use Asana as source type, the associated data flow script is: + +``` +source(allowSchemaDrift: true, + validateSchema: false, + store: 'asana', + format: 'rest', + workspaceId: '9876543210', + entityId: '1234567890', + entityType: 'teams') ~> AsanaSource +``` + +## Next steps + +For a list of data stores supported as sources and sinks by the copy activity, see [Supported data stores](copy-activity-overview.md#supported-data-stores-and-formats). diff --git a/articles/data-factory/connector-db2.md b/articles/data-factory/connector-db2.md index c033e8e494e60..71aac84db4511 100644 --- a/articles/data-factory/connector-db2.md +++ b/articles/data-factory/connector-db2.md @@ -7,7 +7,7 @@ ms.service: data-factory ms.subservice: data-movement ms.custom: synapse ms.topic: conceptual -ms.date: 09/09/2021 +ms.date: 06/07/2022 ms.author: jianleishen --- # Copy data from DB2 using Azure Data Factory or Synapse Analytics @@ -102,7 +102,7 @@ Typical properties inside the connection string: | certificateCommonName | When you use Secure Sockets Layer (SSL) or Transport Layer Security (TLS) encryption, you must enter a value for Certificate common name. | No | > [!TIP] -> If you receive an error message that states `The package corresponding to an SQL statement execution request was not found. SQLSTATE=51002 SQLCODE=-805`, the reason is a needed package is not created for the user. By default, the service will try to create the package under the collection named as the user you used to connect to the DB2. Specify the package collection property to indicate under where you want the service to create the needed packages when querying the database. +> If you receive an error message that states `The package corresponding to an SQL statement execution request was not found. SQLSTATE=51002 SQLCODE=-805`, the reason is a needed package is not created for the user. By default, the service will try to create the package under the collection named as the user you used to connect to the DB2. Specify the package collection property to indicate under where you want the service to create the needed packages when querying the database. If you can't determine the package collection name, try to set `packageCollection=NULLID`. **Example:** diff --git a/articles/data-factory/connector-dynamics-crm-office-365.md b/articles/data-factory/connector-dynamics-crm-office-365.md index f01b9bd0665b7..7ded58f0ac446 100644 --- a/articles/data-factory/connector-dynamics-crm-office-365.md +++ b/articles/data-factory/connector-dynamics-crm-office-365.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.author: jianleishen author: jianleishen ms.custom: synapse -ms.date: 04/12/2022 +ms.date: 04/24/2022 --- # Copy and transform data in Dynamics 365 (Microsoft Dataverse) or Dynamics CRM using Azure Data Factory or Azure Synapse Analytics @@ -504,7 +504,7 @@ If all of your source records map to the same target entity and your source data ## Mapping data flow properties -When transforming data in mapping data flow, you can read and write to tables from Dynamics. For more information, see the [source transformation](data-flow-source.md) and [sink transformation](data-flow-sink.md) in mapping data flows. You can choose to use a Dynamics dataset or an [inline dataset](data-flow-source.md#inline-datasets) as source and sink type. +When transforming data in mapping data flow, you can read from and write to tables in Dynamics. 
For more information, see the [source transformation](data-flow-source.md) and [sink transformation](data-flow-sink.md) in mapping data flows. You can choose to use a Dynamics dataset or an [inline dataset](data-flow-source.md#inline-datasets) as source and sink type. ### Source transformation @@ -512,34 +512,32 @@ The below table lists the properties supported by Dynamics. You can edit these p | Name | Description | Required | Allowed values | Data flow script property | | ---- | ----------- | -------- | -------------- | ---------------- | -| Table | If you select Table as input, data flow fetches all the data from the table specified in the dataset. | No | - | tableName | +| Entity name| The logical name of the entity to retrieve. | Yes when use inline dataset | - | *(for inline dataset only)*
                  entity | | Query |FetchXML is a proprietary query language that is used in Dynamics online and on-premises. See the following example. To learn more, see [Build queries with FetchXML](/previous-versions/dynamicscrm-2016/developers-guide/gg328332(v=crm.8)). | No | String | query | -| Entity | The logical name of the entity to retrieve. | Yes when use inline mode | - | entity| > [!Note] > If you select **Query** as input type, the column type from tables can not be retrieved. It will be treated as string by default. #### Dynamics source script example -When you use Dynamics as source type, the associated data flow script is: +When you use Dynamics dataset as source type, the associated data flow script is: ``` -source( - output( - new_name as string, - new_dataflowtestid as string - ), - store: 'dynamics', - format: 'dynamicsformat', - baseUrl: $baseUrl, - cloudType:'AzurePublic', - servicePrincipalId:$servicePrincipalId, - servicePrincipalCredential:$servicePrincipalCredential, - entity:'new_datalowtest' -query:' ' - ) ~> movies +source(allowSchemaDrift: true, + validateSchema: false, + query: '') ~> DynamicsSource +``` + +If you use inline dataset, the associated data flow script is: ``` +source(allowSchemaDrift: true, + validateSchema: false, + store: 'dynamics', + format: 'dynamicsformat', + entity: 'Entity1', + query: '') ~> DynamicsSource +``` ### Sink transformation @@ -547,39 +545,41 @@ The below table lists the properties supported by Dynamics sink. You can edit th | Name | Description | Required | Allowed values | Data flow script property | | ---- | ----------- | -------- | -------------- | ---------------- | -| Entity | The logical name of the entity to retrieve. | Yes when use inline mode | - | entity| -| Request interval | The interval time between API requests in millisecond. | No | - | requestInterval| -| Update method | Specify what operations are allowed on your database destination. The default is to only allow inserts.
                  To update, upsert, or delete rows, an [Alter row transformation](data-flow-alter-row.md) is required to tag rows for those actions. | Yes | `true` or `false` | insertable
                  updateable
                  upsertable
                  deletable| | Alternate key name | The alternate key name defined on your entity to do an update, upsert or delete. | No | - | alternateKeyName | +| Update method | Specify what operations are allowed on your database destination. The default is to only allow inserts.
                  To update, upsert, or delete rows, an [Alter row transformation](data-flow-alter-row.md) is required to tag rows for those actions. | Yes | `true` or `false` | insertable
                  updateable
                  upsertable
                  deletable| +| Entity name| The logical name of the entity to write. | Yes when use inline dataset | - | *(for inline dataset only)*
                  entity| + #### Dynamics sink script example -When you use Dynamics as sink type, the associated data flow script is: +When you use Dynamics dataset as sink type, the associated data flow script is: ``` -moviesAltered sink( - input(new_name as string, - new_id as string, - new_releasedate as string - ), - store: 'dynamics', - format: 'dynamicsformat', - baseUrl: $baseUrl, - - cloudType:'AzurePublic', - servicePrincipalId:$servicePrincipalId, - servicePrincipalCredential:$servicePrincipalCredential, - updateable: true, - upsertable: true, - insertable: true, - deletable:true, - alternateKey:'new_testalternatekey', - entity:'new_dataflow_crud_test', - -requestInterval:1000 - ) ~> movieDB +IncomingStream sink(allowSchemaDrift: true, + validateSchema: false, + deletable:true, + insertable:true, + updateable:true, + upsertable:true, + skipDuplicateMapInputs: true, + skipDuplicateMapOutputs: true) ~> DynamicsSink ``` +If you use inline dataset, the associated data flow script is: + +``` +IncomingStream sink(allowSchemaDrift: true, + validateSchema: false, + store: 'dynamics', + format: 'dynamicsformat', + entity: 'Entity1', + deletable: true, + insertable: true, + updateable: true, + upsertable: true, + skipDuplicateMapInputs: true, + skipDuplicateMapOutputs: true) ~> DynamicsSink +``` ## Lookup activity properties To learn details about the properties, see [Lookup activity](control-flow-lookup-activity.md). diff --git a/articles/data-factory/connector-google-adwords.md b/articles/data-factory/connector-google-adwords.md index 2e76bbbdc0190..e140118ec7b04 100644 --- a/articles/data-factory/connector-google-adwords.md +++ b/articles/data-factory/connector-google-adwords.md @@ -8,7 +8,7 @@ ms.service: data-factory ms.subservice: data-movement ms.topic: conceptual ms.custom: synapse -ms.date: 02/24/2022 +ms.date: 05/30/2022 --- # Copy data from Google AdWords using Azure Data Factory or Synapse Analytics @@ -81,7 +81,7 @@ The following properties are supported for Google AdWords linked service: | clientId | The client ID of the Google application used to acquire the refresh token. You can choose to mark this field as a SecureString to store it securely, or store password in Azure Key Vault and let the copy activity pull from there when performing data copy - learn more from [Store credentials in Key Vault](store-credentials-in-key-vault.md). | No | | clientSecret | The client secret of the google application used to acquire the refresh token. You can choose to mark this field as a SecureString to store it securely, or store password in Azure Key Vault and let the copy activity pull from there when performing data copy - learn more from [Store credentials in Key Vault](store-credentials-in-key-vault.md). | No | | email | The service account email ID that is used for ServiceAuthentication and can only be used on self-hosted IR. | No | -| keyFilePath | The full path to the .p12 key file that is used to authenticate the service account email address and can only be used on self-hosted IR. | No | +| keyFilePath | The full path to the `.p12` or `.json` key file that is used to authenticate the service account email address and can only be used on self-hosted IR. | No | | trustedCertPath | The full path of the .pem file containing trusted CA certificates for verifying the server when connecting over TLS. This property can only be set when using TLS on self-hosted IR. The default value is the cacerts.pem file installed with the IR. 
| No | | useSystemTrustStore | Specifies whether to use a CA certificate from the system trust store or from a specified PEM file. The default value is false. | No | diff --git a/articles/data-factory/connector-google-bigquery.md b/articles/data-factory/connector-google-bigquery.md index 9157cd04e10b4..068caf3db1caf 100644 --- a/articles/data-factory/connector-google-bigquery.md +++ b/articles/data-factory/connector-google-bigquery.md @@ -8,7 +8,7 @@ ms.service: data-factory ms.subservice: data-movement ms.topic: conceptual ms.custom: synapse -ms.date: 04/26/2022 +ms.date: 05/30/2022 --- # Copy data from Google BigQuery using Azure Data Factory or Synapse Analytics @@ -117,7 +117,7 @@ Set "authenticationType" property to **ServiceAuthentication**, and specify the | Property | Description | Required | |:--- |:--- |:--- | | email | The service account email ID that is used for ServiceAuthentication. It can be used only on Self-hosted Integration Runtime. | No | -| keyFilePath | The full path to the .p12 key file that is used to authenticate the service account email address. | No | +| keyFilePath | The full path to the `.p12` or `.json` key file that is used to authenticate the service account email address. | No | | trustedCertPath | The full path of the .pem file that contains trusted CA certificates used to verify the server when you connect over TLS. This property can be set only when you use TLS on Self-hosted Integration Runtime. The default value is the cacerts.pem file installed with the integration runtime. | No | | useSystemTrustStore | Specifies whether to use a CA certificate from the system trust store or from a specified .pem file. The default value is **false**. | No | @@ -133,7 +133,7 @@ Set "authenticationType" property to **ServiceAuthentication**, and specify the "requestGoogleDriveScope" : true, "authenticationType" : "ServiceAuthentication", "email": "", - "keyFilePath": "<.p12 key path on the IR machine>" + "keyFilePath": "<.p12 or .json key path on the IR machine>" }, "connectVia": { "referenceName": "", diff --git a/articles/data-factory/connector-overview.md b/articles/data-factory/connector-overview.md index 66e57f9b759d4..1c07854f6c8b0 100644 --- a/articles/data-factory/connector-overview.md +++ b/articles/data-factory/connector-overview.md @@ -7,7 +7,7 @@ ms.service: data-factory ms.subservice: data-movement ms.custom: synapse ms.topic: conceptual -ms.date: 04/13/2022 +ms.date: 05/27/2022 ms.author: jianleishen --- diff --git a/articles/data-factory/connector-rest.md b/articles/data-factory/connector-rest.md index d9e7baf651727..320974aefce4a 100644 --- a/articles/data-factory/connector-rest.md +++ b/articles/data-factory/connector-rest.md @@ -7,7 +7,7 @@ ms.service: data-factory ms.subservice: data-movement ms.custom: synapse ms.topic: conceptual -ms.date: 04/01/2022 +ms.date: 06/07/2022 ms.author: makromer --- @@ -72,9 +72,6 @@ The following sections provide details about properties you can use to define Da ## Linked service properties -> [!Important] -> Due to Azure service security and compliance request, system-assigned managed identity authentication is no longer available in REST connector for both Copy and Mapping data flow. You are recommended to migrate existing linked services that use system-managed identity authentication to user-assigned managed identity authentication or other authentication types. Please make sure the migration to be done by **September 15, 2022**. 
For more detailed steps about how to create, manage user-assigned managed identities, refer to [this](data-factory-service-identity.md#user-assigned-managed-identity). - The following properties are supported for the REST linked service: | Property | Description | Required | diff --git a/articles/data-factory/connector-salesforce.md b/articles/data-factory/connector-salesforce.md index 080e8a6f8f9f9..5af1a0b71afa6 100644 --- a/articles/data-factory/connector-salesforce.md +++ b/articles/data-factory/connector-salesforce.md @@ -8,7 +8,7 @@ ms.service: data-factory ms.subservice: data-movement ms.topic: conceptual ms.custom: synapse -ms.date: 09/29/2021 +ms.date: 05/26/2022 --- # Copy data from and to Salesforce using Azure Data Factory or Azure Synapse Analytics @@ -35,7 +35,10 @@ Specifically, this Salesforce connector supports: - Salesforce Developer, Professional, Enterprise, or Unlimited editions. - Copying data from and to Salesforce production, sandbox, and custom domain. -The Salesforce connector is built on top of the Salesforce REST/Bulk API. When copying data from Salesforce, the connector automatically chooses between REST and Bulk APIs based on the data size – when the result set is large, Bulk API is used for better performance; You can explicitly set the API version used to read/write data via [`apiVersion` property](#linked-service-properties) in linked service. +>[!NOTE] +>This function supports copy of any schema from the above mentioned Salesforce environments, including the [Nonprofit Success Pack](https://www.salesforce.org/products/nonprofit-success-pack/) (NPSP). This allows you to bring your Salesforce nonprofit data into Azure, work with it in Azure data services, unify it with other data sets, and visualize it in Power BI for rapid insights. + +The Salesforce connector is built on top of the Salesforce REST/Bulk API. When copying data from Salesforce, the connector automatically chooses between REST and Bulk APIs based on the data size – when the result set is large, Bulk API is used for better performance; You can explicitly set the API version used to read/write data via [`apiVersion` property](#linked-service-properties) in linked service. When copying data to Salesforce, the connector uses BULK API v1. >[!NOTE] >The connector no longer sets default version for Salesforce API. For backward compatibility, if a default API version was set before, it keeps working. The default value is 45.0 for source, and 40.0 for sink. 
diff --git a/articles/data-factory/continuous-integration-delivery-improvements.md b/articles/data-factory/continuous-integration-delivery-improvements.md index a586847007502..981023ba42b46 100644 --- a/articles/data-factory/continuous-integration-delivery-improvements.md +++ b/articles/data-factory/continuous-integration-delivery-improvements.md @@ -7,7 +7,7 @@ author: nabhishek ms.author: abnarain ms.reviewer: jburchel ms.topic: conceptual -ms.date: 10/14/2021 +ms.date: 06/08/2022 --- # Automated publishing for continuous integration and delivery @@ -144,7 +144,7 @@ Follow these steps to get started: - task: NodeTool@0 inputs: - versionSpec: '10.x' + versionSpec: '14.x' displayName: 'Install Node.js' - task: Npm@1 @@ -161,7 +161,7 @@ Follow these steps to get started: inputs: command: 'custom' workingDir: '$(Build.Repository.LocalPath)/' #replace with the package.json folder - customCommand: 'run build validate $(Build.Repository.LocalPath)/ /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/testResourceGroup/providers/Microsoft.DataFactory/factories/' + customCommand: 'run build validate $(Build.Repository.LocalPath)/ /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups//providers/Microsoft.DataFactory/factories/' displayName: 'Validate' # Validate and then generate the ARM template into the destination folder, which is the same as selecting "Publish" from the UX. @@ -171,7 +171,7 @@ Follow these steps to get started: inputs: command: 'custom' workingDir: '$(Build.Repository.LocalPath)/' #replace with the package.json folder - customCommand: 'run build export $(Build.Repository.LocalPath)/ /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/testResourceGroup/providers/Microsoft.DataFactory/factories/ "ArmTemplate"' + customCommand: 'run build export $(Build.Repository.LocalPath)/ /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups//providers/Microsoft.DataFactory/factories/ "ArmTemplate"' displayName: 'Validate and Generate ARM template' # Publish the artifact to be used as a source for a release pipeline. 
diff --git a/articles/data-factory/control-flow-lookup-activity.md b/articles/data-factory/control-flow-lookup-activity.md index dae668abac6fc..01999ff6436fe 100644 --- a/articles/data-factory/control-flow-lookup-activity.md +++ b/articles/data-factory/control-flow-lookup-activity.md @@ -8,7 +8,7 @@ ms.service: data-factory ms.subservice: orchestration ms.custom: synapse ms.topic: conceptual -ms.date: 04/06/2022 +ms.date: 05/31/2022 --- # Lookup activity in Azure Data Factory and Azure Synapse Analytics diff --git a/articles/data-factory/control-flow-wait-activity.md b/articles/data-factory/control-flow-wait-activity.md index af50427d2925d..c2d3288a73622 100644 --- a/articles/data-factory/control-flow-wait-activity.md +++ b/articles/data-factory/control-flow-wait-activity.md @@ -16,7 +16,7 @@ When you use a Wait activity in a pipeline, the pipeline waits for the specified [!INCLUDE[appliesto-adf-asa-md](includes/appliesto-adf-asa-md.md)] -## Create a Fail activity with UI +## Create a Wait activity with UI To use a Wait activity in a pipeline, complete the following steps: diff --git a/articles/data-factory/data-flow-source.md b/articles/data-factory/data-flow-source.md index edea8d9b9cd47..7ba913e2fd695 100644 --- a/articles/data-factory/data-flow-source.md +++ b/articles/data-factory/data-flow-source.md @@ -8,7 +8,7 @@ ms.service: data-factory ms.subservice: data-flows ms.topic: conceptual ms.custom: seo-lt-2019 -ms.date: 04/13/2022 +ms.date: 05/31/2022 --- # Source transformation in mapping data flow @@ -48,7 +48,8 @@ Mapping data flow follows an extract, load, and transform (ELT) approach and wor | Connector | Format | Dataset/inline | | --------- | ------ | -------------- | |[Amazon S3](connector-amazon-simple-storage-service.md#mapping-data-flow-properties) | [Avro](format-avro.md#mapping-data-flow-properties)
<br/>[Delimited text](format-delimited-text.md#mapping-data-flow-properties)<br/>[Delta](format-delta.md)<br/>[Excel](format-excel.md#mapping-data-flow-properties)<br/>[JSON](format-json.md#mapping-data-flow-properties)<br/>[ORC](format-orc.md#mapping-data-flow-properties)<br/>[Parquet](format-parquet.md#mapping-data-flow-properties)<br/>[XML](format-xml.md#mapping-data-flow-properties) | ✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓ |
-|[Azure Blob Storage](connector-azure-blob-storage.md#mapping-data-flow-properties) | [Avro](format-avro.md#mapping-data-flow-properties)<br/>[Delimited text](format-delimited-text.md#mapping-data-flow-properties)<br/>[Delta](format-delta.md)<br/>[Excel](format-excel.md#mapping-data-flow-properties)<br/>[JSON](format-json.md#mapping-data-flow-properties)<br/>[ORC](format-orc.md#mapping-data-flow-properties)<br/>[Parquet](format-parquet.md#mapping-data-flow-properties)<br/>[XML](format-xml.md#mapping-data-flow-properties) | ✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓ |
+|[Asana (Preview)](connector-asana.md#mapping-data-flow-properties) | | -/✓ |
+|[Azure Blob Storage](connector-azure-blob-storage.md#mapping-data-flow-properties) | [Avro](format-avro.md#mapping-data-flow-properties)<br/>[Delimited text](format-delimited-text.md#mapping-data-flow-properties)<br/>[Delta](format-delta.md)<br/>[Excel](format-excel.md#mapping-data-flow-properties)<br/>[JSON](format-json.md#mapping-data-flow-properties)<br/>[ORC](format-orc.md#mapping-data-flow-properties)<br/>[Parquet](format-parquet.md#mapping-data-flow-properties)<br/>[XML](format-xml.md#mapping-data-flow-properties) | ✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓ |
| [Azure Cosmos DB (SQL API)](connector-azure-cosmos-db.md#mapping-data-flow-properties) | | ✓/- |
| [Azure Data Lake Storage Gen1](connector-azure-data-lake-store.md#mapping-data-flow-properties) | [Avro](format-avro.md#mapping-data-flow-properties)<br/>[Delimited text](format-delimited-text.md#mapping-data-flow-properties)<br/>[Excel](format-excel.md#mapping-data-flow-properties)<br/>[JSON](format-json.md#mapping-data-flow-properties)<br/>[ORC](format-orc.md#mapping-data-flow-properties)<br/>[Parquet](format-parquet.md#mapping-data-flow-properties)<br/>[XML](format-xml.md#mapping-data-flow-properties) | ✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓ |
| [Azure Data Lake Storage Gen2](connector-azure-data-lake-storage.md#mapping-data-flow-properties) | [Avro](format-avro.md#mapping-data-flow-properties)<br/>[Common Data Model](format-common-data-model.md#source-properties)<br/>[Delimited text](format-delimited-text.md#mapping-data-flow-properties)<br/>[Delta](format-delta.md)<br/>[Excel](format-excel.md#mapping-data-flow-properties)<br/>[JSON](format-json.md#mapping-data-flow-properties)<br/>[ORC](format-orc.md#mapping-data-flow-properties)<br/>[Parquet](format-parquet.md#mapping-data-flow-properties)<br/>[XML](format-xml.md#mapping-data-flow-properties) | ✓/✓<br/>-/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>✓/✓<br/>
✓/✓ | @@ -122,7 +123,7 @@ If your text file has no defined schema, select **Detect data type** so that the **Reset schema** resets the projection to what is defined in the referenced dataset. -You can modify the column data types in a downstream derived-column transformation. Use a select transformation to modify the column names. +**Overwrite schema** allows you to modify the projected data types here at the source, overwriting the schema-defined data types. You can alternatively modify the column data types in a downstream derived-column transformation. Use a select transformation to modify the column names. ### Import schema diff --git a/articles/data-factory/how-to-manage-settings.md b/articles/data-factory/how-to-manage-settings.md new file mode 100644 index 0000000000000..07649cc4dcc22 --- /dev/null +++ b/articles/data-factory/how-to-manage-settings.md @@ -0,0 +1,68 @@ +--- +title: Managing Azure Data Factory settings and preferences +description: Learn how to manage Azure Data Factory settings and preferences. +author: n0elleli +ms.author: noelleli +ms.reviewer: +ms.service: data-factory +ms.subservice: tutorials +ms.topic: tutorial +ms.custom: seo-lt-2019 +ms.date: 05/24/2022 +--- + +# Manage Azure Data Factory settings and preferences + +[!INCLUDE[appliesto-adf-asa-md](includes/appliesto-adf-asa-md.md)] + +You can change the default settings of your Azure Data Factory to meet your own preferences. +Azure Data Factory settings are available in the Settings menu in the top right section of the global page header, as indicated in the screenshot below. + +:::image type="content" source="media/how-to-manage-settings/adf-settings-1.png" alt-text="Screenshot of settings gear in top right corner of page banner."::: + +Selecting the **Settings** gear button opens a flyout. + +:::image type="content" source="media/how-to-manage-settings/adf-settings-2.png" alt-text="Screenshot of settings flyout with three setting options."::: + +Here you can find the settings and preferences that you can set for your data factory. + +## Language and Region + +Choose your language and regional format, which influence how data such as dates and currency appear in your data factory. + +### Language + +Use the drop-down list to select from the list of available languages. This setting controls the language you see for text throughout your data factory. There are 18 languages supported in addition to English. + +:::image type="content" source="media/how-to-manage-settings/adf-settings-3.png" alt-text="Screenshot of drop-down list of languages that users can choose from."::: + +To apply changes, select a language and then select the **Apply** button. Your page will refresh and reflect the changes made. + +:::image type="content" source="media/how-to-manage-settings/adf-settings-4.png" alt-text="Screenshot of Apply button in the bottom left corner to make language changes."::: + +> [!NOTE] +> Applying language changes will discard any unsaved changes in your data factory. + +### Regional Format + +Use the drop-down list to select from the list of available regional formats. This setting controls the way dates, time, numbers, and currency are shown in your data factory. + +The default shown in **Regional format** will automatically change based on the option you selected for **Language**. You can still use the drop-down list to select a different format.
+ +:::image type="content" source="media/how-to-manage-settings/adf-settings-5.png" alt-text="Screenshot of drop-down list of regional formats that users can choose from. "::: + +For example, if you select **English** as your language and select **English (United States)** as the regional format, currency will be shown in U.S. (United States) dollars. If you select **English** as your language and select **English (Europe)** as the regional format, currency will be shown in euros. + +To apply changes, select a **Regional format** and then select the **Apply** button. Your page will refresh and reflect the changes made. + +:::image type="content" source="media/how-to-manage-settings/adf-settings-6.png" alt-text="Screenshot of Apply button in the bottom left corner to make regional format changes."::: + +> [!NOTE] +> Applying regional format changes will discard any unsaved changes in your data factory. + +## Next steps +- [Introduction to Azure Data Factory](introduction.md) +- [Build a pipeline with a copy activity](quickstart-create-data-factory-powershell.md) +- [Build a pipeline with a data transformation activity](tutorial-transform-data-spark-powershell.md) + + diff --git a/articles/data-factory/how-to-sqldb-to-cosmosdb.md b/articles/data-factory/how-to-sqldb-to-cosmosdb.md index c8d3ccb133701..4c499046837a1 100644 --- a/articles/data-factory/how-to-sqldb-to-cosmosdb.md +++ b/articles/data-factory/how-to-sqldb-to-cosmosdb.md @@ -17,7 +17,7 @@ SQL schemas are typically modeled using third normal form, resulting in normaliz Using Azure Data Factory, we'll build a pipeline that uses a single Mapping Data Flow to read from two Azure SQL Database normalized tables that contain primary and foreign keys as the entity relationship. ADF will join those tables into a single stream using the data flow Spark engine, collect joined rows into arrays and produce individual cleansed documents for insert into a new Azure Cosmos DB container. -This guide will build a new container on the fly called "orders" that will use the ```SalesOrderHeader``` and ```SalesOrderDetail``` tables from the standard SQL Server [Adventure Works sample database](https://docs.microsoft.com/sql/samples/adventureworks-install-configure?view=sql-server-ver15&tabs=ssms). Those tables represent sales transactions joined by ```SalesOrderID```. Each unique detail records has its own primary key of ```SalesOrderDetailID```. The relationship between header and detail is ```1:M```. We'll join on ```SalesOrderID``` in ADF and then roll each related detail record into an array called "detail". +This guide will build a new container on the fly called "orders" that will use the ```SalesOrderHeader``` and ```SalesOrderDetail``` tables from the standard SQL Server [Adventure Works sample database](/sql/samples/adventureworks-install-configure?tabs=ssms). Those tables represent sales transactions joined by ```SalesOrderID```. Each unique detail record has its own primary key of ```SalesOrderDetailID```. The relationship between header and detail is ```1:M```. We'll join on ```SalesOrderID``` in ADF and then roll each related detail record into an array called "detail".
The representative SQL query for this guide is: diff --git a/articles/data-factory/includes/data-factory-v2-connector-overview.md b/articles/data-factory/includes/data-factory-v2-connector-overview.md index 2de521853da4b..958fb54ed085b 100644 --- a/articles/data-factory/includes/data-factory-v2-connector-overview.md +++ b/articles/data-factory/includes/data-factory-v2-connector-overview.md @@ -2,7 +2,7 @@ author: jianleishen ms.service: data-factory ms.topic: include -ms.date: 04/13/2022 +ms.date: 05/27/2022 ms.author: jianleishen --- @@ -70,6 +70,7 @@ ms.author: jianleishen |   | [Generic ODBC](../connector-odbc.md) | ✓/✓ | | ✓ | | | |   | [Generic REST](../connector-rest.md) | ✓/✓ | ✓/✓ | | | | | **Services and apps** | [Amazon Marketplace Web Service](../connector-amazon-marketplace-web-service.md) | ✓/− | | ✓ | | | +|   | [Asana (Preview)](../connector-asana.md) | | ✓/− | | | | |   | [Concur (Preview)](../connector-concur.md) | ✓/− | | ✓ | | | |   | [data.world (Preview)](../connector-dataworld.md) | |✓/− | | | | |   | [Dataverse](../connector-dynamics-crm-office-365.md) | ✓/✓ |✓/✓ | ✓ | | | diff --git a/articles/data-factory/includes/data-factory-v2-supported-data-stores-for-lookup-activity.md b/articles/data-factory/includes/data-factory-v2-supported-data-stores-for-lookup-activity.md index fc838dd11ef94..396ccb0024bcc 100644 --- a/articles/data-factory/includes/data-factory-v2-supported-data-stores-for-lookup-activity.md +++ b/articles/data-factory/includes/data-factory-v2-supported-data-stores-for-lookup-activity.md @@ -5,7 +5,7 @@ services: data-factory author: jianleishen ms.service: data-factory ms.topic: include -ms.date: 09/29/2021 +ms.date: 05/31/2022 ms.author: jianleishen ms.custom: include file --- @@ -69,7 +69,6 @@ ms.custom: include file |   |[SFTP](../connector-sftp.md) | | **Generic protocol** |[Generic OData](../connector-odata.md) | |   |[Generic ODBC](../connector-odbc.md) | -|   |[REST](../connector-rest.md) | |   |[SharePoint Online List](../connector-sharepoint-online-list.md) | | **Services and apps** |[Amazon Marketplace Web Service](../connector-amazon-marketplace-web-service.md) | |   |[Concur (Preview)](../connector-concur.md) | @@ -88,7 +87,6 @@ ms.custom: include file |   |[Oracle Service Cloud (Preview)](../connector-oracle-service-cloud.md) | |   |[PayPal (Preview)](../connector-paypal.md) | |   |[QuickBooks (Preview)](../connector-quickbooks.md) | -|   |[REST](../connector-rest.md) | |   |[SAP Cloud for Customer (C4C)](../connector-sap-cloud-for-customer.md) | |   |[SAP ECC](../connector-sap-ecc.md) | |   |[Salesforce](../connector-salesforce.md) | diff --git a/articles/data-factory/media/connector-asana/asana-connector.png b/articles/data-factory/media/connector-asana/asana-connector.png new file mode 100644 index 0000000000000..c9cf6ac3c55dc Binary files /dev/null and b/articles/data-factory/media/connector-asana/asana-connector.png differ diff --git a/articles/data-factory/media/connector-asana/configure-asana-linked-service.png b/articles/data-factory/media/connector-asana/configure-asana-linked-service.png new file mode 100644 index 0000000000000..723554fbdc18a Binary files /dev/null and b/articles/data-factory/media/connector-asana/configure-asana-linked-service.png differ diff --git a/articles/data-factory/media/how-to-manage-settings/adf-settings-1.png b/articles/data-factory/media/how-to-manage-settings/adf-settings-1.png new file mode 100644 index 0000000000000..eba788afc8a2e Binary files /dev/null and 
b/articles/data-factory/media/how-to-manage-settings/adf-settings-1.png differ diff --git a/articles/data-factory/media/how-to-manage-settings/adf-settings-2.png b/articles/data-factory/media/how-to-manage-settings/adf-settings-2.png new file mode 100644 index 0000000000000..6e4317fdda513 Binary files /dev/null and b/articles/data-factory/media/how-to-manage-settings/adf-settings-2.png differ diff --git a/articles/data-factory/media/how-to-manage-settings/adf-settings-3.png b/articles/data-factory/media/how-to-manage-settings/adf-settings-3.png new file mode 100644 index 0000000000000..e8ef328a6a9f3 Binary files /dev/null and b/articles/data-factory/media/how-to-manage-settings/adf-settings-3.png differ diff --git a/articles/data-factory/media/how-to-manage-settings/adf-settings-4.png b/articles/data-factory/media/how-to-manage-settings/adf-settings-4.png new file mode 100644 index 0000000000000..4a98d7e54d174 Binary files /dev/null and b/articles/data-factory/media/how-to-manage-settings/adf-settings-4.png differ diff --git a/articles/data-factory/media/how-to-manage-settings/adf-settings-5.png b/articles/data-factory/media/how-to-manage-settings/adf-settings-5.png new file mode 100644 index 0000000000000..7f086881071ec Binary files /dev/null and b/articles/data-factory/media/how-to-manage-settings/adf-settings-5.png differ diff --git a/articles/data-factory/media/how-to-manage-settings/adf-settings-6.png b/articles/data-factory/media/how-to-manage-settings/adf-settings-6.png new file mode 100644 index 0000000000000..97076bcce0157 Binary files /dev/null and b/articles/data-factory/media/how-to-manage-settings/adf-settings-6.png differ diff --git a/articles/data-factory/monitor-metrics-alerts.md b/articles/data-factory/monitor-metrics-alerts.md index 0d959d8da9d69..7a2c874d48cef 100644 --- a/articles/data-factory/monitor-metrics-alerts.md +++ b/articles/data-factory/monitor-metrics-alerts.md @@ -52,7 +52,7 @@ Here are some of the metrics emitted by Azure Data Factory version 2. | Total entities count | Total number of entities | Count | Total | The total number of entities in the Azure Data Factory instance. | | Total factory size (GB unit) | Total size of entities | Gigabyte | Total | The total size of entities in the Azure Data Factory instance. | -For service limits and quotas please see [quotas and limits](https://docs.microsoft.com/azure/azure-resource-manager/management/azure-subscription-service-limits#azure-data-factory-limits). +For service limits and quotas please see [quotas and limits](../azure-resource-manager/management/azure-subscription-service-limits.md#azure-data-factory-limits). To access the metrics, complete the instructions in [Azure Monitor data platform](../azure-monitor/data-platform.md). 
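Alert rules over these metrics can also be created programmatically. The following is a minimal sketch with Azure PowerShell (Az.Monitor); the factory and action group resource IDs are placeholders, and the `PipelineFailedRuns` metric name and threshold are assumptions to confirm against the metrics emitted by your factory.

```powershell
# Sketch only: alert when any pipeline run fails within a 5-minute window.
# Resource IDs, action group, and metric name below are placeholders/assumptions.
$factoryId     = "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.DataFactory/factories/<factory-name>"
$actionGroupId = "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/microsoft.insights/actionGroups/<action-group>"

$criteria = New-AzMetricAlertRuleV2Criteria -MetricName "PipelineFailedRuns" `
    -TimeAggregation Total -Operator GreaterThan -Threshold 0

Add-AzMetricAlertRuleV2 -Name "adf-failed-pipeline-runs" `
    -ResourceGroupName "<resource-group>" `
    -TargetResourceId $factoryId `
    -Condition $criteria `
    -ActionGroupId $actionGroupId `
    -WindowSize (New-TimeSpan -Minutes 5) `
    -Frequency (New-TimeSpan -Minutes 5) `
    -Severity 2
```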
> [!NOTE] @@ -97,4 +97,4 @@ Sign in to the Azure portal, and select **Monitor** > **Alerts** to create alert ## Next steps -[Configure diagnostics settings and workspace](monitor-configure-diagnostics.md) +[Configure diagnostics settings and workspace](monitor-configure-diagnostics.md) \ No newline at end of file diff --git a/articles/data-factory/transform-data-using-script.md b/articles/data-factory/transform-data-using-script.md index ace5b5e8a76a2..97ef4aa345fd1 100644 --- a/articles/data-factory/transform-data-using-script.md +++ b/articles/data-factory/transform-data-using-script.md @@ -17,7 +17,7 @@ ms.date: 04/20/2022 You use data transformation activities in a Data Factory or Synapse [pipeline](concepts-pipelines-activities.md) to transform and process raw data into predictions and insights. The Script activity is one of the transformation activities that pipelines support. This article builds on the [transform data article](transform-data.md), which presents a general overview of data transformation and the supported transformation activities. -Using the script activity, you can execute common operations with Data Manipulation Language (DML), and Data Definition Language (DDL). DML statements like SELECT, UPDATE, and INSERT let users retrieve, store, modify, delete, insert and update data in the database. DDL statements like CREATE, ALTER and DROP allow a database manager to create, modify, and remove database objects such as tables, indexes, and users. +Using the script activity, you can execute common operations with Data Manipulation Language (DML), and Data Definition Language (DDL). DML statements like INSERT, UPDATE, DELETE and SELECT let users insert, modify, delete and retrieve data in the database. DDL statements like CREATE, ALTER and DROP allow a database manager to create, modify, and remove database objects such as tables, indexes, and users. You can use the Script activity to invoke a SQL script in one of the following data stores in your enterprise or on an Azure virtual machine (VM): @@ -29,7 +29,7 @@ You can use the Script activity to invoke a SQL script in one of the following d The script may contain either a single SQL statement or multiple SQL statements that run sequentially. You can use the Execute SQL task for the following purposes: -- Truncate a table or view in preparation for inserting data. +- Truncate a table in preparation for inserting data. - Create, alter, and drop database objects such as tables and views. - Re-create fact and dimension tables before loading data into them. - Run stored procedures. If the SQL statement invokes a stored procedure that returns results from a temporary table, use the WITH RESULT SETS option to define metadata for the result set. diff --git a/articles/data-factory/v1/data-factory-create-data-factories-programmatically.md b/articles/data-factory/v1/data-factory-create-data-factories-programmatically.md index b8619010fc69f..cc2cacbbdaefa 100644 --- a/articles/data-factory/v1/data-factory-create-data-factories-programmatically.md +++ b/articles/data-factory/v1/data-factory-create-data-factories-programmatically.md @@ -102,7 +102,7 @@ In the walkthrough, you create a data factory with a pipeline that contains a co The Copy Activity performs the data movement in Azure Data Factory. The activity is powered by a globally available service that can copy data between various data stores in a secure, reliable, and scalable way. 
See [Data Movement Activities](data-factory-data-movement-activities.md) article for details about the Copy Activity. > [!IMPORTANT] -> The [Microsoft.IdentityModel.Clients.ActiveDirectory](https://www.nuget.org/packages/Microsoft.IdentityModel.Clients.ActiveDirectory) NuGet package and Azure AD Authentication Library (ADAL) have been deprecated. No new features have been added since June 30, 2020. We strongly encourage you to upgrade, see the [migration guide](/azure/active-directory/develop/msal-migration) for more details. +> The [Microsoft.IdentityModel.Clients.ActiveDirectory](https://www.nuget.org/packages/Microsoft.IdentityModel.Clients.ActiveDirectory) NuGet package and Azure AD Authentication Library (ADAL) have been deprecated. No new features have been added since June 30, 2020. We strongly encourage you to upgrade, see the [migration guide](../../active-directory/develop/msal-migration.md) for more details. 1. Using Visual Studio 2012/2013/2015, create a C# .NET console application. 1. Launch **Visual Studio** 2012/2013/2015. @@ -498,4 +498,4 @@ while (response != null); ## Next steps See the following example for creating a pipeline using .NET SDK that copies data from an Azure blob storage to Azure SQL Database: -- [Create a pipeline to copy data from Blob Storage to SQL Database](data-factory-copy-activity-tutorial-using-dotnet-api.md) +- [Create a pipeline to copy data from Blob Storage to SQL Database](data-factory-copy-activity-tutorial-using-dotnet-api.md) \ No newline at end of file diff --git a/articles/data-factory/whats-new.md b/articles/data-factory/whats-new.md index b801a84c96854..c0b17b64d7267 100644 --- a/articles/data-factory/whats-new.md +++ b/articles/data-factory/whats-new.md @@ -22,6 +22,15 @@ Azure Data Factory is improved on an ongoing basis. To stay up to date with the This page is updated monthly, so revisit it regularly. +## May 2022 +
+| Service category | Service improvements | Details |
+| --- | --- | --- |
+| Data flow | User Defined Functions for mapping data flows | Azure Data Factory introduces in public preview user defined functions and data flow libraries. A user defined function is a customized expression you can define to be able to reuse logic across multiple mapping data flows. User defined functions live in a collection called a data flow library to be able to easily group up common sets of customized functions. Learn more |
+
 ## April 2022
@@ -33,7 +42,7 @@ This page is updated monthly, so revisit it regularly.
 | Monitoring | Multiple updates to Data Factory monitoring experiences | New updates to the monitoring experience in Data Factory include the ability to export results to a CSV, clear all filters, and open a run in a new tab. Column and result caching is also improved. Learn more |
-| User interface | New regional format support | Choose your language and the regional format that will influence how data such as dates and times appear in the Data Factory Studio monitoring. These language and regional settings affect only the Data Factory Studio user interface and don't change or modify your actual data. |
+| User interface | New regional format support | Choosing your language and the regional format in settings will influence the format of how data such as dates and times appear in the Azure Data Factory Studio monitoring. For example, the time format in Monitoring will appear like "Apr 2, 2022, 3:40:29 pm" when choosing English as the regional format, and "2 Apr 2022, 15:40:29" when choosing French as regional format. These settings affect only the Azure Data Factory Studio user interface and do not change/modify your actual data and time zone. |
                  diff --git a/articles/data-lake-store/data-lake-store-integrate-with-other-services.md b/articles/data-lake-store/data-lake-store-integrate-with-other-services.md index 7819265958b17..7ce2f8b80a970 100644 --- a/articles/data-lake-store/data-lake-store-integrate-with-other-services.md +++ b/articles/data-lake-store/data-lake-store-integrate-with-other-services.md @@ -5,7 +5,7 @@ description: Understand how you can integrate Azure Data Lake Storage Gen1 with author: normesta ms.service: data-lake-store ms.topic: conceptual -ms.date: 05/29/2018 +ms.date: 06/03/2022 ms.author: normesta --- @@ -49,9 +49,6 @@ You can register data from Data Lake Storage Gen1 into the Azure Data Catalog to ## Use Data Lake Storage Gen1 with SQL Server Integration Services (SSIS) You can use the Data Lake Storage Gen1 connection manager in SSIS to connect an SSIS package with Data Lake Storage Gen1. For more information, see [Use Data Lake Storage Gen1 with SSIS](/sql/integration-services/connection-manager/azure-data-lake-store-connection-manager). -## Use Data Lake Storage Gen1 with Azure Synapse Analytics -You can use PolyBase to load data from Data Lake Storage Gen1 into Azure Synapse Analytics. For more information see [Use Data Lake Storage Gen1 with Azure Synapse Analytics](../synapse-analytics/sql-data-warehouse/sql-data-warehouse-load-from-azure-data-lake-store.md). - ## Use Data Lake Storage Gen1 with Azure Event Hubs You can use Azure Data Lake Storage Gen1 to archive and capture data received by Azure Event Hubs. For more information see [Use Data Lake Storage Gen1 with Azure Event Hubs](data-lake-store-archive-eventhub-capture.md). diff --git a/articles/databox-online/TOC.yml b/articles/databox-online/TOC.yml index 15116b98e0bae..f8a1790d333ed 100644 --- a/articles/databox-online/TOC.yml +++ b/articles/databox-online/TOC.yml @@ -429,13 +429,16 @@ - name: What is GPU sharing? href: azure-stack-edge-gpu-sharing.md - name: Understand data residency - href: azure-stack-edge-gpu-data-residency.md + href: azure-stack-edge-gpu-data-residency.md + - name: Understand data resiliency + href: azure-stack-edge-gpu-data-resiliency.md - name: View key vault integration href: azure-stack-edge-gpu-activation-key-vault.md - name: Understand disconnected use href: azure-stack-edge-gpu-disconnected-scenario.md - - name: Understand data resiliency - href: azure-stack-edge-gpu-data-resiliency.md + - name: FAQ - Operational guidelines + href: azure-stack-edge-operational-guidelines-faq.yml + - name: Shared security items: - name: Security overview @@ -496,7 +499,9 @@ href: azure-stack-edge-contact-microsoft-support.md - name: Shared release notes items: - - name: 2203 - Current + - name: 2205 - Current + href: azure-stack-edge-gpu-2205-release-notes.md + - name: 2203 href: azure-stack-edge-gpu-2203-release-notes.md - name: 2202 href: azure-stack-edge-gpu-2202-release-notes.md diff --git a/articles/databox-online/azure-stack-edge-gpu-2205-release-notes.md b/articles/databox-online/azure-stack-edge-gpu-2205-release-notes.md new file mode 100644 index 0000000000000..fce67b596b436 --- /dev/null +++ b/articles/databox-online/azure-stack-edge-gpu-2205-release-notes.md @@ -0,0 +1,86 @@ +--- +title: Azure Stack Edge 2205 release notes +description: Describes critical open issues and resolutions for the Azure Stack Edge running 2205 release. 
+services: databox +author: alkohli + +ms.service: databox +ms.subservice: edge +ms.topic: article +ms.date: 06/07/2022 +ms.author: alkohli +--- + +# Azure Stack Edge 2205 release notes + +[!INCLUDE [applies-to-GPU-and-pro-r-and-mini-r-skus](../../includes/azure-stack-edge-applies-to-gpu-pro-r-mini-r-sku.md)] + +The following release notes identify the critical open issues and the resolved issues for the 2205 release for your Azure Stack Edge devices. Features and issues that correspond to a specific model of Azure Stack Edge are called out wherever applicable. + +The release notes are continuously updated, and as critical issues requiring a workaround are discovered, they're added. Before you deploy your device, carefully review the information contained in the release notes. + +This article applies to the **Azure Stack Edge 2205** release, which maps to software version number **2.2.1983.5094**. This software can be applied to your device if you're running at least Azure Stack Edge 2106 (2.2.1636.3457) software. + +## What's new + +The 2205 release has the following features and enhancements: + +- **Kubernetes changes** - Beginning this release, compute enablement is moved to a dedicated Kubernetes page in the local UI. +- **Generation 2 virtual machines** - Starting this release, Generation 2 virtual machines can be deployed on Azure Stack Edge. For more information, see [Supported VM sizes and types](azure-stack-edge-gpu-virtual-machine-overview.md#operating-system-disks-and-images). +- **GPU extension update** - In this release, the GPU extension packages are updated. These updates will fix some issues that were encountered in a previous release during the installation of the extension. For more information, see how to [Update GPU extension of your Azure Stack Edge](azure-stack-edge-gpu-deploy-virtual-machine-install-gpu-extension.md). +- **No IP option** - Going forward, there's an option to not set an IP for a network interface on your Azure Stack Edge device. For more information, see [Configure network](azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy.md#configure-network). + + +## Issues fixed in 2205 release + +The following table lists the issues that were release noted in previous releases and fixed in the current release. + +| No. | Feature | Issue | +| --- | --- | --- | +|**1.**|GPU Extension installation | In the previous releases, there were issues that caused the GPU extension installation to fail. These issues are described in [Troubleshooting GPU extension issues](azure-stack-edge-gpu-troubleshoot-virtual-machine-gpu-extension-installation.md). These are fixed in the 2205 release and both the Windows and Linux installation packages are updated. More information on 2205 specific installation changes is covered in [Install GPU extension on your Azure Stack Edge device](azure-stack-edge-gpu-deploy-virtual-machine-install-gpu-extension.md). | +|**2.**|HPN VMs | In the previous release, the Standard_F12_HPN could only support one network interface and couldn't be used for Multi-Access Edge Computing (MEC) deployments. This issue is fixed in this release. | + +## Known issues in 2205 release + +The following table provides a summary of known issues in this release. + +| No. | Feature | Issue | Workaround/comments | +| --- | --- | --- | --- | +|**1.**|Preview features |For this release, the following features are available in preview:
                  - Clustering and Multi-Access Edge Computing (MEC) for Azure Stack Edge Pro GPU devices only.
                  - VPN for Azure Stack Edge Pro R and Azure Stack Edge Mini R only.
                  - Local Azure Resource Manager, VMs, Cloud management of VMs, Kubernetes cloud management, and Multi-process service (MPS) for Azure Stack Edge Pro GPU, Azure Stack Edge Pro R, and Azure Stack Edge Mini R. |These features will be generally available in later releases. | + +## Known issues from previous releases + +The following table provides a summary of known issues carried over from the previous releases. + +| No. | Feature | Issue | Workaround/comments | +| --- | --- | --- | --- | +| **1.** |Azure Stack Edge Pro + Azure SQL | Creating SQL database requires Administrator access. |Do the following steps instead of Steps 1-2 in [Create-the-sql-database](../iot-edge/tutorial-store-data-sql-server.md#create-the-sql-database).
                  - In the local UI of your device, enable compute interface. Select **Compute > Port # > Enable for compute > Apply.**
                  - Download `sqlcmd` on your client machine from [SQL command utility](/sql/tools/sqlcmd-utility).
                  - Connect to your compute interface IP address (the port that was enabled), adding a ",1401" to the end of the address.
                  - Final command will look like this: sqlcmd -S {Interface IP},1401 -U SA -P "Strong!Passw0rd". After this, steps 3-4 from the current documentation should be identical. | +| **2.** |Refresh| Incremental changes to blobs restored via **Refresh** are NOT supported |For Blob endpoints, partial updates of blobs after a Refresh, may result in the updates not getting uploaded to the cloud. For example, sequence of actions such as:
1. Create a blob in the cloud, or delete a previously uploaded blob from the device.
2. Refresh the blob from the cloud into the appliance by using the refresh functionality.
3. Update only a portion of the blob by using Azure SDK REST APIs. These actions can result in the updated sections of the blob not getting updated in the cloud.
                  **Workaround**: Use tools such as robocopy, or regular file copy through Explorer or command line, to replace entire blobs.| +|**3.**|Throttling|During throttling, if new writes to the device aren't allowed, writes by the NFS client fail with a "Permission Denied" error.| The error will show as below:
                  `hcsuser@ubuntu-vm:~/nfstest$ mkdir test`
                  mkdir: can't create directory 'test': Permission denied​| +|**4.**|Blob Storage ingestion|When using AzCopy version 10 for Blob storage ingestion, run AzCopy with the following argument: `Azcopy --cap-mbps 2000`| If these limits aren't provided for AzCopy, it could potentially send a large number of requests to the device, resulting in issues with the service.| +|**5.**|Tiered storage accounts|The following apply when using tiered storage accounts:
                  - Only block blobs are supported. Page blobs aren't supported.
                  - There's no snapshot or copy API support.
                  - Hadoop workload ingestion through `distcp` isn't supported as it uses the copy operation heavily.|| +|**6.**|NFS share connection|If multiple processes are copying to the same share, and the `nolock` attribute isn't used, you may see errors during the copy.​|The `nolock` attribute must be passed to the mount command to copy files to the NFS share. For example: `C:\Users\aseuser mount -o anon \\10.1.1.211\mnt\vms Z:`.| +|**7.**|Kubernetes cluster|When applying an update on your device that is running a Kubernetes cluster, the Kubernetes virtual machines will restart and reboot. In this instance, only pods that are deployed with replicas specified are automatically restored after an update. |If you have created individual pods outside a replication controller without specifying a replica set, these pods won't be restored automatically after the device update. You'll need to restore these pods.
                  A replica set replaces pods that are deleted or terminated for any reason, such as node failure or disruptive node upgrade. For this reason, we recommend that you use a replica set even if your application requires only a single pod.| +|**8.**|Kubernetes cluster|Kubernetes on Azure Stack Edge Pro is supported only with Helm v3 or later. For more information, go to [Frequently asked questions: Removal of Tiller](https://v3.helm.sh/docs/faq/).| +|**9.**|Kubernetes |Port 31000 is reserved for Kubernetes Dashboard. Port 31001 is reserved for Edge container registry. Similarly, in the default configuration, the IP addresses 172.28.0.1 and 172.28.0.10, are reserved for Kubernetes service and Core DNS service respectively.|Don't use reserved IPs.| +|**10.**|Kubernetes |Kubernetes doesn't currently allow multi-protocol LoadBalancer services. For example, a DNS service that would have to listen on both TCP and UDP. |To work around this limitation of Kubernetes with MetalLB, two services (one for TCP, one for UDP) can be created on the same pod selector. These services use the same sharing key and spec.loadBalancerIP to share the same IP address. IPs can also be shared if you have more services than available IP addresses.
                  For more information, see [IP address sharing](https://metallb.universe.tf/usage/#ip-address-sharing).| +|**11.**|Kubernetes cluster|Existing Azure IoT Edge marketplace modules may require modifications to run on IoT Edge on Azure Stack Edge device.|For more information, see [Run existing IoT Edge modules from Azure Stack Edge Pro FPGA devices on Azure Stack Edge Pro GPU device](azure-stack-edge-gpu-modify-fpga-modules-gpu.md).| +|**12.**|Kubernetes |File-based bind mounts aren't supported with Azure IoT Edge on Kubernetes on Azure Stack Edge device.|IoT Edge uses a translation layer to translate `ContainerCreate` options to Kubernetes constructs. Creating `Binds` maps to `hostpath` directory and thus file-based bind mounts can't be bound to paths in IoT Edge containers. If possible, map the parent directory.| +|**13.**|Kubernetes |If you bring your own certificates for IoT Edge and add those certificates on your Azure Stack Edge device after the compute is configured on the device, the new certificates aren't picked up.|To work around this problem, you should upload the certificates before you configure compute on the device. If the compute is already configured, [Connect to the PowerShell interface of the device and run IoT Edge commands](azure-stack-edge-gpu-connect-powershell-interface.md#use-iotedge-commands). Restart `iotedged` and `edgehub` pods.| +|**14.**|Certificates |In certain instances, certificate state in the local UI may take several seconds to update. |The following scenarios in the local UI may be affected.
                  - **Status** column in **Certificates** page.
                  - **Security** tile in **Get started** page.
                  - **Configuration** tile in **Overview** page. | +|**15.**|Certificates|Alerts related to signing chain certificates aren't removed from the portal even after uploading new signing chain certificates.| | +|**16.**|Web proxy |NTLM authentication-based web proxy isn't supported. || +|**17.**|Internet Explorer|If enhanced security features are enabled, you may not be able to access local web UI pages. | Disable enhanced security, and restart your browser.| +|**18.**|Kubernetes |Kubernetes doesn't support ":" in environment variable names that are used by .NET applications. This is also required for Event Grid IoT Edge module to function on Azure Stack Edge device and other applications. For more information, see [ASP.NET core documentation](/aspnet/core/fundamentals/configuration/?tabs=basicconfiguration#environment-variables).|Replace ":" by double underscore. For more information,see [Kubernetes issue](https://github.com/kubernetes/kubernetes/issues/53201)| +|**19.** |Azure Arc + Kubernetes cluster |By default, when resource `yamls` are deleted from the Git repository, the corresponding resources aren't deleted from the Kubernetes cluster. |To allow the deletion of resources when they're deleted from the git repository, set `--sync-garbage-collection` in Arc OperatorParams. For more information, see [Delete a configuration](../azure-arc/kubernetes/tutorial-use-gitops-connected-cluster.md#additional-parameters). | +|**20.**|NFS |Applications that use NFS share mounts on your device to write data should use Exclusive write. That ensures the writes are written to the disk.| | +|**21.**|Compute configuration |Compute configuration fails in network configurations where gateways or switches or routers respond to Address Resolution Protocol (ARP) requests for systems that don't exist on the network.| | +|**22.**|Compute and Kubernetes |If Kubernetes is set up first on your device, it claims all the available GPUs. Hence, it isn't possible to create Azure Resource Manager VMs using GPUs after setting up the Kubernetes. |If your device has 2 GPUs, then you can create one VM that uses the GPU and then configure Kubernetes. In this case, Kubernetes will use the remaining available one GPU. | +|**23.**|Custom script VM extension |There's a known issue in the Windows VMs that were created in an earlier release and the device was updated to 2103.
                  If you add a custom script extension on these VMs, the Windows VM Guest Agent (Version 2.7.41491.901 only) gets stuck in the update causing the extension deployment to time out. | To work around this issue:
                  - Connect to the Windows VM using remote desktop protocol (RDP).
                  - Make sure that the `waappagent.exe` is running on the machine: `Get-Process WaAppAgent`.
                  - If the `waappagent.exe` isn't running, restart the `rdagent` service: `Get-Service RdAgent` \| `Restart-Service`. Wait for 5 minutes.
                  - While the `waappagent.exe` is running, kill the `WindowsAzureGuest.exe` process.
                  - After you kill the process, the process starts running again with the newer version.
                  - Verify that the Windows VM Guest Agent version is 2.7.41491.971 using this command: `Get-Process WindowsAzureGuestAgent` \| `fl ProductVersion`.
                  - [Set up custom script extension on Windows VM](azure-stack-edge-gpu-deploy-virtual-machine-custom-script-extension.md). | +|**24.**|Multi-Process Service (MPS) |When the device software and the Kubernetes cluster are updated, the MPS setting isn't retained for the workloads. |[Re-enable MPS](azure-stack-edge-gpu-connect-powershell-interface.md#connect-to-the-powershell-interface) and redeploy the workloads that were using MPS. | +|**25.**|Wi-Fi |Wi-Fi doesn't work on Azure Stack Edge Pro 2 in this release. | This functionality may be available in a future release. | + + +## Next steps + +- [Update your device](azure-stack-edge-gpu-install-update.md) diff --git a/articles/databox-online/azure-stack-edge-gpu-connect-resource-manager.md b/articles/databox-online/azure-stack-edge-gpu-connect-resource-manager.md index 82bf03299da02..5338ba1f7bbe6 100644 --- a/articles/databox-online/azure-stack-edge-gpu-connect-resource-manager.md +++ b/articles/databox-online/azure-stack-edge-gpu-connect-resource-manager.md @@ -458,7 +458,7 @@ Set the Azure Resource Manager environment and verify that your device to client ---- -------------------- ------------------------- AzASE https://management.myasegpu.wdshcsso.com/ https://login.myasegpu.wdshcsso.c... ``` - For more information, go to [Set-AzEnvironment](/powershell/module/azurerm.profile/set-azurermenvironment?view=azurermps-6.13.0&preserve-view=true). + For more information, go to [Set-AzEnvironment](/powershell/module/az.accounts/set-azenvironment?view=azps-7.5.0). - Define the environment inline for every cmdlet that you execute. This ensures that all the API calls are going through the correct environment. By default, the calls would go through the Azure public but you want these to go through the environment that you set for Azure Stack Edge device. diff --git a/articles/databox-online/azure-stack-edge-gpu-create-virtual-machine-image.md b/articles/databox-online/azure-stack-edge-gpu-create-virtual-machine-image.md index dada75a2d332e..8911b4ba01696 100644 --- a/articles/databox-online/azure-stack-edge-gpu-create-virtual-machine-image.md +++ b/articles/databox-online/azure-stack-edge-gpu-create-virtual-machine-image.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: how-to -ms.date: 07/16/2021 +ms.date: 05/19/2022 ms.author: alkohli #Customer intent: As an IT admin, I need to understand how to create Azure VM images that I can use to deploy virtual machines on my Azure Stack Edge Pro GPU device. --- @@ -39,9 +39,9 @@ Do the following steps to create a Windows VM image: 1. Create a Windows virtual machine in Azure. For portal instructions, see [Create a Windows virtual machine in the Azure portal](../virtual-machines/windows/quick-create-portal.md). For PowerShell instructions, see [Tutorial: Create and manage Windows VMs with Azure PowerShell](../virtual-machines/windows/tutorial-manage-vm.md). - The virtual machine must be a Generation 1 VM. The OS disk that you use to create your VM image must be a fixed-size VHD of any size that Azure supports. For VM size options, see [Supported VM sizes](azure-stack-edge-gpu-virtual-machine-sizes.md#supported-vm-sizes). + The virtual machine can be a Generation 1 or Generation 2 VM. The OS disk that you use to create your VM image must be a fixed-size VHD of any size that Azure supports. For VM size options, see [Supported VM sizes](azure-stack-edge-gpu-virtual-machine-sizes.md#supported-vm-sizes). 
- You can use any Windows Gen1 VM with a fixed-size VHD in Azure Marketplace. For a list Azure Marketplace images that could work, see [Commonly used Azure Marketplace images for Azure Stack Edge](azure-stack-edge-gpu-create-virtual-machine-marketplace-image.md#commonly-used-marketplace-images). + You can use any Windows Gen1 or Gen2 VM with a fixed-size VHD in Azure Marketplace. For a list Azure Marketplace images that could work, see [Commonly used Azure Marketplace images for Azure Stack Edge](azure-stack-edge-gpu-create-virtual-machine-marketplace-image.md#commonly-used-marketplace-images). 2. Generalize the virtual machine. To generalize the VM, [connect to the virtual machine](azure-stack-edge-gpu-deploy-virtual-machine-powershell.md#connect-to-a-windows-vm), open a command prompt, and run the following `sysprep` command: @@ -59,7 +59,7 @@ Do the following steps to create a Linux VM image: 1. Create a Linux virtual machine in Azure. For portal instructions, see [Quickstart: Create a Linux VM in the Azure portal](../virtual-machines/linux/quick-create-portal.md). For PowerShell instructions, see [Quickstart: Create a Linux VM in Azure with PowerShell](../virtual-machines/linux/quick-create-powershell.md). - You can use any Gen1 VM with a fixed-size VHD in Azure Marketplace to create Linux custom images, with the exception of Red Hat Enterprise Linux (RHEL) images, which require extra steps. For a list of Azure Marketplace images that could work, see [Commonly used Azure Marketplace images for Azure Stack Edge](azure-stack-edge-gpu-create-virtual-machine-marketplace-image.md#commonly-used-marketplace-images). For guidance on RHEL images, see [Using RHEL BYOS images](#using-rhel-byos-images), below. + You can use any Gen1 or Gen2 VM with a fixed-size VHD in Azure Marketplace to create Linux custom images. This excludes Red Hat Enterprise Linux (RHEL) images, which require extra steps and can only be used to create a Gen1 VM image. For a list of Azure Marketplace images that could work, see [Commonly used Azure Marketplace images for Azure Stack Edge](azure-stack-edge-gpu-create-virtual-machine-marketplace-image.md#commonly-used-marketplace-images). For guidance on RHEL images, see [Using RHEL BYOS images](#using-rhel-byos-images), below. 1. Deprovision the VM. Use the Azure VM agent to delete machine-specific files and data. Use the `waagent` command with the `-deprovision+user` parameter on your source Linux VM. For more information, see [Understanding and using Azure Linux Agent](../virtual-machines/extensions/agent-linux.md). diff --git a/articles/databox-online/azure-stack-edge-gpu-create-virtual-machine-marketplace-image.md b/articles/databox-online/azure-stack-edge-gpu-create-virtual-machine-marketplace-image.md index 7968a8ae6ffb5..c2c72dde2005e 100644 --- a/articles/databox-online/azure-stack-edge-gpu-create-virtual-machine-marketplace-image.md +++ b/articles/databox-online/azure-stack-edge-gpu-create-virtual-machine-marketplace-image.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: how-to -ms.date: 06/14/2021 +ms.date: 05/24/2022 ms.author: alkohli #Customer intent: As an IT admin, I need to understand how to create and upload Azure VM images that I can use with my Azure Stack Edge Pro device so that I can deploy VMs on the device. --- @@ -130,9 +130,6 @@ PS /home/user> az vm image list --all --publisher "Canonical" --offer "UbuntuSer PS /home/user> ``` ->[!IMPORTANT] -> Use only the Gen 1 images. 
Any images specified as Gen 2 (usually the sku has a "-g2" suffix), do not work on Azure Stack Edge. - In this example, we will select Windows Server 2019 Datacenter Core, version 2019.0.20190410. We will identify this image by its Universal Resource Number (“URN”). :::image type="content" source="media/azure-stack-edge-create-virtual-machine-marketplace-image/marketplace-image-1.png" alt-text="List of marketplace images"::: diff --git a/articles/databox-online/azure-stack-edge-gpu-create-virtual-switch-powershell.md b/articles/databox-online/azure-stack-edge-gpu-create-virtual-switch-powershell.md index d521a8702c218..2323f0490f3ea 100644 --- a/articles/databox-online/azure-stack-edge-gpu-create-virtual-switch-powershell.md +++ b/articles/databox-online/azure-stack-edge-gpu-create-virtual-switch-powershell.md @@ -32,7 +32,7 @@ Before you begin, make sure that: The client machine should be running a [Supported OS](azure-stack-edge-gpu-system-requirements.md#supported-os-for-clients-connected-to-device). -- Use the local UI to enable compute on one of the physical network interfaces on your device as per the instructions in [Enable compute network](azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy.md#configure-virtual-switches-and-compute-ips) on your device. +- Use the local UI to enable compute on one of the physical network interfaces on your device as per the instructions in [Enable compute network](azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy.md#configure-virtual-switches) on your device. ## Connect to the PowerShell interface diff --git a/articles/databox-online/azure-stack-edge-gpu-deploy-configure-compute.md b/articles/databox-online/azure-stack-edge-gpu-deploy-configure-compute.md index 0b3994fd81acc..8f6df1434bbfc 100644 --- a/articles/databox-online/azure-stack-edge-gpu-deploy-configure-compute.md +++ b/articles/databox-online/azure-stack-edge-gpu-deploy-configure-compute.md @@ -33,7 +33,7 @@ In this tutorial, you learn how to: Before you set up a compute role on your Azure Stack Edge Pro device: - Make sure that you've activated your Azure Stack Edge Pro device as described in [Activate Azure Stack Edge Pro](azure-stack-edge-gpu-deploy-activate.md). -- Make sure that you've followed the instructions in [Enable compute network](azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy.md#configure-virtual-switches-and-compute-ips) and: +- Make sure that you've followed the instructions in [Enable compute network](azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy.md#configure-virtual-switches) and: - Enabled a network interface for compute. - Assigned Kubernetes node IPs and Kubernetes external service IPs. diff --git a/articles/databox-online/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy.md b/articles/databox-online/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy.md index 946319a2987c7..26357c90a6543 100644 --- a/articles/databox-online/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy.md +++ b/articles/databox-online/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: tutorial -ms.date: 04/06/2022 +ms.date: 05/24/2022 ms.author: alkohli zone_pivot_groups: azure-stack-edge-device-deployment # Customer intent: As an IT admin, I need to understand how to connect and activate Azure Stack Edge Pro so I can use it to transfer data to Azure. 
@@ -86,7 +86,7 @@ Follow these steps to configure the network for your device. ![Screenshot of local web UI "Network" tile for one node.](./media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-1.png) - On your physical device, there are six network interfaces. PORT 1 and PORT 2 are 1-Gbps network interfaces. PORT 3, PORT 4, PORT 5, and PORT 6 are all 25-Gbps network interfaces that can also serve as 10-Gbps network interfaces. PORT 1 is automatically configured as a management-only port, and PORT 2 to PORT 6 are all data ports. For a new device, the **Network settings** page is as shown below. + On your physical device, there are six network interfaces. PORT 1 and PORT 2 are 1-Gbps network interfaces. PORT 3, PORT 4, PORT 5, and PORT 6 are all 25-Gbps network interfaces that can also serve as 10-Gbps network interfaces. PORT 1 is automatically configured as a management-only port, and PORT 2 to PORT 6 are all data ports. For a new device, the **Network** page is as shown below. ![Screenshot of local web UI "Network" page for one node.](./media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-2a.png) @@ -100,6 +100,10 @@ Follow these steps to configure the network for your device. ![Screenshot of local web UI "Port 3 Network settings" for one node.](./media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-4.png) + - By default for all the ports, it is expected that you'll set an IP. If you decide not to set an IP for a network interface on your device, you can set the IP to **No** and then **Modify** the settings. + + ![Screenshot of local web UI "Port 2 Network settings" for one node.](./media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/set-ip-no.png) + As you configure the network settings, keep in mind: * Make sure that Port 5 and Port 6 are connected for Network Function Manager deployments. For more information, see [Tutorial: Deploy network functions on Azure Stack Edge (Preview)](../network-function-manager/deploy-functions.md). @@ -107,7 +111,7 @@ Follow these steps to configure the network for your device. * If DHCP isn't enabled, you can assign static IPs if needed. * You can configure your network interface as IPv4. * Serial number for any port corresponds to the node serial number. - + > [!NOTE] > If you need to connect to your device from an outside network, see [Enable device access from outside network](azure-stack-edge-gpu-manage-access-power-connectivity-mode.md#enable-device-access-from-outside-network) for additional network settings. @@ -123,12 +127,12 @@ Follow these steps to configure the network for your device. After you have configured and applied the network settings, select **Next: Advanced networking** to configure compute network. -## Configure virtual switches and compute IPs +## Configure virtual switches -Follow these steps to enable compute on a virtual switch and configure virtual networks. +Follow these steps to add or delete virtual switches and virtual networks. 1. In the local UI, go to **Advanced networking** page. -1. In the **Virtual switch** section, you'll assign compute intent to a virtual switch. Select **Add virtual switch** to create a new switch. +1. In the **Virtual switch** section, you'll add or delete virtual switches. Select **Add virtual switch** to create a new switch. 
![Screenshot of "Advanced networking" page in local UI for one node with Add virtual switch selected.](./media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-1.png) @@ -137,40 +141,17 @@ Follow these steps to enable compute on a virtual switch and configure virtual n 1. Provide a name for your virtual switch. 1. Choose the network interface on which the virtual switch should be created. 1. If deploying 5G workloads, set **Supports accelerated networking** to **Yes**. - 1. Select the intent to associate with this network interface as **compute**. Alternatively, the switch can be used for management traffic as well. You can't configure storage intent as storage traffic was already configured based on the network topology that you selected earlier. - - > [!TIP] - > Use *CTRL + Click* to select more than one intent for your virtual switch. - -1. Assign **Kubernetes node IPs**. These static IP addresses are for the Kubernetes VMs. - - For an *n*-node device, a contiguous range of a minimum of *n+1* IPv4 addresses (or more) are provided for the compute VM using the start and end IP addresses. For a 1-node device, provide a minimum of 2 contiguous IPv4 addresses. - - > [!IMPORTANT] - > - Kubernetes on Azure Stack Edge uses 172.27.0.0/16 subnet for pod and 172.28.0.0/16 subnet for service. Make sure that these are not in use in your network. If these subnets are already in use in your network, you can change these subnets by running the `Set-HcsKubeClusterNetworkInfo` cmdlet from the PowerShell interface of the device. For more information, see [Change Kubernetes pod and service subnets](azure-stack-edge-gpu-connect-powershell-interface.md#change-kubernetes-pod-and-service-subnets). - > - DHCP mode is not supported for Kubernetes node IPs. If you plan to deploy IoT Edge/Kubernetes, you must assign static Kubernetes IPs and then enable IoT role. This will ensure that static IPs are assigned to Kubernetes node VMs. - > - If your datacenter firewall is restricting or filtering traffic based on source IPs or MAC addresses, make sure that the compute IPs (Kubernetes node IPs) and MAC addresses are on the allowed list. The MAC addresses can be specified by running the `Set-HcsMacAddressPool` cmdlet on the PowerShell interface of the device. - -1. Assign **Kubernetes external service IPs**. These are also the load-balancing IP addresses. These contiguous IP addresses are for services that you want to expose outside of the Kubernetes cluster and you specify the static IP range depending on the number of services exposed. - - > [!IMPORTANT] - > We strongly recommend that you specify a minimum of 1 IP address for Azure Stack Edge Hub service to access compute modules. You can then optionally specify additional IP addresses for other services/IoT Edge modules (1 per service/module) that need to be accessed from outside the cluster. The service IP addresses can be updated later. - -1. Select **Apply**. - - ![Screenshot of "Advanced networking" page in local UI with fully configured Add virtual switch blade for one node.](./media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-2.png) - -1. The configuration takes a couple minutes to apply and you may need to refresh the browser. You can see that the specified virtual switch is created and enabled for compute. + 1. Select **Apply**. You can see that the specified virtual switch is created. 
![Screenshot of "Advanced networking" page with virtual switch added and enabled for compute in local UI for one node.](./media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-3.png) +1. You can create more than one switch by following the steps described earlier. +1. To delete a virtual switch, under the **Virtual switch** section, select **Delete virtual switch**. When a virtual switch is deleted, the associated virtual networks will also be deleted. -To delete a virtual switch, under the **Virtual switch** section, select **Delete virtual switch**. When a virtual switch is deleted, the associated virtual networks will also be deleted. +You can now create virtual networks and associate with the virtual switches you created. -> [!IMPORTANT] -> Only one virtual switch can be assigned for compute. -### Configure virtual network +## Configure virtual networks You can add or delete virtual networks associated with your virtual switches. To add a virtual switch, follow these steps: @@ -181,13 +162,44 @@ You can add or delete virtual networks associated with your virtual switches. To 1. Provide a **Name** for your virtual network. 1. Enter a **VLAN ID** as a unique number in 1-4094 range. The VLAN ID that you provide should be in your trunk configuration. For more information on trunk configuration for your switch, refer to the instructions from your physical switch manufacturer. 1. Specify the **Subnet mask** and **Gateway** for your virtual LAN network as per the physical network configuration. - 1. Select **Apply**. + 1. Select **Apply**. A virtual network is created on the specified virtual switch. ![Screenshot of how to add virtual network in "Advanced networking" page in local UI for one node.](./media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/add-virtual-network-one-node-1.png) -To delete a virtual network, under the **Virtual network** section, select **Delete virtual network**. +1. To delete a virtual network, under the **Virtual network** section, select **Delete virtual network** and select the virtual network you want to delete. + +1. Select **Next: Kubernetes >** to next configure your compute IPs for Kubernetes. + + +## Configure compute IPs + +Follow these steps to configure compute IPs for your Kubernetes workloads. + +1. In the local UI, go to the **Kubernetes** page. + +1. From the dropdown select a virtual switch that you will use for Kubernetes compute traffic. + +1. Assign **Kubernetes node IPs**. These static IP addresses are for the Kubernetes VMs. + + For an *n*-node device, a contiguous range of a minimum of *n+1* IPv4 addresses (or more) are provided for the compute VM using the start and end IP addresses. For a 1-node device, provide a minimum of 2 contiguous IPv4 addresses. + + > [!IMPORTANT] + > - Kubernetes on Azure Stack Edge uses 172.27.0.0/16 subnet for pod and 172.28.0.0/16 subnet for service. Make sure that these are not in use in your network. If these subnets are already in use in your network, you can change these subnets by running the `Set-HcsKubeClusterNetworkInfo` cmdlet from the PowerShell interface of the device. For more information, see [Change Kubernetes pod and service subnets](azure-stack-edge-gpu-connect-powershell-interface.md#change-kubernetes-pod-and-service-subnets). + > - DHCP mode is not supported for Kubernetes node IPs. If you plan to deploy IoT Edge/Kubernetes, you must assign static Kubernetes IPs and then enable IoT role. 
This will ensure that static IPs are assigned to Kubernetes node VMs. + > - If your datacenter firewall is restricting or filtering traffic based on source IPs or MAC addresses, make sure that the compute IPs (Kubernetes node IPs) and MAC addresses are on the allowed list. The MAC addresses can be specified by running the `Set-HcsMacAddressPool` cmdlet on the PowerShell interface of the device. + +1. Assign **Kubernetes external service IPs**. These are also the load-balancing IP addresses. These contiguous IP addresses are for services that you want to expose outside of the Kubernetes cluster and you specify the static IP range depending on the number of services exposed. + + > [!IMPORTANT] + > We strongly recommend that you specify a minimum of 1 IP address for Azure Stack Edge Hub service to access compute modules. You can then optionally specify additional IP addresses for other services/IoT Edge modules (1 per service/module) that need to be accessed from outside the cluster. The service IP addresses can be updated later. + +1. Select **Apply**. + + ![Screenshot of "Advanced networking" page in local UI with fully configured Add virtual switch blade for one node.](./media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-2.png) -Select **Next: Web proxy** to configure web proxy. +1. The configuration takes a couple minutes to apply and you may need to refresh the browser. + +1. Select **Next: Web proxy** to configure web proxy. ::: zone-end @@ -227,6 +239,10 @@ To configure the network for a 2-node device, follow these steps on the first no ![Local web UI "Advanced networking" page for a new device 2](./media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-network-settings-1m.png) + By default for all the ports, it is expected that you'll set an IP. If you decide not to set an IP for a network interface on your device, you can set the IP to **No** and then **Modify** the settings. + + ![Screenshot of local web UI "Port 2 Network settings" for one node.](./media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/set-ip-no.png) + As you configure the network settings, keep in mind: * Make sure that Port 5 and Port 6 are connected for Network Function Manager deployments. For more information, see [Tutorial: Deploy network functions on Azure Stack Edge (Preview)](../network-function-manager/deploy-functions.md). @@ -412,58 +428,38 @@ For clients connecting via NFS protocol to the two-node device, follow these ste > [!NOTE] > Virtual IP settings are required. If you do not configure this IP, you will be blocked when configuring the **Device settings** in the next step. -### Configure virtual switches and compute IPs +### Configure virtual switches -After the cluster is formed and configured, you'll now create new virtual switches or assign intent to the existing virtual switches that are created based on the selected network topology. +After the cluster is formed and configured, you can now create new virtual switches. > [!IMPORTANT] > On a two-node cluster, compute should only be configured on a virtual switch. 1. In the local UI, go to **Advanced networking** page. -1. In the **Virtual switch** section, you'll assign compute intent to a virtual switch. You can select an existing virtual switch or select **Add virtual switch** to create a new switch. +1. In the **Virtual switch** section, add or delete virtual switches. Select **Add virtual switch** to create a new switch. 
![Configure compute page in Advanced networking in local UI 1](./media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-1.png) 1. In the **Network settings** blade, if using a new switch, provide the following: - 1. Provide a name for your virtual switch. + 1. Provide a name for your virtual switch. 1. Choose the network interface on which the virtual switch should be created. 1. If deploying 5G workloads, set **Supports accelerated networking** to **Yes**. - 1. Select the intent to associate with this network interface as **compute**. Alternatively, the switch can be used for management traffic as well. You can't configure storage intent as storage traffic was already configured based on the network topology that you selected earlier. - - > [!TIP] - > Use *CTRL + Click* to select more than one intent for your virtual switch. - -1. Assign **Kubernetes node IPs**. These static IP addresses are for the Kubernetes VMs. - - For an *n*-node device, a contiguous range of a minimum of *n+1* IPv4 addresses (or more) are provided for the compute VM using the start and end IP addresses. For a 1-node device, provide a minimum of 2 contiguous IPv4 addresses. For a two-node cluster, provide a minimum of 3 contiguous IPv4 addresses. - - > [!IMPORTANT] - > - Kubernetes on Azure Stack Edge uses 172.27.0.0/16 subnet for pod and 172.28.0.0/16 subnet for service. Make sure that these are not in use in your network. If these subnets are already in use in your network, you can change these subnets by running the `Set-HcsKubeClusterNetworkInfo` cmdlet from the PowerShell interface of the device. For more information, see [Change Kubernetes pod and service subnets](azure-stack-edge-gpu-connect-powershell-interface.md#change-kubernetes-pod-and-service-subnets). - > - DHCP mode is not supported for Kubernetes node IPs. If you plan to deploy IoT Edge/Kubernetes, you must assign static Kubernetes IPs and then enable IoT role. This will ensure that static IPs are assigned to Kubernetes node VMs. - -1. Assign **Kubernetes external service IPs**. These are also the load-balancing IP addresses. These contiguous IP addresses are for services that you want to expose outside of the Kubernetes cluster and you specify the static IP range depending on the number of services exposed. - - > [!IMPORTANT] - > We strongly recommend that you specify a minimum of 1 IP address for Azure Stack Edge Hub service to access compute modules. You can then optionally specify additional IP addresses for other services/IoT Edge modules (1 per service/module) that need to be accessed from outside the cluster. The service IP addresses can be updated later. - -1. Select **Apply**. + 1. Select **Apply**. - ![Configure compute page in Advanced networking in local UI 2](./media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-2.png) +1. The configuration will take a couple minutes to apply and once the virtual switch is created, the list of virtual switches updates to reflect the newly created switch. You can see that the specified virtual switch is created and enabled for compute. -1. The configuration takes a couple minutes to apply and you may need to refresh the browser. You can see that the specified virtual switch is created and enabled for compute. - ![Configure compute page in Advanced networking in local UI 3](./media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-3.png) +1. 
You can create more than one switch by following the steps described earlier. -To delete a virtual switch, under the **Virtual switch** section, select **Delete virtual switch**. When a virtual switch is deleted, the associated virtual networks will also be deleted. +1. To delete a virtual switch, under the **Virtual switch** section, select **Delete virtual switch**. When a virtual switch is deleted, the associated virtual networks will also be deleted. -> [!IMPORTANT] -> Only one virtual switch can be assigned for compute. +You can next create and associate virtual networks with your virtual switches. ### Configure virtual network -You can add or delete virtual networks associated with your virtual switches. To add a virtual switch, follow these steps: +You can add or delete virtual networks associated with your virtual switches. To add a virtual network, follow these steps: 1. In the local UI on the **Advanced networking** page, under the **Virtual network** section, select **Add virtual network**. 1. In the **Add virtual network** blade, input the following information: @@ -474,8 +470,39 @@ You can add or delete virtual networks associated with your virtual switches. To 1. Specify the **Subnet mask** and **Gateway** for your virtual LAN network as per the physical network configuration. 1. Select **Apply**. + ![Screenshot of how to add virtual network in "Advanced networking" page in local UI for a two-node device.](./media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/add-virtual-network-one-node-1.png) + +1. To delete a virtual network, under the **Virtual network** section, select **Delete virtual network** and select the virtual network you want to delete. + +Select **Next: Kubernetes >** to next configure your compute IPs for Kubernetes. + + + +## Configure compute IPs + +After the virtual switches are created, you can enable these switches for Kubernetes compute traffic. + +1. In the local UI, go to the **Kubernetes** page. +1. From the dropdown list, select the virtual switch you want to enable for Kubernetes compute traffic. +1. Assign **Kubernetes node IPs**. These static IP addresses are for the Kubernetes VMs. + + For an *n*-node device, a contiguous range of a minimum of *n+1* IPv4 addresses (or more) are provided for the compute VM using the start and end IP addresses. For a 1-node device, provide a minimum of 2 contiguous IPv4 addresses. For a two-node cluster, provide a minimum of 3 contiguous IPv4 addresses. + + > [!IMPORTANT] + > - Kubernetes on Azure Stack Edge uses 172.27.0.0/16 subnet for pod and 172.28.0.0/16 subnet for service. Make sure that these are not in use in your network. If these subnets are already in use in your network, you can change these subnets by running the `Set-HcsKubeClusterNetworkInfo` cmdlet from the PowerShell interface of the device. For more information, see [Change Kubernetes pod and service subnets](azure-stack-edge-gpu-connect-powershell-interface.md#change-kubernetes-pod-and-service-subnets). + > - DHCP mode is not supported for Kubernetes node IPs. If you plan to deploy IoT Edge/Kubernetes, you must assign static Kubernetes IPs and then enable IoT role. This will ensure that static IPs are assigned to Kubernetes node VMs. + +1. Assign **Kubernetes external service IPs**. These are also the load-balancing IP addresses. These contiguous IP addresses are for services that you want to expose outside of the Kubernetes cluster and you specify the static IP range depending on the number of services exposed.
+ + > [!IMPORTANT] + > We strongly recommend that you specify a minimum of 1 IP address for Azure Stack Edge Hub service to access compute modules. You can then optionally specify additional IP addresses for other services/IoT Edge modules (1 per service/module) that need to be accessed from outside the cluster. The service IP addresses can be updated later. + +1. Select **Apply**. + + ![Configure compute page in Advanced networking in local UI 2](./media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-2.png) + +1. The configuration takes a couple minutes to apply and you may need to refresh the browser. -To delete a virtual network, under the **Virtual network** section, select **Delete virtual network**. ::: zone-end @@ -493,7 +520,7 @@ This is an optional configuration. Although web proxy configuration is optional, 2. To validate and apply the configured web proxy settings, select **Apply**. - ![Local web UI "Web proxy settings" page 2](./media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-web-proxy-1.png) + ![Local web UI "Web proxy settings" page 2](./media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-web-proxy-1.png). 1. After the settings are applied, select **Next: Device**. diff --git a/articles/databox-online/azure-stack-edge-gpu-deploy-gpu-virtual-machine.md b/articles/databox-online/azure-stack-edge-gpu-deploy-gpu-virtual-machine.md index e756a4bbf22c8..de55154f2f68a 100644 --- a/articles/databox-online/azure-stack-edge-gpu-deploy-gpu-virtual-machine.md +++ b/articles/databox-online/azure-stack-edge-gpu-deploy-gpu-virtual-machine.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: how-to -ms.date: 08/03/2021 +ms.date: 05/26/2022 ms.author: alkohli #Customer intent: As an IT admin, I want the flexibility to deploy a single GPU virtual machine (VM) quickly in the portal or use templates to deploy and manage multiple GPU VMs efficiently on my Azure Stack Edge Pro GPU device. --- @@ -25,11 +25,13 @@ Use the Azure portal to quickly deploy a single GPU VM. You can install the GPU You can deploy a GPU VM via the portal or using Azure Resource Manager templates. -For a list of supported operating systems, drivers, and VM sizes for GPU VMs, see [What are GPU virtual machines?](azure-stack-edge-gpu-overview-gpu-virtual-machines.md). For deployment considerations, see [GPU VMs and Kubernetes](azure-stack-edge-gpu-overview-gpu-virtual-machines.md#gpu-vms-and-kubernetes). +For a list of supported operating systems, drivers, and VM sizes for GPU VMs, see [What are GPU virtual machines?](azure-stack-edge-gpu-overview-gpu-virtual-machines.md) For deployment considerations, see [GPU VMs and Kubernetes](azure-stack-edge-gpu-overview-gpu-virtual-machines.md#gpu-vms-and-kubernetes). > [!IMPORTANT] -> If your device will be running Kubernetes, do not configure Kubernetes before you deploy your GPU VMs. If you configure Kubernetes first, it claims all the available GPU resources, and GPU VM creation will fail. For Kubernetes deployment considerations on 1-GPU and 2-GPU devices, see [GPU VMs and Kubernetes](azure-stack-edge-gpu-overview-gpu-virtual-machines.md#gpu-vms-and-kubernetes). +> - Gen2 VMs are not supported for GPU. +> - If your device will be running Kubernetes, do not configure Kubernetes before you deploy your GPU VMs. If you configure Kubernetes first, it claims all the available GPU resources, and GPU VM creation will fail. 
For Kubernetes deployment considerations on 1-GPU and 2-GPU devices, see [GPU VMs and Kubernetes](azure-stack-edge-gpu-overview-gpu-virtual-machines.md#gpu-vms-and-kubernetes). +> - If you're running a Windows 2016 VHD, you must enable TLS 1.2 inside the VM before you install the GPU extension on 2205 and higher. For detailed steps, see [Troubleshoot GPU extension issues for GPU VMs on Azure Stack Edge Pro GPU](azure-stack-edge-gpu-troubleshoot-virtual-machine-gpu-extension-installation.md#failure-to-install-gpu-extension-on-a-windows-2016-vhd). ### [Portal](#tab/portal) diff --git a/articles/databox-online/azure-stack-edge-gpu-deploy-install.md b/articles/databox-online/azure-stack-edge-gpu-deploy-install.md index 8ddf5f7126639..cab2c40a78f2c 100644 --- a/articles/databox-online/azure-stack-edge-gpu-deploy-install.md +++ b/articles/databox-online/azure-stack-edge-gpu-deploy-install.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: tutorial -ms.date: 11/11/2021 +ms.date: 05/17/2022 ms.author: alkohli zone_pivot_groups: azure-stack-edge-device-deployment # Customer intent: As an IT admin, I need to understand how to install Azure Stack Edge Pro in datacenter so I can use it to transfer data to Azure. @@ -196,7 +196,7 @@ Before you start cabling your device, you need the following things: - At least one 1-GbE RJ-45 network cable to connect to the management interface. There are two 1-GbE network interfaces, one management and one data, on the device. - One 25/10-GbE SFP+ copper cable for each data network interface to be configured. At least one data network interface from among PORT 2, PORT 3, PORT 4, PORT 5, or PORT 6 needs to be connected to the Internet (with connectivity to Azure). - Access to two power distribution units (recommended). -- At least one 1-GbE network switch to connect a 1-GbE network interface to the Internet for data. The local web UI will not be accessible if the connected switch is not at least 1 GbE. If using 25/10-GbE interface for data, you will need a 25-GbE or 10-GbE switch. +- At least one 1-GbE network switch to connect a 1-GbE network interface to the Internet for data. The local web UI won't be accessible if the connected switch isn't at least 1 GbE. If using 25/10-GbE interface for data, you'll need a 25-GbE or 10-GbE switch. > [!NOTE] > - If you are connecting only one data network interface, we recommend that you use a 25/10-GbE network interface such as PORT 3, PORT 4, PORT 5, or PORT 6 to send data to Azure. @@ -210,10 +210,10 @@ Before you start cabling your device, you need the following things: Before you start cabling your device, you need the following things: - Both of your Azure Stack Edge physical devices, unpacked, and rack mounted. -- 4 power cables, 2 for each device node. +- Four power cables, two for each device node. - At least two 1-GbE RJ-45 network cables to connect Port 1 on each device node for initial configuration. - At least two 1-GbE RJ-45 network cables to connect Port 2 on each device node to the internet (with connectivity to Azure). -- 25/10-GbE SFP+ copper cables for Port 3 and Port 4 to be configured. Additional 25/10-GbR SFP+ copper cables if you will also connect Port 5 and Port 6. Port 5 and Port 6 must be connected if you intend to [Deploy network functions on Azure Stack Edge](../network-function-manager/deploy-functions.md). +- 25/10-GbE SFP+ copper cables for Port 3 and Port 4 to be configured. Additional 25/10-GbR SFP+ copper cables if you'll also connect Port 5 and Port 6. 
Port 5 and Port 6 must be connected if you intend to [Deploy network functions on Azure Stack Edge](../network-function-manager/deploy-functions.md). - 25-GbE or 10-GbE switches if opting for a switched network topology. See [Supported network topologies](azure-stack-edge-gpu-clustering-overview.md). - Access to two power distribution units (recommended). @@ -253,7 +253,10 @@ The backplane of Azure Stack Edge device: For a full list of supported cables, switches, and transceivers for these network adapter cards, see: - [`Qlogic` Cavium 25G NDC adapter interoperability matrix](https://www.marvell.com/documents/xalflardzafh32cfvi0z/). -- 25 GbE and 10 GbE cables and modules in [Mellanox dual port 25G ConnectX-4 channel network adapter compatible products](https://docs.mellanox.com/display/ConnectX4LxFirmwarev14271016/Firmware+Compatible+Products). +- 25 GbE and 10 GbE cables and modules in [Mellanox dual port 25G ConnectX-4 channel network adapter compatible products](https://docs.mellanox.com/display/ConnectX4LxFirmwarev14271016/Firmware+Compatible+Products). + +> [!NOTE] +> Using USB ports to connect any external device, including keyboards and monitors, is not supported for Azure Stack Edge devices. ### Power cabling @@ -350,7 +353,7 @@ Use this configuration when you need port level redundancy through teaming. #### Connect Port 3 via switch -Use this configuration if you need an extra port for workload traffic and port level redundancy is not required. +Use this configuration if you need an extra port for workload traffic and port level redundancy isn't required. ![Back plane of clustered device cabled for networking with switches and without NIC teaming](./media/azure-stack-edge-gpu-deploy-install/backplane-clustered-device-networking-switches-without-nic-teaming.png) diff --git a/articles/databox-online/azure-stack-edge-gpu-deploy-set-up-device-update-time.md b/articles/databox-online/azure-stack-edge-gpu-deploy-set-up-device-update-time.md index 1c3a81620f32a..cb6deceaab0f2 100644 --- a/articles/databox-online/azure-stack-edge-gpu-deploy-set-up-device-update-time.md +++ b/articles/databox-online/azure-stack-edge-gpu-deploy-set-up-device-update-time.md @@ -7,30 +7,17 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: tutorial -ms.date: 02/15/2022 +ms.date: 05/24/2022 ms.author: alkohli -zone_pivot_groups: azure-stack-edge-device-deployment # Customer intent: As an IT admin, I need to understand how to connect and activate Azure Stack Edge Pro so I can use it to transfer data to Azure. --- # Tutorial: Configure the device settings for Azure Stack Edge Pro GPU -::: zone pivot="single-node" - -This tutorial describes how to configure device related settings for your 1-node Azure Stack Edge Pro GPU device. You can set up your device name, update server, and time server via the local web UI. +This tutorial describes how to configure device related settings for your Azure Stack Edge Pro GPU device. You can set up your device name, update server, and time server via the local web UI. The device settings can take around 5-7 minutes to complete. -::: zone-end - -::: zone pivot="two-node" - -This tutorial describes how to configure device related settings for your 2-node Azure Stack Edge Pro GPU device. You can set up your device name, update server, and time server via the local web UI. - -The device settings can take around 5-7 minutes to complete. 
- -::: zone-end - In this tutorial, you learn about: > [!div class="checklist"] @@ -74,13 +61,6 @@ Follow these steps to configure device related settings: ![Local web UI "Device" page 3](./media/azure-stack-edge-gpu-deploy-set-up-device-update-time/device-4.png) -::: zone pivot="two-node" - -Repeat all the above steps for the second node of your device. Make sure that the same DNS domain is used for both the nodes. - -::: zone-end - - ## Configure update 1. On the **Update** page, you can now configure the location from where to download the updates for your device. @@ -99,13 +79,6 @@ Repeat all the above steps for the second node of your device. Make sure that th 1. Select **Apply**. 1. After the update server is configured, select **Next: Time**. -::: zone pivot="two-node" - -Repeat all the above steps for the second node of your device. Make sure that the same update server is used for both the nodes. - -::: zone-end - - ## Configure time Follow these steps to configure time settings on your device. @@ -131,12 +104,6 @@ NTP servers are required because your device must synchronize time so that it ca 1. After the settings are applied, select **Next: Certificates**. -::: zone pivot="two-node" - -Repeat all the above steps for the second node of your device. Make sure that the same NTP server is used for both the nodes. - -::: zone-end - ## Next steps In this tutorial, you learn about: diff --git a/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-high-performance-network.md b/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-high-performance-network.md index aa131f2b7c5ad..e47b53a73e3e8 100644 --- a/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-high-performance-network.md +++ b/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-high-performance-network.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: how-to -ms.date: 09/29/2021 +ms.date: 05/19/2022 ms.author: alkohli # Customer intent: As an IT admin, I need to understand how to configure compute on an Azure Stack Edge Pro GPU device so that I can use it to transform data before I send it to Azure. --- @@ -154,7 +154,7 @@ In addition to the above prerequisites that are used for VM creation, you'll als Follow these steps to create an HPN VM on your device. -1. In the Azure portal of your Azure Stack Edge resource, [Add a VM image](azure-stack-edge-gpu-deploy-virtual-machine-portal.md#add-a-vm-image). You'll use this VM image to create a VM in the next step. +1. In the Azure portal of your Azure Stack Edge resource, [Add a VM image](azure-stack-edge-gpu-deploy-virtual-machine-portal.md#add-a-vm-image). You'll use this VM image to create a VM in the next step. You can choose either Gen1 or Gen2 for the VM. 1. Follow all the steps in [Add a VM](azure-stack-edge-gpu-deploy-virtual-machine-portal.md#add-a-vm) with this configuration requirement. 
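The Gen1 or Gen2 choice called out in the image step above can also be made when the image is created through the device's local Azure Resource Manager instead of the portal. The following is a minimal PowerShell sketch of that pattern; it mirrors the `New-AzImageConfig -HyperVGeneration` usage added later in this set of changes, and the resource group, disk, and image names as well as the disk size are illustrative assumptions rather than values taken from these articles.

```powershell
# Minimal sketch: create a VM image from an uploaded managed disk and pin its Hyper-V generation.
# All names below are placeholders; substitute the values from your own deployment.
$ResourceGroupName = "myaseresourcegroup"   # assumed resource group name
$DiskName          = "myasemanageddisk"     # assumed managed disk created from the uploaded VHD
$ImageName         = "myasegen2image"       # assumed image name
$OsType            = "Windows"              # or "Linux"
$DiskSize          = 127                    # assumed OS disk size in GB
$hyperVGeneration  = "V2"                   # use "V1" for a Gen1 VHD

# Build the image configuration with the chosen Hyper-V generation.
# DBELocal is the location used by the local Azure Resource Manager on Azure Stack Edge.
$imageConfig = New-AzImageConfig -Location DBELocal -HyperVGeneration $hyperVGeneration

# Attach the OS disk and create the image; VMs created from this image inherit the generation.
$ManagedDiskId = (Get-AzDisk -Name $DiskName -ResourceGroupName $ResourceGroupName).Id
Set-AzImageOsDisk -Image $imageConfig -OsType $OsType -OsState 'Generalized' -DiskSizeGB $DiskSize -ManagedDiskId $ManagedDiskId
New-AzImage -Image $imageConfig -ImageName $ImageName -ResourceGroupName $ResourceGroupName
```

The generation set here needs to match how the VHD was originally prepared in Hyper-V; registering a Gen1 VHD as V2, or the reverse, generally leads to VM creation or boot failures.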
diff --git a/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-install-gpu-extension.md b/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-install-gpu-extension.md index e2bc457171198..49c6d971bbd40 100644 --- a/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-install-gpu-extension.md +++ b/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-install-gpu-extension.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: how-to -ms.date: 08/02/2021 +ms.date: 05/26/2022 ms.author: alkohli #Customer intent: As an IT admin, I need to understand how install GPU extension on GPU virtual machines (VMs) on my Azure Stack Edge Pro GPU device. --- @@ -19,8 +19,8 @@ ms.author: alkohli This article describes how to install GPU driver extension to install appropriate Nvidia drivers on the GPU VMs running on your Azure Stack Edge device. The article covers installation steps for installing a GPU extension using Azure Resource Manager templates on both Windows and Linux VMs. > [!NOTE] -> In the Azure portal, you can install a GPU extension during VM creation or after the VM is deployed. For steps and requirements, see [Deploy GPU virtual machines](azure-stack-edge-gpu-deploy-gpu-virtual-machine.md). - +> - In the Azure portal, you can install a GPU extension during VM creation or after the VM is deployed. For steps and requirements, see [Deploy GPU virtual machines](azure-stack-edge-gpu-deploy-gpu-virtual-machine.md). +> - If you're running a Windows 2016 VHD, you must enable TLS 1.2 inside the VM before you install the GPU extension on 2205 and higher. For detailed steps, see [Troubleshoot GPU extension issues for GPU VMs on Azure Stack Edge Pro GPU](azure-stack-edge-gpu-troubleshoot-virtual-machine-gpu-extension-installation.md#failure-to-install-gpu-extension-on-a-windows-2016-vhd). ## Prerequisites @@ -30,21 +30,53 @@ Before you install GPU extension on the GPU VMs running on your device, make sur - Make sure that the port enabled for compute network on your device is connected to Internet and has access. The GPU drivers are downloaded through the internet access. - Here is an example where Port 2 was connected to the internet and was used to enable the compute network. If Kubernetes is not deployed on your environment, you can skip the Kubernetes node IP and external service IP assignment. + Here's an example where Port 2 was connected to the internet and was used to enable the compute network. If Kubernetes isn't deployed on your environment, you can skip the Kubernetes node IP and external service IP assignment. ![Screenshot of the Compute pane for an Azure Stack Edge device. Compute settings for Port 2 are highlighted.](media/azure-stack-edge-gpu-deploy-virtual-machine-install-gpu-extension/enable-compute-network-1.png) 1. [Download the GPU extension templates and parameters files](https://aka.ms/ase-vm-templates) to your client machine. Unzip it into a directory you’ll use as a working directory. -1. Verify that the client you'll use to access your device is still connected to the Azure Resource Manager over Azure PowerShell. The connection to Azure Resource Manager expires every 1.5 hours or if your Azure Stack Edge device restarts. If this happens, any cmdlets that you execute, will return error messages to the effect that you are not connected to Azure anymore. You will need to sign in again. 
For detailed instructions, see [Connect to Azure Resource Manager on your Azure Stack Edge device](azure-stack-edge-gpu-connect-resource-manager.md). +1. Verify that the client you'll use to access your device is still connected to the Azure Resource Manager over Azure PowerShell. The connection to Azure Resource Manager expires every 1.5 hours or if your Azure Stack Edge device restarts. If this happens, any cmdlets that you execute will return error messages to the effect that you aren't connected to Azure anymore. You'll need to sign in again. For detailed instructions, see [Connect to Azure Resource Manager on your Azure Stack Edge device](azure-stack-edge-gpu-connect-resource-manager.md). ## Edit parameters file Depending on the operating system for your VM, you could install GPU extension for Windows or for Linux. - ### [Windows](#tab/windows) To deploy Nvidia GPU drivers for an existing VM, edit the `addGPUExtWindowsVM.parameters.json` parameters file and then deploy the template `addGPUextensiontoVM.json`. +#### Version 2205 and higher + +The file `addGPUExtWindowsVM.parameters.json` takes the following parameters: + +```json +"parameters": { + "vmName": { + "value": "" + }, + "extensionName": { + "value": "" + }, + "publisher": { + "value": "Microsoft.HpcCompute" + }, + "type": { + "value": "NvidiaGpuDriverWindows" + }, + "typeHandlerVersion": { + "value": "1.5" + }, + "settings": { + "value": { + "DriverURL" : "http://us.download.nvidia.com/tesla/511.65/511.65-data-center-tesla-desktop-winserver-2016-2019-2022-dch-international.exe", + "DriverCertificateUrl" : "https://go.microsoft.com/fwlink/?linkid=871664", + "DriverType":"CUDA" + } + } + } +``` + +#### Versions lower than 2205 + The file `addGPUExtWindowsVM.parameters.json` takes the following parameters: ```json @@ -76,7 +108,36 @@ The file `addGPUExtWindowsVM.parameters.json` takes the following parameters: ### [Linux](#tab/linux) -To deploy Nvidia GPU drivers for an existing Linux VM, edit the parameters file and then deploy the template `addGPUextensiontoVM.json`. +To deploy Nvidia GPU drivers for an existing Linux VM, edit the `addGPUExtLinuxVM.parameters.json` parameters file and then deploy the template `addGPUextensiontoVM.json`. + +#### Version 2205 and higher + +If using Ubuntu or Red Hat Enterprise Linux (RHEL), the `addGPUExtLinuxVM.parameters.json` file takes the following parameters: + +```json +"parameters": { + "vmName": { + "value": "" + }, + "extensionName": { + "value": "" + }, + "publisher": { + "value": "Microsoft.HpcCompute" + }, + "type": { + "value": "NvidiaGpuDriverLinux" + }, + "typeHandlerVersion": { + "value": "1.8" + }, + "settings": { + } + } +``` + +#### Versions lower than 2205 If using Ubuntu or Red Hat Enterprise Linux (RHEL), the `addGPUExtLinuxVM.parameters.json` file takes the following parameters: ```json @@ -103,7 +164,7 @@ If using Ubuntu or Red Hat Enterprise Linux (RHEL), the `addGPUExtLinuxVM.parame } ``` -Here is a sample Ubuntu parameter file that was used in this article: +Here's a sample Ubuntu parameter file that was used in this article: ```powershell { @@ -136,7 +197,7 @@ Here is a sample Ubuntu parameter file that was used in this article: If you created your VM using a Red Hat Enterprise Linux Bring Your Own Subscription image (RHEL BYOS), make sure that: - You've followed the steps in [using RHEL BYOS image](azure-stack-edge-gpu-create-virtual-machine-image.md). -- After you created the GPU VM, register and subscribe the VM with the Red Hat Customer portal.
If your VM is not properly registered, installation does not proceed as the VM is not entitled. See [Register and automatically subscribe in one step using the Red Hat Subscription Manager](https://access.redhat.com/solutions/253273). This step allows the installation script to download relevant packages for the GPU driver. +- After you created the GPU VM, register and subscribe the VM with the Red Hat Customer portal. If your VM isn't properly registered, installation doesn't proceed as the VM isn't entitled. See [Register and automatically subscribe in one step using the Red Hat Subscription Manager](https://access.redhat.com/solutions/253273). This step allows the installation script to download relevant packages for the GPU driver. - You either manually install the `vulkan-filesystem` package or add CentOS7 repo to your yum repo list. When you install the GPU extension, the installation script looks for a `vulkan-filesystem` package that is on CentOS7 repo (for RHEL7). --- @@ -145,34 +206,36 @@ If you created your VM using a Red Hat Enterprise Linux Bring Your Own Subscript ### [Windows](#tab/windows) -Deploy the template `addGPUextensiontoVM.json`. This template deploys extension to an existing VM. Run the following command: +Deploy the template `addGPUextensiontoVM.json` to install the extension on an existing VM. + +Run the following command: ```powershell $templateFile = "" $templateParameterFile = "" -$RGName = "" +$RGName = "" New-AzureRmResourceGroupDeployment -ResourceGroupName $RGName -TemplateFile $templateFile -TemplateParameterFile $templateParameterFile -Name "" ``` > [!NOTE] > The extension deployment is a long running job and takes about 10 minutes to complete. -Here is a sample output: - -```powershell -PS C:\WINDOWS\system32> "C:\12-09-2020\ExtensionTemplates\addGPUextensiontoVM.json" -C:\12-09-2020\ExtensionTemplates\addGPUextensiontoVM.json -PS C:\WINDOWS\system32> $templateFile = "C:\12-09-2020\ExtensionTemplates\addGPUextensiontoVM.json" -PS C:\WINDOWS\system32> $templateParameterFile = "C:\12-09-2020\ExtensionTemplates\addGPUExtWindowsVM.parameters.json" -PS C:\WINDOWS\system32> $RGName = "myasegpuvm1" -PS C:\WINDOWS\system32> New-AzureRmResourceGroupDeployment -ResourceGroupName $RGName -TemplateFile $templateFile -TemplateParameterFile $templateParameterFile -Name "deployment3" - -DeploymentName : deployment3 -ResourceGroupName : myasegpuvm1 -ProvisioningState : Succeeded -Timestamp : 12/16/2020 12:18:50 AM -Mode : Incremental -TemplateLink : -Parameters : +Here's a sample output: + + ```powershell + PS C:\WINDOWS\system32> "C:\12-09-2020\ExtensionTemplates\addGPUextensiontoVM.json" + C:\12-09-2020\ExtensionTemplates\addGPUextensiontoVM.json + PS C:\WINDOWS\system32> $templateFile = "C:\12-09-2020\ExtensionTemplates\addGPUextensiontoVM.json" + PS C:\WINDOWS\system32> $templateParameterFile = "C:\12-09-2020\ExtensionTemplates\addGPUExtWindowsVM.parameters.json" + PS C:\WINDOWS\system32> $RGName = "myasegpuvm1" + PS C:\WINDOWS\system32> New-AzureRmResourceGroupDeployment -ResourceGroupName $RGName -TemplateFile $templateFile -TemplateParameterFile $templateParameterFile -Name "deployment3" + + DeploymentName : deployment3 + ResourceGroupName : myasegpuvm1 + ProvisioningState : Succeeded + Timestamp : 12/16/2020 12:18:50 AM + Mode : Incremental + TemplateLink : + Parameters : Name Type Value =============== ========================= ========== vmName String VM2 extensionName String windowsgpuext publisher String Microsoft.HpcCompute type String NvidiaGpuDriverWindows typeHandlerVersion String 1.3 settings Object { "DriverURL": "http://us.download.nvidia.com/tesla/442.50/442.50-tesla-desktop-winserver-2019-2016-international.exe", "DriverCertificateUrl": "https://go.microsoft.com/fwlink/?linkid=871664", "DriverType": "CUDA" } -Outputs : -DeploymentDebugLogLevel : -PS
C:\WINDOWS\system32> -``` + Outputs : + DeploymentDebugLogLevel : + PS C:\WINDOWS\system32> + ``` ### [Linux](#tab/linux) -Deploy the template `addGPUextensiontoVM.json`. This template deploys extension to an existing VM. Run the following command: +Deploy the template `addGPUextensiontoVM.json` to install the extension to an existing VM. + +Run the following command: ```powershell $templateFile = "Path to addGPUextensiontoVM.json" @@ -205,7 +270,7 @@ New-AzureRmResourceGroupDeployment -ResourceGroupName $RGName -TemplateFile $tem > [!NOTE] > The extension deployment is a long running job and takes about 10 minutes to complete. -Here is a sample output: +Here's a sample output: ```powershell Copyright (C) Microsoft Corporation. All rights reserved. @@ -242,18 +307,20 @@ Outputs : DeploymentDebugLogLevel : PS C:\WINDOWS\system32> ``` + --- ## Track deployment ### [Windows](#tab/windows) -To check the deployment state of extensions for a given VM, run the following command: +To check the deployment state of extensions for a given VM, open another PowerShell session (run as administrator), and then run the following command: ```powershell Get-AzureRmVMExtension -ResourceGroupName -VMName -Name ``` -Here is a sample output: + +Here's a sample output: ```powershell PS C:\WINDOWS\system32> Get-AzureRmVMExtension -ResourceGroupName myasegpuvm1 -VMName VM2 -Name windowsgpuext @@ -299,12 +366,13 @@ A successful install is indicated by a `message` as `Enable Extension` and `stat ### [Linux](#tab/linux) -Template deployment is a long running job. To check the deployment state of extensions for a given VM, open another PowerShell session (run as administrator). Run the following command: +To check the deployment state of extensions for a given VM, open another PowerShell session (run as administrator), and then run the following command: ```powershell Get-AzureRmVMExtension -ResourceGroupName myResourceGroup -VMName -Name ``` -Here is a sample output: + +Here's a sample output: ```powershell Copyright (C) Microsoft Corporation. All rights reserved. @@ -348,9 +416,17 @@ The extension execution output is logged to the following file: `/var/log/azure/ ### [Windows](#tab/windows) -Sign in to the VM and run the nvidia-smi command-line utility installed with the driver. The `nvidia-smi.exe` is located at `C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe`. If you do not see the file, it's possible that the driver installation is still running in the background. Wait for 10 minutes and check again. +Sign in to the VM and run the nvidia-smi command-line utility installed with the driver. + +#### Version 2205 and higher + +The `nvidia-smi.exe` is located at `C:\Windows\System32\nvidia-smi.exe`. If you don't see the file, it's possible that the driver installation is still running in the background. Wait for 10 minutes and check again. -If the driver is installed, you see an output similar to the following sample: +#### Versions lower than 2205 + +The `nvidia-smi.exe` is located at `C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe`. If you don't see the file, it's possible that the driver installation is still running in the background. Wait for 10 minutes and check again. + +If the driver is installed, you see an output similar to the following sample: ```powershell PS C:\Users\Administrator> cd "C:\Program Files\NVIDIA Corporation\NVSMI" @@ -398,7 +474,7 @@ Follow these steps to verify the driver installation: 1. Connect to the GPU VM. 
Follow the instructions in [Connect to a Linux VM](azure-stack-edge-gpu-deploy-virtual-machine-powershell.md#connect-to-a-linux-vm). - Here is a sample output: + Here's a sample output: ```powershell PS C:\WINDOWS\system32> ssh -l Administrator 10.57.50.60 @@ -440,7 +516,7 @@ Follow these steps to verify the driver installation: Administrator@VM1:~$ ``` -2. Run the nvidia-smi command-line utility installed with the driver. If the driver is successfully installed, you will be able to run the utility and see the following output: +2. Run the nvidia-smi command-line utility installed with the driver. If the driver is successfully installed, you'll be able to run the utility and see the following output: ```powershell Administrator@VM1:~$ nvidia-smi @@ -472,17 +548,15 @@ For more information, see [Nvidia GPU driver extension for Linux](../virtual-mac > [!NOTE] > After you finish installing the GPU driver and GPU extension, you no longer need to use a port with Internet access for compute. - --- - ## Remove GPU extension To remove the GPU extension, use the following command: `Remove-AzureRmVMExtension -ResourceGroupName -VMName -Name ` -Here is a sample output: +Here's a sample output: ```powershell PS C:\azure-stack-edge-deploy-vms> Remove-AzureRmVMExtension -ResourceGroupName rgl -VMName WindowsVM -Name windowsgpuext @@ -493,7 +567,6 @@ Requestld IsSuccessStatusCode StatusCode ReasonPhrase True OK OK ``` - ## Next steps Learn how to: diff --git a/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-portal.md b/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-portal.md index 2e93ebc04aae4..e8a38c9d2946b 100644 --- a/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-portal.md +++ b/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-portal.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: how-to -ms.date: 04/11/2022 +ms.date: 05/25/2022 ms.author: alkohli # Customer intent: As an IT admin, I need to understand how to configure compute on an Azure Stack Edge Pro GPU device so that I can use it to transform data before I send it to Azure. --- @@ -91,9 +91,10 @@ Follow these steps to create a VM on your Azure Stack Edge Pro GPU device. |Edge resource group |Select the resource group to add the image to. | |Save image as | The name for the VM image that you're creating from the VHD you uploaded to the storage account. | |OS type |Choose from Windows or Linux as the operating system of the VHD you'll use to create the VM image. | + |VM generation |Choose Gen 1 or Gen 2 as the generation of the image you'll use to create the VM. | - ![Screenshot showing the Add image page for a virtual machine, with the Add button highlighted.](media/azure-stack-edge-gpu-deploy-virtual-machine-portal/add-virtual-machine-image-6.png) + ![Screenshot showing the Add image page for a virtual machine with the Add button highlighted.](media/azure-stack-edge-gpu-deploy-virtual-machine-portal/add-virtual-machine-image-6.png) 1. The VHD is downloaded, and the VM image is created. Image creation takes several minutes to complete. You'll see a notification for the successful completion of the VM image. @@ -102,7 +103,7 @@ Follow these steps to create a VM on your Azure Stack Edge Pro GPU device. 1. After the VM image is successfully created, it's added to the list of images on the **Images** pane. - ![Screenshot that shows the Images pane in Virtual Machines view of an Azure Stack Edge device. 
The entry for a VM image is highlighted.](media/azure-stack-edge-gpu-deploy-virtual-machine-portal/add-virtual-machine-image-9.png) + ![Screenshot that shows the Images pane in Virtual Machines view of an Azure Stack Edge device.](media/azure-stack-edge-gpu-deploy-virtual-machine-portal/add-virtual-machine-image-9.png) The **Deployments** pane updates to indicate the status of the deployment. diff --git a/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-powershell-script.md b/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-powershell-script.md index 54b0c8c02c536..4afe78133e24e 100644 --- a/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-powershell-script.md +++ b/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-powershell-script.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: how-to -ms.date: 03/08/2021 +ms.date: 05/24/2022 ms.author: alkohli #Customer intent: As an IT admin, I need to understand how to create and manage virtual machines (VMs) on my Azure Stack Edge Pro device using an Azure PowerShell script so that I can efficiently manage my VMs. --- @@ -189,7 +189,7 @@ Before you begin creating and managing a VM on your Azure Stack Edge Pro device Location : DBELocal Tags : - New-AzureRmImage -Image Microsoft.Azure.Commands.Compute.Automation.Models.PSImage -ImageName ig201221071831 -ResourceGroupName rg201221071831 + New-AzureRmImage -Image Microsoft.Azure.Commands.Compute.Automation.Models.PSImage -ImageName ig201221071831 -ResourceGroupName rg201221071831 -HyperVGeneration V1 ResourceGroupName : rg201221071831 SourceVirtualMachine : diff --git a/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-powershell.md b/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-powershell.md index 3dc2b6871e956..5b182b9d4b16e 100644 --- a/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-powershell.md +++ b/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-powershell.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: how-to -ms.date: 04/18/2022 +ms.date: 05/24/2022 ms.author: alkohli ms.custom: devx-track-azurepowershell #Customer intent: As an IT admin, I need to understand how to create and manage virtual machines (VMs) on my Azure Stack Edge Pro device. I want to use APIs so that I can efficiently manage my VMs. @@ -437,11 +437,12 @@ You'll now create a VM image from the managed disk. $DiskSize = "" $OsType = "" $ImageName = "" + $hyperVGeneration = "" ``` 1. Create a VM image. The supported OS types are Linux and Windows. 
```powershell - $imageConfig = New-AzImageConfig -Location DBELocal + $imageConfig = New-AzImageConfig -Location DBELocal -HyperVGeneration $hyperVGeneration $ManagedDiskId = (Get-AzDisk -Name $DiskName -ResourceGroupName $ResourceGroupName).Id Set-AzImageOsDisk -Image $imageConfig -OsType $OsType -OsState 'Generalized' -DiskSizeGB $DiskSize -ManagedDiskId $ManagedDiskId New-AzImage -Image $imageConfig -ImageName $ImageName -ResourceGroupName $ResourceGroupName diff --git a/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-templates.md b/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-templates.md index cf9700859c0a4..debf6c2a615e4 100644 --- a/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-templates.md +++ b/articles/databox-online/azure-stack-edge-gpu-deploy-virtual-machine-templates.md @@ -7,12 +7,12 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: how-to -ms.date: 04/22/2022 +ms.date: 05/25/2022 ms.author: alkohli #Customer intent: As an IT admin, I need to understand how to create and manage virtual machines (VMs) on my Azure Stack Edge Pro device using APIs so that I can efficiently manage my VMs. --- -# Deploy VMs on your Azure Stack Edge Pro GPU device via templates +# Deploy VMs on your Azure Stack Edge Pro GPU device via templates [!INCLUDE [applies-to-GPU-and-pro-r-and-mini-r-skus](../../includes/azure-stack-edge-applies-to-gpu-pro-r-mini-r-sku.md)] @@ -307,18 +307,26 @@ The file `CreateImage.parameters.json` takes the following parameters: "imageUri": { "value": "" }, + "hyperVGeneration": { + "type": "string", + "value": "" + }, + } ``` Edit the file `CreateImage.parameters.json` to include the following values for your Azure Stack Edge Pro device: -1. Provide the OS type corresponding to the VHD you'll upload. The OS type can be Windows or Linux. +1. Provide the OS type and Hyper-V generation corresponding to the VHD you'll upload. The OS type can be Windows or Linux, and the VM generation can be V1 or V2. ```json "parameters": { "osType": { "value": "Windows" - }, + }, + "hyperVGeneration": { + "value": "V2" + }, + } ``` 2. Change the image URI to the URI of the image you uploaded in the earlier step: @@ -343,12 +351,15 @@ Edit the file `CreateImage.parameters.json` to include the following values for "osType": { "value": "Linux" }, + "hyperVGeneration": { + "value": "V1" + }, "imageName": { "value": "myaselinuximg" }, "imageUri": { "value": "https://sa2.blob.myasegpuvm.wdshcsso.com/con1/ubuntu18.04waagent.vhd" - } + } } } ``` diff --git a/articles/databox-online/azure-stack-edge-gpu-faq-billing-model.yml b/articles/databox-online/azure-stack-edge-gpu-faq-billing-model.yml index 515f96131f2a8..6acf3847163a2 100644 --- a/articles/databox-online/azure-stack-edge-gpu-faq-billing-model.yml +++ b/articles/databox-online/azure-stack-edge-gpu-faq-billing-model.yml @@ -8,7 +8,7 @@ metadata: ms.service: databox ms.subservice: edge ms.topic: faq - ms.date: 01/21/2022 + ms.date: 06/01/2022 ms.author: alkohli title: "FAQ: Billing for Azure Stack Edge Pro GPU" @@ -33,7 +33,7 @@ sections: - question: | When does billing start and stop for my Azure Stack Edge device? answer: | - Billing starts 14 days after a device is marked as **Shipped**. Billing continues even if you're not using the device. + Billing starts when the devices are delivered to your location. Billing continues even if you're not using the device. Billing stops when you initiate return of the device.
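As a quick cross-check on the `hyperVGeneration` value introduced in the image-creation changes above, the generation recorded on an image can be read back after the image is created. This is a small sketch that assumes the image was created with the `Az` PowerShell module against the device's local Azure Resource Manager; the resource group name is an illustrative placeholder, while `myaselinuximg` is the image name used in the sample parameter file above.

```powershell
# Sketch: read back the Hyper-V generation recorded on an existing VM image.
$ResourceGroupName = "myaseresourcegroup"   # assumed resource group name, not from these articles
$ImageName         = "myaselinuximg"        # image name from the sample parameter file above

$image = Get-AzImage -ResourceGroupName $ResourceGroupName -ImageName $ImageName
$image.HyperVGeneration   # expected to report V1 or V2, matching the value in the parameter file
```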
diff --git a/articles/databox-online/azure-stack-edge-gpu-install-update.md b/articles/databox-online/azure-stack-edge-gpu-install-update.md index c3cb441b21635..03915ad1f5420 100644 --- a/articles/databox-online/azure-stack-edge-gpu-install-update.md +++ b/articles/databox-online/azure-stack-edge-gpu-install-update.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: how-to -ms.date: 03/23/2022 +ms.date: 06/07/2022 ms.author: alkohli --- # Update your Azure Stack Edge Pro GPU @@ -20,21 +20,21 @@ The procedure described in this article was performed using a different version ## About latest update -The current update is Update 2203. This update installs two updates, the device update followed by Kubernetes updates. The associated versions for this update are: +The current update is Update 2205. This update installs two updates, the device update followed by Kubernetes updates. The associated versions for this update are: -- Device software version - **2.2.1902.4561** +- Device software version - **2.2.1983.5094** - Kubernetes server version - **v1.21.7** - IoT Edge version: **0.1.0-beta15** -- Azure Arc version: **1.5.3** -- GPU driver version: **470.57.02** -- CUDA version: **11.4** +- Azure Arc version: **1.6.6** +- GPU driver version: **510.47.03** +- CUDA version: **11.6** -For information on what's new in this update, go to [Release notes](azure-stack-edge-gpu-2203-release-notes.md). +For information on what's new in this update, go to [Release notes](azure-stack-edge-gpu-2205-release-notes.md). -**To apply 2203 update, your device must be running 2106 or later.** +**To apply 2205 update, your device must be running 2106 or later.** - If you are not running the minimal supported version, you'll see this error: *Update package cannot be installed as its dependencies are not met*. -- You can update to 2106 from an older version and then install 2203. +- You can update to 2106 from an older version and then install 2205. ### Updates for a single-node vs two-node @@ -193,7 +193,7 @@ Do the following steps to download the update from the Microsoft Update Catalog. -4. Select **Download**. There are two packages to download for the update. The first package will have two files for the device software updates (*SoftwareUpdatePackage.0.exe*, *SoftwareUpdatePackage.1.exe*) and the second package has two files for the Kubernetes updates (*Kubernetes_Package.0.exe*, *Kubernetes_Package.1.exe*), respectively. Download the packages to a folder on the local system. You can also copy the folder to a network share that is reachable from the device. +4. Select **Download**. There are two packages to download for the update. The first package will have two files for the device software updates (*SoftwareUpdatePackage.0.exe*, *SoftwareUpdatePackage.1.exe*) and the second package has three files for the Kubernetes updates (*Kubernetes_Package.0.exe*, *Kubernetes_Package.1.exe*, and *Kubernetes_Package.2.exe*), respectively. Download the packages to a folder on the local system. You can also copy the folder to a network share that is reachable from the device. ### Install the update or the hotfix @@ -228,7 +228,7 @@ This procedure takes around 20 minutes to complete. Perform the following steps 6. After the restart is complete, you are taken to the **Sign in** page. To verify that the device software has been updated, in the local web UI, go to **Maintenance** > **Software update**. For the current release, the displayed software version should be **Azure Stack Edge 2203**. 
-7. You will now update the Kubernetes software version. Select the remaining two Kubernetes files together (file with the *Kubernetes_Package.0.exe* and *Kubernetes_Package.1.exe* suffix) and repeat the above steps to apply update. +7. You will now update the Kubernetes software version. Select the remaining three Kubernetes files together (file with the *Kubernetes_Package.0.exe*, *Kubernetes_Package.1.exe*, and *Kubernetes_Package.2.exe* suffix) and repeat the above steps to apply update. ![Screenshot of files selected for the Kubernetes update.](./media/azure-stack-edge-gpu-install-update/local-ui-update-7.png) diff --git a/articles/databox-online/azure-stack-edge-gpu-prepare-windows-generalized-image-iso.md b/articles/databox-online/azure-stack-edge-gpu-prepare-windows-generalized-image-iso.md index 9194b7bc2df1d..06e603658dcb1 100644 --- a/articles/databox-online/azure-stack-edge-gpu-prepare-windows-generalized-image-iso.md +++ b/articles/databox-online/azure-stack-edge-gpu-prepare-windows-generalized-image-iso.md @@ -98,7 +98,7 @@ To create your new virtual machine, follow these steps: ![New Virtual Machine wizard, Specify Name and Location](./media/azure-stack-edge-gpu-prepare-windows-generalized-image-iso/vhd-from-iso-08.png) -4. Under **Specify Generation**, select **Generation 1**. Then select **Next >**. +4. Under **Specify Generation**, select **Generation 1** or **Generation 2**. Then select **Next >**. ![New Virtual Machine wizard, Choose the generation of virtual machine to create](./media/azure-stack-edge-gpu-prepare-windows-generalized-image-iso/vhd-from-iso-09.png) diff --git a/articles/databox-online/azure-stack-edge-gpu-prepare-windows-vhd-generalized-image.md b/articles/databox-online/azure-stack-edge-gpu-prepare-windows-vhd-generalized-image.md index fc4a0f76dd54f..d2c03e0582924 100644 --- a/articles/databox-online/azure-stack-edge-gpu-prepare-windows-vhd-generalized-image.md +++ b/articles/databox-online/azure-stack-edge-gpu-prepare-windows-vhd-generalized-image.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: how-to -ms.date: 06/18/2021 +ms.date: 05/18/2022 ms.author: alkohli #Customer intent: As an IT admin, I need to understand how to create and upload Azure VM images that I can use to deploy virtual machines on my Azure Stack Edge Pro GPU device. --- @@ -108,7 +108,7 @@ You'll use this fixed-size VHD for all the subsequent steps in this article. ![Specify name and location for your VM](./media/azure-stack-edge-gpu-prepare-windows-vhd-generalized-image/create-virtual-machine-2.png) -1. On the **Specify generation** page, choose **Generation 1** for the .vhd device image type, and then select **Next**. +1. On the **Specify generation** page, choose **Generation 1** or **Generation 2** for the .vhd device image type, and then select **Next**. ![Specify generation](./media/azure-stack-edge-gpu-prepare-windows-vhd-generalized-image/create-virtual-machine-3.png) diff --git a/articles/databox-online/azure-stack-edge-gpu-quickstart.md b/articles/databox-online/azure-stack-edge-gpu-quickstart.md index ee74bddb58133..a11013beea12c 100644 --- a/articles/databox-online/azure-stack-edge-gpu-quickstart.md +++ b/articles/databox-online/azure-stack-edge-gpu-quickstart.md @@ -47,7 +47,7 @@ Before you deploy, make sure that following prerequisites are in place: 5. **Configure compute network**: Create a virtual switch by enabling a port on your device. 
Enter 2 free, contiguous static IPs for Kubernetes nodes in the same network that you created the switch. Provide at least 1 static IP for IoT Edge Hub service to access compute modules and 1 static IP for each extra service or container that you want to access from outside the Kubernetes cluster.
-   Kubernetes is required to deploy all containerized workloads. See more information on [Compute network settings](azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy.md#configure-virtual-switches-and-compute-ips).
+   Kubernetes is required to deploy all containerized workloads. See more information on [Compute network settings](azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy.md#configure-virtual-switches).
6. **Configure web proxy**: If you use web proxy in your environment, enter web proxy server IP in `http://:`. Set authentication to **None**. See more information on [Web proxy settings](azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy.md#configure-web-proxy).
diff --git a/articles/databox-online/azure-stack-edge-gpu-troubleshoot-virtual-machine-gpu-extension-installation.md b/articles/databox-online/azure-stack-edge-gpu-troubleshoot-virtual-machine-gpu-extension-installation.md
index 01ee15cb50aea..0441b39cc0ad7 100644
--- a/articles/databox-online/azure-stack-edge-gpu-troubleshoot-virtual-machine-gpu-extension-installation.md
+++ b/articles/databox-online/azure-stack-edge-gpu-troubleshoot-virtual-machine-gpu-extension-installation.md
@@ -7,7 +7,7 @@ author: v-dalc
ms.service: databox
ms.subservice: edge
ms.topic: how-to
-ms.date: 08/02/2021
+ms.date: 05/26/2022
ms.author: alkohli
---
# Troubleshoot GPU extension issues for GPU VMs on Azure Stack Edge Pro GPU
@@ -18,6 +18,89 @@ This article gives guidance for resolving the most common issues that cause inst
For installation steps, see [Install GPU extension](./azure-stack-edge-gpu-deploy-virtual-machine-install-gpu-extension.md?tabs=linux).

+## In versions lower than 2205, the Linux GPU extension installs old signing keys: signature and/or required key missing
+
+**Error description:** The Linux GPU extension installs old signing keys, which prevents download of the required GPU driver. In this case, you'll see the following error in the syslog (`/var/log/syslog`) and in `/var/log/waagent.log` on the Linux VM:
+
+   ```output
+   May  5 06:04:53 gpuvm12 kernel: [  833.601805] nvidia: module verification failed: signature and/or required key missing - tainting kernel
+   ```
+
+**Suggested solutions:** You have two options to mitigate this issue:
+
+- **Option 1:** Apply the Azure Stack Edge 2205 updates to your device.
+- **Option 2:** After you create a GPU virtual machine of an NCasT4_v3-series size, manually install the new signing keys before you install the extension. To set the required signing keys, follow the steps in [Updating the CUDA Linux GPG Repository Key | NVIDIA Technical Blog](https://developer.nvidia.com/blog/updating-the-cuda-linux-gpg-repository-key/).
+
+  Here's an example that installs the signing keys on an Ubuntu 18.04 virtual machine:
+
+   ```bash
+   sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
+   ```
+
+## Failure to install GPU extension on a Windows 2016 VHD
+
+**Error description:** This is a known issue in versions lower than 2205. The GPU extension requires TLS 1.2. In this case, you may see the following error message:
+
+   ```output
+   Failed to download https://go.microsoft.com/fwlink/?linkid=871664 after 10 attempts.
+   Exiting!
+   ```
+
+Additional details:
+
+- Check the guest log for the associated error. To collect the guest logs, see [Collect guest logs for VMs on an Azure Stack Edge Pro GPU device](azure-stack-edge-gpu-collect-virtual-machine-guest-logs.md).
+- On a Linux VM, look in `/var/log/waagent.log` or `/var/log/azure/nvidia-vmext-status`.
+- On a Windows VM, find the error status in `C:\Packages\Plugins\Microsoft.HpcCompute.NvidiaGpuDriverWindows\1.3.0.0\Status`.
+- Review the complete execution log in `C:\WindowsAzure\Logs\WaAppAgent.txt`.
+
+If the installation failed during the package download, that error indicates the VM couldn't access the public network to download the driver.
+
+**Suggested solution:** Use the following steps to enable TLS 1.2 on a Windows 2016 VM, and then deploy the GPU extension.
+
+1. Run the following command inside the VM to enable TLS 1.2 (`sp` is the built-in alias for `Set-ItemProperty`):
+
+   ```powershell
+   sp hklm:\SOFTWARE\Microsoft\.NETFramework\v4.0.30319 SchUseStrongCrypto 1
+   ```
+
+1. Deploy the template `addGPUextensiontoVM.json` to install the extension on an existing VM. You can install the extension manually, or you can install the extension from the Azure portal.
+
+   - To install the extension manually, see [Install GPU extension on VMs for your Azure Stack Edge Pro GPU device](azure-stack-edge-gpu-deploy-virtual-machine-install-gpu-extension.md).
+   - To install the extension from the Azure portal, see [Deploy GPU VMs on your Azure Stack Edge Pro GPU device](azure-stack-edge-gpu-deploy-gpu-virtual-machine.md).
+
+   > [!NOTE]
+   > The extension deployment is a long-running job and takes about 10 minutes to complete.
+
+## Manually install the Nvidia driver on RHEL 7
+
+**Error description:** When you install the GPU extension on an RHEL 7 VM, the installation may fail due to a certificate rotation issue and an incompatible driver version.
+
+**Suggested solution:** In this case, you have two options:
+
+- **Option 1:** Resolve the certificate rotation issue and then install an Nvidia driver lower than version 510.
+
+   1. To resolve the certificate rotation issue, run the following command. Replace `$arch` with your VM's architecture, for example `x86_64`.
+
+      ```bash
+      sudo yum-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel7/$arch/cuda-rhel7.repo
+      ```
+
+   1. Install an Nvidia driver lower than version 510.
+
+- **Option 2:** Deploy the GPU extension. Use the following settings when you deploy the extension with an ARM template:
+
+   ```json
+   "settings": {
+     "isCustomInstall": true,
+     "InstallMethod": 0,
+     "DRIVER_URL": "https://developer.download.nvidia.com/compute/cuda/11.4.4/local_installers/cuda-repo-rhel7-11-4-local-11.4.4_470.82.01-1.x86_64.rpm",
+     "DKMS_URL": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm",
+     "LIS_URL": "https://aka.ms/lis",
+     "LIS_RHEL_ver": "3.10.0-1062.9.1.el7"
+   }
+   ```
+
## VM size is not GPU VM size

**Error description:** A GPU VM must be either Standard_NC4as_T4_v3 or Standard_NC8as_T4_v3 size. If any other VM size is used, the GPU extension will fail to be attached.
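To make the template deployment step above concrete, here's a minimal sketch of deploying `addGPUextensiontoVM.json` with Azure PowerShell against the device's local Azure Resource Manager. The resource group name, parameter file name, and deployment name are placeholder assumptions for illustration, not values defined in this article.

```powershell
# Minimal sketch: deploy the GPU extension template to an existing VM on the device.
# Assumes you've already signed in to the device's local Azure Resource Manager and
# that the template and a filled-in parameter file sit in the current folder.
# "myase-rg", "addGPUExtension.parameters.json", and "deployGpuExtension" are placeholders.
$resourceGroup = "myase-rg"

New-AzResourceGroupDeployment `
    -ResourceGroupName $resourceGroup `
    -TemplateFile ".\addGPUextensiontoVM.json" `
    -TemplateParameterFile ".\addGPUExtension.parameters.json" `
    -Name "deployGpuExtension" `
    -Verbose
```

You can then confirm the deployment with `Get-AzResourceGroupDeployment -ResourceGroupName $resourceGroup`.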
diff --git a/articles/databox-online/azure-stack-edge-gpu-virtual-machine-overview.md b/articles/databox-online/azure-stack-edge-gpu-virtual-machine-overview.md index 93cb94ab4f9c1..7cd5a00da9be4 100644 --- a/articles/databox-online/azure-stack-edge-gpu-virtual-machine-overview.md +++ b/articles/databox-online/azure-stack-edge-gpu-virtual-machine-overview.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: conceptual -ms.date: 04/21/2022 +ms.date: 05/18/2022 ms.author: alkohli --- @@ -60,7 +60,7 @@ You can run a maximum of 24 VMs on your device. This is another factor to consid ### Operating system disks and images -On your device, you can only use Generation 1 VMs with a fixed virtual hard disk (VHD) format. VHDs are used to store the machine operating system (OS) and data. VHDs are also used for the images you use to install an OS. +On your device, you can use Generation 1 or Generation 2 VMs with a fixed virtual hard disk (VHD) format. VHDs are used to store the machine operating system (OS) and data. VHDs are also used for the images you use to install an OS. The images that you use to create VM images can be generalized or specialized. When creating images for your VMs, you must prepare the images. See the various ways to prepare and use VM images on your device: diff --git a/articles/databox-online/azure-stack-edge-mini-r-deploy-install.md b/articles/databox-online/azure-stack-edge-mini-r-deploy-install.md index e49f57906850b..528f624c4a61b 100644 --- a/articles/databox-online/azure-stack-edge-mini-r-deploy-install.md +++ b/articles/databox-online/azure-stack-edge-mini-r-deploy-install.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: tutorial -ms.date: 03/22/2021 +ms.date: 05/17/2022 ms.author: alkohli # Customer intent: As an IT admin, I need to understand how to install Azure Stack Edge Mini R device in datacenter so I can use it to transfer data to Azure. --- @@ -150,6 +150,9 @@ Take the following steps to cable your device for power and network. - If connecting PORT 2, use the RJ-45 network cable. - For the 10-GbE network interfaces, use the SFP+ copper cables. + > [!NOTE] + > Using USB ports to connect any external device, including keyboards and monitors, is not supported for Azure Stack Edge devices. + ## Next steps In this tutorial, you learned about Azure Stack Edge topics such as how to: diff --git a/articles/databox-online/azure-stack-edge-operational-guidelines-faq.yml b/articles/databox-online/azure-stack-edge-operational-guidelines-faq.yml new file mode 100644 index 0000000000000..8da52176adc9d --- /dev/null +++ b/articles/databox-online/azure-stack-edge-operational-guidelines-faq.yml @@ -0,0 +1,73 @@ +### YamlMime:FAQ +metadata: + title: Azure Stack Edge operational guidelines FAQ + description: Contains frequently asked questions and answers about Azure Stack Edge operations. + services: databox + author: alkohli + + ms.service: databox + ms.topic: faq + ms.date: 05/26/2022 + ms.author: alkohli + +title: "Azure Stack Edge operations: frequently asked questions" +summary: | + Use the following guidelines to learn about operating the Azure Stack Edge Hardware-as-a-service offering. + +sections: + - name: Ignored + questions: + - question: | + Who is responsible for the initial delivery of the device to the customer location? + answer: | + Microsoft will deliver your device using the contact details and shipping address provided in the order. 
+ + - question: | + Who is responsible for the ongoing operation of the device? + answer: | + The customer is responsible for the day-to-day operation of the device, including: + - Power, network, storage, and peripheral device operation. + - Software operations, like application deployment, Kubernetes cluster operations, clustering, and virtual machine management. + + The device will be located on the customer's premises during regular operation, while the subscription is active. + To create a support ticket with Microsoft, see the section below to [open a support ticket](#how-do-i-open-a-support-ticket-with-microsoft-). + + - question: | + What if the device is lost, damaged, or stolen while it's on-premises? + answer: | + If your device is lost, damaged, or stolen, you're responsible for promptly informing Microsoft and paying a fee. For more information, see the frequently asked questions on the [Azure Stack Edge pricing page](https://azure.microsoft.com/pricing/details/azure-stack/edge/). Once you submit a new order in the Azure portal, Microsoft may deliver a replacement device to you. + + - question: | + Who manages regular updates to enhance and improve the Azure Stack Edge platform? + answer: | + Microsoft releases periodic updates for firmware, BIOS, drivers, Kubernetes service, and other software-related updates. Software patches may be made available by Microsoft to address vulnerabilities and bug fixes, etc. When updates are available, you initiate installation at a time that's convenient for you. + For more information about updates for your device, see [Update your Azure Stack Edge Pro GPU](./azure-stack-edge-gpu-install-update.md?tabs=version-2106-and-later). + + - question: | + Who fixes software issues on my applications that run on Azure Stack Edge? + answer: | + You're responsible for fixing issues in the applications that you deploy on the Azure Stack Edge platform, even if you're consuming services like Kubernetes that are provided by the platform. To create a support ticket with Microsoft, see the section below to [open a support ticket](#how-do-i-open-a-support-ticket-with-microsoft-). + + - question: | + How do I replace an Azure Stack Edge device if there's a hardware failure? + answer: | + If Microsoft determines the device is faulty, Microsoft will arrange for replacement and delivery of devices. If you [Return your Azure Stack Edge device](azure-stack-edge-return-device.md?tabs=azure-edge-hardware-center), Microsoft will process the return of your device. + + - question: | + How do I open a support ticket with Microsoft? + answer: | + For issues that you can't address in-house, [open a support ticket](azure-stack-edge-contact-microsoft-support.md), and Microsoft will assess your questions. + + - For Azure Stack Edge and Azure Data Box Gateway issues like network interfaces, disk drives, or firmware, the ticket is assigned to Microsoft Support. + - If the device is faulty, damaged, or lost, and loss isn't the customer's fault, Microsoft may: + - Send a field support person to address the issue, or + - Replace the device. For more information, see the frequently asked questions section on the [Azure Stack Edge pricing page](https://azure.microsoft.com/pricing/details/azure-stack/edge/). + - If you have a software issue with your Kubernetes service that you can't fix yourself, or if you have a virtual machine management question that isn't already documented, the Microsoft support team may consult with you to triage the issue and debug it remotely. 
+ - To address software issues in the Azure Stack Edge platform or services that run on it, Microsoft may work with you directly to provide a workaround or a fix available via a software update. + - For information about software updates for your device, see [Update your Azure Stack Edge Pro GPU](https://docs.microsoft.com/azure/databox-online/azure-stack-edge-gpu-install-update?tabs=version-2106-and-later). + +additionalContent: | + + ## Next steps + + - Learn about [troubleshooting Azure Stack Edge device issues](azure-stack-edge-gpu-troubleshoot.md). \ No newline at end of file diff --git a/articles/databox-online/azure-stack-edge-pro-2-deploy-configure-compute.md b/articles/databox-online/azure-stack-edge-pro-2-deploy-configure-compute.md index 4dd3c6594bd20..3f37fc77e0c95 100644 --- a/articles/databox-online/azure-stack-edge-pro-2-deploy-configure-compute.md +++ b/articles/databox-online/azure-stack-edge-pro-2-deploy-configure-compute.md @@ -32,7 +32,7 @@ In this tutorial, you learn how to: Before you set up a compute role on your Azure Stack Edge Pro device, make sure that: - You've activated your Azure Stack Edge Pro 2 device as described in [Activate Azure Stack Edge Pro 2](azure-stack-edge-pro-2-deploy-activate.md). -- Make sure that you've followed the instructions in [Enable compute network](azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy.md#configure-virtual-switches-and-compute-ips) and: +- Make sure that you've followed the instructions in [Enable compute network](azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy.md#configure-virtual-switches) and: - Enabled a network interface for compute. - Assigned Kubernetes node IPs and Kubernetes external service IPs. diff --git a/articles/databox-online/azure-stack-edge-pro-2-deploy-install.md b/articles/databox-online/azure-stack-edge-pro-2-deploy-install.md index e14d36576f08e..5a0e46aed69c7 100644 --- a/articles/databox-online/azure-stack-edge-pro-2-deploy-install.md +++ b/articles/databox-online/azure-stack-edge-pro-2-deploy-install.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: tutorial -ms.date: 03/22/2022 +ms.date: 05/17/2022 ms.author: alkohli zone_pivot_groups: azure-stack-edge-device-deployment # Customer intent: As an IT admin, I need to understand how to install Azure Stack Edge Pro 2 in datacenter so I can use it to transfer data to Azure. @@ -334,6 +334,9 @@ Follow these steps to cable your device for network: ![Back plane of a cabled device](./media/azure-stack-edge-pro-2-deploy-install/cabled-backplane-1.png) + > [!NOTE] + > Using USB ports to connect any external device, including keyboards and monitors, is not supported for Azure Stack Edge devices. + ::: zone-end ::: zone pivot="two-node" @@ -358,6 +361,8 @@ Cable your device as shown in the following diagram: 1. Connect Port 3 on one device directly (without a switch) to the Port 3 on the other device node. Use a QSFP28 passive direct attached cable (tested in-house) for the connection. 1. Connect Port 4 on one device directly (without a switch) to the Port 4 on the other device node. Use a QSFP28 passive direct attached cable (tested in-house) for the connection. + > [!NOTE] + > Using USB ports to connect any external device, including keyboards and monitors, is not supported for Azure Stack Edge devices. 
#### Using external switches diff --git a/articles/databox-online/azure-stack-edge-pro-r-deploy-install.md b/articles/databox-online/azure-stack-edge-pro-r-deploy-install.md index 9335c24c2b675..6fe5c99af1c88 100644 --- a/articles/databox-online/azure-stack-edge-pro-r-deploy-install.md +++ b/articles/databox-online/azure-stack-edge-pro-r-deploy-install.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: tutorial -ms.date: 3/22/2022 +ms.date: 5/17/2022 ms.author: alkohli # Customer intent: As an IT admin, I need to understand how to install Azure Stack Edge Pro R in datacenter so I can use it to transfer data to Azure. --- @@ -128,6 +128,9 @@ Take the following steps to cable your device for power and network. - If connecting PORT 2, use the RJ-45 network cable. - For the 10/25-GbE network interfaces, use the SFP+ copper cables. + > [!NOTE] + > Using USB ports to connect any external device, including keyboards and monitors, is not supported for Azure Stack Edge devices. + ## Next steps In this tutorial, you learned about Azure Stack Edge Pro R topics such as how to: diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/add-cluster-witness-1m.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/add-cluster-witness-1m.png index da22d529eed6f..ff7423dec9fdd 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/add-cluster-witness-1m.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/add-cluster-witness-1m.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/add-node-1m.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/add-node-1m.png index ee166e9b60966..49f1827cdb3d6 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/add-node-1m.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/add-node-1m.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/add-node-2m.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/add-node-2m.png index 1f8a9f6d80b7d..e9b07c2acd562 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/add-node-2m.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/add-node-2m.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-azure-consistent-services-1m.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-azure-consistent-services-1m.png index 31c7ce3c920f2..64f9075b5b69f 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-azure-consistent-services-1m.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-azure-consistent-services-1m.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-1.png 
b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-1.png index c734c90d6d877..a9c3184c93bec 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-1.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-1.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-2.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-2.png index e3081cb242e93..7ed9eb68996a5 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-2.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-2.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-3.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-3.png index 5b7389c80826b..0b708f28fb353 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-3.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-compute-network-3.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-network-file-system-1m.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-network-file-system-1m.png index 76ae810ed46b5..6614910bebda0 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-network-file-system-1m.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-network-file-system-1m.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-network-interface-1.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-network-interface-1.png index 195071d6da390..cc4354f78b11c 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-network-interface-1.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-network-interface-1.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-web-proxy-1.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-web-proxy-1.png index ce507c6a451b8..e76acb493743e 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-web-proxy-1.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/configure-web-proxy-1.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-1.png 
b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-1.png index c4ce7c5f808c2..4b78d7123a76c 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-1.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-1.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-2.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-2.png index 410d85ebf89f0..c32e92d21a36b 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-2.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-2.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-2a.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-2a.png index 2eacef9cee961..f87be94b94fae 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-2a.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-2a.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-3.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-3.png index 238cca642de15..53394e13d8731 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-3.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-3.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-4.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-4.png index 806a755a025ec..a878555047d19 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-4.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-4.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-settings-updated-1.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-settings-updated-1.png index 0080f952e71be..b8c88e937f704 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-settings-updated-1.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/network-settings-updated-1.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-get-authentication-token-1m.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-get-authentication-token-1m.png index c46f214425266..fe7b7b9fb8881 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-get-authentication-token-1m.png and 
b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-get-authentication-token-1m.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-network-1m.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-network-1m.png index 05e6abe85a35d..3bb825b151b75 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-network-1m.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-network-1m.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-network-2.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-network-2.png index 293525a82589f..6e798dddef68f 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-network-2.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-network-2.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-network-topology-1m.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-network-topology-1m.png index d2eb214e9ff08..a6171cdd51a66 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-network-topology-1m.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-network-topology-1m.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-network-topology-2.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-network-topology-2.png index 2428010fe0687..13ce83dcc9857 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-network-topology-2.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/select-network-topology-2.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/set-ip-no.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/set-ip-no.png new file mode 100644 index 0000000000000..cc23cae189d8a Binary files /dev/null and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/set-ip-no.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/setup-type-prepare-node-1m.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/setup-type-prepare-node-1m.png index a797bfc1abfcb..42a287f27db15 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/setup-type-prepare-node-1m.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/setup-type-prepare-node-1m.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/setup-type-single-node-1.png 
b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/setup-type-single-node-1.png index 846f16c25148a..9ec9775523d09 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/setup-type-single-node-1.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/setup-type-single-node-1.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/setup-type-two-node-1m.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/setup-type-two-node-1m.png index a5baa50d2bc2f..154517c45e3f4 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/setup-type-two-node-1m.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-configure-network-compute-web-proxy/setup-type-two-node-1m.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-virtual-machine-portal/add-virtual-machine-image-6.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-virtual-machine-portal/add-virtual-machine-image-6.png index 67f466a9c5336..05be813284e03 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-virtual-machine-portal/add-virtual-machine-image-6.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-virtual-machine-portal/add-virtual-machine-image-6.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-deploy-virtual-machine-portal/add-virtual-machine-image-9.png b/articles/databox-online/media/azure-stack-edge-gpu-deploy-virtual-machine-portal/add-virtual-machine-image-9.png index b207daa99e9fc..0ce724958a867 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-deploy-virtual-machine-portal/add-virtual-machine-image-9.png and b/articles/databox-online/media/azure-stack-edge-gpu-deploy-virtual-machine-portal/add-virtual-machine-image-9.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-install-update/local-ui-update-3-a.png b/articles/databox-online/media/azure-stack-edge-gpu-install-update/local-ui-update-3-a.png index 4ee1e849e5866..45f0277749e7f 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-install-update/local-ui-update-3-a.png and b/articles/databox-online/media/azure-stack-edge-gpu-install-update/local-ui-update-3-a.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-install-update/local-ui-update-7.png b/articles/databox-online/media/azure-stack-edge-gpu-install-update/local-ui-update-7.png index 91f55aea27d60..036557c8fe878 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-install-update/local-ui-update-7.png and b/articles/databox-online/media/azure-stack-edge-gpu-install-update/local-ui-update-7.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-install-update/portal-update-16-m.png b/articles/databox-online/media/azure-stack-edge-gpu-install-update/portal-update-16-m.png index 7651c49d1fa9d..9b56f9911554d 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-install-update/portal-update-16-m.png and b/articles/databox-online/media/azure-stack-edge-gpu-install-update/portal-update-16-m.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-prepare-windows-generalized-image-iso/vhd-from-iso-09.png 
b/articles/databox-online/media/azure-stack-edge-gpu-prepare-windows-generalized-image-iso/vhd-from-iso-09.png index 11e4cc6c189f5..6f443a8c738f2 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-prepare-windows-generalized-image-iso/vhd-from-iso-09.png and b/articles/databox-online/media/azure-stack-edge-gpu-prepare-windows-generalized-image-iso/vhd-from-iso-09.png differ diff --git a/articles/databox-online/media/azure-stack-edge-gpu-prepare-windows-vhd-generalized-image/create-virtual-machine-3.png b/articles/databox-online/media/azure-stack-edge-gpu-prepare-windows-vhd-generalized-image/create-virtual-machine-3.png index a65737053d493..23e1e3cc43f02 100644 Binary files a/articles/databox-online/media/azure-stack-edge-gpu-prepare-windows-vhd-generalized-image/create-virtual-machine-3.png and b/articles/databox-online/media/azure-stack-edge-gpu-prepare-windows-vhd-generalized-image/create-virtual-machine-3.png differ diff --git a/articles/databox/data-box-customer-managed-encryption-key-portal.md b/articles/databox/data-box-customer-managed-encryption-key-portal.md index 6fae8ea0d358d..586b9e2c08380 100644 --- a/articles/databox/data-box-customer-managed-encryption-key-portal.md +++ b/articles/databox/data-box-customer-managed-encryption-key-portal.md @@ -201,7 +201,7 @@ If you receive any errors related to your customer-managed key, use the followin | SsemUserErrorKeyVaultBadRequestException | Applied a customer-managed key, but key access has not been granted or has been revoked, or the key vault couldn't be accessed because a firewall is enabled. | Add the identity selected to your key vault to enable access to the customer-managed key. If the key vault has a firewall enabled, switch to a system-assigned identity and then add a customer-managed key. For more information, see how to [Enable the key](#enable-key). | | SsemUserErrorEncryptionKeyTypeNotSupported | The encryption key type isn't supported for the operation. | Enable a supported encryption type on the key - for example, RSA or RSA-HSM. For more information, see [Key types, algorithms, and operations](../key-vault/keys/about-keys-details.md). | | SsemUserErrorSoftDeleteAndPurgeProtectionNotEnabled | Key vault does not have soft delete or purge protection enabled. | Ensure that both soft delete and purge protection are enabled on the key vault. | -| SsemUserErrorInvalidKeyVaultUrl
                  (Command-line only) | An invalid key vault URI was used. | Get the correct key vault URI. To get the key vault URI, use [Get-AzKeyVault](/powershell/module/az.keyvault/get-azkeyvault?view=azps-7.1.0) in PowerShell. | +| SsemUserErrorInvalidKeyVaultUrl
                  (Command-line only) | An invalid key vault URI was used. | Get the correct key vault URI. To get the key vault URI, use [Get-AzKeyVault](/powershell/module/az.keyvault/get-azkeyvault?view=azps-7.1.0&preserve-view=true) in PowerShell. | | SsemUserErrorKeyVaultUrlWithInvalidScheme | Only HTTPS is supported for passing the key vault URI. | Pass the key vault URI over HTTPS. | | SsemUserErrorKeyVaultUrlInvalidHost | The key vault URI host is not an allowed host in the geographical region. | In the public cloud, the key vault URI should end with `vault.azure.net`. In the Azure Government cloud, the key vault URI should end with `vault.usgovcloudapi.net`. | | Generic error | Could not fetch the passkey. | This error is a generic error. Contact Microsoft Support to troubleshoot the error and determine the next steps.| diff --git a/articles/ddos-protection/alerts.md b/articles/ddos-protection/alerts.md index 84dfad8275941..6bda7fefe4aac 100644 --- a/articles/ddos-protection/alerts.md +++ b/articles/ddos-protection/alerts.md @@ -8,7 +8,7 @@ ms.service: ddos-protection ms.topic: article ms.tgt_pltfrm: na ms.workload: infrastructure-services -ms.date: 3/11/2022 +ms.date: 06/07/2022 ms.author: abell --- @@ -89,7 +89,7 @@ There are two specific alerts that you will see for any DDoS attack detection an - **DDoS Attack detected for Public IP**: This alert is generated when the DDoS protection service detects that one of your public IP addresses is the target of a DDoS attack. - **DDoS Attack mitigated for Public IP**: This alert is generated when an attack on the public IP address has been mitigated. -To view the alerts, open **Defender for Cloud** in the Azure portal. Under **Threat Protection**, select **Security alerts**. The following screenshot shows an example of the DDoS attack alerts. +To view the alerts, open **Defender for Cloud** in the Azure portal and select **Security alerts**. Under **Threat Protection**, select **Security alerts**. The following screenshot shows an example of the DDoS attack alerts. ![DDoS Alert in Microsoft Defender for Cloud](./media/manage-ddos-protection/ddos-alert-asc.png) diff --git a/articles/ddos-protection/ddos-protection-overview.md b/articles/ddos-protection/ddos-protection-overview.md index 3c473cc79a974..bb2fcf51cea77 100644 --- a/articles/ddos-protection/ddos-protection-overview.md +++ b/articles/ddos-protection/ddos-protection-overview.md @@ -8,7 +8,7 @@ ms.service: ddos-protection ms.topic: overview ms.tgt_pltfrm: na ms.workload: infrastructure-services -ms.date: 09/9/2020 +ms.date: 06/07/2022 ms.author: abell --- @@ -25,7 +25,7 @@ Azure DDoS Protection Standard, combined with application design best practices, - **Always-on traffic monitoring:** Your application traffic patterns are monitored 24 hours a day, 7 days a week, looking for indicators of DDoS attacks. DDoS Protection Standard instantly and automatically mitigates the attack, once it is detected. - **Adaptive tuning:** Intelligent traffic profiling learns your application's traffic over time, and selects and updates the profile that is the most suitable for your service. The profile adjusts as traffic changes over time. - **Multi-Layered protection:** When deployed with a web application firewall (WAF), DDoS Protection Standard protects both at the network layer (Layer 3 and 4, offered by Azure DDoS Protection Standard) and at the application layer (Layer 7, offered by a WAF). 
WAF offerings include Azure [Application Gateway WAF SKU](../web-application-firewall/ag/ag-overview.md?toc=%2fazure%2fvirtual-network%2ftoc.json) as well as third-party web application firewall offerings available in the [Azure Marketplace](https://azuremarketplace.microsoft.com/marketplace/apps?page=1&search=web%20application%20firewall). -- **Extensive mitigation scale:** Over 60 different attack types can be mitigated, with global capacity, to protect against the largest known DDoS attacks. +- **Extensive mitigation scale:** all L3/L4 attack vectors can be mitigated, with global capacity, to protect against the largest known DDoS attacks. - **Attack analytics:** Get detailed reports in five-minute increments during an attack, and a complete summary after the attack ends. Stream mitigation flow logs to [Microsoft Sentinel](../sentinel/data-connectors-reference.md#azure-ddos-protection) or an offline security information and event management (SIEM) system for near real-time monitoring during an attack. - **Attack metrics:** Summarized metrics from each attack are accessible through Azure Monitor. - **Attack alerting:** Alerts can be configured at the start and stop of an attack, and over the attack's duration, using built-in attack metrics. Alerts integrate into your operational software like Microsoft Azure Monitor logs, Splunk, Azure Storage, Email, and the Azure portal. diff --git a/articles/ddos-protection/ddos-protection-partner-onboarding.md b/articles/ddos-protection/ddos-protection-partner-onboarding.md index ea1efc74debef..e906d6552f7d8 100644 --- a/articles/ddos-protection/ddos-protection-partner-onboarding.md +++ b/articles/ddos-protection/ddos-protection-partner-onboarding.md @@ -5,7 +5,7 @@ ms.service: ddos-protection documentationcenter: na author: AbdullahBell ms.topic: how-to -ms.date: 08/28/2020 +ms.date: 06/07/2022 ms.author: abell --- # Partnering with Azure DDoS Protection Standard @@ -88,4 +88,4 @@ The following steps are required for partners to configure integration with Azur View existing partner integrations: - [Barracuda WAF-as-a-service](https://www.barracuda.com/waf-as-a-service) -- [Azure Cloud WAF from Radware](https://www.radware.com/resources/microsoft-azure/) + diff --git a/articles/ddos-protection/manage-ddos-protection.md b/articles/ddos-protection/manage-ddos-protection.md index a8e87fd82dbee..5dab887ab85f4 100644 --- a/articles/ddos-protection/manage-ddos-protection.md +++ b/articles/ddos-protection/manage-ddos-protection.md @@ -3,7 +3,7 @@ title: Manage Azure DDoS Protection Standard using the Azure portal description: Learn how to use Azure DDoS Protection Standard to mitigate an attack. 
services: ddos-protection documentationcenter: na -author: AbdullahBell +author: amirdahan editor: '' tags: azure-resource-manager @@ -15,7 +15,7 @@ ms.workload: infrastructure-services ms.custom: fasttrack-edit ms.date: 05/04/2022 -ms.author: yitoh +ms.author: amirdahan --- diff --git a/articles/defender-for-cloud/TOC.yml b/articles/defender-for-cloud/TOC.yml index f17ae41955d1d..a9eddd3e1d3fa 100644 --- a/articles/defender-for-cloud/TOC.yml +++ b/articles/defender-for-cloud/TOC.yml @@ -97,6 +97,9 @@ - name: Reference list of alerts href: alerts-reference.md + - name: The Defender for Cloud multicloud solution + href: multicloud.yml + - name: How-to guides items: - name: Use the Overview dashboard @@ -116,10 +119,14 @@ href: custom-dashboards-azure-workbooks.md - name: Use security recommendations to improve security items: + - name: Create custom security initiatives and policies + href: custom-security-policies.md - name: Review your security recommendations href: review-security-recommendations.md - name: Remediate recommendations href: implement-security-recommendations.md + - name: Improve your security posture with recommendation governance + href: governance-rules.md - name: Prevent misconfigurations with Enforce/Deny displayName: recommendations href: prevent-misconfigurations.md @@ -131,8 +138,6 @@ - name: Exempt recommendations per resource, subscription, or management group displayName: disable href: exempt-resource.md - - name: Create custom security initiatives and policies - href: custom-security-policies.md - name: Protect your machines displayName: hybrid, arc, Azure Defender for machines items: @@ -232,6 +237,8 @@ - name: Overview of Defender for Containers displayName: kubernetes, aks, acr, registries, k8s, arc, hybrid, on-premises, azure arc, multicloud href: defender-for-containers-introduction.md + - name: How does Defender for Containers work? 
+ href: defender-for-containers-architecture.md - name: Enable Defender for Containers displayName: kubernetes, aks, acr, registries, k8s, arc, hybrid, on-premises, azure arc, multicloud href: defender-for-containers-enable.md @@ -390,8 +397,32 @@ href: https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/bg-p/MicrosoftDefenderCloudBlog - name: Microsoft Defender for Cloud on Stack Overflow href: https://stackoverflow.com/search?q=microsoft+defender+for+cloud - - name: MDfCinTheField YouTube videos - href: https://www.youtube.com/hashtag/mdfcinthefield + - name: Microsoft Defender for Cloud in the field + items: + - name: New AWS connector + href: episode-one.md + - name: Integrate with Azure Purview + href: episode-two.md + - name: Microsoft Defender for Containers + href: episode-three.md + - name: Security posture management improvements + href: episode-four.md + - name: Microsoft Defender for Servers + href: episode-five.md + - name: Lessons learned from the field + href: episode-six.md + - name: New GCP connector + href: episode-seven.md + - name: Microsoft Defender for IoT + href: episode-eight.md + - name: Microsoft Defender for Containers in a multicloud environment + href: episode-nine.md + - name: Protecting containers in GCP with Defender for Containers + href: episode-ten.md + - name: Threat landscape for containers + href: episode-eleven.md + - name: Enhanced workload protection features in Defender for Servers + href: episode-twelve.md - name: Pricing href: https://azure.microsoft.com/pricing/details/azure-defender/ - name: Regional availability diff --git a/articles/defender-for-cloud/alerts-reference.md b/articles/defender-for-cloud/alerts-reference.md index eb320c2b0d719..6c202d011528d 100644 --- a/articles/defender-for-cloud/alerts-reference.md +++ b/articles/defender-for-cloud/alerts-reference.md @@ -311,72 +311,72 @@ Microsoft Defender for Containers provides security alerts on the cluster level |--|--|:-:|--| | **Attempt to create a new Linux namespace from a container detected (Preview)**
                  (K8S.NODE_NamespaceCreation) | Analysis of processes running within a container in Kubernetes cluster detected an attempt to create a new Linux namespace. While this behavior might be legitimate, it might indicate that an attacker tries to escape from the container to the node. Some CVE-2022-0185 exploitations use this technique. | PrivilegeEscalation | Medium | | **A file was downloaded and executed (Preview)**
                  (K8S.NODE_LinuxSuspiciousActivity) | Analysis of processes running within a container indicates that a file has been downloaded to the container, given execution privileges and then executed. | Execution | Medium | -| **A history file has been cleared (Preview)**
                  (K8S.NODE_HistoryFileCleared) | Analysis of processes running within a container indicates that the command history log file has been cleared. Attackers may do this to cover their tracks. The operation was performed by the specified user account. | DefenseEvasion | Medium | +| **A history file has been cleared (Preview)**
                  (K8S.NODE_HistoryFileCleared) | Analysis of processes running within a container or directly on a Kubernetes node, has detected that the command history log file has been cleared. Attackers may do this to cover their tracks. The operation was performed by the specified user account. | DefenseEvasion | Medium | | **Abnormal activity of managed identity associated with Kubernetes (Preview)**
                  (K8S_AbnormalMiAcitivty) | Analysis of Azure Resource Manager operations detected an abnormal behavior of a managed identity used by an AKS addon. The detected activity isn\'t consistent with the behavior of the associated addon. While this activity can be legitimate, such behavior might indicate that the identity was gained by an attacker, possibly from a compromised container in the Kubernetes cluster. | Lateral Movement | Medium | | **Abnormal Kubernetes service account operation detected**
                  (K8S_ServiceAccountRareOperation) | Kubernetes audit log analysis detected abnormal behavior by a service account in your Kubernetes cluster. The service account was used for an operation which isn't common for this service account. While this activity can be legitimate, such behavior might indicate that the service account is being used for malicious purposes. | Lateral Movement, Credential Access | Medium | -| **An uncommon connection attempt detected (Preview)**
                  (K8S.NODE_SuspectConnection) | Analysis of processes running within a container detected an uncommon connection attempt utilizing a socks protocol. This is very rare in normal operations, but a known technique for attackers attempting to bypass network-layer detections. | Execution, Exfiltration, Exploitation | Medium | +| **An uncommon connection attempt detected (Preview)**
                  (K8S.NODE_SuspectConnection) | Analysis of processes running within a container or directly on a Kubernetes node, has detected an uncommon connection attempt utilizing a socks protocol. This is very rare in normal operations, but a known technique for attackers attempting to bypass network-layer detections. | Execution, Exfiltration, Exploitation | Medium | | **Anomalous pod deployment (Preview)**
                  (K8S_AnomalousPodDeployment) [2](#footnote2) | Kubernetes audit log analysis detected pod deployment which is anomalous based on previous pod deployment activity. This activity is considered an anomaly when taking into account how the different features seen in the deployment operation are in relations to one another. The features monitored include the container image registry used, the account performing the deployment, day of the week, how often this account performs pod deployments, user agent used in the operation, whether this is a namespace to which pod deployments often occur, and other features. Top contributing reasons for raising this alert as anomalous activity are detailed under the alert’s extended properties. | Execution | Medium | -| **Attempt to stop apt-daily-upgrade.timer service detected (Preview)**
                  (K8S.NODE_TimerServiceDisabled) | Analysis of host/device data detected an attempt to stop apt-daily-upgrade.timer service. Attackers have been observed stopping this service to download malicious files and grant execution privileges for their attacks. This activity can also happen if the service is updated through normal administrative actions. | DefenseEvasion | Informational | -| **Behavior similar to common Linux bots detected (Preview)**
                  (K8S.NODE_CommonBot) | Analysis of processes running within a container detected execution of a process normally associated with common Linux botnets. | Execution, Collection, Command And Control | Medium | -| **Behavior similar to Fairware ransomware detected (Preview)**
                  (K8S.NODE_FairwareMalware) | Analysis of processes running within a container detected the execution of rm -rf commands applied to suspicious locations. As rm -rf will recursively delete files, it is normally used on discrete folders. In this case, it is being used in a location that could remove a lot of data. Fairware ransomware is known to execute rm -rf commands in this folder. | Execution | Medium | +| **Attempt to stop apt-daily-upgrade.timer service detected (Preview)**
                  (K8S.NODE_TimerServiceDisabled) | Analysis of processes running within a container or directly on a Kubernetes node, has detected an attempt to stop apt-daily-upgrade.timer service. Attackers have been observed stopping this service to download malicious files and grant execution privileges for their attacks. This activity can also happen if the service is updated through normal administrative actions. | DefenseEvasion | Informational | +| **Behavior similar to common Linux bots detected (Preview)**
                  (K8S.NODE_CommonBot) | Analysis of processes running within a container or directly on a Kubernetes node, has detected the execution of a process normally associated with common Linux botnets. | Execution, Collection, Command And Control | Medium | +| **Behavior similar to Fairware ransomware detected (Preview)**
                  (K8S.NODE_FairwareMalware) | Analysis of processes running within a container or directly on a Kubernetes node, has detected execution of rm -rf commands applied to suspicious locations. As rm -rf will recursively delete files, it is normally used on discrete folders. In this case, it is being used in a location that could remove a lot of data. Fairware ransomware is known to execute rm -rf commands in this folder. | Execution | Medium | | **Command within a container running with high privileges (Preview)**
                  (K8S.NODE_PrivilegedExecutionInContainer) | Machine logs indicate that a privileged command was run in a Docker container. A privileged command has extended privileges on the host machine. | PrivilegeEscalation | Low | -| **Container running in privileged mode (Preview)**
                  (K8S.NODE_PrivilegedContainerArtifacts) | Machine logs indicate that a privileged Docker container is running. A privileged container has full access to the host's resources. If compromised, an attacker can use the privileged container to gain access to the host machine. | PrivilegeEscalation, Execution | Low | +| **Container running in privileged mode (Preview)**
                  (K8S.NODE_PrivilegedContainerArtifacts) | Analysis of processes running within a container or directly on a Kubernetes node, has detected the execution of a Docker command that is running a privileged container. The privileged container has full access to the hosting pod or host resource. If compromised, an attacker may use the privileged container to gain access to the hosting pod or host. | PrivilegeEscalation, Execution | Low | | **Container with a sensitive volume mount detected**
                  (K8S_SensitiveMount) | Kubernetes audit log analysis detected a new container with a sensitive volume mount. The volume that was detected is a hostPath type which mounts a sensitive file or folder from the node to the container. If the container gets compromised, the attacker can use this mount for gaining access to the node. | Privilege Escalation | Medium | | **CoreDNS modification in Kubernetes detected**
                  (K8S_CoreDnsModification) [1](#footnote1) [3](#footnote3) | Kubernetes audit log analysis detected a modification of the CoreDNS configuration. The configuration of CoreDNS can be modified by overriding its configmap. While this activity can be legitimate, if attackers have permissions to modify the configmap, they can change the behavior of the cluster’s DNS server and poison it. | Lateral Movement | Low | | **Creation of admission webhook configuration detected**
                  (K8S_AdmissionController) [3](#footnote3) | Kubernetes audit log analysis detected a new admission webhook configuration. Kubernetes has two built-in generic admission controllers: MutatingAdmissionWebhook and ValidatingAdmissionWebhook. The behavior of these admission controllers is determined by an admission webhook that the user deploys to the cluster. The usage of such admission controllers can be legitimate, however attackers can use such webhooks for modifying the requests (in case of MutatingAdmissionWebhook) or inspecting the requests and gain sensitive information (in case of ValidatingAdmissionWebhook). | Credential Access, Persistence | Low | -| **Detected file download from a known malicious source (Preview)**
                  (K8S.NODE_SuspectDownload) | Analysis of processes running within a container detected download of a file from a source frequently used to distribute malware. | PrivilegeEscalation, Execution, Exfiltration, Command And Control | Medium | -| **Detected Persistence Attempt (Preview)**
                  (K8S.NODE_NewSingleUserModeStartupScript) | Analysis of processes running within a container detected installation of a startup script for single-user mode. It is extremely rare that any legitimate process needs to execute in that mode so it may indicate an attacker has added a malicious process to every run-level to guarantee persistence. | Persistence | Medium | -| **Detected suspicious file download (Preview)**
                  (K8S.NODE_SuspectDownloadArtifacts) | Analysis of processes running within a container detected suspicious download of a remote file. | Persistence | Low | -| **Detected suspicious use of the nohup command (Preview)**
                  (K8S.NODE_SuspectNohup) | Analysis of processes running within a container detected suspicious use of the nohup command. Attackers have been seen using the command nohup to run hidden files from a temporary directory to allow their executables to run in the background. It is rare to see this command run on hidden files located in a temporary directory. | Persistence, DefenseEvasion | Medium | -| **Detected suspicious use of the useradd command (Preview)**
                  (K8S.NODE_SuspectUserAddition) | Analysis of processes running within a container detected suspicious use of the useradd command. | Persistence | Medium | +| **Detected file download from a known malicious source (Preview)**
                  (K8S.NODE_SuspectDownload) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a download of a file from a source frequently used to distribute malware. | PrivilegeEscalation, Execution, Exfiltration, Command And Control | Medium | +| **Detected Persistence Attempt (Preview)**
                  (K8S.NODE_NewSingleUserModeStartupScript) | Analysis of processes running within a container or directly on a Kubernetes node, has detected the installation of a startup script for single-user mode. It is extremely rare that any legitimate process needs to execute in that mode so it may indicate an attacker has added a malicious process to every run-level to guarantee persistence. | Persistence | Medium | +| **Detected suspicious file download (Preview)**
                  (K8S.NODE_SuspectDownloadArtifacts) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a suspicious download of a remote file. | Persistence | Low | +| **Detected suspicious use of the nohup command (Preview)**
                  (K8S.NODE_SuspectNohup) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a suspicious use of the nohup command. Attackers have been seen using the command nohup to run hidden files from a temporary directory to allow their executables to run in the background. It is rare to see this command run on hidden files located in a temporary directory. | Persistence, DefenseEvasion | Medium | +| **Detected suspicious use of the useradd command (Preview)**
                  (K8S.NODE_SuspectUserAddition) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a suspicious use of the useradd command. | Persistence | Medium | | **Digital currency mining container detected**
                  (K8S_MaliciousContainerImage) [2](#footnote2) | Kubernetes audit log analysis detected a container that has an image associated with a digital currency mining tool. | Execution | High | -| **Digital currency mining related behavior detected (Preview)**
                  (K8S.NODE_DigitalCurrencyMining) | Analysis of host data detected the execution of a process or command normally associated with digital currency mining. | Execution | High | -| **Docker build operation detected on a Kubernetes node (Preview)**
                  (K8S.NODE_ImageBuildOnNode) | Analysis of processes running within a container indicates a build operation of a container image on a Kubernetes node. While this behavior might be legitimate, attackers might build their malicious images locally to avoid detection. | DefenseEvasion | Low | +| **Digital currency mining related behavior detected (Preview)**
                  (K8S.NODE_DigitalCurrencyMining) | Analysis of processes running within a container or directly on a Kubernetes node, has detected an execution of a process or command normally associated with digital currency mining. | Execution | High | +| **Docker build operation detected on a Kubernetes node (Preview)**
                  (K8S.NODE_ImageBuildOnNode) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a build operation of a container image on a Kubernetes node. While this behavior might be legitimate, attackers might build their malicious images locally to avoid detection. | DefenseEvasion | Low | | **Excessive role permissions assigned in Kubernetes cluster (Preview)**
                  (K8S_ServiceAcountPermissionAnomaly) [3](#footnote3) | Analysis of the Kubernetes audit logs detected an excessive permissions role assignment to your cluster. The listed permissions for the assigned roles are uncommon to the specific service account. This detection considers previous role assignments to the same service account across clusters monitored by Azure, volume per permission, and the impact of the specific permission. The anomaly detection model used for this alert takes into account how this permission is used across all clusters monitored by Microsoft Defender for Cloud. | Privilege Escalation | Low | -| **Executable found running from a suspicious location (Preview)**
                  (K8S.NODE_SuspectExecutablePath) | Analysis of host data detected an executable file that is running from a location associated with known suspicious files. This executable could either be legitimate activity, or an indication of a compromised host. | Execution | Medium | -| **Execution of hidden file (Preview)**
                  (K8S.NODE_ExecuteHiddenFile) | Analysis of host data indicates that a hidden file was executed by the specified user account. | Persistence, DefenseEvasion | Informational | -| **Exposed Docker daemon on TCP socket (Preview)**
                  (K8S.NODE_ExposedDocker) | Machine logs indicate that your Docker daemon (dockerd) exposes a TCP socket. By default, Docker configuration, does not use encryption or authentication when a TCP socket is enabled. This enables full access to the Docker daemon, by anyone with access to the relevant port. | Execution, Exploitation | Medium | +| **Executable found running from a suspicious location (Preview)**
                  (K8S.NODE_SuspectExecutablePath) | Analysis of processes running within a container or directly on a Kubernetes node, has detected an executable file that is running from a location associated with known suspicious files. This executable could either be legitimate activity, or an indication of a compromised system. | Execution | Medium | +| **Execution of hidden file (Preview)**
                  (K8S.NODE_ExecuteHiddenFile) | Analysis of processes running within a container or directly on a Kubernetes node, has detected that a hidden file was executed by the specified user account. | Persistence, DefenseEvasion | Informational | +| **Exposed Docker daemon on TCP socket (Preview)**
                  (K8S.NODE_ExposedDocker) | Analysis of processes running within a container or directly on a Kubernetes node, has detected that your Docker daemon (dockerd) exposes a TCP socket. By default, Docker configuration does not use encryption or authentication when a TCP socket is enabled. This enables full access to the Docker daemon by anyone with access to the relevant port. | Execution, Exploitation | Medium | | **Exposed Kubeflow dashboard detected**
                  (K8S_ExposedKubeflow) | The Kubernetes audit log analysis detected exposure of the Istio Ingress by a load balancer in a cluster that runs Kubeflow. This action might expose the Kubeflow dashboard to the internet. If the dashboard is exposed to the internet, attackers can access it and run malicious containers or code on the cluster. Find more details in the following article: https://aka.ms/exposedkubeflow-blog | Initial Access | Medium | | **Exposed Kubernetes dashboard detected**
                  (K8S_ExposedDashboard) | Kubernetes audit log analysis detected exposure of the Kubernetes Dashboard by a LoadBalancer service. Exposed dashboard allows an unauthenticated access to the cluster management and poses a security threat. | Initial Access | High | | **Exposed Kubernetes service detected**
                  (K8S_ExposedService) | The Kubernetes audit log analysis detected exposure of a service by a load balancer. This service is related to a sensitive application that allows high impact operations in the cluster such as running processes on the node or creating new containers. In some cases, this service doesn't require authentication. If the service doesn't require authentication, exposing it to the internet poses a security risk. | Initial Access | Medium | | **Exposed Redis service in AKS detected**
                  (K8S_ExposedRedis) | The Kubernetes audit log analysis detected exposure of a Redis service by a load balancer. If the service doesn't require authentication, exposing it to the internet poses a security risk. | Initial Access | Low | -| **Indicators associated with DDOS toolkit detected (Preview)**
                  (K8S.NODE_KnownLinuxDDoSToolkit) [2](#footnote2) | Analysis of processes running within a container detected file names that are part of a toolkit associated with malware capable of launching DDoS attacks, opening ports and services, and taking full control over the infected system. This could also possibly be legitimate activity. | Persistence, LateralMovement, Execution, Exploitation | Medium | +| **Indicators associated with DDOS toolkit detected (Preview)**
                  (K8S.NODE_KnownLinuxDDoSToolkit) [2](#footnote2) | Analysis of processes running within a container or directly on a Kubernetes node, has detected file names that are part of a toolkit associated with malware capable of launching DDoS attacks, opening ports and services, and taking full control over the infected system. This could also possibly be legitimate activity. | Persistence, LateralMovement, Execution, Exploitation | Medium | | **K8S API requests from proxy IP address detected**
                  (K8S_TI_Proxy) [3](#footnote3) | Kubernetes audit log analysis detected API requests to your cluster from an IP address that is associated with proxy services, such as TOR. While this behavior can be legitimate, it's often seen in malicious activities, when attackers try to hide their source IP. | Execution | Low | | **Kubernetes events deleted**
                  (K8S_DeleteEvents) [1](#footnote1) [3](#footnote3) | Defender for Cloud detected that some Kubernetes events have been deleted. Kubernetes events are objects in Kubernetes which contain information about changes in the cluster. Attackers might delete those events for hiding their operations in the cluster. | Defense Evasion | Low | | **Kubernetes penetration testing tool detected**
                  (K8S_PenTestToolsKubeHunter) | Kubernetes audit log analysis detected usage of Kubernetes penetration testing tool in the AKS cluster. While this behavior can be legitimate, attackers might use such public tools for malicious purposes. | Execution | Low | -| **Local host reconnaissance detected (Preview)**
                  (K8S.NODE_LinuxReconnaissance) | Analysis of processes running within a container detected the execution of a command normally associated with common Linux bot reconnaissance. | Discovery | Medium | -| **Manipulation of host firewall detected (Preview)**
                  (K8S.NODE_FirewallDisabled) | Analysis of processes running within a container detected possible manipulation of the on-host firewall. Attackers will often disable this to exfiltrate data. | DefenseEvasion, Exfiltration | Medium | +| **Local host reconnaissance detected (Preview)**
                  (K8S.NODE_LinuxReconnaissance) | Analysis of processes running within a container or directly on a Kubernetes node, has detected the execution of a command normally associated with common Linux bot reconnaissance. | Discovery | Medium | +| **Manipulation of host firewall detected (Preview)**
                  (K8S.NODE_FirewallDisabled) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a possible manipulation of the on-host firewall. Attackers will often disable this to exfiltrate data. | DefenseEvasion, Exfiltration | Medium | | **Microsoft Defender for Cloud test alert (not a threat). (Preview)**
                  (K8S.NODE_EICAR) | This is a test alert generated by Microsoft Defender for Cloud. No further action is needed. | Execution | High | -| **MITRE Caldera agent detected (Preview)**
                  (K8S.NODE_MitreCalderaTools) | Analysis of processes running within a container indicate that a suspicious process was running. This is often associated with the MITRE 54ndc47 agent which could be used maliciously to attack other machines. | Persistence, PrivilegeEscalation, DefenseEvasion, CredentialAccess, Discovery, LateralMovement, Execution, Collection, Exfiltration, Command And Control, Probing, Exploitation | Medium | +| **MITRE Caldera agent detected (Preview)**
                  (K8S.NODE_MitreCalderaTools) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a suspicious process. This is often associated with the MITRE 54ndc47 agent which could be used maliciously to attack other machines. | Persistence, PrivilegeEscalation, DefenseEvasion, CredentialAccess, Discovery, LateralMovement, Execution, Collection, Exfiltration, Command And Control, Probing, Exploitation | Medium | | **New container in the kube-system namespace detected**
                  (K8S_KubeSystemContainer) [2](#footnote2) | Kubernetes audit log analysis detected a new container in the kube-system namespace that isn’t among the containers that normally run in this namespace. The kube-system namespaces should not contain user resources. Attackers can use this namespace for hiding malicious components. | Persistence | Low | | **New high privileges role detected**
                  (K8S_HighPrivilegesRole) [3](#footnote3) | Kubernetes audit log analysis detected a new role with high privileges. A binding to a role with high privileges gives the user\group high privileges in the cluster. Unnecessary privileges might cause privilege escalation in the cluster. | Persistence | Low | -| **Possible attack tool detected (Preview)**
                  (K8S.NODE_KnownLinuxAttackTool) | Analysis of processes running within a container indicates a suspicious tool ran. This tool is often associated with malicious users attacking others. | Execution, Collection, Command And Control, Probing | Medium | -| **Possible backdoor detected (Preview)**
                  (K8S.NODE_LinuxBackdoorArtifact) | Analysis of processes running within a container detected a suspicious file being downloaded and run. This activity has previously been associated with installation of a backdoor. | Persistence, DefenseEvasion, Execution, Exploitation | Medium | -| **Possible command line exploitation attempt (Preview)**
                  (K8S.NODE_ExploitAttempt) | Analysis of processes running within a container detected a possible exploitation attempt against a known vulnerability. | Exploitation | Medium | -| **Possible credential access tool detected (Preview)**
                  (K8S.NODE_KnownLinuxCredentialAccessTool) | Analysis of processes running within a container indicates a possible known credential access tool was running on the container, as identified by the specified process and commandline history item. This tool is often associated with attacker attempts to access credentials. | CredentialAccess | Medium | -| **Possible Cryptocoinminer download detected (Preview)**
                  (K8S.NODE_CryptoCoinMinerDownload) | Analysis of processes running within a container detected the download of a file normally associated with digital currency mining. | DefenseEvasion, Command And Control, Exploitation | Medium | -| **Possible data exfiltration detected (Preview)**
                  (K8S.NODE_DataEgressArtifacts) | Analysis of host/device data detected a possible data egress condition. Attackers will often egress data from machines they have compromised. | Collection, Exfiltration | Medium | -| **Possible Log Tampering Activity Detected (Preview)**
                  (K8S.NODE_SystemLogRemoval) | Analysis of processes running within a container detected possible removal of files that tracks user's activity during the course of its operation. Attackers often try to evade detection and leave no trace of malicious activities by deleting such log files. | DefenseEvasion | Medium | -| **Possible password change using crypt-method detected (Preview)**
                  (K8S.NODE_SuspectPasswordChange) | Analysis of processes running within a container detected a password change using the crypt method. Attackers can make this change to continue access and gain persistence after compromise. | CredentialAccess | Medium | -| **Potential overriding of common files (Preview)**
                  (K8S.NODE_OverridingCommonFiles) | Analysis of processes running within a container detected common files as a way to obfuscate their actions or for persistence. | Persistence | Medium | -| **Potential port forwarding to external IP address (Preview)**
                  (K8S.NODE_SuspectPortForwarding) | Analysis of processes running within a container detected the initiation of port forwarding to an external IP address. | Exfiltration, Command And Control | Medium | -| **Potential reverse shell detected (Preview)**
                  (K8S.NODE_ReverseShell) | Analysis of processes running within a container detected a potential reverse shell. These are used to get a compromised machine to call back into a machine an attacker owns. | Exfiltration, Exploitation | Medium | +| **Possible attack tool detected (Preview)**
                  (K8S.NODE_KnownLinuxAttackTool) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a suspicious tool invocation. This tool is often associated with malicious users attacking others. | Execution, Collection, Command And Control, Probing | Medium | +| **Possible backdoor detected (Preview)**
                  (K8S.NODE_LinuxBackdoorArtifact) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a suspicious file being downloaded and run. This activity has previously been associated with installation of a backdoor. | Persistence, DefenseEvasion, Execution, Exploitation | Medium | +| **Possible command line exploitation attempt (Preview)**
                  (K8S.NODE_ExploitAttempt) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a possible exploitation attempt against a known vulnerability. | Exploitation | Medium | +| **Possible credential access tool detected (Preview)**
                  (K8S.NODE_KnownLinuxCredentialAccessTool) | Analysis of processes running within a container or directly on a Kubernetes node, has detected that a possible known credential access tool was running on the container, as identified by the specified process and commandline history item. This tool is often associated with attacker attempts to access credentials. | CredentialAccess | Medium | +| **Possible Cryptocoinminer download detected (Preview)**
                  (K8S.NODE_CryptoCoinMinerDownload) | Analysis of processes running within a container or directly on a Kubernetes node, has detected download of a file normally associated with digital currency mining. | DefenseEvasion, Command And Control, Exploitation | Medium | +| **Possible data exfiltration detected (Preview)**
                  (K8S.NODE_DataEgressArtifacts) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a possible data egress condition. Attackers will often egress data from machines they have compromised. | Collection, Exfiltration | Medium | +| **Possible Log Tampering Activity Detected (Preview)**
                  (K8S.NODE_SystemLogRemoval) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a possible removal of files that track the user's activity during the course of its operation. Attackers often try to evade detection and leave no trace of malicious activities by deleting such log files. | DefenseEvasion | Medium | +| **Possible password change using crypt-method detected (Preview)**
                  (K8S.NODE_SuspectPasswordChange) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a password change using the crypt method. Attackers can make this change to continue access and gain persistence after compromise. | CredentialAccess | Medium | +| **Potential overriding of common files (Preview)**
                  (K8S.NODE_OverridingCommonFiles) | Analysis of processes running within a container or directly on a Kubernetes node, has detected the overriding of common files, a technique used to obfuscate actions or to maintain persistence. | Persistence | Medium | +| **Potential port forwarding to external IP address (Preview)**
                  (K8S.NODE_SuspectPortForwarding) | Analysis of processes running within a container or directly on a Kubernetes node, has detected an initiation of port forwarding to an external IP address. | Exfiltration, Command And Control | Medium | +| **Potential reverse shell detected (Preview)**
                  (K8S.NODE_ReverseShell) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a potential reverse shell. These are used to get a compromised machine to call back into a machine an attacker owns. | Exfiltration, Exploitation | Medium | | **Privileged container detected**
                  (K8S_PrivilegedContainer) | Kubernetes audit log analysis detected a new privileged container. A privileged container has access to the node’s resources and breaks the isolation between containers. If compromised, an attacker can use the privileged container to gain access to the node. | Privilege Escalation | Low | -| **Process associated with digital currency mining detected (Preview)**
                  (K8S.NODE_CryptoCoinMinerArtifacts) | Analysis of processes running within a container detected the execution of a process normally associated with digital currency mining. | Execution, Exploitation | Medium | +| **Process associated with digital currency mining detected (Preview)**
                  (K8S.NODE_CryptoCoinMinerArtifacts) | Analysis of processes running within a container or directly on a Kubernetes node, has detected execution of a process normally associated with digital currency mining. | Execution, Exploitation | Medium | | **Process seen accessing the SSH authorized keys file in an unusual way (Preview)**
                  (K8S.NODE_SshKeyAccess) | An SSH authorized_keys file was accessed in a method similar to known malware campaigns. This access could signify that an actor is attempting to gain persistent access to a machine. | Unknown | Low | | **Role binding to the cluster-admin role detected**
                  (K8S_ClusterAdminBinding) | Kubernetes audit log analysis detected a new binding to the cluster-admin role which gives administrator privileges. Unnecessary administrator privileges might cause privilege escalation in the cluster. | Persistence | Low | -| **Screenshot taken on host (Preview)**
                  (K8S.NODE_KnownLinuxScreenshotTool) | Analysis of host/device data detected the use of a screen capture tool. Attackers may use these tools to access private data. | Collection | Low | -| **Script extension mismatch detected (Preview)**
                  (K8S.NODE_MismatchedScriptFeatures) | Analysis of processes running within a container detected a mismatch between the script interpreter and the extension of the script file provided as input. This has frequently been associated with attacker script executions. | DefenseEvasion | Medium | -| **Security-related process termination detected (Preview)**
                  (K8S.NODE_SuspectProcessTermination) | Analysis of processes running within a container detected attempt to terminate processes related to security monitoring on the container. Attackers will often try to terminate such processes using predefined scripts post-compromise. | Persistence | Low | +| **Screenshot taken on host (Preview)**
                  (K8S.NODE_KnownLinuxScreenshotTool) | Analysis of processes running within a container or directly on a Kubernetes node, has detected the use of a screen capture tool. This isn't a common usage scenario for containers and could be part of an attacker's attempt to access private data. | Collection | Low | +| **Script extension mismatch detected (Preview)**
                  (K8S.NODE_MismatchedScriptFeatures) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a mismatch between the script interpreter and the extension of the script file provided as input. This has frequently been associated with attacker script executions. | DefenseEvasion | Medium | +| **Security-related process termination detected (Preview)**
                  (K8S.NODE_SuspectProcessTermination) | Analysis of processes running within a container or directly on a Kubernetes node, has detected an attempt to terminate processes related to security monitoring on the container. Attackers will often try to terminate such processes using predefined scripts post-compromise. | Persistence | Low | | **SSH server is running inside a container (Preview)**
                  (K8S.NODE_ContainerSSH) | Analysis of processes running within a container detected an SSH server running inside the container. | Execution | Medium | -| **Suspicious compilation detected (Preview)**
                  (K8S.NODE_SuspectCompilation) | Analysis of processes running within a container detected suspicious compilation. Attackers will often compile exploits to escalate privileges. | PrivilegeEscalation, Exploitation | Medium | -| **Suspicious file timestamp modification (Preview)**
                  (K8S.NODE_TimestampTampering) | Analysis of host/device data detected a suspicious timestamp modification. Attackers will often copy timestamps from existing legitimate files to new tools to avoid detection of these newly dropped files. | Persistence, DefenseEvasion | Low | +| **Suspicious compilation detected (Preview)**
                  (K8S.NODE_SuspectCompilation) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a suspicious compilation. Attackers will often compile exploits to escalate privileges. | PrivilegeEscalation, Exploitation | Medium | +| **Suspicious file timestamp modification (Preview)**
                  (K8S.NODE_TimestampTampering) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a suspicious timestamp modification. Attackers will often copy timestamps from existing legitimate files to new tools to avoid detection of these newly dropped files. | Persistence, DefenseEvasion | Low | | **Suspicious request to Kubernetes API (Preview)**
                  (K8S.NODE_KubernetesAPI) | Analysis of processes running within a container indicates that a suspicious request was made to the Kubernetes API. The request was sent from a container in the cluster. Although this behavior can be intentional, it might indicate that a compromised container is running in the cluster. | LateralMovement | Medium | | **Suspicious request to the Kubernetes Dashboard (Preview)**
                  (K8S.NODE_KubernetesDashboard) | Analysis of processes running within a container indicates that a suspicious request was made to the Kubernetes Dashboard. The request was sent from a container in the cluster. Although this behavior can be intentional, it might indicate that a compromised container is running in the cluster. | Execution | Medium | -| **Potential crypto coin miner started (Preview)**
                  (K8S.NODE_CryptoCoinMinerExecution) | Analysis of processes running within a container detected a process being started in a way normally associated with digital currency mining. | Execution | Medium | -| **Suspicious password access (Preview)**
                  (K8S.NODE_SuspectPasswordFileAccess) | Analysis of processes running within a container detected suspicious access to encrypted user passwords. | Persistence | Informational | -| **Suspicious use of DNS over HTTPS (Preview)**
                  (K8S.NODE_SuspiciousDNSOverHttps) | Analysis of processes running within a container indicates the use of a DNS call over HTTPS in an uncommon fashion. This technique is used by attackers to hide calls out to suspect or malicious sites. | DefenseEvasion, Exfiltration | Medium | -| **A possible connection to malicious location has been detected. (Preview)**
                  (K8S.NODE_ThreatIntelCommandLineSuspectDomain) | Analysis of processes running within a container detected a connection to a location that has been reported to be malicious or unusual. This is an indicator that a compromise may have occurred. | InitialAccess | Medium | +| **Potential crypto coin miner started (Preview)**
                  (K8S.NODE_CryptoCoinMinerExecution) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a process being started in a way normally associated with digital currency mining. | Execution | Medium | +| **Suspicious password access (Preview)**
                  (K8S.NODE_SuspectPasswordFileAccess) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a suspicious attempt to access encrypted user passwords. | Persistence | Informational | +| **Suspicious use of DNS over HTTPS (Preview)**
                  (K8S.NODE_SuspiciousDNSOverHttps) | Analysis of processes running within a container or directly on a Kubernetes node, has detected the use of a DNS call over HTTPS in an uncommon fashion. This technique is used by attackers to hide calls out to suspect or malicious sites. | DefenseEvasion, Exfiltration | Medium | +| **A possible connection to malicious location has been detected. (Preview)**
                  (K8S.NODE_ThreatIntelCommandLineSuspectDomain) | Analysis of processes running within a container or directly on a Kubernetes node, has detected a connection to a location that has been reported to be malicious or unusual. This is an indicator that a compromise may have occurred. | InitialAccess | Medium | 1: **Limitations on GKE clusters**: GKE uses a Kubernetes audit policy that doesn't support all alert types. As a result, this security alert, which is based on Kubernetes audit events, is not supported for GKE clusters. @@ -624,8 +624,8 @@ Defender for Cloud's supported kill chain intents are based on [version 9 of the | **LateralMovement** | Lateral movement consists of techniques that enable an adversary to access and control remote systems on a network and could, but does not necessarily, include execution of tools on remote systems. The lateral movement techniques could allow an adversary to gather information from a system without needing additional tools, such as a remote access tool. An adversary can use lateral movement for many purposes, including remote Execution of tools, pivoting to additional systems, access to specific information or files, access to additional credentials, or to cause an effect. | | **Execution** | The execution tactic represents techniques that result in execution of adversary-controlled code on a local or remote system. This tactic is often used in conjunction with lateral movement to expand access to remote systems on a network. | | **Collection** | Collection consists of techniques used to identify and gather information, such as sensitive files, from a target network prior to exfiltration. This category also covers locations on a system or network where the adversary may look for information to exfiltrate. | -| **Exfiltration** | Exfiltration refers to techniques and attributes that result or aid in the adversary removing files and information from a target network. This category also covers locations on a system or network where the adversary may look for information to exfiltrate. | | **Command and Control** | The command and control tactic represents how adversaries communicate with systems under their control within a target network. | +| **Exfiltration** | Exfiltration refers to techniques and attributes that result or aid in the adversary removing files and information from a target network. This category also covers locations on a system or network where the adversary may look for information to exfiltrate. | | **Impact** | Impact events primarily try to directly reduce the availability or integrity of a system, service, or network; including manipulation of data to impact a business or operational process. This would often refer to techniques such as ransomware, defacement, data manipulation, and others. | diff --git a/articles/defender-for-cloud/alerts-suppression-rules.md b/articles/defender-for-cloud/alerts-suppression-rules.md index 541da124a89ff..ba2651f843d26 100--- --- a/articles/defender-for-cloud/alerts-suppression-rules.md +++ b/articles/defender-for-cloud/alerts-suppression-rules.md @@ -46,7 +46,7 @@ There are a few ways you can create rules to suppress unwanted security alerts: - To suppress alerts at the subscription level, you can use the Azure portal or the REST API as explained below > [!NOTE] -> Suppression rules don't work retroactively - they'll only suppress alerts triggered _after_ the rule is created. 
Also, if a specific alert type has never been generated on a specific subscription, future alerts of that type wonn't be suppressed. For a rule to suppress an alert on a specific subscription, that alert type has to have been triggered at leaast once before the rule is created. +> Suppression rules don't work retroactively - they'll only suppress alerts triggered _after_ the rule is created. Also, if a specific alert type has never been generated on a specific subscription, future alerts of that type won't be suppressed. For a rule to suppress an alert on a specific subscription, that alert type has to have been triggered at least once before the rule is created. To create a rule directly in the Azure portal: diff --git a/articles/defender-for-cloud/defender-for-cloud-introduction.md b/articles/defender-for-cloud/defender-for-cloud-introduction.md index 0bc435d06ebd3..5d934049ee605 100644 --- a/articles/defender-for-cloud/defender-for-cloud-introduction.md +++ b/articles/defender-for-cloud/defender-for-cloud-introduction.md @@ -5,58 +5,52 @@ ms.topic: overview ms.author: benmansheim author: bmansheim ms.custom: mvc -ms.date: 05/11/2022 +ms.date: 05/19/2022 --- # What is Microsoft Defender for Cloud? -Microsoft Defender for Cloud is a Cloud Workload Protection Platform (CWPP) that also delivers Cloud Security Posture Management (CSPM) for all of your Azure, on-premises, and multicloud (Amazon AWS and Google GCP) resources. - -- [**Defender for Cloud recommendations**](security-policy-concept.md) identify cloud workloads that require security actions and provide you with steps to protect your workloads from security risks. -- [**Defender for Cloud secure score**](secure-score-security-controls.md) gives you a clear view of your security posture based on the implementation of the security recommendations so you can track new security opportunities and precisely report on the progress of your security efforts. -- [**Defender for Cloud alerts**](alerts-overview.md) warn you about security events in your workloads in real-time, including the indicators that led to the event. - -Defender for Cloud fills three vital needs as you manage the security of your resources and workloads in the cloud and on-premises: +Microsoft Defender for Cloud is a Cloud Security Posture Management (CSPM) and Cloud Workload Protection Platform (CWPP) for all of your Azure, on-premises, and multicloud (Amazon AWS and Google GCP) resources. Defender for Cloud fills three vital needs as you manage the security of your resources and workloads in the cloud and on-premises: :::image type="content" source="media/defender-for-cloud-introduction/defender-for-cloud-synopsis.png" alt-text="Understanding the core functionality of Microsoft Defender for Cloud."::: -|Security requirement | Defender for Cloud solution| -|---------|---------| -|**Continuous assessment** - Understand your current security posture. | **Secure score** - A single score so that you can tell, at a glance, your current security situation: the higher the score, the lower the identified risk level. | -|**Secure** - Harden all connected resources and services. | **Security recommendations** - Customized and prioritized hardening tasks to improve your posture. You implement a recommendation by following the detailed remediation steps provided in the recommendation. For many recommendations, Defender for Cloud offers a "Fix" button for automated implementation!| -|**Defend** - Detect and resolve threats to those resources and services. 
| **Security alerts** - With the enhanced security features enabled, Defender for Cloud detects threats to your resources and workloads. These alerts appear in the Azure portal and Defender for Cloud can also send them by email to the relevant personnel in your organization. Alerts can also be streamed to SIEM, SOAR, or IT Service Management solutions as required. | +- [**Defender for Cloud secure score**](secure-score-security-controls.md) **continually assesses** your security posture so you can track new security opportunities and precisely report on the progress of your security efforts. +- [**Defender for Cloud recommendations**](security-policy-concept.md) **secure** your workloads with step-by-step actions that protect your workloads from known security risks. +- [**Defender for Cloud alerts**](alerts-overview.md) **defend** your workloads in real-time so you can react immediately and prevent security events from developing. -## Posture management and workload protection +For a step-by-step walkthrough of Defender for Cloud, check out this [interactive tutorial](https://mslearn.cloudguides.com/en-us/guides/Protect%20your%20multi-cloud%20environment%20with%20Microsoft%20Defender%20for%20Cloud). -Microsoft Defender for Cloud's features covers the two broad pillars of cloud security: cloud security posture management and cloud workload protection. +## Protect your resources and track your security progress -### Cloud security posture management (CSPM) +Microsoft Defender for Cloud's features cover the two broad pillars of cloud security: Cloud Workload Protection Platform (CWPP) and Cloud Security Posture Management (CSPM). + +### CSPM - Remediate security issues and watch your security posture improve In Defender for Cloud, the posture management features provide: -- **Visibility** - to help you understand your current security situation - **Hardening guidance** - to help you efficiently and effectively improve your security +- **Visibility** - to help you understand your current security situation -The central feature in Defender for Cloud that enables you to achieve those goals is **secure score**. Defender for Cloud continually assesses your resources, subscriptions, and organization for security issues. It then aggregates all the findings into a single score so that you can tell, at a glance, your current security situation: the higher the score, the lower the identified risk level. +Defender for Cloud continually assesses your resources, subscriptions, and organization for security issues and shows your security posture in **secure score**, an aggregated score of the security findings that tells you, at a glance, your current security situation: the higher the score, the lower the identified risk level. -When you open Defender for Cloud for the first time, it will meet the visibility and strengthening goals as follows: +As soon as you open Defender for Cloud for the first time, Defender for Cloud: -1. **Generate a secure score** for your subscriptions based on an assessment of your connected resources compared with the guidance in [Azure Security Benchmark](/security/benchmark/azure/overview). Use the score to understand your security posture, and the compliance dashboard to review your compliance with the built-in benchmark. When you've enabled the enhanced security features, you can customize the standards used to assess your compliance, and add other regulations (such as NIST and Azure CIS) or organization-specific security requirements. 
You can also apply recommendations, and score based on the AWS Foundational Security Best practices standards. +- **Generates a secure score** for your subscriptions based on an assessment of your connected resources compared with the guidance in [Azure Security Benchmark](/security/benchmark/azure/overview). Use the score to understand your security posture, and the compliance dashboard to review your compliance with the built-in benchmark. When you've enabled the enhanced security features, you can customize the standards used to assess your compliance, and add other regulations (such as NIST and Azure CIS) or organization-specific security requirements. You can also apply recommendations, and score based on the AWS Foundational Security Best practices standards. -1. **Provide hardening recommendations** based on any identified security misconfigurations and weaknesses. Use these security recommendations to strengthen the security posture of your organization's Azure, hybrid, and multicloud resources. +- **Provides hardening recommendations** based on any identified security misconfigurations and weaknesses. Use these security recommendations to strengthen the security posture of your organization's Azure, hybrid, and multicloud resources. [Learn more about secure score](secure-score-security-controls.md). -### Cloud workload protection (CWP) +### CWP - Identify unique workload security requirements -Defender for Cloud offers security alerts that are powered by [Microsoft Threat Intelligence](https://go.microsoft.com/fwlink/?linkid=2128684). It also includes a range of advanced, intelligent, protections for your workloads. The workload protections are provided through Microsoft Defender plans specific to the types of resources in your subscriptions. For example, you can enable **Microsoft Defender for Storage** to get alerted about suspicious activities related to your Azure Storage accounts. +Defender for Cloud offers security alerts that are powered by [Microsoft Threat Intelligence](https://go.microsoft.com/fwlink/?linkid=2128684). It also includes a range of advanced, intelligent protections for your workloads. The workload protections are provided through Microsoft Defender plans specific to the types of resources in your subscriptions. For example, you can enable **Microsoft Defender for Storage** to get alerted about suspicious activities related to your storage resources. -## Azure, hybrid, and multicloud protections +## Protect all of your resources under one roof -Because Defender for Cloud is an Azure-native service, many Azure services are monitored and protected without needing any deployment. +Because Defender for Cloud is an Azure-native service, many Azure services are monitored and protected without needing any deployment, but you can also add resources that are on-premises or in other public clouds. When necessary, Defender for Cloud can automatically deploy a Log Analytics agent to gather security-related data. For Azure machines, deployment is handled directly. For hybrid and multicloud environments, Microsoft Defender plans are extended to non Azure machines with the help of [Azure Arc](https://azure.microsoft.com/services/azure-arc/). CSPM features are extended to multicloud machines without the need for any agents (see [Defend resources running on other clouds](#defend-resources-running-on-other-clouds)). 
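
To make the hybrid onboarding described above more concrete, here is a minimal, hedged sketch (not part of the original article): it assumes placeholder values such as `myResourceGroup`, connects an on-premises machine with the Azure Arc Connected Machine agent, and then turns on automatic provisioning of the Log Analytics agent for the subscription.

```azurecli
# Sketch only - the resource group, region, and IDs below are placeholders.
# Connect an on-premises machine to Azure with the Azure Arc Connected Machine agent.
azcmagent connect \
  --resource-group "myResourceGroup" \
  --tenant-id "<tenant-id>" \
  --subscription-id "<subscription-id>" \
  --location "eastus"

# Turn on automatic provisioning of the Log Analytics agent for the subscription,
# so Defender for Cloud can gather security-related data from connected machines.
az security auto-provisioning-setting update --name "default" --auto-provision "On"
```
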
-### Azure-native protections +### Defend your Azure-native resources Defender for Cloud helps you detect threats across: @@ -66,7 +60,7 @@ Defender for Cloud helps you detect threats across: - **Networks** - Defender for Cloud helps you limit exposure to brute force attacks. By reducing access to virtual machine ports, using the just-in-time VM access, you can harden your network by preventing unnecessary access. You can set secure access policies on selected ports, for only authorized users, allowed source IP address ranges or IP addresses, and for a limited amount of time. -### Defend your hybrid resources +### Defend your on-premises resources In addition to defending your Azure environment, you can add Defender for Cloud capabilities to your hybrid cloud environment to protect your non-Azure servers. To help you focus on what matters the most​, you'll get customized threat intelligence and prioritized alerts according to your specific environment. @@ -84,7 +78,7 @@ For example, if you've [connected an Amazon Web Services (AWS) account](quicksta Learn more about connecting your [AWS](quickstart-onboard-aws.md) and [GCP](quickstart-onboard-gcp.md) accounts to Microsoft Defender for Cloud. -## Vulnerability assessment and management +## Close vulnerabilities before they get exploited :::image type="content" source="media/defender-for-cloud-introduction/defender-for-cloud-expanded-assess.png" alt-text="Focus on the assessment features of Microsoft Defender for Cloud."::: @@ -99,7 +93,7 @@ Learn more on the following pages: - [Defender for Cloud's integrated Qualys scanner for Azure and hybrid machines](deploy-vulnerability-assessment-vm.md) - [Identify vulnerabilities in images in Azure container registries](defender-for-containers-usage.md#identify-vulnerabilities-in-images-in-other-container-registries) -## Optimize and improve security by configuring recommended controls +## Enforce your security policy from the top down :::image type="content" source="media/defender-for-cloud-introduction/defender-for-cloud-expanded-secure.png" alt-text="Focus on the 'secure' features of Microsoft Defender for Cloud."::: @@ -117,17 +111,15 @@ To help you understand how important each recommendation is to your overall secu :::image type="content" source="./media/defender-for-cloud-introduction/sc-secure-score.png" alt-text="Defender for Cloud secure score."::: -## Defend against threats +## Extend Defender for Cloud with Defender plans and external monitoring :::image type="content" source="media/defender-for-cloud-introduction/defender-for-cloud-expanded-defend.png" alt-text="Focus on the 'defend'' features of Microsoft Defender for Cloud."::: -Defender for Cloud provides: - -- **Security alerts** - When Defender for Cloud detects a threat in any area of your environment, it generates a security alert. These alerts describe details of the affected resources, suggested remediation steps, and in some cases an option to trigger a logic app in response. Whether an alert is generated by Defender for Cloud, or received by Defender for Cloud from an integrated security product, you can export it. To export your alerts to Microsoft Sentinel, any third-party SIEM, or any other external tool, follow the instructions in [Stream alerts to a SIEM, SOAR, or IT Service Management solution](export-to-siem.md). 
Defender for Cloud's threat protection includes fusion kill-chain analysis, which automatically correlates alerts in your environment based on cyber kill-chain analysis, to help you better understand the full story of an attack campaign, where it started and what kind of impact it had on your resources. [Defender for Cloud's supported kill chain intents are based on version 9 of the MITRE ATT&CK matrix](alerts-reference.md#intentions). +You can extend the Defender for Cloud protection with: - **Advanced threat protection features** for virtual machines, SQL databases, containers, web applications, your network, and more - Protections include securing the management ports of your VMs with [just-in-time access](just-in-time-access-overview.md), and [adaptive application controls](adaptive-application-controls.md) to create allowlists for what apps should and shouldn't run on your machines. -The **Defender plans** page of Microsoft Defender for Cloud offers the following plans for comprehensive defenses for the compute, data, and service layers of your environment: +The **Defender plans** of Microsoft Defender for Cloud offer comprehensive defenses for the compute, data, and service layers of your environment: - [Microsoft Defender for Servers](defender-for-servers-introduction.md) - [Microsoft Defender for Storage](defender-for-storage-introduction.md) @@ -145,6 +137,18 @@ Use the advanced protection tiles in the [workload protections dashboard](worklo > [!TIP] > Microsoft Defender for IoT is a separate product. You'll find all the details in [Introducing Microsoft Defender for IoT](../defender-for-iot/overview.md). +- **Security alerts** - When Defender for Cloud detects a threat in any area of your environment, it generates a security alert. These alerts describe details of the affected resources, suggested remediation steps, and in some cases an option to trigger a logic app in response. Whether an alert is generated by Defender for Cloud, or received by Defender for Cloud from an integrated security product, you can export it. To export your alerts to Microsoft Sentinel, any third-party SIEM, or any other external tool, follow the instructions in [Stream alerts to a SIEM, SOAR, or IT Service Management solution](export-to-siem.md). Defender for Cloud's threat protection includes fusion kill-chain analysis, which automatically correlates alerts in your environment based on cyber kill-chain analysis, to help you better understand the full story of an attack campaign, where it started and what kind of impact it had on your resources. [Defender for Cloud's supported kill chain intents are based on version 9 of the MITRE ATT&CK matrix](alerts-reference.md#intentions). + +## Learn More + +If you would like to learn more about Defender for Cloud from a cybersecurity expert, check out [Lessons Learned from the Field](episode-six.md). 
+ +You can also check out the following blogs: + +- [A new name for multicloud security: Microsoft Defender for Cloud](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/a-new-name-for-multi-cloud-security-microsoft-defender-for-cloud/ba-p/2943020) +- [Microsoft Defender for Cloud - Use cases](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/microsoft-defender-for-cloud-use-cases/ba-p/2953619) +- [Microsoft Defender for Cloud PoC Series - Microsoft Defender for Containers](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/microsoft-defender-for-cloud-poc-series-microsoft-defender-for/ba-p/3064644) + ## Next steps - To get started with Defender for Cloud, you need a subscription to Microsoft Azure. If you don't have a subscription, [sign up for a free trial](https://azure.microsoft.com/free/). diff --git a/articles/defender-for-cloud/defender-for-container-registries-introduction.md b/articles/defender-for-cloud/defender-for-container-registries-introduction.md index f919658281730..f79ee8c4ca939 100644 --- a/articles/defender-for-cloud/defender-for-container-registries-introduction.md +++ b/articles/defender-for-cloud/defender-for-container-registries-introduction.md @@ -23,7 +23,7 @@ To protect the Azure Resource Manager based registries in your subscription, ena > > :::image type="content" source="media/defender-for-containers/enable-defender-for-containers.png" alt-text="Enable Microsoft Defender for Containers from the Defender plans page."::: > -> Learn more about this change in [the release note](release-notes.md#microsoft-defender-for-containers-plan-released-for-general-availability-ga). +> Learn more about this change in [the release note](release-notes-archive.md#microsoft-defender-for-containers-plan-released-for-general-availability-ga). |Aspect|Details| |----|:----| diff --git a/articles/defender-for-cloud/defender-for-containers-architecture.md b/articles/defender-for-cloud/defender-for-containers-architecture.md new file mode 100644 index 0000000000000..3de83045adfa1 --- /dev/null +++ b/articles/defender-for-cloud/defender-for-containers-architecture.md @@ -0,0 +1,118 @@ +--- +title: Container security architecture in Microsoft Defender for Cloud +description: Learn about the architecture of Microsoft Defender for Containers for each container platform +author: bmansheim +ms.author: benmansheim +ms.topic: overview +ms.date: 05/31/2022 +--- +# Defender for Containers architecture + +Defender for Containers is designed differently for each container environment whether they're running in: + +- **Azure Kubernetes Service (AKS)** - Microsoft's managed service for developing, deploying, and managing containerized applications. + +- **Amazon Elastic Kubernetes Service (EKS) in a connected Amazon Web Services (AWS) account** - Amazon's managed service for running Kubernetes on AWS without needing to install, operate, and maintain your own Kubernetes control plane or nodes. + +- **Google Kubernetes Engine (GKE) in a connected Google Cloud Platform (GCP) project** - Google’s managed environment for deploying, managing, and scaling applications using GCP infrastructure. + +- **An unmanaged Kubernetes distribution** (using Azure Arc-enabled Kubernetes) - Cloud Native Computing Foundation (CNCF) certified Kubernetes clusters hosted on-premises or on IaaS. + +> [!NOTE] +> Defender for Containers support for Arc-enabled Kubernetes clusters (AWS EKS and GCP GKE) is a preview feature. 
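
As a hedged illustration only (the subscription must already be selected, and the resource group and cluster names below are placeholders, not part of this article), enabling the Defender for Containers plan and confirming that the Defender profile components are running on an AKS cluster might look like the following sketch:

```azurecli
# Sketch only - enable the Microsoft Defender for Containers plan on the
# currently selected subscription.
az security pricing create --name Containers --tier Standard

# After the Defender profile is deployed to an AKS cluster, its collector and
# publisher pods (listed in the component table later in this article) run in
# the kube-system namespace.
az aks get-credentials --resource-group "myResourceGroup" --name "myAKSCluster"
kubectl get pods --namespace kube-system | grep azuredefender
```
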
+ +To protect your Kubernetes containers, Defender for Containers receives and analyzes: + +- Audit logs and security events from the API server +- Cluster configuration information from the control plane +- Workload configuration from Azure Policy +- Security signals and events from the node level + +## Architecture for each container environment + +## [**Azure (AKS)**](#tab/defender-for-container-arch-aks) + +### Architecture diagram of Defender for Cloud and AKS clusters + +When Defender for Cloud protects a cluster hosted in Azure Kubernetes Service, the collection of audit log data is agentless and frictionless. + +The **Defender profile (preview)** deployed to each node provides the runtime protections and collects signals from nodes using [eBPF technology](https://ebpf.io/). + +The **Azure Policy add-on for Kubernetes** collects cluster and workload configuration for admission control policies as explained in [Protect your Kubernetes workloads](kubernetes-workload-protections.md). + +> [!NOTE] +> Defender for Containers **Defender profile** is a preview feature. + +:::image type="content" source="./media/defender-for-containers/architecture-aks-cluster.png" alt-text="Diagram of high-level architecture of the interaction between Microsoft Defender for Containers, Azure Kubernetes Service, and Azure Policy." lightbox="./media/defender-for-containers/architecture-aks-cluster.png"::: + +### Defender profile component details + +| Pod Name | Namespace | Kind | Short Description | Capabilities | Resource limits | Egress Required | +|--|--|--|--|--|--|--| +| azuredefender-collector-ds-* | kube-system | [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) | A set of containers that focus on collecting inventory and security events from the Kubernetes environment. | SYS_ADMIN, 
SYS_RESOURCE, <br> SYS_PTRACE | memory: 64Mi <br> cpu: 60m | No | +| azuredefender-collector-misc-* | kube-system | [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) | A set of containers that focus on collecting inventory and security events from the Kubernetes environment that aren't bound to a specific node. | N/A | memory: 64Mi <br> cpu: 60m | No | +| azuredefender-publisher-ds-* | kube-system | [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) | Publishes the collected data to the Microsoft Defender for Containers backend service, where it's processed and analyzed. | N/A | memory: 200Mi <br> cpu: 60m | HTTPS 443 <br>
                  Learn more about the [outbound access prerequisites](../aks/limit-egress-traffic.md#microsoft-defender-for-containers) | + +\* resource limits aren't configurable + +## [**On-premises / IaaS (Arc)**](#tab/defender-for-container-arch-arc) + +### Architecture diagram of Defender for Cloud and Arc-enabled Kubernetes clusters + +For all clusters hosted outside of Azure, [Azure Arc-enabled Kubernetes](../azure-arc/kubernetes/overview.md) is required to connect the clusters to Azure and provide Azure services such as Defender for Containers. + +When a non-Azure container is connected to Azure with Arc, the [Arc extension](../azure-arc/kubernetes/extensions.md) collects Kubernetes audit logs data from all control plane nodes in the cluster. The extension sends the log data to the Microsoft Defender for Cloud backend in the cloud for further analysis. The extension is registered with a Log Analytics workspace used as a data pipeline, but the audit log data isn't stored in the Log Analytics workspace. + +Workload configuration information is collected by an Azure Policy add-on. As explained in [this Azure Policy for Kubernetes page](../governance/policy/concepts/policy-for-kubernetes.md), the add-on extends the open-source [Gatekeeper v3](https://github.com/open-policy-agent/gatekeeper) admission controller webhook for [Open Policy Agent](https://www.openpolicyagent.org/). Kubernetes admission controllers are plugins that enforce how your clusters are used. The add-on registers as a web hook to Kubernetes admission control and makes it possible to apply at-scale enforcements and safeguards on your clusters in a centralized, consistent manner. + +> [!NOTE] +> Defender for Containers support for Arc-enabled Kubernetes clusters is a preview feature. + +:::image type="content" source="./media/defender-for-containers/architecture-arc-cluster.png" alt-text="Diagram of high-level architecture of the interaction between Microsoft Defender for Containers, Azure Kubernetes Service, Azure Arc-enabled Kubernetes, and Azure Policy." lightbox="./media/defender-for-containers/architecture-arc-cluster.png"::: + +## [**AWS (EKS)**](#tab/defender-for-container-arch-eks) + +### Architecture diagram of Defender for Cloud and EKS clusters + +These components are required in order to receive the full protection offered by Microsoft Defender for Containers: + +- **[Kubernetes audit logs](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/)** – [AWS account’s CloudWatch](https://aws.amazon.com/cloudwatch/) enables, and collects audit log data through an agentless collector, and sends the collected information to the Microsoft Defender for Cloud backend for further analysis. + +- **[Azure Arc-enabled Kubernetes](../azure-arc/kubernetes/overview.md)** - An agent based solution that connects your EKS clusters to Azure. Azure then is capable of providing services such as Defender, and Policy as [Arc extensions](../azure-arc/kubernetes/extensions.md). + +- **The Defender extension** – The [DeamonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) that collects signals from hosts using [eBPF technology](https://ebpf.io/), and provides runtime protection. The extension is registered with a Log Analytics workspace, and used as a data pipeline. However, the audit log data isn't stored in the Log Analytics workspace. + +- **The Azure Policy extension** - The workload's configuration information is collected by the Azure Policy add-on. 
The Azure Policy add-on extends the open-source [Gatekeeper v3](https://github.com/open-policy-agent/gatekeeper) admission controller webhook for [Open Policy Agent](https://www.openpolicyagent.org/). The extension registers as a web hook to Kubernetes admission control and makes it possible to apply at-scale enforcements, and safeguards on your clusters in a centralized, consistent manner. For more information, see [Understand Azure Policy for Kubernetes clusters](../governance/policy/concepts/policy-for-kubernetes.md). + +> [!NOTE] +> Defender for Containers support for AWS EKS clusters is a preview feature. + +:::image type="content" source="./media/defender-for-containers/architecture-eks-cluster.png" alt-text="Diagram of high-level architecture of the interaction between Microsoft Defender for Containers, Amazon Web Services' EKS clusters, Azure Arc-enabled Kubernetes, and Azure Policy." lightbox="./media/defender-for-containers/architecture-eks-cluster.png"::: + +## [**GCP (GKE)**](#tab/defender-for-container-gke) + +### Architecture diagram of Defender for Cloud and GKE clusters + +These components are required in order to receive the full protection offered by Microsoft Defender for Containers: + +- **[Kubernetes audit logs](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/)** – [GCP Cloud Logging](https://cloud.google.com/logging/) enables, and collects audit log data through an agentless collector, and sends the collected information to the Microsoft Defender for Cloud backend for further analysis. + +- **[Azure Arc-enabled Kubernetes](../azure-arc/kubernetes/overview.md)** - An agent based solution that connects your EKS clusters to Azure. Azure then is capable of providing services such as Defender, and Policy as [Arc extensions](../azure-arc/kubernetes/extensions.md). + +- **The Defender extension** – The [DeamonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) that collects signals from hosts using [eBPF technology](https://ebpf.io/), and provides runtime protection. The extension is registered with a Log Analytics workspace, and used as a data pipeline. However, the audit log data isn't stored in the Log Analytics workspace. + +- **The Azure Policy extension** - The workload's configuration information is collected by the Azure Policy add-on. The Azure Policy add-on extends the open-source [Gatekeeper v3](https://github.com/open-policy-agent/gatekeeper) admission controller webhook for [Open Policy Agent](https://www.openpolicyagent.org/). The extension registers as a web hook to Kubernetes admission control and makes it possible to apply at-scale enforcements, and safeguards on your clusters in a centralized, consistent manner. For more information, see [Understand Azure Policy for Kubernetes clusters](../governance/policy/concepts/policy-for-kubernetes.md). + +> [!NOTE] +> Defender for Containers support for GCP GKE clusters is a preview feature. + +:::image type="content" source="./media/defender-for-containers/architecture-gke.png" alt-text="Diagram of high-level architecture of the interaction between Microsoft Defender for Containers, Google GKE clusters, Azure Arc-enabled Kubernetes, and Azure Policy." lightbox="./media/defender-for-containers/architecture-gke.png"::: + +--- + +## Next steps + +In this overview, you learned about the architecture of container security in Microsoft Defender for Cloud. 
To enable the plan, see: + +> [!div class="nextstepaction"] +> [Enable Defender for Containers](defender-for-containers-enable.md) diff --git a/articles/defender-for-cloud/defender-for-containers-enable.md b/articles/defender-for-cloud/defender-for-containers-enable.md index 5b4a95f4440e5..6c3608271e75c 100644 --- a/articles/defender-for-cloud/defender-for-containers-enable.md +++ b/articles/defender-for-cloud/defender-for-containers-enable.md @@ -3,7 +3,7 @@ title: How to enable Microsoft Defender for Containers in Microsoft Defender for description: Enable the container protections of Microsoft Defender for Containers ms.topic: overview zone_pivot_groups: k8s-host -ms.date: 05/10/2022 +ms.date: 05/26/2022 --- # Enable Microsoft Defender for Containers @@ -25,7 +25,7 @@ Learn about this plan in [Overview of Microsoft Defender for Containers](defende ::: zone pivot="defender-for-container-arc,defender-for-container-eks,defender-for-container-gke" > [!NOTE] > Defender for Containers' support for Arc-enabled Kubernetes clusters, AWS EKS, and GCP GKE. This is a preview feature. -> +> > [!INCLUDE [Legalese](../../includes/defender-for-cloud-preview-legal-text.md)] ::: zone-end @@ -70,7 +70,7 @@ A full list of supported alerts is available in the [reference table of all Defe 1. In the Azure portal, open Microsoft Defender for Cloud's security alerts page and look for the alert on the relevant resource: :::image type="content" source="media/defender-for-kubernetes-azure-arc/sample-kubernetes-security-alert.png" alt-text="Sample alert from Microsoft Defender for Kubernetes." lightbox="media/defender-for-kubernetes-azure-arc/sample-kubernetes-security-alert.png"::: - + ::: zone pivot="defender-for-container-arc,defender-for-container-eks,defender-for-container-gke" [!INCLUDE [Remove the extension](./includes/defender-for-containers-remove-extension.md)] ::: zone-end @@ -87,6 +87,17 @@ A full list of supported alerts is available in the [reference table of all Defe [!INCLUDE [FAQ](./includes/defender-for-containers-override-faq.md)] ::: zone-end +## Learn More + +Learn more from the product manager about [Microsoft Defender for Containers in a multicloud environment](episode-nine.md). +You can also learn how to [Protect Containers in GCP with Defender for Containers](episode-ten.md). + +You can also check out the following blogs: + +- [Protect your Google Cloud workloads with Microsoft Defender for Cloud](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/protect-your-google-cloud-workloads-with-microsoft-defender-for/ba-p/3073360) +- [Introducing Microsoft Defender for Containers](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/introducing-microsoft-defender-for-containers/ba-p/2952317) +- [A new name for multicloud security: Microsoft Defender for Cloud](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/a-new-name-for-multi-cloud-security-microsoft-defender-for-cloud/ba-p/2943020) + ## Next steps -[Use Defender for Containers to scan your ACR images for vulnerabilities](defender-for-container-registries-usage.md). \ No newline at end of file +[Use Defender for Containers to scan your ACR images for vulnerabilities](defender-for-container-registries-usage.md). 
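As a rough sketch of the enablement flow covered by this article, the commands below turn on the Containers plan for a subscription and then deploy the Defender extension to an Arc-connected cluster. This isn't the canonical procedure from the article: the cluster and resource group names are placeholders, and the `microsoft.azuredefender.kubernetes` extension type reflects the preview naming and may change.

```azurecli
# Enable the Microsoft Defender for Containers plan on the current subscription.
az security pricing create --name Containers --tier standard

# Add the cluster-extension CLI extension if it isn't already installed.
az extension add --name k8s-extension

# Deploy the Defender extension to an Arc-connected cluster (names are placeholders).
az k8s-extension create \
    --name microsoft.azuredefender.kubernetes \
    --extension-type microsoft.azuredefender.kubernetes \
    --cluster-type connectedClusters \
    --cluster-name my-cncf-cluster \
    --resource-group my-arc-rg
```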
diff --git a/articles/defender-for-cloud/defender-for-containers-introduction.md b/articles/defender-for-cloud/defender-for-containers-introduction.md index 64a83f8dbde11..13bdc9805759a 100644 --- a/articles/defender-for-cloud/defender-for-containers-introduction.md +++ b/articles/defender-for-cloud/defender-for-containers-introduction.md @@ -1,15 +1,17 @@ --- title: Container security with Microsoft Defender for Cloud description: Learn about Microsoft Defender for Containers +author: bmansheim +ms.author: benmansheim ms.topic: overview -ms.date: 05/15/2022 +ms.date: 05/25/2022 --- # Overview of Microsoft Defender for Containers -Microsoft Defender for Containers is the cloud-native solution for securing your containers. +Microsoft Defender for Containers is the cloud-native solution for securing your containers so you can improve, monitor, and maintain the security of your clusters, containers, and their applications. -On this page, you'll learn how you can use Defender for Containers to improve, monitor, and maintain the security of your clusters, containers, and their applications. +[How does Defender for Containers work in each Kubernetes platform?](defender-for-containers-architecture.md) ## Microsoft Defender for Containers plan availability @@ -19,18 +21,17 @@ On this page, you'll learn how you can use Defender for Containers to improve, m | Feature availability | Refer to the [availability](supported-machines-endpoint-solutions-clouds-containers.md) section for additional information on feature release state and availability.| | Pricing: | **Microsoft Defender for Containers** is billed as shown on the [pricing page](https://azure.microsoft.com/pricing/details/defender-for-cloud/) | | Required roles and permissions: | • To auto provision the required components, see the [permissions for each of the components](enable-data-collection.md?tabs=autoprovision-containers)
• **Security admin** can dismiss alerts <br> • **Security reader** can view vulnerability assessment findings <br> See also [Azure Container Registry roles and permissions](../container-registry/container-registry-roles.md) | -| Clouds: | **Azure**: <br> :::image type="icon" source="./media/icons/yes-icon.png"::: Commercial clouds <br> :::image type="icon" source="./media/icons/yes-icon.png"::: National clouds (Azure Government, Azure China 21Vianet) (Except for preview features)) <br> **Non Azure**: <br> :::image type="icon" source="./media/icons/yes-icon.png"::: Connected AWS accounts (Preview) <br> :::image type="icon" source="./media/icons/yes-icon.png"::: Connected GCP projects (Preview) <br> :::image type="icon" source="./media/icons/yes-icon.png"::: On-prem/IaaS supported via Arc enabled Kubernetes (Preview). <br>
For more details, see the [availability section](supported-machines-endpoint-solutions-clouds-containers.md#defender-for-containers-feature-availability). | - +| Clouds: | **Azure**: <br> :::image type="icon" source="./media/icons/yes-icon.png"::: Commercial clouds <br> :::image type="icon" source="./media/icons/yes-icon.png"::: National clouds (Azure Government, Azure China 21Vianet), except for preview features <br> **Non-Azure**: <br> :::image type="icon" source="./media/icons/yes-icon.png"::: Connected AWS accounts (Preview) <br> :::image type="icon" source="./media/icons/yes-icon.png"::: Connected GCP projects (Preview) <br> :::image type="icon" source="./media/icons/yes-icon.png"::: On-premises/IaaS supported via Arc-enabled Kubernetes (Preview). <br>
                  For more information about, see the [availability section](supported-machines-endpoint-solutions-clouds-containers.md#defender-for-containers-feature-availability). | ## What are the benefits of Microsoft Defender for Containers? Defender for Containers helps with the core aspects of container security: -- **Environment hardening** - Defender for Containers protects your Kubernetes clusters whether they're running on Azure Kubernetes Service, Kubernetes on-prem / IaaS, or Amazon EKS. By continuously assessing clusters, Defender for Containers provides visibility into misconfigurations and guidelines to help mitigate identified threats. Learn more in [Hardening](#hardening). +- [**Environment hardening**](#hardening) - Defender for Containers protects your Kubernetes clusters whether they're running on Azure Kubernetes Service, Kubernetes on-premises/IaaS, or Amazon EKS. By continuously assessing clusters, Defender for Containers provides visibility into misconfigurations and guidelines to help mitigate identified threats. -- **Vulnerability assessment** - Vulnerability assessment and management tools for images **stored** in ACR registries and **running** in Azure Kubernetes Service. Learn more in [Vulnerability assessment](#vulnerability-assessment). +- [**Vulnerability assessment**](#vulnerability-assessment) - Vulnerability assessment and management tools for images **stored** in ACR registries and **running** in Azure Kubernetes Service. -- **Run-time threat protection for nodes and clusters** - Threat protection for clusters and Linux nodes generates security alerts for suspicious activities. Learn more in [Run-time protection for Kubernetes nodes, clusters, and hosts](#run-time-protection-for-kubernetes-nodes-and-clusters). +- [**Run-time threat protection for nodes and clusters**](#run-time-protection-for-kubernetes-nodes-and-clusters) - Threat protection for clusters and Linux nodes generates security alerts for suspicious activities. ## Hardening @@ -38,7 +39,7 @@ Defender for Containers helps with the core aspects of container security: Defender for Cloud continuously assesses the configurations of your clusters and compares them with the initiatives applied to your subscriptions. When it finds misconfigurations, Defender for Cloud generates security recommendations. Use Defender for Cloud's **recommendations page** to view recommendations and remediate issues. For details of the relevant Defender for Cloud recommendations that might appear for this feature, see the [compute section](recommendations-reference.md#recs-container) of the recommendations reference table. -For Kubernetes clusters on EKS, you'll need to connect your AWS account to Microsoft Defender for Cloud via the environment settings page as described in [Connect your AWS accounts to Microsoft Defender for Cloud](quickstart-onboard-aws.md). Then ensure you've enabled the CSPM plan. +For Kubernetes clusters on EKS, you'll need to [connect your AWS account to Microsoft Defender for Cloud](quickstart-onboard-aws.md). Then ensure you've enabled the CSPM plan. When reviewing the outstanding recommendations for your container-related resources, whether in asset inventory or the recommendations page, you can use the resource filter: @@ -46,7 +47,7 @@ When reviewing the outstanding recommendations for your container-related resour ### Kubernetes data plane hardening -For a bundle of recommendations to protect the workloads of your Kubernetes containers, install the **Azure Policy for Kubernetes**. 
You can also auto deploy this component as explained in [enable auto provisioning of agents and extensions](enable-data-collection.md#auto-provision-mma). +To protect the workloads of your Kubernetes containers with tailored recommendations, install the **Azure Policy for Kubernetes**. You can also auto deploy this component as explained in [enable auto provisioning of agents and extensions](enable-data-collection.md#auto-provision-mma). With the add-on on your AKS cluster, every request to the Kubernetes API server will be monitored against the predefined set of best practices before being persisted to the cluster. You can then configure to **enforce** the best practices and mandate them for future workloads. @@ -69,147 +70,55 @@ Learn more in [Vulnerability assessment](defender-for-containers-usage.md). :::image type="content" source="./media/defender-for-containers/recommendation-acr-images-with-vulnerabilities.png" alt-text="Sample Microsoft Defender for Cloud recommendation about vulnerabilities discovered in Azure Container Registry (ACR) hosted images." lightbox="./media/defender-for-containers/recommendation-acr-images-with-vulnerabilities.png"::: -### View vulnerabilities for running images +### View vulnerabilities for running images -The recommendation **Running container images should have vulnerability findings resolved** shows vulnerabilities for running images by using the scan results from ACR registries and information on running images from the Defender security profile/extension. Images that are deployed from a non ACR registry, will appear under the **Not applicable** tab. +The recommendation **Running container images should have vulnerability findings resolved** shows vulnerabilities for running images by using the scan results from ACR registries and information on running images from the Defender security profile/extension. Images that are deployed from a non-ACR registry, will appear under the **Not applicable** tab. -:::image type="content" source="media/defender-for-containers/running-image-vulnerabilities-recommendation.png" alt-text="Screenshot showing where the recommendation is viewable" lightbox="media/defender-for-containers/running-image-vulnerabilities-recommendation-expanded.png"::: +:::image type="content" source="media/defender-for-containers/running-image-vulnerabilities-recommendation.png" alt-text="Screenshot showing where the recommendation is viewable." lightbox="media/defender-for-containers/running-image-vulnerabilities-recommendation-expanded.png"::: ## Run-time protection for Kubernetes nodes and clusters -Defender for Cloud provides real-time threat protection for your containerized environments and generates alerts for suspicious activities. You can use this information to quickly remediate security issues and improve the security of your containers. +Defender for Containers provides real-time threat protection for your containerized environments and generates alerts for suspicious activities. You can use this information to quickly remediate security issues and improve the security of your containers. Threat protection at the cluster level is provided by the Defender profile and analysis of the Kubernetes audit logs. Examples of events at this level include exposed Kubernetes dashboards, creation of high-privileged roles, and the creation of sensitive mounts. -Threat protection at the cluster level is provided by the Defender profile and analysis of the Kubernetes audit logs. 
Examples of events at this level include exposed Kubernetes dashboards, creation of high-privileged roles, and the creation of sensitive mounts. +In addition, our threat detection goes beyond the Kubernetes management layer. Defender for Containers includes **host-level threat detection** with over 60 Kubernetes-aware analytics, AI, and anomaly detections based on your runtime workload. Our global team of security researchers constantly monitor the threat landscape. They add container-specific alerts and vulnerabilities as they're discovered. -In addition, our threat detection goes beyond the Kubernetes management layer. Defender for Containers includes **host-level threat detection** with over 60 Kubernetes-aware analytics, AI, and anomaly detections based on your runtime workload. Our global team of security researchers constantly monitor the threat landscape. They add container-specific alerts and vulnerabilities as they're discovered. Together, this solution monitors the growing attack surface of multicloud Kubernetes deployments and tracks the [MITRE ATT&CK® matrix for Containers](https://www.microsoft.com/security/blog/2021/04/29/center-for-threat-informed-defense-teams-up-with-microsoft-partners-to-build-the-attck-for-containers-matrix/), a framework that was developed by the [Center for Threat-Informed Defense](https://mitre-engenuity.org/ctid/) in close partnership with Microsoft and others. +This solution monitors the growing attack surface of multicloud Kubernetes deployments and tracks the [MITRE ATT&CK® matrix for Containers](https://www.microsoft.com/security/blog/2021/04/29/center-for-threat-informed-defense-teams-up-with-microsoft-partners-to-build-the-attck-for-containers-matrix/), a framework that was developed by the [Center for Threat-Informed Defense](https://mitre-engenuity.org/ctid/) in close partnership with Microsoft and others. The full list of available alerts can be found in the [Reference table of alerts](alerts-reference.md#alerts-k8scluster). :::image type="content" source="media/defender-for-containers/sample-containers-plan-alerts.png" alt-text="Screenshot of Defender for Cloud's alerts page showing alerts for multicloud Kubernetes resources." lightbox="./media/defender-for-containers/sample-containers-plan-alerts.png"::: -## Architecture overview - -The architecture of the various elements involved in the full range of protections provided by Defender for Containers varies depending on where your Kubernetes clusters are hosted. - -Defender for Containers protects your clusters whether they're running in: - -- **Azure Kubernetes Service (AKS) (Preview)** - Microsoft's managed service for developing, deploying, and managing containerized applications. - -- **Amazon Elastic Kubernetes Service (EKS) in a connected Amazon Web Services (AWS) account (Preview)** - Amazon's managed service for running Kubernetes on AWS without needing to install, operate, and maintain your own Kubernetes control plane or nodes. - -- **Google Kubernetes Engine (GKE) in a connected Google Cloud Platform (GCP) project (Preview)** - Google’s managed environment for deploying, managing, and scaling applications using GCP infrastructure. - -- **An unmanaged Kubernetes distribution** (using Azure Arc-enabled Kubernetes) - Cloud Native Computing Foundation (CNCF) certified Kubernetes clusters hosted on-premises or on IaaS. - -For high-level diagrams of each scenario, see the relevant tabs below. 
- -In the diagrams you'll see that the items received and analyzed by Defender for Cloud include: - -- Audit logs and security events from the API server -- Cluster configuration information from the control plane -- Workload configuration from Azure Policy -- Security signals and events from the node level - -### [**Azure (AKS)**](#tab/defender-for-container-arch-aks) - -### Architecture diagram of Defender for Cloud and AKS clusters - -When Defender for Cloud protects a cluster hosted in Azure Kubernetes Service, the collection of audit log data is agentless and frictionless. - -The **Defender profile (preview)** deployed to each node provides the runtime protections and collects signals from nodes using [eBPF technology](https://ebpf.io/). - -The **Azure Policy add-on for Kubernetes** collects cluster and workload configuration for admission control policies as explained in [Protect your Kubernetes workloads](kubernetes-workload-protections.md). - -> [!NOTE] -> Defender for Containers' **Defender profile** is a preview feature. - -:::image type="content" source="./media/defender-for-containers/architecture-aks-cluster.png" alt-text="High-level architecture of the interaction between Microsoft Defender for Containers, Azure Kubernetes Service, and Azure Policy." lightbox="./media/defender-for-containers/architecture-aks-cluster.png"::: - -#### Defender profile component details - -| Pod Name | Namespace | Kind | Short Description | Capabilities | Resource limits | Egress Required | -|--|--|--|--|--|--|--| -| azuredefender-collector-ds-* | kube-system | [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) | A set of containers that focus on collecting inventory and security events from the Kubernetes environment. | SYS_ADMIN, 
SYS_RESOURCE, <br> SYS_PTRACE | memory: 64Mi <br> cpu: 60m | No | -| azuredefender-collector-misc-* | kube-system | [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) | A set of containers that focus on collecting inventory and security events from the Kubernetes environment that aren't bounded to a specific node. | N/A | memory: 64Mi <br> cpu: 60m | No | -| azuredefender-publisher-ds-* | kube-system | [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) | Publish the collected data to Microsoft Defender for Containers' backend service where the data will be processed for and analyzed. | N/A | memory: 200Mi <br> cpu: 60m | Https 443 <br>
                  Learn more about the [outbound access prerequisites](../aks/limit-egress-traffic.md#microsoft-defender-for-containers) | - -\* resource limits aren't configurable - -### [**On-premises / IaaS (Arc)**](#tab/defender-for-container-arch-arc) - -### Architecture diagram of Defender for Cloud and Arc-enabled Kubernetes clusters - -For all clusters hosted outside of Azure, [Azure Arc-enabled Kubernetes](../azure-arc/kubernetes/overview.md) is required to connect the clusters to Azure and provide Azure services such as Defender for Containers. - -With the cluster connected to Azure, an [Arc extension](../azure-arc/kubernetes/extensions.md) collects Kubernetes audit logs data from all control plane nodes in the cluster and sends them to the Microsoft Defender for Cloud backend in the cloud for further analysis. The extension is registered with a Log Analytics workspace used as a data pipeline, but the audit log data isn't stored in the Log Analytics workspace. - -Workload configuration information is collected by an Azure Policy add-on. As explained in [this Azure Policy for Kubernetes page](../governance/policy/concepts/policy-for-kubernetes.md), the add-on extends the open-source [Gatekeeper v3](https://github.com/open-policy-agent/gatekeeper) admission controller webhook for [Open Policy Agent](https://www.openpolicyagent.org/). Kubernetes admission controllers are plugins that enforce how your clusters are used. The add-on registers as a web hook to Kubernetes admission control and makes it possible to apply at-scale enforcements and safeguards on your clusters in a centralized, consistent manner. - -> [!NOTE] -> Defender for Containers' support for Arc-enabled Kubernetes clusters is a preview feature. - -:::image type="content" source="./media/defender-for-containers/architecture-arc-cluster.png" alt-text="High-level architecture of the interaction between Microsoft Defender for Containers, Azure Kubernetes Service, Azure Arc-enabled Kubernetes, and Azure Policy." lightbox="./media/defender-for-containers/architecture-arc-cluster.png"::: - - - -### [**AWS (EKS)**](#tab/defender-for-container-arch-eks) - -### Architecture diagram of Defender for Cloud and EKS clusters - -The following describes the components necessary in order to receive the full protection offered by Microsoft Defender for Cloud for Containers. - -- **[Kubernetes audit logs](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/)** – [AWS account’s CloudWatch](https://aws.amazon.com/cloudwatch/) enables, and collects audit log data through an agentless collector, and sends the collected information to the Microsoft Defender for Cloud backend for further analysis. - -- **[Azure Arc-enabled Kubernetes](../azure-arc/kubernetes/overview.md)** - An agent based solution that connects your EKS clusters to Azure. Azure then is capable of providing services such as Defender, and Policy as [Arc extensions](../azure-arc/kubernetes/extensions.md). - -- **The Defender extension** – The [DeamonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) that collects signals from hosts using [eBPF technology](https://ebpf.io/), and provides runtime protection. The extension is registered with a Log Analytics workspace, and used as a data pipeline. However, the audit log data isn't stored in the Log Analytics workspace. - -- **The Azure Policy extension** - The workload's configuration information is collected by the Azure Policy add-on. 
The Azure Policy add-on extends the open-source [Gatekeeper v3](https://github.com/open-policy-agent/gatekeeper) admission controller webhook for [Open Policy Agent](https://www.openpolicyagent.org/). The extension registers as a web hook to Kubernetes admission control and makes it possible to apply at-scale enforcements, and safeguards on your clusters in a centralized, consistent manner. For more information, see [Understand Azure Policy for Kubernetes clusters](../governance/policy/concepts/policy-for-kubernetes.md). - -> [!NOTE] -> Defender for Containers' support for AWS EKS clusters is a preview feature. - -:::image type="content" source="./media/defender-for-containers/architecture-eks-cluster.png" alt-text="High-level architecture of the interaction between Microsoft Defender for Containers, Amazon Web Services' EKS clusters, Azure Arc-enabled Kubernetes, and Azure Policy." lightbox="./media/defender-for-containers/architecture-eks-cluster.png"::: - -### [**GCP (GKE)**](#tab/defender-for-container-gke) - -### Architecture diagram of Defender for Cloud and GKE clusters - -The following describes the components necessary in order to receive the full protection offered by Microsoft Defender for Cloud for Containers. +## FAQ - Defender for Containers -- **[Kubernetes audit logs](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/)** – [GCP Cloud Logging](https://cloud.google.com/logging/) enables, and collects audit log data through an agentless collector, and sends the collected information to the Microsoft Defender for Cloud backend for further analysis. +- [What are the options to enable the new plan at scale?](#what-are-the-options-to-enable-the-new-plan-at-scale) +- [Does Microsoft Defender for Containers support AKS clusters with virtual machines scale sets?](#does-microsoft-defender-for-containers-support-aks-clusters-with-virtual-machines-scale-sets) +- [Does Microsoft Defender for Containers support AKS without scale set (default)?](#does-microsoft-defender-for-containers-support-aks-without-scale-set-default) +- [Do I need to install the Log Analytics VM extension on my AKS nodes for security protection?](#do-i-need-to-install-the-log-analytics-vm-extension-on-my-aks-nodes-for-security-protection) -- **[Azure Arc-enabled Kubernetes](../azure-arc/kubernetes/overview.md)** - An agent based solution that connects your EKS clusters to Azure. Azure then is capable of providing services such as Defender, and Policy as [Arc extensions](../azure-arc/kubernetes/extensions.md). +### What are the options to enable the new plan at scale? -- **The Defender extension** – The [DeamonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) that collects signals from hosts using [eBPF technology](https://ebpf.io/), and provides runtime protection. The extension is registered with a Log Analytics workspace, and used as a data pipeline. However, the audit log data isn't stored in the Log Analytics workspace. +We’ve rolled out a new policy in Azure Policy, **Configure Microsoft Defender for Containers to be enabled**, to make it easier to enable the new plan at scale. -- **The Azure Policy extension** - The workload's configuration information is collected by the Azure Policy add-on. The Azure Policy add-on extends the open-source [Gatekeeper v3](https://github.com/open-policy-agent/gatekeeper) admission controller webhook for [Open Policy Agent](https://www.openpolicyagent.org/). 
The extension registers as a web hook to Kubernetes admission control and makes it possible to apply at-scale enforcements, and safeguards on your clusters in a centralized, consistent manner. For more information, see [Understand Azure Policy for Kubernetes clusters](../governance/policy/concepts/policy-for-kubernetes.md). +### Does Microsoft Defender for Containers support AKS clusters with virtual machines scale sets? -> [!NOTE] -> Defender for Containers' support for GCP GKE clusters is a preview feature. +Yes. -:::image type="content" source="./media/defender-for-containers/architecture-gke.png" alt-text="High-level architecture of the interaction between Microsoft Defender for Containers, Google GKE clusters, Azure Arc-enabled Kubernetes, and Azure Policy." lightbox="./media/defender-for-containers/architecture-gke.png"::: +### Does Microsoft Defender for Containers support AKS without scale set (default)? ---- +No. Only Azure Kubernetes Service (AKS) clusters that use virtual machine scale sets for the nodes is supported. -## FAQ - Defender for Containers +### Do I need to install the Log Analytics VM extension on my AKS nodes for security protection? -- [What are the options to enable the new plan at scale?](#what-are-the-options-to-enable-the-new-plan-at-scale) -- [Does Microsoft Defender for Containers support AKS clusters with virtual machines scale set (VMSS)?](#does-microsoft-defender-for-containers-support-aks-clusters-with-virtual-machines-scale-set-vmss) -- [Does Microsoft Defender for Containers support AKS without scale set (default)?](#does-microsoft-defender-for-containers-support-aks-without-scale-set-default) -- [Do I need to install the Log Analytics VM extension on my AKS nodes for security protection?](#do-i-need-to-install-the-log-analytics-vm-extension-on-my-aks-nodes-for-security-protection) +No, AKS is a managed service, and manipulation of the IaaS resources isn't supported. The Log Analytics VM extension isn't needed and may result in additional charges. -### What are the options to enable the new plan at scale? -We’ve rolled out a new policy in Azure Policy, **Configure Microsoft Defender for Containers to be enabled**, to make it easier to enable the new plan at scale. +## Learn More -### Does Microsoft Defender for Containers support AKS clusters with virtual machines scale set (VMSS)? -Yes. +If you would like to learn more from the product manager about Microsoft Defender for Containers, check out [Microsoft Defender for Containers](episode-three.md). -### Does Microsoft Defender for Containers support AKS without scale set (default)? -No. Only Azure Kubernetes Service (AKS) clusters that use virtual machine scale sets for the nodes is supported. +You can also check out the following blogs: -### Do I need to install the Log Analytics VM extension on my AKS nodes for security protection? -No, AKS is a managed service, and manipulation of the IaaS resources isn't supported. The Log Analytics VM extension is not needed and may result in additional charges. 
+- [How to demonstrate the new containers features in Microsoft Defender for Cloud](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/how-to-demonstrate-the-new-containers-features-in-microsoft/ba-p/3281172) +- [Introducing Microsoft Defender for Containers](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/introducing-microsoft-defender-for-containers/ba-p/2952317) ## Next steps diff --git a/articles/defender-for-cloud/defender-for-containers-usage.md b/articles/defender-for-cloud/defender-for-containers-usage.md index 8f793441d5051..e47acd912a021 100644 --- a/articles/defender-for-cloud/defender-for-containers-usage.md +++ b/articles/defender-for-cloud/defender-for-containers-usage.md @@ -1,17 +1,17 @@ --- -title: How to use Defender for Containers to identify vulnerabilities +title: How to use Defender for Containers to identify vulnerabilities in Microsoft Defender for Cloud description: Learn how to use Defender for Containers to scan images in your registries author: bmansheim ms.author: benmansheim -ms.date: 04/28/2022 +ms.date: 06/08/2022 ms.topic: how-to --- # Use Defender for Containers to scan your ACR images for vulnerabilities -This page explains how to use the built-in vulnerability scanner to scan the container images stored in your Azure Resource Manager-based Azure Container Registry. +This page explains how to use Defender for Containers to scan the container images stored in your Azure Resource Manager-based Azure Container Registry, as part of the protections provided within Microsoft Defender for Cloud. -When the scanner, powered by Qualys, reports vulnerabilities to Defender for Cloud, Defender for Cloud presents the findings and related information as recommendations. In addition, the findings include related information such as remediation steps, relevant CVEs, CVSS scores, and more. You can view the identified vulnerabilities for one or more subscriptions, or for a specific registry. +To enable scanning of vulnerabilities in containers, you have to [enable Defender for Containers](defender-for-containers-enable.md). When the scanner, powered by Qualys, reports vulnerabilities, Defender for Cloud presents the findings and related information as recommendations. In addition, the findings include related information such as remediation steps, relevant CVEs, CVSS scores, and more. You can view the identified vulnerabilities for one or more subscriptions, or for a specific registry. > [!TIP] > You can also scan container images for vulnerabilities as the images are built in your CI/CD GitHub workflows. Learn more in [Identify vulnerable container images in your CI/CD workflows](defender-for-containers-cicd.md). @@ -26,11 +26,11 @@ There are four triggers for an image scan: - **Continuous scan**- This trigger has two modes: - - A continuous scan based on an image pull. This scan is performed every seven days after an image was pulled, and only for 30 days after the image was pulled. This mode doesn't require the security profile, or extension. + - A continuous scan based on an image pull. This scan is performed every seven days after an image was pulled, and only for 30 days after the image was pulled. This mode doesn't require the security profile, or extension. - - (Preview) Continuous scan for running images. This scan is performed every seven days for as long as the image runs. This mode runs instead of the above mode when the Defender profile, or extension is running on the cluster. 
+ - (Preview) Continuous scan for running images. This scan is performed every seven days for as long as the image runs. This mode runs instead of the above mode when the Defender profile, or extension is running on the cluster. -This scan typically completes within 2 minutes, but it might take up to 40 minutes. For every vulnerability identified, Defender for Cloud provides actionable recommendations, along with a severity classification, and guidance for how to remediate the issue. +This scan typically completes within 2 minutes, but it might take up to 40 minutes. For every vulnerability identified, Defender for Cloud provides actionable recommendations, along with a severity classification, and guidance for how to remediate the issue. Defender for Cloud filters, and classifies findings from the scanner. When an image is healthy, Defender for Cloud marks it as such. Defender for Cloud generates security recommendations only for images that have issues to be resolved. By only notifying when there are problems, Defender for Cloud reduces the potential for unwanted informational alerts. @@ -38,7 +38,7 @@ Defender for Cloud filters, and classifies findings from the scanner. When an im To enable vulnerability scans of images stored in your Azure Resource Manager-based Azure Container Registry: -1. Enable **Defender for Containers** for your subscription. Defender for Cloud is now ready to scan images in your registries. +1. [Enable Defender for Containers](defender-for-containers-enable.md) for your subscription. Defender for Containers is now ready to scan images in your registries. >[!NOTE] > This feature is charged per image. @@ -152,25 +152,30 @@ To create a rule: ## FAQ -### How does Defender for Cloud scan an image? -Defender for Cloud pulls the image from the registry and runs it in an isolated sandbox with the Qualys scanner. The scanner extracts a list of known vulnerabilities. +### How does Defender for Containers scan an image? + +Defender for Containers pulls the image from the registry and runs it in an isolated sandbox with the Qualys scanner. The scanner extracts a list of known vulnerabilities. Defender for Cloud filters and classifies findings from the scanner. When an image is healthy, Defender for Cloud marks it as such. Defender for Cloud generates security recommendations only for images that have issues to be resolved. By only notifying you when there are problems, Defender for Cloud reduces the potential for unwanted informational alerts. ### Can I get the scan results via REST API? + Yes. The results are under [Sub-Assessments REST API](/rest/api/securitycenter/subassessments/list/). Also, you can use Azure Resource Graph (ARG), the Kusto-like API for all of your resources: a query can fetch a specific scan. ### What registry types are scanned? What types are billed? + For a list of the types of container registries supported by Microsoft Defender for container registries, see [Availability](defender-for-container-registries-introduction.md#availability). -If you connect unsupported registries to your Azure subscription, Defender for Cloud won't scan them and won't bill you for them. +If you connect unsupported registries to your Azure subscription, Defender for Containers won't scan them and won't bill you for them. ### Can I customize the findings from the vulnerability scanner? + Yes. If you have an organizational need to ignore a finding, rather than remediate it, you can optionally disable it. 
Disabled findings don't impact your secure score or generate unwanted noise. [Learn about creating rules to disable findings from the integrated vulnerability assessment tool](defender-for-containers-usage.md#disable-specific-findings). ### Why is Defender for Cloud alerting me to vulnerabilities about an image that isn’t in my registry? + Some images may reuse tags from an image that was already scanned. For example, you may reassign the tag “Latest” every time you add an image to a digest. In such cases, the ‘old’ image does still exist in the registry and may still be pulled by its digest. If the image has security findings and is pulled, it'll expose security vulnerabilities. ## Next steps diff --git a/articles/defender-for-cloud/defender-for-kubernetes-introduction.md b/articles/defender-for-cloud/defender-for-kubernetes-introduction.md index 5ecfe31125d03..11445307f5a14 100644 --- a/articles/defender-for-cloud/defender-for-kubernetes-introduction.md +++ b/articles/defender-for-cloud/defender-for-kubernetes-introduction.md @@ -26,7 +26,7 @@ Host-level threat detection for your Linux AKS nodes is available if you enable > > :::image type="content" source="media/defender-for-containers/enable-defender-for-containers.png" alt-text="Enable Microsoft Defender for Containers from the Defender plans page."::: > -> Learn more about this change in [the release note](release-notes.md#microsoft-defender-for-containers-plan-released-for-general-availability-ga). +> Learn more about this change in [the release note](release-notes-archive.md#microsoft-defender-for-containers-plan-released-for-general-availability-ga). |Aspect|Details| diff --git a/articles/defender-for-cloud/defender-for-servers-introduction.md b/articles/defender-for-cloud/defender-for-servers-introduction.md index b6f0374a50a23..1e1800cfb0c5e 100644 --- a/articles/defender-for-cloud/defender-for-servers-introduction.md +++ b/articles/defender-for-cloud/defender-for-servers-introduction.md @@ -1,10 +1,8 @@ --- title: Microsoft Defender for Servers - the benefits and features description: Learn about the benefits and features of Microsoft Defender for Servers. -ms.date: 03/28/2022 +ms.date: 05/11/2022 ms.topic: overview -ms.author: benmansheim -author: bmansheim --- # Introduction to Microsoft Defender for Servers @@ -15,7 +13,7 @@ To protect machines in hybrid and multicloud environments, Defender for Cloud us - [Connect your AWS accounts to Microsoft Defender for Cloud](quickstart-onboard-aws.md) > [!TIP] -> For details of which Defender for Servers features are relevant for machines running on other cloud environments, see [Supported features for virtual machines and servers](supported-machines-endpoint-solutions-clouds-servers.md?tabs=features-windows#supported-features-for-virtual-machines-and-servers-). +> For details of which Defender for Servers features are relevant for machines running on other cloud environments, see [Supported features for virtual machines and servers](supported-machines-endpoint-solutions-clouds-servers.md?tabs=features-windows#supported-features-for-virtual-machines-and-servers). ## What are the Microsoft Defender for server plans? @@ -128,8 +126,15 @@ You can simulate alerts by downloading one of the following playbooks: - For Linux: [Microsoft Defender for Cloud Playbook: Linux Detections](https://github.com/Azure/Azure-Security-Center/blob/master/Simulations/Azure%20Security%20Center%20Linux%20Detections_v2.pdf). 
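The Defender for Servers article above notes that hybrid and multicloud machines are protected through Azure Arc. As a hedged illustration, the Connected Machine agent can onboard a non-Azure server like this; the command runs on the target machine after the agent is installed, and every identifier shown is a hypothetical placeholder.

```bash
# Connect a non-Azure machine to Azure Arc so Defender for Servers can protect it.
# All IDs, names, and the region below are hypothetical placeholders.
azcmagent connect \
    --resource-group "my-arc-servers-rg" \
    --tenant-id "00000000-0000-0000-0000-000000000000" \
    --subscription-id "00000000-0000-0000-0000-000000000000" \
    --location "eastus"
```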
+## Learn more +If you would like to learn more from the product manager about Defender for Servers, check out [Microsoft Defender for Servers](episode-five.md). You can also learn about the [Enhanced workload protection features in Defender for Servers](episode-twelve.md). +You can also check out the following blogs: + +- [Security posture management and server protection for AWS and GCP are now generally available](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/security-posture-management-and-server-protection-for-aws-and/ba-p/3271388) + +- [Microsoft Defender for Cloud Server Monitoring Dashboard](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/microsoft-defender-for-cloud-server-monitoring-dashboard/ba-p/2869658) ## Next steps diff --git a/articles/defender-for-cloud/defender-for-sql-introduction.md b/articles/defender-for-cloud/defender-for-sql-introduction.md index 4256344b87cd6..3235bdce338aa 100644 --- a/articles/defender-for-cloud/defender-for-sql-introduction.md +++ b/articles/defender-for-cloud/defender-for-sql-introduction.md @@ -1,16 +1,22 @@ --- title: Microsoft Defender for SQL - the benefits and features description: Learn about the benefits and features of Microsoft Defender for SQL. -ms.date: 01/06/2022 +ms.date: 06/01/2022 ms.topic: overview -ms.author: benmansheim -author: bmansheim ms.custom: references_regions --- # Introduction to Microsoft Defender for SQL -Microsoft Defender for SQL includes two Microsoft Defender plans that extend Microsoft Defender for Cloud's [data security package](/azure/azure-sql/database/azure-defender-for-sql) to secure your databases and their data wherever they're located. Microsoft Defender for SQL includes functionalities for discovering and mitigating potential database vulnerabilities, and detecting anomalous activities that could indicate a threat to your databases. +Microsoft Defender for SQL includes two Microsoft Defender plans that extend Microsoft Defender for Cloud's [data security package](/azure/azure-sql/database/azure-defender-for-sql) to protect your SQL estate regardless of where it is located (Azure, multicloud or Hybrid environments). Microsoft Defender for SQL includes functions that can be used to discover and mitigate potential database vulnerabilities. Defender for SQL can also detect anomalous activities that may be an indication of a threat to your databases. + +To protect SQL databases in hybrid and multicloud environments, Defender for Cloud uses Azure Arc. Azure ARC connects your hybrid and multicloud machines. 
You can check out the following articles for more information: + +- [Connect your non-Azure machines to Microsoft Defender for Cloud](quickstart-onboard-machines.md) + +- [Connect your AWS accounts to Microsoft Defender for Cloud](quickstart-onboard-aws.md) + +- [Connect your GCP project to Microsoft Defender for Cloud](quickstart-onboard-gcp.md) ## Availability @@ -27,15 +33,28 @@ Microsoft Defender for SQL includes two Microsoft Defender plans that extend Mic **Microsoft Defender for SQL** comprises two separate Microsoft Defender plans: - **Microsoft Defender for Azure SQL database servers** protects: + - [Azure SQL Database](/azure/azure-sql/database/sql-database-paas-overview) + - [Azure SQL Managed Instance](/azure/azure-sql/managed-instance/sql-managed-instance-paas-overview) + - [Dedicated SQL pool in Azure Synapse](../synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-what-is.md) - **Microsoft Defender for SQL servers on machines** extends the protections for your Azure-native SQL Servers to fully support hybrid environments and protect SQL servers (all supported version) hosted in Azure, other cloud environments, and even on-premises machines: + - [SQL Server on Virtual Machines](https://azure.microsoft.com/services/virtual-machines/sql-server/) + - On-premises SQL servers: + - [Azure Arc-enabled SQL Server (preview)](/sql/sql-server/azure-arc/overview) + - [SQL Server running on Windows machines without Azure Arc](../azure-monitor/agents/agent-windows.md) + + - Multicloud SQL servers: + + - [Connect your AWS accounts to Microsoft Defender for Cloud](quickstart-onboard-aws.md) + + - [Connect your GCP project to Microsoft Defender for Cloud](quickstart-onboard-gcp.md) When you enable either of these plans, all supported resources that exist within the subscription are protected. Future resources created on the same subscription will also be protected. diff --git a/articles/defender-for-cloud/deploy-vulnerability-assessment-tvm.md b/articles/defender-for-cloud/deploy-vulnerability-assessment-tvm.md index 69d8a59c22f23..65f22d984adde 100644 --- a/articles/defender-for-cloud/deploy-vulnerability-assessment-tvm.md +++ b/articles/defender-for-cloud/deploy-vulnerability-assessment-tvm.md @@ -2,9 +2,7 @@ title: Use Microsoft Defender for Endpoint's threat and vulnerability management capabilities with Microsoft Defender for Cloud description: Enable, deploy, and use Microsoft Defender for Endpoint's threat and vulnerability management capabilities with Microsoft Defender for Cloud to discover weaknesses in your Azure and hybrid machines ms.topic: how-to -ms.author: benmansheim -author: bmansheim -ms.date: 03/23/2022 +ms.date: 05/11/2022 --- # Investigate weaknesses with Microsoft Defender for Endpoint's threat and vulnerability management @@ -37,8 +35,6 @@ For a quick overview of threat and vulnerability management, watch this video: |Required roles and permissions:|[Owner](../role-based-access-control/built-in-roles.md#owner) (resource group level) can deploy the scanner
[Security Reader](../role-based-access-control/built-in-roles.md#security-reader) can view findings| |Clouds:|:::image type="icon" source="./media/icons/yes-icon.png"::: Commercial clouds <br>
                  :::image type="icon" source="./media/icons/no-icon.png"::: National (Azure Government, Azure China 21Vianet)| - - ## Onboarding your machines to threat and vulnerability management The integration between Microsoft Defender for Endpoint and Microsoft Defender for Cloud takes place in the background, so it doesn't involve any changes at the endpoint level. @@ -54,6 +50,14 @@ The integration between Microsoft Defender for Endpoint and Microsoft Defender f The findings for **all** vulnerability assessment tools are in the Defender for Cloud recommendation **Vulnerabilities in your virtual machines should be remediated**. Learn about how to [view and remediate findings from vulnerability assessment solutions on your VMs](remediate-vulnerability-findings-vm.md) +## Learn more + +If you would like to learn more from the product manager about security posture, check out [Microsoft Defender for Servers](episode-five.md). + +You can also check out the following blogs: + +- [Security posture management and server protection for AWS and GCP are now generally available](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/security-posture-management-and-server-protection-for-aws-and/ba-p/3271388) +- [Microsoft Defender for Cloud Server Monitoring Dashboard](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/microsoft-defender-for-cloud-server-monitoring-dashboard/ba-p/2869658) ## Next steps > [!div class="nextstepaction"] diff --git a/articles/defender-for-cloud/enable-enhanced-security.md b/articles/defender-for-cloud/enable-enhanced-security.md index 24f300973ab7e..5b8715550e752 100644 --- a/articles/defender-for-cloud/enable-enhanced-security.md +++ b/articles/defender-for-cloud/enable-enhanced-security.md @@ -2,9 +2,7 @@ title: Enable Microsoft Defender for Cloud's integrated workload protections description: Learn how to enable enhanced security features to extend the protections of Microsoft Defender for Cloud to your hybrid and multicloud resources ms.topic: quickstart -ms.author: benmansheim -author: bmansheim -ms.date: 11/09/2021 +ms.date: 05/31/2022 ms.custom: mode-other --- @@ -28,35 +26,35 @@ To enable all Defender for Cloud features including threat protection capabiliti - You can enable **Microsoft Defender for SQL** at either the subscription level or resource level - You can enable **Microsoft Defender for open-source relational databases** at the resource level only -### To enable enhanced security features on your subscriptions and workspaces: +### Enable enhanced security features on your subscriptions and workspaces: - To enable enhanced security features on one subscription: 1. From Defender for Cloud's main menu, select **Environment settings**. + 1. Select the subscription or workspace that you want to protect. - 1. Select **Enable all Microsoft Defender plans** to upgrade. + + 1. Select **Enable all** to upgrade. + 1. Select **Save**. - > [!TIP] - > You'll notice that each Microsoft Defender plan is priced separately and can be individually set to on or off. For example, you might want to turn off Defender for App Service on subscriptions that don't have an associated Azure App Service plan. 
- - :::image type="content" source="./media/enhanced-security-features-overview/pricing-tier-page.png" alt-text="Defender for Cloud's pricing page in the portal"::: - + :::image type="content" source="./media/enhanced-security-features-overview/pricing-tier-page.png" alt-text="Defender for Cloud's pricing page in the portal" lightbox="media/enhanced-security-features-overview/pricing-tier-page.png"::: + - To enable enhanced security on multiple subscriptions or workspaces: 1. From Defender for Cloud's menu, select **Getting started**. The **Upgrade** tab lists subscriptions and workspaces eligible for onboarding. - :::image type="content" source="./media/enable-enhanced-security/get-started-upgrade-tab.png" alt-text="Upgrade tab of the getting started page."::: + :::image type="content" source="./media/enable-enhanced-security/get-started-upgrade-tab.png" alt-text="Upgrade tab of the getting started page." lightbox="media/enable-enhanced-security/get-started-upgrade-tab.png"::: 1. From the **Select subscriptions and workspaces to protect with Microsoft Defender for Cloud** list, select the subscriptions and workspaces to upgrade and select **Upgrade** to enable all Microsoft Defender for Cloud security features. - If you select subscriptions and workspaces that aren't eligible for trial, the next step will upgrade them and charges will begin. + - If you select a workspace that's eligible for a free trial, the next step will begin a trial. - :::image type="content" source="./media/enable-enhanced-security/upgrade-selected-workspaces-and-subscriptions.png" alt-text="Upgrade all selected workspaces and subscriptions from the getting started page."::: - + :::image type="content" source="./media/enable-enhanced-security/upgrade-selected-workspaces-and-subscriptions.png" alt-text="Upgrade all selected workspaces and subscriptions from the getting started page." lightbox="media/enable-enhanced-security/upgrade-selected-workspaces-and-subscriptions.png"::: ## Disable enhanced security features @@ -64,14 +62,12 @@ If you need to disable enhanced security features for a subscription, the proced 1. From Defender for Cloud's menu, open **Environment settings**. 1. Select the relevant subscription. -1. Select **Defender plans** and select **Enhanced security off**. - - :::image type="content" source="./media/enable-enhanced-security/disable-plans.png" alt-text="Enable or disable Defender for Cloud's enhanced security features."::: +1. Find the plan you wish to turn off and select **off**. -1. Select **Save**. + :::image type="content" source="./media/enable-enhanced-security/disable-plans.png" alt-text="Enable or disable Defender for Cloud's enhanced security features." lightbox="media/enable-enhanced-security/disable-plans.png"::: -> [!NOTE] -> After you disable enhanced security features - whether you disable a single plan or all plans at once - data collection may continue for a short period of time. + > [!NOTE] + > After you disable enhanced security features - whether you disable a single plan or all plans at once - data collection may continue for a short period of time. 
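If you manage several subscriptions, you may prefer to script these plan changes rather than repeat the portal steps above. The sketch below is illustrative and not part of the documented procedure: it assumes the Azure CLI `az security pricing` command group is available and signed in to the subscription you want to change, and the plan name `VirtualMachines` (Defender for Servers) is only an example.

```azurecli
# List every Defender plan in the active subscription and its current tier (Free or Standard).
az security pricing list --output table

# Turn on the enhanced security features of a single plan, for example Defender for Servers.
az security pricing create --name VirtualMachines --tier 'standard'

# Turn the same plan off again by returning it to the free tier.
az security pricing create --name VirtualMachines --tier 'free'
```

Although the verb is `create`, running the command against an existing plan name updates that plan's pricing tier, so the same command serves for both enabling and disabling.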
## Next steps diff --git a/articles/defender-for-cloud/enhanced-security-features-overview.md b/articles/defender-for-cloud/enhanced-security-features-overview.md index 4bf4d42c15100..447fa181d0964 100644 --- a/articles/defender-for-cloud/enhanced-security-features-overview.md +++ b/articles/defender-for-cloud/enhanced-security-features-overview.md @@ -2,10 +2,8 @@ title: Understand the enhanced security features of Microsoft Defender for Cloud description: Learn about the benefits of enabling enhanced security in Microsoft Defender for Cloud ms.topic: overview -ms.date: 04/11/2022 -ms.author: benmansheim +ms.date: 05/31/2022 ms.custom: references_regions -author: bmansheim --- # Microsoft Defender for Cloud's enhanced security features @@ -73,18 +71,21 @@ You can use any of the following ways to enable enhanced security for your subsc ### Can I enable Microsoft Defender for Servers on a subset of servers in my subscription? + No. When you enable [Microsoft Defender for Servers](defender-for-servers-introduction.md) on a subscription, all the machines in the subscription will be protected by Defender for Servers. An alternative is to enable Microsoft Defender for Servers at the Log Analytics workspace level. If you do this, only servers reporting to that workspace will be protected and billed. However, several capabilities will be unavailable. These include Microsoft Defender for Endpoint, VA solution (TVM/Qualys), just-in-time VM access, and more. ### If I already have a license for Microsoft Defender for Endpoint can I get a discount for Defender for Servers? + If you've already got a license for **Microsoft Defender for Endpoint for Servers Plan 2**, you won't have to pay for that part of your Microsoft Defender for Servers license. Learn more about [this license](/microsoft-365/security/defender-endpoint/minimum-requirements#licensing-requirements). To request your discount, [contact Defender for Cloud's support team](https://portal.azure.com/#blade/Microsoft_Azure_Support/HelpAndSupportBlade/overview). You'll need to provide the relevant workspace ID, region, and number of Microsoft Defender for Endpoint for servers licenses applied for machines in the given workspace. The discount will be effective starting from the approval date, and won't take place retroactively. -### My subscription has Microsoft Defender for Servers enabled, do I pay for not-running servers? +### My subscription has Microsoft Defender for Servers enabled, do I pay for not-running servers? + No. When you enable [Microsoft Defender for Servers](defender-for-servers-introduction.md) on a subscription, you won't be charged for any machines that are in the deallocated power state while they're in that state. Machines are billed according to their power state as shown in the following table: | State | Description | Instance usage billed | @@ -98,35 +99,101 @@ No. When you enable [Microsoft Defender for Servers](defender-for-servers-introd :::image type="content" source="media/enhanced-security-features-overview/deallocated-virtual-machines.png" alt-text="Azure Virtual Machines showing a deallocated machine."::: +### If I enable Defender for Clouds Servers plan on the subscription level, do I need to enable it on the workspace level? + +When you enable the Servers plan on the subscription level, Defender for Cloud will enable the Servers plan on your default workspace(s) automatically when auto-provisioning is enabled. 
This can be accomplished on the Auto provisioning page by selecting **Connect Azure VMs to the default workspace(s) created by Defender for Cloud** option and selecting **Apply**. + +:::image type="content" source="media/enhanced-security-features-overview/connect-workspace.png" alt-text="Screenshot showing how to auto provision defender for cloud to manage your workspaces."::: + +However, if you're using a custom workspace in place of the default workspace, you'll need to enable the Servers plan on all of your custom workspaces that do not have it enabled. + +If you're using a custom workspace and enable the plan on the subscription level only, the `Microsoft Defender for servers should be enabled on workspaces` recommendation will appear on the Recommendations page. This recommendation will give you the option to enable the servers plan on the workspace level with the Fix button. Until the workspace has the Servers plan enabled, any connected VM will not benefit from the full security coverage (Microsoft Defender for Endpoint, VA solution (TVM/Qualys), just-in-time VM access, and more) offered by the Defender for Cloud, but will still incur the cost. + +Enabling the Servers plan on both the subscription and its connected workspaces, will not incur a double charge. The system will identify each unique VM. + +If you enable the Servers plan on cross-subscription workspaces, all connected VMs, even those from subscriptions that it was not enabled on, will be billed. + ### Will I be charged for machines without the Log Analytics agent installed? + Yes. When you enable [Microsoft Defender for Servers](defender-for-servers-introduction.md) on a subscription, the machines in that subscription get a range of protections even if you haven't installed the Log Analytics agent. This is applicable for Azure virtual machines, Azure virtual machine scale sets instances, and Azure Arc-enabled servers. -### If a Log Analytics agent reports to multiple workspaces, will I be charged twice? -Yes. If you've configured your Log Analytics agent to send data to two or more different Log Analytics workspaces (multi-homing), you'll be charged for every workspace that has a 'Security' or 'AntiMalware' solution installed. +### If a Log Analytics agent reports to multiple workspaces, will I be charged twice? + +No you will not be charged twice. ### If a Log Analytics agent reports to multiple workspaces, is the 500 MB free data ingestion available on all of them? + Yes. If you've configured your Log Analytics agent to send data to two or more different Log Analytics workspaces (multi-homing), you'll get 500 MB free data ingestion. It's calculated per node, per reported workspace, per day, and available for every workspace that has a 'Security' or 'AntiMalware' solution installed. You'll be charged for any data ingested over the 500 MB limit. ### Is the 500 MB free data ingestion calculated for an entire workspace or strictly per machine? -You'll get 500 MB free data ingestion per day, for every machine connected to the workspace. Specifically for security data types directly collected by Defender for Cloud. -This data is a daily rate averaged across all nodes. So even if some machines send 100-MB and others send 800-MB, if the total doesn't exceed the **[number of machines] x 500 MB** free limit, you won't be charged extra. +You'll get 500 MB free data ingestion per day, for every VM connected to the workspace. 
Specifically for the [security data types](#what-data-types-are-included-in-the-500-mb-data-daily-allowance) that are directly collected by Defender for Cloud. + +This data is a daily rate averaged across all nodes. Your total daily free limit is equal to **[number of machines] x 500 MB**. So even if some machines send 100-MB and others send 800-MB, if the total doesn't exceed your total daily free limit, you won't be charged extra. ### What data types are included in the 500 MB data daily allowance? Defender for Cloud's billing is closely tied to the billing for Log Analytics. [Microsoft Defender for Servers](defender-for-servers-introduction.md) provides a 500 MB/node/day allocation for machines against the following subset of [security data types](/azure/azure-monitor/reference/tables/tables-category#security): -- SecurityAlert -- SecurityBaseline -- SecurityBaselineSummary -- SecurityDetection -- SecurityEvent -- WindowsFirewall -- MaliciousIPCommunication -- SysmonEvent -- ProtectionStatus -- Update and UpdateSummary data types when the Update Management solution is not running on the workspace or solution targeting is enabled + +- [SecurityAlert](/azure/azure-monitor/reference/tables/securityalert) +- [SecurityBaseline](/azure/azure-monitor/reference/tables/securitybaseline) +- [SecurityBaselineSummary](/azure/azure-monitor/reference/tables/securitybaselinesummary) +- [SecurityDetection](/azure/azure-monitor/reference/tables/securitydetection) +- [SecurityEvent](/azure/azure-monitor/reference/tables/securityevent) +- [WindowsFirewall](/azure/azure-monitor/reference/tables/windowsfirewall) +- [SysmonEvent](/azure/azure-monitor/reference/tables/sysmonevent) +- [ProtectionStatus](/azure/azure-monitor/reference/tables/protectionstatus) +- [Update](/azure/azure-monitor/reference/tables/update) and [UpdateSummary](/azure/azure-monitor/reference/tables/updatesummary) when the Update Management solution isn't running in the workspace or solution targeting is enabled. If the workspace is in the legacy Per Node pricing tier, the Defender for Cloud and Log Analytics allocations are combined and applied jointly to all billable ingested data. +## How can I monitor my daily usage + +You can view your data usage in two different ways, the Azure portal, or by running a script. + +**To view your usage in the Azure portal**: + +1. Sign in to the [Azure portal](https://portal.azure.com). + +1. Navigate to **Log Analytics workspaces**. + +1. Select your workspace. + +1. Select **Usage and estimated costs**. + + :::image type="content" source="media/enhanced-security-features-overview/data-usage.png" alt-text="Screenshot of your data usage of your log analytics workspace. " lightbox="media/enhanced-security-features-overview/data-usage.png"::: + +You can also view estimated costs under different pricing tiers by selecting :::image type="icon" source="media/enhanced-security-features-overview/drop-down-icon.png" border="false"::: for each pricing tier. + +:::image type="content" source="media/enhanced-security-features-overview/estimated-costs.png" alt-text="Screenshot showing how to view estimated costs under additional pricing tiers." lightbox="media/enhanced-security-features-overview/estimated-costs.png"::: + +**To view your usage by using a script**: + +1. Sign in to the [Azure portal](https://portal.azure.com). + +1. Navigate to **Log Analytics workspaces** > **Logs**. + +1. Select your time range. Learn about [time ranges](../azure-monitor/logs/log-analytics-tutorial.md). + +1. 
Copy and paste the following query into the **Type your query here** section. + + ```kusto + let Unit= 'GB'; + Usage + | where IsBillable == 'TRUE' + | where DataType in ('SecurityAlert', 'SecurityBaseline', 'SecurityBaselineSummary', 'SecurityDetection', 'SecurityEvent', 'WindowsFirewall', 'MaliciousIPCommunication', 'SysmonEvent', 'ProtectionStatus', 'Update', 'UpdateSummary') + | project TimeGenerated, DataType, Solution, Quantity, QuantityUnit + | summarize DataConsumedPerDataType = sum(Quantity)/1024 by DataType, DataUnit = Unit + | sort by DataConsumedPerDataType desc + ``` + +1. Select **Run**. + + :::image type="content" source="media/enhanced-security-features-overview/select-run.png" alt-text="Screenshot showing where to enter your query and where the select run button is located." lightbox="media/enhanced-security-features-overview/select-run.png"::: + +You can learn how to [Analyze usage in Log Analytics workspace](../azure-monitor/logs/analyze-usage.md). + +Based on your usage, you won't be billed until you've used your daily allowance. If you're receiving a bill, it's only for the data used after the 500 MB has been consumed, or for other services that don't fall under the coverage of Defender for Cloud. + ## Next steps This article explained Defender for Cloud's pricing options. For related material, see: diff --git a/articles/defender-for-cloud/episode-eight.md b/articles/defender-for-cloud/episode-eight.md new file mode 100644 index 0000000000000..ce2dc2dbe0b95 --- /dev/null +++ b/articles/defender-for-cloud/episode-eight.md @@ -0,0 +1,49 @@ +--- +title: Microsoft Defender for IoT +description: Learn how Defender for IoT discovers devices to monitor and how it fits in the Microsoft Security portfolio. +ms.topic: reference +ms.date: 06/01/2022 +--- + +# Microsoft Defender for IoT + +**Episode description**: In this episode of Defender for Cloud in the Field, Dolev Zemer joins Yuri Diogenes to talk about how Defender for IoT works. Dolev explains the difference between OT Security and IT Security and how Defender for IoT fills this gap. Dolev also demonstrates how Defender for IoT discovers devices to monitor and how it fits in the Microsoft Security portfolio. + +
                  +
                  + + +- [1:20](/shows/mdc-in-the-field/defender-for-iot#time=01m20s) - Overview of the Defender for IoT solution + +- [2:15](/shows/mdc-in-the-field/defender-for-iot#time=02m15s) - Difference between OT and IoT + +- [3:30](/shows/mdc-in-the-field/defender-for-iot#time=03m30s) - Prerequisites to use Defender for IoT + +- [4:30](/shows/mdc-in-the-field/defender-for-iot#time=04m30s) - Security posture and threat detection + +- [5:17](/shows/mdc-in-the-field/defender-for-iot#time=05m17s) - Automating alert response + +- [6:15](/shows/mdc-in-the-field/defender-for-iot#time=06m15s) - Integration with Microsoft Sentinel + +- [6:50](/shows/mdc-in-the-field/defender-for-iot#time=06m50s) - Architecture + +- [8:40](/shows/mdc-in-the-field/defender-for-iot#time=08m40s) - Demonstration + +## Recommended resources + +Learn more about [Defender for IoT](../defender-for-iot/index.yml). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Microsoft Defender for Containers in a Multicloud Environment](episode-nine.md) \ No newline at end of file diff --git a/articles/defender-for-cloud/episode-eleven.md b/articles/defender-for-cloud/episode-eleven.md new file mode 100644 index 0000000000000..a8ca65d0fc5fc --- /dev/null +++ b/articles/defender-for-cloud/episode-eleven.md @@ -0,0 +1,41 @@ +--- +title: Threat landscape for Defender for Containers +description: Learn about the new detections that are available for different attacks and how Defender for Containers can help to quickly identify malicious activities in containers. +ms.topic: reference +ms.date: 06/01/2022 +--- + +# Threat landscape for Defender for Containers + +**Episode description**: In this episode of Defender for Cloud in the Field, Yossi Weizman joins Yuri Diogenes to talk about the evolution of the threat matrix for Containers and how attacks against Kubernetes have evolved. Yossi also demonstrates new detections that are available for different attacks and how Defender for Containers can help to quickly identify malicious activities in containers. + +
                  +
                  + + +- [01:15](/shows/mdc-in-the-field/threat-landscape-containers#time=01m15s) - The evolution of attacks against Kubernetes + +- [02:50](/shows/mdc-in-the-field/threat-landscape-containers#time=02m50s) - Identity related attacks against Kubernetes + +- [04:00](/shows/mdc-in-the-field/threat-landscape-containers#time=04m00s) - Threat detection beyond audit logs + +- [05:48](/shows/mdc-in-the-field/threat-landscape-containers#time=5m48s) - Demonstration + +## Recommended resources + +Learn how to [detect identity attacks in Kubernetes](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/detecting-identity-attacks-in-kubernetes/ba-p/3232340). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Enhanced workload protection features in Defender for Servers](episode-twelve.md) diff --git a/articles/defender-for-cloud/episode-five.md b/articles/defender-for-cloud/episode-five.md new file mode 100644 index 0000000000000..79078edd77187 --- /dev/null +++ b/articles/defender-for-cloud/episode-five.md @@ -0,0 +1,47 @@ +--- +title: Microsoft Defender for Servers +description: Learn all about Microsoft Defender for Servers from the product manager. +ms.topic: reference +ms.date: 06/01/2022 +--- + +# Microsoft Defender for Servers + +**Episode description**: In this episode of Defender for Cloud in the field, Aviv Mor joins Yuri Diogenes to talk about Microsoft Defender for Servers updates, including the new integration with TVM. Aviv explains how this new integration with TVM works, the advantages of this integration, which includes software inventory and easy experience to onboard. Aviv also covers the integration with MDE for Linux and the Defender for Servers support for the new multicloud connector for AWS. + +
                  +
                  + + +- [1:22](/shows/mdc-in-the-field/defender-for-containers#time=01m22s) - Overview of the announcements for Microsoft Defender for Servers + +- [5:50](/shows/mdc-in-the-field/defender-for-containers#time=05m50s) - Migration path from Qualys VA to TVM + +- [7:12](/shows/mdc-in-the-field/defender-for-containers#time=07m12s) - TVM capabilities in Defender for Servers + +- [8:38](/shows/mdc-in-the-field/defender-for-containers#time=08m38s) - Threat detections for Defender for Servers + +- [9:52](/shows/mdc-in-the-field/defender-for-containers#time=09m52s) - Defender for Servers in AWS + +- [12:23](/shows/mdc-in-the-field/defender-for-containers#time=12m23s) - Onboard process for TVM in an on-premises scenario + +- [13:20](/shows/mdc-in-the-field/defender-for-containers#time=13m20s) - Demonstration + +## Recommended resources + +Learn how to [Investigate weaknesses with Microsoft Defender for Endpoint's threat and vulnerability management](deploy-vulnerability-assessment-tvm.md). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Lessons Learned from the Field](episode-six.md) diff --git a/articles/defender-for-cloud/episode-four.md b/articles/defender-for-cloud/episode-four.md new file mode 100644 index 0000000000000..a7b3ff43556f6 --- /dev/null +++ b/articles/defender-for-cloud/episode-four.md @@ -0,0 +1,43 @@ +--- +title: Security posture management improvements in Microsoft Defender for Cloud +description: Learn how to manage your security posture with Microsoft Defender for Cloud. +ms.topic: reference +ms.date: 06/01/2022 +--- + +# Security posture management improvements in Microsoft Defender for Cloud + +**Episode description**: In this episode of Defender for Cloud in the field, Lior Arviv joins Yuri Diogenes to talk about the cloud security posture management improvements in Microsoft Defender for Cloud. Lior explains the MITRE ATT&CK Framework integration with recommendations, the overall improvements of recommendations and the other fields added in the API. Lior also demonstrates the different ways to access the MITRE ATT&CK integration via filters and recommendations. + +
                  +
                  + + +- [1:24](/shows/mdc-in-the-field/defender-for-containers#time=01m24s) - Security recommendation refresh time changes + +- [3:50](/shows/mdc-in-the-field/defender-for-containers#time=03m50s) - MITRE ATT&CK Framework mapping to recommendations + +- [6:14](/shows/mdc-in-the-field/defender-for-containers#time=06m14s) - Demonstration + +- [14:44](/shows/mdc-in-the-field/defender-for-containers#time=14m44s) - Secure Score API updates + +- [18:54](/shows/mdc-in-the-field/defender-for-containers#time=18m54s) - What's coming next + +## Recommended resources + +Learn how to [Review your security recommendations](review-security-recommendations.md). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Microsoft Defender for Servers](episode-five.md) diff --git a/articles/defender-for-cloud/episode-nine.md b/articles/defender-for-cloud/episode-nine.md new file mode 100644 index 0000000000000..4cee9cc2a3374 --- /dev/null +++ b/articles/defender-for-cloud/episode-nine.md @@ -0,0 +1,43 @@ +--- +title: Microsoft Defender for Containers in a multicloud environment +description: Learn about Microsoft Defender for Containers implementation in AWS and GCP. +ms.topic: reference +ms.date: 06/01/2022 +--- + +# Microsoft Defender for Containers in a Multicloud Environment + +**Episode description**: In this episode of Defender for Cloud in the field, Maya Herskovic joins Yuri Diogenes to talk about Microsoft Defender for Containers implementation in AWS and GCP. + +Maya explains about the new workload protection capabilities related to Containers when they're deployed in a multicloud environment. Maya also demonstrates the onboarding experience in GCP and how to visualize security recommendations across AWS, GCP, and Azure in a single dashboard. + +
                  +
                  + + +- [01:12](/shows/mdc-in-the-field/containers-multi-cloud#time=01m12s) - Container protection in a multicloud environment + +- [05:03](/shows/mdc-in-the-field/containers-multi-cloud#time=05m03s) - Workload protection capabilities for GCP + +- [06:18](/shows/mdc-in-the-field/containers-multi-cloud#time=06m18s) - Single dashboard for multi-cloud + +- [10:25](/shows/mdc-in-the-field/containers-multi-cloud#time=10m25s) - Demonstration + +## Recommended resources + +Learn how to [Enable Microsoft Defender for Containers](defender-for-containers-enable.md). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Protecting Containers in GCP with Defender for Containers](episode-ten.md) diff --git a/articles/defender-for-cloud/episode-one.md b/articles/defender-for-cloud/episode-one.md new file mode 100644 index 0000000000000..5187b1bbb0fbc --- /dev/null +++ b/articles/defender-for-cloud/episode-one.md @@ -0,0 +1,47 @@ +--- +title: New AWS connector in Microsoft Defender for Cloud +description: Learn all about the new AWS connector in Microsoft Defender for Cloud. +ms.topic: reference +ms.date: 05/29/2022 +--- + +# New AWS connector in Microsoft Defender for Cloud + +**Episode description**: In this episode of Defender for Cloud in the field, Or Serok joins Yuri Diogenes to share the new AWS connector in Microsoft Defender for Cloud, which was released at Ignite 2021. Or explains the use case scenarios for the new connector and how the new connector work. She demonstrates the onboarding process to connect AWS with Microsoft Defender for Cloud and talks about the centralized management of all security recommendations. + +
                  +
                  + + +- [00:00](/shows/mdc-in-the-field/aws-connector) - Introduction + +- [2:20](/shows/mdc-in-the-field/aws-connector) - Understanding the new AWS connector. + +- [3:45](/shows/mdc-in-the-field/aws-connector) - Overview of the new onboarding experience. + +- [4:30](/shows/mdc-in-the-field/aws-connector) - Customizing recommendations for AWS workloads. + +- [7:03](/shows/mdc-in-the-field/aws-connector) - Beyond CSPM capabilities. + +- [11:14](/shows/mdc-in-the-field/aws-connector) - Demonstration of the recommendations and onboarding process. + +- [23:20](/shows/mdc-in-the-field/aws-connector) - Demonstration of how to customize AWS assessments. + +## Recommended resources + +Learn more about the new [AWS connector](quickstart-onboard-aws.md) + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Integrate Azure Purview with Microsoft Defender for Cloud](episode-two.md) diff --git a/articles/defender-for-cloud/episode-seven.md b/articles/defender-for-cloud/episode-seven.md new file mode 100644 index 0000000000000..270ada01fbc5f --- /dev/null +++ b/articles/defender-for-cloud/episode-seven.md @@ -0,0 +1,47 @@ +--- +title: New GCP connector in Microsoft Defender for Cloud +description: Learn all about the new GCP connector in Microsoft Defender for Cloud. +ms.topic: reference +ms.date: 05/29/2022 +--- + +# New GCP connector in Microsoft Defender for Cloud + +**Episode description**: In this episode of Defender for Cloud in the field, Or Serok joins Yuri Diogenes to share the new GCP Connector in Microsoft Defender for Cloud. Or explains the use case scenarios for the new connector and how the new connector works. She demonstrates the onboarding process to connect GCP with Microsoft Defender for Cloud and talks about custom assessment and the CSPM experience for multicloud + +
                  +
                  + + +- [1:23](/shows/mdc-in-the-field/gcp-connector#time=01m23s) - Overview of the new GCP connector + +- [4:05](/shows/mdc-in-the-field/gcp-connector#time=04m05s) - Migration path from the old GCP connector to the new one + +- [5:10](/shows/mdc-in-the-field/gcp-connector#time=05m10s) - Type of assessment utilized by the new GCP connector + +- [5:51](/shows/mdc-in-the-field/gcp-connector#time=05m51s) - Custom assessments + +- [6:52](/shows/mdc-in-the-field/gcp-connector#time=06m52s) - Demonstration + +- [15:05](/shows/mdc-in-the-field/gcp-connector#time=15m05s) - Recommendation experience + +- [18:00](/shows/mdc-in-the-field/gcp-connector#time=18m00s) - Final considerations + +## Recommended resources + +Learn more how to [Connect your GCP projects to Microsoft Defender for Cloud](quickstart-onboard-gcp.md). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Microsoft Defender for IoT](episode-eight.md) diff --git a/articles/defender-for-cloud/episode-six.md b/articles/defender-for-cloud/episode-six.md new file mode 100644 index 0000000000000..c848f0dcff643 --- /dev/null +++ b/articles/defender-for-cloud/episode-six.md @@ -0,0 +1,45 @@ +--- +title: Lessons learned from the field with Microsoft Defender for Cloud +description: Learn how Microsoft Defender for Cloud is used to fill the gap between cloud security posture management and cloud workload protection. +ms.topic: reference +ms.date: 05/25/2022 +--- + +# Lessons learned from the field with Microsoft Defender for Cloud + +**Episode description**: In this episode Carlos Faria, Microsoft Cybersecurity Consultant joins Yuri to talk about lessons from the field and how customers are using Microsoft Defender for Cloud to improve their security posture and protect their workloads in a multicloud environment. + +Carlos also covers how Microsoft Defender for Cloud is used to fill the gap between cloud security posture management and cloud workload protection, and demonstrates some features related to this scenario. + +
                  +
                  + + +- [1:30](/shows/mdc-in-the-field/lessons-from-the-field#time=01m30s) - Why Microsoft Defender for Cloud is a unique solution when compared with other competitors? + +- [2:58](/shows/mdc-in-the-field/lessons-from-the-field#time=02m58s) - How to fulfill the gap between CSPM and CWPP + +- [4:42](/shows/mdc-in-the-field/lessons-from-the-field#time=04m42s) - How a multicloud affects the CSPM lifecycle and how Defender for Cloud fits in? + +- [8:05](/shows/mdc-in-the-field/lessons-from-the-field#time=08m05s) - Demonstration + +- [12:34](/shows/mdc-in-the-field/lessons-from-the-field#time=12m34s) - Final considerations + +## Recommended resources + +Learn more [What is Microsoft Defender for Cloud?](defender-for-cloud-introduction.md). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [New GCP Connector in Microsoft Defender for Cloud](episode-seven.md) \ No newline at end of file diff --git a/articles/defender-for-cloud/episode-ten.md b/articles/defender-for-cloud/episode-ten.md new file mode 100644 index 0000000000000..fb0c18e157968 --- /dev/null +++ b/articles/defender-for-cloud/episode-ten.md @@ -0,0 +1,43 @@ +--- +title: Protecting containers in GCP with Defender for Containers +description: Learn how to use Defender for Containers, to protect Containers that are located in Google Cloud Projects. +ms.topic: reference +ms.date: 05/29/2022 +--- + +# Protecting containers in GCP with Defender for Containers + +**Episode description**: In this episode of Defender for Cloud in the field, Nadav Wolfin joins Yuri Diogenes to talk about how to use Defender for Containers to protect Containers that are located at Google Cloud (GCP). + +Nadav gives insights about workload protection for GKE and how to obtain visibility of this type of workload across Azure and AWS. Nadav also demonstrates the overall onboarding experience and provides an overview of the architecture of this solution. + +
                  +
                  + + +- [00:55](/shows/mdc-in-the-field/gcp-containers#time=00m55s) - Architecture solution for Defender for Containers and support for GKE + +- [06:42](/shows/mdc-in-the-field/gcp-containers#time=06m42s) - How the onboard process works + +- [08:46](/shows/mdc-in-the-field/gcp-containers#time=08m46s) - Demonstration + +- [26:18](/shows/mdc-in-the-field/gcp-containers#time=26m18s) - Integration with Azure Arc + +## Recommended resources + +Learn how to [Enable Microsoft Defender for Containers](defender-for-containers-enable.md). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Threat landscape for Containers](episode-eleven.md) diff --git a/articles/defender-for-cloud/episode-three.md b/articles/defender-for-cloud/episode-three.md new file mode 100644 index 0000000000000..420902a3fd059 --- /dev/null +++ b/articles/defender-for-cloud/episode-three.md @@ -0,0 +1,47 @@ +--- +title: Microsoft Defender for Containers +description: Learn how about Microsoft Defender for Containers. +ms.topic: reference +ms.date: 05/29/2022 +--- + +# Microsoft Defender for Containers + +**Episode description**: In this episode of Defender for Cloud in the field, Maya Herskovic joins Yuri Diogenes to talk about Microsoft Defender for Containers. Maya explains what's new in Microsoft Defender for Containers, the new capabilities that are available, the new pricing model, and the multicloud coverage. Maya also demonstrates the overall experience of Microsoft Defender for Containers from the recommendations to the alerts that you may receive. + +
                  +
                  + + +- [1:09](/shows/mdc-in-the-field/defender-for-containers#time=01m09s) - What's new in the Defender for Containers plan? + +- [4:42](/shows/mdc-in-the-field/defender-for-containers#time=04m42s) - Change in the host level protection + +- [8:08](/shows/mdc-in-the-field/defender-for-containers#time=08m08s) - How to migrate to the new plan? + +- [9:28](/shows/mdc-in-the-field/defender-for-containers#time=09m28s) - Onboarding requirements + +- [11:45](/shows/mdc-in-the-field/defender-for-containers#time=11m45s) - Improvements in the anomaly detection + +- [13:27](/shows/mdc-in-the-field/defender-for-containers#time=13m27s) - Demonstration + +- [22:17](/shows/mdc-in-the-field/defender-for-containers#time=22m17s) - Final considerations + +## Recommended resources + +Learn more about [Microsoft Defender for Containers](defender-for-containers-introduction.md). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Security posture management improvements](episode-four.md) \ No newline at end of file diff --git a/articles/defender-for-cloud/episode-twelve.md b/articles/defender-for-cloud/episode-twelve.md new file mode 100644 index 0000000000000..f61a67ffb9b24 --- /dev/null +++ b/articles/defender-for-cloud/episode-twelve.md @@ -0,0 +1,47 @@ +--- +title: Enhanced workload protection features in Defender for Servers +description: Learn about the enhanced capabilities available in Defender for Servers, for VMs that are located in GCP, AWS and on-premises. +ms.topic: reference +ms.date: 05/29/2022 +--- + +# Enhanced workload protection features in Defender for Servers + +**Episode description**: In this episode of Defender for Cloud in the Field, Netta Norman joins Yuri Diogenes to talk about the enhanced capabilities available in Defender for Servers, for VMs that are located in GCP, AWS and on-premises. + +Netta explains how Defender for Servers applies Azure Arc as a bridge to onboard non-Azure VMs as she demonstrates what the experience looks like. + +
                  +
                  + + +- [00:55](/shows/mdc-in-the-field/enhanced-workload-protection#time=00m55s) - Arc Auto-provisioning in GCP + +- [2:57](/shows/mdc-in-the-field/enhanced-workload-protection#time=02m57s) - Prerequisites to Arc auto-provisioning + +- [3:50](/shows/mdc-in-the-field/enhanced-workload-protection#time=03m50s) - Considerations when enabling Defender for Server plan in GCP + +- [5:20](/shows/mdc-in-the-field/enhanced-workload-protection#time=05m20s) - Dashboard refresh time interval + +- [7:00](/shows/mdc-in-the-field/enhanced-workload-protection#time=07m00s) - Security value for non-Azure workloads + +- [9:06](/shows/mdc-in-the-field/enhanced-workload-protection#time=05m20s) - Demonstration + +## Recommended resources + +Introduce yourself to [Microsoft Defender for Servers](defender-for-servers-introduction.md). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [New AWS Connector in Microsoft Defender for Cloud](episode-one.md) diff --git a/articles/defender-for-cloud/episode-two.md b/articles/defender-for-cloud/episode-two.md new file mode 100644 index 0000000000000..a190cd59a2201 --- /dev/null +++ b/articles/defender-for-cloud/episode-two.md @@ -0,0 +1,47 @@ +--- +title: Integrate Azure Purview with Microsoft Defender for Cloud +description: Learn how to integrate Azure Purview with Microsoft Defender for Cloud. +ms.topic: reference +ms.date: 05/29/2022 +--- + +# Integrate Azure Purview with Microsoft Defender for Cloud + +**Episode description**: In this episode of Defender for Cloud in the field, David Trigano joins Yuri Diogenes to share the new integration of Microsoft Defender for Cloud with Azure Purview, which was released at Ignite 2021. + +David explains the use case scenarios for this integration and how the data classification is done by Azure Purview can help prioritize recommendations and alerts in Defender for Cloud. David also demonstrates the overall experience of data enrichment based on the information that flows from Azure Purview to Defender for Cloud. + +
                  +
                  + + +- [1:36](/shows/mdc-in-the-field/integrate-with-purview) - Overview of Azure Purview + +- [2:40](/shows/mdc-in-the-field/integrate-with-purview) - Integration with Microsoft Defender for Cloud + +- [3:48](/shows/mdc-in-the-field/integrate-with-purview) - How the integration with Azure Purview helps to prioritize Recommendations in Microsoft Defender for Cloud + +- [5:26](/shows/mdc-in-the-field/integrate-with-purview) - How the integration with Azure Purview helps to prioritize Alerts in Microsoft Defender for Cloud + +- [8:54](/shows/mdc-in-the-field/integrate-with-purview) - Demonstration + +- [16:50](/shows/mdc-in-the-field/integrate-with-purview) - Final considerations + +## Recommended resources + +Learn more about the [integration with Azure Purview](information-protection.md). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Watch Episode 3](episode-three.md) diff --git a/articles/defender-for-cloud/governance-rules.md b/articles/defender-for-cloud/governance-rules.md new file mode 100644 index 0000000000000..23888884d9145 --- /dev/null +++ b/articles/defender-for-cloud/governance-rules.md @@ -0,0 +1,134 @@ +--- +title: Driving your organization to remediate security issues with recommendation governance in Microsoft Defender for Cloud +description: Learn how to assign owners and due dates to security recommendations and create rules to automatically assign owners and due dates +services: defender-for-cloud +author: bmansheim +ms.author: benmansheim +ms.service: defender-for-cloud +ms.topic: how-to +ms.date: 05/29/2022 +--- +# Drive your organization to remediate security recommendations with governance + +Security teams are responsible for improving the security posture of their organizations but they may not have the resources or authority to actually implement security recommendations. [Assigning owners with due dates](#manually-assigning-owners-and-due-dates-for-recommendation-remediation) and [defining governance rules](#building-an-automated-process-for-improving-security-with-governance-rules) creates accountability and transparency so you can drive the process of improving the security posture in your organization. + +Stay on top of the progress on the recommendations in the security posture. Weekly email notifications to the owners and managers make sure that they take timely action on the recommendations that can improve your security posture and recommendations. 
+ +## Building an automated process for improving security with governance rules + +To make sure your organization is systematically improving its security posture, you can define rules that assign an owner and set the due date for resources in the specified recommendations. That way resource owners have a clear set of tasks and deadlines for remediating recommendations. + +You can then review the progress of the tasks by subscription, recommendation, or owner so you can follow up with tasks that need more attention. + +### Availability + +|Aspect|Details| +|----|:----| +|Release state:|Preview.
                  [!INCLUDE [Legalese](../../includes/defender-for-cloud-preview-legal-text.md)]| +|Pricing:|Free| +|Required roles and permissions:|Azure - **Contributor**, **Security Admin**, or **Owner** on the subscription
                  AWS, GCP – **Contributor**, **Security Admin**, or **Owner** on the connector| +|Clouds:|:::image type="icon" source="./media/icons/yes-icon.png"::: Commercial clouds
                  :::image type="icon" source="./media/icons/no-icon.png"::: National (Azure Government, Azure China 21Vianet)
                  :::image type="icon" source="./media/icons/yes-icon.png"::: Connected AWS accounts
                  :::image type="icon" source="./media/icons/yes-icon.png"::: Connected GCP accounts| + +### Defining governance rules to automatically set the owner and due date of recommendations + +Governance rules can identify resources that require remediation according to specific recommendations or severities, and the rule assigns an owner and due date to make sure the recommendations are handled. Many governance rules can apply to the same recommendations, so the rule with lower priority value is the one that assigns the owner and due date. + +The due date set for the recommendation to be remediated is based on a timeframe of 7, 14, 30, or 90 days from when the recommendation is found by the rule. For example, if the rule identifies the resource on March 1st and the remediation timeframe is 14 days, March 15th is the due date. You can apply a grace period so that the resources that are given a due date don't impact your secure score until they're overdue. + +You can also set the owner of the resources that are affected by the specified recommendations. In organizations that use resource tags to associate resources with an owner, you can specify the tag key and the governance rule reads the name of the resource owner from the tag. + +By default, email notifications are sent to the resource owners weekly to provide a list of the on time and overdue tasks. If an email for the owner's manager is found in the organizational Azure Active Directory (Azure AD), the owner's manager receives a weekly email showing any overdue recommendations by default. + +:::image type="content" source="./media/governance-rules/add-governance-rules.png" alt-text="Screenshot of fields required to add a governance rule." lightbox="media/governance-rules/add-governance-rules.png"::: + +To define a governance rule that assigns an owner and due date: + +1. In the **Environment settings**, select the Azure subscription, AWS account, or Google project that you want to define the rule for. +1. In **Governance rules (preview)**, select **Add rule**. +1. Enter a name for the rule. +1. Set a priority for the rule. You can see the priority for the existing rules in the list of governance rules. +1. Select the recommendations that the rule applies to, either: + - **By severity** - The rule assigns the owner and due date to any recommendation in the subscription that doesn't already have them assigned. + - **By name** - Select the specific recommendations that the rule applies to. +1. Set the owner to assign to the recommendations either: + - **By resource tag** - Enter the resource tag on your resources that defines the resource owner. + - **By email address** - Enter the email address of the owner to assign to the recommendations. +1. Set the **remediation timeframe**, which is the time between when the resources are identified to require remediation and the time that the remediation is due. +1. If you don't want the resources to affect your secure score until they're overdue, select **Apply grace period**. +1. If you don't want either the owner or the owner's manager to receive weekly emails, clear the notification options. +1. Select **Create**. + +If there are existing recommendations that match the definition of the governance rule, you can either: + +- Assign an owner and due date to recommendations that don't already have an owner or due date. +- Overwrite the owner and due date of existing recommendations. 
+ +## Manually assigning owners and due dates for recommendation remediation + +For every resource affected by a recommendation, you can assign an owner and a due date so that you know who needs to implement the security changes to improve your security posture and when they're expected to do it by. You can also apply a grace period so that the resources that are given a due date don't impact your secure score unless they become overdue. + +To manually assign owners and due dates to recommendations: + +1. Go to the list of recommendations: + - In the Defender for Cloud overview, select **Security posture** and then select **View recommendations** for the environment that you want to improve. + - Go to **Recommendations** in the Defender for Cloud menu. +1. In the list of recommendations, use the **Potential score increase** to identify the security control that contains recommendations that will increase your secure score. + + > [!TIP] + > You can also use the search box and filters above the list of recommendations to find specific recommendations. + +1. Select a recommendation to see the affected resources. +1. For any resource that doesn't have an owner or due date, select the resources and select **Assign owner**. +1. Enter the email address of the owner that needs to make the changes that remediate the recommendation for those resources. +1. Select the date by which to remediate the recommendation for the resources. +1. You can select **Apply grace period** to keep the resource from impacting the secure score until it's overdue. +1. Select **Save**. + +The recommendation is now shown as assigned and on time. + +## Tracking the status of recommendations for further action + +After you define governance rules, you'll want to review the progress that the owners are making in remediating the recommendations. + +You can track the assigned and overdue recommendations in: + +- The security posture shows the number of unassigned and overdue recommendations. + + :::image type="content" source="./media/governance-rules/governance-in-security-posture.png" alt-text="Screenshot of governance status in the security posture."::: + +- The list of recommendations shows the governance status of each recommendation. + + :::image type="content" source="./media/governance-rules/governance-in-recommendations.png" alt-text="Screenshot of recommendations with their governance status." lightbox="media/governance-rules/governance-in-recommendations.png"::: + +- The governance report in the governance rules settings lets you drill down into recommendations by rule and owner. + + :::image type="content" source="./media/governance-rules/governance-in-workbook.png" alt-text="Screenshot of governance status by rule and owner in the governance workbook." lightbox="media/governance-rules/governance-in-workbook.png"::: + +### Tracking progress by rule with the governance report + +The governance report lets you select subscriptions that have governance rules and, for each rule and owner, shows you how many recommendations are completed, on time, overdue, or unassigned. + +To review the status of the recommendations in a rule: + +1. In **Recommendations**, select **Governance report (preview)**. +1. Select the subscriptions that you want to review. +1. Select the rules that you want to see details about. + +You can see the list of owners and recommendations for the selected rules, and their status. + +To see the list of recommendations for each owner: + +1. Select **Security posture**. +1. 
Select the **Owner (preview)** tab to see the list of owners and the number of overdue recommendations for each owner. + + - Hover over the (i) in the overdue recommendations to see the breakdown of overdue recommendations by severity. + + - If the owner email address is found in the organizational Azure Active Directory (Azure AD), you'll see the full name and picture of the owner. + +1. Select **View recommendations** to go to the list of recommendations associated with the owner. + +## Next steps + +In this article, you learned how to set up a process for assigning owners and due dates to tasks so that owners are accountable for taking steps to improve your security posture. + +Check out how owners can [set ETAs for tasks](review-security-recommendations.md#manage-the-owner-and-eta-of-recommendations-that-are-assigned-to-you) so that they can manage their progress. diff --git a/articles/defender-for-cloud/implement-security-recommendations.md b/articles/defender-for-cloud/implement-security-recommendations.md index 8010de7e33a85..ec0e5d8675205 100644 --- a/articles/defender-for-cloud/implement-security-recommendations.md +++ b/articles/defender-for-cloud/implement-security-recommendations.md @@ -1,7 +1,7 @@ --- title: Implement security recommendations in Microsoft Defender for Cloud | Microsoft Docs description: This article explains how to respond to recommendations in Microsoft Defender for Cloud to protect your resources and satisfy security policies. -ms.topic: conceptual +ms.topic: how-to ms.author: benmansheim author: bmansheim ms.date: 11/09/2021 @@ -30,12 +30,12 @@ To simplify remediation and improve your environment's security (and increase yo > [!TIP] > The **Fix** feature is only available for specific recommendations. To find recommendations that have an available fix, use the **Response actions** filter for the list of recommendations: -> +> > :::image type="content" source="media/implement-security-recommendations/quick-fix-filter.png" alt-text="Use the filters above the recommendations list to find recommendations that have the Fix option."::: To implement a **Fix**: -1. From the list of recommendations that have the **Fix** action icon, :::image type="icon" source="media/implement-security-recommendations/fix-icon.png" border="false":::, select a recommendation. +1. From the list of recommendations that have the **Fix** action icon :::image type="icon" source="media/implement-security-recommendations/fix-icon.png" border="false":::, select a recommendation. :::image type="content" source="./media/implement-security-recommendations/microsoft-defender-for-cloud-recommendations-fix-action.png" alt-text="Recommendations list highlighting recommendations with Fix action" lightbox="./media/implement-security-recommendations/microsoft-defender-for-cloud-recommendations-fix-action.png"::: @@ -62,7 +62,6 @@ To implement a **Fix**: The remediation operation uses a template deployment or REST API `PATCH` request to apply the configuration on the resource. These operations are logged in [Azure activity log](../azure-monitor/essentials/activity-log.md). - ## Next steps In this document, you were shown how to remediate recommendations in Defender for Cloud. 
To learn how recommendations are defined and selected for your environment, see the following page: diff --git a/articles/defender-for-cloud/includes/defender-for-containers-enable-plan-aks.md b/articles/defender-for-cloud/includes/defender-for-containers-enable-plan-aks.md index 00807ea8f11f6..a62a23b36c8e2 100644 --- a/articles/defender-for-cloud/includes/defender-for-containers-enable-plan-aks.md +++ b/articles/defender-for-cloud/includes/defender-for-containers-enable-plan-aks.md @@ -1,9 +1,9 @@ --- -author: ElazarK -ms.author: elkrieger +author: bmansheim +ms.author: benmansheim ms.service: defender-for-cloud ms.topic: include -ms.date: 05/12/2022 +ms.date: 05/26/2022 --- ## Enable the plan @@ -19,16 +19,18 @@ ms.date: 05/12/2022 > > :::image type="content" source="../media/release-notes/defender-plans-deprecated-indicator.png" alt-text="Defender for container registries and Defender for Kubernetes plans showing 'Deprecated' and upgrade information."::: -1. By default, when enabling the plan through the Azure portal, [Microsoft Defender for Containers](../defender-for-containers-introduction.md) is configured to auto provision (automatically install) required components to provide the protections offered by plan, including the assignment of a default workspace. +1. By default, when enabling the plan through the Azure portal, [Microsoft Defender for Containers](../defender-for-containers-introduction.md) is configured to auto provision (automatically install) required components to provide the protections offered by plan, including the assignment of a default workspace. - Optionally, you can modify this configuration from the [Defender plans page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/pricingTier) or from the [Auto provisioning page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/dataCollection) on the **Microsoft Defender for Containers components (preview)** row: + If you want to disable auto provisioning during the onboading process, select **Edit configuration** for the **Containers** plan. This opens the Advanced options, where you can disable auto provisioning for each component. + + In addition, you can modify this configuration from the [Defender plans page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/pricingTier) or from the [Auto provisioning page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/dataCollection) on the **Microsoft Defender for Containers components (preview)** row: :::image type="content" source="../media/defender-for-containers/auto-provisioning-defender-for-containers.png" alt-text="Screenshot of the auto provisioning options for Microsoft Defender for Containers." lightbox="../media/defender-for-containers/auto-provisioning-defender-for-containers.png"::: > [!NOTE] > If you choose to **disable the plan** at any time after enabling it through the portal as shown above, you'll need to manually remove Defender for Containers components deployed on your clusters. - You can [assign a custom workspace](https://docs.microsoft.com/azure/defender-for-cloud/defender-for-containers-enable?tabs=aks-deploy-portal%2Ck8s-deploy-asc%2Ck8s-verify-asc%2Ck8s-remove-arc%2Caks-removeprofile-api&pivots=defender-for-container-aks#assign-a-custom-workspace) through Azure Policy. 
+ You can [assign a custom workspace](../defender-for-containers-enable.md?pivots=defender-for-container-aks&tabs=aks-deploy-portal%2ck8s-deploy-asc%2ck8s-verify-asc%2ck8s-remove-arc%2caks-removeprofile-api#assign-a-custom-workspace) through Azure Policy. 1. If you disable the auto provisioning of any component, you can easily deploy the component to one or more clusters using the appropriate recommendation: @@ -39,13 +41,13 @@ ms.date: 05/12/2022 > [!Note] >Microsoft Defender for Containers is configured to defend all of your clouds automatically. When you install all of the required prerequisites and enable all of the auto provisioning capabilities. > - > If you choose to disable all of the auto provision configuration options, no agents, or components will be deployed to your clusters. Protection will be limited to the Agentless features only. Learn which features are Agentless in the [availability section](../supported-machines-endpoint-solutions-clouds-containers.md) for Defender for Containers. + > If you choose to disable all of the auto provision configuration options, no agents, or components will be deployed to your clusters. Protection will be limited to the Agentless features only. Learn which features are Agentless in the [availability section](../supported-machines-endpoint-solutions-clouds-containers.md) for Defender for Containers. ## Deploy the Defender profile You can enable the Defender for Containers plan and deploy all of the relevant components from the Azure portal, the REST API, or with a Resource Manager template. For detailed steps, select the relevant tab. -Once the Defender profile has been deployed, a default workspace will be automatically assigned. You can [assign a custom workspace](https://docs.microsoft.com/azure/defender-for-cloud/defender-for-containers-enable?tabs=aks-deploy-portal%2Ck8s-deploy-asc%2Ck8s-verify-asc%2Ck8s-remove-arc%2Caks-removeprofile-api&pivots=defender-for-container-aks#assign-a-custom-workspace) in place of the default workspace through Azure Policy. +Once the Defender profile has been deployed, a default workspace will be automatically assigned. You can [assign a custom workspace](../defender-for-containers-enable.md?pivots=defender-for-container-aks&tabs=aks-deploy-portal%2ck8s-deploy-asc%2ck8s-verify-asc%2ck8s-remove-arc%2caks-removeprofile-api#assign-a-custom-workspace) in place of the default workspace through Azure Policy. The Defender security profile is a preview feature. [!INCLUDE [Legalese](../../../includes/defender-for-cloud-preview-legal-text.md)] @@ -53,7 +55,7 @@ The Defender security profile is a preview feature. [!INCLUDE [Legalese](../../. ### Use the fix button from the Defender for Cloud recommendation -A streamlined, frictionless, process lets you use the Azure portal pages to enable the Defender for Cloud plan and setup auto provisioning of all the necessary components for defending your Kubernetes clusters at scale. +A streamlined, frictionless, process lets you use the Azure portal pages to enable the Defender for Cloud plan and setup auto provisioning of all the necessary components for defending your Kubernetes clusters at scale. A dedicated Defender for Cloud recommendation provides: @@ -73,7 +75,6 @@ A dedicated Defender for Cloud recommendation provides: 1. Select **Fix *[x]* resources**. 
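After the fix finishes deploying the profile, you can optionally confirm it from any machine with `kubectl` access to the cluster (this assumes your `kubeconfig` file already points to the cluster, as in the verification steps later in this article):

```console
kubectl get pods -n azuredefender
```

You should see a pod named `azuredefender-XXXXX` in the `Running` state; it might take a few minutes for the pods to appear.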
- ### [**REST API**](#tab/aks-deploy-rest) ### Use the REST API to deploy the Defender profile @@ -85,9 +86,9 @@ PUT https://management.azure.com/subscriptions/{{Subscription Id}}/resourcegroup ``` Request URI: `https://management.azure.com/subscriptions/{{SubscriptionId}}/resourcegroups/{{ResourceGroup}}/providers/Microsoft.ContainerService/managedClusters/{{ClusterName}}?api-version={{ApiVersion}}` - + Request query parameters: - + | Name | Description | Mandatory | |----------------|------------------------------------|-----------| | SubscriptionId | Cluster's subscription ID | Yes | @@ -95,9 +96,8 @@ Request query parameters: | ClusterName | Cluster's name | Yes | | ApiVersion | API version, must be >= 2021-07-01 | Yes | - Request Body: - + ```rest { "location": "{{Location}}", @@ -111,7 +111,7 @@ Request Body: } } ``` - + Request body parameters: | Name | Description | Mandatory | @@ -120,7 +120,61 @@ Request body parameters: | properties.securityProfile.azureDefender.enabled | Determines whether to enable or disable Microsoft Defender for Containers on the cluster | Yes | | properties.securityProfile.azureDefender.logAnalyticsWorkspaceResourceId | Log Analytics workspace Azure resource ID | Yes | +### [**Azure CLI**](#tab/k8s-deploy-cli) + +### Use Azure CLI to deploy the Defender extension + +1. Log in to Azure: + + ```azurecli + az login + az account set --subscription + ``` + + > [!IMPORTANT] + > Ensure that you use the same subscription ID for ```` as the one associated with your AKS cluster. + +1. Enable the feature flag in the CLI: + + ```azurecli + az feature register --namespace Microsoft.ContainerService --name AKS-AzureDefender + ``` + +1. Enable the Defender profile on your containers: + - Run the following command to create a new cluster with the Defender profile enabled: + + ```azurecli + az aks create --enable-defender --resource-group --name + ``` + + - Run the following command to enable the Defender profile on an existing cluster: + + ```azurecli + az aks update --enable-defender --resource-group --name + ``` + + A description of all the supported configuration settings on the Defender extension type is given below: + + | Property | Description | + |----------|-------------| + | logAnalyticsWorkspaceResourceID | **Optional**. Full resource ID of your own Log Analytics workspace.
                  When not provided, the default workspace of the region will be used.

                  To get the full resource ID, run the following command to display the list of workspaces in your subscriptions in the default JSON format:
                  ```az resource list --resource-type Microsoft.OperationalInsights/workspaces -o json```

                  The Log Analytics workspace resource ID has the following syntax:
                  /subscriptions/{your-subscription-id}/resourceGroups/{your-resource-group}/providers/Microsoft.OperationalInsights/workspaces/{your-workspace-name}.
                  Learn more in [Log Analytics workspaces](../../azure-monitor/logs/log-analytics-workspace-overview.md) | + + You can include these settings in a JSON file and specify the JSON file in the `az aks create` and `az aks update` commands with this parameter: `--defender-config `. The format of the JSON file must be: + + ```json + {"logAnalyticsWorkspaceResourceID": ""} + ``` + + Learn more about AKS CLI commands in [az aks](/cli/azure/aks). + +1. To verify that the profile was successfully added, run the following command on your machine with the `kubeconfig` file pointed to your cluster: + + ```console + kubectl get pods -n azuredefender + ``` + + When the profile is added, you should see a pod called `azuredefender-XXXXX` in `Running` state. It might take a few minutes for pods to be added. ### [**Resource Manager**](#tab/aks-deploy-arm) @@ -149,4 +203,4 @@ To install the 'SecurityProfile' on an existing cluster with Resource Manager: }, } } -``` \ No newline at end of file +``` diff --git a/articles/defender-for-cloud/includes/defender-for-containers-enable-plan-arc.md b/articles/defender-for-cloud/includes/defender-for-containers-enable-plan-arc.md index 8b7be0f31e29f..8442fcbb74d26 100644 --- a/articles/defender-for-cloud/includes/defender-for-containers-enable-plan-arc.md +++ b/articles/defender-for-cloud/includes/defender-for-containers-enable-plan-arc.md @@ -12,7 +12,7 @@ ms.date: 05/12/2022 1. From Defender for Cloud's menu, open the [Environment settings page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/EnvironmentSettings) and select the relevant subscription. -1. In the [Defender plans page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/pricingTier), enable **Defender for Containers** +1. In the [Defender plans page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/pricingTier), enable **Defender for Containers**. > [!TIP] > If the subscription already has Defender for Kubernetes and/or Defender for container registries enabled, an update notice is shown. Otherwise, the only option will be **Defender for Containers**. @@ -21,14 +21,16 @@ ms.date: 05/12/2022 1. By default, when enabling the plan through the Azure portal, [Microsoft Defender for Containers](../defender-for-containers-introduction.md) is configured to auto provision (automatically install) required components to provide the protections offered by plan, including the assignment of a default workspace. - Optionally, you can modify this configuration from the [Defender plans page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/pricingTier) or from the [Auto provisioning page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/dataCollection) on the **Microsoft Defender for Containers components (preview)** row: + If you want to disable auto provisioning during the onboading process, select **Edit configuration** for the **Containers** plan. This opens the Advanced options, where you can disable auto provisioning for each component. 
+ + In addition, you can modify this configuration from the [Defender plans page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/pricingTier) or from the [Auto provisioning page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/dataCollection) on the **Microsoft Defender for Containers components (preview)** row: :::image type="content" source="../media/defender-for-containers/auto-provisioning-defender-for-containers.png" alt-text="Screenshot of the auto provisioning options for Microsoft Defender for Containers." lightbox="../media/defender-for-containers/auto-provisioning-defender-for-containers.png"::: > [!NOTE] > If you choose to **disable the plan** at any time after enabling it through the portal as shown above, you'll need to manually remove Defender for Containers components deployed on your clusters. - You can [assign a custom workspace](https://docs.microsoft.com/azure/defender-for-cloud/defender-for-containers-enable?tabs=aks-deploy-portal%2Ck8s-deploy-asc%2Ck8s-verify-asc%2Ck8s-remove-arc%2Caks-removeprofile-api&pivots=defender-for-container-arc#assign-a-custom-workspace) through Azure Policy. + You can [assign a custom workspace](../defender-for-containers-enable.md?tabs=aks-deploy-portal%2Ck8s-deploy-asc%2Ck8s-verify-asc%2Ck8s-remove-arc%2Caks-removeprofile-api&pivots=defender-for-container-arc#assign-a-custom-workspace) through Azure Policy. 1. If you disable the auto provisioning of any component, you can easily deploy the component to one or more clusters using the appropriate recommendation: @@ -39,6 +41,7 @@ ms.date: 05/12/2022 ## Prerequisites Before deploying the extension, ensure you: + - [Connect the Kubernetes cluster to Azure Arc](../../azure-arc/kubernetes/quickstart-connect-cluster.md) - Complete the [pre-requisites listed under the generic cluster extensions documentation](../../azure-arc/kubernetes/extensions.md#prerequisites). @@ -72,7 +75,6 @@ A dedicated Defender for Cloud recommendation provides: :::image type="content" source="../media/defender-for-kubernetes-azure-arc/security-center-deploy-extension.gif" alt-text="Deploy Defender extension for Azure Arc with Defender for Cloud's 'fix' option."::: - ### [**Azure CLI**](#tab/k8s-deploy-cli) ### Use Azure CLI to deploy the Defender extension @@ -119,12 +121,13 @@ You can use the **azure-defender-extension-arm-template.json** Resource Manager ### [**REST API**](#tab/k8s-deploy-api) -### Use REST API to deploy the Defender extension +### Use REST API to deploy the Defender extension To use the REST API to deploy the Defender extension, you'll need a Log Analytics workspace on your subscription. Learn more in [Log Analytics workspaces](../../azure-monitor/logs/log-analytics-workspace-overview.md). > [!TIP] > The simplest way to use the API to deploy the Defender extension is with the supplied **Postman Collection JSON** example from Defender for Cloud's [installation examples](https://aka.ms/kubernetes-extension-installation-examples). 
+ - To modify the Postman Collection JSON, or to manually deploy the extension with the REST API, run the following PUT command: ```rest @@ -139,12 +142,11 @@ To use the REST API to deploy the Defender extension, you'll need a Log Analytic |Resource Group | Path | True | String | Name of the resource group containing your Azure Arc-enabled Kubernetes resource | | Cluster Name | Path | True | String | Name of your Azure Arc-enabled Kubernetes resource | - - For **Authentication**, your header must have a Bearer token (as with other Azure APIs). To get a bearer token, run the following command: `az account get-access-token --subscription ` Use the following structure for the body of your message: + ```json { "properties": { @@ -162,10 +164,10 @@ To use the REST API to deploy the Defender extension, you'll need a Log Analytic Description of the properties is given below: - | Property | Description | + | Property | Description | | -------- | ----------- | | logAnalytics.workspaceId | Workspace ID of the Log Analytics resource | - | logAnalytics.key | Key of the Log Analytics resource | + | logAnalytics.key | Key of the Log Analytics resource | | auditLogPath | **Optional**. The full path to the audit log files. The default value is ``/var/log/kube-apiserver/audit.log`` | --- @@ -186,7 +188,6 @@ To verify that your cluster has the Defender extension installed on it, follow t 1. Check that the cluster on which you deployed the extension is listed as **Healthy**. - ### [**Azure portal - Azure Arc**](#tab/k8s-verify-arc) ### Use the Azure Arc pages to verify the status of your extension @@ -201,7 +202,6 @@ To verify that your cluster has the Defender extension installed on it, follow t :::image type="content" source="../media/defender-for-kubernetes-azure-arc/extension-details-page.png" alt-text="Full details of an Azure Arc extension on a Kubernetes cluster."::: - ### [**Azure CLI**](#tab/k8s-verify-cli) ### Use Azure CLI to verify that the extension is deployed @@ -216,9 +216,9 @@ To verify that your cluster has the Defender extension installed on it, follow t > [!NOTE] > It might show "installState": "Pending" for the first few minutes. - + 1. If the state shows **Installed**, run the following command on your machine with the `kubeconfig` file pointed to your cluster to check that a pod called "azuredefender-XXXXX" is in 'Running' state: - + ```console kubectl get pods -n azuredefender ``` @@ -247,5 +247,3 @@ To confirm a successful deployment, or to validate the status of your extension ``` --- - - diff --git a/articles/defender-for-cloud/includes/defender-for-containers-override-faq.md b/articles/defender-for-cloud/includes/defender-for-containers-override-faq.md index 5eb26c8bd35b2..d567d3dcc04b2 100644 --- a/articles/defender-for-cloud/includes/defender-for-containers-override-faq.md +++ b/articles/defender-for-cloud/includes/defender-for-containers-override-faq.md @@ -15,7 +15,7 @@ ms.date: 05/12/2022 ### How can I use my existing Log Analytics workspace? -You can use your existing Log Analytics workspace by following the steps in the [Assign a custom workspace](https://docs.microsoft.com/azure/defender-for-cloud/defender-for-containers-enable?tabs=aks-deploy-portal%2Ck8s-deploy-asc%2Ck8s-verify-asc%2Ck8s-remove-arc%2Caks-removeprofile-api&pivots=defender-for-container-aks#assign-a-custom-workspace) workspace section of this article. 
+You can use your existing Log Analytics workspace by following the steps in the [Assign a custom workspace](../defender-for-containers-enable.md?pivots=defender-for-container-aks&tabs=aks-deploy-portal%2ck8s-deploy-asc%2ck8s-verify-asc%2ck8s-remove-arc%2caks-removeprofile-api#assign-a-custom-workspace) workspace section of this article. ### Can I delete the default workspaces created by Defender for Cloud? diff --git a/articles/defender-for-cloud/includes/defender-for-containers-remove-profile.md b/articles/defender-for-cloud/includes/defender-for-containers-remove-profile.md index 476a2b06f0676..1d4e814de91b2 100644 --- a/articles/defender-for-cloud/includes/defender-for-containers-remove-profile.md +++ b/articles/defender-for-cloud/includes/defender-for-containers-remove-profile.md @@ -9,7 +9,7 @@ ms.author: elkrieger To remove this - or any - Defender for Cloud extension, it's not enough to turn off auto provisioning: -- **Enabling** auto provisioning, potentially impacts *existing* and *future* machines. +- **Enabling** auto provisioning, potentially impacts *existing* and *future* machines. - **Disabling** auto provisioning for an extension, only affects the *future* machines - nothing is uninstalled by disabling auto provisioning. Nevertheless, to ensure the Defender for Containers components aren't automatically provisioned to your resources from now on, disable auto provisioning of the extensions as explained in [Configure auto provisioning for agents and extensions from Microsoft Defender for Cloud](../enable-data-collection.md). @@ -18,7 +18,7 @@ You can remove the profile using the REST API or a Resource Manager template as ### [**REST API**](#tab/aks-removeprofile-api) -### Use REST API to remove the Defender profile from AKS +### Use REST API to remove the Defender profile from AKS To remove the profile using the REST API, run the following PUT command: @@ -33,9 +33,8 @@ https://management.azure.com/subscriptions/{{SubscriptionId}}/resourcegroups/{{R | ClusterName | Cluster's name | Yes | | ApiVersion | API version, must be >= 2021-07-01 | Yes | - Request body: - + ```rest { "location": "{{Location}}", @@ -48,7 +47,7 @@ Request body: } } ``` - + Request body parameters: | Name | Description | Mandatory | @@ -56,7 +55,27 @@ Request body parameters: | location | Cluster's location | Yes | | properties.securityProfile.azureDefender.enabled | Determines whether to enable or disable Microsoft Defender for Containers on the cluster | Yes | +### [**Azure CLI**](#tab/k8s-remove-cli) + +### Use Azure CLI to remove the Defender profile + +1. Remove the Microsoft Defender for with the following commands: + ```azurecli + az login + az account set --subscription + az aks update --disable-defender --resource-group --name + ``` + + Removing the profile may take a few minutes. + +1. To verify that the profile was successfully removed, run the following command: + + ```console + kubectl get pods -n azuredefender + ``` + + When the profile is removed, you should see that no pods are returned in the `get pods` command. It might take a few minutes for the pods to be deleted. 
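For convenience, the removal flow can be run end to end as shown in the following sketch. The subscription ID, resource group, and cluster name are illustrative placeholders only; substitute your own values:

```azurecli
# Sign in and select the subscription that contains the AKS cluster (placeholder ID).
az login
az account set --subscription "00000000-0000-0000-0000-000000000000"

# Remove the Defender profile from the cluster (placeholder names); this can take a few minutes.
az aks update --disable-defender --resource-group myResourceGroup --name myAKSCluster

# Confirm that no pods remain in the azuredefender namespace.
kubectl get pods -n azuredefender
```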
### [**Resource Manager**](#tab/aks-removeprofile-resource-manager) @@ -69,7 +88,7 @@ To use Azure Resource Manager to remove the Defender profile, you'll need a Log The relevant template and parameters to remove the Defender profile from AKS are: -``` +```json { "type": "Microsoft.ContainerService/managedClusters", "apiVersion": "2021-07-01", diff --git a/articles/defender-for-cloud/index.yml b/articles/defender-for-cloud/index.yml index e0325b7423d65..3dc488d9eccee 100644 --- a/articles/defender-for-cloud/index.yml +++ b/articles/defender-for-cloud/index.yml @@ -32,7 +32,6 @@ landingContent: url: get-started.md - text: What are security policies, initiatives, and recommendations? url: security-policy-concept.md - # Card - title: Stay current @@ -43,11 +42,11 @@ landingContent: url: release-notes.md - text: Important upcoming changes url: upcoming-changes.md - - text: Connect AWS accounts with environment settings - url: quickstart-onboard-aws.md + - text: Deploy Microsoft Defender for Endpoint with Defender for Servers Plan 1 + url: defender-for-servers-introduction.md - text: Threat and vulnerability management vulnerability assessment url: deploy-vulnerability-assessment-tvm.md - + # Card - title: Get started linkLists: @@ -90,4 +89,16 @@ landingContent: - text: Implement adaptive network hardening url: adaptive-network-hardening.md - text: Use the Workload protections - url: workload-protections-dashboard.md \ No newline at end of file + url: workload-protections-dashboard.md + + # Card + - title: Defend multicloud resources + linkLists: + - linkListType: how-to-guide + links: + - text: Use Defender for Cloud to protect multicloud resources + url: multicloud.yml + - linkListType: video + links: + - text: Protecting multicloud environments (AWS & GCP) + url: https://youtu.be/bVsifz3ZyPY \ No newline at end of file diff --git a/articles/defender-for-cloud/information-protection.md b/articles/defender-for-cloud/information-protection.md index a6b1e75681627..c8169783c4ca7 100644 --- a/articles/defender-for-cloud/information-protection.md +++ b/articles/defender-for-cloud/information-protection.md @@ -2,7 +2,7 @@ title: Prioritize security actions by data sensitivity - Microsoft Defender for Cloud description: Use Microsoft Purview's data sensitivity classifications in Microsoft Defender for Cloud ms.topic: overview -ms.date: 11/09/2021 +ms.date: 04/27/2022 --- # Prioritize security actions by data sensitivity @@ -70,6 +70,13 @@ A graph shows the number of recommendations and alerts by classified resource ty :::image type="content" source="./media/information-protection/overview-dashboard-information-protection.png" alt-text="Screenshot of the information protection tile in Microsoft Defender for Cloud's overview dashboard." lightbox="./media/information-protection/overview-dashboard-information-protection.png"::: +## Learn more + +If you would like to learn more from the product manager about Microsoft Defender for Cloud's [integration with Azure Purview](episode-two.md). + +You can also check out the following blog: + +- [Secure sensitive data in your cloud resources](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/secure-sensitive-data-in-your-cloud-resources/ba-p/2918646). 
## Next steps diff --git a/articles/defender-for-cloud/media/defender-for-containers/Edit auto provisioning configuration.png b/articles/defender-for-cloud/media/defender-for-containers/Edit auto provisioning configuration.png new file mode 100644 index 0000000000000..140a9c31bdbff Binary files /dev/null and b/articles/defender-for-cloud/media/defender-for-containers/Edit auto provisioning configuration.png differ diff --git a/articles/defender-for-cloud/media/enable-enhanced-security/disable-plans.png b/articles/defender-for-cloud/media/enable-enhanced-security/disable-plans.png index 5e76736123b05..d7d56758f87c8 100644 Binary files a/articles/defender-for-cloud/media/enable-enhanced-security/disable-plans.png and b/articles/defender-for-cloud/media/enable-enhanced-security/disable-plans.png differ diff --git a/articles/defender-for-cloud/media/enable-enhanced-security/get-started-upgrade-tab.png b/articles/defender-for-cloud/media/enable-enhanced-security/get-started-upgrade-tab.png index 5d61eac8c8a96..1308b68a22d9a 100644 Binary files a/articles/defender-for-cloud/media/enable-enhanced-security/get-started-upgrade-tab.png and b/articles/defender-for-cloud/media/enable-enhanced-security/get-started-upgrade-tab.png differ diff --git a/articles/defender-for-cloud/media/enable-enhanced-security/upgrade-selected-workspaces-and-subscriptions.png b/articles/defender-for-cloud/media/enable-enhanced-security/upgrade-selected-workspaces-and-subscriptions.png index b0d7ace237061..43f6f32a20d1e 100644 Binary files a/articles/defender-for-cloud/media/enable-enhanced-security/upgrade-selected-workspaces-and-subscriptions.png and b/articles/defender-for-cloud/media/enable-enhanced-security/upgrade-selected-workspaces-and-subscriptions.png differ diff --git a/articles/defender-for-cloud/media/enhanced-security-features-overview/connect-workspace.png b/articles/defender-for-cloud/media/enhanced-security-features-overview/connect-workspace.png new file mode 100644 index 0000000000000..4ae440b123946 Binary files /dev/null and b/articles/defender-for-cloud/media/enhanced-security-features-overview/connect-workspace.png differ diff --git a/articles/defender-for-cloud/media/enhanced-security-features-overview/data-usage.png b/articles/defender-for-cloud/media/enhanced-security-features-overview/data-usage.png new file mode 100644 index 0000000000000..3ce94697d67e8 Binary files /dev/null and b/articles/defender-for-cloud/media/enhanced-security-features-overview/data-usage.png differ diff --git a/articles/defender-for-cloud/media/enhanced-security-features-overview/drop-down-icon.png b/articles/defender-for-cloud/media/enhanced-security-features-overview/drop-down-icon.png new file mode 100644 index 0000000000000..b3a68546f033f Binary files /dev/null and b/articles/defender-for-cloud/media/enhanced-security-features-overview/drop-down-icon.png differ diff --git a/articles/defender-for-cloud/media/enhanced-security-features-overview/estimated-costs.png b/articles/defender-for-cloud/media/enhanced-security-features-overview/estimated-costs.png new file mode 100644 index 0000000000000..adba6728a20d3 Binary files /dev/null and b/articles/defender-for-cloud/media/enhanced-security-features-overview/estimated-costs.png differ diff --git a/articles/defender-for-cloud/media/enhanced-security-features-overview/pricing-tier-page.png b/articles/defender-for-cloud/media/enhanced-security-features-overview/pricing-tier-page.png index bba3d7ed5f7d4..1d3528189a963 100644 Binary files 
a/articles/defender-for-cloud/media/enhanced-security-features-overview/pricing-tier-page.png and b/articles/defender-for-cloud/media/enhanced-security-features-overview/pricing-tier-page.png differ diff --git a/articles/defender-for-cloud/media/enhanced-security-features-overview/select-run.png b/articles/defender-for-cloud/media/enhanced-security-features-overview/select-run.png new file mode 100644 index 0000000000000..2eb1060a4471a Binary files /dev/null and b/articles/defender-for-cloud/media/enhanced-security-features-overview/select-run.png differ diff --git a/articles/defender-for-cloud/media/governance-rules/add-governance-rules.png b/articles/defender-for-cloud/media/governance-rules/add-governance-rules.png new file mode 100644 index 0000000000000..2c2cb5b3e2b87 Binary files /dev/null and b/articles/defender-for-cloud/media/governance-rules/add-governance-rules.png differ diff --git a/articles/defender-for-cloud/media/governance-rules/change-governance-owner-eta.png b/articles/defender-for-cloud/media/governance-rules/change-governance-owner-eta.png new file mode 100644 index 0000000000000..323151722f1e8 Binary files /dev/null and b/articles/defender-for-cloud/media/governance-rules/change-governance-owner-eta.png differ diff --git a/articles/defender-for-cloud/media/governance-rules/governance-in-recommendations.png b/articles/defender-for-cloud/media/governance-rules/governance-in-recommendations.png new file mode 100644 index 0000000000000..394c798813d62 Binary files /dev/null and b/articles/defender-for-cloud/media/governance-rules/governance-in-recommendations.png differ diff --git a/articles/defender-for-cloud/media/governance-rules/governance-in-security-posture.png b/articles/defender-for-cloud/media/governance-rules/governance-in-security-posture.png new file mode 100644 index 0000000000000..6c708faf45faf Binary files /dev/null and b/articles/defender-for-cloud/media/governance-rules/governance-in-security-posture.png differ diff --git a/articles/defender-for-cloud/media/governance-rules/governance-in-workbook.png b/articles/defender-for-cloud/media/governance-rules/governance-in-workbook.png new file mode 100644 index 0000000000000..03d4b7d2bfbed Binary files /dev/null and b/articles/defender-for-cloud/media/governance-rules/governance-in-workbook.png differ diff --git a/articles/defender-for-cloud/media/quickstart-onboard-gcp/auto-provision-databases-screen.png b/articles/defender-for-cloud/media/quickstart-onboard-gcp/auto-provision-databases-screen.png new file mode 100644 index 0000000000000..a595be13da9dd Binary files /dev/null and b/articles/defender-for-cloud/media/quickstart-onboard-gcp/auto-provision-databases-screen.png differ diff --git a/articles/defender-for-cloud/media/quickstart-onboard-gcp/auto-provision-screen.png b/articles/defender-for-cloud/media/quickstart-onboard-gcp/auto-provision-screen.png index d4bc1f22c0612..9d45cefd161b8 100644 Binary files a/articles/defender-for-cloud/media/quickstart-onboard-gcp/auto-provision-screen.png and b/articles/defender-for-cloud/media/quickstart-onboard-gcp/auto-provision-screen.png differ diff --git a/articles/defender-for-cloud/media/release-notes/auto-provision.png b/articles/defender-for-cloud/media/release-notes/auto-provision.png index efce799408dbe..c2e7c291d7c46 100644 Binary files a/articles/defender-for-cloud/media/release-notes/auto-provision.png and b/articles/defender-for-cloud/media/release-notes/auto-provision.png differ diff --git 
a/articles/defender-for-cloud/media/release-notes/ip-address-filter-for-alerts.png b/articles/defender-for-cloud/media/release-notes/ip-address-filter-for-alerts.png new file mode 100644 index 0000000000000..24979fb429326 Binary files /dev/null and b/articles/defender-for-cloud/media/release-notes/ip-address-filter-for-alerts.png differ diff --git a/articles/defender-for-cloud/media/review-security-recommendations/recommendation-details-page-expanded.png b/articles/defender-for-cloud/media/review-security-recommendations/recommendation-details-page-expanded.png deleted file mode 100644 index 3ffd239739f75..0000000000000 Binary files a/articles/defender-for-cloud/media/review-security-recommendations/recommendation-details-page-expanded.png and /dev/null differ diff --git a/articles/defender-for-cloud/media/review-security-recommendations/recommendation-details-page.png b/articles/defender-for-cloud/media/review-security-recommendations/recommendation-details-page.png deleted file mode 100644 index 3ffd239739f75..0000000000000 Binary files a/articles/defender-for-cloud/media/review-security-recommendations/recommendation-details-page.png and /dev/null differ diff --git a/articles/defender-for-cloud/media/security-policy-concept/recommendation-details-page.png b/articles/defender-for-cloud/media/security-policy-concept/recommendation-details-page.png new file mode 100644 index 0000000000000..7950b548608ca Binary files /dev/null and b/articles/defender-for-cloud/media/security-policy-concept/recommendation-details-page.png differ diff --git a/articles/defender-for-cloud/multicloud.yml b/articles/defender-for-cloud/multicloud.yml new file mode 100644 index 0000000000000..2a688ba7de18a --- /dev/null +++ b/articles/defender-for-cloud/multicloud.yml @@ -0,0 +1,110 @@ +### YamlMime:Landing + +title: Defender for Cloud for your multicloud environment # < 60 chars +summary: Microsoft Defender for Cloud provides unified security management and advanced threat protection for multicloud environments. # < 160 chars + +metadata: + title: The Defender for Cloud multicloud solution # Required; page title displayed in search results. Include the brand. < 60 chars. + description: See all of the docs that relate to multicloud scenarios in Microsoft Defender for Cloud on one page. # Required; article description that is displayed in search results. < 160 chars. 
+ services: defender-for-cloud + author: bmansheim + ms.author: benmansheim + manager: raynew + ms.topic: landing-page # Required + ms.collection: collection + ms.date: 05/22/2022 + +# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new + +landingContent: +# Cards and links should be based on top customer tasks or top subjects +# Start card title with a verb + # Card + - title: About multicloud deployments + linkLists: + - linkListType: overview + links: + - text: Introduction to hybrid and multicloud + url: https://docs.microsoft.com/en-us/azure/cloud-adoption-framework/scenarios/hybrid/ + - text: Why protect multicloud resources with Microsoft Defender for Cloud + url: https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/protect-your-google-cloud-workloads-with-microsoft-defender-for/ba-p/3073360 + - linkListType: how-to-guide + links: + - text: Connect your non-Azure machines to Defender for Cloud + url: quickstart-onboard-machines.md?pivots=azure-arc + - text: Enable enhanced security for your multicloud resources + url: enable-enhanced-security.md + + # Card + - title: Defend Amazon AWS resources + linkLists: + - linkListType: overview + links: + - text: Protect your AWS virtual machines with Defender for Servers + url: defender-for-servers-introduction.md + - text: Supported features for AWS virtual machines + url: supported-machines-endpoint-solutions-clouds-servers.md?tabs=features-multicloud#supported-features-for-virtual-machines-and-servers + - text: Protect your AWS EKS containers with Defender for Containers + url: defender-for-containers-introduction.md + - text: Supported features for AWS EKS containers + url: supported-machines-endpoint-solutions-clouds-containers.md?tabs=aws-eks + - linkListType: get-started + links: + - text: Connect your AWS accounts to Microsoft Defender for Cloud + url: quickstart-onboard-aws.md?pivots=env-settings + - text: Enable Defender for Containers for your AWS EKS containers + url: defender-for-containers-enable.md?tabs=aks-deploy-portal%2Ck8s-deploy-asc%2Ck8s-verify-asc%2Ck8s-remove-arc%2Caks-removeprofile-api&pivots=defender-for-container-eks + - linkListType: how-to-guide + links: + - text: Secure your AWS management ports with just-in-time access + url: just-in-time-access-usage.md?tabs=jit-config-asc%2Cjit-request-asc + - text: Protect your AWS virtual machines with Microsoft Defender for Endpoint + url: integration-defender-for-endpoint.md?tabs=windows + - text: Harden your AWS Docker hosts + url: harden-docker-hosts.md + - text: Monitor file changes that might indicate an attack + url: file-integrity-monitoring-overview.md + - text: Scan your AWS virtual machines for vulnerabilities + url: deploy-vulnerability-assessment-vm.md + - text: Create custom assessments and standards for AWS workloads + url: https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/custom-assessments-and-standards-in-microsoft-defender-for-cloud/ba-p/3066575 + - linkListType: video + links: + - text: How to connect AWS to Microsoft Defender for Cloud + url: https://youtu.be/LHwgEFXT3kQ + - linkListType: reference + links: + - text: Security recommendations for AWS resources + url: recommendations-reference-aws.md + + # Card + - title: Defend Google GCP resources + linkLists: + - linkListType: overview + links: + - text: Protect your GCP virtual machines with Defender for Servers + url: defender-for-servers-introduction.md + - 
text: Supported features for GCP virtual machines + url: supported-machines-endpoint-solutions-clouds-servers.md?tabs=features-multi-cloud#supported-features-for-virtual-machines-and-servers + - text: Protect your GCP GKE containers with Defender for Containers + url: defender-for-containers-introduction.md + - text: Supported features for GCP GKE containers + url: supported-machines-endpoint-solutions-clouds-containers.md?tabs=gcp-gke + - linkListType: get-started + links: + - text: Connect your GCP accounts to Microsoft Defender for Cloud + url: quickstart-onboard-gcp.md?pivots=env-settings + - text: Enable Defender for Containers for your GCP GKE containers + url: defender-for-containers-enable.md?tabs=aks-deploy-portal%2Ck8s-deploy-asc%2Ck8s-verify-asc%2Ck8s-remove-arc%2Caks-removeprofile-api&pivots=defender-for-container-gke + - linkListType: how-to-guide + links: + - text: Create custom assessments and standards for GCP workloads + url: https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/custom-assessments-and-standards-in-microsoft-defender-for-cloud/ba-p/3251252 + - linkListType: video + links: + - text: How to connect GCP to Microsoft Defender for Cloud + url: https://youtu.be/6BpXG3EHoMo + # - linkListType: reference + # links: + # - text: Security recommendations for GCP resources + # url: recommendations-reference-gcp.md diff --git a/articles/defender-for-cloud/quickstart-enable-defender-for-cosmos.md b/articles/defender-for-cloud/quickstart-enable-defender-for-cosmos.md index 5fde77db3c112..ac17ea46661b9 100644 --- a/articles/defender-for-cloud/quickstart-enable-defender-for-cosmos.md +++ b/articles/defender-for-cloud/quickstart-enable-defender-for-cosmos.md @@ -3,7 +3,7 @@ title: Enable Microsoft Defender for Azure Cosmos DB description: Learn how to enable Microsoft Defender for Azure Cosmos DB's enhanced security features. titleSuffix: Microsoft Defender for Azure Cosmos DB ms.topic: quickstart -ms.date: 02/28/2022 +ms.date: 06/07/2022 --- # Quickstart: Enable Microsoft Defender for Azure Cosmos DB @@ -86,7 +86,7 @@ You can enable Microsoft Defender for Cloud on a specific Azure Cosmos DB accoun ### [ARM template](#tab/arm-template) -Use an Azure Resource Manager template to deploy an Azure Cosmos DB account with Microsoft Defender for Azure Cosmos DB enabled. For more information, see [Create an Azure Cosmos DB account with Microsoft Defender for Azure Cosmos DB enabled](https://azure.microsoft.com/resources/templates/?term=cosmosdb-advanced-threat-protection-create-account). +Use an Azure Resource Manager template to deploy an Azure Cosmos DB account with Microsoft Defender for Azure Cosmos DB enabled. For more information, see [Create an Azure Cosmos DB account with Microsoft Defender for Azure Cosmos DB enabled](https://azure.microsoft.com/resources/templates/microsoft-defender-cosmosdb-create-account/). 
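If you download a copy of that quickstart template, one way to deploy it is with the Azure CLI. This is a sketch only; the resource group, template file name, and parameter name below are illustrative placeholders rather than values defined by the template:

```azurecli
# Deploy a local copy of the quickstart template into an existing resource group.
az deployment group create \
  --resource-group myResourceGroup \
  --template-file azuredeploy.json \
  --parameters accountName=mycosmosdbaccount
```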
--- diff --git a/articles/defender-for-cloud/quickstart-onboard-aws.md b/articles/defender-for-cloud/quickstart-onboard-aws.md index 566f635ffb6fa..a30362caaf1df 100644 --- a/articles/defender-for-cloud/quickstart-onboard-aws.md +++ b/articles/defender-for-cloud/quickstart-onboard-aws.md @@ -1,10 +1,8 @@ --- title: Connect your AWS account to Microsoft Defender for Cloud description: Defend your AWS resources with Microsoft Defender for Cloud -author: bmansheim -ms.author: benmansheim ms.topic: quickstart -ms.date: 05/17/2022 +ms.date: 06/02/2022 zone_pivot_groups: connect-aws-accounts ms.custom: mode-other --- @@ -23,6 +21,7 @@ To protect your AWS-based resources, you can connect an account with one of two - **Defender for Cloud's CSPM features** extend to your AWS resources. This agentless plan assesses your AWS resources according to AWS-specific security recommendations and these are included in your secure score. The resources will also be assessed for compliance with built-in standards specific to AWS (AWS CIS, AWS PCI DSS, and AWS Foundational Security Best Practices). Defender for Cloud's [asset inventory page](asset-inventory.md) is a multicloud enabled feature helping you manage your AWS resources alongside your Azure resources. - **Microsoft Defender for Containers** brings threat detection and advanced defenses to your Amazon EKS clusters. This plan includes Kubernetes threat protection, behavioral analytics, Kubernetes best practices, admission control recommendations and more. You can view the full list of available features in [Defender for Containers feature availability](supported-machines-endpoint-solutions-clouds-containers.md). - **Microsoft Defender for Servers** brings threat detection and advanced defenses to your Windows and Linux EC2 instances. This plan includes the integrated license for Microsoft Defender for Endpoint, security baselines and OS level assessments, vulnerability assessment scanning, adaptive application controls (AAC), file integrity monitoring (FIM), and more. You can view the full list of available features in the [feature availability table](supported-machines-endpoint-solutions-clouds-servers.md?tabs=tab/features-multicloud). + - **Microsoft Defender for SQL** brings threat detection and advanced defenses to your SQL Servers running on AWS EC2, AWS RDS Custom for SQL Server. This plan includes the advanced threat protection and vulnerability assessment scanning. You can view the [full list of available features](defender-for-sql-introduction.md). For a reference list of all the recommendations Defender for Cloud can provide for AWS resources, see [Security recommendations for AWS resources - a reference guide](recommendations-reference-aws.md). @@ -38,7 +37,7 @@ This screenshot shows AWS accounts displayed in Defender for Cloud's [overview d |Aspect|Details| |----|:----| |Release state:|General Availability (GA)| -|Pricing:| The **CSPM plan** is free.
                  The **[Defender for Containers](defender-for-containers-introduction.md)** plan for AWS is billed at the same price as for Azure resources.
                  For every AWS machine connected to Azure with [Azure Arc-enabled servers](../azure-arc/servers/overview.md), the **Defender for Servers** plan is billed at the same price as the [Microsoft Defender for Servers](defender-for-servers-introduction.md) plan for Azure machines. If an AWS EC2 doesn't have the Azure Arc agent deployed, you won't be charged for that machine.| +|Pricing:|The **CSPM plan** is free.
The **[Defender for SQL](defender-for-sql-introduction.md)** plan is billed at the same price as for Azure resources.
The **[Defender for Containers](defender-for-containers-introduction.md)** plan is free during the preview. After the preview, it will be billed for AWS at the same price as for Azure resources.
                  For every AWS machine connected to Azure with [Azure Arc-enabled servers](../azure-arc/servers/overview.md), the **Defender for Servers** plan is billed at the same price as the [Microsoft Defender for Servers](defender-for-servers-introduction.md) plan for Azure machines. If an AWS EC2 doesn't have the Azure Arc agent deployed, you won't be charged for that machine.| |Required roles and permissions:|**Contributor** permission for the relevant Azure subscription.
                  **Administrator** on the AWS account.| |Clouds:|:::image type="icon" source="./media/icons/yes-icon.png"::: Commercial clouds
                  :::image type="icon" source="./media/icons/no-icon.png"::: National (Azure Government, Azure China 21Vianet)| @@ -51,6 +50,26 @@ This screenshot shows AWS accounts displayed in Defender for Cloud's [overview d - At least one Amazon EKS cluster with permission to access to the EKS K8s API server. If you need to create a new EKS cluster, follow the instructions in [Getting started with Amazon EKS – eksctl](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html). - The resource capacity to create a new SQS queue, Kinesis Fire Hose delivery stream, and S3 bucket in the cluster's region. +- **To enable the Defender for SQL plan**, you'll need: + + - Microsoft Defender for SQL enabled on your subscription. Learn how to [enable protection on all of your databases](quickstart-enable-database-protections.md). + + - An active AWS account, with EC2 instances running SQL server or RDS Custom for SQL Server. + + - Azure Arc for servers installed on your EC2 instances/RDS Custom for SQL Server. + - (Recommended) Use the auto provisioning process to install Azure Arc on all of your existing and future EC2 instances. + + Auto provisioning is managed by AWS Systems Manager (SSM) using the SSM agent. Some Amazon Machine Images (AMIs) already have the SSM agent pre-installed. If you already have the SSM agent pre-installed, the AMI's are listed in [AMIs with SSM Agent preinstalled](https://docs.aws.amazon.com/systems-manager/latest/userguide/ssm-agent-technical-details.html#ami-preinstalled-agent). If your EC2 instances don't have the SSM Agent, you will need to install it using either of the following relevant instructions from Amazon: + - [Install SSM Agent for a hybrid environment (Windows)](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-install-managed-win.html) + + > [!NOTE] + > To enable the Azure Arc auto-provisioning, you'll need **Owner** permission on the relevant Azure subscription. + + - Additional extensions should be enabled on the Arc-connected machines. + - Log Analytics (LA) agent on Arc machines, and ensure the selected workspace has security solution installed. The LA agent is currently configured in the subscription level. All of your multicloud AWS accounts and GCP projects under the same subscription will inherit the subscription settings. + + Learn how to [configure auto-provisioning on your subscription](enable-data-collection.md#configure-auto-provisioning-for-agents-and-extensions-from-microsoft-defender-for-cloud). + - **To enable the Defender for Servers plan**, you'll need: - Microsoft Defender for Servers enabled on your subscription. Learn how to enable plans in [Enable enhanced security features](enable-enhanced-security.md). @@ -133,7 +152,7 @@ If you have any existing connectors created with the classic cloud connectors ex - (Optional) Select **Configure**, to edit the configuration as required. -1. By default the **Containers** plan is set to **On**. This is necessary to have Defender for Containers protect your AWS EKS clusters. Ensure you have fulfilled the [network requirements](https://docs.microsoft.com/azure/defender-for-cloud/defender-for-containers-enable?tabs=aks-deploy-portal%2Ck8s-deploy-asc%2Ck8s-verify-asc%2Ck8s-remove-arc%2Caks-removeprofile-api&pivots=defender-for-container-eks&source=docs#network-requirements) for the Defender for Containers plan. +1. By default the **Containers** plan is set to **On**. This is necessary to have Defender for Containers protect your AWS EKS clusters. 
Ensure you have fulfilled the [network requirements](./defender-for-containers-enable.md?pivots=defender-for-container-eks&source=docs&tabs=aks-deploy-portal%2ck8s-deploy-asc%2ck8s-verify-asc%2ck8s-remove-arc%2caks-removeprofile-api#network-requirements) for the Defender for Containers plan. > [!Note] > Azure Arc-enabled Kubernetes, the Defender Arc extension, and the Azure Policy Arc extension should be installed. Use the dedicated Defender for Cloud recommendations to deploy the extensions (and Arc, if necessary) as explained in [Protect Amazon Elastic Kubernetes Service clusters](defender-for-containers-enable.md?tabs=defender-for-container-eks). @@ -141,6 +160,10 @@ If you have any existing connectors created with the classic cloud connectors ex - (Optional) Select **Configure**, to edit the configuration as required. If you choose to disable this configuration, the `Threat detection (control plane)` feature will be disabled. Learn more about the [feature availability](supported-machines-endpoint-solutions-clouds-containers.md). +1. By default the **Databases** plan is set to **On**. This is necessary to extend Defender for SQL's coverage to your AWS EC2 and RDS Custom for SQL Server. + + - (Optional) Select **Configure**, to edit the configuration as required. We recommend you leave it set to the default configuration. + 1. Select **Next: Configure access**. 1. Download the CloudFormation template. @@ -321,10 +344,19 @@ For other operating systems, the SSM Agent should be installed manually using th - [Install SSM Agent for a hybrid environment (Windows)](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-install-managed-win.html) - [Install SSM Agent for a hybrid environment (Linux)](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-install-managed-linux.html) +## Learn more + +If you would like to learn more from the product manager about Microsoft Defender for Cloud's new AWS connector check out [Microsoft Defender for Cloud in the Field](episode-one.md). + +You can also check out the following blogs: + +- [Ignite 2021: Microsoft Defender for Cloud news](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/ignite-2021-microsoft-defender-for-cloud-news/ba-p/2882807). +- [Custom assessments and standards in Microsoft Defender for Cloud for AWS workloads (Preview)](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/custom-assessments-and-standards-in-microsoft-defender-for-cloud/ba-p/3066575). +- [Security posture management and server protection for AWS and GCP](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/security-posture-management-and-server-protection-for-aws-and/ba-p/3271388) ## Next steps Connecting your AWS account is part of the multicloud experience available in Microsoft Defender for Cloud. For related information, see the following page: - [Security recommendations for AWS resources - a reference guide](recommendations-reference-aws.md). 
-- [Connect your GCP projects to Microsoft Defender for Cloud](quickstart-onboard-gcp.md) +- [Connect your GCP projects to Microsoft Defender for Cloud](quickstart-onboard-gcp.md) \ No newline at end of file diff --git a/articles/defender-for-cloud/quickstart-onboard-gcp.md b/articles/defender-for-cloud/quickstart-onboard-gcp.md index 0d1c5677848d1..caf81fc6079fd 100644 --- a/articles/defender-for-cloud/quickstart-onboard-gcp.md +++ b/articles/defender-for-cloud/quickstart-onboard-gcp.md @@ -1,10 +1,8 @@ --- title: Connect your GCP project to Microsoft Defender for Cloud description: Monitoring your GCP resources from Microsoft Defender for Cloud -author: bmansheim -ms.author: benmansheim ms.topic: quickstart -ms.date: 05/17/2022 +ms.date: 06/06/2022 zone_pivot_groups: connect-gcp-accounts ms.custom: mode-other --- @@ -24,6 +22,7 @@ To protect your GCP-based resources, you can connect an account in two different - **Defender for Cloud's CSPM features** extends to your GCP resources. This agentless plan assesses your GCP resources according to GCP-specific security recommendations and these are included in your secure score. The resources will also be assessed for compliance with built-in standards specific to GCP. Defender for Cloud's [asset inventory page](asset-inventory.md) is a multicloud enabled feature helping you manage your GCP resources alongside your Azure resources. - **Microsoft Defender for Servers** brings threat detection and advanced defenses to your GCP VM instances. This plan includes the integrated license for Microsoft Defender for Endpoint, security baselines and OS level assessments, vulnerability assessment scanning, adaptive application controls (AAC), file integrity monitoring (FIM), and more. You can view the full list of available features in the [Supported features for virtual machines and servers table](supported-machines-endpoint-solutions-clouds-servers.md) - **Microsoft Defender for Containers** - Microsoft Defender for Containers brings threat detection and advanced defenses to your Google's Kubernetes Engine (GKE) Standard clusters. This plan includes Kubernetes threat protection, behavioral analytics, Kubernetes best practices, admission control recommendations and more. You can view the full list of available features in [Defender for Containers feature availability](supported-machines-endpoint-solutions-clouds-containers.md). + - **Microsoft Defender for SQL** brings threat detection and advanced defenses to your SQL Servers running on GCP compute engine instances. This plan includes the advanced threat protection and vulnerability assessment scanning. You can view the [full list of available features](defender-for-sql-introduction.md). :::image type="content" source="./media/quickstart-onboard-gcp/gcp-account-in-overview.png" alt-text="Screenshot of GCP projects shown in Microsoft Defender for Cloud's overview dashboard." lightbox="./media/quickstart-onboard-gcp/gcp-account-in-overview.png"::: @@ -34,7 +33,7 @@ To protect your GCP-based resources, you can connect an account in two different |Aspect|Details| |----|:----| | Release state: | Preview
                  The [Azure Preview Supplemental Terms](https://azure.microsoft.com/support/legal/preview-supplemental-terms/) include additional legal terms that apply to the Azure features that are in beta, preview, or otherwise not yet released into general availability. | -|Pricing:|The **CSPM plan** is free.
                  The **Defender for Servers** plan is billed at the same price as the [Microsoft Defender for Servers](defender-for-servers-introduction.md) plan for Azure machines. If a GCP VM instance doesn't have the Azure Arc agent deployed, you won't be charged for that machine.
                  The **[Defender for Containers](defender-for-containers-introduction.md)** plan is free during the preview. After which, it will be billed for GCP at the same price as for Azure resources.| +|Pricing:|The **CSPM plan** is free.
                  The **[Defender for SQL](defender-for-sql-introduction.md)** plan is billed at the same price as for Azure resources.
                  The **Defender for Servers** plan is billed at the same price as the [Microsoft Defender for Servers](defender-for-servers-introduction.md) plan for Azure machines. If a GCP VM instance doesn't have the Azure Arc agent deployed, you won't be charged for that machine.
                  The **[Defender for Containers](defender-for-containers-introduction.md)** plan is free during the preview. After the preview, it will be billed for GCP at the same price as for Azure resources.| |Required roles and permissions:| **Contributor** on the relevant Azure Subscription
                  **Owner** on the GCP organization or project| |Clouds:|:::image type="icon" source="./media/icons/yes-icon.png"::: Commercial clouds
                  :::image type="icon" source="./media/icons/no-icon.png"::: National (Azure Government, Azure China 21Vianet, Other Gov)| @@ -107,17 +106,17 @@ Follow the steps below to create your GCP cloud connector. |--|--| | CSPM service account reader role
                  Microsoft Defender for Cloud identity federation
                  CSPM identity pool
                  *Microsoft Defender for Servers* service account (when the servers plan is enabled)
                  *Azure-Arc for servers onboarding* service account (when the Arc for servers auto-provisioning is enabled) | Microsoft Defender Containers’ service account role,
                  Microsoft Defender Data Collector service account role
Microsoft Defender for Cloud identity pool | -1. (**Servers only**) When Arc auto-provisioning is enabled, copy the unique numeric ID presented at the end of the Cloud Shell script. +(**Servers/SQL only**) When Arc auto-provisioning is enabled, copy the unique numeric ID presented at the end of the Cloud Shell script. - :::image type="content" source="media/quickstart-onboard-gcp/powershell-unique-id.png" alt-text="Screenshot showing the unique numeric I D to be copied." lightbox="media/quickstart-onboard-gcp/powershell-unique-id-expanded.png"::: +:::image type="content" source="media/quickstart-onboard-gcp/powershell-unique-id.png" alt-text="Screenshot showing the unique numeric I D to be copied." lightbox="media/quickstart-onboard-gcp/powershell-unique-id-expanded.png"::: - To locate the unique numeric ID in the GCP portal, Navigate to **IAM & Admin** > **Service Accounts**, in the Name column, locate `Azure-Arc for servers onboarding` and copy the unique numeric ID number (OAuth 2 Client ID). +To locate the unique numeric ID in the GCP portal, navigate to **IAM & Admin** > **Service Accounts**. In the Name column, locate `Azure-Arc for servers onboarding` and copy the unique numeric ID (OAuth 2 Client ID). 1. Navigate back to the Microsoft Defender for Cloud portal. 1. (Optional) If you changed any of the names of any of the resources, update the names in the appropriate fields. -1. (**Servers only**) Select **Azure-Arc for servers onboarding** +1. (**Servers/SQL only**) Select **Azure-Arc for servers onboarding** :::image type="content" source="media/quickstart-onboard-gcp/unique-numeric-id.png" alt-text="Screenshot showing the Azure-Arc for servers onboarding section of the screen."::: @@ -188,6 +187,54 @@ To have full visibility to Microsoft Defender for Servers security content, ensu 1. Continue from step number 8, of the [Connect your GCP projects](#connect-your-gcp-projects) instructions. +### Configure the Databases plan + +Connect your GCP VM instances to Azure Arc in order to have full visibility to Microsoft Defender for SQL security content. + +Microsoft Defender for SQL brings threat detection and vulnerability assessment to your GCP VM instances. +To have full visibility to Microsoft Defender for SQL security content, ensure you have the following requirements configured: + +- Microsoft SQL servers on machines plan enabled on your subscription. Learn how to enable the plan in the [Enable enhanced security features](quickstart-enable-database-protections.md) article. + +- Azure Arc for servers installed on your VM instances. + - **(Recommended) Auto-provisioning** - Auto-provisioning is enabled by default in the onboarding process and requires owner permissions on the subscription. The Arc auto-provisioning process uses the OS config agent on the GCP side. Learn more about the [OS config agent availability on GCP machines](https://cloud.google.com/compute/docs/images/os-details#vm-manager). + + > [!NOTE] > The Arc auto-provisioning process leverages the VM manager on your Google Cloud Platform to enforce policies on your VMs through the OS config agent. A VM with an [Active OS agent](https://cloud.google.com/compute/docs/manage-os#agent-state) will incur a cost according to GCP. Refer to [GCP's technical documentation](https://cloud.google.com/compute/docs/vm-manager#pricing) to see how this may affect your account. + >

                  Microsoft Defender for Servers does not install the OS config agent on a VM that does not already have it installed. However, Microsoft Defender for Servers will enable communication between the OS config agent and the OS config service if the agent is already installed but not communicating with the service. + >

                  This can change the OS config agent from `inactive` to `active` and will lead to additional costs. +- Additional extensions should be enabled on the Arc-connected machines. + - SQL servers on machines. Ensure the plan is enabled on your subscription. + - Log Analytics (LA) agent on Arc machines. Ensure the selected workspace has a security solution installed. + + The LA agent and SQL servers on machines plan are currently configured at the subscription level, so all the multicloud accounts and projects (from both AWS and GCP) under the same subscription inherit the subscription settings, which may result in additional charges. + + Learn how to [configure auto-provisioning on your subscription](enable-data-collection.md#configure-auto-provisioning-for-agents-and-extensions-from-microsoft-defender-for-cloud). + + > [!NOTE] > Defender for SQL assigns tags to your GCP resources to manage the auto-provisioning process. You must have these tags properly assigned to your resources so that Defender for Cloud can manage your resources: **Cloud**, **InstanceName**, **MDFCSecurityConnector**, **MachineId**, **ProjectId**, **ProjectNumber** +- Automatic SQL server discovery and registration. Enable these settings to allow automatic discovery and registration of SQL servers, providing centralized SQL asset inventory and management. + +**To configure the Databases plan**: + +1. Follow the steps to [Connect your GCP project](#connect-your-gcp-project). + +1. On the Select plans screen, select **Configure**. + + :::image type="content" source="media/quickstart-onboard-gcp/view-configuration.png" alt-text="Screenshot showing where to click to configure the Databases plan."::: + +1. On the Auto provisioning screen, toggle the switches on or off depending on your needs. + + :::image type="content" source="media/quickstart-onboard-gcp/auto-provision-databases-screen.png" alt-text="Screenshot showing the toggle switches for the Databases plan."::: + + > [!Note] > If Azure Arc is toggled **Off**, you will need to follow the manual installation process mentioned above. + +1. Select **Save**. + +1. Continue from step number 8 of the [Connect your GCP projects](#connect-your-gcp-projects) instructions. + ### Configure the Containers plan Microsoft Defender for Containers brings threat detection, and advanced defenses to your GCP GKE Standard clusters. To get the full security value out of Defender for Containers, and to fully protect GCP clusters, ensure you have the following requirements configured: diff --git a/articles/defender-for-cloud/recommendations-reference-aws.md b/articles/defender-for-cloud/recommendations-reference-aws.md index c7c8ace8afa90..6bffc8a7a7db0 100644 --- a/articles/defender-for-cloud/recommendations-reference-aws.md +++ b/articles/defender-for-cloud/recommendations-reference-aws.md @@ -2,7 +2,7 @@ title: Reference table for all Microsoft Defender for Cloud recommendations for AWS resources description: This article lists Microsoft Defender for Cloud's security recommendations that help you harden and protect your AWS resources.
ms.topic: reference -ms.date: 03/13/2022 +ms.date: 05/25/2022 ms.custom: generated --- # Security recommendations for AWS resources - a reference guide diff --git a/articles/defender-for-cloud/release-notes-archive.md b/articles/defender-for-cloud/release-notes-archive.md index 383d89cebe5af..b4619565aa7dc 100644 --- a/articles/defender-for-cloud/release-notes-archive.md +++ b/articles/defender-for-cloud/release-notes-archive.md @@ -17,6 +17,119 @@ This page provides you with information about: - Bug fixes - Deprecated functionality +## December 2021 + +Updates in December include: + +- [Microsoft Defender for Containers plan released for general availability (GA)](#microsoft-defender-for-containers-plan-released-for-general-availability-ga) +- [New alerts for Microsoft Defender for Storage released for general availability (GA)](#new-alerts-for-microsoft-defender-for-storage-released-for-general-availability-ga) +- [Improvements to alerts for Microsoft Defender for Storage](#improvements-to-alerts-for-microsoft-defender-for-storage) +- ['PortSweeping' alert removed from network layer alerts](#portsweeping-alert-removed-from-network-layer-alerts) + +### Microsoft Defender for Containers plan released for general availability (GA) + +Over two years ago, we introduced [Defender for Kubernetes](defender-for-kubernetes-introduction.md) and [Defender for container registries](defender-for-container-registries-introduction.md) as part of the Azure Defender offering within Microsoft Defender for Cloud. + +With the release of [Microsoft Defender for Containers](defender-for-containers-introduction.md), we've merged these two existing Defender plans. + +The new plan: + +- **Combines the features of the two existing plans** - threat detection for Kubernetes clusters and vulnerability assessment for images stored in container registries +- **Brings new and improved features** - including multicloud support, host level threat detection with over **sixty** new Kubernetes-aware analytics, and vulnerability assessment for running images +- **Introduces Kubernetes-native at-scale onboarding** - by default, when you enable the plan all relevant components are configured to be deployed automatically + +With this release, the availability and presentation of Defender for Kubernetes and Defender for container registries has changed as follows: + +- New subscriptions - The two previous container plans are no longer available +- Existing subscriptions - Wherever they appear in the Azure portal, the plans are shown as **Deprecated** with instructions for how to upgrade to the newer plan + :::image type="content" source="media/release-notes/defender-plans-deprecated-indicator.png" alt-text="Defender for container registries and Defender for Kubernetes plans showing 'Deprecated' and upgrade information."::: + +The new plan is free for the month of December 2021. For the potential changes to the billing from the old plans to Defender for Containers, and for more information on the benefits introduced with this plan, see [Introducing Microsoft Defender for Containers](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/introducing-microsoft-defender-for-containers/ba-p/2952317). 
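If you prefer to switch a subscription to the new plan programmatically rather than through the portal, a minimal sketch against the `Microsoft.Security/pricings` REST API could look like the following. The `azure-identity` and `requests` packages, the placeholder subscription ID, and the API version shown are assumptions to verify against the current pricings reference before use.

```python
# Minimal sketch: enable the Defender for Containers plan by setting the
# "Containers" pricing to the Standard tier via the Microsoft.Security/pricings
# REST API. The api-version below is an assumption; check the current reference.
import requests
from azure.identity import DefaultAzureCredential

subscription_id = "<subscription-id>"  # replace with your subscription ID
token = DefaultAzureCredential().get_token("https://management.azure.com/.default").token

url = (
    f"https://management.azure.com/subscriptions/{subscription_id}"
    "/providers/Microsoft.Security/pricings/Containers"
    "?api-version=2022-03-01"
)
body = {"properties": {"pricingTier": "Standard"}}

response = requests.put(url, json=body, headers={"Authorization": f"Bearer {token}"})
response.raise_for_status()
print(response.json())
```

Setting `pricingTier` back to `Free` in the same request body reverses the change.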
+ +For more information, see: + +- [Overview of Microsoft Defender for Containers](defender-for-containers-introduction.md) +- [Enable Microsoft Defender for Containers](defender-for-containers-enable.md) +- [Introducing Microsoft Defender for Containers - Microsoft Tech Community](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/introducing-microsoft-defender-for-containers/ba-p/2952317) +- [Microsoft Defender for Containers | Defender for Cloud in the Field #3 - YouTube](https://www.youtube.com/watch?v=KeH0a3enLJ0&t=201s) + +### New alerts for Microsoft Defender for Storage released for general availability (GA) + +Threat actors use tools and scripts to scan for publicly open containers in the hope of finding misconfigured open storage containers with sensitive data. + +Microsoft Defender for Storage detects these scanners so that you can block them and remediate your posture. + +The preview alert that detected this was called **“Anonymous scan of public storage containers”**. To provide greater clarity about the suspicious events discovered, we've divided this into **two** new alerts. These alerts are relevant to Azure Blob Storage only. + +We've improved the detection logic, updated the alert metadata, and changed the alert name and alert type. + +These are the new alerts: + +| Alert (alert type) | Description | MITRE tactic | Severity | +|---------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|----------| +| **Publicly accessible storage containers successfully discovered**
                  (Storage.Blob_OpenContainersScanning.SuccessfulDiscovery) | A successful discovery of publicly open storage container(s) in your storage account was performed in the last hour by a scanning script or tool.

                  This usually indicates a reconnaissance attack, where the threat actor tries to list blobs by guessing container names, in the hope of finding misconfigured open storage containers with sensitive data in them.

                  The threat actor may use their own script or use known scanning tools like Microburst to scan for publicly open containers.

                  ✔ Azure Blob Storage
                  ✖ Azure Files
                  ✖ Azure Data Lake Storage Gen2 | Collection | Medium | +| **Publicly accessible storage containers unsuccessfully scanned**
                  (Storage.Blob_OpenContainersScanning.FailedAttempt) | A series of failed attempts to scan for publicly open storage containers were performed in the last hour.

                  This usually indicates a reconnaissance attack, where the threat actor tries to list blobs by guessing container names, in the hope of finding misconfigured open storage containers with sensitive data in them.

                  The threat actor may use their own script or use known scanning tools like Microburst to scan for publicly open containers.

                  ✔ Azure Blob Storage
                  ✖ Azure Files
                  ✖ Azure Data Lake Storage Gen2 | Collection | Low | + +For more information, see: + +- [Threat matrix for storage services](https://www.microsoft.com/security/blog/2021/04/08/threat-matrix-for-storage/) +- [Introduction to Microsoft Defender for Storage](defender-for-storage-introduction.md) +- [List of alerts provided by Microsoft Defender for Storage](alerts-reference.md#alerts-azurestorage) + +### Improvements to alerts for Microsoft Defender for Storage + +The initial access alerts now have improved accuracy and more data to support investigation. + +Threat actors use various techniques in the initial access to gain a foothold within a network. Two of the [Microsoft Defender for Storage](defender-for-storage-introduction.md) alerts that detect behavioral anomalies in this stage now have improved detection logic and additional data to support investigations. + +If you've [configured automations](workflow-automation.md) or defined [alert suppression rules](alerts-suppression-rules.md) for these alerts in the past, update them in accordance with these changes. + +#### Detecting access from a Tor exit node + +Access from a Tor exit node might indicate a threat actor trying to hide their identity. + +The alert is now tuned to generate only for authenticated access, which results in higher accuracy and confidence that the activity is malicious. This enhancement reduces the benign positive rate. + +An outlying pattern will have high severity, while less anomalous patterns will have medium severity. + +The alert name and description have been updated. The AlertType remains unchanged. + +- Alert name (old): Access from a Tor exit node to a storage account +- Alert name (new): Authenticated access from a Tor exit node +- Alert types: Storage.Blob_TorAnomaly / Storage.Files_TorAnomaly +- Description: One or more storage container(s) / file share(s) in your storage account were successfully accessed from an IP address known to be an active exit node of Tor (an anonymizing proxy). Threat actors use Tor to make it difficult to trace the activity back to them. Authenticated access from a Tor exit node is a likely indication that a threat actor is trying to hide their identity. Applies to: Azure Blob Storage, Azure Files, Azure Data Lake Storage Gen2 +- MITRE tactic: Initial access +- Severity: High/Medium + +#### Unusual unauthenticated access + +A change in access patterns may indicate that a threat actor was able to exploit public read access to storage containers, either by exploiting a mistake in access configurations, or by changing the access permissions. + +This medium severity alert is now tuned with improved behavioral logic, higher accuracy, and confidence that the activity is malicious. This enhancement reduces the benign positive rate. + +The alert name and description have been updated. The AlertType remains unchanged. + +- Alert name (old): Anonymous access to a storage account +- Alert name (new): Unusual unauthenticated access to a storage container +- Alert types: Storage.Blob_AnonymousAccessAnomaly +- Description: This storage account was accessed without authentication, which is a change in the common access pattern. Read access to this container is usually authenticated. This might indicate that a threat actor was able to exploit public read access to storage container(s) in this storage account(s). 
Applies to: Azure Blob Storage +- MITRE tactic: Collection +- Severity: Medium + +For more information, see: + +- [Threat matrix for storage services](https://www.microsoft.com/security/blog/2021/04/08/threat-matrix-for-storage/) +- [Introduction to Microsoft Defender for Storage](defender-for-storage-introduction.md) +- [List of alerts provided by Microsoft Defender for Storage](alerts-reference.md#alerts-azurestorage) + +### 'PortSweeping' alert removed from network layer alerts + +The following alert was removed from our network layer alerts due to inefficiencies: + +| Alert (alert type) | Description | MITRE tactics | Severity | +|------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------:|---------------| +| **Possible outgoing port scanning activity detected**
                  (PortSweeping) | Network traffic analysis detected suspicious outgoing traffic from %{Compromised Host}. This traffic may be a result of a port scanning activity. When the compromised resource is a load balancer or an application gateway, the suspected outgoing traffic has been originated from to one or more of the resources in the backend pool (of the load balancer or application gateway). If this behavior is intentional, please note that performing port scanning is against Azure Terms of service. If this behavior is unintentional, it may mean your resource has been compromised. | Discovery | Medium | + ## November 2021 Our Ignite release includes: @@ -39,7 +152,6 @@ Other changes in November include: - [New AKS security policy added to default initiative – for use by private preview customers only](#new-aks-security-policy-added-to-default-initiative--for-use-by-private-preview-customers-only) - [Inventory display of on-premises machines applies different template for resource name](#inventory-display-of-on-premises-machines-applies-different-template-for-resource-name) - ### Azure Security Center and Azure Defender become Microsoft Defender for Cloud According to the [2021 State of the Cloud report](https://info.flexera.com/CM-REPORT-State-of-the-Cloud#download), 92% of organizations now have a multicloud strategy. At Microsoft, our goal is to centralize security across these environments and help security teams work more effectively. @@ -48,10 +160,9 @@ According to the [2021 State of the Cloud report](https://info.flexera.com/CM-RE At Ignite 2019, we shared our vision to create the most complete approach for securing your digital estate and integrating XDR technologies under the Microsoft Defender brand. Unifying Azure Security Center and Azure Defender under the new name **Microsoft Defender for Cloud**, reflects the integrated capabilities of our security offering and our ability to support any cloud platform. - ### Native CSPM for AWS and threat protection for Amazon EKS, and AWS EC2 -A new **environment settings** page provides greater visibility and control over your management groups, subscriptions, and AWS accounts. The page is designed to onboard AWS accounts at scale: connect your AWS **management account**, and you'll automatically onboard existing and future accounts. +A new **environment settings** page provides greater visibility and control over your management groups, subscriptions, and AWS accounts. The page is designed to onboard AWS accounts at scale: connect your AWS **management account**, and you'll automatically onboard existing and future accounts. :::image type="content" source="media/release-notes/add-aws-account.png" alt-text="Use the new environment settings page to connect your AWS accounts."::: @@ -63,8 +174,8 @@ When you've added your AWS accounts, Defender for Cloud protects your AWS resour Learn more about [connecting your AWS accounts to Microsoft Defender for Cloud](quickstart-onboard-aws.md). - ### Prioritize security actions by data sensitivity (powered by Microsoft Purview) (in preview) + Data resources remain a popular target for threat actors. So it's crucial for security teams to identify, prioritize, and secure sensitive data resources across their cloud environments. To address this challenge, Microsoft Defender for Cloud now integrates sensitivity information from [Microsoft Purview](../purview/overview.md). 
Microsoft Purview is a unified data governance service that provides rich insights into the sensitivity of your data within multicloud, and on-premises workloads. @@ -73,27 +184,26 @@ The integration with Microsoft Purview extends your security visibility in Defen Learn more in [Prioritize security actions by data sensitivity](information-protection.md). - ### Expanded security control assessments with Azure Security Benchmark v3 -Microsoft Defender for Cloud's security recommendations are enabled and supported by the Azure Security Benchmark. + +Microsoft Defender for Cloud's security recommendations are enabled and supported by the Azure Security Benchmark. [Azure Security Benchmark](../security/benchmarks/introduction.md) is the Microsoft-authored, Azure-specific set of guidelines for security and compliance best practices based on common compliance frameworks. This widely respected benchmark builds on the controls from the [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/azure/) and the [National Institute of Standards and Technology (NIST)](https://www.nist.gov/) with a focus on cloud-centric security. From Ignite 2021, Azure Security Benchmark **v3** is available in [Defender for Cloud's regulatory compliance dashboard](update-regulatory-compliance-packages.md) and enabled as the new default initiative for all Azure subscriptions protected with Microsoft -Defender for Cloud. +Defender for Cloud. -Enhancements for v3 include: +Enhancements for v3 include: - Additional mappings to industry frameworks [PCI-DSS v3.2.1](https://www.pcisecuritystandards.org/documents/PCI_DSS_v3-2-1.pdf) and [CIS Controls v8](https://www.cisecurity.org/controls/v8/). - More granular and actionable guidance for controls with the introduction of: - - **Security Principles** - Providing insight into the overall security objectives that build the foundation for our recommendations. - - **Azure Guidance** - The technical “how-to” for meeting these objectives. + - **Security Principles** - Providing insight into the overall security objectives that build the foundation for our recommendations. + - **Azure Guidance** - The technical “how-to” for meeting these objectives. - New controls include DevOps security for issues such as threat modeling and software supply chain security, as well as key and certificate management for best practices in Azure. Learn more in [Introduction to Azure Security Benchmark](/security/benchmark/azure/introduction). - ### Microsoft Sentinel connector's optional bi-directional alert synchronization released for general availability (GA) In July, [we announced](release-notes-archive.md#azure-sentinel-connector-now-includes-optional-bi-directional-alert-synchronization-in-preview) a preview feature, **bi-directional alert synchronization**, for the built-in connector in [Microsoft Sentinel](../sentinel/index.yml) (Microsoft's cloud-native SIEM and SOAR solution). This feature is now released for general availability (GA). @@ -112,8 +222,7 @@ SecOps teams can choose the relevant Microsoft Sentinel workspace directly from The new recommendation, "Diagnostic logs in Kubernetes services should be enabled" includes the 'Fix' option for faster remediation. -We've also enhanced the "Auditing on SQL server should be enabled" recommendation with the same Sentinel streaming capabilities. - +We've also enhanced the "Auditing on SQL server should be enabled" recommendation with the same Sentinel streaming capabilities. 
### Recommendations mapped to the MITRE ATT&CK® framework - released for general availability (GA) @@ -137,7 +246,7 @@ In October, [we announced](release-notes-archive.md#microsoft-threat-and-vulnera Use **threat and vulnerability management** to discover vulnerabilities and misconfigurations in near real time with the [integration with Microsoft Defender for Endpoint](integration-defender-for-endpoint.md) enabled, and without the need for additional agents or periodic scans. Threat and vulnerability management prioritizes vulnerabilities based on the threat landscape and detections in your organization. -Use the security recommendation "[A vulnerability assessment solution should be enabled on your virtual machines](https://portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/ffff0522-1e88-47fc-8382-2a80ba848f5d)" to surface the vulnerabilities detected by threat and vulnerability management for your [supported machines](/microsoft-365/security/defender-endpoint/tvm-supported-os?view=o365-worldwide&preserve-view=true). +Use the security recommendation "[A vulnerability assessment solution should be enabled on your virtual machines](https://portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/ffff0522-1e88-47fc-8382-2a80ba848f5d)" to surface the vulnerabilities detected by threat and vulnerability management for your [supported machines](/microsoft-365/security/defender-endpoint/tvm-supported-os?view=o365-worldwide&preserve-view=true). To automatically surface the vulnerabilities, on existing and new machines, without the need to manually remediate the recommendation, see [Vulnerability assessment solutions can now be auto enabled (in preview)](release-notes-archive.md#vulnerability-assessment-solutions-can-now-be-auto-enabled-in-preview). @@ -153,7 +262,6 @@ When Defender for Endpoint detects a threat, it triggers an alert. The alert is Learn more in [Protect your endpoints with Security Center's integrated EDR solution: Microsoft Defender for Endpoint](integration-defender-for-endpoint.md). - ### Snapshot export for recommendations and security findings (in preview) Defender for Cloud generates detailed security alerts and recommendations. You can view them in the portal or through programmatic tools. You might also need to export some or all of this information for tracking with other monitoring tools in your environment. @@ -181,7 +289,7 @@ In October, [we announced](release-notes-archive.md#software-inventory-filters-a You can query the software inventory data in **Azure Resource Graph Explorer**. -To use these features, you'll need to enable the [integration with Microsoft Defender for Endpoint](integration-defender-for-endpoint.md). +To use these features, you'll need to enable the [integration with Microsoft Defender for Endpoint](integration-defender-for-endpoint.md). For full details, including sample Kusto queries for Azure Resource Graph, see [Access a software inventory](asset-inventory.md#access-a-software-inventory). @@ -191,7 +299,7 @@ To ensure that Kubernetes workloads are secure by default, Defender for Cloud in As part of this project, we've added a policy and recommendation (disabled by default) for gating deployment on Kubernetes clusters. The policy is in the default initiative but is only relevant for organizations who register for the related private preview. 
-You can safely ignore the policies and recommendation ("Kubernetes clusters should gate deployment of vulnerable images") and there will be no impact on your environment. +You can safely ignore the policies and recommendation ("Kubernetes clusters should gate deployment of vulnerable images") and there will be no impact on your environment. If you'd like to participate in the private preview, you'll need to be a member of the private preview ring. If you're not already a member, submit a request [here](https://aka.ms/atscale). Members will be notified when the preview begins. @@ -214,14 +322,13 @@ Updates in October include: - [Recommendations details pages now show related recommendations](#recommendations-details-pages-now-show-related-recommendations) - [New alerts for Azure Defender for Kubernetes (in preview)](#new-alerts-for-azure-defender-for-kubernetes-in-preview) - ### Microsoft Threat and Vulnerability Management added as vulnerability assessment solution (in preview) -We've extended the integration between [Azure Defender for Servers](defender-for-servers-introduction.md) and Microsoft Defender for Endpoint, to support a new vulnerability assessment provider for your machines: [Microsoft threat and vulnerability management](/microsoft-365/security/defender-endpoint/next-gen-threat-and-vuln-mgt). +We've extended the integration between [Azure Defender for Servers](defender-for-servers-introduction.md) and Microsoft Defender for Endpoint, to support a new vulnerability assessment provider for your machines: [Microsoft threat and vulnerability management](/microsoft-365/security/defender-endpoint/next-gen-threat-and-vuln-mgt). Use **threat and vulnerability management** to discover vulnerabilities and misconfigurations in near real time with the [integration with Microsoft Defender for Endpoint](integration-defender-for-endpoint.md) enabled, and without the need for additional agents or periodic scans. Threat and vulnerability management prioritizes vulnerabilities based on the threat landscape and detections in your organization. -Use the security recommendation "[A vulnerability assessment solution should be enabled on your virtual machines](https://portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/ffff0522-1e88-47fc-8382-2a80ba848f5d)" to surface the vulnerabilities detected by threat and vulnerability management for your [supported machines](/microsoft-365/security/defender-endpoint/tvm-supported-os?view=o365-worldwide&preserve-view=true). +Use the security recommendation "[A vulnerability assessment solution should be enabled on your virtual machines](https://portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/ffff0522-1e88-47fc-8382-2a80ba848f5d)" to surface the vulnerabilities detected by threat and vulnerability management for your [supported machines](/microsoft-365/security/defender-endpoint/tvm-supported-os?view=o365-worldwide&preserve-view=true). To automatically surface the vulnerabilities, on existing and new machines, without the need to manually remediate the recommendation, see [Vulnerability assessment solutions can now be auto enabled (in preview)](#vulnerability-assessment-solutions-can-now-be-auto-enabled-in-preview). 
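To track this recommendation at scale, the underlying assessment can also be queried from Azure Resource Graph. Below is a minimal Python sketch that reuses the assessment key from the recommendation link above; the `azure-identity` and `azure-mgmt-resourcegraph` packages, the placeholder subscription ID, and the exact shape of the returned rows are assumptions to validate in your own environment.

```python
# Minimal sketch: find resources where the "A vulnerability assessment solution
# should be enabled on your virtual machines" assessment is unhealthy, using the
# assessment key referenced in this article. The shape of response.data depends
# on the SDK version and result format, so treat the printout as illustrative.
from azure.identity import DefaultAzureCredential
from azure.mgmt.resourcegraph import ResourceGraphClient
from azure.mgmt.resourcegraph.models import QueryRequest

credential = DefaultAzureCredential()
graph_client = ResourceGraphClient(credential)

request = QueryRequest(
    subscriptions=["<subscription-id>"],  # replace with your subscription ID(s)
    query=(
        "securityresources "
        "| where type == 'microsoft.security/assessments' "
        "| where name == 'ffff0522-1e88-47fc-8382-2a80ba848f5d' "
        "| where properties.status.code == 'Unhealthy' "
        "| project resourceId = id, displayName = properties.displayName"
    ),
)

response = graph_client.resources(request)
print(response.total_records)
print(response.data)
```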
@@ -244,19 +351,19 @@ Learn more in [Automatically configure vulnerability assessment for your machine ### Software inventory filters added to asset inventory (in preview) -The [asset inventory](asset-inventory.md) page now includes a filter to select machines running specific software - and even specify the versions of interest. +The [asset inventory](asset-inventory.md) page now includes a filter to select machines running specific software - and even specify the versions of interest. Additionally, you can query the software inventory data in **Azure Resource Graph Explorer**. -To use these new features, you'll need to enable the [integration with Microsoft Defender for Endpoint](integration-defender-for-endpoint.md). +To use these new features, you'll need to enable the [integration with Microsoft Defender for Endpoint](integration-defender-for-endpoint.md). For full details, including sample Kusto queries for Azure Resource Graph, see [Access a software inventory](asset-inventory.md#access-a-software-inventory). :::image type="content" source="media/deploy-vulnerability-assessment-tvm/software-inventory.png" alt-text="If you've enabled the threat and vulnerability solution, Security Center's asset inventory offers a filter to select resources by their installed software."::: -### Changed prefix of some alert types from "ARM_" to "VM_" +### Changed prefix of some alert types from "ARM_" to "VM_" -In July 2021, we announced a [logical reorganization of Azure Defender for Resource Manager alerts](release-notes-archive.md#logical-reorganization-of-azure-defender-for-resource-manager-alerts) +In July 2021, we announced a [logical reorganization of Azure Defender for Resource Manager alerts](release-notes-archive.md#logical-reorganization-of-azure-defender-for-resource-manager-alerts) As part of a logical reorganization of some of the Azure Defender plans, we moved twenty-one alerts from [Azure Defender for Resource Manager](defender-for-resource-manager-introduction.md) to [Azure Defender for Servers](defender-for-servers-introduction.md). @@ -286,18 +393,17 @@ With this update, we've changed the prefixes of these alerts to match this reass | ARM_VMAccessUnusualPasswordReset | VM_VMAccessUnusualPasswordReset | | ARM_VMAccessUnusualSSHReset | VM_VMAccessUnusualSSHReset | - Learn more about the [Azure Defender for Resource Manager](defender-for-resource-manager-introduction.md) and [Azure Defender for Servers](defender-for-servers-introduction.md) plans. ### Changes to the logic of a security recommendation for Kubernetes clusters -The recommendation "Kubernetes clusters should not use the default namespace" prevents usage of the default namespace for a range of resource types. Two of the resource types that were included in this recommendation have been removed: ConfigMap and Secret. +The recommendation "Kubernetes clusters should not use the default namespace" prevents usage of the default namespace for a range of resource types. Two of the resource types that were included in this recommendation have been removed: ConfigMap and Secret. Learn more about this recommendation and hardening your Kubernetes clusters in [Understand Azure Policy for Kubernetes clusters](../governance/policy/concepts/policy-for-kubernetes.md). ### Recommendations details pages now show related recommendations -To clarify the relationships between different recommendations, we've added a **Related recommendations** area to the details pages of many recommendations. 
+To clarify the relationships between different recommendations, we've added a **Related recommendations** area to the details pages of many recommendations. The three relationship types that are shown on these pages are: @@ -322,15 +428,13 @@ Obviously, Security Center can't notify you about discovered vulnerabilities unl Therefore: - - Recommendation #1 is a prerequisite for recommendation #2 - - Recommendation #2 depends upon recommendation #1 +- Recommendation #1 is a prerequisite for recommendation #2 +- Recommendation #2 depends upon recommendation #1 :::image type="content" source="media/release-notes/related-recommendations-solution-not-found.png" alt-text="Screenshot of recommendation to deploy vulnerability assessment solution."::: :::image type="content" source="media/release-notes/related-recommendations-vulnerabilities-found.png" alt-text="Screenshot of recommendation to resolve discovered vulnerabilities."::: - - ### New alerts for Azure Defender for Kubernetes (in preview) To expand the threat protections provided by Azure Defender for Kubernetes, we've added two preview alerts. @@ -342,7 +446,6 @@ These alerts are generated based on a new machine learning model and Kubernetes | **Anomalous pod deployment (Preview)**
                  (K8S_AnomalousPodDeployment) | Kubernetes audit log analysis detected pod deployment that is anomalous based on previous pod deployment activity. This activity is considered an anomaly when taking into account how the different features seen in the deployment operation are in relation to one another. The features monitored by this analytics include the container image registry used, the account performing the deployment, the day of the week, how often this account performs pod deployments, the user agent used in the operation, whether this is a namespace in which pod deployments occur often, and other features. Top contributing reasons for raising this alert as anomalous activity are detailed under the alert extended properties. | Execution | Medium | | **Excessive role permissions assigned in Kubernetes cluster (Preview)**
                  (K8S_ServiceAcountPermissionAnomaly) | Analysis of the Kubernetes audit logs detected an excessive permissions role assignment to your cluster. From examining role assignments, the listed permissions are uncommon to the specific service account. This detection considers previous role assignments to the same service account across clusters monitored by Azure, volume per permission, and the impact of the specific permission. The anomaly detection model used for this alert takes into account how this permission is used across all clusters monitored by Azure Defender. | Privilege Escalation | Low | - For a full list of the Kubernetes alerts, see [Alerts for Kubernetes clusters](alerts-reference.md#alerts-k8scluster). ## September 2021 @@ -397,7 +500,6 @@ We've added two **preview** recommendations to deploy and maintain the endpoint |[Endpoint protection should be installed on your machines](https://portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/4fb67663-9ab9-475d-b026-8c544cced439) |To protect your machines from threats and vulnerabilities, install a supported endpoint protection solution.
                  Learn more about how Endpoint Protection for machines is evaluated.
                  (Related policy: [Monitor missing Endpoint Protection in Azure Security Center](https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2faf6cd1bd-1635-48cb-bde7-5b15693900b9)) |High | |[Endpoint protection health issues should be resolved on your machines](https://portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/37a3689a-818e-4a0e-82ac-b1392b9bb000) |Resolve endpoint protection health issues on your virtual machines to protect them from latest threats and vulnerabilities. Azure Security Center supported endpoint protection solutions are documented [here](./supported-machines-endpoint-solutions-clouds-servers.md?tabs=features-windows). Endpoint protection assessment is documented here.
                  (Related policy: [Monitor missing Endpoint Protection in Azure Security Center](https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2faf6cd1bd-1635-48cb-bde7-5b15693900b9)) |Medium | - > [!NOTE] > The recommendations show their freshness interval as 8 hours, but there are some scenarios in which this might take significantly longer. For example, when an on premises machine is deleted, it takes 24 hours for Security Center to identify the deletion. After that, the assessment will take up to 8 hours to return the information. In that specific situation therefore, it may take 32 hours for the machine to be removed from the list of affected resources. > @@ -410,11 +512,10 @@ A new, dedicated area of the Security Center pages in the Azure portal provides When you're facing an issue, or are seeking advice from our support team, **Diagnose and solve problems** is another tool to help you find the solution: :::image type="content" source="media/release-notes/solve-problems.png" alt-text="Security Center's 'Diagnose and solve problems' page"::: - ### Regulatory compliance dashboard's Azure Audit reports released for general availability (GA) -The regulatory compliance dashboard's toolbar offers Azure and Dynamics certification reports for the standards applied to your subscriptions. +The regulatory compliance dashboard's toolbar offers Azure and Dynamics certification reports for the standards applied to your subscriptions. :::image type="content" source="media/release-notes/audit-reports-regulatory-compliance-dashboard.png" alt-text="Regulatory compliance dashboard's toolbar showing the button for generating audit reports."::: @@ -439,25 +540,26 @@ It's likely that this change will impact your secure scores. For most subscripti > [!TIP] > The [asset inventory](asset-inventory.md) page was also affected by this change as it displays the monitored status for machines (monitored, not monitored, or partially monitored - a state which refers to an agent with health issues). - ### Azure Defender for container registries now scans for vulnerabilities in registries protected with Azure Private Link + Azure Defender for container registries includes a vulnerability scanner to scan images in your Azure Container Registry registries. Learn how to scan your registries and remediate findings in [Use Azure Defender for container registries to scan your images for vulnerabilities](defender-for-containers-usage.md). To limit access to a registry hosted in Azure Container Registry, assign virtual network private IP addresses to the registry endpoints and use Azure Private Link as explained in [Connect privately to an Azure container registry using Azure Private Link](../container-registry/container-registry-private-link.md). As part of our ongoing efforts to support additional environments and use cases, Azure Defender now also scans container registries protected with [Azure Private Link](../private-link/private-link-overview.md). - ### Security Center can now auto provision the Azure Policy's Guest Configuration extension (in preview) + Azure Policy can audit settings inside a machine, both for machines running in Azure and Arc connected machines. The validation is performed by the Guest Configuration extension and client. Learn more in [Understand Azure Policy's Guest Configuration](../governance/policy/concepts/guest-configuration.md). 
-With this update, you can now set Security Center to automatically provision this extension to all supported machines. +With this update, you can now set Security Center to automatically provision this extension to all supported machines. :::image type="content" source="media/release-notes/auto-provisioning-guest-configuration.png" alt-text="Enable auto deployment of Guest Configuration extension."::: Learn more about how auto provisioning works in [Configure auto provisioning for agents and extensions](enable-data-collection.md). ### Recommendations to enable Azure Defender plans now support "Enforce" + Security Center includes two features that help ensure newly created resources are provisioned in a secure manner: **enforce** and **deny**. When a recommendation offers these options, you can ensure your security requirements are met whenever someone attempts to create a resource: - **Deny** stops unhealthy resources from being created @@ -479,8 +581,6 @@ If you need to export larger amounts of data, use the available filters before s Learn more about [performing a CSV export of your security recommendations](continuous-export.md#manual-one-time-export-of-alerts-and-recommendations). - - ### Recommendations page now includes multiple views The recommendations page now has two tabs to provide alternate ways to view the recommendations relevant to your resources: @@ -495,8 +595,8 @@ The recommendations page now has two tabs to provide alternate ways to view the Updates in July include: - [Azure Sentinel connector now includes optional bi-directional alert synchronization (in preview)](#azure-sentinel-connector-now-includes-optional-bi-directional-alert-synchronization-in-preview) -- [Logical reorganization of Azure Defender for Resource Manager alerts](#logical-reorganization-of-azure-defender-for-resource-manager-alerts) -- [Enhancements to recommendation to enable Azure Disk Encryption (ADE)](#enhancements-to-recommendation-to-enable-azure-disk-encryption-ade) +- [Logical reorganization of Azure Defender for Resource Manager alerts](#logical-reorganization-of-azure-defender-for-resource-manager-alerts) +- [Enhancements to recommendation to enable Azure Disk Encryption (ADE)](#enhancements-to-recommendation-to-enable-azure-disk-encryption-ade) - [Continuous export of secure score and regulatory compliance data released for general availability (GA)](#continuous-export-of-secure-score-and-regulatory-compliance-data-released-for-general-availability-ga) - [Workflow automations can be triggered by changes to regulatory compliance assessments (GA)](#workflow-automations-can-be-triggered-by-changes-to-regulatory-compliance-assessments-ga) - [Assessments API field 'FirstEvaluationDate' and 'StatusChangeDate' now available in workspace schemas and logic apps](#assessments-api-field-firstevaluationdate-and-statuschangedate-now-available-in-workspace-schemas-and-logic-apps) @@ -504,7 +604,7 @@ Updates in July include: ### Azure Sentinel connector now includes optional bi-directional alert synchronization (in preview) -Security Center natively integrates with [Azure Sentinel](../sentinel/index.yml), Azure's cloud-native SIEM and SOAR solution. +Security Center natively integrates with [Azure Sentinel](../sentinel/index.yml), Azure's cloud-native SIEM and SOAR solution. Azure Sentinel includes built-in connectors for Azure Security Center at the subscription and tenant levels. Learn more in [Stream alerts to Azure Sentinel](export-to-siem.md#stream-alerts-to-microsoft-sentinel). 
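Once alerts are streaming into the connected workspace, they can be queried alongside the rest of your Microsoft Sentinel data. Here's a minimal Python sketch, assuming the `azure-identity` and `azure-monitor-query` packages and that Defender for Cloud (Security Center) alerts land in the workspace's `SecurityAlert` table:

```python
# Minimal sketch: list the last day of Security Center / Defender for Cloud alerts
# from the Log Analytics workspace behind Microsoft Sentinel. The product name
# filter below is an assumption; adjust it to match what you see in your table.
from datetime import timedelta

from azure.identity import DefaultAzureCredential
from azure.monitor.query import LogsQueryClient

credential = DefaultAzureCredential()
logs_client = LogsQueryClient(credential)

workspace_id = "<log-analytics-workspace-id>"  # replace with your workspace ID
query = (
    "SecurityAlert "
    "| where ProductName == 'Azure Security Center' "
    "| project TimeGenerated, AlertName, AlertSeverity "
    "| order by TimeGenerated desc"
)

response = logs_client.query_workspace(workspace_id, query, timespan=timedelta(days=1))
for table in response.tables:
    for row in table.rows:
        print(row)
```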
@@ -551,7 +651,6 @@ These are the alerts that were part of Azure Defender for Resource Manager, and Learn more about the [Azure Defender for Resource Manager](defender-for-resource-manager-introduction.md) and [Azure Defender for Servers](defender-for-servers-introduction.md) plans. - ### Enhancements to recommendation to enable Azure Disk Encryption (ADE) Following user feedback, we've renamed the recommendation **Disk encryption should be applied on virtual machines**. @@ -562,9 +661,7 @@ The description has also been updated to better explain the purpose of this hard | Recommendation | Description | Severity | |--------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------:| -| **Virtual machines should encrypt temp disks, caches, and data flows between Compute and Storage resources** | By default, a virtual machine’s OS and data disks are encrypted-at-rest using platform-managed keys; temp disks and data caches aren’t encrypted, and data isn’t encrypted when flowing between compute and storage resources. For a comparison of different disk encryption technologies in Azure, see https://aka.ms/diskencryptioncomparison.
                  Use Azure Disk Encryption to encrypt all this data. Disregard this recommendation if: (1) you’re using the encryption-at-host feature, or (2) server-side encryption on Managed Disks meets your security requirements. Learn more in Server-side encryption of Azure Disk Storage. | High | - - +| **Virtual machines should encrypt temp disks, caches, and data flows between Compute and Storage resources** | By default, a virtual machine’s OS and data disks are encrypted-at-rest using platform-managed keys; temp disks and data caches aren’t encrypted, and data isn’t encrypted when flowing between compute and storage resources. For a comparison of different disk encryption technologies in Azure, see https://aka.ms/diskencryptioncomparison.
                  Use Azure Disk Encryption to encrypt all this data. Disregard this recommendation if: (1) you’re using the encryption-at-host feature, or (2) server-side encryption on Managed Disks meets your security requirements. Learn more in Server-side encryption of Azure Disk Storage. | High | ### Continuous export of secure score and regulatory compliance data released for general availability (GA) @@ -578,8 +675,7 @@ We've enhanced and expanded this feature over time: - In December 2020, we added the **preview** option to stream changes to your **regulatory compliance assessment data**.
                  For full details, see [Continuous export gets new data types (preview)](release-notes-archive.md#continuous-export-gets-new-data-types-and-improved-deployifnotexist-policies). -With this update, these two options are released for general availability (GA). - +With this update, these two options are released for general availability (GA). ### Workflow automations can be triggered by changes to regulatory compliance assessments (GA) @@ -599,14 +695,13 @@ Those fields were accessible through the REST API, Azure Resource Graph, continu With this change, we're making the information available in the Log Analytics workspace schema and from logic apps. - ### 'Compliance over time' workbook template added to Azure Monitor Workbooks gallery In March, we announced the integrated Azure Monitor Workbooks experience in Security Center (see [Azure Monitor Workbooks integrated into Security Center and three templates provided](release-notes-archive.md#azure-monitor-workbooks-integrated-into-security-center-and-three-templates-provided)). The initial release included three templates to build dynamic and visual reports about your organization's security posture. -We've now added a workbook dedicated to tracking a subscription's compliance with the regulatory or industry standards applied to it. +We've now added a workbook dedicated to tracking a subscription's compliance with the regulatory or industry standards applied to it. Learn about using these reports or building your own in [Create rich, interactive reports of Security Center data](custom-dashboards-azure-workbooks.md). @@ -621,7 +716,6 @@ Updates in June include: - [Prefix for Kubernetes alerts changed from "AKS_" to "K8S_"](#prefix-for-kubernetes-alerts-changed-from-aks_-to-k8s_) - [Deprecated two recommendations from "Apply system updates" security control](#deprecated-two-recommendations-from-apply-system-updates-security-control) - ### New alert for Azure Defender for Key Vault To expand the threat protections provided by Azure Defender for Key Vault, we've added the following alert: @@ -630,13 +724,12 @@ To expand the threat protections provided by Azure Defender for Key Vault, we've |------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------:|----------| | Access from a suspicious IP address to a key vault
                  (KV_SuspiciousIPAccess) | A key vault has been successfully accessed by an IP that has been identified by Microsoft Threat Intelligence as a suspicious IP address. This may indicate that your infrastructure has been compromised. We recommend further investigation. Learn more about [Microsoft's threat intelligence capabilities](https://go.microsoft.com/fwlink/?linkid=2128684). | Credential Access | Medium | - For more information, see: + - [Introduction to Azure Defender for Key Vault](defender-for-resource-manager-introduction.md) - [Respond to Azure Defender for Key Vault alerts](defender-for-key-vault-usage.md) - [List of alerts provided by Azure Defender for Key Vault](alerts-reference.md#alerts-azurekv) - ### Recommendations to encrypt with customer-managed keys (CMKs) disabled by default Security Center includes multiple recommendations to encrypt data at rest with customer-managed keys, such as: @@ -657,7 +750,6 @@ This change is reflected in the names of the recommendation with a new prefix, * :::image type="content" source="media/upcoming-changes/customer-managed-keys-disabled.png" alt-text="Security Center's CMK recommendations will be disabled by default." lightbox="media/upcoming-changes/customer-managed-keys-disabled.png"::: - ### Prefix for Kubernetes alerts changed from "AKS_" to "K8S_" Azure Defender for Kubernetes recently expanded to protect Kubernetes clusters hosted on-premises and in multicloud environments. Learn more in [Use Azure Defender for Kubernetes to protect hybrid and multicloud Kubernetes deployments (in preview)](release-notes-archive.md#use-azure-defender-for-kubernetes-to-protect-hybrid-and-multicloud-kubernetes-deployments-in-preview). @@ -668,14 +760,12 @@ To reflect the fact that the security alerts provided by Azure Defender for Kube |----|----| |Kubernetes penetration testing tool detected
                  (**AKS**_PenTestToolsKubeHunter)|Kubernetes audit log analysis detected usage of Kubernetes penetration testing tool in the **AKS** cluster. While this behavior can be legitimate, attackers might use such public tools for malicious purposes. - was changed to: |Alert (alert type)|Description| |----|----| |Kubernetes penetration testing tool detected
                  (**K8S**_PenTestToolsKubeHunter)|Kubernetes audit log analysis detected usage of Kubernetes penetration testing tool in the **Kubernetes** cluster. While this behavior can be legitimate, attackers might use such public tools for malicious purposes.| - Any suppression rules that refer to alerts beginning "AKS_" were automatically converted. If you've setup SIEM exports, or custom automation scripts that refer to Kubernetes alerts by alert type, you'll need to update them with the new alert types. For a full list of the Kubernetes alerts, see [Alerts for Kubernetes clusters](alerts-reference.md#alerts-k8scluster). @@ -687,7 +777,6 @@ The following two recommendations were deprecated: - **OS version should be updated for your cloud service roles** - By default, Azure periodically updates your guest OS to the latest supported image within the OS family that you've specified in your service configuration (.cscfg), such as Windows Server 2016. - **Kubernetes Services should be upgraded to a non-vulnerable Kubernetes version** - This recommendation's evaluations aren't as wide-ranging as we'd like them to be. We plan to replace the recommendation with an enhanced version that's better aligned with your security needs. - ## May 2021 Updates in May include: @@ -703,7 +792,6 @@ Updates in May include: - [Assessments API expanded with two new fields](#assessments-api-expanded-with-two-new-fields) - [Asset inventory gets a cloud environment filter](#asset-inventory-gets-a-cloud-environment-filter) - ### Azure Defender for DNS and Azure Defender for Resource Manager released for general availability (GA) These two cloud-native breadth threat protection plans are now GA. @@ -711,14 +799,14 @@ These two cloud-native breadth threat protection plans are now GA. These new protections greatly enhance your resiliency against attacks from threat actors, and significantly increase the number of Azure resources protected by Azure Defender. - **Azure Defender for Resource Manager** - automatically monitors all resource management operations performed in your organization. For more information, see: - - [Introduction to Azure Defender for Resource Manager](defender-for-resource-manager-introduction.md) - - [Respond to Azure Defender for Resource Manager alerts](defender-for-resource-manager-usage.md) - - [List of alerts provided by Azure Defender for Resource Manager](alerts-reference.md#alerts-resourcemanager) + - [Introduction to Azure Defender for Resource Manager](defender-for-resource-manager-introduction.md) + - [Respond to Azure Defender for Resource Manager alerts](defender-for-resource-manager-usage.md) + - [List of alerts provided by Azure Defender for Resource Manager](alerts-reference.md#alerts-resourcemanager) - **Azure Defender for DNS** - continuously monitors all DNS queries from your Azure resources. 
For more information, see: - - [Introduction to Azure Defender for DNS](defender-for-dns-introduction.md) - - [Respond to Azure Defender for DNS alerts](defender-for-dns-usage.md) - - [List of alerts provided by Azure Defender for DNS](alerts-reference.md#alerts-dns) + - [Introduction to Azure Defender for DNS](defender-for-dns-introduction.md) + - [Respond to Azure Defender for DNS alerts](defender-for-dns-usage.md) + - [List of alerts provided by Azure Defender for DNS](alerts-reference.md#alerts-dns) To simplify the process of enabling these plans, use the recommendations: @@ -726,8 +814,7 @@ To simplify the process of enabling these plans, use the recommendations: - **Azure Defender for DNS should be enabled** > [!NOTE] -> Enabling Azure Defender plans results in charges. Learn about the pricing details per region on Security Center's pricing page: https://aka.ms/pricing-security-center. - +> Enabling Azure Defender plans results in charges. Learn about the pricing details per region on Security Center's [pricing page](https://azure.microsoft.com/pricing/details/defender-for-cloud/). ### Azure Defender for open-source relational databases released for general availability (GA) @@ -756,13 +843,12 @@ To expand the threat protections provided by Azure Defender for Resource Manager |**Azure Resource Manager operation from suspicious IP address (Preview)**
                  (ARM_OperationFromSuspiciousIP)|Azure Defender for Resource Manager detected an operation from an IP address that has been marked as suspicious in threat intelligence feeds.|Execution|Medium| |**Azure Resource Manager operation from suspicious proxy IP address (Preview)**
                  (ARM_OperationFromSuspiciousProxyIP)|Azure Defender for Resource Manager detected a resource management operation from an IP address that is associated with proxy services, such as TOR. While this behavior can be legitimate, it's often seen in malicious activities, when threat actors try to hide their source IP.|Defense Evasion|Medium| - For more information, see: + - [Introduction to Azure Defender for Resource Manager](defender-for-resource-manager-introduction.md) - [Respond to Azure Defender for Resource Manager alerts](defender-for-resource-manager-usage.md) - [List of alerts provided by Azure Defender for Resource Manager](alerts-reference.md#alerts-resourcemanager) - ### CI/CD vulnerability scanning of container images with GitHub workflows and Azure Defender (preview) Azure Defender for container registries now provides DevSecOps teams observability into GitHub Actions workflows. @@ -808,7 +894,7 @@ Azure offers trusted launch as a seamless way to improve the security of [genera > [!IMPORTANT] > Trusted launch requires the creation of new virtual machines. You can't enable trusted launch on existing virtual machines that were initially created without it. -> +> > Trusted launch is currently in public preview. The preview is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. Security Center's recommendation, **vTPM should be enabled on supported virtual machines**, ensures your Azure VMs are using a vTPM. This virtualized version of a hardware Trusted Platform Module enables attestation by measuring the entire boot chain of your VM (UEFI, OS, system, and drivers). @@ -821,7 +907,7 @@ With the vTPM enabled, the **Guest Attestation extension** can remotely validate - **Guest Attestation extension should be installed on supported Linux virtual machines** - **Guest Attestation extension should be installed on supported Linux virtual machine scale sets** -Learn more in [Trusted launch for Azure virtual machines](../virtual-machines/trusted-launch.md). +Learn more in [Trusted launch for Azure virtual machines](../virtual-machines/trusted-launch.md). ### New recommendations for hardening Kubernetes clusters (in preview) @@ -846,16 +932,13 @@ To access this information, you can use any of the methods in the table below. | Tool | Details | |----------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| REST API call | GET https://management.azure.com/subscriptions//providers/Microsoft.Security/assessments?api-version=2019-01-01-preview&$expand=statusEvaluationDates | +| REST API call | GET /providers/Microsoft.Security/assessments?api-version=2019-01-01-preview&$expand=statusEvaluationDates> | | Azure Resource Graph | `securityresources`
                  `where type == "microsoft.security/assessments"` | | Continuous export | The two dedicated fields will be available the Log Analytics workspace data | | [CSV export](continuous-export.md#manual-one-time-export-of-alerts-and-recommendations) | The two fields are included in the CSV files | - - Learn more about the [Assessments REST API](/rest/api/securitycenter/assessments). - ### Asset inventory gets a cloud environment filter Security Center's asset inventory page offers many filters to quickly refine the list of resources displayed. Learn more in [Explore and manage your resources with asset inventory](asset-inventory.md). @@ -869,10 +952,10 @@ Learn more about the multicloud capabilities: - [Connect your AWS accounts to Azure Security Center](quickstart-onboard-aws.md) - [Connect your GCP projects to Azure Security Center](quickstart-onboard-gcp.md) - ## April 2021 Updates in April include: + - [Refreshed resource health page (in preview)](#refreshed-resource-health-page-in-preview) - [Container registry images that have been recently pulled are now rescanned weekly (released for general availability (GA))](#container-registry-images-that-have-been-recently-pulled-are-now-rescanned-weekly-released-for-general-availability-ga) - [Use Azure Defender for Kubernetes to protect hybrid and multicloud Kubernetes deployments (in preview)](#use-azure-defender-for-kubernetes-to-protect-hybrid-and-multicloud-kubernetes-deployments-in-preview) @@ -888,7 +971,7 @@ Updates in April include: ### Refreshed resource health page (in preview) -Security Center's resource health has been expanded, enhanced, and improved to provide a snapshot view of the overall health of a single resource. +Security Center's resource health has been expanded, enhanced, and improved to provide a snapshot view of the overall health of a single resource. You can review detailed information about the resource and all recommendations that apply to that resource. Also, if you're using [the advanced protection plans of Microsoft Defender](defender-for-cloud-introduction.md), you can see outstanding security alerts for that specific resource too. @@ -905,7 +988,6 @@ This preview page in Security Center's portal pages shows: Learn more in [Tutorial: Investigate the health of your resources](investigate-resource-health.md). - ### Container registry images that have been recently pulled are now rescanned weekly (released for general availability (GA)) Azure Defender for container registries includes a built-in vulnerability scanner. This scanner immediately scans any image you push to your registry and any image pulled within the last 30 days. @@ -916,10 +998,9 @@ Scanning is charged on a per image basis, so there's no additional charge for th Learn more about this scanner in [Use Azure Defender for container registries to scan your images for vulnerabilities](defender-for-containers-usage.md). - ### Use Azure Defender for Kubernetes to protect hybrid and multicloud Kubernetes deployments (in preview) -Azure Defender for Kubernetes is expanding its threat protection capabilities to defend your clusters wherever they're deployed. This has been enabled by integrating with [Azure Arc-enabled Kubernetes](../azure-arc/kubernetes/overview.md) and its new [extensions capabilities](../azure-arc/kubernetes/extensions.md). +Azure Defender for Kubernetes is expanding its threat protection capabilities to defend your clusters wherever they're deployed. 
This has been enabled by integrating with [Azure Arc-enabled Kubernetes](../azure-arc/kubernetes/overview.md) and its new [extensions capabilities](../azure-arc/kubernetes/extensions.md). When you've enabled Azure Arc on your non-Azure Kubernetes clusters, a new recommendation from Azure Security Center offers to deploy the Azure Defender extension to them with only a few clicks. @@ -937,19 +1018,17 @@ Learn more in [Use Azure Defender for Kubernetes with your on-premises and multi :::image type="content" source="media/defender-for-kubernetes-azure-arc/extension-recommendation.png" alt-text="Azure Security Center's recommendation for deploying the Azure Defender extension for Azure Arc-enabled Kubernetes clusters." lightbox="media/defender-for-kubernetes-azure-arc/extension-recommendation.png"::: - ### Microsoft Defender for Endpoint integration with Azure Defender now supports Windows Server 2019 and Windows 10 on Windows Virtual Desktop released for general availability (GA) Microsoft Defender for Endpoint is a holistic, cloud delivered endpoint security solution. It provides risk-based vulnerability management and assessment as well as endpoint detection and response (EDR). For a full list of the benefits of using Defender for Endpoint together with Azure Security Center, see [Protect your endpoints with Security Center's integrated EDR solution: Microsoft Defender for Endpoint](integration-defender-for-endpoint.md). -When you enable Azure Defender for Servers running Windows Server, a license for Defender for Endpoint is included with the plan. If you've already enabled Azure Defender for Servers and you have Windows Server 2019 servers in your subscription, they'll automatically receive Defender for Endpoint with this update. No manual action is required. +When you enable Azure Defender for Servers running Windows Server, a license for Defender for Endpoint is included with the plan. If you've already enabled Azure Defender for Servers and you have Windows Server 2019 servers in your subscription, they'll automatically receive Defender for Endpoint with this update. No manual action is required. Support has now been expanded to include Windows Server 2019 and Windows 10 on [Windows Virtual Desktop](../virtual-desktop/overview.md). > [!NOTE] > If you're enabling Defender for Endpoint on a Windows Server 2019 server, ensure it meets the prerequisites described in [Enable the Microsoft Defender for Endpoint integration](integration-defender-for-endpoint.md#enable-the-microsoft-defender-for-endpoint-integration). - ### Recommendations to enable Azure Defender for DNS and Resource Manager (in preview) Two new recommendations have been added to simplify the process of enabling [Azure Defender for Resource Manager](defender-for-resource-manager-introduction.md) and [Azure Defender for DNS](defender-for-dns-introduction.md): @@ -957,12 +1036,11 @@ Two new recommendations have been added to simplify the process of enabling [Azu - **Azure Defender for Resource Manager should be enabled** - Defender for Resource Manager automatically monitors the resource management operations in your organization. Azure Defender detects threats and alerts you about suspicious activity. - **Azure Defender for DNS should be enabled** - Defender for DNS provides an additional layer of protection for your cloud resources by continuously monitoring all DNS queries from your Azure resources. Azure Defender alerts you about suspicious activity at the DNS layer. -Enabling Azure Defender plans results in charges. 
Learn about the pricing details per region on Security Center's pricing page: https://aka.ms/pricing-security-center. +Enabling Azure Defender plans results in charges. Learn about the pricing details per region on Security Center's [pricing page](https://azure.microsoft.com/pricing/details/defender-for-cloud/). > [!TIP] > Preview recommendations don't render a resource unhealthy, and they aren't included in the calculations of your secure score. Remediate them wherever possible, so that when the preview period ends they'll contribute towards your score. Learn more about how to respond to these recommendations in [Remediate recommendations in Azure Security Center](implement-security-recommendations.md). - ### Three regulatory compliance standards added: Azure CIS 1.3.0, CMMC Level 3, and New Zealand ISM Restricted We've added three standards for use with Azure Security Center. Using the regulatory compliance dashboard, you can now track your compliance with: @@ -976,6 +1054,7 @@ You can assign these to your subscriptions as described in [Customize the set of :::image type="content" source="media/release-notes/additional-regulatory-compliance-standards.png" alt-text="Three standards added for use with Azure Security Center's regulatory compliance dashboard." lightbox="media/release-notes/additional-regulatory-compliance-standards.png"::: Learn more in: + - [Customize the set of standards in your regulatory compliance dashboard](update-regulatory-compliance-packages.md) - [Tutorial: Improve your regulatory compliance](regulatory-compliance-dashboard.md) - [FAQ - Regulatory compliance dashboard](regulatory-compliance-dashboard.md#faq---regulatory-compliance-dashboard) @@ -987,12 +1066,12 @@ Azure's [Guest Configuration extension](../governance/policy/concepts/guest-conf We've added four new recommendations to Security Center to make the most of this extension. - Two recommendations prompt you to install the extension and its required system-managed identity: - - **Guest Configuration extension should be installed on your machines** - - **Virtual machines' Guest Configuration extension should be deployed with system-assigned managed identity** + - **Guest Configuration extension should be installed on your machines** + - **Virtual machines' Guest Configuration extension should be deployed with system-assigned managed identity** - When the extension is installed and running, it will begin auditing your machines and you'll be prompted to harden settings such as configuration of the operating system and environment settings. These two recommendations will prompt you to harden your Windows and Linux machines as described: - - **Windows Defender Exploit Guard should be enabled on your machines** - - **Authentication to Linux machines should require SSH keys** + - **Windows Defender Exploit Guard should be enabled on your machines** + - **Authentication to Linux machines should require SSH keys** Learn more in [Understand Azure Policy's Guest Configuration](../governance/policy/concepts/guest-configuration.md). @@ -1014,7 +1093,6 @@ The recommendations listed below are being moved to the **Implement security bes Learn which recommendations are in each security control in [Security controls and their recommendations](secure-score-security-controls.md#security-controls-and-their-recommendations). - ### 11 Azure Defender alerts deprecated The 11 Azure Defender alerts listed below have been deprecated. @@ -1026,7 +1104,6 @@ The 11 Azure Defender alerts listed below have been deprecated. 
| ARM_MicroBurstDomainInfo | PREVIEW - MicroBurst toolkit "Get-AzureDomainInfo" function run detected | | ARM_MicroBurstRunbook | PREVIEW - MicroBurst toolkit "Get-AzurePasswords" function run detected | - - These nine alerts relate to an Azure Active Directory Identity Protection connector (IPC) that has already been deprecated: | AlertType | AlertDisplayName | @@ -1041,11 +1118,10 @@ The 11 Azure Defender alerts listed below have been deprecated. | LeakedCredentials | Azure AD threat intelligence | | AADAI | Azure AD AI | - > [!TIP] > These nine IPC alerts were never Security Center alerts. They’re part of the Azure Active Directory (AAD) Identity Protection connector (IPC) that was sending them to Security Center. For the last two years, the only customers who’ve been seeing those alerts are organizations who configured the export (from the connector to ASC) in 2019 or earlier. AAD IPC has continued to show them in its own alerts systems and they’ve continued to be available in Azure Sentinel. The only change is that they’re no longer appearing in Security Center. -### Two recommendations from "Apply system updates" security control were deprecated +### Two recommendations from "Apply system updates" security control were deprecated The following two recommendations were deprecated and the changes might result in a slight impact on your secure score: @@ -1060,8 +1136,7 @@ Learn more about these recommendations in the [security recommendations referenc The Azure Defender dashboard's coverage area includes tiles for the relevant Azure Defender plans for your environment. Due to an issue with the reporting of the numbers of protected and unprotected resources, we've decided to temporarily remove the resource coverage status for **Azure Defender for SQL on machines** until the issue is resolved. - -### 21 recommendations moved between security controls +### 21 recommendations moved between security controls The following recommendations were moved to different security controls. Security controls are logical groups of related security recommendations, and reflect your vulnerable attack surfaces. This move ensures that each of these recommendations is in the most appropriate control to meet its objective. @@ -1072,8 +1147,6 @@ Learn which recommendations are in each security control in [Security controls a |Vulnerability assessment should be enabled on your SQL servers
                  Vulnerability assessment should be enabled on your SQL managed instances
                  Vulnerabilities on your SQL databases should be remediated
                  Vulnerabilities on your SQL databases in VMs should be remediated |Moving from Remediate vulnerabilities (worth 6 points)
                  to Remediate security configurations (worth 4 points).
                  Depending on your environment, these recommendations will have a reduced impact on your score.| |There should be more than one owner assigned to your subscription
                  Automation account variables should be encrypted
                  IoT Devices - Auditd process stopped sending events
                  IoT Devices - Operating system baseline validation failure
                  IoT Devices - TLS cipher suite upgrade needed
                  IoT Devices - Open Ports On Device
                  IoT Devices - Permissive firewall policy in one of the chains was found
                  IoT Devices - Permissive firewall rule in the input chain was found
                  IoT Devices - Permissive firewall rule in the output chain was found
                  Diagnostic logs in IoT Hub should be enabled
                  IoT Devices - Agent sending underutilized messages
                  IoT Devices - Default IP Filter Policy should be Deny
                  IoT Devices - IP Filter rule large IP range
                  IoT Devices - Agent message intervals and size should be adjusted
                  IoT Devices - Identical Authentication Credentials
                  IoT Devices - Audited process stopped sending events
                  IoT Devices - Operating system (OS) baseline configuration should be fixed|Moving to **Implement security best practices**.
                  When a recommendation moves to the Implement security best practices security control, which is worth no points, the recommendation no longer affects your secure score.| - - ## March 2021 Updates in March include: @@ -1087,10 +1160,9 @@ Updates in March include: - [Two legacy recommendations no longer write data directly to Azure activity log](#two-legacy-recommendations-no-longer-write-data-directly-to-azure-activity-log) - [Recommendations page enhancements](#recommendations-page-enhancements) - ### Azure Firewall management integrated into Security Center -When you open Azure Security Center, the first page to appear is the overview page. +When you open Azure Security Center, the first page to appear is the overview page. This interactive dashboard provides a unified view into the security posture of your hybrid cloud workloads. Additionally, it shows security alerts, coverage information, and more. @@ -1100,7 +1172,6 @@ Learn more about this dashboard in [Azure Security Center's overview page](overv :::image type="content" source="media/release-notes/overview-dashboard-firewall-manager.png" alt-text="Security Center's overview dashboard with a tile for Azure Firewall"::: - ### SQL vulnerability assessment now includes the "Disable rule" experience (preview) Security Center includes a built-in vulnerability scanner to help you discover, track, and remediate potential database vulnerabilities. The results from your assessment scans provide an overview of your SQL machines' security state, and details of any security findings. @@ -1109,8 +1180,6 @@ If you have an organizational need to ignore a finding, rather than remediate it Learn more in [Disable specific findings](defender-for-sql-on-machines-vulnerability-assessment.md#disable-specific-findings). - - ### Azure Monitor Workbooks integrated into Security Center and three templates provided As part of Ignite Spring 2021, we announced an integrated Azure Monitor Workbooks experience in Security Center. @@ -1127,10 +1196,9 @@ Learn about using these reports or building your own in [Create rich, interactiv :::image type="content" source="media/custom-dashboards-azure-workbooks/secure-score-over-time-snip.png" alt-text="Secure score over time report."::: - ### Regulatory compliance dashboard now includes Azure Audit reports (preview) -From the regulatory compliance dashboard's toolbar, you can now download Azure and Dynamics certification reports. +From the regulatory compliance dashboard's toolbar, you can now download Azure and Dynamics certification reports. :::image type="content" source="media/release-notes/audit-reports-regulatory-compliance-dashboard.png" alt-text="Regulatory compliance dashboard's toolbar"::: @@ -1140,8 +1208,6 @@ Learn more about [Managing the standards in your regulatory compliance dashboard :::image type="content" source="media/release-notes/audit-reports-list-regulatory-compliance-dashboard.png" alt-text="Filtering the list of available Azure Audit reports."::: - - ### Recommendation data can be viewed in Azure Resource Graph with "Explore in ARG" The recommendation details pages now include the "Explore in ARG" toolbar button. Use this button to open an Azure Resource Graph query and explore, export, and share the recommendation's data. 
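As a companion to the **Explore in ARG** button described above, the following Azure Resource Graph query is a minimal sketch of the kind of query that button opens. The `securityresources` table and the `microsoft.security/assessments` type appear elsewhere in these notes; the property paths (`properties.displayName`, `properties.status.code`, `properties.resourceDetails.Id`) and the example recommendation name are illustrative assumptions, not the exact query the portal generates.

```kusto
// Sketch: list the unhealthy resources behind a single recommendation.
// Property paths and the recommendation name are illustrative assumptions.
securityresources
| where type == "microsoft.security/assessments"
| where properties.displayName == "System updates should be installed on your machines"
| where properties.status.code == "Unhealthy"
| project assessedResource = tostring(properties.resourceDetails.Id),
          status = tostring(properties.status.code)
```

A query along these lines can be pasted into Azure Resource Graph Explorer in the portal to reproduce, export, or share the same data outside the recommendation details page.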
@@ -1152,7 +1218,6 @@ Learn more about [Azure Resource Graph (ARG)](../governance/resource-graph/index :::image type="content" source="media/release-notes/explore-in-resource-graph.png" alt-text="Explore recommendation data in Azure Resource Graph."::: - ### Updates to the policies for deploying workflow automation Automating your organization's monitoring and incident response processes can greatly improve the time it takes to investigate and mitigate security incidents. @@ -1165,7 +1230,6 @@ We provide three Azure Policy 'DeployIfNotExist' policies that create and config |Workflow automation for security recommendations|[Deploy Workflow Automation for Azure Security Center recommendations](https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2f73d6ab6c-2475-4850-afd6-43795f3492ef)|73d6ab6c-2475-4850-afd6-43795f3492ef| |Workflow automation for regulatory compliance changes|[Deploy Workflow Automation for Azure Security Center regulatory compliance](https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2f73d6ab6c-509122b9-ddd9-47ba-a5f1-d0dac20be63c)|509122b9-ddd9-47ba-a5f1-d0dac20be63c| - There are two updates to the features of these policies: - When assigned, they will remain enabled by enforcement. @@ -1175,21 +1239,20 @@ Get started with [workflow automation templates](https://github.com/Azure/Azure- Learn more about how to [Automate responses to Security Center triggers](workflow-automation.md). - -### Two legacy recommendations no longer write data directly to Azure activity log +### Two legacy recommendations no longer write data directly to Azure activity log Security Center passes the data for almost all security recommendations to Azure Advisor, which in turn, writes it to [Azure activity log](../azure-monitor/essentials/activity-log.md). For two recommendations, the data is simultaneously written directly to Azure activity log. With this change, Security Center stops writing data for these legacy security recommendations directly to activity Log. Instead, we're exporting the data to Azure Advisor as we do for all the other recommendations. The two legacy recommendations are: + - Endpoint protection health issues should be resolved on your machines - Vulnerabilities in security configuration on your machines should be remediated If you've been accessing information for these two recommendations in activity log's "Recommendation of type TaskDiscovery" category, this is no longer available. - -### Recommendations page enhancements +### Recommendations page enhancements We've released an improved version of the recommendations list to present more information at a glance. 
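Related to the activity log change described above: because Security Center now routes the data for those two legacy recommendations through Azure Advisor like every other recommendation, one way to keep retrieving it programmatically is an Azure Resource Graph query over Advisor's recommendations. This is a hedged sketch; the `advisorresources` table, the `microsoft.advisor/recommendations` type, and the property paths are assumptions rather than anything stated in these notes.

```kusto
// Sketch: security recommendations as surfaced through Azure Advisor.
// Table, type, and property names are assumptions to verify in your tenant.
advisorresources
| where type == "microsoft.advisor/recommendations"
| where properties.category == "Security"
| project impactedResource = tostring(properties.resourceMetadata.resourceId),
          problem = tostring(properties.shortDescription.problem)
```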
@@ -1217,7 +1280,6 @@ Updates in February include: - [Workflow automations can be triggered by changes to regulatory compliance assessments (in preview)](#workflow-automations-can-be-triggered-by-changes-to-regulatory-compliance-assessments-in-preview) - [Asset inventory page enhancements](#asset-inventory-page-enhancements) - ### New security alerts page in the Azure portal released for general availability (GA) Azure Security Center's security alerts page has been redesigned to provide: @@ -1233,7 +1295,6 @@ Azure Security Center's security alerts page has been redesigned to provide: :::image type="content" source="media/managing-and-responding-alerts/alerts-page.png" alt-text="Azure Security Center's security alerts list"::: - ### Kubernetes workload protection recommendations released for general availability (GA) We're happy to announce the general availability (GA) of the set of recommendations for Kubernetes workload protections. @@ -1249,12 +1310,11 @@ Learn more in [Workload protection best-practices using Kubernetes admission con > [!NOTE] > While the recommendations were in preview, they didn't render an AKS cluster resource unhealthy, and they weren't included in the calculations of your secure score. with this GA announcement these will be included in the score calculation. If you haven't remediated them already, this might result in a slight impact on your secure score. Remediate them wherever possible as described in [Remediate recommendations in Azure Security Center](implement-security-recommendations.md). - ### Microsoft Defender for Endpoint integration with Azure Defender now supports Windows Server 2019 and Windows 10 on Windows Virtual Desktop (in preview) Microsoft Defender for Endpoint is a holistic, cloud delivered endpoint security solution. It provides risk-based vulnerability management and assessment as well as endpoint detection and response (EDR). For a full list of the benefits of using Defender for Endpoint together with Azure Security Center, see [Protect your endpoints with Security Center's integrated EDR solution: Microsoft Defender for Endpoint](integration-defender-for-endpoint.md). -When you enable Azure Defender for Servers running Windows Server, a license for Defender for Endpoint is included with the plan. If you've already enabled Azure Defender for Servers and you have Windows Server 2019 servers in your subscription, they'll automatically receive Defender for Endpoint with this update. No manual action is required. +When you enable Azure Defender for Servers running Windows Server, a license for Defender for Endpoint is included with the plan. If you've already enabled Azure Defender for Servers and you have Windows Server 2019 servers in your subscription, they'll automatically receive Defender for Endpoint with this update. No manual action is required. Support has now been expanded to include Windows Server 2019 and Windows 10 on [Windows Virtual Desktop](../virtual-desktop/overview.md). @@ -1267,28 +1327,28 @@ When you're reviewing the details of a recommendation, it's often helpful to be :::image type="content" source="media/release-notes/view-policy-definition.png" alt-text="Link to Azure Policy page for the specific policy supporting a recommendation."::: -Use this link to view the policy definition and review the evaluation logic. +Use this link to view the policy definition and review the evaluation logic. 
If you're reviewing the list of recommendations on our [Security recommendations reference guide](recommendations-reference.md), you'll also see links to the policy definition pages: :::image type="content" source="media/release-notes/view-policy-definition-from-documentation.png" alt-text="Accessing the Azure Policy page for a specific policy directly from the Azure Security Center recommendations reference page." lightbox="media/release-notes/view-policy-definition-from-documentation.png"::: - ### SQL data classification recommendation no longer affects your secure score + The recommendation **Sensitive data in your SQL databases should be classified** no longer affects your secure score. This is the only recommendation in the **Apply data classification** security control, so that control now has a secure score value of 0. For a full list of all security controls in Security Center, together with their scores and a list of the recommendations in each, see [Security controls and their recommendations](secure-score-security-controls.md#security-controls-and-their-recommendations). - ### Workflow automations can be triggered by changes to regulatory compliance assessments (in preview) + We've added a third data type to the trigger options for your workflow automations: changes to regulatory compliance assessments. Learn how to use the workflow automation tools in [Automate responses to Security Center triggers](workflow-automation.md). :::image type="content" source="media/release-notes/regulatory-compliance-triggers-workflow-automation.png" alt-text="Using changes to regulatory compliance assessments to trigger a workflow automation." lightbox="media/release-notes/regulatory-compliance-triggers-workflow-automation.png"::: - ### Asset inventory page enhancements + Security Center's asset inventory page has been improved in the following ways: - Summaries at the top of the page now include **Unregistered subscriptions**, showing the number of subscriptions without Security Center enabled. @@ -1296,18 +1356,16 @@ Security Center's asset inventory page has been improved in the following ways: :::image type="content" source="media/release-notes/unregistered-subscriptions.png" alt-text="Count of unregistered subscriptions in the summaries at the top of the asset inventory page."::: - Filters have been expanded and enhanced to include: - - **Counts** - Each filter presents the number of resources that meet the criteria of each category + - **Counts** - Each filter presents the number of resources that meet the criteria of each category - :::image type="content" source="media/release-notes/counts-in-inventory-filters.png" alt-text="Counts in the filters in the asset inventory page of Azure Security Center."::: + :::image type="content" source="media/release-notes/counts-in-inventory-filters.png" alt-text="Counts in the filters in the asset inventory page of Azure Security Center."::: - - **Contains exemptions filter** (Optional) - narrow the results to resources that have/haven't got exemptions. This filter isn't shown by default, but is accessible from the **Add filter** button. + - **Contains exemptions filter** (Optional) - narrow the results to resources that have/haven't got exemptions. This filter isn't shown by default, but is accessible from the **Add filter** button. 
- :::image type="content" source="media/release-notes/adding-contains-exemption-filter.gif" alt-text="Adding the filter 'contains exemption' in Azure Security Center's asset inventory page"::: + :::image type="content" source="media/release-notes/adding-contains-exemption-filter.gif" alt-text="Adding the filter 'contains exemption' in Azure Security Center's asset inventory page"::: Learn more about how to [Explore and manage your resources with asset inventory](asset-inventory.md). - - ## January 2021 Updates in January include: @@ -1325,14 +1383,13 @@ Updates in January include: - ["Not applicable" resources now reported as "Compliant" in Azure Policy assessments](#not-applicable-resources-now-reported-as-compliant-in-azure-policy-assessments) - [Export weekly snapshots of secure score and regulatory compliance data with continuous export (preview)](#export-weekly-snapshots-of-secure-score-and-regulatory-compliance-data-with-continuous-export-preview) - ### Azure Security Benchmark is now the default policy initiative for Azure Security Center Azure Security Benchmark is the Microsoft-authored, Azure-specific set of guidelines for security and compliance best practices based on common compliance frameworks. This widely respected benchmark builds on the controls from the [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/azure/) and the [National Institute of Standards and Technology (NIST)](https://www.nist.gov/) with a focus on cloud-centric security. In recent months, Security Center's list of built-in security recommendations has grown significantly to expand our coverage of this benchmark. -From this release, the benchmark is the foundation for Security Center’s recommendations and fully integrated as the default policy initiative. +From this release, the benchmark is the foundation for Security Center’s recommendations and fully integrated as the default policy initiative. All Azure services have a security baseline page in their documentation. These baselines are built on Azure Security Benchmark. @@ -1340,7 +1397,7 @@ If you're using Security Center's regulatory compliance dashboard, you'll see tw :::image type="content" source="media/release-notes/regulatory-compliance-with-azure-security-benchmark.png" alt-text="Azure Security Center's regulatory compliance dashboard showing the Azure Security Benchmark"::: -Existing recommendations are unaffected and as the benchmark grows, changes will automatically be reflected within Security Center. +Existing recommendations are unaffected and as the benchmark grows, changes will automatically be reflected within Security Center. To learn more, see the following pages: @@ -1368,7 +1425,6 @@ Main capabilities: [Learn more about Azure Arc-enabled servers](../azure-arc/servers/index.yml). - ### Secure score for management groups is now available in preview The secure score page now shows the aggregated secure scores for your management groups in addition to the subscription level. So now you can see the list of management groups in your organization and the score for each management group. @@ -1388,10 +1444,9 @@ Learn about external tools made possible with the secure score API in [the secur Learn more about [secure score and security controls in Azure Security Center](secure-score-security-controls.md). - ### Dangling DNS protections added to Azure Defender for App Service -Subdomain takeovers are a common, high-severity threat for organizations. 
A subdomain takeover can occur when you have a DNS record that points to a deprovisioned web site. Such DNS records are also known as "dangling DNS" entries. CNAME records are especially vulnerable to this threat. +Subdomain takeovers are a common, high-severity threat for organizations. A subdomain takeover can occur when you have a DNS record that points to a deprovisioned web site. Such DNS records are also known as "dangling DNS" entries. CNAME records are especially vulnerable to this threat. Subdomain takeovers enable threat actors to redirect traffic intended for an organization’s domain to a site performing malicious activity. @@ -1403,7 +1458,6 @@ Learn more: - [Prevent dangling DNS entries and avoid subdomain takeover](../security/fundamentals/subdomain-takeover.md) - Learn about the threat of subdomain takeover and the dangling DNS aspect - [Introduction to Azure Defender for App Service](defender-for-app-service-introduction.md) - ### Multicloud connectors are released for general availability (GA) With cloud workloads commonly spanning multiple cloud platforms, cloud security services must do the same. @@ -1428,10 +1482,10 @@ From Defender for Cloud's menu, select **Multicloud connectors** and you'll see :::image type="content" source="./media/quickstart-onboard-aws/add-aws-account.png" alt-text="Add AWS account button on Security Center's multicloud connectors page"::: Learn more in: + - [Connect your AWS accounts to Azure Security Center](quickstart-onboard-aws.md) - [Connect your GCP projects to Azure Security Center](quickstart-onboard-gcp.md) - ### Exempt entire recommendations from your secure score for subscriptions and management groups We're expanding the exemption capability to include entire recommendations. Providing further options to fine-tune the security recommendations that Security Center makes for your subscriptions, management group, or resources. @@ -1446,8 +1500,6 @@ With this preview feature, you can now create an exemption for a recommendation Learn more in [Exempting resources and recommendations from your secure score](exempt-resource.md). - - ### Users can now request tenant-wide visibility from their global administrator If a user doesn't have permissions to see Security Center data, they'll now see a link to request permissions from their organization's global administrator. The request includes the role they'd like and the justification for why it's necessary. @@ -1456,10 +1508,9 @@ If a user doesn't have permissions to see Security Center data, they'll now see Learn more in [Request tenant-wide permissions when yours are insufficient](tenant-wide-permissions-management.md#request-tenant-wide-permissions-when-yours-are-insufficient). - ### 35 preview recommendations added to increase coverage of Azure Security Benchmark -[Azure Security Benchmark](/security/benchmark/azure/introduction) is the default policy initiative in Azure Security Center. +[Azure Security Benchmark](/security/benchmark/azure/introduction) is the default policy initiative in Azure Security Center. To increase the coverage of this benchmark, the following 35 preview recommendations have been added to Security Center. @@ -1474,7 +1525,6 @@ To increase the coverage of this benchmark, the following 35 preview recommendat | Protect applications against DDoS attacks | - Web Application Firewall (WAF) should be enabled for Application Gateway
                  - Web Application Firewall (WAF) should be enabled for Azure Front Door Service service | | Restrict unauthorized network access | - Firewall should be enabled on Key Vault
                  - Private endpoint should be configured for Key Vault
                  - App Configuration should use private link
                  - Azure Cache for Redis should reside within a virtual network
                  - Azure Event Grid domains should use private link
                  - Azure Event Grid topics should use private link
                  - Azure Machine Learning workspaces should use private link
                  - Azure SignalR Service should use private link
                  - Azure Spring Cloud should use network injection
                  - Container registries should not allow unrestricted network access
                  - Container registries should use private link
                  - Public network access should be disabled for MariaDB servers
                  - Public network access should be disabled for MySQL servers
                  - Public network access should be disabled for PostgreSQL servers
                  - Storage account should use a private link connection
                  - Storage accounts should restrict network access using virtual network rules
                  - VM Image Builder templates should use private link| - Related links: - [Learn more about Azure Security Benchmark](/security/benchmark/azure/introduction) @@ -1482,29 +1532,24 @@ Related links: - [Learn more about Azure Database for MySQL](../mysql/overview.md) - [Learn more about Azure Database for PostgreSQL](../postgresql/overview.md) +### CSV export of filtered list of recommendations +In November 2020, we added filters to the recommendations page ([Recommendations list now includes filters](release-notes-archive.md#recommendations-list-now-includes-filters)). In December, we expanded those filters ([Recommendations page has new filters for environment, severity, and available responses](release-notes-archive.md#recommendations-page-has-new-filters-for-environment-severity-and-available-responses)). +With this announcement, we're changing the behavior of the **Download to CSV** button so that the CSV export only includes the recommendations currently displayed in the filtered list. -### CSV export of filtered list of recommendations - -In November 2020, we added filters to the recommendations page ([Recommendations list now includes filters](release-notes-archive.md#recommendations-list-now-includes-filters)). In December, we expanded those filters ([Recommendations page has new filters for environment, severity, and available responses](release-notes-archive.md#recommendations-page-has-new-filters-for-environment-severity-and-available-responses)). - -With this announcement, we're changing the behavior of the **Download to CSV** button so that the CSV export only includes the recommendations currently displayed in the filtered list. - -For example, in the image below you can see that the list has been filtered to two recommendations. The CSV file that is generated includes the status details for every resource affected by those two recommendations. +For example, in the image below you can see that the list has been filtered to two recommendations. The CSV file that is generated includes the status details for every resource affected by those two recommendations. :::image type="content" source="media/managing-and-responding-alerts/export-to-csv-with-filters.png" alt-text="Exporting filtered recommendations to a CSV file."::: Learn more in [Security recommendations in Azure Security Center](review-security-recommendations.md). - ### "Not applicable" resources now reported as "Compliant" in Azure Policy assessments Previously, resources that were evaluated for a recommendation and found to be **not applicable** appeared in Azure Policy as "Non-compliant". No user actions could change their state to "Compliant". With this change, they're reported as "Compliant" for improved clarity. The only impact will be seen in Azure Policy where the number of compliant resources will increase. There will be no impact to your secure score in Azure Security Center. - ### Export weekly snapshots of secure score and regulatory compliance data with continuous export (preview) We've added a new preview feature to the [continuous export](continuous-export.md) tools for exporting weekly snapshots of secure score and regulatory compliance data. 
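Once the weekly snapshots land in a Log Analytics workspace, a workspace query can chart how the score moves over time. The sketch below assumes the export creates a `SecureScores` table with a `PercentageScore` column; check the tables and columns the export actually produces in your workspace before relying on these names.

```kusto
// Sketch: weekly secure score trend built from exported snapshots.
// Table and column names are assumptions; verify them in your workspace.
SecureScores
| where TimeGenerated > ago(90d)
| summarize weeklyScore = max(PercentageScore) by bin(TimeGenerated, 7d)
| order by TimeGenerated asc
```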
@@ -1533,12 +1578,11 @@ Updates in December include: - [Recommendations page has new filters for environment, severity, and available responses](#recommendations-page-has-new-filters-for-environment-severity-and-available-responses) - [Continuous export gets new data types and improved deployifnotexist policies](#continuous-export-gets-new-data-types-and-improved-deployifnotexist-policies) - ### Azure Defender for SQL servers on machines is generally available Azure Security Center offers two Azure Defender plans for SQL Servers: -- **Azure Defender for Azure SQL database servers** - defends your Azure-native SQL Servers +- **Azure Defender for Azure SQL database servers** - defends your Azure-native SQL Servers - **Azure Defender for SQL servers on machines** - extends the same protections to your SQL servers in hybrid, multicloud, and on-premises environments With this announcement, **Azure Defender for SQL** now protects your databases and their data wherever they're located. @@ -1551,28 +1595,25 @@ Azure Defender for SQL includes vulnerability assessment capabilities. The vulne Learn more about [Azure Defender for SQL](defender-for-sql-introduction.md). - ### Azure Defender for SQL support for Azure Synapse Analytics dedicated SQL pool is generally available Azure Synapse Analytics (formerly SQL DW) is an analytics service that combines enterprise data warehousing and big data analytics. Dedicated SQL pools are the enterprise data warehousing features of Azure Synapse. Learn more in [What is Azure Synapse Analytics (formerly SQL DW)?](../synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-what-is.md). Azure Defender for SQL protects your dedicated SQL pools with: -- **Advanced threat protection** to detect threats and attacks +- **Advanced threat protection** to detect threats and attacks - **Vulnerability assessment capabilities** to identify and remediate security misconfigurations Azure Defender for SQL's support for Azure Synapse Analytics SQL pools is automatically added to Azure SQL databases bundle in Azure Security Center. You'll find a new “Azure Defender for SQL” tab in your Synapse workspace page in the Azure portal. Learn more about [Azure Defender for SQL](defender-for-sql-introduction.md). - ### Global Administrators can now grant themselves tenant-level permissions -A user with the Azure Active Directory role of **Global Administrator** might have tenant-wide responsibilities, but lack the Azure permissions to view that organization-wide information in Azure Security Center. +A user with the Azure Active Directory role of **Global Administrator** might have tenant-wide responsibilities, but lack the Azure permissions to view that organization-wide information in Azure Security Center. To assign yourself tenant-level permissions, follow the instructions in [Grant tenant-wide permissions to yourself](tenant-wide-permissions-management.md#grant-tenant-wide-permissions-to-yourself). - ### Two new Azure Defender plans: Azure Defender for DNS and Azure Defender for Resource Manager (in preview) We've added two new cloud-native breadth threat protection capabilities for your Azure environment. @@ -1580,15 +1621,14 @@ We've added two new cloud-native breadth threat protection capabilities for your These new protections greatly enhance your resiliency against attacks from threat actors, and significantly increase the number of Azure resources protected by Azure Defender. 
- **Azure Defender for Resource Manager** - automatically monitors all resource management operations performed in your organization. For more information, see: - - [Introduction to Azure Defender for Resource Manager](defender-for-resource-manager-introduction.md) - - [Respond to Azure Defender for Resource Manager alerts](defender-for-resource-manager-usage.md) - - [List of alerts provided by Azure Defender for Resource Manager](alerts-reference.md#alerts-resourcemanager) + - [Introduction to Azure Defender for Resource Manager](defender-for-resource-manager-introduction.md) + - [Respond to Azure Defender for Resource Manager alerts](defender-for-resource-manager-usage.md) + - [List of alerts provided by Azure Defender for Resource Manager](alerts-reference.md#alerts-resourcemanager) - **Azure Defender for DNS** - continuously monitors all DNS queries from your Azure resources. For more information, see: - - [Introduction to Azure Defender for DNS](defender-for-dns-introduction.md) - - [Respond to Azure Defender for DNS alerts](defender-for-dns-usage.md) - - [List of alerts provided by Azure Defender for DNS](alerts-reference.md#alerts-dns) - + - [Introduction to Azure Defender for DNS](defender-for-dns-introduction.md) + - [Respond to Azure Defender for DNS alerts](defender-for-dns-usage.md) + - [List of alerts provided by Azure Defender for DNS](alerts-reference.md#alerts-dns) ### New security alerts page in the Azure portal (preview) @@ -1608,43 +1648,39 @@ To access the new experience, use the 'try it now' link from the banner at the t To create sample alerts from the new alerts experience, see [Generate sample Azure Defender alerts](alert-validation.md#generate-sample-security-alerts). - -### Revitalized Security Center experience in Azure SQL Database & SQL Managed Instance +### Revitalized Security Center experience in Azure SQL Database & SQL Managed Instance The Security Center experience within SQL provides access to the following Security Center and Azure Defender for SQL features: - **Security recommendations** – Security Center periodically analyzes the security state of all connected Azure resources to identify potential security misconfigurations. It then provides recommendations on how to remediate those vulnerabilities and improve organizations’ security posture. - **Security alerts** – a detection service that continuously monitors Azure SQL activities for threats such as SQL injection, brute-force attacks, and privilege abuse. This service triggers detailed and action-oriented security alerts in Security Center and provides options for continuing investigations with Azure Sentinel, Microsoft’s Azure-native SIEM solution. -- **Findings** – a vulnerability assessment service that continuously monitors Azure SQL configurations and helps remediate vulnerabilities. Assessment scans provide an overview of Azure SQL security states together with detailed security findings. +- **Findings** – a vulnerability assessment service that continuously monitors Azure SQL configurations and helps remediate vulnerabilities. Assessment scans provide an overview of Azure SQL security states together with detailed security findings. 
:::image type="content" source="media/release-notes/microsoft-defender-for-cloud-experience-in-sql.png" alt-text="Azure Security Center's security features for SQL are available from within Azure SQL"::: - ### Asset inventory tools and filters updated The inventory page in Azure Security Center has been refreshed with the following changes: -- **Guides and feedback** added to the toolbar. This opens a pane with links to related information and tools. +- **Guides and feedback** added to the toolbar. This opens a pane with links to related information and tools. - **Subscriptions filter** added to the default filters available for your resources. - **Open query** link for opening the current filter options as an Azure Resource Graph query (formerly called "View in resource graph explorer"). -- **Operator options** for each filter. Now you can choose from more logical operators other than '='. For example, you might want to find all resources with active recommendations whose titles include the string 'encrypt'. +- **Operator options** for each filter. Now you can choose from more logical operators other than '='. For example, you might want to find all resources with active recommendations whose titles include the string 'encrypt'. :::image type="content" source="media/release-notes/inventory-filter-operators.png" alt-text="Controls for the operator option in asset inventory's filters"::: Learn more about inventory in [Explore and manage your resources with asset inventory](asset-inventory.md). - ### Recommendation about web apps requesting SSL certificates no longer part of secure score -The recommendation "Web apps should request an SSL certificate for all incoming requests" has been moved from the security control **Manage access and permissions** (worth a maximum of 4 pts) into **Implement security best practices** (which is worth no points). +The recommendation "Web apps should request an SSL certificate for all incoming requests" has been moved from the security control **Manage access and permissions** (worth a maximum of 4 pts) into **Implement security best practices** (which is worth no points). Ensuring a web app requests a certificate certainly makes it more secure. However, for public-facing web apps it's irrelevant. If you access your site over HTTP and not HTTPS, you will not receive any client certificate. So if your application requires client certificates, you should not allow requests to your application over HTTP. Learn more in [Configure TLS mutual authentication for Azure App Service](../app-service/app-service-web-configure-tls-mutual-auth.md). -With this change, the recommendation is now a recommended best practice that does not impact your score. +With this change, the recommendation is now a recommended best practice that does not impact your score. Learn which recommendations are in each security control in [Security controls and their recommendations](secure-score-security-controls.md#security-controls-and-their-recommendations). - ### Recommendations page has new filters for environment, severity, and available responses Azure Security Center monitors all connected resources and generates security recommendations. Use these recommendations to strengthen your hybrid cloud posture and track compliance with the policies and standards relevant to your organization, industry, and country. 
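Picking up the asset inventory example mentioned above (finding all resources with active recommendations whose titles include the string 'encrypt'), the **Open query** link produces an Azure Resource Graph query along these lines. The property paths are illustrative assumptions; the portal-generated query may differ.

```kusto
// Sketch: resources with unhealthy recommendations whose title contains "encrypt".
// Property paths are illustrative assumptions.
securityresources
| where type == "microsoft.security/assessments"
| where properties.status.code == "Unhealthy"
| where properties.displayName contains "encrypt"
| project resourceId = tostring(properties.resourceDetails.Id),
          recommendation = tostring(properties.displayName)
```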
@@ -1660,9 +1696,10 @@ The filters added this month provide options to refine the recommendations list - **Response actions** - View recommendations according to the availability of Security Center response options: Fix, Deny, and Enforce > [!TIP] - > The response actions filter replaces the **Quick fix available (Yes/No)** filter. - > + > The response actions filter replaces the **Quick fix available (Yes/No)** filter. + > > Learn more about each of these response options: + > > - [Fix button](implement-security-recommendations.md#fix-button) > - [Prevent misconfigurations with Enforce/Deny recommendations](prevent-misconfigurations.md) @@ -1678,11 +1715,11 @@ These tools have been enhanced and expanded in the following ways: - **Continuous export's deployifnotexist policies enhanced**. The policies now: - - **Check whether the configuration is enabled.** If it isn't, the policy will show as non-compliant and create a compliant resource. Learn more about the supplied Azure Policy templates in the "Deploy at scale with Azure Policy tab" in [Set up a continuous export](continuous-export.md#set-up-a-continuous-export). + - **Check whether the configuration is enabled.** If it isn't, the policy will show as non-compliant and create a compliant resource. Learn more about the supplied Azure Policy templates in the "Deploy at scale with Azure Policy tab" in [Set up a continuous export](continuous-export.md#set-up-a-continuous-export). - - **Support exporting security findings.** When using the Azure Policy templates, you can configure your continuous export to include findings. This is relevant when exporting recommendations that have 'sub' recommendations, like findings from vulnerability assessment scanners or specific system updates for the 'parent' recommendation "System updates should be installed on your machines". - - - **Support exporting secure score data.** + - **Support exporting security findings.** When using the Azure Policy templates, you can configure your continuous export to include findings. This is relevant when exporting recommendations that have 'sub' recommendations, like findings from vulnerability assessment scanners or specific system updates for the 'parent' recommendation "System updates should be installed on your machines". + + - **Support exporting secure score data.** - **Regulatory compliance assessment data added (in preview).** You can now continuously export updates to regulatory compliance assessments, including for any custom initiatives, to a Log Analytics workspace or Event Hubs. This feature is unavailable on national clouds. @@ -1716,7 +1753,6 @@ Preview recommendations don't render a resource unhealthy, and they aren't inclu | Enable auditing and logging | - Diagnostic logs in App Services should be enabled | | Implement security best practices | - Azure Backup should be enabled for virtual machines
                  - Geo-redundant backup should be enabled for Azure Database for MariaDB
                  - Geo-redundant backup should be enabled for Azure Database for MySQL
                  - Geo-redundant backup should be enabled for Azure Database for PostgreSQL
                  - PHP should be updated to the latest version for your API app
                  - PHP should be updated to the latest version for your web app
                  - Java should be updated to the latest version for your API app
                  - Java should be updated to the latest version for your function app
                  - Java should be updated to the latest version for your web app
                  - Python should be updated to the latest version for your API app
                  - Python should be updated to the latest version for your function app
                  - Python should be updated to the latest version for your web app
                  - Audit retention for SQL servers should be set to at least 90 days | - Related links: - [Learn more about Azure Security Benchmark](/security/benchmark/azure/introduction) @@ -1727,10 +1763,9 @@ Related links: - [Learn more about Azure Database for MySQL](../mysql/overview.md) - [Learn more about Azure Database for PostgreSQL](../postgresql/overview.md) - ### NIST SP 800 171 R2 added to Security Center's regulatory compliance dashboard -The NIST SP 800-171 R2 standard is now available as a built-in initiative for use with Azure Security Center's regulatory compliance dashboard. The mappings for the controls are described in [Details of the NIST SP 800-171 R2 Regulatory Compliance built-in initiative](../governance/policy/samples/nist-sp-800-171-r2.md). +The NIST SP 800-171 R2 standard is now available as a built-in initiative for use with Azure Security Center's regulatory compliance dashboard. The mappings for the controls are described in [Details of the NIST SP 800-171 R2 Regulatory Compliance built-in initiative](../governance/policy/samples/nist-sp-800-171-r2.md). To apply the standard to your subscriptions and continuously monitor your compliance status, use the instructions in [Customize the set of standards in your regulatory compliance dashboard](update-regulatory-compliance-packages.md). @@ -1738,7 +1773,6 @@ To apply the standard to your subscriptions and continuously monitor your compli For more information about this compliance standard, see [NIST SP 800-171 R2](https://csrc.nist.gov/publications/detail/sp/800-171/rev-2/final). - ### Recommendations list now includes filters You can now filter the list of security recommendations according to a range of criteria. In the following example, the recommendations list has been filtered to show recommendations that: @@ -1749,10 +1783,9 @@ You can now filter the list of security recommendations according to a range of :::image type="content" source="media/release-notes/recommendations-filters.png" alt-text="Filters for the recommendations list."::: - ### Auto provisioning experience improved and expanded -The auto provisioning feature helps reduce management overhead by installing the required extensions on new - and existing - Azure VMs so they can benefit from Security Center's protections. +The auto provisioning feature helps reduce management overhead by installing the required extensions on new - and existing - Azure VMs so they can benefit from Security Center's protections. As Azure Security Center grows, more extensions have been developed and Security Center can monitor a larger list of resource types. The auto provisioning tools have now been expanded to support other extensions and resource types by leveraging the capabilities of Azure Policy. @@ -1764,7 +1797,6 @@ You can now configure the auto provisioning of: Learn more in [Auto provisioning agents and extensions from Azure Security Center](enable-data-collection.md). - ### Secure score is now available in continuous export (preview) With continuous export of secure score, you can stream changes to your score in real-time to Azure Event Hubs or a Log Analytics workspace. Use this capability to: @@ -1775,7 +1807,6 @@ With continuous export of secure score, you can stream changes to your score in Learn more about how to [Continuously export Security Center data](continuous-export.md). 
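If you'd rather spot-check the score than wait for the export pipeline, the same data is queryable on demand through Azure Resource Graph. The following is a minimal sketch rather than part of this feature; it assumes the `securityresources` table and the `microsoft.security/securescores` type are available in your environment, and the property names should be verified against your own results.

```
// Current secure score per subscription, shown as a percentage.
// Sketch only - confirm the score property names in your own tenant.
securityresources
| where type == 'microsoft.security/securescores'
| extend current = todouble(properties.score.current),
         total = todouble(properties.score.max)
| extend percentage = round(100.0 * current / total, 1)
| project subscriptionId, current, total, percentage
```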
- ### "System updates should be installed on your machines" recommendation now includes subrecommendations The **System updates should be installed on your machines** recommendation has been enhanced. The new version includes subrecommendations for each missing update and brings the following improvements: @@ -1784,7 +1815,7 @@ The **System updates should be installed on your machines** recommendation has b :::image type="content" source="./media/upcoming-changes/system-updates-should-be-installed-subassessment.png" alt-text="Opening one of the subrecommendations in the portal experience for the updated recommendation."::: -- Enriched data for the recommendation from Azure Resource Graph (ARG). ARG is an Azure service that's designed to provide efficient resource exploration. You can use ARG to query at scale across a given set of subscriptions so that you can effectively govern your environment. +- Enriched data for the recommendation from Azure Resource Graph (ARG). ARG is an Azure service that's designed to provide efficient resource exploration. You can use ARG to query at scale across a given set of subscriptions so that you can effectively govern your environment. For Azure Security Center, you can use ARG and the [Kusto Query Language (KQL)](/azure/data-explorer/kusto/query/) to query a wide range of security posture data. @@ -1803,11 +1834,10 @@ You can now see whether or not your subscriptions have the default Security Cent :::image type="content" source="media/release-notes/policy-assignment-info-per-subscription.png" alt-text="The policy management page of Azure Security Center showing the default policy assignments."::: - - ## October 2020 Updates in October include: + - [Vulnerability assessment for on-premise and multicloud machines (preview)](#vulnerability-assessment-for-on-premise-and-multicloud-machines-preview) - [Azure Firewall recommendation added (preview)](#azure-firewall-recommendation-added-preview) - [Authorized IP ranges should be defined on Kubernetes Services recommendation updated with quick fix](#authorized-ip-ranges-should-be-defined-on-kubernetes-services-recommendation-updated-with-quick-fix) @@ -1833,7 +1863,6 @@ Main capabilities: [Learn more about Azure Arc-enabled servers](../azure-arc/servers/index.yml). - ### Azure Firewall recommendation added (preview) A new recommendation has been added to protect all your virtual networks with Azure Firewall. @@ -1842,7 +1871,6 @@ The recommendation, **Virtual networks should be protected by Azure Firewall** a Learn more about [Azure Firewall](https://azure.microsoft.com/services/azure-firewall/). - ### Authorized IP ranges should be defined on Kubernetes Services recommendation updated with quick fix The recommendation **Authorized IP ranges should be defined on Kubernetes Services** now has a quick fix option. @@ -1851,7 +1879,6 @@ For more information about this recommendation and all other Security Center rec :::image type="content" source="./media/release-notes/authorized-ip-ranges-recommendation.png" alt-text="The authorized IP ranges should be defined on Kubernetes Services recommendation with the quick fix option."::: - ### Regulatory compliance dashboard now includes option to remove standards Security Center's regulatory compliance dashboard provides insights into your compliance posture based on how you're meeting specific compliance controls and requirements. @@ -1860,10 +1887,9 @@ The dashboard includes a default set of regulatory standards. 
If any of the supp Learn more in [Remove a standard from your dashboard](update-regulatory-compliance-packages.md#remove-a-standard-from-your-dashboard). - ### Microsoft.Security/securityStatuses table removed from Azure Resource Graph (ARG) -Azure Resource Graph is a service in Azure that is designed to provide efficient resource exploration with the ability to query at scale across a given set of subscriptions so that you can effectively govern your environment. +Azure Resource Graph is a service in Azure that is designed to provide efficient resource exploration with the ability to query at scale across a given set of subscriptions so that you can effectively govern your environment. For Azure Security Center, you can use ARG and the [Kusto Query Language (KQL)](/azure/data-explorer/kusto/query/) to query a wide range of security posture data. For example: @@ -1901,6 +1927,7 @@ properties: { securitystate: "High" } ``` + Whereas, Microsoft.Security/Assessments will hold a record for each such policy assessment as follows: ``` @@ -1953,13 +1980,14 @@ extract("^(.+)/providers/Microsoft.Security/assessments/.+$",1,id))))) ``` Learn more at the following links: + - [How to create queries with Azure Resource Graph Explorer](../governance/resource-graph/first-query-portal.md) - [Kusto Query Language (KQL)](/azure/data-explorer/kusto/query/) - ## September 2020 Updates in September include: + - [Security Center gets a new look!](#security-center-gets-a-new-look) - [Azure Defender released](#azure-defender-released) - [Azure Defender for Key Vault is generally available](#azure-defender-for-key-vault-is-generally-available) @@ -1977,8 +2005,7 @@ Updates in September include: - [Secure score doesn't include preview recommendations](#secure-score-doesnt-include-preview-recommendations) - [Recommendations now include a severity indicator and the freshness interval](#recommendations-now-include-a-severity-indicator-and-the-freshness-interval) - -### Security Center gets a new look! +### Security Center gets a new look We've released a refreshed UI for Security Center's portal pages. The new pages include a new overview page and dashboards for secure score, asset inventory, and Azure Defender. @@ -1986,10 +2013,9 @@ The redesigned overview page now has a tile for accessing the secure score, asse Learn more about the [overview page](overview-page.md). - ### Azure Defender released -**Azure Defender** is the cloud workload protection platform (CWPP) integrated within Security Center for advanced, intelligent, protection of your Azure and hybrid workloads. It replaces Security Center's standard pricing tier option. +**Azure Defender** is the cloud workload protection platform (CWPP) integrated within Security Center for advanced, intelligent, protection of your Azure and hybrid workloads. It replaces Security Center's standard pricing tier option. When you enable Azure Defender from the **Pricing and settings** area of Azure Security Center, the following Defender plans are all enabled simultaneously and provide comprehensive defenses for the compute, data, and service layers of your environment: @@ -2009,7 +2035,7 @@ With its dedicated dashboard, Azure Defender provides security alerts and advanc ### Azure Defender for Key Vault is generally available -Azure Key Vault is a cloud service that safeguards encryption keys and secrets like certificates, connection strings, and passwords. 
+Azure Key Vault is a cloud service that safeguards encryption keys and secrets like certificates, connection strings, and passwords. **Azure Defender for Key Vault** provides Azure-native, advanced threat protection for Azure Key Vault, providing an additional layer of security intelligence. By extension, Azure Defender for Key Vault is consequently protecting many of the resources dependent upon your Key Vault accounts. @@ -2019,8 +2045,7 @@ Also, the Key Vault pages in the Azure portal now include a dedicated **Security Learn more in [Azure Defender for Key Vault](defender-for-key-vault-introduction.md). - -### Azure Defender for Storage protection for Files and ADLS Gen2 is generally available +### Azure Defender for Storage protection for Files and ADLS Gen2 is generally available **Azure Defender for Storage** detects potentially harmful activity on your Azure Storage accounts. Your data can be protected whether it's stored as blob containers, file shares, or data lakes. @@ -2030,7 +2055,6 @@ From 1 October 2020, we'll begin charging for protecting resources on these serv Learn more in [Azure Defender for Storage](defender-for-storage-introduction.md). - ### Asset inventory tools are now generally available The asset inventory page of Azure Security Center provides a single page for viewing the security posture of the resources you've connected to Security Center. @@ -2041,8 +2065,6 @@ When any resource has outstanding recommendations, they'll appear in the invento Learn more in [Explore and manage your resources with asset inventory](asset-inventory.md). - - ### Disable a specific vulnerability finding for scans of container registries and virtual machines Azure Defender includes vulnerability scanners to scan images in your Azure Container Registry and your virtual machines. @@ -2058,27 +2080,24 @@ This option is available from the recommendations details pages for: Learn more in [Disable specific findings for your container images](defender-for-containers-usage.md#disable-specific-findings) and [Disable specific findings for your virtual machines](remediate-vulnerability-findings-vm.md#disable-specific-findings). - ### Exempt a resource from a recommendation -Occasionally, a resource will be listed as unhealthy regarding a specific recommendation (and therefore lowering your secure score) even though you feel it shouldn't be. It might have been remediated by a process not tracked by Security Center. Or perhaps your organization has decided to accept the risk for that specific resource. +Occasionally, a resource will be listed as unhealthy regarding a specific recommendation (and therefore lowering your secure score) even though you feel it shouldn't be. It might have been remediated by a process not tracked by Security Center. Or perhaps your organization has decided to accept the risk for that specific resource. In such cases, you can create an exemption rule and ensure that resource isn't listed amongst the unhealthy resources in the future. These rules can include documented justifications as described below. Learn more in [Exempt a resource from recommendations and secure score](exempt-resource.md). - ### AWS and GCP connectors in Security Center bring a multicloud experience With cloud workloads commonly spanning multiple cloud platforms, cloud security services must do the same. Azure Security Center now protects workloads in Azure, Amazon Web Services (AWS), and Google Cloud Platform (GCP). 
-Onboarding your AWS and GCP projects into Security Center, integrates AWS Security Hub, GCP Security Command and Azure Security Center. +Onboarding your AWS and GCP projects into Security Center, integrates AWS Security Hub, GCP Security Command and Azure Security Center. Learn more in [Connect your AWS accounts to Azure Security Center](quickstart-onboard-aws.md) and [Connect your GCP projects to Azure Security Center](quickstart-onboard-gcp.md). - ### Kubernetes workload protection recommendation bundle To ensure that Kubernetes workloads are secure by default, Security Center is adding Kubernetes level hardening recommendations, including enforcement options with Kubernetes admission control. @@ -2089,12 +2108,11 @@ For example, you can mandate that privileged containers shouldn't be created, an Learn more in [Workload protection best-practices using Kubernetes admission control](defender-for-containers-introduction.md#hardening). - ### Vulnerability assessment findings are now available in continuous export Use continuous export to stream your alerts and recommendations to Azure Event Hubs, Log Analytics workspaces, or Azure Monitor. From there, you can integrate this data with SIEMs (such as Azure Sentinel, Power BI, Azure Data Explorer, and more. -Security Center's integrated vulnerability assessment tools return findings about your resources as actionable recommendations within a 'parent' recommendation such as "Vulnerabilities in your virtual machines should be remediated". +Security Center's integrated vulnerability assessment tools return findings about your resources as actionable recommendations within a 'parent' recommendation such as "Vulnerabilities in your virtual machines should be remediated". The security findings are now available for export through continuous export when you select recommendations and enable the **include security findings** option. @@ -2108,7 +2126,7 @@ Related pages: ### Prevent security misconfigurations by enforcing recommendations when creating new resources -Security misconfigurations are a major cause of security incidents. Security Center now has the ability to help *prevent* misconfigurations of new resources with regard to specific recommendations. +Security misconfigurations are a major cause of security incidents. Security Center now has the ability to help *prevent* misconfigurations of new resources with regard to specific recommendations. This feature can help keep your workloads secure and stabilize your secure score. @@ -2117,12 +2135,12 @@ Enforcing a secure configuration, based on a specific recommendation, is offered - Using the **Deny** effect of Azure Policy, you can stop unhealthy resources from being created - Using the **Enforce** option, you can take advantage of Azure Policy's **DeployIfNotExist** effect and automatically remediate non-compliant resources upon creation - + This is available for selected security recommendations and can be found at the top of the resource details page. Learn more in [Prevent misconfigurations with Enforce/Deny recommendations](prevent-misconfigurations.md). -### Network security group recommendations improved +### Network security group recommendations improved The following security recommendations related to network security groups have been improved to reduce some instances of false positives. 
@@ -2131,7 +2149,6 @@ The following security recommendations related to network security groups have b - Internet-facing virtual machines should be protected with Network Security Groups - Subnets should be associated with a Network Security Group - ### Deprecated preview AKS recommendation "Pod Security Policies should be defined on Kubernetes Services" The preview recommendation "Pod Security Policies should be defined on Kubernetes Services" is being deprecated as described in the [Azure Kubernetes Service](../aks/use-pod-security-policies.md) documentation. @@ -2140,10 +2157,9 @@ The pod security policy (preview) feature, is set for deprecation and will no lo After pod security policy (preview) is deprecated, you must disable the feature on any existing clusters using the deprecated feature to perform future cluster upgrades and stay within Azure support. - ### Email notifications from Azure Security Center improved -The following areas of the emails regarding security alerts have been improved: +The following areas of the emails regarding security alerts have been improved: - Added the ability to send email notifications about alerts for all severity levels - Added the ability to notify users with different Azure roles on the subscription @@ -2152,8 +2168,7 @@ The following areas of the emails regarding security alerts have been improved: Learn more in [Set up email notifications for security alerts](configure-email-notifications.md). - -### Secure score doesn't include preview recommendations +### Secure score doesn't include preview recommendations Security Center continually assesses your resources, subscriptions, and organization for security issues. It then aggregates all the findings into a single score so that you can tell, at a glance, your current security situation: the higher the score, the lower the identified risk level. @@ -2167,14 +2182,12 @@ An example of a preview recommendation: [Learn more about secure score](secure-score-security-controls.md). - ### Recommendations now include a severity indicator and the freshness interval The details page for recommendations now includes a freshness interval indicator (whenever relevant) and a clear display of the severity of the recommendation. :::image type="content" source="./media/release-notes/recommendations-severity-freshness-indicators.png" alt-text="Recommendation page showing freshness and severity."::: - ## August 2020 Updates in August include: @@ -2185,7 +2198,6 @@ Updates in August include: - [Vulnerability assessment on VMs - recommendations and policies consolidated](#vulnerability-assessment-on-vms---recommendations-and-policies-consolidated) - [New AKS security policies added to ASC_default initiative – for use by private preview customers only](#new-aks-security-policies-added-to-asc_default-initiative--for-use-by-private-preview-customers-only) - ### Asset inventory - powerful new view of the security posture of your assets Security Center's asset inventory (currently in preview) provides a way to view the security posture of the resources you've connected to Security Center. @@ -2196,29 +2208,26 @@ You can use the view and its filters to explore your security posture data and t Learn more about [asset inventory](asset-inventory.md). 
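Because the inventory page is built on Azure Resource Graph, the same view can be reproduced programmatically. The query below is a rough illustration, not the exact query the portal generates; the type and property names are assumptions you can validate against the query the portal produces.

```
// Resources with at least one unhealthy Security Center assessment.
// Illustrative only - compare with the query generated by the inventory page.
securityresources
| where type == 'microsoft.security/assessments'
| where properties.status.code == 'Unhealthy'
| extend resourceId = tostring(properties.resourceDetails.Id)
| summarize unhealthyRecommendations = count() by resourceId
| order by unhealthyRecommendations desc
```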
- ### Added support for Azure Active Directory security defaults (for multi-factor authentication) Security Center has added full support for [security defaults](../active-directory/fundamentals/concept-fundamentals-security-defaults.md), Microsoft's free identity security protections. Security defaults provide preconfigured identity security settings to defend your organization from common identity-related attacks. Security defaults already protecting more than 5 million tenants overall; 50,000 tenants are also protected by Security Center. -Security Center now provides a security recommendation whenever it identifies an Azure subscription without security defaults enabled. Until now, Security Center recommended enabling multi-factor authentication using conditional access, which is part of the Azure Active Directory (AD) premium license. For customers using Azure AD free, we now recommend enabling security defaults. +Security Center now provides a security recommendation whenever it identifies an Azure subscription without security defaults enabled. Until now, Security Center recommended enabling multi-factor authentication using conditional access, which is part of the Azure Active Directory (AD) premium license. For customers using Azure AD free, we now recommend enabling security defaults. Our goal is to encourage more customers to secure their cloud environments with MFA, and mitigate one of the highest risks that is also the most impactful to your [secure score](secure-score-security-controls.md). Learn more about [security defaults](../active-directory/fundamentals/concept-fundamentals-security-defaults.md). - ### Service principals recommendation added A new recommendation has been added to recommend that Security Center customers using management certificates to manage their subscriptions switch to service principals. -The recommendation, **Service principals should be used to protect your subscriptions instead of Management Certificates** advises you to use Service Principals or Azure Resource Manager to more securely manage your subscriptions. +The recommendation, **Service principals should be used to protect your subscriptions instead of Management Certificates** advises you to use Service Principals or Azure Resource Manager to more securely manage your subscriptions. Learn more about [Application and service principal objects in Azure Active Directory](../active-directory/develop/app-objects-and-service-principals.md#service-principal-object). - ### Vulnerability assessment on VMs - recommendations and policies consolidated Security Center inspects your VMs to detect whether they're running a vulnerability assessment solution. If no vulnerability assessment solution is found, Security Center provides a recommendation to simplify the deployment. @@ -2232,7 +2241,6 @@ To ensure a consistent experience for all users, regardless of the scanner type |**A vulnerability assessment solution should be enabled on your virtual machines**|Replaces the following two recommendations:
                  ***** Enable the built-in vulnerability assessment solution on virtual machines (powered by Qualys) (now deprecated) (Included with standard tier)
                  ***** Vulnerability assessment solution should be installed on your virtual machines (now deprecated) (Standard and free tiers)| |**Vulnerabilities in your virtual machines should be remediated**|Replaces the following two recommendations:
                  ***** Remediate vulnerabilities found on your virtual machines (powered by Qualys) (now deprecated)
                  ***** Vulnerabilities should be remediated by a Vulnerability Assessment solution (now deprecated)| - Now you'll use the same recommendation to deploy Security Center's vulnerability assessment extension or a privately licensed solution ("BYOL") from a partner such as Qualys or Rapid7. Also, when vulnerabilities are found and reported to Security Center, a single recommendation will alert you to the findings regardless of the vulnerability assessment solution that identified them. @@ -2250,15 +2258,11 @@ If you have scripts, queries, or automations referring to the previous recommend |**Vulnerability assessment solution should be installed on your virtual machines**
                  Key: 01b1ed4c-b733-4fee-b145-f23236e70cf3|BYOL| |**Vulnerabilities should be remediated by a Vulnerability Assessment solution**
                  Key: 71992a2a-d168-42e0-b10e-6b45fa2ecddb|BYOL| - - |Policy|Scope| |----|:----| |**Vulnerability assessment should be enabled on virtual machines**
                  Policy ID: 501541f7-f7e7-4cd6-868c-4190fdad3ac9|Built-in| |**Vulnerabilities should be remediated by a vulnerability assessment solution**
                  Policy ID: 760a85ff-6162-42b3-8d70-698e268f648c|BYOL| - - ##### From August 2020 |Recommendation|Scope| @@ -2266,28 +2270,25 @@ If you have scripts, queries, or automations referring to the previous recommend |**A vulnerability assessment solution should be enabled on your virtual machines**
                  Key: ffff0522-1e88-47fc-8382-2a80ba848f5d|Built-in + BYOL| |**Vulnerabilities in your virtual machines should be remediated**
                  Key: 1195afff-c881-495e-9bc5-1486211ae03f|Built-in + BYOL| - |Policy|Scope| |----|:----| |[**Vulnerability assessment should be enabled on virtual machines**](https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2f501541f7-f7e7-4cd6-868c-4190fdad3ac9)
                  Policy ID: 501541f7-f7e7-4cd6-868c-4190fdad3ac9 |Built-in + BYOL| - - ### New AKS security policies added to ASC_default initiative – for use by private preview customers only To ensure that Kubernetes workloads are secure by default, Security Center is adding Kubernetes level policies and hardening recommendations, including enforcement options with Kubernetes admission control. The early phase of this project includes a private preview and the addition of new (disabled by default) policies to the ASC_default initiative. -You can safely ignore these policies and there will be no impact on your environment. If you'd like to enable them, sign up for the preview at https://aka.ms/SecurityPrP and select from the following options: +You can safely ignore these policies and there will be no impact on your environment. If you'd like to enable them, sign up for the preview at and select from the following options: 1. **Single Preview** – To join only this private preview. Explicitly mention "ASC Continuous Scan" as the preview you would like to join. 1. **Ongoing Program** – To be added to this and future private previews. You'll need to complete a profile and privacy agreement. - ## July 2020 Updates in July include: + - [Vulnerability assessment for virtual machines is now available for non-marketplace images](#vulnerability-assessment-for-virtual-machines-is-now-available-for-non-marketplace-images) - [Threat protection for Azure Storage expanded to include Azure Files and Azure Data Lake Storage Gen2 (preview)](#threat-protection-for-azure-storage-expanded-to-include-azure-files-and-azure-data-lake-storage-gen2-preview) - [Eight new recommendations to enable threat protection features](#eight-new-recommendations-to-enable-threat-protection-features) @@ -2295,12 +2296,9 @@ Updates in July include: - [Adaptive application controls updated with a new recommendation and support for wildcards in path rules](#adaptive-application-controls-updated-with-a-new-recommendation-and-support-for-wildcards-in-path-rules) - [Six policies for SQL advanced data security deprecated](#six-policies-for-sql-advanced-data-security-deprecated) - - - ### Vulnerability assessment for virtual machines is now available for non-marketplace images -When deploying a vulnerability assessment solution, Security Center previously performed a validation check before deploying. The check was to confirm a marketplace SKU of the destination virtual machine. +When deploying a vulnerability assessment solution, Security Center previously performed a validation check before deploying. The check was to confirm a marketplace SKU of the destination virtual machine. From this update, the check has been removed and you can now deploy vulnerability assessment tools to 'custom' Windows and Linux machines. Custom images are ones that you've modified from the marketplace defaults. @@ -2310,16 +2308,12 @@ Learn more about the [integrated vulnerability scanner for virtual machines (req Learn more about using your own privately-licensed vulnerability assessment solution from Qualys or Rapid7 in [Deploying a partner vulnerability scanning solution](deploy-vulnerability-assessment-vm.md). - ### Threat protection for Azure Storage expanded to include Azure Files and Azure Data Lake Storage Gen2 (preview) -Threat protection for Azure Storage detects potentially harmful activity on your Azure Storage accounts. Security Center displays alerts when it detects attempts to access or exploit your storage accounts. 
+Threat protection for Azure Storage detects potentially harmful activity on your Azure Storage accounts. Security Center displays alerts when it detects attempts to access or exploit your storage accounts. Your data can be protected whether it's stored as blob containers, file shares, or data lakes. - - - ### Eight new recommendations to enable threat protection features Eight new recommendations have been added to provide a simple way to enable Azure Security Center's threat protection features for the following resource types: virtual machines, App Service plans, Azure SQL Database servers, SQL servers on machines, Azure Storage accounts, Azure Kubernetes Service clusters, Azure Container Registry registries, and Azure Key Vault vaults. @@ -2337,25 +2331,22 @@ The new recommendations are: These new recommendations belong to the **Enable Azure Defender** security control. -The recommendations also include the quick fix capability. +The recommendations also include the quick fix capability. > [!IMPORTANT] > Remediating any of these recommendations will result in charges for protecting the relevant resources. These charges will begin immediately if you have related resources in the current subscription. Or in the future, if you add them at a later date. -> +> > For example, if you don't have any Azure Kubernetes Service clusters in your subscription and you enable the threat protection, no charges will be incurred. If, in the future, you add a cluster on the same subscription, it will automatically be protected and charges will begin at that time. Learn more about each of these in the [security recommendations reference page](recommendations-reference.md). Learn more about [threat protection in Azure Security Center](azure-defender.md). - - - ### Container security improvements - faster registry scanning and refreshed documentation As part of the continuous investments in the container security domain, we are happy to share a significant performance improvement in Security Center's dynamic scans of container images stored in Azure Container Registry. Scans now typically complete in approximately two minutes. In some cases, they might take up to 15 minutes. -To improve the clarity and guidance regarding Azure Security Center's container security capabilities, we've also refreshed the container security documentation pages. +To improve the clarity and guidance regarding Azure Security Center's container security capabilities, we've also refreshed the container security documentation pages. Learn more about Security Center's container security in the following articles: @@ -2366,25 +2357,20 @@ Learn more about Security Center's container security in the following articles: - [Security alerts from the threat protection features for Azure Kubernetes Service clusters](alerts-reference.md#alerts-k8scluster) - [Security recommendations for containers](recommendations-reference.md#recs-compute) - - ### Adaptive application controls updated with a new recommendation and support for wildcards in path rules The adaptive application controls feature has received two significant updates: -* A new recommendation identifies potentially legitimate behavior that hasn't previously been allowed. The new recommendation, **Allowlist rules in your adaptive application control policy should be updated**, prompts you to add new rules to the existing policy to reduce the number of false positives in adaptive application controls violation alerts. 
+- A new recommendation identifies potentially legitimate behavior that hasn't previously been allowed. The new recommendation, **Allowlist rules in your adaptive application control policy should be updated**, prompts you to add new rules to the existing policy to reduce the number of false positives in adaptive application controls violation alerts. -* Path rules now support wildcards. From this update, you can configure allowed path rules using wildcards. There are two supported scenarios: +- Path rules now support wildcards. From this update, you can configure allowed path rules using wildcards. There are two supported scenarios: - * Using a wildcard at the end of a path to allow all executables within this folder and sub-folders - - * Using a wildcard in the middle of a path to enable a known executable name with a changing folder name (e.g. personal user folders with a known executable, automatically generated folder names, etc.). + - Using a wildcard at the end of a path to allow all executables within this folder and sub-folders + - Using a wildcard in the middle of a path to enable a known executable name with a changing folder name (e.g. personal user folders with a known executable, automatically generated folder names, etc.). [Learn more about adaptive application controls](adaptive-application-controls.md). - - ### Six policies for SQL advanced data security deprecated Six policies related to advanced data security for SQL machines are being deprecated: @@ -2398,8 +2384,6 @@ Six policies related to advanced data security for SQL machines are being deprec Learn more about [built-in policies](./policy-reference.md). - - ## June 2020 Updates in June include: @@ -2411,8 +2395,6 @@ Updates in June include: - [New recommendation for using NSGs to protect non-internet-facing virtual machines](#new-recommendation-for-using-nsgs-to-protect-non-internet-facing-virtual-machines) - [New policies for enabling threat protection and advanced data security](#new-policies-for-enabling-threat-protection-and-advanced-data-security) - - ### Secure score API (preview) You can now access your score via the [secure score API](/rest/api/securitycenter/securescores/) (currently in preview). The API methods provide the flexibility to query the data and build your own reporting mechanism of your secure scores over time. For example, you can use the **Secure Scores** API to get the score for a specific subscription. In addition, you can use the **Secure Score Controls** API to list the security controls and the current score of your subscriptions. @@ -2421,8 +2403,6 @@ For examples of external tools made possible with the secure score API, see [the Learn more about [secure score and security controls in Azure Security Center](secure-score-security-controls.md). - - ### Advanced data security for SQL machines (Azure, other clouds, and on-premises) (preview) Azure Security Center's advanced data security for SQL machines now protects SQL Servers hosted in Azure, on other cloud environments, and even on-premises machines. This extends the protections for your Azure-native SQL Servers to fully support hybrid environments. @@ -2437,8 +2417,6 @@ Set up involves two steps: Learn more about [advanced data security for SQL machines](defender-for-sql-usage.md). 
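Once the bundle is enabled, its findings arrive as ordinary Security Center recommendations, so you can track remediation progress for SQL machines at scale with Azure Resource Graph. This is only a hedged sketch; the `microsoft.security/assessments` type is real, but filtering on the display name is a convenience for illustration rather than an official mapping.

```
// Status of SQL-related security assessments across subscriptions.
// Sketch only - the display-name filter is a convenience, not an official mapping.
securityresources
| where type == 'microsoft.security/assessments'
| where properties.displayName contains 'SQL'
| project subscriptionId,
          recommendation = tostring(properties.displayName),
          status = tostring(properties.status.code)
| order by status asc
```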
- - ### Two new recommendations to deploy the Log Analytics agent to Azure Arc machines (preview) Two new recommendations have been added to help deploy the [Log Analytics Agent](../azure-monitor/agents/log-analytics-agent.md) to your Azure Arc machines and ensure they're protected by Azure Security Center: @@ -2448,7 +2426,7 @@ Two new recommendations have been added to help deploy the [Log Analytics Agent] These new recommendations will appear in the same four security controls as the existing (related) recommendation, **Monitoring agent should be installed on your machines**: remediate security configurations, apply adaptive application control, apply system updates, and enable endpoint protection. -The recommendations also include the Quick fix capability to help speed up the deployment process. +The recommendations also include the Quick fix capability to help speed up the deployment process. Learn more about these two new recommendations in the [Compute and app recommendations](recommendations-reference.md#recs-compute) table. @@ -2456,7 +2434,6 @@ Learn more about how Azure Security Center uses the agent in [What is the Log An Learn more about [extensions for Azure Arc machines](../azure-arc/servers/manage-vm-extensions.md). - ### New policies to create continuous export and workflow automation configurations at scale Automating your organization's monitoring and incident response processes can greatly improve the time it takes to investigate and mitigate security incidents. @@ -2465,7 +2442,6 @@ To deploy your automation configurations across your organization, use these bui The policy definitions can be found in Azure Policy: - |Goal |Policy |Policy ID | |---------|---------|---------| |Continuous export to Event Hubs|[Deploy export to Event Hubs for Azure Security Center alerts and recommendations](https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2fcdfcce10-4578-4ecd-9703-530938e4abcb)|cdfcce10-4578-4ecd-9703-530938e4abcb| @@ -2473,12 +2449,10 @@ The policy definitions can be found in Azure Policy: |Workflow automation for security alerts|[Deploy Workflow Automation for Azure Security Center alerts](https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2ff1525828-9a90-4fcf-be48-268cdd02361e)|f1525828-9a90-4fcf-be48-268cdd02361e| |Workflow automation for security recommendations|[Deploy Workflow Automation for Azure Security Center recommendations](https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2f73d6ab6c-2475-4850-afd6-43795f3492ef)|73d6ab6c-2475-4850-afd6-43795f3492ef| - Get started with [workflow automation templates](https://github.com/Azure/Azure-Security-Center/tree/master/Workflow%20automation). Learn more about using the two export policies in [Configure workflow automation at scale using the supplied policies](workflow-automation.md#configure-workflow-automation-at-scale-using-the-supplied-policies) and [Set up a continuous export](continuous-export.md#set-up-a-continuous-export). 
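Both continuous export and workflow automation configurations are represented as `Microsoft.Security/automations` resources, so after assigning the policies at scale you can check which subscriptions actually received a configuration. A minimal sketch, assuming the automations appear in the Resource Graph `resources` table:

```
// Continuous export and workflow automation resources per subscription.
// Minimal sketch - adjust the projection to match what you see in your tenant.
resources
| where type =~ 'microsoft.security/automations'
| project subscriptionId, name, resourceGroup, location
| order by subscriptionId asc
```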
- ### New recommendation for using NSGs to protect non-internet-facing virtual machines The "implement security best practices" security control now includes the following new recommendation: @@ -2489,16 +2463,12 @@ An existing recommendation, **Internet-facing virtual machines should be protect Learn more in the [Network recommendations](recommendations-reference.md#recs-networking) table. - - - ### New policies for enabling threat protection and advanced data security The new policy definitions below were added to the ASC Default initiative and are designed to assist with enabling threat protection or advanced data security for the relevant resource types. The policy definitions can be found in Azure Policy: - | Policy | Policy ID | |-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------| | [Advanced data security should be enabled on Azure SQL Database servers](https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2f7fe3b40f-802b-4cdd-8bd4-fd799c948cc2) | 7fe3b40f-802b-4cdd-8bd4-fd799c948cc2 | @@ -2510,13 +2480,12 @@ The policy definitions can be found in Azure Policy: | [Advanced threat protection should be enabled on Azure Kubernetes Service clusters](https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2f523b5cd1-3e23-492f-a539-13118b6d1e3a) | 523b5cd1-3e23-492f-a539-13118b6d1e3a | | [Advanced threat protection should be enabled on Virtual Machines](https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2f4da35fc9-c9e7-4960-aec9-797fe7d9051d) | 4da35fc9-c9e7-4960-aec9-797fe7d9051d | - Learn more about [Threat protection in Azure Security Center](azure-defender.md). - ## May 2020 Updates in May include: + - [Alert suppression rules (preview)](#alert-suppression-rules-preview) - [Virtual machine vulnerability assessment is now generally available](#virtual-machine-vulnerability-assessment-is-now-generally-available) - [Changes to just-in-time (JIT) virtual machine (VM) access](#changes-to-just-in-time-jit-virtual-machine-vm-access) @@ -2526,10 +2495,9 @@ Updates in May include: - [Custom policies with custom metadata are now generally available](#custom-policies-with-custom-metadata-are-now-generally-available) - [Crash dump analysis capabilities migrating to fileless attack detection](#crash-dump-analysis-capabilities-migrating-to-fileless-attack-detection) - ### Alert suppression rules (preview) -This new feature (currently in preview) helps reduce alert fatigue. Use rules to automatically hide alerts that are known to be innocuous or related to normal activities in your organization. This lets you focus on the most relevant threats. +This new feature (currently in preview) helps reduce alert fatigue. Use rules to automatically hide alerts that are known to be innocuous or related to normal activities in your organization. This lets you focus on the most relevant threats. Alerts that match your enabled suppression rules will still be generated, but their state will be set to dismissed. 
You can see the state in the Azure portal or however you access your Security Center security alerts. @@ -2541,12 +2509,11 @@ Suppression rules define the criteria for which alerts should be automatically d Learn more about [suppressing alerts from Azure Security Center's threat protection](alerts-suppression-rules.md). - ### Virtual machine vulnerability assessment is now generally available Security Center's standard tier now includes an integrated vulnerability assessment for virtual machines for no additional fee. This extension is powered by Qualys but reports its findings directly back to Security Center. You don't need a Qualys license or even a Qualys account - everything's handled seamlessly inside Security Center. -The new solution can continuously scan your virtual machines to find vulnerabilities and present the findings in Security Center. +The new solution can continuously scan your virtual machines to find vulnerabilities and present the findings in Security Center. To deploy the solution, use the new security recommendation: @@ -2554,8 +2521,6 @@ To deploy the solution, use the new security recommendation: Learn more about [Security Center's integrated vulnerability assessment for virtual machines](deploy-vulnerability-assessment-vm.md#overview-of-the-integrated-vulnerability-scanner). - - ### Changes to just-in-time (JIT) virtual machine (VM) access Security Center includes an optional feature to protect the management ports of your VMs. This provides a defense against the most common form of brute force attacks. @@ -2568,16 +2533,14 @@ This update brings the following changes to this feature: Learn more about [the JIT access feature](just-in-time-access-usage.md). - ### Custom recommendations have been moved to a separate security control -One security control introduced with the enhanced secure score was "Implement security best practices". Any custom recommendations created for your subscriptions were automatically placed in that control. +One security control introduced with the enhanced secure score was "Implement security best practices". Any custom recommendations created for your subscriptions were automatically placed in that control. To make it easier to find your custom recommendations, we've moved them into a dedicated security control, "Custom recommendations". This control has no impact on your secure score. Learn more about security controls in [Enhanced secure score (preview) in Azure Security Center](secure-score-security-controls.md). - ### Toggle added to view recommendations in controls or as a flat list Security controls are logical groups of related security recommendations. They reflect your vulnerable attack surfaces. A control is a set of security recommendations, with instructions that help you implement those recommendations. @@ -2592,9 +2555,9 @@ Learn more about security controls in [Enhanced secure score (preview) in Azure :::image type="content" source="./media/secure-score-security-controls/recommendations-group-by-toggle.gif" alt-text="Group by controls toggle for recommendations."::: -### Expanded security control "Implement security best practices" +### Expanded security control "Implement security best practices" -One security control introduced with the enhanced secure score is "Implement security best practices". When a recommendation is in this control, it doesn't impact the secure score. +One security control introduced with the enhanced secure score is "Implement security best practices". 
When a recommendation is in this control, it doesn't impact the secure score. With this update, three recommendations have moved out of the controls in which they were originally placed, and into this best practices control. We've taken this step because we've determined that the risk of these three recommendations is lower than was initially thought. @@ -2616,11 +2579,9 @@ Learn more about Windows Defender Exploit Guard in [Create and deploy an Exploit Learn more about security controls in [Enhanced secure score (preview)](secure-score-security-controls.md). - - ### Custom policies with custom metadata are now generally available -Custom policies are now part of the Security Center recommendations experience, secure score, and the regulatory compliance standards dashboard. This feature is now generally available and allows you to extend your organization's security assessment coverage in Security Center. +Custom policies are now part of the Security Center recommendations experience, secure score, and the regulatory compliance standards dashboard. This feature is now generally available and allows you to extend your organization's security assessment coverage in Security Center. Create a custom initiative in Azure Policy, add policies to it and onboard it to Azure Security Center, and visualize it as recommendations. @@ -2628,34 +2589,27 @@ We've now also added the option to edit the custom recommendation metadata. Meta Learn more about [enhancing your custom recommendations with detailed information](custom-security-policies.md#enhance-your-custom-recommendations-with-detailed-information). - - -### Crash dump analysis capabilities migrating to fileless attack detection +### Crash dump analysis capabilities migrating to fileless attack detection We are integrating the Windows crash dump analysis (CDA) detection capabilities into [fileless attack detection](defender-for-servers-introduction.md#what-are-the-benefits-of-defender-for-servers). Fileless attack detection analytics brings improved versions of the following security alerts for Windows machines: Code injection discovered, Masquerading Windows Module Detected, Shell code discovered, and Suspicious code segment detected. Some of the benefits of this transition: -- **Proactive and timely malware detection** - The CDA approach involved waiting for a crash to occur and then running analysis to find malicious artifacts. Using fileless attack detection brings proactive identification of in-memory threats while they are running. +- **Proactive and timely malware detection** - The CDA approach involved waiting for a crash to occur and then running analysis to find malicious artifacts. Using fileless attack detection brings proactive identification of in-memory threats while they are running. -- **Enriched alerts** - The security alerts from fileless attack detection include enrichments that aren't available from CDA, such as the active network connections information. +- **Enriched alerts** - The security alerts from fileless attack detection include enrichments that aren't available from CDA, such as the active network connections information. - **Alert aggregation** - When CDA detected multiple attack patterns within a single crash dump, it triggered multiple security alerts. Fileless attack detection combines all of the identified attack patterns from the same process into a single alert, removing the need to correlate multiple alerts. 
- **Reduced requirements on your Log Analytics workspace** - Crash dumps containing potentially sensitive data will no longer be uploaded to your Log Analytics workspace. - - - - - ## April 2020 Updates in April include: + - [Dynamic compliance packages are now generally available](#dynamic-compliance-packages-are-now-generally-available) - [Identity recommendations now included in Azure Security Center free tier](#identity-recommendations-now-included-in-azure-security-center-free-tier) - ### Dynamic compliance packages are now generally available The Azure Security Center regulatory compliance dashboard now includes **dynamic compliance packages** (now generally available) to track additional industry and regulatory standards. @@ -2671,9 +2625,8 @@ Now, you can add standards such as: - **Azure CIS 1.1.0 (new)** (which is a more complete representation of Azure CIS 1.1.0) In addition, we've recently added the [Azure Security Benchmark](/security/benchmark/azure/introduction), the Microsoft-authored Azure-specific guidelines for security and compliance best practices based on common compliance frameworks. Additional standards will be supported in the dashboard as they become available. - -Learn more about [customizing the set of standards in your regulatory compliance dashboard](update-regulatory-compliance-packages.md). +Learn more about [customizing the set of standards in your regulatory compliance dashboard](update-regulatory-compliance-packages.md). ### Identity recommendations now included in Azure Security Center free tier @@ -2691,8 +2644,6 @@ Learn more about [identity and access recommendations](recommendations-reference Learn more about [Managing multi-factor authentication (MFA) enforcement on your subscriptions](multi-factor-authentication-enforcement.md). - - ## March 2020 Updates in March include: @@ -2703,7 +2654,6 @@ Updates in March include: - [Improved just-in-time experience](#improved-just-in-time-experience) - [Two security recommendations for web applications deprecated](#two-security-recommendations-for-web-applications-deprecated) - ### Workflow automation is now generally available The workflow automation feature of Azure Security Center is now generally available. Use it to automatically trigger Logic Apps on security alerts and recommendations. In addition, manual triggers are available for alerts and all recommendations that have the quick fix option available. @@ -2714,7 +2664,6 @@ For more information about the automatic and manual Security Center capabilities Learn more about [creating Logic Apps](../logic-apps/logic-apps-overview.md). - ### Integration of Azure Security Center with Windows Admin Center It's now possible to move your on-premises Windows servers from the Windows Admin Center directly to the Azure Security Center. Security Center then becomes your single pane of glass to view security information for all your Windows Admin Center resources, including on-premises servers, virtual machines, and additional PaaS workloads. @@ -2726,7 +2675,6 @@ After moving a server from Windows Admin Center to Azure Security Center, you'll Learn more about [how to integrate Azure Security Center with Windows Admin Center](windows-admin-center-integration.md). - ### Protection for Azure Kubernetes Service Azure Security Center is expanding its container security features to protect Azure Kubernetes Service (AKS). 
@@ -2743,20 +2691,18 @@ Learn more about [Azure Kubernetes Services' integration with Security Center](d Learn more about [the container security features in Security Center](defender-for-containers-introduction.md). - ### Improved just-in-time experience -The features, operation, and UI for Azure Security Center's just-in-time tools that secure your management ports have been enhanced as follows: +The features, operation, and UI for Azure Security Center's just-in-time tools that secure your management ports have been enhanced as follows: -- **Justification field** - When requesting access to a virtual machine (VM) through the just-in-time page of the Azure portal, a new optional field is available to enter a justification for the request. Information entered into this field can be tracked in the activity log. -- **Automatic cleanup of redundant just-in-time (JIT) rules** - Whenever you update a JIT policy, a cleanup tool automatically runs to check the validity of your entire ruleset. The tool looks for mismatches between rules in your policy and rules in the NSG. If the cleanup tool finds a mismatch, it determines the cause and, when it's safe to do so, removes built-in rules that aren't needed anymore. The cleaner never deletes rules that you've created. +- **Justification field** - When requesting access to a virtual machine (VM) through the just-in-time page of the Azure portal, a new optional field is available to enter a justification for the request. Information entered into this field can be tracked in the activity log. +- **Automatic cleanup of redundant just-in-time (JIT) rules** - Whenever you update a JIT policy, a cleanup tool automatically runs to check the validity of your entire ruleset. The tool looks for mismatches between rules in your policy and rules in the NSG. If the cleanup tool finds a mismatch, it determines the cause and, when it's safe to do so, removes built-in rules that aren't needed anymore. The cleaner never deletes rules that you've created. Learn more about [the JIT access feature](just-in-time-access-usage.md). - ### Two security recommendations for web applications deprecated -Two security recommendations related to web applications are being deprecated: +Two security recommendations related to web applications are being deprecated: - The rules for web applications on IaaS NSGs should be hardened. (Related policy: The NSGs rules for web applications on IaaS should be hardened) @@ -2768,9 +2714,6 @@ These recommendations will no longer appear in the Security Center list of recom Learn more about [security recommendations](recommendations-reference.md). - - - ## February 2020 ### Fileless attack detection for Linux (preview) @@ -2780,9 +2723,7 @@ As attackers increasing employ stealthier methods to avoid detection, Azure Secu - minimize or eliminate traces of malware on disk - greatly reduce the chances of detection by disk-based malware scanning solutions -To counter this threat, Azure Security Center released fileless attack detection for Windows in October 2018, and has now extended fileless attack detection on Linux as well. - - +To counter this threat, Azure Security Center released fileless attack detection for Windows in October 2018, and has now extended fileless attack detection on Linux as well. ## January 2020 @@ -2794,24 +2735,23 @@ Familiarize yourself with the secure score changes during the preview phase and Learn more about [enhanced secure score (preview)](secure-score-security-controls.md). 
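For readers who want to track the score described in this section outside the portal, here is a minimal sketch that reads the current and maximum secure score per subscription through Azure Resource Graph. It assumes the `resource-graph` CLI extension is installed and that secure scores are exposed in the `securityresources` table under the `microsoft.security/securescores` type; verify the property paths against your own tenant before relying on them.

```azurecli
# Requires the Azure Resource Graph extension: az extension add --name resource-graph
# Reads the current and maximum secure score for each subscription in scope.
az graph query --output table -q "securityresources | where type == 'microsoft.security/securescores' | project subscriptionId, currentScore = properties.score.current, maxScore = properties.score.max"
```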
- - ## November 2019 Updates in November include: - - [Threat Protection for Azure Key Vault in North America regions (preview)](#threat-protection-for-azure-key-vault-in-north-america-regions-preview) - - [Threat Protection for Azure Storage includes Malware Reputation Screening](#threat-protection-for-azure-storage-includes-malware-reputation-screening) - - [Workflow automation with Logic Apps (preview)](#workflow-automation-with-logic-apps-preview) - - [Quick Fix for bulk resources generally available](#quick-fix-for-bulk-resources-generally-available) - - [Scan container images for vulnerabilities (preview)](#scan-container-images-for-vulnerabilities-preview) - - [Additional regulatory compliance standards (preview)](#additional-regulatory-compliance-standards-preview) - - [Threat Protection for Azure Kubernetes Service (preview)](#threat-protection-for-azure-kubernetes-service-preview) - - [Virtual machine vulnerability assessment (preview)](#virtual-machine-vulnerability-assessment-preview) - - [Advanced data security for SQL servers on Azure Virtual Machines (preview)](#advanced-data-security-for-sql-servers-on-azure-virtual-machines-preview) - - [Support for custom policies (preview)](#support-for-custom-policies-preview) - - [Extending Azure Security Center coverage with platform for community and partners](#extending-azure-security-center-coverage-with-platform-for-community-and-partners) - - [Advanced integrations with export of recommendations and alerts (preview)](#advanced-integrations-with-export-of-recommendations-and-alerts-preview) - - [Onboard on-prem servers to Security Center from Windows Admin Center (preview)](#onboard-on-prem-servers-to-security-center-from-windows-admin-center-preview) + +- [Threat Protection for Azure Key Vault in North America regions (preview)](#threat-protection-for-azure-key-vault-in-north-america-regions-preview) +- [Threat Protection for Azure Storage includes Malware Reputation Screening](#threat-protection-for-azure-storage-includes-malware-reputation-screening) +- [Workflow automation with Logic Apps (preview)](#workflow-automation-with-logic-apps-preview) +- [Quick Fix for bulk resources generally available](#quick-fix-for-bulk-resources-generally-available) +- [Scan container images for vulnerabilities (preview)](#scan-container-images-for-vulnerabilities-preview) +- [Additional regulatory compliance standards (preview)](#additional-regulatory-compliance-standards-preview) +- [Threat Protection for Azure Kubernetes Service (preview)](#threat-protection-for-azure-kubernetes-service-preview) +- [Virtual machine vulnerability assessment (preview)](#virtual-machine-vulnerability-assessment-preview) +- [Advanced data security for SQL servers on Azure Virtual Machines (preview)](#advanced-data-security-for-sql-servers-on-azure-virtual-machines-preview) +- [Support for custom policies (preview)](#support-for-custom-policies-preview) +- [Extending Azure Security Center coverage with platform for community and partners](#extending-azure-security-center-coverage-with-platform-for-community-and-partners) +- [Advanced integrations with export of recommendations and alerts (preview)](#advanced-integrations-with-export-of-recommendations-and-alerts-preview) +- [Onboard on-prem servers to Security Center from Windows Admin Center (preview)](#onboard-on-prem-servers-to-security-center-from-windows-admin-center-preview) ### Threat Protection for Azure Key Vault in North America Regions (preview) @@ -2819,12 +2759,10 @@ Azure Key Vault is an essential 
service for protecting data and improving perfor Azure Security Center's support for Threat Protection for Azure Key Vault provides an additional layer of security intelligence that detects unusual and potentially harmful attempts to access or exploit key vaults. This new layer of protection allows customers to address threats against their key vaults without being a security expert or having to manage security monitoring systems. The feature is in public preview in North America Regions. - ### Threat Protection for Azure Storage includes Malware Reputation Screening Threat protection for Azure Storage offers new detections powered by Microsoft Threat Intelligence for detecting malware uploads to Azure Storage using hash reputation analysis and suspicious access from an active Tor exit node (an anonymizing proxy). You can now view detected malware across storage accounts using Azure Security Center. - ### Workflow automation with Logic Apps (preview) Organizations with centrally managed security and IT/operations implement internal workflow processes to drive required action within the organization when discrepancies are discovered in their environments. In many cases, these workflows are repeatable processes and automation can greatly streamline processes within the organization. @@ -2835,7 +2773,6 @@ For more information about the automatic and manual Security Center capabilities To learn about creating Logic Apps, see [Azure Logic Apps](../logic-apps/logic-apps-overview.md). - ### Quick Fix for bulk resources generally available With the many tasks that a user is given as part of Secure Score, the ability to effectively remediate issues across a large fleet can become challenging. @@ -2848,7 +2785,6 @@ Quick fix is generally available today customers as part of the Security Center See which recommendations have quick fix enabled in the [reference guide to security recommendations](recommendations-reference.md). - ### Scan container images for vulnerabilities (preview) Azure Security Center can now scan container images in Azure Container Registry for vulnerabilities. @@ -2857,7 +2793,6 @@ The image scanning works by parsing the container image file, then checking to s The scan itself is automatically triggered when pushing new container images to Azure Container Registry. Found vulnerabilities will surface as Security Center recommendations and be included in the secure score together with information on how to patch them to reduce the attack surface they allow. - ### Additional regulatory compliance standards (preview) The Regulatory Compliance dashboard provides insights into your compliance posture based on Security Center assessments. The dashboard shows how your environment complies with controls and requirements designated by specific regulatory standards and industry benchmarks and provides prescriptive recommendations for how to address these requirements. @@ -2866,7 +2801,6 @@ The regulatory compliance dashboard has thus far supported four built-in standar [Learn more about customizing the set of standards in your regulatory compliance dashboard](update-regulatory-compliance-packages.md). - ### Threat Protection for Azure Kubernetes Service (preview) Kubernetes is quickly becoming the new standard for deploying and managing software in the cloud. Few people have extensive experience with Kubernetes, and many focus only on general engineering and administration and overlook the security aspect.
A Kubernetes environment needs to be configured carefully to be secure, making sure no container-focused attack surface doors are left open for attackers. Security Center is expanding its support in the container space to one of the fastest growing services in Azure - Azure Kubernetes Service (AKS). @@ -2877,14 +2811,12 @@ The new capabilities in this public preview release include: - **Secure Score recommendations** - Actionable items to help customers comply with security best practices for AKS, and increase their secure score. Recommendations include items such as "Role-based access control should be used to restrict access to a Kubernetes Service Cluster". - **Threat Detection** - Host and cluster-based analytics, such as "A privileged container detected". - ### Virtual machine vulnerability assessment (preview) Applications that are installed in virtual machines often have vulnerabilities that could lead to a breach of the virtual machine. We are announcing that the Security Center standard tier includes built-in vulnerability assessment for virtual machines for no additional fee. The vulnerability assessment, powered by Qualys in the public preview, will allow you to continuously scan all the installed applications on a virtual machine to find vulnerable applications and present the findings in the Security Center portal's experience. Security Center takes care of all deployment operations so that no extra work is required from the user. Going forward, we are planning to provide vulnerability assessment options to support our customers' unique business needs. [Learn more about vulnerability assessments for your Azure Virtual Machines](deploy-vulnerability-assessment-vm.md). - ### Advanced data security for SQL servers on Azure Virtual Machines (preview) Azure Security Center's support for threat protection and vulnerability assessment for SQL DBs running on IaaS VMs is now in preview. @@ -2893,7 +2825,6 @@ Azure Security Center's support for threat protection and vulnerability assessme [Advanced threat protection](/azure/azure-sql/database/threat-detection-overview) detects anomalous activities indicating unusual and potentially harmful attempts to access or exploit your SQL server. It continuously monitors your database for suspicious activities and provides action-oriented security alerts on anomalous database access patterns. These alerts provide the suspicious activity details and recommended actions to investigate and mitigate the threat. - ### Support for custom policies (preview) Azure Security Center now supports custom policies (in preview). @@ -2902,15 +2833,12 @@ Our customers have been wanting to extend their current security assessments cov These new policies will be part of the Security Center recommendations experience, Secure Score, and the regulatory compliance standards dashboard. With the support for custom policies, you're now able to create a custom initiative in Azure Policy, then add it as a policy in Security Center and visualize it as a recommendation. - ### Extending Azure Security Center coverage with platform for community and partners Use Security Center to receive recommendations not only from Microsoft but also from existing solutions from partners such as Check Point, Tenable, and CyberArk, with many more integrations coming.
Security Center's simple onboarding flow can connect your existing solutions to Security Center, enabling you to view your security posture recommendations in a single place, run unified reports and leverage all of Security Center's capabilities against both built-in and partner recommendations. You can also export Security Center recommendations to partner products. [Learn more about Microsoft Intelligent Security Association](https://www.microsoft.com/security/partnerships/intelligent-security-association). - - ### Advanced integrations with export of recommendations and alerts (preview) In order to enable enterprise level scenarios on top of Security Center, it's now possible to consume Security Center alerts and recommendations in additional places besides the Azure portal or API. These can be directly exported to an event hub and to Log Analytics workspaces. Here are a few workflows you can create around these new capabilities: @@ -2918,20 +2846,18 @@ In order to enable enterprise level scenarios on top of Security Center, it's no - With export to Log Analytics workspace, you can create custom dashboards with Power BI. - With export to Event Hubs, you'll be able to export Security Center alerts and recommendations to your third-party SIEMs, to a third-party solution, or Azure Data Explorer. - ### Onboard on-prem servers to Security Center from Windows Admin Center (preview) Windows Admin Center is a management portal for Windows servers that are not deployed in Azure, offering them several Azure management capabilities such as backup and system updates. We have recently added the ability to onboard these non-Azure servers to be protected by ASC directly from the Windows Admin Center experience. With this new experience, users will be able to onboard a WAC server to Azure Security Center and enable viewing its security alerts and recommendations directly in the Windows Admin Center experience. - ## September 2019 Updates in September include: - - [Managing rules with adaptive application controls improvements](#managing-rules-with-adaptive-application-controls-improvements) - - [Control container security recommendation using Azure Policy](#control-container-security-recommendation-using-azure-policy) +- [Managing rules with adaptive application controls improvements](#managing-rules-with-adaptive-application-controls-improvements) +- [Control container security recommendation using Azure Policy](#control-container-security-recommendation-using-azure-policy) ### Managing rules with adaptive application controls improvements @@ -2939,23 +2865,21 @@ The experience of managing rules for virtual machines using adaptive application [Learn more about adaptive application controls](adaptive-application-controls.md). - ### Control container security recommendation using Azure Policy Azure Security Center's recommendation to remediate vulnerabilities in container security can now be enabled or disabled via Azure Policy. To view your enabled security policies, from Security Center open the Security Policy page.
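To make the Azure Policy tie-in described above concrete, the following sketch uses the Azure CLI to locate the Security Center default initiative assignment and inspect its parameters, which is where per-recommendation effects (such as disabling the container vulnerability recommendation) are surfaced. The assignment name `SecurityCenterBuiltIn` is the commonly used default, but treat it, and the exact parameter names you see, as assumptions to verify in your own subscription.

```azurecli
# List policy assignments at the subscription scope; the Security Center
# default initiative is typically assigned under the name "SecurityCenterBuiltIn".
az policy assignment list --query "[].{name:name, displayName:displayName}" --output table

# Show the parameters of that assignment. Each recommendation's effect
# (for example, AuditIfNotExists or Disabled) is exposed as an initiative
# parameter that you can override to enable or disable the recommendation.
az policy assignment show --name "SecurityCenterBuiltIn" --query "parameters"
```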
- ## August 2019 Updates in August include: - - [Just-in-time (JIT) VM access for Azure Firewall](#just-in-time-jit-vm-access-for-azure-firewall) - - [Single click remediation to boost your security posture (preview)](#single-click-remediation-to-boost-your-security-posture-preview) - - [Cross-tenant management](#cross-tenant-management) +- [Just-in-time (JIT) VM access for Azure Firewall](#just-in-time-jit-vm-access-for-azure-firewall) +- [Single click remediation to boost your security posture (preview)](#single-click-remediation-to-boost-your-security-posture-preview) +- [Cross-tenant management](#cross-tenant-management) -### Just-in-time (JIT) VM access for Azure Firewall +### Just-in-time (JIT) VM access for Azure Firewall Just-in-time (JIT) VM access for Azure Firewall is now generally available. Use it to secure your Azure Firewall protected environments in addition to your NSG protected environments. @@ -2967,7 +2891,6 @@ Requests are logged in the Azure Activity Log, so you can easily monitor and aud [Learn more about Azure Firewall](../firewall/overview.md). - ### Single click remediation to boost your security posture (preview) Secure score is a tool that helps you assess your workload security posture. It reviews your security recommendations and prioritizes them for you, so you know which recommendations to perform first. This helps you find the most serious security vulnerabilities to prioritize investigation. @@ -2978,27 +2901,24 @@ This operation will allow you to select the resources you want to apply the reme See which recommendations have quick fix enabled in the [reference guide to security recommendations](recommendations-reference.md). - ### Cross-tenant management -Security Center now supports cross-tenant management scenarios as part of Azure Lighthouse. This enables you to gain visibility and manage the security posture of multiple tenants in Security Center. +Security Center now supports cross-tenant management scenarios as part of Azure Lighthouse. This enables you to gain visibility and manage the security posture of multiple tenants in Security Center. [Learn more about cross-tenant management experiences](cross-tenant-management.md). - ## July 2019 ### Updates to network recommendations -Azure Security Center (ASC) has launched new networking recommendations and improved some existing ones. Now, using Security Center ensures even greater networking protection for your resources. +Azure Security Center (ASC) has launched new networking recommendations and improved some existing ones. Now, using Security Center ensures even greater networking protection for your resources. [Learn more about network recommendations](recommendations-reference.md#recs-networking). - ## June 2019 ### Adaptive network hardening - generally available -One of the biggest attack surfaces for workloads running in the public cloud are connections to and from the public Internet. Our customers find it hard to know which Network Security Group (NSG) rules should be in place to make sure that Azure workloads are only available to required source ranges. With this feature, Security Center learns the network traffic and connectivity patterns of Azure workloads and provides NSG rule recommendations, for Internet facing virtual machines. This helps our customer better configure their network access policies and limit their exposure to attacks. +One of the biggest attack surfaces for workloads running in the public cloud are connections to and from the public Internet. 
Our customers find it hard to know which Network Security Group (NSG) rules should be in place to make sure that Azure workloads are only available to required source ranges. With this feature, Security Center learns the network traffic and connectivity patterns of Azure workloads and provides NSG rule recommendations, for Internet facing virtual machines. This helps our customer better configure their network access policies and limit their exposure to attacks. [Learn more about adaptive network hardening](adaptive-network-hardening.md). diff --git a/articles/defender-for-cloud/release-notes.md b/articles/defender-for-cloud/release-notes.md index f9c0703613551..abde88d225bc3 100644 --- a/articles/defender-for-cloud/release-notes.md +++ b/articles/defender-for-cloud/release-notes.md @@ -2,26 +2,61 @@ title: Release notes for Microsoft Defender for Cloud description: A description of what's new and changed in Microsoft Defender for Cloud ms.topic: reference -ms.date: 05/18/2022 +ms.date: 05/30/2022 --- # What's new in Microsoft Defender for Cloud? Defender for Cloud is in active development and receives improvements on an ongoing basis. To stay up to date with the most recent developments, this page provides you with information about new features, bug fixes, and deprecated functionality. -This page is updated frequently, so revisit it often. +This page is updated frequently, so revisit it often. -To learn about *planned* changes that are coming soon to Defender for Cloud, see [Important upcoming changes to Microsoft Defender for Cloud](upcoming-changes.md). +To learn about *planned* changes that are coming soon to Defender for Cloud, see [Important upcoming changes to Microsoft Defender for Cloud](upcoming-changes.md). > [!TIP] > If you're looking for items older than six months, you'll find them in the [Archive for What's new in Microsoft Defender for Cloud](release-notes-archive.md). +## June 2022 + +Updates in June include: + +- [Drive implementation of security recommendations to enhance your security posture](#drive-implementation-of-security-recommendations-to-enhance-your-security-posture) +- [Filter security alerts by IP address](#filter-security-alerts-by-ip-address) +- [General availability (GA) of Defender for SQL on machines for AWS and GCP environments](#general-availability-ga-of-defender-for-sql-on-machines-for-aws-and-gcp-environments) + +### Drive implementation of security recommendations to enhance your security posture + +Today's increasing threats to organizations stretch the limits of security personnel to protect their expanding workloads. Security teams are challenged to implement the protections defined in their security policies. + +Now with the governance experience, security teams can assign remediation of security recommendations to the resource owners and require a remediation schedule. They can have full transparency into the progress of the remediation and get notified when tasks are overdue. + +Learn more about the governance experience in [Driving your organization to remediate security issues with recommendation governance](governance-rules.md). + +### Filter security alerts by IP address + +In many cases of attacks, you want to track alerts based on the IP address of the entity involved in the attack. Up until now, the IP appeared only in the "Related Entities" section in the single alert blade. Now, you can filter the alerts in the security alerts blade to see the alerts related to the IP address, and you can search for a specific IP address. 
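If you need the same IP-based filtering outside the portal, one rough approach is to pull the alert list with the Azure CLI and search the raw JSON for the address. The command below is only a sketch: the IP address shown is a placeholder, and the exact property that carries the related entities should be confirmed against your own output.

```azurecli
# List security alerts for the current subscription and search the raw JSON
# payload for a placeholder IP address (replace it with the address you're tracking).
az security alert list --output json | grep -i "51.12.13.14"
```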
+ +:::image type="content" source="media/release-notes/ip-address-filter-for-alerts.png" alt-text="Screenshot of filter for I P address in Defender for Cloud alerts." lightbox="media/release-notes/ip-address-filter-for-alerts.png"::: + +### General availability (GA) of Defender for SQL on machines for AWS and GCP environments + +The database protection capabilities provided by Microsoft Defender for Cloud have added support for your SQL servers that are hosted in either AWS or GCP environments. + +Using Defender for SQL, enterprises can now protect their entire database estate, hosted in Azure, AWS, GCP, and on-premises machines. + +Microsoft Defender for SQL provides a unified multicloud experience to view security recommendations, security alerts, and vulnerability assessment findings for both the SQL server and the underlying Windows OS. + +Using the multicloud onboarding experience, you can enable and enforce database protection for SQL servers running on AWS EC2, RDS Custom for SQL Server, and GCP Compute Engine. After enabling either of these plans, all supported resources that exist within the subscription are protected. Future resources created on the same subscription will also be protected. + +Learn how to protect and connect your [AWS environment](quickstart-onboard-aws.md) and your [GCP organization](quickstart-onboard-gcp.md) with Microsoft Defender for Cloud. + ## May 2022 Updates in May include: - [Multicloud settings of Servers plan are now available in connector level](#multicloud-settings-of-servers-plan-are-now-available-in-connector-level) - [JIT (Just-in-time) access for VMs is now available for AWS EC2 instances (Preview)](#jit-just-in-time-access-for-vms-is-now-available-for-aws-ec2-instances-preview) +- [Add and remove the Defender profile for AKS clusters using the CLI](#add-and-remove-the-defender-profile-for-aks-clusters-using-the-cli) ### Multicloud settings of Servers plan are now available in connector level There are now connector-level settings for Defender for Servers in multicloud. The new connector-level settings provide granularity for pricing and auto-provisioning configuration per connector, independently of the subscription. All auto-provisioning components available in the connector level (Azure Arc, MDE, and vulnerability assessments) are enabled by default, and the new configuration supports both [Plan 1 and Plan 2 pricing tiers](defender-for-servers-introduction.md#what-are-the-microsoft-defender-for-server-plans). - + Updates in the UI include a reflection of the selected pricing tier and the required components configured. :::image type="content" source="media/release-notes/main-page.png" alt-text="Screenshot of the main plan page with the Server plan multicloud settings." lightbox="media/release-notes/main-page.png"::: @@ -49,10 +84,17 @@ Learn more about [vulnerability management](deploy-vulnerability-assessment-tvm. ### JIT (Just-in-time) access for VMs is now available for AWS EC2 instances (Preview) -When you [connect AWS accounts](quickstart-onboard-aws.md), JIT will automatically evaluate the network configuration of your instances, security groups and recommend which instances need protection for their exposed management ports. This is similar to how JIT works with Azure. When you onboard unprotected EC2 instances, JIT will block public access to the management ports and only open them with authorized requests for a limited time frame.
+When you [connect AWS accounts](quickstart-onboard-aws.md), JIT will automatically evaluate the network configuration of your instance's security groups and recommend which instances need protection for their exposed management ports. This is similar to how JIT works with Azure. When you onboard unprotected EC2 instances, JIT will block public access to the management ports and only open them with authorized requests for a limited time frame. Learn how [JIT protects your AWS EC2 instances](just-in-time-access-overview.md#how-jit-operates-with-network-resources-in-azure-and-aws) +### Add and remove the Defender profile for AKS clusters using the CLI + +The Defender profile (preview) is required for Defender for Containers to provide the runtime protections and collects signals from nodes. You can now use the Azure CLI to [add and remove the Defender profile](defender-for-containers-enable.md?tabs=k8s-deploy-cli%2Ck8s-deploy-asc%2Ck8s-verify-asc%2Ck8s-remove-arc%2Ck8s-remove-cli&pivots=defender-for-container-aks#use-azure-cli-to-deploy-the-defender-extension) for an AKS cluster. + +> [!NOTE] +> This option is included in [Azure CLI 3.7 and above](/cli/azure/update-azure-cli). + ## April 2022 Updates in April include: @@ -96,7 +138,7 @@ To learn more, see [Stream alerts to Splunk and QRadar](export-to-siem.md#stream ### Deprecated the Azure Cache for Redis recommendation -The recommendation `Azure Cache for Redis should reside within a virtual network` (Preview) has been deprecated. We’ve changed our guidance for securing Azure Cache for Redis instances. We recommend the use of a private endpoint to restrict access to your Azure Cache for Redis instance, instead of a virtual network. +The recommendation `Azure Cache for Redis should reside within a virtual network` (Preview) has been deprecated. We’ve changed our guidance for securing Azure Cache for Redis instances. We recommend the use of a private endpoint to restrict access to your Azure Cache for Redis instance, instead of a virtual network. ### New alert variant for Microsoft Defender for Storage (preview) to detect exposure of sensitive data @@ -110,25 +152,25 @@ The new alert, `Publicly accessible storage containers with potentially sensitiv |--|--|--|--| |**PREVIEW - Publicly accessible storage containers with potentially sensitive data have been exposed**
                  (Storage.Blob_OpenContainersScanning.SuccessfulDiscovery.Sensitive)| Someone has scanned your Azure Storage account and exposed container(s) that allow public access. One or more of the exposed containers have names that indicate that they may contain sensitive data.

                  This usually indicates reconnaissance by a threat actor that is scanning for misconfigured publicly accessible storage containers that may contain sensitive data.

                  After a threat actor successfully discovers a container, they may continue by exfiltrating the data.
                  ✔ Azure Blob Storage
                  ✖ Azure Files
                  ✖ Azure Data Lake Storage Gen2 | Collection | High | -### Container scan alert title augmented with IP address reputation +### Container scan alert title augmented with IP address reputation -An IP address's reputation can indicate whether the scanning activity originates from a known threat actor, or from an actor that is using the Tor network to hide their identity. Both of these indicators, suggest that there's malicious intent. The IP address's reputation is provided by [Microsoft Threat Intelligence](https://go.microsoft.com/fwlink/?linkid=2128684). +An IP address's reputation can indicate whether the scanning activity originates from a known threat actor, or from an actor that is using the Tor network to hide their identity. Both of these indicators, suggest that there's malicious intent. The IP address's reputation is provided by [Microsoft Threat Intelligence](https://go.microsoft.com/fwlink/?linkid=2128684). The addition of the IP address's reputation to the alert title provides a way to quickly evaluate the intent of the actor, and thus the severity of the threat. -The following alerts will include this information: +The following alerts will include this information: -- `Publicly accessible storage containers have been exposed` +- `Publicly accessible storage containers have been exposed` - `Publicly accessible storage containers with potentially sensitive data have been exposed` - `Publicly accessible storage containers have been scanned. No publicly accessible data was discovered` -For example, the added information to the title of the `Publicly accessible storage containers have been exposed` alert will look like this: +For example, the added information to the title of the `Publicly accessible storage containers have been exposed` alert will look like this: - `Publicly accessible storage containers have been exposed`**`by a suspicious IP address`** -- `Publicly accessible storage containers have been exposed`**`by a Tor exit node`** +- `Publicly accessible storage containers have been exposed`**`by a Tor exit node`** All of the alerts for Microsoft Defender for Storage will continue to include threat intelligence information in the IP entity under the alert's Related Entities section. @@ -284,7 +326,7 @@ Updates in February include: - [Microsoft Defender for Azure Cosmos DB plan released for preview](#microsoft-defender-for-azure-cosmos-db-plan-released-for-preview) - [Threat protection for Google Kubernetes Engine (GKE) clusters](#threat-protection-for-google-kubernetes-engine-gke-clusters) -### Kubernetes workload protection for Arc-enabled Kubernetes clusters +### Kubernetes workload protection for Arc-enabled Kubernetes clusters Defender for Containers previously only protected Kubernetes workloads running in Azure Kubernetes Service. We've now extended the protective coverage to include Azure Arc-enabled Kubernetes clusters. @@ -304,15 +346,15 @@ Learn how to protect, and [connect your GCP projects](quickstart-onboard-gcp.md) ### Microsoft Defender for Azure Cosmos DB plan released for preview -We have extended Microsoft Defender for Cloud’s database coverage. You can now enable protection for your Azure Cosmos DB databases. +We have extended Microsoft Defender for Cloud’s database coverage. You can now enable protection for your Azure Cosmos DB databases. -Microsoft Defender for Azure Cosmos DB is an Azure-native layer of security that detects any attempt to exploit databases in your Azure Cosmos DB accounts. 
Microsoft Defender for Azure Cosmos DB detects potential SQL injections, known bad actors based on Microsoft Threat Intelligence, suspicious access patterns, and potential exploitation of your database through compromised identities, or malicious insiders. +Microsoft Defender for Azure Cosmos DB is an Azure-native layer of security that detects any attempt to exploit databases in your Azure Cosmos DB accounts. Microsoft Defender for Azure Cosmos DB detects potential SQL injections, known bad actors based on Microsoft Threat Intelligence, suspicious access patterns, and potential exploitation of your database through compromised identities, or malicious insiders. It continuously analyzes the customer data stream generated by the Azure Cosmos DB services. -When potentially malicious activities are detected, security alerts are generated. These alerts are displayed in Microsoft Defender for Cloud together with the details of the suspicious activity along with the relevant investigation steps, remediation actions, and security recommendations. +When potentially malicious activities are detected, security alerts are generated. These alerts are displayed in Microsoft Defender for Cloud together with the details of the suspicious activity along with the relevant investigation steps, remediation actions, and security recommendations. -There's no impact on database performance when enabling the service, because Defender for Azure Cosmos DB doesn't access the Azure Cosmos DB account data. +There's no impact on database performance when enabling the service, because Defender for Azure Cosmos DB doesn't access the Azure Cosmos DB account data. Learn more at [Introduction to Microsoft Defender for Azure Cosmos DB](concept-defender-for-cosmos.md). @@ -324,7 +366,6 @@ Learn how to [enable your database security at the subscription level](quickstar Following our recent announcement [Native CSPM for GCP and threat protection for GCP compute instances](#native-cspm-for-gcp-and-threat-protection-for-gcp-compute-instances), Microsoft Defender for Containers has extended its Kubernetes threat protection, behavioral analytics, and built-in admission control policies to Google's Kubernetes Engine (GKE) Standard clusters. You can easily onboard any existing, or new GKE Standard clusters to your environment through our Automatic onboarding capabilities. Check out [Container security with Microsoft Defender for Cloud](defender-for-containers-introduction.md#vulnerability-assessment), for a full list of available features. - ## January 2022 Updates in January include: @@ -348,7 +389,7 @@ Microsoft Defender for Resource Manager automatically monitors the resource mana The plan's protections greatly enhance an organization's resiliency against attacks from threat actors and significantly increase the number of Azure resources protected by Defender for Cloud. -In December 2020, we introduced the preview of Defender for Resource Manager, and in May 2021 the plan was release for general availability. +In December 2020, we introduced the preview of Defender for Resource Manager, and in May 2021 the plan was released for general availability. With this update, we've comprehensively revised the focus of the Microsoft Defender for Resource Manager plan. The updated plan includes many **new alerts focused on identifying suspicious invocation of high-risk operations**.
These new alerts provide extensive monitoring for attacks across the *complete* [MITRE ATT&CK® matrix for cloud-based techniques](https://attack.mitre.org/matrices/enterprise/cloud/). @@ -371,7 +412,6 @@ The new alerts for this Defender plan cover these intentions as shown in the fol | **Suspicious invocation of a high-risk 'Data Collection' operation detected (Preview)**
                  (ARM_AnomalousOperation.Collection) | Microsoft Defender for Resource Manager identified a suspicious invocation of a high-risk operation in your subscription, which might indicate an attempt to collect data. The identified operations are designed to allow administrators to efficiently manage their environments. While this activity may be legitimate, a threat actor might utilize such operations to collect sensitive data on resources in your environment. This can indicate that the account is compromised and is being used with malicious intent. | Collection | Medium | | **Suspicious invocation of a high-risk 'Impact' operation detected (Preview)**
                  (ARM_AnomalousOperation.Impact) | Microsoft Defender for Resource Manager identified a suspicious invocation of a high-risk operation in your subscription, which might indicate an attempted configuration change. The identified operations are designed to allow administrators to efficiently manage their environments. While this activity may be legitimate, a threat actor might utilize such operations to access restricted credentials and compromise resources in your environment. This can indicate that the account is compromised and is being used with malicious intent. | Impact | Medium | - In addition, these two alerts from this plan have come out of preview: | Alert (alert type) | Description | MITRE tactics (intentions)| Severity | @@ -379,15 +419,13 @@ In addition, these two alerts from this plan have come out of preview: | **Azure Resource Manager operation from suspicious IP address**
                  (ARM_OperationFromSuspiciousIP) | Microsoft Defender for Resource Manager detected an operation from an IP address that has been marked as suspicious in threat intelligence feeds. | Execution | Medium | | **Azure Resource Manager operation from suspicious proxy IP address**
                  (ARM_OperationFromSuspiciousProxyIP) | Microsoft Defender for Resource Manager detected a resource management operation from an IP address that is associated with proxy services, such as TOR. While this behavior can be legitimate, it's often seen in malicious activities, when threat actors try to hide their source IP. | Defense Evasion | Medium | - - ### Recommendations to enable Microsoft Defender plans on workspaces (in preview) -To benefit from all of the security features available from [Microsoft Defender for Servers](defender-for-servers-introduction.md) and [Microsoft Defender for SQL on machines](defender-for-sql-introduction.md), the plans must be enabled on **both** the subscription and workspace levels. +To benefit from all of the security features available from [Microsoft Defender for Servers](defender-for-servers-introduction.md) and [Microsoft Defender for SQL on machines](defender-for-sql-introduction.md), the plans must be enabled on **both** the subscription and workspace levels. -When a machine is in a subscription with one of these plan enabled, you'll be billed for the full protections. However, if that machine is reporting to a workspace *without* the plan enabled, you won't actually receive those benefits. +When a machine is in a subscription with one of these plan enabled, you'll be billed for the full protections. However, if that machine is reporting to a workspace *without* the plan enabled, you won't actually receive those benefits. -We've added two recommendations that highlight workspaces without these plans enabled, that nevertheless have machines reporting to them from subscriptions that *do* have the plan enabled. +We've added two recommendations that highlight workspaces without these plans enabled, that nevertheless have machines reporting to them from subscriptions that *do* have the plan enabled. The two recommendations, which both offer automated remediation (the 'Fix' action), are: @@ -396,19 +434,17 @@ The two recommendations, which both offer automated remediation (the 'Fix' actio |[Microsoft Defender for Servers should be enabled on workspaces](https://portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/1ce68079-b783-4404-b341-d2851d6f0fa2) |Microsoft Defender for Servers brings threat detection and advanced defenses for your Windows and Linux machines.
                  With this Defender plan enabled on your subscriptions but not on your workspaces, you're paying for the full capability of Microsoft Defender for Servers but missing out on some of the benefits.
                  When you enable Microsoft Defender for Servers on a workspace, all machines reporting to that workspace will be billed for Microsoft Defender for Servers - even if they're in subscriptions without Defender plans enabled. Unless you also enable Microsoft Defender for Servers on the subscription, those machines won't be able to take advantage of just-in-time VM access, adaptive application controls, and network detections for Azure resources.
                  Learn more in Introduction to Microsoft Defender for Servers.
                  (No related policy) |Medium | |[Microsoft Defender for SQL on machines should be enabled on workspaces](https://portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/e9c320f1-03a0-4d2b-9a37-84b3bdc2e281) |Microsoft Defender for Servers brings threat detection and advanced defenses for your Windows and Linux machines.
                  With this Defender plan enabled on your subscriptions but not on your workspaces, you're paying for the full capability of Microsoft Defender for Servers but missing out on some of the benefits.
                  When you enable Microsoft Defender for Servers on a workspace, all machines reporting to that workspace will be billed for Microsoft Defender for Servers - even if they're in subscriptions without Defender plans enabled. Unless you also enable Microsoft Defender for Servers on the subscription, those machines won't be able to take advantage of just-in-time VM access, adaptive application controls, and network detections for Azure resources.
                  Learn more in Introduction to Microsoft Defender for Servers.
                  (No related policy) |Medium | - - ### Auto provision Log Analytics agent to Azure Arc-enabled machines (preview) Defender for Cloud uses the Log Analytics agent to gather security-related data from machines. The agent reads various security-related configurations and event logs and copies the data to your workspace for analysis. -Defender for Cloud's auto provisioning settings has a toggle for each type of supported extension, including the Log Analytics agent. +Defender for Cloud's auto provisioning settings has a toggle for each type of supported extension, including the Log Analytics agent. -In a further expansion of our hybrid cloud features, we've added an option to auto provision the Log Analytics agent to machines connected to Azure Arc. +In a further expansion of our hybrid cloud features, we've added an option to auto provision the Log Analytics agent to machines connected to Azure Arc. As with the other auto provisioning options, this is configured at the subscription level. -When you enable this option, you'll be prompted for the workspace. +When you enable this option, you'll be prompted for the workspace. > [!NOTE] > For this preview, you can't select the default workspaces that was created by Defender for Cloud. To ensure you receive the full set of security features available for the Azure Arc-enabled servers, verify that you have the relevant security solution installed on the selected workspace. @@ -421,9 +457,9 @@ We've removed the recommendation **Sensitive data in your SQL databases should b Advance notice of this change appeared for the last six months in the [Important upcoming changes to Microsoft Defender for Cloud](upcoming-changes.md) page. -### Communication with suspicious domain alert expanded to included known Log4Shell-related domains +### Communication with suspicious domain alert expanded to included known Log4Shell-related domains -The following alert was previously only available to organizations who had enabled the [Microsoft Defender for DNS](defender-for-dns-introduction.md) plan. +The following alert was previously only available to organizations who had enabled the [Microsoft Defender for DNS](defender-for-dns-introduction.md) plan. With this update, the alert will also show for subscriptions with the [Microsoft Defender for Servers](defender-for-servers-introduction.md) or [Defender for App Service](defender-for-app-service-introduction.md) plan enabled. 
@@ -446,12 +482,12 @@ The new **Copy alert JSON** button puts the alert’s details, in JSON format, i For consistency with other recommendation names, we've renamed the following two recommendations: - Recommendation to resolve vulnerabilities discovered in running container images - - Previous name: Vulnerabilities in running container images should be remediated (powered by Qualys) - - New name: Running container images should have vulnerability findings resolved + - Previous name: Vulnerabilities in running container images should be remediated (powered by Qualys) + - New name: Running container images should have vulnerability findings resolved - Recommendation to enable diagnostic logs for Azure App Service - - Previous name: Diagnostic logs should be enabled in App Service - - New name: Diagnostic logs in App Service should be enabled + - Previous name: Diagnostic logs should be enabled in App Service + - New name: Diagnostic logs in App Service should be enabled ### Deprecate Kubernetes cluster containers should only listen on allowed ports policy @@ -476,123 +512,3 @@ The active alerts workbook allows users to view a unified dashboard of their agg The 'System updates should be installed on your machines' recommendation is now available on all government clouds. It's likely that this change will impact your government cloud subscription's secure score. We expect the change to lead to a decreased score, but it's possible the recommendation's inclusion might result in an increased score in some cases. - -## December 2021 - -Updates in December include: - -- [Microsoft Defender for Containers plan released for general availability (GA)](#microsoft-defender-for-containers-plan-released-for-general-availability-ga) -- [New alerts for Microsoft Defender for Storage released for general availability (GA)](#new-alerts-for-microsoft-defender-for-storage-released-for-general-availability-ga) -- [Improvements to alerts for Microsoft Defender for Storage](#improvements-to-alerts-for-microsoft-defender-for-storage) -- ['PortSweeping' alert removed from network layer alerts](#portsweeping-alert-removed-from-network-layer-alerts) - -### Microsoft Defender for Containers plan released for general availability (GA) - -Over two years ago, we introduced [Defender for Kubernetes](defender-for-kubernetes-introduction.md) and [Defender for container registries](defender-for-container-registries-introduction.md) as part of the Azure Defender offering within Microsoft Defender for Cloud. - -With the release of [Microsoft Defender for Containers](defender-for-containers-introduction.md), we've merged these two existing Defender plans. 
- -The new plan: - -- **Combines the features of the two existing plans** - threat detection for Kubernetes clusters and vulnerability assessment for images stored in container registries -- **Brings new and improved features** - including multicloud support, host level threat detection with over **sixty** new Kubernetes-aware analytics, and vulnerability assessment for running images -- **Introduces Kubernetes-native at-scale onboarding** - by default, when you enable the plan all relevant components are configured to be deployed automatically - -With this release, the availability and presentation of Defender for Kubernetes and Defender for container registries has changed as follows: - -- New subscriptions - The two previous container plans are no longer available -- Existing subscriptions - Wherever they appear in the Azure portal, the plans are shown as **Deprecated** with instructions for how to upgrade to the newer plan - :::image type="content" source="media/release-notes/defender-plans-deprecated-indicator.png" alt-text="Defender for container registries and Defender for Kubernetes plans showing 'Deprecated' and upgrade information."::: - -The new plan is free for the month of December 2021. For the potential changes to the billing from the old plans to Defender for Containers, and for more information on the benefits introduced with this plan, see [Introducing Microsoft Defender for Containers](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/introducing-microsoft-defender-for-containers/ba-p/2952317). - -For more information, see: - -- [Overview of Microsoft Defender for Containers](defender-for-containers-introduction.md) -- [Enable Microsoft Defender for Containers](defender-for-containers-enable.md) -- [Introducing Microsoft Defender for Containers - Microsoft Tech Community](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/introducing-microsoft-defender-for-containers/ba-p/2952317) -- [Microsoft Defender for Containers | Defender for Cloud in the Field #3 - YouTube](https://www.youtube.com/watch?v=KeH0a3enLJ0&t=201s) - - -### New alerts for Microsoft Defender for Storage released for general availability (GA) - -Threat actors use tools and scripts to scan for publicly open containers in the hope of finding misconfigured open storage containers with sensitive data. - -Microsoft Defender for Storage detects these scanners so that you can block them and remediate your posture. - -The preview alert that detected this was called **“Anonymous scan of public storage containers”**. To provide greater clarity about the suspicious events discovered, we've divided this into **two** new alerts. These alerts are relevant to Azure Blob Storage only. - -We've improved the detection logic, updated the alert metadata, and changed the alert name and alert type. 
- -These are the new alerts: - -| Alert (alert type) | Description | MITRE tactic | Severity | -|---------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|----------| -| **Publicly accessible storage containers successfully discovered**
                  (Storage.Blob_OpenContainersScanning.SuccessfulDiscovery) | A successful discovery of publicly open storage container(s) in your storage account was performed in the last hour by a scanning script or tool.

                  This usually indicates a reconnaissance attack, where the threat actor tries to list blobs by guessing container names, in the hope of finding misconfigured open storage containers with sensitive data in them.

                  The threat actor may use their own script or use known scanning tools like Microburst to scan for publicly open containers.

                  ✔ Azure Blob Storage
                  ✖ Azure Files
                  ✖ Azure Data Lake Storage Gen2 | Collection | Medium | -| **Publicly accessible storage containers unsuccessfully scanned**
                  (Storage.Blob_OpenContainersScanning.FailedAttempt) | A series of failed attempts to scan for publicly open storage containers were performed in the last hour.

                  This usually indicates a reconnaissance attack, where the threat actor tries to list blobs by guessing container names, in the hope of finding misconfigured open storage containers with sensitive data in them.

                  The threat actor may use their own script or use known scanning tools like Microburst to scan for publicly open containers.

                  ✔ Azure Blob Storage
                  ✖ Azure Files
                  ✖ Azure Data Lake Storage Gen2 | Collection | Low | - - -For more information, see: -- [Threat matrix for storage services](https://www.microsoft.com/security/blog/2021/04/08/threat-matrix-for-storage/) -- [Introduction to Microsoft Defender for Storage](defender-for-storage-introduction.md) -- [List of alerts provided by Microsoft Defender for Storage](alerts-reference.md#alerts-azurestorage) - - -### Improvements to alerts for Microsoft Defender for Storage - -The initial access alerts now have improved accuracy and more data to support investigation. - -Threat actors use various techniques in the initial access to gain a foothold within a network. Two of the [Microsoft Defender for Storage](defender-for-storage-introduction.md) alerts that detect behavioral anomalies in this stage now have improved detection logic and additional data to support investigations. - -If you've [configured automations](workflow-automation.md) or defined [alert suppression rules](alerts-suppression-rules.md) for these alerts in the past, update them in accordance with these changes. - -#### Detecting access from a Tor exit node - -Access from a Tor exit node might indicate a threat actor trying to hide their identity. - -The alert is now tuned to generate only for authenticated access, which results in higher accuracy and confidence that the activity is malicious. This enhancement reduces the benign positive rate. - -An outlying pattern will have high severity, while less anomalous patterns will have medium severity. - -The alert name and description have been updated. The AlertType remains unchanged. - -- Alert name (old): Access from a Tor exit node to a storage account -- Alert name (new): Authenticated access from a Tor exit node -- Alert types: Storage.Blob_TorAnomaly / Storage.Files_TorAnomaly -- Description: One or more storage container(s) / file share(s) in your storage account were successfully accessed from an IP address known to be an active exit node of Tor (an anonymizing proxy). Threat actors use Tor to make it difficult to trace the activity back to them. Authenticated access from a Tor exit node is a likely indication that a threat actor is trying to hide their identity. Applies to: Azure Blob Storage, Azure Files, Azure Data Lake Storage Gen2 -- MITRE tactic: Initial access -- Severity: High/Medium - -#### Unusual unauthenticated access - -A change in access patterns may indicate that a threat actor was able to exploit public read access to storage containers, either by exploiting a mistake in access configurations, or by changing the access permissions. - -This medium severity alert is now tuned with improved behavioral logic, higher accuracy, and confidence that the activity is malicious. This enhancement reduces the benign positive rate. - -The alert name and description have been updated. The AlertType remains unchanged. - -- Alert name (old): Anonymous access to a storage account -- Alert name (new): Unusual unauthenticated access to a storage container -- Alert types: Storage.Blob_AnonymousAccessAnomaly -- Description: This storage account was accessed without authentication, which is a change in the common access pattern. Read access to this container is usually authenticated. This might indicate that a threat actor was able to exploit public read access to storage container(s) in this storage account(s). 
Applies to: Azure Blob Storage -- MITRE tactic: Collection -- Severity: Medium - -For more information, see: - -- [Threat matrix for storage services](https://www.microsoft.com/security/blog/2021/04/08/threat-matrix-for-storage/) -- [Introduction to Microsoft Defender for Storage](defender-for-storage-introduction.md) -- [List of alerts provided by Microsoft Defender for Storage](alerts-reference.md#alerts-azurestorage) - - -### 'PortSweeping' alert removed from network layer alerts - -The following alert was removed from our network layer alerts due to inefficiencies: - -| Alert (alert type) | Description | MITRE tactics | Severity | -|------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------:|---------------| -| **Possible outgoing port scanning activity detected**
                  (PortSweeping) | Network traffic analysis detected suspicious outgoing traffic from %{Compromised Host}. This traffic may be a result of a port scanning activity. When the compromised resource is a load balancer or an application gateway, the suspected outgoing traffic has been originated from to one or more of the resources in the backend pool (of the load balancer or application gateway). If this behavior is intentional, please note that performing port scanning is against Azure Terms of service. If this behavior is unintentional, it may mean your resource has been compromised. | Discovery | Medium | - - - - diff --git a/articles/defender-for-cloud/review-security-recommendations.md b/articles/defender-for-cloud/review-security-recommendations.md index e764958bf666d..e94d4bd377bdb 100644 --- a/articles/defender-for-cloud/review-security-recommendations.md +++ b/articles/defender-for-cloud/review-security-recommendations.md @@ -1,114 +1,68 @@ --- -title: Security recommendations in Microsoft Defender for Cloud -description: This document walks you through how recommendations in Microsoft Defender for Cloud help you protect your Azure resources and stay in compliance with security policies. -ms.topic: conceptual -ms.date: 04/03/2022 +title: Improving your security posture with recommendations in Microsoft Defender for Cloud +description: This document walks you through how to identify security recommendations that will help you improve your security posture. +ms.topic: how-to +ms.date: 05/23/2022 --- -# Review your security recommendations +# Find recommendations that can improve your security posture -This article explains how to view and understand the recommendations in Microsoft Defender for Cloud to help you protect your multicloud resources. +To improve your [secure score](secure-score-security-controls.md), you have to implement the security recommendations for your environment. From the list of recommendations, you can use filters to find the recommendations that have the most impact on your score, or the ones that you were assigned to implement. -## View your recommendations +To get to the list of recommendations: -Defender for Cloud analyzes the security state of your resources to identify potential vulnerabilities. +1. Sign in to the [Azure portal](https://portal.azure.com). +1. Either: + - In the Defender for Cloud overview, select **Security posture** and then select **View recommendations** for the environment that you want to improve. + - Go to **Recommendations** in the Defender for Cloud menu. -**To view your Secure score recommendations**: +You can search for specific recommendations by name. Use the search box and filters above the list of recommendations to find specific recommendations, and look at the [details of the recommendation](security-policy-concept.md#security-recommendation-details) to decide whether to [remediate it](implement-security-recommendations.md), [exempt resources](exempt-resource.md), or [disable the recommendation](tutorial-security-policy.md#disable-security-policies-and-disable-recommendations). -1. Sign in to the [Azure portal](https://portal.azure.com). +## Finding recommendations with high impact on your secure score -1. Navigate to **Microsoft Defender for Cloud** > **Recommendations**. - - :::image type="content" source="media/review-security-recommendations/recommendations-view.png" alt-text="Screenshot of the recommendations page."::: - - Here you'll see the recommendations applicable to your environment(s). 
Recommendations are grouped into security controls. - -1. Select **Secure score recommendations**. - - :::image type="content" source="media/review-security-recommendations/secure-score-recommendations.png" alt-text="Screenshot showing the location of the secure score recommendations tab."::: - - > [!NOTE] - > Custom recommendations can be found under the All recommendations tab. Learn how to [Create custom security initiatives and policies](custom-security-policies.md). - - Secure score recommendations affect the secure score and are mapped to the various security controls. The All recommendations tab, allows you to see all of the recommendations including recommendations that are part of different regulatory compliance standards. - -1. (Optional) Select a relevant environment(s). - - :::image type="content" source="media/review-security-recommendations/environment-filter.png" alt-text="Screenshot of the environment filter, to select your filters."::: - -1. Select the :::image type="icon" source="media/review-security-recommendations/drop-down-arrow.png" border="false"::: to expand the control, and view a list of recommendations. - - :::image type="content" source="media/review-security-recommendations/list-recommendations.png" alt-text="Screenshot showing how to see the full list of recommendations by selecting the drop-down menu icon." lightbox="media/review-security-recommendations/list-recommendations-expanded.png"::: - -1. Select a specific recommendation to view the recommendation details page. - - :::image type="content" source="./media/review-security-recommendations/recommendation-details-page.png" alt-text="Screenshot of the recommendation details page." lightbox="./media/review-security-recommendations/recommendation-details-page-expanded.png"::: - - 1. For supported recommendations, the top toolbar shows any or all of the following buttons: - - **Enforce** and **Deny** (see [Prevent misconfigurations with Enforce/Deny recommendations](prevent-misconfigurations.md)). - - **View policy definition** to go directly to the Azure Policy entry for the underlying policy. - - **Open query** - All recommendations have the option to view the detailed information about the affected resources using Azure Resource Graph Explorer. - 1. **Severity indicator**. - 1. **Freshness interval** (where relevant). - 1. **Count of exempted resources** if exemptions exist for a recommendation, this shows the number of resources that have been exempted with a link to view the specific resources. - 1. **Mapping to MITRE ATT&CK ® tactics and techniques** if a recommendation has defined tactics and techniques, select the icon for links to the relevant pages on MITRE's site. This applies only to Azure scored recommendations. - - :::image type="content" source="media/review-security-recommendations/tactics-window.png" alt-text="Screenshot of the MITRE tactics mapping for a recommendation."::: +Your [secure score is calculated](secure-score-security-controls.md?branch=main#how-your-secure-score-is-calculated) based on the security recommendations that you have implemented. In order to increase your score and improve your security posture, you have to find recommendations with unhealthy resources and [remediate those recommendations](implement-security-recommendations.md). - 1. **Description** - A short description of the security issue. - 1. 
When relevant, the details page also includes a table of **related recommendations**: +The list of recommendations shows the **Potential score increase** that you can achieve when you remediate all of the recommendations in the security control. - The relationship types are: +To find recommendations that can improve your secure score: - - **Prerequisite** - A recommendation that must be completed before the selected recommendation - - **Alternative** - A different recommendation, which provides another way of achieving the goals of the selected recommendation - - **Dependent** - A recommendation for which the selected recommendation is a prerequisite +1. In the list of recommendations, use the **Potential score increase** to identify the security control that contains recommendations that will increase your secure score. + - You can also use the search box and filters above the list of recommendations to find specific recommendations. +1. Open a security control to see the recommendations that have unhealthy resources. - For each related recommendation, the number of unhealthy resources is shown in the "Affected resources" column. +When you [remediate](implement-security-recommendations.md) all of the recommendations in the security control, your secure score increases by the percentage points listed for the control. - > [!TIP] - > If a related recommendation is grayed out, its dependency isn't yet completed and so isn't available. +## Manage the owner and ETA of recommendations that are assigned to you - 1. **Remediation steps** - A description of the manual steps required to remediate the security issue on the affected resources. For recommendations with the **Fix** option**, you can select **View remediation logic** before applying the suggested fix to your resources. +[Security teams can assign a recommendation](governance-rules.md) to a specific person and assign a due date to drive your organization towards increased security. If you have recommendations assigned to you, you are accountable to remediate the resources affected by the recommendations to help your organization be compliant with the security policy. - 1. **Affected resources** - Your resources are grouped into tabs: - - **Healthy resources** – Relevant resources, which either aren't impacted or on which you've already remediated the issue. - - **Unhealthy resources** – Resources that are still impacted by the identified issue. - - **Not applicable resources** – Resources for which the recommendation can't give a definitive answer. The not applicable tab also includes reasons for each resource. +Recommendations are listed as **On time** until their due date is passed, when they are changed to **Overdue**. Before the recommendation is overdue, the recommendation does not impact the secure score. The security team can also apply a grace period during which overdue recommendations continue to not impact the secure score. - :::image type="content" source="./media/review-security-recommendations/recommendations-not-applicable-reasons.png" alt-text="Not applicable resources with reasons."::: - 1. Action buttons to remediate the recommendation or trigger a logic app. +To help you plan your work and report on progress, you can set an ETA for the specific resources to show when you plan to have the recommendation resolved by for those resources. You can also change the owner of the recommendation for specific resources so that the person responsible for remediation is assigned to the resource. 
-## Search for a recommendation +:::image type="content" source="./media/governance-rules/change-governance-owner-eta.png" alt-text="Screenshot of fields required to add a governance rule." lightbox="media/governance-rules/change-governance-owner-eta.png"::: -You can search for specific recommendations by name. The search box and filters above the list of recommendations can be used to help locate a specific recommendation. +To change the owner of resources and set the ETA for remediation of recommendations that are assigned to you: -Custom recommendations only appear under the All recommendations tab. +1. In the filters for list of recommendations, select **Show my items only**. -**To search for recommendations**: + - The status column indicates the recommendations that are on time, overdue, or completed. + - The insights column indicates the recommendations that are in a grace period, so they currently do not impact your secure score until they become overdue. -1. On the recommendation page, select an environment from the environment filter. +1. Select an on time or overdue recommendation. +1. For the resources that are assigned to you, set the owner of the resource: + 1. Select the resources that are owned by another person, and select **Change owner and set ETA**. + 1. Select **Change owner**, enter the email address of the owner of the resource, and select **Save**. + The owner of the resource gets a weekly email listing the recommendations that they are assigned to. +1. For resources that you own, set an ETA for remediation: + 1. Select resources that you plan to remediate by the same date, and select **Change owner and set ETA**. + 1. Select **Change ETA** and set the date by which you plan to remediate the recommendation for those resources. + 1. Enter a justification for the remediation by that date, and select **Save**. - :::image type="content" source="media/review-security-recommendations/environment-filter.png" alt-text="Screenshot of the environmental filter on the recommendation page."::: - - You can select 1, 2, or all options at a time. The page's results will automatically reflect your choice. - -1. Enter a name in the search box, or select one of the available filters. - - :::image type="content" source="media/review-security-recommendations/search-filters.png" alt-text="Screenshot of the search box and filter list."::: - -1. Select :::image type="icon" source="media/review-security-recommendations/add-filter.png" border="false"::: to add more filter(s). - -1. Select a filter from the drop-down menu. - - :::image type="content" source="media/review-security-recommendations/filter-drop-down.png" alt-text="Screenshot of the available filters to select."::: - -1. Select a value from the drop-down menu. - -1. Select **OK**. +The due date for the recommendation does not change, but the security team can see that you plan to update the resources by the specified ETA date. ## Review recommendation data in Azure Resource Graph Explorer (ARG) -You can review recommendations in ARG both on the recommendations page or on an individual recommendation. +You can review recommendations in ARG both on the recommendations page or on an individual recommendation. The toolbar on the recommendation details page includes an **Open query** button to explore the details in [Azure Resource Graph (ARG)](../governance/resource-graph/index.yml), an Azure service that gives you the ability to query - across multiple subscriptions - Defender for Cloud's security posture data. 
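If you'd rather pull the same posture data programmatically instead of using the **Open query** button, the following minimal Python sketch shows one way to run a Resource Graph query for unhealthy Defender for Cloud assessments. It assumes the `azure-identity` and `azure-mgmt-resourcegraph` packages and a placeholder `<subscription-id>`; the query shape is illustrative only, since the query that the portal generates for a specific recommendation will differ.

```python
# Minimal sketch: query unhealthy Defender for Cloud assessments with Azure Resource Graph.
# Assumes the azure-identity and azure-mgmt-resourcegraph packages are installed and that
# DefaultAzureCredential can authenticate (for example, via `az login`).
from azure.identity import DefaultAzureCredential
from azure.mgmt.resourcegraph import ResourceGraphClient
from azure.mgmt.resourcegraph.models import QueryRequest

credential = DefaultAzureCredential()
client = ResourceGraphClient(credential)

# Defender for Cloud assessments are exposed through the securityresources table.
# This query text is an illustrative shape, not the exact query the portal generates.
query = """
securityresources
| where type == 'microsoft.security/assessments'
| where properties.status.code == 'Unhealthy'
| project recommendation = tostring(properties.displayName),
          resourceId = tostring(properties.resourceDetails.Id)
"""

# Replace <subscription-id> with the subscription(s) you want to query.
response = client.resources(QueryRequest(subscriptions=["<subscription-id>"], query=query))
for row in response.data:
    print(row["recommendation"], "-", row["resourceId"])
```

You can also paste the same KQL directly into Azure Resource Graph Explorer in the portal if you don't want to script it.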
@@ -120,7 +74,7 @@ For example, this recommendation details page shows 15 affected resources: :::image type="content" source="./media/review-security-recommendations/open-query.png" alt-text="The **Open Query** button on the recommendation details page."::: -When you open the underlying query, and run it, Azure Resource Graph Explorer returns the same 15 resources and their health status for this recommendation: +When you open the underlying query, and run it, Azure Resource Graph Explorer returns the same 15 resources and their health status for this recommendation: :::image type="content" source="./media/review-security-recommendations/run-query.png" alt-text="Azure Resource Graph Explorer showing the results for the recommendation shown in the previous screenshot."::: @@ -130,7 +84,7 @@ The Insights column of the page gives you more details for each recommendation. | Icon | Name | Description | |--|--|--| -| :::image type="icon" source="media/secure-score-security-controls/preview-icon.png" border="false"::: | *Preview recommendation** | This recommendation won't affect your secure score until it's GA. | +| :::image type="icon" source="media/secure-score-security-controls/preview-icon.png" border="false"::: | Preview recommendation | This recommendation won't affect your secure score until it's GA. | | :::image type="icon" source="media/secure-score-security-controls/fix-icon.png" border="false"::: | **Fix** | From within the recommendation details page, you can use 'Fix' to resolve this issue. | | :::image type="icon" source="media/secure-score-security-controls/enforce-icon.png" border="false"::: | **Enforce** | From within the recommendation details page, you can automatically deploy a policy to fix this issue whenever someone creates a non-compliant resource. | | :::image type="icon" source="media/secure-score-security-controls/deny-icon.png" border="false"::: | **Deny** | From within the recommendation details page, you can prevent new resources from being created with this issue. | @@ -141,12 +95,10 @@ Recommendations that aren't included in the calculations of your secure score, s Recommendations can be downloaded to a CSV report from the Recommendations page. -**To download a CSV report of your recommendations**: - -1. Sign in to the [Azure portal](https://portal.azure.com). +To download a CSV report of your recommendations: +1. Sign in to the [Azure portal](https://portal.azure.com). 1. Navigate to **Microsoft Defender for Cloud** > **Recommendations**. - 1. Select **Download CSV report**. :::image type="content" source="media/review-security-recommendations/download-csv.png" alt-text="Screenshot showing you where to select the Download C S V report from."::: @@ -159,6 +111,16 @@ When the report is ready, you'll be notified by a second pop-up. :::image type="content" source="media/review-security-recommendations/downloaded-csv.png" alt-text="Screenshot letting you know your downloaded completed."::: +## Learn more + +If you would like to learn more from the product manager about security posture, check out [Security posture management improvements](episode-four.md). 
+ +You can also check out the following blogs: + +- [Security posture management and server protection for AWS and GCP are now generally available](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/security-posture-management-and-server-protection-for-aws-and/ba-p/3271388) +- [Custom assessments and standards in Microsoft Defender for Cloud for AWS workloads (Preview)](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/custom-assessments-and-standards-in-microsoft-defender-for-cloud/ba-p/3066575) +- [New enhancements added to network security dashboard](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/new-enhancements-added-to-network-security-dashboard/ba-p/2896021) + ## Next steps In this document, you were introduced to security recommendations in Defender for Cloud. For related information: diff --git a/articles/defender-for-cloud/secure-score-security-controls.md b/articles/defender-for-cloud/secure-score-security-controls.md index ffe4888ea685f..f6280253d955f 100644 --- a/articles/defender-for-cloud/secure-score-security-controls.md +++ b/articles/defender-for-cloud/secure-score-security-controls.md @@ -1,17 +1,17 @@ --- title: Security posture for Microsoft Defender for Cloud description: Description of Microsoft Defender for Cloud's secure score and its security controls -author: Elazark -ms.author: elkrieger -ms.topic: article -ms.date: 04/03/2022 +author: bmansheim +ms.author: benmansheim +ms.topic: conceptual +ms.date: 06/02/2022 --- # Security posture for Microsoft Defender for Cloud ## Introduction to secure score -Microsoft Defender for Cloud has two main goals: +Microsoft Defender for Cloud has two main goals: - to help you understand your current security situation - to help you efficiently and effectively improve your security @@ -49,7 +49,7 @@ You can group this section by environment by selecting the Group by Environment :::image type="content" source="media/secure-score-security-controls/bottom-half.png" alt-text="Screenshot of the bottom half of the security posture page."::: -## How your secure score is calculated +## How your secure score is calculated The contribution of each security control towards the overall secure score is shown on the recommendations page. @@ -73,7 +73,7 @@ In this example: - **Current score** - :::image type="icon" source="media/secure-score-security-controls/current-score.png" border="false"::: - The current score for this control.
                  Current score=[Score per resource]*[Number of healthy resources]. + The current score for this control.
                  Current score=[Score per resource]*[Number of healthy resources]. Each control contributes towards the total score. In this example, the control is contributing 2.00 points to current total secure score. @@ -83,17 +83,17 @@ In this example: For example, Potential score increase=[Score per resource]*[Number of unhealthy resources] or 0.1714 x 30 unhealthy resources = 5.14. -- **Insights** - :::image type="icon" source="media/secure-score-security-controls/insights.png" border="false"::: +- **Insights** - :::image type="icon" source="media/secure-score-security-controls/insights.png" border="false"::: - Gives you extra details for each recommendation. Which can be: + Gives you extra details for each recommendation. Which can be: - - :::image type="icon" source="media/secure-score-security-controls/preview-icon.png" border="false"::: Preview recommendation - This recommendation won't affect your secure score until it's GA. + - :::image type="icon" source="media/secure-score-security-controls/preview-icon.png" border="false"::: Preview recommendation - This recommendation won't affect your secure score until it's GA. - - :::image type="icon" source="media/secure-score-security-controls/fix-icon.png" border="false"::: Fix - From within the recommendation details page, you can use 'Fix' to resolve this issue. + - :::image type="icon" source="media/secure-score-security-controls/fix-icon.png" border="false"::: Fix - From within the recommendation details page, you can use 'Fix' to resolve this issue. - - :::image type="icon" source="media/secure-score-security-controls/enforce-icon.png" border="false"::: Enforce - From within the recommendation details page, you can automatically deploy a policy to fix this issue whenever someone creates a non-compliant resource. + - :::image type="icon" source="media/secure-score-security-controls/enforce-icon.png" border="false"::: Enforce - From within the recommendation details page, you can automatically deploy a policy to fix this issue whenever someone creates a non-compliant resource. - - :::image type="icon" source="media/secure-score-security-controls/deny-icon.png" border="false"::: Deny - From within the recommendation details page, you can prevent new resources from being created with this issue. + - :::image type="icon" source="media/secure-score-security-controls/deny-icon.png" border="false"::: Deny - From within the recommendation details page, you can prevent new resources from being created with this issue. ### Calculations - understanding your score @@ -102,7 +102,7 @@ In this example: |**Security control's current score**|
                  ![Equation for calculating a security control's score.](media/secure-score-security-controls/secure-score-equation-single-control.png)

                  Each individual security control contributes towards the Security Score. Each resource affected by a recommendation within the control contributes towards the control's current score. The current score for each control is a measure of the status of the resources *within* the control.
                  ![Tooltips showing the values used when calculating the security control's current score](media/secure-score-security-controls/security-control-scoring-tooltips.png)
                  In this example, the max score of 6 would be divided by 78 because that's the sum of the healthy and unhealthy resources.
                  6 / 78 = 0.0769
                  Multiplying that by the number of healthy resources (4) results in the current score:
                  0.0769 * 4 = **0.31**

                  | |**Secure score**
                  Single subscription or connector|
                  ![Equation for calculating a subscription's secure score](media/secure-score-security-controls/secure-score-equation-single-sub.png)

                  ![Single subscription secure score with all controls enabled](media/secure-score-security-controls/secure-score-example-single-sub.png)
                  In this example, there's a single subscription or connector with all security controls available (a potential maximum score of 60 points). The score shows 28 points out of a possible 60 and the remaining 32 points are reflected in the "Potential score increase" figures of the security controls.
                  ![List of controls and the potential score increase](media/secure-score-security-controls/secure-score-example-single-sub-recs.png)
                  The same equation applies to a connector; just replace the word *subscription* with *connector*. | |**Secure score**
                  Multiple subscriptions and connectors|
                  ![Equation for calculating the secure score for multiple subscriptions.](media/secure-score-security-controls/secure-score-equation-multiple-subs.png)

                  When calculating the combined score for multiple subscriptions and connectors, Defender for Cloud includes a *weight* for each subscription and connector. The relative weights for your subscriptions and connectors are determined by Defender for Cloud based on factors such as the number of resources.
                  The current score for each subscription and connector is calculated in the same way as for a single subscription or connector, but then the weight is applied as shown in the equation.
                  When viewing multiple subscriptions and connectors, the secure score evaluates all resources within all enabled policies and groups their combined impact on each security control's maximum score.
                  ![Secure score for multiple subscriptions with all controls enabled](media/secure-score-security-controls/secure-score-example-multiple-subs.png)
                  The combined score is **not** an average; rather it's the evaluated posture of the status of all resources across all subscriptions and connectors.

                  Here too, if you go to the recommendations page and add up the potential points available, you'll find that it's the difference between the current score (22) and the maximum score available (58).| - + ### Which recommendations are included in the secure score calculations? Only built-in recommendations have an impact on the secure score. @@ -121,35 +121,36 @@ You can also configure the Enforce and Deny options on the relevant recommendati ## Security controls and their recommendations -The table below lists the security controls in Microsoft Defender for Cloud. For each control, you can see the maximum number of points you can add to your secure score if you remediate *all* of the recommendations listed in the control, for *all* of your resources. +The table below lists the security controls in Microsoft Defender for Cloud. For each control, you can see the maximum number of points you can add to your secure score if you remediate *all* of the recommendations listed in the control, for *all* of your resources. + +The set of security recommendations provided with Defender for Cloud is tailored to the available resources in each organization's environment. You can [disable policies](tutorial-security-policy.md#disable-security-policies-and-disable-recommendations) and [exempt specific resources from a recommendation](exempt-resource.md) to further customize the recommendations. -The set of security recommendations provided with Defender for Cloud is tailored to the available resources in each organization's environment. You can [disable policies](tutorial-security-policy.md#disable-security-policies-and-disable-recommendations) and [exempt specific resources from a recommendation](exempt-resource.md) to further customize the recommendations. - We recommend every organization carefully reviews their assigned Azure Policy initiatives. > [!TIP] -> For details about reviewing and editing your initiatives, see [Working with security policies](tutorial-security-policy.md). +> For details about reviewing and editing your initiatives, see [Working with security policies](tutorial-security-policy.md). Even though Defender for Cloud's default security initiative is based on industry best practices and standards, there are scenarios in which the built-in recommendations listed below might not completely fit your organization. It's sometimes necessary to adjust the default initiative - without compromising security - to ensure it's aligned with your organization's own policies, industry standards, regulatory standards, and benchmarks.
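As a quick sanity check of the per-control arithmetic shown in the calculation table earlier, here's a small, self-contained Python sketch that reproduces the worked example (a control with a max score of 6, 4 healthy resources, and 74 unhealthy resources, 78 in total). It's purely illustrative; Defender for Cloud performs this calculation for you, and the combined multi-subscription score additionally applies per-subscription weights rather than a simple average.

```python
# Illustrative sketch of the per-control secure score arithmetic described above.
# Numbers mirror the worked example: max score 6, 4 healthy and 74 unhealthy resources.
def control_scores(max_score: float, healthy: int, unhealthy: int) -> tuple[float, float]:
    total = healthy + unhealthy
    score_per_resource = max_score / total          # e.g. 6 / 78 = 0.0769
    current_score = score_per_resource * healthy    # e.g. 0.0769 * 4 = 0.31
    potential_increase = score_per_resource * unhealthy
    return current_score, potential_increase

current, potential = control_scores(max_score=6, healthy=4, unhealthy=74)
print(f"Score per resource: {6 / 78:.4f}")       # 0.0769
print(f"Current score:      {current:.2f}")      # 0.31
print(f"Potential increase: {potential:.2f}")    # 5.69
```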

                  [!INCLUDE [security-center-controls-and-recommendations](../../includes/asc/security-control-recommendations.md)] - - ## FAQ - Secure score ### If I address only three out of four recommendations in a security control, will my secure score change? + No. It won't change until you remediate all of the recommendations for a single resource. To get the maximum score for a control, you must remediate all recommendations for all resources. ### If a recommendation isn't applicable to me, and I disable it in the policy, will my security control be fulfilled and my secure score updated? + Yes. We recommend disabling recommendations when they're inapplicable in your environment. For instructions on how to disable a specific recommendation, see [Disable security policies](./tutorial-security-policy.md#disable-security-policies-and-disable-recommendations). ### If a security control offers me zero points towards my secure score, should I ignore it? + In some cases, you'll see a control max score greater than zero, but the impact is zero. When the incremental score for fixing resources is negligible, it's rounded to zero. Don't ignore these recommendations because they still bring security improvements. The only exception is the "Additional Best Practice" control. Remediating these recommendations won't increase your score, but it will enhance your overall security. ## Next steps -This article described the secure score and the included security controls. +This article described the secure score and the included security controls. > [!div class="nextstepaction"] > [Access and track your secure score](secure-score-access-and-track.md) diff --git a/articles/defender-for-cloud/security-policy-concept.md b/articles/defender-for-cloud/security-policy-concept.md index 6aed0be99f625..3c3b3c116ebce 100644 --- a/articles/defender-for-cloud/security-policy-concept.md +++ b/articles/defender-for-cloud/security-policy-concept.md @@ -2,14 +2,13 @@ title: Understanding security policies, initiatives, and recommendations in Microsoft Defender for Cloud description: Learn about security policies, initiatives, and recommendations in Microsoft Defender for Cloud. ms.topic: conceptual -ms.date: 11/09/2021 +ms.date: 06/06/2022 --- # What are security policies, initiatives, and recommendations? Microsoft Defender for Cloud applies security initiatives to your subscriptions. These initiatives contain one or more security policies. Each of those policies results in a security recommendation for improving your security posture. This page explains each of these ideas in detail. - ## What is a security policy? An Azure Policy definition, created in Azure Policy, is a rule about specific security conditions that you want controlled. Built in definitions include things like controlling what type of resources can be deployed or enforcing the use of tags on all resources. You can also create your own custom policy definitions. @@ -40,7 +39,7 @@ Defender for Cloud offers the following options for working with security initia Using the policies, Defender for Cloud periodically analyzes the compliance status of your resources to identify potential security misconfigurations and weaknesses. It then provides you with recommendations on how to remediate those issues. Recommendations are the result of assessing your resources against the relevant policies and identifying resources that aren't meeting your defined requirements. -Defender for Cloud makes its security recommendations based on your chosen initiatives. 
When a policy from your initiative is compared against your resources and finds one or more that aren't compliant it is presented as a recommendation in Defender for Cloud. +Defender for Cloud makes its security recommendations based on your chosen initiatives. When a policy from your initiative is compared against your resources and finds one or more that aren't compliant, it is presented as a recommendation in Defender for Cloud. Recommendations are actions for you to take to secure and harden your resources. Each recommendation provides you with the following information: @@ -54,16 +53,60 @@ In practice, it works like this: For example, Azure Storage accounts must restrict network access to reduce their attack surface. -1. The initiative includes multiple ***policies***, each with a requirement of a specific resource type. These policies enforce the requirements in the initiative. +1. The initiative includes multiple ***policies***, each with a requirement of a specific resource type. These policies enforce the requirements in the initiative. To continue the example, the storage requirement is enforced with the policy "Storage accounts should restrict network access using virtual network rules". 1. Microsoft Defender for Cloud continually assesses your connected subscriptions. If it finds a resource that doesn't satisfy a policy, it displays a ***recommendation*** to fix that situation and harden the security of resources that aren't meeting your security requirements. - So, for example, if an Azure Storage account on any of your protected subscriptions isn't protected with virtual network rules, you'll see the recommendation to harden those resources. + So, for example, if an Azure Storage account on any of your protected subscriptions isn't protected with virtual network rules, you'll see the recommendation to harden those resources. So, (1) an initiative includes (2) policies that generate (3) environment-specific recommendations. +### Security recommendation details + +Security recommendations contain details that help you understand its significance and how to handle it. + +:::image type="content" source="./media/security-policy-concept/recommendation-details-page.png" alt-text="Screenshot of the recommendation details page with labels for each element." lightbox="./media/security-policy-concept/recommendation-details-page.png"::: + +The recommendation details shown are: + +1. For supported recommendations, the top toolbar shows any or all of the following buttons: + - **Enforce** and **Deny** (see [Prevent misconfigurations with Enforce/Deny recommendations](prevent-misconfigurations.md)). + - **View policy definition** to go directly to the Azure Policy entry for the underlying policy. + - **Open query** - You can view the detailed information about the affected resources using Azure Resource Graph Explorer. +1. **Severity indicator** +1. **Freshness interval** +1. **Count of exempted resources** if exemptions exist for a recommendation, this shows the number of resources that have been exempted with a link to view the specific resources. +1. **Mapping to MITRE ATT&CK ® tactics and techniques** if a recommendation has defined tactics and techniques, select the icon for links to the relevant pages on MITRE's site. This applies only to Azure scored recommendations. + + :::image type="content" source="media/review-security-recommendations/tactics-window.png" alt-text="Screenshot of the MITRE tactics mapping for a recommendation."::: + +1. 
**Description** - A short description of the security issue. +1. When relevant, the details page also includes a table of **related recommendations**: + + The relationship types are: + + - **Prerequisite** - A recommendation that must be completed before the selected recommendation + - **Alternative** - A different recommendation, which provides another way of achieving the goals of the selected recommendation + - **Dependent** - A recommendation for which the selected recommendation is a prerequisite + + For each related recommendation, the number of unhealthy resources is shown in the "Affected resources" column. + + > [!TIP] + > If a related recommendation is grayed out, its dependency isn't yet completed and so isn't available. + +1. **Remediation steps** - A description of the manual steps required to remediate the security issue on the affected resources. For recommendations with the **Fix** option, you can select**View remediation logic** before applying the suggested fix to your resources. + +1. **Affected resources** - Your resources are grouped into tabs: + - **Healthy resources** – Relevant resources, which either aren't impacted or on which you've already remediated the issue. + - **Unhealthy resources** – Resources that are still impacted by the identified issue. + - **Not applicable resources** – Resources for which the recommendation can't give a definitive answer. The not applicable tab also includes reasons for each resource. + + :::image type="content" source="./media/review-security-recommendations/recommendations-not-applicable-reasons.png" alt-text="Screenshot of resources for which the recommendation can't give a definitive answer."::: + +1. Action buttons to remediate the recommendation or trigger a logic app. + ## Viewing the relationship between a recommendation and a policy As mentioned above, Defender for Cloud's built in recommendations are based on the Azure Security Benchmark. Almost every recommendation has an underlying policy that is derived from a requirement in the benchmark. @@ -72,13 +115,12 @@ When you're reviewing the details of a recommendation, it's often helpful to be :::image type="content" source="media/release-notes/view-policy-definition.png" alt-text="Link to Azure Policy page for the specific policy supporting a recommendation."::: -Use this link to view the policy definition and review the evaluation logic. +Use this link to view the policy definition and review the evaluation logic. If you're reviewing the list of recommendations on our [Security recommendations reference guide](recommendations-reference.md), you'll also see links to the policy definition pages: :::image type="content" source="media/release-notes/view-policy-definition-from-documentation.png" alt-text="Accessing the Azure Policy page for a specific policy directly from the Microsoft Defender for Cloud recommendations reference page."::: - ## Next steps This page explained, at a high level, the basic concepts and relationships between policies, initiatives, and recommendations. 
For related information, see: diff --git a/articles/defender-for-cloud/supported-machines-endpoint-solutions-clouds-containers.md b/articles/defender-for-cloud/supported-machines-endpoint-solutions-clouds-containers.md index 38a63060d5bd0..abc51262cf2eb 100644 --- a/articles/defender-for-cloud/supported-machines-endpoint-solutions-clouds-containers.md +++ b/articles/defender-for-cloud/supported-machines-endpoint-solutions-clouds-containers.md @@ -1,10 +1,8 @@ --- title: Microsoft Defender for Containers feature availability description: Learn about the availability of Microsoft Defender for Cloud containers features according to OS, machine type, and cloud deployment. -author: bmansheim -ms.author: benmansheim ms.topic: overview -ms.date: 04/28/2022 +ms.date: 06/08/2022 ms.custom: references_regions --- @@ -18,7 +16,7 @@ The **tabs** below show the features that are available, by environment, for Mic | Domain | Feature | Supported Resources | Release state [1](#footnote1) | Windows support | Agentless/Agent-based | Pricing Tier | Azure clouds availability | |--|--|--|--|--|--|--|--| -| Compliance | Docker CIS | VM, VMSS | GA | X | Log Analytics agent | Defender for Servers Plan 2 | | +| Compliance | Docker CIS | VM, VMSS | GA | X | Log Analytics agent | Defender for Servers Plan 2 | Commercial clouds

                  National clouds: Azure Government, Azure China 21Vianet | | Vulnerability Assessment | Registry scan | ACR, Private ACR | GA | ✓ (Preview) | Agentless | Defender for Containers | Commercial clouds

                  National clouds: Azure Government, Azure China 21Vianet | | Vulnerability Assessment | View vulnerabilities for running images | AKS | Preview | ✓ (Preview) | Defender profile | Defender for Containers | Commercial clouds | | Hardening | Control plane recommendations | ACR, AKS | GA | ✓ | Agentless | Free | Commercial clouds

                  National clouds: Azure Government, Azure China 21Vianet | @@ -73,7 +71,7 @@ The **tabs** below show the features that are available, by environment, for Mic | Domain | Feature | Supported Resources | Release state [1](#footnote1) | Windows support | Agentless/Agent-based | Pricing tier | |--|--| -- | -- | -- | -- | --| | Compliance | Docker CIS | Arc enabled VMs | Preview | X | Log Analytics agent | Defender for Servers Plan 2 | -| Vulnerability Assessment | Registry scan | ACR, Private ACR | Preview | ✓ (Preview) | Agentless | Defender for Containers | +| Vulnerability Assessment | Registry scan | ACR, Private ACR | GA | ✓ (Preview) | Agentless | Defender for Containers | | Vulnerability Assessment | View vulnerabilities for running images | Arc enabled K8s clusters | Preview | X | Defender extension | Defender for Containers | | Hardening | Control plane recommendations | - | - | - | - | - | | Hardening | Kubernetes data plane recommendations | Arc enabled K8s clusters | Preview | X | Azure Policy extension | Defender for Containers | diff --git a/articles/defender-for-cloud/supported-machines-endpoint-solutions-clouds-servers.md b/articles/defender-for-cloud/supported-machines-endpoint-solutions-clouds-servers.md index ba29a34bba08a..c354a871b5a9f 100644 --- a/articles/defender-for-cloud/supported-machines-endpoint-solutions-clouds-servers.md +++ b/articles/defender-for-cloud/supported-machines-endpoint-solutions-clouds-servers.md @@ -10,7 +10,7 @@ ms.custom: references_regions The **tabs** below show the features of Microsoft Defender for Cloud that are available for Windows and Linux machines. -## Supported features for virtual machines and servers +## Supported features for virtual machines and servers ### [**Windows machines**](#tab/features-windows) diff --git a/articles/defender-for-cloud/upcoming-changes.md b/articles/defender-for-cloud/upcoming-changes.md index 192ef8cb2983c..5d2b3ffa6d27b 100644 --- a/articles/defender-for-cloud/upcoming-changes.md +++ b/articles/defender-for-cloud/upcoming-changes.md @@ -2,7 +2,7 @@ title: Important changes coming to Microsoft Defender for Cloud description: Upcoming changes to Microsoft Defender for Cloud that you might need to be aware of and for which you might need to plan ms.topic: overview -ms.date: 05/10/2022 +ms.date: 05/31/2022 --- # Important upcoming changes to Microsoft Defender for Cloud @@ -14,39 +14,48 @@ On this page, you'll learn about changes that are planned for Defender for Cloud If you're looking for the latest release notes, you'll find them in the [What's new in Microsoft Defender for Cloud](release-notes.md). 
- ## Planned changes | Planned change | Estimated date for change | |--|--| -| [Changes to recommendations for managing endpoint protection solutions](#changes-to-recommendations-for-managing-endpoint-protection-solutions) | May 2022 | -| [Key Vault recommendations changed to "audit"](#key-vault-recommendations-changed-to-audit) | May 2022 | +| [GA support for Arc-enabled Kubernetes clusters](#ga-support-for-arc-enabled-kubernetes-clusters) | July 2022 | +| [Changes to recommendations for managing endpoint protection solutions](#changes-to-recommendations-for-managing-endpoint-protection-solutions) | June 2022 | +| [Key Vault recommendations changed to "audit"](#key-vault-recommendations-changed-to-audit) | June 2022 | | [Multiple changes to identity recommendations](#multiple-changes-to-identity-recommendations) | June 2022 | | [Deprecating three VM alerts](#deprecating-three-vm-alerts) | June 2022| -| [Deprecating the "API App should only be accessible over HTTPS" policy](#deprecating-the-api-app-should-only-be-accessible-over-https-policy)|June 2022| +| [Deprecating the "API App should only be accessible over HTTPS" policy](#deprecating-the-api-app-should-only-be-accessible-over-https-policy)|June 2022| + +### GA support for Arc-enabled Kubernetes clusters + +**Estimated date for change:** July 2022 + +Defender for Containers is currently a preview feature for Arc-enabled Kubernetes clusters. In July, Arc-enabled Kubernetes clusters will be charged according to the listing on the [pricing page](https://azure.microsoft.com/pricing/details/defender-for-cloud/). Customers that already have clusters onboarded to Arc (on the subscription level) will incur charges. ### Changes to recommendations for managing endpoint protection solutions -**Estimated date for change:** May 2022 +**Estimated date for change:** June 2022 In August 2021, we added two new **preview** recommendations to deploy and maintain the endpoint protection solutions on your machines. For full details, [see the release note](release-notes-archive.md#two-new-recommendations-for-managing-endpoint-protection-solutions-in-preview). When the recommendations are released to general availability, they will replace the following existing recommendations: - **Endpoint protection should be installed on your machines** will replace: - - [Install endpoint protection solution on virtual machines (key: 83f577bd-a1b6-b7e1-0891-12ca19d1e6df)](https://portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/83f577bd-a1b6-b7e1-0891-12ca19d1e6df) - - [Install endpoint protection solution on your machines (key: 383cf3bc-fdf9-4a02-120a-3e7e36c6bfee)](https://portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/383cf3bc-fdf9-4a02-120a-3e7e36c6bfee) + - [Install endpoint protection solution on virtual machines (key: 83f577bd-a1b6-b7e1-0891-12ca19d1e6df)](https://portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/83f577bd-a1b6-b7e1-0891-12ca19d1e6df) + - [Install endpoint protection solution on your machines (key: 383cf3bc-fdf9-4a02-120a-3e7e36c6bfee)](https://portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/383cf3bc-fdf9-4a02-120a-3e7e36c6bfee) - **Endpoint protection health issues should be resolved on your machines** will replace the existing recommendation that has the same name. 
The two recommendations have different assessment keys: - - Assessment key for the **preview** recommendation: 37a3689a-818e-4a0e-82ac-b1392b9bb000 - - Assessment key for the **GA** recommendation: 3bcd234d-c9c7-c2a2-89e0-c01f419c1a8a + - Assessment key for the **preview** recommendation: 37a3689a-818e-4a0e-82ac-b1392b9bb000 + - Assessment key for the **GA** recommendation: 3bcd234d-c9c7-c2a2-89e0-c01f419c1a8a Learn more: + - [Defender for Cloud's supported endpoint protection solutions](supported-machines-endpoint-solutions-clouds-servers.md#endpoint-supported) - [How these recommendations assess the status of your deployed solutions](endpoint-protection-recommendations-technical.md) ### Key Vault recommendations changed to "audit" +**Estimated date for change:** June 2022 + The Key Vault recommendations listed here are currently disabled so that they don't impact your secure score. We will change their effect to "audit". | Recommendation name | Recommendation ID | @@ -69,7 +78,7 @@ The new release will bring the following capabilities: - **Improved freshness interval** - Currently, the identity recommendations have a freshness interval of 24 hours. This update will reduce that interval to 12 hours. -- **Account exemption capability** - Defender for Cloud has many features you can use to customize your experience and ensure that your secure score reflects your organization's security priorities. For example, you can [exempt resources and recommendations from your secure score](exempt-resource.md). +- **Account exemption capability** - Defender for Cloud has many features you can use to customize your experience and ensure that your secure score reflects your organization's security priorities. For example, you can [exempt resources and recommendations from your secure score](exempt-resource.md). This update will allow you to exempt specific accounts from evaluation with the six recommendations listed in the following table. @@ -86,23 +95,23 @@ The new release will bring the following capabilities: |External accounts with owner permissions should be removed from your subscription|c3b6ae71-f1f0-31b4-e6c1-d5951285d03d| |External accounts with read permissions should be removed from your subscription|a8c6a4ad-d51e-88fe-2979-d3ee3c864f8b| |External accounts with write permissions should be removed from your subscription|04e7147b-0deb-9796-2e5c-0336343ceb3d| -#### Recommendations rename + +#### Recommendations rename This update, will rename two recommendations, and revise their descriptions. The assessment keys will remain unchanged. -| Property | Current value | New update's change | -|--|--|--| -|**First recommendation**| - | - | -|Assessment key | e52064aa-6853-e252-a11e-dffc675689c2 | No change | -| Name | [Deprecated accounts with owner permissions should be removed from your subscription](https://ms.portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/e52064aa-6853-e252-a11e-dffc675689c2) | Subscriptions should be purged of accounts that are blocked in Active Directory and have owner permissions. | -| Description | User accounts that have been blocked from signing in, should be removed from your subscriptions. -These accounts can be targets for attackers looking to find ways to access your data without being noticed. | User accounts that have been blocked from signing into Active Directory, should be removed from your subscriptions. These accounts can be targets for attackers looking to find ways to access your data without being noticed.
                  Learn more about securing the identity perimeter in [Azure Identity Management and access control security best practices](../security/fundamentals/identity-management-best-practices.md). | -| Related policy | [Deprecated accounts with owner permissions should be removed from your subscription](https://ms.portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2febb62a0c-3560-49e1-89ed-27e074e9f8ad) | Subscriptions should be purged of accounts that are blocked in Active Directory and have owner permissions. | -|**Second recommendation**| - | - | -| Assessment key | 00c6d40b-e990-6acf-d4f3-471e747a27c4 | No change | -| Name | [Deprecated accounts should be removed from your subscription](https://ms.portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/00c6d40b-e990-6acf-d4f3-471e747a27c4) | Subscriptions should be purged of accounts that are blocked in Active Directory and have read and write permissions. | -| Description | User accounts that have been blocked from signing in, should be removed from your subscriptions.
                  These accounts can be targets for attackers looking to find ways to access your data without being noticed. | User accounts that have been blocked from signing into Active Directory, should be removed from your subscriptions. These accounts can be targets for attackers looking to find ways to access your data without being noticed.
                  Learn more about securing the identity perimeter in [Azure Identity Management and access control security best practices](../security/fundamentals/identity-management-best-practices.md). | -| Related policy | [Deprecated accounts should be removed from your subscription](https://ms.portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2f6b1cbf55-e8b6-442f-ba4c-7246b6381474) | Subscriptions should be purged of accounts that are blocked in Active Directory and have read and write permissions. | + | Property | Current value | New update's change | + |----|----|----| + |**First recommendation**| - | - | + |Assessment key | e52064aa-6853-e252-a11e-dffc675689c2 | No change| + | Name | [Deprecated accounts with owner permissions should be removed from your subscription](https://ms.portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/e52064aa-6853-e252-a11e-dffc675689c2) |Subscriptions should be purged of accounts that are blocked in Active Directory and have owner permissions.| + |Description| User accounts that have been blocked from signing in, should be removed from your subscriptions.|These accounts can be targets for attackers looking to find ways to access your data without being noticed.
                  Learn more about securing the identity perimeter in [Azure Identity Management and access control security best practices](../security/fundamentals/identity-management-best-practices.md).| + |Related policy|[Deprecated accounts with owner permissions should be removed from your subscription](https://ms.portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2febb62a0c-3560-49e1-89ed-27e074e9f8ad) | Subscriptions should be purged of accounts that are blocked in Active Directory and have owner permissions.| + |**Second recommendation**| - | - | + | Assessment key | 00c6d40b-e990-6acf-d4f3-471e747a27c4 | No change | + | Name | [Deprecated accounts should be removed from your subscription](https://ms.portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/00c6d40b-e990-6acf-d4f3-471e747a27c4)|Subscriptions should be purged of accounts that are blocked in Active Directory and have read and write permissions.| +|Description|User accounts that have been blocked from signing in, should be removed from your subscriptions.
                  These accounts can be targets for attackers looking to find ways to access your data without being noticed.|User accounts that have been blocked from signing into Active Directory, should be removed from your subscriptions.
                  Learn more about securing the identity perimeter in [Azure Identity Management and access control security best practices](../security/fundamentals/identity-management-best-practices.md).| + | Related policy | [Deprecated accounts should be removed from your subscription](https://ms.portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2f6b1cbf55-e8b6-442f-ba4c-7246b6381474) | Subscriptions should be purged of accounts that are blocked in Active Directory and have read and write permissions. | ### Deprecating three VM alerts @@ -114,7 +123,7 @@ The following table lists the alerts that will be deprecated during June 2022. |--|--|--|--| | **Docker build operation detected on a Kubernetes node**
                  (VM_ImageBuildOnNode) | Machine logs indicate a build operation of a container image on a Kubernetes node. While this behavior might be legitimate, attackers might build their malicious images locally to avoid detection. | Defense Evasion | Low | | **Suspicious request to Kubernetes API**
                  (VM_KubernetesAPI) | Machine logs indicate that a suspicious request was made to the Kubernetes API. The request was sent from a Kubernetes node, possibly from one of the containers running in the node. Although this behavior can be intentional, it might indicate that the node is running a compromised container. | LateralMovement | Medium | -| **SSH server is running inside a container**
                  (VM_ContainerSSH) | Machine logs indicate that an SSH server is running inside a Docker container. While this behavior can be intentional, it frequently indicates that a container is misconfigured or breached. | Execution | Medium | +| **SSH server is running inside a container**
                  (VM_ContainerSSH) | Machine logs indicate that an SSH server is running inside a Docker container. While this behavior can be intentional, it frequently indicates that a container is misconfigured or breached. | Execution | Medium | These alerts are used to notify a user about suspicious activity connected to a Kubernetes cluster. The alerts will be replaced with matching alerts that are part of the Microsoft Defender for Cloud Container alerts (`K8S.NODE_ImageBuildOnNode`, `K8S.NODE_ KubernetesAPI` and `K8S.NODE_ ContainerSSH`) which will provide improved fidelity and comprehensive context to investigate and act on the alerts. Learn more about alerts for [Kubernetes Clusters](alerts-reference.md). @@ -122,7 +131,7 @@ These alerts are used to notify a user about suspicious activity connected to a **Estimated date for change:** June 2022 -The policy `API App should only be accessible over HTTPS` is set to be deprecated. This policy will be replaced with `Web Application should only be accessible over HTTPS`, which will be renamed to `App Service apps should only be accessible over HTTPS`. +The policy `API App should only be accessible over HTTPS` is set to be deprecated. This policy will be replaced with `Web Application should only be accessible over HTTPS`, which will be renamed to `App Service apps should only be accessible over HTTPS`. To learn more about policy definitions for Azure App Service, see [Azure Policy built-in definitions for Azure App Service](../azure-app-configuration/policy-reference.md) diff --git a/articles/defender-for-iot/device-builders/agent-based-recommendations.md b/articles/defender-for-iot/device-builders/agent-based-recommendations.md index b22af7c1d1fdf..c1e19c24d166d 100644 --- a/articles/defender-for-iot/device-builders/agent-based-recommendations.md +++ b/articles/defender-for-iot/device-builders/agent-based-recommendations.md @@ -10,7 +10,7 @@ ms.date: 03/28/2022 Defender for IoT scans your Azure resources and IoT devices and provides security recommendations to reduce your attack surface. Security recommendations are actionable and aim to aid customers in complying with security best practices. -In this article, you will find a list of recommendations, which can be triggered on your IoT devices. +In this article, you'll find a list of recommendations, which can be triggered on your IoT devices. ## Agent based recommendations @@ -31,7 +31,7 @@ Operational recommendations provide insights and suggestions to improve security | Severity | Name | Data Source | Description | |--|--|--|--| | Low | Agent sends unutilized messages | Legacy Defender-IoT-micro-agent | 10% or more of security messages were smaller than 4 KB during the last 24 hours. | -| Low | Security twin configuration not optimal | Legacy Defender-IoT-micro-agent | Security twin configuration is not optimal. | +| Low | Security twin configuration not optimal | Legacy Defender-IoT-micro-agent | Security twin configuration isn't optimal. | | Low | Security twin configuration conflict | Legacy Defender-IoT-micro-agent | Conflicts were identified in the security twin configuration. 
| ## Next steps diff --git a/articles/defender-for-iot/device-builders/concept-agent-based-security-alerts.md b/articles/defender-for-iot/device-builders/concept-agent-based-security-alerts.md index bf11040082be2..b9acfa0ee0f71 100644 --- a/articles/defender-for-iot/device-builders/concept-agent-based-security-alerts.md +++ b/articles/defender-for-iot/device-builders/concept-agent-based-security-alerts.md @@ -11,7 +11,7 @@ Defender for IoT continuously analyzes your IoT solution using advanced analytic In addition, you can create custom alerts based on your knowledge of expected device behavior. An alert acts as an indicator of potential compromise, and should be investigated and remediated. -In this article, you will find a list of built-in alerts, which can be triggered on your IoT devices. +In this article, you'll find a list of built-in alerts, which can be triggered on your IoT devices. In addition to built-in alerts, Defender for IoT allows you to define custom alerts based on expected IoT Hub and/or device behavior. For more information, see [customizable alerts](concept-customizable-security-alerts.md). @@ -27,7 +27,7 @@ For more information, see [customizable alerts](concept-customizable-security-al | Port forwarding detection | High | Defender-IoT-micro-agent | Initiation of port forwarding to an external IP address detected. | Review with the user that ran the command if this was legitimate activity that you expect to see on the device. If not, escalate the alert to the information security team. | IoT_PortForwarding | | Possible attempt to disable Auditd logging detected | High | Defender-IoT-micro-agent | Linux Auditd system provides a way to track security-relevant information on the system. The system records as much information about the events that are happening on your system as possible. This information is crucial for mission-critical environments to determine who violated the security policy and the actions they performed. Disabling Auditd logging may prevent your ability to discover violations of security policies used on the system. | Check with the device owner if this was legitimate activity with business reasons. If not, this event may be hiding activity by malicious actors. Immediately escalated the incident to your information security team. | IoT_DisableAuditdLogging | | Reverse shells | High | Defender-IoT-micro-agent | Analysis of host data on a device detected a potential reverse shell. Reverse shells are often used to get a compromised machine to call back into a machine controlled by a malicious actor. | Review with the user that ran the command if this was legitimate activity that you expect to see on the device. If not, escalate the alert to the information security team. | IoT_ReverseShell | -| Successful local login | High | Defender-IoT-micro-agent | Successful local sign in to the device detected | Make sure the signed in user is an authorized party. | IoT_SucessfulLocalLogin | +| Successful local login | High | Defender-IoT-micro-agent | Successful local sign-in to the device detected | Make sure the signed in user is an authorized party. | IoT_SucessfulLocalLogin | | Web shell | High | Defender-IoT-micro-agent | Possible web shell detected. Malicious actors commonly upload a web shell to a compromised machine to gain persistence or for further exploitation. | Review with the user that ran the command if this was legitimate activity that you expect to see on the device. If not, escalate the alert to the information security team. 
| IoT_WebShell | | Behavior similar to ransomware detected | High | Defender-IoT-micro-agent | Execution of files similar to known ransomware that may prevent users from accessing their system, or personal files, and may demand ransom payment to regain access. | Review with the user that ran the command if this was legitimate activity that you expect to see on the device. If not, escalate the alert to the information security team. | IoT_Ransomware | | Crypto coin miner image | High | Defender-IoT-micro-agent | Execution of a process normally associated with digital currency mining detected. | Verify with the user that ran the command if this was legitimate activity on the device. If not, escalate the alert to the information security team. | IoT_CryptoMiner | @@ -37,8 +37,8 @@ For more information, see [customizable alerts](concept-customizable-security-al | Name | Severity | Data Source | Description | Suggested remediation steps | Alert type | |--|--|--|--|--|--| | Behavior similar to common Linux bots detected | Medium | Defender-IoT-micro-agent | Execution of a process normally associated with common Linux botnets detected. | Review with the user that ran the command if this was legitimate activity that you expect to see on the device. If not, escalate the alert to the information security team. | IoT_CommonBots | -| Behavior similar to Fairware ransomware detected | Medium | Defender-IoT-micro-agent | Execution of rm -rf commands applied to suspicious locations detected using analysis of host data. Because rm -rf recursively deletes files, it is normally only used on discrete folders. In this case, it is being used in a location that could remove a large amount of data. Fairware ransomware is known to execute rm -rf commands in this folder. | Review with the user that ran the command this was legitimate activity that you expect to see on the device. If not, escalate the alert to the information security team. | IoT_FairwareMalware | -| Crypto coin miner container image detected | Medium | Defender-IoT-micro-agent | Container detecting running known digital currency mining images. | 1. If this behavior is not intended, delete the relevant container image.
                  2. Make sure that the Docker daemon is not accessible via an unsafe TCP socket.
                  3. Escalate the alert to the information security team. | IoT_CryptoMinerContainer | +| Behavior similar to Fairware ransomware detected | Medium | Defender-IoT-micro-agent | Execution of rm -rf commands applied to suspicious locations detected using analysis of host data. Because rm -rf recursively deletes files, it's normally only used on discrete folders. In this case, it's being used in a location that could remove a large amount of data. Fairware ransomware is known to execute rm -rf commands in this folder. | Review with the user that ran the command this was legitimate activity that you expect to see on the device. If not, escalate the alert to the information security team. | IoT_FairwareMalware | +| Crypto coin miner container image detected | Medium | Defender-IoT-micro-agent | Container detecting running known digital currency mining images. | 1. If this behavior isn't intended, delete the relevant container image.
                  2. Make sure that the Docker daemon isn't accessible via an unsafe TCP socket.
                  3. Escalate the alert to the information security team. | IoT_CryptoMinerContainer | | Detected suspicious use of the nohup command | Medium | Defender-IoT-micro-agent | Suspicious use of the nohup command on host detected. Malicious actors commonly run the nohup command from a temporary directory, effectively allowing their executables to run in the background. Seeing this command run on files located in a temporary directory is not expected or usual behavior. | Review with the user that ran the command if this was legitimate activity that you expect to see on the device. If not, escalate the alert to the information security team. | IoT_SuspiciousNohup | | Detected suspicious use of the useradd command | Medium | Defender-IoT-micro-agent | Suspicious use of the useradd command detected on the device. | Review with the user that ran the command if this was legitimate activity that you expect to see on the device. If not, escalate the alert to the information security team. | IoT_SuspiciousUseradd | | Exposed Docker daemon by TCP socket | Medium | Defender-IoT-micro-agent | Machine logs indicate that your Docker daemon (dockerd) exposes a TCP socket. By default, Docker configuration, does not use encryption or authentication when a TCP socket is enabled. Default Docker configuration enables full access to the Docker daemon, by anyone with access to the relevant port. | Review with the user that ran the command if this was legitimate activity that you expect to see on the device. If not, escalate the alert to the information security team. | IoT_ExposedDocker | diff --git a/articles/defender-for-iot/device-builders/concept-baseline.md b/articles/defender-for-iot/device-builders/concept-baseline.md index 39340dddad1ee..2cbd559d840c5 100644 --- a/articles/defender-for-iot/device-builders/concept-baseline.md +++ b/articles/defender-for-iot/device-builders/concept-baseline.md @@ -21,13 +21,13 @@ Baseline custom checks establish a custom list of checks for each device baselin 1. In your IoT Hub, locate and select the device you wish to change. -1. Click on the device, and then click the **azureiotsecurity** module. +1. Select on the device, and then select the **azureiotsecurity** module. -1. Click **Module Identity Twin**. +1. Select **Module Identity Twin**. 1. Upload the **baseline custom checks** file to the device. -1. Add baseline properties to the Defender-IoT-micro-agent and click **Save**. +1. Add baseline properties to the Defender-IoT-micro-agent and select **Save**. ### Baseline custom check file example diff --git a/articles/defender-for-iot/device-builders/concept-customizable-security-alerts.md b/articles/defender-for-iot/device-builders/concept-customizable-security-alerts.md index c3173fcf490d4..2663f38157721 100644 --- a/articles/defender-for-iot/device-builders/concept-customizable-security-alerts.md +++ b/articles/defender-for-iot/device-builders/concept-customizable-security-alerts.md @@ -22,17 +22,17 @@ The following lists of Defender for IoT alerts are definable by you based on you | Custom alert - The number of cloud to device messages in AMQP protocol is outside the allowed range | Low | IoT Hub | The number of cloud to device messages (AMQP protocol) within a specific time window is outside the currently configured and allowable range. 
| IoT_CA_AmqpC2DMessagesNotInAllowedRange | | Custom alert - The number of rejected cloud to device messages in AMQP protocol is outside the allowed range | Low | IoT Hub | The number of cloud to device messages (AMQP protocol) rejected by the device, within a specific time window is outside the currently configured and allowable range. | IoT_CA_AmqpC2DRejectedMessagesNotInAllowedRange | | Custom alert - The number of device to cloud messages in AMQP protocol is outside the allowed range | Low | IoT Hub | The amount of device to cloud messages (AMQP protocol) within a specific time window is outside the currently configured and allowable range. | IoT_CA_AmqpD2CMessagesNotInAllowedRange | -| Custom alert - The number of direct method invokes is outside the allowed range | Low | IoT Hub | The amount of direct method invokes within a specific time window is outside the currently configured and allowable range. | IoT_CA_DirectMethodInvokesNotInAllowedRange | +| Custom alert - The number of direct method invokes is outside the allowed range | Low | IoT Hub | The amount of direct method invokes within a specific time window is outside the currently configured and allowable range. | IoT_CA_DirectMethodInvokesNotInAllowedRange | | Custom alert - The number of file uploads is outside the allowed range | Low | IoT Hub | The amount of file uploads within a specific time window is outside the currently configured and allowable range. | IoT_CA_FileUploadsNotInAllowedRange | -| Custom alert - The number of cloud to device messages in HTTP protocol is outside the allowed range | Low | IoT Hub | The amount of cloud to device messages (HTTP protocol) in a time window is not in the configured allowed range | IoT_CA_HttpC2DMessagesNotInAllowedRange | +| Custom alert - The number of cloud to device messages in HTTP protocol is outside the allowed range | Low | IoT Hub | The amount of cloud to device messages (HTTP protocol) in a time window isn't in the configured allowed range | IoT_CA_HttpC2DMessagesNotInAllowedRange | +| Custom alert - The number of rejected cloud to device messages in HTTP protocol isn't in the allowed range | Low | IoT Hub | The amount of cloud to device messages (HTTP protocol) within a specific time window is outside the currently configured and allowable range. | IoT_CA_HttpC2DRejectedMessagesNotInAllowedRange | | Custom alert - The number of device to cloud messages in HTTP protocol is outside the allowed range | Low | IoT Hub | The amount of device to cloud messages (HTTP protocol) within a specific time window is outside the currently configured and allowable range. | IoT_CA_HttpD2CMessagesNotInAllowedRange | | Custom alert - The number of cloud to device messages in MQTT protocol is outside the allowed range | Low | IoT Hub | The amount of cloud to device messages (MQTT protocol) within a specific time window is outside the currently configured and allowable range. 
| IoT_CA_MqttC2DMessagesNotInAllowedRange | | Custom alert - The number of rejected cloud to device messages in MQTT protocol is outside the allowed range | Low | IoT Hub | The amount of cloud to device messages (MQTT protocol) rejected by the device within a specific time window is outside the currently configured and allowable range. | IoT_CA_MqttC2DRejectedMessagesNotInAllowedRange | | Custom alert - The number of device to cloud messages in MQTT protocol is outside the allowed range | Low | IoT Hub | The amount of device to cloud messages (MQTT protocol) within a specific time window is outside the currently configured and allowable range. | IoT_CA_MqttD2CMessagesNotInAllowedRange | | Custom alert - The number of command queue purges that are outside of the allowed range | Low | IoT Hub | The amount of command queue purges within a specific time window is outside the currently configured and allowable range. | IoT_CA_QueuePurgesNotInAllowedRange | -| Custom alert - The number of module twin updates is outside the allowed range | Low | IoT Hub | The amount of module twin updates within a specific time window is outside the currently configured and allowable range. | IoT_CA_TwinUpdatesNotInAllowedRange | -| Custom alert - The number of unauthorized operations is outside the allowed range | Low | IoT Hub | The amount of unauthorized operations within a specific time window is outside the currently configured and allowable range. | IoT_CA_UnauthorizedOperationsNotInAllowedRange | +| Custom alert - The number of module twin updates is outside the allowed range | Low | IoT Hub | The number of module twin updates within a specific time window is outside the currently configured and allowable range. | IoT_CA_TwinUpdatesNotInAllowedRange | +| Custom alert - The number of unauthorized operations is outside the allowed range | Low | IoT Hub | The number of unauthorized operations within a specific time window is outside the currently configured and allowable range. | IoT_CA_UnauthorizedOperationsNotInAllowedRange | ## Next steps diff --git a/articles/defender-for-iot/device-builders/concept-data-processing.md b/articles/defender-for-iot/device-builders/concept-data-processing.md index 32c05dfa163e1..5ac5083709ed0 100644 --- a/articles/defender-for-iot/device-builders/concept-data-processing.md +++ b/articles/defender-for-iot/device-builders/concept-data-processing.md @@ -7,7 +7,7 @@ ms.topic: conceptual # Data processing and residency -Microsoft Defender for IoT is a separate service which adds an extra layer of threat protection to the Azure IoT Hub, IoT Edge, and your devices. Defender for IoT may process, and store your data within a different geographic location than your IoT Hub. +Microsoft Defender for IoT is a separate service, which adds an extra layer of threat protection to the Azure IoT Hub, IoT Edge, and your devices. Defender for IoT may process, and store your data within a different geographic location than your IoT Hub. Mapping between the IoT Hub, and Microsoft Defender for IoT's regions is as follows: diff --git a/articles/defender-for-iot/device-builders/concept-event-aggregation.md b/articles/defender-for-iot/device-builders/concept-event-aggregation.md index 04b7ab04fd05a..685d5676c85f0 100644 --- a/articles/defender-for-iot/device-builders/concept-event-aggregation.md +++ b/articles/defender-for-iot/device-builders/concept-event-aggregation.md @@ -105,7 +105,7 @@ The data collected for each event is: | **os_version** | The version of the operating system. 
For example, `Windows 10`, or `Ubuntu 20.04.1`. | | **os_platform** | The OS of the device. | | **os_arch** | The architecture of the OS. For example, `x86_64`. | -| **nics** | The network interface controller. The full list of properties are listed below. | +| **nics** | The network interface controller. The full list of properties is listed below. | The **nics** properties are composed of the following; diff --git a/articles/defender-for-iot/device-builders/concept-micro-agent-configuration.md b/articles/defender-for-iot/device-builders/concept-micro-agent-configuration.md index 84473704c26d2..265c22a06d2d6 100644 --- a/articles/defender-for-iot/device-builders/concept-micro-agent-configuration.md +++ b/articles/defender-for-iot/device-builders/concept-micro-agent-configuration.md @@ -53,7 +53,7 @@ These configurations include process, and network activity collectors. |--|--|--|--| | **Interval** | `High`
                  `Medium`
                  `Low` | Determines the sending frequency. | `Medium` | | **Aggregation mode** | `True`
                  `False` | Determines whether to process event aggregation for an identical event. | `True` | -| **Cache size** | cycle FIFO | Defines the number of events collected in between the the times that data is sent. | `256` | +| **Cache size** | cycle FIFO | Defines the number of events collected in between the times that data is sent. | `256` | | **Disable collector** | `True`
                  `False` | Determines whether or not the collector is operational. | `False` | | | | | | @@ -67,7 +67,7 @@ These configurations include process, and network activity collectors. | Setting Name | Setting options | Description | Default | |--|--|--|--| -| **Devices** | A list of the network devices separated by a comma.

                  For example `eth0,eth1` | Defines the list of network devices (interfaces) that the agent will use to monitor the traffic.

                  If a network device is not listed, the Network Raw events will not be recorded for the missing device.| `eth0` | +| **Devices** | A list of the network devices separated by a comma.

                  For example `eth0,eth1` | Defines the list of network devices (interfaces) that the agent will use to monitor the traffic.

                  If a network device isn't listed, the Network Raw events won't be recorded for the missing device.| `eth0` | | | | | | ## Process collector specific-settings diff --git a/articles/defender-for-iot/device-builders/concept-recommendations.md b/articles/defender-for-iot/device-builders/concept-recommendations.md index 2768baa1ef8c0..6ca7ce4bef515 100644 --- a/articles/defender-for-iot/device-builders/concept-recommendations.md +++ b/articles/defender-for-iot/device-builders/concept-recommendations.md @@ -1,6 +1,6 @@ --- title: Security recommendations for IoT Hub -description: Learn about the concept of security recommendations and how they are used in the Defender for IoT Hub. +description: Learn about the concept of security recommendations and how they're used in the Defender for IoT Hub. ms.topic: conceptual ms.date: 11/09/2021 --- diff --git a/articles/defender-for-iot/device-builders/concept-security-module.md b/articles/defender-for-iot/device-builders/concept-security-module.md index 8610689e45fe6..f4c81649bd3eb 100644 --- a/articles/defender-for-iot/device-builders/concept-security-module.md +++ b/articles/defender-for-iot/device-builders/concept-security-module.md @@ -1,6 +1,6 @@ --- title: Defender-IoT-micro-agent and device twins -description: Learn about the concept of Defender-IoT-micro-agent twins and how they are used in Defender for IoT. +description: Learn about the concept of Defender-IoT-micro-agent twins and how they're used in Defender for IoT. ms.topic: conceptual ms.date: 03/28/2022 --- diff --git a/articles/defender-for-iot/device-builders/configure-pam-to-audit-sign-in-events.md b/articles/defender-for-iot/device-builders/configure-pam-to-audit-sign-in-events.md index 70f7e499fd4cf..01a7c69f92926 100644 --- a/articles/defender-for-iot/device-builders/configure-pam-to-audit-sign-in-events.md +++ b/articles/defender-for-iot/device-builders/configure-pam-to-audit-sign-in-events.md @@ -1,6 +1,6 @@ --- title: Configure Pluggable Authentication Modules (PAM) to audit sign-in events (Preview) -description: Learn how to configure Pluggable Authentication Modules (PAM) to audit sign-in events when syslog is not configured for your device. +description: Learn how to configure Pluggable Authentication Modules (PAM) to audit sign-in events when syslog isn't configured for your device. ms.date: 02/20/2022 ms.topic: how-to --- diff --git a/articles/defender-for-iot/device-builders/how-to-agent-configuration.md b/articles/defender-for-iot/device-builders/how-to-agent-configuration.md index 9de9cdac2a9cb..7b4044ea87403 100644 --- a/articles/defender-for-iot/device-builders/how-to-agent-configuration.md +++ b/articles/defender-for-iot/device-builders/how-to-agent-configuration.md @@ -41,9 +41,9 @@ If the agent configuration object does not exist in the **azureiotsecurity** mod ## Configuration schema and validation -Make sure to validate your agent configuration against this [schema](https://aka.ms/iot-security-github-module-schema). An agent will not launch if the configuration object does not match the schema. +Make sure to validate your agent configuration against this [schema](https://aka.ms/iot-security-github-module-schema). An agent will not launch if the configuration object doesn't match the schema. -If, while the agent is running, the configuration object is changed to a non-valid configuration (the configuration does not match the schema), the agent will ignore the invalid configuration and will continue using the current configuration. 
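A quick pre-check of the local configuration file can catch both problems before the agent reads it. The following is an illustrative sketch only: it assumes `python3`, `curl`, and the third-party `check-jsonschema` tool are installed, and it uses `/var/LocalConfiguration.json` (the path referenced in the troubleshooting guide) as a placeholder for your agent's configuration file.

```bash
# Illustrative pre-check of the agent configuration; not part of the agent installer.
CONFIG=/var/LocalConfiguration.json
SCHEMA_URL="https://aka.ms/iot-security-github-module-schema"

# 1. Confirm the file is well-formed JSON.
python3 -m json.tool "$CONFIG" > /dev/null && echo "Valid JSON" || echo "Malformed JSON"

# 2. Validate the configuration against the published schema.
curl -sSL -o /tmp/agent-config-schema.json "$SCHEMA_URL"
check-jsonschema --schemafile /tmp/agent-config-schema.json "$CONFIG"
```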
+If, while the agent is running, the configuration object is changed to a non-valid configuration (the configuration doesn't match the schema), the agent will ignore the invalid configuration and will continue using the current configuration. ### Configuration validation diff --git a/articles/defender-for-iot/device-builders/how-to-azure-rtos-security-module.md b/articles/defender-for-iot/device-builders/how-to-azure-rtos-security-module.md index c4d043c662922..b4f9d27d2c162 100644 --- a/articles/defender-for-iot/device-builders/how-to-azure-rtos-security-module.md +++ b/articles/defender-for-iot/device-builders/how-to-azure-rtos-security-module.md @@ -30,7 +30,7 @@ The default behavior of each configuration is provided in the following tables: | ASC_SECURITY_MODULE_ID | String | defender-iot-micro-agent | The unique identifier of the device. | | SECURITY_MODULE_VERSION_(MAJOR)(MINOR)(PATCH) | Number | 3.2.1 | The version. | | ASC_SECURITY_MODULE_SEND_MESSAGE_RETRY_TIME | Number | 3 | The amount of time the Defender-IoT-micro-agent will take to send the security message after a fail. (in seconds) | -| ASC_SECURITY_MODULE_PENDING_TIME | Number | 300 | The Defender-IoT-micro-agent pending time (in seconds). The state will change to suspend, if the time is exceeded.. | +| ASC_SECURITY_MODULE_PENDING_TIME | Number | 300 | The Defender-IoT-micro-agent pending time (in seconds). The state will change to suspend, if the time is exceeded. | ## Collection diff --git a/articles/defender-for-iot/device-builders/how-to-deploy-linux-c.md b/articles/defender-for-iot/device-builders/how-to-deploy-linux-c.md index 18914895450a2..56dffc8b98c1b 100644 --- a/articles/defender-for-iot/device-builders/how-to-deploy-linux-c.md +++ b/articles/defender-for-iot/device-builders/how-to-deploy-linux-c.md @@ -48,7 +48,7 @@ This script performs the following function: 1. Installs prerequisites. -1. Adds a service user (with interactive sign in disabled). +1. Adds a service user (with interactive sign-in disabled). 1. Installs the agent as a **Daemon** - assumes the device uses **systemd** for service management. diff --git a/articles/defender-for-iot/device-builders/how-to-deploy-linux-cs.md b/articles/defender-for-iot/device-builders/how-to-deploy-linux-cs.md index b2c1d55994799..24c87fdd44bc9 100644 --- a/articles/defender-for-iot/device-builders/how-to-deploy-linux-cs.md +++ b/articles/defender-for-iot/device-builders/how-to-deploy-linux-cs.md @@ -46,7 +46,7 @@ This script performs the following actions: - Installs prerequisites. -- Adds a service user (with interactive sign in disabled). +- Adds a service user (with interactive sign-in disabled). - Installs the agent as a **Daemon** - assumes the device uses **systemd** for legacy deployment model. diff --git a/articles/defender-for-iot/device-builders/how-to-install-micro-agent-for-edge.md b/articles/defender-for-iot/device-builders/how-to-install-micro-agent-for-edge.md index fcb19f76aa539..4421b0bf6641d 100644 --- a/articles/defender-for-iot/device-builders/how-to-install-micro-agent-for-edge.md +++ b/articles/defender-for-iot/device-builders/how-to-install-micro-agent-for-edge.md @@ -67,7 +67,7 @@ This article explains how to install, and authenticate the Defender micro agent systemctl status defender-iot-micro-agent.service ``` - 1. Ensure that the service is stable by making sure it is `active` and that the uptime of the process is appropriate + 1. 
Ensure that the service is stable by making sure it's `active` and that the uptime of the process is appropriate :::image type="content" source="media/quickstart-standalone-agent-binary-installation/active-running.png" alt-text="Check to make sure your service is stable and active."::: diff --git a/articles/defender-for-iot/device-builders/how-to-manage-device-inventory-on-the-cloud.md b/articles/defender-for-iot/device-builders/how-to-manage-device-inventory-on-the-cloud.md index 88269eae10058..54f082b6dac0f 100644 --- a/articles/defender-for-iot/device-builders/how-to-manage-device-inventory-on-the-cloud.md +++ b/articles/defender-for-iot/device-builders/how-to-manage-device-inventory-on-the-cloud.md @@ -13,7 +13,7 @@ The device inventory can be used to view device systems, and network information Some of the benefits of the device inventory include: -- Identify all IOT, and OT devices from different inputs. For example, allowing you to understand which devices in your environment are not communicating, and will require troubleshooting. +- Identify all IOT, and OT devices from different inputs. For example, allowing you to understand which devices in your environment aren't communicating, and will require troubleshooting. - Group, and filter devices by site, type, or vendor. @@ -99,7 +99,7 @@ For a list of filters that can be applied to the device inventory table, see the 1. Select the **Apply button**. -Multiple filters can be applied at one time. The filters are not saved when you leave the Device inventory page. +Multiple filters can be applied at one time. The filters aren't saved when you leave the Device inventory page. ## View device information @@ -115,7 +115,7 @@ Select the :::image type="icon" source="media/how-to-manage-device-inventory-on- ## How to identify devices that have not recently communicated with the Azure cloud -If you are under the impression that certain devices are not actively communicating, there is a way to check, and see which devices have not communicated in a specified time period. +If you are under the impression that certain devices are not actively communicating, there's a way to check, and see which devices have not communicated in a specified time period. **To identify all devices that have not communicated recently**: diff --git a/articles/defender-for-iot/device-builders/how-to-region-move.md b/articles/defender-for-iot/device-builders/how-to-region-move.md index 56a8257b5c671..0b5d66d9bad81 100644 --- a/articles/defender-for-iot/device-builders/how-to-region-move.md +++ b/articles/defender-for-iot/device-builders/how-to-region-move.md @@ -24,7 +24,7 @@ You can move a Microsoft Defender for IoT "iotsecuritysolutions" resource to a d ## Prepare -In this section, you will prepare to move the resource for the move by finding the resource and confirming it is in a region you wish to move from. +In this section, you'll prepare to move the resource for the move by finding the resource and confirming it is in a region you wish to move from. Before transitioning the resource to the new region, we recommended using [log analytics](../../azure-monitor/logs/quick-create-workspace.md) to store alerts, and raw events. @@ -44,19 +44,19 @@ Before transitioning the resource to the new region, we recommended using [log a 1. Select your hub from the list. -1. Ensure that you have selected the correct hub, and that it is in the region you want to move it from. +1. Ensure that you've selected the correct hub, and that it is in the region you want to move it from. 
:::image type="content" source="media/region-move/location.png" alt-text="Screenshot showing you the region your hub is located in."::: ## Move -You are now ready to move your resource to your new location. Follow [these instructions](../../iot-hub/iot-hub-how-to-clone.md) to move your IoT Hub. +You're now ready to move your resource to your new location. Follow [these instructions](../../iot-hub/iot-hub-how-to-clone.md) to move your IoT Hub. After transferring, and enabling the resource, you can link to the same log analytics workspace that was configured earlier. ## Verify -In this section, you will verify that the resource has been moved, that the connection to the IoT Hub has been enabled, and that everything is working correctly. +In this section, you'll verify that the resource has been moved, that the connection to the IoT Hub has been enabled, and that everything is working correctly. **To verify the resource in in the correct region**: diff --git a/articles/defender-for-iot/device-builders/how-to-security-data-access.md b/articles/defender-for-iot/device-builders/how-to-security-data-access.md index 051546c9a37f8..cc942345c2c3b 100644 --- a/articles/defender-for-iot/device-builders/how-to-security-data-access.md +++ b/articles/defender-for-iot/device-builders/how-to-security-data-access.md @@ -18,13 +18,13 @@ Defender for IoT stores security alerts, recommendations, and raw security data To configure which Log Analytics workspace is used: 1. Open your IoT hub. -1. Click the **Settings** blade under the **Security** section. -1. Click **Data Collection**, and change your Log Analytics workspace configuration. +1. Select the **Settings** blade under the **Security** section. +1. Select **Data Collection**, and change your Log Analytics workspace configuration. To access your alerts and recommendations in your Log Analytics workspace after configuration: 1. Choose an alert or recommendation in Defender for IoT. -1. Click **further investigation**, then click **To see which devices have this alert click here and view the DeviceId column**. +1. Select **further investigation**, then select **To see which devices have this alert click here and view the DeviceId column**. For details on querying data from Log Analytics, see [Get started with log queries in Azure Monitor](../../azure-monitor/logs/get-started-queries.md). diff --git a/articles/defender-for-iot/device-builders/quickstart-onboard-iot-hub.md b/articles/defender-for-iot/device-builders/quickstart-onboard-iot-hub.md index 1016ee7a721bf..731f0a0f6983b 100644 --- a/articles/defender-for-iot/device-builders/quickstart-onboard-iot-hub.md +++ b/articles/defender-for-iot/device-builders/quickstart-onboard-iot-hub.md @@ -47,7 +47,7 @@ You can onboard Defender for IoT to an existing IoT Hub, where you can then moni :::image type="content" source="media/quickstart-onboard-iot-hub/secure-your-iot-solution.png" alt-text="Select the secure your IoT solution button to secure your solution." lightbox="media/quickstart-onboard-iot-hub/secure-your-iot-solution-expanded.png"::: -The **Secure your IoT solution** button will only appear if the IoT Hub has not already been onboarded, or if you set the Defender for IoT toggle to **Off** while onboarding. +The **Secure your IoT solution** button will only appear if the IoT Hub hasn't already been onboarded, or if you set the Defender for IoT toggle to **Off** while onboarding. 
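To check from the command line whether a hub has already been onboarded, you can list the existing Defender for IoT security solutions in the subscription. The sketch below is illustrative only: it assumes the `az security iot-solution` command group is available in your Azure CLI version, and the resource group and solution names are placeholders.

```bash
# Illustrative: list Defender for IoT security solutions ("iotSecuritySolutions")
# to see whether an IoT hub has already been onboarded.
az security iot-solution list --output table

# Inspect a specific solution (placeholder names).
az security iot-solution show \
  --resource-group myResourceGroup \
  --solution-name mySolutionName
```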
:::image type="content" source="media/quickstart-onboard-iot-hub/toggle-is-off.png" alt-text="If your toggle was set to off during onboarding."::: diff --git a/articles/defender-for-iot/device-builders/references-defender-for-iot-glossary.md b/articles/defender-for-iot/device-builders/references-defender-for-iot-glossary.md index dc5fbd10375ed..7c4b1877d2d12 100644 --- a/articles/defender-for-iot/device-builders/references-defender-for-iot-glossary.md +++ b/articles/defender-for-iot/device-builders/references-defender-for-iot-glossary.md @@ -23,7 +23,7 @@ This glossary provides a brief description of important terms and concepts for t |--|--|--| | **Device twins** | Device twins are JSON documents that store device state information including metadata, configurations, and conditions. | [Module Twin](#m)

                  [Defender-IoT-micro-agent twin](#s) | | **Defender-IoT-micro-agent twin** `(DB)` | The Defender-IoT-micro-agent twin holds all of the information that is relevant to device security, for each specific device in your solution. | [Device twin](#d)

                  [Module Twin](#m) | -| **Device inventory** | Defender for IoT identifies, and classifies devices as a single unique network device in the inventory for:

                  - Standalone IT, OT, and IoT devices with 1 or multiple NICs.

                  - Devices composed of multiple backplane components. This includes all racks, slots, and modules.

                  - Devices that act as network infrastructure. For example, switches, and routers with multiple NICs.

                  - Public internet IP addresses, multicast groups, and broadcast groups are not considered inventory devices.

                  Devices that have been inactive for more than 60 days are classified as inactive Inventory devices.| +| **Device inventory** | Defender for IoT identifies, and classifies devices as a single unique network device in the inventory for:

                  - Standalone IT, OT, and IoT devices with 1 or multiple NICs.

                  - Devices composed of multiple backplane components. This includes all racks, slots, and modules.

                  - Devices that act as network infrastructure. For example, switches, and routers with multiple NICs.

                  - Public internet IP addresses, multicast groups, and broadcast groups aren't considered inventory devices.

                  Devices that have been inactive for more than 60 days are classified as inactive Inventory devices.| ## E diff --git a/articles/defender-for-iot/device-builders/release-notes.md b/articles/defender-for-iot/device-builders/release-notes.md index 18d41a7a6ed02..4dbeec1b47f47 100644 --- a/articles/defender-for-iot/device-builders/release-notes.md +++ b/articles/defender-for-iot/device-builders/release-notes.md @@ -33,7 +33,7 @@ Listed below are the support, breaking change policies for Defender for IoT, and - **CIS benchmarks**: The micro agent now supports recommendations based on CIS Distribution Independent Linux Benchmarks, version 2.0.0, and the ability to disable specific CIS Benchmark checks or groups using twin configurations. For more information, see [Micro agent configurations (Preview)](concept-micro-agent-configuration.md). -- **Micro agent supported devices list expands**: The micro agent now supports Debian 11 AMD64 and ARM32v7 devices, as well as Ubuntu Server 18.04 ARM32 Linux devices & Ubuntu Server 20.04 ARM32 & ARM64 Linux devices. +- **Micro agent supported devices list expands**: The micro agent now supports Debian 11 AMD64 and ARM32v7 devices, and Ubuntu Server 18.04 ARM32 Linux devices & Ubuntu Server 20.04 ARM32 & ARM64 Linux devices. For more information, see [Agent portfolio overview and OS support (Preview)](concept-agent-portfolio-overview-os-support.md). @@ -50,7 +50,7 @@ Listed below are the support, breaking change policies for Defender for IoT, and - DNS network activity on managed devices is now supported. Microsoft threat intelligence security graph can now detect suspicious activity based on DNS traffic. -- [Leaf device proxying](../../iot-edge/how-to-connect-downstream-iot-edge-device.md#integrate-microsoft-defender-for-iot-with-iot-edge-gateway): There is now an enhanced integration with IoT Edge. This integration enhances the connectivity between the agent, and the cloud using leaf device proxying. +- [Leaf device proxying](../../iot-edge/how-to-connect-downstream-iot-edge-device.md#integrate-microsoft-defender-for-iot-with-iot-edge-gateway): There's now an enhanced integration with IoT Edge. This integration enhances the connectivity between the agent, and the cloud using leaf device proxying. ## October 2021 diff --git a/articles/defender-for-iot/device-builders/resources-agent-frequently-asked-questions.md b/articles/defender-for-iot/device-builders/resources-agent-frequently-asked-questions.md index 24fd220f6f3ae..ee770f690d266 100644 --- a/articles/defender-for-iot/device-builders/resources-agent-frequently-asked-questions.md +++ b/articles/defender-for-iot/device-builders/resources-agent-frequently-asked-questions.md @@ -11,7 +11,7 @@ This article provides a list of frequently asked questions and answers about the ## Do I have to install an embedded security agent? -Agent installation on your IoT devices isn't mandatory in order to enable Defender for IoT. You can choose between the following two options There are four different levels of security monitoring, and management capabilities which will provide different levels of protection: +Agent installation on your IoT devices isn't mandatory in order to enable Defender for IoT. You can choose between the following two options There are four different levels of security monitoring, and management capabilities, which will provide different levels of protection: - Install the Defender for IoT embedded security agent with or without modifications. 
This option provides the highest level of enhanced security insights into device behavior and access. diff --git a/articles/defender-for-iot/device-builders/security-agent-architecture.md b/articles/defender-for-iot/device-builders/security-agent-architecture.md index 39a91ced02a4a..cd7b5a88648df 100644 --- a/articles/defender-for-iot/device-builders/security-agent-architecture.md +++ b/articles/defender-for-iot/device-builders/security-agent-architecture.md @@ -42,7 +42,7 @@ Defender for IoT offers different installer agents for 32 bit and 64-bit Windows ## Next steps -In this article, you got a high-level overview about Defender for IoT Defender-IoT-micro-agent architecture, and the available installers.To continue getting started with Defender for IoT deployment, review the security agent authentication methods that are available. +In this article, you got a high-level overview about Defender for IoT Defender-IoT-micro-agent architecture, and the available installers. To continue getting started with Defender for IoT deployment, review the security agent authentication methods that are available. > [!div class="nextstepaction"] > [Security agent authentication methods](concept-security-agent-authentication-methods.md) diff --git a/articles/defender-for-iot/device-builders/troubleshoot-agent.md b/articles/defender-for-iot/device-builders/troubleshoot-agent.md index 65d0e0fcd58e9..44406003b2be4 100644 --- a/articles/defender-for-iot/device-builders/troubleshoot-agent.md +++ b/articles/defender-for-iot/device-builders/troubleshoot-agent.md @@ -9,7 +9,7 @@ ms.date: 03/28/2022 This article explains how to solve potential problems in the security agent start-up process. -Microsoft Defender for IoT agent self-starts immediately after installation. The agent start up process includes reading local configuration, connecting to Azure IoT Hub, and retrieving the remote twin configuration. Failure in any one of these steps may cause the security agent to fail. +Microsoft Defender for IoT agent self-starts immediately after installation. The agent start-up process includes reading local configuration, connecting to Azure IoT Hub, and retrieving the remote twin configuration. Failure in any one of these steps may cause the security agent to fail. In this troubleshooting guide you'll learn how to: @@ -19,7 +19,7 @@ In this troubleshooting guide you'll learn how to: ## Validate if the security agent is running -1. To validate is the security agent is running, wait a few minutes after installing the agent and and run the following command. +1. To validate that the security agent is running, wait a few minutes after installing the agent and run the following command.
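The exact command is shown per agent type below. As a rough illustration, checking a systemd-managed agent generally looks like the following sketch; the `defender-iot-micro-agent.service` unit name is taken from the micro agent articles in this documentation set, and the legacy C and C# agents use their own service names.

```bash
# Illustrative systemd health check after installation.
systemctl status defender-iot-micro-agent.service

# If the unit isn't active, recent logs usually show which start-up step failed
# (reading local configuration, connecting to IoT Hub, or retrieving the twin configuration).
sudo journalctl -u defender-iot-micro-agent.service --since "15 minutes ago"
```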
                  **C agent** @@ -78,10 +78,10 @@ Defender for IoT agent encountered an error! Error in: {Error Code}, reason: {Er | Error Code | Error sub code | Error details | Remediate C | Remediate C# | |--|--|--|--|--| | Local Configuration | Missing configuration | A configuration is missing in the local configuration file. The error message should state which key is missing. | Add the missing key to the /var/LocalConfiguration.json file, see the [cs-localconfig-reference](azure-iot-security-local-configuration-c.md) for details. | Add the missing key to the General.config file, see the [c#-localconfig-reference](azure-iot-security-local-configuration-csharp.md) for details. | -| Local Configuration | Cant Parse Configuration | A configuration value can't be parsed. The error message should state which key can't be parsed. A configuration value cannot be parsed either because the value is not in the expected type, or the value is out of range. | Fix the value of the key in /var/LocalConfiguration.json file so that it matches the LocalConfiguration schema, see the [c#-localconfig-reference](azure-iot-security-local-configuration-csharp.md) for details. | Fix the value of the key in General.config file so that it matches the schema, see the [cs-localconfig-reference](azure-iot-security-local-configuration-c.md) for details. | +| Local Configuration | Cant Parse Configuration | A configuration value can't be parsed. The error message should state which key can't be parsed. A configuration value cannot be parsed either because the value isn't in the expected type, or the value is out of range. | Fix the value of the key in /var/LocalConfiguration.json file so that it matches the LocalConfiguration schema, see the [c#-localconfig-reference](azure-iot-security-local-configuration-csharp.md) for details. | Fix the value of the key in General.config file so that it matches the schema, see the [cs-localconfig-reference](azure-iot-security-local-configuration-c.md) for details. | | Local Configuration | File Format | Failed to parse configuration file. | The configuration file is corrupted, download the agent and re-install. | - | -| Remote Configuration | Timeout | The agent could not fetch the azureiotsecurity module twin within the timeout period. | Make sure authentication configuration is correct and try again. | The agent could not fetch the azureiotsecurity module twin within timeout period. Make sure authentication configuration is correct and try again. | -| Authentication | File Not Exist | The file in the given path does not exist. | Make sure the file exists in the given path or go to the **LocalConfiguration.json** file and change the **FilePath** configuration. | Make sure the file exists in the given path or go to the **Authentication.config** file and change the **filePath** configuration. | +| Remote Configuration | Timeout | The agent could not fetch the azureiotsecurity module twin within the timeout period. | Make sure authentication configuration is correct and try again. | The agent couldn't fetch the azureiotsecurity module twin within timeout period. Make sure authentication configuration is correct and try again. | +| Authentication | File Not Exist | The file in the given path doesn't exist. | Make sure the file exists in the given path or go to the **LocalConfiguration.json** file and change the **FilePath** configuration. | Make sure the file exists in the given path or go to the **Authentication.config** file and change the **filePath** configuration. 
| | Authentication | File Permission | The agent does not have sufficient permissions to open the file. | Give the **asciotagent** user read permissions on the file in the given path. | Make sure the file is accessible. | | Authentication | File Format | The given file is not in the correct format. | Make sure the file is in the correct format. The supported file types are .pfx and .pem. | Make sure the file is a valid certificate file. | | Authentication | Unauthorized | The agent was not able to authenticate against IoT Hub with the given credentials. | Validate authentication configuration in LocalConfiguration file, go through the authentication configuration and make sure all the details are correct, validate that the secret in the file matches the authenticated identity. | Validate authentication configuration in Authentication.config, go through the authentication configuration and make sure all the details are correct, then validate that the secret in the file matches the authenticated identity. | diff --git a/articles/defender-for-iot/device-builders/troubleshoot-defender-micro-agent.md b/articles/defender-for-iot/device-builders/troubleshoot-defender-micro-agent.md index e1da0f613726b..9e10cbf0b05b9 100644 --- a/articles/defender-for-iot/device-builders/troubleshoot-defender-micro-agent.md +++ b/articles/defender-for-iot/device-builders/troubleshoot-defender-micro-agent.md @@ -19,9 +19,9 @@ To view the status of the service: systemctl status defender-iot-micro-agent.service ``` -1. Check that the service is stable by making sure it is `active`, and that the uptime in the process is appropriate. +1. Check that the service is stable by making sure it's `active`, and that the uptime in the process is appropriate. - :::image type="content" source="media/troubleshooting/active-running.png" alt-text="Ensure your service is stable by checking to see that it is active and the uptime is appropriate."::: + :::image type="content" source="media/troubleshooting/active-running.png" alt-text="Ensure your service is stable by checking to see that it's active and the uptime is appropriate."::: If the service is listed as `inactive`, use the following command to start the service: diff --git a/articles/defender-for-iot/device-builders/tutorial-configure-agent-based-solution.md b/articles/defender-for-iot/device-builders/tutorial-configure-agent-based-solution.md index 344c46c651847..a6a3bca6330b8 100644 --- a/articles/defender-for-iot/device-builders/tutorial-configure-agent-based-solution.md +++ b/articles/defender-for-iot/device-builders/tutorial-configure-agent-based-solution.md @@ -9,7 +9,7 @@ ms.topic: tutorial This tutorial will help you learn how to configure the Microsoft Defender for IoT agent-based solution. -In this tutorial you will learn how to: +In this tutorial you'll learn how to: > [!div class="checklist"] > - Enable data collection @@ -71,7 +71,7 @@ You can choose to add storage of an additional information type as `raw events`. 1. Select a subscription from the drop-down menu. -1. Select a workspace from the drop-down menu. If you do not already have an existing Log Analytics workspace, you can select **Create New Workspace** to create a new one. +1. Select a workspace from the drop-down menu. If you don't already have an existing Log Analytics workspace, you can select **Create New Workspace** to create a new one. 1. Verify that the **Access to raw security data** option is selected. 
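If you prefer to create the workspace ahead of time from the command line so that it can be selected in the Data Collection settings, a minimal sketch (placeholder names and location, assuming you're signed in to the Azure CLI) is:

```bash
# Illustrative: create a Log Analytics workspace to select in the Data Collection settings.
# The resource group, workspace name, and location are placeholders.
az monitor log-analytics workspace create \
  --resource-group myResourceGroup \
  --workspace-name myDefenderIotWorkspace \
  --location eastus
```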
diff --git a/articles/defender-for-iot/device-builders/tutorial-create-micro-agent-module-twin.md b/articles/defender-for-iot/device-builders/tutorial-create-micro-agent-module-twin.md index 329a9e946dcd9..68aa7a60b99ca 100644 --- a/articles/defender-for-iot/device-builders/tutorial-create-micro-agent-module-twin.md +++ b/articles/defender-for-iot/device-builders/tutorial-create-micro-agent-module-twin.md @@ -1,6 +1,6 @@ --- title: Create a DefenderforIoTMicroAgent module twin (Preview) -description: In this tutorial, you will learn how to create a DefenderIotMicroAgent module twin for new devices. +description: In this tutorial, you'll learn how to create a DefenderIotMicroAgent module twin for new devices. ms.date: 01/16/2022 ms.topic: tutorial ms.custom: mode-other @@ -34,7 +34,7 @@ Defender for IoT uses the module twin mechanism, and maintains a Defender-IoT-mi To take full advantage of all Defender for IoT feature's, you need to create, configure, and use the Defender-IoT-micro-agent twins for every device in the service. -In this tutorial you will learn how to: +In this tutorial you'll learn how to: > [!div class="checklist"] > - Create a DefenderIotMicroAgent module twin diff --git a/articles/defender-for-iot/device-builders/tutorial-investigate-security-alerts.md b/articles/defender-for-iot/device-builders/tutorial-investigate-security-alerts.md index 6f908f6cf2d23..14f4d70d676ae 100644 --- a/articles/defender-for-iot/device-builders/tutorial-investigate-security-alerts.md +++ b/articles/defender-for-iot/device-builders/tutorial-investigate-security-alerts.md @@ -9,7 +9,7 @@ ms.date: 01/13/2022 This tutorial will help you learn how to investigate, and remediate the alerts issued by Defender for IoT. Remediating alerts is the best way to ensure compliance, and protection across your IoT solution. -In this tutorial you will learn how to: +In this tutorial you'll learn how to: > [!div class="checklist"] > - Investigate security alerts diff --git a/articles/defender-for-iot/device-builders/tutorial-investigate-security-recommendations.md b/articles/defender-for-iot/device-builders/tutorial-investigate-security-recommendations.md index edbeafb2bf878..bad1dacbb74fa 100644 --- a/articles/defender-for-iot/device-builders/tutorial-investigate-security-recommendations.md +++ b/articles/defender-for-iot/device-builders/tutorial-investigate-security-recommendations.md @@ -11,7 +11,7 @@ This tutorial will help you learn how to explore the information available in ea Timely analysis and mitigation of recommendations by Defender for IoT is the best way to improve security posture and reduce attack surface across your IoT solution. -In this tutorial you will learn how to: +In this tutorial you'll learn how to: > [!div class="checklist"] > - Investigate new recommendations diff --git a/articles/defender-for-iot/device-builders/tutorial-standalone-agent-binary-installation.md b/articles/defender-for-iot/device-builders/tutorial-standalone-agent-binary-installation.md index 75fed4cceeeb2..7a6f02fffb2de 100644 --- a/articles/defender-for-iot/device-builders/tutorial-standalone-agent-binary-installation.md +++ b/articles/defender-for-iot/device-builders/tutorial-standalone-agent-binary-installation.md @@ -11,7 +11,7 @@ ms.custom: mode-other This tutorial will help you learn how to install and authenticate the Defender for IoT micro agent. 
-In this tutorial you will learn how to: +In this tutorial you'll learn how to: > [!div class="checklist"] > - Download and install the micro agent @@ -26,7 +26,7 @@ In this tutorial you will learn how to: - An [IoT hub](../../iot-hub/iot-hub-create-through-portal.md). -- Verify you are running one of the following [operating systems](concept-agent-portfolio-overview-os-support.md#agent-portfolio-overview-and-os-support-preview). +- Verify you're running one of the following [operating systems](concept-agent-portfolio-overview-os-support.md#agent-portfolio-overview-and-os-support-preview). - You must have [enabled Microsoft Defender for IoT on your Azure IoT Hub](quickstart-onboard-iot-hub.md). @@ -73,7 +73,7 @@ Depending on your setup, the appropriate Microsoft package will need to be insta sudo cp ./microsoft.gpg /etc/apt/trusted.gpg.d/ ``` -1. Ensure that you have updated the apt using the following command: +1. Ensure that you've updated the apt using the following command: ```bash sudo apt-get update @@ -161,7 +161,7 @@ You will need to copy the module identity connection string from the DefenderIoT systemctl status defender-iot-micro-agent.service ``` -1. Ensure that the service is stable by making sure it is `active`, and that the uptime of the process is appropriate. +1. Ensure that the service is stable by making sure it's `active`, and that the uptime of the process is appropriate. :::image type="content" source="media/quickstart-standalone-agent-binary-installation/active-running.png" alt-text="Check to make sure your service is stable and active."::: diff --git a/articles/defender-for-iot/device-builders/upgrade-micro-agent.md b/articles/defender-for-iot/device-builders/upgrade-micro-agent.md index e64d358b4d2d1..f3d2f5357ba30 100644 --- a/articles/defender-for-iot/device-builders/upgrade-micro-agent.md +++ b/articles/defender-for-iot/device-builders/upgrade-micro-agent.md @@ -13,7 +13,7 @@ For more information, see our [release notes for device builders](release-notes. ## Upgrade a standalone micro agent -1. Ensure that you have upgraded the apt. Run: +1. Ensure that you've upgraded the apt. Run: ```bash sudo apt-get update @@ -27,7 +27,7 @@ For more information, see our [release notes for device builders](release-notes. ## Upgrade a micro agent for Edge -1. Ensure that you have upgraded the apt. Run: +1. Ensure that you've upgraded the apt. 
Run: ```bash sudo apt-get update diff --git a/articles/defender-for-iot/organizations/appliance-catalog/hpe-proliant-dl20-plus-enterprise.md b/articles/defender-for-iot/organizations/appliance-catalog/hpe-proliant-dl20-plus-enterprise.md index 4b9ff061de20b..277883b410bd8 100644 --- a/articles/defender-for-iot/organizations/appliance-catalog/hpe-proliant-dl20-plus-enterprise.md +++ b/articles/defender-for-iot/organizations/appliance-catalog/hpe-proliant-dl20-plus-enterprise.md @@ -40,7 +40,6 @@ The following image shows a sample of the HPE ProLiant DL20 back panel: | Quantity | PN| Description: high end | |--|--|--| |1| P06963-B21 | HPE DL20 Gen10 4SFF CTO Server | -|1| P06963-B21 | HPE DL20 Gen10 4SFF CTO Server | |1| P17104-L21 | HPE DL20 Gen10 E-2234 FIO Kit | |2| 879507-B21 | HPE 16-GB 2Rx8 PC4-2666V-E STND Kit | |3| 655710-B21 | HPE 1-TB SATA 7.2 K SFF SC DS HDD | diff --git a/articles/defender-for-iot/organizations/appliance-catalog/hpe-proliant-dl360.md b/articles/defender-for-iot/organizations/appliance-catalog/hpe-proliant-dl360.md index c8bd5f2b1ac9c..c8f5365a56da9 100644 --- a/articles/defender-for-iot/organizations/appliance-catalog/hpe-proliant-dl360.md +++ b/articles/defender-for-iot/organizations/appliance-catalog/hpe-proliant-dl360.md @@ -12,7 +12,7 @@ This article describes the **HPE ProLiant DL360** appliance for OT sensors. | Appliance characteristic |Details | |---------|---------| |**Hardware profile** | Corporate | -|**Performance** | Max bandwidth: 3Gbp/s
                  Max devices: 12,000 | +|**Performance** | Max bandwidth: 3 Gbps
                  Max devices: 12,000 | |**Physical specifications** | Mounting: 1U
                  Ports: 15x RJ45 or 8x SFP (OPT)| |**Status** | Supported, Available preconfigured| diff --git a/articles/defender-for-iot/organizations/appliance-catalog/virtual-management-vmware.md b/articles/defender-for-iot/organizations/appliance-catalog/virtual-management-vmware.md index 2681fd3c401b4..e669fcfdf40c4 100644 --- a/articles/defender-for-iot/organizations/appliance-catalog/virtual-management-vmware.md +++ b/articles/defender-for-iot/organizations/appliance-catalog/virtual-management-vmware.md @@ -12,7 +12,7 @@ This article describes an on-premises management console deployment on a virtual | Appliance characteristic |Details | |---------|---------| |**Hardware profile** | As required for your organization. For more information, see [Which appliances do I need?](../ot-appliance-sizing.md) | -|**Performance** | As required for your organization. For more information, see [Which appliances do I need?](../ot-appliance-sizing.md) | +|**Performance** | As required for your organization. For more information, see [Which appliances do I need?](../ot-appliance-sizing.md) | |**Physical specifications** | Virtual Machine | |**Status** | Supported | diff --git a/articles/defender-for-iot/organizations/appliance-catalog/virtual-sensor-hyper-v.md b/articles/defender-for-iot/organizations/appliance-catalog/virtual-sensor-hyper-v.md index c7d92127af702..94db90d9045bf 100644 --- a/articles/defender-for-iot/organizations/appliance-catalog/virtual-sensor-hyper-v.md +++ b/articles/defender-for-iot/organizations/appliance-catalog/virtual-sensor-hyper-v.md @@ -146,7 +146,7 @@ You are able to attach a SPAN Virtual Interface to the Virtual Switch through Wi 1. Select **OK**. -These commands set the name of the newly added adapter hardware to be `Monitor`. If you are using Hyper-V Manager, the name of the newly added adapter hardware is set to `Network Adapter`. +These commands set the name of the newly added adapter hardware to be `Monitor`. If you're using Hyper-V Manager, the name of the newly added adapter hardware is set to `Network Adapter`. **To attach a SPAN Virtual Interface to the virtual switch with Hyper-V Manager**: diff --git a/articles/defender-for-iot/organizations/architecture.md b/articles/defender-for-iot/organizations/architecture.md index 620ebf88dca67..8ec72c3c0b08e 100644 --- a/articles/defender-for-iot/organizations/architecture.md +++ b/articles/defender-for-iot/organizations/architecture.md @@ -52,6 +52,10 @@ In contrast, when working with locally managed sensors: - Sensor names can be updated in the sensor console. +### Devices monitored by Defender for IoT + +[!INCLUDE [devices-inventoried](includes/devices-inventoried.md)] + ## Analytics engines Defender for IoT sensors apply analytics engines on ingested data, triggering alerts based on both real-time and pre-recorded traffic. diff --git a/articles/defender-for-iot/organizations/how-to-accelerate-alert-incident-response.md b/articles/defender-for-iot/organizations/how-to-accelerate-alert-incident-response.md index 50ae2ac1fcec1..a191060e849bb 100644 --- a/articles/defender-for-iot/organizations/how-to-accelerate-alert-incident-response.md +++ b/articles/defender-for-iot/organizations/how-to-accelerate-alert-incident-response.md @@ -60,7 +60,7 @@ The alert group will appear in supported partner solutions with the following pr - **alert_group** for Syslog objects -These fields should be configured in the partner solution to display the alert group name. 
If there is no alert associated with an alert group, the field in the partner solution will display **NA**. +These fields should be configured in the partner solution to display the alert group name. If there's no alert associated with an alert group, the field in the partner solution will display **NA**. ### Default alert groups @@ -97,7 +97,7 @@ Add custom alert rule to pinpoint specific activity as needed for your organizat For example, you might want to define an alert for an environment running MODBUS to detect any write commands to a memory register, on a specific IP address and ethernet destination. Another example would be an alert for any access to a specific IP address. -Use custom alert rule actions to for IT to take specific action when the alert is triggered, such as allowing users to access PCAP files from the alert, assigning alert severity, or generating an event that shows in the event timeline. Alert messages indicate that the alert was generated from a custom alert rule. +Use custom alert rule actions to instruct Defender for IT to take specific action when the alert is triggered, such as allowing users to access PCAP files from the alert, assigning alert severity, or generating an event that shows in the event timeline. Alert messages indicate that the alert was generated from a custom alert rule. **To create a custom alert rule**: diff --git a/articles/defender-for-iot/organizations/how-to-activate-and-set-up-your-on-premises-management-console.md b/articles/defender-for-iot/organizations/how-to-activate-and-set-up-your-on-premises-management-console.md index 0b48cc61ed614..7f6ea36747400 100644 --- a/articles/defender-for-iot/organizations/how-to-activate-and-set-up-your-on-premises-management-console.md +++ b/articles/defender-for-iot/organizations/how-to-activate-and-set-up-your-on-premises-management-console.md @@ -1,7 +1,7 @@ --- title: Activate and set up your on-premises management console description: Activating the management console ensures that sensors are registered with Azure and send information to the on-premises management console, and that the on-premises management console carries out management tasks on connected sensors. -ms.date: 11/09/2021 +ms.date: 06/06/2022 ms.topic: how-to --- @@ -26,9 +26,9 @@ If you forgot your password, select the **Recover Password** option. See [Passwo ## Activate the on-premises management console -After you sign in for the first time, you need to activate the on-premises management console by getting and uploading an activation file. +After you sign in for the first time, you need to activate the on-premises management console by getting and uploading an activation file. Activation files on the on-premises management console enforces the number of committed devices configured for your subscription and Defender for IoT plan. For more information, see [Manage Defender for IoT subscriptions](how-to-manage-subscriptions.md). -To activate the on-premises management console: +**To activate the on-premises management console**: 1. Sign in to the on-premises management console. @@ -61,13 +61,25 @@ After initial activation, the number of monitored devices might exceed the numbe If this warning appears, you need to upload a [new activation file](#activate-the-on-premises-management-console). 
-### Activate an expired license (versions under 10.0) +### Activation expirations + +After activating an on-premises management console, you'll need to apply new activation files on both the on-premises management console and connected sensors as follows: + +|Location |Activation process | +|---------|---------| +|**On-premises management console** | Apply a new activation file on your on-premises management console if you've [modified the number of committed devices](how-to-manage-subscriptions.md#update-committed-devices-in-a-subscription) in your subscription. | +|**Cloud-connected sensors** | Cloud-connected sensors remain activated for as long as your Azure subscription with your Defender for IoT plan is active.

                  However, you'll also need to apply a new activation file when [updating your sensor software](how-to-manage-individual-sensors.md#download-a-new-activation-file-for-version-221x-or-higher) from a legacy version to version 22.2.x. | +| **Locally-managed** | Apply a new activation file to locally-managed sensors every year. After a sensor's activation file has expired, the sensor will continue to monitor your network, but you'll see a warning message when signing in to the sensor. | + +For more information, see [Manage Defender for IoT subscriptions](how-to-manage-subscriptions.md). + +### Activate expired licenses from versions earlier than 10.0 For users with versions prior to 10.0, your license might expire and the following alert will appear: :::image type="content" source="media/how-to-activate-and-set-up-your-on-premises-management-console/activation-popup.png" alt-text="Screenshot that shows the License has expired alert."::: -To activate your license: +**To activate your license**: 1. Open a case with [support](https://portal.azure.com/?passwordRecovery=true&Microsoft_Azure_IoT_Defender=canary#create/Microsoft.Support). diff --git a/articles/defender-for-iot/organizations/how-to-activate-and-set-up-your-sensor.md b/articles/defender-for-iot/organizations/how-to-activate-and-set-up-your-sensor.md index 0bb74fb7101fc..30d168792cf13 100644 --- a/articles/defender-for-iot/organizations/how-to-activate-and-set-up-your-sensor.md +++ b/articles/defender-for-iot/organizations/how-to-activate-and-set-up-your-sensor.md @@ -1,7 +1,7 @@ --- title: Activate and set up your sensor description: This article describes how to sign in and activate a sensor console. -ms.date: 11/09/2021 +ms.date: 06/06/2022 ms.topic: how-to --- @@ -126,6 +126,17 @@ You might need to refresh your screen after uploading the CA-signed certificate. For information about uploading a new certificate, supported certificate parameters, and working with CLI certificate commands, see [Manage individual sensors](how-to-manage-individual-sensors.md). +### Activation expirations + +After activating a sensor, you'll need to apply new activation files as follows: + +|Location |Activation process | +|---------|---------| +|**Cloud-connected sensors** | Cloud-connected sensors remain activated for as long as your Azure subscription with your Defender for IoT plan is active.

                  However, you'll also need to apply a new activation file when [updating your sensor software](how-to-manage-individual-sensors.md#download-a-new-activation-file-for-version-221x-or-higher) from a legacy version to version 22.2.x. | +| **Locally-managed** | Apply a new activation file to locally-managed sensors every year. After a sensor's activation file has expired, the sensor will continue to monitor your network, but you'll see a warning message when signing in to the sensor. | + +For more information, see [Manage Defender for IoT subscriptions](how-to-manage-subscriptions.md) and [Manage the on-premises management console](how-to-manage-the-on-premises-management-console.md). + ### Activate an expired license (versions under 10.0) @@ -265,7 +276,7 @@ System messages provide general information about your sensor that may require y For more information, see: -- [Threat intelligence research and packages ](how-to-work-with-threat-intelligence-packages.md) +- [Threat intelligence research and packages](how-to-work-with-threat-intelligence-packages.md) - [Onboard a sensor](tutorial-onboarding.md#onboard-and-activate-the-virtual-sensor) diff --git a/articles/defender-for-iot/organizations/how-to-analyze-programming-details-changes.md b/articles/defender-for-iot/organizations/how-to-analyze-programming-details-changes.md index bc9858d2b1568..bbdd7b0f505fd 100644 --- a/articles/defender-for-iot/organizations/how-to-analyze-programming-details-changes.md +++ b/articles/defender-for-iot/organizations/how-to-analyze-programming-details-changes.md @@ -28,7 +28,7 @@ You may need to review programming activity: - After a planned update to controllers - - When a process or machine is not working correctly (to see who carried out the last update and when) + - When a process or machine isn't working correctly (to see who carried out the last update and when) :::image type="content" source="media/how-to-work-with-maps/differences.png" alt-text="Screenshot of a Programming Change Log"::: @@ -40,7 +40,7 @@ Other options let you: ## About authorized versus unauthorized programming events -Unauthorized programming events are carried out by devices that have not been learned or manually defined as programming devices. Authorized programming events are carried out by devices that were resolved or manually defined as programming devices. +Unauthorized programming events are carried out by devices that haven't been learned or manually defined as programming devices. Authorized programming events are carried out by devices that were resolved or manually defined as programming devices. The Programming Analysis window displays both authorized and unauthorized programming events. @@ -80,9 +80,9 @@ This section describes how to view programming files and compare versions. Searc |Programming timeline type | Description | |--|--| | Programmed Device | Provides details about the device that was programmed, including the hostname and file. | -| Recent Events | Displays the 50 most recent events detected by the sensor.
                  To highlight an event, hover over it and click the star. :::image type="icon" source="media/how-to-work-with-maps/star.png" border="false":::
                  The last 50 events can be viewed. | +| Recent Events | Displays the 50 most recent events detected by the sensor.
                  To highlight an event, hover over it and select the star. :::image type="icon" source="media/how-to-work-with-maps/star.png" border="false":::
                  The last 50 events can be viewed. | | Files | Displays the files detected for the chosen date and the file size on the programmed device.
                  By default, the maximum number of files available for display per device is 300.
                  By default, the maximum file size for each file is 15 MB. | -| File status :::image type="icon" source="media/how-to-work-with-maps/status-v2.png" border="false"::: | File labels indicate the status of the file on the device, including:
                  **Added**: the file was added to the endpoint on the date or time selected.
                  **Updated**: The file was updated on the date or time selected.
                  **Deleted**: This file was removed.
                  **No label**: The file was not changed. | +| File status :::image type="icon" source="media/how-to-work-with-maps/status-v2.png" border="false"::: | File labels indicate the status of the file on the device, including:
                  **Added**: The file was added to the endpoint on the date or time selected.
                  **Updated**: The file was updated on the date or time selected.
                  **Deleted**: The file was removed.
                  **No label**: The file wasn't changed. | | Programming Device | The device that made the programming change. Multiple devices may have carried out programming changes on one programmed device. The hostname, date, or time of change and logged in user are displayed. | | :::image type="icon" source="media/how-to-work-with-maps/current.png" border="false"::: | Displays the current file installed on the programmed device. | | :::image type="icon" source="media/how-to-work-with-maps/download-text.png" border="false"::: | Download a text file of the code displayed. | diff --git a/articles/defender-for-iot/organizations/how-to-connect-sensor-by-proxy.md b/articles/defender-for-iot/organizations/how-to-connect-sensor-by-proxy.md index 2af897064deb4..e806005770844 100644 --- a/articles/defender-for-iot/organizations/how-to-connect-sensor-by-proxy.md +++ b/articles/defender-for-iot/organizations/how-to-connect-sensor-by-proxy.md @@ -25,7 +25,7 @@ The following diagram shows data going from Microsoft Defender for IoT to the Io ## Set up your system -For this scenario we will be installing, and configuring the latest version of [Squid](http://www.squid-cache.org/) on an Ubuntu 18 server. +For this scenario we'll be installing, and configuring the latest version of [Squid](http://www.squid-cache.org/) on an Ubuntu 18 server. > [!Note] > Microsoft Defender for IoT does not offer support for Squid or any other proxy service. diff --git a/articles/defender-for-iot/organizations/how-to-control-what-traffic-is-monitored.md b/articles/defender-for-iot/organizations/how-to-control-what-traffic-is-monitored.md index 71edbad4e6cc7..03d208023a423 100644 --- a/articles/defender-for-iot/organizations/how-to-control-what-traffic-is-monitored.md +++ b/articles/defender-for-iot/organizations/how-to-control-what-traffic-is-monitored.md @@ -156,7 +156,7 @@ If you're working with dynamic networks, you handle IP address changes that occu Changes might happen, for example, when a DHCP server assigns IP addresses. -Defining dynamic IP addresses on each sensor enables comprehensive, transparent support in instances of IP address changes. This ensures comprehensive reporting for each unique device. +Defining dynamic IP addresses on each sensor enables comprehensive, transparent support in instances of IP address changes. This activity ensures comprehensive reporting for each unique device. The sensor console presents the most current IP address associated with the device and indicates which devices are dynamic. For example: diff --git a/articles/defender-for-iot/organizations/how-to-create-and-manage-users.md b/articles/defender-for-iot/organizations/how-to-create-and-manage-users.md index 40af51155ee33..f1cbedfd0563d 100644 --- a/articles/defender-for-iot/organizations/how-to-create-and-manage-users.md +++ b/articles/defender-for-iot/organizations/how-to-create-and-manage-users.md @@ -101,7 +101,7 @@ This section describes how to define users. Cyberx, support, and administrator u If users aren't active at the keyboard or mouse for a specific time, they're signed out of their session and must sign in again. -When users haven't worked with their console mouse or keyboard for 30 minutes, a session sign out is forced. +When users haven't worked with their console mouse or keyboard for 30 minutes, a session sign-out is forced. This feature is enabled by default and on upgrade, but can be disabled. In addition, session counting times can be updated. Session times are defined in seconds. 
Definitions are applied per sensor and on-premises management console. @@ -203,9 +203,9 @@ You can recover the password for the on-premises management console or the senso **To recover the password for the on-premises management console, or the sensor**: -1. On the sign in screen of either the on-premises management console or the sensor, select **Password recovery**. The **Password recovery** screen opens. +1. On the sign-in screen of either the on-premises management console or the sensor, select **Password recovery**. The **Password recovery** screen opens. - :::image type="content" source="media/how-to-create-and-manage-users/password-recovery.png" alt-text="Screenshot of the Select Password recovery from the sign in screen of either the on-premises management console, or the sensor."::: + :::image type="content" source="media/how-to-create-and-manage-users/password-recovery.png" alt-text="Screenshot of the Select Password recovery from the sign-in screen of either the on-premises management console, or the sensor."::: 1. Select either **CyberX** or **Support** from the drop-down menu, and copy the unique identifier code. diff --git a/articles/defender-for-iot/organizations/how-to-create-data-mining-queries.md b/articles/defender-for-iot/organizations/how-to-create-data-mining-queries.md index f7c79acc710ff..42b4824356f5b 100644 --- a/articles/defender-for-iot/organizations/how-to-create-data-mining-queries.md +++ b/articles/defender-for-iot/organizations/how-to-create-data-mining-queries.md @@ -7,13 +7,13 @@ ms.topic: how-to # Run data mining queries -Using data mining queries to get dynamic, granular information about your network devices, including for specific time periods, internet connectivity, ports and protocols, firmware vrsions, programming commands, and device state. You can use data mining queries for: +Using data mining queries to get dynamic, granular information about your network devices, including for specific time periods, internet connectivity, ports and protocols, firmware versions, programming commands, and device state. You can use data mining queries for: - **SOC incident response**: Generate a report in real time to help deal with immediate incident response. For example, Data Mining can generate a report for a list of devices that might require patching. - **Forensics**: Generate a report based on historical data for investigative reports. - **Network security**: Generate a report that helps improve overall network security. For example, generate a report can be generated that lists devices with weak authentication credentials. - **Visibility**: Generate a report that covers all query items to view all baseline parameters of your network. -- **PLC security** Improve security by detecting PLCs in unsecure states for example Program and Remote states. +- **PLC security** Improve security by detecting PLCs in unsecure states, for example, Program and Remote states. Data mining information is saved and stored continuously, except for when a device is deleted. Data mining results can be exported and stored externally to a secure server. In addition, the sensor performs automatic daily backups to ensure system continuity and preservation of data. @@ -26,7 +26,7 @@ The following predefined reports are available. These queries are generated in r - **Internet activity**: Devices that are connected to the internet. - **CVEs**: A list of devices detected with known vulnerabilities, along with CVSSv2 risk scores. 
- **Excluded CVEs**: A list of all the CVEs that were manually excluded. It is possible to customize the CVE list manually so that the VA reports and attack vectors more accurately reflect your network by excluding or including particular CVEs and updating the CVSSv2 score accordingly. -- **Nonactive devices**: Devices that have not communicated for the past seven days. +- **Nonactive devices**: Devices that haven't communicated for the past seven days. - **Active devices**: Active network devices within the last 24 hours. Find these reports in **Analyze** > **Data Mining**. Reports are available for users with Administrator and Security Analyst permissions. Read only users can't access these reports. diff --git a/articles/defender-for-iot/organizations/how-to-create-trends-and-statistics-reports.md b/articles/defender-for-iot/organizations/how-to-create-trends-and-statistics-reports.md index a192751c16f73..cc1ccd97e47b9 100644 --- a/articles/defender-for-iot/organizations/how-to-create-trends-and-statistics-reports.md +++ b/articles/defender-for-iot/organizations/how-to-create-trends-and-statistics-reports.md @@ -50,7 +50,7 @@ Protocol dissection | Displays a pie chart that provides you with a look at the Active TCP connections | Displays a chart that shows the number of active TCP connections in the system. Incident by type | Displays a pie chart that shows the number of incidents by type. This is the number of alerts generated by each engine over a predefined time period. Devices by vendor | Displays a pie chart that shows the number of devices by vendor. The number of devices for a specific vendor is proportional to the size of that device’s vendor part of the disk relative to other device vendors. -Number of devices per VLAN | Displays a pie chart that shows the number of discovered devices per VLAN. The size of each slice of the pie is proportional to the number of discovered devices relative to the other slices. Each VLAN appears with the VLAN tag assigned by the sensor or name that you have manually added. +Number of devices per VLAN | Displays a pie chart that shows the number of discovered devices per VLAN. The size of each slice of the pie is proportional to the number of discovered devices relative to the other slices. Each VLAN appears with the VLAN tag assigned by the sensor or name that you've manually added. Top bandwidth by VLAN | Displays the bandwidth consumption by VLAN. By default, the widget shows five VLANs with the highest bandwidth usage. You can filter the data by the period presented in the widget. Select the down arrow to show more results. diff --git a/articles/defender-for-iot/organizations/how-to-gain-insight-into-global-regional-and-local-threats.md b/articles/defender-for-iot/organizations/how-to-gain-insight-into-global-regional-and-local-threats.md index f58eeadd8e70c..e531e64517408 100644 --- a/articles/defender-for-iot/organizations/how-to-gain-insight-into-global-regional-and-local-threats.md +++ b/articles/defender-for-iot/organizations/how-to-gain-insight-into-global-regional-and-local-threats.md @@ -9,7 +9,7 @@ ms.topic: how-to The site map in the on-premises management console helps you achieve full security coverage by dividing your network into geographical and logical segments that reflect your business topology: -- **Geographical facility level**: A site reflects a number of devices grouped according to a geographical location presented on the map. By default, Microsoft Defender for IoT provides you with a world map. 
You update the map to reflect your organizational or business structure. For example, use a map that reflects sites across a specific country, city, or industrial campus. When the site color changes on the map, it provides the SOC team with an indication of critical system status in the facility. +- **Geographical facility level**: A site reflects many devices grouped according to a geographical location presented on the map. By default, Microsoft Defender for IoT provides you with a world map. You update the map to reflect your organizational or business structure. For example, use a map that reflects sites across a specific country, city, or industrial campus. When the site color changes on the map, it provides the SOC team with an indication of critical system status in the facility. The map is interactive and enables opening each site and delving into this site's information. diff --git a/articles/defender-for-iot/organizations/how-to-install-software.md b/articles/defender-for-iot/organizations/how-to-install-software.md index 6190da88824de..929314f272ab4 100644 --- a/articles/defender-for-iot/organizations/how-to-install-software.md +++ b/articles/defender-for-iot/organizations/how-to-install-software.md @@ -185,7 +185,7 @@ This procedure describes how to add a secondary NIC if you've already installed ### Find your port -If you are having trouble locating the physical port on your device, you can use the following command to find your port: +If you're having trouble locating the physical port on your device, you can use the following command to find your port: ```bash sudo ethtool -p diff --git a/articles/defender-for-iot/organizations/how-to-investigate-all-enterprise-sensor-detections-in-a-device-inventory.md b/articles/defender-for-iot/organizations/how-to-investigate-all-enterprise-sensor-detections-in-a-device-inventory.md index ade9b88066cc9..72bb2e86d1522 100644 --- a/articles/defender-for-iot/organizations/how-to-investigate-all-enterprise-sensor-detections-in-a-device-inventory.md +++ b/articles/defender-for-iot/organizations/how-to-investigate-all-enterprise-sensor-detections-in-a-device-inventory.md @@ -1,14 +1,18 @@ --- -title: Learn about devices discovered by all enterprise sensors +title: Learn about devices discovered by all sensors description: Use the device inventory in the on-premises management console to get a comprehensive view of device information from connected sensors. Use import, export, and filtering tools to manage this information. ms.date: 11/09/2021 ms.topic: how-to --- -# Investigate all enterprise sensor detections in the device inventory +# Investigate all sensor detections in the device inventory You can view device information from connected sensors by using the *device inventory* in the on-premises management console. This feature gives you a comprehensive view of all network information. Use import, export, and filtering tools to manage this information. The status information about the connected sensor versions also appears. +For more information, see [Devices monitored by Defender for IoT](architecture.md#devices-monitored-by-defender-for-iot). + +## View the device inventory from an on-premises management console + :::image type="content" source="media/how-to-work-with-asset-inventory-information/device-inventory-data-table.png" alt-text="Screenshot of the device inventory data table."::: The following table describes the table columns in the device inventory. 
@@ -31,28 +35,17 @@ The following table describes the table columns in the device inventory. | **MAC Address** | The MAC address of the device. | | **Protocols** | The protocols that the device uses. | | **Unacknowledged Alerts** | The number of unhandled alerts associated with this device. | -| **Is Authorized** | The authorization status of the device:
                  - **True**: The device has been authorized.
                  - **False**: The device has not been authorized. | +| **Is Authorized** | The authorization status of the device:
                  - **True**: The device has been authorized.
                  - **False**: The device hasn't been authorized. | | **Is Known as Scanner** | Whether this device performs scanning-like activities in the network. | -| **Is Programming Device** | Whether this is a programming device:
                  - **True**: The device performs programming activities for PLCs, RTUs, and controllers, which are relevant to engineering stations.
                  - **False**: The device is not a programming device. | +| **Is Programming Device** | Whether this is a programming device:
                  - **True**: The device performs programming activities for PLCs, RTUs, and controllers, which are relevant to engineering stations.
                  - **False**: The device isn't a programming device. | | **Groups** | Groups in which this device participates. | | **Last Activity** | The last activity that the device performed. | | **Discovered** | When this device was first seen in the network. | -| **PLC mode (preview)** | The PLC operating mode includes the Key state (physical) and run state (logical). Possible **Key** states include, Run, Program, Remote, Stop, Invalid, Programming Disabled.Possible Run. The possible **Run** states are Run, Program, Stop, Paused, Exception, Halted, Trapped, Idle, Offline. if both states are the same, only oe state is presented. | - -## What is an Inventory device? - -The Defender for IoT Device Inventory displays an extensive range of device attributes that are detected by sensors monitoring organizational networks and managed endpoints. Defender for IoT will identify and classify devices as a single unique network device in the inventory for: - -1. Standalone IT/OT/IoT devices (w/ 1 or multiple NICs) -1. Devices composed of multiple backplane components (including all racks/slots/modules) -1. Devices acting as network infrastructure such as Switch/Router (w/ multiple NICs). - -Public internet IP addresses, multicast groups, and broadcast groups are not considered inventory devices. -Devices that have been inactive for more than 60 days are classified as inactive Inventory devices. +| **PLC mode (preview)** | The PLC operating mode includes the Key state (physical) and run state (logical). Possible **Key** states include, Run, Program, Remote, Stop, Invalid, Programming Disabled.Possible Run. The possible **Run** states are Run, Program, Stop, Paused, Exception, Halted, Trapped, Idle, Offline. if both states are the same, only one state is presented. | ## Integrate data into the enterprise device inventory -Data integration capabilities let you enhance the data in the device inventory with information from other enterprise resources. These sources include CMDBs, DNS, firewalls, and Web APIs. +Data integration capabilities let you enhance the data in the device inventory with information from other resources. These sources include CMDBs, DNS, firewalls, and Web APIs. You can use this information to learn. For example: @@ -78,7 +71,7 @@ You can integrate data by either: - Running customized scripts that Defender for IoT provides -:::image type="content" source="media/how-to-work-with-asset-inventory-information/enterprise-data-integrator-graph.png" alt-text="Diagram of the enterprise data integrator."::: +:::image type="content" source="media/how-to-work-with-asset-inventory-information/enterprise-data-integrator-graph.png" alt-text="Diagram of the data integrator."::: You can work with Defender for IoT technical support to set up your system to receive Web API queries. @@ -102,7 +95,7 @@ To add data manually: 6. In the upper-right corner of the **Device Inventory** window, select :::image type="icon" source="media/how-to-work-with-asset-inventory-information/menu-icon-device-inventory.png" border="false":::, select **Import Manual Input Columns**, and browse to the CSV file. The new data appears in the **Device Inventory** table. -To integrate data from other enterprise entities: +To integrate data from other entities: 1. In the upper-right corner of the **Device Inventory** window, select :::image type="icon" source="media/how-to-work-with-asset-inventory-information/menu-icon-device-inventory.png" border="false"::: and select **Export All Device Inventory**. 
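For illustration only, one way to add a manual column to the exported inventory CSV before re-importing it through **Import Manual Input Columns** is sketched below. The file names and the column value are placeholders, and the exported column layout depends on your console, so adjust accordingly.

```bash
# Append a custom "Location" column to every row of the exported inventory CSV.
# File names and the "Building-A" value are placeholders for illustration only.
awk -F',' 'NR==1 {print $0 ",Location"; next} {print $0 ",Building-A"}' \
  exported-device-inventory.csv > manual-input-columns.csv
```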
diff --git a/articles/defender-for-iot/organizations/how-to-investigate-sensor-detections-in-a-device-inventory.md b/articles/defender-for-iot/organizations/how-to-investigate-sensor-detections-in-a-device-inventory.md index 9df8fdb14b00e..84b436aeb4533 100644 --- a/articles/defender-for-iot/organizations/how-to-investigate-sensor-detections-in-a-device-inventory.md +++ b/articles/defender-for-iot/organizations/how-to-investigate-sensor-detections-in-a-device-inventory.md @@ -20,19 +20,8 @@ Options are available to: - Import Windows registry details. - Create groups for display in the device map. - -## What is an inventory device? - -The Defender for IoT Device inventory displays an extensive range of asset attributes that are detected by sensors monitoring the organization's networks and managed endpoints. - -Defender for IoT will identify and classify devices as a single unique network device in the inventory for: - -- Standalone IT/OT/IoT devices (w/ 1 or multiple NICs) -- Devices composed of multiple backplane components (including all racks/slots/modules) -- Devices acting as network infrastructure such as Switch/Router (w/ multiple NICs). -Public internet IP addresses, multicast groups, and broadcast groups aren't considered inventory devices. -Devices that have been inactive for more than 60 days are classified as inactive inventory devices. +For more information, see [Devices monitored by Defender for IoT](architecture.md#devices-monitored-by-defender-for-iot). ## View device attributes in the inventory diff --git a/articles/defender-for-iot/organizations/how-to-manage-individual-sensors.md b/articles/defender-for-iot/organizations/how-to-manage-individual-sensors.md index 3e6373adc5245..e67922af3e24d 100644 --- a/articles/defender-for-iot/organizations/how-to-manage-individual-sensors.md +++ b/articles/defender-for-iot/organizations/how-to-manage-individual-sensors.md @@ -27,7 +27,7 @@ You can continue to work with Defender for IoT features even if the activation f ### About activation files for cloud-connected sensors -Sensors that are cloud connected are not limited by time periods for their activation file. The activation file for cloud-connected sensors is used to ensure the connection to Defender for IoT. +Sensors that are cloud connected aren't limited by time periods for their activation file. The activation file for cloud-connected sensors is used to ensure the connection to Defender for IoT. ### Upload new activation files @@ -65,9 +65,9 @@ You might need to upload a new activation file for an onboarded sensor when: ### Troubleshoot activation file upload -You'll receive an error message if the activation file could not be uploaded. The following events might have occurred: +You'll receive an error message if the activation file couldn't be uploaded. The following events might have occurred: -- **For locally connected sensors**: The activation file is not valid. If the file is not valid, go to [Defender for IoT in the Azure portal](https://portal.azure.com/#blade/Microsoft_Azure_IoT_Defender/IoTDefenderDashboard/Getting_Started). On the **Sensor Management** page, select the sensor with the invalid file, and download a new activation file. +- **For locally connected sensors**: The activation file isn't valid. If the file isn't valid, go to [Defender for IoT in the Azure portal](https://portal.azure.com/#blade/Microsoft_Azure_IoT_Defender/IoTDefenderDashboard/Getting_Started). 
On the **Sensor Management** page, select the sensor with the invalid file, and download a new activation file. - **For cloud-connected sensors**: The sensor can't connect to the internet. Check the sensor's network configuration. If your sensor needs to connect through a web proxy to access the internet, verify that your proxy server is configured correctly on the **Sensor Network Configuration** screen. Verify that \*.azure-devices.net:443 is allowed in the firewall and/or proxy. If wildcards are not supported or you want more control, the FQDN for your specific endpoint (either a sensor, or for legacy connections, an IoT hub) should be opened in your firewall and/or proxy. For more information, see [Reference - IoT Hub endpoints](../../iot-hub/iot-hub-devguide-endpoints.md). diff --git a/articles/defender-for-iot/organizations/how-to-manage-subscriptions.md b/articles/defender-for-iot/organizations/how-to-manage-subscriptions.md index 1b861a6696b68..169af1b13188b 100644 --- a/articles/defender-for-iot/organizations/how-to-manage-subscriptions.md +++ b/articles/defender-for-iot/organizations/how-to-manage-subscriptions.md @@ -9,7 +9,7 @@ ms.topic: how-to Your Defender for IoT deployment is managed through your Microsoft Defender for IoT account subscriptions. You can onboard, edit, and offboard your subscriptions to Defender for IoT in the [Azure portal](https://portal.azure.com/#blade/Microsoft_Azure_IoT_Defender/IoTDefenderDashboard/Getting_Started). -For each subscription, you will be asked to define a number of *committed devices*. Committed devices are the approximate number of devices that will be monitored in your enterprise. +For each subscription, you'll be asked to define a number of *committed devices*. Committed devices are the approximate number of devices that will be monitored in your enterprise. > [!NOTE] > If you've come to this page because you are a [former CyberX customer](https://blogs.microsoft.com/blog/2020/06/22/microsoft-acquires-cyberx-to-accelerate-and-secure-customers-iot-deployments) and have questions about your account, reach out to your account manager for guidance. @@ -17,7 +17,7 @@ For each subscription, you will be asked to define a number of *committed device ## Subscription billing -You are billed based on the number of committed devices associated with each subscription. +You're billed based on the number of committed devices associated with each subscription. The billing cycle for Microsoft Defender for IoT follows a calendar month. Changes you make to committed devices during the month are implemented one hour after confirming your update, and are reflected in your monthly bill. Subscription *offboarding* also takes effect one hour after confirming the offboard. @@ -25,7 +25,11 @@ Your enterprise may have more than one paying entity. If this is the case you ca Before you subscribe, you should have a sense of how many devices you would like your subscriptions to cover. -Users can also work with trial subscription, which supports monitoring a limited number of devices for 30 days. See [Microsoft Defender for IoT pricing](https://azure.microsoft.com/pricing/details/defender-for-cloud/#defenderforiot) information on committed device prices. +Users can also work with trial subscription, which supports monitoring a limited number of devices for 30 days. See [Microsoft Defender for IoT pricing](https://azure.microsoft.com/pricing/details/iot-defender/) information on committed device prices. + +### What's a device? 
+ +[!INCLUDE [devices-inventoried](includes/devices-inventoried.md)] ## Requirements @@ -46,6 +50,24 @@ If you already have access to an Azure subscription, but it isn't listed when su Azure **Subscription Owners** and **Subscription Contributor**s can onboard, update, and offboard Microsoft Defender for IoT subscriptions. +### Calculate the number of devices you need to monitor + +When onboarding or editing your Defender for IoT plan, you'll need to know how many devices you want to monitor. + +**To calculate the number of devices you need to monitor**: + +Collect the total number of devices in your network and remove: + +- **Duplicate devices that have the same IP or MAC address**. When detected, the duplicates are automatically removed by Defender for IoT. + +- **Duplicate devices that have the same ID**. These are the same devices, seen by the same sensor, with different field values. For such devices, check the last time each device had activity and use the latest device only. + +- **Inactive devices**, with no traffic for more than 60 days. + +- **Broadcast / multicast devices**. These represent unique addresses but not unique devices. + +For more information, see [What's a device?](#whats-a-device) + ## Onboard a trial subscription If you would like to evaluate Defender for IoT, you can use a trial subscription. The trial is valid for 30 days and supports 1000 committed devices. Using the trial lets you deploy one or more Defender for IoT sensors on your network. Use the sensors to monitor traffic, analyze data, generate alerts, learn about network risks and vulnerabilities, and more. The trial also allows you to download an on-premises management console to view aggregated information generated by sensors. @@ -76,11 +98,11 @@ This section describes how to onboard a subscription. 1. Select **Subscribe**. 1. Confirm your subscription. -1. If you have not done so already, onboard a sensor or Set up a sensor. +1. If you haven't done so already, onboard a sensor or Set up a sensor. ## Update committed devices in a subscription -You may need to update your subscription with more committed devices, or more fewer committed devices. More devices may require monitoring if, for example, you are increasing existing site coverage, discovered more devices than expected or there are network changes such as adding switches. +You may need to update your subscription with more committed devices, or fewer committed devices. More devices may require monitoring if, for example, you are increasing existing site coverage, discovered more devices than expected or there are network changes such as adding switches. **To update a subscription:** 1. Go to [Defender for IoT: Getting started](https://portal.azure.com/#blade/Microsoft_Azure_IoT_Defender/IoTDefenderDashboard/Getting_Started) in the Azure portal. diff --git a/articles/defender-for-iot/organizations/how-to-manage-the-alert-event.md b/articles/defender-for-iot/organizations/how-to-manage-the-alert-event.md index 3c7babf156691..89ca75f8e6b2b 100644 --- a/articles/defender-for-iot/organizations/how-to-manage-the-alert-event.md +++ b/articles/defender-for-iot/organizations/how-to-manage-the-alert-event.md @@ -209,8 +209,8 @@ Users working with alerts on the Defender for IoT portal on Azure should underst Parameter | Description |--|--| | **Alert Exclusion rules**| Alert *Exclusion rules* defined in the on-premises management console impact the alerts triggered by managed sensors. 
As a result, the alerts excluded by these rules also won't be displayed in the Alerts page on the portal. For more information, see [Create alert exclusion rules](how-to-work-with-alerts-on-premises-management-console.md#create-alert-exclusion-rules). -| **Managing alerts on your sensor** | If you change the status of an alert, or learn or mute an alert on a sensor, the changes are not updated in the Defender for IoT Alerts page on the portal. This means that this alert will stay open on the portal. However another alert won't be triggered from sensor for this activity. -| **Managing alerts in the portal Alerts page** | Changing the status of an alert on the Azure portal, Alerts page or changing the alert severity on the portal, does not impact the alert status or severity in on-premises sensors. +| **Managing alerts on your sensor** | If you change the status of an alert, or learn or mute an alert on a sensor, the changes are not updated in the Defender for IoT Alerts page on the portal. This means that this alert will stay open on the portal. However another alert won't be triggered from the sensor for this activity. +| **Managing alerts in the portal Alerts page** | Changing the status of an alert on the Azure portal, Alerts page or changing the alert severity on the portal, doesn't impact the alert status or severity in on-premises sensors. ## Next steps diff --git a/articles/defender-for-iot/organizations/how-to-set-up-high-availability.md b/articles/defender-for-iot/organizations/how-to-set-up-high-availability.md index 32d4ec251644e..4d2c4e3abc447 100644 --- a/articles/defender-for-iot/organizations/how-to-set-up-high-availability.md +++ b/articles/defender-for-iot/organizations/how-to-set-up-high-availability.md @@ -6,10 +6,13 @@ ms.topic: how-to --- # About high availability -Increase the resiliency of your Defender for IoT deployment by installing an on-premises management console high availability appliance. High availability deployments ensure your managed sensors continuously report to an active on-premises management console. +Increase the resiliency of your Defender for IoT deployment by configuring high availability on your on-premises management console. High availability deployments ensure your managed sensors continuously report to an active on-premises management console. This deployment is implemented with an on-premises management console pair that includes a primary and secondary appliance. +> [!NOTE] +> In this document, the principal on-premises management console is referred to as the primary, and the agent is referred to as the secondary. + ## About primary and secondary communication When a primary and secondary on-premises management console is paired: @@ -18,17 +21,17 @@ When a primary and secondary on-premises management console is paired: When validation is `ON`, the appliance should be able to establish connection to the CRL server defined by the certificate. -- The primary on-premises management console data is automatically backed up to the secondary on-premises management console every 10 minutes. The on-premises management console configurations and device data are backed up. PCAP files and logs are not included in the backup. You can back up and restore of PCAPs and logs manually. +- The primary on-premises management console data is automatically backed up to the secondary on-premises management console every 10 minutes. The on-premises management console configurations and device data are backed up. PCAP files and logs are not included in the backup. 
You can back up and restore PCAPs and logs manually. -- The primary setup at the management console is duplicated on the secondary; for example, system settings. If these settings are updated on the primary, they are also updated on the secondary. +- The primary setup on the management console is duplicated on the secondary. For example, if the system settings are updated on the primary, they're also updated on the secondary. - Before the license of the secondary expires, you should define it as the primary in order to update the license. ## About failover and failback -If a sensor cannot connect to the primary on-premises management console, it automatically connects to the secondary. Your system will be supported by both the primary and secondary simultaneously, if less than half of the sensors are communicating with the secondary. The secondary takes over when more than half of the sensors are communicating with it. Fail over from the primary to the secondary takes approximately three minutes. When the failover occurs, the primary on-premises management console freezes. When this happens, you can sign in to the secondary using the same sign-in credentials. +If a sensor can't connect to the primary on-premises management console, it automatically connects to the secondary. Your system will be supported by both the primary and secondary simultaneously, if less than half of the sensors are communicating with the secondary. The secondary takes over when more than half of the sensors are communicating with it. Fail over from the primary to the secondary takes approximately three minutes. When the failover occurs, the primary on-premises management console freezes. When this happens, you can sign in to the secondary using the same sign-in credentials. -During failover, sensors continue attempting to communicate with the primary appliance. When more than half the managed sensors succeed to communicate with the primary, the primary is restored. The following message appears at the secondary console when the primary is restored. +During failover, sensors continue attempting to communicate with the primary appliance. When more than half the managed sensors succeed to communicate with the primary, the primary is restored. The following message appears on the secondary console when the primary is restored: :::image type="content" source="media/how-to-set-up-high-availability/secondary-console-message.png" alt-text="Screenshot of a message that appears at the secondary console when the primary is restored."::: @@ -40,7 +43,7 @@ The installation and configuration procedures are performed in four main stages: 1. Install an on-premises management console primary appliance. -1. Configure the on-premises management console primary appliance. For example, scheduled backup settings, VLAN settings. See the on-premises management console user guide for details. All settings are applied to the secondary appliance automatically after pairing. +1. Configure the on-premises management console primary appliance. For example, scheduled backup settings, VLAN settings. For more information, see [Manage the on-premises management console](how-to-manage-the-on-premises-management-console.md). All settings are applied to the secondary appliance automatically after pairing. 1. Install an on-premises management console secondary appliance. For more information, see [About the Defender for IoT Installation](how-to-install-software.md). 
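For reference, a consolidated sketch of the trusted-hosts commands used in the update procedure later in this article: run on the primary to remove the secondary's domain, and mirrored on the secondary with the primary's domain. The domain value shown is a placeholder.

```bash
# List the currently connected appliances and note the domain of the peer to remove.
cyberx-management-trusted-hosts-list

# Remove the peer appliance's domain from the trusted hosts (placeholder value shown).
sudo cyberx-management-trusted-hosts-remove -d secondary.example.local

# Verify that the certificate configuration is applied correctly.
sudo cyberx-management-trusted-hosts-apply
```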
@@ -48,9 +51,9 @@ The installation and configuration procedures are performed in four main stages: ## High availability requirements -Verify that you have met the following high availability requirements: +Verify that you've met the following high availability requirements: -- Certificate requirements +- [Certificate requirements](how-to-manage-the-on-premises-management-console.md#manage-certificates) - Software and hardware requirements @@ -64,7 +67,7 @@ Verify that you have met the following high availability requirements: ### Network access requirements -Verify if your organizational security policy allows you to hav access to the following services on the primary and secondary on-premises management console. These services also allow the connection between the sensors and secondary on-premises management console: +Verify if your organizational security policy allows you to have access to the following services on the primary and secondary on-premises management console. These services also allow the connection between the sensors and secondary on-premises management console: |Port|Service|Description| |----|-------|-----------| @@ -92,8 +95,6 @@ Verify that both the primary and secondary on-premises management console applia sudo cyberx-management-trusted-hosts-add -ip -token ``` - >[!NOTE] - > In this document, the principal on-premises management console is referred to as the primary, and the agent is referred to as the secondary. 1. Enter the IP address of the secondary appliance in the `````` field and select Enter. The IP address is then validated, and the SSL certificate is downloaded to the primary. Entering the IP address also associates the sensors to the secondary appliance. @@ -137,15 +138,68 @@ The core application logs can be exported to the Defender for IoT support team t ## Update the on-premises management console with high availability -Perform the high availability update in the following order. Make sure each step is complete before you begin a new step. +To update an on-premises management console that has high availability configured, you will need to: + +1. Disconnect the high availability from both the primary and secondary appliances. +1. Update the appliances to the new version. +1. Reconfigure the high availability back onto both appliances. + +Perform the update in the following order. Make sure each step is complete before you begin a new step. + +**To update an on-premises management console with high availability configured**: + +1. Disconnect the high availability from both the primary and secondary appliances: + + **On the primary:** + + 1. Get the list of the currently connected appliances. Run: + + ```bash + cyberx-management-trusted-hosts-list + ``` + + 1. Find the domain associated with the secondary appliance and copy it to your clipboard. For example: + + :::image type="content" source="media/how-to-set-up-high-availability/update-high-availability-domain.jpg" alt-text="Screenshot showing the domain associated with the secondary appliance."::: + + 1. Remove the secondary domain from the list of trusted hosts. Run: + + ```bash + sudo cyberx-management-trusted-hosts-remove -d [Secondary domain] + ``` + + 1. Verify that the certificate is installed correctly. Run: + + ```bash + sudo cyberx-management-trusted-hosts-apply + ``` + + **On the secondary:** + + 1. Get the list of the currently connected appliances. Run: + + ```bash + cyberx-management-trusted-hosts-list + ``` + + 1. 
Find the domain associated with the primary appliance and copy it to your clipboard. -**To update with high availability**: + 1. Remove the primary domain from the list of trusted hosts. Run: + + ```bash + sudo cyberx-management-trusted-hosts-remove -d [Primary domain] + ``` + + 1. Verify that the certificate is installed correctly. Run: + + ```bash + sudo cyberx-management-trusted-hosts-apply + ``` -1. Update the primary on-premises management console. +1. Update both the primary and secondary appliances to the new version. For more information, see [Update the software version](how-to-manage-the-on-premises-management-console.md#update-the-software-version). -1. Update the secondary on-premises management console. +1. Set up high availability again, on both the primary and secondary appliances. For more information, see [Create the primary and secondary pair](#create-the-primary-and-secondary-pair). -1. Update the sensors. ## Next steps diff --git a/articles/defender-for-iot/organizations/how-to-set-up-snmp-mib-monitoring.md b/articles/defender-for-iot/organizations/how-to-set-up-snmp-mib-monitoring.md index 4e71bdd07254d..a1f7919609aa5 100644 --- a/articles/defender-for-iot/organizations/how-to-set-up-snmp-mib-monitoring.md +++ b/articles/defender-for-iot/organizations/how-to-set-up-snmp-mib-monitoring.md @@ -1,7 +1,7 @@ --- title: Set up SNMP MIB monitoring description: You can perform sensor health monitoring by using SNMP. The sensor responds to SNMP queries sent from an authorized monitoring server. -ms.date: 01/31/2022 +ms.date: 05/31/2022 ms.topic: how-to --- @@ -35,7 +35,7 @@ Note that: ## Prerequisites for AES and 3-DES Encryption Support for SNMP Version 3 - The network management station (NMS) must support Simple Network Management Protocol (SNMP) Version 3 to be able to use this feature. -- It is important to understand the SNMP architecture and the terminology of the architecture to understand the security model used and how the security model interacts with the other subsystems in the architecture. +- It's important to understand the SNMP architecture and the terminology of the architecture to understand the security model used and how the security model interacts with the other subsystems in the architecture. - Before you begin configuring SNMP monitoring, you need to open the port UDP 161 in the firewall. @@ -50,7 +50,7 @@ Note that: | Parameter | Description | |--|--| - | **Username** | The SNMP username can contain up to 32 characters and include any combination of alphanumeric characters (uppercase letters, lowercase letters, and numbers). Spaces are not allowed.

                  The username for the SNMP v3 authentication must be configured on the system and on the SNMP server. | + | **Username** | The SNMP username can contain up to 32 characters and include any combination of alphanumeric characters (uppercase letters, lowercase letters, and numbers). Spaces aren't allowed.

                  The username for the SNMP v3 authentication must be configured on the system and on the SNMP server. | | **Password** | Enter a case-sensitive authentication password. The authentication password can contain 8 to 12 characters and include any combination of alphanumeric characters (uppercase letters, lowercase letters, and numbers).

                  The username for the SNMP v3 authentication must be configured on the system and on the SNMP server. | | **Auth Type** | Select MD5 or SHA-1. | | **Encryption** | Select DES (56 bit key size)[1](#1) or AES (AES 128 bits supported)[2](#2). | diff --git a/articles/defender-for-iot/organizations/how-to-view-information-per-zone.md b/articles/defender-for-iot/organizations/how-to-view-information-per-zone.md index 224d9b0fe4aac..d19952e1a7f72 100644 --- a/articles/defender-for-iot/organizations/how-to-view-information-per-zone.md +++ b/articles/defender-for-iot/organizations/how-to-view-information-per-zone.md @@ -51,7 +51,7 @@ The following tools are available for viewing devices and device information fro To view alerts associated with a specific zone: -- Select the alert icon form the **Zone** window. +- Select the alert icon from the **Zone** window. :::image type="content" source="media/how-to-work-with-asset-inventory-information/business-unit-view-v2.png" alt-text="The default Business Unit view with examples."::: @@ -77,7 +77,7 @@ The following additional zone information is available: - **Connectivity status**: If a sensor is disconnected, connect from the sensor. See [Connect sensors to the on-premises management console](how-to-activate-and-set-up-your-on-premises-management-console.md#connect-sensors-to-the-on-premises-management-console). -- **Update progress**: If the connected sensor is being upgraded, upgrade statuses will appear. During upgrade, the on-premises management console does not receive device information from the sensor. +- **Update progress**: If the connected sensor is being upgraded, upgrade statuses will appear. During upgrade, the on-premises management console doesn't receive device information from the sensor. ## Next steps diff --git a/articles/defender-for-iot/organizations/how-to-work-with-the-sensor-device-map.md b/articles/defender-for-iot/organizations/how-to-work-with-the-sensor-device-map.md index 264bb02779b87..5183ef1b400b1 100644 --- a/articles/defender-for-iot/organizations/how-to-work-with-the-sensor-device-map.md +++ b/articles/defender-for-iot/organizations/how-to-work-with-the-sensor-device-map.md @@ -7,7 +7,7 @@ ms.topic: how-to # Investigate sensor detections in the Device map -The Device map provides a graphical representation of network devices detected, as well as the connections between them. Use the map to: +The Device map provides a graphical representation of network devices detected, and the connections between them. Use the map to: - Retrieve, analyze, and manage device information. @@ -24,7 +24,7 @@ The Device map provides a graphical representation of network devices detected, ## Map search and layout tools -A variety of map tools help you gain insight into devices and connections of interest to you. +A variety of map tools help you gain insight into devices and connections of interest to you. - [Basic search tools](#basic-search-tools) - [Group highlight and filters tools](#group-highlight-and-filters-tools) - [Map display tools](#map-display-tools) @@ -48,8 +48,8 @@ When you search by IP or MAC address, the map displays the device that you searc Filter or highlight the map based on default and custom device groups. -- Filtering omits the devices that are not in the selected group. -- Highlights displays all devices and highlights the selected items in the group in blue. +- Filtering omits the devices that aren't in the selected group. 
+- Highlights display all devices and highlights the selected items in the group in blue. :::image type="content" source="media/how-to-work-with-maps/group-highlight-and-filters-v2.png" alt-text="Screenshot of the group highlights and filters."::: @@ -65,7 +65,7 @@ The following predefined groups are available: | Group name | Description | |--|--| | **Known applications** | Devices that use reserved ports, such as TCP. | -| **non-standard ports (default)** | Devices that use non-standard ports or ports that have not been assigned an alias. | +| **non-standard ports (default)** | Devices that use non-standard ports or ports that haven't been assigned an alias. | | **OT protocols (default)** | Devices that handle known OT traffic. | | **Authorization (default)** | Devices that were discovered in the network during the learning process or were officially authorized on the network. | | **Device inventory filters** | Devices grouped according to the filters save in the Device Inventory table. | @@ -76,7 +76,7 @@ The following predefined groups are available: | **Cross subnet connections** | Devices that communicate from one subnet to another subnet. | | **Attack vector simulations** | Vulnerable devices detected in attack vector reports. To view these devices on the map, select the **Display on Device Map** checkbox when generating the Attack Vector. :::image type="content" source="media/how-to-work-with-maps/add-attack-v3.png" alt-text="Screenshot of the Add Attack Vector Simulations":::| | **Last seen** | Devices grouped by the time frame they were last seen, for example: One hour, six hours, one day, seven days. | -| **Not In Active Directory** | All non-PLC devices that are not communicating with the Active Directory. | +| **Not In Active Directory** | All non-PLC devices that aren't communicating with the Active Directory. | For information about creating custom groups, see [Define custom groups](#define-custom-groups). @@ -131,7 +131,7 @@ Overall connections are displayed. **To view specific connections:** 1. Select a device in the map. -1. Specific connections between devices are displayed in blue. In addition, you will see connections that cross various Purdue levels. +1. Specific connections between devices are displayed in blue. In addition, you'll see connections that cross various Purdue levels. :::image type="content" source="media/how-to-work-with-maps/connections-purdue-level.png" alt-text="Screenshot of the detailed map view." lightbox="media/how-to-work-with-maps/connections-purdue-level.png" ::: @@ -142,8 +142,8 @@ By default, IT devices are automatically aggregated by subnet, so that the map v Each subnet is presented as a single entity on the Device map. Options are available to expand subnets to see details; and collapse subnets or hide them. **To expand an IT subnet:** -1. Right-click the icon on the map the represents the IT network and select **Expand Network**. -1. A confirmation box appears, notifying you that the layout change cannot be redone. +1. Right-click the icon on the map that represents the IT network and select **Expand Network**. +1. A confirmation box appears, notifying you that the layout change can't be redone. 1. Select **OK**. The IT subnet elements appear on the map. 
**To collapse an IT subnet:** @@ -214,7 +214,7 @@ The following labels and indicators may appear on devices on the map: | :::image type="content" source="media/how-to-work-with-maps/amount-alerts-v2.png" alt-text="Screenshot of the number of alerts"::: | Number of alerts associated with the device | | :::image type="icon" source="media/how-to-work-with-maps/type-v2.png" border="false"::: | Device type icon, for example storage, PLC or historian. | | :::image type="content" source="media/how-to-work-with-maps/grouped-v2.png" alt-text="Screenshot of devices grouped together."::: | Number of devices grouped in a subnet in an IT network. In this example 8. | -| :::image type="content" source="media/how-to-work-with-maps/not-authorized-v2.png" alt-text="Screenshot of the device learning period"::: | A device that was detected after the Learning period and was not authorized as a network device. | +| :::image type="content" source="media/how-to-work-with-maps/not-authorized-v2.png" alt-text="Screenshot of the device learning period"::: | A device that was detected after the Learning period and wasn't authorized as a network device. | | Solid line | Logical connection between devices | | :::image type="content" source="media/how-to-work-with-maps/new-v2.png" alt-text="Screenshot of a new device discovered after learning is complete."::: | New device discovered after Learning is complete. | @@ -248,10 +248,10 @@ This section describes device details. | Location | The Purdue layer identified by the sensor for this device, including:
                  - Automatic
                  - Process Control
                  - Supervisory
                  - Enterprise | | Description | A free text field.
                  Add more information about the device. | | Attributes | Additional information was discovered on the device. For example, view the PLC Run and Key state, the secure status of the PLC, or information on when the state changed.
                  The information is read only and cannot be updated from the Attributes section. | -| Scanner or Programming device | **Scanner**: Enable this option if you know that this device is known as scanner and there is no need to alert you about it.
                  **Programming Device**: Enable this option if you know that this device is known as a programming device and is used to make programming changes. Identifying it as a programming device will prevent alerts for programming changes originating from this asset. | +| Scanner or Programming device | **Scanner**: Enable this option if you know that this device is known as a scanner and there's no need to alert you about it.
                  **Programming Device**: Enable this option if you know that this device is known as a programming device and is used to make programming changes. Identifying it as a programming device will prevent alerts for programming changes originating from this asset. | | Network Interfaces | The device interfaces. A RO field. | | Protocols | The protocols used by the device. A RO field. | -| Firmware | If Backplane information is available, firmware information will not be displayed. | +| Firmware | If Backplane information is available, firmware information won't be displayed. | | Address | The device IP address. | | Serial | The device serial number. | | Module Address | The device model and slot number or ID. | @@ -346,13 +346,13 @@ This table lists device types you can manually assign to a device. ### Delete devices -You may want to delete a device if the information learned is not relevant. For example, +You may want to delete a device if the information learned isn't relevant. For example, - A partner contractor at an engineering workstation connects temporarily to perform configuration updates. After the task is completed, the device is removed. - Due to changes in the network, some devices are no longer connected. -If you do not delete the device, the sensor will continue monitoring it. After 60 days, a notification will appear, recommending that you delete. +If you don't delete the device, the sensor will continue monitoring it. After 60 days, a notification will appear, recommending that you delete. You may receive an alert indicating that the device is unresponsive if another device tries to access it. In this case, your network may be misconfigured. @@ -382,7 +382,7 @@ The event timeline presents the merge event. :::image type="content" source="media/how-to-work-with-maps/events-time.png" alt-text="Screenshot of an event timeline with merged events."::: -You cannot undo a device merge. If you mistakenly merged two devices, delete the device and wait for the sensor to rediscover both. +You can't undo a device merge. If you mistakenly merged two devices, delete the device and wait for the sensor to rediscover both. **To merge devices:** @@ -398,7 +398,7 @@ You cannot undo a device merge. If you mistakenly merged two devices, delete the ### Authorize and unauthorize devices -During the Learning period, all the devices discovered in the network are identified as authorized devices. The **Authorized** label does not appear on these devices in the Device map. +During the Learning period, all the devices discovered in the network are identified as authorized devices. The **Authorized** label doesn't appear on these devices in the Device map. When a device is discovered after the Learning period, it appears as an unauthorized device. In addition to seeing unauthorized devices in the map, you can also see them in the Device Inventory. 
diff --git a/articles/defender-for-iot/organizations/how-to-work-with-threat-intelligence-packages.md b/articles/defender-for-iot/organizations/how-to-work-with-threat-intelligence-packages.md index 670aae45fee1e..a28ebc7cfa4ac 100644 --- a/articles/defender-for-iot/organizations/how-to-work-with-threat-intelligence-packages.md +++ b/articles/defender-for-iot/organizations/how-to-work-with-threat-intelligence-packages.md @@ -102,7 +102,7 @@ This option is available for both *cloud connected* and *locally managed* sensor ## Review package update status on the sensor ## -The package update status and version information is displayed in the sensor **System Settings**, **Threat Intelligence** section. +The package update status and version information are displayed in the sensor **System Settings**, **Threat Intelligence** section. ## Review package information for cloud connected sensors ## diff --git a/articles/defender-for-iot/organizations/includes/devices-inventoried.md b/articles/defender-for-iot/organizations/includes/devices-inventoried.md new file mode 100644 index 0000000000000..41b1f30c42f17 --- /dev/null +++ b/articles/defender-for-iot/organizations/includes/devices-inventoried.md @@ -0,0 +1,22 @@ +--- +title: include +author: batamig +ms.date: 06/01/2022 +ms.topic: include +--- + + + +Defender for IoT considers any of the following as single and unique network devices: + +- Managed or unmanaged standalone IT/OT/IoT devices, with one or more NICs +- Devices with multiple backplane components, including all racks, slots, or modules +- Devices that provide network infrastructure, such as switches or routers with multiple NICs + +The following items aren't monitored as devices, and don't appear in the Defender for IoT device inventories: + +- Public internet IP addresses +- Multi-cast groups +- Broadcast groups + +Devices that are inactive for more than 60 days are classified as *inactive* inventory devices. \ No newline at end of file diff --git a/articles/defender-for-iot/organizations/integrate-with-active-directory.md b/articles/defender-for-iot/organizations/integrate-with-active-directory.md index 19e25350ad275..20686cb55b146 100644 --- a/articles/defender-for-iot/organizations/integrate-with-active-directory.md +++ b/articles/defender-for-iot/organizations/integrate-with-active-directory.md @@ -59,4 +59,4 @@ You can associate Active Directory groups defined here with specific permission ## Next steps -For more information, see [how to create and manage users](/azure/defender-for-iot/organizations/how-to-create-and-manage-users). \ No newline at end of file +For more information, see [how to create and manage users](./how-to-create-and-manage-users.md). 
\ No newline at end of file diff --git a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/and-condition.png b/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/and-condition.png deleted file mode 100644 index 29c23b95f5016..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/and-condition.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/custom-alert-rules.png b/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/custom-alert-rules.png deleted file mode 100644 index 1ad0f82996146..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/custom-alert-rules.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/define-conditions.png b/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/define-conditions.png deleted file mode 100644 index 7ae30883e68ef..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/define-conditions.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/export-logs-details.png b/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/export-logs-details.png deleted file mode 100644 index 31c4e2add663f..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/export-logs-details.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/horizon-from-the-menu.png b/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/horizon-from-the-menu.png deleted file mode 100644 index 1a3513e4795f1..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/horizon-from-the-menu.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/horizon-overview.png b/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/horizon-overview.png deleted file mode 100644 index eaa43e7321376..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/horizon-overview.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/horizon-plugin.png b/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/horizon-plugin.png deleted file mode 100644 index c9f041e8f2328..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/horizon-plugin.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/infrastructure.png b/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/infrastructure.png deleted file mode 100644 index 48d522cf51505..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/infrastructure.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/ip-address-value.png 
b/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/ip-address-value.png deleted file mode 100644 index 53f598fb302c6..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/ip-address-value.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/monitor-icon.png b/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/monitor-icon.png deleted file mode 100644 index 01cb411563189..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/monitor-icon.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/plugins-menu.png b/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/plugins-menu.png deleted file mode 100644 index c1788dde6cdfa..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/plugins-menu.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/rule-window.png b/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/rule-window.png deleted file mode 100644 index 5fbe45f0334dd..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/rule-window.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/sample-rule-window.png b/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/sample-rule-window.png deleted file mode 100644 index f837e356787cb..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/sample-rule-window.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/snmp-monitor.png b/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/snmp-monitor.png deleted file mode 100644 index 87b2dc71468af..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/snmp-monitor.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/toggle-icon.png b/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/toggle-icon.png deleted file mode 100644 index 1090f75389d56..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/toggle-icon.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/upload-a-plugin.png b/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/upload-a-plugin.png deleted file mode 100644 index 3eef419524372..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-manage-proprietary-protocols/upload-a-plugin.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-prepare-your-network/azure-defender-for-iot-sensor-download-software-screen.png b/articles/defender-for-iot/organizations/media/how-to-prepare-your-network/azure-defender-for-iot-sensor-download-software-screen.png deleted file mode 100644 index baf152a15daf8..0000000000000 Binary files 
a/articles/defender-for-iot/organizations/media/how-to-prepare-your-network/azure-defender-for-iot-sensor-download-software-screen.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-prepare-your-network/corporate-hpe-proliant-dl360-v2.png b/articles/defender-for-iot/organizations/media/how-to-prepare-your-network/corporate-hpe-proliant-dl360-v2.png deleted file mode 100644 index 708c2ed7e47ca..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-prepare-your-network/corporate-hpe-proliant-dl360-v2.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-prepare-your-network/defender-for-iot-iso-download-screen.png b/articles/defender-for-iot/organizations/media/how-to-prepare-your-network/defender-for-iot-iso-download-screen.png deleted file mode 100644 index 320bd3a5f75f3..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-prepare-your-network/defender-for-iot-iso-download-screen.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-prepare-your-network/deployment-type-enterprise-for-azure-defender-for-iot-v2.png b/articles/defender-for-iot/organizations/media/how-to-prepare-your-network/deployment-type-enterprise-for-azure-defender-for-iot-v2.png deleted file mode 100644 index cb6ef5238e7b3..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-prepare-your-network/deployment-type-enterprise-for-azure-defender-for-iot-v2.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-prepare-your-network/enterprise-and-smb-hpe-proliant-dl20-v2.png b/articles/defender-for-iot/organizations/media/how-to-prepare-your-network/enterprise-and-smb-hpe-proliant-dl20-v2.png deleted file mode 100644 index b7983e9612a77..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-prepare-your-network/enterprise-and-smb-hpe-proliant-dl20-v2.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-prepare-your-network/office-ruggedized.png b/articles/defender-for-iot/organizations/media/how-to-prepare-your-network/office-ruggedized.png deleted file mode 100644 index 10b48ec4d90f9..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/how-to-prepare-your-network/office-ruggedized.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/how-to-set-up-high-availability/update-high-availability-domain.jpg b/articles/defender-for-iot/organizations/media/how-to-set-up-high-availability/update-high-availability-domain.jpg new file mode 100644 index 0000000000000..c16979013f5c8 Binary files /dev/null and b/articles/defender-for-iot/organizations/media/how-to-set-up-high-availability/update-high-availability-domain.jpg differ diff --git a/articles/defender-for-iot/organizations/media/release-notes/appliance-catalog.png b/articles/defender-for-iot/organizations/media/release-notes/appliance-catalog.png new file mode 100644 index 0000000000000..cc1ddbe9d2e79 Binary files /dev/null and b/articles/defender-for-iot/organizations/media/release-notes/appliance-catalog.png differ diff --git a/articles/defender-for-iot/organizations/media/tutorial-install-components/defender-for-iot-management-console-sign-in-screen.png b/articles/defender-for-iot/organizations/media/tutorial-install-components/defender-for-iot-management-console-sign-in-screen.png deleted file mode 100644 index 
a53339ef345bf..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/tutorial-install-components/defender-for-iot-management-console-sign-in-screen.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/media/tutorial-install-components/sensor-version-select-screen-v2.png b/articles/defender-for-iot/organizations/media/tutorial-install-components/sensor-version-select-screen-v2.png deleted file mode 100644 index 79abb149442f3..0000000000000 Binary files a/articles/defender-for-iot/organizations/media/tutorial-install-components/sensor-version-select-screen-v2.png and /dev/null differ diff --git a/articles/defender-for-iot/organizations/references-defender-for-iot-glossary.md b/articles/defender-for-iot/organizations/references-defender-for-iot-glossary.md index 82ad81860634c..d31eeb6fb605b 100644 --- a/articles/defender-for-iot/organizations/references-defender-for-iot-glossary.md +++ b/articles/defender-for-iot/organizations/references-defender-for-iot-glossary.md @@ -44,15 +44,8 @@ This glossary provides a brief description of important terms and concepts for t |--|--|--| | **Data mining** | Generate comprehensive and granular reports about your network devices:

                  - **SOC incident response**: Reports in real time to help deal with immediate incident response. For example, a report can list devices that might need patching.

                  - **Forensics**: Reports based on historical data for investigative reports.

                  - **IT network integrity**: Reports that help improve overall network security. For example, a report can list devices with weak authentication credentials.

                  - **Visibility**: Reports that cover all query items to view all baseline parameters of your network.

                  Save data-mining reports for read-only users to view. | **[Baseline](#b)

                  [Reports](#r)** | | **Defender for IoT platform** | The Defender for IoT solution installed on Defender for IoT sensors and the on-premises management console. | **[Sensor](#s)

                  [On-premises management console](#o)** | -| **Inventory device** | Defender for IoT will identify and classify devices as a single unique network device in the inventory for: -1. Standalone IT/OT/IoT devices (w/ 1 or multiple NICs) -1. Devices composed of multiple backplane components (including all racks/slots/modules) -1. Devices acting as network infrastructure such as Switch/Router (w/ multiple NICs). -Public internet IP addresses, multicast groups, and broadcast groups are not considered inventory devices. Devices that have been inactive for more than 60 days are classified as inactive Inventory devices.| +| **Device inventories** | Defender for IoT considers any of the following as single and unique network devices:

                  - Managed or unmanaged standalone IT/OT/IoT devices, with one or more NICs
                  - Devices with multiple backplane components, including all racks, slots, or modules
                  - Devices that provide network infrastructure, such as switches or routers with multiple NICs

                  Monitored devices are listed in the **Device inventory** pages in the Azure portal, the sensor console, and the on-premises management console. Data integration features let you enhance device data with details from other enterprise resources, such as CMDBs, DNS, firewalls, and Web APIs.

                  The following items aren't monitored as devices, and don't appear in the Defender for IoT device inventories:
                  - Public internet IP addresses
                  - Multi-cast groups
                  - Broadcast groups

                  Devices that are inactive for more than 60 days are classified as *inactive* inventory devices.
                  | [**Device map**](#d)| | **Device map** | A graphical representation of network devices that Defender for IoT detects. It shows the connections between devices and information about each device. Use the map to:

                  - Retrieve and control critical device information.

                  - Analyze network slices.

                  - Export device details and summaries. | **[Purdue layer group](#p)** | -| **Device inventory - sensor** | The device inventory displays an extensive range of device attributes detected by Defender for IoT. Options are available to:

                  - Filter displayed information.

                  - Export this information to a CSV file.

                  - Import Windows registry details. | **[Group](#g)**

                  **[Device inventory- on-premises management console](#d)** | -| **Device inventory - on-premises management console** | Device information from connected sensors can be viewed from the on-premises management console in the device inventory. This gives users of the on-premises management console a comprehensive view of all network information. | **[Device inventory - sensor](#d)

                  [Device inventory - data integrator](#d)** | -| **Device inventory - data integrator** | The data integration capabilities of the on-premises management console let you enhance the data in the device inventory with information from other enterprise resources. Example resources are CMDBs, DNS, firewalls, and Web APIs. | **[Device inventory - on-premises management console](#d)** | ## E diff --git a/articles/defender-for-iot/organizations/references-work-with-defender-for-iot-apis.md b/articles/defender-for-iot/organizations/references-work-with-defender-for-iot-apis.md index 52eeeb45aed9e..8b0520f5e1a1d 100644 --- a/articles/defender-for-iot/organizations/references-work-with-defender-for-iot-apis.md +++ b/articles/defender-for-iot/organizations/references-work-with-defender-for-iot-apis.md @@ -7,7 +7,7 @@ ms.topic: reference # Defender for IoT sensor and management console APIs -Defender for IoT APIs are governed by [Microsoft API License and Terms of use](/legal/microsoft-apis/terms-of-use). +Defender for IoT APIs is governed by [Microsoft API License and Terms of use](/legal/microsoft-apis/terms-of-use). Use an external REST API to access the data discovered by sensors and management consoles and perform actions with that data. @@ -239,11 +239,11 @@ Message string with the operation status details: - **Failure – error**: User authentication failure -- **Failure – error**: User does not exist +- **Failure – error**: User doesn't exist - **Failure – error**: Password doesn't match security policy -- **Failure – error**: User does not have the permissions to change password +- **Failure – error**: User doesn't have the permissions to change password #### Response example @@ -2501,13 +2501,15 @@ Define conditions under which alerts won't be sent. For example, define and upda The APIs that you define here appear in the on-premises management console's Alert Exclusions window as a read-only exclusion rule. +This API is supported for maintenance purposes only and is not meant to be used instead of [alert exclusion rules](/azure/defender-for-iot/organizations/how-to-work-with-alerts-on-premises-management-console#create-alert-exclusion-rules). Use this API for one-time maintenance operations only. + #### Method - POST #### Query parameters - **ticketId**: Defines the maintenance ticket ID in the user's systems. -- **ttl**: Defines the TTL (time to live), which is the duration of the maintenance window in minutes. After the period of time that this parameter defines, the system automatically starts sending alerts. +- **ttl**: Required. Defines the TTL (time to live), which is the duration of the maintenance window in minutes. After the period of time that this parameter defines, the system automatically starts sending alerts. - **engines**: Defines from which security engine to suppress alerts during the maintenance process: @@ -2777,7 +2779,7 @@ The below API's can be used with the ServiceNow integration via the ServiceNow's - Type: JSON - Structure: - - “**u_id**” - the internal id of the device. + - “**u_id**” - the internal ID of the device. - “**u_vendor**” - the name of the vendor. - “**u_mac_address_objects**” - array of - “**u_mac_address**” - mac address of the device. @@ -2804,7 +2806,7 @@ The below API's can be used with the ServiceNow integration via the ServiceNow's - “**u_protocol**” - protocol the device uses. - “**u_purdue_layer**” - the purdue layer that was manually set by the user. 
- “**u_sensor_ids**” - array of - - “**u_sensor_id**” - the id of the sensor that saw the device. + - “**u_sensor_id**” - the ID of the sensor that saw the device. - “**u_device_urls**” - array of - “**u_device_url**” the URL to view the device in the sensor. - “**u_firmwares**” - array of @@ -2829,7 +2831,7 @@ The below API's can be used with the ServiceNow integration via the ServiceNow's - Type: JSON - Structure: - Array of - - “**u_id**” - the id of the deleted device. + - “**u_id**” - the ID of the deleted device. ### Sensors @@ -2843,7 +2845,7 @@ The below API's can be used with the ServiceNow integration via the ServiceNow's - Type: JSON - Structure: - Array of - - “**u_id**” - internal sensor id, to be used in the devices API. + - “**u_id**” - internal sensor ID, to be used in the devices API. - “**u_name**” - the name of the appliance. - “**u_connection_state**” - connectivity with the CM state. One of the following: - “**SYNCED**” - Connection is successful. diff --git a/articles/defender-for-iot/organizations/references-work-with-defender-for-iot-cli-commands.md b/articles/defender-for-iot/organizations/references-work-with-defender-for-iot-cli-commands.md index 53274e5a21f13..6e3f16de91443 100644 --- a/articles/defender-for-iot/organizations/references-work-with-defender-for-iot-cli-commands.md +++ b/articles/defender-for-iot/organizations/references-work-with-defender-for-iot-cli-commands.md @@ -111,7 +111,7 @@ The following table describes the commands available to configure your network o ## Network capture filter configuration -The `network capture-filter` command allows administrators to eliminate network traffic that doesn't need to be analyzed. You can filter traffic by using an include list, or an exclude list. This command does not support the malware detection engine. +The `network capture-filter` command allows administrators to eliminate network traffic that doesn't need to be analyzed. You can filter traffic by using an include list, or an exclude list. This command doesn't support the malware detection engine. ```azurecli-interactive network capture-filter @@ -175,9 +175,9 @@ You're asked the following question: Your options are: `all`, `dissector`, `collector`, `statistics-collector`, `rpc-parser`, or `smb-parser`. -In most common use cases, we recommend that you select `all`. Selecting `all` does not include the malware detection engine, which is not supported by this command. +In most common use cases, we recommend that you select `all`. Selecting `all` doesn't include the malware detection engine, which isn't supported by this command. -### Custom base capture filter +### Custom base capture filter The base capture filter is the baseline for the components. For example, the filter determines which ports are available to the component. diff --git a/articles/defender-for-iot/organizations/release-notes.md b/articles/defender-for-iot/organizations/release-notes.md index 5f3383d425b14..7995dc43bb4b5 100644 --- a/articles/defender-for-iot/organizations/release-notes.md +++ b/articles/defender-for-iot/organizations/release-notes.md @@ -2,7 +2,7 @@ title: What's new in Microsoft Defender for IoT description: This article lets you know what's new in the latest release of Defender for IoT. ms.topic: overview -ms.date: 03/22/2022 +ms.date: 05/25/2022 --- # What's new in Microsoft Defender for IoT? @@ -19,7 +19,7 @@ Noted features listed below are in PREVIEW. 
The [Azure Preview Supplemental Term The Defender for IoT architecture uses on-premises sensors and management servers. This section describes the servicing information and timelines for the available on-premises software versions. -- Each General Availability (GA) version of the Defender for IoT sensor and on-premises management console software is supported for nine months after release. Fixes and new functionality are applied to each new version and are not applied to older versions. +- Each General Availability (GA) version of the Defender for IoT sensor and on-premises management console software is supported for nine months after release. Fixes and new functionality are applied to each new version and aren't applied to older versions. - Software update packages include new functionality and security patches. Urgent, high-risk security updates are applied in minor versions that may be released throughout the quarter. @@ -41,6 +41,49 @@ For more information, see the [Microsoft Security Development Lifecycle practice | 10.5.3 | 10/2021 | 07/2022 | | 10.5.2 | 10/2021 | 07/2022 | +## May 2022 + +We've recently optimized and enhanced our documentation as follows: + +- [Updated appliance catalog for OT environments](#updated-appliance-catalog-for-ot-environments) +- [Documentation reorganization for end-user organizations](#documentation-reorganization-for-end-user-organizations) + +### Updated appliance catalog for OT environments + +We've refreshed and revamped the catalog of supported appliances for monitoring OT environments. These appliances support flexible deployment options for environments of all sizes and can be used to host both the OT monitoring sensor and on-premises management consoles. + +Use the new pages as follows: + +1. **Understand which hardware model best fits your organization's needs.** For more information, see [Which appliances do I need?](ot-appliance-sizing.md) + +1. **Learn about the preconfigured hardware appliances that are available to purchase, or system requirements for virtual machines.** For more information, see [Pre-configured physical appliances for OT monitoring](ot-pre-configured-appliances.md) and [OT monitoring with virtual appliances](ot-virtual-appliances.md). + + For more information about each appliance type, use the linked reference page, or browse through our new **Reference > OT monitoring appliances** section. + + :::image type="content" source="media/release-notes/appliance-catalog.png" alt-text="Screenshot of the new appliance catalog reference section." lightbox="media/release-notes/appliance-catalog.png"::: + + Reference articles for each appliance type, including virtual appliances, include specific steps to configure the appliance for OT monitoring with Defender for IoT. Generic software installation and troubleshooting procedures are still documented in [Defender for IoT software installation](how-to-install-software.md). + +### Documentation reorganization for end-user organizations + +We recently reorganized our Defender for IoT documentation for end-user organizations, highlighting a clearer path for onboarding and getting started. + +Check out our new structure to follow through viewing devices and assets, managing alerts, vulnerabilities and threats, integrating with other services, and deploying and maintaining your Defender for IoT system. 
+ +**New and updated articles include**: + +- [Welcome to Microsoft Defender for IoT for organizations](overview.md) +- [Microsoft Defender for IoT architecture](architecture.md) +- [Quickstart: Get started with Defender for IoT](getting-started.md) +- [Tutorial: Microsoft Defender for IoT trial setup](tutorial-onboarding.md) +- [Tutorial: Get started with Enterprise IoT](tutorial-getting-started-eiot-sensor.md) +- [Plan your sensor connections for OT monitoring](plan-network-monitoring.md) +- [About Microsoft Defender for IoT network setup](how-to-set-up-your-network.md) + +> [!NOTE] +> To send feedback on docs via GitHub, scroll to the bottom of the page and select the **Feedback** option for **This page**. We'd be glad to hear from you! +> + ## April 2022 **Sensor software version**: 22.1.4 @@ -219,7 +262,7 @@ Other alert updates include: - **Access contextual data** for each alert, such as events that occurred around the same time, or a map of connected devices. Maps of connected devices are available for sensor console alerts only. -- **Alert statuses** are updated, and for example now include a *Closed* status instead of *Acknowledged*. +- **Alert statuses** are updated, and, for example, now include a *Closed* status instead of *Acknowledged*. - **Alert storage** for 90 days from the time that they're first detected. @@ -462,4 +505,4 @@ Unicode characters are now supported when working with sensor certificate passph ## Next steps -[Getting started with Defender for IoT](getting-started.md) \ No newline at end of file +[Getting started with Defender for IoT](getting-started.md) diff --git a/articles/defender-for-iot/organizations/resources-frequently-asked-questions.md b/articles/defender-for-iot/organizations/resources-frequently-asked-questions.md index 207d896198440..ba77ab1a0a9a1 100644 --- a/articles/defender-for-iot/organizations/resources-frequently-asked-questions.md +++ b/articles/defender-for-iot/organizations/resources-frequently-asked-questions.md @@ -20,7 +20,7 @@ Microsoft Defender for IoT provides comprehensive protocol support. In addition - Secure proprietary information by developing on-site as an external plugin. - Localize text for alerts, events, and protocol parameters. -This unique solution for developing protocols as plugins, does not require dedicated developer teams or version releases in order to support a new protocol. Developers, partners, and customers can securely develop protocols and share insights and knowledge using Horizon. +This unique solution for developing protocols as plugins, doesn't require dedicated developer teams or version releases in order to support a new protocol. Developers, partners, and customers can securely develop protocols and share insights and knowledge using Horizon. ## Do I have to purchase hardware appliances from Microsoft partners? Microsoft Defender for IoT sensor runs on specific hardware specs as described in the [Hardware Specifications Guide](./how-to-identify-required-appliances.md), customers can purchase certified hardware from Microsoft partners or use the supplied bill of materials (BOM) and purchase it on their own. @@ -28,7 +28,7 @@ Microsoft Defender for IoT sensor runs on specific hardware specs as described i Certified hardware has been tested in our labs for driver stability, packet drops and network sizing. -## Regulation does not allow us to connect our system to the Internet. Can we still utilize Defender for IoT? +## Regulation doesn't allow us to connect our system to the Internet. 
Can we still utilize Defender for IoT? Yes you can! The Microsoft Defender for IoT platform on-premises solution is deployed as a physical or virtual sensor appliance that passively ingests network traffic (via SPAN, RSPAN, or TAP) to analyze, discover, and continuously monitor IT, OT, and IoT networks. For larger enterprises, multiple sensors can aggregate their data to an on-premises management console. @@ -76,7 +76,7 @@ You can work with CLI [commands](references-work-with-defender-for-iot-cli-comma ## How do I check the sanity of my deployment -After installing the software for your sensor or on-premises management console, you will want to perform the [Post-installation validation](how-to-install-software.md#post-installation-validation). +After installing the software for your sensor or on-premises management console, you'll want to perform the [Post-installation validation](how-to-install-software.md#post-installation-validation). You can also use our [UI and CLI tools](how-to-troubleshoot-the-sensor-and-on-premises-management-console.md#check-system-health) to check system health and review your overall system statistics. diff --git a/articles/defender-for-iot/organizations/tutorial-splunk.md b/articles/defender-for-iot/organizations/tutorial-splunk.md index 8d16cbeabfee3..996d8d527426a 100644 --- a/articles/defender-for-iot/organizations/tutorial-splunk.md +++ b/articles/defender-for-iot/organizations/tutorial-splunk.md @@ -16,7 +16,7 @@ To address a lack of visibility into the security and resiliency of OT networks, The application provides SOC analysts with multidimensional visibility into the specialized OT protocols and IIoT devices deployed in industrial environments, along with ICS-aware behavioral analytics to rapidly detect suspicious or anomalous behavior. The application also enables both IT, and OT incident response from within one corporate SOC. This is an important evolution given the ongoing convergence of IT and OT to support new IIoT initiatives, such as smart machines and real-time intelligence. -The Splunk application can be installed locally or run on a cloud. The Splunk integration along with Defender for IoT supports both deployments. +The Splunk application can be installed locally ('Splunk Enterprise') or run on a cloud ('Splunk Cloud'). The Splunk integration along with Defender for IoT supports 'Splunk Enterprise' only. > [!Note] > References to CyberX refer to Microsoft Defender for IoT. diff --git a/articles/devops-project/index.yml b/articles/devops-project/index.yml index ee5699a21e90c..1afd13588e319 100644 --- a/articles/devops-project/index.yml +++ b/articles/devops-project/index.yml @@ -1,7 +1,7 @@ ### YamlMime:Landing title: DevOps Starter documentation to deploy to Azure -summary: DevOps Starter presents a simplified experience where you bring your existing code and Git repository, or choose from one of the sample applications to create a continuous integration (CI) and continuous delivery (CD) pipeline or workflow to Azure. DevOps Starter now supports creating GitHub actions workflows. +summary: DevOps Starter presents a simplified experience where you bring your existing code and Git repository, or choose from one of the sample applications to create a continuous integration (CI) and continuous delivery (CD) pipeline or workflow to Azure. DevOps Starter now supports creating GitHub Actions workflows. 
metadata: title: DevOps Starter documentation @@ -32,7 +32,7 @@ landingContent: linkLists: - linkListType: quickstart links: - - text: Node.js using GitHub actions + - text: Node.js using GitHub Actions url: ./devops-starter-gh-nodejs.md - text: .NET url: ./azure-devops-project-aspnet-core.md @@ -54,7 +54,7 @@ landingContent: linkLists: - linkListType: tutorial links: - - text: Deploy your app to Azure Web App using GitHub actions + - text: Deploy your app to Azure Web App using GitHub Actions url: devops-starter-gh-web-app.md - text: Bring your own code with GitHub url: azure-devops-project-vms.md diff --git a/articles/devtest-labs/how-to-move-schedule-to-new-region.md b/articles/devtest-labs/how-to-move-schedule-to-new-region.md index 09530f4c197da..2f6bd5a1834c6 100644 --- a/articles/devtest-labs/how-to-move-schedule-to-new-region.md +++ b/articles/devtest-labs/how-to-move-schedule-to-new-region.md @@ -1,14 +1,14 @@ --- -title: How to move a schedule to another region -description: This article explains how to move schedules to another Azure region. +title: Move a schedule to another region +description: This article explains how to move a top level schedule to another Azure region. ms.topic: how-to ms.author: rosemalcolm author: RoseHJM ms.date: 05/09/2022 --- -# Move schedules to another region +# Move a schedule to another region -In this article, you'll learn how to move schedules by using an Azure Resource Manager (ARM) template. +In this article, you'll learn how to move a schedule by using an Azure Resource Manager (ARM) template. DevTest Labs supports two types of schedules. diff --git a/articles/digital-twins/.openpublishing.redirection.digital-twins.json b/articles/digital-twins/.openpublishing.redirection.digital-twins.json index 7bdf54312c7f5..5ae59a46ca6ac 100644 --- a/articles/digital-twins/.openpublishing.redirection.digital-twins.json +++ b/articles/digital-twins/.openpublishing.redirection.digital-twins.json @@ -1,5 +1,15 @@ { "redirections": [ + { + "source_path": "how-to-create-app-registration-portal.md", + "redirect_url": "/azure/digital-twins/how-to-create-app-registration", + "redirect_document_id": true + }, + { + "source_path": "how-to-create-app-registration-cli.md", + "redirect_url": "/azure/digital-twins/how-to-create-app-registration", + "redirect_document_id": false + }, { "source_path": "how-to-set-up-instance-powershell.md", "redirect_url": "/azure/digital-twins/how-to-set-up-instance-cli", @@ -25,11 +35,6 @@ "redirect_url": "/azure/digital-twins/how-to-monitor-resource-health", "redirect_document_id": true }, - { - "source_path": "how-to-create-app-registration.md", - "redirect_url": "/azure/digital-twins/how-to-create-app-registration-portal", - "redirect_document_id": true - }, { "source_path": "concepts-integration.md", "redirect_url": "/azure/digital-twins/concepts-data-ingress-egress", diff --git a/articles/digital-twins/TOC.yml b/articles/digital-twins/TOC.yml index d9f08c89ffd0b..ec799e3c7a9ec 100644 --- a/articles/digital-twins/TOC.yml +++ b/articles/digital-twins/TOC.yml @@ -110,11 +110,7 @@ - name: Write app authentication code href: how-to-authenticate-client.md - name: Create an app registration with Azure Digital Twins access - items: - - name: Portal - href: how-to-create-app-registration-portal.md - - name: CLI - href: how-to-create-app-registration-cli.md + href: how-to-create-app-registration.md - name: Integrate with Azure SignalR Service href: how-to-integrate-azure-signalr.md - name: Connect input diff --git 
a/articles/digital-twins/concepts-3d-scenes-studio.md b/articles/digital-twins/concepts-3d-scenes-studio.md index c3de942d4b5b5..32b2463a8be68 100644 --- a/articles/digital-twins/concepts-3d-scenes-studio.md +++ b/articles/digital-twins/concepts-3d-scenes-studio.md @@ -1,8 +1,8 @@ --- # Mandatory fields. -title: 3D Scenes Studio for Azure Digital Twins +title: 3D Scenes Studio (preview) for Azure Digital Twins titleSuffix: Azure Digital Twins -description: Learn about 3D Scenes Studio for Azure Digital Twins. +description: Learn about 3D Scenes Studio (preview) for Azure Digital Twins. author: baanders ms.author: baanders # Microsoft employees only ms.date: 05/04/2022 @@ -16,12 +16,14 @@ ms.custom: event-tier1-build-2022 # manager: MSFT-alias-of-manager-or-PM-counterpart --- -# 3D Scenes Studio for Azure Digital Twins +# 3D Scenes Studio (preview) for Azure Digital Twins -Azure Digital Twins [3D Scenes Studio](https://explorer.digitaltwins.azure.net/3dscenes) is an immersive 3D environment, where end users can monitor, diagnose, and investigate operational data with the visual context of 3D assets. 3D Scenes Studio empowers organizations to enrich existing 3D models with visualizations powered by Azure Digital Twins data, without the need for 3D expertise. The visualizations can be easily consumed from web browsers. +Azure Digital Twins [3D Scenes Studio (preview)](https://explorer.digitaltwins.azure.net/3dscenes) is an immersive 3D environment, where end users can monitor, diagnose, and investigate operational data with the visual context of 3D assets. 3D Scenes Studio empowers organizations to enrich existing 3D models with visualizations powered by Azure Digital Twins data, without the need for 3D expertise. The visualizations can be easily consumed from web browsers. With a digital twin graph and curated 3D model, subject matter experts can leverage the studio's low-code builder to map the 3D elements to the digital twin, and define UI interactivity and business logic for a 3D visualization of a business environment. The 3D scenes can then be consumed in the hosted [Azure Digital Twins Explorer 3D Scenes Studio](concepts-azure-digital-twins-explorer.md), or in a custom application that leverages the embeddable 3D viewer component. +This article gives an overview of 3D Scenes Studio and its key features. For comprehensive, step-by-step instructions on how to use each feature, see [Use 3D Scenes Studio (preview)](how-to-use-3d-scenes-studio.md). + ## Studio overview Work in 3D Scenes Studio is built around the concept of *scenes*. A scene is a view of a single business environment, and is comprised of 3D content, custom business logic, and references to an Azure Digital Twins instance. You can have multiple scenes for a single digital twin instance. @@ -51,13 +53,16 @@ To work with 3D Scenes Studio, you'll need the following required resources: * An [Azure Digital Twins instance](how-to-set-up-instance-cli.md) * You'll need *Azure Digital Twins Data Owner* or *Azure Digital Twins Data Reader* access to the instance * The instance should be populated with [models](concepts-models.md) and [twins](concepts-twins-graph.md) -* An [Azure storage account](/azure/storage/common/storage-account-create?tabs=azure-portal) - * To build 3D scenes, you'll need *Storage Blob Data Owner* access to the storage account. If you only need to consume 3D scenes that others have created, you'll need *Storage Blob Data Reader*. 
-* A [private container](/azure/storage/blobs/storage-quickstart-blobs-portal#create-a-container) in the storage account -Then, you can access 3D Scenes Studio at this link: [3D Scenes Studio](https://explorer.digitaltwins.azure.net/3dscenes). +* An [Azure storage account](../storage/common/storage-account-create.md?tabs=azure-portal), and a [private container](../storage/blobs/storage-quickstart-blobs-portal.md#create-a-container) in the storage account + * To **view** 3D scenes, you'll need at least *Storage Blob Data Reader* access to these storage resources. To **build** 3D scenes, you'll need *Storage Blob Data Contributor* or *Storage Blob Data Owner* access. + + You can grant required roles at either the storage account level or the container level. For more information about Azure storage permissions, see [Assign an Azure role](../storage/blobs/assign-azure-role-data-access.md?tabs=portal#assign-an-azure-role). + * You should also configure [CORS](/rest/api/storageservices/cross-origin-resource-sharing--cors--support-for-the-azure-storage-services) for your storage account, so that 3D Scenes Studio will be able to access your storage container. For complete CORS setting information, see [Use 3D Scenes Studio (preview)](how-to-use-3d-scenes-studio.md#prerequisites). + +Then, you can access 3D Scenes Studio at this link: [3D Scenes Studio](https://dev.explorer.azuredigitaltwins-test.net/3dscenes). -Once there, you'll link your 3D environment to your storage resources, and configure your first scene. For detailed instructions on how to perform these actions, see [Initialize your 3D Scenes Studio environment](how-to-use-3d-scenes-studio.md#initialize-your-3d-scenes-studio-environment) and [Create and view scenes](how-to-use-3d-scenes-studio.md#create-and-view-scenes). +Once there, you'll link your 3D environment to your storage resources, and configure your first scene. For detailed instructions on how to perform these actions, see [Initialize your 3D Scenes Studio environment](how-to-use-3d-scenes-studio.md#initialize-your-3d-scenes-studio-environment) and [Create, edit, and view scenes](how-to-use-3d-scenes-studio.md#create-edit-and-view-scenes). ## Builder @@ -67,24 +72,28 @@ Here's what the builder looks like: :::image type="content" source="media/concepts-3d-scenes-studio/build-mode.png" alt-text="Screenshot of 3D Scenes Studio builder." lightbox="media/concepts-3d-scenes-studio/build-mode.png"::: -In the builder, you'll create *elements* and *behaviors* for your scene. Elements are user-defined 3D meshes that are linked to digital twins, mapping the visualization pieces to relevant twin data. Behaviors are business logic rules that use digital twin data to drive visuals in the scene. +In the builder, you'll create *elements* and *behaviors* for your scene. The following sections explain these features in more detail. ### Elements +*Elements* are user-defined 3D meshes that are linked to digital twins, mapping the visualization pieces to relevant twin data. + When creating an element in the builder, you'll define the following components: * **Primary twin**: Each element is connected to a primary digital twin counterpart. You connect the element to a twin in your Azure Digital Twins instance so that the element can represent your twin and its data within the 3D visualization. * **Name**: Each element needs a name. You might want to make it match the `$dtId` of its primary twin. * **Meshes**: Identify which components of the 3D model represent this element. 
* **Behaviors**: [Behaviors](#behaviors) describe how elements appear in the visualization. You can assign behaviors to this element here. -* **Aliased twins**: If you want, you can add secondary digital twin data sources for an element. You should only add aliased twins when there are additional twins with data beyond your primary twin that you want to leverage in your behaviors. After configuring an aliased twin, you'll be able to use properties from that twin when defining behaviors for that element. +* **Other twins**: If you want, you can add secondary digital twin data sources for an element. You should only add other twins when there are additional twins with data beyond your primary twin that you want to leverage in your behaviors. After configuring another twin, you'll be able to use properties from that twin when defining behaviors for that element. ### Behaviors +*Behaviors* are business logic rules that use digital twin data to drive visuals in the scene. + When creating a behavior for an element, you'll define the following components: * **Elements**: Behaviors describe the visuals that are applied to each [element](#elements) in the visualization. You can choose which elements this behavior applies to. -* **Twins**: Identify the set of twins whose data is available to this behavior. This includes the targeted elements' primary twins, and any aliased twins. +* **Twins**: Identify the set of twins whose data is available to this behavior. This includes the targeted elements' primary twins, and any other twins. * **Status**: States are data-driven overlays on your elements to indicate the health or status of the element. * **Alerts**: Alerts are conditional notifications to help you quickly see when an element requires attention. * **Widgets**: Widgets are data-driven visuals that provide additional data to help you diagnose and investigate the scenario that the behavior represents. Configuring widgets will help you make sure the right data is discoverable when an alert or status is active. @@ -105,9 +114,7 @@ You can use the **Elements** list to explore all the elements and active alerts 3D Scenes Studio is extensible to support additional viewing needs. The [viewer component](#viewer) can be embedded into custom applications, and can work in conjunction with 3rd party components. -Here's an example of what the embedded viewer might look like in an independent application: - -:::image type="content" source="media/concepts-3d-scenes-studio/embedded-view.png" alt-text="Screenshot of 3D Scenes Studio in embedded view." lightbox="media/concepts-3d-scenes-studio/embedded-view.png"::: +[!INCLUDE [digital-twins-3d-embed.md](../../includes/digital-twins-3d-embed.md)] ## Recommended limits @@ -126,4 +133,4 @@ These limits are recommended because 3D Scenes Studio leverages the standard [Az Try out 3D Scenes Studio with a sample scenario in [Get started with 3D Scenes Studio](quickstart-3d-scenes-studio.md). -Or, learn how to use the studio's full feature set in [Use 3D Scenes Studio](how-to-use-3d-scenes-studio.md). +Or, learn how to use the studio's full feature set in [Use 3D Scenes Studio](how-to-use-3d-scenes-studio.md). 
\ No newline at end of file diff --git a/articles/digital-twins/concepts-data-ingress-egress.md b/articles/digital-twins/concepts-data-ingress-egress.md index 88ae053145c30..4fc799f89f131 100644 --- a/articles/digital-twins/concepts-data-ingress-egress.md +++ b/articles/digital-twins/concepts-data-ingress-egress.md @@ -5,7 +5,7 @@ titleSuffix: Azure Digital Twins description: Learn about the data ingress and egress requirements for integrating Azure Digital Twins with other services. author: baanders ms.author: baanders # Microsoft employees only -ms.date: 03/01/2022 +ms.date: 06/01/2022 ms.topic: conceptual ms.service: digital-twins @@ -19,9 +19,9 @@ ms.service: digital-twins Azure Digital Twins is typically used together with other services to create flexible, connected solutions that use your data in different kinds of ways. This article covers data ingress and egress for Azure Digital Twins and Azure services that can be used to take advantage of it. -Using [event routes](concepts-route-events.md), Azure Digital Twins can receive data from upstream services such as [IoT Hub](../iot-hub/about-iot-hub.md) or [Logic Apps](../logic-apps/logic-apps-overview.md), which are used to deliver telemetry and notifications. +Azure Digital Twins can receive data from upstream services such as [IoT Hub](../iot-hub/about-iot-hub.md) or [Logic Apps](../logic-apps/logic-apps-overview.md), which are used to deliver telemetry and notifications. -Azure Digital Twins can also route data to downstream services, such as [Azure Maps](../azure-maps/about-azure-maps.md) and [Time Series Insights](../time-series-insights/overview-what-is-tsi.md), for storage, workflow integration, analytics, and more. +Azure Digital Twins can also use [event routes](concepts-route-events.md) to send data to downstream services, such as [Azure Maps](../azure-maps/about-azure-maps.md) and [Time Series Insights](../time-series-insights/overview-what-is-tsi.md), for storage, workflow integration, analytics, and more. ## Data ingress @@ -33,13 +33,18 @@ To ingest data from any source into Azure Digital Twins, use an [Azure function] You can also learn how to connect Azure Digital Twins to a Logic Apps trigger in [Integrate with Logic Apps](how-to-integrate-logic-apps.md). -## Data egress services +## Data egress -You may want to send Azure Digital Twins data to other downstream services for storage or additional processing. +You may want to send Azure Digital Twins data to other downstream services for storage or additional processing. -To send twin data to [Azure Data Explorer](/azure/data-explorer/data-explorer-overview), set up a [data history (preview) connection](concepts-data-history.md) that automatically historizes digital twin property updates from your Azure Digital Twins instance to an Azure Data Explorer cluster. You can then query this data in Azure Data Explorer using the [Azure Digital Twins query plugin for Azure Data Explorer](concepts-data-explorer-plugin.md). +Digital twin data can be sent to most Azure services using *endpoints*. If your destination is [Azure Data Explorer](/azure/data-explorer/data-explorer-overview), you can use *data history* instead to automatically historize twin property updates to an Azure Data Explorer cluster, where they can be queried as time series data. The rest of this section describes these capabilities in more detail. 
-To send data to other services, such as [Azure Maps](../azure-maps/about-azure-maps.md), [Time Series Insights](../time-series-insights/overview-what-is-tsi.md), or [Azure Storage](../storage/common/storage-introduction.md), start by attaching the destination service to an *endpoint*. +>[!NOTE] +>Azure Digital Twins implements *at least once* delivery for data emitted to egress services. + +### Endpoints + +To send Azure Digital Twins data to most Azure services, such as [Azure Maps](../azure-maps/about-azure-maps.md), [Time Series Insights](../time-series-insights/overview-what-is-tsi.md), or [Azure Storage](../storage/common/storage-introduction.md), start by attaching the destination service to an *endpoint*. Endpoints can be instances of any of these Azure services: * [Event Hubs](../event-hubs/event-hubs-about.md) @@ -50,7 +55,13 @@ The endpoint is attached to an Azure Digital Twins instance using management API For detailed instructions on how to send Azure Digital Twins data to Azure Maps, see [Use Azure Digital Twins to update an Azure Maps indoor map](how-to-integrate-maps.md). For detailed instructions on how to send Azure Digital Twins data to Time Series Insights, see [Integrate with Time Series Insights](how-to-integrate-time-series-insights.md). -Azure Digital Twins implements *at least once* delivery for data emitted to egress services. +### Data history + +To send twin data to [Azure Data Explorer](/azure/data-explorer/data-explorer-overview), set up a [data history (preview) connection](concepts-data-history.md) that automatically historizes digital twin property updates from your Azure Digital Twins instance to an Azure Data Explorer cluster. The data history connection requires an [event hub](../event-hubs/event-hubs-about.md), but doesn't require an explicit endpoint. + +Once the data has been historized, you can query this data in Azure Data Explorer using the [Azure Digital Twins query plugin for Azure Data Explorer](concepts-data-explorer-plugin.md). + +You can also use data history in combination with [Azure Synapse Analytics](../synapse-analytics/overview-what-is.md) to aggregate data from disparate sources. One useful application of this is to combine information technology (IT) data from ERP or CRM systems (like Dynamics 365, SAP, or Salesforce) with operational technology (OT) data from IoT devices and production management systems. For an example that illustrates how a company might combine this data, see the following blog post: [Integrating IT and OT Data with Azure Digital Twins, Azure Data Explorer, and Azure Synapse](https://techcommunity.microsoft.com/t5/internet-of-things-blog/integrating-it-and-ot-data-with-azure-digital-twins-azure-data/ba-p/3401981). 
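To make the endpoint-and-route flow from the endpoints section above more concrete, here's a minimal CLI sketch. It assumes the `az dt` command group from the Azure IoT CLI extension, an existing event hub with key-based authorization, and it uses placeholder names throughout; treat it as an outline to adapt rather than a complete walkthrough.

```azurecli-interactive
# Attach an existing event hub to the instance as an endpoint (placeholder names; key-based authorization).
az dt endpoint create eventhub --dt-name <instance-name> --endpoint-name <endpoint-name> --eventhub-namespace <event-hub-namespace> --eventhub <event-hub-name> --eventhub-policy <shared-access-policy> --eventhub-resource-group <event-hub-resource-group>

# Create a route that sends twin events to that endpoint ("true" matches every event).
az dt route create --dt-name <instance-name> --endpoint-name <endpoint-name> --route-name <route-name> --filter "true"
```

A narrower filter expression can be used in place of "true" to route only specific event types to the endpoint.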
## Next steps @@ -58,4 +69,4 @@ Learn more about endpoints and routing events to external services: * [Endpoints and event routes](concepts-route-events.md) See how to set up Azure Digital Twins to ingest data from IoT Hub: -* [Ingest telemetry from IoT Hub](how-to-ingest-iot-hub-data.md) \ No newline at end of file +* [Ingest telemetry from IoT Hub](how-to-ingest-iot-hub-data.md) diff --git a/articles/digital-twins/concepts-security.md b/articles/digital-twins/concepts-security.md index 02e5bb438e962..b054af75a3d9a 100644 --- a/articles/digital-twins/concepts-security.md +++ b/articles/digital-twins/concepts-security.md @@ -101,7 +101,7 @@ For instructions on how to enable a system-managed identity for Azure Digital Tw [Azure Private Link](../private-link/private-link-overview.md) is a service that enables you to access Azure resources (like [Azure Event Hubs](../event-hubs/event-hubs-about.md), [Azure Storage](../storage/common/storage-introduction.md), and [Azure Cosmos DB](../cosmos-db/introduction.md)) and Azure-hosted customer and partner services over a private endpoint in your [Azure Virtual Network (VNet)](../virtual-network/virtual-networks-overview.md). -Similarly, you can use private endpoints for your Azure Digital Twin instance to allow clients located in your virtual network to securely access the instance over Private Link. +Similarly, you can use private endpoints for your Azure Digital Twins instance to allow clients located in your virtual network to securely access the instance over Private Link. Configuring a private endpoint for your Azure Digital Twins instance enables you to secure your Azure Digital Twins instance and eliminate public exposure. Additionally, it helps avoid data exfiltration from your [Azure Virtual Network (VNet)](../virtual-network/virtual-networks-overview.md). The private endpoint uses an IP address from your Azure VNet address space. Network traffic between a client on your private network and the Azure Digital Twins instance traverses over the VNet and a Private Link on the Microsoft backbone network, eliminating exposure to the public internet. Here's a visual representation of this system: diff --git a/articles/digital-twins/how-to-authenticate-client.md b/articles/digital-twins/how-to-authenticate-client.md index fed4e7acb3169..3af3e333fb61a 100644 --- a/articles/digital-twins/how-to-authenticate-client.md +++ b/articles/digital-twins/how-to-authenticate-client.md @@ -80,7 +80,7 @@ The [ManagedIdentityCredential](/dotnet/api/azure.identity.managedidentitycreden This means that you may use `ManagedIdentityCredential` in the same project as `DefaultAzureCredential` or `InteractiveBrowserCredential`, to authenticate a different part of the project. -To use the default Azure credentials, you'll need the Azure Digital Twins instance's URL ([instructions to find](how-to-set-up-instance-portal.md#verify-success-and-collect-important-values)). You may also need an [app registration](./how-to-create-app-registration-portal.md) and the registration's [Application (client) ID](./how-to-create-app-registration-portal.md#collect-client-id-and-tenant-id). +To use the default Azure credentials, you'll need the Azure Digital Twins instance's URL ([instructions to find](how-to-set-up-instance-portal.md#verify-success-and-collect-important-values)). You may also need an [app registration](./how-to-create-app-registration.md) and the registration's [Application (client) ID](./how-to-create-app-registration.md#collect-client-id-and-tenant-id). 
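Because `DefaultAzureCredential` can fall back to your Azure CLI sign-in during local development, one quick sanity check is to request an Azure Digital Twins token directly from the CLI. This is only an illustrative check, assuming you've already run `az login`; the value passed to `--resource` is the fixed resource ID of the Azure Digital Twins service.

```azurecli-interactive
# Request a bearer token for the Azure Digital Twins service, using the identity that's signed in to the CLI.
az account get-access-token --resource 0b07f429-9f4b-4714-9392-cc5e8e80c8b0
```

If the command returns a token, your signed-in identity can authenticate to the service, though it still needs an appropriate role assignment on the instance before API calls will succeed.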
In an Azure function, you can use the managed identity credentials like this: @@ -90,9 +90,9 @@ In an Azure function, you can use the managed identity credentials like this: The [InteractiveBrowserCredential](/dotnet/api/azure.identity.interactivebrowsercredential?view=azure-dotnet&preserve-view=true) method is intended for interactive applications and will bring up a web browser for authentication. You can use this method instead of `DefaultAzureCredential` in cases where you require interactive authentication. -To use the interactive browser credentials, you'll need an **app registration** that has permissions to the Azure Digital Twins APIs. For steps on how to set up this app registration, see [Create an app registration with Azure Digital Twins access](./how-to-create-app-registration-portal.md). Once the app registration is set up, you'll need... -* [the app registration's Application (client) ID](./how-to-create-app-registration-portal.md#collect-client-id-and-tenant-id) -* [the app registration's Directory (tenant) ID](./how-to-create-app-registration-portal.md#collect-client-id-and-tenant-id) +To use the interactive browser credentials, you'll need an **app registration** that has permissions to the Azure Digital Twins APIs. For steps on how to set up this app registration, see [Create an app registration with Azure Digital Twins access](./how-to-create-app-registration.md). Once the app registration is set up, you'll need... +* [the app registration's Application (client) ID](./how-to-create-app-registration.md#collect-client-id-and-tenant-id) +* [the app registration's Directory (tenant) ID](./how-to-create-app-registration.md#collect-client-id-and-tenant-id) * [the Azure Digital Twins instance's URL](how-to-set-up-instance-portal.md#verify-success-and-collect-important-values) Here's an example of the code to create an authenticated SDK client using `InteractiveBrowserCredential`. diff --git a/articles/digital-twins/how-to-create-app-registration-cli.md b/articles/digital-twins/how-to-create-app-registration-cli.md deleted file mode 100644 index a5b7f167d4fae..0000000000000 --- a/articles/digital-twins/how-to-create-app-registration-cli.md +++ /dev/null @@ -1,168 +0,0 @@ ---- -# Mandatory fields. -title: Create an app registration with Azure Digital Twins access (CLI) -titleSuffix: Azure Digital Twins -description: Use the CLI to create an Azure AD app registration that can access Azure Digital Twins resources. -author: baanders -ms.author: baanders # Microsoft employees only -ms.date: 2/24/2022 -ms.topic: how-to -ms.service: digital-twins -ms.custom: contperf-fy22q3 - -# Optional fields. Don't forget to remove # if you need a field. -# ms.custom: can-be-multiple-comma-separated -# ms.reviewer: MSFT-alias-of-reviewer -# manager: MSFT-alias-of-manager-or-PM-counterpart ---- - -# Create an app registration to use with Azure Digital Twins (CLI) - -[!INCLUDE [digital-twins-create-app-registration-selector.md](../../includes/digital-twins-create-app-registration-selector.md)] - -This article describes how to use the Azure CLI to create an [Azure Active Directory (Azure AD)](../active-directory/fundamentals/active-directory-whatis.md) *app registration* that can access Azure Digital Twins. - -When working with Azure Digital Twins, it's common to interact with your instance through client applications. 
Those applications need to authenticate with Azure Digital Twins, and some of the [authentication mechanisms](how-to-authenticate-client.md) that apps can use involve an app registration. - -The app registration isn't required for all authentication scenarios. However, if you're using an authentication strategy or code sample that does require an app registration, this article shows you how to set one up and grant it permissions to the Azure Digital Twins APIs. It also covers how to collect important values that you'll need to use the app registration when authenticating. - ->[!TIP] -> You may prefer to set up a new app registration every time you need one, or to do this only once, establishing a single app registration that will be shared among all scenarios that require it. - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../includes/azure-cli-prepare-your-environment.md)] - -## Create manifest - -First, create a file containing certain service information that your app registration will need to access the Azure Digital Twins APIs. Later, you'll pass in this file when creating the app registration, to set up the Azure Digital Twins permissions. - -Create a new .json file on your computer called *manifest.json*. Copy this text into the file: - -```json -[ - { - "resourceAppId": "0b07f429-9f4b-4714-9392-cc5e8e80c8b0", - "resourceAccess": [ - { - "id": "4589bd03-58cb-4e6c-b17f-b580e39652f8", - "type": "Scope" - } - ] - } -] -``` - -The static value `0b07f429-9f4b-4714-9392-cc5e8e80c8b0` is the resource ID for the Azure Digital Twins service endpoint, which your app registration will need to access the Azure Digital Twins APIs. - -Save the finished file. - -### Cloud Shell users: Upload manifest - -If you're using Cloud Shell for this tutorial, you'll need to upload the manifest file you created to the Cloud Shell, so that you can access it in Cloud Shell commands when configuring the app registration. If you're using a local installation of the Azure CLI, you can skip this step. - -To upload the file, go to the Cloud Shell window in your browser. Select the "Upload/Download files" icon and choose "Upload". - -:::image type="content" source="media/how-to-set-up-instance/cloud-shell/cloud-shell-upload.png" alt-text="Screenshot of Azure Cloud Shell. The Upload icon is highlighted."::: - -Navigate to the *manifest.json* file on your machine and select **Open**. Doing so will upload the file to the root of your Cloud Shell storage. - -## Create the registration - -In this section, you'll run a CLI command to create an app registration with the following settings: -* Name of your choice -* Available only to accounts in the default directory (single tenant) -* A web reply URL of `http://localhost` -* Read/write permissions to the Azure Digital Twins APIs - -Run the following command to create the registration. If you're using Cloud Shell, the path to the manifest.json file is `@manifest.json`. - -```azurecli-interactive -az ad app create --display-name --available-to-other-tenants false --reply-urls http://localhost --native-app --required-resource-accesses "" -``` - -The output of the command is information about the app registration you've created. 
- -## Verify success - -You can confirm that the Azure Digital Twins permissions were granted by looking for the following fields in the output of the `az ad app create` command, and confirming their values match what's shown in the screenshot below: - -:::image type="content" source="media/how-to-create-app-registration/cli-required-resource-access.png" alt-text="Screenshot of Cloud Shell output of the app registration creation command. The items under 'requiredResourceAccess' are highlighted: there's a 'resourceAppId' value of 0b07f429-9f4b-4714-9392-cc5e8e80c8b0, and a 'resourceAccess > id' value of 4589bd03-58cb-4e6c-b17f-b580e39652f8."::: - -You can also verify the app registration was successfully created with the necessary API permissions by using the Azure portal. For portal instructions, see [Verify API permissions (portal)](how-to-create-app-registration-portal.md#verify-api-permissions). - -## Collect important values - -Next, collect some important values about the app registration that you'll need to use the app registration to authenticate a client application. These values include: -* resource name -* client ID -* tenant ID -* client secret - -To work with Azure Digital Twins, the resource name is `http://digitaltwins.azure.net`. - -The following sections describe how to find the other values. - -### Collect client ID and tenant ID - -To use the app registration for authentication, you may need to provide its **Application (client) ID** and **Directory (tenant) ID**. In this section, you'll collect these values so you can save them and use them whenever they're needed. - -You can find both of these values in the output from the `az ad app create` command. - -Application (client) ID: - -:::image type="content" source="media/how-to-create-app-registration/cli-app-id.png" alt-text="Screenshot of Cloud Shell output of the app registration creation command. The appId value is highlighted."::: - -Directory (tenant) ID: - -:::image type="content" source="media/how-to-create-app-registration/cli-tenant-id.png" alt-text="Screenshot of Cloud Shell output of the app registration creation command. The GUID value in the odata.metadata is highlighted."::: - -### Collect client secret - -To create a client secret for your app registration, you'll need your app registration's client ID value from the previous section. Use the value in the following CLI command to create a new secret: - -```azurecli-interactive -az ad app credential reset --id --append -``` - -You can also add optional parameters to this command to specify a credential description, end date, and other details. For more information about the command and its parameters, see [az ad app credential reset documentation](/cli/azure/ad/app/credential#az-ad-app-credential-reset). - -The output of this command is information about the client secret that you've created. Copy the value for `password` to use when you need the client secret for authentication. - -:::image type="content" source="media/how-to-create-app-registration/cli-client-secret.png" alt-text="Screenshot of Cloud Shell output of the app registration creation command. The password value is highlighted."::: - ->[!IMPORTANT] ->Make sure to copy the value now and store it in a safe place, as it cannot be retrieved again. If you can't find the value later, you'll have to create a new secret. - -## Create Azure Digital Twins role assignment - -In this section, you'll create a role assignment for the app registration to set its permissions on the Azure Digital Twins instance. 
This role will determine what permissions the app registration holds on the instance, so you should select the role that matches the appropriate level of permission for your situation. One possible role is [Azure Digital Twins Data Owner](../role-based-access-control/built-in-roles.md#azure-digital-twins-data-owner). For a full list of roles and their descriptions, see [Azure built-in roles](../role-based-access-control/built-in-roles.md). - -Use the following command to assign the role (must be run by a user with [sufficient permissions](how-to-set-up-instance-cli.md#prerequisites-permission-requirements) in the Azure subscription). The command requires you to pass in the name of the app registration. - -```azurecli-interactive -az dt role-assignment create --dt-name --assignee "" --role "" -``` - -The result of this command is outputted information about the role assignment that's been created for the app registration. - -### Verify role assignment - -To further verify the role assignment, you can look for it in the Azure portal. Follow the instructions in [Verify role assignment (portal)](how-to-create-app-registration-portal.md#verify-role-assignment). - -## Other possible steps for your organization - -It's possible that your organization requires more actions from subscription Owners/administrators to successfully set up an app registration. The steps required may vary depending on your organization's specific settings. - -Here are some common potential activities that an Owner or administrator on the subscription may need to do. -* Grant admin consent for the app registration. Your organization may have **Admin Consent Required** globally turned on in Azure AD for all app registrations within your subscription. If so, the Owner/administrator may need to grant additional delegated or application permissions. -* Activate public client access by appending `--set publicClient=true` to a create or update command for the registration. -* Set specific reply URLs for web and desktop access using the `--reply-urls` parameter. For more information on using this parameter with `az ad` commands, see the [az ad app documentation](/cli/azure/ad/app). -* Allow for implicit OAuth2 authentication flows using the `--oauth2-allow-implicit-flow` parameter. For more information on using this parameter with `az ad` commands, see the [az ad app documentation](/cli/azure/ad/app). - -For more information about app registration and its different setup options, see [Register an application with the Microsoft identity platform](/graph/auth-register-app-v2). - -## Next steps - -In this article, you set up an Azure AD app registration that can be used to authenticate client applications with the Azure Digital Twins APIs. - -Next, read about authentication mechanisms, including one that uses app registrations and others that don't: -* [Write app authentication code](how-to-authenticate-client.md) \ No newline at end of file diff --git a/articles/digital-twins/how-to-create-app-registration-portal.md b/articles/digital-twins/how-to-create-app-registration-portal.md deleted file mode 100644 index e5c874a53ebf8..0000000000000 --- a/articles/digital-twins/how-to-create-app-registration-portal.md +++ /dev/null @@ -1,189 +0,0 @@ ---- -# Mandatory fields. -title: Create an app registration with Azure Digital Twins access (portal) -titleSuffix: Azure Digital Twins -description: Use the Azure portal to create an Azure AD app registration that can access Azure Digital Twins resources. 
-author: baanders -ms.author: baanders # Microsoft employees only -ms.date: 2/24/2022 -ms.topic: how-to -ms.service: digital-twins - -# Optional fields. Don't forget to remove # if you need a field. -# ms.custom: can-be-multiple-comma-separated -# ms.reviewer: MSFT-alias-of-reviewer -# manager: MSFT-alias-of-manager-or-PM-counterpart ---- - -# Create an app registration to use with Azure Digital Twins (portal) - -[!INCLUDE [digital-twins-create-app-registration-selector.md](../../includes/digital-twins-create-app-registration-selector.md)] - -This article describes how to use the Azure portal to create an [Azure Active Directory (Azure AD)](../active-directory/fundamentals/active-directory-whatis.md) *app registration* that can access Azure Digital Twins. - -When working with Azure Digital Twins, it's common to interact with your instance through client applications. Those applications need to authenticate with Azure Digital Twins, and some of the [authentication mechanisms](how-to-authenticate-client.md) that apps can use involve an app registration. - -The app registration isn't required for all authentication scenarios. However, if you're using an authentication strategy or code sample that does require an app registration, this article shows you how to set one up and grant it permissions to the Azure Digital Twins APIs. It also covers how to collect important values that you'll need to use the app registration when authenticating. - ->[!TIP] -> You may prefer to set up a new app registration every time you need one, or to do this only once, establishing a single app registration that will be shared among all scenarios that require it. - -## Create the registration - -Start by navigating to [Azure Active Directory](https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/Overview) in the Azure portal (you can use this link or find it with the portal search bar). Select **App registrations** from the service menu, and then **+ New registration**. - -:::image type="content" source="media/how-to-create-app-registration/new-registration.png" alt-text="Screenshot of the Azure AD service page in the Azure portal, showing the steps to create a new registration in the 'App registrations' page."::: - -In the **Register an application** page that follows, fill in the requested values: -* **Name**: An Azure AD application display name to associate with the registration -* **Supported account types**: Select **Accounts in this organizational directory only (Default Directory only - Single tenant)** -* **Redirect URI**: An **Azure AD application reply URL** for the Azure AD application. Add a **Public client/native (mobile & desktop)** URI for `http://localhost`. - -When you're finished, select the **Register** button. - -:::image type="content" source="media/how-to-create-app-registration/register-an-application.png" alt-text="Screenshot of the 'Register an application' page in the Azure portal with the described values filled in."::: - -When the registration is finished setting up, the portal will redirect you to its details page. - -## Collect important values - -Next, collect some important values about the app registration that you'll need to use the app registration to authenticate a client application. These values include: -* resource name -* client ID -* tenant ID -* client secret - -To work with Azure Digital Twins, the resource name is `http://digitaltwins.azure.net`. - -The following sections describe how to find the other values. 
- -### Collect client ID and tenant ID - -The client ID and tenant ID values can be collected from the app registration's details page in the Azure portal: - -:::image type="content" source="media/how-to-create-app-registration/client-id-tenant-id.png" alt-text="Screenshot of the Azure portal showing the important values for the app registration." lightbox="media/how-to-create-app-registration/client-id-tenant-id.png"::: - -Take note of the **Application (client) ID** and **Directory (tenant) ID** shown on your page. - -### Collect client secret - -To set up a client secret for your app registration, start on your app registration page in the Azure portal. - -1. Select **Certificates & secrets** from the registration's menu, and then select **+ New client secret**. - - :::image type="content" source="media/how-to-create-app-registration/client-secret.png" alt-text="Screenshot of the Azure portal showing an Azure AD app registration and a highlight around 'New client secret'."::: - -1. Enter whatever values you want for Description and Expires, and select **Add**. - - :::row::: - :::column::: - :::image type="content" source="media/how-to-create-app-registration/add-client-secret.png" alt-text="Screenshot of the Azure portal while adding a client secret."::: - :::column-end::: - :::column::: - :::column-end::: - :::row-end::: - -1. Verify that the client secret is visible on the **Certificates & secrets** page with Expires and Value fields. - -1. Take note of its **Secret ID** and **Value** to use later (you can also copy them to the clipboard with the Copy icons). - - :::image type="content" source="media/how-to-create-app-registration/client-secret-value.png" alt-text="Screenshot of the Azure portal showing how to copy the client secret value."::: - ->[!IMPORTANT] ->Make sure to copy the values now and store them in a safe place, as they can't be retrieved again. If you can't find them later, you'll have to create a new secret. - -## Provide Azure Digital Twins permissions - -Next, configure the app registration you've created with permissions to access Azure Digital Twins. First, you'll create a role assignment for the app registration within the Azure Digital Twins instance. Then, you'll provide API permissions for the app to read and write to the Azure Digital Twins APIs. - -### Create role assignment - -In this section, you'll create a role assignment for the app registration on the Azure Digital Twins instance. This role will determine what permissions the app registration holds on the instance, so you should select the role that matches the appropriate level of permission for your situation. One possible role is [Azure Digital Twins Data Owner](../role-based-access-control/built-in-roles.md#azure-digital-twins-data-owner). For a full list of roles and their descriptions, see [Azure built-in roles](../role-based-access-control/built-in-roles.md). - -1. First, open the page for your Azure Digital Twins instance in the Azure portal. - -1. Select **Access control (IAM)**. - -1. Select **Add** > **Add role assignment** to open the Add role assignment page. - -1. Assign the appropriate role. For detailed steps, see [Assign Azure roles using the Azure portal](../role-based-access-control/role-assignments-portal.md). 
- - | Setting | Value | - | --- | --- | - | Role | Select as appropriate | - | Assign access to | User, group, or service principal | - | Members | Search for the name or [client ID](#collect-client-id-and-tenant-id) of the app registration | - - ![Add role assignment page](../../includes/role-based-access-control/media/add-role-assignment-page.png) - -#### Verify role assignment - -You can view the role assignment you've set up under **Access control (IAM) > Role assignments**. - -:::image type="content" source="media/how-to-create-app-registration/verify-role-assignment.png" alt-text="Screenshot of the Role Assignments page for an Azure Digital Twins instance in the Azure portal."::: - -The app registration should show up in the list along with the role you assigned to it. - -### Provide API permissions - -In this section, you'll grant your app baseline read/write permissions to the Azure Digital Twins APIs. - -From the portal page for your app registration, select **API permissions** from the menu. On the following permissions page, select the **+ Add a permission** button. - -:::image type="content" source="media/how-to-create-app-registration/add-permission.png" alt-text="Screenshot of the app registration in the Azure portal, highlighting the 'API permissions' menu option and 'Add a permission' button."::: - -In the **Request API permissions** page that follows, switch to the **APIs my organization uses** tab and search for *Azure digital twins*. Select **Azure Digital Twins** from the search results to continue with assigning permissions for the Azure Digital Twins APIs. - -:::image type="content" source="media/how-to-create-app-registration/request-api-permissions-1.png" alt-text="Screenshot of the 'Request API Permissions' page search result in the Azure portal showing Azure Digital Twins."::: - ->[!NOTE] -> If your subscription still has an existing Azure Digital Twins instance from the previous public preview of the service (before July 2020), you'll need to search for and select **Azure Smart Spaces Service** instead. This is an older name for the same set of APIs (notice that the **Application (client) ID** is the same as in the screenshot above), and your experience won't be changed beyond this step. -> :::image type="content" source="media/how-to-create-app-registration/request-api-permissions-1-smart-spaces.png" alt-text="Screenshot of the 'Request API Permissions' page search result showing Azure Smart Spaces Service in the Azure portal."::: - -Next, you'll select which permissions to grant for these APIs. Expand the **Read (1)** permission and check the box that says **Read.Write** to grant this app registration reader and writer permissions. - -:::image type="content" source="media/how-to-create-app-registration/request-api-permissions-2.png" alt-text="Screenshot of the 'Request API Permissions' page and selecting 'Read.Write' permissions for the Azure Digital Twins APIs in the Azure portal."::: - -Select **Add permissions** when finished. 
- -#### Verify API permissions - -On the **API permissions** page, verify that there's now an entry for Azure Digital Twins reflecting **Read.Write** permissions: - -:::image type="content" source="media/how-to-create-app-registration/verify-api-permissions.png" alt-text="Screenshot of the API permissions for the Azure AD app registration in the Azure portal, showing 'Read/Write Access' for Azure Digital Twins."::: - -You can also verify the connection to Azure Digital Twins within the app registration's *manifest.json*, which was automatically updated with the Azure Digital Twins information when you added the API permissions. - -To do so, select **Manifest** from the menu to view the app registration's manifest code. Scroll to the bottom of the code window and look for the following fields and values under `requiredResourceAccess`: -* `"resourceAppId": "0b07f429-9f4b-4714-9392-cc5e8e80c8b0"` -* `"resourceAccess"` > `"id": "4589bd03-58cb-4e6c-b17f-b580e39652f8"` - -These values are shown in the screenshot below: - -:::image type="content" source="media/how-to-create-app-registration/verify-manifest.png" alt-text="Screenshot of the manifest for the Azure AD app registration in the Azure portal."::: - -If these values are missing, retry the steps in the [section for adding the API permission](#provide-api-permissions). - -## Other possible steps for your organization - -It's possible that your organization requires more actions from subscription Owners/administrators to successfully set up an app registration. The steps required may vary depending on your organization's specific settings. - -Here are some common potential activities that an Owner/administrator on the subscription may need to do. These and other operations can be performed from the [Azure AD App registrations](https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/RegisteredApps) page in the Azure portal. -* Grant admin consent for the app registration. Your organization may have **Admin Consent Required** globally turned on in Azure AD for all app registrations within your subscription. If so, the Owner/administrator will need to select this button for your company on the app registration's **API permissions** page for the app registration to be valid: - - :::image type="content" source="media/how-to-create-app-registration/grant-admin-consent.png" alt-text="Screenshot of the Azure portal showing the 'Grant admin consent' button under API permissions."::: - - If consent was granted successfully, the entry for Azure Digital Twins should then show a **Status** value of **Granted for (your company)** - - :::image type="content" source="media/how-to-create-app-registration/granted-admin-consent-done.png" alt-text="Screenshot of the Azure portal showing the admin consent granted for the company under API permissions."::: -* Activate public client access -* Set specific reply URLs for web and desktop access -* Allow for implicit OAuth2 authentication flows - -For more information about app registration and its different setup options, see [Register an application with the Microsoft identity platform](/graph/auth-register-app-v2). - -## Next steps - -In this article, you set up an Azure AD app registration that can be used to authenticate client applications with the Azure Digital Twins APIs. 
- -Next, read about authentication mechanisms, including one that uses app registrations and others that don't: -* [Write app authentication code](how-to-authenticate-client.md) \ No newline at end of file diff --git a/articles/digital-twins/how-to-create-app-registration.md b/articles/digital-twins/how-to-create-app-registration.md new file mode 100644 index 0000000000000..7c720d44b559a --- /dev/null +++ b/articles/digital-twins/how-to-create-app-registration.md @@ -0,0 +1,341 @@ +--- +# Mandatory fields. +title: Create an app registration with Azure Digital Twins access +titleSuffix: Azure Digital Twins +description: Create an Azure Active Directory app registration that can access Azure Digital Twins resources. +author: baanders +ms.author: baanders # Microsoft employees only +ms.date: 5/25/2022 +ms.topic: how-to +ms.service: digital-twins +ms.custom: contperf-fy22q4 + +# Optional fields. Don't forget to remove # if you need a field. +# ms.custom: can-be-multiple-comma-separated +# ms.reviewer: MSFT-alias-of-reviewer +# manager: MSFT-alias-of-manager-or-PM-counterpart +--- + +# Create an app registration to use with Azure Digital Twins + +This article describes how to create an [Azure Active Directory (Azure AD)](../active-directory/fundamentals/active-directory-whatis.md) *app registration* that can access Azure Digital Twins. This article includes steps for the [Azure portal](https://portal.azure.com) and the [Azure CLI](/cli/azure/what-is-azure-cli). + +When working with Azure Digital Twins, it's common to interact with your instance through client applications. Those applications need to authenticate with Azure Digital Twins, and some of the [authentication mechanisms](how-to-authenticate-client.md) that apps can use involve an app registration. + +The app registration isn't required for all authentication scenarios. However, if you're using an authentication strategy or code sample that does require an app registration, this article shows you how to set one up and grant it permissions to the Azure Digital Twins APIs. It also covers how to collect important values that you'll need to use the app registration when authenticating. + +>[!TIP] +> You may prefer to set up a new app registration every time you need one, or to do this only once, establishing a single app registration that will be shared among all scenarios that require it. + +## Create the registration + +Start by selecting the tab below for your preferred interface. + +# [Portal](#tab/portal) + +Navigate to [Azure Active Directory](https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/Overview) in the Azure portal (you can use this link or find it with the portal search bar). Select **App registrations** from the service menu, and then **+ New registration**. + +:::image type="content" source="media/how-to-create-app-registration/new-registration.png" alt-text="Screenshot of the Azure AD service page in the Azure portal, showing the steps to create a new registration in the 'App registrations' page."::: + +In the **Register an application** page that follows, fill in the requested values: +* **Name**: An Azure AD application display name to associate with the registration +* **Supported account types**: Select **Accounts in this organizational directory only (Default Directory only - Single tenant)** +* **Redirect URI**: An **Azure AD application reply URL** for the Azure AD application. Add a **Public client/native (mobile & desktop)** URI for `http://localhost`. 
+ +When you're finished, select the **Register** button. + +:::image type="content" source="media/how-to-create-app-registration/register-an-application.png" alt-text="Screenshot of the 'Register an application' page in the Azure portal with the described values filled in."::: + +When the registration is finished setting up, the portal will redirect you to its details page. + +# [CLI](#tab/cli) + +Start by creating a manifest file, which contains service information that your app registration will need to access the Azure Digital Twins APIs. Afterwards, you'll pass this file into a CLI command to create the registration. + +### Create manifest + +Create a new .json file on your computer called *manifest.json*. Copy this text into the file: + +```json +[ + { + "resourceAppId": "0b07f429-9f4b-4714-9392-cc5e8e80c8b0", + "resourceAccess": [ + { + "id": "4589bd03-58cb-4e6c-b17f-b580e39652f8", + "type": "Scope" + } + ] + } +] +``` + +The static value `0b07f429-9f4b-4714-9392-cc5e8e80c8b0` is the resource ID for the Azure Digital Twins service endpoint, which your app registration will need to access the Azure Digital Twins APIs. + +Save the finished file. + +### Cloud Shell users: Upload manifest + +If you're using Azure Cloud Shell for this tutorial, you'll need to upload the manifest file you created to the Cloud Shell, so that you can access it in Cloud Shell commands when configuring the app registration. If you're using a local installation of the Azure CLI, you can skip ahead to the next step, [Run the creation command](#run-the-creation-command). + +To upload the file, go to the Cloud Shell window in your browser. Select the "Upload/Download files" icon and choose "Upload". + +:::image type="content" source="media/how-to-set-up-instance/cloud-shell/cloud-shell-upload.png" alt-text="Screenshot of Azure Cloud Shell. The Upload icon is highlighted."::: + +Navigate to the *manifest.json* file on your machine and select **Open**. Doing so will upload the file to the root of your Cloud Shell storage. + +### Run the creation command + +In this section, you'll run a CLI command to create an app registration with the following settings: +* Name of your choice +* Available only to accounts in the default directory (single tenant) +* A web reply URL of `http://localhost` +* Read/write permissions to the Azure Digital Twins APIs + +Run the following command to create the registration, filling in the placeholders with your own values. If you're using Cloud Shell, the path to the manifest.json file is `@manifest.json`. + +```azurecli-interactive +az ad app create --display-name <app-registration-name> --available-to-other-tenants false --reply-urls http://localhost --native-app --required-resource-accesses "<path-to-manifest.json>" +``` + +The output of the command is information about the app registration you've created. + +### Verify success + +You can confirm that the Azure Digital Twins permissions were granted by looking for the following fields in the output of the creation command, under `requiredResourceAccess`. Confirm their values match what's listed below. +* `resourceAccess > id` is *4589bd03-58cb-4e6c-b17f-b580e39652f8* +* `resourceAppId` is *0b07f429-9f4b-4714-9392-cc5e8e80c8b0* + +:::image type="content" source="media/how-to-create-app-registration/cli-required-resource-access.png" alt-text="Screenshot of Cloud Shell output of the app registration creation command."::: + +--- + +## Collect important values + +Next, collect some important values about the app registration that you'll need to use the app registration to authenticate a client application.
These values include: +* resource name — When working with Azure Digital Twins, the **resource name** is `http://digitaltwins.azure.net`. +* client ID +* tenant ID +* client secret + +The following sections describe how to find the remaining values. + +### Collect client ID and tenant ID + +To use the app registration for authentication, you may need to provide its **Application (client) ID** and **Directory (tenant) ID**. Here, you'll collect these values so you can save them and use them whenever they're needed. + +# [Portal](#tab/portal) + +The client ID and tenant ID values can be collected from the app registration's details page in the Azure portal: + +:::image type="content" source="media/how-to-create-app-registration/client-id-tenant-id.png" alt-text="Screenshot of the Azure portal showing the important values for the app registration." lightbox="media/how-to-create-app-registration/client-id-tenant-id.png"::: + +Take note of the **Application (client) ID** and **Directory (tenant) ID** shown on your page. + +# [CLI](#tab/cli) + +You can find both of these values in the output from the `az ad app create` command that you ran [earlier](#run-the-creation-command). (You can also bring up the app registration's information again using [az ad app show](/cli/azure/ad/app#az-ad-app-show).) + +Look for these values in the result: + +Application (client) ID: + +:::image type="content" source="media/how-to-create-app-registration/cli-app-id.png" alt-text="Screenshot of Cloud Shell output of the app registration creation command. The appId value is highlighted."::: + +Directory (tenant) ID: + +:::image type="content" source="media/how-to-create-app-registration/cli-tenant-id.png" alt-text="Screenshot of Cloud Shell output of the app registration creation command. The GUID value in the odata.metadata is highlighted."::: + +--- + +### Collect client secret + +Set up a client secret for your app registration, which other applications can use to authenticate through it. + +# [Portal](#tab/portal) + +Start on your app registration page in the Azure portal. + +1. Select **Certificates & secrets** from the registration's menu, and then select **+ New client secret**. + + :::image type="content" source="media/how-to-create-app-registration/client-secret.png" alt-text="Screenshot of the Azure portal showing an Azure AD app registration and a highlight around 'New client secret'."::: + +1. Enter whatever values you want for Description and Expires, and select **Add**. + + :::row::: + :::column::: + :::image type="content" source="media/how-to-create-app-registration/add-client-secret.png" alt-text="Screenshot of the Azure portal while adding a client secret."::: + :::column-end::: + :::column::: + :::column-end::: + :::row-end::: + +1. Verify that the client secret is visible on the **Certificates & secrets** page with Expires and Value fields. + +1. Take note of its **Secret ID** and **Value** to use later (you can also copy them to the clipboard with the Copy icons). + + :::image type="content" source="media/how-to-create-app-registration/client-secret-value.png" alt-text="Screenshot of the Azure portal showing how to copy the client secret value."::: + +>[!IMPORTANT] +>Make sure to copy the values now and store them in a safe place, as they can't be retrieved again. If you can't find them later, you'll have to create a new secret. 
+ +# [CLI](#tab/cli) + +To create a client secret for your app registration, you'll need your app registration's client ID value that you collected in the [previous step](#collect-client-id-and-tenant-id). Use the value in place of the `<client-ID>` placeholder in the following CLI command to create a new secret: + +```azurecli-interactive +az ad app credential reset --id <client-ID> --append +``` + +You can also add optional parameters to this command to specify a credential description, end date, and other details. For more information about the command and its parameters, see [az ad app credential reset documentation](/cli/azure/ad/app/credential#az-ad-app-credential-reset). + +The output of this command is information about the client secret that you've created. + +Copy the value for `password` to use when you need the client secret for authentication. + +:::image type="content" source="media/how-to-create-app-registration/cli-client-secret.png" alt-text="Screenshot of Cloud Shell output of the app registration creation command. The password value is highlighted."::: + +>[!IMPORTANT] +>Make sure to copy the value now and store it in a safe place, as it cannot be retrieved again. If you can't find the value later, you'll have to create a new secret. + +--- + +## Provide Azure Digital Twins permissions + +Next, configure the app registration you've created with permissions to access Azure Digital Twins. There are two types of permissions that are required: +* A role assignment for the app registration within the Azure Digital Twins instance +* API permissions for the app to read and write to the Azure Digital Twins APIs + +### Create role assignment + +In this section, you'll create a role assignment for the app registration on the Azure Digital Twins instance. This role will determine what permissions the app registration holds on the instance, so you should select the role that matches the appropriate level of permission for your situation. One possible role is [Azure Digital Twins Data Owner](../role-based-access-control/built-in-roles.md#azure-digital-twins-data-owner). For a full list of roles and their descriptions, see [Azure built-in roles](../role-based-access-control/built-in-roles.md). + +# [Portal](#tab/portal) + +Use these steps to create the role assignment for your registration. + +1. Open the page for your Azure Digital Twins instance in the Azure portal. + +1. Select **Access control (IAM)**. + +1. Select **Add** > **Add role assignment** to open the Add role assignment page. + +1. Assign the appropriate role. For detailed steps, see [Assign Azure roles using the Azure portal](../role-based-access-control/role-assignments-portal.md). + + | Setting | Value | + | --- | --- | + | Role | Select as appropriate | + | Assign access to | User, group, or service principal | + | Members | Search for the name or [client ID](#collect-client-id-and-tenant-id) of the app registration | + + ![Add role assignment page](../../includes/role-based-access-control/media/add-role-assignment-page.png) + +#### Verify role assignment + +You can view the role assignment you've set up under **Access control (IAM) > Role assignments**. + +:::image type="content" source="media/how-to-create-app-registration/verify-role-assignment.png" alt-text="Screenshot of the Role Assignments page for an Azure Digital Twins instance in the Azure portal."::: + +The app registration should show up in the list along with the role you assigned to it.
+ +# [CLI](#tab/cli) + +Use the [az dt role-assignment create](/cli/azure/dt/role-assignment#az-dt-role-assignment-create) command to assign the role (it must be run by a user with [sufficient permissions](how-to-set-up-instance-cli.md#prerequisites-permission-requirements) in the Azure subscription). The command requires you to pass in the name of the role you want to assign, the name of your Azure Digital Twins instance, and either the name or the object ID of the app registration. + +```azurecli-interactive +az dt role-assignment create --dt-name <instance-name> --assignee "<app-registration-name>" --role "<role-name>" +``` + +This command outputs information about the role assignment that's been created for the app registration. + +To further verify the role assignment, you can look for it in the Azure portal (switch to the [Portal instruction tab](?tabs=portal#verify-role-assignment)). + +--- + +### Provide API permissions + +In this section, you'll grant your app baseline read/write permissions to the Azure Digital Twins APIs. + +If you're using the Azure CLI and set up your app registration [earlier](#create-the-registration) with a manifest file, this step is already done. If you're using the Azure portal to create your app registration, continue through the rest of this section to set up API permissions. + +# [Portal](#tab/portal) + +From the portal page for your app registration, select **API permissions** from the menu. On the following permissions page, select the **+ Add a permission** button. + +:::image type="content" source="media/how-to-create-app-registration/add-permission.png" alt-text="Screenshot of the app registration in the Azure portal, highlighting the 'API permissions' menu option and 'Add a permission' button."::: + +In the **Request API permissions** page that follows, switch to the **APIs my organization uses** tab and search for *Azure digital twins*. Select **Azure Digital Twins** from the search results to continue with assigning permissions for the Azure Digital Twins APIs. + +:::image type="content" source="media/how-to-create-app-registration/request-api-permissions-1.png" alt-text="Screenshot of the 'Request API Permissions' page search result in the Azure portal showing Azure Digital Twins."::: + +>[!NOTE] +> If your subscription still has an existing Azure Digital Twins instance from the previous public preview of the service (before July 2020), you'll need to search for and select **Azure Smart Spaces Service** instead. This is an older name for the same set of APIs (notice that the **Application (client) ID** is the same as in the screenshot above), and your experience won't be changed beyond this step. +> :::image type="content" source="media/how-to-create-app-registration/request-api-permissions-1-smart-spaces.png" alt-text="Screenshot of the 'Request API Permissions' page search result showing Azure Smart Spaces Service in the Azure portal."::: + +Next, you'll select which permissions to grant for these APIs. Expand the **Read (1)** permission and check the box that says **Read.Write** to grant this app registration reader and writer permissions. + +:::image type="content" source="media/how-to-create-app-registration/request-api-permissions-2.png" alt-text="Screenshot of the 'Request API Permissions' page and selecting 'Read.Write' permissions for the Azure Digital Twins APIs in the Azure portal."::: + +Select **Add permissions** when finished.
+ +#### Verify API permissions + +On the **API permissions** page, verify that there's now an entry for Azure Digital Twins reflecting **Read.Write** permissions: + +:::image type="content" source="media/how-to-create-app-registration/verify-api-permissions.png" alt-text="Screenshot of the API permissions for the Azure AD app registration in the Azure portal, showing 'Read/Write Access' for Azure Digital Twins."::: + +You can also verify the connection to Azure Digital Twins within the app registration's *manifest.json*, which was automatically updated with the Azure Digital Twins information when you added the API permissions. + +To do so, select **Manifest** from the menu to view the app registration's manifest code. Scroll to the bottom of the code window and look for the following fields and values under `requiredResourceAccess`: +* `"resourceAppId": "0b07f429-9f4b-4714-9392-cc5e8e80c8b0"` +* `"resourceAccess"` > `"id": "4589bd03-58cb-4e6c-b17f-b580e39652f8"` + +These values are shown in the screenshot below: + +:::image type="content" source="media/how-to-create-app-registration/verify-manifest.png" alt-text="Screenshot of the manifest for the Azure AD app registration in the Azure portal."::: + +If these values are missing, retry the steps in the [section for adding the API permission](#provide-api-permissions). + +# [CLI](#tab/cli) + +If you're using the CLI, the API permissions were set up earlier as part of the [Create the registration](#create-the-registration) step. + +You can verify them now using the Azure portal (switch to the [Portal instruction tab](?tabs=portal#verify-api-permissions)). + +--- + +## Other possible steps for your organization + +It's possible that your organization requires more actions from subscription owners or administrators to finish setting up the app registration. The steps required may vary depending on your organization's specific settings. Choose a tab below to see this information tailored to your preferred interface. + +# [Portal](#tab/portal) + +Here are some common potential activities that an owner or administrator on the subscription may need to do. These and other operations can be performed from the [Azure AD App registrations](https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/RegisteredApps) page in the Azure portal. +* Grant admin consent for the app registration. Your organization may have **Admin Consent Required** globally turned on in Azure AD for all app registrations within your subscription. If so, the owner/administrator will need to select this button for your company on the app registration's **API permissions** page for the app registration to be valid: + + :::image type="content" source="media/how-to-create-app-registration/grant-admin-consent.png" alt-text="Screenshot of the Azure portal showing the 'Grant admin consent' button under API permissions."::: + - If consent was granted successfully, the entry for Azure Digital Twins should then show a **Status** value of **Granted for (your company)** + + :::image type="content" source="media/how-to-create-app-registration/granted-admin-consent-done.png" alt-text="Screenshot of the Azure portal showing the admin consent granted for the company under API permissions."::: +* Activate public client access +* Set specific reply URLs for web and desktop access +* Allow for implicit OAuth2 authentication flows + +# [CLI](#tab/cli) + +Here are some common potential activities that an owner or administrator on the subscription may need to do. 
+* Grant admin consent for the app registration. Your organization may have **Admin Consent Required** globally turned on in Azure AD for all app registrations within your subscription. If so, the owner/administrator may need to grant additional delegated or application permissions. +* Activate public client access by appending `--set publicClient=true` to a create or update command for the registration. +* Set specific reply URLs for web and desktop access using the `--reply-urls` parameter. For more information on using this parameter with `az ad` commands, see the [az ad app documentation](/cli/azure/ad/app). +* Allow for implicit OAuth2 authentication flows using the `--oauth2-allow-implicit-flow` parameter. For more information on using this parameter with `az ad` commands, see the [az ad app documentation](/cli/azure/ad/app). + +--- + +For more information about app registration and its different setup options, see [Register an application with the Microsoft identity platform](/graph/auth-register-app-v2). + +## Next steps + +In this article, you set up an Azure AD app registration that can be used to authenticate client applications with the Azure Digital Twins APIs. + +Next, read about authentication mechanisms, including one that uses app registrations and others that don't: +* [Write app authentication code](how-to-authenticate-client.md) \ No newline at end of file diff --git a/articles/digital-twins/how-to-enable-private-link.md b/articles/digital-twins/how-to-enable-private-link.md index 390a586f9cad4..b404c20bd20d0 100644 --- a/articles/digital-twins/how-to-enable-private-link.md +++ b/articles/digital-twins/how-to-enable-private-link.md @@ -1,36 +1,32 @@ --- -# Mandatory fields. -title: Enable private access with Private Link +title: Enable private access to Azure Digital Twins titleSuffix: Azure Digital Twins -description: Learn how to enable private access for Azure Digital Twins solutions with Private Link. +description: Learn how to enable private access to your Azure Digital Twins solutions, using Azure Private Link. author: baanders -ms.author: baanders # Microsoft employees only -ms.date: 02/22/2022 +ms.author: baanders +ms.date: 06/07/2022 ms.topic: how-to ms.service: digital-twins -ms.custom: contperf-fy22q1 +ms.custom: contperf-fy22q1, contperf-fy22q4 ms.devlang: azurecli - -# Optional fields. Don't forget to remove # if you need a field. -# ms.custom: can-be-multiple-comma-separated -# ms.reviewer: MSFT-alias-of-reviewer -# manager: MSFT-alias-of-manager-or-PM-counterpart --- -# Enable private access with Private Link +# Enable private access to Azure Digital Twins using Private Link -This article describes the different ways to enable [Private Link with a private endpoint for an Azure Digital Twins instance](concepts-security.md#private-network-access-with-azure-private-link). Configuring a private endpoint for your Azure Digital Twins instance enables you to secure your Azure Digital Twins instance and eliminate public exposure. Additionally, it helps avoid data exfiltration from your [Azure Virtual Network (VNet)](../virtual-network/virtual-networks-overview.md). +By using Azure Digital Twins together with [Azure Private Link](../private-link/private-link-overview.md), you can enable private endpoints for your Azure Digital Twins instance, to eliminate public exposure and allow clients located in your virtual network to securely access the instance over Private Link. 
For more information about this security strategy for Azure Digital Twins, see [Private Link with a private endpoint for an Azure Digital Twins instance](concepts-security.md#private-network-access-with-azure-private-link). Here are the steps that are covered in this article: 1. Turn on Private Link and configure a private endpoint for an Azure Digital Twins instance. -1. View, edit, or delete a private endpoint from an instance. -1. Disable or enable public network access flags, to restrict API access to Private Link connections only. +1. View, edit, or delete a private endpoint from an Azure Digital Twins instance. +1. Disable or enable public network access flags, to restrict API access for an Azure Digital Twins instance to Private Link connections only. + +This article also contains information for deploying Azure Digital Twins with Private Link using an ARM template, and troubleshooting the configuration. ## Prerequisites Before you can set up a private endpoint, you'll need an [Azure Virtual Network (VNet)](../virtual-network/virtual-networks-overview.md) where the endpoint can be deployed. If you don't have a VNet already, you can follow one of the [Azure Virtual Network quickstarts](../virtual-network/quick-create-portal.md) to set this up. -## Add a private endpoint to Azure Digital Twins +## Add private endpoints to Azure Digital Twins You can use either the [Azure portal](https://portal.azure.com) or the [Azure CLI](/cli/azure/what-is-azure-cli) to turn on Private Link with a private endpoint for an Azure Digital Twins instance. @@ -147,7 +143,7 @@ For a full list of required and optional parameters, as well as more private end --- -## Manage private endpoint connections +## Manage private endpoints In this section, you'll see how to view, edit, and delete a private endpoint after it's been created. @@ -255,9 +251,9 @@ For a sample template that allows an Azure function to connect to Azure Digital This template creates an Azure Digital Twins instance, a virtual network, an Azure function connected to the virtual network, and a Private Link connection to make the Azure Digital Twins instance accessible to the Azure function through a private endpoint. -## Troubleshoot Private Link with Azure Digital Twins +## Troubleshoot -Here are some common issues experienced with Private Link for Azure Digital Twins. +Here are some common issues that might arise when using Private Link with Azure Digital Twins. * **Issue:** When trying to access Azure Digital Twins APIs, you see an HTTP error code 403 with the following error in the response body: ```json diff --git a/articles/digital-twins/how-to-monitor-diagnostics.md b/articles/digital-twins/how-to-monitor-diagnostics.md index 9580869ec9c6e..2968df34b0af0 100644 --- a/articles/digital-twins/how-to-monitor-diagnostics.md +++ b/articles/digital-twins/how-to-monitor-diagnostics.md @@ -147,7 +147,7 @@ Here are the field and property descriptions for API logs.
| `ResultDescription` | String | Additional details about the event | | `DurationMs` | String | How long it took to perform the event in milliseconds | | `CallerIpAddress` | String | A masked source IP address for the event | -| `CorrelationId` | Guid | Customer provided unique identifier for the event | +| `CorrelationId` | Guid | Unique identifier for the event | | `ApplicationId` | Guid | Application ID used in bearer authorization | | `Level` | Int | The logging severity of the event | | `Location` | String | The region where the event took place | diff --git a/articles/digital-twins/how-to-move-regions.md b/articles/digital-twins/how-to-move-regions.md index e71e36a4fef26..8cc8f1aae024b 100644 --- a/articles/digital-twins/how-to-move-regions.md +++ b/articles/digital-twins/how-to-move-regions.md @@ -126,7 +126,7 @@ The exact resources you need to edit depends on your scenario, but here are some * Azure Maps. * IoT Hub Device Provisioning Service. * Personal or company apps outside of Azure, such as the client app created in [Code a client app](tutorial-code.md), that connect to the instance and call Azure Digital Twins APIs. -* Azure AD app registrations don't need to be recreated. If you're using an [app registration](./how-to-create-app-registration-portal.md) to connect to the Azure Digital Twins APIs, you can reuse the same app registration with your new instance. +* Azure AD app registrations don't need to be recreated. If you're using an [app registration](./how-to-create-app-registration.md) to connect to the Azure Digital Twins APIs, you can reuse the same app registration with your new instance. After you finish this step, your new instance in the target region should be a copy of the original instance. diff --git a/articles/digital-twins/how-to-use-3d-scenes-studio.md b/articles/digital-twins/how-to-use-3d-scenes-studio.md index 2fecb0e4eccc3..9bcca2aa90593 100644 --- a/articles/digital-twins/how-to-use-3d-scenes-studio.md +++ b/articles/digital-twins/how-to-use-3d-scenes-studio.md @@ -1,8 +1,8 @@ --- # Mandatory fields. -title: Use 3D Scenes Studio (all features) +title: Use 3D Scenes Studio (preview) titleSuffix: Azure Digital Twins -description: Learn how to use all the features of 3D Scenes Studio for Azure Digital Twins. +description: Learn how to use all the features of 3D Scenes Studio (preview) for Azure Digital Twins. author: baanders ms.author: baanders # Microsoft employees only ms.date: 05/03/2022 @@ -16,9 +16,9 @@ ms.custom: event-tier1-build-2022 # manager: MSFT-alias-of-manager-or-PM-counterpart --- -# Build 3D scenes with 3D Scenes Studio for Azure Digital Twins +# Build 3D scenes with 3D Scenes Studio (preview) for Azure Digital Twins -Azure Digital Twins [3D Scenes Studio](https://explorer.digitaltwins.azure.net/3dscenes) is an immersive 3D environment, where business and front-line workers can consume and investigate operational data from their Azure Digital Twins solutions with visual context. +Azure Digital Twins [3D Scenes Studio (preview)](https://explorer.digitaltwins.azure.net/3dscenes) is an immersive 3D environment, where business and front-line workers can consume and investigate operational data from their Azure Digital Twins solutions with visual context. ## Prerequisites @@ -26,21 +26,16 @@ To use 3D Scenes Studio, you'll need the following resources: * An Azure Digital Twins instance. For instructions, see [Set up an instance and authentication](how-to-set-up-instance-cli.md). 
* Obtain *Azure Digital Twins Data Owner* or *Azure Digital Twins Data Reader* access to the instance. For instructions, see [Set up user access permissions](how-to-set-up-instance-cli.md#set-up-user-access-permissions). * Take note of the *host name* of your instance to use later. -* An Azure storage account. For instructions, see [Create a storage account](/azure/storage/common/storage-account-create?tabs=azure-portal). - * Obtain *Storage Blob Data Owner* access to the storage account. For instructions, see [Assign Azure roles using the Azure portal](../role-based-access-control/role-assignments-portal.md). -* A private container in the storage account. For instructions, see [Create a container](/azure/storage/blobs/storage-quickstart-blobs-portal#create-a-container). +* An Azure storage account. For instructions, see [Create a storage account](../storage/common/storage-account-create.md?tabs=azure-portal). +* A private container in the storage account. For instructions, see [Create a container](../storage/blobs/storage-quickstart-blobs-portal.md#create-a-container). * Take note of the *URL* of your storage container to use later. +* *Storage Blob Data Owner* or *Storage Blob Data Contributor* access to your storage resources. You can grant required roles at either the storage account level or the container level. For instructions and more information about permissions to Azure storage, see [Assign an Azure role](../storage/blobs/assign-azure-role-data-access.md?tabs=portal#assign-an-azure-role). -You should also configure CORS for your storage account, so that 3D Scenes Studio will be able to access your storage container. -1. Return to the storage account's page in the portal. -1. Scroll down in the left menu to **Resource sharing (CORS)** and select it. -1. On the **Resource sharing (CORS)** page for your storage account, fill in an entry with the following details: - 1. **Allowed origins** - Enter *https://explorer.digitaltwins.azure.net*. You can add additional origins if you want, or use * to allow general access. - 1. **Allowed methods** - Select the checkboxes for *GET*, *POST*, *OPTIONS*, and *PUT*. You can add additional methods if you want. - 1. **Allowed headers** - Enter *Authorization,x-ms-version,x-ms-blob-type*. You can add additional headers if you want. -1. Select **Save**. +You should also configure [CORS](/rest/api/storageservices/cross-origin-resource-sharing--cors--support-for-the-azure-storage-services) for your storage account, so that 3D Scenes Studio will be able to access your storage container. You can use the following [Azure CLI](/cli/azure/what-is-azure-cli) command to set the minimum required methods, origins, and headers. The command contains one placeholder for the name of your storage account. - :::image type="content" source="media/how-to-use-3d-scenes-studio/cors.png" alt-text="Screenshot of the Azure portal where the CORS entry is being created and saved." lightbox="media/how-to-use-3d-scenes-studio/cors.png"::: +```azurecli +az storage cors add --services b --methods GET OPTIONS POST PUT --origins https://explorer.digitaltwins.azure.net --allowed-headers Authorization x-ms-version x-ms-blob-type --account-name <storage-account-name> +``` Now you have all the necessary resources to work with scenes in 3D Scenes Studio.
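+Optionally, you can double-check the CORS configuration before continuing by listing the CORS rules that are currently set on the account's Blob service. The following command is a minimal verification sketch, and assumes the same storage account name placeholder used in the command above: + +```azurecli +# List the Blob service CORS rules to confirm the entry for 3D Scenes Studio was added +az storage cors list --services b --account-name <storage-account-name> +```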
@@ -53,15 +48,15 @@ In this section, you'll set the environment in *3D Scenes Studio* and customize :::image type="content" source="media/how-to-use-3d-scenes-studio/studio-edit-environment-1.png" alt-text="Screenshot of 3D Scenes Studio highlighting the edit environment icon, which looks like a pencil." lightbox="media/how-to-use-3d-scenes-studio/studio-edit-environment-1.png"::: - 1. The **Environment URL** should start with *https://*, followed by the *host name* of your instance from the [Prerequisites](#prerequisites) section. + 1. The **Azure Digital Twins instance URL** should start with *https://*, followed by the *host name* of your instance from the [Prerequisites](#prerequisites) section. - 1. For the **Container URL**, enter the URL of your storage container from the [Prerequisites](#prerequisites) section. + 1. For the **Azure storage container URL**, enter the URL of your storage container from the [Prerequisites](#prerequisites) section. 1. Select **Save**. :::image type="content" source="media/how-to-use-3d-scenes-studio/studio-edit-environment-2.png" alt-text="Screenshot of 3D Scenes Studio highlighting the Save button for the environment." lightbox="media/how-to-use-3d-scenes-studio/studio-edit-environment-2.png"::: -## Create and view scenes +## Create, edit, and view scenes The 3D representation of an environment in 3D Scenes Studio is called a *scene*. A scene consists of a 3D file and a configuration file that's created for you automatically. @@ -74,12 +69,22 @@ You can use 3D Scenes Studio with a 3D file that's already present in your stora 1. From the home page of 3D Scenes Studio, select the **Add 3D scene** button to start creating a new scene. -1. Enter a **Name** for the scene, and select one of the following tabs for the file upload option: - 1. **From container** to enter the URL of a 3D file that's already in your storage container +1. Enter a **Name** and **Description** for the scene. +1. If you want the scene to show up in [globe view](#view-scenes-in-globe-view), toggle **Show on globe** to **On**. Enter **Latitude** and **Longitude** values for the scene. +1. Select one of the following tabs in the **Link 3D file** section: + 1. **Choose file** to enter the URL of a 3D file that's already in your storage container 1. **Upload file** to upload a 3D file from your computer :::image type="content" source="media/how-to-use-3d-scenes-studio/add-scene.png" alt-text="Screenshot of 3D Scenes Studio, Create new scene dialog." lightbox="media/how-to-use-3d-scenes-studio/add-scene.png"::: 1. Select **Create**. + +### Edit scenes + +To edit or delete a scene after it's been created, use the **Actions** icons next to the scene in the 3D Scenes Studio home page. + +:::image type="content" source="media/how-to-use-3d-scenes-studio/edit-delete-scene.png" alt-text="Screenshot of 3D Scenes Studio, highlighting actions for a scene." lightbox="media/how-to-use-3d-scenes-studio/edit-delete-scene.png"::: + +Editing a scene will reopen all of the scene properties you set while creating it, allowing you to change them and update the scene. ### View scenes in globe view @@ -103,6 +108,12 @@ You can switch to **View** mode to enable filtering on specific elements and vis :::image type="content" source="media/how-to-use-3d-scenes-studio/scene-view.png" alt-text="Screenshot of 3D Scenes Studio, showing a scene in the viewer." 
lightbox="media/how-to-use-3d-scenes-studio/scene-view.png"::: +### Embed scenes in custom applications + +The viewer component can also be embedded into custom applications outside of 3D Scenes Studio, and can work in conjunction with 3rd party components. + +[!INCLUDE [digital-twins-3d-embed.md](../../includes/digital-twins-3d-embed.md)] + ## Add elements An *element* is a self-defined set of 3D meshes that is linked to data on one or more underlying digital twins. @@ -142,22 +153,21 @@ If you started element creation by selecting a mesh in the visualization, that m ### Behaviors -A *behavior* is a scenario for your scene. You can select **Add behavior** from this tab to enter the **New behavior** flow. - +A *behavior* is a scenario for your scene. Select **Add behavior** on this tab. From there, you can either select an existing behavior to add it to this element, or select **New behavior** to enter the flow for creating a new behavior. :::image type="content" source="media/how-to-use-3d-scenes-studio/new-element-behaviors.png" alt-text="Screenshot of the New element options in 3D Scenes Studio. The Behaviors tab is highlighted." lightbox="media/how-to-use-3d-scenes-studio/new-element-behaviors.png"::: For more details on creating new behaviors, see [Add behaviors](#add-behaviors). -### Aliased twins +### Other twins -An *aliased twin* is a secondary digital twin data source for an element. You can add aliased twins to an element if the data on the primary twin won't be enough to define all the behaviors you want for the element, so you need access to the data of additional twins. +On the **other twins** tab, you can add secondary digital twin data sources for an element. You can add other twins to an element if the data on the primary twin won't be enough to define all the behaviors you want for the element, so you need access to the data of additional twins. -:::image type="content" source="media/how-to-use-3d-scenes-studio/new-element-aliased-twins.png" alt-text="Screenshot of the New element options in 3D Scenes Studio. The Aliased twins tab is highlighted." lightbox="media/how-to-use-3d-scenes-studio/new-element-aliased-twins.png"::: +:::image type="content" source="media/how-to-use-3d-scenes-studio/new-element-other-twins.png" alt-text="Screenshot of the New element options in 3D Scenes Studio. The Other twins tab is highlighted." lightbox="media/how-to-use-3d-scenes-studio/new-element-other-twins.png"::: -You can't add aliased twins during new element creation. For instructions on adding aliased twins, see [Twins](#twins) as a behavior option. +You can't add other twins during new element creation. For instructions on adding other twins, see [Twins](#twins) as a behavior option. -Once there are aliased twins added to the element, you'll be able to view and modify them on this tab. +Once there are other twins added to the element, you'll be able to view and modify them on this tab. ## Add behaviors @@ -169,8 +179,6 @@ One way to create a new behavior is to select **New behavior** from the **Behavi Alternatively, you can select an element from the **Elements** tab, and create a new behavior from [that element's Behaviors tab](#behaviors). -:::image type="content" source="media/how-to-use-3d-scenes-studio/modify-element-behaviors.png" alt-text="Screenshot of the Modify element options in 3D Scenes Studio. The Behaviors tab is highlighted." 
lightbox="media/how-to-use-3d-scenes-studio/modify-element-behaviors.png"::: - This will open the **New behavior** panel where you can fill in behavior information. :::image type="content" source="media/how-to-use-3d-scenes-studio/new-behavior-panel.png" alt-text="Screenshot of New behavior options in 3D Scenes Studio." lightbox="media/how-to-use-3d-scenes-studio/new-behavior-panel.png"::: @@ -194,17 +202,17 @@ If you started the behavior creation process from a specific element, that eleme ### Twins -On the **Twins** tab, you can modify the set of twins whose data is available to this behavior. This includes the targeted elements' primary twins, and any aliased twins. +On the **Twins** tab, you can modify the set of twins whose data is available to this behavior. This includes the targeted elements' primary twins, and any additional twins. -An *aliased twin* is a secondary digital twin data source for an element. After configuring an aliased twin, you'll be able to use properties from that twin in your behavior expressions for this element. You should only add aliased twins when there are additional twins with data beyond your primary twin that you want to leverage in your [status](#status), [alerts](#alerts), and [widgets](#widgets) for this behavior. +You can add secondary digital twin data sources for an element. After configuring other twins, you'll be able to use properties from those twins in your behavior expressions for this element. You should only add other twins when there are additional twins with data beyond your primary twin that you want to leverage in your [status](#status), [alerts](#alerts), and [widgets](#widgets) for this behavior. -To create a new alias, select **Add twin alias** and **Create twin alias**. +To add a new twin data source, select **Add twin** and **Create twin**. :::image type="content" source="media/how-to-use-3d-scenes-studio/new-behavior-twins.png" alt-text="Screenshot of the New behavior options in 3D Scenes Studio. The Twins tab is highlighted." lightbox="media/how-to-use-3d-scenes-studio/new-behavior-twins.png"::: -This will open a **New twin alias** panel where you can name the alias and select a twin to map. +This will open a **New twin** panel where you can name the additional twin and select a twin from your Azure Digital Twins instance to map. -:::image type="content" source="media/how-to-use-3d-scenes-studio/new-behavior-twins-new-alias.png" alt-text="Screenshot of the New twin alias panel in 3D Scenes Studio." lightbox="media/how-to-use-3d-scenes-studio/new-behavior-twins-new-alias.png"::: +:::image type="content" source="media/how-to-use-3d-scenes-studio/new-behavior-twins-new-twin.png" alt-text="Screenshot of the New twin panel in 3D Scenes Studio." lightbox="media/how-to-use-3d-scenes-studio/new-behavior-twins-new-twin.png"::: >[!TIP] >[Azure Digital Twins Explorer](concepts-azure-digital-twins-explorer.md) can help you see twins that might be related to the primary twin for this element. You can query your graph using `SELECT * FROM digitaltwins WHERE $dtId="`, and then use the [double-click expansion feature](how-to-use-azure-digital-twins-explorer.md#control-twin-graph-expansion) to explore related twins. @@ -213,9 +221,9 @@ This will open a **New twin alias** panel where you can name the alias and selec In the **Status** tab, you can define states for your element. *States* are data-driven overlays on your elements to indicate the health or status of the element. 
-To create a state, first choose whether the state is dependent on a **Single property** or a **Custom (advanced)** property expression. For a **Single property**, you'll get a dropdown list of properties on the primary twin. For **Custom (advanced)**, you'll get a text box. +To create a state, first choose whether the state is dependent on a **Single property** or a **Custom (advanced)** property expression. For a **Single property**, you'll get a dropdown list of numeric properties on the primary twin. For **Custom (advanced)**, you'll get a text box where you can write a custom JavaScript expression using one or more properties. The expression should have a numeric outcome. For more information about writing custom expressions, see [Use custom (advanced) expressions](#use-custom-advanced-expressions). -Once you've defined your property expression, set value ranges to create state boundaries, and choose colors to represent each state in the visualization. +Once you've defined your property expression, set value ranges to create state boundaries, and choose colors to represent each state in the visualization. The min of each value range is inclusive, and the max is exclusive. :::image type="content" source="media/how-to-use-3d-scenes-studio/new-behavior-status.png" alt-text="Screenshot of the New behavior options in 3D Scenes Studio. The Status tab is highlighted." lightbox="media/how-to-use-3d-scenes-studio/new-behavior-status.png"::: @@ -223,10 +231,18 @@ Once you've defined your property expression, set value ranges to create state b In the **Alerts** tab, you can set conditional notifications to help you quickly see when an element requires your attention. -First, select a **Trigger expression** involving properties of *PrimaryTwin* that will generate an alert badge when it evaluates to true. Then, customize your alert badge with a **Badge icon**, **Badge color**, and **Notification text**. +First, enter a **Trigger expression**. This is a JavaScript expression involving one or more properties of *PrimaryTwin* that yields a boolean result. This expression will generate an alert badge in the visualization when it evaluates to true. For more information about writing custom expressions, see [Use custom (advanced) expressions](#use-custom-advanced-expressions). + +Then, customize your alert badge with an **Icon** and **Color**, and a string for **Scenario Description**. :::image type="content" source="media/how-to-use-3d-scenes-studio/new-behavior-alerts.png" alt-text="Screenshot of the New behavior options in 3D Scenes Studio. The Alerts tab is highlighted." lightbox="media/how-to-use-3d-scenes-studio/new-behavior-alerts.png"::: +Notification text can also include calculation expressions with this syntax: `${}`. Expressions will be computed and displayed dynamically in the [viewer](#view-scenes-individually). + +For an example of notification text with an expression, consider a behavior for a pasteurization tank, whose twin has double properties for `InFlow` and `OutFlow`. To display the difference between the tank's inflow and outflow in the notification, you could use this notification text: `Too much flow (InFlow is ${PrimaryTwin.InFlow - PrimaryTwin.OutFlow} greater than OutFlow)`. The computed result of the expression will be shown in the alert text in the viewer. 
+ +:::image type="content" source="media/how-to-use-3d-scenes-studio/new-behavior-alerts-expression.png" alt-text="Screenshots showing the notification text being entered on the Alerts dialog, and how the alert appears in the Viewer." lightbox="media/how-to-use-3d-scenes-studio/new-behavior-alerts-expression.png"::: + ### Widgets Widgets are managed on the **Widgets** tab. *Widgets* are data-driven visuals that provide additional context and data, to help you understand the scenario that the behavior represents. Configuring widgets will help you make sure the right data is discoverable when an alert or status is active. @@ -238,20 +254,62 @@ Select **Add widget** to bring up the **Widget library**, where you can select f Here are the types of widget that you can create: * **Gauge**: For representing numerical data points visually + + Enter a **Display name** and **Unit of measure**, then choose whether the gauge reflects a **Single property** or a **Custom (advanced)** property expression. For a **Single property**, you'll get a dropdown list of numeric properties on the primary twin. For **Custom (advanced)**, you'll get a text box where you can write a custom JavaScript expression using one or more properties. The expression should have a numeric outcome. For more information about writing custom expressions, see [Use custom (advanced) expressions](#use-custom-advanced-expressions). + + Once you've defined your property expression, set value ranges to appear in certain colors on the gauge. The min of each value range is inclusive, and the max is exclusive. + :::image type="content" source="media/how-to-use-3d-scenes-studio/new-behavior-widgets-gauge.png" alt-text="Screenshot of creating a new gauge-type widget in 3D Scenes Studio." lightbox="media/how-to-use-3d-scenes-studio/new-behavior-widgets-gauge.png"::: -* **Link**: For including externally referenced content via a linked URL +* **Link**: For including externally-referenced content via a linked URL + + Enter a **Label** and destination **URL**. + :::image type="content" source="media/how-to-use-3d-scenes-studio/new-behavior-widgets-link.png" alt-text="Screenshot of creating a new link-type widget in 3D Scenes Studio." lightbox="media/how-to-use-3d-scenes-studio/new-behavior-widgets-link.png"::: + Link URLs can also include calculation expressions with this syntax: `${}`. The screenshot above contains an expression for accessing a property of the primary twin. Expressions will be computed and displayed dynamically in the [viewer](#view-scenes-individually). + * **Value**: For directly displaying twin property values + + Enter a **Display name** and select a **Property expression** that you want to display. This can be a **Single property** of the primary twin, or a **Custom (advanced)** property expression. Custom expressions should be JavaScript expressions using one or more properties of the twin, and you'll select which outcome type the expression will produce. For more information about writing custom expressions, see [Use custom (advanced) expressions](#use-custom-advanced-expressions). + :::image type="content" source="media/how-to-use-3d-scenes-studio/new-behavior-widgets-value.png" alt-text="Screenshot of creating a new value-type widget in 3D Scenes Studio." lightbox="media/how-to-use-3d-scenes-studio/new-behavior-widgets-value.png"::: + If your custom property expression outputs a string, you can also use JavaScript's template literal syntax to include a dynamic expression in the string output. 
Format the dynamic expression with this syntax: `${}`. Then, wrap the whole string output with backticks (`` ` ``). + + Below is an example of a value widget that checks if the `InFlow` value of the primary twin exceeds 99. If so, it outputs a string with an expression containing the twin's `$dtId`. Otherwise, there will be no expression in the output, so no backticks are required. + + Here's the value expression: `` PrimaryTwin.InFlow > 99 ? `${PrimaryTwin.$dtId} has an InFlow problem` : 'Everything looks good' ``. The computed result of the expression (the `$dtId`) will be shown in the widget in the viewer. + + :::image type="content" source="media/how-to-use-3d-scenes-studio/new-behavior-widgets-value-expression.png" alt-text="Screenshots showing the notification text being entered on the value widget dialog, and how the widget appears in the Viewer." lightbox="media/how-to-use-3d-scenes-studio/new-behavior-widgets-value-expression.png"::: + +### Use custom (advanced) expressions + +While defining [status](#status), [alerts](#alerts), and [widgets](#widgets) in your behaviors, you may want to use custom expressions to define a property condition. + +:::image type="content" source="media/how-to-use-3d-scenes-studio/new-behavior-status-custom.png" alt-text="Screenshot of defining a custom expression for a Status in 3D Scenes Studio." lightbox="media/how-to-use-3d-scenes-studio/new-behavior-status-custom.png"::: + +These expressions use the JavaScript language, and allow you to use one or more properties of associated twins to define custom logic. + +The following chart indicates which JavaScript operators are supported in 3D Scenes Studio. + +| Operator type | Supported? | +| --- | --- | +| Assignment operators | No | +| Comparison operators | Yes | +| Arithmetic operators | Yes | +| Bitwise operators | Yes | +| Logical operators | Yes | +| String operators | Yes | +| Conditional (ternary) operator | Yes | +| Command operator | No | +| Unary operators | No | +| Relational operators | No | + ## Manage layers You can create *layers* in your scene to help organize your [behaviors](#add-behaviors). Layers act like tags on the behaviors, enabling you to define which behaviors need to be seen together, thus creating custom views of your scene for different roles or tasks. -If there are no layers in a scene, all of the behaviors will show up in the scene viewer. If there's at least one layer present in the scene, then only behaviors that are **tagged with a layer** will show up in the viewer. - One way to create layers is to use the **Scene layers** button in the **Build** view for a scene. :::image type="content" source="media/how-to-use-3d-scenes-studio/layers-start-button.png" alt-text="Screenshot of 3D Scenes Studio builder for a scene. The Scene layers button is highlighted." lightbox="media/how-to-use-3d-scenes-studio/layers-start-button.png"::: @@ -262,33 +320,54 @@ Alternatively, you can create layers while [creating or modifying a behavior](#n :::image type="content" source="media/how-to-use-3d-scenes-studio/layers-start-behavior.png" alt-text="Screenshot of the Modify Behavior options in 3D Scenes Studio. A Scene layer is being selected." lightbox="media/how-to-use-3d-scenes-studio/layers-start-behavior.png"::: +When looking at your scene in the viewer, you can use the **Select layers** button to choose which layers show up in the visualization. Behaviors that aren't part of any layer are grouped under **Default layer**. 
+ +:::image type="content" source="media/how-to-use-3d-scenes-studio/layers-select-viewer.png" alt-text="Screenshot of 3D Scenes Studio in View mode. The layer selection is highlighted." lightbox="media/how-to-use-3d-scenes-studio/layers-select-viewer.png"::: + ## Modify theme -In the **Build** view for a scene, you can use the **Theme** button to change the style, object colors, and background color of the display. +In either the builder or viewer for a scene, select the **Theme** icon to change the style, object colors, and background color of the display. :::image type="content" source="media/how-to-use-3d-scenes-studio/theme.png" alt-text="Screenshot of 3D Scenes Studio builder for a scene. The Theme button is highlighted." lightbox="media/how-to-use-3d-scenes-studio/theme.png"::: ## Share your environment -A *3D Scenes Studio environment* is formed from a unique pairing of an **Azure Digital Twins instance** and an **Azure storage container**. +A *3D Scenes Studio environment* is formed from a unique pairing of an **Azure Digital Twins instance** and an **Azure storage container**. You can share your entire environment with someone, including all of your scenes, or share a specific scene. -To share your environment with someone else, they need to have these permissions to your resources: +To share your environment with someone else, start by giving them the following permissions to your resources: * *Azure Digital Twins Data Reader* access (or greater) on the Azure Digital Twins instance * *Storage Blob Data Reader* access (or greater) to the storage container * *Storage Blob Data Reader* will allow them to view your scenes. - * *Storage Blob Data Owner* will allow them to edit your scenes. + * *Storage Blob Data Owner* or *Storage Blob Data Contributor* will allow them to edit your scenes. + +Then, follow the instructions in the rest of this section to share either your [entire environment](#share-general-environment) or a [specific scene](#share-a-specific-scene). -Once someone has the required permissions, there are two ways to give them access to your environment. You can do either of the following things: +### Share general environment + +Once someone has the required permissions, there are two ways to give them access to your entire environment. You can do either of the following things: * Use the Share button on the 3D Scenes Studio homepage to copy the **URL of your 3D Scenes Studio environment**. (The URL includes the URLs of both your Azure Digital Twins instance and your storage container.) - :::image type="content" source="media/how-to-use-3d-scenes-studio/copy-url.png" alt-text="Screenshot of the Share button in 3D Scenes Studio." lightbox="media/how-to-use-3d-scenes-studio/copy-url.png"::: + :::image type="content" source="media/how-to-use-3d-scenes-studio/copy-url.png" alt-text="Screenshot of the Share environment button in 3D Scenes Studio." lightbox="media/how-to-use-3d-scenes-studio/copy-url.png"::: Share it with the recipient, who can paste this URL directly into their browser to connect to your environment. * Share the **URL of your Azure Digital Twins instance** and the **URL of your Azure storage container** that you used when [initializing your 3D Scenes Studio environment](#initialize-your-3d-scenes-studio-environment). The recipient can access [3D Scenes Studio](https://dev.explorer.azuredigitaltwins-test.net/3dscenes) and initialize it with these same URL values to connect to your same environment. 
After this, the recipient can view and interact with your scenes in the studio. +### Share a specific scene + +You can also share your environment with a link directly to a specific scene. To share a specific scene, open the scene in **View** mode. + +Use the **Share scene** icon to generate a link to your scene. You can choose whether you want to link to preserve your current layer and element selections. + +:::image type="content" source="media/how-to-use-3d-scenes-studio/share-scene.png" alt-text="Screenshot of the Share scene button in 3D Scenes Studio." lightbox="media/how-to-use-3d-scenes-studio/share-scene.png"::: + +When the recipient pastes this URL into their browser, the specified scene will open in the viewer, with any chosen layers or elements selected. + +>[!NOTE] +>When a scene is shared with someone in this way, the recipient will also be able to leave this scene and view other scenes in your environment if they choose. + ## Next steps Try out 3D Scenes Studio with a sample scenario in [Get started with 3D Scenes Studio](quickstart-3d-scenes-studio.md). -Or, visualize your Azure Digital Twins graph differently using [Azure Digital Twins Explorer](how-to-use-azure-digital-twins-explorer.md). +Or, visualize your Azure Digital Twins graph differently using [Azure Digital Twins Explorer](how-to-use-azure-digital-twins-explorer.md). \ No newline at end of file diff --git a/articles/digital-twins/how-to-use-azure-digital-twins-explorer.md b/articles/digital-twins/how-to-use-azure-digital-twins-explorer.md index fab914844dff1..45eb6f41333f7 100644 --- a/articles/digital-twins/how-to-use-azure-digital-twins-explorer.md +++ b/articles/digital-twins/how-to-use-azure-digital-twins-explorer.md @@ -1,6 +1,6 @@ --- # Mandatory fields. -title: Use Azure Digital Twins Explorer (all features) +title: Use Azure Digital Twins Explorer titleSuffix: Azure Digital Twins description: Learn how to use all the features of Azure Digital Twins Explorer author: baanders diff --git a/articles/digital-twins/how-to-use-data-history.md b/articles/digital-twins/how-to-use-data-history.md index 6831c7512559a..7c9d4c718d258 100644 --- a/articles/digital-twins/how-to-use-data-history.md +++ b/articles/digital-twins/how-to-use-data-history.md @@ -155,7 +155,7 @@ Next, create the Kusto cluster. The command below requires 5-10 minutes to execu az kusto cluster create --cluster-name $clustername --sku name="Dev(No SLA)_Standard_E2a_v4" tier="Basic" --resource-group $resourcegroup --location $location --type SystemAssigned ``` -Create a database in your new Kusto cluster (using the cluster name from above and in the same location). This database will be used to store contextualized Azure Digital Twins data. The command below creates a database with a soft delete period of 365 days, and a hot cache period of 31 days. For more information about the options available for this command, see [az kusto database create](/cli/azure/kusto/database?view=azure-cli-latest&preserve-view=true#az_kusto_database_create). +Create a database in your new Kusto cluster (using the cluster name from above and in the same location). This database will be used to store contextualized Azure Digital Twins data. The command below creates a database with a soft delete period of 365 days, and a hot cache period of 31 days. For more information about the options available for this command, see [az kusto database create](/cli/azure/kusto/database?view=azure-cli-latest&preserve-view=true#az-kusto-database-create). 
```azurecli-interactive az kusto database create --cluster-name $clustername --database-name $databasename --resource-group $resourcegroup --read-write-database soft-delete-period=P365D hot-cache-period=P31D location=$location diff --git a/articles/digital-twins/index.yml b/articles/digital-twins/index.yml index e3f80b92bb8bb..66a482eb26c06 100644 --- a/articles/digital-twins/index.yml +++ b/articles/digital-twins/index.yml @@ -24,6 +24,8 @@ landingContent: links: - text: Get started with Azure Digital Twins Explorer url: quickstart-azure-digital-twins-explorer.md + - text: Get started with 3D Scenes Studio + url: quickstart-3d-scenes-studio.md - linkListType: how-to-guide links: - text: Set up an instance and authentication diff --git a/articles/digital-twins/media/concepts-3d-scenes-studio/build-mode.png b/articles/digital-twins/media/concepts-3d-scenes-studio/build-mode.png index f819583e9c7f4..cb71bc1052ca7 100644 Binary files a/articles/digital-twins/media/concepts-3d-scenes-studio/build-mode.png and b/articles/digital-twins/media/concepts-3d-scenes-studio/build-mode.png differ diff --git a/articles/digital-twins/media/concepts-3d-scenes-studio/view-mode.png b/articles/digital-twins/media/concepts-3d-scenes-studio/view-mode.png index eb8af798558af..fd626d37fb9f8 100644 Binary files a/articles/digital-twins/media/concepts-3d-scenes-studio/view-mode.png and b/articles/digital-twins/media/concepts-3d-scenes-studio/view-mode.png differ diff --git a/articles/digital-twins/media/how-to-create-app-registration/register-an-application.png b/articles/digital-twins/media/how-to-create-app-registration/register-an-application.png index 65c107cb3c51c..c416f3c0b6f2c 100644 Binary files a/articles/digital-twins/media/how-to-create-app-registration/register-an-application.png and b/articles/digital-twins/media/how-to-create-app-registration/register-an-application.png differ diff --git a/articles/digital-twins/media/how-to-set-up-instance/cloud-shell/cloud-shell-upload.png b/articles/digital-twins/media/how-to-set-up-instance/cloud-shell/cloud-shell-upload.png index e3404c3e3b692..27f78c5c29ec8 100644 Binary files a/articles/digital-twins/media/how-to-set-up-instance/cloud-shell/cloud-shell-upload.png and b/articles/digital-twins/media/how-to-set-up-instance/cloud-shell/cloud-shell-upload.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/add-scene.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/add-scene.png index 5d97fedaf96b0..a2469790f135c 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/add-scene.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/add-scene.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/copy-url.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/copy-url.png index aed0bd85a91ca..04561f1839947 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/copy-url.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/copy-url.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/cors.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/cors.png deleted file mode 100644 index 067aa0f5ce1da..0000000000000 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/cors.png and /dev/null differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/edit-delete-scene.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/edit-delete-scene.png new 
file mode 100644 index 0000000000000..64d520fb589dc Binary files /dev/null and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/edit-delete-scene.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/globe-view-1.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/globe-view-1.png index 8e74894750f6c..7c40904be4967 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/globe-view-1.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/globe-view-1.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/globe-view-2.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/globe-view-2.png index 344b4da1de8f1..d89d76fa9a8c4 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/globe-view-2.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/globe-view-2.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/layers-select-viewer.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/layers-select-viewer.png new file mode 100644 index 0000000000000..4aa0a19c1709b Binary files /dev/null and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/layers-select-viewer.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/layers-start-behavior.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/layers-start-behavior.png index 3850dfd673ce7..249556b326ce4 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/layers-start-behavior.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/layers-start-behavior.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/layers-start-button.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/layers-start-button.png index 2bb5b89679ea2..59fdcf493b5ee 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/layers-start-button.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/layers-start-button.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/modify-element-behaviors.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/modify-element-behaviors.png deleted file mode 100644 index 3ea5b22f167ea..0000000000000 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/modify-element-behaviors.png and /dev/null differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-alerts-expression.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-alerts-expression.png new file mode 100644 index 0000000000000..8e7cdb29fbc6f Binary files /dev/null and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-alerts-expression.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-alerts.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-alerts.png index 61255b622fe8b..e03ab6a0f32d9 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-alerts.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-alerts.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-elements.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-elements.png index 2ae1185aabd8b..4fef7e992f122 100644 Binary files 
a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-elements.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-elements.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-panel.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-panel.png index 70473f3be06dd..a31f85fa7c2a8 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-panel.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-panel.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-start-button.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-start-button.png index 77fedbb68fb18..7e245105f7f76 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-start-button.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-start-button.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-status-custom.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-status-custom.png new file mode 100644 index 0000000000000..b86a64a70566e Binary files /dev/null and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-status-custom.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-status.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-status.png index 3660d8aa69846..1f387d41bd8ca 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-status.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-status.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-twins-new-alias.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-twins-new-alias.png deleted file mode 100644 index 11bcf7889b893..0000000000000 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-twins-new-alias.png and /dev/null differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-twins-new-twin.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-twins-new-twin.png new file mode 100644 index 0000000000000..9681d0210b9df Binary files /dev/null and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-twins-new-twin.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-twins.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-twins.png index d3386a2520e40..8480ad0b93881 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-twins.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-twins.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-gauge.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-gauge.png index 0408977496bf0..b20e8e49e0b6d 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-gauge.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-gauge.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-library.png 
b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-library.png index 912dc67bd768b..19f89d7ec8a0b 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-library.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-library.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-link.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-link.png index ca1ddb12ddd7a..2241b1bb3a96a 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-link.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-link.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-value-expression.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-value-expression.png new file mode 100644 index 0000000000000..f5d9a4e70d497 Binary files /dev/null and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-value-expression.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-value.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-value.png index e21df46fecc2b..cacbee798bfb2 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-value.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-value.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-aliased-twins.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-aliased-twins.png deleted file mode 100644 index aca036969e2b0..0000000000000 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-aliased-twins.png and /dev/null differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-behaviors.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-behaviors.png index b3b753f0ef986..c9dc1ef49581a 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-behaviors.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-behaviors.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-meshes.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-meshes.png index 4c524bddfbb56..16b65c35588e6 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-meshes.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-meshes.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-other-twins.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-other-twins.png new file mode 100644 index 0000000000000..0e4a6069f5e8b Binary files /dev/null and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-other-twins.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-panel.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-panel.png index 4d70af2e8bdf5..23d0a198e286e 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-panel.png and 
b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-panel.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-primary-twin.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-primary-twin.png index 27ea9a9dd6b02..06c48c795a752 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-primary-twin.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-primary-twin.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-start-button.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-start-button.png index 2b575d31cd5a4..cccb7c5d29594 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-start-button.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-element-start-button.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/scene-build.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/scene-build.png index 4dfffd6b04a26..8d3469dffc78e 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/scene-build.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/scene-build.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/scene-view.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/scene-view.png index 1414007f334f6..bfc303c7bae1b 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/scene-view.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/scene-view.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/share-scene.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/share-scene.png new file mode 100644 index 0000000000000..925cb959026d1 Binary files /dev/null and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/share-scene.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/studio-edit-environment-1.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/studio-edit-environment-1.png index bebf71660e3f7..661999451a7c0 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/studio-edit-environment-1.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/studio-edit-environment-1.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/studio-edit-environment-2.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/studio-edit-environment-2.png index d7427ae78d35d..3d19dc703818d 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/studio-edit-environment-2.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/studio-edit-environment-2.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/theme.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/theme.png index 31dff582cfb1f..6ad7191021bd8 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/theme.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/theme.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/add-scene-create.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/add-scene-create.png index 37c791e89c98c..fdad716a26d4f 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/add-scene-create.png and 
b/articles/digital-twins/media/quickstart-3d-scenes-studio/add-scene-create.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/add-scene-upload-file.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/add-scene-upload-file.png index fc948e42181c1..f80b44f18f859 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/add-scene-upload-file.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/add-scene-upload-file.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/add-storage-role-1.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/add-storage-role-1.png deleted file mode 100644 index 49302b8abbff3..0000000000000 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/add-storage-role-1.png and /dev/null differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/add-storage-role-2.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/add-storage-role-2.png deleted file mode 100644 index 2a6c16e101577..0000000000000 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/add-storage-role-2.png and /dev/null differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/container-properties.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/container-properties.png deleted file mode 100644 index 0f73a0ad1b89a..0000000000000 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/container-properties.png and /dev/null differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/container-url.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/container-url.png deleted file mode 100644 index d940135c5faff..0000000000000 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/container-url.png and /dev/null differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/create-container.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/create-container.png deleted file mode 100644 index 934e8b1f35b0c..0000000000000 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/create-container.png and /dev/null differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/create-storage-account-1.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/create-storage-account-1.png deleted file mode 100644 index a85ab84cd59c3..0000000000000 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/create-storage-account-1.png and /dev/null differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/create-storage-account-2.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/create-storage-account-2.png deleted file mode 100644 index 7ac7562f121b3..0000000000000 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/create-storage-account-2.png and /dev/null differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/data-simulator-to-graph.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/data-simulator-to-graph.png index 24ecd60c5a01d..f09da0a28de46 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/data-simulator-to-graph.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/data-simulator-to-graph.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/delete-storage.png 
b/articles/digital-twins/media/quickstart-3d-scenes-studio/delete-storage.png index 57a72688928c3..a92670a1bce55 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/delete-storage.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/delete-storage.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/deployment-complete-storage.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/deployment-complete-storage.png deleted file mode 100644 index ed01888cac5a3..0000000000000 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/deployment-complete-storage.png and /dev/null differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene-view-1.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene-view-1.png new file mode 100644 index 0000000000000..b463366d0a8f3 Binary files /dev/null and b/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene-view-1.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene-view-element-2.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene-view-element-2.png new file mode 100644 index 0000000000000..53ae361ee6289 Binary files /dev/null and b/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene-view-element-2.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene-view-element.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene-view-element.png new file mode 100644 index 0000000000000..955e6d3a8048d Binary files /dev/null and b/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene-view-element.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene.png new file mode 100644 index 0000000000000..07ba1d875974b Binary files /dev/null and b/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/factory-scene-view-1.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/factory-scene-view-1.png deleted file mode 100644 index 4dc9a56028ac7..0000000000000 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/factory-scene-view-1.png and /dev/null differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/factory-scene-view-element-2.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/factory-scene-view-element-2.png deleted file mode 100644 index 568296ff80f8e..0000000000000 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/factory-scene-view-element-2.png and /dev/null differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/factory-scene-view-element.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/factory-scene-view-element.png deleted file mode 100644 index e184c8463f3d5..0000000000000 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/factory-scene-view-element.png and /dev/null differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/factory-scene.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/factory-scene.png deleted file mode 100644 index 96f66f2d2a16c..0000000000000 Binary files 
a/articles/digital-twins/media/quickstart-3d-scenes-studio/factory-scene.png and /dev/null differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-arm-element-2.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-arm-element-2.png index 7bac99a3b046a..796c2a67f6e45 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-arm-element-2.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-arm-element-2.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-arm-element.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-arm-element.png index 928529fba79fb..4a261d980e0c4 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-arm-element.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-arm-element.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-alerts.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-alerts.png index 4813c65cfe8b4..3711597595846 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-alerts.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-alerts.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-badges.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-badges.png deleted file mode 100644 index bf9eb9f750519..0000000000000 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-badges.png and /dev/null differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-create.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-create.png index ec0a7f86f882d..7f1868abd38a9 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-create.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-create.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-elements.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-elements.png index 0c46e8fd968d6..1b8cb4ae84313 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-elements.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-elements.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-status.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-status.png index 57f852be4d7ee..728ca1fe03fe2 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-status.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-status.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-twins.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-twins.png deleted file mode 100644 index ea9fa429e4a4f..0000000000000 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-twins.png and /dev/null differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-widgets.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-widgets.png index df780337a091a..a5a574926bbe7 100644 Binary files 
a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-widgets.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior-widgets.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior.png index 3c58da2a20948..32ef056d3863d 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-behavior.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-element-behaviors.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-element-behaviors.png index ef5c08d1f9efc..fc9d3c2e53fa4 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-element-behaviors.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-element-behaviors.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-element-details-2.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-element-details-2.png index bfc48040633af..717beccaf6cf0 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-element-details-2.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-element-details-2.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-element-details.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-element-details.png index 92c8da34851ed..83aebb443792d 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-element-details.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-element-details.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-tank-element.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-tank-element.png deleted file mode 100644 index e7727c07180e4..0000000000000 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-tank-element.png and /dev/null differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-widget-gauge.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-widget-gauge.png index 862b7de217329..216df2567dbdd 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-widget-gauge.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-widget-gauge.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-widget-link.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-widget-link.png index 179c0a3d5445b..8bcac023cfc59 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/new-widget-link.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/new-widget-link.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/storage-account-id.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/storage-account-id.png new file mode 100644 index 0000000000000..ccf2fb5de9f69 Binary files /dev/null and b/articles/digital-twins/media/quickstart-3d-scenes-studio/storage-account-id.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-dismiss-demo.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-dismiss-demo.png new file mode 100644 index 0000000000000..392b9a45ce360 Binary files /dev/null and 
b/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-dismiss-demo.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-edit-environment-1.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-edit-environment-1.png index 06f18bfff3b1c..538aed5132b2b 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-edit-environment-1.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-edit-environment-1.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-edit-environment-2.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-edit-environment-2.png index d7427ae78d35d..5dc30a7fb6f60 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-edit-environment-2.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-edit-environment-2.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-full.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-full.png index 9135663711bb8..f32d76e303c71 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-full.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-full.png differ diff --git a/articles/digital-twins/quickstart-3d-scenes-studio.md b/articles/digital-twins/quickstart-3d-scenes-studio.md index 10aeac6d05053..ed1cd46990e0b 100644 --- a/articles/digital-twins/quickstart-3d-scenes-studio.md +++ b/articles/digital-twins/quickstart-3d-scenes-studio.md @@ -1,8 +1,8 @@ --- # Mandatory fields. -title: Quickstart - Get started with 3D Scenes Studio +title: Quickstart - Get started with 3D Scenes Studio (preview) titleSuffix: Azure Digital Twins -description: Learn how to use 3D Scenes Studio for Azure Digital Twins by following this demo, where you'll create a sample scene with elements and behaviors. +description: Learn how to use 3D Scenes Studio (preview) for Azure Digital Twins by following this demo, where you'll create a sample scene with elements and behaviors. author: baanders ms.author: baanders # Microsoft employees only ms.date: 05/04/2022 @@ -16,9 +16,9 @@ ms.custom: event-tier1-build-2022 # manager: MSFT-alias-of-manager-or-PM-counterpart --- -# Quickstart - Get started with 3D Scenes Studio for Azure Digital Twins +# Quickstart - Get started with 3D Scenes Studio (preview) for Azure Digital Twins -Azure Digital Twins *3D Scenes Studio* is an immersive 3D environment, where business and front-line workers can consume and investigate operational data from their Azure Digital Twins solutions with visual context. +Azure Digital Twins *3D Scenes Studio (preview)* is an immersive 3D environment, where business and front-line workers can consume and investigate operational data from their Azure Digital Twins solutions with visual context. In this article, you'll set up all the required resources for using 3D Scenes Studio, including an Azure Digital Twins instance with sample data, and Azure storage resources. Then, you'll create a scene in the studio that's connected to the sample Azure Digital Twins environment. @@ -87,77 +87,65 @@ To see the models that have been uploaded and how they relate to each other, sel ## Create storage resources -Next, create a new storage account and a container in the storage account. 3D Scenes Studio will use this storage container to store your 3D file and configuration information. 
You'll also set up read and write permissions to the storage account. +Next, create a new storage account and a container in the storage account. 3D Scenes Studio will use this storage container to store your 3D file and configuration information. -### Create the storage account +You'll also set up read and write permissions to the storage account. In order to set these backing resources up quickly, this section uses the [Azure Cloud Shell](../cloud-shell/overview.md). -1. In the [Azure portal](https://portal.azure.com), search for *storage accounts* in the top search bar. -1. On the **Storage accounts** page, select **+ Create**. +1. Navigate to the [Cloud Shell](https://shell.azure.com) in your browser. - :::image type="content" source="media/quickstart-3d-scenes-studio/create-storage-account-1.png" alt-text="Screenshot of the Azure portal showing the Storage accounts page and highlighting the Create button." lightbox="media/quickstart-3d-scenes-studio/create-storage-account-1.png"::: + Run the following command to set the CLI context to your subscription for this session. -1. Fill in the details on the **Basics** tab, including your **Subscription** and **Resource group**. Choose a **Storage account name** and **Region**, select **Standard** performance, and select **Geo-redundant storage (GRS)**. - :::image type="content" source="media/quickstart-3d-scenes-studio/create-storage-account-2.png" alt-text="Screenshot of the Azure portal showing the Basics tab of storage account creation." lightbox="media/quickstart-3d-scenes-studio/create-storage-account-2.png"::: + ```azurecli + az account set --subscription "" + ``` +1. Run the following command to create a storage account in your subscription. The command contains placeholders for you to enter a name and choose a region for your storage account, as well as a placeholder for your resource group. - Select **Review + create**. + ```azurecli + az storage account create --resource-group --name --location --sku Standard_RAGRS + ``` -1. You will see a summary page on the **Review + create** tab showing the details you've entered. Confirm and create the storage account by selecting **Create**. -1. After deployment completes, use the **Go to resource** button to navigate to the storage account in the portal. - :::image type="content" source="media/quickstart-3d-scenes-studio/deployment-complete-storage.png" alt-text="Screenshot of the deployment page for the storage account in the Azure portal. The page indicates that deployment is complete." lightbox="media/quickstart-3d-scenes-studio/deployment-complete-storage.png"::: + When the command completes successfully, you'll see details of your new storage account in the output. Look for the `ID` value in the output and copy it to use in the next command. -1. Select **Access Control (IAM)** from the storage account's left menu, **+ Add**, and **Add role assignment**. - :::image type="content" source="media/quickstart-3d-scenes-studio/add-storage-role-1.png" alt-text="Screenshot of the IAM tab for the storage account in the Azure portal." lightbox="media/quickstart-3d-scenes-studio/add-storage-role-1.png"::: - -1. Search for *Storage Blob Data Owner* and select **Next**. This level of access will allow you to perform both read and write operations in 3D Scenes Studio. -1. Switch to the **Members** tab. Assign access to a **User, group, or service principal**, and select **+ Select members**. Search for your name in the list and hit **Select**. 
- :::image type="content" source="media/quickstart-3d-scenes-studio/add-storage-role-2.png" alt-text="Screenshot of granting a user Storage Blob Data Owner in the Azure portal." lightbox="media/quickstart-3d-scenes-studio/add-storage-role-2.png"::: - -1. Select **Review + assign** to review the details of your assignment, and **Review + assign** again to confirm and finish the role assignment. - -### Configure CORS - -Next, configure CORS for your storage account. This will be necessary for 3D Scenes Studio to access your storage container. - -1. Return to the storage account's page in the portal. -1. Scroll down in the left menu to **Resource sharing (CORS)** and select it. -1. On the **Resource sharing (CORS)** page for your storage account, fill in an entry with the following details: - 1. **Allowed origins** - Enter *https://explorer.digitaltwins.azure.net*. - 1. **Allowed methods** - Select the checkboxes for *GET*, *POST*, *OPTIONS*, and *PUT*. - 1. **Allowed headers** - Enter *Authorization,x-ms-version,x-ms-blob-type* -1. Select **Save**. - - :::image type="content" source="media/how-to-use-3d-scenes-studio/cors.png" alt-text="Screenshot of the Azure portal where the C O R S entry is being created and saved." lightbox="media/how-to-use-3d-scenes-studio/cors.png"::: - -### Create the container + :::image type="content" source="media/quickstart-3d-scenes-studio/storage-account-id.png" alt-text="Screenshot of Cloud Shell output. The I D of the storage account is highlighted." lightbox="media/quickstart-3d-scenes-studio/storage-account-id.png"::: + +1. Run the following command to grant yourself the *Storage Blob Data Owner* on the storage account. This level of access will allow you to perform both read and write operations in 3D Scenes Studio. The command contains placeholders for your Azure account and the ID of your storage account from the previous step. -Lastly, create a private container in the storage account. + ```azurecli + az role assignment create --role "Storage Blob Data Owner" --assignee --scope + ``` -1. Select **Containers** from the left menu for the storage account and use **+ Container** to create a new container. + When the command completes successfully, you'll see details of the role assignment in the output. -1. Enter a **Name** for the container and set the **Public access level** to **Private**. Select **Create**. +1. Run the following command to configure CORS for your storage account. This will be necessary for 3D Scenes Studio to access your storage container. The command contains a placeholder for the name of your storage account. - :::image type="content" source="media/quickstart-3d-scenes-studio/create-container.png" alt-text="Screenshot of the Azure portal highlighting Containers for the storage account." lightbox="media/quickstart-3d-scenes-studio/create-container.png"::: -1. Once the container has been created, open its menu of options and select **Container properties**. + ```azurecli + az storage cors add --services b --methods GET OPTIONS POST PUT --origins https://explorer.digitaltwins.azure.net --allowed-headers Authorization x-ms-version x-ms-blob-type --account-name + ``` - :::image type="content" source="media/quickstart-3d-scenes-studio/container-properties.png" alt-text="Screenshot of the Azure portal highlighting the Container Properties for the new container." lightbox="media/quickstart-3d-scenes-studio/container-properties.png"::: + This command doesn't have any output. - This will bring you to a **Properties** page for the container. -1. 
Copy the **URL** and save this value to use later. +1. Run the following command to create a private container in the storage account. Your 3D Scenes Studio files will be stored here. The command contains a placeholder for you to enter a name for your storage container, and a placeholder for the name of your storage account. + ```azurecli + az storage container create --name --public-access off --account-name + ``` - :::image type="content" source="media/quickstart-3d-scenes-studio/container-url.png" alt-text="Screenshot of the Azure portal highlighting the container's U R L value." lightbox="media/quickstart-3d-scenes-studio/container-url.png"::: + When the command completes successfully, the output will show `"created": true`. ## Initialize your 3D Scenes Studio environment Now that all your resources are set up, you can use them to create an environment in *3D Scenes Studio*. In this section, you'll create a scene and customize it for the sample graph that's in your Azure Digital Twins instance. -1. Navigate to the [3D Scenes Studio](https://explorer.digitaltwins.azure.net/3dscenes). The studio will open, connected to the Azure Digital Twins instance that you accessed last in the Azure Digital Twins Explorer. +1. Navigate to the [3D Scenes Studio](https://explorer.digitaltwins.azure.net/3dscenes). The studio will open, connected to the Azure Digital Twins instance that you accessed last in the Azure Digital Twins Explorer. Dismiss the welcome demo. + + :::image type="content" source="media/quickstart-3d-scenes-studio/studio-dismiss-demo.png" alt-text="Screenshot of 3D Scenes Studio with welcome demo." lightbox="media/quickstart-3d-scenes-studio/studio-dismiss-demo.png"::: + 1. Select the **Edit** icon next to the instance name to configure the instance and storage container details. :::image type="content" source="media/quickstart-3d-scenes-studio/studio-edit-environment-1.png" alt-text="Screenshot of 3D Scenes Studio highlighting the edit environment icon, which looks like a pencil." lightbox="media/quickstart-3d-scenes-studio/studio-edit-environment-1.png"::: - 1. For the **Environment URL**, enter *https://*, followed by the *host name* of your instance from the [Collect host name](#collect-host-name) step. + 1. For the **Azure Digital Twins instance URL**, fill the *host name* of your instance from the [Collect host name](#collect-host-name) step into this URL: `https://`. - 1. For the **Container URL**, enter the URL of your container from the [Create storage resources](#create-storage-resources) step. + 1. For the **Azure Storage container URL**, fill the names of your storage account and container from the [Create storage resources](#create-storage-resources) step into this URL: `https://.blob.core.windows.net/`. 1. Select **Save**. @@ -169,9 +157,9 @@ In this section you'll create a new 3D scene, using the *RobotArms.glb* 3D model This sample scene contains a visualization of the distribution center and its arms. You'll connect this visualization to the sample twins you created in the [Generate sample models and twins](#generate-sample-models-and-twins) step, and customize the data-driven view in later steps. -1. Select the **Add 3D scene** button to start creating a new scene. Enter a **Name** for your scene, and select **Upload file** under **3D file asset**. +1. Select the **Add 3D scene** button to start creating a new scene. Enter a **Name** and **Description** for your scene, and select **Upload file**. 
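As a recap of the [Create storage resources](#create-storage-resources) section above, here's a minimal end-to-end sketch of those commands with every placeholder filled in. The resource group, storage account, container, region, and sign-in name used here (*my3dscenes-rg*, *my3dscenesstorage*, *3dscenes*, *eastus*, and *user@contoso.com*) are illustrative assumptions only; substitute your own values.

```azurecli
# Select the subscription to use for this session.
az account set --subscription "My Azure Subscription"

# Create a resource group (skip this if you already have one) and the storage account.
az group create --name my3dscenes-rg --location eastus
storageAccountId=$(az storage account create --resource-group my3dscenes-rg --name my3dscenesstorage --location eastus --sku Standard_RAGRS --query id --output tsv)

# Grant yourself Storage Blob Data Owner on the account so 3D Scenes Studio can read and write blobs on your behalf.
az role assignment create --role "Storage Blob Data Owner" --assignee user@contoso.com --scope $storageAccountId

# Allow the 3D Scenes Studio web app to call the blob service from the browser (CORS).
az storage cors add --services b --methods GET OPTIONS POST PUT --origins https://explorer.digitaltwins.azure.net --allowed-headers Authorization x-ms-version x-ms-blob-type --account-name my3dscenesstorage

# Create a private container to hold the 3D file and scene configuration.
az storage container create --name 3dscenes --public-access off --account-name my3dscenesstorage
```

With these example names, the storage container URL used later when configuring the studio environment would be `https://my3dscenesstorage.blob.core.windows.net/3dscenes`.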
- :::image type="content" source="media/quickstart-3d-scenes-studio/add-scene-upload-file.png" alt-text="Screenshot of 3D Scenes Studio highlighting the Add 3D scene button and Upload file option." lightbox="media/quickstart-3d-scenes-studio/add-scene-upload-file.png"::: + :::image type="content" source="media/quickstart-3d-scenes-studio/add-scene-upload-file.png" alt-text="Screenshot of the Create new scene process in 3D Scenes Studio." lightbox="media/quickstart-3d-scenes-studio/add-scene-upload-file.png"::: 1. Browse for the *RobotArms.glb* file on your computer and open it. Select **Create**. :::image type="content" source="media/quickstart-3d-scenes-studio/add-scene-create.png" alt-text="Screenshot of creating a new scene in 3D Scenes Studio. The robot arms file has been uploaded and the Create button is highlighted." lightbox="media/quickstart-3d-scenes-studio/add-scene-create.png"::: @@ -179,7 +167,7 @@ This sample scene contains a visualization of the distribution center and its ar Once the file is uploaded, you'll see it listed back on the main screen of 3D Scenes Studio. 1. Select the scene to open and view it. The scene will open in **Build** mode. - :::image type="content" source="media/quickstart-3d-scenes-studio/factory-scene.png" alt-text="Screenshot of the factory scene in 3D Scenes Studio." lightbox="media/quickstart-3d-scenes-studio/factory-scene.png"::: + :::image type="content" source="media/quickstart-3d-scenes-studio/distribution-scene.png" alt-text="Screenshot of the factory scene in 3D Scenes Studio." lightbox="media/quickstart-3d-scenes-studio/distribution-scene.png"::: ## Create a scene element @@ -206,19 +194,13 @@ Next, you'll create a *behavior* for the element. These behaviors allow you to c :::image type="content" source="media/quickstart-3d-scenes-studio/new-behavior.png" alt-text="Screenshot of the New behavior button in 3D Scenes Studio." lightbox="media/quickstart-3d-scenes-studio/new-behavior.png"::: -1. For **Display name**, enter *Packing Line Efficiency*. Under **Elements**, select *Arm1* (it may already be selected). +1. For **Display name**, enter *Packing Line Efficiency*. Under **Elements**, select *Arm1*. :::image type="content" source="media/quickstart-3d-scenes-studio/new-behavior-elements.png" alt-text="Screenshot of the New behavior options in 3D Scenes Studio, showing the Elements options." lightbox="media/quickstart-3d-scenes-studio/new-behavior-elements.png"::: -1. Switch to view the **Twins** tab. This tab gives you the option to set up *aliased twins* to leverage more data in your behaviors. After configuring aliased twins, you'll be able to use properties from those twins in your behavior descriptions. - - :::image type="content" source="media/quickstart-3d-scenes-studio/new-behavior-twins.png" alt-text="Screenshot of the New behavior options in 3D Scenes Studio, showing the Twins options." lightbox="media/quickstart-3d-scenes-studio/new-behavior-twins.png"::: +1. Skip the **Twins** tab, which isn't used in this quickstart. Switch to the **Status** tab. *States* are data-driven overlays on your elements to indicate the health or status of the element. Here, you'll set value ranges for a property on the element and associate certain colors with each range. - You don't need to do anything in the **Twins** tab for this quickstart. - -1. Switch to the **Status** tab. *States* are data-driven overlays on your elements to indicate the health or status of the element. 
Here, you'll set value ranges for a property on the element and associate certain colors with each range. - - 1. Keep the **Property expression** on **Single property** and open the property dropdown list. It contains names of all the properties on the primary twin for the *Arm1* element. Select *PrimaryTwin.FailedPickupsLastHr*. + 1. Keep the **Property expression** on **Single property** and open the property dropdown list. It contains names of all the properties on the primary twin for the *Arm1* element. Select *FailedPickupsLastHr*. 1. In this sample scenario, you want to flag that an arm that misses three or more pickups in an hour requires maintenance, and an arm that misses one or two pickups may require maintenance in the future. Set two value ranges so that values *1-3* appear in one color, and values *3-Infinity* appear in another (the min range value is inclusive, and the max value is exclusive). @@ -228,7 +210,7 @@ Next, you'll create a *behavior* for the element. These behaviors allow you to c 1. For the **Trigger expression**, enter *PrimaryTwin.PickupFailedAlert*. `PickupFailedAlert` is a property on the primary twin that is set to True when a pickup was failed. Using it as the trigger expression means this alert will appear whenever the property value is True. - 1. Set the **Badge color**. For **Notification text**, enter *${PrimaryTwin.PickupFailedBoxID} was missed, please track down this box and remediate.* This will use the primary twin's property `PickupFailedBoxID` to display a message about which box the arm failed to pick up. + 1. Set the badge **Icon** and **Color**. For **Scenario description**, enter *${PrimaryTwin.PickupFailedBoxID} was missed, please track down this box and remediate.* This will use the primary twin's property `PickupFailedBoxID` to display a message about which box the arm failed to pick up. :::image type="content" source="media/quickstart-3d-scenes-studio/new-behavior-alerts.png" alt-text="Screenshot of the New behavior options in 3D Scenes Studio, showing the Alerts options." lightbox="media/quickstart-3d-scenes-studio/new-behavior-alerts.png"::: @@ -241,7 +223,7 @@ Next, you'll create a *behavior* for the element. These behaviors allow you to c From the **Widget library**, select the **Gauge** widget and then **Add widget**. - 1. In the **New widget** options, add a **Label** of *Hydraulic Pressure*, a **Unit of measure** of *m/s*, and a single-property **Property expression** of *PrimaryTwin.HydraulicPressure*. + 1. In the **New widget** options, add a **Display name** of *Hydraulic Pressure*, a **Unit of measure** of *m/s*, and a single-property **Property expression** of *PrimaryTwin.HydraulicPressure*. Set three value ranges so that values *0-40* appear one color, *40-80* appear in a second color, and *80-Infinity* appear in a third color (remember that the min range value is inclusive, and the max value is exclusive). @@ -253,7 +235,7 @@ Next, you'll create a *behavior* for the element. These behaviors allow you to c 1. Select **Add widget**. From the **Widget library**, select the **Link** widget and then **Add widget**. - 1. In the **New widget** options, enter a **Label** of *Live arm camera*. For the **URL**, you can use the example URL *contoso.aws.armstreams.com/${$dtid}*. (There's no live camera hosted at the URL for this sample, but the link represents where the video feed might be hosted in a real scenario.) + 1. In the **New widget** options, enter a **Label** of *Live arm camera*. 
For the **URL**, you can use the example URL *http://contoso.aws.armstreams.com/${PrimaryTwin.$dtId}*. (There's no live camera hosted at the URL for this sample, but the link represents where the video feed might be hosted in a real scenario.) 1. Select **Create widget**. @@ -269,11 +251,11 @@ The *Packing Line Efficiency* behavior will now show up in the list of behaviors So far, you've been working with 3D Scenes Studio in **Build** mode. Now, switch the mode to **View**. -:::image type="content" source="media/quickstart-3d-scenes-studio/factory-scene-view-1.png" alt-text="Screenshot of the factory scene in 3D Scenes Studio, highlighting the View mode button." lightbox="media/quickstart-3d-scenes-studio/factory-scene-view-1.png"::: +:::image type="content" source="media/quickstart-3d-scenes-studio/distribution-scene-view-1.png" alt-text="Screenshot of the factory scene in 3D Scenes Studio, highlighting the View mode button." lightbox="media/quickstart-3d-scenes-studio/distribution-scene-view-1.png"::: From the list of **Elements**, select the **Arm1** element that you created. The visualization will zoom in to show the visual element and display the behaviors you set up for it. -:::image type="content" source="media/quickstart-3d-scenes-studio/factory-scene-view-element.png" alt-text="Screenshot of the factory scene in 3D Scenes Studio, showing the viewer for the arm." lightbox="media/quickstart-3d-scenes-studio/factory-scene-view-element.png"::: +:::image type="content" source="media/quickstart-3d-scenes-studio/distribution-scene-view-element.png" alt-text="Screenshot of the factory scene in 3D Scenes Studio, showing the viewer for the arm." lightbox="media/quickstart-3d-scenes-studio/distribution-scene-view-element.png"::: ## Apply behavior to additional elements @@ -285,12 +267,14 @@ Sometimes, an environment might contain multiple similar elements, which should 1. Select a **Primary twin** for the new element, then switch to the **Behaviors** tab. :::image type="content" source="media/quickstart-3d-scenes-studio/new-element-details-2.png" alt-text="Screenshot of the New element options for Arm2 in 3D Scenes Studio." lightbox="media/quickstart-3d-scenes-studio/new-element-details-2.png"::: -1. Select **Add behavior**. Choose the **Packing Line Efficiency** behavior that you created in this quickstart. Then, select **Create element** to finish creating the new arm element. +1. Select **Add behavior**. Choose the **Packing Line Efficiency** behavior that you created in this quickstart. :::image type="content" source="media/quickstart-3d-scenes-studio/new-element-behaviors.png" alt-text="Screenshot of the New element behavior options for Arm2 in 3D Scenes Studio." lightbox="media/quickstart-3d-scenes-studio/new-element-behaviors.png"::: +1. Select **Create element** to finish creating the new arm element. + Switch to the **View** tab to see the behavior working on the new arm element. All the information you selected when [creating the behavior](#create-a-behavior) is now available for both of the arm elements in the scene. -:::image type="content" source="media/quickstart-3d-scenes-studio/factory-scene-view-element-2.png" alt-text="Screenshot of the factory scene in 3D Scenes Studio, showing the viewer for the second arm." 
lightbox="media/quickstart-3d-scenes-studio/factory-scene-view-element-2.png"::: +:::image type="content" source="media/quickstart-3d-scenes-studio/distribution-scene-view-element-2.png" alt-text="Screenshot of the factory scene in 3D Scenes Studio, showing the viewer for the second arm." lightbox="media/quickstart-3d-scenes-studio/distribution-scene-view-element-2.png"::: >[!TIP] >If you'd like, you can repeat the steps in this section to create elements for the remaining four arms, and apply the behavior to all of them to make the visualization complete. @@ -330,4 +314,4 @@ You may also want to delete the downloaded sample 3D file from your local machin Next, continue on to the Azure Digital Twins tutorials to build out your own Azure Digital Twins environment. > [!div class="nextstepaction"] -> [Code a client app](tutorial-code.md) +> [Code a client app](tutorial-code.md) \ No newline at end of file diff --git a/articles/digital-twins/troubleshoot-error-403.md b/articles/digital-twins/troubleshoot-error-403.md index 98b8caf26379b..499c04d207004 100644 --- a/articles/digital-twins/troubleshoot-error-403.md +++ b/articles/digital-twins/troubleshoot-error-403.md @@ -25,7 +25,7 @@ Most often, this error indicates that your Azure role-based access control (Azur ### Cause #2 -If you're using a client app to communicate with Azure Digital Twins that's authenticating with an [app registration](./how-to-create-app-registration-portal.md), this error may happen because your app registration doesn't have permissions set up for the Azure Digital Twins service. +If you're using a client app to communicate with Azure Digital Twins that's authenticating with an [app registration](./how-to-create-app-registration.md), this error may happen because your app registration doesn't have permissions set up for the Azure Digital Twins service. The app registration must have access permissions configured for the Azure Digital Twins APIs. Then, when your client app authenticates against the app registration, it will be granted the permissions that the app registration has configured. @@ -82,7 +82,7 @@ Next, select **API permissions** from the menu bar to verify that this app regis #### Fix issues -If any of this appears differently than described, follow the instructions on how to set up an app registration in [Create an app registration with Azure Digital Twins access](./how-to-create-app-registration-portal.md). +If any of this appears differently than described, follow the instructions on how to set up an app registration in [Create an app registration with Azure Digital Twins access](./how-to-create-app-registration.md). ## Next steps diff --git a/articles/dms/migration-using-azure-data-studio.md b/articles/dms/migration-using-azure-data-studio.md index 6a9ddc17f6fb0..10c7c8fa7de9e 100644 --- a/articles/dms/migration-using-azure-data-studio.md +++ b/articles/dms/migration-using-azure-data-studio.md @@ -126,7 +126,8 @@ When you migrate database(s) using the Azure SQL migration extension for Azure D - SSIS packages - Server roles - Server audit -- When migrating to SQL Server on Azure Virtual Machines, SQL Server 2014 and below as target versions are not supported currently. +- When migrating to SQL Server on Azure Virtual Machines, SQL Server 2008 and below as target versions are not supported currently. +- If you are using SQL Server 2012 or SQL Server 2014 you need to store your source database backup files on an Azure Storage Blob Container instead of using the network share option. 
Store the backup files as page blobs since block blobs are only supported in SQL 2016 and after. - Migrating to Azure SQL Database isn't supported. - Azure storage accounts secured by specific firewall rules or configured with a private endpoint are not supported for migrations. - You can't use an existing self-hosted integration runtime created from Azure Data Factory for database migrations with DMS. Initially, the self-hosted integration runtime should be created using the Azure SQL migration extension in Azure Data Studio and can be reused for further database migrations. diff --git a/articles/dns/TOC.yml b/articles/dns/TOC.yml index 194bfccc5cbdd..82183daa36c95 100644 --- a/articles/dns/TOC.yml +++ b/articles/dns/TOC.yml @@ -45,16 +45,16 @@ items: - name: Host your domain in Azure DNS href: dns-delegate-domain-azure-dns.md + - name: Create child DNS zones + href: tutorial-public-dns-zones-child.md - name: Create custom DNS records for a web app href: dns-web-sites-custom-domain.md - - name: Creating a new Child DNS zone - href: tutorial-public-dns-zones-child.md - - name: Alias records for Traffic Manager - href: tutorial-alias-tm.md - - name: Alias records for Public IP addresses - href: tutorial-alias-pip.md - - name: Alias records for zone records + - name: Create alias records for zone records href: tutorial-alias-rr.md + - name: Create alias records for public IP addresses + href: tutorial-alias-pip.md + - name: Create alias records for Traffic Manager + href: tutorial-alias-tm.md - name: Samples items: - name: Azure PowerShell @@ -189,6 +189,8 @@ items: - name: Azure PowerShell href: /powershell/module/az.dnsresolver + - name: Azure CLI + href: /cli/azure/dns-resolver - name: .NET href: /dotnet/api/azure.resourcemanager.dnsresolver - name: Java diff --git a/articles/dns/dns-delegate-domain-azure-dns.md b/articles/dns/dns-delegate-domain-azure-dns.md index a67ec78508aa0..0dcd3d79499e7 100644 --- a/articles/dns/dns-delegate-domain-azure-dns.md +++ b/articles/dns/dns-delegate-domain-azure-dns.md @@ -5,7 +5,7 @@ services: dns author: rohinkoul ms.service: dns ms.topic: tutorial -ms.date: 04/19/2021 +ms.date: 05/25/2022 ms.author: rohink #Customer intent: As an experienced network administrator, I want to configure Azure DNS, so I can host DNS zones. --- @@ -38,32 +38,32 @@ In this example, we'll reference the parent domain a `contoso.net`. 1. Go to the [Azure portal](https://portal.azure.com/) to create a DNS zone. Search for and select **DNS zones**. - ![DNS zone](./media/dns-delegate-domain-azure-dns/openzone650.png) +1. Select **+ Create**. -1. Select **Create DNS zone**. - -1. On the **Create DNS zone** page, enter the following values, and then select **Create**. For example, `contoso.net`. - - > [!NOTE] - > If the new zone that you are creating is a child zone (e.g. Parent zone = `contoso.net` Child zone = `child.contoso.net`), please refer to our [Creating a new Child DNS zone tutorial](./tutorial-public-dns-zones-child.md) +1. On the **Create DNS zone** page, enter the following values, and then select **Review + create**. | **Setting** | **Value** | **Details** | |--|--|--| - | **Resource group** | ContosoRG | Create a resource group. The resource group name must be unique within the subscription that you selected. The location of the resource group has no impact on the DNS zone. The DNS zone location is always "global," and isn't shown. 
| - | **Zone child** | leave unchecked | Since this zone is **not** a [child zone](./tutorial-public-dns-zones-child.md) you should leave this unchecked | - | **Name** | `contoso.net` | Field for your parent zone name | - | **Location** | East US | This field is based on the location selected as part of Resource group creation | + | **Resource group** | *ContosoRG* | Create a resource group. The resource group name must be unique within the subscription that you selected. The location of the resource group doesn't affect the DNS zone. The DNS zone location is always "global," and isn't shown. | + | **This zone is a child of an existing zone already hosted in Azure DNS** | leave unchecked | Leave this box unchecked since the DNS zone is **not** a [child zone](./tutorial-public-dns-zones-child.md). | + | **Name** | *contoso.net* | Enter your parent DNS zone name | + | **Resource group location** | *East US* | This field is based on the location selected as part of Resource group creation | +1. Select **Create**. + + + > [!NOTE] + > If the new zone that you are creating is a child zone (e.g. Parent zone = `contoso.net` Child zone = `child.contoso.net`), please refer to our [Creating a new Child DNS zone tutorial](./tutorial-public-dns-zones-child.md) ## Retrieve name servers Before you can delegate your DNS zone to Azure DNS, you need to know the name servers for your zone. Azure DNS gives name servers from a pool each time a zone is created. -1. With the DNS zone created, in the Azure portal **Favorites** pane, select **All resources**. On the **All resources** page, select your DNS zone. If the subscription you've selected already has several resources in it, you can enter your domain name in the **Filter by name** box to easily access the application gateway. +1. Select **Resource groups** in the left-hand menu, select the **ContosoRG** resource group, and then from the **Resources** list, select **contoso.net** DNS zone. -1. Retrieve the name servers from the DNS zone page. In this example, the zone `contoso.net` has been assigned name servers `ns1-01.azure-dns.com`, `ns2-01.azure-dns.net`, *`ns3-01.azure-dns.org`, and `ns4-01.azure-dns.info`: +1. Retrieve the name servers from the DNS zone page. In this example, the zone `contoso.net` has been assigned name servers `ns1-01.azure-dns.com`, `ns2-01.azure-dns.net`, `ns3-01.azure-dns.org`, and `ns4-01.azure-dns.info`: - ![List of name servers](./media/dns-delegate-domain-azure-dns/viewzonens500.png) + :::image type="content" source="./media/dns-delegate-domain-azure-dns/dns-name-servers.png" alt-text="Screenshot of D N S zone showing name servers" lightbox="./media/dns-delegate-domain-azure-dns/dns-name-servers.png"::: Azure DNS automatically creates authoritative NS records in your zone for the assigned name servers. @@ -110,9 +110,15 @@ You don't have to specify the Azure DNS name servers. If the delegation is set u ## Clean up resources -You can keep the **contosoRG** resource group if you intend to do the next tutorial. Otherwise, delete the **contosoRG** resource group to delete the resources created in this tutorial. +When no longer needed, you can delete all resources created in this tutorial by following these steps to delete the resource group **ContosoRG**: + +1. From the left-hand menu, select **Resource groups**. + +2. Select the **ContosoRG** resource group. + +3. Select **Delete resource group**. -Select the **contosoRG** resource group, and then select **Delete resource group**. +4. Enter **ContosoRG** and select **Delete**. 
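If you prefer the command line to the portal steps above, the following Azure CLI sketch covers the same flow: create the zone, retrieve its name servers, and clean up. It's a sketch only, reusing the tutorial's example names **ContosoRG** and `contoso.net`; the region and any other values are assumptions you should adjust.

```azurecli
# Create the resource group and the public DNS zone. DNS zones are global,
# so the resource group location doesn't affect the zone itself.
az group create --name ContosoRG --location eastus
az network dns zone create --resource-group ContosoRG --name contoso.net

# Retrieve the four assigned name servers to copy into the NS records at your domain registrar.
az network dns zone show --resource-group ContosoRG --name contoso.net --query nameServers --output tsv

# Clean up when the resources are no longer needed.
az group delete --name ContosoRG --yes
```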
## Next steps diff --git a/articles/dns/dns-private-resolver-get-started-portal.md b/articles/dns/dns-private-resolver-get-started-portal.md index af1adaf27eea5..d6a36c07fe72e 100644 --- a/articles/dns/dns-private-resolver-get-started-portal.md +++ b/articles/dns/dns-private-resolver-get-started-portal.md @@ -4,7 +4,7 @@ description: In this quickstart, you create and test a private DNS resolver in A services: dns author: greg-lindsay ms.author: greglin -ms.date: 05/11/2022 +ms.date: 06/02/2022 ms.topic: quickstart ms.service: dns ms.custom: mode-ui @@ -117,12 +117,64 @@ Create a second virtual network to simulate an on-premises or other environment. ![second vnet create](./media/dns-resolver-getstarted-portal/vnet-create.png) +## Link your forwarding ruleset to the second virtual network + +To apply your forwarding ruleset to the second virtual network, you must create a virtual link. + +1. Search for **DNS forwarding rulesets** in the Azure services list and select your ruleset (ex: **myruleset**). +2. Select **Virtual Network Links**, select **Add**, choose **myvnet2** and use the default Link Name **myvnet2-link**. +3. Select **Add** and verify that the link was added successfully. You might need to refresh the page. + + ![Screenshot of ruleset virtual network links.](./media/dns-resolver-getstarted-portal/ruleset-links.png) + +## Configure a DNS forwarding ruleset + +Add or remove specific rules your DNS forwarding ruleset as desired, such as: +- A rule to resolve an Azure Private DNS zone linked to your virtual network: azure.contoso.com. +- A rule to resolve an on-premises zone: internal.contoso.com. +- A wildcard rule to forward unmatched DNS queries to a protective DNS service. + +### Delete a rule from the forwarding ruleset + +Individual rules can be deleted or disabled. In this example, a rule is deleted. + +1. Search for **Dns Forwarding Rulesets** in the Azure Services list and select it. +2. Select the ruleset you previously configured (ex: **myruleset**) and then select **Rules**. +3. Select the **contosocom** sample rule that you previously configured, select **Delete**, and then select **OK**. + +### Add rules to the forwarding ruleset + +Add three new conditional forwarding rules to the ruleset. + +1. On the **myruleset | Rules** page, click **Add**, and enter the following rule data: + - Rule Name: **AzurePrivate** + - Domain Name: **azure.contoso.com.** + - Rule State: **Enabled** +2. Under **Destination IP address** enter 10.0.0.4, and then click **Add**. +3. On the **myruleset | Rules** page, click **Add**, and enter the following rule data: + - Rule Name: **Internal** + - Domain Name: **internal.contoso.com.** + - Rule State: **Enabled** +4. Under **Destination IP address** enter 192.168.1.2, and then click **Add**. +5. On the **myruleset | Rules** page, click **Add**, and enter the following rule data: + - Rule Name: **Wildcard** + - Domain Name: **.** (enter only a dot) + - Rule State: **Enabled** +6. Under **Destination IP address** enter 10.5.5.5, and then click **Add**. + + ![Screenshot of a forwarding ruleset example.](./media/dns-resolver-getstarted-portal/ruleset.png) + +In this example: +- 10.0.0.4 is the resolver's inbound endpoint. +- 192.168.1.2 is an on-premises DNS server. +- 10.5.5.5 is a protective DNS service. 
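With these rules in place, one way to spot-check the configuration is from a test virtual machine in the virtual network that's linked to the ruleset, using the VM's default Azure-provided DNS. The host names below (`server1.azure.contoso.com` and `host1.internal.contoso.com`) are hypothetical examples, not twins or records created by this quickstart, and the expected routing assumes the rules and addresses shown above.

```
# From a test VM in the ruleset-linked virtual network (default Azure-provided DNS):
nslookup server1.azure.contoso.com      # expected to follow the AzurePrivate rule to 10.0.0.4
nslookup host1.internal.contoso.com     # expected to be forwarded to the on-premises server 192.168.1.2

# Query the resolver's inbound endpoint directly, for example from on-premises over VPN or ExpressRoute:
nslookup server1.azure.contoso.com 10.0.0.4
```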
+ ## Test the private resolver You should now be able to send DNS traffic to your DNS resolver and resolve records based on your forwarding rulesets, including: - Azure DNS private zones linked to the virtual network where the resolver is deployed. -- DNS zones in the public internet DNS namespace. - Private DNS zones that are hosted on-premises. +- DNS zones in the public internet DNS namespace. ## Next steps diff --git a/articles/dns/dns-private-resolver-get-started-powershell.md b/articles/dns/dns-private-resolver-get-started-powershell.md index 853d684305de8..ed6e5b109db21 100644 --- a/articles/dns/dns-private-resolver-get-started-powershell.md +++ b/articles/dns/dns-private-resolver-get-started-powershell.md @@ -4,7 +4,7 @@ description: In this quickstart, you learn how to create and manage your first p services: dns author: greg-lindsay ms.author: greglin -ms.date: 05/10/2022 +ms.date: 06/02/2022 ms.topic: quickstart ms.service: dns ms.custom: devx-track-azurepowershell, mode-api @@ -17,7 +17,7 @@ This article walks you through the steps to create your first private DNS zone a [!INCLUDE [updated-for-az](../../includes/updated-for-az.md)] -Azure DNS Private Resolver is a new service currently in public preview. Azure DNS Private Resolver enables you to query Azure DNS private zones from an on-prem environment and vice versa without deploying VM based DNS servers. For more information, including benefits, capabilities, and regional availability, see [What is Azure DNS Private Resolver](dns-private-resolver-overview.md). +Azure DNS Private Resolver is a new service currently in public preview. Azure DNS Private Resolver enables you to query Azure DNS private zones from an on-premises environment and vice versa without deploying VM based DNS servers. For more information, including benefits, capabilities, and regional availability, see [What is Azure DNS Private Resolver](dns-private-resolver-overview.md). ## Prerequisites @@ -107,7 +107,7 @@ $virtualNetwork | Set-AzVirtualNetwork ### Create the inbound endpoint -Create an inbound endpoint to enable name resolution from on-prem or another private location using an IP address that is part of your private virtual network address space. +Create an inbound endpoint to enable name resolution from on-premises or another private location using an IP address that is part of your private virtual network address space. ```Azure PowerShell $ipconfig = New-AzDnsResolverIPConfigurationObject -PrivateIPAllocationMethod Dynamic -SubnetId /subscriptions//resourceGroups/myresourcegroup/providers/Microsoft.Network/virtualNetworks/myvnet/subnets/snet-inbound @@ -189,7 +189,7 @@ $virtualNetworkLink.ToJsonString() ## Create a second virtual network and link it to your DNS forwarding ruleset -Create a second virtual network to simulate an on-prem or other environment. +Create a second virtual network to simulate an on-premises or other environment. ```Azure PowerShell $vnet2 = New-AzVirtualNetwork -Name myvnet2 -ResourceGroupName myresourcegroup -Location westcentralus -AddressPrefix "12.0.0.0/8" @@ -205,22 +205,31 @@ $virtualNetworkLink2 = Get-AzDnsForwardingRulesetVirtualNetworkLink -DnsForwardi $virtualNetworkLink2.ToJsonString() ``` -## Create a forwarding rule +## Create forwarding rules Create a forwarding rule for a ruleset to one or more target DNS servers. You must specify the fully qualified domain name (FQDN) with a trailing dot. 
The **New-AzDnsResolverTargetDnsServerObject** cmdlet sets the default port as 53, but you can also specify a unique port.

```Azure PowerShell
-$targetDNS1 = New-AzDnsResolverTargetDnsServerObject -IPAddress 11.0.1.4 -Port 53
-$targetDNS2 = New-AzDnsResolverTargetDnsServerObject -IPAddress 11.0.1.5 -Port 53
-$forwardingrule = New-AzDnsForwardingRulesetForwardingRule -ResourceGroupName myresourcegroup -DnsForwardingRulesetName myruleset -Name "contosocom" -DomainName "contoso.com." -ForwardingRuleState "Enabled" -TargetDnsServer @($targetDNS1,$targetDNS2)
+$targetDNS1 = New-AzDnsResolverTargetDnsServerObject -IPAddress 192.168.1.2 -Port 53
+$targetDNS2 = New-AzDnsResolverTargetDnsServerObject -IPAddress 192.168.1.3 -Port 53
+$targetDNS3 = New-AzDnsResolverTargetDnsServerObject -IPAddress 10.0.0.4 -Port 53
+$targetDNS4 = New-AzDnsResolverTargetDnsServerObject -IPAddress 10.5.5.5 -Port 53
+$forwardingrule = New-AzDnsForwardingRulesetForwardingRule -ResourceGroupName myresourcegroup -DnsForwardingRulesetName myruleset -Name "Internal" -DomainName "internal.contoso.com." -ForwardingRuleState "Enabled" -TargetDnsServer @($targetDNS1,$targetDNS2)
+$forwardingrule = New-AzDnsForwardingRulesetForwardingRule -ResourceGroupName myresourcegroup -DnsForwardingRulesetName myruleset -Name "AzurePrivate" -DomainName "azure.contoso.com." -ForwardingRuleState "Enabled" -TargetDnsServer $targetDNS3
+$forwardingrule = New-AzDnsForwardingRulesetForwardingRule -ResourceGroupName myresourcegroup -DnsForwardingRulesetName myruleset -Name "Wildcard" -DomainName "." -ForwardingRuleState "Enabled" -TargetDnsServer $targetDNS4
```
+In this example:
+- 10.0.0.4 is the resolver's inbound endpoint.
+- 192.168.1.2 and 192.168.1.3 are on-premises DNS servers.
+- 10.5.5.5 is a protective DNS service.
+
## Test the private resolver

You should now be able to send DNS traffic to your DNS resolver and resolve records based on your forwarding rulesets, including:
- Azure DNS private zones linked to the virtual network where the resolver is deployed.
- DNS zones in the public internet DNS namespace.
-- Private DNS zones that are hosted on-prem.
+- Private DNS zones that are hosted on-premises.

## Delete a DNS resolver
diff --git a/articles/dns/dns-private-resolver-overview.md b/articles/dns/dns-private-resolver-overview.md
index 9faafdf4d74c3..40512c82b65ef 100644
--- a/articles/dns/dns-private-resolver-overview.md
+++ b/articles/dns/dns-private-resolver-overview.md
@@ -6,7 +6,7 @@ ms.custom: references_regions
 author: greg-lindsay
 ms.service: dns
 ms.topic: overview
-ms.date: 05/10/2022
+ms.date: 06/02/2022
 ms.author: greglin
 #Customer intent: As an administrator, I want to evaluate Azure DNS Private Resolver so I can determine if I want to use it instead of my current DNS resolver service.
---
@@ -20,12 +20,12 @@ Azure DNS Private Resolver is a new service that enables you to query Azure DNS
 ## How does it work?
-Azure DNS Private Resolver requires an [Azure Virtual Network](/azure/virtual-network/virtual-networks-overview). When you create an Azure DNS Private Resolver inside a virtual network, one or more [inbound endpoints](#inbound-endpoints) are established that can be used as the destination for DNS queries. The resolver's [outbound endpoint](#outbound-endpoints) processes DNS queries based on a [DNS forwarding ruleset](#dns-forwarding-rulesets) that you configure. DNS queries that are initiated in networks linked to a ruleset can be sent to other DNS servers.
+Azure DNS Private Resolver requires an [Azure Virtual Network](../virtual-network/virtual-networks-overview.md). When you create an Azure DNS Private Resolver inside a virtual network, one or more [inbound endpoints](#inbound-endpoints) are established that can be used as the destination for DNS queries. The resolver's [outbound endpoint](#outbound-endpoints) processes DNS queries based on a [DNS forwarding ruleset](#dns-forwarding-rulesets) that you configure. DNS queries that are initiated in networks linked to a ruleset can be sent to other DNS servers. The DNS query process when using an Azure DNS Private Resolver is summarized below: 1. A client in a virtual network issues a DNS query. -2. If the DNS servers for this virtual network are [specified as custom](/azure/virtual-network/virtual-networks-name-resolution-for-vms-and-role-instances#specify-dns-servers), then the query is forwarded to the specified IP addresses. +2. If the DNS servers for this virtual network are [specified as custom](../virtual-network/virtual-networks-name-resolution-for-vms-and-role-instances.md#specify-dns-servers), then the query is forwarded to the specified IP addresses. 3. If Default (Azure-provided) DNS servers are configured in the virtual network, and there are Private DNS zones [linked to the same virtual network](private-dns-virtual-network-links.md), these zones are consulted. 4. If the query doesn't match a Private DNS zone linked to the virtual network, then [Virtual network links](#virtual-network-links) for [DNS forwarding rulesets](#dns-forwarding-rulesets) are consulted. 5. If no ruleset links are present, then Azure DNS is used to resolve the query. @@ -34,7 +34,7 @@ The DNS query process when using an Azure DNS Private Resolver is summarized bel 8. If multiple matches are present, the longest suffix is used. 9. If no match is found, no DNS forwarding occurs and Azure DNS is used to resolve the query. -The architecture for Azure DNS Private Resolver is summarized in the following figure. DNS resolution between Azure virtual networks and on-premises networks requires [Azure ExpressRoute](/azure/expressroute/expressroute-introduction) or a [VPN](/azure/vpn-gateway/vpn-gateway-about-vpngateways). +The architecture for Azure DNS Private Resolver is summarized in the following figure. DNS resolution between Azure virtual networks and on-premises networks requires [Azure ExpressRoute](../expressroute/expressroute-introduction.md) or a [VPN](../vpn-gateway/vpn-gateway-about-vpngateways.md). [ ![Azure DNS Private Resolver architecture](./media/dns-resolver-overview/resolver-architecture.png) ](./media/dns-resolver-overview/resolver-architecture.png#lightbox) @@ -114,21 +114,20 @@ Subnets used for DNS resolver have the following limitations: ### Outbound endpoint restrictions Outbound endpoints have the following limitations: -- An outbound endpoint can't be deleted unless the DNS forwarding ruleset and the virtual network links under it are deleted +- An outbound endpoint can't be deleted unless the DNS forwarding ruleset and the virtual network links under it are deleted. -### DNS forwarding ruleset restrictions +### Ruleset restrictions -DNS forwarding rulesets have the following limitations: -- A DNS forwarding ruleset can't be deleted unless the virtual network links under it are deleted +- Rulesets can have no more than 25 rules in Public Preview. +- Rulesets can't be linked across different subscriptions in Public Preview. 
### Other restrictions -- DNS resolver endpoints can't be updated to include IP configurations from a different subnet -- IPv6 enabled subnets aren't supported in Public Preview +- IPv6 enabled subnets aren't supported in Public Preview. ## Next steps * Learn how to create an Azure DNS Private Resolver by using [Azure PowerShell](./dns-private-resolver-get-started-powershell.md) or [Azure portal](./dns-private-resolver-get-started-portal.md). * Learn about some of the other key [networking capabilities](../networking/fundamentals/networking-overview.md) of Azure. -* [Learn module: Introduction to Azure DNS](/learn/modules/intro-to-azure-dns). +* [Learn module: Introduction to Azure DNS](/learn/modules/intro-to-azure-dns). \ No newline at end of file diff --git a/articles/dns/dns-web-sites-custom-domain.md b/articles/dns/dns-web-sites-custom-domain.md index 24d86cc9b3239..c5c32698a2285 100644 --- a/articles/dns/dns-web-sites-custom-domain.md +++ b/articles/dns/dns-web-sites-custom-domain.md @@ -1,11 +1,11 @@ --- -title: Tutorial - Create custom Azure DNS records for a web app -description: In this tutorial you create custom domain DNS records for web app using Azure DNS. +title: 'Tutorial: Create custom Azure DNS records for a web app' +description: In this tutorial, you create custom domain DNS records for web app using Azure DNS. services: dns author: rohinkoul ms.service: dns ms.topic: tutorial -ms.date: 10/20/2020 +ms.date: 06/06/2022 ms.author: rohink ms.custom: devx-track-azurepowershell #Customer intent: As an experienced network administrator, I want to create DNS records in Azure DNS, so I can host a web app in a custom domain. @@ -15,16 +15,14 @@ ms.custom: devx-track-azurepowershell You can configure Azure DNS to host a custom domain for your web apps. For example, you can create an Azure web app and have your users access it using either www\.contoso.com or contoso.com as a fully qualified domain name (FQDN). -> [!NOTE] -> Contoso.com is used as an example throughout this tutorial. Substitute your own domain name for contoso.com. - To do this, you have to create three records: * A root "A" record pointing to contoso.com * A root "TXT" record for verification * A "CNAME" record for the www name that points to the A record -Keep in mind that if you create an A record for a web app in Azure, the A record must be manually updated if the underlying IP address for the web app changes. +> [!NOTE] +> Contoso.com is used as an example throughout this tutorial. Substitute your own domain name for contoso.com. In this tutorial, you learn how to: @@ -37,37 +35,32 @@ In this tutorial, you learn how to: ## Prerequisites -If you don’t have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. +* An Azure account with an active subscription. If you don’t have one, you can [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). -[!INCLUDE [updated-for-az](../../includes/updated-for-az.md)] +* A domain name that you can host in Azure DNS. You must have full control of this domain. Full control includes the ability to set the name server (NS) records for the domain. -* You must have a domain name available to test with that you can host in Azure DNS . You must have full control of this domain. Full control includes the ability to set the name server (NS) records for the domain. -* [Create an App Service app](../app-service/quickstart-html.md), or use an app that you created for another tutorial. 
+* A web app. If you don't have one, you can [create a static HTML web app](../app-service/quickstart-html.md) for this tutorial. -* Create a DNS zone in Azure DNS, and delegate the zone in your registrar to Azure DNS. +* An Azure DNS zone with delegation in your registrar to Azure DNS. If you don't have one, you can [create a DNS zone](./dns-getstarted-powershell.md), then [delegate your domain](dns-delegate-domain-azure-dns.md#delegate-the-domain) to Azure DNS. - 1. To create a DNS zone, follow the steps in [Create a DNS zone](./dns-getstarted-powershell.md). - 2. To delegate your zone to Azure DNS, follow the steps in [DNS domain delegation](dns-delegate-domain-azure-dns.md). -After creating a zone and delegating it to Azure DNS, you can then create records for your custom domain. +[!INCLUDE [updated-for-az](../../includes/updated-for-az.md)] [!INCLUDE [cloud-shell-try-it.md](../../includes/cloud-shell-try-it.md)] -## Create an A record and TXT record +## Create the A record An A record is used to map a name to its IP address. In the following example, assign "\@" as an A record using your web app IPv4 address. \@ typically represents the root domain. ### Get the IPv4 address -In the left navigation of the App Services page in the Azure portal, select **Custom domains**. - -![Custom domain menu](../app-service/./media/app-service-web-tutorial-custom-domain/custom-domain-menu.png) +In the left navigation of the App Services page in the Azure portal, select **Custom domains**, then copy the IP address of your web app: -In the **Custom domains** page, copy the app's IPv4 address: +:::image type="content" source="./media/dns-web-sites-custom-domain/app-service-custom-domains.png" alt-text="Screenshot of Azure App Service Custom domains page showing the web app I P address."::: -![Portal navigation to Azure app](../app-service/./media/app-service-web-tutorial-custom-domain/mapping-information.png) +### Create the record -### Create the A record +To create the A record, use: ```azurepowershell New-AzDnsRecordSet -Name "@" -RecordType "A" -ZoneName "contoso.com" ` @@ -75,13 +68,18 @@ New-AzDnsRecordSet -Name "@" -RecordType "A" -ZoneName "contoso.com" ` -DnsRecords (New-AzDnsRecordConfig -IPv4Address "") ``` -### Create the TXT record +> [!IMPORTANT] +> The A record must be manually updated if the underlying IP address for the web app changes. + +## Create the TXT record App Services uses this record only at configuration time to verify that you own the custom domain. You can delete this TXT record after your custom domain is validated and configured in App Service. > [!NOTE] > If you want to verify the domain name, but not route production traffic to the web app, you only need to specify the TXT record for the verification step. Verification does not require an A or CNAME record in addition to the TXT record. +To create the TXT record, use: + ```azurepowershell New-AzDnsRecordSet -ZoneName contoso.com -ResourceGroupName MyAzureResourceGroup ` -Name "@" -RecordType "txt" -Ttl 600 ` @@ -90,11 +88,7 @@ New-AzDnsRecordSet -ZoneName contoso.com -ResourceGroupName MyAzureResourceGroup ## Create the CNAME record -If your domain is already managed by Azure DNS (see [DNS domain delegation](dns-domain-delegation.md), you can use the following example to create a CNAME record for contoso.azurewebsites.net. - -Open Azure PowerShell and create a new CNAME record. 
This example creates a record set type CNAME with a "time to live" of 600 seconds in DNS zone named "contoso.com" with the alias for the web app contoso.azurewebsites.net. - -### Create the record +If your domain is already managed by Azure DNS (see [DNS domain delegation](dns-domain-delegation.md)), you can use the following example to create a CNAME record for contoso.azurewebsites.net. The CNAME created in this example has a "time to live" of 600 seconds in DNS zone named "contoso.com" with the alias for the web app contoso.azurewebsites.net. ```azurepowershell New-AzDnsRecordSet -ZoneName contoso.com -ResourceGroupName "MyAzureResourceGroup" ` @@ -107,7 +101,7 @@ The following example is the response: ``` Name : www ZoneName : contoso.com - ResourceGroupName : myresourcegroup + ResourceGroupName : myazureresourcegroup Ttl : 600 Etag : 8baceeb9-4c2c-4608-a22c-229923ee185 RecordType : CNAME @@ -156,29 +150,36 @@ contoso.com text = ``` ## Add custom host names -Now you can add the custom host names to your web app: +Now, you can add the custom host names to your web app: ```azurepowershell set-AzWebApp ` -Name contoso ` - -ResourceGroupName MyAzureResourceGroup ` + -ResourceGroupName ` -HostNames @("contoso.com","www.contoso.com","contoso.azurewebsites.net") ``` ## Test the custom host names -Open a browser and browse to `http://www.` and `http://`. +Open a browser and browse to `http://www.` and `http://`. > [!NOTE] > Make sure you include the `http://` prefix, otherwise your browser may attempt to predict a URL for you! You should see the same page for both URLs. For example: -![Contoso app service](media/dns-web-sites-custom-domain/contoso-app-svc.png) - +:::image type="content" source="./media/dns-web-sites-custom-domain/contoso-web-app.png" alt-text="Screenshot of the contoso Azure App Service Web App accessed via web browser."::: ## Clean up resources -When you no longer need the resources created in this tutorial, you can delete the **myresourcegroup** resource group. +When no longer needed, you can delete all resources created in this tutorial by deleting the resource group **MyAzureResourceGroup**: + +1. From the left-hand menu, select **Resource groups**. + +2. Select the **MyAzureResourceGroup** resource group. + +3. Select **Delete resource group**. + +4. Enter *MyAzureResourceGroup* and select **Delete**. 
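If you prefer to stay in the Azure PowerShell session used for the record commands above, the resource group can also be removed with a single cmdlet. This is a minimal sketch that assumes the **MyAzureResourceGroup** name used in this tutorial; `Remove-AzResourceGroup` deletes the group and everything in it, so double-check the name first:

```azurepowershell
# Minimal sketch: delete the resource group and all resources it contains.
# Assumes the MyAzureResourceGroup name used in this tutorial.
Remove-AzResourceGroup -Name "MyAzureResourceGroup" -Force
```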
## Next steps diff --git a/articles/dns/media/dns-delegate-domain-azure-dns/create-dns-zone-lb.png b/articles/dns/media/dns-delegate-domain-azure-dns/create-dns-zone-lb.png index ea3bb604bada1..95c1401e758ff 100644 Binary files a/articles/dns/media/dns-delegate-domain-azure-dns/create-dns-zone-lb.png and b/articles/dns/media/dns-delegate-domain-azure-dns/create-dns-zone-lb.png differ diff --git a/articles/dns/media/dns-delegate-domain-azure-dns/dns-name-servers.png b/articles/dns/media/dns-delegate-domain-azure-dns/dns-name-servers.png new file mode 100644 index 0000000000000..d6216e9cb1206 Binary files /dev/null and b/articles/dns/media/dns-delegate-domain-azure-dns/dns-name-servers.png differ diff --git a/articles/dns/media/dns-resolver-getstarted-portal/ruleset-links.png b/articles/dns/media/dns-resolver-getstarted-portal/ruleset-links.png new file mode 100644 index 0000000000000..8a871a2c04c27 Binary files /dev/null and b/articles/dns/media/dns-resolver-getstarted-portal/ruleset-links.png differ diff --git a/articles/dns/media/dns-resolver-getstarted-portal/ruleset.png b/articles/dns/media/dns-resolver-getstarted-portal/ruleset.png new file mode 100644 index 0000000000000..135af123c9909 Binary files /dev/null and b/articles/dns/media/dns-resolver-getstarted-portal/ruleset.png differ diff --git a/articles/dns/media/dns-resolver-overview/resolver-architecture.png b/articles/dns/media/dns-resolver-overview/resolver-architecture.png index 167fcb9e3d6a5..30519825435e9 100644 Binary files a/articles/dns/media/dns-resolver-overview/resolver-architecture.png and b/articles/dns/media/dns-resolver-overview/resolver-architecture.png differ diff --git a/articles/dns/media/dns-web-sites-custom-domain/app-service-custom-domains.png b/articles/dns/media/dns-web-sites-custom-domain/app-service-custom-domains.png new file mode 100644 index 0000000000000..aa710de208dfc Binary files /dev/null and b/articles/dns/media/dns-web-sites-custom-domain/app-service-custom-domains.png differ diff --git a/articles/dns/media/dns-web-sites-custom-domain/contoso-app-svc.png b/articles/dns/media/dns-web-sites-custom-domain/contoso-app-svc.png deleted file mode 100644 index 2b616bfb1a322..0000000000000 Binary files a/articles/dns/media/dns-web-sites-custom-domain/contoso-app-svc.png and /dev/null differ diff --git a/articles/dns/media/dns-web-sites-custom-domain/contoso-web-app.png b/articles/dns/media/dns-web-sites-custom-domain/contoso-web-app.png new file mode 100644 index 0000000000000..c9a1853651a4c Binary files /dev/null and b/articles/dns/media/dns-web-sites-custom-domain/contoso-web-app.png differ diff --git a/articles/dns/media/tutorial-alias-pip/add-public-ip-alias-expanded.png b/articles/dns/media/tutorial-alias-pip/add-public-ip-alias-expanded.png new file mode 100644 index 0000000000000..0e442d4e80096 Binary files /dev/null and b/articles/dns/media/tutorial-alias-pip/add-public-ip-alias-expanded.png differ diff --git a/articles/dns/media/tutorial-alias-pip/add-public-ip-alias-inline.png b/articles/dns/media/tutorial-alias-pip/add-public-ip-alias-inline.png new file mode 100644 index 0000000000000..0335e437d4b85 Binary files /dev/null and b/articles/dns/media/tutorial-alias-pip/add-public-ip-alias-inline.png differ diff --git a/articles/dns/media/tutorial-alias-pip/iis-web-server.png b/articles/dns/media/tutorial-alias-pip/iis-web-server.png new file mode 100644 index 0000000000000..9ca6280abe112 Binary files /dev/null and b/articles/dns/media/tutorial-alias-pip/iis-web-server.png differ diff --git 
a/articles/dns/media/tutorial-alias-rr/add-alias-record-set-expanded.png b/articles/dns/media/tutorial-alias-rr/add-alias-record-set-expanded.png new file mode 100644 index 0000000000000..5c758ef8649fd Binary files /dev/null and b/articles/dns/media/tutorial-alias-rr/add-alias-record-set-expanded.png differ diff --git a/articles/dns/media/tutorial-alias-rr/add-alias-record-set-inline.png b/articles/dns/media/tutorial-alias-rr/add-alias-record-set-inline.png new file mode 100644 index 0000000000000..7f4572e2c9db4 Binary files /dev/null and b/articles/dns/media/tutorial-alias-rr/add-alias-record-set-inline.png differ diff --git a/articles/dns/media/tutorial-alias-rr/add-record-set-expanded.png b/articles/dns/media/tutorial-alias-rr/add-record-set-expanded.png new file mode 100644 index 0000000000000..b52e4b692bd89 Binary files /dev/null and b/articles/dns/media/tutorial-alias-rr/add-record-set-expanded.png differ diff --git a/articles/dns/media/tutorial-alias-rr/add-record-set-inline.png b/articles/dns/media/tutorial-alias-rr/add-record-set-inline.png new file mode 100644 index 0000000000000..89922fcabb8f6 Binary files /dev/null and b/articles/dns/media/tutorial-alias-rr/add-record-set-inline.png differ diff --git a/articles/dns/media/tutorial-public-dns-zones-child/child-zone-button.png b/articles/dns/media/tutorial-public-dns-zones-child/child-zone-button.png new file mode 100644 index 0000000000000..40d7f1a78f120 Binary files /dev/null and b/articles/dns/media/tutorial-public-dns-zones-child/child-zone-button.png differ diff --git a/articles/dns/media/tutorial-public-dns-zones-child/child-zone-name-servers-expanded.png b/articles/dns/media/tutorial-public-dns-zones-child/child-zone-name-servers-expanded.png new file mode 100644 index 0000000000000..2d1d7b5cdd3a5 Binary files /dev/null and b/articles/dns/media/tutorial-public-dns-zones-child/child-zone-name-servers-expanded.png differ diff --git a/articles/dns/media/tutorial-public-dns-zones-child/child-zone-name-servers-inline.png b/articles/dns/media/tutorial-public-dns-zones-child/child-zone-name-servers-inline.png new file mode 100644 index 0000000000000..3d5544eb4ee2b Binary files /dev/null and b/articles/dns/media/tutorial-public-dns-zones-child/child-zone-name-servers-inline.png differ diff --git a/articles/dns/media/tutorial-public-dns-zones-child/child-zone-via-create-dns-zone-page.png b/articles/dns/media/tutorial-public-dns-zones-child/child-zone-via-create-dns-zone-page.png new file mode 100644 index 0000000000000..11b6e6604fd76 Binary files /dev/null and b/articles/dns/media/tutorial-public-dns-zones-child/child-zone-via-create-dns-zone-page.png differ diff --git a/articles/dns/media/tutorial-public-dns-zones-child/child-zone-via-overview-page.png b/articles/dns/media/tutorial-public-dns-zones-child/child-zone-via-overview-page.png new file mode 100644 index 0000000000000..42863109e7c99 Binary files /dev/null and b/articles/dns/media/tutorial-public-dns-zones-child/child-zone-via-overview-page.png differ diff --git a/articles/dns/media/tutorial-public-dns-zones-child/parent-zone-name-servers-expanded.png b/articles/dns/media/tutorial-public-dns-zones-child/parent-zone-name-servers-expanded.png new file mode 100644 index 0000000000000..c2d689b51b860 Binary files /dev/null and b/articles/dns/media/tutorial-public-dns-zones-child/parent-zone-name-servers-expanded.png differ diff --git a/articles/dns/media/tutorial-public-dns-zones-child/parent-zone-name-servers-inline.png 
b/articles/dns/media/tutorial-public-dns-zones-child/parent-zone-name-servers-inline.png new file mode 100644 index 0000000000000..cdac0033aab10 Binary files /dev/null and b/articles/dns/media/tutorial-public-dns-zones-child/parent-zone-name-servers-inline.png differ diff --git a/articles/dns/private-dns-getstarted-cli.md b/articles/dns/private-dns-getstarted-cli.md index 06dcfbb95c85f..75e025590f14a 100644 --- a/articles/dns/private-dns-getstarted-cli.md +++ b/articles/dns/private-dns-getstarted-cli.md @@ -5,7 +5,7 @@ services: dns author: rohinkoul ms.service: dns ms.topic: quickstart -ms.date: 10/20/2020 +ms.date: 05/23/2022 ms.author: rohink ms.custom: devx-track-azurecli, mode-api #Customer intent: As an experienced network administrator, I want to create an Azure private DNS zone, so I can resolve host names on my private virtual networks. @@ -87,7 +87,9 @@ az vm create \ --nsg NSG01 \ --nsg-rule RDP \ --image win2016datacenter +``` +```azurecli az vm create \ -n myVM02 \ --admin-username AzureAdmin \ @@ -100,7 +102,7 @@ az vm create \ --image win2016datacenter ``` -This will take a few minutes to complete. +Creating a virtual machine will take a few minutes to complete. ## Create an additional DNS record @@ -151,7 +153,7 @@ Repeat for myVM02. ping myVM01.private.contoso.com ``` - You should see output that looks similar to this: + You should see an output that looks similar to what is shown below: ```output PS C:\> ping myvm01.private.contoso.com @@ -175,7 +177,7 @@ Repeat for myVM02. ping db.private.contoso.com ``` - You should see output that looks similar to this: + You should see an output that looks similar to what is shown below: ```output PS C:\> ping db.private.contoso.com diff --git a/articles/dns/private-dns-getstarted-portal.md b/articles/dns/private-dns-getstarted-portal.md index d4b2d32492923..1c8863d513c3b 100644 --- a/articles/dns/private-dns-getstarted-portal.md +++ b/articles/dns/private-dns-getstarted-portal.md @@ -2,8 +2,8 @@ title: Quickstart - Create an Azure private DNS zone using the Azure portal description: In this quickstart, you create and test a private DNS zone and record in Azure DNS. This is a step-by-step guide to create and manage your first private DNS zone and record using the Azure portal. services: dns -author: rohinkoul -ms.author: rohink +author: greg-lindsay +ms.author: greglin ms.date: 05/18/2022 ms.topic: quickstart ms.service: dns diff --git a/articles/dns/tutorial-alias-pip.md b/articles/dns/tutorial-alias-pip.md index 50bd2c58613ac..f66e17f63c88e 100644 --- a/articles/dns/tutorial-alias-pip.md +++ b/articles/dns/tutorial-alias-pip.md @@ -1,21 +1,24 @@ --- title: 'Tutorial: Create an Azure DNS alias record to refer to an Azure public IP address' -description: This tutorial shows you how to configure an Azure DNS alias record to reference an Azure public IP address. +description: In this tutorial, you learn how to configure an Azure DNS alias record to reference an Azure public IP address. services: dns author: rohinkoul ms.service: dns ms.topic: tutorial -ms.date: 04/19/2021 +ms.date: 06/09/2022 ms.author: rohink +ms.custom: template-tutorial #Customer intent: As an experienced network administrator, I want to configure Azure an DNS alias record to refer to an Azure public IP address. --- -# Tutorial: Configure an alias record to refer to an Azure public IP address +# Tutorial: Create an alias record to refer to an Azure public IP address + +You can create an alias record to reference an Azure resource. 
An example is an alias record that references an Azure public IP resource. In this tutorial, you learn how to: > [!div class="checklist"] -> * Create a network infrastructure. +> * Create a virtual network and a subnet. > * Create a web server virtual machine with a public IP. > * Create an alias record that points to the public IP. > * Test the alias record. @@ -24,70 +27,156 @@ In this tutorial, you learn how to: If you don’t have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. ## Prerequisites -You must have a domain name available that you can host in Azure DNS to test with. You must have full control of this domain. Full control includes the ability to set the name server (NS) records for the domain. -For instructions to host your domain in Azure DNS, see [Tutorial: Host your domain in Azure DNS](dns-delegate-domain-azure-dns.md). +* An Azure account with an active subscription. +* A domain name hosted in Azure DNS. If you don't have an Azure DNS zone, you can [create a DNS zone](./dns-delegate-domain-azure-dns.md#create-a-dns-zone), then [delegate your domain](dns-delegate-domain-azure-dns.md#delegate-the-domain) to Azure DNS. + +> [!NOTE] +> In this tutorial, `contoso.com` is used as an example. Replace `contoso.com` with your own domain name. -The example domain used for this tutorial is contoso.com, but use your own domain name. +## Sign in to Azure + +Sign in to the Azure portal at https://portal.azure.com. ## Create the network infrastructure -First, create a virtual network and a subnet to place your web servers in. -1. Sign in to the [Azure portal](https://portal.azure.com). -2. Select **Create a resource** from the left panel of the Azure portal. Enter *resource group* in the search box, and create a resource group named **RG-DNS-Alias-pip**. -3. Select **Create a resource** > **Networking** > **Virtual network**. -4. Create a virtual network named **VNet-Server**. Place it in the **RG-DNS-Alias-pip** resource group, and name the subnet **SN-Web**. + +Create a virtual network and a subnet to place your web server in. + +1. In the Azure portal, enter *virtual network* in the search box at the top of the portal, and then select **Virtual networks** from the search results. +1. In **Virtual networks**, select **+ Create**. +1. In **Create virtual network**, enter or select the following information in the **Basics** tab: + + | **Setting** | **Value** | + |----------------------|------------| + | **Project Details** | | + | Subscription | Select your Azure subscription | + | Resource Group | Select **Create new**
                  In **Name**, enter **RG-DNS-Alias-pip**
                  Select **OK** | + | **Instance details** | | + | Name | Enter **myPIPVNet** | + | Region | Select your region | + +1. Select the **IP Addresses** tab or select the **Next: IP Addresses** button at the bottom of the page. +1. In the **IP Addresses** tab, enter the following information: + + | Setting | Value | + |--------------------|----------------------------| + | IPv4 address space | Enter **10.10.0.0/16** | + +1. Select **+ Add subnet**, and enter this information in the **Add subnet**: + + | Setting | Value | + |----------------------|----------------------------| + | Subnet name | Enter **WebSubnet** | + | Subnet address range | Enter **10.10.0.0/24** | + +1. Select **Add**. +1. Select the **Review + create** tab or select the **Review + create** button. +1. Select **Create**. ## Create a web server virtual machine -1. Select **Create a resource** > **Windows Server 2016 VM**. -2. Enter **Web-01** for the name, and place the VM in the **RG-DNS-Alias-TM** resource group. Enter a username and password, and select **OK**. -3. For **Size**, select an SKU with 8-GB RAM. -4. For **Settings**, select the **VNet-Servers** virtual network and the **SN-Web** subnet. For public inbound ports, select **HTTP (80)** > **HTTPS (443)** > **RDP (3389)**, and then select **OK**. -5. On the **Summary** page, select **Create**. -This deployment takes a few minutes to complete. The virtual machine will have an attached NIC with a basic dynamic public IP called Web-01-ip. The public IP will change every time the virtual machine is restarted. +Create a Windows Server virtual machine and then install IIS web server on it. + +### Create the virtual machine + +Create a Windows Server 2019 virtual machine. -### Install IIS +1. In the Azure portal, enter *virtual machine* in the search box at the top of the portal, and then select **Virtual machines** from the search results. +1. In **Virtual machines**, select **+ Create** and then select **Azure virtual machine**. +1. In **Create a virtual machine**, enter or select the following information in the **Basics** tab: -Install IIS on **Web-01**. + | **Setting** | **Value** | + |---------------------------|------------| + | **Project Details** | | + | Subscription | Select your Azure subscription | + | Resource Group | Select **RG-DNS-Alias-pip** | + | **Instance details** | | + | Virtual machine name | Enter **Web-01** | + | Region | Select **(US) East US** | + | Availability options | Select **No infrastructure redundancy required** | + | Security type | Select **Standard**. | + | Image | Select **Windows Server 2019 Datacenter - Gen2** | + | Size | Choose VM size or take default setting | + | **Administrator account** | | + | Username | Enter a username | + | Password | Enter a password | + | Confirm password | Reenter password | + | **Inbound port rules** | | + | Public inbound ports | Select **None** | -1. Connect to **Web-01**, and sign in. -2. On the **Server Manager** dashboard, select **Add roles and features**. -3. Select **Next** three times. On the **Server Roles** page, select **Web Server (IIS)**. -4. Select **Add Features**, and then select **Next**. -5. Select **Next** four times, and then select **Install**. This procedure takes a few minutes to finish. -6. After the installation finishes, select **Close**. -7. Open a web browser. Browse to **localhost** to verify that the default IIS web page appears. + +1. Select the **Networking** tab, or select **Next: Disks**, then **Next: Networking**. + +1. 
In the **Networking** tab, enter or select the following information: + + | Setting | Value | + |---------|-------| + | **Network interface** | | + | Virtual network | **myPIPVNet** | + | Subnet | **WebSubnet** | + | Public IP | Take the default public IP | + | NIC network security group | Select **Basic**| + | Public inbound ports | Select **Allow selected ports** | + | Select inbound ports | Select **HTTP (80)**, **HTTPS (443)** and **RDP (3389)** | + +1. Select **Review + create**. +1. Review the settings, and then select **Create**. + +This deployment may take a few minutes to complete. + +> [!NOTE] +> **Web-01** virtual machine has an attached NIC with a basic dynamic public IP that changes every time the virtual machine is restarted. + +### Install IIS web server + +Install IIS web server on **Web-01**. + +1. In the **Overview** page of **Web-01**, select **Connect** and then **RDP**. +1. In the **RDP** page, select **Download RDP File**. +1. Open *Web-01.rdp*, and select **Connect**. +1. Enter the username and password entered during virtual machine creation. +1. On the **Server Manager** dashboard, select **Manage** then **Add Roles and Features**. +1. Select **Server Roles** or select **Next** three times. On the **Server Roles** page, select **Web Server (IIS)**. +1. Select **Add Features**, and then select **Next**. +1. Select **Confirmation** or select **Next** three times, and then select **Install**. The installation process takes a few minutes to finish. +1. After the installation finishes, select **Close**. +1. Open a web browser. Browse to **localhost** to verify that the default IIS web page appears. + + :::image type="content" source="./media/tutorial-alias-pip/iis-web-server.png" alt-text="Screenshot of Internet Explorer showing the I I S Web Server Welcome page."::: ## Create an alias record Create an alias record that points to the public IP address. -1. Select your Azure DNS zone to open the zone. -2. Select **Record set**. -3. In the **Name** text box, select **web01**. -4. Leave the **Type** as an **A** record. -5. Select the **Alias Record Set** check box. -6. Select **Choose Azure service**, and then select the **Web-01-ip** public IP address. +1. In the Azure portal, enter *contoso.com* in the search box at the top of the portal, and then select **contoso.com** DNS zone from the search results. +1. In the **Overview** page, select the **+ Record set** button. +1. In the **Add record set**, enter *web01* in the **Name**. +1. Select **A** for the **Type**. +1. Select **Yes** for the **Alias record set**, and then select the **Azure Resource** for the **Alias type**. +1. Select the **Web-01-ip** public IP address for the **Azure resource**. +1. Select **OK**. + + :::image type="content" source="./media/tutorial-alias-pip/add-public-ip-alias-inline.png" alt-text="Screenshot of adding an alias record to refer to the Azure public IP of the I I S web server using the Add record set page." lightbox="./media/tutorial-alias-pip/add-public-ip-alias-expanded.png"::: ## Test the alias record -1. In the **RG-DNS-Alias-pip** resource group, select the **Web-01** virtual machine. Note the public IP address. -1. From a web browser, browse to the fully qualified domain name for the Web01-01 virtual machine. An example is **web01.contoso.com**. You now see the IIS default web page. -2. Close the web browser. -3. Stop the **Web-01** virtual machine, and then restart it. -4. After the virtual machine restarts, note the new public IP address for the virtual machine. -5. 
Open a new browser. Browse again to the fully qualified domain name for the Web01-01 virtual machine. An example is **web01.contoso.com**. +1. In the Azure portal, enter *virtual machine* in the search box at the top of the portal, and then select **Virtual machines** from the search results. +1. Select the **Web-01** virtual machine. Note the public IP address in the **Overview** page. +1. From a web browser, browse to `web01.contoso.com`, which is the fully qualified domain name of the **Web-01** virtual machine. You now see the IIS welcome web page. +1. Close the web browser. +1. Stop the **Web-01** virtual machine, and then restart it. +1. After the virtual machine restarts, note the new public IP address for the virtual machine. +1. From a web browser, browse again to `web01.contoso.com`. -This procedure succeeds because you used an alias record to point to the public IP address resource, not a standard A record. +This procedure succeeds because you used an alias record to point to the public IP resource instead of a standard A record that points to the public IP address, not the resource. ## Clean up resources -When you no longer need the resources created for this tutorial, delete the **RG-DNS-Alias-pip** resource group. - +When no longer needed, you can delete all resources created in this tutorial by deleting the **RG-DNS-Alias-pip** resource group and the alias record **web01** from **contoso.com** DNS zone. ## Next steps -In this tutorial, you created an alias record to refer to an Azure public IP address. To learn about Azure DNS and web apps, continue with the tutorial for web apps. +In this tutorial, you created an alias record to refer to an Azure public IP address resource. To learn how to create an alias record to support domain name apex with Traffic Manager, continue with the alias records for Traffic Manager tutorial. > [!div class="nextstepaction"] -> [Create DNS records for a web app in a custom domain](./dns-web-sites-custom-domain.md) +> [Create alias records for Traffic Manager](./tutorial-alias-tm.md) diff --git a/articles/dns/tutorial-alias-rr.md b/articles/dns/tutorial-alias-rr.md index 20d205406818a..74b644a22b46c 100644 --- a/articles/dns/tutorial-alias-rr.md +++ b/articles/dns/tutorial-alias-rr.md @@ -1,13 +1,14 @@ --- title: 'Tutorial: Create an alias record to refer to a resource record in a zone' titleSuffix: Azure DNS -description: This tutorial shows you how to configure an Azure DNS alias record to reference a resource record within the zone. -services: dns +description: In this tutorial, you learn how to configure an alias record to reference a resource record within the zone. author: rohinkoul +ms.author: rohink ms.service: dns +services: dns ms.topic: tutorial -ms.date: 04/19/2021 -ms.author: rohink +ms.date: 06/09/2022 +ms.custom: template-tutorial #Customer intent: As an experienced network administrator, I want to configure Azure an DNS alias record to refer to a resource record within the zone. --- @@ -18,54 +19,94 @@ Alias records can reference other record sets of the same type. For example, you In this tutorial, you learn how to: > [!div class="checklist"] -> * Create an alias record for a resource record in the zone. +> * Create a resource record in the zone. +> * Create an alias record for the resource record. > * Test the alias record. If you don’t have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. 
## Prerequisites

-You must have a domain name available that you can host in Azure DNS to test with. You must have full control of this domain. Full control includes the ability to set the name server (NS) records for the domain.
-For instructions to host your domain in Azure DNS, see [Tutorial: Host your domain in Azure DNS](dns-delegate-domain-azure-dns.md).
+* An Azure account with an active subscription.
+* A domain name hosted in Azure DNS. If you don't have an Azure DNS zone, you can [create a DNS zone](./dns-delegate-domain-azure-dns.md#create-a-dns-zone), then [delegate your domain](dns-delegate-domain-azure-dns.md#delegate-the-domain) to Azure DNS.
+
+> [!NOTE]
+> In this tutorial, `contoso.com` is used as an example. Replace `contoso.com` with your own domain name.
+
+## Sign in to Azure
+Sign in to the Azure portal at https://portal.azure.com.

## Create an alias record

Create an alias record that points to a resource record in the zone.

### Create the target resource record
-1. Select your Azure DNS zone to open the zone.
-2. Select **Record set**.
-3. In the **Name** text box, enter **server**.
-4. For the **Type**, select **A**.
-5. In the **IP ADDRESS** text box, enter **10.10.10.10**.
-6. Select **OK**.
+1. In the Azure portal, enter *contoso.com* in the search box at the top of the portal, and then select **contoso.com** DNS zone from the search results.
+1. In the **Overview** page, select the **+Record set** button.
+1. In the **Add record set**, enter *server* in the **Name**.
+1. Select **A** for the **Type**.
+1. Enter *10.10.10.10* in the **IP address**.
+1. Select **OK**.
+
+   :::image type="content" source="./media/tutorial-alias-rr/add-record-set-inline.png" alt-text="Screenshot of adding the target record set in the Add record set page." lightbox="./media/tutorial-alias-rr/add-record-set-expanded.png":::

### Create the alias record
-1. Select your Azure DNS zone to open the zone.
-2. Select **Record set**.
-3. In the **Name** text box, enter **test**.
-4. For the **Type**, select **A**.
-5. Select **Yes** in the **Alias Record Set** check box. Then select the **Zone record set** option.
-6. For the **Zone record set**, select the **server** record.
-7. Select **OK**.
+1. In the **Overview** page of **contoso.com** DNS zone, select the **+Record set** button.
+1. In the **Add record set**, enter *test* in the **Name**.
+1. Select **A** for the **Type**.
+1. Select **Yes** for the **Alias record set**, and then select the **Zone record set** for the **Alias type**.
+1. Select the **server** record for the **Zone record set**.
+1. Select **OK**.
+
+   :::image type="content" source="./media/tutorial-alias-rr/add-alias-record-set-inline.png" alt-text="Screenshot of adding the alias record set in the Add record set page." lightbox="./media/tutorial-alias-rr/add-alias-record-set-expanded.png":::

## Test the alias record
-1. Start your favorite nslookup tool. One option is to browse to [https://network-tools.com/nslook](https://network-tools.com/nslook).
-2. Set the query type for A records, and look up **test.\**. The answer is **10.10.10.10**.
-3. In the Azure portal, change the **server** A record to **10.11.11.11**.
-4. Wait a few minutes, and then use nslookup again for the **test** record. The answer is **10.11.11.11**.
+After adding the alias record, you can verify that it's working by using a tool such as *nslookup* to query the `test` A record.
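On Windows, the `Resolve-DnsName` cmdlet is an alternative to nslookup for the same check. A minimal sketch, assuming the `contoso.com` example zone and the `test` alias record created above:

```azurepowershell
# Minimal sketch: query the alias record; the answer should match the server record's IP address.
# Assumes the contoso.com example zone used in this tutorial.
Resolve-DnsName -Name test.contoso.com -Type A
```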
-## Clean up resources +> [!TIP] +> You may need to wait at least 10 minutes after you add a record to successfully verify that it's working. It can take a while for changes to propagate through the DNS system. + +1. From a command prompt, enter the `nslookup` command: + + ``` + nslookup test.contoso.com + ``` -When you no longer need the resources created for this tutorial, delete the **server** and **test** resource records in your zone. +1. Verify that the response looks similar to the following output: + + ``` + Server: UnKnown + Address: 40.90.4.1 + + Name: test.contoso.com + Address: 10.10.10.10 + ``` + +1. In the **Overview** page of **contoso.com** DNS zone, select the **server** record, and then enter *10.11.11.11* in the **IP address**. + +1. Select **Save**. + +1. Wait a few minutes, and then use the `nslookup` command again. Verify the response changed to reflect the new IP address: + + + ``` + Server: UnKnown + Address: 40.90.4.1 + + Name: test.contoso.com + Address: 10.11.11.11 + ``` + +## Clean up resources +When you no longer need the resources created for this tutorial, delete the **server** and **test** records from your zone. ## Next steps -In this tutorial, you created an alias record to refer to a resource record within the zone. To learn about Azure DNS and web apps, continue with the tutorial for web apps. +In this tutorial, you learned the basic steps to create an alias record to refer to a resource record within the Azure DNS zone. -> [!div class="nextstepaction"] -> [Create DNS records for a web app in a custom domain](./dns-web-sites-custom-domain.md) +- Learn more about [alias records](dns-alias.md). +- Learn more about [zones and records](dns-zones-records.md). diff --git a/articles/dns/tutorial-public-dns-zones-child.md b/articles/dns/tutorial-public-dns-zones-child.md index 640437fa6d6d3..bad11ae10a970 100644 --- a/articles/dns/tutorial-public-dns-zones-child.md +++ b/articles/dns/tutorial-public-dns-zones-child.md @@ -1,98 +1,127 @@ --- -title: 'Tutorial: Creating an Azure child DNS zones' +title: 'Tutorial: Create an Azure child DNS zone' titleSuffix: Azure DNS -description: Tutorial on how to create child DNS zones in Azure portal. +description: In this tutorial, you learn how to create child DNS zones in Azure portal. author: jonbeck ms.assetid: be4580d7-aa1b-4b6b-89a3-0991c0cda897 ms.service: dns ms.topic: tutorial -ms.custom: +ms.custom: template-tutorial ms.workload: infrastructure-services -ms.date: 04/19/2021 +ms.date: 06/07/2022 ms.author: jonbeck --- -# Tutorial: Creating a new Child DNS zone +# Tutorial: Create a new Child DNS zone In this tutorial, you learn how to: > [!div class="checklist"] -> * Signing in to Azure Portal. -> * Creating child DNS zone via new DNS zone. -> * Creating child DNS zone via parent DNS zone. -> * Verifying NS Delegation for new Child DNS zone. +> * Create a child DNS zone via parent DNS zone. +> * Create a child DNS zone via new DNS zone. +> * Verify NS Delegation for the new Child DNS zone. ## Prerequisites -* An Azure account with an active subscription. If you don't have an account, you can [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). -* Existing parent Azure DNS zone. +* An Azure account with an active subscription. If you don't have one, you can [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). +* A parent Azure DNS zone. If you don't have one, you can [create a DNS zone](./dns-getstarted-portal.md#create-a-dns-zone). 
-In this tutorial, we'll use contoso.com as the parent zone and subdomain.contoso.com as the child domain name. Replace *contoso.com* with your parent domain name and *subdomain* with your child domain. If you haven't created your parent DNS zone, see steps to [create DNS zone using Azure portal](./dns-getstarted-portal.md#create-a-dns-zone). +In this tutorial, we'll use `contoso.com` as the parent zone and `subdomain.contoso.com` as the child domain name. Replace `contoso.com` with your parent domain name and `subdomain` with your child domain. +There are two ways you can create your child DNS zone: +1. Through the parent DNS zone's **Overview** page. +1. Through the **Create DNS zone** page. -## Sign in to Azure portal +## Create a child DNS zone via parent DNS zone Overview page -Sign in to the [Azure portal](https://portal.azure.com/) with your Azure account. -If you don't have an Azure subscription, create a free account before you begin. +You'll create a new child DNS zone and delegate it to the parent DNS zone using the **Child Zone** button from parent zone **Overview** page. Using this button, the parent parameters are automatically pre-populated. -There are two ways you can do create your child DNS zone. -1. Through the "Create DNS zone" portal page. -1. Through the parent DNS zone's configuration page. +1. Sign in to the [Azure portal](https://portal.azure.com). +1. In the Azure portal, enter *contoso.com* in the search box at the top of the portal and then select **contoso.com** DNS zone from the search results. +1. In the **Overview** page, select the **+Child zone** button. -## Create child DNS zone via create DNS zone + :::image type="content" source="./media/tutorial-public-dns-zones-child/child-zone-button.png" alt-text="Screenshot of D N S zone showing the child zone button."::: -In this step, we'll create a new child DNS zone with name **subdomain.contoso.com** and delegate it to existing parent DNS zone **contoso.com**. You'll create the DNS zone using the tabs on the **Create DNS zone** page. -1. On the Azure portal menu or from the **Home** page, select **Create a resource**. The **New** window appears. -1. Select **Networking**, then select **DNS zone** and then select **Add** button. +1. In the **Create DNS zone**, enter or select this information in the **Basics** tab: -1. On the **basics** tab, type or select the following values: - * **Subscription**: Select a subscription to create the zone in. - * **Resource group**: Enter your existing Resource group or create a new one by selecting **Create new**. Enter *MyResourceGroup*, and select **OK**. The resource group name must be unique within the Azure subscription. - * Check this checkbox: **This zone is a child of an existing zone already hosted in Azure DNS** - * **Parent zone subscription**: From this drop down, search or select the subscription name under which parent DNS zone *contoso.com* was created. - * **Parent zone**: In the search bar type *contoso.com* to load it in dropdown list. Once loaded select *contoso.com* from dropdown list. - * **Name:** Type *subdomain* for this tutorial example. Notice that your parent DNS zone name *contoso.com* is automatically added as suffix to name when we select parent zone from the above step. + | Setting | Value | + | ------- | ----- | + | **Project details** | | + | Subscription | Select your Azure subscription.| + | Resource group | Select an existing resource group for the child zone or create a new one by selecting **Create new**.
                  In this tutorial, the resource group **MyResourceGroup** of the parent DNS zone is selected. | + | **Instance details** | | + | Name | Enter your child zone name. In this tutorial, *subdomain* is used. Notice that the parent DNS zone name `contoso.com` is automatically added as a suffix to **Name**. | + | Resource group location | The resource group location is selected for you if you selected an existing resource group for the child zone.
                  Select the resource group location if you created a new resource group for the child zone.
                  The resource group location doesn't affect your DNS zone service, which is global and not bound to a location. | -1. Select **Next: Review + create**. -1. On the **Review + create** tab, review the summary, correct any validation errors, and then select **Create**. -It may take a few minutes to create the zone. + :::image type="content" source="./media/tutorial-public-dns-zones-child/child-zone-via-overview-page.png" alt-text="Screenshot of Create D N S zone page accessed via the Add child zone button."::: - :::image type="content" source="./media/dns-delegate-domain-azure-dns/create-dns-zone-inline.png" alt-text="Screenshot of the create DNS zone page." lightbox="./media/dns-delegate-domain-azure-dns/create-dns-zone-expanded.png"::: + > [!NOTE] + > Parent zone information is automatically pre-populated with child zone option box already checked. -## Create child DNS zone via parent DNS zone overview page -You can also create a new child DNS zone and delegate it into the parent DNS zone by using the **Child Zone** button from parent zone overview page. Using this button automatically pre-populates the parent parameters for the child zone automatically. +1. Select **Review + create** button. +1. Select **Create** button. It may take a few minutes to create the child zone. -1. In the Azure portal, under **All resources**, open the *contoso.com* DNS zone in the **MyResourceGroup** resource group. You can enter *contoso.com* in the **Filter by name** box to find it more easily. -1. On DNS zone overview page, select the **+Child Zone** button. - :::image type="content" source="./media/dns-delegate-domain-azure-dns/create-child-zone-inline.png" alt-text="Screenshot child zone button." border="true" lightbox="./media/dns-delegate-domain-azure-dns/create-child-zone-expanded.png"::: +## Create a child DNS zone via Create DNS zone -1. The create DNS zone page will then open. Child zone option is already checked, and parent zone subscription and parent zone gets populated for you on this page. -1. Type the name as *child* for this tutorial example. Notice that you parent DNS zone name contoso.com is automatically added as prefix to name. -1. Select **Next: Tags** and then **Next: Review + create**. -1. On the **Review + create** tab, review the summary, correct any validation errors, and then select **Create**. +You'll create a new child DNS zone and delegate it to the parent DNS zone using the **Create DNS zone** page. - :::image type="content" source="./media/dns-delegate-domain-azure-dns/create-dns-zone-child-inline.png" alt-text="Screenshot of child zone selected" border="true" lightbox="./media/dns-delegate-domain-azure-dns/create-dns-zone-child-expanded.png"::: +1. On the Azure portal menu or from the **Home** page, select **Create a resource** and then select **Networking**. +1. Select **DNS zone** and then select the **Create** button. -## Verify child DNS zone -Now that you have a new child DNS zone *subdomain.contoso.com* created. To verify that delegation happened correctly, you'll want to check the nameserver(NS) records for your child zone is in the parent zone as described below. +1. In **Create DNS zone**, enter or select this information in the **Basics** tab: -**Retrieve name servers of child DNS zone:** + | Setting | Value | + | ------- | ----- | + | **Project details** | | + | Subscription | Select your Azure subscription.| + | Resource group | Select an existing resource group or create a new one by selecting **Create new**.
                  In this tutorial, the resource group **MyResourceGroup** of the parent DNS zone is selected. | + | **Instance details** | | + | This zone is a child of an existing zone already hosted in Azure DNS | Check this checkbox. | + | Parent zone subscription | Select your Azure subscription under which parent DNS zone `contoso.com` was created. | + | Parent zone | In the search bar, enter *contoso.com* to load it in dropdown list. Once loaded, select it from dropdown list. | + | Name | Enter your child zone name. In this tutorial, *subdomain* is used. Notice that the parent DNS zone name `contoso.com` is automatically added as a suffix to **Name** after you selected parent zone from the previous step. | + | Resource group location | The resource group location is selected for you if you selected an existing resource group for the child zone.
                  Select the resource group location if you created a new resource group for the child zone.
                  The resource group location doesn't affect your DNS zone service, which is global and not bound to a location. | -1. In the Azure portal, under **All resources**, open the *subdomain.contoso.com* DNS zone in the **MyResourceGroup** resource group. You can enter *subdomain.contoso.com* in the **Filter by name** box to find it more easily. -1. Retrieve the name servers from the DNS zone overview page. In this example, the zone contoso.com has been assigned name servers *ns1-08.azure-dns.com, ns2-08.azure-dns.net, ns3-08.azure-dns.org*, and *ns4-08.azure-dns.info*: + :::image type="content" source="./media/tutorial-public-dns-zones-child/child-zone-via-create-dns-zone-page.png" alt-text="Screenshot of Create D N S zone page accessed via the Create button of D N S zone page."::: - :::image type="content" source="./media/dns-delegate-domain-azure-dns/create-child-zone-ns-inline.png" alt-text="Screenshot of child zone nameservers" border="true" lightbox="./media/dns-delegate-domain-azure-dns/create-child-zone-ns-expanded.png"::: -**Verify the NS record in parent DNS zone:** +1. Select **Review + create** button. +1. Select **Create** button. It may take a few minutes to create the zone. -Now in this step we go the parent DNS zone *contoso.com* and check that its NS record set entry for the child zones nameservers has been created. -1. In the Azure portal, under **All resources**, open the contoso.com DNS zone in the **MyResourceGroup** resource group. You can enter contoso.com in the **Filter by name** box to find it more easily. -1. On the *contoso.com* DNS zones overview page, check for the record sets. -1. You'll find that record set of type NS and name subdomain is already created in parent DNS zone. Check the values for this record set it's similar to the nameserver list we retrieved from child DNS zone in above step. +## Verify the child DNS zone + +After the new child DNS zone `subdomain.contoso.com` created, verify that the delegation configured correctly. You'll need to check that your child zone name server (NS) records are in the parent zone as described below. + +### Retrieve name servers of child DNS zone + +1. In the Azure portal, enter *subdomain.contoso.com* in the search box at the top of the portal and then select **subdomain.contoso.com** DNS zone from the search results. + +1. Retrieve the name servers from the DNS zone **Overview** page. In this example, the zone `subdomain.contoso.com` has been assigned name servers `ns1-05.azure-dns.com.`, `ns2-05.azure-dns.net.`, `ns3-05.azure-dns.org.`, and `ns4-05.azure-dns.info.`: + + :::image type="content" source="./media/tutorial-public-dns-zones-child/child-zone-name-servers-inline.png" alt-text="Screenshot of child D N S zone Overview page showing its name servers." lightbox="./media/tutorial-public-dns-zones-child/child-zone-name-servers-expanded.png"::: + +### Check the NS record set in parent DNS zone + +After retrieving the name servers from the child DNS zone, check that the parent DNS zone `contoso.com` has the NS record set entry for its child zone name servers. + +1. In the Azure portal, enter *contoso.com* in the search box at the top of the portal and then select **contoso.com** DNS zone from the search results. +1. Check the record sets in **Overview** page of **contoso.com** DNS zone. +1. You'll find a record set of type **NS** and name **subdomain** created in the parent DNS zone. Compare the name servers in this record set with the ones you retrieved from the **Overview** page of the child DNS zone. 
+ + :::image type="content" source="./media/tutorial-public-dns-zones-child/parent-zone-name-servers-inline.png" alt-text="Screenshot of child zone name servers validation in the parent D N S zone Overview page." lightbox="./media/tutorial-public-dns-zones-child/parent-zone-name-servers-expanded.png"::: - :::image type="content" source="./media/dns-delegate-domain-azure-dns/create-child-zone-ns-validate-inline.png" alt-text="Screenshot of Child zone nameservers validation" border="true" lightbox="./media/dns-delegate-domain-azure-dns/create-child-zone-ns-validate-expanded.png"::: ## Clean up resources -When you no longer need the resources you created in this tutorial, remove them by deleting the **MyResourceGroup** resource group. Open the **MyResourceGroup** resource group, and select **Delete resource group**. + +When no longer needed, you can delete all resources created in this tutorial by following these steps to delete the resource group **MyResourceGroup**: + +1. On the Azure portal menu, select **Resource groups**. + +2. Select the **MyResourceGroup** resource group. + +3. Select **Delete resource group**. + +4. Enter *MyResourceGroup* and select **Delete**. + ## Next steps diff --git a/articles/event-grid/event-filtering.md b/articles/event-grid/event-filtering.md index 0a4e2c1fd5641..e6f5768bc8e7e 100644 --- a/articles/event-grid/event-filtering.md +++ b/articles/event-grid/event-filtering.md @@ -2,7 +2,7 @@ title: Event filtering for Azure Event Grid description: Describes how to filter events when creating an Azure Event Grid subscription. ms.topic: conceptual -ms.date: 03/04/2021 +ms.date: 06/01/2022 --- # Understand event filtering for Event Grid subscriptions @@ -627,10 +627,8 @@ Here's an example of using an extension context attribute in a filter. Advanced filtering has the following limitations: -* 5 advanced filters and 25 filter values across all the filters per event grid subscription +* 25 advanced filters and 25 filter values across all the filters per event grid subscription * 512 characters per string value -* Five values for **in** and **not in** operators -* The `StringNotContains` operator is currently not available in the portal. * Keys with **`.` (dot)** character in them. For example: `http://schemas.microsoft.com/claims/authnclassreference` or `john.doe@contoso.com`. Currently, there's no support for escape characters in keys. The same key can be used in more than one filter. diff --git a/articles/event-grid/event-schema-blob-storage.md b/articles/event-grid/event-schema-blob-storage.md index a8a6178ab9796..2d4387287a287 100644 --- a/articles/event-grid/event-schema-blob-storage.md +++ b/articles/event-grid/event-schema-blob-storage.md @@ -2,7 +2,7 @@ title: Azure Blob Storage as Event Grid source description: Describes the properties that are provided for blob storage events with Azure Event Grid ms.topic: conceptual -ms.date: 09/08/2021 +ms.date: 05/26/2022 --- # Azure Blob Storage as an Event Grid source @@ -45,7 +45,16 @@ These events are triggered if you enable a hierarchical namespace on the storage > [!NOTE] > For **Azure Data Lake Storage Gen2**, if you want to ensure that the **Microsoft.Storage.BlobCreated** event is triggered only when a Block Blob is completely committed, filter the event for the `FlushWithClose` REST API call. This API call triggers the **Microsoft.Storage.BlobCreated** event only after data is fully committed to a Block Blob. To learn how to create a filter, see [Filter events for Event Grid](./how-to-filter-events.md). 
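For example, the filter described in the preceding note might be created with Azure PowerShell along the following lines. This is a minimal sketch: the resource group, storage account, and endpoint values are placeholders, and the exact parameter set should be confirmed against the `New-AzEventGridSubscription` reference.

```azurepowershell-interactive
# Sketch: deliver BlobCreated events only when they were raised by the FlushWithClose REST API call.
# "MyResourceGroup", "mydatalakeaccount", and the webhook URL are illustrative placeholders.
$storageId = (Get-AzStorageAccount -ResourceGroupName "MyResourceGroup" -Name "mydatalakeaccount").Id

# Advanced filter matching data.api == "FlushWithClose"
$flushFilter = @{
    operator = "StringIn"
    key      = "data.api"
    values   = @("FlushWithClose")
}

New-AzEventGridSubscription `
    -ResourceId $storageId `
    -EventSubscriptionName "blob-committed-events" `
    -Endpoint "https://contoso-functions.azurewebsites.net/api/updates" `
    -IncludedEventType "Microsoft.Storage.BlobCreated" `
    -AdvancedFilter @($flushFilter)
```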
-## Example event +### List of policy-related events + +These events are triggered when the actions defined by a policy are performed. + + |Event name |Description| + |----------|-----------| + |**Microsoft.Storage.BlobInventoryPolicyCompleted** |Triggered when the inventory run completes for a rule that is defined an inventory policy . This event also occurs if the inventory run fails with a user error before it starts to run. For example, an invalid policy, or an error that occurs when a destination container is not present will trigger the event. | + |**Microsoft.Storage.LifecyclePolicyCompleted** |Triggered when the actions defined by a lifecycle management policy are performed. | + +## Example events When an event is triggered, the Event Grid service sends data about that event to subscribing endpoint. This section contains an example of what that data would look like for each blob storage event. # [Event Grid event schema](#tab/event-grid-event-schema) @@ -334,6 +343,61 @@ If the blob storage account has a hierarchical namespace, the data looks similar }] ``` +### Microsoft.Storage.BlobInventoryPolicyCompleted event + +```json +{ + "topic": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/BlobInventory/providers/Microsoft.EventGrid/topics/BlobInventoryTopic", + "subject": "BlobDataManagement/BlobInventory", + "eventType": "Microsoft.Storage.BlobInventoryPolicyCompleted", + "id": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "data": { + "scheduleDateTime": "2021-05-28T03:50:27Z", + "accountName": "testaccount", + "ruleName": "Rule_1", + "policyRunStatus": "Succeeded", + "policyRunStatusMessage": "Inventory run succeeded, refer manifest file for inventory details.", + "policyRunId": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "manifestBlobUrl": "https://testaccount.blob.core.windows.net/inventory-destination-container/2021/05/26/13-25-36/Rule_1/Rule_1.csv" + }, + "dataVersion": "1.0", + "metadataVersion": "1", + "eventTime": "2021-05-28T15:03:18Z" +} +``` + +### Microsoft.Storage.LifecyclePolicyCompleted event + +```json +{ + "topic": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/contosoresourcegroup/providers/Microsoft.Storage/storageAccounts/contosostorageaccount", + "subject": "BlobDataManagement/LifeCycleManagement/SummaryReport", + "eventType": "Microsoft.Storage.LifecyclePolicyCompleted", + "id": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "data": { + "scheduleTime": "2022/05/24 22:57:29.3260160", + "deleteSummary": { + "totalObjectsCount": 16, + "successCount": 14, + "errorList": "" + }, + "tierToCoolSummary": { + "totalObjectsCount": 0, + "successCount": 0, + "errorList": "" + }, + "tierToArchiveSummary": { + "totalObjectsCount": 0, + "successCount": 0, + "errorList": "" + } + }, + "dataVersion": "1", + "metadataVersion": "1", + "eventTime": "2022-05-26T00:00:40.1880331" +} +``` + # [Cloud event schema](#tab/cloud-event-schema) ### Microsoft.Storage.BlobCreated event @@ -554,7 +618,6 @@ If the blob storage account has a hierarchical namespace, the data looks similar --- - ## Event properties # [Event Grid event schema](#tab/event-grid-event-schema) diff --git a/articles/event-grid/receive-events.md b/articles/event-grid/receive-events.md index 30df54725e266..fd0bba9870e0e 100644 --- a/articles/event-grid/receive-events.md +++ b/articles/event-grid/receive-events.md @@ -219,7 +219,7 @@ module.exports = function (context, req) { ### Test Blob Created event handling -Test the new functionality of the function by putting a [Blob storage 
event](./event-schema-blob-storage.md#example-event) into the test field and running: +Test the new functionality of the function by putting a [Blob storage event](./event-schema-blob-storage.md#example-events) into the test field and running: ```json [{ diff --git a/articles/event-grid/whats-new.md index 689f8b278ebb5..ad0f14531c341 100644 --- a/articles/event-grid/whats-new.md +++ b/articles/event-grid/whats-new.md @@ -12,6 +12,16 @@ ms.date: 03/31/2022 Azure Event Grid receives improvements on an ongoing basis. To stay up to date with the most recent developments, this article provides you with information about the features that are added or updated in a release. +## REST API version 2021-12 +This release corresponds to REST API version 2021-12-01, which includes the following features: + +- [Enable managed identities for system topics](enable-identity-system-topics.md) +- [Enable managed identities for custom topics and domains](enable-identity-custom-topics-domains.md) +- [Use managed identities to deliver events to destinations](add-identity-roles.md) +- [Support for delivery attributes](delivery-properties.md) +- [Storage queue - message time-to-live (TTL)](delivery-properties.md#configure-time-to-live-on-outgoing-events-to-azure-storage-queues) +- [Azure Active Directory authentication for topics and domains, and partner namespaces](authenticate-with-active-directory.md) + ## REST API version 2021-10 This release corresponds to REST API version 2021-10-15-preview, which includes the following features: diff --git a/articles/event-hubs/event-hubs-availability-and-consistency.md index 959d75d201e91..5668505c1a361 100644 --- a/articles/event-hubs/event-hubs-availability-and-consistency.md +++ b/articles/event-hubs/event-hubs-availability-and-consistency.md @@ -31,7 +31,7 @@ We recommend sending events to an event hub without setting partition informatio - [Send events using .NET](event-hubs-dotnet-standard-getstarted-send.md) - [Send events using Java](event-hubs-java-get-started-send.md) -- [Send events using JavaScript](event-hubs-python-get-started-send.md) +- [Send events using JavaScript](event-hubs-node-get-started-send.md) - [Send events using Python](event-hubs-python-get-started-send.md) diff --git a/articles/event-hubs/event-hubs-capture-overview.md index bad083519e798..fa2a856826824 100644 --- a/articles/event-hubs/event-hubs-capture-overview.md +++ b/articles/event-hubs/event-hubs-capture-overview.md @@ -2,7 +2,7 @@ title: Capture streaming events - Azure Event Hubs | Microsoft Docs description: This article provides an overview of the Capture feature that allows you to capture events streaming through Azure Event Hubs. ms.topic: article -ms.date: 02/16/2021 +ms.date: 05/31/2022 --- # Capture events through Azure Event Hubs in Azure Blob Storage or Azure Data Lake Storage @@ -27,6 +27,9 @@ Event Hubs Capture enables you to specify your own Azure Blob storage account an Captured data is written in [Apache Avro][Apache Avro] format: a compact, fast, binary format that provides rich data structures with inline schema. This format is widely used in the Hadoop ecosystem, Stream Analytics, and Azure Data Factory. More information about working with Avro is available later in this article. 
+> [!NOTE] +> When you use the no-code editor in the Azure portal, you can capture streaming data in Event Hubs in an Azure Data Lake Storage Gen2 account in the **Parquet** format. For more information, see [How to: capture data from Event Hubs in Parquet format](../stream-analytics/capture-event-hub-data-parquet.md?toc=%2Fazure%2Fevent-hubs%2Ftoc.json) and [Tutorial: capture Event Hubs data in Parquet format and analyze with Azure Synapse Analytics](../stream-analytics/event-hubs-parquet-capture-tutorial.md?toc=%2Fazure%2Fevent-hubs%2Ftoc.json). + ### Capture windowing Event Hubs Capture enables you to set up a window to control capturing. This window is a minimum size and time configuration with a "first wins policy," meaning that the first trigger encountered causes a capture operation. If you have a fifteen-minute, 100 MB capture window and send 1 MB per second, the size window triggers before the time window. Each partition captures independently and writes a completed block blob at the time of capture, named for the time at which the capture interval was encountered. The storage naming convention is as follows: @@ -41,13 +44,13 @@ The date values are padded with zeroes; an example filename might be: https://mystorageaccount.blob.core.windows.net/mycontainer/mynamespace/myeventhub/0/2017/12/08/03/03/17.avro ``` -In the event that your Azure storage blob is temporarily unavailable, Event Hubs Capture will retain your data for the data retention period configured on your event hub and back fill the data once your storage account is available again. +If your Azure storage blob is temporarily unavailable, Event Hubs Capture will retain your data for the data retention period configured on your event hub and back fill the data once your storage account is available again. ### Scaling throughput units or processing units In the standard tier of Event Hubs, the traffic is controlled by [throughput units](event-hubs-scalability.md#throughput-units) and in the premium tier Event Hubs, it's controlled by [processing units](event-hubs-scalability.md#processing-units). Event Hubs Capture copies data directly from the internal Event Hubs storage, bypassing throughput unit or processing unit egress quotas and saving your egress for other processing readers, such as Stream Analytics or Spark. -Once configured, Event Hubs Capture runs automatically when you send your first event, and continues running. To make it easier for your downstream processing to know that the process is working, Event Hubs writes empty files when there is no data. This process provides a predictable cadence and marker that can feed your batch processors. +Once configured, Event Hubs Capture runs automatically when you send your first event, and continues running. To make it easier for your downstream processing to know that the process is working, Event Hubs writes empty files when there's no data. This process provides a predictable cadence and marker that can feed your batch processors. ## Setting up Event Hubs Capture @@ -124,7 +127,7 @@ Apache Avro has complete Getting Started guides for [Java][Java] and [Python][Py Event Hubs Capture is metered similarly to [throughput units](event-hubs-scalability.md#throughput-units) (standard tier) or [processing units](event-hubs-scalability.md#processing-units) (in premium tier): as an hourly charge. The charge is directly proportional to the number of throughput units or processing units purchased for the namespace. 
As throughput units or processing units are increased and decreased, Event Hubs Capture meters increase and decrease to provide matching performance. The meters occur in tandem. For pricing details, see [Event Hubs pricing](https://azure.microsoft.com/pricing/details/event-hubs/). -Capture does not consume egress quota as it is billed separately. +Capture doesn't consume egress quota as it is billed separately. ## Integration with Event Grid diff --git a/articles/event-hubs/exceptions-dotnet.md b/articles/event-hubs/exceptions-dotnet.md index c786c50e9aefa..15df8d97d574b 100644 --- a/articles/event-hubs/exceptions-dotnet.md +++ b/articles/event-hubs/exceptions-dotnet.md @@ -39,7 +39,7 @@ try { // Read events using the consumer client } -catch (EventHubsException ex) where +catch (EventHubsException ex) when (ex.Reason == EventHubsException.FailureReason.ConsumerDisconnected) { // Take action based on a consumer being disconnected @@ -47,4 +47,4 @@ catch (EventHubsException ex) where ``` ## Next steps -There are other exceptions that are documented in the [legacy article](event-hubs-messaging-exceptions.md). Some of them apply only to the legacy Event Hubs .NET client library. \ No newline at end of file +There are other exceptions that are documented in the [legacy article](event-hubs-messaging-exceptions.md). Some of them apply only to the legacy Event Hubs .NET client library. diff --git a/articles/event-hubs/includes/event-hubs-dedicated-clusters-faq.md b/articles/event-hubs/includes/event-hubs-dedicated-clusters-faq.md index 5a14070d30c5e..fe020bb1a840f 100644 --- a/articles/event-hubs/includes/event-hubs-dedicated-clusters-faq.md +++ b/articles/event-hubs/includes/event-hubs-dedicated-clusters-faq.md @@ -5,7 +5,7 @@ services: event-hubs author: spelluru ms.service: event-hubs ms.topic: include -ms.date: 09/28/2021 +ms.date: 06/01/2022 ms.author: spelluru ms.custom: "include file" @@ -48,7 +48,7 @@ To monitor the CPU usage of the dedicated cluster you need to follow these steps - Select `CPU` as the metrics and use the `Max` as the aggregation. - Then add a filter for the property type `Role`, use the equal operator and select all three values(`SBSAdmin`, `SBSFE`, `SBSEH`) from the dropdown. -Then you can monitor this metic to determine when you should scale your dedicated cluster. +Then you can monitor this metric to determine when you should scale your dedicated cluster. You can also set up [alerts](../../azure-monitor/alerts/alerts-overview.md) against this metric to get notified when CPU usage reaches the thresholds you set. @@ -57,4 +57,4 @@ You can also set up [alerts](../../azure-monitor/alerts/alerts-overview.md) agai You can geo-pair a namespace under a Dedicated-tier cluster with another namespace under a Dedicated-tier cluster. We don't encourage pairing a dedicated-tier namespace with a namespace in our standard offering because the throughput limit will be incompatible and result in errors. ### Can I migrate my Standard namespaces to belong to a Dedicated-tier cluster? -We don't currently support an automated migration process for migrating your event hubs data from a Standard namespace to a Dedicated one. +We don't currently support an automated migration process for migrating your event hubs data from a standard or premium namespace to a dedicated one. 
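To complement the portal steps for monitoring cluster CPU usage described earlier in this FAQ, the same metric can be pulled programmatically. The following is a minimal Azure PowerShell sketch; the cluster and resource group names are placeholders, and it omits the per-role dimension split shown in the portal steps.

```azurepowershell-interactive
# Sketch: read the maximum CPU metric for an Event Hubs dedicated cluster over the last hour.
# "MyResourceGroup" and "mycluster" are illustrative placeholders.
$cluster = Get-AzResource -ResourceGroupName "MyResourceGroup" `
    -ResourceType "Microsoft.EventHub/clusters" -Name "mycluster"

Get-AzMetric -ResourceId $cluster.ResourceId `
    -MetricName "CPU" `
    -AggregationType Maximum `
    -TimeGrain 00:05:00 `
    -StartTime (Get-Date).AddHours(-1) `
    -EndTime (Get-Date)
```

If the maximum value regularly approaches the threshold you use for scaling decisions, consider adding capacity units to the cluster or setting up the metric alert described above.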
diff --git a/articles/event-hubs/transport-layer-security-configure-minimum-version.md b/articles/event-hubs/transport-layer-security-configure-minimum-version.md index bd3c1b34026e9..1db8e2a4ebdd0 100644 --- a/articles/event-hubs/transport-layer-security-configure-minimum-version.md +++ b/articles/event-hubs/transport-layer-security-configure-minimum-version.md @@ -68,7 +68,7 @@ To check the minimum required TLS version for your Event Hubs namespace, you can .\ARMClient.exe token ``` -Once you have your bearer token, you can use the script below in combination with something like [Rest Client](https://marketplace.visualstudio.com/items?itemName=humao.rest-client) to query the API. +Once you have your bearer token, you can use the script below in combination with something like [REST Client](https://marketplace.visualstudio.com/items?itemName=humao.rest-client) to query the API. ```http @token = Bearer diff --git a/articles/expressroute/designing-for-disaster-recovery-with-expressroute-privatepeering.md b/articles/expressroute/designing-for-disaster-recovery-with-expressroute-privatepeering.md index c2ed6c4d76592..6cf51fea9fe3a 100644 --- a/articles/expressroute/designing-for-disaster-recovery-with-expressroute-privatepeering.md +++ b/articles/expressroute/designing-for-disaster-recovery-with-expressroute-privatepeering.md @@ -5,7 +5,7 @@ services: expressroute author: duongau ms.service: expressroute ms.topic: article -ms.date: 05/09/2022 +ms.date: 05/25/2022 ms.author: duau --- @@ -21,14 +21,20 @@ However, taking Murphy's popular adage--*if anything can go wrong, it will*--int ## Need for redundant connectivity solution -There are possibilities and instances where an entire regional service (be it that of Microsoft, network service providers, customer, or other cloud service providers) gets degraded. The root cause for such regional wide service impact include natural calamity. That's why, for business continuity and mission critical applications it's important to plan for disaster recovery. +There are possibilities and instances where an ExpressRoute peering location or an entire regional service (be it that of Microsoft, network service providers, customer, or other cloud service providers) gets degraded. The root causes for such region-wide service impact include natural calamities. That's why, for business continuity and mission critical applications it's important to plan for disaster recovery. No matter what, whether you run your mission critical applications in an Azure region or on-premises or anywhere else, you can use another Azure region as your failover site. The following articles addresses disaster recovery from applications and frontend access perspectives: - [Enterprise-scale disaster recovery][Enterprise DR] - [SMB disaster recovery with Azure Site Recovery][SMB DR] -If you rely on ExpressRoute connectivity between your on-premises network and Microsoft for mission critical operations, your disaster recovery plan should also include geo-redundant network connectivity. 
+If you rely on ExpressRoute connectivity between your on-premises network and Microsoft for mission critical operations, you need to consider the following to plan for disaster recovery over ExpressRoute: + +- using geo-redundant ExpressRoute circuits +- using diverse service provider networks for the different ExpressRoute circuits +- designing each of the ExpressRoute circuits for [high availability][HA] +- terminating the different ExpressRoute circuits in different locations on the customer network +- using [Availability zone aware ExpressRoute Virtual Network Gateways](../vpn-gateway/about-zone-redundant-vnet-gateways.md) ## Challenges of using multiple ExpressRoute circuits @@ -36,14 +42,17 @@ When you interconnect the same set of networks using more than one connection, y However, if you load balance traffic across geo-redundant parallel paths, regardless of whether you have stateful entities or not, you would experience inconsistent network performance. These geo-redundant parallel paths can be through the same metro or different metro found on the [providers by location](expressroute-locations-providers.md#partners) page. -### Same metro +### Redundancy with ExpressRoute circuits in the same metro -[Many metros](expressroute-locations-providers.md#global-commercial-azure) have two ExpressRoute locations. An example would be *Amsterdam* and *Amsterdam2*. When designing redundancy, you could build two parallel paths to Azure with both locations in the same metro. The advantage of this design is when application failover happens, end-to-end latency between your on-premises applications and Microsoft stays approximately the same. However, if there is a natural disaster such as an earthquake, connectivity for both paths may no longer be available. +[Many metros](expressroute-locations-providers.md#global-commercial-azure) have two ExpressRoute locations. An example would be *Amsterdam* and *Amsterdam2*. When designing redundancy, you could build two parallel paths to Azure with both locations in the same metro. You could do this with the same provider or choose to work with a different service provider to improve resiliency. Another advantage of this design is when application failover happens, end-to-end latency between your on-premises applications and Microsoft stays approximately the same. However, if there is a natural disaster such as an earthquake, connectivity for both paths may no longer be available. -### Different metros +### Redundancy with ExpressRoute circuits in different metros When using different metros for redundancy, you should select the secondary location in the same [geo-political region](expressroute-locations-providers.md#locations). To choose a location outside of the geo-political region, you'll need to use Premium SKU for both circuits in the parallel paths. The advantage of this configuration is the chances of a natural disaster causing an outage to both links are much lower but at the cost of increased latency end-to-end. +>[!NOTE] +>Enabling BFD on the ExpressRoute circuits will help with faster link failure detection between Microsoft Enterprise Edge (MSEE) devices and the Customer/Partner Edge routers. However, the overall failover and convergence to the redundant site may take up to 180 seconds under some failure conditions and you may experience increased latency or performance degradation during this time. + +In this article, let's discuss how to address challenges you may face when configuring geo-redundant paths. 
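Whichever redundancy model you choose, it helps to confirm that the private peering of each circuit is actually learning your on-premises prefixes before you depend on it as a failover path. The following is a minimal Azure PowerShell sketch; the circuit and resource group names are placeholders.

```azurepowershell-interactive
# Sketch: list the routes learned on the primary link of each geo-redundant circuit's private peering.
# "ER-Circuit-Region1", "ER-Circuit-Region2", and "MyResourceGroup" are illustrative placeholders.
$circuits = @("ER-Circuit-Region1", "ER-Circuit-Region2")

foreach ($name in $circuits) {
    Get-AzExpressRouteCircuitRouteTable -ResourceGroupName "MyResourceGroup" `
        -ExpressRouteCircuitName $name `
        -PeeringType AzurePrivatePeering `
        -DevicePath Primary |
        Format-Table Network, NextHop, Path
}
```

Repeat the check with `-DevicePath Secondary` so that both links of each circuit are validated.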
## Small to medium on-premises network considerations @@ -52,13 +61,6 @@ Let's consider the example network illustrated in the following diagram. In the :::image type="content" source="./media/designing-for-disaster-recovery-with-expressroute-pvt/one-region.png" alt-text="Diagram of small to medium size on-premises network considerations."::: -When you are designing ExpressRoute connectivity for disaster recovery, you need to consider: - -- using geo-redundant ExpressRoute circuits -- using diverse service provider network(s) for different ExpressRoute circuit -- designing each of the ExpressRoute circuit for [high availability][HA] -- terminating the different ExpressRoute circuit in different location on the customer network - By default, if you advertise routes identically over all the ExpressRoute paths, Azure will load-balance on-premises bound traffic across all the ExpressRoute paths using Equal-cost multi-path (ECMP) routing. However, with the geo-redundant ExpressRoute circuits we need to take into consideration different network performances with different network paths (particularly for network latency). To get more consistent network performance during normal operation, you may want to prefer the ExpressRoute circuit that offers the minimal latency. @@ -134,7 +136,7 @@ The Scenario 2 is illustrated in the following diagram. In the diagram, green li The solution is illustrated in the following diagram. As illustrated, you can architect the scenario either using more specific route (Option 1) or AS-path prepend (Option 2) to influence VNet path selection. To influence on-premises network route selection for Azure bound traffic, you need configure the interconnection between the on-premises location as less preferable. How you configure the interconnection link as preferable depends on the routing protocol used within the on-premises network. You can use local preference with iBGP or metric with IGP (OSPF or IS-IS). -:::image type="content" source="./media/designing-for-disaster-recovery-with-expressroute-pvt/multi-region-arch2.png" alt-text="Diagram of active-active ExpressRoute circuits solution 2."::: +:::image type="content" source="./media/designing-for-disaster-recovery-with-expressroute-pvt/multi-region-sol2.png" alt-text="Diagram of active-active ExpressRoute circuits solution 2."::: > [!IMPORTANT] > When one or multiple ExpressRoute circuits are connected to multiple virtual networks, virtual network to virtual network traffic can route via ExpressRoute. However, this is not recommended. To enable virtual network to virtual network connectivity, [configure virtual network peering](../virtual-network/virtual-network-manage-peering.md). diff --git a/articles/expressroute/expressroute-faqs.md b/articles/expressroute/expressroute-faqs.md index cca8c8c78f3ad..c015b47f32d8b 100644 --- a/articles/expressroute/expressroute-faqs.md +++ b/articles/expressroute/expressroute-faqs.md @@ -441,7 +441,7 @@ Your existing circuit will continue advertising the prefixes for Microsoft 365. * Microsoft peering of ExpressRoute circuits that are configured on or after August 1, 2017 will not have any prefixes advertised until a route filter is attached to the circuit. You will see no prefixes by default. ### If I have multiple Virtual Networks (Vnets) connected to the same ExpressRoute circuit, can I use ExpressRoute for Vnet-to-Vnet connectivity? -Vnet-to-Vnet connectivity over ExpressRoute is not recommended. 
To acheive this, configure [Virtual Network Peering](https://docs.microsoft.com/azure/virtual-network/virtual-network-peering-overview?msclkid=b64a7b6ac19e11eca60d5e3e5d0764f5). +Vnet-to-Vnet connectivity over ExpressRoute is not recommended. To achieve this, configure [Virtual Network Peering](../virtual-network/virtual-network-peering-overview.md?msclkid=b64a7b6ac19e11eca60d5e3e5d0764f5). ## ExpressRoute Direct @@ -455,4 +455,4 @@ Vnet-to-Vnet connectivity over ExpressRoute is not recommended. To acheive this, ### Does the ExpressRoute service store customer data? -No. +No. \ No newline at end of file diff --git a/articles/expressroute/expressroute-howto-erdirect.md b/articles/expressroute/expressroute-howto-erdirect.md index 2b46aa165980d..6208d6c970e2e 100644 --- a/articles/expressroute/expressroute-howto-erdirect.md +++ b/articles/expressroute/expressroute-howto-erdirect.md @@ -25,7 +25,7 @@ Before using ExpressRoute Direct, you must first enroll your subscription. To en Select-AzSubscription -Subscription "" ``` -2. Register your subscription for Public Preview using the following command: +2. Register your subscription using the following command: ```azurepowershell-interactive Register-AzProviderFeature -FeatureName AllowExpressRoutePorts -ProviderNamespace Microsoft.Network ``` @@ -102,6 +102,9 @@ Once enrolled, verify that the **Microsoft.Network** resource provider is regist } ] ``` + > [!NOTE] + > If bandwidth is unavailable in the target location, open a [support request in the Azure portal](https://ms.portal.azure.com/#view/Microsoft_Azure_Support/HelpAndSupportBlade/~/overview) and select the ExpressRoute Direct support topic. + > 5. Create an ExpressRoute Direct resource based on the location chosen above ExpressRoute Direct supports both QinQ and Dot1Q encapsulation. If QinQ is selected, each ExpressRoute circuit will be dynamically assigned an S-Tag and will be unique throughout the ExpressRoute Direct resource. Each C-Tag on the circuit must be unique on the circuit, but not across the ExpressRoute Direct. diff --git a/articles/expressroute/expressroute-howto-linkvnet-arm.md b/articles/expressroute/expressroute-howto-linkvnet-arm.md index be23036da8510..08fe3ebdf9e8e 100644 --- a/articles/expressroute/expressroute-howto-linkvnet-arm.md +++ b/articles/expressroute/expressroute-howto-linkvnet-arm.md @@ -200,7 +200,7 @@ Set-AzVirtualNetworkGatewayConnection -VirtualNetworkGatewayConnection $connecti ### FastPath and Private Link for 100Gbps ExpressRoute Direct -With FastPath and Private Link, Private Link traffic sent over ExpressRoute bypassess the ExpressRoute virtual network gateway in the data path. This is supported for connections associated to 100Gb ExpressRoute Direct circuits. To enable this, follow the below guidance: +With FastPath and Private Link, Private Link traffic sent over ExpressRoute bypasses the ExpressRoute virtual network gateway in the data path. This is Generally Available for connections associated to 100Gb ExpressRoute Direct circuits. To enable this, follow the below guidance: 1.
Send an email to **ERFastPathPL@microsoft.com**, providing the following information: * Azure Subscription ID * Virtual Network (Vnet) Resource ID diff --git a/articles/expressroute/expressroute-locations-providers.md b/articles/expressroute/expressroute-locations-providers.md index 1758a948e9c48..08558ba8c0bd1 100644 --- a/articles/expressroute/expressroute-locations-providers.md +++ b/articles/expressroute/expressroute-locations-providers.md @@ -6,7 +6,7 @@ author: duongau ms.service: expressroute ms.topic: conceptual -ms.date: 01/24/2022 +ms.date: 05/24/2022 ms.author: duau ms.custom: references_regions @@ -68,6 +68,7 @@ The following table shows connectivity locations and the service providers for e | **Copenhagen** | [Interxion CPH1](https://www.interxion.com/Locations/copenhagen/) | 1 | n/a | Supported | Interxion | | **Dallas** | [Equinix DA3](https://www.equinix.com/locations/americas-colocation/united-states-colocation/dallas-data-centers/da3/) | 1 | n/a | Supported | Aryaka Networks, AT&T NetBond, Cologix, Cox Business Cloud Port, Equinix, Intercloud, Internet2, Level 3 Communications, Megaport, Neutrona Networks, Orange, PacketFabric, Telmex Uninet, Telia Carrier, Transtelco, Verizon, Zayo| | **Denver** | [CoreSite DE1](https://www.coresite.com/data-centers/locations/denver/de1) | 1 | West Central US | Supported | CoreSite, Megaport, PacketFabric, Zayo | +| **Doha2** | [Ooredoo](https://www.ooredoo.qa/portal/OoredooQatar/b2b-data-centre) | 3 | Qatar Central | Supported | | | **Dubai** | [PCCS](https://www.pacificcontrols.net/cloudservices/index.html) | 3 | UAE North | n/a | Etisalat UAE | | **Dubai2** | [du datamena](http://datamena.com/solutions/data-centre) | 3 | UAE North | n/a | DE-CIX, du datamena, Equinix, GBI, Megaport, Orange, Orixcom | | **Dublin** | [Equinix DB3](https://www.equinix.com/locations/europe-colocation/ireland-colocation/dublin-data-centers/db3/) | 1 | North Europe | Supported | CenturyLink Cloud Connect, Colt, eir, Equinix, GEANT, euNetworks, Interxion, Megaport, Zayo| @@ -109,7 +110,7 @@ The following table shows connectivity locations and the service providers for e | **Rio de Janeiro** | [Equinix-RJ2](https://www.equinix.com/locations/americas-colocation/brazil-colocation/rio-de-janeiro-data-centers/rj2/) | 3 | Brazil Southeast | Supported | Equinix | | **San Antonio** | [CyrusOne SA1](https://cyrusone.com/locations/texas/san-antonio-texas/) | 1 | South Central US | Supported | CenturyLink Cloud Connect, Megaport, Zayo | | **Sao Paulo** | [Equinix SP2](https://www.equinix.com/locations/americas-colocation/brazil-colocation/sao-paulo-data-centers/sp2/) | 3 | Brazil South | Supported | Aryaka Networks, Ascenty Data Centers, British Telecom, Equinix, InterCloud, Level 3 Communications, Neutrona Networks, Orange, Tata Communications, Telefonica, UOLDIVEO | -| **Sao Paulo2** | [TIVIT TSM](https://www.tivit.com/en/tivit/) | 3 | Brazil South | Supported | Ascenty Data Centers | +| **Sao Paulo2** | [TIVIT TSM](https://www.tivit.com/en/tivit/) | 3 | Brazil South | Supported | Ascenty Data Centers, Tivit | | **Seattle** | [Equinix SE2](https://www.equinix.com/locations/americas-colocation/united-states-colocation/seattle-data-centers/se2/) | 1 | West US 2 | Supported | Aryaka Networks, CenturyLink Cloud Connect, Equinix, Level 3 Communications, Megaport, Telus, Zayo | | **Seoul** | [KINX Gasan IDC](https://www.kinx.net/?lang=en) | 2 | Korea Central | Supported | KINX, KT, LG CNS, LGUplus, Equinix, Sejong Telecom, SK Telecom | | **Seoul2** | [KT 
IDC](https://www.kt-idc.com/eng/introduce/sub1_4_10.jsp#tab) | 2 | Korea Central | n/a | KT | @@ -144,7 +145,7 @@ Azure national clouds are isolated from each other and from global commercial Az | **Chicago** | [Equinix CH1](https://www.equinix.com/locations/americas-colocation/united-states-colocation/chicago-data-centers/ch1/) | n/a | Supported | AT&T NetBond, British Telecom, Equinix, Level 3 Communications, Verizon | | **Dallas** | [Equinix DA3](https://www.equinix.com/locations/americas-colocation/united-states-colocation/dallas-data-centers/da3/) | n/a | Supported | Equinix, Internet2, Megaport, Verizon | | **New York** | [Equinix NY5](https://www.equinix.com/locations/americas-colocation/united-states-colocation/new-york-data-centers/ny5/) | n/a | Supported | Equinix, CenturyLink Cloud Connect, Verizon | -| **Phoenix** | [CyrusOne Chandler](https://cyrusone.com/data-center-locations/arizona/phoenix-data-center/) | US Gov Arizona | Supported | AT&T NetBond, CenturyLink Cloud Connect, Megaport | +| **Phoenix** | [CyrusOne Chandler](https://cyrusone.com/locations/arizona/phoenix-arizona-chandler/) | US Gov Arizona | Supported | AT&T NetBond, CenturyLink Cloud Connect, Megaport | | **San Antonio** | [CyrusOne SA2](https://cyrusone.com/locations/texas/san-antonio-texas-ii/) | US Gov Texas | Supported | CenturyLink Cloud Connect, Megaport | | **Silicon Valley** | [Equinix SV4](https://www.equinix.com/locations/americas-colocation/united-states-colocation/silicon-valley-data-centers/sv4/) | n/a | Supported | AT&T, Equinix, Level 3 Communications, Verizon | | **Seattle** | [Equinix SE2](https://www.equinix.com/locations/americas-colocation/united-states-colocation/seattle-data-centers/se2/) | n/a | Supported | Equinix, Megaport | diff --git a/articles/expressroute/expressroute-locations.md b/articles/expressroute/expressroute-locations.md index e40c607845221..7515d382e9e97 100644 --- a/articles/expressroute/expressroute-locations.md +++ b/articles/expressroute/expressroute-locations.md @@ -6,7 +6,7 @@ author: duongau ms.service: expressroute ms.topic: conceptual ms.workload: infrastructure-services -ms.date: 01/31/2022 +ms.date: 05/24/2022 ms.author: duau ms.custom: references_regions @@ -77,7 +77,7 @@ The following table shows locations by service provider. If you want to view ava | **[Deutsche Telekom AG](https://www.t-systems.com/de/en/cloud-and-infrastructure/manage-it-efficiently/managed-azure/cloudconnect-for-azure)** | Supported |Supported | Frankfurt2 | | **du datamena** |Supported |Supported | Dubai2 | | **[eir](https://www.eirevo.ie/cloud-services/cloud-connectivity)** |Supported |Supported | Dublin| -| **[Epsilon Global Communications](https://www.epsilontel.com/solutions/direct-cloud-connect)** |Supported |Supported | Singapore, Singapore2 | +| **[Epsilon Global Communications](https://epsilontel.com/solutions/cloud-connect/)** |Supported |Supported | Singapore, Singapore2 | | **[Equinix](https://www.equinix.com/partners/microsoft-azure/)** |Supported |Supported | Amsterdam, Amsterdam2, Atlanta, Berlin, Bogota, Canberra2, Chicago, Dallas, Dubai2, Dublin, Frankfurt, Frankfurt2, Geneva, Hong Kong SAR, London, London2, Los Angeles*, Los Angeles2, Melbourne, Miami, Milan, New York, Osaka, Paris, Quebec City, Rio de Janeiro, Sao Paulo, Seattle, Seoul, Silicon Valley, Singapore, Singapore2, Stockholm, Sydney, Tokyo, Toronto, Washington DC, Zurich

                  **New ExpressRoute circuits are no longer supported with Equinix in Los Angeles. Please create new circuits in Los Angeles2.* | | **Etisalat UAE** |Supported |Supported | Dubai | | **[euNetworks](https://eunetworks.com/services/solutions/cloud-connect/microsoft-azure-expressroute/)** |Supported |Supported | Amsterdam, Amsterdam2, Dublin, Frankfurt, London | @@ -159,6 +159,7 @@ The following table shows locations by service provider. If you want to view ava | **[Transtelco](https://transtelco.net/enterprise-services/)** |Supported |Supported | Dallas, Queretaro(Mexico)| | **[T-Mobile/Sprint](https://www.t-mobile.com/business/solutions/networking/cloud-networking )** |Supported |Supported | Chicago, Silicon Valley, Washington DC | | **[T-Systems](https://geschaeftskunden.telekom.de/vernetzung-digitalisierung/produkt/intraselect)** |Supported |Supported | Frankfurt | +| **[Tivit](https://www.tivit.com/cloud-solutions/public-cloud/public-cloud-azure/)** |Supported |Supported | Sao Paulo2 | | **[UOLDIVEO](https://www.uoldiveo.com.br/)** |Supported |Supported | Sao Paulo | | **[UIH](https://www.uih.co.th/en/network-solutions/global-network/cloud-direct-for-microsoft-azure-expressroute)** | Supported | Supported | Bangkok | | **[Verizon](https://enterprise.verizon.com/products/network/application-enablement/secure-cloud-interconnect/)** |Supported |Supported | Amsterdam, Chicago, Dallas, Hong Kong SAR, London, Mumbai, Silicon Valley, Singapore, Sydney, Tokyo, Toronto, Washington DC | @@ -318,7 +319,7 @@ If you are remote and do not have fiber connectivity or you want to explore othe | **[Stream Data Centers]( https://www.streamdatacenters.com/products-services/network-cloud/ )** | Megaport | | **[RagingWire Data Centers](https://www.ragingwire.com/wholesale/wholesale-data-centers-worldwide-nexcenters)** | IX Reach, Megaport, PacketFabric | | **[T5 Datacenters](https://t5datacenters.com/)** | IX Reach | -| **[vXchnge](https://www.vxchnge.com)** | IX Reach, Megaport | +| **vXchnge** | IX Reach, Megaport | ## Connectivity through National Research and Education Networks (NREN) diff --git a/articles/expressroute/media/designing-for-disaster-recovery-with-expressroute-pvt/multi-region-sol2.png b/articles/expressroute/media/designing-for-disaster-recovery-with-expressroute-pvt/multi-region-sol2.png index 3263c550229b7..886c5a945f16e 100644 Binary files a/articles/expressroute/media/designing-for-disaster-recovery-with-expressroute-pvt/multi-region-sol2.png and b/articles/expressroute/media/designing-for-disaster-recovery-with-expressroute-pvt/multi-region-sol2.png differ diff --git a/articles/firewall-manager/manage-web-application-firewall-policies.md b/articles/firewall-manager/manage-web-application-firewall-policies.md new file mode 100644 index 0000000000000..bb5e5dfa9de1c --- /dev/null +++ b/articles/firewall-manager/manage-web-application-firewall-policies.md @@ -0,0 +1,44 @@ +--- +title: Manage Azure Web Application Firewall policies (preview) +description: Learn how to use Azure Firewall Manager to manage Azure Web Application Firewall policies +author: vhorne +ms.author: victorh +ms.service: firewall-manager +ms.topic: how-to +ms.date: 06/02/2022 +--- + +# Manage Web Application Firewall policies (preview) + +You can centrally create and associate Web Application Firewall (WAF) policies for your application delivery platforms, including Azure Front Door and Azure Application Gateway. 
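If you prefer to create a Front Door WAF policy ahead of time and only handle the association in Firewall Manager, the policy can also be created with Azure PowerShell. The following is a minimal sketch that assumes the `Az.FrontDoor` module and placeholder resource names; a policy created this way can be selected in the association steps later in this article.

```azurepowershell-interactive
# Sketch: create a Front Door WAF policy in prevention mode.
# "MyResourceGroup" and "ContosoWafPolicy" are illustrative placeholders.
New-AzFrontDoorWafPolicy `
    -ResourceGroupName "MyResourceGroup" `
    -Name "ContosoWafPolicy" `
    -Mode Prevention `
    -EnabledState Enabled
```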
+ +> [!IMPORTANT] +> Managing Web Application Firewall policies using Azure Firewall Manager is currently in PREVIEW. +> See the [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/) for legal terms that apply to Azure features that are in beta, preview, or otherwise not yet released into general availability. + +## Prerequisites + +- A deployed [Azure Front Door](../frontdoor/quickstart-create-front-door.md) or [Azure Application Gateway](../application-gateway/quick-create-portal.md) + +## Associate a WAF policy + +1. Sign in to the Azure portal at [https://portal.azure.com](https://portal.azure.com). +2. In the Azure portal search bar, type **Firewall Manager** and press **Enter**. +3. On the Azure Firewall Manager page, select **Application Delivery Platforms**. + :::image type="content" source="media/manage-web-application-firewall-policies/application-delivery-platforms.png" alt-text="Screenshot of Firewall Manager application delivery platforms."::: +1. Select your application delivery platform (Front Door or Application Gateway) to associate a WAF policy. In this example, we'll associate a WAF policy to a Front Door. +1. Select **Manage Security** and then select **Associate WAF policy**. + :::image type="content" source="media/manage-web-application-firewall-policies/associate-waf-policy.png" alt-text="Screenshot of Firewall Manager associate WAF policy."::: +1. Select either an existing policy or **Create New**. +1. Select the domain(s) that you want the WAF policy to protect with your Azure Front Door profile. +1. Select **Associate**. + +## View and manage WAF policies + +1. On the Azure Firewall Manager page, under **Security**, select **Web application firewall policies** to view all your policies. +1. Select **Add** to create a new WAF policy or import settings from an existing WAF policy. 
+ :::image type="content" source="media/manage-web-application-firewall-policies/web-application-firewall-policies.png" alt-text="Screenshot of Firewall Manager Web Application Firewall policies."::: + +## Next steps + +- [Configure WAF policies using Azure Firewall Manager (preview)](../web-application-firewall/shared/manage-policies.md) diff --git a/articles/firewall-manager/media/manage-web-application-firewall-policies/application-delivery-platforms.png b/articles/firewall-manager/media/manage-web-application-firewall-policies/application-delivery-platforms.png new file mode 100644 index 0000000000000..79e9716b02f74 Binary files /dev/null and b/articles/firewall-manager/media/manage-web-application-firewall-policies/application-delivery-platforms.png differ diff --git a/articles/firewall-manager/media/manage-web-application-firewall-policies/associate-waf-policy.png b/articles/firewall-manager/media/manage-web-application-firewall-policies/associate-waf-policy.png new file mode 100644 index 0000000000000..0713b49e30cc1 Binary files /dev/null and b/articles/firewall-manager/media/manage-web-application-firewall-policies/associate-waf-policy.png differ diff --git a/articles/firewall-manager/media/manage-web-application-firewall-policies/web-application-firewall-policies.png b/articles/firewall-manager/media/manage-web-application-firewall-policies/web-application-firewall-policies.png new file mode 100644 index 0000000000000..c0189ffd0f443 Binary files /dev/null and b/articles/firewall-manager/media/manage-web-application-firewall-policies/web-application-firewall-policies.png differ diff --git a/articles/firewall-manager/secured-virtual-hub.md b/articles/firewall-manager/secured-virtual-hub.md index 0059fe3179765..e53b243efbbf1 100644 --- a/articles/firewall-manager/secured-virtual-hub.md +++ b/articles/firewall-manager/secured-virtual-hub.md @@ -5,7 +5,7 @@ author: vhorne ms.service: firewall-manager services: firewall-manager ms.topic: conceptual -ms.date: 10/12/2020 +ms.date: 06/09/2022 ms.author: victorh --- @@ -23,13 +23,13 @@ You can choose the required security providers to protect and govern your networ Using Firewall Manager in the Azure portal, you can either create a new secured virtual hub, or convert an existing virtual hub that you previously created using Azure Virtual WAN. -## Gated public preview +## Public preview features -The below features are currently in gated public preview. +The following features are in public preview: | Feature | Description | | ---------- | --------- | -| Routing Intent and Policies enabling Inter-hub security | This feature allows customers to configure internet-bound, private or inter-hub traffic flow through the Azure Firewall. Please review [Routing Intent and Policies](../virtual-wan/how-to-routing-policies.md) to learn more. | +| Routing Intent and Policies enabling Inter-hub security | This feature allows you to configure internet-bound, private or inter-hub traffic flow through Azure Firewall. For more information, see [Routing Intent and Policies](../virtual-wan/how-to-routing-policies.md). 
| ## Next steps diff --git a/articles/firewall-manager/threat-intelligence-settings.md b/articles/firewall-manager/threat-intelligence-settings.md index 15b328241380b..e559366ac04f8 100644 --- a/articles/firewall-manager/threat-intelligence-settings.md +++ b/articles/firewall-manager/threat-intelligence-settings.md @@ -5,7 +5,7 @@ services: firewall-manager author: vhorne ms.service: firewall-manager ms.topic: article -ms.date: 06/30/2020 +ms.date: 06/09/2022 ms.author: victorh --- @@ -23,9 +23,9 @@ You can configure threat intelligence in one of the three modes that are describ |Mode |Description | |---------|---------| -|`Off` | The threat intelligence feature is not enabled for your firewall. | -|`Alert only` | You will receive high-confidence alerts for traffic going through your firewall to or from known malicious IP addresses and domains. | -|`Alert and deny` | Traffic is blocked and you will receive high-confidence alerts when traffic is detected attempting to go through your firewall to or from known malicious IP addresses and domains. | +|`Off` | The threat intelligence feature isn't enabled for your firewall. | +|`Alert only` | You'll receive high-confidence alerts for traffic going through your firewall to or from known malicious IP addresses and domains. | +|`Alert and deny` | Traffic is blocked and you'll receive high-confidence alerts when traffic is detected attempting to go through your firewall to or from known malicious IP addresses and domains. | > [!NOTE] > Threat intelligence mode is inherited from parent policies to child policies. A child policy must be configured with the same or a stricter mode than the parent policy. @@ -59,7 +59,18 @@ The following log excerpt shows a triggered rule for outbound traffic to a malic ## Testing -- **Outbound testing** - Outbound traffic alerts should be a rare occurrence, as it means that your environment has been compromised. To help test outbound alerts are working, a test FQDN has been created that triggers an alert. Use **testmaliciousdomain.eastus.cloudapp.azure.com** for your outbound tests. +- **Outbound testing** - Outbound traffic alerts should be a rare occurrence, as it means that your environment has been compromised. To help test outbound alerts are working, the following FQDNs have been created to trigger an alert. Use the following FQDNs for your outbound tests: +

                  + + - `documentos-001.brazilsouth.cloudapp.azure.com` + - `itaucardiupp.centralus.cloudapp.azure.com` + - `azure-c.online` + - `www.azureadsec.com` + - `azurein360.co` + + > [!NOTE] + > These FQDNs are subject to change, so they are not guaranteed to always work. Any changes will be documented here. + - **Inbound testing** - You can expect to see alerts on incoming traffic if DNAT rules are configured on the firewall. This is true even if only specific sources are allowed on the DNAT rule and traffic is otherwise denied. Azure Firewall doesn't alert on all known port scanners; only on scanners that are known to also engage in malicious activity. diff --git a/articles/firewall-manager/toc.yml b/articles/firewall-manager/toc.yml index b4e460e6de8dd..d7fd8627d2f70 100644 --- a/articles/firewall-manager/toc.yml +++ b/articles/firewall-manager/toc.yml @@ -66,6 +66,8 @@ href: rule-hierarchy.md - name: Configure DDoS Protection Plan href: configure-ddos.md + - name: Manage WAF policies + href: manage-web-application-firewall-policies.md - name: Reference items: - name: Azure CLI diff --git a/articles/firewall/features.md b/articles/firewall/features.md index d2685afe7ac7a..3a3f0845914d6 100644 --- a/articles/firewall/features.md +++ b/articles/firewall/features.md @@ -5,7 +5,7 @@ services: firewall author: vhorne ms.service: firewall ms.topic: conceptual -ms.date: 07/30/2021 +ms.date: 06/06/2022 ms.author: victorh --- @@ -47,7 +47,7 @@ Azure Firewall can be configured during deployment to span multiple Availability You can also associate Azure Firewall to a specific zone just for proximity reasons, using the service standard 99.95% SLA. -There's no additional cost for a firewall deployed in an Availability Zone. However, there are added costs for inbound and outbound data transfers associated with Availability Zones. For more information, see [Bandwidth pricing details](https://azure.microsoft.com/pricing/details/bandwidth/). +There's no additional cost for a firewall deployed in more than one Availability Zone. However, there are added costs for inbound and outbound data transfers associated with Availability Zones. For more information, see [Bandwidth pricing details](https://azure.microsoft.com/pricing/details/bandwidth/). Azure Firewall Availability Zones are available in regions that support Availability Zones. For more information, see [Regions that support Availability Zones in Azure](../availability-zones/az-region.md) diff --git a/articles/firewall/firewall-preview.md b/articles/firewall/firewall-preview.md index 8d66e47b0e092..4934c9811b292 100644 --- a/articles/firewall/firewall-preview.md +++ b/articles/firewall/firewall-preview.md @@ -5,7 +5,7 @@ services: firewall author: vhorne ms.service: firewall ms.topic: conceptual -ms.date: 03/04/2022 +ms.date: 05/25/2022 ms.author: victorh --- @@ -61,7 +61,7 @@ Unregister-AzProviderFeature -FeatureName AFWEnableNetworkRuleNameLogging -Provi As more applications move to the cloud, the performance of the network elements can become a bottleneck. As the central piece of any network design, the firewall needs to support all the workloads. The Azure Firewall Premium performance boost feature allows more scalability for these deployments. -This feature significantly increases the throughput of Azure Firewall Premium. For more details, see [Azure Firewall performance](firewall-performance.md). +This feature significantly increases the throughput of Azure Firewall Premium. 
For more information, see [Azure Firewall performance](firewall-performance.md). To enable the Azure Firewall Premium Performance boost feature, run the following commands in Azure PowerShell. Stop and start the firewall for the feature to take effect immediately. Otherwise, the firewall/s is updated with the feature within several days. @@ -82,6 +82,53 @@ Run the following Azure PowerShell command to turn off this feature: Unregister-AzProviderFeature -FeatureName AFWEnableAccelnet -ProviderNamespace Microsoft.Network ``` +### IDPS Private IP ranges (preview) + +In Azure Firewall Premium IDPS, private IP address ranges are used to identify if traffic is inbound, outbound, or internal (East-West). Each signature is applied on specific traffic direction, as indicated in the signature rules table. By default, only ranges defined by IANA RFC 1918 are considered private IP addresses. So traffic sent from a private IP address range to a private IP address range is considered internal. To modify your private IP addresses, you can now easily edit, remove, or add ranges as needed. + +:::image type="content" source="media/firewall-preview/idps-private-ip.png" alt-text="Screenshot showing I D P S private IP address ranges."::: + +### Structured firewall logs (preview) + +Today, the following diagnostic log categories are available for Azure Firewall: +- Application rule log +- Network rule log +- DNS proxy log + +These log categories use [Azure diagnostics mode](../azure-monitor/essentials/resource-logs.md#azure-diagnostics-mode). In this mode, all data from any diagnostic setting will be collected in the [AzureDiagnostics](/azure/azure-monitor/reference/tables/azurediagnostics) table. + +With this new feature, you'll be able to choose to use [Resource Specific Tables](../azure-monitor/essentials/resource-logs.md#resource-specific) instead of the existing [AzureDiagnostics](/azure/azure-monitor/reference/tables/azurediagnostics) table. In case both sets of logs are required, at least two diagnostic settings need to be created per firewall. + +In **Resource specific** mode, individual tables in the selected workspace are created for each category selected in the diagnostic setting. This method is recommended since it: +- makes it much easier to work with the data in log queries +- makes it easier to discover schemas and their structure +- improves performance across both ingestion latency and query times +- allows you to grant Azure RBAC rights on a specific table + +New resource specific tables are now available in Diagnostic setting that allows you to utilize the following newly added categories: + +- [Network rule log](/azure/azure-monitor/reference/tables/azfwnetworkrule) - Contains all Network Rule log data. Each match between data plane and network rule creates a log entry with the data plane packet and the matched rule's attributes. +- [NAT rule log](/azure/azure-monitor/reference/tables/azfwnatrule) - Contains all DNAT (Destination Network Address Translation) events log data. Each match between data plane and DNAT rule creates a log entry with the data plane packet and the matched rule's attributes. +- [Application rule log](/azure/azure-monitor/reference/tables/azfwapplicationrule) - Contains all Application rule log data. Each match between data plane and Application rule creates a log entry with the data plane packet and the matched rule's attributes. +- [Threat Intelligence log](/azure/azure-monitor/reference/tables/azfwthreatintel) - Contains all Threat Intelligence events. 
+- [IDPS log](/azure/azure-monitor/reference/tables/azfwidpssignature) - Contains all data plane packets that were matched with one or more IDPS signatures. +- [DNS proxy log](/azure/azure-monitor/reference/tables/azfwdnsquery) - Contains all DNS Proxy events log data. +- [Internal FQDN resolve failure log](/azure/azure-monitor/reference/tables/azfwinternalfqdnresolutionfailure) - Contains all internal Firewall FQDN resolution requests that resulted in failure. +- [Application rule aggregation log](/azure/azure-monitor/reference/tables/azfwapplicationruleaggregation) - Contains aggregated Application rule log data for Policy Analytics. +- [Network rule aggregation log](/azure/azure-monitor/reference/tables/azfwnetworkruleaggregation) - Contains aggregated Network rule log data for Policy Analytics. +- [NAT rule aggregation log](/azure/azure-monitor/reference/tables/azfwnatruleaggregation) - Contains aggregated NAT rule log data for Policy Analytics. + +By default, the new resource specific tables are disabled. Open a support ticket to enable the functionality in your environment. + +In addition, when setting up your log analytics workspace, you must select whether you want to work with the AzureDiagnostics table (default) or with Resource Specific Tables. + +Additional KQL log queries were added (as seen in the following screenshot) to query structured firewall logs. + +:::image type="content" source="media/firewall-preview/resource-specific-tables.png" alt-text="Screenshot showing Firewall logs Resource Specific Tables." lightbox="media/firewall-preview/resource-specific-tables-zoom.png"::: + +> [!NOTE] +> Existing Workbooks and any Sentinel integration will be adjusted to support the new structured logs when **Resource Specific** mode is selected. + ## Next steps To learn more about Azure Firewall, see [What is Azure Firewall?](overview.md). \ No newline at end of file diff --git a/articles/firewall/forced-tunneling.md b/articles/firewall/forced-tunneling.md index 0e6622774b518..35e885d2425e8 100644 --- a/articles/firewall/forced-tunneling.md +++ b/articles/firewall/forced-tunneling.md @@ -5,7 +5,7 @@ services: firewall author: vhorne ms.service: firewall ms.topic: article -ms.date: 01/13/2022 +ms.date: 06/02/2022 ms.author: victorh --- @@ -15,6 +15,9 @@ When you configure a new Azure Firewall, you can route all Internet-bound traffi Azure Firewall provides automatic SNAT for all outbound traffic to public IP addresses. Azure Firewall doesn’t SNAT when the destination IP address is a private IP address range per IANA RFC 1918. This logic works perfectly when you egress directly to the Internet. However, with forced tunneling enabled, Internet-bound traffic is SNATed to one of the firewall private IP addresses in the AzureFirewallSubnet. This hides the source address from your on-premises firewall. You can configure Azure Firewall to not SNAT regardless of the destination IP address by adding *0.0.0.0/0* as your private IP address range. With this configuration, Azure Firewall can never egress directly to the Internet. For more information, see [Azure Firewall SNAT private IP address ranges](snat-private-range.md). +> [!IMPORTANT] +> If you deploy a Secured Virtual Hub in forced tunnel mode, advertising the default route over Express Route or VPN Gateway is not currently supported. A fix is being investigated. + ## Forced tunneling configuration You can configure Forced Tunneling during Firewall creation by enabling Forced Tunnel mode as shown below. 
To support forced tunneling, Service Management traffic is separated from customer traffic. An additional dedicated subnet named **AzureFirewallManagementSubnet** (minimum subnet size /26) is required with its own associated public IP address. This public IP address is for management traffic. It is used exclusively by the Azure platform and can't be used for any other purpose. diff --git a/articles/firewall/index.yml b/articles/firewall/index.yml index 53e7e6bab0dc5..060a21e8cac4c 100644 --- a/articles/firewall/index.yml +++ b/articles/firewall/index.yml @@ -11,7 +11,7 @@ metadata: ms.topic: landing-page author: vhorne ms.author: victorh - ms.date: 05/03/2021 + ms.date: 06/06/2022 ms.custom: e2e-hybrid # linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | whats-new diff --git a/articles/firewall/media/firewall-preview/idps-private-ip.png b/articles/firewall/media/firewall-preview/idps-private-ip.png new file mode 100644 index 0000000000000..36dae79fb6600 Binary files /dev/null and b/articles/firewall/media/firewall-preview/idps-private-ip.png differ diff --git a/articles/firewall/media/firewall-preview/resource-specific-tables-zoom.png b/articles/firewall/media/firewall-preview/resource-specific-tables-zoom.png new file mode 100644 index 0000000000000..cfdba32c78823 Binary files /dev/null and b/articles/firewall/media/firewall-preview/resource-specific-tables-zoom.png differ diff --git a/articles/firewall/media/firewall-preview/resource-specific-tables.png b/articles/firewall/media/firewall-preview/resource-specific-tables.png new file mode 100644 index 0000000000000..606bef1f2a400 Binary files /dev/null and b/articles/firewall/media/firewall-preview/resource-specific-tables.png differ diff --git a/articles/firewall/overview.md b/articles/firewall/overview.md index 47dd835cb6e04..7c2224a7f339e 100644 --- a/articles/firewall/overview.md +++ b/articles/firewall/overview.md @@ -7,7 +7,7 @@ ms.service: firewall services: firewall ms.topic: overview ms.custom: mvc, contperf-fy21q1 -ms.date: 05/12/2022 +ms.date: 05/26/2022 # Customer intent: As an administrator, I want to evaluate Azure Firewall so I can determine if I want to use it. --- @@ -102,7 +102,6 @@ Azure Firewall Standard has the following known issues: | Error encountered when creating more than 2000 rule collections. | The maximal number of NAT/Application or Network rule collections is 2000 (Resource Manager limit). | This is a current limitation. | |Unable to see Network Rule Name in Azure Firewall Logs|Azure Firewall network rule log data does not show the Rule name for network traffic.|Network rule name logging is in preview. For for information, see [Azure Firewall preview features](firewall-preview.md#network-rule-name-logging-preview).| |XFF header in HTTP/S|XFF headers are overwritten with the original source IP address as seen by the firewall. This is applicable for the following use cases:
                  - HTTP requests
                  - HTTPS requests with TLS termination|A fix is being investigated.| -| Firewall logs (Resource specific tables - Preview) | Resource specific log queries are in preview mode and aren't currently supported. | A fix is being investigated.| |Can't upgrade to Premium with Availability Zones in the Southeast Asia region|You can't currently upgrade to Azure Firewall Premium with Availability Zones in the Southeast Asia region.|Deploy a new Premium firewall in Southeast Asia without Availability Zones, or deploy in a region that supports Availability Zones.| |Can’t deploy Firewall with Availability Zones with a newly created Public IP address|When you deploy a Firewall with Availability Zones, you can’t use a newly created Public IP address.|First create a new zone redundant Public IP address, then assign this previously created IP address during the Firewall deployment. diff --git a/articles/firewall/premium-features.md b/articles/firewall/premium-features.md index d8a8f6f3d9753..832e396fe1243 100644 --- a/articles/firewall/premium-features.md +++ b/articles/firewall/premium-features.md @@ -5,7 +5,7 @@ author: vhorne ms.service: firewall services: firewall ms.topic: conceptual -ms.date: 03/30/2022 +ms.date: 05/25/2022 ms.author: victorh ms.custom: references_regions --- @@ -56,7 +56,7 @@ To learn more about Azure Firewall Premium Intermediate CA certificate requireme A network intrusion detection and prevention system (IDPS) allows you to monitor your network for malicious activity, log information about this activity, report it, and optionally attempt to block it. -Azure Firewall Premium provides signature-based IDPS to allow rapid detection of attacks by looking for specific patterns, such as byte sequences in network traffic, or known malicious instruction sequences used by malware. The IDPS signatures are applicable for both application and network level traffic (Layers 4-7), they're fully managed, and continuously updated. IDPS can be applied to inbound, spoke-to-spoke (East-West), and outbound traffic. Spoke-to-spoke (East-West) includes traffic that goes from/to an on-premises network. +Azure Firewall Premium provides signature-based IDPS to allow rapid detection of attacks by looking for specific patterns, such as byte sequences in network traffic, or known malicious instruction sequences used by malware. The IDPS signatures are applicable for both application and network level traffic (Layers 3-7), they're fully managed, and continuously updated. IDPS can be applied to inbound, spoke-to-spoke (East-West), and outbound traffic. Spoke-to-spoke (East-West) includes traffic that goes from/to an on-premises network. You can configure your IDPS private IP address ranges using the **Private IP ranges** preview feature. For more information, see [Azure Firewall preview features](firewall-preview.md#idps-private-ip-ranges-preview). The Azure Firewall signatures/rulesets include: - An emphasis on fingerprinting actual malware, Command and Control, exploit kits, and in the wild malicious activity missed by traditional prevention methods. 
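Once the structured firewall logs described in the preview section above are flowing to a Log Analytics workspace, IDPS signature matches can be reviewed with a short KQL query against the resource-specific table. The following is a minimal Azure PowerShell sketch; the workspace ID is a hypothetical placeholder, and the query relies only on the standard `TimeGenerated` column of the `AZFWIdpsSignature` table.

```azurepowershell-interactive
# Hypothetical workspace ID; replace it with the Log Analytics workspace that
# receives the firewall's resource-specific diagnostic logs.
$workspaceId = "00000000-0000-0000-0000-000000000000"

# Count IDPS signature matches per hour over the last day.
$query = @"
AZFWIdpsSignature
| where TimeGenerated > ago(1d)
| summarize Matches = count() by bin(TimeGenerated, 1h)
| order by TimeGenerated asc
"@

Invoke-AzOperationalInsightsQuery -WorkspaceId $workspaceId -Query $query |
    Select-Object -ExpandProperty Results
```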
diff --git a/articles/firewall/tutorial-firewall-dnat.md b/articles/firewall/tutorial-firewall-dnat.md index ef0fe6dbd1cdc..e33fb6cfac6d1 100644 --- a/articles/firewall/tutorial-firewall-dnat.md +++ b/articles/firewall/tutorial-firewall-dnat.md @@ -5,7 +5,7 @@ services: firewall author: vhorne ms.service: firewall ms.topic: how-to -ms.date: 04/29/2021 +ms.date: 06/06/2022 ms.author: victorh ms.custom: mvc #Customer intent: As an administrator, I want to deploy and configure Azure Firewall DNAT so that I can control inbound Internet access to resources located in a subnet. @@ -36,9 +36,9 @@ If you don't have an Azure subscription, create a [free account](https://azure.m ## Create a resource group 1. Sign in to the Azure portal at [https://portal.azure.com](https://portal.azure.com). -2. On the Azure portal home page, select **Resource groups**, then select **Add**. +2. On the Azure portal home page, select **Resource groups**, then select **Create**. 4. For **Subscription**, select your subscription. -1. For **Resource group name**, type **RG-DNAT-Test**. +1. For **Resource group**, type **RG-DNAT-Test**. 5. For **Region**, select a region. All other resources that you create must be in the same region. 6. Select **Review + create**. 1. Select **Create**. @@ -56,13 +56,13 @@ First, create the VNets and then peer them. 1. From the Azure portal home page, select **All services**. 2. Under **Networking**, select **Virtual networks**. -3. Select **Add**. +3. Select **Create**. 7. For **Resource group**, select **RG-DNAT-Test**. 1. For **Name**, type **VN-Hub**. 1. For **Region**, select the same region that you used before. 1. Select **Next: IP Addresses**. 1. For **IPv4 Address space**, accept the default **10.0.0.0/16**. -1. Under **Subnet name**, select default. +1. Under **Subnet name**, select **default**. 1. Edit the **Subnet name** and type **AzureFirewallSubnet**. The firewall will be in this subnet, and the subnet name **must** be AzureFirewallSubnet. @@ -78,7 +78,7 @@ First, create the VNets and then peer them. 1. From the Azure portal home page, select **All services**. 2. Under **Networking**, select **Virtual networks**. -3. Select **Add**. +3. Select **Create**. 1. For **Resource group**, select **RG-DNAT-Test**. 1. For **Name**, type **VN-Spoke**. 1. For **Region**, select the same region that you used before. @@ -108,7 +108,7 @@ Now peer the two VNets. Create a workload virtual machine, and place it in the **SN-Workload** subnet. 1. From the Azure portal menu, select **Create a resource**. -2. Under **Popular**, select **Windows Server 2016 Datacenter**. +2. Under **Popular**, select **Windows Server 2019 Datacenter**. **Basics** @@ -156,6 +156,7 @@ After deployment finishes, note the private IP address for the virtual machine. |Resource group |Select **RG-DNAT-Test** | |Name |**FW-DNAT-test**| |Region |Select the same location that you used previously| + |Firewall tier|**Standard**| |Firewall management|**Use Firewall rules (classic) to manage this firewall**| |Choose a virtual network |**Use existing**: VN-Hub| |Public IP address |**Add new**, Name: **fw-pip**.| @@ -173,7 +174,7 @@ For the **SN-Workload** subnet, you configure the outbound default route to go t 1. From the Azure portal home page, select **All services**. 2. Under **Networking**, select **Route tables**. -3. Select **Add**. +3. Select **Create**. 5. For **Subscription**, select your subscription. 1. For **Resource group**, select **RG-DNAT-Test**. 1. 
For **Region**, select the same region that you used previously. @@ -187,12 +188,13 @@ For the **SN-Workload** subnet, you configure the outbound default route to go t 1. Select **OK**. 1. Select **Routes**, and then select **Add**. 1. For **Route name**, type **FW-DG**. -1. For **Address prefix**, type **0.0.0.0/0**. +1. For **Address prefix destination**, select **IP Addresses**. +1. For **Destination IP addresses/CIDR ranges**, type **0.0.0.0/0**. 1. For **Next hop type**, select **Virtual appliance**. Azure Firewall is actually a managed service, but virtual appliance works in this situation. 18. For **Next hop address**, type the private IP address for the firewall that you noted previously. -19. Select **OK**. +19. Select **Add**. ## Configure a NAT rule diff --git a/articles/firewall/tutorial-hybrid-portal-policy.md b/articles/firewall/tutorial-hybrid-portal-policy.md index 6b491e16aaaa9..84dec9f3abfaa 100644 --- a/articles/firewall/tutorial-hybrid-portal-policy.md +++ b/articles/firewall/tutorial-hybrid-portal-policy.md @@ -5,7 +5,7 @@ services: firewall author: vhorne ms.service: firewall ms.topic: tutorial -ms.date: 08/26/2021 +ms.date: 06/08/2022 ms.author: victorh #Customer intent: As an administrator, I want to control network access from an on-premises network to an Azure virtual network. --- @@ -68,7 +68,7 @@ If you don't have an Azure subscription, create a [free account](https://azure.m First, create the resource group to contain the resources for this tutorial: 1. Sign in to the Azure portal at [https://portal.azure.com](https://portal.azure.com). -2. On the Azure portal home page, select **Resource groups** > **Add**. +2. On the Azure portal home page, select **Resource groups** > **Create**. 3. For **Subscription**, select your subscription. 1. For **Resource group name**, type **FW-Hybrid-Test**. 2. For **Region**, select **(US) East US**. All resources that you create later must be in the same location. @@ -98,17 +98,18 @@ Now, create the VNet: 1. From the Azure portal home page, select **Create a resource**. 2. In **Networking**, select **Virtual network**. -7. For **Resource group**, select **FW-Hybrid-Test**. +1. Select **Create**. +1. For **Resource group**, select **FW-Hybrid-Test**. 1. For **Name**, type **VNet-Spoke**. -2. For **Region**, select **(US) East US**. -3. Select **Next: IP Addresses**. -4. For **IPv4 address space**, delete the default address and type **10.6.0.0/16**. -6. Under **Subnet name**, select **Add subnet**. -7. For **Subnet name** type **SN-Workload**. -8. For **Subnet address range**, type **10.6.0.0/24**. -9. Select **Add**. -10. Select **Review + create**. -11. Select **Create**. +1. For **Region**, select **(US) East US**. +1. Select **Next: IP Addresses**. +1. For **IPv4 address space**, delete the default address and type **10.6.0.0/16**. +1. Under **Subnet name**, select **Add subnet**. +1. For **Subnet name** type **SN-Workload**. +1. For **Subnet address range**, type **10.6.0.0/24**. +1. Select **Add**. +1. Select **Review + create**. +1. Select **Create**. ## Create the on-premises virtual network @@ -132,14 +133,14 @@ Now create a second subnet for the gateway. 2. Select **+Subnet**. 3. For **Name**, type **GatewaySubnet**. 4. For **Subnet address range** type **192.168.2.0/24**. -5. Select **OK**. +5. Select **Save**. ## Configure and deploy the firewall Now deploy the firewall into the firewall hub virtual network. 1. From the Azure portal home page, select **Create a resource**. -2. 
In the left column, select **Networking**, and search for and then select **Firewall**. +2. In the left column, select **Networking**, and search for and then select **Firewall**, and then select **Create**. 4. On the **Create a Firewall** page, use the following table to configure the firewall: |Setting |Value | @@ -148,10 +149,11 @@ Now deploy the firewall into the firewall hub virtual network. |Resource group |**FW-Hybrid-Test** | |Name |**AzFW01**| |Region |**East US**| + |Firewall tier|**Standard**| |Firewall management|**Use a Firewall Policy to manage this firewall**| |Firewall policy|Add new:
                  **hybrid-test-pol**
                  **East US** |Choose a virtual network |Use existing:
                  **VNet-hub**| - |Public IP address |Add new:
                  **fw-pip**. | + |Public IP address |Add new:
                  **fw-pip** | 5. Select **Review + create**. @@ -316,10 +318,11 @@ Next, create a couple routes: 12. Select **Routes** in the left column. 13. Select **Add**. 14. For the route name, type **ToSpoke**. -15. For the address prefix, type **10.6.0.0/16**. -16. For next hop type, select **Virtual appliance**. -17. For next hop address, type the firewall's private IP address that you noted earlier. -18. Select **OK**. +1. For the **Address prefix destination**, select **IP Addresses**. +1. For the **Destination IP addresses/CIDR ranges**, type **10.6.0.0/16**. +1. For next hop type, select **Virtual appliance**. +1. For next hop address, type the firewall's private IP address that you noted earlier. +1. Select **Add**. Now associate the route to the subnet. @@ -345,10 +348,11 @@ Now create the default route from the spoke subnet. 8. Select **Routes** in the left column. 9. Select **Add**. 10. For the route name, type **ToHub**. -11. For the address prefix, type **0.0.0.0/0**. -12. For next hop type, select **Virtual appliance**. -13. For next hop address, type the firewall's private IP address that you noted earlier. -14. Select **OK**. +1. For the **Address prefix destination**, select **IP Addresses**. +1. For the **Destination IP addresses/CIDR ranges**, type **0.0.0.0/0**. +1. For next hop type, select **Virtual appliance**. +1. For next hop address, type the firewall's private IP address that you noted earlier. +1. Select **Add**. Now associate the route to the subnet. @@ -367,14 +371,14 @@ Now create the spoke workload and on-premises virtual machines, and place them i Create a virtual machine in the spoke virtual network, running IIS, with no public IP address. 1. From the Azure portal home page, select **Create a resource**. -2. Under **Popular**, select **Windows Server 2016 Datacenter**. +2. Under **Popular Marketplace products**, select **Windows Server 2019 Datacenter**. 3. Enter these values for the virtual machine: - - **Resource group** - Select **FW-Hybrid-Test**. - - **Virtual machine name**: *VM-Spoke-01*. - - **Region** - Same region that you're used previously. - - **User name**: \. + - **Resource group** - Select **FW-Hybrid-Test** + - **Virtual machine name**: *VM-Spoke-01* + - **Region** - Same region that you're used previously + - **User name**: \ - **Password**: \ -4. For **Public inbound ports**, select **Allow selected ports**, and then select **HTTP (80)**, and **RDP (3389)** +4. For **Public inbound ports**, select **Allow selected ports**, and then select **HTTP (80)**, and **RDP (3389)**. 4. Select **Next:Disks**. 5. Accept the defaults and select **Next: Networking**. 6. Select **VNet-Spoke** for the virtual network and the subnet is **SN-Workload**. @@ -385,6 +389,8 @@ Create a virtual machine in the spoke virtual network, running IIS, with no publ ### Install IIS +After the virtual machine is created, install IIS. + 1. From the Azure portal, open the Cloud Shell and make sure that it's set to **PowerShell**. 2. Run the following command to install IIS on the virtual machine and change the location if necessary: @@ -405,7 +411,7 @@ Create a virtual machine in the spoke virtual network, running IIS, with no publ This is a virtual machine that you use to connect using Remote Desktop to the public IP address. From there, you then connect to the on-premises server through the firewall. 1. From the Azure portal home page, select **Create a resource**. -2. Under **Popular**, select **Windows Server 2016 Datacenter**. +2. 
Under **Popular Marketplace products**, select **Windows Server 2019 Datacenter**. 3. Enter these values for the virtual machine: - **Resource group** - Select existing, and then select **FW-Hybrid-Test**. - **Virtual machine name** - *VM-Onprem*. @@ -427,9 +433,7 @@ This is a virtual machine that you use to connect using Remote Desktop to the pu 1. First, note the private IP address for **VM-spoke-01** virtual machine. 2. From the Azure portal, connect to the **VM-Onprem** virtual machine. - 3. Open a web browser on **VM-Onprem**, and browse to http://\. You should see the **VM-spoke-01** web page: @@ -441,7 +445,6 @@ This is a virtual machine that you use to connect using Remote Desktop to the pu So now you've verified that the firewall rules are working: - - You can browse web server on the spoke virtual network. - You can connect to the server on the spoke virtual network using RDP. diff --git a/articles/frontdoor/TOC.yml b/articles/frontdoor/TOC.yml index c0486f710786a..5691a01170734 100644 --- a/articles/frontdoor/TOC.yml +++ b/articles/frontdoor/TOC.yml @@ -93,6 +93,8 @@ href: how-to-configure-origin.md - name: Add a custom domain href: standard-premium/how-to-add-custom-domain.md + - name: Add a root or apex domain + href: front-door-how-to-onboard-apex-domain.md?pivots=front-door-standard-premium - name: Configure HTTPS on a custom domain href: standard-premium/how-to-configure-https-custom-domain.md - name: Rules Engine @@ -122,7 +124,7 @@ - name: Configure HTTPS on a custom domain href: front-door-custom-domain-https.md - name: Add a root or apex domain - href: front-door-how-to-onboard-apex-domain.md + href: front-door-how-to-onboard-apex-domain.md?pivots=front-door-classic - name: Set up a Rules Engine href: front-door-tutorial-rules-engine.md - name: Configure HTTP to HTTPS redirect @@ -177,6 +179,8 @@ href: edge-locations-by-abbreviation.md - name: Azure CLI href: /cli/azure/afd + - name: Azure PowerShell + href: /powershell/module/az.cdn - name: REST API href: /rest/api/frontdoor - name: Python SDK diff --git a/articles/frontdoor/edge-locations-by-region.md b/articles/frontdoor/edge-locations-by-region.md index b7fcb30ea514c..bf8d2f29b50cd 100644 --- a/articles/frontdoor/edge-locations-by-region.md +++ b/articles/frontdoor/edge-locations-by-region.md @@ -6,14 +6,14 @@ author: duongau ms.service: frontdoor ms.topic: article ms.workload: infrastructure-services -ms.date: 05/25/2021 +ms.date: 06/01/2022 ms.author: duau ms.custom: references_regions --- # Azure Front Door edge locations by metro -This article lists current metros containing edge locations, sorted by region, for Azure Front Door. Each metro may contain more than one edge locations. Currently, Azure Front Door has 118 edge locations across 100 metro cities. +This article lists current metros containing edge locations, sorted by region, for Azure Front Door. Each metro may contain more than one edge locations. Currently, Azure Front Door has 118 edge locations across 100 metro cities. Azure Front Door also has 4 edge locations across 4 Azure US Government cloud regions. 
## Microsoft edge locations diff --git a/articles/frontdoor/end-to-end-tls.md b/articles/frontdoor/end-to-end-tls.md index 2a790d171b18b..6800a06c5aa83 100644 --- a/articles/frontdoor/end-to-end-tls.md +++ b/articles/frontdoor/end-to-end-tls.md @@ -14,7 +14,7 @@ ms.author: duau Transport Layer Security (TLS), previously known as Secure Sockets Layer (SSL), is the standard security technology for establishing an encrypted link between a web server and a browser. This link ensures that all data passed between the web server and the web browser remain private and encrypted. -To meet your security or compliance requirements, Azure Front Door (AFD) supports end-to-end TLS encryption. Front Door TLS/SSL offload terminates the TLS connection, decrypts the traffic at the Azure Front Door, and re-encrypts the traffic before forwarding it to the backend. Since connections to the backend happen over the public IP. It's highly recommended you configure HTTPS as the forwarding protocol on your Azure Front Door to enforce end-to-end TLS encryption from the client to the backend. +To meet your security or compliance requirements, Azure Front Door (AFD) supports end-to-end TLS encryption. Front Door TLS/SSL offload terminates the TLS connection, decrypts the traffic at the Azure Front Door, and re-encrypts the traffic before forwarding it to the backend. Since connections to the backend happen over the public IP, it is highly recommended you configure HTTPS as the forwarding protocol on your Azure Front Door to enforce end-to-end TLS encryption from the client to the backend. TLS/SSL offload is also supported if you deploy a private backend with AFD Premium using the [PrivateLink](private-link.md) feature. ## End-to-end TLS encryption diff --git a/articles/frontdoor/front-door-custom-domain-https.md b/articles/frontdoor/front-door-custom-domain-https.md index 3e4938f94c8b1..da44ad28709c0 100644 --- a/articles/frontdoor/front-door-custom-domain-https.md +++ b/articles/frontdoor/front-door-custom-domain-https.md @@ -9,7 +9,7 @@ ms.service: frontdoor ms.workload: infrastructure-services ms.tgt_pltfrm: na ms.topic: tutorial -ms.date: 12/06/2021 +ms.date: 06/06/2022 ms.author: duau ms.custom: devx-track-azurepowershell #Customer intent: As a website owner, I want to enable HTTPS on the custom domain in my Front Door so that my users can use my custom domain to access their content securely. @@ -48,7 +48,6 @@ Before you can complete the steps in this tutorial, you must first create a Fron To enable the HTTPS protocol for securely delivering content on a Front Door custom domain, you must use a TLS/SSL certificate. You can choose to use a certificate that is managed by Azure Front Door or use your own certificate. - ### Option 1 (default): Use a certificate managed by Front Door When you use a certificate managed by Azure Front Door, the HTTPS feature can be turned on with just a few clicks. Azure Front Door completely handles certificate management tasks such as procurement and renewal. After you enable the feature, the process starts immediately. If the custom domain is already mapped to the Front Door's default frontend host (`{hostname}.azurefd.net`), no further action is required. Front Door will process the steps and complete your request automatically. However, if your custom domain is mapped elsewhere, you must use email to validate your domain ownership. @@ -73,24 +72,28 @@ To enable HTTPS on a custom domain, follow these steps: You can use your own certificate to enable the HTTPS feature. 
This process is done through an integration with Azure Key Vault, which allows you to store your certificates securely. Azure Front Door uses this secure mechanism to get your certificate and it requires a few extra steps. When you create your TLS/SSL certificate, you must create a complete certificate chain with an allowed certificate authority (CA) that is part of the [Microsoft Trusted CA List](https://ccadb-public.secure.force.com/microsoft/IncludedCACertificateReportForMSFT). If you use a non-allowed CA, your request will be rejected. If a certificate without complete chain is presented, the requests which involve that certificate are not guaranteed to work as expected. -#### Prepare your Azure Key vault account and certificate +#### Prepare your key vault and certificate + +- You must have a key vault account in the same Azure subscription as your front door. Create a key vault account if you don't have one. + + > [!WARNING] + > Azure Front Door currently only supports Key Vault accounts in the same subscription as the Front Door configuration. Choosing a Key Vault under a different subscription than your Front Door will result in a failure. -1. Azure Key Vault: You must have a running Azure Key Vault account under the same subscription as your Front Door that you want to enable custom HTTPS. Create an Azure Key Vault account if you don't have one. +- If your key vault has network access restrictions enabled, you must configure your key vault to allow trusted Microsoft services to bypass the firewall. -> [!WARNING] -> Azure Front Door currently only supports Key Vault accounts in the same subscription as the Front Door configuration. Choosing a Key Vault under a different subscription than your Front Door will result in a failure. +- Your key vault must be configured to use the *Key Vault access policy* permission model. -2. Azure Key Vault certificates: If you already have a certificate, you can upload it directly to your Azure Key Vault account or you can create a new certificate directly through Azure Key Vault from one of the partner CAs that Azure Key Vault integrates with. Upload your certificate as a **certificate** object, rather than a **secret**. +- If you already have a certificate, you can upload it directly to your key vault. Otherwise, create a new certificate directly through Azure Key Vault from one of the partner certificate authorities (CAs) that Azure Key Vault integrates with. Upload your certificate as a **certificate** object, rather than a **secret**. > [!NOTE] -> For your own TLS/SSL certificate, Front Door doesn't support certificates with EC cryptography algorithms. The certificate must have a complete certificate chain with leaf and intermediate certificates, and root CA must be part of the [Microsoft Trusted CA list](https://ccadb-public.secure.force.com/microsoft/IncludedCACertificateReportForMSFT). +> Front Door doesn't support certificates with elliptic curve (EC) cryptography algorithms. The certificate must have a complete certificate chain with leaf and intermediate certificates, and root CA must be part of the [Microsoft Trusted CA list](https://ccadb-public.secure.force.com/microsoft/IncludedCACertificateReportForMSFT). #### Register Azure Front Door -Register the service principal for Azure Front Door as an app in your Azure Active Directory using Azure PowerShell or Azure CLI. +Register the service principal for Azure Front Door as an app in your Azure Active Directory (Azure AD) by using Azure PowerShell or the Azure CLI. 
> [!NOTE] -> This action requires Global Administrator permissions, and needs to be performed only **once** per tenant. +> This action requires you to have Global Administrator permissions in Azure AD. The registration only needs to be performed **once per Azure AD tenant**. ##### Azure PowerShell @@ -99,7 +102,7 @@ Register the service principal for Azure Front Door as an app in your Azure Acti 2. In PowerShell, run the following command: ```azurepowershell-interactive - New-AzADServicePrincipal -ApplicationId "ad0e1c7e-6d38-4ba4-9efd-0bc77ba9f037" -Role Contributor + New-AzADServicePrincipal -ApplicationId "ad0e1c7e-6d38-4ba4-9efd-0bc77ba9f037" ``` ##### Azure CLI @@ -109,27 +112,31 @@ Register the service principal for Azure Front Door as an app in your Azure Acti 2. In CLI, run the following command: ```azurecli-interactive - SP_ID=$(az ad sp create --id 205478c0-bd83-4e1b-a9d6-db63a3e1e1c8 --query objectId -o tsv) - az role assignment create --assignee $SP_ID --role Contributor + az ad sp create --id ad0e1c7e-6d38-4ba4-9efd-0bc77ba9f037 ``` #### Grant Azure Front Door access to your key vault -Grant Azure Front Door permission to access the certificates in your Azure Key Vault account. +Grant Azure Front Door permission to access the certificates in your Azure Key Vault account. -1. In your key vault account, under SETTINGS, select **Access policies**, then select **Add new** to create a new policy. +1. In your key vault account, select **Access policies**. -2. In **Select principal**, search for **ad0e1c7e-6d38-4ba4-9efd-0bc77ba9f037**, and choose **Microsoft.Azure.Frontdoor**. Click **Select**. +1. Select **Create** to create a new access policy. -3. In **Secret permissions**, select **Get** to allow Front Door to retrieve the certificate. +1. In **Secret permissions**, select **Get** to allow Front Door to retrieve the certificate. -4. In **Certificate permissions**, select **Get** to allow Front Door to retrieve the certificate. +1. In **Certificate permissions**, select **Get** to allow Front Door to retrieve the certificate. -5. Select **Add**. +1. In **Select principal**, search for **ad0e1c7e-6d38-4ba4-9efd-0bc77ba9f037**, and select **Microsoft.Azure.Frontdoor**. Select **Next**. -6. On the **Access policies** page, select **Save**. +1. In **Application**, select **Next**. + +1. In **Review + create**, select **Create**. + +> [!NOTE] +> If your key vault is protected with network access restrictions, make sure to allow trusted Microsoft services to access your key vault. -Azure Front Door can now access this Key Vault and the certificates that are stored in this Key Vault. +Azure Front Door can now access this key vault and the certificates it contains. #### Select the certificate for Azure Front Door to deploy @@ -149,7 +156,7 @@ Azure Front Door can now access this Key Vault and the certificates that are sto - The available secret versions. > [!NOTE] - > In order for the certificate to be automatically rotated to the latest version when a newer version of the certificate is available in your Key Vault, please set the secret version to 'Latest'. If a specific version is selected, you have to re-select the new version manually for certificate rotation. It takes up to 24 hours for the new version of the certificate/secret to be deployed. + > In order for the certificate to be automatically rotated to the latest version when a newer version of the certificate is available in your Key Vault, please set the secret version to 'Latest'. 
If a specific version is selected, you have to re-select the new version manually for certificate rotation. It takes up to 48 hours for the new version of the certificate/secret to be deployed. > > :::image type="content" source="./media/front-door-custom-domain-https/certificate-version.png" alt-text="Screenshot of selecting secret version on update custom domain page."::: diff --git a/articles/frontdoor/front-door-faq.yml b/articles/frontdoor/front-door-faq.yml index d8c9544dacd9c..a7c9491d1647c 100644 --- a/articles/frontdoor/front-door-faq.yml +++ b/articles/frontdoor/front-door-faq.yml @@ -14,7 +14,7 @@ summary: | 1. The comments section of this article. - 2. [Azure Front Door UserVoice](https://feedback.azure.com/d365community/forum/8ae9bf04-8326-ec11-b6e6-000d3a4f0789?c=d47b0f41-8326-ec11-b6e6-000d3a4f0789). + 2. [Azure Front Door Feedback](https://feedback.azure.com/d365community/forum/8ae9bf04-8326-ec11-b6e6-000d3a4f0789?c=d47b0f41-8326-ec11-b6e6-000d3a4f0789). 3. **Microsoft Support:** To create a new support request, in the Azure portal, on the **Help** tab, select the **Help + support** button, and then select **New support request**. diff --git a/articles/frontdoor/front-door-how-to-onboard-apex-domain.md b/articles/frontdoor/front-door-how-to-onboard-apex-domain.md index fe5eb7040215a..5bcbc84b33024 100644 --- a/articles/frontdoor/front-door-how-to-onboard-apex-domain.md +++ b/articles/frontdoor/front-door-how-to-onboard-apex-domain.md @@ -5,15 +5,27 @@ services: front-door author: duongau ms.service: frontdoor ms.topic: how-to -ms.date: 11/13/2020 +ms.date: 05/31/2022 ms.author: duau - +zone_pivot_groups: front-door-tiers --- + # Onboard a root or apex domain on your Front Door + +::: zone pivot="front-door-standard-premium" + +Azure Front Door supports adding custom domain to Front Door profile. This is done by adding DNS TXT record for domain ownership validation and creating a CNAME record in your DNS configuration to route DNS queries for the custom domain to Azure Front Door endpoint. For apex domain, DNS TXT will continue to be used for domain validation. However, the DNS protocol prevents the assignment of CNAME records at the zone apex. For example, if your domain is `contoso.com`; you can create CNAME records for `somelabel.contoso.com`; but you can't create CNAME for `contoso.com` itself. Front Door doesn't expose the frontend IP address associated with your Front Door profile. So you can't map your apex domain to an IP address if your intent is to onboard it to Azure Front Door. + +::: zone-end + +::: zone pivot="front-door-classic" + Azure Front Door uses CNAME records to validate domain ownership for onboarding of custom domains. Front Door doesn't expose the frontend IP address associated with your Front Door profile. So you can't map your apex domain to an IP address if your intent is to onboard it to Azure Front Door. The DNS protocol prevents the assignment of CNAME records at the zone apex. For example, if your domain is `contoso.com`; you can create CNAME records for `somelabel.contoso.com`; but you can't create CNAME for `contoso.com` itself. This restriction presents a problem for application owners who have load-balanced applications behind Azure Front Door. Since using a Front Door profile requires creation of a CNAME record, it isn't possible to point at the Front Door profile from the zone apex. +::: zone-end + This problem can be resolved by using alias records in Azure DNS. Unlike CNAME records, alias records are created at the zone apex. 
Application owners can use it to point their zone apex record to a Front Door profile that has public endpoints. Application owners point to the same Front Door profile that's used for any other domain within their DNS zone. For example, `contoso.com` and `www.contoso.com` can point to the same Front Door profile. Mapping your apex or root domain to your Front Door profile basically requires CNAME flattening or DNS chasing. A mechanism where the DNS provider recursively resolves the CNAME entry until it hits an IP address. This functionality is supported by Azure DNS for Front Door endpoints. @@ -23,6 +35,71 @@ Mapping your apex or root domain to your Front Door profile basically requires C You can use the Azure portal to onboard an apex domain on your Front Door and enable HTTPS on it by associating it with a certificate for TLS termination. Apex domains are also referred as root or naked domains. +::: zone pivot="front-door-standard-premium" + +## Onboard the custom domain to your Front Door + +1. Select **Domains** from under *Settings* on the left side pane for your Front Door profile and then select **+ Add** to add a new custom domain. + + :::image type="content" source="./media/front-door-apex-domain/add-domain.png" alt-text="Screenshot of adding a new domain to Front Door profile."::: + +1. On **Add a domain** page, you'll enter information about the custom domain. You can choose Azure-managed DNS (recommended) or you can choose to use your DNS provider. + + - **Azure-managed DNS** - select an existing DNS zone and for *Custom domain*, select **Add new**. Select **APEX domain** from the pop-up and then select **OK** to save. + + :::image type="content" source="./media/front-door-apex-domain/add-custom-domain.png" alt-text="Screenshot of adding a new custom domain to Front Door profile."::: + + - **Another DNS provider** - make sure the DNS provider supports CNAME flattening and follow the steps for [adding a custom domain](standard-premium/how-to-add-custom-domain.md#add-a-new-custom-domain). + +1. Select the **Pending** validation state. A new page will appear with DNS TXT record information needed to validate the custom domain. The TXT record is in the form of `_dnsauth.`. + + :::image type="content" source="./media/front-door-apex-domain/pending-validation.png" alt-text="Screenshot of custom domain pending validation."::: + + - **Azure DNS-based zone** - select the **Add** button and a new TXT record with the displayed record value will be created in the Azure DNS zone. + + :::image type="content" source="./media/front-door-apex-domain/validate-custom-domain.png" alt-text="Screenshot of validate a new custom domain."::: + + - If you're using another DNS provider, manually create a new TXT record of name `_dnsauth.` with the record value as shown on the page. + +1. Close the *Validate the custom domain* page and return to the *Domains* page for the Front Door profile. You should see the *Validation state* change from **Pending** to **Approved**. If not, wait up to 10 minutes for changes to reflect. If your validation doesn't get approved make sure your TXT record is correct and name servers are configured correctly if you're using Azure DNS. + + :::image type="content" source="./media/front-door-apex-domain/validation-approved.png" alt-text="Screenshot of new custom domain passing validation."::: + +1. Select **Unassociated** from the *Endpoint association* column, to add the new custom domain to an endpoint. 
+ + :::image type="content" source="./media/front-door-apex-domain/unassociated-endpoint.png" alt-text="Screenshot of unassociated custom domain to an endpoint."::: + +1. On the *Associate endpoint and route* page, select the **Endpoint** and **Route** you would like to associate the domain to. Then select **Associate** to complete this step. + + :::image type="content" source="./media/front-door-apex-domain/associate-endpoint.png" alt-text="Screenshot of associated endpoint and route page for a domain."::: + +1. Under the *DNS state* column, select the **CNAME record is currently not detected** to add the alias record to DNS provider. + + - **Azure DNS** - select the **Add** button on the page. + + :::image type="content" source="./media/front-door-apex-domain/cname-record.png" alt-text="Screenshot of add or update CNAME record page."::: + + - **A DNS provider that supports CNAME flattening** - you must manually enter the alias record name. + +1. Once the alias record gets created and the custom domain is associated to the Azure Front Door endpoint, traffic will start flowing. + + :::image type="content" source="./media/front-door-apex-domain/cname-record-added.png" alt-text="Screenshot of completed APEX domain configuration."::: + +> [!NOTE] +> **DNS state** column is meant for CNAME mapping check. Because apex domain doesn’t support CNAME record, the DNS state will show 'CNAME record is currently not detected' even after you added the alias record to the DNS provider. + +## Enable HTTPS on your custom domain + +Follow the guidance for [configuring HTTPS for your custom domain](standard-premium/how-to-configure-https-custom-domain.md) to enable HTTPS for your apex domain. + +## Managed certificate renewal for apex domain + +Front Door managed certificates will automatically rotate certificates only if the domain CNAME is pointed to Front Door endpoint. If the APEX domain doesn’t have a CNAME record pointing to Front Door endpoint, the auto-rotation for managed certificate will fail until domain ownership is re-validated. The validation column will become `Pending-revalidation` 45 days before the managed certificate expires. Select the **Pending-revalidation** link and then select the **Regenerate** button to regenerate the TXT token. After that, add the TXT token to the DNS provider settings. + +::: zone-end + +::: zone pivot="front-door-classic" + ## Create an alias record for zone apex 1. Open **Azure DNS** configuration for the domain to be onboarded. @@ -73,6 +150,8 @@ You can use the Azure portal to onboard an apex domain on your Front Door and en > [!WARNING] > Ensure that you have created appropriate routing rules for your apex domain or added the domain to existing routing rules. +::: zone-end + ## Next steps - Learn how to [create a Front Door](quickstart-create-front-door.md). diff --git a/articles/frontdoor/front-door-overview.md b/articles/frontdoor/front-door-overview.md index 055616ad61718..d99de8ec9cbea 100644 --- a/articles/frontdoor/front-door-overview.md +++ b/articles/frontdoor/front-door-overview.md @@ -89,6 +89,10 @@ Modernize your internet first applications on Azure with Cloud Native experience For a comparison of supported features in Azure Front Door, see [Tier comparison](standard-premium/tier-comparison.md). +## Where is the service available? + +Azure Front Door Classic/Standard/Premium SKUs are available in Microsoft Azure (Commercial) and Azure Front Door Classic SKU is available in Microsoft Azure Government (US). 
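The apex-domain onboarding described earlier relies on an Azure DNS alias record at the zone apex rather than a CNAME. The following is a minimal Azure PowerShell sketch of creating that record against a classic Front Door profile; the zone, resource group, and profile names are hypothetical placeholders, and for the Standard/Premium tier the portal flow in the apex-domain article above applies instead.

```azurepowershell-interactive
# Hypothetical names; the alias record is created at the zone apex ("@") and
# targets the Front Door resource directly instead of using a CNAME.
$afd = Get-AzFrontDoor -ResourceGroupName "afd-rg" -Name "contoso-frontdoor"

New-AzDnsRecordSet -Name "@" -RecordType A -ZoneName "contoso.com" -ResourceGroupName "dns-rg" `
    -Ttl 3600 -TargetResourceId $afd.Id
```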
+ ## Pricing For pricing information, see [Front Door Pricing](https://azure.microsoft.com/pricing/details/frontdoor/). For information about service-level agreements, See [SLA for Azure Front Door](https://azure.microsoft.com/support/legal/sla/frontdoor/v1_0/). diff --git a/articles/frontdoor/media/front-door-apex-domain/add-custom-domain.png b/articles/frontdoor/media/front-door-apex-domain/add-custom-domain.png new file mode 100644 index 0000000000000..db8021780dae7 Binary files /dev/null and b/articles/frontdoor/media/front-door-apex-domain/add-custom-domain.png differ diff --git a/articles/frontdoor/media/front-door-apex-domain/add-domain.png b/articles/frontdoor/media/front-door-apex-domain/add-domain.png new file mode 100644 index 0000000000000..2d84664437fb1 Binary files /dev/null and b/articles/frontdoor/media/front-door-apex-domain/add-domain.png differ diff --git a/articles/frontdoor/media/front-door-apex-domain/associate-endpoint.png b/articles/frontdoor/media/front-door-apex-domain/associate-endpoint.png new file mode 100644 index 0000000000000..55fb5f4809b82 Binary files /dev/null and b/articles/frontdoor/media/front-door-apex-domain/associate-endpoint.png differ diff --git a/articles/frontdoor/media/front-door-apex-domain/cname-record-added.png b/articles/frontdoor/media/front-door-apex-domain/cname-record-added.png new file mode 100644 index 0000000000000..fd4d27792f884 Binary files /dev/null and b/articles/frontdoor/media/front-door-apex-domain/cname-record-added.png differ diff --git a/articles/frontdoor/media/front-door-apex-domain/cname-record.png b/articles/frontdoor/media/front-door-apex-domain/cname-record.png new file mode 100644 index 0000000000000..a1683b83c594a Binary files /dev/null and b/articles/frontdoor/media/front-door-apex-domain/cname-record.png differ diff --git a/articles/frontdoor/media/front-door-apex-domain/pending-validation.png b/articles/frontdoor/media/front-door-apex-domain/pending-validation.png new file mode 100644 index 0000000000000..b35c244c35f94 Binary files /dev/null and b/articles/frontdoor/media/front-door-apex-domain/pending-validation.png differ diff --git a/articles/frontdoor/media/front-door-apex-domain/unassociated-endpoint.png b/articles/frontdoor/media/front-door-apex-domain/unassociated-endpoint.png new file mode 100644 index 0000000000000..a3be0ee9c8640 Binary files /dev/null and b/articles/frontdoor/media/front-door-apex-domain/unassociated-endpoint.png differ diff --git a/articles/frontdoor/media/front-door-apex-domain/validate-custom-domain.png b/articles/frontdoor/media/front-door-apex-domain/validate-custom-domain.png new file mode 100644 index 0000000000000..b5b269fdd2d34 Binary files /dev/null and b/articles/frontdoor/media/front-door-apex-domain/validate-custom-domain.png differ diff --git a/articles/frontdoor/media/front-door-apex-domain/validation-approved.png b/articles/frontdoor/media/front-door-apex-domain/validation-approved.png new file mode 100644 index 0000000000000..191c2b7063e9a Binary files /dev/null and b/articles/frontdoor/media/front-door-apex-domain/validation-approved.png differ diff --git a/articles/frontdoor/private-link.md b/articles/frontdoor/private-link.md index c7a547b1c8d43..1f6f641760aeb 100644 --- a/articles/frontdoor/private-link.md +++ b/articles/frontdoor/private-link.md @@ -46,14 +46,13 @@ Azure Front Door private link is available in the following regions: | East US | Sweden Central | Japan East | | East US 2 | UK South | Korea Central | | South Central US | West Europe | | -| West US 2 | 
| | | West US 3 | | | ## Limitations Origin support for direct private end point connectivity is limited to Storage (Azure Blobs), App Services and internal load balancers. -For the best latency, you should always pick an Azure region closest to your origin when choosing to enable Azure Front Door private link endpoint. +The Azure Front Door Private Link feature is region agnostic but for the best latency, you should always pick an Azure region closest to your origin when choosing to enable Azure Front Door Private Link endpoint. ## Next steps diff --git a/articles/frontdoor/standard-premium/concept-endpoint-manager.md b/articles/frontdoor/standard-premium/concept-endpoint-manager.md index 6d04fd9b4fe0e..e983a83246ebb 100644 --- a/articles/frontdoor/standard-premium/concept-endpoint-manager.md +++ b/articles/frontdoor/standard-premium/concept-endpoint-manager.md @@ -10,10 +10,10 @@ ms.date: 02/18/2021 ms.author: qixwang --- -# What is Azure Front Door Standard/Premium (Preview) Endpoint Manager? +# What is Azure Front Door Standard/Premium Endpoint Manager? > [!NOTE] -> * This documentation is for Azure Front Door Standard/Premium (Preview). Looking for information on Azure Front Door? View [Azure Front Door Docs](../front-door-overview.md). +> * This documentation is for Azure Front Door Standard/Premium. Looking for information on Azure Front Door? View [Azure Front Door Docs](../front-door-overview.md). Endpoint Manager provides an overview of endpoints you've configured for your Azure Front Door. An endpoint is a logical grouping of a domains and their associated configurations. Endpoint Manager helps you manage your collection of endpoints for CRUD (create, read, update, and delete) operation. You can manage the following elements for your endpoints through Endpoint Manager: @@ -26,11 +26,6 @@ Endpoint Manager provides an overview of endpoints you've configured for your Az Endpoint Manager list how many instances of each element are created within an endpoint. The association status for each element will also be displayed. For example, you may create multiple domains and origin groups, and assign the association between them with different routes. -> [!IMPORTANT] -> * Azure Front Door Standard/Premium (Preview) is currently in public preview. -> This preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. -> For more information, see [**Supplemental Terms of Use for Microsoft Azure Previews**](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). - ## Linked view With the linked view within Endpoint Manager, you could easily identify the association between your Azure Front Door elements, such as: diff --git a/articles/frontdoor/standard-premium/how-to-add-custom-domain.md b/articles/frontdoor/standard-premium/how-to-add-custom-domain.md index e28913bec6880..85f7b8792b086 100644 --- a/articles/frontdoor/standard-premium/how-to-add-custom-domain.md +++ b/articles/frontdoor/standard-premium/how-to-add-custom-domain.md @@ -19,6 +19,7 @@ When you use Azure Front Door for application delivery, a custom domain is neces After you create an Azure Front Door Standard/Premium profile, the default frontend host will have a subdomain of `azurefd.net`. This subdomain gets included in the URL when Azure Front Door Standard/Premium delivers content from your backend by default. For example, `https://contoso-frontend.azurefd.net/activeusers.htm`. 
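The custom-domain association described in the rest of this article ultimately depends on a CNAME record in your DNS zone that points the custom domain at the default `azurefd.net` host. As a minimal sketch, assuming the zone is hosted in Azure DNS and using hypothetical zone, resource group, and endpoint host names:

```azurepowershell-interactive
# Hypothetical zone, resource group, and endpoint host names.
New-AzDnsRecordSet -Name "www" -RecordType CNAME -ZoneName "contoso.com" -ResourceGroupName "dns-rg" `
    -Ttl 3600 -DnsRecords (New-AzDnsRecordConfig -Cname "contoso-frontend.z01.azurefd.net")
```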
For your convenience, Azure Front Door provides the option of associating a custom domain with the default host. With this option, you deliver your content with a custom domain in your URL instead of an Azure Front Door owned domain name. For example, `https://www.contoso.com/photo.png`. ## Prerequisites + * Before you can complete the steps in this tutorial, you must first create a Front Door. For more information, see [Quickstart: Create a Front Door Standard/Premium](create-front-door-portal.md). * If you don't already have a custom domain, you must first purchase one with a domain provider. For example, see [Buy a custom domain name](../../app-service/manage-custom-dns-buy-domain.md). @@ -28,9 +29,7 @@ After you create an Azure Front Door Standard/Premium profile, the default front ## Add a new custom domain > [!NOTE] -> * When using Azure DNS to create Apex domains isn't supported on Azure Front Door currently. There are other DNS providers that support CNAME flattening or DNS chasing that will allow APEX domains to be used for Azure Front Door Standard/Premium. > * If a custom domain is validated in one of the Azure Front Door Standard, Premium, classic or classic Microsoft CDN profiles, then it can't be added to another profile. -> A custom domain is managed by Domains section in the portal. A custom domain can be created and validated before association to an endpoint. A custom domain and its subdomains can be associated with only a single endpoint at a time. However, you can use different subdomains from the same custom domain for different Front Doors. You can also map custom domains with different subdomains to the same Front Door endpoint. @@ -40,9 +39,8 @@ A custom domain is managed by Domains section in the portal. A custom domain can 1. The **Add a domain** page will appear where you can enter information about of the custom domain. You can choose Azure-managed DNS, which is recommended or you can choose to use your own DNS provider. If you choose Azure-managed DNS, select an existing DNS zone and then select a custom subdomain or create a new one. If you're using another DNS provider, manually enter the custom domain name. Select **Add** to add your custom domain. - > [!NOTE] - > Azure Front Door supports both Azure managed certificate and customer-managed certificates. If you want to use customer-managed certificate, see [Configure HTTPS on a custom domain](how-to-configure-https-custom-domain.md). - > + > [!NOTE] + > Azure Front Door supports both Azure managed certificate and customer-managed certificates. If you want to use customer-managed certificate, see [Configure HTTPS on a custom domain](how-to-configure-https-custom-domain.md). :::image type="content" source="../media/how-to-add-custom-domain/add-domain-page.png" alt-text="Screenshot of add a domain page."::: @@ -76,8 +74,9 @@ A custom domain is managed by Domains section in the portal. A custom domain can | Internal error | If you see this error, retry by clicking the **Refresh** or **Regenerate** buttons. If you're still experiencing issues, raise a support request. | > [!NOTE] -> 1. If the **Regenerate** button doesn't work, delete and recreate the domain. -> 2. If the domain state doesn't reflect as expected, select the **Refresh** button. +> 1. The default TTL for TXT record is 1 hour. When you need to regenerate the TXT record for re-validation, please pay attention to the TTL for the previous TXT record. If it doesn't expire, the validation will fail until the previous TXT record expires. +> 2. 
If the **Regenerate** button doesn't work, delete and recreate the domain. +> 3. If the domain state doesn't reflect as expected, select the **Refresh** button. ## Associate the custom domain with your Front Door Endpoint @@ -101,8 +100,8 @@ After you've validated your custom domain, you can then add it to your Azure Fro 1. Once the CNAME record gets created and the custom domain is associated to the Azure Front Door endpoint completes, traffic flow will start flowing. - > [!NOTE] - > If HTTPS is enabled, certificate provisioning and propagation may take a few minutes because propagation is being done to all edge locations. + > [!NOTE] + > If HTTPS is enabled, certificate provisioning and propagation may take a few minutes because propagation is being done to all edge locations. ## Verify the custom domain diff --git a/articles/frontdoor/standard-premium/how-to-configure-https-custom-domain.md b/articles/frontdoor/standard-premium/how-to-configure-https-custom-domain.md index bbca7cba28a90..762e2cea2809f 100644 --- a/articles/frontdoor/standard-premium/how-to-configure-https-custom-domain.md +++ b/articles/frontdoor/standard-premium/how-to-configure-https-custom-domain.md @@ -6,7 +6,7 @@ author: duongau ms.service: frontdoor ms.topic: article ms.workload: infrastructure-services -ms.date: 03/18/2022 +ms.date: 06/06/2022 ms.author: amsriva ms.custom: devx-track-azurepowershell #Customer intent: As a website owner, I want to add a custom domain to my Front Door configuration so that my users can use my custom domain to access my content. @@ -45,47 +45,71 @@ Azure Front Door supports both Azure managed certificate and customer-managed ce You can also choose to use your own TLS certificate. When you create your TLS/SSL certificate, you must create a complete certificate chain with an allowed certificate authority (CA) that is part of the [Microsoft Trusted CA List](https://ccadb-public.secure.force.com/microsoft/IncludedCACertificateReportForMSFT). If you use a non-allowed CA, your request will be rejected. The root CA must be part of the [Microsoft Trusted CA List](https://ccadb-public.secure.force.com/microsoft/IncludedCACertificateReportForMSFT). If a certificate without complete chain is presented, the requests that involve that certificate aren't guaranteed to work as expected. This certificate must be imported into an Azure Key Vault before you can use it with Azure Front Door Standard/Premium. See how to [import a certificate](../../key-vault/certificates/tutorial-import-certificate.md) to Azure Key Vault. -#### Prepare your Azure Key vault account and certificate +#### Prepare your key vault and certificate -1. You must have a running Azure Key Vault account under the same subscription as your Azure Front Door Standard/Premium that you want to enable custom HTTPS. Create an Azure Key Vault account if you don't have one. +- You must have a key vault in the same Azure subscription as your Azure Front Door Standard/Premium profile. Create a key vault if you don't have one. > [!WARNING] - > Azure Front Door currently only supports Key Vault accounts in the same subscription as the Front Door configuration. Choosing a Key Vault under a different subscription than your Azure Front Door Standard/Premium will result in a failure. + > Azure Front Door currently only supports key vaults in the same subscription as the Front Door profile. Choosing a key vault under a different subscription than your Azure Front Door Standard/Premium profile will result in a failure. -1. 
If you already have a certificate, you can upload it directly to your Azure Key Vault account. Otherwise, create a new certificate directly through Azure Key Vault from one of the partner Certificate Authorities that Azure Key Vault integrates with. Upload your certificate as a **certificate** object, rather than a **secret**. +- If your key vault has network access restrictions enabled, you must configure your key vault to allow trusted Microsoft services to bypass the firewall. - > [!NOTE] - > For your own TLS/SSL certificate, Front Door doesn't support certificates with EC cryptography algorithms. The certificate must have a complete certificate chain with leaf and intermediate certificates, and root CA must be part of the [Microsoft Trusted CA List](https://ccadb-public.secure.force.com/microsoft/IncludedCACertificateReportForMSFT). +- Your key vault must be configured to use the *Key Vault access policy* permission model. + +- If you already have a certificate, you can upload it to your key vault. Otherwise, create a new certificate directly through Azure Key Vault from one of the partner certificate authorities (CAs) that Azure Key Vault integrates with. Upload your certificate as a **certificate** object, rather than a **secret**. + +> [!NOTE] +> Front Door doesn't support certificates with elliptic curve (EC) cryptography algorithms. The certificate must have a complete certificate chain with leaf and intermediate certificates, and root CA must be part of the [Microsoft Trusted CA List](https://ccadb-public.secure.force.com/microsoft/IncludedCACertificateReportForMSFT). #### Register Azure Front Door -Register the service principal for Azure Front Door as an app in your Azure Active Directory via PowerShell. +Register the service principal for Azure Front Door as an app in your Azure Active Directory (Azure AD) by using Azure PowerShell or the Azure CLI. > [!NOTE] -> This action requires Global Administrator permissions, and needs to be performed only **once** per tenant. +> This action requires you to have Global Administrator permissions in Azure AD. The registration only needs to be performed **once per Azure AD tenant**. + +##### Azure PowerShell 1. If needed, install [Azure PowerShell](/powershell/azure/install-az-ps) in PowerShell on your local machine. -1. In PowerShell, run the following command: +2. In PowerShell, run the following command: + + ```azurepowershell-interactive + New-AzADServicePrincipal -ApplicationId '205478c0-bd83-4e1b-a9d6-db63a3e1e1c8' -Role Contributor + ``` + +##### Azure CLI - `New-AzADServicePrincipal -ApplicationId "205478c0-bd83-4e1b-a9d6-db63a3e1e1c8" -Role Contributor` +1. If need, install [Azure CLI](/cli/azure/install-azure-cli) on your local machine. + +2. In CLI, run the following command: + + ```azurecli-interactive + az ad sp create --id 205478c0-bd83-4e1b-a9d6-db63a3e1e1c8 + ``` #### Grant Azure Front Door access to your key vault -Grant Azure Front Door permission to access the certificates in your Azure Key Vault account. +Grant Azure Front Door permission to access the certificates in your Azure Key Vault account. -1. In your key vault account, under SETTINGS, select **Access policies**. Then select **Add new** to create a new policy. +1. In your key vault account, select **Access policies**. -1. In **Select principal**, search for **205478c0-bd83-4e1b-a9d6-db63a3e1e1c8**, and choose **Microsoft.AzureFrontDoor-Cdn**. Select **Select**. +1. Select **Add new** or **Create** to create a new access policy. 1. 
In **Secret permissions**, select **Get** to allow Front Door to retrieve the certificate. 1. In **Certificate permissions**, select **Get** to allow Front Door to retrieve the certificate. -1. Select **OK**. +1. In **Select principal**, search for **205478c0-bd83-4e1b-a9d6-db63a3e1e1c8**, and select **Microsoft.AzureFrontDoor-Cdn**. Select **Next**. + +1. In **Application**, select **Next**. + +1. In **Review + create**, select **Create**. > [!NOTE] -> If your Azure Key Vault is being protected with Firewall, make sure to allow Azure Front Door to access your Azure Key Vault account. +> If your key vault is protected with network access restrictions, make sure to allow trusted Microsoft services to access your key vault. + +Azure Front Door can now access this key vault and the certificates it contains. #### Select the certificate for Azure Front Door to deploy @@ -95,7 +119,11 @@ Grant Azure Front Door permission to access the certificates in your Azure Key :::image type="content" source="../media/how-to-configure-https-custom-domain/add-certificate.png" alt-text="Screenshot of Azure Front Door secret landing page."::: -1. On the **Add certificate** page, select the checkbox for the certificate you want to add to Azure Front Door Standard/Premium. Leave the version selection as "Latest" and select **Add**. +1. On the **Add certificate** page, select the checkbox for the certificate you want to add to Azure Front Door Standard/Premium. + +1. When you select a certificate, you must [select the certificate version](#rotate-own-certificate). If you select **Latest**, Azure Front Door will automatically update whenever the certificate is rotated (renewed). Alternatively, you can select a specific certificate version if you prefer to manage certificate rotation yourself. + + Leave the version selection as "Latest" and select **Add**. :::image type="content" source="../media/how-to-configure-https-custom-domain/add-certificate-page.png" alt-text="Screenshot of add certificate page."::: @@ -107,7 +135,7 @@ Grant Azure Front Door permission to access the certificates in your Azure Key "Bring Your Own Certificate (BYOC)" for *HTTPS*. For *Secret*, select the certificate you want to use from the drop-down. > [!NOTE] - > The selected certificate must have a common name (CN) same as the custom domain being added. + > The common name (CN) of the selected certificate must match the custom domain being added. :::image type="content" source="../media/how-to-configure-https-custom-domain/add-custom-domain-https.png" alt-text="Screenshot of add a custom domain page with HTTPS."::: @@ -115,19 +143,20 @@ Grant Azure Front Door permission to access the certificates in your Azure Key ## Certificate renewal and changing certificate types -### Azure managed certificate +### Azure-managed certificate -Azure managed certificate will be automatically rotated when your custom domain has the CNAME record to an Azure Front Door standard or premium endpoint. The auto rotation won't happen for the two scenarios below +Azure-managed certificates are automatically rotated when your custom domain uses a CNAME record that points to an Azure Front Door standard or premium endpoint. 
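Stepping back to the key vault setup in the hunks above: if you prefer scripting over the portal, the firewall exception and the access-policy grant can likely be made with the Azure CLI as well. The sketch below is illustrative only; the vault name is a placeholder, and the application ID is the Azure Front Door service principal referenced earlier in this diff.

```azurecli-interactive
# Illustrative sketch only; replace <your-key-vault-name> with your key vault.
# Allow trusted Microsoft services (such as Azure Front Door) to bypass the key vault firewall.
az keyvault update --name <your-key-vault-name> --bypass AzureServices

# Grant the Azure Front Door service principal Get access to secrets and certificates.
az keyvault set-policy --name <your-key-vault-name> \
  --spn 205478c0-bd83-4e1b-a9d6-db63a3e1e1c8 \
  --secret-permissions get \
  --certificate-permissions get
```

Note that `az keyvault set-policy` applies only when the vault uses the *Key Vault access policy* permission model, which is what the updated article assumes; it doesn't apply to vaults that use Azure RBAC authorization.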
-* If the custom domain CNAME record is pointing to other DNS resources +Front Door won't automatically rotate certificates in the following scenarios: -* If your custom domain points to Azure Front Door through a long chain, for example, putting an Azure Traffic Manager before Azure Front Door and other CDN providers, the CNAME chain is contoso.com CNAME in `contoso.trafficmanager.net` CNAME in `contoso.z01.azurefd.net`. +* The custom domain's CNAME record is pointing to other DNS resources. +* The custom domain points to Azure Front Door through a long chain. For example, if you put Azure Traffic Manager before Azure Front Door, the CNAME chain is `contoso.com` CNAME in `contoso.trafficmanager.net` CNAME in `contoso.z01.azurefd.net`. -The domain validation state will become ‘Pending Revalidation’ 45 days before managed certificate expiry or ‘Rejected’ if the managed certificate issuance is rejected by the certificate authority. Refer to [Add a custom domain](how-to-add-custom-domain.md#domain-validation-state) for actions for different domain state. +The domain validation state will become *Pending Revalidation* 45 days before the managed certificate expires, or *Rejected* if the managed certificate issuance is rejected by the certificate authority. Refer to [Add a custom domain](how-to-add-custom-domain.md#domain-validation-state) for actions for each of the domain states. -### Use your own certificate +### Use your own certificate -In order for the certificate to be automatically rotated to the latest version when a newer version of the certificate is available in your Key Vault, set the secret version to 'Latest'. If a specific version is selected, you have to reselect the new version manually for certificate rotation. It takes up to 24 hours for the new version of the certificate/secret to be automatically deployed. +In order for the certificate to be automatically rotated to the latest version when a newer version of the certificate is available in your key vault, set the secret version to 'Latest'. If a specific version is selected, you have to reselect the new version manually for certificate rotation. It takes up to 24 hours for the new version of the certificate/secret to be automatically deployed. If you want to change the secret version from ‘Latest’ to a specified version or vice versa, add a new certificate. @@ -142,7 +171,7 @@ If you want to change the secret version from ‘Latest’ to a specified versio > [!NOTE] > * It may take up to an hour for the new certificate to be deployed when you switch between certificate types. - > * If your domain state is Approved, switching the certificate type between BYOC and managed certificate won't have any downtime. Whhen switching to managed certificate, unless the domain ownership is re-validated and the domain state becomes Approved, you will continue to be served by the previous certificate. + > * If your domain state is Approved, switching the certificate type between BYOC and managed certificate won't have any downtime. When switching to managed certificate, unless the domain ownership is re-validated and the domain state becomes Approved, you will continue to be served by the previous certificate. > * If you switch from BYOC to managed certificate, domain re-validation is required. If you switch from managed certificate to BYOC, you're not required to re-validate the domain. 
> diff --git a/articles/frontdoor/troubleshoot-issues.md b/articles/frontdoor/troubleshoot-issues.md index 24228e0c52125..9b537e5f9f00d 100644 --- a/articles/frontdoor/troubleshoot-issues.md +++ b/articles/frontdoor/troubleshoot-issues.md @@ -141,7 +141,7 @@ Responses to these requests might also contain an HTML error page in the respons There are several possible causes for this symptom. The overall reason is that your HTTP request isn't fully RFC-compliant. -An example of noncompliance is a `POST` request sent without either a **Content-Length** or a **Transfer-Encoding** header. An example would be using `curl -X POST https://example-front-door.domain.com`. This request doesn't meet the requirements set out in [RFC 7230](https://tools.ietf.org/html/rfc7230#section-3.3.2). Azure Front Door would block it with an HTTP 411 response. +An example of noncompliance is a `POST` request sent without either a **Content-Length** or a **Transfer-Encoding** header. An example would be using `curl -X POST https://example-front-door.domain.com`. This request doesn't meet the requirements set out in [RFC 7230](https://tools.ietf.org/html/rfc7230#section-3.3.2). Azure Front Door would block it with an HTTP 411 response. Such requests will not be logged. This behavior is separate from the web application firewall (WAF) functionality of Azure Front Door. Currently, there's no way to disable this behavior. All HTTP requests must meet the requirements, even if the WAF functionality isn't in use. diff --git a/articles/governance/blueprints/overview.md b/articles/governance/blueprints/overview.md index e8f8ffdc61afb..93bc210acceb6 100644 --- a/articles/governance/blueprints/overview.md +++ b/articles/governance/blueprints/overview.md @@ -16,7 +16,7 @@ Just as a blueprint allows an engineer or an architect to sketch a project's des Azure Blueprints enables cloud architects and central information technology groups to define a repeatable set of Azure resources that implements and adheres to an organization's standards, patterns, and requirements. Azure Blueprints makes it possible for development teams to rapidly -build and stand up new environments with trust they're building within organizational compliance +build and start up new environments with trust they're building within organizational compliance with a set of built-in components, such as networking, to speed up development and delivery. Blueprints are a declarative way to orchestrate the deployment of various resource templates and diff --git a/articles/governance/management-groups/create-management-group-azure-cli.md b/articles/governance/management-groups/create-management-group-azure-cli.md index 38f6f47312331..1e25d09624b12 100644 --- a/articles/governance/management-groups/create-management-group-azure-cli.md +++ b/articles/governance/management-groups/create-management-group-azure-cli.md @@ -3,7 +3,7 @@ title: "Quickstart: Create a management group with the Azure CLI" description: In this quickstart, you use the Azure CLI to create a management group to organize your resources into a resource hierarchy. 
ms.date: 08/17/2021 ms.topic: quickstart -ms.custom: devx-track-azurecli +ms.tool: azure-cli --- # Quickstart: Create a management group with the Azure CLI diff --git a/articles/governance/management-groups/overview.md b/articles/governance/management-groups/overview.md index fc43eec5c7a7c..283e91a674802 100644 --- a/articles/governance/management-groups/overview.md +++ b/articles/governance/management-groups/overview.md @@ -1,7 +1,7 @@ --- title: Organize your resources with management groups - Azure Governance description: Learn about the management groups, how their permissions work, and how to use them. -ms.date: 05/12/2022 +ms.date: 05/25/2022 ms.topic: overview author: timwarner-msft ms.author: timwarner @@ -314,6 +314,9 @@ management group. When looking to query on management groups outside the Azure portal, the target scope for management groups looks like **"/providers/Microsoft.Management/managementGroups/{_management-group-id_}"**. +> [!NOTE] +> Using the Azure Resource Manager REST API, you can enable diagnostic settings on a management group to send related Azure Activity log entries to a Log Analytics workspace, Azure Storage, or Azure Event Hub. For more information, see [Management Group Diagnostic Settings - Create Or Update](https://docs.microsoft.com/rest/api/monitor/management-group-diagnostic-settings/create-or-update). + ## Next steps To learn more about management groups, see: diff --git a/articles/governance/policy/assign-policy-terraform.md b/articles/governance/policy/assign-policy-terraform.md index 79fd12151aa31..aefed49487546 100644 --- a/articles/governance/policy/assign-policy-terraform.md +++ b/articles/governance/policy/assign-policy-terraform.md @@ -3,6 +3,7 @@ title: "Quickstart: New policy assignment with Terraform" description: In this quickstart, you use Terraform and HCL syntax to create a policy assignment to identify non-compliant resources. ms.date: 08/17/2021 ms.topic: quickstart +ms.tool: terraform --- # Quickstart: Create a policy assignment to identify non-compliant resources using Terraform diff --git a/articles/governance/policy/concepts/definition-structure.md b/articles/governance/policy/concepts/definition-structure.md index fe1b282ccbe7a..78c3dc1ea02de 100644 --- a/articles/governance/policy/concepts/definition-structure.md +++ b/articles/governance/policy/concepts/definition-structure.md @@ -137,7 +137,7 @@ The following Resource Provider modes are fully supported: - `Microsoft.Kubernetes.Data` for managing your Kubernetes clusters on or off Azure. Definitions using this Resource Provider mode use effects _audit_, _deny_, and _disabled_. This mode supports custom definitions as a _public preview_. See - [Create policy definition from constraint template](../how-to/extension-for-vscode.md) to create a + [Create policy definition from constraint template](https://docs.microsoft.com/azure/governance/policy/how-to/extension-for-vscode#create-policy-definition-from-constraint-template) to create a custom definition from an existing [Open Policy Agent](https://www.openpolicyagent.org/) (OPA) GateKeeper v3 [constraint template](https://open-policy-agent.github.io/gatekeeper/website/docs/howto/#constraint-templates). 
Use diff --git a/articles/governance/policy/concepts/exemption-structure.md b/articles/governance/policy/concepts/exemption-structure.md index eb4f623087610..42e2d95607edc 100644 --- a/articles/governance/policy/concepts/exemption-structure.md +++ b/articles/governance/policy/concepts/exemption-structure.md @@ -151,10 +151,10 @@ assignment. ## Next steps -- Study the [Microsoft.Authorization policyExemptions resource type](https://docs.microsoft.com/azure/templates/microsoft.authorization/policyexemptions?tabs=json). +- Study the [Microsoft.Authorization policyExemptions resource type](/azure/templates/microsoft.authorization/policyexemptions?tabs=json). - Learn about the [policy definition structure](./definition-structure.md). - Understand how to [programmatically create policies](../how-to/programmatically-create.md). - Learn how to [get compliance data](../how-to/get-compliance-data.md). - Learn how to [remediate non-compliant resources](../how-to/remediate-resources.md). - Review what a management group is with - [Organize your resources with Azure management groups](../../management-groups/overview.md). + [Organize your resources with Azure management groups](../../management-groups/overview.md). \ No newline at end of file diff --git a/articles/governance/policy/concepts/guest-configuration.md b/articles/governance/policy/concepts/guest-configuration.md index c34bf9222d617..e4b886ce53b46 100644 --- a/articles/governance/policy/concepts/guest-configuration.md +++ b/articles/governance/policy/concepts/guest-configuration.md @@ -280,7 +280,10 @@ Management Groups. The guest configuration extension writes log files to the following locations: -Windows: `C:\ProgramData\GuestConfig\gc_agent_logs\gc_agent.log` +Windows + +- Azure VM: `C:\ProgramData\GuestConfig\gc_agent_logs\gc_agent.log` +- Arc-enabled server: `C:\ProgramData\GuestConfig\arc_policy_logs\gc_agent.log` Linux diff --git a/articles/governance/policy/samples/gov-dod-impact-level-4.md b/articles/governance/policy/samples/gov-dod-impact-level-4.md index 00d0182371087..d44fcdd4de0b6 100644 --- a/articles/governance/policy/samples/gov-dod-impact-level-4.md +++ b/articles/governance/policy/samples/gov-dod-impact-level-4.md @@ -243,7 +243,7 @@ initiative definition. |[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. 
By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). |Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). 
|Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Azure Web Application Firewall should be enabled for Azure Front Door entry-points](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F055aa869-bc98-4af8-bafc-23f1ab6ffe2c) |Deploy Azure Web Application Firewall (WAF) in front of public facing web applications for additional inspection of incoming traffic. Web Application Firewall (WAF) provides centralized protection of your web applications from common exploits and vulnerabilities such as SQL injections, Cross-Site Scripting, local and remote file executions. You can also restrict access to your web applications by countries, IP address ranges, and other http(s) parameters via custom rules. |Audit, Deny, Disabled |[1.0.2](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Network/WAF_AFD_Enabled_Audit.json) | |[Cognitive Services accounts should disable public network access](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F0725b4dd-7e76-479c-a735-68e7ee23d5ca) |Disabling public network access improves security by ensuring that Cognitive Services account isn't exposed on the public internet. Creating private endpoints can limit exposure of Cognitive Services account. Learn more at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). |Audit, Deny, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_DisablePublicNetworkAccess_Audit.json) | @@ -291,7 +291,7 @@ initiative definition. |[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. 
|AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). |Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. 
By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Azure Web Application Firewall should be enabled for Azure Front Door entry-points](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F055aa869-bc98-4af8-bafc-23f1ab6ffe2c) |Deploy Azure Web Application Firewall (WAF) in front of public facing web applications for additional inspection of incoming traffic. Web Application Firewall (WAF) provides centralized protection of your web applications from common exploits and vulnerabilities such as SQL injections, Cross-Site Scripting, local and remote file executions. You can also restrict access to your web applications by countries, IP address ranges, and other http(s) parameters via custom rules. |Audit, Deny, Disabled |[1.0.2](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Network/WAF_AFD_Enabled_Audit.json) | |[Cognitive Services accounts should disable public network access](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F0725b4dd-7e76-479c-a735-68e7ee23d5ca) |Disabling public network access improves security by ensuring that Cognitive Services account isn't exposed on the public internet. Creating private endpoints can limit exposure of Cognitive Services account. Learn more at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). |Audit, Deny, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_DisablePublicNetworkAccess_Audit.json) | @@ -684,7 +684,7 @@ initiative definition. 
|[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). |Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). 
|Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Cognitive Services accounts should disable public network access](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F0725b4dd-7e76-479c-a735-68e7ee23d5ca) |Disabling public network access improves security by ensuring that Cognitive Services account isn't exposed on the public internet. Creating private endpoints can limit exposure of Cognitive Services account. Learn more at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). |Audit, Deny, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_DisablePublicNetworkAccess_Audit.json) | |[Cognitive Services accounts should restrict network access](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F037eea7a-bd0a-46c5-9a66-03aea78705d3) |Network access to Cognitive Services accounts should be restricted. Configure network rules so only applications from allowed networks can access the Cognitive Services account. To allow connections from specific internet or on-premises clients, access can be granted to traffic from specific Azure virtual networks or to public internet IP address ranges. 
|Audit, Deny, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_NetworkAcls_Audit.json) | @@ -768,7 +768,7 @@ initiative definition. |[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). |Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. 
Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Cognitive Services should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fcddd188c-4b82-4c48-a19d-ddf74ee66a01) |Azure Private Link lets you connect your virtual networks to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Cognitive Services, you'll reduce the potential for data leakage. Learn more about private links at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). |Audit, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_EnablePrivateEndpoints_Audit.json) | |[Container registries should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fe8eef0a8-67cf-4eb4-9386-14b0e78733d4) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. 
The private link platform handles the connectivity between the consumer and services over the Azure backbone network.By mapping private endpoints to your container registries instead of the entire service, you'll also be protected against data leakage risks. Learn more at: [https://aka.ms/acr/private-link](../../../container-registry/container-registry-private-link.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Container%20Registry/ACR_PrivateEndpointEnabled_Audit.json) | @@ -806,7 +806,7 @@ initiative definition. |[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). |Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). 
|AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Cognitive Services should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fcddd188c-4b82-4c48-a19d-ddf74ee66a01) |Azure Private Link lets you connect your virtual networks to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. 
By mapping private endpoints to Cognitive Services, you'll reduce the potential for data leakage. Learn more about private links at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). |Audit, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_EnablePrivateEndpoints_Audit.json) | |[Container registries should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fe8eef0a8-67cf-4eb4-9386-14b0e78733d4) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network.By mapping private endpoints to your container registries instead of the entire service, you'll also be protected against data leakage risks. Learn more at: [https://aka.ms/acr/private-link](../../../container-registry/container-registry-private-link.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Container%20Registry/ACR_PrivateEndpointEnabled_Audit.json) | @@ -1125,4 +1125,4 @@ Additional articles about Azure Policy: - See the [initiative definition structure](../concepts/initiative-definition-structure.md). - Review other examples at [Azure Policy samples](./index.md). - Review [Understanding policy effects](../concepts/effects.md). -- Learn how to [remediate non-compliant resources](../how-to/remediate-resources.md). \ No newline at end of file +- Learn how to [remediate non-compliant resources](../how-to/remediate-resources.md). diff --git a/articles/governance/policy/samples/gov-dod-impact-level-5.md b/articles/governance/policy/samples/gov-dod-impact-level-5.md index 0e85189527d42..a18a3e649dfd9 100644 --- a/articles/governance/policy/samples/gov-dod-impact-level-5.md +++ b/articles/governance/policy/samples/gov-dod-impact-level-5.md @@ -243,7 +243,7 @@ initiative definition. |[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. 
Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). |Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. 
The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Azure Web Application Firewall should be enabled for Azure Front Door entry-points](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F055aa869-bc98-4af8-bafc-23f1ab6ffe2c) |Deploy Azure Web Application Firewall (WAF) in front of public facing web applications for additional inspection of incoming traffic. Web Application Firewall (WAF) provides centralized protection of your web applications from common exploits and vulnerabilities such as SQL injections, Cross-Site Scripting, local and remote file executions. You can also restrict access to your web applications by countries, IP address ranges, and other http(s) parameters via custom rules. |Audit, Deny, Disabled |[1.0.2](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Network/WAF_AFD_Enabled_Audit.json) | |[Cognitive Services accounts should disable public network access](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F0725b4dd-7e76-479c-a735-68e7ee23d5ca) |Disabling public network access improves security by ensuring that Cognitive Services account isn't exposed on the public internet. Creating private endpoints can limit exposure of Cognitive Services account. Learn more at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). |Audit, Deny, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_DisablePublicNetworkAccess_Audit.json) | @@ -291,7 +291,7 @@ initiative definition. |[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. 
The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). |Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). 
|Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Azure Web Application Firewall should be enabled for Azure Front Door entry-points](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F055aa869-bc98-4af8-bafc-23f1ab6ffe2c) |Deploy Azure Web Application Firewall (WAF) in front of public facing web applications for additional inspection of incoming traffic. Web Application Firewall (WAF) provides centralized protection of your web applications from common exploits and vulnerabilities such as SQL injections, Cross-Site Scripting, local and remote file executions. You can also restrict access to your web applications by countries, IP address ranges, and other http(s) parameters via custom rules. |Audit, Deny, Disabled |[1.0.2](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Network/WAF_AFD_Enabled_Audit.json) | |[Cognitive Services accounts should disable public network access](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F0725b4dd-7e76-479c-a735-68e7ee23d5ca) |Disabling public network access improves security by ensuring that Cognitive Services account isn't exposed on the public internet. Creating private endpoints can limit exposure of Cognitive Services account. Learn more at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). |Audit, Deny, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_DisablePublicNetworkAccess_Audit.json) | @@ -684,7 +684,7 @@ initiative definition. |[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. 
|AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). |Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. 
By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Cognitive Services accounts should disable public network access](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F0725b4dd-7e76-479c-a735-68e7ee23d5ca) |Disabling public network access improves security by ensuring that Cognitive Services account isn't exposed on the public internet. Creating private endpoints can limit exposure of Cognitive Services account. Learn more at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). |Audit, Deny, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_DisablePublicNetworkAccess_Audit.json) | |[Cognitive Services accounts should restrict network access](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F037eea7a-bd0a-46c5-9a66-03aea78705d3) |Network access to Cognitive Services accounts should be restricted. Configure network rules so only applications from allowed networks can access the Cognitive Services account. To allow connections from specific internet or on-premises clients, access can be granted to traffic from specific Azure virtual networks or to public internet IP address ranges. |Audit, Deny, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_NetworkAcls_Audit.json) | @@ -768,7 +768,7 @@ initiative definition. |[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. 
Creating a private endpoint by itself does not disable the public endpoint. |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). |Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. 
By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Cognitive Services should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fcddd188c-4b82-4c48-a19d-ddf74ee66a01) |Azure Private Link lets you connect your virtual networks to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Cognitive Services, you'll reduce the potential for data leakage. Learn more about private links at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). |Audit, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_EnablePrivateEndpoints_Audit.json) | |[Container registries should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fe8eef0a8-67cf-4eb4-9386-14b0e78733d4) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network.By mapping private endpoints to your container registries instead of the entire service, you'll also be protected against data leakage risks. Learn more at: [https://aka.ms/acr/private-link](../../../container-registry/container-registry-private-link.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Container%20Registry/ACR_PrivateEndpointEnabled_Audit.json) | @@ -806,7 +806,7 @@ initiative definition. 
|[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). |Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). 
|Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Cognitive Services should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fcddd188c-4b82-4c48-a19d-ddf74ee66a01) |Azure Private Link lets you connect your virtual networks to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Cognitive Services, you'll reduce the potential for data leakage. Learn more about private links at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). |Audit, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_EnablePrivateEndpoints_Audit.json) | |[Container registries should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fe8eef0a8-67cf-4eb4-9386-14b0e78733d4) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. 
The private link platform handles the connectivity between the consumer and services over the Azure backbone network.By mapping private endpoints to your container registries instead of the entire service, you'll also be protected against data leakage risks. Learn more at: [https://aka.ms/acr/private-link](../../../container-registry/container-registry-private-link.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Container%20Registry/ACR_PrivateEndpointEnabled_Audit.json) | @@ -1125,4 +1125,4 @@ Additional articles about Azure Policy: - See the [initiative definition structure](../concepts/initiative-definition-structure.md). - Review other examples at [Azure Policy samples](./index.md). - Review [Understanding policy effects](../concepts/effects.md). -- Learn how to [remediate non-compliant resources](../how-to/remediate-resources.md). \ No newline at end of file +- Learn how to [remediate non-compliant resources](../how-to/remediate-resources.md). diff --git a/articles/governance/policy/samples/guest-configuration-baseline-docker.md b/articles/governance/policy/samples/guest-configuration-baseline-docker.md new file mode 100644 index 0000000000000..9ae80c6532ad0 --- /dev/null +++ b/articles/governance/policy/samples/guest-configuration-baseline-docker.md @@ -0,0 +1,110 @@ +--- +title: Reference - Azure Policy guest configuration baseline for Docker +description: Details of the Docker baseline on Azure implemented through Azure Policy guest configuration. +ms.date: 05/17/2022 +ms.topic: reference +ms.custom: generated +--- +# Docker security baseline + +This article details the configuration settings for Docker hosts as applicable in the following +implementations: + +- **\[Preview\]: Linux machines should meet requirements for the Azure security baseline for Docker hosts** +- **Vulnerabilities in security configuration on your machines should be remediated** in Azure + Security Center + +For more information, see [Understand the guest configuration feature of Azure Policy](../concepts/guest-configuration.md) and +[Overview of the Azure Security Benchmark (V2)](../../../security/benchmarks/overview.md). + +## General security controls + +|Name
                  (CCEID) |Details |Remediation check | +|---|---|---| +|Docker inventory Information
                  (0.0) |Description: None |None | +|Ensure a separate partition for containers has been created
                  (1.01) |Description: Docker depends on /var/lib/docker as the default directory where all Docker related files, including the images, are stored. This directory might fill up fast and soon Docker and the host could become unusable. So, it's advisable to create a separate partition (logical volume) for storing Docker files. |For new installations, create a separate partition for /var/lib/docker mount point. For systems that were previously installed, use the Logical Volume Manager (LVM) to create partitions. | +|Ensure docker version is up-to-date
                   (1.03) |Description: Using an up-to-date Docker version keeps your host secure. |Follow the Docker documentation to upgrade your version. | +|Ensure auditing is configured for the docker daemon
                  (1.05) |Description: Apart from auditing your regular Linux file system and system calls, audit Docker daemon as well. Docker daemon runs with root privileges. It's thus necessary to audit its activities and usage. |Add the line `-w /usr/bin/docker -k docker` into the /etc/audit/audit.rules file. Then, restart the audit daemon by running the command: `service auditd restart` | +|Ensure auditing is configured for Docker files and directories - /var/lib/docker
                  (1.06) |Description: Apart from auditing your regular Linux file system and system calls, audit all Docker related files and directories. Docker daemon runs with root privileges. Its behavior depends on some key files and directories. /var/lib/docker is one such directory. It holds all the information about containers. It must be audited. |Add the line `-w /var/lib/docker -k docker` into the /etc/audit/audit.rules file. Then, restart the audit daemon by running the command: `service auditd restart` | +|Ensure auditing is configured for Docker files and directories - /etc/docker
                  (1.07) |Description: Apart from auditing your regular Linux file system and system calls, audit all Docker related files and directories. Docker daemon runs with root privileges. Its behavior depends on some key files and directories. /etc/docker is one such directory. It holds various certificates and keys used for TLS communication between Docker daemon and Docker client. It must be audited. |Add the line `-w /etc/docker -k docker` into the /etc/audit/audit.rules file. Then, restart the audit daemon by running the command: `service auditd restart` | +|Ensure auditing is configured for Docker files and directories - docker.service
                  (1.08) |Description: Apart from auditing your regular Linux file system and system calls, audit all Docker related files and directories. Docker daemon runs with root privileges. Its behavior depends on some key files and directories. Docker.service is one such file. The docker.service file might be present if the daemon parameters have been changed by an administrator. It holds various parameters for Docker daemon. It must be audited, if applicable. |Find out the 'docker.service' file location by running: `systemctl show -p FragmentPath docker.service` and add the line `-w {docker.service file location} -k docker` into the /etc/audit/audit.rules file where `{docker.service file location}` is the file path you have found earlier. Restart the audit daemon by running the command: `service auditd restart` | +|Ensure auditing is configured for Docker files and directories - docker.socket
                  (1.09) |Description: Apart from auditing your regular Linux file system and system calls, audit all Docker related files and directories. Docker daemon runs with root privileges. Its behavior depends on some key files and directories. Docker.socket is one such file. It holds various parameters for Docker daemon socket. It must be audited, if applicable. |Find out the 'docker.socket' file location by running: `systemctl show -p FragmentPath docker.socket` and add the line `-w {docker.socket file location} -k docker` into the /etc/audit/audit.rules file where `{docker.socket file location}` is the file path you have found earlier. Restart the audit daemon by running the command: `service auditd restart` | +|Ensure auditing is configured for Docker files and directories - /etc/default/docker
                  (1.10) |Description: Apart from auditing your regular Linux file system and system calls, audit all Docker related files and directories. Docker daemon runs with root privileges. Its behavior depends on some key files and directories. /etc/default/docker is one such file. It holds various parameters for Docker daemon. It must be audited, if applicable. |Add the line `-w /etc/default/docker -k docker` into the /etc/audit/audit.rules file. Then, restart the audit daemon by running the command: `service auditd restart` | +|Ensure auditing is configured for Docker files and directories - /etc/docker/daemon.json
                  (1.11) |Description: Apart from auditing your regular Linux file system and system calls, audit all Docker related files and directories. Docker daemon runs with root privileges. Its behavior depends on some key files and directories. /etc/docker/daemon.json is one such file. It holds various parameters for Docker daemon. It must be audited, if applicable. |Add the line `-w /etc/docker/daemon.json -k docker` into the /etc/audit/audit.rules file. Then, restart the audit daemon by running the command: `service auditd restart` | +|Ensure auditing is configured for Docker files and directories - /usr/bin/docker-containerd
                  (1.12) |Description: Apart from auditing your regular Linux file system and system calls, audit all Docker related files and directories. Docker daemon runs with root privileges. Its behavior depends on some key files and directories. /usr/bin/docker-containerd is one such file. Docker now relies on containerd and runC to spawn containers. It must be audited, if applicable. |Add the line `-w /usr/bin/docker-containerd -k docker` into the /etc/audit/audit.rules file. Then, restart the audit daemon by running the command: `service auditd restart` | +|Ensure auditing is configured for Docker files and directories - /usr/bin/docker-runc
                  (1.13) |Description: Apart from auditing your regular Linux file system and system calls, audit all Docker related files and directories. Docker daemon runs with root privileges. Its behavior depends on some key files and directories. /usr/bin/docker-runc is one such file. Docker now relies on containerd and runC to spawn containers. It must be audited, if applicable. |Add the line `-w /usr/bin/docker-runc -k docker` into the /etc/audit/audit.rules file. Then, restart the audit daemon by running the command: `service auditd restart` | +|Ensure network traffic is restricted between containers on the default bridge
                   (2.01) |Description: The inter-container communication would be disabled on the default network bridge. If any communication between containers on the same host is desired, it needs to be explicitly defined using container linking, or custom networks have to be defined. |Run Docker in daemon mode and pass `--icc=false` as an argument, or set the 'icc' setting to false in the daemon.json file. Alternatively, you can follow the Docker documentation and create a custom network and only join containers that need to communicate to that custom network. The `--icc` parameter only applies to the default docker bridge; if custom networks are used, then the approach of segmenting networks should be adopted instead. | +|Ensure the logging level is set to 'info'.
                   (2.02) |Description: Setting up an appropriate log level configures the Docker daemon to log events that you would want to review later. A base log level of `info` and above would capture all logs except debug logs. Until and unless required, you shouldn't run the Docker daemon at the `debug` log level. |Run the Docker daemon as below: ```dockerd --log-level info``` | +|Ensure Docker is allowed to make changes to iptables
                   (2.03) |Description: Docker will never make changes to your system `iptables` rules if you choose to do so. The Docker server would automatically make the needed changes to iptables based on how you choose your networking options for the containers if it's allowed to do so. It's recommended to let the Docker server make changes to `iptables` automatically to avoid networking misconfiguration that might hamper the communication between containers and to the outside world. Additionally, it would save you the hassle of updating `iptables` every time you choose to run the containers or modify networking options. |Don't run the Docker daemon with the `--iptables=false` parameter. For example, don't start the Docker daemon as below: ```dockerd --iptables=false``` | +|Ensure insecure registries aren't used
                   (2.04) |Description: You shouldn't be using any insecure registries in the production environment. Insecure registries can be tampered with, leading to possible compromise of your production system. |Remove the `--insecure-registry` flag from the dockerd start command. | +|The 'aufs' storage driver shouldn't be used by the docker daemon
                   (2.05) |Description: The 'aufs' storage driver is the oldest storage driver. It's based on a Linux kernel patch-set that is unlikely to be merged into the main Linux kernel. The aufs driver is also known to cause some serious kernel crashes. aufs has only legacy support from Docker. Most importantly, aufs isn't a supported driver in many Linux distributions using the latest Linux kernels. |The 'aufs' storage driver should be replaced by a different storage driver; we recommend using 'overlay2'. | +|Ensure TLS authentication for Docker daemon is configured
                   (2.06) |Description: By default, Docker daemon binds to a non-networked Unix socket and runs with `root` privileges. If you change the default docker daemon binding to a TCP port or any other Unix socket, anyone with access to that port or socket can have full access to Docker daemon and in turn to the host system. Hence, you shouldn't bind the Docker daemon to another IP/port or a Unix socket. If you must expose the Docker daemon via a network socket, configure TLS authentication for the daemon and Docker Swarm APIs (if using). This would restrict the connections to your Docker daemon over the network to a limited number of clients who could successfully authenticate over TLS. |Follow the steps mentioned in the Docker documentation or other references. | +|Ensure the default ulimit is configured appropriately
                   (2.07) |Description: If the ulimits aren't set properly, the desired resource control might not be achieved and might even make the system unusable. |Run Docker in daemon mode and pass `--default-ulimit` as an argument with respective ulimits as appropriate in your environment. Alternatively, you can also set a specific resource limitation to each container separately by using the `--ulimit` argument with respective ulimits as appropriate in your environment. | +|Enable user namespace support
                   (2.08) |Description: The Linux kernel user namespace support in Docker daemon provides additional security for the Docker host system. It allows a container to have a unique range of user and group IDs which are outside the traditional user and group range utilized by the host system. For example, the root user will have expected administrative privilege inside the container but can effectively be mapped to an unprivileged UID on the host system. |Consult the Docker documentation for various ways in which this can be configured depending upon your requirements. Your steps might also vary based on platform. For example, on Red Hat, sub-UIDs and sub-GIDs mapping creation does not work automatically; you might have to create your own mapping. However, the high-level steps are as below: **Step 1:** Ensure that the files `/etc/subuid` and `/etc/subgid` exist: ```touch /etc/subuid /etc/subgid``` **Step 2:** Start the docker daemon with the `--userns-remap` flag: ```dockerd --userns-remap=default``` | +|Ensure base device size isn't changed until needed
                   (2.10) |Description: Increasing the base device size allows all future images and containers to be of the new base device size. This may cause a denial of service if the file system ends up over-allocated or full. |Remove the `--storage-opt dm.basesize` flag from the dockerd start command until you need it. | +|Ensure that authorization for Docker client commands is enabled
                   (2.11) |Description: Docker's out-of-the-box authorization model is all or nothing. Any user with permission to access the Docker daemon can run any Docker client command. The same is true for callers using Docker's remote API to contact the daemon. If you require greater access control, you can create authorization plugins and add them to your Docker daemon configuration. Using an authorization plugin, a Docker administrator can configure granular access policies for managing access to the Docker daemon. Third-party integrations of Docker may implement their own authorization models to require authorization with the Docker daemon outside of docker's native authorization plugin (for example, Kubernetes, Cloud Foundry, OpenShift). |**Step 1**: Install/Create an authorization plugin. **Step 2**: Configure the authorization policy as desired. **Step 3**: Start the docker daemon as below: ```dockerd --authorization-plugin={plugin name}``` | +|Ensure centralized and remote logging is configured
                   (2.12) |Description: Centralized and remote logging ensures that all important log records are safe despite catastrophic events. Docker now supports various such logging drivers. Use the one that suits your environment the best. |**Step 1**: Set up the desired log driver by following its documentation. **Step 2**: Start the docker daemon with that logging driver. For example, ```dockerd --log-driver=syslog --log-opt syslog-address=tcp://192.xxx.xxx.xxx``` | +|Ensure live restore is Enabled
                   (2.14) |Description: Availability is one of the important elements of the security triad. Setting the `--live-restore` flag in the docker daemon ensures that container execution isn't interrupted when the docker daemon isn't available. This also means that it's now easier to update and patch the docker daemon without execution downtime. |Run Docker in daemon mode and pass `--live-restore` as an argument. For example, ```dockerd --live-restore``` | +|Ensure Userland Proxy is Disabled
                   (2.15) |Description: Docker engine provides two mechanisms for forwarding ports from the host to containers: hairpin NAT and a userland proxy. In most circumstances, the hairpin NAT mode is preferred as it improves performance and makes use of native Linux iptables functionality instead of an additional component. Where hairpin NAT is available, the userland proxy should be disabled on startup to reduce the attack surface of the installation. |Run the Docker daemon as below: ```dockerd --userland-proxy=false``` | +|Ensure experimental features are avoided in production
                   (2.17) |Description: Experimental is now a runtime docker daemon flag instead of a separate build. Passing `--experimental` as a runtime flag to the docker daemon activates experimental features. Experimental is now considered a stable release, but with a couple of features which might not have been tested and don't have guaranteed API stability. |Don't pass `--experimental` as a runtime parameter to the docker daemon. | +|Ensure containers are restricted from acquiring new privileges.
                  (2.18) |Description: A process can set the `no_new_priv` bit in the kernel. It persists across fork, clone and execve. The `no_new_priv` bit ensures that the process or its children processes don't gain any additional privileges via suid or sgid bits. This way numerous dangerous operations become a lot less dangerous because there's no possibility of subverting privileged binaries. Setting this at the daemon level ensures that by default all new containers are restricted from acquiring new privileges. |Run the Docker daemon as below: ```dockerd --no-new-privileges``` | +|Ensure that docker.service file ownership is set to root:root.
                   (3.01) |Description: `docker.service` file contains sensitive parameters that may alter the behavior of Docker daemon. Hence, it should be owned and group-owned by `root` to maintain the integrity of the file. |**Step 1**: Find out the file location: ```systemctl show -p FragmentPath docker.service``` **Step 2**: If the file does not exist, this recommendation isn't applicable. If the file exists, execute the below command with the correct file path to set the ownership and group ownership for the file to `root`. For example, ```chown root:root /usr/lib/systemd/system/docker.service``` | +|Ensure that docker.service file permissions are set to 644 or more restrictive
                  (3.02) |Description: `docker.service` file contains sensitive parameters that may alter the behavior of Docker daemon. Hence, it shouldn't be writable by any other user other than `root` to maintain the integrity of the file. |**Step 1**: Find out the file location: ```systemctl show -p FragmentPath docker.service``` **Step 2**: If the file does not exist, this recommendation isn't applicable. If the file exists, execute the below command with the correct file path to set the file permissions to `644`. For example, ```chmod 644 /usr/lib/systemd/system/docker.service``` | +|Ensure that docker.socket file ownership is set to root:root.
                  (3.03) |Description: `docker.socket` file contains sensitive parameters that may alter the behavior of Docker remote API. Hence, it should be owned and group-owned by `root` to maintain the integrity of the file. |**Step 1**: Find out the file location: ```systemctl show -p FragmentPath docker.socket``` **Step 2**: If the file does not exist, this recommendation isn't applicable. If the file exists, execute the below command with the correct file path to set the ownership and group ownership for the file to `root`. For example, ```chown root:root /usr/lib/systemd/system/docker.socket``` | +|Ensure that docker.socket file permissions are set to `644` or more restrictive
                   (3.04) |Description: `docker.socket` file contains sensitive parameters that may alter the behavior of Docker daemon. Hence, it shouldn't be writable by any user other than `root` to maintain the integrity of the file. |**Step 1**: Find out the file location: ```systemctl show -p FragmentPath docker.socket``` **Step 2**: If the file does not exist, this recommendation isn't applicable. If the file exists, execute the below command with the correct file path to set the file permissions to `644`. For example, ```chmod 644 /usr/lib/systemd/system/docker.socket``` | +|Ensure that /etc/docker directory ownership is set to `root:root`.
                  (3.05) |Description: /etc/docker directory contains certificates and keys in addition to various sensitive files. Hence, it should be owned and group-owned by `root` to maintain the integrity of the directory. | ```chown root:root /etc/docker``` This would set the ownership and group-ownership for the directory to `root`. | +|Ensure that /etc/docker directory permissions are set to `755` or more restrictive
                  (3.06) |Description: /etc/docker directory contains certificates and keys in addition to various sensitive files. Hence, it should only be writable by `root` to maintain the integrity of the directory. | ```chmod 755 /etc/docker``` This would set the permissions for the directory to `755`. | +|Ensure that registry certificate file ownership is set to root:root
                   (3.07) |Description: /etc/docker/certs.d/ directory contains Docker registry certificates. These certificate files must be owned and group-owned by `root` to maintain the integrity of the certificates. | ```chown root:root /etc/docker/certs.d/{registry-name}/*``` This would set the ownership and group-ownership for the registry certificate files to `root`. | +|Ensure that registry certificate file permissions are set to `444` or more restrictive
                   (3.08) |Description: /etc/docker/certs.d/ directory contains Docker registry certificates. These certificate files must have permissions of `444` to maintain the integrity of the certificates. | ```chmod 444 /etc/docker/certs.d/{registry-name}/*``` This would set the permissions for registry certificate files to `444`. | +|Ensure that TLS CA certificate file ownership is set to root:root
                   (3.09) |Description: The TLS CA certificate file should be protected from any tampering. It's used to authenticate the Docker server based on the given CA certificate. Hence, it must be owned and group-owned by `root` to maintain the integrity of the CA certificate. |```chown root:root {TLS CA certificate file path}``` This would set the ownership and group-ownership for the TLS CA certificate file to `root`. | +|Ensure that TLS CA certificate file permissions are set to `444` or more restrictive
                   (3.10) |Description: The TLS CA certificate file should be protected from any tampering. It's used to authenticate the Docker server based on the given CA certificate. Hence, it must have permissions of `444` to maintain the integrity of the CA certificate. | ```chmod 444 {TLS CA certificate file path}``` This would set the file permissions of the TLS CA file to `444`. | +|Ensure that Docker server certificate file ownership is set to root:root
                   (3.11) |Description: The Docker server certificate file should be protected from any tampering. It's used to authenticate the Docker server based on the given server certificate. Hence, it must be owned and group-owned by `root` to maintain the integrity of the certificate. | ```chown root:root {Docker server certificate file path}``` This would set the ownership and group-ownership for the Docker server certificate file to `root`. | +|Ensure that Docker server certificate file permissions are set to `444` or more restrictive
                   (3.12) |Description: The Docker server certificate file should be protected from any tampering. It's used to authenticate the Docker server based on the given server certificate. Hence, it must have permissions of `444` to maintain the integrity of the certificate. | ```chmod 444 {Docker server certificate file path}``` This would set the file permissions of the Docker server file to `444`. | +|Ensure that Docker server certificate key file ownership is set to root:root
                   (3.13) |Description: The Docker server certificate key file should be protected from any tampering or unneeded reads. It holds the private key for the Docker server certificate. Hence, it must be owned and group-owned by `root` to maintain the integrity of the Docker server certificate. | ```chown root:root {Docker server certificate key file path}``` This would set the ownership and group-ownership for the Docker server certificate key file to `root`. | +|Ensure that Docker server certificate key file permissions are set to 400
                   (3.14) |Description: The Docker server certificate key file should be protected from any tampering or unneeded reads. It holds the private key for the Docker server certificate. Hence, it must have permissions of `400` to maintain the integrity of the Docker server certificate. | ```chmod 400 {Docker server certificate key file path}``` This would set the Docker server certificate key file permissions to `400`. | +|Ensure that Docker socket file ownership is set to root:docker
                  (3.15) |Description: Docker daemon runs as `root`. The default Unix socket hence must be owned by `root`. If any other user or process owns this socket, then it might be possible for that non-privileged user or process to interact with Docker daemon. Also, such a non-privileged user or process might interact with containers. This is neither secure nor desired behavior. Additionally, the Docker installer creates a Unix group called `docker`. You can add users to this group, and then those users would be able to read and write to default Docker Unix socket. The membership to the `docker` group is tightly controlled by the system administrator. If any other group owns this socket, then it might be possible for members of that group to interact with Docker daemon. Also, such a group might not be as tightly controlled as the `docker` group. This is neither secure nor desired behavior. Hence, the default Docker Unix socket file must be owned by `root` and group-owned by `docker` to maintain the integrity of the socket file. | ```chown root:docker /var/run/docker.sock``` This would set the ownership to `root` and group-ownership to `docker` for default Docker socket file. | +|Ensure that Docker socket file permissions are set to `660` or more restrictive
                   (3.16) |Description: Only `root` and members of the `docker` group should be allowed to read and write to the default Docker Unix socket. Hence, the Docker socket file must have permissions of `660` or more restrictive. | ```chmod 660 /var/run/docker.sock``` This would set the file permissions of the Docker socket file to `660`. | +|Ensure that daemon.json file ownership is set to root:root
                  (3.17) |Description: `daemon.json` file contains sensitive parameters that may alter the behavior of docker daemon. Hence, it should be owned and group-owned by `root` to maintain the integrity of the file. | ```chown root:root /etc/docker/daemon.json``` This would set the ownership and group-ownership for the file to `root`. | +|Ensure that daemon.json file permissions are set to 644 or more restrictive
                  (3.18) |Description: `daemon.json` file contains sensitive parameters that may alter the behavior of docker daemon. Hence, it should be writable only by `root` to maintain the integrity of the file. | ```chmod 644 /etc/docker/daemon.json``` This would set the file permissions for this file to `644`. | +|Ensure that /etc/default/docker file ownership is set to root:root
                  (3.19) |Description: `/etc/default/docker` file contains sensitive parameters that may alter the behavior of docker daemon. Hence, it should be owned and group-owned by `root` to maintain the integrity of the file. | ```chown root:root /etc/default/docker``` This would set the ownership and group-ownership for the file to `root`. | +|Ensure that /etc/default/docker file permissions are set to 644 or more restrictive
                  (3.20) |Description: /etc/default/docker file contains sensitive parameters that may alter the behavior of docker daemon. Hence, it should be writable only by `root` to maintain the integrity of the file. | ```chmod 644 /etc/default/docker``` This would set the file permissions for this file to `644`. | +|Ensure a user for the container has been created
                   (4.01) |Description: It's a good practice to run the container as a non-root user, if possible. Though user namespace mapping is now available, if a user is already defined in the container image, the container is run as that user by default and specific user namespace remapping isn't required. |Ensure that the Dockerfile for the container image contains: `USER {username or ID}` where username or ID refers to the user that could be found in the container base image. If there's no specific user created in the container base image, then add a `useradd` command to add the specific user before the USER instruction. | +|Ensure HEALTHCHECK instructions have been added to the container image
                  (4.06) |Description: One of the important security triads is availability. Adding `HEALTHCHECK` instruction to your container image ensures that the docker engine periodically checks the running container instances against that instruction to ensure that the instances are still working. Based on the reported health status, the docker engine could then exit non-working containers and instantiate new ones. |Follow Docker documentation and rebuild your container image with `HEALTHCHECK` instruction. | +|Ensure either SELinux or AppArmor is enabled as appropriate
(5.01-2) |Description: AppArmor protects the Linux OS and applications from various threats by enforcing a security policy, also known as an AppArmor profile. You can create your own AppArmor profile for containers or use Docker's default AppArmor profile. This would enforce security policies on the containers as defined in the profile. SELinux provides a Mandatory Access Control (MAC) system that greatly augments the default Discretionary Access Control (DAC) model. You can thus add an extra layer of safety by enabling SELinux on your Linux host, if applicable. |After enabling the relevant Mandatory Access Control plugin for your distro, run containers as ```docker run --interactive --tty --security-opt="apparmor:PROFILENAME" centos /bin/bash``` for AppArmor or ```docker run --interactive --tty --security-opt label=level:TopSecret centos /bin/bash``` for SELinux. | +|Ensure Linux Kernel Capabilities are restricted within containers
(5.03) |Description: Docker supports the addition and removal of capabilities, allowing the use of a non-default profile. This may make Docker more secure through capability removal, or less secure through the addition of capabilities. It's thus recommended to remove all capabilities except those explicitly required for your container process. For example, the following capabilities are usually not needed for the container process: ```NET_ADMIN SYS_ADMIN SYS_MODULE``` |Execute the below command to add needed capabilities: ```$> docker run --cap-add={"Capability 1","Capability 2"}``` For example, ```docker run --interactive --tty --cap-add={"NET_ADMIN","SYS_ADMIN"} centos:latest /bin/bash``` Execute the below command to drop unneeded capabilities: ```$> docker run --cap-drop={"Capability 1","Capability 2"}``` For example, ```docker run --interactive --tty --cap-drop={"SETUID","SETGID"} centos:latest /bin/bash``` Alternatively, you may choose to drop all capabilities and add only the needed ones: ```$> docker run --cap-drop=all --cap-add={"Capability 1","Capability 2"}``` For example, ```docker run --interactive --tty --cap-drop=all --cap-add={"NET_ADMIN","SYS_ADMIN"} centos:latest /bin/bash``` | +|Ensure privileged containers aren't used
                  (5.04) |Description: The `--privileged` flag gives all capabilities to the container, and it also lifts all the limitations enforced by the device cgroup controller. In other words, the container can then do almost everything that the host can do. This flag exists to allow special use-cases, like running Docker within Docker. |Don't run container with the `--privileged` flag. For example, don't start a container as below: ```docker run --interactive --tty --privileged centos /bin/bash``` | +|Ensure sensitive host system directories aren't mounted on containers
(5.05) |Description: If sensitive directories are mounted in read-write mode, it would be possible to make changes to files within those sensitive directories. The changes might have security implications, or they might be unwarranted changes that could put the Docker host in a compromised state. |Don't mount sensitive host directories on containers, especially in read-write mode. | +|Ensure the host's network namespace isn't shared
                  (5.09) |Description: This is potentially dangerous. It allows the container process to open low-numbered ports like any other `root` process. It also allows the container to access network services like D-bus on the Docker host. Thus, a container process can potentially do unexpected things such as shutting down the Docker host. You shouldn't use this option. |Don't pass `--net=host` option when starting the container. | +|Ensure memory usage for container is limited
(5.10) |Description: By default, a container can use all of the memory on the host. You can use the memory limit mechanism to prevent a denial of service arising from one container consuming all of the host’s resources such that other containers on the same host cannot perform their intended functions. Having no limit on memory can lead to issues where one container can easily make the whole system unstable and as a result unusable. |Run the container with only as much memory as required. Always run the container using the `--memory` argument. For example, you could run a container as below: ```docker run --interactive --tty --memory 256m centos /bin/bash``` In the above example, the container is started with a memory limit of 256 MB. Note: The output of the below command returns values in scientific notation if memory limits are in place. ```docker inspect --format='{{.Config.Memory}}' 7c5a2d4c7fe0``` For example, if the memory limit is set to `256 MB` for the above container instance, the output of the above command would be `2.68435456e+08` and NOT `256m`. You should convert this value using a scientific calculator or programmatic methods. | +|Ensure the container's root filesystem is mounted as read only
(5.12) |Description: Enabling this option forces containers at runtime to explicitly define their data writing strategy to persist or not persist their data. This also reduces security attack vectors since the container instance's filesystem cannot be tampered with or written to unless it has explicit read-write permissions on its filesystem folder and directories. |Add a `--read-only` flag at a container's runtime to enforce the container's root filesystem to be mounted as read only. ```docker run --read-only``` Enabling the `--read-only` option at a container's runtime should be used by administrators to force a container's executable processes to only write container data to explicit storage locations during the container's runtime. Examples of explicit storage locations during a container's runtime include, but aren't limited to: 1. Use the `--tmpfs` option to mount a temporary file system for non-persistent data writes. ```docker run --interactive --tty --read-only --tmpfs "/run" --tmpfs "/tmp" centos /bin/bash``` 2. Enabling Docker `rw` mounts at a container's runtime to persist container data directly on the Docker host filesystem. ```docker run --interactive --tty --read-only -v /opt/app/data:/run/app/data:rw centos /bin/bash``` 3. Utilizing Docker shared-storage volume plugins for Docker data volume to persist container data. ```docker volume create -d convoy --opt o=size=20GB my-named-volume``` ```docker run --interactive --tty --read-only -v my-named-volume:/run/app/data centos /bin/bash``` 4. Transmitting container data outside of the Docker host during the container's runtime so that container data persists. Examples include hosted databases, network file shares, and APIs. | +|Ensure incoming container traffic is bound to a specific host interface
(5.13) |Description: If you have multiple network interfaces on your host machine, the container can accept connections on the exposed ports on any network interface. This might not be desired and might not be secure. Many times a particular interface is exposed externally and services such as intrusion detection, intrusion prevention, firewall, load balancing, etc. are run on those interfaces to screen incoming public traffic. Hence, you shouldn't accept incoming connections on any interface. You should only allow incoming connections from a particular external interface. |Bind the container port to a specific host interface on the desired host port. For example, ```docker run --detach --publish 10.2.3.4:49153:80 nginx``` In the example above, container port `80` is bound to host port `49153` and accepts incoming connections only on the `10.2.3.4` external interface. | +|Ensure 'on-failure' container restart policy is set to '5' or lower
(5.14) |Description: If you indefinitely keep trying to start the container, it could possibly lead to a denial of service on the host. It could be an easy way to do a distributed denial of service attack, especially if you have many containers on the same host. Additionally, ignoring the exit status of the container and `always` attempting to restart the container leads to non-investigation of the root cause behind containers getting terminated. If a container gets terminated, you should investigate the reason behind it instead of just attempting to restart it indefinitely. Thus, it's recommended to use the `on-failure` restart policy and limit it to a maximum of `5` restart attempts. |If a container is desired to be restarted on its own, then, for example, you could start the container as below: ```docker run --detach --restart=on-failure:5 nginx``` | +|Ensure the host's process namespace isn't shared
(5.15) |Description: PID namespace provides separation of processes. The PID namespace removes the view of the system processes, and allows process IDs to be reused, including PID `1`. If the host's PID namespace is shared with the container, it would basically allow processes within the container to see all of the processes on the host system. This breaks the benefit of process level isolation between the host and the containers. Someone having access to the container can eventually know all the processes running on the host system and can even kill the host system processes from within the container. This can be catastrophic. Hence, don't share the host's process namespace with the containers. |Don't start a container with `--pid=host` argument. For example, don't start a container as below: ```docker run --interactive --tty --pid=host centos /bin/bash``` | +|Ensure the host's IPC namespace isn't shared
(5.16) |Description: IPC namespace provides separation of IPC between the host and containers. If the host's IPC namespace is shared with the container, it would basically allow processes within the container to see all of the IPC on the host system. This breaks the benefit of IPC level isolation between the host and the containers. Someone having access to the container can eventually manipulate the host IPC. This can be catastrophic. Hence, don't share the host's IPC namespace with the containers. |Don't start a container with `--ipc=host` argument. For example, don't start a container as below: ```docker run --interactive --tty --ipc=host centos /bin/bash``` | +|Ensure host devices aren't directly exposed to containers
(5.17) |Description: The `--device` option exposes the host devices to the containers and consequently, the containers can directly access such host devices. You would not require the container to run in `privileged` mode to access and manipulate the host devices. By default, the container will be able to read, write and mknod these devices. Additionally, it's possible for containers to remove block devices from the host. Hence, don't expose host devices to containers directly. If you must expose a host device to a container, use the sharing permissions appropriately: - r - read only - w - writable - m - mknod allowed |Don't directly expose the host devices to containers. If you must expose host devices to containers, use the correct set of permissions. For example, don't start a container as below: ```docker run --interactive --tty --device=/dev/tty0:/dev/tty0:rwm --device=/dev/temp_sda:/dev/temp_sda:rwm centos bash``` For example, share the host device with correct permissions: ```docker run --interactive --tty --device=/dev/tty0:/dev/tty0:rw --device=/dev/temp_sda:/dev/temp_sda:r centos bash``` | +|Ensure mount propagation mode isn't set to shared
(5.19) |Description: A shared mount is replicated at all mounts and the changes made at any mount point are propagated to all mounts. Mounting a volume in shared mode doesn't prevent any other container from mounting and making changes to that volume. This might be catastrophic if the mounted volume is sensitive to changes. Don't set mount propagation mode to shared unless needed. |Don't mount volumes in shared mode propagation. For example, don't start a container as below: ```docker run --volume=/hostPath:/containerPath:shared``` | +|Ensure the host's UTS namespace isn't shared
                  (5.20) |Description: Sharing the UTS namespace with the host provides full permission to the container to change the hostname of the host. This is insecure and shouldn't be allowed. |Don't start a container with `--uts=host` argument. For example, don't start a container as below: ```docker run --rm --interactive --tty --uts=host rhel7.2``` | +|Ensure cgroup usage is confirmed
(5.24) |Description: System administrators typically define cgroups under which containers are supposed to run. Even if cgroups aren't explicitly defined by the system administrators, containers run under the `docker` cgroup by default. At run-time, it's possible to attach to a cgroup other than the one that was expected to be used. This usage should be monitored and confirmed. By attaching to a different cgroup than the one that is expected, excess permissions and resources might be granted to the container, which can prove to be unsafe. |Don't use the `--cgroup-parent` option in the `docker run` command unless needed. | +|Ensure the container is restricted from acquiring additional privileges
                  (5.25) |Description: A process can set the `no_new_priv` bit in the kernel. It persists across fork, clone and execve. The `no_new_priv` bit ensures that the process or its children processes don't gain any additional privileges via suid or sgid bits. This way numerous dangerous operations become a lot less dangerous because there's no possibility of subverting privileged binaries. |For example, you should start your container as below: ```docker run --rm -it --security-opt=no-new-privileges ubuntu bash``` | +|Ensure container health is checked at runtime
(5.26) |Description: One of the important security triads is availability. If the container image you're using does not have a pre-defined `HEALTHCHECK` instruction, use the `--health-cmd` parameter to check container health at runtime. Based on the reported health status, you could take necessary actions. |Run the container using `--health-cmd` and other parameters. For example, ```docker run -d --health-cmd='stat /etc/passwd || exit 1' nginx``` | +|Ensure PIDs cgroup limit is used
(5.28) |Description: Attackers could launch a fork bomb with a single command inside the container. This fork bomb can crash the entire system and requires a restart of the host to make the system functional again. PIDs cgroup `--pids-limit` will prevent this kind of attack by restricting the number of forks that can happen inside a container at a given time. |Use the `--pids-limit` flag when launching the container with an appropriate value. For example, ```docker run -it --pids-limit 100``` In the above example, the number of processes allowed to run at any given time is set to 100. After a limit of 100 concurrently running processes is reached, docker would restrict any new process creation. | +|Ensure Docker's default bridge docker0 isn't used
(5.29) |Description: Docker connects virtual interfaces created in the bridge mode to a common bridge called `docker0`. This default networking model is vulnerable to ARP spoofing and MAC flooding attacks since there's no filtering applied. |Follow the Docker documentation and set up a user-defined network. Run all the containers in the defined network. | +|Ensure the host's user namespace isn't shared
(5.30) |Description: User namespaces ensure that a root process inside the container will be mapped to a non-root process outside the container. Sharing the user namespaces of the host with the container thus doesn't isolate users on the host from users in the containers. |Don't share user namespaces between host and containers. For example, don't run a container as below: ```docker run --rm -it --userns=host ubuntu bash``` | +|Ensure the Docker socket isn't mounted inside any containers
(5.31) |Description: If the Docker socket is mounted inside a container, it would allow processes running within the container to execute Docker commands, which effectively allows for full control of the host. |Ensure that no containers mount `docker.sock` as a volume. | +|Ensure swarm services are bound to a specific host interface
(7.03) |Description: When a swarm is initialized, the default value for the `--listen-addr` flag is `0.0.0.0:2377`, which means that the swarm services will listen on all interfaces on the host. If a host has multiple network interfaces, this may be undesirable because it may expose the Docker swarm services to networks that aren't involved in the operation of the swarm. By passing a specific IP address to `--listen-addr`, a specific network interface can be specified, limiting this exposure. |Remediation of this requires re-initialization of the swarm, specifying a specific interface for the `--listen-addr` parameter. | +|Ensure data exchanged between containers are encrypted on different nodes on the overlay network
(7.04) |Description: By default, data exchanged between containers on different nodes on the overlay network isn't encrypted. This could potentially expose traffic between the container nodes. |Create the overlay network with the `--opt encrypted` flag. | +|Ensure swarm manager is run in auto-lock mode
(7.06) |Description: When Docker restarts, both the TLS key used to encrypt communication among swarm nodes, and the key used to encrypt and decrypt Raft logs on disk, are loaded into each manager node's memory. You should protect the mutual TLS encryption key and the key used to encrypt and decrypt Raft logs at rest. This protection could be enabled by initializing the swarm with the `--autolock` flag. With `--autolock` enabled, when Docker restarts, you must unlock the swarm first, using a key encryption key generated by Docker when the swarm was initialized. |If you're initializing the swarm, use the below command. ```docker swarm init --autolock``` If you want to set `--autolock` on an existing swarm manager node, use the below command. ```docker swarm update --autolock``` | + +> [!NOTE] +> Availability of specific Azure Policy guest configuration settings may vary in Azure Government +> and other national clouds. + +## Next steps + +Additional articles about Azure Policy and guest configuration: + +- [Understand the guest configuration feature of Azure Policy](../concepts/guest-configuration.md). +- [Regulatory Compliance](../concepts/regulatory-compliance.md) overview. +- Review other examples at [Azure Policy samples](./index.md). +- Review [Understanding policy effects](../concepts/effects.md). +- Learn how to [remediate non-compliant resources](../how-to/remediate-resources.md). diff --git a/articles/governance/policy/toc.yml b/articles/governance/policy/toc.yml index 2df67a115ffef..a4d29eb9bb41d 100644 --- a/articles/governance/policy/toc.yml +++ b/articles/governance/policy/toc.yml @@ -165,6 +165,8 @@ href: ./samples/gov-nist-sp-800-171-r2.md - name: Compute security baselines items: + - name: Docker host security baseline + href: ./samples/guest-configuration-baseline-docker.md - name: Linux security baseline href: ./samples/guest-configuration-baseline-linux.md - name: Windows security baseline diff --git a/articles/governance/policy/tutorials/policy-as-code-github.md b/articles/governance/policy/tutorials/policy-as-code-github.md index bd1e853fe6660..fb81466535681 100644 --- a/articles/governance/policy/tutorials/policy-as-code-github.md +++ b/articles/governance/policy/tutorials/policy-as-code-github.md @@ -1,8 +1,10 @@ --- title: "Tutorial: Implement Azure Policy as Code with GitHub" description: In this tutorial, you implement an Azure Policy as Code workflow with export, GitHub actions, and GitHub workflows -ms.date: 08/17/2021 +ms.date: 06/07/2022 ms.topic: tutorial +ms.author: timwarner +author: timwarner-msft --- # Tutorial: Implement Azure Policy as Code with GitHub @@ -25,6 +27,7 @@ resources, the quickstart articles explain how to do so. [free account](https://azure.microsoft.com/free/) before you begin. - Review [Design an Azure Policy as Code workflow](../concepts/policy-as-code.md) to have an understanding of the design patterns used in this tutorial. +- Your account must be assigned the **Owner** role at the management group or subscription scope. For more information on Azure RBAC permissions in Azure Policy, see [Overview of Azure Policy](../overview.md). 
### Export Azure Policy objects from the Azure portal diff --git a/articles/guides/operations/TOC.yml b/articles/guides/operations/TOC.yml index b6988793ebc61..d476921fb0569 100644 --- a/articles/guides/operations/TOC.yml +++ b/articles/guides/operations/TOC.yml @@ -59,5 +59,5 @@ - name: Pricing calculator href: https://azure.microsoft.com/pricing/calculator/ - name: Microsoft Azure portal overview - href: /azure/azure-portal/azure-portal-overview - - name: References + href: ../../azure-portal/azure-portal-overview.md + - name: References \ No newline at end of file diff --git a/articles/guides/operations/azure-operations-guide.md b/articles/guides/operations/azure-operations-guide.md index 9605c3c974f82..ffe489b7128fc 100644 --- a/articles/guides/operations/azure-operations-guide.md +++ b/articles/guides/operations/azure-operations-guide.md @@ -137,7 +137,7 @@ One of the benefits of using Azure is that you can deploy your applications into ### Azure portal -The Azure portal is a web-based application that can be used to create, manage, and remove Azure resources and services. The Azure portal is located at [portal.azure.com](https://portal.azure.com). It includes a customizable dashboard and tooling for managing Azure resources. It also provides billing and subscription information. For more information, see [Microsoft Azure portal overview](https://azure.microsoft.com/documentation/articles/azure-portal-overview/) and [Manage Azure resources through portal](../../azure-resource-manager/management/manage-resources-portal.md). +The Azure portal is a web-based application that can be used to create, manage, and remove Azure resources and services. The Azure portal is located at [portal.azure.com](https://portal.azure.com). It includes a customizable dashboard and tooling for managing Azure resources. It also provides billing and subscription information. For more information, see [Microsoft Azure portal overview](/azure/azure-portal/azure-portal-overview) and [Manage Azure resources through portal](../../azure-resource-manager/management/manage-resources-portal.md). 
### Resources diff --git a/articles/hdinsight/.openpublishing.redirection.hdinsight.json b/articles/hdinsight/.openpublishing.redirection.hdinsight.json index 1ccb324afea97..773c8439d83ae 100644 --- a/articles/hdinsight/.openpublishing.redirection.hdinsight.json +++ b/articles/hdinsight/.openpublishing.redirection.hdinsight.json @@ -582,8 +582,8 @@ }, { "source_path_from_root": "/articles/hdinsight/hdinsight-hadoop-r-server-compute-contexts.md", - "redirect_url": "/azure/hdinsight/r-server/r-server-compute-contexts", - "redirect_document_id": true + "redirect_url": "/azure/hdinsight/hdinsight-overview", + "redirect_document_id": false }, { "source_path_from_root": "/articles/hdinsight/hdinsight-hadoop-r-server-get-started.md", @@ -602,7 +602,7 @@ }, { "source_path_from_root": "/articles/hdinsight/hdinsight-hadoop-r-server-storage.md", - "redirect_url": "/azure/hdinsight/r-server/r-server-storage", + "redirect_url": "/azure/hdinsight/hdinsight-overview", "redirect_document_id": true }, { @@ -1179,6 +1179,61 @@ "source_path_from_root": "/articles/hdinsight/interactive-query/sizing-guidelines.md", "redirect_url": "/azure/hdinsight/interactive-query/hive-llap-sizing-guide", "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/hdinsight/r-server/machine-learning-services-quickstart-job-rconsole.md", + "redirect_url": "azure/hdinsight/hdinsight-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/hdinsight/r-server/machine-learning-services-quickstart-job-rstudio.md", + "redirect_url": "azure/hdinsight/hdinsight-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/hdinsight/r-server/r-server/r-server-overview.md", + "redirect_url": "azure/hdinsight/hdinsight-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/hdinsight/r-server/r-server/quickstart-resource-manager-template.md", + "redirect_url": "azure/hdinsight/hdinsight-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/hdinsight/r-server/r-server/ml-services-tutorial-spark-compute.md", + "redirect_url": "azure/hdinsight/hdinsight-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/hdinsight/r-server/r-server/r-server-submit-jobs-r-tools-vs.md", + "redirect_url": "azure/hdinsight/hdinsight-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/hdinsight/r-server/r-server/r-server-compute-contexts.md", + "redirect_url": "azure/hdinsight/hdinsight-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/hdinsight/r-server/r-server/hdinsight-hadoop-r-scaler-sparkr.md", + "redirect_url": "azure/hdinsight/hdinsight-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/hdinsight/r-server/r-server/r-server-hdinsight-manage.md", + "redirect_url": "azure/hdinsight/hdinsight-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/hdinsight/r-server/r-server/r-server-operationalize.md", + "redirect_url": "azure/hdinsight/hdinsight-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/hdinsight/r-server/r-server/r-server-storage.md", + "redirect_url": "azure/hdinsight/hdinsight-overview", + "redirect_document_id": false } ] } diff --git a/articles/hdinsight/TOC.yml b/articles/hdinsight/TOC.yml index fd9c7f7e4b75a..297d0368eb090 100644 --- a/articles/hdinsight/TOC.yml +++ 
b/articles/hdinsight/TOC.yml @@ -921,43 +921,6 @@ items: items: - name: Hive LLAP Workload Management commands href: ./interactive-query/workload-management-commands.md -- name: ML Services (Retired) - items: - - name: Overview - items: - - name: What is ML Services in HDInsight? - href: ./r-server/r-server-overview.md - - name: Quickstarts - items: - - name: Create ML Services cluster - ARM Template - displayName: Resource Manager - href: ./r-server/quickstart-resource-manager-template.md - - name: Sample job - RStudio Server - href: ./r-server/machine-learning-services-quickstart-job-rstudio.md - - name: Sample job - R console - href: ./r-server/machine-learning-services-quickstart-job-rconsole.md - - name: Tutorials - items: - - name: R in Spark compute context in HDInsight - href: ./r-server/ml-services-tutorial-spark-compute.md - - name: Use R tools for Visual Studio - href: ./r-server/r-server-submit-jobs-r-tools-vs.md - - name: How-to guides - items: - - name: Develop - items: - - name: Compute context - href: ./r-server/r-server-compute-contexts.md - - name: ScaleR and SparkR - href: ./hdinsight-hadoop-r-scaler-sparkr.md - - name: Manage - items: - - name: Manage ML Services cluster - href: ./r-server/r-server-hdinsight-manage.md - - name: Operationalize ML Services cluster - href: ./r-server/r-server-operationalize.md - - name: Storage options - href: ./r-server/r-server-storage.md - name: Apache Storm items: - name: Overview diff --git a/articles/hdinsight/cluster-management-best-practices.md b/articles/hdinsight/cluster-management-best-practices.md index 03932e422a88c..66c6e8ae608ba 100644 --- a/articles/hdinsight/cluster-management-best-practices.md +++ b/articles/hdinsight/cluster-management-best-practices.md @@ -4,7 +4,7 @@ description: Learn best practices for managing HDInsight clusters. 
ms.service: hdinsight ms.custom: hdinsightactive ms.topic: conceptual -ms.date: 04/11/2020 +ms.date: 05/30/2022 --- # HDInsight cluster management best practices diff --git a/articles/hdinsight/domain-joined/apache-domain-joined-create-configure-enterprise-security-cluster.md b/articles/hdinsight/domain-joined/apache-domain-joined-create-configure-enterprise-security-cluster.md index a162805d63d47..14169b5fdfcd0 100644 --- a/articles/hdinsight/domain-joined/apache-domain-joined-create-configure-enterprise-security-cluster.md +++ b/articles/hdinsight/domain-joined/apache-domain-joined-create-configure-enterprise-security-cluster.md @@ -4,7 +4,7 @@ description: Learn how to create and configure Enterprise Security Package clust services: hdinsight ms.service: hdinsight ms.topic: how-to -ms.date: 12/10/2019 +ms.date: 05/31/2022 ms.custom: devx-track-azurepowershell --- diff --git a/articles/hdinsight/hadoop/apache-hadoop-connect-excel-power-query.md b/articles/hdinsight/hadoop/apache-hadoop-connect-excel-power-query.md index 6b98d2f498362..9fd09a3eccf28 100644 --- a/articles/hdinsight/hadoop/apache-hadoop-connect-excel-power-query.md +++ b/articles/hdinsight/hadoop/apache-hadoop-connect-excel-power-query.md @@ -4,7 +4,7 @@ description: Learn how to take advantage of business intelligence components and ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive -ms.date: 12/17/2019 +ms.date: 05/30/2022 --- # Connect Excel to Apache Hadoop by using Power Query diff --git a/articles/hdinsight/hadoop/apache-hadoop-connect-hive-jdbc-driver.md b/articles/hdinsight/hadoop/apache-hadoop-connect-hive-jdbc-driver.md index 2cc8395017b17..c9c7b59e21945 100644 --- a/articles/hdinsight/hadoop/apache-hadoop-connect-hive-jdbc-driver.md +++ b/articles/hdinsight/hadoop/apache-hadoop-connect-hive-jdbc-driver.md @@ -4,7 +4,7 @@ description: Use the JDBC driver from a Java application to submit Apache Hive q ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive,hdiseo17may2017,seoapr2020 -ms.date: 04/20/2020 +ms.date: 06/08/2022 --- # Query Apache Hive through the JDBC driver in HDInsight diff --git a/articles/hdinsight/hadoop/apache-hadoop-develop-deploy-java-mapreduce-linux.md b/articles/hdinsight/hadoop/apache-hadoop-develop-deploy-java-mapreduce-linux.md index 121f64bad2719..1c7362bf4a3da 100644 --- a/articles/hdinsight/hadoop/apache-hadoop-develop-deploy-java-mapreduce-linux.md +++ b/articles/hdinsight/hadoop/apache-hadoop-develop-deploy-java-mapreduce-linux.md @@ -4,7 +4,7 @@ description: Learn how to use Apache Maven to create a Java-based MapReduce appl ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive,hdiseo17may2017, devx-track-java -ms.date: 01/16/2020 +ms.date: 05/31/2022 --- # Develop Java MapReduce programs for Apache Hadoop on HDInsight @@ -279,4 +279,4 @@ In this document, you have learned how to develop a Java MapReduce job. 
See the * [Use Apache Hive with HDInsight](hdinsight-use-hive.md) * [Use MapReduce with HDInsight](hdinsight-use-mapreduce.md) -* [Java Developer Center](https://azure.microsoft.com/develop/java/) \ No newline at end of file +* [Java Developer Center](https://azure.microsoft.com/develop/java/) diff --git a/articles/hdinsight/hadoop/apache-hadoop-hive-pig-udf-dotnet-csharp.md b/articles/hdinsight/hadoop/apache-hadoop-hive-pig-udf-dotnet-csharp.md index 2e70ea7197686..f2d8420219c3d 100644 --- a/articles/hdinsight/hadoop/apache-hadoop-hive-pig-udf-dotnet-csharp.md +++ b/articles/hdinsight/hadoop/apache-hadoop-hive-pig-udf-dotnet-csharp.md @@ -4,7 +4,7 @@ description: Learn how to use C# user-defined functions (UDF) with Apache Hive a ms.service: hdinsight ms.topic: how-to ms.custom: "hdinsightactive, devx-track-csharp" -ms.date: 12/06/2019 +ms.date: 05/30/2022 --- # Use C# user-defined functions with Apache Hive and Apache Pig on Apache Hadoop in HDInsight diff --git a/articles/hdinsight/hadoop/apache-hadoop-on-premises-migration-best-practices-architecture.md b/articles/hdinsight/hadoop/apache-hadoop-on-premises-migration-best-practices-architecture.md index 2adc3654c99d8..7f4378726d7bb 100644 --- a/articles/hdinsight/hadoop/apache-hadoop-on-premises-migration-best-practices-architecture.md +++ b/articles/hdinsight/hadoop/apache-hadoop-on-premises-migration-best-practices-architecture.md @@ -5,7 +5,7 @@ ms.reviewer: ashishth ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive -ms.date: 12/06/2019 +ms.date: 05/27/2019 --- # Migrate on-premises Apache Hadoop clusters to Azure HDInsight - architecture best practices @@ -115,4 +115,4 @@ Some HDInsight Hive metastore best practices are as follows: Read the next article in this series: -- [Infrastructure best practices for on-premises to Azure HDInsight Hadoop migration](apache-hadoop-on-premises-migration-best-practices-infrastructure.md) \ No newline at end of file +- [Infrastructure best practices for on-premises to Azure HDInsight Hadoop migration](apache-hadoop-on-premises-migration-best-practices-infrastructure.md) diff --git a/articles/hdinsight/hadoop/apache-hadoop-use-hive-ambari-view.md b/articles/hdinsight/hadoop/apache-hadoop-use-hive-ambari-view.md index 1a79387b1a7df..14d33bb4530fc 100644 --- a/articles/hdinsight/hadoop/apache-hadoop-use-hive-ambari-view.md +++ b/articles/hdinsight/hadoop/apache-hadoop-use-hive-ambari-view.md @@ -4,7 +4,7 @@ description: Learn how to use the Hive View from your web browser to submit Hive ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive,seoapr2020 -ms.date: 04/23/2020 +ms.date: 06/09/2022 --- # Use Apache Ambari Hive View with Apache Hadoop in HDInsight diff --git a/articles/hdinsight/hadoop/connect-install-beeline.md b/articles/hdinsight/hadoop/connect-install-beeline.md index e7531d85d3c6c..2bc41e036db26 100644 --- a/articles/hdinsight/hadoop/connect-install-beeline.md +++ b/articles/hdinsight/hadoop/connect-install-beeline.md @@ -4,7 +4,7 @@ description: Learn how to connect to the Apache Beeline client to run Hive queri ms.service: hdinsight ms.topic: how-to ms.custom: contperf-fy21q1 -ms.date: 04/07/2021 +ms.date: 05/30/2022 --- # Connect to HiveServer2 using Beeline or install Beeline locally to connect from your local diff --git a/articles/hdinsight/hadoop/hdinsight-troubleshoot-converting-service-principal-certificate.md b/articles/hdinsight/hadoop/hdinsight-troubleshoot-converting-service-principal-certificate.md index 442e89d66c263..3a7e0f30f01f8 100644 --- 
a/articles/hdinsight/hadoop/hdinsight-troubleshoot-converting-service-principal-certificate.md +++ b/articles/hdinsight/hadoop/hdinsight-troubleshoot-converting-service-principal-certificate.md @@ -3,7 +3,7 @@ title: Converting certificate contents to base-64 - Azure HDInsight description: Converting service principal certificate contents to base-64 encoded string format in Azure HDInsight ms.service: hdinsight ms.topic: troubleshooting -ms.date: 07/31/2019 +ms.date: 05/30/2022 ms.custom: devx-track-csharp --- @@ -47,4 +47,4 @@ namespace ConsoleApplication ## Next steps -[!INCLUDE [troubleshooting next steps](../includes/hdinsight-troubleshooting-next-steps.md)] \ No newline at end of file +[!INCLUDE [troubleshooting next steps](../includes/hdinsight-troubleshooting-next-steps.md)] diff --git a/articles/hdinsight/hadoop/hdinsight-troubleshoot-out-disk-space.md b/articles/hdinsight/hadoop/hdinsight-troubleshoot-out-disk-space.md index 69c8d242dda94..55ab151162d4f 100644 --- a/articles/hdinsight/hadoop/hdinsight-troubleshoot-out-disk-space.md +++ b/articles/hdinsight/hadoop/hdinsight-troubleshoot-out-disk-space.md @@ -3,7 +3,7 @@ title: Cluster node runs out of disk space in Azure HDInsight description: Troubleshooting Apache Hadoop cluster node disk space issues in Azure HDInsight. ms.service: hdinsight ms.topic: troubleshooting -ms.date: 04/30/2020 +ms.date: 05/30/2022 --- # Scenario: Cluster node runs out of disk space in Azure HDInsight @@ -24,7 +24,7 @@ Apache Yarn application cache may have consumed all available disk space. Your S 1. Use Ambari UI to determine which node is running out of disk space. -1. Determine which folder in the troubling node contributes to most of the disk space. SSH to the node first, then run `df` to list disk usage for all mounts. Usually it is `/mnt` which is a temp disk used by OSS. You can enter into a folder, then type `sudo du -hs` to show summarized file sizes under a folder. If you see a folder similar to `/mnt/resource/hadoop/yarn/local/usercache/livy/appcache/application_1537280705629_0007`, this means the application is still running. This could be due to RDD persistence or intermediate shuffle files. +1. Determine which folder in the troubling node contributes to most of the disk space. SSH to the node first, then run `df` to list disk usage for all mounts. Usually it's `/mnt` that is a temp disk used by OSS. You can enter into a folder, then type `sudo du -hs` to show summarized file sizes under a folder. If you see a folder similar to `/mnt/resource/hadoop/yarn/local/usercache/livy/appcache/application_1537280705629_0007`, this output means the application is still running. This output could be due to RDD persistence or intermediate shuffle files. 1. To mitigate the issue, kill the application, which will release disk space used by that application. @@ -32,15 +32,15 @@ Apache Yarn application cache may have consumed all available disk space. Your S Open the Ambari UI Navigate to YARN --> Configs --> Advanced. - Add the following 2 properties to the custom yarn-site.xml section and save: + Add the following two properties to the custom yarn-site.xml section and save: ``` yarn.nodemanager.localizer.cache.target-size-mb=2048 yarn.nodemanager.localizer.cache.cleanup.interval-ms=300000 ``` -1. If the above does not permanently fix the issue, optimize your application. +1. If the above doesn't permanently fix the issue, optimize your application. 
## Next steps -[!INCLUDE [troubleshooting next steps](../includes/hdinsight-troubleshooting-next-steps.md)] \ No newline at end of file +[!INCLUDE [troubleshooting next steps](../includes/hdinsight-troubleshooting-next-steps.md)] diff --git a/articles/hdinsight/hadoop/hdinsight-use-sqoop.md b/articles/hdinsight/hadoop/hdinsight-use-sqoop.md index 95692530f6a17..43b3ab51499d2 100644 --- a/articles/hdinsight/hadoop/hdinsight-use-sqoop.md +++ b/articles/hdinsight/hadoop/hdinsight-use-sqoop.md @@ -54,7 +54,7 @@ In this article, you use these two datasets to test Sqoop import and export. ## Set up test environment -The cluster, SQL database, and other objects are created through the Azure portal using an Azure Resource Manager template. The template can be found in [Azure quickstart templates](https://azure.microsoft.com/resources/templates/hdinsight-linux-with-sql-database/). The Resource Manager template calls a bacpac package to deploy the table schemas to a SQL database. The bacpac package is located in a public blob container, https://hditutorialdata.blob.core.windows.net/usesqoop/SqoopTutorial-2016-2-23-11-2.bacpac. If you want to use a private container for the bacpac files, use the following values in the template: +The cluster, SQL database, and other objects are created through the Azure portal using an Azure Resource Manager template. The template can be found in [Azure quickstart templates](https://azure.microsoft.com/resources/templates/hdinsight-linux-with-sql-database/). The Resource Manager template calls a bacpac package to deploy the table schemas to a SQL database. If you want to use a private container for the bacpac files, use the following values in the template: ```json "storageKeyType": "Primary", diff --git a/articles/hdinsight/hadoop/troubleshoot-lost-key-vault-access.md b/articles/hdinsight/hadoop/troubleshoot-lost-key-vault-access.md index 9d66738eead20..321fc92b07caa 100644 --- a/articles/hdinsight/hadoop/troubleshoot-lost-key-vault-access.md +++ b/articles/hdinsight/hadoop/troubleshoot-lost-key-vault-access.md @@ -3,7 +3,7 @@ title: Azure HDInsight clusters with disk encryption lose Key Vault access description: Troubleshooting steps and possible resolutions for Key Vault access issues when interacting with Azure HDInsight clusters. 
ms.service: hdinsight ms.topic: troubleshooting -ms.date: 01/30/2020 +ms.date: 05/27/2022 --- # Scenario: Azure HDInsight clusters with disk encryption lose Key Vault access diff --git a/articles/hdinsight/hbase/apache-hbase-backup-replication.md b/articles/hdinsight/hbase/apache-hbase-backup-replication.md index 25f019b89d5c5..eed5c3c66416b 100644 --- a/articles/hdinsight/hbase/apache-hbase-backup-replication.md +++ b/articles/hdinsight/hbase/apache-hbase-backup-replication.md @@ -4,7 +4,7 @@ description: Set up Backup and replication for Apache HBase and Apache Phoenix i ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive -ms.date: 12/19/2019 +ms.date: 05/30/2022 --- # Set up backup and replication for Apache HBase and Apache Phoenix on HDInsight diff --git a/articles/hdinsight/hbase/apache-hbase-phoenix-psql.md b/articles/hdinsight/hbase/apache-hbase-phoenix-psql.md index c2834fdcd7451..6034407bacde2 100644 --- a/articles/hdinsight/hbase/apache-hbase-phoenix-psql.md +++ b/articles/hdinsight/hbase/apache-hbase-phoenix-psql.md @@ -4,7 +4,7 @@ description: Use the psql tool to load bulk load data into Apache Phoenix tables ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive -ms.date: 12/17/2019 +ms.date: 05/30/2022 --- # Bulk load data into Apache Phoenix using psql diff --git a/articles/hdinsight/hbase/apache-hbase-replication.md b/articles/hdinsight/hbase/apache-hbase-replication.md index e8386bc8832fb..d305915d01ffa 100644 --- a/articles/hdinsight/hbase/apache-hbase-replication.md +++ b/articles/hdinsight/hbase/apache-hbase-replication.md @@ -50,7 +50,7 @@ To help you set up the environments, we have created some [Azure Resource Manage ### Set up two virtual networks in two different regions -To use a template that creates two virtual networks in two different regions and the VPN connection between the VNets, select the following **Deploy to Azure** button. The template definition is stored in a [public blob storage](https://hditutorialdata.blob.core.windows.net/hbaseha/azuredeploy.json). +To use a template that creates two virtual networks in two different regions and the VPN connection between the VNets, select the following **Deploy to Azure** button. 
Deploy to Azure button for new cluster diff --git a/articles/hdinsight/hbase/apache-hbase-tutorial-get-started-linux.md b/articles/hdinsight/hbase/apache-hbase-tutorial-get-started-linux.md index c88874214130a..f645fcf954b28 100644 --- a/articles/hdinsight/hbase/apache-hbase-tutorial-get-started-linux.md +++ b/articles/hdinsight/hbase/apache-hbase-tutorial-get-started-linux.md @@ -240,7 +240,7 @@ The HBase REST API is secured via [basic authentication](https://en.wikipedia.or echo "Applying mitigation; starting REST Server" sudo python /usr/lib/python2.7/dist-packages/hdinsight_hbrest/HbaseRestAgent.py else - echo "Rest server already running" + echo "REST server already running" exit 0 fi ``` diff --git a/articles/hdinsight/hbase/hbase-troubleshoot-bindexception-address-use.md b/articles/hdinsight/hbase/hbase-troubleshoot-bindexception-address-use.md index 89ddf58787c9d..506cc5194317a 100644 --- a/articles/hdinsight/hbase/hbase-troubleshoot-bindexception-address-use.md +++ b/articles/hdinsight/hbase/hbase-troubleshoot-bindexception-address-use.md @@ -3,7 +3,7 @@ title: BindException - Address already in use in Azure HDInsight description: BindException - Address already in use in Azure HDInsight ms.service: hdinsight ms.topic: troubleshooting -ms.date: 08/16/2019 +ms.date: 05/30/2022 --- # Scenario: BindException - Address already in use in Azure HDInsight diff --git a/articles/hdinsight/hbase/hbase-troubleshoot-phoenix-no-data.md b/articles/hdinsight/hbase/hbase-troubleshoot-phoenix-no-data.md index fab08580d6bc8..386e7d40531ea 100644 --- a/articles/hdinsight/hbase/hbase-troubleshoot-phoenix-no-data.md +++ b/articles/hdinsight/hbase/hbase-troubleshoot-phoenix-no-data.md @@ -3,7 +3,7 @@ title: HDP upgrade & no data in Apache Phoenix views in Azure HDInsight description: HDP upgrade causes no data in Apache Phoenix views in Azure HDInsight ms.service: hdinsight ms.topic: troubleshooting -ms.date: 08/08/2019 +ms.date: 05/26/2022 --- # Scenario: HDP upgrade causes no data in Apache Phoenix views in Azure HDInsight @@ -30,4 +30,4 @@ If you didn't see your problem or are unable to solve your issue, visit one of t * Connect with [@AzureSupport](https://twitter.com/azuresupport) - the official Microsoft Azure account for improving customer experience. Connecting the Azure community to the right resources: answers, support, and experts. -* If you need more help, you can submit a support request from the [Azure portal](https://portal.azure.com/?#blade/Microsoft_Azure_Support/HelpAndSupportBlade/). Select **Support** from the menu bar or open the **Help + support** hub. For more detailed information, review [How to create an Azure support request](../../azure-portal/supportability/how-to-create-azure-support-request.md). Access to Subscription Management and billing support is included with your Microsoft Azure subscription, and Technical Support is provided through one of the [Azure Support Plans](https://azure.microsoft.com/support/plans/). \ No newline at end of file +* If you need more help, you can submit a support request from the [Azure portal](https://portal.azure.com/?#blade/Microsoft_Azure_Support/HelpAndSupportBlade/). Select **Support** from the menu bar or open the **Help + support** hub. For more detailed information, review [How to create an Azure support request](../../azure-portal/supportability/how-to-create-azure-support-request.md). 
Access to Subscription Management and billing support is included with your Microsoft Azure subscription, and Technical Support is provided through one of the [Azure Support Plans](https://azure.microsoft.com/support/plans/). diff --git a/articles/hdinsight/hdinsight-apache-spark-with-kafka.md b/articles/hdinsight/hdinsight-apache-spark-with-kafka.md index 3870cad638550..1536fb0844343 100644 --- a/articles/hdinsight/hdinsight-apache-spark-with-kafka.md +++ b/articles/hdinsight/hdinsight-apache-spark-with-kafka.md @@ -34,8 +34,6 @@ While you can create an Azure virtual network, Kafka, and Spark clusters manuall Deploy to Azure button for new cluster - The Azure Resource Manager template is located at **https://hditutorialdata.blob.core.windows.net/armtemplates/create-linux-based-kafka-spark-cluster-in-vnet-v4.1.json**. - > [!WARNING] > To guarantee availability of Kafka on HDInsight, your cluster must contain at least three worker nodes. This template creates a Kafka cluster that contains three worker nodes. diff --git a/articles/hdinsight/hdinsight-apps-install-custom-applications.md b/articles/hdinsight/hdinsight-apps-install-custom-applications.md index 874d882322022..37420cdf647d1 100644 --- a/articles/hdinsight/hdinsight-apps-install-custom-applications.md +++ b/articles/hdinsight/hdinsight-apps-install-custom-applications.md @@ -53,7 +53,7 @@ You can see the installation status from the tile pinned to the portal dashboard Deploy to Azure button for new cluster - The Resource Manager template is located at [https://hditutorialdata.blob.core.windows.net/hdinsightapps/create-linux-based-hadoop-cluster-in-hdinsight.json](https://hditutorialdata.blob.core.windows.net/hdinsightapps/create-linux-based-hadoop-cluster-in-hdinsight.json). To learn how to write this Resource Manager template, see [MSDN: Install an HDInsight application](/rest/api/hdinsight/hdinsight-application). + To learn how to write this Resource Manager template, see [MSDN: Install an HDInsight application](/rest/api/hdinsight/hdinsight-application). 2. Follow the instruction to create cluster and install Hue. For more information on creating HDInsight clusters, see [Create Linux-based Hadoop clusters in HDInsight](hdinsight-hadoop-provision-linux-clusters.md). 
diff --git a/articles/hdinsight/hdinsight-authorize-users-to-ambari.md b/articles/hdinsight/hdinsight-authorize-users-to-ambari.md index a1ce9d096891d..02e789b51b470 100644 --- a/articles/hdinsight/hdinsight-authorize-users-to-ambari.md +++ b/articles/hdinsight/hdinsight-authorize-users-to-ambari.md @@ -4,7 +4,7 @@ description: 'How to manage Ambari user and group permissions for HDInsight clus ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive -ms.date: 11/27/2019 +ms.date: 05/27/2022 --- # Authorize users for Apache Ambari Views @@ -266,4 +266,4 @@ We have assigned our Azure AD domain user "hiveuser2" to the *Cluster User* role * [Manage ESP HDInsight clusters](./domain-joined/apache-domain-joined-manage.md) * [Use the Apache Hive View with Apache Hadoop in HDInsight](hadoop/apache-hadoop-use-hive-ambari-view.md) * [Synchronize Azure AD users to the cluster](hdinsight-sync-aad-users-to-cluster.md) -* [Manage HDInsight clusters by using the Apache Ambari REST API](./hdinsight-hadoop-manage-ambari-rest-api.md) \ No newline at end of file +* [Manage HDInsight clusters by using the Apache Ambari REST API](./hdinsight-hadoop-manage-ambari-rest-api.md) diff --git a/articles/hdinsight/hdinsight-business-continuity-architecture.md b/articles/hdinsight/hdinsight-business-continuity-architecture.md index d4d7fdbae1a76..66cbcfba59537 100644 --- a/articles/hdinsight/hdinsight-business-continuity-architecture.md +++ b/articles/hdinsight/hdinsight-business-continuity-architecture.md @@ -4,7 +4,7 @@ description: This article discusses the different possible business continuity a keywords: hadoop high availability ms.service: hdinsight ms.topic: conceptual -ms.date: 10/07/2020 +ms.date: 05/27/2022 --- # Azure HDInsight business continuity architectures @@ -202,4 +202,4 @@ To learn more about the items discussed in this article, see: * [Azure HDInsight business continuity](./hdinsight-business-continuity.md) * [Azure HDInsight highly available solution architecture case study](./hdinsight-high-availability-case-study.md) -* [What is Apache Hive and HiveQL on Azure HDInsight?](./hadoop/hdinsight-use-hive.md) \ No newline at end of file +* [What is Apache Hive and HiveQL on Azure HDInsight?](./hadoop/hdinsight-use-hive.md) diff --git a/articles/hdinsight/hdinsight-business-continuity.md b/articles/hdinsight/hdinsight-business-continuity.md index 9b6c52c85cf35..afa361da287f0 100644 --- a/articles/hdinsight/hdinsight-business-continuity.md +++ b/articles/hdinsight/hdinsight-business-continuity.md @@ -4,7 +4,7 @@ description: This article gives an overview of best practices, single region ava keywords: hadoop high availability ms.service: hdinsight ms.topic: conceptual -ms.date: 10/08/2020 +ms.date: 05/27/2022 --- # Azure HDInsight business continuity diff --git a/articles/hdinsight/hdinsight-cluster-availability.md b/articles/hdinsight/hdinsight-cluster-availability.md index 783cb85fac0b0..1bf57872d8402 100644 --- a/articles/hdinsight/hdinsight-cluster-availability.md +++ b/articles/hdinsight/hdinsight-cluster-availability.md @@ -4,7 +4,7 @@ description: Learn how to use Apache Ambari to monitor cluster health and availa ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive,seoapr2020 -ms.date: 05/01/2020 +ms.date: 05/26/2022 --- # How to monitor cluster availability with Apache Ambari in Azure HDInsight diff --git a/articles/hdinsight/hdinsight-for-vscode.md b/articles/hdinsight/hdinsight-for-vscode.md index 5f87635e826fa..720b2a591a347 100644 --- 
a/articles/hdinsight/hdinsight-for-vscode.md +++ b/articles/hdinsight/hdinsight-for-vscode.md @@ -3,7 +3,7 @@ title: Azure HDInsight for Visual Studio Code description: Learn how to use the Spark & Hive Tools (Azure HDInsight) for Visual Studio Code. Use the tools to create and submit queries and scripts. ms.service: hdinsight ms.topic: how-to -ms.date: 10/20/2020 +ms.date: 05/27/2022 ms.custom: devx-track-python --- @@ -164,9 +164,13 @@ With Spark & Hive Tools for Visual Studio Code, you can submit interactive Hive - **MESSAGES** panel: When you select a **Line** number, it jumps to the first line of the running script. -## Submit interactive PySpark queries +## Submit interactive PySpark queries -Users can perform PySpark interactive in the following ways. Note here that Jupyter Extension version (ms-jupyter): v2022.1.1001614873 and Python Extension version (ms-python): v2021.12.1559732655, python 3.6.x and 3.7.x are only for HDInsight interactive PySpark queries. +### Prerequisite for Pyspark interactive + +Note here that Jupyter Extension version (ms-jupyter): v2022.1.1001614873 and Python Extension version (ms-python): v2021.12.1559732655, python 3.6.x and 3.7.x are required for HDInsight interactive PySpark queries. + +Users can perform PySpark interactive in the following ways. ### Using the PySpark interactive command in PY file Using the PySpark interactive command to submit the queries, follow these steps: @@ -493,4 +497,4 @@ From the menu bar, go to **View** > **Command Palette**, and then enter **Azure: ## Next steps -For a video that demonstrates using Spark & Hive for Visual Studio Code, see [Spark & Hive for Visual Studio Code](https://go.microsoft.com/fwlink/?linkid=858706). \ No newline at end of file +For a video that demonstrates using Spark & Hive for Visual Studio Code, see [Spark & Hive for Visual Studio Code](https://go.microsoft.com/fwlink/?linkid=858706). diff --git a/articles/hdinsight/hdinsight-hadoop-compare-storage-options.md b/articles/hdinsight/hdinsight-hadoop-compare-storage-options.md index ba3bcf5c91a50..de96145ba5719 100644 --- a/articles/hdinsight/hdinsight-hadoop-compare-storage-options.md +++ b/articles/hdinsight/hdinsight-hadoop-compare-storage-options.md @@ -4,7 +4,7 @@ description: Provides an overview of storage types and how they work with Azure ms.service: hdinsight ms.topic: conceptual ms.custom: seoapr2020 -ms.date: 04/21/2020 +ms.date: 05/30/2022 --- # Compare storage options for use with Azure HDInsight clusters diff --git a/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-adf.md b/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-adf.md index d0404f70359f6..d3e37c5a98b74 100644 --- a/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-adf.md +++ b/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-adf.md @@ -30,7 +30,7 @@ If you don't have an Azure subscription, [create a free account](https://azure.m ## Prerequisites -* The PowerShell [Az Module](/powershell/azure/) installed. +* The PowerShell [Az Module](/powershell/azure/install-az-ps) installed. * An Azure Active Directory service principal. Once you've created the service principal, be sure to retrieve the **application ID** and **authentication key** using the instructions in the linked article. You need these values later in this tutorial. Also, make sure the service principal is a member of the *Contributor* role of the subscription or the resource group in which the cluster is created. 
For instructions to retrieve the required values and assign the right roles, see [Create an Azure Active Directory service principal](../active-directory/develop/howto-create-service-principal-portal.md). @@ -44,7 +44,7 @@ This section uses an Azure PowerShell script to create the storage account and c 2. Creates an Azure resource group. 3. Creates an Azure Storage account. 4. Creates a Blob container in the storage account -5. Copies the sample HiveQL script (**partitionweblogs.hql**) the Blob container. The script is available at [https://hditutorialdata.blob.core.windows.net/adfhiveactivity/script/partitionweblogs.hql](https://hditutorialdata.blob.core.windows.net/adfhiveactivity/script/partitionweblogs.hql). The sample script is already available in another public Blob container. The PowerShell script below makes a copy of these files into the Azure Storage account it creates. +5. Copies the sample HiveQL script (**partitionweblogs.hql**) the Blob container. The sample script is already available in another public Blob container. The PowerShell script below makes a copy of these files into the Azure Storage account it creates. ### Create storage account and copy files diff --git a/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-azure-powershell.md b/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-azure-powershell.md index 3b0e0fdf6b914..9785e3d8d8dc2 100644 --- a/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-azure-powershell.md +++ b/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-azure-powershell.md @@ -3,6 +3,7 @@ title: Create Apache Hadoop clusters using PowerShell - Azure HDInsight description: Learn how to create Apache Hadoop, Apache HBase, Apache Storm, or Apache Spark clusters on Linux for HDInsight by using Azure PowerShell. ms.service: hdinsight ms.topic: how-to +ms.tool: azure-powershell ms.custom: hdinsightactive ms.date: 12/18/2019 --- diff --git a/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-portal.md b/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-portal.md index 21c70dfa8fb78..0dced2fca7e1c 100644 --- a/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-portal.md +++ b/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-portal.md @@ -4,7 +4,7 @@ description: Learn to create Apache Hadoop, Apache HBase, Apache Storm, or Apach ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive,seoapr2020 -ms.date: 08/06/2020 +ms.date: 05/31/2022 --- # Create Linux-based clusters in HDInsight by using the Azure portal @@ -153,4 +153,4 @@ You've successfully created an HDInsight cluster. 
Now learn how to work with you * [Use Apache Hive with HDInsight](hadoop/hdinsight-use-hive.md) * [Get started with Apache HBase on HDInsight](hbase/apache-hbase-tutorial-get-started-linux.md) -* [Customize Linux-based HDInsight clusters by using script actions](hdinsight-hadoop-customize-cluster-linux.md) \ No newline at end of file +* [Customize Linux-based HDInsight clusters by using script actions](hdinsight-hadoop-customize-cluster-linux.md) diff --git a/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-with-secure-transfer-storage.md b/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-with-secure-transfer-storage.md index d93ebc1930914..81a817dfa11af 100644 --- a/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-with-secure-transfer-storage.md +++ b/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-with-secure-transfer-storage.md @@ -4,7 +4,7 @@ description: Learn how to create HDInsight clusters with secure transfer enabled ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive -ms.date: 02/18/2020 +ms.date: 06/08/2022 --- # Apache Hadoop clusters with secure transfer storage accounts in Azure HDInsight diff --git a/articles/hdinsight/hdinsight-hadoop-customize-cluster-bootstrap.md b/articles/hdinsight/hdinsight-hadoop-customize-cluster-bootstrap.md index 052337d16266e..f89a2beb2c576 100644 --- a/articles/hdinsight/hdinsight-hadoop-customize-cluster-bootstrap.md +++ b/articles/hdinsight/hdinsight-hadoop-customize-cluster-bootstrap.md @@ -4,7 +4,7 @@ description: Learn how to customize HDInsight cluster configuration programmatic ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive, devx-track-azurepowershell -ms.date: 04/01/2020 +ms.date: 05/31/2022 --- # Customize HDInsight clusters using Bootstrap diff --git a/articles/hdinsight/hdinsight-hadoop-customize-cluster-linux.md b/articles/hdinsight/hdinsight-hadoop-customize-cluster-linux.md index 1bc23324f2331..758108b24cf8a 100644 --- a/articles/hdinsight/hdinsight-hadoop-customize-cluster-linux.md +++ b/articles/hdinsight/hdinsight-hadoop-customize-cluster-linux.md @@ -4,7 +4,7 @@ description: Add custom components to HDInsight clusters by using script actions ms.service: hdinsight ms.topic: how-to ms.custom: seoapr2020, contperf-fy21q2, devx-track-azurepowershell -ms.date: 03/09/2021 +ms.date: 06/08/2022 --- # Customize Azure HDInsight clusters by using script actions diff --git a/articles/hdinsight/hdinsight-hadoop-manage-ambari-rest-api.md b/articles/hdinsight/hdinsight-hadoop-manage-ambari-rest-api.md index c222bada457e5..56d018d78afac 100644 --- a/articles/hdinsight/hdinsight-hadoop-manage-ambari-rest-api.md +++ b/articles/hdinsight/hdinsight-hadoop-manage-ambari-rest-api.md @@ -4,7 +4,7 @@ description: Learn how to use Ambari to monitor and manage Hadoop clusters in Az ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive,seoapr2020, devx-track-azurepowershell -ms.date: 04/29/2020 +ms.date: 06/09/2022 --- # Manage HDInsight clusters by using the Apache Ambari REST API diff --git a/articles/hdinsight/hdinsight-hadoop-port-settings-for-services.md b/articles/hdinsight/hdinsight-hadoop-port-settings-for-services.md index c9bd1637de8a1..0f6badd15e677 100644 --- a/articles/hdinsight/hdinsight-hadoop-port-settings-for-services.md +++ b/articles/hdinsight/hdinsight-hadoop-port-settings-for-services.md @@ -45,7 +45,7 @@ The following are available for specific cluster types: | Livy |443 |HTTPS |Spark |Spark REST API. 
See [Submit Apache Spark jobs remotely using Apache Livy](spark/apache-spark-livy-rest-interface.md) | | Spark Thrift server |443 |HTTPS |Spark |Spark Thrift server used to submit Hive queries. See [Use Beeline with Apache Hive on HDInsight](hadoop/apache-hadoop-use-hive-beeline.md) | | Storm |443 |HTTPS |Storm |Storm web UI. See [Deploy and manage Apache Storm topologies on HDInsight](storm/apache-storm-deploy-monitor-topology-linux.md) | -| Kafka Rest proxy |443 |HTTPS |Kafka |Kafka REST API. See [Interact with Apache Kafka clusters in Azure HDInsight using a REST proxy](kafka/rest-proxy.md) | +| Kafka REST proxy |443 |HTTPS |Kafka |Kafka REST API. See [Interact with Apache Kafka clusters in Azure HDInsight using a REST proxy](kafka/rest-proxy.md) | ### Authentication diff --git a/articles/hdinsight/hdinsight-high-availability-case-study.md b/articles/hdinsight/hdinsight-high-availability-case-study.md index c9bc78d3108e0..ba9128925943f 100644 --- a/articles/hdinsight/hdinsight-high-availability-case-study.md +++ b/articles/hdinsight/hdinsight-high-availability-case-study.md @@ -4,7 +4,7 @@ description: This article is a fictional case study of a possible Azure HDInsigh keywords: hadoop high availability ms.service: hdinsight ms.topic: conceptual -ms.date: 10/08/2020 +ms.date: 05/27/2022 --- # Azure HDInsight highly available solution architecture case study diff --git a/articles/hdinsight/hdinsight-linux-ambari-ssh-tunnel.md b/articles/hdinsight/hdinsight-linux-ambari-ssh-tunnel.md index 3e9185181c4f6..f3c292a488d02 100644 --- a/articles/hdinsight/hdinsight-linux-ambari-ssh-tunnel.md +++ b/articles/hdinsight/hdinsight-linux-ambari-ssh-tunnel.md @@ -4,7 +4,7 @@ description: Learn how to use an SSH tunnel to securely browse web resources hos ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive -ms.date: 04/14/2020 +ms.date: 06/09/2022 --- # Use SSH tunneling to access Apache Ambari web UI, JobHistory, NameNode, Apache Oozie, and other UIs diff --git a/articles/hdinsight/hdinsight-private-link.md b/articles/hdinsight/hdinsight-private-link.md index 04612c1c57f9a..e80d5226c6ba5 100644 --- a/articles/hdinsight/hdinsight-private-link.md +++ b/articles/hdinsight/hdinsight-private-link.md @@ -3,7 +3,7 @@ title: Enable Private Link on an Azure HDInsight cluster description: Learn how to connect to an outside HDInsight cluster by using Azure Private Link. ms.service: hdinsight ms.topic: conceptual -ms.date: 10/15/2020 +ms.date: 06/08/2022 --- # Enable Private Link on an HDInsight cluster diff --git a/articles/hdinsight/hdinsight-release-notes-archive.md b/articles/hdinsight/hdinsight-release-notes-archive.md index 52c3be24e0d3e..bf17d05a9e30e 100644 --- a/articles/hdinsight/hdinsight-release-notes-archive.md +++ b/articles/hdinsight/hdinsight-release-notes-archive.md @@ -4,14 +4,80 @@ description: Archived release notes for Azure HDInsight. Get development tips an ms.service: hdinsight ms.topic: conceptual ms.custom: hdinsightactive, references_regions -ms.date: 03/10/2022 +ms.date: 06/07/2022 --- # Archived release notes ## Summary -Azure HDInsight is one of the most popular services among enterprise customers for open-source Apache Hadoop and Apache Spark analytics on Azure. +Azure HDInsight is one of the most popular services among enterprise customers for open-source analytics on Azure. +If you would like to subscribe on release notes, watch releases on [this GitHub repository](https://github.com/hdinsight/release-notes/releases). 
+ +## Release date: 03/10/2022 + +This release applies for HDInsight 4.0. HDInsight release is made available to all regions over several days. The release date here indicates the first region release date. If you don't see below changes, wait for the release being live in your region over several days. + +The OS versions for this release are: +- HDInsight 4.0: Ubuntu 18.04.5 + +## Spark 3.1 is now generally available + +Spark 3.1 is now Generally Available on HDInsight 4.0 release. This release includes + +* Adaptive Query Execution, +* Convert Sort Merge Join to Broadcast Hash Join, +* Spark Catalyst Optimizer, +* Dynamic Partition Pruning, +* Customers will be able to create new Spark 3.1 clusters and not Spark 3.0 (preview) clusters. + +For more details, see the [Apache Spark 3.1](https://techcommunity.microsoft.com/t5/analytics-on-azure-blog/spark-3-1-is-now-generally-available-on-hdinsight/ba-p/3253679) is now Generally Available on HDInsight - Microsoft Tech Community. + +For a complete list of improvements, see the [Apache Spark 3.1 release notes.](https://spark.apache.org/releases/spark-release-3-1-2.html) + +For more details on migration, see the [migration guide.](https://spark.apache.org/docs/latest/migration-guide.html) + +## Kafka 2.4 is now generally available + +Kafka 2.4.1 is now Generally Available. For more information, please see [Kafka 2.4.1 Release Notes.](http://kafka.apache.org/24/documentation.html) +Other features include MirrorMaker 2 availability, new metric category AtMinIsr topic partition, Improved broker start-up time by lazy on demand mmap of index files, More consumer metrics to observe user poll behavior. + +## Map Datatype in HWC is now supported in HDInsight 4.0 + +This release includes Map Datatype Support for HWC 1.0 (Spark 2.4) Via the spark-shell application, and all other all spark clients that HWC supports. Following improvements are included like any other data types: + +A user can +* Create a Hive table with any column(s) containing Map datatype, insert data into it and read the results from it. +* Create an Apache Spark dataframe with Map Type and do batch/stream reads and writes. + +### New regions + +HDInsight has now expanded its geographical presence to two new regions: China East 3 and China North 3. + +### OSS backport changes + +OSS backports that are included in Hive including HWC 1.0 (Spark 2.4) which supports Map data type. 
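A minimal PySpark sketch of what the map-type support through HWC might look like in practice is shown below. The table and column names are illustrative, the `pyspark_llap` bindings are assumed to ship with the Hive Warehouse Connector on the cluster, and HWC is assumed to be configured as described in the HWC documentation; exact write options can vary by HWC version.

```python
from pyspark.sql.types import MapType, StringType, IntegerType, StructType, StructField
from pyspark_llap import HiveWarehouseSession  # assumed: Python bindings shipped with HWC

# Build an HWC session on top of the existing SparkSession (spark).
hive = HiveWarehouseSession.session(spark).build()

# Create a Hive table with a map column, insert into it, and read it back.
hive.executeUpdate(
    "CREATE TABLE IF NOT EXISTS demo_map (id INT, attrs MAP<STRING, STRING>) STORED AS ORC")
hive.executeUpdate("INSERT INTO demo_map SELECT 1, map('color', 'blue', 'size', 'M')")
hive.executeQuery("SELECT id, attrs FROM demo_map").show(truncate=False)

# A Spark DataFrame with a map column can also be written back through the connector.
schema = StructType([
    StructField("id", IntegerType()),
    StructField("attrs", MapType(StringType(), StringType())),
])
df = spark.createDataFrame([(2, {"color": "red"})], schema)
(df.write
   .format("com.hortonworks.spark.sql.hive.llap.HiveWarehouseConnector")  # HWC data source
   .option("table", "demo_map")
   .mode("append")
   .save())
```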
+ +### Here are the OSS backported Apache JIRAs for this release: + +| Impacted Feature | Apache JIRA | +|---------------------|--------------------------------------------------------------------| +| Metastore direct sql queries with IN/(NOT IN) should be split based on max parameters allowed by SQL DB | [HIVE-25659](https://issues.apache.org/jira/browse/HIVE-25659) | +| Upgrade log4j 2.16.0 to 2.17.0 | [HIVE-25825](https://issues.apache.org/jira/browse/HIVE-25825) | +| Update Flatbuffer version | [HIVE-22827](https://issues.apache.org/jira/browse/HIVE-22827) | +| Support Map data-type natively in Arrow format | [HIVE-25553](https://issues.apache.org/jira/browse/HIVE-25553) | +| LLAP external client - Handle nested values when the parent struct is null | [HIVE-25243](https://issues.apache.org/jira/browse/HIVE-25243) | +| Upgrade arrow version to 0.11.0 | [HIVE-23987](https://issues.apache.org/jira/browse/HIVE-23987) | + +## Deprecation notices +### Azure Virtual Machine Scale Sets on HDInsight + +HDInsight will no longer use Azure Virtual Machine Scale Sets to provision the clusters, no breaking change is expected. Existing HDInsight clusters on virtual machine scale sets will have no impact, any new clusters on latest images will no longer use Virtual Machine Scale Sets. + +### Scaling of Azure HDInsight HBase workloads will now be supported only using manual scale + +Starting from March 01, 2022, HDInsight will only support manual scale for HBase, there's no impact on running clusters. New HBase clusters won't be able to enable schedule based Autoscaling. For more information on how to  manually scale your HBase cluster, refer our documentation on [Manually scaling Azure HDInsight clusters](./hdinsight-scaling-best-practices.md) + ## Release date: 12/27/2021 @@ -262,14 +328,14 @@ This release applies for both HDInsight 3.6 and HDInsight 4.0. HDInsight release HDInsight added Dav4-series support in this release. Learn more about [Dav4-series here](../virtual-machines/dav4-dasv4-series.md). #### Kafka REST Proxy GA -Kafka REST Proxy enables you to interact with your Kafka cluster via a REST API over HTTPS. Kafka Rest Proxy is general available starting from this release. Learn more about [Kafka REST Proxy here](./kafka/rest-proxy.md). +Kafka REST Proxy enables you to interact with your Kafka cluster via a REST API over HTTPS. Kafka REST Proxy is general available starting from this release. Learn more about [Kafka REST Proxy here](./kafka/rest-proxy.md). #### Moving to Azure virtual machine scale sets HDInsight now uses Azure virtual machines to provision the cluster. The service is gradually migrating to [Azure virtual machine scale sets](../virtual-machine-scale-sets/overview.md). The entire process may take months. After your regions and subscriptions are migrated, newly created HDInsight clusters will run on virtual machine scale sets without customer actions. No breaking change is expected. ### Deprecation #### Disabled VM sizes -Starting form January 9 2021, HDInsight will block all customers creating clusters using standand_A8, standand_A9, standand_A10 and standand_A11 VM sizes. Existing clusters will run as is. Consider moving to HDInsight 4.0 to avoid potential system/support interruption. +Starting from January 9 2021, HDInsight will block all customers creating clusters using standand_A8, standand_A9, standand_A10 and standand_A11 VM sizes. Existing clusters will run as is. Consider moving to HDInsight 4.0 to avoid potential system/support interruption. 
### Behavior changes #### Default cluster VM size changes to Ev3-series @@ -291,7 +357,7 @@ Starting February 2021, the default version of HDInsight cluster will be changed HDInsight is upgrading OS version from Ubuntu 16.04 to 18.04. The upgrade will complete before April 2021. #### HDInsight 3.6 end of support on June 30 2021 -HDInsight 3.6 will be end of support. Starting form June 30 2021, customers can't create new HDInsight 3.6 clusters. Existing clusters will run as is without the support from Microsoft. Consider moving to HDInsight 4.0 to avoid potential system/support interruption. +HDInsight 3.6 will be end of support. Starting from June 30 2021, customers can't create new HDInsight 3.6 clusters. Existing clusters will run as is without the support from Microsoft. Consider moving to HDInsight 4.0 to avoid potential system/support interruption. ### Component version change No component version change for this release. You can find the current component versions for HDInsight 4.0 and HDInsight 3.6 in [this doc](./hdinsight-component-versioning.md). @@ -315,7 +381,7 @@ HDInsight now uses Azure virtual machines to provision the cluster. Starting fro HDInsight 3.6 ML Services cluster type will be end of support by December 31 2020. Customers won't be able to create new 3.6 ML Services clusters after December 31 2020. Existing clusters will run as is without the support from Microsoft. Check the support expiration for HDInsight versions and cluster types [here](./hdinsight-component-versioning.md). #### Disabled VM sizes -Starting from November 16 2020, HDInsight will block new customers creating clusters using standand_A8, standand_A9, standand_A10 and standand_A11 VM sizes. Existing customers who have used these VM sizes in the past three months won't be affected. Starting form January 9 2021, HDInsight will block all customers creating clusters using standand_A8, standand_A9, standand_A10 and standand_A11 VM sizes. Existing clusters will run as is. Consider moving to HDInsight 4.0 to avoid potential system/support interruption. +Starting from November 16 2020, HDInsight will block new customers creating clusters using standand_A8, standand_A9, standand_A10 and standand_A11 VM sizes. Existing customers who have used these VM sizes in the past three months won't be affected. Starting from January 9 2021, HDInsight will block all customers creating clusters using standand_A8, standand_A9, standand_A10 and standand_A11 VM sizes. Existing clusters will run as is. Consider moving to HDInsight 4.0 to avoid potential system/support interruption. ### Behavior changes #### Add NSG rule checking before scaling operation @@ -344,7 +410,7 @@ HDInsight now uses Azure virtual machines to provision the cluster. Starting fro HDInsight 3.6 ML Services cluster type will be end of support by December 31 2020. Customers won't create new 3.6 ML Services clusters after December 31 2020. Existing clusters will run as is without the support from Microsoft. Check the support expiration for HDInsight versions and cluster types [here](./hdinsight-component-versioning.md#supported-hdinsight-versions). #### Disabled VM sizes -Starting from November 16 2020, HDInsight will block new customers creating clusters using standand_A8, standand_A9, standand_A10 and standand_A11 VM sizes. Existing customers who have used these VM sizes in the past three months won't be affected. 
Starting form January 9 2021, HDInsight will block all customers creating clusters using standand_A8, standand_A9, standand_A10 and standand_A11 VM sizes. Existing clusters will run as is. Consider moving to HDInsight 4.0 to avoid potential system/support interruption. +Starting from November 16 2020, HDInsight will block new customers creating clusters using standand_A8, standand_A9, standand_A10 and standand_A11 VM sizes. Existing customers who have used these VM sizes in the past three months won't be affected. Starting from January 9 2021, HDInsight will block all customers creating clusters using standand_A8, standand_A9, standand_A10 and standand_A11 VM sizes. Existing clusters will run as is. Consider moving to HDInsight 4.0 to avoid potential system/support interruption. ### Behavior changes No behavior change for this release. @@ -359,7 +425,7 @@ HDInsight today doesn't support customizing Zookeeper node size for Spark, Hadoo Starting February 2021, the default version of HDInsight cluster will be changed from 3.6 to 4.0. For more information about available versions, see [supported versions](./hdinsight-component-versioning.md#supported-hdinsight-versions). Learn more about what is new in [HDInsight 4.0](./hdinsight-version-release.md) #### HDInsight 3.6 end of support on June 30 2021 -HDInsight 3.6 will be end of support. Starting form June 30 2021, customers can't create new HDInsight 3.6 clusters. Existing clusters will run as is without the support from Microsoft. Consider moving to HDInsight 4.0 to avoid potential system/support interruption. +HDInsight 3.6 will be end of support. Starting from June 30 2021, customers can't create new HDInsight 3.6 clusters. Existing clusters will run as is without the support from Microsoft. Consider moving to HDInsight 4.0 to avoid potential system/support interruption. ### Bug fixes HDInsight continues to make cluster reliability and performance improvements. @@ -567,7 +633,7 @@ A minimum 4-core VM is required for Head Node to ensure the high availability an #### Cluster worker node provisioning change When 80% of the worker nodes are ready, the cluster enters **operational** stage. At this stage, customers can do all the data plane operations like running scripts and jobs. But customers can't do any control plane operation like scaling up/down. Only deletion is supported. -After the **operational** stage, the cluster waits another 60 minutes for the remaining 20% worker nodes. At the end of this 60 minutes, the cluster moves to the **running** stage, even if all of worker nodes are still not available. Once a cluster enters the **running** stage, you can use it as normal. Both control plan operations like scaling up/down, and data plan operations like running scripts and jobs are accepted. If some of the requested worker nodes are not available, the cluster will be marked as partial success. You are charged for the nodes that were deployed successfully. +After the **operational** stage, the cluster waits another 60 minutes for the remaining 20% worker nodes. At the end of this 60 minute, the cluster moves to the **running** stage, even if all of worker nodes are still not available. Once a cluster enters the **running** stage, you can use it as normal. Both control plan operations like scaling up/down, and data plan operations like running scripts and jobs are accepted. If some of the requested worker nodes are not available, the cluster will be marked as partial success. You are charged for the nodes that were deployed successfully. 
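If you automate around these provisioning stages, a rough sketch of polling the cluster state with the HDInsight management SDK for Python (`azure-mgmt-hdinsight`) might look like the following. The subscription, resource group, and cluster names are placeholders, and the property names follow the package as currently published.

```python
import time

from azure.identity import DefaultAzureCredential
from azure.mgmt.hdinsight import HDInsightManagementClient

client = HDInsightManagementClient(DefaultAzureCredential(), "<subscription-id>")

# Poll until the cluster reports a terminal-looking state.
while True:
    cluster = client.clusters.get("<resource-group>", "<cluster-name>")
    state = cluster.properties.cluster_state  # for example "Accepted", "Running", "Error"
    print(f"Cluster state: {state}")
    if state in ("Running", "Error"):
        break
    time.sleep(60)
```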
#### Create new service principal through HDInsight Previously, with cluster creation, customers can create a new service principal to access the connected ADLS Gen 1 account in Azure portal. Starting June 15 2020, customers cannot create new service principal in HDInsight creation workflow, only existing service principal is supported. See [Create Service Principal and Certificates using Azure Active Directory](../active-directory/develop/howto-create-service-principal-portal.md). @@ -604,7 +670,7 @@ This release applies both for HDInsight 3.6 and 4.0. HDInsight release is made a ### New features #### TLS 1.2 enforcement -Transport Layer Security (TLS) and Secure Sockets Layer (SSL) are cryptographic protocols that provide communications security over a computer network. Learn more about [TLS](https://en.wikipedia.org/wiki/Transport_Layer_Security#SSL_1.0.2C_2.0_and_3.0). HDInsight uses TLS 1.2 on public HTTPs endpoints but TLS 1.1 is still supported for backward compatibility. +Transport Layer Security (TLS) and Secure Sockets Layer (SSL) are cryptographic protocols that provide communications security over a computer network. Learn more about [TLS](https://en.wikipedia.org/wiki/Transport_Layer_Security#SSL_1.0.2C_2.0_and_3.0). HDInsight uses TLS 1.2 on public HTTPS endpoints but TLS 1.1 is still supported for backward compatibility. With this release, customers can opt into TLS 1.2 only for all connections through the public cluster endpoint. To support this, the new property **minSupportedTlsVersion** is introduced and can be specified during cluster creation. If the property is not set, the cluster still supports TLS 1.0, 1.1 and 1.2, which is the same as today's behavior. Customers can set the value for this property to "1.2", which means that the cluster only supports TLS 1.2 and above. For more information, see [Transport Layer Security](./transport-layer-security.md). @@ -680,7 +746,7 @@ The following changes will happen in upcoming releases. #### Transport Layer Security (TLS) 1.2 enforcement Transport Layer Security (TLS) and Secure Sockets Layer (SSL) are cryptographic protocols that provide communications security over a computer network. For more information, see [Transport Layer Security](https://en.wikipedia.org/wiki/Transport_Layer_Security#SSL_1.0.2C_2.0_and_3.0). While Azure HDInsight clusters accept TLS 1.2 connections on public HTTPS endpoints, TLS 1.1 is still supported for backward compatibility with older clients. -Starting from the next release, you will be able to opt-in and configure your new HDInsight clusters to only accept TLS 1.2 connections. +Starting from the next release, you will be able to opt in and configure your new HDInsight clusters to only accept TLS 1.2 connections. Later in the year, starting on 6/30/2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. We recommend that you ensure that all your clients are ready to handle TLS 1.2 or later versions. @@ -745,7 +811,7 @@ HDInsight now offers a new capacity to enable customers to use their own SQL DB #### F-series virtual machines are now available with HDInsight -F-series virtual machines(VMs) are good choice to get started with HDInsight with light processing requirements. At a lower per-hour list price, the F-series is the best value in price-performance in the Azure portfolio based on the Azure Compute Unit (ACU) per vCPU. For more information, see [Selecting the right VM size for your Azure HDInsight cluster](./hdinsight-selecting-vm-size.md).
+F-series virtual machines(VMs) is a good choice to get started with HDInsight with light processing requirements. At a lower per-hour list price, the F-series are the best value in price-performance in the Azure portfolio based on the Azure Compute Unit (ACU) per vCPU. For more information, see [Selecting the right VM size for your Azure HDInsight cluster](./hdinsight-selecting-vm-size.md). ### Deprecation @@ -1030,7 +1096,7 @@ HDP 2.6.4 provided Hadoop Common 2.7.3 and the following Apache patches: - [YARN-5641](https://issues.apache.org/jira/browse/YARN-5641): Localizer leaves behind tarballs after container is complete. -- [YARN-6004](https://issues.apache.org/jira/browse/YARN-6004): Refactor TestResourceLocalizationService\#testDownloadingResourcesOnContainer so that it is less than 150 lines. +- [YARN-6004](https://issues.apache.org/jira/browse/YARN-6004): Refactor TestResourceLocalizationService\#testDownloadingResourcesOnContainer so that it is fewer than 150 lines. - [YARN-6078](https://issues.apache.org/jira/browse/YARN-6078): Containers stuck in Localizing state. @@ -1082,9 +1148,9 @@ This release provides HBase 1.1.2 and the following Apache patches. - [HBASE-18164](https://issues.apache.org/jira/browse/HBASE-18164): Much faster locality cost function and candidate generator. -- [HBASE-18212](https://issues.apache.org/jira/browse/HBASE-18212): In Standalone mode with local filesystem HBase logs Warning message: Failed to invoke 'unbuffer' method in class class org.apache.hadoop.fs.FSDataInputStream. +- [HBASE-18212](https://issues.apache.org/jira/browse/HBASE-18212): In Standalone mode with local filesystem HBase logs Warning message: Failed to invoke 'unbuffer' method in class org.apache.hadoop.fs.FSDataInputStream. -- [HBASE-18808](https://issues.apache.org/jira/browse/HBASE-18808): Ineffective config check in BackupLogCleaner\#getDeletableFiles(). +- [HBASE-18808](https://issues.apache.org/jira/browse/HBASE-18808): Ineffective config check-in BackupLogCleaner\#getDeletableFiles(). - [HBASE-19052](https://issues.apache.org/jira/browse/HBASE-19052): FixedFileTrailer should recognize CellComparatorImpl class in branch-1.x. @@ -1140,7 +1206,7 @@ This release provides Hive 1.2.1 and Hive 2.1.0 in addition to the following pat - [*HIVE-17013*](https://issues.apache.org/jira/browse/HIVE-17013): Delete request with a subquery based on select over a view. -- [*HIVE-17063*](https://issues.apache.org/jira/browse/HIVE-17063): insert overwrite partition onto an external table fail when drop partition first. +- [*HIVE-17063*](https://issues.apache.org/jira/browse/HIVE-17063): insert overwrite partition onto an external table fails when drop partition first. - [*HIVE-17259*](https://issues.apache.org/jira/browse/HIVE-17259): Hive JDBC does not recognize UNIONTYPE columns. @@ -1182,7 +1248,7 @@ This release provides Hive 1.2.1 and Hive 2.1.0 in addition to the following pat - [*HIVE-18352*](https://issues.apache.org/jira/browse/HIVE-18352): introduce a METADATAONLY option while doing REPL DUMP to allow integrations of other tools. -- [*HIVE-18353*](https://issues.apache.org/jira/browse/HIVE-18353): CompactorMR should call jobclient.close() to trigger cleanup (Prabhu Joseph via Thejas Nair). +- [*HIVE-18353*](https://issues.apache.org/jira/browse/HIVE-18353): CompactorMR should call jobclient.close() to trigger cleanup. - [*HIVE-18390*](https://issues.apache.org/jira/browse/HIVE-18390): IndexOutOfBoundsException when query a partitioned view in ColumnPruner. 
@@ -1292,7 +1358,7 @@ This release provides Hive 1.2.1 and Hive 2.1.0 in addition to the following pat - [*HIVE-18327*](https://issues.apache.org/jira/browse/HIVE-18327): Remove the unnecessary HiveConf dependency for MiniHiveKdc. -- [*HIVE-18331*](https://issues.apache.org/jira/browse/HIVE-18331): Add relogin when TGT expire and some logging/lambda. +- [*HIVE-18331*](https://issues.apache.org/jira/browse/HIVE-18331): Add relogin when TGT expires and some logging/lambda. - [*HIVE-18341*](https://issues.apache.org/jira/browse/HIVE-18341): Add repl load support for adding "raw" namespace for TDE with same encryption keys. @@ -1650,7 +1716,7 @@ This release provides Storm 1.1.1 and the following Apache patches: - [STORM-2841](https://issues.apache.org/jira/browse/STORM-2841): testNoAcksIfFlushFails UT fails with NullPointerException. -- [STORM-2854](https://issues.apache.org/jira/browse/STORM-2854): Expose IEventLogger to make event logging pluggable. +- [STORM-2854](https://issues.apache.org/jira/browse/STORM-2854): Expose IEventLogger to make event log pluggable. - [STORM-2870](https://issues.apache.org/jira/browse/STORM-2870): FileBasedEventLogger leaks non-daemon ExecutorService which prevents process to be finished. diff --git a/articles/hdinsight/hdinsight-release-notes.md b/articles/hdinsight/hdinsight-release-notes.md index db3f5c4f1c13a..8ccc017d40b49 100644 --- a/articles/hdinsight/hdinsight-release-notes.md +++ b/articles/hdinsight/hdinsight-release-notes.md @@ -4,8 +4,9 @@ description: Latest release notes for Azure HDInsight. Get development tips and ms.custom: references_regions ms.service: hdinsight ms.topic: conceptual -ms.date: 03/10/2022 +ms.date: 06/03/2022 --- + # Azure HDInsight release notes This article provides information about the **most recent** Azure HDInsight release updates. For information on earlier releases, see [HDInsight Release Notes Archive](hdinsight-release-notes-archive.md). @@ -15,74 +16,103 @@ This article provides information about the **most recent** Azure HDInsight rele Azure HDInsight is one of the most popular services among enterprise customers for open-source analytics on Azure. If you would like to subscribe on release notes, watch releases on [this GitHub repository](https://github.com/hdinsight/release-notes/releases). -## Release date: 03/10/2022 +## Release date: 06/03/2022 This release applies for HDInsight 4.0. HDInsight release is made available to all regions over several days. The release date here indicates the first region release date. If you don't see below changes, wait for the release being live in your region over several days. -The OS versions for this release are: -- HDInsight 4.0: Ubuntu 18.04.5 - -## Spark 3.1 is now generally available - -Spark 3.1 is now Generally Available on HDInsight 4.0 release. This release includes - -* Adaptive Query Execution, -* Convert Sort Merge Join to Broadcast Hash Join, -* Spark Catalyst Optimizer, -* Dynamic Partition Pruning, -* Customers will be able to create new Spark 3.1 clusters and not Spark 3.0 (preview) clusters. - -For more details, see the [Apache Spark 3.1](https://techcommunity.microsoft.com/t5/analytics-on-azure-blog/spark-3-1-is-now-generally-available-on-hdinsight/ba-p/3253679) is now Generally Available on HDInsight - Microsoft Tech Community. 
- -For a complete list of improvements, see the [Apache Spark 3.1 release notes.](https://spark.apache.org/releases/spark-release-3-1-2.html) - -For more details on migration, see the [migration guide.](https://spark.apache.org/docs/latest/migration-guide.html) - -## Kafka 2.4 is now generally available - -Kafka 2.4.1 is now Generally Available. For more information, please see [Kafka 2.4.1 Release Notes.](http://kafka.apache.org/24/documentation.html) -Other features include MirrorMaker 2 availability, new metric category AtMinIsr topic partition, Improved broker start-up time by lazy on demand mmap of index files, More consumer metrics to observe user poll behavior. - -## Map Datatype in HWC is now supported in HDInsight 4.0 - -This release includes Map Datatype Support for HWC 1.0 (Spark 2.4) Via the spark-shell application, and all other all spark clients that HWC supports. Following improvements are included like any other data types: - -A user can -* Create a Hive table with any column(s) containing Map datatype, insert data into it and read the results from it. -* Create an Apache Spark dataframe with Map Type and do batch/stream reads and writes. - -### New regions - -HDInsight has now expanded its geographical presence to two new regions: China East 3 and China North 3. - -### OSS backport changes - -OSS backports that are included in Hive including HWC 1.0 (Spark 2.4) which supports Map data type. - -### Here are the OSS backported Apache JIRAs for this release: - -| Impacted Feature | Apache JIRA | -|---------------------|--------------------------------------------------------------------| -| Metastore direct sql queries with IN/(NOT IN) should be split based on max parameters allowed by SQL DB | [HIVE-25659](https://issues.apache.org/jira/browse/HIVE-25659) | -| Upgrade log4j 2.16.0 to 2.17.0 | [HIVE-25825](https://issues.apache.org/jira/browse/HIVE-25825) | -| Update Flatbuffer version | [HIVE-22827](https://issues.apache.org/jira/browse/HIVE-22827) | -| Support Map data-type natively in Arrow format | [HIVE-25553](https://issues.apache.org/jira/browse/HIVE-25553) | -| LLAP external client - Handle nested values when the parent struct is null | [HIVE-25243](https://issues.apache.org/jira/browse/HIVE-25243) | -| Upgrade arrow version to 0.11.0 | [HIVE-23987](https://issues.apache.org/jira/browse/HIVE-23987) | - -## Deprecation notices -### Azure Virtual Machine Scale Sets on HDInsight - -HDInsight will no longer use Azure Virtual Machine Scale Sets to provision the clusters, no breaking change is expected. Existing HDInsight clusters on virtual machine scale sets will have no impact, any new clusters on latest images will no longer use Virtual Machine Scale Sets. - -### Scaling of Azure HDInsight HBase workloads will now be supported only using manual scale - -Starting from March 01, 2022, HDInsight will only support manual scale for HBase, there's no impact on running clusters. New HBase clusters won't be able to enable schedule based Autoscaling. For more information on how to  manually scale your HBase cluster, refer our documentation on [Manually scaling Azure HDInsight clusters](./hdinsight-scaling-best-practices.md) - -## HDInsight 3.6 end of support extension - -HDInsight 3.6 end of support is extended until September 30, 2022. - -Starting from September 30, 2022, customers can't create new HDInsight 3.6 clusters. Existing clusters will run as is without the support from Microsoft. Consider moving to HDInsight 4.0 to avoid potential system/support interruption. 
- -Customers who are on Azure HDInsight 3.6 clusters will continue to get [Basic support](./hdinsight-component-versioning.md#support-options-for-hdinsight-versions) until September 30, 2022. After September 30, 2022 customers won't be able to create new HDInsight 3.6 clusters. \ No newline at end of file +## Release highlights + +**The Hive Warehouse Connector (HWC) on Spark v3.1.2** + +The Hive Warehouse Connector (HWC) allows you to take advantage of the unique features of Hive and Spark to build powerful big-data applications. HWC is currently supported for Spark v2.4 only. This feature adds business value by allowing ACID transactions on Hive Tables using Spark. This feature is useful for customers who use both Hive and Spark in their data estate. +For more information, see [Apache Spark & Hive - Hive Warehouse Connector - Azure HDInsight | Microsoft Docs](/azure/hdinsight/interactive-query/apache-hive-warehouse-connector) + +## Ambari + +* Scaling and provisioning improvement changes +* HDI hive is now compatible with OSS version 3.1.2 + +HDI Hive 3.1 version is upgraded to OSS Hive 3.1.2. This version has all fixes and features available in open source Hive 3.1.2 version. + +> [!NOTE] +> **Spark** +> +> * If you are using Azure User Interface to create Spark Cluster for HDInsight, you will see from the dropdown list an additional version Spark 3.1.(HDI 5.0) along with the older versions. This version is a renamed version of Spark 3.1.(HDI 4.0). This is only an UI level change, which doesn’t impact anything for the existing users and users who are already using the ARM template. + +![Screenshot_of spark 3.1 for HDI 5.0.](media/hdinsight-release-notes/spark-3-1-for-hdi-5-0.png) + +> [!NOTE] +> **Interactive Query** +> +> * If you are creating an Interactive Query Cluster, you will see from the dropdown list an additional version as Interactive Query 3.1 (HDI 5.0). +> * If you are going to use Spark 3.1 version along with Hive which require ACID support, you need to select this version Interactive Query 3.1 (HDI 5.0). 
+ +![Screenshot_of interactive query 3.1 for HDI 5.0.](media/hdinsight-release-notes/interactive-query-3-1-for-hdi-5-0.png) + +## TEZ bug fixes + +| Bug Fixes|Apache JIRA| +|---|---| +|TezUtils.createConfFromByteString on Configuration larger than 32 MB throws com.google.protobuf.CodedInputStream exception |[TEZ-4142](https://issues.apache.org/jira/browse/TEZ-4142)| +|TezUtils createByteStringFromConf should use snappy instead of DeflaterOutputStream|[TEZ-4113](https://issues.apache.org/jira/browse/TEZ-4113)| + +## HBase bug fixes + +| Bug Fixes|Apache JIRA| +|---|---| +|TableSnapshotInputFormat should use ReadType.STREAM for scanning HFiles |[HBASE-26273](https://issues.apache.org/jira/browse/HBASE-26273)| +|Add option to disable scanMetrics in TableSnapshotInputFormat |[HBASE-26330](https://issues.apache.org/jira/browse/HBASE-26330)| +|Fix for ArrayIndexOutOfBoundsException when balancer is executed |[HBASE-22739](https://issues.apache.org/jira/browse/HBASE-22739)| + +## Hive bug fixes + +|Bug Fixes|Apache JIRA| +|---|---| +| NPE when inserting data with 'distribute by' clause with dynpart sort optimization|[HIVE-18284](https://issues.apache.org/jira/browse/HIVE-18284)| +| MSCK REPAIR Command with Partition Filtering Fails While Dropping Partitions|[HIVE-23851](https://issues.apache.org/jira/browse/HIVE-23851)| +| Wrong exception thrown if capacity<=0|[HIVE-25446](https://issues.apache.org/jira/browse/HIVE-25446)| +| Support parallel load for HastTables - Interfaces|[HIVE-25583](https://issues.apache.org/jira/browse/HIVE-25583)| +| Include MultiDelimitSerDe in HiveServer2 By Default|[HIVE-20619](https://issues.apache.org/jira/browse/HIVE-20619)| +| Remove glassfish.jersey and mssql-jdbc classes from jdbc-standalone jar|[HIVE-22134](https://issues.apache.org/jira/browse/HIVE-22134)| +| Null pointer exception on running compaction against an MM table.|[HIVE-21280 ](https://issues.apache.org/jira/browse/HIVE-21280)| +| Hive query with large size via knox fails with Broken pipe Write failed|[HIVE-22231](https://issues.apache.org/jira/browse/HIVE-22231)| +| Adding ability for user to set bind user|[HIVE-21009](https://issues.apache.org/jira/browse/HIVE-21009)| +| Implement UDF to interpret date/timestamp using its internal representation and Gregorian-Julian hybrid calendar|[HIVE-22241](https://issues.apache.org/jira/browse/HIVE-22241)| +| Beeline option to show/not show execution report|[HIVE-22204](https://issues.apache.org/jira/browse/HIVE-22204)| +| Tez: SplitGenerator tries to look for plan files, which won't exist for Tez|[HIVE-22169 ](https://issues.apache.org/jira/browse/HIVE-22169)| +| Remove expensive logging from the LLAP cache hotpath|[HIVE-22168](https://issues.apache.org/jira/browse/HIVE-22168)| +| UDF: FunctionRegistry synchronizes on org.apache.hadoop.hive.ql.udf.UDFType class|[HIVE-22161](https://issues.apache.org/jira/browse/HIVE-22161)| +| Prevent the creation of query routing appender if property is set to false|[HIVE-22115](https://issues.apache.org/jira/browse/HIVE-22115)| +| Remove cross-query synchronization for the partition-eval|[HIVE-22106](https://issues.apache.org/jira/browse/HIVE-22106)| +| Skip setting up hive scratch dir during planning|[HIVE-21182](https://issues.apache.org/jira/browse/HIVE-21182)| +| Skip creating scratch dirs for tez if RPC is on|[HIVE-21171](https://issues.apache.org/jira/browse/HIVE-21171)| +| switch Hive UDFs to use Re2J regex engine|[HIVE-19661 ](https://issues.apache.org/jira/browse/HIVE-19661)| +| Migrated clustered tables using 
bucketing_version 1 on hive 3 uses bucketing_version 2 for inserts|[HIVE-22429](https://issues.apache.org/jira/browse/HIVE-22429)| +| Bucketing: Bucketing version 1 is incorrectly partitioning data|[HIVE-21167 ](https://issues.apache.org/jira/browse/HIVE-21167)| +| Adding ASF License header to the newly added file|[HIVE-22498](https://issues.apache.org/jira/browse/HIVE-22498)| +| Schema tool enhancements to support mergeCatalog|[HIVE-22498](https://issues.apache.org/jira/browse/HIVE-22498)| +| Hive with TEZ UNION ALL and UDTF results in data loss|[HIVE-21915](https://issues.apache.org/jira/browse/HIVE-21915)| +| Split text files even if header/footer exists|[HIVE-21924](https://issues.apache.org/jira/browse/HIVE-21924)| +| MultiDelimitSerDe returns wrong results in last column when the loaded file has more columns than the once are present in table schema|[HIVE-22360](https://issues.apache.org/jira/browse/HIVE-22360)| +| LLAP external client - Need to reduce LlapBaseInputFormat#getSplits() footprint|[HIVE-22221](https://issues.apache.org/jira/browse/HIVE-22221)| +| Column name with reserved keyword is unescaped when query including join on table with mask column is rewritten (Zoltan Matyus via Zoltan Haindrich)|[HIVE-22208](https://issues.apache.org/jira/browse/HIVE-22208)| +|Prevent LLAP shutdown on AMReporter related RuntimeException|[HIVE-22113](https://issues.apache.org/jira/browse/HIVE-22113)| +| LLAP status service driver may get stuck with wrong Yarn app ID|[HIVE-21866](https://issues.apache.org/jira/browse/HIVE-21866)| +| OperationManager.queryIdOperation doesn't properly clean up multiple queryIds|[HIVE-22275](https://issues.apache.org/jira/browse/HIVE-22275)| +| Bringing a node manager down blocks restart of LLAP service|[HIVE-22219](https://issues.apache.org/jira/browse/HIVE-22219)| +| StackOverflowError when drop lots of partitions|[HIVE-15956](https://issues.apache.org/jira/browse/HIVE-15956)| +| Access check is failed when a temporary directory is removed|[HIVE-22273](https://issues.apache.org/jira/browse/HIVE-22273)| +| Fix wrong results/ArrayOutOfBound exception in left outer map joins on specific boundary conditions|[HIVE-22120](https://issues.apache.org/jira/browse/HIVE-22120)| +| Remove distribution management tag from pom.xml|[HIVE-19667](https://issues.apache.org/jira/browse/HIVE-19667)| +| Parsing time can be high if there's deeply nested subqueries|[HIVE-21980](https://issues.apache.org/jira/browse/HIVE-21980)| +| For ALTER TABLE t SET TBLPROPERTIES ('EXTERNAL'='TRUE'); `TBL_TYPE` attribute changes not reflecting for non-CAPS|[HIVE-20057 ](https://issues.apache.org/jira/browse/HIVE-20057)| +| JDBC: HiveConnection shades log4j interfaces|[HIVE-18874](https://issues.apache.org/jira/browse/HIVE-18874)| +| Update repo URLs in poms - branh 3.1 version|[HIVE-21786](https://issues.apache.org/jira/browse/HIVE-21786)| +| DBInstall tests broken on master and branch-3.1|[HIVE-21758](https://issues.apache.org/jira/browse/HIVE-21758)| +| Load data into a bucketed table is ignoring partitions specs and loads data into default partition|[HIVE-21564](https://issues.apache.org/jira/browse/HIVE-21564)| +| Queries with join condition having timestamp or timestamp with local time zone literal throw SemanticException|[HIVE-21613](https://issues.apache.org/jira/browse/HIVE-21613)| +| Analyze compute stats for column leave behind staging dir on HDFS|[HIVE-21342](https://issues.apache.org/jira/browse/HIVE-21342)| +| Incompatible change in Hive bucket 
computation|[HIVE-21376](https://issues.apache.org/jira/browse/HIVE-21376)| +| Provide a fallback authorizer when no other authorizer is in use|[HIVE-20420](https://issues.apache.org/jira/browse/HIVE-20420)| +| Some alterPartitions invocations throw 'NumberFormatException: null'|[HIVE-18767](https://issues.apache.org/jira/browse/HIVE-18767)| +| HiveServer2: Preauthenticated subject for http transport isn't retained for entire duration of http communication in some cases|[HIVE-20555](https://issues.apache.org/jira/browse/HIVE-20555)| diff --git a/articles/hdinsight/hdinsight-scaling-best-practices.md b/articles/hdinsight/hdinsight-scaling-best-practices.md index 999294aac5515..183c9fbff7276 100644 --- a/articles/hdinsight/hdinsight-scaling-best-practices.md +++ b/articles/hdinsight/hdinsight-scaling-best-practices.md @@ -5,7 +5,7 @@ ms.author: ashish ms.service: hdinsight ms.topic: how-to ms.custom: seoapr2020 -ms.date: 04/29/2020 +ms.date: 06/09/2022 --- # Manually scale Azure HDInsight clusters diff --git a/articles/hdinsight/hdinsight-sdk-java-samples.md b/articles/hdinsight/hdinsight-sdk-java-samples.md index c8560405a686e..785ba3954ba86 100644 --- a/articles/hdinsight/hdinsight-sdk-java-samples.md +++ b/articles/hdinsight/hdinsight-sdk-java-samples.md @@ -4,7 +4,7 @@ description: Find Java examples on GitHub for common tasks using the HDInsight S ms.custom: devx-track-java ms.service: hdinsight ms.topic: sample -ms.date: 11/29/2019 +ms.date: 05/30/2022 --- # Azure HDInsight: Java samples @@ -37,4 +37,4 @@ You can get these samples for Java by cloning the [hdinsight-java-sdk-samples](h [!INCLUDE [hdinsight-sdk-additional-functionality](includes/hdinsight-sdk-additional-functionality.md)] -Code snippets for this additional SDK functionality can be found in the [HDInsight SDK for Java reference documentation](/java/api/overview/azure/hdinsight). \ No newline at end of file +Code snippets for this additional SDK functionality can be found in the [HDInsight SDK for Java reference documentation](/java/api/overview/azure/hdinsight). diff --git a/articles/hdinsight/hdinsight-sdk-python-samples.md b/articles/hdinsight/hdinsight-sdk-python-samples.md index 8c4d23dea1aa0..d9adef3ba18f0 100644 --- a/articles/hdinsight/hdinsight-sdk-python-samples.md +++ b/articles/hdinsight/hdinsight-sdk-python-samples.md @@ -3,7 +3,7 @@ title: 'Azure HDInsight: Python samples' description: Find Python examples on GitHub for common tasks using the HDInsight SDK for Python. ms.service: hdinsight ms.topic: sample -ms.date: 11/08/2019 +ms.date: 05/30/2022 ms.custom: devx-track-python --- @@ -40,4 +40,4 @@ You can get these samples for Python by cloning the [hdinsight-python-sdk-sample [!INCLUDE [hdinsight-sdk-additional-functionality](includes/hdinsight-sdk-additional-functionality.md)] -Code snippets for this additional SDK functionality can be found in the [HDInsight SDK for Python reference documentation](/python/api/overview/azure/hdinsight). \ No newline at end of file +Code snippets for this additional SDK functionality can be found in the [HDInsight SDK for Python reference documentation](/python/api/overview/azure/hdinsight). 
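As a quick illustration of the kind of operation those samples cover, a minimal sketch with the `azure-mgmt-hdinsight` package might look like the following; the subscription ID is a placeholder and authentication is assumed to use `DefaultAzureCredential`.

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.hdinsight import HDInsightManagementClient

# Create a management client for the subscription (placeholder ID).
client = HDInsightManagementClient(DefaultAzureCredential(), "<subscription-id>")

# Enumerate all HDInsight clusters in the subscription and print name and state.
for cluster in client.clusters.list():
    print(cluster.name, cluster.properties.cluster_state)
```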
diff --git a/articles/hdinsight/hdinsight-troubleshoot-failed-cluster.md b/articles/hdinsight/hdinsight-troubleshoot-failed-cluster.md index b0b64522605f1..876d4dce3706c 100644 --- a/articles/hdinsight/hdinsight-troubleshoot-failed-cluster.md +++ b/articles/hdinsight/hdinsight-troubleshoot-failed-cluster.md @@ -4,7 +4,7 @@ description: Diagnose and troubleshoot a slow or failing job on an Azure HDInsig ms.service: hdinsight ms.custom: hdinsightactive ms.topic: troubleshooting -ms.date: 08/15/2019 +ms.date: 06/09/2022 --- # Troubleshoot a slow or failing job on a HDInsight cluster @@ -259,4 +259,4 @@ To help diagnose the source of a cluster error, start a new cluster with the sam * [Analyze HDInsight Logs](./hdinsight-troubleshoot-guide.md) * [Access Apache Hadoop YARN application sign in Linux-based HDInsight](hdinsight-hadoop-access-yarn-app-logs-linux.md) * [Enable heap dumps for Apache Hadoop services on Linux-based HDInsight](hdinsight-hadoop-collect-debug-heap-dump-linux.md) -* [Known Issues for Apache Spark cluster on HDInsight](./spark/apache-spark-known-issues.md) \ No newline at end of file +* [Known Issues for Apache Spark cluster on HDInsight](./spark/apache-spark-known-issues.md) diff --git a/articles/hdinsight/hdinsight-use-external-metadata-stores.md b/articles/hdinsight/hdinsight-use-external-metadata-stores.md index 8e96e4c204557..3dd483daba19a 100644 --- a/articles/hdinsight/hdinsight-use-external-metadata-stores.md +++ b/articles/hdinsight/hdinsight-use-external-metadata-stores.md @@ -4,11 +4,14 @@ description: Use external metadata stores with Azure HDInsight clusters. ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive -ms.date: 05/05/2022 +ms.date: 06/08/2022 --- # Use external metadata stores in Azure HDInsight +> [!IMPORTANT] +> The default metastore provides a basic tier Azure SQL Database with only **5 DTU and 2 GB data max size (NOT UPGRADEABLE)**! Use this for QA and testing purposes only. **For production or large workloads, we recommend migrating to an external metastore!** + HDInsight allows you to take control of your data and metadata with external data stores. This feature is available for [Apache Hive metastore](#custom-metastore), [Apache Oozie metastore](#apache-oozie-metastore), and [Apache Ambari database](#custom-ambari-db). The Apache Hive metastore in HDInsight is an essential part of the Apache Hadoop architecture. A metastore is the central schema repository. The metastore is used by other big data access tools such as Apache Spark, Interactive Query (LLAP), Presto, or Apache Pig. HDInsight uses an Azure SQL Database as the Hive metastore. @@ -22,18 +25,17 @@ There are two ways you can set up a metastore for your HDInsight clusters: ## Default metastore -> [!IMPORTANT] -> The default metastore provides a basic tier Azure SQL Database with only **5 DTU and 2 GB data max size (NOT UPGRADEABLE)**! Use this for QA and testing purposes only. **For production or large workloads, we recommend migrating to an external metastore!** - By default, HDInsight creates a metastore with every cluster type. You can instead specify a custom metastore. The default metastore includes the following considerations: +* Limited resources. See notice at the top of the page. + * No additional cost. HDInsight creates a metastore with every cluster type without any additional cost to you. -* Each default metastore is part of the cluster lifecycle. When you delete a cluster, the corresponding metastore and metadata are also deleted. 
+* The default metastore is part of the cluster lifecycle. When you delete a cluster, the corresponding metastore and metadata are also deleted. -* You can't share the default metastore with other clusters. +* The default metastore is recommended only for simple workloads. Workloads that don't require multiple clusters and don't need metadata preserved beyond the cluster's lifecycle. -* Default metastore is recommended only for simple workloads. Workloads that don't require multiple clusters and don't need metadata preserved beyond the cluster's lifecycle. +* The default metastore can't be shared with other clusters. ## Custom metastore diff --git a/articles/hdinsight/hdinsight-use-oozie-linux-mac.md b/articles/hdinsight/hdinsight-use-oozie-linux-mac.md index 2301c868eea98..aa49280ce2412 100644 --- a/articles/hdinsight/hdinsight-use-oozie-linux-mac.md +++ b/articles/hdinsight/hdinsight-use-oozie-linux-mac.md @@ -4,7 +4,7 @@ description: Use Hadoop Oozie in Linux-based HDInsight. Learn how to define an O ms.service: hdinsight ms.topic: how-to ms.custom: seoapr2020 -ms.date: 04/27/2020 +ms.date: 05/09/2022 --- # Use Apache Oozie with Apache Hadoop to define and run a workflow on Linux-based Azure HDInsight @@ -653,4 +653,4 @@ In this article, you learned how to define an Oozie workflow and how to run an O * [Upload data for Apache Hadoop jobs in HDInsight](hdinsight-upload-data.md) * [Use Apache Sqoop with Apache Hadoop in HDInsight](hadoop/apache-hadoop-use-sqoop-mac-linux.md) * [Use Apache Hive with Apache Hadoop on HDInsight](hadoop/hdinsight-use-hive.md) -* [Troubleshoot Apache Oozie](./troubleshoot-oozie.md) \ No newline at end of file +* [Troubleshoot Apache Oozie](./troubleshoot-oozie.md) diff --git a/articles/hdinsight/hortonworks-release-notes.md b/articles/hdinsight/hortonworks-release-notes.md index 65470ce570d1e..ff0c0fff6d895 100644 --- a/articles/hdinsight/hortonworks-release-notes.md +++ b/articles/hdinsight/hortonworks-release-notes.md @@ -4,7 +4,7 @@ description: Learn the Apache Hadoop components and versions in Azure HDInsight. ms.service: hdinsight ms.topic: conceptual ms.custom: seoapr2020 -ms.date: 04/22/2020 +ms.date: 05/26/2022 --- # Hortonworks release notes associated with HDInsight versions @@ -55,4 +55,4 @@ The section provides links to release notes for the Hortonworks Data Platform di [hdp-1-3-0]: https://docs.hortonworks.com/HDPDocuments/HDP1/HDP-1.3.0/bk_releasenotes_hdp_1.x/content/ch_relnotes-hdp1.3.0_1.html -[hdp-1-1-0]: https://docs.hortonworks.com/HDPDocuments/HDP1/HDP-1.3.0/bk_releasenotes_hdp_1.x/content/ch_relnotes-hdp1.1.1.16_1.html \ No newline at end of file +[hdp-1-1-0]: https://docs.hortonworks.com/HDPDocuments/HDP1/HDP-1.3.0/bk_releasenotes_hdp_1.x/content/ch_relnotes-hdp1.1.1.16_1.html diff --git a/articles/hdinsight/interactive-query/apache-hive-replication.md b/articles/hdinsight/interactive-query/apache-hive-replication.md index c091946a0ead0..9c41ba4bd98c7 100644 --- a/articles/hdinsight/interactive-query/apache-hive-replication.md +++ b/articles/hdinsight/interactive-query/apache-hive-replication.md @@ -3,7 +3,7 @@ title: How to use Apache Hive replication in Azure HDInsight clusters description: Learn how to use Hive replication in HDInsight clusters to replicate the Hive metastore and the Azure Data Lake Storage Gen 2 data lake. 
ms.service: hdinsight ms.topic: conceptual -ms.date: 10/08/2020 +ms.date: 05/26/2022 --- # How to use Apache Hive replication in Azure HDInsight clusters @@ -219,4 +219,4 @@ To learn more about the items discussed in this article, see: - [Azure HDInsight business continuity](../hdinsight-business-continuity.md) - [Azure HDInsight business continuity architectures](../hdinsight-business-continuity-architecture.md) - [Azure HDInsight highly available solution architecture case study](../hdinsight-high-availability-case-study.md) -- [What is Apache Hive and HiveQL on Azure HDInsight?](../hadoop/hdinsight-use-hive.md) \ No newline at end of file +- [What is Apache Hive and HiveQL on Azure HDInsight?](../hadoop/hdinsight-use-hive.md) diff --git a/articles/hdinsight/interactive-query/apache-hive-warehouse-connector-zeppelin.md b/articles/hdinsight/interactive-query/apache-hive-warehouse-connector-zeppelin.md index 5dedbdb6dd8af..b8ef7a19c607b 100644 --- a/articles/hdinsight/interactive-query/apache-hive-warehouse-connector-zeppelin.md +++ b/articles/hdinsight/interactive-query/apache-hive-warehouse-connector-zeppelin.md @@ -5,7 +5,7 @@ author: nis-goel ms.author: nisgoel ms.service: hdinsight ms.topic: how-to -ms.date: 05/28/2020 +ms.date: 05/26/2022 --- # Integrate Apache Zeppelin with Hive Warehouse Connector in Azure HDInsight @@ -133,4 +133,4 @@ hive.executeQuery("select * from testers").show() * [HWC and Apache Spark operations](./apache-hive-warehouse-connector-operations.md) * [HWC integration with Apache Spark and Apache Hive](./apache-hive-warehouse-connector.md) -* [Use Interactive Query with HDInsight](./apache-interactive-query-get-started.md) \ No newline at end of file +* [Use Interactive Query with HDInsight](./apache-interactive-query-get-started.md) diff --git a/articles/hdinsight/interactive-query/hive-migration-across-storage-accounts.md b/articles/hdinsight/interactive-query/hive-migration-across-storage-accounts.md index d39af56edeb30..a7b958718e052 100644 --- a/articles/hdinsight/interactive-query/hive-migration-across-storage-accounts.md +++ b/articles/hdinsight/interactive-query/hive-migration-across-storage-accounts.md @@ -6,7 +6,7 @@ ms.author: kevx ms.reviewer: ms.service: hdinsight ms.topic: how-to -ms.date: 12/11/2020 +ms.date: 05/26/2022 --- # Hive workload migration to new account in Azure Storage diff --git a/articles/hdinsight/interactive-query/interactive-query-troubleshoot-tez-view-slow.md b/articles/hdinsight/interactive-query/interactive-query-troubleshoot-tez-view-slow.md index 5cf7119537414..0b0dcaaac2928 100644 --- a/articles/hdinsight/interactive-query/interactive-query-troubleshoot-tez-view-slow.md +++ b/articles/hdinsight/interactive-query/interactive-query-troubleshoot-tez-view-slow.md @@ -3,7 +3,7 @@ title: Apache Ambari Tez View loads slowly in Azure HDInsight description: Apache Ambari Tez View may load slowly or may not load at all in Azure HDInsight ms.service: hdinsight ms.topic: troubleshooting -ms.date: 04/06/2020 +ms.date: 05/26/2022 --- # Scenario: Apache Ambari Tez View loads slowly in Azure HDInsight diff --git a/articles/hdinsight/interactive-query/llap-schedule-based-autoscale-best-practices.md b/articles/hdinsight/interactive-query/llap-schedule-based-autoscale-best-practices.md index cb3dd6abbfa45..d0c0a91879076 100644 --- a/articles/hdinsight/interactive-query/llap-schedule-based-autoscale-best-practices.md +++ b/articles/hdinsight/interactive-query/llap-schedule-based-autoscale-best-practices.md @@ -1,5 +1,5 @@ --- -title: HDInsight 
Interactive Query Autoscale(Schedule-Based) Guide and Best Practices +title: HDInsight Interactive Query Autoscale (schedule-based) guide and best practices description: LLAP Autoscale Guide and Best Practices ms.service: hdinsight ms.topic: quickstart @@ -9,7 +9,7 @@ ms.reviewer: HDI HiveLLAP Team ms.date: 05/25/2022 --- -# Azure HDInsight Interactive Query Cluster (Hive LLAP) Schedule Based Autoscale +# Azure HDInsight Interactive Query cluster (Hive LLAP) schedule-based autoscale This document provides the onboarding steps to enable schedule-based autoscale for Interactive Query (LLAP) Cluster type in Azure HDInsight. It includes some of the best practices to operate Autoscale in Hive-LLAP. @@ -31,7 +31,7 @@ Feature Supportability with HDInsight 4.0 Interactive Query(LLAP) Autoscale ### **Interactive Query Cluster setup for Autoscale** -1. [Create an HDInsight Interactive Query Cluster.](/azure/hdinsight/hdinsight-hadoop-provision-linux-clusters) +1. [Create an HDInsight Interactive Query Cluster.](../hdinsight-hadoop-provision-linux-clusters.md) 2. Post successful creation of cluster, navigate to **Azure Portal** and apply the recommended Script Action ``` ``` -3. [Enable and Configure Schedule-Based Autoscale](/azure/hdinsight/hdinsight-autoscale-clusters#create-a-cluster-with-schedule-based-autoscaling) +3. [Enable and Configure Schedule-Based Autoscale](../hdinsight-autoscale-clusters.md#create-a-cluster-with-schedule-based-autoscaling) > [!NOTE] @@ -92,7 +92,7 @@ Disabling the WLM should be before the actual schedule of the scaling event and Each time the Interactive Query cluster scales, the Autoscale smart probe would perform a silent update of the number of LLAP Daemons and the Concurrency in the Ambari since these configurations are static. These configs are updated to make sure if autoscale is in disabled state or LLAP Service restarts for some reason. It utilizes all the worker nodes resized at that time. Explicit restart of services to handle these stale config changes isn't required. -### **Next Steps** +### **Next steps** If the above guidelines didn't resolve your query, visit one of the following. * Get answers from Azure experts through [Azure Community Support](https://azure.microsoft.com/support/community/). @@ -101,8 +101,8 @@ If the above guidelines didn't resolve your query, visit one of the following. * If you need more help, you can submit a support request from the [Azure portal](https://portal.azure.com/?#blade/Microsoft_Azure_Support/HelpAndSupportBlade/). Select **Support** from the menu bar or open the **Help + support** hub. For more detailed information, review [How to create an Azure support request](../../azure-portal/supportability/how-to-create-azure-support-request.md). Access to Subscription Management and billing support is included with your Microsoft Azure subscription, and Technical Support is provided through one of the [Azure Support Plans](https://azure.microsoft.com/support/plans/).
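As a supplement to the silent configuration updates described earlier, a rough sketch of reading the current Interactive Query configuration through the Ambari REST API with Python is shown below. The cluster name, credentials, and the specific property keys printed here are placeholders/assumptions and may differ by cluster version; the two-step pattern (read the desired config tag, then fetch that configuration version) follows the Ambari REST API.

```python
import requests

cluster = "<clustername>"          # placeholder HDInsight cluster name
auth = ("admin", "<password>")     # placeholder Ambari admin credentials
base = f"https://{cluster}.azurehdinsight.net/api/v1/clusters/{cluster}"

# Find the currently active tag for the hive-interactive-site configuration type.
desired = requests.get(f"{base}?fields=Clusters/desired_configs", auth=auth).json()
tag = desired["Clusters"]["desired_configs"]["hive-interactive-site"]["tag"]

# Fetch that configuration version and print a couple of LLAP-related properties.
conf = requests.get(
    f"{base}/configurations?type=hive-interactive-site&tag={tag}", auth=auth).json()
props = conf["items"][0]["properties"]
print(props.get("hive.server2.tez.sessions.per.default.queue"))  # query concurrency (assumed key)
print(props.get("hive.llap.daemon.num.executors"))               # executors per LLAP daemon (assumed key)
```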
-## **Other References:** - * [Interactive Query in Azure HDInsight](/azure/hdinsight/interactive-query/apache-interactive-query-get-started) - * [Create a cluster with Schedule-based Autoscaling](/azure/hdinsight/interactive-query/apache-interactive-query-get-started) - * [Azure HDInsight Interactive Query Cluster (Hive LLAP) sizing guide](/azure/hdinsight/interactive-query/hive-llap-sizing-guide) - * [Hive Warehouse Connector in Azure HDInsight](/azure/hdinsight/interactive-query/apache-hive-warehouse-connector) +## **Other references:** + * [Interactive Query in Azure HDInsight](./apache-interactive-query-get-started.md) + * [Create a cluster with Schedule-based Autoscaling](../hdinsight-autoscale-clusters.md#create-a-cluster-with-schedule-based-autoscaling) + * [Azure HDInsight Interactive Query Cluster (Hive LLAP) sizing guide](./hive-llap-sizing-guide.md) + * [Hive Warehouse Connector in Azure HDInsight](./apache-hive-warehouse-connector.md) diff --git a/articles/hdinsight/kafka/apache-kafka-connect-vpn-gateway.md b/articles/hdinsight/kafka/apache-kafka-connect-vpn-gateway.md index 6136eed770678..5ec1dee873382 100644 --- a/articles/hdinsight/kafka/apache-kafka-connect-vpn-gateway.md +++ b/articles/hdinsight/kafka/apache-kafka-connect-vpn-gateway.md @@ -4,7 +4,7 @@ description: Learn how to directly connect to Kafka on HDInsight through an Azur ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive, devx-track-python -ms.date: 03/04/2020 +ms.date: 05/30/2022 --- # Connect to Apache Kafka on HDInsight through an Azure Virtual Network @@ -336,8 +336,8 @@ To validate connectivity to Kafka, use the following steps to create and run a P * If you have __enabled name resolution through a custom DNS server__, replace the `kafka_broker` entries with the FQDN of the worker nodes. > [!NOTE] - > This code sends the string `test message` to the topic `testtopic`. The default configuration of Kafka on HDInsight is to create the topic if it does not exist. - + > This code sends the string `test message` to the topic `testtopic`. By default, Kafka on HDInsight is not configured to create the topic automatically if it does not exist. See [How to configure Apache Kafka on HDInsight to automatically create topics](./apache-kafka-auto-create-topics.md). Alternatively, you can create topics manually before producing messages. + 4. To retrieve the messages from Kafka, use the following Python code: ```python diff --git a/articles/hdinsight/kafka/apache-kafka-scalability.md b/articles/hdinsight/kafka/apache-kafka-scalability.md index 46dc900adbd6f..4006a81d4600f 100644 --- a/articles/hdinsight/kafka/apache-kafka-scalability.md +++ b/articles/hdinsight/kafka/apache-kafka-scalability.md @@ -40,8 +40,6 @@ To control the number of disks used by the worker nodes in a Kafka cluster, use ], ``` -You can find a complete template that demonstrates how to configure managed disks at [https://hditutorialdata.blob.core.windows.net/armtemplates/create-linux-based-kafka-mirror-cluster-in-vnet-v2.1.json](https://hditutorialdata.blob.core.windows.net/armtemplates/create-linux-based-kafka-mirror-cluster-in-vnet-v2.1.json). 
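The note added to `apache-kafka-connect-vpn-gateway.md` above states that Kafka on HDInsight does not create `testtopic` automatically and that the topic can be created manually before producing. A minimal sketch of doing that from the same client machine, assuming the `kafka-python` package; the tutorial's own snippets are not reproduced in this diff, so the client library and the broker address below are assumptions rather than part of the patch:

```python
# Hypothetical helper: create the Kafka topic before producing, since the
# updated note says Kafka on HDInsight does not auto-create topics by default.
# Assumes `pip install kafka-python`; the broker FQDN below is a placeholder.
from kafka.admin import KafkaAdminClient, NewTopic
from kafka.errors import TopicAlreadyExistsError

# Replace with the FQDNs or IP addresses of your Kafka worker nodes.
kafka_brokers = ["wn0-kafka.example.internal.cloudapp.net:9092"]

admin = KafkaAdminClient(bootstrap_servers=kafka_brokers)
try:
    # 3 partitions / replication factor 3 is a common starting point on HDInsight.
    admin.create_topics([NewTopic(name="testtopic", num_partitions=3, replication_factor=3)])
    print("Created topic 'testtopic'")
except TopicAlreadyExistsError:
    print("Topic 'testtopic' already exists")
finally:
    admin.close()
```

Run it from a host that can resolve and reach the broker nodes, for example over the virtual network or VPN gateway connection that the article describes.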
- ## Next steps For more information on working with Apache Kafka on HDInsight, see the following documents: diff --git a/articles/hdinsight/kafka/kafka-mirrormaker-2-0-guide.md b/articles/hdinsight/kafka/kafka-mirrormaker-2-0-guide.md index bf219995c7dc2..8f7b6e45af3b4 100644 --- a/articles/hdinsight/kafka/kafka-mirrormaker-2-0-guide.md +++ b/articles/hdinsight/kafka/kafka-mirrormaker-2-0-guide.md @@ -211,4 +211,4 @@ The implementation needs to be added to the Kafka classpath for the class refere [Apache Kafka 2.4 Documentation](https://kafka.apache.org/24/documentation.html) -[Connect an on-premises network to Azure](/azure/architecture/reference-architectures/hybrid-networking.md) +[Connect an on-premises network to Azure](/azure/architecture/reference-architectures/hybrid-networking) diff --git a/articles/hdinsight/kafka/tutorial-cli-rest-proxy.md b/articles/hdinsight/kafka/tutorial-cli-rest-proxy.md index 6822090a7e14d..0b835c16c85e4 100644 --- a/articles/hdinsight/kafka/tutorial-cli-rest-proxy.md +++ b/articles/hdinsight/kafka/tutorial-cli-rest-proxy.md @@ -50,13 +50,13 @@ If you don't have an Azure subscription, create a [free account](https://azure.m |storageAccount|Replace STORAGEACCOUNTNAME with a name for your new storage account.| |httpPassword|Replace PASSWORD with a password for the cluster login, **admin**.| |sshPassword|Replace PASSWORD with a password for the secure shell username, **sshuser**.| - |securityGroupName|Replace SECURITYGROUPNAME with the client AAD security group name for Kafka Rest Proxy. The variable will be passed to the `--kafka-client-group-name` parameter for `az-hdinsight-create`.| - |securityGroupID|Replace SECURITYGROUPID with the client AAD security group ID for Kafka Rest Proxy. The variable will be passed to the `--kafka-client-group-id` parameter for `az-hdinsight-create`.| + |securityGroupName|Replace SECURITYGROUPNAME with the client AAD security group name for Kafka REST Proxy. The variable will be passed to the `--kafka-client-group-name` parameter for `az-hdinsight-create`.| + |securityGroupID|Replace SECURITYGROUPID with the client AAD security group ID for Kafka REST Proxy. The variable will be passed to the `--kafka-client-group-id` parameter for `az-hdinsight-create`.| |storageContainer|Storage container the cluster will use, leave as-is for this tutorial. This variable will be set with the name of the cluster.| |workernodeCount|Number of worker nodes in the cluster, leave as-is for this tutorial. To guarantee high availability, Kafka requires a minimum of 3 worker nodes| |clusterType|Type of HDInsight cluster, leave as-is for this tutorial.| - |clusterVersion|HDInsight cluster version, leave as-is for this tutorial. Kafka Rest Proxy requires a minimum cluster version of 4.0.| - |componentVersion|Kafka version, leave as-is for this tutorial. Kafka Rest Proxy requires a minimum component version of 2.1.| + |clusterVersion|HDInsight cluster version, leave as-is for this tutorial. Kafka REST Proxy requires a minimum cluster version of 4.0.| + |componentVersion|Kafka version, leave as-is for this tutorial. Kafka REST Proxy requires a minimum component version of 2.1.| Update the variables with desired values. Then enter the CLI commands to set the environment variables. @@ -130,8 +130,8 @@ If you don't have an Azure subscription, create a [free account](https://azure.m |Parameter | Description| |---|---| |--kafka-management-node-size|The size of the node. 
This tutorial uses the value **Standard_D4_v2**.| - |--kafka-client-group-id|The client AAD security group ID for Kafka Rest Proxy. The value is passed from the variable **$securityGroupID**.| - |--kafka-client-group-name|The client AAD security group name for Kafka Rest Proxy. The value is passed from the variable **$securityGroupName**.| + |--kafka-client-group-id|The client AAD security group ID for Kafka REST Proxy. The value is passed from the variable **$securityGroupID**.| + |--kafka-client-group-name|The client AAD security group name for Kafka REST Proxy. The value is passed from the variable **$securityGroupName**.| |--version|The HDInsight cluster version must be at least 4.0. The value is passed from the variable **$clusterVersion**.| |--component-version|The Kafka version must be at least 2.1. The value is passed from the variable **$componentVersion**.| diff --git a/articles/hdinsight/media/hdinsight-release-notes/interactive-query-3-1-for-hdi-5-0.png b/articles/hdinsight/media/hdinsight-release-notes/interactive-query-3-1-for-hdi-5-0.png new file mode 100644 index 0000000000000..34c2b830ddf82 Binary files /dev/null and b/articles/hdinsight/media/hdinsight-release-notes/interactive-query-3-1-for-hdi-5-0.png differ diff --git a/articles/hdinsight/media/hdinsight-release-notes/spark-3-1-for-hdi-5-0.png b/articles/hdinsight/media/hdinsight-release-notes/spark-3-1-for-hdi-5-0.png new file mode 100644 index 0000000000000..75cad301a54ed Binary files /dev/null and b/articles/hdinsight/media/hdinsight-release-notes/spark-3-1-for-hdi-5-0.png differ diff --git a/articles/hdinsight/overview-azure-storage.md b/articles/hdinsight/overview-azure-storage.md index 4ad20df2154b0..a94e303081e69 100644 --- a/articles/hdinsight/overview-azure-storage.md +++ b/articles/hdinsight/overview-azure-storage.md @@ -4,7 +4,7 @@ description: Overview of Azure Storage in HDInsight. ms.service: hdinsight ms.topic: conceptual ms.custom: seoapr2020 -ms.date: 04/21/2020 +ms.date: 05/30/2022 --- # Azure Storage overview in HDInsight @@ -80,4 +80,4 @@ Certain MapReduce jobs and packages might create intermediate results that you w - [Introduction to Azure Storage](../storage/common/storage-introduction.md) - [Azure Data Lake Storage Gen1 overview](./overview-data-lake-storage-gen1.md) - [Use Azure storage with Azure HDInsight clusters](hdinsight-hadoop-use-blob-storage.md) -- [Use Azure Data Lake Storage Gen2 with Azure HDInsight clusters](hdinsight-hadoop-use-data-lake-storage-gen2.md) \ No newline at end of file +- [Use Azure Data Lake Storage Gen2 with Azure HDInsight clusters](hdinsight-hadoop-use-data-lake-storage-gen2.md) diff --git a/articles/hdinsight/overview-data-lake-storage-gen2.md b/articles/hdinsight/overview-data-lake-storage-gen2.md index 28c6b0bb9cb8e..477ecd14a3779 100644 --- a/articles/hdinsight/overview-data-lake-storage-gen2.md +++ b/articles/hdinsight/overview-data-lake-storage-gen2.md @@ -4,7 +4,7 @@ description: Overview of Data Lake Storage Gen2 in HDInsight. 
ms.service: hdinsight ms.topic: conceptual ms.custom: seoapr2020 -ms.date: 04/21/2020 +ms.date: 05/27/2022 --- # Azure Data Lake Storage Gen2 overview in HDInsight @@ -71,4 +71,4 @@ For more information, see [Use the Azure Data Lake Storage Gen2 URI](../storage/ * [Introduction to Azure Data Lake Storage Gen2](../storage/blobs/data-lake-storage-introduction.md) * [Introduction to Azure Storage](../storage/common/storage-introduction.md) -* [Azure Data Lake Storage Gen1 overview](./overview-data-lake-storage-gen1.md) \ No newline at end of file +* [Azure Data Lake Storage Gen1 overview](./overview-data-lake-storage-gen1.md) diff --git a/articles/hdinsight/r-server/breadcrumb/toc.yml b/articles/hdinsight/r-server/breadcrumb/toc.yml deleted file mode 100644 index cafc012ad7848..0000000000000 --- a/articles/hdinsight/r-server/breadcrumb/toc.yml +++ /dev/null @@ -1,15 +0,0 @@ -items: -- name: Azure - tocHref: /azure/ - topicHref: /azure/index - items: - - name: HDInsight - tocHref: /azure/hdinsight/ - topicHref: /azure/hdinsight/index - items: - - name: ML Services - tocHref: /azure/hdinsight/hadoop/ - topicHref: /azure/hdinsight/r-server/index - - name: ML Services - tocHref: /azure/hdinsight/domain-joined/ - topicHref: /azure/hdinsight/r-server/index \ No newline at end of file diff --git a/articles/hdinsight/r-server/machine-learning-services-quickstart-job-rconsole.md b/articles/hdinsight/r-server/machine-learning-services-quickstart-job-rconsole.md deleted file mode 100644 index b1cc74c98526c..0000000000000 --- a/articles/hdinsight/r-server/machine-learning-services-quickstart-job-rconsole.md +++ /dev/null @@ -1,192 +0,0 @@ ---- -title: 'Quickstart: R script on ML Services & R console - Azure HDInsight' -description: In the quickstart, you execute an R script on an ML Services cluster in Azure HDInsight using R console. -ms.service: hdinsight -ms.topic: quickstart -ms.date: 06/19/2019 -ROBOTS: NOINDEX -ms.custom: mode-ui -#Customer intent: I want to learn how to execute an R script using ML Services in Azure HDInsight for R console. ---- - -# Quickstart: Execute an R script on an ML Services cluster in Azure HDInsight using R console - -[!INCLUDE [retirement banner](../includes/ml-services-retirement.md)] - -ML Services on Azure HDInsight allows R scripts to use Apache Spark and Apache Hadoop MapReduce to run distributed computations. ML Services controls how calls are executed by setting the compute context. The edge node of a cluster provides a convenient place to connect to the cluster and to run your R scripts. With an edge node, you have the option of running the parallelized distributed functions of RevoScaleR across the cores of the edge node server. You can also run them across the nodes of the cluster by using RevoScaleR's Hadoop Map Reduce or Apache Spark compute contexts. - -In this quickstart, you learn how to run an R script with R console that demonstrates using Spark for distributed R computations. You will define a compute context to perform computations locally on an edge node, and again distributed across the nodes in the HDInsight cluster. - -## Prerequisites - -* An ML Services cluster on HDInsight. See [Create Apache Hadoop clusters using the Azure portal](../hdinsight-hadoop-create-linux-clusters-portal.md) and select **ML Services** for **Cluster type**. - -* An SSH client. For more information, see [Connect to HDInsight (Apache Hadoop) using SSH](../hdinsight-hadoop-linux-use-ssh-unix.md). - - -## Connect to R console - -1. 
Connect to the edge node of an ML Services HDInsight cluster using SSH. Edit the command below by replacing `CLUSTERNAME` with the name of your cluster, and then enter the command: - - ```cmd - ssh sshuser@CLUSTERNAME-ed-ssh.azurehdinsight.net - ``` - -1. From the SSH session, use the following command to start the R console: - - ``` - R - ``` - - You should see an output with the version of ML Server, in addition to other information. - - -## Use a compute context - -1. From the `>` prompt, you can enter R code. Use the following code to load example data into the default storage for HDInsight: - - ```R - # Set the HDFS (WASB) location of example data - bigDataDirRoot <- "/example/data" - - # create a local folder for storing data temporarily - source <- "/tmp/AirOnTimeCSV2012" - dir.create(source) - - # Download data to the tmp folder - remoteDir <- "https://packages.revolutionanalytics.com/datasets/AirOnTimeCSV2012" - download.file(file.path(remoteDir, "airOT201201.csv"), file.path(source, "airOT201201.csv")) - download.file(file.path(remoteDir, "airOT201202.csv"), file.path(source, "airOT201202.csv")) - download.file(file.path(remoteDir, "airOT201203.csv"), file.path(source, "airOT201203.csv")) - download.file(file.path(remoteDir, "airOT201204.csv"), file.path(source, "airOT201204.csv")) - download.file(file.path(remoteDir, "airOT201205.csv"), file.path(source, "airOT201205.csv")) - download.file(file.path(remoteDir, "airOT201206.csv"), file.path(source, "airOT201206.csv")) - download.file(file.path(remoteDir, "airOT201207.csv"), file.path(source, "airOT201207.csv")) - download.file(file.path(remoteDir, "airOT201208.csv"), file.path(source, "airOT201208.csv")) - download.file(file.path(remoteDir, "airOT201209.csv"), file.path(source, "airOT201209.csv")) - download.file(file.path(remoteDir, "airOT201210.csv"), file.path(source, "airOT201210.csv")) - download.file(file.path(remoteDir, "airOT201211.csv"), file.path(source, "airOT201211.csv")) - download.file(file.path(remoteDir, "airOT201212.csv"), file.path(source, "airOT201212.csv")) - - # Set directory in bigDataDirRoot to load the data into - inputDir <- file.path(bigDataDirRoot,"AirOnTimeCSV2012") - - # Make the directory - rxHadoopMakeDir(inputDir) - - # Copy the data from source to input - rxHadoopCopyFromLocal(source, bigDataDirRoot) - ``` - - This step may take around 10 minutes to complete. - -1. Create some data info and define two data sources. Enter the following code in the R console: - - ```R - # Define the HDFS (WASB) file system - hdfsFS <- RxHdfsFileSystem() - - # Create info list for the airline data - airlineColInfo <- list( - DAY_OF_WEEK = list(type = "factor"), - ORIGIN = list(type = "factor"), - DEST = list(type = "factor"), - DEP_TIME = list(type = "integer"), - ARR_DEL15 = list(type = "logical")) - - # get all the column names - varNames <- names(airlineColInfo) - - # Define the text data source in hdfs - airOnTimeData <- RxTextData(inputDir, colInfo = airlineColInfo, varsToKeep = varNames, fileSystem = hdfsFS) - - # Define the text data source in local system - airOnTimeDataLocal <- RxTextData(source, colInfo = airlineColInfo, varsToKeep = varNames) - - # formula to use - formula = "ARR_DEL15 ~ ORIGIN + DAY_OF_WEEK + DEP_TIME + DEST" - ``` - -1. Run a logistic regression over the data using the **local** compute context. 
Enter the following code in the R console: - - ```R - # Set a local compute context - rxSetComputeContext("local") - - # Run a logistic regression - system.time( - modelLocal <- rxLogit(formula, data = airOnTimeDataLocal) - ) - - # Display a summary - summary(modelLocal) - ``` - - The computations should complete in about 7 minutes. You should see output that ends with lines similar to the following snippet: - - ```output - Data: airOnTimeDataLocal (RxTextData Data Source) - File name: /tmp/AirOnTimeCSV2012 - Dependent variable(s): ARR_DEL15 - Total independent variables: 634 (Including number dropped: 3) - Number of valid observations: 6005381 - Number of missing observations: 91381 - -2*LogLikelihood: 5143814.1504 (Residual deviance on 6004750 degrees of freedom) - - Coefficients: - Estimate Std. Error z value Pr(>|z|) - (Intercept) -3.370e+00 1.051e+00 -3.208 0.00134 ** - ORIGIN=JFK 4.549e-01 7.915e-01 0.575 0.56548 - ORIGIN=LAX 5.265e-01 7.915e-01 0.665 0.50590 - ...... - DEST=SHD 5.975e-01 9.371e-01 0.638 0.52377 - DEST=TTN 4.563e-01 9.520e-01 0.479 0.63172 - DEST=LAR -1.270e+00 7.575e-01 -1.676 0.09364 . - DEST=BPT Dropped Dropped Dropped Dropped - - --- - - Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 - - Condition number of final variance-covariance matrix: 11904202 - Number of iterations: 7 - ``` - -1. Run the same logistic regression using the **Spark** context. The Spark context distributes the processing over all the worker nodes in the HDInsight cluster. Enter the following code in the R console: - - ```R - # Define the Spark compute context - mySparkCluster <- RxSpark() - - # Set the compute context - rxSetComputeContext(mySparkCluster) - - # Run a logistic regression - system.time( - modelSpark <- rxLogit(formula, data = airOnTimeData) - ) - - # Display a summary - summary(modelSpark) - ``` - - The computations should complete in about 5 minutes. - -1. To quit the R console, use the following command: - - ```R - quit() - ``` - -## Clean up resources - -After you complete the quickstart, you may want to delete the cluster. With HDInsight, your data is stored in Azure Storage, so you can safely delete a cluster when it is not in use. You are also charged for an HDInsight cluster, even when it is not in use. Since the charges for the cluster are many times more than the charges for storage, it makes economic sense to delete clusters when they are not in use. - -To delete a cluster, see [Delete an HDInsight cluster using your browser, PowerShell, or the Azure CLI](../hdinsight-delete-cluster.md). - -## Next steps - -In this quickstart, you learned how to run an R script with R console that demonstrated using Spark for distributed R computations. Advance to the next article to learn the options that are available to specify whether and how execution is parallelized across cores of the edge node or HDInsight cluster. 
- -> [!div class="nextstepaction"] ->[Compute context options for ML Services on HDInsight](./r-server-compute-contexts.md) diff --git a/articles/hdinsight/r-server/machine-learning-services-quickstart-job-rstudio.md b/articles/hdinsight/r-server/machine-learning-services-quickstart-job-rstudio.md deleted file mode 100644 index 7cf9c1dab75cb..0000000000000 --- a/articles/hdinsight/r-server/machine-learning-services-quickstart-job-rstudio.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: 'Quickstart: RStudio Server & ML Services for R - Azure HDInsight' -description: In the quickstart, you execute an R script on an ML Services cluster in Azure HDInsight using RStudio Server. -ms.service: hdinsight -ms.topic: quickstart -ms.date: 06/19/2019 -ROBOTS: NOINDEX -ms.custom: mode-other -#Customer intent: I want to learn how to execute an R script using ML Services in Azure HDInsight for RStudio Server. ---- - -# Quickstart: Execute an R script on an ML Services cluster in Azure HDInsight using RStudio Server - -[!INCLUDE [retirement banner](../includes/ml-services-retirement.md)] - -ML Services on Azure HDInsight allows R scripts to use Apache Spark and Apache Hadoop MapReduce to run distributed computations. ML Services controls how calls are executed by setting the compute context. The edge node of a cluster provides a convenient place to connect to the cluster and to run your R scripts. With an edge node, you have the option of running the parallelized distributed functions of RevoScaleR across the cores of the edge node server. You can also run them across the nodes of the cluster by using RevoScaleR's Hadoop Map Reduce or Apache Spark compute contexts. - -In this quickstart, you learn how to run an R script with RStudio Server that demonstrates using Spark for distributed R computations. You will define a compute context to perform computations locally on an edge node, and again distributed across the nodes in the HDInsight cluster. - -## Prerequisite - -An ML Services cluster on HDInsight. See [Create Apache Hadoop clusters using the Azure portal](../hdinsight-hadoop-create-linux-clusters-portal.md) and select **ML Services** for **Cluster type**. - -## Connect to RStudio Server - -RStudio Server runs on the cluster's edge node. Go to the following URL where `CLUSTERNAME` is the name of the ML Services cluster you created: - -``` -https://CLUSTERNAME.azurehdinsight.net/rstudio/ -``` - -The first time you sign in you need to authenticate twice. For the first authentication prompt, provide the cluster Admin login and password, default is `admin`. For the second authentication prompt, provide the SSH login and password, default is `sshuser`. Subsequent sign-ins only require the SSH credentials. - -Once you are connected, your screen should resemble the following screenshot: - -:::image type="content" source="./media/ml-services-quickstart-job-rstudio/connect-to-r-studio1.png" alt-text="R studio web console overviews" border="true"::: - -## Use a compute context - -1. 
From RStudio Server, use the following code to load example data into the default storage for HDInsight: - - ```RStudio - # Set the HDFS (WASB) location of example data - bigDataDirRoot <- "/example/data" - - # create a local folder for storing data temporarily - source <- "/tmp/AirOnTimeCSV2012" - dir.create(source) - - # Download data to the tmp folder - remoteDir <- "https://packages.revolutionanalytics.com/datasets/AirOnTimeCSV2012" - download.file(file.path(remoteDir, "airOT201201.csv"), file.path(source, "airOT201201.csv")) - download.file(file.path(remoteDir, "airOT201202.csv"), file.path(source, "airOT201202.csv")) - download.file(file.path(remoteDir, "airOT201203.csv"), file.path(source, "airOT201203.csv")) - download.file(file.path(remoteDir, "airOT201204.csv"), file.path(source, "airOT201204.csv")) - download.file(file.path(remoteDir, "airOT201205.csv"), file.path(source, "airOT201205.csv")) - download.file(file.path(remoteDir, "airOT201206.csv"), file.path(source, "airOT201206.csv")) - download.file(file.path(remoteDir, "airOT201207.csv"), file.path(source, "airOT201207.csv")) - download.file(file.path(remoteDir, "airOT201208.csv"), file.path(source, "airOT201208.csv")) - download.file(file.path(remoteDir, "airOT201209.csv"), file.path(source, "airOT201209.csv")) - download.file(file.path(remoteDir, "airOT201210.csv"), file.path(source, "airOT201210.csv")) - download.file(file.path(remoteDir, "airOT201211.csv"), file.path(source, "airOT201211.csv")) - download.file(file.path(remoteDir, "airOT201212.csv"), file.path(source, "airOT201212.csv")) - - # Set directory in bigDataDirRoot to load the data into - inputDir <- file.path(bigDataDirRoot,"AirOnTimeCSV2012") - - # Make the directory - rxHadoopMakeDir(inputDir) - - # Copy the data from source to input - rxHadoopCopyFromLocal(source, bigDataDirRoot) - ``` - - This step may take around 8 minutes to complete. - -1. Create some data info and define two data sources. Enter the following code in RStudio: - - ```RStudio - # Define the HDFS (WASB) file system - hdfsFS <- RxHdfsFileSystem() - - # Create info list for the airline data - airlineColInfo <- list( - DAY_OF_WEEK = list(type = "factor"), - ORIGIN = list(type = "factor"), - DEST = list(type = "factor"), - DEP_TIME = list(type = "integer"), - ARR_DEL15 = list(type = "logical")) - - # get all the column names - varNames <- names(airlineColInfo) - - # Define the text data source in hdfs - airOnTimeData <- RxTextData(inputDir, colInfo = airlineColInfo, varsToKeep = varNames, fileSystem = hdfsFS) - - # Define the text data source in local system - airOnTimeDataLocal <- RxTextData(source, colInfo = airlineColInfo, varsToKeep = varNames) - - # formula to use - formula = "ARR_DEL15 ~ ORIGIN + DAY_OF_WEEK + DEP_TIME + DEST" - ``` - -1. Run a logistic regression over the data using the **local** compute context. Enter the following code in RStudio: - - ```RStudio - # Set a local compute context - rxSetComputeContext("local") - - # Run a logistic regression - system.time( - modelLocal <- rxLogit(formula, data = airOnTimeDataLocal) - ) - - # Display a summary - summary(modelLocal) - ``` - - The computations should complete in about 7 minutes. 
You should see output that ends with lines similar to the following snippet: - - ```output - Data: airOnTimeDataLocal (RxTextData Data Source) - File name: /tmp/AirOnTimeCSV2012 - Dependent variable(s): ARR_DEL15 - Total independent variables: 634 (Including number dropped: 3) - Number of valid observations: 6005381 - Number of missing observations: 91381 - -2*LogLikelihood: 5143814.1504 (Residual deviance on 6004750 degrees of freedom) - - Coefficients: - Estimate Std. Error z value Pr(>|z|) - (Intercept) -3.370e+00 1.051e+00 -3.208 0.00134 ** - ORIGIN=JFK 4.549e-01 7.915e-01 0.575 0.56548 - ORIGIN=LAX 5.265e-01 7.915e-01 0.665 0.50590 - ...... - DEST=SHD 5.975e-01 9.371e-01 0.638 0.52377 - DEST=TTN 4.563e-01 9.520e-01 0.479 0.63172 - DEST=LAR -1.270e+00 7.575e-01 -1.676 0.09364 . - DEST=BPT Dropped Dropped Dropped Dropped - - --- - - Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 - - Condition number of final variance-covariance matrix: 11904202 - Number of iterations: 7 - ``` - -1. Run the same logistic regression using the **Spark** context. The Spark context distributes the processing over all the worker nodes in the HDInsight cluster. Enter the following code in RStudio: - - ```RStudio - # Define the Spark compute context - mySparkCluster <- RxSpark() - - # Set the compute context - rxSetComputeContext(mySparkCluster) - - # Run a logistic regression - system.time( - modelSpark <- rxLogit(formula, data = airOnTimeData) - ) - - # Display a summary - summary(modelSpark) - ``` - - The computations should complete in about 5 minutes. - -## Clean up resources - -After you complete the quickstart, you may want to delete the cluster. With HDInsight, your data is stored in Azure Storage, so you can safely delete a cluster when it is not in use. You are also charged for an HDInsight cluster, even when it is not in use. Since the charges for the cluster are many times more than the charges for storage, it makes economic sense to delete clusters when they are not in use. - -To delete a cluster, see [Delete an HDInsight cluster using your browser, PowerShell, or the Azure CLI](../hdinsight-delete-cluster.md). - -## Next steps - -In this quickstart, you learned how to run an R script with RStudio Server that demonstrated using Spark for distributed R computations. Advance to the next article to learn the options that are available to specify whether and how execution is parallelized across cores of the edge node or HDInsight cluster. - -> [!div class="nextstepaction"] ->[Compute context options for ML Services on HDInsight](./r-server-compute-contexts.md) - -> [!NOTE] -> This page describes features of RStudio software. Microsoft Azure HDInsight is not affiliated with RStudio, Inc. 
diff --git a/articles/hdinsight/r-server/media/ml-services-quickstart-job-rstudio/connect-to-r-studio1.png b/articles/hdinsight/r-server/media/ml-services-quickstart-job-rstudio/connect-to-r-studio1.png deleted file mode 100644 index 539edbf9c0b24..0000000000000 Binary files a/articles/hdinsight/r-server/media/ml-services-quickstart-job-rstudio/connect-to-r-studio1.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/ml-services-tutorial-spark-compute/hdinsight-rstudio-image.png b/articles/hdinsight/r-server/media/ml-services-tutorial-spark-compute/hdinsight-rstudio-image.png deleted file mode 100644 index 8236532ae702b..0000000000000 Binary files a/articles/hdinsight/r-server/media/ml-services-tutorial-spark-compute/hdinsight-rstudio-image.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/quickstart-resource-manager-template/azure-portal-delete-rserver.png b/articles/hdinsight/r-server/media/quickstart-resource-manager-template/azure-portal-delete-rserver.png deleted file mode 100644 index 1e5f98a4488f3..0000000000000 Binary files a/articles/hdinsight/r-server/media/quickstart-resource-manager-template/azure-portal-delete-rserver.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/quickstart-resource-manager-template/deploy-to-azure.png b/articles/hdinsight/r-server/media/quickstart-resource-manager-template/deploy-to-azure.png deleted file mode 100644 index e81f2c1c57331..0000000000000 Binary files a/articles/hdinsight/r-server/media/quickstart-resource-manager-template/deploy-to-azure.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/quickstart-resource-manager-template/resource-manager-template-rserver.png b/articles/hdinsight/r-server/media/quickstart-resource-manager-template/resource-manager-template-rserver.png deleted file mode 100644 index 11f7d5000c16e..0000000000000 Binary files a/articles/hdinsight/r-server/media/quickstart-resource-manager-template/resource-manager-template-rserver.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/r-server-hdinsight-manage/hdi-concurrent-users1.png b/articles/hdinsight/r-server/media/r-server-hdinsight-manage/hdi-concurrent-users1.png deleted file mode 100644 index 1b6a0b006f9dc..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-hdinsight-manage/hdi-concurrent-users1.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/r-server-hdinsight-manage/hdi-concurrent-users2.png b/articles/hdinsight/r-server/media/r-server-hdinsight-manage/hdi-concurrent-users2.png deleted file mode 100644 index a1ea2ae6f03a8..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-hdinsight-manage/hdi-concurrent-users2.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/r-server-hdinsight-manage/submit-script-action.png b/articles/hdinsight/r-server/media/r-server-hdinsight-manage/submit-script-action.png deleted file mode 100644 index 2f1926d256e26..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-hdinsight-manage/submit-script-action.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/r-server-operationalize/admin-util-one-box-1.png b/articles/hdinsight/r-server/media/r-server-operationalize/admin-util-one-box-1.png deleted file mode 100644 index 9bc321119ba7f..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-operationalize/admin-util-one-box-1.png and /dev/null differ diff --git 
a/articles/hdinsight/r-server/media/r-server-operationalize/admin-util-one-box-2.png b/articles/hdinsight/r-server/media/r-server-operationalize/admin-util-one-box-2.png deleted file mode 100644 index ea937c8a4dd8d..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-operationalize/admin-util-one-box-2.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/r-server-operationalize/admin-util-one-box-3.png b/articles/hdinsight/r-server/media/r-server-operationalize/admin-util-one-box-3.png deleted file mode 100644 index a553b2b41d707..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-operationalize/admin-util-one-box-3.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/r-server-operationalize/get-started-operationalization.png b/articles/hdinsight/r-server/media/r-server-operationalize/get-started-operationalization.png deleted file mode 100644 index 3862354ea68a0..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-operationalize/get-started-operationalization.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/r-server-operationalize/hdinsight-diagnostic1.png b/articles/hdinsight/r-server/media/r-server-operationalize/hdinsight-diagnostic1.png deleted file mode 100644 index c54815bee40d6..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-operationalize/hdinsight-diagnostic1.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/r-server-operationalize/hdinsight-diagnostic2.png b/articles/hdinsight/r-server/media/r-server-operationalize/hdinsight-diagnostic2.png deleted file mode 100644 index 923b3e0f8ce77..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-operationalize/hdinsight-diagnostic2.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/r-server-operationalize/hdinsight-diagnostic3.png b/articles/hdinsight/r-server/media/r-server-operationalize/hdinsight-diagnostic3.png deleted file mode 100644 index f6ecf907d1256..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-operationalize/hdinsight-diagnostic3.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/apache-spark-context.png b/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/apache-spark-context.png deleted file mode 100644 index 15421b088aacb..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/apache-spark-context.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/data-science-settings.png b/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/data-science-settings.png deleted file mode 100644 index e977141d12093..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/data-science-settings.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/execute-interactive1.png b/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/execute-interactive1.png deleted file mode 100644 index 3001193bd3f41..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/execute-interactive1.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/hdi-storage-containers.png b/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/hdi-storage-containers.png deleted 
file mode 100644 index 51276a1807c9b..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/hdi-storage-containers.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/hdinsight-copied-file.png b/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/hdinsight-copied-file.png deleted file mode 100644 index b10233ef87b89..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/hdinsight-copied-file.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/hdinsight-storage-accounts.png b/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/hdinsight-storage-accounts.png deleted file mode 100644 index e4f7fc55510ce..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/hdinsight-storage-accounts.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/install-r-tools-for-vs.png b/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/install-r-tools-for-vs.png deleted file mode 100644 index e6789464856be..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/install-r-tools-for-vs.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/successful-rx-commands.png b/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/successful-rx-commands.png deleted file mode 100644 index 2f496caf4f263..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/successful-rx-commands.png and /dev/null differ diff --git a/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/visual-studio-workspace.png b/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/visual-studio-workspace.png deleted file mode 100644 index 6d27aaac643a2..0000000000000 Binary files a/articles/hdinsight/r-server/media/r-server-submit-jobs-r-tools-vs/visual-studio-workspace.png and /dev/null differ diff --git a/articles/hdinsight/r-server/ml-services-tutorial-spark-compute.md b/articles/hdinsight/r-server/ml-services-tutorial-spark-compute.md deleted file mode 100644 index d804ca8510f69..0000000000000 --- a/articles/hdinsight/r-server/ml-services-tutorial-spark-compute.md +++ /dev/null @@ -1,364 +0,0 @@ ---- -title: 'Tutorial: Use R in a Spark compute context in Azure HDInsight' -description: Tutorial - Get started with R and Spark on an Azure HDInsight Machine Learning services cluster. -ms.service: hdinsight -ms.topic: tutorial -ms.date: 06/21/2019 -ROBOTS: NOINDEX -#Customer intent: As a developer, I need to understand the Spark compute context for Machine Learning services. ---- - -# Tutorial: Use R in a Spark compute context in Azure HDInsight - -[!INCLUDE [retirement banner](../includes/ml-services-retirement.md)] - -This tutorial provides a step-by-step introduction to using the R functions in Apache Spark that run on an Azure HDInsight Machine Learning services cluster. - -In this tutorial, you learn how to: - -> [!div class="checklist"] -> * Download the sample data to local storage -> * Copy the data to default storage -> * Set up a dataset -> * Create data sources -> * Create a compute context for Spark -> * Fit a linear model -> * Use composite XDF files -> * Convert XDF to CSV - -## Prerequisites - -* An Azure HDInsight Machine Learning services cluster. 
Go to [Create Apache Hadoop clusters by using the Azure portal](../hdinsight-hadoop-create-linux-clusters-portal.md) and, for **Cluster type**, select **ML Services**. - -## Connect to RStudio Server - -RStudio Server runs on the cluster's edge node. Go to the following site (where *CLUSTERNAME* in the URL is the name of the HDInsight Machine Learning services cluster you created): - -``` -https://CLUSTERNAME.azurehdinsight.net/rstudio/ -``` - -The first time you sign in, you authenticate twice. At the first authentication prompt, provide the cluster admin username and password (the default is *admin*). At the second authentication prompt, provide the SSH username and password (the default is *sshuser*). Subsequent sign-ins require only the SSH credentials. - -## Download the sample data to local storage - -The *Airline 2012 On-Time Data Set* consists of 12 comma-separated files that contain flight arrival and departure details for all commercial flights within the US for the year 2012. This dataset is large, with over 6 million observations. - -1. Initialize a few environment variables. In the RStudio Server console, enter the following code: - - ```R - bigDataDirRoot <- "/tutorial/data" # root directory on cluster default storage - localDir <- "/tmp/AirOnTimeCSV2012" # directory on edge node - remoteDir <- "https://packages.revolutionanalytics.com/datasets/AirOnTimeCSV2012" # location of data - ``` - -1. In the right pane, select the **Environment** tab. The variables are displayed under **Values**. - - :::image type="content" source="./media/ml-services-tutorial-spark-compute/hdinsight-rstudio-image.png" alt-text="HDInsight R studio web console" border="true"::: - -1. Create a local directory, and download the sample data. In RStudio, enter the following code: - - ```R - # Create local directory - dir.create(localDir) - - # Download data to the tmp folder(local) - download.file(file.path(remoteDir, "airOT201201.csv"), file.path(localDir, "airOT201201.csv")) - download.file(file.path(remoteDir, "airOT201202.csv"), file.path(localDir, "airOT201202.csv")) - download.file(file.path(remoteDir, "airOT201203.csv"), file.path(localDir, "airOT201203.csv")) - download.file(file.path(remoteDir, "airOT201204.csv"), file.path(localDir, "airOT201204.csv")) - download.file(file.path(remoteDir, "airOT201205.csv"), file.path(localDir, "airOT201205.csv")) - download.file(file.path(remoteDir, "airOT201206.csv"), file.path(localDir, "airOT201206.csv")) - download.file(file.path(remoteDir, "airOT201207.csv"), file.path(localDir, "airOT201207.csv")) - download.file(file.path(remoteDir, "airOT201208.csv"), file.path(localDir, "airOT201208.csv")) - download.file(file.path(remoteDir, "airOT201209.csv"), file.path(localDir, "airOT201209.csv")) - download.file(file.path(remoteDir, "airOT201210.csv"), file.path(localDir, "airOT201210.csv")) - download.file(file.path(remoteDir, "airOT201211.csv"), file.path(localDir, "airOT201211.csv")) - download.file(file.path(remoteDir, "airOT201212.csv"), file.path(localDir, "airOT201212.csv")) - ``` - - The download should be complete in about 9.5 minutes. - -## Copy the data to default storage - -The Hadoop Distributed File System (HDFS) location is specified with the `airDataDir` variable. 
In RStudio, enter the following code: - -```R -# Set directory in bigDataDirRoot to load the data into -airDataDir <- file.path(bigDataDirRoot,"AirOnTimeCSV2012") - -# Create directory (default storage) -rxHadoopMakeDir(airDataDir) - -# Copy data from local storage to default storage -rxHadoopCopyFromLocal(localDir, bigDataDirRoot) - -# Optional. Verify files -rxHadoopListFiles(airDataDir) -``` - -The step should be complete in about 10 seconds. - -## Set up a dataset - -1. Create a file system object that uses the default values. In RStudio, enter the following code: - - ```R - # Define the HDFS (WASB) file system - hdfsFS <- RxHdfsFileSystem() - ``` - -1. Because the original CSV files have rather unwieldy variable names, you supply a *colInfo* list to make them more manageable. In RStudio, enter the following code: - - ```R - airlineColInfo <- list( - MONTH = list(newName = "Month", type = "integer"), - DAY_OF_WEEK = list(newName = "DayOfWeek", type = "factor", - levels = as.character(1:7), - newLevels = c("Mon", "Tues", "Wed", "Thur", "Fri", "Sat", - "Sun")), - UNIQUE_CARRIER = list(newName = "UniqueCarrier", type = - "factor"), - ORIGIN = list(newName = "Origin", type = "factor"), - DEST = list(newName = "Dest", type = "factor"), - CRS_DEP_TIME = list(newName = "CRSDepTime", type = "integer"), - DEP_TIME = list(newName = "DepTime", type = "integer"), - DEP_DELAY = list(newName = "DepDelay", type = "integer"), - DEP_DELAY_NEW = list(newName = "DepDelayMinutes", type = - "integer"), - DEP_DEL15 = list(newName = "DepDel15", type = "logical"), - DEP_DELAY_GROUP = list(newName = "DepDelayGroups", type = - "factor", - levels = as.character(-2:12), - newLevels = c("< -15", "-15 to -1","0 to 14", "15 to 29", - "30 to 44", "45 to 59", "60 to 74", - "75 to 89", "90 to 104", "105 to 119", - "120 to 134", "135 to 149", "150 to 164", - "165 to 179", ">= 180")), - ARR_DELAY = list(newName = "ArrDelay", type = "integer"), - ARR_DELAY_NEW = list(newName = "ArrDelayMinutes", type = - "integer"), - ARR_DEL15 = list(newName = "ArrDel15", type = "logical"), - AIR_TIME = list(newName = "AirTime", type = "integer"), - DISTANCE = list(newName = "Distance", type = "integer"), - DISTANCE_GROUP = list(newName = "DistanceGroup", type = - "factor", - levels = as.character(1:11), - newLevels = c("< 250", "250-499", "500-749", "750-999", - "1000-1249", "1250-1499", "1500-1749", "1750-1999", - "2000-2249", "2250-2499", ">= 2500"))) - - varNames <- names(airlineColInfo) - ``` - -## Create data sources - -In a Spark compute context, you can create data sources by using the following functions: - -|Function | Description | -|---------|-------------| -|`RxTextData` | A comma-delimited text data source. | -|`RxXdfData` | Data in the XDF data file format. In RevoScaleR, the XDF file format is modified for Hadoop to store data in a composite set of files rather than a single file. | -|`RxHiveData` | Generates a Hive Data Source object.| -|`RxParquetData` | Generates a Parquet Data Source object.| -|`RxOrcData` | Generates an Orc Data Source object.| - -Create an [RxTextData](/machine-learning-server/r-reference/revoscaler/rxtextdata) object by using the files you copied to HDFS. 
In RStudio, enter the following code: - -```R -airDS <- RxTextData( airDataDir, - colInfo = airlineColInfo, - varsToKeep = varNames, - fileSystem = hdfsFS ) -``` - -## Create a compute context for Spark - -To load data and run analyses on worker nodes, you set the compute context in your script to [RxSpark](/machine-learning-server/r-reference/revoscaler/rxspark). In this context, R functions automatically distribute the workload across all the worker nodes, with no built-in requirement for managing jobs or the queue. The Spark compute context is established through `RxSpark` or `rxSparkConnect()` to create the Spark compute context, and it uses `rxSparkDisconnect()` to return to a local compute context. In RStudio, enter the following code: - -```R -# Define the Spark compute context -mySparkCluster <- RxSpark() - -# Set the compute context -rxSetComputeContext(mySparkCluster) -``` - -## Fit a linear model - -1. Use the [rxLinMod](/machine-learning-server/r-reference/revoscaler/rxlinmod) function to fit a linear model using your `airDS` data source. In RStudio, enter the following code: - - ```R - system.time( - delayArr <- rxLinMod(ArrDelay ~ DayOfWeek, data = airDS, - cube = TRUE) - ) - ``` - - This step should be complete in 2 to 3 minutes. - -1. View the results. In RStudio, enter the following code: - - ```R - summary(delayArr) - ``` - - You should see the following results: - - ```output - Call: - rxLinMod(formula = ArrDelay ~ DayOfWeek, data = airDS, cube = TRUE) - - Cube Linear Regression Results for: ArrDelay ~ DayOfWeek - Data: airDataXdf (RxXdfData Data Source) - File name: /tutorial/data/AirOnTimeCSV2012 - Dependent variable(s): ArrDelay - Total independent variables: 7 - Number of valid observations: 6005381 - Number of missing observations: 91381 - - Coefficients: - Estimate Std. Error t value Pr(>|t|) | Counts - DayOfWeek=Mon 3.54210 0.03736 94.80 2.22e-16 *** | 901592 - DayOfWeek=Tues 1.80696 0.03835 47.12 2.22e-16 *** | 855805 - DayOfWeek=Wed 2.19424 0.03807 57.64 2.22e-16 *** | 868505 - DayOfWeek=Thur 4.65502 0.03757 123.90 2.22e-16 *** | 891674 - DayOfWeek=Fri 5.64402 0.03747 150.62 2.22e-16 *** | 896495 - DayOfWeek=Sat 0.91008 0.04144 21.96 2.22e-16 *** | 732944 - DayOfWeek=Sun 2.82780 0.03829 73.84 2.22e-16 *** | 858366 - --- - Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 - - Residual standard error: 35.48 on 6005374 degrees of freedom - Multiple R-squared: 0.001827 (as if intercept included) - Adjusted R-squared: 0.001826 - F-statistic: 1832 on 6 and 6005374 DF, p-value: < 2.2e-16 - Condition number: 1 - ``` - - The results indicate that you've processed all the data, 6 million observations, using all the CSV files in the specified directory. Because you specified `cube = TRUE`, you have an estimated coefficient for each day of the week (and not the intercept). - -## Use composite XDF files - -As you've seen, you can analyze CSV files directly with R on Hadoop. But you can do the analysis more quickly if you store the data in a more efficient format. The R XDF file format is efficient, but it's modified somewhat for HDFS so that individual files remain within a single HDFS block. (The HDFS block size varies from installation to installation but is typically either 64 MB or 128 MB.) 
- -When you use [rxImport](/machine-learning-server/r-reference/revoscaler/rximport) on Hadoop to create a set of composite XDF files, you specify an `RxTextData` data source such as `AirDS` as the inData and an `RxXdfData` data source with fileSystem set to an HDFS file system as the outFile argument. You can then use the `RxXdfData` object as the data argument in subsequent R analyses. - -1. Define an `RxXdfData` object. In RStudio, enter the following code: - - ```R - airDataXdfDir <- file.path(bigDataDirRoot,"AirOnTimeXDF2012") - - airDataXdf <- RxXdfData( airDataXdfDir, - fileSystem = hdfsFS ) - ``` - -1. Set a block size of 250000 rows and specify that we read all the data. In RStudio, enter the following code: - - ```R - blockSize <- 250000 - numRowsToRead = -1 - ``` - -1. Import the data using `rxImport`. In RStudio, enter the following code: - - ```R - rxImport(inData = airDS, - outFile = airDataXdf, - rowsPerRead = blockSize, - overwrite = TRUE, - numRows = numRowsToRead ) - ``` - - This step should be complete in a few minutes. - -1. Re-estimate the same linear model, using the new, faster data source. In RStudio, enter the following code: - - ```R - system.time( - delayArr <- rxLinMod(ArrDelay ~ DayOfWeek, data = airDataXdf, - cube = TRUE) - ) - ``` - - The step should be complete in less than a minute. - -1. View the results. The results should be the same as from the CSV files. In RStudio, enter the following code: - - ```R - summary(delayArr) - ``` - -## Convert XDF to CSV - -### In a Spark context - -If you converted your CSV files to XDF file format for greater efficiency while running the analyses, but now want to convert your data back to CSV, you can do so by using [rxDataStep](/machine-learning-server/r-reference/revoscaler/rxdatastep). - -To create a folder of CSV files, first create an `RxTextData` object by using a directory name as the file argument. This object represents the folder in which to create the CSV files. This directory is created when you run the `rxDataStep`. Then, point to this `RxTextData` object in the `outFile` argument of the `rxDataStep`. Each CSV that's created is named based on the directory name and followed by a number. - -Suppose that you want to write out a folder of CSV files in HDFS from your `airDataXdf` composite XDF after you perform the logistic regression and prediction, so that the new CSV files contain the predicted values and residuals. In RStudio, enter the following code: - -```R -airDataCsvDir <- file.path(bigDataDirRoot,"AirDataCSV2012") -airDataCsvDS <- RxTextData(airDataCsvDir,fileSystem=hdfsFS) -rxDataStep(inData=airDataXdf, outFile=airDataCsvDS) -``` - -This step should be complete in about 2.5 minutes. - -The `rxDataStep` wrote out one CSV file for every XDFD file in the input composite XDF file. This is the default behavior for writing CSV files from composite XDF files to HDFS when the compute context is set to `RxSpark`. - -### In a local context - -Alternatively, when you're done performing your analyses, you could switch your compute context back to `local` to take advantage of two arguments within `RxTextData` that give you slightly more control when you write out CSV files to HDFS: `createFileSet` and `rowsPerOutFile`. When you set `createFileSet` to `TRUE`, a folder of CSV files is written to the directory that you specify. When you set `createFileSet` to `FALSE`, a single CSV file is written. 
You can set the second argument, `rowsPerOutFile`, to an integer to indicate how many rows to write to each CSV file when `createFileSet` is `TRUE`. - -In RStudio, enter the following code: - -```R -rxSetComputeContext("local") -airDataCsvRowsDir <- file.path(bigDataDirRoot,"AirDataCSVRows2012") -airDataCsvRowsDS <- RxTextData(airDataCsvRowsDir, fileSystem=hdfsFS, createFileSet=TRUE, rowsPerOutFile=1000000) -rxDataStep(inData=airDataXdf, outFile=airDataCsvRowsDS) -``` - -This step should be complete in about 10 minutes. - -When you use an `RxSpark` compute context, `createFileSet` defaults to `TRUE` and `rowsPerOutFile` has no effect. Therefore, if you want to create a single CSV or customize the number of rows per file, perform `rxDataStep` in a `local` compute context (the data can still be in HDFS). - -## Final steps - -1. Clean up the data. In RStudio, enter the following code: - - ```R - rxHadoopRemoveDir(airDataDir) - rxHadoopRemoveDir(airDataXdfDir) - rxHadoopRemoveDir(airDataCsvDir) - rxHadoopRemoveDir(airDataCsvRowsDir) - rxHadoopRemoveDir(bigDataDirRoot) - ``` - -1. Stop the remote Spark application. In RStudio, enter the following code: - - ```R - rxStopEngine(mySparkCluster) - ``` - -1. Quit the R session. In RStudio, enter the following code: - - ```R - quit() - ``` - -## Clean up resources - -After you complete the tutorial, you might want to delete the cluster. With HDInsight, your data is stored in Azure Storage, so you can safely delete a cluster when it's not in use. You're also charged for an HDInsight cluster, even when it's not in use. Because the charges for the cluster are many times more than the charges for storage, it makes economic sense to delete clusters when they're not in use. - -To delete a cluster, see [Delete an HDInsight cluster by using your browser, PowerShell, or the Azure CLI](../hdinsight-delete-cluster.md). - -## Next steps - -In this tutorial, you learned how to use R functions in Apache Spark that are running on an HDInsight Machine Learning services cluster. For more information, see the following articles: - -* [Compute context options for an Azure HDInsight Machine Learning services cluster](r-server-compute-contexts.md) -* [R Functions for Spark on Hadoop](/machine-learning-server/r-reference/revoscaler/revoscaler-hadoop-functions) \ No newline at end of file diff --git a/articles/hdinsight/r-server/quickstart-resource-manager-template.md b/articles/hdinsight/r-server/quickstart-resource-manager-template.md deleted file mode 100644 index da0b1fbcf2833..0000000000000 --- a/articles/hdinsight/r-server/quickstart-resource-manager-template.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: 'Quickstart: Create ML Services cluster using template - Azure HDInsight' -description: This quickstart shows how to use Resource Manager template to create an ML Services cluster in Azure HDInsight. -ms.service: hdinsight -ms.topic: quickstart -ms.custom: subject-armqs, mode-arm -ms.date: 03/13/2020 -ROBOTS: NOINDEX -#Customer intent: As a developer new to ML Services on Azure, I need to see how to create an ML Services cluster. ---- - -# Quickstart: Create ML Services cluster in Azure HDInsight using ARM template - -[!INCLUDE [retirement banner](../includes/ml-services-retirement.md)] - -In this quickstart, you use an Azure Resource Manager template (ARM template) to create an [ML Services](./r-server-overview.md) cluster in Azure HDInsight. Microsoft Machine Learning Server is available as a deployment option when you create HDInsight clusters in Azure. 
The cluster type that provides this option is called ML Services. This capability provides data scientists, statisticians, and R programmers with on-demand access to scalable, distributed methods of analytics on HDInsight. - -[!INCLUDE [About Azure Resource Manager](../../../includes/resource-manager-quickstart-introduction.md)] - -If your environment meets the prerequisites and you're familiar with using ARM templates, select the **Deploy to Azure** button. The template will open in the Azure portal. - -[:::image type="icon" source="../../media/template-deployments/deploy-to-azure.svg" alt-text="Deploy to Azure":::](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-quickstart-templates%2Fmaster%2Fquickstarts%2Fmicrosoft.hdinsight%2Fhdinsight-rserver%2Fazuredeploy.json) - -## Prerequisites - -If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. - -## Review the template - -The template used in this quickstart is from [Azure Quickstart Templates](https://azure.microsoft.com/resources/templates/hdinsight-rserver/). - -:::code language="json" source="~/quickstart-templates/quickstarts/microsoft.hdinsight/hdinsight-rserver/azuredeploy.json"::: - -Two Azure resources are defined in the template: - -* [Microsoft.Storage/storageAccounts](/azure/templates/microsoft.storage/storageaccounts): create an Azure Storage Account. -* [Microsoft.HDInsight/cluster](/azure/templates/microsoft.hdinsight/clusters): create an HDInsight cluster. - -## Deploy the template - -1. Select the **Deploy to Azure** button below to sign in to Azure and open the ARM template. - - [:::image type="icon" source="../../media/template-deployments/deploy-to-azure.svg" alt-text="Deploy to Azure":::](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-quickstart-templates%2Fmaster%2Fquickstarts%2Fmicrosoft.hdinsight%2Fhdinsight-rserver%2Fazuredeploy.json) - -1. Enter or select the following values: - - |Property |Description | - |---|---| - |Subscription|From the drop-down list, select the Azure subscription that's used for the cluster.| - |Resource group|From the drop-down list, select your existing resource group, or select **Create new**.| - |Location|The value will autopopulate with the location used for the resource group.| - |Cluster Name|Enter a globally unique name. For this template, use only lowercase letters, and numbers.| - |Cluster Login User Name|Provide the username, default is **admin**.| - |Cluster Login Password|Provide a password. The password must be at least 10 characters in length and must contain at least one digit, one uppercase, and one lower case letter, one non-alphanumeric character (except characters ' " ` ). | - |Ssh User Name|Provide the username, default is sshuser| - |Ssh Password|Provide the password.| - - :::image type="content" source="./media/quickstart-resource-manager-template/resource-manager-template-rserver.png" alt-text="Deploy Resource Manager template HBase" border="true"::: - -1. Review the **TERMS AND CONDITIONS**. Then select **I agree to the terms and conditions stated above**, then **Purchase**. You'll receive a notification that your deployment is in progress. It takes about 20 minutes to create a cluster. - -## Review deployed resources - -Once the cluster is created, you'll receive a **Deployment succeeded** notification with a **Go to resource** link. 
Your Resource group page will list your new HDInsight cluster and the default storage associated with the cluster. Each cluster has an [Azure Blob Storage](../hdinsight-hadoop-use-blob-storage.md) account, an [Azure Data Lake Storage Gen1](../hdinsight-hadoop-use-data-lake-storage-gen1.md), or an [`Azure Data Lake Storage Gen2`](../hdinsight-hadoop-use-data-lake-storage-gen2.md) dependency. It's referred to as the default storage account. The HDInsight cluster and its default storage account must be colocated in the same Azure region. Deleting clusters doesn't delete the storage account. - -## Clean up resources - -After you complete the quickstart, you may want to delete the cluster. With HDInsight, your data is stored in Azure Storage, so you can safely delete a cluster when it isn't in use. You're also charged for an HDInsight cluster, even when it isn't in use. Since the charges for the cluster are many times more than the charges for storage, it makes economic sense to delete clusters when they aren't in use. - -From the Azure portal, navigate to your cluster, and select **Delete**. - -![Delete ML Services cluster from the Azure portal](./media/quickstart-resource-manager-template/azure-portal-delete-rserver.png) - -You can also select the resource group name to open the resource group page, and then select **Delete resource group**. By deleting the resource group, you delete both the HDInsight cluster and the default storage account. - -## Next steps - -In this quickstart, you learned how to create an ML Services cluster in HDInsight using an ARM template. In the next article, you learn how to run an R script with RStudio Server that demonstrates using Spark for distributed R computations. - -> [!div class="nextstepaction"] -> [Execute an R script on an ML Services cluster in Azure HDInsight using RStudio Server](./machine-learning-services-quickstart-job-rstudio.md) diff --git a/articles/hdinsight/r-server/r-server-compute-contexts.md b/articles/hdinsight/r-server/r-server-compute-contexts.md deleted file mode 100644 index 2a9430aa51964..0000000000000 --- a/articles/hdinsight/r-server/r-server-compute-contexts.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: Compute context options for ML Services on HDInsight - Azure -description: Learn about the different compute context options available to users with ML Services on HDInsight -ms.service: hdinsight -ms.topic: how-to -ms.date: 01/02/2020 -ROBOTS: NOINDEX ---- - -# Compute context options for ML Services on HDInsight - -[!INCLUDE [retirement banner](../includes/ml-services-retirement.md)] - -ML Services on Azure HDInsight controls how calls are executed by setting the compute context. This article outlines the options that are available to specify whether and how execution is parallelized across cores of the edge node or HDInsight cluster. - -The edge node of a cluster provides a convenient place to connect to the cluster and to run your R scripts. With an edge node, you have the option of running the parallelized distributed functions of RevoScaleR across the cores of the edge node server. You can also run them across the nodes of the cluster by using RevoScaleR's Hadoop Map Reduce or Apache Spark compute contexts. - -## ML Services on Azure HDInsight - -[ML Services on Azure HDInsight](r-server-overview.md) provides the latest capabilities for R-based analytics.
It can use data that is stored in an Apache Hadoop HDFS container in your [Azure Blob](../../storage/common/storage-introduction.md "Azure Blob storage") storage account, a Data Lake Store, or the local Linux file system. Since ML Services is built on open-source R, the R-based applications you build can apply any of the 8000+ open-source R packages. They can also use the routines in [RevoScaleR](/machine-learning-server/r-reference/revoscaler/revoscaler), Microsoft's big data analytics package that is included with ML Services. - -## Compute contexts for an edge node - -In general, an R script that's run in an ML Services cluster on the edge node runs within the R interpreter on that node. The exceptions are those steps that call a RevoScaleR function. The RevoScaleR calls run in a compute environment that is determined by how you set the RevoScaleR compute context. When you run your R script from an edge node, the possible values of the compute context are: - -- local sequential (*local*) -- local parallel (*localpar*) -- Map Reduce -- Spark - -The *local* and *localpar* options differ only in how **rxExec** calls are executed. They both execute other rx-function calls in a parallel manner across all available cores unless specified otherwise through use of the RevoScaleR **numCoresToUse** option, for example `rxOptions(numCoresToUse=6)`. Parallel execution options offer optimal performance. - -The following table summarizes the various compute context options to set how calls are executed: - -| Compute context | How to set | Execution context | -| ---------------- | ------------------------------- | ---------------------------------------- | -| Local sequential | rxSetComputeContext('local') | Parallelized execution across the cores of the edge node server, except for rxExec calls, which are executed serially | -| Local parallel | rxSetComputeContext('localpar') | Parallelized execution across the cores of the edge node server | -| Spark | RxSpark() | Parallelized distributed execution via Spark across the nodes of the HDI cluster | -| Map Reduce | RxHadoopMR() | Parallelized distributed execution via Map Reduce across the nodes of the HDI cluster | - -## Guidelines for deciding on a compute context - -Which of the three parallelized execution options you choose depends on the nature of your analytics work, and on the size and location of your data. There's no simple formula that tells you which compute context to use. There are, however, some guiding principles that can help you make the right choice, or, at least, help you narrow down your choices before you run a benchmark. These guiding principles include: - -- The local Linux file system is faster than HDFS. -- Repeated analyses are faster if the data is local, and if it's in XDF. -- It's preferable to stream small amounts of data from a text data source. If the amount of data is larger, convert it to XDF before analysis. -- The overhead of copying or streaming the data to the edge node for analysis becomes unmanageable for very large amounts of data. -- Apache Spark is faster than Map Reduce for analysis in Hadoop. - -Given these principles, the following sections offer some general rules of thumb for selecting a compute context. - -### Local - -- If the amount of data to analyze is small and doesn't require repeated analysis, then stream it directly into the analysis routine using *local* or *localpar*.
-- If the amount of data to analyze is small or medium-sized and requires repeated analysis, then copy it to the local file system, import it to XDF, and analyze it via *local* or *localpar*. - -### Apache Spark - -- If the amount of data to analyze is large, then import it to a Spark DataFrame using **RxHiveData** or **RxParquetData**, or to XDF in HDFS (unless storage is an issue), and analyze it using the Spark compute context. - -### Apache Hadoop Map Reduce - -- Use the Map Reduce compute context only if you come across an insurmountable problem with the Spark compute context since it's generally slower. - -## Inline help on rxSetComputeContext -For more information and examples of RevoScaleR compute contexts, see the inline help in R on the rxSetComputeContext method, for example: - -```console -> ?rxSetComputeContext -``` - -You can also refer to the [Distributed computing overview](/machine-learning-server/r/how-to-revoscaler-distributed-computing) in [Machine Learning Server documentation](/machine-learning-server/). - -## Next steps - -In this article, you learned about the options that are available to specify whether and how execution is parallelized across cores of the edge node or HDInsight cluster. To learn more about how to use ML Services with HDInsight clusters, see the following topics: - -- [Overview of ML Services for Apache Hadoop](r-server-overview.md) -- [Azure Storage options for ML Services on HDInsight](r-server-storage.md) \ No newline at end of file diff --git a/articles/hdinsight/r-server/r-server-hdinsight-manage.md b/articles/hdinsight/r-server/r-server-hdinsight-manage.md deleted file mode 100644 index 20cbde6906cb5..0000000000000 --- a/articles/hdinsight/r-server/r-server-hdinsight-manage.md +++ /dev/null @@ -1,219 +0,0 @@ ---- -title: Manage ML Services cluster on HDInsight - Azure -description: Learn how to manage various tasks on ML Services cluster in Azure HDInsight. -ms.service: hdinsight -ms.topic: how-to -ms.date: 06/19/2019 -ROBOTS: NOINDEX ---- - -# Manage ML Services cluster on Azure HDInsight - -[!INCLUDE [retirement banner](../includes/ml-services-retirement.md)] - -In this article, you learn how to manage an existing ML Services cluster on Azure HDInsight to perform tasks like adding multiple concurrent users, connecting remotely to an ML Services cluster, changing compute context, etc. - -## Prerequisites - -* An ML Services cluster on HDInsight. See [Create Apache Hadoop clusters using the Azure portal](../hdinsight-hadoop-create-linux-clusters-portal.md) and select **ML Services** for **Cluster type**. - -* A Secure Shell (SSH) client: An SSH client is used to remotely connect to the HDInsight cluster and run commands directly on the cluster. For more information, see [Use SSH with HDInsight.](../hdinsight-hadoop-linux-use-ssh-unix.md). - -## Enable multiple concurrent users - -You can enable multiple concurrent users for ML Services cluster on HDInsight by adding more users for the edge node on which the RStudio community version runs. When you create an HDInsight cluster, you must provide two users, an HTTP user and an SSH user: - -:::image type="content" source="./media/r-server-hdinsight-manage/hdi-concurrent-users1.png" alt-text="HDI Azure portal login parameters" border="true"::: - -- **Cluster login username**: an HTTP user for authentication through the HDInsight gateway that is used to protect the HDInsight clusters you created. 
This HTTP user is used to access the Apache Ambari UI, Apache Hadoop YARN UI, as well as other UI components. -- **Secure Shell (SSH) username**: an SSH user to access the cluster through secure shell. This user is a user in the Linux system for all the head nodes, worker nodes, and edge nodes. So you can use secure shell to access any of the nodes in a remote cluster. - -The R Studio Server Community version used in the ML Services cluster on HDInsight accepts only a Linux username and password as a sign-in mechanism. It does not support passing tokens. So, when you try to access R Studio for the first time on an ML Services cluster, you need to sign in twice. - -- First sign in using the HTTP user credentials through the HDInsight Gateway. - -- Then use the SSH user credentials to sign in to RStudio. - -Currently, only one SSH user account can be created when provisioning an HDInsight cluster. So to enable multiple users to access an ML Services cluster on HDInsight, you must create additional users in the Linux system. - -Because RStudio runs on the cluster's edge node, there are several steps here: - -1. Use the existing SSH user to sign in to the edge node -2. Add more Linux users in edge node -3. Use RStudio Community version with the user created - -### Step 1: Use the created SSH user to sign in to the edge node - -Follow the instructions at [Connect to HDInsight (Apache Hadoop) using SSH](../hdinsight-hadoop-linux-use-ssh-unix.md) to access the edge node. The edge node address for ML Services cluster on HDInsight is `CLUSTERNAME-ed-ssh.azurehdinsight.net`. - -### Step 2: Add more Linux users in edge node - -To add a user to the edge node, execute the commands (replace `NEWUSERNAME` with the name of the user to add): - -```bash -# Add a user -sudo useradd -m NEWUSERNAME - -# Set password for the new user -sudo passwd NEWUSERNAME -``` - -The following screenshot shows the outputs. - -:::image type="content" source="./media/r-server-hdinsight-manage/hdi-concurrent-users2.png" alt-text="screenshot output concurrent users" border="true"::: - -When prompted for "Current Kerberos password:", just press **Enter** to ignore it. The `-m` option in the `useradd` command indicates that the system will create a home folder for the user, which is required for the RStudio Community version. - -### Step 3: Use RStudio Community version with the user created - -Access RStudio from `https://CLUSTERNAME.azurehdinsight.net/rstudio/`. If you are logging in for the first time after creating the cluster, enter the cluster admin credentials followed by the SSH user credentials you created. If this is not your first login, only enter the credentials for the SSH user you created. - -You can also sign in using the original credentials (by default, it is *sshuser*) concurrently from another browser window. - -Note also that the newly added users do not have root privileges in the Linux system, but they do have the same access to all the files in the remote HDFS and WASB storage. - -## Connect remotely to Microsoft ML Services - -You can set up access to the HDInsight Spark compute context from a remote instance of ML Client running on your desktop.
To do so, you must specify the options (hdfsShareDir, shareDir, sshUsername, sshHostname, sshSwitches, and sshProfileScript) when defining the RxSpark compute context on your desktop. For example: - -```r -myNameNode <- "default" -myPort <- 0 - -mySshHostname <- 'CLUSTERNAME-ed-ssh.azurehdinsight.net' # HDI secure shell hostname -mySshUsername <- 'sshuser' # HDI SSH username -mySshSwitches <- '-i /cygdrive/c/Data/R/davec' # HDI SSH private key - -myhdfsShareDir <- paste("/user/RevoShare", mySshUsername, sep="/") -myShareDir <- paste("/var/RevoShare" , mySshUsername, sep="/") - -mySparkCluster <- RxSpark( - hdfsShareDir = myhdfsShareDir, - shareDir = myShareDir, - sshUsername = mySshUsername, - sshHostname = mySshHostname, - sshSwitches = mySshSwitches, - sshProfileScript = '/etc/profile', - nameNode = myNameNode, - port = myPort, - consoleOutput = TRUE -) -``` - -For more information, see the "Using Microsoft Machine Learning Server as an Apache Hadoop Client" section in [How to use RevoScaleR in an Apache Spark compute context](/machine-learning-server/r/how-to-revoscaler-spark#more-spark-scenarios). - -## Use a compute context - -A compute context allows you to control whether computation is performed locally on the edge node or distributed across the nodes in the HDInsight cluster. For an example of setting a compute context with RStudio Server, see [Execute an R script on an ML Services cluster in Azure HDInsight using RStudio Server](machine-learning-services-quickstart-job-rstudio.md). - -## Distribute R code to multiple nodes - -With ML Services on HDInsight, you can take existing R code and run it across multiple nodes in the cluster by using `rxExec`. This function is useful when doing a parameter sweep or simulations. The following code is an example of how to use `rxExec`: - -```r -rxExec( function() {Sys.info()["nodename"]}, timesToRun = 4 ) -``` - -If you are still using the Spark compute context, this command returns the `nodename` value for the worker nodes that the code `Sys.info()["nodename"]` is run on. For example, on a four-node cluster, you expect to receive output similar to the following snippet: - -```r -$rxElem1 - nodename -"wn3-mymlser" - -$rxElem2 - nodename -"wn0-mymlser" - -$rxElem3 - nodename -"wn3-mymlser" - -$rxElem4 - nodename -"wn3-mymlser" -``` - -## Access data in Apache Hive and Parquet - -HDInsight ML Services allows direct access to data in Hive and Parquet for use by ScaleR functions in the Spark compute context. These capabilities are available through new ScaleR data source functions called RxHiveData and RxParquetData that work through use of Spark SQL to load data directly into a Spark DataFrame for analysis by ScaleR.
- -The following code provides some sample code on use of the new functions: - -```r -#Create a Spark compute context: -myHadoopCluster <- rxSparkConnect(reset = TRUE) - -#Retrieve some sample data from Hive and run a model: -hiveData <- RxHiveData("select * from hivesampletable", - colInfo = list(devicemake = list(type = "factor"))) -rxGetInfo(hiveData, getVarInfo = TRUE) - -rxLinMod(querydwelltime ~ devicemake, data=hiveData) - -#Retrieve some sample data from Parquet and run a model: -rxHadoopMakeDir('/share') -rxHadoopCopyFromLocal(file.path(rxGetOption('sampleDataDir'), 'claimsParquet/'), '/share/') -pqData <- RxParquetData('/share/claimsParquet', - colInfo = list( - age = list(type = "factor"), - car.age = list(type = "factor"), - type = list(type = "factor") - ) ) -rxGetInfo(pqData, getVarInfo = TRUE) - -rxNaiveBayes(type ~ age + cost, data = pqData) - -#Check on Spark data objects, cleanup, and close the Spark session: -lsObj <- rxSparkListData() # two data objs are cached -lsObj -rxSparkRemoveData(lsObj) -rxSparkListData() # it should show empty list -rxSparkDisconnect(myHadoopCluster) -``` - -For additional info on use of these new functions see the online help in ML Services through use of the `?RxHivedata` and `?RxParquetData` commands. - -## Install additional R packages on the cluster - -### To install R packages on the edge node - -If you want to install additional R packages on the edge node, you can use `install.packages()` directly from within the R console, once connected to the edge node through SSH. - -### To install R packages on the worker node - -To install R packages on the worker nodes of the cluster, you must use a Script Action. Script Actions are Bash scripts that are used to make configuration changes to the HDInsight cluster or to install additional software, such as additional R packages. - -> [!IMPORTANT] -> Using Script Actions to install additional R packages can only be used after the cluster has been created. Do not use this procedure during cluster creation, as the script relies on ML Services being completely configured. - -1. Follow the steps at [Customize clusters using Script Action](../hdinsight-hadoop-customize-cluster-linux.md). - -3. For **Submit script action**, provide the following information: - - * For **Script type**, select **Custom**. - - * For **Name**, provide a name for the script action. - - * For **Bash script URI**, enter `https://mrsactionscripts.blob.core.windows.net/rpackages-v01/InstallRPackages.sh`. This is the script that installs additional R packages on the worker node - - * Select the check box only for **Worker**. - - * **Parameters**: The R packages to be installed. For example, `bitops stringr arules` - - * Select the check box to **Persist this script action**. - - > [!NOTE] - > 1. By default, all R packages are installed from a snapshot of the Microsoft MRAN repository consistent with the version of ML Server that has been installed. If you want to install newer versions of packages, then there is some risk of incompatibility. However this kind of install is possible by specifying `useCRAN` as the first element of the package list, for example `useCRAN bitops, stringr, arules`. - > 2. Some R packages require additional Linux system libraries. For convenience, the HDInsight ML Services comes pre-installed with the dependencies needed by the top 100 most popular R packages. 
However, if the R package(s) you install require libraries beyond these then you must download the base script used here and add steps to install the system libraries. You must then upload the modified script to a public blob container in Azure storage and use the modified script to install the packages. - > For more information on developing Script Actions, see [Script Action development](../hdinsight-hadoop-script-actions-linux.md). - - :::image type="content" source="./media/r-server-hdinsight-manage/submit-script-action.png" alt-text="Azure portal submit script action" border="true"::: - -4. Select **Create** to run the script. Once the script completes, the R packages are available on all worker nodes. - -## Next steps - -* [Operationalize ML Services cluster on HDInsight](r-server-operationalize.md) -* [Compute context options for ML Service cluster on HDInsight](r-server-compute-contexts.md) -* [Azure Storage options for ML Services cluster on HDInsight](r-server-storage.md) \ No newline at end of file diff --git a/articles/hdinsight/r-server/r-server-operationalize.md b/articles/hdinsight/r-server/r-server-operationalize.md deleted file mode 100644 index 9c899181fa12f..0000000000000 --- a/articles/hdinsight/r-server/r-server-operationalize.md +++ /dev/null @@ -1,203 +0,0 @@ ---- -title: Operationalize ML Services on HDInsight - Azure -description: Learn how to operationalize your data model to make predictions with ML Services in Azure HDInsight. -ms.service: hdinsight -ms.topic: how-to -ms.date: 06/27/2018 -ROBOTS: NOINDEX ---- - -# Operationalize ML Services cluster on Azure HDInsight - -[!INCLUDE [retirement banner](../includes/ml-services-retirement.md)] - -After you have used ML Services cluster in HDInsight to complete your data modeling, you can operationalize the model to make predictions. This article provides instructions on how to perform this task. - -## Prerequisites - -* An ML Services cluster on HDInsight. See [Create Apache Hadoop clusters using the Azure portal](../hdinsight-hadoop-create-linux-clusters-portal.md) and select **ML Services** for **Cluster type**. - -* A Secure Shell (SSH) client: An SSH client is used to remotely connect to the HDInsight cluster and run commands directly on the cluster. For more information, see [Use SSH with HDInsight](../hdinsight-hadoop-linux-use-ssh-unix.md). - -## Operationalize ML Services cluster with one-box configuration - -> [!NOTE] -> The steps below are applicable to R Server 9.0 and ML Server 9.1. For ML Server 9.3, refer to [Use the administration tool to manage the operationalization configuration](/machine-learning-server/operationalize/configure-admin-cli-launch). - -1. SSH into the edge node. - - ```bash - ssh USERNAME@CLUSTERNAME-ed-ssh.azurehdinsight.net - ``` - - For instructions on how to use SSH with Azure HDInsight, see [Use SSH with HDInsight.](../hdinsight-hadoop-linux-use-ssh-unix.md). - -1. Change directory for the relevant version and sudo the dot net dll: - - - For Microsoft ML Server 9.1: - - ```bash - cd /usr/lib64/microsoft-r/rserver/o16n/9.1.0 - sudo dotnet Microsoft.RServer.Utils.AdminUtil/Microsoft.RServer.Utils.AdminUtil.dll - ``` - - - For Microsoft R Server 9.0: - - ```bash - cd /usr/lib64/microsoft-deployr/9.0.1 - sudo dotnet Microsoft.DeployR.Utils.AdminUtil/Microsoft.DeployR.Utils.AdminUtil.dll - ``` - -1. You are presented with the options to choose from. Choose the first option, as shown in the following screenshot, to **Configure ML Server for Operationalization**. 
- - :::image type="content" source="./media/r-server-operationalize/admin-util-one-box-1.png" alt-text="R server Administration utility select" border="true"::: - -1. You are now presented with the option to choose how you want to operationalize ML Server. From the presented options, choose the first one by entering **A**. - - :::image type="content" source="./media/r-server-operationalize/admin-util-one-box-2.png" alt-text="R server Administration utility operationalize" border="true"::: - -1. When prompted, enter and reenter the password for a local admin user. - -1. You should see outputs suggesting that the operation was successful. You are also prompted to select another option from the menu. Select E to go back to the main menu. - - :::image type="content" source="./media/r-server-operationalize/admin-util-one-box-3.png" alt-text="R server Administration utility success" border="true"::: - -1. Optionally, you can perform diagnostic checks by running a diagnostic test as follows: - - a. From the main menu, select **6** to run diagnostic tests. - - :::image type="content" source="./media/r-server-operationalize/hdinsight-diagnostic1.png" alt-text="R server Administration utility diagnostic" border="true"::: - - b. From the Diagnostic Tests menu, select **A**. When prompted, enter the password that you provided for the local admin user. - - :::image type="content" source="./media/r-server-operationalize/hdinsight-diagnostic2.png" alt-text="R server Administration utility test" border="true"::: - - c. Verify that the output shows that overall health is a pass. - - :::image type="content" source="./media/r-server-operationalize/hdinsight-diagnostic3.png" alt-text="R server Administration utility pass" border="true"::: - - d. From the menu options presented, enter **E** to return to the main menu and then enter **8** to exit the admin utility. - -### Long delays when consuming web service on Apache Spark - -If you encounter long delays when trying to consume a web service created with mrsdeploy functions in an Apache Spark compute context, you may need to add some missing folders. The Spark application belongs to a user called '*rserve2*' whenever it is invoked from a web service using mrsdeploy functions. To work around this issue: - -```r -# Create these required folders for user 'rserve2' in local and hdfs: - -hadoop fs -mkdir /user/RevoShare/rserve2 -hadoop fs -chmod 777 /user/RevoShare/rserve2 - -mkdir /var/RevoShare/rserve2 -chmod 777 /var/RevoShare/rserve2 - - -# Next, create a new Spark compute context: - -rxSparkConnect(reset = TRUE) -``` - -At this stage, the configuration for operationalization is complete. Now you can use the `mrsdeploy` package on your RClient to connect to the operationalization on edge node and start using its features like [remote execution](/machine-learning-server/r/how-to-execute-code-remotely) and [web-services](/machine-learning-server/operationalize/concept-what-are-web-services). Depending on whether your cluster is set up on a virtual network or not, you may need to set up port forward tunneling through SSH login. The following sections explain how to set up this tunnel. - -### ML Services cluster on virtual network - -Make sure you allow traffic through port 12800 to the edge node. That way, you can use the edge node to connect to the Operationalization feature. 
- -```r -library(mrsdeploy) - -remoteLogin( - deployr_endpoint = "http://[your-cluster-name]-ed-ssh.azurehdinsight.net:12800", - username = "admin", - password = "xxxxxxx" -) -``` - -If the `remoteLogin()` cannot connect to the edge node, but you can SSH to the edge node, then you need to verify whether the rule to allow traffic on port 12800 has been set properly or not. If you continue to face the issue, you can work around it by setting up port forward tunneling through SSH. For instructions, see the following section: - -### ML Services cluster not set up on virtual network - -If your cluster is not set up on vnet or if you are having troubles with connectivity through vnet, you can use SSH port forward tunneling: - -```bash -ssh -L localhost:12800:localhost:12800 USERNAME@CLUSTERNAME-ed-ssh.azurehdinsight.net -``` - -Once your SSH session is active, the traffic from your local machine's port 12800 is forwarded to the edge node's port 12800 through SSH session. Make sure you use `127.0.0.1:12800` in your `remoteLogin()` method. This logs into the edge node's operationalization through port forwarding. - -```r -library(mrsdeploy) - -remoteLogin( - deployr_endpoint = "http://127.0.0.1:12800", - username = "admin", - password = "xxxxxxx" -) -``` - -## Scale operationalized compute nodes on HDInsight worker nodes - -To scale the compute nodes, you first decommission the worker nodes and then configure compute nodes on the decommissioned worker nodes. - -### Step 1: Decommission the worker nodes - -ML Services cluster is not managed through [Apache Hadoop YARN](https://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/YARN.html). If the worker nodes are not decommissioned, the YARN Resource Manager does not work as expected because it is not aware of the resources being taken up by the server. In order to avoid this situation, we recommend decommissioning the worker nodes before you scale out the compute nodes. - -Follow these steps to decommission worker nodes: - -1. Log in to the cluster's Ambari console and click on **Hosts** tab. - -1. Select worker nodes (to be decommissioned). - -1. Click **Actions** > **Selected Hosts** > **Hosts** > **Turn ON Maintenance Mode**. For example, in the following image we have selected wn3 and wn4 to decommission. - - :::image type="content" source="./media/r-server-operationalize/get-started-operationalization.png" alt-text="Apache Ambari Turn On Maintenance Mode" border="true"::: - -* Select **Actions** > **Selected Hosts** > **DataNodes** > click **Decommission**. -* Select **Actions** > **Selected Hosts** > **NodeManagers** > click **Decommission**. -* Select **Actions** > **Selected Hosts** > **DataNodes** > click **Stop**. -* Select **Actions** > **Selected Hosts** > **NodeManagers** > click on **Stop**. -* Select **Actions** > **Selected Hosts** > **Hosts** > click **Stop All Components**. -* Unselect the worker nodes and select the head nodes. -* Select **Actions** > **Selected Hosts** > "**Hosts** > **Restart All Components**. - -### Step 2: Configure compute nodes on each decommissioned worker node(s) - -1. SSH into each decommissioned worker node. - -1. Run admin utility using the relevant DLL for the ML Services cluster that you have. For ML Server 9.1, run the following: - - ```bash - dotnet /usr/lib64/microsoft-deployr/9.0.1/Microsoft.DeployR.Utils.AdminUtil/Microsoft.DeployR.Utils.AdminUtil.dll - ``` - -1. Enter **1** to select option **Configure ML Server for Operationalization**. - -1. Enter **C** to select option `C. 
Compute node`. This configures the compute node on the worker node. - -1. Exit the Admin Utility. - -### Step 3: Add compute node details on the web node - -Once all decommissioned worker nodes are configured to run the compute node, go back to the edge node and add the decommissioned worker nodes' IP addresses to the ML Server web node's configuration: - -1. SSH into the edge node. - -1. Run `vi /usr/lib64/microsoft-deployr/9.0.1/Microsoft.DeployR.Server.WebAPI/appsettings.json`. - -1. Look for the "Uris" section, and add each worker node's IP and port details. - - ```json - "Uris": { - "Description": "Update 'Values' section to point to your backend machines. Using HTTPS is highly recommended", - "Values": [ - "http://localhost:12805", "http://[worker-node1-ip]:12805", "http://[worker-node2-ip]:12805" - ] - } - ``` - -## Next steps - -* [Manage ML Services cluster on HDInsight](r-server-hdinsight-manage.md) -* [Compute context options for ML Services cluster on HDInsight](r-server-compute-contexts.md) -* [Azure Storage options for ML Services cluster on HDInsight](r-server-storage.md) \ No newline at end of file diff --git a/articles/hdinsight/r-server/r-server-overview.md b/articles/hdinsight/r-server/r-server-overview.md deleted file mode 100644 index 1a8717a79d01b..0000000000000 --- a/articles/hdinsight/r-server/r-server-overview.md +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: Introduction to ML Services on Azure HDInsight -description: Learn how to use ML Services on HDInsight to create applications for big data analysis. -ms.service: hdinsight -ms.topic: overview -ms.date: 04/20/2020 -ROBOTS: NOINDEX -#Customer intent: As a developer I want to have a basic understanding of Microsoft's implementation of machine learning in Azure HDInsight so I can decide if I want to use it rather than build my own cluster. ---- - -# What is ML Services in Azure HDInsight - -[!INCLUDE [retirement banner](../includes/ml-services-retirement.md)] - -Microsoft Machine Learning Server is available as a deployment option when you create HDInsight clusters in Azure. The cluster type that provides this option is called **ML Services**. This capability provides on-demand access to adaptable, distributed methods of analytics on HDInsight. - -ML Services on HDInsight provides the latest capabilities for R-based analytics on datasets of virtually any size. The datasets can be loaded to either Azure Blob or Data Lake storage. Your R-based applications can use the 8000+ open-source R packages. The routines in ScaleR, Microsoft's big data analytics package, are also available. - -The edge node provides a convenient place to connect to the cluster and run your R scripts. The edge node allows running the ScaleR parallelized distributed functions across the cores of the server. You can also run them across the nodes of the cluster by using ScaleR's Hadoop Map Reduce or Apache Spark compute contexts. - -The models or predictions that result from analysis can be downloaded for on-premises use. They can also be `operationalized` elsewhere in Azure, in particular through [Azure Machine Learning Studio (classic)](https://studio.azureml.net) and a [web service](../../machine-learning/classic/deploy-a-machine-learning-web-service.md). - -## Get started with ML Services on HDInsight - -To create an ML Services cluster in HDInsight, select the **ML Services** cluster type. The ML Services cluster type includes ML Server on the data nodes and the edge node. The edge node serves as a landing zone for ML Services-based analytics.
See [Create Apache Hadoop clusters using the Azure portal](../hdinsight-hadoop-create-linux-clusters-portal.md) for a walkthrough on how to create the cluster. - -## Why choose ML Services in HDInsight? - -ML Services in HDInsight provides the following benefits: - -### AI innovation from Microsoft and open-source - - ML Services includes highly adaptable, distributed set of algorithms such as [RevoscaleR](/machine-learning-server/r-reference/revoscaler/revoscaler), [revoscalepy](/machine-learning-server/python-reference/revoscalepy/revoscalepy-package), and [microsoftML](/machine-learning-server/python-reference/microsoftml/microsoftml-package). These algorithms can work on data sizes larger than the size of physical memory. They also run on a wide variety of platforms in a distributed manner. Learn more about the collection of Microsoft's custom [R packages](/machine-learning-server/r-reference/introducing-r-server-r-package-reference) and [Python packages](/machine-learning-server/python-reference/introducing-python-package-reference) included with the product. - - ML Services bridges these Microsoft innovations and contributions coming from the open-source community (R, Python, and AI toolkits). All on top of a single enterprise-grade platform. Any R or Python open-source machine learning package can work side by side with any proprietary innovation from Microsoft. - -### Simple, secure, and high-scale operationalization and administration - - Enterprises relying on traditional paradigms and environments invest much time and effort towards operationalization. This action results in inflated costs and delays including the translation time for: models, iterations to keep them valid and current, regulatory approval, and managing permissions. - - ML Services offers enterprise grade [operationalization](/machine-learning-server/what-is-operationalization). After a machine learning model completes, it takes just a few clicks to generate web services APIs. These [web services](/machine-learning-server/operationalize/concept-what-are-web-services) are hosted on a server grid in the cloud and can be integrated with line-of-business applications. The ability to deploy to an elastic grid lets you scale seamlessly with the needs of your business, both for batch and real-time scoring. For instructions, see [Operationalize ML Services on HDInsight](r-server-operationalize.md). - - - -> [!NOTE] -> The ML Services cluster type on HDInsight is supported only on HDInsight 3.6. HDInsight 3.6 is scheduled to retire on December 31, 2020. - -## Key features of ML Services on HDInsight - -The following features are included in ML Services on HDInsight. - -| Feature category | Description | -|------------------|-------------| -| R-enabled | [R packages](/machine-learning-server/r-reference/introducing-r-server-r-package-reference) for solutions written in R, with an open-source distribution of R, and run-time infrastructure for script execution. | -| Python-enabled | [Python modules](/machine-learning-server/python-reference/introducing-python-package-reference) for solutions written in Python, with an open-source distribution of Python, and run-time infrastructure for script execution. -| [Pre-trained models](/machine-learning-server/install/microsoftml-install-pretrained-models) | For visual analysis and text sentiment analysis, ready to score data you provide. | -| [Deploy and consume](r-server-operationalize.md) | `Operationalize` your server and deploy solutions as a web service. 
| -| [Remote execution](r-server-hdinsight-manage.md#connect-remotely-to-microsoft-ml-services) | Start remote sessions on ML Services cluster on your network from your client workstation. | - -## Data storage options for ML Services on HDInsight - -Default storage for the HDFS file system can be an Azure Storage account or Azure Data Lake Storage. Uploaded data to cluster storage during analysis is made persistent. The data is available even after the cluster is deleted. Various tools can handle the data transfer to storage. The tools include the portal-based upload facility of the storage account and the AzCopy utility. - -You can enable access to additional Blob and Data lake stores during cluster creation. You aren't limited by the primary storage option in use. See [Azure Storage options for ML Services on HDInsight](./r-server-storage.md) article to learn more about using multiple storage accounts. - -You can also use Azure Files as a storage option for use on the edge node. Azure Files enables file shares created in Azure Storage to the Linux file system. For more information, see [Azure Storage options for ML Services on HDInsight](r-server-storage.md). - -## Access ML Services edge node - -You can connect to Microsoft ML Server on the edge node using a browser, or SSH/PuTTY. The R console is installed by default during cluster creation. - -## Develop and run R scripts - -Your R scripts can use any of the 8000+ open-source R packages. You can also use the parallelized and distributed routines from the ScaleR library. Scripts run on the edge node run within the R interpreter on that node. Except for steps that call ScaleR functions with a Map Reduce (RxHadoopMR) or Spark (RxSpark) compute context. The functions run in a distributed fashion across the data nodes that are associated with the data. For more information about context options, see [Compute context options for ML Services on HDInsight](r-server-compute-contexts.md). - -## `Operationalize` a model - -When your data modeling is complete, `operationalize` the model to make predictions for new data either from Azure or on-premises. This process is known as scoring. Scoring can be done in HDInsight, Azure Machine Learning, or on-premises. - -### Score in HDInsight - -To score in HDInsight, write an R function. The function calls your model to make predictions for a new data file that you've loaded to your storage account. Then, save the predictions back to the storage account. You can run this routine on-demand on the edge node of your cluster or by using a scheduled job. - -### Score in Azure Machine Learning (AML) - -To score using Azure Machine Learning, use the open-source Azure Machine Learning R package known as [AzureML](https://cran.r-project.org/src/contrib/Archive/AzureML/) to publish your model as an Azure web service. For convenience, this package is pre-installed on the edge node. Next, use the facilities in Azure Machine Learning to create a user interface for the web service, and then call the web service as needed for scoring. Then convert ScaleR model objects to equivalent open-source model objects for use with the web service. Use ScaleR coercion functions, such as `as.randomForest()` for ensemble-based models, for this conversion. - -### Score on-premises - -To score on-premises after creating your model: serialize the model in R, download it, de-serialize it, then use it for scoring new data. 
You can score new data by using the approach described earlier in Score in HDInsight or by using [web services](/machine-learning-server/operationalize/concept-what-are-web-services). - -## Maintain the cluster - -### Install and maintain R packages - -Most of the R packages that you use are required on the edge node since most steps of your R scripts run there. To install additional R packages on the edge node, you can use the `install.packages()` method in R. - -If you're just using ScaleR library routines, you don't usually need additional R packages. You might need additional packages for **rxExec** or **RxDataStep** execution on the data nodes. - -The additional packages can be installed with a script action after you create the cluster. For more information, see [Manage ML Services in HDInsight cluster](r-server-hdinsight-manage.md). - -### Change Apache Hadoop MapReduce memory settings - -Available memory to ML Services can be modified when it's running a MapReduce job. To modify a cluster, use the Apache Ambari UI for your cluster. For Ambari UI instructions, see [Manage HDInsight clusters using the Ambari Web UI](../hdinsight-hadoop-manage-ambari.md). - -Available memory to ML Services can be changed by using Hadoop switches in the call to **RxHadoopMR**: - -```r -hadoopSwitches = "-libjars /etc/hadoop/conf -Dmapred.job.map.memory.mb=6656" -``` - -### Scale your cluster - -An existing ML Services cluster on HDInsight can be scaled up or down through the portal. By scaling up, you gain additional capacity for larger processing tasks. You can scale back a cluster when it's idle. For instructions about how to scale a cluster, see [Manage HDInsight clusters](../hdinsight-administer-use-portal-linux.md). - -### Maintain the system - -OS Maintenance is done on the underlying Linux VMs in an HDInsight cluster during off-hours. Typically, maintenance is done at 3:30 AM (VM's local time) every Monday and Thursday. Updates don't impact more than a quarter of the cluster at a time. - -Running jobs might slow down during maintenance. However, they should still run to completion. Any custom software or local data that you've is preserved across these maintenance events unless a catastrophic failure occurs that requires a cluster rebuild. - -## IDE options for ML Services on HDInsight - -The Linux edge node of an HDInsight cluster is the landing zone for R-based analysis. Recent versions of HDInsight provide a browser-based IDE of RStudio Server on the edge node. RStudio Server is more productive than the R console for development and execution. - -A desktop IDE can access the cluster through a remote MapReduce or Spark compute context. Options include: Microsoft's [R Tools for Visual Studio](https://marketplace.visualstudio.com/items?itemName=MikhailArkhipov007.RTVS2019) (RTVS), RStudio, and Walware's Eclipse-based StatET. - -Access the R console on the edge node by typing **R** at the command prompt. When using the console interface, it's convenient to develop R script in a text editor. Then cut and paste sections of your script into the R console as needed. - -## Pricing - -The prices associated with an ML Services HDInsight cluster are structured similarly to other HDInsight cluster types. They're based on the sizing of the underlying VMs across the name, data, and edge nodes. Core-hour uplifts as well. For more information, see [HDInsight pricing](https://azure.microsoft.com/pricing/details/hdinsight/). 
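## Example: score new data on the edge node

The "Score in HDInsight" section earlier in this article describes wrapping your trained model in an R function that reads a new data file from the cluster's storage account and writes the predictions back. The following minimal sketch illustrates that pattern; it assumes a RevoScaleR model object trained earlier (for example, with `rxLinMod`), and the function name, HDFS paths, and the `myModel` object are illustrative only.

```r
# Minimal scoring sketch (illustrative names and paths).
# Assumes `model` is a RevoScaleR model object, for example from rxLinMod().
scoreNewData <- function(model, inputCsvPath, outputDirPath) {
  rxSetComputeContext("local")      # score a single file on the edge node
  hdfsFS  <- RxHdfsFileSystem()     # default cluster storage

  newData <- RxTextData(inputCsvPath, fileSystem = hdfsFS)
  scored  <- RxTextData(outputDirPath, fileSystem = hdfsFS, createFileSet = TRUE)

  # Write the predictions (plus the model variables) back to the storage account
  rxPredict(model, data = newData, outData = scored, writeModelVars = TRUE)
}

# Run on demand from the edge node, or from a scheduled job:
# scoreNewData(myModel, "/share/new-observations.csv", "/share/scored")
```

As noted earlier, you can run a routine like this on demand on the edge node of your cluster or by using a scheduled job.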
- -## Next steps - -To learn more about how to use ML Services on HDInsight clusters, see the following articles: - -* [Execute an R script on an ML Services cluster in Azure HDInsight using RStudio Server](machine-learning-services-quickstart-job-rstudio.md) -* [Compute context options for ML Services cluster on HDInsight](r-server-compute-contexts.md) -* [Storage options for ML Services cluster on HDInsight](r-server-storage.md) \ No newline at end of file diff --git a/articles/hdinsight/r-server/r-server-storage.md b/articles/hdinsight/r-server/r-server-storage.md deleted file mode 100644 index ad32ffe5bd59f..0000000000000 --- a/articles/hdinsight/r-server/r-server-storage.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -title: Azure storage solutions for ML Services on HDInsight - Azure -description: Learn about the different storage options available with ML Services on HDInsight -ms.service: hdinsight -ms.topic: how-to -ms.date: 01/02/2020 -ROBOTS: NOINDEX ---- - -# Azure storage solutions for ML Services on Azure HDInsight - -[!INCLUDE [retirement banner](../includes/ml-services-retirement.md)] - -ML Services on HDInsight can use different storage solutions to persist data, code, or objects that contain results from analysis. These solutions include the following options: - -- [Azure Blob storage](https://azure.microsoft.com/services/storage/blobs/) -- [Azure Data Lake Storage Gen1](https://azure.microsoft.com/services/storage/data-lake-storage/) -- [Azure Files](https://azure.microsoft.com/services/storage/files/) - -You also have the option of accessing multiple Azure storage accounts or containers with your HDInsight cluster. Azure Files is a convenient data storage option for use on the edge node that enables you to mount an Azure file share to, for example, the Linux file system. But Azure file shares can be mounted and used by any system that has a supported operating system such as Windows or Linux. - -When you create an Apache Hadoop cluster in HDInsight, you specify either an **Azure Blob storage** account or **Data Lake Storage Gen1**. A specific storage container from that account holds the file system for the cluster that you create (for example, the Hadoop Distributed File System). For more information and guidance, see: - -- [Use Azure Blob storage with HDInsight](../hdinsight-hadoop-use-blob-storage.md) -- [Use Data Lake Storage Gen1 with Azure HDInsight clusters](../hdinsight-hadoop-use-data-lake-storage-gen1.md) - -## Use Azure Blob storage accounts with ML Services cluster - -If you specified more than one storage account when creating your ML Services cluster, the following instructions explain how to use a secondary account for data access and operations on an ML Services cluster. Assume the following storage accounts and container: **storage1** and a default container called **container1**, and **storage2** with **container2**. - -> [!WARNING] -> For performance purposes, the HDInsight cluster is created in the same data center as the primary storage account that you specify. Using a storage account in a different location than the HDInsight cluster is not supported. - -### Use the default storage with ML Services on HDInsight - -1. Using an SSH client, connect to the edge node of your cluster. For information on using SSH with HDInsight clusters, see [Use SSH with HDInsight](../hdinsight-hadoop-linux-use-ssh-unix.md). - -2. Copy a sample file, mysamplefile.csv, to the /share directory. 
- - ```bash - hadoop fs -mkdir /share - hadoop fs -copyFromLocal mysamplefile.csv /share - ``` - -3. Switch to R Studio or another R console, and write R code to set the name node to **default** and the location of the file you want to access. - - ```R - myNameNode <- "default" - myPort <- 0 - - #Location of the data: - bigDataDirRoot <- "/share" - - #Define Spark compute context: - mySparkCluster <- RxSpark(nameNode=myNameNode, consoleOutput=TRUE) - - #Set compute context: - rxSetComputeContext(mySparkCluster) - - #Define the Hadoop Distributed File System (HDFS) file system: - hdfsFS <- RxHdfsFileSystem(hostName=myNameNode, port=myPort) - - #Specify the input file to analyze in HDFS: - inputFile <- file.path(bigDataDirRoot,"mysamplefile.csv") - ``` - -All the directory and file references point to the storage account `wasbs://container1@storage1.blob.core.windows.net`. This is the **default storage account** that's associated with the HDInsight cluster. - -### Use the additional storage with ML Services on HDInsight - -Now, suppose you want to process a file called mysamplefile1.csv that's located in the /private directory of **container2** in **storage2**. - -In your R code, point the name node reference to the **storage2** storage account. - -```R -myNameNode <- "wasbs://container2@storage2.blob.core.windows.net" -myPort <- 0 - -#Location of the data: -bigDataDirRoot <- "/private" - -#Define Spark compute context: -mySparkCluster <- RxSpark(consoleOutput=TRUE, nameNode=myNameNode, port=myPort) - -#Set compute context: -rxSetComputeContext(mySparkCluster) - -#Define HDFS file system: -hdfsFS <- RxHdfsFileSystem(hostName=myNameNode, port=myPort) - -#Specify the input file to analyze in HDFS: -inputFile <- file.path(bigDataDirRoot,"mysamplefile1.csv") -``` - -All of the directory and file references now point to the storage account `wasbs://container2@storage2.blob.core.windows.net`. This is the **Name Node** that you've specified. - -Configure the `/user/RevoShare/` directory on **storage2** as follows: - -```bash -hadoop fs -mkdir wasbs://container2@storage2.blob.core.windows.net/user -hadoop fs -mkdir wasbs://container2@storage2.blob.core.windows.net/user/RevoShare -hadoop fs -mkdir wasbs://container2@storage2.blob.core.windows.net/user/RevoShare/ -``` - -## Use Azure Data Lake Storage Gen1 with ML Services cluster - -To use Data Lake Storage Gen1 with your HDInsight cluster, you need to give your cluster access to each Azure Data Lake Storage Gen1 account that you want to use. For instructions on how to use the Azure portal to create an HDInsight cluster with an Azure Data Lake Storage Gen1 as the default storage or as additional storage, see [Create an HDInsight cluster with Data Lake Storage Gen1 using Azure portal](../../data-lake-store/data-lake-store-hdinsight-hadoop-use-portal.md). - -You then use the storage in your R script much like you did a secondary Azure storage account as described in the previous procedure. - -### Add cluster access to your Azure Data Lake Storage Gen1 - -You access Data Lake Storage Gen1 by using an Azure Active Directory (Azure AD) Service Principal that's associated with your HDInsight cluster. - -1. When you create your HDInsight cluster, select **Cluster Azure AD Identity** from the **Data Source** tab. - -2. In the **Cluster Azure AD Identity** dialog box, under **Select AD Service Principal**, select **Create new**.
- -After you give the Service Principal a name and create a password for it, click **Manage ADLS Access** to associate the Service Principal with your Data Lake Storage. - -It's also possible to add cluster access to one or more Data Lake storage Gen1 accounts following cluster creation. Open the Azure portal entry for a Data Lake Storage Gen1 and go to **Data Explorer > Access > Add**. - -### How to access Data Lake Storage Gen1 from ML Services on HDInsight - -Once you've given access to Data Lake Storage Gen1, you can use the storage in ML Services cluster on HDInsight the way you would a secondary Azure storage account. The only difference is that the prefix **wasbs://** changes to **adl://** as follows: - -```R -# Point to the ADL Storage (e.g. ADLtest) -myNameNode <- "adl://rkadl1.azuredatalakestore.net" -myPort <- 0 - -# Location of the data (assumes a /share directory on the ADL account) -bigDataDirRoot <- "/share" - -# Define Spark compute context -mySparkCluster <- RxSpark(consoleOutput=TRUE, nameNode=myNameNode, port=myPort) - -# Set compute context -rxSetComputeContext(mySparkCluster) - -# Define HDFS file system -hdfsFS <- RxHdfsFileSystem(hostName=myNameNode, port=myPort) - -# Specify the input file in HDFS to analyze -inputFile <-file.path(bigDataDirRoot,"mysamplefile.csv") -``` - -The following commands are used to configure the Data Lake Storage Gen1 with the RevoShare directory and add the sample .csv file from the previous example: - -```bash -hadoop fs -mkdir adl://rkadl1.azuredatalakestore.net/user -hadoop fs -mkdir adl://rkadl1.azuredatalakestore.net/user/RevoShare -hadoop fs -mkdir adl://rkadl1.azuredatalakestore.net/user/RevoShare/ - -hadoop fs -mkdir adl://rkadl1.azuredatalakestore.net/share - -hadoop fs -copyFromLocal /usr/lib64/R Server-7.4.1/library/RevoScaleR/SampleData/mysamplefile.csv adl://rkadl1.azuredatalakestore.net/share - -hadoop fs –ls adl://rkadl1.azuredatalakestore.net/share -``` - -## Use Azure Files with ML Services on HDInsight - -There's also a convenient data storage option for use on the edge node called [Azure Files](https://azure.microsoft.com/services/storage/files/). It enables you to mount an Azure Storage file share to the Linux file system. This option can be handy for storing data files, R scripts, and result objects that might be needed later, especially when it makes sense to use the native file system on the edge node rather than HDFS. - -A major benefit of Azure Files is that the file shares can be mounted and used by any system that has a supported OS such as Windows or Linux. For example, it can be used by another HDInsight cluster that you or someone on your team has, by an Azure VM, or even by an on-premises system. 
For more information, see: - -- [How to use Azure Files with Linux](../../storage/files/storage-how-to-use-files-linux.md) -- [How to use Azure Files on Windows](../../storage/files/storage-dotnet-how-to-use-files.md) - -## Next steps - -- [Overview of ML Services cluster on HDInsight](r-server-overview.md) -- [Compute context options for ML Services cluster on HDInsight](r-server-compute-contexts.md) -- [Use Azure Data Lake Storage Gen2 with Azure HDInsight clusters](../hdinsight-hadoop-use-data-lake-storage-gen2.md) diff --git a/articles/hdinsight/r-server/r-server-submit-jobs-r-tools-vs.md b/articles/hdinsight/r-server/r-server-submit-jobs-r-tools-vs.md deleted file mode 100644 index db4e99f784ada..0000000000000 --- a/articles/hdinsight/r-server/r-server-submit-jobs-r-tools-vs.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -title: Submit jobs from R Tools for Visual Studio - Azure HDInsight -description: Submit R jobs from your local Visual Studio machine to an HDInsight cluster. -ms.service: hdinsight -ms.topic: conceptual -ms.date: 06/19/2019 -ROBOTS: NOINDEX ---- - -# Submit jobs from R Tools for Visual Studio - -[!INCLUDE [retirement banner](../includes/ml-services-retirement.md)] - -[R Tools for Visual Studio](https://marketplace.visualstudio.com/items?itemName=MikhailArkhipov007.RTVS2019) (RTVS) is a free, open-source extension for the Community (free), Professional, and Enterprise editions of both [Visual Studio 2017](https://www.visualstudio.com/downloads/), and [Visual Studio 2015 Update 3](https://go.microsoft.com/fwlink/?LinkId=691129) or higher. RTVS is not available for [Visual Studio 2019](/visualstudio/porting/port-migrate-and-upgrade-visual-studio-projects?preserve-view=true&view=vs-2019). - -RTVS enhances your R workflow by offering tools such as the [R Interactive window](/visualstudio/rtvs/interactive-repl) (REPL), intellisense (code completion), [plot visualization](/visualstudio/rtvs/visualizing-data) through R libraries such as ggplot2 and ggviz, [R code debugging](/visualstudio/rtvs/debugging), and more. - -## Set up your environment - -1. Install [R Tools for Visual Studio](/visualstudio/rtvs/installing-r-tools-for-visual-studio). - - :::image type="content" source="./media/r-server-submit-jobs-r-tools-vs/install-r-tools-for-vs.png" alt-text="Installing RTVS in Visual Studio 2017" border="true"::: - -2. Select the *Data science and analytical applications* workload, then select the **R language support**, **Runtime support for R development**, and **Microsoft R Client** options. - -3. You need to have public and private keys for SSH authentication. - - -4. Install [ML Server](/previous-versions/machine-learning-server/install/r-server-install-windows) on your machine. ML Server provides the [`RevoScaleR`](/machine-learning-server/r-reference/revoscaler/revoscaler) and `RxSpark` functions. - -5. Install [PuTTY](https://www.putty.org/) to provide a compute context to run `RevoScaleR` functions from your local client to your HDInsight cluster. - -6. You have the option to apply the Data Science Settings to your Visual Studio environment, which provides a new layout for your workspace for the R tools. - 1. To save your current Visual Studio settings, use the **Tools > Import and Export Settings** command, then select **Export selected environment settings** and specify a file name. To restore those settings, use the same command and select **Import selected environment settings**. - - 2. Go to the **R Tools** menu item, then select **Data Science Settings...**. 
- - :::image type="content" source="./media/r-server-submit-jobs-r-tools-vs/data-science-settings.png" alt-text="Visual Studio Data Science Settings" border="true"::: - - > [!NOTE] - > Using the approach in step 1, you can also save and restore your personalized data scientist layout, rather than repeating the **Data Science Settings** command. - -## Execute local R methods - -1. Create your HDInsight ML Services cluster. -2. Install the [RTVS extension](/visualstudio/rtvs/installation). -3. Download the [samples zip file](https://github.com/Microsoft/RTVS-docs/archive/master.zip). -4. Open `examples/Examples.sln` to launch the solution in Visual Studio. -5. Open the `1-Getting Started with R.R` file in the `A first look at R` solution folder. -6. Starting at the top of the file, press Ctrl+Enter to send each line, one at a time, to the R Interactive window. Some lines might take a while as they install packages. - * Alternatively, you can select all lines in the R file (Ctrl+A), then either execute all (Ctrl+Enter), or select the Execute Interactive icon on the toolbar. - - :::image type="content" source="./media/r-server-submit-jobs-r-tools-vs/execute-interactive1.png" alt-text="Visual Studio execute interactive" border="true"::: - -7. After running all the lines in the script, you should see an output similar to this: - - :::image type="content" source="./media/r-server-submit-jobs-r-tools-vs/visual-studio-workspace.png" alt-text="Visual Studio workspace R tools" border="true"::: - -## Submit jobs to an HDInsight ML Services cluster - -Using a Microsoft ML Server/Microsoft R Client from a Windows computer equipped with PuTTY, you can create a compute context that will run distributed `RevoScaleR` functions from your local client to your HDInsight cluster. Use `RxSpark` to create the compute context, specifying your username, the Apache Hadoop cluster's edge node, SSH switches, and so forth. - -1. The ML Services edge node address on HDInsight is `CLUSTERNAME-ed-ssh.azurehdinsight.net` where `CLUSTERNAME` is the name of your ML Services cluster. - -1. Paste the following code into the R Interactive window in Visual Studio, altering the values of the setup variables to match your environment. - - ```R - # Setup variables that connect the compute context to your HDInsight cluster - mySshHostname <- 'r-cluster-ed-ssh.azurehdinsight.net ' # HDI secure shell hostname - mySshUsername <- 'sshuser' # HDI SSH username - mySshClientDir <- "C:\\Program Files (x86)\\PuTTY" - mySshSwitches <- '-i C:\\Users\\azureuser\\r.ppk' # Path to your private ssh key - myHdfsShareDir <- paste("/user/RevoShare", mySshUsername, sep = "/") - myShareDir <- paste("/var/RevoShare", mySshUsername, sep = "/") - mySshProfileScript <- "/usr/lib64/microsoft-r/3.3/hadoop/RevoHadoopEnvVars.site" - - # Create the Spark Cluster compute context - mySparkCluster <- RxSpark( - sshUsername = mySshUsername, - sshHostname = mySshHostname, - sshSwitches = mySshSwitches, - sshProfileScript = mySshProfileScript, - consoleOutput = TRUE, - hdfsShareDir = myHdfsShareDir, - shareDir = myShareDir, - sshClientDir = mySshClientDir - ) - - # Set the current compute context as the Spark compute context defined above - rxSetComputeContext(mySparkCluster) - ``` - - :::image type="content" source="./media/r-server-submit-jobs-r-tools-vs/apache-spark-context.png" alt-text="apache spark setting the context" border="true"::: - -1. 
Execute the following commands in the R Interactive window: - - ```R - rxHadoopCommand("version") # should return version information - rxHadoopMakeDir("/user/RevoShare/newUser") # creates a new folder in your storage account - rxHadoopCopy("/example/data/people.json", "/user/RevoShare/newUser") # copies file to new folder - ``` - - You should see an output similar to the following: - - :::image type="content" source="./media/r-server-submit-jobs-r-tools-vs/successful-rx-commands.png" alt-text="Successful rx command execution" border="true"::: -a -1. Verify that the `rxHadoopCopy` successfully copied the `people.json` file from the example data folder to the newly created `/user/RevoShare/newUser` folder: - - 1. From your HDInsight ML Services cluster pane in Azure, select **Storage accounts** from the left-hand menu. - - :::image type="content" source="./media/r-server-submit-jobs-r-tools-vs/hdinsight-storage-accounts.png" alt-text="Azure HDInsight Storage accounts" border="true"::: - - 2. Select the default storage account for your cluster, making note of the container/directory name. - - 3. Select **Containers** from the left-hand menu on your storage account pane. - - :::image type="content" source="./media/r-server-submit-jobs-r-tools-vs/hdi-storage-containers.png" alt-text="Azure HDInsight Storage containers" border="true"::: - - 4. Select your cluster's container name, browse to the **user** folder (you might have to click *Load more* at the bottom of the list), then select *RevoShare*, then **newUser**. The `people.json` file should be displayed in the `newUser` folder. - - :::image type="content" source="./media/r-server-submit-jobs-r-tools-vs/hdinsight-copied-file.png" alt-text="HDInsight copied file folder location" border="true"::: - -1. After you are finished using the current Apache Spark context, you must stop it. You cannot run multiple contexts at once. - - ```R - rxStopEngine(mySparkCluster) - ``` - -## Next steps - -* [Compute context options for ML Services on HDInsight](r-server-compute-contexts.md) -* [Combining ScaleR and SparkR](../hdinsight-hadoop-r-scaler-sparkr.md) provides an example of airline flight delay predictions. \ No newline at end of file diff --git a/articles/hdinsight/spark/apache-spark-eclipse-tool-plugin.md b/articles/hdinsight/spark/apache-spark-eclipse-tool-plugin.md index f68c4ca17bfb2..031a1ae167432 100644 --- a/articles/hdinsight/spark/apache-spark-eclipse-tool-plugin.md +++ b/articles/hdinsight/spark/apache-spark-eclipse-tool-plugin.md @@ -4,7 +4,7 @@ description: Use HDInsight Tools in Azure Toolkit for Eclipse to develop Spark a ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive -ms.date: 12/13/2019 +ms.date: 05/30/2022 --- # Use Azure Toolkit for Eclipse to create Apache Spark applications for an HDInsight cluster @@ -348,4 +348,4 @@ There are two modes to submit the jobs. 
If storage credential is provided, batch ### Managing resources * [Manage resources for the Apache Spark cluster in Azure HDInsight](apache-spark-resource-manager.md) -* [Track and debug jobs running on an Apache Spark cluster in HDInsight](apache-spark-job-debugging.md) \ No newline at end of file +* [Track and debug jobs running on an Apache Spark cluster in HDInsight](apache-spark-job-debugging.md) diff --git a/articles/hdinsight/spark/apache-spark-improve-performance-iocache.md b/articles/hdinsight/spark/apache-spark-improve-performance-iocache.md index 59a3452140420..c11bb0ee534e4 100644 --- a/articles/hdinsight/spark/apache-spark-improve-performance-iocache.md +++ b/articles/hdinsight/spark/apache-spark-improve-performance-iocache.md @@ -3,7 +3,7 @@ title: Apache Spark performance - Azure HDInsight IO Cache (Preview) description: Learn about Azure HDInsight IO Cache and how to use it to improve Apache Spark performance. ms.service: hdinsight ms.topic: how-to -ms.date: 12/23/2019 +ms.date: 05/26/2022 --- # Improve performance of Apache Spark workloads using Azure HDInsight IO Cache diff --git a/articles/hdinsight/spark/apache-spark-intellij-tool-debug-remotely-through-ssh.md b/articles/hdinsight/spark/apache-spark-intellij-tool-debug-remotely-through-ssh.md index cd0f899fba297..b8c6d65da9a59 100644 --- a/articles/hdinsight/spark/apache-spark-intellij-tool-debug-remotely-through-ssh.md +++ b/articles/hdinsight/spark/apache-spark-intellij-tool-debug-remotely-through-ssh.md @@ -4,7 +4,7 @@ description: Step-by-step guidance on how to use HDInsight Tools in Azure Toolki ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive,hdiseo17may2017 -ms.date: 12/23/2019 +ms.date: 05/30/2022 --- # Debug Apache Spark applications on an HDInsight cluster with Azure Toolkit for IntelliJ through SSH @@ -172,4 +172,4 @@ This article provides step-by-step guidance on how to use HDInsight Tools in [Az ### Manage resources * [Manage resources for the Apache Spark cluster in Azure HDInsight](apache-spark-resource-manager.md) -* [Track and debug jobs running on an Apache Spark cluster in HDInsight](apache-spark-job-debugging.md) \ No newline at end of file +* [Track and debug jobs running on an Apache Spark cluster in HDInsight](apache-spark-job-debugging.md) diff --git a/articles/hdinsight/spark/apache-spark-intellij-tool-plugin-debug-jobs-remotely.md b/articles/hdinsight/spark/apache-spark-intellij-tool-plugin-debug-jobs-remotely.md index 5b9a7d4fd65a8..a9db399bc9b53 100644 --- a/articles/hdinsight/spark/apache-spark-intellij-tool-plugin-debug-jobs-remotely.md +++ b/articles/hdinsight/spark/apache-spark-intellij-tool-plugin-debug-jobs-remotely.md @@ -4,7 +4,7 @@ description: Learn how to use HDInsight Tools in Azure Toolkit for IntelliJ to r ms.service: hdinsight ms.custom: hdinsightactive ms.topic: how-to -ms.date: 11/28/2017 +ms.date: 05/30/2022 --- # Use Azure Toolkit for IntelliJ to debug Apache Spark applications remotely in HDInsight through VPN @@ -320,4 +320,4 @@ We recommend that you also create an Apache Spark cluster in Azure HDInsight tha ### Manage resources * [Manage resources for the Apache Spark cluster in Azure HDInsight](apache-spark-resource-manager.md) -* [Track and debug jobs that run on an Apache Spark cluster in HDInsight](apache-spark-job-debugging.md) \ No newline at end of file +* [Track and debug jobs that run on an Apache Spark cluster in HDInsight](apache-spark-job-debugging.md) diff --git a/articles/hdinsight/spark/apache-spark-load-data-run-query.md 
b/articles/hdinsight/spark/apache-spark-load-data-run-query.md index 1273d9a9cd26b..3dc87e12e4052 100644 --- a/articles/hdinsight/spark/apache-spark-load-data-run-query.md +++ b/articles/hdinsight/spark/apache-spark-load-data-run-query.md @@ -4,7 +4,7 @@ description: Tutorial - Learn how to load data and run interactive queries on Sp ms.service: hdinsight ms.topic: tutorial ms.custom: hdinsightactive,mvc -ms.date: 02/12/2020 +ms.date: 06/08/2022 # Customer intent: As a developer new to Apache Spark and to Apache Spark in Azure HDInsight, I want to learn how to load data into a Spark cluster, so I can run interactive SQL queries against the data. --- diff --git a/articles/hdinsight/spark/apache-spark-settings.md b/articles/hdinsight/spark/apache-spark-settings.md index cf11a254afa0a..5d9a26ac32973 100644 --- a/articles/hdinsight/spark/apache-spark-settings.md +++ b/articles/hdinsight/spark/apache-spark-settings.md @@ -4,7 +4,7 @@ description: How to view and configure Apache Spark settings for an Azure HDInsi ms.service: hdinsight ms.topic: conceptual ms.custom: hdinsightactive,seoapr2020 -ms.date: 04/24/2020 +ms.date: 05/30/2022 --- # Configure Apache Spark settings diff --git a/articles/hdinsight/spark/apache-spark-streaming-high-availability.md b/articles/hdinsight/spark/apache-spark-streaming-high-availability.md index b518bd9932655..c28dde4abf8b2 100644 --- a/articles/hdinsight/spark/apache-spark-streaming-high-availability.md +++ b/articles/hdinsight/spark/apache-spark-streaming-high-availability.md @@ -4,7 +4,7 @@ description: How to set up Apache Spark Streaming for a high-availability scenar ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive -ms.date: 11/29/2019 +ms.date: 05/26/2022 --- # Create high-availability Apache Spark Streaming jobs with YARN diff --git a/articles/hdinsight/spark/apache-spark-troubleshoot-application-stops.md b/articles/hdinsight/spark/apache-spark-troubleshoot-application-stops.md index 05c06a675e582..f168d9e35818b 100644 --- a/articles/hdinsight/spark/apache-spark-troubleshoot-application-stops.md +++ b/articles/hdinsight/spark/apache-spark-troubleshoot-application-stops.md @@ -3,7 +3,7 @@ title: Apache Spark Streaming application stops after 24 days in Azure HDInsight description: An Apache Spark Streaming application stops after executing for 24 days and there are no errors in the log files. 
ms.service: hdinsight ms.topic: troubleshooting -ms.date: 07/29/2019 +ms.date: 06/08/2022 --- # Scenario: Apache Spark Streaming application stops after executing for 24 days in Azure HDInsight @@ -26,4 +26,4 @@ Replace `` with the name of your HDInsight cluster as shown in ## Next steps -[!INCLUDE [troubleshooting next steps](../includes/hdinsight-troubleshooting-next-steps.md)] \ No newline at end of file +[!INCLUDE [troubleshooting next steps](../includes/hdinsight-troubleshooting-next-steps.md)] diff --git a/articles/hdinsight/spark/apache-spark-troubleshoot-job-slowness-container.md b/articles/hdinsight/spark/apache-spark-troubleshoot-job-slowness-container.md index 14b316aa4dc9d..049d81ab719d8 100644 --- a/articles/hdinsight/spark/apache-spark-troubleshoot-job-slowness-container.md +++ b/articles/hdinsight/spark/apache-spark-troubleshoot-job-slowness-container.md @@ -3,7 +3,7 @@ title: Apache Spark slow when Azure HDInsight storage has many files description: Apache Spark job runs slowly when the Azure storage container contains many files in Azure HDInsight ms.service: hdinsight ms.topic: troubleshooting -ms.date: 08/21/2019 +ms.date: 05/26/2022 --- # Apache Spark job run slowly when the Azure storage container contains many files in Azure HDInsight diff --git a/articles/hdinsight/spark/apache-spark-troubleshoot-outofmemory.md b/articles/hdinsight/spark/apache-spark-troubleshoot-outofmemory.md index 9dc887835d8f2..d70b875ab06da 100644 --- a/articles/hdinsight/spark/apache-spark-troubleshoot-outofmemory.md +++ b/articles/hdinsight/spark/apache-spark-troubleshoot-outofmemory.md @@ -231,7 +231,7 @@ Delete all entries using steps detailed below. 1. Wait for the above command to complete and the cursor to return the prompt and then restart Livy service from Ambari, which should succeed. > [!NOTE] -> `DELETE` the livy session once it is completed its execution. The Livy batch sessions will not be deleted automatically as soon as the spark app completes, which is by design. A Livy session is an entity created by a POST request against Livy Rest server. A `DELETE` call is needed to delete that entity. Or we should wait for the GC to kick in. +> `DELETE` the livy session once it is completed its execution. The Livy batch sessions will not be deleted automatically as soon as the spark app completes, which is by design. A Livy session is an entity created by a POST request against Livy REST server. A `DELETE` call is needed to delete that entity. Or we should wait for the GC to kick in. --- diff --git a/articles/hdinsight/spark/apache-spark-zeppelin-notebook.md b/articles/hdinsight/spark/apache-spark-zeppelin-notebook.md index aa6d8959f3c50..4a7e3de2d12ef 100644 --- a/articles/hdinsight/spark/apache-spark-zeppelin-notebook.md +++ b/articles/hdinsight/spark/apache-spark-zeppelin-notebook.md @@ -165,6 +165,21 @@ Privileged domain users can use the `Shiro.ini` file to control access to the In /api/interpreter/** = authc, roles[adminGroupName] ``` +### Example shiro.ini for multiple domain groups: + + ``` + [main] + anyofrolesuser = org.apache.zeppelin.utils.AnyOfRolesUserAuthorizationFilter + + [roles] + group1 = * + group2 = * + group3 = * + + [urls] + /api/interpreter/** = authc, anyofrolesuser[group1, group2, group3] + ``` + ## Livy session management The first code paragraph in your Zeppelin notebook creates a new Livy session in your cluster. This session is shared across all Zeppelin notebooks that you later create. 
If the Livy session is killed for any reason, jobs won't run from the Zeppelin notebook. diff --git a/articles/hdinsight/spark/zookeeper-troubleshoot-quorum-fails.md b/articles/hdinsight/spark/zookeeper-troubleshoot-quorum-fails.md index a29bbf1cab861..c295669b37495 100644 --- a/articles/hdinsight/spark/zookeeper-troubleshoot-quorum-fails.md +++ b/articles/hdinsight/spark/zookeeper-troubleshoot-quorum-fails.md @@ -3,7 +3,7 @@ title: Apache ZooKeeper server fails to form a quorum in Azure HDInsight description: Apache ZooKeeper server fails to form a quorum in Azure HDInsight ms.service: hdinsight ms.topic: troubleshooting -ms.date: 05/20/2020 +ms.date: 05/28/2022 --- # Apache ZooKeeper server fails to form a quorum in Azure HDInsight @@ -118,4 +118,4 @@ If you didn't see your problem or are unable to solve your issue, visit one of t - Get answers from Azure experts through [Azure Community Support](https://azure.microsoft.com/support/community/). - Connect with [@AzureSupport](https://twitter.com/azuresupport) - the official Microsoft Azure account for improving customer experience. Connecting the Azure community to the right resources: answers, support, and experts. -- If you need more help, you can submit a support request from the [Azure portal](https://portal.azure.com/?#blade/Microsoft_Azure_Support/HelpAndSupportBlade/). Select **Support** from the menu bar or open the **Help + support** hub. For more detailed information, review [How to create an Azure support request](../../azure-portal/supportability/how-to-create-azure-support-request.md). Access to Subscription Management and billing support is included with your Microsoft Azure subscription, and Technical Support is provided through one of the [Azure Support Plans](https://azure.microsoft.com/support/plans/). \ No newline at end of file +- If you need more help, you can submit a support request from the [Azure portal](https://portal.azure.com/?#blade/Microsoft_Azure_Support/HelpAndSupportBlade/). Select **Support** from the menu bar or open the **Help + support** hub. For more detailed information, review [How to create an Azure support request](../../azure-portal/supportability/how-to-create-azure-support-request.md). Access to Subscription Management and billing support is included with your Microsoft Azure subscription, and Technical Support is provided through one of the [Azure Support Plans](https://azure.microsoft.com/support/plans/). diff --git a/articles/healthcare-apis/access-healthcare-apis.md b/articles/healthcare-apis/access-healthcare-apis.md index dc6b106351f65..b03801bc04764 100644 --- a/articles/healthcare-apis/access-healthcare-apis.md +++ b/articles/healthcare-apis/access-healthcare-apis.md @@ -6,7 +6,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 05/03/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- @@ -43,5 +43,7 @@ In this document, you learned about the tools and programming languages that you >[!div class="nextstepaction"] >[Deploy Azure Health Data Services workspace using the Azure portal](healthcare-apis-quickstart.md) +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
+ diff --git a/articles/healthcare-apis/authentication-authorization.md b/articles/healthcare-apis/authentication-authorization.md index c72ee004d92eb..adcc8293e292c 100644 --- a/articles/healthcare-apis/authentication-authorization.md +++ b/articles/healthcare-apis/authentication-authorization.md @@ -5,7 +5,7 @@ services: healthcare-apis author: ginalee-dotcom ms.service: healthcare-apis ms.topic: overview -ms.date: 03/22/2022 +ms.date: 06/06/2022 ms.author: ginle --- @@ -102,7 +102,7 @@ You can use online tools such as [https://jwt.ms](https://jwt.ms/) to view the t **The access token is valid for one hour by default. You can obtain a new token or renew it using the refresh token before it expires.** -To obtain an access token, you can use tools such as Postman, the Rest Client extension in Visual Studio Code, PowerShell, CLI, curl, and the [Azure AD authentication libraries](../active-directory/develop/reference-v2-libraries.md). +To obtain an access token, you can use tools such as Postman, the REST Client extension in Visual Studio Code, PowerShell, CLI, curl, and the [Azure AD authentication libraries](../active-directory/develop/reference-v2-libraries.md). ## Encryption @@ -118,3 +118,5 @@ In this document, you learned the authentication and authorization of Azure Heal >[!div class="nextstepaction"] >[Deploy Azure Health Data Services workspace using the Azure portal](healthcare-apis-quickstart.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/autoscale-azure-api-fhir.md b/articles/healthcare-apis/azure-api-for-fhir/autoscale-azure-api-fhir.md index cc0cb93c53904..768fc5f783a81 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/autoscale-azure-api-fhir.md +++ b/articles/healthcare-apis/azure-api-for-fhir/autoscale-azure-api-fhir.md @@ -5,17 +5,17 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 05/03/2022 +ms.date: 06/02/2022 ms.author: mikaelw --- # Autoscale for Azure API for FHIR -The Azure API for FHIR as a managed service allows customers to persist with FHIR compliant healthcare data and exchange it securely through the service API. To accommodate different transaction workloads, customers can use manual scale or autoscale. +Azure API for FHIR, as a managed service, allows customers to persist with Fast Healthcare Interoperability Resources (FHIR®) compliant healthcare data and exchange it securely through the service API. To accommodate different transaction workloads, customers can use manual scale or autoscale. ## What is autoscale? -By default, the Azure API for FHIR is set to manual scale. This option works well when the transaction workloads are known and consistent. Customers can adjust the throughput `RU/s` through the portal up to 10,000 and submit a request to increase the limit. +By default, Azure API for FHIR is set to manual scale. This option works well when the transaction workloads are known and consistent. Customers can adjust the throughput `RU/s` through the portal up to 10,000 and submit a request to increase the limit. The autoscale feature is designed to scale computing resources including the database throughput `RU/s` up and down automatically according to the workloads, thus eliminating the manual steps of adjusting allocated computing resources. 
@@ -74,5 +74,11 @@ Keep in mind that this is only an estimate based on data size and that there are The autoscale feature incurs costs because of managing the provisioned throughput units automatically. The actual costs depend on hourly usage, but keep in mind that there are minimum costs of 10% of `Tmax` for reserved throughput RU/s. However, this cost increase doesn't apply to storage and runtime costs. For information about pricing, see [Azure API for FHIR pricing](https://azure.microsoft.com/pricing/details/azure-api-for-fhir/). +## Next steps + +In this document, you learned about the autoscale feature for Azure API for FHIR. For an overview about Azure API for FHIR, see + >[!div class="nextstepaction"] >[About Azure API for FHIR](overview.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/azure-active-directory-identity-configuration.md b/articles/healthcare-apis/azure-api-for-fhir/azure-active-directory-identity-configuration.md index 46c1fc33bdcc1..1bf13d52d787e 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/azure-active-directory-identity-configuration.md +++ b/articles/healthcare-apis/azure-api-for-fhir/azure-active-directory-identity-configuration.md @@ -7,13 +7,13 @@ ms.reviewer: matjazl ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 02/15/2022 +ms.date: 06/02/2022 ms.author: mikaelw --- # Azure Active Directory identity configuration for Azure API for FHIR -When you're working with healthcare data, it's important to ensure that the data is secure, and it can't be accessed by unauthorized users or applications. FHIR servers use [OAuth 2.0](https://oauth.net/2/) to ensure this data security. The [Azure API for FHIR](https://azure.microsoft.com/services/azure-api-for-fhir/) is secured using [Azure Active Directory](../../active-directory/index.yml), which is an example of an OAuth 2.0 identity provider. This article provides an overview of FHIR server authorization and the steps needed to obtain a token to access a FHIR server. While these steps apply to any FHIR server and any identity provider, we'll walk through Azure API for FHIR as the FHIR server and Azure Active Directory (Azure AD) as our identity provider in this article. +When you're working with healthcare data, it's important to ensure that the data is secure, and it can't be accessed by unauthorized users or applications. FHIR servers use [OAuth 2.0](https://oauth.net/2/) to ensure this data security. [Azure API for FHIR](https://azure.microsoft.com/services/azure-api-for-fhir/) is secured using [Azure Active Directory](../../active-directory/index.yml), which is an example of an OAuth 2.0 identity provider. This article provides an overview of FHIR server authorization and the steps needed to obtain a token to access a FHIR server. While these steps apply to any FHIR server and any identity provider, we'll walk through Azure API for FHIR as the FHIR server and Azure Active Directory (Azure AD) as our identity provider in this article. ## Access control overview @@ -21,20 +21,20 @@ In order for a client application to access Azure API for FHIR, it must present There are many ways to obtain a token, but the Azure API for FHIR doesn't care how the token is obtained as long as it's an appropriately signed token with the correct claims. 
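For example, a minimal sketch of obtaining a token with the Azure CLI and presenting it on a request looks like the following. The account name `myfhirservice` is a placeholder; any tool that produces a valid token for the right audience works equally well.

```bash
# Sketch only: "myfhirservice" is a placeholder for your Azure API for FHIR account name.
token=$(az account get-access-token \
  --resource=https://myfhirservice.azurehealthcareapis.com \
  --query accessToken --output tsv)

# Present the token in the Authorization header; the service only checks that the token
# is valid and carries the right claims, not how it was obtained.
curl -s "https://myfhirservice.azurehealthcareapis.com/Patient" \
  -H "Authorization: Bearer $token" \
  -H "Accept: application/fhir+json"
```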
-Using [authorization code flow](../../active-directory/azuread-dev/v1-protocols-oauth-code.md) as an example, accessing a FHIR server goes through the four steps: +For example, when you use [authorization code flow](../../active-directory/azuread-dev/v1-protocols-oauth-code.md), accessing a FHIR server goes through the following four steps: ![FHIR Authorization](media/azure-ad-hcapi/fhir-authorization.png) 1. The client sends a request to the `/authorize` endpoint of Azure AD. Azure AD will redirect the client to a sign-in page where the user will authenticate using appropriate credentials (for example username and password or two-factor authentication). See details on [obtaining an authorization code](../../active-directory/azuread-dev/v1-protocols-oauth-code.md#request-an-authorization-code). Upon successful authentication, an *authorization code* is returned to the client. Azure AD will only allow this authorization code to be returned to a registered reply URL configured in the client application registration. 1. The client application exchanges the authorization code for an *access token* at the `/token` endpoint of Azure AD. When you request a token, the client application may have to provide a client secret (the applications password). See details on [obtaining an access token](../../active-directory/azuread-dev/v1-protocols-oauth-code.md#use-the-authorization-code-to-request-an-access-token). -1. The client makes a request to the Azure API for FHIR, for example `GET /Patient` to search all patients. When making the request, it includes the access token in an HTTP request header, for example `Authorization: Bearer eyJ0e...`, where `eyJ0e...` represents the Base64 encoded access token. -1. The Azure API for FHIR validates that the token contains appropriate claims (properties in the token). If everything checks out, it will complete the request and return a FHIR bundle with results to the client. +1. The client makes a request to Azure API for FHIR, for example `GET /Patient`, to search all patients. When the client makes the request, it includes the access token in an HTTP request header, for example `Authorization: Bearer eyJ0e...`, where `eyJ0e...` represents the Base64 encoded access token. +1. Azure API for FHIR validates that the token contains appropriate claims (properties in the token). If everything checks out, it will complete the request and return a FHIR bundle with results to the client. -It's important to note that the Azure API for FHIR isn't involved in validating user credentials and it doesn't issue the token. The authentication and token creation is done by Azure AD. The Azure API for FHIR simply validates that the token is signed correctly (it's authentic) and that it has appropriate claims. +It's important to note that Azure API for FHIR isn't involved in validating user credentials and it doesn't issue the token. The authentication and token creation is done by Azure AD. Azure API for FHIR simply validates that the token is signed correctly (it's authentic) and that it has appropriate claims. ## Structure of an access token -Development of FHIR applications often involves debugging access issues. If a client is denied access to the Azure API for FHIR, it's useful to understand the structure of the access token and how it can be decoded to inspect the contents (the claims) of the token. +Development of Fast Healthcare Interoperability Resources (FHIR®) applications often involves debugging access issues.
If a client is denied access to Azure API for FHIR, it's useful to understand the structure of the access token and how it can be decoded to inspect the contents (the claims) of the token. FHIR servers typically expect a [JSON Web Token](https://en.wikipedia.org/wiki/JSON_Web_Token) (JWT, sometimes pronounced "jot"). It consists of three parts: @@ -101,11 +101,13 @@ The pertinent sections of the Azure AD documentation are: * [Authorization code flow](../../active-directory/develop/v2-oauth2-auth-code-flow.md). * [Client credentials flow](../../active-directory/develop/v2-oauth2-client-creds-grant-flow.md). -There are other variations (for example on behalf of flow) for obtaining a token. Check the Azure AD documentation for details. When you use Azure API for FHIR, there are some shortcuts for obtaining an access token (for debugging purposes) [using the Azure CLI](get-healthcare-apis-access-token-cli.md). +There are other variations (for example, the on-behalf-of flow) for obtaining a token. Refer to the [Azure AD documentation](../../active-directory/index.yml) for details. When you use Azure API for FHIR, there are some shortcuts for obtaining an access token (such as for debugging purposes) [using the Azure CLI](get-healthcare-apis-access-token-cli.md). ## Next steps -In this document, you learned some of the basic concepts involved in securing access to the Azure API for FHIR using Azure AD. For information about how to deploy the Azure API for FHIR service, see. +In this document, you learned some of the basic concepts involved in securing access to the Azure API for FHIR using Azure AD. For information about how to deploy the Azure API for FHIR service, see >[!div class="nextstepaction"] ->[Deploy Azure API for FHIR](fhir-paas-portal-quickstart.md) \ No newline at end of file +>[Deploy Azure API for FHIR](fhir-paas-portal-quickstart.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/azure-api-fhir-access-token-validation.md b/articles/healthcare-apis/azure-api-for-fhir/azure-api-fhir-access-token-validation.md index d919790570b7c..a4ed2da58f4a4 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/azure-api-fhir-access-token-validation.md +++ b/articles/healthcare-apis/azure-api-for-fhir/azure-api-fhir-access-token-validation.md @@ -7,7 +7,7 @@ ms.reviewer: matjazl ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 02/15/2022 +ms.date: 06/02/2022 ms.author: mikaelw --- # Azure API for FHIR access token validation @@ -16,7 +16,7 @@ How Azure API for FHIR validates the access token will depend on implementation ## Validate token has no issues with identity provider -The first step in the token validation is to verify that the token was issued by the correct identity provider and that it hasn't been modified. The FHIR server will be configured to use a specific identity provider known as the authority `Authority`. The FHIR server will retrieve information about the identity provider from the `/.well-known/openid-configuration` endpoint. When you use Azure AD, the full URL is: +The first step in the token validation is to verify that the token was issued by the correct identity provider and that it hasn't been modified. The FHIR server will be configured to use a specific identity provider known as the authority `Authority`.
The FHIR server will retrieve information about the identity provider from the `/.well-known/openid-configuration` endpoint. When you use Azure Active Directory (Azure AD), the full URL is: ``` GET https://login.microsoftonline.com//.well-known/openid-configuration @@ -96,12 +96,12 @@ The important properties for the FHIR server are `jwks_uri`, which tells the ser Once the server has verified the authenticity of the token, the FHIR server will then proceed to validate that the client has the required claims to access the token. -When using the Azure API for FHIR, the server will validate: +When you use Azure API for FHIR, the server will validate: 1. The token has the right `Audience` (`aud` claim). 1. The user or principal that the token was issued for is allowed to access the FHIR server data plane. The `oid` claim of the token contains an identity object ID, which uniquely identifies the user or principal. -We recommend that the FHIR service be [configured to use Azure RBAC](configure-azure-rbac.md) to manage data plane role assignments. But you can also [configure local RBAC](configure-local-rbac.md) if your FHIR service uses an external or secondary Azure Active Directory tenant. +We recommend that the FHIR service be [configured to use Azure RBAC](configure-azure-rbac.md) to manage data plane role assignments. However, you can also [configure local RBAC](configure-local-rbac.md) if your FHIR service uses an external or secondary Azure AD tenant. When you use the OSS Microsoft FHIR server for Azure, the server will validate: @@ -110,10 +110,12 @@ When you use the OSS Microsoft FHIR server for Azure, the server will validate: Consult details on how to [define roles on the FHIR server](https://github.com/microsoft/fhir-server/blob/master/docs/Roles.md). -A FHIR server may also validate that an access token has the scopes (in token claim `scp`) to access the part of the FHIR API that a client is trying to access. Currently, the Azure API for FHIR and the FHIR server for Azure don't validate token scopes. +A FHIR server may also validate that an access token has the scopes (in token claim `scp`) to access the part of the FHIR API that a client is trying to access. Currently, Azure API for FHIR and the FHIR server for Azure don't validate token scopes. ## Next steps -Now that you know how to walk through token validation, you can complete the tutorial to create a JavaScript application and read FHIR data. +Now that you know how to walk through token validation, you can complete the tutorial to create a JavaScript application and read Fast Healthcare Interoperability Resources (FHIR®) data. >[!div class="nextstepaction"] ->[Web application tutorial](tutorial-web-app-fhir-server.md) \ No newline at end of file +>[Web application tutorial](tutorial-web-app-fhir-server.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
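When you're debugging a denied request, it can help to look at those claims directly. The following is a minimal sketch that decodes the payload segment of the token; it assumes the access token is already in a `$token` shell variable and that `jq` is installed (pasting the token into [https://jwt.ms](https://jwt.ms/) gives the same information).

```bash
# Sketch only: assumes the access token is in $token and jq is available.
# A JWT is three base64url-encoded segments separated by dots: header.payload.signature.
payload=$(echo "$token" | cut -d '.' -f 2 | tr '_-' '/+')

# Restore base64 padding so the decoder accepts the segment.
case $(( ${#payload} % 4 )) in
  2) payload="${payload}==" ;;
  3) payload="${payload}=" ;;
esac

# Inspect the claims the FHIR server checks, such as aud and oid.
echo "$payload" | base64 -d | jq '{aud, iss, oid}'
```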
\ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/azure-api-fhir-resource-manager-template.md b/articles/healthcare-apis/azure-api-for-fhir/azure-api-fhir-resource-manager-template.md index 36b755746f580..76df3ccad9ab2 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/azure-api-fhir-resource-manager-template.md +++ b/articles/healthcare-apis/azure-api-for-fhir/azure-api-fhir-resource-manager-template.md @@ -7,7 +7,7 @@ ms.subservice: fhir ms.topic: quickstart ms.custom: subject-armqs, devx-track-azurepowershell, mode-api ms.author: mikaelw -ms.date: 05/03/2022 +ms.date: 06/03/2022 --- # Quickstart: Use an ARM template to deploy Azure API for FHIR @@ -245,4 +245,6 @@ In this quickstart guide, you've deployed the Azure API for FHIR into your subsc >[Configure CORS](configure-cross-origin-resource-sharing.md) >[!div class="nextstepaction"] ->[Configure Private Link](configure-private-link.md) \ No newline at end of file +>[Configure Private Link](configure-private-link.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/azure-api-for-fhir-additional-settings.md b/articles/healthcare-apis/azure-api-for-fhir/azure-api-for-fhir-additional-settings.md index 5773e2cf316b1..6e97d47887b76 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/azure-api-for-fhir-additional-settings.md +++ b/articles/healthcare-apis/azure-api-for-fhir/azure-api-for-fhir-additional-settings.md @@ -7,7 +7,7 @@ ms.topic: conceptual ms.reviewer: matjazl ms.author: mikaelw author: mikaelweave -ms.date: 02/15/2022 +ms.date: 06/02/2022 --- # Additional settings for Azure API for FHIR @@ -24,7 +24,7 @@ For more information on how to change the default settings, see [configure datab ## Access control -The Azure API for FHIR will only allow authorized users to access the FHIR API. You can configure authorized users through two different mechanisms. The primary and recommended way to configure access control is using [Azure role-based access control (Azure RBAC)](../../role-based-access-control/index.yml), which is accessible through the **Access control (IAM)** blade. Azure RBAC only works if you want to secure data plane access using the Azure Active Directory tenant associated with your subscription. If you wish to use a different tenant, the Azure API for FHIR offers a local FHIR data plane access control mechanism. The configuration options aren't as rich when using the local RBAC mechanism. For details, choose one of the following options: +Azure API for FHIR will only allow authorized users to access the FHIR API. You can configure authorized users through two different mechanisms. The primary and recommended way to configure access control is using [Azure role-based access control (Azure RBAC)](../../role-based-access-control/index.yml), which is accessible through the **Access control (IAM)** blade. Azure RBAC only works if you want to secure data plane access using the Azure Active Directory tenant associated with your subscription. If you wish to use a different tenant, the Azure API for FHIR offers a local FHIR data plane access control mechanism. The configuration options aren't as rich when using the local RBAC mechanism. For details, choose one of the following options: * [Azure RBAC for FHIR data plane](configure-azure-rbac.md). 
This is the preferred option when you're using the Azure Active Directory tenant associated with your subscription. * [Local FHIR data plane access control](configure-local-rbac.md). Use this option only when you need to use an external Azure Active Directory tenant for data plane access control. @@ -51,4 +51,6 @@ In this how-to guide, you set up additional settings for the Azure API for FHIR. Next check out the series of tutorials to create a web application that reads FHIR data. >[!div class="nextstepaction"] ->[Deploy JavaScript application](tutorial-web-app-fhir-server.md) \ No newline at end of file +>[Deploy JavaScript application](tutorial-web-app-fhir-server.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/carin-implementation-guide-blue-button-tutorial.md b/articles/healthcare-apis/azure-api-for-fhir/carin-implementation-guide-blue-button-tutorial.md index 691fc8a869c98..e4055414e343e 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/carin-implementation-guide-blue-button-tutorial.md +++ b/articles/healthcare-apis/azure-api-for-fhir/carin-implementation-guide-blue-button-tutorial.md @@ -8,7 +8,7 @@ ms.topic: tutorial ms.reviewer: matjazl ms.author: mikaelw author: mikaelweave -ms.date: 02/15/2022 +ms.date: 06/02/2022 --- # CARIN Implementation Guide for Blue Button® for Azure API for FHIR @@ -75,5 +75,7 @@ The final test we'll walk through is testing [error handling](https://touchstone In this tutorial, we walked through how to pass the CARIN IG for Blue Button tests in Touchstone. Next, you can review how to test the Da Vinci formulary tests. >[!div class="nextstepaction"] ->[DaVinci Drug Formulary](davinci-drug-formulary-tutorial.md) +>[DaVinci Drug Formulary](davinci-drug-formulary-tutorial.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/centers-for-medicare-tutorial-introduction.md b/articles/healthcare-apis/azure-api-for-fhir/centers-for-medicare-tutorial-introduction.md index 63897df89bfaf..cf19442617820 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/centers-for-medicare-tutorial-introduction.md +++ b/articles/healthcare-apis/azure-api-for-fhir/centers-for-medicare-tutorial-introduction.md @@ -8,7 +8,7 @@ ms.topic: tutorial ms.reviewer: matjazl ms.author: mikaelw author: mikaelweave -ms.date: 02/15/2022 +ms.date: 06/02/2022 --- # Centers for Medicare and Medicaid Services (CMS) Interoperability and Patient Access rule introduction @@ -20,7 +20,7 @@ In this series of tutorials, we'll cover a high-level summary of the Center for The CMS released the [Interoperability and Patient Access rule](https://www.cms.gov/Regulations-and-Guidance/Guidance/Interoperability/index) on May 1, 2020. This rule requires free and secure data flow between all parties involved in patient care (patients, providers, and payers) to allow patients to access their health information when they need it. Interoperability has plagued the healthcare industry for decades, resulting in siloed data that causes negative health outcomes with higher and unpredictable costs for care. CMS is using their authority to regulate Medicare Advantage (MA), Medicaid, Children's Health Insurance Program (CHIP), and Qualified Health Plan (QHP) issuers on the Federally Facilitated Exchanges (FFEs) to enforce this rule. 
-In August 2020, CMS detailed how organizations can meet the mandate. To ensure that data can be exchanged securely and in a standardized manner, CMS identified FHIR version release 4 (R4) as the foundational standard required for the data exchange. +In August 2020, CMS detailed how organizations can meet the mandate. To ensure that data can be exchanged securely and in a standardized manner, CMS identified Fast Healthcare Interoperability Resources (FHIR®) version release 4 (R4) as the foundational standard required for the data exchange. There are three main pieces to the Interoperability and Patient Access ruling: @@ -28,7 +28,7 @@ There are three main pieces to the Interoperability and Patient Access ruling: * **Provider Directory API (Required July 1, 2021)** – CMS-regulated payers are required by this portion of the rule to make provider directory information publicly available via a standards-based API. Through making this information available, third-party application developers will be able to create services that help patients find providers for specific care needs and clinicians find other providers for care coordination. -* **Payer-to-Payer Data Exchange (Originally required Jan 1, 2022 - [Currently Delayed](https://www.cms.gov/Regulations-and-Guidance/Guidance/Interoperability/index))** – CMS-regulated payers are required to exchange certain patient clinical data at the patient’s request with other payers. While there's no requirement to follow any kind of standard, applying FHIR to exchange this data is encouraged. +* **Payer-to-Payer Data Exchange (Originally required Jan 1, 2022 - [Currently Delayed](https://www.cms.gov/Regulations-and-Guidance/Guidance/Interoperability/index))** – CMS-regulated payers are required to exchange certain patient clinical data at the patient’s request with other payers. While there's no requirement to follow any kind of standard, applying FHIR® to exchange this data is encouraged. ## Key FHIR concepts @@ -66,4 +66,6 @@ To test adherence to the various implementation guides, [Touchstone](https://tou Now that you have a basic understanding of the Interoperability and Patient Access rule, implementation guides, and available testing tool (Touchstone), we’ll walk through setting up the Azure API for FHIR for the CARIN IG for Blue Button. >[!div class="nextstepaction"] ->[CARIN Implementation Guide for Blue Button](carin-implementation-guide-blue-button-tutorial.md) \ No newline at end of file +>[CARIN Implementation Guide for Blue Button](carin-implementation-guide-blue-button-tutorial.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/configure-azure-rbac.md b/articles/healthcare-apis/azure-api-for-fhir/configure-azure-rbac.md index 840ba2fac056b..79ffe7ec65256 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/configure-azure-rbac.md +++ b/articles/healthcare-apis/azure-api-for-fhir/configure-azure-rbac.md @@ -5,7 +5,7 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 05/03/2022 +ms.date: 06/02/2022 ms.author: mikaelw ms.reviewer: matjazl --- @@ -71,3 +71,5 @@ In this article, you learned how to assign Azure roles for the FHIR data plane. >[!div class="nextstepaction"] >[Configure Private Link](configure-private-link.md) +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
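As a command-line sketch of such a data plane role assignment (the portal steps in the article remain the primary path, and the subscription, resource group, service, and user values below are placeholders), assigning the built-in **FHIR Data Contributor** role can look like this:

```bash
# Sketch only: all names and IDs are placeholders; substitute your own values.
az role assignment create \
  --assignee "user@contoso.com" \
  --role "FHIR Data Contributor" \
  --scope "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.HealthcareApis/services/<fhir-service-name>"
```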
+ diff --git a/articles/healthcare-apis/azure-api-for-fhir/configure-cross-origin-resource-sharing.md b/articles/healthcare-apis/azure-api-for-fhir/configure-cross-origin-resource-sharing.md index 39faca4850490..fa4f025304435 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/configure-cross-origin-resource-sharing.md +++ b/articles/healthcare-apis/azure-api-for-fhir/configure-cross-origin-resource-sharing.md @@ -3,14 +3,14 @@ title: Configure cross-origin resource sharing in Azure API for FHIR description: This article describes how to configure cross-origin resource sharing in Azure API for FHIR. author: mikaelweave ms.author: mikaelw -ms.date: 05/03/2022 +ms.date: 06/03/2022 ms.topic: reference ms.service: healthcare-apis ms.subservice: fhir --- # Configure cross-origin resource sharing in Azure API for FHIR -Azure API for Fast Healthcare Interoperability Resources (FHIR) supports [cross-origin resource sharing (CORS)](https://wikipedia.org/wiki/Cross-Origin_Resource_Sharing). CORS allows you to configure settings so that applications from one domain (origin) can access resources from a different domain, known as a cross-domain request. +Azure API for FHIR supports [cross-origin resource sharing (CORS)](https://wikipedia.org/wiki/Cross-Origin_Resource_Sharing). CORS allows you to configure settings so that applications from one domain (origin) can access resources from a different domain, known as a cross-domain request. CORS is often used in a single-page app that must call a RESTful API to a different domain. @@ -35,7 +35,9 @@ To configure a CORS setting in the Azure API for FHIR, specify the following set ## Next steps -In this article, you learned how to configure cross-origin sharing in Azure API for FHIR. Next deploy a fully managed Azure API for FHIR: +In this article, you learned how to configure cross-origin resource sharing in Azure API for FHIR. For more information about deploying Azure API for FHIR, see >[!div class="nextstepaction"] ->[Deploy Azure API for FHIR](fhir-paas-portal-quickstart.md) \ No newline at end of file +>[Deploy Azure API for FHIR](fhir-paas-portal-quickstart.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/configure-database.md b/articles/healthcare-apis/azure-api-for-fhir/configure-database.md index 3f9cb99770259..10a8ed0322427 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/configure-database.md +++ b/articles/healthcare-apis/azure-api-for-fhir/configure-database.md @@ -5,7 +5,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 05/03/2022 +ms.date: 06/03/2022 ms.author: ranku --- # Configure database settings @@ -40,4 +40,6 @@ In this article, you learned how to update your RUs for Azure API for FHIR. To l Or you can deploy a fully managed Azure API for FHIR: >[!div class="nextstepaction"] ->[Deploy Azure API for FHIR](fhir-paas-portal-quickstart.md) \ No newline at end of file +>[Deploy Azure API for FHIR](fhir-paas-portal-quickstart.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
\ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/configure-export-data.md b/articles/healthcare-apis/azure-api-for-fhir/configure-export-data.md index 46085100d18e5..b4f5665164b44 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/configure-export-data.md +++ b/articles/healthcare-apis/azure-api-for-fhir/configure-export-data.md @@ -5,7 +5,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: ranku --- @@ -59,4 +59,6 @@ After you've completed this final step, you’re now ready to export the data us In this article, you learned the steps in configuring export settings that allow you to export data out the Azure API for FHIR to a storage account. For more information about configuring database settings, access control, enabling diagnostic logging, and using custom headers to add data to audit logs, see >[!div class="nextstepaction"] ->[Additional Settings](azure-api-for-fhir-additional-settings.md) \ No newline at end of file +>[Additional Settings](azure-api-for-fhir-additional-settings.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/configure-local-rbac.md b/articles/healthcare-apis/azure-api-for-fhir/configure-local-rbac.md index 62104d334fa2b..a38ab96884f89 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/configure-local-rbac.md +++ b/articles/healthcare-apis/azure-api-for-fhir/configure-local-rbac.md @@ -5,7 +5,7 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 05/03/2022 +ms.date: 06/03/2022 ms.author: mikaelw ms.custom: devx-track-azurepowershell, devx-track-azurecli ms.devlang: azurecli @@ -81,3 +81,5 @@ In this article, you learned how to assign FHIR data plane access using an exter >[!div class="nextstepaction"] >[Configure Private Link](configure-private-link.md) +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. + diff --git a/articles/healthcare-apis/azure-api-for-fhir/configure-private-link.md b/articles/healthcare-apis/azure-api-for-fhir/configure-private-link.md index af35d0b4bb9c6..98eb417da2a9d 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/configure-private-link.md +++ b/articles/healthcare-apis/azure-api-for-fhir/configure-private-link.md @@ -6,7 +6,7 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 05/03/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- @@ -160,3 +160,5 @@ Based on your private link setup and for more information about registering your * [Register a public client application](register-public-azure-ad-client-app.md) * [Register a service application](register-service-azure-ad-client-app.md) +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
+ diff --git a/articles/healthcare-apis/azure-api-for-fhir/convert-data.md b/articles/healthcare-apis/azure-api-for-fhir/convert-data.md index 9408db5128d8f..13b313b0c483d 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/convert-data.md +++ b/articles/healthcare-apis/azure-api-for-fhir/convert-data.md @@ -6,7 +6,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: overview -ms.date: 03/21/2022 +ms.date: 06/03/2022 ms.author: ranku --- @@ -211,3 +211,5 @@ In this article, you learned about data conversion for Azure API for FHIR. For m >[!div class="nextstepaction"] >[Related GitHub Projects for Azure API for FHIR](fhir-github-projects.md) +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. + diff --git a/articles/healthcare-apis/azure-api-for-fhir/copy-to-synapse.md b/articles/healthcare-apis/azure-api-for-fhir/copy-to-synapse.md index f100e01247b4e..0d9a9b7d1472f 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/copy-to-synapse.md +++ b/articles/healthcare-apis/azure-api-for-fhir/copy-to-synapse.md @@ -5,7 +5,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 03/16/2022 +ms.date: 06/03/2022 ms.author: ginle --- @@ -195,4 +195,6 @@ In this article, you learned three different ways to copy your FHIR data into Sy Next, you can learn about how you can de-identify your FHIR data while exporting it to Synapse in order to protect PHI. >[!div class="nextstepaction"] ->[Exporting de-identified data](de-identified-export.md) \ No newline at end of file +>[Exporting de-identified data](de-identified-export.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/customer-managed-key.md b/articles/healthcare-apis/azure-api-for-fhir/customer-managed-key.md index 1c3f385052837..2f1123ec76096 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/customer-managed-key.md +++ b/articles/healthcare-apis/azure-api-for-fhir/customer-managed-key.md @@ -6,7 +6,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: overview -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: ginle ms.custom: devx-track-azurepowershell, devx-track-azurecli ms.devlang: azurecli @@ -16,7 +16,7 @@ ms.devlang: azurecli When you create a new Azure API for FHIR account, your data is encrypted using Microsoft-managed keys by default. Now, you can add a second layer of encryption for the data using your own key that you choose and manage yourself. -In Azure, this is typically accomplished using an encryption key in the customer's Azure Key Vault. Azure SQL, Azure Storage, and Cosmos DB are some examples that provide this capability today. Azure API for FHIR leverages this support from Cosmos DB. When you create an account, you'll have the option to specify an Azure Key Vault key URI. This key will be passed on to Cosmos DB when the DB account is provisioned. When a FHIR request is made, Cosmos DB fetches your key and uses it to encrypt/decrypt the data. +In Azure, this is typically accomplished using an encryption key in the customer's Azure Key Vault. Azure SQL, Azure Storage, and Cosmos DB are some examples that provide this capability today. Azure API for FHIR leverages this support from Cosmos DB. When you create an account, you'll have the option to specify an Azure Key Vault key URI. 
This key will be passed on to Cosmos DB when the DB account is provisioned. When a Fast Healthcare Interoperability Resources (FHIR®) request is made, Cosmos DB fetches your key and uses it to encrypt/decrypt the data. To get started, refer to the following links: @@ -147,3 +147,5 @@ In this article, you learned how to configure customer-managed keys at rest usin >[!div class="nextstepaction"] >[Cosmos DB: how to setup CMK](../../cosmos-db/how-to-setup-cmk.md#frequently-asked-questions) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/davinci-drug-formulary-tutorial.md b/articles/healthcare-apis/azure-api-for-fhir/davinci-drug-formulary-tutorial.md index 6b912afbf2ea2..585f08c258ee5 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/davinci-drug-formulary-tutorial.md +++ b/articles/healthcare-apis/azure-api-for-fhir/davinci-drug-formulary-tutorial.md @@ -6,9 +6,9 @@ ms.service: healthcare-apis ms.subservice: fhir ms.topic: tutorial ms.reviewer: matjazl -ms.author: cavoeg -author: modillon -ms.date: 02/15/2022 +ms.author: mikaelw +author: mikaelweave +ms.date: 06/03/2022 --- # Tutorial for Da Vinci Drug Formulary for Azure API for FHIR @@ -18,8 +18,7 @@ In this tutorial, we'll walk through setting up Azure API for FHIR to pass the [ ## Touchstone capability statement The first test that we'll focus on is testing Azure API for FHIR against the [Da Vinci Drug Formulary capability -statement](https://touchstone.aegis.net/touchstone/testdefinitions?selectedTestGrp=/FHIRSandbox/DaVinci/FHIR4-0-1-Test/PDEX/Formulary/00-Capability&activeOnly=false&contentEntry=TEST_SCRIPTS). If you run this test without any updates, the test will fail due to -missing search parameters and missing profiles. +statement](https://touchstone.aegis.net/touchstone/testdefinitions?selectedTestGrp=/FHIRSandbox/DaVinci/FHIR4-0-1-Test/PDEX/Formulary/00-Capability&activeOnly=false&contentEntry=TEST_SCRIPTS). If you run this test without any updates, the test will fail due to missing search parameters and missing profiles. ### Define search parameters @@ -56,4 +55,6 @@ The second test is the [query capabilities](https://touchstone.aegis.net/touchst In this tutorial, we walked through how to pass the Da Vinci Payer Data Exchange US Drug Formulary in Touchstone. Next, you can learn how to test the Da Vinci PDex Implementation Guide in Touchstone. >[!div class="nextstepaction"] ->[Da Vinci PDex](davinci-pdex-tutorial.md) \ No newline at end of file +>[Da Vinci PDex](davinci-pdex-tutorial.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/davinci-pdex-tutorial.md b/articles/healthcare-apis/azure-api-for-fhir/davinci-pdex-tutorial.md index fffad6f7b0833..f94b1a3d9b917 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/davinci-pdex-tutorial.md +++ b/articles/healthcare-apis/azure-api-for-fhir/davinci-pdex-tutorial.md @@ -7,7 +7,7 @@ ms.subservice: fhir ms.topic: tutorial ms.author: mikaelw author: mikaelweave -ms.date: 02/15/2022 +ms.date: 06/03/2022 --- # Da Vinci PDex for Azure API for FHIR @@ -54,4 +54,6 @@ The final test we'll walk through is testing patient-everything. For this test, In this tutorial, we walked through how to pass the Payer Exchange tests in Touchstone. 
Next, you can learn how to test the Da Vinci PDEX Payer Network (Plan-Net) Implementation Guide. >[!div class="nextstepaction"] ->[Da Vinci Plan Net](davinci-plan-net.md) +>[Da Vinci Plan Net](davinci-plan-net.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/davinci-plan-net.md b/articles/healthcare-apis/azure-api-for-fhir/davinci-plan-net.md index 1316fda907e7f..280076d7d8dcd 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/davinci-plan-net.md +++ b/articles/healthcare-apis/azure-api-for-fhir/davinci-plan-net.md @@ -7,8 +7,8 @@ ms.subservice: fhir ms.topic: tutorial ms.reviewer: matjazl ms.author: mikaelw -author: modillon -ms.date: 02/15/2022 +author: mikaelweave +ms.date: 06/03/2022 --- # Da Vinci Plan Net for Azure API for FHIR @@ -78,3 +78,5 @@ In this tutorial, we walked through setting up Azure API for FHIR to pass the To >[!div class="nextstepaction"] >[Supported features](fhir-features-supported.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/de-identified-export.md b/articles/healthcare-apis/azure-api-for-fhir/de-identified-export.md index 8e2d4a780f7df..72770fe4c4b78 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/de-identified-export.md +++ b/articles/healthcare-apis/azure-api-for-fhir/de-identified-export.md @@ -5,7 +5,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 02/28/2022 +ms.date: 06/03/2022 ms.author: ginle --- # Exporting de-identified data for Azure API for FHIR @@ -33,4 +33,6 @@ The $export command can also be used to export de-identified data from the FHIR In this article, you've learned how to set up and use de-identified export. Next, to learn how to export FHIR data using $export for Azure API for FHIR, see >[!div class="nextstepaction"] ->[Export data](export-data.md) \ No newline at end of file +>[Export data](export-data.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/device-data-through-iot-hub.md b/articles/healthcare-apis/azure-api-for-fhir/device-data-through-iot-hub.md index 6de87534e4a7a..c478ecc2d3cc0 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/device-data-through-iot-hub.md +++ b/articles/healthcare-apis/azure-api-for-fhir/device-data-through-iot-hub.md @@ -6,12 +6,17 @@ author: ms-puneet-nagpal ms.service: healthcare-apis ms.subservice: iomt ms.topic: tutorial -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: rabhaiya --- # Receive device data through Azure IoT Hub +> [!IMPORTANT] +> As of September 2022, the IoT Connector feature within Azure API for FHIR will be retired and replaced with the [MedTech service](../../healthcare-apis/iot/deploy-iot-connector-in-azure.md) for enhanced service quality and functionality. +> +> All new users are directed to deploy and use the MedTech service feature within the Azure Health Data Services. For more information about the MedTech service, see [What is the MedTech service?](../../healthcare-apis/iot/iot-connector-overview.md). + Azure IoT Connector for Fast Healthcare Interoperability Resources (FHIR®)* provides you the capability to ingest data from Internet of Medical Things (IoMT) devices into Azure API for FHIR. 
The [Deploy Azure IoT Connector for FHIR (preview) using Azure portal](iot-fhir-portal-quickstart.md) quickstart showed an example of device managed by Azure IoT Central [sending telemetry](iot-fhir-portal-quickstart.md#connect-your-devices-to-iot) to Azure IoT Connector for FHIR. Azure IoT Connector for FHIR can also work with devices provisioned and managed through Azure IoT Hub. This tutorial provides the procedure to connect and route device data from Azure IoT Hub to Azure IoT Connector for FHIR. ## Prerequisites @@ -108,4 +113,4 @@ Learn how to configure IoT Connector using device and FHIR mapping templates. >[!div class="nextstepaction"] >[Azure IoT Connector for FHIR mapping templates](iot-mapping-templates.md) -*In the Azure portal, Azure IoT Connector for FHIR is referred to as IoT Connector (preview). FHIR is a registered trademark of HL7 and is used with the permission of HL7. \ No newline at end of file +*In the Azure portal, Azure IoT Connector for FHIR is referred to as IoT Connector (preview). FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/disaster-recovery.md b/articles/healthcare-apis/azure-api-for-fhir/disaster-recovery.md index e40b3265b081f..bca41f26801b9 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/disaster-recovery.md +++ b/articles/healthcare-apis/azure-api-for-fhir/disaster-recovery.md @@ -5,13 +5,13 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: how-to -ms.date: 05/03/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- # Disaster recovery for Azure API for FHIR -The Azure API for FHIR® is a fully managed service, based on Fast Healthcare Interoperability Resources (FHIR®). To meet business and compliance requirements you can use the disaster recovery (DR) feature for Azure API for FHIR. +Azure API for FHIR is a fully managed service, based on Fast Healthcare Interoperability Resources (FHIR®). To meet business and compliance requirements you can use the disaster recovery (DR) feature for Azure API for FHIR. The DR feature provides a Recovery Point Objective (RPO) of 15 minutes and a Recovery Time Objective (RTO) of 60 minutes. @@ -29,7 +29,7 @@ The DR process involves the following steps: ### Data replication in the secondary region -By default, the Azure API for FHIR offers data protection through backup and restore. When the disaster recovery feature is enabled, data replication begins. A data replica is automatically created and synchronized in the secondary Azure region. The initial data replication can take a few minutes to a few hours, or longer, depending on the amount of data. The secondary data replica is a replication of the primary data. It's used directly to recover the service, and it helps speed up the recovery process. +By default, Azure API for FHIR offers data protection through backup and restore. When the disaster recovery feature is enabled, data replication begins. A data replica is automatically created and synchronized in the secondary Azure region. The initial data replication can take a few minutes to a few hours, or longer, depending on the amount of data. The secondary data replica is a replication of the primary data. It's used directly to recover the service, and it helps speed up the recovery process. It's worth noting that the throughput RU/s must have the same values in the primary and secondary regions. 
@@ -85,7 +85,7 @@ The private link feature should continue to work during a regional outage and af ### CMK -Your access to the Azure API for FHIR will be maintained if the key vault hosting the managed key in your subscription is accessible. There's a possible temporary downtime as Key Vault can take up to 20 minutes to re-establish its connection. For more information, see [Azure Key Vault availability and redundancy](../../key-vault/general/disaster-recovery-guidance.md). +Your access to Azure API for FHIR will be maintained if the key vault hosting the managed key in your subscription is accessible. There's a possible temporary downtime as Key Vault can take up to 20 minutes to re-establish its connection. For more information, see [Azure Key Vault availability and redundancy](../../key-vault/general/disaster-recovery-guidance.md). ### $export @@ -134,7 +134,9 @@ The disaster recovery feature incurs extra costs because data of the compute and ## Next steps -In this article, you've learned how DR for Azure API for FHIR works and how to enable it. To learn about Azure API for FHIR's other supported features, see: +In this article, you've learned how DR for Azure API for FHIR works and how to enable it. To learn about Azure API for FHIR's other supported features, see >[!div class="nextstepaction"] >[FHIR supported features](fhir-features-supported.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/enable-diagnostic-logging.md b/articles/healthcare-apis/azure-api-for-fhir/enable-diagnostic-logging.md index 81396fe08cf3b..82b046d4b1a90 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/enable-diagnostic-logging.md +++ b/articles/healthcare-apis/azure-api-for-fhir/enable-diagnostic-logging.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.reviewer: matjazl ms.author: mikaelw author: mikaelweave -ms.date: 02/15/2022 +ms.date: 06/03/2022 --- # Enable Diagnostic Logging in Azure API for FHIR @@ -124,4 +124,6 @@ In this article, you learned how to enable Audit Logs for Azure API for FHIR. Fo >[Configure CORS](configure-cross-origin-resource-sharing.md) >[!div class="nextstepaction"] ->[Configure Private Link](configure-private-link.md) \ No newline at end of file +>[Configure Private Link](configure-private-link.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/export-data.md b/articles/healthcare-apis/azure-api-for-fhir/export-data.md index 5db22338821f1..eff5211f060de 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/export-data.md +++ b/articles/healthcare-apis/azure-api-for-fhir/export-data.md @@ -5,7 +5,7 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- @@ -143,4 +143,6 @@ address range in CIDR format is used instead, 100.64.0.0/10. The reason why the In this article, you've learned how to export FHIR resources using $export command. Next, to learn how to export de-identified data, see >[!div class="nextstepaction"] ->[Export de-identified data](de-identified-export.md) \ No newline at end of file +>[Export de-identified data](de-identified-export.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
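The export-data.md changes above keep referring to the `$export` operation. As a minimal sketch (assuming `{{FHIR_URL}}` stands in for your service URL and that a destination storage account has already been configured), a system-level export request follows the FHIR Bulk Data pattern:

```rest
GET {{FHIR_URL}}/$export
Accept: application/fhir+json
Prefer: respond-async
```

The service accepts the request asynchronously and returns a `Content-Location` header with a status URL that can be polled until the export job completes.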
\ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/fhir-app-registration.md b/articles/healthcare-apis/azure-api-for-fhir/fhir-app-registration.md index 91d9a26f8c8bf..2b54bfb65ec50 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/fhir-app-registration.md +++ b/articles/healthcare-apis/azure-api-for-fhir/fhir-app-registration.md @@ -8,7 +8,7 @@ ms.topic: overview ms.reviewer: dseven ms.author: mikaelw author: matjazl -ms.date: 02/15/2022 +ms.date: 06/03/2022 --- # Register the Azure Active Directory apps for Azure API for FHIR @@ -34,14 +34,16 @@ In order for an application to interact with Azure AD, it needs to be registered In this overview, you've gone through the types of application registrations you may need in order to work with a FHIR API. -Based on your setup, please see the how-to-guides to register your applications +Based on your setup, refer to the how-to-guides to register your applications: * [Register a resource application](register-resource-azure-ad-client-app.md) * [Register a confidential client application](register-confidential-azure-ad-client-app.md) * [Register a public client application](register-public-azure-ad-client-app.md) * [Register a service application](register-service-azure-ad-client-app.md) -Once you've registered your applications, you can deploy the Azure API for FHIR. +After you've registered your applications, you can deploy Azure API for FHIR. >[!div class="nextstepaction"] ->[Deploy Azure API for FHIR](fhir-paas-powershell-quickstart.md) \ No newline at end of file +>[Deploy Azure API for FHIR](fhir-paas-portal-quickstart.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/fhir-features-supported.md b/articles/healthcare-apis/azure-api-for-fhir/fhir-features-supported.md index 643de05c1439c..d86d1574b560e 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/fhir-features-supported.md +++ b/articles/healthcare-apis/azure-api-for-fhir/fhir-features-supported.md @@ -6,7 +6,7 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 05/05/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- @@ -90,3 +90,5 @@ In this article, you've read about the supported FHIR features in Azure API for >[!div class="nextstepaction"] >[Deploy Azure API for FHIR](fhir-paas-portal-quickstart.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/fhir-github-projects.md b/articles/healthcare-apis/azure-api-for-fhir/fhir-github-projects.md index 0c43e33c6ced0..2004b40a286ed 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/fhir-github-projects.md +++ b/articles/healthcare-apis/azure-api-for-fhir/fhir-github-projects.md @@ -6,7 +6,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 02/28/2022 +ms.date: 06/03/2022 ms.author: ginle --- @@ -58,4 +58,6 @@ We have many open-source projects on GitHub that provide you the source code and In this article, you've learned about the related GitHub Projects for Azure API for FHIR that provide source code and instructions to let you experiment and deploy services for various uses. 
For more information about Azure API for FHIR, see >[!div class="nextstepaction"] ->[What is Azure API for FHIR?](overview.md) \ No newline at end of file +>[What is Azure API for FHIR?](overview.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/fhir-paas-cli-quickstart.md b/articles/healthcare-apis/azure-api-for-fhir/fhir-paas-cli-quickstart.md index 7fcae48b4ea99..0e2aa1c271a5d 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/fhir-paas-cli-quickstart.md +++ b/articles/healthcare-apis/azure-api-for-fhir/fhir-paas-cli-quickstart.md @@ -6,7 +6,7 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: quickstart -ms.date: 05/03/2022 +ms.date: 06/03/2022 ms.author: mikaelw ms.custom: devx-track-azurecli, mode-api --- @@ -85,4 +85,6 @@ In this quickstart guide, you've deployed the Azure API for FHIR into your subsc >[Configure CORS](configure-cross-origin-resource-sharing.md) >[!div class="nextstepaction"] ->[Configure Private Link](configure-private-link.md) \ No newline at end of file +>[Configure Private Link](configure-private-link.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/fhir-paas-portal-quickstart.md b/articles/healthcare-apis/azure-api-for-fhir/fhir-paas-portal-quickstart.md index e907821fecd2d..223e2a4b4a5cb 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/fhir-paas-portal-quickstart.md +++ b/articles/healthcare-apis/azure-api-for-fhir/fhir-paas-portal-quickstart.md @@ -6,7 +6,7 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: quickstart -ms.date: 03/21/2022 +ms.date: 06/03/2022 ms.author: mikaelw ms.custom: mode-api --- @@ -85,4 +85,6 @@ In this quickstart guide, you've deployed the Azure API for FHIR into your subsc >[Configure CORS](configure-cross-origin-resource-sharing.md) >[!div class="nextstepaction"] ->[Configure Private Link](configure-private-link.md) \ No newline at end of file +>[Configure Private Link](configure-private-link.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/fhir-paas-powershell-quickstart.md b/articles/healthcare-apis/azure-api-for-fhir/fhir-paas-powershell-quickstart.md index 342a8061cb801..c70f589e754d3 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/fhir-paas-powershell-quickstart.md +++ b/articles/healthcare-apis/azure-api-for-fhir/fhir-paas-powershell-quickstart.md @@ -6,7 +6,7 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: quickstart -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: mikaelw ms.custom: devx-track-azurepowershell --- @@ -88,4 +88,6 @@ In this quickstart guide, you've deployed the Azure API for FHIR into your subsc >[Configure CORS](configure-cross-origin-resource-sharing.md) >[!div class="nextstepaction"] ->[Configure Private Link](configure-private-link.md) \ No newline at end of file +>[Configure Private Link](configure-private-link.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
\ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/fhir-rest-api-capabilities.md b/articles/healthcare-apis/azure-api-for-fhir/fhir-rest-api-capabilities.md index 5756639d67196..874a4a61a7e73 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/fhir-rest-api-capabilities.md +++ b/articles/healthcare-apis/azure-api-for-fhir/fhir-rest-api-capabilities.md @@ -5,7 +5,7 @@ author: stevewohl ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- diff --git a/articles/healthcare-apis/azure-api-for-fhir/find-identity-object-ids.md b/articles/healthcare-apis/azure-api-for-fhir/find-identity-object-ids.md index 5f2fc296ce0b6..c3ffa5e43b859 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/find-identity-object-ids.md +++ b/articles/healthcare-apis/azure-api-for-fhir/find-identity-object-ids.md @@ -6,7 +6,7 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 05/03/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- @@ -69,3 +69,5 @@ In this article, you've learned how to find identity object IDs needed to config >[!div class="nextstepaction"] >[Configure local RBAC settings](configure-local-rbac.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/get-healthcare-apis-access-token-cli.md b/articles/healthcare-apis/azure-api-for-fhir/get-healthcare-apis-access-token-cli.md index bdd9e16076848..38a7a8c2500af 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/get-healthcare-apis-access-token-cli.md +++ b/articles/healthcare-apis/azure-api-for-fhir/get-healthcare-apis-access-token-cli.md @@ -6,7 +6,7 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 05/03/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- @@ -36,3 +36,5 @@ In this article, you've learned how to obtain an access token for the Azure API >[!div class="nextstepaction"] >[Access the FHIR service using Postman](./../fhir/use-postman.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/get-started-with-azure-api-fhir.md b/articles/healthcare-apis/azure-api-for-fhir/get-started-with-azure-api-fhir.md index 5be1a4d4697d6..1cb6dffb5f1e4 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/get-started-with-azure-api-fhir.md +++ b/articles/healthcare-apis/azure-api-for-fhir/get-started-with-azure-api-fhir.md @@ -5,7 +5,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: quickstart -ms.date: 05/17/2022 +ms.date: 06/03/2022 ms.author: ranku --- @@ -34,7 +34,7 @@ Refer to the steps in the [Quickstart guide](fhir-paas-portal-quickstart.md) for ## Accessing Azure API for FHIR -When you're working with healthcare data, it's important to ensure that the data is secure, and it can't be accessed by unauthorized users or applications. FHIR servers use [OAuth 2.0](https://oauth.net/2/) to ensure this data security. Azure API for FHIR is secured using [Azure Active Directory (Azure AD)](https://docs.microsoft.com/azure/active-directory/), which is an example of an OAuth 2.0 identity provider. 
[Azure AD identity configuration for Azure API for FHIR](././../azure-api-for-fhir/azure-active-directory-identity-configuration.md) provides an overview of FHIR server authorization, and the steps needed to obtain a token to access a FHIR server. While these steps apply to any FHIR server and any identity provider, this article will walk you through Azure API for FHIR as the FHIR server and Azure AD as our identity provider. For more information about accessing Azure API for FHIR, see [Access control overview](././../azure-api-for-fhir/azure-active-directory-identity-configuration.md#access-control-overview). +When you're working with healthcare data, it's important to ensure that the data is secure, and it can't be accessed by unauthorized users or applications. FHIR servers use [OAuth 2.0](https://oauth.net/2/) to ensure this data security. Azure API for FHIR is secured using [Azure Active Directory (Azure AD)](../../active-directory/index.yml), which is an example of an OAuth 2.0 identity provider. [Azure AD identity configuration for Azure API for FHIR](././../azure-api-for-fhir/azure-active-directory-identity-configuration.md) provides an overview of FHIR server authorization, and the steps needed to obtain a token to access a FHIR server. While these steps apply to any FHIR server and any identity provider, this article will walk you through Azure API for FHIR as the FHIR server and Azure AD as our identity provider. For more information about accessing Azure API for FHIR, see [Access control overview](././../azure-api-for-fhir/azure-active-directory-identity-configuration.md#access-control-overview). ### Access token validation @@ -51,7 +51,7 @@ For more information about the two kinds of application registrations, see [Regi ## Configure Azure RBAC for FHIR -The article [Configure Azure RBAC for FHIR](configure-azure-rbac.md), describes how to use [Azure role-based access control (Azure RBAC)](https://docs.microsoft.com/azure/role-based-access-control/) to assign access to the Azure API for FHIR data plane. Azure RBAC is the preferred method for assigning data plane access when data plane users are managed in the Azure AD tenant associated with your Azure subscription. If you're using an external Azure AD tenant, refer to the [local RBAC assignment reference](configure-local-rbac.md). +The article [Configure Azure RBAC for FHIR](configure-azure-rbac.md), describes how to use [Azure role-based access control (Azure RBAC)](../../role-based-access-control/index.yml) to assign access to the Azure API for FHIR data plane. Azure RBAC is the preferred method for assigning data plane access when data plane users are managed in the Azure AD tenant associated with your Azure subscription. If you're using an external Azure AD tenant, refer to the [local RBAC assignment reference](configure-local-rbac.md). ## Next steps @@ -63,5 +63,4 @@ This article described the basic steps to get started using Azure API for FHIR. >[!div class="nextstepaction"] >[Frequently asked questions about Azure API for FHIR](fhir-faq.yml) - - +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
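As a hedged illustration of the OAuth 2.0 flow described in get-started-with-azure-api-fhir.md, once a client has obtained an access token from Azure AD it sends that token in the `Authorization` header of each FHIR request; the URL and token below are placeholders:

```rest
GET {{FHIR_URL}}/Patient
Authorization: Bearer {{access_token}}
```

A request with a missing or invalid token is rejected with `401 Unauthorized`.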
\ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/how-to-do-custom-search.md b/articles/healthcare-apis/azure-api-for-fhir/how-to-do-custom-search.md index 054e0e3de92e2..f06cbc63f4d25 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/how-to-do-custom-search.md +++ b/articles/healthcare-apis/azure-api-for-fhir/how-to-do-custom-search.md @@ -5,12 +5,12 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- # Defining custom search parameters for Azure API for FHIR -The FHIR specification defines a set of search parameters for all resources and search parameters that are specific to a resource(s). However, there are scenarios where you might want to search against an element in a resource that isn’t defined by the FHIR specification as a standard search parameter. This article describes how you can define your own [search parameters](https://www.hl7.org/fhir/searchparameter.html) to be used in the Azure API for FHIR. +The Fast Healthcare Interoperability Resources (FHIR®) specification defines a set of search parameters for all resources and search parameters that are specific to a resource(s). However, there are scenarios where you might want to search against an element in a resource that isn’t defined by the FHIR specification as a standard search parameter. This article describes how you can define your own [search parameters](https://www.hl7.org/fhir/searchparameter.html) to be used in the Azure API for FHIR. > [!NOTE] > Each time you create, update, or delete a search parameter you’ll need to run a [reindex job](how-to-run-a-reindex.md) to enable the search parameter to be used in production. Below we will outline how you can test search parameters before reindexing the entire FHIR server. @@ -215,3 +215,5 @@ In this article, you’ve learned how to create a search parameter. Next you can >[!div class="nextstepaction"] >[How to run a reindex job](how-to-run-a-reindex.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/how-to-run-a-reindex.md b/articles/healthcare-apis/azure-api-for-fhir/how-to-run-a-reindex.md index 242236c2b0a76..4b6595b28d6a4 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/how-to-run-a-reindex.md +++ b/articles/healthcare-apis/azure-api-for-fhir/how-to-run-a-reindex.md @@ -5,7 +5,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- # Running a reindex job in Azure API for FHIR @@ -221,3 +221,5 @@ In this article, you’ve learned how to start a reindex job. To learn how to de >[!div class="nextstepaction"] >[Defining custom search parameters](how-to-do-custom-search.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
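To make the how-to-do-custom-search.md description concrete, here is a rough sketch of creating a custom search parameter by posting a standard FHIR `SearchParameter` resource. The extension URL and parameter name are hypothetical, and, as the article notes, a reindex job must run before the new parameter can be used:

```rest
POST {{FHIR_URL}}/SearchParameter
Content-Type: application/fhir+json

{
  "resourceType": "SearchParameter",
  "url": "http://contoso.org/fhir/SearchParameter/patient-preferred-pharmacy",
  "name": "preferred-pharmacy",
  "status": "active",
  "description": "Search Patient resources by a hypothetical preferred-pharmacy string extension",
  "code": "preferred-pharmacy",
  "base": ["Patient"],
  "type": "string",
  "expression": "Patient.extension.where(url = 'http://contoso.org/fhir/StructureDefinition/preferred-pharmacy').value.as(string)"
}
```

Once the reindex job described in how-to-run-a-reindex.md completes, the parameter can be used like any built-in one, for example `GET {{FHIR_URL}}/Patient?preferred-pharmacy=Contoso`.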
diff --git a/articles/healthcare-apis/azure-api-for-fhir/iot-azure-resource-manager-template-quickstart.md b/articles/healthcare-apis/azure-api-for-fhir/iot-azure-resource-manager-template-quickstart.md index 67c036e75b054..6619603b071c2 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/iot-azure-resource-manager-template-quickstart.md +++ b/articles/healthcare-apis/azure-api-for-fhir/iot-azure-resource-manager-template-quickstart.md @@ -6,12 +6,17 @@ ms.service: healthcare-apis ms.subservice: iomt ms.topic: quickstart ms.author: rabhaiya -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.custom: devx-track-azurepowershell, mode-arm --- # Quickstart: Use an Azure Resource Manager (ARM) template to deploy Azure IoT Connector for FHIR (preview) +> [!IMPORTANT] +> As of September 2022, the IoT Connector feature within Azure API for FHIR will be retired and replaced with the [MedTech service](../../healthcare-apis/iot/deploy-iot-connector-in-azure.md) for enhanced service quality and functionality. +> +> All new users are directed to deploy and use the MedTech service feature within the Azure Health Data Services. For more information about the MedTech service, see [What is the MedTech service?](../../healthcare-apis/iot/iot-connector-overview.md). + In this quickstart, you'll learn how to use an Azure Resource Manager template (ARM template) to deploy Azure IoT Connector for Fast Healthcare Interoperability Resources (FHIR®)*, a feature of Azure API for FHIR. To deploy a working instance of Azure IoT Connector for FHIR, this template also deploys a parent Azure API for FHIR service and an Azure IoT Central application that exports telemetry from a device simulator to Azure IoT Connector for FHIR. You can execute ARM template to deploy Azure IoT Connector for FHIR through the Azure portal, PowerShell, or CLI. [!INCLUDE [About Azure Resource Manager](../../../includes/resource-manager-quickstart-introduction.md)] @@ -311,4 +316,4 @@ Learn how to configure IoT Connector using device and FHIR mapping templates. >[!div class="nextstepaction"] >[Azure IoT Connector for FHIR mapping templates](iot-mapping-templates.md) -*In the Azure portal, Azure IoT Connector for FHIR is referred to as IoT Connector (preview). FHIR is a registered trademark of HL7 and is used with the permission of HL7. +*In the Azure portal, Azure IoT Connector for FHIR is referred to as IoT Connector (preview). FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/iot-data-flow.md b/articles/healthcare-apis/azure-api-for-fhir/iot-data-flow.md index 52d112e585820..413e9f52e677d 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/iot-data-flow.md +++ b/articles/healthcare-apis/azure-api-for-fhir/iot-data-flow.md @@ -6,12 +6,17 @@ author: ms-puneet-nagpal ms.service: healthcare-apis ms.subservice: iomt ms.topic: conceptual -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: rabhaiya --- # Azure IoT Connector for FHIR (preview) data flow +> [!IMPORTANT] +> As of September 2022, the IoT Connector feature within Azure API for FHIR will be retired and replaced with the [MedTech service](../../healthcare-apis/iot/deploy-iot-connector-in-azure.md) for enhanced service quality and functionality. +> +> All new users are directed to deploy and use the MedTech service feature within the Azure Health Data Services. 
For more information about the MedTech service, see [What is the MedTech service?](../../healthcare-apis/iot/iot-connector-overview.md). + This article provides an overview of data flow in Azure IoT Connector for Fast Healthcare Interoperability Resources (FHIR®)*. You'll learn about different data processing stages within Azure IoT Connector for FHIR that transform device data into FHIR-based [Observation](https://www.hl7.org/fhir/observation.html) resources. ![Azure IoT Connector for FHIR data flow](media/concepts-iot-data-flow/iot-connector-data-flow.png) @@ -59,4 +64,4 @@ For more information about how to create device and FHIR mapping templates, see >[!div class="nextstepaction"] >[Azure IoT Connector for FHIR mapping templates](iot-mapping-templates.md) -*In the Azure portal, Azure IoT Connector for FHIR is referred to as IoT Connector (preview). FHIR is a registered trademark of HL7 and is used with the permission of HL7. \ No newline at end of file +*In the Azure portal, Azure IoT Connector for FHIR is referred to as IoT Connector (preview). FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/iot-fhir-portal-quickstart.md b/articles/healthcare-apis/azure-api-for-fhir/iot-fhir-portal-quickstart.md index 2ca5c72c172d0..0644d422c34fc 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/iot-fhir-portal-quickstart.md +++ b/articles/healthcare-apis/azure-api-for-fhir/iot-fhir-portal-quickstart.md @@ -6,13 +6,18 @@ author: msjasteppe ms.service: healthcare-apis ms.subservice: iomt ms.topic: quickstart -ms.date: 04/11/2022 +ms.date: 06/03/2022 ms.author: rabhaiya ms.custom: mode-ui --- # Quickstart: Deploy Azure IoT Connector for FHIR (preview) using Azure portal +> [!IMPORTANT] +> As of September 2022, the IoT Connector feature within Azure API for FHIR will be retired and replaced with the [MedTech service](../../healthcare-apis/iot/deploy-iot-connector-in-azure.md) for enhanced service quality and functionality. +> +> All new users are directed to deploy and use the MedTech service feature within the Azure Health Data Services. For more information about the MedTech service, see [What is the MedTech service?](../../healthcare-apis/iot/iot-connector-overview.md). + Azure IoT Connector for Fast Healthcare Interoperability Resources (FHIR®)* is an optional feature of Azure API for FHIR that provides the capability to ingest data from Internet of Medical Things (IoMT) devices. During the preview phase, Azure IoT Connector for FHIR feature is being available for free. In this quickstart, you'll learn how to: - Deploy and configure Azure IoT Connector for FHIR using the Azure portal - Use a simulated device to send data to Azure IoT Connector for FHIR @@ -214,4 +219,4 @@ Learn how to configure IoT Connector using device and FHIR mapping templates. >[!div class="nextstepaction"] >[Azure IoT Connector for FHIR mapping templates](iot-mapping-templates.md) -*In the Azure portal, Azure IoT Connector for FHIR is referred to as IoT Connector (preview). FHIR is a registered trademark of HL7 and is used with the permission of HL7. +*In the Azure portal, Azure IoT Connector for FHIR is referred to as IoT Connector (preview). FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
diff --git a/articles/healthcare-apis/azure-api-for-fhir/iot-mapping-templates.md b/articles/healthcare-apis/azure-api-for-fhir/iot-mapping-templates.md index 98fd040b95928..de9421ff1a7bd 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/iot-mapping-templates.md +++ b/articles/healthcare-apis/azure-api-for-fhir/iot-mapping-templates.md @@ -6,11 +6,17 @@ author: ms-puneet-nagpal ms.service: healthcare-apis ms.subservice: iomt ms.topic: conceptual -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: rabhaiya --- # Azure IoT Connector for FHIR (preview) mapping templates + +> [!IMPORTANT] +> As of September 2022, the IoT Connector feature within Azure API for FHIR will be retired and replaced with the [MedTech service](../../healthcare-apis/iot/deploy-iot-connector-in-azure.md) for enhanced service quality and functionality. +> +> All new users are directed to deploy and use the MedTech service feature within the Azure Health Data Services. For more information about the MedTech service, see [What is the MedTech service?](../../healthcare-apis/iot/iot-connector-overview.md). + This article details how to configure Azure IoT Connector for Fast Healthcare Interoperability Resources (FHIR®)* using mapping templates. The Azure IoT Connector for FHIR requires two types of JSON-based mapping templates. The first type, **Device mapping**, is responsible for mapping the device payloads sent to the `devicedata` Azure Event Hub end point. It extracts types, device identifiers, measurement date time, and the measurement value(s). The second type, **FHIR mapping**, controls the mapping for FHIR resource. It allows configuration of the length of the observation period, FHIR data type used to store the values, and terminology code(s). @@ -659,4 +665,4 @@ Check out frequently asked questions on Azure IoT Connector for FHIR (preview). >[!div class="nextstepaction"] >[Azure IoT Connector for FHIR FAQs](fhir-faq.yml) -*In the Azure portal, Azure IoT Connector for FHIR is referred to as IoT Connector (preview). FHIR is a registered trademark of HL7 and is used with the permission of HL7. \ No newline at end of file +*In the Azure portal, Azure IoT Connector for FHIR is referred to as IoT Connector (preview). FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/iot-metrics-diagnostics-export.md b/articles/healthcare-apis/azure-api-for-fhir/iot-metrics-diagnostics-export.md index 5abed812729de..b6e8f4e64650c 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/iot-metrics-diagnostics-export.md +++ b/articles/healthcare-apis/azure-api-for-fhir/iot-metrics-diagnostics-export.md @@ -6,12 +6,17 @@ author: msjasteppe ms.service: healthcare-apis ms.subservice: iomt ms.topic: how-to -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: jasteppe --- # Export IoT connector for FHIR (preview) Metrics through Diagnostic settings +> [!IMPORTANT] +> As of September 2022, the IoT Connector feature within Azure API for FHIR will be retired and replaced with the [MedTech service](../../healthcare-apis/iot/deploy-iot-connector-in-azure.md) for enhanced service quality and functionality. +> +> All new users are directed to deploy and use the MedTech service feature within the Azure Health Data Services. For more information about the MedTech service, see [What is the MedTech service?](../../healthcare-apis/iot/iot-connector-overview.md). 
+ In this article, you'll learn how to export Azure IoT connector for Fast Healthcare Interoperability Resources (FHIR®) Metrics logs. The feature that enables Metrics logging is the [**Diagnostic settings**](../../azure-monitor/essentials/diagnostic-settings.md) in the Azure portal. > [!TIP] @@ -58,5 +63,5 @@ For more information about the frequently asked questions of Azure IoT connector >[!div class="nextstepaction"] >[Frequently asked questions about IoT connector](../../healthcare-apis/iot/iot-connector-faqs.md) -*In the Azure portal, Azure IoT connector for FHIR is referred to as IoT connector (preview). FHIR is a registered trademark of HL7 and is used with the permission of HL7. +*In the Azure portal, Azure IoT connector for FHIR is referred to as IoT connector (preview). FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/iot-metrics-display.md b/articles/healthcare-apis/azure-api-for-fhir/iot-metrics-display.md index 7675afb529bd6..f30c15f2d59ce 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/iot-metrics-display.md +++ b/articles/healthcare-apis/azure-api-for-fhir/iot-metrics-display.md @@ -6,11 +6,16 @@ author: msjasteppe ms.service: healthcare-apis ms.subservice: iomt ms.topic: how-to -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: jasteppe --- -# Display and configure IoT Connector for FHIR (preview) metrics +# Display and configure IoT Connector for FHIR (preview) metrics + +> [!IMPORTANT] +> As of September 2022, the IoT Connector feature within Azure API for FHIR will be retired and replaced with the [MedTech service](../../healthcare-apis/iot/deploy-iot-connector-in-azure.md) for enhanced service quality and functionality. +> +> All new users are directed to deploy and use the MedTech service feature within the Azure Health Data Services. For more information about the MedTech service, see [What is the MedTech service?](../../healthcare-apis/iot/iot-connector-overview.md). In this article, you'll learn how to display and configure Azure IoT Connector for Fast Healthcare Interoperability Resources (FHIR®)* metrics. @@ -73,4 +78,4 @@ Get answers to frequently asked questions about Azure IoT Connector for FHIR. >[!div class="nextstepaction"] >[Azure IoT Connector for FHIR FAQ](fhir-faq.yml) -*In the Azure portal, Azure IoT Connector for FHIR is referred to as IoT Connector (preview). FHIR is a registered trademark of HL7 and is used with the permission of HL7. +*In the Azure portal, Azure IoT Connector for FHIR is referred to as IoT Connector (preview). FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
diff --git a/articles/healthcare-apis/azure-api-for-fhir/iot-troubleshoot-guide.md b/articles/healthcare-apis/azure-api-for-fhir/iot-troubleshoot-guide.md index a299cc8237a60..604420883b4e5 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/iot-troubleshoot-guide.md +++ b/articles/healthcare-apis/azure-api-for-fhir/iot-troubleshoot-guide.md @@ -6,11 +6,18 @@ author: msjasteppe ms.service: healthcare-apis ms.subservice: iomt ms.topic: troubleshooting -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: jasteppe --- + # IoT Connector for FHIR (preview) troubleshooting guide +> [!IMPORTANT] +> As of September 2022, the IoT Connector feature within Azure API for FHIR will be retired and replaced with the [MedTech service](../../healthcare-apis/iot/deploy-iot-connector-in-azure.md) for enhanced service quality and functionality. +> +> All new users are directed to deploy and use the MedTech service feature within the Azure Health Data Services. For more information about the MedTech service, see [What is the MedTech service?](../../healthcare-apis/iot/iot-connector-overview.md). + + This article provides steps for troubleshooting common Azure IoT Connector for Fast Healthcare Interoperability Resources (FHIR®)* error messages and conditions. You'll also learn how to create copies of the Azure IoT Connector for FHIR conversion mappings JSON (for example: Device and FHIR). @@ -185,4 +192,4 @@ Check out frequently asked questions about the Azure IoT Connector for FHIR. >[!div class="nextstepaction"] >[Azure IoT Connector for FHIR FAQs](fhir-faq.yml) -*In the Azure portal, Azure IoT Connector for FHIR is referred to as IoT Connector (preview). FHIR is a registered trademark of HL7 and is used with the permission of HL7. \ No newline at end of file +*In the Azure portal, Azure IoT Connector for FHIR is referred to as IoT Connector (preview). FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/move-fhir-service.md b/articles/healthcare-apis/azure-api-for-fhir/move-fhir-service.md index f04a95295bfd9..5218652940fb9 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/move-fhir-service.md +++ b/articles/healthcare-apis/azure-api-for-fhir/move-fhir-service.md @@ -5,7 +5,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 05/03/2022 +ms.date: 06/03/2022 ms.author: ranku --- @@ -56,3 +56,5 @@ In this article, you've learned how to move the Azure API for FHIR instance. For >[!div class="nextstepaction"] >[Supported FHIR features](fhir-features-supported.md) +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. + diff --git a/articles/healthcare-apis/azure-api-for-fhir/overview-of-search.md b/articles/healthcare-apis/azure-api-for-fhir/overview-of-search.md index 285738bbcd561..92c6c2d36fb32 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/overview-of-search.md +++ b/articles/healthcare-apis/azure-api-for-fhir/overview-of-search.md @@ -5,12 +5,12 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- # Overview of search in Azure API for FHIR -The FHIR specification defines the fundamentals of search for FHIR resources. This article will guide you through some key aspects to searching resources in FHIR. 
For complete details about searching FHIR resources, refer to [Search](https://www.hl7.org/fhir/search.html) in the HL7 FHIR Specification. Throughout this article, we'll give examples of search syntax. Each search will be against your FHIR server, which typically has a URL of `https://.azurewebsites.net`. In the examples, we'll use the placeholder {{FHIR_URL}} for this URL. +The Fast Healthcare Interoperability Resources (FHIR®) specification defines the fundamentals of search for FHIR resources. This article will guide you through some key aspects to searching resources in FHIR. For complete details about searching FHIR resources, refer to [Search](https://www.hl7.org/fhir/search.html) in the HL7 FHIR Specification. Throughout this article, we'll give examples of search syntax. Each search will be against your FHIR server, which typically has a URL of `https://.azurewebsites.net`. In the examples, we'll use the placeholder {{FHIR_URL}} for this URL. FHIR searches can be against a specific resource type, a specified [compartment](https://www.hl7.org/fhir/compartmentdefinition.html), or all resources. The simplest way to execute a search in FHIR is to use a `GET` request. For example, if you want to pull all patients in the database, you could use the following request: @@ -165,3 +165,5 @@ Now that you've learned about the basics of search, see the search samples page >[!div class="nextstepaction"] >[FHIR search examples](search-samples.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/overview.md b/articles/healthcare-apis/azure-api-for-fhir/overview.md index 72b4dca4dd050..d813a68b8d5b1 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/overview.md +++ b/articles/healthcare-apis/azure-api-for-fhir/overview.md @@ -6,11 +6,11 @@ author: matjazl ms.service: healthcare-apis ms.subservice: fhir ms.topic: overview -ms.date: 03/21/2022 +ms.date: 06/03/2022 ms.author: chrupa --- -# What is Azure API for FHIR®? +# What is Azure API for FHIR? Azure API for FHIR enables rapid exchange of data through Fast Healthcare Interoperability Resources (FHIR®) APIs, backed by a managed Platform-as-a Service (PaaS) offering in the cloud. It makes it easier for anyone working with health data to ingest, manage, and persist Protected Health Information [PHI](https://www.hhs.gov/answers/hipaa/what-is-phi/index.html) in the cloud: @@ -79,6 +79,11 @@ For use cases that require extending or customizing the FHIR server, or requires ## Azure IoT Connector for FHIR (preview) +> [!IMPORTANT] +> As of September 2022, the IoT Connector feature within Azure API for FHIR will be retired and replaced with the [MedTech service](../../healthcare-apis/iot/deploy-iot-connector-in-azure.md) for enhanced service quality and functionality. +> +> All new users are directed to deploy and use the MedTech service feature within the Azure Health Data Services. For more information about the MedTech service, see [What is the MedTech service?](../../healthcare-apis/iot/iot-connector-overview.md). + Azure IoT Connector for (FHIR®)* is an optional feature of Azure API for FHIR that provides the capability to ingest data from Internet of Medical Things (IoMT) devices. Internet of Medical Things is a category of IoT devices that capture and exchange health & wellness data with other healthcare IT systems over network. 
Some examples of IoMT devices include fitness and clinical wearables, monitoring sensors, activity trackers, point of care kiosks, or even a smart pill. The Azure IoT Connector for FHIR feature enables you to quickly set up a service to ingest IoMT data into Azure API for FHIR in a scalable, secure, and compliant manner. Azure IoT Connector for FHIR can accept any JSON-based messages sent out by an IoMT device. This data is first transformed into appropriate FHIR-based [Observation](https://www.hl7.org/fhir/observation.html) resources and then persisted into Azure API for FHIR. The data transformation logic is defined through a pair of mapping templates that you configure based on your message schema and FHIR requirements. Device data can be pushed directly to Azure IoT Connector for FHIR or seamlessly used in concert with other Azure IoT solutions ([Azure IoT Hub](../../iot-hub/index.yml) and [Azure IoT Central](../../iot-central/index.yml)). Azure IoT Connector for FHIR provides a secure data pipeline while allowing the Azure IoT solutions manage provisioning and maintenance of the physical devices. @@ -93,14 +98,14 @@ Use of IoMT devices is rapidly expanding in healthcare and Azure IoT Connector f ## Next Steps -To start working with the Azure API for FHIR, follow the 5-minute quickstart to deploy the Azure API for FHIR. +To start working with Azure API for FHIR, follow the 5-minute quickstart to deploy Azure API for FHIR. >[!div class="nextstepaction"] >[Deploy Azure API for FHIR](fhir-paas-portal-quickstart.md) -To try out the Azure IoT Connector for FHIR feature, check out the quickstart to deploy Azure IoT Connector for FHIR using the Azure portal. +To try out the Azure IoT Connector for FHIR feature, check out the quickstart to deploy Azure IoT Connector for FHIR using the Azure portal. >[!div class="nextstepaction"] >[Deploy Azure IoT Connector for FHIR](iot-fhir-portal-quickstart.md) -*In the Azure portal, Azure IoT Connector for FHIR is referred to as IoT Connector (preview). FHIR is a registered trademark of HL7 and is used with the permission of HL7. +*In the Azure portal, Azure IoT Connector for FHIR is referred to as IoT Connector (preview). FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/patient-everything.md b/articles/healthcare-apis/azure-api-for-fhir/patient-everything.md index e7dc15e14bea9..6adc99c1e66c3 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/patient-everything.md +++ b/articles/healthcare-apis/azure-api-for-fhir/patient-everything.md @@ -6,7 +6,7 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- @@ -124,3 +124,5 @@ Now that you know how to use the Patient-everything operation, you can learn abo >[!div class="nextstepaction"] >[Overview of search in Azure API for FHIR](overview-of-search.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7.
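For the patient-everything.md changes above, a minimal sketch of the operation (with `{{FHIR_URL}}`, the patient ID, and the token as placeholders) looks like this:

```rest
GET {{FHIR_URL}}/Patient/{{patient_id}}/$everything
Authorization: Bearer {{access_token}}
```

The response is a paged `Bundle`; when more results remain, the bundle carries a `next` link to fetch the following page.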
diff --git a/articles/healthcare-apis/azure-api-for-fhir/policy-reference.md b/articles/healthcare-apis/azure-api-for-fhir/policy-reference.md index 9a3bf3ff380f4..a16f0cb5b33f8 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/policy-reference.md +++ b/articles/healthcare-apis/azure-api-for-fhir/policy-reference.md @@ -1,7 +1,7 @@ --- title: Built-in policy definitions for Azure API for FHIR description: Lists Azure Policy built-in policy definitions for Azure API for FHIR. These built-in policy definitions provide common approaches to managing your Azure resources. -ms.date: 05/03/2022 +ms.date: 06/03/2022 author: mikaelweave ms.author: mikaelw ms.service: healthcare-apis @@ -28,3 +28,5 @@ the link in the **Version** column to view the source on the - See the built-ins on the [Azure Policy GitHub repo](https://github.com/Azure/azure-policy). - Review the [Azure Policy definition structure](../../governance/policy/concepts/definition-structure.md). - Review [Understanding policy effects](../../governance/policy/concepts/effects.md). + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/purge-history.md b/articles/healthcare-apis/azure-api-for-fhir/purge-history.md index 6fc5c7334c772..243936384a52a 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/purge-history.md +++ b/articles/healthcare-apis/azure-api-for-fhir/purge-history.md @@ -5,7 +5,7 @@ author: stevewohl ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 05/05/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- @@ -41,4 +41,6 @@ In this article, you learned how to purge the history for resources in Azure API >[Supported FHIR features](fhir-features-supported.md) >[!div class="nextstepaction"] ->[FHIR REST API capabilities for Azure API for FHIR](fhir-rest-api-capabilities.md) \ No newline at end of file +>[FHIR REST API capabilities for Azure API for FHIR](fhir-rest-api-capabilities.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/register-confidential-azure-ad-client-app.md b/articles/healthcare-apis/azure-api-for-fhir/register-confidential-azure-ad-client-app.md index 6fead1e189269..f5587a44e7337 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/register-confidential-azure-ad-client-app.md +++ b/articles/healthcare-apis/azure-api-for-fhir/register-confidential-azure-ad-client-app.md @@ -5,7 +5,7 @@ author: matjazl ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- @@ -68,3 +68,5 @@ In this article, you were guided through the steps of how to register a confiden >[!div class="nextstepaction"] >[Access the FHIR service using Postman](./../fhir/use-postman.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7.
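As a sketch of what the confidential client registered in register-confidential-azure-ad-client-app.md is typically used for, the client can exchange its ID and secret for an access token using the Azure AD client-credentials flow; all values below are placeholders, and the exact endpoint or flow may differ depending on your setup:

```rest
POST https://login.microsoftonline.com/{{tenant_id}}/oauth2/token
Content-Type: application/x-www-form-urlencoded

grant_type=client_credentials&client_id={{client_id}}&client_secret={{client_secret}}&resource={{FHIR_URL}}
```

The returned `access_token` is then sent in the `Authorization: Bearer` header of FHIR requests, as shown earlier.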
diff --git a/articles/healthcare-apis/azure-api-for-fhir/register-public-azure-ad-client-app.md b/articles/healthcare-apis/azure-api-for-fhir/register-public-azure-ad-client-app.md index 920c5959d6756..1a0e2ed54b6ce 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/register-public-azure-ad-client-app.md +++ b/articles/healthcare-apis/azure-api-for-fhir/register-public-azure-ad-client-app.md @@ -5,7 +5,7 @@ author: matjazl ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 03/21/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- @@ -61,7 +61,9 @@ If you configure your client application in a different Azure AD tenant from you ## Next steps -In this article, you've learned how to register a public client application in Azure Active Directory. Next, test access to your FHIR server using Postman. +In this article, you've learned how to register a public client application in Azure AD. Next, test access to your FHIR Server using Postman. >[!div class="nextstepaction"] >[Access the FHIR service using Postman](./../fhir/use-postman.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/register-resource-azure-ad-client-app.md b/articles/healthcare-apis/azure-api-for-fhir/register-resource-azure-ad-client-app.md index e75264cbbe9fa..92ed94b12d3a8 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/register-resource-azure-ad-client-app.md +++ b/articles/healthcare-apis/azure-api-for-fhir/register-resource-azure-ad-client-app.md @@ -6,20 +6,20 @@ author: matjazl ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: mikaelw ms.custom: devx-tr2ck-azurepowershell --- # Register a resource application in Azure Active Directory for Azure API for FHIR -In this article, you'll learn how to register a resource (or API) application in Azure Active Directory. A resource application is an Azure Active Directory representation of the FHIR server API itself and client applications can request access to the resource when authenticating. The resource application is also known as the *audience* in OAuth parlance. +In this article, you'll learn how to register a resource (or API) application in Azure Active Directory (Azure AD). A resource application is an Azure AD representation of the FHIR server API itself and client applications can request access to the resource when authenticating. The resource application is also known as the *audience* in OAuth parlance. ## Azure API for FHIR -If you're using the Azure API for FHIR, a resource application is automatically created when you deploy the service. As long as you're using the Azure API for FHIR in the same Azure Active Directory tenant as you're deploying your application, you can skip this how-to-guide and instead deploy your Azure API for FHIR to get started. +If you're using the Azure API for FHIR, a resource application is automatically created when you deploy the service. As long as you're using the Azure API for FHIR in the same Azure AD tenant as you're deploying your application, you can skip this how-to-guide and instead deploy your Azure API for FHIR to get started. 
-If you're using a different Azure Active Directory tenant (not associated with your subscription), you can import the Azure API for FHIR resource application into your tenant with +If you're using a different Azure AD tenant (not associated with your subscription), you can import the Azure API for FHIR resource application into your tenant with PowerShell: ```azurepowershell-interactive @@ -38,7 +38,9 @@ If you're using the open source FHIR Server for Azure, follow the steps on the [ ## Next steps -In this article, you've learned how to register a resource application in Azure Active Directory. Next, register your confidential client application. +In this article, you've learned how to register a resource application in Azure AD. Next, register your confidential client application. >[!div class="nextstepaction"] ->[Register Confidential Client Application](register-confidential-azure-ad-client-app.md) \ No newline at end of file +>[Register Confidential Client Application](register-confidential-azure-ad-client-app.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/register-service-azure-ad-client-app.md b/articles/healthcare-apis/azure-api-for-fhir/register-service-azure-ad-client-app.md index 486d5da02b5bb..56b502909fba7 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/register-service-azure-ad-client-app.md +++ b/articles/healthcare-apis/azure-api-for-fhir/register-service-azure-ad-client-app.md @@ -5,7 +5,7 @@ author: matjazl ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 03/21/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- @@ -57,3 +57,5 @@ In this article, you've learned how to register a service client application in >[!div class="nextstepaction"] >[Access the FHIR service using Postman](./../fhir/use-postman.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/search-samples.md b/articles/healthcare-apis/azure-api-for-fhir/search-samples.md index 7637afb98dedb..f4e23705975e2 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/search-samples.md +++ b/articles/healthcare-apis/azure-api-for-fhir/search-samples.md @@ -5,13 +5,13 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- # FHIR search examples for Azure API for FHIR -Below are some examples of using FHIR search operations, including search parameters and modifiers, chain and reverse chain search, composite search, viewing the next entry set for search results, and searching with a `POST` request. For more information about search, see [Overview of FHIR Search](overview-of-search.md). +Below are some examples of using Fast Healthcare Interoperability Resources (FHIR®) search operations, including search parameters and modifiers, chain and reverse chain search, composite search, viewing the next entry set for search results, and searching with a `POST` request. For more information about search, see [Overview of FHIR Search](overview-of-search.md). ## Search result parameters @@ -213,5 +213,9 @@ name=John ``` ## Next steps +In this article, you learned about how to search using different search parameters, modifiers, and FHIR search tools. 
For more information about FHIR Search, see + >[!div class="nextstepaction"] ->[Overview of FHIR Search](overview-of-search.md) \ No newline at end of file +>[Overview of FHIR Search](overview-of-search.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/security-controls-policy.md b/articles/healthcare-apis/azure-api-for-fhir/security-controls-policy.md index 743bf6a6ce641..1e9d980b49ffb 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/security-controls-policy.md +++ b/articles/healthcare-apis/azure-api-for-fhir/security-controls-policy.md @@ -1,7 +1,7 @@ --- title: Azure Policy Regulatory Compliance controls for Azure API for FHIR description: Lists Azure Policy Regulatory Compliance controls available for Azure API for FHIR. These built-in policy definitions provide common approaches to managing the compliance of your Azure resources. -ms.date: 05/10/2022 +ms.date: 06/03/2022 ms.topic: sample author: matjazl ms.author: chrupa @@ -24,5 +24,10 @@ compliant with the specific standard. ## Next steps +In this article, you learned about the Azure Policy Regulatory Compliance controls for Azure API for FHIR. For more information, see + - Learn more about [Azure Policy Regulatory Compliance](../../governance/policy/concepts/regulatory-compliance.md). - See the built-ins on the [Azure Policy GitHub repo](https://github.com/Azure/azure-policy). + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. + diff --git a/articles/healthcare-apis/azure-api-for-fhir/store-profiles-in-fhir.md b/articles/healthcare-apis/azure-api-for-fhir/store-profiles-in-fhir.md index c039c9401c654..aaf4f56a0a3c2 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/store-profiles-in-fhir.md +++ b/articles/healthcare-apis/azure-api-for-fhir/store-profiles-in-fhir.md @@ -5,13 +5,13 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- # Store profiles in Azure API for FHIR -HL7 FHIR defines a standard and interoperable way to store and exchange healthcare data. Even within the base FHIR specification, it can be helpful to define other rules or extensions based on the context that FHIR is being used. For such context-specific uses of FHIR, **FHIR profiles** are used for the extra layer of specifications. +HL7 Fast Healthcare Interoperability Resources (FHIR®) defines a standard and interoperable way to store and exchange healthcare data. Even within the base FHIR specification, it can be helpful to define other rules or extensions based on the context that FHIR is being used. For such context-specific uses of FHIR, **FHIR profiles** are used for the extra layer of specifications. [FHIR profile](https://www.hl7.org/fhir/profiling.html) allows you to narrow down and customize resource definitions using constraints and extensions. Azure API for FHIR allows validating resources against profiles to see if the resources conform to the profiles. This article guides you through the basics of FHIR profiles and how to store them. For more information about FHIR profiles outside of this article, visit [HL7.org](https://www.hl7.org/fhir/profiling.html). @@ -207,3 +207,5 @@ In this article, you've learned about FHIR profiles. 
Next, you'll learn how you >[!div class="nextstepaction"] >[Validate FHIR resources against profiles](validation-against-profiles.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/tutorial-member-match.md b/articles/healthcare-apis/azure-api-for-fhir/tutorial-member-match.md index 382f81a741be2..c5c619fe6988d 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/tutorial-member-match.md +++ b/articles/healthcare-apis/azure-api-for-fhir/tutorial-member-match.md @@ -8,7 +8,7 @@ ms.topic: tutorial ms.reviewer: matjazl ms.author: mikaelw author: mikaelweave -ms.date: 02/15/2022 +ms.date: 06/03/2022 --- # $member-match operation for Azure API for FHIR @@ -51,4 +51,6 @@ If the $member-match can't find a unique match, you'll receive a 422 response wi In this guide, you've learned about the $member-match operation. Next, you can learn about testing the Da Vinci Payer Data Exchange IG in Touchstone, which requires the $member-match operation. >[!div class="nextstepaction"] ->[DaVinci PDex](../fhir/davinci-pdex-tutorial.md) \ No newline at end of file +>[DaVinci PDex](../fhir/davinci-pdex-tutorial.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/azure-api-for-fhir/tutorial-web-app-fhir-server.md b/articles/healthcare-apis/azure-api-for-fhir/tutorial-web-app-fhir-server.md index c89b48e0926eb..3139a183aea7f 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/tutorial-web-app-fhir-server.md +++ b/articles/healthcare-apis/azure-api-for-fhir/tutorial-web-app-fhir-server.md @@ -8,19 +8,22 @@ ms.topic: tutorial ms.reviewer: matjazl ms.author: mikaelw author: mikaelweave -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.custom: devx-track-js --- # Deploy JavaScript app to read data from Azure API for FHIR + In this tutorial, you'll deploy a small JavaScript app, which reads data from a FHIR service. The steps in this tutorial are: + 1. Deploy a FHIR server 1. Register a public client application 1. Test access to the application 1. Create a web application that reads this FHIR data ## Prerequisites + Before starting this set of tutorials, you'll need the following items: 1. An Azure subscription 1. An Azure Active Directory tenant @@ -30,6 +33,7 @@ Before starting this set of tutorials, you'll need the following items: > For this tutorial, the FHIR service, Azure AD application, and Azure AD users are all in the same Azure AD tenant. If this is not the case, you can still follow along with this tutorial, but may need to dive into some of the referenced documents to do additional steps. ## Deploy Azure API for FHIR + The first step in the tutorial is to get your Azure API for FHIR setup correctly. 1. If you haven't already, deploy the [Azure API for FHIR](fhir-paas-portal-quickstart.md). @@ -40,7 +44,10 @@ The first step in the tutorial is to get your Azure API for FHIR setup correctly 1. Set the **Max age** to **600** ## Next Steps -Now that you have your Azure API for FHIR deployed, you're ready to register a public client application. + +Now that you have your Azure API for FHIR deployed, you're ready to register a public client application. 
For more information, see >[!div class="nextstepaction"] >[Register public client application](tutorial-web-app-public-app-reg.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/tutorial-web-app-public-app-reg.md b/articles/healthcare-apis/azure-api-for-fhir/tutorial-web-app-public-app-reg.md index 9ddeb35e50aff..e854febac891f 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/tutorial-web-app-public-app-reg.md +++ b/articles/healthcare-apis/azure-api-for-fhir/tutorial-web-app-public-app-reg.md @@ -8,10 +8,11 @@ ms.topic: tutorial ms.reviewer: matjazl ms.author: mikaelw author: mikaelweave -ms.date: 03/22/2022 +ms.date: 06/03/2022 --- # Client application registration for Azure API for FHIR + In the previous tutorial, you deployed and set up your Azure API for FHIR. Now that you have your Azure API for FHIR setup, we’ll register a public client application. You can read through the full [register a public client app](register-public-azure-ad-client-app.md) how-to guide for more details or troubleshooting, but we’ve called out the major steps for this tutorial in this article. 1. Navigate to Azure Active Directory @@ -52,7 +53,10 @@ Now that you have set up the correct authentication, set the API permissions: :::image type="content" source="media/tutorial-web-app/api-permissions.png" alt-text="Screenshot of the Add API permissions blade, with the steps to add API permissions highlighted."::: ## Next Steps + You now have a public client application. In the next tutorial, we’ll walk through testing and gaining access to this application through Postman. >[!div class="nextstepaction"] >[Test client application in Postman](tutorial-web-app-test-postman.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/tutorial-web-app-test-postman.md b/articles/healthcare-apis/azure-api-for-fhir/tutorial-web-app-test-postman.md index 1350a0f4902fb..6f3401f8da8c6 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/tutorial-web-app-test-postman.md +++ b/articles/healthcare-apis/azure-api-for-fhir/tutorial-web-app-test-postman.md @@ -8,7 +8,7 @@ ms.topic: tutorial ms.reviewer: matjazl ms.author: mikaelw author: mikaelweave -ms.date: 02/15/2022 +ms.date: 06/03/2022 --- # Testing the FHIR API on Azure API for FHIR @@ -16,6 +16,7 @@ ms.date: 02/15/2022 In the previous tutorial, you deployed the Azure API for FHIR and registered your client application. You're now ready to test your Azure API for FHIR. ## Retrieve capability statement + First we'll get the capability statement for your Azure API for FHIR. 1. Open Postman. 1. Retrieve the capability statement by doing `GET https://\.azurehealthcareapis.com/metadata`. In the image below the FHIR server name is **fhirserver**. @@ -25,6 +26,7 @@ First we'll get the capability statement for your Azure API for FHIR. Next we'll attempt to retrieve a patient. To retrieve a patient, enter `GET https://\.azurehealthcareapis.com/Patient`. You’ll receive a 401 Unauthorized error. This error is because you haven't proven that you should have access to patient data. ## Get patient from FHIR server + ![Failed Patient](media/tutorial-web-app/postman-patient-authorization-failed.png) In order to gain access, you need an access token. @@ -51,6 +53,7 @@ In order to gain access, you need an access token. 
![Success Patient](media/tutorial-web-app/postman-patient-authorization-success.png) ## Post patient into FHIR server + Now you have access, you can create a new patient. Here's a sample of a simple patient you can add into your FHIR server. Enter this `json` into the **Body** section of Postman. ``` json @@ -86,16 +89,20 @@ If you do the GET command to retrieve a patient again, you'll see James Tiberiou > When sending requests to the Azure API for FHIR, you need to ensure that you've set the content-type header to `application/json` ## Troubleshooting access issues + If you ran into issues during any of these steps, review the documents we have put together on Azure Active Directory and the Azure API for FHIR. * [Azure AD and Azure API for FHIR](azure-active-directory-identity-configuration.md) - This document outlines some of the basic principles of Azure Active Directory and how it interacts with the Azure API for FHIR. * [Access token validation](azure-api-fhir-access-token-validation.md) - This how-to guide gives more specific details on access token validation and steps to take to resolve access issues. ## Next Steps + Now that you can successfully connect to your client application, you’re ready to write your web application. >[!div class="nextstepaction"] >[Write a web application](tutorial-web-app-write-web-app.md) +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. + diff --git a/articles/healthcare-apis/azure-api-for-fhir/tutorial-web-app-write-web-app.md b/articles/healthcare-apis/azure-api-for-fhir/tutorial-web-app-write-web-app.md index 04033d71435b3..394eec20e1802 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/tutorial-web-app-write-web-app.md +++ b/articles/healthcare-apis/azure-api-for-fhir/tutorial-web-app-write-web-app.md @@ -8,10 +8,11 @@ ms.topic: tutorial ms.reviewer: matjazl ms.author: mikaelw author: mikaelweave -ms.date: 02/15/2022 +ms.date: 06/03/2022 --- # Write Azure web application to read FHIR data in Azure API for FHIR + Now that you're able to connect to your FHIR server and POST data, you’re ready to write a web application that will read FHIR data. In this final step of the tutorial, we’ll walk through writing and accessing the web application. ## Create web application @@ -143,11 +144,14 @@ Included is the code that you can input into **index.html**. You’ll need to up From here, you can go back to your web application resource and open the URL found on the Overview page. Sign in to see the patient James Tiberious Kirk that you previously created. ## Next Steps + You’ve successfully deployed the Azure API for FHIR, registered a public client application, tested access, and created a small web application. Check out the Azure API for FHIR supported features as a next step. >[!div class="nextstepaction"] >[Supported Features](fhir-features-supported.md) +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
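+
+For orientation, the kind of read request the finished web page makes is similar in shape to the following sketch. This isn't the tutorial's **index.html**; it assumes an access token has already been obtained through the app's sign-in flow and uses a placeholder FHIR server URL:
+
+```javascript
+// Minimal sketch: read Patient resources from the FHIR server.
+// Assumes `accessToken` comes from the app's sign-in flow and that the
+// server URL below is replaced with your own Azure API for FHIR endpoint.
+const fhirServerUrl = "https://<your-fhir-server>.azurehealthcareapis.com";
+
+async function listPatients(accessToken) {
+  const response = await fetch(`${fhirServerUrl}/Patient`, {
+    headers: {
+      Authorization: `Bearer ${accessToken}`,
+      Accept: "application/fhir+json"
+    }
+  });
+
+  if (!response.ok) {
+    throw new Error(`FHIR request failed: ${response.status}`);
+  }
+
+  // The response is a FHIR Bundle; each entry.resource is a Patient.
+  const bundle = await response.json();
+  return (bundle.entry || []).map(entry => entry.resource);
+}
+```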
+ diff --git a/articles/healthcare-apis/azure-api-for-fhir/use-custom-headers.md b/articles/healthcare-apis/azure-api-for-fhir/use-custom-headers.md index fa5172dd3de3a..5a870389c3c45 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/use-custom-headers.md +++ b/articles/healthcare-apis/azure-api-for-fhir/use-custom-headers.md @@ -8,7 +8,7 @@ ms.topic: tutorial ms.reviewer: matjazl ms.author: mikaelw author: mikaelweave -ms.date: 05/03/2022 +ms.date: 06/03/2022 --- # Add data to audit logs by using custom HTTP headers in Azure API for FHIR @@ -71,6 +71,7 @@ client.OnBeforeRequest += (object sender, BeforeRequestEventArgs e) => client.Get("Patient"); ``` ## Next steps + In this article, you learned how to add data to audit logs by using custom headers in the Azure API for FHIR. For information about Azure API for FHIR configuration settings, see >[!div class="nextstepaction"] @@ -89,4 +90,7 @@ In this article, you learned how to add data to audit logs by using custom heade >[Configure CORS](configure-cross-origin-resource-sharing.md) >[!div class="nextstepaction"] ->[Configure Private Link](configure-private-link.md) \ No newline at end of file +>[Configure Private Link](configure-private-link.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. + diff --git a/articles/healthcare-apis/azure-api-for-fhir/use-smart-on-fhir-proxy.md b/articles/healthcare-apis/azure-api-for-fhir/use-smart-on-fhir-proxy.md index 408bae96e9346..a449b0da16e8e 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/use-smart-on-fhir-proxy.md +++ b/articles/healthcare-apis/azure-api-for-fhir/use-smart-on-fhir-proxy.md @@ -7,16 +7,16 @@ ms.subservice: fhir ms.topic: tutorial ms.author: mikaelw author: mikaelweave -ms.date: 05/03/2022 +ms.date: 06/03/2022 --- # Tutorial: Azure Active Directory SMART on FHIR proxy -[SMART on FHIR](https://docs.smarthealthit.org/) is a set of open specifications to integrate partner applications with FHIR servers and electronic medical records systems that have FHIR interfaces. One of the main purposes of the specifications is to describe how an application should discover authentication endpoints for an FHIR server and start an authentication sequence. +[SMART on FHIR](https://docs.smarthealthit.org/) is a set of open specifications to integrate partner applications with FHIR servers and electronic medical records systems that have Fast Healthcare Interoperability Resources (FHIR®) interfaces. One of the main purposes of the specifications is to describe how an application should discover authentication endpoints for an FHIR server and start an authentication sequence. Authentication is based on OAuth2. But because SMART on FHIR uses parameter naming conventions that aren’t immediately compatible with Azure Active Directory (Azure AD), the Azure API for FHIR has a built-in Azure AD SMART on FHIR proxy that enables a subset of the SMART on FHIR launch sequences. Specifically, the proxy enables the [EHR launch sequence](https://hl7.org/fhir/smart-app-launch/#ehr-launch-sequence). -This tutorial describes how to use the proxy to enable SMART on FHIR applications with the Azure API for FHIR. +This tutorial describes how to use the proxy to enable SMART on FHIR applications with Azure API for FHIR. 
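+
+For orientation, the authorization request a SMART on FHIR application starts with generally has the following shape (a generic sketch per the SMART App Launch specification; every value shown is a placeholder, and the proxy described in this tutorial maps such requests onto the corresponding Azure AD parameters):
+
+```http
+GET https://<smart-authorize-endpoint>?response_type=code
+    &client_id=<client-app-id>
+    &redirect_uri=https://<your-app>/callback
+    &launch=<launch-context>
+    &scope=launch%20patient%2F*.read%20openid%20fhirUser
+    &state=abc123
+    &aud=https://<your-fhir-server>.azurehealthcareapis.com
+```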
## Prerequisites @@ -161,3 +161,5 @@ In this tutorial, you've configured the Azure Active Directory SMART on FHIR pro >[!div class="nextstepaction"] >[FHIR server samples](https://github.com/Microsoft/fhir-server-samples) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/azure-api-for-fhir/validation-against-profiles.md b/articles/healthcare-apis/azure-api-for-fhir/validation-against-profiles.md index d73bb6c59e086..e65384c6f88e7 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/validation-against-profiles.md +++ b/articles/healthcare-apis/azure-api-for-fhir/validation-against-profiles.md @@ -5,13 +5,13 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 02/15/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- # Validate FHIR resources against profiles in Azure API for FHIR -`$validate` is an operation in FHIR that allows you to ensure that a FHIR resource conforms to the base resource requirements or a specified profile. This is a valuable operation to ensure that the data in Azure API for FHIR has the expected attributes and values. +`$validate` is an operation in Fast Healthcare Interoperability Resources (FHIR®) that allows you to ensure that a FHIR resource conforms to the base resource requirements or a specified profile. This is a valuable operation to ensure that the data in Azure API for FHIR has the expected attributes and values. In the [store profiles in Azure API for FHIR](store-profiles-in-fhir.md) article, you walked through the basics of FHIR profiles and storing them. This article will guide you through how to use `$validate` for validating resources against profiles. For more information about FHIR profiles outside of this article, visit [HL7.org](https://www.hl7.org/fhir/profiling.html). @@ -142,3 +142,5 @@ In this article, you learned how to validate resources against profiles using `$ >[!div class="nextstepaction"] >[Azure API for FHIR supported features](fhir-features-supported.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/configure-azure-rbac-using-scripts.md b/articles/healthcare-apis/configure-azure-rbac-using-scripts.md index b431cc865eebd..a3f6439d405d8 100644 --- a/articles/healthcare-apis/configure-azure-rbac-using-scripts.md +++ b/articles/healthcare-apis/configure-azure-rbac-using-scripts.md @@ -5,7 +5,7 @@ services: healthcare-apis author: ginalee-dotcom ms.service: healthcare-apis ms.topic: tutorial -ms.date: 05/03/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- @@ -192,3 +192,5 @@ In this article, you learned how to grant permissions to client applications usi >[!div class="nextstepaction"] >[Access using REST Client](./fhir/using-rest-client.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/configure-azure-rbac.md b/articles/healthcare-apis/configure-azure-rbac.md index 79ea3d7decad1..34a028145b885 100644 --- a/articles/healthcare-apis/configure-azure-rbac.md +++ b/articles/healthcare-apis/configure-azure-rbac.md @@ -4,7 +4,7 @@ description: This article describes how to configure Azure RBAC role for FHIR. 
author: ginalee-dotcom ms.service: healthcare-apis ms.topic: tutorial -ms.date: 05/03/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- @@ -68,3 +68,5 @@ In this article, you've learned how to assign Azure roles for the FHIR service a - [Access using Postman](./fhir/use-postman.md) - [Access using the REST Client](./fhir/using-rest-client.md) - [Access using cURL](./fhir/using-curl.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/deploy-healthcare-apis-using-bicep.md b/articles/healthcare-apis/deploy-healthcare-apis-using-bicep.md index 66a3cdb11cc1f..f081d96053f74 100644 --- a/articles/healthcare-apis/deploy-healthcare-apis-using-bicep.md +++ b/articles/healthcare-apis/deploy-healthcare-apis-using-bicep.md @@ -5,7 +5,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: quickstart -ms.date: 05/03/2022 +ms.date: 06/06/2022 ms.author: mikaelw ms.custom: mode-api --- @@ -282,3 +282,5 @@ In this article, you learned how to create Azure Health Data Services, including >[!div class="nextstepaction"] >[What is Azure Health Data Services](healthcare-apis-overview.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/dicom/dicom-cast-access-request.md b/articles/healthcare-apis/dicom/dicom-cast-access-request.md index c21bf455fe211..4f05be5c68770 100644 --- a/articles/healthcare-apis/dicom/dicom-cast-access-request.md +++ b/articles/healthcare-apis/dicom/dicom-cast-access-request.md @@ -5,7 +5,7 @@ author: aersoy ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 03/22/2022 +ms.date: 06/03/2022 ms.author: aersoy --- @@ -63,3 +63,5 @@ For more information about DICOMcast, see >[!div class="nextstepaction"] >[DICOMcast overview](dicom-cast-overview.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/dicom/dicom-cast-overview.md b/articles/healthcare-apis/dicom/dicom-cast-overview.md index 4400ad60511fd..cd2244938dc45 100644 --- a/articles/healthcare-apis/dicom/dicom-cast-overview.md +++ b/articles/healthcare-apis/dicom/dicom-cast-overview.md @@ -5,7 +5,7 @@ author: aersoy ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 03/22/2022 +ms.date: 06/03/2022 ms.author: aersoy --- @@ -101,4 +101,6 @@ To get started using the DICOM service, see >[Deploy DICOM service to Azure](deploy-dicom-services-in-azure.md) >[!div class="nextstepaction"] ->[Using DICOMweb™Standard APIs with DICOM service](dicomweb-standard-apis-with-dicom-services.md) \ No newline at end of file +>[Using DICOMweb™Standard APIs with DICOM service](dicomweb-standard-apis-with-dicom-services.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
\ No newline at end of file diff --git a/articles/healthcare-apis/dicom/dicom-services-overview.md b/articles/healthcare-apis/dicom/dicom-services-overview.md index ea784e89741e2..af4c0e255d983 100644 --- a/articles/healthcare-apis/dicom/dicom-services-overview.md +++ b/articles/healthcare-apis/dicom/dicom-services-overview.md @@ -5,7 +5,7 @@ author: stevewohl ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 03/22/2022 +ms.date: 06/03/2022 ms.author: aersoy --- @@ -56,7 +56,7 @@ This conceptual article provided you with an overview of DICOM and the DICOM ser ## Next steps -To get started using the DICOM service, see: +To get started using the DICOM service, see >[!div class="nextstepaction"] >[Deploy DICOM service to Azure](deploy-dicom-services-in-azure.md) @@ -65,3 +65,5 @@ For more information about how to use the DICOMweb™ Standard APIs with th >[!div class="nextstepaction"] >[Using DICOMweb™Standard APIs with DICOM service](dicomweb-standard-apis-with-dicom-services.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/dicom/dicomweb-standard-apis-c-sharp.md b/articles/healthcare-apis/dicom/dicomweb-standard-apis-c-sharp.md index 3862c18dda4c3..9461507b6ac86 100644 --- a/articles/healthcare-apis/dicom/dicomweb-standard-apis-c-sharp.md +++ b/articles/healthcare-apis/dicom/dicomweb-standard-apis-c-sharp.md @@ -5,7 +5,7 @@ author: stevewohl ms.service: healthcare-apis ms.subservice: fhir ms.topic: tutorial -ms.date: 02/15/2022 +ms.date: 05/26/2022 ms.author: aersoy --- @@ -43,7 +43,7 @@ After you've deployed an instance of the DICOM service, retrieve the URL for you In your application, install the following NuGet packages: -* [DICOM Client](https://microsofthealthoss.visualstudio.com/FhirServer/_packaging?_a=package&feed=Public&package=Microsoft.Health.Dicom.Client&protocolType=NuGet) +* [DICOM Client](https://microsofthealthoss.visualstudio.com/FhirServer/_artifacts/feed/Public/NuGet/Microsoft.Health.Dicom.Client/) * [fo-dicom](https://www.nuget.org/packages/fo-dicom/) diff --git a/articles/healthcare-apis/dicom/get-started-with-dicom.md b/articles/healthcare-apis/dicom/get-started-with-dicom.md index ad3b1c302918d..62fc5701beabc 100644 --- a/articles/healthcare-apis/dicom/get-started-with-dicom.md +++ b/articles/healthcare-apis/dicom/get-started-with-dicom.md @@ -5,7 +5,7 @@ author: aersoy ms.service: healthcare-apis ms.subservice: fhir ms.topic: quickstart -ms.date: 05/03/2022 +ms.date: 06/03/2022 ms.author: aersoy ms.custom: mode-api --- @@ -77,3 +77,5 @@ This article described the basic steps to get started using the DICOM service. F >[!div class="nextstepaction"] >[Deploy DICOM service using the Azure portal](deploy-dicom-services-in-azure.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
diff --git a/articles/healthcare-apis/dicom/media/dicom-add-api-permissions.png b/articles/healthcare-apis/dicom/media/dicom-add-api-permissions.png deleted file mode 100644 index 5c1090a29e800..0000000000000 Binary files a/articles/healthcare-apis/dicom/media/dicom-add-api-permissions.png and /dev/null differ diff --git a/articles/healthcare-apis/dicom/media/dicom-add-apis-permissions.png b/articles/healthcare-apis/dicom/media/dicom-add-apis-permissions.png new file mode 100644 index 0000000000000..41a974daa3933 Binary files /dev/null and b/articles/healthcare-apis/dicom/media/dicom-add-apis-permissions.png differ diff --git a/articles/healthcare-apis/dicom/media/dicom-search-apis-permissions.png b/articles/healthcare-apis/dicom/media/dicom-search-apis-permissions.png index 3a2d7531f4eb4..0adb86986d848 100644 Binary files a/articles/healthcare-apis/dicom/media/dicom-search-apis-permissions.png and b/articles/healthcare-apis/dicom/media/dicom-search-apis-permissions.png differ diff --git a/articles/healthcare-apis/dicom/media/dicom-select-scopes.png b/articles/healthcare-apis/dicom/media/dicom-select-scopes.png index 5bd0f9cdbb09a..1ce88f4c21b5d 100644 Binary files a/articles/healthcare-apis/dicom/media/dicom-select-scopes.png and b/articles/healthcare-apis/dicom/media/dicom-select-scopes.png differ diff --git a/articles/healthcare-apis/dicom/references-for-dicom-service.md b/articles/healthcare-apis/dicom/references-for-dicom-service.md index a5ceceee28a51..ae14431e8a993 100644 --- a/articles/healthcare-apis/dicom/references-for-dicom-service.md +++ b/articles/healthcare-apis/dicom/references-for-dicom-service.md @@ -5,7 +5,7 @@ author: aersoy ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 03/21/2022 +ms.date: 06/03/2022 ms.author: aersoy --- @@ -45,4 +45,6 @@ For more information about using the DICOM service, see For more information about DICOM cast, see >[!div class="nextstepaction"] ->[DICOM cast overview](dicom-cast-overview.md) \ No newline at end of file +>[DICOM cast overview](dicom-cast-overview.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/events/events-consume-logic-apps.md b/articles/healthcare-apis/events/events-consume-logic-apps.md new file mode 100644 index 0000000000000..6f2e6007cc6ef --- /dev/null +++ b/articles/healthcare-apis/events/events-consume-logic-apps.md @@ -0,0 +1,347 @@ +--- +title: Consume events with Logic Apps - Azure Health Data Services +description: This article provides resources on how to consume events with Logic Apps. +services: healthcare-apis +author: msjasteppe +ms.service: healthcare-apis +ms.subservice: fhir +ms.topic: how-to +ms.date: 05/26/2022 +ms.author: v-smcevoy +--- + +# Consume events with Logic Apps + +This tutorial shows how to use Azure Logic Apps to process Azure Health Data Services FHIR events. Logic Apps create and run automated workflows to process event data from other applications. You will learn how to register a FHIR event with your Logic App, meet a specified event criteria, and perform a service operation. + +Here's an example of a Logic App workflow: + +:::image type="content" source="media/events-logic-apps/events-logic-example.png" alt-text="Screenshot showing an example of a Logic App workflow." lightbox="./media/events-logic-apps/events-logic-example.png"::: + +The workflow is on the left and the trigger condition is on the right. 
+
+## Overview
+
+Follow these steps to create a Logic App workflow to consume FHIR events:
+
+1. Set up prerequisites
+2. Create a Logic App
+3. Create a Logic App workflow
+
+## Prerequisites
+
+Before you begin this tutorial, you need to have deployed a FHIR service and enabled events. For more information about deploying events, see [Deploy Events in the Azure portal](./events-deploy-portal.md).
+
+## Creating a Logic App
+
+To set up an automated workflow, you must first create a Logic App. For more information about Logic Apps, see [What is Azure Logic Apps?](./../../logic-apps/logic-apps-overview.md)
+
+### Specify your Logic App details
+
+Follow these steps:
+
+1. Go to the Azure portal.
+2. Search for "Logic App".
+3. Click "Add".
+4. Specify Basic details.
+5. Specify Hosting.
+6. Specify Monitoring.
+7. Specify Tags.
+8. Review and create your Logic App.
+
+You now need to fill out the details of your Logic App. Specify information for these five categories, each on its own tab:
+
+:::image type="content" source="media/events-logic-apps/events-logic-tabs.png" alt-text="Screenshot of the five tabs for specifying your Logic App." lightbox="./media/events-logic-apps/events-logic-tabs.png":::
+
+- Tab 1 - Basics
+- Tab 2 - Hosting
+- Tab 3 - Monitoring
+- Tab 4 - Tags
+- Tab 5 - Review + create
+
+### Basics - Tab 1
+
+Start by specifying the following basics:
+
+#### Project details
+
+- Subscription
+- Resource Group
+
+Select your current subscription and specify an existing or new resource group.
+
+#### Instance details
+
+- Logic App name
+- Publish type
+- Region
+
+Create a name for your Logic App. You must choose Workflow or Docker Container as your publishing type. Select a region that is compatible with your plan.
+
+#### Plan
+
+- Plan type
+- App Service Plan
+- SKU and size
+
+Choose a plan type (Standard or Consumption). Create a name for the new Windows plan and specify the SKU and size.
+
+#### Zone redundancy
+
+- Zone redundancy deployment
+
+Enabling this option makes your plan zone redundant.
+
+### Hosting - Tab 2
+
+Continue specifying your Logic App by clicking "Next: Hosting".
+
+#### Storage
+
+- Storage type
+- Storage account
+
+Choose the type of storage you want to use and the storage account. You can use Azure Storage or add SQL functionality. You must also create a new storage account or use an existing one.
+
+### Monitoring - Tab 3
+
+Continue specifying your Logic App by clicking "Next: Monitoring".
+
+#### Monitoring with Application Insights
+
+- Enable Application Insights
+- Application Insights
+- Region
+
+Enable Azure Monitor Application Insights to automatically monitor your application. If you enable it, create a new Application Insights resource and specify its region.
+
+### Tags - Tab 4
+
+Continue specifying your Logic App by clicking "Next: Tags".
+
+#### Use tags to categorize resources
+
+Tags are name/value pairs that enable you to categorize resources and view consolidated billing by applying the same tag to multiple resources and resource groups.
+
+This example doesn't use tagging.
+
+### Review + create - Tab 5
+
+Finish specifying your Logic App by clicking "Next: Review + create".
+
+#### Review your Logic App
+
+Your proposed Logic App will display the following details:
+
+- Subscription
+- Resource Group
+- Logic App Name
+- Runtime stack
+- Hosting
+- Storage
+- Plan
+- Monitoring
+
+If you're satisfied with the proposed configuration, click "Create". If not, click "Previous" to go back and specify new details.
+
+First you'll see an alert telling you that deployment is initializing. Next you'll see a new page telling you that the deployment is in progress.
+
+:::image type="content" source="media/events-logic-apps/events-logic-progress.png" alt-text="Screenshot of the notification telling you your deployment is in progress." lightbox="./media/events-logic-apps/events-logic-progress.png":::
+
+If there are no errors, you'll see a notification telling you that your deployment is complete.
+
+:::image type="content" source="media/events-logic-apps/events-logic-complete.png" alt-text="Screenshot of the notification telling you your deployment is complete." lightbox="./media/events-logic-apps/events-logic-complete.png":::
+
+#### Your Logic App dashboard
+
+Azure creates a dashboard when your Logic App deployment is complete. The dashboard shows you the status of your app. You can return to your dashboard by clicking Overview in the Logic App menu. Here's a Logic App dashboard:
+
+:::image type="content" source="media/events-logic-apps/events-logic-overview.png" alt-text="Screenshot of your Logic Apps overview dashboard." lightbox="./media/events-logic-apps/events-logic-overview.png":::
+
+You can do the following activities from your dashboard:
+
+- Browse
+- Refresh
+- Stop
+- Restart
+- Swap
+- Get Publish Profile
+- Reset Publish Profile
+- Delete
+
+## Creating a Logic App workflow
+
+When your Logic App is running, follow these steps to create a Logic App workflow:
+
+1. Initialize a workflow
+2. Configure a workflow
+3. Design a workflow
+4. Add an action
+5. Give FHIR Reader access
+6. Add a condition
+7. Choose the condition criteria
+8. Test your condition
+
+### Initializing your workflow
+
+Before you begin, make sure your Logic App is configured and running correctly.
+
+To initialize a workflow, follow these steps:
+
+1. Start at the Azure portal.
+2. Click "Logic Apps" in Azure services.
+3. Select the Logic App you created.
+4. Click "Workflows" in the Workflow menu on the left.
+5. Click "Add" to add a workflow.
+
+### Configuring a new workflow
+
+You'll see a new panel on the right for creating a workflow.
+
+:::image type="content" source="media/events-logic-apps/events-logic-panel.png" alt-text="Screenshot of the panel for creating a workflow." lightbox="./media/events-logic-apps/events-logic-panel.png":::
+
+You can specify the details of the new workflow in the panel on the right.
+
+#### Creating a new workflow for the Logic App
+
+To set up a new workflow, fill in these details:
+
+- Workflow Name
+- State type
+
+Specify a new name for your workflow. Indicate whether you want the workflow to be stateful or stateless. Stateful is for business processes and stateless is for processing IoT events.
+
+When you have specified the details, click "Create" to begin designing your workflow.
+
+### Designing the workflow
+
+Open your new workflow by clicking its name in the workflow list.
+
+You can write code to design a workflow for your application, but for this tutorial, choose the Designer option on the Developer menu.
+
+Next, click "Choose an operation" to display the "Add a Trigger" blade on the right. Then search for "Azure Event Grid" and click the "Azure" tab below; Azure Event Grid isn't a Logic Apps built-in connector, which is why it appears there rather than under "Built-in".
lightbox="./media/events-logic-apps/events-logic-grid.png"::: + +When you see the "Azure Event Grid" icon, click on it to display the Triggers and Actions available from Event Grid. For more information about Event Grid, see [What is Azure Event Grid?](./../../event-grid/overview.md). + +Click "When a resource event occurs" to set up a trigger for the Azure Event Grid. + +To tell Event Grid how to respond to the trigger, you must specify parameters and add actions. + +#### Parameter settings + +You need to specify the parameters for the trigger: + +- Subscription +- Resource Type +- Resource Name +- Event type item(s) + +Fill in the details for subscription, resource type, and resource name. Then you must specify the event types you want to respond to. The event types used in this article are: + +- Resource created +- Resource deleted +- Resource updated + +For more information about event types, see [What FHIR resource events does Events support?](./events-faqs.md). + +### Adding an HTTP action + +Once you have specified the trigger events, you must add more details. Click the "+" below the "When a resource event occurs" button. + +You need to add a specific action. Click "Choose an operation" to continue. Then, for the operation, search for "HTTP" and click on "Built-in" to select an HTTP operation. The HTTP action will allow you to query the FHIR service. + +The options in this example are: + +- Method is "Get" +- URL is "concat('https://', triggerBody()?['subject'], '/_history/', triggerBody()?['dataVersion'])". +- Authentication type is "Managed Identity". +- Audience is "concat('https://', triggerBody()?['data']['resourceFhirAccount'])" + +### Allow FHIR Reader access to your Logic App + +At this point, you need to give the FHIR Reader access to your app, so it can verify that the event details are correct. Follow these steps to give it access: + +1. The first step is to go back to your Logic App and click the Identity menu item. + +2. In the System assigned tab, make sure the Status is "On". + +3. Click on Azure role assignments. Click "Add role assignment". + +4. Specify the following: + + - Scope = Subscription + - Subscription = your subscription + - Role = FHIR Data Reader. + +When you have specified the first four steps, add the role assignment by Managed identity, using Subscription, Managed identity (Logic App Standard), and select your Logic App by clicking the name and then clicking the Select button. Finally, click "Review + assign" to assign the role. + +### Add a condition + +After you have given FHIR Reader access to your app, go back to the Logic App workflow Designer. Then add a condition to determine whether the event is one you want to process. Click the "+" below HTTP to "Choose an operation". On the right, search for the word "condition". Click on "Built-in" to display the Control icon. Next click Actions and choose Condition. + +When the condition is ready, you can specify what actions happen if the condition is true or false. + +### Choosing a condition criteria + +In order to specify whether you want to take action for the specific event, begin specifying the criteria by clicking on "Condition" in the workflow on the left. You will then see a set of condition choices on the right. + +Under the "And" box, add these two conditions: + +- resourceType +- Event Type + +#### resourceType + +The expression for getting the resourceType is `body('HTTP')?['resourceType']`. + +#### Event Type + +You can select Event Type from the Dynamic Content. 
+
+Here is an example of the Condition criteria:
+
+:::image type="content" source="media/events-logic-apps/events-logic-condition.png" alt-text="Screenshot of the condition criteria for your workflow." lightbox="./media/events-logic-apps/events-logic-condition.png":::
+
+#### Save your workflow
+
+When you have entered the condition criteria, save your workflow.
+
+#### Workflow dashboard
+
+To check the status of your workflow, click Overview in the workflow menu. Here is a dashboard for a workflow:
+
+:::image type="content" source="media/events-logic-apps/events-logic-dashboard.png" alt-text="Screenshot of the Logic App workflow dashboard." lightbox="./media/events-logic-apps/events-logic-dashboard.png":::
+
+You can do the following operations from your workflow dashboard:
+
+- Run trigger
+- Refresh
+- Enable
+- Disable
+- Delete
+
+### Condition testing
+
+If you haven't already, save your workflow by clicking the "Save" button.
+
+To test your new workflow, do the following steps:
+
+1. Add a new Patient FHIR resource to your FHIR service (a sample request is sketched at the end of this article).
+2. Wait a moment or two and then check the Overview webpage of your Logic App workflow.
+3. The event should be shaded in green if the action was successful.
+4. If it failed, the event will be shaded in red.
+
+Here is an example of a workflow trigger success operation:
+
+:::image type="content" source="media/events-logic-apps/events-logic-success.png" alt-text="Screenshot showing workflow success indicated by green highlighting of the workflow name." lightbox="./media/events-logic-apps/events-logic-success.png":::
+
+## Next steps
+
+For more information about FHIR events, see
+
+>[!div class="nextstepaction"]
+>[What are Events?](./events-overview.md)
+
+FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7.
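+
+As referenced in the Condition testing steps above, one way to generate a test event is to create a Patient resource in the FHIR service. Here's a sketch of a minimal request; the service URL is a placeholder for your own FHIR service, and you need a valid access token:
+
+```http
+POST https://<your-workspace>-<your-fhir-service>.fhir.azurehealthcareapis.com/Patient
+Authorization: Bearer <access-token>
+Content-Type: application/fhir+json
+
+{
+  "resourceType": "Patient",
+  "name": [
+    {
+      "family": "Doe",
+      "given": [ "Jane" ]
+    }
+  ]
+}
+```
+
+If the workflow's trigger and condition are set up as described above, creating this resource should produce a "Resource created" event that the workflow processes.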
diff --git a/articles/healthcare-apis/events/media/events-logic-apps/events-logic-complete.png b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-complete.png new file mode 100644 index 0000000000000..1ce16503b3273 Binary files /dev/null and b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-complete.png differ diff --git a/articles/healthcare-apis/events/media/events-logic-apps/events-logic-condition.png b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-condition.png new file mode 100644 index 0000000000000..2509048206fca Binary files /dev/null and b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-condition.png differ diff --git a/articles/healthcare-apis/events/media/events-logic-apps/events-logic-dashboard.png b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-dashboard.png new file mode 100644 index 0000000000000..6679c2adbca1d Binary files /dev/null and b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-dashboard.png differ diff --git a/articles/healthcare-apis/events/media/events-logic-apps/events-logic-example.png b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-example.png new file mode 100644 index 0000000000000..b14ac7868a3ff Binary files /dev/null and b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-example.png differ diff --git a/articles/healthcare-apis/events/media/events-logic-apps/events-logic-grid.png b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-grid.png new file mode 100644 index 0000000000000..279546bfb4558 Binary files /dev/null and b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-grid.png differ diff --git a/articles/healthcare-apis/events/media/events-logic-apps/events-logic-overview.png b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-overview.png new file mode 100644 index 0000000000000..1066a6347a310 Binary files /dev/null and b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-overview.png differ diff --git a/articles/healthcare-apis/events/media/events-logic-apps/events-logic-panel.png b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-panel.png new file mode 100644 index 0000000000000..eba70bbb21de9 Binary files /dev/null and b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-panel.png differ diff --git a/articles/healthcare-apis/events/media/events-logic-apps/events-logic-progress.png b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-progress.png new file mode 100644 index 0000000000000..bfe06ff801342 Binary files /dev/null and b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-progress.png differ diff --git a/articles/healthcare-apis/events/media/events-logic-apps/events-logic-success.png b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-success.png new file mode 100644 index 0000000000000..a931f773ff83e Binary files /dev/null and b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-success.png differ diff --git a/articles/healthcare-apis/events/media/events-logic-apps/events-logic-tabs.png b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-tabs.png new file mode 100644 index 0000000000000..7aa1508936d0c Binary files /dev/null and b/articles/healthcare-apis/events/media/events-logic-apps/events-logic-tabs.png differ diff --git a/articles/healthcare-apis/events/toc.yml 
b/articles/healthcare-apis/events/toc.yml index 8c15eb6c698d2..5e8471d2a510b 100644 --- a/articles/healthcare-apis/events/toc.yml +++ b/articles/healthcare-apis/events/toc.yml @@ -16,6 +16,8 @@ items: items: - name: Display Events metrics href: events-display-metrics.md + - name: Consume events with Logic Apps + href: events-consume-logic-apps.md - name: How-to guides expanded: true items: diff --git a/articles/healthcare-apis/fhir/azure-active-directory-identity-configuration-old.md b/articles/healthcare-apis/fhir/azure-active-directory-identity-configuration-old.md index 85e6198bd8f3b..401e2b22c2a4d 100644 --- a/articles/healthcare-apis/fhir/azure-active-directory-identity-configuration-old.md +++ b/articles/healthcare-apis/fhir/azure-active-directory-identity-configuration-old.md @@ -6,7 +6,7 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 03/01/2022 +ms.date: 06/03/2022 ms.author: mikaelw --- @@ -108,3 +108,5 @@ In this document, you learned some of the basic concepts involved in securing ac >[!div class="nextstepaction"] >[Deploy FHIR service](fhir-portal-quickstart.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/carin-implementation-guide-blue-button-tutorial.md b/articles/healthcare-apis/fhir/carin-implementation-guide-blue-button-tutorial.md index 112c78032e40c..d4a279c977ea5 100644 --- a/articles/healthcare-apis/fhir/carin-implementation-guide-blue-button-tutorial.md +++ b/articles/healthcare-apis/fhir/carin-implementation-guide-blue-button-tutorial.md @@ -8,7 +8,7 @@ ms.topic: tutorial ms.reviewer: matjazl ms.author: mikaelw author: mikaelweave -ms.date: 03/01/2022 +ms.date: 06/06/2022 --- # CARIN Implementation Guide for Blue Button® @@ -75,5 +75,7 @@ The final test we'll walk through is testing [error handling](https://touchstone In this tutorial, we walked through how to pass the CARIN IG for Blue Button tests in Touchstone. Next, you can review how to test the Da Vinci formulary tests. >[!div class="nextstepaction"] ->[DaVinci Drug Formulary](davinci-drug-formulary-tutorial.md) +>[DaVinci Drug Formulary](davinci-drug-formulary-tutorial.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/centers-for-medicare-tutorial-introduction.md b/articles/healthcare-apis/fhir/centers-for-medicare-tutorial-introduction.md index 2ed099cecf4ae..735bcf93ab3d6 100644 --- a/articles/healthcare-apis/fhir/centers-for-medicare-tutorial-introduction.md +++ b/articles/healthcare-apis/fhir/centers-for-medicare-tutorial-introduction.md @@ -8,7 +8,7 @@ ms.topic: tutorial ms.reviewer: matjazl ms.author: mikaelw author: mikaelweave -ms.date: 03/01/2022 +ms.date: 06/06/2022 --- # Introduction: Centers for Medicare and Medicaid Services (CMS) Interoperability and Patient Access rule @@ -66,4 +66,6 @@ To test adherence to the various implementation guides, [Touchstone](https://tou Now that you have a basic understanding of the Interoperability and Patient Access rule, implementation guides, and available testing tool (Touchstone), we'll walk through setting up FHIR service for the CARIN IG for Blue Button. 
>[!div class="nextstepaction"] ->[CARIN Implementation Guide for Blue Button](carin-implementation-guide-blue-button-tutorial.md) +>[CARIN Implementation Guide for Blue Button](carin-implementation-guide-blue-button-tutorial.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/configure-cross-origin-resource-sharing.md b/articles/healthcare-apis/fhir/configure-cross-origin-resource-sharing.md index 5ac1bf2c65b32..0bdbb55f51113 100644 --- a/articles/healthcare-apis/fhir/configure-cross-origin-resource-sharing.md +++ b/articles/healthcare-apis/fhir/configure-cross-origin-resource-sharing.md @@ -3,7 +3,7 @@ title: Configure cross-origin resource sharing in FHIR service description: This article describes how to configure cross-origin resource sharing in FHIR service author: mikaelweave ms.author: mikaelw -ms.date: 03/02/2022 +ms.date: 06/06/2022 ms.topic: reference ms.service: healthcare-apis ms.subservice: fhir @@ -41,4 +41,6 @@ To configure a CORS setting in the FHIR service, specify the following settings: In this tutorial, we walked through how to configure a CORS setting in the FHIR service. Next, you can review how to pass the CARIN IG for Blue Button tests in Touchstone. >[!div class="nextstepaction"] ->[CARIN Implementation Guide for Blue Button®](carin-implementation-guide-blue-button-tutorial.md) \ No newline at end of file +>[CARIN Implementation Guide for Blue Button®](carin-implementation-guide-blue-button-tutorial.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/fhir/configure-export-data.md b/articles/healthcare-apis/fhir/configure-export-data.md index b9342d8a55e41..de50d79a8be20 100644 --- a/articles/healthcare-apis/fhir/configure-export-data.md +++ b/articles/healthcare-apis/fhir/configure-export-data.md @@ -6,7 +6,7 @@ ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference ms.custom: references_regions, subject-rbac-steps -ms.date: 03/01/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- @@ -169,3 +169,5 @@ In this article, you learned about the three steps in configuring export setting >[!div class="nextstepaction"] >[How to export FHIR data](export-data.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/configure-import-data.md b/articles/healthcare-apis/fhir/configure-import-data.md index 86a57fe929a34..09c68d62dc676 100644 --- a/articles/healthcare-apis/fhir/configure-import-data.md +++ b/articles/healthcare-apis/fhir/configure-import-data.md @@ -5,7 +5,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: how-to -ms.date: 04/20/2022 +ms.date: 06/06/2022 ms.author: ranku --- @@ -77,4 +77,6 @@ In this article, you've learned the FHIR service supports $import operation and >[Configure export settings and set up a storage account](configure-export-data.md) >[!div class="nextstepaction"] ->[Copy data from FHIR service to Azure Synapse Analytics](copy-to-synapse.md) \ No newline at end of file +>[Copy data from FHIR service to Azure Synapse Analytics](copy-to-synapse.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
\ No newline at end of file diff --git a/articles/healthcare-apis/fhir/convert-data.md b/articles/healthcare-apis/fhir/convert-data.md index d9d7ea0ddc0a1..38c55b97ae673 100644 --- a/articles/healthcare-apis/fhir/convert-data.md +++ b/articles/healthcare-apis/fhir/convert-data.md @@ -6,7 +6,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: overview -ms.date: 03/21/2022 +ms.date: 06/06/2022 ms.author: ranku ms.custom: subject-rbac-steps --- @@ -227,3 +227,5 @@ In this article, you've learned about the $convert-data endpoint and customize-c >[!div class="nextstepaction"] >[Export data](export-data.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/copy-to-synapse.md b/articles/healthcare-apis/fhir/copy-to-synapse.md index 9adecc158da23..a8a79def93dd1 100644 --- a/articles/healthcare-apis/fhir/copy-to-synapse.md +++ b/articles/healthcare-apis/fhir/copy-to-synapse.md @@ -5,7 +5,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 03/16/2022 +ms.date: 06/06/2022 ms.author: ginle --- # Copy data from FHIR service to Azure Synapse Analytics @@ -195,4 +195,6 @@ In this article, you learned three different ways to copy your FHIR data into Sy Next, you can learn about how you can de-identify your FHIR data while exporting it to Synapse in order to protect PHI. >[!div class="nextstepaction"] ->[Exporting de-identified data](./de-identified-export.md) \ No newline at end of file +>[Exporting de-identified data](./de-identified-export.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/fhir/davinci-drug-formulary-tutorial.md b/articles/healthcare-apis/fhir/davinci-drug-formulary-tutorial.md index b31571d9501af..85161282fdf8d 100644 --- a/articles/healthcare-apis/fhir/davinci-drug-formulary-tutorial.md +++ b/articles/healthcare-apis/fhir/davinci-drug-formulary-tutorial.md @@ -8,7 +8,7 @@ ms.topic: tutorial ms.reviewer: matjazl ms.author: mikaelw author: modillon -ms.date: 03/01/2022 +ms.date: 06/06/2022 --- # Tutorial for Da Vinci Drug Formulary @@ -56,4 +56,6 @@ The second test is the [query capabilities](https://touchstone.aegis.net/touchst In this tutorial, we walked through how to pass the Da Vinci Payer Data Exchange US Drug Formulary in Touchstone. Next, you can learn how to test the Da Vinci PDex Implementation Guide in Touchstone. >[!div class="nextstepaction"] ->[Da Vinci PDex](davinci-pdex-tutorial.md) \ No newline at end of file +>[Da Vinci PDex](davinci-pdex-tutorial.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/fhir/davinci-pdex-tutorial.md b/articles/healthcare-apis/fhir/davinci-pdex-tutorial.md index cf9ebb658f154..ad5f5159cfa88 100644 --- a/articles/healthcare-apis/fhir/davinci-pdex-tutorial.md +++ b/articles/healthcare-apis/fhir/davinci-pdex-tutorial.md @@ -7,7 +7,7 @@ ms.subservice: fhir ms.topic: tutorial ms.author: mikaelw author: mikaelweave -ms.date: 03/01/2022 +ms.date: 06/06/2022 --- # Da Vinci PDex @@ -54,4 +54,6 @@ The final test we'll walk through is testing patient-everything. For this test, In this tutorial, we walked through how to pass the Payer Exchange tests in Touchstone. 
Next, you can learn how to test the Da Vinci PDEX Payer Network (Plan-Net) Implementation Guide. >[!div class="nextstepaction"] ->[Da Vinci Plan Net](davinci-plan-net.md) +>[Da Vinci Plan Net](davinci-plan-net.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/davinci-plan-net.md b/articles/healthcare-apis/fhir/davinci-plan-net.md index 28e763b43287d..dcdc3f024e7c4 100644 --- a/articles/healthcare-apis/fhir/davinci-plan-net.md +++ b/articles/healthcare-apis/fhir/davinci-plan-net.md @@ -8,7 +8,7 @@ ms.topic: tutorial ms.reviewer: matjazl ms.author: mikaelw author: modillon -ms.date: 03/01/2022 +ms.date: 06/06/2022 --- # Da Vinci Plan Net @@ -78,3 +78,5 @@ In this tutorial, we walked through setting up the Azure API for FHIR to pass th >[!div class="nextstepaction"] >[Supported features](fhir-features-supported.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/de-identified-export.md b/articles/healthcare-apis/fhir/de-identified-export.md index eb32a838dfd5b..ccde1bc7ebbec 100644 --- a/articles/healthcare-apis/fhir/de-identified-export.md +++ b/articles/healthcare-apis/fhir/de-identified-export.md @@ -5,7 +5,7 @@ author: ranvijaykumar ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 02/15/2022 +ms.date: 06/06/2022 ms.author: ranku --- # Exporting de-identified data @@ -67,3 +67,5 @@ In this article, you've learned how to set up and use de-identified export. For >[!div class="nextstepaction"] >[Export data](export-data.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/export-data.md b/articles/healthcare-apis/fhir/export-data.md index 042a56756906f..1f57b8a7610e6 100644 --- a/articles/healthcare-apis/fhir/export-data.md +++ b/articles/healthcare-apis/fhir/export-data.md @@ -5,7 +5,7 @@ author: ranvijaykumar ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 02/15/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- # How to export FHIR data @@ -76,3 +76,5 @@ In this article, you've learned how to export FHIR resources using the $export c >[!div class="nextstepaction"] >[Copy data from the FHIR service to Azure Synapse Analytics](copy-to-synapse.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/fhir-faq.md b/articles/healthcare-apis/fhir/fhir-faq.md index 44451feef8081..51bf45b692542 100644 --- a/articles/healthcare-apis/fhir/fhir-faq.md +++ b/articles/healthcare-apis/fhir/fhir-faq.md @@ -6,7 +6,7 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 03/01/2022 +ms.date: 06/06/2022 ms.author: mikaelw ms.custom: references_regions --- @@ -143,4 +143,6 @@ We have a collection of reference architectures available on the [Health Archite In this article, you've learned the answers to frequently asked questions about FHIR service. 
To see the frequently asked questions about FHIR service in Azure API for FHIR, see >[!div class="nextstepaction"] ->[FAQs about Azure API for FHIR](../azure-api-for-fhir/fhir-faq.yml) \ No newline at end of file +>[FAQs about Azure API for FHIR](../azure-api-for-fhir/fhir-faq.yml) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/fhir/fhir-features-supported.md b/articles/healthcare-apis/fhir/fhir-features-supported.md index 92d48b1c9c5b9..b0ca8082064da 100644 --- a/articles/healthcare-apis/fhir/fhir-features-supported.md +++ b/articles/healthcare-apis/fhir/fhir-features-supported.md @@ -6,7 +6,7 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 05/05/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- @@ -73,3 +73,5 @@ In this article, you've read about the supported FHIR features in the FHIR servi >[!div class="nextstepaction"] >[Deploy FHIR service](fhir-portal-quickstart.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/fhir-portal-quickstart.md b/articles/healthcare-apis/fhir/fhir-portal-quickstart.md index bf3e348996f65..9ca7f473fdd01 100644 --- a/articles/healthcare-apis/fhir/fhir-portal-quickstart.md +++ b/articles/healthcare-apis/fhir/fhir-portal-quickstart.md @@ -4,7 +4,7 @@ description: This article teaches users how to deploy a FHIR service in the Azur author: stevewohl ms.service: healthcare-apis ms.topic: quickstart -ms.date: 05/03/2022 +ms.date: 06/06/2022 ms.author: ginle ms.custom: mode-api --- @@ -55,3 +55,5 @@ In this article, you learned how to deploy FHIR service within Azure Health Data >[!div class="nextstepaction"] >[Access FHIR service using Postman](../fhir/use-postman.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/fhir-rest-api-capabilities.md b/articles/healthcare-apis/fhir/fhir-rest-api-capabilities.md index 8513df2554a45..e79dc20d62fac 100644 --- a/articles/healthcare-apis/fhir/fhir-rest-api-capabilities.md +++ b/articles/healthcare-apis/fhir/fhir-rest-api-capabilities.md @@ -5,7 +5,7 @@ author: stevewohl ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 03/09/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- diff --git a/articles/healthcare-apis/fhir/fhir-service-access-token-validation-old.md b/articles/healthcare-apis/fhir/fhir-service-access-token-validation-old.md index 8bf2f48094095..ddcba5a222d3a 100644 --- a/articles/healthcare-apis/fhir/fhir-service-access-token-validation-old.md +++ b/articles/healthcare-apis/fhir/fhir-service-access-token-validation-old.md @@ -6,7 +6,7 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 03/01/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- # FHIR service access token validation @@ -117,3 +117,5 @@ In this article, you learned about the FHIR service access token validation step >[!div class="nextstepaction"] >[Supported FHIR Features](fhir-portal-quickstart.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
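The access token validation article above describes how the FHIR service checks the Azure AD token sent with each request. As a quick way to see such a token for yourself, the following sketch uses the Azure CLI. It assumes the Azure CLI is installed and signed in, and the workspace and FHIR service names are placeholders for your own deployment, not values taken from the article.

```azurecli
# Illustrative only: request an Azure AD access token for a FHIR service endpoint.
# Replace <workspace-name> and <fhir-service-name> with your own values.
az login

token=$(az account get-access-token \
    --resource=https://<workspace-name>-<fhir-service-name>.fhir.azurehealthcareapis.com \
    --query accessToken --output tsv)

# The value is a JWT; the FHIR service validates its issuer, audience, and expiry on each call.
echo $token
```

Decoding the JWT (for example, at jwt.ms) shows the claims that the service checks during validation.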
diff --git a/articles/healthcare-apis/fhir/fhir-service-autoscale.md b/articles/healthcare-apis/fhir/fhir-service-autoscale.md index 98e92270105d9..d1915bfa2b202 100644 --- a/articles/healthcare-apis/fhir/fhir-service-autoscale.md +++ b/articles/healthcare-apis/fhir/fhir-service-autoscale.md @@ -5,7 +5,7 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 05/03/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- @@ -33,3 +33,5 @@ In this article, you've learned about the FHIR service autoscale feature in Azur >[!div class="nextstepaction"] >[Supported FHIR Features](fhir-features-supported.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/fhir-service-diagnostic-logs.md b/articles/healthcare-apis/fhir/fhir-service-diagnostic-logs.md index d9dd63f0bf2e9..3cd2209195622 100644 --- a/articles/healthcare-apis/fhir/fhir-service-diagnostic-logs.md +++ b/articles/healthcare-apis/fhir/fhir-service-diagnostic-logs.md @@ -5,7 +5,7 @@ services: healthcare-apis author: mikaelweave ms.service: healthcare-apis ms.topic: how-to -ms.date: 05/03/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- diff --git a/articles/healthcare-apis/fhir/fhir-service-resource-manager-template.md b/articles/healthcare-apis/fhir/fhir-service-resource-manager-template.md index df29ae0532460..cc01626cab584 100644 --- a/articles/healthcare-apis/fhir/fhir-service-resource-manager-template.md +++ b/articles/healthcare-apis/fhir/fhir-service-resource-manager-template.md @@ -5,7 +5,7 @@ author: mikaelweave ms.service: healthcare-apis ms.topic: tutorial ms.author: mikaelw -ms.date: 05/03/2022 +ms.date: 06/06/2022 --- # Deploy a FHIR service within Azure Health Data Services - using ARM template @@ -280,4 +280,6 @@ az group delete --name $resourceGroupName In this quickstart guide, you've deployed the FHIR service within Azure Health Data Services using an ARM template. For more information about FHIR service supported features, see. >[!div class="nextstepaction"] ->[Supported FHIR Features](fhir-features-supported.md) \ No newline at end of file +>[Supported FHIR Features](fhir-features-supported.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/fhir/fhir-versioning-policy-and-history-management.md b/articles/healthcare-apis/fhir/fhir-versioning-policy-and-history-management.md index 3a28884ed70e4..426b104b29412 100644 --- a/articles/healthcare-apis/fhir/fhir-versioning-policy-and-history-management.md +++ b/articles/healthcare-apis/fhir/fhir-versioning-policy-and-history-management.md @@ -5,7 +5,7 @@ author: stevewohl ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 05/06/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- @@ -73,4 +73,5 @@ In this article, you learned how to purge the history for resources in the FHIR >[!div class="nextstepaction"] >[Purge history operation](purge-history.md) +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
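The ARM template quickstart above ends with an `az group delete` cleanup step. For readers following along with the Azure CLI, a deployment sketch like the following may help. The template file name and parameter names here are illustrative assumptions rather than the exact values used in the quickstart.

```azurecli
# Illustrative only: deploy a FHIR service ARM template with the Azure CLI.
# The template file and parameter names are placeholders; use the ones from the quickstart.
resourceGroupName=my-health-data-rg
az group create --name $resourceGroupName --location westus2

az deployment group create \
    --resource-group $resourceGroupName \
    --template-file azuredeploy.json \
    --parameters workspaceName=myworkspace fhirServiceName=myfhirservice
```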
diff --git a/articles/healthcare-apis/fhir/get-started-with-fhir.md b/articles/healthcare-apis/fhir/get-started-with-fhir.md index b0b59d7065ea5..e5470a1a59cab 100644 --- a/articles/healthcare-apis/fhir/get-started-with-fhir.md +++ b/articles/healthcare-apis/fhir/get-started-with-fhir.md @@ -5,7 +5,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: quickstart -ms.date: 05/03/2022 +ms.date: 06/06/2022 ms.author: mikaelw ms.custom: mode-api --- @@ -58,7 +58,7 @@ You can obtain an Azure AD access token using PowerShell, Azure CLI, REST CCI, o #### Access using existing tools - [Postman](../fhir/use-postman.md) -- [Rest Client](../fhir/using-rest-client.md) +- [REST Client](../fhir/using-rest-client.md) - [cURL](../fhir/using-curl.md) #### Load data @@ -96,3 +96,5 @@ This article described the basic steps to get started using the FHIR service. Fo >[!div class="nextstepaction"] >[Deploy a FHIR service within Azure Health Data Services](fhir-portal-quickstart.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/how-to-do-custom-search.md b/articles/healthcare-apis/fhir/how-to-do-custom-search.md index 29584fc18bfed..420299c2b6a82 100644 --- a/articles/healthcare-apis/fhir/how-to-do-custom-search.md +++ b/articles/healthcare-apis/fhir/how-to-do-custom-search.md @@ -5,7 +5,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 03/01/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- # Defining custom search parameters @@ -215,3 +215,5 @@ In this article, you’ve learned how to create a search parameter. Next you can >[!div class="nextstepaction"] >[How to run a reindex job](how-to-run-a-reindex.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/how-to-run-a-reindex.md b/articles/healthcare-apis/fhir/how-to-run-a-reindex.md index a442299fb6c35..b55e5b16d4846 100644 --- a/articles/healthcare-apis/fhir/how-to-run-a-reindex.md +++ b/articles/healthcare-apis/fhir/how-to-run-a-reindex.md @@ -5,7 +5,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 03/01/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- # Running a reindex job @@ -216,3 +216,5 @@ In this article, you've learned how to start a reindex job. To learn how to defi >[!div class="nextstepaction"] >[Defining custom search parameters](how-to-do-custom-search.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/import-data.md b/articles/healthcare-apis/fhir/import-data.md index 4b681c42b96be..0aed77967951c 100644 --- a/articles/healthcare-apis/fhir/import-data.md +++ b/articles/healthcare-apis/fhir/import-data.md @@ -5,13 +5,13 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: how-to -ms.date: 04/22/2022 +ms.date: 06/06/2022 ms.author: ranku --- # Bulk-import FHIR data (Preview) -The bulk-import feature enables importing FHIR data to the FHIR server at high throughput using the $import operation. This feature is suitable for initial data load into the FHIR server. +The bulk-import feature enables importing Fast Healthcare Interoperability Resources (FHIR®) data to the FHIR server at high throughput using the $import operation. 
This feature is suitable for initial data load into the FHIR server. > [!NOTE] > You must have the **FHIR Data Importer** role on the FHIR server to use $import. @@ -219,7 +219,7 @@ Below are some error codes you may encounter and the solutions to help you resol **Cause:** We use managed identity for source storage auth. This error may be caused by a missing or wrong role assignment. -**Solution:** Assign _Storage Blob Data Contributor_ role to the FHIR server following [the RBAC guide.](https://docs.microsoft.com/azure/role-based-access-control/role-assignments-portal?tabs=current) +**Solution:** Assign _Storage Blob Data Contributor_ role to the FHIR server following [the RBAC guide.](../../role-based-access-control/role-assignments-portal.md?tabs=current) ### 500 Internal Server Error @@ -264,3 +264,5 @@ In this article, you've learned about how the Bulk import feature enables import >[!div class="nextstepaction"] >[Copy data from Azure API for FHIR to Azure Synapse Analytics](copy-to-synapse.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/fhir/overview-of-search.md b/articles/healthcare-apis/fhir/overview-of-search.md index 123a734f091c5..1f621cd1b6b8e 100644 --- a/articles/healthcare-apis/fhir/overview-of-search.md +++ b/articles/healthcare-apis/fhir/overview-of-search.md @@ -5,12 +5,12 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 03/01/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- # Overview of FHIR search -The FHIR specification defines the fundamentals of search for FHIR resources. This article will guide you through some key aspects to searching resources in FHIR. For complete details about searching FHIR resources, refer to [Search](https://www.hl7.org/fhir/search.html) in the HL7 FHIR Specification. Throughout this article, we'll give examples of search syntax. Each search will be against your FHIR server, which typically has a URL of `https://-.fhir.azurehealthcareapis.com`. In the examples, we'll use the placeholder {{FHIR_URL}} for this URL. +The Fast Healthcare Interoperability Resources (FHIR®) specification defines the fundamentals of search for FHIR resources. This article will guide you through some key aspects to searching resources in FHIR. For complete details about searching FHIR resources, refer to [Search](https://www.hl7.org/fhir/search.html) in the HL7 FHIR Specification. Throughout this article, we'll give examples of search syntax. Each search will be against your FHIR server, which typically has a URL of `https://-.fhir.azurehealthcareapis.com`. In the examples, we'll use the placeholder {{FHIR_URL}} for this URL. FHIR searches can be against a specific resource type, a specified [compartment](https://www.hl7.org/fhir/compartmentdefinition.html), or all resources. The simplest way to execute a search in FHIR is to use a `GET` request. For example, if you want to pull all patients in the database, you could use the following request: @@ -160,3 +160,5 @@ Now that you've learned about the basics of search, see the search samples page >[!div class="nextstepaction"] >[FHIR search examples](search-samples.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
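The search overview above describes issuing `GET` requests against the `{{FHIR_URL}}` placeholder. A minimal sketch of such requests with cURL, assuming `$token` already holds a valid Azure AD access token for the FHIR service, might look like this:

```bash
# Replace {{FHIR_URL}} with your FHIR server URL; $token is assumed to hold a valid access token.

# Return all Patient resources:
curl -X GET "{{FHIR_URL}}/Patient" -H "Authorization: Bearer $token"

# Narrow the search with a parameter, for example by family name:
curl -X GET "{{FHIR_URL}}/Patient?family=Smith" -H "Authorization: Bearer $token"
```

Both requests return a searchset Bundle rather than a single resource.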
diff --git a/articles/healthcare-apis/fhir/overview.md b/articles/healthcare-apis/fhir/overview.md index c2057d3c77f73..bce4137f4b448 100644 --- a/articles/healthcare-apis/fhir/overview.md +++ b/articles/healthcare-apis/fhir/overview.md @@ -6,7 +6,7 @@ author: matjazl ms.service: healthcare-apis ms.subservice: fhir ms.topic: overview -ms.date: 05/16/2022 +ms.date: 06/06/2022 ms.author: chrupa --- @@ -77,3 +77,5 @@ To start working with the FHIR service, follow the 5-minute quickstart to deploy >[!div class="nextstepaction"] >[Deploy FHIR service](fhir-portal-quickstart.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/patient-everything.md b/articles/healthcare-apis/fhir/patient-everything.md index 879f1cd455cb7..334425c54f164 100644 --- a/articles/healthcare-apis/fhir/patient-everything.md +++ b/articles/healthcare-apis/fhir/patient-everything.md @@ -6,13 +6,13 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 03/01/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- # Using Patient-everything in FHIR service -The [Patient-everything](https://www.hl7.org/fhir/patient-operation-everything.html) operation is used to provide a view of all resources related to a patient. This operation can be useful to give patients' access to their entire record or for a provider or other user to perform a bulk data download related to a patient. According to the FHIR specification, Patient-everything returns all the information related to one or more patients described in the resource or context on which this operation is invoked. In the FHIR service in Azure Health Data Services(hereby called FHIR service), Patient-everything is available to pull data related to a specific patient. +The [Patient-everything](https://www.hl7.org/fhir/patient-operation-everything.html) operation is used to provide a view of all resources related to a patient. This operation can be useful to give patients access to their entire record or for a provider or other user to perform a bulk data download related to a patient. According to the Fast Healthcare Interoperability Resources (FHIR®) specification, Patient-everything returns all the information related to one or more patients described in the resource or context on which this operation is invoked. In the FHIR service in Azure Health Data Services (hereby called FHIR service), Patient-everything is available to pull data related to a specific patient. ## Use Patient-everything To call Patient-everything, use the following command: @@ -124,3 +124,5 @@ Now that you know how to use the Patient-everything operation, you can learn abo >[!div class="nextstepaction"] >[Overview of FHIR search](overview-of-search.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/purge-history.md b/articles/healthcare-apis/fhir/purge-history.md index 53e9df8de03f5..30a0a9c182787 100644 --- a/articles/healthcare-apis/fhir/purge-history.md +++ b/articles/healthcare-apis/fhir/purge-history.md @@ -5,13 +5,13 @@ author: stevewohl ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 05/05/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- # Purge history operation -`$purge-history` is an operation that allows you to delete the history of a single FHIR resource. 
This operation isn't defined in the FHIR specification, but it's useful for [history management](fhir-versioning-policy-and-history-management.md) in large FHIR service instances. +`$purge-history` is an operation that allows you to delete the history of a single Fast Healthcare Interoperability Resources (FHIR®) resource. This operation isn't defined in the FHIR specification, but it's useful for [history management](fhir-versioning-policy-and-history-management.md) in large FHIR service instances. ## Overview of purge history @@ -44,4 +44,6 @@ In this article, you learned how to purge the history for resources in the FHIR >[Supported FHIR features](fhir-features-supported.md) >[!div class="nextstepaction"] ->[FHIR REST API capabilities for Azure Health Data Services FHIR service](fhir-rest-api-capabilities.md) \ No newline at end of file +>[FHIR REST API capabilities for Azure Health Data Services FHIR service](fhir-rest-api-capabilities.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/fhir/search-samples.md b/articles/healthcare-apis/fhir/search-samples.md index 7f753801f6403..569369196295b 100644 --- a/articles/healthcare-apis/fhir/search-samples.md +++ b/articles/healthcare-apis/fhir/search-samples.md @@ -5,13 +5,13 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 03/01/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- # FHIR search examples -Below are some examples of using FHIR search operations, including search parameters and modifiers, chain and reverse chain search, composite search, viewing the next entry set for search results, and searching with a `POST` request. For more information about search, see [Overview of FHIR Search](overview-of-search.md). +Below are some examples of using Fast Healthcare Interoperability Resources (FHIR®) search operations, including search parameters and modifiers, chain and reverse chain search, composite search, viewing the next entry set for search results, and searching with a `POST` request. For more information about search, see [Overview of FHIR Search](overview-of-search.md). ## Search result parameters @@ -220,3 +220,5 @@ In this article, you learned about how to search using different search paramete >[!div class="nextstepaction"] >[Overview of FHIR Search](overview-of-search.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/store-profiles-in-fhir.md b/articles/healthcare-apis/fhir/store-profiles-in-fhir.md index 5902cfa1f9bbf..6715b71677d30 100644 --- a/articles/healthcare-apis/fhir/store-profiles-in-fhir.md +++ b/articles/healthcare-apis/fhir/store-profiles-in-fhir.md @@ -5,13 +5,13 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 03/01/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- # Store profiles in FHIR service -HL7 FHIR defines a standard and interoperable way to store and exchange healthcare data. Even within the base FHIR specification, it can be helpful to define other rules or extensions based on the context that FHIR is being used. For such context-specific uses of FHIR, **FHIR profiles** are used for the extra layer of specifications. +HL7 Fast Healthcare Interoperability Resources (FHIR®) defines a standard and interoperable way to store and exchange healthcare data. 
Even within the base FHIR specification, it can be helpful to define other rules or extensions based on the context that FHIR is being used. For such context-specific uses of FHIR, **FHIR profiles** are used for the extra layer of specifications. [FHIR profile](https://www.hl7.org/fhir/profiling.html) allows you to narrow down and customize resource definitions using constraints and extensions. The FHIR service in Azure Health Data Services (hereby called FHIR service) allows validating resources against profiles to see if the resources conform to the profiles. This article guides you through the basics of FHIR profiles and how to store them. For more information about FHIR profiles outside of this article, visit [HL7.org](https://www.hl7.org/fhir/profiling.html). @@ -207,3 +207,5 @@ In this article, you've learned about FHIR profiles. Next, you'll learn how you >[!div class="nextstepaction"] >[Validate FHIR resources against profiles](validation-against-profiles.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/tutorial-member-match.md b/articles/healthcare-apis/fhir/tutorial-member-match.md index bf0402c8b6aca..b431535387fab 100644 --- a/articles/healthcare-apis/fhir/tutorial-member-match.md +++ b/articles/healthcare-apis/fhir/tutorial-member-match.md @@ -8,7 +8,7 @@ ms.topic: tutorial ms.reviewer: matjazl ms.author: mikaelw author: mikaelweave -ms.date: 03/01/2022 +ms.date: 06/06/2022 --- # $member-match operation in FHIR service @@ -48,3 +48,5 @@ In this guide, you've learned about the $member-match operation. Next, you can l >[!div class="nextstepaction"] >[DaVinci PDex](davinci-pdex-tutorial.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/use-postman.md b/articles/healthcare-apis/fhir/use-postman.md index 325cf6de5ca3c..c868c0af5e4a5 100644 --- a/articles/healthcare-apis/fhir/use-postman.md +++ b/articles/healthcare-apis/fhir/use-postman.md @@ -5,7 +5,7 @@ services: healthcare-apis author: ginalee-dotcom ms.service: healthcare-apis ms.topic: tutorial -ms.date: 05/03/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- @@ -161,3 +161,5 @@ In this article, you learned how to access the FHIR service in Azure Health Data >[!div class="nextstepaction"] >[What is FHIR service?](overview.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/fhir/using-curl.md b/articles/healthcare-apis/fhir/using-curl.md index 473b8f8ceeeb9..b719793ed842a 100644 --- a/articles/healthcare-apis/fhir/using-curl.md +++ b/articles/healthcare-apis/fhir/using-curl.md @@ -5,7 +5,7 @@ services: healthcare-apis author: ginalee-dotcom ms.service: healthcare-apis ms.topic: tutorial -ms.date: 03/22/2022 +ms.date: 06/06/2022 ms.author: ginle --- @@ -19,7 +19,7 @@ In this article, you'll learn how to access Azure Health Data Services with cURL * An Azure account with an active subscription. [Create one for free](https://azure.microsoft.com/free/). * If you want to run the code locally, install [PowerShell](/powershell/module/powershellget/) and [Azure Az PowerShell](/powershell/azure/install-az-ps). -* Optionally, you can run the scripts in Visual Studio Code with the Rest Client extension. For more information, see [Make a link to the Rest Client doc](using-rest-client.md). 
+* Optionally, you can run the scripts in Visual Studio Code with the REST Client extension. For more information, see [Make a link to the REST Client doc](using-rest-client.md). * Download and install [cURL](https://curl.se/download.html). ### CLI @@ -27,7 +27,7 @@ In this article, you'll learn how to access Azure Health Data Services with cURL * An Azure account with an active subscription. [Create one for free](https://azure.microsoft.com/free/). * If you want to run the code locally, install [Azure CLI](/cli/azure/install-azure-cli). * Optionally, install a Bash shell, such as Git Bash, which it's included in [Git for Windows](https://gitforwindows.org/). -* Optionally, run the scripts in Visual Studio Code with the Rest Client extension. For more information, see [Make a link to the Rest Client doc](using-rest-client.md). +* Optionally, run the scripts in Visual Studio Code with the REST Client extension. For more information, see [Make a link to the REST Client doc](using-rest-client.md). * Download and install [cURL](https://curl.se/download.html). ## Obtain Azure Access Token @@ -123,4 +123,6 @@ In this article, you learned how to access Azure Health Data Services data using To learn about how to access Azure Health Data Services data using REST Client extension in Visual Studio Code, see >[!div class="nextstepaction"] ->[Access Azure Health Data Services using REST Client](using-rest-client.md) \ No newline at end of file +>[Access Azure Health Data Services using REST Client](using-rest-client.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/fhir/using-rest-client.md b/articles/healthcare-apis/fhir/using-rest-client.md index ddf957b012a33..8ea44487a432d 100644 --- a/articles/healthcare-apis/fhir/using-rest-client.md +++ b/articles/healthcare-apis/fhir/using-rest-client.md @@ -5,7 +5,7 @@ services: healthcare-apis author: ginalee-dotcom ms.service: healthcare-apis ms.topic: tutorial -ms.date: 03/01/2022 +ms.date: 06/06/2022 ms.author: ginle --- @@ -105,4 +105,6 @@ In this article, you learned how to access Azure Health Data Services data using To learn about how to validate FHIR resources against profiles in Azure Health Data Services, see >[!div class="nextstepaction"] ->[Validate FHIR resources against profiles in Azure Health Data Services](validation-against-profiles.md) \ No newline at end of file +>[Validate FHIR resources against profiles in Azure Health Data Services](validation-against-profiles.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/fhir/validation-against-profiles.md b/articles/healthcare-apis/fhir/validation-against-profiles.md index 350f89482c4c5..72ec7874de3aa 100644 --- a/articles/healthcare-apis/fhir/validation-against-profiles.md +++ b/articles/healthcare-apis/fhir/validation-against-profiles.md @@ -5,13 +5,13 @@ author: mikaelweave ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 03/01/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- # Validate FHIR resources against profiles in Azure Health Data Services -`$validate` is an operation in FHIR that allows you to ensure that a FHIR resource conforms to the base resource requirements or a specified profile. This is a valuable operation to ensure that the data in the FHIR server has the expected attributes and values. 
+`$validate` is an operation in Fast Healthcare Interoperability Resources (FHIR®) that allows you to ensure that a FHIR resource conforms to the base resource requirements or a specified profile. This is a valuable operation to ensure that the data in the FHIR server has the expected attributes and values. In the [store profiles in the FHIR service](store-profiles-in-fhir.md) article, you walked through the basics of FHIR profiles and storing them. The FHIR service in Azure Health Data Services (hereby called the FHIR service) allows validating resources against profiles to see if the resources conform to the profiles. This article will guide you through how to use `$validate` for validating resources against profiles. For more information about FHIR profiles outside of this article, visit [HL7.org](https://www.hl7.org/fhir/profiling.html). @@ -141,3 +141,5 @@ In this article, you learned how to validate resources against profiles using `$ >[!div class="nextstepaction"] >[Supported FHIR features](fhir-features-supported.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/get-access-token.md b/articles/healthcare-apis/get-access-token.md index e1f01a673ddbd..52a7a360603fd 100644 --- a/articles/healthcare-apis/get-access-token.md +++ b/articles/healthcare-apis/get-access-token.md @@ -6,7 +6,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: conceptual -ms.date: 05/03/2022 +ms.date: 06/06/2022 ms.author: mikaelw ms.custom: devx-track-azurepowershell, devx-track-azurecli ms.devlang: azurecli @@ -65,7 +65,9 @@ In this article, you learned how to obtain an access token for the FHIR service >[Access FHIR service using Postman](./fhir/use-postman.md) >[!div class="nextstepaction"] ->[Access FHIR service using Rest Client](./fhir/using-rest-client.md) +>[Access FHIR service using REST Client](./fhir/using-rest-client.md) >[!div class="nextstepaction"] >[Access DICOM service using cURL](dicom/dicomweb-standard-apis-curl.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/get-started-with-health-data-services.md b/articles/healthcare-apis/get-started-with-health-data-services.md index d43d7c8a940ea..a2a05aa29930e 100644 --- a/articles/healthcare-apis/get-started-with-health-data-services.md +++ b/articles/healthcare-apis/get-started-with-health-data-services.md @@ -5,7 +5,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: quickstart -ms.date: 05/17/2022 +ms.date: 06/06/2022 ms.author: ranku --- @@ -111,3 +111,5 @@ This article described the basic steps to get started using Azure Health Data Se >[!div class="nextstepaction"] >[Frequently asked questions about Azure Health Data Services](healthcare-apis-faqs.md) +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. 
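For the `$validate` operation introduced in the validation article above, a hedged cURL sketch follows. It assumes `$token` holds a valid access token and that `patient.json` (a placeholder file name) contains the resource to check; per the FHIR specification, the service responds with an OperationOutcome describing any issues found.

```bash
# Illustrative only: validate a Patient resource against the base resource definition.
# The backslash before $validate keeps the shell from treating it as a variable.
curl -X POST "{{FHIR_URL}}/Patient/\$validate" \
    -H "Authorization: Bearer $token" \
    -H "Content-Type: application/json" \
    -d @patient.json
```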
+ diff --git a/articles/healthcare-apis/github-projects.md b/articles/healthcare-apis/github-projects.md index 68494272f3139..b10341537ce6e 100644 --- a/articles/healthcare-apis/github-projects.md +++ b/articles/healthcare-apis/github-projects.md @@ -5,7 +5,7 @@ services: healthcare-apis author: ginalee-dotcom ms.service: healthcare-apis ms.topic: reference -ms.date: 03/22/2022 +ms.date: 06/06/2022 ms.author: ginle --- # GitHub Projects diff --git a/articles/healthcare-apis/healthcare-apis-configure-private-link.md b/articles/healthcare-apis/healthcare-apis-configure-private-link.md index c1f2d0b0e2d38..54931f73bc223 100644 --- a/articles/healthcare-apis/healthcare-apis-configure-private-link.md +++ b/articles/healthcare-apis/healthcare-apis-configure-private-link.md @@ -6,7 +6,7 @@ author: ginalee-dotcom ms.service: healthcare-apis ms.subservice: fhir ms.topic: reference -ms.date: 05/03/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- @@ -21,7 +21,7 @@ Private Link enables you to access Azure Health Data Services over a private end ## Prerequisites -Before creating a private endpoint, the following Azure resources must be created first: +Before you create a private endpoint, the following Azure resources must be created first: - **Resource Group** – The Azure resource group that will contain the virtual network and private endpoint. - **Workspace** – This is a logical container for FHIR and DICOM service instances. @@ -100,3 +100,5 @@ In this article, you've learned how to configure Private Link for Azure Health D >[!div class="nextstepaction"] >[Overview of Azure Health Data Services](healthcare-apis-overview.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/healthcare-apis-faqs.md b/articles/healthcare-apis/healthcare-apis-faqs.md index 9bc081a7459fc..0f87eb019df36 100644 --- a/articles/healthcare-apis/healthcare-apis-faqs.md +++ b/articles/healthcare-apis/healthcare-apis-faqs.md @@ -6,7 +6,7 @@ author: ginalee-dotcom ms.custom: references_regions ms.service: healthcare-apis ms.topic: reference -ms.date: 03/22/2022 +ms.date: 06/06/2022 ms.author: ginle --- diff --git a/articles/healthcare-apis/healthcare-apis-overview.md b/articles/healthcare-apis/healthcare-apis-overview.md index 9563d355ac1fa..626bfdb40f05c 100644 --- a/articles/healthcare-apis/healthcare-apis-overview.md +++ b/articles/healthcare-apis/healthcare-apis-overview.md @@ -5,7 +5,7 @@ services: healthcare-apis author: stevewohl ms.service: healthcare-apis ms.topic: overview -ms.date: 03/22/2022 +ms.date: 06/03/2022 ms.author: ginle --- @@ -17,7 +17,7 @@ Azure Health Data Services provides the following benefits: * Empower new workloads to leverage PHI by enabling the data to be collected and accessed in one place, in a consistent way. * Discover new insight by bringing disparate PHI together and connecting it end-to-end with tools for machine learning, analytics, and AI. * Build on a trusted cloud with confidence in how Protected Health Information is managed, stored, and made available. -The new Microsoft Azure Health Data Services will, in addition to FHIR, support other healthcare industry data standards, like DICOM, extending healthcare data interoperability. The business model and infrastructure platform have been redesigned to accommodate the expansion and introduction of different and future healthcare data standards. 
Customers can use health data of different types across healthcare standards under the same compliance umbrella. Tools have been built into the managed service that allow customers to transform data from legacy or device proprietary formats, to FHIR. Some of these tools have been previously developed and open-sourced; others will be net new. +The new Microsoft Azure Health Data Services will, in addition to Fast Healthcare Interoperability Resources (FHIR®), support other healthcare industry data standards, like DICOM, extending healthcare data interoperability. The business model and infrastructure platform have been redesigned to accommodate the expansion and introduction of different and future healthcare data standards. Customers can use health data of different types across healthcare standards under the same compliance umbrella. Tools have been built into the managed service that allow customers to transform data from legacy or device proprietary formats, to FHIR. Some of these tools have been previously developed and open-sourced; others will be net new. Azure Health Data Services enables you to: * Quickly connect disparate health data sources and formats such as structured, imaging, and device data and normalize it to be persisted in the cloud. @@ -55,3 +55,5 @@ To start working with the Azure Health Data Services, follow the 5-minute quick > [!div class="nextstepaction"] > [Workspace overview](workspace-overview.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. \ No newline at end of file diff --git a/articles/healthcare-apis/healthcare-apis-quickstart.md b/articles/healthcare-apis/healthcare-apis-quickstart.md index 1328d35b35de7..532624656416f 100644 --- a/articles/healthcare-apis/healthcare-apis-quickstart.md +++ b/articles/healthcare-apis/healthcare-apis-quickstart.md @@ -5,7 +5,7 @@ author: stevewohl ms.service: healthcare-apis ms.subservice: fhir ms.topic: quickstart -ms.date: 05/03/2022 +ms.date: 06/06/2022 ms.author: ginle ms.custom: mode-api --- @@ -89,3 +89,5 @@ For more information about Azure Health Data Services workspace, see >[!div class="nextstepaction"] >[Workspace overview](workspace-overview.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/iot/iot-git-projects.md b/articles/healthcare-apis/iot/iot-git-projects.md index 8f9647f4c6b0a..7bb44f8da85dc 100644 --- a/articles/healthcare-apis/iot/iot-git-projects.md +++ b/articles/healthcare-apis/iot/iot-git-projects.md @@ -5,7 +5,7 @@ services: healthcare-apis author: msjasteppe ms.service: healthcare-apis ms.topic: reference -ms.date: 02/16/2022 +ms.date: 05/25/2022 ms.author: jasteppe --- # Open-source projects @@ -34,9 +34,9 @@ HealthKit * [microsoft/healthkit-to-fhir](https://github.com/microsoft/healthkit-to-fhir): Provides a simple way to create FHIR Resources from HKObjects -Google Fit on FHIR +Fit on FHIR -* [microsoft/googlefit-on-fhir](https://github.com/microsoft/googlefit-on-fhir): Bring Google Fit® data to a FHIR service. +* [microsoft/fit-on-fhir](https://github.com/microsoft/fit-on-fhir): Bring Google Fit® data to a FHIR service. 
Health Data Sync diff --git a/articles/healthcare-apis/logging.md b/articles/healthcare-apis/logging.md index d4be8f6573ad3..a17d4fabcbd1f 100644 --- a/articles/healthcare-apis/logging.md +++ b/articles/healthcare-apis/logging.md @@ -5,7 +5,7 @@ services: healthcare-apis author: ginalee-dotcom ms.service: healthcare-apis ms.topic: tutorial -ms.date: 03/22/2022 +ms.date: 06/06/2022 ms.author: ginle --- @@ -64,4 +64,6 @@ For more information about service logs and metrics for the DICOM service and Me >[!div class="nextstepaction"] >[How to display MedTech service metrics](./../healthcare-apis/iot/how-to-display-metrics.md) +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. + diff --git a/articles/healthcare-apis/register-application.md b/articles/healthcare-apis/register-application.md index 352de3675ac52..81199977e681d 100644 --- a/articles/healthcare-apis/register-application.md +++ b/articles/healthcare-apis/register-application.md @@ -5,7 +5,7 @@ services: healthcare-apis author: ginalee-dotcom ms.service: healthcare-apis ms.topic: tutorial -ms.date: 05/03/2022 +ms.date: 06/06/2022 ms.author: mikaelw --- @@ -71,15 +71,15 @@ The following steps are required for the DICOM service, but optional for the FHI 1. Select the **API permissions** blade. - [ ![Add API permissions](dicom/media/dicom-add-api-permissions.png) ](dicom/media/dicom-add-api-permissions.png#lightbox) + [ ![Add API permissions](dicom/media/dicom-add-apis-permissions.png) ](dicom/media/dicom-add-apis-permissions.png#lightbox) 2. Select **Add a permission**. - If you're using Azure Health Data Services, you'll add a permission to the DICOM service by searching for **Azure API for DICOM** under **APIs my organization** uses. + If you're using Azure Health Data Services, you'll add a permission to the DICOM service by searching for **Azure Healthcare APIs** under **APIs my organization** uses. [ ![Search API permissions](dicom/media/dicom-search-apis-permissions.png) ](dicom/media/dicom-search-apis-permissions.png#lightbox) - The search result for Azure API for DICOM will only return if you've already deployed the DICOM service in the workspace. + The search result for Azure Healthcare APIs will only return if you've already deployed the DICOM service in the workspace. If you're referencing a different resource application, select your DICOM API Resource Application Registration that you created previously under **APIs my organization**. @@ -88,7 +88,7 @@ The following steps are required for the DICOM service, but optional for the FHI [ ![Select permissions scopes.](dicom/media/dicom-select-scopes.png) ](dicom/media/dicom-select-scopes.png#lightbox) >[!NOTE] ->Use grant_type of client_credentials when trying to otain an access token for the FHIR service using tools such as Postman or Rest Client. For more details, visit [Access using Postman](./fhir/use-postman.md) and [Accessing Azure Health Data Services using the REST Client Extension in Visual Studio Code](./fhir/using-rest-client.md). +>Use grant_type of client_credentials when trying to obtain an access token for the FHIR service using tools such as Postman or REST Client. For more details, visit [Access using Postman](./fhir/use-postman.md) and [Accessing Azure Health Data Services using the REST Client Extension in Visual Studio Code](./fhir/using-rest-client.md). >>Use grant_type of client_credentials or authentication_doe when trying to obtain an access token for the DICOM service. 
For more details, visit [Using DICOM with cURL](dicom/dicomweb-standard-apis-curl.md). Your application registration is now complete. @@ -99,3 +99,5 @@ In this article, you learned how to register a client application in the Azure A >[!div class="nextstepaction"] >[Overview of Azure Health Data Services](healthcare-apis-overview.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/security-controls-policy.md b/articles/healthcare-apis/security-controls-policy.md index 76ee5de364c41..c55f010630182 100644 --- a/articles/healthcare-apis/security-controls-policy.md +++ b/articles/healthcare-apis/security-controls-policy.md @@ -1,7 +1,7 @@ --- title: Azure Policy Regulatory Compliance controls for Azure Health Data Services FHIR service description: Lists Azure Policy Regulatory Compliance controls available. These built-in policy definitions provide common approaches to managing the compliance of your Azure resources. -ms.date: 05/10/2022 +ms.date: 06/06/2022 ms.topic: sample author: matjazl ms.author: chrupa @@ -24,3 +24,5 @@ page lists the **compliance domains** and **security controls** for the FHIR ser - For more information, see [Azure Policy Regulatory Compliance](../governance/policy/concepts/regulatory-compliance.md). - For more information, see built-ins on the [Azure Policy GitHub repo](https://github.com/Azure/azure-policy). + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/healthcare-apis/workspace-overview.md b/articles/healthcare-apis/workspace-overview.md index bb6a721cb2f64..0d7353c1cdc86 100644 --- a/articles/healthcare-apis/workspace-overview.md +++ b/articles/healthcare-apis/workspace-overview.md @@ -5,7 +5,7 @@ author: stevewohl ms.service: healthcare-apis ms.subservice: fhir ms.topic: overview -ms.date: 03/28/2022 +ms.date: 06/06/2022 ms.author: ginle --- @@ -76,3 +76,5 @@ To start working with Azure Health Data Services, follow the 5-minute quick star >[!div class="nextstepaction"] >[Deploy workspace in the Azure portal](healthcare-apis-quickstart.md) + +FHIR® is a registered trademark of [HL7](https://hl7.org/fhir/) and is used with the permission of HL7. diff --git a/articles/hpc-cache/access-policies.md b/articles/hpc-cache/access-policies.md index 9b50b948d723e..c9b1ec2a14a8b 100644 --- a/articles/hpc-cache/access-policies.md +++ b/articles/hpc-cache/access-policies.md @@ -1,11 +1,11 @@ --- title: Use access policies in Azure HPC Cache description: How to create and apply custom access policies to limit client access to storage targets in Azure HPC Cache -author: femila +author: ekpgh ms.service: hpc-cache ms.topic: how-to -ms.date: 03/11/2021 -ms.author: femila +ms.date: 05/19/2022 +ms.author: v-erinkelly --- # Control client access @@ -22,7 +22,7 @@ If you don't need fine-grained control over storage target access, you can use t ## Create a client access policy -Use the **Client access policies** page in the Azure portal to create and manage policies. +Use the **Client access policies** page in the Azure portal to create and manage policies. [![screenshot of client access policies page. 
Several policies are defined, and some are expanded to show their rules](media/policies-overview.png)](media/policies-overview.png#lightbox) @@ -76,11 +76,11 @@ Check this box to allow the specified clients to directly mount this export's su Choose whether or not to set root squash for clients that match this rule. -This setting controls how Azure HPC Cache treats requests from the root user on client machines. When root squash is enabled, root users from a client are automatically mapped to a non-privileged user when they send requests through the Azure HPC Cache. It also prevents client requests from using set-UID permission bits. +This setting controls how Azure HPC Cache treats requests from the root user on client machines. When root squash is enabled, root users from a client are automatically mapped to a non-privileged user when they send requests through the Azure HPC Cache. It also prevents client requests from using set-UID permission bits. If root squash is disabled, a request from the client root user (UID 0) is passed through to a back-end NFS storage system as root. This configuration might allow inappropriate file access. -Setting root squash for client requests can help compensate for the required ``no_root_squash`` setting on NAS systems that are used as storage targets. (Read more about [NFS storage target prerequisites](hpc-cache-prerequisites.md#nfs-storage-requirements).) It also can improve security when used with Azure Blob storage targets. +Setting root squash for client requests can provide extra security for your storage target back-end systems. This might be important if you use a NAS system that is configured with ``no_root_squash`` as a storage target. (Read more about [NFS storage target prerequisites](hpc-cache-prerequisites.md#nfs-storage-requirements).) If you turn on root squash, you must also set the anonymous ID user value. The portal accepts integer values between 0 and 4294967295. (The old values -2 and -1 are supported for backward compatibility, but not recommended for new configurations.) diff --git a/articles/hpc-cache/configuration.md b/articles/hpc-cache/configuration.md index 9f0c3bbd7b7fd..81f7642da026e 100644 --- a/articles/hpc-cache/configuration.md +++ b/articles/hpc-cache/configuration.md @@ -1,11 +1,11 @@ --- title: Configure Azure HPC Cache settings description: Explains how to configure additional settings for the cache like MTU, custom NTP and DNS configuration, and how to access the express snapshots from Azure Blob storage targets. -author: ronhogue +author: ekpgh ms.service: hpc-cache ms.topic: how-to -ms.date: 04/08/2021 -ms.author: rohogue +ms.date: 05/16/2022 +ms.author: v-erinkelly --- # Configure additional Azure HPC Cache settings @@ -18,9 +18,6 @@ To see the settings, open the cache's **Networking** page in the Azure portal. ![screenshot of networking page in Azure portal](media/networking-page.png) -> [!NOTE] -> A previous version of this page included a cache-level root squash setting, but this setting has moved to [client access policies](access-policies.md). 
- diff --git a/articles/hpc-cache/hpc-cache-manage.md b/articles/hpc-cache/hpc-cache-manage.md index c4ed785bb1909..4b14bc8282d61 100644 --- a/articles/hpc-cache/hpc-cache-manage.md +++ b/articles/hpc-cache/hpc-cache-manage.md @@ -1,11 +1,11 @@ --- title: Manage and update Azure HPC Cache description: How to manage and update Azure HPC Cache using the Azure portal or Azure CLI -author: ronhogue +author: ekpgh ms.service: hpc-cache ms.topic: how-to -ms.date: 01/19/2022 -ms.author: rohogue +ms.date: 06/02/2022 +ms.author: v-erinkelly --- # Manage your cache @@ -147,15 +147,36 @@ If a new software version is available, the **Upgrade** button becomes active. Y Client access is not interrupted during a software upgrade, but cache performance slows. Plan to upgrade software during non-peak usage hours or in a planned maintenance period. -The software update can take several hours. Caches configured with higher throughput take longer to upgrade than caches with smaller peak throughput values. +The software update can take several hours. Caches configured with higher throughput take longer to upgrade than caches with smaller peak throughput values. The cache status changes to **Upgrading** until the operation completes. -When a software upgrade is available, you will have a week or so to apply it manually. The end date is listed in the upgrade message. If you don't upgrade during that time, Azure automatically applies the update to your cache. The timing of the automatic upgrade is not configurable. If you are concerned about the cache performance impact, you should upgrade the software yourself before the time period expires. +When a software upgrade is available, you will have a week or so to apply it manually. The end date is listed in the upgrade message. If you don't upgrade during that time, Azure automatically applies the new software to your cache. + +You can use the Azure portal to schedule a more convenient time for the upgrade. Follow the instructions in the **Portal** tab below. If your cache is stopped when the end date passes, the cache will automatically upgrade software the next time it is started. (The update might not start immediately, but it will start in the first hour.) ### [Portal](#tab/azure-portal) -Click the **Upgrade** button to begin the software update. The cache status changes to **Upgrading** until the operation completes. +Click the **Upgrade** button to configure your software update. You have the option to upgrade the software immediately, or to schedule the upgrade for a specific date and time. + +![Screenshot of the Schedule software upgrade blade showing radio buttons with "Schedule later" selected and fields to select a new date and time.](media/upgrade-schedule.png) + +To upgrade immediately, select **Upgrade now** and click the **Save** button. + +To schedule a different upgrade time, select **Schedule later** and select a new date and time. + +* The date and time are shown in the browser's local time zone. +* You can't choose a later time than the deadline in the original message. + +When you save the custom date, the banner message will change to show the date you chose. + +If you want to revise your scheduled upgrade date, click the **Upgrade** button again. Click the **Reset date** link. This immediately removes your scheduled date. + +![Screenshot of the Schedule software upgrade blade with a custom date selected. 
A text link appears at the left of the date labeled "Reset date".](media/upgrade-reset-date.png) + +After you reset the previously scheduled value, the date selector resets to the latest available date and time. You can choose a new date and save it, or click **Discard** to keep the latest date. + +You can't change the schedule if there are fewer than 15 minutes remaining before the upgrade. ### [Azure CLI](#tab/azure-cli) @@ -163,11 +184,11 @@ Click the **Upgrade** button to begin the software update. The cache status chan On the Azure CLI, new software information is included at the end of the cache status report. (Use [az hpc-cache show](/cli/azure/hpc-cache#az-hpc-cache-show) to check.) Look for the string "upgradeStatus" in the message. -Use [az hpc-cache upgrade-firmware](/cli/azure/hpc-cache#az-hpc-cache-upgrade-firmware) to apply the update, if any exists. +Use [az hpc-cache upgrade-firmware](/cli/azure/hpc-cache#az-hpc-cache-upgrade-firmware) to apply the software upgrade, if any exists. If no update is available, this operation has no effect. -This example shows the cache status (no update is available) and the results of the upgrade-firmware command. +This example shows the cache status (no upgrade is available) and the results of the upgrade-firmware command. ```azurecli $ az hpc-cache show --name doc-cache0629 diff --git a/articles/hpc-cache/hpc-cache-prerequisites.md b/articles/hpc-cache/hpc-cache-prerequisites.md index bf3671fe525f7..3897ec7607d12 100644 --- a/articles/hpc-cache/hpc-cache-prerequisites.md +++ b/articles/hpc-cache/hpc-cache-prerequisites.md @@ -230,15 +230,7 @@ More information is included in [Troubleshoot NAS configuration and NFS storage * Check firewall settings to be sure that they allow traffic on all of these required ports. Be sure to check firewalls used in Azure as well as on-premises firewalls in your data center. -* Root access (read/write): The cache connects to the back-end system as user ID 0. Check these settings on your storage system: - - * Enable `no_root_squash`. This option ensures that the remote root user can access files owned by root. - - * Check export policies to make sure they don't include restrictions on root access from the cache's subnet. - - * If your storage has any exports that are subdirectories of another export, make sure the cache has root access to the lowest segment of the path. Read [Root access on directory paths](troubleshoot-nas.md#allow-root-access-on-directory-paths) in the NFS storage target troubleshooting article for details. - -* NFS back-end storage must be a compatible hardware/software platform. The storage must support NFS Version 3 (NFSv3). Contact the Azure HPC Cache team for more details. +* NFS back-end storage must be a compatible hardware/software platform. The storage must support NFS Version 3 (NFSv3). Contact the Azure HPC Cache team for details. ### NFS-mounted blob (ADLS-NFS) storage requirements diff --git a/articles/hpc-cache/hpc-cache-security-info.md b/articles/hpc-cache/hpc-cache-security-info.md index 2a0a6d6d51a81..6125144537370 100644 --- a/articles/hpc-cache/hpc-cache-security-info.md +++ b/articles/hpc-cache/hpc-cache-security-info.md @@ -16,7 +16,7 @@ This security information applies to Microsoft Azure HPC Cache. It addresses com The HPC Cache Service is only accessible through your private virtual network. Microsoft cannot access your virtual network. -Learn more about [connecting private networks](/security/benchmark/azure/baselines/hpc-cache-security-baseline.md). 
+Learn more about [connecting private networks](/security/benchmark/azure/baselines/hpc-cache-security-baseline). ## Network infrastructure requirements @@ -48,4 +48,4 @@ You can also optionally configure network security groups (NSGs) to control inbo ## Next steps -* Review [Azure HPC Cache security baseline](/security/benchmark/azure/baselines/hpc-cache-security-baseline.md). +* Review [Azure HPC Cache security baseline](/security/benchmark/azure/baselines/hpc-cache-security-baseline). diff --git a/articles/hpc-cache/manage-storage-targets.md b/articles/hpc-cache/manage-storage-targets.md index 56611d7e5a1b8..35a9643e9ba25 100644 --- a/articles/hpc-cache/manage-storage-targets.md +++ b/articles/hpc-cache/manage-storage-targets.md @@ -1,22 +1,26 @@ --- title: Manage Azure HPC Cache storage targets description: How to suspend, remove, force delete, and flush Azure HPC Cache storage targets, and how to understand the storage target state -author: ronhogue +author: ekpgh ms.service: hpc-cache ms.topic: how-to -ms.date: 01/26/2022 -ms.author: rohogue +ms.date: 05/29/2022 +ms.author: v-erinkelly --- # View and manage storage targets The storage targets settings page shows information about each storage target for your HPC Cache, and gives options to manage individual storage targets. +This page also has a utility for customizing the amount of cache space allocated to each individual storage target. Read [Allocate cache storage](#allocate-cache-storage) for details. + > [!TIP] > Instructions for listing storage targets using Azure CLI are included in the [Add storage targets](hpc-cache-add-storage.md#view-storage-targets) article. Other actions listed here might not yet be available in Azure CLI. ![Screenshot of the Settings > Storage targets page in the Azure portal. There are multiple storage targets in the list, and column headings show Name, Type, State, Provisioning state, Address/Container, and Usage model for each one.](media/storage-targets-list-states.png) + + ## Manage storage targets You can perform management actions on individual storage targets. These actions supplement the cache-level options discussed in [Manage your cache](hpc-cache-manage.md). @@ -153,6 +157,24 @@ The **State** value affects which management options you can use. Here's a short * **Suspended** - The storage target has been taken offline. You can still flush, delete, or force remove this storage target. Choose **Resume** to put the target back in service. * **Flushing** - The storage target is writing data to the back-end storage. The target can't process client requests while flushing, but it will automatically go back to its previous state after it finishes writing data. +## Allocate cache storage + +Optionally, you can configure the amount of cache storage that can be used by each storage target. This feature lets you plan ahead so that space is available to store a particular storage system's files. + +If you do not customize the storage allocation, each storage target receives an equal share of the available cache space. + +Click the **Allocate storage** button to customize the cache allocation. + +![Screenshot of the storage targets page in the Azure portal. The mouse pointer is over the 'Allocate storage' button.](media/allocate-storage-button.png) + +On the **Allocate storage** blade, enter the percentage of cache space you want to assign to each storage target. The storage allocations must total 100%. 
+ +Remember that some cache space is used for overhead, so the total amount of space available for cached files is not exactly the same as the capacity you chose when you created your HPC Cache. + +![Screenshot of the 'Allocate storage' panel at the right side of the storage targets list. Text fields next to each storage target name allow you to enter a new percent value for each target. The screenshot has target 'blob01' set to 75% and target 'blob02' set to 50%. The total is calculated underneath as 125% and an error message explains that the total must be 100%. The Save button is inactive; the Discard button is active.](media/allocate-storage-blade.png) + +Click **Save** to complete the allocation. + ## Next steps * Learn about [cache-level management actions](hpc-cache-manage.md) diff --git a/articles/hpc-cache/media/allocate-storage-blade.png b/articles/hpc-cache/media/allocate-storage-blade.png new file mode 100644 index 0000000000000..c8117f44f565d Binary files /dev/null and b/articles/hpc-cache/media/allocate-storage-blade.png differ diff --git a/articles/hpc-cache/media/allocate-storage-button.png b/articles/hpc-cache/media/allocate-storage-button.png new file mode 100644 index 0000000000000..d98e557264faa Binary files /dev/null and b/articles/hpc-cache/media/allocate-storage-button.png differ diff --git a/articles/hpc-cache/media/create-priming-job.png b/articles/hpc-cache/media/create-priming-job.png index 369f53a19989d..92d73c6e09736 100644 Binary files a/articles/hpc-cache/media/create-priming-job.png and b/articles/hpc-cache/media/create-priming-job.png differ diff --git a/articles/hpc-cache/media/hpc-cache-upgrade-button.png b/articles/hpc-cache/media/hpc-cache-upgrade-button.png index 1d2d95c70e8b6..d244074ae14c6 100644 Binary files a/articles/hpc-cache/media/hpc-cache-upgrade-button.png and b/articles/hpc-cache/media/hpc-cache-upgrade-button.png differ diff --git a/articles/hpc-cache/media/prime-cache-context.png b/articles/hpc-cache/media/prime-cache-context.png new file mode 100644 index 0000000000000..412faaf76024d Binary files /dev/null and b/articles/hpc-cache/media/prime-cache-context.png differ diff --git a/articles/hpc-cache/media/prime-cache-list.png b/articles/hpc-cache/media/prime-cache-list.png deleted file mode 100644 index 8dd0899f2f73c..0000000000000 Binary files a/articles/hpc-cache/media/prime-cache-list.png and /dev/null differ diff --git a/articles/hpc-cache/media/prime-overview.png b/articles/hpc-cache/media/prime-overview.png new file mode 100644 index 0000000000000..09c9bc4073492 Binary files /dev/null and b/articles/hpc-cache/media/prime-overview.png differ diff --git a/articles/hpc-cache/media/priming-preview.png b/articles/hpc-cache/media/priming-preview.png deleted file mode 100644 index c5c740e07a223..0000000000000 Binary files a/articles/hpc-cache/media/priming-preview.png and /dev/null differ diff --git a/articles/hpc-cache/media/storage-target-manage-options.png b/articles/hpc-cache/media/storage-target-manage-options.png index d015aaf1f1a32..ee6665ef8db4a 100644 Binary files a/articles/hpc-cache/media/storage-target-manage-options.png and b/articles/hpc-cache/media/storage-target-manage-options.png differ diff --git a/articles/hpc-cache/media/storage-targets-list-states.png b/articles/hpc-cache/media/storage-targets-list-states.png index 5bf18c95008d4..49ab7263699ee 100644 Binary files a/articles/hpc-cache/media/storage-targets-list-states.png and b/articles/hpc-cache/media/storage-targets-list-states.png differ diff --git 
a/articles/hpc-cache/media/upgrade-reset-date.png b/articles/hpc-cache/media/upgrade-reset-date.png new file mode 100644 index 0000000000000..8614d65466fa6 Binary files /dev/null and b/articles/hpc-cache/media/upgrade-reset-date.png differ diff --git a/articles/hpc-cache/media/upgrade-schedule.png b/articles/hpc-cache/media/upgrade-schedule.png new file mode 100644 index 0000000000000..0c80c7b41c944 Binary files /dev/null and b/articles/hpc-cache/media/upgrade-schedule.png differ diff --git a/articles/hpc-cache/prime-cache.md b/articles/hpc-cache/prime-cache.md index 5a160f3d7150a..e6039380bfb64 100644 --- a/articles/hpc-cache/prime-cache.md +++ b/articles/hpc-cache/prime-cache.md @@ -1,19 +1,16 @@ --- -title: Pre-load files in Azure HPC Cache (Preview) -description: Use the cache priming feature (preview) to populate or preload cache contents before files are requested -author: ronhogue +title: Pre-load files in Azure HPC Cache +description: Use the cache priming feature to populate or preload cache contents before files are requested +author: ekpgh ms.service: hpc-cache ms.topic: how-to -ms.date: 02/03/2022 -ms.author: rohogue +ms.date: 06/01/2022 +ms.author: v-erinkelly --- -# Pre-load files in Azure HPC Cache (preview) +# Pre-load files in Azure HPC Cache -> [!IMPORTANT] -> Cache priming is currently in PREVIEW. See the [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/) for legal terms that apply to Azure features that are in beta, preview, or otherwise not yet released into general availability. - -Azure HPC Cache’s priming feature (preview) allows customers to pre-load files in the cache. +Azure HPC Cache’s priming feature allows customers to pre-load files in the cache. You can use this feature to fetch your expected working set of files and populate the cache before work begins. This technique is sometimes called cache warming. @@ -183,16 +180,15 @@ The cache accesses the manifest file once when the priming job starts. The SAS U Use the Azure portal to create a priming job. View your Azure HPC Cache in the portal and select the **Prime cache** page under the **Settings** heading. -![screenshot of the Priming page in the portal, with several completed jobs.](media/priming-preview.png) - +![screenshot of the Priming page in the portal, with several jobs in various states.](media/prime-overview.png) -Click the **Add priming job** text at the top of the table to define a new job. +Click the **Start priming job** text at the top of the table to define a new job. In the **Job name** field, type a unique name for the priming job. Use the **Priming file** field to select your priming manifest file. Select the storage account, container, and file where your priming manifest is stored. -![screenshot of the Add priming job page, with a job name and priming file path filled in. Below the Priming file field is a link labeled "Select from existing blob location".](media/create-priming-job.png) +![screenshot of the Start priming job page, with a job name and priming file path filled in. Below the Priming file field is a link labeled "Select from existing blob location".](media/create-priming-job.png) To select the priming manifest file, click the link to select a storage target. Then select the container where your .json manifest file is stored. @@ -202,29 +198,33 @@ If you can’t find the manifest file, your cache might not be able to access th Priming jobs are listed in the **Prime cache** page in the Azure portal. 
-![screenshot of the priming jobs list in the portal, with jobs in various states (running, paused, and success). The cursor has clicked the ... symbol at the right side of one job's row, and a context menu shows options to pause or resume.](media/prime-cache-list.png) +This page shows each job's name, its state, its current status, and summary statistics about the priming progress. The summary in the **Details** column updates periodically as the job progresses. The **Job status** field is populated when a priming job starts; this field also gives basic error information like **Invalid manifest** if a problem occurs. + +While a job is running, the **Percentage complete** column shows an estimate of the progress. -This page shows each job's name, its state, its current status, and summary statistics about the priming progress. The summary in the **Details** column updates periodically as the job progresses. The **Status** field is populated when a priming job starts; this field also gives basic error information like **Invalid manifest** if a problem occurs. +Before a priming job starts, it has the state **Queued**. Its **Job status**, **Percentage complete**, and **Details** fields are empty. -Before a priming job starts, it has the state **Queued**. Its **Status** and **Details** fields are empty. +![screenshot of the priming jobs list in the portal, with jobs in various states (running, paused, and success). The cursor has clicked the ... symbol at the right side of one job's row, and a context menu shows options to pause or resume.](media/prime-cache-context.png) -Click the **...** section at the right of the table to pause or resume a priming job. +Click the **...** section at the right of the table to pause or resume a priming job. (It might take a few minutes for the status to update.) -To delete a priming job, select it in the list and use the delete control at the top of the table. +To delete a priming job, select it in the list and use the **Stop** control at the top of the table. You can use the **Stop** control to delete a job in any state. ## Azure REST APIs -You can use these REST API endpoints to create an HPC Cache priming job. These are part of the `2021-10-01-preview` version of the REST API, so make sure you use that string in the *api_version* term. +You can use these REST API endpoints to create and manage HPC Cache priming jobs. These are part of the `2022-05-01` version of the REST API, so make sure you use that string in the *api_version* term. -Read the [Azure REST API reference](/rest/api/azure/) to learn how to use this interface. +Read the [Azure REST API reference](/rest/api/azure/) to learn how to use these tools. -### Add a priming job +### Add a new priming job + +The `startPrimingJob` interface creates and queues a priming job. The job starts automatically when resources are available. ```rest URL: POST - https://MY-ARM-HOST/subscriptions/MY-SUBSCRIPTION-ID/resourceGroups/MY-RESOURCE-GROUP-NAME/providers/Microsoft.StorageCache/caches/MY-CACHE-NAME/addPrimingJob?api-version=2021-10-01-preview + https://MY-ARM-HOST/subscriptions/MY-SUBSCRIPTION-ID/resourceGroups/MY-RESOURCE-GROUP-NAME/providers/Microsoft.StorageCache/caches/MY-CACHE-NAME/startPrimingJob?api-version=2022-05-01 BODY: { @@ -236,14 +236,20 @@ URL: POST For the `primingManifestUrl` value, pass the file’s SAS URL or other HTTPS URL that is accessible to the cache. Read [Upload the priming manifest file](#upload-the-priming-manifest-file) to learn more. 
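+If you prefer to drive these endpoints from a shell, the following sketch shows one way to queue a priming job with the general-purpose `az rest` command. The storage account, container, job name, and the `primingJobName` body property are illustrative assumptions; confirm the exact request schema in the [Azure REST API reference](/rest/api/azure/).
+
+```azurecli
+# Hypothetical example: generate a read-only SAS URL for the manifest blob.
+# Assumes the signed-in account is allowed to retrieve the storage account key.
+MANIFEST_URL=$(az storage blob generate-sas \
+    --account-name mystorageaccount \
+    --container-name priming \
+    --name priming-manifest.json \
+    --permissions r \
+    --expiry 2022-12-31T00:00Z \
+    --https-only \
+    --full-uri \
+    --output tsv)
+
+# Queue the priming job by calling the startPrimingJob endpoint.
+az rest --method post \
+    --url "https://management.azure.com/subscriptions/MY-SUBSCRIPTION-ID/resourceGroups/MY-RESOURCE-GROUP-NAME/providers/Microsoft.StorageCache/caches/MY-CACHE-NAME/startPrimingJob?api-version=2022-05-01" \
+    --headers "Content-Type=application/json" \
+    --body "{\"primingJobName\": \"prime-working-set\", \"primingManifestUrl\": \"$MANIFEST_URL\"}"
+```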
-### Remove a priming job +### Stop a priming job + +The `stopPrimingJob` interface cancels a job (if it is running) and removes it from the job list. Use this interface to delete a priming job in any state. ```rest URL: POST - https://MY-ARM-HOST/subscriptions/MY-SUBSCRIPTION-ID/resourceGroups/MY-RESOURCE-GROUP-NAME/providers/Microsoft.StorageCache/caches/MY-CACHE-NAME/removePrimingJob/MY-JOB-ID-TO-REMOVE?api-version=2021-10-01-preview + https://MY-ARM-HOST/subscriptions/MY-SUBSCRIPTION-ID/resourceGroups/MY-RESOURCE-GROUP-NAME/providers/Microsoft.StorageCache/caches/MY-CACHE-NAME/stopPrimingJob?api-version=2022-05-01 BODY: + { + "primingJobId": "MY-JOB-ID-TO-REMOVE" + } + ``` ### Get priming jobs @@ -255,12 +261,44 @@ Priming job names and IDs are returned, along with other information. ```rest URL: GET - https://MY-ARM-HOST/subscriptions/MY-SUBSCRIPTION-ID/resourceGroups/MY-RESOURCE-GROUP-NAME/providers/Microsoft.StorageCache/caches/MY-CACHE-NAME?api-version=2021-10-01-preview + https://MY-ARM-HOST/subscriptions/MY-SUBSCRIPTION-ID/resourceGroups/MY-RESOURCE-GROUP-NAME/providers/Microsoft.StorageCache/caches/MY-CACHE-NAME?api-version=2022-05-01 BODY: ``` +### Pause a priming job + +The `pausePrimingJob` interface suspends a running job. + +```rest + +URL: POST + https://MY-ARM-HOST/subscriptions/MY-SUBSCRIPTION-ID/resourceGroups/MY-RESOURCE-GROUP-NAME/providers/Microsoft.StorageCache/caches/MY-CACHE-NAME/pausePrimingJob?api-version=2022-05-01 + +BODY: + { + "primingJobId": "MY-JOB-ID-TO-PAUSE" + } + +``` + +### Resume a priming job + +Use the `resumePrimingJob` interface to reactivate a suspended priming job. + +```rest + +URL: POST + https://MY-ARM-HOST/subscriptions/MY-SUBSCRIPTION-ID/resourceGroups/MY-RESOURCE-GROUP-NAME/providers/Microsoft.StorageCache/caches/MY-CACHE-NAME/resumePrimingJob?api-version=2022-05-01 + +BODY: + { + "primingJobId": "MY-JOB-ID-TO-RESUME" + } + +``` + ## Frequently asked questions * Can I reuse a priming job? @@ -271,7 +309,7 @@ BODY: * How long does a failed or completed priming job stay in the list? - Priming jobs persist in the list until you delete them. On the portal **Prime cache** page, check the checkbox next to the job and select the **Delete** control at the top of the list. + Priming jobs persist in the list until you delete them. On the portal **Prime cache** page, check the checkbox next to the job and select the **Stop** control at the top of the list to delete the job. * What happens if the content I’m pre-loading is larger than my cache storage? @@ -279,5 +317,5 @@ BODY: ## Next steps -* For help with HPC Cache priming (preview) or to report a problem, use the standard Azure support process, described in [Get help with Azure HPC Cache](hpc-cache-support-ticket.md). +* For more help with HPC Cache priming, follow the process in [Get help with Azure HPC Cache](hpc-cache-support-ticket.md). 
* Learn more about [Azure REST APIs](/rest/api/azure/) diff --git a/articles/hpc-cache/troubleshoot-nas.md b/articles/hpc-cache/troubleshoot-nas.md index 56b2c64bac3ae..4059ce420f81e 100644 --- a/articles/hpc-cache/troubleshoot-nas.md +++ b/articles/hpc-cache/troubleshoot-nas.md @@ -1,18 +1,18 @@ --- title: Troubleshoot Azure HPC Cache NFS storage targets description: Tips to avoid and fix configuration errors and other problems that can cause failure when creating an NFS storage target -author: femila +author: ekpgh ms.service: hpc-cache ms.topic: troubleshooting -ms.date: 03/18/2020 -ms.author: femila +ms.date: 05/27/2022 +ms.author: v-erinkelly --- # Troubleshoot NAS configuration and NFS storage target issues This article gives solutions for some common configuration errors and other issues that could prevent Azure HPC Cache from adding an NFS storage system as a storage target. -This article includes details about how to check ports and how to enable root access to a NAS system. It also includes detailed information about less common issues that might cause NFS storage target creation to fail. +This article includes details about how to check ports and how to enable needed access to a NAS system. It also includes detailed information about less common issues that might cause NFS storage target creation to fail. > [!TIP] > Before using this guide, read [prerequisites for NFS storage targets](hpc-cache-prerequisites.md#nfs-storage-requirements). @@ -47,26 +47,42 @@ Make sure that all of the ports returned by the ``rpcinfo`` query allow unrestri Check these settings both on the NAS itself and also on any firewalls between the storage system and the cache subnet. -## Check root access +## Check root squash settings -Azure HPC Cache needs access to your storage system's exports to create the storage target. Specifically, it mounts the exports as user ID 0. +Root squash settings can disrupt file access if they are improperly configured. You should check that the settings on each storage export and on the matching HPC Cache client access policies are appropriate. -Different storage systems use different methods to enable this access: +Root squash prevents requests sent by a local superuser root on the client from being sent to a back-end storage system as root. It reassigns requests from root to a non-privileged user ID (UID) like 'nobody'. -* Linux servers generally add ``no_root_squash`` to the exported path in ``/etc/exports``. -* NetApp and EMC systems typically control access with export rules that are tied to specific IP addresses or networks. +> [!TIP] +> +> Previous versions of Azure HPC Cache required NAS storage systems to allow root access from the HPC Cache. Now, you don't need to allow root access on a storage target export unless you want HPC Cache clients to have root access to the export. + +Root squash can be configured in an HPC Cache system in these places: + +* At the Azure HPC Cache - Use [client access policies](access-policies.md#root-squash) to configure root squash for clients that match specific filter rules. A client access policy is part of each NFS storage target namespace path. + + The default client access policy does not squash root. + +* At the storage export - You can configure your storage system to reassign incoming requests from root to a non-privileged user ID (UID). + +If your storage system export squashes root, you should update the HPC Cache client access rule for that storage target to also squash root. 
If not, you can have access problems when you try to read or write to the back-end storage system through the HPC Cache. -If using export rules, remember that the cache can use multiple different IP addresses from the cache subnet. Allow access from the full range of possible subnet IP addresses. +This table illustrates the behavior for different root squash scenarios when a client request is sent as UID 0 (root). The scenario marked with * is ***not recommended*** because it can cause access problems. -> [!NOTE] -> Although the cache needs root access to the back-end storage system, you can restrict access for clients that connect through the cache. Read [Control client access](access-policies.md#root-squash) for details. +| Setting | UID sent from client | UID sent from HPC Cache | Effective UID on back-end storage | +|--|--|--|--| +| no root squash | 0 (root) | 0 (root) | 0 (root) | +| root squash at HPC Cache only | 0 (root) | 65534 (nobody) | 65534 (nobody) | +| *root squash at NAS storage only | 0 (root) | 0 (root) | 65534 (nobody) | +| root squash at HPC Cache and NAS | 0 (root) | 65534 (nobody) | 65534 (nobody) | -Work with your NAS storage vendor to enable the right level of access for the cache. +(UID 65534 is an example; when you turn on root squash in a client access policy you can customize the UID.) -### Allow root access on directory paths - +## Check access on directory paths + + -For NAS systems that export hierarchical directories, Azure HPC Cache needs root access to each export level. +For NAS systems that export hierarchical directories, check that Azure HPC Cache has appropriate access to each export level in the path to the files you are using. For example, a system might show three exports like these: @@ -76,7 +92,7 @@ For example, a system might show three exports like these: The export ``/ifs/accounting/payroll`` is a child of ``/ifs/accounting``, and ``/ifs/accounting`` is itself a child of ``/ifs``. -If you add the ``payroll`` export as an HPC Cache storage target, the cache actually mounts ``/ifs/`` and accesses the payroll directory from there. So Azure HPC Cache needs root access to ``/ifs`` in order to access the ``/ifs/accounting/payroll`` export. +If you add the ``payroll`` export as an HPC Cache storage target, the cache actually mounts ``/ifs/`` and accesses the payroll directory from there. So Azure HPC Cache needs sufficient access to ``/ifs`` in order to access the ``/ifs/accounting/payroll`` export. This requirement is related to the way the cache indexes files and avoids file collisions, using file handles that the storage system provides. @@ -84,7 +100,7 @@ A NAS system with hierarchical exports can give different file handles for the s The back-end storage system keeps internal aliases for file handles, but Azure HPC Cache cannot tell which file handles in its index reference the same item. So it is possible that the cache can have different writes cached for the same file, and apply the changes incorrectly because it does not know that they are the same file. -To avoid this possible file collision for files in multiple exports, Azure HPC Cache automatically mounts the shallowest available export in the path (``/ifs`` in the example) and uses the file handle given from that export. If multiple exports use the same base path, Azure HPC Cache needs root access to that path. 
+To avoid this possible file collision for files in multiple exports, Azure HPC Cache automatically mounts the shallowest available export in the path (``/ifs`` in the example) and uses the file handle given from that export. If multiple exports use the same base path, Azure HPC Cache needs access to that path. :::moniker-end @@ -101,7 +101,7 @@ For information about IoT Edge for Linux on Windows updates, see [EFLOW Updates] :::moniker range=">=iotedge-2020-11" >[!NOTE] ->Currently, there is not support for IoT Edge version 1.2 running on Windows devices. +>Currently, there's no support for IoT Edge version 1.2 running on Windows devices. > >To view the steps for updating IoT Edge for Linux on Windows, see [IoT Edge 1.1](?view=iotedge-2018-06&preserve-view=true&tabs=windows). @@ -113,15 +113,15 @@ For information about IoT Edge for Linux on Windows updates, see [EFLOW Updates] With IoT Edge for Windows, IoT Edge runs directly on the Windows device. -Use the `Update-IoTEdge` command to update the security daemon. The script automatically pulls the latest version of the security daemon. +Use the `Update-IoTEdge` command to update the module runtime. The script automatically pulls the latest version of the module runtime. ```powershell . {Invoke-WebRequest -useb aka.ms/iotedge-win} | Invoke-Expression; Update-IoTEdge ``` -Running the Update-IoTEdge command removes and updates the security daemon from your device, along with the two runtime container images. The config.yaml file is kept on the device, as well as data from the Moby container engine. Keeping the configuration information means that you don't have to provide the connection string or Device Provisioning Service information for your device again during the update process. +Running the `Update-IoTEdge` command removes and updates the runtime module from your device, along with the two runtime container images. The config.yaml file is kept on the device, as well as data from the Moby container engine. Keeping the configuration information means that you don't have to provide the connection string or Device Provisioning Service information for your device again during the update process. -If you want to update to a specific version of the security daemon, find the version from 1.1 release channel you want to target from [IoT Edge releases](https://github.com/Azure/azure-iotedge/releases). In that version, download the **Microsoft-Azure-IoTEdge.cab** file. Then, use the `-OfflineInstallationPath` parameter to point to the local file location. For example: +If you want to update to a specific version of the security subsystem, find the version from 1.1 release channel you want to target from [IoT Edge releases](https://github.com/Azure/azure-iotedge/releases). In that version, download the **Microsoft-Azure-IoTEdge.cab** file. Then, use the `-OfflineInstallationPath` parameter to point to the local file location. For example: ```powershell . {Invoke-WebRequest -useb aka.ms/iotedge-win} | Invoke-Expression; Update-IoTEdge -OfflineInstallationPath @@ -168,7 +168,7 @@ The IoT Edge agent and IoT Edge hub images are tagged with the IoT Edge version If you use rolling tags in your deployment (for example, mcr.microsoft.com/azureiotedge-hub:**1.1**) then you need to force the container runtime on your device to pull the latest version of the image. -Delete the local version of the image from your IoT Edge device. On Windows machines, uninstalling the security daemon also removes the runtime images, so you don't need to take this step again. 
+Delete the local version of the image from your IoT Edge device. On Windows machines, uninstalling the security subsystem also removes the runtime images, so you don't need to take this step again. ```bash docker rmi mcr.microsoft.com/azureiotedge-hub:1.1 @@ -215,7 +215,7 @@ Some of the key differences between 1.2 and earlier versions include: * The package name changed from **iotedge** to **aziot-edge**. * The **libiothsm-std** package is no longer used. If you used the standard package provided as part of the IoT Edge release, then your configurations can be transferred to the new version. If you used a different implementation of libiothsm-std, then any user-provided certificates like the device identity certificate, device CA, and trust bundle will need to be reconfigured. -* A new identity service, **aziot-identity-service** was introduced as part of the 1.2 release. This service handles the identity provisioning and management for IoT Edge and for other device components that need to communicate with IoT Hub, like [Device Update for IoT Hub](../iot-hub-device-update/understand-device-update.md). +* A new identity service, **[aziot-identity-service](https://azure.github.io/iot-identity-service/)** was introduced as part of the 1.2 release. This service handles the identity provisioning and management for IoT Edge and for other device components that need to communicate with IoT Hub, like [Device Update for IoT Hub](../iot-hub-device-update/understand-device-update.md). * The default config file has a new name and location. Formerly `/etc/iotedge/config.yaml`, your device configuration information is now expected to be in `/etc/aziot/config.toml` by default. The `iotedge config import` command can be used to help migrate configuration information from the old location and syntax to the new one. * The import command cannot detect or modify access rules to a device's trusted platform module (TPM). If your device uses TPM attestation, you need to manually update the /etc/udev/rules.d/tpmaccess.rules file to give access to the aziottpm service. For more information, see [Give IoT Edge access to the TPM](how-to-auto-provision-simulated-device-linux.md?view=iotedge-2020-11&preserve-view=true#give-iot-edge-access-to-the-tpm). * The workload API in version 1.2 saves encrypted secrets in a new format. If you upgrade from an older version to version 1.2, the existing master encryption key is imported. The workload API can read secrets saved in the prior format using the imported encryption key. However, the workload API can't write encrypted secrets in the old format. Once a secret is re-encrypted by a module, it is saved in the new format. Secrets encrypted in version 1.2 are unreadable by the same module in version 1.1. If you persist encrypted data to a host-mounted folder or volume, always create a backup copy of the data *before* upgrading to retain the ability to downgrade if necessary. @@ -241,7 +241,7 @@ When you're ready, follow these steps to update IoT Edge on your devices: ```bash sudo apt-get install aziot-edge defender-iot-micro-agent-edge ``` -It is recommended to install the micro agent with the Edge agent to enable security monitoring and hardening of your Edge devices. To learn more about Microsoft Defender for IoT, see [What is Microsoft Defender for IoT for device builders](../defender-for-iot/device-builders/overview.md). +It's recommended to install the micro agent with the Edge agent to enable security monitoring and hardening of your Edge devices. 
To learn more about Microsoft Defender for IoT, see [What is Microsoft Defender for IoT for device builders](../defender-for-iot/device-builders/overview.md). 1. Import your old config.yaml file into its new format, and apply the configuration info. @@ -264,7 +264,7 @@ The IoT Edge agent and hub modules have RC versions that are tagged with the sam As previews, release candidate versions aren't included as the latest version that the regular installers target. Instead, you need to manually target the assets for the RC version that you want to test. For the most part, installing or updating to an RC version is the same as targeting any other specific version of IoT Edge. -Use the sections in this article to learn how to update an IoT Edge device to a specific version of the security daemon or runtime modules. +Use the sections in this article to learn how to update an IoT Edge device to a specific version of the security subsystem or runtime modules. If you're installing IoT Edge, rather than upgrading an existing installation, use the steps in [Offline or specific version installation](how-to-provision-single-device-linux-symmetric.md#offline-or-specific-version-installation-optional). diff --git a/articles/iot-edge/how-to-visual-studio-develop-module.md b/articles/iot-edge/how-to-visual-studio-develop-module.md index 0d043d0648b02..9ae2c5bf501d9 100644 --- a/articles/iot-edge/how-to-visual-studio-develop-module.md +++ b/articles/iot-edge/how-to-visual-studio-develop-module.md @@ -9,46 +9,59 @@ ms.date: 08/24/2021 ms.topic: conceptual ms.service: iot-edge --- -# Use Visual Studio 2019 to develop and debug modules for Azure IoT Edge +# Use Visual Studio 2022 to develop and debug modules for Azure IoT Edge [!INCLUDE [iot-edge-version-all-supported](../../includes/iot-edge-version-all-supported.md)] -This article shows you how to use Visual Studio 2019 to develop and debug Azure IoT Edge modules. +This article shows you how to use Visual Studio 2022 to develop and debug Azure IoT Edge modules. -The Azure IoT Edge Tools for Visual Studio extension provides the following benefits: +The **Azure IoT Edge Tools for Visual Studio** extension provides the following benefits: * Create, edit, build, run, and debug IoT Edge solutions and modules on your local development computer. +* Code your Azure IoT modules in C or C# with the benefits of Visual Studio development. * Deploy your IoT Edge solution to an IoT Edge device via Azure IoT Hub. -* Code your Azure IoT modules in C or C# while having all of the benefits of Visual Studio development. -* Manage IoT Edge devices and modules with UI. +* Manage IoT Edge devices and modules with the UI. -This article shows you how to use the Azure IoT Edge Tools for Visual Studio 2019 to develop your IoT Edge modules. You also learn how to deploy your project to an IoT Edge device. Currently, Visual Studio 2019 provides support for modules written in C and C#. The supported device architectures are Windows X64 and Linux X64 or ARM32. For more information about supported operating systems, languages, and architectures, see [Language and architecture support](module-development.md#language-and-architecture-support). +Visual Studio 2022 provides support for modules written in C and C#. The supported device architectures are Windows x64 and Linux x64 or ARM32, while ARM64 is in preview. For more information about supported operating systems, languages, and architectures, see [Language and architecture support](module-development.md#language-and-architecture-support). 
## Prerequisites -This article assumes that you use a machine running Windows as your development machine. On Windows computers, you can develop either Windows or Linux modules. +This article assumes that you use a machine running Windows as your development machine. -* To develop modules with **Windows containers**, use a Windows computer running version 1809/build 17763 or newer. -* To develop modules with **Linux containers**, use a Windows computer that meets the [requirements for Docker Desktop](https://docs.docker.com/docker-for-windows/install/#what-to-know-before-you-install). +* On Windows computers, you can develop either Windows or Linux modules. -Install Visual Studio on your development machine. Make sure you include the **Azure development** and **Desktop development with C++** workloads in your Visual Studio 2019 installation. You can [Modify Visual Studio 2019](/visualstudio/install/modify-visual-studio?view=vs-2019&preserve-view=true) to add the required workloads. + * To develop modules with **Windows containers**, use a Windows computer running version 1809/build 17763 or newer. + * To develop modules with **Linux containers**, use a Windows computer that meets the [requirements for Docker Desktop](https://docs.docker.com/docker-for-windows/install/#what-to-know-before-you-install). -After your Visual Studio 2019 is ready, you also need the following tools and components: +* Install Visual Studio on your development machine. Make sure you include the **Azure development** and **Desktop development with C++** workloads in your Visual Studio 2022 installation. Alternatively, you can [Modify Visual Studio 2022](/visualstudio/install/modify-visual-studio?view=vs-2022&preserve-view=true) to add the required workloads, if Visual Studio is already installed on your machine. -* Download and install [Azure IoT Edge Tools](https://marketplace.visualstudio.com/items?itemName=vsc-iot.vs16iotedgetools) from the Visual Studio marketplace to create an IoT Edge project in Visual Studio 2019. +* Install the Azure IoT Edge Tools either from the Marketplace or from Visual Studio: - > [!TIP] - > If you are using Visual Studio 2017, download and install [Azure IoT Edge Tools for VS 2017](https://marketplace.visualstudio.com/items?itemName=vsc-iot.vsiotedgetools) from the Visual Studio marketplace + * Download and install [Azure IoT Edge Tools](https://marketplace.visualstudio.com/items?itemName=vsc-iot.vs17iotedgetools) from the Visual Studio Marketplace. + + > [!TIP] + > If you are using Visual Studio 2019, download and install [Azure IoT Edge Tools for VS 2019](https://marketplace.visualstudio.com/items?itemName=vsc-iot.vs16iotedgetools) from the Visual Studio marketplace + + * Or, in Visual Studio go to **Tools > Get Tools and Features**. The Visual Studio Installer will open. From the **Individual components** tab, select **Azure IoT Edge Tools for VS 2022**, then select **Install** in the lower right of the popup. Close the popup when finished. + + If you only need to update your tools, go to the **Manage Extensions** window, expand **Updates > Visual Studio Marketplace**, select **Azure IoT Edge Tools** then select **Update**. + + After the update is complete, select **Close** and restart Visual Studio. -* Download and install [Docker Community Edition](https://docs.docker.com/install/) on your development machine to build and run your module images. 
You'll need to set Docker CE to run in either Linux container mode or Windows container mode, depending on the type of modules you are developing. +* Download and install [Docker Community Edition](https://docs.docker.com/install/) on your development machine to build and run your module images. Set Docker CE to run in either Linux container mode or Windows container mode, depending on the type of modules you are developing. -* Set up your local development environment to debug, run, and test your IoT Edge solution by installing the [Azure IoT EdgeHub Dev Tool](https://pypi.org/project/iotedgehubdev/). Install [Python (3.5/3.6/3.7/3.8) and Pip](https://www.python.org/) and then install the **iotedgehubdev** package by running the following command in your terminal. Make sure your Azure IoT EdgeHub Dev Tool version is greater than 0.3.0. +* Set up your local development environment to debug, run, and test your IoT Edge solution by installing the [Azure IoT EdgeHub Dev Tool](https://pypi.org/project/iotedgehubdev/). Install [Python (3.5/3.6/3.7/3.8) and Pip](https://www.python.org/) and then install the **iotedgehubdev** package by running the following command in your terminal. ```cmd pip install --upgrade iotedgehubdev ``` + + > [!TIP] + >Make sure your Azure IoT EdgeHub Dev Tool version is greater than 0.3.0. You'll need to have a pre-existing IoT Edge device in the Azure portal and have your connection string ready during setup. -* Install the Vcpkg library manager, and then install the **azure-iot-sdk-c package** for Windows. + You may need to restart Visual Studio to complete the installation. + +* Install the **Vcpkg** library manager ```cmd git clone https://github.com/Microsoft/vcpkg @@ -56,6 +69,7 @@ After your Visual Studio 2019 is ready, you also need the following tools and co bootstrap-vcpkg.bat ``` + Install the **azure-iot-sdk-c** package for Windows ```cmd vcpkg.exe install azure-iot-sdk-c:x64-windows vcpkg.exe --triplet x64-windows integrate install @@ -66,73 +80,76 @@ After your Visual Studio 2019 is ready, you also need the following tools and co > [!TIP] > You can use a local Docker registry for prototype and testing purposes instead of a cloud registry. -* To test your module on a device, you'll need an active IoT hub with at least one IoT Edge device. To quickly create an IoT Edge device for testing, follow the steps in the quickstart for [Linux](quickstart-linux.md) or [Windows](quickstart.md). If you are running IoT Edge daemon on your development machine, you might need to stop EdgeHub and EdgeAgent before you start development in Visual Studio. - -### Check your tools version +* To test your module on a device, you'll need an active IoT Hub with at least one IoT Edge device. To create an IoT Edge device for testing you can create one in the Azure portal or with the CLI: -1. From the **Extensions** menu, select **Manage Extensions**. Expand **Installed > Tools** and you can find **Azure IoT Edge Tools for Visual Studio** and **Cloud Explorer for Visual Studio**. + * Creating one in the [Azure portal](https://portal.azure.com/) is the quickest. From the Azure portal, go to your IoT Hub resource. Select **IoT Edge** from the menu on the left and then select **Add IoT Edge Device**. -1. Note the installed version. 
You can compare this version with the latest version on Visual Studio Marketplace ([Cloud Explorer](https://marketplace.visualstudio.com/items?itemName=ms-azuretools.CloudExplorerForVS2019), [Azure IoT Edge](https://marketplace.visualstudio.com/items?itemName=vsc-iot.vs16iotedgetools)) + :::image type="content" source="./media/how-to-visual-studio-develop-module/create-new-iot-edge-device.png" alt-text="Screenshot of how to add a new I o T Edge device"::: + + A new popup called **Create a device** will appear. Add a name to your device (known as the Device ID), then select **Save** in the lower left. + + Finally, confirm that your new device exists in your IoT Hub, from the **Device management > IoT Edge** menu. For more information on creating an IoT Edge device through the Azure portal, read [Create and provision an IoT Edge device on Linux using symmetric keys](how-to-provision-single-device-linux-symmetric.md). -1. If your version is older than what's available on Visual Studio Marketplace, update your tools in Visual Studio as shown in the following section. + * To create an IoT Edge device with the CLI follow the steps in the quickstart for [Linux](quickstart-linux.md#register-an-iot-edge-device) or [Windows](quickstart.md#register-an-iot-edge-device). In the process of registering an IoT Edge device, you create an IoT Edge device. -> [!NOTE] -> If you are using Visual Studio 2022, [Cloud Explorer](/visualstudio/azure/vs-azure-tools-resources-managing-with-cloud-explorer?view=vs-2022&preserve-view=true) is retired. To deploy Azure IoT Edge modules, use [Azure CLI](how-to-deploy-modules-cli.md?view=iotedge-2020-11&preserve-view=true) or [Azure portal](how-to-deploy-modules-portal.md?view=iotedge-2020-11&preserve-view=true). - -### Update your tools - -1. In the **Manage Extensions** window, expand **Updates > Visual Studio Marketplace**, select **Azure IoT Edge Tools** or **Cloud Explorer for Visual Studio** and select **Update**. - -1. After the tools update is downloaded, close Visual Studio to trigger the tools update using the VSIX installer. - -1. In the installer, select **OK** to start and then **Modify** to update the tools. - -1. After the update is complete, select **Close** and restart Visual Studio. + If you are running the IoT Edge daemon on your development machine, you might need to stop EdgeHub and EdgeAgent before you start development in Visual Studio. ## Create an Azure IoT Edge project -The IoT Edge project template in Visual Studio creates a solution that can be deployed to IoT Edge devices. First you create an Azure IoT Edge solution, and then you generate the first module in that solution. Each IoT Edge solution can contain more than one module. +The IoT Edge project template in Visual Studio creates a solution that can be deployed to IoT Edge devices. In summary, first you'll create an Azure IoT Edge solution, and then you'll generate the first module in that solution. Each IoT Edge solution can contain more than one module. + +In all, we're going to build three projects in our solution. The main module that contains EdgeAgent and EdgeHub, in addition to the temperature sensor module, then you'll add two more IoT Edge modules. > [!TIP] -> The IoT Edge project structure created by Visual Studio is not the same as in Visual Studio Code. +> The IoT Edge project structure created by Visual Studio is not the same as the one in Visual Studio Code. 1. In Visual Studio, create a new project. -1. On the **Create a new project** page, search for **Azure IoT Edge**. 
Select the project that matches the platform and architecture for your IoT Edge device, and click **Next**. +1. In the **Create a new project** window, search for **Azure IoT Edge**. Select the project that matches the platform and architecture for your IoT Edge device, and click **Next**. :::image type="content" source="./media/how-to-visual-studio-develop-module/create-new-project.png" alt-text="Create New Project"::: -1. On the **Configure your new project** page, enter a name for your project and specify the location, then select **Create**. +1. In the **Configure your new project** window, enter a name for your project and specify the location, then select **Create**. -1. On the **Add Module** window, select the type of module you want to develop. You can also select **Existing module** to add an existing IoT Edge module to your deployment. Specify your module name and module image repository. +1. In the **Add Module** window, select the type of module you want to develop. You can also select **Existing module** to add an existing IoT Edge module to your deployment. Specify your module name and module image repository. - Visual Studio autopopulates the repository URL with **localhost:5000/**. If you use a local Docker registry for testing, then **localhost** is fine. If you use Azure Container Registry, then replace **localhost:5000** with the login server from your registry's settings. The login server looks like **_\_.azurecr.io**.The final result should look like **\<*registry name*\>.azurecr.io/_\_**. + Visual Studio autopopulates the repository URL with **localhost:5000/**. If you use a local Docker registry for testing, then **localhost** is fine. If you use Azure Container Registry, then replace **localhost:5000** with the login server from your registry's settings. + + The login server looks like **_\_.azurecr.io**.The final result should look like **\<*registry name*\>.azurecr.io/_\_**, for example **my-registry-name.azurecr.io/my-module-name**. Select **Add** to add your module to the project. ![Add Application and Module](./media/how-to-visual-studio-develop-csharp-module/add-module.png) + > [!NOTE] + >If you have an existing IoT Edge project, you can still change the repository URL by opening the **module.json** file. The repository URL is located in the 'repository' property of the JSON file. + Now you have an IoT Edge project and an IoT Edge module in your Visual Studio solution. -The module folder contains a file for your module code, named either `program.cs` or `main.c` depending on the language you chose. This folder also contains a file named `module.json` that describes the metadata of your module. Various Docker files provide the information needed to build your module as a Windows or Linux container. +#### Project structure + +In your solution is a main project folder and a single module folder. Both are on the project level. The main project folder contains your deployment manifest. -The project folder contains a list of all the modules included in that project. Right now it should show only one module, but you can add more. For more information about adding modules to a project, see the [Build and debug multiple modules](#build-and-debug-multiple-modules) section later in this article. +The module project folder contains a file for your module code named either `program.cs` or `main.c` depending on the language you chose. This folder also contains a file named `module.json` that describes the metadata of your module. 
Various Docker files included here provide the information needed to build your module as a Windows or Linux container. +#### Deployment manifest of your project -The project folder also contains a file named `deployment.template.json`. This file is a template of an IoT Edge deployment manifest, which defines all the modules that will run on a device along with how they will communicate with each other. For more information about deployment manifests, see [Learn how to deploy modules and establish routes](module-composition.md). If you open this deployment template, you see that the two runtime modules, **edgeAgent** and **edgeHub** are included, along with the custom module that you created in this Visual Studio project. A fourth module named **SimulatedTemperatureSensor** is also included. This default module generates simulated data that you can use to test your modules, or delete if it's not necessary. To see how the simulated temperature sensor works, view the [SimulatedTemperatureSensor.csproj source code](https://github.com/Azure/iotedge/tree/master/edge-modules/SimulatedTemperatureSensor). +The deployment manifest you'll edit is called `deployment.debug.template.json`. This file is a template of an IoT Edge deployment manifest, which defines all the modules that run on a device along with how they communicate with each other. For more information about deployment manifests, see [Learn how to deploy modules and establish routes](module-composition.md). + +If you open this deployment template, you see that the two runtime modules, **edgeAgent** and **edgeHub** are included, along with the custom module that you created in this Visual Studio project. A fourth module named **SimulatedTemperatureSensor** is also included. This default module generates simulated data that you can use to test your modules, or delete if it's not necessary. To see how the simulated temperature sensor works, view the [SimulatedTemperatureSensor.csproj source code](https://github.com/Azure/iotedge/tree/master/edge-modules/SimulatedTemperatureSensor). ### Set IoT Edge runtime version The IoT Edge extension defaults to the latest stable version of the IoT Edge runtime when it creates your deployment assets. Currently, the latest stable version is version 1.2. If you're developing modules for devices running the 1.1 long-term support version or the earlier 1.0 version, update the IoT Edge runtime version in Visual Studio to match. -1. In the Solution Explorer, right-click the name of your project and select **Set IoT Edge runtime version**. +1. In the Solution Explorer, right-click the name of your main project and select **Set IoT Edge runtime version**. - :::image type="content" source="./media/how-to-visual-studio-develop-module/set-iot-edge-runtime-version.png" alt-text="Right-click your project name and select set IoT Edge runtime version."::: + :::image type="content" source="./media/how-to-visual-studio-develop-module/set-iot-edge-runtime-version.png" alt-text="Screenshot of how to find and select the menu item named 'Set I o T Edge Runtime version'."::: -1. Use the drop-down menu to choose the runtime version that your IoT Edge devices are running, then select **OK** to save your changes. +1. Use the drop-down menu to choose the runtime version that your IoT Edge devices are running, then select **OK** to save your changes. If no change was made, select **Cancel** to exit. -1. Re-generate your deployment manifest with the new runtime version. 
Right-click the name of your project and select **Generate deployment for IoT Edge**. +1. If you changed the version, re-generate your deployment manifest by right-clicking the name of your project and select **Generate deployment for IoT Edge**. This will generate a deployment manifest based on your deployment template and will appear in the **config** folder of your Visual Studio project. -## Develop your module +## Module infrastructure & development options When you add a new module, it comes with default code that is ready to be built and deployed to a device so that you can start testing without touching any code. The module code is located within the module folder in a file named `Program.cs` (for C#) or `main.c` (for C). @@ -142,15 +159,21 @@ When you're ready to customize the module template with your own code, use the [ ## Set up the iotedgehubdev testing tool -The IoT edgeHub dev tool provides a local development and debug experience. The tool helps start IoT Edge modules without the IoT Edge runtime so that you can create, develop, test, run, and debug IoT Edge modules and solutions locally. You don't have to push images to a container registry and deploy them to a device for testing. +The Azure IoT EdgeHub Dev Tool provides a local development and debug experience. The tool helps start IoT Edge modules without the IoT Edge runtime so that you can create, develop, test, run, and debug IoT Edge modules and solutions locally. You don't have to push images to a container registry and deploy them to a device for testing. For more information, see [Azure IoT EdgeHub Dev Tool](https://pypi.org/project/iotedgehubdev/). -To initialize the tool, provide an IoT Edge device connection string from IoT Hub. +To initialize the tool in Visual Studio: -1. Retrieve the connection string of an IoT Edge device from the Azure portal, the Azure CLI, or the Visual Studio Cloud Explorer. +1. Retrieve the connection string of your IoT Edge device (found in your IoT Hub) from the [Azure portal](https://portal.azure.com/) or from the Azure CLI. -1. From the **Tools** menu, select **Azure IoT Edge Tools** > **Setup IoT Edge Simulator**. + If using the CLI to retrieve your connection string, use this command, replacing "**[device_id]**" and "**[hub_name]**" with your own values: + + ```Azure CLI + az iot hub device-identity connection-string show --device-id [device_id] --hub-name [hub_name] + ``` + +1. From the **Tools** menu in Visual Studio, select **Azure IoT Edge Tools** > **Setup IoT Edge Simulator**. 1. Paste the connection string and click **OK**. @@ -162,17 +185,19 @@ To initialize the tool, provide an IoT Edge device connection string from IoT Hu Typically, you'll want to test and debug each module before running it within an entire solution with multiple modules. >[!TIP] ->Make sure you have switched over to the correct Docker container mode, either Linux container mode or Windows container mode, depending on the type of IoT Edge module you are developing. From the Docker Desktop menu, you can toggle between the two types of modes. Select **Switch to Windows containers** to use Windows containers, or select **Switch to Linux containers** to use Linux containers. +>Depending on the type of IoT Edge module you are developing, you may need to enable the correct Docker container mode: either Linux or Windows. From the Docker Desktop menu, you can toggle between the two types of modes. Select **Switch to Windows containers** or select **Switch to Linux containers**. For this tutorial, we use Linux. 
+> +>:::image type="content" source="./media/how-to-visual-studio-develop-module/system-tray.png" alt-text="Screenshot of how to find and select the menu item named 'Switch to Windows containers'."::: -1. In **Solution Explorer**, right-click the module folder and select **Set as StartUp Project** from the menu. +1. In **Solution Explorer**, right-click the module project folder and select **Set as StartUp Project** from the menu. - ![Set Start-up Project](./media/how-to-visual-studio-develop-csharp-module/module-start-up-project.png) + :::image type="content" source="./media/how-to-visual-studio-develop-module/module-start-up-project.png" alt-text="Screenshot of how to set project as startup project."::: -1. Press **F5** or click the run button in the toolbar to run the module. It may take 10–20 seconds the first time you do so. +1. Press **F5** or click the run button in the toolbar to run the module. It may take 10–20 seconds the first time you do so. Be sure you don't have other Docker containers running that might bind the port you need for this project. - ![Run Module](./media/how-to-visual-studio-develop-csharp-module/run-module.png) + :::image type="content" source="./media/how-to-visual-studio-develop-module/run-module.png" alt-text="Screenshot of how to run a module."::: -1. You should see a .NET Core console app start if the module has been initialized successfully. +1. You should see a .NET Core console app window appear if the module has been initialized successfully. 1. Set a breakpoint to inspect the module. @@ -185,9 +210,18 @@ Typically, you'll want to test and debug each module before running it within an curl --header "Content-Type: application/json" --request POST --data '{"inputName": "input1","data":"hello world"}' http://localhost:53000/api/v1/messages ``` - ![Debug Single Module](./media/how-to-visual-studio-develop-csharp-module/debug-single-module.png) + :::image type="content" source="./media/how-to-visual-studio-develop-csharp-module/debug-single-module.png" alt-text="Screenshot of the output console, Visual Studio project, and Bash window." lightbox="./media/how-to-visual-studio-develop-csharp-module/debug-single-module.png"::: + + The breakpoint should be triggered. You can watch variables in the Visual Studio **Locals** window, found when the debugger is running. Go to Debug > Windows > Locals. - The breakpoint should be triggered. You can watch variables in the Visual Studio **Locals** window. + In your Bash or shell, you should see a `{"message":"accepted"}` confirmation. + + In your .NET console you should see: + + ```dotnetcli + IoT Hub module client initialized. + Received message: 1, Body: [hello world] + ``` > [!TIP] > You can also use [PostMan](https://www.getpostman.com/) or other API tools to send messages instead of `curl`. @@ -198,38 +232,56 @@ Typically, you'll want to test and debug each module before running it within an After you're done developing a single module, you might want to run and debug an entire solution with multiple modules. -1. In **Solution Explorer**, add a second module to the solution by right-clicking the project folder. On the menu, select **Add** > **New IoT Edge Module**. +1. In **Solution Explorer**, add a second module to the solution by right-clicking the main project folder. On the menu, select **Add** > **New IoT Edge Module**. + + :::image type="content" source="./media/how-to-visual-studio-develop-module/add-new-module.png" alt-text="Screenshot of how to add a 'New I o T Edge Module' from the menu." 
lightbox="./media/how-to-visual-studio-develop-module/add-new-module.png"::: - ![Add a new module to an existing IoT Edge project](./media/how-to-visual-studio-develop-csharp-module/add-new-module.png) +1. In the `Add module` window give your new module a name and replace the `localhost:5000` portion of the repository URL with your Azure Container Registry login server, like you did before. -1. Open the file `deployment.template.json` and you'll see that the new module has been added in the **modules** section. A new route was also added to the **routes** section to send messages from the new module to IoT Hub. If you want to send data from the simulated temperature sensor to the new module, add another route like the following example: +1. Open the file `deployment.debug.template.json` to see that the new module has been added in the **modules** section. A new route was also added to the **routes** section in `EdgeHub` to send messages from the new module to IoT Hub. To send data from the simulated temperature sensor to the new module, add another route with the following line of `JSON`. Replace `` (in two places) with your own module name. ```json "sensorTo": "FROM /messages/modules/SimulatedTemperatureSensor/outputs/temperatureOutput INTO BrokeredEndpoint(\"/modules//inputs/input1\")" ``` -1. Right-click the project folder and select **Set as StartUp Project** from the context menu. +1. Right-click the main project (for example, `IoTEdgeProject`) and select **Set as StartUp Project**. -1. Create your breakpoints and then press **F5** to run and debug multiple modules simultaneously. You should see multiple .NET Core console app windows, which each window representing a different module. +1. Create breakpoints in each module and then press **F5** to run and debug multiple modules simultaneously. You should see multiple .NET Core console app windows, with each window representing a different module. - ![Debug Multiple Modules](./media/how-to-visual-studio-develop-csharp-module/debug-multiple-modules.png) + :::image type="content" source="./media/how-to-visual-studio-develop-csharp-module/debug-multiple-modules.png" alt-text="Screenshot of Visual Studio with two output consoles."::: 1. Press **Ctrl + F5** or select the stop button to stop debugging. ## Build and push images -1. Make sure the IoT Edge project is the start-up project, not one of the individual modules. Select either **Debug** or **Release** as the configuration to build for your module images. +1. Make sure the main IoT Edge project is the start-up project, not one of the individual modules. Select either **Debug** or **Release** as the configuration to build for your module images. > [!NOTE] > When choosing **Debug**, Visual Studio uses `Dockerfile.(amd64|windows-amd64).debug` to build Docker images. This includes the .NET Core command-line debugger VSDBG in your container image while building it. For production-ready IoT Edge modules, we recommend that you use the **Release** configuration, which uses `Dockerfile.(amd64|windows-amd64)` without VSDBG. -1. If you're using a private registry like Azure Container Registry (ACR), use the following Docker command to sign in to it. You can get the username and password from the **Access keys** page of your registry in the Azure portal. If you're using local registry, you can [run a local registry](https://docs.docker.com/registry/deploying/#run-a-local-registry). +1. 
If you're using a private registry like Azure Container Registry (ACR), use the following Docker command to sign in to it. You can get the username and password from the **Access keys** page of your registry in the Azure portal. ```cmd docker login -u -p ``` -1. If you're using a private registry like Azure Container Registry, you need to add your registry login information to the runtime settings found in the file `deployment.template.json`. Replace the placeholders with your actual ACR admin username, password, and registry name. +1. Let's add the Azure Container Registry login information to the runtime settings found in the file `deployment.debug.template.json`. There are two ways to do this. You can either add your registry credentials to your `.env` file (most secure) or add them directly to your `deployment.debug.template.json` file. + + **Add credentials to your `.env` file:** + + In the Solution Explorer, click the button that will **Show All Files**. The `.env` file will appear. Add your Azure Container Registry username and password to your `.env` file. These credentials can be found on the **Access Keys** page of your Azure Container Registry in the Azure portal. + + :::image type="content" source="./media/how-to-visual-studio-develop-module/show-env-file.png" alt-text="Screenshot of button that will show all files in the Solution Explorer."::: + + ```env + DEFAULT_RT_IMAGE=1.2 + CONTAINER_REGISTRY_USERNAME_myregistry= + CONTAINER_REGISTRY_PASSWORD_myregistry= + ``` + + **Add credentials directly to `deployment.debug.template.json`:** + + If you'd rather add your credentials directly to your deployment template, replace the placeholders with your actual ACR admin username, password, and registry name. ```json "settings": { @@ -248,25 +300,87 @@ After you're done developing a single module, you might want to run and debug an >[!NOTE] >This article uses admin login credentials for Azure Container Registry, which are convenient for development and test scenarios. When you're ready for production scenarios, we recommend a least-privilege authentication option like service principals. For more information, see [Manage access to your container registry](production-checklist.md#manage-access-to-your-container-registry). -1. In **Solution Explorer**, right-click the project folder and select **Build and Push IoT Edge Modules** to build and push the Docker image for each module. +1. If you're using a local registry, you can [run a local registry](https://docs.docker.com/registry/deploying/#run-a-local-registry). + +1. Finally, in the **Solution Explorer**, right-click the main project folder and select **Build and Push IoT Edge Modules** to build and push the Docker image for each module. This might take a minute. When you see `Finished Build and Push IoT Edge Modules.` in your Output console of Visual Studio, you are done. ## Deploy the solution -In the quickstart article that you used to set up your IoT Edge device, you deployed a module by using the Azure portal. You can also deploy modules using the Cloud Explorer for Visual Studio. You already have a deployment manifest prepared for your scenario, the `deployment.json` file and all you need to do is select a device to receive the deployment. +In the quickstart article that you used to set up your IoT Edge device, you deployed a module by using the Azure portal. You can also deploy modules using the CLI in Visual Studio. You already have a deployment manifest template you've been observing throughout this tutorial. 
Let's generate a deployment manifest from that, then use an Azure CLI command to deploy your modules to your IoT Edge device in Azure. -1. Open **Cloud Explorer** by clicking **View** > **Cloud Explorer**. Make sure you've logged in to Visual Studio 2019. +1. Right-click on your main project in Visual Studio Solution Explorer and choose **Generate Deployment for IoT Edge**. -1. In **Cloud Explorer**, expand your subscription, find your Azure IoT Hub and the Azure IoT Edge device you want to deploy. + :::image type="content" source="./media/how-to-visual-studio-develop-module/generate-deployment.png" alt-text="Screenshot of location of the 'generate deployment' menu item."::: -1. Right-click on the IoT Edge device to create a deployment for it. Navigate to the deployment manifest configured for your platform located in the **config** folder in your Visual Studio solution, such as `deployment.arm32v7.json`. +1. Go to your local Visual Studio main project folder and look in the `config` folder. The file path might look like this: `C:\Users\\source\repos\\config`. Here you'll find the generated deployment manifest such as `deployment.amd64.debug.json`. -1. Click the refresh button to see the new modules running along with the **SimulatedTemperatureSensor** module and **$edgeAgent** and **$edgeHub**. +1. Check your `deployment.amd64.debug.json` file to confirm the `edgeHub` schema version is set to 1.2. -## View generated data + ```json + "$edgeHub": { + "properties.desired": { + "schemaVersion": "1.2", + "routes": { + "IotEdgeModule2022ToIoTHub": "FROM /messages/modules/IotEdgeModule2022/outputs/* INTO $upstream", + "sensorToIotEdgeModule2022": "FROM /messages/modules/SimulatedTemperatureSensor/outputs/temperatureOutput INTO BrokeredEndpoint(\"/modules/IotEdgeModule2022/inputs/input1\")", + "IotEdgeModule2022bToIoTHub": "FROM /messages/modules/IotEdgeModule2022b/outputs/* INTO $upstream" + }, + "storeAndForwardConfiguration": { + "timeToLiveSecs": 7200 + } + } + } + ``` + > [!TIP] + > The deployment template for Visual Studio 2022 requires the 1.2 schema version. If you need it to be 1.1 or 1.0, wait until after the deployment is generated (do not change it in `deployment.debug.template.json`). Generating a deployment will create a 1.2 schema by default. However, you can manually change `deployment.amd64.debug.json`, the generated manifest, if needed before deploying it to Azure. + + > [!IMPORTANT] + > Once your IoT Edge device is deployed, it currently won't display correctly in the Azure portal with schema version 1.2 (version 1.1 will be fine). This is a known bug and will be fixed soon. However, this won't affect your device, as it's still connected in IoT Hub and can be communicated with at any time using the Azure CLI. + > + >:::image type="content" source="./media/how-to-publish-subscribe/unsupported-1.2-schema.png" alt-text="Screenshot of Azure portal error on the I o T Edge device page."::: + +1. Now let's deploy our manifest with an Azure CLI command. Open the Visual Studio **Developer Command Prompt** and change to the **config** directory. + + ```cmd + cd config + ``` + +1. From your **config** folder, execute the following deployment command. Replace the `[device id]`, `[hub name]`, and `[file path]` with your values. -1. To monitor the D2C message for a specific IoT Edge device, select it in your IoT hub in **Cloud Explorer** and then click **Start Monitoring Built-in Event Endpoint** in the **Action** window. 
+ ```cmd + az iot edge set-modules --device-id [device id] --hub-name [hub name] --content [file path] + ``` + + For example, your command might look like this: + + ```cmd + az iot edge set-modules --device-id my-device-name --hub-name my-iot-hub-name --content deployment.amd64.debug.json + ``` + +1. After running the command, you'll see a confirmation of deployment printed in `JSON` in your command prompt. + +### Confirm the deployment to your device + +To check that your IoT Edge modules were deployed to Azure, sign in to your device (or virtual machine), for example through SSH or Azure Bastion, and run the IoT Edge list command. + +```azurecli + iotedge list +``` + +You should see a list of your modules running on your device or virtual machine. + +```azurecli + NAME STATUS DESCRIPTION CONFIG + SimulatedTemperatureSensor running Up a day mcr.microsoft.com/azureiotedge-simulated-temperature-sensor:1.0 + edgeAgent running Up a day mcr.microsoft.com/azureiotedge-agent:1.2 + edgeHub running Up a day mcr.microsoft.com/azureiotedge-hub:1.2 + myIotEdgeModule running Up 2 hours myregistry.azurecr.io/myiotedgemodule:0.0.1-amd64.debug + myIotEdgeModule2 running Up 2 hours myregistry.azurecr.io/myiotedgemodule2:0.0.1-amd64.debug +``` + +## View generated data -1. To stop monitoring data, select **Stop Monitoring Built-in Event Endpoint** in the **Action** window. +To monitor the device-to-cloud (D2C) messages for a specific IoT Edge device, review the [Tutorial: Monitor IoT Edge devices](tutorial-monitor-with-workbooks.md) to get started. ## Next steps diff --git a/articles/iot-edge/iot-edge-limits-and-restrictions.md b/articles/iot-edge/iot-edge-limits-and-restrictions.md index 349b72d8ee5c5..a8c1e80e60bab 100644 --- a/articles/iot-edge/iot-edge-limits-and-restrictions.md +++ b/articles/iot-edge/iot-edge-limits-and-restrictions.md @@ -62,6 +62,11 @@ Supported query syntax: Not supported query syntax: * [Message routing query based on device twin](../iot-hub/iot-hub-devguide-routing-query-syntax.md#message-routing-query-based-on-device-twin) +### Restart policies + Don't use `on-unhealthy` or `on-failure` as values in modules' `restartPolicy` because they are unimplemented and won't initiate a restart. Only `never` and `always` restart policies are implemented. + +The recommended way to automatically restart unhealthy IoT Edge modules is noted in [this workaround](https://github.com/Azure/iotedge/issues/6358#issuecomment-1144022920). Configure the `Healthcheck` property in the module's `createOptions` to handle a failed health check. + ### File upload IoT Hub only supports file upload APIs for device identities, not module identities. Since IoT Edge exclusively uses modules, file upload isn't natively supported in IoT Edge. 
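Returning to the restart-policy workaround above: as a rough sketch (the probe command, port, and timing values are placeholders, and the full workaround in the linked issue also covers reacting to the unhealthy state), a module's `createOptions` in the deployment manifest might carry a Docker health check like this:

```json
"createOptions": {
  "Healthcheck": {
    "Test": ["CMD-SHELL", "curl --fail http://localhost:8080/healthz || exit 1"],
    "Interval": 30000000000,
    "Timeout": 10000000000,
    "StartPeriod": 60000000000,
    "Retries": 3
  }
}
```

The durations follow the Docker Engine API's `HealthConfig` object and are expressed in nanoseconds. Depending on your tooling, `createOptions` may need to be supplied as a serialized JSON string rather than a nested object.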
diff --git a/articles/iot-edge/media/how-to-create-alerts/change-scope.png b/articles/iot-edge/media/how-to-create-alerts/change-scope.png index 4629b3a4b01aa..97e42229c9046 100644 Binary files a/articles/iot-edge/media/how-to-create-alerts/change-scope.png and b/articles/iot-edge/media/how-to-create-alerts/change-scope.png differ diff --git a/articles/iot-edge/media/how-to-create-alerts/example-alerts.png b/articles/iot-edge/media/how-to-create-alerts/example-alerts.png index 49fd69a70d534..5147bb4976064 100644 Binary files a/articles/iot-edge/media/how-to-create-alerts/example-alerts.png and b/articles/iot-edge/media/how-to-create-alerts/example-alerts.png differ diff --git a/articles/iot-edge/media/how-to-publish-subscribe/unsupported-1.2-schema.png b/articles/iot-edge/media/how-to-publish-subscribe/unsupported-1.2-schema.png new file mode 100644 index 0000000000000..50697df9dcb21 Binary files /dev/null and b/articles/iot-edge/media/how-to-publish-subscribe/unsupported-1.2-schema.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/add-new-module.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/add-new-module.png new file mode 100644 index 0000000000000..2e20d502d7544 Binary files /dev/null and b/articles/iot-edge/media/how-to-visual-studio-develop-module/add-new-module.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/create-new-iot-edge-device.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/create-new-iot-edge-device.png new file mode 100644 index 0000000000000..6f20762693425 Binary files /dev/null and b/articles/iot-edge/media/how-to-visual-studio-develop-module/create-new-iot-edge-device.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/create-new-project.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/create-new-project.png index 0334ec57dea4b..b778c610a3e98 100644 Binary files a/articles/iot-edge/media/how-to-visual-studio-develop-module/create-new-project.png and b/articles/iot-edge/media/how-to-visual-studio-develop-module/create-new-project.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/device-created-confirm.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/device-created-confirm.png new file mode 100644 index 0000000000000..95b7e3ebdf766 Binary files /dev/null and b/articles/iot-edge/media/how-to-visual-studio-develop-module/device-created-confirm.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/generate-deployment.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/generate-deployment.png new file mode 100644 index 0000000000000..5c168f0ab0c8e Binary files /dev/null and b/articles/iot-edge/media/how-to-visual-studio-develop-module/generate-deployment.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/get-dev-command-prompt.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/get-dev-command-prompt.png new file mode 100644 index 0000000000000..0dc5209130932 Binary files /dev/null and b/articles/iot-edge/media/how-to-visual-studio-develop-module/get-dev-command-prompt.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/module-start-up-project.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/module-start-up-project.png new file mode 100644 index 0000000000000..673d52f421939 Binary files /dev/null and 
b/articles/iot-edge/media/how-to-visual-studio-develop-module/module-start-up-project.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/run-module.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/run-module.png new file mode 100644 index 0000000000000..a81cdc5c19934 Binary files /dev/null and b/articles/iot-edge/media/how-to-visual-studio-develop-module/run-module.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/set-iot-edge-runtime-version.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/set-iot-edge-runtime-version.png index a0c6636b72a20..2d889e48e7650 100644 Binary files a/articles/iot-edge/media/how-to-visual-studio-develop-module/set-iot-edge-runtime-version.png and b/articles/iot-edge/media/how-to-visual-studio-develop-module/set-iot-edge-runtime-version.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/show-env-file.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/show-env-file.png new file mode 100644 index 0000000000000..13c754486563e Binary files /dev/null and b/articles/iot-edge/media/how-to-visual-studio-develop-module/show-env-file.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/system-tray.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/system-tray.png new file mode 100644 index 0000000000000..d699fa32a8af5 Binary files /dev/null and b/articles/iot-edge/media/how-to-visual-studio-develop-module/system-tray.png differ diff --git a/articles/iot-edge/media/tutorial-machine-learning-edge-02-prepare-environment/custom-endpoints.png b/articles/iot-edge/media/tutorial-machine-learning-edge-02-prepare-environment/custom-endpoints.png index 74b2c2ed6ccd1..f64a151e2b293 100644 Binary files a/articles/iot-edge/media/tutorial-machine-learning-edge-02-prepare-environment/custom-endpoints.png and b/articles/iot-edge/media/tutorial-machine-learning-edge-02-prepare-environment/custom-endpoints.png differ diff --git a/articles/iot-edge/media/tutorial-machine-learning-edge-02-prepare-environment/route-details.png b/articles/iot-edge/media/tutorial-machine-learning-edge-02-prepare-environment/route-details.png index 555b2c79d03b5..dbf127ae8b1a7 100644 Binary files a/articles/iot-edge/media/tutorial-machine-learning-edge-02-prepare-environment/route-details.png and b/articles/iot-edge/media/tutorial-machine-learning-edge-02-prepare-environment/route-details.png differ diff --git a/articles/iot-edge/module-development.md b/articles/iot-edge/module-development.md index a39ce3bc1cb30..3b1c728c39e27 100644 --- a/articles/iot-edge/module-development.md +++ b/articles/iot-edge/module-development.md @@ -54,6 +54,9 @@ With the IoT Edge MQTT broker, you can publish messages on any user-defined topi With the IoT Edge MQTT broker, receiving messages is similar. First make sure that your module is authorized to subscribe to specific topics, then get a token from the workload API to use as a password when connecting to the MQTT broker, and finally subscribe to messages on the authorized topics with the MQTT client of your choice. +> [!NOTE] +> IoT Edge MQTT broker (currently in preview) will not move to general availability and will be removed from the future version of IoT Edge Hub. We appreciate the feedback we received on the preview, and we are continuing to refine our plans for an MQTT broker. 
In the meantime, if you need a standards-compliant MQTT broker on IoT Edge, consider deploying an open-source broker like [Mosquitto](https://mosquitto.org/) as an IoT Edge module. + ::: moniker-end ### IoT Hub primitives diff --git a/articles/iot-edge/module-edgeagent-edgehub.md b/articles/iot-edge/module-edgeagent-edgehub.md index 056cefba737bc..64cd3505b380a 100644 --- a/articles/iot-edge/module-edgeagent-edgehub.md +++ b/articles/iot-edge/module-edgeagent-edgehub.md @@ -51,7 +51,7 @@ The module twin for the IoT Edge agent is called `$edgeAgent` and coordinates th | modules.{moduleId}.version | A user-defined string representing the version of this module. | Yes | | modules.{moduleId}.type | Has to be "docker" | Yes | | modules.{moduleId}.status | {"running" \| "stopped"} | Yes | -| modules.{moduleId}.restartPolicy | {"never" \| "on-failure" \| "on-unhealthy" \| "always"} | Yes | +| modules.{moduleId}.restartPolicy | {"never" \| "always"} | Yes | | modules.{moduleId}.startupOrder | An integer value for which spot a module has in the startup order. 0 is first and max integer (4294967295) is last. If a value isn't provided, the default is max integer. | No | | modules.{moduleId}.imagePullPolicy | {"on-create" \| "never"} | No | | modules.{moduleId}.env | A list of environment variables to pass to the module. Takes the format `"": {"value": ""}` | No | diff --git a/articles/iot-edge/production-checklist.md b/articles/iot-edge/production-checklist.md index 84e91ae34fc70..f15e265625aac 100644 --- a/articles/iot-edge/production-checklist.md +++ b/articles/iot-edge/production-checklist.md @@ -57,7 +57,9 @@ Before you put any device in production you should know how you're going to mana * IoT Edge * CA certificates -For more information, see [Update the IoT Edge runtime](how-to-update-iot-edge.md). The current methods for updating IoT Edge require physical or SSH access to the IoT Edge device. If you have many devices to update, consider adding the update steps to a script or use an automation tool like Ansible. +[Device Update for IoT Hub](../iot-hub-device-update/index.yml) (Preview) is a service that enables you to deploy over-the-air updates (OTA) for your IoT Edge devices. + +Alternative methods for updating IoT Edge require physical or SSH access to the IoT Edge device. For more information, see [Update the IoT Edge runtime](how-to-update-iot-edge.md). To update multiple devices, consider adding the update steps to a script or use an automation tool like Ansible. ### Use Moby as the container engine @@ -220,6 +222,7 @@ This checklist is a starting point for firewall rules: | FQDN (\* = wildcard) | Outbound TCP Ports | Usage | | ----- | ----- | ----- | | `mcr.microsoft.com` | 443 | Microsoft Container Registry | + | `\*.data.mcr.microsoft.com` | 443 | Data endpoint providing content delivery. | | `global.azure-devices-provisioning.net` | 443 | [Device Provisioning Service](../iot-dps/about-iot-dps.md) access (optional) | | `\*.azurecr.io` | 443 | Personal and third-party container registries | | `\*.blob.core.windows.net` | 443 | Download Azure Container Registry image deltas from blob storage | diff --git a/articles/iot-edge/quickstart-linux.md b/articles/iot-edge/quickstart-linux.md index 275dfbbb375de..71f682843e09b 100644 --- a/articles/iot-edge/quickstart-linux.md +++ b/articles/iot-edge/quickstart-linux.md @@ -269,7 +269,7 @@ Follow these steps to start the **Set Modules** wizard to deploy your first modu 1. 
Sign in to the [Azure portal](https://portal.azure.com) and go to your IoT hub. -1. From the menu on the left, under **Automatic Device Management**, select **IoT Edge**. +1. From the menu on the left, under **Device Management**, select **IoT Edge**. 1. Select the device ID of the target device from the list of devices. diff --git a/articles/iot-edge/quickstart.md b/articles/iot-edge/quickstart.md index 61159915e7e09..d1680a6a256fa 100644 --- a/articles/iot-edge/quickstart.md +++ b/articles/iot-edge/quickstart.md @@ -206,7 +206,7 @@ Follow these steps to deploy your first module from Azure Marketplace. 1. Sign in to the [Azure portal](https://portal.azure.com) and go to your IoT hub. -1. From the menu on the left, under **Automatic Device Management**, select **IoT Edge**. +1. From the menu on the left, under **Device Management**, select **IoT Edge**. 1. Select the device ID of the target device from the list of devices. diff --git a/articles/iot-edge/reference-iot-edge-for-linux-on-windows-functions.md b/articles/iot-edge/reference-iot-edge-for-linux-on-windows-functions.md index ad453d1f7c71b..a92fe2527304d 100644 --- a/articles/iot-edge/reference-iot-edge-for-linux-on-windows-functions.md +++ b/articles/iot-edge/reference-iot-edge-for-linux-on-windows-functions.md @@ -126,6 +126,7 @@ The **Deploy-Eflow** command is the main deployment method. The deployment comma | gpuName | GPU Device name | Name of GPU device to be used for passthrough. | | gpuPassthroughType | **DirectDeviceAssignment**, **ParaVirtualization**, or none (CPU only) | GPU Passthrough type | | gpuCount | Integer value between 1 and the number of the device's GPU cores | Number of GPU devices for the VM.

                  **Note**: If using ParaVirtualization, make sure to set gpuCount = 1 | +| customSsh | None | Determines whether user wants to use their custom OpenSSH.Client installation. If present, ssh.exe must be available to the EFLOW PSM | :::moniker-end @@ -148,6 +149,7 @@ The **Deploy-Eflow** command is the main deployment method. The deployment comma | gpuName | GPU Device name | Name of GPU device to be used for passthrough. | | gpuPassthroughType | **DirectDeviceAssignment**, **ParaVirtualization**, or none (CPU only) | GPU Passthrough type | | gpuCount | Integer value between 1 and the number of the device's GPU cores | Number of GPU devices for the VM.

                  **Note**: If using ParaVirtualization, make sure to set gpuCount = 1 | +| customSsh | None | Determines whether user wants to use their custom OpenSSH.Client installation. If present, ssh.exe must be available to the EFLOW PSM | :::moniker-end diff --git a/articles/iot-edge/support.md b/articles/iot-edge/support.md index 06a2abf92b258..1e4fc3640d003 100644 --- a/articles/iot-edge/support.md +++ b/articles/iot-edge/support.md @@ -180,7 +180,7 @@ The following table lists the components included in each release starting with The following table lists the components included in each release up to the 1.1 LTS release. The components listed in this table can be installed or updated individually, and are backwards compatible with older versions. -IoT Edge 1.1 is the first long-term support (LTS) release channel. This version introduced no new features, but will receive security updates and fixes to regressions. IoT Edge 1.1 LTS uses .NET Core 3.1, and will be supported until December 3, 2022 to match the [.NET Core and .NET 5 release lifecycle](https://dotnet.microsoft.com/platform/support/policy/dotnet-core). +IoT Edge 1.1 is the first long-term support (LTS) release channel. This version introduced no new features, but will receive security updates and fixes to regressions. IoT Edge 1.1 LTS uses .NET Core 3.1, and will be supported until December 13, 2022 to match the [.NET Core and .NET 5 release lifecycle](https://dotnet.microsoft.com/platform/support/policy/dotnet-core). | Release | iotedge | edgeHub
                  edgeAgent | libiothsm | moby | |--|--|--|--|--| @@ -192,8 +192,9 @@ IoT Edge 1.1 is the first long-term support (LTS) release channel. This version | **1.0.6** | 1.0.6
                  1.0.6.1 | 1.0.6
                  1.0.6.1 | 1.0.6
                  1.0.6.1 | | | **1.0.5** | 1.0.5 | 1.0.5 | 1.0.5 | 3.0.2 | ->[!IMPORTANT] ->With the release of a long-term support channel, we recommend that all current customers running 1.0.x upgrade their devices to 1.1.x to receive ongoing support. +> [!IMPORTANT] +> * Every Microsoft product has a lifecycle. The lifecycle begins when a product is released and ends when it's no longer supported. Knowing key dates in this lifecycle helps you make informed decisions about when to upgrade or make other changes to your software. IoT Edge is governed by Microsoft's [Modern Lifecycle Policy](/lifecycle/policies/modern). +> * With the release of a long-term support channel, we recommend that all current customers running 1.0.x upgrade their devices to 1.1.x to receive ongoing support. IoT Edge uses the Microsoft.Azure.Devices.Client SDK. For more information, see the [Azure IoT C# SDK GitHub repo](https://github.com/Azure/azure-iot-sdk-csharp) or the [Azure SDK for .NET reference content](/dotnet/api/overview/azure/iot/client). The following list shows the version of the client SDK that each release is tested against: diff --git a/articles/iot-edge/tutorial-machine-learning-edge-02-prepare-environment.md b/articles/iot-edge/tutorial-machine-learning-edge-02-prepare-environment.md index dc2a0ceb3af07..7b4d4c06d2fb7 100644 --- a/articles/iot-edge/tutorial-machine-learning-edge-02-prepare-environment.md +++ b/articles/iot-edge/tutorial-machine-learning-edge-02-prepare-environment.md @@ -187,18 +187,18 @@ As part of creating the IoT hub, the script that we ran in the previous section 1. In the list of resources, select the IoT Hub that the script created. It will have a name ending with random characters such as `IotEdgeAndMlHub-jrujej6de6i7w`. -1. From the left pane menu, under **Messaging**, select **Message routing**. +1. From the left pane menu, under **Hub settings**, select **Message routing**. 1. On the **Message routing** page, select the **Custom endpoints** tab. 1. Expand the **Storage** section: - ![Verify turbofanDeviceStorage is in the custom endpoints list](media/tutorial-machine-learning-edge-02-prepare-environment/custom-endpoints.png) + :::image type="content" source="./media/tutorial-machine-learning-edge-02-prepare-environment/custom-endpoints.png" alt-text="Screenshot of the storage called turbofanDeviceStorage in the custom endpoints list in the I o T Hub portal." lightbox="media/tutorial-machine-learning-edge-02-prepare-environment/custom-endpoints.png"::: We see **turbofanDeviceStorage** is in the custom endpoints list. Note the following characteristics about this endpoint: * It points to the blob storage container you created named `devicedata` as indicated by **Container name**. - * Its **Filename format** has partition as the last element in the name. We find this format is more convenient for the file operations we will do with Azure Notebooks later in the tutorial. + * Its **Filename format** has the word "partition" in the name. We find this format is more convenient for the file operations we'll do with Azure Notebooks later in this tutorial. * Its **Status** should be healthy. 1. Select the **Routes** tab. @@ -207,7 +207,7 @@ As part of creating the IoT hub, the script that we ran in the previous section 1. On the **Routes details** page, note that the route's endpoint is the **turbofanDeviceStorage** endpoint. 
- ![Review details about the turbofanDeviceDataToStorage route](media/tutorial-machine-learning-edge-02-prepare-environment/route-details.png) + :::image type="content" source="./media/tutorial-machine-learning-edge-02-prepare-environment/route-details.png" alt-text="Screenshot that shows detail about the turbofanDeviceDataToStorage route."::: 1. Look at the **Routing query**, which is set to **true**. This setting means that all device telemetry messages will match this route; and therefore all messages will be sent to the **turbofanDeviceStorage** endpoint. diff --git a/articles/iot-edge/tutorial-machine-learning-edge-06-custom-modules.md b/articles/iot-edge/tutorial-machine-learning-edge-06-custom-modules.md index 86f92b27bb80e..38874c1afa591 100644 --- a/articles/iot-edge/tutorial-machine-learning-edge-06-custom-modules.md +++ b/articles/iot-edge/tutorial-machine-learning-edge-06-custom-modules.md @@ -588,7 +588,7 @@ With the router and classifier in place, we expect to receive regular messages c 1. In the Azure portal, navigate to your IoT Hub. -1. From the menu on the left pane, under **Messaging**, select **Message routing**. +1. From the menu on the left pane, under **Hub settings**, select **Message routing**. 1. On the **Routes** tab, select **Add**. @@ -683,7 +683,7 @@ We don't want to route the new prediction data to our old storage location, so u Configure the IoT Hub file upload feature to enable the file writer module to upload files to storage. -1. From the left pane menu in your IoT Hub, under **Messaging**, choose **File upload**. +1. From the left pane menu in your IoT Hub, under **Hub settings**, choose **File upload**. 1. Select **Azure Storage Container**. diff --git a/articles/iot-edge/version-history.md b/articles/iot-edge/version-history.md index 8f85f67579df2..7e2c7a3cf3e71 100644 --- a/articles/iot-edge/version-history.md +++ b/articles/iot-edge/version-history.md @@ -14,6 +14,8 @@ ms.service: iot-edge Azure IoT Edge is a product built from the open-source IoT Edge project hosted on GitHub. All new releases are made available in the [Azure IoT Edge project](https://github.com/Azure/azure-iotedge). Contributions and bug reports can be made on the [open-source IoT Edge project](https://github.com/Azure/iotedge). +Azure IoT Edge is governed by Microsoft's [Modern Lifecycle Policy](/lifecycle/policies/modern). + ## Documented versions The IoT Edge documentation on this site is available for two different versions of the product, so that you can choose the content that applies to your IoT Edge environment. Currently, the two supported versions are: @@ -37,7 +39,7 @@ This table provides recent version history for IoT Edge package releases, and hi | Release notes and assets | Type | Date | Highlights | | ------------------------ | ---- | ---- | ---------- | -| [1.2](https://github.com/Azure/azure-iotedge/releases/tag/1.2.0) | Stable | April 2021 | [IoT Edge devices behind gateways](how-to-connect-downstream-iot-edge-device.md?view=iotedge-2020-11&preserve-view=true)
                  [IoT Edge MQTT broker (preview)](how-to-publish-subscribe.md?view=iotedge-2020-11&preserve-view=true)
                  New IoT Edge packages introduced, with new installation and configuration steps. For more information, see [Update from 1.0 or 1.1 to 1.2](how-to-update-iot-edge.md#special-case-update-from-10-or-11-to-12).
                  Includes [Microsoft Defender for IoT micro-agent for Edge](../defender-for-iot/device-builders/overview.md). +| [1.2](https://github.com/Azure/azure-iotedge/releases/tag/1.2.0) | Stable | April 2021 | [IoT Edge devices behind gateways](how-to-connect-downstream-iot-edge-device.md?view=iotedge-2020-11&preserve-view=true)
                  [IoT Edge MQTT broker (preview)](how-to-publish-subscribe.md?view=iotedge-2020-11&preserve-view=true)
                  New IoT Edge packages introduced, with new installation and configuration steps. For more information, see [Update from 1.0 or 1.1 to 1.2](how-to-update-iot-edge.md#special-case-update-from-10-or-11-to-12).
                  Includes [Microsoft Defender for IoT micro-agent for Edge](../defender-for-iot/device-builders/overview.md).
                  Integration with Device Update. For more information, see [Update IoT Edge](how-to-update-iot-edge.md). | [1.1](https://github.com/Azure/azure-iotedge/releases/tag/1.1.0) | Long-term support (LTS) | February 2021 | [Long-term support plan and supported systems updates](support.md) | | [1.0.10](https://github.com/Azure/azure-iotedge/releases/tag/1.0.10) | Stable | October 2020 | [UploadSupportBundle direct method](how-to-retrieve-iot-edge-logs.md#upload-support-bundle-diagnostics)
                  [Upload runtime metrics](how-to-access-built-in-metrics.md)
                  [Route priority and time-to-live](module-composition.md#priority-and-time-to-live)
                  [Module startup order](module-composition.md#configure-modules)
                  [X.509 manual provisioning](how-to-provision-single-device-linux-x509.md) | | [1.0.9](https://github.com/Azure/azure-iotedge/releases/tag/1.0.9) | Stable | March 2020 | X.509 auto-provisioning with DPS
                  [RestartModule direct method](how-to-edgeagent-direct-method.md#restart-module)
                  [support-bundle command](troubleshoot.md#gather-debug-information-with-support-bundle-command) | diff --git a/articles/iot-hub-device-update/device-update-apt-manifest.md b/articles/iot-hub-device-update/device-update-apt-manifest.md index 3e0aba8922bed..9c4c21f9181fa 100644 --- a/articles/iot-hub-device-update/device-update-apt-manifest.md +++ b/articles/iot-hub-device-update/device-update-apt-manifest.md @@ -101,8 +101,8 @@ If version is omitted, the latest available version of specified package will be > APT package manager ignores versioning requirements given by a package when the dependent packages to install are being automatically resolved. Unless explicit versions of dependent packages are given they will use the latest, even though the package itself may specify a strict requirement (=) on a given version. This automatic resolution can lead to errors regarding an unmet dependency. [Learn More](https://unix.stackexchange.com/questions/350192/apt-get-not-properly-resolving-a-dependency-on-a-fixed-version-in-a-debian-ubunt) -If you're updating a specific version of the Azure IoT Edge security daemon, then you should include the desired version of the `iotedge` package and its dependent `libiothsm-std` package in your APT manifest. -[Learn More](../iot-edge/how-to-update-iot-edge.md#update-the-security-daemon) +If you're updating a specific version of the Azure IoT Edge security daemon, then you should include the desired version of the `aziot-edge` package and its dependent `aziot-identity-service` package in your APT manifest. +[Learn More](../iot-edge/how-to-update-iot-edge.md#update-the-security-subsystem) > [!NOTE] > An apt manifest can be used to update Device Update agent and its dependencies. List the device update agent name and desired version in the apt manifest, like you would for any other package. This apt manifest can then be imported and deployed through the Device Update for IoT Hub pipeline. diff --git a/articles/iot-hub-device-update/device-update-proxy-updates.md b/articles/iot-hub-device-update/device-update-proxy-updates.md index 42cccb52637da..ded1f31ba669b 100644 --- a/articles/iot-hub-device-update/device-update-proxy-updates.md +++ b/articles/iot-hub-device-update/device-update-proxy-updates.md @@ -10,7 +10,7 @@ ms.service: iot-hub-device-update # Proxy Updates and multi-component updating -Proxy Updates can support updating multiple **component(s)** on a target IoT device connected to IoT Hub. With Proxy updates, you can (1) target over-the-air updates to multiple components on the IoT device or (2) target over-the-air updates to multiple sensors connected to the IoT device. Use cases where proxy updates is applicable include: +With Proxy updates, you can (1) target over-the-air updates to multiple components on the IoT device or (2) target over-the-air updates to multiple sensors connected to the IoT device. Use cases where proxy updates is applicable include: * Targeting specific update files to different partitions on the device. * Targeting specific update files to different apps/components on the device diff --git a/articles/iot-hub-device-update/import-schema.md b/articles/iot-hub-device-update/import-schema.md index 999b6d64370fe..4360ae2fa1199 100644 --- a/articles/iot-hub-device-update/import-schema.md +++ b/articles/iot-hub-device-update/import-schema.md @@ -282,7 +282,7 @@ Update payload file, e.g. binary, firmware, script, etc. 
Must be unique within u |---|---|---|---| |**filename**|`string`|Update payload file name.|Yes| |**sizeInBytes**|`number`|File size in number of bytes.|Yes| -|**hashes**|`fileHashes`|Base64-encoded file hashes with algorithm name as key. At least SHA-256 algorithm must be specified, and additional algorithm may be specified if supported by agent.|Yes| +|**hashes**|`fileHashes`|Base64-encoded file hashes with algorithm name as key. At least SHA-256 algorithm must be specified, and additional algorithm may be specified if supported by agent. See below for details on how to calculate the hash. |Yes| Additional properties are not allowed. @@ -314,7 +314,7 @@ File hashes. ### fileHashes object -Base64-encoded file hashes with algorithm name as key. At least SHA-256 algorithm must be specified, and additional algorithm may be specified if supported by agent. For an example of how to calculate the hash correctly, see the [AduUpdate.psm1 script](https://github.com/Azure/iot-hub-device-update/blob/main/tools/AduCmdlets/AduUpdate.psm1). +Base64-encoded file hashes with algorithm name as key. At least SHA-256 algorithm must be specified, and additional algorithm may be specified if supported by agent. For an example of how to calculate the hash correctly, see the Get-AduFileHashes function in [AduUpdate.psm1 script](https://github.com/Azure/iot-hub-device-update/blob/main/tools/AduCmdlets/AduUpdate.psm1). **Properties** diff --git a/articles/iot-hub/TOC.yml b/articles/iot-hub/TOC.yml index f62da4de0be44..e64def6c69c59 100644 --- a/articles/iot-hub/TOC.yml +++ b/articles/iot-hub/TOC.yml @@ -21,20 +21,8 @@ href: horizontal-arm-route-messages.md - name: Tutorials items: - - name: Routing messages - items: - - name: Part 1 - Configure message routing - items: - - name: Portal - href: tutorial-routing.md - - name: Azure RM template - href: tutorial-routing-config-message-routing-RM-template.md - - name: Azure CLI - href: tutorial-routing-config-message-routing-CLI.md - - name: PowerShell - href: tutorial-routing-config-message-routing-PowerShell.md - - name: Part 2 - View message routing results - href: tutorial-routing-view-message-routing-results.md + - name: Route device messages to storage + href: tutorial-routing.md - name: Message enrichments href: tutorial-message-enrichments.md - name: Use metrics and logs diff --git a/articles/iot-hub/horizontal-arm-route-messages.md b/articles/iot-hub/horizontal-arm-route-messages.md index cacc73055c3ed..cc021aaf9040e 100644 --- a/articles/iot-hub/horizontal-arm-route-messages.md +++ b/articles/iot-hub/horizontal-arm-route-messages.md @@ -22,7 +22,19 @@ If your environment meets the prerequisites and you're familiar with using ARM t ## Prerequisites -If you don't have an Azure subscription, create a [free Azure account](https://azure.microsoft.com/free/) before you begin. +- If you don't have an Azure subscription, create a [free Azure account](https://azure.microsoft.com/free/) before you begin. + +- The sample application you run in this quickstart is written using C#. You need the .NET SDK 6.0 or greater on your development machine. + + You can download the .NET Core SDK for multiple platforms from [.NET](https://dotnet.microsoft.com/download). + + You can verify the current version of C# on your development machine using the following command: + + ```cmd/sh + dotnet --version + ``` + +- Download and unzip the [IoT C# Samples](/samples/azure-samples/azure-iot-samples-csharp/azure-iot-samples-for-csharp-net/). 
## Review the template @@ -46,8 +58,6 @@ This section provides the steps to deploy the template, create a virtual device, [![Deploy To Azure](https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/1-CONTRIBUTION-GUIDE/images/deploytoazure.svg?sanitize=true)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-quickstart-templates%2Fmaster%2Fquickstarts%2Fmicrosoft.devices%2Fiothub-auto-route-messages%2Fazuredeploy.json) -1. Download and unzip the [IoT C# Samples](/samples/azure-samples/azure-iot-samples-csharp/azure-iot-samples-for-csharp-net/). - 1. Open a command window and go to the folder where you unzipped the IoT C# Samples. Find the folder with the arm-read-write.csproj file. You create the environment variables in this command window. Log into the [Azure portal](https://portal.azure.com) to get the keys. Select **Resource Groups** then select the resource group used for this quickstart. ![Select the resource group](./media/horizontal-arm-route-messages/01-select-resource-group.png) diff --git a/articles/iot-hub/iot-hub-devguide-device-twins.md b/articles/iot-hub/iot-hub-devguide-device-twins.md index fb25bb18d2a7d..c27179d909bfc 100644 --- a/articles/iot-hub/iot-hub-devguide-device-twins.md +++ b/articles/iot-hub/iot-hub-devguide-device-twins.md @@ -364,8 +364,6 @@ This information is kept at every level (not just the leaves of the JSON structu Tags, desired properties, and reported properties all support optimistic concurrency. If you need to guarantee order of twin property updates, consider implementing synchronization at the application level by waiting for reported properties callback before sending the next update. -Tags have an ETag, as per [RFC7232](https://tools.ietf.org/html/rfc7232), that represents the tag's JSON representation. You can use ETags in conditional update operations from the solution back end to ensure consistency. - Device twins have an ETag (`etag` property), as per [RFC7232](https://tools.ietf.org/html/rfc7232), that represents the twin's JSON representation. You can use the `etag` property in conditional update operations from the solution back end to ensure consistency. This is the only option for ensuring consistency in operations that involve the `tags` container. Device twin desired and reported properties also have a `$version` value that is guaranteed to be incremental. Similarly to an ETag, the version can be used by the updating party to enforce consistency of updates. For example, a device app for a reported property or the solution back end for a desired property. diff --git a/articles/iot-hub/iot-hub-devguide-identity-registry.md b/articles/iot-hub/iot-hub-devguide-identity-registry.md index bf44245448eb8..d2090f5107993 100644 --- a/articles/iot-hub/iot-hub-devguide-identity-registry.md +++ b/articles/iot-hub/iot-hub-devguide-identity-registry.md @@ -47,6 +47,9 @@ An IoT Hub identity registry: > [!IMPORTANT] > Only use the identity registry for device management and provisioning operations. High throughput operations at run time should not depend on performing operations in the identity registry. For example, checking the connection state of a device before sending a command is not a supported pattern. Make sure to check the [throttling rates](iot-hub-devguide-quotas-throttling.md) for the identity registry, and the [device heartbeat](iot-hub-devguide-identity-registry.md#device-heartbeat) pattern. 
+> [!NOTE] +> It can take a few seconds for a device or module identity to be available for retrieval after creation. Please retry `get` operation of device or module identities in case of failures. + ## Disable devices You can disable devices by updating the **status** property of an identity in the identity registry. Typically, you use this property in two scenarios: @@ -243,4 +246,4 @@ To try out some of the concepts described in this article, see the following IoT To explore using the IoT Hub Device Provisioning Service to enable zero-touch, just-in-time provisioning, see: -* [Azure IoT Hub Device Provisioning Service](https://azure.microsoft.com/documentation/services/iot-dps) \ No newline at end of file +* [Azure IoT Hub Device Provisioning Service](https://azure.microsoft.com/documentation/services/iot-dps) diff --git a/articles/iot-hub/iot-hub-devguide-messages-read-custom.md b/articles/iot-hub/iot-hub-devguide-messages-read-custom.md index 8f978f08c115f..9d1991162a0dc 100644 --- a/articles/iot-hub/iot-hub-devguide-messages-read-custom.md +++ b/articles/iot-hub/iot-hub-devguide-messages-read-custom.md @@ -34,7 +34,7 @@ When you use routing and custom endpoints, messages are only delivered to the bu > [!NOTE] > * IoT Hub only supports writing data to Azure Storage containers as blobs. > * Service Bus queues and topics with **Sessions** or **Duplicate Detection** enabled are not supported as custom endpoints. -> * In the Azure portal, you can create custom routing endpoints only to Azure resources that are in the same subscription as your IoT hub. You can create custom endpoints for resources in other subscriptions by using either the [Azure CLI](./tutorial-routing-config-message-routing-CLI.md) or [Azure Resource Manager](./tutorial-routing-config-message-routing-RM-template.md). +> * In the Azure portal, you can create custom routing endpoints only to Azure resources that are in the same subscription as your IoT hub. You can create custom endpoints for resources in other subscriptions by using either the [Azure CLI](./tutorial-routing.md) or Azure Resource Manager. For more information about creating custom endpoints in IoT Hub, see [IoT Hub endpoints](iot-hub-devguide-endpoints.md). diff --git a/articles/iot-hub/iot-hub-devguide-module-twins.md b/articles/iot-hub/iot-hub-devguide-module-twins.md index 580c815af5ab9..3f91a5ce99d2c 100644 --- a/articles/iot-hub/iot-hub-devguide-module-twins.md +++ b/articles/iot-hub/iot-hub-devguide-module-twins.md @@ -345,7 +345,7 @@ This information is kept at every level (not just the leaves of the JSON structu ## Optimistic concurrency -Tags, desired, and reported properties all support optimistic concurrency. +Tags, desired properties, and reported properties all support optimistic concurrency. If you need to guarantee order of twin property updates, consider implementing synchronization at the application level by waiting for reported properties callback before sending the next update. Module twins have an ETag (`etag` property), as per [RFC7232](https://tools.ietf.org/html/rfc7232), that represents the twin's JSON representation. You can use the `etag` property in conditional update operations from the solution back end to ensure consistency. This is the only option for ensuring consistency in operations that involve the `tags` container. 
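As an illustrative sketch of that ETag pattern (the hub name, device ID, module ID, SAS token, property name, and `api-version` value below are all placeholders; check the IoT Hub REST reference for the version your hub accepts), a conditional desired-property update from the solution back end can pass the last-seen ETag in an `If-Match` header:

```bash
# Returns 412 Precondition Failed if the module twin changed since the ETag was read
curl --request PATCH \
  --header "Authorization: <service SAS token>" \
  --header "Content-Type: application/json" \
  --header "If-Match: \"<etag from the last GET of the twin>\"" \
  --data '{"properties":{"desired":{"telemetryInterval":30}}}' \
  "https://<your-hub>.azure-devices.net/twins/<device-id>/modules/<module-id>?api-version=2021-04-12"
```

If the precondition fails, re-read the twin to get the current ETag and state, then retry the update.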
diff --git a/articles/iot-hub/iot-hub-devguide-quotas-throttling.md b/articles/iot-hub/iot-hub-devguide-quotas-throttling.md index d0280be73bae8..d9d116f53fa45 100644 --- a/articles/iot-hub/iot-hub-devguide-quotas-throttling.md +++ b/articles/iot-hub/iot-hub-devguide-quotas-throttling.md @@ -7,7 +7,7 @@ ms.author: kgremban ms.service: iot-hub services: iot-hub ms.topic: conceptual -ms.date: 02/22/2022 +ms.date: 06/01/2022 ms.custom: ['Role: Cloud Development', 'Role: Operations', 'Role: Technical Support', 'contperf-fy21q4'] --- @@ -101,7 +101,7 @@ IoT Hub enforces other operational limits: | Direct method1 | Maximum direct method payload size is 128 KB. | | Automatic device and module configurations1 | 100 configurations per paid SKU hub. 10 configurations per free SKU hub. | | IoT Edge automatic deployments1 | 50 modules per deployment. 100 deployments (including layered deployments) per paid SKU hub. 10 deployments per free SKU hub. | -| Twins1 | Maximum size of desired properties and reported properties sections are 32 KB each. Maximum size of tags section is 8 KB. | +| Twins1 | Maximum size of desired properties and reported properties sections are 32 KB each. Maximum size of tags section is 8 KB. Maximum size of each individual property in every section is 4 KB. | | Shared access policies | Maximum number of shared access policies is 16. | | Restrict outbound network access | Maximum number of allowed FQDNs is 20. | | x509 CA certificates | Maximum number of x509 CA certificates that can be registered on IoT Hub is 25. | diff --git a/articles/iot-hub/iot-hub-devguide-sdks.md b/articles/iot-hub/iot-hub-devguide-sdks.md index 85d532d37c398..c38e728938219 100644 --- a/articles/iot-hub/iot-hub-devguide-sdks.md +++ b/articles/iot-hub/iot-hub-devguide-sdks.md @@ -15,19 +15,19 @@ ms.custom: [mqtt, 'Role: IoT Device', 'Role: Cloud Development'] There are two categories of software development kits (SDKs) for working with IoT Hub: -* [**IoT Hub Service SDKs**](#azure-iot-hub-service-sdks) enable you to build backend applications to manage your IoT hub, and optionally send messages, schedule jobs, invoke direct methods, or send desired property updates to your IoT devices or modules. +* [**IoT Hub service SDKs**](#azure-iot-hub-service-sdks) enable you to build backend applications to manage your IoT hub, and optionally send messages, schedule jobs, invoke direct methods, or send desired property updates to your IoT devices or modules. -* [**IoT Hub Device SDKs**](../iot-develop/about-iot-sdks.md) enable you to build apps that run on your IoT devices using device client or module client. These apps send telemetry to your IoT hub, and optionally receive messages, job, method, or twin updates from your IoT hub. You can use these SDKs to build device apps that use [Azure IoT Plug and Play](../iot-develop/overview-iot-plug-and-play.md) conventions and models to advertise their capabilities to IoT Plug and Play-enabled applications. You can also use module client to author [modules](../iot-edge/iot-edge-modules.md) for [Azure IoT Edge runtime](../iot-edge/about-iot-edge.md). +* [**IoT Hub device SDKs**](../iot-develop/about-iot-sdks.md) enable you to build apps that run on your IoT devices using device client or module client. These apps send telemetry to your IoT hub, and optionally receive messages, job, method, or twin updates from your IoT hub. 
You can use these SDKs to build device apps that use [Azure IoT Plug and Play](../iot-develop/overview-iot-plug-and-play.md) conventions and models to advertise their capabilities to IoT Plug and Play-enabled applications. You can also use module client to author [modules](../iot-edge/iot-edge-modules.md) for [Azure IoT Edge runtime](../iot-edge/about-iot-edge.md). In addition, we also provide a set of SDKs for working with the [Device Provisioning Service](../iot-dps/about-iot-dps.md). -* **Provisioning Device SDKs** enable you to build apps that run on your IoT devices to communicate with the Device Provisioning Service. +* **Provisioning device SDKs** enable you to build apps that run on your IoT devices to communicate with the Device Provisioning Service. -* **Provisioning Service SDKs** enable you to build backend applications to manage your enrollments in the Device Provisioning Service. +* **Provisioning service SDKs** enable you to build backend applications to manage your enrollments in the Device Provisioning Service. Learn about the [benefits of developing using Azure IoT SDKs](https://azure.microsoft.com/blog/benefits-of-using-the-azure-iot-sdks-in-your-azure-iot-solution/). -## Azure IoT Hub Service SDKs +## Azure IoT Hub service SDKs The Azure IoT service SDKs contain code to facilitate building applications that interact directly with IoT Hub to manage devices and security. @@ -38,44 +38,24 @@ The Azure IoT service SDKs contain code to facilitate building applications that | Node | [npm](https://www.npmjs.com/package/azure-iothub) | [GitHub](https://github.com/Azure/azure-iot-sdk-node) | [Samples](https://github.com/Azure/azure-iot-sdk-node/tree/main/service/samples) | [Reference](/javascript/api/azure-iothub/) | | Python | [pip](https://pypi.org/project/azure-iot-hub) | [GitHub](https://github.com/Azure/azure-iot-sdk-python) | [Samples](https://github.com/Azure/azure-iot-sdk-python/tree/main/azure-iot-hub/samples) | [Reference](/python/api/azure-iot-hub) | -## Microsoft Azure Provisioning SDKs +## Microsoft Azure provisioning SDKs -The **Microsoft Azure Provisioning SDKs** enable you to provision devices to your IoT Hub using the [Device Provisioning Service](../iot-dps/about-iot-dps.md). +The **Microsoft Azure provisioning SDKs** enable you to provision devices to your IoT Hub using the [Device Provisioning Service](../iot-dps/about-iot-dps.md). To learn more about the provisioning SDKs, see [Microsoft SDKs for Device Provisioning Service](../iot-dps/libraries-sdks.md). 
-| Platform | Package | Source code | Reference | -| -----|-----|-----|-----| -| .NET|[Device SDK](https://www.nuget.org/packages/Microsoft.Azure.Devices.Provisioning.Client/), [Service SDK](https://www.nuget.org/packages/Microsoft.Azure.Devices.Provisioning.Service/) |[GitHub](https://github.com/Azure/azure-iot-sdk-csharp/)|[Reference](/dotnet/api/microsoft.azure.devices.provisioning.client) | -| C|[Device SDK](https://github.com/Azure/azure-iot-sdk-c/blob/master/readme.md#packages-and-libraries)|[GitHub](https://github.com/Azure/azure-iot-sdk-c/blob/master/provisioning\_client)|[Reference](/azure/iot-hub/iot-c-sdk-ref/) | -| Java|[Maven](https://github.com/Azure/azure-iot-sdk-java/blob/main/doc/java-devbox-setup.md#for-the-service-sdk)|[GitHub](https://github.com/Azure/azure-iot-sdk-java/blob/main/provisioning)|[Reference](/java/api/com.microsoft.azure.sdk.iot.provisioning.device) | -| Node.js|[Device SDK](https://badge.fury.io/js/azure-iot-provisioning-device), [Service SDK](https://badge.fury.io/js/azure-iot-provisioning-service) |[GitHub](https://github.com/Azure/azure-iot-sdk-node/tree/main/provisioning)|[Reference](/javascript/api/overview/azure/iothubdeviceprovisioning) | -| Python|[Device SDK](https://pypi.org/project/azure-iot-device/), [Service SDK](https://pypi.org/project/azure-iothub-provisioningserviceclient/)|[GitHub](https://github.com/Azure/azure-iot-sdk-python)|[Device Reference](/python/api/azure-iot-device/azure.iot.device.provisioningdeviceclient), [Service Reference](/python/api/azure-mgmt-iothubprovisioningservices) | - -## Azure IoT Hub Device SDKs +## Azure IoT Hub device SDKs The Microsoft Azure IoT device SDKs contain code that facilitates building applications that connect to and are managed by Azure IoT Hub services. -Learn more about the IoT Hub Device SDKS in the [IoT Device Development Documentation](../iot-develop/about-iot-sdks.md). +Learn more about the IoT Hub device SDKS in the [IoT Device Development Documentation](../iot-develop/about-iot-sdks.md). ## SDK and hardware compatibility -For more information about choosing a device SDK, see [Overview of Azure IoT Device SDKs](../iot-develop/about-iot-sdks.md). - For more information about SDK compatibility with specific hardware devices, see the [Azure Certified for IoT device catalog](https://devicecatalog.azure.com/) or individual repository. [!INCLUDE [iot-hub-basic](../../includes/iot-hub-basic-partial.md)] ## Next steps -Relevant docs related to development using the Azure IoT SDKs: - -* Learn about [how to manage connectivity and reliable messaging](iot-hub-reliability-features-in-sdks.md) using the IoT Hub SDKs. -* Learn about how to [develop for mobile platforms](iot-hub-how-to-develop-for-mobile-devices.md) such as iOS and Android. -* [IoT Device Development Documentation](../iot-develop/about-iot-sdks.md) - -Other reference topics in this IoT Hub developer guide include: - -* [IoT Hub endpoints](iot-hub-devguide-endpoints.md) -* [IoT Hub query language for device twins, jobs, and message routing](iot-hub-devguide-query-language.md) -* [Quotas and throttling](iot-hub-devguide-quotas-throttling.md) -* [IoT Hub MQTT support](iot-hub-mqtt-support.md) -* [IoT Hub REST API reference](/rest/api/iothub/) +* Learn how to [manage connectivity and reliable messaging](iot-hub-reliability-features-in-sdks.md) using the IoT Hub SDKs. +* Learn how to [develop for mobile platforms](iot-hub-how-to-develop-for-mobile-devices.md) such as iOS and Android. 
+* Learn how to [develop without an SDK](iot-hub-devguide-no-sdk.md). diff --git a/articles/iot-hub/iot-hub-how-to-clone.md b/articles/iot-hub/iot-hub-how-to-clone.md index 0602bf5a4a9b6..c863512f5349d 100644 --- a/articles/iot-hub/iot-hub-how-to-clone.md +++ b/articles/iot-hub/iot-hub-how-to-clone.md @@ -14,18 +14,18 @@ ms.author: kgremban This article explores ways to clone an IoT Hub and provides some questions you need to answer before you start. Here are several reasons you might want to clone an IoT hub: -* You are moving your company from one region to another, such as from Europe to North America (or vice versa), and you want your resources and data to be geographically close to your new location, so you need to move your hub. +* You're moving your company from one region to another, such as from Europe to North America (or vice versa), and you want your resources and data to be geographically close to your new location, so you need to move your hub. -* You are setting up a hub for a development versus production environment. +* You're setting up a hub for a development versus production environment. * You want to do a custom implementation of multi-hub high availability. For more information, see the [How to achieve cross region HA section of IoT Hub high availability and disaster recovery](iot-hub-ha-dr.md#achieve-cross-region-ha). -* You want to increase the number of [partitions](iot-hub-scaling.md#partitions) configured for your hub. This is set when you first create your hub, and can't be changed. You can use the information in this article to clone your hub and when the clone is created, increase the number of partitions. +* You want to increase the number of [partitions](iot-hub-scaling.md#partitions) configured for your hub. This number is set when you first create your hub, and can't be changed. You can use the information in this article to clone your hub and when the clone is created, increase the number of partitions. To clone a hub, you need a subscription with administrative access to the original hub. You can put the new hub in a new resource group and region, in the same subscription as the original hub, or even in a new subscription. You just can't use the same name because the hub name has to be globally unique. > [!NOTE] -> At this time, there's no feature available for cloning an IoT hub automatically. It's primarily a manual process, and thus is fairly error-prone. The complexity of cloning a hub is directly proportional to the complexity of the hub. For example, cloning an IoT hub with no message routing is fairly simple. If you add message routing as just one complexity, cloning the hub becomes at least an order of magnitude more complicated. If you also move the resources used for routing endpoints, it's another order of magniture more complicated. +> At this time, there's no feature available for cloning an IoT hub automatically. It's primarily a manual process, and thus is fairly error-prone. The complexity of cloning a hub is directly proportional to the complexity of the hub. For example, cloning an IoT hub with no message routing is fairly simple. If you add message routing as just one complexity, cloning the hub becomes at least an order of magnitude more complicated. If you also move the resources used for routing endpoints, it's another order of magnitude more complicated. ## Things to consider @@ -33,17 +33,17 @@ There are several things to consider before cloning an IoT hub. 
* Make sure that all of the features available in the original location are also available in the new location. Some services are in preview, and not all features are available everywhere. -* Do not remove the original resources before creating and verifying the cloned version. Once you remove a hub, it's gone forever, and there is no way to recover it to check the settings or data to make sure the hub is replicated correctly. +* Don't remove the original resources before creating and verifying the cloned version. Once you remove a hub, it's gone forever, and there's no way to recover it to check the settings or data to make sure the hub is replicated correctly. * Many resources require globally unique names, so you must use different names for the cloned versions. You also should use a different name for the resource group to which the cloned hub belongs. -* Data for the original IoT hub is not migrated. This includes telemetry messages, cloud-to-device (C2D) commands, and job-related information such as schedules and history. Metrics and logging results are also not migrated. +* Data for the original IoT hub isn't migrated. This data includes device messages, cloud-to-device (C2D) commands, and job-related information such as schedules and history. Metrics and logging results are also not migrated. * For data or messages routed to Azure Storage, you can leave the data in the original storage account, transfer that data to a new storage account in the new region, or leave the old data in place and create a new storage account in the new location for the new data. For more information on moving data in Blob storage, see [Get started with AzCopy](../storage/common/storage-use-azcopy-v10.md). -* Data for Event Hubs and for Service Bus Topics and Queues can't be migrated. This is point-in-time data and is not stored after the messages are processed. +* Data for Event Hubs and for Service Bus Topics and Queues can't be migrated. This data is point-in-time data and isn't stored after the messages are processed. -* You need to schedule downtime for the migration. Cloning the devices to the new hub takes time. If you are using the Import/Export method, benchmark testing has revealed that it could take around two hours to move 500,000 devices, and four hours to move a million devices. +* You need to schedule downtime for the migration. Cloning the devices to the new hub takes time. If you use the Import/Export method, benchmark testing has revealed that it could take around two hours to move 500,000 devices, and four hours to move a million devices. * You can copy the devices to the new hub without shutting down or changing the devices. @@ -51,39 +51,39 @@ There are several things to consider before cloning an IoT hub. * Otherwise, you have to use the Import/Export method to move the devices, and then the devices have to be modified to use the new hub. For example, you can set up your device to consume the IoT Hub host name from the twin desired properties. The device will take that IoT Hub host name, disconnect the device from the old hub, and reconnect it to the new one. -* You need to update any certificates you are using so you can use them with the new resources. Also, you probably have the hub defined in a DNS table somewhere — you will need to update that DNS information. +* You need to update any certificates so you can use them with the new resources. Also, you probably have the hub defined in a DNS table somewhere and need to update that DNS information. 
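
One of the bullets above suggests having devices read the new IoT Hub host name from the device twin's desired properties. As a rough sketch of how you might stage that hand-off from the service side with the Azure CLI, you could write the new host name into each device's twin on the original hub before the cutover. The property name `targetHubHostName` and the placeholder names in braces are illustrative only, not built-in settings:

```azurecli
# Illustrative only: stage the new hub's host name in a desired property so that
# device code that watches the twin can disconnect and reconnect to the new hub.
az iot hub device-twin update \
    --hub-name {OldHubName} \
    --device-id {DeviceId} \
    --desired '{"targetHubHostName": "{NewHubName}.azure-devices.net"}'
```

The device-side logic that reacts to this property is something you implement in your own device code; nothing in IoT Hub acts on it automatically.
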
## Methodology -This is the general method we recommend for moving an IoT hub from one region to another. For message routing, this assumes the resources are not being moved to the new region. For more information, see the [section on Message Routing](#how-to-handle-message-routing). +This is the general method we recommend for moving an IoT hub from one region to another. For message routing, this assumes the resources aren't being moved to the new region. For more information, see the [section on Message Routing](#how-to-handle-message-routing). + + 1. Export the hub and its settings to a Resource Manager template. - 1. Export the hub and its settings to a Resource Manager template. - 1. Make the necessary changes to the template, such as updating all occurrences of the name and the location for the cloned hub. For any resources in the template used for message routing endpoints, update the key in the template for that resource. - - 1. Import the template into a new resource group in the new location. This creates the clone. - 1. Debug as needed. - - 1. Add anything that wasn't exported to the template. - - For example, consumer groups are not exported to the template. You need to add the consumer groups to the template manually or use the [Azure portal](https://portal.azure.com) after the hub is created. There is an example of adding one consumer group to a template in the article [Use an Azure Resource Manager template to configure IoT Hub message routing](tutorial-routing-config-message-routing-rm-template.md). - - 1. Copy the devices from the original hub to the clone. This is covered in the section [Managing the devices registered to the IoT hub](#managing-the-devices-registered-to-the-iot-hub). + 1. Import the template into a new resource group in the new location. This step creates the clone. + + 1. Debug as needed. + + 1. Add anything that wasn't exported to the template. + + For example, consumer groups aren't exported to the template. You need to add the consumer groups to the template manually or use the [Azure portal](https://portal.azure.com) after the hub is created. + + 1. Copy the devices from the original hub to the clone. This process is covered in the section [Managing the devices registered to the IoT hub](#managing-the-devices-registered-to-the-iot-hub). ## How to handle message routing -If your hub uses [custom routing](iot-hub-devguide-messages-read-custom.md), exporting the template for the hub includes the routing configuration, but it does not include the resources themselves. You must choose whether to move the routing resources to the new location or to leave them in place and continue to use them "as is". +If your hub uses [custom routing](iot-hub-devguide-messages-read-custom.md), exporting the template for the hub includes the routing configuration, but it doesn't include the resources themselves. You must choose whether to move the routing resources to the new location or to leave them in place and continue to use them "as is". For example, say you have a hub in West US that is routing messages to a storage account (also in West US), and you want to move the hub to East US. You can move the hub and have it still route messages to the storage account in West US, or you can move the hub and also move the storage account. There may be a small performance hit from routing messages to endpoint resources in a different region. -You can move a hub that uses message routing pretty easily if you do not also move the resources used for the routing endpoints. 
+You can move a hub that uses message routing easily if you don't also move the resources used for the routing endpoints. If the hub uses message routing, you have two choices. 1. Move the resources used for the routing endpoints to the new location. - * You must create the new resources yourself either manually in the [Azure portal](https://portal.azure.com) or through the use of Resource Manager templates. + * You must create the new resources yourself either manually in the [Azure portal](https://portal.azure.com) or by using Resource Manager templates. * You must rename all of the resources when you create them in the new location, as they have globally unique names. @@ -91,11 +91,11 @@ If the hub uses message routing, you have two choices. 1. Don't move the resources used for the routing endpoints. Use them "in place". - * In the step where you edit the template, you will need to retrieve the keys for each routing resource and put them in the template before you create the new hub. + * In the step where you edit the template, you need to retrieve the keys for each routing resource and put them in the template before you create the new hub. * The hub still references the original routing resources and routes messages to them as configured. - * You will have a small performance hit because the hub and the routing endpoint resources are not in the same location. + * You'll have a small performance hit because the hub and the routing endpoint resources aren't in the same location. ## Prepare to migrate the hub to another region @@ -119,7 +119,7 @@ This section provides specific instructions for migrating the hub. 1. Go to the Downloads folder (or to whichever folder you used when you exported the template) and find the zip file. Extract the zip file and find the file called `template.json`. Select and copy it. Go to a different folder and paste the template file (Ctrl+V). Now you can edit it. - The following example is for a generic hub with no routing configuration. It is an S1 tier hub (with 1 unit) called **ContosoHub** in region **westus**. Here is the exported template. + The following example is for a generic hub with no routing configuration. It's an S1 tier hub (with 1 unit) called **ContosoHub** in region **westus**: ``` json { @@ -217,7 +217,7 @@ You have to make some changes before you can use the template to create the new #### Edit the hub name and location -1. Remove the container name parameter section at the top. **ContosoHub** does not have an associated container. +1. Remove the container name parameter section at the top. **ContosoHub** doesn't have an associated container. ``` json "parameters": { @@ -258,9 +258,9 @@ You have to make some changes before you can use the template to create the new ``` json "location": "eastus", ``` -#### Update the keys for the routing resources that are not being moved +#### Update the keys for the routing resources that aren't being moved -When you export the Resource Manager template for a hub that has routing configured, you will see that the keys for those resources are not provided in the exported template -- their placement is denoted by asterisks. You must fill them in by going to those resources in the portal and retrieving the keys **before** you import the new hub's template and create the hub. +When you export the Resource Manager template for a hub that has routing configured, you will see that the keys for those resources aren't provided in the exported template. Their placement is denoted by asterisks. 
You must fill them in by going to those resources in the portal and retrieving the keys **before** you import the new hub's template and create the hub. 1. Retrieve the keys required for any of the routing resources and put them in the template. You can retrieve the key(s) from the resource in the [Azure portal](https://portal.azure.com). @@ -274,7 +274,7 @@ When you export the Resource Manager template for a hub that has routing configu 1. After you retrieve the account key for the storage account, put it in the template in the clause `AccountKey=****` in the place of the asterisks. -1. For service bus queues, get the Shared Access Key matching the SharedAccessKeyName. Here is the key and the `SharedAccessKeyName` in the json: +1. For service bus queues, get the Shared Access Key matching the SharedAccessKeyName. Here's the key and the `SharedAccessKeyName` in the json: ```json "connectionString": "Endpoint=sb://fabrikamsbnamespace1234.servicebus.windows.net:5671/; @@ -283,7 +283,7 @@ When you export the Resource Manager template for a hub that has routing configu EntityPath=fabrikamsbqueue1234", ``` -1. The same applies for the Service Bus Topics and Event Hub connections. +1. The same applies for the Service Bus Topics and Event Hubs connections. #### Create the new routing resources in the new location @@ -307,7 +307,7 @@ If you want to move the routing resources, you must manually set up the resource Now you have a template that will create a new hub that looks almost exactly like the old hub, depending on how you decided to handle the routing. -## Move -- create the new hub in the new region by loading the template +## Create the new hub in the new region by loading the template Create the new hub in the new location using the template. If you have routing resources that are going to move, the resources should be set up in the new location and the references in the template updated to match. If you are not moving the routing resources, they should be in the template with the updated keys. @@ -355,9 +355,9 @@ Create the new hub in the new location using the template. If you have routing r Now that you have your clone up and running, you need to copy all of the devices from the original hub to the clone. -There are multiple ways to accomplish this. You either originally used [Device Provisioning Service (DPS)](../iot-dps/about-iot-dps.md)to provision the devices, or you didn't. If you did, this is not difficult. If you did not, this can be very complicated. +There are multiple ways to copy the devices. You either originally used [Device Provisioning Service (DPS)](../iot-dps/about-iot-dps.md) to provision the devices, or you didn't. If you did, this process isn't difficult. If you didn't, this process can be complicated. -If you did not use DPS to provision your devices, you can skip the next section and start with [Using Import/Export to move the devices to the new hub](#using-import-export-to-move-the-devices-to-the-new-hub). +If you didn't use DPS to provision your devices, you can skip the next section and start with [Using Import/Export to move the devices to the new hub](#using-import-export-to-move-the-devices-to-the-new-hub). ## Using DPS to re-provision the devices in the new hub @@ -395,15 +395,15 @@ The application targets .NET Core, so you can run it on either Windows or Linux. Here are the five options you specify when you run the application. We'll put these on the command line in a minute. 
-* **addDevices** (argument 1) -- set this to true if you want to add virtual devices that are generated for you. These are added to the source hub. Also, set **numToAdd** (argument 2) to specify how many devices you want to add. The maximum number of devices you can register to a hub is one million.The purpose of this option is for testing -- you can generate a specific number of devices, and then copy them to another hub. +* **addDevices** (argument 1) -- set this to true if you want to add virtual devices that are generated for you. These are added to the source hub. Also, set **numToAdd** (argument 2) to specify how many devices you want to add. The maximum number of devices you can register to a hub is one million. The purpose of this option is for testing. You can generate a specific number of devices, and then copy them to another hub. * **copyDevices** (argument 3) -- set this to true to copy the devices from one hub to another. -* **deleteSourceDevices** (argument 4) -- set this to true to delete all of the devices registered to the source hub. We recommending waiting until you are certain all of the devices have been transferred before you run this. Once you delete the devices, you can't get them back. +* **deleteSourceDevices** (argument 4) -- set this to true to delete all of the devices registered to the source hub. We recommend waiting until you are certain all of the devices have been transferred before you run this. Once you delete the devices, you can't get them back. * **deleteDestDevices** (argument 5) -- set this to true to delete all of the devices registered to the destination hub (the clone). You might want to do this if you want to copy the devices more than once. -The basic command will be *dotnet run* -- this tells .NET to build the local csproj file and then run it. You add your command-line arguments to the end before you run it. +The basic command is *dotnet run*, which tells .NET to build the local csproj file and then run it. You add your command-line arguments to the end before you run it. Your command-line will look like these examples: @@ -425,17 +425,17 @@ Your command-line will look like these examples: 1. To get the connection string values, sign in to the [Azure portal](https://portal.azure.com). -1. Put the connection strings somewhere you can retrieve them, such as NotePad. If you copy the following, you can paste the connection strings in directly where they go. Don't add spaces around the equal sign, or it changes the variable name. Also, you do not need double-quotes around the connection strings. If you put quotes around the storage account connection string, it won't work. +1. Put the connection strings somewhere you can retrieve them, such as NotePad. If you copy the following, you can paste the connection strings in directly where they go. Don't add spaces around the equal sign, or it changes the variable name. Also, you don't need double-quotes around the connection strings. If you put quotes around the storage account connection string, it won't work. 
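
   If you'd rather not copy the values out of the portal, you can usually pull the same connection strings with the Azure CLI. The following is a sketch under assumed names; the `iothubowner` policy is shown only as an example, and the names in braces are placeholders for your own resources:

   ```azurecli
   # Source and destination hub connection strings (iothubowner policy used as an example)
   az iot hub connection-string show --hub-name {OldHubName} --policy-name iothubowner
   az iot hub connection-string show --hub-name {NewHubName} --policy-name iothubowner

   # Connection string for the storage account used by the import/export job
   az storage account show-connection-string --name {StorageAccountName} --resource-group {ResourceGroupName}
   ```
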
- For Windows, this is how you set the environment variables: + Set the environment variables in Windows: ``` console SET IOTHUB_CONN_STRING= SET DEST_IOTHUB_CONN_STRING= SET STORAGE_ACCT_CONN_STRING= ``` - - For Linux, this is how you define the environment variables: + + Set the environment variables in Linux: ``` console export IOTHUB_CONN_STRING="" @@ -517,11 +517,11 @@ Now you have the environment variables in a file with the SET commands, and you You can view the devices in the [Azure portal](https://portal.azure.com) and verify they are in the new location. -1. Go to the new hub using the [Azure portal](https://portal.azure.com). Select your hub, then select **IoT Devices**. You see the devices you just copied from the old hub to the cloned hub. You can also view the properties for the cloned hub. +1. Go to the new hub using the [Azure portal](https://portal.azure.com). Select your hub, then select **IoT Devices**. You see the devices you copied from the old hub to the cloned hub. You can also view the properties for the cloned hub. 1. Check for import/export errors by going to the Azure storage account in the [Azure portal](https://portal.azure.com) and looking in the `devicefiles` container for the `ImportErrors.log`. If this file is empty (the size is 0), there were no errors. If you try to import the same device more than once, it rejects the device the second time and adds an error message to the log file. -### Committing the changes +### Commit the changes At this point, you have copied your hub to the new location and migrated the devices to the new clone. Now you need to make changes so the devices work with the cloned hub. @@ -553,7 +553,7 @@ If you have implemented routing, test and make sure your messages are routed to ## Clean-up -Don't clean up until you are really certain the new hub is up and running and the devices are working correctly. Also be sure to test the routing if you are using that feature. When you're ready, clean up the old resources by performing these steps: +Don't clean up until you are certain the new hub is up and running and the devices are working correctly. Also be sure to test the routing if you are using that feature. When you're ready, clean up the old resources by performing these steps: * If you haven't already, delete the old hub. This removes all of the active devices from the hub. @@ -563,7 +563,7 @@ Don't clean up until you are really certain the new hub is up and running and th You have cloned an IoT hub into a new hub in a new region, complete with the devices. For more information about performing bulk operations against the identity registry in an IoT Hub, see [Import and export IoT Hub device identities in bulk](iot-hub-bulk-identity-mgmt.md). -For more information about IoT Hub and development for the hub, please see the following articles. +For more information about IoT Hub and development for the hub, see the following articles: * [IoT Hub developer's guide](iot-hub-devguide.md) @@ -571,4 +571,4 @@ For more information about IoT Hub and development for the hub, please see the f * [IoT Hub device management overview](iot-hub-device-management-overview.md) -* If you want to deploy the sample application, please see [.NET Core application deployment](/dotnet/core/deploying/index). \ No newline at end of file +If you want to deploy the sample application, see [.NET Core application deployment](/dotnet/core/deploying/index). 
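
For readers who prefer to stay in the CLI rather than the portal for the export, redeploy, and verification steps described above, a rough end-to-end sketch might look like the following. The names in braces are placeholders for the resources used earlier in this article, and the commands assume the azure-iot CLI extension is installed:

```azurecli
# Export the original hub's resource group to a Resource Manager template
az group export --name {OriginalResourceGroup} > template.json

# After editing the template (name, location, endpoint keys), deploy it to the new resource group
az group create --name {NewResourceGroup} --location eastus
az deployment group create --resource-group {NewResourceGroup} --template-file template.json

# Consumer groups aren't exported, so re-create any you need on the cloned hub
az iot hub consumer-group create --hub-name {NewHubName} --name {ConsumerGroupName}

# After copying the devices, verify the identities arrived and check the import log
az iot hub device-identity list --hub-name {NewHubName} --output table
az storage blob download --account-name {StorageAccountName} --container-name devicefiles \
    --name ImportErrors.log --file ./ImportErrors.log --auth-mode login
```

Treat this as a starting point only; the portal-based steps above remain the documented path, and the template edits (names, location, endpoint keys) still have to be made by hand before the deployment step.
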
diff --git a/articles/iot-hub/iot-hub-rm-rest.md b/articles/iot-hub/iot-hub-rm-rest.md index a642df7b7c8c7..0bd92718224e1 100644 --- a/articles/iot-hub/iot-hub-rm-rest.md +++ b/articles/iot-hub/iot-hub-rm-rest.md @@ -39,7 +39,7 @@ To complete this tutorial, you need the following: 4. In NuGet Package Manager, search for **Microsoft.IdentityModel.Clients.ActiveDirectory**. Click **Install**, in **Review Changes** click **OK**, then click **I Accept** to accept the license. > [!IMPORTANT] - > The [Microsoft.IdentityModel.Clients.ActiveDirectory](https://www.nuget.org/packages/Microsoft.IdentityModel.Clients.ActiveDirectory) NuGet package and Azure AD Authentication Library (ADAL) have been deprecated. No new features have been added since June 30, 2020. We strongly encourage you to upgrade, see the [migration guide](/azure/active-directory/develop/msal-migration) for more details. + > The [Microsoft.IdentityModel.Clients.ActiveDirectory](https://www.nuget.org/packages/Microsoft.IdentityModel.Clients.ActiveDirectory) NuGet package and Azure AD Authentication Library (ADAL) have been deprecated. No new features have been added since June 30, 2020. We strongly encourage you to upgrade, see the [migration guide](../active-directory/develop/msal-migration.md) for more details. 5. In Program.cs, replace the existing **using** statements with the following code: diff --git a/articles/iot-hub/iot-hub-rm-template.md b/articles/iot-hub/iot-hub-rm-template.md index aab3a6e9bbfff..1860a0b34406f 100644 --- a/articles/iot-hub/iot-hub-rm-template.md +++ b/articles/iot-hub/iot-hub-rm-template.md @@ -42,7 +42,7 @@ To complete this tutorial, you need the following: 4. In NuGet Package Manager, search for **Microsoft.IdentityModel.Clients.ActiveDirectory**. Click **Install**, in **Review Changes** click **OK**, then click **I Accept** to accept the license. > [!IMPORTANT] - > The [Microsoft.IdentityModel.Clients.ActiveDirectory](https://www.nuget.org/packages/Microsoft.IdentityModel.Clients.ActiveDirectory) NuGet package and Azure AD Authentication Library (ADAL) have been deprecated. No new features have been added since June 30, 2020. We strongly encourage you to upgrade, see the [migration guide](/azure/active-directory/develop/msal-migration) for more details. + > The [Microsoft.IdentityModel.Clients.ActiveDirectory](https://www.nuget.org/packages/Microsoft.IdentityModel.Clients.ActiveDirectory) NuGet package and Azure AD Authentication Library (ADAL) have been deprecated. No new features have been added since June 30, 2020. We strongly encourage you to upgrade, see the [migration guide](../active-directory/develop/msal-migration.md) for more details. 5. 
In Program.cs, replace the existing **using** statements with the following code: diff --git a/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-button.png b/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-button.png index a3dee31dd44c9..0616f30a928ee 100644 Binary files a/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-button.png and b/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-button.png differ diff --git a/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-device-twin-show-update.png b/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-device-twin-show-update.png new file mode 100644 index 0000000000000..e2b51b6fdbb6b Binary files /dev/null and b/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-device-twin-show-update.png differ diff --git a/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-device-twin-update.png b/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-device-twin-update.png new file mode 100644 index 0000000000000..966793fb30540 Binary files /dev/null and b/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-device-twin-update.png differ diff --git a/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-environment.png b/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-environment.png index 2ef06339e1601..f867d963f2439 100644 Binary files a/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-environment.png and b/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-environment.png differ diff --git a/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-method-payload.png b/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-method-payload.png new file mode 100644 index 0000000000000..4f1e7db3fb6d4 Binary files /dev/null and b/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-method-payload.png differ diff --git a/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-monitor.png b/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-monitor.png index 850707bc4115c..d1ce3ff90217f 100644 Binary files a/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-monitor.png and b/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-monitor.png differ diff --git a/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-new-session.png b/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-new-session.png index 40183eeb48047..d06e023998379 100644 Binary files a/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-new-session.png and b/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-new-session.png differ diff --git a/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-receive-message.png b/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-receive-message.png index ec695a814ee1f..8f32c549723d6 100644 Binary files a/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-receive-message.png and b/articles/iot-hub/media/quickstart-send-telemetry-cli/cloud-shell-receive-message.png differ diff --git a/articles/iot-hub/media/tutorial-message-enrichments/select-storage-explorer.png b/articles/iot-hub/media/tutorial-message-enrichments/select-storage-explorer.png deleted file mode 100644 index 9d5cdf39d0ec7..0000000000000 Binary files a/articles/iot-hub/media/tutorial-message-enrichments/select-storage-explorer.png 
and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-message-enrichments/show-blob-containers.png b/articles/iot-hub/media/tutorial-message-enrichments/show-blob-containers.png index b27e7305d24fe..b125bcdfe658a 100644 Binary files a/articles/iot-hub/media/tutorial-message-enrichments/show-blob-containers.png and b/articles/iot-hub/media/tutorial-message-enrichments/show-blob-containers.png differ diff --git a/articles/iot-hub/media/tutorial-routing-config-message-routing-RM-template/CloudShell_upload_files.png b/articles/iot-hub/media/tutorial-routing-config-message-routing-RM-template/CloudShell_upload_files.png deleted file mode 100644 index 4b91a8e037ad8..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-config-message-routing-RM-template/CloudShell_upload_files.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-config-message-routing-RM-template/CloudShell_upload_results.png b/articles/iot-hub/media/tutorial-routing-config-message-routing-RM-template/CloudShell_upload_results.png deleted file mode 100644 index 3a940cdd5e4f5..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-config-message-routing-RM-template/CloudShell_upload_results.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/bi-chart-temp-humidity.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/bi-chart-temp-humidity.png deleted file mode 100644 index 284e1d48266f8..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/bi-chart-temp-humidity.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/bi-personal-workspace.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/bi-personal-workspace.png deleted file mode 100644 index d4b488779cb9b..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/bi-personal-workspace.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/bi-temperature-chart.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/bi-temperature-chart.png deleted file mode 100644 index 142ee40b4ea56..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/bi-temperature-chart.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/create-logic-app.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/create-logic-app.png deleted file mode 100644 index 6f6531d317f42..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/create-logic-app.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-connection.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-connection.png deleted file mode 100644 index dd7e7adfe29ba..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-connection.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-connectors.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-connectors.png deleted file mode 100644 index da6a7836514e3..0000000000000 Binary files 
a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-connectors.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-define-connection.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-define-connection.png deleted file mode 100644 index d65d65d5f978b..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-define-connection.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-finish-connection.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-finish-connection.png deleted file mode 100644 index dcf7526810b33..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-finish-connection.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-queue-options.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-queue-options.png deleted file mode 100644 index 91e91577168e3..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-queue-options.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-select-outlook.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-select-outlook.png deleted file mode 100644 index 93f277e491a57..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-select-outlook.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-send-email.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-send-email.png deleted file mode 100644 index 9d7e8bf2bcca1..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-send-email.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-triggers.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-triggers.png deleted file mode 100644 index be97d38c77877..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/logic-app-triggers.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/power-bi-report.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/power-bi-report.png deleted file mode 100644 index 50f90cea7ef94..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/power-bi-report.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/power-bi-visualizations-and-fields.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/power-bi-visualizations-and-fields.png deleted file mode 100644 index 5c61560352b03..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/power-bi-visualizations-and-fields.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/results-in-email.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/results-in-email.png deleted file 
mode 100644 index 3a221e6c320e0..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/results-in-email.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/results-in-storage.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/results-in-storage.png deleted file mode 100644 index df4e68eaaa8d6..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/results-in-storage.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/stream-analytics-create-job.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/stream-analytics-create-job.png deleted file mode 100644 index 67e41708f1646..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/stream-analytics-create-job.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/stream-analytics-job-inputs.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/stream-analytics-job-inputs.png deleted file mode 100644 index 46470f1b2422a..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/stream-analytics-job-inputs.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/stream-analytics-job-outputs.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/stream-analytics-job-outputs.png deleted file mode 100644 index 9c65a6eb9b9be..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/stream-analytics-job-outputs.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/stream-analytics-job-query.png b/articles/iot-hub/media/tutorial-routing-view-message-routing-results/stream-analytics-job-query.png deleted file mode 100644 index 992bbff9d70cf..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing-view-message-routing-results/stream-analytics-job-query.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing/01-add-a-route-to-storage.png b/articles/iot-hub/media/tutorial-routing/01-add-a-route-to-storage.png deleted file mode 100644 index 4f8cd5fde0608..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing/01-add-a-route-to-storage.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing/02-add-a-storage-endpoint.png b/articles/iot-hub/media/tutorial-routing/02-add-a-storage-endpoint.png deleted file mode 100644 index bb8f105c81199..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing/02-add-a-storage-endpoint.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing/03-select-a-storage-container.png b/articles/iot-hub/media/tutorial-routing/03-select-a-storage-container.png deleted file mode 100644 index ba16dc2e357a4..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing/03-select-a-storage-container.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing/04-save-storage-route.png b/articles/iot-hub/media/tutorial-routing/04-save-storage-route.png deleted file mode 100644 index bc3c5cec1754a..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing/04-save-storage-route.png and /dev/null differ diff --git 
a/articles/iot-hub/media/tutorial-routing/05-setup-sbq-endpoint.png b/articles/iot-hub/media/tutorial-routing/05-setup-sbq-endpoint.png deleted file mode 100644 index 4c4d6d671ccfd..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing/05-setup-sbq-endpoint.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing/06-save-sbq-endpoint.png b/articles/iot-hub/media/tutorial-routing/06-save-sbq-endpoint.png deleted file mode 100644 index 1e91f7ab33820..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing/06-save-sbq-endpoint.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing/07-save-servicebusqueue-route.png b/articles/iot-hub/media/tutorial-routing/07-save-servicebusqueue-route.png deleted file mode 100644 index 6ffd7476ce506..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing/07-save-servicebusqueue-route.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing/08-show-both-routes.png b/articles/iot-hub/media/tutorial-routing/08-show-both-routes.png deleted file mode 100644 index 9e335478a4a4d..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing/08-show-both-routes.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing/09-show-custom-endpoints.png b/articles/iot-hub/media/tutorial-routing/09-show-custom-endpoints.png deleted file mode 100644 index 3f3a3476c2560..0000000000000 Binary files a/articles/iot-hub/media/tutorial-routing/09-show-custom-endpoints.png and /dev/null differ diff --git a/articles/iot-hub/media/tutorial-routing/add-device.png b/articles/iot-hub/media/tutorial-routing/add-device.png new file mode 100644 index 0000000000000..c4eeee9b689a1 Binary files /dev/null and b/articles/iot-hub/media/tutorial-routing/add-device.png differ diff --git a/articles/iot-hub/media/tutorial-routing/add-route.png b/articles/iot-hub/media/tutorial-routing/add-route.png new file mode 100644 index 0000000000000..ed3ca2a4a0fc7 Binary files /dev/null and b/articles/iot-hub/media/tutorial-routing/add-route.png differ diff --git a/articles/iot-hub/media/tutorial-routing/add-storage-endpoint.png b/articles/iot-hub/media/tutorial-routing/add-storage-endpoint.png new file mode 100644 index 0000000000000..34df00a39a243 Binary files /dev/null and b/articles/iot-hub/media/tutorial-routing/add-storage-endpoint.png differ diff --git a/articles/iot-hub/media/tutorial-routing/copy-device-key.png b/articles/iot-hub/media/tutorial-routing/copy-device-key.png new file mode 100644 index 0000000000000..8f4e6cbcb2ce1 Binary files /dev/null and b/articles/iot-hub/media/tutorial-routing/copy-device-key.png differ diff --git a/articles/iot-hub/media/tutorial-routing/copy-iothubowner-connection-string.png b/articles/iot-hub/media/tutorial-routing/copy-iothubowner-connection-string.png new file mode 100644 index 0000000000000..0b8dc279c3747 Binary files /dev/null and b/articles/iot-hub/media/tutorial-routing/copy-iothubowner-connection-string.png differ diff --git a/articles/iot-hub/media/tutorial-routing/create-storage-account.png b/articles/iot-hub/media/tutorial-routing/create-storage-account.png new file mode 100644 index 0000000000000..00052cf48a645 Binary files /dev/null and b/articles/iot-hub/media/tutorial-routing/create-storage-account.png differ diff --git a/articles/iot-hub/media/tutorial-routing/create-storage-container.png b/articles/iot-hub/media/tutorial-routing/create-storage-container.png new file mode 100644 index 0000000000000..ee17a0062fad8 
Binary files /dev/null and b/articles/iot-hub/media/tutorial-routing/create-storage-container.png differ diff --git a/articles/iot-hub/media/tutorial-routing/create-storage-endpoint.png b/articles/iot-hub/media/tutorial-routing/create-storage-endpoint.png new file mode 100644 index 0000000000000..bc1b3f3b12827 Binary files /dev/null and b/articles/iot-hub/media/tutorial-routing/create-storage-endpoint.png differ diff --git a/articles/iot-hub/media/tutorial-routing/create-storage-route.png b/articles/iot-hub/media/tutorial-routing/create-storage-route.png new file mode 100644 index 0000000000000..917bbd24e6064 Binary files /dev/null and b/articles/iot-hub/media/tutorial-routing/create-storage-route.png differ diff --git a/articles/iot-hub/media/tutorial-routing/iot-explorer-add-connection.png b/articles/iot-hub/media/tutorial-routing/iot-explorer-add-connection.png new file mode 100644 index 0000000000000..9c27ca68d174f Binary files /dev/null and b/articles/iot-hub/media/tutorial-routing/iot-explorer-add-connection.png differ diff --git a/articles/iot-hub/media/tutorial-routing/iot-explorer-start-monitoring-telemetry.png b/articles/iot-hub/media/tutorial-routing/iot-explorer-start-monitoring-telemetry.png new file mode 100644 index 0000000000000..fcd3f88c9159e Binary files /dev/null and b/articles/iot-hub/media/tutorial-routing/iot-explorer-start-monitoring-telemetry.png differ diff --git a/articles/iot-hub/media/tutorial-routing/iot-explorer-view-messages.png b/articles/iot-hub/media/tutorial-routing/iot-explorer-view-messages.png new file mode 100644 index 0000000000000..8d17d7417e35e Binary files /dev/null and b/articles/iot-hub/media/tutorial-routing/iot-explorer-view-messages.png differ diff --git a/articles/iot-hub/media/tutorial-routing/iothubowner-access-policy.png b/articles/iot-hub/media/tutorial-routing/iothubowner-access-policy.png new file mode 100644 index 0000000000000..28d5e3c1ced03 Binary files /dev/null and b/articles/iot-hub/media/tutorial-routing/iothubowner-access-policy.png differ diff --git a/articles/iot-hub/media/tutorial-routing/view-messages-in-storage.png b/articles/iot-hub/media/tutorial-routing/view-messages-in-storage.png new file mode 100644 index 0000000000000..1189ff55eac71 Binary files /dev/null and b/articles/iot-hub/media/tutorial-routing/view-messages-in-storage.png differ diff --git a/articles/iot-hub/quickstart-send-telemetry-cli.md b/articles/iot-hub/quickstart-send-telemetry-cli.md index 8364ea09c2dc1..94b9001ea50ba 100644 --- a/articles/iot-hub/quickstart-send-telemetry-cli.md +++ b/articles/iot-hub/quickstart-send-telemetry-cli.md @@ -1,22 +1,22 @@ --- title: Quickstart - Send telemetry to Azure IoT Hub (CLI) quickstart -description: This quickstart shows developers new to IoT Hub how to get started by using the Azure CLI to create an IoT hub, send telemetry, and view messages between a device and the hub. +description: This quickstart shows developers new to IoT Hub how to get started by using the Azure CLI to create an IoT hub, send telemetry, and view messages between a device and the hub. 
ms.service: iot-hub ms.topic: quickstart ms.custom: [iot-send-telemetry-cli, iot-p0-scenario, "Role: Cloud Development", devx-track-azurecli, mode-api] ms.author: timlt author: timlt -ms.date: 03/24/2022 +ms.date: 05/26/2022 --- # Quickstart: Send telemetry from a device to an IoT hub and monitor it with the Azure CLI -IoT Hub is an Azure service that enables you to ingest high volumes of telemetry from your IoT devices into the cloud for storage or processing. In this quickstart, you use the Azure CLI to create an IoT Hub and a simulated device, send device telemetry to the hub, and send a cloud-to-device message. You also use the Azure portal to visualize device metrics. This is a basic workflow for developers who use the CLI to interact with an IoT Hub application. +IoT Hub is an Azure service that enables you to ingest high volumes of telemetry from your IoT devices into the cloud for storage or processing. In this codeless quickstart, you use the Azure CLI to create an IoT Hub and a simulated device. You'll send device telemetry to the hub, and send messages, call methods, and update properties on the device. You'll also use the Azure portal to visualize device metrics. This article shows a basic workflow for developers who use the CLI to interact with an IoT Hub application. ## Prerequisites - If you don't have an Azure subscription, [create one for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. -- Azure CLI. You can run all commands in this quickstart using the Azure Cloud Shell, an interactive CLI shell that runs in your browser. If you use the Cloud Shell, you don't need to install anything. If you prefer to use the CLI locally, this quickstart requires Azure CLI version 2.0.76 or later. Run `az --version` to find the version. To install or upgrade, see [Install Azure CLI](/cli/azure/install-azure-cli). +- Azure CLI. You can run all commands in this quickstart using the Azure Cloud Shell, an interactive CLI shell that runs in your browser or in an app such as Windows Terminal. If you use the Cloud Shell, you don't need to install anything. If you prefer to use the CLI locally, this quickstart requires Azure CLI version 2.36 or later. Run `az --version` to find the version. To install or upgrade, see [Install Azure CLI](/cli/azure/install-azure-cli). ## Sign in to the Azure portal @@ -37,17 +37,20 @@ To launch the Cloud Shell: > [!NOTE] > If this is the first time you've used the Cloud Shell, it prompts you to create storage, which is required to use the Cloud Shell. Select a subscription to create a storage account and Microsoft Azure Files share. -2. Select your preferred CLI environment in the **Select environment** dropdown. This quickstart uses the **Bash** environment. All the following CLI commands work in the PowerShell environment too. - +2. Select your preferred CLI environment in the **Select environment** dropdown. This quickstart uses the **Bash** environment. All the following CLI commands work in PowerShell too. ![Select CLI environment](media/quickstart-send-telemetry-cli/cloud-shell-environment.png) ## Prepare two CLI sessions -In this section, you prepare two Azure CLI sessions. If you're using the Cloud Shell, you will run the two sessions in separate browser tabs. If using a local CLI client, you will run two separate CLI instances. You'll use the first session as a simulated device, and the second session to monitor and send messages. 
To run a command, select **Copy** to copy a block of code in this quickstart, paste it into your shell session, and run it. +Next, you prepare two Azure CLI sessions. If you're using the Cloud Shell, you'll run these sessions in separate Cloud Shell tabs. If using a local CLI client, you'll run separate CLI instances. Use the separate CLI sessions for the following tasks: +- The first session simulates an IoT device that communicates with your IoT hub. +- The second session either monitors the device in the first session, or sends messages, commands, and property updates. + +To run a command, select **Copy** to copy a block of code in this quickstart, paste it into your shell session, and run it. -Azure CLI requires you to be logged into your Azure account. All communication between your Azure CLI shell session and your IoT hub is authenticated and encrypted. As a result, this quickstart does not need additional authentication that you'd use with a real device, such as a connection string. +Azure CLI requires you to be logged into your Azure account. All communication between your Azure CLI shell session and your IoT hub is authenticated and encrypted. As a result, this quickstart doesn't need extra authentication that you'd use with a real device, such as a connection string. -- Run the [az extension add](/cli/azure/extension#az-extension-add) command to add the Microsoft Azure IoT Extension for Azure CLI to your CLI shell. The IOT Extension adds IoT Hub, IoT Edge, and IoT Device Provisioning Service (DPS) specific commands to Azure CLI. +- In the first CLI session, run the [az extension add](/cli/azure/extension#az-extension-add) command. The command adds the Microsoft Azure IoT Extension for Azure CLI to your CLI shell. The IOT Extension adds IoT Hub, IoT Edge, and IoT Device Provisioning Service (DPS) specific commands to Azure CLI. ```azurecli az extension add --name azure-iot @@ -57,7 +60,7 @@ Azure CLI requires you to be logged into your Azure account. All communication b [!INCLUDE [iot-hub-cli-version-info](../../includes/iot-hub-cli-version-info.md)] -- Open a second CLI session. If you're using the Cloud Shell, select **Open new session**. If you're using the CLI locally, open a second instance. +- Open the second CLI session. If you're using the Cloud Shell in a browser, use the **Open new session** button. If using the CLI locally, open a second CLI instance. >[!div class="mx-imgBorder"] >![Open new Cloud Shell session](media/quickstart-send-telemetry-cli/cloud-shell-new-session.png) @@ -69,13 +72,13 @@ In this section, you use the Azure CLI to create a resource group and an IoT hub > [!TIP] > Optionally, you can create an Azure resource group, an IoT hub, and other resources by using the [Azure portal](iot-hub-create-through-portal.md), [Visual Studio Code](iot-hub-create-use-iot-toolkit.md), or other programmatic methods. -1. Run the [az group create](/cli/azure/group#az-group-create) command to create a resource group. The following command creates a resource group named *MyResourceGroup* in the *eastus* location. +1. In the first CLI session, run the [az group create](/cli/azure/group#az-group-create) command to create a resource group. The following command creates a resource group named *MyResourceGroup* in the *eastus* location. ```azurecli az group create --name MyResourceGroup --location eastus ``` -1. Run the [az iot hub create](/cli/azure/iot/hub#az-iot-hub-create) command to create an IoT hub. It might take a few minutes to create an IoT hub. +1. 
In the first CLI session, run the [az iot hub create](/cli/azure/iot/hub#az-iot-hub-create) command to create an IoT hub. It takes a few minutes to create an IoT hub. *YourIotHubName*. Replace this placeholder and the surrounding braces in the following command, using the name you chose for your IoT hub. An IoT hub name must be globally unique in Azure. Use your IoT hub name in the rest of this quickstart wherever you see the placeholder. @@ -85,21 +88,21 @@ In this section, you use the Azure CLI to create a resource group and an IoT hub ## Create and monitor a device -In this section, you create a simulated device in the first CLI session. The simulated device sends device telemetry to your IoT hub. In the second CLI session, you monitor events and telemetry, and send a cloud-to-device message to the simulated device. +In this section, you create a simulated device in the first CLI session. The simulated device sends device telemetry to your IoT hub. In the second CLI session, you monitor events and telemetry. To create and start a simulated device: -1. Run the [az iot hub device-identity create](/cli/azure/iot/hub/device-identity#az-iot-hub-device-identity-create) command in the first CLI session. This creates the simulated device identity. +1. In the first CLI session, run the [az iot hub device-identity create](/cli/azure/iot/hub/device-identity#az-iot-hub-device-identity-create) command. This command creates the simulated device identity. *YourIotHubName*. Replace this placeholder below with the name you chose for your IoT hub. *simDevice*. You can use this name directly for the simulated device in the rest of this quickstart. Optionally, use a different name. ```azurecli - az iot hub device-identity create --device-id simDevice --hub-name {YourIoTHubName} + az iot hub device-identity create -d simDevice -n {YourIoTHubName} ``` -1. Run the [az iot device simulate](/cli/azure/iot/device#az-iot-device-simulate) command in the first CLI session. This starts the simulated device. The device sends telemetry to your IoT hub and receives messages from it. +1. In the first CLI session, run the [az iot device simulate](/cli/azure/iot/device#az-iot-device-simulate) command. This command starts the simulated device. The device sends telemetry to your IoT hub and receives messages from it. *YourIotHubName*. Replace this placeholder below with the name you chose for your IoT hub. @@ -109,23 +112,23 @@ To create and start a simulated device: To monitor a device: -1. In the second CLI session, run the [az iot hub monitor-events](/cli/azure/iot/hub#az-iot-hub-monitor-events) command. This starts monitoring the simulated device. The output shows telemetry that the simulated device sends to the IoT hub. +1. In the second CLI session, run the [az iot hub monitor-events](/cli/azure/iot/hub#az-iot-hub-monitor-events) command. This command continuously monitors the simulated device. The output shows telemetry such as events and property state changes that the simulated device sends to the IoT hub. *YourIotHubName*. Replace this placeholder below with the name you chose for your IoT hub.
- + ```azurecli - az iot hub monitor-events --output table --hub-name {YourIoTHubName} + az iot hub monitor-events --output table -p all -n {YourIoTHubName} ``` + + :::image type="content" source="media/quickstart-send-telemetry-cli/cloud-shell-monitor.png" alt-text="Screenshot of monitoring events on a simulated device."::: - ![Cloud Shell monitor events](media/quickstart-send-telemetry-cli/cloud-shell-monitor.png) - -1. After you monitor the simulated device in the second CLI session, press Ctrl+C to stop monitoring. +1. After you monitor the simulated device in the second CLI session, press Ctrl+C to stop monitoring. Keep the second CLI session open to use in later steps. ## Use the CLI to send a message -In this section, you use the second CLI session to send a message to the simulated device. +In this section, you send a message to the simulated device. -1. In the first CLI session, confirm that the simulated device is running. If the device has stopped, run the following command to start it: +1. In the first CLI session, confirm that the simulated device is still running. If the device stopped, run the following command to restart it: *YourIotHubName*. Replace this placeholder below with the name you chose for your IoT hub. @@ -133,7 +136,7 @@ In this section, you use the second CLI session to send a message to the simulat az iot device simulate -d simDevice -n {YourIoTHubName} ``` -1. In the second CLI session, run the [az iot device c2d-message send](/cli/azure/iot/device/c2d-message#az-iot-device-c2d-message-send) command. This sends a cloud-to-device message from your IoT hub to the simulated device. The message includes a string and two key-value pairs. +1. In the second CLI session, run the [az iot device c2d-message send](/cli/azure/iot/device/c2d-message#az-iot-device-c2d-message-send) command. This command sends a cloud-to-device message from your IoT hub to the simulated device. The message includes a string and two key-value pairs. *YourIotHubName*. Replace this placeholder below with the name you chose for your IoT hub. @@ -145,9 +148,53 @@ In this section, you use the second CLI session to send a message to the simulat 1. In the first CLI session, confirm that the simulated device received the message. - ![Cloud Shell cloud-to-device message](media/quickstart-send-telemetry-cli/cloud-shell-receive-message.png) + :::image type="content" source="media/quickstart-send-telemetry-cli/cloud-shell-receive-message.png" alt-text="Screenshot of a simulated device receiving a message."::: + + +## Use the CLI to call a device method + +In this section, you call a direct method on the simulated device. + +1. As you did before, confirm that the simulated device in the first CLI session is running. If not, restart it. + +1. In the second CLI session, run the [az iot hub invoke-device-method](/cli/azure/iot/hub#az-iot-hub-invoke-device-method) command. In this example, there's no preexisting method for the device. The command calls an example method name on the simulated device and returns a payload. + + *YourIotHubName*. Replace this placeholder below with the name you chose for your IoT hub. + + ```azurecli + az iot hub invoke-device-method --mn MySampleMethod -d simDevice -n {YourIoTHubName} + ``` +1. In the first CLI session, confirm the output shows the method call. 
+ + :::image type="content" source="media/quickstart-send-telemetry-cli/cloud-shell-method-payload.png" alt-text="Screenshot of a simulated device displaying output after a method was invoked."::: + +## Use the CLI to update device properties + +In this section, you update the state of the simulated device by setting property values. + +1. As you did before, confirm that the simulated device in the first CLI session is running. If not, restart it. + +1. In the second CLI session, run the [az iot hub device-twin update](/cli/azure/iot/hub/device-twin#az-iot-hub-device-twin-update) command. This command updates the properties to the desired state on the IoT hub device twin that corresponds to your simulated device. In this case, the command sets example temperature condition properties. + + *YourIotHubName*. Replace this placeholder below with the name you chose for your IoT hub. + + ```azurecli + az iot hub device-twin update -d simDevice --desired '{"conditions":{"temperature":{"warning":98, "critical":107}}}' -n {YourIoTHubName} + ``` + +1. In the first CLI session, confirm that the simulated device outputs the property update. + + :::image type="content" source="media/quickstart-send-telemetry-cli/cloud-shell-device-twin-update.png" alt-text="Screenshot that shows how to update properties on a device."::: + +1. In the second CLI session, run the [az iot hub device-twin show](/cli/azure/iot/hub/device-twin#az-iot-hub-device-twin-show) command. This command reports changes to the device properties. + + *YourIotHubName*. Replace this placeholder below with the name you chose for your IoT hub. + + ```azurecli + az iot hub device-twin show -d simDevice --query properties.reported -n {YourIoTHubName} + ``` -1. After you view the message, close the second CLI session. Keep the first CLI session open. You use it to clean up resources in a later step. + :::image type="content" source="media/quickstart-send-telemetry-cli/cloud-shell-device-twin-show-update.png" alt-text="Screenshot that shows the updated properties on a device twin."::: ## View messaging metrics in the portal @@ -155,7 +202,7 @@ The Azure portal enables you to manage all aspects of your IoT hub and devices. To visualize messaging metrics in the Azure portal: -1. In the left navigation menu on the portal, select **All Resources**. This lists all resources in your subscription, including the IoT hub you created. +1. In the left navigation menu on the portal, select **All Resources**. This tab lists all resources in your subscription, including the IoT hub you created. 1. Select the link on the IoT hub you created. The portal displays the overview page for the hub. @@ -186,7 +233,7 @@ If you continue to the next recommended article, you can keep the resources you' To delete a resource group by name: -1. Run the [az group delete](/cli/azure/group#az-group-delete) command. This removes the resource group, the IoT Hub, and the device registration you created. +1. Run the [az group delete](/cli/azure/group#az-group-delete) command. This command removes the resource group, the IoT Hub, and the device registration you created. ```azurecli az group delete --name MyResourceGroup @@ -200,9 +247,9 @@ To delete a resource group by name: ## Next steps -In this quickstart, you used the Azure CLI to create an IoT hub, create a simulated device, send telemetry, monitor telemetry, send a cloud-to-device message, and clean up resources. You used the Azure portal to visualize messaging metrics on your device. 
+In this quickstart, you used the Azure CLI to create an IoT hub, create a simulated device, send and monitor telemetry, call a method, set desired properties, and clean up resources. You used the Azure portal to visualize messaging metrics on your device. -If you are a device developer, the suggested next step is to see the telemetry quickstart that uses the Azure IoT Device SDK for C. Optionally, see one of the available Azure IoT Hub telemetry quickstart articles in your preferred language or SDK. +If you're a device developer, the suggested next step is to see the telemetry quickstart that uses the Azure IoT Device SDK for C. Optionally, see one of the available Azure IoT Hub telemetry quickstart articles in your preferred language or SDK. To learn how to control your simulated device from a back-end application, continue to the next quickstart. diff --git a/articles/iot-hub/tutorial-message-enrichments.md b/articles/iot-hub/tutorial-message-enrichments.md index 72133ff93e961..0594ea8021aee 100644 --- a/articles/iot-hub/tutorial-message-enrichments.md +++ b/articles/iot-hub/tutorial-message-enrichments.md @@ -5,66 +5,65 @@ author: kgremban ms.service: iot-hub services: iot-hub ms.topic: tutorial -ms.date: 03/16/2022 +ms.date: 06/08/2022 ms.author: kgremban ms.custom: "mqtt, devx-track-azurecli, devx-track-csharp" # Customer intent: As a customer using Azure IoT Hub, I want to add information to the messages that come through my IoT hub and are sent to another endpoint. For example, I'd like to pass the IoT hub name to the application that reads the messages from the final endpoint, such as Azure Storage. --- # Tutorial: Use Azure IoT Hub message enrichments -*Message enrichments* describes the ability of Azure IoT Hub to *stamp* messages with additional information before the messages are sent to the designated endpoint. One reason to use message enrichments is to include data that can be used to simplify downstream processing. For example, enriching device telemetry messages with a device twin tag can reduce load on customers to make device twin API calls for this information. For more information, see [Overview of message enrichments](iot-hub-message-enrichments-overview.md). +*Message enrichments* are the ability of Azure IoT Hub to stamp messages with additional information before the messages are sent to the designated endpoint. One reason to use message enrichments is to include data that can be used to simplify downstream processing. For example, enriching device messages with a device twin tag can reduce load on customers to make device twin API calls for this information. For more information, see [Overview of message enrichments](iot-hub-message-enrichments-overview.md). In this tutorial, you see two ways to create and configure the resources that are needed to test the message enrichments for an IoT hub. The resources include one storage account with two storage containers. One container holds the enriched messages, and another container holds the original messages. Also included is an IoT hub to receive the messages and route them to the appropriate storage container based on whether they're enriched or not. -* The first method is to use the Azure CLI to create the resources and configure the message routing. Then you define the enrichments manually by using the [Azure portal](https://portal.azure.com). +* The first method is to use the Azure CLI to create the resources and configure the message routing. Then you define the message enrichments in the Azure portal. 
-* The second method is to use an Azure Resource Manager template to create both the resources *and* the configurations for the message routing and message enrichments. +* The second method is to use an Azure Resource Manager template to create both the resources and configure both the message routing and message enrichments. After the configurations for the message routing and message enrichments are finished, you use an application to send messages to the IoT hub. The hub then routes them to both storage containers. Only the messages sent to the endpoint for the **enriched** storage container are enriched. -Here are the tasks you perform to complete this tutorial: +In this tutorial, you perform the following tasks: -**Use IoT Hub message enrichments** > [!div class="checklist"] -> * First method: Create resources and configure message routing by using the Azure CLI. Configure the message enrichments manually by using the [Azure portal](https://portal.azure.com). -> * Second method: Create resources and configure message routing and message enrichments by using a Resource Manager template. +> +> * First method: Create resources and configure message routing using the Azure CLI. Configure the message enrichments in the Azure portal. +> * Second method: Create resources and configure message routing and message enrichments using a Resource Manager template. > * Run an app that simulates an IoT device sending messages to the hub. -> * View the results, and verify that the message enrichments are working as expected. +> * View the results, and verify that the message enrichments are being applied to the targeted messages. ## Prerequisites -- You must have an Azure subscription. If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. - -- Install [Visual Studio](https://www.visualstudio.com/). +* You must have an Azure subscription. If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. -- Make sure that port 8883 is open in your firewall. The device sample in this tutorial uses MQTT protocol, which communicates over port 8883. This port may be blocked in some corporate and educational network environments. For more information and ways to work around this issue, see [Connecting to IoT Hub (MQTT)](iot-hub-mqtt-support.md#connecting-to-iot-hub). +* Make sure that port 8883 is open in your firewall. The device sample in this tutorial uses MQTT protocol, which communicates over port 8883. This port may be blocked in some corporate and educational network environments. For more information and ways to work around this issue, see [Connecting to IoT Hub (MQTT)](iot-hub-mqtt-support.md#connecting-to-iot-hub). [!INCLUDE [azure-cli-prepare-your-environment-no-header.md](../../includes/azure-cli-prepare-your-environment-no-header.md)] ## Retrieve the IoT C# samples repository -Download the [IoT C# samples](https://github.com/Azure-Samples/azure-iot-samples-csharp/archive/main.zip) from GitHub and unzip them. This repository has several applications, scripts, and Resource Manager templates in it. The ones to be used for this tutorial are as follows: +Download or clone the [IoT C# samples](https://github.com/Azure-Samples/azure-iot-samples-csharp) from GitHub. Follow the directions in **README.md** to set up the prerequisites for running C# samples. + +This repository has several applications, scripts, and Resource Manager templates in it. 
The ones to be used for this tutorial are as follows: -* For the manual method, there's a CLI script that's used to create the resources. This script is in /azure-iot-samples-csharp/iot-hub/Tutorials/Routing/SimulatedDevice/resources/iothub_msgenrichment_cli.azcli. This script creates the resources and configures the message routing. After you run this script, create the message enrichments manually by using the [Azure portal](https://portal.azure.com). -* For the automated method, there's an Azure Resource Manager template. The template is in /azure-iot-samples-csharp/iot-hub/Tutorials/Routing/SimulatedDevice/resources/template_msgenrichments.json. This template creates the resources, configures the message routing, and then configures the message enrichments. -* The third application you use is the Device Simulation app, which you use to send messages to the IoT hub and test the message enrichments. +* For the manual method, there's a CLI script that creates the cloud resources. This script is in `/azure-iot-samples-csharp/iot-hub/Tutorials/Routing/SimulatedDevice/resources/iothub_msgenrichment_cli.azcli`. This script creates the resources and configures the message routing. After you run this script, create the message enrichments manually by using the Azure portal. +* For the automated method, there's an Azure Resource Manager template. The template is in `/azure-iot-samples-csharp/iot-hub/Tutorials/Routing/SimulatedDevice/resources/template_msgenrichments.json`. This template creates the resources, configures the message routing, and then configures the message enrichments. +* The third application you use is the device simulation app, which you use to send messages to the IoT hub and test the message enrichments. -## Manually set up and configure by using the Azure CLI +## Create and configure resources using the Azure CLI -In addition to creating the necessary resources, the Azure CLI script also configures the two routes to the endpoints that are separate storage containers. For more information on how to configure the message routing, see the [Routing tutorial](tutorial-routing.md). After the resources are set up, use the [Azure portal](https://portal.azure.com) to configure message enrichments for each endpoint. Then continue on to the testing step. +In addition to creating the necessary resources, the Azure CLI script also configures the two routes to the endpoints that are separate storage containers. For more information on how to configure message routing, see the [routing tutorial](tutorial-routing.md). After the resources are set up, use the [Azure portal](https://portal.azure.com) to configure message enrichments for each endpoint. Then continue on to the testing step. > [!NOTE] > All messages are routed to both endpoints, but only the messages going to the endpoint with configured message enrichments will be enriched. -> You can use the script that follows, or you can open the script in the /resources folder of the downloaded repository. The script performs the following steps: * Create an IoT hub. * Create a storage account. * Create two containers in the storage account. One container is for the enriched messages, and another container is for messages that aren't enriched. -* Set up routing for the two different storage accounts: - * Create an endpoint for each storage account container. - * Create a route to each of the storage account container endpoints. 
+* Set up routing for the two different storage containers: + * Create an endpoint for each storage account container. + * Create a route to each of the storage account container endpoints. There are several resource names that must be globally unique, such as the IoT hub name and the storage account name. To make running the script easier, those resource names are appended with a random alphanumeric value called *randomValue*. The random value is generated once at the top of the script. It's appended to the resource names as needed throughout the script. If you don't want the value to be random, you can set it to an empty string or to a specific value. @@ -75,11 +74,11 @@ Here are the resources created by the script. *Enriched* means that the resource | Name | Value | |-----|-----| | resourceGroup | ContosoResourcesMsgEn | -| container name | original | -| container name | enriched | | IoT device name | Contoso-Test-Device | | IoT Hub name | ContosoTestHubMsgEn | | storage Account Name | contosostorage | +| container name 1 | original | +| container name 2 | enriched | | endpoint Name 1 | ContosoStorageEndpointOriginal | | endpoint Name 2 | ContosoStorageEndpointEnriched| | route Name 1 | ContosoStorageRouteOriginal | @@ -95,7 +94,7 @@ subscriptionID=$(az account show --query id -o tsv) # This retrieves a random value. randomValue=$RANDOM -# This command installs the IOT Extension for Azure CLI. +# This command installs the IoT Extension for Azure CLI. # You only need to install this the first time. # You need it to create the device identity. az extension add --name azure-iot @@ -242,35 +241,43 @@ az iot hub route create \ At this point, the resources are all set up and the message routing is configured. You can view the message routing configuration in the portal and set up the message enrichments for messages going to the **enriched** storage container. -### Manually configure the message enrichments by using the Azure portal +### Configure the message enrichments using the Azure portal + +1. In the [Azure portal](https://portal.azure.com), go to your IoT hub by selecting **Resource groups**. Then select the resource group set up for this tutorial (**ContosoResourcesMsgEn**). Find the IoT hub in the list, and select it. -1. Go to your IoT hub by selecting **Resource groups**. Then select the resource group set up for this tutorial (**ContosoResourcesMsgEn**). Find the IoT hub in the list, and select it. Select **Message routing** for the IoT hub. +2. Select **Message routing** for the IoT hub. :::image type="content" source="./media/tutorial-message-enrichments/select-iot-hub.png" alt-text="Screenshot that shows how to select message routing." border="true"::: - The message routing pane has three tabs labeled **Routes**, **Custom endpoints**, and **Enrich messages**. Browse the first two tabs to see the configuration set up by the script. Use the third tab to add message enrichments. Let's enrich messages going to the endpoint for the storage container called **enriched**. Fill in the name and value, and then select the endpoint **ContosoStorageEndpointEnriched** from the drop-down list. Here's an example of how to set up an enrichment that adds the IoT hub name to the message: + The message routing pane has three tabs labeled **Routes**, **Custom endpoints**, and **Enrich messages**. Browse the first two tabs to see the configuration set up by the script. + +3. 
Select the **Enrich messages** tab to add three message enrichments for the messages going to the endpoint for the storage container called **enriched**. + +4. For each message enrichment, fill in the name and value, and then select the endpoint **ContosoStorageEndpointEnriched** from the drop-down list. Here's an example of how to set up an enrichment that adds the IoT hub name to the message: ![Add first enrichment](./media/tutorial-message-enrichments/add-message-enrichments.png) -2. Add these values to the list for the ContosoStorageEndpointEnriched endpoint. + Add these values to the list for the ContosoStorageEndpointEnriched endpoint: - | Key | Value | Endpoint (drop-down list) | - | ---- | ----- | -------------------------| - | myIotHub | $iothubname | AzureStorageContainers > ContosoStorageEndpointEnriched | - | DeviceLocation | $twin.tags.location (assumes that the device twin has a location tag) | AzureStorageContainers > ContosoStorageEndpointEnriched | - |customerID | 6ce345b8-1e4a-411e-9398-d34587459a3a | AzureStorageContainers > ContosoStorageEndpointEnriched | + | Name | Value | Endpoint | + | ---- | ----- | -------- | + | myIotHub | `$iothubname` | ContosoStorageEndpointEnriched | + | DeviceLocation | `$twin.tags.location` (assumes that the device twin has a location tag) | ContosoStorageEndpointEnriched | + |customerID | `6ce345b8-1e4a-411e-9398-d34587459a3a` | ContosoStorageEndpointEnriched | -3. When you're finished, your pane should look similar to this image: + When you're finished, your pane should look similar to this image: ![Table with all enrichments added](./media/tutorial-message-enrichments/all-message-enrichments.png) -4. Select **Apply** to save the changes. Skip to the [Test message enrichments](#test-message-enrichments) section. +5. Select **Apply** to save the changes. + +You now have message enrichments set up for all messages routed to the **enriched** endpoint. Skip to the [Test message enrichments](#test-message-enrichments) section to continue the tutorial. -## Create and configure by using a Resource Manager template +## Create and configure resources using a Resource Manager template You can use a Resource Manager template to create and configure the resources, message routing, and message enrichments. -1. Sign in to the Azure portal. Select **+ Create a Resource** to bring up a search box. Enter *template deployment*, and search for it. In the results pane, select **Template deployment (deploy using custom template)**. +1. Sign in to the [Azure portal](https://portal.azure.com). Select **+ Create a Resource** to bring up a search box. Enter *template deployment*, and search for it. In the results pane, select **Template deployment (deploy using custom template)**. ![Template deployment in the Azure portal](./media/tutorial-message-enrichments/template-select-deployment.png) @@ -278,7 +285,7 @@ You can use a Resource Manager template to create and configure the resources, m 1. In the **Custom deployment** pane, select **Build your own template in the editor**. -1. In the **Edit template** pane, select **Load file**. Windows Explorer appears. Locate the **template_messageenrichments.json** file in the unzipped repo file in **/iot-hub/Tutorials/Routing/SimulatedDevice/resources**. +1. In the **Edit template** pane, select **Load file**. Windows Explorer appears. Locate the **template_messageenrichments.json** file in the unzipped repo file in the **/iot-hub/Tutorials/Routing/SimulatedDevice/resources** directory. 
![Select template from local machine](./media/tutorial-message-enrichments/template-select.png) @@ -290,12 +297,10 @@ You can use a Resource Manager template to create and configure the resources, m | Name | Value | |-----|-----| - | resourceGroup | ContosoResourcesMsgEn | - | container name | original | - | container name | enriched | - | IoT device name | Contoso-Test-Device | | IoT Hub name | ContosoTestHubMsgEn | | storage Account Name | contosostorage | + | container name 1 | original | + | container name 2 | enriched | | endpoint Name 1 | ContosoStorageEndpointOriginal | | endpoint Name 2 | ContosoStorageEndpointEnriched| | route Name 1 | ContosoStorageRouteOriginal | @@ -307,13 +312,22 @@ You can use a Resource Manager template to create and configure the resources, m ![Top half of Custom deployment pane](./media/tutorial-message-enrichments/template-deployment-top.png) -1. Here's the bottom half of the **Custom deployment** pane. You can see the rest of the parameters and the terms and conditions. +1. Here's the bottom half of the **Custom deployment** pane. You can see the rest of the parameters and the terms and conditions. ![Bottom half of Custom deployment pane](./media/tutorial-message-enrichments/template-deployment-bottom.png) 1. Select the check box to agree to the terms and conditions. Then select **Purchase** to continue with the template deployment. -1. Wait for the template to be fully deployed. Select the bell icon at the top of the screen to check on the progress. When it's finished, continue to the [Test message enrichments](#test-message-enrichments) section. +1. Wait for the template to be fully deployed. Select the bell icon at the top of the screen to check on the progress. + +### Register a device in the portal + +1. Once your resources are deployed, select the IoT hub in your resource group. +1. Select **Devices** from the **Device management** section of the navigation menu. +1. Select **Add Device** to register a new device in your hub. +1. Provide a device ID. The sample application used later in this tutorial defaults to a device named `Contoso-Test-Device`, but you can use any ID. Select **Save**. +1. Once the device is created in your hub, select its name from the list of devices. You may need to refresh the list. +1. Copy the **Primary key** value and have it available to use in the testing section of this article. ## Add location tag to the device twin @@ -321,13 +335,14 @@ One of the message enrichments configured on your IoT hub specifies a key of Dev Follow these steps to add a location tag to your device's twin with the portal. -1. Go to your IoT hub by selecting **Resource groups**. Then select the resource group set up for this tutorial (**ContosoResourcesMsgEn**). Find the IoT hub in the list, and select it. Select **Devices** on the left-pane of the IoT hub, then select your device (**Contoso-Test-Device**). +1. Navigate to your IoT hub in the Azure portal. + +1. Select **Devices** on the left-pane of the IoT hub, then select your device. 1. Select the **Device twin** tab at the top of the device page and add the following line just before the closing brace at the bottom of the device twin. Then select **Save**. 
```json , "tags": {"location": "Plant 43"} - ``` :::image type="content" source="./media/tutorial-message-enrichments/add-location-tag-to-device-twin.png" alt-text="Screenshot of adding location tag to device twin in Azure portal"::: @@ -340,56 +355,90 @@ To learn more about how device twin paths are handled with message enrichments, To view the message enrichments, select **Resource groups**. Then select the resource group you're using for this tutorial. Select the IoT hub from the list of resources, and go to **Messaging**. The message routing configuration and the configured enrichments appear. -Now that the message enrichments are configured for the endpoint, run the Simulated Device application to send messages to the IoT hub. The hub was set up with settings that accomplish the following tasks: +Now that the message enrichments are configured for the **enriched** endpoint, run the simulated device application to send messages to the IoT hub. The hub was set up with settings that accomplish the following tasks: -* Messages routed to the storage endpoint ContosoStorageEndpointOriginal won't be enriched and will be stored in the storage container `original`. +* Messages routed to the storage endpoint ContosoStorageEndpointOriginal won't be enriched and will be stored in the storage container **original**. -* Messages routed to the storage endpoint ContosoStorageEndpointEnriched will be enriched and stored in the storage container `enriched`. +* Messages routed to the storage endpoint ContosoStorageEndpointEnriched will be enriched and stored in the storage container **enriched**. -The Simulated Device application is one of the applications in the unzipped download. The application sends messages for each of the different message routing methods in the [Routing tutorial](tutorial-routing.md), which includes Azure Storage. +The simulated device application is one of the applications in the azure-iot-samples-csharp repository. The application sends messages with a randomized value for the property `level`. Only messages that have `storage` set as the message's level property will be routed to the two endpoints. -Double-click the solution file **IoT_SimulatedDevice.sln** to open the code in Visual Studio, and then open **Program.cs**. Substitute the IoT hub name for the marker `{your hub name}`. The format of the IoT hub host name is **{your hub name}.azure-devices.net**. For this tutorial, the hub host name is ContosoTestHubMsgEn.azure-devices.net. Next, substitute the device key you saved earlier when you ran the script to create the resources for the marker `{your device key}`. +1. Open the file **Program.cs** from the **SimulatedDevice** directory in your preferred code editor. -If you don't have the device key, you can retrieve it from the portal. After you sign in, go to **Resource groups**, select your resource group, and then select your IoT hub. Look under **IoT Devices** for your test device, and select your device. Select the copy icon next to **Primary key** to copy it to the clipboard. +1. Replace the placeholder text with your own resource information. Substitute the IoT hub name for the marker `{your hub name}`. The format of the IoT hub host name is **{your hub name}.azure-devices.net**. Next, substitute the device key you saved earlier when you ran the script to create the resources for the marker `{your device key}`. + + If you don't have the device key, you can retrieve it from the portal. 
After you sign in, go to **Resource groups**, select your resource group, and then select your IoT hub. Look under **IoT Devices** for your test device, and select your device. Select the copy icon next to **Primary key** to copy it to the clipboard. ```csharp - private readonly static string s_myDeviceId = "Contoso-Test-Device"; - private readonly static string s_iotHubUri = "ContosoTestHubMsgEn.azure-devices.net"; - // This is the primary key for the device. This is in the portal. - // Find your IoT hub in the portal > IoT devices > select your device > copy the key. - private readonly static string s_deviceKey = "{your device key}"; + private readonly static string s_myDeviceId = "Contoso-Test-Device"; + private readonly static string s_iotHubUri = "{your hub name}.azure-devices.net"; + // This is the primary key for the device. This is in the portal. + // Find your IoT hub in the portal > IoT devices > select your device > copy the key. + private readonly static string s_deviceKey = "{your device key}"; ``` ### Run and test -Run the console application for a few minutes. The messages that are being sent are displayed on the console screen of the application. +Run the console application for a few minutes. -The app sends a new device-to-cloud message to the IoT hub every second. The message contains a JSON-serialized object with the device ID, temperature, humidity, and message level, which defaults to `normal`. It randomly assigns a level of `critical` or `storage`, which causes the message to be routed to the storage account or to the default endpoint. The messages sent to the **enriched** container in the storage account will be enriched. +In a command line window, you can run the sample with the following commands executed at the **SimulatedDevice** directory level: -After several storage messages are sent, view the data. +```console +dotnet restore +dotnet run +``` -1. Select **Resource groups**. Find your resource group, **ContosoResourcesMsgEn**, and select it. +The app sends a new device-to-cloud message to the IoT hub every second. The messages that are being sent are displayed on the console screen of the application. The message contains a JSON-serialized object with the device ID, temperature, humidity, and message level, which defaults to `normal`. The sample program randomly changes the message level to either `critical` or `storage`. Messages labeled for storage are routed to the storage account, and the rest go to the default endpoint. The messages sent to the **enriched** container in the storage account will be enriched. -2. Select your storage account, which is **contosostorage**. Then select **Storage Explorer (preview)** in the left pane. +After several storage messages are sent, view the data. - ![Select Storage Explorer](./media/tutorial-message-enrichments/select-storage-explorer.png) +1. Select **Resource groups**. Find your resource group, **ContosoResourcesMsgEn**, and select it. - Select **BLOB CONTAINERS** to see the two containers that can be used. +2. Select your storage account, which begins with **contosostorage**. Then select **Storage browser (preview)** from the navigation menu. Select **Blob containers** to see the two containers that you created. 
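+
+   If you prefer to stay in the CLI, you can also list the blobs in a container with the `az storage blob list` command. The following check is optional and only a sketch: it assumes you ran the CLI script earlier in this tutorial and that the `$storageAccountName` and `$storageAccountKey` variables are still set in your Cloud Shell session. Otherwise, substitute your storage account name and key.
+
+   ```azurecli
+   az storage blob list --container-name enriched --account-name $storageAccountName --account-key $storageAccountKey --output table
+   ```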
- ![See the containers in the storage account](./media/tutorial-message-enrichments/show-blob-containers.png) + :::image type="content" source="./media/tutorial-message-enrichments/show-blob-containers.png" alt-text="See the containers in the storage account."::: -The messages in the container called **enriched** have the message enrichments included in the messages. The messages in the container called **original** have the raw messages with no enrichments. Drill down into one of the containers until you get to the bottom, and open the most recent message file. Then do the same for the other container to verify that there are no enrichments added to messages in that container. +The messages in the container called **enriched** have the message enrichments included in the messages. The messages in the container called **original** have the raw messages with no enrichments. Drill down into one of the containers until you get to the bottom, and open the most recent message file. Then do the same for the other container to verify that the one is enriched and one isn't. When you look at messages that have been enriched, you should see "my IoT Hub" with the hub name and the location and the customer ID, like this: ```json -{"EnqueuedTimeUtc":"2019-05-10T06:06:32.7220000Z","Properties":{"level":"storage","myIotHub":"contosotesthubmsgen3276","DeviceLocation":"Plant 43","customerID":"6ce345b8-1e4a-411e-9398-d34587459a3a"},"SystemProperties":{"connectionDeviceId":"Contoso-Test-Device","connectionAuthMethod":"{\"scope\":\"device\",\"type\":\"sas\",\"issuer\":\"iothub\",\"acceptingIpFilterRule\":null}","connectionDeviceGenerationId":"636930642531278483","enqueuedTime":"2019-05-10T06:06:32.7220000Z"},"Body":"eyJkZXZpY2VJZCI6IkNvbnRvc28tVGVzdC1EZXZpY2UiLCJ0ZW1wZXJhdHVyZSI6MjkuMjMyMDE2ODQ4MDQyNjE1LCJodW1pZGl0eSI6NjQuMzA1MzQ5NjkyODQ0NDg3LCJwb2ludEluZm8iOiJUaGlzIGlzIGEgc3RvcmFnZSBtZXNzYWdlLiJ9"} +{ + "EnqueuedTimeUtc":"2019-05-10T06:06:32.7220000Z", + "Properties": + { + "level":"storage", + "myIotHub":"contosotesthubmsgen3276", + "DeviceLocation":"Plant 43", + "customerID":"6ce345b8-1e4a-411e-9398-d34587459a3a" + }, + "SystemProperties": + { + "connectionDeviceId":"Contoso-Test-Device", + "connectionAuthMethod":"{\"scope\":\"device\",\"type\":\"sas\",\"issuer\":\"iothub\",\"acceptingIpFilterRule\":null}", + "connectionDeviceGenerationId":"636930642531278483", + "enqueuedTime":"2019-05-10T06:06:32.7220000Z" + },"Body":"eyJkZXZpY2VJZCI6IkNvbnRvc28tVGVzdC1EZXZpY2UiLCJ0ZW1wZXJhdHVyZSI6MjkuMjMyMDE2ODQ4MDQyNjE1LCJodW1pZGl0eSI6NjQuMzA1MzQ5NjkyODQ0NDg3LCJwb2ludEluZm8iOiJUaGlzIGlzIGEgc3RvcmFnZSBtZXNzYWdlLiJ9" +} ``` -Here's an unenriched message. Notice that "my IoT Hub," "devicelocation," and "customerID" don't show up here because these fields are added by the enrichments. This endpoint has no enrichments. +Here's an unenriched message. Notice that `my IoT Hub,` `devicelocation,` and `customerID` don't show up here because these fields are added by the enrichments. This endpoint has no enrichments. 
```json -{"EnqueuedTimeUtc":"2019-05-10T06:06:32.7220000Z","Properties":{"level":"storage"},"SystemProperties":{"connectionDeviceId":"Contoso-Test-Device","connectionAuthMethod":"{\"scope\":\"device\",\"type\":\"sas\",\"issuer\":\"iothub\",\"acceptingIpFilterRule\":null}","connectionDeviceGenerationId":"636930642531278483","enqueuedTime":"2019-05-10T06:06:32.7220000Z"},"Body":"eyJkZXZpY2VJZCI6IkNvbnRvc28tVGVzdC1EZXZpY2UiLCJ0ZW1wZXJhdHVyZSI6MjkuMjMyMDE2ODQ4MDQyNjE1LCJodW1pZGl0eSI6NjQuMzA1MzQ5NjkyODQ0NDg3LCJwb2ludEluZm8iOiJUaGlzIGlzIGEgc3RvcmFnZSBtZXNzYWdlLiJ9"} +{ + "EnqueuedTimeUtc":"2019-05-10T06:06:32.7220000Z", + "Properties": + { + "level":"storage" + }, + "SystemProperties": + { + "connectionDeviceId":"Contoso-Test-Device", + "connectionAuthMethod":"{\"scope\":\"device\",\"type\":\"sas\",\"issuer\":\"iothub\",\"acceptingIpFilterRule\":null}", + "connectionDeviceGenerationId":"636930642531278483", + "enqueuedTime":"2019-05-10T06:06:32.7220000Z" + },"Body":"eyJkZXZpY2VJZCI6IkNvbnRvc28tVGVzdC1EZXZpY2UiLCJ0ZW1wZXJhdHVyZSI6MjkuMjMyMDE2ODQ4MDQyNjE1LCJodW1pZGl0eSI6NjQuMzA1MzQ5NjkyODQ0NDg3LCJwb2ludEluZm8iOiJUaGlzIGlzIGEgc3RvcmFnZSBtZXNzYWdlLiJ9" +} ``` ## Clean up resources @@ -406,22 +455,11 @@ az group delete --name $resourceGroup ## Next steps -In this tutorial, you configured and tested adding message enrichments to IoT Hub messages by using the following steps: - -**Use IoT Hub message enrichments** - -> [!div class="checklist"] -> * First method: Create resources and configure message routing by using the Azure CLI. Configure the message enrichments manually by using the [Azure portal](https://portal.azure.com). -> * Second method: Create resources and configure message routing and message enrichments by using an Azure Resource Manager template. -> * Run an app that simulates an IoT device sending messages to the hub. -> * View the results, and verify that the message enrichments are working as expected. +In this tutorial, you configured and tested message enrichments for IoT Hub messages as they are routed to an endpoint. For more information about message enrichments, see [Overview of message enrichments](iot-hub-message-enrichments-overview.md). -For more information about message routing, see these articles: - -> [!div class="nextstepaction"] -> [Use IoT Hub message routing to send device-to-cloud messages to different endpoints](iot-hub-devguide-messages-d2c.md) +To learn more about IoT Hub, continue to the next tutorial. > [!div class="nextstepaction"] -> [Tutorial: IoT Hub routing](tutorial-routing.md) \ No newline at end of file +> [Tutorial: Set up and use metrics and logs with an IoT hub](tutorial-use-metrics-and-diags.md) \ No newline at end of file diff --git a/articles/iot-hub/tutorial-routing-config-message-routing-CLI.md b/articles/iot-hub/tutorial-routing-config-message-routing-CLI.md deleted file mode 100644 index 0d4b65595e781..0000000000000 --- a/articles/iot-hub/tutorial-routing-config-message-routing-CLI.md +++ /dev/null @@ -1,303 +0,0 @@ ---- -title: Tutorial - Configure message routing for Azure IoT Hub using the Azure CLI -description: Tutorial - Configure message routing for Azure IoT Hub using the Azure CLI. Depending on properties in the message, route to either a storage account or a Service Bus queue. 
-author: kgremban -ms.service: iot-hub -services: iot-hub -ms.topic: tutorial -ms.date: 8/20/2021 -ms.author: kgremban -ms.custom: mvc, devx-track-azurecli -#Customer intent: As a developer, I want to be able to route messages sent to my IoT hub to different destinations based on properties stored in the message. I want to be able to set up the resource and the routing using the Azure CLI. ---- - -# Tutorial: Use the Azure CLI to configure IoT Hub message routing - -[!INCLUDE [iot-hub-include-routing-intro](../../includes/iot-hub-include-routing-intro.md)] - -[!INCLUDE [iot-hub-include-routing-create-resources](../../includes/iot-hub-include-routing-create-resources.md)] - -## Download the script (optional) - -For the second part of this tutorial, you download and run a Visual Studio application to send messages to the IoT Hub. There is a folder in the download that contains the Azure Resource Manager template and parameters file, as well as the Azure CLI and PowerShell scripts. - -If you want to view the finished script, download the [Azure IoT C# Samples](https://github.com/Azure-Samples/azure-iot-samples-csharp/archive/main.zip). Unzip the main.zip file. The Azure CLI script is in /iot-hub/Tutorials/Routing/SimulatedDevice/resources/ as **iothub_routing_cli.azcli**. - -## Use the Azure CLI to create your resources - -Copy and paste the script below into Cloud Shell and press Enter. It runs the script one line at a time. This first section of the script will create the base resources for this tutorial, including the storage account, IoT Hub, Service Bus Namespace, and Service Bus queue. As you go through the rest of the tutorial, copy each block of script and paste it into Cloud Shell to run it. - -> [!TIP] -> A tip about debugging: this script uses the continuation symbol (the backslash `\`) to make the script more readable. If you have a problem running the script, make sure your Cloud Shell session is running `bash` and that there are no spaces after any of the backslashes. -> - -There are several resource names that must be globally unique, such as the IoT Hub name and the storage account name. To make this easier, those resource names are appended with a random alphanumeric value called *randomValue*. The randomValue is generated once at the top of the script and appended to the resource names as needed throughout the script. If you don't want it to be random, you can set it to an empty string or to a specific value. - -> [!IMPORTANT] -> The variables set in the initial script are also used by the routing script, so run all of the script in the same Cloud Shell session. If you open a new session to run the script for setting up the routing, several of the variables will be missing values. -> - -```azurecli-interactive -# This command retrieves the subscription id of the current Azure account. -# This field is used when setting up the routing queries. -subscriptionID=$(az account show --query id -o tsv) - -# Concatenate this number onto the resources that have to be globally unique. -# You can set this to "" or to a specific value if you don't want it to be random. -# This retrieves a random value. -randomValue=$RANDOM - -# This command installs the IOT Extension for Azure CLI. -# You only need to install this the first time. -# You need it to create the device identity. -az extension add --name azure-iot - -# Set the values for the resource names that -# don't have to be globally unique. 
-location=westus -resourceGroup=ContosoResources -iotHubConsumerGroup=ContosoConsumers -containerName=contosoresults -iotDeviceName=Contoso-Test-Device - -# Create the resource group to be used -# for all the resources for this tutorial. -az group create --name $resourceGroup \ - --location $location - -# The IoT hub name must be globally unique, -# so add a random value to the end. -iotHubName=ContosoTestHub$randomValue -echo "IoT hub name = " $iotHubName - -# Create the IoT hub. -az iot hub create --name $iotHubName \ - --resource-group $resourceGroup \ - --sku S1 --location $location - -# Add a consumer group to the IoT hub for the 'events' endpoint. -az iot hub consumer-group create --hub-name $iotHubName \ - --name $iotHubConsumerGroup - -# The storage account name must be globally unique, -# so add a random value to the end. -storageAccountName=contosostorage$randomValue -echo "Storage account name = " $storageAccountName - -# Create the storage account to be used as a routing destination. -az storage account create --name $storageAccountName \ - --resource-group $resourceGroup \ - --location $location \ - --sku Standard_LRS - -# Get the primary storage account key. -# You need this to create the container. -storageAccountKey=$(az storage account keys list \ - --resource-group $resourceGroup \ - --account-name $storageAccountName \ - --query "[0].value" | tr -d '"') - -# See the value of the storage account key. -echo "storage account key = " $storageAccountKey - -# Create the container in the storage account. -az storage container create --name $containerName \ - --account-name $storageAccountName \ - --account-key $storageAccountKey \ - --public-access off - -# The Service Bus namespace must be globally unique, -# so add a random value to the end. -sbNamespace=ContosoSBNamespace$randomValue -echo "Service Bus namespace = " $sbNamespace - -# Create the Service Bus namespace. -az servicebus namespace create --resource-group $resourceGroup \ - --name $sbNamespace \ - --location $location - -# The Service Bus queue name must be globally unique, -# so add a random value to the end. -sbQueueName=ContosoSBQueue$randomValue -echo "Service Bus queue name = " $sbQueueName - -# Create the Service Bus queue to be used as a routing destination. -az servicebus queue create --name $sbQueueName \ - --namespace-name $sbNamespace \ - --resource-group $resourceGroup - -# Create the IoT device identity to be used for testing. -az iot hub device-identity create --device-id $iotDeviceName \ - --hub-name $iotHubName - -# Retrieve the information about the device identity, then copy the primary key to -# Notepad. You need this to run the device simulation during the testing phase. -az iot hub device-identity show --device-id $iotDeviceName \ - --hub-name $iotHubName -``` - -Now that the base resources are set up, you can configure the message routing. - -## Set up message routing - -[!INCLUDE [iot-hub-include-create-routing-description](../../includes/iot-hub-include-create-routing-description.md)] - -To create a routing endpoint, use [az iot hub routing-endpoint create](/cli/azure/iot/hub/routing-endpoint#az-iot-hub-routing-endpoint-create). To create the message route for the endpoint, use [az iot hub route create](/cli/azure/iot/hub/route#az-iot-hub-route-create). - -### Route to a storage account - -[!INCLUDE [iot-hub-include-blob-storage-format](../../includes/iot-hub-include-blob-storage-format.md)] - -First, set up the endpoint for the storage account, then set up the route. 
- -These are the variables used by the script that must be set within your Cloud Shell session: - -**storageConnectionString**: This value is retrieved from the storage account set up in the previous script. It is used by the message routing to access the storage account. - - **resourceGroup**: There are two occurrences of resource group -- set them to your resource group. - -**endpoint subscriptionID**: This field is set to the Azure subscriptionID for the endpoint. - -**endpointType**: This field is the type of endpoint. This value must be set to `azurestoragecontainer`, `eventhub`, `servicebusqueue`, or `servicebustopic`. For your purposes here, set it to `azurestoragecontainer`. - -**iotHubName**: This field is the name of the hub that will do the routing. - -**containerName**: This field is the name of the container in the storage account to which data will be written. - -**encoding**: This field will be either `avro` or `json`. This denotes the format of the stored data. - -**routeName**: This field is the name of the route you are setting up. - -**endpointName**: This field is the name identifying the endpoint. - -**enabled**: This field defaults to `true`, indicating that the message route should be enabled after being created. - -**condition**: This field is the query used to filter for the messages sent to this endpoint. The query condition for the messages being routed to storage is `level="storage"`. - -Copy this script and paste it into your Cloud Shell window and run it. - -```azurecli -##### ROUTING FOR STORAGE ##### - -endpointName="ContosoStorageEndpoint" -endpointType="azurestoragecontainer" -routeName="ContosoStorageRoute" -condition='level="storage"' - -# Get the connection string for the storage account. -# Adding the "-o tsv" makes it be returned without the default double quotes around it. -storageConnectionString=$(az storage account show-connection-string \ - --name $storageAccountName --query connectionString -o tsv) -``` - -The next step is to create the routing endpoint for the storage account. You also specify the container in which the results will be stored. The container was created previously when the storage account was created. - -```azurecli -# Create the routing endpoint for storage. -az iot hub routing-endpoint create \ - --connection-string $storageConnectionString \ - --endpoint-name $endpointName \ - --endpoint-resource-group $resourceGroup \ - --endpoint-subscription-id $subscriptionID \ - --endpoint-type $endpointType \ - --hub-name $iotHubName \ - --container $containerName \ - --resource-group $resourceGroup \ - --encoding avro -``` - -Next, create the route for the storage endpoint. The message route designates where to send the messages that meet the query specification. - -```azurecli -# Create the route for the storage endpoint. -az iot hub route create \ - --name $routeName \ - --hub-name $iotHubName \ - --source devicemessages \ - --resource-group $resourceGroup \ - --endpoint-name $endpointName \ - --enabled \ - --condition $condition -``` - -### Route to a Service Bus queue - -Now set up the routing for the Service Bus queue. To retrieve the connection string for the Service Bus queue, you must create an authorization rule that has the correct rights defined. The following script creates an authorization rule for the Service Bus queue called `sbauthrule`, and sets the rights to `Listen Manage Send`. Once this authorization rule is defined, you can use it to retrieve the connection string for the queue. 
- -```azurecli -# Create the authorization rule for the Service Bus queue. -az servicebus queue authorization-rule create \ - --name "sbauthrule" \ - --namespace-name $sbNamespace \ - --queue-name $sbQueueName \ - --resource-group $resourceGroup \ - --rights Listen Manage Send \ - --subscription $subscriptionID -``` - -Now use the authorization rule to retrieve the connection string to the Service Bus queue. - -```azurecli -# Get the Service Bus queue connection string. -# The "-o tsv" ensures it is returned without the default double-quotes. -sbqConnectionString=$(az servicebus queue authorization-rule keys list \ - --name "sbauthrule" \ - --namespace-name $sbNamespace \ - --queue-name $sbQueueName \ - --resource-group $resourceGroup \ - --subscription $subscriptionID \ - --query primaryConnectionString -o tsv) - -# Show the Service Bus queue connection string. -echo "service bus queue connection string = " $sbqConnectionString -``` - -Now set up the routing endpoint and the message route for the Service Bus queue. These are the variables used by the script that must be set within your Cloud Shell session: - -**endpointName**: This field is the name identifying the endpoint. - -**endpointType**: This field is the type of endpoint. This value must be set to `azurestoragecontainer`, `eventhub`, `servicebusqueue`, or `servicebustopic`. For your purposes here, set it to `servicebusqueue`. - -**routeName**: This field is the name of the route you are setting up. - -**condition**: This field is the query used to filter for the messages sent to this endpoint. The query condition for the messages being routed to the Service Bus queue is `level="critical"`. - -Here is the Azure CLI for the routing endpoint and the message route for the Service Bus queue. - -```azurecli -endpointName="ContosoSBQueueEndpoint" -endpointType="ServiceBusQueue" -routeName="ContosoSBQueueRoute" -condition='level="critical"' - -# Set up the routing endpoint for the Service Bus queue. -# This uses the Service Bus queue connection string. -az iot hub routing-endpoint create \ - --connection-string $sbqConnectionString \ - --endpoint-name $endpointName \ - --endpoint-resource-group $resourceGroup \ - --endpoint-subscription-id $subscriptionID \ - --endpoint-type $endpointType \ - --hub-name $iotHubName \ - --resource-group $resourceGroup - -# Set up the message route for the Service Bus queue endpoint. -az iot hub route create --name $routeName \ - --hub-name $iotHubName \ - --source-type devicemessages \ - --resource-group $resourceGroup \ - --endpoint-name $endpointName \ - --enabled \ - --condition $condition - ``` - -### View message routing in the portal - -[!INCLUDE [iot-hub-include-view-routing-in-portal](../../includes/iot-hub-include-view-routing-in-portal.md)] - -## Next steps - -Now that you have the resources set up and the message routes configured, advance to the next tutorial to learn how to send messages to the IoT hub and see them be routed to the different destinations. 
- -> [!div class="nextstepaction"] -> [Part 2 - View the message routing results](tutorial-routing-view-message-routing-results.md) diff --git a/articles/iot-hub/tutorial-routing-config-message-routing-PowerShell.md b/articles/iot-hub/tutorial-routing-config-message-routing-PowerShell.md deleted file mode 100644 index da5c7872e2ca9..0000000000000 --- a/articles/iot-hub/tutorial-routing-config-message-routing-PowerShell.md +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: Tutorial - Configure message routing for Azure IoT Hub with Azure PowerShell -description: Tutorial - Configure message routing for Azure IoT Hub using Azure PowerShell. Depending on properties in the message, route to either a storage account or a Service Bus queue. -author: kgremban - -ms.service: iot-hub -services: iot-hub -ms.topic: tutorial -ms.date: 03/25/2019 -ms.author: kgremban -ms.custom: mvc, devx-track-azurepowershell -#Customer intent: As a developer, I want to be able to route messages sent to my IoT hub to different destinations based on properties stored in the message. I want to be able to set up the resources and the routing using Azure PowerShell. ---- - -# Tutorial: Use Azure PowerShell to configure IoT Hub message routing - -[!INCLUDE [iot-hub-include-routing-intro](../../includes/iot-hub-include-routing-intro.md)] - -[!INCLUDE [iot-hub-include-routing-create-resources](../../includes/iot-hub-include-routing-create-resources.md)] - -## Download the script (optional) - -For the second part of this tutorial, you download and run a Visual Studio application to send messages to the IoT Hub. There is a folder in the download that contains the Azure Resource Manager template and parameters file, as well as the Azure CLI and PowerShell scripts. - -If you want to view the finished script, download the [Azure IoT C# Samples](https://github.com/Azure-Samples/azure-iot-samples-csharp/archive/main.zip). Unzip the main.zip file. The Azure CLI script is in /iot-hub/Tutorials/Routing/SimulatedDevice/resources/ as **iothub_routing_psh.ps1**. - -## Create your resources - -Start by creating the resources with PowerShell. - -### Use PowerShell to create your base resources - -Copy and paste the script below into Cloud Shell and press Enter. It runs the script one line at a time. This first section of the script will create the base resources for this tutorial, including the storage account, IoT Hub, Service Bus Namespace, and Service Bus queue. As you go through the tutorial, copy each block of script and paste it into Cloud Shell to run it. - -There are several resource names that must be globally unique, such as the IoT Hub name and the storage account name. To make this easier, those resource names are appended with a random alphanumeric value called *randomValue*. The randomValue is generated once at the top of the script and appended to the resource names as needed throughout the script. If you don't want it to be random, you can set it to an empty string or to a specific value. - -> [!IMPORTANT] -> The variables set in the initial script are also used by the routing script, so run all of the script in the same Cloud Shell session. If you open a new session to run the script for setting up the routing, several of the variables will be missing values. -> - -```azurepowershell-interactive -# This command retrieves the subscription id of the current Azure account. -# This field is used when setting up the routing queries. 
-$subscriptionID = (Get-AzContext).Subscription.Id - -# Concatenate this number onto the resources that have to be globally unique. -# You can set this to "" or to a specific value if you don't want it to be random. -# This retrieves the first 6 digits of a random value. -$randomValue = "$(Get-Random)".Substring(0,6) - -# Set the values for the resource names that don't have to be globally unique. -$location = "West US" -$resourceGroup = "ContosoResources" -$iotHubConsumerGroup = "ContosoConsumers" -$containerName = "contosoresults" - -# Create the resource group to be used -# for all resources for this tutorial. -New-AzResourceGroup -Name $resourceGroup -Location $location - -# The IoT hub name must be globally unique, -# so add a random value to the end. -$iotHubName = "ContosoTestHub" + $randomValue -Write-Host "IoT hub name is " $iotHubName - -# Create the IoT hub. -New-AzIotHub -ResourceGroupName $resourceGroup ` - -Name $iotHubName ` - -SkuName "S1" ` - -Location $location ` - -Units 1 - -# Add a consumer group to the IoT hub. -Add-AzIotHubEventHubConsumerGroup -ResourceGroupName $resourceGroup ` - -Name $iotHubName ` - -EventHubConsumerGroupName $iotHubConsumerGroup - -# The storage account name must be globally unique, so add a random value to the end. -$storageAccountName = "contosostorage" + $randomValue -Write-Host "storage account name is " $storageAccountName - -# Create the storage account to be used as a routing destination. -# Save the context for the storage account -# to be used when creating a container. -$storageAccount = New-AzStorageAccount -ResourceGroupName $resourceGroup ` - -Name $storageAccountName ` - -Location $location ` - -SkuName Standard_LRS ` - -Kind Storage -# Retrieve the connection string from the context. -$storageConnectionString = $storageAccount.Context.ConnectionString -Write-Host "storage connection string = " $storageConnectionString - -# Create the container in the storage account. -New-AzStorageContainer -Name $containerName ` - -Context $storageAccount.Context - -# The Service Bus namespace must be globally unique, -# so add a random value to the end. -$serviceBusNamespace = "ContosoSBNamespace" + $randomValue -Write-Host "Service Bus namespace is " $serviceBusNamespace - -# Create the Service Bus namespace. -New-AzServiceBusNamespace -ResourceGroupName $resourceGroup ` - -Location $location ` - -Name $serviceBusNamespace - -# The Service Bus queue name must be globally unique, -# so add a random value to the end. -$serviceBusQueueName = "ContosoSBQueue" + $randomValue -Write-Host "Service Bus queue name is " $serviceBusQueueName - -# Create the Service Bus queue to be used as a routing destination. -New-AzServiceBusQueue -ResourceGroupName $resourceGroup ` - -Namespace $serviceBusNamespace ` - -Name $serviceBusQueueName ` - -EnablePartitioning $False -``` - -### Create a simulated device - -[!INCLUDE [iot-hub-include-create-simulated-device-portal](../../includes/iot-hub-include-create-simulated-device-portal.md)] - -Now that the base resources are set up, you can configure the message routing. - -## Set up message routing - -[!INCLUDE [iot-hub-include-create-routing-description](../../includes/iot-hub-include-create-routing-description.md)] - -To create a routing endpoint, use [Add-AzIotHubRoutingEndpoint](/powershell/module/az.iothub/Add-AzIotHubRoutingEndpoint). To create the messaging route for the endpoint, use [Add-AzIotHubRoute](/powershell/module/az.iothub/Add-AzIoTHubRoute). 
- -### Route to a storage account - -First, set up the endpoint for the storage account, then create the message route. - -[!INCLUDE [iot-hub-include-blob-storage-format](../../includes/iot-hub-include-blob-storage-format.md)] - -These are the variables used by the script that must be set within your Cloud Shell session: - -**resourceGroup**: There are two occurrences of this field -- set both of them to your resource group. - -**name**: This field is the name of the IoT Hub to which the routing will apply. - -**endpointName**: This field is the name identifying the endpoint. - -**endpointType**: This field is the type of endpoint. This value must be set to `azurestoragecontainer`, `eventhub`, `servicebusqueue`, or `servicebustopic`. For your purposes here, set it to `azurestoragecontainer`. - -**subscriptionID**: This field is set to the subscriptionID for your Azure account. - -**storageConnectionString**: This value is retrieved from the storage account set up in the previous script. It is used by the routing to access the storage account. - -**containerName**: This field is the name of the container in the storage account to which data will be written. - -**Encoding**: Set this field to either `AVRO` or `JSON`. This designates the format of the stored data. The default is AVRO. - -**routeName**: This field is the name of the route you are setting up. - -**condition**: This field is the query used to filter for the messages sent to this endpoint. The query condition for the messages being routed to storage is `level="storage"`. - -**enabled**: This field defaults to `true`, indicating that the message route should be enabled after being created. - -Copy this script and paste it into your Cloud Shell window. - -```powershell -##### ROUTING FOR STORAGE ##### - -$endpointName = "ContosoStorageEndpoint" -$endpointType = "azurestoragecontainer" -$routeName = "ContosoStorageRoute" -$condition = 'level="storage"' -``` - -The next step is to create the routing endpoint for the storage account. You also specify the container in which the results will be stored. The container was created when the storage account was created. - -```powershell -# Create the routing endpoint for storage. -# Specify 'AVRO' or 'JSON' for the encoding of the data. -Add-AzIotHubRoutingEndpoint ` - -ResourceGroupName $resourceGroup ` - -Name $iotHubName ` - -EndpointName $endpointName ` - -EndpointType $endpointType ` - -EndpointResourceGroup $resourceGroup ` - -EndpointSubscriptionId $subscriptionId ` - -ConnectionString $storageConnectionString ` - -ContainerName $containerName ` - -Encoding AVRO -``` - -Next, create the message route for the storage endpoint. The message route designates where to send the messages that meet the query specification. - -```powershell -# Create the route for the storage endpoint. -Add-AzIotHubRoute ` - -ResourceGroupName $resourceGroup ` - -Name $iotHubName ` - -RouteName $routeName ` - -Source DeviceMessages ` - -EndpointName $endpointName ` - -Condition $condition ` - -Enabled -``` - -### Route to a Service Bus queue - -Now set up the routing for the Service Bus queue. To retrieve the connection string for the Service Bus queue, you must create an authorization rule that has the correct rights defined. The following script creates an authorization rule for the Service Bus queue called `sbauthrule`, and sets the rights to `Listen Manage Send`. Once this authorization rule is set up, you can use it to retrieve the connection string for the queue. 
- -```powershell -##### ROUTING FOR SERVICE BUS QUEUE ##### - -# Create the authorization rule for the Service Bus queue. -New-AzServiceBusAuthorizationRule ` - -ResourceGroupName $resourceGroup ` - -NamespaceName $serviceBusNamespace ` - -Queue $serviceBusQueueName ` - -Name "sbauthrule" ` - -Rights @("Manage","Listen","Send") -``` - -Now use the authorization rule to retrieve the Service Bus queue key. This authorization rule will be used to retrieve the connection string later in the script. - -```powershell -$sbqkey = Get-AzServiceBusKey ` - -ResourceGroupName $resourceGroup ` - -NamespaceName $serviceBusNamespace ` - -Queue $servicebusQueueName ` - -Name "sbauthrule" -``` - -Now set up the routing endpoint and the message route for the Service Bus queue. These are the variables used by the script that must be set within your Cloud Shell session: - -**endpointName**: This field is the name identifying the endpoint. - -**endpointType**: This field is the type of endpoint. This value must be set to `azurestoragecontainer`, `eventhub`, `servicebusqueue`, or `servicebustopic`. For your purposes here, set it to `servicebusqueue`. - -**routeName**: This field is the name of the route you are setting up. - -**condition**: This field is the query used to filter for the messages sent to this endpoint. The query condition for the messages being routed to the Service Bus queue is `level="critical"`. - -Here is the Azure PowerShell for the message routing for the Service Bus queue. - -```powershell -$endpointName = "ContosoSBQueueEndpoint" -$endpointType = "servicebusqueue" -$routeName = "ContosoSBQueueRoute" -$condition = 'level="critical"' - -# If this script fails on the next statement (Add-AzIotHubRoutingEndpoint), -# put the pause in and run it again. Note that if you're running it -# interactively, you can just stop it and then run the rest, because -# you have already set the variables before you get to this point. -# -# Pause for 90 seconds to allow previous steps to complete. -# Then report it to the IoT team here: -# https://github.com/Azure/azure-powershell/issues -# pause for 90 seconds and then start again. -# This way, it if didn't get to finish before it tried to move on, -# now it will have time to finish first. - Start-Sleep -Seconds 90 - -# This command is the one that sometimes doesn't work. It's as if it doesn't have time to -# finish before it moves to the next line. -# The error from Add-AzIotHubRoutingEndpoint is "Operation returned an invalid status code 'BadRequest'". -# This command adds the routing endpoint, using the connection string property from the key. -# This will definitely work if you execute the Sleep command first (it's in the line above). -Add-AzIotHubRoutingEndpoint ` - -ResourceGroupName $resourceGroup ` - -Name $iotHubName ` - -EndpointName $endpointName ` - -EndpointType $endpointType ` - -EndpointResourceGroup $resourceGroup ` - -EndpointSubscriptionId $subscriptionId ` - -ConnectionString $sbqkey.PrimaryConnectionString - -# Set up the message route for the Service Bus queue endpoint. 
-Add-AzIotHubRoute ` - -ResourceGroupName $resourceGroup ` - -Name $iotHubName ` - -RouteName $routeName ` - -Source DeviceMessages ` - -EndpointName $endpointName ` - -Condition $condition ` - -Enabled -``` - -### View message routing in the portal - -[!INCLUDE [iot-hub-include-view-routing-in-portal](../../includes/iot-hub-include-view-routing-in-portal.md)] - -## Next steps - -Now that you have the resources set up and the message routes configured, advance to the next tutorial to learn how to send messages to the IoT hub and see them be routed to the different destinations. - -> [!div class="nextstepaction"] -> [Part 2 - View the message routing results](tutorial-routing-view-message-routing-results.md) diff --git a/articles/iot-hub/tutorial-routing-config-message-routing-RM-template.md b/articles/iot-hub/tutorial-routing-config-message-routing-RM-template.md deleted file mode 100644 index 707d17b2a6df5..0000000000000 --- a/articles/iot-hub/tutorial-routing-config-message-routing-RM-template.md +++ /dev/null @@ -1,419 +0,0 @@ ---- -title: Tutorial - Configure message routing for Azure IoT Hub using an Azure Resource Manager template -description: Tutorial - Configure message routing for Azure IoT Hub using an Azure Resource Manager template -author: kgremban -ms.service: iot-hub -services: iot-hub -ms.topic: tutorial -ms.date: 08/24/2021 -ms.author: kgremban -ms.custom: mvc, devx-track-azurepowershell -#Customer intent: As a developer, I want to be able to route messages sent to my IoT hub to different destinations based on properties stored in the message. This step of the tutorial needs to show me how to set up my resources using an Azure Resource Manager template. ---- - -# Tutorial: Use an Azure Resource Manager template to configure IoT Hub message routing - -[!INCLUDE [iot-hub-include-routing-intro](../../includes/iot-hub-include-routing-intro.md)] - -[!INCLUDE [iot-hub-include-routing-create-resources](../../includes/iot-hub-include-routing-create-resources.md)] - -## Message routing - -[!INCLUDE [iot-hub-include-create-routing-description](../../includes/iot-hub-include-create-routing-description.md)] - -## Download the template and parameters file - -For the second part of this tutorial, you download and run a Visual Studio application to send messages to the IoT Hub. There is a folder in that download that contains the Azure Resource Manager template and parameters file, as well as the Azure CLI and PowerShell scripts. - -Go ahead and download the [Azure IoT C# Samples](https://github.com/Azure-Samples/azure-iot-samples-csharp/archive/main.zip) now. Unzip the main.zip file. The Resource Manager template and the parameters file are in /iot-hub/Tutorials/Routing/SimulatedDevice/resources/ as **template_iothub.json** and **template_iothub_parameters.json**. - -## Create your resources - -You're going to use an Azure Resource Manager (RM) template to create all of your resources. The Azure CLI and PowerShell scripts can be run a few lines at a time. An RM template is deployed in one step. This article shows you the sections separately to help you understand each one. Then it will show you how to deploy the template, and create the virtual device for testing. After the template is deployed, you can view the message routing configuration in the portal. - -There are several resource names that must be globally unique, such as the IoT Hub name and the storage account name. 
To make naming the resources easier, those resource names are set up to append a random alphanumeric value generated from the current date/time. - -If you look at the template, you'll see where variables are set up for these resources that take the parameter passed in and concatenate *randomValue* to the parameter. - -The following section explains the parameters used. - -### Parameters - -Most of these parameters have default values. The ones ending with **_in** are concatenated with *randomValue* to make them globally unique. - -**randomValue**: This value is generated from the current date/time when you deploy the template. This field is not in the parameters file, as it is generated in the template itself. - -**subscriptionId**: This field is set for you to the subscription into which you are deploying the template. This field is not in the parameters file since it is set for you. - -**IoTHubName_in**: This field is the base IoT Hub name, which is concatenated with the randomValue so it is globally unique. - -**location**: This field is the Azure region into which you are deploying, such as "westus". - -**consumer_group**: This field is the consumer group set for messages coming through the routing endpoint. It is used to filter results in Azure Stream Analytics. For example, there is the whole stream where you get everything, or if you have data coming through with consumer_group set to **Contoso**, then you can set up an Azure Stream Analytics stream (and Power BI report) to show only those entries. This field is used in part 2 of this tutorial. - -**sku_name**: This field is the scaling for the IoT Hub. This value must be S1 or above; a free tier does not work for this tutorial because it does not allow multiple endpoints. - -**sku_units**: This field goes with the **sku_name**, and is the number of IoT Hub units that can be used. - -**d2c_partitions**: This field is the number of partitions used for the event stream. - -**storageAccountName_in**: This field is the name of the storage account to be created. Messages are routed to a container in the storage account. This field is concatenated with the randomValue to make it globally unique. - -**storageContainerName**: This field is the name of the container in which the messages routed to the storage account are stored. - -**storage_endpoint**: This field is the name for the storage account endpoint used by the message routing. - -**service_bus_namespace_in**: This field is the name of the Service Bus namespace to be created. This value is concatenated with the randomValue to make it globally unique. - -**service_bus_queue_in**: This field is the name of the Service Bus queue used for routing messages. This value is concatenated with the randomValue to make it globally unique. - -**AuthRules_sb_queue**: This field is the authorization rules for the service bus queue, used to retrieve the connection string for the queue. - -### Variables - -These values are used in the template, and are mostly derived from parameters. - -**queueAuthorizationRuleResourceId**: This field is the ResourceId for the authorization rule for the Service Bus queue. ResourceId is in turn used to retrieve the connection string for the queue. - -**iotHubName**: This field is the name of the IoT Hub after having randomValue concatenated. - -**storageAccountName**: This field is the name of the storage account after having randomValue concatenated. - -**service_bus_namespace**: This field is the namespace after having randomValue concatenated. 
- -**service_bus_queue**: This field is the Service Bus queue name after having randomValue concatenated. - -**sbVersion**: The version of the Service Bus API to use. In this case, it is "2017-04-01". - -### Resources: Storage account and container - -The first resource created is the storage account, along with the container to which messages are routed. The container is a resource under the storage account. It has a `dependsOn` clause for the storage account, requiring the storage account be created before the container. - -Here's what this section looks like: - -```json -{ - "type": "Microsoft.Storage/storageAccounts", - "name": "[variables('storageAccountName')]", - "apiVersion": "2018-07-01", - "location": "[parameters('location')]", - "sku": { - "name": "Standard_LRS", - "tier": "Standard" - }, - "kind": "Storage", - "properties": {}, - "resources": [ - { - "type": "blobServices/containers", - "apiVersion": "2018-07-01", - "name": "[concat('default/', parameters('storageContainerName'))]", - "properties": { - "publicAccess": "None" - } , - "dependsOn": [ - "[resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountName'))]" - ] - } - ] -} -``` - -### Resources: Service Bus namespace and queue - -The second resource created is the Service Bus namespace, along with the Service Bus queue to which messages are routed. The SKU is set to standard. The API version is retrieved from the variables. It is also set to activate the Service Bus namespace when it deploys this section (status:Active). - -```json -{ - "type": "Microsoft.ServiceBus/namespaces", - "comments": "The Sku should be 'Standard' for this tutorial.", - "sku": { - "name": "Standard", - "tier": "Standard" - }, - "name": "[variables('service_bus_namespace')]", - "apiVersion": "[variables('sbVersion')]", - "location": "[parameters('location')]", - "properties": { - "provisioningState": "Succeeded", - "metricId": "[concat('a4295411-5eff-4f81-b77e-276ab1ccda12:', variables('service_bus_namespace'))]", - "serviceBusEndpoint": "[concat('https://', variables('service_bus_namespace'),'.servicebus.windows.net:443/')]", - "status": "Active" - }, - "dependsOn": [] -} -``` - -This section creates the Service Bus queue. This part of the script has a `dependsOn` clause that ensures the namespace is created before the queue. - -```json -{ - "type": "Microsoft.ServiceBus/namespaces/queues", - "name": "[concat(variables('service_bus_namespace'), '/', variables('service_bus_queue'))]", - "apiVersion": "[variables('sbVersion')]", - "location": "[parameters('location')]", - "scale": null, - "properties": {}, - "dependsOn": [ - "[resourceId('Microsoft.ServiceBus/namespaces', variables('service_bus_namespace'))]" - ] -} -``` - -### Resources: Iot Hub and message routing - -Now that the storage account and Service Bus queue have been created, you create the IoT Hub that routes messages to them. The RM template uses `dependsOn` clauses so it doesn't try to create the hub before the Service Bus resources and the storage account have been created. - -Here's the first part of the IoT Hub section. This part of the template sets up the dependencies and starts with the properties. 
- -```json -{ - "apiVersion": "2018-04-01", - "type": "Microsoft.Devices/IotHubs", - "name": "[variables('IoTHubName')]", - "location": "[parameters('location')]", - "dependsOn": [ - "[resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountName'))]", - "[resourceId('Microsoft.ServiceBus/namespaces', variables('service_bus_namespace'))]", - "[resourceId('Microsoft.ServiceBus/namespaces/queues', variables('service_bus_namespace'), variables('service_bus_queue'))]" - ], - "properties": { - "eventHubEndpoints": {} - "events": { - "retentionTimeInDays": 1, - "partitionCount": "[parameters('d2c_partitions')]" - } - }, -``` - -The next section is the section for the message routing configuration for the Iot Hub. First is the section for the endpoints. This part of the template sets up the routing endpoints for the Service Bus queue and the storage account, including the connection strings. - -To create the connection string for the queue, you need the queueAuthorizationRulesResourcedId, which is retrieved inline. To create the connection string for the storage account, you retrieve the primary storage key and then use it in the format for the connection string. - -The endpoint configuration is also where you set the blob format to `AVRO` or `JSON`. - -[!INCLUDE [iot-hub-include-blob-storage-format](../../includes/iot-hub-include-blob-storage-format.md)] - - ```json -"routing": { - "endpoints": { - "serviceBusQueues": [ - { - "connectionString": "[Concat('Endpoint=sb://',variables('service_bus_namespace'),'.servicebus.windows.net/;SharedAccessKeyName=',parameters('AuthRules_sb_queue'),';SharedAccessKey=',listkeys(variables('queueAuthorizationRuleResourceId'),variables('sbVersion')).primaryKey,';EntityPath=',variables('service_bus_queue'))]", - "name": "[parameters('service_bus_queue_endpoint')]", - "subscriptionId": "[parameters('subscriptionId')]", - "resourceGroup": "[resourceGroup().Name]" - } - ], - "serviceBusTopics": [], - "eventHubs": [], - "storageContainers": [ - { - "connectionString": - "[Concat('DefaultEndpointsProtocol=https;AccountName=',variables('storageAccountName'),';AccountKey=',listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountName')), providers('Microsoft.Storage', 'storageAccounts').apiVersions[0]).keys[0].value)]", - "containerName": "[parameters('storageContainerName')]", - "fileNameFormat": "{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}", - "batchFrequencyInSeconds": 100, - "maxChunkSizeInBytes": 104857600, - "encoding": "avro", - "name": "[parameters('storage_endpoint')]", - "subscriptionId": "[parameters('subscriptionId')]", - "resourceGroup": "[resourceGroup().Name]" - } - ] - }, -``` - -This next section is for the message routes to the endpoints. There is one set up for each endpoint, so there is one for the Service Bus queue and one for the storage account container. - -Remember that the query condition for the messages being routed to storage is `level="storage"`, and the query condition for the messages being routed to the Service Bus queue is `level="critical"`. 
- -```json -"routes": [ - { - "name": "contosoStorageRoute", - "source": "DeviceMessages", - "condition": "level=\"storage\"", - "endpointNames": [ - "[parameters('storage_endpoint')]" - ], - "isEnabled": true - }, - { - "name": "contosoSBQueueRoute", - "source": "DeviceMessages", - "condition": "level=\"critical\"", - "endpointNames": [ - "[parameters('service_bus_queue_endpoint')]" - ], - "isEnabled": true - } -], -``` - -This json shows the rest of the IoT Hub section, which contains default information and the SKU for the hub. - -```json - "fallbackRoute": { - "name": "$fallback", - "source": "DeviceMessages", - "condition": "true", - "endpointNames": [ - "events" - ], - "isEnabled": true - } - }, - "storageEndpoints": { - "$default": { - "sasTtlAsIso8601": "PT1H", - "connectionString": "", - "containerName": "" - } - }, - "messagingEndpoints": { - "fileNotifications": { - "lockDurationAsIso8601": "PT1M", - "ttlAsIso8601": "PT1H", - "maxDeliveryCount": 10 - } - }, - "enableFileUploadNotifications": false, - "cloudToDevice": { - "maxDeliveryCount": 10, - "defaultTtlAsIso8601": "PT1H", - "feedback": { - "lockDurationAsIso8601": "PT1M", - "ttlAsIso8601": "PT1H", - "maxDeliveryCount": 10 - } - } - }, - "sku": { - "name": "[parameters('sku_name')]", - "capacity": "[parameters('sku_units')]" - } -} -``` - -### Resources: Service Bus queue authorization rules - -The Service Bus queue authorization rule is used to retrieve the connection string for the Service Bus queue. It uses a `dependsOn` clause to ensure it is not created before the Service Bus namespace and the Service Bus queue. - -```json -{ - "type": "Microsoft.ServiceBus/namespaces/queues/authorizationRules", - "name": "[concat(variables('service_bus_namespace'), '/', variables('service_bus_queue'), '/', parameters('AuthRules_sb_queue'))]", - "apiVersion": "[variables('sbVersion')]", - "location": "[parameters('location')]", - "scale": null, - "properties": { - "rights": [ - "Send" - ] - }, - "dependsOn": [ - "[resourceId('Microsoft.ServiceBus/namespaces', variables('service_bus_namespace'))]", - "[resourceId('Microsoft.ServiceBus/namespaces/queues', variables('service_bus_namespace'), variables('service_bus_queue'))]" - ] -}, -``` - -### Resources: Consumer group - -In this section, you create a Consumer Group for the IoT Hub data to be used by the Azure Stream Analytics in the second part of this tutorial. - -```json -{ - "type": "Microsoft.Devices/IotHubs/eventHubEndpoints/ConsumerGroups", - "name": "[concat(variables('iotHubName'), '/events/',parameters('consumer_group'))]", - "apiVersion": "2018-04-01", - "dependsOn": [ - "[concat('Microsoft.Devices/IotHubs/', variables('iotHubName'))]" - ] -} -``` - -### Resources: Outputs - -If you want to send a value back to the deployment script to be displayed, you use an output section. This part of the template returns the connection string for the Service Bus queue. Returning a value isn't required, it's included as an example of how to return results to the calling script. 
- -```json -"outputs": { - "sbq_connectionString": { - "type": "string", - "value": "[Concat('Endpoint=sb://',variables('service_bus_namespace'),'.servicebus.windows.net/;SharedAccessKeyName=',parameters('AuthRules_sb_queue'),';SharedAccessKey=',listkeys(variables('queueAuthorizationRuleResourceId'),variables('sbVersion')).primaryKey,';EntityPath=',variables('service_bus_queue'))]" - } - } -``` - -## Deploy the RM template - -To deploy the template to Azure, upload the template and the parameters file to Azure Cloud Shell, and then execute a script to deploy the template. Open Azure Cloud Shell and sign in. This example uses PowerShell. - -To upload the files, select the **Upload/Download files** icon in the menu bar, then choose Upload. - -![Screenshot that highlights the Upload/Download files icon.](media/tutorial-routing-config-message-routing-RM-template/CloudShell_upload_files.png) - -Use the File Explorer that pops up to find the files on your local disk and select them, then choose **Open**. - -After the files are uploaded, a results dialog shows something like the following image. - -![Cloud Shell menu bar with Upload/Download results highlighted](media/tutorial-routing-config-message-routing-RM-template/CloudShell_upload_results.png) - -The files are uploaded to the share used by your Cloud Shell instance. - -Run the script to perform the deployment. The last line of this script retrieves the variable that was set up to be returned -- the Service Bus queue connection string. - -The script sets and uses these variables: - -**$RGName** is the resource group name to which to deploy the template. This field is created before deploying the template. - -**$location** is the Azure location to be used for the template, such as "westus". - -**deploymentname** is a name you assign to the deployment to retrieve the returning variable value. - -Here's the PowerShell script. Copy this PowerShell script and paste it into the Cloud Shell window, then hit Enter to run it. - -```powershell -$RGName="ContosoResources" -$location = "westus" -$deploymentname="contoso-routing" - -# Remove the resource group if it already exists. -#Remove-AzResourceGroup -name $RGName -# Create the resource group. -New-AzResourceGroup -name $RGName -Location $location - -# Set a path to the parameter file. -$parameterFile = "$HOME/template_iothub_parameters.json" -$templateFile = "$HOME/template_iothub.json" - -# Deploy the template. -New-AzResourceGroupDeployment ` - -Name $deploymentname ` - -ResourceGroupName $RGName ` - -TemplateParameterFile $parameterFile ` - -TemplateFile $templateFile ` - -verbose - -# Get the returning value of the connection string. -(Get-AzResourceGroupDeployment -ResourceGroupName $RGName -Name $deploymentname).Outputs.sbq_connectionString.value -``` - -If you have script errors, you can edit the script locally, upload it again to the Cloud Shell, and run the script again. After the script finishes running successfully, continue to the next step. - -## Create simulated device - -[!INCLUDE [iot-hub-include-create-simulated-device-portal](../../includes/iot-hub-include-create-simulated-device-portal.md)] - -## View message routing in the portal - -[!INCLUDE [iot-hub-include-view-routing-in-portal](../../includes/iot-hub-include-view-routing-in-portal.md)] - -## Next steps - -Now that you have all of the resources set up and the message routes are configured, advance to the next tutorial to learn how to process and display the information about the routed messages. 
- -> [!div class="nextstepaction"] -> [Part 2 - View the message routing results](tutorial-routing-view-message-routing-results.md) diff --git a/articles/iot-hub/tutorial-routing-view-message-routing-results.md b/articles/iot-hub/tutorial-routing-view-message-routing-results.md deleted file mode 100644 index 0825d7470a329..0000000000000 --- a/articles/iot-hub/tutorial-routing-view-message-routing-results.md +++ /dev/null @@ -1,317 +0,0 @@ ---- -title: Tutorial - View Azure IoT Hub message routing results (.NET) | Microsoft Docs -description: Tutorial - After setting up all of the resources using Part 1 of the tutorial, add the ability to route messages to Azure Stream Analytics and view the results in Power BI. -author: kgremban -ms.service: iot-hub -services: iot-hub -ms.topic: tutorial -ms.date: 09/21/2021 -ms.author: kgremban -ms.custom: "mvc, devx-track-csharp, devx-track-azurepowershell" -#Customer intent: As a developer, I want to be able to route messages sent to my IoT hub to different destinations based on properties stored in the message. ---- - -# Tutorial: Part 2 - View the routed messages - -[!INCLUDE [iot-hub-include-routing-intro](../../includes/iot-hub-include-routing-intro.md)] - -[!INCLUDE [updated-for-az](../../includes/updated-for-az.md)] - -## Rules for routing the messages - -The following are the rules for the message routing that were set up in Part 1 of this tutorial, and you see them work in this second part. - -|Value |Result| -|------|------| -|level="storage" |Write to Azure Storage.| -|level="critical" |Write to a Service Bus queue. A Logic App retrieves the message from the queue and uses Office 365 to e-mail the message.| -|default |Display this data using Power BI.| - -Now you create the resources to which the messages will be routed, run an app to send messages to the hub, and see the routing in action. - -## Create a Logic App - -The Service Bus queue is to be used for receiving messages designated as critical. Set up a Logic app to monitor the Service Bus queue, and send an e-mail when a message is added to the queue. - -1. In the [Azure portal](https://portal.azure.com), select **+ Create a resource**. Put **logic app** in the search box and select Enter. From the search results displayed, select Logic App, then select **Create** to continue to the **Create logic app** pane. Fill in the fields. - - **Subscription**: Select your Azure subscription. - - **Resource group**: Select **Create new** under the Resource Group field. Specify **ContosoResources** for the name of the resource group. - - **Instance Details** - **Type**: Select **Consumption** for the instance type. - - For **Logic App Name**, specify the name of the logic app. This tutorial uses **ContosoLogicApp**. - - **Region**: Use the location of the nearest datacenter. This tutorial uses **West US**. - - **Enable Log Analytics**: Set this toggle button to not enable the log analytics. - - ![The Create Logic App screen](./media/tutorial-routing-view-message-routing-results/create-logic-app.png) - - Select **Review + Create**. It may take a few minutes for the app to deploy. When it's finished, it shows a screen giving the overview of the deployment. - -1. Go to the Logic App. If you're still on the deployment page, you can select **Go To Resource**. Another way to get to the Logic App is to select **Resource groups**, select your resource group (this tutorial uses **ContosoResources**), then select the Logic App from the list of resources. 
- - Scroll down until you see the almost-empty tile that says **Blank Logic App +** and select it. The default tab on the screen is "For You". If this pane is blank, select **All** to see the connectors and triggers available. - -1. Select **Service Bus** from the list of connectors. - - ![The list of connectors](./media/tutorial-routing-view-message-routing-results/logic-app-connectors.png) - -1. This screenshot shows a list of triggers. Select the one that says **When a message is received in a queue (auto-complete)**. - - ![The list of triggers](./media/tutorial-routing-view-message-routing-results/logic-app-triggers.png) - -1. Fill in the fields on the next screen with the connection information. - - **Connection Name**: ContosoConnection - - Select the Service Bus Namespace. This tutorial uses **ContosoSBNamespace**. The name of the key (RootManageSharedAccessKey) and the rights (Listen, Manage, Send) are retrieved and loaded. Select **RootManageSharedAccessKey**. The **Create** button changes to blue (active). Select it; it shows the queue selection screen. - -1. Next, provide information about the queue. - - ![Selecting a queue](./media/tutorial-routing-view-message-routing-results/logic-app-queue-options.png) - - **Queue Name:** This field is the name of the queue from which the message is sent. Select this dropdown list and select the queue name that was set in the setup steps. This tutorial uses **contososbqueue**. - - **Queue Type:** The type of queue. Select **Main** from the dropdown list. - - Take the defaults for the other fields. Select **Save** to save the logic apps designer configuration. - -1. Select **+New Step**. The **Choose an operation** pane is displayed. Select **Office 365 Outlook**. In the list, find and select **Send an Email (V2)**. Sign in to your Office 365 account. - -1. Fill in the fields to be used when sending an e-mail about the message in the queue. - - ![Select to send-an-email from one of the Outlook connectors](./media/tutorial-routing-view-message-routing-results/logic-app-send-email.png) - - **To:** Put in the e-mail address where the warning is to be sent. - - **Subject:** Fill in the subject for the e-mail. - - **Body**: Fill in some text for the body. Select **Add dynamic content**, it will show fields you can pick from the e-mail to include. If you don't see any, select **See More** to see more options. Select **Content** to have the body from the e-mail displayed in the error message. - -1. Select **Save** to save your changes. Close the Logic app Designer. - -## Set up Azure Stream Analytics - -To see the data in a Power BI visualization, first set up a Stream Analytics job to retrieve the data. Remember that only the messages where the **level** is **normal** are sent to the default endpoint, and will be retrieved by the Stream Analytics job for the Power BI visualization. - -### Create the Stream Analytics job - -1. Put **stream** **analytics** **job** in the [Azure portal](https://portal.azure.com) search box and select **Enter**. Select **Create** to get to the Stream Analytics job screen, and then **create** again to get to the create screen. - -1. Enter the following information for the job. - - **Job name**: The name of the job. The name must be globally unique. This tutorial uses **contosoJob**. - - **Subscription**: The Azure subscription you are using for the tutorial. - - **Resource group**: Use the same resource group used by your IoT hub. This tutorial uses **ContosoResources**. 
- - **Location**: Use the same location used in the setup script. This tutorial uses **West US**. - - ![Create the stream analytics job](./media/tutorial-routing-view-message-routing-results/stream-analytics-create-job.png) - -1. Select **Create** to create the job. It may take a few minutes to deploy. - - To return to the job, select **Go to resource**. You can also select **Resource groups**. This tutorial uses **ContosoResources**. Then select the resource group, then select the Stream Analytics job in the list of resources. - -### Add an input to the Stream Analytics job - -1. Under **Job Topology**, select **Inputs**. - -1. In the **Inputs** pane, select **Add stream input** and select IoT Hub. On the screen that comes up, fill in the following fields: - - **Input alias**: This tutorial uses **contosoinputs**. - - Select **Select IoT Hub from your subscriptions**, then select your subscription from the dropdown list. - - **IoT Hub**: Select the IoT hub. This tutorial uses **ContosoTestHub**. - - **Consumer group**: Select the consumer group set up in Part 1 of this tutorial. This tutorial uses **contosoconsumers**. - - **Shared access policy name**: Select **service**. The portal fills in the Shared Access Policy Key for you. - - **Endpoint**: Select **Messaging**. (If you select Operations Monitoring, you get the telemetry data about the IoT hub rather than the data you're sending through.) - - For the rest of the fields, accept the defaults. - - ![Set up the inputs for the stream analytics job](./media/tutorial-routing-view-message-routing-results/stream-analytics-job-inputs.png) - -1. Select **Save**. - -### Add an output to the Stream Analytics job - -1. Under **Job Topology**, select **Outputs**. - -1. In the **Outputs** pane, select **Add**, and then select **Power BI**. On the screen that comes up, fill in the following fields: - - **Output alias**: The unique alias for the output. This tutorial uses **contosooutputs**. - - Select **Select Group workspace from your subscriptions**. In **Group workspace**, specify **My workspace**. - - **Authentication mode**: Select **User token**. - - **Dataset name**: Name of the dataset to be used in Power BI. This tutorial uses **contosodataset**. - - **Table name**: Name of the table to be used in Power BI. This tutorial uses **contosotable**. - -1. Select **Authorize**, and sign in to your Power BI account. (Signing in may take more than one try). - - ![Set up the outputs for the stream analytics job](./media/tutorial-routing-view-message-routing-results/stream-analytics-job-outputs.png) - -1. Select **Save**. - -### Configure the query of the Stream Analytics job - -1. Under **Job Topology**, select **Query**. - -1. Replace `[YourInputAlias]` with the input alias of the job. This tutorial uses **contosoinputs**. - -1. Replace `[YourOutputAlias]` with the output alias of the job. This tutorial uses **contosooutputs**. - - ![Set up the query for the stream analytics job](./media/tutorial-routing-view-message-routing-results/stream-analytics-job-query.png) - -1. Select **Save**. - -1. Close the Query pane. You return to the view of the resources in the Resource Group. Select the Stream Analytics job. This tutorial calls it **contosoJob**. - -### Run the Stream Analytics job - -In the Stream Analytics job, select **Start** > **Now** > **Start**. Once the job successfully starts, the job status changes from **Stopped** to **Running**. 
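The running job now reads from the input alias and writes to the output alias using the query you configured earlier. Assuming you kept the default `SELECT *` and only replaced the aliases, the query for this tutorial (with **contosoinputs** and **contosooutputs**) looks something like the following sketch; it passes every field of the incoming messages through to Power BI unchanged.

```sql
SELECT
    *
INTO
    contosooutputs
FROM
    contosoinputs
```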
- -To set up the Power BI report, you need data, so you'll set up Power BI after you create the device and run the device simulation application to generate some data. - -## Run simulated device app - -In Part 1 of this tutorial, you set up a device to simulate an IoT device. If you haven't already downloaded the .NET console app that simulates the device sending device-to-cloud messages to an IoT hub, you'll download it here. - -This application sends messages for each of the different message routing methods. There is also a folder in the download that contains the complete Azure Resource Manager template and parameters file, as well as the Azure CLI and PowerShell scripts. - -If you didn't download the files from the repository in Part 1 of this tutorial, go ahead and download them now from [IoT Device Simulation](https://github.com/Azure-Samples/azure-iot-samples-csharp/archive/main.zip). Selecting this link downloads a repository with several applications in it; the solution for this tutorial is iot-hub/Tutorials/Routing/IoT_SimulatedDevice.sln. - -Double-click on the solution file (IoT_SimulatedDevice.sln) to open the code in Visual Studio, then open Program.cs. Substitute `{your hub name}` with the IoT hub host name. The format of the IoT hub host name is **{iot-hub-name}.azure-devices.net**. For this tutorial, the hub host name is **ContosoTestHub.azure-devices.net**. Next, substitute `{your device key}` with the device key you saved earlier when setting up the simulated device. - -```csharp - static string s_myDeviceId = "Contoso-Test-Device"; - static string s_iotHubUri = "ContosoTestHub.azure-devices.net"; - // This is the primary key for the device. This is in the portal. - // Find your IoT hub in the portal > IoT devices > select your device > copy the key. - static string s_deviceKey = "{your device key}"; -``` - -## Run and test - -Run the console application. Wait a few minutes. You can see the messages being sent on the console screen of the application. - -The app sends a new device-to-cloud message to the IoT hub every second. The message contains a JSON-serialized object with the device ID, temperature, humidity, and message level, which defaults to `normal`. It randomly assigns a level of `critical` or `storage`, causing the message to be routed to the storage account or to the Service Bus queue (which triggers your Logic App to send an e-mail). The default (`normal`) readings can be displayed in a BI report. - -If everything is set up correctly, at this point you should see the following results: - -1. You start getting e-mails about critical messages. - - ![The resulting emails](./media/tutorial-routing-view-message-routing-results/results-in-email.png) - - This result means the following statements are true. - - * The routing to the Service Bus queue is working correctly. - * The Logic App retrieving the message from the Service Bus queue is working correctly. - * The Logic App connector to Outlook is working correctly. - -1. In the [Azure portal](https://portal.azure.com), select **Resource groups** and select your Resource Group. This tutorial uses **ContosoResources**. - - Select the storage account, select **Containers**, then select the container that stores your results. This tutorial uses **contosoresults**. You should see a folder, and you can drill down through the directories until you see one or more files. Open one of those files; they contain the entries routed to the storage account. 
- - ![The result files in storage](./media/tutorial-routing-view-message-routing-results/results-in-storage.png) - -This result means the following statement is true. - -* The routing to the storage account is working correctly. - -With the application still running, set up the Power BI visualization to see the messages coming through the default endpoint. - -## Set up the Power BI visualizations - -1. Sign in to your [Power BI](https://powerbi.microsoft.com/) account. - -1. Select **My Workspace**. It shows at least one dataset that was created. If there's nothing there, run the **Simulated Device** application for another 5-10 minutes to stream more data. After the workspace appears, it will have a dataset called ContosoDataset. Right-click on the three vertical dots to the right of the dataset name. In the dropdown list, select **Create report**. - - ![Power BI creating report](./media/tutorial-routing-view-message-routing-results/bi-personal-workspace.png) - -1. Look in the **Visualizations** section on the right-hand side and select **Line chart** to select a line chart in the BI report page. Drag the graphic so it fills the space horizontally. Now in the **Fields** section on the right, open ContosoTable. Select **EventEnqueuedUtcTime**. It should put it across the X-Axis. Select **temperature** and drag it into the **Values** field for temperature. This adds temperature to the chart. You should have something that looks like the following graphic: - - ![Power BI graph of temperature](./media/tutorial-routing-view-message-routing-results/bi-temperature-chart.png) - -1. Click in the bottom half of the chart area. Select **Line Chart** again. It creates a chart under the first one. - -1. In the table, select **EventEnqueuedUtcTime**; it will be placed in the Axis field. Drag **humidity** to the Values field. Now you see both charts. - - ![Power BI graph of both fields](./media/tutorial-routing-view-message-routing-results/bi-chart-temp-humidity.png) - - You sent messages from the default endpoint of the IoT Hub to Azure Stream Analytics. Then you added a Power BI report to show the data, adding two charts to represent the temperature and the humidity. - -1. Select **File > Save** to save the report, entering a name for the report when prompted. Save your report in your workspace. - -You can see data on both charts. This result means the following statements are true: - -* The routing to the default endpoint is working correctly. -* The Azure Stream Analytics job is streaming correctly. -* The Power BI Visualization is set up correctly. - -You can refresh the charts to see the most recent data by selecting the Refresh button on the top of the Power BI window. - -## Clean up resources - -If you want to remove all of the Azure resources you've created through both parts of this tutorial, delete the resource group. This action deletes all resources contained within the group. In this case, it removes the IoT hub, the Service Bus namespace and queue, the Logic App, the storage account, and the resource group itself. You can also remove the Power BI resources and clear the emails sent during the tutorial. - -### Clean up resources in the Power BI visualization - -Sign in to your [Power BI](https://powerbi.microsoft.com/) account. Go to your workspace. This tutorial uses **My Workspace**. To remove the Power BI visualization, go to DataSets and select the trash can icon to delete the dataset. This tutorial uses **contosodataset**. When you remove the dataset, the report is removed as well. 
- -### Use the Azure CLI to clean up resources - -To remove the resource group, use the [az group delete](/cli/azure/group#az-group-delete) command. `$resourceGroup` was set to **ContosoResources** back at the beginning of this tutorial. - -```azurecli-interactive -az group delete --name $resourceGroup -``` - -### Use PowerShell to clean up resources - -To remove the resource group, use the [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) command. `$resourceGroup` was set to **ContosoResources** back at the beginning of this tutorial. - -```azurepowershell-interactive -Remove-AzResourceGroup -Name $resourceGroup -``` - -### Clean up test emails - -You may also want to delete the quantity of emails in your inbox that were generated through the Logic App while the device application was running. - -## Next steps - -In this two-part tutorial, you learned how to use message routing to route IoT Hub messages to different destinations by performing the following tasks. - -**Part I: Create resources, set up message routing** -> [!div class="checklist"] -> * Create the resources--an IoT hub, a storage account, a Service Bus queue, and a simulated device. -> * Configure the endpoints and message routes in IoT Hub for the storage account and Service Bus queue. - -**Part II: Send messages to the hub, view routed results** -> [!div class="checklist"] -> * Create a Logic App that is triggered and sends e-mail when a message is added to the Service Bus queue. -> * Download and run an app that simulates an IoT Device sending messages to the hub for the different routing options. -> -> * Create a Power BI visualization for data sent to the default endpoint. -> -> * View the results ... -> * ...in the Service Bus queue and e-mails. -> * ...in the storage account. -> * ...in the Power BI visualization. - -Advance to the next tutorial to learn how to manage the state of an IoT device. -> [!div class="nextstepaction"] -> [Set up and use metrics and diagnostics with an IoT Hub](tutorial-use-metrics-and-diags.md) - diff --git a/articles/iot-hub/tutorial-routing.md b/articles/iot-hub/tutorial-routing.md index ecd93ef65aa3f..ba1a7ce883c33 100644 --- a/articles/iot-hub/tutorial-routing.md +++ b/articles/iot-hub/tutorial-routing.md @@ -1,232 +1,410 @@ --- -title: Tutorial - Configure message routing for Azure IoT Hub using Azure CLI -description: Tutorial - Configure message routing for Azure IoT Hub using the Azure CLI and the Azure portal +title: Tutorial - Configure message routing | Azure IoT Hub +description: Tutorial - Route device messages to an Azure Storage account with message routing for Azure IoT Hub using the Azure CLI and the Azure portal author: kgremban ms.service: iot-hub services: iot-hub ms.topic: tutorial -ms.date: 08/16/2021 +ms.date: 05/24/2022 ms.author: kgremban ms.custom: [mvc, 'Role: Cloud Development', 'Role: Data Analytics', devx-track-azurecli] #Customer intent: As a developer, I want to be able to route messages sent to my IoT hub to different destinations based on properties stored in the message. This step of the tutorial needs to show me how to set up my base resources using CLI and the Azure Portal. 
--- -# Tutorial: Use the Azure CLI and Azure portal to configure IoT Hub message routing +# Tutorial: Send device data to Azure Storage using IoT Hub message routing -[!INCLUDE [iot-hub-include-routing-intro](../../includes/iot-hub-include-routing-intro.md)] +Use [message routing](iot-hub-devguide-messages-d2c.md) in Azure IoT Hub to send telemetry data from your IoT devices to Azure services such as blob storage, Service Bus Queues, Service Bus Topics, and Event Hubs. -[!INCLUDE [iot-hub-include-routing-create-resources](../../includes/iot-hub-include-routing-create-resources.md)] +Every IoT hub has a default built-in endpoint that is compatible with Event Hubs. You can also create custom endpoints and route messages to other Azure services by defining [routing queries](iot-hub-devguide-routing-query-syntax.md). Each message that arrives at the IoT hub is routed to all endpoints whose routing queries it matches. If a message doesn't match any of the defined routing queries, it is routed to the default endpoint. -## Use the Azure CLI to create the base resources +In this tutorial, you perform the following tasks: -This tutorial uses the Azure CLI to create the base resources, then uses the [Azure portal](https://portal.azure.com) to show how to configure message routing and set up the virtual device for testing. +> [!div class="checklist"] +> +> * Create an IoT hub and send device messages to it. +> * Create a storage account. +> * Create a custom endpoint for the storage account and route messages to it from the IoT hub. +> * View device messages in the storage account blob. -Copy and paste the script below into Cloud Shell and press Enter. It runs the script one line at a time. This will create the base resources for this tutorial, including the storage account, IoT Hub, Service Bus Namespace, and Service Bus queue. +## Prerequisites -There are several resource names that must be globally unique, such as the IoT Hub name and the storage account name. To make this easier, those resource names are appended with a random alphanumeric value called *randomValue*. The randomValue is generated once at the top of the script and appended to the resource names as needed throughout the script. If you don't want it to be random, you can set it to an empty string or to a specific value. +* An Azure subscription. If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. -> [!TIP] -> A tip about debugging: this script uses the continuation symbol (the backslash `\`) to make the script more readable. If you have a problem running the script, make sure your Cloud Shell session is running `bash` and that there are no spaces after any of the backslashes. -> +* An IoT hub in your Azure subscription. If you don't have a hub yet, you can follow the steps in [Create an IoT hub](iot-hub-create-through-portal.md). + +* This tutorial uses sample code from [Azure IoT samples for C#](https://github.com/Azure-Samples/azure-iot-samples-csharp). + + * Download or clone the samples repo to your development machine. + * Have .NET Core 3.0.0 or greater on your development machine. Check your version by running `dotnet --version` and [Download .NET](https://dotnet.microsoft.com/download) if necessary. + +* Make sure that port 8883 is open in your firewall. The sample in this tutorial uses MQTT protocol, which communicates over port 8883. This port may be blocked in some corporate and educational network environments. 
For more information and ways to work around this issue, see [Connecting to IoT Hub (MQTT)](iot-hub-mqtt-support.md#connecting-to-iot-hub). + +* Optionally, install [Azure IoT Explorer](https://github.com/Azure/azure-iot-explorer). This tool helps you observe the messages as they arrive at your IoT hub. + +# [Azure portal](#tab/portal) + +There are no other prerequisites for the Azure portal. + +# [Azure CLI](#tab/cli) + +[!INCLUDE [azure-cli-prepare-your-environment-no-header](../../includes/azure-cli-prepare-your-environment-no-header.md)] + +--- + +## Register a device and send messages to IoT Hub + +Register a new device in your IoT hub. + +# [Azure portal](#tab/portal) + +1. Sign in to the [Azure portal](https://portal.azure.com) and navigate to your IoT hub. + +1. Select **Devices** from the **Device management** section of the menu. + +1. Select **Add device**. + + ![Add a new device in the Azure portal.](./media/tutorial-routing/add-device.png) + +1. Provide a device ID and select **Save**. + +1. The new device should be in the list of devices now. If it's not, refresh the page. Select the device ID to open the device details page. + +1. Copy one of the device keys and save it. You'll use this value to configure the sample code that generates simulated device telemetry messages. + + ![Copy the primary key from the device details page.](./media/tutorial-routing/copy-device-key.png) + +# [Azure CLI](#tab/cli) + +>[!TIP] +>Many of the CLI commands used throughout this tutorial use the same parameters. For your convenience, we have you define local variables that can be called as needed. Be sure to run all the commands in the same session, or else you will have to redefine the variables. + +1. Define variables for your IoT hub and device. + + *IOTHUB_NAME*: Replace this placeholder with the name of your IoT hub. + + *DEVICE_NAME*: Replace this placeholder with any name you want to use for the device in this tutorial. + + ```azurecli-interactive + hubName=IOTHUB_NAME + deviceName=DEVICE_NAME + ``` + +1. Run the [az iot hub device-identity create](/cli/azure/iot/hub/device-identity#az-iot-hub-device-identity-create) command in your CLI shell. This command creates the device identity. + + ```azurecli-interactive + az iot hub device-identity create --device-id $deviceName --hub-name $hubName + ``` + +1. From the device-identity output, copy the **primaryKey** value without the surrounding quotation marks and save it. You'll use this value to configure the sample code that generates simulated device telemetry messages. + +--- + +Now that you have a device ID and key, use the sample code to start sending device telemetry messages to IoT Hub. + + +>[!TIP] +>If you're following the Azure CLI steps for this tutorial, run the sample code in a separate session. That way, you can allow the sample code to continue running while you follow the rest of the CLI steps. + +1. If you didn't as part of the prerequisites, download or clone the [Azure IoT samples for C# repo](https://github.com/Azure-Samples/azure-iot-samples-csharp) from GitHub now. +1. In the sample folder, navigate to the `/iot-hub/Tutorials/Routing/SimulatedDevice/` folder. +1. In an editor of your choice, open the `Program.cs` file. +1. Find the variable definitions at the top of the **Program** class. Update the following variables with your own information: + + * **s_myDeviceId**: The device ID that you assigned when registering the device. 
+ * **s_iotHubUri**: The hostname of your IoT hub, which takes the format `IOTHUB_NAME.azure-devices.net`. + * **s_deviceKey**: The device key that you copied from the device identity information. + +1. Save and close the file. +1. Install the Azure IoT C# SDK and necessary dependencies as specified in the `SimulatedDevice.csproj` file: + + ```console + dotnet restore + ``` + +1. Run the sample code: + + ```console + dotnet run + ``` + +1. You should start to see messages printed to output as they are sent to IoT Hub. Leave this program running for the duration of the tutorial. + +## Configure IoT Explorer to view messages + +Configure IoT Explorer to connect to your IoT hub and read messages as they arrive at the built-in endpoint. + +First, retrieve the connection string for your IoT hub. + +# [Azure portal](#tab/portal) + +1. In the Azure portal, navigate to your IoT hub. +1. Select **Shared access policies** from the **Security settings** section of the menu. +1. Select the **iothubowner** policy. + + ![Open the iothubowner shared access policy.](./media/tutorial-routing/iothubowner-access-policy.png) + +1. Copy the **Primary connection string**. + + ![Copy the iothubowner primary connection string.](./media/tutorial-routing/copy-iothubowner-connection-string.png) + +# [Azure CLI](#tab/cli) -```azurecli-interactive -# This retrieves the subscription id of the account -# in which you're logged in. -# This field is used to set up the routing queries. -subscriptionID=$(az account show --query id) - -# Concatenate this number onto the resources that have to be globally unique. -# You can set this to "" or to a specific value if you don't want it to be random. -# This retrieves a random value. -randomValue=$RANDOM - -# Set the values for the resource names that -# don't have to be globally unique. -location=westus -resourceGroup=ContosoResources -iotHubConsumerGroup=ContosoConsumers -containerName=contosoresults - -# Create the resource group to be used -# for all the resources for this tutorial. -az group create --name $resourceGroup \ - --location $location - -# The IoT hub name must be globally unique, -# so add a random value to the end. -iotHubName=ContosoTestHub$randomValue -echo "IoT hub name = " $iotHubName - -# Create the IoT hub. -az iot hub create --name $iotHubName \ - --resource-group $resourceGroup \ - --sku S1 --location $location - -# Add a consumer group to the IoT hub for the 'events' endpoint. -az iot hub consumer-group create --hub-name $iotHubName \ - --name $iotHubConsumerGroup - -# The storage account name must be globally unique, -# so add a random value to the end. -storageAccountName=contosostorage$randomValue -echo "Storage account name = " $storageAccountName - -# Create the storage account to be used as a routing destination. -az storage account create --name $storageAccountName \ - --resource-group $resourceGroup \ - --location $location \ - --sku Standard_LRS - -# Get the primary storage account key. -# You need this to create the container. -storageAccountKey=$(az storage account keys list \ - --resource-group $resourceGroup \ - --account-name $storageAccountName \ - --query "[0].value" | tr -d '"') - -# See the value of the storage account key. -echo "storage account key = " $storageAccountKey - -# Create the container in the storage account. 
-az storage container create --name $containerName \ - --account-name $storageAccountName \ - --account-key $storageAccountKey \ - --public-access off - -# The Service Bus namespace must be globally unique, -# so add a random value to the end. -sbNamespace=ContosoSBNamespace$randomValue -echo "Service Bus namespace = " $sbNamespace - -# Create the Service Bus namespace. -az servicebus namespace create --resource-group $resourceGroup \ - --name $sbNamespace \ - --location $location - -# The Service Bus queue name must be globally unique, -# so add a random value to the end. -sbQueueName=ContosoSBQueue$randomValue -echo "Service Bus queue name = " $sbQueueName - -# Create the Service Bus queue to be used as a routing destination. -az servicebus queue create --name $sbQueueName \ - --namespace-name $sbNamespace \ - --resource-group $resourceGroup - -``` - -Now that the base resources are set up, you can configure the message routing in the [Azure portal](https://portal.azure.com). +1. Run the [az iot hub connection-string show](/cli/azure/iot/hub/connection-string#az-iot-hub-connection-string-show) command: + + ```azurecli-interactive + az iot hub connection-string show --hub-name $hubName + ``` + +2. Copy the connection string without the surrounding quotation marks. + +--- + +Now, use that connection string to configure IoT Explorer for your IoT hub. + +1. Open IoT Explorer on your development machine. +1. Select **Add connection**. + + ![Add IoT hub connection in IoT Explorer.](./media/tutorial-routing/iot-explorer-add-connection.png) + +1. Paste your hub's connection string into the text box. +1. Select **Save**. +1. Once you connect to your IoT hub, you should see a list of devices. Select the device ID that you created for this tutorial. +1. Select **Telemetry**. +1. Select **Start**. + + ![Start monitoring device telemetry in IoT Explorer.](./media/tutorial-routing/iot-explorer-start-monitoring-telemetry.png) + +1. You should see the messages arriving from your device, with the most recent displayed at the top. + + ![View messages arriving at IoT hub on the built-in endpoint.](./media/tutorial-routing/iot-explorer-view-messages.png) + +Watch the incoming messages for a few moments to verify that you see three different types of messages: normal, storage, and critical. + +These messages are all arriving at the default built-in endpoint for your IoT hub. In the next sections, we're going to create a custom endpoint and route some of these messages to storage based on the message properties. Those messages will stop appearing in IoT Explorer because messages only go to the built-in endpoint when they don't match any other routes in IoT hub. ## Set up message routing -[!INCLUDE [iot-hub-include-create-routing-description](../../includes/iot-hub-include-create-routing-description.md)] +You're going to route messages to different resources based on properties attached to the message by the simulated device. Messages that aren't custom routed are sent to the default endpoint (messages/events). + +The sample app for this tutorial assigns a **level** property to each message it sends to IoT hub. Each message is randomly assigned a level of **normal**, **storage**, or **critical**. + +The first step is to set up the endpoint to which the data will be routed. The second step is to set up the message route that uses that endpoint. After setting up the routing, you can view endpoints and message routes in the portal. 
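If you want to see how the **level** property drives routing before creating any endpoints, you can also send a single test message from the Azure CLI instead of running the C# sample. This is an optional sketch that assumes the `azure-iot` CLI extension is installed and reuses the `$hubName` and `$deviceName` variables defined earlier in this tutorial; the payload shown is illustrative only.

```azurecli-interactive
# Send one device-to-cloud message that carries the application property level=storage,
# the same property that the routing query in this tutorial filters on.
az iot device send-d2c-message \
  --hub-name $hubName \
  --device-id $deviceName \
  --data '{"temperature": 25}' \
  --props 'level=storage'
```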
+ +### Create a storage account + +Create an Azure Storage account and a container within that account, which will hold the device messages that are routed to it. + +# [Azure portal](#tab/portal) + +1. In the Azure portal, search for **Storage accounts**. + +1. Select **Create**. + +1. Provide the following values for your storage account: + + | Parameter | Value | + | --------- | ----- | + | **Subscription** | Select the same subscription that contains your IoT hub. | + | **Resource group** | Select the same resource group that contains your IoT hub. | + | **Storage account name** | Provide a globally unique name for your storage account. | + | **Performance** | Accept the default **Standard** value. | + + ![Create a storage account.](./media/tutorial-routing/create-storage-account.png) + +1. You can accept all the other default values by selecting **Review + create**. + +1. After validation completes, select **Create**. + +1. After the deployment is complete, select **Go to resource**. + +1. In the storage account menu, select **Containers** from the **Data storage** section. + +1. Select **Container** to create a new container. + + ![Create a storage container](./media/tutorial-routing/create-storage-container.png) + +1. Provide a name for your container and select **Create**. + +# [Azure CLI](#tab/cli) + +1. Define the variables for your storage account and container. + + *GROUP_NAME*: Replace this placeholder with the name of the resource group that contains your IoT hub. + + *STORAGE_NAME*: Replace this placeholder with a name for your storage account. Storage account names must be lowercase and globally unique. + + *CONTAINER_NAME*: Replace this placeholder with a name for your container. + + ```azurecli-interactive + resourceGroup=GROUP_NAME + storageName=STORAGE_NAME + containerName=CONTAINER_NAME + ``` + +1. Use the [az storage account create](/cli/azure/storage/account#az-storage-account-create) command to create a standard general-purpose v2 storage account. + + ```azurecli-interactive + az storage account create --name $storageName --resource-group $resourceGroup + ``` + +1. Use the [az storage container create](/cli/azure/storage/container#az-storage-container-create) to add a container to your storage account. + + ```azurecli-interactive + az storage container create --auth-mode login --account-name $storageName --name $containerName + ``` + +--- ### Route to a storage account -Now set up the routing for the storage account. You go to the Message Routing pane, then add a route. When adding the route, define a new endpoint for the route. After this routing is set up, messages where the **level** property is set to **storage** are written to a storage account automatically. +Now set up the routing for the storage account. In this section you define a new endpoint that points to the storage account you created. Then, create a route that filters for messages where the **level** property is set to **storage**, and route those to the storage endpoint. [!INCLUDE [iot-hub-include-blob-storage-format](../../includes/iot-hub-include-blob-storage-format.md)] -Now you set up the configuration for the message routing to Azure Storage. +# [Azure portal](#tab/portal) -1. In the [Azure portal](https://portal.azure.com), select **Resource Groups**, then select your resource group. This tutorial uses **ContosoResources**. +1. In the Azure portal, navigate to your IoT hub. -2. Select the IoT hub under the list of resources. This tutorial uses **ContosoTestHub**. +1. 
Select **Message Routing** from the **Hub settings** section of the menu. -3. Select **Message Routing** in the middle column that says ***Messaging**. Select +**Add** to see the **Add a Route** pane. Select +**Add endpoint** next to the Endpoint field, then select **Storage**. You see the **Add a storage endpoint** pane. +1. In the **Routes** tab, select **Add**. - ![Start adding an endpoint for a route](./media/tutorial-routing/01-add-a-route-to-storage.png) + ![Add a new message route.](./media/tutorial-routing/add-route.png) -4. Enter a name for the endpoint. This tutorial uses **ContosoStorageEndpoint**. +1. Select **Add endpoint** next to the **Endpoint** field, then select **Storage** from the dropdown menu. - ![Name the endpoint](./media/tutorial-routing/02-add-a-storage-endpoint.png) + ![Add a new endpoint for a route.](./media/tutorial-routing/add-storage-endpoint.png) -5. Select **Pick a container**. This takes you to a list of your storage accounts. Select the one you set up in the preparation steps; this tutorial uses **contosostorage**. It shows a list of containers in that storage account. **Select** the container you set up in the preparation steps. This tutorial uses **contosoresults**. Then click **Select** at the bottom of the screen. It returns to a different **Add a storage endpoint** pane. You see the URL for the selected container. +1. Provide the following information for the new storage endpoint: -6. Set the encoding to AVRO or JSON. For the purpose of this tutorial, use the defaults for the rest of the fields. This field will be greyed out if the region selected does not support JSON encoding. Set the file name format. + | Parameter | Value | + | --------- | ----- | + | **Endpoint name** | Create a name for this endpoint. | + | **Azure Storage container** | Select **Pick a container**, which takes you to a list of storage accounts. Choose the storage account that you created in the previous section, then choose the container that you created in that account. Select **Select**.| + | **Encoding** | Select **JSON**. If this field is greyed out, then your storage account region doesn't support JSON. In that case, continue with the default **AVRO**. | - > [!NOTE] - > Set the format of the blob name using the **Blob file name format**. The default is `{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}`. The format must contain {iothub}, {partition}, {YYYY}, {MM}, {DD}, {HH}, and {mm} in any order. - > - > For example, using the default blob file name format, if the hub name is ContosoTestHub, and the date/time is October 30, 2018 at 10:56 a.m., the blob name will look like this: `ContosoTestHub/0/2018/10/30/10/56`. - > - > The blobs are written in the AVRO format by default. - > + ![Pick a container.](./media/tutorial-routing/create-storage-endpoint.png) -7. Select **Create** at the bottom of the page to create the storage endpoint and add it to the route. You are returned to the **Add a Route** pane. +1. Accept the default values for the rest of the parameters and select **Create**. -8. Complete the rest of the routing query information. This query specifies the criteria for sending messages to the storage container you just added as an endpoint. Fill in the fields on the screen. +1. Continue creating the new route, now that you've added the storage endpoint. Provide the following information for the new route: -9. Fill in the rest of the fields. + | Parameter | Value | + | -------- | ----- | + | **Name** | Create a name for your route. 
| + | **Data source** | Verify that **Device Telemetry Messages** is selected from the dropdown list. | + | **Enable route** | Verify that this field is set to `enabled`. | + | **Routing query** | Enter `level="storage"` as the query string. | - - **Name**: Enter a name for your route. This tutorial uses **ContosoStorageRoute**. Next, specify the endpoint for storage. This tutorial uses ContosoStorageEndpoint. - - - Specify **Data source**: Select **Device Telemetry Messages** from the dropdown list. + ![Save the routing query information](./media/tutorial-routing/create-storage-route.png) + +1. Select **Save**. - - Select **Enable route**: Be sure this field is set to `enabled`. +# [Azure CLI](#tab/cli) - - **Routing query**: Enter `level="storage"` as the query string. +1. Configure the variables that you need for the endpoint and route commands. - ![Save the routing query information](./media/tutorial-routing/04-save-storage-route.png) - -10. Select **Save**. When it finishes, it returns to the Message Routing pane, where you can see your new routing query for storage. Close the Message Routing pane, which returns you to the Resource group page. + *ENDPOINT_NAME*: Provide a name for the endpoint that represents your storage container. + + *ROUTE_NAME*: Provide a name for the route that filters messages for the storage endpoint. + ```azurecli-interactive + endpointName=ENDPOINT_NAME + routeName=ROUTE_NAME + ``` -### Route to a Service Bus queue +1. Use the [az iot hub routing-endpoint create](/cli/azure/iot/hub/routing-endpoint#az-iot-hub-routing-endpoint-create) command to create a custom endpoint that points to the storage container you made in the previous section. -Now set up the routing for the Service Bus queue. You go to the Message Routing pane, then add a route. When adding the route, define a Service Bus Queue as the endpoint for the route. After this route is set up, messages where the **level** property is set to **critical** are written to the Service Bus queue, which triggers a Logic App, which then sends an e-mail with the information. + ```azurecli-interactive + az iot hub routing-endpoint create \ + --connection-string $(az storage account show-connection-string --name $storageName --query connectionString -o tsv) \ + --endpoint-name $endpointName \ + --endpoint-resource-group $resourceGroup \ + --endpoint-subscription-id $(az account show --query id -o tsv) \ + --endpoint-type azurestoragecontainer \ + --hub-name $hubName \ + --container $containerName \ + --resource-group $resourceGroup \ + --encoding json + ``` -1. On the Resource group page, select your IoT hub, then select **Message Routing**. +1. Use the [az iot hub route create](/cli/azure/iot/hub/route#az-iot-hub-route-create) command to create a route that passes any message where `level=storage` to the storage container endpoint. -2. On the **Message Routing** pane, select +**Add**. + ```azurecli-interactive + az iot hub route create \ + --name $routeName \ + --hub-name $hubName \ + --resource-group $resourceGroup \ + --source devicemessages \ + --endpoint-name $endpointName \ + --enabled true \ + --condition 'level="storage"' + ``` -3. On the **Add a Route** pane, Select +**Add** near **+endpoint**. Select **Service Bus Queue**. You see the **Add Service Bus Endpoint** pane. +--- - ![Adding a 1st service bus endpoint](./media/tutorial-routing/05-setup-sbq-endpoint.png) +## View routed messages -4.
Fill in the rest of the fields: +Once the route is created in IoT Hub and enabled, it will immediately start routing messages that meet its query condition to the storage endpoint. - **Endpoint Name**: Enter a name for the endpoint. This tutorial uses **ContosoSBQEndpoint**. - - **Service Bus Namespace**: Use the dropdown list to select the service bus namespace you set up in the preparation steps. This tutorial uses **ContosoSBNamespace**. +### Monitor the built-in endpoint with IoT Explorer - **Service Bus queue**: Use the dropdown list to select the Service Bus queue. This tutorial uses **contososbqueue**. +Return to the IoT Explorer session on your development machine. Recall that the IoT Explorer monitors the built-in endpoint for your IoT hub. That means that now you should be seeing only the messages that are *not* being routed by the custom route we created. Watch the incoming messages for a few moments and you should only see messages where `level` is set to `normal` or `critical`. -5. Select **Create** to add the 1st Service Bus queue endpoint. You return to the **Add a route** pane. +### View messages in the storage container - ![Adding 2nd service bus endpoint](./media/tutorial-routing/06-save-sbq-endpoint.png) +Verify that the messages are arriving in the storage container. -6. Now complete the rest of the routing query information. This query specifies the criteria for sending messages to the Service Bus queue you just added as an endpoint. Fill in the fields on the screen. +1. In the [Azure portal](https://portal.azure.com), navigate to your storage account. - **Name**: Enter a name for your route. This tutorial uses **ContosoSBQueueRoute**. +1. Select **Containers** from the **Data storage** section of the menu. - **Endpoint**: This shows the endpoint you just set up. +1. Select the container that you created for this tutorial. - **Data source**: Select **Device Telemetry Messages** from the dropdown list. +1. There should be a folder with the name of your IoT hub. Drill down through the file structure until you get to a **.json** file. - **Enable route**: Set this field to `enable`." + ![Find routed messages in storage.](./media/tutorial-routing/view-messages-in-storage.png) - **Routing query**: Enter `level="critical"` as the routing query. +1. Download the JSON file and confirm that it contains messages from your device that have the `level` property set to `storage`. - ![Create a routing query for the Service Bus queue](./media/tutorial-routing/07-save-servicebusqueue-route.png) +## Clean up resources -7. Select **Save**. When it returns to the Routes pane, you see both of your new routes. +If you want to remove all of the Azure resources you used for this tutorial, delete the resource group. This action deletes all resources contained within the group. If you don't want to delete the entire resource group, use the Azure portal to locate and delete the individual resources. - ![The routes you just set up](./media/tutorial-routing/08-show-both-routes.png) +# [Azure portal](#tab/portal) -8. You can see the custom endpoints that you set up by selecting the **Custom Endpoints** tab. +1. In the Azure portal, navigate to the resource group that contains the IoT hub and storage account for this tutorial. +1. Review all the resources that are in the resource group to determine which ones you want to clean up. + * If you want to delete all the resource, select **Delete resource group**. 
+ * If you only want to delete certain resource, use the check boxes next to each resource name to select the ones you want to delete. Then select **Delete**. - ![The custom endpoints you just set up](./media/tutorial-routing/09-show-custom-endpoints.png) +# [Azure CLI](#tab/cli) -9. Close the Message Routing pane, which returns you to the Resource group pane. +1. Use the [az resource list](/cli/azure/resource#az-resource-list) command to view all the resources in your resource group. -## Create a simulated device + ```azurecli-interactive + az resource list --resource-group $resourceGroup --output table + ``` -[!INCLUDE [iot-hub-include-create-simulated-device-portal](../../includes/iot-hub-include-create-simulated-device-portal.md)] +1. Review all the resources that are in the resource group to determine which ones you want to clean up. + + * If you want to delete all the resources, use the [az group delete](/cli/azure/group#az-group-delete) command. + + ```azurecli-interactive + az group delete --name $resourceGroup + ``` + + * If you only want to delete certain resources, use the [az resource delete](/cli/azure/resource#az-resource-delete) command. For example: + + ```azurecli-interactive + az resource delete --resource-group $resourceGroup --name $storageName + ``` + +--- ## Next steps -Now that you have the resources set up and the message routes configured, advance to the next tutorial to learn how to send messages to the IoT hub and see them be routed to the different destinations. +In this tutorial you learned how to create a custom endpoint for an Azure resource and then create a route to send device messages to that endpoint. Continue to the next tutorial to learn how to enrich messages with extra data that can be used to simplify downstream processing > [!div class="nextstepaction"] -> [Part 2 - View the message routing results](tutorial-routing-view-message-routing-results.md) +> [Use Azure IoT Hub message enrichments](tutorial-message-enrichments.md) diff --git a/articles/key-vault/general/monitor-key-vault.md b/articles/key-vault/general/monitor-key-vault.md index 010bd7a08500e..eed192ebc2af2 100644 --- a/articles/key-vault/general/monitor-key-vault.md +++ b/articles/key-vault/general/monitor-key-vault.md @@ -182,7 +182,7 @@ If you are creating or running an application which runs on Azure Key Vault, [Az Here are some common and recommended alert rules for Azure Key Vault - - Key Vault Availability drops below 100% (Static Threshold) -- Key Vault Latency is greater than 500ms (Static Threshold) +- Key Vault Latency is greater than 1000ms (Static Threshold) - Overall Vault Saturation is greater than 75% (Static Threshold) - Overall Vault Saturation exceeds average (Dynamic Threshold) - Total Error Codes higher than average (Dynamic Threshold) diff --git a/articles/key-vault/general/overview-vnet-service-endpoints.md b/articles/key-vault/general/overview-vnet-service-endpoints.md index fcb80797e5ef2..d8df4792d9bf6 100644 --- a/articles/key-vault/general/overview-vnet-service-endpoints.md +++ b/articles/key-vault/general/overview-vnet-service-endpoints.md @@ -55,7 +55,8 @@ Here's a list of trusted services that are allowed to access a key vault if the |Azure Import/Export| [Use customer-managed keys in Azure Key Vault for Import/Export service](../../import-export/storage-import-export-encryption-key-portal.md) |Azure Container Registry|[Registry encryption using customer-managed keys](../../container-registry/container-registry-customer-managed-keys.md) |Azure Application 
Gateway |[Using Key Vault certificates for HTTPS-enabled listeners](../../application-gateway/key-vault-certs.md) -|Azure Front Door|[Using Key Vault certificates for HTTPS](../../frontdoor/front-door-custom-domain-https.md#prepare-your-azure-key-vault-account-and-certificate) +|Azure Front Door Standard/Premium|[Using Key Vault certificates for HTTPS](../../frontdoor/standard-premium/how-to-configure-https-custom-domain.md#prepare-your-key-vault-and-certificate) +|Azure Front Door Classic|[Using Key Vault certificates for HTTPS](../../frontdoor/front-door-custom-domain-https.md#prepare-your-key-vault-and-certificate) |Microsoft Purview|[Using credentials for source authentication in Microsoft Purview](../../purview/manage-credentials.md) |Azure Machine Learning|[Secure Azure Machine Learning in a virtual network](../../machine-learning/how-to-secure-workspace-vnet.md)| diff --git a/articles/key-vault/general/security-features.md b/articles/key-vault/general/security-features.md index ed1e82c06593b..c645f4a820c71 100755 --- a/articles/key-vault/general/security-features.md +++ b/articles/key-vault/general/security-features.md @@ -38,7 +38,7 @@ Azure Private Link Service enables you to access Azure Key Vault and Azure hoste - Despite known vulnerabilities in TLS protocol, there is no known attack that would allow a malicious agent to extract any information from your key vault when the attacker initiates a connection with a TLS version that has vulnerabilities. The attacker would still need to authenticate and authorize itself, and as long as legitimate clients always connect with recent TLS versions, there is no way that credentials could have been leaked from vulnerabilities at old TLS versions. > [!NOTE] -> For Azure Key Vault, ensure that the application accessing the Keyvault service should be running on a platform that supports TLS 1.2 or recent version. If the application is dependent on .Net framework, it should be updated as well. You can also make the registry changes mentioned in [this article](/troubleshoot/azure/active-directory/enable-support-tls-environment) to explicitly enable the use of TLS 1.2 at OS level and for .Net framework. To meet with compliance obligations and to improve security posture, Key Vault connections via TLS 1.0 & 1.1 will be deprecated starting on 31st May 2022 and disallowed later in the future. +> For Azure Key Vault, ensure that the application accessing the Keyvault service should be running on a platform that supports TLS 1.2 or recent version. If the application is dependent on .Net framework, it should be updated as well. You can also make the registry changes mentioned in [this article](/troubleshoot/azure/active-directory/enable-support-tls-environment) to explicitly enable the use of TLS 1.2 at OS level and for .Net framework. To meet with compliance obligations and to improve security posture, Key Vault connections via TLS 1.0 & 1.1 are considered a security risk, and any connections using old TLS protocols will be disallowed in 2023. 
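If you're not sure whether a given client host can negotiate TLS 1.2 with Key Vault, a quick handshake test is one way to check. The following is a rough sketch, assuming the OpenSSL command-line client is available and that `<your-vault-name>` is replaced with your vault's name; it's an informal check, not part of the official guidance above.

```console
# Force a TLS 1.2-only handshake against the Key Vault endpoint.
# A successful handshake prints the negotiated protocol and cipher;
# a failure suggests this host can't connect to Key Vault with TLS 1.2.
openssl s_client -connect <your-vault-name>.vault.azure.net:443 -tls1_2 </dev/null | grep -E 'Protocol|Cipher'
```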
## Key Vault authentication options diff --git a/articles/key-vault/general/tutorial-net-create-vault-azure-web-app.md b/articles/key-vault/general/tutorial-net-create-vault-azure-web-app.md index 3775a2d394ab5..b2ae60aa14eb6 100644 --- a/articles/key-vault/general/tutorial-net-create-vault-azure-web-app.md +++ b/articles/key-vault/general/tutorial-net-create-vault-azure-web-app.md @@ -282,7 +282,7 @@ dotnet add package Azure.Security.KeyVault.Secrets #### Update the code -Find and open the Startup.cs file in your akvwebapp project. +Find and open the Startup.cs file for .NET 5.0 or earlier, or Program.cs file for .NET 6.0 in your akvwebapp project. Add these lines to the header: @@ -292,7 +292,7 @@ using Azure.Security.KeyVault.Secrets; using Azure.Core; ``` -Add the following lines before the `app.UseEndpoints` call, updating the URI to reflect the `vaultUri` of your key vault. This code uses [DefaultAzureCredential()](/dotnet/api/azure.identity.defaultazurecredential) to authenticate to Key Vault, which uses a token from managed identity to authenticate. For more information about authenticating to Key Vault, see the [Developer's Guide](./developers-guide.md#authenticate-to-key-vault-in-code). The code also uses exponential backoff for retries in case Key Vault is being throttled. For more information about Key Vault transaction limits, see [Azure Key Vault throttling guidance](./overview-throttling.md). +Add the following lines before the `app.UseEndpoints` call (.NET 5.0 or earlier) or `app.MapGet` call (.NET 6.0) , updating the URI to reflect the `vaultUri` of your key vault. This code uses [DefaultAzureCredential()](/dotnet/api/azure.identity.defaultazurecredential) to authenticate to Key Vault, which uses a token from managed identity to authenticate. For more information about authenticating to Key Vault, see the [Developer's Guide](./developers-guide.md#authenticate-to-key-vault-in-code). The code also uses exponential backoff for retries in case Key Vault is being throttled. For more information about Key Vault transaction limits, see [Azure Key Vault throttling guidance](./overview-throttling.md). ```csharp SecretClientOptions options = new SecretClientOptions() @@ -312,12 +312,23 @@ KeyVaultSecret secret = client.GetSecret(""); string secretValue = secret.Value; ``` +##### .NET 5.0 or earlier + Update the line `await context.Response.WriteAsync("Hello World!");` to look like this line: ```csharp await context.Response.WriteAsync(secretValue); ``` +##### .NET 6.0 + +Update the line `app.MapGet("/", () => "Hello World!");` to look like this line: + +```csharp +app.MapGet("/", () => secretValue); +``` + + Be sure to save your changes before continuing to the next step. 
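Before redeploying, it can help to confirm that the secret your code references actually exists in the vault. The following optional sketch uses the Azure CLI with placeholder names; it reads the secret with your signed-in CLI identity, which is separate from the managed identity that the web app uses at run time.

```azurecli
# Optional check: confirm the secret exists and inspect its value.
# Replace the placeholders with your key vault name and secret name.
az keyvault secret show --vault-name <your-key-vault-name> --name <your-secret-name> --query value --output tsv
```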
#### Redeploy your web app diff --git a/articles/key-vault/index.yml b/articles/key-vault/index.yml index cf81cbe142ff1..13824604972e2 100644 --- a/articles/key-vault/index.yml +++ b/articles/key-vault/index.yml @@ -67,6 +67,9 @@ conceptualContent: - url: ./keys/quick-create-portal.md itemType: quickstart text: Set and retrieve a key from Azure Key Vault using the Azure portal + - url: ./keys/how-to-configure-key-rotation.md + itemType: tutorial + text: Configure key auto-rotation - url: ./keys/hsm-protected-keys.md itemType: overview text: Import HSM-protected keys (overview) diff --git a/articles/key-vault/keys/about-keys-details.md b/articles/key-vault/keys/about-keys-details.md index 3594dd1d9bf10..080969514d371 100644 --- a/articles/key-vault/keys/about-keys-details.md +++ b/articles/key-vault/keys/about-keys-details.md @@ -20,7 +20,7 @@ Following table shows a summary of key types and supported algorithms. |Key types/sizes/curves| Encrypt/Decrypt
                  (Wrap/Unwrap) | Sign/Verify | | --- | --- | --- | -|EC-P256, EC-P256K, EC-P384, EC-521|NA|ES256
                  ES256K
                  ES384
                  ES512| +|EC-P256, EC-P256K, EC-P384, EC-P521|NA|ES256
                  ES256K
                  ES384
                  ES512| |RSA 2K, 3K, 4K| RSA1_5
                  RSA-OAEP
                  RSA-OAEP-256|PS256
                  PS384
                  PS512
                  RS256
                  RS384
                  RS512
                  RSNULL| |AES 128-bit, 256-bit
                  (Managed HSM only)| AES-KW
                  AES-GCM
                  AES-CBC| NA| ||| diff --git a/articles/key-vault/keys/how-to-configure-key-rotation.md b/articles/key-vault/keys/how-to-configure-key-rotation.md index 4066200ecdb64..02c076a834889 100644 --- a/articles/key-vault/keys/how-to-configure-key-rotation.md +++ b/articles/key-vault/keys/how-to-configure-key-rotation.md @@ -15,10 +15,15 @@ ms.author: mbaldwin ## Overview -Automated key rotation in Key Vault allows users to configure Key Vault to automatically generate a new key version at a specified frequency. You can use rotation policy to configure rotation for each individual -key. Our recommendation is to rotate encryption keys at least every two years to meet cryptographic best practices. +Automated key rotation in [Key Vault](../general/overview.md) allows users to configure Key Vault to automatically generate a new key version at a specified frequency. For more information about how keys are versioned, see [Key Vault objects, identifiers, and versioning](../general/about-keys-secrets-certificates.md#objects-identifiers-and-versioning). -This feature enables end-to-end zero-touch rotation for encryption at rest for Azure services with customer-managed key (CMK) stored in Azure Key Vault. Please refer to specific Azure service documentation to see if the service covers end-to-end rotation. +You can use rotation policy to configure rotation for each individual key. Our recommendation is to rotate encryption keys at least every two years to meet cryptographic best practices. + +This feature enables end-to-end zero-touch rotation for encryption at rest for Azure services with customer-managed key (CMK) stored in Azure Key Vault. Please refer to specific Azure service documentation to see if the service covers end-to-end rotation. + +For more information about data encryption in Azure, see: +- [Azure Encryption at Rest](../../security/fundamentals/encryption-atrest.md#azure-encryption-at-rest-components) +- [Azure services data encryption support table](../../security/fundamentals/encryption-models.md#supporting-services) ## Pricing @@ -48,6 +53,9 @@ Key rotation policy settings: - Rotation time: key rotation interval, the minimum value is seven days from creation and seven days from expiration time - Notification time: key near expiry event interval for Event Grid notification. It requires 'Expiry Time' set on rotation policy and 'Expiration Date' set on the key. +> [!IMPORTANT] +> Key rotation generates a new key version of an existing key with new key material. Ensure that your data encryption solution uses versioned key uri to point to the same key material for encrypt/decrypt, wrap/unwrap operations to avoid disruption to your services. All Azure services are currently following that pattern for data encryption. + :::image type="content" source="../media/keys/key-rotation/key-rotation-1.png" alt-text="Rotation policy configuration"::: ## Configure key rotation policy @@ -90,12 +98,20 @@ Save key rotation policy to a file. Key rotation policy example: } } ``` -Set rotation policy on a key passing previously saved file. + +Set rotation policy on a key passing previously saved file using Azure CLI [az keyvault key rotation-policy update](/cli/azure/keyvault/key/rotation-policy) command. ```azurecli az keyvault key rotation-policy update --vault-name --name --value
                  ``` +### Azure PowerShell + +Set the rotation policy using the Azure PowerShell [Set-AzKeyVaultKeyRotationPolicy](/powershell/module/az.keyvault/set-azkeyvaultkeyrotationpolicy) cmdlet. + +```powershell +Set-AzKeyVaultKeyRotationPolicy -VaultName <vault-name> -KeyName <key-name> -ExpiresIn (New-TimeSpan -Days 720) -KeyRotationLifetimeAction @{Action="Rotate";TimeAfterCreate= (New-TimeSpan -Days 540)} +``` ## Rotation on demand Key rotation can be invoked manually. @@ -106,10 +122,21 @@ Click 'Rotate Now' to invoke rotation. :::image type="content" source="../media/keys/key-rotation/key-rotation-4.png" alt-text="Rotation on-demand"::: ### Azure CLI + +Use the Azure CLI [az keyvault key rotate](/cli/azure/keyvault/key#az-keyvault-key-rotate) command to rotate the key. + ```azurecli az keyvault key rotate --vault-name <vault-name> --name <key-name> ``` +### Azure PowerShell + +Use the Azure PowerShell [Invoke-AzKeyVaultKeyRotation](/powershell/module/az.keyvault/invoke-azkeyvaultkeyrotation) cmdlet. + +```powershell +Invoke-AzKeyVaultKeyRotation -VaultName <vault-name> -Name <key-name> +``` + ## Configure key near expiry notification Configuration of expiry notification for Event Grid key near expiry event. You can configure notification with days, months and years before expiry to trigger near expiry event. diff --git a/articles/key-vault/managed-hsm/built-in-roles.md index 8403f4b456880..ee6d147023749 100644 --- a/articles/key-vault/managed-hsm/built-in-roles.md +++ b/articles/key-vault/managed-hsm/built-in-roles.md @@ -51,7 +51,6 @@ Managed HSM local RBAC has several built-in roles. You can assign these roles to |/keys/deletedKeys/delete||
                  X
                  |||||
                  X
                  | |/keys/backup/action|||
                  X
                  |||
                  X
                  | |/keys/restore/action|||
                  X
                  |||| -|/keys/export/action||
                  X
                  ||||| |/keys/release/action|||
                  X
                  |||| |/keys/import/action|||
                  X
                  |||| |**Key cryptographic operations**| diff --git a/articles/key-vault/secrets/overview-storage-keys-powershell.md b/articles/key-vault/secrets/overview-storage-keys-powershell.md index 21aa15c535d3e..0a40ff00efafd 100644 --- a/articles/key-vault/secrets/overview-storage-keys-powershell.md +++ b/articles/key-vault/secrets/overview-storage-keys-powershell.md @@ -228,7 +228,7 @@ $sasTemplate="sv=2018-03-28&ss=bfqt&srt=sco&sp=rw&spr=https" |`SignedProtocol (spr)`|Optional. Specifies the protocol permitted for a request made with the account SAS. Possible values are both HTTPS and HTTP (`https,http`) or HTTPS only (`https`). The default value is `https,http`.

                  Note that HTTP only is not a permitted value.| For more information about account SAS, see: -[Create an account SAS](https://docs.microsoft.com/rest/api/storageservices/create-account-sas) +[Create an account SAS](/rest/api/storageservices/create-account-sas) > [!NOTE] > Key Vault ignores lifetime parameters like 'Signed Expiry', 'Signed Start' and parameters introduced after 2018-03-28 version @@ -279,4 +279,4 @@ The output of this command will show your SAS definition string. ## Next steps - [Managed storage account key samples](https://github.com/Azure-Samples?utf8=%E2%9C%93&q=key+vault+storage&type=&language=) -- [Key Vault PowerShell reference](/powershell/module/az.keyvault/#key_vault) +- [Key Vault PowerShell reference](/powershell/module/az.keyvault/#key_vault) \ No newline at end of file diff --git a/articles/key-vault/secrets/overview-storage-keys.md b/articles/key-vault/secrets/overview-storage-keys.md index 46b83d12a96a4..a48894d71f4cc 100644 --- a/articles/key-vault/secrets/overview-storage-keys.md +++ b/articles/key-vault/secrets/overview-storage-keys.md @@ -131,7 +131,7 @@ SAS definition template will be the passed to the `--template-uri` parameter in |`SignedProtocol (spr)`|Optional. Specifies the protocol permitted for a request made with the account SAS. Possible values are both HTTPS and HTTP (`https,http`) or HTTPS only (`https`). The default value is `https,http`.

                  Note that HTTP only isn't a permitted value.| For more information about account SAS, see: -[Create an account SAS](https://docs.microsoft.com/rest/api/storageservices/create-account-sas) +[Create an account SAS](/rest/api/storageservices/create-account-sas) > [!NOTE] > Key Vault ignores lifetime parameters like 'Signed Expiry', 'Signed Start' and parameters introduced after 2018-03-28 version @@ -158,4 +158,4 @@ az keyvault storage sas-definition show --id https://.vault.az - Learn more about [keys, secrets, and certificates](/rest/api/keyvault/). - Review articles on the [Azure Key Vault team blog](/archive/blogs/kv/). -- See the [az keyvault storage](/cli/azure/keyvault/storage) reference documentation. +- See the [az keyvault storage](/cli/azure/keyvault/storage) reference documentation. \ No newline at end of file diff --git a/articles/key-vault/secrets/tutorial-rotation-dual.md b/articles/key-vault/secrets/tutorial-rotation-dual.md index 73f98f249a9b8..1089f4702ad4d 100644 --- a/articles/key-vault/secrets/tutorial-rotation-dual.md +++ b/articles/key-vault/secrets/tutorial-rotation-dual.md @@ -19,7 +19,7 @@ The best way to authenticate to Azure services is by using a [managed identity]( This tutorial shows how to automate the periodic rotation of secrets for databases and services that use two sets of authentication credentials. Specifically, this tutorial shows how to rotate Azure Storage account keys stored in Azure Key Vault as secrets. You'll use a function triggered by Azure Event Grid notification. > [!NOTE] -> Storage account keys can be automatically managed in Key Vault if you provide shared access signature tokens for delegated access to the storage account. There are services that require storage account connection strings with access keys. For that scenario, we recommend this solution. +> For Storage account services, using Azure Active Directory to authorize requests is recommended. For more information, see [Authorize access to blobs using Azure Active Directory](../../storage/blobs/authorize-access-azure-active-directory.md). There are services that require storage account connection strings with access keys. For that scenario, we recommend this solution. Here's the rotation solution described in this tutorial: @@ -38,6 +38,9 @@ In this solution, Azure Key Vault stores storage account individual access keys * Azure Key Vault. * Two Azure storage accounts. +> [!NOTE] +> Rotation of shared storage account key revokes account level shared access signature (SAS) generated based on that key. After storage account key rotation, you must regenerate account-level SAS tokens to avoid disruptions to applications. 
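For context, the sketch below shows the manual equivalent of what this rotation solution automates: regenerating one storage account key and then issuing a fresh account-level SAS signed with the new key. The resource names, expiry date, and permissions are placeholders, not values defined by this tutorial.

```azurecli
# Regenerate key1; any account-level SAS signed with the old key1 stops working.
az storage account keys renew \
  --resource-group <resource-group> \
  --account-name <storage-account-name> \
  --key key1

# Issue a new account-level SAS signed with the regenerated key.
az storage account generate-sas \
  --account-name <storage-account-name> \
  --account-key $(az storage account keys list --resource-group <resource-group> --account-name <storage-account-name> --query "[0].value" -o tsv) \
  --services b \
  --resource-types sco \
  --permissions rw \
  --expiry 2025-01-01T00:00Z
```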
+ You can use this deployment link if you don't have an existing key vault and existing storage accounts: [![Link that's labelled Deploy to Azure.](https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/1-CONTRIBUTION-GUIDE/images/deploytoazure.png)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure-Samples%2FKeyVault-Rotation-StorageAccountKey-PowerShell%2Fmaster%2FARM-Templates%2FInitial-Setup%2Fazuredeploy.json) diff --git a/articles/lab-services/TOC.yml b/articles/lab-services/TOC.yml index ae3e5bfd401bb..f6555bb936b00 100644 --- a/articles/lab-services/TOC.yml +++ b/articles/lab-services/TOC.yml @@ -1,14 +1,17 @@ - name: Azure Lab Services docs href: index.yml - name: Overview + expanded: true items: - name: About Lab Services href: lab-services-overview.md - name: What's New in Lab Services? href: lab-services-whats-new.md - name: Quickstarts + expanded: true items: - name: Create a lab plan (admin) + expanded: true items: - name: Azure portal href: quick-create-lab-plan-portal.md @@ -16,7 +19,10 @@ href: quick-create-lab-plan-template.md - name: PowerShell href: quick-create-lab-plan-powershell.md + - name: Bicep + href: quick-create-lab-plan-bicep.md - name: Create a lab (educator) + expanded: true items: - name: Azure Lab Services website href: quick-create-lab-portal.md @@ -24,6 +30,8 @@ href: quick-create-lab-template.md - name: PowerShell href: quick-create-lab-powershell.md + - name: Bicep + href: quick-create-lab-bicep.md - name: Tutorials items: - name: Set up a lab plan (admin) @@ -233,6 +241,12 @@ href: how-to-access-vm-for-students-within-teams.md - name: Earlier releases items: + - name: Labs architecture fundamentals + href: classroom-labs-fundamentals-1.md + - name: Administrator guide + href: administrator-guide-1.md + - name: Lab account creation guide + href: account-setup-guide.md - name: Create & configure lab accounts items: - name: Create and manage labs @@ -255,10 +269,6 @@ href: how-to-add-user-lab-owner.md - name: Manage labs in a lab account href: manage-labs-1.md - - name: Administrator guide - href: administrator-guide-1.md - - name: Lab account creation guide - href: account-setup-guide.md - name: Az.LabServices PowerShell module for lab accounts href: reference-powershell-module.md - name: Reference diff --git a/articles/lab-services/add-lab-creator.md b/articles/lab-services/add-lab-creator.md index 9edce6e5615ec..2c4f4d2251f7b 100644 --- a/articles/lab-services/add-lab-creator.md +++ b/articles/lab-services/add-lab-creator.md @@ -43,7 +43,7 @@ You might need to add an external user as a lab creator. If that is the case, yo - A non-Microsoft email account, such as one provided by Yahoo or Google. However, these types of accounts must be linked with a Microsoft account. - A GitHub account. This account must be linked with a Microsoft account. -For instructions to add someone as a guest account in Azure AD, see [Quickstart: Add guest users in the Azure portal - Azure AD](/azure/active-directory/external-identities/b2b-quickstart-add-guest-users-portal). If using an email account that's provided by your university’s Azure AD, you don't have to add them as a guest account. +For instructions to add someone as a guest account in Azure AD, see [Quickstart: Add guest users in the Azure portal - Azure AD](../active-directory/external-identities/b2b-quickstart-add-guest-users-portal.md). 
If using an email account that's provided by your university’s Azure AD, you don't have to add them as a guest account. Once the user has an Azure AD account, [add the Azure AD user account to Lab Creator role](#add-azure-ad-user-account-to-lab-creator-role). @@ -81,4 +81,4 @@ See the following articles: - [As a lab owner, create and manage labs](how-to-manage-labs.md) - [As a lab owner, set up and publish templates](how-to-create-manage-template.md) - [As a lab owner, configure and control usage of a lab](how-to-configure-student-usage.md) -- [As a lab user, access labs](how-to-use-lab.md) +- [As a lab user, access labs](how-to-use-lab.md) \ No newline at end of file diff --git a/articles/lab-services/capacity-limits.md b/articles/lab-services/capacity-limits.md index 31abf2ed54bf8..eb8f04b38bddc 100644 --- a/articles/lab-services/capacity-limits.md +++ b/articles/lab-services/capacity-limits.md @@ -23,7 +23,7 @@ These actions may be disabled if there no more cores that can be enabled for you If you reach the cores limit, you can request a limit increase to continue using Azure Lab Services. The request process is a checkpoint to ensure your subscription isn’t involved in any cases of fraud or unintentional, sudden large-scale deployments. -To create a support request, you must be an [Owner](/azure/role-based-access-control/built-in-roles), [Contributor](/azure/role-based-access-control/built-in-roles), or be assigned to the [Support Request Contributor](/azure/role-based-access-control/built-in-roles) role at the subscription level. For information about creating support requests in general, see how to create a [How to create an Azure support request](/azure/azure-portal/supportability/how-to-create-azure-support-request). +To create a support request, you must be an [Owner](../role-based-access-control/built-in-roles.md), [Contributor](../role-based-access-control/built-in-roles.md), or be assigned to the [Support Request Contributor](../role-based-access-control/built-in-roles.md) role at the subscription level. For information about creating support requests in general, see how to create a [How to create an Azure support request](../azure-portal/supportability/how-to-create-azure-support-request.md). The admin can follow these steps to request a limit increase: @@ -60,4 +60,4 @@ Before you set up a large number of VMs across your labs, we recommend that you See the following articles: - [As an admin, see VM sizing](administrator-guide.md#vm-sizing). -- [Frequently asked questions](classroom-labs-faq.yml). +- [Frequently asked questions](classroom-labs-faq.yml). \ No newline at end of file diff --git a/articles/lab-services/class-type-networking-gns3.md b/articles/lab-services/class-type-networking-gns3.md index f2f65ddd9e4ac..233a7ab82becc 100644 --- a/articles/lab-services/class-type-networking-gns3.md +++ b/articles/lab-services/class-type-networking-gns3.md @@ -1,8 +1,8 @@ --- -title: Set up a networking lab with Azure Lab Services and GNS3 | Microsoft Docs +title: Set up a networking lab with GNS3 description: Learn how to set up a lab using Azure Lab Services to teach networking with GNS3. 
ms.topic: how-to -ms.date: 01/19/2021 +ms.date: 04/19/2022 --- # Set up a lab to teach a networking class diff --git a/articles/lab-services/class-types.md b/articles/lab-services/class-types.md index 9790e33b0292a..b717b58107f4a 100644 --- a/articles/lab-services/class-types.md +++ b/articles/lab-services/class-types.md @@ -2,7 +2,7 @@ title: Example class types on Azure Lab Services | Microsoft Docs description: Provides some types of classes for which you can set up labs using Azure Lab Services. ms.topic: how-to -ms.date: 01/04/2020 +ms.date: 04/04/2022 --- # Class types overview - Azure Lab Services diff --git a/articles/lab-services/classroom-labs-fundamentals-1.md b/articles/lab-services/classroom-labs-fundamentals-1.md new file mode 100644 index 0000000000000..8fc83e676d227 --- /dev/null +++ b/articles/lab-services/classroom-labs-fundamentals-1.md @@ -0,0 +1,47 @@ +--- +title: Architecture fundamentals with lab accounts in Azure Lab Services | Microsoft Docs +description: This article will cover the fundamental resources used by Lab Services and basic architecture of a lab that using lab accounts. +author: emaher +ms.topic: overview +ms.date: 05/30/2022 +ms.service: lab-services +ms.author: enewman +--- + +# Architecture Fundamentals in Azure Lab Services when using lab accounts + +[!INCLUDE [preview note](./includes/lab-services-new-update-note.md)] + +Azure Lab Services is a SaaS (software as a service) solution, which means that the resources needed by Lab Services are handled for you. This article will cover the fundamental resources used by Lab Services and basic architecture of a lab. + +Azure Lab Services does provide a couple of areas that allow you to use your own resources with Lab Services. For more information about using VMs on your own network, see how to [peer a virtual network](how-to-connect-peer-virtual-network.md). To reuse images from an Azure Compute Gallery, see how to [attach a compute gallery](how-to-attach-detach-shared-image-gallery.md). + +Below is the basic architecture of a lab. The lab account is hosted in your subscription. The student VMs, along with the resources needed to support the VMs are hosted in a subscription owned by Azure Lab Services. Let’s talk about what is in Azure Lab Service's subscriptions in more detail. + +:::image type="content" source="./media/classroom-labs-fundamentals-1/labservices-basic-architecture.png" alt-text="Architecture diagram of labs using lab accounts in Azure Lab Services."::: + +## Hosted Resources + +The resources required to run a lab are hosted in one of the Microsoft-managed Azure subscriptions. Resources include: + +- template virtual machine for the educator +- virtual machine for each student +- network-related items such as a load balancer, virtual network, and network security group. + +These subscriptions are monitored for suspicious activity. It's important to note that this monitoring is done externally to the virtual machines through VM extension or network pattern monitoring. If [shutdown on disconnect](how-to-enable-shutdown-disconnect.md) is enabled, a diagnostic extension is enabled on the virtual machine. The extension allows Lab Services to be informed of the remote desktop protocol (RDP) session disconnect event. + +## Virtual Network + +Each lab is isolated by its own virtual network. If the lab has a [peered virtual network](how-to-connect-peer-virtual-network.md), then each lab is isolated by its own subnet. Students connect to their virtual machine through a load balancer. 
No student virtual machines have a public IP address; they only have a private IP address. The connection string for the student will be the public IP address of the load balancer and a random port between 49152 and 65535. Inbound rules on the load balancer forward the connection, depending on the operating system, to either port 22 (SSH) or port 3389 (RDP) of the appropriate virtual machine. An NSG prevents outside traffic on any other ports. + +## Access control to the virtual machines + +Lab Services handles the student’s ability to perform actions like start and stop on their virtual machines. It also controls access to their VM connection information. + +Lab Services also handles the registration of students to the service. There are currently two different access settings: restricted and nonrestricted. For more information, see the [manage lab users](how-to-configure-student-usage.md#send-invitations-to-users) article. Restricted access means Lab Services verifies that the students are added as user before allowing access. Nonrestricted means any user can register as long as they have the registration link and there's capacity in the lab. Nonrestricted can be useful for hackathon events. + +Student VMs that are hosted in the lab have a username and password set by the creator of the lab. Alternately, the creator of the lab can allow registered students to choose their own password on first sign-in. + +## Next steps + +To learn more about features available in Lab Services, see [Azure Lab Services concepts](classroom-labs-concepts.md) and [Azure Lab Services overview](lab-services-overview.md). diff --git a/articles/lab-services/classroom-labs-fundamentals.md b/articles/lab-services/classroom-labs-fundamentals.md index bb95f5f0bee76..e2d400540cacf 100644 --- a/articles/lab-services/classroom-labs-fundamentals.md +++ b/articles/lab-services/classroom-labs-fundamentals.md @@ -3,38 +3,49 @@ title: Architecture Fundamentals in Azure Lab Services | Microsoft Docs description: This article will cover the fundamental resources used by Lab Services and basic architecture of a lab. author: emaher ms.topic: overview -ms.date: 11/19/2021 +ms.date: 05/30/2022 ms.author: enewman +ms.service: lab-services --- # Architecture Fundamentals in Azure Lab Services -[!INCLUDE [preview note](./includes/lab-services-new-update-note.md)] +[!INCLUDE [preview note](./includes/lab-services-new-update-focused-article.md)] Azure Lab Services is a SaaS (software as a service) solution, which means that the resources needed by Lab Services are handled for you. This article will cover the fundamental resources used by Lab Services and basic architecture of a lab. -Azure Lab Services does provide a couple of areas that allow you to use your own resources in conjunction with Lab Services. For more information about using VMs on your own network, see how to [peer a virtual network](how-to-connect-peer-virtual-network.md). If using the April 2022 Update, see [Connect to your virtual network in Azure Lab Services](how-to-connect-vnet-injection.md) to use virtual network injection instead of virtual network peering. To reuse images from an Azure Compute Gallery, see how to [attach a compute gallery](how-to-attach-detach-shared-image-gallery.md). +Azure Lab Services does provide a couple of areas that allow you to use your own resources with Lab Services. 
For more information about using VMs on your own network, see [Connect to your virtual network in Azure Lab Services](how-to-connect-vnet-injection.md) to use virtual network injection instead of virtual network peering. To reuse images from an Azure Compute Gallery, see how to [attach a compute gallery](how-to-attach-detach-shared-image-gallery.md). -Below is the basic architecture of a lab. The lab account or lab plan is hosted in your subscription. The student VMs, along with the resources needed to support the VMs are hosted in a subscription owned by Azure Lab Services. Let’s talk about what is in Azure Lab Service's subscriptions in more detail. +Below is the basic architecture of a lab. The lab plan is hosted in your subscription. The student VMs, along with the resources needed to support the VMs are hosted in a subscription owned by Azure Lab Services. Let’s talk about what is in Azure Lab Service's subscriptions in more detail. -![labs basic architecture](./media/classroom-labs-fundamentals/labservices-basic-architecture.png) +:::image type="content" source="./media/classroom-labs-fundamentals/labservices-basic-architecture.png" alt-text="Architecture diagram of basic lab in Azure Lab Services."::: ## Hosted Resources -The resources required to run a lab are hosted in one of the Microsoft-managed Azure subscriptions. Resources include a template virtual machine for the educator, virtual machine for each student, and network-related items such as a load balancer, virtual network, and network security group. These subscriptions are monitored for suspicious activity. It is important to note that this monitoring is done externally to the virtual machines through VM extension or network pattern monitoring. If [shutdown on disconnect](how-to-enable-shutdown-disconnect.md) is enabled, a diagnostic extension is enabled on the virtual machine. The extension allows Lab Services to be informed of the remote desktop protocol (RDP) session disconnect event. +The resources required to run a lab are hosted in one of the Microsoft-managed Azure subscriptions. Resources include: + +- template virtual machine for the educator +- virtual machine for each student +- network-related items such as a load balancer, virtual network, and network security group + +These subscriptions are monitored for suspicious activity. It's important to note that this monitoring is done externally to the virtual machines through VM extension or network pattern monitoring. If [shutdown on disconnect](how-to-enable-shutdown-disconnect.md) is enabled, a diagnostic extension is enabled on the virtual machine. The extension allows Lab Services to be informed of the remote desktop protocol (RDP) session disconnect event. ## Virtual Network -> [!NOTE] -> For the latest experience in Azure Lab Services using your virtual network, see [Connect to your virtual network](how-to-connect-vnet-injection.md). This experience replaces the peer virtual network experience. +Each lab is isolated by its own virtual network. If the lab is using [advanced networking](how-to-connect-vnet-injection.md), then each lab using the same subnet that has been delegated to Azure Lab Services and connected to the lab plan. + +Students connect to their virtual machine through a load balancer. No student virtual machines have a public IP address; they only have a private IP address. 
The connection string for the student will be the public IP address of the load balancer and a random port between: + +- 4980-4989 and 5000-6999 for SSH connections +- 4990-4999 and 7000-8999 for RDP connections -Each lab is isolated by its own virtual network. If the lab has a [peered virtual network](how-to-connect-peer-virtual-network.md), then each lab is isolated by its own subnet. Students connect to their virtual machine through a load balancer. No student virtual machines have a public IP address; they only have a private ip address. The connection string for the student will be the public IP address of the load balancer and a random port between 49152 and 65535. Inbound rules on the load balancer forward the connection, depending on the operating system, to either port 22 (SSH) or port 3389 (RDP) of the appropriate virtual machine. An NSG prevents outside traffic on any other ports. +Inbound rules on the load balancer forward the connection, depending on the operating system, to either port 22 (SSH) or port 3389 (RDP) of the appropriate virtual machine. An NSG prevents outside traffic on any other ports. ## Access control to the virtual machines Lab Services handles the student’s ability to perform actions like start and stop on their virtual machines. It also controls access to their VM connection information. -Lab Services also handles the registration of students to the service. There are currently two different access settings: restricted and nonrestricted. For more information, see the [manage lab users](how-to-configure-student-usage.md#send-invitations-to-users) article. Restricted access means Lab Services verifies that the students are added as user before allowing access. Nonrestricted means any user can register as long as they have the registration link and there is capacity in the lab. Nonrestricted can be useful for hackathon events. +Lab Services also handles the registration of students to the service. There are currently two different access settings: restricted and nonrestricted. For more information, see the [manage lab users](how-to-configure-student-usage.md#send-invitations-to-users) article. Restricted access means Lab Services verifies that the students are added as user before allowing access. Nonrestricted means any user can register as long as they have the registration link and there's capacity in the lab. Nonrestricted can be useful for hackathon events. Student VMs that are hosted in the lab have a username and password set by the creator of the lab. Alternately, the creator of the lab can allow registered students to choose their own password on first sign-in. diff --git a/articles/lab-services/connect-virtual-machine-mac-remote-desktop.md b/articles/lab-services/connect-virtual-machine-mac-remote-desktop.md index d39bcb2d59b22..e0b9eca3f578d 100644 --- a/articles/lab-services/connect-virtual-machine-mac-remote-desktop.md +++ b/articles/lab-services/connect-virtual-machine-mac-remote-desktop.md @@ -1,8 +1,8 @@ --- -title: How to connect to an Azure Lab Services VM from Mac | Microsoft Docs +title: Connect to Azure Lab Services VMs from Mac description: Learn how to connect from a Mac to a virtual machine in Azure Lab Services. 
ms.topic: how-to -ms.date: 01/04/2020 +ms.date: 02/04/2022 --- # Connect to a VM using Remote Desktop Protocol on a Mac diff --git a/articles/lab-services/how-to-attach-detach-shared-image-gallery.md b/articles/lab-services/how-to-attach-detach-shared-image-gallery.md index fd5baf7193df9..d3acbfafc4c90 100644 --- a/articles/lab-services/how-to-attach-detach-shared-image-gallery.md +++ b/articles/lab-services/how-to-attach-detach-shared-image-gallery.md @@ -16,7 +16,7 @@ ms.custom: devdivchpfy22 This article shows you how to attach or detach an Azure Compute Gallery to a lab plan. > [!IMPORTANT] -> Lab plan administrators must manually [replicate images](/azure/virtual-machines/shared-image-galleries) to other regions in the compute gallery. Replicate an Azure Compute Gallery image to the same region as the lab plan to be shown in the list of virtual machine images during lab creation. +> Lab plan administrators must manually [replicate images](../virtual-machines/shared-image-galleries.md) to other regions in the compute gallery. Replicate an Azure Compute Gallery image to the same region as the lab plan to be shown in the list of virtual machine images during lab creation. Saving images to a compute gallery and replicating those images incurs additional cost. This cost is separate from the Azure Lab Services usage cost. For more information about Azure Compute Gallery pricing, see [Azure Compute Gallery – Billing](../virtual-machines/azure-compute-gallery.md#billing). @@ -108,4 +108,4 @@ To learn how to save a template image to the compute gallery or use an image fro To explore other options for bringing custom images to compute gallery outside of the context of a lab, see [Recommended approaches for creating custom images](approaches-for-custom-image-creation.md). -For more information about compute galleries in general, see [compute gallery](../virtual-machines/shared-image-galleries.md). +For more information about compute galleries in general, see [compute gallery](../virtual-machines/shared-image-galleries.md). \ No newline at end of file diff --git a/articles/lab-services/how-to-configure-firewall-settings.md b/articles/lab-services/how-to-configure-firewall-settings.md index 1e022433536fe..4527d34cb96f8 100644 --- a/articles/lab-services/how-to-configure-firewall-settings.md +++ b/articles/lab-services/how-to-configure-firewall-settings.md @@ -11,7 +11,7 @@ ms.topic: how-to Each organization or school will configure their own network in a way that best fits their needs. Sometimes that includes setting firewall rules that block Remote Desktop Protocol (RDP) or Secure Shell (SSH) connections to machines outside their own network. Because Azure Lab Services runs in the public cloud, some extra configuration maybe needed to allow students to access their VM when connecting from the campus network. -Each lab uses single public IP address and multiple ports. All VMs, both the template VM and student VMs, will use this public IP address. The public IP address won’t change for the life of lab. Each VM will have a different port number. The port numbers range is 49152 - 65535. The combination of public IP address and port number is used to connect educators and students to the correct VM. This article will cover how to find the specific public IP address used by a lab. That information can be used to update inbound and outbound firewall rules so students can access their VMs. +Each lab uses single public IP address and multiple ports. 
All VMs, both the template VM and student VMs, will use this public IP address. The public IP address won’t change for the life of the lab. Each VM will have a different port number. The port number range is 49152 - 65535. If using the April 2022 Update (preview), the port ranges for SSH connections are 4980-4989 and 5000-6999. The port ranges for RDP connections are 4990-4999 and 7000-8999. The combination of public IP address and port number is used to connect educators and students to the correct VM. This article will cover how to find the specific public IP address used by a lab. That information can be used to update inbound and outbound firewall rules so students can access their VMs. >[!IMPORTANT] >Each lab will have a different public IP address. @@ -30,7 +30,7 @@ The public IP addresses for each lab are listed in the **All labs** page of the ## Conclusion -Now we know the public IP address for the lab. Inbound and outbound rules can be created for the organization's firewall for the public ip address and the port range 49152 - 65535. Once the rules are updated, students can access their VMs without the network firewall blocking access. +Now we know the public IP address for the lab. Inbound and outbound rules can be created for the organization's firewall for the public IP address and the port range 49152 - 65535. Once the rules are updated, students can access their VMs without the network firewall blocking access. ## Next steps diff --git a/articles/lab-services/how-to-connect-vnet-injection.md index f7045b3885f9a..89a869f987ee1 100644 --- a/articles/lab-services/how-to-connect-vnet-injection.md +++ b/articles/lab-services/how-to-connect-vnet-injection.md @@ -26,9 +26,9 @@ You can connect to your own virtual network to your lab plan when you create the Before you configure VNet injection for your lab plan: -- [Create a virtual network](/azure/virtual-network/quick-create-portal). The virtual network must be in the same region as the lab plan. -- [Create a subnet](/azure/virtual-network/virtual-network-manage-subnet) for the virtual network. -- [Create a network security group (NSG)](/azure/virtual-network/manage-network-security-group) and apply it to the subnet. +- [Create a virtual network](../virtual-network/quick-create-portal.md). The virtual network must be in the same region as the lab plan. +- [Create a subnet](../virtual-network/virtual-network-manage-subnet.md) for the virtual network. +- [Create a network security group (NSG)](../virtual-network/manage-network-security-group.md) and apply it to the subnet. - [Delegate the subnet](#delegate-the-virtual-network-subnet-for-use-with-a-lab-plan) to **Microsoft.LabServices/labplans**. Certain on-premises networks are connected to Azure Virtual Network either through [ExpressRoute](../expressroute/expressroute-introduction.md) or [Virtual Network Gateway](../vpn-gateway/vpn-gateway-about-vpngateways.md). These services must be set up outside of Azure Lab Services. To learn more about connecting an on-premises network to Azure using ExpressRoute, see [ExpressRoute overview](../expressroute/expressroute-introduction.md). For on-premises connectivity using a Virtual Network Gateway, the gateway, specified virtual network, network security group, and the lab plan all must be in the same region.
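As a rough illustration of those prerequisites, the following Azure CLI sketch creates a virtual network and subnet, delegates the subnet to **Microsoft.LabServices/labplans**, and applies a network security group. The resource names, region, and address prefixes are placeholder assumptions for this sketch, not values defined in this article.

```azurecli
# Placeholder names and address ranges; adjust to your environment.
az network vnet create \
  --resource-group MyLabResourceGroup \
  --name MyLabVNet \
  --location eastus \
  --address-prefixes 10.1.0.0/16

# Create the subnet and delegate it to Azure Lab Services lab plans.
az network vnet subnet create \
  --resource-group MyLabResourceGroup \
  --vnet-name MyLabVNet \
  --name MyLabSubnet \
  --address-prefixes 10.1.0.0/24 \
  --delegations Microsoft.LabServices/labplans

# Create an NSG and apply it to the delegated subnet.
az network nsg create \
  --resource-group MyLabResourceGroup \
  --name MyLabSubnetNsg

az network vnet subnet update \
  --resource-group MyLabResourceGroup \
  --vnet-name MyLabVNet \
  --name MyLabSubnet \
  --network-security-group MyLabSubnetNsg
```

The same configuration can also be done in the Azure portal, as described in the delegation steps that follow.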
@@ -38,11 +38,11 @@ Certain on-premises networks are connected to Azure Virtual Network either throu ## Delegate the virtual network subnet for use with a lab plan -After you create a subnet for your virtual network, you must [delegate the subnet](/azure/virtual-network/subnet-delegation-overview) for use with Azure Lab Services. +After you create a subnet for your virtual network, you must [delegate the subnet](../virtual-network/subnet-delegation-overview.md) for use with Azure Lab Services. Only one lab plan at a time can be delegated for use with one subnet. -1. Create a [virtual network](/azure/virtual-network/manage-virtual-network), [subnet](/azure/virtual-network/virtual-network-manage-subnet), and [network security group (NSG)](/azure/virtual-network/manage-network-security-group) if not done already. +1. Create a [virtual network](../virtual-network/manage-virtual-network.md), [subnet](../virtual-network/virtual-network-manage-subnet.md), and [network security group (NSG)](../virtual-network/manage-network-security-group.md) if not done already. 1. Open the **Subnets** page for your virtual network. 1. Select the subnet you wish to delegate to Lab Services to open the property window for that subnet. 1. For the **Delegate subnet to a service** property, select **Microsoft.LabServices/labplans**. Select **Save**. @@ -87,4 +87,4 @@ See the following articles: - As an admin, [attach a compute gallery to a lab plan](how-to-attach-detach-shared-image-gallery.md). - As an admin, [configure automatic shutdown settings for a lab plan](how-to-configure-auto-shutdown-lab-plans.md). -- As an admin, [add lab creators to a lab plan](add-lab-creator.md). +- As an admin, [add lab creators to a lab plan](add-lab-creator.md). \ No newline at end of file diff --git a/articles/lab-services/how-to-create-a-lab-with-shared-resource.md b/articles/lab-services/how-to-create-a-lab-with-shared-resource.md index 555507e582451..a53003a15cac0 100644 --- a/articles/lab-services/how-to-create-a-lab-with-shared-resource.md +++ b/articles/lab-services/how-to-create-a-lab-with-shared-resource.md @@ -40,7 +40,7 @@ To use a shared resource, the lab plan must be set up to use advanced networking > [!WARNING] > Advanced networking must be enabled during lab plan creation. It can't be added later. -When your lab plan is set to use advanced networking, the template VM and student VMs should now have access to the shared resource. You might have to update the virtual network's [network security group](/azure/virtual-network/network-security-groups-overview), virtual network's [user-defined routes](/azure/virtual-network/virtual-networks-udr-overview#user-defined) or server's firewall rules. +When your lab plan is set to use advanced networking, the template VM and student VMs should now have access to the shared resource. You might have to update the virtual network's [network security group](../virtual-network/network-security-groups-overview.md), virtual network's [user-defined routes](../virtual-network/virtual-networks-udr-overview.md#user-defined) or server's firewall rules. ## Tips @@ -57,4 +57,4 @@ One of the most common shared resources is a license server. The following list ## Next steps -As an administrator, [create a lab plan with advanced networking](how-to-connect-vnet-injection.md). +As an administrator, [create a lab plan with advanced networking](how-to-connect-vnet-injection.md). 
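To make the kind of network security group update mentioned above concrete, here's a hedged Azure CLI sketch that allows traffic from a lab subnet to a license server. Every value shown — the resource group, NSG name, address prefixes, and port — is a hypothetical example rather than something defined by Azure Lab Services.

```azurecli
# Hypothetical example: allow lab VMs in 10.1.0.0/24 to reach a license
# server at 10.2.0.4 on TCP port 27000. Substitute your own values.
az network nsg rule create \
  --resource-group MySharedResourceGroup \
  --nsg-name MyLicenseServerNsg \
  --name AllowLabVmsToLicenseServer \
  --priority 200 \
  --direction Inbound \
  --access Allow \
  --protocol Tcp \
  --source-address-prefixes 10.1.0.0/24 \
  --destination-address-prefixes 10.2.0.4 \
  --destination-port-ranges 27000
```

Whether a rule like this is needed, and which port it should open, depends entirely on the shared resource and on how the virtual networks are connected.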
\ No newline at end of file diff --git a/articles/lab-services/how-to-create-manage-template.md b/articles/lab-services/how-to-create-manage-template.md index 5fec0608bbf64..0d813cad60995 100644 --- a/articles/lab-services/how-to-create-manage-template.md +++ b/articles/lab-services/how-to-create-manage-template.md @@ -52,7 +52,7 @@ In this step, you publish the template VM. When you publish the template VM, Azu 2. On the **Publish template** page, enter the number of virtual machines you want to create in the lab, and then select **Publish**. ![Publish template - number of VMs](./media/how-to-create-manage-template/publish-template-number-vms.png) -3. You see the **status of publishing** the template on page. This process can take up to an hour. +3. You see the **status of publishing** the template on this page. If using the [Azure Lab Services April 2022 Update (preview)](lab-services-whats-new.md), publishing can take up to 20 minutes. ![Publish template - progress](./media/how-to-create-manage-template/publish-template-progress.png) 4. Wait until the publishing is complete and then switch to the **Virtual machines pool** page by selecting **Virtual machines** on the left menu or by selecting **Virtual machines** tile. Confirm that you see virtual machines that are in **Unassigned** state. These VMs aren’t assigned to students yet. They should be in **Stopped** state. You can start a student VM, connect to the VM, stop the VM, and delete the VM on this page. You can start them in this page or let your students start the VMs. diff --git a/articles/lab-services/how-to-create-schedules-within-canvas.md b/articles/lab-services/how-to-create-schedules-within-canvas.md index 44ac3df8f15bf..cd8a44369f30d 100644 --- a/articles/lab-services/how-to-create-schedules-within-canvas.md +++ b/articles/lab-services/how-to-create-schedules-within-canvas.md @@ -19,7 +19,7 @@ Here is how schedules affect lab VM: The scheduled running time of VMs does not count against the [quota](classroom-labs-concepts.md#quota) given to a user. The quota is for the time outside of schedule hours that a student spends on VMs. -Educators can create, edit, and delete lab schedules within Canvas as in the Azure Lab Services portal. For more information on scheduling, see [Creating and managing schedules](how-to-create-schedules-within-canvas.md). +Educators can create, edit, and delete lab schedules within Canvas as in the Azure Lab Services portal. For more information on scheduling, see [Creating and managing schedules](how-to-create-schedules.md). > [!IMPORTANT] > Schedules will apply at the course level. If you have many sections of a course, consider using [automatic shutdown policies](how-to-configure-auto-shutdown-lab-plans.md) and/or [quota hours](how-to-configure-student-usage.md#set-quotas-for-users). diff --git a/articles/lab-services/how-to-create-schedules-within-teams.md b/articles/lab-services/how-to-create-schedules-within-teams.md index 21e7a8988bc30..3f62a6e49e3d1 100644 --- a/articles/lab-services/how-to-create-schedules-within-teams.md +++ b/articles/lab-services/how-to-create-schedules-within-teams.md @@ -19,7 +19,7 @@ Here's how schedules affect lab virtual machines: > [!IMPORTANT] > The scheduled run time of VMs doesn't count against the quota allotted to a user. The allotted quota is for the time outside of schedule hours that a student spends on VMs. -Users can create, edit, and delete lab schedules within Teams as in the Lab Services web portal: [https://labs.azure.com](https://labs.azure.com).
For more information, see [creating and managing schedules](how-to-create-schedules-within-teams.md). +Users can create, edit, and delete lab schedules within Teams as in the Lab Services web portal: [https://labs.azure.com](https://labs.azure.com). For more information, see [creating and managing schedules](how-to-create-schedules.md). ## Automatic shutdown and disconnect settings diff --git a/articles/lab-services/how-to-enable-nested-virtualization-template-vm-using-script.md b/articles/lab-services/how-to-enable-nested-virtualization-template-vm-using-script.md index b58621672f705..3c32fb6dc2b4d 100644 --- a/articles/lab-services/how-to-enable-nested-virtualization-template-vm-using-script.md +++ b/articles/lab-services/how-to-enable-nested-virtualization-template-vm-using-script.md @@ -9,7 +9,7 @@ ms.date: 06/26/2020 Nested virtualization enables you to create a multi-VM environment inside a lab's template virtual machine. Publishing the template will provide each user in the lab with a virtual machine set up with multiple VMs within it. For more information about nested virtualization and Azure Lab Services, see [Enable nested virtualization on a template virtual machine in Azure Lab Services](how-to-enable-nested-virtualization-template-vm.md). -The steps in this article focus on setting up nested virtualization for Windows Server 2016, Windows Server 2019, or Windows 10. You will use a script to set up template machine with Hyper-V. The following steps will guide you through how to use the [Lab Services Hyper-V scripts](https://github.com/Azure/azure-devtestlab/tree/master/samples/ClassroomLabs/Scripts/HyperV). +The steps in this article focus on setting up nested virtualization for Windows Server 2016, Windows Server 2019, or Windows 10. You will use a script to set up template machine with Hyper-V. The following steps will guide you through how to use the [Lab Services Hyper-V scripts](https://github.com/Azure/LabServices/tree/main/General_Scripts/PowerShell/HyperV). >[!IMPORTANT] >Select **Large (nested virtualization)** or **Medium (nested virtualization)** for the virtual machine size when creating the lab. Nested virtualization will not work otherwise. diff --git a/articles/lab-services/how-to-use-shared-image-gallery.md b/articles/lab-services/how-to-use-shared-image-gallery.md index a4e21726f868d..5148a409978d7 100644 --- a/articles/lab-services/how-to-use-shared-image-gallery.md +++ b/articles/lab-services/how-to-use-shared-image-gallery.md @@ -62,7 +62,7 @@ An educator can pick a custom image available in the compute gallery for the tem >[!IMPORTANT] >Azure Compute Gallery images will not show if they have been disabled or if the region of the lab plan is different than the gallery images. -For more information about replicating images, see [replication in Azure Compute Gallery](/azure/virtual-machines/shared-image-galleries.md). For more information about disabling gallery images for a lab plan, see [enable and disable images](how-to-attach-detach-shared-image-gallery.md#enable-and-disable-images). +For more information about replicating images, see [replication in Azure Compute Gallery](../virtual-machines/shared-image-galleries.md). For more information about disabling gallery images for a lab plan, see [enable and disable images](how-to-attach-detach-shared-image-gallery.md#enable-and-disable-images). 
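As an aside, one possible way to replicate an existing gallery image version into the lab plan's region is with the Azure CLI; the gallery, image definition, version number, and regions below are placeholder assumptions for illustration only, not values taken from this article.

```azurecli
# Hypothetical names: replicate an existing image version to East US and
# West US 2 so it can appear for lab plans in those regions.
az sig image-version update \
  --resource-group MyGalleryResourceGroup \
  --gallery-name MyComputeGallery \
  --gallery-image-definition MyImageDefinition \
  --gallery-image-version 1.0.0 \
  --target-regions "eastus=1" "westus2=1"
```

Replication can take a while, and extra replicas add to the compute gallery cost noted elsewhere in these articles.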
### Re-save a custom image to compute gallery @@ -78,4 +78,4 @@ To learn about how to set up a compute gallery by attaching and detaching it to To explore other options for bringing custom images to compute gallery outside of the context of a lab, see [Recommended approaches for creating custom images](approaches-for-custom-image-creation.md). -For more information about compute galleries in general, see [Azure Compute Gallery overview](../virtual-machines/shared-image-galleries.md). +For more information about compute galleries in general, see [Azure Compute Gallery overview](../virtual-machines/shared-image-galleries.md). \ No newline at end of file diff --git a/articles/lab-services/lab-services-whats-new.md b/articles/lab-services/lab-services-whats-new.md index 4bc88515d81e3..1a3fd93b5ede5 100644 --- a/articles/lab-services/lab-services-whats-new.md +++ b/articles/lab-services/lab-services-whats-new.md @@ -35,7 +35,7 @@ In this release, there are a few known issues: - When using virtual network injection, use caution in making changes to the virtual network and subnet. Changes may cause the lab VMs to stop working. For example, deleting your virtual network will cause all the lab VMs to stop working. We plan to improve this experience in the future, but for now make sure to delete labs before deleting networks. - Moving lab plan and lab resources from one Azure region to another isn't supported. -- Azure Compute [resource provider must be registered](/azure/azure-resource-manager/management/resource-providers-and-types) before Azure Lab Services can [create and attach an Azure Compute Gallery resource](how-to-attach-detach-shared-image-gallery.md#create-and-attach-a-compute-gallery). +- Azure Compute [resource provider must be registered](../azure-resource-manager/management/resource-providers-and-types.md) before Azure Lab Services can [create and attach an Azure Compute Gallery resource](how-to-attach-detach-shared-image-gallery.md#create-and-attach-a-compute-gallery). ### Lab plans replace lab accounts @@ -111,11 +111,11 @@ Let's cover each step to get started with the April 2022 Update (preview) in mor 1. **Validate images**. Each of the VM sizes has been remapped to use a newer Azure VM Compute SKU. If using an [attached compute gallery](how-to-attach-detach-shared-image-gallery.md), validate images with new [Azure VM Compute SKUs](administrator-guide.md#vm-sizing). Validate that each image in the compute gallery is replicated to regions the lab plans and labs are in. 1. **Configure integrations**. Optionally, configure [integration with Canvas](lab-services-within-canvas-overview.md) including [adding the app and linking lab plans](how-to-get-started-create-lab-within-canvas.md). Alternately, configure [integration with Teams](lab-services-within-teams-overview.md) by [adding the app to Teams groups](how-to-get-started-create-lab-within-teams.md). 1. **Create labs**. Create labs to test educator and student experience in preparation for general availability of the updates. Lab administrators and educators should validate performance based on common student workloads. -1. **Update cost management reports.** Update reports to include the new cost entry type, `Microsoft.LabServices/labs`, for labs created using the April 2022 Update (preview). [Built-in and custom tags](cost-management-guide.md#understand-the-entries) allow for [grouping](/azure/cost-management-billing/costs/quick-acm-cost-analysis) in cost analysis. 
For more information about tracking costs, see [Cost management for Azure Lab Services](cost-management-guide.md). +1. **Update cost management reports.** Update reports to include the new cost entry type, `Microsoft.LabServices/labs`, for labs created using the April 2022 Update (preview). [Built-in and custom tags](cost-management-guide.md#understand-the-entries) allow for [grouping](../cost-management-billing/costs/quick-acm-cost-analysis.md) in cost analysis. For more information about tracking costs, see [Cost management for Azure Lab Services](cost-management-guide.md). ## Next steps - As an admin, [create a lab plan](tutorial-setup-lab-plan.md). - As an admin, [manage your lab plan](how-to-manage-lab-plans.md). - As an educator, [create a lab](tutorial-setup-lab.md). -- As a student, [access a lab](how-to-use-lab.md). +- As a student, [access a lab](how-to-use-lab.md). \ No newline at end of file diff --git a/articles/lab-services/media/classroom-labs-fundamentals-1/labservices-basic-architecture.png b/articles/lab-services/media/classroom-labs-fundamentals-1/labservices-basic-architecture.png new file mode 100644 index 0000000000000..1509eec10102f Binary files /dev/null and b/articles/lab-services/media/classroom-labs-fundamentals-1/labservices-basic-architecture.png differ diff --git a/articles/lab-services/media/classroom-labs-fundamentals/labservices-basic-architecture.png b/articles/lab-services/media/classroom-labs-fundamentals/labservices-basic-architecture.png index 1509eec10102f..6d1d18c65f8e0 100644 Binary files a/articles/lab-services/media/classroom-labs-fundamentals/labservices-basic-architecture.png and b/articles/lab-services/media/classroom-labs-fundamentals/labservices-basic-architecture.png differ diff --git a/articles/lab-services/media/how-to-create-a-lab-with-shared-resource/shared-resource-architecture.png b/articles/lab-services/media/how-to-create-a-lab-with-shared-resource/shared-resource-architecture.png index 17eca63319dd1..66200d07e4267 100644 Binary files a/articles/lab-services/media/how-to-create-a-lab-with-shared-resource/shared-resource-architecture.png and b/articles/lab-services/media/how-to-create-a-lab-with-shared-resource/shared-resource-architecture.png differ diff --git a/articles/lab-services/quick-create-lab-bicep.md b/articles/lab-services/quick-create-lab-bicep.md new file mode 100644 index 0000000000000..d73a4b49df363 --- /dev/null +++ b/articles/lab-services/quick-create-lab-bicep.md @@ -0,0 +1,92 @@ +--- +title: Azure Lab Services Quickstart - Create a lab using Bicep +description: In this quickstart, you learn how to create an Azure Lab Services lab using Bicep +ms.topic: quickstart +ms.date: 05/23/2022 +ms.custom: template-quickstart +--- + +# Quickstart: Create a lab using a Bicep file + +In this quickstart, you, as the educator, create a lab using a Bicep file. For detailed overview of Azure Lab Services, see [An introduction to Azure Lab Services](lab-services-overview.md). + +[!INCLUDE [About Bicep](../../includes/resource-manager-quickstart-bicep-introduction.md)] + +## Prerequisites + +If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. + +## Review the Bicep file + +The Bicep file used in this quickstart is from [Azure Quickstart Templates](https://azure.microsoft.com/resources/templates/lab/). + +:::code language="bicep" source="~/quickstart-templates/quickstarts/microsoft.labservices/lab/main.bicep"::: + +## Deploy the Bicep file + +1. 
Save the Bicep file as **main.bicep** to your local computer. +1. Deploy the Bicep file using either Azure CLI or Azure PowerShell. + + # [CLI](#tab/CLI) + + ```azurecli + az group create --name exampleRG --location eastus + az deployment group create --resource-group exampleRG --template-file main.bicep --parameters adminUsername= + ``` + + # [PowerShell](#tab/PowerShell) + + ```azurepowershell + New-AzResourceGroup -Name exampleRG -Location eastus + New-AzResourceGroupDeployment -ResourceGroupName exampleRG -TemplateFile ./main.bicep -adminUsername "" + ``` + + --- + + > [!NOTE] + > Replace **\** with a unique username. You'll also be prompted to enter adminPassword. The minimum password length is 12 characters. + + When the deployment finishes, you should see a message indicating the deployment succeeded. + +## Review deployed resources + +Use the Azure portal, Azure CLI, or Azure PowerShell to list the deployed resources in the resource group. + +# [CLI](#tab/CLI) + +```azurecli-interactive +az resource list --resource-group exampleRG +``` + +# [PowerShell](#tab/PowerShell) + +```azurepowershell-interactive +Get-AzResource -ResourceGroupName exampleRG +``` + +--- + +## Clean up resources + +When no longer needed, use the Azure portal, Azure CLI, or Azure PowerShell to delete the lab and all of the resources in the resource group. + +# [CLI](#tab/CLI) + +```azurecli-interactive +az group delete --name exampleRG +``` + +# [PowerShell](#tab/PowerShell) + +```azurepowershell-interactive +Remove-AzResourceGroup -Name exampleRG +``` + +--- + +## Next steps + +In this quickstart, you deployed a lab using a Bicep file. To continue setting up the lab, configure the template virtual machine. + +> [!div class="nextstepaction"] +> [Configure a template VM](how-to-create-manage-template.md) \ No newline at end of file diff --git a/articles/lab-services/quick-create-lab-plan-bicep.md new file mode 100644 index 0000000000000..655c664dd1bff --- /dev/null +++ b/articles/lab-services/quick-create-lab-plan-bicep.md @@ -0,0 +1,92 @@ +--- +title: Azure Lab Services Quickstart - Create a lab plan using Bicep +description: In this quickstart, you learn how to create an Azure Lab Services lab plan using Bicep +ms.topic: quickstart +ms.date: 05/23/2022 +ms.custom: template-quickstart +--- + +# Quickstart: Create a lab plan using a Bicep file + +In this quickstart, you, as the admin, create a lab plan using a Bicep file. For a detailed overview of Azure Lab Services, see [An introduction to Azure Lab Services](lab-services-overview.md). + +[!INCLUDE [About Bicep](../../includes/resource-manager-quickstart-bicep-introduction.md)] + +## Prerequisites + +If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. + +## Review the Bicep file + +The Bicep file used in this quickstart is from [Azure Quickstart Templates](https://azure.microsoft.com/resources/templates/lab-plan/). + +:::code language="bicep" source="~/quickstart-templates/quickstarts/microsoft.labservices/lab-plan/main.bicep"::: + +## Deploy the Bicep file + +1. Save the Bicep file as **main.bicep** to your local computer. +1. Deploy the Bicep file using either Azure CLI or Azure PowerShell.
+ + # [CLI](#tab/CLI) + + ```azurecli + az group create --name exampleRG --location eastus + az deployment group create --resource-group exampleRG --template-file main.bicep --parameters adminUsername= + ``` + + # [PowerShell](#tab/PowerShell) + + ```azurepowershell + New-AzResourceGroup -Name exampleRG -Location eastus + New-AzResourceGroupDeployment -ResourceGroupName exampleRG -TemplateFile ./main.bicep -adminUsername "" + ``` + + --- + + > [!NOTE] + > Replace **\** with a unique username. You'll also be prompted to enter adminPassword. The minimum password length is 12 characters. + + When the deployment finishes, you should see a message indicating the deployment succeeded. + +## Review deployed resources + +Use the Azure portal, Azure CLI, or Azure PowerShell to list the deployed resources in the resource group. + +# [CLI](#tab/CLI) + +```azurecli-interactive +az resource list --resource-group exampleRG +``` + +# [PowerShell](#tab/PowerShell) + +```azurepowershell-interactive +Get-AzResource -ResourceGroupName exampleRG +``` + +--- + +## Clean up resources + +When no longer needed, use the Azure portal, Azure CLI, or Azure PowerShell to delete the lab plan and all of the resources in the resource group. + +# [CLI](#tab/CLI) + +```azurecli-interactive +az group delete --name exampleRG +``` + +# [PowerShell](#tab/PowerShell) + +```azurepowershell-interactive +Remove-AzResourceGroup -Name exampleRG +``` + +--- + +## Next steps + +In this quickstart, you deployed a lab plan using a Bicep file. To learn more about managing labs, continue to the next article. + +> [!div class="nextstepaction"] +> [Managing Labs](how-to-manage-labs.md) \ No newline at end of file diff --git a/articles/lab-services/quick-create-lab-plan-powershell.md index f4fe8639fc73f..b319d46b5b477 100644 --- a/articles/lab-services/quick-create-lab-plan-powershell.md +++ b/articles/lab-services/quick-create-lab-plan-powershell.md @@ -52,7 +52,7 @@ New-AzRoleAssignment -SignInName ` -ResourceGroupName "MyResourceGroup" ``` -For more information about role assignments, see [Assign Azure roles using Azure PowerShell](/azure/role-based-access-control/role-assignments-powershell). +For more information about role assignments, see [Assign Azure roles using Azure PowerShell](../role-based-access-control/role-assignments-powershell.md). ## Clean up resources @@ -70,4 +70,4 @@ $plan | Remove-AzLabServicesLabPlan In this QuickStart, you created a resource group and a lab plan. As an admin, you can learn more about [Azure PowerShell module](/powershell/azure) and [Az.LabServices cmdlets](/powershell/module/az.labservices/). > [!div class="nextstepaction"] -> [Quickstart: Create a lab using PowerShell and the Azure module](quick-create-lab-powershell.md) +> [Quickstart: Create a lab using PowerShell and the Azure module](quick-create-lab-powershell.md) \ No newline at end of file diff --git a/articles/lab-services/quick-create-lab-template.md index 9b57afaa601ab..dbe0317554f16 100644 --- a/articles/lab-services/quick-create-lab-template.md +++ b/articles/lab-services/quick-create-lab-template.md @@ -72,7 +72,7 @@ Get-AzLabServicesLab -Name $lab Write-Host "Press [ENTER] to continue..." ``` -To verify educators can use the lab, navigate to the Azure Lab Services website: [https://labs.azure.com](https://labs.azure.com).
For more information about managing labs, see [View all labs](/azure/lab-services/how-to-manage-labs.md#)](how-to-manage-labs.md#view-all-labs). +To verify educators can use the lab, navigate to the Azure Lab Services website: [https://labs.azure.com](https://labs.azure.com). For more information about managing labs, see [View all labs](/azure/lab-services/how-to-manage-labs). ## Clean up resources @@ -93,4 +93,4 @@ Alternately, an educator may delete a lab from the Azure Lab Services website: [ For a step-by-step tutorial that guides you through the process of creating a template, see: > [!div class="nextstepaction"] -> [Tutorial: Create and deploy your first ARM template](/azure/azure-resource-manager/templates/template-tutorial-create-first-template) \ No newline at end of file +> [Tutorial: Create and deploy your first ARM template](../azure-resource-manager/templates/template-tutorial-create-first-template.md) diff --git a/articles/lighthouse/concepts/cross-tenant-management-experience.md b/articles/lighthouse/concepts/cross-tenant-management-experience.md index 0c21ddd356988..b014f455d35cc 100644 --- a/articles/lighthouse/concepts/cross-tenant-management-experience.md +++ b/articles/lighthouse/concepts/cross-tenant-management-experience.md @@ -93,7 +93,7 @@ Most tasks and services can be performed on delegated resources across managed t - View alerts for delegated subscriptions, with the ability to view and refresh alerts across all subscriptions - View activity log details for delegated subscriptions -- [Log analytics](../../azure-monitor/logs/service-providers.md): Query data from remote workspaces in multiple tenants (note that automation accounts used to access data from workspaces in customer tenants must be created in the same tenant) +- [Log analytics](../../azure-monitor/logs/workspace-design.md#multiple-tenant-strategies): Query data from remote workspaces in multiple tenants (note that automation accounts used to access data from workspaces in customer tenants must be created in the same tenant) - Create, view, and manage [metric alerts](../../azure-monitor/alerts/alerts-metric.md), [log alerts](../../azure-monitor/alerts/alerts-log.md), and [activity log alerts](../../azure-monitor/alerts/alerts-activity-log.md) in customer tenants - Create alerts in customer tenants that trigger automation, such as Azure Automation runbooks or Azure Functions, in the managing tenant through webhooks - Create [diagnostic settings](../..//azure-monitor/essentials/diagnostic-settings.md) in workspaces created in customer tenants, to send resource logs to workspaces in the managing tenant diff --git a/articles/lighthouse/how-to/monitor-at-scale.md b/articles/lighthouse/how-to/monitor-at-scale.md index 92edb6d8acf0b..855d6df57b1a5 100644 --- a/articles/lighthouse/how-to/monitor-at-scale.md +++ b/articles/lighthouse/how-to/monitor-at-scale.md @@ -13,7 +13,7 @@ As a service provider, you may have onboarded multiple customer tenants to [Azur This topic shows you how to use [Azure Monitor Logs](../../azure-monitor/logs/data-platform-logs.md) in a scalable way across the customer tenants you're managing. Though we refer to service providers and customers in this topic, this guidance also applies to [enterprises using Azure Lighthouse to manage multiple tenants](../concepts/enterprise.md). 
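For instance, a hedged sketch of a cross-tenant log query run from the managing tenant might use the Azure CLI with a Kusto `workspace()` reference; the workspace GUID and the `customer-b-workspace` name below are placeholders, not real workspaces from this article.

```azurecli
# Query a delegated customer workspace and union it with a second
# customer workspace in the same Kusto query (placeholder IDs/names).
az monitor log-analytics query \
  --workspace 11111111-1111-1111-1111-111111111111 \
  --analytics-query 'union Heartbeat, workspace("customer-b-workspace").Heartbeat | summarize LastHeartbeat = max(TimeGenerated) by Computer' \
  --timespan P1D
```

A query like this only succeeds if the signed-in identity has the required access on the delegated subscriptions, which is what the note below addresses.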
> [!NOTE] -> Be sure that users in your managing tenants have been granted the [necessary roles for managing Log Analytics workspaces](../../azure-monitor/logs/manage-access.md#manage-access-using-azure-permissions) on your delegated customer subscriptions. +> Be sure that users in your managing tenants have been granted the [necessary roles for managing Log Analytics workspaces](../../azure-monitor/logs/manage-access.md#azure-rbac) on your delegated customer subscriptions. ## Create Log Analytics workspaces diff --git a/articles/load-balancer/backend-pool-management.md b/articles/load-balancer/backend-pool-management.md index ca4dda7b1a4ce..9192e7b2b2540 100644 --- a/articles/load-balancer/backend-pool-management.md +++ b/articles/load-balancer/backend-pool-management.md @@ -3,11 +3,11 @@ title: Backend Pool Management titleSuffix: Azure Load Balancer description: Get started learning how to configure and manage the backend pool of an Azure Load Balancer services: load-balancer -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: how-to ms.date: 2/17/2022 -ms.author: allensu +ms.author: greglin ms.custom: devx-track-azurepowershell, devx-track-azurecli --- # Backend pool management diff --git a/articles/load-balancer/basic/index.yml b/articles/load-balancer/basic/index.yml index 10f35e902f31a..b6fad88b82c4b 100644 --- a/articles/load-balancer/basic/index.yml +++ b/articles/load-balancer/basic/index.yml @@ -10,8 +10,8 @@ metadata: ms.service: load-balancer ms.topic: landing-page ms.collection: collection - author: asudbring - ms.author: allensu + author: greg-lindsay + ms.author: greglin ms.date: 03/15/2022 #Required; mm/dd/yyyy format. # linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | whats-new diff --git a/articles/load-balancer/basic/overview.md b/articles/load-balancer/basic/overview.md index 307c9b898c36e..e7681115c05e2 100644 --- a/articles/load-balancer/basic/overview.md +++ b/articles/load-balancer/basic/overview.md @@ -1,9 +1,9 @@ --- title: What is Basic Azure Load Balancer? description: Overview of Basic Azure Load Balancer. -author: asudbring +author: greg-lindsay ms.service: load-balancer -ms.author: allensu +ms.author: greglin ms.topic: overview ms.date: 04/14/2022 ms.custom: template-overview diff --git a/articles/load-balancer/basic/quickstart-basic-internal-load-balancer-cli.md b/articles/load-balancer/basic/quickstart-basic-internal-load-balancer-cli.md index 82f6a52b61fde..c372a4e8c8b35 100644 --- a/articles/load-balancer/basic/quickstart-basic-internal-load-balancer-cli.md +++ b/articles/load-balancer/basic/quickstart-basic-internal-load-balancer-cli.md @@ -2,11 +2,11 @@ title: 'Quickstart: Create an internal basic load balancer - Azure CLI' titleSuffix: Azure Load Balancer description: This quickstart shows how to create an internal basic load balancer by using the Azure CLI. -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: quickstart ms.date: 03/24/2022 -ms.author: allensu +ms.author: greglin ms.custom: mvc, devx-track-js, devx-track-azurecli, mode-api #Customer intent: I want to create a load balancer so that I can load balance internal traffic to VMs. --- @@ -24,7 +24,7 @@ This quickstart requires version 2.0.28 or later of the Azure CLI. If you're usi An Azure resource group is a logical container into which you deploy and manage your Azure resources. 
-Create a resource group with [az group create](/cli/azure/group#az_group_create). +Create a resource group with [az group create](/cli/azure/group#az-group-create). ```azurecli az group create \ @@ -39,7 +39,7 @@ When you create an internal load balancer, a virtual network is configured as th Before you deploy VMs and test your load balancer, create the supporting virtual network and subnet. The virtual network and subnet will contain the resources deployed later in this article. -Create a virtual network by using [az network vnet create](/cli/azure/network/vnet#az_network_vnet_create). +Create a virtual network by using [az network vnet create](/cli/azure/network/vnet#az-network-vnet-create). ```azurecli az network vnet create \ @@ -58,7 +58,7 @@ In this example, you'll create an Azure Bastion host. The Azure Bastion host is ### Create a bastion public IP address -Use [az network public-ip create](/cli/azure/network/public-ip#az_network_public_ip_create) to create a public IP address for the Azure Bastion host. +Use [az network public-ip create](/cli/azure/network/public-ip#az-network-public-ip-create) to create a public IP address for the Azure Bastion host. ```azurecli az network public-ip create \ @@ -69,7 +69,7 @@ az network public-ip create \ ``` ### Create a bastion subnet -Use [az network vnet subnet create](/cli/azure/network/vnet/subnet#az_network_vnet_subnet_create) to create a subnet. +Use [az network vnet subnet create](/cli/azure/network/vnet/subnet#az-network-vnet-subnet-create) to create a subnet. ```azurecli az network vnet subnet create \ @@ -81,7 +81,7 @@ az network vnet subnet create \ ### Create the bastion host -Use [az network bastion create](/cli/azure/network/bastion#az_network_bastion_create) to create a host. +Use [az network bastion create](/cli/azure/network/bastion#az-network-bastion-create) to create a host. ```azurecli az network bastion create \ @@ -108,7 +108,7 @@ This section details how you can create and configure the following components o ### Create the load balancer resource -Create an internal load balancer with [az network lb create](/cli/azure/network/lb#az_network_lb_create). +Create an internal load balancer with [az network lb create](/cli/azure/network/lb#az-network-lb-create). ```azurecli az network lb create \ @@ -127,7 +127,7 @@ A health probe checks all virtual machine instances to ensure they can send netw A virtual machine with a failed probe check is removed from the load balancer. The virtual machine is added back into the load balancer when the failure is resolved. -Create a health probe with [az network lb probe create](/cli/azure/network/lb/probe#az_network_lb_probe_create). +Create a health probe with [az network lb probe create](/cli/azure/network/lb/probe#az-network-lb-probe-create). ```azurecli az network lb probe create \ @@ -148,7 +148,7 @@ A load balancer rule defines: * The required source and destination port -Create a load balancer rule with [az network lb rule create](/cli/azure/network/lb/rule#az_network_lb_rule_create). +Create a load balancer rule with [az network lb rule create](/cli/azure/network/lb/rule#az-network-lb-rule-create). ```azurecli az network lb rule create \ @@ -168,7 +168,7 @@ Create a load balancer rule with [az network lb rule create](/cli/azure/network/ For a standard load balancer, the VMs in the backend pool are required to have network interfaces that belong to a network security group. -To create a network security group, use [az network nsg create](/cli/azure/network/nsg#az_network_nsg_create). 
+To create a network security group, use [az network nsg create](/cli/azure/network/nsg#az-network-nsg-create). ```azurecli az network nsg create \ @@ -178,7 +178,7 @@ To create a network security group, use [az network nsg create](/cli/azure/netwo ## Create a network security group rule -To create a network security group rule, use [az network nsg rule create](/cli/azure/network/nsg/rule#az_network_nsg_rule_create). +To create a network security group rule, use [az network nsg rule create](/cli/azure/network/nsg/rule#az-network-nsg-rule-create). ```azurecli az network nsg rule create \ @@ -205,7 +205,7 @@ In this section, you create: ### Create network interfaces for the virtual machines -Create two network interfaces with [az network nic create](/cli/azure/network/nic#az_network_nic_create). +Create two network interfaces with [az network nic create](/cli/azure/network/nic#az-network-nic-create). ```azurecli array=(myNicVM1 myNicVM2) @@ -222,7 +222,7 @@ Create two network interfaces with [az network nic create](/cli/azure/network/ni ### Create the availability set for the virtual machines -Create the availability set with [az vm availability-set create](/cli/azure/vm/availability-set#az_vm_availability_set_create). +Create the availability set with [az vm availability-set create](/cli/azure/vm/availability-set#az-vm-availability-set-create). ```azurecli az vm availability-set create \ @@ -233,7 +233,7 @@ Create the availability set with [az vm availability-set create](/cli/azure/vm/a ### Create the virtual machines -Create the virtual machines with [az vm create](/cli/azure/vm#az_vm_create). +Create the virtual machines with [az vm create](/cli/azure/vm#az-vm-create). ```azurecli array=(1 2) @@ -256,7 +256,7 @@ It can take a few minutes for the VMs to deploy. ## Add virtual machines to the backend pool -Add the virtual machines to the backend pool with [az network nic ip-config address-pool add](/cli/azure/network/nic/ip-config/address-pool#az_network_nic_ip_config_address_pool_add). +Add the virtual machines to the backend pool with [az network nic ip-config address-pool add](/cli/azure/network/nic/ip-config/address-pool#az-network-nic-ip-config-address-pool-add). ```azurecli array=(VM1 VM2) @@ -274,7 +274,7 @@ Add the virtual machines to the backend pool with [az network nic ip-config addr ## Create test virtual machine -Create the network interface with [az network nic create](/cli/azure/network/nic#az_network_nic_create). +Create the network interface with [az network nic create](/cli/azure/network/nic#az-network-nic-create). ```azurecli az network nic create \ @@ -284,7 +284,7 @@ Create the network interface with [az network nic create](/cli/azure/network/nic --subnet myBackEndSubnet \ --network-security-group myNSG ``` -Create the virtual machine with [az vm create](/cli/azure/vm#az_vm_create). +Create the virtual machine with [az vm create](/cli/azure/vm#az-vm-create). ```azurecli az vm create \ @@ -299,7 +299,7 @@ You might need to wait a few minutes for the virtual machine to deploy. ## Install IIS -Use [az vm extension set](/cli/azure/vm/extension#az_vm_extension_set) to install IIS on the backend virtual machines and set the default website to the computer name. +Use [az vm extension set](/cli/azure/vm/extension#az-vm-extension-set) to install IIS on the backend virtual machines and set the default website to the computer name. 
```azurecli array=(myVM1 myVM2) diff --git a/articles/load-balancer/basic/quickstart-basic-internal-load-balancer-portal.md b/articles/load-balancer/basic/quickstart-basic-internal-load-balancer-portal.md index 367161bb324eb..d2ec65f2b98f0 100644 --- a/articles/load-balancer/basic/quickstart-basic-internal-load-balancer-portal.md +++ b/articles/load-balancer/basic/quickstart-basic-internal-load-balancer-portal.md @@ -3,11 +3,11 @@ title: "Quickstart: Create a basic internal load balancer - Azure portal" titleSuffix: Azure Load Balancer description: This quickstart shows how to create a basic internal load balancer by using the Azure portal. services: load-balancer -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: quickstart ms.date: 03/21/2022 -ms.author: allensu +ms.author: greglin ms.custom: mvc, mode-ui #Customer intent: I want to create a internal load balancer so that I can load balance internal traffic to VMs. --- diff --git a/articles/load-balancer/basic/quickstart-basic-internal-load-balancer-powershell.md b/articles/load-balancer/basic/quickstart-basic-internal-load-balancer-powershell.md index 0eb34d89c93ba..10ea276951b01 100644 --- a/articles/load-balancer/basic/quickstart-basic-internal-load-balancer-powershell.md +++ b/articles/load-balancer/basic/quickstart-basic-internal-load-balancer-powershell.md @@ -2,11 +2,11 @@ title: 'Quickstart: Create an internal basic load balancer - Azure PowerShell' titleSuffix: Azure Load Balancer description: This quickstart shows how to create an internal basic load balancer using Azure PowerShell -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: quickstart ms.date: 03/24/2022 -ms.author: allensu +ms.author: greglin ms.custom: devx-track-azurepowershell, mode-api #Customer intent: I want to create a load balancer so that I can load balance internal traffic to VMs. --- diff --git a/articles/load-balancer/basic/quickstart-basic-public-load-balancer-cli.md b/articles/load-balancer/basic/quickstart-basic-public-load-balancer-cli.md index aef74308bd794..6b1fe44a42acb 100644 --- a/articles/load-balancer/basic/quickstart-basic-public-load-balancer-cli.md +++ b/articles/load-balancer/basic/quickstart-basic-public-load-balancer-cli.md @@ -2,8 +2,8 @@ title: 'Quickstart: Create a basic public load balancer - Azure CLI' titleSuffix: Azure Load Balancer description: Learn how to create a public basic SKU Azure Load Balancer in this quickstart using the Azure CLI. -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: quickstart ms.date: 03/16/2022 @@ -166,11 +166,11 @@ Create a network security group rule using [az network nsg rule create](/cli/azu ## Create a bastion host -In this section, you'll create te resources for Azure Bastion. Azure Bastion is used to securely manage the virtual machines in the backend pool of the load balancer. +In this section, you'll create the resources for Azure Bastion. Azure Bastion is used to securely manage the virtual machines in the backend pool of the load balancer. ### Create a public IP address -Use [az network public-ip create](/cli/azure/network/public-ip#az-network-public-ip-create) to create a public ip address for the bastion host. The public IP is used by the bastion host for secure access to the virtual machine resources. +Use [az network public-ip create](/cli/azure/network/public-ip#az-network-public-ip-create) to create a public IP address for the bastion host. 
The public IP is used by the bastion host for secure access to the virtual machine resources. ```azurecli az network public-ip create \ diff --git a/articles/load-balancer/basic/quickstart-basic-public-load-balancer-portal.md b/articles/load-balancer/basic/quickstart-basic-public-load-balancer-portal.md index 680787a62655f..8dea4a837c9e1 100644 --- a/articles/load-balancer/basic/quickstart-basic-public-load-balancer-portal.md +++ b/articles/load-balancer/basic/quickstart-basic-public-load-balancer-portal.md @@ -2,8 +2,8 @@ title: 'Quickstart: Create a basic public load balancer - Azure portal' titleSuffix: Azure Load Balancer description: Learn how to create a public basic SKU Azure Load Balancer in this quickstart. -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: quickstart ms.date: 03/15/2022 diff --git a/articles/load-balancer/basic/quickstart-basic-public-load-balancer-powershell.md b/articles/load-balancer/basic/quickstart-basic-public-load-balancer-powershell.md index af524d197a8d0..73864f1cdf545 100644 --- a/articles/load-balancer/basic/quickstart-basic-public-load-balancer-powershell.md +++ b/articles/load-balancer/basic/quickstart-basic-public-load-balancer-powershell.md @@ -2,8 +2,8 @@ title: 'Quickstart: Create a basic internal load balancer - Azure PowerShell' titleSuffix: Azure Load Balancer description: This quickstart shows how to create a basic internal load balancer using Azure PowerShell -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.date: 03/22/2022 ms.topic: quickstart ms.service: load-balancer diff --git a/articles/load-balancer/basic/virtual-network-ipv4-ipv6-dual-stack-cli.md b/articles/load-balancer/basic/virtual-network-ipv4-ipv6-dual-stack-cli.md index 1a64033fdda0d..3c337fc9154ba 100644 --- a/articles/load-balancer/basic/virtual-network-ipv4-ipv6-dual-stack-cli.md +++ b/articles/load-balancer/basic/virtual-network-ipv4-ipv6-dual-stack-cli.md @@ -2,11 +2,11 @@ title: Deploy IPv6 dual stack application - Basic Load Balancer - CLI titlesuffix: Azure Virtual Network description: Learn how to deploy a dual stack (IPv4 + IPv6) application with Basic Load Balancer using Azure CLI. -author: asudbring +author: greg-lindsay ms.service: virtual-network ms.topic: how-to ms.date: 03/31/2022 -ms.author: allensu +ms.author: greglin --- # Deploy an IPv6 dual stack application using Basic Load Balancer - CLI diff --git a/articles/load-balancer/basic/virtual-network-ipv4-ipv6-dual-stack-powershell.md b/articles/load-balancer/basic/virtual-network-ipv4-ipv6-dual-stack-powershell.md index c546ea84d0d0c..dfe8c798221e4 100644 --- a/articles/load-balancer/basic/virtual-network-ipv4-ipv6-dual-stack-powershell.md +++ b/articles/load-balancer/basic/virtual-network-ipv4-ipv6-dual-stack-powershell.md @@ -2,11 +2,11 @@ title: Deploy IPv6 dual stack application - Basic Load Balancer - PowerShell titlesuffix: Azure Virtual Network description: This article shows how deploy an IPv6 dual stack application in Azure virtual network using Azure PowerShell. 
-author: asudbring +author: greg-lindsay ms.service: virtual-network ms.topic: how-to ms.date: 03/31/2022 -ms.author: allensu +ms.author: greglin ms.custom: devx-track-azurepowershell --- diff --git a/articles/load-balancer/cli-samples.md b/articles/load-balancer/cli-samples.md index 2de719223b7b5..7348db104c19e 100644 --- a/articles/load-balancer/cli-samples.md +++ b/articles/load-balancer/cli-samples.md @@ -4,14 +4,14 @@ titleSuffix: Azure Load Balancer description: Azure CLI Samples services: load-balancer documentationcenter: load-balancer -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: article ms.custom: seodec18, devx-track-azurecli ms.tgt_pltfrm: ms.workload: infrastructure ms.date: 06/14/2018 -ms.author: allensu +ms.author: greglin --- # Azure CLI Samples for Load Balancer diff --git a/articles/load-balancer/components.md b/articles/load-balancer/components.md index b1625e20d72da..76e594a5065c4 100644 --- a/articles/load-balancer/components.md +++ b/articles/load-balancer/components.md @@ -3,13 +3,13 @@ title: Azure Load Balancer components description: Overview of Azure Load Balancer components services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: conceptual ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 12/27/2021 -ms.author: allensu +ms.author: greglin --- # Azure Load Balancer components diff --git a/articles/load-balancer/concepts.md b/articles/load-balancer/concepts.md index cc2c73a5c3434..8a835f65bf421 100644 --- a/articles/load-balancer/concepts.md +++ b/articles/load-balancer/concepts.md @@ -3,13 +3,13 @@ title: Azure Load Balancer concepts description: Overview of Azure Load Balancer concepts services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: conceptual ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 11/29/2021 -ms.author: allensu +ms.author: greglin --- diff --git a/articles/load-balancer/configure-vm-scale-set-cli.md b/articles/load-balancer/configure-vm-scale-set-cli.md index ad52c17481db1..82fdf8391a50e 100644 --- a/articles/load-balancer/configure-vm-scale-set-cli.md +++ b/articles/load-balancer/configure-vm-scale-set-cli.md @@ -1,8 +1,8 @@ --- title: Configure virtual machine scale set with an existing Azure Load Balancer - Azure CLI description: Learn how to configure a virtual machine scale set with an existing Azure Load Balancer by using the Azure CLI. -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: how-to ms.date: 03/25/2020 diff --git a/articles/load-balancer/configure-vm-scale-set-portal.md b/articles/load-balancer/configure-vm-scale-set-portal.md index 11e06c3aa38c5..729a0e8eff058 100644 --- a/articles/load-balancer/configure-vm-scale-set-portal.md +++ b/articles/load-balancer/configure-vm-scale-set-portal.md @@ -1,8 +1,8 @@ --- title: Configure virtual machine scale set with an existing Azure Load Balancer - Azure portal description: Learn how to configure a virtual machine scale set with an existing Azure Load Balancer by using the Azure portal. 
-author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: how-to ms.date: 03/25/2020 diff --git a/articles/load-balancer/configure-vm-scale-set-powershell.md b/articles/load-balancer/configure-vm-scale-set-powershell.md index 6fc7e10e9ffe6..7c9899b306377 100644 --- a/articles/load-balancer/configure-vm-scale-set-powershell.md +++ b/articles/load-balancer/configure-vm-scale-set-powershell.md @@ -1,8 +1,8 @@ --- title: Configure virtual machine scale set with an existing Azure Load Balancer - Azure PowerShell description: Learn how to configure a virtual machine scale set with an existing Azure Load Balancer. -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: how-to ms.date: 03/26/2020 diff --git a/articles/load-balancer/cross-region-overview.md b/articles/load-balancer/cross-region-overview.md index 33d4a1ddfb3b3..3b7bff4fec219 100644 --- a/articles/load-balancer/cross-region-overview.md +++ b/articles/load-balancer/cross-region-overview.md @@ -4,13 +4,13 @@ titleSuffix: Azure Load Balancer description: Overview of cross region load balancer tier for Azure Load Balancer. services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: conceptual ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 09/22/2020 -ms.author: allensu +ms.author: greglin ms.custom: references_regions --- diff --git a/articles/load-balancer/distribution-mode-concepts.md b/articles/load-balancer/distribution-mode-concepts.md index e9273c9009141..9c17b629bfbf0 100644 --- a/articles/load-balancer/distribution-mode-concepts.md +++ b/articles/load-balancer/distribution-mode-concepts.md @@ -1,11 +1,11 @@ --- title: Azure Load Balancer distribution modes description: Get started learning about the different distribution modes of Azure Load Balancer. -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: article -ms.date: 12/27/2021 +ms.date: 05/24/2022 ms.custom: template-concept #Customer intent: As a administrator, I want to learn about the different distribution modes of Azure Load Balancer so that I can configure the distribution mode for my application. --- @@ -21,13 +21,13 @@ Azure Load Balancer supports the following distribution modes for routing connec | Azure portal configuration | Session persistence: **None** | Session persistence: **Client IP** | Session persistence: **Client IP and protocol** | | [REST API](/rest/api/load-balancer/load-balancers/create-or-update#loaddistribution) | ```"loadDistribution":"Default"```| ```"loadDistribution":SourceIP``` | ```"loadDistribution":SourceIPProtocol``` | -There is no downtime when switching from one distribution mode to another on a Load Balancer. +There's no downtime when switching from one distribution mode to another on a load balancer. ## Hash based Azure Load Balancer uses a five tuple hash based distribution mode by default. 
-The five tuple is consists of: +The five tuple consists of: * **Source IP** * **Source port** * **Destination IP** diff --git a/articles/load-balancer/egress-only.md b/articles/load-balancer/egress-only.md index 8745b4b6cb8d8..2d5cca6316972 100644 --- a/articles/load-balancer/egress-only.md +++ b/articles/load-balancer/egress-only.md @@ -2,12 +2,12 @@ title: Outbound-only load balancer configuration titleSuffix: Azure Load Balancer description: In this article, learn about how to create an internal load balancer with outbound NAT -author: asudbring +author: greg-lindsay ms.custom: seodec18 ms.service: load-balancer ms.topic: how-to ms.date: 08/21/2021 -ms.author: allensu +ms.author: greglin --- # Outbound-only load balancer configuration diff --git a/articles/load-balancer/gateway-overview.md b/articles/load-balancer/gateway-overview.md index b68cf23a2be1c..d88f540110c38 100644 --- a/articles/load-balancer/gateway-overview.md +++ b/articles/load-balancer/gateway-overview.md @@ -3,8 +3,8 @@ title: Gateway load balancer (Preview) titleSuffix: Azure Load Balancer description: Overview of gateway load balancer SKU for Azure Load Balancer. ms.service: load-balancer -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.date: 12/28/2021 ms.topic: conceptual ms.custom: ignite-fall-2021 diff --git a/articles/load-balancer/gateway-partners.md b/articles/load-balancer/gateway-partners.md index e8c5bc0c94b98..aaccfb7057214 100644 --- a/articles/load-balancer/gateway-partners.md +++ b/articles/load-balancer/gateway-partners.md @@ -1,11 +1,11 @@ --- title: Azure Gateway Load Balancer partners description: Learn about partners offering their network appliances for use with this service. -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: article ms.date: 05/11/2022 -ms.author: allensu +ms.author: greglin ms.custom: --- # Gateway Load Balancer partners diff --git a/articles/load-balancer/howto-load-balancer-imds.md b/articles/load-balancer/howto-load-balancer-imds.md index 24cb43c0b1ffb..9128b7d2c6020 100644 --- a/articles/load-balancer/howto-load-balancer-imds.md +++ b/articles/load-balancer/howto-load-balancer-imds.md @@ -3,11 +3,11 @@ title: Retrieve load balancer metadata using Azure Instance Metadata Service (IM titleSuffix: Azure Load Balancer description: Get started learning how to retrieve load balancer metadata using Azure Instance Metadata Service. services: load-balancer -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: how-to ms.date: 02/12/2021 -ms.author: allensu +ms.author: greglin --- # Retrieve load balancer metadata using Azure Instance Metadata Service (IMDS) diff --git a/articles/load-balancer/inbound-nat-rules.md b/articles/load-balancer/inbound-nat-rules.md index 83f8eec1731ab..5da7792da60fc 100644 --- a/articles/load-balancer/inbound-nat-rules.md +++ b/articles/load-balancer/inbound-nat-rules.md @@ -2,12 +2,12 @@ title: Inbound NAT rules titleSuffix: Azure Load Balancer description: Overview of what is inbound NAT rule, why to use inbound NAT rule, and how to use inbound NAT rule. -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: conceptual ms.custom: ms.date: 2/17/2022 -ms.author: allensu +ms.author: greglin #Customer intent: As a administrator, I want to create an inbound NAT rule so that I can forward a port to a virtual machine in the backend pool of an Azure Load Balancer. 
--- diff --git a/articles/load-balancer/index.yml b/articles/load-balancer/index.yml index 14037b5164ff7..cbad9ef437b90 100644 --- a/articles/load-balancer/index.yml +++ b/articles/load-balancer/index.yml @@ -10,8 +10,8 @@ metadata: ms.service: load-balancer ms.topic: landing-page ms.collection: collection - author: asudbring - ms.author: allensu + author: greg-lindsay + ms.author: greglin ms.date: 05/07/2020 #Required; mm/dd/yyyy format. # linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | whats-new diff --git a/articles/load-balancer/instance-metadata-service-load-balancer.md b/articles/load-balancer/instance-metadata-service-load-balancer.md index 736ad1f921887..d39840efc4800 100644 --- a/articles/load-balancer/instance-metadata-service-load-balancer.md +++ b/articles/load-balancer/instance-metadata-service-load-balancer.md @@ -3,11 +3,11 @@ title: Retrieve load balancer information by using Azure Instance Metadata Servi titleSuffix: Azure Load Balancer description: Get started learning about using Azure Instance Metadata Service to retrieve load balancer information. services: load-balancer -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: conceptual ms.date: 02/12/2021 -ms.author: allensu +ms.author: greglin --- # Retrieve load balancer information by using Azure Instance Metadata Service diff --git a/articles/load-balancer/load-balancer-custom-probe-overview.md b/articles/load-balancer/load-balancer-custom-probe-overview.md index 4de6a1fe2a852..9c6fbf838e1bc 100644 --- a/articles/load-balancer/load-balancer-custom-probe-overview.md +++ b/articles/load-balancer/load-balancer-custom-probe-overview.md @@ -1,11 +1,11 @@ --- title: Azure Load Balancer health probes description: Learn about the different types of health probes and configuration for Azure Load Balancer -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: conceptual ms.date: 02/10/2022 -ms.author: allensu +ms.author: greglin --- # Azure Load Balancer health probes @@ -206,5 +206,6 @@ Azure Monitor logs aren't available for both public and internal Basic Load Bala ## Next steps - Learn more about [Standard Load Balancer](./load-balancer-overview.md) +- Learn [how to manage health probes](../load-balancer/manage-probes-how-to.md) - [Get started creating a public load balancer in Resource Manager by using PowerShell](quickstart-load-balancer-standard-public-powershell.md) - [REST API for health probes](/rest/api/load-balancer/loadbalancerprobes/) diff --git a/articles/load-balancer/load-balancer-distribution-mode.md b/articles/load-balancer/load-balancer-distribution-mode.md index 2ef151f4a4574..c3fdbc3c5b18b 100644 --- a/articles/load-balancer/load-balancer-distribution-mode.md +++ b/articles/load-balancer/load-balancer-distribution-mode.md @@ -4,14 +4,14 @@ titleSuffix: Azure Load Balancer description: In this article, get started configuring the distribution mode for Azure Load Balancer to support source IP affinity. 
services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: how-to ms.custom: seodec18, devx-track-azurecli, devx-track-azurepowershell ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 02/04/2021 -ms.author: allensu +ms.author: greglin --- # Configure the distribution mode for Azure Load Balancer diff --git a/articles/load-balancer/load-balancer-faqs.yml b/articles/load-balancer/load-balancer-faqs.yml index 4fee26cc59637..da94efafd0262 100644 --- a/articles/load-balancer/load-balancer-faqs.yml +++ b/articles/load-balancer/load-balancer-faqs.yml @@ -77,7 +77,7 @@ sections: - question: | Does Azure Load Balancer support TLS/SSL termination? answer: | - No, Azure Load Balancer doesn't currently support termination as it is a pass through network load balancer. Application Gateway could be a potential solution if your application requires this. + No, Azure Load Balancer doesn't currently support TLS/SSL termination because it is a pass-through network load balancer. [Application Gateway](../application-gateway/ssl-overview.md) could be a potential solution if your application requires this. - question: | How do I configure my Load Balancer with an Azure Firewall? @@ -92,7 +92,7 @@ sections: - question: | What are best practices with respect to outbound connectivity? answer: | - Standard Load Balancer and Standard Public IP introduces abilities and different behaviors to outbound connectivity. They are not the same as Basic SKUs. If you want outbound connectivity when working with Standard SKUs, you must explicitly define it either with Standard Public IP addresses or Standard public Load Balancer. This includes creating outbound connectivity when using an internal Standard Load Balancer. We recommend you always use outbound rules on a Standard public Load Balancer. That means when an internal Standard Load Balancer is used, you need to take steps to create outbound connectivity for the VMs in the backend pool if outbound connectivity is desired. In the context of outbound connectivity,a single standalone VM, all the VM's in an Availability Set, all the instances in a virtual machine scale set behave as a group. This means, if a single VM in an Availability Set is associated with a Standard SKU, all VM instances within this Availability Set now behave by the same rules as if they are associated with Standard SKU, even if an individual instance is not directly associated with it. This behavior is also observed in the case of a standalone VM with multiple network interface cards attached to a load balancer. If one NIC is added as a standalone, it will have the same behavior. Carefully review this entire document to understand the overall concepts, review [Standard Load Balancer](./load-balancer-overview.md) for differences between SKUs, and review [outbound rules](load-balancer-outbound-connections.md#outboundrules). + Standard Load Balancer and Standard Public IP introduce abilities and different behaviors to outbound connectivity. They are not the same as Basic SKUs. If you want outbound connectivity when working with Standard SKUs, you must explicitly define it either with Standard Public IP addresses or a Standard public Load Balancer. This includes creating outbound connectivity when using an internal Standard Load Balancer. We recommend you always use outbound rules on a Standard public Load Balancer.
That means when an internal Standard Load Balancer is used, you need to take steps to create outbound connectivity for the VMs in the backend pool if outbound connectivity is desired. In the context of outbound connectivity, a single standalone VM, all the VMs in an Availability Set, and all the instances in a virtual machine scale set behave as a group. This means that if a single VM in an Availability Set is associated with a Standard SKU, all VM instances within this Availability Set now follow the same rules as if they are associated with a Standard SKU, even if an individual instance is not directly associated with it. This behavior is also observed in the case of a standalone VM with multiple network interface cards attached to a load balancer. If one NIC is added as a standalone, it will have the same behavior. Carefully review this entire document to understand the overall concepts, review [Standard Load Balancer](./load-balancer-overview.md) for differences between SKUs, and review [outbound rules](load-balancer-outbound-connections.md#outboundrules). Using outbound rules gives you fine-grained control over all aspects of outbound connectivity. - question: | diff --git a/articles/load-balancer/load-balancer-floating-ip.md b/articles/load-balancer/load-balancer-floating-ip.md index a6aa1e8912f99..579c8b85774e6 100644 --- a/articles/load-balancer/load-balancer-floating-ip.md +++ b/articles/load-balancer/load-balancer-floating-ip.md @@ -3,13 +3,13 @@ title: Azure Load Balancer Floating IP configuration description: Overview of Azure Load Balancer Floating IP services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: how-to ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 12/2/2021 -ms.author: allensu +ms.author: greglin --- @@ -28,9 +28,9 @@ If you want to reuse the backend port across multiple rules, you must enable Flo When Floating IP is enabled, Azure changes the IP address mapping to the Frontend IP address of the Load Balancer frontend instead of backend instance's IP. -Without Floating IP, Azure exposes the VM instances' IP. Enabling Floating IP changes the IP address mapping to the Frontend IP of the load Balancer to allow for additional flexibility. Learn more [here](load-balancer-multivip-overview.md). +Without Floating IP, Azure exposes the VM instances' IP. Enabling Floating IP changes the IP address mapping to the Frontend IP of the load balancer to allow for more flexibility. To learn more, see [Multiple frontends for Azure Load Balancer](load-balancer-multivip-overview.md). -Floating IP can be configured on a Load Balancer rule via the Azure portal, REST API, CLI, PowerShell, or other client. In addition to the rule configuration, you must also configure your virtual machine's Guest OS in order to leverage Floating IP. +Floating IP can be configured on a Load Balancer rule via the Azure portal, REST API, CLI, PowerShell, or other client. In addition to the rule configuration, you must also configure your virtual machine's Guest OS in order to use Floating IP. ## Floating IP Guest OS configuration For each VM in the backend pool, run the following commands at a Windows Command Prompt.
@@ -61,14 +61,14 @@ netsh interface ipv4 set interface “interfacename” weakhostreceive=enabled netsh interface ipv4 set interface “interfacename” weakhostsend=enabled ``` -(replace interfacename with the name of this loopback interface) +(replace **interfacename** with the name of this loopback interface) > [!IMPORTANT] > The configuration of the loopback interfaces is performed within the guest OS. This configuration is not performed or managed by Azure. Without this configuration, the rules will not function. ## Limitations -- Floating IP is not currently supported on secondary IP configurations for Load Balancing scenarios. Note that this does not apply to Public load balancers with dual-stack configurations or to architectures that utilize a NAT Gateway for outbound connectivity. +- Floating IP is not currently supported on secondary IP configurations for Load Balancing scenarios. This does not apply to Public load balancers with dual-stack configurations or to architectures that utilize a NAT Gateway for outbound connectivity. ## Next steps diff --git a/articles/load-balancer/load-balancer-ha-ports-overview.md b/articles/load-balancer/load-balancer-ha-ports-overview.md index e454afbfac71b..3a1b81df4bc26 100644 --- a/articles/load-balancer/load-balancer-ha-ports-overview.md +++ b/articles/load-balancer/load-balancer-ha-ports-overview.md @@ -2,14 +2,14 @@ title: High availability ports overview in Azure titleSuffix: Azure Load Balancer description: Learn about high availability ports load balancing on an internal load balancer. -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: article ms.custom: seodec18 ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 04/14/2022 -ms.author: allensu +ms.author: greglin --- # High availability ports overview diff --git a/articles/load-balancer/load-balancer-ipv6-for-linux.md b/articles/load-balancer/load-balancer-ipv6-for-linux.md index b72c9d193ade0..0d348090c6dde 100644 --- a/articles/load-balancer/load-balancer-ipv6-for-linux.md +++ b/articles/load-balancer/load-balancer-ipv6-for-linux.md @@ -4,7 +4,7 @@ titleSuffix: Azure Load Balancer description: In this article, learn how to configure DHCPv6 for Linux VMs. services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay keywords: ipv6, azure load balancer, dual stack, public ip, native ipv6, mobile, iot ms.service: load-balancer ms.topic: article @@ -12,7 +12,7 @@ ms.custom: seodec18 ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 03/22/2019 -ms.author: allensu +ms.author: greglin --- # Configure DHCPv6 for Linux VMs diff --git a/articles/load-balancer/load-balancer-ipv6-internet-cli.md b/articles/load-balancer/load-balancer-ipv6-internet-cli.md index 7c3a25284f54d..b35a6a145f556 100644 --- a/articles/load-balancer/load-balancer-ipv6-internet-cli.md +++ b/articles/load-balancer/load-balancer-ipv6-internet-cli.md @@ -4,7 +4,7 @@ titleSuffix: Azure Load Balancer description: With this learning path, get started creating a public load balancer with IPv6 using Azure CLI. 
services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay keywords: ipv6, azure load balancer, dual stack, public ip, native ipv6, mobile, iot ms.service: load-balancer ms.topic: how-to @@ -12,7 +12,7 @@ ms.custom: seodec18, devx-track-azurecli ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 06/25/2018 -ms.author: allensu +ms.author: greglin --- # Create a public load balancer with IPv6 using Azure CLI diff --git a/articles/load-balancer/load-balancer-ipv6-internet-ps.md b/articles/load-balancer/load-balancer-ipv6-internet-ps.md index a3d8a39c9f03d..edc47077d2533 100644 --- a/articles/load-balancer/load-balancer-ipv6-internet-ps.md +++ b/articles/load-balancer/load-balancer-ipv6-internet-ps.md @@ -4,7 +4,7 @@ titleSuffix: Azure Load Balancer description: Learn how to create an Internet facing load balancer with IPv6 using PowerShell for Resource Manager services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay keywords: ipv6, azure load balancer, dual stack, public ip, native ipv6, mobile, iot ms.service: load-balancer ms.custom: seodec18 @@ -12,7 +12,7 @@ ms.topic: how-to ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 09/25/2017 -ms.author: allensu +ms.author: greglin --- # Get started creating an Internet facing load balancer with IPv6 using PowerShell for Resource Manager diff --git a/articles/load-balancer/load-balancer-ipv6-internet-template.md b/articles/load-balancer/load-balancer-ipv6-internet-template.md index efb779d9d57fc..85a745492ea7b 100644 --- a/articles/load-balancer/load-balancer-ipv6-internet-template.md +++ b/articles/load-balancer/load-balancer-ipv6-internet-template.md @@ -4,7 +4,7 @@ titleSuffix: Azure Load Balancer description: Learn how to deploy IPv6 support for Azure Load Balancer and load-balanced VMs using an Azure template. services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay keywords: ipv6, azure load balancer, dual stack, public ip, native ipv6, mobile, iot ms.service: load-balancer ms.topic: how-to @@ -12,7 +12,7 @@ ms.custom: seodec18 ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 09/25/2017 -ms.author: allensu +ms.author: greglin --- # Deploy an Internet-facing load-balancer solution with IPv6 using a template diff --git a/articles/load-balancer/load-balancer-ipv6-overview.md b/articles/load-balancer/load-balancer-ipv6-overview.md index 2eb09332e8275..a4803e9bd99fb 100644 --- a/articles/load-balancer/load-balancer-ipv6-overview.md +++ b/articles/load-balancer/load-balancer-ipv6-overview.md @@ -3,7 +3,7 @@ title: Overview of IPv6 - Azure Load Balancer description: With this learning path, get started with IPv6 support for Azure Load Balancer and load-balanced VMs. 
services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay keywords: ipv6, azure load balancer, dual stack, public ip, native ipv6, mobile, iot ms.service: load-balancer ms.topic: article @@ -11,7 +11,7 @@ ms.custom: seodec18 ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 08/24/2018 -ms.author: allensu +ms.author: greglin --- # Overview of IPv6 for Azure Load Balancer diff --git a/articles/load-balancer/load-balancer-multiple-ip-cli.md b/articles/load-balancer/load-balancer-multiple-ip-cli.md index a6d5f2e377933..c74dfe4fa6918 100644 --- a/articles/load-balancer/load-balancer-multiple-ip-cli.md +++ b/articles/load-balancer/load-balancer-multiple-ip-cli.md @@ -4,14 +4,14 @@ titleSuffix: Azure Load Balancer description: Learn how to assign multiple IP addresses to a virtual machine using Azure CLI. services: virtual-network documentationcenter: na -author: asudbring +author: greg-lindsay ms.custom: seodec18, devx-track-azurecli ms.service: load-balancer ms.topic: how-to ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 06/25/2018 -ms.author: allensu +ms.author: greglin --- # Load balancing on multiple IP configurations using Azure CLI diff --git a/articles/load-balancer/load-balancer-multiple-ip-powershell.md b/articles/load-balancer/load-balancer-multiple-ip-powershell.md index a25a9ab0c1612..6e919755abc31 100644 --- a/articles/load-balancer/load-balancer-multiple-ip-powershell.md +++ b/articles/load-balancer/load-balancer-multiple-ip-powershell.md @@ -4,14 +4,14 @@ titleSuffix: Azure Load Balancer description: In this article, learn about load balancing across primary and secondary IP configurations using Azure CLI. services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: how-to ms.custom: seodec18, devx-track-azurecli ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 09/25/2017 -ms.author: allensu +ms.author: greglin --- # Load balancing on multiple IP configurations using PowerShell diff --git a/articles/load-balancer/load-balancer-multiple-ip.md b/articles/load-balancer/load-balancer-multiple-ip.md index 5b9ae73d5f8d7..3b2965598a550 100644 --- a/articles/load-balancer/load-balancer-multiple-ip.md +++ b/articles/load-balancer/load-balancer-multiple-ip.md @@ -2,8 +2,8 @@ title: 'Tutorial: Load balance multiple IP configurations - Azure portal' titleSuffix: Azure Load Balancer description: In this article, learn about load balancing across primary and secondary NIC configurations using the Azure portal. 
-author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: tutorial ms.date: 08/08/2021 diff --git a/articles/load-balancer/load-balancer-multivip-overview.md b/articles/load-balancer/load-balancer-multivip-overview.md index 8c7d681e108f7..e79f8edbff7fd 100644 --- a/articles/load-balancer/load-balancer-multivip-overview.md +++ b/articles/load-balancer/load-balancer-multivip-overview.md @@ -3,14 +3,14 @@ title: Multiple frontends - Azure Load Balancer description: With this learning path, get started with an overview of multiple frontends on Azure Load Balancer services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.custom: seodec18 ms.topic: article ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 01/26/2022 -ms.author: allensu +ms.author: greglin --- # Multiple frontends for Azure Load Balancer @@ -157,7 +157,7 @@ The Floating IP rule type is the foundation of several load balancer configurati * Multiple frontend configurations are only supported with IaaS VMs and virtual machine scale sets. * With the Floating IP rule, your application must use the primary IP configuration for outbound SNAT flows. If your application binds to the frontend IP address configured on the loopback interface in the guest OS, Azure's outbound SNAT is not available to rewrite the outbound flow and the flow fails. Review [outbound scenarios](load-balancer-outbound-connections.md). -* Floating IP is not currently supported on secondary IP configurations for Internal Load Balancing scenarios. +* Floating IP is not currently supported on secondary IP configurations. * Public IP addresses have an effect on billing. For more information, see [IP Address pricing](https://azure.microsoft.com/pricing/details/ip-addresses/) * Subscription limits apply. For more information, see [Service limits](../azure-resource-manager/management/azure-subscription-service-limits.md#networking-limits) for details. diff --git a/articles/load-balancer/load-balancer-outbound-connections.md b/articles/load-balancer/load-balancer-outbound-connections.md index ab189465a1470..c3150db74bf6a 100644 --- a/articles/load-balancer/load-balancer-outbound-connections.md +++ b/articles/load-balancer/load-balancer-outbound-connections.md @@ -3,12 +3,12 @@ title: Source Network Address Translation (SNAT) for outbound connections titleSuffix: Azure Load Balancer description: Learn how Azure Load Balancer is used for outbound internet connectivity (SNAT). services: load-balancer -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: conceptual ms.custom: contperf-fy21q1 ms.date: 03/01/2022 -ms.author: allensu +ms.author: greglin --- # Use Source Network Address Translation (SNAT) for outbound connections diff --git a/articles/load-balancer/load-balancer-overview.md b/articles/load-balancer/load-balancer-overview.md index 2e1a0270d97c6..4ee3b95b4bb35 100644 --- a/articles/load-balancer/load-balancer-overview.md +++ b/articles/load-balancer/load-balancer-overview.md @@ -4,7 +4,7 @@ titleSuffix: Azure Load Balancer description: Overview of Azure Load Balancer features, architecture, and implementation. Learn how the Load Balancer works and how to use it in the cloud. services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay ms.service: load-balancer # Customer intent: As an IT administrator, I want to learn more about the Azure Load Balancer service and what I can use it for. 
ms.topic: overview @@ -12,7 +12,7 @@ ms.custom: seodec18 ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 1/25/2021 -ms.author: allensu +ms.author: greglin --- diff --git a/articles/load-balancer/load-balancer-query-metrics-rest-api.md b/articles/load-balancer/load-balancer-query-metrics-rest-api.md index 80ead80d29252..3401b9aafdfb8 100644 --- a/articles/load-balancer/load-balancer-query-metrics-rest-api.md +++ b/articles/load-balancer/load-balancer-query-metrics-rest-api.md @@ -3,13 +3,13 @@ title: Retrieve metrics with the REST API titleSuffix: Azure Load Balancer description: In this article, get started using the Azure REST APIs to collect health and usage metrics for Azure Load Balancer. services: sql-database -author: asudbring +author: greg-lindsay manager: KumudD ms.service: load-balancer ms.custom: REST, seodec18 ms.topic: how-to ms.date: 11/19/2019 -ms.author: allensu +ms.author: greglin --- # Get Load Balancer usage metrics using the REST API diff --git a/articles/load-balancer/load-balancer-standard-availability-zones.md b/articles/load-balancer/load-balancer-standard-availability-zones.md index 6861de8ac1f92..b25f922448563 100644 --- a/articles/load-balancer/load-balancer-standard-availability-zones.md +++ b/articles/load-balancer/load-balancer-standard-availability-zones.md @@ -4,14 +4,14 @@ titleSuffix: Azure Load Balancer description: With this learning path, get started with Azure Standard Load Balancer and Availability Zones. services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay ms.custom: seodec18 ms.service: load-balancer ms.topic: article ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 05/07/2020 -ms.author: allensu +ms.author: greglin --- # Load Balancer and Availability Zones diff --git a/articles/load-balancer/load-balancer-standard-diagnostics.md b/articles/load-balancer/load-balancer-standard-diagnostics.md index 9a167c08c1cfd..167c8b2653d2d 100644 --- a/articles/load-balancer/load-balancer-standard-diagnostics.md +++ b/articles/load-balancer/load-balancer-standard-diagnostics.md @@ -2,12 +2,12 @@ title: Diagnostics with metrics, alerts, and resource health titleSuffix: Azure Load Balancer description: Use the available metrics, alerts, and resource health information to diagnose your load balancer. -author: asudbring +author: greg-lindsay ms.custom: seodec18 ms.service: load-balancer ms.topic: article ms.date: 01/26/2022 -ms.author: allensu +ms.author: greglin --- # Standard load balancer diagnostics with metrics, alerts, and resource health diff --git a/articles/load-balancer/load-balancer-tcp-idle-timeout.md b/articles/load-balancer/load-balancer-tcp-idle-timeout.md index 9a0f5fd9023bf..2ecc4b8d96ccb 100644 --- a/articles/load-balancer/load-balancer-tcp-idle-timeout.md +++ b/articles/load-balancer/load-balancer-tcp-idle-timeout.md @@ -4,14 +4,14 @@ titleSuffix: Azure Load Balancer description: In this article, learn how to configure Azure Load Balancer TCP idle timeout and reset. 
services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay ms.custom: seodec18, devx-track-azurepowershell ms.service: load-balancer ms.topic: how-to ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 10/26/2020 -ms.author: allensu +ms.author: greglin --- # Configure TCP reset and idle timeout for Azure Load Balancer diff --git a/articles/load-balancer/load-balancer-tcp-reset.md b/articles/load-balancer/load-balancer-tcp-reset.md index 4b46ab617005c..8e8aadbd857f5 100644 --- a/articles/load-balancer/load-balancer-tcp-reset.md +++ b/articles/load-balancer/load-balancer-tcp-reset.md @@ -4,14 +4,14 @@ titleSuffix: Azure Load Balancer description: With this article, learn about Azure Load Balancer with bidirectional TCP RST packets on idle timeout. services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay ms.custom: seodec18 ms.service: load-balancer ms.topic: how-to ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 10/07/2020 -ms.author: allensu +ms.author: greglin --- # Load Balancer TCP Reset and Idle Timeout diff --git a/articles/load-balancer/load-balancer-troubleshoot-backend-traffic.md b/articles/load-balancer/load-balancer-troubleshoot-backend-traffic.md index 131dd0e5f079a..d24e84a65f939 100644 --- a/articles/load-balancer/load-balancer-troubleshoot-backend-traffic.md +++ b/articles/load-balancer/load-balancer-troubleshoot-backend-traffic.md @@ -3,7 +3,7 @@ title: Troubleshoot Azure Load Balancer description: Learn how to troubleshoot known issues with Azure Load Balancer. services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay manager: dcscontentpm ms.custom: seodoc18 ms.service: load-balancer @@ -11,7 +11,7 @@ ms.topic: troubleshooting ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 03/02/2022 -ms.author: allensu +ms.author: greglin --- # Troubleshoot Azure Load Balancer backend traffic responses diff --git a/articles/load-balancer/load-balancer-troubleshoot-health-probe-status.md b/articles/load-balancer/load-balancer-troubleshoot-health-probe-status.md index a35d650ff353a..c10aedab4cbb4 100644 --- a/articles/load-balancer/load-balancer-troubleshoot-health-probe-status.md +++ b/articles/load-balancer/load-balancer-troubleshoot-health-probe-status.md @@ -3,7 +3,7 @@ title: Troubleshoot Azure Load Balancer health probe status description: Learn how to troubleshoot known issues with Azure Load Balancer health probe status. services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay manager: dcscontentpm ms.custom: seodoc18 ms.service: load-balancer @@ -11,7 +11,7 @@ ms.topic: troubleshooting ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 12/02/2020 -ms.author: allensu +ms.author: greglin --- # Troubleshoot Azure Load Balancer health probe status diff --git a/articles/load-balancer/load-balancer-troubleshoot.md b/articles/load-balancer/load-balancer-troubleshoot.md index 22790ccc18232..f0ee4fccf5954 100644 --- a/articles/load-balancer/load-balancer-troubleshoot.md +++ b/articles/load-balancer/load-balancer-troubleshoot.md @@ -3,7 +3,7 @@ title: Troubleshoot common issues Azure Load Balancer description: Learn how to troubleshoot common issues with Azure Load Balancer. 
services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay manager: dcscontentpm ms.custom: seodoc18 ms.service: load-balancer @@ -11,7 +11,7 @@ ms.topic: troubleshooting ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 01/28/2020 -ms.author: allensu +ms.author: greglin --- # Troubleshoot Azure Load Balancer diff --git a/articles/load-balancer/manage-inbound-nat-rules.md b/articles/load-balancer/manage-inbound-nat-rules.md index f02e328364084..2ac37221155a7 100644 --- a/articles/load-balancer/manage-inbound-nat-rules.md +++ b/articles/load-balancer/manage-inbound-nat-rules.md @@ -1,8 +1,8 @@ --- title: Manage inbound NAT rules for Azure Load Balancer description: In this article, you'll learn how to add and remove and inbound NAT rule in the Azure portal. -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: how-to ms.date: 03/15/2022 diff --git a/articles/load-balancer/manage-probes-how-to.md b/articles/load-balancer/manage-probes-how-to.md index 17da6460c5b4a..36198f7b470e1 100644 --- a/articles/load-balancer/manage-probes-how-to.md +++ b/articles/load-balancer/manage-probes-how-to.md @@ -1,8 +1,8 @@ --- title: Manage health probes for Azure Load Balancer - Azure portal description: In this article, learn how to manage health probes for Azure Load Balancer using the Azure portal -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: how-to ms.date: 03/02/2022 diff --git a/articles/load-balancer/manage-rules-how-to.md b/articles/load-balancer/manage-rules-how-to.md index 3375934de63e0..a0f45d1bc37db 100644 --- a/articles/load-balancer/manage-rules-how-to.md +++ b/articles/load-balancer/manage-rules-how-to.md @@ -1,8 +1,8 @@ --- title: Manage rules for Azure Load Balancer - Azure portal description: In this article, learn how to manage rules for Azure Load Balancer using the Azure portal -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: how-to ms.date: 08/23/2021 diff --git a/articles/load-balancer/manage.md b/articles/load-balancer/manage.md index c81aa0a6466a8..4a17a8bd71800 100644 --- a/articles/load-balancer/manage.md +++ b/articles/load-balancer/manage.md @@ -1,12 +1,12 @@ --- title: Azure Load Balancer portal settings description: Get started learning about Azure Load Balancer portal settings -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: how-to ms.workload: infrastructure-services ms.date: 08/16/2021 -ms.author: allensu +ms.author: greglin --- # Azure Load Balancer portal settings diff --git a/articles/load-balancer/monitor-load-balancer-reference.md b/articles/load-balancer/monitor-load-balancer-reference.md index d04e5c9de22bf..062e455994e71 100644 --- a/articles/load-balancer/monitor-load-balancer-reference.md +++ b/articles/load-balancer/monitor-load-balancer-reference.md @@ -2,9 +2,9 @@ title: Monitoring Load Balancer data reference titleSuffix: Azure Load Balancer description: Important reference material needed when you monitor Load Balancer -author: asudbring +author: greg-lindsay ms.topic: reference -ms.author: allensu +ms.author: greglin ms.service: load-balancer ms.custom: subject-monitoring ms.date: 06/29/2021 diff --git a/articles/load-balancer/monitor-load-balancer.md b/articles/load-balancer/monitor-load-balancer.md index 3689c71e61351..2b5e018771439 100644 --- 
a/articles/load-balancer/monitor-load-balancer.md +++ b/articles/load-balancer/monitor-load-balancer.md @@ -1,8 +1,8 @@ --- title: Monitoring Azure Load Balancer description: Start here to learn how to monitor load balancer. -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: how-to ms.custom: subject-monitoring, devx-track-azurecli diff --git a/articles/load-balancer/move-across-regions-external-load-balancer-portal.md b/articles/load-balancer/move-across-regions-external-load-balancer-portal.md index 03aded3245651..649a0d00ede6f 100644 --- a/articles/load-balancer/move-across-regions-external-load-balancer-portal.md +++ b/articles/load-balancer/move-across-regions-external-load-balancer-portal.md @@ -1,11 +1,11 @@ --- title: Move an Azure external load balancer to another Azure region by using the Azure portal description: Use an Azure Resource Manager template to move an external load balancer from one Azure region to another by using the Azure portal. -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: how-to ms.date: 09/17/2019 -ms.author: allensu +ms.author: greglin --- # Move an external load balancer to another region by using the Azure portal diff --git a/articles/load-balancer/move-across-regions-external-load-balancer-powershell.md b/articles/load-balancer/move-across-regions-external-load-balancer-powershell.md index dd0c752bcfd86..b842221041cc3 100644 --- a/articles/load-balancer/move-across-regions-external-load-balancer-powershell.md +++ b/articles/load-balancer/move-across-regions-external-load-balancer-powershell.md @@ -1,11 +1,11 @@ --- title: Move Azure external Load Balancer to another Azure region using Azure PowerShell description: Use Azure Resource Manager template to move Azure external Load Balancer from one Azure region to another using Azure PowerShell. 
-author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: how-to ms.date: 09/17/2019 -ms.author: allensu +ms.author: greglin ms.custom: devx-track-azurepowershell --- diff --git a/articles/load-balancer/move-across-regions-internal-load-balancer-portal.md b/articles/load-balancer/move-across-regions-internal-load-balancer-portal.md index eac53168d9674..fe943eff3bd1a 100644 --- a/articles/load-balancer/move-across-regions-internal-load-balancer-portal.md +++ b/articles/load-balancer/move-across-regions-internal-load-balancer-portal.md @@ -1,11 +1,11 @@ --- title: Move Azure internal Load Balancer to another Azure region using the Azure portal description: Use Azure Resource Manager template to move Azure internal Load Balancer from one Azure region to another using the Azure portal -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: how-to ms.date: 09/18/2019 -ms.author: allensu +ms.author: greglin --- # Move Azure internal Load Balancer to another region using the Azure portal diff --git a/articles/load-balancer/move-across-regions-internal-load-balancer-powershell.md b/articles/load-balancer/move-across-regions-internal-load-balancer-powershell.md index cfdfedd0d1eb9..64c55ad26c11b 100644 --- a/articles/load-balancer/move-across-regions-internal-load-balancer-powershell.md +++ b/articles/load-balancer/move-across-regions-internal-load-balancer-powershell.md @@ -1,11 +1,11 @@ --- title: Move Azure internal Load Balancer to another Azure region using Azure PowerShell description: Use Azure Resource Manager template to move Azure internal Load Balancer from one Azure region to another using Azure PowerShell -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: how-to ms.date: 09/17/2019 -ms.author: allensu +ms.author: greglin ms.custom: devx-track-azurepowershell --- diff --git a/articles/load-balancer/outbound-rules.md b/articles/load-balancer/outbound-rules.md index ca16776b64e00..99ee172b971a5 100644 --- a/articles/load-balancer/outbound-rules.md +++ b/articles/load-balancer/outbound-rules.md @@ -2,12 +2,12 @@ title: Outbound rules Azure Load Balancer description: This article explains how to configure outbound rules to control egress of internet traffic with Azure Load Balancer. services: load-balancer -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: conceptual ms.custom: contperf-fy21q1 ms.date: 1/6/2022 -ms.author: allensu +ms.author: greglin --- # Outbound rules Azure Load Balancer diff --git a/articles/load-balancer/powershell-samples.md b/articles/load-balancer/powershell-samples.md index 8a92263b3eb2f..acaaf7e39271b 100644 --- a/articles/load-balancer/powershell-samples.md +++ b/articles/load-balancer/powershell-samples.md @@ -3,14 +3,14 @@ title: Azure PowerShell Samples - Azure Load Balancer description: With these samples, load balance traffic to multiple websites on VMs and traffic to VMs for HA with Azure Load Balancer. 
services: virtual-network documentationcenter: load-balancer -author: asudbring +author: greg-lindsay ms.custom: seodec18 ms.service: load-balancer ms.topic: article ms.tgt_pltfrm: ms.workload: infrastructure ms.date: 12/10/2018 -ms.author: allensu +ms.author: greglin --- # Azure PowerShell Samples for Load Balancer diff --git a/articles/load-balancer/python-samples.md b/articles/load-balancer/python-samples.md index 418c4dd78cb57..74597c9c1a14e 100644 --- a/articles/load-balancer/python-samples.md +++ b/articles/load-balancer/python-samples.md @@ -4,12 +4,12 @@ titleSuffix: Azure Load Balancer description: With these samples, load balance traffic to multiple websites. Deploy load balancers in a HA configuration. services: virtual-network documentationcenter: load-balancer -author: asudbring +author: greg-lindsay ms.custom: seodec18 ms.service: load-balancer ms.topic: article ms.date: 08/20/2021 -ms.author: allensu +ms.author: greglin --- # Python Samples for Azure Load Balancer diff --git a/articles/load-balancer/quickstart-load-balancer-standard-internal-cli.md b/articles/load-balancer/quickstart-load-balancer-standard-internal-cli.md index 736cb2506585e..6e52534333ec5 100644 --- a/articles/load-balancer/quickstart-load-balancer-standard-internal-cli.md +++ b/articles/load-balancer/quickstart-load-balancer-standard-internal-cli.md @@ -2,11 +2,11 @@ title: 'Quickstart: Create an internal load balancer - Azure CLI' titleSuffix: Azure Load Balancer description: This quickstart shows how to create an internal load balancer by using the Azure CLI. -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: quickstart ms.date: 03/23/2022 -ms.author: allensu +ms.author: greglin ms.custom: mvc, devx-track-js, devx-track-azurecli, mode-api #Customer intent: I want to create a load balancer so that I can load balance internal traffic to VMs. --- @@ -24,7 +24,7 @@ This quickstart requires version 2.0.28 or later of the Azure CLI. If you're usi An Azure resource group is a logical container into which you deploy and manage your Azure resources. -Create a resource group with [az group create](/cli/azure/group#az_group_create). +Create a resource group with [az group create](/cli/azure/group#az-group-create). ```azurecli az group create \ @@ -39,7 +39,7 @@ When you create an internal load balancer, a virtual network is configured as th Before you deploy VMs and test your load balancer, create the supporting virtual network and subnet. The virtual network and subnet will contain the resources deployed later in this article. -Create a virtual network by using [az network vnet create](/cli/azure/network/vnet#az_network_vnet_create). +Create a virtual network by using [az network vnet create](/cli/azure/network/vnet#az-network-vnet-create). ```azurecli az network vnet create \ @@ -58,7 +58,7 @@ In this example, you'll create an Azure Bastion host. The Azure Bastion host is ### Create a bastion public IP address -Use [az network public-ip create](/cli/azure/network/public-ip#az_network_public_ip_create) to create a public IP address for the Azure Bastion host. +Use [az network public-ip create](/cli/azure/network/public-ip#az-network-public-ip-create) to create a public IP address for the Azure Bastion host. ```azurecli az network public-ip create \ @@ -69,7 +69,7 @@ az network public-ip create \ ``` ### Create a bastion subnet -Use [az network vnet subnet create](/cli/azure/network/vnet/subnet#az_network_vnet_subnet_create) to create a subnet. 
+Use [az network vnet subnet create](/cli/azure/network/vnet/subnet#az-network-vnet-subnet-create) to create a subnet. ```azurecli az network vnet subnet create \ @@ -81,7 +81,7 @@ az network vnet subnet create \ ### Create the bastion host -Use [az network bastion create](/cli/azure/network/bastion#az_network_bastion_create) to create a host. +Use [az network bastion create](/cli/azure/network/bastion#az-network-bastion-create) to create a host. ```azurecli az network bastion create \ @@ -108,7 +108,7 @@ This section details how you can create and configure the following components o ### Create the load balancer resource -Create an internal load balancer with [az network lb create](/cli/azure/network/lb#az_network_lb_create). +Create an internal load balancer with [az network lb create](/cli/azure/network/lb#az-network-lb-create). ```azurecli az network lb create \ @@ -127,7 +127,7 @@ A health probe checks all virtual machine instances to ensure they can send netw A virtual machine with a failed probe check is removed from the load balancer. The virtual machine is added back into the load balancer when the failure is resolved. -Create a health probe with [az network lb probe create](/cli/azure/network/lb/probe#az_network_lb_probe_create). +Create a health probe with [az network lb probe create](/cli/azure/network/lb/probe#az-network-lb-probe-create). ```azurecli az network lb probe create \ @@ -148,7 +148,7 @@ A load balancer rule defines: * The required source and destination port -Create a load balancer rule with [az network lb rule create](/cli/azure/network/lb/rule#az_network_lb_rule_create). +Create a load balancer rule with [az network lb rule create](/cli/azure/network/lb/rule#az-network-lb-rule-create). ```azurecli az network lb rule create \ @@ -169,7 +169,7 @@ Create a load balancer rule with [az network lb rule create](/cli/azure/network/ For a standard load balancer, the VMs in the backend pool are required to have network interfaces that belong to a network security group. -To create a network security group, use [az network nsg create](/cli/azure/network/nsg#az_network_nsg_create). +To create a network security group, use [az network nsg create](/cli/azure/network/nsg#az-network-nsg-create). ```azurecli az network nsg create \ @@ -179,7 +179,7 @@ To create a network security group, use [az network nsg create](/cli/azure/netwo ## Create a network security group rule -To create a network security group rule, use [az network nsg rule create](/cli/azure/network/nsg/rule#az_network_nsg_rule_create). +To create a network security group rule, use [az network nsg rule create](/cli/azure/network/nsg/rule#az-network-nsg-rule-create). ```azurecli az network nsg rule create \ @@ -206,7 +206,7 @@ In this section, you create: ### Create network interfaces for the virtual machines -Create two network interfaces with [az network nic create](/cli/azure/network/nic#az_network_nic_create). +Create two network interfaces with [az network nic create](/cli/azure/network/nic#az-network-nic-create). ```azurecli array=(myNicVM1 myNicVM2) @@ -223,7 +223,7 @@ Create two network interfaces with [az network nic create](/cli/azure/network/ni ### Create the virtual machines -Create the virtual machines with [az vm create](/cli/azure/vm#az_vm_create). +Create the virtual machines with [az vm create](/cli/azure/vm#az-vm-create). ```azurecli array=(1 2) @@ -246,7 +246,7 @@ It can take a few minutes for the VMs to deploy. 
## Add virtual machines to the backend pool -Add the virtual machines to the backend pool with [az network nic ip-config address-pool add](/cli/azure/network/nic/ip-config/address-pool#az_network_nic_ip_config_address_pool_add). +Add the virtual machines to the backend pool with [az network nic ip-config address-pool add](/cli/azure/network/nic/ip-config/address-pool#az-network-nic-ip-config-address-pool-add). ```azurecli array=(VM1 VM2) @@ -267,7 +267,7 @@ To provide outbound internet access for resources in the backend pool, create a ### Create public IP -Use [az network public-ip create](/cli/azure/network/public-ip#az_network_public_ip_create) to create a single IP for the outbound connectivity. +Use [az network public-ip create](/cli/azure/network/public-ip#az-network-public-ip-create) to create a single IP for the outbound connectivity. ```azurecli az network public-ip create \ @@ -279,7 +279,7 @@ Use [az network public-ip create](/cli/azure/network/public-ip#az_network_public ### Create NAT gateway resource -Use [az network nat gateway create](/cli/azure/network/nat#az_network_nat_gateway_create) to create the NAT gateway resource. The public IP created in the previous step is associated with the NAT gateway. +Use [az network nat gateway create](/cli/azure/network/nat#az-network-nat-gateway-create) to create the NAT gateway resource. The public IP created in the previous step is associated with the NAT gateway. ```azurecli az network nat gateway create \ @@ -291,7 +291,7 @@ Use [az network nat gateway create](/cli/azure/network/nat#az_network_nat_gatewa ### Associate NAT gateway with subnet -Configure the source subnet in virtual network to use a specific NAT gateway resource with [az network vnet subnet update](/cli/azure/network/vnet/subnet#az_network_vnet_subnet_update). +Configure the source subnet in virtual network to use a specific NAT gateway resource with [az network vnet subnet update](/cli/azure/network/vnet/subnet#az-network-vnet-subnet-update). ```azurecli az network vnet subnet update \ @@ -303,7 +303,7 @@ Configure the source subnet in virtual network to use a specific NAT gateway res ## Create test virtual machine -Create the network interface with [az network nic create](/cli/azure/network/nic#az_network_nic_create). +Create the network interface with [az network nic create](/cli/azure/network/nic#az-network-nic-create). ```azurecli az network nic create \ @@ -313,7 +313,7 @@ Create the network interface with [az network nic create](/cli/azure/network/nic --subnet myBackEndSubnet \ --network-security-group myNSG ``` -Create the virtual machine with [az vm create](/cli/azure/vm#az_vm_create). +Create the virtual machine with [az vm create](/cli/azure/vm#az-vm-create). ```azurecli az vm create \ @@ -328,7 +328,7 @@ You might need to wait a few minutes for the virtual machine to deploy. ## Install IIS -Use [az vm extension set](/cli/azure/vm/extension#az_vm_extension_set) to install IIS on the backend virtual machines and set the default website to the computer name. +Use [az vm extension set](/cli/azure/vm/extension#az-vm-extension-set) to install IIS on the backend virtual machines and set the default website to the computer name. 
```azurecli array=(myVM1 myVM2) diff --git a/articles/load-balancer/quickstart-load-balancer-standard-internal-portal.md b/articles/load-balancer/quickstart-load-balancer-standard-internal-portal.md index 701d77b090077..38fd1a0e4ba1b 100644 --- a/articles/load-balancer/quickstart-load-balancer-standard-internal-portal.md +++ b/articles/load-balancer/quickstart-load-balancer-standard-internal-portal.md @@ -3,11 +3,11 @@ title: "Quickstart: Create an internal load balancer - Azure portal" titleSuffix: Azure Load Balancer description: This quickstart shows how to create an internal load balancer by using the Azure portal. services: load-balancer -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: quickstart ms.date: 03/21/2022 -ms.author: allensu +ms.author: greglin ms.custom: mvc, mode-ui #Customer intent: I want to create a internal load balancer so that I can load balance internal traffic to VMs. --- diff --git a/articles/load-balancer/quickstart-load-balancer-standard-internal-powershell.md b/articles/load-balancer/quickstart-load-balancer-standard-internal-powershell.md index 50c2f2b4b22e8..56310276b035e 100644 --- a/articles/load-balancer/quickstart-load-balancer-standard-internal-powershell.md +++ b/articles/load-balancer/quickstart-load-balancer-standard-internal-powershell.md @@ -2,11 +2,11 @@ title: 'Quickstart: Create an internal load balancer - Azure PowerShell' titleSuffix: Azure Load Balancer description: This quickstart shows how to create an internal load balancer using Azure PowerShell -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: quickstart ms.date: 03/24/2022 -ms.author: allensu +ms.author: greglin ms.custom: devx-track-azurepowershell, mode-api #Customer intent: I want to create a load balancer so that I can load balance internal traffic to VMs. --- diff --git a/articles/load-balancer/quickstart-load-balancer-standard-internal-template.md b/articles/load-balancer/quickstart-load-balancer-standard-internal-template.md index 5efb877cad7d6..5ef1b754aef5a 100644 --- a/articles/load-balancer/quickstart-load-balancer-standard-internal-template.md +++ b/articles/load-balancer/quickstart-load-balancer-standard-internal-template.md @@ -2,11 +2,11 @@ title: 'Quickstart: Create an internal load balancer by using a template' description: This quickstart shows how to create an internal Azure load balancer by using an Azure Resource Manager template (ARM template). services: load-balancer -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: quickstart ms.custom: subject-armqs, mode-arm -ms.author: allensu +ms.author: greglin ms.date: 09/14/2020 --- diff --git a/articles/load-balancer/quickstart-load-balancer-standard-public-cli.md b/articles/load-balancer/quickstart-load-balancer-standard-public-cli.md index e0cdf0cc6822b..2e3cb96585c22 100644 --- a/articles/load-balancer/quickstart-load-balancer-standard-public-cli.md +++ b/articles/load-balancer/quickstart-load-balancer-standard-public-cli.md @@ -2,11 +2,11 @@ title: "Quickstart: Create a public load balancer - Azure CLI" titleSuffix: Azure Load Balancer description: This quickstart shows how to create a public load balancer using the Azure CLI -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: quickstart ms.date: 03/16/2022 -ms.author: allensu +ms.author: greglin ms.custom: mvc, devx-track-js, devx-track-azurecli, mode-api #Customer intent: I want to create a load balancer so that I can load balance internet traffic to VMs. 
--- diff --git a/articles/load-balancer/quickstart-load-balancer-standard-public-portal.md b/articles/load-balancer/quickstart-load-balancer-standard-public-portal.md index 87e55fd21fea6..4ee45e830e3cd 100644 --- a/articles/load-balancer/quickstart-load-balancer-standard-public-portal.md +++ b/articles/load-balancer/quickstart-load-balancer-standard-public-portal.md @@ -2,11 +2,11 @@ title: "Quickstart: Create a public load balancer - Azure portal" titleSuffix: Azure Load Balancer description: This quickstart shows how to create a load balancer by using the Azure portal. -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: quickstart ms.date: 03/16/2022 -ms.author: allensu +ms.author: greglin ms.custom: mvc, mode-ui #Customer intent: I want to create a load balancer so that I can load balance internet traffic to VMs. --- diff --git a/articles/load-balancer/quickstart-load-balancer-standard-public-powershell.md b/articles/load-balancer/quickstart-load-balancer-standard-public-powershell.md index dbd77317c0a73..035335ee27b42 100644 --- a/articles/load-balancer/quickstart-load-balancer-standard-public-powershell.md +++ b/articles/load-balancer/quickstart-load-balancer-standard-public-powershell.md @@ -2,8 +2,8 @@ title: 'Quickstart: Create a public load balancer - Azure PowerShell' titleSuffix: Azure Load Balancer description: This quickstart shows how to create a load balancer using Azure PowerShell -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.date: 03/17/2022 ms.topic: quickstart ms.service: load-balancer diff --git a/articles/load-balancer/quickstart-load-balancer-standard-public-template.md b/articles/load-balancer/quickstart-load-balancer-standard-public-template.md index d1643a07567e9..0c03fb5826320 100644 --- a/articles/load-balancer/quickstart-load-balancer-standard-public-template.md +++ b/articles/load-balancer/quickstart-load-balancer-standard-public-template.md @@ -4,14 +4,14 @@ titleSuffix: Azure Load Balancer description: This quickstart shows how to create a load balancer by using an Azure Resource Manager template. services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay manager: KumudD ms.service: load-balancer ms.topic: quickstart ms.tgt_pltfrm: na ms.workload: infrastructure-services ms.date: 12/09/2020 -ms.author: allensu +ms.author: greglin ms.custom: mvc, subject-armqs, mode-arm #Customer intent: I want to create a load balancer by using an Azure Resource Manager template so that I can load balance internet traffic to VMs. 
--- diff --git a/articles/load-balancer/scripts/load-balancer-linux-cli-load-balance-multiple-websites-vm.md b/articles/load-balancer/scripts/load-balancer-linux-cli-load-balance-multiple-websites-vm.md index e1c2eb7fc85a2..31574356f4b87 100644 --- a/articles/load-balancer/scripts/load-balancer-linux-cli-load-balance-multiple-websites-vm.md +++ b/articles/load-balancer/scripts/load-balancer-linux-cli-load-balance-multiple-websites-vm.md @@ -2,13 +2,13 @@ title: Load balance multiple websites - Azure CLI - Azure Load Balancer description: This Azure CLI script example shows how to load balance multiple websites to the same virtual machine documentationcenter: load-balancer -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.devlang: azurecli ms.topic: sample ms.workload: infrastructure ms.date: 03/04/2022 -ms.author: allensu +ms.author: greglin ms.custom: devx-track-azurecli --- diff --git a/articles/load-balancer/scripts/load-balancer-linux-cli-sample-nlb.md b/articles/load-balancer/scripts/load-balancer-linux-cli-sample-nlb.md index 287cc70b9111e..ad40fa23a00e6 100644 --- a/articles/load-balancer/scripts/load-balancer-linux-cli-sample-nlb.md +++ b/articles/load-balancer/scripts/load-balancer-linux-cli-sample-nlb.md @@ -3,14 +3,14 @@ title: Load balance traffic to VMs for HA - Azure CLI - Azure Load Balancer description: This Azure CLI script example shows how to load balance traffic to VMs for high availability services: load-balancer documentationcenter: load-balancer -author: asudbring +author: greg-lindsay manager: kumudD ms.service: load-balancer ms.devlang: azurecli ms.topic: sample ms.workload: infrastructure ms.date: 03/04/2022 -ms.author: allensu +ms.author: greglin ms.custom: devx-track-azurecli --- diff --git a/articles/load-balancer/scripts/load-balancer-linux-cli-sample-zonal-frontend.md b/articles/load-balancer/scripts/load-balancer-linux-cli-sample-zonal-frontend.md index e43d3307451d8..007f5712f2d4f 100644 --- a/articles/load-balancer/scripts/load-balancer-linux-cli-sample-zonal-frontend.md +++ b/articles/load-balancer/scripts/load-balancer-linux-cli-sample-zonal-frontend.md @@ -3,7 +3,7 @@ title: Load balance VMs within a zone - Azure CLI description: This Azure CLI script example shows how to load balance traffic to VMs within a specific availability zone services: load-balancer documentationcenter: load-balancer -author: asudbring +author: greg-lindsay manager: kumudD # Customer intent: As an IT administrator, I want to create a load balancer that load balances incoming internet traffic to virtual machines within a specific zone in a region. 
ms.assetid: @@ -13,7 +13,7 @@ ms.topic: sample ms.tgt_pltfrm: ms.workload: infrastructure ms.date: 03/04/2022 -ms.author: allensu +ms.author: greglin ms.custom: devx-track-azurecli --- diff --git a/articles/load-balancer/scripts/load-balancer-linux-cli-sample-zone-redundant-frontend.md b/articles/load-balancer/scripts/load-balancer-linux-cli-sample-zone-redundant-frontend.md index dea4343a83119..a84d9f23e4b71 100644 --- a/articles/load-balancer/scripts/load-balancer-linux-cli-sample-zone-redundant-frontend.md +++ b/articles/load-balancer/scripts/load-balancer-linux-cli-sample-zone-redundant-frontend.md @@ -2,14 +2,14 @@ title: Load balance VMs across availability zones - Azure CLI - Azure Load Balancer description: This Azure CLI script example shows how to load balance traffic to VMs across availability zones documentationcenter: load-balancer -author: asudbring +author: greg-lindsay # Customer intent: As an IT administrator, I want to create a load balancer that load balances incoming internet traffic to virtual machines across availability zones in a region. ms.service: load-balancer ms.devlang: azurecli ms.topic: sample ms.workload: infrastructure ms.date: 06/14/2018 -ms.author: allensu +ms.author: greglin ms.custom: devx-track-azurecli --- diff --git a/articles/load-balancer/scripts/load-balancer-windows-powershell-load-balance-multiple-websites-vm.md b/articles/load-balancer/scripts/load-balancer-windows-powershell-load-balance-multiple-websites-vm.md index ec9009585c6ef..fd8e7970ab171 100644 --- a/articles/load-balancer/scripts/load-balancer-windows-powershell-load-balance-multiple-websites-vm.md +++ b/articles/load-balancer/scripts/load-balancer-windows-powershell-load-balance-multiple-websites-vm.md @@ -2,13 +2,13 @@ title: Load balance multiple websites - Azure PowerShell - Azure Load Balancer description: This Azure PowerShell script example hows how to load balance multiple websites to the same virtual machine documentationcenter: load-balancer -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.devlang: powershell ms.topic: sample ms.workload: infrastructure ms.date: 04/20/2018 -ms.author: allensu +ms.author: greglin ms.custom: devx-track-azurepowershell --- diff --git a/articles/load-balancer/scripts/load-balancer-windows-powershell-sample-nlb.md b/articles/load-balancer/scripts/load-balancer-windows-powershell-sample-nlb.md index f1231649c7bc8..b5d3e6024ea89 100644 --- a/articles/load-balancer/scripts/load-balancer-windows-powershell-sample-nlb.md +++ b/articles/load-balancer/scripts/load-balancer-windows-powershell-sample-nlb.md @@ -4,14 +4,14 @@ titleSuffix: Azure Load Balancer description: This Azure PowerShell Script Example shows how to load balance traffic to VMs for high availability services: load-balancer documentationcenter: load-balancer -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.devlang: powershell ms.topic: sample ms.tgt_pltfrm: ms.workload: infrastructure ms.date: 04/20/2018 -ms.author: allensu +ms.author: greglin ms.custom: devx-track-azurepowershell --- diff --git a/articles/load-balancer/skus.md b/articles/load-balancer/skus.md index 0bd33374e9dad..3c666e3cfcb36 100644 --- a/articles/load-balancer/skus.md +++ b/articles/load-balancer/skus.md @@ -3,13 +3,13 @@ title: Azure Load Balancer SKUs description: Overview of Azure Load Balancer SKUs services: load-balancer documentationcenter: na -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: reference ms.tgt_pltfrm: na ms.workload: 
infrastructure-services ms.date: 12/22/2021 -ms.author: allensu +ms.author: greglin --- # Azure Load Balancer SKUs diff --git a/articles/load-balancer/troubleshoot-load-balancer-imds.md b/articles/load-balancer/troubleshoot-load-balancer-imds.md index 2ee86b3434b1a..235b50775b096 100644 --- a/articles/load-balancer/troubleshoot-load-balancer-imds.md +++ b/articles/load-balancer/troubleshoot-load-balancer-imds.md @@ -3,11 +3,11 @@ title: Common error codes for Azure Instance Metadata Service (IMDS) titleSuffix: Azure Load Balancer description: Overview of common error codes and corresponding mitigation methods for Azure Instance Metadata Service (IMDS) services: load-balancer -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: troubleshooting ms.date: 02/12/2021 -ms.author: allensu +ms.author: greglin --- # Error codes: Common error codes when using IMDS to retrieve load balancer information diff --git a/articles/load-balancer/troubleshoot-outbound-connection.md b/articles/load-balancer/troubleshoot-outbound-connection.md index 8ee6972731db6..c8138e71f8c04 100644 --- a/articles/load-balancer/troubleshoot-outbound-connection.md +++ b/articles/load-balancer/troubleshoot-outbound-connection.md @@ -3,11 +3,11 @@ title: Troubleshoot SNAT exhaustion and connection timeouts titleSuffix: Azure Load Balancer description: Resolutions for common problems with outbound connectivity with Azure Load Balancer. services: load-balancer -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: troubleshooting ms.date: 04/21/2022 -ms.author: allensu +ms.author: greglin --- # Troubleshoot SNAT exhaustion and connection timeouts diff --git a/articles/load-balancer/tutorial-add-lb-existing-scale-set-portal.md b/articles/load-balancer/tutorial-add-lb-existing-scale-set-portal.md index 819f1e2e70f0b..fd6e452f3a917 100644 --- a/articles/load-balancer/tutorial-add-lb-existing-scale-set-portal.md +++ b/articles/load-balancer/tutorial-add-lb-existing-scale-set-portal.md @@ -1,8 +1,8 @@ --- title: 'Tutorial: Add Azure Load Balancer to an existing virtual machine scale set - Azure portal' description: In this tutorial, learn how to add a load balancer to existing virtual machine scale set using the Azure portal. -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: tutorial ms.date: 4/21/2021 diff --git a/articles/load-balancer/tutorial-cross-region-cli.md b/articles/load-balancer/tutorial-cross-region-cli.md index 6dd45b9ad78c8..d0081eb8cb34f 100644 --- a/articles/load-balancer/tutorial-cross-region-cli.md +++ b/articles/load-balancer/tutorial-cross-region-cli.md @@ -2,8 +2,8 @@ title: 'Tutorial: Create a cross-region load balancer using Azure CLI' titleSuffix: Azure Load Balancer description: Get started with this tutorial deploying a cross-region Azure Load Balancer using Azure CLI. 
-author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: tutorial ms.date: 03/04/2021 diff --git a/articles/load-balancer/tutorial-cross-region-portal.md b/articles/load-balancer/tutorial-cross-region-portal.md index 53501d42b2421..d1c7a5324fcec 100644 --- a/articles/load-balancer/tutorial-cross-region-portal.md +++ b/articles/load-balancer/tutorial-cross-region-portal.md @@ -2,8 +2,8 @@ title: 'Tutorial: Create a cross-region load balancer using the Azure portal' titleSuffix: Azure Load Balancer description: Get started with this tutorial deploying a cross-region Azure Load Balancer with the Azure portal. -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: tutorial ms.date: 08/02/2021 diff --git a/articles/load-balancer/tutorial-cross-region-powershell.md b/articles/load-balancer/tutorial-cross-region-powershell.md index 355f078802fb3..7807d4a9e5e97 100644 --- a/articles/load-balancer/tutorial-cross-region-powershell.md +++ b/articles/load-balancer/tutorial-cross-region-powershell.md @@ -2,8 +2,8 @@ title: 'Tutorial: Create a cross-region load balancer using Azure PowerShell' titleSuffix: Azure Load Balancer description: Get started with this tutorial deploying a cross-region Azure Load Balancer using Azure PowerShell. -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: tutorial ms.date: 02/10/2021 diff --git a/articles/load-balancer/tutorial-gateway-cli.md b/articles/load-balancer/tutorial-gateway-cli.md index 23ea05598cf1c..1a34eb397a25b 100644 --- a/articles/load-balancer/tutorial-gateway-cli.md +++ b/articles/load-balancer/tutorial-gateway-cli.md @@ -2,8 +2,8 @@ title: 'Tutorial: Create a gateway load balancer - Azure CLI' titleSuffix: Azure Load Balancer description: Use this tutorial to learn how to create a gateway load balancer using the Azure CLI. -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: tutorial ms.date: 11/02/2021 diff --git a/articles/load-balancer/tutorial-gateway-portal.md b/articles/load-balancer/tutorial-gateway-portal.md index 2c57f6a57cf38..42365184f16a6 100644 --- a/articles/load-balancer/tutorial-gateway-portal.md +++ b/articles/load-balancer/tutorial-gateway-portal.md @@ -2,8 +2,8 @@ title: 'Tutorial: Create a gateway load balancer - Azure portal' titleSuffix: Azure Load Balancer description: Use this tutorial to learn how to create a gateway load balancer using the Azure portal. -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: tutorial ms.date: 12/03/2021 diff --git a/articles/load-balancer/tutorial-gateway-powershell.md b/articles/load-balancer/tutorial-gateway-powershell.md index 745787b3bfc32..4965be3295e60 100644 --- a/articles/load-balancer/tutorial-gateway-powershell.md +++ b/articles/load-balancer/tutorial-gateway-powershell.md @@ -2,8 +2,8 @@ title: 'Tutorial: Create a gateway load balancer - Azure PowerShell' titleSuffix: Azure Load Balancer description: Use this tutorial to learn how to create a gateway load balancer using Azure PowerShell. 
-author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: tutorial ms.date: 11/17/2021 diff --git a/articles/load-balancer/tutorial-load-balancer-ip-backend-portal.md b/articles/load-balancer/tutorial-load-balancer-ip-backend-portal.md index 0713186066f2f..a964dd121714a 100644 --- a/articles/load-balancer/tutorial-load-balancer-ip-backend-portal.md +++ b/articles/load-balancer/tutorial-load-balancer-ip-backend-portal.md @@ -2,8 +2,8 @@ title: 'Tutorial: Create a public load balancer with an IP-based backend - Azure portal' titleSuffix: Azure Load Balancer description: In this tutorial, learn how to create a public load balancer with an IP based backend pool. -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: tutorial ms.date: 08/06/2021 diff --git a/articles/load-balancer/tutorial-load-balancer-port-forwarding-portal.md b/articles/load-balancer/tutorial-load-balancer-port-forwarding-portal.md index bf1ca1855f95a..60ba218ce0cdd 100644 --- a/articles/load-balancer/tutorial-load-balancer-port-forwarding-portal.md +++ b/articles/load-balancer/tutorial-load-balancer-port-forwarding-portal.md @@ -2,8 +2,8 @@ title: "Tutorial: Create a single virtual machine inbound NAT rule - Azure portal" titleSuffix: Azure Load Balancer description: This tutorial shows how to configure port forwarding using Azure Load Balancer to create a connection to a single virtual machine in an Azure virtual network. -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: tutorial ms.date: 03/08/2022 diff --git a/articles/load-balancer/tutorial-load-balancer-standard-public-zonal-portal.md b/articles/load-balancer/tutorial-load-balancer-standard-public-zonal-portal.md index f6839e1bc2032..0765559f7d490 100644 --- a/articles/load-balancer/tutorial-load-balancer-standard-public-zonal-portal.md +++ b/articles/load-balancer/tutorial-load-balancer-standard-public-zonal-portal.md @@ -3,12 +3,12 @@ title: "Tutorial: Load balance VMs within an availability zone - Azure portal" titleSuffix: Azure Load Balancer description: This tutorial demonstrates how to create a Standard Load Balancer with zonal frontend to load balance VMs within an availability zone by using Azure portal services: load-balancer -author: asudbring +author: greg-lindsay # Customer intent: As an IT administrator, I want to create a load balancer that load balances incoming internet traffic to virtual machines within a specific zone in a region. ms.service: load-balancer ms.topic: tutorial ms.date: 08/15/2021 -ms.author: allensu +ms.author: greglin ms.custom: seodec18 --- diff --git a/articles/load-balancer/tutorial-multi-availability-sets-portal.md b/articles/load-balancer/tutorial-multi-availability-sets-portal.md index 7791c178fce8a..c7bdb6d0b7be5 100644 --- a/articles/load-balancer/tutorial-multi-availability-sets-portal.md +++ b/articles/load-balancer/tutorial-multi-availability-sets-portal.md @@ -2,8 +2,8 @@ title: 'Tutorial: Create a load balancer with more than one availability set in the backend pool - Azure portal' titleSuffix: Azure Load Balancer description: In this tutorial, deploy an Azure Load Balancer with more than one availability set in the backend pool. 
-author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: tutorial ms.date: 05/09/2022 diff --git a/articles/load-balancer/tutorial-nat-rule-multi-instance-portal.md b/articles/load-balancer/tutorial-nat-rule-multi-instance-portal.md index aec8c83e4cfff..dc45670ce60b0 100644 --- a/articles/load-balancer/tutorial-nat-rule-multi-instance-portal.md +++ b/articles/load-balancer/tutorial-nat-rule-multi-instance-portal.md @@ -2,8 +2,8 @@ title: "Tutorial: Create a multiple virtual machines inbound NAT rule - Azure portal" titleSuffix: Azure Load Balancer description: This tutorial shows how to configure port forwarding using Azure Load Balancer to create a connection to multiple virtual machines in an Azure virtual network. -author: asudbring -ms.author: allensu +author: greg-lindsay +ms.author: greglin ms.service: load-balancer ms.topic: tutorial ms.date: 03/10/2022 diff --git a/articles/load-balancer/upgrade-basic-standard.md b/articles/load-balancer/upgrade-basic-standard.md index 91013e4920525..08c3b07fd7e28 100644 --- a/articles/load-balancer/upgrade-basic-standard.md +++ b/articles/load-balancer/upgrade-basic-standard.md @@ -3,11 +3,11 @@ title: Upgrade a basic to standard public load balancer titleSuffix: Azure Load Balancer description: This article shows you how to upgrade a public load balancer from basic to standard SKU. services: load-balancer -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: how-to ms.date: 03/17/2022 -ms.author: allensu +ms.author: greglin --- # Upgrade from a basic public to standard public load balancer @@ -39,7 +39,7 @@ An Azure PowerShell script is available that does the following procedures: * If the load balancer doesn't have a frontend IP configuration or backend pool, you'll encounter an error running the script. Ensure the load balancer has a frontend IP and backend pool -* The script cannot migrate Virtual Machine Scale Set from Basic Load Balancer's backend to Standard Load Balancer's backend. We recommend manually creating a Standard Load Balancer and follow [Update or delete a load balancer used by virtual machine scale sets](https://docs.microsoft.com/azure/load-balancer/update-load-balancer-with-vm-scale-set) to complete the migration. +* The script cannot migrate Virtual Machine Scale Set from Basic Load Balancer's backend to Standard Load Balancer's backend. We recommend manually creating a Standard Load Balancer and follow [Update or delete a load balancer used by virtual machine scale sets](update-load-balancer-with-vm-scale-set.md) to complete the migration. ### Change allocation method of the public IP address to static diff --git a/articles/load-balancer/upgrade-basicInternal-standard.md b/articles/load-balancer/upgrade-basicInternal-standard.md index 5063113f6a22d..8d8471e53af3e 100644 --- a/articles/load-balancer/upgrade-basicInternal-standard.md +++ b/articles/load-balancer/upgrade-basicInternal-standard.md @@ -27,7 +27,7 @@ This article introduces a PowerShell script that creates a Standard Load Balance * The Basic Load Balancer needs to be in the same resource group as the backend VMs and NICs. * If the Standard load balancer is created in a different region, you won’t be able to associate the VMs existing in the old region to the newly created Standard Load Balancer. To work around this limitation, make sure to create a new VM in the new region. 
* If your Load Balancer does not have any frontend IP configuration or backend pool, you are likely to hit an error running the script. Make sure they are not empty. -* The script cannot migrate Virtual Machine Scale Set from Basic Load Balancer's backend to Standard Load Balancer's backend. We recommend manually creating a Standard Load Balancer and follow [Update or delete a load balancer used by virtual machine scale sets](https://docs.microsoft.com/azure/load-balancer/update-load-balancer-with-vm-scale-set) to complete the migration. +* The script cannot migrate Virtual Machine Scale Set from Basic Load Balancer's backend to Standard Load Balancer's backend. We recommend manually creating a Standard Load Balancer and follow [Update or delete a load balancer used by virtual machine scale sets](./update-load-balancer-with-vm-scale-set.md) to complete the migration. ## Change IP allocation method to Static for frontend IP Configuration (Ignore this step if it's already static) @@ -98,4 +98,4 @@ Yes it migrates traffic. If you would like to migrate traffic personally, use [t ## Next steps -[Learn about Standard Load Balancer](load-balancer-overview.md) +[Learn about Standard Load Balancer](load-balancer-overview.md) \ No newline at end of file diff --git a/articles/load-balancer/upgrade-internalbasic-to-publicstandard.md b/articles/load-balancer/upgrade-internalbasic-to-publicstandard.md index ae9d06c628b3d..8dac7ee2acb9c 100644 --- a/articles/load-balancer/upgrade-internalbasic-to-publicstandard.md +++ b/articles/load-balancer/upgrade-internalbasic-to-publicstandard.md @@ -2,11 +2,11 @@ title: Upgrade an internal basic load balancer - Outbound connections required titleSuffix: Azure Load Balancer description: Learn how to upgrade a basic internal load balancer to a standard public load balancer. -author: asudbring +author: greg-lindsay ms.service: load-balancer ms.topic: how-to ms.date: 03/17/2022 -ms.author: allensu +ms.author: greglin --- # Upgrade an internal basic load balancer - Outbound connections required @@ -43,7 +43,7 @@ An Azure PowerShell script is available that does the following procedures: * If the load balancer doesn't have a frontend IP configuration or backend pool, you'll encounter an error running the script. Ensure the load balancer has a frontend IP and backend pool -* The script cannot migrate Virtual Machine Scale Set from Basic Load Balancer's backend to Standard Load Balancer's backend. We recommend manually creating a Standard Load Balancer and follow [Update or delete a load balancer used by virtual machine scale sets](https://docs.microsoft.com/azure/load-balancer/update-load-balancer-with-vm-scale-set) to complete the migration. +* The script cannot migrate Virtual Machine Scale Set from Basic Load Balancer's backend to Standard Load Balancer's backend. We recommend manually creating a Standard Load Balancer and follow [Update or delete a load balancer used by virtual machine scale sets](update-load-balancer-with-vm-scale-set.md) to complete the migration. 
## Download the script diff --git a/articles/load-testing/how-to-configure-customer-managed-keys.md b/articles/load-testing/how-to-configure-customer-managed-keys.md index f8fb08d7b63ef..1307b5a87b30a 100644 --- a/articles/load-testing/how-to-configure-customer-managed-keys.md +++ b/articles/load-testing/how-to-configure-customer-managed-keys.md @@ -14,7 +14,7 @@ ms.topic: how-to Azure Load Testing Preview automatically encrypts all data stored in your load testing resource with keys that Microsoft provides (service-managed keys). Optionally, you can add a second layer of security by also providing your own (customer-managed) keys. Customer-managed keys offer greater flexibility for controlling access and using key-rotation policies. -The keys you provide are stored securely using [Azure Key Vault](/azure/key-vault/general/overview). You can create a separate key for each Azure Load Testing resource you enable with customer-managed keys. +The keys you provide are stored securely using [Azure Key Vault](../key-vault/general/overview.md). You can create a separate key for each Azure Load Testing resource you enable with customer-managed keys. Azure Load Testing uses the customer-managed key to encrypt the following data in the load testing resource: @@ -46,7 +46,7 @@ You have to set the **Soft Delete** and **Purge Protection** properties on your # [Azure portal](#tab/portal) -To learn how to create a key vault with the Azure portal, see [Create a key vault using the Azure portal](/azure/key-vault/general/quick-create-portal). When you create the key vault, select **Enable purge protection**, as shown in the following image. +To learn how to create a key vault with the Azure portal, see [Create a key vault using the Azure portal](../key-vault/general/quick-create-portal.md). When you create the key vault, select **Enable purge protection**, as shown in the following image. :::image type="content" source="media/how-to-configure-customer-managed-keys/purge-protection-on-azure-key-vault.png" alt-text="Screenshot that shows how to enable purge protection on a new key vault."::: @@ -69,7 +69,7 @@ $keyVault = New-AzKeyVault -Name ` -EnablePurgeProtection ``` -To learn how to enable purge protection on an existing key vault with PowerShell, see [Azure Key Vault recovery overview](/azure/key-vault/general/key-vault-recovery?tabs=azure-powershell). +To learn how to enable purge protection on an existing key vault with PowerShell, see [Azure Key Vault recovery overview](../key-vault/general/key-vault-recovery.md?tabs=azure-powershell). # [Azure CLI](#tab/azure-cli) @@ -83,17 +83,17 @@ az keyvault create \ --enable-purge-protection ``` -To learn how to enable purge protection on an existing key vault with Azure CLI, see [Azure Key Vault recovery overview](/azure/key-vault/general/key-vault-recovery?tabs=azure-cli). +To learn how to enable purge protection on an existing key vault with Azure CLI, see [Azure Key Vault recovery overview](../key-vault/general/key-vault-recovery.md?tabs=azure-cli). --- ## Add a key -Next, add a key to the key vault. Azure Load Testing encryption supports RSA keys. For more information about supported key types, see [About keys](/azure/key-vault/keys/about-keys). +Next, add a key to the key vault. Azure Load Testing encryption supports RSA keys. For more information about supported key types, see [About keys](../key-vault/keys/about-keys.md). 
# [Azure portal](#tab/portal) -To learn how to add a key with the Azure portal, see [Set and retrieve a key from Azure Key Vault using the Azure portal](/azure/key-vault/keys/quick-create-portal). +To learn how to add a key with the Azure portal, see [Set and retrieve a key from Azure Key Vault using the Azure portal](../key-vault/keys/quick-create-portal.md). # [PowerShell](#tab/powershell) @@ -149,7 +149,7 @@ To configure customer-managed keys for a new Azure Load Testing resource, follow 1. In the Azure portal, navigate to the **Azure Load Testing** page, and select the **Create** button to create a new resource. -1. Follow the steps outlined in [create an Azure Load Testing resource](/azure/load-testing/quickstart-create-and-run-load-test#create_resource) to fill out the fields on the **Basics** tab. +1. Follow the steps outlined in [create an Azure Load Testing resource](./quickstart-create-and-run-load-test.md#create_resource) to fill out the fields on the **Basics** tab. 1. Go to the **Encryption** tab. In the **Encryption type** field, select **Customer-managed keys (CMK)**. @@ -268,7 +268,7 @@ You can change the managed identity for customer-managed keys for an existing Az 1. If the encryption type is **Customer-managed keys**, select the type of identity to use to authenticate to the key vault. The options include **System-assigned** (the default) or **User-assigned**. - To learn more about each type of managed identity, see [Managed identity types](/azure/active-directory/managed-identities-azure-resources/overview#managed-identity-types). + To learn more about each type of managed identity, see [Managed identity types](../active-directory/managed-identities-azure-resources/overview.md#managed-identity-types). - If you select System-assigned, the system-assigned managed identity needs to be enabled on the resource and granted access to the AKV before changing the identity for customer-managed keys. - If you select **User-assigned**, you must select an existing user-assigned identity that has permissions to access the key vault. To learn how to create a user-assigned identity, see [Use managed identities for Azure Load Testing Preview](how-to-use-a-managed-identity.md). @@ -321,4 +321,4 @@ When you revoke the encryption key you may be able to run tests for about 10 min ## Next steps - Learn how to [Monitor server-side application metrics](./how-to-monitor-server-side-metrics.md). -- Learn how to [Parameterize a load test](./how-to-parameterize-load-tests.md). +- Learn how to [Parameterize a load test](./how-to-parameterize-load-tests.md). \ No newline at end of file diff --git a/articles/load-testing/how-to-create-manage-test.md b/articles/load-testing/how-to-create-manage-test.md new file mode 100644 index 0000000000000..1a146f0b35d0d --- /dev/null +++ b/articles/load-testing/how-to-create-manage-test.md @@ -0,0 +1,173 @@ +--- +title: Create and manage tests +titleSuffix: Azure Load Testing +description: 'Learn how to create and manage tests in your Azure Load Testing Preview resource.' +services: load-testing +ms.service: load-testing +ms.author: nicktrog +author: ntrogh +ms.date: 05/30/2022 +ms.topic: how-to +--- + + +# Create and manage tests in Azure Load Testing Preview + +Learn how to create and manage [tests](./concept-load-testing-concepts.md#test) in your Azure Load Testing Preview resource. + +> [!IMPORTANT] +> Azure Load Testing is currently in preview. 
For legal terms that apply to Azure features that are in beta, in preview, or otherwise not yet released into general availability, see the [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). + +## Prerequisites + +* An Azure account with an active subscription. If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. +* An Azure Load Testing resource. To create a Load Testing resource, see [Create and run a load test](./quickstart-create-and-run-load-test.md#create_resource). + +## Create a test + +There are two options to create a load test for an Azure Load Testing resource in the Azure portal: + +- Create a quick test by using a web application URL. +- Create a test by uploading a JMeter test script (JMX). + +:::image type="content" source="media/how-to-create-manage-test/create-test-dropdown.png" alt-text="Screenshot that shows the options to create a new test in the Azure portal."::: + +### Create a quick test by using a URL + +To load test a single web endpoint, use the quick test experience in the Azure portal. Specify the application endpoint URL and basic load parameters to create and run a load test. For more information, see our [quickstart for creating and running a test by using a URL](./quickstart-create-and-run-load-test.md). + +1. In the [Azure portal](https://portal.azure.com), go to your Azure Load Testing resource. + +1. Select **Quick test** on the **Overview** page. + + Alternatively, select **Tests** in the left pane, select **+ Create**, and then select **Create a quick test**. + +1. Enter the URL and load parameters. + + :::image type="content" source="media/how-to-create-manage-test/create-quick-test.png" alt-text="Screenshot that shows the page for creating a quick test in the Azure portal."::: + +1. Select **Run test** to start the load test. + + Azure Load Testing automatically generates a JMeter test script, and configures your test to scale across multiple test engines, based on your load parameters. + + You can edit the test configuration at any time after creating it. For example, to [monitor server-side metrics](./how-to-monitor-server-side-metrics.md), [configure high scale load](./how-to-high-scale-load.md), or to edit the generated JMX file. + +### Create a test by using a JMeter script + +To reuse an existing JMeter test script, or for more advanced test scenarios, create a test by uploading a JMX file. For example, to [read data from a CSV input file](./how-to-read-csv-data.md), or to [configure JMeter user properties](./how-to-configure-user-properties.md). + +1. In the [Azure portal](https://portal.azure.com), go to your Azure Load Testing resource. + +1. Select **Create** on the **Overview** page. + + Alternatively, select **Tests** in the left pane, select **+ Create**, and then select **Upload a JMeter script**. + +1. On the **Basics** page, enter the basic test information. + + If you select **Run test after creation**, the test will start automatically. You can start your test manually at any time after creating it. + + :::image type="content" source="media/how-to-create-manage-test/create-jmeter-test.png" alt-text="Screenshot that shows the page for creating a test with a J Meter script in the Azure portal."::: + +## Test plan + +The test plan contains all files that are needed for running your load test. At a minimum, the test plan should contain one `*.jmx` JMeter script.
Azure Load Testing only supports one JMX file per load test. In addition, you can include a user property file, configuration files, or input data files. + +1. Go to the **Test plan** page. +1. Select all files from your local machine, and upload them to Azure. + + :::image type="content" source="media/how-to-create-manage-test/test-plan-upload-files.png" alt-text="Screenshot that shows the test plan page for creating a test in the Azure portal, highlighting the upload functionality."::: + + +If you've previously created a quick test, you can edit the test plan at any time. You can add files to the test plan, or download and edit the generated JMeter script. Download a file by selecting the file name in the list. + +### Split CSV input data across test engines + +By default, Azure Load Testing copies and processes your input files unmodified across all test engine instances. Azure Load Testing enables you to split the CSV input data evenly across all engine instances. If you have multiple CSV files, each file will be split evenly. + +For example, if you have a large customer CSV input file, and the load test runs on 10 parallel test engines, then each instance will process 1/10th of the customers. + +Azure Load Testing doesn't preserve the header row in your CSV file when splitting a CSV file. For more information about how to configure your JMeter script and CSV file, see [Read data from a CSV file](./how-to-read-csv-data.md). + +:::image type="content" source="media/how-to-create-manage-test/configure-test-split-csv.png" alt-text="Screenshot that shows the checkbox to enable splitting input C S V files when configuring a test in the Azure portal."::: + +## Parameters + +You can use parameters to make your test plan configurable. Specify key-value pairs in the load test configuration, and then reference their value in the JMeter script by using the parameter name. + +There are two types of parameters: + +- Environment variables. For example, to specify the domain name of the web application. +- Secrets, backed by Azure Key Vault. For example, to pass an authentication token in an HTTP request. + +You can specify the managed identity to use for accessing your key vault. + +For more information, see [Parameterize a load test with environment variables and secrets](./how-to-parameterize-load-tests.md). + +:::image type="content" source="media/how-to-create-manage-test/configure-parameters.png" alt-text="Screenshot that shows how to configure parameters when creating a test in the Azure portal."::: + +## Load + +Configure the number of test engine instances, and Azure Load Testing automatically scales your load test across all instances. You configure the number of virtual users, or threads, in the JMeter script; the engine instances then run the script in parallel. For more information, see [Configure a test for high-scale load](./how-to-high-scale-load.md). + +:::image type="content" source="media/how-to-create-manage-test/configure-test-engine-instances.png" alt-text="Screenshot that shows how to configure the number of test engine instances when creating a test in the Azure portal."::: + +## Test criteria + +You can specify test failure criteria based on client metrics. When a load test surpasses the threshold for a metric, the load test has a **Failed** status. For more information, see [Configure test failure criteria](./how-to-define-test-criteria.md). + +You can use the following client metrics: + +- Average **Response time**. +- **Error** percentage.
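If you also manage the same test from a CI/CD workflow, the load, parameter, and test criteria settings described above map to fields in the test configuration YAML file. The following is a minimal, hypothetical sketch (the test name, JMX file, CSV file, variable, and Key Vault secret URI are placeholders); see the [test configuration YAML reference](./reference-test-config-yaml.md) for the authoritative list of settings and exact metric names.

```yaml
# Hypothetical test configuration sketch; file names, values, and the secret URI are placeholders.
testName: SampleTest
testPlan: SampleApp.jmx
description: Load test with parameters, scale-out, and pass/fail criteria
engineInstances: 4                  # Load: number of parallel test engine instances
configurationFiles:
  - customers.csv                   # Extra files referenced by the JMeter script
failureCriteria:                    # Test criteria based on client metrics
  - avg(response_time_ms) > 300     # Fail if the average response time exceeds 300 ms
  - percentage(error) > 5           # Fail if more than 5% of requests fail
env:                                # Parameters: environment variables
  - name: webapp
    value: myapp.azurewebsites.net
secrets:                            # Parameters: secrets stored in Azure Key Vault
  - name: appToken
    value: https://myvault.vault.azure.net/secrets/appToken
```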
+ +:::image type="content" source="media/how-to-create-manage-test/configure-test-criteria.png" alt-text="Screenshot that shows how to configure test criteria when creating a test in the Azure portal."::: + +## Monitoring + +For Azure-hosted applications, Azure Load Testing can capture detailed resource metrics for the Azure app components. These metrics enable you to [analyze application performance bottlenecks](./tutorial-identify-bottlenecks-azure-portal.md). + +When you edit a load test, you can select the Azure app component that you want to monitor. Azure Load Testing selects the most relevant resource metrics. You can add or remove resource metrics for each of the app components at any time. + +:::image type="content" source="media/how-to-create-manage-test/configure-monitoring.png" alt-text="Screenshot that shows how to configure the Azure app components to monitor when creating a test in the Azure portal."::: + +When the load test finishes, the test result dashboard shows a graph for each of the Azure app components and resource metrics. + +:::image type="content" source="media/how-to-create-manage-test/test-result-dashboard.png" alt-text="Screenshot that shows the test result dashboard in the Azure portal."::: + +For more information, see [Configure server-side monitoring](./how-to-monitor-server-side-metrics.md). + +## Manage + +If you already have a load test, you can start a new run, delete the load test, edit the test configuration, or compare test runs. + +1. In the [Azure portal](https://portal.azure.com), go to your Azure Load Testing resource. +1. On the left pane, select **Tests** to view the list of load tests, and then select your test. + +:::image type="content" source="media/how-to-create-manage-test/manage-load-test.png" alt-text="Screenshot that shows the tests page in the Azure portal, highlighting the action bar."::: + +You can perform the following actions: + +- Refresh the list of test runs. +- Start a new test run. The run uses the current test configuration settings. +- Delete the load test. All test runs for the load test are also deleted. +- Configure the test configuration: + - Configure the test plan. You can add or remove any of the files for the load test. If you want to update a file, first remove it and then add the updated version. + - Add or remove Azure app components. + - Configure resource metrics for the app components. Azure Load Testing automatically selects the relevant resource metrics for each app component. Add or remove metrics for any of the app components in the load test. +- [Compare test runs](./how-to-compare-multiple-test-runs.md). Select two or more test runs in the list to visually compare them in the results dashboard. + +## Next steps + +- [Identify performance bottlenecks with Azure Load Testing in the Azure portal](./quickstart-create-and-run-load-test.md) +- [Set up automated load testing with CI/CD in Azure Pipelines](./tutorial-cicd-azure-pipelines.md) +- [Set up automated load testing with CI/CD in GitHub Actions](./tutorial-cicd-github-actions.md) diff --git a/articles/load-testing/how-to-export-test-results.md b/articles/load-testing/how-to-export-test-results.md index c6364c536e249..946cbe7b0c840 100644 --- a/articles/load-testing/how-to-export-test-results.md +++ b/articles/load-testing/how-to-export-test-results.md @@ -14,16 +14,11 @@ ms.topic: how-to In this article, you'll learn how to download the test results from Azure Load Testing Preview in the Azure portal. 
You might use these results for reporting in third-party tools. -The test results contain a comma-separated values (CSV) file with details of each application request. See [Apache JMeter CSV log format](https://jmeter.apache.org/usermanual/listeners.html#csvlogformat) and the [Apache JMeter Glossary](https://jmeter.apache.org/usermanual/glossary.html) for details about the different fields. +The test results contain one or more comma-separated values (CSV) files with details of each application request. See [Apache JMeter CSV log format](https://jmeter.apache.org/usermanual/listeners.html#csvlogformat) and the [Apache JMeter Glossary](https://jmeter.apache.org/usermanual/glossary.html) for details about the different fields. You can also use the test results to diagnose errors during a load test. The `responseCode` and `responseMessage` fields give you more information about failed requests. For more information about investigating errors, see [Troubleshoot test execution errors](./how-to-find-download-logs.md). -In addition, all files for running the Apache JMeter dashboard locally are included. - -> [!NOTE] -> The Apache JMeter dashboard generation is temporarily disabled. You can download the CSV files with the test results. - -:::image type="content" source="media/how-to-export-test-results/apache-jmeter-dashboard.png" alt-text="Screenshot that shows the downloaded test results on the Apache JMeter dashboard."::: +You can generate the Apache JMeter dashboard from the CSV log files by following the steps in the [Apache JMeter dashboard documentation](https://jmeter.apache.org/usermanual/generating-dashboard.html#report). > [!IMPORTANT] > Azure Load Testing is currently in preview. For legal terms that apply to Azure features that are in beta, in preview, or otherwise not yet released into general availability, see the [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). @@ -63,7 +58,7 @@ In this section, you'll retrieve and download the Azure Load Testing results fil :::image type="content" source="media/how-to-export-test-results/test-results-zip.png" alt-text="Screenshot that shows the test results zip file in the downloads list."::: - The *testreport.csv* file contains details of each request that the test engine executed during the load test. The Apache JMeter dashboard, which is also included in the zip file, uses this file for its graphs. + The folder contains a separate CSV file for each test engine, with details of the requests that the engine executed during the load test. ## Next steps diff --git a/articles/load-testing/how-to-read-csv-data.md b/articles/load-testing/how-to-read-csv-data.md index bc3ec06c069ca..ccd0c00882aac 100644 --- a/articles/load-testing/how-to-read-csv-data.md +++ b/articles/load-testing/how-to-read-csv-data.md @@ -1,23 +1,29 @@ --- title: Read CSV data in an Apache JMeter load test titleSuffix: Azure Load Testing -description: Learn how to read external data from a CSV file in Apache JMeter and Azure Load Testing. +description: Learn how to read external data from a CSV file in Apache JMeter with Azure Load Testing.
services: load-testing ms.service: load-testing ms.author: nicktrog author: ntrogh -ms.date: 12/15/2021 +ms.date: 05/23/2022 ms.topic: how-to ms.custom: template-how-to +zone_pivot_groups: load-testing-config --- -# Read data from a CSV file in JMeter and Azure Load Testing Preview +# Read data from a CSV file in JMeter with Azure Load Testing Preview -In this article, you'll learn how to read data from a comma-separated value (CSV) file in JMeter and Azure Load Testing Preview. +In this article, you'll learn how to read data from a comma-separated value (CSV) file in JMeter with Azure Load Testing Preview. You can use the JMeter [CSV Data Set Config element](https://jmeter.apache.org/usermanual/component_reference.html#CSV_Data_Set_Config) in your test script. -You can make an Apache JMeter test script configurable by reading settings from an external CSV file. To do this, you can use the [CSV Data Set Config element](https://jmeter.apache.org/usermanual/component_reference.html#CSV_Data_Set_Config) in JMeter. For example, to test a search API, you might retrieve the various query parameters from an external file. +Use data from an external CSV file to make your JMeter test script configurable. For example, you might invoke an API for each entry in a customers CSV file. -When you configure your Azure load test, you can upload any additional files that the JMeter script requires. For example, CSV files that contain configuration settings or binary files to send in the body of an HTTP request. You then update the JMeter script to reference the external files. +In this article, you learn how to: + +> [!div class="checklist"] +> * Configure your JMeter script to read the CSV file. +> * Add the CSV file to your load test. +> * Optionally, split the CSV file evenly across all test engine instances. > [!IMPORTANT] > Azure Load Testing is currently in preview. For legal terms that apply to Azure features that are in beta, in preview, or otherwise not yet released into general availability, see the [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). @@ -31,27 +37,35 @@ When you configure your Azure load test, you can upload any additional files tha ## Configure your JMeter script -In this section, you'll configure your Apache JMeter test script to reference an external file. You'll use a CSV Data Set Config element to read data from a CSV file. +In this section, you'll configure your Apache JMeter script to reference the external CSV file. You'll use a [CSV Data Set Config element](https://jmeter.apache.org/usermanual/component_reference.html#CSV_Data_Set_Config) to read data from a CSV file. -Azure Load Testing uploads the JMX file and all related files in a single folder. Verify that you refer to the external files in the JMX script by using only the file name. +Azure Load Testing uploads the JMX file and all related files in a single folder. When you reference an external file in your JMeter script, verify that you use only the file name and remove any file path references. To edit your JMeter script by using the Apache JMeter GUI: - 1. Select the CSV Data Set Config element in your test plan. + 1. Select the **CSV Data Set Config** element in your test plan. 1. Update the **Filename** information and remove any file path reference. + + 1. Optionally, enter the CSV field names in **Variable Names** when you split the CSV file across test engines.
- :::image type="content" source="media/how-to-read-csv-data/update-csv-data-set-config.png" alt-text="Screenshot that shows the test runs to compare."::: + Azure Load Testing doesn't preserve the header row when splitting your CSV file. Provide the variable names in the **CSV Data Set Config** element instead of using a header row. + + :::image type="content" source="media/how-to-read-csv-data/update-csv-data-set-config.png" alt-text="Screenshot that shows the JMeter UI to configure a C S V Data Set Config element."::: - 1. Repeat the previous steps for every CSV Data Set Config element. + 1. Repeat the previous steps for every **CSV Data Set Config** element in the script. - 1. Save the JMeter script. + 1. Save the JMeter script and add it to your [test plan](./how-to-create-manage-test.md#test-plan). To edit your JMeter script by using Visual Studio Code or your editor of preference: 1. Open the JMX file in Visual Studio Code. - 1. For each `CSVDataSet`, update the `filename` element and remove any file path reference. + 1. For each `CSVDataSet`: + + 1. Update the `filename` element and remove any file path reference. + + 1. Add the CSV field names as a comma-separated list in `variableNames`. ```xml @@ -67,18 +81,16 @@ To edit your JMeter script by using Visual Studio Code or your editor of prefere ``` - 1. Save the JMeter script. - -## Add a CSV file to your load test + 1. Save the JMeter script and add it to your [test plan](./how-to-create-manage-test.md#test-plan). -In this section, you'll configure your Azure load test to include a CSV file. You can then use this CSV file in the JMeter test script. If you reference other external files in your script, you can add them in the same way. +## Add a CSV file to your load test -You can add a CSV file to your load test in two ways: +When you reference an external file in your JMeter script, upload this file to your load test. When the load starts, Azure Load Testing copies all files to a single folder on each of the test engines instances. -* Configure the load test by using the Azure portal -* If you have a CI/CD workflow, update the test configuration YAML file +> [!IMPORTANT] +> Azure Load Testing doesn't preserve the header row when splitting your CSV file. Before you add the CSV file to the load test, remove the header row from the file. -### Add a CSV file by using the Azure portal +::: zone pivot="experience-azp" To add a CSV file to your load test by using the Azure portal: @@ -101,11 +113,15 @@ To add a CSV file to your load test by using the Azure portal: 1. Select **Apply** to modify the test and to use the new configuration when you rerun it. -### Add a CSV file to the test configuration YAML file +::: zone-end + +::: zone pivot="experience-pipelines,experience-ghactions" If you run a load test within your CI/CD workflow, you can add a CSV file to the test configuration YAML file. For more information about running a load test in a CI/CD workflow, see the [Automated regression testing tutorial](./tutorial-cicd-azure-pipelines.md). -To add a CSV file in the test configuration YAML file: +To add a CSV file to your load test: + + 1. Commit the CSV file to the source control repository that contains the JMX file and YAML test configuration file. 1. Open your YAML test configuration file in Visual Studio Code or your editor of choice. @@ -126,6 +142,54 @@ To add a CSV file in the test configuration YAML file: The next time the CI/CD workflow runs, it will use the updated configuration. 
+::: zone-end + +## Split CSV input data across test engines + +By default, Azure Load Testing copies and processes your input files unmodified across all test engine instances. Azure Load Testing enables you to split the CSV input data evenly across all engine instances. If you have multiple CSV files, each file will be split evenly. + +For example, if you have a large customer CSV input file, and the load test runs on 10 parallel test engines, then each instance will process 1/10th of the customers. + +> [!IMPORTANT] +> Azure Load Testing doesn't preserve the header row when splitting your CSV file. +> 1. [Configure your JMeter script](#configure-your-jmeter-script) to use variable names when reading the CSV file. +> 1. Remove the header row from the CSV file before you add it to the load test. + +To configure your load test to split input CSV files: + +::: zone pivot="experience-azp" + +1. Go to the **Test plan** page for your load test. +1. Select **Split CSV evenly between Test engines**. + + :::image type="content" source="media/how-to-read-csv-data/configure-test-split-csv.png" alt-text="Screenshot that shows the checkbox to enable splitting input C S V files when configuring a test in the Azure portal."::: + +1. Select **Apply** to confirm the configuration changes. + + The next time you run the test, Azure Load Testing splits and processes the CSV file evenly across the test engines. +::: zone-end + +::: zone pivot="experience-pipelines,experience-ghactions" + +1. Open your YAML test configuration file in Visual Studio Code or your editor of choice. + +1. Add the `splitAllCSVs` setting and set its value to **True**. + + ```yaml + testName: MyTest + testPlan: SampleApp.jmx + description: Run a load test for my sample web app + engineInstances: 1 + configurationFiles: + - customers.csv + splitAllCSVs: True + ``` + +1. Save the YAML configuration file and commit it to your source control repository. + + The next time you run the test, Azure Load Testing splits and processes the CSV file evenly across the test engines. +::: zone-end + ## Next steps - For information about high-scale load tests, see [Set up a high-scale load test](./how-to-high-scale-load.md). diff --git a/articles/load-testing/index.yml b/articles/load-testing/index.yml index 2ba3d41f871a9..ec771846dddab 100644 --- a/articles/load-testing/index.yml +++ b/articles/load-testing/index.yml @@ -1,99 +1,112 @@ -### YamlMime:Hub +### YamlMime:Landing title: Azure Load Testing documentation -summary: Learn how to generate high-scale loads, identify app performance bottlenecks, and automate regression testing with Azure Load Testing service. Tutorials, code examples, GitHub actions, Azure Pipelines, and more. +summary: Learn how to generate high-scale loads, identify app performance bottlenecks, and automate regression testing with Azure Load Testing service. Tutorials, code examples, GitHub Actions, Azure Pipelines, and more. metadata: title: Azure Load Testing documentation - description: Learn how to generate high-scale loads, identify app performance bottlenecks, and automate regression testing with Azure Load Testing service. Tutorials, code examples, GitHub actions, Azure Pipelines, and more. - + description: Learn how to generate high-scale loads, identify app performance bottlenecks, and automate regression testing with Azure Load Testing service. Tutorials, code examples, GitHub Actions, Azure Pipelines, and more. 
services: load-testing ms.service: load-testing - ms.topic: hub-page + ms.topic: landing-page ms.collection: collection - ms.author: jmartens - author: j-martens - ms.date: 11/30/2021 + ms.author: nicktrog + author: ntrogh + ms.date: 05/05/2022 -# highlightedContent section -highlightedContent: -# itemType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | sample | tutorial | video | whats-new - items: - # Card - - title: What is Azure Load Testing? - itemType: overview - url: overview-what-is-azure-load-testing.md - # Card - - title: 'Create & run a load test' - itemType: quickstart - url: quickstart-create-and-run-load-test.md - # Card - - title: 'Identify performance bottlenecks' - itemType: tutorial - url: tutorial-identify-bottlenecks-azure-portal.md - # Card - - title: 'Set up continuous regression testing' - itemType: tutorial - url: tutorial-cicd-azure-pipelines.md +# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | sample | tutorial | video | whats-new -conceptualContent: -# Supports up to 3 sections -# itemType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | sample | tutorial | video | whats-new - items: - # Card - - title: Configure load tests - links: - - url: how-to-high-scale-load.md - itemType: how-to-guide - text: Configure for high scale - - url: how-to-parameterize-load-tests.md - itemType: how-to-guide - text: Parameterize load tests - - url: how-to-read-csv-data.md - itemType: how-to-guide - text: Read a CSV file in load tests - - url: how-to-monitor-server-side-metrics.md - itemType: how-to-guide - text: Configure server-side monitoring - - url: how-to-define-test-criteria.md - itemType: how-to-guide - text: 'Define pass/fail criteria' - - url: how-to-use-a-managed-identity.md - itemType: how-to-guide - text: Use a managed identity - - url: how-to-assign-roles.md - itemType: how-to-guide - text: Manage users and roles - - # Card - - title: Analyze test results - links: - - url: how-to-compare-multiple-test-runs.md - itemType: how-to-guide - text: Analyze performance by comparing runs - - url: how-to-find-download-logs.md - itemType: how-to-guide - text: Troubleshoot test execution errors - - url: how-to-appservice-insights.md - itemType: how-to-guide - text: Get insights from App Service Diagnostics - - url: how-to-export-test-results.md - itemType: how-to-guide - text: Export results +landingContent: + # Card + - title: About Azure Load Testing + linkLists: + - linkListType: overview + links: + - text: What is Azure Load Testing? 
+ url: overview-what-is-azure-load-testing.md + - text: Key concepts + url: concept-load-testing-concepts.md - # Card - - title: Automate load tests - links: - - url: tutorial-cicd-azure-pipelines.md - itemType: tutorial - text: CI/CD workflows in Azure Pipelines - - url: tutorial-cicd-github-actions.md - itemType: tutorial - text: CI/CD workflows in GitHub Actions + # Card + - title: Get started + linkLists: + - linkListType: quickstart + links: + - text: 'Create & run a load test' + url: quickstart-create-and-run-load-test.md + - linkListType: tutorial + links: + - text: Identify performance bottlenecks + url: tutorial-identify-bottlenecks-azure-portal.md + - text: Set up automated regression testing + url: tutorial-cicd-azure-pipelines.md - # Card - - title: Reference - links: - - url: reference-test-config-yaml.md - itemType: reference - text: Test configuration YAML + # Card + - title: Configure load tests + linkLists: + - linkListType: how-to-guide + links: + - text: Configure for high scale + url: how-to-high-scale-load.md + - text: Parameterize load tests + url: how-to-parameterize-load-tests.md + - text: Configure server-side monitoring + url: how-to-monitor-server-side-metrics.md + - text: 'Define pass/fail criteria' + url: how-to-define-test-criteria.md + - text: Use JMeter user properties + url: how-to-configure-user-properties.md + - text: Read data from a CSV file + url: how-to-read-csv-data.md + + # Card + - title: Analyze test results + linkLists: + - linkListType: how-to-guide + links: + - text: Identify performance regressions by comparing test runs + url: how-to-compare-multiple-test-runs.md + - text: Get insights from App Service Diagnostics + url: how-to-appservice-insights.md + - text: Export test results for custom reporting + url: how-to-export-test-results.md + + # Card + - title: Automate load tests + linkLists: + - linkListType: tutorial + links: + - text: Automate load testing with Azure Pipelines + url: tutorial-cicd-azure-pipelines.md + - text: Automate load testing with GitHub Actions + url: tutorial-cicd-github-actions.md + + # Card + - title: Security + linkLists: + - linkListType: how-to-guide + links: + - text: Manage users and roles + url: how-to-assign-roles.md + - text: Use a managed identity + url: how-to-use-a-managed-identity.md + + # Card + - title: Manage resources + linkLists: + - linkListType: how-to-guide + links: + - text: Move between regions + url: how-to-move-between-regions.md + - text: Monitor Azure Load Testing + url: monitor-load-testing.md + + # Card + - title: Reference docs + linkLists: + - linkListType: reference + links: + - text: REST API + url: /rest/api/loadtesting/ + - text: Test configuration YAML + url: reference-test-config-yaml.md diff --git a/articles/load-testing/media/how-to-create-manage-test/configure-monitoring.png b/articles/load-testing/media/how-to-create-manage-test/configure-monitoring.png new file mode 100644 index 0000000000000..b7d6873fdd641 Binary files /dev/null and b/articles/load-testing/media/how-to-create-manage-test/configure-monitoring.png differ diff --git a/articles/load-testing/media/how-to-create-manage-test/configure-parameters.png b/articles/load-testing/media/how-to-create-manage-test/configure-parameters.png new file mode 100644 index 0000000000000..8ac517dfcef1e Binary files /dev/null and b/articles/load-testing/media/how-to-create-manage-test/configure-parameters.png differ diff --git a/articles/load-testing/media/how-to-create-manage-test/configure-test-criteria.png 
b/articles/load-testing/media/how-to-create-manage-test/configure-test-criteria.png new file mode 100644 index 0000000000000..d3a2d1c6093d7 Binary files /dev/null and b/articles/load-testing/media/how-to-create-manage-test/configure-test-criteria.png differ diff --git a/articles/load-testing/media/how-to-create-manage-test/configure-test-engine-instances.png b/articles/load-testing/media/how-to-create-manage-test/configure-test-engine-instances.png new file mode 100644 index 0000000000000..bed4b3c4d4085 Binary files /dev/null and b/articles/load-testing/media/how-to-create-manage-test/configure-test-engine-instances.png differ diff --git a/articles/load-testing/media/how-to-create-manage-test/configure-test-split-csv.png b/articles/load-testing/media/how-to-create-manage-test/configure-test-split-csv.png new file mode 100644 index 0000000000000..164e8ac7cedcb Binary files /dev/null and b/articles/load-testing/media/how-to-create-manage-test/configure-test-split-csv.png differ diff --git a/articles/load-testing/media/how-to-create-manage-test/create-jmeter-test.png b/articles/load-testing/media/how-to-create-manage-test/create-jmeter-test.png new file mode 100644 index 0000000000000..e6fddbbc27983 Binary files /dev/null and b/articles/load-testing/media/how-to-create-manage-test/create-jmeter-test.png differ diff --git a/articles/load-testing/media/how-to-create-manage-test/create-quick-test.png b/articles/load-testing/media/how-to-create-manage-test/create-quick-test.png new file mode 100644 index 0000000000000..f3514e60dfa67 Binary files /dev/null and b/articles/load-testing/media/how-to-create-manage-test/create-quick-test.png differ diff --git a/articles/load-testing/media/how-to-create-manage-test/create-test-dropdown.png b/articles/load-testing/media/how-to-create-manage-test/create-test-dropdown.png new file mode 100644 index 0000000000000..45126ac7ca123 Binary files /dev/null and b/articles/load-testing/media/how-to-create-manage-test/create-test-dropdown.png differ diff --git a/articles/load-testing/media/how-to-create-manage-test/manage-load-test.png b/articles/load-testing/media/how-to-create-manage-test/manage-load-test.png new file mode 100644 index 0000000000000..dde60e4d42cc3 Binary files /dev/null and b/articles/load-testing/media/how-to-create-manage-test/manage-load-test.png differ diff --git a/articles/load-testing/media/how-to-create-manage-test/test-plan-upload-files.png b/articles/load-testing/media/how-to-create-manage-test/test-plan-upload-files.png new file mode 100644 index 0000000000000..165cdc9026a04 Binary files /dev/null and b/articles/load-testing/media/how-to-create-manage-test/test-plan-upload-files.png differ diff --git a/articles/load-testing/media/how-to-create-manage-test/test-plan-upload-zip.png b/articles/load-testing/media/how-to-create-manage-test/test-plan-upload-zip.png new file mode 100644 index 0000000000000..2ec6c44f6582e Binary files /dev/null and b/articles/load-testing/media/how-to-create-manage-test/test-plan-upload-zip.png differ diff --git a/articles/load-testing/media/how-to-create-manage-test/test-result-dashboard.png b/articles/load-testing/media/how-to-create-manage-test/test-result-dashboard.png new file mode 100644 index 0000000000000..bc80c18a40409 Binary files /dev/null and b/articles/load-testing/media/how-to-create-manage-test/test-result-dashboard.png differ diff --git a/articles/load-testing/media/how-to-read-csv-data/configure-test-split-csv.png b/articles/load-testing/media/how-to-read-csv-data/configure-test-split-csv.png 
new file mode 100644 index 0000000000000..164e8ac7cedcb Binary files /dev/null and b/articles/load-testing/media/how-to-read-csv-data/configure-test-split-csv.png differ diff --git a/articles/load-testing/media/how-to-read-csv-data/update-csv-data-set-config.png b/articles/load-testing/media/how-to-read-csv-data/update-csv-data-set-config.png index 0f99bf50cd859..8c9c4293cc3f7 100644 Binary files a/articles/load-testing/media/how-to-read-csv-data/update-csv-data-set-config.png and b/articles/load-testing/media/how-to-read-csv-data/update-csv-data-set-config.png differ diff --git a/articles/load-testing/media/tutorial-cicd-github-actions/commit-workflow.png b/articles/load-testing/media/tutorial-cicd-github-actions/commit-workflow.png deleted file mode 100644 index 7ebc4ad74081f..0000000000000 Binary files a/articles/load-testing/media/tutorial-cicd-github-actions/commit-workflow.png and /dev/null differ diff --git a/articles/load-testing/media/tutorial-cicd-github-actions/github-actions-workflow-completed.png b/articles/load-testing/media/tutorial-cicd-github-actions/github-actions-workflow-completed.png index 55728ec0f7f85..16c4280f701ee 100644 Binary files a/articles/load-testing/media/tutorial-cicd-github-actions/github-actions-workflow-completed.png and b/articles/load-testing/media/tutorial-cicd-github-actions/github-actions-workflow-completed.png differ diff --git a/articles/load-testing/monitor-load-testing-reference.md b/articles/load-testing/monitor-load-testing-reference.md index 9041a3f5145b6..49cfeef8eaa54 100644 --- a/articles/load-testing/monitor-load-testing-reference.md +++ b/articles/load-testing/monitor-load-testing-reference.md @@ -46,4 +46,4 @@ Operational log entries include elements listed in the following table: - See [Monitor Azure Load Testing](monitor-load-testing.md) for a description of monitoring Azure Load Testing. -- See [Monitor Azure resources with Azure Monitor](/azure/azure-monitor/essentials/monitor-azure-resource) for details on monitoring Azure resources. +- See [Monitor Azure resources with Azure Monitor](../azure-monitor/essentials/monitor-azure-resource.md) for details on monitoring Azure resources. \ No newline at end of file diff --git a/articles/load-testing/monitor-load-testing.md b/articles/load-testing/monitor-load-testing.md index 9bfe8fa12c685..2360c8e217125 100644 --- a/articles/load-testing/monitor-load-testing.md +++ b/articles/load-testing/monitor-load-testing.md @@ -37,7 +37,7 @@ The following sections build on this article by describing the specific data gat ## Monitoring data -Azure Load Testing collects the same kinds of monitoring data as other Azure resources that are described in [Monitoring data from Azure resources](/azure/azure-monitor/essentials/monitor-azure-resource#monitoring-data-from-Azure-resources). +Azure Load Testing collects the same kinds of monitoring data as other Azure resources that are described in [Monitoring data from Azure resources](../azure-monitor/essentials/monitor-azure-resource.md#monitoring-data-from-azure-resources). See [Monitor Azure Load Testing data reference](monitor-load-testing-reference.md) for detailed information on logs metrics created by Azure Load Testing. @@ -60,9 +60,9 @@ The following sections describe which types of logs you can collect. Data in Azure Monitor Logs is stored in tables where each table has its own set of unique properties. -All resource logs in Azure Monitor have the same fields followed by service-specific fields. 
The common schema is outlined in [Azure Monitor resource log schema](/azure/azure-monitor/essentials/resource-logs-schema). You can find the schema for Azure Load Testing resource logs in the [Monitor Azure Load Testing data reference](monitor-load-testing-reference.md#resource-logs). +All resource logs in Azure Monitor have the same fields followed by service-specific fields. The common schema is outlined in [Azure Monitor resource log schema](../azure-monitor/essentials/resource-logs-schema.md). You can find the schema for Azure Load Testing resource logs in the [Monitor Azure Load Testing data reference](monitor-load-testing-reference.md#resource-logs). -The [Activity log](/azure/azure-monitor/essentials/activity-log) is a type of platform log in Azure that provides insight into subscription-level events. You can view it independently or route it to Azure Monitor Logs, where you can do much more complex queries using Log Analytics. +The [Activity log](../azure-monitor/essentials/activity-log.md) is a type of platform log in Azure that provides insight into subscription-level events. You can view it independently or route it to Azure Monitor Logs, where you can do much more complex queries using Log Analytics. For a list of resource logs types collected for Azure Load Testing, see [Monitor Azure Load Testing data reference](monitor-load-testing-reference.md#resource-logs). @@ -71,7 +71,7 @@ For a list of resource logs types collected for Azure Load Testing, see [Monitor > [!IMPORTANT] -> When you select **Logs** from the Azure Load Testing menu, Log Analytics is opened with the query scope set to the current [service name]. This means that log queries will only include data from that resource. If you want to run a query that includes data from other [service resource] or data from other Azure services, select **Logs** from the **Azure Monitor** menu. See [Log query scope and time range in Azure Monitor Log Analytics](/azure/azure-monitor/logs/scope) for details. +> When you select **Logs** from the Azure Load Testing menu, Log Analytics is opened with the query scope set to the current [service name]. This means that log queries will only include data from that resource. If you want to run a query that includes data from other [service resource] or data from other Azure services, select **Logs** from the **Azure Monitor** menu. See [Log query scope and time range in Azure Monitor Log Analytics](../azure-monitor/logs/scope.md) for details. Following are queries that you can use to help you monitor your Azure Load Testing resources: @@ -98,4 +98,4 @@ AzureLoadTestingOperation - See [Monitor Azure Load Testing data reference](monitor-load-testing-reference.md) for a reference of the metrics, logs, and other important values created by Azure Load Testing. -- See [Monitoring Azure resources with Azure Monitor](/azure/azure-monitor/essentials/monitor-azure-resource) for details on monitoring Azure resources. +- See [Monitoring Azure resources with Azure Monitor](../azure-monitor/essentials/monitor-azure-resource.md) for details on monitoring Azure resources. 
diff --git a/articles/load-testing/quickstart-create-and-run-load-test.md b/articles/load-testing/quickstart-create-and-run-load-test.md index 4d8d07db7f99d..8fe27326574bd 100644 --- a/articles/load-testing/quickstart-create-and-run-load-test.md +++ b/articles/load-testing/quickstart-create-and-run-load-test.md @@ -25,7 +25,7 @@ Learn more about the [key concepts for Azure Load Testing](./concept-load-testin ## Prerequisites - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). -- Azure RBAC role with permission to create and manage resources in the subscription, such as [Contributor](/azure/role-based-access-control/built-in-roles#contributor) or [Owner](/azure/role-based-access-control/built-in-roles#owner) +- Azure RBAC role with permission to create and manage resources in the subscription, such as [Contributor](../role-based-access-control/built-in-roles.md#contributor) or [Owner](../role-based-access-control/built-in-roles.md#owner) ## Create an Azure Load Testing resource @@ -49,7 +49,7 @@ Azure Load Testing enables you to quickly create a load test from the Azure port 1. On the **Quickstart test** page, enter the **Test URL**. - Enter the complete URL that you would like to run the test for. For example, https://www.example.com/login. + Enter the complete URL that you would like to run the test for. For example, `https://www.example.com/login`. 1. (Optional) Update the **Number of virtual users** to the total number of virtual users. diff --git a/articles/load-testing/reference-test-config-yaml.md b/articles/load-testing/reference-test-config-yaml.md index bfbf3041fc34f..54856043525a2 100644 --- a/articles/load-testing/reference-test-config-yaml.md +++ b/articles/load-testing/reference-test-config-yaml.md @@ -22,24 +22,25 @@ Learn how to configure your load test in Azure Load Testing Preview by using [YA A test configuration uses the following keys: -| Key | Type | Description | -| ----- | ----- | ----- | -| `version` | string | Version of the YAML configuration file that the service uses. Currently, the only valid value is `v0.1`. | -| `testName` | string | *Required*. Name of the test to run. The results of various test runs will be collected under this test name in the Azure portal. | -| `testPlan` | string | *Required*. Relative path to the Apache JMeter test script to run. | -| `engineInstances` | integer | *Required*. Number of parallel instances of the test engine to execute the provided test plan. You can update this property to increase the amount of load that the service can generate. | -| `configurationFiles` | array | List of relevant configuration files or other files that you reference in the Apache JMeter script. For example, a CSV data set file, images, or any other data file. These files will be uploaded to the Azure Load Testing resource alongside the test script. If the files are in a subfolder on your local machine, use file paths that are relative to the location of the test script.

                  Azure Load Testing currently doesn't support the use of file paths in the JMX file. When you reference an external file in the test script, make sure to only specify the file name. | -| `description` | string | Short description of the test run. | -| `failureCriteria` | object | Criteria that indicate failure of the test. Each criterion is in the form of:
                  `[Aggregate_function] ([client_metric]) > [value]`

                  - `[Aggregate function] ([client_metric])` is either `avg(response_time_ms)` or `percentage(error).`
                  - `value` is an integer number. | -| `properties` | object | List of properties to configure the load test. | -| `properties.userPropertyFile` | string | File to use as an Apache JMeter [user properties file](https://jmeter.apache.org/usermanual/test_plan.html#properties). The file will be uploaded to the Azure Load Testing resource alongside the JMeter test script and other configuration files. If the file is in a subfolder on your local machine, use a path relative to the location of the test script. | -| `secrets` | object | List of secrets that the Apache JMeter script references. | -| `secrets.name` | string | Name of the secret. This name should match the secret name that you use in the Apache JMeter script. | -| `secrets.value` | string | URI for the Azure Key Vault secret. | -| `env` | object | List of environment variables that the Apache JMeter script references. | -| `env.name` | string | Name of the environment variable. This name should match the secret name that you use in the Apache JMeter script. | -| `env.value` | string | Value of the environment variable. | -| `keyVaultReferenceIdentity` | string | Resource ID of the user-assigned managed identity for accessing the secrets from your Azure Key Vault. If you use a system-managed identity, this information isn't needed. Make sure to grant this user-assigned identity access to your Azure key vault. | +| Key | Type | Default value | Description | +| ----- | ----- | ----- | ---- | +| `version` | string | | Version of the YAML configuration file that the service uses. Currently, the only valid value is `v0.1`. | +| `testName` | string | | *Required*. Name of the test to run. The results of various test runs will be collected under this test name in the Azure portal. | +| `testPlan` | string | | *Required*. Relative path to the Apache JMeter test script to run. | +| `engineInstances` | integer | | *Required*. Number of parallel instances of the test engine to execute the provided test plan. You can update this property to increase the amount of load that the service can generate. | +| `configurationFiles` | array | | List of relevant configuration files or other files that you reference in the Apache JMeter script. For example, a CSV data set file, images, or any other data file. These files will be uploaded to the Azure Load Testing resource alongside the test script. If the files are in a subfolder on your local machine, use file paths that are relative to the location of the test script.

                  Azure Load Testing currently doesn't support the use of file paths in the JMX file. When you reference an external file in the test script, make sure to only specify the file name. | +| `description` | string | | Short description of the test run. | +| `failureCriteria` | object | | Criteria that indicate failure of the test. Each criterion is in the form of:
                  `[Aggregate_function] ([client_metric]) > [value]`

- `[Aggregate_function] ([client_metric])` is either `avg(response_time_ms)` or `percentage(error)`.
                  - `value` is an integer number. | +| `properties` | object | | List of properties to configure the load test. | +| `properties.userPropertyFile` | string | | File to use as an Apache JMeter [user properties file](https://jmeter.apache.org/usermanual/test_plan.html#properties). The file will be uploaded to the Azure Load Testing resource alongside the JMeter test script and other configuration files. If the file is in a subfolder on your local machine, use a path relative to the location of the test script. | +| `splitAllCSVs` | boolean | False | Split the input CSV files evenly across all test engine instances. For more information, see [Read a CSV file in load tests](./how-to-read-csv-data.md#split-csv-input-data-across-test-engines). | +| `secrets` | object | | List of secrets that the Apache JMeter script references. | +| `secrets.name` | string | | Name of the secret. This name should match the secret name that you use in the Apache JMeter script. | +| `secrets.value` | string | | URI for the Azure Key Vault secret. | +| `env` | object | | List of environment variables that the Apache JMeter script references. | +| `env.name` | string | | Name of the environment variable. This name should match the secret name that you use in the Apache JMeter script. | +| `env.value` | string | | Value of the environment variable. | +| `keyVaultReferenceIdentity` | string | | Resource ID of the user-assigned managed identity for accessing the secrets from your Azure Key Vault. If you use a system-managed identity, this information isn't needed. Make sure to grant this user-assigned identity access to your Azure key vault. | The following YAML snippet contains an example load test configuration: @@ -56,6 +57,7 @@ configurationFiles: failureCriteria: - avg(response_time_ms) > 300 - percentage(error) > 50 +splitAllCSVs: True env: - name: my-variable value: my-value diff --git a/articles/load-testing/toc.yml b/articles/load-testing/toc.yml index 89ee1cfe0d771..513482591770f 100644 --- a/articles/load-testing/toc.yml +++ b/articles/load-testing/toc.yml @@ -19,10 +19,10 @@ - name: Continuous regression testing (CI/CD) expanded: true items: - - name: Automate with GitHub Actions - href: tutorial-cicd-github-actions.md - - name: Automate with Azure Pipelines - href: tutorial-cicd-azure-pipelines.md + - name: Automate with GitHub Actions + href: tutorial-cicd-github-actions.md + - name: Automate with Azure Pipelines + href: tutorial-cicd-azure-pipelines.md - name: Concepts expanded: true items: @@ -31,6 +31,8 @@ - name: How-to guides expanded: true items: + - name: Create & manage tests + href: how-to-create-manage-test.md - name: Create a load test with a JMeter script href: how-to-create-and-run-load-test-with-JMeter-script.md - name: Compare test runs @@ -39,7 +41,7 @@ href: how-to-monitor-server-side-metrics.md - name: Configure for high scale loads href: how-to-high-scale-load.md - - name: Troubleshoot test execution errors + - name: Troubleshoot with load test logs href: how-to-find-download-logs.md - name: Define test criteria href: how-to-define-test-criteria.md @@ -47,7 +49,7 @@ href: how-to-parameterize-load-tests.md - name: Use JMeter user properties href: how-to-configure-user-properties.md - - name: Read a CSV file in load tests + - name: Read data from a CSV file href: how-to-read-csv-data.md - name: Get insights from App Service Diagnostics href: how-to-appservice-insights.md @@ -65,10 +67,13 @@ href: monitor-load-testing.md - name: Reference items: - - name: Test configuration 
YAML - href: reference-test-config-yaml.md - - name: Monitor data reference - href: monitor-load-testing-reference.md + - name: REST API + href: /rest/api/loadtesting/ + - name: Test configuration YAML + href: reference-test-config-yaml.md + - name: Monitor data reference + href: monitor-load-testing-reference.md + - name: Resources items: - name: Azure roadmap diff --git a/articles/load-testing/tutorial-cicd-github-actions.md b/articles/load-testing/tutorial-cicd-github-actions.md index 60e2dc53364d6..6f93602183d99 100644 --- a/articles/load-testing/tutorial-cicd-github-actions.md +++ b/articles/load-testing/tutorial-cicd-github-actions.md @@ -1,25 +1,27 @@ --- -title: 'Tutorial: Identify performance regressions with Azure Load Testing and GitHub Actions' +title: 'Tutorial: Automate regression testing with GitHub Actions' titleSuffix: Azure Load Testing description: 'In this tutorial, you learn how to automate performance regression testing by using Azure Load Testing and GitHub Actions CI/CD workflows.' services: load-testing ms.service: load-testing ms.author: ninallam author: ninallam -ms.date: 03/28/2022 +ms.date: 05/30/2022 ms.topic: tutorial #Customer intent: As an Azure user, I want to learn how to automatically test builds for performance regressions on every pull request and/or deployment by using GitHub Actions. --- # Tutorial: Identify performance regressions with Azure Load Testing Preview and GitHub Actions -This tutorial describes how to automate performance regression testing by using Azure Load Testing Preview and GitHub Actions. You'll set up a GitHub Actions CI/CD workflow to deploy a sample Node.js application on Azure and trigger a load test using the [Azure Load Testing action](https://github.com/marketplace/actions/azure-load-testing). Once the load test finishes, you'll use the Azure Load Testing dashboard to identify performance issues. +This tutorial describes how to automate performance regression testing with Azure Load Testing Preview and GitHub Actions. -You'll deploy a sample Node.js web app on Azure App Service. The web app uses Azure Cosmos DB for storing the data. The sample application also contains an Apache JMeter script to load test three APIs. +You'll set up a GitHub Actions CI/CD workflow to deploy a sample Node.js application on Azure and trigger a load test using the [Azure Load Testing action](https://github.com/marketplace/actions/azure-load-testing). -If you're using Azure Pipelines for your CI/CD workflows, see the corresponding [Azure Pipelines tutorial](./tutorial-cicd-azure-pipelines.md). +You'll then define test failure criteria to ensure the application meets your goals. When a criterion isn't met, the CI/CD pipeline will fail. For more information, see [Define load test failure criteria](./how-to-define-test-criteria.md). + +Finally, you'll make the load test configurable by passing parameters from the CI/CD pipeline to the JMeter script. For example, you could use a GitHub secret to pass an authentication token to the script. For more information, see [Parameterize load tests with secrets and environment variables](./how-to-parameterize-load-tests.md). -Learn more about the [key concepts for Azure Load Testing](./concept-load-testing-concepts.md). +If you're using Azure Pipelines for your CI/CD workflows, see the corresponding [Azure Pipelines tutorial](./tutorial-cicd-azure-pipelines.md). 
You'll learn how to: @@ -41,7 +43,7 @@ You'll learn how to: ## Set up the sample application repository -To get started with this tutorial, you first need to set up a sample Node.js web application. The sample application contains a GitHub Actions workflow definition to deploy the application on Azure and trigger a load test. +To get started with this tutorial, you first need to set up a sample Node.js web application. The sample application repository contains a GitHub Actions workflow definition that deploys the Node.js application on Azure and then triggers a load test. [!INCLUDE [azure-load-testing-set-up-sample-application](../../includes/azure-load-testing-set-up-sample-application.md)] @@ -70,7 +72,7 @@ First, you'll create an Azure Active Directory [service principal](../active-dir > [!NOTE] > Azure Login supports multiple ways to authenticate with Azure. For other authentication options, see the [Azure and GitHub integration site](/azure/developer/github). - The output is the role assignment credentials that provide access to your resource. The command should output a JSON object similar to this. + The output is the role assignment credentials that provide access to your resource. The command outputs a JSON object similar to the following snippet. ```json { @@ -82,9 +84,9 @@ First, you'll create an Azure Active Directory [service principal](../active-dir } ``` -1. Copy this JSON object, which you can use to authenticate from GitHub. +1. Copy this JSON object. You'll store this value as a GitHub secret in a later step. -1. Grant permissions to the service principal to create and run tests with Azure Load Testing. The **Load Test Contributor** role grants permissions to create, manage and run tests in an Azure Load Testing resource. +1. Assign the service principal the **Load Test Contributor** role, which grants permission to create, manage, and run tests in an Azure Load Testing resource. First, retrieve the ID of the service principal object by running this Azure CLI command: @@ -92,7 +94,9 @@ First, you'll create an Azure Active Directory [service principal](../active-dir az ad sp list --filter "displayname eq 'my-load-test-cicd'" -o table ``` - Next, run the following Azure CLI command to assign the *Load Test Contributor* role to the service principal. + Next, assign the **Load Test Contributor** role to the service principal. + + Replace the placeholder text `` with the `ObjectId` value from the previous Azure CLI command. Also, replace `` with your Azure subscription ID. ```azurecli az role assignment create --assignee "" \ @@ -100,12 +104,14 @@ First, you'll create an Azure Active Directory [service principal](../active-dir --scope /subscriptions//resourceGroups/ \ --subscription "" ``` - + In the previous command, replace the placeholder text `` with the `ObjectId` value from the previous Azure CLI command. Also, replace `` with your Azure subscription ID. +You now have a service principal that has the necessary permissions to create and run a load test. + ### Configure the GitHub secret -You'll add a GitHub secret **AZURE_CREDENTIALS** to your repository for the service principal you created in the previous step. The Azure Login action in the GitHub Actions workflow uses this secret to authenticate with Azure. +Next, add a GitHub secret **AZURE_CREDENTIALS** to your repository to store the service principal you created earlier. You'll pass this GitHub secret to the Azure Login action to authenticate with Azure. 1.
In [GitHub](https://github.com), browse to your forked repository, select **Settings** > **Secrets** > **New repository secret**. @@ -117,7 +123,7 @@ You'll add a GitHub secret **AZURE_CREDENTIALS** to your repository for the serv ### Authenticate with Azure -You can now use the `AZURE_CREDENTIALS` secret with the Azure Login action in your CI/CD workflow. The *workflow.yml* file in the sample application already has the necessary configuration: +You can now use the `AZURE_CREDENTIALS` secret with the Azure Login action in your CI/CD workflow. The *.github/workflows/workflow.yml* file in the sample application repository already has this configuration: ```yml jobs: @@ -138,13 +144,31 @@ jobs: creds: ${{ secrets.AZURE_CREDENTIALS }} ``` -You've now authorized your GitHub Actions workflow to access your Azure Load Testing resource. You'll now configure the CI/CD workflow to run a load test by using Azure Load Testing. +You've now authorized your GitHub Actions workflow to access your Azure Load Testing resource. You'll now configure the CI/CD workflow to run a load test with Azure Load Testing. ## Configure the GitHub Actions workflow to run a load test -In this section, you'll set up a GitHub Actions workflow that triggers the load test. The sample application repository contains a workflow file *SampleApp.yaml*. The workflow first deploys the sample web application to Azure App Service, and then invokes the load test by using the [Azure Load Testing Action](https://github.com/marketplace/actions/azure-load-testing). The GitHub Actions uses an environment variable to pass the URL of the web application to the Apache JMeter script. +In this section, you'll set up a GitHub Actions workflow that triggers the load test by using the [Azure Load Testing Action](https://github.com/marketplace/actions/azure-load-testing). -The GitHub Actions workflow performs the following steps for every update to the main branch: +The following code snippet shows an example of how to trigger a load test using the `azure/load-testing` action: + +```yml +- name: 'Azure Load Testing' +uses: azure/load-testing@v1 +with: + loadTestConfigFile: 'my-jmeter-script.jmx' + loadTestResource: my-load-test-resource + resourceGroup: my-resource-group + env: | + [ + { + "name": "webapp", + "value": "my-web-app.azurewebsites.net" + } + ] +``` + +The sample application repository already contains a sample workflow file *.github/workflows/workflow.yml*. The GitHub Actions workflow performs the following steps for every update to the main branch: - Deploy the sample Node.js application to an Azure App Service web app. - Create an Azure Load Testing resource using the *ARMTemplate/template.json* Azure Resource Manager (ARM) template, if the resource doesn't exist yet. Learn more about ARM templates [here](../azure-resource-manager/templates/overview.md). @@ -169,17 +193,15 @@ Follow these steps to configure the GitHub Actions workflow for your environment LOAD_TEST_RESOURCE_GROUP: "" ``` - These variables are used to configure the GitHub actions for deploying the sample application to Azure, and to connect to your Azure Load Testing resource. + These variables are used to configure the GitHub Actions for deploying the sample application to Azure, and to connect to your Azure Load Testing resource. 1. Commit your changes directly to the main branch. 
- :::image type="content" source="./media/tutorial-cicd-github-actions/commit-workflow.png" alt-text="Screenshot that shows selections for committing changes to the GitHub Actions workflow file."::: - The commit will trigger the GitHub Actions workflow in your repository. You can verify that the workflow is running by going to the **Actions** tab. ## View load test results -To view the results of the load test in the GitHub Actions workflow log: +When the load test finishes, view the results in the GitHub Actions workflow log: 1. Select the **Actions** tab in your GitHub repository to view the list of workflow runs. @@ -194,12 +216,14 @@ To view the results of the load test in the GitHub Actions workflow log: 1. On the screen that shows the workflow run's details, select the **loadTestResults** artifact to download the result files for the load test. :::image type="content" source="./media/tutorial-cicd-github-actions/github-actions-artifacts.png" alt-text="Screenshot that shows artifacts of the workflow run."::: - + ## Define test pass/fail criteria -In this section, you'll add criteria to determine whether your load test passes or fails. If at least one of the pass/fail criteria evaluates to `true`, the load test is unsuccessful. +You can use test failure criteria to define thresholds for when a load test should fail. For example, a test might fail when the percentage of failed requests surpasses a specific value. + +When at least one of the failure criteria is met, the load test status is failed. As a result, the CI/CD workflow will also fail and the development team can be alerted. -You can specify these criteria in the test configuration YAML file: +You can specify these criteria in the [test configuration YAML file](./reference-test-config-yaml.md): 1. Edit the *SampleApp.yml* file in your GitHub repository. @@ -244,13 +268,13 @@ You can specify these criteria in the test configuration YAML file: ## Pass parameters to your load tests from the workflow -Next, you'll parameterize your load test by using workflow variables. These parameters can be secrets, such as passwords, or non-secrets. +Next, you'll parameterize your load test by using workflow variables. These parameters can be secrets, such as passwords, or non-secrets. For more information, see [Parameterize load tests with secrets and environment variables](./how-to-parameterize-load-tests.md). -In this tutorial, you'll reconfigure the sample application to accept only secure requests. To send a secure request, you need to pass a secret value in the HTTP request: +In this tutorial, you'll now use the *SampleApp_Secrets.jmx* JMeter test script. This script invokes an application endpoint that requires a secure value to be passed as an HTTP header. -1. Edit the *SampleApp.yaml* file in your GitHub repository. +1. Edit the *SampleApp.yaml* file in your GitHub repository and update the `testPlan` configuration setting to use the *SampleApp_Secrets.jmx* file. - Update the `testPlan` configuration setting to use the *SampleApp_Secrets.jmx* file: + The `testPlan` setting specifies which JMeter script Azure Load Testing uses. ```yml version: v0.1 @@ -308,6 +332,7 @@ In this tutorial, you'll reconfigure the sample application to accept only secur You've now created a GitHub Actions workflow that uses Azure Load Testing for automatically running load tests. By using pass/fail criteria, you can set the status of the CI/CD workflow. With parameters, you can make the running of load tests configurable. 
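The parameters that the workflow passes to the load test use the same JSON array of name/value pairs as the `env` input shown earlier in this tutorial. The following sketch only illustrates that shape; the `appToken` parameter name and the `MY_SECRET` repository secret are hypothetical placeholders, and the GitHub Actions expression is resolved by the workflow before the value reaches the JMeter script:

```json
[
  {
    "name": "appToken",
    "value": "${{ secrets.MY_SECRET }}"
  }
]
```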
+* Learn more about the [key concepts for Azure Load Testing](./concept-load-testing-concepts.md). * Learn more about the [Azure Load Testing Action](https://github.com/marketplace/actions/azure-load-testing). * Learn how to [parameterize a load test](./how-to-parameterize-load-tests.md). * Learn how to [define test pass/fail criteria](./how-to-define-test-criteria.md). \ No newline at end of file diff --git a/articles/logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md b/articles/logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md index 6db741669a471..6f4862923eec7 100644 --- a/articles/logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md +++ b/articles/logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md @@ -1,11 +1,11 @@ --- -title: Schedules for recurring triggers in workflows -description: An overview about scheduling recurring automated workflows in Azure Logic Apps. +title: About schedules for recurring triggers in workflows +description: An overview about schedules for recurring workflows in Azure Logic Apps. services: logic-apps ms.suite: integration ms.reviewer: estfan, azla ms.topic: conceptual -ms.date: 03/17/2022 +ms.date: 05/27/2022 --- # Schedules for recurring triggers in Azure Logic Apps workflows @@ -52,12 +52,14 @@ Here are the differences between these triggers: If you select **Day** as the frequency, you can specify the hours of the day and minutes of the hour, for example, every day at 2:30. If you select **Week** as the frequency, you can also select days of the week, such as Wednesday and Saturday. You can also specify a start date and time along with a time zone for your recurrence schedule. For more information about time zone formatting, see [Add a Recurrence trigger](../connectors/connectors-native-recurrence.md#add-the-recurrence-trigger). > [!IMPORTANT] - > If you use the **Day** or **Week** frequency and specify a future date and time, make sure that you set up the recurrence in advance: + > If you use the **Day**, **Week**, or **Month** frequency, and you specify a future date and time, make sure that you set up the recurrence in advance: > > * **Day**: Set up the daily recurrence at least 24 hours in advance. > > * **Week**: Set up the weekly recurrence at least 7 days in advance. - > + > + > * **Month**: Set up the monthly recurrence at least one month in advance. + > > Otherwise, the workflow might skip the first recurrence. > > If a recurrence doesn't specify a specific [start date and time](#start-time), the first recurrence runs immediately @@ -66,7 +68,7 @@ Here are the differences between these triggers: > > If a recurrence doesn't specify any other advanced scheduling options such as specific times to run future recurrences, > those recurrences are based on the last run time. As a result, the start times for those recurrences might drift due to - > factors such as latency during storage calls. To make sure that your logic app doesn't miss a recurrence, especially when + > factors such as latency during storage calls. 
To make sure that your workflow doesn't miss a recurrence, especially when > the frequency is in days or longer, try these options: > > * Provide a start date and time for the recurrence plus the specific times when to run subsequent recurrences by using the properties @@ -100,7 +102,7 @@ Here are some patterns that show how you can control recurrence with the start d |------------|-----------------------------|----------------------------------------------------| | {none} | Runs the first workload instantly.

                  Runs future workloads based on the last run time. | Runs the first workload instantly.

                  Runs future workloads based on the specified schedule. | | Start time in the past | **Recurrence** trigger: Calculates run times based on the specified start time and discards past run times.

                  Runs the first workload at the next future run time.

                  Runs future workloads based on the last run time.

                  **Sliding Window** trigger: Calculates run times based on the specified start time and honors past run times.

                  Runs future workloads based on the specified start time.

                  For more explanation, see the example following this table. | Runs the first workload *no sooner* than the start time, based on the schedule calculated from the start time.

                  Runs future workloads based on the specified schedule.

                  **Note:** If you specify a recurrence with a schedule, but don't specify hours or minutes for the schedule, Azure Logic Apps calculates future run times by using the hours or minutes, respectively, from the first run time. | -| Start time now or in the future | Runs the first workload at the specified start time.

                  **Recurrence** trigger: Runs future workloads based on the last run time.

                  **Sliding Window** trigger: Runs future workloads based on the specified start time. | Runs the first workload *no sooner* than the start time, based on the schedule calculated from the start time.

                  Runs future workloads based on the specified schedule. If you use the **Day** or **Week** frequency and specify a future date and time, make sure that you set up the recurrence in advance:

                  - **Day**: Set up the daily recurrence at least 24 hours in advance.

                  - **Week**: Set up the weekly recurrence at least 7 days in advance.

                  Otherwise, the workflow might skip the first recurrence.

                  **Note:** If you specify a recurrence with a schedule, but don't specify hours or minutes for the schedule, Azure Logic Apps calculates future run times by using the hours or minutes, respectively, from the first run time. | +| Start time now or in the future | Runs the first workload at the specified start time.

                  **Recurrence** trigger: Runs future workloads based on the last run time.

                  **Sliding Window** trigger: Runs future workloads based on the specified start time. | Runs the first workload *no sooner* than the start time, based on the schedule calculated from the start time.

                  Runs future workloads based on the specified schedule. If you use the **Day**, **Week**, or **Month** frequency, and you specify a future date and time, make sure that you set up the recurrence in advance:

                  - **Day**: Set up the daily recurrence at least 24 hours in advance.

                  - **Week**: Set up the weekly recurrence at least 7 days in advance.

                  - **Month**: Set up the monthly recurrence at least one month in advance.

                  Otherwise, the workflow might skip the first recurrence.

                  **Note:** If you specify a recurrence with a schedule, but don't specify hours or minutes for the schedule, Azure Logic Apps calculates future run times by using the hours or minutes, respectively, from the first run time. | |||| *Example for past start time and recurrence but no schedule* diff --git a/articles/logic-apps/create-managed-service-identity.md b/articles/logic-apps/create-managed-service-identity.md index d0f0592b4d854..747e559608092 100644 --- a/articles/logic-apps/create-managed-service-identity.md +++ b/articles/logic-apps/create-managed-service-identity.md @@ -707,7 +707,7 @@ As a specific example, suppose that you want to run the [Snapshot Blob operation > [!IMPORTANT] > To access Azure storage accounts behind firewalls by using HTTP requests and managed identities, -> make sure that you also set up your storage account with the [exception that allows access by trusted Microsoft services](../connectors/connectors-create-api-azureblobstorage.md#access-blob-storage-with-managed-identities). +> make sure that you also set up your storage account with the [exception that allows access by trusted Microsoft services](../connectors/connectors-create-api-azureblobstorage.md#access-blob-storage-in-same-region-with-managed-identities). To run the [Snapshot Blob operation](/rest/api/storageservices/snapshot-blob), the HTTP action specifies these properties: diff --git a/articles/logic-apps/create-serverless-apps-visual-studio.md b/articles/logic-apps/create-serverless-apps-visual-studio.md index a40110010597d..80d9b8d3d79c1 100644 --- a/articles/logic-apps/create-serverless-apps-visual-studio.md +++ b/articles/logic-apps/create-serverless-apps-visual-studio.md @@ -1,6 +1,6 @@ --- title: Create an example serverless app with Visual Studio -description: Create, deploy, and manage an example serverless app with an Azure quickstart template, Azure Logic Apps and Azure Functions in Visual Studio. +description: Create, deploy, and manage an example serverless app with an Azure Quickstart Template, Azure Logic Apps and Azure Functions in Visual Studio. services: logic-apps ms.suite: integration ms.reviewer: estfan, azla @@ -10,9 +10,11 @@ ms.date: 07/15/2021 # Create an example serverless app with Azure Logic Apps and Azure Functions in Visual Studio +[!INCLUDE [logic-apps-sku-consumption](../../includes/logic-apps-sku-consumption.md)] + You can quickly create, build, and deploy cloud-based "serverless" apps by using the services and capabilities in Azure, such as Azure Logic Apps and Azure Functions. When you use Azure Logic Apps, you can quickly and easily build workflows using low-code or no-code approaches to simplify orchestrating combined tasks. You can integrate different services, cloud, on-premises, or hybrid, without coding those interactions, having to maintain glue code, or learn new APIs or specifications. When you use Azure Functions, you can speed up development by using an event-driven model. You can use triggers that respond to events by automatically running your own code. You can use bindings to seamlessly integrate other services. -This article shows how to create an example serverless app that runs in multi-tenant Azure by using an Azure Quickstart template. The template creates an Azure resource group project that includes an Azure Resource Manager deployment template. This template defines a basic logic app resource where a predefined a workflow includes a call to an Azure function that you define. 
The workflow definition includes the following components: +This article shows how to create an example serverless app that runs in multi-tenant Azure by using an Azure Quickstart Template. The template creates an Azure resource group project that includes an Azure Resource Manager deployment template. This template defines a basic logic app resource where a predefined workflow includes a call to an Azure function that you define. The workflow definition includes the following components: * A Request trigger that receives HTTP requests. To start this trigger, you send a request to the trigger's URL. * An Azure Functions action that calls an Azure function that you can later define. diff --git a/articles/logic-apps/deploy-single-tenant-logic-apps-private-storage-account.md b/articles/logic-apps/deploy-single-tenant-logic-apps-private-storage-account.md index 036e01500cb86..bf30e735e818b 100644 --- a/articles/logic-apps/deploy-single-tenant-logic-apps-private-storage-account.md +++ b/articles/logic-apps/deploy-single-tenant-logic-apps-private-storage-account.md @@ -1,17 +1,19 @@ --- -title: Deploy single-tenant logic apps to private storage accounts -description: How to deploy Standard logic app workflows to Azure storage accounts that use private endpoints and deny public access. +title: Deploy Standard logic apps to private storage accounts +description: Deploy Standard logic app workflows to Azure storage accounts that use private endpoints and deny public access. services: logic-apps ms.suite: integration ms.reviewer: estfan, azla ms.topic: how-to ms.date: 01/06/2022 -# As a developer, I want to deploy my single-tenant logic apps to Azure storage accounts using private endpoints +# As a developer, I want to deploy Standard logic apps to Azure storage accounts that use private endpoints. --- # Deploy single-tenant Standard logic apps to private storage accounts using private endpoints +[!INCLUDE [logic-apps-sku-standard](../../includes/logic-apps-sku-standard.md)] + When you create a single-tenant Standard logic app resource, you're required to have a storage account for storing logic app artifacts. You can restrict access to this storage account so that only the resources inside a virtual network can connect to your logic app workflow. Azure Storage supports adding private endpoints to your storage account. This article describes the steps to follow for deploying such logic apps to protected private storage accounts. For more information, review [Use private endpoints for Azure Storage](../storage/common/storage-private-endpoints.md). diff --git a/articles/logic-apps/designer-overview.md b/articles/logic-apps/designer-overview.md index 25667843cd6fa..775750360e453 100644 --- a/articles/logic-apps/designer-overview.md +++ b/articles/logic-apps/designer-overview.md @@ -1,5 +1,5 @@ --- -title: About single-tenant workflow designer +title: About Standard logic app workflow designer description: Learn how the designer in single-tenant Azure Logic Apps helps you visually create workflows through the Azure portal. Discover the benefits and features in this latest version. 
services: logic-apps ms.suite: integration @@ -8,7 +8,9 @@ ms.topic: conceptual ms.date: 06/30/2021 --- -# About the workflow designer in single-tenant Azure Logic Apps +# About the Standard logic app workflow designer in single-tenant Azure Logic Apps + +[!INCLUDE [logic-apps-sku-standard](../../includes/logic-apps-sku-standard.md)] When you work with Azure Logic Apps in the Azure portal, you can edit your [*workflows*](logic-apps-overview.md#workflow) visually or programmatically. After you open a [*logic app* resource](logic-apps-overview.md#logic-app) in the portal, on the resource menu under **Developer**, you can select between [**Code** view](#code-view) and **Designer** view. When you want to visually develop, edit, and run your workflow, select the designer view. You can switch between the designer view and code view at any time. diff --git a/articles/logic-apps/edit-app-settings-host-settings.md b/articles/logic-apps/edit-app-settings-host-settings.md index 4f71f52b7132a..c2de320ce34c3 100644 --- a/articles/logic-apps/edit-app-settings-host-settings.md +++ b/articles/logic-apps/edit-app-settings-host-settings.md @@ -1,6 +1,6 @@ --- -title: Edit runtime and environment settings in single-tenant Azure Logic Apps -description: Change the runtime and environment settings for logic apps in single-tenant Azure Logic Apps. +title: Edit runtime and environment settings for Standard logic apps +description: Change the runtime and environment settings for Standard logic apps in single-tenant Azure Logic Apps. services: logic-apps ms.suite: integration ms.reviewer: estfan, azla @@ -9,7 +9,9 @@ ms.date: 03/22/2022 ms.custom: fasttrack-edit --- -# Edit host and app settings for logic apps in single-tenant Azure Logic Apps +# Edit host and app settings for Standard logic apps in single-tenant Azure Logic Apps + +[!INCLUDE [logic-apps-sku-standard](../../includes/logic-apps-sku-standard.md)] In *single-tenant* Azure Logic Apps, the *app settings* for a logic app specify the global configuration options that affect *all the workflows* in that logic app. However, these settings apply *only* when these workflows run in your *local development environment*. Locally running workflows can access these app settings as *local environment variables*, which are used by local development tools for values that can often change between environments. For example, these values can contain connection strings. When you deploy to Azure, app settings are ignored and aren't included with your deployment. diff --git a/articles/logic-apps/estimate-storage-costs.md b/articles/logic-apps/estimate-storage-costs.md index dd7dfb27f15a4..3dc7b75a11279 100644 --- a/articles/logic-apps/estimate-storage-costs.md +++ b/articles/logic-apps/estimate-storage-costs.md @@ -1,6 +1,6 @@ --- title: Estimate storage costs for single-tenant Azure Logic Apps -description: Estimate storage costs for your workflows using the Logic Apps Storage Calculator. +description: Estimate storage costs for Standard logic app workflows using the Logic Apps Storage Calculator. services: logic-apps ms.suite: integration ms.reviewer: estfan, azla @@ -8,7 +8,9 @@ ms.topic: how-to ms.date: 11/10/2021 --- -# Estimate storage costs for workflows in single-tenant Azure Logic Apps +# Estimate storage costs for Standard logic app workflows in single-tenant Azure Logic Apps + +[!INCLUDE [logic-apps-sku-standard](../../includes/logic-apps-sku-standard.md)] Azure Logic Apps uses [Azure Storage](../storage/index.yml) for any storage operations. 
In traditional *multi-tenant* Azure Logic Apps, any storage usage and costs are attached to the logic app. Now, in *single-tenant* Azure Logic Apps, you can use your own storage account. These storage costs are listed separately in your Azure billing invoice. This capability gives you more flexibility and control over your logic app data. diff --git a/articles/logic-apps/healthy-unhealthy-resource.md b/articles/logic-apps/healthy-unhealthy-resource.md index be297a22cd7bc..b6f21fb7da90f 100644 --- a/articles/logic-apps/healthy-unhealthy-resource.md +++ b/articles/logic-apps/healthy-unhealthy-resource.md @@ -1,6 +1,6 @@ --- title: Set up logging to monitor logic apps in Azure Security Center -description: Monitor the health of your Logic Apps resources in Azure Security Center by setting up diagnostic logging. +description: Monitor health for Azure Logic Apps resources in Azure Security Center by setting up diagnostic logging. services: logic-apps ms.suite: integration ms.reviewer: estfan, azla @@ -10,10 +10,10 @@ ms.date: 12/07/2020 # Set up logging to monitor logic apps in Microsoft Defender for Cloud -When you monitor your Logic Apps resources in [Microsoft Azure Security Center](../security-center/security-center-introduction.md), you can [review whether your logic apps are following the default policies](#view-logic-apps-health-status). Azure shows the health status for a Logic Apps resource after you enable logging and correctly set up the logs' destination. This article explains how to configure diagnostic logging and make sure that all your logic apps are healthy resources. +When you monitor your Azure Logic Apps resources in [Microsoft Azure Security Center](../security-center/security-center-introduction.md), you can [review whether your logic apps are following the default policies](#view-logic-apps-health-status). Azure shows the health status for an Azure Logic Apps resource after you enable logging and correctly set up the logs' destination. This article explains how to configure diagnostic logging and make sure that all your logic apps are healthy resources. > [!TIP] -> To find the current status for the Logic Apps service, review the [Azure status page](https://status.azure.com/), which lists the status for different products and services in each available region. +> To find the current status for the Azure Logic Apps service, review the [Azure status page](https://status.azure.com/), which lists the status for different products and services in each available region. ## Prerequisites @@ -28,7 +28,7 @@ When you monitor your Logic Apps resources in [Microsoft Azure Security Center]( Before you can view the resource health status for your logic apps, you must first [set up diagnostic logging](monitor-logic-apps-log-analytics.md). If you already have a Log Analytics workspace, you can enable logging either when you create your logic app or on existing logic apps. > [!TIP] -> The default recommendation is to enable diagnostic logs for Logic Apps. However, you control this setting for your logic apps. When you enable diagnostic logs for your logic apps, you can use the information to help analyze security incidents. +> The default recommendation is to enable diagnostic logs for Azure Logic Apps. However, you control this setting for your logic apps. When you enable diagnostic logs for your logic apps, you can use the information to help analyze security incidents. 
### Check diagnostic logging setting @@ -38,7 +38,7 @@ If you're not sure whether your logic apps have diagnostic logging enabled, you 1. In the search bar, enter and select **Defender for Cloud**. 1. On the workload protection dashboard menu, under **General**, select **Recommendations**. 1. In the table of security suggestions, find and select **Enable auditing and logging** > **Diagnostic logs in Logic Apps should be enabled** in the table of security controls. -1. On the recommendation page, expand the **Remediation steps** section and review the options. You can enable Logic Apps diagnostics by selecting the **Quick Fix!** button, or by following the manual remediation instructions. +1. On the recommendation page, expand the **Remediation steps** section and review the options. You can enable Azure Logic Apps diagnostics by selecting the **Quick Fix!** button, or by following the manual remediation instructions. ## View logic apps' health status @@ -47,7 +47,7 @@ After you've [enabled diagnostic logging](#enable-diagnostic-logging), you can s 1. Sign in to the [Azure portal](https://portal.azure.com). 1. In the search bar, enter and select **Defender for Cloud**. 1. On the workload protection dashboard menu, under **General**, select **Inventory**. -1. On the inventory page, filter your assets list to show only Logic Apps resources. In the page menu, select **Resource types** > **logic apps**. +1. On the inventory page, filter your assets list to show only Azure Logic Apps resources. In the page menu, select **Resource types** > **logic apps**. The **Unhealthy Resources** counter shows the number of logic apps that Defender for Cloud considers unhealthy. 1. In the list of logic apps resources, review the **Recommendations** column. To review the health details for a specific logic app, select a resource name, or select the ellipses button (**...**) > **View resource**. @@ -61,7 +61,7 @@ If your [logic apps are listed as unhealthy in Defender for Cloud](#view-logic-a ### Log Analytics and Event Hubs destinations -If you use Log Analytics or Event Hubs as the destination for your Logic Apps diagnostic logs, check the following settings. +If you use Log Analytics or Event Hubs as the destination for your Azure Logic Apps diagnostic logs, check the following settings. 1. To confirm that you enabled diagnostic logs, check that the diagnostic settings `logs.enabled` field is set to `true`. 1. To confirm that you haven't set a storage account as the destination instead, check that the `storageAccountId` field is set to `false`. @@ -91,7 +91,7 @@ For example: ### Storage account destination -If you use a storage account as the destination for your Logic Apps diagnostic logs, check the following settings. +If you use a storage account as the destination for your Azure Logic Apps diagnostic logs, check the following settings. 1. To confirm that you enabled diagnostic logs, check that the diagnostics settings `logs.enabled` field is set to `true`. 1. To confirm that you enabled a retention policy for your diagnostic logs, check that the `retentionPolicy.enabled` field is set to `true`. 
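For orientation, the following sketch shows roughly how these fields can appear in a diagnostic settings object that uses a storage account destination. The setting name, the resource ID placeholders, and the 30-day retention period are example values only, and `WorkflowRuntime` is used here as an illustrative log category:

```json
{
  "name": "logicAppDiagnosticSetting",
  "properties": {
    "storageAccountId": "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>",
    "logs": [
      {
        "category": "WorkflowRuntime",
        "enabled": true,
        "retentionPolicy": {
          "enabled": true,
          "days": 30
        }
      }
    ]
  }
}
```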
diff --git a/articles/logic-apps/logic-apps-add-run-inline-code.md b/articles/logic-apps/logic-apps-add-run-inline-code.md index 4b1c6a9e7c168..acabc092cb6ea 100644 --- a/articles/logic-apps/logic-apps-add-run-inline-code.md +++ b/articles/logic-apps/logic-apps-add-run-inline-code.md @@ -1,120 +1,222 @@ --- -title: Add and run code snippets by using inline code -description: Learn how to create and run code snippets by using inline code actions for automated tasks and workflows that you create with Azure Logic Apps. +title: Run code snippets in workflows +description: Run code snippets in workflows using Inline Code operations in Azure Logic Apps. services: logic-apps ms.suite: integration ms.reviewer: deli, estfan, azla ms.topic: how-to -ms.date: 05/25/2021 +ms.date: 05/24/2022 ms.custom: devx-track-js --- -# Add and run code snippets by using inline code in Azure Logic Apps +# Run code snippets in workflows with Inline Code operations in Azure Logic Apps -When you want to run a piece of code inside your logic app workflow, you can add the built-in Inline Code action as a step in your logic app's workflow. This action works best when you want to run code that fits this scenario: +To create and run a code snippet in your logic app workflow without much setup, you can use the **Inline Code** built-in connector. This connector has an action that returns the result from the code snippet so that you can use that output in your workflow's subsequent actions. -* Runs in JavaScript. More languages are in development. +Currently, the connector only has a single action, which works best for a code snippet with the following attributes, but more actions are in development. The Inline Code connector also has +[different limits](logic-apps-limits-and-config.md#inline-code-action-limits), based on whether your logic app workflow is [Consumption or Standard](logic-apps-overview.md#resource-environment-differences). -* Finishes running in five seconds or fewer. +| Action | Language | Language version | Run duration | Data size | Other notes | +|--------|----------|------------------|--------------|-----------|-------------| +| **Execute JavaScript Code** | JavaScript | **Standard**:
Node.js 12.x.x or 14.x.x <br><br>**Consumption**: <br>Node.js 8.11.1 <br><br>For more information, review [Standard built-in objects](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects). | Finishes in 5 seconds or fewer. | Handles data up to 50 MB. | - Doesn't require working with the [**Variables** actions](logic-apps-create-variables-store-values.md), which are unsupported by the action. <br><br>
                  - Doesn't support the `require()` function for running JavaScript. | +||||||| -* Handles data up to 50 MB in size. +To run code that doesn't fit these attributes, you can [create and call a function through Azure Functions](logic-apps-azure-functions.md) instead. -* Doesn't require working with the [**Variables** actions](../logic-apps/logic-apps-create-variables-store-values.md), which are not yet supported. +This article shows how the action works in an example workflow that starts with an Office 365 Outlook trigger. The workflow runs when a new email arrives in the associated Outlook email account. The sample code snippet extracts any email addresses that exist the email body and returns those addresses as output that you can use in a subsequent action. -* Uses Node.js version 8.11.1 for [multi-tenant based logic apps](logic-apps-overview.md) or [Node.js versions 12.x.x or 14.x.x](https://nodejs.org/en/download/releases/) for [single-tenant based logic apps](single-tenant-overview-compare.md). +The following diagram shows the highlights from example workflow: - For more information, see [Standard built-in objects](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects). +### [Consumption](#tab/consumption) - > [!NOTE] - > The `require()` function isn't supported by the Inline Code action for running JavaScript. +![Screenshot showing an example Consumption logic app workflow with the Inline Code action.](./media/logic-apps-add-run-inline-code/inline-code-overview-consumption.png) -This action runs the code snippet and returns the output from that snippet as a token that's named `Result`. You can use this token with subsequent actions in your logic app's workflow. For other scenarios where you want to create a function for your code, try [creating and calling a function through Azure Functions instead](../logic-apps/logic-apps-azure-functions.md) in your logic app. +### [Standard](#tab/standard) -In this article, the example logic app triggers when a new email arrives in a work or school account. The code snippet extracts and returns any email addresses that appear in the email body. +![Screenshot showing an example Standard logic app workflow with the Inline Code action.](./media/logic-apps-add-run-inline-code/inline-code-overview-standard.png) -![Screenshot that shows an example logic app](./media/logic-apps-add-run-inline-code/inline-code-example-overview.png) +--- ## Prerequisites -* An Azure account and subscription. If you don't have an Azure subscription, [sign up for a free Azure account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). +* An Azure account and subscription. If you don't have a subscription, [sign up for a free Azure account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). + +* The logic app workflow where you want to add your code snippet. The workflow must already start with a trigger. -* The logic app workflow where you want to add your code snippet, including a trigger. The example in this topic uses the Office 365 Outlook trigger that's named **When a new email arrives**. + This article's example uses the Office 365 Outlook trigger that's named **When a new email arrives**. 
- If you don't have a logic app, review the following documentation: + If you don't have a workflow, review the following documentation: - * Multi-tenant: [Quickstart: Create your first logic app](../logic-apps/quickstart-create-first-logic-app-workflow.md) - * Single-tenant: [Create single-tenant based logic app workflows](create-single-tenant-workflows-azure-portal.md) + * Consumption: [Quickstart: Create your first logic app](quickstart-create-first-logic-app-workflow.md) -* Based on whether your logic app is multi-tenant or single-tenant, review the following information. + * Standard: [Create single-tenant based logic app workflows](create-single-tenant-workflows-azure-portal.md) - * Multi-tenant: Requires Node.js version 8.11.1. You also need an empty [integration account](../logic-apps/logic-apps-enterprise-integration-create-integration-account.md) that's linked to your logic app. Make sure that you use an integration account that's appropriate for your use case or scenario. +* Based on whether your logic app is Consumption or Standard, review the following requirements: - For example, [Free-tier](../logic-apps/logic-apps-pricing.md#integration-accounts) integration accounts are meant only for exploratory scenarios and workloads, not production scenarios, are limited in usage and throughput, and aren't supported by a service-level agreement (SLA). + * Consumption: Requires [Node.js version 8.11.10](https://nodejs.org/en/download/releases/) and a [link to an integration account](logic-apps-enterprise-integration-create-integration-account.md), empty or otherwise, from your logic app resource. - Other integration account tiers incur costs, but include SLA support, offer more throughput, and have higher limits. Learn more about integration account [tiers](../logic-apps/logic-apps-pricing.md#integration-accounts), [pricing](https://azure.microsoft.com/pricing/details/logic-apps/), and [limits](../logic-apps/logic-apps-limits-and-config.md#integration-account-limits). + > [!IMPORTANT] + > + > Make sure that you use an integration account that's appropriate for your use case or scenario. + > + > For example, [Free-tier](logic-apps-pricing.md#integration-accounts) integration accounts are meant only + > for exploratory scenarios and workloads, not production scenarios, are limited in usage and throughput, + > and aren't supported by a service-level agreement (SLA). + > + > Other integration account tiers incur costs, but include SLA support, offer more throughput, and have higher limits. + > Learn more about [integration account tiers](logic-apps-pricing.md#integration-accounts), + > [limits](logic-apps-limits-and-config.md#integration-account-limits), and + > [pricing](https://azure.microsoft.com/pricing/details/logic-apps/). - * Single-tenant: Requires [Node.js versions 10.x.x, 11.x.x, or 12.x.x](https://nodejs.org/en/download/releases/). However, you don't need an integration account, but the Inline Code action is renamed **Inline Code Operations** and has [updated limits](logic-apps-limits-and-config.md). + * Standard: Requires [Node.js versions 12.x.x or 14.x.x](https://nodejs.org/en/download/releases/), but no integration account. -## Add inline code +## Add the Inline Code action -1. If you haven't already, in the [Azure portal](https://portal.azure.com), open your logic app workflow in the designer. +### [Consumption](#tab/consumption) -1. In your workflow, choose where to add the Inline Code action, either as a new step at the end of your workflow or between steps. +1. 
In the [Azure portal](https://portal.azure.com), open your logic app workflow in the designer. - To add the action between steps, move your mouse pointer over the arrow that connects those steps. Select the plus sign (**+**) that appears, and select **Add an action**. +1. On the designer, add the Inline Code action to your workflow. You can add an action either as a new step at the end of your workflow or between steps. This example adds the action under the Office 365 Outlook trigger. - This example adds the action under the Office 365 Outlook trigger. + * To add the action at the end of your workflow, select **New step**. - ![Add the new step under the trigger](./media/logic-apps-add-run-inline-code/add-new-step.png) + * To add the action between steps, move your mouse pointer over the arrow that connects those steps. Select the plus sign (**+**) that appears, and select **Add an action**. -1. In the action search box, enter `inline code`. From the actions list, select the action named **Execute JavaScript Code**. +1. In the **Choose an operation** search box, enter **inline code**. From the actions list, select the action named **Execute JavaScript Code**. - ![Select the "Execute JavaScript Code" action](./media/logic-apps-add-run-inline-code/select-inline-code-action.png) + ![Screenshot showing Consumption workflow designer and "Execute JavaScript Code" action selected.](./media/logic-apps-add-run-inline-code/select-inline-code-action-consumption.png) The action appears in the designer and by default, contains some sample code, including a `return` statement. - ![Inline Code action with default sample code](./media/logic-apps-add-run-inline-code/inline-code-action-default.png) + ![Screenshot showing the Inline Code action with default sample code.](./media/logic-apps-add-run-inline-code/inline-code-action-default-consumption.png) 1. In the **Code** box, delete the sample code, and enter your code. Write the code that you'd put inside a method, but without the method signature. + > [!TIP] + > + > When your cursor is in the **Code** box, the dynamic content list appears. Although you'll + > use this list later, you can ignore and leave the list open for now. Don't select **Hide**. + If you start typing a recognized keyword, the autocomplete list appears so that you can select from available keywords, for example: - ![Keyword autocomplete list](./media/logic-apps-add-run-inline-code/auto-complete.png) + ![Screenshot showing the Consumption workflow, Inline Code action, and keyword autocomplete list.](./media/logic-apps-add-run-inline-code/auto-complete-consumption.png) - This example code snippet first creates a variable that stores a *regular expression*, which specifies a pattern to match in input text. The code then creates a variable that stores the email body data from the trigger. + The following example code snippet first creates a variable named **myResult** that stores a *regular expression*, which specifies a pattern to match in input text. The code then creates a variable named **email** that stores the email message's body content from the trigger outputs. 
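   Although the designer screenshots show the sample code, the following text-only sketch approximates that snippet. The simple email-matching pattern and the resolved token path are illustrative assumptions rather than the article's literal sample, and the **Body** token is the one that you select from the dynamic content list in the next step.

   ```javascript
   // Sketch only: a basic email-matching pattern stands in for the sample's regular expression.
   var myResult = /[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}/g;

   // The "Body" token selected from the dynamic content list resolves to the trigger's
   // message body through the read-only workflowContext object.
   var email = workflowContext.trigger.outputs.body.Body;

   // Return all matched email addresses so that later actions can reference them through the Result token.
   return email.match(myResult);
   ```

   A later action, such as **Create HTML table**, can then reference these matches through the **Result** token, as described later in this procedure.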
- ![Create variables](./media/logic-apps-add-run-inline-code/save-email-body-variable.png) + ![Screenshot showing the Consumption workflow, Inline Code action, and example code that creates variables.](./media/logic-apps-add-run-inline-code/save-email-body-variable-consumption.png) - To make the results from the trigger and previous actions easier to reference, the dynamic content list appears when your cursor is inside the **Code** box. For this example, the list shows available results from the trigger, including the **Body** token, which you can now select. +1. With your cursor still in the **Code** box, from the open dynamic content list, find the **When a new email arrives** section, and select the **Body** property, which references the email message's body. - After you select the **Body** token, the inline code action resolves the token to a `workflowContext` object that references the email's `Body` property value: + ![Screenshot showing the Consumption workflow, Inline Code action, dynamic content list, and email message's "Body" property selected.](./media/logic-apps-add-run-inline-code/select-output-consumption.png) - ![Select result](./media/logic-apps-add-run-inline-code/inline-code-example-select-outputs.png) + The dynamic content list shows the outputs from the trigger and any preceding actions when those outputs match the input format for the edit box that's currently in focus. This list makes these outputs easier to use and reference from your workflow. For this example, the list shows the outputs from the Outlook trigger, including the email message's **Body** property. - In the **Code** box, your snippet can use the read-only `workflowContext` object as input. This object includes properties that give your code access to the results from the trigger and previous actions in your workflow. For more information, see [Reference trigger and action results in your code](#workflowcontext) later in this topic. + After you select the **Body** property, the Inline Code action resolves the token to a read-only `workflowContext` JSON object, which your snippet can use as input. The `workflowContext` object includes properties that give your code access to the outputs from the trigger and preceding actions in your workflow, such as the trigger's `body` property, which differs from the email message's **Body** property. For more information about the `workflowContext` object, see [Reference trigger and action outputs using the workflowContext object](#workflowcontext) later in this article. - > [!NOTE] - > If your code snippet references action names that use the dot (.) operator, you must add those - > action names to the [**Actions** parameter](#add-parameters). Those references must also enclose - > the action names with square brackets ([]) and quotation marks, for example: + > [!IMPORTANT] + > + > If your code snippet references action names that include the dot (**.**) operator, + > those references have to enclose these action names with square brackets (**[]**) + > and quotation marks (**""**), for example: > - > `// Correct`
- > `workflowContext.actions["my.action.name"].body`<br>
+ > `// Correct`<br>
+ > `workflowContext.actions["my.action.name"].body` > > `// Incorrect`<br>
                  > `workflowContext.actions.my.action.name.body` + > + > Also, in the Inline Code action, you have to add the [**Actions** parameter](#add-parameters) + > and then add these action names to that parameter. For more information, see + > [Add dependencies as parameters to an Inline Code action](#add-parameters) later in this article. + +1. To differentiate the email message's **Body** property that you selected from the trigger's `body` property, rename the second `body` property to `Body` instead. Add the closing semicolon (**;**) at the end to finish the code statement. + + ![Screenshot showing the Consumption logic app workflow, Inline Code action, and renamed "Body" property with closing semicolon.](./media/logic-apps-add-run-inline-code/rename-body-property-consumption.png) + + The Inline Code action doesn't syntactically require a `return` statement. However, by including the `return` statement, you can more easily reference the action results later in your workflow by using the **Result** token in later actions. + + In this example, the code snippet returns the result by calling the `match()` function, which finds any matches in the email message body to the specified regular expression. The **Create HTML table** action then uses the **Result** token to reference the results from the Inline Code action and creates a single result. + + ![Screenshot showing the finished Consumption logic app workflow.](./media/logic-apps-add-run-inline-code/inline-code-complete-example-consumption.png) + +1. When you're done, save your workflow. + +### [Standard](#tab/standard) + +1. In the [Azure portal](https://portal.azure.com), open your logic app workflow in the designer. + +1. On the designer, add the Inline Code action to your workflow. You can add an action either as a new step at the end of your workflow or between steps. This example adds the action under the Office 365 Outlook trigger. + + * To add the action at the end of your workflow, select the plus sign (**+**), and then select **Add an action**. + + * To add the action between steps, move your mouse pointer over the arrow that connects those steps. Select the plus sign (**+**) that appears, and select **Add an action**. + +1. In the **Choose an operation** search box, enter **inline code**. From the actions list, select the action named **Execute JavaScript Code**. - The Inline Code action doesn't require a `return` statement, but the results from a `return` statement are available for reference in later actions through the **Result** token. For example, the code snippet returns the result by calling the `match()` function, which finds matches in the email body against the regular expression. The **Compose** action uses the **Result** token to reference the results from the inline code action and creates a single result. + ![Screenshot showing Standard workflow designer and "Execute JavaScript Code" action selected.](./media/logic-apps-add-run-inline-code/select-inline-code-action-standard.png) - ![Finished logic app](./media/logic-apps-add-run-inline-code/inline-code-complete-example.png) +1. In the **code** box, enter your code. Write the code that you'd put inside a method, but without the method signature. + + > [!TIP] + > + > When your cursor is in the **code** box, the dynamic content list appears. Although you'll + > use this list later, you can ignore and leave the list open for now. Don't select **Hide**. 
+ + If you start typing a recognized keyword, the autocomplete list appears so that you can select from available keywords, for example: + + ![Screenshot showing the Standard workflow, Inline Code action, and keyword autocomplete list.](./media/logic-apps-add-run-inline-code/auto-complete-standard.png) + + The following example code snippet first creates a variable named **myResult** that stores a *regular expression*, which specifies a pattern to match in input text. The code then creates a variable named **email** that stores the email message's body content from the trigger outputs. + + ![Screenshot showing the Standard workflow, Inline Code action, and example code that creates variables.](./media/logic-apps-add-run-inline-code/save-email-body-variable-standard.png) + +1. With your cursor still in the **code** box, from the open dynamic content list, find the **When a new email arrives** section, and select the **Body** token, which references the email's message body. + + ![Screenshot showing the Standard workflow, Inline Code action, dynamic content list, and email message's "Body" property selected.](./media/logic-apps-add-run-inline-code/select-output-standard.png) + + The dynamic content list shows the outputs from the trigger and any preceding actions where those outputs match the input format for the edit box that's currently in focus. This list makes these outputs easier to use and reference from your workflow. For this example, the list shows the outputs from the Outlook trigger, including the email message's **Body** property. + + After you select the **Body** property, the Inline Code action resolves the token to a read-only `workflowContext` JSON object, which your snippet can use as input. The `workflowContext` object includes properties that give your code access to the outputs from the trigger and preceding actions in your workflow, such as the trigger's `body` property, which differs from the email message's **Body** property. For more information about the `workflowContext` object, see [Reference trigger and action outputs using the workflowContext object](#workflowcontext) later in this article. + + > [!IMPORTANT] + > + > If your code snippet references action names that include the dot (**.**) operator, + > those references have to enclose these action names with square brackets (**[]**) + > and quotation marks (**""**), for example: + > + > `// Correct`
+ > `workflowContext.actions["my.action.name"].body` > > `// Incorrect`<br>
                  + > `workflowContext.actions.my.action.name.body` + > + > Also, in the Inline Code action, you have to add the **Actions** parameter + > and then add these action names to that parameter. For more information, see + > [Add dependencies as parameters to an Inline Code action](#add-parameters) later in this article. -1. When you're done, save your logic app. +1. To differentiate the email message's **Body** property that you selected from the trigger's `body` property, rename the second `body` property to `Body` instead. Add the closing semicolon (**;**) at the end to finish the code statement. + + ![Screenshot showing the Standard logic app workflow, Inline Code action, and renamed "Body" property with closing semicolon.](./media/logic-apps-add-run-inline-code/rename-body-property-standard.png) + + The Inline Code action doesn't syntactically require a `return` statement. However, by including the `return` statement, you can reference the action results later in your workflow by using the **Outputs** token in later actions. + + In this example, the code snippet returns the result by calling the `match()` function, which finds any matches in the email message body to the specified regular expression. + + ![Screenshot showing the Standard logic app workflow and Inline Code action with "return" statement.](./media/logic-apps-add-run-inline-code/return-statement-standard.png) + + The **Create HTML table** action then uses the **Outputs** token to reference the results from the Inline Code action and creates a single result. + + ![Screenshot showing the finished Standard logic app workflow.](./media/logic-apps-add-run-inline-code/inline-code-complete-example-standard.png) + +1. When you're done, save your workflow. + +--- -### Reference trigger and action results in your code +### Reference trigger and action outputs using the workflowContext object -The `workflowContext` object has this structure, which includes the `actions`, `trigger`, and `workflow` subproperties: +From inside your code snippet on the designer, you can use the dynamic content list to select a token that references the output from the trigger or any preceding action. When you select the token, the Inline Code action resolves that token to a read-only `workflowContext` JSON object. This object gives your code access to the outputs from the trigger, any preceding actions, and the workflow. The object uses the following structure and includes the `actions`, `trigger`, and `workflow` properties, which are also objects: ```json { @@ -133,16 +235,16 @@ The `workflowContext` object has this structure, which includes the `actions`, ` } ``` -This table contains more information about these subproperties: +The following table has more information about these properties: | Property | Type | Description | -|----------|------|-------| -| `actions` | Object collection | Result objects from actions that run before your code snippet runs. Each object has a *key-value* pair where the key is the name of an action, and the value is equivalent to calling the [actions() function](../logic-apps/workflow-definition-language-functions-reference.md#actions) with `@actions('')`. The action's name uses the same action name that's used in the underlying workflow definition, which replaces spaces (" ") in the action name with underscores (_). This object provides access to action property values from the current workflow instance run. 
| -| `trigger` | Object | Result object from the trigger and equivalent to calling the [trigger() function](../logic-apps/workflow-definition-language-functions-reference.md#trigger). This object provides access to trigger property values from the current workflow instance run. | -| `workflow` | Object | The workflow object and equivalent to calling the [workflow() function](../logic-apps/workflow-definition-language-functions-reference.md#workflow). This object provides access to workflow property values, such as the workflow name, run ID, and so on, from the current workflow instance run. | -||| +|----------|------|-------------| +| `actions` | Object collection | The result objects from any preceding actions that run before your code snippet runs. Each object has a *key-value* pair where the key is the action name, and the value is equivalent to the result from calling the [actions() function](workflow-definition-language-functions-reference.md#actions) with the `@actions('')` expression.

                  The action's name uses the same action name that appears in the underlying workflow definition, which replaces spaces (**" "**) in the action name with underscores (**\_**). This object collection provides access to the action's property values from the current workflow instance run. | +| `trigger` | Object | The result object from the trigger where the result is the equivalent to calling the [trigger() function](workflow-definition-language-functions-reference.md#trigger). This object provides access to trigger's property values from the current workflow instance run. | +| `workflow` | Object | The workflow object that is the equivalent to calling the [workflow() function](workflow-definition-language-functions-reference.md#workflow). This object provides access to the property values, such as the workflow name, run ID, and so on, from the current workflow instance run. | +|||| -In this topic's example, the `workflowContext` object has these properties that your code can access: +In this article's example, the `workflowContext` JSON object might have the following sample properties and values from the Outlook trigger: ```json { @@ -212,65 +314,98 @@ In this topic's example, the `workflowContext` object has these properties that -## Add parameters +## Add dependencies as parameters to an Inline Code action -In some cases, you might have to explicitly require that the Inline Code action includes results from the trigger or specific actions that your code references as dependencies by adding the **Trigger** or **Actions** parameters. This option is useful for scenarios where the referenced results aren't found at run time. +In some scenarios, you might have to explicitly require that the Inline Code action includes outputs from the trigger or actions that your code references as dependencies. For example, you have to take this extra step when your code references outputs that aren't available at workflow run time. During workflow creation time, the Azure Logic Apps engine analyzes the code snippet to determine whether the code references any trigger or action outputs. If those references exist, the engine includes those outputs automatically. At workflow run time, if the referenced trigger or action output isn't found in the `workflowContext` object, the engine generates an error. To resolve this error, you have to add that trigger or action as an explicit dependency for the Inline Code action. Another scenario that requires you to take this step is when the `workflowContext` object references a trigger or action name that uses the dot operator (**.**). -> [!TIP] -> If you plan to reuse your code, add references to properties by using the **Code** box so that your code -> includes the resolved token references, rather than adding the trigger or actions as explicit dependencies. +To add a trigger or action as a dependency, you add the **Trigger** or **Actions** parameters as applicable to the Inline Code action. You then add the trigger or action names as they appear in your workflow's underlying JSON definition. -For example, suppose you have code that references the **SelectedOption** result from the **Send approval email** action for the Office 365 Outlook connector. At create time, the Logic Apps engine analyzes your code to determine whether you've referenced any trigger or action results and includes those results automatically. 
At run time, should you get an error that the referenced trigger or action result isn't available in the specified `workflowContext` object, you can add that trigger or action as an explicit dependency. In this example, you add the **Actions** parameter and specify that the Inline Code action explicitly include the result from the **Send approval email** action. +> [!NOTE] +> +> You can't add **Variables** operations, loops such as **For each** or **Until**, and iteration +> indexes as explicit dependencies. +> +> If you plan to reuse your code, make sure to always use the code snippet edit box to reference +> trigger and action outputs. That way, your code includes the resolved token references, rather than +> just add the trigger or action outputs as explicit dependencies. -To add these parameters, open the **Add new parameter** list, and select the parameters you want: +For example, suppose the Office 365 Outlook connector's **Send approval email** action precedes the code snippet in the sample workflow. The following example code snippet includes a reference to the **SelectedOption** output from this action. - ![Add parameters](./media/logic-apps-add-run-inline-code/inline-code-action-add-parameters.png) +### [Consumption](#tab/consumption) - | Parameter | Description | - |-----------|-------------| - | **Actions** | Include results from previous actions. See [Include action results](#action-results). | - | **Trigger** | Include results from the trigger. See [Include trigger results](#trigger-results). | - ||| - - - -### Include trigger results +![Screenshot that shows the Consumption workflow and Inline Code action with updated example code snippet.](./media/logic-apps-add-run-inline-code/add-actions-parameter-code-snippet-consumption.png) -If you select **Triggers**, you're prompted whether to include trigger results. +### [Standard](#tab/standard) -* From the **Trigger** list, select **Yes**. +![Screenshot that shows the Standard workflow and Inline Code action with updated example code snippet.](./media/logic-apps-add-run-inline-code/add-actions-parameter-code-snippet-standard.png) - +--- -### Include action results +For this example, you have to add only the **Actions** parameter, and then add the action's JSON name, `Send_approval_email`, to the parameter. That way, you specify that the Inline Code action explicitly includes the output from the **Send approval email** action. -If you select **Actions**, you're prompted for the actions that you want to add. However, before you start adding actions, you need the version of the action name that appears in the logic app's underlying workflow definition. +### Find the trigger or action's JSON name -* This capability doesn't support variables, loops, and iteration indexes. +Before you start, you need the JSON name for the trigger or action in the underlying workflow definition. -* Names in your logic app's workflow definition use an underscore (_), not a space. +* Names in your workflow definition use an underscore (_), not a space. -* For action names that use the dot operator (.), include those operators, for example: +* If an action name uses the dot operator (.), include that operator, for example: `My.Action.Name` -1. On the designer toolbar, select **Code view**, and search inside the `actions` attribute for the action name. +### [Consumption](#tab/consumption) + +1. On the workflow designer toolbar, select **Code view**. In the `actions` object, find the action's name. 
- For example, `Send_approval_email_` is the JSON name for the **Send approval email** action. + For example, `Send_approval_email` is the JSON name for the **Send approval email** action. - ![Find action name in JSON](./media/logic-apps-add-run-inline-code/find-action-name-json.png) + ![Screenshot showing the action name in JSON.](./media/logic-apps-add-run-inline-code/find-action-name-json.png) 1. To return to designer view, on the code view toolbar, select **Designer**. -1. To add the first action, in the **Actions Item - 1** box, enter the action's JSON name. +1. Now add the JSON name to the Inline Code action. + +### [Standard](#tab/standard) + +1. On the workflow menu, select **Code**. In the `actions` object, find the action's name. + + For example, `Send_approval_email` is the JSON name for the **Send approval email** action. + + ![Screenshot showing the action name in JSON.](./media/logic-apps-add-run-inline-code/find-action-name-json.png) + +1. To return to designer view, on the workflow menu, select **Designer**. + +1. Now add the JSON name to the Inline Code action. + +--- + +### Add the trigger or action name to the Inline Code action + +1. In the Inline Code action, open the **Add new parameter** list. + +1. From the parameters list, select the following parameters as your scenario requires. + + | Parameter | Description | + |-----------|-------------| + | **Actions** | Include outputs from preceding actions as dependencies. When you select this parameter, you're prompted for the actions that you want to add. | + | **Trigger** | Include outputs from the trigger as dependencies. When you select this parameter, you're prompted whether to include trigger results. So, from the **Trigger** list, select **Yes**. | + ||| + +1. For this example, select the **Actions** parameter. + + ![Screenshot showing the Inline Code action and "Actions" parameter selected.](./media/logic-apps-add-run-inline-code/add-actions-parameter.png) + +1. In the **Actions Item - 1** box, enter the action's JSON name. + + ![Screenshot showing the "Actions Item -1" box and the action's JSON name.](./media/logic-apps-add-run-inline-code/add-action-json-name.png) - ![Enter first action](./media/logic-apps-add-run-inline-code/add-action-parameter.png) +1. To add another action name, select **Add new item**. -1. To add another action, select **Add new item**. +1. When you're done, save your workflow. -## Reference +## Action reference -For more information about the **Execute JavaScript Code** action's structure and syntax in your logic app's underlying workflow definition using the Workflow Definition Language, see this action's [reference section](../logic-apps/logic-apps-workflow-actions-triggers.md#run-javascript-code). +For more information about the **Execute JavaScript Code** action's structure and syntax in your underlying workflow definition using the Workflow Definition Language, see this action's [reference section](logic-apps-workflow-actions-triggers.md#run-javascript-code). 
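As a rough illustration of the dependency scenario above, and not the article's literal sample, a snippet that reads the **SelectedOption** output from the **Send approval email** action might look like the following sketch. The property path (`body.SelectedOption`) follows the pattern in the earlier note but is an assumption about that action's output shape, and `Send_approval_email` is the action's JSON name found in the preceding steps.

```javascript
// Sketch only: assumes a preceding "Send approval email" action whose JSON name is Send_approval_email.
// Because this code references that action, add "Send_approval_email" to the Inline Code action's
// Actions parameter so that the action's output is available at run time.
var selectedOption = workflowContext.actions.Send_approval_email.body.SelectedOption;

// If an action name contains the dot (.) operator, use bracket-and-quote syntax instead:
// var selectedOption = workflowContext.actions["my.action.name"].body.SelectedOption;

return selectedOption;
```

Without the **Actions** parameter entry, the referenced output might not be included in the `workflowContext` object at run time, which is the error scenario that this section describes.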
## Next steps diff --git a/articles/logic-apps/logic-apps-batch-process-send-receive-messages.md b/articles/logic-apps/logic-apps-batch-process-send-receive-messages.md index 39645fe5f19f1..e4b1f0ae3fdbd 100644 --- a/articles/logic-apps/logic-apps-batch-process-send-receive-messages.md +++ b/articles/logic-apps/logic-apps-batch-process-send-receive-messages.md @@ -12,6 +12,8 @@ ms.date: 07/31/2020 # Send, receive, and batch process messages in Azure Logic Apps +[!INCLUDE [logic-apps-sku-consumption](../../includes/logic-apps-sku-consumption.md)] + To send and process messages together in a specific way as groups, you can create a batching solution. This solution collects messages into a *batch* and waits until your specified criteria are met before releasing and processing the batched messages. Batching can reduce how often your logic app processes messages. This article shows how to build a batching solution by creating two logic apps within the same Azure subscription, Azure region, and in this order: @@ -20,9 +22,9 @@ This article shows how to build a batching solution by creating two logic apps w 1. One or more ["batch sender"](#batch-sender) logic apps, which send the messages to the previously created batch receiver. - You can also specify a unique key, such as a customer number, that *partitions* or divides the target batch into logical subsets based on that key. That way, the receiver app can collect all items with the same key and process them together. + The batch sender can specify a unique key that *partitions* or divides the target batch into logical subsets, based on that key. For example, a customer number is a unique key. That way, the receiver app can collect all items with the same key and process them together. -Your batch receiver and batch sender needs to share the same Azure subscription *and* Azure region. If they don't, you can't select the batch receiver when you create the batch sender because they're not visible to each other. +Your batch receiver and batch sender need to share the same Azure subscription *and* Azure region. If they don't, you can't select the batch receiver when you create the batch sender because they're not visible to each other. ## Prerequisites @@ -199,7 +201,7 @@ Now create one or more batch sender logic apps that send messages to the batch r ![Set up a partition for your target batch](./media/logic-apps-batch-process-send-receive-messages/batch-sender-partition-advanced-options.png) - This **rand** function generates a number between one and five. So you are dividing this batch into five numbered partitions, which this expression dynamically sets. + This **rand** function generates a number between one and five. So, you're dividing this batch into five numbered partitions, which this expression dynamically sets. 1. Save your logic app. Your sender logic app now looks similar to this example: @@ -209,13 +211,11 @@ Now create one or more batch sender logic apps that send messages to the batch r To test your batching solution, leave your logic apps running for a few minutes. Soon, you start getting emails in groups of five, all with the same partition key. -Your batch sender logic app runs every minute, generates a random number between one and five, and uses this generated number as the partition key for the target batch where messages are sent. Each time the batch has five items with the same partition key, your batch receiver logic app fires and sends mail for each message. 
+Your batch sender logic app runs every minute and generates a random number between one and five. The batch sender uses this random number as the partition key for the target batch where you send the messages. Each time the batch has five items with the same partition key, your batch receiver logic app fires and sends mail for each message. > [!IMPORTANT] > When you're done testing, make sure that you disable the `BatchSender` logic app to stop sending messages and avoid overloading your inbox. ## Next steps -* [Batch and send EDI messages](../logic-apps/logic-apps-scenario-edi-send-batch-messages.md) -* [Build on logic app definitions by using JSON](../logic-apps/logic-apps-author-definitions.md) -* [Exception handling and error logging for logic apps](../logic-apps/logic-apps-scenario-error-and-exception-handling.md) +* [Batch and send EDI messages](../logic-apps/logic-apps-scenario-edi-send-batch-messages.md) \ No newline at end of file diff --git a/articles/logic-apps/logic-apps-control-flow-loops.md b/articles/logic-apps/logic-apps-control-flow-loops.md index 130c9ee086b99..8770c415e7400 100644 --- a/articles/logic-apps/logic-apps-control-flow-loops.md +++ b/articles/logic-apps/logic-apps-control-flow-loops.md @@ -293,7 +293,7 @@ The "Until" loop stops execution based on these properties, so make sure that yo * **Count**: This value is the highest number of loops that run before the loop exits. For the default and maximum limits on the number of "Until" loops that a logic app run can have, see [Concurrency, looping, and debatching limits](../logic-apps/logic-apps-limits-and-config.md#looping-debatching-limits). -* **Timeout**: This value is the most amount of time that the loop runs before exiting and is specified in [ISO 8601 format](https://en.wikipedia.org/wiki/ISO_8601). For the default and maximum limits on the **Timeout** value, see [Concurrency, looping, and debatching limits](../logic-apps/logic-apps-limits-and-config.md#looping-debatching-limits). +* **Timeout**: This value is the most amount of time that the "Until" action, including all the loops, runs before exiting and is specified in [ISO 8601 format](https://en.wikipedia.org/wiki/ISO_8601). For the default and maximum limits on the **Timeout** value, see [Concurrency, looping, and debatching limits](../logic-apps/logic-apps-limits-and-config.md#looping-debatching-limits). The timeout value is evaluated for each loop cycle. If any action in the loop takes longer than the timeout limit, the current cycle doesn't stop. However, the next cycle doesn't start because the limit condition isn't met. diff --git a/articles/logic-apps/logic-apps-create-logic-apps-from-templates.md b/articles/logic-apps/logic-apps-create-logic-apps-from-templates.md index 55d31613a92b8..92994b48d00c0 100644 --- a/articles/logic-apps/logic-apps-create-logic-apps-from-templates.md +++ b/articles/logic-apps/logic-apps-create-logic-apps-from-templates.md @@ -10,6 +10,8 @@ ms.date: 10/15/2017 # Create logic app workflows from prebuilt templates +[!INCLUDE [logic-apps-sku-consumption](../../includes/logic-apps-sku-consumption.md)] + To get you started creating workflows more quickly, Logic Apps provides templates, which are prebuilt logic apps that follow commonly used patterns. 
diff --git a/articles/logic-apps/logic-apps-diagnosing-failures.md b/articles/logic-apps/logic-apps-diagnosing-failures.md index 2ddbcf2ba9e99..e44b28dc2cf59 100644 --- a/articles/logic-apps/logic-apps-diagnosing-failures.md +++ b/articles/logic-apps/logic-apps-diagnosing-failures.md @@ -5,78 +5,199 @@ services: logic-apps ms.suite: integration ms.reviewer: estfan, azla ms.topic: how-to -ms.date: 01/31/2020 +ms.date: 05/24/2022 --- # Troubleshoot and diagnose workflow failures in Azure Logic Apps -Your logic app generates information that can help you diagnose and debug problems in your app. You can diagnose a logic app by reviewing each step in the workflow through the Azure portal. Or, you can add some steps to a workflow for runtime debugging. +Your logic app workflow generates information that can help you diagnose and debug problems in your app. You can diagnose your workflow by reviewing the inputs, outputs, and other information for each step in the workflow using the Azure portal. Or, you can add some steps to a workflow for runtime debugging. ## Check trigger history -Each logic app run starts with a trigger attempt, so if the trigger doesn't fire, follow these steps: +Each workflow run starts with a trigger, which either fires on a schedule or waits for an incoming request or event. The trigger history lists all the trigger attempts that your workflow made and information about the inputs and outputs for each trigger attempt. If the trigger doesn't fire, try the following steps. -1. Check the trigger's status by [checking the trigger history](../logic-apps/monitor-logic-apps.md#review-trigger-history). To view more information about the trigger attempt, select that trigger event, for example: +### [Consumption](#tab/consumption) - ![View trigger status and history](./media/logic-apps-diagnosing-failures/logic-app-trigger-history.png) +1. To check the trigger's status in your Consumption logic app, [review the trigger history](monitor-logic-apps.md#review-trigger-history). To view more information about the trigger attempt, select that trigger event, for example: -1. Check the trigger's inputs to confirm that they appear as you expect. Under **Inputs link**, select the link, which shows the **Inputs** pane. + ![Screenshot showing Azure portal with Consumption logic app workflow trigger history.](./media/logic-apps-diagnosing-failures/logic-app-trigger-history-consumption.png) + +1. Check the trigger's inputs to confirm that they appear as you expect. On the **History** pane, under **Inputs link**, select the link, which shows the **Inputs** pane. + + Trigger inputs include the data that the trigger expects and requires to start the workflow. Reviewing these inputs can help you determine whether the trigger inputs are correct and whether the condition was met so that the workflow can continue. + + ![Screenshot showing Consumption logic app workflow trigger inputs.](./media/logic-apps-diagnosing-failures/review-trigger-inputs-consumption.png) + +1. Check the triggers outputs, if any, to confirm that they appear as you expect. On the **History** pane, under **Outputs link**, select the link, which shows the **Outputs** pane. + + Trigger outputs include the data that the trigger passes to the next step in your workflow. Reviewing these outputs can help you determine whether the correct or expected values passed on to the next step in your workflow. 
+ + For example, an error message states that the RSS feed wasn't found: + + ![Screenshot showing Consumption logic app workflow trigger outputs.](./media/logic-apps-diagnosing-failures/review-trigger-outputs-consumption.png) + + > [!TIP] + > + > If you find any content that you don't recognize, learn more about + > [different content types](../logic-apps/logic-apps-content-type.md) in Azure Logic Apps. + +### [Standard](#tab/standard) + +1. To check the trigger's status in your Standard logic app, [review the trigger history](monitor-logic-apps.md#review-trigger-history). To view more information about the trigger attempt, select that trigger event, for example: + + ![Screenshot showing Azure portal with Standard logic app workflow trigger history.](./media/logic-apps-diagnosing-failures/logic-app-trigger-history-standard.png) + +1. Check the trigger's inputs to confirm that they appear as you expect. On the **History** pane, under **Inputs link**, select the link, which shows the **Inputs** pane. Trigger inputs include the data that the trigger expects and requires to start the workflow. Reviewing these inputs can help you determine whether the trigger inputs are correct and whether the condition was met so that the workflow can continue. - For example, the `feedUrl` property here has an incorrect RSS feed value: + ![Screenshot showing Standard logic app workflow trigger inputs.](./media/logic-apps-diagnosing-failures/review-trigger-inputs-standard.png) - ![Review trigger inputs for errors](./media/logic-apps-diagnosing-failures/review-trigger-inputs-for-errors.png) +1. Check the triggers outputs, if any, to confirm that they appear as you expect. On the **History** pane, under **Outputs link**, select the link, which shows the **Outputs** pane. -1. Check the triggers outputs, if any, to confirm that they appear as you expect. Under **Outputs link**, select the link, which shows the **Outputs** pane. + Trigger outputs include the data that the trigger passes to the next step in your workflow. Reviewing these outputs can help you determine whether the correct or expected values passed on to the next step in your workflow. - Trigger outputs include the data that the trigger passes to the next step in your workflow. Reviewing these outputs can help you determine whether the correct or expected values passed on to the next step in your workflow, for example: + For example, an error message states that the RSS feed wasn't found: - ![Review trigger outputs for errors](./media/logic-apps-diagnosing-failures/review-trigger-outputs-for-errors.png) + ![Screenshot showing Standard logic app workflow trigger outputs.](./media/logic-apps-diagnosing-failures/review-trigger-outputs-standard.png) > [!TIP] + > > If you find any content that you don't recognize, learn more about > [different content types](../logic-apps/logic-apps-content-type.md) in Azure Logic Apps. +--- + -## Check runs history +## Check workflow run history + +Each time that the trigger fires, Azure Logic Apps creates a workflow instance and runs that instance. If a run fails, try the following steps so you can review what happened during that run. You can review the status, inputs, and outputs for each step in the workflow. + +### [Consumption](#tab/consumption) + +1. To check the workflow's run status in your Consumption logic app, [review the runs history](monitor-logic-apps.md#review-runs-history). To view more information about a failed run, including all the steps in that run in their status, select the failed run. 
+ + ![Screenshot showing Azure portal with Consumption logic app workflow runs and a failed run selected.](./media/logic-apps-diagnosing-failures/logic-app-runs-history-consumption.png) + +1. After all the steps in the run appear, select each step to expand their shapes. + + ![Screenshot showing Consumption logic app workflow with failed step selected.](./media/logic-apps-diagnosing-failures/logic-app-run-pane-consumption.png) -Each time that the trigger fires for an item or event, the Logic Apps engine creates and runs a separate workflow instance for each item or event. If a run fails, follow these steps to review what happened during that run, including the status for each step in the workflow plus the inputs and outputs for each step. +1. Review the inputs, outputs, and any error messages for the failed step. -1. Check the workflow's run status by [checking the runs history](../logic-apps/monitor-logic-apps.md#review-runs-history). To view more information about a failed run, including all the steps in that run in their status, select the failed run. + ![Screenshot showing Consumption logic app workflow with failed step details.](./media/logic-apps-diagnosing-failures/failed-action-inputs-consumption.png) - ![View run history and select failed run](./media/logic-apps-diagnosing-failures/logic-app-runs-history.png) + For example, the following screenshot shows the outputs from the failed RSS action. -1. After all the steps in the run appear, expand the first failed step. + ![Screenshot showing Consumption logic app workflow with failed step outputs.](./media/logic-apps-diagnosing-failures/failed-action-outputs-consumption.png) - ![Expand first failed step](./media/logic-apps-diagnosing-failures/logic-app-run-pane.png) +### [Standard](#tab/standard) -1. Check the failed step's inputs to confirm whether they appear as you expect. +1. To check the workflow's run status in your Standard logic app, [review the runs history](monitor-logic-apps.md#review-runs-history). To view more information about a failed run, including all the steps in that run in their status, select the failed run. -1. Review the details for each step in a specific run. Under **Runs history**, select the run that you want to examine. + ![Screenshot showing Azure portal with Standard logic app workflow runs and a failed run selected.](./media/logic-apps-diagnosing-failures/logic-app-runs-history-standard.png) - ![Review runs history](./media/logic-apps-diagnosing-failures/logic-app-runs-history.png) +1. After all the steps in the run appear, select each step to review their details. - ![View details for a logic app run](./media/logic-apps-diagnosing-failures/logic-app-run-details.png) + ![Screenshot showing Standard logic app workflow with failed step selected.](./media/logic-apps-diagnosing-failures/logic-app-run-pane-standard.png) -1. To examine the inputs, outputs, and any error messages for a specific step, choose that step so that the shape expands and shows the details. For example: +1. Review the inputs, outputs, and any error messages for the failed step. - ![View step details](./media/logic-apps-diagnosing-failures/logic-app-run-details-expanded.png) + ![Screenshot showing Standard logic app workflow with failed step inputs.](./media/logic-apps-diagnosing-failures/failed-action-inputs-standard.png) + + For example, the following screenshot shows the outputs from the failed RSS action. 
+ + ![Screenshot showing Standard logic app workflow with failed step outputs.](./media/logic-apps-diagnosing-failures/failed-action-outputs-standard.png) + +--- ## Perform runtime debugging -To help with debugging, you can add diagnostic steps to a logic app workflow, along with reviewing the trigger and runs history. For example, you can add steps that use the [Webhook Tester](https://webhook.site/) service so that you can inspect HTTP requests and determine their exact size, shape, and format. +To help with debugging, you can add diagnostic steps to a logic app workflow, along with reviewing the trigger and runs history. For example, you can add steps that use the [Webhook Tester](https://webhook.site/) service, so you can inspect HTTP requests and determine their exact size, shape, and format. -1. Go to the [Webhook Tester](https://webhook.site/) site and copy the generated unique URL. +1. In a browser, go to the [Webhook Tester](https://webhook.site/) site, and copy the generated unique URL. -1. In your logic app, add an HTTP POST action plus the body content that you want to test, for example, an expression or another step output. +1. In your logic app, add an HTTP POST action with the body content that you want to test, for example, an expression or another step output. 1. Paste your URL from Webhook Tester into the HTTP POST action. -1. To review how a request is formed when generated from the Logic Apps engine, run the logic app, and revisit the Webhook Tester site for more details. +1. To review how Azure Logic Apps generates and forms a request, run the logic app workflow. You can then revisit the Webhook Tester site for more information. + +## Common problems - Standard logic apps + +### Inaccessible artifacts in Azure storage account + +Standard logic apps store all artifacts in an Azure storage account. You might get the following errors if these artifacts aren't accessible. For example, the storage account itself might not be accessible, or the storage account is behind a firewall but no private endpoint is set up for the storage services to use. + +| Azure portal location | Error | +|-----------------------|-------| +| Overview pane | - **System.private.corelib:Access to the path 'C:\\home\\site\\wwwroot\\host.json' is denied** <br><br>- **Azure.Storage.Blobs: This request is not authorized to perform this operation** | +| Workflows pane | - **Cannot reach host runtime. Error details, Code: 'BadRequest', Message: 'Encountered an error (InternalServerError) from host runtime.'** <br><br>- **Cannot reach host runtime. Error details, Code: 'BadRequest', Message: 'Encountered an error (ServiceUnavailable) from host runtime.'** <br><br>- **Cannot reach host runtime. Error details, Code: 'BadRequest', Message: 'Encountered an error (BadGateway) from host runtime.'** | +| During workflow creation and execution | - **Failed to save workflow** <br><br>- **Error in the designer: GetCallFailed. Failed fetching operations** <br><br>
                  - **ajaxExtended call failed** | +||| + +### Troubleshooting options + +The following list includes possible causes for these errors and steps to help troubleshoot. + +* For a public storage account, check access to the storage account in the following ways: + + * Check the storage account's connectivity using [Azure Storage Explorer](../vs-azure-tools-storage-manage-with-storage-explorer.md). + + * In your logic app resource's app settings, confirm the storage account's connection string in the app settings, **AzureWebJobsStorage** and **WEBSITE_CONTENTAZUREFILECONNECTIONSTRING**. For more information, review [Host and app settings for logic apps in single-tenant Azure Logic Apps](edit-app-settings-host-settings.md#manage-app-settings). + + If connectivity fails, check whether the Shared Access Signature (SAS) key in the connection string is the most recent. + +* For a storage account that's behind a firewall, check access to the storage account in the following ways: + + * If firewall restrictions are enabled on the storage account, check whether [private endpoints](../private-link/private-endpoint-overview.md) are set up for Blob, File, Table, and Queue storage services. + + * Check the storage account's connectivity using [Azure Storage Explorer](../vs-azure-tools-storage-manage-with-storage-explorer.md). + + If you find connectivity problems, continue with the following steps: + + 1. In the same virtual network that's integrated with your logic app, create an Azure virtual machine, which you can put in a different subnet. + + 1. From a command prompt, run **nslookup** to check that the Blob, File, Table, and Queue storage services resolve to the expected IP addresses. + + Syntax: `nslookup [StorageaccountHostName] [OptionalDNSServer]` + + Blob: `nslookup {StorageaccountName}.blob.core.windows.net` + + File: `nslookup {StorageaccountName}.file.core.windows.net` + + Table: `nslookup {StorageaccountName}.table.core.windows.net` + + Queue: `nslookup {StorageaccountName}.queue.core.windows.net` + + * If the storage service has a [Service Endpoint](../virtual-network/virtual-network-service-endpoints-overview.md), the service resolves to a public IP address. + + * If the storage service has a [private endpoint](../private-link/private-endpoint-overview.md), the service resolves to the respective network interface controller (NIC) private IP addresses. + + 1. If the previous domain name server (DNS) queries resolve successfully, run the **psping** or **tcpping** commands to check connectivity to the storage account over port 443: + + Syntax: `psping [StorageaccountHostName] [Port] [OptionalDNSServer]` + + Blob: `psping {StorageaccountName}.blob.core.windows.net:443` + + File: `psping {StorageaccountName}.file.core.windows.net:443` + + Table: `psping {StorageaccountName}.table.core.windows.net:443` + + Queue: `psping {StorageaccountName}.queue.core.windows.net:443` + + 1. If each storage service is resolvable from your Azure virtual machine, find the DNS that's used by the virtual machine for resolution. + + 1. Set your logic app's **WEBSITE_DNS_SERVER** app setting to the DNS, and confirm that the DNS works successfully. + + 1. Confirm that VNet integration is set up correctly with appropriate virtual network and subnet in your Standard logic app. + + 1. 
If you use [private Azure DNS zones](../dns/private-dns-privatednszone.md) for your storage account's private endpoint services, check that a [virtual network link](../dns/private-dns-virtual-network-links.md) has been created to your logic app's integrated virtual network. + +For more information, review [Deploy Standard logic app to a storage account behind a firewall using service or private endpoints](https://techcommunity.microsoft.com/t5/integrations-on-azure-blog/deploying-standard-logic-app-to-storage-account-behind-firewall/ba-p/2626286). ## Next steps diff --git a/articles/logic-apps/logic-apps-enterprise-integration-certificates.md b/articles/logic-apps/logic-apps-enterprise-integration-certificates.md index 432eddc240eaf..a838e6c90d8e8 100644 --- a/articles/logic-apps/logic-apps-enterprise-integration-certificates.md +++ b/articles/logic-apps/logic-apps-enterprise-integration-certificates.md @@ -24,7 +24,7 @@ You can use the following certificate types in your workflows: * [Public certificates](https://en.wikipedia.org/wiki/Public_key_certificate), which you must purchase from a public internet [certificate authority (CA)](https://en.wikipedia.org/wiki/Certificate_authority). These certificates don't require any keys. -* Private certificates or [*self-signed certificates*](https://en.wikipedia.org/wiki/Self-signed_certificate), which you create and issue yourself. However, these certificates require private keys. +* Private certificates or [*self-signed certificates*](https://en.wikipedia.org/wiki/Self-signed_certificate), which you create and issue yourself. However, these certificates require [private keys in an Azure key vault](#prerequisites). If you're new to logic apps, review [What is Azure Logic Apps](logic-apps-overview.md)? For more information about B2B enterprise integration, review [B2B enterprise integration workflows with Azure Logic Apps and Enterprise Integration Pack](logic-apps-enterprise-integration-overview.md). @@ -58,7 +58,7 @@ If you're new to logic apps, review [What is Azure Logic Apps](logic-apps-overvi [!INCLUDE [updated-for-az](../../includes/updated-for-az.md)] - * [Add a corresponding public certificate](#add-public-certificate) to your key vault. This certificate appears in your [agreement's **Send** and **Receive** settings for signing and encrypting messages](logic-apps-enterprise-integration-agreements.md). For example, review [Reference for AS2 messages settings in Azure Logic Apps](logic-apps-enterprise-integration-as2-message-settings.md). + * [Add the corresponding public certificate](#add-public-certificate) to your key vault. This certificate appears in your [agreement's **Send** and **Receive** settings for signing and encrypting messages](logic-apps-enterprise-integration-agreements.md). For example, review [Reference for AS2 messages settings in Azure Logic Apps](logic-apps-enterprise-integration-as2-message-settings.md). * At least two [trading partners](logic-apps-enterprise-integration-partners.md) and an [agreement between those partners](logic-apps-enterprise-integration-agreements.md) in your integration account. An agreement requires a host partner and a guest partner. Also, an agreement requires that both partners use the same or compatible *business identity* qualifier that's appropriate for an AS2, X12, EDIFACT, or RosettaNet agreement. 
@@ -66,7 +66,7 @@ If you're new to logic apps, review [What is Azure Logic Apps](logic-apps-overvi -## Add a public certificate +## Use a public certificate To use a *public certificate* in your workflow, you have to first add the certificate to your integration account. @@ -84,7 +84,7 @@ To use a *public certificate* in your workflow, you have to first add the certif |----------|----------|-------|-------------| | **Name** | Yes | <*certificate-name*> | Your certificate's name, which is `publicCert` in this example | | **Certificate Type** | Yes | **Public** | Your certificate's type | - | **Certificate** | Yes | <*certificate-file-name*> | To browse for the certificate file that you want to add, select the folder icon next to the **Certificate** box. | + | **Certificate** | Yes | <*certificate-file-name*> | To browse for the certificate file that you want to add, select the folder icon next to the **Certificate** box. Select the certificate that you want to use. | ||||| ![Screenshot showing the Azure portal and integration account with "Add" selected and the "Add Certificate" pane with public certificate details.](media/logic-apps-enterprise-integration-certificates/public-certificate-details.png) @@ -95,11 +95,11 @@ To use a *public certificate* in your workflow, you have to first add the certif ![Screenshot showing the Azure portal and integration account with the public certificate in the "Certificates" list.](media/logic-apps-enterprise-integration-certificates/new-public-certificate.png) - + -## Add a private certificate +## Use a private certificate -To use a *private certificate* in your workflow, you have to first add the certificate to your integration account. Make sure that you've also met the [prerequisites private certificates](#prerequisites). +To use a *private certificate* in your workflow, you have to first meet the [prerequisites for private keys](#prerequisites), and add a public certificate to your integration account. 1. In the [Azure portal](https://portal.azure.com) search box, enter `integration accounts`, and select **Integration accounts**. @@ -115,7 +115,7 @@ To use a *private certificate* in your workflow, you have to first add the certi |----------|----------|-------|-------------| | **Name** | Yes | <*certificate-name*> | Your certificate's name, which is `privateCert` in this example | | **Certificate Type** | Yes | **Private** | Your certificate's type | - | **Certificate** | Yes | <*certificate-file-name*> | To browse for the certificate file that you want to add, select the folder icon next to the **Certificate** box. In the key vault that contains your private key, the file you add there is the public certificate. | + | **Certificate** | Yes | <*certificate-file-name*> | To browse for the certificate file that you want to add, select the folder icon next to the **Certificate** box. Select the public certificate that corresponds to the private key that's stored in your key vault. 
| | **Resource Group** | Yes | <*integration-account-resource-group*> | Your integration account's resource group, which is `Integration-Account-RG` in this example | | **Key Vault** | Yes | <*key-vault-name*> | Your key vault name | | **Key name** | Yes | <*key-name*> | Your key name | diff --git a/articles/logic-apps/logic-apps-enterprise-integration-liquid-transform.md b/articles/logic-apps/logic-apps-enterprise-integration-liquid-transform.md index aab8fb83319ab..5a6ce46d8e064 100644 --- a/articles/logic-apps/logic-apps-enterprise-integration-liquid-transform.md +++ b/articles/logic-apps/logic-apps-enterprise-integration-liquid-transform.md @@ -162,7 +162,7 @@ This article shows you how to complete these tasks: * If your template uses [Liquid filters](https://shopify.github.io/liquid/basics/introduction/#filters), make sure that you follow the [DotLiquid and C# naming conventions](https://github.com/dotliquid/dotliquid/wiki/DotLiquid-for-Designers#filter-and-output-casing), which use *sentence casing*. For all Liquid transforms, make sure that filter names in your template also use sentence casing. Otherwise, the filters won't work. - For example, when you use the `replace` filter, use `Replace`, not `replace`. The same rule applies if you try out examples at [DotLiquid online](http://dotliquidmarkup.org/try-online). For more information, see [Shopify Liquid filters](https://shopify.dev/docs/themes/liquid/reference/filters) and [DotLiquid Liquid filters](https://github.com/dotliquid/dotliquid/wiki/DotLiquid-for-Developers#create-your-own-filters). The Shopify specification includes examples for each filter, so for comparison, you can try these examples at [DotLiquid - Try online](http://dotliquidmarkup.org/try-online). + For example, when you use the `replace` filter, use `Replace`, not `replace`. The same rule applies if you try out examples at [DotLiquid online](http://dotliquidmarkup.org/TryOnline). For more information, see [Shopify Liquid filters](https://shopify.dev/docs/themes/liquid/reference/filters) and [DotLiquid Liquid filters](https://github.com/dotliquid/dotliquid/wiki/DotLiquid-for-Developers#create-your-own-filters). The Shopify specification includes examples for each filter, so for comparison, you can try these examples at [DotLiquid - Try online](http://dotliquidmarkup.org/TryOnline). * The `json` filter from the Shopify extension filters is currently [not implemented in DotLiquid](https://github.com/dotliquid/dotliquid/issues/384). Typically, you can use this filter to prepare text output for JSON string parsing, but instead, you need to use the `Replace` filter instead. 
@@ -254,7 +254,7 @@ Here are the sample inputs and outputs: * [Shopify Liquid language and examples](https://shopify.github.io/liquid/basics/introduction/) * [DotLiquid](http://dotliquidmarkup.org/) -* [DotLiquid - Try online](http://dotliquidmarkup.org/try-online) +* [DotLiquid - Try online](http://dotliquidmarkup.org/TryOnline) * [DotLiquid GitHub](https://github.com/dotliquid/dotliquid) * [DotLiquid GitHub issues](https://github.com/dotliquid/dotliquid/issues/) * Learn more about [maps](../logic-apps/logic-apps-enterprise-integration-maps.md) diff --git a/articles/logic-apps/logic-apps-examples-and-scenarios.md b/articles/logic-apps/logic-apps-examples-and-scenarios.md index 1f8d5626d7870..db85a8a91cfc5 100644 --- a/articles/logic-apps/logic-apps-examples-and-scenarios.md +++ b/articles/logic-apps/logic-apps-examples-and-scenarios.md @@ -67,7 +67,6 @@ and [switch statements](../logic-apps/logic-apps-control-flow-switch-statement.m * [Repeat steps or process items in arrays and collections with loops](../logic-apps/logic-apps-control-flow-loops.md) * [Group actions together with scopes](../logic-apps/logic-apps-control-flow-run-steps-group-scopes.md) * [Add error and exception handling to a workflow](../logic-apps/logic-apps-exception-handling.md) -* [Use case: How a healthcare company uses logic app exception handling for HL7 FHIR workflows](../logic-apps/logic-apps-scenario-error-and-exception-handling.md) ## Create custom APIs and connectors diff --git a/articles/logic-apps/logic-apps-exception-handling.md b/articles/logic-apps/logic-apps-exception-handling.md index 602417237e904..6e4de6a31663e 100644 --- a/articles/logic-apps/logic-apps-exception-handling.md +++ b/articles/logic-apps/logic-apps-exception-handling.md @@ -5,89 +5,101 @@ services: logic-apps ms.suite: integration author: dereklee ms.author: deli -ms.reviewer: estfan, azla +ms.reviewer: estfan, laveeshb, azla ms.topic: how-to -ms.date: 02/18/2021 +ms.date: 05/26/2022 --- # Handle errors and exceptions in Azure Logic Apps -The way that any integration architecture appropriately handles downtime or issues caused by dependent systems can pose a challenge. To help you create robust and resilient integrations that gracefully handle problems and failures, Logic Apps provides a first-class experience for handling errors and exceptions. +The way that any integration architecture appropriately handles downtime or issues caused by dependent systems can pose a challenge. To help you create robust and resilient integrations that gracefully handle problems and failures, Azure Logic Apps provides a first-class experience for handling errors and exceptions. ## Retry policies -For the most basic exception and error handling, you can use a *retry policy* in any action or trigger where supported, for example, see [HTTP action](../logic-apps/logic-apps-workflow-actions-triggers.md#http-trigger). A retry policy specifies whether and how the action or trigger retries a request when the original request times out or fails, which is any request that results in a 408, 429, or 5xx response. If no other retry policy is used, the default policy is used. +For the most basic exception and error handling, you can use the *retry policy* when supported on a trigger or action, such as the [HTTP action](logic-apps-workflow-actions-triggers.md#http-trigger). If the trigger or action's original request times out or fails, resulting in a 408, 429, or 5xx response, the retry policy specifies that the trigger or action resend the request per policy settings. 
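For example, the following minimal sketch shows where a retry policy sits inside an action definition. The action name, URL, and interval values are hypothetical illustrations of the settings described later in this article, not values taken from it:

```json
"HTTP_Get_Status": {
    "type": "Http",
    "inputs": {
        "method": "GET",
        "uri": "https://example.com/api/status",
        "retryPolicy": {
            "type": "exponential",
            "count": 4,
            "interval": "PT20S",
            "minimumInterval": "PT10S",
            "maximumInterval": "PT1H"
        }
    },
    "runAfter": {}
}
```

If requests to this hypothetical endpoint keep failing with a 408, 429, or 5xx response, the action would retry up to four more times at randomized, exponentially growing intervals between 10 seconds and 1 hour before the action is marked as failed.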
-Here are the retry policy types: +### Retry policy types -| Type | Description | -|------|-------------| -| **Default** | This policy sends up to four retries at *exponentially increasing* intervals, which scale by 7.5 seconds but are capped between 5 and 45 seconds. | -| **Exponential interval** | This policy waits a random interval selected from an exponentially growing range before sending the next request. | -| **Fixed interval** | This policy waits the specified interval before sending the next request. | -| **None** | Don't resend the request. | +By default, the retry policy is set to the **Default** type. + +| Retry policy | Description | +|--------------|-------------| +| **Default** | This policy sends up to 4 retries at *exponentially increasing* intervals, which scale by 7.5 seconds but are capped between 5 and 45 seconds. For more information, review the [Default](#default) policy type. | +| **None** | Don't resend the request. For more information, review the [None](#none) policy type. | +| **Exponential Interval** | This policy waits a random interval, which is selected from an exponentially growing range before sending the next request. For more information, review the [Exponential Interval](#exponential-interval) policy type. | +| **Fixed Interval** | This policy waits the specified interval before sending the next request. For more information, review the [Fixed Interval](#fixed-interval) policy type. | ||| -For information about retry policy limits, see [Logic Apps limits and configuration](../logic-apps/logic-apps-limits-and-config.md#http-limits). + -### Change retry policy +### Retry policy limits -To select a different retry policy, follow these steps: +For more information about retry policies, settings, limits, and other options, review [Retry policy limits](logic-apps-limits-and-config.md#retry-policy-limits). -1. Open your logic app in Logic App Designer. +### Change retry policy type in the designer -1. Open the **Settings** for an action or trigger. +1. In the [Azure portal](https://portal.azure.com), open your logic app workflow in the designer. -1. If the action or trigger supports retry policies, under **Retry Policy**, select the type you want. +1. Based on your [logic app type](logic-apps-overview.md#resource-environment-differences), open the trigger or action's **Settings**. -Or, you can manually specify the retry policy in the `inputs` section for an action or trigger that supports retry policies. If you don't specify a retry policy, the action uses the default policy. + * **Consumption**: On the action shape, open the ellipses menu (**...**), and select **Settings**. -```json -"": { - "type": "", + * **Standard**: On the designer, select the action. On the details pane, select **Settings**. + +1. If the trigger or action supports retry policies, under **Retry Policy**, select the policy type that you want. + +### Change retry policy type in the code view editor + +1. If necessary, confirm whether the trigger or action supports retry policies by completing the earlier steps in the designer. + +1. Open your logic app workflow in the code view editor. + +1. In the trigger or action definition, add the `retryPolicy` JSON object to that trigger or action's `inputs` object. Otherwise, if no `retryPolicy` object exists, the trigger or action uses the `default` retry policy. + + ```json "inputs": { - "", + <...>, "retryPolicy": { "type": "", - "interval": "", + // The following properties apply to specific retry policies. 
"count": , - "minimumInterval": "", - "maximumInterval": "" + "interval": "", + "maximumInterval": "", + "minimumInterval": "" }, - "" + <...> }, "runAfter": {} -} -``` + ``` -*Required* + *Required* -| Value | Type | Description | -|-------|------|-------------| -| <*retry-policy-type*> | String | The retry policy type you want to use: `default`, `none`, `fixed`, or `exponential` | -| <*retry-interval*> | String | The retry interval where the value must use [ISO 8601 format](https://en.wikipedia.org/wiki/ISO_8601#Combined_date_and_time_representations). The default minimum interval is `PT5S` and the maximum interval is `PT1D`. When you use the exponential interval policy, you can specify different minimum and maximum values. | -| <*retry-attempts*> | Integer | The number of retry attempts, which must be between 1 and 90 | -|||| + | Property | Value | Type | Description | + |----------|-------|------|-------------| + | `type` | <*retry-policy-type*> | String | The retry policy type to use: `default`, `none`, `fixed`, or `exponential` | + | `count` | <*retry-attempts*> | Integer | For `fixed` and `exponential` policy types, the number of retry attempts, which is a value from 1 - 90. For more information, review [Fixed Interval](#fixed-interval) and [Exponential Interval](#exponential-interval). | + | `interval`| <*retry-interval*> | String | For `fixed` and `exponential` policy types, the retry interval value in [ISO 8601 format](https://en.wikipedia.org/wiki/ISO_8601#Combined_date_and_time_representations). For the `exponential` policy, you can also specify [optional maximum and minimum intervals](#optional-max-min-intervals). For more information, review [Fixed Interval](#fixed-interval) and [Exponential Interval](#exponential-interval).

                  **Consumption**: 5 seconds (`PT5S`) to 1 day (`P1D`).
                  **Standard**: For stateful workflows, 5 seconds (`PT5S`) to 1 day (`P1D`). For stateless workflows, 1 second (`PT1S`) to 1 minute (`PT1M`). | + ||||| -*Optional* + -| Value | Type | Description | -|-------|------|-------------| -| <*minimum-interval*> | String | For the exponential interval policy, the smallest interval for the randomly selected interval in [ISO 8601 format](https://en.wikipedia.org/wiki/ISO_8601#Combined_date_and_time_representations) | -| <*maximum-interval*> | String | For the exponential interval policy, the largest interval for the randomly selected interval in [ISO 8601 format](https://en.wikipedia.org/wiki/ISO_8601#Combined_date_and_time_representations) | -|||| + *Optional* -Here is more information about the different policy types. + | Property | Value | Type | Description | + |----------|-------|------|-------------| + | `maximumInterval` | <*maximum-interval*> | String | For the `exponential` policy, the largest interval for the randomly selected interval in [ISO 8601 format](https://en.wikipedia.org/wiki/ISO_8601#Combined_date_and_time_representations). The default value is 1 day (`P1D`). For more information, review [Exponential Interval](#exponential-interval). | + | `minimumInterval` | <*minimum-interval*> | String | For the `exponential` policy, the smallest interval for the randomly selected interval in [ISO 8601 format](https://en.wikipedia.org/wiki/ISO_8601#Combined_date_and_time_representations). The default value is 5 seconds (`PT5S`). For more information, review [Exponential Interval](#exponential-interval). | + ||||| - + -### Default +#### Default retry policy -If you don't specify a retry policy, the action uses the default policy, which is actually an [exponential interval policy](#exponential-interval) that sends up to four retries at exponentially increasing intervals that are scaled by 7.5 seconds. The interval is capped between 5 and 45 seconds. +If you don't specify a retry policy, the action uses the default policy. The default is actually an [exponential interval policy](#exponential-interval) that sends up to four retries at exponentially increasing intervals, which scales by 7.5 seconds. The interval is capped between 5 and 45 seconds. -Though not explicitly defined in your action or trigger, here is how the default policy behaves in an example HTTP action: +Though not explicitly defined in your action or trigger, the following example shows how the default policy behaves in an example HTTP action: ```json "HTTP": { @@ -107,11 +119,15 @@ Though not explicitly defined in your action or trigger, here is how the default } ``` -### None + + +### None - No retry policy To specify that the action or trigger doesn't retry failed requests, set the <*retry-policy-type*> to `none`. -### Fixed interval + + +### Fixed interval retry policy To specify that the action or trigger waits the specified interval before sending the next request, set the <*retry-policy-type*> to `fixed`. @@ -136,13 +152,19 @@ This retry policy attempts to get the latest news two more times after the first -### Exponential interval +### Exponential interval retry policy + +The exponential interval retry policy specifies that the trigger or action waits a random interval before sending the next request. This random interval is selected from an exponentially growing range. 
Optionally, you can override the default minimum and maximum intervals by specifying your own minimum and maximum intervals, based on whether you have a [Consumption or Standard logic app workflow](logic-apps-overview.md#resource-environment-differences). -To specify that the action or trigger waits a random interval before sending the next request, set the <*retry-policy-type*> to `exponential`. The random interval is selected from an exponentially growing range. Optionally, you can also override the default minimum and maximum intervals by specifying your own minimum and maximum intervals. +| Name | Consumption limit | Standard limit | Notes | +|------|-------------------|----------------|-------| +| Maximum delay | Default: 1 day | Default: 1 hour | To change the default limit in a Consumption logic app workflow, use the [retry policy parameter](logic-apps-exception-handling.md#retry-policies).

                  To change the default limit in a Standard logic app workflow, review [Edit host and app settings for logic apps in single-tenant Azure Logic Apps](edit-app-settings-host-settings.md). | +| Minimum delay | Default: 5 sec | Default: 5 sec | To change the default limit in a Consumption logic app workflow, use the [retry policy parameter](logic-apps-exception-handling.md#retry-policies).

                  To change the default limit in a Standard logic app workflow, review [Edit host and app settings for logic apps in single-tenant Azure Logic Apps](edit-app-settings-host-settings.md). | +||||| **Random variable ranges** -This table shows how Logic Apps generates a uniform random variable in the specified range for each retry up to and including the number of retries: +For the exponential interval retry policy, the following table shows the general algorithm that Azure Logic Apps uses to generate a uniform random variable in the specified range for each retry. The specified range can be up to and including the number of retries. | Retry number | Minimum interval | Maximum interval | |--------------|------------------|------------------| @@ -155,117 +177,178 @@ This table shows how Logic Apps generates a uniform random variable in the speci -## Catch and handle failures by changing "run after" behavior +## Manage the "run after" behavior + +When you add actions in the workflow designer, you implicitly declare the order to use for running those actions. After an action finishes running, that action is marked with a status such as **Succeeded**, **Failed**, **Skipped**, or **TimedOut**. By default, an action that you add in the designer runs only after the predecessor completes with **Succeeded** status. In an action's underlying definition, the `runAfter` property specifies the predecessor action that must first finish and the statuses permitted for that predecessor before the successor action can run. + +When an action throws an unhandled error or exception, the action is marked **Failed**, and any successor action is marked **Skipped**. If this behavior happens for an action that has parallel branches, the Azure Logic Apps engine follows the other branches to determine their completion statuses. For example, if a branch ends with a **Skipped** action, that branch's completion status is based on that skipped action's predecessor status. After the workflow run completes, the engine determines the entire run's status by evaluating all the branch statuses. If any branch ends in failure, the entire workflow run is marked **Failed**. + +![Conceptual diagram with examples that show how run statuses are evaluated.](./media/logic-apps-exception-handling/status-evaluation-for-parallel-branches.png) + +To make sure that an action can still run despite its predecessor's status, you can change an action's "run after" behavior to handle the predecessor's unsuccessful statuses. That way, the action runs when the predecessor's status is **Succeeded**, **Failed**, **Skipped**, **TimedOut**, or any of these statuses. + +For example, to run the Office 365 Outlook **Send an email** action after the Excel Online **Add a row into a table** predecessor action is marked **Failed**, rather than **Succeeded**, change the "run after" behavior using either the designer or code view editor. + +> [!NOTE] +> +> In the designer, the "run after" setting doesn't apply to the action that immediately +> follows the trigger, because the trigger must run successfully before the first action can run. + + + +### Change "run after" behavior in the designer + +### [Consumption](#tab/consumption) + +1. In the [Azure portal](https://portal.azure.com), open the logic app workflow in the designer. + +1. On the action shape, open the ellipses menu (**...**), and select **Configure run after**. 
+ + ![Screenshot showing Consumption workflow designer and current action with ellipses and "Configure run after" selected.](./media/logic-apps-exception-handling/configure-run-after-consumption.png) -When you add actions in the Logic App Designer, you implicitly declare the order to use for running those actions. After an action finishes running, that action is marked with a status such as `Succeeded`, `Failed`, `Skipped`, or `TimedOut`. In each action definition, the `runAfter` property specifies the predecessor action that must first finish and the statuses permitted for that predecessor before the successor action can run. By default, an action that you add in the designer runs only after the predecessor completes with `Succeeded` status. + The action shape expands and shows the predecessor action for the currently selected action. -When an action throws an unhandled error or exception, the action is marked `Failed`, and any successor action is marked `Skipped`. If this behavior happens for an action that has parallel branches, the Logic Apps engine follows the other branches to determine their completion statuses. For example, if a branch ends with a `Skipped` action, that branch's completion status is based on that skipped action's predecessor status. After the logic app run completes, the engine determines the entire run's status by evaluating all the branch statuses. If any branch ends in failure, the entire logic app run is marked `Failed`. + ![Screenshot showing Consumption workflow designer, current action, and "run after" status for predecessor action.](./media/logic-apps-exception-handling/predecessor-action-consumption.png) -![Examples that show how run statuses are evaluated](./media/logic-apps-exception-handling/status-evaluation-for-parallel-branches.png) +1. Expand the predecessor action node to view all the "run after" statuses. -To make sure that an action can still run despite its predecessor's status, [customize an action's "run after" behavior](#customize-run-after) to handle the predecessor's unsuccessful statuses. + By default, the "run after" status is set to **is successful**. So, the predecessor action must run successfully before the currently selected action can run. - + ![Screenshot showing Consumption designer, current action, and default "run after" set to "is successful".](./media/logic-apps-exception-handling/default-run-after-status-consumption.png) -### Customize "run after" behavior +1. Change the "run after" behavior to the status that you want. Make sure that you first select an option before you clear the default option. You have to always have at least one option selected. -You can customize an action's "run after" behavior so that the action runs when the predecessor's status is either `Succeeded`, `Failed`, `Skipped`, `TimedOut`, or any of these statuses. For example, to send an email after the Excel Online `Add_a_row_into_a_table` predecessor action is marked `Failed`, rather than `Succeeded`, change the "run after" behavior by following either step: + The following example selects **has failed**. -* In the design view, select the ellipses (**...**) button, and then select **Configure run after**. + ![Screenshot showing Consumption designer, current action, and "run after" set to "has failed".](./media/logic-apps-exception-handling/failed-run-after-status-consumption.png) - ![Configure "run after" behavior for an action](./media/logic-apps-exception-handling/configure-run-after-property-setting.png) +1. 
To specify that the current action runs whether the predecessor action is marked as **Failed**, **Skipped**, or **TimedOut**, select the other statuses. - The action shape shows the default status that's required for the predecessor action, which is **Add a row into a table** in this example: + ![Screenshot showing Consumption designer, current action, and multiple "run after" statuses selected.](./media/logic-apps-exception-handling/run-after-multiple-statuses-consumption.png) - ![Default "run after" behavior for an action](./media/logic-apps-exception-handling/change-run-after-property-status.png) +1. When you're ready, select **Done**. - Change the "run after" behavior to the status that you want, which is **has failed** in this example: +### [Standard](#tab/standard) - ![Change "run after" behavior to "has failed"](./media/logic-apps-exception-handling/run-after-property-status-set-to-failed.png) +1. In the [Azure portal](https://portal.azure.com), open the logic app workflow in the designer. - To specify that the action runs whether the predecessor action is marked as `Failed`, `Skipped` or `TimedOut`, select the other statuses: +1. On the designer, select the action shape. On the details pane, select **Run After**. - ![Change "run after" behavior to have any other status](./media/logic-apps-exception-handling/run-after-property-multiple-statuses.png) + ![Screenshot showing Standard workflow designer and current action details pane with "Run After" selected.](./media/logic-apps-exception-handling/configure-run-after-standard.png) -* In code view, in the action's JSON definition, edit the `runAfter` property, which follows this syntax: + The **Run After** pane shows the predecessor action for the currently selected action. - ```json - "": { - "inputs": { - "" - }, - "runAfter": { - "": [ - "Succeeded" - ] - }, - "type": "" - } - ``` + ![Screenshot showing Standard designer, current action, and "run after" status for predecessor action.](./media/logic-apps-exception-handling/predecessor-action-standard.png) - For this example, change the `runAfter` property from `Succeeded` to `Failed`: +1. Expand the predecessor action node to view all the "run after" statuses. - ```json - "Send_an_email_(V2)": { - "inputs": { - "body": { - "Body": "

                  Failed to add row to  @{body('Add_a_row_into_a_table')?['Terms']}

                  ",, - "Subject": "Add row to table failed: @{body('Add_a_row_into_a_table')?['Terms']}", - "To": "Sophia.Owen@fabrikam.com" - }, - "host": { - "connection": { - "name": "@parameters('$connections')['office365']['connectionId']" - } - }, - "method": "post", - "path": "/v2/Mail" - }, - "runAfter": { - "Add_a_row_into_a_table": [ - "Failed" - ] - }, - "type": "ApiConnection" - } - ``` + By default, the "run after" status is set to **is successful**. So, the predecessor action must run successfully before the currently selected action can run. - To specify that the action runs whether the predecessor action is marked as `Failed`, `Skipped` or `TimedOut`, add the other statuses: + ![Screenshot showing Standard designer, current action, and default "run after" set to "is successful".](./media/logic-apps-exception-handling/change-run-after-status-standard.png) - ```json - "runAfter": { - "Add_a_row_into_a_table": [ - "Failed", "Skipped", "TimedOut" - ] - }, - ``` +1. Change the "run after" behavior to the status that you want. Make sure that you first select an option before you clear the default option. You have to always have at least one option selected. + + The following example selects **has failed**. + + ![Screenshot showing Standard designer, current action, and "run after" set to "has failed".](./media/logic-apps-exception-handling/failed-run-after-status-standard.png) + +1. To specify that the current action runs whether the predecessor action is marked as **Failed**, **Skipped**, or **TimedOut**, select the other statuses. + + ![Screenshot showing Standard designer, current action, and multiple "run after" statuses selected.](./media/logic-apps-exception-handling/run-after-multiple-statuses-standard.png) + +1. To require that more than one predecessor action runs, each with their own "run after" statuses, expand the **Select actions** list. Select the predecessor actions that you want, and specify their required "run after" statuses. + + ![Screenshot showing Standard designer, current action, and multiple predecessor actions available.](./media/logic-apps-exception-handling/multiple-predecessor-actions-standard.png) + +1. When you're ready, select **Done**. + +--- + +### Change "run after" behavior in the code view editor + +1. In the [Azure portal](https://portal.azure.com), open your logic app workflow in the code view editor. + +1. In the action's JSON definition, edit the `runAfter` property, which has the following syntax: + + ```json + "": { + "inputs": { + "" + }, + "runAfter": { + "": [ + "Succeeded" + ] + }, + "type": "" + } + ``` + +1. For this example, change the `runAfter` property from `Succeeded` to `Failed`: + + ```json + "Send_an_email_(V2)": { + "inputs": { + "body": { + "Body": "

                  Failed to add row to table: @{body('Add_a_row_into_a_table')?['Terms']}

                  ", + "Subject": "Add row to table failed: @{body('Add_a_row_into_a_table')?['Terms']}", + "To": "Sophia.Owen@fabrikam.com" + }, + "host": { + "connection": { + "name": "@parameters('$connections')['office365']['connectionId']" + } + }, + "method": "post", + "path": "/v2/Mail" + }, + "runAfter": { + "Add_a_row_into_a_table": [ + "Failed" + ] + }, + "type": "ApiConnection" + } + ``` + +1. To specify that the action runs whether the predecessor action is marked as `Failed`, `Skipped` or `TimedOut`, add the other statuses: + + ```json + "runAfter": { + "Add_a_row_into_a_table": [ + "Failed", "Skipped", "TimedOut" + ] + }, + ``` ## Evaluate actions with scopes and their results -Similar to running steps after individual actions with the `runAfter` property, you can group actions together inside a [scope](../logic-apps/logic-apps-control-flow-run-steps-group-scopes.md). You can use scopes when you want to logically group actions together, assess the scope's aggregate status, and perform actions based on that status. After all the actions in a scope finish running, the scope itself gets its own status. +Similar to running steps after individual actions with the "run after" setting, you can group actions together inside a [scope](logic-apps-control-flow-run-steps-group-scopes.md). You can use scopes when you want to logically group actions together, assess the scope's aggregate status, and perform actions based on that status. After all the actions in a scope finish running, the scope itself gets its own status. -To check a scope's status, you can use the same criteria that you use to check a logic app's run status, such as `Succeeded`, `Failed`, and so on. +To check a scope's status, you can use the same criteria that you use to check a workflow run status, such as **Succeeded**, **Failed**, and so on. -By default, when all the scope's actions succeed, the scope's status is marked `Succeeded`. If the final action in a scope results as `Failed` or `Aborted`, the scope's status is marked `Failed`. +By default, when all the scope's actions succeed, the scope's status is marked **Succeeded**. If the final action in a scope is marked **Failed** or **Aborted**, the scope's status is marked **Failed**. -To catch exceptions in a `Failed` scope and run actions that handle those errors, you can use the `runAfter` property for that `Failed` scope. That way, if *any* actions in the scope fail, and you use the `runAfter` property for that scope, you can create a single action to catch failures. +To catch exceptions in a **Failed** scope and run actions that handle those errors, you can use the "run after" setting that **Failed** scope. That way, if *any* actions in the scope fail, and you use the "run after" setting for that scope, you can create a single action to catch failures. -For limits on scopes, see [Limits and config](../logic-apps/logic-apps-limits-and-config.md). +For limits on scopes, see [Limits and config](logic-apps-limits-and-config.md). ### Get context and results for failures -Although catching failures from a scope is useful, you might also want context to help you understand exactly which actions failed plus any errors or status codes that were returned. The [`result()` function](../logic-apps/workflow-definition-language-functions-reference.md#result) returns the results from the top-level actions in a scoped action by accepting a single parameter, which is the scope's name, and returning an array that contains the results from those first-level actions. 
These action objects include the same attributes as those returned by the `actions()` function, such as the action's start time, end time, status, inputs, correlation IDs, and outputs. +Although catching failures from a scope is useful, you might also want more context to help you learn the exact failed actions plus any errors or status codes. The [`result()` function](workflow-definition-language-functions-reference.md#result) returns the results from the top-level actions in a scoped action. This function accepts the scope's name as a single parameter, and returns an array with the results from those top-level actions. These action objects have the same attributes as the attributes returned by the `actions()` function, such as the action's start time, end time, status, inputs, correlation IDs, and outputs. > [!NOTE] -> The `result()` function returns the results from *only* the first-level actions and not from deeper nested actions such as switch or condition actions. +> +> The `result()` function returns the results *only* from the top-level actions +> and not from deeper nested actions such as switch or condition actions. -To get context about the actions that failed in a scope, you can use the `@result()` expression with the scope's name and the `runAfter` property. To filter down the returned array to actions that have `Failed` status, you can add the [**Filter Array** action](logic-apps-perform-data-operations.md#filter-array-action). To run an action for a returned failed action, take the returned filtered array and use a [**For each** loop](../logic-apps/logic-apps-control-flow-loops.md). +To get context about the actions that failed in a scope, you can use the `@result()` expression with the scope's name and the "run after" setting. To filter down the returned array to actions that have **Failed** status, you can add the [**Filter Array** action](logic-apps-perform-data-operations.md#filter-array-action). To run an action for a returned failed action, take the returned filtered array and use a [**For each** loop](logic-apps-control-flow-loops.md). -Here's an example, followed by a detailed explanation, that sends an HTTP POST request with the response body for any actions that failed within the scope action named "My_Scope": +The following JSON example sends an HTTP POST request with the response body for any actions that failed within the scope action named **My_Scope**. A detailed explanation follows the example. ```json "Filter_array": { @@ -306,11 +389,11 @@ Here's an example, followed by a detailed explanation, that sends an HTTP POST r } ``` -Here's a detailed walkthrough that describes what happens in this example: +The following steps describe what happens in this example: -1. To get the result from all actions inside "My_Scope", the **Filter Array** action uses this filter expression: `@result('My_Scope')` +1. To get the result from all actions inside **My_Scope**, the **Filter Array** action uses this filter expression: `@result('My_Scope')` -1. The condition for **Filter Array** is any `@result()` item that has a status equal to `Failed`. This condition filters the array that has all the action results from "My_Scope" down to an array with only the failed action results. +1. The condition for **Filter Array** is any `@result()` item that has a status equal to `Failed`. This condition filters the array that has all the action results from **My_Scope** down to an array with only the failed action results. 1. 
Perform a `For_each` loop action on the *filtered array* outputs. This step performs an action for each failed action result that was previously filtered. @@ -358,11 +441,12 @@ To perform different exception handling patterns, you can use the expressions pr ## Set up Azure Monitor logs -The previous patterns are great way to handle errors and exceptions within a run, but you can also identify and respond to errors independent of the run itself. [Azure Monitor](../azure-monitor/overview.md) provides a simple way to send all workflow events, including all run and action statuses, to a [Log Analytics workspace](../azure-monitor/logs/data-platform-logs.md), [Azure storage account](../storage/blobs/storage-blobs-overview.md), or [Azure Event Hubs](../event-hubs/event-hubs-about.md). +The previous patterns are useful ways to handle errors and exceptions that happen within a run. However, you can also identify and respond to errors that happen independently from the run. To evaluate run statuses, you can monitor the logs and metrics for your runs, or publish them into any monitoring tool that you prefer. + +For example, [Azure Monitor](../azure-monitor/overview.md) provides a streamlined way to send all workflow events, including all run and action statuses, to a destination. You can [set up alerts for specific metrics and thresholds in Azure Monitor](monitor-logic-apps.md#set-up-monitoring-alerts). You can also send workflow events to a [Log Analytics workspace](../azure-monitor/logs/data-platform-logs.md) or [Azure storage account](../storage/blobs/storage-blobs-overview.md). Or, you can stream all events through [Azure Event Hubs](../event-hubs/event-hubs-about.md) into [Azure Stream Analytics](https://azure.microsoft.com/services/stream-analytics/). In Stream Analytics, you can write live queries based on any anomalies, averages, or failures from the diagnostic logs. You can use Stream Analytics to send information to other data sources, such as queues, topics, SQL, Azure Cosmos DB, or Power BI. -To evaluate run statuses, you can monitor the logs and metrics, or publish them into any monitoring tool that you prefer. One potential option is to stream all the events through Event Hubs into [Azure Stream Analytics](https://azure.microsoft.com/services/stream-analytics/). In Stream Analytics, you can write live queries based on any anomalies, averages, or failures from the diagnostic logs. You can use Stream Analytics to send information to other data sources, such as queues, topics, SQL, Azure Cosmos DB, or Power BI. +For more information, review [Set up Azure Monitor logs and collect diagnostics data for Azure Logic Apps](monitor-logic-apps-log-analytics.md). 
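Before moving on, here's a minimal sketch that ties the earlier patterns together: a scope groups the actions that you want to "try", and a handler action runs only when that scope is marked **Failed**. The action names (`Try_Scope`, `Catch_Send_alert`) and the alert URL are hypothetical, and the handler could just as well be the **Filter Array** and **For each** combination shown earlier:

```json
"Try_Scope": {
    "type": "Scope",
    "actions": {
        // Add the actions that you want to monitor as a group here.
    },
    "runAfter": {}
},
"Catch_Send_alert": {
    "type": "Http",
    "inputs": {
        "method": "POST",
        "uri": "https://example.com/workflow-alerts",
        "body": "@result('Try_Scope')"
    },
    "runAfter": {
        "Try_Scope": [ "Failed" ]
    }
}
```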
## Next steps -* [See how a customer builds error handling with Azure Logic Apps](../logic-apps/logic-apps-scenario-error-and-exception-handling.md) -* [Find more Logic Apps examples and scenarios](../logic-apps/logic-apps-examples-and-scenarios.md) +* [Learn more about Azure Logic Apps examples and scenarios](logic-apps-examples-and-scenarios.md) diff --git a/articles/logic-apps/logic-apps-limits-and-config.md b/articles/logic-apps/logic-apps-limits-and-config.md index e626fa6b9695f..465e317f40bd5 100644 --- a/articles/logic-apps/logic-apps-limits-and-config.md +++ b/articles/logic-apps/logic-apps-limits-and-config.md @@ -52,7 +52,7 @@ The following table lists the values for a single workflow run: | Name | Multi-tenant | Single-tenant | Integration service environment | Notes | |------|--------------|---------------|---------------------------------|-------| -| Run history retention in storage | 90 days | 90 days
                  (Default) | 366 days | The amount of time to keep a workflow's run history in storage after a run starts.

                  **Note**: If the workflow's run duration exceeds the retention limit, that run is removed from the run history in storage. If a run isn't immediately removed after reaching the retention limit, the run is removed within 7 days.

                  Whether a run completes or times out, run history retention is always calculated by using the run's start time and the current limit specified in the workflow setting, [**Run history retention in days**](#change-retention). No matter the previous limit, the current limit is always used for calculating retention.

                  For more information, review [Change duration and run history retention in storage](#change-retention). | +| Run history retention in storage | 90 days | 90 days
                  (Default) | 366 days | The amount of time to keep a workflow's run history in storage after a run starts.

                  **Note**: If the workflow's run duration exceeds the retention limit, this run is removed from the run history in storage. If a run isn't immediately removed after reaching the retention limit, the run is removed within 7 days.

                  Whether a run completes or times out, run history retention is always calculated by using the run's start time and the current limit specified in the workflow setting, [**Run history retention in days**](#change-retention). No matter the previous limit, the current limit is always used for calculating retention.

                  For more information, review [Change duration and run history retention in storage](#change-retention). | | Run duration | 90 days | - Stateful workflow: 90 days
                  (Default)

                  - Stateless workflow: 5 min
                  (Default) | 366 days | The amount of time that a workflow can continue running before forcing a timeout.

                  The run duration is calculated by using a run's start time and the limit that's specified in the workflow setting, [**Run history retention in days**](#change-duration) at that start time.

                  **Important**: Make sure the run duration value is always less than or equal to the run history retention in storage value. Otherwise, run histories might be deleted before the associated jobs are complete.

                  For more information, review [Change run duration and history retention in storage](#change-duration). | | Recurrence interval | - Min: 1 sec

                  - Max: 500 days | - Min: 1 sec

                  - Max: 500 days | - Min: 1 sec

                  - Max: 500 days || |||||| @@ -250,6 +250,18 @@ For more information about your logic app resource definition, review [Overview: Azure Logic Apps supports write operations, including inserts and updates, through the on-premises data gateway. However, these operations have [limits on their payload size](/data-integration/gateway/service-gateway-onprem#considerations). + + +## Retry policy limits + +The following table lists the retry policy limits for a trigger or action, based on whether you have a [Consumption or Standard logic app workflow](logic-apps-overview.md#resource-environment-differences). + +| Name | Consumption limit | Standard limit | Notes | +|------|-------------------|----------------|-------| +| Retry attempts | - Default: 4 attempts
                  - Max: 90 attempts | - Default: 4 attempts | To change the default limit in Consumption logic app workflows, use the [retry policy parameter](logic-apps-exception-handling.md#retry-policies). To change the default limit in Standard logic app workflows, review [Edit host and app settings for logic apps in single-tenant Azure Logic Apps](edit-app-settings-host-settings.md). | +| Retry interval | None | Default: 7 sec | To change the default limit in Consumption logic app workflows, use the [retry policy parameter](logic-apps-exception-handling.md#retry-policies).

                  To change the default limit in Standard logic app workflows, review [Edit host and app settings for logic apps in single-tenant Azure Logic Apps](edit-app-settings-host-settings.md). | +||||| + ## Variables action limits @@ -304,18 +316,6 @@ By default, the HTTP action and APIConnection actions follow the [standard async | Request URL character limit | 16,384 characters | | |||| - - -### Retry policy - -| Name | Multi-tenant limit | Single-tenant limit | Notes | -|------|--------------------|---------------------|-------| -| Retry attempts | - Default: 4 attempts
                  - Max: 90 attempts | - Default: 4 attempts | To change the default limit in the multi-tenant service, use the [retry policy parameter](logic-apps-exception-handling.md#retry-policies).

                  To change the default limit in the single-tenant service, review [Edit host and app settings for logic apps in single-tenant Azure Logic Apps](edit-app-settings-host-settings.md). | -| Retry interval | None | Default: 7 sec | To change the default limit in the multi-tenant service, use the [retry policy parameter](logic-apps-exception-handling.md#retry-policies).

                  To change the default limit in the single-tenant service, review [Edit host and app settings for logic apps in single-tenant Azure Logic Apps](edit-app-settings-host-settings.md). | -| Retry max delay | Default: 1 day | Default: 1 hour | To change the default limit in the multi-tenant service, use the [retry policy parameter](logic-apps-exception-handling.md#retry-policies).

                  To change the default limit in the single-tenant service, review [Edit host and app settings for logic apps in single-tenant Azure Logic Apps](edit-app-settings-host-settings.md). | -| Retry min delay | Default: 5 sec | Default: 5 sec | To change the default limit in the multi-tenant service, use the [retry policy parameter](logic-apps-exception-handling.md#retry-policies).

                  To change the default limit in the single-tenant service, review [Edit host and app settings for logic apps in single-tenant Azure Logic Apps](edit-app-settings-host-settings.md). | -||||| - ### Authentication limits @@ -657,7 +657,7 @@ This section lists the outbound IP addresses that Azure Logic Apps requires in y | West Europe | 40.68.222.65, 40.68.209.23, 13.95.147.65, 23.97.218.130, 51.144.182.201, 23.97.211.179, 104.45.9.52, 23.97.210.126, 13.69.71.160, 13.69.71.161, 13.69.71.162, 13.69.71.163, 13.69.71.164, 13.69.71.165, 13.69.71.166, 13.69.71.167, 20.103.21.81, 20.103.17.247, 20.103.17.223, 20.103.16.47, 20.103.58.116, 20.103.57.29, 20.101.174.49, 20.101.174.23, 20.93.236.26, 20.93.235.107, 20.103.94.250, 20.76.174.72, 20.82.87.192, 20.82.87.16, 20.76.170.145, 20.103.91.39, 20.103.84.41, 20.76.161.156 | | West India | 104.211.164.80, 104.211.162.205, 104.211.164.136, 104.211.158.127, 104.211.156.153, 104.211.158.123, 104.211.154.59, 104.211.154.7 | | West US | 52.160.92.112, 40.118.244.241, 40.118.241.243, 157.56.162.53, 157.56.167.147, 104.42.49.145, 40.83.164.80, 104.42.38.32, 13.86.223.0, 13.86.223.1, 13.86.223.2, 13.86.223.3, 13.86.223.4, 13.86.223.5, 104.40.34.169, 104.40.32.148, 52.160.70.221, 52.160.70.105, 13.91.81.221, 13.64.231.196, 13.87.204.182, 40.78.65.193, 13.87.207.39, 104.42.44.28, 40.83.134.97, 40.78.65.112, 168.62.9.74, 168.62.28.191 | -| West US 2 | 13.66.210.167, 52.183.30.169, 52.183.29.132, 13.66.210.167, 13.66.201.169, 13.77.149.159, 52.175.198.132, 13.66.246.219, 20.99.189.158, 20.99.189.70, 20.72.244.58, 20.72.243.225 | +| West US 2 | 13.66.210.167, 52.183.30.169, 52.183.29.132, 13.66.201.169, 13.77.149.159, 52.175.198.132, 13.66.246.219, 20.99.189.158, 20.99.189.70, 20.72.244.58, 20.72.243.225 | | West US 3 | 20.150.181.32, 20.150.181.33, 20.150.181.34, 20.150.181.35, 20.150.181.36, 20.150.181.37, 20.150.181.38, 20.150.173.192, 20.106.85.228, 20.150.159.163, 20.106.116.207, 20.106.116.186 | ||| diff --git a/articles/logic-apps/logic-apps-overview.md b/articles/logic-apps/logic-apps-overview.md index bcdc7a1f20808..f3ed66999d90e 100644 --- a/articles/logic-apps/logic-apps-overview.md +++ b/articles/logic-apps/logic-apps-overview.md @@ -47,9 +47,9 @@ For more information about the ways workflows can access and work with apps, dat * [Connectors for Azure Logic Apps](../connectors/apis-list.md) -* [Managed connectors for Azure Logic Apps](../connectors/built-in.md) +* [Managed connectors for Azure Logic Apps](../connectors/managed.md) -* [Built-in triggers and actions for Azure Logic Apps](../connectors/managed.md) +* [Built-in triggers and actions for Azure Logic Apps](../connectors/built-in.md) * [B2B enterprise integration solutions with Azure Logic Apps](logic-apps-enterprise-integration-overview.md) diff --git a/articles/logic-apps/logic-apps-scenario-edi-send-batch-messages.md b/articles/logic-apps/logic-apps-scenario-edi-send-batch-messages.md index 57a90a5eedf7b..5e9c1e6a01207 100644 --- a/articles/logic-apps/logic-apps-scenario-edi-send-batch-messages.md +++ b/articles/logic-apps/logic-apps-scenario-edi-send-batch-messages.md @@ -11,6 +11,8 @@ ms.date: 08/19/2018 # Exchange EDI messages as batches or groups between trading partners in Azure Logic Apps +[!INCLUDE [logic-apps-sku-consumption](../../includes/logic-apps-sku-consumption.md)] + In business to business (B2B) scenarios, partners often exchange messages in groups or *batches*. 
When you build a batching solution with Logic Apps, diff --git a/articles/logic-apps/logic-apps-scenario-error-and-exception-handling.md b/articles/logic-apps/logic-apps-scenario-error-and-exception-handling.md deleted file mode 100644 index fef4f61400b4a..0000000000000 --- a/articles/logic-apps/logic-apps-scenario-error-and-exception-handling.md +++ /dev/null @@ -1,513 +0,0 @@ ---- -title: Exception handling & error logging scenario -description: Advanced exception handling and error logging in Azure Logic Apps. -services: logic-apps -ms.suite: integration -author: hedidin -ms.reviewer: estfan, azla -ms.topic: how-to -ms.date: 07/29/2016 ---- - -# Scenario: Exception handling and error logging for logic apps - -This scenario describes how you can extend a logic app to better support exception handling. -We've used a real-life use case to answer the question: "Does Azure Logic Apps support exception and error handling?" - -> [!NOTE] -> The current Azure Logic Apps schema provides a standard template for action responses. -> This template includes both internal validation and error responses returned from an API app. - -## Scenario and use case overview - -Here's the story as the use case for this scenario: - -A well-known healthcare organization engaged us to develop an Azure solution -that would create a patient portal by using Microsoft Dynamics CRM Online. -They needed to send appointment records between the Dynamics CRM Online patient portal and Salesforce. -We were asked to use the [HL7 FHIR](https://www.hl7.org/implement/standards/fhir/) standard for all patient records. - -The project had two major requirements: - -* A method to log records sent from the Dynamics CRM Online portal -* A way to view any errors that occurred within the workflow - -> [!TIP] -> For a high-level video about this project, see -> [Integration User Group](http://www.integrationusergroup.com/logic-apps-support-error-handling/ "Integration User Group"). - -## How we solved the problem - -We chose [Azure Cosmos DB](https://azure.microsoft.com/services/cosmos-db/ "Azure Cosmos DB") -As a repository for the log and error records (Cosmos DB refers to records as documents). -Because Azure Logic Apps has a standard template for all responses, -we would not have to create a custom schema. We could create an API app to **Insert** and **Query** for both error and log records. -We could also define a schema for each within the API app. - -Another requirement was to purge records after a certain date. -Cosmos DB has a property called [Time to Live](https://azure.microsoft.com/blog/documentdb-now-supports-time-to-live-ttl/ "Time to Live") (TTL), -which allowed us to set a **Time to Live** value for each record or collection. -This capability eliminated the need to manually delete records in Cosmos DB. - -> [!IMPORTANT] -> To complete this tutorial, you need to create a Cosmos DB database and two collections (Logging and Errors). - -## Create the logic app - -The first step is to create the logic app and open the app in Logic App Designer. -In this example, we are using parent-child logic apps. -Let's assume that we have already created the parent and are going to create one child logic app. - -Because we are going to log the record coming out of Dynamics CRM Online, -let's start at the top. We must use a **Request** trigger because the parent logic app triggers this child. 
- -### Logic app trigger - -We are using a **Request** trigger as shown in the following example: - -``` json -"triggers": { - "request": { - "type": "request", - "kind": "http", - "inputs": { - "schema": { - "properties": { - "CRMid": { - "type": "string" - }, - "recordType": { - "type": "string" - }, - "salesforceID": { - "type": "string" - }, - "update": { - "type": "boolean" - } - }, - "required": [ - "CRMid", - "recordType", - "salesforceID", - "update" - ], - "type": "object" - } - } - } - }, - -``` - - -## Steps - -We must log the source (request) of the patient record from the Dynamics CRM Online portal. - -1. We must get a new appointment record from Dynamics CRM Online. - - The trigger coming from CRM provides us with the **CRM PatentId**, - **record type**, **New or Updated Record** (new or update Boolean value), - and **SalesforceId**. The **SalesforceId** can be null because it's only used for an update. - We get the CRM record by using the CRM **PatientID** and the **Record Type**. - -2. Next, we need to add our Azure Cosmos DB SQL API app **InsertLogEntry** operation as shown here in -Logic App Designer. - - **Insert log entry** - - ![Screenshot from Logic App Designer showing the configuration settings for InsertLogEntry.](media/logic-apps-scenario-error-and-exception-handling/lognewpatient.png) - - **Insert error entry** - - ![Screenshot from Logic App Designer showing the configuration settings for CreateErrorRecord.](media/logic-apps-scenario-error-and-exception-handling/insertlogentry.png) - - **Check for create record failure** - - ![Screenshot of the CreateErrorRecord screen in the Logic App Designer showing the fields for creating an error entry.](media/logic-apps-scenario-error-and-exception-handling/condition.png) - -## Logic app source code - -> [!NOTE] -> The following examples are samples only. -> Because this tutorial is based on an implementation now in production, -> the value of a **Source Node** might not display properties -> that are related to scheduling an appointment.> - -### Logging - -The following logic app code sample shows how to handle logging. - -#### Log entry - -Here is the logic app source code for inserting a log entry. - -``` json -"InsertLogEntry": { - "metadata": { - "apiDefinitionUrl": "https://.../swagger/docs/v1", - "swaggerSource": "website" - }, - "type": "Http", - "inputs": { - "body": { - "date": "@{outputs('Gets_NewPatientRecord')['headers']['Date']}", - "operation": "New Patient", - "patientId": "@{triggerBody()['CRMid']}", - "providerId": "@{triggerBody()['providerID']}", - "source": "@{outputs('Gets_NewPatientRecord')['headers']}" - }, - "method": "post", - "uri": "https://.../api/Log" - }, - "runAfter": { - "Gets_NewPatientecord": ["Succeeded"] - } -} -``` - -#### Log request - -Here is the log request message posted to the API app. 
- -``` json - { - "uri": "https://.../api/Log", - "method": "post", - "body": { - "date": "Fri, 10 Jun 2016 22:31:56 GMT", - "operation": "New Patient", - "patientId": "6b115f6d-a7ee-e511-80f5-3863bb2eb2d0", - "providerId": "", - "source": "{/"Pragma/":/"no-cache/",/"x-ms-request-id/":/"e750c9a9-bd48-44c4-bbba-1688b6f8a132/",/"OData-Version/":/"4.0/",/"Cache-Control/":/"no-cache/",/"Date/":/"Fri, 10 Jun 2016 22:31:56 GMT/",/"Set-Cookie/":/"ARRAffinity=785f4334b5e64d2db0b84edcc1b84f1bf37319679aefce206b51510e56fd9770;Path=/;Domain=127.0.0.1/",/"Server/":/"Microsoft-IIS/8.0,Microsoft-HTTPAPI/2.0/",/"X-AspNet-Version/":/"4.0.30319/",/"X-Powered-By/":/"ASP.NET/",/"Content-Length/":/"1935/",/"Content-Type/":/"application/json; odata.metadata=minimal; odata.streaming=true/",/"Expires/":/"-1/"}" - } - } - -``` - - -#### Log response - -Here is the log response message from the API app. - -``` json -{ - "statusCode": 200, - "headers": { - "Pragma": "no-cache", - "Cache-Control": "no-cache", - "Date": "Fri, 10 Jun 2016 22:32:17 GMT", - "Server": "Microsoft-IIS/8.0", - "X-AspNet-Version": "4.0.30319", - "X-Powered-By": "ASP.NET", - "Content-Length": "964", - "Content-Type": "application/json; charset=utf-8", - "Expires": "-1" - }, - "body": { - "ttl": 2592000, - "id": "6b115f6d-a7ee-e511-80f5-3863bb2eb2d0_1465597937", - "_rid": "XngRAOT6IQEHAAAAAAAAAA==", - "_self": "dbs/XngRAA==/colls/XngRAOT6IQE=/docs/XngRAOT6IQEHAAAAAAAAAA==/", - "_ts": 1465597936, - "_etag": "/"0400fc2f-0000-0000-0000-575b3ff00000/"", - "patientID": "6b115f6d-a7ee-e511-80f5-3863bb2eb2d0", - "timestamp": "2016-06-10T22:31:56Z", - "source": "{/"Pragma/":/"no-cache/",/"x-ms-request-id/":/"e750c9a9-bd48-44c4-bbba-1688b6f8a132/",/"OData-Version/":/"4.0/",/"Cache-Control/":/"no-cache/",/"Date/":/"Fri, 10 Jun 2016 22:31:56 GMT/",/"Set-Cookie/":/"ARRAffinity=785f4334b5e64d2db0b84edcc1b84f1bf37319679aefce206b51510e56fd9770;Path=/;Domain=127.0.0.1/",/"Server/":/"Microsoft-IIS/8.0,Microsoft-HTTPAPI/2.0/",/"X-AspNet-Version/":/"4.0.30319/",/"X-Powered-By/":/"ASP.NET/",/"Content-Length/":/"1935/",/"Content-Type/":/"application/json; odata.metadata=minimal; odata.streaming=true/",/"Expires/":/"-1/"}", - "operation": "New Patient", - "salesforceId": "", - "expired": false - } -} - -``` - -Now let's look at the error handling steps. - -### Error handling - -The following logic app code sample shows how you can implement error handling. - -#### Create error record - -Here is the logic app source code for creating an error record. 
- -``` json -"actions": { - "CreateErrorRecord": { - "metadata": { - "apiDefinitionUrl": "https://.../swagger/docs/v1", - "swaggerSource": "website" - }, - "type": "Http", - "inputs": { - "body": { - "action": "New_Patient", - "isError": true, - "crmId": "@{triggerBody()['CRMid']}", - "patientID": "@{triggerBody()['CRMid']}", - "message": "@{body('Create_NewPatientRecord')['message']}", - "providerId": "@{triggerBody()['providerId']}", - "severity": 4, - "source": "@{actions('Create_NewPatientRecord')['inputs']['body']}", - "statusCode": "@{int(outputs('Create_NewPatientRecord')['statusCode'])}", - "salesforceId": "", - "update": false - }, - "method": "post", - "uri": "https://.../api/CrMtoSfError" - }, - "runAfter": - { - "Create_NewPatientRecord": ["Failed" ] - } - } -} -``` - -#### Insert error into Cosmos DB--request - -``` json - -{ - "uri": "https://.../api/CrMtoSfError", - "method": "post", - "body": { - "action": "New_Patient", - "isError": true, - "crmId": "6b115f6d-a7ee-e511-80f5-3863bb2eb2d0", - "patientId": "6b115f6d-a7ee-e511-80f5-3863bb2eb2d0", - "message": "Salesforce failed to complete task: Message: duplicate value found: Account_ID_MED__c duplicates value on record with id: 001U000001c83gK", - "providerId": "", - "severity": 4, - "salesforceId": "", - "update": false, - "source": "{/"Account_Class_vod__c/":/"PRAC/",/"Account_Status_MED__c/":/"I/",/"CRM_HUB_ID__c/":/"6b115f6d-a7ee-e511-80f5-3863bb2eb2d0/",/"Credentials_vod__c/",/"DTC_ID_MED__c/":/"/",/"Fax/":/"/",/"FirstName/":/"A/",/"Gender_vod__c/":/"/",/"IMS_ID__c/":/"/",/"LastName/":/"BAILEY/",/"MasterID_mp__c/":/"/",/"C_ID_MED__c/":/"851588/",/"Middle_vod__c/":/"/",/"NPI_vod__c/":/"/",/"PDRP_MED__c/":false,/"PersonDoNotCall/":false,/"PersonEmail/":/"/",/"PersonHasOptedOutOfEmail/":false,/"PersonHasOptedOutOfFax/":false,/"PersonMobilePhone/":/"/",/"Phone/":/"/",/"Practicing_Specialty__c/":/"FM - FAMILY MEDICINE/",/"Primary_City__c/":/"/",/"Primary_State__c/":/"/",/"Primary_Street_Line2__c/":/"/",/"Primary_Street__c/":/"/",/"Primary_Zip__c/":/"/",/"RecordTypeId/":/"012U0000000JaPWIA0/",/"Request_Date__c/":/"2016-06-10T22:31:55.9647467Z/",/"ONY_ID__c/":/"/",/"Specialty_1_vod__c/":/"/",/"Suffix_vod__c/":/"/",/"Website/":/"/"}", - "statusCode": "400" - } -} -``` - -#### Insert error into Cosmos DB--response - -``` json -{ - "statusCode": 200, - "headers": { - "Pragma": "no-cache", - "Cache-Control": "no-cache", - "Date": "Fri, 10 Jun 2016 22:31:57 GMT", - "Server": "Microsoft-IIS/8.0", - "X-AspNet-Version": "4.0.30319", - "X-Powered-By": "ASP.NET", - "Content-Length": "1561", - "Content-Type": "application/json; charset=utf-8", - "Expires": "-1" - }, - "body": { - "id": "6b115f6d-a7ee-e511-80f5-3863bb2eb2d0-1465597917", - "_rid": "sQx2APhVzAA8AAAAAAAAAA==", - "_self": "dbs/sQx2AA==/colls/sQx2APhVzAA=/docs/sQx2APhVzAA8AAAAAAAAAA==/", - "_ts": 1465597912, - "_etag": "/"0c00eaac-0000-0000-0000-575b3fdc0000/"", - "prescriberId": "6b115f6d-a7ee-e511-80f5-3863bb2eb2d0", - "timestamp": "2016-06-10T22:31:57.3651027Z", - "action": "New_Patient", - "salesforceId": "", - "update": false, - "body": "CRM failed to complete task: Message: duplicate value found: CRM_HUB_ID__c duplicates value on record with id: 001U000001c83gK", - "source": "{/"Account_Class_vod__c/":/"PRAC/",/"Account_Status_MED__c/":/"I/",/"CRM_HUB_ID__c/":/"6b115f6d-a7ee-e511-80f5-3863bb2eb2d0/",/"Credentials_vod__c/":/"DO - Degree level is 
DO/",/"DTC_ID_MED__c/":/"/",/"Fax/":/"/",/"FirstName/":/"A/",/"Gender_vod__c/":/"/",/"IMS_ID__c/":/"/",/"LastName/":/"BAILEY/",/"MterID_mp__c/":/"/",/"Medicis_ID_MED__c/":/"851588/",/"Middle_vod__c/":/"/",/"NPI_vod__c/":/"/",/"PDRP_MED__c/":false,/"PersonDoNotCall/":false,/"PersonEmail/":/"/",/"PersonHasOptedOutOfEmail/":false,/"PersonHasOptedOutOfFax/":false,/"PersonMobilePhone/":/"/",/"Phone/":/"/",/"Practicing_Specialty__c/":/"FM - FAMILY MEDICINE/",/"Primary_City__c/":/"/",/"Primary_State__c/":/"/",/"Primary_Street_Line2__c/":/"/",/"Primary_Street__c/":/"/",/"Primary_Zip__c/":/"/",/"RecordTypeId/":/"012U0000000JaPWIA0/",/"Request_Date__c/":/"2016-06-10T22:31:55.9647467Z/",/"XXXXXXX/":/"/",/"Specialty_1_vod__c/":/"/",/"Suffix_vod__c/":/"/",/"Website/":/"/"}", - "code": 400, - "errors": null, - "isError": true, - "severity": 4, - "notes": null, - "resolved": 0 - } -} -``` - -#### Salesforce error response - -``` json -{ - "statusCode": 400, - "headers": { - "Pragma": "no-cache", - "x-ms-request-id": "3e8e4884-288e-4633-972c-8271b2cc912c", - "X-Content-Type-Options": "nosniff", - "Cache-Control": "no-cache", - "Date": "Fri, 10 Jun 2016 22:31:56 GMT", - "Set-Cookie": "ARRAffinity=785f4334b5e64d2db0b84edcc1b84f1bf37319679aefce206b51510e56fd9770;Path=/;Domain=127.0.0.1", - "Server": "Microsoft-IIS/8.0,Microsoft-HTTPAPI/2.0", - "X-AspNet-Version": "4.0.30319", - "X-Powered-By": "ASP.NET", - "Content-Length": "205", - "Content-Type": "application/json; charset=utf-8", - "Expires": "-1" - }, - "body": { - "status": 400, - "message": "Salesforce failed to complete task: Message: duplicate value found: Account_ID_MED__c duplicates value on record with id: 001U000001c83gK", - "source": "Salesforce.Common", - "errors": [] - } -} - -``` - -### Return the response back to parent logic app - -After you get the response, you can pass the response back to the parent logic app. - -#### Return success response to parent logic app - -``` json -"SuccessResponse": { - "runAfter": - { - "UpdateNew_CRMPatientResponse": ["Succeeded"] - }, - "inputs": { - "body": { - "status": "Success" - }, - "headers": { - " Content-type": "application/json", - "x-ms-date": "@utcnow()" - }, - "statusCode": 200 - }, - "type": "Response" -} -``` - -#### Return error response to parent logic app - -``` json -"ErrorResponse": { - "runAfter": - { - "Create_NewPatientRecord": ["Failed"] - }, - "inputs": { - "body": { - "status": "BadRequest" - }, - "headers": { - "Content-type": "application/json", - "x-ms-date": "@utcnow()" - }, - "statusCode": 400 - }, - "type": "Response" -} - -``` - - -## Cosmos DB repository and portal - -Our solution added capabilities with [Azure Cosmos DB](https://azure.microsoft.com/services/cosmos-db). - -### Error management portal - -To view the errors, you can create an MVC web app to display the error records from Cosmos DB. -The **List**, **Details**, **Edit**, and **Delete** operations are included in the current version. - -> [!NOTE] -> Edit operation: Cosmos DB replaces the entire document. -> The records shown in the **List** and **Detail** views are samples only. -> They are not actual patient appointment records. - -Here are examples of our MVC app details created with the previously described approach. 
- -#### Error management list -![Error List](media/logic-apps-scenario-error-and-exception-handling/errorlist.png) - -#### Error management detail view -![Error Details](media/logic-apps-scenario-error-and-exception-handling/errordetails.png) - -### Log management portal - -To view the logs, we also created an MVC web app. -Here are examples of our MVC app details created with the previously described approach. - -#### Sample log detail view -![Log Detail View](media/logic-apps-scenario-error-and-exception-handling/samplelogdetail.png) - -### API app details - -#### Logic Apps exception management API - -Our open-source Azure Logic Apps exception management API app -provides functionality as described here - there are two controllers: - -* **ErrorController** inserts an error record (document) in an Azure Cosmos DB collection. -* **LogController** Inserts a log record (document) in an Azure Cosmos DB collection. - -> [!TIP] -> Both controllers use `async Task` operations, -> allowing operations to resolve at runtime, -> so we can create the Azure Cosmos DB schema in the body of the operation. -> - -Every document in Azure Cosmos DB must have a unique ID. -We are using `PatientId` and adding a timestamp that is converted to a Unix timestamp value (double). -We truncate the value to remove the fractional value. - -You can view the source code of our error controller API from -[GitHub](https://github.com/HEDIDIN/LogicAppsExceptionManagementApi/blob/master/LogicAppsExceptionManagementApi/Controllers/LogController.cs). - -We call the API from a logic app by using the following syntax: - -``` json - "actions": { - "CreateErrorRecord": { - "metadata": { - "apiDefinitionUrl": "https://.../swagger/docs/v1", - "swaggerSource": "website" - }, - "type": "Http", - "inputs": { - "body": { - "action": "New_Patient", - "isError": true, - "crmId": "@{triggerBody()['CRMid']}", - "prescriberId": "@{triggerBody()['CRMid']}", - "message": "@{body('Create_NewPatientRecord')['message']}", - "salesforceId": "@{triggerBody()['salesforceID']}", - "severity": 4, - "source": "@{actions('Create_NewPatientRecord')['inputs']['body']}", - "statusCode": "@{int(outputs('Create_NewPatientRecord')['statusCode'])}", - "update": false - }, - "method": "post", - "uri": "https://.../api/CrMtoSfError" - }, - "runAfter": { - "Create_NewPatientRecord": ["Failed"] - } - } - } -``` - -The expression in the preceding code sample checks for the *Create_NewPatientRecord* status of **Failed**. - -## Summary - -* You can easily implement logging and error handling in a logic app. -* You can use Azure Cosmos DB as the repository for log and error records (documents). -* You can use MVC to create a portal to display log and error records. - -### Source code - -The source code for the Logic Apps exception management API application is available in this -[GitHub repository](https://github.com/HEDIDIN/LogicAppsExceptionManagementApi "Logic App Exception Management API"). 
- -## Next steps - -* [View more logic app examples and scenarios](../logic-apps/logic-apps-examples-and-scenarios.md) -* [Monitor logic apps](../logic-apps/monitor-logic-apps.md) -* [Automate logic app deployment](../logic-apps/logic-apps-azure-resource-manager-templates-overview.md) diff --git a/articles/logic-apps/logic-apps-scenario-social-serverless.md b/articles/logic-apps/logic-apps-scenario-social-serverless.md index 2380e38fdbe8c..7b4eb4047c060 100644 --- a/articles/logic-apps/logic-apps-scenario-social-serverless.md +++ b/articles/logic-apps/logic-apps-scenario-social-serverless.md @@ -3,8 +3,6 @@ title: Create customer insights dashboard description: Manage customer feedback, social media data, and more by building a customer dashboard with Azure Logic Apps and Azure Functions. services: logic-apps ms.suite: integration -author: jeffhollan -ms.author: jehollan ms.reviewer: estfan, azla ms.topic: how-to ms.date: 03/15/2018 @@ -12,6 +10,8 @@ ms.date: 03/15/2018 # Create a streaming customer insights dashboard with Azure Logic Apps and Azure Functions +[!INCLUDE [logic-apps-sku-consumption](../../includes/logic-apps-sku-consumption.md)] + Azure offers [serverless](https://azure.microsoft.com/solutions/serverless/) tools that help you quickly build and host apps in the cloud, without having to think about infrastructure. In this tutorial, you can create a dashboard that triggers on customer feedback, diff --git a/articles/logic-apps/logic-apps-securing-a-logic-app.md b/articles/logic-apps/logic-apps-securing-a-logic-app.md index fafcb6bbfa1e3..086bdf02a90b5 100644 --- a/articles/logic-apps/logic-apps-securing-a-logic-app.md +++ b/articles/logic-apps/logic-apps-securing-a-logic-app.md @@ -35,8 +35,7 @@ For more information about security in Azure, review these topics: ## Access to logic app operations -For Consumption logic apps only, before you can create or manage logic apps and their connections, you need specific permissions, which are provided through roles using [Azure role-based access control (Azure RBAC)](../role-based-access-control/role-assignments-portal.md). You can also -you can set up permissions so that only specific users or groups can run specific tasks, such as managing, editing, and viewing logic apps. To control their permissions, you can assign built-in or customized roles to members who have access to your Azure subscription. Azure Logic Apps has the following specific roles: +For Consumption logic apps only, before you can create or manage logic apps and their connections, you need specific permissions, which are provided through roles using [Azure role-based access control (Azure RBAC)](../role-based-access-control/role-assignments-portal.md). You can also set up permissions so that only specific users or groups can run specific tasks, such as managing, editing, and viewing logic apps. To control their permissions, you can assign built-in or customized roles to members who have access to your Azure subscription. Azure Logic Apps has the following specific roles: * [Logic App Contributor](../role-based-access-control/built-in-roles.md#logic-app-contributor): Lets you manage logic apps, but you can't change access to them. 
diff --git a/articles/logic-apps/logic-apps-workflow-actions-triggers.md b/articles/logic-apps/logic-apps-workflow-actions-triggers.md index 12b1ed86f15cd..a9fe6f453c981 100644 --- a/articles/logic-apps/logic-apps-workflow-actions-triggers.md +++ b/articles/logic-apps/logic-apps-workflow-actions-triggers.md @@ -1029,7 +1029,7 @@ This action definition merges `abcdefg ` with a trailing space and the value `12 }, ``` -Here is the output that this action creates: +Here's the output that this action creates: `abcdefg 1234` @@ -1045,7 +1045,7 @@ This action definition merges a string variable that contains `abcdefg` and an i }, ``` -Here is the output that this action creates: +Here's the output that this action creates: `"abcdefg1234"` @@ -1053,7 +1053,7 @@ Here is the output that this action creates: ### Execute JavaScript Code action -This action runs a JavaScript code snippet and returns the results through a `Result` token that later actions can reference. +This action runs a JavaScript code snippet and returns the results through a token that subsequent actions in the workflow can reference. ```json "Execute_JavaScript_Code": { @@ -1061,7 +1061,7 @@ This action runs a JavaScript code snippet and returns the results through a `Re "inputs": { "code": "", "explicitDependencies": { - "actions": [ ], + "actions": [ ], "includeTrigger": true } }, @@ -1073,26 +1073,23 @@ This action runs a JavaScript code snippet and returns the results through a `Re | Value | Type | Description | |-------|------|-------------| -| <*JavaScript-code-snippet*> | Varies | The JavaScript code that you want to run. For code requirements and more information, see [Add and run code snippets with inline code](../logic-apps/logic-apps-add-run-inline-code.md).

                  In the `code` attribute, your code snippet can use the read-only `workflowContext` object as input. This object has subproperties that give your code access to the results from the trigger and previous actions in your workflow. For more information about the `workflowContext` object, see [Reference trigger and action results in your code](../logic-apps/logic-apps-add-run-inline-code.md#workflowcontext). | +| <*JavaScript-code-snippet*> | Varies | The JavaScript code that you want to run. For code requirements and more information, see [Run code snippets in workflows](logic-apps-add-run-inline-code.md).

                  In the `code` attribute, your code snippet can use the read-only `workflowContext` object as input. This object has subproperties that give your code access to the outputs from the trigger and any preceding actions in your workflow. For more information about the `workflowContext` object, see [Reference trigger and action results using the workflowContext object](logic-apps-add-run-inline-code.md#workflowcontext). | |||| *Required in some cases* -The `explicitDependencies` attribute specifies that you want to explicitly -include results from the trigger, previous actions, or both as dependencies -for your code snippet. For more information about adding these dependencies, see -[Add parameters for inline code](../logic-apps/logic-apps-add-run-inline-code.md#add-parameters). +The `explicitDependencies` attribute specifies that you want to explicitly include results from the trigger, previous actions, or both as dependencies for your code snippet. For more information about adding these dependencies, see [Add dependencies as parameters to an Inline Code action](logic-apps-add-run-inline-code.md#add-parameters). For the `includeTrigger` attribute, you can specify `true` or `false` values. | Value | Type | Description | |-------|------|-------------| -| <*previous-actions*> | String array | An array with your specified action names. Use the action names that appear in your workflow definition where action names use underscores (_), not spaces (" "). | +| <*preceding-actions*> | String array | An array with the action names in JSON format as dependencies. Make sure to use the action names that appear in your workflow definition where action names use underscores (**_**), not spaces (**" "**). | |||| *Example 1* -This action runs code that gets your logic app's name and returns the text "Hello world from \" as the result. In this example, the code references the workflow's name by accessing the `workflowContext.workflow.name` property through the read-only `workflowContext` object. For more information about using the `workflowContext` object, see [Reference trigger and action results in your code](../logic-apps/logic-apps-add-run-inline-code.md#workflowcontext). +This action runs code that gets your logic app workflow's name and returns the text "Hello world from \" as the result. In this example, the code references the workflow's name by accessing the `workflowContext.workflow.name` property through the read-only `workflowContext` object. For more information about using the `workflowContext` object, see [Reference trigger and action results in your code](../logic-apps/logic-apps-add-run-inline-code.md#workflowcontext). ```json "Execute_JavaScript_Code": { @@ -1106,18 +1103,18 @@ This action runs code that gets your logic app's name and returns the text "Hell *Example 2* -This action runs code in a logic app that triggers when a new email arrives in a work or school account. The logic app also uses a send approval email action that forwards the content from the received email along with a request for approval. +This action runs code in a logic app workflow that triggers when a new email arrives in an Outlook account. The workflow also uses the Office 365 Outlook **Send approval email** action that forwards the content from the received email along with a request for approval. -The code extracts the email addresses from the trigger's `Body` property and returns the addresses along with the `SelectedOption` property value from the approval action. 
The action explicitly includes the send approval email action as a dependency in the `explicitDependencies` > `actions` attribute. +The code extracts the email addresses from the email message's `Body` property, and returns the addresses along with the `SelectedOption` property value from the approval action. The action explicitly includes the **Send approval email** action as a dependency in the `actions` object inside the `explicitDependencies` object. ```json "Execute_JavaScript_Code": { "type": "JavaScriptCode", "inputs": { - "code": "var re = /(([^<>()\\[\\]\\\\.,;:\\s@\"]+(\\.[^<>()\\[\\]\\\\.,;:\\s@\"]+)*)|(\".+\"))@((\\[[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}])|(([a-zA-Z\\-0-9]+\\.)+[a-zA-Z]{2,}))/g;\r\n\r\nvar email = workflowContext.trigger.outputs.body.Body;\r\n\r\nvar reply = workflowContext.actions.Send_approval_email_.outputs.body.SelectedOption;\r\n\r\nreturn email.match(re) + \" - \" + reply;\r\n;", + "code": "var myResult = /(([^<>()\\[\\]\\\\.,;:\\s@\"]+(\\.[^<>()\\[\\]\\\\.,;:\\s@\"]+)*)|(\".+\"))@((\\[[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}])|(([a-zA-Z\\-0-9]+\\.)+[a-zA-Z]{2,}))/g;\r\n\r\nvar email = workflowContext.trigger.outputs.body.Body;\r\n\r\nvar reply = workflowContext.actions.Send_approval_email.outputs.body.SelectedOption;\r\n\r\nreturn email.match(myResult) + \" - \" + reply;\r\n;", "explicitDependencies": { "actions": [ - "Send_approval_email_" + "Send_approval_email" ] } }, @@ -1125,8 +1122,6 @@ The code extracts the email addresses from the trigger's `Body` property and ret } ``` - - ### Function action @@ -1153,7 +1148,7 @@ This action calls a previously created [Azure function](../azure-functions/funct | Value | Type | Description | |-------|------|-------------| -| <*Azure-function-ID*> | String | The resource ID for the Azure function you want to call. Here is the format for this value:

                  "/subscriptions/<*Azure-subscription-ID*>/resourceGroups/<*Azure-resource-group*>/providers/Microsoft.Web/sites/<*Azure-function-app-name*>/functions/<*Azure-function-name*>" | +| <*Azure-function-ID*> | String | The resource ID for the Azure function you want to call. Here's the format for this value:

                  "/subscriptions/<*Azure-subscription-ID*>/resourceGroups/<*Azure-resource-group*>/providers/Microsoft.Web/sites/<*Azure-function-app-name*>/functions/<*Azure-function-name*>" | | <*method-type*> | String | The HTTP method to use for calling the function: "GET", "PUT", "POST", "PATCH", or "DELETE"

                  If not specified, the default is the "POST" method. | |||| @@ -1569,7 +1564,7 @@ This action definition creates a JSON object array from an integer array. The ac }, ``` -Here is the array that this action creates: +Here's the array that this action creates: `[ { "number": 1 }, { "number": 2 }, { "number": 3 } ]` @@ -1676,7 +1671,7 @@ This action definition creates a CSV table from the "myItemArray" variable. The } ``` -Here is the CSV table that this action creates: +Here's the CSV table that this action creates: ``` ID,Product_Name @@ -1699,7 +1694,7 @@ This action definition creates an HTML table from the "myItemArray" variable. Th } ``` -Here is the HTML table that this action creates: +Here's the HTML table that this action creates:
<table><thead><tr><th>ID</th><th>Product_Name</th></tr></thead><tbody><tr><td>0</td><td>Apples</td></tr><tr><td>1</td><td>Oranges</td></tr></tbody></table>
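For illustration only, a default **Table** action definition that could produce output like the preceding table might look like the following minimal sketch. It assumes a `myItemArray` variable whose objects have `ID` and `Product_Name` properties, and the action name is a placeholder; with no `columns` specified, the property names become the column headers:

```json
"Create_HTML_table": {
   "type": "Table",
   "inputs": {
      "format": "HTML",
      "from": "@variables('myItemArray')"
   },
   "runAfter": {}
}
```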
                  @@ -1728,7 +1723,7 @@ This action definition creates an HTML table from the "myItemArray" variable. Ho }, ``` -Here is the HTML table that this action creates: +Here's the HTML table that this action creates:
<table><thead><tr><th>Stock_ID</th><th>Description</th></tr></thead><tbody><tr><td>0</td><td>Organic Apples</td></tr><tr><td>1</td><td>Organic Oranges</td></tr></tbody></table>
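The custom headers and values in the preceding table come from the **Table** action's optional `columns` array. The following is only a sketch, not necessarily the original definition: it assumes the same `myItemArray` variable and uses a `concat()` expression to prefix each product name with "Organic":

```json
"Create_HTML_table": {
   "type": "Table",
   "inputs": {
      "format": "HTML",
      "from": "@variables('myItemArray')",
      "columns": [
         {
            "header": "Stock_ID",
            "value": "@item().ID"
         },
         {
            "header": "Description",
            "value": "@concat('Organic ', item().Product_Name)"
         }
      ]
   },
   "runAfter": {}
}
```

Each `columns` entry pairs a `header` with a `value` expression that's evaluated for every item in the `from` array.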
                  @@ -2447,7 +2442,7 @@ Here are some considerations to review before you enable concurrency on a trigge * To work around this possibility, add a timeout to any action that might hold up these runs. If you're working in the code editor, see [Change asynchronous duration](#asynchronous-limits). Otherwise, if you're using the designer, follow these steps: - 1. In your logic app, on the action where you want to add a timeout, in the upper-right corner, select the ellipses (**...**) button, and then select **Settings**. + 1. In your logic app workflow, select the action where you want to add a timeout. In the action's upper-right corner, select the ellipses (**...**) button, and then select **Settings**. ![Open action settings](./media/logic-apps-workflow-actions-triggers/action-settings.png) @@ -2505,7 +2500,7 @@ To change the default limit, you can use either the code view editor or Logic Ap In the underlying "for each" definition, add or update the `runtimeConfiguration.concurrency.repetitions` property, which can have a value that ranges from `1` and `50`. -Here is an example that limits concurrent runs to 10 iterations: +Here's an example that limits concurrent runs to 10 iterations: ```json "For_each" { diff --git a/articles/logic-apps/manage-logic-apps-with-visual-studio.md b/articles/logic-apps/manage-logic-apps-with-visual-studio.md index 950ec0437e4a7..33b56ddd61973 100644 --- a/articles/logic-apps/manage-logic-apps-with-visual-studio.md +++ b/articles/logic-apps/manage-logic-apps-with-visual-studio.md @@ -11,6 +11,8 @@ ms.date: 01/28/2022 # Manage logic apps with Visual Studio +[!INCLUDE [logic-apps-sku-consumption](../../includes/logic-apps-sku-consumption.md)] + Although you can create, edit, manage, and deploy logic apps in the [Azure portal](https://portal.azure.com), you can also use Visual Studio when you want to add your logic apps to source control, publish different versions, and create [Azure Resource Manager](../azure-resource-manager/management/overview.md) templates for various deployment environments. With Visual Studio Cloud Explorer, you can find and manage your logic apps along with other Azure resources. For example, you can open, download, edit, run, view run history, disable, and enable logic apps that are already deployed in the Azure portal. If you're new to working with Azure Logic Apps in Visual Studio, learn [how to create logic apps with Visual Studio](../logic-apps/quickstart-create-logic-apps-with-visual-studio.md). You can also [manage your logic apps in the Azure portal](manage-logic-apps-with-azure-portal.md). 
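For reference, here's a minimal sketch of a "for each" action that caps concurrent iterations at 10 through the `runtimeConfiguration.concurrency.repetitions` setting described earlier. The action name, array source, and inner action are placeholders for illustration only:

```json
"For_each": {
   "type": "Foreach",
   "foreach": "@triggerBody()",
   "actions": {
      "Compose_item": {
         "type": "Compose",
         "inputs": "@item()",
         "runAfter": {}
      }
   },
   "runAfter": {},
   "runtimeConfiguration": {
      "concurrency": {
         "repetitions": 10
      }
   }
}
```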
diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-json-name.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-json-name.png new file mode 100644 index 0000000000000..ce672e9c963ff Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-json-name.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-parameter-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-parameter-consumption.png new file mode 100644 index 0000000000000..fe6879822b094 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-parameter-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-parameter-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-parameter-standard.png new file mode 100644 index 0000000000000..1643571d7eda1 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-parameter-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-parameter.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-parameter.png deleted file mode 100644 index 7744b140c8a52..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-parameter.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-actions-parameter-code-snippet-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-actions-parameter-code-snippet-consumption.png new file mode 100644 index 0000000000000..6d79376d23f1e Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-actions-parameter-code-snippet-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-actions-parameter-code-snippet-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-actions-parameter-code-snippet-standard.png new file mode 100644 index 0000000000000..54f85834d6641 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-actions-parameter-code-snippet-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-actions-parameter.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-actions-parameter.png new file mode 100644 index 0000000000000..764c05e6f9252 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-actions-parameter.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-new-step.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-new-step.png deleted file mode 100644 index 182a3379afa2d..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-new-step.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/auto-complete-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/auto-complete-consumption.png new file mode 100644 index 0000000000000..6cf0803a7cab4 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/auto-complete-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/auto-complete-standard.png 
b/articles/logic-apps/media/logic-apps-add-run-inline-code/auto-complete-standard.png new file mode 100644 index 0000000000000..2677fd02668ce Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/auto-complete-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/auto-complete.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/auto-complete.png deleted file mode 100644 index de2dc0e7ad69b..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/auto-complete.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/find-action-name-json.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/find-action-name-json.png index 5bb315d5ca918..7bea820d73e83 100644 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/find-action-name-json.png and b/articles/logic-apps/media/logic-apps-add-run-inline-code/find-action-name-json.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-action-add-parameters.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-action-add-parameters.png deleted file mode 100644 index 5814d5cbf7e1e..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-action-add-parameters.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-action-default-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-action-default-consumption.png new file mode 100644 index 0000000000000..6e8aacf63c18b Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-action-default-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-action-default.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-action-default.png deleted file mode 100644 index ae3e9c9c7833b..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-action-default.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-complete-example-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-complete-example-consumption.png new file mode 100644 index 0000000000000..6f68949ac1435 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-complete-example-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-complete-example-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-complete-example-standard.png new file mode 100644 index 0000000000000..28f5763c4ec50 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-complete-example-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-complete-example.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-complete-example.png deleted file mode 100644 index 1ce50d2ead4af..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-complete-example.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-example-overview.png 
b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-example-overview.png deleted file mode 100644 index ced1c22de669c..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-example-overview.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-example-select-outputs.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-example-select-outputs.png deleted file mode 100644 index 5db17a4723ca9..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-example-select-outputs.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-overview-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-overview-consumption.png new file mode 100644 index 0000000000000..d9e9baf03e416 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-overview-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-overview-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-overview-standard.png new file mode 100644 index 0000000000000..96f6beea61185 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-overview-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/rename-body-property-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/rename-body-property-consumption.png new file mode 100644 index 0000000000000..a1fc165198dcb Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/rename-body-property-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/rename-body-property-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/rename-body-property-standard.png new file mode 100644 index 0000000000000..0a3918e2419f6 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/rename-body-property-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/return-statement-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/return-statement-standard.png new file mode 100644 index 0000000000000..55fb406847e84 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/return-statement-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/save-email-body-variable-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/save-email-body-variable-consumption.png new file mode 100644 index 0000000000000..080e9157b5ae6 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/save-email-body-variable-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/save-email-body-variable-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/save-email-body-variable-standard.png new file mode 100644 index 0000000000000..afcf6be0e25d9 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/save-email-body-variable-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/save-email-body-variable.png 
b/articles/logic-apps/media/logic-apps-add-run-inline-code/save-email-body-variable.png deleted file mode 100644 index f5dc468f22fd4..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/save-email-body-variable.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/select-inline-code-action-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/select-inline-code-action-consumption.png new file mode 100644 index 0000000000000..22a1864257d03 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/select-inline-code-action-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/select-inline-code-action-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/select-inline-code-action-standard.png new file mode 100644 index 0000000000000..2686633eff76b Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/select-inline-code-action-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/select-inline-code-action.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/select-inline-code-action.png deleted file mode 100644 index 421a1a22b0893..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/select-inline-code-action.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/select-output-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/select-output-consumption.png new file mode 100644 index 0000000000000..54550e3adf334 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/select-output-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/select-output-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/select-output-standard.png new file mode 100644 index 0000000000000..71f22e56ec0e2 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/select-output-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-inputs-consumption.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-inputs-consumption.png new file mode 100644 index 0000000000000..788b3b11d1e73 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-inputs-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-inputs-standard.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-inputs-standard.png new file mode 100644 index 0000000000000..87af8a0a6a7a5 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-inputs-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-outputs-consumption.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-outputs-consumption.png new file mode 100644 index 0000000000000..dea74dfa1b95e Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-outputs-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-outputs-standard.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-outputs-standard.png new file mode 100644 index 
0000000000000..c58e877eac20c Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-outputs-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-details-expanded.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-details-expanded.png deleted file mode 100644 index 78fa81e7b4ddd..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-details-expanded.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-details.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-details.png deleted file mode 100644 index daca9cdb50a41..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-details.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-pane-consumption.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-pane-consumption.png new file mode 100644 index 0000000000000..7260005f09c28 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-pane-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-pane-standard.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-pane-standard.png new file mode 100644 index 0000000000000..4e99e836d88e1 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-pane-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-pane.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-pane.png deleted file mode 100644 index e93e1b359d61c..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-pane.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history-consumption.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history-consumption.png new file mode 100644 index 0000000000000..9e12bb0bd4e69 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history-overview.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history-overview.png deleted file mode 100644 index 09d893604cdae..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history-overview.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history-standard.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history-standard.png new file mode 100644 index 0000000000000..656520e1e51c1 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history.png deleted file mode 100644 index 1aaee087605ad..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history.png and /dev/null differ diff 
--git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history-consumption.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history-consumption.png new file mode 100644 index 0000000000000..2c466eadd711a Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history-overview.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history-overview.png deleted file mode 100644 index 22e18d78c9ad5..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history-overview.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history-standard.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history-standard.png new file mode 100644 index 0000000000000..f865aace75158 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history.png deleted file mode 100644 index 5a138066a020c..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-action-outputs-for-errors.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-action-outputs-for-errors.png deleted file mode 100644 index 8c40cb97723b5..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-action-outputs-for-errors.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-inputs-consumption.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-inputs-consumption.png new file mode 100644 index 0000000000000..8ed5412ebeec2 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-inputs-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-inputs-for-errors.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-inputs-for-errors.png deleted file mode 100644 index 6890035e4c377..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-inputs-for-errors.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-inputs-standard.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-inputs-standard.png new file mode 100644 index 0000000000000..d080b74c2f9fa Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-inputs-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-outputs-consumption.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-outputs-consumption.png new file mode 100644 index 0000000000000..1359acf972a5a Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-outputs-consumption.png differ diff --git 
a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-outputs-for-errors.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-outputs-for-errors.png deleted file mode 100644 index 8f169f15e81c6..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-outputs-for-errors.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-outputs-standard.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-outputs-standard.png new file mode 100644 index 0000000000000..7a13739ead67d Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-outputs-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-enterprise-integration-certificates/private-certificate-details.png b/articles/logic-apps/media/logic-apps-enterprise-integration-certificates/private-certificate-details.png index 7b89d4714d077..9847bd98d5b60 100644 Binary files a/articles/logic-apps/media/logic-apps-enterprise-integration-certificates/private-certificate-details.png and b/articles/logic-apps/media/logic-apps-enterprise-integration-certificates/private-certificate-details.png differ diff --git a/articles/logic-apps/media/logic-apps-exception-handling/change-run-after-property-status.png b/articles/logic-apps/media/logic-apps-exception-handling/change-run-after-property-status.png deleted file mode 100644 index 64d524ec8c098..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-exception-handling/change-run-after-property-status.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-exception-handling/change-run-after-status-consumption.png b/articles/logic-apps/media/logic-apps-exception-handling/change-run-after-status-consumption.png new file mode 100644 index 0000000000000..2be76c79eac05 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-exception-handling/change-run-after-status-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-exception-handling/change-run-after-status-standard.png b/articles/logic-apps/media/logic-apps-exception-handling/change-run-after-status-standard.png new file mode 100644 index 0000000000000..fd4811bea612f Binary files /dev/null and b/articles/logic-apps/media/logic-apps-exception-handling/change-run-after-status-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-exception-handling/configure-run-after-consumption.png b/articles/logic-apps/media/logic-apps-exception-handling/configure-run-after-consumption.png new file mode 100644 index 0000000000000..d9fbf13d33d87 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-exception-handling/configure-run-after-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-exception-handling/configure-run-after-property-setting.png b/articles/logic-apps/media/logic-apps-exception-handling/configure-run-after-property-setting.png deleted file mode 100644 index ac2baa7dacdfc..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-exception-handling/configure-run-after-property-setting.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-exception-handling/configure-run-after-standard.png b/articles/logic-apps/media/logic-apps-exception-handling/configure-run-after-standard.png new file mode 100644 index 0000000000000..8ad2e2c98dd45 Binary files /dev/null and 
b/articles/logic-apps/media/logic-apps-exception-handling/configure-run-after-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-exception-handling/default-run-after-status-consumption.png b/articles/logic-apps/media/logic-apps-exception-handling/default-run-after-status-consumption.png new file mode 100644 index 0000000000000..b58422d7c61a0 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-exception-handling/default-run-after-status-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-exception-handling/failed-run-after-status-consumption.png b/articles/logic-apps/media/logic-apps-exception-handling/failed-run-after-status-consumption.png new file mode 100644 index 0000000000000..dbcb813c113b1 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-exception-handling/failed-run-after-status-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-exception-handling/failed-run-after-status-standard.png b/articles/logic-apps/media/logic-apps-exception-handling/failed-run-after-status-standard.png new file mode 100644 index 0000000000000..6b23e3da8e042 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-exception-handling/failed-run-after-status-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-exception-handling/multiple-predecessor-actions-standard.png b/articles/logic-apps/media/logic-apps-exception-handling/multiple-predecessor-actions-standard.png new file mode 100644 index 0000000000000..6d4202de77331 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-exception-handling/multiple-predecessor-actions-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-exception-handling/predecessor-action-consumption.png b/articles/logic-apps/media/logic-apps-exception-handling/predecessor-action-consumption.png new file mode 100644 index 0000000000000..29d333c437899 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-exception-handling/predecessor-action-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-exception-handling/predecessor-action-standard.png b/articles/logic-apps/media/logic-apps-exception-handling/predecessor-action-standard.png new file mode 100644 index 0000000000000..f0399e43485aa Binary files /dev/null and b/articles/logic-apps/media/logic-apps-exception-handling/predecessor-action-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-exception-handling/run-after-multiple-statuses-consumption.png b/articles/logic-apps/media/logic-apps-exception-handling/run-after-multiple-statuses-consumption.png new file mode 100644 index 0000000000000..535e3fe7e6f81 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-exception-handling/run-after-multiple-statuses-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-exception-handling/run-after-multiple-statuses-standard.png b/articles/logic-apps/media/logic-apps-exception-handling/run-after-multiple-statuses-standard.png new file mode 100644 index 0000000000000..b0551bda062f0 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-exception-handling/run-after-multiple-statuses-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-exception-handling/run-after-property-multiple-statuses.png b/articles/logic-apps/media/logic-apps-exception-handling/run-after-property-multiple-statuses.png deleted file mode 100644 index dc17951318565..0000000000000 Binary files 
a/articles/logic-apps/media/logic-apps-exception-handling/run-after-property-multiple-statuses.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-exception-handling/run-after-property-status-set-to-failed.png b/articles/logic-apps/media/logic-apps-exception-handling/run-after-property-status-set-to-failed.png deleted file mode 100644 index 33f5625383332..0000000000000 Binary files a/articles/logic-apps/media/logic-apps-exception-handling/run-after-property-status-set-to-failed.png and /dev/null differ diff --git a/articles/logic-apps/media/monitor-logic-apps/add-condition-for-rule.png b/articles/logic-apps/media/monitor-logic-apps/add-condition-for-rule.png deleted file mode 100644 index c32d254fcc490..0000000000000 Binary files a/articles/logic-apps/media/monitor-logic-apps/add-condition-for-rule.png and /dev/null differ diff --git a/articles/logic-apps/media/monitor-logic-apps/add-new-alert-rule.png b/articles/logic-apps/media/monitor-logic-apps/add-new-alert-rule.png index 432dd1d8c5db5..7022cc952f0cd 100644 Binary files a/articles/logic-apps/media/monitor-logic-apps/add-new-alert-rule.png and b/articles/logic-apps/media/monitor-logic-apps/add-new-alert-rule.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/failed-action-inputs-standard.png b/articles/logic-apps/media/monitor-logic-apps/failed-action-inputs-standard.png new file mode 100644 index 0000000000000..87af8a0a6a7a5 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/failed-action-inputs-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/failed-action-outputs-standard.png b/articles/logic-apps/media/monitor-logic-apps/failed-action-outputs-standard.png new file mode 100644 index 0000000000000..c58e877eac20c Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/failed-action-outputs-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/find-and-select-signal.png b/articles/logic-apps/media/monitor-logic-apps/find-and-select-signal.png index cba53e56ba0f2..65b3c8df1e3d4 100644 Binary files a/articles/logic-apps/media/monitor-logic-apps/find-and-select-signal.png and b/articles/logic-apps/media/monitor-logic-apps/find-and-select-signal.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/find-your-logic-app.png b/articles/logic-apps/media/monitor-logic-apps/find-your-logic-app.png index 633f1cce290c1..0e27a587f9b3c 100644 Binary files a/articles/logic-apps/media/monitor-logic-apps/find-your-logic-app.png and b/articles/logic-apps/media/monitor-logic-apps/find-your-logic-app.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/finished-alert-condition-cost.png b/articles/logic-apps/media/monitor-logic-apps/finished-alert-condition-cost.png index b3373115be202..eff43e819d8ab 100644 Binary files a/articles/logic-apps/media/monitor-logic-apps/finished-alert-condition-cost.png and b/articles/logic-apps/media/monitor-logic-apps/finished-alert-condition-cost.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/logic-app-run-pane-consumption.png b/articles/logic-apps/media/monitor-logic-apps/logic-app-run-pane-consumption.png new file mode 100644 index 0000000000000..3b88a64d5a4f9 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/logic-app-run-pane-consumption.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/logic-app-run-pane-standard.png 
b/articles/logic-apps/media/monitor-logic-apps/logic-app-run-pane-standard.png new file mode 100644 index 0000000000000..4e99e836d88e1 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/logic-app-run-pane-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/logic-app-run-pane.png b/articles/logic-apps/media/monitor-logic-apps/logic-app-run-pane.png deleted file mode 100644 index 3f63a20e5e8b6..0000000000000 Binary files a/articles/logic-apps/media/monitor-logic-apps/logic-app-run-pane.png and /dev/null differ diff --git a/articles/logic-apps/media/monitor-logic-apps/logic-app-trigger-history.png b/articles/logic-apps/media/monitor-logic-apps/logic-app-trigger-history.png deleted file mode 100644 index 8e4ae719d4745..0000000000000 Binary files a/articles/logic-apps/media/monitor-logic-apps/logic-app-trigger-history.png and /dev/null differ diff --git a/articles/logic-apps/media/monitor-logic-apps/logic-app-triggers-history-consumption.png b/articles/logic-apps/media/monitor-logic-apps/logic-app-triggers-history-consumption.png new file mode 100644 index 0000000000000..2cee657d64873 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/logic-app-triggers-history-consumption.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/logic-app-triggers-history-standard.png b/articles/logic-apps/media/monitor-logic-apps/logic-app-triggers-history-standard.png new file mode 100644 index 0000000000000..c22dec807464a Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/logic-app-triggers-history-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/logic-apps-list-in-subscription.png b/articles/logic-apps/media/monitor-logic-apps/logic-apps-list-in-subscription.png index ac7a8a1048b54..17898041fe06a 100644 Binary files a/articles/logic-apps/media/monitor-logic-apps/logic-apps-list-in-subscription.png and b/articles/logic-apps/media/monitor-logic-apps/logic-apps-list-in-subscription.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-runs-history-consumption.png b/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-runs-history-consumption.png new file mode 100644 index 0000000000000..cbdd0c7e7ee0e Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-runs-history-consumption.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-runs-history-standard.png b/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-runs-history-standard.png new file mode 100644 index 0000000000000..fc258c6197c49 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-runs-history-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-trigger-history-consumption.png b/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-trigger-history-consumption.png new file mode 100644 index 0000000000000..2d30e8fa04818 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-trigger-history-consumption.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-trigger-history-standard.png b/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-trigger-history-standard.png new file mode 100644 index 0000000000000..468f2da252ce5 Binary files /dev/null and 
b/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-trigger-history-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/overview-pane-logic-app-details-run-history.png b/articles/logic-apps/media/monitor-logic-apps/overview-pane-logic-app-details-run-history.png deleted file mode 100644 index 98684b199e852..0000000000000 Binary files a/articles/logic-apps/media/monitor-logic-apps/overview-pane-logic-app-details-run-history.png and /dev/null differ diff --git a/articles/logic-apps/media/monitor-logic-apps/overview-pane-logic-app-details-trigger-history.png b/articles/logic-apps/media/monitor-logic-apps/overview-pane-logic-app-details-trigger-history.png deleted file mode 100644 index 8e366b7653c06..0000000000000 Binary files a/articles/logic-apps/media/monitor-logic-apps/overview-pane-logic-app-details-trigger-history.png and /dev/null differ diff --git a/articles/logic-apps/media/monitor-logic-apps/review-logic-app-run-details.png b/articles/logic-apps/media/monitor-logic-apps/review-logic-app-run-details.png index cfe155edd1049..5f6e9439f7201 100644 Binary files a/articles/logic-apps/media/monitor-logic-apps/review-logic-app-run-details.png and b/articles/logic-apps/media/monitor-logic-apps/review-logic-app-run-details.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/review-trigger-inputs-standard.png b/articles/logic-apps/media/monitor-logic-apps/review-trigger-inputs-standard.png new file mode 100644 index 0000000000000..70e59a93ef021 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/review-trigger-inputs-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/review-trigger-outputs-standard.png b/articles/logic-apps/media/monitor-logic-apps/review-trigger-outputs-standard.png new file mode 100644 index 0000000000000..15a5f0397c045 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/review-trigger-outputs-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/select-failed-step-in-failed-run.png b/articles/logic-apps/media/monitor-logic-apps/select-failed-step-in-failed-run.png deleted file mode 100644 index b5dce6ad16317..0000000000000 Binary files a/articles/logic-apps/media/monitor-logic-apps/select-failed-step-in-failed-run.png and /dev/null differ diff --git a/articles/logic-apps/media/monitor-logic-apps/select-failed-step.png b/articles/logic-apps/media/monitor-logic-apps/select-failed-step.png new file mode 100644 index 0000000000000..8e7329f18aa79 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/select-failed-step.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/select-run-details-on-toolbar.png b/articles/logic-apps/media/monitor-logic-apps/select-run-details-on-toolbar.png deleted file mode 100644 index 54f89cbe33bf3..0000000000000 Binary files a/articles/logic-apps/media/monitor-logic-apps/select-run-details-on-toolbar.png and /dev/null differ diff --git a/articles/logic-apps/media/monitor-logic-apps/select-specific-logic-app-run-consumption.png b/articles/logic-apps/media/monitor-logic-apps/select-specific-logic-app-run-consumption.png new file mode 100644 index 0000000000000..e9fe8fdbe24e2 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/select-specific-logic-app-run-consumption.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/select-specific-logic-app-run-standard.png 
b/articles/logic-apps/media/monitor-logic-apps/select-specific-logic-app-run-standard.png new file mode 100644 index 0000000000000..8c3247716b5d6 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/select-specific-logic-app-run-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/select-specific-logic-app-run.png b/articles/logic-apps/media/monitor-logic-apps/select-specific-logic-app-run.png deleted file mode 100644 index b2e07b620a1d0..0000000000000 Binary files a/articles/logic-apps/media/monitor-logic-apps/select-specific-logic-app-run.png and /dev/null differ diff --git a/articles/logic-apps/media/monitor-logic-apps/select-trigger-event-for-review-standard.png b/articles/logic-apps/media/monitor-logic-apps/select-trigger-event-for-review-standard.png new file mode 100644 index 0000000000000..338440b650a0f Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/select-trigger-event-for-review-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/select-trigger-event-for-review.png b/articles/logic-apps/media/monitor-logic-apps/select-trigger-event-for-review.png index 0ff61cf985876..8da7ba5bd1478 100644 Binary files a/articles/logic-apps/media/monitor-logic-apps/select-trigger-event-for-review.png and b/articles/logic-apps/media/monitor-logic-apps/select-trigger-event-for-review.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/set-up-condition-for-alert.png b/articles/logic-apps/media/monitor-logic-apps/set-up-condition-for-alert.png index 67cdba0bc6612..16a5e1170b214 100644 Binary files a/articles/logic-apps/media/monitor-logic-apps/set-up-condition-for-alert.png and b/articles/logic-apps/media/monitor-logic-apps/set-up-condition-for-alert.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/toolbar-select-run-details.png b/articles/logic-apps/media/monitor-logic-apps/toolbar-select-run-details.png new file mode 100644 index 0000000000000..7e2423eb269c0 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/toolbar-select-run-details.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/view-specific-trigger-details.png b/articles/logic-apps/media/monitor-logic-apps/view-specific-trigger-details.png index c697f4571a5cb..e523ce128b01b 100644 Binary files a/articles/logic-apps/media/monitor-logic-apps/view-specific-trigger-details.png and b/articles/logic-apps/media/monitor-logic-apps/view-specific-trigger-details.png differ diff --git a/articles/logic-apps/monitor-logic-apps.md b/articles/logic-apps/monitor-logic-apps.md index 33143057c9b7a..81e537327bb23 100644 --- a/articles/logic-apps/monitor-logic-apps.md +++ b/articles/logic-apps/monitor-logic-apps.md @@ -3,9 +3,9 @@ title: Monitor status, view history, and set up alerts description: Troubleshoot logic apps by checking run status, reviewing trigger history, and enabling alerts in Azure Logic Apps. 
services: logic-apps ms.suite: integration -ms.reviewer: divswa, azla +ms.reviewer: estfan, azla ms.topic: how-to -ms.date: 05/04/2020 +ms.date: 05/24/2022 --- # Monitor run status, review trigger history, and set up alerts for Azure Logic Apps @@ -15,198 +15,319 @@ ms.date: 05/04/2020 > review the following sections in [Create an integration workflow with single-tenant Azure Logic Apps](create-single-tenant-workflows-azure-portal.md): > [Review run history](create-single-tenant-workflows-azure-portal.md#review-run-history), [Review trigger history](create-single-tenant-workflows-azure-portal.md#review-trigger-history), and [Enable or open Application Insights after deployment](create-single-tenant-workflows-azure-portal.md#enable-open-application-insights). -After you create and run a [Consumption logic app workflow](quickstart-create-first-logic-app-workflow.md), you can check that workflow's run status, [runs history](#review-runs-history), [trigger history](#review-trigger-history), and performance. To get notifications about failures or other possible problems, set up [alerts](#add-azure-alerts). For example, you can create an alert that detects "when more than five runs fail in an hour." +After you create and run a [Consumption logic app workflow](quickstart-create-first-logic-app-workflow.md), you can check that workflow's run status, [trigger history](#review-trigger-history), [runs history](#review-runs-history), and performance. To get notifications about failures or other possible problems, set up [alerts](#add-azure-alerts). For example, you can create an alert that detects "when more than five runs fail in an hour." -For real-time event monitoring and richer debugging, set up diagnostics logging for your logic app by using [Azure Monitor logs](../azure-monitor/overview.md). This Azure service helps you monitor your cloud and on-premises environments so that you can more easily maintain their availability and performance. You can then find and view events, such as trigger events, run events, and action events. By storing this information in [Azure Monitor logs](../azure-monitor/logs/data-platform-logs.md), you can create [log queries](../azure-monitor/logs/log-query-overview.md) that help you find and analyze this information. You can also use this diagnostic data with other Azure services, such as Azure Storage and Azure Event Hubs. For more information, see [Monitor logic apps by using Azure Monitor](../logic-apps/monitor-logic-apps-log-analytics.md). +For real-time event monitoring and richer debugging, set up diagnostics logging for your logic app by using [Azure Monitor logs](../azure-monitor/overview.md). This Azure service helps you monitor your cloud and on-premises environments so that you can more easily maintain their availability and performance. You can then find and view events, such as trigger events, run events, and action events. By storing this information in [Azure Monitor logs](../azure-monitor/logs/data-platform-logs.md), you can create [log queries](../azure-monitor/logs/log-query-overview.md) that help you find and analyze this information. You can also use this diagnostic data with other Azure services, such as Azure Storage and Azure Event Hubs. For more information, see [Monitor logic apps by using Azure Monitor](monitor-logic-apps-log-analytics.md). 
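As a rough illustration of the log queries that the previous paragraph describes, the following Azure CLI sketch summarizes Logic Apps workflow runtime events from a Log Analytics workspace that receives the diagnostic data. The workspace GUID is a placeholder, and `AzureDiagnostics` column names such as `status_s` can vary with your logging setup, so treat this as a starting point rather than the documented query.

```azurecli
# Hypothetical workspace ID; replace it with the workspace that your diagnostic setting targets.
az monitor log-analytics query \
  --workspace "00000000-0000-0000-0000-000000000000" \
  --analytics-query 'AzureDiagnostics
    | where ResourceProvider == "MICROSOFT.LOGIC" and Category == "WorkflowRuntime"
    | summarize events = count() by OperationName, status_s
    | order by events desc' \
  --timespan "P1D"
```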
> [!NOTE] -> If your logic apps run in an [integration service environment (ISE)](../logic-apps/connect-virtual-network-vnet-isolated-environment-overview.md) -> that was created to use an [internal access endpoint](../logic-apps/connect-virtual-network-vnet-isolated-environment-overview.md#endpoint-access), -> you can view and access inputs and outputs from logic app's runs history *only from inside your virtual network*. Make sure that you have network +> If your logic apps run in an [integration service environment (ISE)](connect-virtual-network-vnet-isolated-environment-overview.md) +> that was created to use an [internal access endpoint](connect-virtual-network-vnet-isolated-environment-overview.md#endpoint-access), +> you can view and access inputs and outputs from a workflow runs history *only from inside your virtual network*. Make sure that you have network > connectivity between the private endpoints and the computer from where you want to access runs history. For example, your client computer can exist > inside the ISE's virtual network or inside a virtual network that's connected to the ISE's virtual network, for example, through peering or a virtual -> private network. For more information, see [ISE endpoint access](../logic-apps/connect-virtual-network-vnet-isolated-environment-overview.md#endpoint-access). +> private network. For more information, see [ISE endpoint access](connect-virtual-network-vnet-isolated-environment-overview.md#endpoint-access). - + + +## Review trigger history -## Review runs history +Each workflow run starts with a trigger, which either fires on a schedule or waits for an incoming request or event. The trigger history lists all the trigger attempts that your logic app made and information about the inputs and outputs for each trigger attempt. -Each time that the trigger fires for an item or event, the Logic Apps engine creates and runs a separate workflow instance for each item or event. By default, each workflow instance runs in parallel so that no workflow has to wait before starting a run. You can review what happened during that run, including the status for each step in the workflow plus the inputs and outputs for each step. +### [Consumption](#tab/consumption) 1. In the [Azure portal](https://portal.azure.com), find and open your logic app workflow in the designer. - To find your logic app, in the main Azure search box, enter `logic apps`, and then select **Logic apps**. + To find your logic app, in the portal search box, enter **logic apps**, and then select **Logic apps**. - ![Find and select "Logic Apps" service](./media/monitor-logic-apps/find-your-logic-app.png) + ![Screenshot showing the Azure portal main search box with "logic apps" entered and "Logic apps" selected.](./media/monitor-logic-apps/find-your-logic-app.png) - The Azure portal shows all the logic apps that are associated with your Azure subscriptions. You can filter this list based on name, subscription, resource group, location, and so on. + The Azure portal shows all the logic apps in your Azure subscription. You can filter this list based on name, subscription, resource group, location, and so on. + + ![Screenshot showing the Azure portal with all logic apps associated with selected Azure subscriptions.](./media/monitor-logic-apps/logic-apps-list-in-subscription.png) + +1. Select your logic app. On your logic app's menu, select **Overview**. On the Overview pane, select **Trigger history**. 
- ![View logic apps associated with subscriptions](./media/monitor-logic-apps/logic-apps-list-in-subscription.png) + ![Screenshot showing "Overview" pane for a Consumption logic app workflow with "Trigger history" selected.](./media/monitor-logic-apps/overview-logic-app-trigger-history-consumption.png) -1. Select your logic app, and then select **Overview**. + Under **Trigger history**, all trigger attempts appear. Each time the trigger successfully fires, Azure Logic Apps creates an individual workflow instance and runs that instance. By default, each instance runs in parallel so that no workflow has to wait before starting a run. If your workflow triggers for multiple events or items at the same time, a trigger entry appears for each item with the same date and time. - On the overview pane, under **Runs history**, all the past, current, and any waiting runs for your logic app appear. If the list shows many runs, and you can't find the entry that you want, try filtering the list. + ![Screenshot showing "Overview" pane for a Consumption logic app workflow with multiple trigger attempts for different items.](./media/monitor-logic-apps/logic-app-triggers-history-consumption.png) + + The following table lists the possible trigger statuses: + + | Trigger status | Description | + |----------------|-------------| + | **Failed** | An error occurred. To review any generated error messages for a failed trigger, select that trigger attempt and choose **Outputs**. For example, you might find inputs that aren't valid. | + | **Skipped** | The trigger checked the endpoint but found no data that met the specified criteria. | + | **Succeeded** | The trigger checked the endpoint and found available data. Usually, a **Fired** status also appears alongside this status. If not, the trigger definition might have a condition or `SplitOn` command that wasn't met.

                  This status can apply to a manual trigger, recurrence-based trigger, or polling trigger. A trigger can run successfully, but the run itself might still fail when the actions generate unhandled errors. | + ||| > [!TIP] - > If the run status doesn't appear, try refreshing the overview page by selecting **Refresh**. - > No run happens for a trigger that's skipped due to unmet criteria or finding no data. + > + > You can recheck the trigger without waiting for the next recurrence. On the + > **Overview** pane toolbar or on the designer toolbar, select **Run Trigger** > **Run**. + +1. To view information about a specific trigger attempt, select that trigger event. + + ![Screenshot showing the Consumption workflow trigger entry selected.](./media/monitor-logic-apps/select-trigger-event-for-review.png) + + If the list shows many trigger attempts, and you can't find the entry that you want, try filtering the list. If you don't find the data that you expect, try selecting **Refresh** on the toolbar. + + You can now review information about the selected trigger event, for example: + + ![Screenshot showing the selected Consumption workflow trigger history information.](./media/monitor-logic-apps/view-specific-trigger-details.png) + +### [Standard](#tab/standard) + +1. In the [Azure portal](https://portal.azure.com), find and open your logic app workflow in the designer. + + To find your logic app, in the portal search box, enter **logic apps**, and then select **Logic apps**. - ![Overview, runs history, and other logic app information](./media/monitor-logic-apps/overview-pane-logic-app-details-run-history.png) + ![Screenshot showing the Azure portal search box with "logic apps" entered and "Logic apps" selected.](./media/monitor-logic-apps/find-your-logic-app.png) - Here are the possible run statuses: + The Azure portal shows all the logic apps in your Azure subscription. You can filter this list based on name, subscription, resource group, location, and so on. + + ![Screenshot showing Azure portal with all logic apps associated with selected Azure subscriptions.](./media/monitor-logic-apps/logic-apps-list-in-subscription.png) + +1. Select your logic app. On your logic app's menu, select **Overview**. On the Overview pane, select **Trigger history**. + + ![Screenshot showing Overview pane with "Trigger history" selected.](./media/monitor-logic-apps/overview-logic-app-trigger-history-standard.png) + + Under **Trigger history**, all trigger attempts appear. Each time the trigger successfully fires, Azure Logic Apps creates an individual workflow instance and runs that instance. By default, each instance runs in parallel so that no workflow has to wait before starting a run. If your workflow triggers for multiple events or items at the same time, a trigger entry appears for each item with the same date and time. + + ![Screenshot showing Overview pane with multiple trigger attempts for different items.](./media/monitor-logic-apps/logic-app-triggers-history-standard.png) + + The following table lists the possible trigger statuses: + + | Trigger status | Description | + |----------------|-------------| + | **Failed** | An error occurred. To review any generated error messages for a failed trigger, select that trigger attempt and choose **Outputs**. For example, you might find inputs that aren't valid. | + | **Skipped** | The trigger checked the endpoint but found no data that met the specified criteria. | + | **Succeeded** | The trigger checked the endpoint and found available data. 
Usually, a **Fired** status also appears alongside this status. If not, the trigger definition might have a condition or `SplitOn` command that wasn't met.

                  This status can apply to a manual trigger, recurrence-based trigger, or polling trigger. A trigger can run successfully, but the run itself might still fail when the actions generate unhandled errors. | + ||| + + > [!TIP] + > + > You can recheck the trigger without waiting for the next recurrence. On the + > **Overview** pane toolbar, select **Run Trigger** > **Run**. + +1. To view information about a specific trigger attempt, select that trigger event. + + ![Screenshot showing a Standard workflow trigger entry selected.](./media/monitor-logic-apps/select-trigger-event-for-review-standard.png) + + If the list shows many trigger attempts, and you can't find the entry that you want, try filtering the list. If you don't find the data that you expect, try selecting **Refresh** on the toolbar. + +1. Check the trigger's inputs to confirm that they appear as you expect. On the **History** pane, under **Inputs link**, select the link, which shows the **Inputs** pane. + + ![Screenshot showing Standard logic app workflow trigger inputs.](./media/monitor-logic-apps/review-trigger-inputs-standard.png) + +1. Check the trigger's outputs, if any, to confirm that they appear as you expect. On the **History** pane, under **Outputs link**, select the link, which shows the **Outputs** pane. + + Trigger outputs include the data that the trigger passes to the next step in your workflow. Reviewing these outputs can help you determine whether the correct or expected values passed on to the next step in your workflow. + + For example, the RSS trigger generated an error message that states that the RSS feed wasn't found. + + ![Screenshot showing Standard logic app workflow trigger outputs.](./media/logic-apps-diagnosing-failures/review-trigger-outputs-standard.png) + +--- + + + +## Review workflow run history + +Each time the trigger successfully fires, Azure Logic Apps creates a workflow instance and runs that instance. By default, each instance runs in parallel so that no workflow has to wait before starting a run. You can review what happened during each run, including the status, inputs, and outputs for each step in the workflow. + +### [Consumption](#tab/consumption) + +1. In the [Azure portal](https://portal.azure.com), find and open your logic app workflow in the designer. + + To find your logic app, in the main Azure search box, enter **logic apps**, and then select **Logic apps**. + + ![Screenshot showing Azure portal main search box with "logic apps" entered and "Logic apps" selected.](./media/monitor-logic-apps/find-your-logic-app.png) + + The Azure portal shows all the logic apps that are associated with your Azure subscriptions. You can filter this list based on name, subscription, resource group, location, and so on. + + ![Screenshot showing all the logic apps in selected Azure subscriptions.](./media/monitor-logic-apps/logic-apps-list-in-subscription.png) + +1. Select your logic app. On your logic app's menu, select **Overview**. On the Overview pane, select **Runs history**. + + Under **Runs history**, all the past, current, and any waiting runs appear. If the trigger fires for multiple events or items at the same time, an entry appears for each item with the same date and time. 
+ + ![Screenshot showing Consumption logic app workflow "Overview" pane with "Runs history" selected.](./media/monitor-logic-apps/overview-logic-app-runs-history-consumption.png) + + The following table lists the possible run statuses: | Run status | Description | |------------|-------------| | **Aborted** | The run stopped or didn't finish due to external problems, for example, a system outage or lapsed Azure subscription. | - | **Cancelled** | The run was triggered and started but received a cancellation request. | + | **Cancelled** | The run was triggered and started, but received a cancellation request. | | **Failed** | At least one action in the run failed. No subsequent actions in the workflow were set up to handle the failure. | - | **Running** | The run was triggered and is in progress, but this status can also appear for a run that is throttled due to [action limits](logic-apps-limits-and-config.md) or the [current pricing plan](https://azure.microsoft.com/pricing/details/logic-apps/).

                  **Tip**: If you set up [diagnostics logging](monitor-logic-apps-log-analytics.md), you can get information about any throttle events that happen. | + | **Running** | The run was triggered and is in progress. However, this status can also appear for a run that's throttled due to [action limits](logic-apps-limits-and-config.md) or the [current pricing plan](https://azure.microsoft.com/pricing/details/logic-apps/).

                  **Tip**: If you set up [diagnostics logging](monitor-logic-apps-log-analytics.md), you can get information about any throttle events that happen. | | **Succeeded** | The run succeeded. If any action failed, a subsequent action in the workflow handled that failure. | - | **Timed out** | The run timed out because the current duration exceeded the run duration limit, which is controlled by the [**Run history retention in days** setting](logic-apps-limits-and-config.md#run-duration-retention-limits). A run's duration is calculated by using the run's start time and run duration limit at that start time.

                  **Note**: If the run's duration also exceeds the current *run history retention limit*, which is also controlled by the [**Run history retention in days** setting](logic-apps-limits-and-config.md#run-duration-retention-limits), the run is cleared from the runs history by a daily cleanup job. Whether the run times out or completes, the retention period is always calculated by using the run's start time and *current* retention limit. So, if you reduce the duration limit for an in-flight run, the run times out. However, the run either stays or is cleared from the runs history based on whether the run's duration exceeded the retention limit. | + | **Timed out** | The run timed out because the current duration exceeded the run duration limit, which is controlled by the [**Run history retention in days** setting](logic-apps-limits-and-config.md#run-duration-retention-limits). A run's duration is calculated by using the run's start time and run duration limit at that start time.

                  **Note**: If the run's duration also exceeds the current *run history retention limit*, which is also controlled by the [**Run history retention in days** setting](logic-apps-limits-and-config.md#run-duration-retention-limits), the run is cleared from the runs history by a daily cleanup job. Whether the run times out or completes, the retention period is always calculated by using the run's start time and *current* retention limit. So, if you reduce the duration limit for an in-flight run, the run times out. However, the run either stays or is cleared from the runs history based on whether the run's duration exceeded the retention limit. | | **Waiting** | The run hasn't started or is paused, for example, due to an earlier workflow instance that's still running. | ||| -1. To review the steps and other information for a specific run, under **Runs history**, select that run. +1. To review the steps and other information for a specific run, under **Runs history**, select that run. If the list shows many runs, and you can't find the entry that you want, try filtering the list. + + > [!TIP] + > + > If the run status doesn't appear, try refreshing the overview pane by selecting **Refresh**. + > No run happens for a trigger that's skipped due to unmet criteria or finding no data. - ![Select a specific run to review](./media/monitor-logic-apps/select-specific-logic-app-run.png) + ![Screenshot showing the Consumption logic app workflow run selected.](./media/monitor-logic-apps/select-specific-logic-app-run-consumption.png) The **Logic app run** pane shows each step in the selected run, each step's run status, and the time taken for each step to run, for example: - ![Each action in the specific run](./media/monitor-logic-apps/logic-app-run-pane.png) + ![Screenshot showing each action in the selected workflow run.](./media/monitor-logic-apps/logic-app-run-pane-consumption.png) To view this information in list form, on the **Logic app run** toolbar, select **Run Details**. - ![On the toolbar, select "Run Details"](./media/monitor-logic-apps/select-run-details-on-toolbar.png) + ![Screenshot showing the "Logic app run" toolbar with "Run Details" selected.](./media/monitor-logic-apps/toolbar-select-run-details.png) - The Run Details view shows each step, their status, and other information. + The Run Details lists each step, their status, and other information. - ![Review details about each step in the run](./media/monitor-logic-apps/review-logic-app-run-details.png) + ![Screenshot showing the run details for each step in the workflow.](./media/monitor-logic-apps/review-logic-app-run-details.png) For example, you can get the run's **Correlation ID** property, which you might need when you use the [REST API for Logic Apps](/rest/api/logic). 1. To get more information about a specific step, select either option: - * In the **Logic app run** pane select the step so that the shape expands. You can now view information such as inputs, outputs, and any errors that happened in that step, for example: + * In the **Logic app run** pane, select the step so that the shape expands. You can now view information such as inputs, outputs, and any errors that happened in that step. - ![In logic app run pane, view failed step](./media/monitor-logic-apps/specific-step-inputs-outputs-errors.png) + For example, suppose you had an action that failed, and you wanted to review which inputs might have caused that step to fail. 
By expanding the shape, you can view the inputs, outputs, and error for that step: - * In the **Logic app run details** pane, select the step that you want. + ![Screenshot showing the "Logic app run" pane with the expanded shape for an example failed step.](./media/monitor-logic-apps/specific-step-inputs-outputs-errors.png) - ![In run details pane, view failed step](./media/monitor-logic-apps/select-failed-step-in-failed-run.png) + * In the **Logic app run details** pane, select the step that you want. - You can now view information such as inputs and outputs for that step, for example: + ![Screenshot showing the the "Logic app run details" pane with the example failed step selected.](./media/monitor-logic-apps/select-failed-step.png) > [!NOTE] - > All runtime details and events are encrypted within the Logic Apps service. - > They are decrypted only when a user requests to view that data. - > You can [hide inputs and outputs in run history](../logic-apps/logic-apps-securing-a-logic-app.md#obfuscate) + > + > All runtime details and events are encrypted within Azure Logic Apps and + > are decrypted only when a user requests to view that data. You can + > [hide inputs and outputs in run history](logic-apps-securing-a-logic-app.md#obfuscate) > or control user access to this information by using > [Azure role-based access control (Azure RBAC)](../role-based-access-control/overview.md). - - -## Review trigger history - -Each logic app run starts with a trigger. The trigger history lists all the trigger attempts that your logic app made and information about the inputs and outputs for each trigger attempt. +### [Standard](#tab/standard) 1. In the [Azure portal](https://portal.azure.com), find and open your logic app workflow in the designer. - To find your logic app, in the main Azure search box, enter `logic apps`, and then select **Logic Apps**. + To find your logic app, in the main Azure search box, enter **logic apps**, and then select **Logic apps**. - ![Find and select "Logic Apps" service](./media/monitor-logic-apps/find-your-logic-app.png) + ![Screenshot showing Azure portal search box with "logic apps" entered and "Logic apps" selected.](./media/monitor-logic-apps/find-your-logic-app.png) The Azure portal shows all the logic apps that are associated with your Azure subscriptions. You can filter this list based on name, subscription, resource group, location, and so on. - ![View logic apps associated with subscriptions](./media/monitor-logic-apps/logic-apps-list-in-subscription.png) + ![Screenshot showing all logic apps in selected Azure subscriptions.](./media/monitor-logic-apps/logic-apps-list-in-subscription.png) -1. Select your logic app, and then select **Overview**. +1. Select your logic app. On your logic app's menu, under **Workflows**, select **Workflows**, and then select your workflow. -1. On your logic app's menu, select **Overview**. In the **Summary** section, under **Evaluation**, select **See trigger history**. + > [!NOTE] + > + > By default, stateless workflows don't store run history unless you enable this capability for debugging. + > For more information, review [Stateful versus stateless workflows](single-tenant-overview-compare.md#stateful-stateless). - ![View trigger history for your logic app](./media/monitor-logic-apps/overview-pane-logic-app-details-trigger-history.png) +1. On your workflow's menu, select **Overview**. On the Overview pane, select **Run History**. - The trigger history pane shows all the trigger attempts that your logic app has made. 
Each time that the trigger fires for an item or event, the Logic Apps engine creates a separate logic app instance that runs the workflow. By default, each instance runs in parallel so that no workflow has to wait before starting a run. So if your logic app triggers on multiple items at the same time, a trigger entry with the same date and time appears for each item. + Under **Run History**, all the past, current, and any waiting runs appear. If the trigger fires for multiple events or items at the same time, an entry appears for each item with the same date and time. - ![Multiple trigger attempts for different items](./media/monitor-logic-apps/logic-app-trigger-history.png) + ![Screenshot showing Standard logic app workflow "Overview" pane with "Run History" selected.](./media/monitor-logic-apps/overview-logic-app-runs-history-standard.png) - Here are the possible trigger attempt statuses: + The following table lists the possible run statuses: - | Trigger status | Description | - |----------------|-------------| - | **Failed** | An error occurred. To review any generated error messages for a failed trigger, select that trigger attempt and choose **Outputs**. For example, you might find inputs that aren't valid. | - | **Skipped** | The trigger checked the endpoint but found no data that met the specified criteria. | - | **Succeeded** | The trigger checked the endpoint and found available data. Usually, a **Fired** status also appears alongside this status. If not, the trigger definition might have a condition or `SplitOn` command that wasn't met.

                  This status can apply to a manual trigger, recurrence trigger, or polling trigger. A trigger can run successfully, but the run itself might still fail when the actions generate unhandled errors. | + | Run status | Description | + |------------|-------------| + | **Aborted** | The run stopped or didn't finish due to external problems, for example, a system outage or lapsed Azure subscription. | + | **Cancelled** | The run was triggered and started, but received a cancellation request. | + | **Failed** | At least one action in the run failed. No subsequent actions in the workflow were set up to handle the failure. | + | **Running** | The run was triggered and is in progress. However, this status can also appear for a run that's throttled due to [action limits](logic-apps-limits-and-config.md) or the [current pricing plan](https://azure.microsoft.com/pricing/details/logic-apps/).

                  **Tip**: If you set up [diagnostics logging](monitor-logic-apps-log-analytics.md), you can get information about any throttle events that happen. | + | **Succeeded** | The run succeeded. If any action failed, a subsequent action in the workflow handled that failure. | + | **Timed out** | The run timed out because the current duration exceeded the run duration limit, which is controlled by the [**Run history retention in days** setting](logic-apps-limits-and-config.md#run-duration-retention-limits). A run's duration is calculated by using the run's start time and run duration limit at that start time.

                  **Note**: If the run's duration also exceeds the current *run history retention limit*, which is also controlled by the [**Run history retention in days** setting](logic-apps-limits-and-config.md#run-duration-retention-limits), the run is cleared from the runs history by a daily cleanup job. Whether the run times out or completes, the retention period is always calculated by using the run's start time and *current* retention limit. So, if you reduce the duration limit for an in-flight run, the run times out. However, the run either stays or is cleared from the runs history based on whether the run's duration exceeded the retention limit. | + | **Waiting** | The run hasn't started or is paused, for example, due to an earlier workflow instance that's still running. | ||| - > [!TIP] - > You can recheck the trigger without waiting for the next recurrence. On the overview toolbar, select **Run Trigger**, - > and select the trigger, which forces a check. Or, select **Run Trigger** on designer toolbar. +1. To review the steps and other information for a specific run, under **Run History**, select that run. If the list shows many runs, and you can't find the entry that you want, try filtering the list. -1. To view information about a specific trigger attempt, on the trigger pane, select that trigger event. If the list shows many trigger attempts, and you can't find the entry that you want, try filtering the list. If you don't find the data that you expect, try selecting **Refresh** on the toolbar. + > [!TIP] + > + > If the run status doesn't appear, try refreshing the overview pane by selecting **Refresh**. + > No run happens for a trigger that's skipped due to unmet criteria or finding no data. - ![View specific trigger attempt](./media/monitor-logic-apps/select-trigger-event-for-review.png) + ![Screenshot showing the Standard workflow run selected.](./media/monitor-logic-apps/select-specific-logic-app-run-standard.png) - You can now review information about the selected trigger event, for example: + The workflow run pane shows each step in the selected run, each step's run status, and the time taken for each step to run, for example: - ![View specific trigger information](./media/monitor-logic-apps/view-specific-trigger-details.png) + ![Screenshot showing each action in selected workflow run.](./media/monitor-logic-apps/logic-app-run-pane-standard.png) - +1. After all the steps in the run appear, select each step to review more information such as inputs, outputs, and any errors that happened in that step. -## Set up monitoring alerts + For example, suppose you had an action that failed, and you wanted to review which inputs might have caused that step to fail. -To get alerts based on specific metrics or exceeded thresholds for your logic app, set up [alerts in Azure Monitor](../azure-monitor/alerts/alerts-overview.md). Learn about [metrics in Azure](../azure-monitor/data-platform.md). To set up alerts without using [Azure Monitor](../azure-monitor/logs/log-query-overview.md), follow these steps. + ![Screenshot showing Standard logic app workflow with failed step inputs.](./media/monitor-logic-apps/failed-action-inputs-standard.png) -1. On your logic app menu, under **Monitoring**, select **Alerts** > **New alert rule**. + The following screenshot shows the outputs from the failed step. 
- ![Add an alert for your logic app](./media/monitor-logic-apps/add-new-alert-rule.png) + ![Screenshot showing Standard logic app workflow with failed step outputs.](./media/monitor-logic-apps/failed-action-outputs-standard.png) -1. On the **Create rule** pane, under **Resource**, select your logic app, if not already selected. Under **Condition**, select **Add** so that you can define the condition that triggers the alert. + > [!NOTE] + > + > All runtime details and events are encrypted within Azure Logic Apps and + > are decrypted only when a user requests to view that data. You can + > [hide inputs and outputs in run history](logic-apps-securing-a-logic-app.md#obfuscate). - ![Add a condition for the rule](./media/monitor-logic-apps/add-condition-for-rule.png) +--- -1. On the **Configure signal logic** pane, find and select the signal for which you want to get an alert. You can use the search box, or to sort the signals alphabetically, select the **Signal name** column header. + - For example, if you want to send an alert when a trigger fails, follow these steps: +## Set up monitoring alerts - 1. In the **Signal name** column, find and select the **Triggers Failed** signal. +To get alerts based on specific metrics or exceeded thresholds for your logic app, set up [alerts in Azure Monitor](../azure-monitor/alerts/alerts-overview.md). For more information, review [Metrics in Azure](../azure-monitor/data-platform.md). To set up alerts without using [Azure Monitor](../azure-monitor/logs/log-query-overview.md), follow these steps. - ![Select signal for creating alert](./media/monitor-logic-apps/find-and-select-signal.png) +1. On your logic app menu, under **Monitoring**, select **Alerts**. On the toolbar, select **Create** > **Alert rule**. - 1. On the information pane that opens for the selected signal, under **Alert logic**, set up your condition, for example: + ![Screenshot showing Azure portal, logic app menu with "Alerts" selected, and toolbar with "Create", "Alert rule" selected.](./media/monitor-logic-apps/add-new-alert-rule.png) - 1. For **Operator**, select **Greater than or equal to**. +1. On the **Select a signal** pane, under **Signal type**, select the signal for which you want to get an alert. - 1. For **Aggregation type**, select **Count**. + > [!TIP] + > + > You can use the search box, or to sort the signals alphabetically, + > select the **Signal name** column header. - 1. For **Threshold value**, enter `1`. + For example, to send an alert when a trigger fails, follow these steps: - 1. Under **Condition preview**, confirm that your condition appears correct. + 1. In the **Signal name** column, find and select the **Triggers Failed** signal. - 1. Under **Evaluated based on**, set up the interval and frequency for running the alert rule. For **Aggregation granularity (Period)**, select the period for grouping the data. For **Frequency of evaluation**, select how often you want to check the condition. + ![Screenshot showing "Select a signal pane", the "Signal name" column, and "Triggers Failed" signal selected.](./media/monitor-logic-apps/find-and-select-signal.png) - 1. When you're ready, select **Done**. + 1. 
On the **Configure signal logic** pane, under **Alert logic**, set up your condition, and select **Done**, for example: - Here's the finished condition: + | Property | Example value | + |----------|---------------| + | **Operator** | **Greater than or equal to** | + | **Aggregation type** | **Count** | + | **Threshold value** | **1** | + | **Unit** | **Count** | + | **Condition preview** | **Whenever the count of triggers failed is greater than or equal to 1** | + | **Aggregation granularity (Period)** | **1 minute** | + | **Frequency of evaluation** | **Every 1 Minute** | + ||| - ![Set up condition for alert](./media/monitor-logic-apps/set-up-condition-for-alert.png) + For more information, review [Create, view, and manage log alerts by using Azure Monitor](../azure-monitor/alerts/alerts-activity-log.md). - The **Create rule** page now shows the condition that you created and the cost for running that alert. + The following screenshot shows the finished condition: - ![New alert on the "Create rule" page](./media/monitor-logic-apps/finished-alert-condition-cost.png) + ![Screenshot showing the condition for alert.](./media/monitor-logic-apps/set-up-condition-for-alert.png) -1. Specify a name, optional description, and severity level for your alert. Either leave the **Enable rule upon creation** setting turned on, or turn off until you're ready to enable the rule. + The **Create an alert rule** page now shows the condition that you created and the cost for running that alert. -1. When you're done, select **Create alert rule**. + ![Screenshot showing the new alert on the "Create an alert rule" page.](./media/monitor-logic-apps/finished-alert-condition-cost.png) -> [!TIP] -> To run a logic app from an alert, you can include the -> [request trigger](../connectors/connectors-native-reqres.md) in your workflow, -> which lets you perform tasks like these examples: -> -> * [Post to Slack](https://github.com/Azure/azure-quickstart-templates/tree/master/demos/alert-to-slack-with-logic-app) -> * [Send a text](https://github.com/Azure/azure-quickstart-templates/tree/master/demos/alert-to-text-message-with-logic-app) -> * [Add a message to a queue](https://github.com/Azure/azure-quickstart-templates/tree/master/demos/alert-to-queue-with-logic-app) +1. If you're satisfied, select **Next: Details** to finish creating the rule. ## Next steps -* [Monitor logic apps by using Azure Monitor](../logic-apps/monitor-logic-apps-log-analytics.md) +* [Monitor logic apps with Azure Monitor](monitor-logic-apps-log-analytics.md) diff --git a/articles/logic-apps/quickstart-create-deploy-azure-resource-manager-template.md b/articles/logic-apps/quickstart-create-deploy-azure-resource-manager-template.md index 2b75630ae8833..f5df2066106cf 100644 --- a/articles/logic-apps/quickstart-create-deploy-azure-resource-manager-template.md +++ b/articles/logic-apps/quickstart-create-deploy-azure-resource-manager-template.md @@ -12,6 +12,8 @@ ms.date: 04/27/2022 # Quickstart: Create and deploy a Consumption logic app workflow in multi-tenant Azure Logic Apps with an ARM template +[!INCLUDE [logic-apps-sku-consumption](../../includes/logic-apps-sku-consumption.md)] + [Azure Logic Apps](logic-apps-overview.md) is a cloud service that helps you create and run automated workflows that integrate data, apps, cloud-based services, and on-premises systems by choosing from [hundreds of connectors](/connectors/connector-reference/connector-reference-logicapps-connectors). 
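To preview where this quickstart ends up, here's a minimal, hedged sketch of deploying such a template with the Azure CLI. The resource group name, location, and template file name are placeholders rather than values taken from this article.

```azurecli
# Placeholder names; azuredeploy.json is assumed to define the Consumption logic app resource.
az group create --name example-logic-app-rg --location eastus

az deployment group create \
  --resource-group example-logic-app-rg \
  --template-file azuredeploy.json
```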
This quickstart focuses on the process for deploying an Azure Resource Manager template (ARM template) to create a basic [Consumption logic app workflow](logic-apps-overview.md#resource-environment-differences) that checks the status for Azure on an hourly schedule and runs in [multi-tenant Azure Logic Apps](logic-apps-overview.md#resource-environment-differences). [!INCLUDE [About Azure Resource Manager](../../includes/resource-manager-quickstart-introduction.md)] diff --git a/articles/logic-apps/quickstart-create-deploy-bicep.md b/articles/logic-apps/quickstart-create-deploy-bicep.md index 53c9707f77d82..3b8b8d9c78dd8 100644 --- a/articles/logic-apps/quickstart-create-deploy-bicep.md +++ b/articles/logic-apps/quickstart-create-deploy-bicep.md @@ -13,6 +13,8 @@ ms.date: 04/07/2022 # Quickstart: Create and deploy a Consumption logic app workflow in multi-tenant Azure Logic Apps with Bicep +[!INCLUDE [logic-apps-sku-consumption](../../includes/logic-apps-sku-consumption.md)] + [Azure Logic Apps](logic-apps-overview.md) is a cloud service that helps you create and run automated workflows that integrate data, apps, cloud-based services, and on-premises systems by choosing from [hundreds of connectors](/connectors/connector-reference/connector-reference-logicapps-connectors). This quickstart focuses on the process for deploying a Bicep file to create a basic [Consumption logic app workflow](logic-apps-overview.md#resource-environment-differences) that checks the status for Azure on an hourly schedule and runs in [multi-tenant Azure Logic Apps](logic-apps-overview.md#resource-environment-differences). [!INCLUDE [About Bicep](../../includes/resource-manager-quickstart-bicep-introduction.md)] diff --git a/articles/logic-apps/quickstart-create-first-logic-app-workflow.md b/articles/logic-apps/quickstart-create-first-logic-app-workflow.md index 4843402f1b4e4..00db6841157ae 100644 --- a/articles/logic-apps/quickstart-create-first-logic-app-workflow.md +++ b/articles/logic-apps/quickstart-create-first-logic-app-workflow.md @@ -13,6 +13,8 @@ ms.date: 05/02/2022 # Quickstart: Create an integration workflow with multi-tenant Azure Logic Apps and the Azure portal +[!INCLUDE [logic-apps-sku-consumption](../../includes/logic-apps-sku-consumption.md)] + This quickstart shows how to create an example automated workflow that integrates two services, an RSS feed for a website and an email account. More specifically, you create a [Consumption plan-based](logic-apps-pricing.md#consumption-pricing) logic app resource and workflow that uses the RSS connector and the Office 365 Outlook connector. This resource runs in [*multi-tenant* Azure Logic Apps](logic-apps-overview.md). 
> [!NOTE] diff --git a/articles/logic-apps/quickstart-create-logic-apps-visual-studio-code.md b/articles/logic-apps/quickstart-create-logic-apps-visual-studio-code.md index e09213f364ad6..09c1767256314 100644 --- a/articles/logic-apps/quickstart-create-logic-apps-visual-studio-code.md +++ b/articles/logic-apps/quickstart-create-logic-apps-visual-studio-code.md @@ -12,6 +12,8 @@ ms.date: 02/02/2022 # Quickstart: Create and manage logic app workflow definitions with multi-tenant Azure Logic Apps and Visual Studio Code +[!INCLUDE [logic-apps-sku-consumption](../../includes/logic-apps-sku-consumption.md)] + This quickstart shows how to create and manage logic app workflows that help you automate tasks and processes that integrate apps, data, systems, and services across organizations and enterprises by using multi-tenant [Azure Logic Apps](../logic-apps/logic-apps-overview.md) and Visual Studio Code. You can create and edit the underlying workflow definitions, which use JavaScript Object Notation (JSON), for logic apps through a code-based experience. You can also work on existing logic apps that are already deployed to Azure. For more information about multi-tenant versus single-tenant model, review [Single-tenant versus multi-tenant and integration service environment](single-tenant-overview-compare.md). Although you can perform these same tasks in the [Azure portal](https://portal.azure.com) and in Visual Studio, you can get started faster in Visual Studio Code when you're already familiar with logic app definitions and want to work directly in code. For example, you can disable, enable, delete, and refresh already created logic apps. Also, you can work on logic apps and integration accounts from any development platform where Visual Studio Code runs, such as Linux, Windows, and Mac. diff --git a/articles/logic-apps/quickstart-create-logic-apps-with-visual-studio.md b/articles/logic-apps/quickstart-create-logic-apps-with-visual-studio.md index b1ee292b96eb3..20f5a876b48eb 100644 --- a/articles/logic-apps/quickstart-create-logic-apps-with-visual-studio.md +++ b/articles/logic-apps/quickstart-create-logic-apps-with-visual-studio.md @@ -12,6 +12,8 @@ ms.date: 05/25/2021 # Quickstart: Create automated integration workflows with multi-tenant Azure Logic Apps and Visual Studio +[!INCLUDE [logic-apps-sku-consumption](../../includes/logic-apps-sku-consumption.md)] + This quickstart shows how to design, develop, and deploy automated workflows that integrate apps, data, systems, and services across enterprises and organizations by using multi-tenant [Azure Logic Apps](../logic-apps/logic-apps-overview.md) and Visual Studio. Although you can perform these tasks in the Azure portal, Visual Studio lets you add your logic apps to source control, publish different versions, and create Azure Resource Manager templates for different deployment environments. For more information about multi-tenant versus single-tenant model, review [Single-tenant versus multi-tenant and integration service environment](single-tenant-overview-compare.md). If you're new to Azure Logic Apps and just want the basic concepts, try the [quickstart for creating a logic app in the Azure portal](../logic-apps/quickstart-create-first-logic-app-workflow.md). The Logic App Designer works similarly in both the Azure portal and Visual Studio. 
diff --git a/articles/logic-apps/quickstart-logic-apps-azure-cli.md b/articles/logic-apps/quickstart-logic-apps-azure-cli.md index 7203f9492cf82..37832525cbdcf 100644 --- a/articles/logic-apps/quickstart-logic-apps-azure-cli.md +++ b/articles/logic-apps/quickstart-logic-apps-azure-cli.md @@ -11,6 +11,8 @@ ms.date: 05/03/2022 # Quickstart: Create and manage workflows with Azure CLI in Azure Logic Apps +[!INCLUDE [logic-apps-sku-consumption](../../includes/logic-apps-sku-consumption.md)] + This quickstart shows how to create and manage automated workflows that run in Azure Logic Apps by using the [Azure CLI Logic Apps extension](/cli/azure/logic) (`az logic`). From the command line, you can create a [Consumption logic app](logic-apps-overview.md#resource-environment-differences) in multi-tenant Azure Logic Apps by using the JSON file for a logic app workflow definition. You can then manage your logic app by running operations such as `list`, `show` (`get`), `update`, and `delete` from the command line. > [!WARNING] diff --git a/articles/logic-apps/quickstart-logic-apps-azure-powershell.md b/articles/logic-apps/quickstart-logic-apps-azure-powershell.md index 605f04d392ad0..19768efde11fc 100644 --- a/articles/logic-apps/quickstart-logic-apps-azure-powershell.md +++ b/articles/logic-apps/quickstart-logic-apps-azure-powershell.md @@ -5,12 +5,15 @@ services: logic-apps ms.suite: integration ms.reviewer: estfan, azla ms.topic: quickstart -ms.custom: mvc, devx-track-azurepowershell, contperf-fy21q2, mode-api +ms.tool: azure-powershell +ms.custom: mvc, contperf-fy21q2, mode-api ms.date: 05/03/2022 --- # Quickstart: Create and manage workflows with Azure PowerShell in Azure Logic Apps +[!INCLUDE [logic-apps-sku-consumption](../../includes/logic-apps-sku-consumption.md)] + This quickstart shows how to create and manage automated workflows that run in Azure Logic Apps by using [Azure PowerShell](/powershell/azure/install-az-ps). From PowerShell, you can create a [Consumption logic app](logic-apps-overview.md#resource-environment-differences) in multi-tenant Azure Logic Apps by using the JSON file for a logic app workflow definition. You can then manage your logic app by running the cmdlets in the [Az.LogicApp](/powershell/module/az.logicapp/) PowerShell module. > [!NOTE] diff --git a/articles/logic-apps/sample-logic-apps-cli-script.md b/articles/logic-apps/sample-logic-apps-cli-script.md index a299a72095c9a..eff7df426975c 100644 --- a/articles/logic-apps/sample-logic-apps-cli-script.md +++ b/articles/logic-apps/sample-logic-apps-cli-script.md @@ -11,6 +11,8 @@ ms.date: 07/30/2020 # Azure CLI script sample - create a logic app +[!INCLUDE [logic-apps-sku-consumption](../../includes/logic-apps-sku-consumption.md)] + This script creates a sample logic app through the [Azure CLI Logic Apps extension](/cli/azure/logic), (`az logic`). For a detailed guide to creating and managing logic apps through the Azure CLI, see the [Logic Apps quickstart for the Azure CLI](quickstart-logic-apps-azure-cli.md). 
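Both the Azure CLI and Azure PowerShell quickstarts above start from "the JSON file for a logic app workflow definition". As a minimal sketch of that file's shape, assuming the standard workflow definition schema (the outer `definition`/`parameters` wrapper is what the `az logic workflow create --definition` parameter and the `Az.LogicApp` cmdlets are typically given, but check the quickstart itself for the exact file it uses):

```json
{
  "definition": {
    "$schema": "https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#",
    "contentVersion": "1.0.0.0",
    "parameters": {},
    "triggers": {},
    "actions": {},
    "outputs": {}
  },
  "parameters": {}
}
```

After the logic app exists, the `list`, `show`, `update`, and `delete` operations mentioned above act on the deployed resource rather than on this file.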
> [!WARNING] diff --git a/articles/logic-apps/secure-single-tenant-workflow-virtual-network-private-endpoint.md b/articles/logic-apps/secure-single-tenant-workflow-virtual-network-private-endpoint.md index 41ba5e77c466b..cb2db4e354f93 100644 --- a/articles/logic-apps/secure-single-tenant-workflow-virtual-network-private-endpoint.md +++ b/articles/logic-apps/secure-single-tenant-workflow-virtual-network-private-endpoint.md @@ -12,6 +12,8 @@ ms.date: 03/11/2022 # Secure traffic between single-tenant Standard logic apps and Azure virtual networks using private endpoints and VNet integration +[!INCLUDE [logic-apps-sku-standard](../../includes/logic-apps-sku-standard.md)] + To securely and privately communicate between your workflow in a Standard logic app and an Azure virtual network, you can set up *private endpoints* for inbound traffic and use VNet integration for outbound traffic. A private endpoint is a network interface that privately and securely connects to a service powered by Azure Private Link. This service can be an Azure service such as Azure Logic Apps, Azure Storage, Azure Cosmos DB, SQL, or your own Private Link Service. The private endpoint uses a private IP address from your virtual network, which effectively brings the service into your virtual network. diff --git a/articles/logic-apps/send-related-messages-sequential-convoy.md b/articles/logic-apps/send-related-messages-sequential-convoy.md index 35c99da929f35..13ad41a008784 100644 --- a/articles/logic-apps/send-related-messages-sequential-convoy.md +++ b/articles/logic-apps/send-related-messages-sequential-convoy.md @@ -10,6 +10,8 @@ ms.date: 05/29/2020 # Send related messages in order by using a sequential convoy in Azure Logic Apps with Azure Service Bus +[!INCLUDE [logic-apps-sku-consumption](../../includes/logic-apps-sku-consumption.md)] + When you need to send correlated messages in a specific order, you can follow the [*sequential convoy* pattern](/azure/architecture/patterns/sequential-convoy) when using [Azure Logic Apps](../logic-apps/logic-apps-overview.md) by using the [Azure Service Bus connector](../connectors/connectors-create-api-servicebus.md). Correlated messages have a property that defines the relationship between those messages, such as the ID for the [session](../service-bus-messaging/message-sessions.md) in Service Bus. For example, suppose that you have 10 messages for a session named "Session 1", and you have 5 messages for a session named "Session 2" that are all sent to the same [Service Bus queue](../service-bus-messaging/service-bus-queues-topics-subscriptions.md). You can create a logic app that processes messages from the queue so that all messages from "Session 1" are handled by a single trigger run and all messages from "Session 2" are handled by the next trigger run. 
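To make the correlation property concrete: in this pattern, the session ID is the only thing that ties the messages together. As a purely illustrative sketch (the property names approximate how the Service Bus connector surfaces message metadata, and the values are invented), two messages that belong to the same convoy might look like:

```json
[
  {
    "SessionId": "Session 1",
    "ContentType": "application/json",
    "ContentData": "{ \"orderId\": 1, \"step\": \"created\" }"
  },
  {
    "SessionId": "Session 1",
    "ContentType": "application/json",
    "ContentData": "{ \"orderId\": 1, \"step\": \"approved\" }"
  }
]
```

Because both messages carry `"SessionId": "Session 1"`, a single trigger run that locks that session processes them in order before the next run picks up a different session.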
diff --git a/articles/logic-apps/set-up-devops-deployment-single-tenant-azure-logic-apps.md b/articles/logic-apps/set-up-devops-deployment-single-tenant-azure-logic-apps.md index d49d16ba38a3e..649a3d0cca6a1 100644 --- a/articles/logic-apps/set-up-devops-deployment-single-tenant-azure-logic-apps.md +++ b/articles/logic-apps/set-up-devops-deployment-single-tenant-azure-logic-apps.md @@ -12,6 +12,8 @@ ms.date: 02/14/2022 # Set up DevOps deployment for Standard logic app workflows in single-tenant Azure Logic Apps +[!INCLUDE [logic-apps-sku-standard](../../includes/logic-apps-sku-standard.md)] + This article shows how to deploy a Standard logic app project to single-tenant Azure Logic Apps from Visual Studio Code to your infrastructure by using DevOps tools and processes. Based on whether you prefer GitHub or Azure DevOps for deployment, choose the path and tools that work best for your scenario. You can use the included samples that contain example logic app projects plus examples for Azure deployment using either GitHub or Azure DevOps. For more information about DevOps for single-tenant, review [DevOps deployment overview for single-tenant Azure Logic Apps](devops-deployment-single-tenant-azure-logic-apps.md). ## Prerequisites diff --git a/articles/logic-apps/set-up-sql-db-storage-single-tenant-standard-workflows.md b/articles/logic-apps/set-up-sql-db-storage-single-tenant-standard-workflows.md index e57ce45988d64..4c7d5966649f3 100644 --- a/articles/logic-apps/set-up-sql-db-storage-single-tenant-standard-workflows.md +++ b/articles/logic-apps/set-up-sql-db-storage-single-tenant-standard-workflows.md @@ -11,6 +11,8 @@ ms.custom: ignite-fall-2021 # Set up SQL database storage for Standard logic apps in single-tenant Azure Logic Apps (preview) +[!INCLUDE [logic-apps-sku-standard](../../includes/logic-apps-sku-standard.md)] + > [!IMPORTANT] > This capability is in preview and is subject to the [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). diff --git a/articles/logic-apps/single-tenant-overview-compare.md b/articles/logic-apps/single-tenant-overview-compare.md index 3a3ce62a23f3b..d92002c96cd77 100644 --- a/articles/logic-apps/single-tenant-overview-compare.md +++ b/articles/logic-apps/single-tenant-overview-compare.md @@ -5,7 +5,7 @@ services: logic-apps ms.suite: integration ms.reviewer: estfan, azla ms.topic: conceptual -ms.date: 04/28/2022 +ms.date: 06/01/2022 ms.custom: ignite-fall-2021 --- @@ -127,35 +127,26 @@ With the **Logic App (Standard)** resource type, you can create these workflow t Create a stateless workflow when you don't need to keep, review, or reference data from previous events in external storage after each run finishes for later review. These workflows save all the inputs and outputs for each action and their states *in memory only*, not in external storage. As a result, stateless workflows have shorter runs that are typically less than 5 minutes, faster performance with quicker response times, higher throughput, and reduced running costs because the run details and history aren't saved in external storage. However, if outages happen, interrupted runs aren't automatically restored, so the caller needs to manually resubmit interrupted runs. - > [!IMPORTANT] - > A stateless workflow provides the best performance when handling data or content, such as a file, that doesn't exceed 64 KB in *total* size. 
- > Larger content sizes, such as multiple large attachments, might significantly slow your workflow's performance or even cause your workflow to - > crash due to out-of-memory exceptions. If your workflow might have to handle larger content sizes, use a stateful workflow instead. + A stateless workflow provides the best performance when handling data or content, such as a file, that doesn't exceed 64 KB in *total* size. Larger content sizes, such as multiple large attachments, might significantly slow your workflow's performance or even cause your workflow to crash due to out-of-memory exceptions. If your workflow might have to handle larger content sizes, use a stateful workflow instead. - Stateless workflows only run synchronously, so they don't use the standard [asynchronous operation pattern](/azure/architecture/patterns/async-request-reply) used by stateful workflows. Instead, all HTTP-based actions that return a ["202 ACCEPTED"](https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.2.3) response proceed to the next step in the workflow execution. If the response includes a `location` header, a stateless workflow won't poll the specified URI to check the status. To follow the standard asynchronous operation pattern, use a stateful workflow instead. + In stateless workflows, [*managed connector actions*](../connectors/managed.md) are available, but *managed connector triggers* are unavailable. So, to start your workflow, select a [built-in trigger](../connectors/built-in.md) instead, such as the Request, Event Hubs, or Service Bus trigger. These triggers run natively on the Azure Logic Apps runtime. The Recurrence trigger is unavailable for stateless workflows and is available only for stateful workflows. For more information about limited, unavailable, or unsupported triggers, actions, and connectors, see [Changed, limited, unavailable, or unsupported capabilities](#limited-unavailable-unsupported). - For easier debugging, you can enable run history for a stateless workflow, which has some impact on performance, and then disable the run history when you're done. For more information, see [Create single-tenant based workflows in Visual Studio Code](create-single-tenant-workflows-visual-studio-code.md#enable-run-history-stateless) or [Create single-tenant based workflows in the Azure portal](create-single-tenant-workflows-visual-studio-code.md#enable-run-history-stateless). + Stateless workflows run only synchronously, so they don't use the standard [asynchronous operation pattern](/azure/architecture/patterns/async-request-reply) used by stateful workflows. Instead, all HTTP-based actions that return a ["202 ACCEPTED"](https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.2.3) response continue to the next step in the workflow execution. If the response includes a `location` header, a stateless workflow won't poll the specified URI to check the status. To follow the standard asynchronous operation pattern, use a stateful workflow instead. - > [!NOTE] - > Stateless workflows currently support only *actions* for [managed connectors](../connectors/managed.md), - > which are deployed in Azure, and not triggers. To start your workflow, select either the - > [built-in Request, Event Hubs, or Service Bus trigger](../connectors/built-in.md). - > These triggers run natively in the Azure Logic Apps runtime. 
For more information about limited, - > unavailable, or unsupported triggers, actions, and connectors, see - > [Changed, limited, unavailable, or unsupported capabilities](#limited-unavailable-unsupported). + For easier debugging, you can enable run history for a stateless workflow, which has some impact on performance, and then disable the run history when you're done. For more information, see [Create single-tenant based workflows in Visual Studio Code](create-single-tenant-workflows-visual-studio-code.md#enable-run-history-stateless) or [Create single-tenant based workflows in the Azure portal](create-single-tenant-workflows-visual-studio-code.md#enable-run-history-stateless). ### Summary differences between stateful and stateless workflows

                  -| Stateless | Stateful | +| Stateful | Stateless | |--------------------------------------------------------------|-------------------------------------------------------------| -| Doesn't store run history, inputs, or outputs by default | Stores run history, inputs, and outputs | -| Managed connector triggers are unavailable or not allowed | Managed connector triggers are available and allowed | -| No support for chunking | Supports chunking | -| No support for asynchronous operations | Supports asynchronous operations | -| Best for workflows with max duration under 5 minutes | Edit default max run duration in host configuration | -| Best for handling small message sizes (under 64K) | Handles large messages | +| Stores run history, inputs, and outputs | Doesn't store run history, inputs, or outputs by default | +| Managed connector triggers are available and allowed | Managed connector triggers are unavailable or not allowed | +| Supports chunking | No support for chunking | +| Supports asynchronous operations | No support for asynchronous operations | +| Edit default max run duration in host configuration | Best for workflows with max duration under 5 minutes | +| Handles large messages | Best for handling small message sizes (under 64K) | |||
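The nested-workflow section that follows mentions setting the `operationOptions` property to `DisableAsyncPattern` in the workflow's JSON definition to opt out of the asynchronous operation pattern described above. As a minimal sketch of where that property sits, assuming a hypothetical action name and endpoint:

```json
{
  "actions": {
    "HTTP_call_child": {
      "type": "Http",
      "inputs": {
        "method": "POST",
        "uri": "https://contoso.example.com/child-workflow-endpoint"
      },
      "operationOptions": "DisableAsyncPattern",
      "runAfter": {}
    }
  }
}
```

With this option set, the action treats a `202 ACCEPTED` response as final instead of polling the URI in the `location` header.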
                  @@ -166,23 +157,23 @@ With the **Logic App (Standard)** resource type, you can create these workflow t You can [make a workflow callable](logic-apps-http-endpoint.md) from other workflows that exist in the same **Logic App (Standard)** resource by using the [Request trigger](../connectors/connectors-native-reqres.md), [HTTP Webhook trigger](../connectors/connectors-native-webhook.md), or managed connector triggers that have the [ApiConnectionWebhook type](logic-apps-workflow-actions-triggers.md#apiconnectionwebhook-trigger) and can receive HTTPS requests. -Here are the behavior patterns that nested workflows can follow after a parent workflow calls a child workflow: +The following list describes the behavior patterns that nested workflows can follow after a parent workflow calls a child workflow: * Asynchronous polling pattern - The parent doesn't wait for a response to their initial call, but continually checks the child's run history until the child finishes running. By default, stateful workflows follow this pattern, which is ideal for long-running child workflows that might exceed [request timeout limits](logic-apps-limits-and-config.md). + The parent workflow doesn't wait for the child workflow to respond to their initial call. However, the parent continually checks the child's run history until the child finishes running. By default, stateful workflows follow this pattern, which is ideal for long-running child workflows that might exceed [request timeout limits](logic-apps-limits-and-config.md). * Synchronous pattern ("fire and forget") - The child acknowledges the call by immediately returning a `202 ACCEPTED` response, and the parent continues to the next action without waiting for the results from the child. Instead, the parent receives the results when the child finishes running. Child stateful workflows that don't include a Response action always follow the synchronous pattern. For child stateful workflows, the run history is available for you to review. + The child workflow acknowledges the parent workflow's call by immediately returning a `202 ACCEPTED` response. However, the parent doesn't wait for the child to return results. Instead, the parent continues on to the next action in the workflow and receives the results when the child finishes running. Child stateful workflows that don't include a Response action always follow the synchronous pattern and provide a run history for you to review. To enable this behavior, in the workflow's JSON definition, set the `operationOptions` property to `DisableAsyncPattern`. For more information, see [Trigger and action types - Operation options](logic-apps-workflow-actions-triggers.md#operation-options). * Trigger and wait - For a child stateless workflow, the parent waits for a response that returns the results from the child. This pattern works similar to using the built-in [HTTP trigger or action](../connectors/connectors-native-http.md) to call a child workflow. Child stateless workflows that don't include a Response action immediately return a `202 ACCEPTED` response, but the parent waits for the child to finish before continuing to the next action. These behaviors apply only to child stateless workflows. + Stateless workflows run in memory. So when a parent workflow calls a child stateless workflow, the parent waits for a response that returns the results from the child. This pattern works similarly to using the built-in [HTTP trigger or action](../connectors/connectors-native-http.md) to call a child workflow. 
Child stateless workflows that don't include a Response action immediately return a `202 ACCEPTED` response, but the parent waits for the child to finish before continuing to the next action. These behaviors apply only to child stateless workflows. -This table specifies the child workflow's behavior based on whether the parent and child are stateful, stateless, or are mixed workflow types: +The following table identifies the child workflow's behavior based on whether the parent and child are stateful, stateless, or mixed workflow types: | Parent workflow | Child workflow | Child behavior | |-----------------|----------------|----------------| @@ -255,9 +246,9 @@ The single-tenant model and **Logic App (Standard)** resource type include many For the **Logic App (Standard)** resource, these capabilities have changed, or they are currently limited, unavailable, or unsupported: -* **Triggers and actions**: Built-in triggers and actions run natively in Azure Logic Apps, while managed connectors are hosted and run in Azure. Some built-in triggers and actions are unavailable, such as Sliding Window, Batch, Azure App Services, and Azure API Management. To start a stateful or stateless workflow, use the [Request, HTTP, HTTP Webhook, Event Hubs, Service Bus trigger, and so on](../connectors/built-in.md). The Recurrence trigger is available only for stateful workflows, not stateless workflows. In the designer, built-in triggers and actions appear under the **Built-in** tab. +* **Triggers and actions**: [Built-in triggers and actions](../connectors/built-in.md) run natively in Azure Logic Apps, while managed connectors are hosted and run in Azure. For Standard workflows, some built-in triggers and actions are currently unavailable, such as Sliding Window, Batch, Azure App Service, and Azure API Management. To start a stateful or stateless workflow, use a built-in trigger such as the Request, Event Hubs, or Service Bus trigger. The Recurrence trigger is available for stateful workflows, but not stateless workflows. In the designer, built-in triggers and actions appear on the **Built-in** tab, while [managed connector triggers and actions](../connectors/managed.md) appear on the **Azure** tab. - For *stateful* workflows, [managed connector triggers and actions](../connectors/managed.md) appear under the **Azure** tab, except for the unavailable operations listed below. For *stateless* workflows, the **Azure** tab doesn't appear when you want to select a trigger. You can select only [managed connector *actions*, not triggers](../connectors/managed.md). Although you can enable Azure-hosted managed connectors for stateless workflows, the designer doesn't show any managed connector triggers for you to add. + For *stateless* workflows, *managed connector actions* are available, but *managed connector triggers* are unavailable. So the **Azure** tab appears only when you can select managed connector actions. Although you can enable managed connectors for stateless workflows, the designer doesn't show any managed connector triggers for you to add. > [!NOTE] > To run locally in Visual Studio Code, webhook-based triggers and actions require additional setup.
For more information, see diff --git a/articles/logic-apps/toc.yml b/articles/logic-apps/toc.yml index a37cfa9643eca..07deb36c83ab6 100644 --- a/articles/logic-apps/toc.yml +++ b/articles/logic-apps/toc.yml @@ -429,8 +429,6 @@ href: logic-apps-scenario-social-serverless.md - name: Create a serverless app - Visual Studio href: create-serverless-apps-visual-studio.md - - name: Add error and exception handling - href: logic-apps-scenario-error-and-exception-handling.md - name: B2B processing href: logic-apps-enterprise-integration-b2b.md - name: Reference diff --git a/articles/logic-apps/tutorial-build-schedule-recurring-logic-app-workflow.md b/articles/logic-apps/tutorial-build-schedule-recurring-logic-app-workflow.md index 836ac89e17076..2f71f2270ab22 100644 --- a/articles/logic-apps/tutorial-build-schedule-recurring-logic-app-workflow.md +++ b/articles/logic-apps/tutorial-build-schedule-recurring-logic-app-workflow.md @@ -11,6 +11,8 @@ ms.date: 03/24/2021 # Tutorial: Create schedule-based and recurring automation workflows with Azure Logic Apps +[!INCLUDE [logic-apps-sku-consumption](../../includes/logic-apps-sku-consumption.md)] + This tutorial shows how to build an example [logic app](../logic-apps/logic-apps-overview.md) that automates a workflow that runs on a recurring schedule. Specifically, this example logic app checks the travel time, including the traffic, between two places and runs every weekday morning. If the time exceeds a specific limit, the logic app sends you an email that includes the travel time and the extra time necessary to arrive at your destination. The workflow includes various steps, which start with a schedule-based trigger followed by a Bing Maps action, a data operations action, a control flow action, and an email notification action. In this tutorial, you learn how to: diff --git a/articles/logic-apps/tutorial-process-email-attachments-workflow.md b/articles/logic-apps/tutorial-process-email-attachments-workflow.md index 2d860c8cc34e5..aec7585f67259 100644 --- a/articles/logic-apps/tutorial-process-email-attachments-workflow.md +++ b/articles/logic-apps/tutorial-process-email-attachments-workflow.md @@ -11,6 +11,8 @@ ms.date: 03/24/2021 # Tutorial: Automate tasks to process emails by using Azure Logic Apps, Azure Functions, and Azure Storage +[!INCLUDE [logic-apps-sku-consumption](../../includes/logic-apps-sku-consumption.md)] + Azure Logic Apps helps you automate workflows and integrate data across Azure services, Microsoft services, other software-as-a-service (SaaS) apps, and on-premises systems. This tutorial shows how you can build a [logic app](../logic-apps/logic-apps-overview.md) that handles incoming emails and any attachments. This logic app analyzes the email content, saves the content to Azure storage, and sends notifications for reviewing that content. 
In this tutorial, you learn how to: diff --git a/articles/logic-apps/tutorial-process-mailing-list-subscriptions-workflow.md b/articles/logic-apps/tutorial-process-mailing-list-subscriptions-workflow.md index eb60055554e23..9226f27a0b2a9 100644 --- a/articles/logic-apps/tutorial-process-mailing-list-subscriptions-workflow.md +++ b/articles/logic-apps/tutorial-process-mailing-list-subscriptions-workflow.md @@ -11,6 +11,8 @@ ms.date: 03/24/2021 # Tutorial: Create automated approval-based workflows by using Azure Logic Apps +[!INCLUDE [logic-apps-sku-consumption](../../includes/logic-apps-sku-consumption.md)] + This tutorial shows how to build an example [logic app](../logic-apps/logic-apps-overview.md) that automates an approval-based workflow. Specifically, this example logic app processes subscription requests for a mailing list that's managed by the [MailChimp](https://mailchimp.com/) service. This logic app includes various steps, which start by monitoring an email account for requests, sends these requests for approval, checks whether or not the request gets approval, adds approved members to the mailing list, and confirms whether or not new members get added to the list. In this tutorial, you learn how to: diff --git a/articles/logic-apps/workflow-definition-language-functions-reference.md b/articles/logic-apps/workflow-definition-language-functions-reference.md index 766eeaaacbd92..51bc2cf79adf0 100644 --- a/articles/logic-apps/workflow-definition-language-functions-reference.md +++ b/articles/logic-apps/workflow-definition-language-functions-reference.md @@ -1355,7 +1355,7 @@ convertFromUtc('<timestamp>', '<destinationTimeZone>', '<format>'?) | Return value | Type | Description | | ------------ | ---- | ----------- | -| <*converted-timestamp*> | String | The timestamp converted to the target time zone | +| <*converted-timestamp*> | String | The timestamp converted to the target time zone without the timezone UTC offset.
| |||| *Example 1* diff --git a/articles/machine-learning/.openpublishing.redirection.machine-learning.json b/articles/machine-learning/.openpublishing.redirection.machine-learning.json new file mode 100644 index 0000000000000..20cf2292bc8e6 --- /dev/null +++ b/articles/machine-learning/.openpublishing.redirection.machine-learning.json @@ -0,0 +1,3274 @@ +{ + "redirections": [ + { + "source_path_from_root": "/articles/machine-learning/tutorial-train-models-with-aml.md", + "redirect_url": "/azure/machine-learning/tutorial-train-deploy-notebook", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/concept-datastore.md", + "redirect_url": "/azure/machine-learning/concept-data", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/tutorial-train-deploy-model-cli.md", + "redirect_url": "/azure/machine-learning/how-to-train-cli", + "redirect_document_id": "false" + }, + { + "source_path_from_root": "/articles/machine-learning/tutorial-setup-vscode-extension.md", + "redirect_url": "/azure/machine-learning/how-to-setup-vs-code", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/tutorial-resource-manager-workspace.md", + "redirect_url": "/azure/machine-learning/how-to-create-workspace-template", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/tutorial-pipeline-batch-scoring-classification.md", + "redirect_url": "/azure/machine-learning/tutorial-pipeline-python-sdk", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/tutorial-deploy-models-with-aml.md", + "redirect_url": "/azure/machine-learning/tutorial-train-deploy-notebook", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/tutorial-deploy-managed-endpoints-using-system-managed-identity.md", + "redirect_url": "/azure/machine-learning/how-to-access-resources-from-endpoints-managed-identities", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/tutorial-1st-r-experiment.md", + "redirect_url": "https://github.com/Azure/azureml-sdk-for-r", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/tutorial-1st-experiment-sdk-setup.md", + "redirect_url": "/azure/machine-learning/quickstart-create-resources", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/tutorial-1st-experiment-sdk-setup-local.md", + "redirect_url": "/azure/machine-learning/quickstart-create-resources", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/walkthroughs.md", + "redirect_url": "/azure/architecture/data-science-process/walkthroughs", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/walkthroughs-sql-server.md", + "redirect_url": "/azure/architecture/data-science-process/walkthroughs-sql-server", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/walkthroughs-sql-data-warehouse.md", + "redirect_url": "/azure/architecture/data-science-process/walkthroughs-sql-data-warehouse", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/walkthroughs-spark.md", + "redirect_url": 
"/azure/architecture/data-science-process/walkthroughs-spark", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/walkthroughs-hdinsight-hadoop.md", + "redirect_url": "/azure/architecture/data-science-process/walkthroughs-hdinsight-hadoop", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/walkthroughs-azure-data-lake.md", + "redirect_url": "/azure/architecture/data-science-process/walkthroughs-azure-data-lake", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/walkthroughs-aml-with-tdsp.md", + "redirect_url": "/azure/architecture/data-science-process/walkthroughs", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/virtual-machines.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/track-progress.md", + "redirect_url": "/azure/architecture/data-science-process/track-progress", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/team-lead-tasks.md", + "redirect_url": "/azure/architecture/data-science-process/team-lead-tasks", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/team-data-science-process-project-templates.md", + "redirect_url": "/azure/architecture/data-science-process/team-data-science-process-project-templates", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/team-data-science-process-for-devops.md", + "redirect_url": "/azure/architecture/data-science-process/team-data-science-process-for-devops", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/team-data-science-process-for-data-scientists.md", + "redirect_url": "/azure/architecture/data-science-process/team-data-science-process-for-data-scientists", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/sqldw-walkthrough.md", + "redirect_url": "/azure/architecture/data-science-process/sqldw-walkthrough", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/sql-walkthrough.md", + "redirect_url": "/azure/architecture/data-science-process/sql-walkthrough", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/sql-server-virtual-machine.md", + "redirect_url": "/azure/architecture/data-science-process/sql-server-virtual-machine", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/spark-overview.md", + "redirect_url": "/azure/architecture/data-science-process/spark-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/spark-model-consumption.md", + "redirect_url": "/azure/architecture/data-science-process/spark-model-consumption", + "redirect_document_id": false + }, + { + "source_path_from_root": 
"/articles/machine-learning/team-data-science-process/spark-data-exploration-modeling.md", + "redirect_url": "/azure/architecture/data-science-process/spark-data-exploration-modeling", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/spark-advanced-data-exploration-modeling.md", + "redirect_url": "/azure/architecture/data-science-process/spark-advanced-data-exploration-modeling", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/select-features.md", + "redirect_url": "/azure/architecture/data-science-process/select-features", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/scala-walkthrough.md", + "redirect_url": "/azure/architecture/data-science-process/scala-walkthrough", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/sample-data.md", + "redirect_url": "/azure/architecture/data-science-process/sample-data", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/sample-data-sql-server.md", + "redirect_url": "/azure/architecture/data-science-process/sample-data-sql-server", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/sample-data-hive.md", + "redirect_url": "/azure/architecture/data-science-process/sample-data-hive", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/sample-data-blob.md", + "redirect_url": "/azure/architecture/data-science-process/sample-data-blob", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/roles-tasks.md", + "redirect_url": "/azure/architecture/data-science-process/roles-tasks", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/python-data-access.md", + "redirect_url": "/azure/architecture/data-science-process/python-data-access", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/project-lead-tasks.md", + "redirect_url": "/azure/architecture/data-science-process/project-lead-tasks", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/project-ic-tasks.md", + "redirect_url": "/azure/architecture/data-science-process/project-ic-tasks", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/project-execution.md", + "redirect_url": "/azure/architecture/data-science-process/agile-development", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/prepare-data.md", + "redirect_url": "/azure/architecture/data-science-process/prepare-data", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/predictive-maintenance-technical-guide.md", + "redirect_url": "/azure/architecture/data-science-process/predictive-maintenance-technical-guide", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/predictive-maintenance-playbook.md", + 
"redirect_url": "/azure/architecture/data-science-process/predictive-maintenance-playbook", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/predictive-maintenance-architecture.md", + "redirect_url": "/azure/architecture/data-science-process/predictive-maintenance-architecture", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/predict-twitter-sentiment.md", + "redirect_url": "/azure/architecture/data-science-process/walkthroughs", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/predict-twitter-sentiment-amltextpackage.md", + "redirect_url": "/azure/architecture/data-science-process/walkthroughs", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/platforms-and-tools.md", + "redirect_url": "/azure/architecture/data-science-process/platforms-and-tools", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/plan-your-environment.md", + "redirect_url": "/azure/architecture/data-science-process/plan-your-environment", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/plan-sample-scenarios.md", + "redirect_url": "/azure/architecture/data-science-process/plan-sample-scenarios", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/parallel-load-sql-partitioned-tables.md", + "redirect_url": "/azure/architecture/data-science-process/parallel-load-sql-partitioned-tables", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/overview.md", + "redirect_url": "/azure/architecture/data-science-process/overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/move-sql-server-virtual-machine.md", + "redirect_url": "/azure/architecture/data-science-process/move-sql-server-virtual-machine", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/move-sql-azure.md", + "redirect_url": "/azure/architecture/data-science-process/move-sql-azure", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/move-sql-azure-adf.md", + "redirect_url": "/azure/architecture/data-science-process/move-sql-azure-adf", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/move-hive-tables.md", + "redirect_url": "/azure/architecture/data-science-process/move-hive-tables", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/move-data-to-azure-blob-using-ssis.md", + "redirect_url": "/azure/architecture/data-science-process/move-data-to-azure-blob-using-ssis", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/move-data-to-azure-blob-using-python.md", + "redirect_url": "/azure/storage/blobs/storage-python-how-to-use-blob-storage", + "redirect_document_id": false + }, + { + "source_path_from_root": 
"/articles/machine-learning/team-data-science-process/move-data-to-azure-blob-using-azure-storage-explorer.md", + "redirect_url": "/azure/architecture/data-science-process/move-data-to-azure-blob-using-azure-storage-explorer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/move-data-to-azure-blob-using-azcopy.md", + "redirect_url": "/azure/storage/common/storage-use-azcopy-v10", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/move-azure-blob.md", + "redirect_url": "/azure/architecture/data-science-process/move-azure-blob", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/lifecycle.md", + "redirect_url": "/azure/architecture/data-science-process/lifecycle", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/lifecycle-modeling.md", + "redirect_url": "/azure/architecture/data-science-process/lifecycle-modeling", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/lifecycle-deployment.md", + "redirect_url": "/azure/architecture/data-science-process/lifecycle-deployment", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/lifecycle-data.md", + "redirect_url": "/azure/architecture/data-science-process/lifecycle-data", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/lifecycle-business-understanding.md", + "redirect_url": "/azure/architecture/data-science-process/lifecycle-business-understanding", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/lifecycle-acceptance.md", + "redirect_url": "/azure/architecture/data-science-process/lifecycle-acceptance", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/isic-image-classification.md", + "redirect_url": "/azure/architecture/data-science-process/walkthroughs", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/ingest-data.md", + "redirect_url": "/azure/architecture/data-science-process/ingest-data", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/index.yml", + "redirect_url": "/azure/architecture/data-science-process/overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/hive-walkthrough.md", + "redirect_url": "/azure/architecture/data-science-process/hive-walkthrough", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/hive-criteo-walkthrough.md", + "redirect_url": "/azure/architecture/data-science-process/hive-criteo-walkthrough", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/group-manager-tasks.md", + "redirect_url": "/azure/architecture/data-science-process/group-manager-tasks", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/explore-data.md", + "redirect_url": 
"/azure/architecture/data-science-process/explore-data", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/explore-data-sql-server.md", + "redirect_url": "/azure/architecture/data-science-process/explore-data-sql-server", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/explore-data-hive-tables.md", + "redirect_url": "/azure/architecture/data-science-process/explore-data-hive-tables", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/explore-data-blob.md", + "redirect_url": "/azure/architecture/data-science-process/explore-data-blob", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/execute-data-science-tasks.md", + "redirect_url": "/azure/architecture/data-science-process/execute-data-science-tasks", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/environment-setup.md", + "redirect_url": "/azure/architecture/data-science-process/environment-setup", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/deploy-models-in-production.md", + "redirect_url": "/azure/architecture/data-science-process/deploy-models-in-production", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/data-lake-walkthrough.md", + "redirect_url": "/azure/architecture/data-science-process/data-lake-walkthrough", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/data-blob.md", + "redirect_url": "/azure/architecture/data-science-process/data-blob", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/create-features.md", + "redirect_url": "/azure/architecture/data-science-process/create-features", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/create-features-sql-server.md", + "redirect_url": "/azure/architecture/data-science-process/create-features-sql-server", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/create-features-hive.md", + "redirect_url": "/azure/architecture/data-science-process/create-features-hive", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/create-features-blob.md", + "redirect_url": "/azure/architecture/data-science-process/explore-data-blob", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/cortana-intelligence-appsource-publishing-guide.md", + "redirect_url": "https://azure.microsoft.com/overview/ai-platform/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/cortana-intelligence-appsource-evaluation-tool.md", + "redirect_url": "https://azure.microsoft.com/overview/ai-platform/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/cortana-analytics-technical-guide-predictive-maintenance.md", + "redirect_url": 
"/azure/architecture/data-science-process/predictive-maintenance-technical-guide", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/cortana-analytics-playbook-vehicle-telemetry.md", + "redirect_url": "https://gallery.azure.ai/browse?s=vehicle%20telemetry%20analytics", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/cortana-analytics-playbook-vehicle-telemetry-powerbi.md", + "redirect_url": "https://gallery.azure.ai/browse?s=vehicle%20telemetry%20analytics", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/cortana-analytics-playbook-vehicle-telemetry-deep-dive.md", + "redirect_url": "https://gallery.azure.ai/browse?s=vehicle%20telemetry%20analytics", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/cortana-analytics-playbook-predictive-maintenance.md", + "redirect_url": "/azure/architecture/data-science-process/predictive-maintenance-playbook", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/cortana-analytics-architecture-predictive-maintenance.md", + "redirect_url": "/azure/architecture/data-science-process/predictive-maintenance-architecture", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/context/ml-context.yml", + "redirect_url": "/azure/architecture/data-science-process/context/ml-context", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/collaborative-coding-with-git.md", + "redirect_url": "/azure/architecture/data-science-process/collaborative-coding-with-git", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/code-test.md", + "redirect_url": "/azure/architecture/data-science-process/code-test", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/ci-cd-flask.md", + "redirect_url": "/azure/architecture/data-science-process/ci-cd-flask", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/automated-data-pipeline-cheat-sheet.md", + "redirect_url": "/azure/architecture/data-science-process/automated-data-pipeline-cheat-sheet", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/apps-anomaly-detection-api.md", + "redirect_url": "/azure/architecture/data-science-process/apps-anomaly-detection-api", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/team-data-science-process/agile-development.md", + "redirect_url": "/azure/architecture/data-science-process/agile-development", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/whats-new.md", + "redirect_url": "https://azure.microsoft.com/updates/?product=machine-learning-studio", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/what-is-ml-studio.md", + "redirect_url": "/azure/machine-learning/overview-what-is-machine-learning-studio#ml-studio-classic-vs-azure-machine-learning-studio", + "redirect_document_id": 
false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/what-is-machine-learning.md", + "redirect_url": "/azure/machine-learning/classic/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/web-services-that-use-import-export-modules.md", + "redirect_url": "/azure/machine-learning/classic/web-services-that-use-import-export-modules", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/web-services-logging.md", + "redirect_url": "/azure/machine-learning/classic/web-services-logging", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/web-service-parameters.md", + "redirect_url": "/azure/machine-learning/classic/web-service-parameters", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/web-service-error-codes.md", + "redirect_url": "/azure/machine-learning/classic/web-service-error-codes", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/walkthrough-develop-predictive-solution.md", + "redirect_url": "/azure/machine-learning/classic/tutorial-part1-credit-risk", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/walkthrough-6-access-web-service.md", + "redirect_url": "/azure/machine-learning/classic/tutorial-part3-credit-risk-deploy", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/walkthrough-5-publish-web-service.md", + "redirect_url": "/azure/machine-learning/classic/tutorial-part3-credit-risk-deploy", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/walkthrough-4-train-and-evaluate-models.md", + "redirect_url": "/azure/machine-learning/classic/tutorial-part2-credit-risk-train", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/walkthrough-3-create-new-experiment.md", + "redirect_url": "/azure/machine-learning/classic/tutorial-part1-credit-risk", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/walkthrough-2-upload-data.md", + "redirect_url": "/azure/machine-learning/classic/tutorial-part1-credit-risk", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/walkthrough-1-create-ml-workspace.md", + "redirect_url": "/azure/machine-learning/classic/tutorial-part1-credit-risk", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/version-control.md", + "redirect_url": "/azure/machine-learning/classic/version-control", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/use-sample-datasets.md", + "redirect_url": "/azure/machine-learning/classic/use-sample-datasets", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/use-data-from-an-on-premises-sql-server.md", + "redirect_url": "/azure/machine-learning/classic/use-data-from-an-on-premises-sql-server", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/use-case-excel-studio.md", + "redirect_url": "/azure/machine-learning/classic/", + "redirect_document_id": false + }, + { + "source_path_from_root": 
"/articles/machine-learning/studio/tutorial-part3-credit-risk-deploy.md", + "redirect_url": "/azure/machine-learning/classic/tutorial-part3-credit-risk-deploy", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/tutorial-part2-credit-risk-train.md", + "redirect_url": "/azure/machine-learning/classic/tutorial-part2-credit-risk-train", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/tutorial-part1-credit-risk.md", + "redirect_url": "/azure/machine-learning/classic/tutorial-part1-credit-risk", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/troubleshooting-retraining-models.md", + "redirect_url": "/azure/machine-learning/classic/retrain-classic-web-service", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/troubleshooting-creating-ml-workspace.md", + "redirect_url": "/azure/machine-learning/classic/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/text-analytics-module-tutorial.md", + "redirect_url": "/azure/machine-learning/classic/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/support-aml-studio.md", + "redirect_url": "/azure/machine-learning/classic/support-aml-studio", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/studio-classic-overview.md", + "redirect_url": "/azure/machine-learning/classic/studio-classic-overview", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/sample-experiments.md", + "redirect_url": "/azure/machine-learning/classic/sample-experiments", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/retrain-new-web-service-using-powershell.md", + "redirect_url": "/azure/machine-learning/classic/retrain-machine-learning-model", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/retrain-models-programmatically.md", + "redirect_url": "/azure/machine-learning/classic/retrain-machine-learning-model", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/retrain-machine-learning-model.md", + "redirect_url": "/azure/machine-learning/classic/retrain-machine-learning-model", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/retrain-existing-resource-manager-based-web-service.md", + "redirect_url": "/azure/machine-learning/classic/retrain-machine-learning-model", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/retrain-classic-web-service.md", + "redirect_url": "/azure/machine-learning/classic/retrain-classic-web-service", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/r-get-started.md", + "redirect_url": "/azure/machine-learning/classic/r-get-started", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/powershell-module.md", + "redirect_url": "/azure/machine-learning/classic/powershell-module", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/model-progression-experiment-to-web-service.md", + "redirect_url": 
"/azure/machine-learning/classic/model-progression-experiment-to-web-service", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/manage-workspace.md", + "redirect_url": "/azure/machine-learning/classic/manage-workspace", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/manage-web-service-endpoints-using-api-management.md", + "redirect_url": "/azure/machine-learning/classic/manage-web-service-endpoints-using-api-management", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/manage-new-webservice.md", + "redirect_url": "/azure/machine-learning/classic/manage-new-webservice", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/manage-experiment-iterations.md", + "redirect_url": "/azure/machine-learning/classic/manage-experiment-iterations", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/live-chat.md", + "redirect_url": "https://social.msdn.microsoft.com/Forums/home?forum=MachineLearning", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/linear-regression-in-azure.md", + "redirect_url": "/azure/machine-learning/classic/use-case-excel-studio", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/interpret-model-results.md", + "redirect_url": "/azure/machine-learning/classic/interpret-model-results", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/index.yml", + "redirect_url": "/azure/machine-learning/classic/index", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/import-data.md", + "redirect_url": "/azure/machine-learning/classic/import-data", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/gallery-how-to-use-contribute-publish.md", + "redirect_url": "/azure/machine-learning/classic/gallery-how-to-use-contribute-publish", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/faq.md", + "redirect_url": "/azure/machine-learning/classic/what-is-ml-studio", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/extend-your-experiment-with-r.md", + "redirect_url": "/azure/machine-learning/classic/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/export-delete-personal-data-dsr.md", + "redirect_url": "/azure/machine-learning/classic/export-delete-personal-data-dsr", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/execute-python-scripts.md", + "redirect_url": "/azure/machine-learning/classic/execute-python-scripts", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/excel-add-in-for-web-services.md", + "redirect_url": "/azure/machine-learning/classic/excel-add-in-for-web-services", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/evaluate-model-performance.md", + "redirect_url": "/azure/machine-learning/classic/evaluate-model-performance", + "redirect_document_id": true + }, + { + "source_path_from_root": 
"/articles/machine-learning/studio/deploy-with-resource-manager-template.md", + "redirect_url": "/azure/machine-learning/classic/deploy-with-resource-manager-template", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/deploy-consume-web-service-guide.md", + "redirect_url": "/azure/machine-learning/classic/deploy-consume-web-service-guide", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/deploy-a-machine-learning-web-service.md", + "redirect_url": "/azure/machine-learning/classic/deploy-a-machine-learning-web-service", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/debug-models.md", + "redirect_url": "/azure/machine-learning/classic/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/datamarket-deprecation.md", + "redirect_url": "https://azure.microsoft.com/services/cognitive-services/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/data-science-for-beginners-the-5-questions-data-science-answers.md", + "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/data-science-for-beginners-predict-an-answer-with-a-simple-model.md", + "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/data-science-for-beginners-is-your-data-ready-for-data-science.md", + "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/data-science-for-beginners-copy-other-peoples-work-to-do-data-science.md", + "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/data-science-for-beginners-ask-a-question-you-can-answer-with-data.md", + "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/custom-r-modules.md", + "redirect_url": "/azure/machine-learning/classic/custom-r-modules", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/create-workspace.md", + "redirect_url": "/azure/machine-learning/classic/create-workspace", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/create-models-and-endpoints-with-powershell.md", + "redirect_url": "/azure/machine-learning/classic/create-models-and-endpoints-with-powershell", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/create-experiment.md", + "redirect_url": "/azure/machine-learning/classic/create-experiment", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/create-endpoint.md", + "redirect_url": "/azure/machine-learning/classic/create-endpoint", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/convert-training-experiment-to-scoring-experiment.md", + 
"redirect_url": "/azure/machine-learning/classic/deploy-a-machine-learning-web-service", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/consuming-from-excel.md", + "redirect_url": "/azure/machine-learning/classic/consuming-from-excel", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/consume-web-services.md", + "redirect_url": "/azure/machine-learning/classic/consume-web-services", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/consume-web-service-with-web-app-template.md", + "redirect_url": "/azure/machine-learning/classic/consume-web-services", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/basics-infographic-with-algorithm-examples.md", + "redirect_url": "/azure/machine-learning/how-to-select-algorithms", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/azure-ml-netsharp-reference-guide.md", + "redirect_url": "/azure/machine-learning/classic/azure-ml-netsharp-reference-guide", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/azure-ml-customer-churn-scenario.md", + "redirect_url": "/azure/machine-learning/classic/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/studio/algorithm-parameters-optimize.md", + "redirect_url": "/azure/machine-learning/classic/algorithm-parameters-optimize", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/algorithm-choice.md", + "redirect_url": "/azure/machine-learning/how-to-select-algorithms", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/algorithm-cheat-sheet.md", + "redirect_url": "/azure/machine-learning/algorithm-cheat-sheet", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/studio/ai-gallery-control-personal-data-dsr.md", + "redirect_url": "/azure/machine-learning/classic/ai-gallery-control-personal-data-dsr", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/services.md", + "redirect_url": "/azure/machine-learning/index", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/ui-tutorial-automobile-price-train-score.md", + "redirect_url": "/azure/machine-learning/tutorial-designer-automobile-price-train-score", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/ui-tutorial-automobile-price-deploy.md", + "redirect_url": "/azure/machine-learning/tutorial-designer-automobile-price-deploy", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/ui-quickstart-run-experiment.md", + "redirect_url": "tutorial-designer-automobile-price-train-score", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/ui-concept-visual-interface.md", + "redirect_url": "/azure/machine-learning/concept-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/tutorial-train-models-with-aml.md", + "redirect_url": "/azure/machine-learning/tutorial-train-models-with-aml", + "redirect_document_id": true + }, + { + "source_path_from_root": 
"/articles/machine-learning/service/tutorial-train-deploy-model-cli.md", + "redirect_url": "/azure/machine-learning/how-to-train-cli", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/tutorial-pipeline-batch-scoring-classification.md", + "redirect_url": "/azure/machine-learning/tutorial-pipeline-batch-scoring-classification", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/tutorial-labeling.md", + "redirect_url": "/azure/machine-learning/how-to-create-image-labeling-projects", + "redirect_document_id": "false" + }, + { + "source_path_from_root": "/articles/machine-learning/service/tutorial-first-experiment-automated-ml.md", + "redirect_url": "/azure/machine-learning/tutorial-first-experiment-automated-ml", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/tutorial-designer-automobile-price-train-score.md", + "redirect_url": "/azure/machine-learning/tutorial-designer-automobile-price-train-score", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/tutorial-designer-automobile-price-deploy.md", + "redirect_url": "/azure/machine-learning/tutorial-designer-automobile-price-deploy", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/tutorial-deploy-models-with-aml.md", + "redirect_url": "/azure/machine-learning/tutorial-deploy-models-with-aml", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/tutorial-data-prep.md", + "redirect_url": "/azure/machine-learning/tutorial-auto-train-models", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/tutorial-auto-train-models.md", + "redirect_url": "/azure/machine-learning/tutorial-auto-train-models", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/tutorial-1st-r-experiment.md", + "redirect_url": "https://github.com/Azure/azureml-sdk-for-r", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/tutorial-1st-experiment-sdk-train.md", + "redirect_url": "/azure/machine-learning/tutorial-1st-experiment-sdk-train", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/tutorial-1st-experiment-sdk-setup.md", + "redirect_url": "/azure/machine-learning/tutorial-1st-experiment-sdk-setup", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/support-for-aml-services.md", + "redirect_url": "https://social.msdn.microsoft.com/Forums/home?forum=AzureMachineLearningService", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/setup-create-workspace.md", + "redirect_url": "/azure/machine-learning/how-to-manage-workspace", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/samples-notebooks.md", + "redirect_url": "/azure/machine-learning/samples-notebooks", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/resource-known-issues.md", + "redirect_url": "/azure/machine-learning/how-to-debug-visual-studio-code", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/reference-azure-machine-learning-cli.md", + 
"redirect_url": "/azure/machine-learning/reference-azure-machine-learning-cli", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/quickstart-run-local-notebook.md", + "redirect_url": "/azure/machine-learning/how-to-configure-environment#local", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/quickstart-run-cloud-notebook.md", + "redirect_url": "/azure/machine-learning/tutorial-1st-experiment-sdk-setup", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/quickstart-get-started.md", + "redirect_url": "/azure/machine-learning/tutorial-1st-experiment-sdk-setup", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/quickstart-get-started-with-cli.md", + "redirect_url": "/azure/machine-learning/reference-azure-machine-learning-cli", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/quickstart-create-workspace-with-python.md", + "redirect_url": "/azure/machine-learning/how-to-configure-environment#local", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/publish-a-machine-learning-web-service.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-and-where", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/overview-what-is-machine-learning.md", + "redirect_url": "/azure/machine-learning/overview-what-is-machine-learning", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/overview-what-happened-to-workbench.md", + "redirect_url": "/azure/machine-learning/overview-what-happened-to-workbench", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/overview-more-machine-learning.md", + "redirect_url": "/azure/architecture/data-guide/technology-choices/data-science-and-machine-learning", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/monitor-resource-reference.md", + "redirect_url": "/azure/machine-learning/monitor-resource-reference", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/monitor-azure-machine-learning.md", + "redirect_url": "/azure/machine-learning/monitor-azure-machine-learning", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/machine-learning-interpretability-explainability.md", + "redirect_url": "/azure/machine-learning/how-to-machine-learning-interpretability", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/index.yml", + "redirect_url": "/azure/machine-learning/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-write-data.md", + "redirect_url": "/azure/machine-learning/how-to-create-register-datasets", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-vscode-train-deploy.md", + "redirect_url": "/azure/machine-learning/tutorial-setup-vscode-extension", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-vscode-tools.md", + "redirect_url": "/azure/machine-learning/tutorial-setup-vscode-extension", + 
"redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-version-track-datasets.md", + "redirect_url": "/azure/machine-learning/how-to-version-track-datasets", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-use-secrets-in-runs.md", + "redirect_url": "/azure/machine-learning/how-to-use-secrets-in-runs", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-use-mlflow.md", + "redirect_url": "/azure/machine-learning/how-to-use-mlflow", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-use-event-grid.md", + "redirect_url": "/azure/machine-learning/how-to-use-event-grid", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-use-environments.md", + "redirect_url": "/azure/machine-learning/how-to-use-environments", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-understand-automated-ml.md", + "redirect_url": "/azure/machine-learning/how-to-understand-automated-ml", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-understand-accuracy-metrics.md", + "redirect_url": "/azure/machine-learning/how-to-understand-automated-ml", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-ui-sample-text-classification.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-ui-sample-regression-predict-automobile-price-compare-algorithms.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-ui-sample-regression-predict-automobile-price-basic.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-ui-sample-classification-predict-flight-delay.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-ui-sample-classification-predict-credit-risk-cost-sensitive.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-ui-sample-classification-predict-credit-risk-basic.md", + "redirect_url": "/azure/machine-learning/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-ui-sample-classification-predict-churn.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-tune-hyperparameters.md", + "redirect_url": "/azure/machine-learning/how-to-tune-hyperparameters", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-troubleshoot-deployment.md", + "redirect_url": "/azure/machine-learning/how-to-troubleshoot-deployment", + "redirect_document_id": true + }, + { + "source_path_from_root": 
"/articles/machine-learning/service/how-to-transform-data.md", + "redirect_url": "/azure/machine-learning/how-to-create-register-datasets", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-train-with-datasets.md", + "redirect_url": "/azure/machine-learning/how-to-train-with-datasets", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-train-tensorflow.md", + "redirect_url": "/azure/machine-learning/how-to-train-tensorflow", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-train-scikit-learn.md", + "redirect_url": "/azure/machine-learning/how-to-train-scikit-learn", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-train-pytorch.md", + "redirect_url": "/azure/machine-learning/how-to-train-pytorch", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-train-ml-models.md", + "redirect_url": "/azure/machine-learning/how-to-train-ml-models", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-train-keras.md", + "redirect_url": "/azure/machine-learning/how-to-train-keras", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-train-chainer.md", + "redirect_url": "/azure/machine-learning/how-to-train-chainer", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-track-experiments.md", + "redirect_url": "/azure/machine-learning/how-to-track-experiments", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-setup-authentication.md", + "redirect_url": "/azure/machine-learning/how-to-setup-authentication", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-set-up-training-targets.md", + "redirect_url": "/azure/machine-learning/how-to-set-up-training-targets", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-secure-web-service.md", + "redirect_url": "/azure/machine-learning/how-to-secure-web-service", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-schedule-pipelines.md", + "redirect_url": "/azure/machine-learning/how-to-schedule-pipelines", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-save-write-experiment-files.md", + "redirect_url": "/azure/machine-learning/how-to-save-write-experiment-files", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-run-batch-predictions.md", + "redirect_url": "/azure/machine-learning/how-to-run-batch-predictions", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-run-batch-predictions-designer.md", + "redirect_url": "/azure/machine-learning/how-to-run-batch-predictions-designer", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-retrain-designer.md", + "redirect_url": "/azure/machine-learning/how-to-retrain-designer", + "redirect_document_id": true + }, + { + "source_path_from_root": 
"/articles/machine-learning/service/how-to-monitor-tensorboard.md", + "redirect_url": "/azure/machine-learning/how-to-monitor-tensorboard", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-monitor-datasets.md", + "redirect_url": "/azure/machine-learning/how-to-monitor-datasets", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-monitor-data-drift.md", + "redirect_url": "/azure/machine-learning/how-to-monitor-data-drift", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-migrate.md", + "redirect_url": "/azure/machine-learning/overview-what-happened-to-workbench", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-manage-workspace.md", + "redirect_url": "/azure/machine-learning/how-to-manage-workspace", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-manage-workspace-cli.md", + "redirect_url": "/azure/machine-learning/how-to-manage-workspace-cli", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-manage-runs.md", + "redirect_url": "/azure/machine-learning/how-to-manage-runs", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-manage-quotas.md", + "redirect_url": "/azure/machine-learning/how-to-manage-quotas", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-manage-dataset-definitions.md", + "redirect_url": "/python/api/azureml-core/azureml.core.dataset", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-machine-learning-interpretability.md", + "redirect_url": "/azure/machine-learning/how-to-machine-learning-interpretability", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-machine-learning-interpretability-automl.md", + "redirect_url": "/azure/machine-learning/how-to-machine-learning-interpretability-automl", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-machine-learning-interpretability-aml.md", + "redirect_url": "/azure/machine-learning/how-to-machine-learning-interpretability-aml", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-label-images.md", + "redirect_url": "/azure/machine-learning/how-to-label-images", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-improve-accuracy-for-computer-vision-models.md", + "redirect_url": "/azure/machine-learning/overview-what-happened-to-workbench", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-export-delete-data.md", + "redirect_url": "/azure/machine-learning/how-to-export-delete-data", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-explore-prepare-data.md", + "redirect_url": "/python/api/azureml-core/azureml.core.dataset.dataset", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-enable-virtual-network.md", + "redirect_url": "/azure/machine-learning/how-to-enable-virtual-network", + 
"redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-enable-logging.md", + "redirect_url": "/azure/machine-learning/how-to-enable-logging", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-enable-data-collection.md", + "redirect_url": "/azure/machine-learning/how-to-enable-data-collection", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-enable-app-insights.md", + "redirect_url": "/azure/machine-learning/how-to-enable-app-insights", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-designer-sample-text-classification.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-designer-sample-regression-automobile-price-compare-algorithms.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-designer-sample-regression-automobile-price-basic.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-designer-sample-classification-predict-income.md", + "redirect_url": "/azure/machine-learning/how-to-designer-sample-classification-predict-income", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-designer-sample-classification-flight-delay.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-designer-sample-classification-credit-risk-cost-sensitive.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-designer-sample-classification-churn.md", + "redirect_url": "/azure/machine-learning/how-to-designer-sample-classification-churn", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-to-iot.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-and-where", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-to-aks.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-and-where", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-to-aci.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-and-where", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-local-container-notebook-vm.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-local-container-notebook-vm", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-inferencing-gpus.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-inferencing-gpus", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-fpga-web-service.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-fpga-web-service", + "redirect_document_id": true + 
}, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-existing-model.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-and-where", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-custom-docker-image.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-custom-docker-image", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-azure-kubernetes-service.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-azure-kubernetes-service", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-azure-container-instance.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-azure-container-instance", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-deploy-and-where.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-and-where", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-define-task-type.md", + "redirect_url": "/azure/machine-learning/how-to-define-task-type", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-debug-pipelines.md", + "redirect_url": "/azure/machine-learning/how-to-debug-pipelines", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-debug-batch-predictions.md", + "redirect_url": "/azure/machine-learning/how-to-debug-batch-predictions", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-data-prep.md", + "redirect_url": "/python/api/overview/azure/dataprep/intro?view=azure-dataprep-py", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-create-your-first-pipeline.md", + "redirect_url": "/azure/machine-learning/how-to-create-your-first-pipeline", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-create-workspace-template.md", + "redirect_url": "/azure/machine-learning/how-to-create-workspace-template", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-create-register-datasets.md", + "redirect_url": "/azure/machine-learning/how-to-create-register-datasets", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-create-portal-experiments.md", + "redirect_url": "/azure/machine-learning/how-to-use-automated-ml-for-ml-models", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-create-labeling-projects.md", + "redirect_url": "/azure/machine-learning/how-to-create-labeling-projects", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-create-dataset-snapshots.md", + "redirect_url": "/python/api/azureml-core/azureml.data.dataset_snapshot.datasetsnapshot", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-consume-web-service.md", + "redirect_url": "/azure/machine-learning/how-to-consume-web-service", + "redirect_document_id": true + }, + { + "source_path_from_root": 
"/articles/machine-learning/service/how-to-configure-environment.md", + "redirect_url": "/azure/machine-learning/how-to-configure-environment", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-configure-auto-train.md", + "redirect_url": "/azure/machine-learning/how-to-configure-auto-train", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-choose-a-dev-environment.md", + "redirect_url": "/azure/machine-learning/how-to-configure-environment", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-change-storage-access-key.md", + "redirect_url": "/azure/machine-learning/how-to-change-storage-access-key", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-build-deploy-text-classification-models.md", + "redirect_url": "/azure/machine-learning/overview-what-happened-to-workbench", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-build-deploy-onnx.md", + "redirect_url": "/azure/machine-learning/concept-onnx", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-build-deploy-object-detection-models.md", + "redirect_url": "/azure/machine-learning/overview-what-happened-to-workbench", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-build-deploy-image-similarity-models.md", + "redirect_url": "/azure/machine-learning/overview-what-happened-to-workbench", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-build-deploy-image-classification-models.md", + "redirect_url": "/azure/machine-learning/overview-what-happened-to-workbench", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-automated-ml.md", + "redirect_url": "/azure/machine-learning/concept-automated-ml", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-auto-train-remote.md", + "redirect_url": "/azure/machine-learning/how-to-auto-train-remote", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-auto-train-forecast.md", + "redirect_url": "/azure/machine-learning/how-to-auto-train-forecast", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-assign-roles.md", + "redirect_url": "/azure/machine-learning/how-to-assign-roles", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/how-to-access-data.md", + "redirect_url": "/azure/machine-learning/how-to-access-data", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/concept-workspace.md", + "redirect_url": "/azure/machine-learning/concept-workspace", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/concept-train-model-git-integration.md", + "redirect_url": "/azure/machine-learning/concept-train-model-git-integration", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/concept-train-machine-learning-model.md", + "redirect_url": "/azure/machine-learning/concept-train-machine-learning-model", + 
"redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/concept-onnx.md", + "redirect_url": "/azure/machine-learning/concept-onnx", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/concept-model-management-and-deployment.md", + "redirect_url": "/azure/machine-learning/concept-model-management-and-deployment", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/concept-ml-pipelines.md", + "redirect_url": "/azure/machine-learning/concept-ml-pipelines", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/concept-event-grid-integration.md", + "redirect_url": "/azure/machine-learning/concept-event-grid-integration", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/concept-enterprise-security.md", + "redirect_url": "/azure/machine-learning/concept-enterprise-security", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/concept-designer.md", + "redirect_url": "/azure/machine-learning/concept-designer", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/concept-deep-learning-vs-machine-learning.md", + "redirect_url": "/azure/machine-learning/concept-deep-learning-vs-machine-learning", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/concept-data.md", + "redirect_url": "/azure/machine-learning/concept-data", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/concept-data-drift.md", + "redirect_url": "/azure/machine-learning/how-to-monitor-data-drift", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/concept-compute-target.md", + "redirect_url": "/azure/machine-learning/concept-compute-target", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/concept-compute-instance.md", + "redirect_url": "/azure/machine-learning/concept-compute-instance", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/concept-azure-machine-learning-architecture.md", + "redirect_url": "/azure/machine-learning/concept-azure-machine-learning-architecture", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/concept-automated-ml.md", + "redirect_url": "/azure/machine-learning/concept-automated-ml", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/service/concept-accelerate-with-fpgas.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-fpga-web-service", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/concept-accelerate-inferencing-with-gpus.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-inferencing-gpus", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/service/azure-machine-learning-release-notes.md", + "redirect_url": "/azure/machine-learning/azure-machine-learning-release-notes", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/sample-designer-datasets.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": 
false + }, + { + "source_path_from_root": "/articles/machine-learning/resource-known-issues.md", + "redirect_url": "/azure/machine-learning/how-to-configure-auto-train#troubleshooting", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/reference-yaml-job-component.md", + "redirect_url": "reference-yaml-job-command", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/reference-yaml-endpoint-managed-batch.md", + "redirect_url": "/azure/machine-learning/reference-yaml-endpoint-batch", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/reference-yaml-endpoint-k8s-online.md", + "redirect_url": "/azure/machine-learning/reference-yaml-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/reference-yaml-deployment-managed-batch.md", + "redirect_url": "/azure/machine-learning/reference-yaml-deployment-batch", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/reference-yaml-deployment-k8s-online.md", + "redirect_url": "/azure/machine-learning/reference-yaml-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/reference-yaml-dataset.md", + "redirect_url": "reference-yaml-data", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/reference-yaml-compute.md", + "redirect_url": "/azure/machine-learning/reference-yaml-compute-aml", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/reference-pipeline-yaml.md", + "redirect_url": "/azure/machine-learning/v1/reference-pipeline-yaml", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/reference-online-endpoint-yaml.md", + "redirect_url": "reference-yaml-endpoint-online", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/reference-azure-machine-learning-cli.md", + "redirect_url": "/azure/machine-learning/v1/reference-azure-machine-learning-cli", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/r-developers-guide.md", + "redirect_url": "/azure/architecture/data-guide/technology-choices/r-developers-guide", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/overview-what-is-azure-ml.md", + "redirect_url": "/azure/machine-learning/overview-what-is-azure-machine-learning", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-webservice-deploy-a-web-service.md", + "redirect_url": "machine-learning-publish-a-machine-learning-web-service", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-recommendation-api-sample-application.md", + "redirect_url": "machine-learning-datamarket-deprecation", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-recommendation-api-quick-start-guide.md", + "redirect_url": "machine-learning-datamarket-deprecation", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-recommendation-api-javascript-integration.md", + "redirect_url": "machine-learning-datamarket-deprecation", + "redirect_document_id": false + }, + { + "source_path_from_root": 
"/articles/machine-learning/machine-learning-recommendation-api-faq.md", + "redirect_url": "machine-learning-datamarket-deprecation", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-recommendation-api-documentation.md", + "redirect_url": "machine-learning-datamarket-deprecation", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-web-service-examples.md", + "redirect_url": "https://gallery.cortanaintelligence.com/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-survival-analysis.md", + "redirect_url": "https://gallery.cortanaintelligence.com/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-retail-demand-forecasting.md", + "redirect_url": "https://gallery.cortanaintelligence.com/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-normal-distribution.md", + "redirect_url": "https://gallery.cortanaintelligence.com/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-multivariate-linear-regression.md", + "redirect_url": "https://gallery.cortanaintelligence.com/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-lexicon-based-sentiment-analysis.md", + "redirect_url": "https://gallery.cortanaintelligence.com/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-forecasting-exponential-smoothing.md", + "redirect_url": "https://gallery.cortanaintelligence.com/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-difference-in-two-proportions.md", + "redirect_url": "https://gallery.cortanaintelligence.com/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-cluster-model.md", + "redirect_url": "https://gallery.cortanaintelligence.com/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-binomial-distribution.md", + "redirect_url": "https://gallery.cortanaintelligence.com/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-binary-classifier.md", + "redirect_url": "https://gallery.cortanaintelligence.com/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-r-csharp-arima.md", + "redirect_url": "https://gallery.cortanaintelligence.com/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-python-data-access.md", + "redirect_url": "/azure/architecture/data-science-process/python-data-access", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-publish-web-service-to-azure-marketplace.md", + "redirect_url": "machine-learning-gallery-experiments", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-marketplace-faq.md", + "redirect_url": "https://gallery.cortanaintelligence.com/", + "redirect_document_id": false + }, 
+ { + "source_path_from_root": "/articles/machine-learning/machine-learning-feature-selection-and-engineering.md", + "redirect_url": "machine-learning-data-science-create-features", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-faq.md", + "redirect_url": "/azure/machine-learning/classic/faq", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-vm-do-ten-things.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/vm-do-ten-things", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-virtual-machine-overview.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-the-cortana-analytics-process.md", + "redirect_url": "data-science-process-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-spark-overview.md", + "redirect_url": "/azure/architecture/data-science-process/spark-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-spark-model-consumption.md", + "redirect_url": "/azure/architecture/data-science-process/spark-model-consumption", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-spark-data-exploration-modeling.md", + "redirect_url": "/azure/architecture/data-science-process/spark-data-exploration-modeling", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-spark-advanced-data-exploration-modeling.md", + "redirect_url": "/azure/architecture/data-science-process/spark-advanced-data-exploration-modeling", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-setup-virtual-machine.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-setup-sql-server-virtual-machine.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-select-features.md", + "redirect_url": "/azure/architecture/data-science-process/select-features", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-sample-data.md", + "redirect_url": "/azure/architecture/data-science-process/sample-data", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-sample-data-sql-server.md", + "redirect_url": "/azure/architecture/data-science-process/sample-data-sql-server", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-sample-data-hive.md", + "redirect_url": "/azure/architecture/data-science-process/sample-data-hive", + "redirect_document_id": false + }, + { + "source_path_from_root": 
"/articles/machine-learning/machine-learning-data-science-sample-data-blob.md", + "redirect_url": "/azure/architecture/data-science-process/sample-data-blob", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-provision-vm.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/provision-vm", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-process-sqldw-walkthrough.md", + "redirect_url": "/azure/architecture/data-science-process/sqldw-walkthrough", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-process-sql-walkthrough.md", + "redirect_url": "/azure/architecture/data-science-process/sql-walkthrough", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-process-sql-server-virtual-machine.md", + "redirect_url": "/azure/architecture/data-science-process/sql-server-virtual-machine", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-process-scala-walkthrough.md", + "redirect_url": "/azure/architecture/data-science-process/scala-walkthrough", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-process-hive-walkthrough.md", + "redirect_url": "/azure/architecture/data-science-process/hive-walkthrough", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-process-hive-criteo-walkthrough.md", + "redirect_url": "/azure/architecture/data-science-process/hive-criteo-walkthrough", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-process-data-lake-walkthrough.md", + "redirect_url": "/azure/architecture/data-science-process/data-lake-walkthrough", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-process-data-blob.md", + "redirect_url": "/azure/architecture/data-science-process/data-blob", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-prepare-data.md", + "redirect_url": "/azure/architecture/data-science-process/prepare-data", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-plan-your-environment.md", + "redirect_url": "/azure/architecture/data-science-process/plan-your-environment", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-plan-sample-scenarios.md", + "redirect_url": "/azure/architecture/data-science-process/plan-sample-scenarios", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-parallel-load-sql-partitioned-tables.md", + "redirect_url": "/azure/architecture/data-science-process/parallel-load-sql-partitioned-tables", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-move-sql-server-virtual-machine.md", + "redirect_url": "/azure/architecture/data-science-process/move-sql-server-virtual-machine", + "redirect_document_id": false + 
}, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-move-sql-azure.md", + "redirect_url": "/azure/architecture/data-science-process/move-sql-azure", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-move-sql-azure-adf.md", + "redirect_url": "/azure/architecture/data-science-process/move-sql-azure-adf", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-move-hive-tables.md", + "redirect_url": "/azure/architecture/data-science-process/move-hive-tables", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-move-data-to-azure-blob-using-ssis.md", + "redirect_url": "/azure/architecture/data-science-process/move-data-to-azure-blob-using-ssis", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-move-data-to-azure-blob-using-python.md", + "redirect_url": "/azure/architecture/data-science-process/move-data-to-azure-blob-using-python", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-move-data-to-azure-blob-using-azure-storage-explorer.md", + "redirect_url": "/azure/architecture/data-science-process/move-data-to-azure-blob-using-azure-storage-explorer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-move-data-to-azure-blob-using-azcopy.md", + "redirect_url": "/azure/architecture/data-science-process/move-data-to-azure-blob-using-azcopy", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-move-azure-blob.md", + "redirect_url": "/azure/architecture/data-science-process/move-azure-blob", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-linux-dsvm-walkthrough.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/linux-dsvm-walkthrough", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-linux-dsvm-intro.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/linux-dsvm-intro", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-ingest-data.md", + "redirect_url": "/azure/architecture/data-science-process/ingest-data", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-explore-data.md", + "redirect_url": "/azure/architecture/data-science-process/explore-data", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-explore-data-sql-server.md", + "redirect_url": "/azure/architecture/data-science-process/explore-data-sql-server", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-explore-data-hive-tables.md", + "redirect_url": "/azure/architecture/data-science-process/explore-data-hive-tables", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-explore-data-blob.md", + "redirect_url": 
"/azure/architecture/data-science-process/explore-data-blob", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-environment-setup.md", + "redirect_url": "/azure/architecture/data-science-process/environment-setup", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-dsvm-ubuntu-intro.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/dsvm-ubuntu-intro", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-create-features.md", + "redirect_url": "/azure/architecture/data-science-process/create-features", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-create-features-sql-server.md", + "redirect_url": "/azure/architecture/data-science-process/create-features-sql-server", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-create-features-hive.md", + "redirect_url": "/azure/architecture/data-science-process/create-features-hive", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-data-science-create-features-blob.md", + "redirect_url": "/azure/architecture/data-science-process/explore-data-blob", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-connect-to-azure-machine-learning-web-service.md", + "redirect_url": "machine-learning-consume-web-services", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-automated-data-pipeline-cheat-sheet.md", + "redirect_url": "/azure/architecture/data-science-process/automated-data-pipeline-cheat-sheet", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-apps-text-analytics.md", + "redirect_url": "/azure/cognitive-services/cognitive-services-text-analytics-quick-start", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-apps-anomaly-detection.md", + "redirect_url": "machine-learning-apps-anomaly-detection-api", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-apps-anomaly-detection-api.md", + "redirect_url": "/azure/architecture/data-science-process/apps-anomaly-detection-api", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-algorithm-choice.md", + "redirect_url": "/azure/machine-learning/how-to-select-algorithms", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/machine-learning-algorithm-cheat-sheet.md", + "redirect_url": "/azure/machine-learning/algorithm-cheat-sheet", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-use-parallel-run-step.md", + "redirect_url": "/azure/machine-learning/tutorial-pipeline-batch-scoring-classification", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-use-mlflow.md", + "redirect_url": "/azure/machine-learning/v1/how-to-use-mlflow", + "redirect_document_id": false + }, + { + "source_path_from_root": 
"/articles/machine-learning/how-to-ui-sample-regression-predict-automobile-price-basic.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-ui-sample-classification-predict-flight-delay.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-troubleshoot-managed-online-endpoints.md", + "redirect_url": "/azure/machine-learning/how-to-troubleshoot-online-endpoints", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-train-ml-models.md", + "redirect_url": "/azure/machine-learning/how-to-set-up-training-targets", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-train-chainer.md", + "redirect_url": "/azure/machine-learning/how-to-train-ml-models", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-track-experiments.md", + "redirect_url": "/azure/machine-learning/how-to-log-view-metrics", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-search-cross-workspace.md", + "redirect_url": "/azure/machine-learning/how-to-manage-workspace", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-schedule-pipelines.md", + "redirect_url": "/azure/machine-learning/how-to-trigger-published-pipeline", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-run-batch-predictions.md", + "redirect_url": "/azure/machine-learning/how-to-use-parallel-run-step", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-monitor-view-training-logs.md", + "redirect_url": "/azure/machine-learning/how-to-log-view-metrics", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-monitor-data-drift.md", + "redirect_url": "/azure/machine-learning/how-to-enable-data-collection", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-manage-runs.md", + "redirect_url": "/azure/machine-learning/how-to-track-monitor-analyze-runs", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-label-images.md", + "redirect_url": "/azure/machine-learning/how-to-label-data", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-kubernetes-instance-type.md", + "redirect_url": "/azure/machine-learning/how-to-attach-kubernetes-anywhere", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-homomorphic-encryption-seal.md", + "redirect_url": "/azure/machine-learning/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-enable-virtual-network.md", + "redirect_url": "/azure/machine-learning/how-to-network-security-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-enable-logging.md", + "redirect_url": "/azure/machine-learning/how-to-track-experiments", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-designer-sample-text-classification.md", + "redirect_url": 
"/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-designer-sample-regression-automobile-price-compare-algorithms.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-designer-sample-regression-automobile-price-basic.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-designer-sample-classification-predict-income.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-designer-sample-classification-flight-delay.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-designer-sample-classification-credit-risk-cost-sensitive.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-designer-sample-classification-churn.md", + "redirect_url": "/azure/machine-learning/samples-designer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-deploy-profile-model.md", + "redirect_url": "/azure/machine-learning/v1/how-to-deploy-profile-model", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-deploy-no-code-deployment.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-mlflow-models-online-endpoints", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-deploy-mlflow-models.md", + "redirect_url": "/azure/machine-learning/v1/how-to-deploy-mlflow-models", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-deploy-functions.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-managed-online-endpoints", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-deploy-existing-model.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-and-where", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-deploy-custom-docker-image.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-custom-container", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-deploy-continuously-deploy.md", + "redirect_url": "/azure/machine-learning/how-to-safely-rollout-managed-endpoints", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-deploy-azure-kubernetes-service.md", + "redirect_url": "/azure/machine-learning/v1/how-to-deploy-azure-kubernetes-service", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-deploy-azure-container-instance.md", + "redirect_url": "/azure/machine-learning/v1/how-to-deploy-azure-container-instance", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-deploy-app-service.md", + "redirect_url": "/azure/machine-learning/how-to-deploy-managed-online-endpoints", + "redirect_document_id": false + }, + { + 
"source_path_from_root": "/articles/machine-learning/how-to-define-task-type.md", + "redirect_url": "/azure/machine-learning/concept-automated-ml", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-debug-pipelines-application-insights.md", + "redirect_url": "/azure/machine-learning/how-to-log-pipelines-application-insights", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-debug-batch-predictions.md", + "redirect_url": "/azure/machine-learning/how-to-debug-parallel-run-step", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-create-your-first-pipeline.md", + "redirect_url": "/azure/machine-learning/how-to-create-machine-learning-pipelines", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-create-register-datasets.md", + "redirect_url": "/azure/machine-learning/how-to-create-register-data-assets", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-create-labeling-projects.md", + "redirect_url": "/azure/machine-learning/how-to-create-image-labeling-projects", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-create-attach-compute-sdk.md", + "redirect_url": "/azure/machine-learning/how-to-attach-compute-targets", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-compute-cluster-instance-os-upgrade.md", + "redirect_url": "/azure/machine-learning/concept-vulnerability-management", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-auto-train-remote.md", + "redirect_url": "/azure/machine-learning/concept-automated-ml#local-remote", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-attach-compute-targets.md", + "redirect_url": "/azure/machine-learning/v1/how-to-attach-compute-targets", + " redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-attach-arc-kubernetes.md", + "redirect_url": "/azure/machine-learning/how-to-attach-kubernetes-anywhere", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/how-to-access-data.md", + "redirect_url": "/azure/machine-learning/how-to-datastore", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/designer-sample-datasets.md", + "redirect_url": "/azure/machine-learning/sample-designer-datasets", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/use-geo-ai-dsvm.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/release-notes", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/use-deep-learning-dsvm.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/linux-dsvm-walkthrough#deep-learning", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/setup-virtual-machine.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/overview", + "redirect_document_id": false + }, + { + "source_path_from_root": 
"/articles/machine-learning/data-science-virtual-machine/setup-sql-server-virtual-machine.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/reference-windows-vm.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/tools-included", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/reference-deprecation.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/release-notes", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/reference-centos-vm.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/release-notes", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/provision-geo-ai-dsvm.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/release-notes", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/provision-deep-learning-dsvm.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/dsvm-ubuntu-intro", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/linux-dsvm-intro.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/release-notes", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/geo-ai-dsvm-overview.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/release-notes", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/dsvm-tools-overview.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/tools-included", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/dsvm-tools-explore-and-visualize.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/dsvm-tools-productivity", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/dsvm-ml-data-science-tools.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/dsvm-tools-data-science", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/dsvm-languages.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/dsvm-tools-languages", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/dsvm-deep-learning-ai-frameworks.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/dsvm-tools-deep-learning-frameworks", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/dsvm-data-platforms.md", + "redirect_url": "/azure/machine-learning/data-science-virtual-machine/dsvm-tools-data-platforms", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-virtual-machine/deep-learning-dsvm-overview.md", + "redirect_url": 
"/azure/machine-learning/data-science-virtual-machine/overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-process-walkthroughs.md", + "redirect_url": "/azure/architecture/data-science-process/walkthroughs", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-process-walkthroughs-sql-server.md", + "redirect_url": "/azure/architecture/data-science-process/walkthroughs-sql-server", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-process-walkthroughs-sql-data-warehouse.md", + "redirect_url": "/azure/architecture/data-science-process/walkthroughs-sql-data-warehouse", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-process-walkthroughs-spark.md", + "redirect_url": "/azure/architecture/data-science-process/walkthroughs-spark", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-process-walkthroughs-hdinsight-hadoop.md", + "redirect_url": "/azure/architecture/data-science-process/walkthroughs-hdinsight-hadoop", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-process-walkthroughs-azure-data-lake.md", + "redirect_url": "/azure/architecture/data-science-process/walkthroughs-azure-data-lake", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-process-overview.md", + "redirect_url": "/azure/architecture/data-science-process/overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/data-science-process-lifecycle.md", + "redirect_url": "/azure/architecture/data-science-process/lifecycle", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/cortana-intelligence-appsource-publishing-guide.md", + "redirect_url": "https://azure.microsoft.com/overview/ai-platform/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/cortana-intelligence-appsource-evaluation-tool.md", + "redirect_url": "https://azure.microsoft.com/overview/ai-platform/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/cortana-analytics-technical-guide-predictive-maintenance.md", + "redirect_url": "/azure/architecture/data-science-process/cortana-analytics-technical-guide-predictive-maintenance", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/cortana-analytics-playbook-vehicle-telemetry.md", + "redirect_url": "/azure/architecture/data-science-process/cortana-analytics-playbook-vehicle-telemetry", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/cortana-analytics-playbook-vehicle-telemetry-powerbi.md", + "redirect_url": "/azure/architecture/data-science-process/cortana-analytics-playbook-vehicle-telemetry-powerbi", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/cortana-analytics-playbook-vehicle-telemetry-deep-dive.md", + "redirect_url": "/azure/architecture/data-science-process/cortana-analytics-playbook-vehicle-telemetry-deep-dive", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/cortana-analytics-playbook-predictive-maintenance.md", + "redirect_url": 
"/azure/architecture/data-science-process/cortana-analytics-playbook-predictive-maintenance", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/cortana-analytics-architecture-predictive-maintenance.md", + "redirect_url": "/azure/architecture/data-science-process/cortana-analytics-architecture-predictive-maintenance", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/concept-pipeline-practices-tips.md", + "redirect_url": "/azure/machine-learning/how-to-debug-pipelines", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/concept-managed-endpoints.md", + "redirect_url": "/azure/machine-learning/concept-endpoints", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/concept-event-grid-integration.md", + "redirect_url": "/azure/machine-learning/how-to-use-event-grid", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/concept-editions.md", + "redirect_url": "/azure/machine-learning/concept-workspace#wheres-enterprise", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/concept-azure-machine-learning-architecture.md", + "redirect_url": "/azure/machine-learning/concept-azure-machine-learning-v2", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/compare-azure-ml-to-studio-classic.md", + "redirect_url": "/azure/machine-learning/overview-what-is-machine-learning-studio#ml-studio-classic-vs-azure-machine-learning-studio", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/web-services-that-use-import-export-modules.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/web-services-that-use-import-export-modules", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/web-services-logging.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/web-services-logging", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/web-service-parameters.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/web-service-parameters", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/web-service-error-codes.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/web-service-error-codes", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/version-control.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/version-control", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/use-sample-datasets.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/use-sample-datasets", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/use-data-from-an-on-premises-sql-server.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/use-data-from-an-on-premises-sql-server", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/tutorial-part3-credit-risk-deploy.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/tutorial-part3-credit-risk-deploy", + 
"redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/tutorial-part2-credit-risk-train.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/tutorial-part2-credit-risk-train", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/tutorial-part1-credit-risk.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/tutorial-part1-credit-risk", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/support-aml-studio.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/support-aml-studio", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/studio-classic-overview.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/studio-classic-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/sample-experiments.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/sample-experiments", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/retrain-machine-learning-model.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/retrain-machine-learning-model", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/retrain-classic-web-service.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/retrain-classic-web-service", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/retired-data-science-for-beginners-videos.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/retired-data-science-for-beginners-videos", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/r-get-started.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/r-get-started", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/powershell-module.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/powershell-module", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/model-progression-experiment-to-web-service.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/model-progression-experiment-to-web-service", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/migrate-register-dataset.md", + "redirect_url": "/azure/machine-learning/migrate-register-dataset", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/classic/migrate-rebuild-web-service.md", + "redirect_url": "/azure/machine-learning/migrate-rebuild-web-service", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/classic/migrate-rebuild-integrate-with-client-app.md", + "redirect_url": "/azure/machine-learning/migrate-rebuild-integrate-with-client-app", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/classic/migrate-rebuild-experiment.md", + "redirect_url": "/azure/machine-learning/migrate-rebuild-experiment", + "redirect_document_id": true + }, + { + "source_path_from_root": 
"/articles/machine-learning/classic/migrate-overview.md", + "redirect_url": "/azure/machine-learning/migrate-overview", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/classic/migrate-execute-r-script.md", + "redirect_url": "/azure/machine-learning/migrate-execute-r-script", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/classic/manage-workspace.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/manage-workspace", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/manage-web-service-endpoints-using-api-management.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/manage-web-service-endpoints-using-api-management", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/manage-new-webservice.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/manage-new-webservice", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/manage-experiment-iterations.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/manage-experiment-iterations", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/interpret-model-results.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/interpret-model-results", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/index.yml", + "redirect_url": "/previous-versions/azure/machine-learning/classic/index", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/import-data.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/import-data", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/gallery-how-to-use-contribute-publish.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/gallery-how-to-use-contribute-publish", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/export-delete-personal-data-dsr.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/export-delete-personal-data-dsr", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/execute-python-scripts.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/execute-python-scripts", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/excel-add-in-for-web-services.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/excel-add-in-for-web-services", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/evaluate-model-performance.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/evaluate-model-performance", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/deploy-with-resource-manager-template.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/deploy-with-resource-manager-template", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/deploy-consume-web-service-guide.md", + "redirect_url": 
"/previous-versions/azure/machine-learning/classic/deploy-consume-web-service-guide", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/deploy-a-machine-learning-web-service.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/deploy-a-machine-learning-web-service", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/data-science-for-beginners-the-5-questions-data-science-answers.md", + "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/classic/data-science-for-beginners-predict-an-answer-with-a-simple-model.md", + "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/data-science-for-beginners-is-your-data-ready-for-data-science.md", + "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/data-science-for-beginners-copy-other-peoples-work-to-do-data-science.md", + "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/data-science-for-beginners-ask-a-question-you-can-answer-with-data.md", + "redirect_url": "/azure/machine-learning/classic/retired-data-science-for-beginners-videos", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/custom-r-modules.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/custom-r-modules", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/create-workspace.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/create-workspace", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/create-models-and-endpoints-with-powershell.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/create-models-and-endpoints-with-powershell", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/create-experiment.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/create-experiment", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/create-endpoint.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/create-endpoint", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/consuming-from-excel.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/consuming-from-excel", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/consume-web-services.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/consume-web-services", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/azure-ml-netsharp-reference-guide.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/azure-ml-netsharp-reference-guide", + "redirect_document_id": false + }, + { + 
"source_path_from_root": "/articles/machine-learning/classic/algorithm-parameters-optimize.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/algorithm-parameters-optimize", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/classic/ai-gallery-control-personal-data-dsr.md", + "redirect_url": "/previous-versions/azure/machine-learning/classic/ai-gallery-control-personal-data-dsr", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/web-service-input-output.md", + "redirect_url": "/azure/machine-learning/component-reference/web-service-input-output", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/two-class-support-vector-machine.md", + "redirect_url": "/azure/machine-learning/component-reference/two-class-support-vector-machine", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/two-class-neural-network.md", + "redirect_url": "/azure/machine-learning/component-reference/two-class-neural-network", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/two-class-logistic-regression.md", + "redirect_url": "/azure/machine-learning/component-reference/two-class-logistic-regression", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/two-class-decision-forest.md", + "redirect_url": "/azure/machine-learning/component-reference/two-class-decision-forest", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/two-class-boosted-decision-tree.md", + "redirect_url": "/azure/machine-learning/component-reference/two-class-boosted-decision-tree", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/two-class-averaged-perceptron.md", + "redirect_url": "/azure/machine-learning/component-reference/two-class-averaged-perceptron", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/tune-model-hyperparameters.md", + "redirect_url": "/azure/machine-learning/component-reference/tune-model-hyperparameters", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/train-wide-and-deep-recommender.md", + "redirect_url": "/azure/machine-learning/component-reference/train-wide-and-deep-recommender", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/train-vowpal-wabbit-model.md", + "redirect_url": "/azure/machine-learning/component-reference/train-vowpal-wabbit-model", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/train-svd-recommender.md", + "redirect_url": "/azure/machine-learning/component-reference/train-svd-recommender", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/train-pytorch-model.md", + "redirect_url": "/azure/machine-learning/component-reference/train-pytorch-model", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/train-model.md", + 
"redirect_url": "/azure/machine-learning/component-reference/train-model", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/train-clustering-model.md", + "redirect_url": "/azure/machine-learning/component-reference/train-clustering-model", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/train-anomaly-detection-model.md", + "redirect_url": "/azure/machine-learning/component-reference/train-anomaly-detection-model", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/summarize-data.md", + "redirect_url": "/azure/machine-learning/component-reference/summarize-data", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/split-image-directory.md", + "redirect_url": "/azure/machine-learning/component-reference/split-image-directory", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/split-data.md", + "redirect_url": "/azure/machine-learning/component-reference/split-data", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/smote.md", + "redirect_url": "/azure/machine-learning/component-reference/smote", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/select-columns-transform.md", + "redirect_url": "/azure/machine-learning/component-reference/select-columns-transform", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/select-columns-in-dataset.md", + "redirect_url": "/azure/machine-learning/component-reference/select-columns-in-dataset", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/score-wide-and-deep-recommender.md", + "redirect_url": "/azure/machine-learning/component-reference/score-wide-and-deep-recommender", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/score-vowpal-wabbit-model.md", + "redirect_url": "/azure/machine-learning/component-reference/score-vowpal-wabbit-model", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/score-svd-recommender.md", + "redirect_url": "/azure/machine-learning/component-reference/score-svd-recommender", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/score-model.md", + "redirect_url": "/azure/machine-learning/component-reference/score-model", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/score-image-model.md", + "redirect_url": "/azure/machine-learning/component-reference/score-image-model", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/resnet.md", + "redirect_url": "/azure/machine-learning/component-reference/resnet", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/remove-duplicate-rows.md", + "redirect_url": "/azure/machine-learning/component-reference/remove-duplicate-rows", + 
"redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/preprocess-text.md", + "redirect_url": "/azure/machine-learning/component-reference/preprocess-text", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/poisson-regression.md", + "redirect_url": "/azure/machine-learning/component-reference/poisson-regression", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/permutation-feature-importance.md", + "redirect_url": "/azure/machine-learning/component-reference/permutation-feature-importance", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/pca-based-anomaly-detection.md", + "redirect_url": "/azure/machine-learning/component-reference/pca-based-anomaly-detection", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/partition-and-sample.md", + "redirect_url": "/azure/machine-learning/component-reference/partition-and-sample", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/one-vs-one-multiclass.md", + "redirect_url": "/azure/machine-learning/component-reference/one-vs-one-multiclass", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/one-vs-all-multiclass.md", + "redirect_url": "/azure/machine-learning/component-reference/one-vs-all-multiclass", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/normalize-data.md", + "redirect_url": "/azure/machine-learning/component-reference/normalize-data", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/neural-network-regression.md", + "redirect_url": "/azure/machine-learning/component-reference/neural-network-regression", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/multiclass-neural-network.md", + "redirect_url": "/azure/machine-learning/component-reference/multiclass-neural-network", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/multiclass-logistic-regression.md", + "redirect_url": "/azure/machine-learning/component-reference/multiclass-logistic-regression", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/multiclass-decision-forest.md", + "redirect_url": "/azure/machine-learning/component-reference/multiclass-decision-forest", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/multiclass-boosted-decision-tree.md", + "redirect_url": "/azure/machine-learning/component-reference/multiclass-boosted-decision-tree", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/module-reference.md", + "redirect_url": "/azure/machine-learning/component-reference/component-reference", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/linear-regression.md", + "redirect_url": 
"/azure/machine-learning/component-reference/linear-regression", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/latent-dirichlet-allocation.md", + "redirect_url": "/azure/machine-learning/component-reference/latent-dirichlet-allocation", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/k-means-clustering.md", + "redirect_url": "/azure/machine-learning/component-reference/k-means-clustering", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/join-data.md", + "redirect_url": "/azure/machine-learning/component-reference/join-data", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/init-image-transformation.md", + "redirect_url": "/azure/machine-learning/component-reference/init-image-transformation", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/import-from-web-url-via-http.md", + "redirect_url": "/azure/machine-learning/algorithm-module-reference/module-reference", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/import-from-azure-sql-database.md", + "redirect_url": "/azure/machine-learning/algorithm-module-reference/module-reference", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/import-from-azure-blob-storage.md", + "redirect_url": "/azure/machine-learning/algorithm-module-reference/module-reference", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/import-data.md", + "redirect_url": "/azure/machine-learning/component-reference/import-data", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/group-data-into-bins.md", + "redirect_url": "/azure/machine-learning/component-reference/group-data-into-bins", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/graph-search-syntax.md", + "redirect_url": "/azure/machine-learning/component-reference/graph-search-syntax", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/filter-based-feature-selection.md", + "redirect_url": "/azure/machine-learning/component-reference/filter-based-feature-selection", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/feature-hashing.md", + "redirect_url": "/azure/machine-learning/component-reference/feature-hashing", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/fast-forest-quantile-regression.md", + "redirect_url": "/azure/machine-learning/component-reference/fast-forest-quantile-regression", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/extract-n-gram-features-from-text.md", + "redirect_url": "/azure/machine-learning/component-reference/extract-n-gram-features-from-text", + "redirect_document_id": true + }, + { + "source_path_from_root": 
"/articles/machine-learning/algorithm-module-reference/export-data.md", + "redirect_url": "/azure/machine-learning/component-reference/export-data", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/execute-r-script.md", + "redirect_url": "/azure/machine-learning/component-reference/execute-r-script", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/execute-python-script.md", + "redirect_url": "/azure/machine-learning/component-reference/execute-python-script", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/evaluate-recommender.md", + "redirect_url": "/azure/machine-learning/component-reference/evaluate-recommender", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/evaluate-model.md", + "redirect_url": "/azure/machine-learning/component-reference/evaluate-model", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/enter-data-manually.md", + "redirect_url": "/azure/machine-learning/component-reference/enter-data-manually", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/edit-metadata.md", + "redirect_url": "/azure/machine-learning/component-reference/edit-metadata", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/designer-error-codes.md", + "redirect_url": "/azure/machine-learning/component-reference/designer-error-codes", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/densenet.md", + "redirect_url": "/azure/machine-learning/component-reference/densenet", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/decision-forest-regression.md", + "redirect_url": "/azure/machine-learning/component-reference/decision-forest-regression", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/cross-validate-model.md", + "redirect_url": "/azure/machine-learning/component-reference/cross-validate-model", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/create-python-model.md", + "redirect_url": "/azure/machine-learning/component-reference/create-python-model", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/convert-word-to-vector.md", + "redirect_url": "/azure/machine-learning/component-reference/convert-word-to-vector", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/convert-to-indicator-values.md", + "redirect_url": "/azure/machine-learning/component-reference/convert-to-indicator-values", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/convert-to-image-directory.md", + "redirect_url": "/azure/machine-learning/component-reference/convert-to-image-directory", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/convert-to-dataset.md", + 
"redirect_url": "/azure/machine-learning/component-reference/convert-to-dataset", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/convert-to-csv.md", + "redirect_url": "/azure/machine-learning/component-reference/convert-to-csv", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/clip-values.md", + "redirect_url": "/azure/machine-learning/component-reference/clip-values", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/clean-missing-data.md", + "redirect_url": "/azure/machine-learning/component-reference/clean-missing-data", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/boosted-decision-tree-regression.md", + "redirect_url": "/azure/machine-learning/component-reference/boosted-decision-tree-regression", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/assign-data-to-clusters.md", + "redirect_url": "/azure/machine-learning/component-reference/assign-data-to-clusters", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/apply-transformation.md", + "redirect_url": "/azure/machine-learning/component-reference/apply-transformation", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/apply-sql-transformation.md", + "redirect_url": "/azure/machine-learning/component-reference/apply-sql-transformation", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/apply-math-operation.md", + "redirect_url": "/azure/machine-learning/component-reference/apply-math-operation", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/apply-image-transformation.md", + "redirect_url": "/azure/machine-learning/component-reference/apply-image-transformation", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/add-rows.md", + "redirect_url": "/azure/machine-learning/component-reference/add-rows", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/machine-learning/algorithm-module-reference/add-columns.md", + "redirect_url": "/azure/machine-learning/component-reference/add-columns", + "redirect_document_id": true + } +] +} \ No newline at end of file diff --git a/articles/machine-learning/azure-machine-learning-release-notes-cli-v2.md b/articles/machine-learning/azure-machine-learning-release-notes-cli-v2.md index 6eca0900e885f..89514f760282e 100644 --- a/articles/machine-learning/azure-machine-learning-release-notes-cli-v2.md +++ b/articles/machine-learning/azure-machine-learning-release-notes-cli-v2.md @@ -23,6 +23,51 @@ In this article, learn about Azure Machine Learning CLI (v2) releases. __RSS feed__: Get notified when this page is updated by copying and pasting the following URL into your feed reader: `https://docs.microsoft.com/api/search/rss?search=%22Azure+machine+learning+release+notes-v2%22&locale=en-us` +## 2022-05-24 + +### Azure Machine Learning CLI (v2) v2.4.0 + +- The Azure Machine Learning CLI (v2) is now GA. +- `az ml job` + - The command group is marked as GA. 
+ - Added AutoML job type in public preview. + - Added `schedules` property to pipeline job in public preview. + - Added an option to list only archived jobs. + - Improved reliability of `az ml job download` command. +- `az ml data` + - The command group is marked as GA. + - Added MLTable data type in public preview. + - Added an option to list only archived data assets. +- `az ml environment` + - Added an option to list only archived environments. +- `az ml model` + - The command group is marked as GA. + - Allow models to be created from job outputs. + - Added an option to list only archived models. +- `az ml online-deployment` + - The command group is marked as GA. + - Removed timeout waiting for deployment creation. + - Improved online deployment list view. +- `az ml online-endpoint` + - The command group is marked as GA. + - Added `mirror_traffic` property to online endpoints in public preview. + - Improved online endpoint list view. +- `az ml batch-deployment` + - The command group is marked as GA. + - Added support for `uri_file` and `uri_folder` as invocation input. + - Fixed a bug in batch deployment update. + - Fixed a bug in batch deployment list-jobs output. +- `az ml batch-endpoint` + - The command group is marked as GA. + - Added support for `uri_file` and `uri_folder` as invocation input. + - Fixed a bug in batch endpoint update. + - Fixed a bug in batch endpoint list-jobs output. +- `az ml component` + - The command group is marked as GA. + - Added an option to list only archived components. +- `az ml code` + - This command group is removed. + ## 2022-03-14 ### Azure Machine Learning CLI (v2) v2.2.1 diff --git a/articles/machine-learning/component-reference/import-data.md b/articles/machine-learning/component-reference/import-data.md index be4c53c534b3d..db87ed03eb828 100644 --- a/articles/machine-learning/component-reference/import-data.md +++ b/articles/machine-learning/component-reference/import-data.md @@ -19,7 +19,7 @@ This article describes a component in Azure Machine Learning designer. Use this component to load data into a machine learning pipeline from existing cloud data services. > [!Note] -> All functionality provided by this component can be done by **datastore** and **datasets** in the worksapce landing page. We recommend you use **datastore** and **dataset** which includes additional features like data monitoring. To learn more, see [How to Access Data](../v1/how-to-access-data.md) and [How to Register Datasets](../v1/how-to-create-register-datasets.md) article. +> All functionality provided by this component can be done with **datastores** and **datasets** in the workspace landing page. We recommend that you use **datastores** and **datasets**, which include additional features like data monitoring. To learn more, see the [How to Access Data](../v1/how-to-access-data.md) and [How to Register Datasets](../v1/how-to-create-register-datasets.md) articles. > After you register a dataset, you can find it in the **Datasets** -> **My Datasets** category in designer interface. This component is reserved for Studio(classic) users to for a familiar experience. 
> diff --git a/articles/machine-learning/component-reference/media/module/aml-comparing2models.png b/articles/machine-learning/component-reference/media/module/aml-comparing2models.png deleted file mode 100644 index d1ebcd75bce0f..0000000000000 Binary files a/articles/machine-learning/component-reference/media/module/aml-comparing2models.png and /dev/null differ diff --git a/articles/machine-learning/component-reference/media/module/aml-create-python-model.png b/articles/machine-learning/component-reference/media/module/aml-create-python-model.png deleted file mode 100644 index 845d00a86c7b1..0000000000000 Binary files a/articles/machine-learning/component-reference/media/module/aml-create-python-model.png and /dev/null differ diff --git a/articles/machine-learning/component-reference/media/module/upload-image-in-r-script.png b/articles/machine-learning/component-reference/media/module/upload-image-in-r-script.png deleted file mode 100644 index 5ab02972d651c..0000000000000 Binary files a/articles/machine-learning/component-reference/media/module/upload-image-in-r-script.png and /dev/null differ diff --git a/articles/machine-learning/component-reference/media/partition-and-sample/partition-and-sample-lg.png b/articles/machine-learning/component-reference/media/partition-and-sample/partition-and-sample-lg.png deleted file mode 100644 index 1f8f17caf4421..0000000000000 Binary files a/articles/machine-learning/component-reference/media/partition-and-sample/partition-and-sample-lg.png and /dev/null differ diff --git a/articles/machine-learning/component-reference/media/partition-and-sample/partition-and-sample.png b/articles/machine-learning/component-reference/media/partition-and-sample/partition-and-sample.png deleted file mode 100644 index 3eee1adc50ba7..0000000000000 Binary files a/articles/machine-learning/component-reference/media/partition-and-sample/partition-and-sample.png and /dev/null differ diff --git a/articles/machine-learning/component-reference/media/search/graph-search.gif b/articles/machine-learning/component-reference/media/search/graph-search.gif deleted file mode 100644 index 81d2e36a6f938..0000000000000 Binary files a/articles/machine-learning/component-reference/media/search/graph-search.gif and /dev/null differ diff --git a/articles/machine-learning/component-reference/media/search/graph-search.png b/articles/machine-learning/component-reference/media/search/graph-search.png deleted file mode 100644 index f8a5a5103ad93..0000000000000 Binary files a/articles/machine-learning/component-reference/media/search/graph-search.png and /dev/null differ diff --git a/articles/machine-learning/concept-compute-target.md b/articles/machine-learning/concept-compute-target.md index e173dad557644..7e47733a6e455 100644 --- a/articles/machine-learning/concept-compute-target.md +++ b/articles/machine-learning/concept-compute-target.md @@ -162,7 +162,7 @@ Azure Machine Learning supports the following unmanaged compute types: * Azure Databricks * Azure Data Lake Analytics * Azure Container Instance -* Azure Kubernetes Service & Azure Arc-enabled Kubernetes (preview) +* Kubernetes For more information, see [set up compute targets for model training and deployment](how-to-attach-compute-targets.md) diff --git a/articles/machine-learning/concept-data.md b/articles/machine-learning/concept-data.md index e3de84959ab24..2cc865ce27964 100644 --- a/articles/machine-learning/concept-data.md +++ b/articles/machine-learning/concept-data.md @@ -1,14 +1,14 @@ --- title: Data access titleSuffix: Azure 
Machine Learning -description: Learn how to connect to your data storage on Azure with Azure Machine Learning. +description: Learn how to access and process data in Azure Machine Learning services: machine-learning ms.service: machine-learning ms.subservice: enterprise-readiness ms.topic: conceptual -ms.reviewer: nibaccam -author: blackmist -ms.author: larryfr +ms.reviewer: larryfr +author: samuel100 +ms.author: samkemp ms.date: 05/11/2022 ms.custom: devx-track-python, data4ml, event-tier1-build-2022 #Customer intent: As an experienced Python developer, I need to securely access my data in my Azure storage solutions and use it to accomplish my machine learning tasks. @@ -20,28 +20,162 @@ ms.custom: devx-track-python, data4ml, event-tier1-build-2022 > * [v1](./v1/concept-data.md) > * [v2 (current version)](concept-data.md) -Azure Machine Learning makes it easy to connect to your data in the cloud. It provides an abstraction layer over the underlying storage service, so you can securely access and work with your data without having to write code specific to your storage type. Azure Machine Learning also provides the following data capabilities: +Azure Machine Learning lets you bring data from a local machine or an existing cloud-based storage. In this article you will learn the main data concepts in Azure Machine Learning, including: -* Interoperability with Pandas and Spark DataFrames -* Versioning and tracking of data lineage -* Data labeling (V1 only for now) +> [!div class="checklist"] +> - [**URIs**](#uris) - A **U**niform **R**esource **I**dentifier that is a reference to a storage location on your local computer or in the cloud that makes it very easy to access data in your jobs. +> - [**Data asset**](#data-asset) - Create data assets in your workspace to share with team members, version, and track data lineage. +> - [**Datastore**](#datastore) - Azure Machine Learning Datastores securely keep the connection information to your data storage on Azure, so you don't have to code it in your scripts. +> - [**MLTable**](#mltable) - a method to abstract the schema definition for tabular data so that it is easier for consumers of the data to materialize the table into a Pandas/Dask/Spark dataframe. -You can bring data to Azure Machine Learning +## URIs +A URI (uniform resource identifier) represents a storage location on your local computer, an attached Datastore, blob/ADLS storage, or a publicly available http(s) location. In addition to local paths (for example: `./path_to_my_data/`), several different protocols are supported for cloud storage locations: -* Directly from your local machine and URLs +- `http(s)` - Private/Public Azure Blob Storage Locations, or publicly available http(s) location +- `abfs(s)` - Azure Data Lake Storage Gen2 storage location +- `azureml` - An Azure Machine Learning [Datastore](#datastore) location -* That's already in a cloud-based storage service in Azure and access it using your [Azure storage account](../storage/common/storage-account-create.md?tabs=azure-portal) related credentials and an Azure Machine Learning datastore. +Azure Machine Learning distinguishes two types of URIs: - -## Connect to storage with datastores +Data type | Description | Examples +---|------|--- +`uri_file` | Refers to a specific **file** location | `https://.blob.core.windows.net///`
                  `azureml://datastores//paths//`
                  `abfss://@.dfs.core.windows.net//` +`uri_folder`| Refers to a specific **folder** location | `https://.blob.core.windows.net//`
                  `azureml://datastores//paths/`
                  `abfss://@.dfs.core.windows.net//` + +URIs are mapped to the filesystem on the compute target, hence using URIs is like using files or folders in the command that consumes/produces them. URIs leverage **identity-based authentication** to connect to storage services with either your Azure Active Directory ID (default) or Managed Identity. + +> [!TIP] +> For data located in an Azure storage account we recommend using the [Azure Storage Explorer](https://azure.microsoft.com/features/storage-explorer/#overview). You can browse data and obtain the URI for any file/folder by right-selecting **Copy URL**: +> :::image type="content" source="media/concept-data/use-storage-explorer.png" alt-text="Screenshot of the Storage Explorer with Copy URL highlighted."::: + +### Examples + +# [`uri_file`](#tab/uri-file-example) + +Below is an example of a job specification that shows how to access a file from a public blob store. In this example, the job executes the Linux `ls` command. + +```yml +# hello-data-uri-file.yml +$schema: https://azuremlschemas.azureedge.net/latest/commandJob.schema.json +command: | + ls ${{inputs.my_csv_file}} + +inputs: + my_csv_file: + type: uri_file + path: https://azuremlexamples.blob.core.windows.net/datasets/titanic.csv +environment: azureml:AzureML-sklearn-1.0-ubuntu20.04-py38-cpu@latest +compute: azureml:cpu-cluster +``` + +Create the job using the CLI: + +```azurecli +az ml job create --file hello-data-uri-file.yml +``` + +When the job has completed the user logs will show the standard output of the Linux command `ls ${{inputs.my_csv_file}}`: + +:::image type="content" source="media/concept-data/uri-file.png" alt-text="Screenshot of the job log showing URI file output."::: + +Notice that the file has been mapped to the filesystem on the compute target and `${{inputs.my_csv_file}}` resolves to that location. + +# [`uri_folder`](#tab/uri-folder-example) + +In the case where you want to map a **folder** to the filesystem of the compute target, you define the `uri_folder` type in your job specification file: + +```yml +# hello-data-uri-folder.yml +$schema: https://azuremlschemas.azureedge.net/latest/commandJob.schema.json +command: | + ls ${{inputs.sampledata}} +inputs: + sampledata: + type: uri_folder + path: https://.blob.core.windows.net// +environment: azureml:AzureML-sklearn-1.0-ubuntu20.04-py38-cpu@latest +compute: azureml:cpu-cluster +``` + +Create the job using the CLI: + +```azurecli +az ml job create --file hello-data-uri-folder.yml +``` + +When the job has completed the user logs will show the standard output of the Linux command `ls ${{inputs.sampledata}}`: + +:::image type="content" source="media/concept-data/uri-folder.png" alt-text="Screenshot of the job log showing the URI folder output"::: + +Notice that the folder has been mapped to the filesystem on the compute target (you can see all the files in the folder), and `${{inputs.sampledata}}` resolves to the folder location. + +--- -Azure Machine Learning datastores securely keep the connection information to your data storage on Azure, so you don't have to code it in your scripts. +## Data asset -You can access your data and create datastores with, -* [Credential-based data authentication](how-to-access-data.md), like a service principal or shared access signature (SAS) token. These credentials can be accessed by users who have *Reader* access to the workspace. -* Identity-based data authentication to connect to storage services with your Azure Active Directory ID. 
+Azure Machine Learning allows you to create and version data assets in a workspace so that other members of your team can easily consume the data asset by using a name/version. + +### Example usage + + +# [Create data asset](#tab/cli-data-create-example) +To create a data asset, firstly define a data specification in a YAML file that provides a name, type and path for the data: + +```yml +# data-example.yml +$schema: https://azuremlschemas.azureedge.net/latest/data.schema.json +name: +description: +type: # uri_file, uri_folder, mltable +path: https://.blob.core.windows.net//path +``` -The following table summarizes which cloud-based storage services in Azure can be registered as datastores and what authentication type can be used to access them. +Then in the CLI, create the data asset: + +```azurecli +az ml data create --file data-example.yml --version 1 +``` + +# [Consume data asset](#tab/cli-data-consume-example) + +To consume a data asset in a job, define your job specification in a YAML file the path to be `azureml::`, for example: + +```yml +# hello-data-uri-file.yml +$schema: https://azuremlschemas.azureedge.net/latest/commandJob.schema.json +command: | + ls ${{inputs.sampledata}} +code: src +inputs: + sampledata: + type: # uri_file, uri_folder, mltable + path: azureml:@latest +environment: azureml:@latest +compute: azureml: +``` + +Next, use the CLI to create your job: + +```azurecli +az ml job create --file hello-data-uri-file.yml +``` + +--- + +## Datastore + +An Azure Machine Learning datastore is a *reference* to an *existing* storage account on Azure. The benefits of creating and using a datastore are: + +1. A common and easy-to-use API to interact with different storage types (Blob/Files/ADLS). +1. Easier to discover useful datastores when working as a team. +1. When using credential-based access (service principal/SAS/key), the connection information is secured so you don't have to code it in your scripts. + +When you create a datastore with an existing storage account on Azure, you have the choice between two different authentication methods: + +- **Credential-based** - authenticate access to the data using a service principal, shared access signature (SAS) token or account key. These credentials can be accessed by users who have *Reader* access to the workspace. +- **Identity-based** - authenticate access to the data using your Azure Active Directory identity or managed identity. + +The table below summarizes which cloud-based storage services in Azure can be created as an Azure Machine Learning datastore and what authentication type can be used to access them. Supported storage service | Credential-based authentication | Identity-based authentication |---|:----:|:---:| @@ -50,26 +184,167 @@ Azure File Share| ✓ | | Azure Data Lake Gen1 | ✓ | ✓| Azure Data Lake Gen2| ✓ | ✓| +> [!NOTE] +> The URI format to refer to a file/folder/mltable on a datastore is: +> `azureml://datastores//paths/` -## Work with data -You can read in data from a datastore or directly from storage uri's. +## MLTable +`mltable` is a way to abstract the schema definition for tabular data so that it is easier for consumers of the data to materialize the table into a Pandas/Dask/Spark dataframe. -In Azure Machine Learning there are three types for data +> [!TIP] +> The ideal scenarios to use `mltable` are: +> - The schema of your data is complex and/or changes frequently. +> - You only need a subset of data (for example: a sample of rows or files, specific columns, etc). 
+> - AutoML jobs requiring tabular data. +> +> If your scenario does not fit the above then it is likely that URIs are a more suitable type. -Data type | Description | Example ----|------|--- -`uri_file` | Refers to a specific file | `https://.blob.core.windows.net//path/file.csv`. -`uri_folder`| Refers to a specific folder |`https://.blob.core.windows.net//path` -`mltable` |Defines tabular data for use in automated ML and parallel jobs| Schema and subsetting transforms +### A motivating example + +Imagine a scenario where you have many text files in a folder: + +```text +├── my_data +│ ├── file1.txt +│ ├── file1_use_this.txt +│ ├── file2.txt +│ ├── file2_use_this.txt +. +. +. +│ ├── file1000.txt +│ ├── file1000_use_this.txt +``` -In the following example, the expectation is to provide a `uri_folder` because to read the file in, the training script creates a path that joins the folder with the file name. If you want to pass in just an individual file rather than the entire folder you can use the `uri_file` type. +Each text file has the following structure: + +```text +store_location date zip_code amount x y z noise_col1 noise_col2 +Seattle 20/04/2022 12324 123.4 true false true blah blah +. +. +. +London 20/04/2022 XX358YY 156 true true true blah blah +``` + +Some important features of this data are: + +- The data of interest is only in files that have the following suffix: `_use_this.txt` and other file names that don't match should be ignored. +- The date should be represented as a date and not a string. +- The x, y, z columns are booleans, not strings. +- The store location is an index that is useful for generating subsets of data. +- The file is encoded in `ascii` format. +- Every file in the folder contains the same header. +- The first million records for zip_code are numeric but later on you can see they're alphanumeric. +- There are some dummy (noisy) columns in the data that aren't useful for machine learning. + +You could materialize the above text files into a dataframe using Pandas and a URI: ```python - file_name = os.path.join(args.input_folder, "MY_CSV_FILE.csv") -df = pd.read_csv(file_name) +import glob +import datetime +import os +import argparse +import pandas as pd + +parser = argparse.ArgumentParser() +parser.add_argument("--input_folder", type=str) +args = parser.parse_args() + +path = os.path.join(args.input_folder, "*_use_this.txt") +files = glob.glob(path) + +# create empty list +dfl = [] + +# dict of column types +col_types = { + "zip": str, + "date": datetime.date, + "x": bool, + "y": bool, + "z": bool +} + +# enumerate files into a list of dfs +for f in files: + csv = pd.read_table( + path=f, + delimiter=" ", + header=0, + usecols=["store_location", "zip_code", "date", "amount", "x", "y", "z"], + dtype=col_types, + encoding='ascii' + ) + dfl.append(csv) + +# concatenate the list of dataframes +df = pd.concat(dfl) +# set the index column +df.index_columns("store_location") +``` + +However, it will be the responsibility of the *consumer* of the data asset to parse the schema into a dataframe. In the scenario defined above, that means the consumers will need to independently ascertain the Python code to materialize the data into a dataframe. + +Passing responsibility to the consumer of the data asset will cause problems when: + +- **The schema changes (for example, a column name changes):** All consumers of the data must update their Python code independently. Other examples can be type changes, columns being added/removed, encoding change, etc. 
+- **The data size increases** - If the data gets too large for Pandas to process, then all the consumers of the data need to switch to a more scalable library (PySpark/Dask). + +Under the above two conditions, `mltable` can help because it enables the creator of the data asset to define the schema in a single file and the consumers can materialize the data into a dataframe easily without needing to write Python code to parse the schema. For the above example, the creator of the data asset defines an MLTable file **in the same directory** as the data: + +```text +├── my_data +│ ├── MLTable +│ ├── file1.txt +│ ├── file1_use_this.txt +. +. +. +``` + +The MLTable file has the following definition that specifies how the data should be processed into a dataframe: + +```yaml +type: mltable + +paths: + - pattern: ./*_use_this.txt + +traits: + - index_columns: store_location + +transformations: + - read_delimited: + encoding: ascii + header: all_files_have_same_headers + delimiter: " " + - keep_columns: ["store_location", "zip_code", "date", "amount", "x", "y", "z"] + - convert_column_types: + - columns: ["x", "y", "z"] + to_type: boolean + - columns: "date" + to_type: datetime +``` + +The consumers can read the data into dataframe using three lines of Python code: + +```python +import mltable + +tbl = mltable.load("./my_data") +df = tbl.to_pandas_dataframe() ``` +If the schema of the data changes, then it can be updated in a single place (the MLTable file) rather than having to make code changes in multiple places. + +Just like `uri_file` and `uri_folder`, you can create a data asset with `mltable` types. + ## Next steps -* [Work with data using SDK v2](how-to-use-data.md) +- [Install and set up the CLI (v2)](how-to-configure-cli.md#install-and-set-up-the-cli-v2) +- [Create datastores](how-to-datastore.md#create-datastores) +- [Create data assets](how-to-create-register-data-assets.md#create-data-assets) +- [Read and write data in a job](how-to-read-write-data-v2.md#read-and-write-data-in-a-job) +- [Data administration](how-to-administrate-data-authentication.md#data-administration) \ No newline at end of file diff --git a/articles/machine-learning/concept-endpoints.md b/articles/machine-learning/concept-endpoints.md index ea36e88d92edd..d0795d5d89762 100644 --- a/articles/machine-learning/concept-endpoints.md +++ b/articles/machine-learning/concept-endpoints.md @@ -214,8 +214,6 @@ You can use the following options for input data when invoking a batch endpoint: For more information on supported input options, see [Batch scoring with batch endpoint](how-to-use-batch-endpoint.md#invoke-the-batch-endpoint-with-different-input-options). -For more information on supported input options, see [Batch scoring with batch endpoint](how-to-use-batch-endpoint.md#invoke-the-batch-endpoint-with-different-input-options). - Specify the storage output location to any datastore and path. By default, batch endpoints store their output to the workspace's default blob store, organized by the Job Name (a system-generated GUID). 
### Security diff --git a/articles/machine-learning/concept-network-data-access.md b/articles/machine-learning/concept-network-data-access.md index 8075c7054d255..3d1936d18d7c7 100644 --- a/articles/machine-learning/concept-network-data-access.md +++ b/articles/machine-learning/concept-network-data-access.md @@ -136,4 +136,4 @@ To secure communication between Azure Machine Learning and Azure SQL Database, t ## Next steps -For information on enabling studio in a network, see [Use Azure Machine Learning studio in an Azure Virtual Network](how-to-enable-studio-virtual-network.md). \ No newline at end of file +For information on enabling studio in a network, see [Use Azure Machine Learning studio in an Azure Virtual Network](how-to-enable-studio-virtual-network.md). diff --git a/articles/machine-learning/concept-open-source.md b/articles/machine-learning/concept-open-source.md index 21df91f694fd2..8d593c2e7b549 100644 --- a/articles/machine-learning/concept-open-source.md +++ b/articles/machine-learning/concept-open-source.md @@ -97,6 +97,6 @@ Securing deployments is an important part of the deployment process. To [deploy Machine Learning Operations (MLOps), commonly thought of as DevOps for machine learning allows you to build more transparent, resilient, and reproducible machine learning workflows. See the [what is MLOps article](./concept-model-management-and-deployment.md) to learn more about MLOps. -Using DevOps practices like continuous integration (CI) and continuous deployment (CD), you can automate the end-to-end machine learning lifecycle and capture governance data around it. You can define your [machine learning CI/CD pipeline in GitHub actions](./how-to-github-actions-machine-learning.md) to run Azure Machine Learning training and deployment tasks. +Using DevOps practices like continuous integration (CI) and continuous deployment (CD), you can automate the end-to-end machine learning lifecycle and capture governance data around it. You can define your [machine learning CI/CD pipeline in GitHub Actions](./how-to-github-actions-machine-learning.md) to run Azure Machine Learning training and deployment tasks. Capturing software dependencies, metrics, metadata, data and model versioning are an important part of the MLOps process in order to build transparent, reproducible, and auditable pipelines. For this task, you can [use MLFlow in Azure Machine Learning](how-to-use-mlflow.md) as well as when [training machine learning models in Azure Databricks](./how-to-use-mlflow-azure-databricks.md). You can also [deploy MLflow models as an Azure web service](how-to-deploy-mlflow-models.md). 
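As a rough sketch of the training-and-registration steps such a CI/CD pipeline might run, the following CLI commands are illustrative only: the job file, model name, and job-output path are assumptions rather than values taken from the workflow article, and the `azureml://jobs/...` path format should be verified against the current CLI (v2) reference.

```azurecli
# Hypothetical CI steps, run after authenticating (for example with azure/login in GitHub Actions)
# and setting the default resource group/workspace with `az configure`.
az ml job create --file jobs/train.yml --stream          # submit the training job and stream its logs
az ml model create --name my-model --version 1 \
  --path azureml://jobs/<job-name>/outputs/artifacts/paths/model/   # register the model produced by the job
```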
diff --git a/articles/machine-learning/data-science-virtual-machine/media/caffe2-samples.png b/articles/machine-learning/data-science-virtual-machine/media/caffe2-samples.png deleted file mode 100644 index 1d8106aa2c567..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/caffe2-samples.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/cntk-samples.png b/articles/machine-learning/data-science-virtual-machine/media/cntk-samples.png deleted file mode 100644 index dbbb8dd42f12d..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/cntk-samples.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/create-ubuntu18-dsvm.png b/articles/machine-learning/data-science-virtual-machine/media/create-ubuntu18-dsvm.png deleted file mode 100644 index cd0e3d7ae7c8f..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/create-ubuntu18-dsvm.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/create-ubuntu18-review.png b/articles/machine-learning/data-science-virtual-machine/media/create-ubuntu18-review.png deleted file mode 100644 index 28abb4bdf5413..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/create-ubuntu18-review.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/create-win19-dsvm-review.png b/articles/machine-learning/data-science-virtual-machine/media/create-win19-dsvm-review.png deleted file mode 100644 index da06998c4531f..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/create-win19-dsvm-review.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/create-win19-dsvm.png b/articles/machine-learning/data-science-virtual-machine/media/create-win19-dsvm.png deleted file mode 100644 index ac7fe79db2f6f..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/create-win19-dsvm.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/create-win19-expanded.png b/articles/machine-learning/data-science-virtual-machine/media/create-win19-expanded.png deleted file mode 100644 index 27b06b186f783..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/create-win19-expanded.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/dsvm-provision-step2.png b/articles/machine-learning/data-science-virtual-machine/media/dsvm-provision-step2.png deleted file mode 100644 index d47aae211beac..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/dsvm-provision-step2.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/dsvm-provision-wizard.png b/articles/machine-learning/data-science-virtual-machine/media/dsvm-provision-wizard.png deleted file mode 100644 index 9369713773925..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/dsvm-provision-wizard.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/dsvm-ubuntu-intro/configure-data-science-virtual-machine.png b/articles/machine-learning/data-science-virtual-machine/media/dsvm-ubuntu-intro/configure-data-science-virtual-machine.png 
deleted file mode 100644 index 53e96e9809654..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/dsvm-ubuntu-intro/configure-data-science-virtual-machine.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/dsvm-ubuntu-intro/create-linux-expanded.png b/articles/machine-learning/data-science-virtual-machine/media/dsvm-ubuntu-intro/create-linux-expanded.png deleted file mode 100755 index 89639c9c6ce53..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/dsvm-ubuntu-intro/create-linux-expanded.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/dsvm-ubuntu-intro/create-linux.png b/articles/machine-learning/data-science-virtual-machine/media/dsvm-ubuntu-intro/create-linux.png deleted file mode 100755 index a379e71187c20..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/dsvm-ubuntu-intro/create-linux.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/dsvm-ubuntu-intro/review-create-ubuntu.png b/articles/machine-learning/data-science-virtual-machine/media/dsvm-ubuntu-intro/review-create-ubuntu.png deleted file mode 100755 index 72375fec17af3..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/dsvm-ubuntu-intro/review-create-ubuntu.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/dsvm-ubuntu-intro/search-ubuntu.png b/articles/machine-learning/data-science-virtual-machine/media/dsvm-ubuntu-intro/search-ubuntu.png deleted file mode 100755 index 29c254d485834..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/dsvm-ubuntu-intro/search-ubuntu.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/how-to-track-experiments/mlflow-experiments-9.png b/articles/machine-learning/data-science-virtual-machine/media/how-to-track-experiments/mlflow-experiments-9.png deleted file mode 100644 index 3c2febd57ef4b..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/how-to-track-experiments/mlflow-experiments-9.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/index/i_machine-learning.svg b/articles/machine-learning/data-science-virtual-machine/media/index/i_machine-learning.svg deleted file mode 100644 index a63c07676fdad..0000000000000 --- a/articles/machine-learning/data-science-virtual-machine/media/index/i_machine-learning.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/articles/machine-learning/data-science-virtual-machine/media/index/machine-learning-studio.svg b/articles/machine-learning/data-science-virtual-machine/media/index/machine-learning-studio.svg deleted file mode 100644 index d22ff0e414dca..0000000000000 --- a/articles/machine-learning/data-science-virtual-machine/media/index/machine-learning-studio.svg +++ /dev/null @@ -1,15 +0,0 @@ - - - - - - - - - - diff --git a/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-intro/centos-ip-address.png b/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-intro/centos-ip-address.png deleted file mode 100755 index eab1cc15e3a04..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-intro/centos-ip-address.png and /dev/null differ diff 
--git a/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-intro/configure-linux-data-science-virtual-machine.png b/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-intro/configure-linux-data-science-virtual-machine.png deleted file mode 100644 index 532e48d922475..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-intro/configure-linux-data-science-virtual-machine.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-intro/create-centos-expanded.png b/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-intro/create-centos-expanded.png deleted file mode 100755 index 44e2babe2fa66..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-intro/create-centos-expanded.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-intro/create-centos.png b/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-intro/create-centos.png deleted file mode 100755 index 58cf1574705c4..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-intro/create-centos.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-intro/review-create-centos.png b/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-intro/review-create-centos.png deleted file mode 100755 index 937f4f9363066..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-intro/review-create-centos.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-intro/search-centos.png b/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-intro/search-centos.png deleted file mode 100755 index 926092ee95f16..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-intro/search-centos.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-walkthrough/workspace-id.png b/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-walkthrough/workspace-id.png deleted file mode 100644 index 5403ddbfaae0e..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-walkthrough/workspace-id.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-walkthrough/workspace-token.png b/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-walkthrough/workspace-token.png deleted file mode 100644 index 78a05c667b034..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/linux-dsvm-walkthrough/workspace-token.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/new-search-ubuntu.png b/articles/machine-learning/data-science-virtual-machine/media/new-search-ubuntu.png deleted file mode 100644 index f566ae5af6b65..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/new-search-ubuntu.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/provision-geo-ai-dsvm/ArcGIS-Free-Trial.png b/articles/machine-learning/data-science-virtual-machine/media/provision-geo-ai-dsvm/ArcGIS-Free-Trial.png 
deleted file mode 100644 index 2bac403366daa..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/provision-geo-ai-dsvm/ArcGIS-Free-Trial.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/provision-geo-ai-dsvm/ArcGISLogon.png b/articles/machine-learning/data-science-virtual-machine/media/provision-geo-ai-dsvm/ArcGISLogon.png deleted file mode 100644 index 54421f49b6172..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/provision-geo-ai-dsvm/ArcGISLogon.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/provision-geo-ai-dsvm/Create-Geo-AI.png b/articles/machine-learning/data-science-virtual-machine/media/provision-geo-ai-dsvm/Create-Geo-AI.png deleted file mode 100644 index be2c7b35b42e2..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/provision-geo-ai-dsvm/Create-Geo-AI.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/provision-geo-ai-dsvm/search-geo-ai.png b/articles/machine-learning/data-science-virtual-machine/media/provision-geo-ai-dsvm/search-geo-ai.png deleted file mode 100755 index 14a862ac9cc7d..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/provision-geo-ai-dsvm/search-geo-ai.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/provision-vm/configure-data-science-virtual-machine.png b/articles/machine-learning/data-science-virtual-machine/media/provision-vm/configure-data-science-virtual-machine.png deleted file mode 100644 index 84ea7efd5e8e4..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/provision-vm/configure-data-science-virtual-machine.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/provision-vm/create-windows-expanded.png b/articles/machine-learning/data-science-virtual-machine/media/provision-vm/create-windows-expanded.png deleted file mode 100755 index 67413670aa7b9..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/provision-vm/create-windows-expanded.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/provision-vm/create-windows.png b/articles/machine-learning/data-science-virtual-machine/media/provision-vm/create-windows.png deleted file mode 100755 index f83f4dbf1f86c..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/provision-vm/create-windows.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/provision-vm/review-create-windows.png b/articles/machine-learning/data-science-virtual-machine/media/provision-vm/review-create-windows.png deleted file mode 100755 index 8eb874eb2d456..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/provision-vm/review-create-windows.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/provision-vm/search-windows.png b/articles/machine-learning/data-science-virtual-machine/media/provision-vm/search-windows.png deleted file mode 100755 index a57a6544b4eec..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/provision-vm/search-windows.png and /dev/null differ diff --git 
a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/AML_Model_Results.PNG b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/AML_Model_Results.PNG deleted file mode 100644 index bf9b67c5bd833..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/AML_Model_Results.PNG and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/AML_ReaderBlob_Module_v3.png b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/AML_ReaderBlob_Module_v3.png deleted file mode 100644 index d56e68ee3e7b7..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/AML_ReaderBlob_Module_v3.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/AML_Reader_Hive.PNG b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/AML_Reader_Hive.PNG deleted file mode 100644 index 4e56887f1a7b8..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/AML_Reader_Hive.PNG and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Azure_Data_Lake_PlugIn_v2.PNG b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Azure_Data_Lake_PlugIn_v2.PNG deleted file mode 100644 index 67f6fa95d1aec..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Azure_Data_Lake_PlugIn_v2.PNG and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Create_HDI_dashboard_v3.PNG b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Create_HDI_dashboard_v3.PNG deleted file mode 100644 index bce6999e4e45a..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Create_HDI_dashboard_v3.PNG and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Create_HDI_v4.PNG b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Create_HDI_v4.PNG deleted file mode 100644 index 101a368c759c2..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Create_HDI_v4.PNG and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Create_ML_Space.PNG b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Create_ML_Space.PNG deleted file mode 100644 index 47873f98162f4..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Create_ML_Space.PNG and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Create_ML_Space_step2_v2.PNG b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Create_ML_Space_step2_v2.PNG deleted file mode 100644 index 9be3f0d8f7a7f..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Create_ML_Space_step2_v2.PNG and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/DownSample_Data_For_Modeling_v2.PNG 
b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/DownSample_Data_For_Modeling_v2.PNG deleted file mode 100644 index d1deced6ccbe8..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/DownSample_Data_For_Modeling_v2.PNG and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Exploration_Frequency_tip_or_not_v3.PNG b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Exploration_Frequency_tip_or_not_v3.PNG deleted file mode 100644 index cfc650a4558c0..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Exploration_Frequency_tip_or_not_v3.PNG and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Exploration_Number_Records_by_Month_v3.PNG b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Exploration_Number_Records_by_Month_v3.PNG deleted file mode 100644 index 2b4dfe45e24d7..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Exploration_Number_Records_by_Month_v3.PNG and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Exploration_compute_pickup_dropoff_distance_v2.PNG b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Exploration_compute_pickup_dropoff_distance_v2.PNG deleted file mode 100644 index 9a318061e84ed..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Exploration_compute_pickup_dropoff_distance_v2.PNG and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Exploration_direct_distance_trip_distance_v2.PNG b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Exploration_direct_distance_trip_distance_v2.PNG deleted file mode 100644 index 2641795fc5fc7..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Exploration_direct_distance_trip_distance_v2.PNG and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Link_Blob_to_ADLA_v2.PNG b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Link_Blob_to_ADLA_v2.PNG deleted file mode 100644 index 604102a289370..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Link_Blob_to_ADLA_v2.PNG and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Python_View_Existing_Tables_Hive_v3.PNG b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Python_View_Existing_Tables_Hive_v3.PNG deleted file mode 100644 index e7d51b1f8df9b..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/Python_View_Existing_Tables_Hive_v3.PNG and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/USQL_Job_Status.PNG b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/USQL_Job_Status.PNG deleted file mode 100644 index 0424050720f2a..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/USQL_Job_Status.PNG and /dev/null differ diff 
--git a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/USQL_create_summary.png b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/USQL_create_summary.png deleted file mode 100644 index 49ff0fb4175d9..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/USQL_create_summary.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/USQL_tripdata_summary.png b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/USQL_tripdata_summary.png deleted file mode 100644 index ab5a45ced08fc..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/USQL_tripdata_summary.png and /dev/null differ diff --git a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/azure-data-lake-create-v3.png b/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/azure-data-lake-create-v3.png deleted file mode 100644 index e47bc1179b2cd..0000000000000 Binary files a/articles/machine-learning/data-science-virtual-machine/media/vm-do-ten-things/azure-data-lake-create-v3.png and /dev/null differ diff --git a/articles/machine-learning/how-to-access-azureml-behind-firewall.md b/articles/machine-learning/how-to-access-azureml-behind-firewall.md index 724d03754100d..8604b15ffb429 100644 --- a/articles/machine-learning/how-to-access-azureml-behind-firewall.md +++ b/articles/machine-learning/how-to-access-azureml-behind-firewall.md @@ -9,7 +9,7 @@ ms.topic: how-to ms.author: jhirono author: jhirono ms.reviewer: larryfr -ms.date: 03/04/2022 +ms.date: 06/08/2022 ms.custom: devx-track-python, ignite-fall-2021, devx-track-azurecli, event-tier1-build-2022 ms.devlang: azurecli --- @@ -35,7 +35,7 @@ In this article, learn about the network communication requirements when securin ## Well-known ports -The following are well-known ports used by services listed in this article. If a port range is used in this article and is not listed in this section, it is specific to the service and may not have published information on what it is used for: +The following are well-known ports used by services listed in this article. If a port range is used in this article and isn't listed in this section, it's specific to the service and may not have published information on what it's used for: | Port | Description | @@ -73,12 +73,12 @@ These rule collections are described in more detail in [What are some Azure Fire | Service tag | Protocol | Port | | ----- |:-----:|:-----:| | AzureActiveDirectory | TCP | 80, 443 | - | AzureMachineLearning | TCP | 443 | + | AzureMachineLearning | TCP | 443, 8787, 18881 | | AzureResourceManager | TCP | 443 | | Storage.region | TCP | 443 | | AzureFrontDoor.FrontEnd
                  * Not needed in Azure China. | TCP | 443 | | AzureContainerRegistry.region | TCP | 443 | - | MicrosoftContainerRegistry.region | TCP | 443 | + | MicrosoftContainerRegistry.region
                  **Note** that this tag has a dependency on the **AzureFrontDoor.FirstParty** tag | TCP | 443 | | AzureKeyVault.region | TCP | 443 | > [!TIP] @@ -97,7 +97,7 @@ These rule collections are described in more detail in [What are some Azure Fire | **graph.windows.net** | Used by Azure Machine Learning compute instance/cluster. | | **anaconda.com**
                  **\*.anaconda.com** | Used to install default packages. | | **\*.anaconda.org** | Used to get repo data. | - | **pypi.org** | Used to list dependencies from the default index, if any, and the index is not overwritten by user settings. If the index is overwritten, you must also allow **\*.pythonhosted.org**. | + | **pypi.org** | Used to list dependencies from the default index, if any, and the index isn't overwritten by user settings. If the index is overwritten, you must also allow **\*.pythonhosted.org**. | | **cloud.r-project.org** | Used when installing CRAN packages for R development. | | **\*pytorch.org** | Used by some examples based on PyTorch. | | **\*.tensorflow.org** | Used by some examples based on Tensorflow. | @@ -114,17 +114,28 @@ These rule collections are described in more detail in [What are some Azure Fire 1. To restrict outbound traffic for models deployed to Azure Kubernetes Service (AKS), see the [Restrict egress traffic in Azure Kubernetes Service](../aks/limit-egress-traffic.md) and [Deploy ML models to Azure Kubernetes Service](v1/how-to-deploy-azure-kubernetes-service.md#connectivity) articles. -### Azure Kubernetes Services +### Kubernetes Compute + +A [Kubernetes cluster](./how-to-attach-kubernetes-anywhere.md) running behind an outbound proxy server or firewall needs extra network configuration. Configure the [Azure Arc network requirements](../azure-arc/kubernetes/quickstart-connect-cluster.md?tabs=azure-cli#meet-network-requirements) needed by Azure Arc agents. The following outbound URLs are also required for Azure Machine Learning: + +| Outbound Endpoint| Port | Description|Training |Inference | +|--|--|--|--|--| +| __\*.kusto.windows.net__
                  __\*.table.core.windows.net__
                  __\*.queue.core.windows.net__ | https:443 | Required to upload system logs to Kusto. |**✓**|**✓**| +| __\*.azurecr.io__ | https:443 | Azure Container Registry, required to pull Docker images used for machine learning workloads.|**✓**|**✓**| +| __\*.blob.core.windows.net__ | https:443 | Azure Blob storage, required to fetch machine learning project scripts, data, or models, and upload job logs/outputs.|**✓**|**✓**| +| __\*.workspace.\.api.azureml.ms__
                  __\.experiments.azureml.net__
                  __\.api.azureml.ms__ | https:443 | Azure Machine Learning service API.|**✓**|**✓**| +| __pypi.org__ | https:443 | Python package index, to install pip packages used for training job environment initialization.|**✓**|N/A| +| __archive.ubuntu.com__
                  __security.ubuntu.com__
                  __ppa.launchpad.net__ | http:80 | Required to download the necessary security patches. |**✓**|N/A| + +> [!NOTE] +> `` is the lowercase full spelling of the Azure region, for example, eastus, southeastasia. + -When using Azure Kubernetes Service with Azure Machine Learning, the following traffic must be allowed: -* General inbound/outbound requirements for AKS as described in the [Restrict egress traffic in Azure Kubernetes Service](../aks/limit-egress-traffic.md) article. -* __Outbound__ to mcr.microsoft.com. -* When deploying a model to an AKS cluster, use the guidance in the [Deploy ML models to Azure Kubernetes Service](v1/how-to-deploy-azure-kubernetes-service.md#connectivity) article. ## Other firewalls -The guidance in this section is generic, as each firewall has its own terminology and specific configurations. If you have questions, check the documentation for the firewall you are using. +The guidance in this section is generic, as each firewall has its own terminology and specific configurations. If you have questions, check the documentation for the firewall you're using. If not configured correctly, the firewall can cause problems using your workspace. There are various host names that are used both by the Azure Machine Learning workspace. The following sections list hosts that are required for Azure Machine Learning. @@ -373,7 +384,7 @@ The hosts in this section are used to install Python packages, and are required | ---- | ---- | | **anaconda.com**
                  **\*.anaconda.com** | Used to install default packages. | | **\*.anaconda.org** | Used to get repo data. | -| **pypi.org** | Used to list dependencies from the default index, if any, and the index is not overwritten by user settings. If the index is overwritten, you must also allow **\*.pythonhosted.org**. | +| **pypi.org** | Used to list dependencies from the default index, if any, and the index isn't overwritten by user settings. If the index is overwritten, you must also allow **\*.pythonhosted.org**. | | **\*pytorch.org** | Used by some examples based on PyTorch. | | **\*.tensorflow.org** | Used by some examples based on Tensorflow. | @@ -388,22 +399,6 @@ The hosts in this section are used to install R packages, and are required durin | ---- | ---- | | **cloud.r-project.org** | Used when installing CRAN packages. | -### Azure Arc enabled Kubernetes - -Clusters running behind an outbound proxy server or firewall need additional network configurations. Fulfill [Azure Arc network requirements](../azure-arc/kubernetes/quickstart-connect-cluster.md?tabs=azure-cli#meet-network-requirements) needed by Azure Arc agents. Besides that, the following outbound URLs are required for Azure Machine Learning, - -| Outbound Endpoint| Port | Description|Training |Inference | -|--|--|--|--|--| -| *.kusto.windows.net,
                  *.table.core.windows.net,
                  *.queue.core.windows.net | https:443 | Required to upload system logs to Kusto. |**✓**|**✓**| -| *.azurecr.io | https:443 | Azure container registry, required to pull docker images used for machine learning workloads.|**✓**|**✓**| -| *.blob.core.windows.net | https:443 | Azure blob storage, required to fetch machine learning project scripts,data or models, and upload job logs/outputs.|**✓**|**✓**| -| *.workspace.\.api.azureml.ms ,
                  \.experiments.azureml.net,
                  \.api.azureml.ms | https:443 | Azure mahince learning service API.|**✓**|**✓**| -| pypi.org | https:443 | Python package index, to install pip packages used for training job environment initialization.|**✓**|N/A| -| archive.ubuntu.com,
                  security.ubuntu.com,
                  ppa.launchpad.net | http:80 | Required to download the necessary security patches. |**✓**|N/A| - -> [!NOTE] -> `` is the lowcase full spelling of Azure Region, for example, eastus, southeastasia. - ### Visual Studio Code hosts The hosts in this section are used to install Visual Studio Code packages to establish a remote connection between Visual Studio Code and compute instances in your Azure Machine Learning workspace. diff --git a/articles/machine-learning/how-to-access-data.md b/articles/machine-learning/how-to-access-data.md deleted file mode 100644 index 8500c3b24f66c..0000000000000 --- a/articles/machine-learning/how-to-access-data.md +++ /dev/null @@ -1,484 +0,0 @@ ---- -title: Connect to storage services on Azure -titleSuffix: Azure Machine Learning -description: Learn how to use datastores to securely connect to Azure storage services during training with Azure Machine Learning -services: machine-learning -ms.service: machine-learning -ms.subservice: mldata -ms.topic: how-to -ms.author: xunwan -author: SturgeonMi -ms.reviewer: nibaccam -ms.date: 05/11/2022 -ms.custom: contperf-fy21q1, devx-track-python, data4ml, event-tier1-build-2022 -#Customer intent: As an experienced Python developer, I need to make my data in Azure storage available to my remote compute to train my machine learning models. ---- - -# Connect to storage services with datastores - -> [!div class="op_single_selector" title1="Select the version of Azure Machine Learning developer platform you are using:"] -> * [v1](./v1/how-to-access-data.md) -> * [v2 (current version)](how-to-access-data.md) - -[!INCLUDE [sdk v2](../../includes/machine-learning-sdk-v2.md)] -[!INCLUDE [cli v2](../../includes/machine-learning-cli-v2.md)] - -In this article, learn how to connect to data storage services on Azure with Azure Machine Learning datastores using the Azure CLI extension for machine learning (v2). - -Datastores securely connect to your storage service on Azure without putting your authentication credentials and the integrity of your original data source at risk. They store connection information, like your subscription ID and token authorization in your [Key Vault](https://azure.microsoft.com/services/key-vault/) that's associated with the workspace, so you can securely access your storage without having to hard code them in your scripts. You can create datastores that connect to [these Azure storage solutions](#supported-data-storage-service-types). - -To understand where datastores fit in Azure Machine Learning's overall data access workflow, see [Data in Azure Machine Learning](concept-data.md) article. - -For a low code experience, see how to use the [Azure Machine Learning studio to create and register datastores](how-to-connect-data-ui.md#create-datastores). - ->[!TIP] -> This article assumes you want to connect to your storage service with credential-based authentication credentials, like a service principal or a shared access signature (SAS) token. Keep in mind, if credentials are registered with datastores, all users with workspace *Reader* role are able to retrieve these credentials. [Learn more about workspace *Reader* role.](how-to-assign-roles.md#default-roles)

                  If this is a concern, learn how to [Connect to storage services with identity based access](how-to-identity-based-data-access.md). - -## Prerequisites - -- An Azure subscription. If you don't have an Azure subscription, create a free account before you begin. Try the [free or paid version of Azure Machine Learning](https://azure.microsoft.com/free/). - -- An Azure storage account with a [supported storage type](#supported-data-storage-service-types). - -- The [Azure Machine Learning CLI V2](how-to-configure-cli.md). - -- An Azure Machine Learning workspace. Either [create an Azure Machine Learning workspace](how-to-manage-workspace.md) or use an existing workspace. - - When you create a workspace, an Azure blob container and an Azure file share are automatically registered as datastores to the workspace. They're named `workspaceblobstore` and `workspacefilestore`, respectively. The `workspaceblobstore` is used to store workspace artifacts and your machine learning experiment logs. It's also set as the **default datastore** and can't be deleted from the workspace. The `workspacefilestore` is used to store notebooks and R scripts authorized via [compute instance](./concept-compute-instance.md#accessing-files). - - -## Supported data storage service types - -Datastores currently support storing connection information to the storage services listed in the following matrix. - -> [!TIP] -> **For unsupported storage solutions**, and to save data egress cost during ML experiments, move your data to a supported Azure storage solution. - -| Storage type | Authentication type | [Azure Machine Learning studio](https://ml.azure.com/) | [Azure Machine Learning  Python SDK](/python/api/overview/azure/ml/intro) | [Azure Machine Learning CLI](v1/reference-azure-machine-learning-cli.md) | [Azure Machine Learning  REST API](/rest/api/azureml/) | VS Code ----|---|---|---|---|---|--- -[Azure Blob Storage](../storage/blobs/storage-blobs-overview.md)| Account key
                  SAS token | ✓ | ✓ | ✓ |✓ |✓ -[Azure File Share](../storage/files/storage-files-introduction.md)| Account key
                  SAS token | ✓ | ✓ | ✓ |✓|✓ -[Azure Data Lake Storage Gen 1](../data-lake-store/index.yml)| Service principal| ✓ | ✓ | ✓ |✓| -[Azure Data Lake Storage Gen 2](../storage/blobs/data-lake-storage-introduction.md)| Service principal| ✓ | ✓ | ✓ |✓| - - -### Storage guidance - -We recommend creating a datastore for an [Azure Blob container](../storage/blobs/storage-blobs-introduction.md). Both standard and premium storage are available for blobs. Although premium storage is more expensive, its faster throughput speeds might improve the speed of your training runs, particularly if you train against a large amount of data. For information about the cost of storage accounts, see the [Azure pricing calculator](https://azure.microsoft.com/pricing/calculator/?service=machine-learning-service). - -[Azure Data Lake Storage Gen2](../storage/blobs/data-lake-storage-introduction.md?toc=%2fazure%2fstorage%2fblobs%2ftoc.json) is built on top of Azure Blob storage and designed for enterprise big data analytics. A fundamental part of Data Lake Storage Gen2 is the addition of a [hierarchical namespace](../storage/blobs/data-lake-storage-namespace.md) to Blob storage. The hierarchical namespace organizes objects/files into a hierarchy of directories for efficient data access. - -## Storage access and permissions - -To ensure you securely connect to your Azure storage service, Azure Machine Learning requires that you have permission to access the corresponding data storage container. This access depends on the authentication credentials used to register the datastore. - -> [!NOTE] -> This guidance also applies to [datastores created with identity-based data access](how-to-identity-based-data-access.md). - -### Virtual network - -Azure Machine Learning requires extra configuration steps to communicate with a storage account that is behind a firewall or within a virtual network. If your storage account is behind a firewall, you can [add your client's IP address to an allowlist](../storage/common/storage-network-security.md#managing-ip-network-rules) via the Azure portal. - -Azure Machine Learning can receive requests from clients outside of the virtual network. To ensure that the entity requesting data from the service is safe and to enable data being displayed in your workspace, [use a private endpoint with your workspace](how-to-configure-private-link.md). - -**For Python SDK users**, to access your data via your training script on a compute target, the compute target needs to be inside the same virtual network and subnet of the storage. You can [use a compute cluster in the same virtual network](how-to-secure-training-vnet.md?tabs=azure-studio%2Cipaddress#compute-cluster) or [use a compute instance in the same virtual network](how-to-secure-training-vnet.md?tabs=azure-studio%2Cipaddress#compute-instance). - -**For Azure Machine Learning studio users**, several features rely on the ability to read data, such as data previews, profiles, and automated machine learning. For these features to work with storage behind virtual networks, use a [workspace managed identity in the studio](how-to-enable-studio-virtual-network.md) to allow Azure Machine Learning to access the storage account from outside the virtual network. - -> [!NOTE] -> If your data storage is an Azure SQL Database behind a virtual network, be sure to set *Deny public access* to **No** via the [Azure portal](https://portal.azure.com/) to allow Azure Machine Learning to access the storage account. 
- -### Access validation - -> [!WARNING] -> Cross tenant access to storage accounts is not supported. If cross tenant access is needed for your scenario, please reach out to the AzureML Data Support team alias at amldatasupport@microsoft.com for assistance with a custom code solution. - -**As part of the initial datastore creation and registration process**, Azure Machine Learning automatically validates that the underlying storage service exists and the user provided principal (username, service principal, or SAS token) has access to the specified storage. - -**After datastore creation**, this validation is only performed for methods that require access to the underlying storage container, **not** each time datastore objects are retrieved. For example, validation happens if you want to download files from your datastore; but if you just want to change your default datastore, then validation doesn't happen. - -To authenticate your access to the underlying storage service, you can provide either your account key, shared access signatures (SAS) tokens, or service principal in the corresponding `register_azure_*()` method of the datastore type you want to create. The [storage type matrix](#supported-data-storage-service-types) lists the supported authentication types that correspond to each datastore type. - -You can find account key, SAS token, and service principal information on your [Azure portal](https://portal.azure.com). - -* If you plan to use an account key or SAS token for authentication, select **Storage Accounts** on the left pane, and choose the storage account that you want to register. - * The **Overview** page provides information such as the account name, container, and file share name. - 1. For account keys, go to **Access keys** on the **Settings** pane. - 1. For SAS tokens, go to **Shared access signatures** on the **Settings** pane. - -* If you plan to use a service principal for authentication, go to your **App registrations** and select which app you want to use. - * Its corresponding **Overview** page will contain required information like tenant ID and client ID. - -> [!IMPORTANT] -> If you need to change your access keys for an Azure Storage account (account key or SAS token), be sure to sync the new credentials with your workspace and the datastores connected to it. Learn how to [sync your updated credentials](how-to-change-storage-access-key.md). - -### Permissions - -For Azure blob container and Azure Data Lake Gen 2 storage, make sure your authentication credentials have **Storage Blob Data Reader** access. Learn more about [Storage Blob Data Reader](../role-based-access-control/built-in-roles.md#storage-blob-data-reader). An account SAS token defaults to no permissions. -* For data **read access**, your authentication credentials must have a minimum of list and read permissions for containers and objects. - -* For data **write access**, write and add permissions also are required. - -## Create and register datastores - -When you register an Azure storage solution as a datastore, you automatically create and register that datastore to a specific workspace. Review the [storage access & permissions](#storage-access-and-permissions) section for guidance on virtual network scenarios, and where to find required authentication credentials. - -Within this section are examples for how to create and register a datastore via the Python SDK v2 (preview) for the following storage types. 
The parameters provided in these examples are the **required parameters** to create and register a datastore. - -* Azure blob container -* Azure file share -* Azure Data Lake Storage Generation 1 -* Azure Data Lake Storage Generation 2 - - -If you prefer a low code experience, see [Connect to data with Azure Machine Learning studio](how-to-connect-data-ui.md). ->[!IMPORTANT] -> If you unregister and re-register a datastore with the same name, and it fails, the Azure Key Vault for your workspace may not have soft-delete enabled. By default, soft-delete is enabled for the key vault instance created by your workspace, but it may not be enabled if you used an existing key vault or have a workspace created prior to October 2020. For information on how to enable soft-delete, see [Turn on Soft Delete for an existing key vault](../key-vault/general/soft-delete-change.md#turn-on-soft-delete-for-an-existing-key-vault). - - -> [!NOTE] -> Datastore name should only consist of lowercase letters, digits and underscores. - -The following Python SDK v2 examples are from the [Create Azure Machine Learning Datastore](https://github.com/Azure/azureml-examples/blob/sdk-preview/sdk/resources/datastores/datastore.ipynb) notebook in the [Azure Machine Learning examples repository](https://github.com/azure/azureml-examples). - -To learn more about the Azure Machine Learning Python SDK v2 preview, see [What is Azure Machine Learning CLI & SDK v2](concept-v2.md). - -### Azure blob container - SDK (v2) - -The following code creates and registers the `blob_example` datastore to the workspace. This datastore accesses the `data-container` blob container on the `mytestblobstore` storage account, by using account key. - -```Python -blob_datastore1 = AzureBlobDatastore( - name="blob-example", - description="Datastore pointing to a blob container.", - account_name="mytestblobstore", - container_name="data-container", - credentials={ - "account_key": "XXXxxxXXXxXXXXxxXXXXXxXXXXXxXxxXxXXXxXXXxXXxxxXXxxXXXxXxXXXxxXxxXXXXxxxxxXXxxxxxxXXXxXXX" - }, -) -ml_client.create_or_update(blob_datastore1) -``` - -The following code creates and registers the `blob_sas_example` datastore to the workspace. This datastore accesses the `data-container` blob container on the `mytestblobstore` storage account, by using sas token. - -```Python -# create a SAS based blob datastore -blob_sas_datastore = AzureBlobDatastore( - name="blob-sas-example", - description="Datastore pointing to a blob container using SAS token.", - account_name="mytestblobstore", - container_name="data-container", - credentials={ - "sas_token": "?xx=XXXX-XX-XX&xx=xxxx&xxx=xxx&xx=xxxxxxxxxxx&xx=XXXX-XX-XXXXX:XX:XXX&xx=XXXX-XX-XXXXX:XX:XXX&xxx=xxxxx&xxx=XXxXXXxxxxxXXXXXXXxXxxxXXXXXxxXXXXXxXXXXxXXXxXXxXX" - }, -) -ml_client.create_or_update(blob_sas_datastore) -``` - - -The following code creates and registers the `blob_protocol_example` datastore to the workspace. This datastore accesses the `data-container` blob container on the `mytestblobstore` storage account, by using wasbs protocol and account key. 
- -```Python -# create a datastore pointing to a blob container using wasbs protocol -blob_wasb_datastore = AzureBlobDatastore( - name="blob-protocol-example", - description="Datastore pointing to a blob container using wasbs protocol.", - account_name="mytestblobstore", - container_name="data-container", - protocol="wasbs", - credentials={ - "account_key": "XXXxxxXXXxXXXXxxXXXXXxXXXXXxXxxXxXXXxXXXxXXxxxXXxxXXXxXxXXXxxXxxXXXXxxxxxXXxxxxxxXXXxXXX" - }, -) -ml_client.create_or_update(blob_wasb_datastore) -``` - -The following code creates and registers the `blob_credless_example` datastore to the workspace. This datastore accesses the `data-container` blob container on the `mytestblobstore` storage account, by using the user's identity or other managed identities. - -```Python -# create a credential-less datastore pointing to a blob container -blob_credless_datastore = AzureBlobDatastore( - name="blob-credless-example", - description="Credential-less datastore pointing to a blob container.", - account_name="mytestblobstore", - container_name="data-container", -) -ml_client.create_or_update(blob_credless_datastore) -``` - -### Azure file share - SDK (v2) - -The following code creates and registers the `file_example` datastore to the workspace. This datastore accesses the `my-share` file share on the `mytestfilestore` storage account, by using the provided account access key. Review the [storage access & permissions](#storage-access-and-permissions) section for guidance on virtual network scenarios, and where to find required authentication credentials. - -```Python -# Datastore pointing to an Azure File Share -file_datastore = AzureFileDatastore( - name="file-example", - description="Datastore pointing to an Azure File Share.", - account_name="mytestfilestore", - file_share_name="my-share", - credentials={ - "account_key": "XXXxxxXXXxXXXXxxXXXXXxXXXXXxXxxXxXXXxXXXxXXxxxXXxxXXXxXxXXXxxXxxXXXXxxxxxXXxxxxxxXXXxXXX" - }, -) -ml_client.create_or_update(file_datastore) -``` - - -The following code creates and registers the `file_sas_example` datastore to the workspace. This datastore accesses the `my-share` file share on the `mytestfilestore` storage account, by using the provided account sas token. Review the [storage access & permissions](#storage-access-and-permissions) section for guidance on virtual network scenarios, and where to find required authentication credentials. - -```Python -# Datastore pointing to an Azure File Share using SAS token -file_sas_datastore = AzureFileDatastore( - name="file-sas-example", - description="Datastore pointing to an Azure File Share using SAS token.", - account_name="mytestfilestore", - file_share_name="my-share", - credentials={ - "sas_token": "?xx=XXXX-XX-XX&xx=xxxx&xxx=xxx&xx=xxxxxxxxxxx&xx=XXXX-XX-XXXXX:XX:XXX&xx=XXXX-XX-XXXXX:XX:XXX&xxx=xxxxx&xxx=XXxXXXxxxxxXXXXXXXxXxxxXXXXXxxXXXXXxXXXXxXXXxXXxXX" - }, -) -ml_client.create_or_update(file_sas_datastore) -``` - -### Azure Data Lake Storage Generation 1 - SDK (v2) - - -The following code creates and registers the `adls_gen1_example` datastore to the workspace. This datastore accesses the `mytestdatalakegen1` storage, by using the provided service principal credentials. -Review the [storage access & permissions](#storage-access-and-permissions) section for guidance on virtual network scenarios, and where to find required authentication credentials. 
- -In order to utilize your service principal, you need to [register your application](../active-directory/develop/app-objects-and-service-principals.md) and grant the service principal data access via either Azure role-based access control (Azure RBAC) or access control lists (ACL). Learn more about [access control set up for ADLS](../storage/blobs/data-lake-storage-access-control-model.md). - -```python -adlsg1_datastore = AzureDataLakeGen1Datastore( - name="adls-gen1-example", - description="Datastore pointing to an Azure Data Lake Storage Gen1.", - store_name="mytestdatalakegen1", - credentials={ - "tenant_id": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", - "client_id": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", - "client_secret": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", - }, -) -ml_client.create_or_update(adlsg1_datastore) -``` - - -### Azure Data Lake Storage Generation 2 - SDK (v2) - - -The following code creates and registers the `adls_gen2_example` datastore to the workspace. This datastore accesses the file system `my-gen2-container` in the `mytestdatalakegen2` storage account, by using the provided service principal credentials. -Review the [storage access & permissions](#storage-access-and-permissions) section for guidance on virtual network scenarios, and where to find required authentication credentials. - -In order to utilize your service principal, you need to [register your application](../active-directory/develop/app-objects-and-service-principals.md) and grant the service principal data access via either Azure role-based access control (Azure RBAC) or access control lists (ACL). Learn more about [access control set up for ADLS Gen 2](../storage/blobs/data-lake-storage-access-control-model.md). - -```python -adlsg2_datastore = AzureDataLakeGen2Datastore( - name="adls-gen2-example", - description="Datastore pointing to an Azure Data Lake Storage Gen2.", - account_name="mytestdatalakegen2", - filesystem="my-gen2-container", - credentials={ - "tenant_id": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", - "client_id": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", - "client_secret": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", - }, -) -ml_client.create_or_update(adlsg2_datastore) -``` - - -## Working with Datastores in Azure Machine Learning CLI (v2) - -To create a datastore using the CLI, use the [az ml datastore create](/cli/azure/ml/dataset#az-ml-dataset-create) command and provide a YAML file that defines the dataset. - -```cli -az ml datastore create -f .yml -``` - -The YAML files in this section demonstrate how to create and register a datastore. These files are pulled from [https://github.com/Azure/azureml-examples/tree/main/cli/assets/data](https://github.com/Azure/azureml-examples/tree/main/cli/assets/data) in the Azure Machine Learning examples repository. The samples are provided for the following storage types: - -- Azure Blob Storage container -- Azure File share -- Azure Data Lake Storage Gen1 -- Azure Data Lake Storage Gen2 - -> [!NOTE] -> The `credentials` property in these sample `YAML` is redacted. Please replace the redacted `account_key`, `sas_token`, `tenant_id`, `client_id` and `client_secret` appropriately in these files. - - -For more information on the CLI v2, see [Install and configure CLI](/azure/machine-learning/how-to-configure-cli). - -### Azure blob container - CLI (v2) - - -The following code creates and registers the `blob_example` datastore to the workspace. 
This datastore accesses the `data-container` blob container on the `mytestblobstore` storage account, by using account key. - -```YAML -$schema: https://azuremlschemas.azureedge.net/latest/azureBlob.schema.json -name: blob_example -type: azure_blob -description: Datastore pointing to a blob container. -account_name: mytestblobstore -container_name: data-container -credentials: - account_key: XXXxxxXXXxXXXXxxXXXXXxXXXXXxXxxXxXXXxXXXxXXxxxXXxxXXXxXxXXXxxXxxXXXXxxxxxXXxxxxxxXXXxXXX -``` - -The following code creates and registers the `blob_sas_example` datastore to the workspace. This datastore accesses the `data-container` blob container on the `mytestblobstore` storage account, by using sas token. - -```YAML -$schema: https://azuremlschemas.azureedge.net/latest/azureBlob.schema.json -name: blob_sas_example -type: azure_blob -description: Datastore pointing to a blob container using SAS token. -account_name: mytestblobstore -container_name: data-container -credentials: - sas_token: ?xx=XXXX-XX-XX&xx=xxxx&xxx=xxx&xx=xxxxxxxxxxx&xx=XXXX-XX-XXXXX:XX:XXX&xx=XXXX-XX-XXXXX:XX:XXX&xxx=xxxxx&xxx=XXxXXXxxxxxXXXXXXXxXxxxXXXXXxxXXXXXxXXXXxXXXxXXxXX -``` - - -The following code creates and registers the `blob_protocol_example` datastore to the workspace. This datastore accesses the `data-container` blob container on the `mytestblobstore` storage account, by using wasbs protocol and account key. - -```YAML -$schema: https://azuremlschemas.azureedge.net/latest/azureBlob.schema.json -name: blob_protocol_example -type: azure_blob -description: Datastore pointing to a blob container using wasbs protocol. -account_name: mytestblobstore -protocol: wasbs -container_name: data-container -credentials: - account_key: XXXxxxXXXxXXXXxxXXXXXxXXXXXxXxxXxXXXxXXXxXXxxxXXxxXXXxXxXXXxxXxxXXXXxxxxxXXxxxxxxXXXxXXX -``` - -The following code creates and registers the `blob_credless_example` datastore to the workspace. This datastore accesses the `data-container` blob container on the `mytestblobstore` storage account, by using the user's identity or other managed identities. - -```YAML -$schema: https://azuremlschemas.azureedge.net/latest/azureBlob.schema.json -name: blob_credless_example -type: azure_blob -description: Credential-less datastore pointing to a blob container. -account_name: mytestblobstore -container_name: data-container -``` - -### Azure file share - CLI (v2) - - -The following code creates and registers the `file_example` datastore to the workspace. This datastore accesses the `my-share` file share on the `mytestfilestore` storage account, by using the provided account access key. Review the [storage access & permissions](#storage-access-and-permissions) section for guidance on virtual network scenarios, and where to find required authentication credentials. - -```YAML -$schema: https://azuremlschemas.azureedge.net/latest/azureFile.schema.json -name: file_example -type: azure_file -description: Datastore pointing to an Azure File Share. -account_name: mytestfilestore -file_share_name: my-share -credentials: - account_key: XxXxXxXXXXXXXxXxXxxXxxXXXXXXXXxXxxXXxXXXXXXXxxxXxXXxXXXXXxXXxXXXxXxXxxxXXxXXxXXXXXxXxxXX -``` - - -The following code creates and registers the `file_sas_example` datastore to the workspace. This datastore accesses the `my-share` file share on the `mytestfilestore` storage account, by using the provided account sas token. 
Review the [storage access & permissions](#storage-access-and-permissions) section for guidance on virtual network scenarios, and where to find required authentication credentials. - -```YAML -$schema: https://azuremlschemas.azureedge.net/latest/azureFile.schema.json -name: file_sas_example -type: azure_file -description: Datastore pointing to an Azure File Share using SAS token. -account_name: mytestfilestore -file_share_name: my-share -credentials: - sas_token: ?xx=XXXX-XX-XX&xx=xxxx&xxx=xxx&xx=xxxxxxxxxxx&xx=XXXX-XX-XXXXX:XX:XXX&xx=XXXX-XX-XXXXX:XX:XXX&xxx=xxxxx&xxx=XXxXXXxxxxxXXXXXXXxXxxxXXXXXxxXXXXXxXXXXxXXXxXXxXX -``` - - - -### Azure Data Lake Storage Generation 1 - CLI (v2) - - -The following code creates and registers the `adls_gen1_example` datastore to the workspace. This datastore accesses the `mytestdatalakegen1` storage, by using the provided service principal credentials. -Review the [storage access & permissions](#storage-access-and-permissions) section for guidance on virtual network scenarios, and where to find required authentication credentials. - -In order to utilize your service principal, you need to [register your application](../active-directory/develop/app-objects-and-service-principals.md) and grant the service principal data access via either Azure role-based access control (Azure RBAC) or access control lists (ACL). Learn more about [access control set up for ADLS](../storage/blobs/data-lake-storage-access-control-model.md). - -```YAML -$schema: https://azuremlschemas.azureedge.net/latest/azureDataLakeGen1.schema.json -name: adls_gen1_example -type: azure_data_lake_gen1 -description: Datastore pointing to an Azure Data Lake Storage Gen1. -store_name: mytestdatalakegen1 -credentials: - tenant_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX - client_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX - client_secret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -``` - -The following code creates and registers the `adls_gen1_credless_example` datastore to the workspace. This datastore accesses the `mytestdatalakegen1` storage, by using the user's identity or other managed identities. - -```YAML -$schema: https://azuremlschemas.azureedge.net/latest/azureDataLakeGen1.schema.json -name: alds_gen1_credless_example -type: azure_data_lake_gen1 -description: Credential-less datastore pointing to an Azure Data Lake Storage Gen1. -store_name: mytestdatalakegen1 -``` - - -### Azure Data Lake Storage Generation 2 - CLI (v2) - - -The following code creates and registers the `adls_gen2_example` datastore to the workspace. This datastore accesses the file system `my-gen2-container` in the `mytestdatalakegen2` storage account, by using the provided service principal credentials. -Review the [storage access & permissions](#storage-access-and-permissions) section for guidance on virtual network scenarios, and where to find required authentication credentials. - -In order to utilize your service principal, you need to [register your application](../active-directory/develop/app-objects-and-service-principals.md) and grant the service principal data access via either Azure role-based access control (Azure RBAC) or access control lists (ACL). Learn more about [access control set up for ADLS Gen 2](../storage/blobs/data-lake-storage-access-control-model.md). - -```YAML -$schema: https://azuremlschemas.azureedge.net/latest/azureDataLakeGen2.schema.json -name: adls_gen2_example -type: azure_data_lake_gen2 -description: Datastore pointing to an Azure Data Lake Storage Gen2. 
-account_name: mytestdatalakegen2 -filesystem: my-gen2-container -credentials: - tenant_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX - client_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX - client_secret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -``` - -The following code creates and registers the `adls_gen2_credless_example` datastore to the workspace. This datastore accesses the file system `my-gen2-container` in the `mytestdatalakegen2` storage account, by using the user's identity or other managed identities. - -```YAML -$schema: https://azuremlschemas.azureedge.net/latest/azureDataLakeGen2.schema.json -name: adls_gen2_credless_example -type: azure_data_lake_gen2 -description: Credential-less datastore pointing to an Azure Data Lake Storage Gen2. -account_name: mytestdatalakegen2 -filesystem: my-gen2-container -``` - -## Create datastores with other Azure tools -In addition to creating datastores with the CLI/SDK and the studio, you can also use Azure Resource Manager templates or the Azure Machine Learning VS Code extension. - -### Azure Resource Manager - -There are several templates at [https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.machinelearningservices](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.machinelearningservices) that can be used to create datastores. - -For information on using these templates, see [Use an Azure Resource Manager template to create a workspace for Azure Machine Learning](how-to-create-workspace-template.md). - -### VS Code extension - -If you prefer to create and manage datastores using the Azure Machine Learning VS Code extension, visit the [VS Code resource management how-to guide](how-to-manage-resources-vscode.md#datastores) to learn more. - -## Move data to supported Azure storage solutions - -Azure Machine Learning supports accessing data from Azure Blob storage, Azure Files, Azure Data Lake Storage Gen1, and Azure Data Lake Storage Gen2. If you're using unsupported storage, we recommend that you move your data to supported Azure storage solutions by using [Azure Data Factory and these steps](/azure/data-factory/quickstart-create-data-factory-copy-data-tool). Moving data to supported storage can help you save data egress costs during machine learning experiments. - -Azure Data Factory provides efficient and resilient data transfer with more than 80 prebuilt connectors at no extra cost. These connectors include Azure data services, on-premises data sources, Amazon S3 and Redshift, and Google BigQuery. - -## Next steps - -* [Work with data using SDK v2](how-to-use-data.md) diff --git a/articles/machine-learning/how-to-administrate-data-authentication.md b/articles/machine-learning/how-to-administrate-data-authentication.md new file mode 100644 index 0000000000000..006a8cef94990 --- /dev/null +++ b/articles/machine-learning/how-to-administrate-data-authentication.md @@ -0,0 +1,109 @@ +--- +title: How to administrate data authentication +titleSuffix: Azure Machine Learning +description: Learn how to manage data access and how to authenticate in Azure Machine Learning +services: machine-learning +ms.service: machine-learning +ms.subservice: enterprise-readiness +ms.topic: how-to +ms.author: xunwan +author: xunwan +ms.reviewer: larryfr +ms.date: 05/24/2022 + +# Customer intent: As an administrator, I need to administrate data access and set up authentication method for data scientists. 
+--- + +# Data administration +Learn how to manage data access and how to authenticate in Azure Machine Learning +[!INCLUDE [sdk v2](../../includes/machine-learning-sdk-v2.md)] +[!INCLUDE [CLI v2](../../includes/machine-learning-CLI-v2.md)] + +> [!IMPORTANT] +> The information in this article is intended for Azure administrators who are creating the infrastructure required for an Azure Machine Learning solution. + +In general, data access from studio involves the following checks: + +* Who is accessing? + - There are multiple different types of authentication depending on the storage type. For example, account key, token, service principal, managed identity, and user identity. + - If authentication is made using a user identity, then it's important to know *which* user is trying to access storage. Learn more about [identity-based data access](how-to-identity-based-data-access.md). +* Do they have permission? + - Are the credentials correct? If so, does the service principal, managed identity, etc., have the necessary permissions on the storage? Permissions are granted using Azure role-based access controls (Azure RBAC). + - [Reader](../role-based-access-control/built-in-roles.md#reader) of the storage account reads metadata of the storage. + - [Storage Blob Data Reader](../role-based-access-control/built-in-roles.md#storage-blob-data-reader) reads data within a blob container. + - [Contributor](../role-based-access-control/built-in-roles.md#contributor) allows write access to a storage account. + - More roles may be required depending on the type of storage. +* Where is access from? + - User: Is the client IP address in the VNet/subnet range? + - Workspace: Is the workspace public or does it have a private endpoint in a VNet/subnet? + - Storage: Does the storage allow public access, or does it restrict access through a service endpoint or a private endpoint? +* What operation is being performed? + - Create, read, update, and delete (CRUD) operations on a data store/dataset are handled by Azure Machine Learning. + - Data Access calls (such as preview or schema) go to the underlying storage and need extra permissions. +* Where is this operation being run; compute resources in your Azure subscription or resources hosted in a Microsoft subscription? + - All calls to dataset and datastore services (except the "Generate Profile" option) use resources hosted in a __Microsoft subscription__ to run the operations. + - Jobs, including the "Generate Profile" option for datasets, run on a compute resource in __your subscription__, and access the data from there. So the compute identity needs permission to the storage rather than the identity of the user submitting the job. + +The following diagram shows the general flow of a data access call. In this example, a user is trying to make a data access call through a machine learning workspace, without using any compute resource. + +:::image type="content" source="./media/concept-network-data-access/data-access-flow.svg" alt-text="Diagram of the logic flow when accessing data."::: + +## Scenarios and identities + +The following table lists what identities should be used for specific scenarios: + +| Scenario | Use workspace
                  Managed Service Identity (MSI) | Identity to use | +|--|--|--| +| Access from UI | Yes | Workspace MSI | +| Access from UI | No | User's Identity | +| Access from Job | Yes/No | Compute MSI | +| Access from Notebook | Yes/No | User's identity | + + +Data access is complex and it's important to recognize that there are many pieces to it. For example, accessing data from Azure Machine Learning studio is different than using the SDK. When using the SDK on your local development environment, you're directly accessing data in the cloud. When using studio, you aren't always directly accessing the data store from your client. Studio relies on the workspace to access data on your behalf. + +> [!TIP] +> If you need to access data from outside Azure Machine Learning, such as using Azure Storage Explorer, *user* identity is probably what is used. Consult the documentation for the tool or service you are using for specific information. For more information on how Azure Machine Learning works with data, see [Identity-based data access to storage services on Azure](how-to-identity-based-data-access.md). + +## Azure Storage Account + +When using an Azure Storage Account from Azure Machine Learning studio, you must add the managed identity of the workspace to the following Azure RBAC roles for the storage account: + +* [Blob Data Reader](../role-based-access-control/built-in-roles.md#storage-blob-data-reader) +* If the storage account uses a private endpoint to connect to the VNet, you must grant the managed identity the [Reader](../role-based-access-control/built-in-roles.md#reader) role for the storage account private endpoint. + +For more information, see [Use Azure Machine Learning studio in an Azure Virtual Network](how-to-enable-studio-virtual-network.md). + +See the following sections for information on limitations when using Azure Storage Account with your workspace in a VNet. + +### Secure communication with Azure Storage Account + +To secure communication between Azure Machine Learning and Azure Storage Accounts, configure storage to [Grant access to trusted Azure services](../storage/common/storage-network-security.md#grant-access-to-trusted-azure-services). + +### Azure Storage firewall + +When an Azure Storage account is behind a virtual network, the storage firewall can normally be used to allow your client to directly connect over the internet. However, when using studio it isn't your client that connects to the storage account; it's the Azure Machine Learning service that makes the request. The IP address of the service isn't documented and changes frequently. __Enabling the storage firewall will not allow studio to access the storage account in a VNet configuration__. + +### Azure Storage endpoint type + +When the workspace uses a private endpoint and the storage account is also in the VNet, there are extra validation requirements when using studio: + +* If the storage account uses a __service endpoint__, the workspace private endpoint and storage service endpoint must be in the same subnet of the VNet. +* If the storage account uses a __private endpoint__, the workspace private endpoint and storage service endpoint must be in the same VNet. In this case, they can be in different subnets. + +## Azure Data Lake Storage Gen1 + +When using Azure Data Lake Storage Gen1 as a datastore, you can only use POSIX-style access control lists. You can assign the workspace's managed identity access to resources just like any other security principal. 
For more information, see [Access control in Azure Data Lake Storage Gen1](../data-lake-store/data-lake-store-access-control.md). + +## Azure Data Lake Storage Gen2 + +When using Azure Data Lake Storage Gen2 as a datastore, you can use both Azure RBAC and POSIX-style access control lists (ACLs) to control data access inside of a virtual network. + +__To use Azure RBAC__, follow the steps in the [Datastore: Azure Storage Account](how-to-enable-studio-virtual-network.md#datastore-azure-storage-account) section of the 'Use Azure Machine Learning studio in an Azure Virtual Network' article. Data Lake Storage Gen2 is based on Azure Storage, so the same steps apply when using Azure RBAC. + +__To use ACLs__, the managed identity of the workspace can be assigned access just like any other security principal. For more information, see [Access control lists on files and directories](../storage/blobs/data-lake-storage-access-control.md#access-control-lists-on-files-and-directories). + + +## Next steps + +For information on enabling studio in a network, see [Use Azure Machine Learning studio in an Azure Virtual Network](how-to-enable-studio-virtual-network.md). diff --git a/articles/machine-learning/how-to-attach-kubernetes-anywhere.md b/articles/machine-learning/how-to-attach-kubernetes-anywhere.md index c30b36c2c270e..130f05415061e 100644 --- a/articles/machine-learning/how-to-attach-kubernetes-anywhere.md +++ b/articles/machine-learning/how-to-attach-kubernetes-anywhere.md @@ -1,5 +1,5 @@ --- -title: Azure Machine Learning anywhere with Kubernetes (preview) +title: Configure Kubernetes cluster (Preview) description: Configure and attach an existing Kubernetes in any infrastructure across on-premises and multi-cloud to build, train, and deploy models with seamless Azure ML experience. titleSuffix: Azure Machine Learning author: ssalgadodev @@ -11,82 +11,129 @@ ms.topic: how-to ms.custom: build-spring-2022, cliv2, sdkv2, event-tier1-build-2022 --- -# Azure Machine Learning anywhere with Kubernetes (preview) +# Configure Kubernetes cluster for Azure Machine Learning (Preview) -Azure Machine Learning anywhere with Kubernetes (AzureML anywhere) enables customers to build, train, and deploy models in any infrastructure on-premises and across multi-cloud using Kubernetes. With an AzureML extension deployment on a Kubernetes cluster, you can instantly onboard teams of ML professionals with AzureML service capabilities. These services include full machine learning lifecycle and automation with MLOps in hybrid cloud and multi-cloud. +Using Kubernetes with Azure Machine Learning enables you to build, train, and deploy models in any infrastructure on-premises and across multi-cloud. With an AzureML extension deployment on Kubernetes, you can instantly onboard teams of ML professionals with AzureML service capabilities. These services include full machine learning lifecycle and automation with MLOps in hybrid cloud and multi-cloud. + +You can easily bring AzureML capabilities to your Kubernetes cluster from cloud or on-premises by deploying AzureML extension. + +- For Azure Kubernetes Service (AKS) in Azure, deploy AzureML extension to the AKS directly. For more information, see [Deploy and manage cluster extensions for Azure Kubernetes Service (AKS)](../aks/cluster-extensions.md). +- For Kubernetes clusters on-premises or from other cloud providers, connect the cluster with Azure Arc first, then deploy AzureML extension to Azure Arc-enabled Kubernetes. 
For more information, see [Azure Arc-enabled Kubernetes](../azure-arc/kubernetes/overview.md). In this article, you can learn about steps to configure and attach an existing Kubernetes cluster anywhere for Azure Machine Learning: -* [Deploy AzureML extension to Kubernetes cluster](#deploy-azureml-extension---example-scenarios) -* [Create and use instance types to manage compute resources efficiently](#create-custom-instance-types) +* [Deploy AzureML extension to Kubernetes cluster](#deploy-azureml-extension) +* [Attach a Kubernetes cluster to AzureML workspace](#attach-a-kubernetes-cluster-to-an-azureml-workspace) + +## Why use Azure Machine Learning Kubernetes? + +AzureML Kubernetes is customer fully configured and managed compute for machine learning. It can be used as both [training compute target](./concept-compute-target.md#train) and [inference compute target](./concept-compute-target.md#deploy). It provides the following benefits: + +- Harness existing heterogeneous or homogeneous Kubernetes cluster, with CPUs or GPUs. +- Share the same Kubernetes cluster in multiple AzureML Workspaces across region. +- Use the same Kubernetes cluster for different machine learning purposes, including model training, batch scoring, and real-time inference. +- Secure network communication between the cluster and cloud via Azure Private Link and Private Endpoint. +- Isolate team projects and machine learning workloads with Kubernetes node selector and namespace. +- [Target certain types of compute nodes and CPU/Memory/GPU resource allocation for training and inference workloads](./reference-kubernetes.md#create-and-use-instance-types-for-efficient-compute-resource-usage). +- [Connect with custom data sources for machine learning workloads using Kubernetes PV and PVC ](./reference-kubernetes.md#azureml-jobs-connect-with-on-premises-data-storage). ## Prerequisites -1. A running Kubernetes cluster - **We recommend minimum of 4 vCPU cores and 8GB memory, around 2 vCPU cores and 3GB memory will be used by Azure Arc agent and AzureML extension components**. -1. Connect your Kubernetes cluster to Azure Arc. Follow instructions in [connect existing Kubernetes cluster to Azure Arc](../azure-arc/kubernetes/quickstart-connect-cluster.md). +* A running Kubernetes cluster in [supported version and region](./reference-kubernetes.md#supported-kubernetes-version-and-region). **We recommend your cluster has a minimum of 4 vCPU cores and 8GB memory, around 2 vCPU cores and 3GB memory will be used by Azure Arc and AzureML extension components**. +* Other than Azure Kubernetes Services (AKS) cluster in Azure, connect your Kubernetes cluster to Azure Arc. Follow instructions in [connect existing Kubernetes cluster to Azure Arc](../azure-arc/kubernetes/quickstart-connect-cluster.md). - a. if you have Azure RedHat OpenShift Service (ARO) cluster or OpenShift Container Platform (OCP) cluster, follow another prerequisite step [here](#prerequisite-for-azure-arc-enabled-kubernetes) before AzureML extension deployment. -1. If you have an AKS cluster in Azure, register the AKS-ExtensionManager feature flag by using the ```az feature register --namespace "Microsoft.ContainerService" --name "AKS-ExtensionManager``` command. **Azure Arc connection is not required and not recommended**. -1. Install or upgrade Azure CLI to version >=2.16.0 -1. 
Install the Azure CLI extension ```k8s-extension``` (version>=1.0.0) by running ```az extension add --name k8s-extension``` + * If you have an AKS cluster in Azure, **Azure Arc connection is not required and not recommended**. + + * If you have Azure RedHat OpenShift Service (ARO) cluster or OpenShift Container Platform (OCP) cluster, follow another prerequisite step [here](./reference-kubernetes.md#prerequisites-for-aro-or-ocp-clusters) before AzureML extension deployment. +* Cluster running behind an outbound proxy server or firewall needs additional network configurations. Fulfill the [network requirements](./how-to-access-azureml-behind-firewall.md#kubernetes-compute) +* Install or upgrade Azure CLI to version >=2.16.0 +* Install the Azure CLI extension ```k8s-extension``` (version>=1.2.3) by running ```az extension add --name k8s-extension``` + ## What is AzureML extension -AzureML extension consists of a set of system components deployed to your Kubernetes cluster so you can enable your cluster to run an AzureML workload - model training jobs or model endpoints. You can use an Azure CLI command ```k8s-extension create``` to deploy AzureML extension. +AzureML extension consists of a set of system components deployed to your Kubernetes cluster in `azureml` namespace, so you can enable your cluster to run an AzureML workload - model training jobs or model endpoints. You can use an Azure CLI command ```k8s-extension create``` to deploy AzureML extension. General available (GA) version of AzureML extension >= 1.1.1 -For a detailed list of AzureML extension system components, see appendix [AzureML extension components](#appendix-i-azureml-extension-components). +For a detailed list of AzureML extension system components, see [AzureML extension components](./reference-kubernetes.md#azureml-extension-components). ## Key considerations for AzureML extension deployment AzureML extension allows you to specify configuration settings needed for different workload support at deployment time. Before AzureML extension deployment, **read following carefully to avoid unnecessary extension deployment errors**: - * Type of workload to enable for your cluster. ```enableTraining``` and ```enableInference``` config settings are your convenient choices here; they will enable training and inference workload respectively. + * Type of workload to enable for your cluster. ```enableTraining``` and ```enableInference``` config settings are your convenient choices here; `enableTraining` will enable **training** and **batch scoring** workload, `enableInference` will enable **real-time inference** workload. * For inference workload support, it requires ```azureml-fe``` router service to be deployed for routing incoming inference requests to model pod, and you would need to specify ```inferenceRouterServiceType``` config setting for ```azureml-fe```. ```azureml-fe``` can be deployed with one of following ```inferenceRouterServiceType```: * Type ```LoadBalancer```. Exposes ```azureml-fe``` externally using a cloud provider's load balancer. To specify this value, ensure that your cluster supports load balancer provisioning. Note most on-premises Kubernetes clusters might not support external load balancer. * Type ```NodePort```. Exposes ```azureml-fe``` on each Node's IP at a static port. You'll be able to contact ```azureml-fe```, from outside of cluster, by requesting ```:```. Using ```NodePort``` also allows you to set up your own load balancing solution and SSL termination for ```azureml-fe```. 
* Type ```ClusterIP```. Exposes ```azureml-fe``` on a cluster-internal IP, and it makes ```azureml-fe``` only reachable from within the cluster. For ```azureml-fe``` to serve inference requests coming outside of cluster, it requires you to set up your own load balancing solution and SSL termination for ```azureml-fe```. * For inference workload support, to ensure high availability of ```azureml-fe``` routing service, AzureML extension deployment by default creates 3 replicas of ```azureml-fe``` for clusters having 3 nodes or more. If your cluster has **less than 3 nodes**, set ```inferenceLoadbalancerHA=False```. - * For inference workload support, you would also want to consider using **HTTPS** to restrict access to model endpoints and secure the data that clients submit. For this purpose, you would need to specify either ```sslSecret``` config setting or combination of ```sslCertPemFile``` and ```sslCertKeyFile``` config settings. By default, AzureML extension deployment expects **HTTPS** support required, and you would need to provide above config setting. For development or test purposes, **HTTP** support is conveniently supported through config setting ```allowInsecureConnections=True```. + * For inference workload support, you would also want to consider using **HTTPS** to restrict access to model endpoints and secure the data that clients submit. For this purpose, you would need to specify either ```sslSecret``` config setting or combination of ```sslKeyPemFile``` and ```sslCertPemFile``` config settings. By default, AzureML extension deployment expects **HTTPS** support required, and you would need to provide above config setting. For development or test purposes, **HTTP** support is conveniently supported through config setting ```allowInsecureConnections=True```. -For a complete list of configuration settings available to choose at AzureML deployment time, see appendix [Review AzureML extension config settings](#appendix-ii-review-azureml-deployment-configuration-settings) +For a complete list of configuration settings available to choose at AzureML deployment time, see [Review AzureML extension config settings](#review-azureml-extension-configuration-settings) -## Deploy AzureML extension - example scenarios +## Deploy AzureML extension +### [CLI](#tab/deploy-extension-with-cli) +To deploy AzureML extension with CLI, use `az k8s-extension create` command passing in values for the mandatory parameters. -### Use AKS in Azure for a quick Proof of Concept, both training and inference workloads support +We list 4 typical extension deployment scenarios for reference. To deploy extension for your production usage, please carefully read the complete list of [configuration settings](#review-azureml-extension-configuration-settings). -Ensure you have fulfilled [prerequisites](#prerequisites). For AzureML extension deployment on AKS, make sure to specify ```managedClusters``` value for ```--cluster-type``` parameter. 
Run the following Azure CLI command to deploy AzureML extension: -```azurecli - az k8s-extension create --name azureml-extension --extension-type Microsoft.AzureML.Kubernetes --config enableTraining=True enableInference=True inferenceRouterServiceType=LoadBalancer allowInsecureConnections=True inferenceLoadBalancerHA=False --cluster-type managedClusters --cluster-name --resource-group --scope cluster -``` +- **Use AKS in Azure for a quick Proof of Concept, both training and inference workloads support** -### Use Minikube on your desktop for a quick POC, training workload support only + Ensure you have fulfilled [prerequisites](#prerequisites). For AzureML extension deployment on AKS, make sure to specify ```managedClusters``` value for ```--cluster-type``` parameter. Run the following Azure CLI command to deploy AzureML extension: + ```azurecli + az k8s-extension create --name --extension-type Microsoft.AzureML.Kubernetes --config enableTraining=True enableInference=True inferenceRouterServiceType=LoadBalancer allowInsecureConnections=True inferenceLoadBalancerHA=False --cluster-type managedClusters --cluster-name --resource-group --scope cluster + ``` -Ensure you have fulfilled [prerequisites](#prerequisites). Since the follow steps would create an Azure Arc connected cluster, you would need to specify ```connectedClusters``` value for ```--cluster-type``` parameter. Run following simple Azure CLI command to deploy AzureML extension: -```azurecli - az k8s-extension create --name azureml-extension --extension-type Microsoft.AzureML.Kubernetes --config enableTraining=True --cluster-type connectedClusters --cluster-name --resource-group --scope cluster -``` +- **Use Kubernetes at your lab for a quick Proof of Concept, training workload support only** -### Enable an AKS cluster in Azure for production training and inference workload + Ensure you have fulfilled [prerequisites](#prerequisites). For AzureML extension deployment on Azure Arc connected cluster, you would need to specify ```connectedClusters``` value for ```--cluster-type``` parameter. Run following simple Azure CLI command to deploy AzureML extension: + ```azurecli + az k8s-extension create --name --extension-type Microsoft.AzureML.Kubernetes --config enableTraining=True --cluster-type connectedClusters --cluster-name --resource-group --scope cluster + ``` -Ensure you have fulfilled [prerequisites](#prerequisites). Assuming your cluster has more than 3 nodes, and you will use an Azure public load balancer and HTTPS for inference workload support, run following Azure CLI command to deploy AzureML extension: -```azurecli - az k8s-extension create --name azureml-extension --extension-type Microsoft.AzureML.Kubernetes --config enableTraining=True enableInference=True inferenceRouterServiceType=LoadBalancer --config-protected sslCertPemFile= sslCertKeyFile= --cluster-type managedClusters --cluster-name --resource-group --scope cluster -``` -### Enable an Azure Arc connected cluster anywhere for production training and inference workload +- **Enable an AKS cluster in Azure for production training and inference workload** + Ensure you have fulfilled [prerequisites](#prerequisites). For AzureML extension deployment on AKS, make sure to specify ```managedClusters``` value for ```--cluster-type``` parameter. 
Assuming your cluster has more than 3 nodes, and you will use an Azure public load balancer and HTTPS for inference workload support, run following Azure CLI command to deploy AzureML extension: + ```azurecli + az k8s-extension create --name --extension-type Microsoft.AzureML.Kubernetes --config enableTraining=True enableInference=True inferenceRouterServiceType=LoadBalancer sslCname= --config-protected sslCertPemFile= sslKeyPemFile= --cluster-type managedClusters --cluster-name --resource-group --scope cluster + ``` +- **Enable an Azure Arc connected cluster anywhere for production training and inference workload using NVIDIA GPUs** -Ensure you have fulfilled [prerequisites](#prerequisites). Assuming your cluster has more than 3 nodes, you will use a NodePort service type and HTTPS for inference workload support, run following Azure CLI command to deploy AzureML extension: -```azurecli - az k8s-extension create --name azureml-extension --extension-type Microsoft.AzureML.Kubernetes --config enableTraining=True enableInference=True inferenceRouterServiceType=NodePort --config-protected sslCertPemFile= sslCertKeyFile= --cluster-type connectedClusters --cluster-name --resource-group --scope cluster -``` + Ensure you have fulfilled [prerequisites](#prerequisites). For AzureML extension deployment on Azure Arc connected cluster, make sure to specify ```connectedClusters``` value for ```--cluster-type``` parameter. Assuming your cluster has more than 3 nodes, you will use a NodePort service type and HTTPS for inference workload support, run following Azure CLI command to deploy AzureML extension: + ```azurecli + az k8s-extension create --name --extension-type Microsoft.AzureML.Kubernetes --config enableTraining=True enableInference=True inferenceRouterServiceType=NodePort sslCname= installNvidiaDevicePlugin=True installDcgmExporter=True --config-protected sslCertPemFile= sslKeyPemFile= --cluster-type connectedClusters --cluster-name --resource-group --scope cluster + ``` + +### [Azure portal](#tab/portal) + +The UI experience to deploy extension is only available for **Azure Arc-enabled Kubernetes**. If you have an AKS cluster without Azure Arc connected, you need to use CLI to deploy AzureML extension. + +1. In the [Azure portal](https://ms.portal.azure.com/#home), navigate to **Kubernetes - Azure Arc** and select your cluster. +1. Select **Extensions** (under **Settings**), and then select **+ Add**. + + :::image type="content" source="media/how-to-attach-arc-kubernetes/deploy-extension-from-ui.png" alt-text="Screenshot of adding new extension to the Arc-enabled Kubernetes cluster from Azure portal."::: + +1. From the list of available extensions, select **Azure Machine Learning extension** to deploy the latest version of the extension. + + :::image type="content" source="media/how-to-attach-arc-kubernetes/deploy-extension-from-ui-extension-list.png" alt-text="Screenshot of selecting AzureML extension from Azure portal."::: + +1. Follow the prompts to deploy the extension. You can customize the installation by configuring the installtion in the tab of **Basics**, **Configurations** and **Advanced**. For a detailed list of AzureML extension configuration settings, see [AzureML extension configuration settings](#review-azureml-extension-configuration-settings). + + :::image type="content" source="media/how-to-attach-arc-kubernetes/deploy-extension-from-ui-settings.png" alt-text="Screenshot of configuring AzureML extension settings from Azure portal."::: +1. 
On the **Review + create** tab, select **Create**. + + :::image type="content" source="media/how-to-attach-arc-kubernetes/deploy-extension-from-ui-create.png" alt-text="Screenshot of deploying new extension to the Arc-enabled Kubernetes cluster from Azure portal."::: + +1. After the deployment completes, you are able to see the AzureML extension in **Extension** page. If the extension installation succeeds, you can see **Installed** for the **Install status**. + + :::image type="content" source="media/how-to-attach-arc-kubernetes/deploy-extension-from-ui-extension-detail.png" alt-text="Screenshot of installed AzureML extensions listing in Azure portal."::: ### Verify AzureML extension deployment 1. Run the following CLI command to check AzureML extension details: ```azurecli - az k8s-extension show --name arcml-extension --cluster-type connectedClusters --cluster-name --resource-group + az k8s-extension show --name --cluster-type connectedClusters --cluster-name --resource-group ``` -1. In the response, look for "name": "azureml-extension" and "provisioningState": "Succeeded". Note it might show "provisioningState": "Pending" for the first few minutes. +1. In the response, look for "name" and "provisioningState": "Succeeded". Note it might show "provisioningState": "Pending" for the first few minutes. 1. If the provisioningState shows Succeeded, run the following command on your machine with the kubeconfig file pointed to your cluster to check that all pods under "azureml" namespace are in 'Running' state: @@ -94,316 +141,150 @@ Ensure you have fulfilled [prerequisites](#prerequisites). Assuming your cluster kubectl get pods -n azureml ``` -## Attach a Kubernetes cluster to an AzureML workspace - -### Prerequisite for Azure Arc enabled Kubernetes +### Manage AzureML extension -Azure Machine Learning workspace defaults to having a system-assigned managed identity to access Azure ML resources. The steps are completed if the system assigned default setting is on. +Update, list, show and delete an AzureML extension. +- For AKS cluster without Azure Arc connected, refer to [Usage of AKS extensions](../aks/cluster-extensions.md#usage-of-cluster-extensions). +- For Azure Arc-enabled Kubernetes, refer to [Usage of cluster extensions](../azure-arc/kubernetes/extensions.md#usage-of-cluster-extensions). -Otherwise, if a user-assigned managed identity is specified in Azure Machine Learning workspace creation, the following role assignments need to be granted to the identity manually before attaching the compute. +--- -|Azure resource name |Role to be assigned| -|--|--| -|Azure Relay|Azure Relay Owner| -|Azure Arc-enabled Kubernetes|Reader| +## Review AzureML extension configuration settings -Azure Relay resources are created under the same Resource Group as the Arc cluster. +For AzureML extension deployment configurations, use ```--config``` or ```--config-protected``` to specify list of ```key=value``` pairs. Following is the list of configuration settings available to be used for different AzureML extension deployment scenario ns. -### [Studio](#tab/studio) +|Configuration Setting Key Name |Description |Training |Inference |Training and Inference + |--|--|--|--|--| + |```enableTraining``` |```True``` or ```False```, default ```False```. **Must** be set to ```True``` for AzureML extension deployment with Machine Learning model training support. | **✓**| N/A | **✓** | + | ```enableInference``` |```True``` or ```False```, default ```False```. 
**Must** be set to ```True``` for AzureML extension deployment with Machine Learning inference support. |N/A| **✓** | **✓** | + | ```allowInsecureConnections``` |```True``` or ```False```, default `False`. **Must** be set to ```True``` to use inference HTTP endpoints for development or test purposes. |N/A| Optional | Optional | + | ```inferenceRouterServiceType``` |```loadBalancer```, ```nodePort``` or ```clusterIP```. **Required** if ```enableInference=True```. | N/A| **✓** | **✓** | + | ```internalLoadBalancerProvider``` | This config is only applicable for Azure Kubernetes Service (AKS) clusters now. Set to ```azure``` to allow the inference router to use an internal load balancer. | N/A| Optional | Optional | + |```sslSecret```| The name of the Kubernetes secret in the `azureml` namespace to store `cert.pem` (PEM-encoded SSL cert) and `key.pem` (PEM-encoded SSL key), required for inference HTTPS endpoint support, when ``allowInsecureConnections`` is set to False. You can find a sample YAML definition of sslSecret [here](./reference-kubernetes.md#sample-yaml-definition-of-kubernetes-secret-for-tlsssl). Use this config or a combination of the `sslCertPemFile` and `sslKeyPemFile` protected config settings. |N/A| Optional | Optional | + |```sslCname``` |An SSL CNAME used by the inference HTTPS endpoint. **Required** if ```allowInsecureConnections=False``` | N/A | Optional | Optional| + | ```inferenceRouterHA``` |```True``` or ```False```, default ```True```. By default, AzureML extension will deploy 3 ingress controller replicas for high availability, which requires at least 3 workers in a cluster. Set to ```False``` if your cluster has fewer than 3 workers; in this case, only one ingress controller replica is deployed. | N/A| Optional | Optional | + |```nodeSelector``` | By default, the deployed Kubernetes resources are randomly deployed to 1 or more nodes of the cluster, and daemonset resources are deployed to ALL nodes. If you want to restrict the extension deployment to specific nodes with label `key1=value1` and `key2=value2`, use `nodeSelector.key1=value1`, `nodeSelector.key2=value2` correspondingly. | Optional| Optional | Optional | + |```installNvidiaDevicePlugin``` | ```True``` or ```False```, default ```False```. [NVIDIA Device Plugin](https://github.com/NVIDIA/k8s-device-plugin#nvidia-device-plugin-for-kubernetes) is required for ML workloads on NVIDIA GPU hardware. By default, AzureML extension deployment will not install the NVIDIA Device Plugin regardless of whether the Kubernetes cluster has GPU hardware. You can set this setting to ```True``` to install it, but make sure to fulfill the [Prerequisites](https://github.com/NVIDIA/k8s-device-plugin#prerequisites). | Optional |Optional |Optional | + |```installPromOp```|```True``` or ```False```, default ```True```. The AzureML extension needs the prometheus operator to manage prometheus. Set to ```False``` to reuse the existing prometheus operator. Compatible [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/README.md) helm chart versions are from 9.3.4 to 30.0.1.| Optional| Optional | Optional | + |```installVolcano```| ```True``` or ```False```, default ```True```. The AzureML extension needs the volcano scheduler to schedule jobs. Set to ```False``` to reuse the existing volcano scheduler. Supported volcano scheduler versions are 1.4, 1.5. | Optional| N/A | Optional | + |```installDcgmExporter``` |```True``` or ```False```, default ```False```. 
Dcgm-exporter can expose GPU metrics for AzureML workloads, which can be monitored in the Azure portal. Set ```installDcgmExporter``` to ```True``` to install dcgm-exporter. If you want to utilize your own dcgm-exporter, refer to [DCGM exporter](https://github.com/Azure/AML-Kubernetes/blob/master/docs/troubleshooting.md#dcgm) |Optional |Optional |Optional | -Attaching an Azure Arc-enabled Kubernetes cluster makes it available to your workspace for training. -1. Navigate to [Azure Machine Learning studio](https://ml.azure.com). -1. Under **Manage**, select **Compute**. -1. Select the **Attached computes** tab. -1. Select **+New > Kubernetes (preview)** + |Configuration Protected Setting Key Name |Description |Training |Inference |Training and Inference + |--|--|--|--|--| + | ```sslCertPemFile```, ```sslKeyPemFile``` |Path to SSL certificate and key file (PEM-encoded), required for AzureML extension deployment with inference HTTPS endpoint support, when ``allowInsecureConnections`` is set to False. | N/A| Optional | Optional | + +## Attach a Kubernetes cluster to an AzureML workspace - :::image type="content" source="media/how-to-attach-arc-kubernetes/attach-kubernetes-cluster.png" alt-text="Screenshot of settings for Kubernetes cluster to make available in your workspace."::: +Attach an AKS or Arc-enabled Kubernetes cluster with the AzureML extension installed to an AzureML workspace. The same cluster can be attached to and shared by multiple AzureML workspaces across regions. -1. Enter a compute name and select your Azure Arc-enabled Kubernetes cluster from the dropdown. +### Prerequisite - * **(Optional)** Enter Kubernetes namespace, which defaults to `default`. All machine learning workloads will be sent to the specified Kubernetes namespace in the cluster. +An Azure Machine Learning workspace defaults to having a system-assigned managed identity to access Azure ML resources. No further steps are needed if the system-assigned default setting is on. - * **(Optional)** Assign system-assigned or user-assigned managed identity. Managed identities eliminate the need for developers to manage credentials. For more information, see [managed identities overview](../active-directory/managed-identities-azure-resources/overview.md) . - :::image type="content" source="media/how-to-attach-arc-kubernetes/configure-kubernetes-cluster-2.png" alt-text="Screenshot of settings for developer configuration of Kubernetes cluster."::: +Otherwise, if a user-assigned managed identity is specified in Azure Machine Learning workspace creation, the following role assignments need to be granted to the managed identity manually before attaching the compute. -1. Select **Attach** +|Azure resource name |Role to be assigned|Description| +|--|--|--| +|Azure Relay|Azure Relay Owner|Only applicable for Arc-enabled Kubernetes clusters. Azure Relay isn't created for AKS clusters without Arc connected.| +|Azure Arc-enabled Kubernetes|Reader|Applicable for both Arc-enabled Kubernetes clusters and AKS clusters.| - In the Attached compute tab, the initial state of your cluster is *Creating*. When the cluster is successfully attached, the state changes to *Succeeded*. Otherwise, the state changes to *Failed*. +The Azure Relay resource is created during the extension deployment under the same resource group as the Arc-enabled Kubernetes cluster. 
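As an illustration only, granting those two role assignments to the user-assigned managed identity with the Azure CLI might look like the following sketch; the principal ID and resource IDs are placeholders, not values taken from this article:

```azurecli
# Sketch: grant the roles required before attaching the compute.
# <identity-principal-id>, <relay-resource-id>, and <arc-cluster-resource-id> are placeholders.
az role assignment create --assignee "<identity-principal-id>" --role "Azure Relay Owner" --scope "<relay-resource-id>"
az role assignment create --assignee "<identity-principal-id>" --role "Reader" --scope "<arc-cluster-resource-id>"
```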
- :::image type="content" source="media/how-to-attach-arc-kubernetes/provision-resources.png" alt-text="Screenshot of attached settings for configuration of Kubernetes cluster."::: ### [CLI](#tab/cli) -You can attach an AKS or Azure Arc enabled Kubernetes cluster using the Azure Machine Learning 2.0 CLI (preview). - -Use the Azure Machine Learning CLI [`attach`](/cli/azure/ml/compute) command and set the `--type` argument to `Kubernetes` to attach your Kubernetes cluster using the Azure Machine Learning 2.0 CLI. +[!INCLUDE [cli v2](../../includes/machine-learning-cli-v2.md)] -> [!NOTE] -> Compute attach support for AKS or Azure Arc enabled Kubernetes clusters requires a version of the Azure CLI `ml` extension >= 2.0.1a4. For more information, see [Install and set up the CLI (v2)](how-to-configure-cli.md). - -The following commands show how to attach an Azure Arc-enabled Kubernetes cluster and use it as a compute target with managed identity enabled. +The following commands show how to attach an AKS and Azure Arc-enabled Kubernetes cluster, and use it as a compute target with managed identity enabled. **AKS** ```azurecli -az ml compute attach --resource-group --workspace-name --name k8s-compute --resource-id "/subscriptions//resourceGroups//providers/Microsoft.Kubernetes/managedclusters/" --type Kubernetes --identity-type UserAssigned --user-assigned-identities "subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/" --no-wait +az ml compute attach --resource-group --workspace-name --type Kubernetes --name k8s-compute --resource-id "/subscriptions//resourceGroups//providers/Microsoft.ContainerService/managedclusters/" --identity-type SystemAssigned --namespace --no-wait ``` **Azure Arc enabled Kubernetes** ```azurecli -az ml compute attach --resource-group --workspace-name --name amlarc-compute --resource-id "/subscriptions//resourceGroups//providers/Microsoft.Kubernetes/connectedClusters/" --type kubernetes --user-assigned-identities "subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/" --no-wait +az ml compute attach --resource-group --workspace-name --type Kubernetes --name amlarc-compute --resource-id "/subscriptions//resourceGroups//providers/Microsoft.Kubernetes/connectedClusters/" --user-assigned-identities "subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/" --no-wait ``` -Use the `identity_type` argument to enable `SystemAssigned` or `UserAssigned` managed identities. +Set the `--type` argument to `Kubernetes`. Use the `identity_type` argument to enable `SystemAssigned` or `UserAssigned` managed identities. > [!IMPORTANT] > `--user-assigned-identities` is only required for `UserAssigned` managed identities. Although you can provide a list of comma-separated user managed identities, only the first one is used when you attach your cluster. ---- - -## Create instance types for efficient compute resource usage - -### What are instance types? - -Instance types are an Azure Machine Learning concept that allows targeting certain types of -compute nodes for training and inference workloads. For an Azure VM, an example for an -instance type is `STANDARD_D2_V3`. - -In Kubernetes clusters, instance types are represented in a custom resource definition (CRD) that is installed with the AzureML extension. 
Instance types are represented by two elements in AzureML extension: -[nodeSelector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) -and [resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). -In short, a `nodeSelector` lets us specify which node a pod should run on. The node must have a -corresponding label. In the `resources` section, we can set the compute resources (CPU, memory and -Nvidia GPU) for the pod. - -### Default instance type - -By default, a `defaultinstancetype` with following definition is created when you attach Kuberenetes cluster to AzureML workspace: -- No `nodeSelector` is applied, meaning the pod can get scheduled on any node. -- The workload's pods are assigned default resources with 0.6 cpu cores, 1536Mi memory and 0 GPU: -```yaml -resources: - requests: - cpu: "0.6" - memory: "1536Mi" - limits: - cpu: "0.6" - memory: "1536Mi" - nvidia.com/gpu: null -``` - -> [!NOTE] -> - The default instance type purposefully uses little resources. To ensure all ML workloads -run with appropriate resources, for example GPU resource, it is highly recommended to create custom instance types. -> - `defaultinstancetype` will not appear as an InstanceType custom resource in the cluster when running the command ```kubectl get instancetype```, but it will appear in all clients (UI, CLI, SDK). -> - `defaultinstancetype` can be overridden with a custom instance type definition having the same name as `defaultinstancetype` (see [Create custom instance types](#create-custom-instance-types) section) - -## Create custom instance types +### [Python](#tab/python) -To create a new instance type, create a new custom resource for the instance type CRD. For example: +[!INCLUDE [sdk v1](../../includes/machine-learning-sdk-v1.md)] -```bash -kubectl apply -f my_instance_type.yaml -``` +```python +from azureml.core.compute import KubernetesCompute, ComputeTarget -With `my_instance_type.yaml`: -```yaml -apiVersion: amlarc.azureml.com/v1alpha1 -kind: InstanceType -metadata: - name: myinstancetypename -spec: - nodeSelector: - mylabel: mylabelvalue - resources: - limits: - cpu: "1" - nvidia.com/gpu: 1 - memory: "2Gi" - requests: - cpu: "700m" - memory: "1500Mi" -``` +# Specify a name for your Kubernetes compute +compute_target_name = "" -The following steps will create an instance type with the labeled behavior: -- Pods will be scheduled only on nodes with label `mylabel: mylabelvalue`. -- Pods will be assigned resource requests of `700m` CPU and `1500Mi` memory. -- Pods will be assigned resource limits of `1` CPU, `2Gi` memory and `1` Nvidia GPU. +# resource ID of the Arc-enabled Kubernetes cluster +cluster_resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Kubernetes/connectedClusters/" -> [!NOTE] -> - Nvidia GPU resources are only specified in the `limits` section as integer values. For more information, - see the Kubernetes [documentation](https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/#using-device-plugins). -> - CPU and memory resources are string values. -> - CPU can be specified in millicores, for example `100m`, or in full numbers, for example `"1"` - is equivalent to `1000m`. -> - Memory can be specified as a full number + suffix, for example `1024Mi` for 1024 MiB. 
+user_assigned_identity_resouce_id = ['subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/'] -It is also possible to create multiple instance types at once: +# Specify Kubernetes namespace to run AzureML workloads +ns = "default" -```bash -kubectl apply -f my_instance_type_list.yaml +try: + compute_target = ComputeTarget(workspace=ws, name=compute_target_name) + print('Found existing cluster, use it.') +except ComputeTargetException: + attach_configuration = KubernetesCompute.attach_configuration(resource_id = cluster_resource_id, namespace = ns, identity_type ='UserAssigned',identity_ids = user_assigned_identity_resouce_id) + compute_target = ComputeTarget.attach(ws, compute_target_name, attach_configuration) + compute_target.wait_for_completion(show_output=True) ``` +### [Studio](#tab/studio) -With `my_instance_type_list.yaml`: -```yaml -apiVersion: amlarc.azureml.com/v1alpha1 -kind: InstanceTypeList -items: - - metadata: - name: cpusmall - spec: - resources: - requests: - cpu: "100m" - memory: "100Mi" - limits: - cpu: "1" - nvidia.com/gpu: 0 - memory: "1Gi" - - - metadata: - name: defaultinstancetype - spec: - resources: - requests: - cpu: "1" - memory: "1Gi" - limits: - cpu: "1" - nvidia.com/gpu: 0 - memory: "1Gi" -``` +Attaching an Azure Arc-enabled Kubernetes cluster makes it available to your workspace for training. -The above example creates two instance types: `cpusmall` and `defaultinstancetype`. Above `defaultinstancetype` definition will override the `defaultinstancetype` definition created when Kubernetes cluster was attached to AzureML workspace. +1. Navigate to [Azure Machine Learning studio](https://ml.azure.com). +1. Under **Manage**, select **Compute**. +1. Select the **Attached computes** tab. +1. Select **+New > Kubernetes** -If a training or inference workload is submitted without an instance type, it uses the default -instance type. To specify a default instance type for a Kubernetes cluster, create an instance -type with name `defaultinstancetype`. It will automatically be recognized as the default. + :::image type="content" source="media/how-to-attach-arc-kubernetes/attach-kubernetes-cluster.png" alt-text="Screenshot of settings for Kubernetes cluster to make available in your workspace."::: -## Select instance type to submit training job +1. Enter a compute name and select your Azure Arc-enabled Kubernetes cluster from the dropdown. -To select an instance type for a training job using CLI (V2), specify its name as part of the -`resources` properties section in job YAML. For example: -```yaml -command: python -c "print('Hello world!')" -environment: - docker: - image: python -compute: azureml: -resources: - instance_type: -``` + * **(Optional)** Enter Kubernetes namespace, which defaults to `default`. All machine learning workloads will be sent to the specified Kubernetes namespace in the cluster. -In the above example, replace `` with the name of your Kubernetes compute -target and `` with the name of the instance type you wish to select. If there is no `instance_type` property specified, the system will use `defaultinstancetype` to submit job. - -## Select instance type to deploy model - -To select an instance type for a model deployment using CLI (V2), specify its name for `instance_type` property in deployment YAML. 
For example: - -```yaml -deployments: - - name: blue - app_insights_enabled: true - model: - name: sklearn_mnist_model - version: 1 - local_path: ./model/sklearn_mnist_model.pkl - code_configuration: - code: - local_path: ./script/ - scoring_script: score.py - instance_type: - environment: - name: sklearn-mnist-env - version: 1 - path: . - conda_file: file:./model/conda.yml - docker: - image: mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04:20210727.v1 -``` + * **(Optional)** Assign system-assigned or user-assigned managed identity. Managed identities eliminate the need for developers to manage credentials. For more information, see [managed identities overview](../active-directory/managed-identities-azure-resources/overview.md) . -In the above example, replace `` with the name of the instance type you wish to select. If there is no `instance_type` property specified, the system will use `defaultinstancetype` to deploy model. - -### Appendix I: AzureML extension components - -Upon AzureML extension deployment completes, it will create following resources in Azure cloud: - - |Resource name |Resource type | Description | - |--|--|--| - |Azure Service Bus|Azure resource|Used to sync nodes and cluster resource information to Azure Machine Learning services regularly.| - |Azure Relay|Azure resource|Route traffic between Azure Machine Learning services and the Kubernetes cluster.| - -Upon AzureML extension deployment completes, it will create following resources in Kubernetes cluster, depending on each AzureML extension deployment scenario: - - |Resource name |Resource type |Training |Inference |Training and Inference| Description | Communication with cloud service| - |--|--|--|--|--|--|--| - |relayserver|Kubernetes deployment|**✓**|**✓**|**✓**|The entry component to receive and sync the message with cloud.|Receive the request of job creation, model deployment from cloud service; sync the job status with cloud service.| - |gateway|Kubernetes deployment|**✓**|**✓**|**✓**|The gateway to communicate and send data back and forth.|Send nodes and cluster resource information to cloud services.| - |aml-operator|Kubernetes deployment|**✓**|N/A|**✓**|Manage the lifecycle of training jobs.| Token exchange with cloud token service for authentication and authorization of Azure Container Registry used by training job.| - |metrics-controller-manager|Kubernetes deployment|**✓**|**✓**|**✓**|Manage the configuration for Prometheus|N/A| - |{EXTENSION-NAME}-kube-state-metrics|Kubernetes deployment|**✓**|**✓**|**✓**|Export the cluster-related metrics to Prometheus.|N/A| - |{EXTENSION-NAME}-prometheus-operator|Kubernetes deployment|**✓**|**✓**|**✓**| Provide Kubernetes native deployment and management of Prometheus and related monitoring components.|N/A| - |amlarc-identity-controller|Kubernetes deployment|N/A|**✓**|**✓**|Request and renew Azure Blob/Azure Container Registry token through managed identity.|Token exchange with cloud token service for authentication and authorization of Azure Container Registry and Azure Blob used by inference/model deployment.| - |amlarc-identity-proxy|Kubernetes deployment|N/A|**✓**|**✓**|Request and renew Azure Blob/Azure Container Registry token through managed identity.|Token exchange with cloud token service for authentication and authorization of Azure Container Registry and Azure Blob used by inference/model deployment.| - |azureml-fe|Kubernetes deployment|N/A|**✓**|**✓**|The front-end component that routes incoming inference requests to deployed services.|azureml-fe service 
logs are sent to Azure Blob.| - |inference-operator-controller-manager|Kubernetes deployment|N/A|**✓**|**✓**|Manage the lifecycle of inference endpoints. |N/A| - |cluster-status-reporter|Kubernetes deployment|**✓**|**✓**|**✓**|Gather the cluster information, like cpu/gpu/memory usage, cluster healthiness.|N/A| - |csi-blob-controller|Kubernetes deployment|**✓**|N/A|**✓**|Azure Blob Storage Container Storage Interface(CSI) driver.|N/A| - |csi-blob-node|Kubernetes daemonset|**✓**|N/A|**✓**|Azure Blob Storage Container Storage Interface(CSI) driver.|N/A| - |fluent-bit|Kubernetes daemonset|**✓**|**✓**|**✓**|Gather the components' system log.| Upload the components' system log to cloud.| - |k8s-host-device-plugin-daemonset|Kubernetes daemonset|**✓**|**✓**|**✓**|Expose fuse to pods on each node.|N/A| - |prometheus-prom-prometheus|Kubernetes statefulset|**✓**|**✓**|**✓**|Gather and send job metrics to cloud.|Send job metrics like cpu/gpu/memory utilization to cloud.| - |volcano-admission|Kubernetes deployment|**✓**|N/A|**✓**|Volcano admission webhook.|N/A| - |volcano-controllers|Kubernetes deployment|**✓**|N/A|**✓**|Manage the lifecycle of Azure Machine Learning training job pods.|N/A| - |volcano-scheduler |Kubernetes deployment|**✓**|N/A|**✓**|Used to do in cluster job scheduling.|N/A| + :::image type="content" source="media/how-to-attach-arc-kubernetes/configure-kubernetes-cluster-2.png" alt-text="Screenshot of settings for developer configuration of Kubernetes cluster."::: -> [!IMPORTANT] - > * Azure ServiceBus and Azure Relay resources are under the same resource group as the Arc cluster resource. These resources are used to communicate with the Kubernetes cluster and modifying them will break attached compute targets. - > * By default, the deployed kubernetes deployment resourses are randomly deployed to 1 or more nodes of the cluster, and daemonset resource are deployed to ALL nodes. If you want to restrict the extension deployment to specific nodes, use `nodeSelector` configuration setting described as below. +1. Select **Attach** -> [!NOTE] - > * **{EXTENSION-NAME}:** is the extension name specified with ```az k8s-extension create --name``` CLI command. + In the Attached compute tab, the initial state of your cluster is *Creating*. When the cluster is successfully attached, the state changes to *Succeeded*. Otherwise, the state changes to *Failed*. -### Appendix II: Review AzureML deployment configuration settings + :::image type="content" source="media/how-to-attach-arc-kubernetes/provision-resources.png" alt-text="Screenshot of attached settings for configuration of Kubernetes cluster."::: + +--- -For AzureML extension deployment configurations, use ```--config``` or ```--config-protected``` to specify list of ```key=value``` pairs. Following is the list of configuration settings available to be used for different AzureML extension deployment scenario ns. +## Next steps - |Configuration Setting Key Name |Description |Training |Inference |Training and Inference - |--|--|--|--|--| - |```enableTraining``` |```True``` or ```False```, default ```False```. **Must** be set to ```True``` for AzureML extension deployment with Machine Learning model training support. | **✓**| N/A | **✓** | - | ```enableInference``` |```True``` or ```False```, default ```False```. **Must** be set to ```True``` for AzureML extension deployment with Machine Learning inference support. |N/A| **✓** | **✓** | - | ```allowInsecureConnections``` |```True``` or ```False```, default False. 
This **must** be set to ```True``` for AzureML extension deployment with HTTP endpoints support for inference, when ```sslCertPemFile``` and ```sslKeyPemFile``` are not provided. |N/A| Optional | Optional | - | ```inferenceRouterServiceType``` |```loadBalancer``` or ```nodePort```. **Must** be set for ```enableInference=true```. | N/A| **✓** | **✓** | - | ```internalLoadBalancerProvider``` | This config is only applicable for Azure Kubernetes Service(AKS) cluster now. **Must** be set to ```azure``` to allow the inference router use internal load balancer. | N/A| Optional | Optional | - |```sslSecret```| The Kubernetes secret name under azureml namespace to store `cert.pem` (PEM-encoded SSL cert) and `key.pem` (PEM-encoded SSL key), required for AzureML extension deployment with HTTPS endpoint support for inference, when ``allowInsecureConnections`` is set to False. Use this config or give static cert and key file path in configuration protected settings. |N/A| Optional | Optional | - |```sslCname``` |A SSL CName to use if enabling SSL validation on the cluster. | N/A | N/A | required when using HTTPS endpoint | - | ```inferenceLoadBalancerHA``` |```True``` or ```False```, default ```True```. By default, AzureML extension will deploy three ingress controller replicas for high availability, which requires at least three workers in a cluster. Set this value to ```False``` if you have fewer than three workers and want to deploy AzureML extension for development and testing only, in this case it will deploy one ingress controller replica only. | N/A| Optional | Optional | - |```openshift``` | ```True``` or ```False```, default ```False```. Set to ```True``` if you deploy AzureML extension on ARO or OCP cluster. The deployment process will automatically compile a policy package and load policy package on each node so AzureML services operation can function properly. | Optional| Optional | Optional | - |```nodeSelector``` | Set the node selector so the extension components and the training/inference workloads will only be deployed to the nodes with all specified selectors. Usage: `nodeSelector.key=value`, support multiple selectors. Example: `nodeSelector.node-purpose=worker nodeSelector.node-region=eastus`| Optional| Optional | Optional | - |```installNvidiaDevicePlugin``` | ```True``` or ```False```, default ```False```. [Nvidia Device Plugin](https://github.com/NVIDIA/k8s-device-plugin#nvidia-device-plugin-for-kubernetes) is required for ML workloads on Nvidia GPU hardware. By default, AzureML extension deployment will not install Nvidia Device Plugin regardless Kubernetes cluster has GPU hardware or not. User can specify this configuration setting to ```True```, so the extension will install Nvidia Device Plugin, but make sure to have [Prerequisites](https://github.com/NVIDIA/k8s-device-plugin#prerequisites) ready beforehand. | Optional |Optional |Optional | - |```blobCsiDriverEnabled```| ```True``` or ```False```, default ```True```. Blob CSI driver is required for ML workloads. User can specify this configuration setting to ```False``` if it was installed already. | Optional |Optional |Optional | - |```reuseExistingPromOp```|```True``` or ```False```, default ```False```. AzureML extension needs prometheus operator to manage prometheus. Set to ```True``` to reuse existing prometheus operator. 
Compatible [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/README.md) helm chart versions are from 9.3.4 to 30.0.1.| Optional| Optional | Optional | - |```volcanoScheduler.enable```| ```True``` or ```False```, default ```True```. AzureML extension needs volcano scheduler to schedule the job. Set to ```False``` to reuse existing volcano scheduler. Supported volcano scheduler versions are 1.4, 1.5. | Optional| N/A | Optional | - |```logAnalyticsWS``` |```True``` or ```False```, default ```False```. AzureML extension integrates with Azure LogAnalytics Workspace to provide log viewing and analysis capability through LogAalytics Workspace. This setting must be explicitly set to ```True``` if customer wants to use this capability. LogAnalytics Workspace cost may apply. |N/A |Optional |Optional | - |```installDcgmExporter``` |```True``` or ```False```, default ```False```. Dcgm-exporter is used to collect GPU metrics for GPU jobs. Specify ```installDcgmExporter``` flag to ```true``` to enable the build-in dcgm-exporter. |N/A |Optional |Optional | +- [Create and use instance types for efficient compute resource usage](./reference-kubernetes.md#create-and-use-instance-types-for-efficient-compute-resource-usage) +- [Train models with CLI v2](how-to-train-cli.md) +- [Train models with Python SDK](how-to-set-up-training-targets.md) +- [Deploy model with an online endpoint (CLI v2)](./how-to-deploy-managed-online-endpoints.md) +- [Use batch endpoint for batch scoring (CLI v2)](./how-to-use-batch-endpoint.md) - |Configuration Protected Setting Key Name |Description |Training |Inference |Training and Inference - |--|--|--|--|--| - | ```sslCertPemFile```, ```sslKeyPemFile``` |Path to SSL certificate and key file (PEM-encoded), required for AzureML extension deployment with HTTPS endpoint support for inference, when ``allowInsecureConnections`` is set to False. | N/A| Optional | Optional | - +### Examples -## Next steps +All AzureML examples can be found in [https://github.com/Azure/azureml-examples.git](https://github.com/Azure/azureml-examples). -- [Train models with CLI (v2)](how-to-train-cli.md) -- [Configure and submit training runs](how-to-set-up-training-targets.md) -- [Tune hyperparameters](how-to-tune-hyperparameters.md) -- [Train a model using Scikit-learn](how-to-train-scikit-learn.md) -- [Train a TensorFlow model](how-to-train-tensorflow.md) -- [Train a PyTorch model](how-to-train-pytorch.md) -- [Train using Azure Machine Learning pipelines](how-to-create-machine-learning-pipelines.md) -- [Train model on-premise with outbound proxy server](../azure-arc/kubernetes/quickstart-connect-cluster.md#connect-using-an-outbound-proxy-server) +For any AzureML example, you only need to update the compute target name to your Kubernetes compute target, then you are all done. 
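For instance, in a CLI v2 command job YAML from those samples, the change is roughly the `compute` line (and optionally `resources.instance_type`). The snippet below is a placeholder sketch rather than an exact sample file:

```yaml
# Sketch of the relevant part of a CLI v2 command job YAML; all values are placeholders.
$schema: https://azuremlschemas.azureedge.net/latest/commandJob.schema.json
command: python train.py
code: ./src
environment: azureml:<environment-name>@latest
compute: azureml:<your-kubernetes-compute-target>
resources:
  instance_type: <instance-type-name> # optional; defaultinstancetype is used if omitted
```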
+* Explore training job samples with CLI v2 - [https://github.com/Azure/azureml-examples/tree/main/cli/jobs](https://github.com/Azure/azureml-examples/tree/main/cli/jobs) +* Explore model deployment with online endpoint samples with CLI v2 - [https://github.com/Azure/azureml-examples/tree/main/cli/endpoints/online/kubernetes](https://github.com/Azure/azureml-examples/tree/main/cli/endpoints/online/kubernetes) +* Explore batch endpoint samples with CLI v2 - [https://github.com/Azure/azureml-examples/tree/main/cli/endpoints/batch](https://github.com/Azure/azureml-examples/tree/main/cli/endpoints/batch) +* Explore training job samples with SDK v2 -[https://github.com/Azure/azureml-examples/tree/main/sdk/jobs](https://github.com/Azure/azureml-examples/tree/main/sdk/jobs) +* Explore model deployment with online endpoint samples with SDK v2 -[https://github.com/Azure/azureml-examples/tree/main/sdk/endpoints/online/kubernetes](https://github.com/Azure/azureml-examples/tree/main/sdk/endpoints/online/kubernetes) \ No newline at end of file diff --git a/articles/machine-learning/how-to-auto-train-image-models.md b/articles/machine-learning/how-to-auto-train-image-models.md index bb62031cf93c0..d4782fd4c7ddc 100644 --- a/articles/machine-learning/how-to-auto-train-image-models.md +++ b/articles/machine-learning/how-to-auto-train-image-models.md @@ -9,7 +9,7 @@ ms.service: machine-learning ms.subservice: automl ms.custom: event-tier1-build-2022 ms.topic: how-to -ms.date: 01/18/2022 +ms.date: 05/26/2022 #Customer intent: I'm a data scientist with ML knowledge in the computer vision space, looking to build ML models using image data in Azure Machine Learning with full control of the model algorithm, hyperparameters, and training and deployment environments. --- @@ -185,7 +185,7 @@ The following is a sample JSONL file for image classification: Once your data is in JSONL format, you can create training and validation `MLTable` as shown below. -:::code language="yaml" source="~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/data/training-mltable-folder/MLTable"::: +:::code language="yaml" source="~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/data/training-mltable-folder/MLTable"::: Automated ML doesn't impose any constraints on training or validation data size for computer vision tasks. Maximum dataset size is only limited by the storage layer behind the dataset (i.e. blob store). There's no minimum number of images or labels. However, we recommend starting with a minimum of 10-15 samples per label to ensure the output model is sufficiently trained. The higher the total number of labels/classes, the more samples you need per label. @@ -211,7 +211,7 @@ validation_data: You can create data inputs from training and validation MLTable from your local directory or cloud storage with the following code: -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=data-load)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=data-load)] Training data is a required parameter and is passed in using the `training_data` parameter of the task specific `automl` type function. 
You can optionally specify another MLTable as a validation data with the `validation_data` parameter. If no validation data is specified, 20% of your training data will be used for validation by default, unless you pass `validation_data_size` argument with a different value. @@ -329,7 +329,7 @@ limits: # [Python SDK v2 (preview)](#tab/SDK-v2) -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=limit-settings)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=limit-settings)] --- @@ -404,7 +404,7 @@ sweep: # [Python SDK v2 (preview)](#tab/SDK-v2) -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=sweep-settings)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=sweep-settings)] --- @@ -426,7 +426,7 @@ image_model: # [Python SDK v2 (preview)](#tab/SDK-v2) -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=pass-arguments)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=pass-arguments)] --- @@ -514,7 +514,7 @@ az ml job create --file ./hello-automl-job-basic.yml --workspace-name [YOUR_AZUR When you've configured your AutoML Job to the desired settings, you can submit the job. -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=submit-run)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=submit-run)] --- ## Outputs and evaluation metrics diff --git a/articles/machine-learning/how-to-auto-train-nlp-models.md b/articles/machine-learning/how-to-auto-train-nlp-models.md index 6cb2350da3bec..aeae69271ad81 100644 --- a/articles/machine-learning/how-to-auto-train-nlp-models.md +++ b/articles/machine-learning/how-to-auto-train-nlp-models.md @@ -309,9 +309,9 @@ ml_client.jobs.stream(returned_job.name) See the following sample YAML files for each NLP task. 
-* [Multi-class text classification](https://github.com/Azure/azureml-examples/blob/april-sdk-preview/cli/jobs/automl-standalone-jobs/cli-automl-text-classification-newsgroup/cli-automl-text-classification-newsgroup.yml) -* [Multi-label text classification](https://github.com/Azure/azureml-examples/blob/april-sdk-preview/cli/jobs/automl-standalone-jobs/cli-automl-text-classification-multilabel-paper-cat/cli-automl-text-classification-multilabel-paper-cat.yml) -* [Named entity recognition](https://github.com/Azure/azureml-examples/blob/april-sdk-preview/cli/jobs/automl-standalone-jobs/cli-automl-text-ner-conll/cli-automl-text-ner-conll2003.yml) +* [Multi-class text classification](https://github.com/Azure/azureml-examples/blob/main/cli/jobs/automl-standalone-jobs/cli-automl-text-classification-newsgroup/cli-automl-text-classification-newsgroup.yml) +* [Multi-label text classification](https://github.com/Azure/azureml-examples/blob/main/cli/jobs/automl-standalone-jobs/cli-automl-text-classification-multilabel-paper-cat/cli-automl-text-classification-multilabel-paper-cat.yml) +* [Named entity recognition](https://github.com/Azure/azureml-examples/blob/main/cli/jobs/automl-standalone-jobs/cli-automl-text-ner-conll/cli-automl-text-ner-conll2003.yml) # [Python SDK v2 (preview)](#tab/SDK-v2) @@ -319,10 +319,10 @@ See the following sample YAML files for each NLP task. See the sample notebooks for detailed code examples for each NLP task. -* [Multi-class text classification](https://github.com/Azure/azureml-examples/blob/april-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-nlp-text-classification-multiclass-task-sentiment-analysis/automl-nlp-text-classification-multiclass-task-sentiment.ipynb) +* [Multi-class text classification](https://github.com/Azure/azureml-examples/blob/main/sdk/jobs/automl-standalone-jobs/automl-nlp-text-classification-multiclass-task-sentiment-analysis/automl-nlp-text-classification-multiclass-task-sentiment.ipynb) * [Multi-label text classification]( -https://github.com/Azure/azureml-examples/blob/april-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-nlp-text-classification-multilabel-task-paper-categorization/automl-nlp-text-classification-multilabel-task-paper-cat.ipynb) -* [Named entity recognition](https://github.com/Azure/azureml-examples/blob/april-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-nlp-text-named-entity-recognition-task/automl-nlp-text-ner-task.ipynb) +https://github.com/Azure/azureml-examples/blob/main/sdk/jobs/automl-standalone-jobs/automl-nlp-text-classification-multilabel-task-paper-categorization/automl-nlp-text-classification-multilabel-task-paper-cat.ipynb) +* [Named entity recognition](https://github.com/Azure/azureml-examples/blob/main/sdk/jobs/automl-standalone-jobs/automl-nlp-text-named-entity-recognition-task/automl-nlp-text-ner-task.ipynb) --- diff --git a/articles/machine-learning/how-to-autoscale-endpoints.md b/articles/machine-learning/how-to-autoscale-endpoints.md index 7235afe76d8a8..ed001eede0f9b 100644 --- a/articles/machine-learning/how-to-autoscale-endpoints.md +++ b/articles/machine-learning/how-to-autoscale-endpoints.md @@ -1,7 +1,7 @@ --- -title: Autoscale managed online endpoints +title: Autoscale online endpoints titleSuffix: Azure Machine Learning -description: Learn to scale up managed endpoints. Get more CPU, memory, disk space, and extra features. +description: Learn to scale up online endpoints. Get more CPU, memory, disk space, and extra features. 
ms.service: machine-learning ms.subservice: core ms.topic: how-to @@ -12,9 +12,9 @@ ms.custom: devplatv2, cliv2, event-tier1-build-2022 ms.date: 04/27/2022 --- -# Autoscale a managed online endpoint +# Autoscale an online endpoint -Autoscale automatically runs the right amount of resources to handle the load on your application. [Managed endpoints](concept-endpoints.md) supports autoscaling through integration with the Azure Monitor autoscale feature. +Autoscale automatically runs the right amount of resources to handle the load on your application. [Online endpoints](concept-endpoints.md) supports autoscaling through integration with the Azure Monitor autoscale feature. Azure Monitor autoscaling supports a rich set of rules. You can configure metrics-based scaling (for instance, CPU utilization >70%), schedule-based scaling (for example, scaling rules for peak business hours), or a combination. For more information, see [Overview of autoscale in Microsoft Azure](../azure-monitor/autoscale/autoscale-overview.md). @@ -24,7 +24,7 @@ Today, you can manage autoscaling using either the Azure CLI, REST, ARM, or the ## Prerequisites -* A deployed endpoint. [Deploy and score a machine learning model by using a managed online endpoint](how-to-deploy-managed-online-endpoints.md). +* A deployed endpoint. [Deploy and score a machine learning model by using an online endpoint](how-to-deploy-managed-online-endpoints.md). ## Define an autoscale profile diff --git a/articles/machine-learning/how-to-configure-auto-train.md b/articles/machine-learning/how-to-configure-auto-train.md index 921b16d2e17ae..3338a416dbfa9 100644 --- a/articles/machine-learning/how-to-configure-auto-train.md +++ b/articles/machine-learning/how-to-configure-auto-train.md @@ -104,16 +104,15 @@ The following shows two ways of creating an MLTable. ```Python from azure.ai.ml.constants import AssetTypes -from azure.ai.ml import automl -from azure.ai.ml.entities import JobInput +from azure.ai.ml import automl, Input # A. Create MLTable for training data from your local directory -my_training_data_input = JobInput( +my_training_data_input = Input( type=AssetTypes.MLTABLE, path="./data/training-mltable-folder" ) # B. Remote MLTable definition -my_training_data_input = JobInput(type=AssetTypes.MLTABLE, path="azureml://datastores/workspaceblobstore/paths/Classification/Train") +my_training_data_input = Input(type=AssetTypes.MLTABLE, path="azureml://datastores/workspaceblobstore/paths/Classification/Train") ``` ### Training, validation, and test data diff --git a/articles/machine-learning/how-to-configure-network-isolation-with-v2.md b/articles/machine-learning/how-to-configure-network-isolation-with-v2.md index 93787ec002e14..6b29c4b877768 100644 --- a/articles/machine-learning/how-to-configure-network-isolation-with-v2.md +++ b/articles/machine-learning/how-to-configure-network-isolation-with-v2.md @@ -44,7 +44,7 @@ The Azure Machine Learning CLI v2 uses our new v2 API platform. New features suc As mentioned in the previous section, there are two types of operations; with ARM and with the workspace. With the __legacy v1 API__, most operations used the workspace. With the v1 API, adding a private endpoint to the workspace provided network isolation for everything except CRUD operations on the workspace or compute resources. -With the __new v2 API__, most operations use ARM. So enabling a private endpoint on your workspace doesn't provide the same level of network isolation. 
Operations that use ARM communicate over public networks, and include any metadata (such as your resource IDs) or parameters used by the operation. For example, the [create or update job](/rest/api/azureml/jobs/create-or-update) api sends metadata, and [parameters](/azure/machine-learning/reference-yaml-job-command). +With the __new v2 API__, most operations use ARM. So enabling a private endpoint on your workspace doesn't provide the same level of network isolation. Operations that use ARM communicate over public networks, and include any metadata (such as your resource IDs) or parameters used by the operation. For example, the [create or update job](/rest/api/azureml/jobs/create-or-update) api sends metadata, and [parameters](./reference-yaml-job-command.md). > [!TIP] > * Public ARM operations do not surface data in your storage account on public networks. @@ -116,4 +116,4 @@ az ml workspace show -g -w --query v1LegacyMode ## Next steps * [Use a private endpoint with Azure Machine Learning workspace](how-to-configure-private-link.md). -* [Create private link for managing Azure resources](/azure/azure-resource-manager/management/create-private-link-access-portal). \ No newline at end of file +* [Create private link for managing Azure resources](../azure-resource-manager/management/create-private-link-access-portal.md). \ No newline at end of file diff --git a/articles/machine-learning/how-to-create-attach-compute-studio.md b/articles/machine-learning/how-to-create-attach-compute-studio.md index d1dd8414699c5..80f6763f163ab 100644 --- a/articles/machine-learning/how-to-create-attach-compute-studio.md +++ b/articles/machine-learning/how-to-create-attach-compute-studio.md @@ -16,10 +16,9 @@ ms.custom: contperf-fy21q1, sdkv1, event-tier1-build-2022 In this article, learn how to create and manage compute targets in Azure Machine studio. You can also create and manage compute targets with: -* Azure Machine Learning Learning SDK or CLI extension for Azure Machine Learning +* Azure Machine Learning SDK or CLI extension for Azure Machine Learning * [Compute instance](how-to-create-manage-compute-instance.md) * [Compute cluster](how-to-create-attach-compute-cluster.md) - * [Azure Kubernetes Service cluster](how-to-create-attach-kubernetes.md) * [Other compute resources](how-to-attach-compute-targets.md) * The [VS Code extension](how-to-manage-resources-vscode.md#compute-clusters) for Azure Machine Learning. @@ -68,7 +67,6 @@ Follow the previous steps to view the list of compute targets. Then use these st * [Compute instance](how-to-create-manage-compute-instance.md?tabs=azure-studio#create) * [Compute clusters](#amlcompute) - * [Inference clusters](#inference-clusters) * [Attached compute](#attached-compute) 1. Select __Create__. @@ -124,7 +122,6 @@ During cluster creation or when editing compute cluster details, in the **Advanc > [!IMPORTANT] > Using Azure Kubernetes Service with Azure Machine Learning has multiple configuration options. Some scenarios, such as networking, require additional setup and configuration. For more information on using AKS with Azure ML, see [Create and attach an Azure Kubernetes Service cluster](how-to-create-attach-kubernetes.md). - Create or attach an Azure Kubernetes Service (AKS) cluster for large scale inferencing. Use the [steps above](#portal-create) to create the AKS cluster. Then fill out the form as follows: @@ -151,7 +148,7 @@ Use the [steps above](#portal-create) to attach a compute. 
Then fill out the fo * Azure Databricks (for use in machine learning pipelines) * Azure Data Lake Analytics (for use in machine learning pipelines) * Azure HDInsight - * Kubernetes (preview) + * [Kubernetes](./how-to-attach-kubernetes-anywhere.md#attach-a-kubernetes-cluster-to-an-azureml-workspace) 1. Fill out the form and provide values for the required properties. diff --git a/articles/machine-learning/how-to-create-component-pipeline-python.md b/articles/machine-learning/how-to-create-component-pipeline-python.md index 216062419d5e0..6024202ab41e2 100644 --- a/articles/machine-learning/how-to-create-component-pipeline-python.md +++ b/articles/machine-learning/how-to-create-component-pipeline-python.md @@ -8,7 +8,7 @@ ms.subservice: mlops ms.topic: how-to author: likebupt ms.author: keli19 -ms.date: 05/10/2022 +ms.date: 05/26/2022 ms.custom: devx-track-python, sdkv2, event-tier1-build-2022 --- @@ -59,7 +59,7 @@ This article is based on the [image_classification_keras_minist_convnet.ipynb](h Import all the Azure Machine Learning required libraries that you'll need for this article: -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=required-library)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=required-library)] ## Prepare input data for your pipeline job @@ -69,7 +69,7 @@ Fashion-MNIST is a dataset of fashion images divided into 10 classes. Each image To define the input data of a job that references the Web-based data, run: -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=define-input)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=define-input)] By defining an `Input`, you create a reference to the data source location. The data remains in its existing location, so no extra storage cost is incurred. @@ -103,7 +103,7 @@ If you're following along with the example in the [AzureML Examples repo](https: By using command_component() function as a decorator, you can easily define the component's interface, metadata and code to execute from a python function. Each decorated Python function will be transformed into a single static specification (YAML) that the pipeline service can process. -:::code language="python" source="~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/prep/prep_component.py"::: +:::code language="python" source="~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/prep/prep_component.py"::: The code above define a component with display name `Prep Data` using `@command_component` decorator: @@ -127,13 +127,13 @@ Following is what a component looks like in the studio UI. You'll need to modify the runtime environment in which your component runs. 
-:::code language="python" source="~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/prep/prep_component.py" range="5-10"::: +:::code language="python" source="~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/prep/prep_component.py" range="5-10"::: The above code creates an object of `Environment` class, which represents the runtime environment in which the component runs. The `conda.yaml` file contains all packages used for the component like following: -:::code language="python" source="~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/prep/conda.yaml"::: +:::code language="python" source="~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/prep/conda.yaml"::: Now, you've prepared all source files for the `Prep Data` component. @@ -159,7 +159,7 @@ The `train.py` file contains a normal python function, which performs the traini After defining the training function successfully, you can use @command_component in Azure Machine Learning SDK v2 to wrap your function as a component, which can be used in AML pipelines. -:::code language="python" source="~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/train/train_component.py"::: +:::code language="python" source="~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/train/train_component.py"::: The code above define a component with display name `Train Image Classification Keras` using `@command_component`: @@ -169,7 +169,7 @@ The code above define a component with display name `Train Image Classification The train-model component has a slightly more complex configuration than the prep-data component. The `conda.yaml` is like following: -:::code language="yaml" source="~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/train/conda.yaml"::: +:::code language="yaml" source="~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/train/conda.yaml"::: Now, you've prepared all source files for the `Train Image Classification Keras` component. @@ -187,7 +187,7 @@ If you're following along with the example in the [AzureML Examples repo](https: The `score.py` file contains a normal python function, which performs the training model logic. -:::code language="python" source="~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/score/score.py"::: +:::code language="python" source="~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/score/score.py"::: The code in score.py takes three command-line arguments: `input_data`, `input_model` and `output_result`. The program score the input model using input data and then output the scoring result. @@ -200,7 +200,7 @@ In this section, you'll learn to create a component specification in the valid Y - Interface: inputs and outputs - Command, code, & environment: The command, code, and environment used to run the component -:::code language="python" source="~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/score/score.yaml"::: +:::code language="python" source="~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/score/score.yaml"::: * `name` is the unique identifier of the component. Its display name is `Score Image Classification Keras`. 
* This component has two inputs and one output. @@ -220,17 +220,17 @@ For prep-data component and train-model component defined by python function, yo In the following code, you import `prepare_data_component()` and `keras_train_component()` function from the `prep_component.py` file under `prep` folder and `train_component` file under `train` folder respectively. -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=load-from-dsl-component)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=load-from-dsl-component)] For score component defined by yaml, you can use `load_component()` function to load. -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=load-from-yaml)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=load-from-yaml)] ## Build your pipeline Now that you've created and loaded all components and input data to build the pipeline. You can compose them into a pipeline: -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=build-pipeline)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=build-pipeline)] The pipeline has a default compute `cpu_compute_target`, which means if you don't specify compute for a specific node, that node will run on the default compute. @@ -261,13 +261,13 @@ We'll use `DefaultAzureCredential` to get access to workspace. `DefaultAzureCred Reference for more available credentials if it doesn't work for you: [configure credential example](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb), [azure-identity reference doc](/python/api/azure-identity/azure.identity?view=azure-python&preserve-view=true ). -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=credential)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=credential)] #### Get a handle to a workspace with compute Create a `MLClient` object to manage Azure Machine Learning services. -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=workspace)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=workspace)] > [!IMPORTANT] > This code snippet expects the workspace configuration json file to be saved in the current directory or its parent. For more information on creating a workspace, see [Create and manage Azure Machine Learning workspaces](how-to-manage-workspace.md). For more information on saving the configuration to file, see [Create a workspace configuration file](how-to-configure-environment.md#workspace). 
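Where that configuration file isn't available, a rough sketch of the two usual ways to construct the `MLClient` handle follows; the subscription, resource group, and workspace names are placeholders:

```python
# Sketch: two common ways to get an MLClient handle (all IDs below are placeholders).
from azure.identity import DefaultAzureCredential
from azure.ai.ml import MLClient

credential = DefaultAzureCredential()

# Option 1: read workspace details from a config.json in the current or a parent directory.
ml_client = MLClient.from_config(credential=credential)

# Option 2: pass the workspace details explicitly.
ml_client = MLClient(
    credential=credential,
    subscription_id="<subscription-id>",
    resource_group_name="<resource-group>",
    workspace_name="<workspace-name>",
)
```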
@@ -276,7 +276,7 @@ Create a `MLClient` object to manage Azure Machine Learning services. Now that you have a handle to your workspace, you can submit your pipeline job. -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=submit-pipeline)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=submit-pipeline)] The code above submits this image classification pipeline job to an experiment called `pipeline_samples`. The experiment is created automatically if it doesn't exist. The `pipeline_input_data` uses `fashion_ds`. @@ -291,7 +291,7 @@ The call to `submit` the `Experiment` completes quickly, and produces output sim You can monitor the pipeline run by opening the link, or you can block until it completes by running: -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=stream-pipeline)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=stream-pipeline)] > [!IMPORTANT] > The first pipeline run takes roughly *15 minutes*. All dependencies must be downloaded, a Docker image is created, and the Python environment is provisioned and created. Running the pipeline again takes significantly less time because those resources are reused instead of created. However, total run time for the pipeline depends on the workload of your scripts and the processes that are running in each pipeline step. @@ -308,7 +308,7 @@ You can check the logs and outputs of each component by right clicking the compo In the previous section, you built a pipeline using three components to complete an image classification task end to end. You can also register components to your workspace so that they can be shared and reused within the workspace. The following is an example of registering the prep-data component. -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=register-component)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=register-component)] Using `ml_client.components.get()`, you can get a registered component by name and version. Using `ml_client.components.create_or_update()`, you can register a component previously loaded from a Python function or YAML. diff --git a/articles/machine-learning/how-to-create-component-pipelines-cli.md b/articles/machine-learning/how-to-create-component-pipelines-cli.md index b6ba63b1f4da4..b834ffa9e84d2 100644 --- a/articles/machine-learning/how-to-create-component-pipelines-cli.md +++ b/articles/machine-learning/how-to-create-component-pipelines-cli.md @@ -7,7 +7,7 @@ ms.service: machine-learning ms.subservice: core author: xiaoharper ms.author: zhanxia -ms.date: 05/10/2022 +ms.date: 05/26/2022 ms.topic: how-to ms.custom: devplatv2, devx-track-azurecli, event-tier1-build-2022 ms.devlang: azurecli, cliv2 @@ -92,7 +92,7 @@ Open the `services.Studio.endpoint` URL you'll see a graph visualization of the Let's take a look at the pipeline definition in the *3b_pipeline_with_data/pipeline.yml* file.
-:::code language="yaml" source="~/azureml-examples-sdk-preview/cli/jobs/pipelines-with-components/basics/3b_pipeline_with_data/pipeline.yml"::: +:::code language="yaml" source="~/azureml-examples-main/cli/jobs/pipelines-with-components/basics/3b_pipeline_with_data/pipeline.yml"::: Below table describes the most common used fields of pipeline YAML schema. See [full pipeline YAML schema here](reference-yaml-job-pipeline.md). @@ -125,7 +125,7 @@ One common scenario is to read and write data in your pipeline. In AuzreML, we u Now let's look at the *componentA.yml* as an example to understand component definition YAML. -:::code language="yaml" source="~/azureml-examples-sdk-preview/cli/jobs/pipelines-with-components/basics/3b_pipeline_with_data/componentA.yml"::: +:::code language="yaml" source="~/azureml-examples-main/cli/jobs/pipelines-with-components/basics/3b_pipeline_with_data/componentA.yml"::: The most common used schema of the component YAML is described in below table. See [full component YAML schema here](reference-yaml-component-command.md). @@ -193,7 +193,7 @@ Under **Jobs** tab, you'll see the history of all jobs that use this component. Let's use `1b_e2e_registered_components` to demo how to use registered component in pipeline YAML. Navigate to `1b_e2e_registered_components` directory, open the `pipeline.yml` file. The keys and values in the `inputs` and `outputs` fields are similar to those already discussed. The only significant difference is the value of the `component` field in the `jobs..component` entries. The `component` value is of the form `azureml::`. The `train-job` definition, for instance, specifies the latest version of the registered component `my_train` should be used: -:::code language="yaml" source="~/azureml-examples-sdk-preview/cli/jobs/pipelines-with-components/basics/1b_e2e_registered_components/pipeline.yml" range="24-36" highlight="4"::: +:::code language="yaml" source="~/azureml-examples-main/cli/jobs/pipelines-with-components/basics/1b_e2e_registered_components/pipeline.yml" range="24-36" highlight="4"::: ### Manage components diff --git a/articles/machine-learning/how-to-create-register-data-assets.md b/articles/machine-learning/how-to-create-register-data-assets.md new file mode 100644 index 0000000000000..d7b1861ceb0a8 --- /dev/null +++ b/articles/machine-learning/how-to-create-register-data-assets.md @@ -0,0 +1,290 @@ +--- +title: Create Data Assets +titleSuffix: Azure Machine Learning +description: Learn how to create Azure Machine Learning data assets. +services: machine-learning +ms.service: machine-learning +ms.subservice: mldata +ms.topic: how-to +ms.custom: contperf-fy21q1, data4ml, sdkv1 +ms.author: xunwan +author: xunwan +ms.reviewer: nibaccam +ms.date: 05/24/2022 + +# Customer intent: As an experienced data scientist, I need to package my data into a consumable and reusable object to train my machine learning models. + +--- + +# Create data assets + +> [!div class="op_single_selector" title1="Select the version of Azure Machine Learning SDK you are using:"] +> * [v1](./v1/how-to-create-register-datasets.md) +> * [v2 (current version)](how-to-create-register-datasets.md) + +[!INCLUDE [sdk v2](../../includes/machine-learning-sdk-v2.md)] +[!INCLUDE [CLI v2](../../includes/machine-learning-CLI-v2.md)] + +In this article, you learn how to create a Data asset in Azure Machine Learning. By creating a Data asset, you create a *reference* to the data source location, along with a copy of its metadata. 
Because the data remains in its existing location, you incur no extra storage cost, and don't risk the integrity of your data sources. You can create Data from Datastores, Azure Storage, public URLs, and local files. + +The benefits of creating Data assets are: + +* You can **share and reuse data** with other members of the team such that they do not need to remember file locations. + +* You can **seamlessly access data** during model training (on any supported compute type) without worrying about connection strings or data paths. + +* You can **version** the data. + + +## Prerequisites + +To create and work with Data assets, you need: + +* An Azure subscription. If you don't have one, create a free account before you begin. Try the [free or paid version of Azure Machine Learning](https://azure.microsoft.com/free/). + +* An [Azure Machine Learning workspace](how-to-manage-workspace.md). + +* The [Azure Machine Learning CLI/SDK installed](how-to-configure-cli.md) and MLTable package installed (`pip install mltable`). + +## Supported paths + +When you create a data asset in Azure Machine Learning, you'll need to specify a `path` parameter that points to its location. Below is a table that shows the different data locations supported in Azure Machine Learning and examples for the `path` parameter: + + +|Location | Examples | +|---------|---------| +|A path on your local computer | `./home/username/data/my_data` | +|A path on a public http(s) server | `https://raw.githubusercontent.com/pandas-dev/pandas/main/doc/data/titanic.csv` | +|A path on Azure Storage | `https://.blob.core.windows.net//path`
                  `abfss://@.dfs.core.windows.net/` | +|A path on a datastore | `azureml://datastores//paths/` | + + +> [!NOTE] +> When you create a data asset from a local path, it will be automatically uploaded to the default Azure Machine Learning datastore in the cloud. + +## Create a `uri_folder` data asset + +Below shows you how to create a *folder* as an asset: + +# [CLI](#tab/CLI) + +Create a `YAML` file (`.yml`): + +```yaml +$schema: https://azuremlschemas.azureedge.net/latest/data.schema.json + +# Supported paths include: +# local: ./ +# blob: https://.blob.core.windows.net// +# ADLS gen2: abfss://@.dfs.core.windows.net// +# Datastore: azureml://datastores//paths/ +type: uri_folder +name: +description: +path: +``` + +Next, create the data asset using the CLI: + +```azurecli +az ml data create -f .yml +``` + +# [Python-SDK](#tab/Python-SDK) + +You can create a data asset in Azure Machine Learning using the following Python Code: + +```python +from azure.ai.ml.entities import Data +from azure.ai.ml.constants import AssetTypes + +# Supported paths include: +# local: './' +# blob: 'https://.blob.core.windows.net//' +# ADLS gen2: 'abfss://@.dfs.core.windows.net//' +# Datastore: 'azureml://datastores//paths/' + +my_path = '' + +my_data = Data( + path=my_path, + type=AssetTypes.URI_FOLDER, + description="", + name="", + version='' +) + +ml_client.data.create_or_update(my_data) +``` + +--- + +## Create a `uri_file` data asset + +Below shows you how to create a *specific file* as a data asset: + +# [CLI](#tab/CLI) + +Sample `YAML` file `.yml` for data in local path is as below: + +```yaml +$schema: https://azuremlschemas.azureedge.net/latest/data.schema.json + +# Supported paths include: +# local: .// +# blob: https://.blob.core.windows.net/// +# ADLS gen2: abfss://@.dfs.core.windows.net// +# Datastore: azureml://datastores//paths// + +type: uri_file +name: +description: +path: +``` + +```cli +> az ml data create -f .yml +``` + +# [Python-SDK](#tab/Python-SDK) +```python +from azure.ai.ml.entities import Data +from azure.ai.ml.constants import AssetTypes + +# Supported paths include: +# local: './/' +# blob: 'https://.blob.core.windows.net///' +# ADLS gen2: 'abfss://@.dfs.core.windows.net//' +# Datastore: 'azureml://datastores//paths//' +my_path = '' + +my_data = Data( + path=my_path, + type=AssetTypes.URI_FILE, + description="", + name="", + version="" +) + +ml_client.data.create_or_update(my_data) +``` + +--- + +## Create a `mltable` data asset + +`mltable` is a way to abstract the schema definition for tabular data to make it easier to share data assets (an overview can be found in [MLTable](concept-data.md#mltable)). + +In this section, we show you how to create a data asset when the type is an `mltable`. + +### The MLTable file + +The MLTable file is a file that provides the specification of the data's schema so that the `mltable` *engine* can materialize the data into an in-memory object (Pandas/Dask/Spark). An *example* MLTable file is provided below: + +```yml +type: mltable + +paths: + - pattern: ./*.txt +transformations: + - read_delimited: + delimiter: , + encoding: ascii + header: all_files_same_headers +``` +> [!IMPORTANT] +> We recommend co-locating the MLTable file with the underlying data in storage. For example: +> +> ```Text +> ├── my_data +> │ ├── MLTable +> │ ├── file_1.txt +> . +> . +> . 
+> │ ├── file_n.txt +> ``` +> Co-locating the MLTable with the data ensures a **self-contained *artifact*** where all that is needed is stored in that one folder (`my_data`); regardless of whether that folder is stored on your local drive or in your cloud store or on a public http server. You should **not** specify *absolute paths* in the MLTable file. + +In your Python code, you materialize the MLTable artifact into a Pandas dataframe using: + +```python +import mltable + +tbl = mltable.load(uri="./my_data") +df = tbl.to_pandas_dataframe() +``` + +The `uri` parameter in `mltable.load()` should be a valid path to a local or cloud **folder** which contains a valid MLTable file. + +> [!NOTE] +> You will need the `mltable` library installed in your Environment (`pip install mltable`). + +Below shows you how to create an `mltable` data asset. The `path` can be any of the supported path formats outlined above. + + +# [CLI](#tab/CLI) + +Create a `YAML` file (`.yml`): + +```yaml +$schema: https://azuremlschemas.azureedge.net/latest/data.schema.json + +# path must point to **folder** containing MLTable artifact (MLTable file + data +# Supported paths include: +# local: ./ +# blob: https://.blob.core.windows.net// +# ADLS gen2: abfss://@.dfs.core.windows.net// +# Datastore: azureml://datastores//paths/ + +type: mltable +name: +description: +path: +``` + +> [!NOTE] +> The path points to the **folder** containing the MLTable artifact. + +Next, create the data asset using the CLI: + +```azurecli +az ml data create -f .yml +``` + +# [Python-SDK](#tab/Python-SDK) + +You can create a data asset in Azure Machine Learning using the following Python Code: + +```python +from azure.ai.ml.entities import Data +from azure.ai.ml.constants import AssetTypes + +# my_path must point to folder containing MLTable artifact (MLTable file + data +# Supported paths include: +# local: './' +# blob: 'https://.blob.core.windows.net//' +# ADLS gen2: 'abfss://@.dfs.core.windows.net//' +# Datastore: 'azureml://datastores//paths/' + +my_path = '' + +my_data = Data( + path=my_path, + type=AssetTypes.MLTABLE, + description="", + name="", + version='' +) + +ml_client.data.create_or_update(my_data) +``` + +> [!NOTE] +> The path points to the **folder** containing the MLTable artifact. + +--- + +## Next steps + +- [Read data in a job](how-to-read-write-data-v2.md#read-data-in-a-job) \ No newline at end of file diff --git a/articles/machine-learning/how-to-create-register-datasets.md b/articles/machine-learning/how-to-create-register-datasets.md deleted file mode 100644 index 1a4cb5e28bd13..0000000000000 --- a/articles/machine-learning/how-to-create-register-datasets.md +++ /dev/null @@ -1,362 +0,0 @@ ---- -title: Create Azure Machine Learning data assets -titleSuffix: Azure Machine Learning -description: Learn how to create Azure Machine Learning data assets to access your data for machine learning experiment runs. -services: machine-learning -ms.service: machine-learning -ms.subservice: mldata -ms.topic: how-to -ms.custom: contperf-fy21q1, data4ml, sdkv1, event-tier1-build-2022 -ms.author: yogipandey -author: ynpandey -ms.reviewer: nibaccam -ms.date: 05/11/2022 -#Customer intent: As an experienced data scientist, I need to package my data into a consumable and reusable object to train my machine learning models. 
---- - -# Create Azure Machine Learning data assets - -> [!div class="op_single_selector" title1="Select the version of Azure Machine Learning SDK you are using:"] -> * [v1](./v1/how-to-create-register-datasets.md) -> * [v2 (current version)](how-to-create-register-datasets.md) - -[!INCLUDE [sdk v2](../../includes/machine-learning-sdk-v2.md)] - -In this article, you learn how to create Azure Machine Learning Data to access data for your local or remote experiments with the Azure Machine Learning CLI/SDK. To understand where Data fits in Azure Machine Learning's overall data access workflow, see the [Work with Data](concept-data.md) article. - -By creating a Data asset, you create a reference to the data source location, along with a copy of its metadata. Because the data remains in its existing location, you incur no extra storage cost, and don't risk the integrity of your data sources. Also Data assets are lazily evaluated, which aids in workflow performance speeds. You can create Data from Datastores, public URLs, and local files. - -With Azure Machine Learning Data assets, you can: - -* Keep a single copy of data in your storage, referenced by Data. - -* Seamlessly access data during model training without worrying about connection strings or data paths. - -* Share data and collaborate with other users. - -## Prerequisites - -To create and work with Data assets, you need: - -* An Azure subscription. If you don't have one, create a free account before you begin. Try the [free or paid version of Azure Machine Learning](https://azure.microsoft.com/free/). - -* An [Azure Machine Learning workspace](how-to-manage-workspace.md). - -* The [Azure Machine Learning CLI/SDK installed](how-to-configure-cli.md) and MLTable package installed. - - * Create an [Azure Machine Learning compute instance](how-to-create-manage-compute-instance.md), which is a fully configured and managed development environment that includes integrated notebooks and the SDK already installed. - - **OR** - - * Work on your own Jupyter notebook and install the CLI/SDK and required packages. - -> [!IMPORTANT] -> While the package may work on older versions of Linux distros, we do not recommend using a distro that is out of mainstream support. Distros that are out of mainstream support may have security vulnerabilities, as they do not receive the latest updates. We recommend using the latest supported version of your distro that is compatible with . - -## Compute size guidance - -When creating a Data asset, review your compute processing power and the size of your data in memory. The size of your data in storage isn't the same as the size of data in a dataframe. For example, data in CSV files can expand up to 10x in a dataframe, so a 1-GB CSV file can become 10 GB in a dataframe. - -If your data is compressed, it can expand further; 20 GB of relatively sparse data stored in compressed parquet format can expand to ~400 GB in memory. - -[Learn more about optimizing data processing in Azure Machine Learning](concept-optimize-data-processing.md). - -## Data types - -Azure Machine Learning allows you to work with different types of data. Your data can be local or in the cloud (from a registered Azure ML Datastore, a common Azure Storage URL or a public data url). In this article, you'll learn about using the Python SDK v2 to work with _URIs_ and _Tables_. URIs reference a location either local to your development environment or in the cloud. Tables are a tabular data abstraction. 
- -For most scenarios, you'll use URIs (`uri_folder` and `uri_file`). A URI references a location in storage that can be easily mapped to the filesystem of a compute node when you run a job. The data is accessed by either mounting or downloading the storage to the node. - -When using tables, you'll use `mltable`. It's an abstraction for tabular data that is used for AutoML jobs, parallel jobs, and some advanced scenarios. If you're just starting to use Azure Machine Learning, and aren't using AutoML, we strongly encourage you to begin with URIs. - -If you're creating Azure ML Data asset from an existing Datastore: - -1. Verify that you have `contributor` or `owner` access to the underlying storage service of your registered Azure Machine Learning datastore. [Check your storage account permissions in the Azure portal](/azure/role-based-access-control/check-access). - -1. Create the data asset by referencing paths in the datastore. You can create a Data asset from multiple paths in multiple datastores. There's no hard limit on the number of files or data size that you can create a data asset from. - -> [!NOTE] -> For each data path, a few requests will be sent to the storage service to check whether it points to a file or a folder. This overhead may lead to degraded performance or failure. A Data asset referencing one folder with 1000 files inside is considered referencing one data path. We recommend creating Data asset referencing less than 100 paths in datastores for optimal performance. - -> [!TIP] -> You can create Data asset with identity-based data access. If you don't provide any credentials, we will use your identity by defaut. - - -> [!TIP] -> If you have dataset assets created using the SDK v1, you can still use those with SDK v2. For more information, see the [Consuming V1 Dataset Assets in V2](#consuming-v1-dataset-assets-in-v2) section. - - - -## URIs - -The code snippets in this section cover the following scenarios: -* Registering data as an asset in Azure Machine Learning -* Reading registered data assets from Azure Machine Learning in a job - -These snippets use `uri_file` and `uri_folder`. - -- `uri_file` is a type that refers to a specific file. For example, `'https://.blob.core.windows.net//path/file.csv'`. -- `uri_folder` is a type that refers to a specific folder. For example, `'https://.blob.core.windows.net//path'`. - -> [!TIP] -> We recommend using an argument parser to pass folder information into _data-plane_ code. By data-plane code, we mean your data processing and/or training code that you run in the cloud. The code that runs in your development environment and submits code to the data-plane is _control-plane_ code. -> -> Data-plane code is typically a Python script, but can be any programming language. Passing the folder as part of job submission allows you to easily adjust the path from training locally using local data, to training in the cloud. -> If you wanted to pass in just an individual file rather than the entire folder you can use the `uri_file` type. - -For a complete example, see the [working_with_uris.ipynb notebook](https://github.com/azure/azureml-previews/sdk/docs/working_with_uris.ipynb). 
- - -### Register data as URI Folder type Data - -```python -from azure.ai.ml.entities import Data -from azure.ai.ml._constants import AssetTypes - -# select one from: -my_path = 'abfss://@.dfs.core.windows.net/' # adls gen2 -my_path = 'https://.blob.core.windows.net//path' # blob - -my_data = Data( - path=my_path, - type=AssetTypes.URI_FOLDER, - description="description here", - name="a_name", - version='1' -) - -ml_client.data.create_or_update(my_data) -``` - -You can also use CLI to register a URI Folder type Data as below example. - -```azurecli -az ml data create -f .yml -``` - -Sample `YAML` file `.yml` for local path is as below: - -```yaml -$schema: https://azuremlschemas.azureedge.net/latest/data.schema.json -name: uri_folder_my_data -description: Local data asset will be created as URI folder type Data in Azure ML. -path: path -``` - -Sample `YAML` file `.yml` for data folder in an existing Azure ML Datastore is as below: -```yaml -$schema: https://azuremlschemas.azureedge.net/latest/data.schema.json -name: uri_folder_my_data -description: Datastore data asset will be created as URI folder type Data in Azure ML. -type: uri_folder -path: azureml://datastores/workspaceblobstore/paths/example-data/ -``` - -Sample `YAML` file `.yml` for data folder in storage url is as below: -```yaml -$schema: https://azuremlschemas.azureedge.net/latest/data.schema.json -name: cloud_file_wasbs_example -description: Data asset created from folder in cloud using wasbs URL. -type: uri_folder -path: wasbs://mainstorage9c05dabf5c924.blob.core.windows.net/azureml-blobstore-54887b46-3cb0-485b-bb15-62e7b5578ee6/example-data/ -``` - - -### Consume registered URI Folder data assets in job - -```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes - -registered_data_asset = ml_client.data.get(name='titanic', version='1') - -my_job_inputs = { - "input_data": JobInput( - type=AssetTypes.URI_FOLDER, - path=registered_data_asset.id - ) -} - -job = CommandJob( - code="./src", - command='python read_data_asset.py --input_folder ${{inputs.input_data}}', - inputs=my_job_inputs, - environment="AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:9", - compute="cpu-cluster" -) - -#submit the command job -returned_job = ml_client.create_or_update(job) -#get a URL for the status of the job -returned_job.services["Studio"].endpoint -``` - -### Register data as URI File type Data - -```python -from azure.ai.ml.entities import Data -from azure.ai.ml._constants import AssetTypes - -# select one from: -my_file_path = '/' # local -my_file_path = 'abfss://@.dfs.core.windows.net//' # adls gen2 -my_file_path = 'https://.blob.core.windows.net///' # blob - -my_data = Data( - path=my_file_path, - type=AssetTypes.URI_FILE, - description="description here", - name="a_name", - version='1' -) - -ml_client.data.create_or_update(my_data) -``` - -You can also use CLI to register a URI File type Data as below example. - -```cli -> az ml data create -f .yml -``` -Sample `YAML` file `.yml` for data in local path is as below: -```yaml -$schema: https://azuremlschemas.azureedge.net/latest/data.schema.json -name: uri_file_my_data -description: Local data asset will be created as URI folder type Data in Azure ML. 
-path: ./paths/example-data.csv -``` - -Sample `YAML` file `.yml` for data in an existing Azure ML Datastore is as below: -```yaml -$schema: https://azuremlschemas.azureedge.net/latest/data.schema.json -name: uri_file_my_data -description: Datastore data asset will be created as URI folder type Data in Azure ML. -type: uri_file -path: azureml://datastores/workspaceblobstore/paths/example-data.csv -``` - -Sample `YAML` file `.yml` for data in storage url is as below: -```yaml -$schema: https://azuremlschemas.azureedge.net/latest/data.schema.json -name: cloud_file_wasbs_example -description: Data asset created from folder in cloud using wasbs URL. -type: uri_file -path: wasbs://mainstorage9c05dabf5c924.blob.core.windows.net/azureml-blobstore-54887b46-3cb0-485b-bb15-62e7b5578ee6/paths/example-data.csv -``` - - -## MLTable - -### Register data as MLTable type Data assets -Registering a `mltable` as an asset in Azure Machine Learning -You can register a `mltable` as a data asset in Azure Machine Learning. The benefits of registering data are: - -Easy to share with other members of the team (no need to remember file locations) -Versioning of the metadata (location, description, etc.) -Below we show an example of versioning the sample data in this repo. The data is uploaded to cloud storage and registered as an asset. - -```python -from azure.ai.ml.entities import Data -from azure.ai.ml._constants import AssetTypes - -my_data = Data( - path="./sample_data", - type=AssetTypes.MLTABLE, - description="Titanic Data", - name="titanic-mltable", - version='1' -) - -ml_client.data.create_or_update(my_data) -``` - -> [!TIP] -> Whilst the above example shows a local file. Remember that path supports cloud storage (https, abfss, wasbs protocols). Therefore, if you want to register data in a > cloud location just specify the path with any of the supported protocols. - -You can also use CLI and following YAML that describes an MLTable to register MLTable Data. -```cli -> az ml data create -f .yml -``` -```yaml -paths: - - file: ./titanic.csv -transformations: - - read_delimited: - delimiter: ',' - encoding: 'ascii' - empty_as_string: false - header: from_first_file -``` - -The contents of the MLTable file specify the underlying data location (here a local path) and also the transforms to perform on the underlying data before materializing into a pandas/spark/dask data frame. The important part here's that the MLTable-artifact doesn't have any absolute paths, making it *self-contained*. All the information stored in one folder; regardless of whether that folder is stored on your local drive or in your cloud drive or on a public http server. - -To consume the data in a job or interactive session, use `mltable`: - -```python -import mltable - -tbl = mltable.load("./sample_data") -df = tbl.to_pandas_dataframe() -``` - -For a full example of using an MLTable, see the [Working with MLTable notebook](https://github.com/Azure/azureml-examples/blob/samuel100/mltable/sdk/assets/data/working_with_mltable.ipynb). 
- - - - - -## Consuming V1 dataset assets in V2 - -> [!NOTE] -> While full backward compatibility is provided, if your intention with your V1 `FileDataset` assets was to have a single path to a file or folder with no loading transforms (sample, take, filter, etc.), then we recommend that you re-create them as a `uri_file`/`uri_folder` using the v2 CLI: -> -> ```cli -> az ml data create --file my-data-asset.yaml -> ``` - -Registered v1 `FileDataset` and `TabularDataset` data assets can be consumed in an v2 job using `mltable`. To use the v1 assets, add the following definition in the `inputs` section of your job yaml: - -```yaml -inputs: - my_v1_dataset: - type: mltable - path: azureml:myv1ds:1 - mode: eval_mount -``` - -The following example shows how to do this using the v2 SDK: - -```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes - -registered_v1_data_asset = ml_client.data.get(name='', version='') - -my_job_inputs = { - "input_data": JobInput( - type=AssetTypes.MLTABLE, - path=registered_v1_data_asset.id, - mode="eval_mount" - ) -} - -job = CommandJob( - code="./src", #local path where the code is stored - command='python train.py --input_data ${{inputs.input_data}}', - inputs=my_job_inputs, - environment="AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:9", - compute="cpu-cluster" -) - -#submit the command job -returned_job = ml_client.jobs.create_or_update(job) -#get a URL for the status of the job -returned_job.services["Studio"].endpoint -``` - - -## Next steps - -* [Install and set up Python SDK v2 (preview)](https://aka.ms/sdk-v2-install) -* [Train models with the Python SDK v2 (preview)](how-to-train-sdk.md) -* [Tutorial: Create production ML pipelines with Python SDK v2 (preview)](tutorial-pipeline-python-sdk.md) diff --git a/articles/machine-learning/how-to-datastore.md b/articles/machine-learning/how-to-datastore.md new file mode 100644 index 0000000000000..4605683bcfe24 --- /dev/null +++ b/articles/machine-learning/how-to-datastore.md @@ -0,0 +1,420 @@ +--- +title: Use datastores +titleSuffix: Azure Machine Learning +description: Learn how to use datastores to connect to Azure storage services during training with Azure Machine Learning. +services: machine-learning +ms.service: machine-learning +ms.subservice: mldata +ms.topic: how-to +ms.author: yogipandey +author: ynpandey +ms.reviewer: nibaccam +ms.date: 01/28/2022 +ms.custom: contperf-fy21q1, devx-track-python, data4ml + + +# Customer intent: As an experienced Python developer, I need to make my data in Azure storage available to my remote compute to train my machine learning models. +--- + +# Create datastores + +In this article, learn how to connect to data storage services on Azure with Azure Machine Learning datastores. + +## Prerequisites + +- An Azure subscription. If you don't have an Azure subscription, create a free account before you begin. Try the [free or paid version of Azure Machine Learning](https://azure.microsoft.com/free/). + +- The [Azure Machine Learning SDK for Python](/python/api/overview/azure/ml/intro). + +- An Azure Machine Learning workspace. + +> [!NOTE] +> Azure Machine Learning datastores do **not** create the underlying storage accounts, rather they link an **existing** storage account for use in Azure Machine Learning. It is not a requirement to use Azure Machine Learning datastores - you can use storage URIs directly assuming you have access to the underlying data. 
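Before creating new datastores, it can help to confirm the workspace connection and see what's already registered. The following is a minimal, illustrative sketch (not part of the original article); it assumes the `azure-ai-ml` and `azure-identity` packages are installed and that a workspace `config.json` is available in the current directory or a parent.

```python
from azure.identity import DefaultAzureCredential
from azure.ai.ml import MLClient

# Connect to the workspace described by config.json.
ml_client = MLClient.from_config(credential=DefaultAzureCredential())

# Every workspace starts with built-in datastores such as 'workspaceblobstore';
# list what is already registered before adding more.
for datastore in ml_client.datastores.list():
    print(datastore.name, datastore.type)

# Show the workspace default datastore.
print(ml_client.datastores.get_default().name)
```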
+ + +## Create an Azure Blob datastore + +# [CLI: Identity-based access](#tab/cli-identity-based-access) +Create the following YAML file (updating the values): + +```yaml +# my_blob_datastore.yml +$schema: https://azuremlschemas.azureedge.net/latest/azureBlob.schema.json +name: my_blob_ds # add name of your datastore here +type: azure_blob +description: here is a description # add a description of your datastore here +account_name: my_account_name # add storage account name here +container_name: my_container_name # add storage container name here +``` + +Create the Azure Machine Learning datastore in the CLI: + +```azurecli +az ml datastore create --file my_blob_datastore.yml +``` + +# [CLI: Account key](#tab/cli-account-key) +Create the following YAML file (updating the values): + +```yaml +# my_blob_datastore.yml +$schema: https://azuremlschemas.azureedge.net/latest/azureBlob.schema.json +name: blob_example +type: azure_blob +description: Datastore pointing to a blob container. +account_name: mytestblobstore +container_name: data-container +credentials: + account_key: XXXxxxXXXxXXXXxxXXXXXxXXXXXxXxxXxXXXxXXXxXXxxxXXxxXXXxXxXXXxxXxxXXXXxxxxxXXxxxxxxXXXxXXX +``` + +Create the Azure Machine Learning datastore in the CLI: + +```azurecli +az ml datastore create --file my_blob_datastore.yml +``` + +# [CLI: SAS](#tab/cli-sas) +Create the following YAML file (updating the values): + +```yaml +# my_blob_datastore.yml +$schema: https://azuremlschemas.azureedge.net/latest/azureBlob.schema.json +name: blob_sas_example +type: azure_blob +description: Datastore pointing to a blob container using SAS token. +account_name: mytestblobstore +container_name: data-container +credentials: + sas_token: ?xx=XXXX-XX-XX&xx=xxxx&xxx=xxx&xx=xxxxxxxxxxx&xx=XXXX-XX-XXXXX:XX:XXX&xx=XXXX-XX-XXXXX:XX:XXX&xxx=xxxxx&xxx=XXxXXXxxxxxXXXXXXXxXxxxXXXXXxxXXXXXxXXXXxXXXxXXxXX +``` + +Create the Azure Machine Learning datastore in the CLI: + +```azurecli +az ml datastore create --file my_blob_datastore.yml +``` + +# [Python SDK: Identity-based access](#tab/sdk-identity-based-access) + +```python +from azure.ai.ml.entities import AzureBlobDatastore +from azure.ai.ml import MLClient + +ml_client = MLClient.from_config() + +store = AzureBlobDatastore( + name="", + description="", + account_name="", + container_name="" +) + +ml_client.create_or_update(store) +``` + +# [Python SDK: Account key](#tab/sdk-account-key) + +```python +from azure.ai.ml.entities import AzureBlobDatastore +from azure.ai.ml import MLClient + +ml_client = MLClient.from_config() + +store = AzureBlobDatastore( + name="blob-protocol-example", + description="Datastore pointing to a blob container using wasbs protocol.", + account_name="mytestblobstore", + container_name="data-container", + protocol="wasbs", + credentials={ + "account_key": "XXXxxxXXXxXXXXxxXXXXXxXXXXXxXxxXxXXXxXXXxXXxxxXXxxXXXxXxXXXxxXxxXXXXxxxxxXXxxxxxxXXXxXXX" + }, +) + +ml_client.create_or_update(store) +``` + +# [Python SDK: SAS](#tab/sdk-SAS) + +```python +from azure.ai.ml.entities import AzureBlobDatastore +from azure.ai.ml import MLClient + +ml_client = MLClient.from_config() + +store = AzureBlobDatastore( + name="blob-sas-example", + description="Datastore pointing to a blob container using SAS token.", + account_name="mytestblobstore", + container_name="data-container", + credentials={ + "sas_token": "?xx=XXXX-XX-XX&xx=xxxx&xxx=xxx&xx=xxxxxxxxxxx&xx=XXXX-XX-XXXXX:XX:XXX&xx=XXXX-XX-XXXXX:XX:XXX&xxx=xxxxx&xxx=XXxXXXxxxxxXXXXXXXxXxxxXXXXXxxXXXXXxXXXXxXXXxXXxXX" + }, +) + 
+ml_client.create_or_update(store) +``` +--- + +## Create an Azure Data Lake Gen2 datastore + +# [CLI: Identity-based access](#tab/cli-adls-identity-based-access) +Create the following YAML file (updating the values): + +```yaml +# my_adls_datastore.yml +$schema: https://azuremlschemas.azureedge.net/latest/azureDataLakeGen2.schema.json +name: adls_gen2_credless_example +type: azure_data_lake_gen2 +description: Credential-less datastore pointing to an Azure Data Lake Storage Gen2. +account_name: mytestdatalakegen2 +filesystem: my-gen2-container +``` + +Create the Azure Machine Learning datastore in the CLI: + +```azurecli +az ml datastore create --file my_adls_datastore.yml +``` + +# [CLI: Service principal](#tab/cli-adls-sp) +Create the following YAML file (updating the values): + +```yaml +# my_adls_datastore.yml +$schema: https://azuremlschemas.azureedge.net/latest/azureDataLakeGen2.schema.json +name: adls_gen2_example +type: azure_data_lake_gen2 +description: Datastore pointing to an Azure Data Lake Storage Gen2. +account_name: mytestdatalakegen2 +filesystem: my-gen2-container +credentials: + tenant_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + client_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + client_secret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +``` + +Create the Azure Machine Learning datastore in the CLI: + +```azurecli +az ml datastore create --file my_adls_datastore.yml +``` + +# [Python SDK: Identity-based access](#tab/sdk-adls-identity-access) + +```python +from azure.ai.ml.entities import AzureDataLakeGen2Datastore +from azure.ai.ml import MLClient + +ml_client = MLClient.from_config() + +store = AzureDataLakeGen2Datastore( + name="", + description="", + account_name="", + file_system="" +) + +ml_client.create_or_update(store) +``` + +# [Python SDK: Service principal](#tab/sdk-adls-sp) + +```python +from azure.ai.ml.entities import AzureDataLakeGen2Datastore +from azure.ai.ml import MLClient + +ml_client = MLClient.from_config() + +store = AzureDataLakeGen2Datastore( + name="adls-gen2-example", + description="Datastore pointing to an Azure Data Lake Storage Gen2.", + account_name="mytestdatalakegen2", + filesystem="my-gen2-container", + credentials={ + "tenant_id": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", + "client_id": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", + "client_secret": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", + }, +) + +ml_client.create_or_update(store) +``` +--- + +## Create an Azure Files datastore + +# [CLI: Account key](#tab/cli-azfiles-account-key) +Create the following YAML file (updating the values): + +```yaml +# my_files_datastore.yml +$schema: https://azuremlschemas.azureedge.net/latest/azureFile.schema.json +name: file_example +type: azure_file +description: Datastore pointing to an Azure File Share. +account_name: mytestfilestore +file_share_name: my-share +credentials: + account_key: XxXxXxXXXXXXXxXxXxxXxxXXXXXXXXxXxxXXxXXXXXXXxxxXxXXxXXXXXxXXxXXXxXxXxxxXXxXXxXXXXXxXxxXX +``` + +Create the Azure Machine Learning datastore in the CLI: + +```azurecli +az ml datastore create --file my_files_datastore.yml +``` + +# [CLI: SAS](#tab/cli-azfiles-sas) +Create the following YAML file (updating the values): + +```yaml +# my_files_datastore.yml +$schema: https://azuremlschemas.azureedge.net/latest/azureFile.schema.json +name: file_sas_example +type: azure_file +description: Datastore pointing to an Azure File Share using SAS token. 
+account_name: mytestfilestore +file_share_name: my-share +credentials: + sas_token: ?xx=XXXX-XX-XX&xx=xxxx&xxx=xxx&xx=xxxxxxxxxxx&xx=XXXX-XX-XXXXX:XX:XXX&xx=XXXX-XX-XXXXX:XX:XXX&xxx=xxxxx&xxx=XXxXXXxxxxxXXXXXXXxXxxxXXXXXxxXXXXXxXXXXxXXXxXXxXX +``` + +Create the Azure Machine Learning datastore in the CLI: + +```azurecli +az ml datastore create --file my_files_datastore.yml +``` + +# [Python SDK: Account key](#tab/sdk-azfiles-accountkey) + +```python +from azure.ai.ml.entities import AzureFileDatastore +from azure.ai.ml import MLClient + +ml_client = MLClient.from_config() + +store = AzureFileDatastore( + name="file-example", + description="Datastore pointing to an Azure File Share.", + account_name="mytestfilestore", + file_share_name="my-share", + credentials={ + "account_key": "XXXxxxXXXxXXXXxxXXXXXxXXXXXxXxxXxXXXxXXXxXXxxxXXxxXXXxXxXXXxxXxxXXXXxxxxxXXxxxxxxXXXxXXX" + }, +) + +ml_client.create_or_update(store) +``` + +# [Python SDK: SAS](#tab/sdk-azfiles-sas) + +```python +from azure.ai.ml.entities import AzureFileDatastore +from azure.ai.ml import MLClient + +ml_client = MLClient.from_config() + +store = AzureFileDatastore( + name="file-sas-example", + description="Datastore pointing to an Azure File Share using SAS token.", + account_name="mytestfilestore", + file_share_name="my-share", + credentials={ + "sas_token": "?xx=XXXX-XX-XX&xx=xxxx&xxx=xxx&xx=xxxxxxxxxxx&xx=XXXX-XX-XXXXX:XX:XXX&xx=XXXX-XX-XXXXX:XX:XXX&xxx=xxxxx&xxx=XXxXXXxxxxxXXXXXXXxXxxxXXXXXxxXXXXXxXXXXxXXXxXXxXX" + }, +) + +ml_client.create_or_update(store) +``` +--- + +## Create an Azure Data Lake Gen1 datastore + +# [CLI: Identity-based access](#tab/cli-adlsgen1-identity-based-access) +Create the following YAML file (updating the values): + +```yaml +# my_adls_datastore.yml +$schema: https://azuremlschemas.azureedge.net/latest/azureDataLakeGen1.schema.json +name: alds_gen1_credless_example +type: azure_data_lake_gen1 +description: Credential-less datastore pointing to an Azure Data Lake Storage Gen1. +store_name: mytestdatalakegen1 +``` + +Create the Azure Machine Learning datastore in the CLI: + +```azurecli +az ml datastore create --file my_adls_datastore.yml +``` + +# [CLI: Service principal](#tab/cli-adlsgen1-sp) +Create the following YAML file (updating the values): + +```yaml +# my_adls_datastore.yml +$schema: https://azuremlschemas.azureedge.net/latest/azureDataLakeGen1.schema.json +name: adls_gen1_example +type: azure_data_lake_gen1 +description: Datastore pointing to an Azure Data Lake Storage Gen1. 
+store_name: mytestdatalakegen1 +credentials: + tenant_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + client_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + client_secret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +``` + +Create the Azure Machine Learning datastore in the CLI: + +```azurecli +az ml datastore create --file my_adls_datastore.yml +``` + +# [Python SDK: Identity-based access](#tab/sdk-adlsgen1-identity-access) + +```python +from azure.ai.ml.entities import AzureDataLakeGen1Datastore +from azure.ai.ml import MLClient + +ml_client = MLClient.from_config() + +store = AzureDataLakeGen1Datastore( + name="", + store_name="", + description="", +) + +ml_client.create_or_update(store) +``` + +# [Python SDK: Service principal](#tab/sdk-adlsgen1-sp) + +```python +from azure.ai.ml.entities import AzureDataLakeGen1Datastore +from azure.ai.ml import MLClient + +ml_client = MLClient.from_config() + +store = AzureDataLakeGen1Datastore( + name="adls-gen1-example", + description="Datastore pointing to an Azure Data Lake Storage Gen1.", + store_name="mytestdatalakegen1", + credentials={ + "tenant_id": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", + "client_id": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", + "client_secret": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", + }, +) + +ml_client.create_or_update(store) +``` + +--- + +## Next steps + +- [Read data in a job](how-to-read-write-data-v2.md#read-data-in-a-job) +- [Create data assets](how-to-create-register-data-assets.md#create-data-assets) +- [Data administration](how-to-administrate-data-authentication.md#data-administration) \ No newline at end of file diff --git a/articles/machine-learning/how-to-deploy-local.md b/articles/machine-learning/how-to-deploy-local.md index 15b615c485e0e..014e5764f281c 100644 --- a/articles/machine-learning/how-to-deploy-local.md +++ b/articles/machine-learning/how-to-deploy-local.md @@ -245,7 +245,7 @@ model = Model.register(model_path="sklearn_regression_model.pkl", You can then find your newly registered model on the Azure Machine Learning **Model** tab: -:::image type="content" source="media/how-to-deploy-local/registered-model.png" alt-text="Screenshot of Azure Machine Learning Model tab, showing an uploaded model."::: +:::image type="content" source="media/how-to-deploy-local/registered-model.png" alt-text="Screenshot of Azure Machine Learning Model tab, showing an uploaded model." lightbox="media/how-to-deploy-local/registered-model.png"::: For more information on uploading and updating models and environments, see [Register model and deploy locally with advanced usages](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/deploy-to-local/register-model-deploy-local-advanced.ipynb). diff --git a/articles/machine-learning/how-to-deploy-managed-online-endpoint-sdk-v2.md b/articles/machine-learning/how-to-deploy-managed-online-endpoint-sdk-v2.md new file mode 100644 index 0000000000000..d9cae8b860be1 --- /dev/null +++ b/articles/machine-learning/how-to-deploy-managed-online-endpoint-sdk-v2.md @@ -0,0 +1,313 @@ +--- +title: Deploy machine learning models to managed online endpoint using Python SDK v2 (preview). +titleSuffix: Azure Machine Learning +description: Learn to deploy your machine learning model to Azure using Python SDK v2 (preview). 
+services: machine-learning +ms.service: machine-learning +ms.subservice: mlops +ms.author: ssambare +ms.reviewer: larryfr +author: shivanissambare +ms.date: 05/25/2022 +ms.topic: how-to +ms.custom: how-to, devplatv2, sdkv2, deployment +--- + +# Deploy and score a machine learning model with managed online endpoint using Python SDK v2 (preview) + +[!INCLUDE [sdk v2](../../includes/machine-learning-sdk-v2.md)] + +> [!IMPORTANT] +> SDK v2 is currently in public preview. +> The preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. +> For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). + +In this article, you learn how to deploy your machine learning model to managed online endpoint and get predictions. You'll begin by deploying a model on your local machine to debug any errors, and then you'll deploy and test it in Azure. + +## Prerequisites + +* If you don't have an Azure subscription, create a free account before you begin. Try the [free or paid version of Azure Machine Learning](https://azure.microsoft.com/free/) today. +* The [Azure Machine Learning SDK v2 for Python](/python/api/overview/azure/ml/installv2). +* You must have an Azure resource group, and you (or the service principal you use) must have Contributor access to it. +* You must have an Azure Machine Learning workspace. +* To deploy locally, you must install [Docker Engine](https://docs.docker.com/engine/) on your local computer. We highly recommend this option, so it's easier to debug issues. + +### Clone examples repository + +To run the training examples, first clone the examples repository and change into the `sdk` directory: + +```bash +git clone --depth 1 https://github.com/Azure/azureml-examples +cd azureml-examples/sdk +``` + +> [!TIP] +> Use `--depth 1` to clone only the latest commit to the repository, which reduces time to complete the operation. + +## Connect to Azure Machine Learning workspace + +The [workspace](concept-workspace.md) is the top-level resource for Azure Machine Learning, providing a centralized place to work with all the artifacts you create when you use Azure Machine Learning. In this section, we'll connect to the workspace in which you'll perform deployment tasks. + +1. Import the required libraries: + + ```python + # import required libraries + from azure.ai.ml import MLClient + from azure.ai.ml.entities import ( + ManagedOnlineEndpoint, + ManagedOnlineDeployment, + Model, + Environment, + CodeConfiguration, + ) + from azure.identity import DefaultAzureCredential + ``` + +1. Configure workspace details and get a handle to the workspace: + + To connect to a workspace, we need identifier parameters - a subscription, resource group and workspace name. We'll use these details in the `MLClient` from `azure.ai.ml` to get a handle to the required Azure Machine Learning workspace. This example uses the [default Azure authentication](/python/api/azure-identity/azure.identity.defaultazurecredential). 
+ + ```python + # enter details of your AML workspace + subscription_id = "" + resource_group = "" + workspace = "" + ``` + + ```python + # get a handle to the workspace + ml_client = MLClient( + DefaultAzureCredential(), subscription_id, resource_group, workspace + ) + ``` + +## Create local endpoint and deployment + +> [!NOTE] +> To deploy locally, [Docker Engine](https://docs.docker.com/engine/install/) must be installed. +> Docker Engine must be running. Docker Engine typically starts when the computer starts. If it doesn't, you can [troubleshoot Docker Engine](https://docs.docker.com/config/daemon/#start-the-daemon-manually). + +1. Create local endpoint: + + The goal of a local endpoint deployment is to validate and debug your code and configuration before you deploy to Azure. Local deployment has the following limitations: + + * Local endpoints don't support traffic rules, authentication, or probe settings. + * Local endpoints support only one deployment per endpoint. + + ```python + # Creating a local endpoint + import datetime + + local_endpoint_name = "local-" + datetime.datetime.now().strftime("%m%d%H%M%f") + + # create an online endpoint + endpoint = ManagedOnlineEndpoint( + name=local_endpoint_name, description="this is a sample local endpoint" + ) + ``` + + ```python + ml_client.online_endpoints.begin_create_or_update(endpoint, local=True) + ``` + +1. Create local deployment: + + The example contains all the files needed to deploy a model on an online endpoint. To deploy a model, you must have: + + * Model files (or the name and version of a model that's already registered in your workspace). In the example, we have a scikit-learn model that does regression. + * The code that's required to score the model. In this case, we have a score.py file. + * An environment in which your model runs. As you'll see, the environment might be a Docker image with Conda dependencies, or it might be a Dockerfile. + * Settings to specify the instance type and scaling capacity. + + **Key aspects of deployment** + * `name` - Name of the deployment. + * `endpoint_name` - Name of the endpoint to create the deployment under. + * `model` - The model to use for the deployment. This value can be either a reference to an existing versioned model in the workspace or an inline model specification. + * `environment` - The environment to use for the deployment. This value can be either a reference to an existing versioned environment in the workspace or an inline environment specification. + * `code_configuration` - the configuration for the source code and scoring script + * `path`- Path to the source code directory for scoring the model + * `scoring_script` - Relative path to the scoring file in the source code directory + * `instance_type` - The VM size to use for the deployment. For the list of supported sizes, see [Managed online endpoints SKU list](reference-managed-online-endpoints-vm-sku-list.md). 
+ * `instance_count` - The number of instances to use for the deployment + + ```python + model = Model(path="../model-1/model/sklearn_regression_model.pkl") + env = Environment( + conda_file="../model-1/environment/conda.yml", + image="mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04:20210727.v1", + ) + + blue_deployment = ManagedOnlineDeployment( + name="blue", + endpoint_name=local_endpoint_name, + model=model, + environment=env, + code_configuration=CodeConfiguration( + code="../model-1/onlinescoring", scoring_script="score.py" + ), + instance_type="Standard_F2s_v2", + instance_count=1, + ) + ``` + + ```python + ml_client.online_deployments.begin_create_or_update( + deployment=blue_deployment, local=True + ) + ``` + +## Verify the local deployment succeeded + +1. Check the status to see whether the model was deployed without error: + + ```python + ml_client.online_endpoints.get(name=local_endpoint_name, local=True) + ``` + +1. Get logs: + + ```python + ml_client.online_deployments.get_logs( + name="blue", endpoint_name=local_endpoint_name, local=True, lines=50 + ) + ``` + +## Invoke the local endpoint + +Invoke the endpoint to score the model by using the convenience command invoke and passing query parameters that are stored in a JSON file + +```python +ml_client.online_endpoints.invoke( + endpoint_name=local_endpoint_name, + request_file="../model-1/sample-request.json", + local=True, +) +``` + +## Deploy your online endpoint to Azure + +Next, deploy your online endpoint to Azure. + +1. Configure online endpoint: + + > [!TIP] + > * `endpoint_name`: The name of the endpoint. It must be unique in the Azure region. For more information on the naming rules, see [managed online endpoint limits](how-to-manage-quotas.md#azure-machine-learning-managed-online-endpoints). + > * `auth_mode` : Use `key` for key-based authentication. Use `aml_token` for Azure Machine Learning token-based authentication. A `key` doesn't expire, but `aml_token` does expire. For more information on authenticating, see [Authenticate to an online endpoint](how-to-authenticate-online-endpoint.md). + > * Optionally, you can add description, tags to your endpoint. + + ```python + # Creating a unique endpoint name with current datetime to avoid conflicts + import datetime + + online_endpoint_name = "endpoint-" + datetime.datetime.now().strftime("%m%d%H%M%f") + + # create an online endpoint + endpoint = ManagedOnlineEndpoint( + name=online_endpoint_name, + description="this is a sample online endpoint", + auth_mode="key", + tags={"foo": "bar"}, + ) + ``` + +1. Create the endpoint: + + Using the `MLClient` created earlier, we'll now create the Endpoint in the workspace. This command will start the endpoint creation and return a confirmation response while the endpoint creation continues. + + ```python + ml_client.begin_create_or_update(endpoint) + ``` + +1. Configure online deployment: + + A deployment is a set of resources required for hosting the model that does the actual inferencing. We'll create a deployment for our endpoint using the `ManagedOnlineDeployment` class. 
+ + ```python + model = Model(path="../model-1/model/sklearn_regression_model.pkl") + env = Environment( + conda_file="../model-1/environment/conda.yml", + image="mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04:20210727.v1", + ) + + blue_deployment = ManagedOnlineDeployment( + name="blue", + endpoint_name=online_endpoint_name, + model=model, + environment=env, + code_configuration=CodeConfiguration( + code="../model-1/onlinescoring", scoring_script="score.py" + ), + instance_type="Standard_F2s_v2", + instance_count=1, + ) + ``` + +1. Create the deployment: + + Using the `MLClient` created earlier, we'll now create the deployment in the workspace. This command will start the deployment creation and return a confirmation response while the deployment creation continues. + + ```python + ml_client.begin_create_or_update(blue_deployment) + ``` + + ```python + # blue deployment takes 100 traffic + endpoint.traffic = {"blue": 100} + ml_client.begin_create_or_update(endpoint) + ``` + +## Test the endpoint with sample data + +Using the `MLClient` created earlier, we'll get a handle to the endpoint. The endpoint can be invoked using the `invoke` command with the following parameters: + +* `endpoint_name` - Name of the endpoint +* `request_file` - File with request data +* `deployment_name` - Name of the specific deployment to test in an endpoint + +We'll send a sample request using a [json](https://github.com/Azure/azureml-examples/blob/main/sdk/endpoints/online/model-1/sample-request.json) file. + +```python +# test the blue deployment with some sample data +ml_client.online_endpoints.invoke( + endpoint_name=online_endpoint_name, + deployment_name="blue", + request_file="../model-1/sample-request.json", +) +``` + +## Managing endpoints and deployments + +1. Get details of the endpoint: + + ```python + # Get the details for online endpoint + endpoint = ml_client.online_endpoints.get(name=online_endpoint_name) + + # existing traffic details + print(endpoint.traffic) + + # Get the scoring URI + print(endpoint.scoring_uri) + ``` + +1. Get the logs for the new deployment: + + Get the logs for the green deployment and verify as needed + + ```python + ml_client.online_deployments.get_logs( + name="blue", endpoint_name=online_endpoint_name, lines=50 + ) + ``` + +## Delete the endpoint + +```python +ml_client.online_endpoints.begin_delete(name=online_endpoint_name) +``` + +## Next steps + +Try these next steps to learn how to use the Azure Machine Learning SDK (v2) for Python: +* [Managed online endpoint safe rollout](how-to-safely-rollout-managed-endpoints-sdk-v2.md) +* Explore online endpoint samples - [https://github.com/Azure/azureml-examples/tree/main/sdk/endpoints](https://github.com/Azure/azureml-examples/tree/main/sdk/endpoints) \ No newline at end of file diff --git a/articles/machine-learning/how-to-deploy-managed-online-endpoints.md b/articles/machine-learning/how-to-deploy-managed-online-endpoints.md index dffcf8e87a68d..af12cc13c1120 100644 --- a/articles/machine-learning/how-to-deploy-managed-online-endpoints.md +++ b/articles/machine-learning/how-to-deploy-managed-online-endpoints.md @@ -114,7 +114,7 @@ For more information about the YAML schema, see the [online endpoint YAML refere > [!NOTE] > To use Kubernetes instead of managed endpoints as a compute target: > 1. 
Create and attach your Kubernetes cluster as a compute target to your Azure Machine Learning workspace by using [Azure Machine Learning studio](how-to-attach-kubernetes-anywhere.md?&tabs=studio#attach-a-kubernetes-cluster-to-an-azureml-workspace). -> 1. Use the [endpoint YAML](https://github.com/Azure/azureml-examples/blob/main/cli/endpoints/online/amlarc/endpoint.yml) to target Kubernetes instead of the managed endpoint YAML. You'll need to edit the YAML to change the value of `target` to the name of your registered compute target. You can use this [deployment.yaml](https://github.com/Azure/azureml-examples/blob/main/cli/endpoints/online/amlarc/blue-deployment.yml) that has additional properties applicable to Kubernetes deployment. +> 1. Use the [endpoint YAML](https://github.com/Azure/azureml-examples/blob/main/cli/endpoints/online/managed/sample/endpoint.yml) to target Kubernetes instead of the managed endpoint YAML. You'll need to edit the YAML to change the value of `target` to the name of your registered compute target. You can use this [deployment.yaml](https://github.com/Azure/azureml-examples/blob/main/cli/endpoints/online/managed/sample/blue-deployment.yml) that has additional properties applicable to Kubernetes deployment. > > All the commands that are used in this article (except the optional SLA monitoring and Azure Log Analytics integration) can be used either with managed endpoints or with Kubernetes endpoints. diff --git a/articles/machine-learning/how-to-enable-studio-virtual-network.md b/articles/machine-learning/how-to-enable-studio-virtual-network.md index 700db4b4acd5a..d1da18122c533 100644 --- a/articles/machine-learning/how-to-enable-studio-virtual-network.md +++ b/articles/machine-learning/how-to-enable-studio-virtual-network.md @@ -87,11 +87,11 @@ Use the following steps to enable access to data stored in Azure Blob and File s > [!TIP] > The first step is not required for the default storage account for the workspace. All other steps are required for *any* storage account behind the VNet and used by the workspace, including the default storage account. -1. **If the storage account is the *default* storage for your workspace, skip this step**. If it is not the default, **Grant the workspace managed identity the 'Storage Blob Data Reader' role** for the Azure storage account so that it can read data from blob storage. +1. **If the storage account is the *default* storage for your workspace, skip this step**. If it is not the default, __Grant the workspace managed identity the 'Storage Blob Data Reader' role__ for the Azure storage account so that it can read data from blob storage. For more information, see the [Blob Data Reader](../role-based-access-control/built-in-roles.md#storage-blob-data-reader) built-in role. -1. **Grant the workspace managed identity the 'Reader' role for storage private endpoints**. If your storage service uses a __private endpoint__, grant the workspace's managed identity **Reader** access to the private endpoint. The workspace's managed identity in Azure AD has the same name as your Azure Machine Learning workspace. +1. __Grant the workspace managed identity the 'Reader' role for storage private endpoints__. If your storage service uses a __private endpoint__, grant the workspace's managed identity __Reader__ access to the private endpoint. The workspace's managed identity in Azure AD has the same name as your Azure Machine Learning workspace. > [!TIP] > Your storage account may have multiple private endpoints. 
For example, one storage account may have separate private endpoint for blob, file, and dfs (Azure Data Lake Storage Gen2). Add the managed identity to all these endpoints. @@ -99,7 +99,7 @@ Use the following steps to enable access to data stored in Azure Blob and File s For more information, see the [Reader](../role-based-access-control/built-in-roles.md#reader) built-in role. -1. **Enable managed identity authentication for default storage accounts**. Each Azure Machine Learning workspace has two default storage accounts, a default blob storage account and a default file store account, which are defined when you create your workspace. You can also set new defaults in the **Datastore** management page. +1. __Enable managed identity authentication for default storage accounts__. Each Azure Machine Learning workspace has two default storage accounts, a default blob storage account and a default file store account, which are defined when you create your workspace. You can also set new defaults in the __Datastore__ management page. ![Screenshot showing where default datastores can be found](./media/how-to-enable-studio-virtual-network/default-datastores.png) @@ -110,9 +110,9 @@ Use the following steps to enable access to data stored in Azure Blob and File s |Workspace default blob storage| Stores model assets from the designer. Enable managed identity authentication on this storage account to deploy models in the designer. If managed identity authentication is disabled, the user's identity is used to access data stored in the blob.

                  You can visualize and run a designer pipeline if it uses a non-default datastore that has been configured to use managed identity. However, if you try to deploy a trained model without managed identity enabled on the default datastore, deployment will fail regardless of any other datastores in use.| |Workspace default file store| Stores AutoML experiment assets. Enable managed identity authentication on this storage account to submit AutoML experiments. | -1. **Configure datastores to use managed identity authentication**. After you add an Azure storage account to your virtual network with either a [service endpoint](how-to-secure-workspace-vnet.md?tabs=se#secure-azure-storage-accounts) or [private endpoint](how-to-secure-workspace-vnet.md?tabs=pe#secure-azure-storage-accounts), you must configure your datastore to use [managed identity](../active-directory/managed-identities-azure-resources/overview.md) authentication. Doing so lets the studio access data in your storage account. +1. __Configure datastores to use managed identity authentication__. After you add an Azure storage account to your virtual network with either a [service endpoint](how-to-secure-workspace-vnet.md?tabs=se#secure-azure-storage-accounts) or [private endpoint](how-to-secure-workspace-vnet.md?tabs=pe#secure-azure-storage-accounts), you must configure your datastore to use [managed identity](../active-directory/managed-identities-azure-resources/overview.md) authentication. Doing so lets the studio access data in your storage account. - Azure Machine Learning uses [datastores](concept-data.md#datastores) to connect to storage accounts. When creating a new datastore, use the following steps to configure a datastore to use managed identity authentication: + Azure Machine Learning uses [datastore](concept-data.md#datastore) to connect to storage accounts. When creating a new datastore, use the following steps to configure a datastore to use managed identity authentication: 1. In the studio, select __Datastores__. @@ -136,9 +136,9 @@ When using Azure Data Lake Storage Gen1 as a datastore, you can only use POSIX-s When using Azure Data Lake Storage Gen2 as a datastore, you can use both Azure RBAC and POSIX-style access control lists (ACLs) to control data access inside of a virtual network. -**To use Azure RBAC**, follow the steps in the [Datastore: Azure Storage Account](#datastore-azure-storage-account) section of this article. Data Lake Storage Gen2 is based on Azure Storage, so the same steps apply when using Azure RBAC. +__To use Azure RBAC__, follow the steps in the [Datastore: Azure Storage Account](#datastore-azure-storage-account) section of this article. Data Lake Storage Gen2 is based on Azure Storage, so the same steps apply when using Azure RBAC. -**To use ACLs**, the workspace's managed identity can be assigned access just like any other security principal. For more information, see [Access control lists on files and directories](../storage/blobs/data-lake-storage-access-control.md#access-control-lists-on-files-and-directories). +__To use ACLs__, the workspace's managed identity can be assigned access just like any other security principal. For more information, see [Access control lists on files and directories](../storage/blobs/data-lake-storage-access-control.md#access-control-lists-on-files-and-directories). 
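+
+For example, an ACL entry for the workspace managed identity can be added with the Azure Data Lake Storage SDK. The following is only a minimal sketch; the storage account, file system, folder, and the managed identity's Azure AD object ID are placeholders you would replace with your own values:
+
+```python
+from azure.identity import DefaultAzureCredential
+from azure.storage.filedatalake import DataLakeServiceClient
+
+service = DataLakeServiceClient(
+    account_url="https://<account>.dfs.core.windows.net",
+    credential=DefaultAzureCredential(),
+)
+directory = service.get_file_system_client("<file-system>").get_directory_client("<folder>")
+
+# Append a read/execute entry for the workspace managed identity to the existing ACL.
+current_acl = directory.get_access_control()["acl"]
+directory.set_access_control(acl=current_acl + ",user:<workspace-object-id>:r-x")
+```
+
+You may also need a matching *default* ACL entry on parent folders so that files created later inherit the permission.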
## Datastore: Azure SQL Database @@ -151,7 +151,7 @@ After you create a SQL contained user, grant permissions to it by using the [GRA When using the Azure Machine Learning designer intermediate component output, you can specify the output location for any component in the designer. Use this to store intermediate datasets in separate location for security, logging, or auditing purposes. To specify output, use the following steps: 1. Select the component whose output you'd like to specify. -1. In the component settings pane that appears to the right, select **Output settings**. +1. In the component settings pane that appears to the right, select __Output settings__. 1. Specify the datastore you want to use for each component output. Make sure that you have access to the intermediate storage accounts in your virtual network. Otherwise, the pipeline will fail. diff --git a/articles/machine-learning/how-to-identity-based-data-access.md b/articles/machine-learning/how-to-identity-based-data-access.md index 1c0948d579494..37996280c03d4 100644 --- a/articles/machine-learning/how-to-identity-based-data-access.md +++ b/articles/machine-learning/how-to-identity-based-data-access.md @@ -61,7 +61,6 @@ Certain machine learning scenarios involve training models with private data. In - [Azure Blob Storage](../storage/blobs/storage-blobs-overview.md) - [Azure Data Lake Storage Gen1](../data-lake-store/index.yml) - [Azure Data Lake Storage Gen2](../storage/blobs/data-lake-storage-introduction.md) - - [Azure SQL Database](/azure/azure-sql/database/sql-database-paas-overview) - The [Azure Machine Learning SDK for Python](/python/api/overview/azure/ml/install). @@ -122,22 +121,6 @@ adls2_dstore = Datastore.register_azure_data_lake_gen2(workspace=ws, account_name='myadls2') ``` -### Azure SQL database - -For an Azure SQL database, use [register_azure_sql_database()](/python/api/azureml-core/azureml.core.datastore.datastore#register-azure-sql-database-workspace--datastore-name--server-name--database-name--tenant-id-none--client-id-none--client-secret-none--resource-url-none--authority-url-none--endpoint-none--overwrite-false--username-none--password-none--subscription-id-none--resource-group-none--grant-workspace-access-false----kwargs-) to register a datastore that connects to an Azure SQL database storage. - -The following code creates and registers the `credentialless_sqldb` datastore to the `ws` workspace and assigns it to the variable, `sqldb_dstore`. This datastore accesses the database `mydb` in the `myserver` SQL DB server. - -```python -# Create a sqldatabase datastore without credentials - -sqldb_dstore = Datastore.register_azure_sql_database(workspace=ws, - datastore_name='credentialless_sqldb', - server_name='myserver', - database_name='mydb') - -``` - ## Storage access permissions @@ -150,7 +133,6 @@ Identity-based data access supports connections to **only** the following storag * Azure Blob Storage * Azure Data Lake Storage Gen1 * Azure Data Lake Storage Gen2 -* Azure SQL Database To access these storage services, you must have at least [Storage Blob Data Reader](../role-based-access-control/built-in-roles.md#storage-blob-data-reader) access to the storage account. Only storage account owners can [change your access level via the Azure portal](../storage/blobs/assign-azure-role-data-access.md). 
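+
+For example, once an identity has the Storage Blob Data Reader role on the account, a blob container can be registered as a datastore without storing any credentials, so data access happens under that identity. The following is a minimal sketch; the datastore, container, and account names are placeholders:
+
+```python
+from azureml.core import Workspace, Datastore
+
+ws = Workspace.from_config()
+
+# Register a blob container without an account key or SAS token. Because no
+# credentials are saved, access relies on identity-based data access instead.
+blob_dstore = Datastore.register_azure_blob_container(
+    workspace=ws,
+    datastore_name="credentialless_blob",
+    container_name="my_container",
+    account_name="mystorageaccount",
+)
+```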
@@ -162,7 +144,7 @@ If you're training a model on a remote compute target and want to access the dat By default, Azure Machine Learning can't communicate with a storage account that's behind a firewall or in a virtual network. -You can configure storage accounts to allow access only from within specific virtual networks. This configuration requires additional steps to ensure data isn't leaked outside of the network. This behavior is the same for credential-based data access. For more information, see [How to configure virtual network scenarios](how-to-access-data.md#virtual-network). +You can configure storage accounts to allow access only from within specific virtual networks. This configuration requires extra steps to ensure data isn't leaked outside of the network. This behavior is the same for credential-based data access. For more information, see [How to configure virtual network scenarios](how-to-access-data.md#virtual-network). If your storage account has virtual network settings, that dictates what identity type and permissions access is needed. For example for data preview and data profile, the virtual network settings determine what type of identity is used to authenticate data access. @@ -181,7 +163,7 @@ We recommend that you use [Azure Machine Learning datasets](./v1/how-to-create-r Datasets package your data into a lazily evaluated consumable object for machine learning tasks like training. Also, with datasets you can [download or mount](how-to-train-with-datasets.md#mount-vs-download) files of any format from Azure storage services like Azure Blob Storage and Azure Data Lake Storage to a compute target. -To create a dataset, you can reference paths from datastores that also use identity-based data access . +To create a dataset, you can reference paths from datastores that also use identity-based data access. * If you're underlying storage account type is Blob or ADLS Gen 2, your user identity needs Blob Reader role. * If your underlying storage is ADLS Gen 1, permissions need can be set via the storage's Access Control List (ACL). diff --git a/articles/machine-learning/how-to-log-pipelines-application-insights.md b/articles/machine-learning/how-to-log-pipelines-application-insights.md index 4ff8313d92da7..d5c4ae57af099 100644 --- a/articles/machine-learning/how-to-log-pipelines-application-insights.md +++ b/articles/machine-learning/how-to-log-pipelines-application-insights.md @@ -159,6 +159,6 @@ Some of the queries below use 'customDimensions.Level'. These severity levels co ## Next Steps -Once you have logs in your Application Insights instance, they can be used to set [Azure Monitor alerts](../azure-monitor/alerts/alerts-overview.md#what-you-can-alert-on) based on query results. +Once you have logs in your Application Insights instance, they can be used to set [Azure Monitor alerts](../azure-monitor/alerts/alerts-overview.md) based on query results. You can also add results from queries to an [Azure Dashboard](../azure-monitor/app/tutorial-app-dashboards.md#add-logs-query) for additional insights. diff --git a/articles/machine-learning/how-to-manage-models.md b/articles/machine-learning/how-to-manage-models.md index e502268d1b85c..487cc7f10f852 100644 --- a/articles/machine-learning/how-to-manage-models.md +++ b/articles/machine-learning/how-to-manage-models.md @@ -128,7 +128,7 @@ Use the tabs below to select where your model is located. 
```python from azure.ai.ml.entities import Model -from azure.ai.ml._constants import ModelType +from azure.ai.ml.constants import ModelType file_model = Model( path="mlflow-model/model.pkl", @@ -146,7 +146,7 @@ A model can be created from a cloud path using any one of the following supporte ```python from azure.ai.ml.entities import Model -from azure.ai.ml._constants import ModelType +from azure.ai.ml.constants import ModelType cloud_model = Model( path= "azureml://datastores/workspaceblobstore/paths/model.pkl" @@ -173,7 +173,7 @@ Example: ```python from azure.ai.ml.entities import Model -from azure.ai.ml._constants import ModelType +from azure.ai.ml.constants import ModelType run_model = Model( path="runs:/$RUN_ID/model/" @@ -205,7 +205,7 @@ Saving model from a named output: ```python from azure.ai.ml.entities import Model -from azure.ai.ml._constants import ModelType +from azure.ai.ml.constants import ModelType run_model = Model( path="azureml://jobs/$RUN_ID/outputs/artifacts/paths/model/" diff --git a/articles/machine-learning/how-to-manage-quotas.md b/articles/machine-learning/how-to-manage-quotas.md index 87e139d87db3f..0fb959b9b00bf 100644 --- a/articles/machine-learning/how-to-manage-quotas.md +++ b/articles/machine-learning/how-to-manage-quotas.md @@ -7,7 +7,7 @@ ms.service: machine-learning ms.subservice: core author: SimranArora904 ms.author: siarora -ms.date: 04/08/2022 +ms.date: 06/01/2022 ms.topic: how-to ms.custom: troubleshooting, contperf-fy20q4, contperf-fy21q2, event-tier1-build-2022 --- @@ -65,14 +65,14 @@ In addition, the maximum **run time** is 30 days and the maximum number of **met ### Azure Machine Learning Compute [Azure Machine Learning Compute](concept-compute-target.md#azure-machine-learning-compute-managed) has a default quota limit on both the number of cores (split by each VM Family and cumulative total cores) as well as the number of unique compute resources allowed per region in a subscription. This quota is separate from the VM core quota listed in the previous section as it applies only to the managed compute resources of Azure Machine Learning. -[Request a quota increase](#request-quota-increases) to raise the limits for various VM family core quotas, total subscription core quotas and resources in this section. +[Request a quota increase](#request-quota-increases) to raise the limits for various VM family core quotas, total subscription core quotas, cluster quota and resources in this section. Available resources: + **Dedicated cores per region** have a default limit of 24 to 300, depending on your subscription offer type. You can increase the number of dedicated cores per subscription for each VM family. Specialized VM families like NCv2, NCv3, or ND series start with a default of zero cores. + **Low-priority cores per region** have a default limit of 100 to 3,000, depending on your subscription offer type. The number of low-priority cores per subscription can be increased and is a single value across VM families. -+ **Clusters per region** have a default limit of 200. These are shared between a training cluster and a compute instance. (A compute instance is considered a single-node cluster for quota purposes.) ++ **Clusters per region** have a default limit of 200. These are shared between training clusters, compute instances and MIR endpoint deployments. (A compute instance is considered a single-node cluster for quota purposes.) Cluster quota can be increased up to a value of 500 per region within a given subscription. 
> [!TIP] > To learn more about which VM family to request a quota increase for, check out [virtual machine sizes in Azure](../virtual-machines/sizes.md). For instance GPU VM families start with an "N" in their family name (eg. NCv3 series) @@ -101,8 +101,6 @@ The following table shows additional limits in the platform. Please reach out to Azure Machine Learning managed online endpoints have limits described in the following table. -To determine the current usage for an endpoint, [view the metrics](how-to-monitor-online-endpoints.md#view-metrics). To request an exception from the Azure Machine Learning product team, please open a technical support ticket. - | **Resource** | **Limit** | | --- | --- | | Endpoint name| Endpoint names must
                • Begin with a letter
                • Be 3-32 characters in length
                • Only consist of letters and numbers 1 | @@ -123,6 +121,26 @@ To determine the current usage for an endpoint, [view the metrics](how-to-monito 3 If you request a limit increase, be sure to calculate related limit increases you might need. For example, if you request a limit increase for requests per second, you might also want to compute the required connections and bandwidth limits and include these limit increases in the same request. +To determine the current usage for an endpoint, [view the metrics](how-to-monitor-online-endpoints.md#view-metrics). + +To request an exception from the Azure Machine Learning product team, use the steps in the [Request quota increases](#request-quota-increases) section and provide the following information: + +1. When opening the support request, __do not select Service and subscription limits (quotas)__. Instead, select __Technical__ as the issue type. +1. Provide the Azure __subscriptions__ and __regions__ where you want to increase the quota. +1. Provide the __tenant ID__ and __customer name__. +1. Provide the __quota type__ and __new limit__. Use the following table as a guide: + + | Quota Type | New Limit | + | ----- | ----- | + | MaxEndpointsPerSub (Number of endpoints per subscription) | ? | + | MaxDeploymentsPerSub (Number of deployments per subscription) | ? | + | MaxDeploymentsPerEndpoint (Number of deployments per endpoint) | ? | + | MaxInstancesPerDeployment (Number of instances per deployment) | ? | + | EndpointRequestRateLimitPerSec (Total requests per second at endpoint level for all deployments) | ? | + | EndpointConnectionRateLimitPerSec (Total connections per second at endpoint level for all deployments) | ? | + | EndpointConnectionLimit (Total connections active at endpoint level for all deployments) | ? | + | EndpointBandwidthLimitKBps (Total bandwidth at endpoint level for all deployments (MBPS)) | ? | + ### Azure Machine Learning pipelines [Azure Machine Learning pipelines](concept-ml-pipelines.md) have the following limits. diff --git a/articles/machine-learning/how-to-manage-resources-vscode.md b/articles/machine-learning/how-to-manage-resources-vscode.md index 9671b71d21ac5..37a06cf9c7023 100644 --- a/articles/machine-learning/how-to-manage-resources-vscode.md +++ b/articles/machine-learning/how-to-manage-resources-vscode.md @@ -86,7 +86,7 @@ The extension currently supports datastores of the following types: - Azure Data Lake Gen 2 - Azure File -For more information, see [datastores](concept-data.md#datastores). +For more information, see [datastore](concept-data.md#datastore). 
### Create a datastore diff --git a/articles/machine-learning/how-to-manage-workspace-terraform.md b/articles/machine-learning/how-to-manage-workspace-terraform.md index 6363a8044e19d..6da8521a9a457 100644 --- a/articles/machine-learning/how-to-manage-workspace-terraform.md +++ b/articles/machine-learning/how-to-manage-workspace-terraform.md @@ -9,6 +9,7 @@ ms.author: deeikele author: denniseik ms.date: 01/05/2022 ms.topic: how-to +ms.tool: terraform --- # Manage Azure Machine Learning workspaces using Terraform diff --git a/articles/machine-learning/how-to-migrate-from-v1.md b/articles/machine-learning/how-to-migrate-from-v1.md new file mode 100644 index 0000000000000..b9cc85f2ab4bf --- /dev/null +++ b/articles/machine-learning/how-to-migrate-from-v1.md @@ -0,0 +1,194 @@ +--- +title: 'Migrate from v1 to v2' +titleSuffix: Azure Machine Learning +description: Migrate from v1 to v2 of Azure Machine Learning REST APIs, CLI extension, and Python SDK (preview). +services: machine-learning +ms.service: machine-learning +ms.subservice: core +ms.topic: how-to +author: lostmygithubaccount +ms.author: copeters +ms.date: 06/01/2022 +ms.reviewer: blackmist +ms.custom: devx-track-azurecli, devplatv2 +--- + +# How to migrate from v1 to v2 + +Azure Machine Learning's v2 REST APIs, Azure CLI extension, and Python SDK (preview) introduce consistency and a set of new features to accelerate the production machine learning lifecycle. In this article, we'll overview migrating from v1 to v2 with recommendations to help you decide on v1, v2, or both. + +## Prerequisites + +- General familiarity with Azure ML and the v1 Python SDK. +- Understand [what is v2?](concept-v2.md) + +## Should I use v2? + +You should use v2 if you're starting a new machine learning project. A new v2 project can reuse resources like workspaces and compute and assets like models and environments created using v1. You can also use v1 and v2 in tandem, for example using the v1 Python SDK within jobs that are submitted from the v2 CLI extension. However, see the [section below](#can-i-use-v1-and-v2-together) for details on why separating v1 and v2 use is recommended. + +We recommend assessing the effort needed to migrate a project from v1 to v2. First, you should ensure all the features needed from v1 are available in v2. Some notable feature gaps include: + +- Spark support in jobs. +- Publishing jobs (pipelines in v1) as endpoints. +- AutoML jobs within pipeline jobs (AutoML step in a pipeline in v1). +- Model deployment to Azure Container Instance (ACI), replaced with managed online endpoints. +- An equivalent for ParallelRunStep in jobs. +- Support for SQL/database datastores. +- Built-in components in the designer. + +You should then ensure the features you need in v2 meet your organization's requirements, such as being generally available. You and your team will need to assess on a case-by-case basis whether migrating to v2 is right for you. + +> [!IMPORTANT] +> New features in Azure ML will only be launched in v2. + +## How do I migrate to v2? + +To migrate to v2, start by prototyping an existing v1 workflow into v2. Migrating will typically include: + +- Optionally (and recommended in most cases), re-create resources and assets with v2. +- Refactor model training code to de-couple Azure ML code from ML model code (model training, model logging, and other model tracking code). +- Refactor Azure ML model deployment code and test with v2 endpoints. 
+- Refactor CI/CD code to use the v2 CLI (recommended), v2 Python SDK, or directly use REST.
+
+Based on this prototype, you can estimate the effort involved for a full migration to v2. Consider the workflow patterns (like [GitOps](#a-note-on-gitops-with-v2)) your organization wants to establish for use with v2 and factor this effort in.
+
+## Which v2 API should I use?
+
+In v2, interfaces are available via REST API, CLI, and Python SDK (preview). The interface you should use depends on your scenario and preferences.
+
+|API|Notes|
+|-|-|
+|REST|Fewest dependencies and overhead. Use for building applications on Azure ML as a platform, directly in programming languages without an SDK provided, or per personal preference.|
+|CLI|Recommended for automation with CI/CD or per personal preference. Allows quick iteration with YAML files and straightforward separation between Azure ML and ML model code.|
+|Python SDK|Recommended for complicated scripting (for example, programmatically generating large pipeline jobs) or per personal preference. Allows quick iteration with YAML files or development solely in Python.|
+
+## Can I use v1 and v2 together?
+
+Generally, yes. Resources like workspace, compute, and datastore work across v1 and v2, with exceptions. A user can call the v1 Python SDK to change a workspace's description, then use the v2 CLI extension to change it again. Jobs (experiments/runs/pipelines in v1) can be submitted to the same workspace from the v1 or v2 Python SDK. A workspace can have both v1 and v2 model deployment endpoints. You can also call v1 Python SDK code within jobs created via v2, though [this pattern isn't recommended](#production-model-training).
+
+We recommend creating a new workspace for using v2 to keep v1/v2 entities separate and avoid backward/forward compatibility considerations.
+
+> [!IMPORTANT]
+> If your workspace uses a private endpoint, it will automatically have the `v1_legacy_mode` flag enabled, preventing usage of v2 APIs. See [how to configure network isolation with v2](how-to-configure-network-isolation-with-v2.md) for details.
+
+## Migrating resources and assets
+
+This section gives an overview of migration recommendations for specific resources and assets in Azure ML. See the concept article for each entity for details on their usage in v2.
+
+### Workspace
+
+Workspaces don't need to be migrated with v2. You can use the same workspace, regardless of whether you're using v1 or v2. We recommend creating a new workspace for using v2 to keep v1/v2 entities separate and avoid backward/forward compatibility considerations.
+
+Do consider migrating the code for deploying a workspace to v2. Typically, Azure resources are managed via Azure Resource Manager (and Bicep) or similar resource provisioning tools. Alternatively, you can use the CLI (v2) and YAML files.
+
+> [!IMPORTANT]
+> If your workspace uses a private endpoint, it will automatically have the `v1_legacy_mode` flag enabled, preventing usage of v2 APIs. See [how to configure network isolation with v2](how-to-configure-network-isolation-with-v2.md) for details.
+
+### Connection (workspace connection in v1)
+
+Workspace connections from v1 are persisted on the workspace and are fully available with v2.
+
+We recommend migrating the code for creating connections to v2.
+
+### Datastore
+
+Object storage datastore types created with v1 are fully available for use in v2. Database datastores are not supported; export to object storage (usually Azure Blob) is the recommended migration path.
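+
+For example, a v1 blob datastore can be re-created with the v2 Python SDK along the following lines. This is a minimal sketch; the datastore, account, and container names are placeholders, and leaving out `credentials` assumes identity-based access rather than an account key or SAS token:
+
+```python
+from azure.ai.ml import MLClient
+from azure.ai.ml.entities import AzureBlobDatastore
+from azure.identity import DefaultAzureCredential
+
+ml_client = MLClient.from_config(credential=DefaultAzureCredential())
+
+# Re-create the v1 datastore as a v2 datastore pointing at the same container.
+blob_datastore = AzureBlobDatastore(
+    name="my_blob_datastore",
+    description="Re-created from a v1 blob datastore",
+    account_name="mystorageaccount",
+    container_name="my-container",
+)
+ml_client.datastores.create_or_update(blob_datastore)
+```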
+ +We recommend migrating the code for creating datastores to v2. + +### Compute + +Compute of type `AmlCompute` and `ComputeInstance` are fully available for use in v2. + +We recommend migrating the code for creating compute to v2. + +### Endpoint and deployment (endpoint or web service in v1) + +You can continue using your existing v1 model deployments. For new model deployments, we recommend migrating to v2. In v2, we offer managed endpoints or Kubernetes endpoints. The following table guides our recommendation: + +|Endpoint type in v2|Migrate from|Notes| +|-|-|-| +|Local|ACI|Quick test of model deployment locally; not for production.| +|Managed online endpoint|ACI, AKS|Enterprise-grade managed model deployment infrastructure with near real-time responses and massive scaling for production.| +|Managed batch endpoint|ParallelRunStep in a pipeline for batch scoring|Enterprise-grade managed model deployment infrastructure with massively-parallel batch processing for production.| +|Azure Kubernetes Service (AKS)|ACI, AKS|Manage your own AKS cluster(s) for model deployment, giving flexibility and granular control at the cost of IT overhead.| +|Azure Arc Kubernetes|N/A|Manage your own Kubernetes cluster(s) in other clouds or on-prem, giving flexibility and granular control at the cost of IT overhead.| + +### Jobs (experiments, runs, pipelines in v1) + +In v2, "experiments", "runs", and "pipelines" are consolidated into jobs. A job has a type. Most jobs are `command` jobs that run a command, like `python main.py`. What runs in a job is agnostic to any programming language, so you can run `bash` scripts, invoke `python` interpreters, run a bunch of `curl` commands, or anything else. Another common type of job is `pipeline`, which defines child jobs that may have input/output relationships, forming a directed acyclic graph (DAG). + +To migrate, you'll need to change your code for submitting jobs to v2. We recommend refactoring the control-plane code authoring a job into YAML file specification, which can then be submitted through the v2 CLI or Python SDK (preview). A simple `command` job looks like this: + +:::code language="yaml" source="~/azureml-examples-main/cli/jobs/basics/hello-world.yml"::: + +What you run *within* the job does not need to be migrated to v2. However, it is recommended to remove any code specific to Azure ML from your model training scripts. This separation allows for an easier transition between local and cloud and is considered best practice for mature MLOps. In practice, this means removing `azureml.*` lines of code. Model logging and tracking code should be replaced with MLflow. See [how to use MLflow in v2](how-to-use-mlflow-cli-runs.md) for details. + +We recommend migrating the code for creating jobs to v2. You can see [how to train models with the CLI (v2)](how-to-train-cli.md) and the [job YAML references](reference-yaml-job-command.md) for authoring jobs in v2 YAMLs. + +### Data (datasets in v1) + +Datasets are renamed to data assets. Interoperability between v1 datasets and v2 data assets is the most complex of any entity in Azure ML. + +Data assets in v2 (or File Datasets in v1) are *references* to files in object storage. Thus, deleting a data asset (or v1 dataset) doesn't actually delete anything in underlying storage, only a reference. Therefore, it may be easier to avoid backward and forward compatibility considerations for data by re-creating v1 datasets as v2 data assets. + +For details on data in v2, see the [data concept article](concept-data.md). 
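+
+For example, a v1 `FileDataset` can be re-created as a v2 data asset with a few lines of the v2 Python SDK. This is a sketch only; the storage path, asset name, and version are placeholders:
+
+```python
+from azure.ai.ml import MLClient
+from azure.ai.ml.entities import Data
+from azure.ai.ml.constants import AssetTypes
+from azure.identity import DefaultAzureCredential
+
+ml_client = MLClient.from_config(credential=DefaultAzureCredential())
+
+# Reference the same files in storage that the v1 dataset pointed to.
+my_data = Data(
+    path="azureml://datastores/workspaceblobstore/paths/<folder>",
+    type=AssetTypes.URI_FOLDER,
+    description="Re-created from a v1 FileDataset",
+    name="my-data-asset",
+    version="1",
+)
+ml_client.data.create_or_update(my_data)
+```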
+We recommend migrating the code for creating data assets to v2.
+
+### Model
+
+Models created from v1 can be used in v2. In v2, explicit model types are introduced. Similar to data assets, it may be easier to re-create a v1 model as a v2 model, setting the type appropriately.
+
+We recommend migrating the code for creating models to v2.
+
+### Environment
+
+Environments created from v1 can be used in v2. In v2, environments have new features like creation from a local Docker context.
+
+We recommend migrating the code for creating environments to v2.
+
+## Scenarios across the machine learning lifecycle
+
+There are a few scenarios that are common across the machine learning lifecycle using Azure ML. We'll look at a few and give general recommendations for migrating to v2.
+
+### Azure setup
+
+Azure recommends Azure Resource Manager templates (often via Bicep for ease of use) to create resources. The same is a good approach for creating Azure ML resources as well.
+
+If your team is only using Azure ML, you may consider provisioning the workspace and any other resources via YAML files and CLI instead.
+
+### Prototyping models
+
+We recommend v2 for prototyping models. You may consider using the CLI for interactive use of Azure ML, while your model training code is Python or any other programming language. Alternatively, you may adopt a full-stack approach with Python solely using the Azure ML SDK or a mixed approach with the Azure ML Python SDK and YAML files.
+
+### Production model training
+
+We recommend v2 for production model training. Jobs consolidate the terminology and provide consistency that allows for easier transition between types (for example, `command` to `sweep`) and a GitOps-friendly process for serializing jobs into YAML files.
+
+With v2, you should separate your machine learning code from the control plane code. This separation allows for easier iteration and allows for easier transition between local and cloud.
+
+Typically, converting to v2 will involve refactoring your code to use MLflow for tracking and model logging. See the [MLflow concept article](concept-mlflow.md) for details.
+
+### Production model deployment
+
+We recommend v2 for production model deployment. Managed endpoints abstract the IT overhead and provide a performant solution for deploying and scoring models, both for online (near real-time) and batch (massively parallel) scenarios.
+
+Kubernetes deployments are supported in v2 through AKS or Azure Arc, enabling Azure cloud and on-premises deployments managed by your organization.
+
+### Machine learning operations (MLOps)
+
+An MLOps workflow typically involves CI/CD through an external tool. It's recommended to refactor existing CI/CD workflows to use v2 APIs. Typically, a CLI is used in CI/CD, though you can alternatively invoke Python or directly use REST.
+
+The solution accelerator for MLOps with v2 is being developed at https://github.com/Azure/mlops-v2 and can be used as a reference or adopted for setup and automation of the machine learning lifecycle.
+
+#### A note on GitOps with v2
+
+A key paradigm with v2 is serializing machine learning entities as YAML files for source control with `git`, enabling better GitOps approaches than were possible with v1. For instance, you could enforce a policy by which only a service principal used in CI/CD pipelines can create/update/delete some or all entities, ensuring changes go through a governed process like pull requests with required reviewers.
Since the files in source control are YAML, they're easy to diff and track changes over time. You and your team may consider shifting to this paradigm as you migrate to v2. + +You can obtain a YAML representation of any entity with the CLI via `az ml show --output yaml`. Note that this output will have system-generated properties, which can be ignored or deleted. + +## Next steps + +- [Get started with the CLI (v2)](how-to-configure-cli.md) +- [Get started with the Python SDK (v2)](https://aka.ms/sdk-v2-install) diff --git a/articles/machine-learning/how-to-monitor-online-endpoints.md b/articles/machine-learning/how-to-monitor-online-endpoints.md index d861b60b4f5ec..2968c79b6eeb9 100644 --- a/articles/machine-learning/how-to-monitor-online-endpoints.md +++ b/articles/machine-learning/how-to-monitor-online-endpoints.md @@ -1,46 +1,46 @@ --- -title: Monitor managed online endpoints +title: Monitor online endpoints titleSuffix: Azure Machine Learning -description: Monitor managed online endpoints and create alerts with Application Insights. +description: Monitor online endpoints and create alerts with Application Insights. services: machine-learning ms.service: machine-learning ms.author: larryfr author: blackmist ms.subservice: mlops -ms.date: 10/21/2021 +ms.date: 06/01/2022 ms.topic: conceptual ms.custom: how-to, devplatv2, event-tier1-build-2022 --- -# Monitor managed online endpoints +# Monitor online endpoints -In this article, you learn how to monitor [Azure Machine Learning managed online endpoints](concept-endpoints.md). Use Application Insights to view metrics and create alerts to stay up to date with your managed online endpoints. +In this article, you learn how to monitor [Azure Machine Learning online endpoints](concept-endpoints.md). Use Application Insights to view metrics and create alerts to stay up to date with your online endpoints. In this article you learn how to: > [!div class="checklist"] -> * View metrics for your managed online endpoint +> * View metrics for your online endpoint > * Create a dashboard for your metrics > * Create a metric alert ## Prerequisites -- Deploy an Azure Machine Learning managed online endpoint. +- Deploy an Azure Machine Learning online endpoint. - You must have at least [Reader access](../role-based-access-control/role-assignments-portal.md) on the endpoint. ## View metrics Use the following steps to view metrics for a managed endpoint or deployment: 1. Go to the [Azure portal](https://portal.azure.com). -1. Navigate to the managed online endpoint or deployment resource. +1. Navigate to the online endpoint or deployment resource. - Managed online endpoints and deployments are Azure Resource Manager (ARM) resources that can be found by going to their owning resource group. Look for the resource types **Machine Learning online endpoint** and **Machine Learning online deployment**. + online endpoints and deployments are Azure Resource Manager (ARM) resources that can be found by going to their owning resource group. Look for the resource types **Machine Learning online endpoint** and **Machine Learning online deployment**. 1. In the left-hand column, select **Metrics**. ## Available metrics -Depending on the resource that you select, the metrics that you see will be different. Metrics are scoped differently for managed online endpoints and managed online deployments. +Depending on the resource that you select, the metrics that you see will be different. Metrics are scoped differently for online endpoints and online deployments. 
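+
+Metrics can also be retrieved programmatically. The following is a minimal sketch using the `azure-monitor-query` package; the endpoint resource ID and the metric name are placeholders, so use the names shown on the Metrics page for your endpoint:
+
+```python
+from datetime import timedelta
+
+from azure.identity import DefaultAzureCredential
+from azure.monitor.query import MetricAggregationType, MetricsQueryClient
+
+client = MetricsQueryClient(DefaultAzureCredential())
+
+# ARM resource ID of the online endpoint (placeholder values).
+endpoint_id = (
+    "/subscriptions/<sub-id>/resourceGroups/<rg>/providers/"
+    "Microsoft.MachineLearningServices/workspaces/<workspace>/onlineEndpoints/<endpoint>"
+)
+
+response = client.query_resource(
+    endpoint_id,
+    metric_names=["RequestsPerMinute"],  # placeholder metric name
+    timespan=timedelta(hours=1),
+    aggregations=[MetricAggregationType.AVERAGE],
+)
+
+for metric in response.metrics:
+    for series in metric.timeseries:
+        for point in series.data:
+            print(metric.name, point.timestamp, point.average)
+```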
### Metrics at endpoint scope @@ -61,7 +61,7 @@ Split on the following dimensions: #### Bandwidth throttling -Bandwidth will be throttled if the limits are exceeded (see managed online endpoints section in [Manage and increase quotas for resources with Azure Machine Learning](how-to-manage-quotas.md#azure-machine-learning-managed-online-endpoints)). To determine if requests are throttled: +Bandwidth will be throttled if the limits are exceeded for _managed_ online endpoints (see managed online endpoints section in [Manage and increase quotas for resources with Azure Machine Learning](how-to-manage-quotas.md#azure-machine-learning-managed-online-endpoints)). To determine if requests are throttled: - Monitor the "Network bytes" metric - The response trailers will have the fields: `ms-azureml-bandwidth-request-delay-ms` and `ms-azureml-bandwidth-response-delay-ms`. The values of the fields are the delays, in milliseconds, of the bandwidth throttling. @@ -80,11 +80,11 @@ Split on the following dimension: ## Create a dashboard -You can create custom dashboards to visualize data from multiple sources in the Azure portal, including the metrics for your managed online endpoint. For more information, see [Create custom KPI dashboards using Application Insights](../azure-monitor/app/tutorial-app-dashboards.md#add-custom-metric-chart). +You can create custom dashboards to visualize data from multiple sources in the Azure portal, including the metrics for your online endpoint. For more information, see [Create custom KPI dashboards using Application Insights](../azure-monitor/app/tutorial-app-dashboards.md#add-custom-metric-chart). ## Create an alert -You can also create custom alerts to notify you of important status updates to your managed online endpoint: +You can also create custom alerts to notify you of important status updates to your online endpoint: 1. At the top right of the metrics page, select **New alert rule**. diff --git a/articles/machine-learning/how-to-network-security-overview.md b/articles/machine-learning/how-to-network-security-overview.md index d408cf67cf6f1..068f5b480e32d 100644 --- a/articles/machine-learning/how-to-network-security-overview.md +++ b/articles/machine-learning/how-to-network-security-overview.md @@ -101,7 +101,7 @@ Use the following steps to secure your workspace and associated resources. These | Service | Endpoint information | Allow trusted information | | ----- | ----- | ----- | | __Azure Key Vault__| [Service endpoint](../key-vault/general/overview-vnet-service-endpoints.md)
                  [Private endpoint](../key-vault/general/private-link-service.md) | [Allow trusted Microsoft services to bypass this firewall](how-to-secure-workspace-vnet.md#secure-azure-key-vault) | - | __Azure Storage Account__ | [Service and private endpoint](how-to-secure-workspace-vnet.md?tabs=se#secure-azure-storage-accounts)
                  [Private endpoint](how-to-secure-workspace-vnet.md?tabs=pe#secure-azure-storage-accounts) | [Grant access from Azure resource instances](../storage/common/storage-network-security.md#grant-access-from-azure-resource-instances-preview)
                  **or**
                  [Grant access to trusted Azure services](../storage/common/storage-network-security.md#grant-access-to-trusted-azure-services) | + | __Azure Storage Account__ | [Service and private endpoint](how-to-secure-workspace-vnet.md?tabs=se#secure-azure-storage-accounts)
                  [Private endpoint](how-to-secure-workspace-vnet.md?tabs=pe#secure-azure-storage-accounts) | [Grant access from Azure resource instances](../storage/common/storage-network-security.md#grant-access-from-azure-resource-instances)
                  **or**
                  [Grant access to trusted Azure services](../storage/common/storage-network-security.md#grant-access-to-trusted-azure-services) | | __Azure Container Registry__ | [Private endpoint](../container-registry/container-registry-private-link.md) | [Allow trusted services](../container-registry/allow-access-trusted-services.md) | diff --git a/articles/machine-learning/how-to-prepare-datasets-for-automl-images.md b/articles/machine-learning/how-to-prepare-datasets-for-automl-images.md index 4950c71856357..d0d52dbac7fa5 100644 --- a/articles/machine-learning/how-to-prepare-datasets-for-automl-images.md +++ b/articles/machine-learning/how-to-prepare-datasets-for-automl-images.md @@ -8,7 +8,7 @@ ms.service: machine-learning ms.subservice: automl ms.topic: how-to ms.custom: template-how-to, sdkv2, event-tier1-build-2022 -ms.date: 04/15/2022 +ms.date: 05/26/2022 --- # Prepare data for computer vision tasks with automated machine learning (preview) @@ -70,7 +70,7 @@ az ml data create -f [PATH_TO_YML_FILE] --workspace-name [YOUR_AZURE_WORKSPACE] ``` # [Python SDK v2 (preview)](#tab/SDK-v2) -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=upload-data)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=upload-data)] --- Next, you will need to get the label annotations in JSONL format. The schema of labeled data depends on the computer vision task at hand. Refer to [schemas for JSONL files for AutoML computer vision experiments](reference-automl-images-schema.md) to learn more about the required JSONL schema for each task type. @@ -81,7 +81,7 @@ If your training data is in a different format (like, pascal VOC or COCO), [help Once you have your labeled data in JSONL format, you can use it to create `MLTable` as shown below. MLtable packages your data into a consumable object for training. -:::code language="yaml" source="~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/data/training-mltable-folder/MLTable"::: +:::code language="yaml" source="~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/data/training-mltable-folder/MLTable"::: You can then pass in the `MLTable` as a data input for your AutoML training job. diff --git a/articles/machine-learning/how-to-read-write-data-v2.md b/articles/machine-learning/how-to-read-write-data-v2.md index 8af6ebbc2f253..019620018b618 100644 --- a/articles/machine-learning/how-to-read-write-data-v2.md +++ b/articles/machine-learning/how-to-read-write-data-v2.md @@ -1,7 +1,7 @@ --- -title: Read and write data +title: Read and write data in jobs titleSuffix: Azure Machine Learning -description: Learn how to read and write data for consumption in Azure Machine Learning training jobs. +description: Learn how to read and write data in Azure Machine Learning training jobs. 
services: machine-learning ms.service: machine-learning ms.subservice: mldata @@ -9,16 +9,17 @@ ms.topic: how-to ms.author: yogipandey author: ynpandey ms.reviewer: ssalgadodev -ms.date: 04/15/2022 +ms.date: 05/26/2022 ms.custom: devx-track-python, devplatv2, sdkv2, cliv2, event-tier1-build-2022 #Customer intent: As an experienced Python developer, I need to read in my data to make it available to a remote compute to train my machine learning models. --- -# Read and write data for ML experiments +# Read and write data in a job + [!INCLUDE [sdk v2](../../includes/machine-learning-sdk-v2.md)] -[!INCLUDE [preview disclaimer](../../includes/machine-learning-preview-generic-disclaimer.md)] +[!INCLUDE [CLI v2](../../includes/machine-learning-CLI-v2.md)] -Learn how to read and write data for your training jobs with the Azure Machine Learning Python SDK v2(preview) and the Azure Machine Learning CLI extension v2. +Learn how to read and write data for your jobs with the Azure Machine Learning Python SDK v2(preview) and the Azure Machine Learning CLI extension v2. ## Prerequisites @@ -28,294 +29,346 @@ Learn how to read and write data for your training jobs with the Azure Machine L - An Azure Machine Learning workspace -```python +## Supported paths -from azure.ai.ml import MLClient -from azure.identity import InteractiveBrowserCredential +When you provide a data input/output to a Job, you'll need to specify a `path` parameter that points to the data location. Below is a table that shows the different data locations supported in Azure Machine Learning and examples for the `path` parameter: -#enter details of your AML workspace -subscription_id = '' -resource_group = '' -workspace = '' -#get a handle to the workspace -ml_client = MLClient(InteractiveBrowserCredential(), subscription_id, resource_group, workspace) -``` +|Location | Examples | +|---------|---------| +|A path on your local computer | `./home/username/data/my_data` | +|A path on a public http(s) server | `https://raw.githubusercontent.com/pandas-dev/pandas/main/doc/data/titanic.csv` | +|A path on Azure Storage | `https://.blob.core.windows.net//path`
                  `abfss://@.dfs.core.windows.net/` | +|A path on a Datastore | `azureml://datastores//paths/` | -## Use local data in a job +## Supported modes -You can use data from your current working directory in a training job with the JobInput class. -The JobInput class allows you to define data inputs from a specific file, `uri_file` or a folder location, `uri_folder`. In the JobInput object, you specify the `path` of where your data is located; the path can be a local path or a cloud path. Azure Machine Learning supports `https://`, `abfss://`, `wasbs://` and `azureml://` URIs. +When you run a job with data inputs/outputs, you can specify the *mode* - for example, whether you would like the data to be read-only mounted or downloaded to the compute target. The table below shows the possible modes for different type/mode/input/output combinations: -> [!IMPORTANT] -> If the path is local, but your compute is defined to be in the cloud, Azure Machine Learning will automatically upload the data to cloud storage for you. +Type | Input/Output | `upload` | `download` | `ro_mount` | `rw_mount` | `direct` | `eval_download` | `eval_mount` +------ | ------ | :---: | :---: | :---: | :---: | :---: | :---: | :---: +`uri_folder` | Input | | ✓ | ✓ | | ✓ | | +`uri_file` | Input | | ✓ | ✓ | | ✓ | | +`mltable` | Input | | ✓ | ✓ | | ✓ | ✓ | ✓ +`uri_folder` | Output | ✓ | | | ✓ | ✓ | | +`uri_file` | Output | ✓ | | | ✓ | ✓ | | +`mltable` | Output | ✓ | | | ✓ | ✓ | | -```python +> [!NOTE] +> `eval_download` and `eval_mount` are unique to `mltable`. Whilst `ro_mount` is the default mode for MLTable, there are scenarios where an MLTable can yield files that are not necessarily co-located with the MLTable file in storage. Alternatively, an `mltable` can subset or shuffle the data that resides in the storage. That view is only visible if the MLTable file is actually evaluated by the engine. These modes will provide that view of the files. -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes -my_job_inputs = { - "input_data": JobInput( - path='./sample_data', # change to be your local directory - type=AssetTypes.URI_FOLDER - ) -} +## Read data in a job -job = CommandJob( - code="./src", # local path where the code is stored - command='python train.py --input_folder ${{inputs.input_data}}', - inputs=my_job_inputs, - environment="AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:9", - compute="cpu-cluster" -) +# [CLI](#tab/CLI) -#submit the command job -returned_job = ml_client.create_or_update(job) -#get a URL for the status of the job -returned_job.services["Studio"].endpoint -``` +Create a job specification YAML file (`.yml`). Specify in the `inputs` section of the job: -## Use data stored in storage service on Azure in a job +1. The `type`; whether the data you are pointing to is a specific file (`uri_file`) or a folder location (`uri_folder`) or an `mltable`. +1. The `path` of where your data is located; the path can be any of those outlined in the [Supported Paths](#supported-paths) section. -You can read your data in from existing storage on Azure. +```yaml +$schema: https://azuremlschemas.azureedge.net/latest/commandJob.schema.json -# [Azure Data Lake Storage Gen2](#tab/ADLS-Gen2) +# Possible Paths for Data: +# Blob: https://.blob.core.windows.net/// +# Datastore: azureml://datastores/paths// +# Data Asset: azureml:: -The following code shows how to read in data from Azure Data Lake Storage Gen 2. 
+command: | + ls ${{inputs.my_data}} +code: +inputs: + my_data: + type: # uri_file, uri_folder, mltable + path: +environment: azureml:AzureML-sklearn-1.0-ubuntu20.04-py38-cpu@latest +compute: azureml:cpu-cluster +``` -```python +Next, run in the CLI -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes +```azurecli +az ml job create -f .yml +``` -my_job_inputs = { - "input_data": JobInput( - path='abfss://@.dfs.core.windows.net/', - type=AssetTypes.URI_FOLDER - ) -} +# [Python-SDK](#tab/Python-SDK) -job = CommandJob( - code="./src", # local path where the code is stored - command='python train.py --input_folder ${{inputs.input_data}}', - inputs=my_job_inputs, - environment="AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:9", - compute="cpu-cluster" -) +The `Input` class allows you to define: -#submit the command job -returned_job = ml_client.create_or_update(job) -#get a URL for the status of the job -returned_job.services["Studio"].endpoint -``` +1. The `type`; whether the data you are pointing to is a specific file (`uri_file`) or a folder location (`uri_folder`) or an `mltable`. +1. The `path` of where your data is located; the path can be any of those outlined in the [Supported Paths](#supported-paths) section. -# [Azure Blob Storage](#tab/blob) +```python +from azure.ai.ml import command +from azure.ai.ml.entities import Data +from azure.ai.ml import Input +from azure.ai.ml.constants import AssetTypes +from azure.ai.ml import MLClient -The following code shows how to read in data from Azure Blob Storage. +ml_client = MLClient.from_config() -```python +# Possible Asset Types for Data: +# AssetTypes.URI_FILE +# AssetTypes.URI_FOLDER +# AssetTypes.MLTABLE -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes +# Possible Paths for Data: +# Blob: https://.blob.core.windows.net/// +# Datastore: azureml://datastores/paths// +# Data Asset: azureml:: -# in this example we my_job_inputs = { - "input_data": JobInput( - path='https://.blob.core.windows.net//path', - type=AssetTypes.URI_FOLDER - ) + "input_data": Input(type=AssetTypes.URI_FOLDER, path="") } -job = CommandJob( - code="./src", # local path where the code is stored - command='python train.py --input_folder ${{inputs.input_data}}', +job = command( + code="./src", # local path where the code is stored + command="ls ${{inputs.input_data}}", inputs=my_job_inputs, environment="AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:9", - compute="cpu-cluster" + compute="cpu-cluster", ) -#submit the command job -returned_job = ml_client.create_or_update(job) -#get a URL for the status of the job +# submit the command +returned_job = ml_client.jobs.create_or_update(job) +# get a URL for the status of the job returned_job.services["Studio"].endpoint ``` --- -## Read and write data to cloud-based storage +### Read V1 data assets +This section outlines how you can read V1 `FileDataset` and `TabularDataset` data entities in a V2 job. -You can read and write data from your job into your cloud-based storage. +#### Read a `FileDataset` -The JobInput defaults the mode - how the input will be exposed during job runtime - to InputOutputModes.RO_MOUNT (read-only mount). Put another way, Azure Machine Learning will mount the file or folder to the compute and set the file/folder to read-only. By design, you can't write to JobInputs only JobOutputs. The data is automatically uploaded to cloud storage. 
+# [CLI](#tab/CLI) -# [Azure Data Lake Storage Gen2](#tab/ADLS-Gen2) +Create a job specification YAML file (`.yml`), with the type set to `mltable` and the mode set to `eval_mount`: -```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob, JobOutput -from azure.ai.ml._constants import AssetTypes - -my_job_inputs = { - "input_data": JobInput( - path='abfss://@.dfs.core.windows.net/', - type=AssetTypes.URI_FOLDER - ) -} - -my_job_outputs = { - "output_folder": JobOutput( - path='abfss://@.dfs.core.windows.net/', - type=AssetTypes.URI_FOLDER - ) -} - -job = CommandJob( - code="./src", #local path where the code is stored - command='python pre-process.py --input_folder ${{inputs.input_data}} --output_folder ${{outputs.output_folder}}', - inputs=my_job_inputs, - outputs=my_job_outputs, - environment="AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:9", - compute="cpu-cluster" -) +```yaml +$schema: https://azuremlschemas.azureedge.net/latest/commandJob.schema.json -#submit the command job -returned_job = ml_client.create_or_update(job) +command: | + ls ${{inputs.my_data}} +code: +inputs: + my_data: + type: mltable + mode: eval_mount + path: azureml:@latest +environment: azureml:@latest +compute: azureml:cpu-cluster +``` -#get a URL for the status of the job -returned_job.services["Studio"].endpoint +Next, run in the CLI +```azurecli +az ml job create -f .yml ``` -# [Azure Blob Storage ](#tab/blob) +# [Python-SDK](#tab/Python-SDK) + +In the `Input` object specify the `type` as `AssetTypes.MLTABLE` and `mode` as `InputOutputModes.EVAL_MOUNT`: ```python +from azure.ai.ml import command +from azure.ai.ml.entities import Data +from azure.ai.ml import Input +from azure.ai.ml.constants import AssetTypes, InputOutputModes +from azure.ai.ml import MLClient -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob, JobOutput -from azure.ai.ml._constants import AssetTypes +ml_client = MLClient.from_config() -my_job_inputs = { - "input_data": JobInput( - path='https://.blob.core.windows.net//path', - type=AssetTypes.URI_FOLDER - ) -} +filedataset_asset = ml_client.data.get(name="", version="") -my_job_outputs = { - "output_folder": JobOutput( - path='https://.blob.core.windows.net//path', - type=AssetTypes.URI_FOLDER +my_job_inputs = { + "input_data": Input( + type=AssetTypes.MLTABLE, + path=filedataset_asset, + mode=InputOutputModes.EVAL_MOUNT ) } -job = CommandJob( - code="./src", #local path where the code is stored - command='python pre-process.py --input_folder ${{inputs.input_data}} --output_folder ${{outputs.output_folder}}', +job = command( + code="./src", # local path where the code is stored + command="ls ${{inputs.input_data}}", inputs=my_job_inputs, - outputs=my_job_outputs, - environment="AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:9", - compute="cpu-cluster" + environment=":", + compute="cpu-cluster", ) -#submit the command job -returned_job = ml_client.create_or_update(job) -#get a URL for the status of the job +# submit the command +returned_job = ml_client.jobs.create_or_update(job) +# get a URL for the status of the job returned_job.services["Studio"].endpoint ``` --- -## Register data -You can register data as an asset to your workspace. The benefits of registering data are: +#### Read a `TabularDataset` -* Easy to share with other members of the team (no need to remember file locations) -* Versioning of the metadata (location, description, etc.) 
-* Lineage tracking +# [CLI](#tab/CLI) -The following example demonstrates versioning of sample data, and shows how to register a local file as a data asset. The data is uploaded to cloud storage and registered as an asset. +Create a job specification YAML file (`.yml`), with the type set to `mltable` and the mode set to `direct`: -```python +```yaml +$schema: https://azuremlschemas.azureedge.net/latest/commandJob.schema.json -from azure.ai.ml.entities import Data -from azure.ai.ml._constants import AssetTypes - -my_data = Data( - path="./sample_data/titanic.csv", - type=AssetTypes.URI_FILE, - description="Titanic Data", - name="titanic", - version='1' -) +command: | + ls ${{inputs.my_data}} +code: +inputs: + my_data: + type: mltable + mode: direct + path: azureml:@latest +environment: azureml:@latest +compute: azureml:cpu-cluster +``` -ml_client.data.create_or_update(my_data) -``` +Next, run in the CLI -To register data that is in a cloud location, you can specify the path with any of the supported protocols for the storage type. The following example shows what the path looks like for data from Azure Data Lake Storage Gen 2. +```azurecli +az ml job create -f .yml +``` + +# [Python-SDK](#tab/Python-SDK) + +In the `Input` object specify the `type` as `AssetTypes.MLTABLE` and `mode` as `InputOutputModes.DIRECT`: ```python +from azure.ai.ml import command from azure.ai.ml.entities import Data -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input +from azure.ai.ml.constants import AssetTypes, InputOutputModes +from azure.ai.ml import MLClient + +ml_client = MLClient.from_config() -my_path = 'abfss://@.dfs.core.windows.net/' # adls gen2 +filedataset_asset = ml_client.data.get(name="", version="") -my_data = Data( - path=my_path, - type=AssetTypes.URI_FOLDER, - description="description here", - name="a_name", - version='1' +my_job_inputs = { + "input_data": Input( + type=AssetTypes.MLTABLE, + path=filedataset_asset, + mode=InputOutputModes.DIRECT + ) +} + +job = command( + code="./src", # local path where the code is stored + command="python train.py --inputs ${{inputs.input_data}}", + inputs=my_job_inputs, + environment=":", + compute="cpu-cluster", ) -ml_client.data.create_or_update(my_data) +# submit the command +returned_job = ml_client.jobs.create_or_update(job) +# get a URL for the status of the job +returned_job.services["Studio"].endpoint +``` + +--- +## Write data in a job + +In your job you can write data to your cloud-based storage using *outputs*. The [Supported modes](#supported-modes) section showed that only job *outputs* can write data because the mode can be either `rw_mount` or `upload`. 
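Both write behaviors are set per output. As a minimal sketch (separate from the tabbed examples that follow, with a placeholder path), an explicit output mode looks like this in the Python SDK:

```python
from azure.ai.ml import Output
from azure.ai.ml.constants import AssetTypes, InputOutputModes

# rw_mount (read/write mount) is the default for outputs; upload writes to the
# local disk of the compute and uploads the result when the job completes.
my_job_outputs = {
    "prep_data": Output(
        type=AssetTypes.URI_FOLDER,
        path="<path>",  # placeholder: blob URL, datastore URI, or data asset
        mode=InputOutputModes.UPLOAD,
    )
}
```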
+ +# [CLI](#tab/CLI) + +Create a job specification YAML file (`.yml`), with the `outputs` section populated with the type and path of where you would like to write your data to: + +```yaml +$schema: https://azuremlschemas.azureedge.net/latest/CommandJob.schema.json + +# Possible Paths for Data: +# Blob: https://.blob.core.windows.net/// +# Datastore: azureml://datastores/paths// +# Data Asset: azureml:: + +code: src +command: >- + python prep.py + --raw_data ${{inputs.raw_data}} + --prep_data ${{outputs.prep_data}} +inputs: + raw_data: + type: # uri_file, uri_folder, mltable + path: +outputs: + prep_data: + type: # uri_file, uri_folder, mltable + path: +environment: azureml:@latest +compute: azureml:cpu-cluster ``` -## Consume registered data assets in jobs +Next create a job using the CLI: -Once your data is registered as an asset to the workspace, you can consume that data asset in jobs. -The following example demonstrates how to consume `version` 1 of the registered data asset `titanic`. +```azurecli +az ml job create --file .yml +``` + +# [Python-SDK](#tab/Python-SDK) ```python +from azure.ai.ml import command +from azure.ai.ml.entities import Data +from azure.ai.ml import Input, Output +from azure.ai.ml.constants import AssetTypes -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes +# Possible Asset Types for Data: +# AssetTypes.URI_FILE +# AssetTypes.URI_FOLDER +# AssetTypes.MLTABLE -registered_data_asset = ml_client.data.get(name='titanic', version='1') +# Possible Paths for Data: +# Blob: https://.blob.core.windows.net/// +# Datastore: azureml://datastores/paths// +# Data Asset: azureml:: my_job_inputs = { - "input_data": JobInput( - type=AssetTypes.URI_FOLDER, - path=registered_data_asset.id - ) + "raw_data": Input(type=AssetTypes.URI_FOLDER, path="") +} + +my_job_outputs = { + "prep_data": Output(type=AssetTypes.URI_FOLDER, path="") } -job = CommandJob( - code="./src", - command='python read_data_asset.py --input_folder ${{inputs.input_data}}', +job = command( + code="./src", # local path where the code is stored + command="python process_data.py --raw_data ${{inputs.raw_data}} --prep_data ${{outputs.prep_data}}", inputs=my_job_inputs, - environment="AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:9", - compute="cpu-cluster" + outputs=my_job_outputs, + environment=":", + compute="cpu-cluster", ) -#submit the command job +# submit the command returned_job = ml_client.create_or_update(job) - -#get a URL for the status of the job +# get a URL for the status of the job returned_job.services["Studio"].endpoint + ``` -## Use data in pipelines +--- + +## Data in pipelines If you're working with Azure Machine Learning pipelines, you can read data into and move data between pipeline components with the Azure Machine Learning CLI v2 extension or the Python SDK v2 (preview). 
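As a rough orientation before the CLI and SDK examples that follow: in the SDK v2 you declare each component's inputs and outputs, then bind one step's output to the next step's input inside a function decorated with `@pipeline`. The sketch below is illustrative only; the scripts, environment, and path are placeholders rather than files from this article:

```python
from azure.ai.ml import command, Input, Output
from azure.ai.ml.constants import AssetTypes
from azure.ai.ml.dsl import pipeline

prep = command(
    code="./src",  # placeholder folder containing prep.py
    command="python prep.py --raw_data ${{inputs.raw_data}} --prep_data ${{outputs.prep_data}}",
    inputs={"raw_data": Input(type=AssetTypes.URI_FOLDER)},
    outputs={"prep_data": Output(type=AssetTypes.URI_FOLDER)},
    environment="<environment_name>:<version>",  # placeholder
    compute="cpu-cluster",
)

train = command(
    code="./src",  # placeholder folder containing train.py
    command="python train.py --training_data ${{inputs.training_data}}",
    inputs={"training_data": Input(type=AssetTypes.URI_FOLDER)},
    environment="<environment_name>:<version>",  # placeholder
    compute="cpu-cluster",
)

@pipeline(description="prep then train")
def prep_and_train(pipeline_raw_data):
    prep_step = prep(raw_data=pipeline_raw_data)
    # the output of the prep step becomes the input of the train step
    train_step = train(training_data=prep_step.outputs.prep_data)

pipeline_job = prep_and_train(
    pipeline_raw_data=Input(type=AssetTypes.URI_FOLDER, path="<path>")
)
# submit with ml_client.jobs.create_or_update(pipeline_job)
```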
### Azure Machine Learning CLI v2 The following YAML file demonstrates how to use the output data from one component as the input for another component of the pipeline using the Azure Machine Learning CLI v2 extension: -[!INCLUDE [cli v2](../../includes/machine-learning-cli-v2.md)] +[!INCLUDE [CLI v2](../../includes/machine-learning-CLI-v2.md)] -:::code language="yaml" source="~/azureml-examples-sdk-preview/cli/jobs/pipelines-with-components/basics/3b_pipeline_with_data/pipeline.yml"::: +:::code language="yaml" source="~/azureml-examples-main/CLI/jobs/pipelines-with-components/basics/3b_pipeline_with_data/pipeline.yml"::: -## Python SDK v2 (preview) +### Python SDK v2 (preview) The following example defines a pipeline containing three nodes and moves data between each node. @@ -323,7 +376,10 @@ The following example defines a pipeline containing three nodes and moves data b * `train_node` that trains a CNN model with Keras using the training data, `mnist_train.csv` . * `score_node` that scores the model using test data, `mnist_test.csv`. -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=build-pipeline)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=build-pipeline)] ## Next steps -Learn more about [Data in Azure Machine Learning](concept-data.md) + +* [Train models with the Python SDK v2 (preview)](how-to-train-sdk.md) +* [Tutorial: Create production ML pipelines with Python SDK v2 (preview)](tutorial-pipeline-python-sdk.md) +* Learn more about [Data in Azure Machine Learning](concept-data.md) diff --git a/articles/machine-learning/how-to-responsible-ai-dashboard-sdk-cli.md b/articles/machine-learning/how-to-responsible-ai-dashboard-sdk-cli.md index 2f403f0b3fe5a..181c246b3fd9b 100644 --- a/articles/machine-learning/how-to-responsible-ai-dashboard-sdk-cli.md +++ b/articles/machine-learning/how-to-responsible-ai-dashboard-sdk-cli.md @@ -87,6 +87,16 @@ The ` RAI Insights Dashboard Constructor` and `Gather RAI Insights Dashboard ` c Below are specifications of the Responsible AI components and examples of code snippets in YAML and Python. To view the full code, see [sample YAML and Python notebook](https://aka.ms/RAIsamplesProgrammer) +### Limitations +The current set of components have a number of limitations on their use: + +- All models must be in registered in AzureML in MLFlow format with a sklearn flavor. +- The models must be loadable in the component environment. +- The models must be pickleable. +- The models must be supplied to the RAI components using the 'Fetch Registered Model' component which we provide. +- The dataset inputs must be `pandas` DataFrames in Parquet format. +- A model must still be supplied even if only a causal analysis of the data is performed. The `DummyClassifier` and `DummyRegressor` estimators from SciKit-Learn can be used for this purpose. + ### RAI Insights Dashboard Constructor This component has three input ports: @@ -504,3 +514,6 @@ The supplied datasets should be file datasets (uri_file type) in Parquet format. - Learn more about the [concepts and techniques behind the Responsible AI dashboard](concept-responsible-ai-dashboard.md). 
- Learn more about how to [collect data responsibly](concept-sourcing-human-data.md) - View [sample YAML and Python notebooks](https://aka.ms/RAIsamples) to generate a Responsible AI dashboard with YAML or Python. +- Learn more about how the Responsible AI Dashboard and Scorecard can be used to debug data and models and inform better decision making in this [tech community blog post](https://www.microsoft.com/ai/ai-lab-responsible-ai-dashboard) +- Learn about how the Responsible AI Dashboard and Scorecard were used by the NHS in a [real life customer story](https://aka.ms/NHSCustomerStory) +- Explore the features of the Responsible AI Dashboard through this [interactive AI Lab web demo](https://www.microsoft.com/ai/ai-lab-responsible-ai-dashboard) diff --git a/articles/machine-learning/how-to-responsible-ai-dashboard-ui.md b/articles/machine-learning/how-to-responsible-ai-dashboard-ui.md index e9c91566d500b..da9c855e35297 100644 --- a/articles/machine-learning/how-to-responsible-ai-dashboard-ui.md +++ b/articles/machine-learning/how-to-responsible-ai-dashboard-ui.md @@ -117,3 +117,6 @@ After you’ve finished your experiment configuration, select **Create** to star - Summarize and share your Responsible AI insights with the [Responsible AI scorecard as a PDF export](how-to-responsible-ai-scorecard.md). - Learn more about the [concepts and techniques behind the Responsible AI dashboard](concept-responsible-ai-dashboard.md). - Learn more about how to [collect data responsibly](concept-sourcing-human-data.md) +- Learn more about how the Responsible AI Dashboard and Scorecard can be used to debug data and models and inform better decision making in this [tech community blog post](https://www.microsoft.com/ai/ai-lab-responsible-ai-dashboard) +- Learn about how the Responsible AI Dashboard and Scorecard were used by the NHS in a [real life customer story](https://aka.ms/NHSCustomerStory) +- Explore the features of the Responsible AI Dashboard through this [interactive AI Lab web demo](https://www.microsoft.com/ai/ai-lab-responsible-ai-dashboard) diff --git a/articles/machine-learning/how-to-responsible-ai-dashboard.md b/articles/machine-learning/how-to-responsible-ai-dashboard.md index e1746ebca1b3e..66fda7dbb25c6 100644 --- a/articles/machine-learning/how-to-responsible-ai-dashboard.md +++ b/articles/machine-learning/how-to-responsible-ai-dashboard.md @@ -318,3 +318,6 @@ Selecting the Treatment policy tab switches to a view to help determine real-wor - Summarize and share your Responsible AI insights with the [Responsible AI scorecard as a PDF export](how-to-responsible-ai-scorecard.md). - Learn more about the [concepts and techniques behind the Responsible AI dashboard](concept-responsible-ai-dashboard.md). - View [sample YAML and Python notebooks](https://aka.ms/RAIsamples) to generate a Responsible AI dashboard with YAML or Python. 
+- Explore the features of the Responsible AI Dashboard through this [interactive AI Lab web demo](https://www.microsoft.com/ai/ai-lab-responsible-ai-dashboard) +- Learn more about how the Responsible AI Dashboard and Scorecard can be used to debug data and models and inform better decision making in this [tech community blog post](https://www.microsoft.com/ai/ai-lab-responsible-ai-dashboard) +- Learn about how the Responsible AI Dashboard and Scorecard were used by the NHS in a [real life customer story](https://aka.ms/NHSCustomerStory) diff --git a/articles/machine-learning/how-to-responsible-ai-scorecard.md b/articles/machine-learning/how-to-responsible-ai-scorecard.md index d8f0c928fa3e4..42683c72dcd3e 100644 --- a/articles/machine-learning/how-to-responsible-ai-scorecard.md +++ b/articles/machine-learning/how-to-responsible-ai-scorecard.md @@ -242,3 +242,6 @@ Finally, you can observe your dataset’s causal insights summarized, figuring o - See the how-to guide for generating a Responsible AI dashboard via [CLIv2 and SDKv2](how-to-responsible-ai-dashboard-sdk-cli.md) or [studio UI ](how-to-responsible-ai-dashboard-ui.md). - Learn more about the [concepts and techniques behind the Responsible AI dashboard](concept-responsible-ai-dashboard.md). - View [sample YAML and Python notebooks](https://aka.ms/RAIsamples) to generate a Responsible AI dashboard with YAML or Python. +- Learn more about how the Responsible AI Dashboard and Scorecard can be used to debug data and models and inform better decision making in this [tech community blog post](https://www.microsoft.com/ai/ai-lab-responsible-ai-dashboard) +- See how the Responsible AI Dashboard and Scorecard were used by the NHS in a [real life customer story](https://aka.ms/NHSCustomerStory) +- Explore the features of the Responsible AI Dashboard through this [interactive AI Lab web demo](https://www.microsoft.com/ai/ai-lab-responsible-ai-dashboard) diff --git a/articles/machine-learning/how-to-safely-rollout-managed-endpoints-sdk-v2.md b/articles/machine-learning/how-to-safely-rollout-managed-endpoints-sdk-v2.md new file mode 100644 index 0000000000000..4d57da05423a3 --- /dev/null +++ b/articles/machine-learning/how-to-safely-rollout-managed-endpoints-sdk-v2.md @@ -0,0 +1,305 @@ +--- +title: Safe rollout for managed online endpoints using Python SDK v2 (preview). +titleSuffix: Azure Machine Learning +description: Safe rollout for online endpoints using Python SDK v2 (preview). +services: machine-learning +ms.service: machine-learning +ms.subservice: mlops +ms.author: ssambare +ms.reviewer: larryfr +author: shivanissambare +ms.date: 05/25/2022 +ms.topic: how-to +ms.custom: how-to, devplatv2, sdkv2, deployment +--- + +# Safe rollout for managed online endpoints using Python SDK v2 (preview) + +[!INCLUDE [sdk v2](../../includes/machine-learning-sdk-v2.md)] + +> [!IMPORTANT] +> SDK v2 is currently in public preview. +> The preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. +> For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). + +In this article, you learn how to deploy a new version of the model without causing any disruption. 
With a blue-green deployment (also known as safe rollout), a new version of a web service is introduced to production by rolling out the change to a small subset of users/requests before rolling it out completely. This article assumes you're using online endpoints; for more information, see [Azure Machine Learning endpoints](concept-endpoints.md).
+
+In this article, you'll learn to:
+
+* Deploy a new online endpoint called "blue" that serves version 1 of the model.
+* Scale this deployment so that it can handle more requests.
+* Deploy version 2 of the model to an endpoint called "green" that accepts no live traffic.
+* Test the green deployment in isolation.
+* Send 10% of live traffic to the green deployment.
+* Fully cut over all live traffic to the green deployment.
+* Delete the now-unused v1 blue deployment.
+
+## Prerequisites
+
+* If you don't have an Azure subscription, create a free account before you begin. Try the [free or paid version of Azure Machine Learning](https://azure.microsoft.com/free/) today.
+* The [Azure Machine Learning SDK v2 for Python](/python/api/overview/azure/ml/installv2).
+* You must have an Azure resource group, and you (or the service principal you use) must have Contributor access to it.
+* You must have an Azure Machine Learning workspace.
+* To deploy locally, you must install [Docker Engine](https://docs.docker.com/engine/) on your local computer. We highly recommend this option because it makes it easier to debug issues.
+
+### Clone examples repository
+
+To run the examples, first clone the examples repository and change into the `sdk` directory:
+
+```bash
+git clone --depth 1 https://github.com/Azure/azureml-examples
+cd azureml-examples/sdk
+```
+
+> [!TIP]
+> Use `--depth 1` to clone only the latest commit to the repository, which reduces time to complete the operation.
+
+## Connect to Azure Machine Learning workspace
+
+The [workspace](concept-workspace.md) is the top-level resource for Azure Machine Learning, providing a centralized place to work with all the artifacts you create when you use Azure Machine Learning. In this section, we'll connect to the workspace in which you'll perform deployment tasks.
+
+1. Import the required libraries:
+
+    ```python
+    # import required libraries
+    from azure.ai.ml import MLClient
+    from azure.ai.ml.entities import (
+        ManagedOnlineEndpoint,
+        ManagedOnlineDeployment,
+        Model,
+        Environment,
+        CodeConfiguration,
+    )
+    from azure.identity import DefaultAzureCredential
+    ```
+
+1. Configure workspace details and get a handle to the workspace:
+
+    To connect to a workspace, we need identifier parameters - a subscription, resource group and workspace name. We'll use these details in the `MLClient` from `azure.ai.ml` to get a handle to the required Azure Machine Learning workspace. This example uses the [default Azure authentication](/python/api/azure-identity/azure.identity.defaultazurecredential).
+
+    ```python
+    # enter details of your AML workspace
+    subscription_id = ""
+    resource_group = ""
+    workspace = ""
+    ```
+
+    ```python
+    # get a handle to the workspace
+    ml_client = MLClient(
+        DefaultAzureCredential(), subscription_id, resource_group, workspace
+    )
+    ```
+
+## Create online endpoint
+
+Online endpoints are endpoints that are used for online (real-time) inferencing. Online endpoints contain deployments that are ready to receive data from clients and can send responses back in real time.
+
+To create an online endpoint, we'll use `ManagedOnlineEndpoint`.
This class allows user to configure the following key aspects: + +* `name` - Name of the endpoint. Needs to be unique at the Azure region level +* `auth_mode` - The authentication method for the endpoint. Key-based authentication and Azure ML token-based authentication are supported. Key-based authentication doesn't expire but Azure ML token-based authentication does. Possible values are `key` or `aml_token`. +* `identity`- The managed identity configuration for accessing Azure resources for endpoint provisioning and inference. + * `type`- The type of managed identity. Azure Machine Learning supports `system_assigned` or `user_assigned` identity. + * `user_assigned_identities` - List (array) of fully qualified resource IDs of the user-assigned identities. This property is required if `identity.type` is user_assigned. +* `description`- Description of the endpoint. + +1. Configure the endpoint: + + ```python + # Creating a unique endpoint name with current datetime to avoid conflicts + import datetime + + online_endpoint_name = "endpoint-" + datetime.datetime.now().strftime("%m%d%H%M%f") + + # create an online endpoint + endpoint = ManagedOnlineEndpoint( + name=online_endpoint_name, + description="this is a sample online endpoint", + auth_mode="key", + tags={"foo": "bar"}, + ) + ``` + +1. Create the endpoint: + + Using the `MLClient` created earlier, we'll now create the Endpoint in the workspace. This command will start the endpoint creation and return a confirmation response while the endpoint creation continues. + + ```python + ml_client.begin_create_or_update(endpoint) + ``` + +## Create the 'blue' deployment + +A deployment is a set of resources required for hosting the model that does the actual inferencing. We'll create a deployment for our endpoint using the `ManagedOnlineDeployment` class. This class allows user to configure the following key aspects. + +**Key aspects of deployment** +* `name` - Name of the deployment. +* `endpoint_name` - Name of the endpoint to create the deployment under. +* `model` - The model to use for the deployment. This value can be either a reference to an existing versioned model in the workspace or an inline model specification. +* `environment` - The environment to use for the deployment. This value can be either a reference to an existing versioned environment in the workspace or an inline environment specification. +* `code_configuration` - the configuration for the source code and scoring script + * `path`- Path to the source code directory for scoring the model + * `scoring_script` - Relative path to the scoring file in the source code directory +* `instance_type` - The VM size to use for the deployment. For the list of supported sizes, see [Managed online endpoints SKU list](reference-managed-online-endpoints-vm-sku-list.md). +* `instance_count` - The number of instances to use for the deployment + +1. Configure blue deployment: + + ```python + # create blue deployment + model = Model(path="../model-1/model/sklearn_regression_model.pkl") + env = Environment( + conda_file="../model-1/environment/conda.yml", + image="mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04:20210727.v1", + ) + + blue_deployment = ManagedOnlineDeployment( + name="blue", + endpoint_name=online_endpoint_name, + model=model, + environment=env, + code_configuration=CodeConfiguration( + code="../model-1/onlinescoring", scoring_script="score.py" + ), + instance_type="Standard_F2s_v2", + instance_count=1, + ) + ``` + +1. 
Create the deployment: + + Using the `MLClient` created earlier, we'll now create the deployment in the workspace. This command will start the deployment creation and return a confirmation response while the deployment creation continues. + + ```python + ml_client.begin_create_or_update(blue_deployment) + ``` + + ```python + # blue deployment takes 100 traffic + endpoint.traffic = {"blue": 100} + ml_client.begin_create_or_update(endpoint) + ``` + +## Test the endpoint with sample data + +Using the `MLClient` created earlier, we'll get a handle to the endpoint. The endpoint can be invoked using the `invoke` command with the following parameters: + +* `endpoint_name` - Name of the endpoint +* `request_file` - File with request data +* `deployment_name` - Name of the specific deployment to test in an endpoint + +We'll send a sample request using a [json](https://github.com/Azure/azureml-examples/blob/main/sdk/endpoints/online/model-1/sample-request.json) file. + +```python +# test the blue deployment with some sample data +ml_client.online_endpoints.invoke( + endpoint_name=online_endpoint_name, + deployment_name="blue", + request_file="../model-1/sample-request.json", +) +``` + +## Scale the deployment + +Using the `MLClient` created earlier, we'll get a handle to the deployment. The deployment can be scaled by increasing or decreasing the `instance_count`. + +```python +# scale the deployment +blue_deployment = ml_client.online_deployments.get( + name="blue", endpoint_name=online_endpoint_name +) +blue_deployment.instance_count = 2 +ml_client.online_deployments.begin_create_or_update(blue_deployment) +``` + +## Get endpoint details + +```python +# Get the details for online endpoint +endpoint = ml_client.online_endpoints.get(name=online_endpoint_name) + +# existing traffic details +print(endpoint.traffic) + +# Get the scoring URI +print(endpoint.scoring_uri) +``` + +## Deploy a new model, but send no traffic yet + +Create a new deployment named green: + +```python +# create green deployment +model2 = Model(path="../model-2/model/sklearn_regression_model.pkl") +env2 = Environment( + conda_file="../model-2/environment/conda.yml", + image="mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04:20210727.v1", +) + +green_deployment = ManagedOnlineDeployment( + name="green", + endpoint_name=online_endpoint_name, + model=model2, + environment=env2, + code_configuration=CodeConfiguration( + code="../model-2/onlinescoring", scoring_script="score.py" + ), + instance_type="Standard_F2s_v2", + instance_count=1, +) +``` + +```python +# use MLClient to create green deployment +ml_client.begin_create_or_update(green_deployment) +``` + +## Test the 'green' deployment + +Though green has 0% of traffic allocated, you can still invoke the endpoint and deployment with [json](https://github.com/Azure/azureml-examples/blob/main/sdk/endpoints/online/model-2/sample-request.json) file. + +```python +ml_client.online_endpoints.invoke( + endpoint_name=online_endpoint_name, + deployment_name="green", + request_file="../model-2/sample-request.json", +) +``` + +1. Test the new deployment with a small percentage of live traffic: + + Once you've tested your green deployment, allocate a small percentage of traffic to it: + + ```python + endpoint.traffic = {"blue": 90, "green": 10} + ml_client.begin_create_or_update(endpoint) + ``` + + Now, your green deployment will receive 10% of requests. + +1. 
Send all traffic to your new deployment: + + Once you're satisfied that your green deployment is fully satisfactory, switch all traffic to it. + + ```python + endpoint.traffic = {"blue": 0, "green": 100} + ml_client.begin_create_or_update(endpoint) + ``` + +1. Remove the old deployment: + + ```python + ml_client.online_deployments.delete(name="blue", endpoint_name=online_endpoint_name) + ``` + +## Delete endpoint + +```python +ml_client.online_endpoints.begin_delete(name=online_endpoint_name) +``` + +## Next steps + +* Explore online endpoint samples - [https://github.com/Azure/azureml-examples/tree/main/sdk/endpoints](https://github.com/Azure/azureml-examples/tree/main/sdk/endpoints) \ No newline at end of file diff --git a/articles/machine-learning/how-to-secure-online-endpoint.md b/articles/machine-learning/how-to-secure-online-endpoint.md index d7a9340f2160b..671eb5fbe649c 100644 --- a/articles/machine-learning/how-to-secure-online-endpoint.md +++ b/articles/machine-learning/how-to-secure-online-endpoint.md @@ -9,13 +9,13 @@ ms.topic: how-to ms.reviewer: larryfr ms.author: seramasu author: rsethur -ms.date: 04/22/2022 +ms.date: 06/06/2022 ms.custom: event-tier1-build-2022 --- # Use network isolation with managed online endpoints (preview) -When deploying a machine learning model to a managed online endpoint, you can secure communication with the online endpoint by using [private endpoints](/azure/private-link/private-endpoint-overview). Using a private endpoint with online endpoints is currently a preview feature. +When deploying a machine learning model to a managed online endpoint, you can secure communication with the online endpoint by using [private endpoints](../private-link/private-endpoint-overview.md). Using a private endpoint with online endpoints is currently a preview feature. [!INCLUDE [preview disclaimer](../../includes/machine-learning-preview-generic-disclaimer.md)] @@ -31,9 +31,17 @@ The following diagram shows how communications flow through private endpoints to * You must install and configure the Azure CLI and ML extension. For more information, see [Install, set up, and use the CLI (v2)](how-to-configure-cli.md). -* You must have an Azure Resource group, in which you (or the service principal you use) need to have `Contributor` access. You'll have such a resource group if you configured your ML extension per the above article. +* You must have an Azure Resource Group, in which you (or the service principal you use) need to have `Contributor` access. You'll have such a resource group if you configured your ML extension per the above article. -* You must have an Azure Machine Learning workspace, and the workspace must use a private endpoint. If you don't have one, the steps in this article create an example workspace, VNet, and VM. For more information, see [Configure a private endpoint for Azure Machine Learning workspace](how-to-configure-private-link.md). +* You must have an Azure Machine Learning workspace, and the workspace must use a private endpoint. If you don't have one, the steps in this article create an example workspace, VNet, and VM. For more information, see [Configure a private endpoint for Azure Machine Learning workspace](/azure/machine-learning/how-to-configure-private-link). + + The workspace can be configured to allow or disallow public network access. 
If you plan on using managed online endpoint deployments that use __public outbound__, then you must also [configure the workspace to allow public access](how-to-configure-private-link.md#enable-public-access). + + Outbound communication from managed online endpoint deployment is to the _workspace API_. When the endpoint is configured to use __public outbound__, then the workspace must be able to accept that public communication (allow public access). + +* When the workspace is configured with a private endpoint, the Azure Container Registry for the workspace must be configured for __Premium__ tier. For more information, see [Azure Container Registry service tiers](/azure/container-registry/container-registry-skus). + +* The Azure Container Registry and Azure Storage Account must be in the same Azure Resource Group as the workspace. > [!IMPORTANT] > The end-to-end example in this article comes from the files in the __azureml-examples__ GitHub repository. To clone the samples repository and switch to the repository's `cli/` directory, use the following commands: @@ -45,12 +53,18 @@ The following diagram shows how communications flow through private endpoints to ## Limitations +* The `v1_legacy_mode` flag must be disabled (false) on your Azure Machine Learning workspace. If this flag is enabled, you won't be able to create a managed online endpoint. For more information, see [Network isolation with v2 API](how-to-configure-network-isolation-with-v2.md). + * If your Azure Machine Learning workspace has a private endpoint that was created before May 24, 2022, you must recreate the workspace's private endpoint before configuring your online endpoints to use a private endpoint. For more information on creating a private endpoint for your workspace, see [How to configure a private endpoint for Azure Machine Learning workspace](how-to-configure-private-link.md). * Secure outbound communication creates three private endpoints per deployment. One to Azure Blob storage, one to Azure Container Registry, and one to your workspace. * Azure Log Analytics and Application Insights aren't supported when using network isolation with a deployment. To see the logs for the deployment, use the [az ml online-deployment get_logs](/cli/azure/ml/online-deployment#az-ml-online-deployment-get-logs) command instead. +* You can configure public access to a __managed online endpoint__ (_inbound_ and _outbound_). You can also configure [public access to an Azure Machine Learning workspace](how-to-configure-private-link.md#enable-public-access). + + Outbound communication from managed online endpoint deployment is to the _workspace API_. When the endpoint is configured to use __public outbound__, then the workspace must be able to accept that public communication (allow public access). + > [!NOTE] > Requests to create, update, or retrieve the authentication keys are sent to the Azure Resource Manager over the public network. @@ -62,7 +76,7 @@ To secure scoring requests to the online endpoint to your virtual network, set t az ml online-endpoint create -f endpoint.yml --set public_network_access=disabled ``` -When `public_network_access` is `disabled`, inbound scoring requests are received using the [private endpoint of the Azure Machine Learning workspace](how-to-configure-private-link.md) and the endpoint can't be reached from public networks. 
+When `public_network_access` is `disabled`, inbound scoring requests are received using the [private endpoint of the Azure Machine Learning workspace](/azure/machine-learning/how-to-configure-private-link) and the endpoint can't be reached from public networks. ## Outbound (resource access) @@ -74,7 +88,7 @@ The following are the resources that the deployment communicates with over the p * The Azure Storage blob that is the default storage for the workspace. * The Azure Container Registry for the workspace. -When you configure the `egress_public_network_access` to `disabled`, a new private endpoint is created per deployment, per service. For example, if you set the flag to `true` for three deployments to an online endpoint, nine private endpoints are created. Each deployment would have three private endpoints that are used to communicate with the workspace, blob, and container registry. +When you configure the `egress_public_network_access` to `disabled`, a new private endpoint is created per deployment, per service. For example, if you set the flag to `disabled` for three deployments to an online endpoint, nine private endpoints are created. Each deployment would have three private endpoints that are used to communicate with the workspace, blob, and container registry. ```azurecli az ml online-deployment create -f deployment.yml --set egress_public_network_access=disabled @@ -87,9 +101,12 @@ The following table lists the supported configurations when configuring inbound | Configuration | Inbound
                  (Endpoint property) | Outbound
                  (Deployment property) | Supported? | | -------- | -------------------------------- | --------------------------------- | --------- | | secure inbound with secure outbound | `public_network_access` is disabled | `egress_public_network_access` is disabled | Yes | -| secure inbound with public outbound | `public_network_access` is disabled | `egress_public_network_access` is enabled | Yes | +| secure inbound with public outbound | `public_network_access` is disabled
                  The workspace must also allow public access. | `egress_public_network_access` is enabled | Yes | | public inbound with secure outbound | `public_network_access` is enabled | `egress_public_network_access` is disabled | Yes | -| public inbound with public outbound | `public_network_access` is enabled | `egress_public_network_access` is enabled | Yes | +| public inbound with public outbound | `public_network_access` is enabled
                  The workspace must also allow public access. | `egress_public_network_access` is enabled | Yes | + +> [!IMPORTANT] +> Outbound communication from managed online endpoint deployment is to the _workspace API_. When the endpoint is configured to use __public outbound__, then the workspace must be able to accept that public communication (allow public access). ## End-to-end example @@ -123,9 +140,9 @@ The following diagram shows the overall architecture of this example: To create the resources, use the following Azure CLI commands. Replace `` with a unique suffix for the resources that are created. -:::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/setup-repo/azure-github.sh" id="managed_vnet_workspace_suffix"::: +:::code language="azurecli" source="~/azureml-examples-main/setup-repo/azure-github.sh" id="managed_vnet_workspace_suffix"::: -:::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/setup-repo/azure-github.sh" id="managed_vnet_workspace_create"::: +:::code language="azurecli" source="~/azureml-examples-main/setup-repo/azure-github.sh" id="managed_vnet_workspace_create"::: ### Create the virtual machine jump box @@ -167,7 +184,7 @@ When prompted, enter the password you used when creating the VM. 1. Use the following commands from the SSH session to install the CLI and Docker: - :::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/cli/endpoints/online/managed/vnet/setup_vm/scripts/vmsetup.sh" id="setup_docker_az_cli"::: + :::code language="azurecli" source="~/azureml-examples-main/cli/endpoints/online/managed/vnet/setup_vm/scripts/vmsetup.sh" id="setup_docker_az_cli"::: 1. To create the environment variables used by this example, run the following commands. Replace `` with your Azure subscription ID. Replace `` with the resource group that contains your workspace. Replace `` with the suffix you provided earlier. Replace `` with the location of your Azure workspace. Replace `` with the name to use for the endpoint. @@ -176,11 +193,11 @@ When prompted, enter the password you used when creating the VM. # [Generic model](#tab/model) - :::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/cli/deploy-moe-vnet.sh" id="set_env_vars"::: + :::code language="azurecli" source="~/azureml-examples-main/cli/deploy-moe-vnet.sh" id="set_env_vars"::: # [MLflow model](#tab/mlflow) - :::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/cli/deploy-moe-vnet-mlflow.sh" id="set_env_vars"::: + :::code language="azurecli" source="~/azureml-examples-main/cli/deploy-moe-vnet-mlflow.sh" id="set_env_vars"::: --- @@ -190,7 +207,7 @@ When prompted, enter the password you used when creating the VM. 1. To configure the defaults for the CLI, use the following commands: - :::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/cli/endpoints/online/managed/vnet/setup_vm/scripts/vmsetup.sh" id="configure_defaults"::: + :::code language="azurecli" source="~/azureml-examples-main/cli/endpoints/online/managed/vnet/setup_vm/scripts/vmsetup.sh" id="configure_defaults"::: 1. To clone the example files for the deployment, use the following command: @@ -200,7 +217,7 @@ When prompted, enter the password you used when creating the VM. 1. 
To build a custom docker image to use with the deployment, use the following commands: - :::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/cli/endpoints/online/managed/vnet/setup_vm/scripts/build_image.sh" id="build_image"::: + :::code language="azurecli" source="~/azureml-examples-main/cli/endpoints/online/managed/vnet/setup_vm/scripts/build_image.sh" id="build_image"::: > [!TIP] > In this example, we build the Docker image before pushing it to Azure Container Registry. Alternatively, you can build the image in your vnet by using an Azure Machine Learning compute cluster and environments. For more information, see [Secure Azure Machine Learning workspace](how-to-secure-workspace-vnet.md#enable-azure-container-registry-acr). @@ -212,22 +229,22 @@ When prompted, enter the password you used when creating the VM. > [!TIP] > You can test or debug the Docker image locally by using the `--local` flag when creating the deployment. For more information, see the [Deploy and debug locally](how-to-deploy-managed-online-endpoints.md#deploy-and-debug-locally-by-using-local-endpoints) article. - :::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/cli/endpoints/online/managed/vnet/setup_vm/scripts/create_moe.sh" id="create_vnet_deployment"::: + :::code language="azurecli" source="~/azureml-examples-main/cli/endpoints/online/managed/vnet/setup_vm/scripts/create_moe.sh" id="create_vnet_deployment"::: 1. To make a scoring request with the endpoint, use the following commands: - :::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/cli/endpoints/online/managed/vnet/setup_vm/scripts/score_endpoint.sh" id="check_deployment"::: + :::code language="azurecli" source="~/azureml-examples-main/cli/endpoints/online/managed/vnet/setup_vm/scripts/score_endpoint.sh" id="check_deployment"::: ### Cleanup To delete the endpoint, use the following command: -:::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/cli/deploy-moe-vnet.sh" id="delete_endpoint"::: +:::code language="azurecli" source="~/azureml-examples-main/cli/deploy-moe-vnet.sh" id="delete_endpoint"::: To delete the VM, use the following command: -:::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/cli/deploy-moe-vnet.sh" id="delete_vm"::: +:::code language="azurecli" source="~/azureml-examples-main/cli/deploy-moe-vnet.sh" id="delete_vm"::: To delete all the resources created in this article, use the following command. Replace `` with the name of the resource group used in this example: diff --git a/articles/machine-learning/how-to-secure-training-vnet.md b/articles/machine-learning/how-to-secure-training-vnet.md index c459be92e92f2..9010a11d56ea3 100644 --- a/articles/machine-learning/how-to-secure-training-vnet.md +++ b/articles/machine-learning/how-to-secure-training-vnet.md @@ -233,7 +233,7 @@ When the creation process finishes, you train your model by using the cluster in When you enable **No public IP**, your compute cluster doesn't use a public IP for communication with any dependencies. Instead, it communicates solely within the virtual network using Azure Private Link ecosystem and service/private endpoints, eliminating the need for a public IP entirely. No public IP removes access and discoverability of compute cluster nodes from the internet thus eliminating a significant threat vector. **No public IP** clusters help comply with no public IP policies many enterprises have. 
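As an illustrative sketch only (not one of this article's samples), requesting such a cluster from the Python SDK v2 might look like the following; the name and size are placeholders, this assumes the `enable_node_public_ip` flag on the v2 `AmlCompute` entity, and the virtual network and subnet configuration described in this article still applies:

```python
from azure.ai.ml.entities import AmlCompute

# Request a cluster whose nodes get no public IP address. The cluster must still
# be created in the workspace virtual network/subnet as described in this article.
no_public_ip_cluster = AmlCompute(
    name="cpu-cluster-no-public-ip",  # placeholder name
    size="STANDARD_DS3_V2",           # placeholder VM size
    min_instances=0,
    max_instances=4,
    enable_node_public_ip=False,
)
# ml_client.begin_create_or_update(no_public_ip_cluster)
```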
> [!WARNING] -> By default, you do not have public internet access from No Public IP Compute Cluster. You need to configure User Defined Routing (UDR) to reach to a public IP to access the internet. For example, you can use a public IP of your firewall, or you can use [Virtual Network NAT](/azure/virtual-network/nat-gateway/nat-overview) with a public IP. +> By default, you do not have public internet access from No Public IP Compute Cluster. You need to configure User Defined Routing (UDR) to reach to a public IP to access the internet. For example, you can use a public IP of your firewall, or you can use [Virtual Network NAT](../virtual-network/nat-gateway/nat-overview.md) with a public IP. A compute cluster with **No public IP** enabled has **no inbound communication requirements** from public internet. Specifically, neither inbound NSG rule (`BatchNodeManagement`, `AzureMachineLearning`) is required. You still need to allow inbound from source of **VirtualNetwork** and any port source, to destination of **VirtualNetwork**, and destination port of **29876, 29877** and inbound from source **AzureLoadBalancer** and any port source to destination **VirtualNetwork** and port **44224** destination. @@ -266,7 +266,7 @@ For steps on how to create a compute instance deployed in a virtual network, see When you enable **No public IP**, your compute instance doesn't use a public IP for communication with any dependencies. Instead, it communicates solely within the virtual network using Azure Private Link ecosystem and service/private endpoints, eliminating the need for a public IP entirely. No public IP removes access and discoverability of compute instance node from the internet thus eliminating a significant threat vector. Compute instances will also do packet filtering to reject any traffic from outside virtual network. **No public IP** instances are dependent on [Azure Private Link](how-to-configure-private-link.md) for Azure Machine Learning workspace. > [!WARNING] -> By default, you do not have public internet access from No Public IP Compute Instance. You need to configure User Defined Routing (UDR) to reach to a public IP to access the internet. For example, you can use a public IP of your firewall, or you can use [Virtual Network NAT](/azure/virtual-network/nat-gateway/nat-overview) with a public IP. +> By default, you do not have public internet access from No Public IP Compute Instance. You need to configure User Defined Routing (UDR) to reach to a public IP to access the internet. For example, you can use a public IP of your firewall, or you can use [Virtual Network NAT](../virtual-network/nat-gateway/nat-overview.md) with a public IP. For **outbound connections** to work, you need to set up an egress firewall such as Azure firewall with user defined routes. For instance, you can use a firewall set up with [inbound/outbound configuration](how-to-access-azureml-behind-firewall.md) and route traffic there by defining a route table on the subnet in which the compute instance is deployed. The route table entry can set up the next hop of the private IP address of the firewall with the address prefix of 0.0.0.0/0. @@ -345,4 +345,4 @@ This article is part of a series on securing an Azure Machine Learning workflow. 
* If using CLI v2 or SDK v2 - [Network isolation for managed online endpoints](how-to-secure-online-endpoint.md) * [Enable studio functionality](how-to-enable-studio-virtual-network.md) * [Use custom DNS](how-to-custom-dns.md) -* [Use a firewall](how-to-access-azureml-behind-firewall.md) +* [Use a firewall](how-to-access-azureml-behind-firewall.md) \ No newline at end of file diff --git a/articles/machine-learning/how-to-train-cli.md b/articles/machine-learning/how-to-train-cli.md index 1be08fc487694..1672a00df439a 100644 --- a/articles/machine-learning/how-to-train-cli.md +++ b/articles/machine-learning/how-to-train-cli.md @@ -8,7 +8,7 @@ ms.subservice: core ms.topic: how-to author: amibp ms.author: amipatel -ms.date: 03/31/2022 +ms.date: 05/26/2022 ms.reviewer: nibaccam ms.custom: devx-track-azurecli, devplatv2, event-tier1-build-2022 --- @@ -327,11 +327,11 @@ The following example shows an AutoML configuration file for training a classifi * The training has a time out of 180 minutes * The data for training is in the folder "./training-mltable-folder". Automated ML jobs only accept data in the form of an `MLTable`. -:::code language="yaml" source="~/azureml-examples-sdk-preview/cli/jobs/basics/hello-automl/hello-automl-job-basic.yml"::: +:::code language="yaml" source="~/azureml-examples-main/cli/jobs/basics/hello-automl/hello-automl-job-basic.yml"::: That mentioned MLTable definition is what points to the training data file, in this case a local .csv file that will be uploaded automatically: -:::code language="yaml" source="~/azureml-examples-sdk-preview/cli/jobs/basics/hello-automl/training-mltable-folder/MLTable"::: +:::code language="yaml" source="~/azureml-examples-main/cli/jobs/basics/hello-automl/training-mltable-folder/MLTable"::: Finally, you can run it (create the AutoML job) with this CLI command: @@ -345,7 +345,7 @@ Or like the following if providing workspace IDs explicitly instead of using the /> az ml job create --file ./hello-automl-job-basic.yml --workspace-name [YOUR_AZURE_WORKSPACE] --resource-group [YOUR_AZURE_RESOURCE_GROUP] --subscription [YOUR_AZURE_SUBSCRIPTION] ``` -To investigate additional AutoML model training examples using other ML-tasks such as regression, time-series forecasting, image classification, object detection, NLP text-classification, etc., see the complete list of [AutoML CLI examples](https://github.com/Azure/azureml-examples/tree/sdk-preview/cli/jobs/automl-standalone-jobs). +To investigate additional AutoML model training examples using other ML-tasks such as regression, time-series forecasting, image classification, object detection, NLP text-classification, etc., see the complete list of [AutoML CLI examples](https://github.com/Azure/azureml-examples/tree/main/cli/jobs/automl-standalone-jobs). ### Train a model with a custom script diff --git a/articles/machine-learning/how-to-train-sdk.md b/articles/machine-learning/how-to-train-sdk.md index 9cb30ea26c181..6c24c15c71b29 100644 --- a/articles/machine-learning/how-to-train-sdk.md +++ b/articles/machine-learning/how-to-train-sdk.md @@ -8,7 +8,7 @@ ms.author: balapv ms.reviewer: sgilley ms.service: machine-learning ms.subservice: core -ms.date: 05/10/2022 +ms.date: 05/26/2022 ms.topic: how-to ms.custom: sdkv2, event-tier1-build-2022 --- @@ -88,7 +88,7 @@ Let us tackle these steps below ### 1. Connect to the workspace -To connect to the workspace, you need identifier parameters - a subscription, resource group and workspace name. 
You'll use these details in the `MLClient` from `azure.ai.ml` to get a handle to the required Azure Machine Learning workspace. To authenticate, you use the [default Azure authentication](/python/api/azure-identity/azure.identity.defaultazurecredential?view=azure-python). Check this [example](https://github.com/Azure/azureml-examples/blob/sdk-preview/sdk/jobs/configuration.ipynb) for more details on how to configure credentials and connect to a workspace. +To connect to the workspace, you need identifier parameters - a subscription, resource group and workspace name. You'll use these details in the `MLClient` from `azure.ai.ml` to get a handle to the required Azure Machine Learning workspace. To authenticate, you use the [default Azure authentication](/python/api/azure-identity/azure.identity.defaultazurecredential?view=azure-python&preserve-view=true). Check this [example](https://github.com/Azure/azureml-examples/blob/sdk-preview/sdk/jobs/configuration.ipynb) for more details on how to configure credentials and connect to a workspace. ```python #import required libraries @@ -109,21 +109,8 @@ ml_client = MLClient(DefaultAzureCredential(), subscription_id, resource_group, You'll create a compute called `cpu-cluster` for your job, with this code: -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/configuration.ipynb?name=create-cpu-compute)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/configuration.ipynb?name=create-cpu-compute)] -```python -from azure.ai.ml.entities import AmlCompute - -# specify aml compute name. -cpu_compute_target = 'cpu-cluster' - -try: - ml_client.compute.get(cpu_compute_target) -except Exception: - print('Creating a new cpu compute target...') - compute = AmlCompute(name=cpu_compute_target, size="STANDARD_D2_V2", min_instances=0, max_instances=4) - ml_client.compute.begin_create_or_update(compute) -``` ### 3. Environment to run the script @@ -135,7 +122,7 @@ To run your script on `cpu-cluster`, you need an environment, which has the requ * A base docker image with a conda YAML to customize further * A docker build context - Check this [example](https://github.com/Azure/azureml-examples/sdk/assets/environment/environment.ipynb) on how to create custom environments. + Check this [example](https://github.com/Azure/azureml-examples/blob/main/sdk/assets/environment/environment.ipynb) on how to create custom environments. You'll use a curated environment provided by Azure ML for `lightgm` called `AzureML-lightgbm-3.2-ubuntu18.04-py37-cpu` @@ -143,28 +130,10 @@ You'll use a curated environment provided by Azure ML for `lightgm` called `Azur To run this script, you'll use a `command`. The command will be run by submitting it as a `job` to Azure ML. 
-[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=create-command)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=create-command)] -```python -from azure.ai.ml import command, Input -#define the command -command_job=command( - code='./src', - inputs={'iris_csv':Input(type='uri_file', path='https://azuremlexamples.blob.core.windows.net/datasets/iris.csv')}, - command = 'python main.py --iris-csv ${{inputs.iris_csv}}', - environment='AzureML-lightgbm-3.2-ubuntu18.04-py37-cpu@latest', - compute='cpu-cluster' -) -``` +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=run-command)] -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=run-command)] - -```python -# submit the command -returned_job = ml_client.jobs.create_or_update(command_job) -# get a URL for the status of the job -returned_job.services["Studio"].endpoint -``` In the above, you configured: - `code` - path where the code to run the command is located @@ -181,42 +150,15 @@ To perform a sweep, there needs to be input(s) against which the sweep needs to Let us improve our model by sweeping on `learning_rate` and `boosting` inputs to the script. In the previous step, you used a specific value for these parameters, but now you'll use a range or choice of values. -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=search-space)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=search-space)] -```python -# we will reuse the command_job created before. 
we call it as a function so that we can apply inputs -# we do not apply the 'iris_csv' input again -- we will just use what was already defined earlier -command_job_for_sweep = command_job( - learning_rate=Uniform(min_value=0.01, max_value=0.9), - boosting=Choice(values=["gbdt", "dart"]), -) -``` Now that you've defined the parameters, run the sweep -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=configure-sweep)] - -```python -# apply the sweep parameter to obtain the sweep_job -sweep_job = command_job_for_sweep.sweep( - compute='cpu-cluster', - sampling_algorithm='random', - primary_metric='test-multi_logloss', - goal='Minimize' -) - -#define the limits for this sweep -sweep_job.set_limits(max_total_trials=20, max_concurrent_trials=10, timeout=7200) -``` +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=configure-sweep)] -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=run-sweep)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=run-sweep)] -```python -# submit the sweep -returned_sweep_job = ml_client.create_or_update(sweep_job) -# get a URL for the status of the job -returned_sweep_job.services["Studio"].endpoint -``` As seen above, the `sweep` function allows user to configure the following key aspects: diff --git a/articles/machine-learning/how-to-train-with-ui.md b/articles/machine-learning/how-to-train-with-ui.md index 150a5956a1a43..be23935d2f46d 100644 --- a/articles/machine-learning/how-to-train-with-ui.md +++ b/articles/machine-learning/how-to-train-with-ui.md @@ -37,9 +37,6 @@ There are many ways to create a training job with Azure Machine Learning. You ca * Or, you may enter the job creation from the left pane. Click **+New** and select **Job**. [![Azure Machine Learning studio left navigation](media/how-to-train-with-ui/left-nav-entry.png)](media/how-to-train-with-ui/left-nav-entry.png) -* Or, if you're in the Experiment page, you may go to the **All runs** tab and click **Create job**. -[![Experiment page entry for job creation UI](media/how-to-train-with-ui/experiment-entry.png)](media/how-to-train-with-ui/experiment-entry.png) - These options will all take you to the job creation panel, which has a wizard for configuring and creating a training job. ## Select compute resources diff --git a/articles/machine-learning/how-to-troubleshoot-online-endpoints.md b/articles/machine-learning/how-to-troubleshoot-online-endpoints.md index 68e6fb04a01f0..8851181f3366b 100644 --- a/articles/machine-learning/how-to-troubleshoot-online-endpoints.md +++ b/articles/machine-learning/how-to-troubleshoot-online-endpoints.md @@ -113,7 +113,7 @@ There are three supported tracing headers: > [!Note] > When you create a support ticket for a failed request, attach the failed request ID to expedite investigation. -- `x-ms-request-id` and `x-ms-client-request-id` are available for client tracing scenarios. We sanitize these headers to remove non-alphanumeric symbols. These headers are truncated to 72 characters. +- `x-ms-client-request-id` is available for client tracing scenarios. We sanitize this header to remove non-alphanumeric symbols. This header is truncated to 72 characters. 
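For example, you can supply your own GUID in this header when you call the scoring URI, and then reference it when troubleshooting. A minimal sketch using the `requests` library; the scoring URI, key, payload, and GUID are placeholders:

```python
import requests

scoring_uri = "https://<endpoint-name>.<region>.inference.ml.azure.com/score"  # placeholder
endpoint_key = "<endpoint-key>"  # placeholder

headers = {
    "Authorization": f"Bearer {endpoint_key}",
    "Content-Type": "application/json",
    # supply your own request ID so you can quote it when troubleshooting
    "x-ms-client-request-id": "11111111-2222-3333-4444-555555555555",
}

response = requests.post(scoring_uri, json={"data": []}, headers=headers)
print(response.status_code)
```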
## Common deployment errors diff --git a/articles/machine-learning/how-to-tune-hyperparameters.md b/articles/machine-learning/how-to-tune-hyperparameters.md index df293cc9caf7c..67d90758663ae 100644 --- a/articles/machine-learning/how-to-tune-hyperparameters.md +++ b/articles/machine-learning/how-to-tune-hyperparameters.md @@ -3,7 +3,7 @@ title: Hyperparameter tuning a model (v2) titleSuffix: Azure Machine Learning description: Automate hyperparameter tuning for deep learning and machine learning models using Azure Machine Learning. ms.author: amipatel -author: amipatel +author: amibp services: machine-learning ms.service: machine-learning ms.subservice: core @@ -94,7 +94,7 @@ command_job_for_sweep = command_job( This code defines a search space with two parameters - `learning_rate` and `keep_probability`. `learning_rate` has a normal distribution with mean value 10 and a standard deviation of 3. `keep_probability` has a uniform distribution with a minimum value of 0.05 and a maximum value of 0.1. -For the CLI, you can use the [sweep job YAML schema](/articles/machine-learning/reference-yaml-job-sweep)., to define the search space in your YAML: +For the CLI, you can use the [sweep job YAML schema](/azure/machine-learning/reference-yaml-job-sweep)., to define the search space in your YAML: ```YAML search_space: conv_size: @@ -116,7 +116,7 @@ Specify the parameter sampling method to use over the hyperparameter space. Azur ### Random sampling -[Random sampling](/python/api/azure-ai-ml/azure.ai.ml.sweep.randomparametersampling) supports discrete and continuous hyperparameters. It supports early termination of low-performance jobs. Some users do an initial search with random sampling and then refine the search space to improve results. +[Random sampling](/azure/machine-learning/how-to-tune-hyperparameters) supports discrete and continuous hyperparameters. It supports early termination of low-performance jobs. Some users do an initial search with random sampling and then refine the search space to improve results. In random sampling, hyperparameter values are randomly selected from the defined search space. After creating your command job, you can use the sweep parameter to define the sampling algorithm. @@ -152,7 +152,7 @@ sweep_job = command_job_for_sweep.sweep( ### Grid sampling -[Grid sampling](/python/api/azure-ai-ml/azure.ai.ml.sweep.gridparametersampling) supports discrete hyperparameters. Use grid sampling if you can budget to exhaustively search over the search space. Supports early termination of low-performance jobs. +Grid sampling supports discrete hyperparameters. Use grid sampling if you can budget to exhaustively search over the search space. Supports early termination of low-performance jobs. Grid sampling does a simple grid search over all possible values. Grid sampling can only be used with `choice` hyperparameters. For example, the following space has six samples: @@ -173,7 +173,7 @@ sweep_job = command_job_for_sweep.sweep( ### Bayesian sampling -[Bayesian sampling](/python/api/azure-ai-ml/azure.ai.ml.sweep.bayesianparametersampling) is based on the Bayesian optimization algorithm. It picks samples based on how previous samples did, so that new samples improve the primary metric. +Bayesian sampling is based on the Bayesian optimization algorithm. It picks samples based on how previous samples did, so that new samples improve the primary metric. Bayesian sampling is recommended if you have enough budget to explore the hyperparameter space. 
For best results, we recommend a maximum number of jobs greater than or equal to 20 times the number of hyperparameters being tuned. @@ -199,7 +199,7 @@ sweep_job = command_job_for_sweep.sweep( ## Specify the objective of the sweep -Define the objective of your sweep job by specifying the [primary metric](/python/api/azure-ai-ml/azure.ai.ml.sweep.primary_metric) and [goal](/python/api/azure-ai-ml/azure.ai.ml.sweep.goal) you want hyperparameter tuning to optimize. Each training job is evaluated for the primary metric. The early termination policy uses the primary metric to identify low-performance jobs. +Define the objective of your sweep job by specifying the primary metric and goal you want hyperparameter tuning to optimize. Each training job is evaluated for the primary metric. The early termination policy uses the primary metric to identify low-performance jobs. * `primary_metric`: The name of the primary metric needs to exactly match the name of the metric logged by the training script * `goal`: It can be either `Maximize` or `Minimize` and determines whether the primary metric will be maximized or minimized when evaluating the jobs. @@ -257,9 +257,6 @@ Azure Machine Learning supports the following early termination policies: [Bandit policy](/python/api/azure-ai-ml/azure.ai.ml.sweep.banditpolicy) is based on slack factor/slack amount and evaluation interval. Bandit policy ends a job when the primary metric isn't within the specified slack factor/slack amount of the most successful job. -> [!NOTE] -> Bayesian sampling does not support early termination. When using Bayesian sampling, set `early_termination_policy = None`. - Specify the following configuration parameters: * `slack_factor` or `slack_amount`: the slack allowed with respect to the best performing training job. `slack_factor` specifies the allowable slack as a ratio. `slack_amount` specifies the allowable slack as an absolute amount, instead of a ratio. @@ -348,7 +345,7 @@ This code configures the hyperparameter tuning experiment to use a maximum of 20 ## Configure hyperparameter tuning experiment -To [configure your hyperparameter tuning](/python/api/azure-ai-ml/azure.ai.ml.train.sweep) experiment, provide the following: +To configure your hyperparameter tuning experiment, provide the following: * The defined hyperparameter search space * Your sampling algorithm * Your early termination policy diff --git a/articles/machine-learning/how-to-use-batch-endpoint-sdk-v2.md b/articles/machine-learning/how-to-use-batch-endpoint-sdk-v2.md new file mode 100644 index 0000000000000..48a984cc3f909 --- /dev/null +++ b/articles/machine-learning/how-to-use-batch-endpoint-sdk-v2.md @@ -0,0 +1,236 @@ +--- +title: 'Use batch endpoints for batch scoring using Python SDK v2 (preview)' +titleSuffix: Azure Machine Learning +description: In this article, learn how to create a batch endpoint to continuously batch score large data using Python SDK v2 (preview). +services: machine-learning +ms.service: machine-learning +ms.subservice: mlops +ms.topic: how-to +author: shivanissambare +ms.author: ssambare +ms.reviewer: larryfr +ms.date: 05/25/2022 +ms.custom: how-to, devplatv2, sdkv2 +#Customer intent: As an ML engineer or data scientist, I want to create an endpoint to host my models for batch scoring, so that I can use the same endpoint continuously for different large datasets on-demand or on-schedule. 
+--- + +# Use batch endpoints for batch scoring using Python SDK v2 (preview) + +[!INCLUDE [sdk v2](../../includes/machine-learning-sdk-v2.md)] + +> [!IMPORTANT] +> SDK v2 is currently in public preview. +> The preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. +> For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). + +Learn how to use batch endpoints to do batch scoring using Python SDK v2. Batch endpoints simplify the process of hosting your models for batch scoring, so you can focus on machine learning, not infrastructure. For more information, see [What are Azure Machine Learning endpoints?](concept-endpoints.md). + +In this article, you'll learn to: + +* Connect to your Azure Machine Learning workspace from the Python SDK v2. +* Create a batch endpoint from Python SDK v2. +* Create deployments on that endpoint from Python SDK v2. +* Test a deployment with a sample request. + +## Prerequisites + +* A basic understanding of Machine Learning. +* An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/). +* An Azure ML workspace with a compute cluster to run your batch scoring job. +* The [Azure Machine Learning SDK v2 for Python](/python/api/overview/azure/ml/installv2). + + +## Connect to Azure Machine Learning workspace + +The [workspace](concept-workspace.md) is the top-level resource for Azure Machine Learning, providing a centralized place to work with all the artifacts you create when you use Azure Machine Learning. In this section, we'll connect to the workspace in which the job will be run. + +1. Import the required libraries: + + ```python + # import required libraries + from azure.ai.ml import MLClient, Input + from azure.ai.ml.entities import ( + AmlCompute, + BatchEndpoint, + BatchDeployment, + Model, + Environment, + BatchRetrySettings, + ) + from azure.ai.ml.entities._assets import Dataset + from azure.identity import DefaultAzureCredential + from azure.ai.ml.constants import BatchDeploymentOutputAction + ``` + +1. Configure workspace details and get a handle to the workspace: + + To connect to a workspace, we need identifier parameters - a subscription, resource group, and workspace name. We'll use these details in the `MLClient` from `azure.ai.ml` to get a handle to the required Azure Machine Learning workspace. This example uses the [default Azure authentication](/python/api/azure-identity/azure.identity.defaultazurecredential). + + ```python + # enter details of your AML workspace + subscription_id = "" + resource_group = "" + workspace = "" + ``` + + ```python + # get a handle to the workspace + ml_client = MLClient( + DefaultAzureCredential(), subscription_id, resource_group, workspace + ) + ``` + +## Create batch endpoint + +Batch endpoints are endpoints that are used for batch inferencing on large volumes of data over a period of time. Batch endpoints receive pointers to data and run jobs asynchronously to process the data in parallel on compute clusters. Batch endpoints store outputs to a data store for further analysis. + +To create a batch endpoint, we'll use `BatchEndpoint`. This class allows the user to configure the following key aspects: + +* `name` - Name of the endpoint.
Needs to be unique at the Azure region level. +* `auth_mode` - The authentication method for the endpoint. Currently only Azure Active Directory (Azure AD) token-based (`aad_token`) authentication is supported. +* `identity` - The managed identity configuration for accessing Azure resources for endpoint provisioning and inference. +* `defaults` - Default settings for the endpoint. + * `deployment_name` - Name of the deployment that will serve as the default deployment for the endpoint. +* `description` - Description of the endpoint. + +1. Configure the endpoint: + + ```python + # Creating a unique endpoint name with current datetime to avoid conflicts + import datetime + + batch_endpoint_name = "my-batch-endpoint-" + datetime.datetime.now().strftime( + "%Y%m%d%H%M" + ) + + # create a batch endpoint + endpoint = BatchEndpoint( + name=batch_endpoint_name, + description="this is a sample batch endpoint", + tags={"foo": "bar"}, + ) + ``` + +1. Create the endpoint: + + Using the `MLClient` created earlier, we'll now create the endpoint in the workspace. This command will start the endpoint creation and return a confirmation response while the endpoint creation continues. + + ```python + ml_client.begin_create_or_update(endpoint) + ``` + +## Create batch compute + +Batch endpoints run only on cloud computing resources, not locally. The cloud computing resource is a reusable virtual compute cluster. Run the following code to create an Azure Machine Learning compute cluster. The following examples in this article use the compute created here named `cpu-cluster`. + +```python +compute_name = "cpu-cluster" +compute_cluster = AmlCompute(name=compute_name, description="amlcompute", min_instances=0, max_instances=5) +ml_client.begin_create_or_update(compute_cluster) +``` + +## Create a deployment + +A deployment is a set of resources required for hosting the model that does the actual inferencing. We'll create a deployment for our endpoint using the `BatchDeployment` class. This class allows the user to configure the following key aspects: + +* `name` - Name of the deployment. +* `endpoint_name` - Name of the endpoint to create the deployment under. +* `model` - The model to use for the deployment. This value can be either a reference to an existing versioned model in the workspace or an inline model specification. +* `environment` - The environment to use for the deployment. This value can be either a reference to an existing versioned environment in the workspace or an inline environment specification. +* `code_path` - Path to the source code directory for scoring the model. +* `scoring_script` - Relative path to the scoring file in the source code directory. +* `compute` - Name of the compute target to execute the batch scoring jobs on. +* `instance_count` - The number of nodes to use for each batch scoring job. +* `max_concurrency_per_instance` - The maximum number of parallel `scoring_script` runs per instance. +* `mini_batch_size` - The number of files the `code_configuration.scoring_script` can process in one `run()` call. +* `retry_settings` - Retry settings for scoring each mini batch. + * `max_retries` - The maximum number of retries for a failed or timed-out mini batch (default is 3). + * `timeout` - The timeout in seconds for scoring a mini batch (default is 30). +* `output_action` - Indicates how the output should be organized in the output file. Allowed values are `append_row` or `summary_only`. Default is `append_row`. +* `output_file_name` - Name of the batch scoring output file.
Default is `predictions.csv`. +* `environment_variables` - Dictionary of environment variable name-value pairs to set for each batch scoring job. +* `logging_level` - The log verbosity level. Allowed values are `warning`, `info`, `debug`. Default is `info`. + +1. Configure the deployment: + + ```python + # create a batch deployment + model = Model(path="./mnist/model/") + env = Environment( + conda_file="./mnist/environment/conda.yml", + image="mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04:latest", + ) + deployment = BatchDeployment( + name="non-mlflow-deployment", + description="this is a sample non-mlflow deployment", + endpoint_name=batch_endpoint_name, + model=model, + code_path="./mnist/code/", + scoring_script="digit_identification.py", + environment=env, + compute=compute_name, + instance_count=2, + max_concurrency_per_instance=2, + mini_batch_size=10, + output_action=BatchDeploymentOutputAction.APPEND_ROW, + output_file_name="predictions.csv", + retry_settings=BatchRetrySettings(max_retries=3, timeout=30), + logging_level="info", + ) + ``` + +1. Create the deployment: + + Using the `MLClient` created earlier, we'll now create the deployment in the workspace. This command will start the deployment creation and return a confirmation response while the deployment creation continues. + + ```python + ml_client.begin_create_or_update(deployment) + ``` + +## Test the endpoint with sample data + +Using the `MLClient` created earlier, we'll get a handle to the endpoint. The endpoint can be invoked using the `invoke` command with the following parameters: + +* `name` - Name of the endpoint +* `input_path` - Path where input data is present +* `deployment_name` - Name of the specific deployment to test in an endpoint + +1. Invoke the endpoint: + + ```python + # create an input from the folder path + input = Input(path="https://pipelinedata.blob.core.windows.net/sampledata/mnist") + + # invoke the endpoint for batch scoring job + job = ml_client.batch_endpoints.invoke( + endpoint_name=batch_endpoint_name, + input=input, + deployment_name="non-mlflow-deployment",  # name is required as default deployment is not set + params_override=[{"mini_batch_size": "20"}, {"compute.instance_count": "4"}], + ) + ``` + +1. Get the details of the invoked job: + + Let's get the details and logs of the invoked job: + + ```python + # get the details of the job + job_name = job.name + batch_job = ml_client.jobs.get(name=job_name) + print(batch_job.status) + # stream the job logs + ml_client.jobs.stream(name=job_name) + ``` + +## Clean up resources + +Delete the endpoint: + +```python +ml_client.batch_endpoints.begin_delete(name=batch_endpoint_name) +``` + +## Next steps + +If you encounter problems using batch endpoints, see [Troubleshooting batch endpoints](how-to-troubleshoot-batch-endpoints.md). diff --git a/articles/machine-learning/how-to-use-data.md b/articles/machine-learning/how-to-use-data.md index 97d4b098e118f..d070711d94ff1 100644 --- a/articles/machine-learning/how-to-use-data.md +++ b/articles/machine-learning/how-to-use-data.md @@ -4,8 +4,8 @@ titleSuffix: Azure Machine Learning description: 'Learn to how work with data using the Python SDK v2 preview for Azure Machine Learning.' services: machine-learning ms.service: machine-learning -author: blackmist -ms.author: larryfr +author: samuel100 +ms.author: samkemp ms.subservice: core ms.date: 05/10/2022 ms.topic: how-to @@ -74,8 +74,6 @@ These snippets use `uri_file` and `uri_folder`.
> > If you wanted to pass in just an individual file rather than the entire folder you can use the `uri_file` type. -For a complete example, see the [working_with_uris.ipynb notebook](https://github.com/azure/azureml-previews/sdk/docs/working_with_uris.ipynb). - Below are some common data access patterns that you can use in your *control-plane* code to submit a job to Azure Machine Learning: ### Use data with a training job @@ -87,17 +85,18 @@ Use the tabs below to select where your data is located. When you pass local data, the data is automatically uploaded to cloud storage as part of the job submission. ```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data +from azure.ai.ml.constants import AssetTypes my_job_inputs = { - "input_data": JobInput( + "input_data": Input( path='./sample_data', # change to be your local directory type=AssetTypes.URI_FOLDER ) } -job = CommandJob( +job = command( code="./src", # local path where the code is stored command='python train.py --input_folder ${{inputs.input_data}}', inputs=my_job_inputs, @@ -114,18 +113,19 @@ returned_job.services["Studio"].endpoint # [ADLS Gen2](#tab/use-adls) ```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data, CommandJob +from azure.ai.ml.constants import AssetTypes # in this example we my_job_inputs = { - "input_data": JobInput( + "input_data": Input( path='abfss://@.dfs.core.windows.net/', type=AssetTypes.URI_FOLDER ) } -job = CommandJob( +job = command( code="./src", # local path where the code is stored command='python train.py --input_folder ${{inputs.input_data}}', inputs=my_job_inputs, @@ -142,18 +142,19 @@ returned_job.services["Studio"].endpoint # [Blob](#tab/use-blob) ```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data, CommandJob +from azure.ai.ml.constants import AssetTypes # in this example we my_job_inputs = { - "input_data": JobInput( + "input_data": Input( path='https://.blob.core.windows.net//path', type=AssetTypes.URI_FOLDER ) } -job = CommandJob( +job = command( code="./src", # local path where the code is stored command='python train.py --input_folder ${{inputs.input_data}}', inputs=my_job_inputs, @@ -176,11 +177,12 @@ Use the tabs below to select where your data is located. 
# [Blob](#tab/rw-blob) ```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob, JobOutput -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data, CommandJob, JobOutput +from azure.ai.ml.constants import AssetTypes my_job_inputs = { - "input_data": JobInput( + "input_data": Input( path='https://.blob.core.windows.net//path', type=AssetTypes.URI_FOLDER ) @@ -193,7 +195,7 @@ my_job_outputs = { ) } -job = CommandJob( +job = command( code="./src", #local path where the code is stored command='python pre-process.py --input_folder ${{inputs.input_data}} --output_folder ${{outputs.output_folder}}', inputs=my_job_inputs, @@ -211,11 +213,12 @@ returned_job.services["Studio"].endpoint # [ADLS Gen2](#tab/rw-adls) ```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob, JobOutput -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data, CommandJob, JobOutput +from azure.ai.ml.constants import AssetTypes my_job_inputs = { - "input_data": JobInput( + "input_data": Input( path='abfss://@.dfs.core.windows.net/', type=AssetTypes.URI_FOLDER ) @@ -228,7 +231,7 @@ my_job_outputs = { ) } -job = CommandJob( +job = command( code="./src", #local path where the code is stored command='python pre-process.py --input_folder ${{inputs.input_data}} --output_folder ${{outputs.output_folder}}', inputs=my_job_inputs, @@ -248,7 +251,7 @@ returned_job.services["Studio"].endpoint ```python from azure.ai.ml.entities import Data -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml.constants import AssetTypes # select one from: my_path = 'abfss://@.dfs.core.windows.net/' # adls gen2 @@ -268,19 +271,20 @@ ml_client.data.create_or_update(my_data) ### Consume registered data assets in job ```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data, Input, CommandJob +from azure.ai.ml.constants import AssetTypes registered_data_asset = ml_client.data.get(name='titanic', version='1') my_job_inputs = { - "input_data": JobInput( + "input_data": Input( type=AssetTypes.URI_FOLDER, path=registered_data_asset.id ) } -job = CommandJob( +job = command( code="./src", command='python read_data_asset.py --input_folder ${{inputs.input_data}}', inputs=my_job_inputs, @@ -296,7 +300,7 @@ returned_job.services["Studio"].endpoint ## Table -An MLTable is primarily an abstraction over tabular data, but it can also be used for some advanced scenarios involving multiple paths. The following YAML describes an MLTable: +An [MLTable](concept-data.md#mltable) is primarily an abstraction over tabular data, but it can also be used for some advanced scenarios involving multiple paths. The following YAML describes an MLTable: ```yaml paths: @@ -320,7 +324,9 @@ tbl = mltable.load("./sample_data") df = tbl.to_pandas_dataframe() ``` -For a full example of using an MLTable, see the [Working with MLTable notebook]. +For more information on the YAML file format, see [the MLTable file](how-to-create-register-data-assets.md#the-mltable-file). 
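+An MLTable can also be consumed in a job in the same way as the `uri_folder` examples earlier in this article, using an input of type `mltable`. The snippet below is a minimal sketch rather than a sample from this article: it assumes the `ml_client` handle created earlier, a local `./sample_data` folder that contains an MLTable file, and a placeholder environment name.
+
+```python
+from azure.ai.ml import Input, command
+from azure.ai.ml.constants import AssetTypes
+
+# Hypothetical sketch: pass the folder containing the MLTable file as a job input.
+my_job_inputs = {
+    "input_mltable": Input(
+        type=AssetTypes.MLTABLE,
+        path="./sample_data",  # local folder that holds the MLTable file
+    )
+}
+
+job = command(
+    code="./src",  # local path where the code is stored
+    command="python train.py --input_mltable ${{inputs.input_mltable}}",
+    inputs=my_job_inputs,
+    environment="<environment-name>:<version>",  # placeholder environment reference
+    compute="cpu-cluster",
+)
+
+# submit the command job and get a URL for the status of the job
+returned_job = ml_client.create_or_update(job)
+returned_job.services["Studio"].endpoint
+```
+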
+ + ## Consuming V1 dataset assets in V2 @@ -344,20 +350,21 @@ inputs: The following example shows how to do this using the v2 SDK: ```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data, CommandJob +from azure.ai.ml.constants import AssetTypes registered_v1_data_asset = ml_client.data.get(name='', version='') my_job_inputs = { - "input_data": JobInput( + "input_data": Input( type=AssetTypes.MLTABLE, path=registered_v1_data_asset.id, mode="eval_mount" ) } -job = CommandJob( +job = command( code="./src", #local path where the code is stored command='python train.py --input_data ${{inputs.input_data}}', inputs=my_job_inputs, diff --git a/articles/machine-learning/how-to-use-sweep-in-pipeline.md b/articles/machine-learning/how-to-use-sweep-in-pipeline.md index 56eaeff000067..d29a369269907 100644 --- a/articles/machine-learning/how-to-use-sweep-in-pipeline.md +++ b/articles/machine-learning/how-to-use-sweep-in-pipeline.md @@ -8,7 +8,7 @@ ms.subservice: mlops ms.topic: how-to author: xiaoharper ms.author: zhanxia -ms.date: 05/10/2022 +ms.date: 05/26/2022 ms.custom: devx-track-python, sdkv2, cliv2, event-tier1-build-2022 --- @@ -37,19 +37,19 @@ The example used in this article can be found in [azureml-example repo](https:// Assume you already have a command component defined in `train.yaml`. A two-step pipeline job (train and predict) YAML file looks like below. -:::code language="yaml" source="~/azureml-examples-sdk-preview/cli/jobs/pipelines-with-components/pipeline_with_hyperparameter_sweep/pipeline.yml" highlight="7-48"::: +:::code language="yaml" source="~/azureml-examples-main/cli/jobs/pipelines-with-components/pipeline_with_hyperparameter_sweep/pipeline.yml" highlight="7-48"::: The `sweep_step` is the step for hyperparameter tuning. Its type needs to be `sweep`. And `trial` refers to the command component defined in `train.yaml`. From the `search sapce` field we can see three hyparmeters (`c_value`, `kernel`, and `coef`) are added to the search space. After you submit this pipeline job, Azure Machine Learning will run the trial component multiple times to sweep over hyperparameters based on the search space and terminate policy you defined in `sweep_step`. Check [sweep job YAML schema](reference-yaml-job-sweep.md) for full schema of sweep job. Below is the trial component definition (train.yml file). -:::code language="yaml" source="~/azureml-examples-sdk-preview/cli/jobs/pipelines-with-components/pipeline_with_hyperparameter_sweep/train.yml" highlight="11-16,23-25,60"::: +:::code language="yaml" source="~/azureml-examples-main/cli/jobs/pipelines-with-components/pipeline_with_hyperparameter_sweep/train.yml" highlight="11-16,23-25,60"::: The hyperparameters added to search space in pipeline.yml need to be inputs for the trial component. The source code of the trial component is under `./train-src` folder. In this example, it's a single `train.py` file. This is the code that will be executed in every trial of the sweep job. Make sure you've logged the metrics in the trial component source code with exactly the same name as `primary_metric` value in pipeline.yml file. In this example, we use `mlflow.autolog()`, which is the recommended way to track your ML experiments. See more about mlflow [here](./how-to-use-mlflow-cli-runs.md) Below code snippet is the source code of trial component. 
-:::code language="python" source="~/azureml-examples-sdk-preview/cli/jobs/pipelines-with-components/pipeline_with_hyperparameter_sweep/train-src/train.py" highlight="15"::: +:::code language="python" source="~/azureml-examples-main/cli/jobs/pipelines-with-components/pipeline_with_hyperparameter_sweep/train-src/train.py" highlight="15"::: ### Python SDK @@ -59,7 +59,7 @@ In Azure Machine Learning Python SDK v2, you can enable hyperparameter tuning fo Below code snippet shows how to enable sweep for `train_model`. -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/1c_pipeline_with_hyperparameter_sweep/pipeline_with_hyperparameter_sweep.ipynb?name=enable-sweep)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/1c_pipeline_with_hyperparameter_sweep/pipeline_with_hyperparameter_sweep.ipynb?name=enable-sweep)] We first load `train_component_func` defined in `train.yml` file. When creating `train_model`, we add `c_value`, `kernel` and `coef0` into search space(line 15-17). Line 30-35 defines the primary metric, sampling algorithm etc. diff --git a/articles/machine-learning/how-to-version-track-datasets.md b/articles/machine-learning/how-to-version-track-datasets.md index e35c3291cf848..3998034479618 100644 --- a/articles/machine-learning/how-to-version-track-datasets.md +++ b/articles/machine-learning/how-to-version-track-datasets.md @@ -5,8 +5,8 @@ description: Learn how to version machine learning datasets and how versioning w services: machine-learning ms.service: machine-learning ms.subservice: mldata -ms.author: larryfr -author: blackmist +ms.author: samkemp +author: samuel100 ms.date: 10/21/2021 ms.topic: how-to ms.custom: devx-track-python, data4ml, sdkv1, event-tier1-build-2022 diff --git a/articles/machine-learning/media/concept-bring-your-own-key-encryption/concept-bring-your-own-key-encryption-overview.png b/articles/machine-learning/media/concept-bring-your-own-key-encryption/concept-bring-your-own-key-encryption-overview.png deleted file mode 100644 index 7d1aa1e44d3d1..0000000000000 Binary files a/articles/machine-learning/media/concept-bring-your-own-key-encryption/concept-bring-your-own-key-encryption-overview.png and /dev/null differ diff --git a/articles/machine-learning/media/concept-bring-your-own-key-encryption/concept-bring-your-own-key-encryption-portal.png b/articles/machine-learning/media/concept-bring-your-own-key-encryption/concept-bring-your-own-key-encryption-portal.png deleted file mode 100644 index 670ab5df26a0c..0000000000000 Binary files a/articles/machine-learning/media/concept-bring-your-own-key-encryption/concept-bring-your-own-key-encryption-portal.png and /dev/null differ diff --git a/articles/machine-learning/media/concept-component/archive-component.png b/articles/machine-learning/media/concept-component/archive-component.png deleted file mode 100644 index 476facba7561b..0000000000000 Binary files a/articles/machine-learning/media/concept-component/archive-component.png and /dev/null differ diff --git a/articles/machine-learning/media/concept-component/component-introduction.png b/articles/machine-learning/media/concept-component/component-introduction.png deleted file mode 100644 index 35b6f30ec409d..0000000000000 Binary files a/articles/machine-learning/media/concept-component/component-introduction.png and /dev/null differ diff --git a/articles/machine-learning/media/concept-component/ui-create-component.png b/articles/machine-learning/media/concept-component/ui-create-component.png deleted file mode 100644 index 
a34606fc198f6..0000000000000 Binary files a/articles/machine-learning/media/concept-component/ui-create-component.png and /dev/null differ diff --git a/articles/machine-learning/media/concept-component/upgrade-component.png b/articles/machine-learning/media/concept-component/upgrade-component.png deleted file mode 100644 index 0fe6cd6044ab7..0000000000000 Binary files a/articles/machine-learning/media/concept-component/upgrade-component.png and /dev/null differ diff --git a/articles/machine-learning/media/concept-compute-instance/create-compute-instance.png b/articles/machine-learning/media/concept-compute-instance/create-compute-instance.png deleted file mode 100644 index cfacd4c96f975..0000000000000 Binary files a/articles/machine-learning/media/concept-compute-instance/create-compute-instance.png and /dev/null differ diff --git a/articles/machine-learning/media/concept-compute-instance/manage-compute-instance.png b/articles/machine-learning/media/concept-compute-instance/manage-compute-instance.png index e70f1de31b7e2..cec83c2e779a5 100644 Binary files a/articles/machine-learning/media/concept-compute-instance/manage-compute-instance.png and b/articles/machine-learning/media/concept-compute-instance/manage-compute-instance.png differ diff --git a/articles/machine-learning/media/concept-data/uri-file.png b/articles/machine-learning/media/concept-data/uri-file.png new file mode 100644 index 0000000000000..adaec031f4a4b Binary files /dev/null and b/articles/machine-learning/media/concept-data/uri-file.png differ diff --git a/articles/machine-learning/media/concept-data/uri-folder.png b/articles/machine-learning/media/concept-data/uri-folder.png new file mode 100644 index 0000000000000..0d45c4feeaabd Binary files /dev/null and b/articles/machine-learning/media/concept-data/uri-folder.png differ diff --git a/articles/machine-learning/media/concept-data/use-storage-explorer.png b/articles/machine-learning/media/concept-data/use-storage-explorer.png new file mode 100644 index 0000000000000..6fce5237630ed Binary files /dev/null and b/articles/machine-learning/media/concept-data/use-storage-explorer.png differ diff --git a/articles/machine-learning/media/concept-designer/designer-workflow-diagram.png b/articles/machine-learning/media/concept-designer/designer-workflow-diagram.png index b2b842ba239af..4044dce9adfca 100644 Binary files a/articles/machine-learning/media/concept-designer/designer-workflow-diagram.png and b/articles/machine-learning/media/concept-designer/designer-workflow-diagram.png differ diff --git a/articles/machine-learning/media/concept-endpoints/batch-endpoint-concept.png b/articles/machine-learning/media/concept-endpoints/batch-endpoint-concept.png deleted file mode 100644 index 581f2adc117cd..0000000000000 Binary files a/articles/machine-learning/media/concept-endpoints/batch-endpoint-concept.png and /dev/null differ diff --git a/articles/machine-learning/media/concept-enterprise-security/authorize-azure-machine-learning.png b/articles/machine-learning/media/concept-enterprise-security/authorize-azure-machine-learning.png deleted file mode 100644 index 5f4116b5f25f1..0000000000000 Binary files a/articles/machine-learning/media/concept-enterprise-security/authorize-azure-machine-learning.png and /dev/null differ diff --git a/articles/machine-learning/media/concept-enterprise-security/workspace-activity-log-expanded.png b/articles/machine-learning/media/concept-enterprise-security/workspace-activity-log-expanded.png deleted file mode 100644 index 7d7c04ff4feba..0000000000000 
Binary files a/articles/machine-learning/media/concept-enterprise-security/workspace-activity-log-expanded.png and /dev/null differ diff --git a/articles/machine-learning/media/concept-enterprise-security/workspace-activity-log.png b/articles/machine-learning/media/concept-enterprise-security/workspace-activity-log.png deleted file mode 100644 index 46ab0efe84212..0000000000000 Binary files a/articles/machine-learning/media/concept-enterprise-security/workspace-activity-log.png and /dev/null differ diff --git a/articles/machine-learning/media/concept-enterprise-security/workspace-metrics-expanded.png b/articles/machine-learning/media/concept-enterprise-security/workspace-metrics-expanded.png deleted file mode 100644 index 7bc3aa15bdc06..0000000000000 Binary files a/articles/machine-learning/media/concept-enterprise-security/workspace-metrics-expanded.png and /dev/null differ diff --git a/articles/machine-learning/media/concept-enterprise-security/workspace-metrics.png b/articles/machine-learning/media/concept-enterprise-security/workspace-metrics.png deleted file mode 100644 index 44805e163e39c..0000000000000 Binary files a/articles/machine-learning/media/concept-enterprise-security/workspace-metrics.png and /dev/null differ diff --git a/articles/machine-learning/media/concept-environments/ml-environment.png b/articles/machine-learning/media/concept-environments/ml-environment.png index 2e51a515bab85..0670c661b21ea 100644 Binary files a/articles/machine-learning/media/concept-environments/ml-environment.png and b/articles/machine-learning/media/concept-environments/ml-environment.png differ diff --git a/articles/machine-learning/media/concept-ml-pipelines/pipeline-flow.png b/articles/machine-learning/media/concept-ml-pipelines/pipeline-flow.png deleted file mode 100644 index 12943663dd3ee..0000000000000 Binary files a/articles/machine-learning/media/concept-ml-pipelines/pipeline-flow.png and /dev/null differ diff --git a/articles/machine-learning/media/concept-ml-pipelines/run_an_experiment_as_a_pipeline.png b/articles/machine-learning/media/concept-ml-pipelines/run_an_experiment_as_a_pipeline.png deleted file mode 100644 index c8b63e1870239..0000000000000 Binary files a/articles/machine-learning/media/concept-ml-pipelines/run_an_experiment_as_a_pipeline.png and /dev/null differ diff --git a/articles/machine-learning/media/concept-responsible-ml/responsible-ml-pillars.png b/articles/machine-learning/media/concept-responsible-ml/responsible-ml-pillars.png deleted file mode 100644 index 1e5fd909e4ba4..0000000000000 Binary files a/articles/machine-learning/media/concept-responsible-ml/responsible-ml-pillars.png and /dev/null differ diff --git a/articles/machine-learning/media/concept-secure-network-traffic-flow/storage-traffic-studio.png b/articles/machine-learning/media/concept-secure-network-traffic-flow/storage-traffic-studio.png index f1ed2ab1ed2c3..bdfb6e48413fc 100644 Binary files a/articles/machine-learning/media/concept-secure-network-traffic-flow/storage-traffic-studio.png and b/articles/machine-learning/media/concept-secure-network-traffic-flow/storage-traffic-studio.png differ diff --git a/articles/machine-learning/media/concept-workspace/azure-machine-learning-taxonomy.png b/articles/machine-learning/media/concept-workspace/azure-machine-learning-taxonomy.png index a460ba4e00450..27755b83e548c 100644 Binary files a/articles/machine-learning/media/concept-workspace/azure-machine-learning-taxonomy.png and 
b/articles/machine-learning/media/concept-workspace/azure-machine-learning-taxonomy.png differ diff --git a/articles/machine-learning/media/how-to-access-data/datastore-designer-sample.png b/articles/machine-learning/media/how-to-access-data/datastore-designer-sample.png deleted file mode 100644 index ea2cd40225bfe..0000000000000 Binary files a/articles/machine-learning/media/how-to-access-data/datastore-designer-sample.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-access-data/new-datastore-form.png b/articles/machine-learning/media/how-to-access-data/new-datastore-form.png deleted file mode 100644 index b2d483e7ff11b..0000000000000 Binary files a/articles/machine-learning/media/how-to-access-data/new-datastore-form.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-attach-arc-kubernetes/attach-kubernetes-cluster.png b/articles/machine-learning/media/how-to-attach-arc-kubernetes/attach-kubernetes-cluster.png index 10114eb37c70f..f1dcd50b66f88 100644 Binary files a/articles/machine-learning/media/how-to-attach-arc-kubernetes/attach-kubernetes-cluster.png and b/articles/machine-learning/media/how-to-attach-arc-kubernetes/attach-kubernetes-cluster.png differ diff --git a/articles/machine-learning/media/how-to-attach-arc-kubernetes/configure-kubernetes-cluster-2.png b/articles/machine-learning/media/how-to-attach-arc-kubernetes/configure-kubernetes-cluster-2.png index 13d111db238ea..ddbf235eecefb 100644 Binary files a/articles/machine-learning/media/how-to-attach-arc-kubernetes/configure-kubernetes-cluster-2.png and b/articles/machine-learning/media/how-to-attach-arc-kubernetes/configure-kubernetes-cluster-2.png differ diff --git a/articles/machine-learning/media/how-to-attach-arc-kubernetes/configure-kubernetes-cluster.png b/articles/machine-learning/media/how-to-attach-arc-kubernetes/configure-kubernetes-cluster.png deleted file mode 100644 index 769c9a19ddc78..0000000000000 Binary files a/articles/machine-learning/media/how-to-attach-arc-kubernetes/configure-kubernetes-cluster.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-attach-arc-kubernetes/deploy-extension-from-ui-create.png b/articles/machine-learning/media/how-to-attach-arc-kubernetes/deploy-extension-from-ui-create.png new file mode 100644 index 0000000000000..1ad3f227e2d25 Binary files /dev/null and b/articles/machine-learning/media/how-to-attach-arc-kubernetes/deploy-extension-from-ui-create.png differ diff --git a/articles/machine-learning/media/how-to-attach-arc-kubernetes/deploy-extension-from-ui-extension-detail.png b/articles/machine-learning/media/how-to-attach-arc-kubernetes/deploy-extension-from-ui-extension-detail.png new file mode 100644 index 0000000000000..a8191c6b611a8 Binary files /dev/null and b/articles/machine-learning/media/how-to-attach-arc-kubernetes/deploy-extension-from-ui-extension-detail.png differ diff --git a/articles/machine-learning/media/how-to-attach-arc-kubernetes/deploy-extension-from-ui-extension-list.png b/articles/machine-learning/media/how-to-attach-arc-kubernetes/deploy-extension-from-ui-extension-list.png new file mode 100644 index 0000000000000..5e955ed118c3f Binary files /dev/null and b/articles/machine-learning/media/how-to-attach-arc-kubernetes/deploy-extension-from-ui-extension-list.png differ diff --git a/articles/machine-learning/media/how-to-attach-arc-kubernetes/deploy-extension-from-ui-settings.png b/articles/machine-learning/media/how-to-attach-arc-kubernetes/deploy-extension-from-ui-settings.png 
new file mode 100644 index 0000000000000..bc9e309a40c88 Binary files /dev/null and b/articles/machine-learning/media/how-to-attach-arc-kubernetes/deploy-extension-from-ui-settings.png differ diff --git a/articles/machine-learning/media/how-to-attach-arc-kubernetes/deploy-extension-from-ui.png b/articles/machine-learning/media/how-to-attach-arc-kubernetes/deploy-extension-from-ui.png new file mode 100644 index 0000000000000..b26a325edba1f Binary files /dev/null and b/articles/machine-learning/media/how-to-attach-arc-kubernetes/deploy-extension-from-ui.png differ diff --git a/articles/machine-learning/media/how-to-attach-arc-kubernetes/upload-configuration-file.png b/articles/machine-learning/media/how-to-attach-arc-kubernetes/upload-configuration-file.png deleted file mode 100644 index d3b33edf6bcf4..0000000000000 Binary files a/articles/machine-learning/media/how-to-attach-arc-kubernetes/upload-configuration-file.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-attach-arc-kubernetes/ws-msi.png b/articles/machine-learning/media/how-to-attach-arc-kubernetes/ws-msi.png deleted file mode 100644 index 1ba37a00778c9..0000000000000 Binary files a/articles/machine-learning/media/how-to-attach-arc-kubernetes/ws-msi.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-auto-train-forecast/enable_dnn.png b/articles/machine-learning/media/how-to-auto-train-forecast/enable_dnn.png deleted file mode 100644 index 0ccbe7fb413e0..0000000000000 Binary files a/articles/machine-learning/media/how-to-auto-train-forecast/enable_dnn.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-auto-train-remote/plot.png b/articles/machine-learning/media/how-to-auto-train-remote/plot.png deleted file mode 100644 index e9a44725ab551..0000000000000 Binary files a/articles/machine-learning/media/how-to-auto-train-remote/plot.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-auto-train-remote/table.png b/articles/machine-learning/media/how-to-auto-train-remote/table.png deleted file mode 100644 index b05d9b2e03c50..0000000000000 Binary files a/articles/machine-learning/media/how-to-auto-train-remote/table.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-autoscale-endpoints/choose-custom-autoscale.png b/articles/machine-learning/media/how-to-autoscale-endpoints/choose-custom-autoscale.png index 17c00b5c7cf5b..882f800de14b0 100644 Binary files a/articles/machine-learning/media/how-to-autoscale-endpoints/choose-custom-autoscale.png and b/articles/machine-learning/media/how-to-autoscale-endpoints/choose-custom-autoscale.png differ diff --git a/articles/machine-learning/media/how-to-autoscale-endpoints/configure-autoscale.png b/articles/machine-learning/media/how-to-autoscale-endpoints/configure-autoscale.png deleted file mode 100644 index 53d3be8b2db60..0000000000000 Binary files a/articles/machine-learning/media/how-to-autoscale-endpoints/configure-autoscale.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-autoscale-endpoints/endpoints-portal.png b/articles/machine-learning/media/how-to-autoscale-endpoints/endpoints-portal.png deleted file mode 100644 index 4ba57b4d32cdf..0000000000000 Binary files a/articles/machine-learning/media/how-to-autoscale-endpoints/endpoints-portal.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-autoscale-endpoints/select-azure-monitor.png 
b/articles/machine-learning/media/how-to-autoscale-endpoints/select-azure-monitor.png deleted file mode 100644 index abdfe4a61a72e..0000000000000 Binary files a/articles/machine-learning/media/how-to-autoscale-endpoints/select-azure-monitor.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-autoscale-endpoints/select-endpoint.png b/articles/machine-learning/media/how-to-autoscale-endpoints/select-endpoint.png index c702d0c88ff10..de7f4ff9af2b1 100644 Binary files a/articles/machine-learning/media/how-to-autoscale-endpoints/select-endpoint.png and b/articles/machine-learning/media/how-to-autoscale-endpoints/select-endpoint.png differ diff --git a/articles/machine-learning/media/how-to-autoscale-endpoints/set-instance-limits.png b/articles/machine-learning/media/how-to-autoscale-endpoints/set-instance-limits.png deleted file mode 100644 index 3f03fc9d5d571..0000000000000 Binary files a/articles/machine-learning/media/how-to-autoscale-endpoints/set-instance-limits.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-configure-environment/automlonadb.png b/articles/machine-learning/media/how-to-configure-environment/automlonadb.png deleted file mode 100644 index e937311436aef..0000000000000 Binary files a/articles/machine-learning/media/how-to-configure-environment/automlonadb.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-create-attach-studio/compute-nodes.png b/articles/machine-learning/media/how-to-create-attach-studio/compute-nodes.png index 5541fe7ca0f4f..1e54d00243bc6 100644 Binary files a/articles/machine-learning/media/how-to-create-attach-studio/compute-nodes.png and b/articles/machine-learning/media/how-to-create-attach-studio/compute-nodes.png differ diff --git a/articles/machine-learning/media/how-to-create-attach-studio/create-compute-target.png b/articles/machine-learning/media/how-to-create-attach-studio/create-compute-target.png index 822af6f488c5e..c60c389886ec6 100644 Binary files a/articles/machine-learning/media/how-to-create-attach-studio/create-compute-target.png and b/articles/machine-learning/media/how-to-create-attach-studio/create-compute-target.png differ diff --git a/articles/machine-learning/media/how-to-create-attach-studio/details.png b/articles/machine-learning/media/how-to-create-attach-studio/details.png index 2b0b26f5e6764..027e795103013 100644 Binary files a/articles/machine-learning/media/how-to-create-attach-studio/details.png and b/articles/machine-learning/media/how-to-create-attach-studio/details.png differ diff --git a/articles/machine-learning/media/how-to-create-attach-studio/view-compute-targets.png b/articles/machine-learning/media/how-to-create-attach-studio/view-compute-targets.png index 28e1b6704e7ef..45c3ba96cbb36 100644 Binary files a/articles/machine-learning/media/how-to-create-attach-studio/view-compute-targets.png and b/articles/machine-learning/media/how-to-create-attach-studio/view-compute-targets.png differ diff --git a/articles/machine-learning/media/how-to-create-component-pipelines-cli/component-details.png b/articles/machine-learning/media/how-to-create-component-pipelines-cli/component-details.png deleted file mode 100644 index fca2e0ef6f2b5..0000000000000 Binary files a/articles/machine-learning/media/how-to-create-component-pipelines-cli/component-details.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-create-component-pipelines-cli/component-ui-detail.png 
b/articles/machine-learning/media/how-to-create-component-pipelines-cli/component-ui-detail.png deleted file mode 100644 index f4993235d4df6..0000000000000 Binary files a/articles/machine-learning/media/how-to-create-component-pipelines-cli/component-ui-detail.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-create-component-pipelines-cli/inputs-and-outputs.png b/articles/machine-learning/media/how-to-create-component-pipelines-cli/inputs-and-outputs.png deleted file mode 100644 index 612e6b3938951..0000000000000 Binary files a/articles/machine-learning/media/how-to-create-component-pipelines-cli/inputs-and-outputs.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-create-component-pipelines-cli/pipeline-graph.png b/articles/machine-learning/media/how-to-create-component-pipelines-cli/pipeline-graph.png deleted file mode 100644 index d31d22f67b9e7..0000000000000 Binary files a/articles/machine-learning/media/how-to-create-component-pipelines-cli/pipeline-graph.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-create-component-pipelines-cli/regression-graph.png b/articles/machine-learning/media/how-to-create-component-pipelines-cli/regression-graph.png deleted file mode 100644 index 6f5abaf28e9e2..0000000000000 Binary files a/articles/machine-learning/media/how-to-create-component-pipelines-cli/regression-graph.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-create-labeling-projects/add-label.png b/articles/machine-learning/media/how-to-create-labeling-projects/add-label.png index f9c62a713a07c..a14b2cc6c804d 100644 Binary files a/articles/machine-learning/media/how-to-create-labeling-projects/add-label.png and b/articles/machine-learning/media/how-to-create-labeling-projects/add-label.png differ diff --git a/articles/machine-learning/media/how-to-create-labeling-projects/exported-dataset.png b/articles/machine-learning/media/how-to-create-labeling-projects/exported-dataset.png index c2926f5246540..5146a6d826de0 100644 Binary files a/articles/machine-learning/media/how-to-create-labeling-projects/exported-dataset.png and b/articles/machine-learning/media/how-to-create-labeling-projects/exported-dataset.png differ diff --git a/articles/machine-learning/media/how-to-create-labeling-projects/labeling-creation-wizard.png b/articles/machine-learning/media/how-to-create-labeling-projects/labeling-creation-wizard.png index f9ed5d2011515..408b9c63ea1a1 100644 Binary files a/articles/machine-learning/media/how-to-create-labeling-projects/labeling-creation-wizard.png and b/articles/machine-learning/media/how-to-create-labeling-projects/labeling-creation-wizard.png differ diff --git a/articles/machine-learning/media/how-to-create-manage-compute-instance/create-or-upload-file.png b/articles/machine-learning/media/how-to-create-manage-compute-instance/create-or-upload-file.png index 76ad7c76e8c2a..aa723b490bb41 100644 Binary files a/articles/machine-learning/media/how-to-create-manage-compute-instance/create-or-upload-file.png and b/articles/machine-learning/media/how-to-create-manage-compute-instance/create-or-upload-file.png differ diff --git a/articles/machine-learning/media/how-to-create-manage-compute-instance/custom-service.png b/articles/machine-learning/media/how-to-create-manage-compute-instance/custom-service.png index 4f5618834d6d0..01a8232cd69aa 100644 Binary files a/articles/machine-learning/media/how-to-create-manage-compute-instance/custom-service.png and 
b/articles/machine-learning/media/how-to-create-manage-compute-instance/custom-service.png differ diff --git a/articles/machine-learning/media/how-to-create-manage-compute-instance/rstudio-open-source.png b/articles/machine-learning/media/how-to-create-manage-compute-instance/rstudio-open-source.png index a8360c1069f15..23d9fcacd36cd 100644 Binary files a/articles/machine-learning/media/how-to-create-manage-compute-instance/rstudio-open-source.png and b/articles/machine-learning/media/how-to-create-manage-compute-instance/rstudio-open-source.png differ diff --git a/articles/machine-learning/media/how-to-create-manage-compute-instance/rstudio-workbench.png b/articles/machine-learning/media/how-to-create-manage-compute-instance/rstudio-workbench.png index 3e6eabf3dec4c..7db37cad1bf4a 100644 Binary files a/articles/machine-learning/media/how-to-create-manage-compute-instance/rstudio-workbench.png and b/articles/machine-learning/media/how-to-create-manage-compute-instance/rstudio-workbench.png differ diff --git a/articles/machine-learning/media/how-to-create-managed-online-endpoint-studio/endpoint-create-managed-online-endpoint.png b/articles/machine-learning/media/how-to-create-managed-online-endpoint-studio/endpoint-create-managed-online-endpoint.png index 7e3f01a655b0d..827ce222cc0d4 100644 Binary files a/articles/machine-learning/media/how-to-create-managed-online-endpoint-studio/endpoint-create-managed-online-endpoint.png and b/articles/machine-learning/media/how-to-create-managed-online-endpoint-studio/endpoint-create-managed-online-endpoint.png differ diff --git a/articles/machine-learning/media/how-to-create-managed-online-endpoint-studio/models-create-managed-online-endpoint.png b/articles/machine-learning/media/how-to-create-managed-online-endpoint-studio/models-create-managed-online-endpoint.png deleted file mode 100644 index f5e4c40f4bc06..0000000000000 Binary files a/articles/machine-learning/media/how-to-create-managed-online-endpoint-studio/models-create-managed-online-endpoint.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-create-managed-online-endpoint-studio/models-page-deployment-latest.png b/articles/machine-learning/media/how-to-create-managed-online-endpoint-studio/models-page-deployment-latest.png deleted file mode 100644 index a480d2016e60d..0000000000000 Binary files a/articles/machine-learning/media/how-to-create-managed-online-endpoint-studio/models-page-deployment-latest.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-create-register-datasets/create-dataset-ui.gif b/articles/machine-learning/media/how-to-create-register-datasets/create-dataset-ui.gif deleted file mode 100644 index 14fa09e7280ab..0000000000000 Binary files a/articles/machine-learning/media/how-to-create-register-datasets/create-dataset-ui.gif and /dev/null differ diff --git a/articles/machine-learning/media/how-to-create-text-labeling-projects/text-labeling-creation-wizard.png b/articles/machine-learning/media/how-to-create-text-labeling-projects/text-labeling-creation-wizard.png index 4228a66b48b8e..2126b41c0d758 100644 Binary files a/articles/machine-learning/media/how-to-create-text-labeling-projects/text-labeling-creation-wizard.png and b/articles/machine-learning/media/how-to-create-text-labeling-projects/text-labeling-creation-wizard.png differ diff --git a/articles/machine-learning/media/how-to-create-your-first-pipeline/pipeline-endpoints.png b/articles/machine-learning/media/how-to-create-your-first-pipeline/pipeline-endpoints.png 
index 786a48d47c0c7..679d66d9000bd 100644 Binary files a/articles/machine-learning/media/how-to-create-your-first-pipeline/pipeline-endpoints.png and b/articles/machine-learning/media/how-to-create-your-first-pipeline/pipeline-endpoints.png differ diff --git a/articles/machine-learning/media/how-to-create-your-first-pipeline/pipelines.png b/articles/machine-learning/media/how-to-create-your-first-pipeline/pipelines.png index 99522098e04eb..153333648aec2 100644 Binary files a/articles/machine-learning/media/how-to-create-your-first-pipeline/pipelines.png and b/articles/machine-learning/media/how-to-create-your-first-pipeline/pipelines.png differ diff --git a/articles/machine-learning/media/how-to-custom-dns/custom-dns-express-route.png b/articles/machine-learning/media/how-to-custom-dns/custom-dns-express-route.png deleted file mode 100644 index b05924f504d22..0000000000000 Binary files a/articles/machine-learning/media/how-to-custom-dns/custom-dns-express-route.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-custom-dns/custom-dns-topology.png b/articles/machine-learning/media/how-to-custom-dns/custom-dns-topology.png deleted file mode 100644 index a784c69f07c00..0000000000000 Binary files a/articles/machine-learning/media/how-to-custom-dns/custom-dns-topology.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-data-ingest-adf/aml-dataset.png b/articles/machine-learning/media/how-to-data-ingest-adf/aml-dataset.png index f8cea0eb7d996..46a3dcff85521 100644 Binary files a/articles/machine-learning/media/how-to-data-ingest-adf/aml-dataset.png and b/articles/machine-learning/media/how-to-data-ingest-adf/aml-dataset.png differ diff --git a/articles/machine-learning/media/how-to-debug-pipelines/pipeline-1.png b/articles/machine-learning/media/how-to-debug-pipelines/pipeline-1.png deleted file mode 100644 index f3f3060b97b6a..0000000000000 Binary files a/articles/machine-learning/media/how-to-debug-pipelines/pipeline-1.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-debug-pipelines/pipeline-2.png b/articles/machine-learning/media/how-to-debug-pipelines/pipeline-2.png deleted file mode 100644 index 047cb96249555..0000000000000 Binary files a/articles/machine-learning/media/how-to-debug-pipelines/pipeline-2.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-debug-pipelines/pipeline-3.png b/articles/machine-learning/media/how-to-debug-pipelines/pipeline-3.png deleted file mode 100644 index d64f63cfc85ce..0000000000000 Binary files a/articles/machine-learning/media/how-to-debug-pipelines/pipeline-3.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-debug-pipelines/pipeline-4.png b/articles/machine-learning/media/how-to-debug-pipelines/pipeline-4.png deleted file mode 100644 index 998d24a5c49c3..0000000000000 Binary files a/articles/machine-learning/media/how-to-debug-pipelines/pipeline-4.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-debug-pipelines/pipelinerun-01.png b/articles/machine-learning/media/how-to-debug-pipelines/pipelinerun-01.png deleted file mode 100644 index 913dae93badbe..0000000000000 Binary files a/articles/machine-learning/media/how-to-debug-pipelines/pipelinerun-01.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-debug-pipelines/pipelinerun-02.png b/articles/machine-learning/media/how-to-debug-pipelines/pipelinerun-02.png deleted file mode 100644 index 82919374cc255..0000000000000 Binary 
files a/articles/machine-learning/media/how-to-debug-pipelines/pipelinerun-02.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-debug-pipelines/pipelinerun-03.png b/articles/machine-learning/media/how-to-debug-pipelines/pipelinerun-03.png deleted file mode 100644 index 7b32b1a16afbd..0000000000000 Binary files a/articles/machine-learning/media/how-to-debug-pipelines/pipelinerun-03.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-define-task-type/task-type.png b/articles/machine-learning/media/how-to-define-task-type/task-type.png deleted file mode 100644 index 35e3a4b661100..0000000000000 Binary files a/articles/machine-learning/media/how-to-define-task-type/task-type.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-deploy-and-where/enable-modeltrigger-artifact-expanded.png b/articles/machine-learning/media/how-to-deploy-and-where/enable-modeltrigger-artifact-expanded.png deleted file mode 100644 index 2622990432e1d..0000000000000 Binary files a/articles/machine-learning/media/how-to-deploy-and-where/enable-modeltrigger-artifact-expanded.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-deploy-and-where/enable-modeltrigger-artifact.png b/articles/machine-learning/media/how-to-deploy-and-where/enable-modeltrigger-artifact.png deleted file mode 100644 index fac6019ada188..0000000000000 Binary files a/articles/machine-learning/media/how-to-deploy-and-where/enable-modeltrigger-artifact.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-deploy-and-where/how-to-choose-target.png b/articles/machine-learning/media/how-to-deploy-and-where/how-to-choose-target.png deleted file mode 100644 index ac715cf27033b..0000000000000 Binary files a/articles/machine-learning/media/how-to-deploy-and-where/how-to-choose-target.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-deploy-and-where/resource-manager-connection.png b/articles/machine-learning/media/how-to-deploy-and-where/resource-manager-connection.png deleted file mode 100644 index eed73f1ad4e12..0000000000000 Binary files a/articles/machine-learning/media/how-to-deploy-and-where/resource-manager-connection.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-deploy-and-where/set-modeltrigger-expanded.png b/articles/machine-learning/media/how-to-deploy-and-where/set-modeltrigger-expanded.png deleted file mode 100644 index 6aea45c89a968..0000000000000 Binary files a/articles/machine-learning/media/how-to-deploy-and-where/set-modeltrigger-expanded.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-deploy-and-where/set-modeltrigger.png b/articles/machine-learning/media/how-to-deploy-and-where/set-modeltrigger.png deleted file mode 100644 index afdd76875d4ba..0000000000000 Binary files a/articles/machine-learning/media/how-to-deploy-and-where/set-modeltrigger.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-deploy-and-where/view-service-connection-expanded.png b/articles/machine-learning/media/how-to-deploy-and-where/view-service-connection-expanded.png deleted file mode 100644 index 34c6bdbd06d2a..0000000000000 Binary files a/articles/machine-learning/media/how-to-deploy-and-where/view-service-connection-expanded.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-deploy-and-where/view-service-connection.png b/articles/machine-learning/media/how-to-deploy-and-where/view-service-connection.png 
deleted file mode 100644 index 1c9b85fc2c263..0000000000000 Binary files a/articles/machine-learning/media/how-to-deploy-and-where/view-service-connection.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-deploy-automl-endpoint/deploy-button.png b/articles/machine-learning/media/how-to-deploy-automl-endpoint/deploy-button.png index 7ba55eb3bf259..adcb04e86f41e 100644 Binary files a/articles/machine-learning/media/how-to-deploy-automl-endpoint/deploy-button.png and b/articles/machine-learning/media/how-to-deploy-automl-endpoint/deploy-button.png differ diff --git a/articles/machine-learning/media/how-to-deploy-automl-endpoint/download-model.png b/articles/machine-learning/media/how-to-deploy-automl-endpoint/download-model.png index a3577c73bff42..884f0f827af6d 100644 Binary files a/articles/machine-learning/media/how-to-deploy-automl-endpoint/download-model.png and b/articles/machine-learning/media/how-to-deploy-automl-endpoint/download-model.png differ diff --git a/articles/machine-learning/media/how-to-deploy-automl-endpoint/model-option.png b/articles/machine-learning/media/how-to-deploy-automl-endpoint/model-option.png deleted file mode 100644 index 3a981bb68da44..0000000000000 Binary files a/articles/machine-learning/media/how-to-deploy-automl-endpoint/model-option.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-deploy-automl-endpoint/output-and-logs.png b/articles/machine-learning/media/how-to-deploy-automl-endpoint/output-and-logs.png deleted file mode 100644 index 754edfdabdb45..0000000000000 Binary files a/articles/machine-learning/media/how-to-deploy-automl-endpoint/output-and-logs.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-deploy-local/registered-model.png b/articles/machine-learning/media/how-to-deploy-local/registered-model.png index b8d58cc969d8a..d07c252fc0474 100644 Binary files a/articles/machine-learning/media/how-to-deploy-local/registered-model.png and b/articles/machine-learning/media/how-to-deploy-local/registered-model.png differ diff --git a/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/create-from-endpoints.png b/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/create-from-endpoints.png index 2e3efc05a5d57..8a35d3d8be7e5 100644 Binary files a/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/create-from-endpoints.png and b/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/create-from-endpoints.png differ diff --git a/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/deploy-from-models-ui.png b/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/deploy-from-models-ui.png index 67c8f519b515d..92bbe5c3783f3 100644 Binary files a/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/deploy-from-models-ui.png and b/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/deploy-from-models-ui.png differ diff --git a/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/download-output-logs.png b/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/download-output-logs.png index a50581366242e..0441adadd5ba2 100644 Binary files a/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/download-output-logs.png and 
b/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/download-output-logs.png differ diff --git a/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/ncd-wizard.png b/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/ncd-wizard.png index cf0a09f891e48..2b84f3adf27dc 100644 Binary files a/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/ncd-wizard.png and b/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/ncd-wizard.png differ diff --git a/articles/machine-learning/media/how-to-deploy-model-designer/download-artifacts-in-models-page.png b/articles/machine-learning/media/how-to-deploy-model-designer/download-artifacts-in-models-page.png index 708b0f370aa05..8d79a0851a635 100644 Binary files a/articles/machine-learning/media/how-to-deploy-model-designer/download-artifacts-in-models-page.png and b/articles/machine-learning/media/how-to-deploy-model-designer/download-artifacts-in-models-page.png differ diff --git a/articles/machine-learning/media/how-to-deploy-model-designer/models-asset-page.png b/articles/machine-learning/media/how-to-deploy-model-designer/models-asset-page.png index 7de046c6a3dfc..06d834d8a8b06 100644 Binary files a/articles/machine-learning/media/how-to-deploy-model-designer/models-asset-page.png and b/articles/machine-learning/media/how-to-deploy-model-designer/models-asset-page.png differ diff --git a/articles/machine-learning/media/how-to-deploy-model-designer/open-deploy-wizard.png b/articles/machine-learning/media/how-to-deploy-model-designer/open-deploy-wizard.png index 9fd0b43e980f4..952ed691a9eaa 100644 Binary files a/articles/machine-learning/media/how-to-deploy-model-designer/open-deploy-wizard.png and b/articles/machine-learning/media/how-to-deploy-model-designer/open-deploy-wizard.png differ diff --git a/articles/machine-learning/media/how-to-deploy-with-triton/create-option-from-endpoints-page.png b/articles/machine-learning/media/how-to-deploy-with-triton/create-option-from-endpoints-page.png index 4f757eaec7c39..118954fa0eb51 100644 Binary files a/articles/machine-learning/media/how-to-deploy-with-triton/create-option-from-endpoints-page.png and b/articles/machine-learning/media/how-to-deploy-with-triton/create-option-from-endpoints-page.png differ diff --git a/articles/machine-learning/media/how-to-deploy-with-triton/deploy-from-models-page.png b/articles/machine-learning/media/how-to-deploy-with-triton/deploy-from-models-page.png index a5d251a0b76ce..7485f5c148a66 100644 Binary files a/articles/machine-learning/media/how-to-deploy-with-triton/deploy-from-models-page.png and b/articles/machine-learning/media/how-to-deploy-with-triton/deploy-from-models-page.png differ diff --git a/articles/machine-learning/media/how-to-deploy-with-triton/inference-config-deploy.png b/articles/machine-learning/media/how-to-deploy-with-triton/inference-config-deploy.png deleted file mode 100644 index 48910f1fed9b2..0000000000000 Binary files a/articles/machine-learning/media/how-to-deploy-with-triton/inference-config-deploy.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-deploy-with-triton/ncd-triton.png b/articles/machine-learning/media/how-to-deploy-with-triton/ncd-triton.png index 9eb0c7020a9fe..665a0f34d17a7 100644 Binary files a/articles/machine-learning/media/how-to-deploy-with-triton/ncd-triton.png and b/articles/machine-learning/media/how-to-deploy-with-triton/ncd-triton.png differ diff --git 
a/articles/machine-learning/media/how-to-deploy-with-triton/no-code-deploy.png b/articles/machine-learning/media/how-to-deploy-with-triton/no-code-deploy.png deleted file mode 100644 index 43efdc63af4de..0000000000000 Binary files a/articles/machine-learning/media/how-to-deploy-with-triton/no-code-deploy.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-deploy-with-triton/normal-deploy.png b/articles/machine-learning/media/how-to-deploy-with-triton/normal-deploy.png deleted file mode 100644 index 209d10386bf23..0000000000000 Binary files a/articles/machine-learning/media/how-to-deploy-with-triton/normal-deploy.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-deploy-with-triton/triton-architecture.png b/articles/machine-learning/media/how-to-deploy-with-triton/triton-architecture.png deleted file mode 100644 index b73a264e68959..0000000000000 Binary files a/articles/machine-learning/media/how-to-deploy-with-triton/triton-architecture.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-deploy-with-triton/triton-deploy.png b/articles/machine-learning/media/how-to-deploy-with-triton/triton-deploy.png deleted file mode 100644 index 65b62246ecaba..0000000000000 Binary files a/articles/machine-learning/media/how-to-deploy-with-triton/triton-deploy.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-deploy-with-triton/triton-model-format.png b/articles/machine-learning/media/how-to-deploy-with-triton/triton-model-format.png index 32efb8ffefb38..4e5c1af52e827 100644 Binary files a/articles/machine-learning/media/how-to-deploy-with-triton/triton-model-format.png and b/articles/machine-learning/media/how-to-deploy-with-triton/triton-model-format.png differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-churn/added-column1.png b/articles/machine-learning/media/how-to-designer-sample-classification-churn/added-column1.png deleted file mode 100644 index bdb19a510dab4..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-churn/added-column1.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-churn/cleaned-dataset.png b/articles/machine-learning/media/how-to-designer-sample-classification-churn/cleaned-dataset.png deleted file mode 100644 index 3eff71bbdc602..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-churn/cleaned-dataset.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-churn/evaluate-result.png b/articles/machine-learning/media/how-to-designer-sample-classification-churn/evaluate-result.png deleted file mode 100644 index 8e7f4982409dc..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-churn/evaluate-result.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-churn/pipeline-graph.png b/articles/machine-learning/media/how-to-designer-sample-classification-churn/pipeline-graph.png deleted file mode 100644 index 09647f601e7a6..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-churn/pipeline-graph.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-churn/sample5-addcol-1225.png 
b/articles/machine-learning/media/how-to-designer-sample-classification-churn/sample5-addcol-1225.png deleted file mode 100644 index c40fccf481757..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-churn/sample5-addcol-1225.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-churn/sample5-dataset-1225.png b/articles/machine-learning/media/how-to-designer-sample-classification-churn/sample5-dataset-1225.png deleted file mode 100644 index 6c80ca7db1ca0..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-churn/sample5-dataset-1225.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-churn/sample5-evaluate-1225.png b/articles/machine-learning/media/how-to-designer-sample-classification-churn/sample5-evaluate-1225.png deleted file mode 100644 index cfb675500f248..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-churn/sample5-evaluate-1225.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-credit-risk-cost-sensitive/graph.png b/articles/machine-learning/media/how-to-designer-sample-classification-credit-risk-cost-sensitive/graph.png deleted file mode 100644 index e157760c9233d..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-credit-risk-cost-sensitive/graph.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-credit-risk-cost-sensitive/result.png b/articles/machine-learning/media/how-to-designer-sample-classification-credit-risk-cost-sensitive/result.png deleted file mode 100644 index 4fdc545029ced..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-credit-risk-cost-sensitive/result.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-credit-risk-cost-sensitive/sample4-lastselect-1225.png b/articles/machine-learning/media/how-to-designer-sample-classification-credit-risk-cost-sensitive/sample4-lastselect-1225.png deleted file mode 100644 index 8be3c7d0aeaca..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-credit-risk-cost-sensitive/sample4-lastselect-1225.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-credit-risk-cost-sensitive/score-part.png b/articles/machine-learning/media/how-to-designer-sample-classification-credit-risk-cost-sensitive/score-part.png deleted file mode 100644 index 821fb4b370ed8..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-credit-risk-cost-sensitive/score-part.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/data-process.png b/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/data-process.png deleted file mode 100644 index cb696f6953721..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/data-process.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/edit-metadata.png 
b/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/edit-metadata.png deleted file mode 100644 index a0842a83b2941..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/edit-metadata.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/evaluate.png b/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/evaluate.png deleted file mode 100644 index 94aa78ebabae7..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/evaluate.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/join-destination.png b/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/join-destination.png deleted file mode 100644 index a38341a534cfd..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/join-destination.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/join-origin.png b/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/join-origin.png deleted file mode 100644 index 333f2ef17fac5..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/join-origin.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/pipeline-graph.png b/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/pipeline-graph.png deleted file mode 100644 index 322b7f9615a72..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/pipeline-graph.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/sample6-evaluate-1225.png b/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/sample6-evaluate-1225.png deleted file mode 100644 index 2442ee67cd45e..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/sample6-evaluate-1225.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/split.png b/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/split.png deleted file mode 100644 index f05036fe43845..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-flight-delay/split.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-predict-income/data.png b/articles/machine-learning/media/how-to-designer-sample-classification-predict-income/data.png deleted file mode 100644 index cf6de797372e0..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-predict-income/data.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-predict-income/evaluate-result.png b/articles/machine-learning/media/how-to-designer-sample-classification-predict-income/evaluate-result.png deleted file mode 100644 index 0ffbbf1943ae1..0000000000000 Binary files 
a/articles/machine-learning/media/how-to-designer-sample-classification-predict-income/evaluate-result.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-predict-income/overall-graph.png b/articles/machine-learning/media/how-to-designer-sample-classification-predict-income/overall-graph.png deleted file mode 100644 index ebb3cb88e32bd..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-predict-income/overall-graph.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-predict-income/sample3-dataset-1225.png b/articles/machine-learning/media/how-to-designer-sample-classification-predict-income/sample3-dataset-1225.png deleted file mode 100644 index 6697c76765fb2..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-predict-income/sample3-dataset-1225.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-predict-income/sample3-evaluate-1225.png b/articles/machine-learning/media/how-to-designer-sample-classification-predict-income/sample3-evaluate-1225.png deleted file mode 100644 index 53d2189456402..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-predict-income/sample3-evaluate-1225.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-classification-predict-income/transformation.png b/articles/machine-learning/media/how-to-designer-sample-classification-predict-income/transformation.png deleted file mode 100644 index 162b761b73406..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-classification-predict-income/transformation.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-basic/data-processing.png b/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-basic/data-processing.png deleted file mode 100644 index 9652df32ff4f6..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-basic/data-processing.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-basic/evaluate-result.png b/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-basic/evaluate-result.png deleted file mode 100644 index 3cfe0a523a67f..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-basic/evaluate-result.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-basic/overall-graph.png b/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-basic/overall-graph.png deleted file mode 100644 index 2ee9d6fa44b7a..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-basic/overall-graph.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-basic/sample1-evaluate-1225.png b/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-basic/sample1-evaluate-1225.png deleted file mode 100644 index c4b6ca47878c7..0000000000000 Binary files 
a/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-basic/sample1-evaluate-1225.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-basic/sample1-score-1225.png b/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-basic/sample1-score-1225.png deleted file mode 100644 index 81ca159bc4c66..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-basic/sample1-score-1225.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-basic/score-result.png b/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-basic/score-result.png deleted file mode 100644 index 11d37f09ceb74..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-basic/score-result.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-compare-algorithms/data-processing.png b/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-compare-algorithms/data-processing.png deleted file mode 100644 index 38c6a9f80d6bd..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-compare-algorithms/data-processing.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-compare-algorithms/graph.png b/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-compare-algorithms/graph.png deleted file mode 100644 index 0b1d791e84430..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-compare-algorithms/graph.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-compare-algorithms/result.png b/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-compare-algorithms/result.png deleted file mode 100644 index a6fde3525381e..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-regression-automobile-price-compare-algorithms/result.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-text-classification/n-gram.png b/articles/machine-learning/media/how-to-designer-sample-text-classification/n-gram.png deleted file mode 100644 index 413369008e850..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-text-classification/n-gram.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-sample-text-classification/nlp-modules-overall.png b/articles/machine-learning/media/how-to-designer-sample-text-classification/nlp-modules-overall.png deleted file mode 100644 index 9df448ef10299..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-sample-text-classification/nlp-modules-overall.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-designer-transform-data/view-data.png b/articles/machine-learning/media/how-to-designer-transform-data/view-data.png deleted file mode 100644 index cefb1810731e7..0000000000000 Binary files a/articles/machine-learning/media/how-to-designer-transform-data/view-data.png and /dev/null differ diff --git 
a/articles/machine-learning/media/how-to-enable-app-insights/advancedsettings.png b/articles/machine-learning/media/how-to-enable-app-insights/advancedsettings.png deleted file mode 100644 index af7e69e74d638..0000000000000 Binary files a/articles/machine-learning/media/how-to-enable-app-insights/advancedsettings.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-enable-app-insights/deployments.png b/articles/machine-learning/media/how-to-enable-app-insights/deployments.png deleted file mode 100644 index c439bc6e3e31d..0000000000000 Binary files a/articles/machine-learning/media/how-to-enable-app-insights/deployments.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-enable-app-insights/edit.png b/articles/machine-learning/media/how-to-enable-app-insights/edit.png deleted file mode 100644 index 75c2695424553..0000000000000 Binary files a/articles/machine-learning/media/how-to-enable-app-insights/edit.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-enable-app-insights/logs.png b/articles/machine-learning/media/how-to-enable-app-insights/logs.png deleted file mode 100644 index bd19090d76843..0000000000000 Binary files a/articles/machine-learning/media/how-to-enable-app-insights/logs.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-enable-app-insights/uncheck.png b/articles/machine-learning/media/how-to-enable-app-insights/uncheck.png deleted file mode 100644 index 0ae78d21fcdc0..0000000000000 Binary files a/articles/machine-learning/media/how-to-enable-app-insights/uncheck.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-enable-data-collection/editservice.png b/articles/machine-learning/media/how-to-enable-data-collection/editservice.png deleted file mode 100644 index d96e1f122a3fa..0000000000000 Binary files a/articles/machine-learning/media/how-to-enable-data-collection/editservice.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-enable-studio-virtual-network/default-datastores.png b/articles/machine-learning/media/how-to-enable-studio-virtual-network/default-datastores.png index da132d47928bf..ed5c981f3bdd6 100644 Binary files a/articles/machine-learning/media/how-to-enable-studio-virtual-network/default-datastores.png and b/articles/machine-learning/media/how-to-enable-studio-virtual-network/default-datastores.png differ diff --git a/articles/machine-learning/media/how-to-enable-studio-virtual-network/enable-managed-identity.png b/articles/machine-learning/media/how-to-enable-studio-virtual-network/enable-managed-identity.png index da31a0af6af66..57e93f34106e8 100644 Binary files a/articles/machine-learning/media/how-to-enable-studio-virtual-network/enable-managed-identity.png and b/articles/machine-learning/media/how-to-enable-studio-virtual-network/enable-managed-identity.png differ diff --git a/articles/machine-learning/media/how-to-enable-virtual-network/aks-vnet-inbound-nsg-aml.png b/articles/machine-learning/media/how-to-enable-virtual-network/aks-vnet-inbound-nsg-aml.png deleted file mode 100644 index 9567020e7e1b4..0000000000000 Binary files a/articles/machine-learning/media/how-to-enable-virtual-network/aks-vnet-inbound-nsg-aml.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-enable-virtual-network/amlcompute-virtual-network-inbound.png b/articles/machine-learning/media/how-to-enable-virtual-network/amlcompute-virtual-network-inbound.png deleted file mode 100644 index 
8c4ad7b2f69eb..0000000000000 Binary files a/articles/machine-learning/media/how-to-enable-virtual-network/amlcompute-virtual-network-inbound.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-enable-virtual-network/batchnodemanagement-service-tag.png b/articles/machine-learning/media/how-to-enable-virtual-network/batchnodemanagement-service-tag.png deleted file mode 100644 index f3dce78c31e19..0000000000000 Binary files a/articles/machine-learning/media/how-to-enable-virtual-network/batchnodemanagement-service-tag.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-enable-virtual-network/create-compute-cluster.png b/articles/machine-learning/media/how-to-enable-virtual-network/create-compute-cluster.png index 4addf83192b98..bb84093162ada 100644 Binary files a/articles/machine-learning/media/how-to-enable-virtual-network/create-compute-cluster.png and b/articles/machine-learning/media/how-to-enable-virtual-network/create-compute-cluster.png differ diff --git a/articles/machine-learning/media/how-to-enable-virtual-network/create-inference.png b/articles/machine-learning/media/how-to-enable-virtual-network/create-inference.png index b00d598e6df29..fcda93dec7890 100644 Binary files a/articles/machine-learning/media/how-to-enable-virtual-network/create-inference.png and b/articles/machine-learning/media/how-to-enable-virtual-network/create-inference.png differ diff --git a/articles/machine-learning/media/how-to-enable-virtual-network/experimentation-virtual-network-outbound.png b/articles/machine-learning/media/how-to-enable-virtual-network/experimentation-virtual-network-outbound.png deleted file mode 100644 index 32f8dd590902f..0000000000000 Binary files a/articles/machine-learning/media/how-to-enable-virtual-network/experimentation-virtual-network-outbound.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-enable-virtual-network/limited-outbound-nsg-exp.png b/articles/machine-learning/media/how-to-enable-virtual-network/limited-outbound-nsg-exp.png deleted file mode 100644 index 411af3fd843dd..0000000000000 Binary files a/articles/machine-learning/media/how-to-enable-virtual-network/limited-outbound-nsg-exp.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-enable-virtual-network/storage-firewalls-and-virtual-networks-page.png b/articles/machine-learning/media/how-to-enable-virtual-network/storage-firewalls-and-virtual-networks-page.png deleted file mode 100644 index 5aa94004999bc..0000000000000 Binary files a/articles/machine-learning/media/how-to-enable-virtual-network/storage-firewalls-and-virtual-networks-page.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-enable-virtual-network/workspace-storage.png b/articles/machine-learning/media/how-to-enable-virtual-network/workspace-storage.png deleted file mode 100644 index a3c55577c7061..0000000000000 Binary files a/articles/machine-learning/media/how-to-enable-virtual-network/workspace-storage.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-export-delete-data/delete-experiment.png b/articles/machine-learning/media/how-to-export-delete-data/delete-experiment.png deleted file mode 100644 index c0a702543ff7f..0000000000000 Binary files a/articles/machine-learning/media/how-to-export-delete-data/delete-experiment.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-export-delete-data/unregister-dataset.png 
b/articles/machine-learning/media/how-to-export-delete-data/unregister-dataset.png deleted file mode 100644 index 60eb61805922e..0000000000000 Binary files a/articles/machine-learning/media/how-to-export-delete-data/unregister-dataset.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-generate-automl-training-code/generated-code-illustration.png b/articles/machine-learning/media/how-to-generate-automl-training-code/generated-code-illustration.png deleted file mode 100644 index 43c98f15ee853..0000000000000 Binary files a/articles/machine-learning/media/how-to-generate-automl-training-code/generated-code-illustration.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-identity-based-data-access/create-identity-based-datastore.png b/articles/machine-learning/media/how-to-identity-based-data-access/create-identity-based-datastore.png deleted file mode 100644 index 71f46564af10f..0000000000000 Binary files a/articles/machine-learning/media/how-to-identity-based-data-access/create-identity-based-datastore.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-log-view-metrics/download-logs.png b/articles/machine-learning/media/how-to-log-view-metrics/download-logs.png index feec32a9b9751..0fbf48386cb1a 100644 Binary files a/articles/machine-learning/media/how-to-log-view-metrics/download-logs.png and b/articles/machine-learning/media/how-to-log-view-metrics/download-logs.png differ diff --git a/articles/machine-learning/media/how-to-machine-learning-interpretability-automl/automl-explanation.png b/articles/machine-learning/media/how-to-machine-learning-interpretability-automl/automl-explanation.png index ff2c7945a8968..e765c87496a6b 100644 Binary files a/articles/machine-learning/media/how-to-machine-learning-interpretability-automl/automl-explanation.png and b/articles/machine-learning/media/how-to-machine-learning-interpretability-automl/automl-explanation.png differ diff --git a/articles/machine-learning/media/how-to-machine-learning-interpretability/interpretability-architecture-old.png b/articles/machine-learning/media/how-to-machine-learning-interpretability/interpretability-architecture-old.png deleted file mode 100644 index d3d155934eb90..0000000000000 Binary files a/articles/machine-learning/media/how-to-machine-learning-interpretability/interpretability-architecture-old.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-manage-environments-in-studio/create-page.jpg b/articles/machine-learning/media/how-to-manage-environments-in-studio/create-page.jpg index feb1141a4ca4b..6b77628efadc4 100644 Binary files a/articles/machine-learning/media/how-to-manage-environments-in-studio/create-page.jpg and b/articles/machine-learning/media/how-to-manage-environments-in-studio/create-page.jpg differ diff --git a/articles/machine-learning/media/how-to-manage-environments-in-studio/createpage.jpg b/articles/machine-learning/media/how-to-manage-environments-in-studio/createpage.jpg deleted file mode 100644 index feb1141a4ca4b..0000000000000 Binary files a/articles/machine-learning/media/how-to-manage-environments-in-studio/createpage.jpg and /dev/null differ diff --git a/articles/machine-learning/media/how-to-manage-environments-in-studio/curatedenv.jpg b/articles/machine-learning/media/how-to-manage-environments-in-studio/curatedenv.jpg deleted file mode 100644 index 4299fe628e49c..0000000000000 Binary files a/articles/machine-learning/media/how-to-manage-environments-in-studio/curatedenv.jpg and 
/dev/null differ diff --git a/articles/machine-learning/media/how-to-manage-environments-in-studio/customhome.jpg b/articles/machine-learning/media/how-to-manage-environments-in-studio/customhome.jpg deleted file mode 100644 index dbaa1921a9da3..0000000000000 Binary files a/articles/machine-learning/media/how-to-manage-environments-in-studio/customhome.jpg and /dev/null differ diff --git a/articles/machine-learning/media/how-to-manage-environments-in-studio/detailspage.jpg b/articles/machine-learning/media/how-to-manage-environments-in-studio/detailspage.jpg deleted file mode 100644 index 4f59215080fb6..0000000000000 Binary files a/articles/machine-learning/media/how-to-manage-environments-in-studio/detailspage.jpg and /dev/null differ diff --git a/articles/machine-learning/media/how-to-manage-quotas/quota-increase-private-endpoint.png b/articles/machine-learning/media/how-to-manage-quotas/quota-increase-private-endpoint.png deleted file mode 100644 index 4214f8ca01f45..0000000000000 Binary files a/articles/machine-learning/media/how-to-manage-quotas/quota-increase-private-endpoint.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-monitor-data-drift/drift-config-expanded.png b/articles/machine-learning/media/how-to-monitor-data-drift/drift-config-expanded.png deleted file mode 100644 index 97a7d45b479fe..0000000000000 Binary files a/articles/machine-learning/media/how-to-monitor-data-drift/drift-config-expanded.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-monitor-data-drift/drift-config.png b/articles/machine-learning/media/how-to-monitor-data-drift/drift-config.png deleted file mode 100644 index 56cff5bf0a47b..0000000000000 Binary files a/articles/machine-learning/media/how-to-monitor-data-drift/drift-config.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-monitor-data-drift/drift-ui-expanded.png b/articles/machine-learning/media/how-to-monitor-data-drift/drift-ui-expanded.png deleted file mode 100644 index 8fc3c03d6be42..0000000000000 Binary files a/articles/machine-learning/media/how-to-monitor-data-drift/drift-ui-expanded.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-monitor-data-drift/drift-ui.png b/articles/machine-learning/media/how-to-monitor-data-drift/drift-ui.png deleted file mode 100644 index 6c237c9868a88..0000000000000 Binary files a/articles/machine-learning/media/how-to-monitor-data-drift/drift-ui.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-monitor-data-drift/drift_config.png b/articles/machine-learning/media/how-to-monitor-data-drift/drift_config.png deleted file mode 100644 index 72889fb3c6ec4..0000000000000 Binary files a/articles/machine-learning/media/how-to-monitor-data-drift/drift_config.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-monitor-data-drift/drift_email.png b/articles/machine-learning/media/how-to-monitor-data-drift/drift_email.png deleted file mode 100644 index e3261e50c6392..0000000000000 Binary files a/articles/machine-learning/media/how-to-monitor-data-drift/drift_email.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-monitor-data-drift/drift_show.png b/articles/machine-learning/media/how-to-monitor-data-drift/drift_show.png deleted file mode 100644 index dacb13bbbdad1..0000000000000 Binary files a/articles/machine-learning/media/how-to-monitor-data-drift/drift_show.png and /dev/null differ diff --git 
a/articles/machine-learning/media/how-to-monitor-data-drift/drift_ui.png b/articles/machine-learning/media/how-to-monitor-data-drift/drift_ui.png deleted file mode 100644 index 53391cfa4d626..0000000000000 Binary files a/articles/machine-learning/media/how-to-monitor-data-drift/drift_ui.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-monitor-datasets/video.gif b/articles/machine-learning/media/how-to-monitor-datasets/video.gif deleted file mode 100644 index 38ca068575787..0000000000000 Binary files a/articles/machine-learning/media/how-to-monitor-datasets/video.gif and /dev/null differ diff --git a/articles/machine-learning/media/how-to-responsible-ai-dashboard-ui/model-page.png b/articles/machine-learning/media/how-to-responsible-ai-dashboard-ui/model-page.png index 9978bd88ef378..1df800e95a3a8 100644 Binary files a/articles/machine-learning/media/how-to-responsible-ai-dashboard-ui/model-page.png and b/articles/machine-learning/media/how-to-responsible-ai-dashboard-ui/model-page.png differ diff --git a/articles/machine-learning/media/how-to-retrain-designer/download-model.png b/articles/machine-learning/media/how-to-retrain-designer/download-model.png deleted file mode 100644 index 224e142d25f8e..0000000000000 Binary files a/articles/machine-learning/media/how-to-retrain-designer/download-model.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-retrain-designer/pipeline-endpoint.png b/articles/machine-learning/media/how-to-retrain-designer/pipeline-endpoint.png deleted file mode 100644 index 4f534ec00ba0e..0000000000000 Binary files a/articles/machine-learning/media/how-to-retrain-designer/pipeline-endpoint.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-retrain-designer/trained-model-view-output.png b/articles/machine-learning/media/how-to-retrain-designer/trained-model-view-output.png deleted file mode 100644 index 436d8e3f741c4..0000000000000 Binary files a/articles/machine-learning/media/how-to-retrain-designer/trained-model-view-output.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-run-batch-predictions-designer/register-transformation-dataset.png b/articles/machine-learning/media/how-to-run-batch-predictions-designer/register-transformation-dataset.png deleted file mode 100644 index 32c3f8d69f5c2..0000000000000 Binary files a/articles/machine-learning/media/how-to-run-batch-predictions-designer/register-transformation-dataset.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-run-batch-predictions-designer/replace-td-module-batch-inference-pipeline.png b/articles/machine-learning/media/how-to-run-batch-predictions-designer/replace-td-module-batch-inference-pipeline.png deleted file mode 100644 index cec13098727c0..0000000000000 Binary files a/articles/machine-learning/media/how-to-run-batch-predictions-designer/replace-td-module-batch-inference-pipeline.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-run-batch-predictions-designer/rest-endpoint-details.png b/articles/machine-learning/media/how-to-run-batch-predictions-designer/rest-endpoint-details.png deleted file mode 100644 index 6286598de62df..0000000000000 Binary files a/articles/machine-learning/media/how-to-run-batch-predictions-designer/rest-endpoint-details.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-run-batch-predictions-designer/set-dataset-as-pipeline-parameter.png 
b/articles/machine-learning/media/how-to-run-batch-predictions-designer/set-dataset-as-pipeline-parameter.png deleted file mode 100644 index df635400bf713..0000000000000 Binary files a/articles/machine-learning/media/how-to-run-batch-predictions-designer/set-dataset-as-pipeline-parameter.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-run-jupyter-notebooks/alt-open-terminal.png b/articles/machine-learning/media/how-to-run-jupyter-notebooks/alt-open-terminal.png deleted file mode 100644 index fed70491be00c..0000000000000 Binary files a/articles/machine-learning/media/how-to-run-jupyter-notebooks/alt-open-terminal.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-run-jupyter-notebooks/open-terminal.png b/articles/machine-learning/media/how-to-run-jupyter-notebooks/open-terminal.png deleted file mode 100644 index 931854ceb5459..0000000000000 Binary files a/articles/machine-learning/media/how-to-run-jupyter-notebooks/open-terminal.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-secure-web-service/aks-public-ip-address-expanded.png b/articles/machine-learning/media/how-to-secure-web-service/aks-public-ip-address-expanded.png deleted file mode 100644 index e181c77c653be..0000000000000 Binary files a/articles/machine-learning/media/how-to-secure-web-service/aks-public-ip-address-expanded.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-secure-web-service/aks-public-ip-address.png b/articles/machine-learning/media/how-to-secure-web-service/aks-public-ip-address.png deleted file mode 100644 index 88b8cb5b5b359..0000000000000 Binary files a/articles/machine-learning/media/how-to-secure-web-service/aks-public-ip-address.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-secure-workspace-vnet/acr-private-endpoint.png b/articles/machine-learning/media/how-to-secure-workspace-vnet/acr-private-endpoint.png deleted file mode 100644 index 5d5eeed0927eb..0000000000000 Binary files a/articles/machine-learning/media/how-to-secure-workspace-vnet/acr-private-endpoint.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-secure-workspace-vnet/create-compute-cluster-config.png b/articles/machine-learning/media/how-to-secure-workspace-vnet/create-compute-cluster-config.png deleted file mode 100644 index d38b9c53dfba5..0000000000000 Binary files a/articles/machine-learning/media/how-to-secure-workspace-vnet/create-compute-cluster-config.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-set-up-training-targets/add-compute-target.png b/articles/machine-learning/media/how-to-set-up-training-targets/add-compute-target.png deleted file mode 100644 index 56149656faa93..0000000000000 Binary files a/articles/machine-learning/media/how-to-set-up-training-targets/add-compute-target.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-set-up-training-targets/azure-machine-learning-service-workspace-expanded.png b/articles/machine-learning/media/how-to-set-up-training-targets/azure-machine-learning-service-workspace-expanded.png deleted file mode 100644 index 5db2d2832d46e..0000000000000 Binary files a/articles/machine-learning/media/how-to-set-up-training-targets/azure-machine-learning-service-workspace-expanded.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-set-up-training-targets/azure-machine-learning-service-workspace.png 
b/articles/machine-learning/media/how-to-set-up-training-targets/azure-machine-learning-service-workspace.png deleted file mode 100644 index 2aee9bfdb44fe..0000000000000 Binary files a/articles/machine-learning/media/how-to-set-up-training-targets/azure-machine-learning-service-workspace.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-set-up-training-targets/compute-target-details.png b/articles/machine-learning/media/how-to-set-up-training-targets/compute-target-details.png deleted file mode 100644 index a70fcd689f703..0000000000000 Binary files a/articles/machine-learning/media/how-to-set-up-training-targets/compute-target-details.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-set-up-training-targets/view_list.png b/articles/machine-learning/media/how-to-set-up-training-targets/view_list.png deleted file mode 100644 index 3f60375417b10..0000000000000 Binary files a/articles/machine-learning/media/how-to-set-up-training-targets/view_list.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-set-up-vs-code-remote/studio-notebook-compute-instance-vs-code-launch.png b/articles/machine-learning/media/how-to-set-up-vs-code-remote/studio-notebook-compute-instance-vs-code-launch.png index aafe87f001874..29b2c3c47ede2 100644 Binary files a/articles/machine-learning/media/how-to-set-up-vs-code-remote/studio-notebook-compute-instance-vs-code-launch.png and b/articles/machine-learning/media/how-to-set-up-vs-code-remote/studio-notebook-compute-instance-vs-code-launch.png differ diff --git a/articles/machine-learning/media/how-to-setup-vs-code/vs-code-extension.png b/articles/machine-learning/media/how-to-setup-vs-code/vs-code-extension.png deleted file mode 100644 index ea3fb0c67e2d6..0000000000000 Binary files a/articles/machine-learning/media/how-to-setup-vs-code/vs-code-extension.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-track-monitor-analyze-runs/display-name-runs-list.png b/articles/machine-learning/media/how-to-track-monitor-analyze-runs/display-name-runs-list.png deleted file mode 100644 index b13e44ef39fcd..0000000000000 Binary files a/articles/machine-learning/media/how-to-track-monitor-analyze-runs/display-name-runs-list.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-track-monitor-analyze-runs/run-history.png b/articles/machine-learning/media/how-to-track-monitor-analyze-runs/run-history.png deleted file mode 100644 index 8fd4952bd03fa..0000000000000 Binary files a/articles/machine-learning/media/how-to-track-monitor-analyze-runs/run-history.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-train-with-ui/experiment-entry.png b/articles/machine-learning/media/how-to-train-with-ui/experiment-entry.png deleted file mode 100644 index 05ffb468967fa..0000000000000 Binary files a/articles/machine-learning/media/how-to-train-with-ui/experiment-entry.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-train-with-ui/home-entry.png b/articles/machine-learning/media/how-to-train-with-ui/home-entry.png index da4eb52aaadcf..cd364db9e7c44 100644 Binary files a/articles/machine-learning/media/how-to-train-with-ui/home-entry.png and b/articles/machine-learning/media/how-to-train-with-ui/home-entry.png differ diff --git a/articles/machine-learning/media/how-to-train-with-ui/left-nav-entry.png b/articles/machine-learning/media/how-to-train-with-ui/left-nav-entry.png index dfc4044d39f64..96153ec688413 100644 Binary 
files a/articles/machine-learning/media/how-to-train-with-ui/left-nav-entry.png and b/articles/machine-learning/media/how-to-train-with-ui/left-nav-entry.png differ diff --git a/articles/machine-learning/media/how-to-trigger-published-pipeline/scheduled-pipelines.png b/articles/machine-learning/media/how-to-trigger-published-pipeline/scheduled-pipelines.png index 4bd5c082391c1..6d2ca8cacfb6f 100644 Binary files a/articles/machine-learning/media/how-to-trigger-published-pipeline/scheduled-pipelines.png and b/articles/machine-learning/media/how-to-trigger-published-pipeline/scheduled-pipelines.png differ diff --git a/articles/machine-learning/media/how-to-understand-automated-ml/how-to-feature-importance.gif b/articles/machine-learning/media/how-to-understand-automated-ml/how-to-feature-importance.gif deleted file mode 100644 index bdfe1e6d6ac49..0000000000000 Binary files a/articles/machine-learning/media/how-to-understand-automated-ml/how-to-feature-importance.gif and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/hyperparameter-button.png b/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/hyperparameter-button.png index 1532d9b5bcc05..b16cab3fca1b2 100644 Binary files a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/hyperparameter-button.png and b/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/hyperparameter-button.png differ diff --git a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/iteration-details.png b/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/iteration-details.png deleted file mode 100644 index 88f4b7d561547..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/iteration-details.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/nav-pane-expanded.png b/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/nav-pane-expanded.png index ae74ffec471b6..f776fce9151dc 100644 Binary files a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/nav-pane-expanded.png and b/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/nav-pane-expanded.png differ diff --git a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/nav-pane.png b/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/nav-pane.png index ecc5989d682be..d4ed5f5c7ed78 100644 Binary files a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/nav-pane.png and b/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/nav-pane.png differ diff --git a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/run-details-expanded.png b/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/run-details-expanded.png deleted file mode 100644 index 2ee6ed87593c5..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/run-details-expanded.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/run-details.png b/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/run-details.png deleted file mode 100644 index b73a5170ae89a..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/run-details.png and /dev/null differ diff --git 
a/articles/machine-learning/media/how-to-use-batch-endpoint/two-deployments.png b/articles/machine-learning/media/how-to-use-batch-endpoint/two-deployments.png deleted file mode 100644 index a8edb3c98fe19..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-batch-endpoint/two-deployments.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-batch-endpoint/view-data-outputs.png b/articles/machine-learning/media/how-to-use-batch-endpoint/view-data-outputs.png index b33829cfd3be3..1ccac901b5a4b 100644 Binary files a/articles/machine-learning/media/how-to-use-batch-endpoint/view-data-outputs.png and b/articles/machine-learning/media/how-to-use-batch-endpoint/view-data-outputs.png differ diff --git a/articles/machine-learning/media/how-to-use-batch-endpoints-studio/create-aml-compute.png b/articles/machine-learning/media/how-to-use-batch-endpoints-studio/create-aml-compute.png deleted file mode 100644 index 20ee416d0b229..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-batch-endpoints-studio/create-aml-compute.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-environments/ml-environment.png b/articles/machine-learning/media/how-to-use-environments/ml-environment.png deleted file mode 100644 index 2e51a515bab85..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-environments/ml-environment.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-event-grid/add-event-type.png b/articles/machine-learning/media/how-to-use-event-grid/add-event-type.png deleted file mode 100644 index c494ac5bbe4ff..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-event-grid/add-event-type.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-event-grid/filtering-events.png b/articles/machine-learning/media/how-to-use-event-grid/filtering-events.png deleted file mode 100644 index 5e5a3f9df701f..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-event-grid/filtering-events.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-event-grid/re-register-resource-provider.png b/articles/machine-learning/media/how-to-use-event-grid/re-register-resource-provider.png deleted file mode 100644 index d9c871b2ba1bf..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-event-grid/re-register-resource-provider.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-event-grid/specify-adf-pipeline.png b/articles/machine-learning/media/how-to-use-event-grid/specify-adf-pipeline.png index e0497a5eaf4ab..ca3242a0ad6d0 100644 Binary files a/articles/machine-learning/media/how-to-use-event-grid/specify-adf-pipeline.png and b/articles/machine-learning/media/how-to-use-event-grid/specify-adf-pipeline.png differ diff --git a/articles/machine-learning/media/how-to-use-labeled-dataset/export-button.png b/articles/machine-learning/media/how-to-use-labeled-dataset/export-button.png index 0cf10b4cfa2ab..2f01b6a1184b8 100644 Binary files a/articles/machine-learning/media/how-to-use-labeled-dataset/export-button.png and b/articles/machine-learning/media/how-to-use-labeled-dataset/export-button.png differ diff --git a/articles/machine-learning/media/how-to-use-mlflow-cli-runs/delete-resources.png b/articles/machine-learning/media/how-to-use-mlflow-cli-runs/delete-resources.png deleted file mode 100644 index 6937913e40093..0000000000000 Binary files 
a/articles/machine-learning/media/how-to-use-mlflow-cli-runs/delete-resources.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-mlflow-cli-runs/registered-mlflow-model.png b/articles/machine-learning/media/how-to-use-mlflow-cli-runs/registered-mlflow-model.png index b78b70137e06a..108450babe43b 100644 Binary files a/articles/machine-learning/media/how-to-use-mlflow-cli-runs/registered-mlflow-model.png and b/articles/machine-learning/media/how-to-use-mlflow-cli-runs/registered-mlflow-model.png differ diff --git a/articles/machine-learning/media/how-to-use-pipeline-parameter/current-pipeline-parameter.png b/articles/machine-learning/media/how-to-use-pipeline-parameter/current-pipeline-parameter.png deleted file mode 100644 index f5d2709d7b532..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-pipeline-parameter/current-pipeline-parameter.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-pipeline-parameter/delete-pipeline-parameter.png b/articles/machine-learning/media/how-to-use-pipeline-parameter/delete-pipeline-parameter.png deleted file mode 100644 index b4694716ce8bd..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-pipeline-parameter/delete-pipeline-parameter.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-pipeline-parameter/detach-from-pipeline-parameter.png b/articles/machine-learning/media/how-to-use-pipeline-parameter/detach-from-pipeline-parameter.png deleted file mode 100644 index ea51c2cd854a8..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-pipeline-parameter/detach-from-pipeline-parameter.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-pipeline-parameter/pipeline-parameter-sample.png b/articles/machine-learning/media/how-to-use-pipeline-parameter/pipeline-parameter-sample.png deleted file mode 100644 index 760aef1164d0b..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-pipeline-parameter/pipeline-parameter-sample.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-pipeline-parameter/promote-module-para-to-pipeline-para2.png b/articles/machine-learning/media/how-to-use-pipeline-parameter/promote-module-para-to-pipeline-para2.png deleted file mode 100644 index 8337d0ae8a001..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-pipeline-parameter/promote-module-para-to-pipeline-para2.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-pipeline-ui/compare-detail.png b/articles/machine-learning/media/how-to-use-pipeline-ui/compare-detail.png deleted file mode 100644 index e2cc80c69adda..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-pipeline-ui/compare-detail.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-pipeline-ui/compare-graph-in-detail.png b/articles/machine-learning/media/how-to-use-pipeline-ui/compare-graph-in-detail.png deleted file mode 100644 index e82ffdb4f1154..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-pipeline-ui/compare-graph-in-detail.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-pipeline-ui/compare-graph.png b/articles/machine-learning/media/how-to-use-pipeline-ui/compare-graph.png deleted file mode 100644 index cb05a8d062ffe..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-pipeline-ui/compare-graph.png and /dev/null differ diff 
--git a/articles/machine-learning/media/how-to-use-pipeline-ui/compare-parameter.png b/articles/machine-learning/media/how-to-use-pipeline-ui/compare-parameter.png deleted file mode 100644 index 03c62cdfebb79..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-pipeline-ui/compare-parameter.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-pipeline-ui/job-overview-compare.png b/articles/machine-learning/media/how-to-use-pipeline-ui/job-overview-compare.png deleted file mode 100644 index 7eef22b73daa5..0000000000000 Binary files a/articles/machine-learning/media/how-to-use-pipeline-ui/job-overview-compare.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-sweep-in-pipeline/pipeline-view.png b/articles/machine-learning/media/how-to-use-sweep-in-pipeline/pipeline-view.png index a3e6ebdca9b02..34083dac92107 100644 Binary files a/articles/machine-learning/media/how-to-use-sweep-in-pipeline/pipeline-view.png and b/articles/machine-learning/media/how-to-use-sweep-in-pipeline/pipeline-view.png differ diff --git a/articles/machine-learning/media/how-to-version-track-datasets/dataset-models.png b/articles/machine-learning/media/how-to-version-track-datasets/dataset-models.png index a2900057df23c..94874ca23fa8a 100644 Binary files a/articles/machine-learning/media/how-to-version-track-datasets/dataset-models.png and b/articles/machine-learning/media/how-to-version-track-datasets/dataset-models.png differ diff --git a/articles/machine-learning/media/index/i_machine-learning.svg b/articles/machine-learning/media/index/i_machine-learning.svg deleted file mode 100644 index a63c07676fdad..0000000000000 --- a/articles/machine-learning/media/index/i_machine-learning.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/articles/machine-learning/media/index/machine-learning-studio.svg b/articles/machine-learning/media/index/machine-learning-studio.svg deleted file mode 100644 index d22ff0e414dca..0000000000000 --- a/articles/machine-learning/media/index/machine-learning-studio.svg +++ /dev/null @@ -1,15 +0,0 @@ - - - - - - - - - - diff --git a/articles/machine-learning/media/index/virtualmachine.svg b/articles/machine-learning/media/index/virtualmachine.svg deleted file mode 100644 index 2079c75e59d70..0000000000000 --- a/articles/machine-learning/media/index/virtualmachine.svg +++ /dev/null @@ -1,27 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/articles/machine-learning/media/migrate-overview/aml-dataset.png b/articles/machine-learning/media/migrate-overview/aml-dataset.png deleted file mode 100644 index c982c4ec9e1a5..0000000000000 Binary files a/articles/machine-learning/media/migrate-overview/aml-dataset.png and /dev/null differ diff --git a/articles/machine-learning/media/migrate-overview/aml-endpoint.png b/articles/machine-learning/media/migrate-overview/aml-endpoint.png index a2c1f7704084a..d44b12b7ad75e 100644 Binary files a/articles/machine-learning/media/migrate-overview/aml-endpoint.png and b/articles/machine-learning/media/migrate-overview/aml-endpoint.png differ diff --git a/articles/machine-learning/media/migrate-rebuild-web-service/create-retraining-pipeline.png b/articles/machine-learning/media/migrate-rebuild-web-service/create-retraining-pipeline.png index 308074b2edde0..8b1d1533bec96 100644 Binary files a/articles/machine-learning/media/migrate-rebuild-web-service/create-retraining-pipeline.png and 
b/articles/machine-learning/media/migrate-rebuild-web-service/create-retraining-pipeline.png differ diff --git a/articles/machine-learning/media/migrate-rebuild-web-service/test-realtime-endpoint.png b/articles/machine-learning/media/migrate-rebuild-web-service/test-realtime-endpoint.png index 862732a9b7835..35d1a2a6c0963 100644 Binary files a/articles/machine-learning/media/migrate-rebuild-web-service/test-realtime-endpoint.png and b/articles/machine-learning/media/migrate-rebuild-web-service/test-realtime-endpoint.png differ diff --git a/articles/machine-learning/media/migrate-register-dataset/register-dataset.png b/articles/machine-learning/media/migrate-register-dataset/register-dataset.png index 8ecaff9f321da..30cbca27ad5f9 100644 Binary files a/articles/machine-learning/media/migrate-register-dataset/register-dataset.png and b/articles/machine-learning/media/migrate-register-dataset/register-dataset.png differ diff --git a/articles/machine-learning/media/overview-what-is-azure-machine-learning/designer-drag-and-drop.gif b/articles/machine-learning/media/overview-what-is-azure-machine-learning/designer-drag-and-drop.gif deleted file mode 100644 index 05b3a2fb013a9..0000000000000 Binary files a/articles/machine-learning/media/overview-what-is-azure-machine-learning/designer-drag-and-drop.gif and /dev/null differ diff --git a/articles/machine-learning/media/overview-what-is-azure-machine-learning/placeholder-ml-development-cycle.png b/articles/machine-learning/media/overview-what-is-azure-machine-learning/placeholder-ml-development-cycle.png deleted file mode 100644 index 6fca971189e5c..0000000000000 Binary files a/articles/machine-learning/media/overview-what-is-azure-machine-learning/placeholder-ml-development-cycle.png and /dev/null differ diff --git a/articles/machine-learning/media/overview-what-is-azure-ml-studio/azure-machine-learning-automated-ml-ui.jpg b/articles/machine-learning/media/overview-what-is-azure-ml-studio/azure-machine-learning-automated-ml-ui.jpg index 9a2cd6985f50b..3128147aafae5 100644 Binary files a/articles/machine-learning/media/overview-what-is-azure-ml-studio/azure-machine-learning-automated-ml-ui.jpg and b/articles/machine-learning/media/overview-what-is-azure-ml-studio/azure-machine-learning-automated-ml-ui.jpg differ diff --git a/articles/machine-learning/media/resource-known-issues/aml-visualize-data.png b/articles/machine-learning/media/resource-known-issues/aml-visualize-data.png deleted file mode 100644 index 6ab8ad15f9cca..0000000000000 Binary files a/articles/machine-learning/media/resource-known-issues/aml-visualize-data.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-1st-experiment-bring-data/directory-structure.png b/articles/machine-learning/media/tutorial-1st-experiment-bring-data/directory-structure.png deleted file mode 100644 index 67242190d87c5..0000000000000 Binary files a/articles/machine-learning/media/tutorial-1st-experiment-bring-data/directory-structure.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-1st-experiment-hello-world/create-folder.png b/articles/machine-learning/media/tutorial-1st-experiment-hello-world/create-folder.png index ae78cae3e7d03..bc56d8bcc40a4 100644 Binary files a/articles/machine-learning/media/tutorial-1st-experiment-hello-world/create-folder.png and b/articles/machine-learning/media/tutorial-1st-experiment-hello-world/create-folder.png differ diff --git a/articles/machine-learning/media/tutorial-1st-experiment-hello-world/create-sub-folder.png 
b/articles/machine-learning/media/tutorial-1st-experiment-hello-world/create-sub-folder.png index 28dd6be826c42..3e5e2ca6e357b 100644 Binary files a/articles/machine-learning/media/tutorial-1st-experiment-hello-world/create-sub-folder.png and b/articles/machine-learning/media/tutorial-1st-experiment-hello-world/create-sub-folder.png differ diff --git a/articles/machine-learning/media/tutorial-1st-experiment-sdk-local/compute-instance-in-studio.png b/articles/machine-learning/media/tutorial-1st-experiment-sdk-local/compute-instance-in-studio.png deleted file mode 100644 index 6c3bd28e269fe..0000000000000 Binary files a/articles/machine-learning/media/tutorial-1st-experiment-sdk-local/compute-instance-in-studio.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-1st-experiment-sdk-local/directory-structure-1.png b/articles/machine-learning/media/tutorial-1st-experiment-sdk-local/directory-structure-1.png deleted file mode 100644 index bcfa4242e17ed..0000000000000 Binary files a/articles/machine-learning/media/tutorial-1st-experiment-sdk-local/directory-structure-1.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-1st-experiment-sdk-local/directory-structure-2.png b/articles/machine-learning/media/tutorial-1st-experiment-sdk-local/directory-structure-2.png deleted file mode 100644 index 8fc1e7bd26a80..0000000000000 Binary files a/articles/machine-learning/media/tutorial-1st-experiment-sdk-local/directory-structure-2.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-1st-experiment-sdk-local/directory-structure-3.png b/articles/machine-learning/media/tutorial-1st-experiment-sdk-local/directory-structure-3.png deleted file mode 100644 index 151cc4f68b202..0000000000000 Binary files a/articles/machine-learning/media/tutorial-1st-experiment-sdk-local/directory-structure-3.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-1st-experiment-sdk-setup/clone-tutorials.png b/articles/machine-learning/media/tutorial-1st-experiment-sdk-setup/clone-tutorials.png index 3c0c713447599..320a5e745f8d9 100644 Binary files a/articles/machine-learning/media/tutorial-1st-experiment-sdk-setup/clone-tutorials.png and b/articles/machine-learning/media/tutorial-1st-experiment-sdk-setup/clone-tutorials.png differ diff --git a/articles/machine-learning/media/tutorial-1st-experiment-sdk-setup/expand-user-folder.png b/articles/machine-learning/media/tutorial-1st-experiment-sdk-setup/expand-user-folder.png deleted file mode 100644 index 18d9a05cb633d..0000000000000 Binary files a/articles/machine-learning/media/tutorial-1st-experiment-sdk-setup/expand-user-folder.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-1st-experiment-sdk-train/experiment-main.png b/articles/machine-learning/media/tutorial-1st-experiment-sdk-train/experiment-main.png deleted file mode 100644 index ffd51beb11ac5..0000000000000 Binary files a/articles/machine-learning/media/tutorial-1st-experiment-sdk-train/experiment-main.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-1st-experiment-sdk-train/model-download.png b/articles/machine-learning/media/tutorial-1st-experiment-sdk-train/model-download.png deleted file mode 100644 index 51a788e8868e5..0000000000000 Binary files a/articles/machine-learning/media/tutorial-1st-experiment-sdk-train/model-download.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-1st-r-experiment/clone-folder.png 
b/articles/machine-learning/media/tutorial-1st-r-experiment/clone-folder.png deleted file mode 100644 index 16184d85c3ec5..0000000000000 Binary files a/articles/machine-learning/media/tutorial-1st-r-experiment/clone-folder.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-1st-r-experiment/rstudio.png b/articles/machine-learning/media/tutorial-1st-r-experiment/rstudio.png deleted file mode 100644 index 2d6c6dcc52b7e..0000000000000 Binary files a/articles/machine-learning/media/tutorial-1st-r-experiment/rstudio.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-create-secure-workspace/create-compute-instance-settings.png b/articles/machine-learning/media/tutorial-create-secure-workspace/create-compute-instance-settings.png index d92e14e03e39c..df403817eae2c 100644 Binary files a/articles/machine-learning/media/tutorial-create-secure-workspace/create-compute-instance-settings.png and b/articles/machine-learning/media/tutorial-create-secure-workspace/create-compute-instance-settings.png differ diff --git a/articles/machine-learning/media/tutorial-create-secure-workspace/create-nsg.png b/articles/machine-learning/media/tutorial-create-secure-workspace/create-nsg.png deleted file mode 100644 index 00d45039cb2b5..0000000000000 Binary files a/articles/machine-learning/media/tutorial-create-secure-workspace/create-nsg.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-create-secure-workspace/nsg-associate-subnet.png b/articles/machine-learning/media/tutorial-create-secure-workspace/nsg-associate-subnet.png deleted file mode 100644 index 0cdccaad1eaa5..0000000000000 Binary files a/articles/machine-learning/media/tutorial-create-secure-workspace/nsg-associate-subnet.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-create-secure-workspace/nsg-azureml.png b/articles/machine-learning/media/tutorial-create-secure-workspace/nsg-azureml.png deleted file mode 100644 index f3c7e22692641..0000000000000 Binary files a/articles/machine-learning/media/tutorial-create-secure-workspace/nsg-azureml.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-create-secure-workspace/nsg-batchnodemanagement.png b/articles/machine-learning/media/tutorial-create-secure-workspace/nsg-batchnodemanagement.png deleted file mode 100644 index d445d609d781e..0000000000000 Binary files a/articles/machine-learning/media/tutorial-create-secure-workspace/nsg-batchnodemanagement.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-create-secure-workspace/nsg-inbound-security-rules.png b/articles/machine-learning/media/tutorial-create-secure-workspace/nsg-inbound-security-rules.png deleted file mode 100644 index 04c8a8920d730..0000000000000 Binary files a/articles/machine-learning/media/tutorial-create-secure-workspace/nsg-inbound-security-rules.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-create-secure-workspace/storage-access-control.png b/articles/machine-learning/media/tutorial-create-secure-workspace/storage-access-control.png deleted file mode 100644 index 8e21c581e9751..0000000000000 Binary files a/articles/machine-learning/media/tutorial-create-secure-workspace/storage-access-control.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-create-secure-workspace/storage-add-blob-data-contributor.png b/articles/machine-learning/media/tutorial-create-secure-workspace/storage-add-blob-data-contributor.png deleted 
file mode 100644 index 97c7ddc43a820..0000000000000 Binary files a/articles/machine-learning/media/tutorial-create-secure-workspace/storage-add-blob-data-contributor.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-create-secure-workspace/storage-add-role.png b/articles/machine-learning/media/tutorial-create-secure-workspace/storage-add-role.png deleted file mode 100644 index 8383c117e5c86..0000000000000 Binary files a/articles/machine-learning/media/tutorial-create-secure-workspace/storage-add-role.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-create-secure-workspace/storage-blob-private-endpoint.png b/articles/machine-learning/media/tutorial-create-secure-workspace/storage-blob-private-endpoint.png deleted file mode 100644 index 86ce71ccb2b9e..0000000000000 Binary files a/articles/machine-learning/media/tutorial-create-secure-workspace/storage-blob-private-endpoint.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-create-secure-workspace/storage-private-endpoint-add-role.png b/articles/machine-learning/media/tutorial-create-secure-workspace/storage-private-endpoint-add-role.png deleted file mode 100644 index 08309067de819..0000000000000 Binary files a/articles/machine-learning/media/tutorial-create-secure-workspace/storage-private-endpoint-add-role.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-create-secure-workspace/storage-private-endpoint-add-workspace.png b/articles/machine-learning/media/tutorial-create-secure-workspace/storage-private-endpoint-add-workspace.png deleted file mode 100644 index d1a0dc7946cfa..0000000000000 Binary files a/articles/machine-learning/media/tutorial-create-secure-workspace/storage-private-endpoint-add-workspace.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-create-secure-workspace/studio-new-compute-cluster.png b/articles/machine-learning/media/tutorial-create-secure-workspace/studio-new-compute-cluster.png index 6fe38ad387283..e9b88bfc9c103 100644 Binary files a/articles/machine-learning/media/tutorial-create-secure-workspace/studio-new-compute-cluster.png and b/articles/machine-learning/media/tutorial-create-secure-workspace/studio-new-compute-cluster.png differ diff --git a/articles/machine-learning/media/tutorial-create-secure-workspace/studio-select-service-principal.png b/articles/machine-learning/media/tutorial-create-secure-workspace/studio-select-service-principal.png deleted file mode 100644 index 2d5343a5c18a0..0000000000000 Binary files a/articles/machine-learning/media/tutorial-create-secure-workspace/studio-select-service-principal.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-deploy-models-with-aml/confusion.png b/articles/machine-learning/media/tutorial-deploy-models-with-aml/confusion.png deleted file mode 100644 index 6b8408fd27890..0000000000000 Binary files a/articles/machine-learning/media/tutorial-deploy-models-with-aml/confusion.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-deploy-models-with-aml/results.png b/articles/machine-learning/media/tutorial-deploy-models-with-aml/results.png deleted file mode 100644 index 53103afe5b87e..0000000000000 Binary files a/articles/machine-learning/media/tutorial-deploy-models-with-aml/results.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/create-inference-pipeline.png 
b/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/create-inference-pipeline.png deleted file mode 100644 index 33d6141785297..0000000000000 Binary files a/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/create-inference-pipeline.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/new-inference-cluster.png b/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/new-inference-cluster.png index b1244568935c6..9d02df182a553 100644 Binary files a/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/new-inference-cluster.png and b/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/new-inference-cluster.png differ diff --git a/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/register-train-model-as-dataset-2.png b/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/register-train-model-as-dataset-2.png deleted file mode 100644 index 70906cadaa7e2..0000000000000 Binary files a/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/register-train-model-as-dataset-2.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/register-train-model-as-dataset.png b/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/register-train-model-as-dataset.png deleted file mode 100644 index 485ba54078558..0000000000000 Binary files a/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/register-train-model-as-dataset.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/register-transformation-dataset.png b/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/register-transformation-dataset.png deleted file mode 100644 index 9be80591c250f..0000000000000 Binary files a/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/register-transformation-dataset.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/test-endpoint.png b/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/test-endpoint.png deleted file mode 100644 index c1a3bf4ae2d55..0000000000000 Binary files a/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/test-endpoint.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/tutorial2-create-inference-pipeline.png b/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/tutorial2-create-inference-pipeline.png deleted file mode 100644 index 07571c42ef322..0000000000000 Binary files a/articles/machine-learning/media/tutorial-designer-automobile-price-deploy/tutorial2-create-inference-pipeline.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-designer-automobile-price-train-score/job-detail-page.png b/articles/machine-learning/media/tutorial-designer-automobile-price-train-score/job-detail-page.png deleted file mode 100644 index 14fddc9147204..0000000000000 Binary files a/articles/machine-learning/media/tutorial-designer-automobile-price-train-score/job-detail-page.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-designer-automobile-price-train-score/launch-designer.png 
b/articles/machine-learning/media/tutorial-designer-automobile-price-train-score/launch-designer.png index 0ad4ddbf52f70..bf4801a346d32 100644 Binary files a/articles/machine-learning/media/tutorial-designer-automobile-price-train-score/launch-designer.png and b/articles/machine-learning/media/tutorial-designer-automobile-price-train-score/launch-designer.png differ diff --git a/articles/machine-learning/media/tutorial-designer-automobile-price-train-score/visualize-data.png b/articles/machine-learning/media/tutorial-designer-automobile-price-train-score/visualize-data.png deleted file mode 100644 index c0a3c52780afd..0000000000000 Binary files a/articles/machine-learning/media/tutorial-designer-automobile-price-train-score/visualize-data.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-first-experiment-automated-ml/featurization-setting-config.gif b/articles/machine-learning/media/tutorial-first-experiment-automated-ml/featurization-setting-config.gif deleted file mode 100644 index e46283bbd316c..0000000000000 Binary files a/articles/machine-learning/media/tutorial-first-experiment-automated-ml/featurization-setting-config.gif and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-first-experiment-automated-ml/get-started.png b/articles/machine-learning/media/tutorial-first-experiment-automated-ml/get-started.png index ecc5989d682be..f21679a7746ab 100644 Binary files a/articles/machine-learning/media/tutorial-first-experiment-automated-ml/get-started.png and b/articles/machine-learning/media/tutorial-first-experiment-automated-ml/get-started.png differ diff --git a/articles/machine-learning/media/tutorial-power-bi/compute-green.png b/articles/machine-learning/media/tutorial-power-bi/compute-green.png deleted file mode 100644 index 5f85a155ff398..0000000000000 Binary files a/articles/machine-learning/media/tutorial-power-bi/compute-green.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-power-bi/configure-task.png b/articles/machine-learning/media/tutorial-power-bi/configure-task.png index 1501c7c7f17b6..3a4d19f9ca3b8 100644 Binary files a/articles/machine-learning/media/tutorial-power-bi/configure-task.png and b/articles/machine-learning/media/tutorial-power-bi/configure-task.png differ diff --git a/articles/machine-learning/media/tutorial-power-bi/create-cluster.png b/articles/machine-learning/media/tutorial-power-bi/create-cluster.png index 85fb75d2e5f74..758276d20545f 100644 Binary files a/articles/machine-learning/media/tutorial-power-bi/create-cluster.png and b/articles/machine-learning/media/tutorial-power-bi/create-cluster.png differ diff --git a/articles/machine-learning/media/tutorial-power-bi/create-compute-cluster.png b/articles/machine-learning/media/tutorial-power-bi/create-compute-cluster.png index e5f5837a8fd51..53ed79928c359 100644 Binary files a/articles/machine-learning/media/tutorial-power-bi/create-compute-cluster.png and b/articles/machine-learning/media/tutorial-power-bi/create-compute-cluster.png differ diff --git a/articles/machine-learning/media/tutorial-power-bi/create-dataset.png b/articles/machine-learning/media/tutorial-power-bi/create-dataset.png index 5b2d0da747c92..bc29e0baf0d32 100644 Binary files a/articles/machine-learning/media/tutorial-power-bi/create-dataset.png and b/articles/machine-learning/media/tutorial-power-bi/create-dataset.png differ diff --git a/articles/machine-learning/media/tutorial-power-bi/create-designer.png 
b/articles/machine-learning/media/tutorial-power-bi/create-designer.png deleted file mode 100644 index 90050afb5cff1..0000000000000 Binary files a/articles/machine-learning/media/tutorial-power-bi/create-designer.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-power-bi/create-new-compute.png b/articles/machine-learning/media/tutorial-power-bi/create-new-compute.png index 39dcd935aa0a6..1809f0490d23b 100644 Binary files a/articles/machine-learning/media/tutorial-power-bi/create-new-compute.png and b/articles/machine-learning/media/tutorial-power-bi/create-new-compute.png differ diff --git a/articles/machine-learning/media/tutorial-power-bi/create-new-notebook.png b/articles/machine-learning/media/tutorial-power-bi/create-new-notebook.png index 8e035609e9212..2a291518ea6c7 100644 Binary files a/articles/machine-learning/media/tutorial-power-bi/create-new-notebook.png and b/articles/machine-learning/media/tutorial-power-bi/create-new-notebook.png differ diff --git a/articles/machine-learning/media/tutorial-power-bi/create-new-run.png b/articles/machine-learning/media/tutorial-power-bi/create-new-run.png index da983ff3f6d0d..bc21939ef9e69 100644 Binary files a/articles/machine-learning/media/tutorial-power-bi/create-new-run.png and b/articles/machine-learning/media/tutorial-power-bi/create-new-run.png differ diff --git a/articles/machine-learning/media/tutorial-power-bi/data-refresh.png b/articles/machine-learning/media/tutorial-power-bi/data-refresh.png deleted file mode 100644 index 40acf9e5570c8..0000000000000 Binary files a/articles/machine-learning/media/tutorial-power-bi/data-refresh.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-power-bi/endpoint.png b/articles/machine-learning/media/tutorial-power-bi/endpoint.png index afb25fd620dba..052f1b51477c8 100644 Binary files a/articles/machine-learning/media/tutorial-power-bi/endpoint.png and b/articles/machine-learning/media/tutorial-power-bi/endpoint.png differ diff --git a/articles/machine-learning/media/tutorial-power-bi/model.png b/articles/machine-learning/media/tutorial-power-bi/model.png index 7ded762b5238c..508183a6c97d3 100644 Binary files a/articles/machine-learning/media/tutorial-power-bi/model.png and b/articles/machine-learning/media/tutorial-power-bi/model.png differ diff --git a/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/azure-activity-bar.png b/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/azure-activity-bar.png deleted file mode 100644 index fd1bcf0380112..0000000000000 Binary files a/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/azure-activity-bar.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/create-experiment.png b/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/create-experiment.png deleted file mode 100644 index 7aa44cfb47c2a..0000000000000 Binary files a/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/create-experiment.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/create-run-configuration.png b/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/create-run-configuration.png deleted file mode 100644 index 08becfb796c31..0000000000000 Binary files 
a/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/create-run-configuration.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/deploy-model.png b/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/deploy-model.png deleted file mode 100644 index 78c407b62ba87..0000000000000 Binary files a/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/deploy-model.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/download-outputs.png b/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/download-outputs.png deleted file mode 100644 index 2fd906eb9a343..0000000000000 Binary files a/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/download-outputs.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/experiment-run-on-portal.png b/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/experiment-run-on-portal.png deleted file mode 100644 index f958e27e91378..0000000000000 Binary files a/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/experiment-run-on-portal.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/register-model.png b/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/register-model.png deleted file mode 100644 index 26a4610f7d53b..0000000000000 Binary files a/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/register-model.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/run-experiment.png b/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/run-experiment.png deleted file mode 100644 index 71dfe349cfacc..0000000000000 Binary files a/articles/machine-learning/media/tutorial-train-deploy-image-classification-model-vscode/run-experiment.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-train-models-with-aml/digits.png b/articles/machine-learning/media/tutorial-train-models-with-aml/digits.png deleted file mode 100644 index d6b61c5821159..0000000000000 Binary files a/articles/machine-learning/media/tutorial-train-models-with-aml/digits.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-train-models-with-aml/widget.png b/articles/machine-learning/media/tutorial-train-models-with-aml/widget.png deleted file mode 100644 index 27cf2845784bb..0000000000000 Binary files a/articles/machine-learning/media/tutorial-train-models-with-aml/widget.png and /dev/null differ diff --git a/articles/machine-learning/migrate-overview.md b/articles/machine-learning/migrate-overview.md index a21b9c7e82b7b..4e400aa40a48d 100644 --- a/articles/machine-learning/migrate-overview.md +++ b/articles/machine-learning/migrate-overview.md @@ -146,7 +146,6 @@ In Studio (classic), **datasets** were saved in your workspace and could only be In Azure Machine Learning, **datasets** are registered to the workspace and can be used across all of Azure Machine Learning. 
For more information on the benefits of Azure Machine Learning datasets, see [Secure data access](./v1/concept-data.md). -![automobile-price-aml-dataset](./media/migrate-overview/aml-dataset.png) ### Pipeline diff --git a/articles/machine-learning/migrate-rebuild-integrate-with-client-app.md b/articles/machine-learning/migrate-rebuild-integrate-with-client-app.md index b3621620564ad..05c5101018fb0 100644 --- a/articles/machine-learning/migrate-rebuild-integrate-with-client-app.md +++ b/articles/machine-learning/migrate-rebuild-integrate-with-client-app.md @@ -1,14 +1,15 @@ --- -title: 'ML Studio (classic): Migrate to Azure Machine Learning - Consume pipeline endpoints' -description: Integrate pipeline endpoints with client applications in Azure Machine Learning. +title: 'Migrate to Azure Machine Learning - Consume pipeline endpoints' +description: Learn how to integrate pipeline endpoints with client applications in Azure Machine Learning as part of migrating from Machine Learning Studio (Classic). services: machine-learning ms.service: machine-learning ms.subservice: studio-classic ms.topic: how-to +ms.custom: kr2b-contr-experiment author: xiaoharper ms.author: zhanxia -ms.date: 03/08/2021 +ms.date: 05/31/2022 --- # Consume pipeline endpoints from client applications @@ -17,7 +18,7 @@ ms.date: 03/08/2021 In this article, you learn how to integrate client applications with Azure Machine Learning endpoints. For more information on writing application code, see [Consume an Azure Machine Learning endpoint](how-to-consume-web-service.md). -This article is part of the Studio (classic) to Azure Machine Learning migration series. For more information on migrating to Azure Machine Learning, see [the migration overview article](migrate-overview.md). +This article is part of the ML Studio (classic) to Azure Machine Learning migration series. For more information on migrating to Azure Machine Learning, see [the migration overview article](migrate-overview.md). ## Prerequisites @@ -25,10 +26,9 @@ This article is part of the Studio (classic) to Azure Machine Learning migration - An Azure Machine Learning workspace. [Create an Azure Machine Learning workspace](how-to-manage-workspace.md#create-a-workspace). - An [Azure Machine Learning real-time endpoint or pipeline endpoint](migrate-rebuild-web-service.md). +## Consume a real-time endpoint -## Consume a real-time endpoint - -If you deployed your model as a **real-time endpoint**, you can find its REST endpoint, and pre-generated consumption code in C#, Python, and R: +If you deployed your model as a *real-time endpoint*, you can find its REST endpoint, and pre-generated consumption code in C#, Python, and R: 1. Go to Azure Machine Learning studio ([ml.azure.com](https://ml.azure.com)). 1. Go the **Endpoints** tab. @@ -38,7 +38,6 @@ If you deployed your model as a **real-time endpoint**, you can find its REST en > [!NOTE] > You can also find the Swagger specification for your endpoint in the **Details** tab. Use the Swagger definition to understand your endpoint schema. For more information on Swagger definition, see [Swagger official documentation](https://swagger.io/docs/specification/2-0/what-is-swagger/). - ## Consume a pipeline endpoint There are two ways to consume a pipeline endpoint: @@ -60,15 +59,14 @@ Call the REST endpoint from your client application. You can use the Swagger spe You can call your Azure Machine Learning pipeline as a step in an Azure Data Factory pipeline. 
For more information, see [Execute Azure Machine Learning pipelines in Azure Data Factory](../data-factory/transform-data-machine-learning-service.md). - ## Next steps In this article, you learned how to find schema and sample code for your pipeline endpoints. For more information on consuming endpoints from the client application, see [Consume an Azure Machine Learning endpoint](how-to-consume-web-service.md). -See the rest of the articles in the Azure Machine Learning migration series: -1. [Migration overview](migrate-overview.md). -1. [Migrate dataset](migrate-register-dataset.md). -1. [Rebuild a Studio (classic) training pipeline](migrate-rebuild-experiment.md). -1. [Rebuild a Studio (classic) web service](migrate-rebuild-web-service.md). -1. **Integrate an Azure Machine Learning web service with client apps**. -1. [Migrate Execute R Script](migrate-execute-r-script.md). \ No newline at end of file +See the rest of the articles in the Azure Machine Learning migration series: + +- [Migration overview](migrate-overview.md). +- [Migrate dataset](migrate-register-dataset.md). +- [Rebuild a Studio (classic) training pipeline](migrate-rebuild-experiment.md). +- [Rebuild a Studio (classic) web service](migrate-rebuild-web-service.md). +- [Migrate Execute R Script](migrate-execute-r-script.md). diff --git a/articles/machine-learning/overview-what-is-azure-machine-learning.md b/articles/machine-learning/overview-what-is-azure-machine-learning.md index d4c30b846ae51..fdcf11b885c76 100644 --- a/articles/machine-learning/overview-what-is-azure-machine-learning.md +++ b/articles/machine-learning/overview-what-is-azure-machine-learning.md @@ -143,7 +143,7 @@ See [How to tune hyperparameters](how-to-tune-hyperparameters.md). Efficiency of training for deep learning and sometimes classical machine learning training jobs can be drastically improved via multinode distributed training. Azure Machine Learning compute clusters offer the latest GPU options. -Supported via Azure Arc-attached Kubernetes (preview) and Azure ML compute clusters: +Supported via Azure ML Kubernetes and Azure ML compute clusters: - PyTorch - TensorFlow diff --git a/articles/machine-learning/reference-kubernetes.md b/articles/machine-learning/reference-kubernetes.md new file mode 100644 index 0000000000000..6452b21650a39 --- /dev/null +++ b/articles/machine-learning/reference-kubernetes.md @@ -0,0 +1,309 @@ +--- +title: Reference for configuring Kubernetes cluster for Azure Machine Learning (Preview) +titleSuffix: Azure Machine Learning +description: Reference for configuring Kubernetes cluster for Azure Machine Learning. +services: machine-learning +author: zhongj +ms.author: jinzhong +ms.reviewer: larryfr +ms.service: machine-learning +ms.subservice: core +ms.topic: reference +ms.date: 06/06/2022 +--- + +# Reference for configuring Kubernetes cluster for Azure Machine Learning (Preview) + +This article contains reference information that may be useful when [configuring Kubernetes with Azure Machine Learning](./how-to-attach-kubernetes-anywhere.md). + +## Supported Kubernetes version and region + + +- Kubernetes clusters installing AzureML extension have a version support window of "N-2", that is aligned with [Azure Kubernetes Service (AKS) version support policy](../aks/supported-kubernetes-versions.md#kubernetes-version-support-policy), where 'N' is the latest GA minor version of Azure Kubernetes Service. + + - For example, if AKS introduces 1.20.a today, versions 1.20.a, 1.20.b, 1.19.c, 1.19.d, 1.18.e, and 1.18.f are supported. 
+ + - If customers are running an unsupported Kubernetes version, they'll be asked to upgrade when requesting support for the cluster. Clusters running unsupported Kubernetes releases aren't covered by the AzureML extension support policies. +- AzureML extension region availability: + - AzureML extension can be deployed to AKS or Azure Arc-enabled Kubernetes in the supported regions listed in [Azure Arc enabled Kubernetes region support](https://azure.microsoft.com/global-infrastructure/services/?products=azure-arc&regions=all). + +## Prerequisites for ARO or OCP clusters +### Disable Security Enhanced Linux (SELinux) + +[AzureML dataset](./how-to-train-with-datasets.md) (used in AzureML training jobs) isn't supported on machines with SELinux enabled. To use AzureML datasets, you need to disable SELinux on all worker nodes. + +### Privileged setup for ARO and OCP + +To deploy the AzureML extension on an ARO or OCP cluster, grant privileged access to the AzureML service accounts: run the ```oc edit scc privileged``` command and add the following service accounts under "users:": + +* ```system:serviceaccount:azure-arc:azure-arc-kube-aad-proxy-sa``` +* ```system:serviceaccount:azureml:{EXTENSION-NAME}-kube-state-metrics``` +* ```system:serviceaccount:azureml:prom-admission``` +* ```system:serviceaccount:azureml:default``` +* ```system:serviceaccount:azureml:prom-operator``` +* ```system:serviceaccount:azureml:load-amlarc-selinux-policy-sa``` +* ```system:serviceaccount:azureml:azureml-fe-v2``` +* ```system:serviceaccount:azureml:prom-prometheus``` +* ```system:serviceaccount:{KUBERNETES-COMPUTE-NAMESPACE}:default``` +* ```system:serviceaccount:azureml:azureml-ingress-nginx``` +* ```system:serviceaccount:azureml:azureml-ingress-nginx-admission``` + +> [!NOTE] +> * `{EXTENSION-NAME}` is the extension name specified with the `az k8s-extension create --name` CLI command. +> * `{KUBERNETES-COMPUTE-NAMESPACE}` is the namespace of the Kubernetes compute specified when attaching the compute to the Azure Machine Learning workspace. Skip configuring `system:serviceaccount:{KUBERNETES-COMPUTE-NAMESPACE}:default` if `KUBERNETES-COMPUTE-NAMESPACE` is `default`. + +## AzureML extension components + +For an Arc-connected cluster, the AzureML extension deployment creates an [Azure Relay](../azure-relay/relay-what-is-it.md) resource in the Azure cloud, used to route traffic between Azure services and the Kubernetes cluster. For an AKS cluster that isn't Arc-connected, the Azure Relay resource won't be created. + +When the AzureML extension deployment completes, it creates the following resources in the Kubernetes cluster, depending on the deployment scenario: + + |Resource name |Resource type |Training |Inference |Training and Inference| Description | Communication with cloud| + |--|--|--|--|--|--|--| + |relayserver|Kubernetes deployment|**✓**|**✓**|**✓**|relayserver is only needed in an Arc-connected cluster, and won't be installed in an AKS cluster.
Relayserver works with Azure Relay to communicate with the cloud services.|Receive requests for job creation and model deployment from the cloud service; sync the job status with the cloud service.| + |gateway|Kubernetes deployment|**✓**|**✓**|**✓**|The gateway is used to communicate and send data back and forth.|Send nodes and cluster resource information to cloud services.| + |aml-operator|Kubernetes deployment|**✓**|N/A|**✓**|Manage the lifecycle of training jobs.| Token exchange with the cloud token service for authentication and authorization of Azure Container Registry.| + |metrics-controller-manager|Kubernetes deployment|**✓**|**✓**|**✓**|Manage the configuration for Prometheus.|N/A| + |{EXTENSION-NAME}-kube-state-metrics|Kubernetes deployment|**✓**|**✓**|**✓**|Export the cluster-related metrics to Prometheus.|N/A| + |{EXTENSION-NAME}-prometheus-operator|Kubernetes deployment|Optional|Optional|Optional| Provide Kubernetes native deployment and management of Prometheus and related monitoring components.|N/A| + |amlarc-identity-controller|Kubernetes deployment|N/A|**✓**|**✓**|Request and renew Azure Blob/Azure Container Registry token through managed identity.|Token exchange with the cloud token service for authentication and authorization of Azure Container Registry and Azure Blob used by inference/model deployment.| + |amlarc-identity-proxy|Kubernetes deployment|N/A|**✓**|**✓**|Request and renew Azure Blob/Azure Container Registry token through managed identity.|Token exchange with the cloud token service for authentication and authorization of Azure Container Registry and Azure Blob used by inference/model deployment.| + |azureml-fe-v2|Kubernetes deployment|N/A|**✓**|**✓**|The front-end component that routes incoming inference requests to deployed services.|Send service logs to Azure Blob.| + |inference-operator-controller-manager|Kubernetes deployment|N/A|**✓**|**✓**|Manage the lifecycle of inference endpoints. |N/A| + |volcano-admission|Kubernetes deployment|Optional|N/A|Optional|Volcano admission webhook.|N/A| + |volcano-controllers|Kubernetes deployment|Optional|N/A|Optional|Manage the lifecycle of Azure Machine Learning training job pods.|N/A| + |volcano-scheduler |Kubernetes deployment|Optional|N/A|Optional|Used to perform in-cluster job scheduling.|N/A| + |fluent-bit|Kubernetes daemonset|**✓**|**✓**|**✓**|Gather the components' system log.| Upload the components' system log to cloud.| + |{EXTENSION-NAME}-dcgm-exporter|Kubernetes daemonset|Optional|Optional|Optional|dcgm-exporter exposes GPU metrics for Prometheus.|N/A| + |nvidia-device-plugin-daemonset|Kubernetes daemonset|Optional|Optional|Optional|nvidia-device-plugin-daemonset exposes GPUs on each node of your cluster.| N/A| + |prometheus-prom-prometheus|Kubernetes statefulset|**✓**|**✓**|**✓**|Gather and send job metrics to cloud.|Send job metrics like CPU/GPU/memory utilization to the cloud.| + +> [!IMPORTANT] + > * The Azure Relay resource is under the same resource group as the Arc cluster resource. It's used to communicate with the Kubernetes cluster, and modifying it will break attached compute targets. + > * By default, the Kubernetes deployment resources are randomly deployed to one or more nodes of the cluster, and daemonset resources are deployed to all nodes. If you want to restrict the extension deployment to specific nodes, use the `nodeSelector` configuration setting described below. + +> [!NOTE] + > * **{EXTENSION-NAME}** is the extension name specified with the ```az k8s-extension create --name``` CLI command.
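+
+As a quick sanity check after the extension deployment completes, the components listed above can be inspected directly with `kubectl`. This is a minimal sketch, assuming the extension components were installed into the `azureml` namespace (the namespace used in the service account names above) and that `kubectl` is configured against the cluster; adjust the namespace if your deployment differs:
+
+```bash
+# List the extension workloads (deployments, daemonsets, and statefulsets) created in the cluster
+kubectl get deployments,daemonsets,statefulsets -n azureml
+
+# Confirm that the pods backing those workloads are running
+kubectl get pods -n azureml
+```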
+ + +## Create and use instance types for efficient compute resource usage + +### What are instance types? + +Instance types are an Azure Machine Learning concept that allows you to target certain types of +compute nodes for training and inference workloads. For an Azure VM, an example of an +instance type is `STANDARD_D2_V3`. + +In Kubernetes clusters, instance types are represented in a custom resource definition (CRD) that is installed with the AzureML extension. Instance types are represented by two elements in the AzureML extension: +[nodeSelector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) +and [resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). +In short, a `nodeSelector` lets us specify which node a pod should run on. The node must have a +corresponding label. In the `resources` section, we can set the compute resources (CPU, memory and +NVIDIA GPU) for the pod. + +### Default instance type + +By default, a `defaultinstancetype` with the following definition is created when you attach a Kubernetes cluster to an AzureML workspace: +- No `nodeSelector` is applied, meaning the pod can get scheduled on any node. +- The workload's pods are assigned default resources of 0.6 CPU cores, 1536Mi of memory and 0 GPUs: +```yaml +resources: + requests: + cpu: "0.6" + memory: "1536Mi" + limits: + cpu: "0.6" + memory: "1536Mi" + nvidia.com/gpu: null +``` + +> [!NOTE] +> - The default instance type purposefully uses few resources. To ensure all ML workloads +run with appropriate resources, for example GPU resources, it's highly recommended to create custom instance types. +> - `defaultinstancetype` won't appear as an InstanceType custom resource in the cluster when running the command ```kubectl get instancetype```, but it will appear in all clients (UI, CLI, SDK). +> - `defaultinstancetype` can be overridden with a custom instance type definition having the same name as `defaultinstancetype` (see the [Create custom instance types](#create-custom-instance-types) section). + +### Create custom instance types + +To create a new instance type, create a new custom resource for the instance type CRD. For example: + +```bash +kubectl apply -f my_instance_type.yaml +``` + +With `my_instance_type.yaml`: +```yaml +apiVersion: amlarc.azureml.com/v1alpha1 +kind: InstanceType +metadata: + name: myinstancetypename +spec: + nodeSelector: + mylabel: mylabelvalue + resources: + limits: + cpu: "1" + nvidia.com/gpu: 1 + memory: "2Gi" + requests: + cpu: "700m" + memory: "1500Mi" +``` + +This definition creates an instance type with the following behavior: +- Pods will be scheduled only on nodes with label `mylabel: mylabelvalue`. +- Pods will be assigned resource requests of `700m` CPU and `1500Mi` memory. +- Pods will be assigned resource limits of `1` CPU, `2Gi` memory and `1` NVIDIA GPU. + +> [!NOTE] +> - NVIDIA GPU resources are only specified in the `limits` section as integer values. For more information, see the Kubernetes [documentation](https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/#using-device-plugins). +> - CPU and memory resources are string values. +> - CPU can be specified in millicores, for example `100m`, or in full numbers, for example `"1"` is equivalent to `1000m`. +> - Memory can be specified as a full number + suffix, for example `1024Mi` for 1024 MiB.
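+
+After applying the custom resource, the new instance type can be verified from the cluster side. This is a short sketch, assuming the `myinstancetypename` example above was applied and that the current `kubectl` context points at the attached cluster:
+
+```bash
+# List the InstanceType custom resources registered in the cluster
+# (the built-in defaultinstancetype doesn't show up here, as noted above)
+kubectl get instancetype
+
+# Inspect the nodeSelector and resource settings of the instance type created above
+kubectl describe instancetype myinstancetypename
+```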
It's also possible to create multiple instance types at once: + +```bash +kubectl apply -f my_instance_type_list.yaml +``` + +With `my_instance_type_list.yaml`: +```yaml +apiVersion: amlarc.azureml.com/v1alpha1 +kind: InstanceTypeList +items: + - metadata: + name: cpusmall + spec: + resources: + requests: + cpu: "100m" + memory: "100Mi" + limits: + cpu: "1" + nvidia.com/gpu: 0 + memory: "1Gi" + + - metadata: + name: defaultinstancetype + spec: + resources: + requests: + cpu: "1" + memory: "1Gi" + limits: + cpu: "1" + nvidia.com/gpu: 0 + memory: "1Gi" +``` + +The above example creates two instance types: `cpusmall` and `defaultinstancetype`. This `defaultinstancetype` definition overrides the `defaultinstancetype` definition that was created when the Kubernetes cluster was attached to the AzureML workspace. + +If a training or inference workload is submitted without an instance type, it uses the default +instance type. To specify a default instance type for a Kubernetes cluster, create an instance +type with the name `defaultinstancetype`. It will automatically be recognized as the default. + +### Select instance type to submit training job + +To select an instance type for a training job using CLI (v2), specify its name as part of the +`resources` properties section in the job YAML. For example: +```yaml +command: python -c "print('Hello world!')" +environment: + image: library/python:latest +compute: azureml:<Kubernetes-compute-target-name> +resources: + instance_type: <instance-type-name> +``` + +In the above example, replace `<Kubernetes-compute-target-name>` with the name of your Kubernetes compute +target and `<instance-type-name>` with the name of the instance type you wish to select. If there's no `instance_type` property specified, the system will use `defaultinstancetype` to submit the job. + +### Select instance type to deploy model + +To select an instance type for a model deployment using CLI (v2), specify its name for the `instance_type` property in the deployment YAML. For example: + +```yaml +name: blue +app_insights_enabled: true +endpoint_name: <endpoint-name> +model: + path: ./model/sklearn_mnist_model.pkl +code_configuration: + code: ./script/ + scoring_script: score.py +instance_type: <instance-type-name> +environment: + conda_file: file:./model/conda.yml + image: mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04:20210727.v1 +``` + +In the above example, replace `<instance-type-name>` with the name of the instance type you wish to select. If there's no `instance_type` property specified, the system will use `defaultinstancetype` to deploy the model. + +## AzureML jobs connect with on-premises data storage + +[Persistent Volume (PV) and Persistent Volume Claim (PVC)](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) are Kubernetes concepts that allow users to provide and consume various storage resources. + +1. Create a PV, taking NFS as an example: + +``` +apiVersion: v1 +kind: PersistentVolume +metadata: + name: nfs-pv +spec: + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + persistentVolumeReclaimPolicy: Retain + storageClassName: "" + nfs: + path: /share/nfs + server: 20.98.110.84 + readOnly: false +``` +2. Create a PVC in the same Kubernetes namespace as the ML workloads. In `metadata`, you **must** add the label `ml.azure.com/pvc: "true"` to be recognized by AzureML, and add the annotation `ml.azure.com/mountpath: <mount path>` to set the mount path.
+ +``` +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: nfs-pvc + namespace: default + labels: + ml.azure.com/pvc: "true" + annotations: + ml.azure.com/mountpath: "/mnt/nfs" +spec: + storageClassName: "" + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +``` +> [!IMPORTANT] +> Only the job pods in the same Kubernetes namespace with the PVC(s) will be mounted the volume. Data scientist is able to access the `mount path` specified in the PVC annotation in the job. + + +## Sample YAML definition of Kubernetes secret for TLS/SSL + +To enable HTTPS endpoint for real-time inference, you need to provide both PEM-encoded TLS/SSL certificate and key. The best practice is to save the certificate and key in a Kubernetes secret in the `azureml` namespace. + +The sample YAML definition of the TLS/SSL secret is as follows, + +``` +apiVersion: v1 +data: + cert.pem: + key.pem: +kind: Secret +metadata: + name: + namespace: azureml +type: Opaque +``` + diff --git a/articles/machine-learning/reference-managed-online-endpoints-vm-sku-list.md b/articles/machine-learning/reference-managed-online-endpoints-vm-sku-list.md index bebd7131c4f58..b8bdca59922b2 100644 --- a/articles/machine-learning/reference-managed-online-endpoints-vm-sku-list.md +++ b/articles/machine-learning/reference-managed-online-endpoints-vm-sku-list.md @@ -1,23 +1,23 @@ --- -title: Managed online endpoints VM SKU list (preview) +title: Managed online endpoints VM SKU list titleSuffix: Azure Machine Learning -description: Lists the VM SKUs that can be used for managed online endpoints (preview) in Azure Machine Learning. +description: Lists the VM SKUs that can be used for managed online endpoints in Azure Machine Learning. services: machine-learning ms.service: machine-learning ms.subservice: core ms.topic: reference ms.reviewer: larryfr -ms.author: seramasu -author: rsethur +ms.author: sehan +author: dem108 ms.custom: devplatv2, event-tier1-build-2022 -ms.date: 04/11/2022 +ms.date: 06/02/2022 --- -# Managed online endpoints SKU list (preview) +# Managed online endpoints SKU list -This table shows the VM SKUs that are supported for Azure Machine Learning managed online endpoints (preview). +This table shows the VM SKUs that are supported for Azure Machine Learning managed online endpoints. * The `instance_type` attribute used for deployment must be specified in the form "Standard_F4s_v2". The table below lists instance names, for example, F2s v2. These names should be put in the specified form (`Standard_{name}`) for Azure CLI or Azure Resource Manager templates (ARM templates) requests to create and update deployments. diff --git a/articles/machine-learning/reference-yaml-compute-kubernetes.md b/articles/machine-learning/reference-yaml-compute-kubernetes.md index 9b7aea4fea433..a308256210157 100644 --- a/articles/machine-learning/reference-yaml-compute-kubernetes.md +++ b/articles/machine-learning/reference-yaml-compute-kubernetes.md @@ -1,5 +1,5 @@ --- -title: 'CLI (v2) Attached Azure Arc-enabled Kubernetes cluster (KubernetesCompute) YAML schema' +title: 'CLI (v2) Attached Kubernetes cluster (KubernetesCompute) YAML schema' titleSuffix: Azure Machine Learning description: Reference documentation for the CLI (v2) Attached Azure Arc-enabled Kubernetes cluster (KubernetesCompute) YAML schema. 
services: machine-learning diff --git a/articles/machine-learning/reference-yaml-overview.md b/articles/machine-learning/reference-yaml-overview.md index da1c1422a97aa..0d945d05a50fc 100644 --- a/articles/machine-learning/reference-yaml-overview.md +++ b/articles/machine-learning/reference-yaml-overview.md @@ -53,7 +53,7 @@ The Azure Machine Learning CLI (v2), an extension to the Azure CLI, often uses a | [Compute cluster (AmlCompute)](reference-yaml-compute-aml.md) | https://azuremlschemas.azureedge.net/latest/amlCompute.schema.json | | [Compute instance](reference-yaml-compute-instance.md) | https://azuremlschemas.azureedge.net/latest/computeInstance.schema.json | | [Attached Virtual Machine](reference-yaml-compute-vm.md) | https://azuremlschemas.azureedge.net/latest/vmCompute.schema.json | -| [Attached Azure Arc-enabled Kubernetes (KubernetesCompute)](reference-yaml-compute-kubernetes.md) | https://azuremlschemas.azureedge.net/latest/kubernetesCompute.schema.json | +| [Attached Azure Arc-enabled Kubernetes (KubernetesCompute)](reference-yaml-compute-kubernetes.md) | `https://azuremlschemas.azureedge.net/latest/kubernetesCompute.schema.json` | ## Job diff --git a/articles/machine-learning/toc.yml b/articles/machine-learning/toc.yml index a7240864d2be4..c0129e21e14d3 100644 --- a/articles/machine-learning/toc.yml +++ b/articles/machine-learning/toc.yml @@ -197,18 +197,10 @@ - name: Plan and manage costs displayName: low priority VM href: concept-plan-manage-cost.md - - name: Work with data + - name: Work with Data items: - - name: Data access + - name: Access data href: concept-data.md - - name: Data ingestion - href: concept-data-ingestion.md - - name: Data processing - href: concept-optimize-data-processing.md - - name: Studio network data access - href: concept-network-data-access.md - - name: Collect data - items: - name: Sourcing human data responsibly href: concept-sourcing-human-data.md - name: Security @@ -314,11 +306,8 @@ displayName: compute target, low priority, managed identity href: how-to-create-attach-compute-cluster.md - name: Kubernetes cluster - displayName: Azure Arc, Kubernetes, on-premise, multi-cloud + displayName: Azure Arc, Kubernetes, on-premise, multi-cloud, attach href: how-to-attach-kubernetes-anywhere.md - - name: Azure Kubernetes Service - displayName: AKS, inference - href: how-to-create-attach-kubernetes.md - name: Use studio displayName: compute target, dsvm, Data Science Virtual Machine, local, cluster, ACI, container instance, Databricks, data lake, lake, HDI, HDInsight, low priority, managed identity href: how-to-create-attach-compute-studio.md @@ -368,69 +357,34 @@ href: how-to-troubleshoot-environments.md - name: Work with data items: - - name: Work with data SDK v2 (preview) - href: how-to-use-data.md + - name: Create datastores + displayName: Create datastores + href: how-to-datastore.md + - name: Create data assets + displayName: Create data assets + href: how-to-create-register-data-assets.md + - name: Read & write data in jobs + displayName: Read & write data in jobs + href: how-to-read-write-data-v2.md + - name: Data administration + displayName: Data administration + href: how-to-administrate-data-authentication.md - name: Label data items: - - name: Set up image labeling - displayName: data, dataset - href: how-to-create-image-labeling-projects.md - - name: Set up text labeling - displayName: data, dataset - href: how-to-create-text-labeling-projects.md - - name: Label images and text - displayName: data, dataset, labeling - href: 
how-to-label-data.md - - name: Add users - displayName: data, dataset, labeling - href: how-to-add-users.md - - name: Outsource labeling tasks - href: how-to-outsource-data-labeling.md - - name: Create datasets with labels - displayName: data, labels, torchvision - href: how-to-use-labeled-dataset.md - - name: Get & prepare data - items: - - name: Data ingestion with Azure Data Factory - displayName: data, ingestion, adf - href: how-to-data-ingest-adf.md - - name: Data preparation with Azure Synapse - displayName: data, data prep, spark, spark pool, cluster, spark cluster,dataset, datastore - href: how-to-data-prep-synapse-spark-pool.md - - name: DevOps for data ingestion - displayName: data, ingestion, devops - href: how-to-cicd-data-ingestion.md - - name: Import data in the designer - displayName: designer, data, import, dataset, datastore - href: how-to-designer-import-data.md - - name: Access data - items: - - name: Connect to Azure storage with datastores (Python) - displayName: blob, get, fileshare, access, mount, download, data lake, datastore - href: how-to-access-data.md - - name: Identity-based data access to storage - displayName: blob, access, data lake, datastore, managed identity - href: how-to-identity-based-data-access.md - - name: Get data from storage with datasets (Python) - displayName: data, data set, register, access data - href: how-to-create-register-datasets.md - - name: Connect to data (UI) - displayName: blob, get, fileshare, access, mount, download, data lake, datastore, dataset, data set - href: how-to-connect-data-ui.md - - name: Manage & consume data - items: - - name: Read & write data (v2) - displayName: train - href: how-to-read-write-data-v2.md - - - name: Compliance - items: - - name: Preserve data privacy - displayName: data,privacy,differential privacy - href: how-to-differential-privacy.md - - name: Export and delete data - displayName: GDPR - href: how-to-export-delete-data.md + - name: Set up image labeling + displayName: data, dataset + href: how-to-create-image-labeling-projects.md + - name: Set up text labeling + displayName: data, dataset + href: how-to-create-text-labeling-projects.md + - name: Label images and text + displayName: data, dataset, labeling + href: how-to-label-data.md + - name: Add users + displayName: data, dataset, labeling + href: how-to-add-users.md + - name: Outsource labeling tasks + href: how-to-outsource-data-labeling.md - name: Train models items: - name: Train with the job creation UI @@ -544,23 +498,14 @@ items: - name: Online endpoints (real-time) items: - - name: Deploy an ML model with an online endpoint + - name: Deploy an ML model with an online endpoint (CLI) href: how-to-deploy-managed-online-endpoints.md - - name: Security - items: - - name: Authenticate to endpoints - href: how-to-authenticate-online-endpoint.md - - name: Network isolation with managed online endpoints - displayName: network, vnet, secure - href: how-to-secure-online-endpoint.md - - name: Managed online endpoints VM SKU list - href: reference-managed-online-endpoints-vm-sku-list.md - - name: Safe rollout for online endpoints + - name: Deploy an ML model with an online endpoint (SDK preview) + href: how-to-deploy-managed-online-endpoint-sdk-v2.md + - name: Safe rollout for online endpoints (CLI) href: how-to-safely-rollout-managed-endpoints.md - - name: Autoscale managed online endpoints - href: how-to-autoscale-endpoints.md - - name: Access Azure resources from online endpoints - href: 
how-to-access-resources-from-endpoints-managed-identities.md + - name: Safe rollout for online endpoints (SDK preview) + href: how-to-safely-rollout-managed-endpoints-sdk-v2.md - name: Deployment scenarios items: - name: Deploy a MLflow model with an online endpoint @@ -575,10 +520,23 @@ - name: Use REST to deploy a model as an online endpoint href: how-to-deploy-with-rest.md - name: Deploy an AutoML model with an online endpoint - href: how-to-deploy-automl-endpoint.md + href: how-to-deploy-automl-endpoint.md + - name: Security + items: + - name: Authenticate to endpoints + href: how-to-authenticate-online-endpoint.md + - name: Network isolation with managed online endpoints + displayName: network, vnet, secure + href: how-to-secure-online-endpoint.md + - name: Access Azure resources from online endpoints + href: how-to-access-resources-from-endpoints-managed-identities.md + - name: Autoscale online endpoints + href: how-to-autoscale-endpoints.md + - name: Managed online endpoints VM SKU list + href: reference-managed-online-endpoints-vm-sku-list.md - name: Viewing managed online endpoint costs href: how-to-view-online-endpoints-costs.md - - name: Monitoring managed online endpoints + - name: Monitoring online endpoints href: how-to-monitor-online-endpoints.md - name: Debug online endpoints locally VS Code href: how-to-debug-managed-online-endpoints-visual-studio-code.md @@ -586,8 +544,10 @@ href: how-to-troubleshoot-online-endpoints.md - name: Batch endpoints items: - - name: Batch scoring with batch endpoints + - name: Batch scoring with batch endpoints (CLI) href: how-to-use-batch-endpoint.md + - name: Batch scoring with batch endpoints (SDK preview) + href: how-to-use-batch-endpoint-sdk-v2.md - name: Batch endpoints in studio href: how-to-use-batch-endpoints-studio.md - name: Use REST to deploy a model as a batch endpoint @@ -672,6 +632,9 @@ - name: Responsible AI dashboard scorecard displayName: explanations, interpretability, fairness, bias, model performance, disparity metrics, unfairness, causal analysis, causal inference, exploratory data analysis, EDA, dataset explorer href: how-to-responsible-ai-scorecard.md + - name: Migrate from v1 + displayName: migration, v1, v2 + href: how-to-migrate-from-v1.md - name: Troubleshoot & debug items: - name: VS Code interactive debugging @@ -753,6 +716,8 @@ - name: Hyperparameters for AutoML computer vision tasks displayName: automl, yolo-5, image, image model href: reference-automl-images-hyperparameters.md + - name: Kubernetes cluster configuration + href: reference-kubernetes.md - name: Designer component reference displayName: module, component, reference, algorithm, studio href: algorithm-module-reference/module-reference.md diff --git a/articles/machine-learning/tutorial-auto-train-image-models.md b/articles/machine-learning/tutorial-auto-train-image-models.md index 735dda33ab048..33040a8864d97 100644 --- a/articles/machine-learning/tutorial-auto-train-image-models.md +++ b/articles/machine-learning/tutorial-auto-train-image-models.md @@ -9,7 +9,7 @@ ms.topic: tutorial author: swatig007 ms.author: swatig ms.reviewer: nibaccam -ms.date: 04/15/2022 +ms.date: 05/26/2022 ms.custom: devx-track-python, automl, event-tier1-build-2022 --- @@ -253,13 +253,13 @@ az ml data create -f [PATH_TO_YML_FILE] --workspace-name [YOUR_AZURE_WORKSPACE] ``` # [Python SDK v2 (preview)](#tab/SDK-v2) -[!Notebook-python[] 
(~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=upload-data)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=upload-data)] --- Next step is to create `MLTable` from your data in jsonl format as shown below. MLtable package your data into a consumable object for training. -:::code language="yaml" source="~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/data/training-mltable-folder/MLTable"::: +:::code language="yaml" source="~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/data/training-mltable-folder/MLTable"::: # [CLI v2](#tab/CLI-v2) [!INCLUDE [cli v2](../../includes/machine-learning-cli-v2.md)] @@ -280,7 +280,7 @@ validation_data: You can create data inputs from training and validation MLTable with the following code: -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=data-load)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=data-load)] --- @@ -298,7 +298,7 @@ primary_metric: mean_average_precision # [Python SDK v2 (preview)](#tab/SDK-v2) -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=image-object-detection-configuration)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=image-object-detection-configuration)] --- @@ -346,9 +346,9 @@ search_space: # [Python SDK v2 (preview)](#tab/SDK-v2) -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=sweep-settings)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=sweep-settings)] -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=search-space-settings)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=search-space-settings)] --- @@ -368,7 +368,7 @@ az ml job create --file ./hello-automl-job-basic.yml --workspace-name [YOUR_AZUR When you've configured your AutoML Job to the desired settings, you can submit the job. 
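+
+The referenced notebook cell performs the submission. As a rough sketch only (not the notebook's exact code), submitting a configured job object with the Python SDK v2 looks like this, assuming `ml_client` and a job object named `image_object_detection_job` were created in earlier cells:
+
+```python
+# Submit the configured AutoML image job and stream its logs until completion.
+# `ml_client` and `image_object_detection_job` are assumed to come from earlier cells.
+returned_job = ml_client.jobs.create_or_update(image_object_detection_job)
+print(f"Created job: {returned_job.name}")
+
+ml_client.jobs.stream(returned_job.name)
+```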
-[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=submit-run)]
+[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=submit-run)]
 
 ---
 
diff --git a/articles/machine-learning/tutorial-designer-automobile-price-deploy.md b/articles/machine-learning/tutorial-designer-automobile-price-deploy.md
index 3b376fa4e53e8..a2f924f739ea2 100644
--- a/articles/machine-learning/tutorial-designer-automobile-price-deploy.md
+++ b/articles/machine-learning/tutorial-designer-automobile-price-deploy.md
@@ -57,7 +57,7 @@ To deploy your pipeline, you must first convert the training pipeline into a rea
 
     > [!NOTE]
    > By default, the **Web Service Input** will expect the same data schema as the component output data which connects to the same downstream port as it. In this sample, **Web Service Input** and **Automobile price data (Raw)** connect to the same downstream component, hence **Web Service Input** expect the same data schema as **Automobile price data (Raw)** and target variable column `price` is included in the schema.
-    > However, usually When you score the data, you won't know the target variable values. For such case, you can remove the target variable column in the inference pipeline using **Select Columns in Dataset** component. Make sure that the output of **Select Columns in Dataset** removing target variable column is connected to the same port as the output of the **Web Service Intput** component.
+    > However, when you score the data, you usually won't know the target variable values. In that case, you can remove the target variable column in the inference pipeline by using the **Select Columns in Dataset** component. Make sure that the output of the **Select Columns in Dataset** component that removes the target variable column is connected to the same port as the output of the **Web Service Input** component.
 
 1. Select **Submit**, and use the same compute target and experiment that you used in part one.
 
diff --git a/articles/machine-learning/tutorial-pipeline-python-sdk.md b/articles/machine-learning/tutorial-pipeline-python-sdk.md
index 2d836a006e5fb..f4fd38774d8dc 100644
--- a/articles/machine-learning/tutorial-pipeline-python-sdk.md
+++ b/articles/machine-learning/tutorial-pipeline-python-sdk.md
@@ -76,7 +76,7 @@ First you'll install the v2 SDK on your compute instance:
 
 1. Now on the terminal, run the command:
 
     ```
-    git clone --depth 1 https://github.com/Azure/azureml-examples --branch sdk-preview
+    git clone --depth 1 https://github.com/Azure/azureml-examples
     ```
 
 1. On the left, select **Notebooks**.
@@ -127,14 +127,7 @@ Before creating the pipeline, you'll set up the resources the pipeline will use:
 
 Before we dive in the code, you'll need to connect to your Azure ML workspace. The workspace is the top-level resource for Azure Machine Learning, providing a centralized place to work with all the artifacts you create when you use Azure Machine Learning.
 
-
-```python
-# handle to the workspace
-from azure.ai.ml import MLClient
-
-# Authentication package
-from azure.identity import DefaultAzureCredential
-```
+[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=import-mlclient)]
 
 In the next cell, enter your Subscription ID, Resource Group name and Workspace name. To find your Subscription ID:
 1. 
In the upper right Azure Machine Learning studio toolbar, select your workspace name.
@@ -143,15 +136,7 @@ In the next cell, enter your Subscription ID, Resource Group name and Workspace
 
 :::image type="content" source="media/tutorial-pipeline-python-sdk/find-info.png" alt-text="Screenshot shows how to find values needed for your code.":::
 
-```python
-# get a handle to the workspace
-ml_client = MLClient(
-    DefaultAzureCredential(),
-    subscription_id="",
-    resource_group_name="",
-    workspace_name="",
-)
-```
+[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=ml_client)]
 
 The result is a handler to the workspace that you'll use to manage other resources and jobs.
 
@@ -168,20 +153,7 @@ The data you use for training is usually in one of the locations below:
 
 Azure ML uses a `Data` object to register a reusable definition of data, and consume data within a pipeline. In the section below, you'll consume some data from web url as one example. Data from other sources can be created as well.
 
-```python
-from azure.ai.ml.entities import Data
-from azure.ai.ml.constants import AssetTypes
-web_path = "https://archive.ics.uci.edu/ml/machine-learning-databases/00350/default%20of%20credit%20card%20clients.xls"
-
-credit_data = Data(
-    name="creditcard_defaults",
-    path=web_path,
-    type=AssetTypes.URI_FILE,
-    description="Dataset for credit card defaults",
-    tags={"source_type": "web", "source": "UCI ML Repo"},
-    version='1.0.0'
-)
-```
+[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=credit_data)]
 
 This code just created a `Data` asset, ready to be consumed as an input by the pipeline that you'll define in the next sections. In addition, you can register the dataset to your workspace so it becomes reusable across pipelines.
 
@@ -193,16 +165,22 @@ Registering the dataset will enable you to:
 
 Since this is the first time that you're making a call to the workspace, you may be asked to authenticate. Once the authentication is complete, you'll then see the dataset registration completion message.
 
-
-```python
-credit_data = ml_client.data.create_or_update(credit_data)
-print(
-    f"Dataset with name {credit_data.name} was registered to workspace, the dataset version is {credit_data.version}"
-)
-```
+[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=update-credit_data)]
 
 In the future, you can fetch the same dataset from the workspace using `credit_dataset = ml_client.data.get("", version='')`.
 
+## Create a compute resource to run your pipeline
+
+Each step of an Azure ML pipeline can use a different compute resource for running the specific job of that step. These can be single- or multi-node machines with a Linux or Windows OS, or a specific compute fabric like Spark.
+
+In this section, you'll provision a Linux compute cluster.
+
+For this tutorial, you only need a basic cluster, so you'll use a Standard_DS3_v2 VM size (2 vCPU cores, 7 GB RAM) to create an Azure ML compute cluster.
+
+> [!TIP]
+> If you already have a compute cluster, replace "cpu-cluster" in the code below with the name of your cluster. This will keep you from creating another one.
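+
+The referenced `cpu_cluster` notebook cell provisions this cluster. As a minimal sketch (assuming the `AmlCompute` entity from the v2 SDK and the `ml_client` handle created earlier; not necessarily the notebook's exact code), it looks roughly like this:
+
+```python
+from azure.ai.ml.entities import AmlCompute
+
+# Define a small CPU cluster named "cpu-cluster" that scales down to zero when idle
+cpu_cluster = AmlCompute(
+    name="cpu-cluster",
+    size="STANDARD_DS3_V2",  # 2 vCPU cores, 7 GB RAM
+    min_instances=0,
+    max_instances=4,
+)
+
+# Create the cluster, or update it if one with this name already exists
+cpu_cluster = ml_client.begin_create_or_update(cpu_cluster)
+```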
+ +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=cpu_cluster)] ## Create a job environment for pipeline steps @@ -211,34 +189,11 @@ So far, you've created a development environment on the compute instance, your d In this example, you'll create a conda environment for your jobs, using a conda yaml file. First, create a directory to store the file in. - -```python -import os -dependencies_dir = "./dependencies" -os.makedirs(dependencies_dir, exist_ok=True) -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=dependencies_dir)] Now, create the file in the dependencies directory. -```python -%%writefile {dependencies_dir}/conda.yml -name: model-env -channels: - - conda-forge -dependencies: - - python=3.8 - - numpy=1.21.2 - - pip=21.2.4 - - scikit-learn=0.24.2 - - scipy=1.7.1 - - pandas>=1.1,<1.2 - - pip: - - azureml-defaults==1.38.0 - - azureml-mlflow==1.38.0 - - inference-schema[numpy-support]==1.3.0 - - joblib==1.0.1 - - xlrd==2.0.1 -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=conda.yml)] The specification contains some usual packages, that you'll use in your pipeline (numpy, pip), together with some Azure ML specific packages (azureml-defaults, azureml-mlflow). @@ -246,25 +201,7 @@ The Azure ML packages aren't mandatory to run Azure ML jobs. However, adding the Use the *yaml* file to create and register this custom environment in your workspace: -```Python -from azure.ai.ml.entities import Environment - -custom_env_name = "aml-scikit-learn" - -pipeline_job_env = Environment( - name=custom_env_name, - description="Custom environment for Credit Card Defaults pipeline", - tags={"scikit-learn": "0.24.2", "azureml-defaults": "1.38.0"}, - conda_file=os.path.join(dependencies_dir, "conda.yml"), - image="mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04:20210727.v1", - version="1.0.0" -) -pipeline_job_env = ml_client.environments.create_or_update(pipeline_job_env) - -print( - f"Environment with name {pipeline_job_env.name} is registered to workspace, the environment version is {pipeline_job_env.version}" -) -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=custom_env_name)] ## Build the training pipeline @@ -284,122 +221,24 @@ Let's start by creating the first component. This component handles the preproce First create a source folder for the data_prep component: -```python -import os - -data_prep_src_dir = "./components/data_prep" -os.makedirs(data_prep_src_dir, exist_ok=True) -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=data_prep_src_dir)] This script performs the simple task of splitting the data into train and test datasets. Azure ML mounts datasets as folders to the computes, therefore, we created an auxiliary `select_first_file` function to access the data file inside the mounted input folder. [MLFlow](https://mlflow.org/docs/latest/tracking.html) will be used to log the parameters and metrics during our pipeline run. 
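+
+The `select_first_file` helper itself is only a few lines; this sketch mirrors the version used later in the training script:
+
+```python
+import os
+
+
+def select_first_file(path):
+    """Return the full path of the first file in a mounted input folder."""
+    files = os.listdir(path)
+    return os.path.join(path, files[0])
+```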
-```python -%%writefile {data_prep_src_dir}/data_prep.py -import os -import argparse -import pandas as pd -from sklearn.model_selection import train_test_split -import logging -import mlflow - - -def main(): - """Main function of the script.""" - - # input and output arguments - parser = argparse.ArgumentParser() - parser.add_argument("--data", type=str, help="path to input data") - parser.add_argument("--test_train_ratio", type=float, required=False, default=0.25) - parser.add_argument("--train_data", type=str, help="path to train data") - parser.add_argument("--test_data", type=str, help="path to test data") - args = parser.parse_args() - - # Start Logging - mlflow.start_run() - - print(" ".join(f"{k}={v}" for k, v in vars(args).items())) - - print("input data:", args.data) - - credit_df = pd.read_excel(args.data, header=1, index_col=0) - - mlflow.log_metric("num_samples", credit_df.shape[0]) - mlflow.log_metric("num_features", credit_df.shape[1] - 1) - - credit_train_df, credit_test_df = train_test_split( - credit_df, - test_size=args.test_train_ratio, - ) - - # output paths are mounted as folder, therefore, we are adding a filename to the path - credit_train_df.to_csv(os.path.join(args.train_data, "data.csv"), index=False) - - credit_test_df.to_csv(os.path.join(args.test_data, "data.csv"), index=False) - - # Stop Logging - mlflow.end_run() - - -if __name__ == "__main__": - main() -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=def-main)] Now that you have a script that can perform the desired task, create an Azure ML Component from it. You'll use the general purpose **CommandComponent** that can run command line actions. This command line action can directly call system commands or run a script. The inputs/outputs are specified on the command line via the `${{ ... }}` notation. -```python -%%writefile {data_prep_src_dir}/data_prep.yml -# -name: data_prep_credit_defaults -display_name: Data preparation for training -# version: 1 # Not specifying a version will automatically update the version -type: command -inputs: - data: - type: uri_folder - test_train_ratio: - type: number -outputs: - train_data: - type: uri_folder - test_data: - type: uri_folder -code: . -environment: - # for this step, we'll use an AzureML curate environment - azureml:aml-scikit-learn:1.0.0 -command: >- - python data_prep.py - --data ${{inputs.data}} --test_train_ratio ${{inputs.test_train_ratio}} - --train_data ${{outputs.train_data}} --test_data ${{outputs.test_data}} -# -``` - -Once the `yaml` file and the script are ready, you can create your component using `load_component()`. - -```python -# importing the Component Package -from azure.ai.ml.entities import load_component - -# Loading the component from the yml file -data_prep_component = load_component(yaml_file=os.path.join(data_prep_src_dir, "data_prep.yml")) -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=data_prep_component)] Optionally, register the component in the workspace for future re-use. -```python -data_prep_component = ml_client.create_or_update(data_prep_component) - -print( - f"Component {data_prep_component.name} with Version {data_prep_component.version} is registered" -) -``` - ## Create component 2: training (using yaml definition) The second component that you'll create will consume the training and test data, train a tree based model and return the output model. 
You'll use Azure ML logging capabilities to record and visualize the learning progress. @@ -409,113 +248,7 @@ You used the `CommandComponent` class to create your first component. This time Create the directory for this component: -```python -import os -train_src_dir = "./components/train" -os.makedirs(train_src_dir, exist_ok=True) -``` - -Create the training script in the directory: - -```python -%%writefile {train_src_dir}/train.py -import argparse -from sklearn.ensemble import GradientBoostingClassifier -from sklearn.metrics import classification_report -from azureml.core.model import Model -from azureml.core import Run -import os -import pandas as pd -import joblib -import mlflow - - -def select_first_file(path): - """Selects first file in folder, use under assumption there is only one file in folder - Args: - path (str): path to directory or file to choose - Returns: - str: full path of selected file - """ - files = os.listdir(path) - return os.path.join(path, files[0]) - - -# Start Logging -mlflow.start_run() - -# enable autologging -mlflow.sklearn.autolog() - -# This line creates a handles to the current run. It is used for model registration -run = Run.get_context() - -os.makedirs("./outputs", exist_ok=True) - - -def main(): - """Main function of the script.""" - - # input and output arguments - parser = argparse.ArgumentParser() - parser.add_argument("--train_data", type=str, help="path to train data") - parser.add_argument("--test_data", type=str, help="path to test data") - parser.add_argument("--n_estimators", required=False, default=100, type=int) - parser.add_argument("--learning_rate", required=False, default=0.1, type=float) - parser.add_argument("--registered_model_name", type=str, help="model name") - parser.add_argument("--model", type=str, help="path to model file") - args = parser.parse_args() - - # paths are mounted as folder, therefore, we are selecting the file from folder - train_df = pd.read_csv(select_first_file(args.train_data)) - - # Extracting the label column - y_train = train_df.pop("default payment next month") - - # convert the dataframe values to array - X_train = train_df.values - - # paths are mounted as folder, therefore, we are selecting the file from folder - test_df = pd.read_csv(select_first_file(args.test_data)) - - # Extracting the label column - y_test = test_df.pop("default payment next month") - - # convert the dataframe values to array - X_test = test_df.values - - print(f"Training with data of shape {X_train.shape}") - - clf = GradientBoostingClassifier( - n_estimators=args.n_estimators, learning_rate=args.learning_rate - ) - clf.fit(X_train, y_train) - - y_pred = clf.predict(X_test) - - print(classification_report(y_test, y_pred)) - - # setting the full path of the model file - model_file = os.path.join(args.model, "model.pkl") - with open(model_file, "wb") as mf: - joblib.dump(clf, mf) - - # Registering the model to the workspace - model = Model.register( - run.experiment.workspace, - model_name=args.registered_model_name, - model_path=model_file, - tags={"type": "sklearn.GradientBoostingClassifier"}, - description="Model created in Azure ML on credit card defaults dataset", - ) - - # Stop Logging - mlflow.end_run() - - -if __name__ == "__main__": - main() -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=train_src_dir)] As you can see in this training script, once the model is trained, the model file is saved and registered to the workspace. 
Now you can use the registered model in inferencing endpoints. @@ -524,59 +257,13 @@ For the environment of this step, you'll use one of the built-in (curated) Azure First, create the *yaml* file describing the component: -```python -%%writefile {train_src_dir}/train.yml -# -name: train_credit_defaults_model -display_name: Train Credit Defaults Model -# version: 1 # Not specifying a version will automatically update the version -type: command -inputs: - train_data: - type: uri_folder - test_data: - type: uri_folder - learning_rate: - type: number - registered_model_name: - type: string -outputs: - model: - type: uri_folder -code: . -environment: - # for this step, we'll use an AzureML curate environment - azureml:AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:21 -command: >- - python train.py - --train_data ${{inputs.train_data}} - --test_data ${{inputs.test_data}} - --learning_rate ${{inputs.learning_rate}} - --registered_model_name ${{inputs.registered_model_name}} - --model ${{outputs.model}} -# - -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=train.yml)] Now create and register the component: -```python -# importing the Component Package -from azure.ai.ml.entities import load_component - -# Loading the component from the yml file -train_component = load_component(yaml_file=os.path.join(train_src_dir, "train.yml")) -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=train_component)] -```python -# Now we register the component to the workspace -train_component = ml_client.create_or_update(train_component) - -# Create (register) the component in your workspace -print( - f"Component {train_component.name} with Version {train_component.version} is registered" -) -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=update-train_component)] ## Create the pipeline from components @@ -590,59 +277,11 @@ To code the pipeline, you use a specific `@dsl.pipeline` decorator that identifi Here, we used *input data*, *split ratio* and *registered model name* as input variables. We then call the components and connect them via their inputs/outputs identifiers. The outputs of each step can be accessed via the `.outputs` property. -> [!IMPORTANT] -> In the code below, replace `` with the name you used when you created a compute cluster in the [Quickstart: Create workspace resources you need to get started with Azure Machine Learning](quickstart-create-resources.md). 
- -```python -# the dsl decorator tells the sdk that we are defining an Azure ML pipeline -from azure.ai.ml import dsl, Input, Output - -@dsl.pipeline( - compute="", - description="E2E data_perp-train pipeline", -) -def credit_defaults_pipeline( - pipeline_job_data_input, - pipeline_job_test_train_ratio, - pipeline_job_learning_rate, - pipeline_job_registered_model_name, -): - # using data_prep_function like a python call with its own inputs - data_prep_job = data_prep_component( - data=pipeline_job_data_input, - test_train_ratio=pipeline_job_test_train_ratio, - ) - - # using train_func like a python call with its own inputs - train_job = train_component( - train_data=data_prep_job.outputs.train_data, # note: using outputs from previous step - test_data=data_prep_job.outputs.test_data, # note: using outputs from previous step - learning_rate=pipeline_job_learning_rate, # note: using a pipeline input as parameter - registered_model_name=pipeline_job_registered_model_name, - ) - - # a pipeline returns a dict of outputs - # keys will code for the pipeline output identifier - return { - "pipeline_job_train_data": data_prep_job.outputs.train_data, - "pipeline_job_test_data": data_prep_job.outputs.test_data, - } -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=pipeline)] Now use your pipeline definition to instantiate a pipeline with your dataset, split rate of choice and the name you picked for your model. -```python -registered_model_name = "credit_defaults_model" - -# Let's instantiate the pipeline with the parameters of our choice -pipeline = credit_defaults_pipeline( - # pipeline_job_data_input=credit_data, - pipeline_job_data_input=Input(type="uri_file", path=web_path), - pipeline_job_test_train_ratio=0.2, - pipeline_job_learning_rate=0.25, - pipeline_job_registered_model_name=registered_model_name, -) -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=registered_model_name)] ## Submit the job @@ -652,18 +291,7 @@ Here you'll also pass an experiment name. An experiment is a container for all t Once completed, the pipeline will register a model in your workspace as a result of training. -```python -import webbrowser -# submit the pipeline job -returned_job = ml_client.jobs.create_or_update( - pipeline, - - # Project's name - experiment_name="e2e_registered_components", -) -# open the pipeline in web browser -webbrowser.open(returned_job.services["Studio"].endpoint) -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=returned_job)] An output of "False" is expected from the above cell. You can track the progress of your pipeline, by using the link generated in the cell above. @@ -687,103 +315,19 @@ Now deploy your machine learning model as a web service in the Azure cloud. To deploy a machine learning service, you'll usually need: * The model assets (filed, metadata) that you want to deploy. You've already registered these assets in your training component. -* Some code to run as a service. The code executes the model on a given input request. This entry script receives data submitted to a deployed web service and passes it to the model, then returns the model's response to the client. The script is specific to your model. The entry script must understand the data that the model expects and returns. 
- -## Create an inference script - -The two things you need to accomplish in your inference script are: - -* Load your model (using a function called `init()`) -* Run your model on input data (using a function called `run()`) - -In the following implementation the `init()` function loads the model, and the run function expects the data in `json` format with the input data stored under `data`. - -```python -deploy_dir = "./deploy" -os.makedirs(deploy_dir, exist_ok=True) -``` - -```python -%%writefile {deploy_dir}/score.py -import os -import logging -import json -import numpy -import joblib - - -def init(): - """ - This function is called when the container is initialized/started, typically after create/update of the deployment. - You can write the logic here to perform init operations like caching the model in memory - """ - global model - # AZUREML_MODEL_DIR is an environment variable created during deployment. - # It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION) - model_path = os.path.join(os.getenv("AZUREML_MODEL_DIR"), "model.pkl") - # deserialize the model file back into a sklearn model - model = joblib.load(model_path) - logging.info("Init complete") - - -def run(raw_data): - """ - This function is called for every invocation of the endpoint to perform the actual scoring/prediction. - In the example we extract the data from the json input and call the scikit-learn model's predict() - method and return the result back - """ - logging.info("Request received") - data = json.loads(raw_data)["data"] - data = numpy.array(data) - result = model.predict(data) - logging.info("Request processed") - return result.tolist() -``` +* Some code to run as a service. The code executes the model on a given input request. This entry script receives data submitted to a deployed web service and passes it to the model, then returns the model's response to the client. The script is specific to your model. The entry script must understand the data that the model expects and returns. When using a MLFlow model, as in this tutorial, this script is automatically created for you ## Create a new online endpoint Now that you have a registered model and an inference script, it's time to create your online endpoint. The endpoint name needs to be unique in the entire Azure region. For this tutorial, you'll create a unique name using [`UUID`](https://en.wikipedia.org/wiki/Universally_unique_identifier). 
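+
+The referenced cell only needs to build a unique name; a minimal sketch is:
+
+```python
+import uuid
+
+# Append a short random suffix so the endpoint name is unique within the region
+online_endpoint_name = "credit-endpoint-" + str(uuid.uuid4())[:8]
+```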
-```python -import uuid - -# Creating a unique name for the endpoint -online_endpoint_name = "credit-endpoint-" + str(uuid.uuid4())[:8] - -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=online_endpoint_name)] -```Python -from azure.ai.ml.entities import ( - ManagedOnlineEndpoint, - ManagedOnlineDeployment, - CodeConfiguration, - Model, - Environment, -) - -# create an online endpoint -endpoint = ManagedOnlineEndpoint( - name=online_endpoint_name, - description="this is an online endpoint", - auth_mode="key", - tags={ - "training_dataset": "credit_defaults", - "model_type": "sklearn.GradientBoostingClassifier", - }, -) - -endpoint = ml_client.begin_create_or_update(endpoint) - -print(f"Endpint {endpoint.name} provisioning state: {endpoint.provisioning_state}") -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=endpoint)] Once you've created an endpoint, you can retrieve it as below: -```python -endpoint = ml_client.online_endpoints.get(name = online_endpoint_name) - -print(f"Endpint \"{endpoint.name}\" with provisioning state \"{endpoint.provisioning_state}\" is retrieved") -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=update-endpoint)] ## Deploy the model to the endpoint @@ -791,39 +335,14 @@ Once the endpoint is created, deploy the model with the entry script. Each endpo You can check the *Models* page on the Azure ML studio, to identify the latest version of your registered model. Alternatively, the code below will retrieve the latest version number for you to use. - -```python -# Let's pick the latest version of the model -latest_model_version = max( - [int(m.version) for m in ml_client.models.list(name=registered_model_name)] -) -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=latest_model_version)] Deploy the latest version of the model. > [!NOTE] > Expect this deployment to take approximately 6 to 8 minutes. - -```python -# picking the model to deploy. Here we use the latest version of our registered model -model = ml_client.models.get(name=registered_model_name, version=latest_model_version) - - -#create an online deployment. -blue_deployment = ManagedOnlineDeployment( - name='blue', - endpoint_name=online_endpoint_name, - model=model, - environment="AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:21", - code_configuration=CodeConfiguration( - code=deploy_dir, - scoring_script="score.py"), - instance_type='Standard_DS3_v2', - instance_count=1) - -blue_deployment = ml_client.begin_create_or_update(blue_deployment) -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=model)] ### Test with a sample query @@ -831,23 +350,11 @@ Now that the model is deployed to the endpoint, you can run inference with it. Create a sample request file following the design expected in the run method in the score script. 
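+
+Conceptually, the referenced cells write a small JSON file shaped like `{"data": [[...], [...]]}`. The sketch below uses illustrative values only; each row must contain the 23 input features the model expects:
+
+```python
+import json
+import os
+
+os.makedirs("deploy", exist_ok=True)
+
+# Two illustrative rows; real requests use feature values from the credit dataset
+sample = {"data": [[20000, 2, 2, 1, 24] + [0] * 18,
+                   [10, 9, 8, 7, 6] + [1] * 18]}
+
+with open("deploy/sample-request.json", "w") as f:
+    json.dump(sample, f)
+```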
+[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=sample-request.json)] -```python -%%writefile {deploy_dir}/sample-request.json -{"data": [ - [20000,2,2,1,24,2,2,-1,-1,-2,-2,3913,3102,689,0,0,0,0,689,0,0,0,0], - [10,9,8,7,6,5,4,3,2,1, 10,9,8,7,6,5,4,3,2,1,10,9,8] -]} -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=write-sample-request)] -```python -# test the blue deployment with some sample data -ml_client.online_endpoints.invoke( - endpoint_name=online_endpoint_name, - request_file="./deploy/sample-request.json", - deployment_name='blue' -) -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=ml_client.online_endpoints.invoke)] ## Clean up resources @@ -856,9 +363,7 @@ If you're not going to use the endpoint, delete it to stop using the resource. > [!NOTE] > Expect this step to take approximately 6 to 8 minutes. -```python -ml_client.online_endpoints.begin_delete(name=online_endpoint_name) -``` +[!Notebook-python[] (~/azureml-examples-main/tutorials/e2e-ds-experience/e2e-ml-workflow.ipynb?name=ml_client.online_endpoints.begin_delete)] ## Next steps diff --git a/articles/machine-learning/tutorial-power-bi-designer-model.md b/articles/machine-learning/tutorial-power-bi-designer-model.md index 2ca5837ecaa7a..5d75cc377820d 100644 --- a/articles/machine-learning/tutorial-power-bi-designer-model.md +++ b/articles/machine-learning/tutorial-power-bi-designer-model.md @@ -31,7 +31,7 @@ There are three ways to create and deploy the model you'll use in Power BI. Thi But you could instead use one of the other options: -* [Option A: Train and deploy models by using Jupyter Notebooks](tutorial-power-bi-custom-model.md). This code-first authoring experience uses Jupyter Notebooks that are hosted in Azure Machine Learning Studio. +* [Option A: Train and deploy models by using Jupyter Notebooks](tutorial-power-bi-custom-model.md). This code-first authoring experience uses Jupyter Notebooks that are hosted in Azure Machine Learning studio. * [Option C: Train and deploy models by using automated machine learning](tutorial-power-bi-automated-model.md). This no-code authoring experience fully automates data preparation and model training. ## Prerequisites @@ -45,7 +45,7 @@ But you could instead use one of the other options: In this section, you create a *compute instance*. Compute instances are used to train machine learning models. You also create an *inference cluster* to host the deployed model for real-time scoring. -Sign in to [Azure Machine Learning Studio](https://ml.azure.com). In the menu on the left, select **Compute** and then **New**: +Sign in to [Azure Machine Learning studio](https://ml.azure.com). In the menu on the left, select **Compute** and then **New**: :::image type="content" source="media/tutorial-power-bi/create-new-compute.png" alt-text="Screenshot showing how to create a compute instance."::: @@ -76,7 +76,7 @@ Your inference cluster **Status** is now **Creating**. Your single node cluster In this tutorial, you use the [Diabetes dataset](https://www4.stat.ncsu.edu/~boos/var.select/diabetes.html). This dataset is available in [Azure Open Datasets](https://azure.microsoft.com/services/open-datasets/). -To create the dataset, in the menu on the left, select **Datasets**. Then select **Create dataset**. You see the following options: +To create the dataset, in the menu on the left, select **Data**. Then select **Create**. 
You see the following options:
 
 :::image type="content" source="media/tutorial-power-bi/create-dataset.png" alt-text="Screenshot showing how to create a new dataset.":::
 
@@ -96,9 +96,10 @@ The data has 10 baseline input variables, such as age, sex, body mass index, ave
 
 ## Create a machine learning model by using the designer
 
-After you create the compute and datasets, you can use the designer to create the machine learning model. In Azure Machine Learning Studio, select **Designer** and then **New pipeline**:
+After you create the compute and datasets, you can use the designer to create the machine learning model. In Azure Machine Learning studio, select **Designer** and then **New pipeline**:
+
+:::image type="content" source="media/tutorial-designer-automobile-price-train-score/launch-designer.png" alt-text="Screenshot showing how to create a new pipeline.":::
 
-:::image type="content" source="media/tutorial-power-bi/create-designer.png" alt-text="Screenshot showing how to create a new pipeline.":::
 
 You see a blank *canvas* and a **Settings** menu:
 
diff --git a/articles/machine-learning/v1/concept-azure-machine-learning-architecture.md b/articles/machine-learning/v1/concept-azure-machine-learning-architecture.md
index 044e1eeb3bdd2..978185251a889 100644
--- a/articles/machine-learning/v1/concept-azure-machine-learning-architecture.md
+++ b/articles/machine-learning/v1/concept-azure-machine-learning-architecture.md
@@ -25,7 +25,7 @@ Learn about the architecture and concepts for [Azure Machine Learning](../overvi
 
 A [machine learning workspace](../concept-workspace.md) is the top-level resource for Azure Machine Learning.
 
 :::image type="content" source="media/concept-azure-machine-learning-architecture/architecture.svg" alt-text="Diagram: Azure Machine Learning architecture of a workspace and its components":::
 
 The workspace is the centralized place to:
 
@@ -66,7 +66,7 @@ For more information about training compute targets, see [Training compute targe
 
 For more information, see [Create and register Azure Machine Learning Datasets](how-to-create-register-datasets.md). For more examples using Datasets, see the [sample notebooks](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/work-with-data/datasets-tutorial).
 
-Datasets use [datastores](../concept-data.md#datastores) to securely connect to your Azure storage services. Datastores store connection information without putting your authentication credentials and the integrity of your original data source at risk. They store connection information, like your subscription ID and token authorization in your Key Vault associated with the workspace, so you can securely access your storage without having to hard code them in your script.
+Datasets use [datastores](../concept-data.md#datastore) to securely connect to your Azure storage services. Datastores store connection information without putting your authentication credentials and the integrity of your original data source at risk. They store connection information, like your subscription ID and token authorization, in the Key Vault associated with the workspace, so you can securely access your storage without having to hard code it in your script.
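+
+For example, with the SDK v1 you retrieve a registered datastore by name and never handle the underlying credentials directly. The following is a minimal sketch; `workspaceblobstore` is the default datastore created with every workspace:
+
+```python
+from azureml.core import Workspace, Datastore
+
+# Connect to the workspace (reads a local config.json downloaded from the studio or portal)
+ws = Workspace.from_config()
+
+# Credentials are resolved from the workspace's associated Key Vault behind the scenes
+datastore = Datastore.get(ws, datastore_name="workspaceblobstore")
+print(datastore.name, datastore.datastore_type)
+```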
 
 ## Environments
 
@@ -154,7 +154,7 @@ Because Machine Learning Compute is a managed compute target (that is, it's mana
 
 * After the run completes, you can query runs and metrics. In the flow diagram below, this step occurs when the training compute target writes the run metrics back to Azure Machine Learning from storage in the Cosmos DB database. Clients can call Azure Machine Learning. Machine Learning will in turn pull metrics from the Cosmos DB database and return them back to the client.
 
 [![Training workflow](media/concept-azure-machine-learning-architecture/training-and-metrics.png)](media/concept-azure-machine-learning-architecture/training-and-metrics.png#lightbox)
 
 ## Models
 
@@ -217,7 +217,7 @@ Here are the details:
 
 * Scoring request details are stored in Application Insights, which is in the user's subscription.
 * Telemetry is also pushed to the Microsoft Azure subscription.
 
 [![Inference workflow](media/concept-azure-machine-learning-architecture/inferencing.png)](media/concept-azure-machine-learning-architecture/inferencing.png#lightbox)
 
 For an example of deploying a model as a web service, see [Tutorial: Train and deploy a model](../tutorial-train-deploy-notebook.md).
 
@@ -248,13 +248,13 @@ Pipeline steps are reusable, and can be run without rerunning the previous steps
 
 Azure Machine Learning provides the following monitoring and logging capabilities:
 
-* For __Data Scientists__, you can monitor your experiments and log information from your training runs. For more information, see the following articles:
+* For **Data Scientists**, you can monitor your experiments and log information from your training runs. For more information, see the following articles:
   * [Start, monitor, and cancel training runs](../how-to-track-monitor-analyze-runs.md)
   * [Log metrics for training runs](../how-to-log-view-metrics.md)
   * [Track experiments with MLflow](../how-to-use-mlflow.md)
   * [Visualize runs with TensorBoard](../how-to-monitor-tensorboard.md)
-* For __Administrators__, you can monitor information about the workspace, related Azure resources, and events such as resource creation and deletion by using Azure Monitor. For more information, see [How to monitor Azure Machine Learning](../monitor-azure-machine-learning.md).
-* For __DevOps__ or __MLOps__, you can monitor information generated by models deployed as web services to identify problems with the deployments and gather data submitted to the service. For more information, see [Collect model data](../how-to-enable-data-collection.md) and [Monitor with Application Insights](../how-to-enable-app-insights.md).
+* For **Administrators**, you can monitor information about the workspace, related Azure resources, and events such as resource creation and deletion by using Azure Monitor. For more information, see [How to monitor Azure Machine Learning](../monitor-azure-machine-learning.md).
+* For **DevOps** or **MLOps**, you can monitor information generated by models deployed as web services to identify problems with the deployments and gather data submitted to the service. 
For more information, see [Collect model data](../how-to-enable-data-collection.md) and [Monitor with Application Insights](../how-to-enable-app-insights.md). ## Interacting with your workspace diff --git a/articles/machine-learning/v1/concept-secure-network-traffic-flow/azure-machine-learning-docker-images.png b/articles/machine-learning/v1/concept-secure-network-traffic-flow/azure-machine-learning-docker-images.png deleted file mode 100644 index b87f85f8ee25f..0000000000000 Binary files a/articles/machine-learning/v1/concept-secure-network-traffic-flow/azure-machine-learning-docker-images.png and /dev/null differ diff --git a/articles/machine-learning/v1/concept-secure-network-traffic-flow/compute-instance-and-cluster.png b/articles/machine-learning/v1/concept-secure-network-traffic-flow/compute-instance-and-cluster.png deleted file mode 100644 index c78b02b6905c1..0000000000000 Binary files a/articles/machine-learning/v1/concept-secure-network-traffic-flow/compute-instance-and-cluster.png and /dev/null differ diff --git a/articles/machine-learning/v1/concept-secure-network-traffic-flow/storage-traffic-studio.png b/articles/machine-learning/v1/concept-secure-network-traffic-flow/storage-traffic-studio.png deleted file mode 100644 index f1ed2ab1ed2c3..0000000000000 Binary files a/articles/machine-learning/v1/concept-secure-network-traffic-flow/storage-traffic-studio.png and /dev/null differ diff --git a/articles/machine-learning/v1/concept-secure-network-traffic-flow/workspace-traffic-studio.png b/articles/machine-learning/v1/concept-secure-network-traffic-flow/workspace-traffic-studio.png deleted file mode 100644 index 9e1baa79fed98..0000000000000 Binary files a/articles/machine-learning/v1/concept-secure-network-traffic-flow/workspace-traffic-studio.png and /dev/null differ diff --git a/articles/machine-learning/v1/how-to-access-data.md b/articles/machine-learning/v1/how-to-access-data.md index ff1c7ab4fd370..53dddc6457435 100644 --- a/articles/machine-learning/v1/how-to-access-data.md +++ b/articles/machine-learning/v1/how-to-access-data.md @@ -18,7 +18,7 @@ ms.custom: contperf-fy21q1, devx-track-python, data4ml, event-tier1-build-2022 > [!div class="op_single_selector" title1="Select the version of Azure Machine Learning developer platform you are using:"] > * [v1](how-to-access-data.md) -> * [v2 (current version)](../how-to-access-data.md) +> * [v2 (current version)](../how-to-datastore.md) [!INCLUDE [sdk v1](../../../includes/machine-learning-sdk-v1.md)] [!INCLUDE [cli v1](../../../includes/machine-learning-cli-v1.md)] @@ -72,10 +72,10 @@ Datastores currently support storing connection information to the storage servi | Storage type | Authentication type | [Azure Machine Learning studio](https://ml.azure.com/) | [Azure Machine Learning  Python SDK](/python/api/overview/azure/ml/intro) | [Azure Machine Learning CLI](reference-azure-machine-learning-cli.md) | [Azure Machine Learning  REST API](/rest/api/azureml/) | VS Code ---|---|---|---|---|---|--- -[Azure Blob Storage](/azure/storage/blobs/storage-blobs-overview)| Account key
                  SAS token | ✓ | ✓ | ✓ |✓ |✓ -[Azure File Share](/azure/storage/files/storage-files-introduction)| Account key
                  SAS token | ✓ | ✓ | ✓ |✓|✓ -[Azure Data Lake Storage Gen 1](/azure/data-lake-store/)| Service principal| ✓ | ✓ | ✓ |✓| -[Azure Data Lake Storage Gen 2](/azure/storage/blobs/data-lake-storage-introduction)| Service principal| ✓ | ✓ | ✓ |✓| +[Azure Blob Storage](../../storage/blobs/storage-blobs-overview.md)| Account key
                  SAS token | ✓ | ✓ | ✓ |✓ |✓ +[Azure File Share](../../storage/files/storage-files-introduction.md)| Account key
                  SAS token | ✓ | ✓ | ✓ |✓|✓ +[Azure Data Lake Storage Gen 1](../../data-lake-store/index.yml)| Service principal| ✓ | ✓ | ✓ |✓| +[Azure Data Lake Storage Gen 2](../../storage/blobs/data-lake-storage-introduction.md)| Service principal| ✓ | ✓ | ✓ |✓| [Azure SQL Database](/azure/azure-sql/database/sql-database-paas-overview)| SQL authentication
                  Service principal| ✓ | ✓ | ✓ |✓| [Azure PostgreSQL](/azure/postgresql/overview) | SQL authentication| ✓ | ✓ | ✓ |✓| [Azure Database for MySQL](/azure/mysql/overview) | SQL authentication| | ✓* | ✓* |✓*| @@ -87,9 +87,9 @@ Datastores currently support storing connection information to the storage servi ### Storage guidance -We recommend creating a datastore for an [Azure Blob container](/azure/storage/blobs/storage-blobs-introduction). Both standard and premium storage are available for blobs. Although premium storage is more expensive, its faster throughput speeds might improve the speed of your training runs, particularly if you train against a large dataset. For information about the cost of storage accounts, see the [Azure pricing calculator](https://azure.microsoft.com/pricing/calculator/?service=machine-learning-service). +We recommend creating a datastore for an [Azure Blob container](../../storage/blobs/storage-blobs-introduction.md). Both standard and premium storage are available for blobs. Although premium storage is more expensive, its faster throughput speeds might improve the speed of your training runs, particularly if you train against a large dataset. For information about the cost of storage accounts, see the [Azure pricing calculator](https://azure.microsoft.com/pricing/calculator/?service=machine-learning-service). -[Azure Data Lake Storage Gen2](/azure/storage/blobs/data-lake-storage-introduction) is built on top of Azure Blob storage and designed for enterprise big data analytics. A fundamental part of Data Lake Storage Gen2 is the addition of a [hierarchical namespace](/azure/storage/blobs/data-lake-storage-namespace) to Blob storage. The hierarchical namespace organizes objects/files into a hierarchy of directories for efficient data access. +[Azure Data Lake Storage Gen2](../../storage/blobs/data-lake-storage-introduction.md) is built on top of Azure Blob storage and designed for enterprise big data analytics. A fundamental part of Data Lake Storage Gen2 is the addition of a [hierarchical namespace](../../storage/blobs/data-lake-storage-namespace.md) to Blob storage. The hierarchical namespace organizes objects/files into a hierarchy of directories for efficient data access. ## Storage access and permissions @@ -100,7 +100,7 @@ To ensure you securely connect to your Azure storage service, Azure Machine Lear ### Virtual network -Azure Machine Learning requires extra configuration steps to communicate with a storage account that is behind a firewall or within a virtual network. If your storage account is behind a firewall, you can [add your client's IP address to an allowlist](/azure/storage/common/storage-network-security#managing-ip-network-rules) via the Azure portal. +Azure Machine Learning requires extra configuration steps to communicate with a storage account that is behind a firewall or within a virtual network. If your storage account is behind a firewall, you can [add your client's IP address to an allowlist](../../storage/common/storage-network-security.md#managing-ip-network-rules) via the Azure portal. Azure Machine Learning can receive requests from clients outside of the virtual network. To ensure that the entity requesting data from the service is safe and to enable data being displayed in your workspace, [use a private endpoint with your workspace](../how-to-configure-private-link.md). 
@@ -137,7 +137,7 @@ You can find account key, SAS token, and service principal information on your [ ### Permissions -For Azure blob container and Azure Data Lake Gen 2 storage, make sure your authentication credentials have **Storage Blob Data Reader** access. Learn more about [Storage Blob Data Reader](/azure/role-based-access-control/built-in-roles#storage-blob-data-reader). An account SAS token defaults to no permissions. +For Azure blob container and Azure Data Lake Gen 2 storage, make sure your authentication credentials have **Storage Blob Data Reader** access. Learn more about [Storage Blob Data Reader](../../role-based-access-control/built-in-roles.md#storage-blob-data-reader). An account SAS token defaults to no permissions. * For data **read access**, your authentication credentials must have a minimum of list and read permissions for containers and objects. * For data **write access**, write and add permissions also are required. @@ -158,7 +158,7 @@ Within this section are examples for how to create and register a datastore via If you prefer a low code experience, see [Connect to data with Azure Machine Learning studio](../how-to-connect-data-ui.md). >[!IMPORTANT] -> If you unregister and re-register a datastore with the same name, and it fails, the Azure Key Vault for your workspace may not have soft-delete enabled. By default, soft-delete is enabled for the key vault instance created by your workspace, but it may not be enabled if you used an existing key vault or have a workspace created prior to October 2020. For information on how to enable soft-delete, see [Turn on Soft Delete for an existing key vault](/azure/key-vault/general/soft-delete-change#turn-on-soft-delete-for-an-existing-key-vault). +> If you unregister and re-register a datastore with the same name, and it fails, the Azure Key Vault for your workspace may not have soft-delete enabled. By default, soft-delete is enabled for the key vault instance created by your workspace, but it may not be enabled if you used an existing key vault or have a workspace created prior to October 2020. For information on how to enable soft-delete, see [Turn on Soft Delete for an existing key vault](../../key-vault/general/soft-delete-change.md#turn-on-soft-delete-for-an-existing-key-vault). > [!NOTE] @@ -204,9 +204,9 @@ file_datastore = Datastore.register_azure_file_share(workspace=ws, ### Azure Data Lake Storage Generation 2 -For an Azure Data Lake Storage Generation 2 (ADLS Gen 2) datastore, use [register_azure_data_lake_gen2()](/python/api/azureml-core/azureml.core.datastore.datastore#register-azure-data-lake-gen2-workspace--datastore-name--filesystem--account-name--tenant-id--client-id--client-secret--resource-url-none--authority-url-none--protocol-none--endpoint-none--overwrite-false-) to register a credential datastore connected to an Azure DataLake Gen 2 storage with [service principal permissions](/azure/active-directory/develop/howto-create-service-principal-portal). +For an Azure Data Lake Storage Generation 2 (ADLS Gen 2) datastore, use [register_azure_data_lake_gen2()](/python/api/azureml-core/azureml.core.datastore.datastore#register-azure-data-lake-gen2-workspace--datastore-name--filesystem--account-name--tenant-id--client-id--client-secret--resource-url-none--authority-url-none--protocol-none--endpoint-none--overwrite-false-) to register a credential datastore connected to an Azure DataLake Gen 2 storage with [service principal permissions](../../active-directory/develop/howto-create-service-principal-portal.md). 
-In order to utilize your service principal, you need to [register your application](/azure/active-directory/develop/app-objects-and-service-principals) and grant the service principal data access via either Azure role-based access control (Azure RBAC) or access control lists (ACL). Learn more about [access control set up for ADLS Gen 2](/azure/storage/blobs/data-lake-storage-access-control-model). +In order to utilize your service principal, you need to [register your application](../../active-directory/develop/app-objects-and-service-principals.md) and grant the service principal data access via either Azure role-based access control (Azure RBAC) or access control lists (ACL). Learn more about [access control set up for ADLS Gen 2](../../storage/blobs/data-lake-storage-access-control-model.md). The following code creates and registers the `adlsgen2_datastore_name` datastore to the `ws` workspace. This datastore accesses the file system `test` in the `account_name` storage account, by using the provided service principal credentials. Review the [storage access & permissions](#storage-access-and-permissions) section for guidance on virtual network scenarios, and where to find required authentication credentials. @@ -296,7 +296,7 @@ For situations where the SDK doesn't provide access to datastores, you might be ## Move data to supported Azure storage solutions -Azure Machine Learning supports accessing data from Azure Blob storage, Azure Files, Azure Data Lake Storage Gen1, Azure Data Lake Storage Gen2, Azure SQL Database, and Azure Database for PostgreSQL. If you're using unsupported storage, we recommend that you move your data to supported Azure storage solutions by using [Azure Data Factory and these steps](/azure/data-factory/quickstart-create-data-factory-copy-data-tool.). Moving data to supported storage can help you save data egress costs during machine learning experiments. +Azure Machine Learning supports accessing data from Azure Blob storage, Azure Files, Azure Data Lake Storage Gen1, Azure Data Lake Storage Gen2, Azure SQL Database, and Azure Database for PostgreSQL. If you're using unsupported storage, we recommend that you move your data to supported Azure storage solutions by using [Azure Data Factory and these steps](/azure/data-factory/quickstart-create-data-factory-copy-data-tool). Moving data to supported storage can help you save data egress costs during machine learning experiments. Azure Data Factory provides efficient and resilient data transfer with more than 80 prebuilt connectors at no extra cost. These connectors include Azure data services, on-premises data sources, Amazon S3 and Redshift, and Google BigQuery. 
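For reference, a minimal sketch of the `adlsgen2_datastore_name` registration described earlier might look like the following. The service principal values are placeholders and are assumed to be defined securely elsewhere; `ws` is the workspace object.

```python
from azureml.core import Datastore

adlsgen2_datastore_name = 'adlsgen2datastore'

# Placeholder service principal credentials; in practice, retrieve these from a secure location.
tenant_id = '<tenant-id>'
client_id = '<client-id>'
client_secret = '<client-secret>'

adlsgen2_datastore = Datastore.register_azure_data_lake_gen2(
    workspace=ws,
    datastore_name=adlsgen2_datastore_name,
    filesystem='test',            # file system name from the article's example
    account_name='account_name',  # storage account name from the article's example
    tenant_id=tenant_id,
    client_id=client_id,
    client_secret=client_secret)
```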
diff --git a/articles/machine-learning/v1/how-to-attach-compute-targets.md b/articles/machine-learning/v1/how-to-attach-compute-targets.md index 4013760f7b084..395a3318cb16a 100644 --- a/articles/machine-learning/v1/how-to-attach-compute-targets.md +++ b/articles/machine-learning/v1/how-to-attach-compute-targets.md @@ -32,7 +32,7 @@ In this article, learn how to set up your workspace to use these compute resourc * Azure Databricks - used as a training compute target only in [machine learning pipelines](../how-to-create-machine-learning-pipelines.md) * Azure Data Lake Analytics * Azure Container Instance -* Azure Kubernetes Service & Azure Arc-enabled Kubernetes (preview) +* Azure Machine Learning Kubernetes To use compute targets managed by Azure Machine Learning, see: @@ -344,14 +344,9 @@ For a more detailed example, see an [example notebook](https://aka.ms/pl-adla) o Azure Container Instances (ACI) are created dynamically when you deploy a model. You cannot create or attach ACI to your workspace in any other way. For more information, see [Deploy a model to Azure Container Instances](how-to-deploy-azure-container-instance.md). -## Kubernetes (preview) +## Kubernetes -Azure Machine Learning provides you with the following options to attach your own Kubernetes clusters for training and inferencing: - -* [Azure Kubernetes Service](../../aks/intro-kubernetes.md). Azure Kubernetes Service provides a managed cluster in Azure. -* [Azure Arc Kubernetes](../../azure-arc/kubernetes/overview.md). Use Azure Arc-enabled Kubernetes clusters if your cluster is hosted outside of Azure. - -[!INCLUDE [arc-enabled-machine-learning-create-training-compute](../../../includes/machine-learning-create-arc-enabled-training-computer-target.md)] +Azure Machine Learning provides you with the option to attach your own Kubernetes clusters for training and inferencing. See [Configure Kubernetes cluster for Azure Machine Learning](../how-to-attach-kubernetes-anywhere.md). To detach a Kubernetes cluster from your workspace, use the following method: diff --git a/articles/machine-learning/v1/how-to-create-register-datasets.md b/articles/machine-learning/v1/how-to-create-register-datasets.md index 1d5218d9a2f6a..7a58d4493f626 100644 --- a/articles/machine-learning/v1/how-to-create-register-datasets.md +++ b/articles/machine-learning/v1/how-to-create-register-datasets.md @@ -18,13 +18,13 @@ ms.date: 05/11/2022 > [!div class="op_single_selector" title1="Select the version of Azure Machine Learning SDK you are using:"] > * [v1](how-to-create-register-datasets.md) -> * [v2 (current version)](../how-to-create-register-datasets.md) +> * [v2 (current version)](../how-to-create-register-data-assets.md) [!INCLUDE [sdk v1](../../../includes/machine-learning-sdk-v1.md)] In this article, you learn how to create Azure Machine Learning datasets to access data for your local or remote experiments with the Azure Machine Learning Python SDK. To understand where datasets fit in Azure Machine Learning's overall data access workflow, see the [Securely access data](concept-data.md#data-workflow) article. -By creating a dataset, you create a reference to the data source location, along with a copy of its metadata. Because the data remains in its existing location, you incur no extra storage cost, and don't risk the integrity of your data sources. Also datasets are lazily evaluated, which aids in workflow performance speeds. 
You can create datasets from datastores, public URLs, and [Azure Open Datasets](/azure/open-datasets/how-to-create-azure-machine-learning-dataset-from-open-dataset). +By creating a dataset, you create a reference to the data source location, along with a copy of its metadata. Because the data remains in its existing location, you incur no extra storage cost, and don't risk the integrity of your data sources. Also datasets are lazily evaluated, which aids in workflow performance speeds. You can create datasets from datastores, public URLs, and [Azure Open Datasets](../../open-datasets/how-to-create-azure-machine-learning-dataset-from-open-dataset.md). For a low-code experience, [Create Azure Machine Learning datasets with the Azure Machine Learning studio.](../how-to-connect-data-ui.md#create-datasets) @@ -105,7 +105,7 @@ For the data to be accessible by Azure Machine Learning, datasets must be create To create datasets from a datastore with the Python SDK: -1. Verify that you have `contributor` or `owner` access to the underlying storage service of your registered Azure Machine Learning datastore. [Check your storage account permissions in the Azure portal](/azure/role-based-access-control/check-access). +1. Verify that you have `contributor` or `owner` access to the underlying storage service of your registered Azure Machine Learning datastore. [Check your storage account permissions in the Azure portal](../../role-based-access-control/check-access.md). 1. Create the dataset by referencing paths in the datastore. You can create a dataset from multiple paths in multiple datastores. There is no hard limit on the number of files or data size that you can create a dataset from. @@ -388,4 +388,4 @@ titanic_ds = titanic_ds.register(workspace = workspace, * Learn [how to train with datasets](../how-to-train-with-datasets.md). * Use automated machine learning to [train with TabularDatasets](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb). -* For more dataset training examples, see the [sample notebooks](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/work-with-data/). +* For more dataset training examples, see the [sample notebooks](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/work-with-data/). \ No newline at end of file diff --git a/articles/machine-learning/v1/how-to-identity-based-data-access.md b/articles/machine-learning/v1/how-to-identity-based-data-access.md new file mode 100644 index 0000000000000..0c68bbc2951bf --- /dev/null +++ b/articles/machine-learning/v1/how-to-identity-based-data-access.md @@ -0,0 +1,250 @@ +--- +title: Identity-based data access to storage services (v1) +titleSuffix: Machine Learning +description: Learn how to use identity-based data access to connect to storage services on Azure with Azure Machine Learning datastores and the Machine Learning Python SDK v1. +ms.service: machine-learning +ms.subservice: enterprise-readiness +ms.topic: how-to +ms.author: yogipandey +author: ynpandey +ms.reviewer: nibaccam +ms.date: 01/25/2022 +ms.custom: contperf-fy21q1, devx-track-python, data4ml + +# Customer intent: As an experienced Python developer, I need to make my data in Azure Storage available to my compute for training my machine learning models. 
+--- + +# Connect to storage by using identity-based data access with SDK v1 + +In this article, you learn how to connect to storage services on Azure by using identity-based data access and Azure Machine Learning datastores via the [Azure Machine Learning SDK for Python](/python/api/overview/azure/ml/intro). + +Typically, datastores use **credential-based authentication** to confirm you have permission to access the storage service. They keep connection information, like your subscription ID and token authorization, in the [key vault](https://azure.microsoft.com/services/key-vault/) that's associated with the workspace. When you create a datastore that uses **identity-based data access**, your Azure account ([Azure Active Directory token](../../active-directory/fundamentals/active-directory-whatis.md)) is used to confirm you have permission to access the storage service. In the **identity-based data access** scenario, no authentication credentials are saved. Only the storage account information is stored in the datastore. + +To create datastores with **identity-based** data access via the Azure Machine Learning studio UI, see [Connect to data with the Azure Machine Learning studio](../how-to-connect-data-ui.md#create-datastores). + +To create datastores that use **credential-based** authentication, like access keys or service principals, see [Connect to storage services on Azure](how-to-access-data.md). + +## Identity-based data access in Azure Machine Learning + +There are two scenarios in which you can apply identity-based data access in Azure Machine Learning. These scenarios are a good fit for identity-based access when you're working with confidential data and need more granular data access management: + +> [!WARNING] +> Identity-based data access is not supported for [automated ML experiments](../how-to-configure-auto-train.md). + +- Accessing storage services +- Training machine learning models with private data + +### Accessing storage services + +You can connect to storage services via identity-based data access with Azure Machine Learning datastores or [Azure Machine Learning datasets](how-to-create-register-datasets.md). + +Your authentication credentials are usually kept in a datastore, which is used to ensure you have permission to access the storage service. When these credentials are registered via datastores, any user with the workspace Reader role can retrieve them. That scale of access can be a security concern for some organizations. [Learn more about the workspace Reader role.](../how-to-assign-roles.md#default-roles) + +When you use identity-based data access, Azure Machine Learning prompts you for your Azure Active Directory token for data access authentication instead of keeping your credentials in the datastore. That approach allows for data access management at the storage level and keeps credentials confidential. + +The same behavior applies when you: + +* [Create a dataset directly from storage URLs](#use-data-in-storage). +* Work with data interactively via a Jupyter Notebook on your local computer or [compute instance](../concept-compute-instance.md). + +> [!NOTE] +> Credentials stored via credential-based authentication include subscription IDs, shared access signature (SAS) tokens, and storage access key and service principal information, like client IDs and tenant IDs. + +### Model training on private data + +Certain machine learning scenarios involve training models with private data. 
In such cases, data scientists need to run training workflows without being exposed to the confidential input data. In this scenario, a [managed identity](how-to-use-managed-identities.md) of the training compute is used for data access authentication. This approach allows storage admins to grant Storage Blob Data Reader access to the managed identity that the training compute uses to run the training job. The individual data scientists don't need to be granted access. For more information, see [Set up managed identity on a compute cluster](how-to-create-attach-compute-cluster.md#set-up-managed-identity). + +## Prerequisites + +- An Azure subscription. If you don't have an Azure subscription, create a free account before you begin. Try the [free or paid version of Azure Machine Learning](https://azure.microsoft.com/free/). + +- An Azure storage account with a supported storage type. These storage types are supported: + - [Azure Blob Storage](../../storage/blobs/storage-blobs-overview.md) + - [Azure Data Lake Storage Gen1](../../data-lake-store/index.yml) + - [Azure Data Lake Storage Gen2](../../storage/blobs/data-lake-storage-introduction.md) + - [Azure SQL Database](/azure/azure-sql/database/sql-database-paas-overview) + +- The [Azure Machine Learning SDK for Python](/python/api/overview/azure/ml/install). + +- An Azure Machine Learning workspace. + + Either [create an Azure Machine Learning workspace](../how-to-manage-workspace.md) or use an [existing one via the Python SDK](../how-to-manage-workspace.md#connect-to-a-workspace). + +## Create and register datastores + +When you register a storage service on Azure as a datastore, you automatically create and register that datastore to a specific workspace. See [Storage access permissions](#storage-access-permissions) for guidance on required permission types. You also have the option to manually create the storage you want to connect to without any special permissions, and you just need the name. + +See [Work with virtual networks](#work-with-virtual-networks) for details on how to connect to data storage behind virtual networks. + +In the following code, notice the absence of authentication parameters like `sas_token`, `account_key`, `subscription_id`, and the service principal `client_id`. This omission indicates that Azure Machine Learning will use identity-based data access for authentication. Creation of datastores typically happens interactively in a notebook or via the studio. So your Azure Active Directory token is used for data access authentication. + +> [!NOTE] +> Datastore names should consist only of lowercase letters, numbers, and underscores. + +### Azure blob container + +To register an Azure blob container as a datastore, use [`register_azure_blob_container()`](/python/api/azureml-core/azureml.core.datastore%28class%29#register-azure-blob-container-workspace--datastore-name--container-name--account-name--sas-token-none--account-key-none--protocol-none--endpoint-none--overwrite-false--create-if-not-exists-false--skip-validation-false--blob-cache-timeout-none--grant-workspace-access-false--subscription-id-none--resource-group-none-). + +The following code creates the `credentialless_blob` datastore, registers it to the `ws` workspace, and assigns it to the `blob_datastore` variable. This datastore accesses the `my_container_name` blob container on the `my-account-name` storage account. + +```Python +# Create blob datastore without credentials. 
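+# No sas_token, account_key, subscription_id, or service principal values are supplied below, so Azure Machine Learning uses identity-based (Azure AD) data access instead of stored credentials.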
+blob_datastore = Datastore.register_azure_blob_container(workspace=ws, + datastore_name='credentialless_blob', + container_name='my_container_name', + account_name='my_account_name') +``` + +### Azure Data Lake Storage Gen1 + +Use [register_azure_data_lake()](/python/api/azureml-core/azureml.core.datastore.datastore#register-azure-data-lake-workspace--datastore-name--store-name--tenant-id-none--client-id-none--client-secret-none--resource-url-none--authority-url-none--subscription-id-none--resource-group-none--overwrite-false--grant-workspace-access-false-) to register a datastore that connects to Azure Data Lake Storage Gen1. + +The following code creates the `credentialless_adls1` datastore, registers it to the `workspace` workspace, and assigns it to the `adls_dstore` variable. This datastore accesses the `adls_storage` Azure Data Lake Storage account. + +```Python +# Create Azure Data Lake Storage Gen1 datastore without credentials. +adls_dstore = Datastore.register_azure_data_lake(workspace = workspace, + datastore_name='credentialless_adls1', + store_name='adls_storage') + +``` + +### Azure Data Lake Storage Gen2 + +Use [register_azure_data_lake_gen2()](/python/api/azureml-core/azureml.core.datastore.datastore#register-azure-data-lake-gen2-workspace--datastore-name--filesystem--account-name--tenant-id--client-id--client-secret--resource-url-none--authority-url-none--protocol-none--endpoint-none--overwrite-false-) to register a datastore that connects to Azure Data Lake Storage Gen2. + +The following code creates the `credentialless_adls2` datastore, registers it to the `ws` workspace, and assigns it to the `adls2_dstore` variable. This datastore accesses the file system `tabular` in the `myadls2` storage account. + +```python +# Create Azure Data Lake Storage Gen2 datastore without credentials. +adls2_dstore = Datastore.register_azure_data_lake_gen2(workspace=ws, + datastore_name='credentialless_adls2', + filesystem='tabular', + account_name='myadls2') +``` + +### Azure SQL database + +For an Azure SQL database, use [register_azure_sql_database()](/python/api/azureml-core/azureml.core.datastore.datastore#register-azure-sql-database-workspace--datastore-name--server-name--database-name--tenant-id-none--client-id-none--client-secret-none--resource-url-none--authority-url-none--endpoint-none--overwrite-false--username-none--password-none--subscription-id-none--resource-group-none--grant-workspace-access-false----kwargs-) to register a datastore that connects to an Azure SQL database storage. + +The following code creates and registers the `credentialless_sqldb` datastore to the `ws` workspace and assigns it to the variable, `sqldb_dstore`. This datastore accesses the database `mydb` in the `myserver` SQL DB server. + +```python +# Create a sqldatabase datastore without credentials + +sqldb_dstore = Datastore.register_azure_sql_database(workspace=ws, + datastore_name='credentialless_sqldb', + server_name='myserver', + database_name='mydb') + +``` + + +## Storage access permissions + +To help ensure that you securely connect to your storage service on Azure, Azure Machine Learning requires that you have permission to access the corresponding data storage. +> [!WARNING] +> Cross tenant access to storage accounts is not supported. If cross tenant access is needed for your scenario, please reach out to the AzureML Data Support team alias at amldatasupport@microsoft.com for assistance with a custom code solution. 
+ +Identity-based data access supports connections to **only** the following storage services. + +* Azure Blob Storage +* Azure Data Lake Storage Gen1 +* Azure Data Lake Storage Gen2 +* Azure SQL Database + +To access these storage services, you must have at least [Storage Blob Data Reader](../../role-based-access-control/built-in-roles.md#storage-blob-data-reader) access to the storage account. Only storage account owners can [change your access level via the Azure portal](../../storage/blobs/assign-azure-role-data-access.md). + +If you prefer not to use your user identity (Azure Active Directory), you also have the option to grant a workspace system-assigned managed identity (MSI) permission to create the datastore. To do so, you must have Owner permissions on the storage account and add the `grant_workspace_access=True` parameter to your data register method. + +If you're training a model on a remote compute target and want to access the data for training, the compute identity must be granted at least the Storage Blob Data Reader role from the storage service. Learn how to [set up managed identity on a compute cluster](how-to-create-attach-compute-cluster.md#set-up-managed-identity). + +## Work with virtual networks + +By default, Azure Machine Learning can't communicate with a storage account that's behind a firewall or in a virtual network. + +You can configure storage accounts to allow access only from within specific virtual networks. This configuration requires additional steps to ensure data isn't leaked outside of the network. This behavior is the same for credential-based data access. For more information, see [How to configure virtual network scenarios](how-to-access-data.md#virtual-network). + +If your storage account has virtual network settings, those settings dictate what identity type and permissions are needed for access. For example, for data preview and data profile, the virtual network settings determine what type of identity is used to authenticate data access. + +* In scenarios where only certain IPs and subnets are allowed to access the storage, Azure Machine Learning uses the workspace MSI to accomplish data previews and profiles. + +* If your storage is ADLS Gen 2 or Blob and has virtual network settings, you can use either your user identity or the workspace MSI, depending on the datastore settings defined during creation. + +* If the virtual network setting is "Allow Azure services on the trusted services list to access this storage account", then the workspace MSI is used. + +## Use data in storage + +We recommend that you use [Azure Machine Learning datasets](how-to-create-register-datasets.md) when you interact with your data in storage with Azure Machine Learning. + +> [!IMPORTANT] +> Datasets using identity-based data access are not supported for [automated ML experiments](../how-to-configure-auto-train.md). + +Datasets package your data into a lazily evaluated consumable object for machine learning tasks like training. Also, with datasets you can [download or mount](../how-to-train-with-datasets.md#mount-vs-download) files of any format from Azure storage services like Azure Blob Storage and Azure Data Lake Storage to a compute target. + +To create a dataset, you can reference paths from datastores that also use identity-based data access. + +* If your underlying storage account type is Blob or ADLS Gen 2, your user identity needs the Storage Blob Data Reader role. +* If your underlying storage is ADLS Gen 1, permissions can be set via the storage's Access Control List (ACL).
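If such a datastore was registered earlier, a minimal sketch for getting a handle to it by name might look like the following; `credentialless_blob` is the example datastore name used earlier in this article, and `ws` is the workspace object.

```python
from azureml.core import Datastore

# Retrieve an existing identity-based datastore by the name it was registered with.
blob_datastore = Datastore.get(ws, 'credentialless_blob')
```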
+ +In the following example, `blob_datastore` already exists and uses identity-based data access. + +```python +blob_dataset = Dataset.Tabular.from_delimited_files(blob_datastore,'test.csv') +``` + +Another option is to skip datastore creation and create datasets directly from storage URLs. This functionality currently supports only Azure blobs and Azure Data Lake Storage Gen1 and Gen2. For creation based on storage URL, only the user identity is needed to authenticate. + +```python +blob_dset = Dataset.File.from_files('https://myblob.blob.core.windows.net/may/keras-mnist-fashion/') +``` + +When you submit a training job that consumes a dataset created with identity-based data access, the managed identity of the training compute is used for data access authentication. Your Azure Active Directory token isn't used. For this scenario, ensure that the managed identity of the compute is granted at least the Storage Blob Data Reader role from the storage service. For more information, see [Set up managed identity on compute clusters](how-to-create-attach-compute-cluster.md#set-up-managed-identity). + +## Access data for training jobs on compute clusters (preview) + +[!INCLUDE [cli v2](../../../includes/machine-learning-cli-v2.md)] + +When training on [Azure Machine Learning compute clusters](how-to-create-attach-compute-cluster.md#what-is-a-compute-cluster), you can authenticate to storage with your Azure Active Directory token. + +This authentication mode allows you to: +* Set up fine-grained permissions, where different workspace users can have access to different storage accounts or folders within storage accounts. +* Audit storage access because the storage logs show which identities were used to access data. + +> [!WARNING] +> This functionality has the following limitations: +> * The feature is only supported for experiments submitted via the [Azure Machine Learning CLI](../how-to-configure-cli.md). +> * Only CommandJobs and PipelineJobs with CommandSteps and AutoMLSteps are supported. +> * User identity and compute managed identity cannot be used for authentication within the same job. + +The following steps outline how to set up identity-based data access for training jobs on compute clusters. + +1. Grant the user identity access to storage resources. For example, grant Storage Blob Data Reader access to the specific storage account you want to use, or grant ACL-based permission to specific folders or files in Azure Data Lake Gen 2 storage. + +1. Create an Azure Machine Learning datastore without cached credentials for the storage account. If a datastore has cached credentials, such as a storage account key, those credentials are used instead of the user identity. + +1. Submit a training job with the **identity** property set to **type: user_identity**, as shown in the following job specification. During the training job, the authentication to storage happens via the identity of the user that submits the job. + +> [!NOTE] +> If the **identity** property is left unspecified and the datastore does not have cached credentials, then the compute managed identity becomes the fallback option.
+ +```yaml +command: | + echo "--census-csv: ${{inputs.census_csv}}" + python hello-census.py --census-csv ${{inputs.census_csv}} +code: src +inputs: + census_csv: + type: uri_file + path: azureml://datastores/mydata/paths/census.csv +environment: azureml:AzureML-sklearn-1.0-ubuntu20.04-py38-cpu@latest +compute: azureml:cpu-cluster +identity: + type: user_identity +``` + +## Next steps + +* [Create an Azure Machine Learning dataset](how-to-create-register-datasets.md) +* [Train with datasets](../how-to-train-with-datasets.md) +* [Create a datastore with key-based data access](how-to-access-data.md) \ No newline at end of file diff --git a/articles/machine-learning/v1/how-to-track-monitor-analyze-runs.md b/articles/machine-learning/v1/how-to-track-monitor-analyze-runs.md index 780409e66928f..c13134ccfbe2c 100644 --- a/articles/machine-learning/v1/how-to-track-monitor-analyze-runs.md +++ b/articles/machine-learning/v1/how-to-track-monitor-analyze-runs.md @@ -441,7 +441,7 @@ root_run(current_child_run).log("MyMetric", f"Data from child run {current_child 1. In the **Destination details**, select the **Send to Log Analytics workspace** and specify the **Subscription** and **Log Analytics workspace**. > [!NOTE] - > The **Azure Log Analytics Workspace** is a different type of Azure Resource than the **Azure Machine Learning service Workspace**. If there are no options in that list, you can [create a Log Analytics Workspace](/azure/azure-monitor/logs/quick-create-workspace). + > The **Azure Log Analytics Workspace** is a different type of Azure Resource than the **Azure Machine Learning service Workspace**. If there are no options in that list, you can [create a Log Analytics Workspace](../../azure-monitor/logs/quick-create-workspace.md). ![Screenshot of configuring the email notification.](./media/how-to-track-monitor-analyze-runs/log-location.png) @@ -449,7 +449,7 @@ root_run(current_child_run).log("MyMetric", f"Data from child run {current_child ![Screeenshot of the new alert rule.](./media/how-to-track-monitor-analyze-runs/new-alert-rule.png) -1. See [how to create and manage log alerts using Azure Monitor](/azure/azure-monitor/alerts/alerts-log). +1. See [how to create and manage log alerts using Azure Monitor](../../azure-monitor/alerts/alerts-log.md). ## Example notebooks @@ -462,4 +462,4 @@ The following notebooks demonstrate the concepts in this article: ## Next steps * To learn how to log metrics for your experiments, see [Log metrics during training runs](../how-to-log-view-metrics.md). -* To learn how to monitor resources and logs from Azure Machine Learning, see [Monitoring Azure Machine Learning](../monitor-azure-machine-learning.md). +* To learn how to monitor resources and logs from Azure Machine Learning, see [Monitoring Azure Machine Learning](../monitor-azure-machine-learning.md). \ No newline at end of file diff --git a/articles/machine-learning/v1/how-to-use-managed-identities.md b/articles/machine-learning/v1/how-to-use-managed-identities.md index 794abcac358f4..ec37ab406c34d 100644 --- a/articles/machine-learning/v1/how-to-use-managed-identities.md +++ b/articles/machine-learning/v1/how-to-use-managed-identities.md @@ -22,7 +22,7 @@ ms.custom: cliv1, sdkv1, event-tier1-build-2022 > * [v1](how-to-use-managed-identities.md) > * [v2 (current version)](../how-to-use-managed-identities.md) -[Managed identities](/active-directory/managed-identities-azure-resources/overview) allow you to configure your workspace with the *minimum required permissions to access resources*. 
+[Managed identities](/azure/active-directory/managed-identities-azure-resources/overview) allow you to configure your workspace with the *minimum required permissions to access resources*. When configuring Azure Machine Learning workspace in trustworthy manner, it is important to ensure that different services associated with the workspace have the correct level of access. For example, during machine learning workflow the workspace needs access to Azure Container Registry (ACR) for Docker images, and storage accounts for training data. @@ -39,8 +39,8 @@ In this article, you'll learn how to use managed identities to: - An Azure Machine Learning workspace. For more information, see [Create an Azure Machine Learning workspace](../how-to-manage-workspace.md). - The [Azure CLI extension for Machine Learning service](reference-azure-machine-learning-cli.md) - The [Azure Machine Learning Python SDK](/python/api/overview/azure/ml/intro). -- To assign roles, the login for your Azure subscription must have the [Managed Identity Operator](/role-based-access-control/built-in-roles#managed-identity-operator) role, or other role that grants the required actions (such as __Owner__). -- You must be familiar with creating and working with [Managed Identities](/active-directory/managed-identities-azure-resources/overview). +- To assign roles, the login for your Azure subscription must have the [Managed Identity Operator](/azure/role-based-access-control/built-in-roles#managed-identity-operator) role, or other role that grants the required actions (such as __Owner__). +- You must be familiar with creating and working with [Managed Identities](/azure/active-directory/managed-identities-azure-resources/overview). ## Configure managed identities @@ -57,7 +57,7 @@ You can bring your own ACR with admin user disabled when you create the workspac If ACR admin user is disallowed by subscription policy, you should first create ACR without admin user, and then associate it with the workspace. Also, if you have existing ACR with admin user disabled, you can attach it to the workspace. -[Create ACR from Azure CLI](/container-registry/container-registry-get-started-azure-cli) without setting ```--admin-enabled``` argument, or from Azure portal without enabling admin user. Then, when creating Azure Machine Learning workspace, specify the Azure resource ID of the ACR. The following example demonstrates creating a new Azure ML workspace that uses an existing ACR: +[Create ACR from Azure CLI](/azure/container-registry/container-registry-get-started-azure-cli) without setting ```--admin-enabled``` argument, or from Azure portal without enabling admin user. Then, when creating Azure Machine Learning workspace, specify the Azure resource ID of the ACR. The following example demonstrates creating a new Azure ML workspace that uses an existing ACR: > [!TIP] > To get the value for the `--container-registry` parameter, use the [az acr show](/cli/azure/acr#az-acr-show) command to show information for your ACR. The `id` field contains the resource ID for your ACR. @@ -250,7 +250,7 @@ Once you've configured ACR without admin user as described earlier, you can acce ## Create workspace with user-assigned managed identity -When creating a workspace, you can bring your own [user-assigned managed identity](/active-directory/managed-identities-azure-resources/how-to-manage-ua-identity-cli) that will be used to access the associated resources: ACR, KeyVault, Storage, and App Insights. 
+When creating a workspace, you can bring your own [user-assigned managed identity](/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities) that will be used to access the associated resources: ACR, KeyVault, Storage, and App Insights. > [!IMPORTANT] > When creating a workspace with a user-assigned managed identity, you must create the associated resources yourself, and grant the managed identity roles on those resources. Use the [role assignment ARM template](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.machinelearningservices/machine-learning-dependencies-role-assignment) to make the assignments. diff --git a/articles/machine-learning/v1/introduction.md b/articles/machine-learning/v1/introduction.md index 15f7ff74f1e17..d52c78b70ffca 100644 --- a/articles/machine-learning/v1/introduction.md +++ b/articles/machine-learning/v1/introduction.md @@ -61,5 +61,5 @@ For more information on installing and using the different extensions, see the f For more information on installing and using the different SDK versions: -* `azureml-core` - [Install the Azure Machine Learning SDK (v1) for Python](/python/api/overview/azure/ml/install?view=azure-ml-py) +* `azureml-core` - [Install the Azure Machine Learning SDK (v1) for Python](/python/api/overview/azure/ml/install?view=azure-ml-py&preserve-view=true) * `azure-ai-ml` - [Install the Azure Machine Learning SDK (v2) for Python](https://aka.ms/sdk-v2-install)
diff --git a/articles/machine-learning/v1/media/concept-azure-machine-learning-architecture/code-snapshot.png b/articles/machine-learning/v1/media/concept-azure-machine-learning-architecture/code-snapshot.png deleted file mode 100644 index df92429a2270e..0000000000000 Binary files a/articles/machine-learning/v1/media/concept-azure-machine-learning-architecture/code-snapshot.png and /dev/null differ diff --git a/articles/machine-learning/v1/media/concept-azure-machine-learning-architecture/create-workspace.png b/articles/machine-learning/v1/media/concept-azure-machine-learning-architecture/create-workspace.png deleted file mode 100644 index 92a941d0e3536..0000000000000 Binary files a/articles/machine-learning/v1/media/concept-azure-machine-learning-architecture/create-workspace.png and /dev/null differ diff --git a/articles/machine-learning/v1/media/concept-azure-machine-learning-architecture/workflow.png b/articles/machine-learning/v1/media/concept-azure-machine-learning-architecture/workflow.png deleted file mode 100644 index 922c70a66255f..0000000000000 Binary files a/articles/machine-learning/v1/media/concept-azure-machine-learning-architecture/workflow.png and /dev/null differ
diff --git a/articles/machine-learning/v1/media/concept-data/dataset-workflow.svg b/articles/machine-learning/v1/media/concept-data/dataset-workflow.svg deleted file mode 100644 index ef36307079813..0000000000000 --- a/articles/machine-learning/v1/media/concept-data/dataset-workflow.svg +++ /dev/null @@ -1,306 +0,0 @@ [306 lines of deleted SVG markup not shown; the only recoverable text in the removed diagram is the label "AML-model2"]
diff --git a/articles/machine-learning/v1/media/how-to-log-view-metrics/designer-logging-pipeline.png b/articles/machine-learning/v1/media/how-to-log-view-metrics/designer-logging-pipeline.png deleted file mode 100644 index c69fdc700b794..0000000000000 Binary files a/articles/machine-learning/v1/media/how-to-log-view-metrics/designer-logging-pipeline.png and /dev/null differ diff --git a/articles/machine-learning/v1/media/how-to-log-view-metrics/experiment-page-metrics-across-runs.png b/articles/machine-learning/v1/media/how-to-log-view-metrics/experiment-page-metrics-across-runs.png deleted file mode 100644 index c911c255349ef..0000000000000 Binary files a/articles/machine-learning/v1/media/how-to-log-view-metrics/experiment-page-metrics-across-runs.png and /dev/null differ diff --git a/articles/machine-learning/v1/media/how-to-track-monitor-analyze-runs/custom-views-2.gif b/articles/machine-learning/v1/media/how-to-track-monitor-analyze-runs/custom-views-2.gif deleted file mode 100644 index a3a9610d2723f..0000000000000 Binary files a/articles/machine-learning/v1/media/how-to-track-monitor-analyze-runs/custom-views-2.gif and /dev/null differ diff --git a/articles/machine-learning/v1/media/how-to-track-monitor-analyze-runs/display-name-runs-list.png b/articles/machine-learning/v1/media/how-to-track-monitor-analyze-runs/display-name-runs-list.png deleted file mode 100644 index b13e44ef39fcd..0000000000000 Binary files a/articles/machine-learning/v1/media/how-to-track-monitor-analyze-runs/display-name-runs-list.png and /dev/null differ diff --git a/articles/machine-learning/v1/media/how-to-track-monitor-analyze-runs/display-name.gif b/articles/machine-learning/v1/media/how-to-track-monitor-analyze-runs/display-name.gif deleted file mode 100644 index 4c065605e09bd..0000000000000 Binary files a/articles/machine-learning/v1/media/how-to-track-monitor-analyze-runs/display-name.gif and /dev/null differ diff --git a/articles/machine-learning/v1/media/how-to-track-monitor-analyze-runs/run-description-2.gif b/articles/machine-learning/v1/media/how-to-track-monitor-analyze-runs/run-description-2.gif deleted file mode 100644 index 57d50c76b43cc..0000000000000 Binary files a/articles/machine-learning/v1/media/how-to-track-monitor-analyze-runs/run-description-2.gif and /dev/null differ diff --git a/articles/machine-learning/v1/media/how-to-track-monitor-analyze-runs/run-history.png b/articles/machine-learning/v1/media/how-to-track-monitor-analyze-runs/run-history.png deleted file mode 100644 index 8fd4952bd03fa..0000000000000 Binary files a/articles/machine-learning/v1/media/how-to-track-monitor-analyze-runs/run-history.png and /dev/null differ diff --git a/articles/machine-learning/v1/media/how-to-track-monitor-analyze-runs/run-tags.gif b/articles/machine-learning/v1/media/how-to-track-monitor-analyze-runs/run-tags.gif deleted file mode 100644 index b35570dd3634f..0000000000000 Binary files a/articles/machine-learning/v1/media/how-to-track-monitor-analyze-runs/run-tags.gif and /dev/null differ
diff --git a/articles/machine-learning/v1/toc.yml b/articles/machine-learning/v1/toc.yml index 2cfec3949ac77..c8ed44e7f0350 100644 --- a/articles/machine-learning/v1/toc.yml +++ b/articles/machine-learning/v1/toc.yml @@ -24,9 +24,15 @@ - name: Concepts items: - name: Work with data - items: + items: - name: Data access href: concept-data.md + - name: Data ingestion + href: ../concept-data-ingestion.md + - name: Data
processing + href: ../concept-optimize-data-processing.md + - name: Studio network data access + href: ../concept-network-data-access.md - name: Automated ML overview displayName: automl, auto ml href: concept-automated-ml-v1.md @@ -99,22 +105,55 @@ - name: Work with data items: - name: Access data - items: - - name: Connect to storage with datastores + items: + - name: Connect to Azure storage with datastores + displayName: blob, get, fileshare, access, mount, download, data lake, datastore href: how-to-access-data.md + - name: Identity-based data access to storage + displayName: blob, access, data lake, datastore, managed identity + href: how-to-identity-based-data-access.md - name: Get data from storage with datasets - href: how-to-create-register-datasets.md - - name: Manage & consume data - items: - - name: Train with datasets - displayName: data, dataset, mount - href: ../how-to-train-with-datasets.md - - name: Detect drift on datasets - displayName: data, dataset - href: ../how-to-monitor-datasets.md - - name: Version & track datasets - displayName: data, data set - href: ../how-to-version-track-datasets.md + displayName: data, data set, register, access data + href: how-to-create-register-datasets.md + - name: Connect to data (UI) + displayName: blob, get, fileshare, access, mount, download, data lake, datastore, dataset, data set + href: ../how-to-connect-data-ui.md + - name: Manage & consume data + items: + - name: Train with datasets + displayName: data, dataset, mount + href: ../how-to-train-with-datasets.md + - name: Detect drift on datasets + displayName: data, dataset + href: ../how-to-monitor-datasets.md + - name: Version & track datasets + displayName: data, data set + href: ../how-to-version-track-datasets.md + - name: Create datasets with labels + displayName: data, labels, torchvision + href: ../how-to-use-labeled-dataset.md + - name: Get & prepare data + items: + - name: Data ingestion with Azure Data Factory + displayName: data, ingestion, adf + href: ../how-to-data-ingest-adf.md + - name: Data preparation with Azure Synapse + displayName: data, data prep, spark, spark pool, cluster, spark cluster,dataset, datastore + href: ../how-to-data-prep-synapse-spark-pool.md + - name: DevOps for data ingestion + displayName: data, ingestion, devops + href: ../how-to-cicd-data-ingestion.md + - name: Import data in the designer + displayName: designer, data, import, dataset, datastore + href: ../how-to-designer-import-data.md + - name: Compliance + items: + - name: Preserve data privacy + displayName: data,privacy,differential privacy + href: ../how-to-differential-privacy.md + - name: Export and delete data + displayName: GDPR + href: ../how-to-export-delete-data.md - name: Train models items: - name: Train with SDK v1 diff --git a/articles/machine-learning/v1/tutorial-pipeline-python-sdk.md b/articles/machine-learning/v1/tutorial-pipeline-python-sdk.md index ec253c5ee7740..dab6eb8a7e0cb 100644 --- a/articles/machine-learning/v1/tutorial-pipeline-python-sdk.md +++ b/articles/machine-learning/v1/tutorial-pipeline-python-sdk.md @@ -174,7 +174,7 @@ The code that you've executed so far has create and controlled Azure resources. If you're following along with the example in the [AzureML Examples repo](https://github.com/Azure/azureml-examples/tree/main/python-sdk/tutorials/using-pipelines), the source file is already available as `keras-mnist-fashion/prepare.py`. -If you're working from scratch, create a subdirectory called `kera-mnist-fashion/`. 
Create a new file, add the following code to it, and name the file `prepare.py`. +If you're working from scratch, create a subdirectory called `keras-mnist-fashion/`. Create a new file, add the following code to it, and name the file `prepare.py`. ```python # prepare.py diff --git a/articles/managed-grafana/how-to-api-calls.md b/articles/managed-grafana/how-to-api-calls.md index 48449e5721c32..2dacd0645c1aa 100644 --- a/articles/managed-grafana/how-to-api-calls.md +++ b/articles/managed-grafana/how-to-api-calls.md @@ -16,7 +16,7 @@ In this article, you'll learn how to call Grafana APIs within Azure Managed Graf ## Prerequisites - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/dotnet). -- An Azure Managed Grafana workspace. If you don't have one yet, [create a workspace](/azure/managed-grafana/quickstart-managed-grafana-portal). +- An Azure Managed Grafana workspace. If you don't have one yet, [create a workspace](./quickstart-managed-grafana-portal.md). ## Sign in to Azure @@ -70,4 +70,4 @@ Replace `` with the access token retrieved in the previous step an ## Next steps > [!div class="nextstepaction"] -> [Grafana UI](./grafana-app-ui.md) +> [Grafana UI](./grafana-app-ui.md) \ No newline at end of file diff --git a/articles/managed-grafana/how-to-data-source-plugins-managed-identity.md b/articles/managed-grafana/how-to-data-source-plugins-managed-identity.md index aa234f5f18964..d48d2c21c203f 100644 --- a/articles/managed-grafana/how-to-data-source-plugins-managed-identity.md +++ b/articles/managed-grafana/how-to-data-source-plugins-managed-identity.md @@ -13,7 +13,7 @@ ms.date: 3/31/2022 ## Prerequisites - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/dotnet). -- An Azure Managed Grafana workspace. If you don't have one yet, [create a workspace](/azure/managed-grafana/how-to-permissions). +- An Azure Managed Grafana workspace. If you don't have one yet, [create a workspace](./how-to-permissions.md). - A resource including monitoring data with Managed Grafana monitoring permissions. Read [how to configure permissions](how-to-permissions.md) for more information. ## Sign in to Azure @@ -70,4 +70,4 @@ Authentication and authorization are subsequently made through the provided mana > [!div class="nextstepaction"] > [Modify access permissions to Azure Monitor](./how-to-permissions.md) -> [Share an Azure Managed Grafana workspace](./how-to-share-grafana-workspace.md) +> [Share an Azure Managed Grafana workspace](./how-to-share-grafana-workspace.md) \ No newline at end of file diff --git a/articles/managed-grafana/how-to-monitor-managed-grafana-workspace.md b/articles/managed-grafana/how-to-monitor-managed-grafana-workspace.md index be3b26e69f3dd..b92ee6abfe82f 100644 --- a/articles/managed-grafana/how-to-monitor-managed-grafana-workspace.md +++ b/articles/managed-grafana/how-to-monitor-managed-grafana-workspace.md @@ -15,7 +15,7 @@ In this article, you'll learn how to monitor an Azure Managed Grafana Preview wo ## Prerequisites - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/dotnet). -- An Azure Managed Grafana workspace with access to at least one data source. If you don't have a workspace yet, [create an Azure Managed Grafana workspace](/azure/managed-grafana/how-to-permissions) and [add a data source](how-to-data-source-plugins-managed-identity.md). 
+- An Azure Managed Grafana workspace with access to at least one data source. If you don't have a workspace yet, [create an Azure Managed Grafana workspace](./how-to-permissions.md) and [add a data source](how-to-data-source-plugins-managed-identity.md). ## Sign in to Azure @@ -73,4 +73,4 @@ Now that you've configured your diagnostic settings, Azure will stream all new e > [!div class="nextstepaction"] > [Grafana UI](./grafana-app-ui.md) -> [How to share an Azure Managed Grafana workspace](./how-to-share-grafana-workspace.md) +> [How to share an Azure Managed Grafana workspace](./how-to-share-grafana-workspace.md) \ No newline at end of file diff --git a/articles/managed-grafana/how-to-permissions.md b/articles/managed-grafana/how-to-permissions.md index 05ddbf6ddfdc4..c12b2603f619e 100644 --- a/articles/managed-grafana/how-to-permissions.md +++ b/articles/managed-grafana/how-to-permissions.md @@ -19,7 +19,7 @@ In this article, you'll learn how to manually edit permissions for a specific re ## Prerequisites - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/dotnet). -- An Azure Managed Grafana workspace. If you don't have one yet, [create a workspace](/azure/managed-grafana/quickstart-managed-grafana-portal). +- An Azure Managed Grafana workspace. If you don't have one yet, [create a workspace](./quickstart-managed-grafana-portal.md). - An Azure resource with monitoring data and write permissions, such as [User Access Administrator](../../articles/role-based-access-control/built-in-roles.md#user-access-administrator) or [Owner](../../articles/role-based-access-control/built-in-roles.md#owner) ## Sign in to Azure @@ -34,27 +34,29 @@ To change permissions for a specific resource, follow these steps: 1. Select **Access Control (IAM)**. 1. Under **Grant access to this resource**, select **Add role assignment**. - :::image type="content" source="media/managed-grafana-how-to-permissions-iam.png" alt-text="Screenshot of the Azure platform to add role assignment in App Insights."::: + :::image type="content" source="./media/permissions/permissions-iam.png" alt-text="Screenshot of the Azure platform to add role assignment in App Insights."::: 1. The portal lists various roles you can give to your Managed Grafana resource. Select a role. For instance, **Monitoring Reader**. Select this role. 1. Click **Next**. - :::image type="content" source="media/managed-grafana-how-to-permissions-role.png" alt-text="Screenshot of the Azure platform and choose Monitor Reader."::: + :::image type="content" source="./media/permissions/permissions-role.png" alt-text="Screenshot of the Azure platform and choose Monitor Reader."::: 1. For **Assign access to**, select **Managed Identity**. 1. Click **Select members**. - :::image type="content" source="media/managed-grafana-how-to-permissions-members.png" alt-text="Screenshot of the Azure platform selecting members."::: + :::image type="content" source="media/permissions/permissions-members.png" alt-text="Screenshot of the Azure platform selecting members."::: 1. Select the **Subscription** containing your Managed Grafana workspace 1. Select a **Managed identity** from the options in the dropdown list 1. Select your Managed Grafana workspace from the list. 1. 
Click **Select** to confirm - :::image type="content" source="media/managed-grafana-how-to-permissions-identity.png" alt-text="Screenshot of the Azure platform selecting the workspace."::: + :::image type="content" source="media/permissions/permissions-managed-identities.png" alt-text="Screenshot of the Azure platform selecting the workspace."::: 1. Click **Next**, then **Review + assign** to confirm the application of the new permission +For more information about how to use Managed Grafana with Azure Monitor, go to [Monitor your Azure services in Grafana](../azure-monitor/visualize/grafana-plugin.md). + ## Next steps > [!div class="nextstepaction"] -> [How to configure data sources for Azure Managed Grafana](./how-to-data-source-plugins-managed-identity.md) +> [How to configure data sources for Azure Managed Grafana](./how-to-data-source-plugins-managed-identity.md) \ No newline at end of file diff --git a/articles/managed-grafana/how-to-share-grafana-workspace.md b/articles/managed-grafana/how-to-share-grafana-workspace.md index a6f19fb4e6a3b..5f48d734150c5 100644 --- a/articles/managed-grafana/how-to-share-grafana-workspace.md +++ b/articles/managed-grafana/how-to-share-grafana-workspace.md @@ -15,7 +15,7 @@ A DevOps team may build dashboards to monitor and diagnose an application or inf ## Prerequisites - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/dotnet). -- An Azure Managed Grafana workspace. If you don't have one yet, [create a workspace](/azure/managed-grafana/how-to-permissions). +- An Azure Managed Grafana workspace. If you don't have one yet, [create a workspace](./how-to-permissions.md). ## Supported Grafana roles @@ -60,4 +60,4 @@ Sign in to the Azure portal at [https://portal.azure.com/](https://portal.azure. 
> [!div class="nextstepaction"] > [How to configure data sources for Azure Managed Grafana](./how-to-data-source-plugins-managed-identity.md) > [How to modify access permissions to Azure Monitor](./how-to-permissions.md) -> [How to call Grafana APIs in your automation with Azure Managed Grafana](./how-to-api-calls.md) +> [How to call Grafana APIs in your automation with Azure Managed Grafana](./how-to-api-calls.md) \ No newline at end of file diff --git a/articles/managed-grafana/media/managed-grafana-how-to-permissions-iam.png b/articles/managed-grafana/media/managed-grafana-how-to-permissions-iam.png deleted file mode 100644 index dd413970f384c..0000000000000 Binary files a/articles/managed-grafana/media/managed-grafana-how-to-permissions-iam.png and /dev/null differ diff --git a/articles/managed-grafana/media/managed-grafana-how-to-permissions-role.png b/articles/managed-grafana/media/managed-grafana-how-to-permissions-role.png deleted file mode 100644 index 6d154fe8501b2..0000000000000 Binary files a/articles/managed-grafana/media/managed-grafana-how-to-permissions-role.png and /dev/null differ diff --git a/articles/managed-grafana/media/permissions/permissions-iam.png b/articles/managed-grafana/media/permissions/permissions-iam.png new file mode 100644 index 0000000000000..ebcf1621c6769 Binary files /dev/null and b/articles/managed-grafana/media/permissions/permissions-iam.png differ diff --git a/articles/managed-grafana/media/managed-grafana-how-to-permissions-identity.png b/articles/managed-grafana/media/permissions/permissions-managed-identities.png similarity index 100% rename from articles/managed-grafana/media/managed-grafana-how-to-permissions-identity.png rename to articles/managed-grafana/media/permissions/permissions-managed-identities.png diff --git a/articles/managed-grafana/media/managed-grafana-how-to-permissions-members.png b/articles/managed-grafana/media/permissions/permissions-members.png similarity index 100% rename from articles/managed-grafana/media/managed-grafana-how-to-permissions-members.png rename to articles/managed-grafana/media/permissions/permissions-members.png diff --git a/articles/managed-grafana/media/permissions/permissions-role.png b/articles/managed-grafana/media/permissions/permissions-role.png new file mode 100644 index 0000000000000..98fb44accf998 Binary files /dev/null and b/articles/managed-grafana/media/permissions/permissions-role.png differ diff --git a/articles/managed-grafana/overview.md b/articles/managed-grafana/overview.md index 6608caf8bb67c..5594d876516eb 100644 --- a/articles/managed-grafana/overview.md +++ b/articles/managed-grafana/overview.md @@ -14,7 +14,7 @@ Azure Managed Grafana is a data visualization platform built on top of the Grafa Azure Managed Grafana is optimized for the Azure environment. It works seamlessly with many Azure services. 
Specifically, for the current preview, it provides with the following integration features: -* Built-in support for [Azure Monitor](/azure/azure-monitor/) and [Azure Data Explorer](/azure/data-explorer/) +* Built-in support for [Azure Monitor](../azure-monitor/index.yml) and [Azure Data Explorer](/azure/data-explorer/) * User authentication and access control using Azure Active Directory identities * Direct import of existing charts from Azure portal @@ -35,4 +35,4 @@ You can create dashboards instantaneously by importing existing charts directly ## Next steps > [!div class="nextstepaction"] -> [Create a workspace in Azure Managed Grafana Preview using the Azure portal](./quickstart-managed-grafana-portal.md). +> [Create a workspace in Azure Managed Grafana Preview using the Azure portal](./quickstart-managed-grafana-portal.md). \ No newline at end of file diff --git a/articles/managed-instance-apache-cassandra/TOC.yml b/articles/managed-instance-apache-cassandra/TOC.yml index 35359b9ad0f44..c4e57b150fafd 100644 --- a/articles/managed-instance-apache-cassandra/TOC.yml +++ b/articles/managed-instance-apache-cassandra/TOC.yml @@ -42,6 +42,8 @@ href: add-service-principal.md - name: Configure Customer-Managed Keys href: customer-managed-keys.md + - name: Enable LDAP authentication + href: ldap.md - name: Monitor Managed Instance href: monitor-clusters.md - name: Manage with Azure CLI diff --git a/articles/managed-instance-apache-cassandra/create-cluster-portal.md b/articles/managed-instance-apache-cassandra/create-cluster-portal.md index 1a9423898d503..90eb32a76bafc 100644 --- a/articles/managed-instance-apache-cassandra/create-cluster-portal.md +++ b/articles/managed-instance-apache-cassandra/create-cluster-portal.md @@ -5,7 +5,7 @@ author: TheovanKraay ms.author: thvankra ms.service: managed-instance-apache-cassandra ms.topic: quickstart -ms.date: 11/02/2021 +ms.date: 05/31/2022 ms.custom: ignite-fall-2021, mode-ui --- # Quickstart: Create an Azure Managed Instance for Apache Cassandra cluster from the Azure portal @@ -18,7 +18,7 @@ This quickstart demonstrates how to use the Azure portal to create an Azure Mana If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. -## Create a managed instance cluster +## Create a managed instance cluster 1. Sign in to the [Azure portal](https://portal.azure.com/). @@ -82,7 +82,7 @@ If you don't have an Azure subscription, create a [free account](https://azure.m :::image type="content" source="./media/create-cluster-portal/datacenter-1.png" alt-text="View datacenter nodes." lightbox="./media/create-cluster-portal/datacenter-1.png" border="true"::: - +> The Azure Cosmos DB role assignment is used for deployment purposes only. Azure Managed Instanced for Apache Cassandra has no backend dependencies on Azure Cosmos DB. ## Connecting to your cluster diff --git a/articles/managed-instance-apache-cassandra/dba-commands.md b/articles/managed-instance-apache-cassandra/dba-commands.md index 502d2468ab4e5..89a383a30fe70 100644 --- a/articles/managed-instance-apache-cassandra/dba-commands.md +++ b/articles/managed-instance-apache-cassandra/dba-commands.md @@ -13,22 +13,19 @@ ms.author: thvankra Azure Managed Instance for Apache Cassandra provides automated deployment, scaling, and [management operations](management-operations.md) for open-source Apache Cassandra data centers. The automation in the service should be sufficient for many use cases. 
However, this article describes how to run DBA commands manually when the need arises. > [!IMPORTANT] -> Nodetool commands are in public preview. +> Nodetool and sstable commands are in public preview. > This feature is provided without a service level agreement, and it's not recommended for production workloads. > For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). - - ## DBA command support -Azure Managed Instance for Apache Cassandra allows you to run `nodetool` commands via Azure CLI, for routine DBA administration. Not all commands are supported and there are some limitations. For supported commands, see the sections below. +Azure Managed Instance for Apache Cassandra allows you to run `nodetool` and `sstable` commands via Azure CLI, for routine DBA administration. Not all commands are supported and there are some limitations. For supported commands, see the sections below. >[!WARNING] > Some of these commands can destabilize the cassandra cluster and should only be run carefully and after being tested in non-production environments. Where possible a `--dry-run` option should be deployed first. Microsoft cannot offer any SLA or support on issues with running commands which alter the default database configuration and/or tables. -## How to run a nodetool command +## How to run a `nodetool` command Azure Managed Instance for Apache Cassandra provides the following Azure CLI command to run DBA commands: ```azurecli-interactive @@ -59,9 +56,9 @@ Both will return a json of the following form: } ``` - +``` - +* `sstableexpiredblockers` -## List of supported nodetool commands +## List of supported `nodetool` commands For more information on each command, see https://cassandra.apache.org/doc/latest/cassandra/tools/nodetool/nodetool.html diff --git a/articles/managed-instance-apache-cassandra/faq.md b/articles/managed-instance-apache-cassandra/faq.md index 13458d844ba7d..6fb5c64957eb0 100644 --- a/articles/managed-instance-apache-cassandra/faq.md +++ b/articles/managed-instance-apache-cassandra/faq.md @@ -28,11 +28,15 @@ Azure Managed Instance for Apache Cassandra is delivered by the Azure Cosmos DB No, there's no architectural dependency between Azure Managed Instance for Apache Cassandra and the Azure Cosmos DB backend. +### What versions of Apache Cassandra does the service support? + +The service currently supports Cassandra versions 3.11 and 4.0. By default, version 3.11 is deployed, as version 4.0 is currently in public preview. See our [Azure CLI Quickstart](create-cluster-cli.md) (step 5) for specifying Cassandra version during cluster deployment. + ### Does Azure Managed Instance for Apache Cassandra have an SLA? Yes, the SLA is published [here](https://azure.microsoft.com/support/legal/sla/managed-instance-apache-cassandra/v1_0/). -#### Can I deploy Azure Managed Instance for Apache Cassandra in any region? +### Can I deploy Azure Managed Instance for Apache Cassandra in any region? Currently the managed instance is available in a limited number of regions. 
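The dba-commands.md changes above describe running `nodetool` (and now `sstable`) commands through the Azure CLI instead of directly on the nodes. As a minimal, hedged sketch of what such an invocation might look like — the `az managed-cassandra cluster invoke-command` command name and flags are assumptions to verify against `az managed-cassandra cluster invoke-command --help`, and the resource group, cluster name, and host IP are placeholders:

```azurecli-interactive
# Run "nodetool status" against one data node of a managed cluster.
# Resource group, cluster name, and host IP below are placeholder values.
az managed-cassandra cluster invoke-command \
    --resource-group "cassandra-rg" \
    --cluster-name "cassandra-cluster" \
    --host "10.0.1.12" \
    --command-name "nodetool" \
    --arguments "status"=""
```

The result is returned as JSON (as noted in the article), so it can be piped to a JSON processor such as `jq` when scripting routine health checks.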
diff --git a/articles/managed-instance-apache-cassandra/index.yml b/articles/managed-instance-apache-cassandra/index.yml index 5871b7eaab556..099ba2dcdca09 100644 --- a/articles/managed-instance-apache-cassandra/index.yml +++ b/articles/managed-instance-apache-cassandra/index.yml @@ -67,6 +67,8 @@ landingContent: links: - text: Manage resources with Azure CLI url: manage-resources-cli.md + - text: Enable LDAP Authentication + url: ldap.md - text: Monitor cluster resources url: monitor-clusters.md - text: Configure Customer-Managed Keys diff --git a/articles/managed-instance-apache-cassandra/introduction.md b/articles/managed-instance-apache-cassandra/introduction.md index 0e2b5342a8ba0..0ca3eb5ab66db 100644 --- a/articles/managed-instance-apache-cassandra/introduction.md +++ b/articles/managed-instance-apache-cassandra/introduction.md @@ -24,6 +24,9 @@ You can use this service to easily place managed instances of Apache Cassandra d - **Simplified deployment:** After the hybrid connectivity is established, deployment of new data centers in Azure is easy through [simple commands](manage-resources-cli.md#create-datacenter). - **Metrics:** each datacenter node provisioned by the service emits metrics using [Metric Collector for Apache Cassandra](https://github.com/datastax/metric-collector-for-apache-cassandra). The metrics can be [visualized in Prometheus or Grafana](visualize-prometheus-grafana.md). The service is also integrated with [Azure Monitor for metrics and diagnostic logging](monitor-clusters.md). +>[!NOTE] +> The service currently supports Cassandra versions 3.11 and 4.0. By default, version 3.11 is deployed, as version 4.0 is currently in public preview. See our [Azure CLI Quickstart](create-cluster-cli.md) (step 5) for specifying Cassandra version during cluster deployment. + ### Simplified scaling In the managed instance, scaling up and scaling down nodes in a datacenter is fully managed. You select the number of nodes you need, and with a [simple command](manage-resources-cli.md#update-datacenter), the scaling orchestrator takes care of establishing their operation within the Cassandra ring. diff --git a/articles/managed-instance-apache-cassandra/ldap.md b/articles/managed-instance-apache-cassandra/ldap.md new file mode 100644 index 0000000000000..956ec96ff64df --- /dev/null +++ b/articles/managed-instance-apache-cassandra/ldap.md @@ -0,0 +1,133 @@ +--- +title: How to enable LDAP authentication in Azure Managed Instance for Apache Cassandra +description: Learn how to enable LDAP authentication in Azure Managed Instance for Apache Cassandra +author: TheovanKraay +ms.author: thvankra +ms.service: managed-instance-apache-cassandra +ms.topic: how-to +ms.date: 05/23/2022 +--- + +# How to enable LDAP authentication in Azure Managed Instance for Apache Cassandra + +Azure Managed Instance for Apache Cassandra provides automated deployment and scaling operations for managed open-source Apache Cassandra data centers. This article discusses how to enable LDAP authentication to your clusters and data centers. + +> [!IMPORTANT] +> LDAP authentication is in public preview. +> This feature is provided without a service level agreement, and it's not recommended for production workloads. +> For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). 
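The ldap.md article added below stresses, in its "Enable LDAP authentication" section, that the LDAP server must present a TLS certificate whose subject alternative name matches the host name the data centers connect to. As a hedged sketch of how you might confirm that against an existing LDAPS endpoint before enabling the feature — the host name and port are placeholders, not values taken from the article:

```shell
# Inspect the certificate presented by an existing LDAPS endpoint and check
# that its Subject Alternative Name matches the server's DNS name.
ldap_host="myldap.uksouth.cloudapp.azure.com"   # placeholder host name
openssl s_client -connect "${ldap_host}:636" -showcerts </dev/null 2>/dev/null \
  | openssl x509 -noout -text \
  | grep -A1 "Subject Alternative Name"
```

If the name reported here doesn't match the host name later passed to `--ldap-server-hostname`, certificate validation, and therefore authentication, can be expected to fail.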
+ +## Prerequisites + +- If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. +- An Azure Managed Instance for Apache Cassandra cluster. Review how to [create an Azure Managed Instance for Apache Cassandra cluster from the Azure portal](create-cluster-portal.md). + +## Deploy an LDAP Server in Azure +In this section, we'll walk through creating a simple LDAP server on a Virtual Machine in Azure. If you already have an LDAP server running, you can skip this section and review [how to enable LDAP authentication](ldap.md#enable-ldap-authentication). + +1. Deploy a Virtual Machine in Azure using Ubuntu Server 18.04 LTS. You can follow instructions [here](visualize-prometheus-grafana.md#deploy-an-ubuntu-server). + +1. Give your server a DNS name: + + :::image type="content" source="./media/ldap/dns.jpg" alt-text="Screenshot of virtual machine d n s name in Azure portal." lightbox="./media/ldap/dns.jpg" border="true"::: + +1. Install Docker on the virtual machine. We recommend [this](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-18-04) tutorial. + +1. In the home directory, copy and paste the following text and hit enter. This command will create a file containing a test LDAP user account. + + ```shell + mkdir ldap-user && cd ldap-user && cat >> user.ldif <` with the dns name you created for your LDAP server earlier. This command will deploy an LDAP server with TLS enabled to a Docker container, and will also copy the user file you created earlier to the container. + + ```shell + sudo docker run --hostname .uksouth.cloudapp.azure.com --name -v $(pwd)/ldap-user:/container/service/slapd/assets/test --detach osixia/openldap:1.5.0 + ``` + +1. Now copy out the certificates folder from the container (replace `` with the dns name you created for your LDAP server): + + ```shell + sudo docker cp :/container/service/slapd/assets/certs certs + ``` + +1. Verify that dns name is correct: + + ```shell + openssl x509 -in certs/ldap.crt -text + ``` + :::image type="content" source="./media/ldap/dns-verify.jpg" alt-text="Screenshot of output from command to verify certificate." lightbox="./media/ldap/dns-verify.jpg" border="true"::: + +1. Copy the `ldap.crt` file to [clouddrive](../cloud-shell/persisting-shell-storage.md) in Azure CLI for use later. + +1. Add the user to the ldap (replace `` with the dns name you created for your LDAP server): + + ```shell + sudo docker container exec ldapadd -H ldap://.uksouth.cloudapp.azure.com -D "cn=admin,dc=example,dc=org" -w admin -f /container/service/slapd/assets/test/user.ldif + ``` + +## Enable LDAP authentication + +> [!IMPORTANT] +> If you skipped the above section because you already have an existing LDAP server, please ensure that it has server SSL certificates enabled. The `subject alternative name (dns name)` specified for the certificate must also match the domain of the server that LDAP is hosted on, or authentication will fail. + +1. Currently, LDAP authentication is a public preview feature. Run the below command to add the required Azure CLI extension: + + ```azurecli-interactive + az extension add --upgrade --name cosmosdb-preview + ``` + +1. Set authentication method to "Ldap" on the cluster, replacing `` and `` with the appropriate values: + + ```azurecli-interactive + az managed-cassandra cluster update -g -c --authentication-method "Ldap" + ``` + +1. Now set properties at the data center level. 
Replace `` and `` with the appropriate values, and `` with the dns name you created for your LDAP server. + + > [!NOTE] + > The below command is based on the LDAP setup in the earlier section. If you skipped that section because you already have an existing LDAP server, provide the corresponding values for that server instead. Ensure you have uploaded a certificate file like `ldap.crt` to your [clouddrive](../cloud-shell/persisting-shell-storage.md) in Azure CLI. + + ```azurecli-interactive + ldap_search_base_distinguished_name='dc=example,dc=org' + ldap_server_certificates='/usr/csuser/clouddrive/ldap.crt' + ldap_server_hostname='.uksouth.cloudapp.azure.com' + ldap_service_user_distinguished_name='cn=admin,dc=example,dc=org' + ldap_service_user_password='admin' + + az managed-cassandra datacenter update -g `` -c `` -d datacenter-1 --ldap-search-base-dn $ldap_search_base_distinguished_name --ldap-server-certs $ldap_server_certificates --ldap-server-hostname $ldap_server_hostname --ldap-service-user-dn $ldap_service_user_distinguished_name --ldap-svc-user-pwd $ldap_service_user_password + ``` + +1. Once this command has completed, you should be able to use [CQLSH](https://cassandra.apache.org/doc/latest/cassandra/tools/cqlsh.html) (see below) or any Apache Cassandra open-source client driver to connect to your managed instance data center with the user added in the above step: + + ```shell + export SSL_VALIDATE=false + cqlsh --debug --ssl -u -p + ``` + +## Next steps + +* [LDAP authentication with Azure Active Directory](../active-directory/fundamentals/auth-ldap.md) +* [Manage Azure Managed Instance for Apache Cassandra resources using Azure CLI](manage-resources-cli.md) +* [Deploy a Managed Apache Spark Cluster with Azure Databricks](deploy-cluster-databricks.md) \ No newline at end of file diff --git a/articles/managed-instance-apache-cassandra/management-operations.md b/articles/managed-instance-apache-cassandra/management-operations.md index b39f153ca58df..c30a59b410322 100644 --- a/articles/managed-instance-apache-cassandra/management-operations.md +++ b/articles/managed-instance-apache-cassandra/management-operations.md @@ -15,7 +15,7 @@ Azure Managed Instance for Apache Cassandra provides automated deployment and sc ## Compaction -* The system currently does not perform a major compaction. +* The system currently doesn't perform a major compaction. * Repair (see [Maintenance](#maintenance)) performs a Merkle tree compaction, which is a special kind of compaction. * Depending on the compaction strategy on the keyspace, Cassandra automatically compacts when the keyspace reaches a specific size. We recommend that you carefully select a compaction strategy for your workload, and don't do any manual compactions outside the strategy. @@ -25,9 +25,12 @@ Azure Managed Instance for Apache Cassandra provides automated deployment and sc * Apache Cassandra software-level patches are done when security vulnerabilities are identified. The patching cadence may vary. -* During patching, machines are rebooted one rack at a time. You should not experience any degradation at the application side as long as **quorum ALL setting is not being used**, and the replication factor is **3 or higher**. +* During patching, machines are rebooted one rack at a time. You shouldn't experience any degradation at the application side as long as **quorum ALL setting is not being used**, and the replication factor is **3 or higher**. -* The version in Apache Cassandra is in the format `X.Y.Z`. 
You can control the deployment of major (X) and minor (Y) versions manually via service tools. Whereas the Cassandra patches (Z) that may be required for that major/minor version combination are done automatically. +* The version in Apache Cassandra is in the format `X.Y.Z`. You can control the deployment of major (X) and minor (Y) versions manually via service tools, whereas the Cassandra patches (Z) that may be required for that major/minor version combination are applied automatically. + +>[!NOTE] +> The service currently supports Cassandra versions 3.11 and 4.0. By default, version 3.11 is deployed, as version 4.0 is currently in public preview. See our [Azure CLI Quickstart](create-cluster-cli.md) (step 5) for specifying Cassandra version during cluster deployment. ## Maintenance @@ -60,7 +63,7 @@ Azure Managed Instance for Apache Cassandra provides an [SLA](https://azure.micr ## Backup and restore -Snapshot backups are enabled by default and taken every 4 hours with [Medusa](https://github.com/thelastpickle/cassandra-medusa). Backups are stored in an internal Azure Blob Storage account and are retained for up to 2 days (48 hours). There is no cost for backups. To restore from a backup, file a [support request](https://portal.azure.com/#blade/Microsoft_Azure_Support/HelpAndSupportBlade/newsupportrequest) in the Azure portal. +Snapshot backups are enabled by default and taken every 4 hours with [Medusa](https://github.com/thelastpickle/cassandra-medusa). Backups are stored in an internal Azure Blob Storage account and are retained for up to 2 days (48 hours). There's no cost for backups. To restore from a backup, file a [support request](https://portal.azure.com/#blade/Microsoft_Azure_Support/HelpAndSupportBlade/newsupportrequest) in the Azure portal. > [!WARNING] > Backups can be restored to the same VNet/subnet as your existing cluster, but they cannot be restored to the *same cluster*. Backups can only be restored to **new clusters**. Backups are intended for accidental deletion scenarios, and are not geo-redundant. They are therefore not recommended for use as a disaster recovery (DR) strategy in case of a total regional outage. To safeguard against region-wide outages, we recommend a multi-region deployment. Take a look at our [quickstart for multi-region deployments](create-multi-region-cluster.md). @@ -80,7 +83,7 @@ For more information on security features, see our article [here](security.md). ## Hybrid support -When a [hybrid](configure-hybrid-cluster.md) cluster is configured, automated reaper operations running in the service will benefit the whole cluster. This includes data centers that are not provisioned by the service. Outside this, it is your responsibility to maintain your on-premise or externally hosted data center. +When a [hybrid](configure-hybrid-cluster.md) cluster is configured, automated reaper operations running in the service will benefit the whole cluster. This includes data centers that aren't provisioned by the service. Outside this, it is your responsibility to maintain your on-premises or externally hosted data center.
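The note added above (and the matching additions to faq.md and introduction.md earlier in this diff) points readers to step 5 of the Azure CLI quickstart for choosing between Cassandra 3.11 and 4.0 at deployment time. As a minimal, hedged sketch of what that might look like — parameter names should be checked against the quickstart, and every name, the subnet resource ID, and the password below are placeholders:

```azurecli-interactive
# Create a managed cluster pinned to Cassandra 4.0 (public preview) rather than the default 3.11.
# All names, the delegated subnet resource ID, and the password are placeholder values.
az managed-cassandra cluster create \
    --cluster-name "cassandra-cluster" \
    --resource-group "cassandra-rg" \
    --location "eastus2" \
    --cassandra-version "4.0" \
    --delegated-management-subnet-id "/subscriptions/<subscription-id>/resourceGroups/cassandra-rg/providers/Microsoft.Network/virtualNetworks/cassandra-vnet/subnets/cassandra-subnet" \
    --initial-cassandra-admin-password "<admin-password>"
```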
## Next steps diff --git a/articles/managed-instance-apache-cassandra/media/create-cluster-portal/add-datacenter-2.png b/articles/managed-instance-apache-cassandra/media/create-cluster-portal/add-datacenter-2.png index e2020e8541b9d..3104f9edde494 100644 Binary files a/articles/managed-instance-apache-cassandra/media/create-cluster-portal/add-datacenter-2.png and b/articles/managed-instance-apache-cassandra/media/create-cluster-portal/add-datacenter-2.png differ diff --git a/articles/managed-instance-apache-cassandra/media/ldap/dns-verify.jpg b/articles/managed-instance-apache-cassandra/media/ldap/dns-verify.jpg new file mode 100644 index 0000000000000..d1ec71ccdde2f Binary files /dev/null and b/articles/managed-instance-apache-cassandra/media/ldap/dns-verify.jpg differ diff --git a/articles/managed-instance-apache-cassandra/media/ldap/dns.jpg b/articles/managed-instance-apache-cassandra/media/ldap/dns.jpg new file mode 100644 index 0000000000000..b1cecfc1be3c7 Binary files /dev/null and b/articles/managed-instance-apache-cassandra/media/ldap/dns.jpg differ diff --git a/articles/managed-instance-apache-cassandra/visualize-prometheus-grafana.md b/articles/managed-instance-apache-cassandra/visualize-prometheus-grafana.md index 1971d60be455d..e1560ec3e3787 100644 --- a/articles/managed-instance-apache-cassandra/visualize-prometheus-grafana.md +++ b/articles/managed-instance-apache-cassandra/visualize-prometheus-grafana.md @@ -19,7 +19,7 @@ The following tasks are required to visualize metrics: * Install the [Prometheus Dashboards](https://github.com/datastax/metric-collector-for-apache-cassandra#installing-the-prometheus-dashboards) onto the VM. >[!WARNING] -> Prometheus and Grafana are open-source software and not supported as part of the Azure Managed Instance for Apache Cassandra service. Visualizing metrics in the way described below will require you to host and maintain a virtual machine as the server for both Prometheus and Grafana. The instructions below were tested only for Ubuntu Server 18.04, there is no guarantee that they will work with other linux distributions. Following this approach will entail supporting any issues that may arise, such as running out of space, or availability of the server. For a fully supported and hosted metrics experience, consider using [Azure Monitor metrics](monitor-clusters.md#azure-metrics), or alternatively [Azure Monitor partner integrations](/azure/azure-monitor/partners). +> Prometheus and Grafana are open-source software and not supported as part of the Azure Managed Instance for Apache Cassandra service. Visualizing metrics in the way described below will require you to host and maintain a virtual machine as the server for both Prometheus and Grafana. The instructions below were tested only for Ubuntu Server 18.04, there is no guarantee that they will work with other linux distributions. Following this approach will entail supporting any issues that may arise, such as running out of space, or availability of the server. For a fully supported and hosted metrics experience, consider using [Azure Monitor metrics](monitor-clusters.md#azure-metrics), or alternatively [Azure Monitor partner integrations](../azure-monitor/partners.md). ## Deploy an Ubuntu server @@ -146,4 +146,4 @@ The following tasks are required to visualize metrics: In this article, you learned how to configure dashboards to visualize metrics in Prometheus using Grafana. 
Learn more about Azure Managed Instance for Apache Cassandra with the following articles: * [Overview of Azure Managed Instance for Apache Cassandra](introduction.md) -* [Deploy a Managed Apache Spark Cluster with Azure Databricks](deploy-cluster-databricks.md) +* [Deploy a Managed Apache Spark Cluster with Azure Databricks](deploy-cluster-databricks.md) \ No newline at end of file diff --git a/articles/marketplace/TOC.yml b/articles/marketplace/TOC.yml index 69e6923c7db65..242e08090b4ce 100644 --- a/articles/marketplace/TOC.yml +++ b/articles/marketplace/TOC.yml @@ -629,6 +629,10 @@ href: partner-center-portal/saas-fulfillment-apis-faq.yml - name: SaaS Fulfillment APIs v1 (deprecated) href: partner-center-portal/pc-saas-fulfillment-api-v1.md + - name: Private Offer APIs + items: + - name: Private Offer APIs + href: private-offers-api.md - name: Resources items: - name: Commercial marketplace FAQ diff --git a/articles/marketplace/azure-ad-transactable-saas-landing-page.md b/articles/marketplace/azure-ad-transactable-saas-landing-page.md index ac49c7117ee34..9357e80cf2fab 100644 --- a/articles/marketplace/azure-ad-transactable-saas-landing-page.md +++ b/articles/marketplace/azure-ad-transactable-saas-landing-page.md @@ -127,3 +127,7 @@ Most apps that are registered with Azure AD grant delegated permissions to read ## Next steps - [How to create a SaaS offer in the commercial marketplace](create-new-saas-offer.md) + +**Video tutorials** + +- [Building a Simple SaaS Landing Page in .NET](https://go.microsoft.com/fwlink/?linkid=2196323) diff --git a/articles/marketplace/azure-private-plan-troubleshooting.md b/articles/marketplace/azure-private-plan-troubleshooting.md index 3eba2d1c028cf..6f84be4756d2c 100644 --- a/articles/marketplace/azure-private-plan-troubleshooting.md +++ b/articles/marketplace/azure-private-plan-troubleshooting.md @@ -63,7 +63,7 @@ While troubleshooting the Azure Subscription Hierarchy, keep these things in min ## Troubleshooting Checklist -- ISV to ensure the SaaS private plan is using the correct tenant ID for the customer - [How to find your Azure Active Directory tenant ID](../active-directory/fundamentals/active-directory-how-to-find-tenant.md). For VMs use the [Azure Subscription ID.](/azure/azure-portal/get-subscription-tenant-id) +- ISV to ensure the SaaS private plan is using the correct tenant ID for the customer - [How to find your Azure Active Directory tenant ID](../active-directory/fundamentals/active-directory-how-to-find-tenant.md). For VMs use the [Azure Subscription ID.](../azure-portal/get-subscription-tenant-id.md) - ISV to ensure that the Customer is not buying through a CSP. Private Plans are not available on a CSP-managed subscription. 
- Customer to ensure customer is logging in with an email ID that is registered under the same tenant ID (use the same user ID they used in step #1 above) - ISV to ask the customer to find the Private Plan in Azure Marketplace: [Private plans in Azure Marketplace](/marketplace/private-plans) @@ -81,4 +81,4 @@ While troubleshooting the Azure Subscription Hierarchy, keep these things in min ## Next steps -- [Create an Azure Support Request](../azure-portal/supportability/how-to-create-azure-support-request.md) +- [Create an Azure Support Request](../azure-portal/supportability/how-to-create-azure-support-request.md) \ No newline at end of file diff --git a/articles/marketplace/azure-resource-manager-test-drive.md b/articles/marketplace/azure-resource-manager-test-drive.md index 2d3cacf2b7794..027981c14c3b5 100644 --- a/articles/marketplace/azure-resource-manager-test-drive.md +++ b/articles/marketplace/azure-resource-manager-test-drive.md @@ -6,7 +6,7 @@ ms.subservice: partnercenter-marketplace-publisher ms.topic: article ms.author: trkeya author: trkeya -ms.date: 12/06/2021 +ms.date: 06/03/2022 ms.custom: devx-track-azurepowershell, subject-rbac-steps --- @@ -83,6 +83,9 @@ You can use any valid name for your parameters; test drive recognizes parameter Test drive initializes this parameter with a **Base Uri** of your deployment package so you can use this parameter to construct a Uri of any file included in your package. +> [!NOTE] +> The `baseUri` parameter cannot be used in conjunction with a custom script extension. + ```JSON "parameters": { ... diff --git a/articles/marketplace/azure-vm-faq.yml b/articles/marketplace/azure-vm-faq.yml index a8f9a756d1e9e..034a5c25d4700 100644 --- a/articles/marketplace/azure-vm-faq.yml +++ b/articles/marketplace/azure-vm-faq.yml @@ -478,11 +478,11 @@ sections: answer: | You can deploy hidden preview images using quickstart templates. To deploy a preview image, - 1. Goto the respective quick-start template for [Linux](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.compute/vm-simple-linux/) or [Windows](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.compute/vm-simple-windows), select "Deploy to Azure". This should take you to Azure portal. + 1. Go to the respective quick-start template for [Linux](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.compute/vm-simple-linux/) or [Windows](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.compute/vm-simple-windows), select "Deploy to Azure". This should take you to Azure portal. 2. In Azure portal, select "Edit template". 3. In the JSON template, search for imageReference and update the publisherid, offerid, skuid, and version of the image. To test preview image, append "-PREVIEW" to the offerid. ![image](https://user-images.githubusercontent.com/79274470/110191995-71c7d500-7de0-11eb-9f3c-6a42f55d8f03.png) - 4. Click Save + 4. Select **Save**. 5. Fill out the rest of the details. 
Review and Create diff --git a/articles/marketplace/azure-vm-plan-technical-configuration.md b/articles/marketplace/azure-vm-plan-technical-configuration.md index ef089a527d6b0..7b7dfbbfe4935 100644 --- a/articles/marketplace/azure-vm-plan-technical-configuration.md +++ b/articles/marketplace/azure-vm-plan-technical-configuration.md @@ -23,7 +23,7 @@ Some common reasons for reusing the technical configuration settings from anothe - Your solution behaves differently based on the plan the user chooses to deploy. For example, the software is the same, but features vary by plan. > [!NOTE] -> If you would like to use a public plan to create a private plan with a different price, consider creating a private offer instead of reusing the technical configuration. Learn more about [the difference between private plans and private offers](/azure/marketplace/isv-customer-faq). Learn more about [how to create a private offer](/azure/marketplace/isv-customer). +> If you would like to use a public plan to create a private plan with a different price, consider creating a private offer instead of reusing the technical configuration. Learn more about [the difference between private plans and private offers](./isv-customer-faq.yml). Learn more about [how to create a private offer](./isv-customer.md). Leverage [Azure Instance Metadata Service](../virtual-machines/windows/instance-metadata-service.md) (IMDS) to identify which plan your solution is deployed within to validate license or enabling of appropriate features. @@ -55,23 +55,23 @@ Here is a list of properties that can be selected for your VM. Enable the proper - Python version above 2.6+ - For more information, see [VM Extension](/azure/marketplace/azure-vm-certification-faq). + For more information, see [VM Extension](./azure-vm-certification-faq.yml). -- **Supports backup**: Enable this property if your images support Azure VM backup. Learn more about [Azure VM backup](/azure/backup/backup-azure-vms-introduction). +- **Supports backup**: Enable this property if your images support Azure VM backup. Learn more about [Azure VM backup](../backup/backup-azure-vms-introduction.md). -- **Supports accelerated networking**: The VM images in this plan support single root I/O virtualization (SR-IOV) to a VM, enabling low latency and high throughput on the network interface. Learn more about [accelerated networking for Linux](/azure/virtual-network/create-vm-accelerated-networking-cli). Learn more about [accelerated networking for Windows](/azure/virtual-network/create-vm-accelerated-networking-powershell). +- **Supports accelerated networking**: The VM images in this plan support single root I/O virtualization (SR-IOV) to a VM, enabling low latency and high throughput on the network interface. Learn more about [accelerated networking for Linux](../virtual-network/create-vm-accelerated-networking-cli.md). Learn more about [accelerated networking for Windows](../virtual-network/create-vm-accelerated-networking-powershell.md). - **Is a network virtual appliance**: A network virtual appliance is a product that performs one or more network functions, such as a Load Balancer, VPN Gateway, Firewall or Application Gateway. Learn more about [network virtual appliances](https://go.microsoft.com/fwlink/?linkid=2155373). - **Supports NVMe** - Enable this property if the images in this plan support NVMe disk interface. The NVMe interface offers higher and consistent IOPS and bandwidth relative to legacy SCSI interface. 
-- **Supports cloud-init configuration**: Enable this property if the images in this plan support cloud-init post deployment scripts. Learn more about [cloud-init configuration](/azure/virtual-machines/linux/using-cloud-init). +- **Supports cloud-init configuration**: Enable this property if the images in this plan support cloud-init post deployment scripts. Learn more about [cloud-init configuration](../virtual-machines/linux/using-cloud-init.md). - **Supports hibernation** – The images in this plan support hibernation/resume. - **Remote desktop/SSH not supported**: Enable this property if any of the following conditions are true: - - Virtual machines deployed with these images don't allow customers to access it using Remote Desktop or SSH. Learn more about [locked VM images](/azure/marketplace/azure-vm-certification-faq#locked-down-or-ssh-disabled-offer.md). Images that are published with either SSH disabled (for Linux) or RDP disabled (for Windows) are treated as Locked down VMs. There are special business scenarios to restrict access to users. During validation checks, Locked down VMs might not allow execution of certain certification commands. + - Virtual machines deployed with these images don't allow customers to access it using Remote Desktop or SSH. Learn more about [locked VM images](./azure-vm-certification-faq.yml#locked-down-or-ssh-disabled-offer). Images that are published with either SSH disabled (for Linux) or RDP disabled (for Windows) are treated as Locked down VMs. There are special business scenarios to restrict access to users. During validation checks, Locked down VMs might not allow execution of certain certification commands. - Image does not support sampleuser while deploying. - Image has limited access. @@ -90,12 +90,12 @@ Below are examples (non-exhaustive) that might require custom templates for depl ## Image types -Generations of a virtual machine defines the virtual hardware it uses. Based on your customer’s needs, you can publish a Generation 1 VM, Generation 2 VM, or both. To learn more about the differences between Generation 1 and Generation 2 capabilities, see [Support for generation 2 VMs on Azure](/azure/virtual-machines/generation-2). +Generations of a virtual machine defines the virtual hardware it uses. Based on your customer’s needs, you can publish a Generation 1 VM, Generation 2 VM, or both. To learn more about the differences between Generation 1 and Generation 2 capabilities, see [Support for generation 2 VMs on Azure](../virtual-machines/generation-2.md). When creating a new plan, select an Image type from the drop-down menu. You can choose either X64 Gen 1 or X64 Gen 2. To add another image type to a plan, select **+Add image type**. You will need to provide a SKU ID for each new image type that is added. > [!NOTE] -> A published generation requires at least one image version to remain available for customers. To remove the entire plan (along with all its generations and images), select **Deprecate plan** on the **Plan Overview** page. Learn more about [deprecating plans](/azure/marketplace/deprecate-vm). +> A published generation requires at least one image version to remain available for customers. To remove the entire plan (along with all its generations and images), select **Deprecate plan** on the **Plan Overview** page. Learn more about [deprecating plans](./deprecate-vm.md). > ## VM images @@ -105,7 +105,7 @@ To add a new image version, click **+Add VM image**. 
This will open a panel in w Keep in mind the following when publishing VM images: 1. Provide only one new VM image per image type in a given submission. -2. After an image has been published, you can't edit it, but you can deprecate it. Deprecating a version prevents both new and existing users from deploying a new instance of the deprecated version. Learn more about [deprecating VM images](/azure/marketplace/deprecate-vm). +2. After an image has been published, you can't edit it, but you can deprecate it. Deprecating a version prevents both new and existing users from deploying a new instance of the deprecated version. Learn more about [deprecating VM images](./deprecate-vm.md). 3. You can add up to 16 data disks for each VM image provided. Regardless of which operating system you use, add only the minimum number of data disks that the solution requires. During deployment, customers can’t remove disks that are part of an image, but they can always add disks during or after deployment. > [!NOTE] diff --git a/articles/marketplace/create-new-saas-offer-plans.md b/articles/marketplace/create-new-saas-offer-plans.md index 917213d7c6912..2efdfb0ca093b 100644 --- a/articles/marketplace/create-new-saas-offer-plans.md +++ b/articles/marketplace/create-new-saas-offer-plans.md @@ -152,3 +152,7 @@ If you haven't already done so, create a development and test (DEV) offer to tes - [Sell your SaaS offer](create-new-saas-offer-marketing.md) through the **Co-sell with Microsoft** and **Resell through CSPs** programs. - [Test and publish a SaaS offer](test-publish-saas-offer.md). + +**Video tutorials** + +- [Publishing a Private SaaS plan](https://go.microsoft.com/fwlink/?linkid=2196256) diff --git a/articles/marketplace/dynamics-365-customer-engage-availability.md b/articles/marketplace/dynamics-365-customer-engage-availability.md index 52fb773386393..7689ddaed2083 100644 --- a/articles/marketplace/dynamics-365-customer-engage-availability.md +++ b/articles/marketplace/dynamics-365-customer-engage-availability.md @@ -6,7 +6,7 @@ ms.subservice: partnercenter-marketplace-publisher ms.topic: how-to author: vamahtan ms.author: vamahtan -ms.date: 12/03/2021 +ms.date: 05/25/2022 --- # Configure Dynamics 365 apps on Dataverse and Power Apps offer availability @@ -17,6 +17,9 @@ This page lets you define where and how to make your offer available, including To specify the markets in which your offer should be available, select **Edit markets**. +> [!NOTE] +> If you choose to sell through Microsoft and have Microsoft host transactions on your behalf, then the **Markets** section is not available on this page. In this case, you’ll configure the markets later when you create plans for the offer. If the **Markets** section isn’t shown, go to [Preview audience](#preview-audience). + On the **Market selection** popup window, select at least one market. Choose **Select all** to make your offer available in every possible market or select only the specific markets you want. When you're finished, select **Save**. Your selections here apply only to new acquisitions; if someone already has your app in a certain market, and you later remove that market, the people who already have the offer in that market can continue to use it, but no new customers in that market will be able to get your offer. 
diff --git a/articles/marketplace/dynamics-365-customer-engage-offer-setup.md b/articles/marketplace/dynamics-365-customer-engage-offer-setup.md index c4190a7411160..290ac29600274 100644 --- a/articles/marketplace/dynamics-365-customer-engage-offer-setup.md +++ b/articles/marketplace/dynamics-365-customer-engage-offer-setup.md @@ -6,14 +6,12 @@ ms.subservice: partnercenter-marketplace-publisher ms.topic: how-to author: vamahtan ms.author: vamahtan -ms.date: 04/18/2022 +ms.date: 05/25/2022 --- # Create a Dynamics 365 apps on Dataverse and Power Apps offer -This article describes how to create a Dynamics 365 apps on Dataverse and Power Apps offer. All offers for Dynamics 365 go through our certification process. The trial experience allows users to deploy your solution to a live Dynamics 365 environment. - -Before you start, create a commercial marketplace account in [Partner Center](./create-account.md) and ensure it is enrolled in the commercial marketplace program. +This article describes how to create a _Dynamics 365 apps on Dataverse and Power Apps_ offer. Before you start, create a commercial marketplace account in [Partner Center](./create-account.md) and ensure it is enrolled in the commercial marketplace program. ## Before you begin @@ -59,20 +57,30 @@ Enter a descriptive name that we'll use to refer to this offer solely within Par ## Setup details -For **How do you want potential customers to interact with this listing offer?**, select the option you want to use for this offer: +1. On the _Offer setup_ page, choose one of the following options: -- **Enable app license management through Microsoft** – Manage your app licenses through Microsoft. To let customers run your app’s base functionality without a license and run premium features after they’ve purchased a license, select the **Allow customers to install my app even if licenses are not assigned box**. If you select this second box, you need to configure your solution package to not require a license. + - Select **Yes** to sell through Microsoft and have Microsoft host transactions on your behalf. + + If you choose this option, the Enable app license management through Microsoft check box is enabled and cannot be changed. - > [!NOTE] - > You cannot change this setting after you publish your offer. To learn more about this setting, see [ISV app license management](isv-app-license.md). + > [!NOTE] + > This capability is currently in Public Preview. -- **Get it now (free)** – List your offer to customers for free. -- **Free trial (listing)** – List your offer to customers with a link to a free trial. Offer listing free trials are created, managed, and configured by your service and do not have subscriptions managed by Microsoft. + - Select **No**, if you prefer to only list your offer through the marketplace and process transactions independently. - > [!NOTE] - > The tokens your application will receive through your trial link can only be used to obtain user information through Azure Active Directory (Azure AD) to automate account creation in your app. Microsoft accounts are not supported for authentication using this token. + If you choose this option, you can use the **Enable app license management through Microsoft** check box to choose whether or not to enable app license management through Microsoft. For more information, see [ISV app license management](isv-app-license.md). + +1. 
To let customers run your app’s base functionality without a license and run premium features after they’ve purchased a license, select the **Allow customers to install my app even if licenses are not assigned** box. If you select this second box, you need to configure your solution package to not require a license. + +1. If you chose **No** in step 1 and chose not to enable app license management through Microsoft, then you can select one of the following: + + - **Get it now (free)** – List your offer to customers for free. + - **Free trial (listing)** – List your offer to customers with a link to a free trial. The trial experience lets users deploy your solution to a live Dynamics 365 environment. Offer listing free trials are created, managed, and configured by your service and do not have subscriptions managed by Microsoft. + + > [!NOTE] + > The tokens your application will receive through your trial link can only be used to obtain user information through Azure Active Directory (Azure AD) to automate account creation in your app. Microsoft accounts are not supported for authentication using this token. -- **Contact me** – Collect customer contact information by connecting your Customer Relationship Management (CRM) system. The customer will be asked for permission to share their information. These customer details, along with the offer name, ID, and marketplace source where they found your offer, will be sent to the CRM system that you've configured. For more information about configuring your CRM, see [Customer leads](#customer-leads). + - **Contact me** – Collect customer contact information by connecting your Customer Relationship Management (CRM) system. The customer will be asked for permission to share their information. These customer details, along with the offer name, ID, and marketplace source where they found your offer, will be sent to the CRM system that you've configured. For more information about configuring your CRM, see [Customer leads](#customer-leads). ## Test drive diff --git a/articles/marketplace/dynamics-365-customer-engage-plans.md b/articles/marketplace/dynamics-365-customer-engage-plans.md index 0dab8e9dfaf05..dffba7935091e 100644 --- a/articles/marketplace/dynamics-365-customer-engage-plans.md +++ b/articles/marketplace/dynamics-365-customer-engage-plans.md @@ -6,7 +6,7 @@ ms.subservice: partnercenter-marketplace-publisher ms.topic: how-to author: vamahtan ms.author: vamahtan -ms.date: 12/03/2021 +ms.date: 05/25/2022 --- # Create Dynamics 365 apps on Dataverse and Power Apps plans @@ -15,12 +15,12 @@ If you enabled app license management for your offer, the **Plans overview** tab [ ![Screenshot of the Plan overview tab for a Dynamics 365 apps on Dataverse and Power Apps offer that's been enabled for third-party app licensing.](./media/third-party-license/plan-tab-d365-workspaces.png) ](./media/third-party-license/plan-tab-d365-workspaces.png#lightbox) -You need to define at least one plan, if your offer has app license management enabled. You can create a variety of plans with different options for the same offer. These plans (sometimes referred to as SKUs) can differ in terms of monetization or tiers of service. Later, you will map the Service IDs of these plans in your solution package to enable a runtime license check by the Dynamics platform against these plans. You will map the Service ID of each plan in your solution package. This enables the Dynamics platform to run a license check against these plans. 
+You need to define at least one plan if your offer has app license management enabled. You can create a variety of plans with different options for the same offer. These plans (sometimes referred to as SKUs) can differ in terms of monetization or tiers of service. You will map the Service ID of each plan in the metadata of your solution package to enable a runtime license check by the Dynamics platform against these plans (we'll walk you through this process later in this article). ## Create a plan 1. In the left-nav, select **Plan overview**. -1. Near the top of the **Plan overview** page, select **+ Create new plan**. +1. Near the top of the page, select **+ Create new plan**. 1. In the dialog box that appears, in the **Plan ID** box, enter a unique plan ID. Use up to 50 lowercase alphanumeric characters, dashes, or underscores. You cannot modify the plan ID after you select **Create**. 1. In the **Plan name** box, enter a unique name for this plan. Use a maximum of 200 characters. 1. Select **Create**. @@ -31,24 +31,94 @@ On the **Plan listing** tab, you can define the plan name and description as you 1. In the **Plan name** box, the name you provided earlier for this plan appears here. You can change it at any time. This name will appear in the commercial marketplace as the title of your offer's software plan. 1. In the **Plan description** box, explain what makes this software plan unique and any differences from other plans within your offer. This description may contain up to 3,000 characters. -1. Select **Save draft**, and then in the breadcrumb at the top of the page, select **Plans**. +1. Select **Save draft**. - [ ![Screenshot shows the Plan overview link on the Plan listing page of an offer in Partner Center.](./media/third-party-license/bronze-plan-workspaces.png) ](./media/third-party-license/bronze-plan-workspaces.png#lightbox) +## Define pricing and availability -1. To create another plan for this offer, at the top of the **Plan overview** page, select **+ Create new plan**. Then repeat the steps in the [Create a plan](#create-a-plan) section. Otherwise, if you're done creating plans, go to the next section: Copy the Service IDs. +If you chose to sell through Microsoft and have Microsoft host transactions on your behalf, then the **Pricing and availability** tab appears in the left-nav. Otherwise, go to [Copy the Service IDs](#copy-the-service-ids). + +1. In the left-nav, select **Pricing and availability**. +1. In the **Markets** section, select **Edit markets**. +1. On the side panel that appears, select at least one market. To make your offer available in every possible market, choose **Select all** or select only the specific markets you want. When you're finished, select **Save**. + + Your selections here apply only to new acquisitions; if someone already has your app in a certain market, and you later remove that market, the people who already have the offer in that market can continue to use it, but no new customers in that market will be able to get your offer. + + > [!IMPORTANT] + > It is your responsibility to meet any local legal requirements, even if those requirements aren't listed here or in Partner Center. Even if you select all markets, local laws, restrictions, or other factors may prevent certain offers from being listed in some countries and regions. + +### Configure per user pricing + +1.
On the **Pricing and availability** tab, under **User limits**, optionally specify the minimum and maximum number of users for this plan. + > [!NOTE] + > If you choose not to define the user limits, the default value of one to one million users will be used. +1. Under **Billing term**, specify a monthly price, annual price, or both. + + > [!NOTE] + > You must specify a price for your offer, even if the price is zero. + +### Enable a free trial + +You can optionally configure a free trial for each plan in your offer. To enable a free trial, select the **Allow a one-month free trial** check box. + +> [!IMPORTANT] +> After your transactable offer has been published with a free trial, it cannot be disabled for that plan. Make sure this setting is correct before you publish the offer to avoid having to re-create the plan. + +If you select this option, customers are not charged for the first month of use. At the end of the free month, one of the following occurs: +- If the customer chose recurring billing, they will automatically be upgraded to a paid plan and the selected payment method is charged. +- If the customer didn’t choose recurring billing, the plan will expire at the end of the free trial. + +### Choose who can see your plan + +You can configure each plan to be visible to everyone or to only a specific audience. You grant access to a private plan using tenant IDs with the option to include a description of each tenant ID you assign. You can add a maximum of 10 tenant IDs manually or up to 20,000 tenant IDs using a .CSV file. A private plan is not the same as a preview audience. + +> [!NOTE] +> If you publish a private plan, you can change its visibility to public later. However, once you publish a public plan, you cannot change its visibility to private. + +#### Make your plan public + +1. Under **Plan visibility**, select **Public**. +1. Select **Save draft**, and then go to [View your plans](#view-your-plans). + +#### Manually add tenant IDs for a private plan + +1. Under **Plan visibility**, select **Private**. +1. In the **Tenant ID** box that appears, enter the Azure AD tenant ID of the audience you want to grant access to this private plan. A minimum of one tenant ID is required. +1. (Optional) Enter a description of this audience in the **Description** box. +1. To add another tenant ID, select **Add ID**, and then repeat steps 2 and 3. +1. When you're done adding tenant IDs, select **Save draft**, and then go to [View your plans](#view-your-plans). + +#### Use a .CSV file for a private plan + +1. Under **Plan visibility**, select **Private**. +1. Select the **Export Audience (csv)** link. +1. Open the .CSV file and add the Azure IDs you want to grant access to the private offer to the **ID** column. +1. (Optional) Enter a description for each audience in the **Description** column. +1. Add "TenantID" in the **Type** column for each row with an Azure ID. +1. Save the .CSV file. +1. On the **Pricing and availability** tab, under **Plan visibility**, select the **Import Audience (csv)** link. +1. In the dialog box that appears, select **Yes**. +1. Select the .CSV file and then select **Open**. +1. Select **Save draft**, and then go to the next section: View your plans. + +### View your plans + +1. In the breadcrumb at the top of the page, select **Plan overview**. +1. To create another plan for this offer, at the top of the **Plan overview** page, repeat the steps in the [Create a plan](#create-a-plan) section.
Otherwise, if you're done creating plans, go to the next section: Copy the Service IDs. ## Copy the Service IDs You need to copy the Service ID of each plan you created so you can map them to your solution package in the next section: Add Service IDs to your solution package. -- For each plan you created, copy the Service ID to a safe place. You’ll add them to your solution package in the next step. The service ID is listed on the **Plan overview** page in the form of `ISV name.offer name.plan ID`. For example, Fabrikam.F365.bronze. +1. To go to the **Plan overview** page, in the breadcrumb at the top of the page, select **Plan overview**. If you don’t see the breadcrumb, select **Plan overview** in the left-nav. + +1. For each plan you created, copy the Service ID to a safe place. You’ll add them to your solution package in the next section. The service ID is listed on the **Plan overview** page in the form of `ISV name.offer name.plan ID`. For example, fabrikam.f365.bronze. [ ![Screenshot of the Plan overview page. The service ID for the plan is highlighted.](./media/third-party-license/service-id-workspaces.png) ](./media/third-party-license/service-id-workspaces.png#lightbox) ## Add Service IDs to your solution package -1. Add the Service IDs you copied in the previous step to your solution package. To learn how, see [Add licensing information to your solution](/powerapps/developer/data-platform/appendix-add-license-information-to-your-solution) and [Create an AppSource package for your app](/powerapps/developer/data-platform/create-package-app-appsource). -1. After you create the CRM package .zip file, upload it to Azure Blob Storage. You will need to provide the SAS URL of the Azure Blob Storage account that contains the uploaded CRM package .zip file. +1. Add the Service IDs you copied in the previous step to the metadata of your solution package. To learn how, see [Add licensing information to your solution](/powerapps/developer/data-platform/appendix-add-license-information-to-your-solution) and [Create an AppSource package for your app](/powerapps/developer/data-platform/create-package-app-appsource). +1. After you create the CRM package .zip file, upload it to [Azure Blob Storage](/power-apps/developer/data-platform/store-appsource-package-azure-storage). You will need to provide the SAS URL of the Azure Blob Storage account that contains the uploaded CRM package .zip file when you configure the offer's technical configuration. ## Next steps diff --git a/articles/marketplace/dynamics-365-review-publish.md b/articles/marketplace/dynamics-365-review-publish.md index 69000354b1d9f..ef5cc0e26062f 100644 --- a/articles/marketplace/dynamics-365-review-publish.md +++ b/articles/marketplace/dynamics-365-review-publish.md @@ -7,53 +7,81 @@ ms.subservice: partnercenter-marketplace-publisher ms.topic: how-to author: vamahtan ms.author: vamahtan -ms.date: 09/27/2021 +ms.date: 05/25/2022 --- # Review and publish a Dynamics 365 offer -This article shows you how to use Partner Center to preview your draft Dynamics 365 offer and then publish it to the commercial marketplace. It also covers how to check publishing status as it proceeds through the publishing steps. +This article shows you how to use Partner Center to submit your Dynamics 365 offer for publishing, preview your offer, subscribe to a plan, and then publish it live to the commercial marketplace. It also covers how to check the publishing status as it proceeds through the publishing steps.
You must have already created the offer that you want to publish. -## Offer status +## Submit your offer to publishing -You can review your offer status on the **Overview** tab of the commercial marketplace dashboard in [Partner Center](https://partner.microsoft.com/dashboard/commercial-marketplace/overview). The **Status** of each offer will be one of the following: +1. Return to [Partner Center](https://go.microsoft.com/fwlink/?linkid=2166002). +1. On the Home page, select the **Marketplace offers** tile. +1. In the **Offer alias** column, select the offer you want to publish. +1. In the upper-right corner of the portal, select **Review and publish**. +1. Make sure that the **Status column** for each page for the offer says **Complete**. The three possible statuses are as follows: + + - **Not started** – The page is incomplete. + - **Incomplete** – The page is missing required information or has errors that need to be fixed. You'll need to go back to the page and update it. + - **Complete** – The page is complete. All required data has been provided and there are no errors. + +1. If any of the pages have a status other than **Complete**, select the page name, correct the issue, save the page, and then select **Review and publish** again to return to this page. +1. Some offer types require testing. After all of the pages are complete, if you see a **Notes for certification** box, provide testing instructions to the certification team to ensure that your app is tested correctly. Provide any supplementary notes helpful for understanding your app. +1. To start the publishing process for your offer, select **Publish**. The **Offer overview** page appears and shows the offer's **Publish status**. + +## Publish status + +Your offer's publish status will change as it moves through the publication process. You can review your offer status on the **Overview** tab of the commercial marketplace offer in [Partner Center](https://partner.microsoft.com/dashboard/commercial-marketplace/overview). The **Status** of each offer will be one of the following: | Status | Description | -| ------------ | ------------- | +| ------------ | ------------ | | Draft | Offer has been created but it isn't being published. | | Publish in progress | Offer is working its way through the publishing process. | | Attention needed | We discovered a critical issue during certification or during another publishing phase. | | Preview | We certified the offer, which now awaits a final verification by the publisher. Select **Go live** to publish the offer live. | | Live | Offer is live in the marketplace and can be seen and acquired by customers. | -| Pending stop sell | Publisher selected "stop sell" on an offer or plan, but the action has not yet been completed. | +| Pending stop distribution | Publisher selected "stop distribution" on an offer or plan, but the action has not yet been completed. | | Not available in the marketplace | A previously published offer in the marketplace has been removed. | -## Validation and publishing steps +## Preview and subscribe to the offer -Your offer's publish status will change as it moves through the publication process. For detailed information on this process, see [Validation and publishing steps](review-publish-offer.md#validation-and-publishing-steps). +When the offer is ready for you to test in the preview environment, we’ll send you an email to request that you review and approve your offer preview. 
You can also refresh the **Offer overview** page in your browser to see if your offer has reached the Publisher sign-off phase. If it has, the **Go live** button and preview link will be available. If you chose to sell your offer through Microsoft, anyone who has been added to the preview audience can test the acquisition and deployment of your offer to ensure it meets your requirements during this stage. -When you are ready to submit an offer for publishing, select **Review and publish** at the upper-right corner of the portal. You'll see the status of each page for your offer listed as one of the following: +The following screenshot shows the **Offer overview** page for a _Dynamics 365 apps on Dataverse and Power apps_ offer, with a preview link under the **Go live** button. The validation steps you’ll see on this page vary depending on the selections you made when you created the offer. -- **Not started** – The page is incomplete. -- **Incomplete** – The page is missing required information or has errors that need to be fixed. You'll need to go back to the page and update it. -- **Complete** – The page is complete. All required data has been provided and there are no errors. +- To preview your offer, select the _preview link_ under the **Go live** button. This takes you to the product details page on AppSource, where you can validate that all the details of the offer are showing correctly. -If any of the pages have a status other than **Complete**, you need to correct the issue on that page and then return to the **Review and publish** page to confirm the status now shows as **Complete**. Some offer types require testing. If so, you will see a **Notes for certification** field where you need to provide testing instructions to the certification team and any supplementary notes helpful for understanding your app. + [ ![Illustrates the preview link on the Offer overview page.](./media/dynamics-365/preview-link.png) ](./media/dynamics-365/preview-link.png#lightbox) -After all pages are complete and you have entered applicable testing notes, select **Publish** to submit your offer. We will email you when a preview version of your offer is available to approve. At that time complete the following steps: +> [!IMPORTANT] +> To validate the end-to-end purchase and setup flow, purchase your offer while it is in Preview. First notify Microsoft with a support ticket to ensure we are aware that you're testing the offer. Otherwise, the customer account used for the purchase will be billed and invoiced. Publisher Payout will occur when the criteria are met and will be paid out per the payout schedule with the agency fee deducted from the purchase price. -1. Return to [Partner Center](https://go.microsoft.com/fwlink/?linkid=2166002). -1. On the Home page, select the **Marketplace offers** tile. +If your offer is a _Contact Me_ listing, test that a lead is created as expected by providing the Contact Me details during preview. + +## Test the offer in AppSource + +1. From the _Product details_ page of the offer, select the **Buy Now** button. +1. Select the plan you want to purchase and then select **Next**. +1. Select the billing term, recurring billing term, and number of users. +1. On the Payment page, enter the sold-to address and payment method. +1. To place the order, select the **Place order** button. +1. Once the order is placed, you can select the **Assign licenses** button to go to the [Microsoft 365 admin center](https://admin.microsoft.com/) to assign licenses to users. 
+ +## Go live - [ ![Illustrates the Marketplace offers tile on the Partner Center Home page.](./media/workspaces/partner-center-home.png) ](./media/workspaces/partner-center-home.png#lightbox) +After you complete your tests, you can publish the offer live to the commercial marketplace. +1. Return to [Partner Center](https://go.microsoft.com/fwlink/?linkid=2166002). +1. On the Home page, select the **Marketplace offers** tile. 1. On the Marketplace offers page, select the offer. -1. Select **Review and publish**. 1. Select **Go live** to make your offer publicly available. -After you select **Review and publish**, we will perform certification and other verification processes before your offer is published to AppSource. We will notify you when your offer is available in preview so you can go live. If there is an issue, we will notify you with the details and provide guidance on how to fix it. +All offers for Dynamics 365 go through our certification process. Now that you’ve chosen to make your offer available in the commercial marketplace, we will perform certification and other verification processes before your offer is published to AppSource. If there is an issue, we will notify you with the details and provide guidance on how to fix it. + +After these validation checks are complete, your offer will be live in the marketplace. ## Next steps -- If you enabled _Third-party app license management through Microsoft_ for your offer, after you sell your offer, you’ll need to register the deal in Partner Center. To learn more, see [Managing licensing in marketplace offers](/partner-center/csp-commercial-marketplace-licensing). +- If you enabled _Third-party app license management through Microsoft_ for your offer, after you sell your offer, you’ll need to register the deal in Partner Center. To learn more, see [Register deals you've won in Partner Center](/partner-center/register-deals). - [Update an existing offer in the Commercial Marketplace](update-existing-offer.md) diff --git a/articles/marketplace/isv-app-license.md b/articles/marketplace/isv-app-license.md index b4c71136e2aa8..44db5d41029b9 100644 --- a/articles/marketplace/isv-app-license.md +++ b/articles/marketplace/isv-app-license.md @@ -7,7 +7,7 @@ ms.topic: conceptual author: mingshen-ms ms.author: mingshen ms.reviewer: dannyevers -ms.date: 12/03/2021 +ms.date: 05/25/2022 --- # ISV app license management @@ -16,15 +16,15 @@ Applies to the following offer type: - Dynamics 365 apps on Dataverse and Power Apps -_ISV app license management_ enables independent software vendors (ISVs) who build solutions using Dynamics 365 suite of products to manage and enforce licenses for their solutions using systems provided by Microsoft. By adopting this approach you can: +_ISV app license management_ enables independent software vendors (ISVs) who build solutions using Dynamics 365 suite of products to manage and enforce licenses for their solutions using systems provided by Microsoft. By adopting license management, ISVs can: -- Enable your customers to assign and unassign your solution’s licenses using familiar tools such as Microsoft 365 Admin Center, which they use to manage Office and Dynamics licenses. -- Have the Power Platform enforce your licenses at runtime to ensure that only licensed users can access your solution. +- Enable your customers to assign and unassign licenses of ISV products using familiar tools such as Microsoft 365 Admin Center, which customers use to manage Office and Dynamics licenses. 
+- Have the Power Platform enforce ISV product licenses at runtime to ensure that only licensed users can access your solution. - Save yourself the effort of building and maintaining your own license management and enforcement system. - -> [!NOTE] -> ISV app license management is only available to ISVs participating in the ISV Connect program. Microsoft is not involved in the sale of licenses. +ISV app license management currently supports: +- A named user license model. Each license must be assigned to an Azure AD user or Azure AD security group. +- [Enforcement for model-driven apps](/power-apps/maker/model-driven-apps/model-driven-app-overview). ## Prerequisites @@ -37,17 +37,49 @@ To manage your ISV app licenses, you need to comply with the following pre-requi ## High-level process -This table illustrates the high-level process to manage ISV app licenses: +The process varies depending on whether Microsoft hosts transactions on your behalf (also known as a _transactable offer_) or you only list the offer through the marketplace and host transactions independently. + +These steps illustrate the high-level process to manage ISV app licenses: + +### Step 1: Create an offer + +| Transactable offers | Licensable-only offers | +| ------------ | ------------- | +| The ISV [creates an offer in Partner Center](dynamics-365-customer-engage-offer-setup.md) and chooses to transact through Microsoft’s commerce system and enable Microsoft to manage the licenses of these add-ons. The ISV also defines at least one plan and configures pricing information and availability. The ISV can optionally define a private plan which only specific customers can see and purchase on [Microsoft AppSource](https://appsource.microsoft.com/). | The ISV [creates an offer in Partner Center](dynamics-365-customer-engage-offer-setup.md) and chooses to manage licenses for this offer through Microsoft. This includes defining one or more licensing plans for the offer. | + +### Step 2: Add license metadata to solution package + +The ISV creates a solution package for the offer that includes license plan information as metadata and uploads it to Partner Center for publication to Microsoft AppSource. To learn more, see [Adding license metadata to your solution](/powerapps/developer/data-platform/appendix-add-license-information-to-your-solution). + +### Step 3: Purchase subscription to ISV products + +| Transactable offers | Licensable-only offers | +| ------------ | ------------- | +| Customers discover the ISV’s offer in AppSource, purchase a subscription to the offer from AppSource, and get licenses for the ISV app. | - Customers discover the ISV’s offer in AppSource or directly on the ISV’s website. Customers purchase licenses for the plans they want directly from the ISV.
                  - The ISV registers the purchase with Microsoft in Partner Center. As part of [deal registration](/partner-center/csp-commercial-marketplace-licensing#register-isv-connect-deal-in-deal-registration), the ISV will specify the type and quantity of each licensing plan purchased by the customer. | + +### Step 4: Manage subscription -| Step | Details | +| Transactable offers | Licensable-only offers | | ------------ | ------------- | -| Step 1: Create offer | The ISV creates an offer in Partner Center and chooses to manage licenses for this offer through Microsoft. This includes defining one or more licensing plans for the offer. For more information, see [Create a Dynamics 365 apps on Dataverse and Power Apps offer on Microsoft AppSource](dynamics-365-customer-engage-offer-setup.md). | -| Step 2: Update package | The ISV creates a solution package for the offer that includes license plan information as metadata, and uploads it to Partner Center for publication to Microsoft AppSource. To learn more, see [Adding license metadata to your solution](/powerapps/developer/data-platform/appendix-add-license-information-to-your-solution). | -| Step 3: Purchase licenses | Customers discover the ISV’s offer in AppSource or directly on the ISV’s website. Customers purchase licenses for the plans they want directly from the ISV (these offers are not purchasable through AppSource at this time). | -| Step 4: Register deal | The ISV registers the purchase with Microsoft in Partner Center. As part of [deal registration](/partner-center/csp-commercial-marketplace-licensing#register-isv-connect-deal-in-deal-registration), the ISV will specify the type and quantity of each licensing plan purchased by the customer. | -| Step 5: Manage licenses | The license plans will appear in Microsoft 365 Admin Center for the customer to [assign to users or groups](/microsoft-365/commerce/licenses/manage-third-party-app-licenses) in their organization. The customer can also install the application in their tenant via the Power Platform Admin Center. | -| Step 6: Perform license check | When a user within the customer’s organization tries to run an application, Microsoft checks to ensure that user has a license before permitting them to run it. If they don’t have a license, the user sees a message explaining that they need to contact an administrator for a license. | -| Step 7: View reports | ISVs can view information on provisioned and assigned licenses over a period of time and by geography. | +| Customers can manage subscriptions for the Apps they purchased in [Microsoft 365 admin center](https://admin.microsoft.com/), just like they normally do for any of their Microsoft Office or Dynamics subscriptions. | ISVs activate and manage deals in Partner Center ([deal registration portal(https://partner.microsoft.com/)]) | + +### Step 5: Assign licenses + +Customers can assign licenses of these add-ons in license pages under the billing node in [Microsoft 365 admin center](https://admin.microsoft.com/). Customers can assign licenses to users or groups. Doing so will enable these users to launch the ISV app. Customers can also install the app from [Microsoft 365 admin center](https://admin.microsoft.com/) into their Power Platform environment. + +**Licensable-only offers:** +- The license plans will appear in Microsoft 365 Admin Center for the customer to [assign to users or groups](/microsoft-365/commerce/licenses/manage-third-party-app-licenses) in their organization. 
The customer can also install the application in their tenant via the Power Platform Admin Center. + +### Step 6: Power Platform performs license checks + +When a user within the customer’s organization tries to run an application, Microsoft checks to ensure that the user has a license before permitting them to run it. If they do not have a license, the user sees a message explaining that they need to contact an administrator for a license. + +### Step 7: View reports + +ISVs can view information on: +- Orders purchased, renewed, or cancelled over time and by geography. + +- Provisioned and assigned licenses over a period of time and by geography. ## Enabling app license management through Microsoft @@ -60,9 +92,12 @@ Here’s how it works: - After you select the **Enable app license management through Microsoft** box, you can define licensing plans for your offer. - Customers will see a **Get it now** button on the offer listing page in AppSource. Customers can select this button to contact you to purchase licenses for the app. +> [!NOTE] +> This check box is automatically enabled if you choose to sell your offer through Microsoft and have Microsoft host transactions on your behalf. + ### Allow customers to install my app even if licenses are not assigned check box -After you select the first box, the **Allow customers to install my app even if licenses are not assigned** box appears. This option is useful if you are employing a “freemium” licensing strategy whereby you want to offer some basic features of your solution for free to all users and charge for premium features. Conversely, if you want to ensure that only tenants who currently own licenses for your product can download it from AppSource, then don’t select this option. +If you choose to list your offer through the marketplace and process transactions independently, after you select the first box, the **Allow customers to install my app even if licenses are not assigned** box appears. This option is useful if you are employing a “freemium” licensing strategy whereby you want to offer some basic features of your solution for free to all users and charge for premium features. Conversely, if you want to ensure that only tenants who currently own licenses for your product can download it from AppSource, then don’t select this option. > [!NOTE] > If you choose this option, you need to configure your solution package to not require a license. @@ -80,9 +115,7 @@ After your offer is published, the options you chose will drive which buttons ap :::image type="content" source="./media/third-party-license/f365.png" alt-text="Screenshot of an offer listing page on AppSource. 
The Get it now and Contact me buttons are shown."::: -***Figure 1: Offer listing page on Microsoft AppSource*** - ## Next steps - [Plan a Dynamics 365 offer](marketplace-dynamics-365.md) -- [How to create a Dynamics 365 apps on Dataverse and Power Apps offer](dynamics-365-customer-engage-offer-setup.md) +- [Create a Dynamics 365 apps on Dataverse and Power Apps offer](dynamics-365-customer-engage-offer-setup.md) diff --git a/articles/marketplace/marketplace-dynamics-365.md b/articles/marketplace/marketplace-dynamics-365.md index 8b47576d55b8a..24920dfa4d838 100644 --- a/articles/marketplace/marketplace-dynamics-365.md +++ b/articles/marketplace/marketplace-dynamics-365.md @@ -6,7 +6,7 @@ ms.subservice: partnercenter-marketplace-publisher ms.topic: conceptual author: vamahtan ms.author: vamahtan -ms.date: 04/13/2022 +ms.date: 06/06/2022 --- # Plan a Microsoft Dynamics 365 offer @@ -15,11 +15,11 @@ This article explains the different options and features of a Dynamics 365 offer Before you start, create a commercial marketplace account in [Partner Center](./create-account.md) and ensure it is enrolled in the commercial marketplace program. Also, review the [publishing process and guidelines](/office/dev/store/submit-to-appsource-via-partner-center). -## Licensing options +## Listing options -As you prepare to publish a new offer, you need to decide which licensing option to choose. This will determine what additional information you'll need to provide later as you create the offer in Partner Center. +As you prepare to publish a new offer, you need to decide which listing option to choose. This will determine what additional information you'll need to provide later as you create the offer in Partner Center. -These are the available licensing options for Dynamics 365 offer types: +These are the available listing options for the _Dynamics 365 apps on Dataverse and Power Apps_ offer type: | Offer type | Listing option | | --- | --- | @@ -31,12 +31,13 @@ These are the available licensing options for Dynamics 365 offer types: The following table describes the transaction process of each listing option. -| Licensing option | Transaction process | +| Listing option | Transaction process | | --- | --- | +| Transact with license management | You can choose to sell through Microsoft and have Microsoft host transactions on your behalf. For more information about this option, see [ISV app license management](isv-app-license.md).
                  Currently available to the following offer type only:
                  • Dynamics 365 apps on Dataverse and Power Apps
                  | +| License management | Enables you to manage your ISV app licenses in Partner Center. For more information about this option, see [ISV app license management](isv-app-license.md).
                  Currently available to the following offer type only:
                  • Dynamics 365 apps on Dataverse and Power Apps
                  | | Contact me | Collect customer contact information by connecting your Customer Relationship Management (CRM) system. The customer will be asked for permission to share their information. These customer details, along with the offer name, ID, and marketplace source where they found your offer, will be sent to the CRM system that you've configured. For more information about configuring your CRM, see the **Customer leads** section of your offer type's **Offer setup** page. | | Free trial (listing) | Offer your customers a one-, three- or six-month free trial. Offer listing free trials are created, managed, and configured by your service and do not have subscriptions managed by Microsoft. | | Get it now (free) | List your offer to customers for free. | -| Get it now | Enables you to manage your ISV app licenses in Partner Center.
                  Currently available to the following offer type only:
                  • Dynamics 365 apps on Dataverse and Power Apps

                  For more information about this option, see [ISV app license management](isv-app-license.md). | ## Test drive @@ -44,7 +45,7 @@ The following table describes the transaction process of each listing option. ## Customer leads -When you're publishing an offer to the commercial marketplace with Partner Center, you'll want to connect it to your Customer Relationship Management (CRM) system. This lets you receive customer contact information as soon as someone expresses interest in or uses your product. Connecting to a CRM is required if you want to enable a test drive; otherwise, connecting to a CRM is optional. Partner Center supports Azure table, Dynamics 365 Customer Engagement, HTTPS endpoint, Marketo, and Salesforce. +When you're publishing an offer to the commercial marketplace with Partner Center, you'll want to connect it to your Customer Relationship Management (CRM) system. This lets you receive customer contact information as soon as someone expresses interest in or uses your product. Partner Center supports Azure table, Dynamics 365 Customer Engagement, HTTPS endpoint, Marketo, and Salesforce. ## Legal diff --git a/articles/marketplace/media/api-call-pattern.svg b/articles/marketplace/media/api-call-pattern.svg new file mode 100644 index 0000000000000..821c42d9bc035 --- /dev/null +++ b/articles/marketplace/media/api-call-pattern.svg @@ -0,0 +1 @@ +1. Make requestA new job is created (jobId)2. Poll job status (poll once per minute until complete)Use jobId from step 13. Get information from completed jobsReceive resourceURI, which provides information about the relevant private offer \ No newline at end of file diff --git a/articles/marketplace/media/dynamics-365/preview-link.png b/articles/marketplace/media/dynamics-365/preview-link.png new file mode 100644 index 0000000000000..f3456f839a92c Binary files /dev/null and b/articles/marketplace/media/dynamics-365/preview-link.png differ diff --git a/articles/marketplace/media/parsing-error-messages.png b/articles/marketplace/media/parsing-error-messages.png new file mode 100644 index 0000000000000..ec1579da779b6 Binary files /dev/null and b/articles/marketplace/media/parsing-error-messages.png differ diff --git a/articles/marketplace/media/third-party-license/service-id-workspaces.png b/articles/marketplace/media/third-party-license/service-id-workspaces.png index 7701457f4165b..f31ceeeceed8c 100644 Binary files a/articles/marketplace/media/third-party-license/service-id-workspaces.png and b/articles/marketplace/media/third-party-license/service-id-workspaces.png differ diff --git a/articles/marketplace/orders-dashboard.md b/articles/marketplace/orders-dashboard.md index 5b9e00df3b7ea..84d2f92f16853 100644 --- a/articles/marketplace/orders-dashboard.md +++ b/articles/marketplace/orders-dashboard.md @@ -7,7 +7,7 @@ ms.topic: article author: smannepalle ms.author: smannepalle ms.reviewer: sroy -ms.date: 04/28/2022 +ms.date: 06/06/2022 --- # Orders dashboard in commercial marketplace analytics @@ -252,7 +252,7 @@ This table displays a numbered list of the 500 top orders sorted by date of acqu | Marketplace Subscription ID | Marketplace Subscription ID | The unique identifier associated with the Azure subscription the customer used to purchase your commercial marketplace offer. For infrastructure offers, this is the customer's Azure subscription GUID. For SaaS offers, this is shown as zeros since SaaS purchases do not require an Azure subscription. 
| Marketplace Subscription ID | | MonthStartDate | Month Start Date | Month Start Date represents month of Purchase. The format is yyyy-mm-dd. | MonthStartDate | | Offer Type | Offer Type | The type of commercial marketplace offering. | OfferType | -| Azure License Type | Azure License Type | The type of licensing agreement used by customers to purchase Azure. Also known as Channel. The possible values are:
                  • [Cloud Solution Provider](cloud-solution-providers.md)
                  • Enterprise
                  • Enterprise through Reseller
                  • Pay as You Go
                  • GTM
                  | AzureLicenseType | +| Azure License Type | Azure License Type | The type of licensing agreement used by customers to purchase Azure. Also known as Channel. The possible values are:
                  • Cloud Solution Provider
                  • Enterprise
                  • Enterprise through Reseller
                  • Pay as You Go
                  • GTM
                  | AzureLicenseType | | Marketplace License Type | Marketplace License Type | The billing method of the commercial marketplace offer. The possible values are:
                  • Billed through Azure
                  • Bring Your Own License
                  • Free
                  • Microsoft as Reseller
                  | MarketplaceLicenseType | | SKU | SKU | The plan associated with the offer | SKU | | Customer Country | Customer Country/Region | The country/region name provided by the customer. Country/region could be different than the country/region in a customer's Azure subscription. | CustomerCountry | @@ -266,9 +266,11 @@ This table displays a numbered list of the 500 top orders sorted by date of acqu | Customer Company Name | Customer Company Name | The company name provided by the customer. Name could be different than the city in a customer's Azure subscription. | CustomerCompanyName | | Order Purchase Date | Order Purchase Date | The date the commercial marketplace order was created. The format is yyyy-mm-dd. | OrderPurchaseDate | | Offer Name | Offer Name | The name of the commercial marketplace offering. | OfferName | -| Is Private Offer | Is Private Offer | Indicates whether a marketplace offer is private or a public offer
                  • 0 value indicates false
                  • 1 value indicates true | Is Private Offer | -| Term Start Date | TermStartDate | Indicates the start date of a term for an order. | TermStartDate | -| Term End Date | TermEndDate | Indicates the end date of a term for an order. | TermEndDate | +| Is Private Offer | Is Private Offer | Indicates whether a marketplace offer is private or a public offer
                    • 0 value indicates false
                    • 1 value indicates true
                    **Note:** [Private plans are different from Private offers](isv-customer-faq.yml). | Is Private Offer | +| Not available | BillingTerm | Indicates the term duration of the offer purchased by the customer | BillingTerm | +| Not available | BillingPlan | Indicates the billing frequency of the offer purchased by the customer | BillingPlan | +| Term Start Date | TermStartDate | Indicates the start date of a term for an order | TermStartDate | +| Term End Date | TermEndDate | Indicates the end date of a term for an order | TermEndDate | | Not available | purchaseRecordId | The identifier of the purchase record for an order purchase | purchaseRecordId | | Not available | purchaseRecordLineItemId | The identifier of the purchase record line item related to this order. | purchaseRecordLineItemId | | Billed Revenue USD | EstimatedCharges | The price the customer will be charged for all order units before taxation. This is calculated in customer transaction currency. In tax-inclusive countries, this price includes the tax, otherwise it does not. | EstimatedCharges | @@ -280,7 +282,10 @@ This table displays a numbered list of the 500 top orders sorted by date of acqu | Trial End Date | Trial End Date | The date the trial period for this order will end or has ended. | TrialEndDate | | Customer ID | Customer ID | The unique identifier assigned to a customer. A customer may have zero or more Azure Marketplace subscriptions. | CustomerID | | Billing Account ID | Billing Account ID | The identifier of the account on which billing is generated. Map **Billing Account ID** to **customerID** to connect your Payout Transaction Report with the Customer, Order, and Usage Reports. | BillingAccountId | +| Reference Id | ReferenceId | A key to link orders having usage details in usage report. Map this field value with the value for UsageReference key in usage report. This is applicable for SaaS with custom meters and VM software reservation offer types | ReferenceId | | PlanId | PlanId | The display name of the plan entered when the offer was created in Partner Center. Note that PlanId was originally a numeric number. | PlanId | +| Auto Renew | Auto Renew | Indicates whether a subscription is due for an automatic renewal. Possible values are:
                    • TRUE: Indicates that on the TermEnd the subscription will renew automatically.
                    • FALSE: Indicates that on the TermEnd the subscription will expire.
• NULL: The product does not support renewals. Indicates that on the TermEnd the subscription will expire. This is displayed as "-" in the UI.
                    | AutoRenew | +| Not available | Event Timestamp | Indicates the timestamp of an order management event, such as an order purchase, cancelation, renewal, and so on | EventTimestamp | ### Orders page filters diff --git a/articles/marketplace/partner-center-portal/pc-saas-fulfillment-apis.md b/articles/marketplace/partner-center-portal/pc-saas-fulfillment-apis.md index ad90336a78cb6..00391e43798a0 100644 --- a/articles/marketplace/partner-center-portal/pc-saas-fulfillment-apis.md +++ b/articles/marketplace/partner-center-portal/pc-saas-fulfillment-apis.md @@ -62,3 +62,7 @@ For more information about CSP, refer to https://partner.microsoft.com/licensing ## Next steps - If you have not already done so, register your SaaS application in the [Azure portal](https://portal.azure.com) as explained in [Register an Azure AD Application](./pc-saas-registration.md). Afterwards, use the most current version of this interface for development: [SaaS fulfillment Subscription APIs v2](pc-saas-fulfillment-subscription-api.md) and [SaaS fulfillment Operations APIs v2](pc-saas-fulfillment-operations-api.md). + +**Video tutorials** + +- [The SaaS Client Library for .NET](https://go.microsoft.com/fwlink/?linkid=2196324) diff --git a/articles/marketplace/partner-center-portal/pc-saas-fulfillment-life-cycle.md b/articles/marketplace/partner-center-portal/pc-saas-fulfillment-life-cycle.md index f3ec1606330a5..4d56d49303ec7 100644 --- a/articles/marketplace/partner-center-portal/pc-saas-fulfillment-life-cycle.md +++ b/articles/marketplace/partner-center-portal/pc-saas-fulfillment-life-cycle.md @@ -151,3 +151,8 @@ A SaaS subscription can be canceled at any point in its life cycle. After a subs - [SaaS fulfillment Subscription APIs v2](pc-saas-fulfillment-subscription-api.md) - [SaaS fulfillment operations APIs v2](pc-saas-fulfillment-operations-api.md) + +**Video tutorials** + +- [Building a Simple SaaS Publisher Portal in .NET](https://go.microsoft.com/fwlink/?linkid=2196257) +- [Using the SaaS Offer REST Fulfillment API](https://go.microsoft.com/fwlink/?linkid=2196320) diff --git a/articles/marketplace/partner-center-portal/pc-saas-fulfillment-subscription-api.md b/articles/marketplace/partner-center-portal/pc-saas-fulfillment-subscription-api.md index 71ceff09ab1d8..98bc8dbfb8093 100644 --- a/articles/marketplace/partner-center-portal/pc-saas-fulfillment-subscription-api.md +++ b/articles/marketplace/partner-center-portal/pc-saas-fulfillment-subscription-api.md @@ -4,7 +4,7 @@ description: Learn how to use the Subscription APIs, which are part of the the ms.service: marketplace ms.subservice: partnercenter-marketplace-publisher ms.topic: reference -ms.date: 03/07/2022 +ms.date: 06/03/2022 author: arifgani ms.author: argani --- @@ -65,13 +65,13 @@ Response body example: "emailId": "test@test.com", "objectId": "", "tenantId": "", - "pid": "" + "puid": "" }, "purchaser": { "emailId": "test@test.com", "objectId": "", "tenantId": "", - "pid": "" + "puid": "" }, "planId": "silver", "term": { @@ -238,13 +238,13 @@ Returns the list of all existing subscriptions for all offers made by this publi "emailId": " test@contoso.com", "objectId": "", "tenantId": "", - "pid": "" + "puid": "" }, "purchaser": { "emailId": "purchase@csp.com ", "objectId": "", "tenantId": "", - "pid": "" + "puid": "" }, "term": { "startDate": "2019-05-31", diff --git a/articles/marketplace/partner-center-portal/pc-saas-fulfillment-webhook.md b/articles/marketplace/partner-center-portal/pc-saas-fulfillment-webhook.md index 
f53eef85cb63f..244ec98d8086e 100644 --- a/articles/marketplace/partner-center-portal/pc-saas-fulfillment-webhook.md +++ b/articles/marketplace/partner-center-portal/pc-saas-fulfillment-webhook.md @@ -276,3 +276,9 @@ See [Support for the commercial marketplace program in Partner Center](../suppor See the [commercial marketplace metering service APIs](../marketplace-metering-service-apis.md) for more options for SaaS offers in the commercial marketplace. Review and use the [clients for different programming languages and samples](https://github.com/microsoft/commercial-marketplace-samples). + +**Video tutorials** + +- [SaaS Webhook Overview](https://go.microsoft.com/fwlink/?linkid=2196258) +- [Implementing a Simple SaaS Webhook in .NET](https://go.microsoft.com/fwlink/?linkid=2196159) +- [Azure AD Application Registrations](https://go.microsoft.com/fwlink/?linkid=2196262) diff --git a/articles/marketplace/partner-center-portal/pc-saas-registration.md b/articles/marketplace/partner-center-portal/pc-saas-registration.md index 0fee1c14ac80f..d1229ca8e45bd 100644 --- a/articles/marketplace/partner-center-portal/pc-saas-registration.md +++ b/articles/marketplace/partner-center-portal/pc-saas-registration.md @@ -121,3 +121,7 @@ Sample response: ## Next steps Your Azure AD-secured app can now use the [SaaS Fulfillment Subscription APIs Version 2](pc-saas-fulfillment-subscription-api.md) and [SaaS Fulfillment Operations APIs Version 2](pc-saas-fulfillment-operations-api.md). + +**Video tutorials** + +- [Azure AD Application Registrations](https://go.microsoft.com/fwlink/?linkid=2196262) diff --git a/articles/marketplace/partner-center-portal/saas-metered-billing.md b/articles/marketplace/partner-center-portal/saas-metered-billing.md index c8383dff79371..6da72ae090cf3 100644 --- a/articles/marketplace/partner-center-portal/saas-metered-billing.md +++ b/articles/marketplace/partner-center-portal/saas-metered-billing.md @@ -161,3 +161,8 @@ To understand publisher support options and open a support ticket with Microsoft ## Next steps - [Marketplace metered billing APIs](../marketplace-metering-service-apis.md) + +**Video tutorials** + +- [SaaS Metered Billing Overview](https://go.microsoft.com/fwlink/?linkid=2196314) +- [The SaaS Metered Billing API with REST](https://go.microsoft.com/fwlink/?linkid=2196418) diff --git a/articles/marketplace/plan-azure-app-managed-app.md b/articles/marketplace/plan-azure-app-managed-app.md index 7c4193286c099..818c2570f8070 100644 --- a/articles/marketplace/plan-azure-app-managed-app.md +++ b/articles/marketplace/plan-azure-app-managed-app.md @@ -7,7 +7,7 @@ ms.reviewer: dannyevers ms.service: marketplace ms.subservice: partnercenter-marketplace-publisher ms.topic: conceptual -ms.date: 11/02/2021 +ms.date: 06/03/2022 --- # Plan an Azure managed application for an Azure application offer @@ -28,7 +28,7 @@ Use an Azure Application: Managed application plan when the following conditions | An Azure subscription | Managed applications must be deployed to a customer's subscription, but they can be managed by a third party. | | Billing and metering | The resources are provided in a customer's Azure subscription. VMs that use the pay-as-you-go payment model are transacted with the customer via Microsoft and billed via the customer's Azure subscription.

                    For bring-your-own-license VMs, Microsoft bills any infrastructure costs that are incurred in the customer subscription, but you transact software licensing fees with the customer directly. | | Azure-compatible virtual hard disk (VHD) | VMs must be built on Windows or Linux. For more information, see:
                    * [Create an Azure VM technical asset](./azure-vm-certification-faq.yml#address-a-vulnerability-or-an-exploit-in-a-vm-offer) (for Windows VHDs).
                    * [Linux distributions endorsed on Azure](../virtual-machines/linux/endorsed-distros.md) (for Linux VHDs). | -| Customer usage attribution | All new Azure application offers must also include an [Azure partner customer usage attribution](azure-partner-customer-usage-attribution.md) GUID. For more information about customer usage attribution and how to enable it, see [Azure partner customer usage attribution](azure-partner-customer-usage-attribution.md). | +| Customer usage attribution | For more information about customer usage attribution and how to enable it, see [Azure partner customer usage attribution](azure-partner-customer-usage-attribution.md). | | Deployment package | You'll need a deployment package that will let customers deploy your plan. If you create multiple plans that require the same technical configuration, you can use the same package. For details, see the next section: Deployment package. | > [!NOTE] @@ -48,9 +48,9 @@ Use an Azure Application: Managed application plan when the following conditions ### Rules and known issues for AKS and containers in managed applications - AKS Node Resource Group does not inherit the Deny Assignments as a part of the Azure Managed Application. This means the customer will have full access to the AKS Node Resource Group that is created by the AKS resource when it is included in the managed application while the Managed Resource Group will have the proper Deny Assignments. - + - The publisher can include Helm charts and other scripts as part of the Azure Managed Application. However, the offer will be treated like a regular managed application deployment and there will be no automatic container-specific processing or Helm chart installation at deployment time. It is the publisher’s responsibility to execute the relevant scripts, either at deployment time, using the usual techniques such as VM custom script extension or Azure Deployment Scripts, or after deployment. - + - Same as with the regular Azure Managed Application, it is the publisher’s responsibility to ensure that the solution deploys successfully and that all components are properly configured, secured, and operational. For example, publishers can use their own container registry as the source of the images but are fully responsible for the container security and ongoing vulnerability scanning. > [!NOTE] @@ -70,8 +70,6 @@ Maximum file sizes supported are: - Up to 1 Gb in total compressed .zip archive size - Up to 1 Gb for any individual uncompressed file within the .zip archive -All new Azure application offers must also include an [Azure partner customer usage attribution](azure-partner-customer-usage-attribution.md) GUID. - ## Azure regions You can publish your plan to the Azure public region, Azure Government region, or both. Before publishing to [Azure Government](../azure-government/documentation-government-manage-marketplace-partners.md), test and validate your plan in the environment as certain endpoints may differ. To set up and test your plan, request a trial account from [Microsoft Azure Government trial](https://azure.microsoft.com/global-infrastructure/government/request/). @@ -136,7 +134,7 @@ You must indicate who can manage a managed application in each of the selected c For each principal ID, you will associate one of the Azure AD built-in roles (Owner or Contributor). The role you select describes the permissions the principal will have on the resources in the customer subscription. 
For more information, see [Azure built-in roles](../role-based-access-control/built-in-roles.md). For more information about role-based access control (RBAC), see [Get started with RBAC in the Azure portal](../role-based-access-control/overview.md). > [!NOTE] -> Although you may add up to 100 authorizations per Azure region, it's generally easier to create an Active Directory user group and specify its ID in the "Principal ID." This lets you add more users to the management group after the plan is deployed and reduce the need to update the plan just to add more authorizations. +> Although you may add up to 100 authorizations per Azure region, we recommend you create an Active Directory user group and specify its ID in the "Principal ID." This lets you add more users to the management group after the plan is deployed and reduces the need to update the plan just to add more authorizations. ## Policy settings diff --git a/articles/marketplace/plan-azure-app-solution-template.md b/articles/marketplace/plan-azure-app-solution-template.md index 3184ce22d15c6..752657e9c2b99 100644 --- a/articles/marketplace/plan-azure-app-solution-template.md +++ b/articles/marketplace/plan-azure-app-solution-template.md @@ -7,7 +7,7 @@ ms.reviewer: dannyevers ms.service: marketplace ms.subservice: partnercenter-marketplace-publisher ms.topic: conceptual -ms.date: 11/11/2021 +ms.date: 05/25/2022 --- # Plan a solution template for an Azure application offer @@ -40,8 +40,6 @@ Maximum file sizes supported are: - Up to 1 Gb in total compressed .zip archive size - Up to 1 Gb for any individual uncompressed file within the .zip archive -All new Azure application offers must also include an [Azure partner customer usage attribution](azure-partner-customer-usage-attribution.md) GUID. - ## Azure regions You can publish your plan to the Azure public region, Azure Government region, or both. Before publishing to [Azure Government](../azure-government/documentation-government-manage-marketplace-partners.md), test and validate your plan in the environment as certain endpoints may differ. To set up and test your plan, request a trial account from [Microsoft Azure Government trial](https://azure.microsoft.com/global-infrastructure/government/request/). diff --git a/articles/marketplace/plan-saas-offer.md b/articles/marketplace/plan-saas-offer.md index c8e48dd8aea0b..763c703a9839a 100644 --- a/articles/marketplace/plan-saas-offer.md +++ b/articles/marketplace/plan-saas-offer.md @@ -1,13 +1,13 @@ --- title: Plan a SaaS offer for the Microsoft commercial marketplace - Azure Marketplace -description: Plan for a new software as a service (SaaS) offer for listing or selling in Microsoft AppSource, Azure Marketplace, or through the Cloud Solution Provider (CSP) program using the commercial marketplace program in Microsoft Partner Center. +description: Plan a new software as a service (SaaS) offer for selling in Microsoft AppSource, Azure Marketplace, or through the Cloud Solution Provider (CSP) program using the commercial marketplace program in Microsoft Partner Center. 
author: mingshen-ms ms.author: mingshen ms.reviewer: dannyevers ms.service: marketplace ms.subservice: partnercenter-marketplace-publisher ms.topic: conceptual -ms.date: 10/26/2021 +ms.date: 05/26/2022 --- # Plan a SaaS offer for the commercial marketplace @@ -105,16 +105,16 @@ When you publish a SaaS offer, it will be listed in Microsoft AppSource, Azure M If your SaaS offer is *both* an IT solution (Azure Marketplace) and a business solution (AppSource), select a category and a subcategory applicable to each online store. Offers published to both online stores should have a value proposition as an IT solution *and* a business solution. > [!IMPORTANT] -> SaaS offers with [metered billing](partner-center-portal/saas-metered-billing.md) are available through Azure Marketplace and the Azure portal. SaaS offers with only private plans are available through the Azure portal and AppSource. +> SaaS offers with [metered billing](partner-center-portal/saas-metered-billing.md) are available through Azure Marketplace and the Azure portal. SaaS offers with only private plans are only available through the Azure portal. | Metered billing | Public plan | Private plan | Available in: | |---|---|---|---| | Yes | Yes | No | Azure Marketplace and Azure portal | | Yes | Yes | Yes | Azure Marketplace and Azure portal* | | Yes | No | Yes | Azure portal only | -| No | No | Yes | Azure portal and AppSource | +| No | No | Yes | Azure portal only | -* The private plan of the offer will only be available via the Azure portal and AppSource. +* The private plan of the offer will only be available via the Azure portal. For example, an offer with metered billing and a private plan only (no public plan), will be purchased by customers in the Azure portal. Learn more about [Private offers in Microsoft commercial marketplace](private-offers.md). @@ -294,3 +294,15 @@ You can choose to opt into Microsoft-supported marketing and sales channels. Whe - [Plan a test SaaS offer](plan-saas-dev-test-offer.md) - [Offer listing best practices](gtm-offer-listing-best-practices.md) - [Create a SaaS offer](create-new-saas-offer.md) + +**Video tutorials** + +- [SaaS offer overview](https://go.microsoft.com/fwlink/?linkid=2196417) +- [SaaS Offer Technical Overview](https://go.microsoft.com/fwlink/?linkid=2196315) +- [Publishing a SaaS offer](https://go.microsoft.com/fwlink/?linkid=2196318) +- [A SaaS Accelerator Hands-on Tour - The Basics](https://go.microsoft.com/fwlink/?linkid=2196164) +- [SaaS Accelerator Architecture](https://go.microsoft.com/fwlink/?linkid=2196167) +- [Installing the SaaS Accelerator With the Install Script](https://go.microsoft.com/fwlink/?linkid=2196326) +- [Invoking Metered Billing with the SaaS Accelerator](https://go.microsoft.com/fwlink/?linkid=2196161) +- [Configuring Email in the SaaS Accelerator](https://go.microsoft.com/fwlink/?linkid=2196165) +- [Custom Landing Page Fields with the SaaS Accelerator](https://go.microsoft.com/fwlink/?linkid=2196166) diff --git a/articles/marketplace/private-offers-api.md b/articles/marketplace/private-offers-api.md new file mode 100644 index 0000000000000..2d5d36c2de76c --- /dev/null +++ b/articles/marketplace/private-offers-api.md @@ -0,0 +1,804 @@ +--- +title: Private offer APIs in the commercial marketplace +description: Private offer APIs in the commercial marketplace (Azure Marketplace). 
ms.subservice: partnercenter-marketplace-publisher
+ms.service: marketplace
+ms.topic: article
+author: rigonzales
+ms.author: rigonzales
+ms.date: 06/07/2022
+---
+
+# Create and manage private offers via API (preview)
+
+> [!NOTE]
+> This API is in preview. If you have any questions about the preview program, contact [privateofferspreview@microsoft.com](mailto:privateofferspreview@microsoft.com).
+
+Private offers allow publishers and customers to transact one or more products in Azure Marketplace by creating time-bound pricing with customized terms. The private offers API enables ISVs to programmatically create and manage private offers for customers and resellers. This API is useful if your account manages many private offers and you want to automate and optimize their management workflows. This API uses Azure Active Directory (Azure AD) to authenticate the calls from your app or service.
+
+## Terminology
+
+- **Private offer** – A custom deal between an ISV and a specific customer with customized terms and pricing for a specific product in Azure Marketplace.
+- **Product** – A single unit representing an offer in Azure Marketplace. There's one product per listing page.
+- **Plan** – A single version of a particular product. There can be multiple plans for a given product that represent various levels of pricing or terms.
+- **Job** – A task created when making a request in this API. When using this API to manage private offers, a job is created to complete the request. Once the job is completed, you can get more information about the relevant private offer.
+
+## Supported scenarios
+
+- Create a private offer for a customer
+- Create a private offer for a reseller
+- Delete a private offer
+- Withdraw a private offer
+- Upgrade a private offer
+- Query for a list of private offers
+- Query for a list of products and plans
+
+## Scenarios not supported via API
+
+These scenarios are only available through Partner Center:
+
+- **Creating in draft state** – All private offers created through the API will be published.
+- **Republishing** – Private offers withdrawn via API can't be republished via API.
+- **Publishing drafts** – Private offers in draft state can't be published via API.
+
+## Get ready to use this API
+
+Before you write code to call the private offers API, ensure you've completed the following prerequisites.
+
+### Step 1: Complete prerequisites for using the Microsoft Product Ingestion API (one-time)
+
+You or your organization must have an Azure AD directory and global administrator permission. If you already use Microsoft 365 or other business services from Microsoft, you already have an Azure AD directory. If not, you can create a new Azure AD directory in Partner Center for free.
+
+You must [associate an Azure AD application](https://aka.ms/PCtoAzureAD) with your Partner Center account and obtain your tenant ID, client ID, and key. You need these values to obtain the Azure AD access token you'll use in calls to the private offers API.
+
+### Step 2: Obtain an Azure AD access token (every time)
+
+Before you call any of the methods in the private offers API, you need an Azure AD access token to pass in the authorization header of each method. You have 60 minutes to use a token before it expires. After expiration, you can request a new token and continue to use the API.
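+
+Because the token expires after 60 minutes, it's convenient to cache it and request a new one shortly before expiry rather than on every call. The following is a minimal sketch of that pattern; it assumes Python with the third-party `requests` package, and the tenant ID, client ID, and client secret placeholders are the values you retrieved from Partner Center in the previous step. The token request itself is the same client-credentials POST shown in the next sample.
+
+```python
+import time
+import requests  # third-party package, assumed to be installed
+
+TENANT_ID = "<tenant_id>"          # from Partner Center (placeholder)
+CLIENT_ID = "<client_id>"          # from Partner Center (placeholder)
+CLIENT_SECRET = "<client_secret>"  # from Partner Center (placeholder)
+
+_token = None
+_token_expiry = 0.0
+
+def get_access_token() -> str:
+    """Return a cached Azure AD access token, requesting a new one shortly before it expires."""
+    global _token, _token_expiry
+    if _token and time.time() < _token_expiry - 300:  # refresh 5 minutes early
+        return _token
+
+    response = requests.post(
+        f"https://login.microsoftonline.com/{TENANT_ID}/oauth2/token",
+        data={
+            "grant_type": "client_credentials",
+            "client_id": CLIENT_ID,
+            "client_secret": CLIENT_SECRET,
+            "resource": "https://graph.microsoft.com/",
+        },
+        timeout=30,
+    )
+    response.raise_for_status()
+    body = response.json()
+    _token = body["access_token"]
+    _token_expiry = time.time() + int(body.get("expires_in", 3600))
+    return _token
+```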
+ +To obtain the access token, see [Service to Service Calls Using Client Credentials](https://aka.ms/AADAccesstoken) to send an HTTP POST to the [https://login.microsoftonline.com//oauth2/token](https://login.microsoftonline.com/%3Ctenant_id%3E/oauth2/token) endpoint. Here's a sample request: + +```json +POST https://login.microsoftonline.com//oauth2/token HTTP/1.1 +Host: login.microsoftonline.com +Content-Type: application/x-www-form-urlencoded; charset=utf-8 +grant_type=client_credentials +&client_id= +&client_secret= +&resource=https://graph.microsoft.com/ +``` + +For the tenant_id value in the POST URI and the client_id and client_secret parameters, specify the tenant ID, client ID, and key for your application that you retrieved from Partner Center in the previous section. For the resource parameter, you must specify `https://graph.microsoft.com/`. + +### Find product, plan, and private offer IDs + +To use this API, you may need to reference several different types of IDs associated with your seller account. + +| ID | Where to find them | +| --- | --- | +| client_id | See [Associate an Azure AD application with your Partner Center account](https://aka.ms/PCtoAzureAD) | +| tenant_id | See [Associate an Azure AD application with your Partner Center account](https://aka.ms/PCtoAzureAD) | +| client_secret | See [Associate an Azure AD application with your Partner Center account](https://aka.ms/PCtoAzureAD) | +| productId | See [Retrieve products](#retrieve-products) below | +| planId | See [Retrieve plans for a specific product](#retrieve-plans-for-a-specific-product) below | +| privateofferId | See [Retrieve private offers](#retrieve-private-offers) below | + +#### Retrieve products + +A private offer is based on an existing product in your Partner Center account. To see a list of products associated with your Partner Center account, use this API call: + +`GET https://graph.microsoft.com/rp/product-ingestion/product/` + +The response appears in the following sample format: + +```json +{ + "value": [ + { + "$schema": "https://product-ingestion.azureedge.net/schema/product/2022-03-01-preview2", + "id": "string", + "identity": { + "externalId": "string" + }, + "type": "enum", + "alias": "string" + } + ], + "@nextLink": "opaque_uri" +} +``` + +#### Retrieve plans for a specific product + +For products that contain more than one plan, you may want to create a private offer based on one specific plan. If so, you'll need that plan's ID. Obtain a list of the plans (such as variants or SKUs) for the product using the following API call: + +`GET https://graph.microsoft.com/rp/product-ingestion/plan/?product=` + +The response appears in the following sample format: + +```json +{ + "value": [ + { + "$schema": "https://product-ingestion.azureedge.net/schema/plan/2022-03-01-preview2", + "product": "string", + "id": "string", + "identity": { + "externalId": "string" + }, + "alias": "string" + } + ] +} +``` + +#### Retrieve private offers + +To see a list of all private offers associated with your seller account, use the following API call: + +`GET https://graph.microsoft.com/rp/product-ingestion/private-offer/query?` + +## How to use the API + +The private offers API lets you create and manage private offers associated with products and plans within your Partner Center account. Here's a summary of the typical calling pattern when using this API. + +![Illustrates a three-step flow of the typical calling pattern when using this API.](media/api-call-pattern.svg) + +### Step 1. 
Make request + +When you make an API call to create, delete, withdraw, or upgrade a private offer, a new job is created to complete the requested task. The API response will contain a **jobId** associated with the job. + +### Step 2. Poll for job status + +Using the **jobId** from the initial API response, poll to get the status of the job. The status of the job will either be **running** or **completed**. Once the job is completed, the result will either be **succeeded** or **failed**. To avoid performance issues, don't poll a job more than once per minute. + +| jobStatus | Description | +| --- | --- | +| NotStarted | The job hasn't yet started; this is part of the response on the initial request. | +| Running | The job is still running. | +| Completed | The job has completed. See jobResult for more details. | + +| jobResult | Description | +| --- | --- | +| Pending | The job hasn't yet completed. | +| Succeeded | The job has completed successfully. This will also return a resourceURI
                    that refers to the private offer related to the job. Use this resourceURI
                    to obtain the full details of a private offer. | +| Failed | The job has failed. This will also return any relevant errors to help determine the cause of failure. | + +For more information, see [Querying the status of an existing job](#query-the-status-of-an-existing-job) later in this article. + +### Step 3. Obtain information from completed jobs + +A successful job will return a resourceUri referencing the relevant private offer. Use this resource Uri to obtain more details about the private offer in the future, such as the privateofferId. + +A failed job will contain errors that provide detail on why the job failed and how to resolve the issue. + +For more information, see [Obtaining details of an existing private offer](#obtaining-details-of-an-existing-private-offer) later in this article. + +## Create a private offer for a customer + +Use this method to create a private offer for a customer. + +### Request + +`POST https://graph.microsoft.com/rp/product-ingestion/configure` + +#### Request Header + +| Header | Type | Description | +| --- | --- | --- | +| Authorization | String | Required. The Azure AD access token in the form **`Bearer `**. | + +Optional: clientID + +#### Request parameters + +There are no parameters for this method. + +#### Request body + +Provide the details of the private offer using the ISV to Customer private offer schema. This must include a name. + +```json +{ + "$schema": "https://product-ingestion.azureedge.net/schema/configure/2022-03-01-preview2", + "resources": [ + { + "$schema": "https://product-ingestion.azureedge.net/schema/private-offer/2022-03-01-preview2", + "name": "privateOffercustomer1705", + "state": "live", + "privateOfferType": "customerPromotion", + "variableStartDate": true, + "end": "2022-01-31", + "acceptBy": "2022-02-28", + "preparedBy": "amy@xyz.com", + "termsAndConditionsDocSasUrl": "https://query.prod.cms.rt.microsoft.com/cms/api/am/binary/RE4rFOA", + "notificationContacts": [ "amy@xyz.com" ], + "beneficiaries": [ + { "id": "xxxxxx-2163-5eea-ae4e-d6e88627c26b:6ea018a9-da9d-4eae-8610-22b51ebe260b_2019-05-31", "description": "Top First Customer"} + ], + "pricing": [ + { "product": "product/34771906-9711-4196-9f60-4af380fd5042", "plan":"plan/123456","discountType": "percentage", "discountPercentage": 5 } + ] + } + ] +} +``` + +##### Sample request body using absolute pricing + +If you're using absolute pricing instead of percentage-based discounting, you can create a new resource above the private offer resource that defines the absolute pricing, then include that newly created resource as an additional object in the resources list of the configure schema. + +Use this method to obtain the pricing resource for your existing public plan, edit the prices, and then use the edited resource for your private offer. 
+
+`GET https://graph.microsoft.com/rp/product-ingestion/price-and-availability-private-offer-plan/{productId}?plan={planId}`
+
+Sample absolute pricing resource:
+
+```json
+{
+  "$schema": "https://product-ingestion.azureedge.net/schema/price-and-availability-private-offer-plan/2022-03-01-preview2",
+  "resourceName": "newSimpleAbsolutePricing",
+  "product": "product/7ba807c8-386a-4efe-80f1-b97bf8a554f8",
+  "plan": "plan/987654",
+  "pricing": {
+    "recurrentPrice": {
+      "priceInputOption": "usd",
+      "prices": [
+        {
+          "pricePerPaymentInUsd": 1,
+          "billingTerm": {
+            "type": "month",
+            "value": 1
+          }
+        },
+        {
+          "pricePerPaymentInUsd": 2,
+          "paymentOption": {
+            "type": "month",
+            "value": 1
+          },
+          "billingTerm": {
+            "type": "year",
+            "value": 1
+          }
+        }
+      ]
+    },
+    "customMeters": {
+      "priceInputOption": "usd",
+      "meters": {
+        "meter1": {
+          "pricePerPaymentInUsd": 1
+        }
+      }
+    }
+  }
+}
+```
+
+Include that resource as an object in the `pricing` array of the private offer, referencing it by its `resourceName`:
+
+```json
+[
+  {
+    "product": "product/34771906-9711-4196-9f60-4af380fd5042",
+    "plan": "plan/123456",
+    "discountType": "percentage",
+    "discountPercentage": 5
+  },
+  {
+    "product": "product/7ba807c8-386a-4efe-80f1-b97bf8a554f8",
+    "plan": "plan/987654",
+    "discountType": "absolute",
+    "priceDetails": {
+      "resourceName": "newSimpleAbsolutePricing"
+    }
+  }
+]
+```
+
+#### Response
+
+The response will contain the jobId you can use later to poll the status:
+
+```json
+{
+  "$schema": "https://product-ingestion.azureedge.net/schema/configure-status/2022-03-01-preview2",
+  "jobId": "c32dd7e8-8619-462d-a96b-0ac1974bace5",
+  "jobStatus": "notStarted",
+  "jobResult": "pending",
+  "jobStart": "2021-12-21T21:29:54.9702903Z",
+  "jobEnd": "0001-01-01",
+  "errors": []
+}
+```
+
+#### Error codes
+
+| HTTP Status Code | Description |
+| --- | --- |
+| 401 | Authentication Error: Ensure you're using a valid Azure AD access token. |
+| 400 | Schema Validation. Ensure your request body follows the correct schema and includes all required fields. |
+
+## Create a private offer for a reseller
+
+Use this method to create a private offer for a reseller (CSP margin).
+
+### Request
+
+`POST https://graph.microsoft.com/rp/product-ingestion/configure`
+
+#### Request header
+
+| Header | Type | Description |
+| --- | --- | --- |
+| Authorization | String | Required. The Azure AD access token in the form **`Bearer `**. |
+
+#### Request parameters
+
+There are no parameters for this method.
+
+#### Request body
+
+Provide the details of the private offer using the **ISV to reseller margin private offer** schema. You must include a name.
+
+```json
+{
+  "$schema": "https://product-ingestion.azureedge.net/schema/configure/2022-03-01-preview2",
+  "resources": [
+    {
+      "$schema": "https://product-ingestion.azureedge.net/schema/private-offer/2022-03-01-preview2",
+      "privateOfferType": "cspPromotion",
+      "name": "privateOffercsp1034",
+      "state": "live",
+      "variableStartDate": false,
+      "start": "2022-01-31",
+      "end": "2022-02-28",
+      "preparedBy": "amy@xyz.com",
+      "notificationContacts": [ "amy@xyz.com" ],
+      "beneficiaries": [
+        { "id": "xxxxxxx-0a32-4b44-b904-39dd964dd790", "description": "Top First CSP" }
+      ],
+      "pricing": [
+        { "product": "product/34771906-9711-4196-9f60-4af380fd5042", "plan": "plan/123456", "discountType": "percentage", "discountPercentage": 5 }
+      ]
+    }
+  ]
+}
+```
+
+#### Sample request for a reseller offer restricted to a specified beneficiary
+
+If you're creating a margin for a reseller that applies to a specific customer, add that information as an object in the `beneficiaryRecipients` parameter array under beneficiaries.
+
+The `beneficiaries` array in the request body will look like the following sample:
+
+```json
+[
+  {
+    "id": "xxxxxxx-0a32-4b44-b904-39dd964dd790",
+    "description": "Top First CSP",
+    "beneficiaryRecipients": [
+      {
+        "id": "xxxxxxx-48b4-af80-66333cd9c609",
+        "recipientType": "cspCustomer"
+      }
+    ]
+  }
+]
+```
+
+### Response
+
+The response will contain the jobId you can use later to poll the status.
+
+```json
+{
+  "$schema": "https://product-ingestion.azureedge.net/schema/configure-status/2022-03-01-preview2",
+  "jobId": "c32dd7e8-8619-462d-a96b-0ac1974bace5",
+  "jobStatus": "notStarted",
+  "jobResult": "pending",
+  "jobStart": "2021-12-21T21:29:54.9702903Z",
+  "jobEnd": "0001-01-01",
+  "errors": []
+}
+```
+
+### Error codes
+
+| Error code | Description |
+| --- | --- |
+| 401 | Authentication Error: Ensure you're using a valid Azure AD access token. |
+| 400 | Schema Validation. Ensure your request body follows the correct schema and includes all required fields. |
+
+## Delete an existing private offer
+
+Use this method to delete an existing private offer while it's still in draft state. You must use the private offer ID to specify which private offer to delete.
+
+### Request
+
+`POST https://graph.microsoft.com/rp/product-ingestion/configure`
+
+#### Request header
+
+| Header | Type | Description |
+| --- | --- | --- |
+| Authorization | String | Required. The Azure AD access token in the form **`Bearer `**. |
+
+#### Request parameters
+
+There are no parameters for this method.
+
+#### Request body
+
+```json
+{
+  "$schema": "https://product-ingestion.azureedge.net/schema/configure/2022-03-01-preview2",
+  "resources": [
+    {
+      "$schema": "https://product-ingestion.azureedge.net/schema/private-offer/2022-03-01-preview2",
+      "id": "private-offer/456e-a345-c457-1234",
+      "name": "privateOffercustomer1705",
+      "state": "deleted"
+    }
+  ]
+}
+```
+
+### Response
+
+The response will contain the jobId you can use later to poll the status.
+
+```json
+{
+  "$schema": "https://product-ingestion.azureedge.net/schema/configure-status/2022-03-01-preview2",
+  "jobId": "c32dd7e8-8619-462d-a96b-0ac1974bace5",
+  "jobStatus": "notStarted",
+  "jobResult": "pending",
+  "jobStart": "2021-12-21T21:29:54.9702903Z",
+  "jobEnd": "0001-01-01",
+  "errors": []
+}
+```
+
+### Error codes
+
+| Error code | Description |
+| --- | --- |
+| 401 | Authentication Error: Ensure you're using a valid Azure AD access token. |
+| 400 | Schema Validation. Ensure your request body follows the correct schema and includes all required fields. |
+
+## Withdraw an existing private offer
+
+Use this method to withdraw an existing private offer. Withdrawing a private offer means your customer will no longer be able to access it. A private offer can only be withdrawn if your customer hasn't accepted it.
+
+You must use the private offer ID to specify which private offer you want to withdraw.
+
+### Request
+
+`POST https://graph.microsoft.com/rp/product-ingestion/configure`
+
+#### Request header
+
+| Header | Type | Description |
+| --- | --- | --- |
+| Authorization | String | Required. The Azure AD access token in the form **`Bearer `**. |
+
+#### Request parameters
+
+There are no parameters for this method.
+
+#### Request body
+
+```json
+{
+  "$schema": "https://product-ingestion.azureedge.net/schema/configure/2022-03-01-preview2",
+  "resources": [
+    {
+      "$schema": "https://product-ingestion.azureedge.net/schema/private-offer/2022-03-01-preview2",
+      "id": "private-offer/456e-a345-c457-1234",
+      "name": "privateOffercustomer1705",
+      "state": "withdrawn"
+    }
+  ]
+}
+```
+
+### Response
+
+The response will contain the jobId you can later use to poll the status.
+
+```json
+{
+  "$schema": "https://product-ingestion.azureedge.net/schema/configure-status/2022-03-01-preview2",
+  "jobId": "c32dd7e8-8619-462d-a96b-0ac1974bace5",
+  "jobStatus": "notStarted",
+  "jobResult": "pending",
+  "jobStart": "2021-12-21T21:29:54.9702903Z",
+  "jobEnd": "0001-01-01",
+  "errors": []
+}
+```
+
+### Error codes
+
+| Error code | Description |
+| --- | --- |
+| 401 | Authentication Error: Ensure you're using a valid Azure AD access token. |
+| 400 | Schema Validation. Ensure your request body follows the correct schema and includes all required fields. |
+
+## Upgrade an existing customer private offer
+
+Use this method to upgrade an existing customer private offer. You must provide the ID of the customer private offer you wish to use as the basis for the upgrade as well as the new name of the offer.
+
+### Request
+
+`POST https://graph.microsoft.com/rp/product-ingestion/configure`
+
+#### Request header
+
+| Header | Type | Description |
+| --- | --- | --- |
+| Authorization | String | Required. The Azure AD access token in the form **`Bearer `**. |
+
+#### Request parameters
+
+There are no parameters for this method.
+
+#### Request body
+
+Use the same schema that you use to create a new private offer, depending on whether the offer is for a customer or a reseller margin. When upgrading, you must specify the existing private offer to be used as the basis for the upgrade in the `upgradedFrom` property.
+
+> [!NOTE]
+> If you provide pricing information in the upgrade request for a given product or plan, it will override the pricing information from the original private offer for that product or plan. If you do not provide new pricing information, the pricing information from the original private offer will be carried over.
+
+```json
+{
+  "$schema": "https://product-ingestion.azureedge.net/schema/configure/2022-03-01-preview2",
+  "resources": [
+    {
+      "$schema": "https://product-ingestion.azureedge.net/schema/private-offer/2022-03-01-preview2",
+      "name": "publicApiCustAPIUpgrade1",
+      "state": "live",
+      "privateOfferType": "customerPromotion",
+      "upgradedFrom": {
+        "name": "publicApiCustAPI",
+        "id": "private-offer/97ac19ce-04f9-40e7-934d-af41124a079d"
+      },
+      "variableStartDate": false,
+      "start": "2022-11-01",
+      "end": "2022-12-31",
+      "acceptBy": "2022-10-31",
+      "pricing": [
+        { "product": "product/4ce67c07-614f-4a5b-8627-95b16dbdbf2b", "discountType": "percentage", "discountPercentage": 20 },
+        { "product": "product/92931a1c-f8ac-4bb8-a66f-4abcb9145852", "discountType": "percentage", "discountPercentage": 20 }
+      ]
+    }
+  ]
+}
+```
+
+### Response
+
+The response will contain the jobId you can use later to poll the status.
+
+```json
+{
+  "$schema": "https://product-ingestion.azureedge.net/schema/configure-status/2022-03-01-preview2",
+  "jobId": "c32dd7e8-8619-462d-a96b-0ac1974bace5",
+  "jobStatus": "notStarted",
+  "jobResult": "pending",
+  "jobStart": "2021-12-21T21:29:54.9702903Z",
+  "jobEnd": "0001-01-01",
+  "errors": []
+}
+```
+
+### Error codes
+
+| Error code | Description |
+| --- | --- |
+| 401 | Authentication Error: Ensure you're using a valid Azure AD access token. |
+| 400 | Schema Validation. Ensure your request body follows the correct schema and includes all required fields. |
+
+## Query the status of an existing job
+
+Use this method to query the status of an existing job. Don't poll a job more than once per minute.
+
+### Request
+
+`GET https://graph.microsoft.com/rp/product-ingestion/configure/{jobId}/status`
+
+#### Request header
+
+| Header | Type | Description |
+| --- | --- | --- |
+| Authorization | String | Required. The Azure AD access token in the form **`Bearer `**. |
+
+#### Request parameters
+
+jobId – required. This is the ID of the job you want to query the status of. It's available in the response data generated during a previous request to either create, delete, withdraw, or upgrade a private offer.
+
+#### Request body
+
+Don't provide a request body for this method.
+
+### Response
+
+The query returns one of three possible results.
+
+| jobResult | Description |
+| --- | --- |
+| Running | The job hasn't yet completed. |
+| Succeeded | The job completed successfully. This will also return a resourceUri that refers to the private offer related to the job. Use this resourceUri to obtain the full details of a private offer. |
+| Failed | The job failed. This will also return any relevant errors to help determine the cause of failure. |
+
+Sample outputs:
+
+**Running**
+
+```json
+{
+  "$schema": "https://product-ingestion.azureedge.net/schema/configure-status/2022-03-01-preview2",
+  "jobId": "c32dd7e8-8619-462d-a96b-0ac1974bace5",
+  "jobStatus": "running",
+  "jobResult": "pending",
+  "jobStart": "2021-12-21T21:29:54.9702903Z",
+  "jobEnd": "2021-12-21T21:30:10.3649551Z",
+  "errors": []
+}
+```
+
+**Succeeded**
+
+```json
+{
+  "$schema": "https://product-ingestion.azureedge.net/schema/configure-status/2022-03-01-preview2",
+  "jobId": "b3f49dff-381f-480d-a10e-17f4ce49b65f",
+  "jobStatus": "completed",
+  "jobResult": "succeeded",
+  "jobStart": "2021-12-21T21:29:54.9702903Z",
+  "jobEnd": "2021-12-21T21:30:10.3649551Z",
+  "resourceUri": "https://product-ingestion.mp.microsoft.com/configure/b3f49dff-381f-480d-a10e-17f4ce49b65f",
+  "errors": []
+}
+```
+
+> [!NOTE]
+> If the job was created by a request to delete a private offer, then there will be no resourceUri in the response.
+
+**Failed**
+
+```json
+{
+  "$schema": "https://product-ingestion.azureedge.net/schema/configure-status/2022-03-01-preview2",
+  "jobId": "c32dd7e8-8619-462d-a96b-0ac1974bace5",
+  "jobStatus": "completed",
+  "jobResult": "failed",
+  "jobStart": "2021-12-21T21:29:54.9702903Z",
+  "jobEnd": "2021-12-21T21:30:10.3649551Z",
+  "errors": [
+    {
+      "code": "Conflict",
+      "message": "The start date should be defined"
+    }
+  ]
+}
+```
+
+### Error codes
+
+| Error code | Description |
+| --- | --- |
+| 401 | Authentication Error: ensure you're using a valid Azure AD access token. |
+
+## Obtaining details of an existing private offer
+
+There are two methods to do this, depending on whether you have the resourceUri or the private offer ID.
+
+### Request
+
+`GET https://graph.microsoft.com/rp/product-ingestion/private-offer/{id}`
+
+or
+
+`GET https://graph.microsoft.com/rp/product-ingestion/configure/{jobId}`
+
+#### Request header
+
+| Header | Type | Description |
+| --- | --- | --- |
+| Authorization | String | Required. The Azure AD access token in the form **`Bearer `**. |
+
+#### Request parameters
+
+id - required. This is the ID of the private offer you want the full details of. This ID is available in the response data generated during a previous request to obtain the details of an existing private offer using the jobId.
+
+jobId - required. This is the ID of the job you want the full details of. This ID is available in the response data generated during a previous request to either create, delete, withdraw, or upgrade a private offer.
+
+#### Request body
+
+Don't provide a request body for this method.
+
+### Response
+
+You'll receive the full details of the private offer.
+ +```json +{ + "$schema": "https://product-ingestion.azureedge.net/schema/configure/2022-03-01-preview2", + "resources": [ + { + "id": "private-offer/07380dd9-bcbb-cccbb-bbccbc", + "name": "privateOffercsp1015", + "privateOfferType": "cspPromotion", + "upgradedFrom": null, + "variableStartDate": false, + "start": "2021-12-01", + "end": "2022-01-31", + "acceptBy": null, + "preparedBy": "amy@xyz.com", + "notificationContacts": [ + "amy@xyz.com" + ], + "state": "Live", + "termsAndConditionsDocSasUrl": null, + "beneficiaries": [ + { + "id": "xxxxyyyzz", + "description": "Top First CSP", + "beneficiaryRecipients": null + } + ], + "pricing": [ + { + "product": "product/xxxxxyyyyyyzzzzz", + "plan": "plan/123456", + "discountType": "Percentage", + "discountPercentage": 5.0, + "featureAvailabilityId": null, + "availabilityInstanceId": null + } + ], + "lastModified": "0001-01-01", + "acceptanceLinks": null, + "_etag": "\"9600487b-0000-0800-0000-61c24c7f0000\"", + "schema": null, + "resourceName": null, + "validations": null + } + ] +} +``` + +### Error codes + +| Error code | Description | +| --- | --- | +| 401 | Authentication Error: Ensure you're using a valid Azure AD access token. | +| 404 | Resource not found. Ensure you're using the correct ID in the request. | + +## How to parse error messages in the response body + +![Screenshot showing error messages in a response body.](media/parsing-error-messages.png) + +## Schemas + +[Private offer](https://aka.ms/POSchema) + +[ISV to customer private offer](https://aka.ms/POCustomerSchema) + +[ISV to reseller margin private offer](https://aka.ms/POCSPSchema) + +[Private offer acceptance link](https://aka.ms/POacceptlinkschema) + +[Private offer beneficiary](https://aka.ms/PObeneficiary) + +[Private offer pricing](https://aka.ms/POpricing) + +[Private offer promotion reference](https://aka.ms/POpromoref) + +## Next steps + +- To start using private offers, follow the steps in [ISV to customer private offers](isv-customer.md). 
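+
+## Example: Create a private offer and poll the job
+
+The following is a minimal, unofficial PowerShell sketch that ties the earlier steps together: it submits a configure request, polls the resulting job no more than once per minute, and prints the resourceUri on success. The `$token` variable and the `configure.json` file are assumptions for illustration only; supply a valid Azure AD access token and a request body like the samples earlier in this article.
+
+```powershell
+# Minimal sketch, not an official sample. Assumes $token already holds a valid Azure AD
+# access token and ./configure.json holds a request body like the samples in this article.
+$headers = @{ Authorization = "Bearer $token" }
+$body    = Get-Content -Path ./configure.json -Raw
+
+# Submit the configure request; the response contains the jobId to poll.
+$job = Invoke-RestMethod -Method Post -Uri "https://graph.microsoft.com/rp/product-ingestion/configure" `
+    -Headers $headers -ContentType "application/json" -Body $body
+
+# Poll no more than once per minute until the job reports "completed".
+do {
+    Start-Sleep -Seconds 60
+    $status = Invoke-RestMethod -Method Get `
+        -Uri "https://graph.microsoft.com/rp/product-ingestion/configure/$($job.jobId)/status" `
+        -Headers $headers
+} while ($status.jobStatus -ne "completed")
+
+if ($status.jobResult -eq "succeeded") {
+    Write-Output "Private offer resource: $($status.resourceUri)"
+} else {
+    # On failure, the errors array explains why the job failed.
+    $status.errors | ConvertTo-Json -Depth 5 | Write-Output
+}
+```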
diff --git a/articles/marketplace/private-plans.md b/articles/marketplace/private-plans.md
index 971263c0fbeaa..0780d898ad6f8 100644
--- a/articles/marketplace/private-plans.md
+++ b/articles/marketplace/private-plans.md
@@ -74,6 +74,12 @@ Private plans will also appear in search results and can be deployed via command
 
 [![[Private offers appearing in search results.]](media/marketplace-publishers-guide/private-product.png)](media/marketplace-publishers-guide/private-product.png#lightbox)
 
+## Next steps
+
+**Video tutorials**
+
+- [Publishing a Private SaaS plan](https://go.microsoft.com/fwlink/?linkid=2196256)
+
\ No newline at end of file
diff --git a/articles/network-watcher/network-watcher-packet-capture-manage-rest-vmss.md b/articles/network-watcher/network-watcher-packet-capture-manage-rest-vmss.md
new file mode 100644
index 0000000000000..2a20e2dade433
--- /dev/null
+++ b/articles/network-watcher/network-watcher-packet-capture-manage-rest-vmss.md
@@ -0,0 +1,297 @@
+---
+title: Manage packet captures in virtual machine scale sets with Azure Network Watcher - REST API | Microsoft Docs
+description: This page explains how to manage the packet capture feature of virtual machine scale sets in Network Watcher using the Azure REST API.
+services: network-watcher
+documentationcenter: na
+author: shijaiswal
+ms.service: network-watcher
+ms.topic: how-to
+ms.tgt_pltfrm: na
+ms.workload: infrastructure-services
+ms.date: 01/07/2021
+ms.author: shijaiswal
+ms.custom: devx-track-azurepowershell
+
+---
+
+# Manage packet captures in virtual machine scale sets with Azure Network Watcher using the Azure REST API
+
+> [!div class="op_single_selector"]
+> - [Azure portal](network-watcher-packet-capture-manage-portal-vmss.md)
+> - [PowerShell](network-watcher-packet-capture-manage-powershell-vmss.md)
+> - [Azure REST API](network-watcher-packet-capture-manage-rest-vmss.md)
+
+Network Watcher packet capture allows you to create capture sessions to track traffic to and from virtual machine scale set instances. Filters are provided for the capture session to ensure you capture only the traffic you want. Packet capture helps to diagnose network anomalies both reactively and proactively. Other uses include gathering network statistics, gaining information on network intrusions, debugging client-server communication, and much more. Being able to remotely trigger packet captures eases the burden of running a packet capture manually on a desired virtual machine scale set instance, which saves valuable time.
+
+This article takes you through the different management tasks that are currently available for packet capture.
+
+- [**Get a packet capture**](#get-a-packet-capture)
+- [**List all packet captures**](#list-all-packet-captures)
+- [**Query the status of a packet capture**](#query-packet-capture-status)
+- [**Start a packet capture**](#start-packet-capture)
+- [**Stop a packet capture**](#stop-packet-capture)
+- [**Delete a packet capture**](#delete-packet-capture)
+
+
+[!INCLUDE [updated-for-az](../../includes/updated-for-az.md)]
+
+## Before you begin
+
+ARMClient is used to call the REST API using PowerShell. ARMClient is available on Chocolatey at [ARMClient on Chocolatey](https://chocolatey.org/packages/ARMClient).
+
+This scenario assumes you've already followed the steps in [Create a Network Watcher](network-watcher-create.md) to create a Network Watcher.
+
+> Packet capture requires a virtual machine extension `AzureNetworkWatcherExtension`. For installing the extension on a Windows VM visit [Azure Network Watcher Agent virtual machine extension for Windows](../virtual-machines/extensions/network-watcher-windows.md) and for Linux VM visit [Azure Network Watcher Agent virtual machine extension for Linux](../virtual-machines/extensions/network-watcher-linux.md).
+
+## Log in with ARMClient
+
+```powershell
+armclient login
+```
+
+## Retrieve a virtual machine scale set
+
+Run the following script to return a virtual machine scale set. This information is needed for starting a packet capture.
+
+The following code needs variables:
+
+- **subscriptionId** - The subscription id can also be retrieved with the **Get-AzSubscription** cmdlet.
+- **resourceGroupName** - The name of a resource group that contains virtual machine scale sets.
+
+```powershell
+$subscriptionId = ""
+$resourceGroupName = ""
+$vmScaleSetName = ""
+
+# Get a list of all virtual machine scale sets in the resource group
+armclient get "https://management.azure.com/subscriptions/${subscriptionId}/resourceGroups/${resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets?api-version=2022-03-01"
+
+# Display information about a single virtual machine scale set
+armclient get "https://management.azure.com/subscriptions/${subscriptionId}/resourceGroups/${resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/${vmScaleSetName}?api-version=2022-03-01"
+```
+
+
+## Get a packet capture
+
+The following example gets the status of a single packet capture.
+
+```powershell
+$subscriptionId = ""
+$resourceGroupName = "NetworkWatcherRG"
+$networkWatcherName = "NetworkWatcher_westcentralus"
+$packetCaptureName = "TestPacketCapture5"
+armclient post "https://management.azure.com/subscriptions/${subscriptionId}/ResourceGroups/${resourceGroupName}/providers/Microsoft.Network/networkWatchers/${networkWatcherName}/packetCaptures/${packetCaptureName}/querystatus?api-version=2016-12-01"
+```
+
+The following responses are examples of typical responses returned when querying the status of a packet capture.
+
+```json
+{
+  "name": "TestPacketCapture5",
+  "id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/NetworkWatcherRG/providers/Microsoft.Network/networkWatchers/NetworkWatcher_westcentralus/packetCaptures/TestPacketCapture6",
+  "captureStartTime": "2016-12-06T17:20:01.5671279Z",
+  "packetCaptureStatus": "Running",
+  "packetCaptureError": []
+}
+```
+
+```json
+{
+  "name": "TestPacketCapture5",
+  "id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/NetworkWatcherRG/providers/Microsoft.Network/networkWatchers/NetworkWatcher_westcentralus/packetCaptures/TestPacketCapture6",
+  "captureStartTime": "2016-12-06T17:20:01.5671279Z",
+  "packetCaptureStatus": "Stopped",
+  "stopReason": "TimeExceeded",
+  "packetCaptureError": []
+}
+```
+
+## List all packet captures
+
+The following example gets all packet capture sessions in a region.
+
+```powershell
+$subscriptionId = ""
+$resourceGroupName = "NetworkWatcherRG"
+$networkWatcherName = "NetworkWatcher_westcentralus"
+armclient get "https://management.azure.com/subscriptions/${subscriptionId}/ResourceGroups/${resourceGroupName}/providers/Microsoft.Network/networkWatchers/${networkWatcherName}/packetCaptures?api-version=2016-12-01"
+```
+
+The following response is an example of a typical response returned when getting all packet captures.
+
+```json
+{
+  "value": [
+    {
+      "name": "TestPacketCapture6",
+      "id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/NetworkWatcherRG/providers/Microsoft.Network/networkWatchers/NetworkWatcher_westcentralus/packetCaptures/TestPacketCapture6",
+      "etag": "W/\"091762e1-c23f-448b-89d5-37cf56e4c045\"",
+      "properties": {
+        "provisioningState": "Succeeded",
+        "target": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/ContosoExampleRG/providers/Microsoft.Compute/virtualMachines/ContosoVM",
+        "bytesToCapturePerPacket": 0,
+        "totalBytesPerSession": 1073741824,
+        "timeLimitInSeconds": 60,
+        "storageLocation": {
+          "storageId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/ContosoExampleRG/providers/Microsoft.Storage/storageAccounts/contosoexamplergdiag374",
+          "storagePath": "https://contosoexamplergdiag374.blob.core.windows.net/network-watcher-logs/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/contosoexamplerg/providers/microsoft.compute/virtualmachines/contosovm/2016/12/06/packetcapture_17_19_53_056.cap",
+          "filePath": "c:\\temp\\packetcapture.cap"
+        },
+        "filters": [
+          {
+            "protocol": "Any",
+            "localIPAddress": "",
+            "localPort": "",
+            "remoteIPAddress": "",
+            "remotePort": ""
+          }
+        ]
+      }
+    },
+    {
+      "name": "TestPacketCapture7",
+      "id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/NetworkWatcherRG/providers/Microsoft.Network/networkWatchers/NetworkWatcher_westcentralus/packetCaptures/TestPacketCapture7",
+      "etag": "W/\"091762e1-c23f-448b-89d5-37cf56e4c045\"",
+      "properties": {
+        "provisioningState": "Failed",
+        "target": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/ContosoExampleRG/providers/Microsoft.Compute/virtualMachines/ContosoVM",
+        "bytesToCapturePerPacket": 0,
+        "totalBytesPerSession": 1073741824,
+        "timeLimitInSeconds": 60,
+        "storageLocation": {
+          "storageId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/ContosoExampleRG/providers/Microsoft.Storage/storageAccounts/contosoexamplergdiag374",
+          "storagePath": "https://contosoexamplergdiag374.blob.core.windows.net/network-watcher-logs/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/contosoexamplerg/providers/microsoft.compute/virtualmachines/contosovm/2016/12/06/packetcapture_17_23_15_364.cap",
+          "filePath": "c:\\temp\\packetcapture.cap"
+        },
+        "filters": [
+          {
+            "protocol": "Any",
+            "localIPAddress": "",
+            "localPort": "",
+            "remoteIPAddress": "",
+            "remotePort": ""
+          }
+        ]
+      }
+    }
+  ]
+}
+```
+
+## Query packet capture status
+
+The following example retrieves the status of a single packet capture.
+
+```powershell
+$subscriptionId = ""
+$resourceGroupName = "NetworkWatcherRG"
+$networkWatcherName = "NetworkWatcher_westcentralus"
+$packetCaptureName = "TestPacketCapture5"
+armclient get "https://management.azure.com/subscriptions/${subscriptionId}/ResourceGroups/${resourceGroupName}/providers/Microsoft.Network/networkWatchers/${networkWatcherName}/packetCaptures/${packetCaptureName}/querystatus?api-version=2016-12-01"
+```
+
+The following response is an example of a typical response returned when querying the status of a packet capture.
+
+```json
+{
+  "name": "vm1PacketCapture",
+  "id": "/subscriptions/{guid}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}",
+  "captureStartTime" : "9/7/2016 12:35:24PM",
+  "packetCaptureStatus" : "Stopped",
+  "stopReason" : "TimeExceeded",
+  "packetCaptureError" : [ ]
+}
+```
+
+## Start packet capture
+
+The following example creates a packet capture on a virtual machine scale set. The example is parameterized to allow for flexibility in creating an example.
+
+```powershell
+$subscriptionId = ''
+$resourceGroupName = "NetworkWatcherRG"
+$networkWatcherName = "NetworkWatcher_westcentralus"
+$packetCaptureName = "TestPacketCapture5"
+$storageaccountname = "contosoexamplergdiag374"
+$vmssName = "ContosoVMSS"
+$targetType = "AzureVMSS"
+$bytestoCaptureperPacket = "0"
+$bytesPerSession = "1073741824"
+$captureTimeinSeconds = "60"
+$localIP = ""
+$localPort = "" # Examples are: 80, or 80-120
+$remoteIP = ""
+$remotePort = "" # Examples are: 80, or 80-120
+$protocol = "" # Valid values are TCP, UDP and Any.
+$targetUri = "" # Example: /subscriptions/$subscriptionId/resourceGroups/$resourceGroupName/providers/Microsoft.Compute/virtualMachineScaleSets/$vmssName
+$storageId = "" # Example: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/ContosoExampleRG/providers/Microsoft.Storage/storageAccounts/contosoexamplergdiag374"
+$storagePath = "" # Example: "https://mytestaccountname.blob.core.windows.net/capture/vm1Capture.cap"
+$localFilePath = "c:\\temp\\packetcapture.cap" # Example: "d:\capture\vm1Capture.cap"
+
+# The scope lists the scale set instance IDs to include in or exclude from the capture.
+$requestBody = @"
+{
+    'properties': {
+        'target': '${targetUri}',
+        'targetType': '${targetType}',
+        'bytesToCapturePerPacket': '${bytestoCaptureperPacket}',
+        'totalBytesPerSession': '${bytesPerSession}',
+        'scope': {
+            'include': [ "1", "2" ],
+            'exclude': [ "3", "4" ]
+        },
+        'timeLimitInSeconds': '${captureTimeinSeconds}',
+        'storageLocation': {
+            'storageId': '${storageId}',
+            'storagePath': '${storagePath}',
+            'filePath': '${localFilePath}'
+        },
+        'filters': [
+            {
+                'protocol': '${protocol}',
+                'localIPAddress': '${localIP}',
+                'localPort': '${localPort}',
+                'remoteIPAddress': '${remoteIP}',
+                'remotePort': '${remotePort}'
+            }
+        ]
+    }
+}
+"@
+
+armclient PUT "https://management.azure.com/subscriptions/${subscriptionId}/ResourceGroups/${resourceGroupName}/providers/Microsoft.Network/networkWatchers/${networkWatcherName}/packetCaptures/${packetCaptureName}?api-version=2016-07-01" $requestBody
+
+```
+
+## Stop packet capture
+
+The following example stops a packet capture on a virtual machine scale set. The example is parameterized to allow for flexibility in creating an example.
+
+```powershell
+$subscriptionId = ''
+$resourceGroupName = "NetworkWatcherRG"
+$networkWatcherName = "NetworkWatcher_westcentralus"
+$packetCaptureName = "TestPacketCapture5"
+armclient post "https://management.azure.com/subscriptions/${subscriptionId}/ResourceGroups/${resourceGroupName}/providers/Microsoft.Network/networkWatchers/${networkWatcherName}/packetCaptures/${packetCaptureName}/stop?api-version=2016-12-01"
+```
+
+## Delete packet capture
+
+The following example deletes a packet capture on a virtual machine scale set. The example is parameterized to allow for flexibility in creating an example.
+
+```powershell
+$subscriptionId = ''
+$resourceGroupName = "NetworkWatcherRG"
+$networkWatcherName = "NetworkWatcher_westcentralus"
+$packetCaptureName = "TestPacketCapture5"
+
+armclient delete "https://management.azure.com/subscriptions/${subscriptionId}/ResourceGroups/${resourceGroupName}/providers/Microsoft.Network/networkWatchers/${networkWatcherName}/packetCaptures/${packetCaptureName}?api-version=2016-12-01"
+```
+
+> [!NOTE]
+> Deleting a packet capture does not delete the file in the storage account.
+
+## Next steps
+
+For instructions on downloading files from Azure storage accounts, refer to [Get started with Azure Blob storage using .NET](../storage/blobs/storage-quickstart-blobs-dotnet.md). Another tool that can be used is Storage Explorer. More information about Storage Explorer can be found at the following link: [Storage Explorer](https://storageexplorer.com/).
diff --git a/articles/network-watcher/network-watcher-packet-capture-overview.md b/articles/network-watcher/network-watcher-packet-capture-overview.md
index 3ff9cc1759246..6867da2bfc481 100644
--- a/articles/network-watcher/network-watcher-packet-capture-overview.md
+++ b/articles/network-watcher/network-watcher-packet-capture-overview.md
@@ -1,6 +1,6 @@
 ---
 title: Introduction to Packet capture in Azure Network Watcher | Microsoft Docs
-description: This page provides an overview of the Network Watcher packet capture capability
+description: This page provides an overview of the packet capture capability of Azure Network Watcher
 services: network-watcher
 documentationcenter: na
 author: damendo
@@ -15,14 +15,18 @@ ms.author: damendo
 
 # Introduction to variable packet capture in Azure Network Watcher
 
+>[!IMPORTANT]
+> Packet capture is now also available for virtual machine scale sets.
+> To check it out, see [Manage packet capture in the Azure portal for virtual machine scale sets](network-watcher-packet-capture-manage-portal-vmss.md).
+
 Network Watcher variable packet capture allows you to create packet capture sessions to track traffic to and from a virtual machine. Packet capture helps to diagnose network anomalies both reactively and proactively. Other uses include gathering network statistics, gaining information on network intrusions, to debug client-server communications and much more.
 
-Packet capture is a virtual machine extension that is remotely started through Network Watcher. This capability eases the burden of running a packet capture manually on the desired virtual machine, which saves valuable time. Packet capture can be triggered through the portal, PowerShell, CLI, or REST API. One example of how packet capture can be triggered is with Virtual Machine alerts. Filters are provided for the capture session to ensure you capture traffic you want to monitor. Filters are based on 5-tuple (protocol, local IP address, remote IP address, local port, and remote port) information. The captured data is stored in the local disk or a storage blob.
+Packet capture is an extension that is remotely started through Network Watcher. This capability eases the burden of running a packet capture manually on the desired virtual machine or virtual machine scale set instances, which saves valuable time. Packet capture can be triggered through the portal, PowerShell, CLI, or REST API. One example of how packet capture can be triggered is with Virtual Machine alerts. Filters are provided for the capture session to ensure you capture traffic you want to monitor. Filters are based on 5-tuple (protocol, local IP address, remote IP address, local port, and remote port) information. The captured data is stored in the local disk or a storage blob.
 
 > [!IMPORTANT]
 > Packet capture requires a virtual machine extension `AzureNetworkWatcherExtension`. For installing the extension on a Windows VM visit [Azure Network Watcher Agent virtual machine extension for Windows](../virtual-machines/extensions/network-watcher-windows.md) and for Linux VM visit [Azure Network Watcher Agent virtual machine extension for Linux](../virtual-machines/extensions/network-watcher-linux.md).
 
-To reduce the information you capture to only the information you want, the following options are available for a packet capture session:
+To capture only the information you need, the following options are available for a packet capture session:
 
 **Capture configuration**
 
@@ -44,11 +48,11 @@ To reduce the information you capture to only the information you want, the foll
 
 ## Considerations
 
-There is a limit of 10,000 parallel packet capture sessions per region per subscription. This limit applies only to the sessions and does not apply to the saved packet capture files either locally on the VM or in a storage account. See the [Network Watcher service limits page](../azure-resource-manager/management/azure-subscription-service-limits.md#network-watcher-limits) for a full list of limits.
+There's a limit of 10,000 parallel packet capture sessions per region per subscription. This limit applies only to the sessions and doesn't apply to the saved packet capture files either locally on the VM or in a storage account. See the [Network Watcher service limits page](../azure-resource-manager/management/azure-subscription-service-limits.md#network-watcher-limits) for a full list of limits.
 
 ### Next steps
 
-Learn how you can manage packet captures through the portal by visiting [Manage packet capture in the Azure portal](network-watcher-packet-capture-manage-portal.md) or with PowerShell by visiting [Manage Packet Capture with PowerShell](network-watcher-packet-capture-manage-powershell.md).
+Learn how you can manage packet captures through the portal by visiting [Manage packet capture in the Azure portal for VM](network-watcher-packet-capture-manage-portal.md)and [Manage packet capture in the Azure portal for virtual machine scale set](network-watcher-packet-capture-manage-portal-vmss.md) or with PowerShell by visiting [Manage Packet Capture with PowerShell for VM](network-watcher-packet-capture-manage-powershell.md)and [Manage Packet Capture with PowerShell for virtual machine scale set](network-watcher-packet-capture-manage-powershell-vmss.md) Learn how to create proactive packet captures based on virtual machine alerts by visiting [Create an alert triggered packet capture](network-watcher-alert-triggered-packet-capture.md) diff --git a/articles/network-watcher/supported-region-traffic-analytics.md b/articles/network-watcher/supported-region-traffic-analytics.md new file mode 100644 index 0000000000000..7d2b539f5fdf8 --- /dev/null +++ b/articles/network-watcher/supported-region-traffic-analytics.md @@ -0,0 +1,147 @@ +--- +title: Azure Traffic Analytics supported regions | Microsoft Docs +description: This article provides the list of Traffic Analytics supported regions. +services: network-watcher +documentationcenter: na +author: v-ssenthilna + +ms.service: network-watcher +ms.topic: article +ms.tgt_pltfrm: na +ms.workload: infrastructure-services +ms.date: 05/11/2022 +ms.author: v-ssenthilna +ms.custon: references_regions + +--- +# Supported regions: NSG + +This article provides the list of regions supported by Traffic Analytics. You can view the list of supported regions of both NSG and Log Analytics Workspaces below. + +You can use traffic analytics for NSGs in any of the following supported regions: +:::row::: + :::column span=""::: + Australia Central + Australia East + Australia Southeast + Brazil South + Brazil Southeast + Canada Central + Canada East + Central India + Central US + China East 2 + China North + China North 2 + :::column-end::: + :::column span=""::: + East Asia + East US + East US 2 + East US 2 EUAP + France Central + Germany West Central + Japan East + Japan West + Korea Central + Korea South + North Central US + North Europe + :::column-end::: + :::column span=""::: + Norway East + South Africa North + South Central US + South India + Southeast Asia + Switzerland North + Switzerland West + UAE Central + UAE North + UK South + UK West + USGov Arizona + :::column-end::: + :::column span=""::: + USGov Texas + USGov Virginia + USNat East + USNat West + USSec East + USSec West + West Central US + West Europe + West US + West US 2 + West US 3 + :::column-end::: +:::row-end::: + +## Supported regions: Log Analytics Workspaces + +The Log Analytics workspace must exist in the following regions: +:::row::: + :::column span=""::: + Australia Central + Australia East + Australia Southeast + Brazil South + Brazil Southeast + Canada East + Canada Central + Central India + Central US + China East 2 + China North + China North 2 + :::column-end::: + :::column span=""::: + East Asia + East US + East US 2 + East US 2 EUAP + France Central + Germany West Central + Japan East + Japan West + Korea Central + Korea South + North Central US + North Europe + :::column-end::: + :::column span=""::: + Norway East + South Africa North + South Central US + South India + Southeast Asia + Switzerland North + Switzerland West + UAE Central + UAE North + UK South + UK West + USGov Arizona + :::column-end::: + :::column span=""::: + USGov Texas + USGov Virginia + USNat East + USNat 
West + USSec East + USSec West + West Central US + West Europe + West US + West US 2 + West US 3 + :::column-end::: +:::row-end::: + +> [!NOTE] +> If NSGs support a region, but the log analytics workspace does not support that region for traffic analytics as per above lists, then you can use log analytics workspace of any other supported region as a workaround. + +## Next steps + +- Learn how to [enable flow log settings](enable-network-watcher-flow-log-settings.md). +- Learn the ways to [use traffic analytics](usage-scenarios-traffic-analytics.md). \ No newline at end of file diff --git a/articles/network-watcher/toc.yml b/articles/network-watcher/toc.yml index 19ce1f8010534..aea8e4863f111 100644 --- a/articles/network-watcher/toc.yml +++ b/articles/network-watcher/toc.yml @@ -22,10 +22,24 @@ href: diagnose-vm-network-routing-problem.md - name: Monitor communication between VMs href: connection-monitor.md + - name: Monitor communication with virtual machine scale set + href: connection-monitor-virtual-machine-scale-set.md - name: Diagnose a communication problem between networks href: diagnose-communication-problem-between-networks.md - name: Log VM network traffic href: network-watcher-nsg-flow-logging-portal.md + - name: Configure NSG flow logs + items: + - name: Azure PowerShell + href: network-watcher-nsg-flow-logging-powershell.md + - name: Azure CLI + href: network-watcher-nsg-flow-logging-cli.md + - name: REST + href: network-watcher-nsg-flow-logging-rest.md + - name: Azure Resource Manager + href: network-watcher-nsg-flow-logging-azure-resource-manager.md + - name: Built-in Policy + href: nsg-flow-logs-policy-portal.md - name: Concepts items: - name: Connection Monitor @@ -44,6 +58,18 @@ href: network-watcher-troubleshoot-overview.md - name: Variable packet capture href: network-watcher-packet-capture-overview.md + - name: Traffic Analytics overview + items: + - name: Overview + href: traffic-analytics.md + - name: Supported regions + href: supported-region-traffic-analytics.md + - name: Network Watcher and flow log settings + href: enable-network-watcher-flow-log-settings.md + - name: Usage scenarios + href: usage-scenarios-traffic-analytics.md + - name: Frequently asked questions + href: traffic-analytics-faq.yml - name: Network security group flow logging href: network-watcher-nsg-flow-logging-overview.md - name: Network security group view @@ -102,14 +128,20 @@ items: - name: Manage a packet capture items: - - name: Azure portal + - name: Azure portal VM href: network-watcher-packet-capture-manage-portal.md - - name: Azure PowerShell + - name: Azure portal virtual machine scale set + href: network-watcher-packet-capture-manage-portal-vmss.md + - name: Azure PowerShell VM href: network-watcher-packet-capture-manage-powershell.md + - name: Azure PowerShell virtual machine scale set + href: network-watcher-packet-capture-manage-powershell-vmss.md - name: Azure CLI href: network-watcher-packet-capture-manage-cli.md - - name: REST + - name: REST VM href: network-watcher-packet-capture-manage-rest.md + - name: REST virtual machine scale set + href: network-watcher-packet-capture-manage-rest-vmss.md - name: Analyze a packet capture items: - name: Find anomalies @@ -120,38 +152,12 @@ href: network-watcher-intrusion-detection-open-source-tools.md - name: Visualize network traffic patterns using open source tools href: network-watcher-using-open-source-tools.md - - name: Work with network security groups + - name: Flow Log Traffic Monitoring items: - - name: Configure NSG flow logs - 
items: - - name: Azure PowerShell - href: network-watcher-nsg-flow-logging-powershell.md - - name: Azure CLI - href: network-watcher-nsg-flow-logging-cli.md - - name: REST - href: network-watcher-nsg-flow-logging-rest.md - - name: Azure Resource Manager - href: network-watcher-nsg-flow-logging-azure-resource-manager.md - - name: Built-in Policy - href: nsg-flow-logs-policy-portal.md - - name: Delete NSG flow log storage blobs - href: network-watcher-delete-nsg-flow-log-blobs.md + - name: Read NSG flow logs + href: network-watcher-read-nsg-flow-logs.md - name: Analyze NSG flow logs items: - - name: Read NSG flow logs - href: network-watcher-read-nsg-flow-logs.md - - name: Use traffic analytics - items: - - name: Traffic Analytics overview - href: traffic-analytics.md - - name: Frequently asked questions - href: traffic-analytics-faq.yml - - name: Built-in Policy - href: traffic-analytics-policy-portal.md - - name: Schema and Data Aggregation - href: traffic-analytics-schema.md - - name: Schema update (August 2019) - href: traffic-analytics-schema-update.md - name: Use Power BI href: network-watcher-visualize-nsg-flow-logs-power-bi.md - name: Use Elastic Stack @@ -160,6 +166,16 @@ href: network-watcher-nsg-grafana.md - name: Use Graylog href: network-watcher-analyze-nsg-flow-logs-graylog.md + - name: Delete NSG flow log storage blobs + href: network-watcher-delete-nsg-flow-log-blobs.md + - name: Enable traffic analytics + items: + - name: Built-in Policy + href: traffic-analytics-policy-portal.md + - name: Schema and Data Aggregation + href: traffic-analytics-schema.md + - name: Schema update (August 2019) + href: traffic-analytics-schema-update.md - name: View network security groups items: - name: Azure PowerShell diff --git a/articles/network-watcher/traffic-analytics-faq.yml b/articles/network-watcher/traffic-analytics-faq.yml index 3724d9594a8d3..15020f85dce49 100644 --- a/articles/network-watcher/traffic-analytics-faq.yml +++ b/articles/network-watcher/traffic-analytics-faq.yml @@ -9,12 +9,12 @@ metadata: ms.topic: faq ms.tgt_pltfrm: na ms.workload: infrastructure-services - ms.date: 01/04/2021 + ms.date: 05/12/2022 ms.author: damendo ms.custom: devx-track-azurepowershell -title: Traffic Analytics frequently asked questions +title: Traffic Analytics - frequently asked questions summary: | - This article collects in one place many of the most frequently asked questions about traffic analytics in Azure Network Watcher. + This article provides answers to the most frequently asked questions about traffic analytics in Azure Network Watcher. [!INCLUDE [updated-for-az](../../includes/updated-for-az.md)] @@ -58,7 +58,7 @@ sections: 3. To list all the roles that are assigned to a specified user, use **Get-AzRoleAssignment -SignInName [user email] -IncludeClassicAdministrators**. - If you are not seeing any output, contact the respective subscription admin to get access to run the commands. For more details, see [Add or remove Azure role assignments using Azure PowerShell](../role-based-access-control/role-assignments-powershell.md). + If you are not seeing any output, contact the respective subscription admin to get access to run the commands. For more information, see [Add or remove Azure role assignments using Azure PowerShell](../role-based-access-control/role-assignments-powershell.md). - question: | Can the NSGs I enable flow logs for be in different regions than my workspace? 
@@ -93,7 +93,7 @@ sections: - question: | What if I am getting the status, “Failed to load,” under the NSG flow logs page? answer: | - The Microsoft.Insights provider must be registered for flow logging to work properly. If you are not sure whether the Microsoft.Insights provider is registered for your subscription, replace *xxxxx-xxxxx-xxxxxx-xxxx* in the following command, and run the following commands from PowerShell: + The Microsoft.Insights  provider must be registered for flow logging to work properly. If you are not sure whether the Microsoft.Insights provider is registered for your subscription, replace *xxxxx-xxxxx-xxxxxx-xxxx* in the following command, and run the following commands from PowerShell: ```powershell-interactive **Select-AzSubscription** -SubscriptionId xxxxx-xxxxx-xxxxxx-xxxx @@ -125,7 +125,7 @@ sections: If problems persist, raise concerns in the [User voice forum](https://feedback.azure.com/d365community/forum/8ae9bf04-8326-ec11-b6e6-000d3a4f0789?c=cd276b66-8326-ec11-b6e6-000d3a4f0789). - question: | - What if I get this message: “Looks like we have resources data (Topology) and no flows information. Meanwhile, click here to see resources data and refer to FAQs for further information.”? + What if I get this message: “Looks like we have resources data (Topology) and no flows information. For more information, click here to see resources data and refer to FAQs.”? answer: | You are seeing the resources information on the dashboard; however, no flow-related statistics are present. Data might not be present because of no communication flows between the resources. Wait for 60 minutes, and recheck status. If the problem persists, and you're sure that communication flows among resources exist, raise concerns in the [User voice forum](https://feedback.azure.com/d365community/forum/8ae9bf04-8326-ec11-b6e6-000d3a4f0789?c=cd276b66-8326-ec11-b6e6-000d3a4f0789). @@ -155,7 +155,7 @@ sections: { 'storageId': '${TAstorageId}', 'enabled': '', - 'retentionPolicy' : + 'retentionPolicy': { days: , enabled: @@ -165,7 +165,7 @@ sections: { 'networkWatcherFlowAnalyticsConfiguration': { - 'enabled':, + 'enabled': 'workspaceId':'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'workspaceRegion':'', 'workspaceResourceId':'/subscriptions//resourcegroups//providers/microsoft.operationalinsights/workspaces/' @@ -216,14 +216,14 @@ sections: - question: | How does Traffic Analytics decide that an IP is malicious? answer: | - Traffic Analytics relies on Microsoft internal threat intelligence systems to deem an IP as malicious. These systems leverage diverse telemetry sources like Microsoft products and services,the Microsoft Digital Crimes Unit (DCU), the Microsoft Security Response Center (MSRC), and external feeds and build a lot of intelligence on top of it. + Traffic Analytics relies on Microsoft internal threat intelligence systems to deem an IP as malicious. These systems leverage diverse telemetry sources like Microsoft products and services, the Microsoft Digital Crimes Unit (DCU), the Microsoft Security Response Center (MSRC), and external feeds and build a lot of intelligence on top of it. Some of this data is Microsoft Internal. If a known IP is getting flagged as malicious, please raise a support ticket to know the details. - question: | How can I set alerts on Traffic Analytics data? answer: | Traffic Analytics does not have inbuilt support for alerts. However, since Traffic Analytics data is stored in Log Analytics you can write custom queries and set alerts on them. 
- Steps : + Steps: - You can use the shortlink for Log Analytics in Traffic Analytics. - Use the [schema documented here](traffic-analytics-schema.md) to write your queries - Click "New alert rule" to create the alert @@ -239,7 +239,7 @@ sections: | mvexpand vm = pack_array(VM1_s, VM2_s) to typeof(string) | where isnotempty(vm) | extend traffic = AllowedInFlows_d + DeniedInFlows_d + AllowedOutFlows_d + DeniedOutFlows_d // For bytes use: | extend traffic = InboundBytes_d + OutboundBytes_d - | make-series TotalTraffic = sum(traffic) default = 0 on FlowStartTime_t from datetime(
                  • Operations that include both READ and WRITE flags. For example: [SSH.NET create API](https://github.com/sshnet/SSH.NET/blob/develop/src/Renci.SshNet/SftpClient.cs#:~:text=public%20SftpFileStream-,Create,-(string%20path))
                  • Operations that include APPEND flag. For example: [SSH.NET append API](https://github.com/sshnet/SSH.NET/blob/develop/src/Renci.SshNet/SftpClient.cs#:~:text=public%20void-,AppendAllLines,-(string%20path%2C%20IEnumerable%3Cstring%3E%20contents)). | | Links |
                  • `symlink` - creating symbolic links
                  • `ln` - creating hard links
                  • Reading links not supported | | Capacity Information | `df` - usage info for filesystem | -| Extensions | Unsupported extensions include but are not limited to: fsync@openssh.com, limits@openssh.com, lsetstat@openssh.com, statvfs@openssh.com | +| Extensions | Unsupported extensions include but aren't limited to: fsync@openssh.com, limits@openssh.com, lsetstat@openssh.com, statvfs@openssh.com | | SSH Commands | SFTP is the only supported subsystem. Shell requests after the completion of the key exchange will fail. | -| Multi-protocol writes | Random writes and appends (`PutBlock`,`PutBlockList`, `GetBlockList`, `AppendBlock`, `AppendFile`) are not allowed from other protocols on blobs that are created by using SFTP. Full overwrites are allowed.| +| Multi-protocol writes | Random writes and appends (`PutBlock`,`PutBlockList`, `GetBlockList`, `AppendBlock`, `AppendFile`) aren't allowed from other protocols on blobs that are created by using SFTP. Full overwrites are allowed.| ## Authentication and authorization - _Local users_ is the only form of identity management that is currently supported for the SFTP endpoint. -- Azure Active Directory (Azure AD) is not supported for the SFTP endpoint. +- Azure Active Directory (Azure AD) isn't supported for the SFTP endpoint. -- POSIX-like access control lists (ACLs) are not supported for the SFTP endpoint. +- POSIX-like access control lists (ACLs) aren't supported for the SFTP endpoint. > [!NOTE] > After your data is ingested into Azure Storage, you can use the full breadth of Azure storage security settings. While authorization mechanisms such as role-based access control (RBAC) and access control lists aren't supported as a means to authorize a connecting SFTP client, they can be used to authorize access via Azure tools (such Azure portal, Azure CLI, Azure PowerShell commands, and AzCopy) as well as Azure SDKS, and Azure REST APIs. -- Account and container level operations are not supported for the SFTP endpoint. +- Account and container level operations aren't supported for the SFTP endpoint. ## Networking - To access the storage account using SFTP, your network must allow traffic on port 22. -- When a firewall is configured, connections from non-allowed IPs are not rejected as expected. However, if there is a successful connection for an authenticated user then all data plane operations will be rejected. - -- There's a 4 minute timeout for idle or inactive connections. OpenSSH will appear to stop responding and then disconnect. Some clients reconnect automatically. +- There's a 4-minute timeout for idle or inactive connections. OpenSSH will appear to stop responding and then disconnect. Some clients reconnect automatically. ## Security @@ -79,7 +73,7 @@ The following clients are known to be incompatible with SFTP for Azure Blob Stor ## Integrations -- Change feed and Event Grid notifications are not supported. +- Change feed and Event Grid notifications aren't supported. - Network File System (NFS) 3.0 and SFTP can't be enabled on the same storage account. @@ -89,15 +83,15 @@ For performance issues and considerations, see [SSH File Transfer Protocol (SFTP ## Other -- Special containers such as $logs, $blobchangefeed, $root, $web are not accessible via the SFTP endpoint. +- Special containers such as $logs, $blobchangefeed, $root, $web aren't accessible via the SFTP endpoint. -- Symbolic links are not supported. +- Symbolic links aren't supported. -- `ssh-keyscan` is not supported. +- `ssh-keyscan` isn't supported. 
-- SSH and SCP commands, that are not SFTP, are not supported. +- SSH and SCP commands that aren't SFTP aren't supported. -- FTPS and FTP are not supported. +- FTPS and FTP aren't supported. ## Troubleshooting diff --git a/articles/storage/blobs/secure-file-transfer-protocol-support-how-to.md b/articles/storage/blobs/secure-file-transfer-protocol-support-how-to.md index 8c77996974172..a0e4ffdc918a5 100644 --- a/articles/storage/blobs/secure-file-transfer-protocol-support-how-to.md +++ b/articles/storage/blobs/secure-file-transfer-protocol-support-how-to.md @@ -5,7 +5,7 @@ author: normesta ms.subservice: blobs ms.service: storage ms.topic: conceptual -ms.date: 03/04/2022 +ms.date: 06/03/2022 ms.author: normesta ms.reviewer: ylunagaria @@ -26,7 +26,7 @@ To learn more about SFTP support for Azure Blob Storage, see [SSH File Transfer ## Prerequisites -- A standard general-purpose v2 or premium block blob storage account. You can also enable SFTP as create the account. For more information on these types of storage accounts, see [Storage account overview](../common/storage-account-overview.md). +- A standard general-purpose v2 or premium block blob storage account. You can also enable SFTP as you create the account. For more information on these types of storage accounts, see [Storage account overview](../common/storage-account-overview.md). - The hierarchical namespace feature of the account must be enabled. To enable the hierarchical namespace feature, see [Upgrade Azure Blob Storage with Azure Data Lake Storage Gen2 capabilities](upgrade-to-data-lake-storage-gen2-how-to.md). @@ -34,7 +34,7 @@ To learn more about SFTP support for Azure Blob Storage, see [SSH File Transfer ## Enable SFTP support -This section shows you how to enable SFTP support for an existing storage account. To view an Azure Resource Manager template that enables SFTP support as part of creating the account, see [Create an Azure Storage Account and Blob Container accessible using SFTP protocol on Azure](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.storage/storage-sftp). +This section shows you how to enable SFTP support for an existing storage account. To view an Azure Resource Manager template that enables SFTP support as part of creating the account, see [Create an Azure Storage Account and Blob Container accessible using SFTP protocol on Azure](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.storage/storage-sftp). To view the Local User REST APIs and .NET references, see [Local Users](/rest/api/storagerp/local-users) and [LocalUser Class](/dotnet/api/microsoft.azure.management.storage.models.localuser). ### [Portal](#tab/azure-portal) @@ -63,6 +63,11 @@ $storageAccountName = "" Set-AzStorageAccount -ResourceGroupName $resourceGroupName -Name $storageAccountName -EnableSftp $true ``` + > [!NOTE] + > The `-EnableSftp` parameter is currently only available in preview versions of Azure Powershell. Use the command below to install the preview version: + > ``` + > Install-Module -Name Az.Storage -RequiredVersion 4.1.2-preview -AllowPrerelease + > ``` ### [Azure CLI](#tab/azure-cli) @@ -99,9 +104,9 @@ To learn more about the SFTP permissions model, see [SFTP Permissions model](sec > [!IMPORTANT] > While you can enable both forms of authentication, SFTP clients can connect by using only one of them. 
Multifactor authentication, whereby both a valid password and a valid public and private key pair are required for successful authentication is not supported. - If you select **Secure with a password**, then your password will appear when you've completed all of the steps in the **Add local user** configuration pane. + If you select **SSH Password**, then your password will appear when you've completed all of the steps in the **Add local user** configuration pane. - If you select **Secure with SSH public key**, then select **Add key source** to specify a key source. + If you select **SSH Key pair**, then select **Public key source** to specify a key source. > [!div class="mx-imgBorder"] > ![Local user configuration pane](./media/secure-file-transfer-protocol-support-how-to/add-local-user-config-page.png) @@ -160,7 +165,7 @@ To learn more about the SFTP permissions model, see [SFTP Permissions model](sec If you want to use a password to authenticate the local user, you can generate one after the local user is created. -3. If want to use an SSH key, create a public key object by using the **New-AzStorageLocalUserSshPublicKey** command. Set the `-Key` parameter to a string that contains the key type and public key. In the following example, the key type is `ssh-rsa` and the key is `ssh-rsa a2V5...`. +3. If you want to use an SSH key, create a public key object by using the **New-AzStorageLocalUserSshPublicKey** command. Set the `-Key` parameter to a string that contains the key type and public key. In the following example, the key type is `ssh-rsa` and the key is `ssh-rsa a2V5...`. ```powershell $sshkey = "ssh-rsa a2V5..." diff --git a/articles/storage/blobs/secure-file-transfer-protocol-support.md b/articles/storage/blobs/secure-file-transfer-protocol-support.md index 5b25441c2b703..226093e124455 100644 --- a/articles/storage/blobs/secure-file-transfer-protocol-support.md +++ b/articles/storage/blobs/secure-file-transfer-protocol-support.md @@ -5,7 +5,7 @@ author: normesta ms.subservice: blobs ms.service: storage ms.topic: conceptual -ms.date: 03/04/2022 +ms.date: 06/03/2022 ms.custom: references_regions ms.author: normesta ms.reviewer: ylunagaria @@ -14,7 +14,7 @@ ms.reviewer: ylunagaria # SSH File Transfer Protocol (SFTP) support for Azure Blob Storage (preview) -Blob storage now supports the SSH File Transfer Protocol (SFTP). This support provides the ability to securely connect to Blob Storage accounts via an SFTP endpoint, allowing you to leverage SFTP for file access, file transfer, as well as file management. +Blob storage now supports the SSH File Transfer Protocol (SFTP). This support provides the ability to securely connect to Blob Storage accounts via an SFTP endpoint, allowing you to use SFTP for file access, file transfer, and file management. > [!IMPORTANT] > SFTP support is currently in PREVIEW and is available on general-purpose v2 and premium block blob accounts. Complete [this form](https://forms.office.com/r/gZguN0j65Y) BEFORE using the feature in preview. Registration via 'preview features' is NOT required and confirmation email will NOT be sent after filling out the form. You can IMMEDIATELY access the feature. @@ -27,7 +27,7 @@ Azure allows secure data transfer to Blob Storage accounts using Azure Blob serv Prior to the release of this feature, if you wanted to use SFTP to transfer data to Azure Blob Storage you would have to either purchase a third party product or orchestrate your own solution. 
You would have to create a virtual machine (VM) in Azure to host an SFTP server, and then figure out a way to move data into the storage account. -Now, with SFTP support for Azure Blob Storage, you can enable an SFTP endpoint for Blob Storage accounts with a single setting. Then you can set up local user identities for authentication to transfer data securely without the need to do any additional work. +Now, with SFTP support for Azure Blob Storage, you can enable an SFTP endpoint for Blob Storage accounts with a single setting. Then you can set up local user identities for authentication to transfer data securely without the need to do any more work. This article describes SFTP support for Azure Blob Storage. To learn how to enable SFTP for your storage account, see [Connect to Azure Blob Storage by using the SSH File Transfer Protocol (SFTP) (preview)](secure-file-transfer-protocol-support-how-to.md). @@ -42,11 +42,11 @@ Different protocols extend from the hierarchical namespace. The SFTP is one of t ## SFTP permission model -Azure Blob Storage does not support Azure Active Directory (Azure AD) authentication or authorization via SFTP. Instead, SFTP utilizes a new form of identity management called _local users_. +Azure Blob Storage doesn't support Azure Active Directory (Azure AD) authentication or authorization via SFTP. Instead, SFTP utilizes a new form of identity management called _local users_. Local users must use either a password or a Secure Shell (SSH) private key credential for authentication. You can have a maximum of 1000 local users for a storage account. -To set up access permissions, you will create a local user, and choose authentication methods. Then, for each container in your account, you can specify the level of access you want to give that user. +To set up access permissions, you'll create a local user, and choose authentication methods. Then, for each container in your account, you can specify the level of access you want to give that user. > [!CAUTION] > Local users do not interoperate with other Azure Storage permission models such as RBAC (role based access control), ABAC (attribute based access control), and ACLs (access control lists). @@ -57,11 +57,11 @@ For SFTP enabled storage accounts, you can use the full breadth of Azure Blob St ## Authentication methods -You can authenticate local users connecting via SFTP by using a password or a Secure Shell (SSH) public-private keypair. You can configure both forms of authentication and let connecting local users choose which one to use. However, multifactor authentication, whereby both a valid password and a valid public-private key pair are required for successful authentication is not supported. +You can authenticate local users connecting via SFTP by using a password or a Secure Shell (SSH) public-private keypair. You can configure both forms of authentication and let connecting local users choose which one to use. However, multifactor authentication, whereby both a valid password and a valid public-private key pair are required for successful authentication isn't supported. #### Passwords -Passwords are generated for you. If you choose password authentication, then your password will be provided after you finish configuring a local user. Make sure to copy that password and save it in a location where you can find it later. You won't be able to retrieve that password from Azure again. If you lose the password, you will have to generate a new one. For security reasons, you can't set the password yourself. 
+Passwords are generated for you. If you choose password authentication, then your password will be provided after you finish configuring a local user. Make sure to copy that password and save it in a location where you can find it later. You won't be able to retrieve that password from Azure again. If you lose the password, you'll have to generate a new one. For security reasons, you can't set the password yourself. #### SSH key pairs @@ -71,7 +71,7 @@ If you choose to authenticate with private-public key pair, you can either gener ## Container permissions -In the current release, you can specify only container-level permissions. Directory-level permissions are not supported. You can choose which containers you want to grant access to and what level of access you want to provide (Read, Write, List, Delete, and Create). Those permissions apply to all directories and subdirectories in the container. You can grant each local user access to as many as 100 containers. Container permissions can also be updated after creating a local user. The following table describes each permission in more detail. +In the current release, you can specify only container-level permissions. Directory-level permissions aren't supported. You can choose which containers you want to grant access to and what level of access you want to provide (Read, Write, List, Delete, and Create). Those permissions apply to all directories and subdirectories in the container. You can grant each local user access to as many as 100 containers. Container permissions can also be updated after creating a local user. The following table describes each permission in more detail. | Permission | Symbol | Description | |---|---|---| @@ -93,7 +93,7 @@ sftp myaccount.myusername@myaccount.blob.core.windows.net put logfile.txt ``` -If you set the home directory of a user to `mycontainer/mydirectory`, then they would connect to that directory. Then, the `logfile.txt` file would be uploaded to `mycontainer/mydirectory`. If you did not set the home directory, then the connection attempt would fail. Instead, connecting users would have to specify a container along with the request and then use SFTP commands to navigate to the target directory before uploading a file. The following example shows this: +If you set the home directory of a user to `mycontainer/mydirectory`, then they would connect to that directory. Then, the `logfile.txt` file would be uploaded to `mycontainer/mydirectory`. If you didn't set the home directory, then the connection attempt would fail. Instead, connecting users would have to specify a container along with the request and then use SFTP commands to navigate to the target directory before uploading a file. The following example shows this: ```powershell sftp myaccount.mycontainer.myusername@myaccount.blob.core.windows.net @@ -120,24 +120,28 @@ SFTP support for Azure Blob Storage currently limits its cryptographic algorithm ### Known supported clients -The following clients have compatible algorithm support with SFTP for Azure Blob Storage (preview). See [Limitations and known issues with SSH File Transfer Protocol (SFTP) support for Azure Blob Storage](secure-file-transfer-protocol-known-issues.md) if you are having trouble connecting. +The following clients have compatible algorithm support with SFTP for Azure Blob Storage (preview). See [Limitations and known issues with SSH File Transfer Protocol (SFTP) support for Azure Blob Storage](secure-file-transfer-protocol-known-issues.md) if you're having trouble connecting. 
- AsyncSSH 2.1.0+ +- Axway - Cyberduck 7.8.2+ - edtFTPjPRO 7.0.0+ - FileZilla 3.53.0+ - libssh 0.9.5+ - Maverick Legacy 1.7.15+ +- Moveit 12.7 - OpenSSH 7.4+ - paramiko 2.8.1+ - PuTTY 0.74+ - QualysML 12.3.41.1+ - RebexSSH 5.0.7119.0+ +- Salesforce - ssh2js 0.1.20+ - sshj 0.27.0+ - SSH.NET 2020.0.0+ - WinSCP 5.10+ - Workday +- XFB.Gateway > [!NOTE] > The supported client list above is not exhaustive and may change over time. diff --git a/articles/storage/blobs/soft-delete-blob-enable.md b/articles/storage/blobs/soft-delete-blob-enable.md index a7a4ee4499b9e..149065063ca25 100644 --- a/articles/storage/blobs/soft-delete-blob-enable.md +++ b/articles/storage/blobs/soft-delete-blob-enable.md @@ -102,7 +102,7 @@ To enable blob soft delete for your storage account by using the Azure portal, f 1. Install the latest **PowershellGet** module. Then, close and reopen the PowerShell console. ```powershell - install-Module PowerShellGet -Repository PSGallery -Force + Install-Module PowerShellGet -Repository PSGallery -Force ``` 2. Install **Az.Storage** preview module. diff --git a/articles/storage/blobs/storage-blob-container-delete-javascript.md b/articles/storage/blobs/storage-blob-container-delete-javascript.md index 081fc9dc50763..5fc3fa71101bd 100644 --- a/articles/storage/blobs/storage-blob-container-delete-javascript.md +++ b/articles/storage/blobs/storage-blob-container-delete-javascript.md @@ -23,9 +23,9 @@ The [sample code snippets](https://github.com/Azure-Samples/AzureStorageSnippets To delete a container in JavaScript, use one of the following methods: -- BlobServiceClient.[deleteContainer](/javascript/api/@azure/storage-blob/blobserviceclien#@azure-storage-blob-blobserviceclient-deletecontainer) -- ContainerClient.[delete](/javascript/api/@azure/storage-blob/containerclien#@azure-storage-blob-containerclient-delete) -- ContainerClient.[deleteIfExists](/javascript/api/@azure/storage-blob/containerclien#@azure-storage-blob-containerclient-deleteifexists) +- BlobServiceClient.[deleteContainer](/javascript/api/@azure/storage-blob/blobserviceclient#@azure-storage-blob-blobserviceclient-deletecontainer) +- ContainerClient.[delete](/javascript/api/@azure/storage-blob/containerclient#@azure-storage-blob-containerclient-delete) +- ContainerClient.[deleteIfExists](/javascript/api/@azure/storage-blob/containerclient#@azure-storage-blob-containerclient-deleteifexists) After you delete a container, you can't create a container with the same name for at *least* 30 seconds. Attempting to create a container with the same name will fail with HTTP error code 409 (Conflict). Any other operations on the container or the blobs it contains will fail with HTTP error code 404 (Not Found). @@ -75,7 +75,7 @@ async function deleteContainersWithPrefix(blobServiceClient, blobNamePrefix){ When container soft delete is enabled for a storage account, a container and its contents may be recovered after it has been deleted, within a retention period that you specify. You can restore a soft deleted container by calling.
-- BlobServiceClient.[undeleteContainer](/javascript/api/@azure/storage-blob/blobserviceclient#@azure-storage-blob-blobserviceclient-undeletecontainer) +- BlobServiceClient.[undeleteContainer](/javascript/api/@azure/storage-blob/blobserviceclient#@azure-storage-blob-blobserviceclient-undeletecontainer) The following example finds a deleted container, gets the version ID of that deleted container, and then passes that ID into the **undeleteContainer** method to restore the container. diff --git a/articles/storage/blobs/storage-blob-download.md b/articles/storage/blobs/storage-blob-download.md index 97803d546ac36..97f304b57f2b8 100644 --- a/articles/storage/blobs/storage-blob-download.md +++ b/articles/storage/blobs/storage-blob-download.md @@ -59,7 +59,7 @@ public static async Task DownloadToStream(BlobClient blobClient, string localFil The following example downloads a blob to a string. This example assumes that the blob is a text file. ```csharp -public static async Task DownloadToText(BlobClient blobClient, string localFilePath) +public static async Task DownloadToText(BlobClient blobClient) { BlobDownloadResult downloadResult = await blobClient.DownloadContentAsync(); string downloadedData = downloadResult.Content.ToString(); diff --git a/articles/storage/blobs/storage-blobs-static-site-github-actions.md b/articles/storage/blobs/storage-blobs-static-site-github-actions.md index c31e18f735ce5..8af7cd88bb4d9 100644 --- a/articles/storage/blobs/storage-blobs-static-site-github-actions.md +++ b/articles/storage/blobs/storage-blobs-static-site-github-actions.md @@ -152,7 +152,7 @@ You need to provide your application's **Client ID**, **Tenant ID**, and **Subsc 1. Go to **Actions** for your GitHub repository. - :::image type="content" source="media/storage-blob-static-website/storage-blob-github-actions-header.png" alt-text="GitHub actions menu item"::: + :::image type="content" source="media/storage-blob-static-website/storage-blob-github-actions-header.png" alt-text="GitHub Actions menu item"::: 1. Select **Set up your workflow yourself**. @@ -240,7 +240,7 @@ You need to provide your application's **Client ID**, **Tenant ID**, and **Subsc 1. Go to **Actions** for your GitHub repository. - :::image type="content" source="media/storage-blob-static-website/storage-blob-github-actions-header.png" alt-text="GitHub actions menu item"::: + :::image type="content" source="media/storage-blob-static-website/storage-blob-github-actions-header.png" alt-text="GitHub Actions menu item"::: 1. Select **Set up your workflow yourself**. @@ -358,7 +358,7 @@ You need to provide your application's **Client ID**, **Tenant ID**, and **Subsc 1. Open the first result to see detailed logs of your workflow's run.
- :::image type="content" source="../media/index/github-actions-run.png" alt-text="Log of GitHub actions run"::: + :::image type="content" source="../media/index/github-actions-run.png" alt-text="Log of GitHub Actions run"::: ## Clean up resources diff --git a/articles/storage/blobs/storage-feature-support-in-storage-accounts.md b/articles/storage/blobs/storage-feature-support-in-storage-accounts.md index 9849997a88f83..fce2ec6cdf788 100644 --- a/articles/storage/blobs/storage-feature-support-in-storage-accounts.md +++ b/articles/storage/blobs/storage-feature-support-in-storage-accounts.md @@ -20,7 +20,7 @@ The items that appear in these tables will change over time as support continues | Storage feature | Blob Storage (default support) | Data Lake Storage Gen2 1 | NFS 3.0 1 | SFTP 1 | |---------------|-------------------|---|---|--| | [Access tier - archive](access-tiers-overview.md) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | -| [Access tier - cold](access-tiers-overview.md) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png)| ![Yes](../media/icons/yes-icon.png) | +| [Access tier - cool](access-tiers-overview.md) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png)| ![Yes](../media/icons/yes-icon.png) | | [Access tier - hot](access-tiers-overview.md) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | | [Anonymous public access](anonymous-read-access-configure.md) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png)| ![Yes](../media/icons/yes-icon.png) | | [Azure Active Directory security](authorize-access-azure-active-directory.md) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | ![No](../media/icons/no-icon.png) | ![No](../media/icons/no-icon.png) | @@ -66,7 +66,7 @@ The items that appear in these tables will change over time as support continues | Storage feature | Blob Storage (default support) | Data Lake Storage Gen2 1 | NFS 3.0 1 | SFTP 1 | |---------------|-------------------|---|---|--| | [Access tier - archive](access-tiers-overview.md) | ![No](../media/icons/no-icon.png) | ![No](../media/icons/no-icon.png) | ![No](../media/icons/no-icon.png) | ![No](../media/icons/no-icon.png) | -| [Access tier - cold](access-tiers-overview.md) | ![No](../media/icons/no-icon.png) | ![No](../media/icons/no-icon.png) | ![No](../media/icons/no-icon.png) | ![No](../media/icons/no-icon.png) | +| [Access tier - cool](access-tiers-overview.md) | ![No](../media/icons/no-icon.png) | ![No](../media/icons/no-icon.png) | ![No](../media/icons/no-icon.png) | ![No](../media/icons/no-icon.png) | | [Access tier - hot](access-tiers-overview.md) | ![No](../media/icons/no-icon.png) | ![No](../media/icons/no-icon.png) | ![No](../media/icons/no-icon.png) | ![No](../media/icons/no-icon.png) | | [Anonymous public access](anonymous-read-access-configure.md) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | ![No](../media/icons/no-icon.png) | | [Azure Active Directory security](authorize-access-azure-active-directory.md) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | ![No](../media/icons/no-icon.png) | 
![No](../media/icons/no-icon.png) | diff --git a/articles/storage/blobs/storage-manage-find-blobs.md b/articles/storage/blobs/storage-manage-find-blobs.md index 8f46409bb07f6..54f872e365f6e 100644 --- a/articles/storage/blobs/storage-manage-find-blobs.md +++ b/articles/storage/blobs/storage-manage-find-blobs.md @@ -139,7 +139,7 @@ The below table shows all the valid operators for `Find Blobs by Tags`: | > | Greater than | `"Date" > '2018-06-18'` | | >= | Greater than or equal | `"Priority" >= '5'` | | < | Less than | `"Age" < '32'` | -| <= | Less than or equal | `"Company" <= 'Contoso'` | +| <= | Less than or equal | `"Priority" <= '5'` | | AND | Logical and | `"Rank" >= '010' AND "Rank" < '100'` | | @container | Scope to a specific container | `@container = 'videofiles' AND "status" = 'done'` | @@ -165,7 +165,7 @@ The below table shows the valid operators for conditional operations: | > | Greater than | `"Date" > '2018-06-18'` | | >= | Greater than or equal | `"Priority" >= '5'` | | < | Less than | `"Age" < '32'` | -| <= | Less than or equal | `"Company" <= 'Contoso'` | +| <= | Less than or equal | `"Priority" <= '5'` | | AND | Logical and | `"Rank" >= '010' AND "Rank" < '100'` | | OR | Logical or | `"Status" = 'Done' OR "Priority" >= '05'` | diff --git a/articles/storage/blobs/storage-quickstart-blobs-go.md b/articles/storage/blobs/storage-quickstart-blobs-go.md index 7717ea853d764..d2c832f001ea0 100644 --- a/articles/storage/blobs/storage-quickstart-blobs-go.md +++ b/articles/storage/blobs/storage-quickstart-blobs-go.md @@ -66,7 +66,7 @@ Run the following AzureCli command to assign the storage account permissions: az role assignment create --assignee "" --role "Storage Blob Data Contributor" --scope "" ``` -Learn more about Azure's built-in RBAC roles, check out [Built-in roles](/azure/role-based-access-control/built-in-roles). +Learn more about Azure's built-in RBAC roles, check out [Built-in roles](../../role-based-access-control/built-in-roles.md). > Note: Azure Cli has built in helper fucntions that retrieve the storage access keys when permissions are not detected. That functionally does not transfer to the DefaultAzureCredential, which is the reason for assiging RBAC roles to your account. @@ -271,4 +271,4 @@ See these other resources for Go development with Blob storage: ## Next steps -In this quickstart, you learned how to transfer files between a local disk and Azure blob storage using Go. For more information about the Azure Storage Blob SDK, view the [Source Code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob) and [API Reference](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob). +In this quickstart, you learned how to transfer files between a local disk and Azure blob storage using Go. For more information about the Azure Storage Blob SDK, view the [Source Code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob) and [API Reference](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob). \ No newline at end of file diff --git a/articles/storage/blobs/storage-quickstart-blobs-python.md b/articles/storage/blobs/storage-quickstart-blobs-python.md index 54434035ad895..444a0c402a1ad 100644 --- a/articles/storage/blobs/storage-quickstart-blobs-python.md +++ b/articles/storage/blobs/storage-quickstart-blobs-python.md @@ -48,12 +48,6 @@ Create a Python application named *blob-quickstart-v12*. cd blob-quickstart-v12 ``` -1. 
In side the *blob-quickstart-v12* directory, create another directory called *data*. This directory is where the blob data files will be created and stored. - - ```console - mkdir data - ``` - ### Install the package While still in the application directory, install the Azure Blob Storage client library for Python package by using the `pip install` command. diff --git a/articles/storage/common/azure-defender-storage-configure.md b/articles/storage/common/azure-defender-storage-configure.md index c9a820b44a398..6bcae4b7f99fe 100644 --- a/articles/storage/common/azure-defender-storage-configure.md +++ b/articles/storage/common/azure-defender-storage-configure.md @@ -8,7 +8,7 @@ author: tamram ms.service: storage ms.subservice: common ms.topic: conceptual -ms.date: 05/12/2022 +ms.date: 05/31/2022 ms.author: tamram ms.reviewer: ozgun ms.custom: devx-track-azurepowershell @@ -63,10 +63,10 @@ Microsoft Defender for Storage is now enabled for all storage accounts in this s ### [Portal](#tab/azure-portal) 1. Launch the [Azure portal](https://portal.azure.com/). -1. Navigate to your storage account. Under **Settings**, select **Advanced security**. +1. Navigate to your storage account. Under **Security + networking**, select **Security**. 1. Select **Enable Microsoft Defender for Storage**. - :::image type="content" source="media/azure-defender-storage-configure/enable-azure-defender-portal.png" alt-text="Screenshot showing how to enable an account for Microsoft Defender for Storage."::: + :::image type="content" source="media/azure-defender-storage-configure/enable-azure-defender-portal.png" alt-text="Screenshot showing how to enable a storage account for Microsoft Defender for Storage."::: Microsoft Defender for Storage is now enabled for this storage account. 
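The same account-level Defender setting can also be turned on from a script. The following is a minimal sketch (not part of the tabbed steps above) that assumes the Az.Security PowerShell module is installed, that you're signed in with `Connect-AzAccount`, and that the placeholder values are replaced with your own subscription, resource group, and storage account names:

```azurepowershell
# Sketch: enable Microsoft Defender for Storage on a single storage account.
# The resource ID below is a placeholder; substitute your own values.
$resourceId = "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>"

Enable-AzSecurityAdvancedThreatProtection -ResourceId $resourceId

# Confirm that the setting is now enabled.
Get-AzSecurityAdvancedThreatProtection -ResourceId $resourceId
```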
diff --git a/articles/storage/common/media/azure-defender-storage-configure/enable-azure-defender-portal.png b/articles/storage/common/media/azure-defender-storage-configure/enable-azure-defender-portal.png index 4e9111ef7c9c7..fc7b73ec1835b 100644 Binary files a/articles/storage/common/media/azure-defender-storage-configure/enable-azure-defender-portal.png and b/articles/storage/common/media/azure-defender-storage-configure/enable-azure-defender-portal.png differ diff --git a/articles/storage/common/media/storage-account-create/create-account-networking-tab-lrg.png b/articles/storage/common/media/storage-account-create/create-account-networking-tab-lrg.png index 74f35d4361b93..fdc4e5196d5bf 100644 Binary files a/articles/storage/common/media/storage-account-create/create-account-networking-tab-lrg.png and b/articles/storage/common/media/storage-account-create/create-account-networking-tab-lrg.png differ diff --git a/articles/storage/common/media/storage-account-create/create-account-networking-tab-sml.png b/articles/storage/common/media/storage-account-create/create-account-networking-tab-sml.png index 8cf7cb4980e5b..72d0ac654ede6 100644 Binary files a/articles/storage/common/media/storage-account-create/create-account-networking-tab-sml.png and b/articles/storage/common/media/storage-account-create/create-account-networking-tab-sml.png differ diff --git a/articles/storage/common/media/storage-account-get-info/service-endpoints-portal-lrg.png b/articles/storage/common/media/storage-account-get-info/service-endpoints-portal-lrg.png new file mode 100644 index 0000000000000..702d27073e041 Binary files /dev/null and b/articles/storage/common/media/storage-account-get-info/service-endpoints-portal-lrg.png differ diff --git a/articles/storage/common/media/storage-account-get-info/service-endpoints-portal-sml.png b/articles/storage/common/media/storage-account-get-info/service-endpoints-portal-sml.png new file mode 100644 index 0000000000000..0660d3cfbe029 Binary files /dev/null and b/articles/storage/common/media/storage-account-get-info/service-endpoints-portal-sml.png differ diff --git a/articles/storage/common/media/storage-auth-abac-examples/current-version-read-only.png b/articles/storage/common/media/storage-auth-abac-examples/current-version-read-only.png new file mode 100644 index 0000000000000..ef82b45ad08dd Binary files /dev/null and b/articles/storage/common/media/storage-auth-abac-examples/current-version-read-only.png differ diff --git a/articles/storage/common/redundancy-migration.md b/articles/storage/common/redundancy-migration.md index 8046987b82987..569e1497dd4e5 100644 --- a/articles/storage/common/redundancy-migration.md +++ b/articles/storage/common/redundancy-migration.md @@ -36,7 +36,7 @@ The following table provides an overview of how to switch from each type of repl |--------------------|----------------------------------------------------|---------------------------------------------------------------------|----------------------------------------------------|---------------------------------------------------------------------| | …from LRS | N/A | Use Azure portal, PowerShell, or CLI to change the replication setting1,2 | Perform a manual migration

OR Request a live migration5 | Perform a manual migration OR Switch to GRS/RA-GRS first and then request a live migration3 |
| …from GRS/RA-GRS | Use Azure portal, PowerShell, or CLI to change the replication setting | N/A | Perform a manual migration OR Switch to LRS first and then request a live migration3 | Perform a manual migration OR Request a live migration3 |
-| …from ZRS | Perform a manual migration | Perform a manual migration | N/A | Request a live migration3 OR Use PowerShell or Azure CLI to change the replication setting as part of a failback operation only4 |
+| …from ZRS | Perform a manual migration | Perform a manual migration | N/A | Use Azure Portal, PowerShell or Azure CLI to change the replication setting as part of a failback operation only4 |
| …from GZRS/RA-GZRS | Perform a manual migration | Perform a manual migration | Use Azure portal, PowerShell, or CLI to change the replication setting | N/A |

1 Incurs a one-time egress charge.
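For the cells in the table above that allow changing the replication setting directly (for example, switching from LRS to GRS), the conversion is a single update to the account's SKU. Below is a minimal PowerShell sketch; the resource group and account names are placeholders, and this does not apply to the cases that require a manual or live migration:

```azurepowershell
# Sketch: switch an existing account's replication setting to GRS in place.
# Only valid for conversions the table lists as a simple setting change.
$rgName = "<resource-group>"
$accountName = "<storage-account>"

Set-AzStorageAccount -ResourceGroupName $rgName -Name $accountName -SkuName Standard_GRS
```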
                    diff --git a/articles/storage/common/scalability-targets-standard-account.md b/articles/storage/common/scalability-targets-standard-account.md index d1b7b8388eecf..1ada289eda5e1 100644 --- a/articles/storage/common/scalability-targets-standard-account.md +++ b/articles/storage/common/scalability-targets-standard-account.md @@ -7,7 +7,7 @@ author: tamram ms.service: storage ms.topic: conceptual -ms.date: 05/09/2022 +ms.date: 05/25/2022 ms.author: tamram ms.subservice: common --- diff --git a/articles/storage/common/storage-account-create.md b/articles/storage/common/storage-account-create.md index bbeb958afb0ef..735f2b460ce44 100644 --- a/articles/storage/common/storage-account-create.md +++ b/articles/storage/common/storage-account-create.md @@ -7,7 +7,7 @@ author: tamram ms.service: storage ms.topic: how-to -ms.date: 05/18/2022 +ms.date: 05/26/2022 ms.author: tamram ms.subservice: common ms.custom: devx-track-azurecli, devx-track-azurepowershell @@ -108,11 +108,11 @@ To create an Azure storage account with the Azure portal, follow these steps: 1. From the left portal menu, select **Storage accounts** to display a list of your storage accounts. If the portal menu isn't visible, click the menu button to toggle it on. - :::image type="content" source="media/storage-account-create/menu-expand-sml.png" alt-text="Image of the Azure Portal homepage showing the location of the Menu button near the top left corner of the browser" lightbox="media/storage-account-create/menu-expand-lrg.png"::: + :::image type="content" source="media/storage-account-create/menu-expand-sml.png" alt-text="Image of the Azure Portal homepage showing the location of the Menu button near the top left corner of the browser." lightbox="media/storage-account-create/menu-expand-lrg.png"::: 1. On the **Storage accounts** page, select **Create**. - :::image type="content" source="media/storage-account-create/create-button-sml.png" alt-text="Image showing the location of the create button within the Azure Portal Storage Accounts page" lightbox="media/storage-account-create/create-button-lrg.png"::: + :::image type="content" source="media/storage-account-create/create-button-sml.png" alt-text="Image showing the location of the create button within the Azure Portal Storage Accounts page." lightbox="media/storage-account-create/create-button-lrg.png"::: Options for your new storage account are organized into tabs in the **Create a storage account** page. The following sections describe each of the tabs and their options. @@ -133,7 +133,7 @@ The following table describes the fields on the **Basics** tab. The following image shows a standard configuration of the basic properties for a new storage account. -:::image type="content" source="media/storage-account-create/create-account-basics-tab-sml.png" alt-text="Screenshot showing a standard configuration for a new storage account - Basics tab" lightbox="media/storage-account-create/create-account-basics-tab-lrg.png"::: +:::image type="content" source="media/storage-account-create/create-account-basics-tab-sml.png" alt-text="Screenshot showing a standard configuration for a new storage account - Basics tab." lightbox="media/storage-account-create/create-account-basics-tab-lrg.png"::: ### Advanced tab @@ -157,7 +157,7 @@ The following table describes the fields on the **Advanced** tab. The following image shows a standard configuration of the advanced properties for a new storage account. 
-:::image type="content" source="media/storage-account-create/create-account-advanced-tab-sml.png" alt-text="Screenshot showing a standard configuration for a new storage account - Advanced tab" lightbox="media/storage-account-create/create-account-advanced-tab-lrg.png"::: +:::image type="content" source="media/storage-account-create/create-account-advanced-tab-sml.png" alt-text="Screenshot showing a standard configuration for a new storage account - Advanced tab." lightbox="media/storage-account-create/create-account-advanced-tab-lrg.png"::: ### Networking tab @@ -168,11 +168,16 @@ The following table describes the fields on the **Networking** tab. | Section | Field | Required or optional | Description | |--|--|--|--| | Network connectivity | Connectivity method | Required | By default, incoming network traffic is routed to the public endpoint for your storage account. You can specify that traffic must be routed to the public endpoint through an Azure virtual network. You can also configure private endpoints for your storage account. For more information, see [Use private endpoints for Azure Storage](storage-private-endpoints.md). | +| Network connectivity | Endpoint type | Required | Azure Storage supports two types of endpoints: standard endpoints (the default) and Azure DNS zone endpoints (preview). Within a given subscription, you can create up to 250 accounts with standard endpoints per region, and up to 5000 accounts with Azure DNS zone endpoints per region. To learn how to view the service endpoints for an existing storage account, see [Get service endpoints for the storage account](storage-account-get-info.md#get-service-endpoints-for-the-storage-account). | | Network routing | Routing preference | Required | The network routing preference specifies how network traffic is routed to the public endpoint of your storage account from clients over the internet. By default, a new storage account uses Microsoft network routing. You can also choose to route network traffic through the POP closest to the storage account, which may lower networking costs. For more information, see [Network routing preference for Azure Storage](network-routing-preference.md). | The following image shows a standard configuration of the networking properties for a new storage account. -:::image type="content" source="media/storage-account-create/create-account-networking-tab-sml.png" alt-text="Screenshot showing a standard configuration for a new storage account - Networking tab" lightbox="media/storage-account-create/create-account-Networking-tab-lrg.png"::: +:::image type="content" source="media/storage-account-create/create-account-networking-tab-sml.png" alt-text="Screenshot showing a standard configuration for a new storage account - Networking tab." lightbox="media/storage-account-create/create-account-Networking-tab-lrg.png"::: + +> [!IMPORTANT] +> Azure DNS zone endpoints are currently in PREVIEW. +> See the [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/) for legal terms that apply to Azure features that are in beta, preview, or otherwise not yet released into general availability. ### Data protection tab @@ -192,7 +197,7 @@ The following table describes the fields on the **Data protection** tab. The following image shows a standard configuration of the data protection properties for a new storage account. 
-:::image type="content" source="media/storage-account-create/create-account-protection-tab-sml.png" alt-text="Screenshot showing a standard configuration for a new storage account - Data Protection tab" lightbox="media/storage-account-create/create-account-protection-tab-lrg.png"::: +:::image type="content" source="media/storage-account-create/create-account-protection-tab-sml.png" alt-text="Screenshot showing a standard configuration for a new storage account - Data Protection tab." lightbox="media/storage-account-create/create-account-protection-tab-lrg.png"::: ### Encryption tab @@ -208,7 +213,7 @@ On the **Encryption** tab, you can configure options that relate to how your dat The following image shows a standard configuration of the encryption properties for a new storage account. -:::image type="content" source="media/storage-account-create/create-account-encryption-tab-sml.png" alt-text="Screenshot showing a standard configuration for a new storage account - Encryption tab" lightbox="media/storage-account-create/create-account-encryption-tab-lrg.png"::: +:::image type="content" source="media/storage-account-create/create-account-encryption-tab-sml.png" alt-text="Screenshot showing a standard configuration for a new storage account - Encryption tab." lightbox="media/storage-account-create/create-account-encryption-tab-lrg.png"::: ### Tags tab @@ -216,7 +221,7 @@ On the **Tags** tab, you can specify Resource Manager tags to help organize your The following image shows a standard configuration of the index tag properties for a new storage account. -:::image type="content" source="media/storage-account-create/create-account-tags-tab-sml.png" alt-text="Screenshot showing a standard configuration for a new storage account - Tags tab" lightbox="media/storage-account-create/create-account-tags-tab-lrg.png"::: +:::image type="content" source="media/storage-account-create/create-account-tags-tab-sml.png" alt-text="Screenshot showing a standard configuration for a new storage account - Tags tab." lightbox="media/storage-account-create/create-account-tags-tab-lrg.png"::: ### Review + create tab @@ -226,13 +231,13 @@ If validation fails, then the portal indicates which settings need to be modifie The following image shows the **Review** tab data prior to the creation of a new storage account. -:::image type="content" source="media/storage-account-create/create-account-review-tab-sml.png" alt-text="Screenshot showing a standard configuration for a new storage account - Review tab" lightbox="media/storage-account-create/create-account-review-tab-lrg.png"::: +:::image type="content" source="media/storage-account-create/create-account-review-tab-sml.png" alt-text="Screenshot showing a standard configuration for a new storage account - Review tab." 
lightbox="media/storage-account-create/create-account-review-tab-lrg.png"::: # [PowerShell](#tab/azure-powershell) To create a general-purpose v2 storage account with PowerShell, first create a new resource group by calling the [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) command: -```azurepowershell-interactive +```azurepowershell $resourceGroup = "" $location = "" New-AzResourceGroup -Name $resourceGroup -Location $location @@ -240,13 +245,13 @@ New-AzResourceGroup -Name $resourceGroup -Location $location If you're not sure which region to specify for the `-Location` parameter, you can retrieve a list of supported regions for your subscription with the [Get-AzLocation](/powershell/module/az.resources/get-azlocation) command: -```azurepowershell-interactive +```azurepowershell Get-AzLocation | select Location ``` Next, create a standard general-purpose v2 storage account with read-access geo-redundant storage (RA-GRS) by using the [New-AzStorageAccount](/powershell/module/az.storage/new-azstorageaccount) command. Remember that the name of your storage account must be unique across Azure, so replace the placeholder value in brackets with your own unique value: -```azurepowershell-interactive +```azurepowershell New-AzStorageAccount -ResourceGroupName $resourceGroup ` -Name ` -Location $location ` @@ -254,7 +259,42 @@ New-AzStorageAccount -ResourceGroupName $resourceGroup ` -Kind StorageV2 ``` -To enable a hierarchical namespace for the storage account to use [Azure Data Lake Storage](https://azure.microsoft.com/services/storage/data-lake-storage/), set the `EnableHierarchicalNamespace' parameter to `$True` on the call to the **New-AzStorageAccount** command. +To create an account with Azure DNS zone endpoints (preview), follow these steps: + +1. Register for the preview as described in [Azure DNS zone endpoints (preview)](storage-account-overview.md#azure-dns-zone-endpoints-preview). + +1. Make sure you have the latest version of PowerShellGet installed. + + ```azurepowershell + Install-Module PowerShellGet –Repository PSGallery –Force + ``` + +1. Close and reopen the PowerShell console. + +1. Install version [4.4.2-preview](https://www.powershellgallery.com/packages/Az.Storage/4.4.2-preview) or later of the Az.Storage PowerShell module. You may need to uninstall other versions of the PowerShell module. For more information about installing Azure PowerShell, see [Install Azure PowerShell with PowerShellGet](/powershell/azure/install-az-ps). + + ```azurepowershell + Install-Module Az.Storage -Repository PsGallery -RequiredVersion 4.4.2-preview -AllowClobber -AllowPrerelease -Force + ``` + +Next, create the account, specifying `AzureDnsZone` for the `-DnsEndpointType` parameter. After the account is created, you can see the service endpoints by getting the `PrimaryEndpoints` and `SecondaryEndpoints` properties for the storage account. + +```azurepowershell +$rgName = "" +$accountName = "" + +$account = New-AzStorageAccount -ResourceGroupName $rgName ` + -Name $accountName ` + -SkuName Standard_RAGRS ` + -Location ` + -Kind StorageV2 ` + -DnsEndpointType AzureDnsZone + +$account.PrimaryEndpoints +$account.SecondaryEndpoints +``` + +To enable a hierarchical namespace for the storage account to use [Azure Data Lake Storage](https://azure.microsoft.com/services/storage/data-lake-storage/), set the `EnableHierarchicalNamespace` parameter to `$True` on the call to the **New-AzStorageAccount** command. 
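As a concrete illustration of that last point, the sketch below creates a new account with the hierarchical namespace enabled; the resource group, account name, and location values are placeholders, and the SKU and kind simply reuse the values from the earlier example:

```azurepowershell
# Sketch: create a general-purpose v2 account with a hierarchical namespace
# (Data Lake Storage Gen2). Replace the placeholder values before running.
New-AzStorageAccount -ResourceGroupName "<resource-group>" `
    -Name "<storage-account>" `
    -Location "<location>" `
    -SkuName Standard_RAGRS `
    -Kind StorageV2 `
    -EnableHierarchicalNamespace $true
```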
The following table shows which values to use for the `SkuName` and `Kind` parameters to create a particular type of storage account with the desired redundancy configuration. @@ -274,7 +314,7 @@ To create a general-purpose v2 storage account with Azure CLI, first create a ne ```azurecli-interactive az group create \ --name storage-resource-group \ - --location westus + --location eastus ``` If you're not sure which region to specify for the `--location` parameter, you can retrieve a list of supported regions for your subscription with the [az account list-locations](/cli/azure/account#az-account-list) command. @@ -291,11 +331,36 @@ Next, create a standard general-purpose v2 storage account with read-access geo- az storage account create \ --name \ --resource-group storage-resource-group \ - --location westus \ + --location eastus \ --sku Standard_RAGRS \ --kind StorageV2 ``` +To create an account with Azure DNS zone endpoints (preview), first register for the preview as described in [Azure DNS zone endpoints (preview)](storage-account-overview.md#azure-dns-zone-endpoints-preview). Next, install the preview extension for the Azure CLI if it's not already installed: + +```azurecli +az extension add -name storage-preview +``` + +Next, create the account, specifying `AzureDnsZone` for the `--dns-endpoint-type` parameter. After the account is created, you can see the service endpoints by getting the `PrimaryEndpoints` property of the storage account. + +```azurecli +az storage account create \ + --name \ + --resource-group \ + --location \ + --dns-endpoint-type AzureDnsZone +``` + +After the account is created, you can return the service endpoints by getting the `primaryEndpoints` and `secondaryEndpoints` properties for the storage account. + +```azurecli +az storage account show \ + --resource-group \ + --name \ + --query '[primaryEndpoints, secondaryEndpoints]' +``` + To enable a hierarchical namespace for the storage account to use [Azure Data Lake Storage](https://azure.microsoft.com/services/storage/data-lake-storage/), set the `enable-hierarchical-namespace` parameter to `true` on the call to the **az storage account create** command. Creating a hierarchical namespace requires Azure CLI version 2.0.79 or later. The following table shows which values to use for the `sku` and `kind` parameters to create a particular type of storage account with the desired redundancy configuration. @@ -343,7 +408,7 @@ To learn how to modify this Bicep file or create new ones, see: You can use either Azure PowerShell or Azure CLI to deploy a Resource Manager template to create a storage account. The template used in this how-to article is from [Azure Resource Manager quickstart templates](https://azure.microsoft.com/resources/templates/storage-account-create/). To run the scripts, select **Try it** to open the Azure Cloud Shell. To paste the script, right-click the shell, and then select **Paste**. -```azurepowershell-interactive +```azurepowershell $resourceGroupName = Read-Host -Prompt "Enter the Resource Group name" $location = Read-Host -Prompt "Enter the location (i.e. centralus)" @@ -422,7 +487,7 @@ az storage account delete --name storageAccountName --resource-group resourceGro To delete the storage account, use either Azure PowerShell or Azure CLI. 
-```azurepowershell-interactive +```azurepowershell $storageResourceGroupName = Read-Host -Prompt "Enter the resource group name" $storageAccountName = Read-Host -Prompt "Enter the storage account name" Remove-AzStorageAccount -Name $storageAccountName -ResourceGroupName $storageResourceGroupName diff --git a/articles/storage/common/storage-account-get-info.md b/articles/storage/common/storage-account-get-info.md index a040cea612662..2c754bcee0faa 100644 --- a/articles/storage/common/storage-account-get-info.md +++ b/articles/storage/common/storage-account-get-info.md @@ -6,7 +6,7 @@ services: storage author: tamram ms.author: tamram -ms.date: 06/23/2021 +ms.date: 05/26/2022 ms.service: storage ms.subservice: common ms.topic: how-to @@ -94,6 +94,80 @@ az storage account show \ --- +## Get service endpoints for the storage account + +The service endpoints for a storage account provide the base URL for any blob, queue, table, or file object in Azure Storage. Use this base URL to construct the address for any given resource. + +# [Azure portal](#tab/portal) + +To get the service endpoints for a storage account in the Azure portal, follow these steps: + +1. Navigate to your storage account in the Azure portal. +1. In the **Settings** section, locate the **Endpoints** setting. +1. On the **Endpoints** page, you'll see the service endpoint for each Azure Storage service, as well as the resource ID. + + :::image type="content" source="media/storage-account-get-info/service-endpoints-portal-sml.png" alt-text="Screenshot showing how to retrieve service endpoints for a storage account." lightbox="media/storage-account-get-info/service-endpoints-portal-lrg.png"::: + +If the storage account is geo-replicated, the secondary endpoints will also appear on this page. + +# [PowerShell](#tab/powershell) + +To get the service endpoints for a storage account with PowerShell, call [Get-AzStorageAccount](/powershell/module/az.storage/get-azstorageaccount) and return the `PrimaryEndpoints` property. If the storage account is geo-replicated, then the `SecondaryEndpoints` property returns the secondary endpoints. + +```azurepowershell +(Get-AzStorageAccount -ResourceGroupName $rgName -Name $accountName).PrimaryEndpoints +(Get-AzStorageAccount -ResourceGroupName $rgName -Name $accountName).SecondaryEndpoints +``` + +# [Azure CLI](#tab/azure-cli) + +To get the service endpoints for a storage account with Azure CLI, call [az storage account show](/cli/azure/storage/account#az-storage-account-show) and return the `primaryEndpoints` property. If the storage account is geo-replicated, then the `secondaryEndpoints` property returns the secondary endpoints. + +```azurecli +az storage account show \ + --resource-group \ + --name \ + --query '[primaryEndpoints, secondaryEndpoints]' +``` + +--- + +## Get a connection string for the storage account + +You can use a connection string to authorize access to Azure Storage with the account access keys (Shared Key authorization). To learn more about connection strings, see [Configure Azure Storage connection strings](storage-configure-connection-string.md). + +[!INCLUDE [storage-account-key-note-include](../../../includes/storage-account-key-note-include.md)] + +# [Portal](#tab/portal) + +To get a connection string in the Azure portal, follow these steps: + +1. Navigate to your storage account in the Azure portal. +1. In the **Security + networking** section, locate the **Access keys** setting. +1. 
To display the account keys and associated connection strings, select the **Show keys** button at the top of the page. +1. To copy a connection string to the clipboard, select the **Copy** button to the right of the connection string. + +# [PowerShell](#tab/powershell) + +To get a connection string with PowerShell, first get a `StorageAccountContext` object, then retrieve the `ConnectionString` property. + +```azurepowershell +$rgName = "" +$accountName = "storagesamplesdnszone2" + +(Get-AzStorageAccount -ResourceGroupName -Name ).Context.ConnectionString +``` + +# [Azure CLI](#tab/azure-cli) + +To get a connection string with Azure CLI, call the [az storage account show-connection-string](/cli/azure/storage/account#az-storage-account-show-connection-string) command. + +```azurecli +az storage account show-connection-string --resource-group --name +``` + +--- + ## Next steps - [Storage account overview](storage-account-overview.md) diff --git a/articles/storage/common/storage-account-overview.md b/articles/storage/common/storage-account-overview.md index c31282e672fe9..dff57f31e419a 100644 --- a/articles/storage/common/storage-account-overview.md +++ b/articles/storage/common/storage-account-overview.md @@ -7,7 +7,7 @@ author: tamram ms.service: storage ms.topic: conceptual -ms.date: 04/05/2022 +ms.date: 05/26/2022 ms.author: tamram ms.subservice: common --- @@ -44,30 +44,78 @@ The service-level agreement (SLA) for Azure Storage accounts is available at [SL > [!NOTE] > You can't change a storage account to a different type after it's created. To move your data to a storage account of a different type, you must create a new account and copy the data to the new account. -## Storage account endpoints - -A storage account provides a unique namespace in Azure for your data. Every object that you store in Azure Storage has an address that includes your unique account name. The combination of the account name and the Azure Storage service endpoint forms the endpoints for your storage account. +## Storage account name When naming your storage account, keep these rules in mind: - Storage account names must be between 3 and 24 characters in length and may contain numbers and lowercase letters only. - Your storage account name must be unique within Azure. No two storage accounts can have the same name. -The following table lists the format of the endpoint for each of the Azure Storage services. +## Storage account endpoints + +A storage account provides a unique namespace in Azure for your data. Every object that you store in Azure Storage has a URL address that includes your unique account name. The combination of the account name and the service endpoint forms the endpoints for your storage account. + +There are two types of service endpoints available for a storage account: + +- Standard endpoints (recommended). You can create up to 250 storage accounts per region with standard endpoints in a given subscription. +- Azure DNS zone endpoints (preview). You can create up to 5000 storage accounts per region with Azure DNS zone endpoints in a given subscription. + +Within a single subscription, you can create accounts with either standard or Azure DNS Zone endpoints, for a maximum of 5250 accounts per subscription. + +> [!IMPORTANT] +> Azure DNS zone endpoints are currently in PREVIEW. 
+> See the [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/) for legal terms that apply to Azure features that are in beta, preview, or otherwise not yet released into general availability. + +You can configure your storage account to use a custom domain for the Blob Storage endpoint. For more information, see [Configure a custom domain name for your Azure Storage account](../blobs/storage-custom-domain-name.md). + +### Standard endpoints + +A standard service endpoint in Azure Storage includes the protocol (HTTPS is recommended), the storage account name as the subdomain, and a fixed domain that includes the name of the service. + +The following table lists the format for the standard endpoints for each of the Azure Storage services. | Storage service | Endpoint | |--|--| | Blob Storage | `https://.blob.core.windows.net` | +| Static website (Blob Storage) | `https://.web.core.windows.net` | | Data Lake Storage Gen2 | `https://.dfs.core.windows.net` | | Azure Files | `https://.file.core.windows.net` | | Queue Storage | `https://.queue.core.windows.net` | | Table Storage | `https://.table.core.windows.net` | -Construct the URL for accessing an object in a storage account by appending the object's location in the storage account to the endpoint. For example, the URL for a blob will be similar to: +When your account is created with standard endpoints, you can easily construct the URL for an object in Azure Storage by appending the object's location in the storage account to the endpoint. For example, the URL for a blob will be similar to: `https://*mystorageaccount*.blob.core.windows.net/*mycontainer*/*myblob*` -You can also configure your storage account to use a custom domain for blobs. For more information, see [Configure a custom domain name for your Azure Storage account](../blobs/storage-custom-domain-name.md). +### Azure DNS zone endpoints (preview) + +When you create an Azure Storage account with Azure DNS zone endpoints (preview), Azure Storage dynamically selects an Azure DNS zone and assigns it to the storage account when it is created. The new storage account's endpoints are created in the dynamically selected Azure DNS zone. For more information about Azure DNS zones, see [DNS zones](../../dns/dns-zones-records.md#dns-zones). + +An Azure DNS zone service endpoint in Azure Storage includes the protocol (HTTPS is recommended), the storage account name as the subdomain, and a domain that includes the name of the service and the identifier for the DNS zone. The identifier for the DNS zone always begins with `z` and can range from `z00` to `z99`. + +The following table lists the format for Azure DNS Zone endpoints for each of the Azure Storage services, where the zone is `z5`. + +| Storage service | Endpoint | +|--|--| +| Blob Storage | `https://.z[00-99].blob.core.windows.net` | +| Static website (Blob Storage) | `https://.z[00-99].web.core.windows.net` | +| Data Lake Storage Gen2 | `https://.z[00-99].dfs.core.windows.net` | +| Azure Files | `https://.z[00-99].file.core.windows.net` | +| Queue Storage | `https://.z[00-99].queue.core.windows.net` | +| Table Storage | `https://.z[00-99].table.core.windows.net` | + +> [!IMPORTANT] +> You can create up to 5000 accounts with Azure DNS Zone endpoints per subscription. However, you may need to update your application code to query for the account endpoint at runtime. 
You can call the [Get Properties](/rest/api/storagerp/storage-accounts/get-properties) operation to query for the storage account endpoints. + +Azure DNS zone endpoints are supported for accounts created with the Azure Resource Manager deployment model only. For more information, see [Azure Resource Manager overview](../../azure-resource-manager/management/overview.md). + +To learn how to create a storage account with Azure DNS Zone endpoints, see [Create a storage account](storage-account-create.md). + +#### About the preview + +The Azure DNS zone endpoints preview is available in all public regions. The preview is not available in any government cloud regions. + +To register for the preview, follow the instructions provided in [Set up preview features in Azure subscription](../../azure-resource-manager/management/preview-features.md#register-preview-feature). Specify `PartitionedDnsPublicPreview` as the feature name and `Microsoft.Storage` as the provider namespace. ## Migrate a storage account @@ -114,6 +162,10 @@ The following table describes the legacy storage account types. These account ty | Standard general-purpose v1 | Blob Storage, Queue Storage, Table Storage, and Azure Files | LRS/GRS/RA-GRS | Resource Manager, classic | General-purpose v1 accounts may not have the latest features or the lowest per-gigabyte pricing. Consider using it for these scenarios:

 ## Migrate a storage account

@@ -114,6 +162,10 @@ The following table describes the legacy storage account types. These account ty
 | Standard general-purpose v1 | Blob Storage, Queue Storage, Table Storage, and Azure Files | LRS/GRS/RA-GRS | Resource Manager, classic | General-purpose v1 accounts may not have the latest features or the lowest per-gigabyte pricing. Consider using it for these scenarios:
                    • Your applications require the Azure [classic deployment model](../../azure-portal/supportability/classic-deployment-model-quota-increase-requests.md).
                    • Your applications are transaction-intensive or use significant geo-replication bandwidth, but don’t require large capacity. In this case, a general-purpose v1 account may be the most economical choice.
                    • You use a version of the Azure Storage REST API that is earlier than February 14, 2014, or a client library with a version lower than 4.x, and you can’t upgrade your application.
                    • You're selecting a storage account to use as a cache for Azure Site Recovery. Because Site Recovery is transaction-intensive, a general-purpose v1 account may be more cost-effective. For more information, see [Support matrix for Azure VM disaster recovery between Azure regions](../../site-recovery/azure-to-azure-support-matrix.md#cache-storage).
                    | | Standard Blob Storage | Blob Storage (block blobs and append blobs only) | LRS/GRS/RA-GRS | Resource Manager | Microsoft recommends using standard general-purpose v2 accounts instead when possible. | +## Scalability targets for standard storage accounts + +[!INCLUDE [azure-storage-account-limits-standard](../../../includes/azure-storage-account-limits-standard.md)] + ## Next steps - [Create a storage account](storage-account-create.md) diff --git a/articles/storage/common/storage-auth-abac-attributes.md b/articles/storage/common/storage-auth-abac-attributes.md index daf80652ef72e..021961145f4ce 100644 --- a/articles/storage/common/storage-auth-abac-attributes.md +++ b/articles/storage/common/storage-auth-abac-attributes.md @@ -7,7 +7,7 @@ author: santoshc ms.service: storage ms.topic: conceptual -ms.date: 05/16/2022 +ms.date: 05/24/2022 ms.author: santoshc ms.reviewer: jiacfan ms.subservice: common @@ -75,7 +75,7 @@ This section lists the supported Azure Blob storage actions and suboperations yo > | **Description** | All blob read operations excluding list. | > | **DataAction** | `Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read` | > | **Suboperation** | NOT `Blob.List` | -> | **Resource attributes** | [Account name](#account-name)
                    [Is hierarchical namespace enabled](#is-hierarchical-namespace-enabled)
                    [Container name](#container-name)
                    [Blob path](#blob-path)
                    [Encryption scope name](#encryption-scope-name) | +> | **Resource attributes** | [Account name](#account-name)
                    [Is Current Version](#is-current-version)
                    [Is hierarchical namespace enabled](#is-hierarchical-namespace-enabled)
                    [Container name](#container-name)
                    [Blob path](#blob-path)
                    [Encryption scope name](#encryption-scope-name) | > | **Request attributes** | [Version ID](#version-id)
                    [Snapshot](#snapshot) | > | **Principal attributes support** | True | > | **Examples** | `!(ActionMatches{'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read'} AND NOT SubOperationMatches{'Blob.List'})`
                    [Example: Read blobs in named containers with a path](storage-auth-abac-examples.md#example-read-blobs-in-named-containers-with-a-path) | @@ -89,7 +89,7 @@ This section lists the supported Azure Blob storage actions and suboperations yo > | **Description** | Read blobs with tags. | > | **DataAction** | `Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read` | > | **Suboperation** | `Blob.Read.WithTagConditions` | -> | **Resource attributes** | [Account name](#account-name)
                    [Is hierarchical namespace enabled](#is-hierarchical-namespace-enabled)
                    [Container name](#container-name)
                    [Blob path](#blob-path)
                    [Blob index tags [Values in key]](#blob-index-tags-values-in-key)
                    [Blob index tags [Keys]](#blob-index-tags-keys)
                    [Encryption scope name](#encryption-scope-name) | +> | **Resource attributes** | [Account name](#account-name)
                    [Is Current Version](#is-current-version)
                    [Is hierarchical namespace enabled](#is-hierarchical-namespace-enabled)
                    [Container name](#container-name)
                    [Blob path](#blob-path)
                    [Blob index tags [Values in key]](#blob-index-tags-values-in-key)
                    [Blob index tags [Keys]](#blob-index-tags-keys)
                    [Encryption scope name](#encryption-scope-name) | > | **Request attributes** | [Version ID](#version-id)
                    [Snapshot](#snapshot) | > | **Principal attributes support** | True | > | **Examples** | `!(ActionMatches{'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read'} AND SubOperationMatches{'Blob.Read.WithTagConditions'})`
                    [Example: Read blobs with a blob index tag](storage-auth-abac-examples.md#example-read-blobs-with-a-blob-index-tag) | @@ -104,7 +104,7 @@ This section lists the supported Azure Blob storage actions and suboperations yo > | **Description** | DataAction for reading blob index tags. | > | **DataAction** | `Microsoft.Storage/storageAccounts/blobServices/containers/blobs/tags/read` | > | **Suboperation** | | -> | **Resource attributes** | [Account name](#account-name)
                    [Is hierarchical namespace enabled](#is-hierarchical-namespace-enabled)
                    [Container name](#container-name)
                    [Blob path](#blob-path)
                    [Blob index tags [Values in key]](#blob-index-tags-values-in-key)
                    [Blob index tags [Keys]](#blob-index-tags-keys) | +> | **Resource attributes** | [Account name](#account-name)
                    [Is Current Version](#is-current-version)
                    [Is hierarchical namespace enabled](#is-hierarchical-namespace-enabled)
                    [Container name](#container-name)
                    [Blob path](#blob-path)
                    [Blob index tags [Values in key]](#blob-index-tags-values-in-key)
                    [Blob index tags [Keys]](#blob-index-tags-keys) | > | **Request attributes** | [Version ID](#version-id)
                    [Snapshot](#snapshot) | > | **Principal attributes support** | True | > | **Learn more** | [Manage and find Azure Blob data with blob index tags](../blobs/storage-manage-find-blobs.md) | @@ -145,7 +145,7 @@ This section lists the supported Azure Blob storage actions and suboperations yo > | **Description** | DataAction for writing to blobs. | > | **DataAction** | `Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write` | > | **Suboperation** | `Blob.Write.Tier` | -> | **Resource attributes** | [Account name](#account-name)
                    [Is hierarchical namespace enabled](#is-hierarchical-namespace-enabled)
                    [Container name](#container-name)
                    [Blob path](#blob-path)
                    [Encryption scope name](#encryption-scope-name) | +> | **Resource attributes** | [Account name](#account-name)
                    [Is Current Version](#is-current-version)
                    [Is hierarchical namespace enabled](#is-hierarchical-namespace-enabled)
                    [Container name](#container-name)
                    [Blob path](#blob-path)
                    [Encryption scope name](#encryption-scope-name) | > | **Request attributes** | [Version ID](#version-id)
                    [Snapshot](#snapshot) | > | **Principal attributes support** | True | > | **Examples** | `!(ActionMatches{'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write'} AND SubOperationMatches{'Blob.Write.Tier'})` | @@ -188,7 +188,7 @@ This section lists the supported Azure Blob storage actions and suboperations yo > | **Description** | DataAction for writing blob index tags. | > | **DataAction** | `Microsoft.Storage/storageAccounts/blobServices/containers/blobs/tags/write` | > | **Suboperation** | | -> | **Resource attributes** | [Account name](#account-name)
                    [Is hierarchical namespace enabled](#is-hierarchical-namespace-enabled)
                    [Container name](#container-name)
                    [Blob path](#blob-path)
                    [Blob index tags [Values in key]](#blob-index-tags-values-in-key)
                    [Blob index tags [Keys]](#blob-index-tags-keys) | +> | **Resource attributes** | [Account name](#account-name)
                    [Is Current Version](#is-current-version)
                    [Is hierarchical namespace enabled](#is-hierarchical-namespace-enabled)
                    [Container name](#container-name)
                    [Blob path](#blob-path)
                    [Blob index tags [Values in key]](#blob-index-tags-values-in-key)
                    [Blob index tags [Keys]](#blob-index-tags-keys) | > | **Request attributes** | [Blob index tags [Values in key]](#blob-index-tags-values-in-key)
                    [Blob index tags [Keys]](#blob-index-tags-keys)
                    [Version ID](#version-id)
                    [Snapshot](#snapshot) | > | **Principal attributes support** | True | > | **Examples** | `!(ActionMatches{'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/tags/write'})`
                    [Example: Existing blobs must have blob index tag keys](storage-auth-abac-examples.md#example-existing-blobs-must-have-blob-index-tag-keys) | @@ -216,7 +216,7 @@ This section lists the supported Azure Blob storage actions and suboperations yo > | **Description** | DataAction for deleting blobs. | > | **DataAction** | `Microsoft.Storage/storageAccounts/blobServices/containers/blobs/delete` | > | **Suboperation** | | -> | **Resource attributes** | [Account name](#account-name)
                    [Is hierarchical namespace enabled](#is-hierarchical-namespace-enabled)
                    [Container name](#container-name)
                    [Blob path](#blob-path) | +> | **Resource attributes** | [Account name](#account-name)
                    [Is Current Version](#is-current-version)
                    [Is hierarchical namespace enabled](#is-hierarchical-namespace-enabled)
                    [Container name](#container-name)
                    [Blob path](#blob-path) | > | **Request attributes** | [Version ID](#version-id)
                    [Snapshot](#snapshot) | > | **Principal attributes support** | True | > | **Examples** | `!(ActionMatches{'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/delete'})`
                    [Example: Read, write, or delete blobs in named containers](storage-auth-abac-examples.md#example-read-write-or-delete-blobs-in-named-containers) | @@ -244,7 +244,7 @@ This section lists the supported Azure Blob storage actions and suboperations yo > | **Description** | DataAction for permanently deleting a blob overriding soft-delete. | > | **DataAction** | `Microsoft.Storage/storageAccounts/blobServices/containers/blobs/permanentDelete/action` | > | **Suboperation** | | -> | **Resource attributes** | [Account name](#account-name)
                    [Is hierarchical namespace enabled](#is-hierarchical-namespace-enabled)
                    [Container name](#container-name)
                    [Blob path](#blob-path) | +> | **Resource attributes** | [Account name](#account-name)
                    [Is Current Version](#is-current-version)
                    [Is hierarchical namespace enabled](#is-hierarchical-namespace-enabled)
                    [Container name](#container-name)
                    [Blob path](#blob-path) | > | **Request attributes** | [Version ID](#version-id)
                    [Snapshot](#snapshot) | > | **Principal attributes support** | True | @@ -296,10 +296,10 @@ This section lists the supported Azure Blob storage actions and suboperations yo > | **Description** | DataAction for all data operations on storage accounts with hierarchical namespace enabled.
                    If your role definition includes the `Microsoft.Storage/storageAccounts/blobServices/containers/blobs/runAsSuperUser/action` action, you should target this action in your condition. Targeting this action ensures the condition will still work as expected if hierarchical namespace is enabled for a storage account. | > | **DataAction** | `Microsoft.Storage/storageAccounts/blobServices/containers/blobs/runAsSuperUser/action` | > | **Suboperation** | | -> | **Resource attributes** | [Account name](#account-name)
                    [Is hierarchical namespace enabled](#is-hierarchical-namespace-enabled)
                    [Container name](#container-name)
                    [Blob path](#blob-path) | +> | **Resource attributes** | [Account name](#account-name)
                    [Is Current Version](#is-current-version)
                    [Is hierarchical namespace enabled](#is-hierarchical-namespace-enabled)
                    [Container name](#container-name)
                    [Blob path](#blob-path) | > | **Request attributes** | | > | **Principal attributes support** | True | -> | **Examples** | [Example: Read only storage accounts with hierarchical namespace enabled](storage-auth-abac-examples.md#example-read-only-storage-accounts-with-hierarchical-namespace-enabled)
                    [Example: Read, write, or delete blobs in named containers](storage-auth-abac-examples.md#example-read-write-or-delete-blobs-in-named-containers)
                    [Example: Read blobs in named containers with a path](storage-auth-abac-examples.md#example-read-blobs-in-named-containers-with-a-path)
                    [Example: Read or list blobs in named containers with a path](storage-auth-abac-examples.md#example-read-or-list-blobs-in-named-containers-with-a-path)
                    [Example: Write blobs in named containers with a path](storage-auth-abac-examples.md#example-write-blobs-in-named-containers-with-a-path) | +> | **Examples** | [Example: Read, write, or delete blobs in named containers](storage-auth-abac-examples.md#example-read-write-or-delete-blobs-in-named-containers)
                    [Example: Read blobs in named containers with a path](storage-auth-abac-examples.md#example-read-blobs-in-named-containers-with-a-path)
                    [Example: Read or list blobs in named containers with a path](storage-auth-abac-examples.md#example-read-or-list-blobs-in-named-containers-with-a-path)
                    [Example: Write blobs in named containers with a path](storage-auth-abac-examples.md#example-write-blobs-in-named-containers-with-a-path)
                    [Example: Read only current blob versions](storage-auth-abac-examples.md#example-read-only-current-blob-versions)
                    [Example: Read current blob versions and any blob snapshots](storage-auth-abac-examples.md#example-read-current-blob-versions-and-any-blob-snapshots)
                    [Example: Read only storage accounts with hierarchical namespace enabled](storage-auth-abac-examples.md#example-read-only-storage-accounts-with-hierarchical-namespace-enabled) | > | **Learn more** | [Azure Data Lake Storage Gen2 hierarchical namespace](../blobs/data-lake-storage-namespace.md) | ## Azure Queue storage actions @@ -471,6 +471,18 @@ This section lists the Azure Blob storage attributes you can use in your conditi > | **Examples** | `@Resource[Microsoft.Storage/storageAccounts/encryptionScopes:name] ForAnyOfAnyValues:StringEquals {'validScope1', 'validScope2'}`
                    [Example: Read blobs with specific encryption scopes](storage-auth-abac-examples.md#example-read-blobs-with-specific-encryption-scopes) | > | **Learn more** | [Create and manage encryption scopes](../blobs/encryption-scope-manage.md) | +### Is Current Version + +> [!div class="mx-tdCol2BreakAll"] +> | Property | Value | +> | --- | --- | +> | **Display name** | Is Current Version | +> | **Description** | Identifies whether the resource is the current version of the blob, in contrast to a snapshot or a specific blob version. | +> | **Attribute** | `Microsoft.Storage/storageAccounts/blobServices/containers/blobs:isCurrentVersion` | +> | **Attribute source** | Resource | +> | **Attribute type** | Boolean | +> | **Examples** | `@Resource[Microsoft.Storage/storageAccounts/blobServices/containers/blobs:isCurrentVersion] BoolEquals true`
                    [Example: Read only current blob versions](storage-auth-abac-examples.md#example-read-only-current-blob-versions)
                    [Example: Read current blob versions and a specific blob version](storage-auth-abac-examples.md#example-read-current-blob-versions-and-a-specific-blob-version) | + ### Is hierarchical namespace enabled > [!div class="mx-tdCol2BreakAll"] @@ -490,14 +502,14 @@ This section lists the Azure Blob storage attributes you can use in your conditi > | Property | Value | > | --- | --- | > | **Display name** | Snapshot | -> | **Description** | The Snapshot identifier for the Blob snapshot. | +> | **Description** | The Snapshot identifier for the Blob snapshot.
                    Available for storage accounts where hierarchical namespace is not enabled and currently in preview for storage accounts where hierarchical namespace is enabled. | > | **Attribute** | `Microsoft.Storage/storageAccounts/blobServices/containers/blobs:snapshot` | > | **Attribute source** | Request | > | **Attribute type** | DateTime | > | **Exists support** | True | > | **Hierarchical namespace support** | False | > | **Examples** | `Exists @Request[Microsoft.Storage/storageAccounts/blobServices/containers/blobs:snapshot]`
                    [Example: Read current blob versions and any blob snapshots](storage-auth-abac-examples.md#example-read-current-blob-versions-and-any-blob-snapshots) | -> | **Learn more** | [Azure Data Lake Storage Gen2 hierarchical namespace](../blobs/data-lake-storage-namespace.md) | +> | **Learn more** | [Blob snapshots](../blobs/snapshots-overview.md)
                    [Azure Data Lake Storage Gen2 hierarchical namespace](../blobs/data-lake-storage-namespace.md) | ### Version ID @@ -511,7 +523,7 @@ This section lists the Azure Blob storage attributes you can use in your conditi > | **Attribute type** | DateTime | > | **Exists support** | True | > | **Hierarchical namespace support** | False | -> | **Examples** | `@Request[Microsoft.Storage/storageAccounts/blobServices/containers/blobs:versionId] DateTimeEquals '2022-06-01T23:38:32.8883645Z'`
                    [Example: Read current blob versions and a specific blob version](storage-auth-abac-examples.md#example-read-current-blob-versions-and-a-specific-blob-version) | +> | **Examples** | `@Request[Microsoft.Storage/storageAccounts/blobServices/containers/blobs:versionId] DateTimeEquals '2022-06-01T23:38:32.8883645Z'`
                    [Example: Read current blob versions and a specific blob version](storage-auth-abac-examples.md#example-read-current-blob-versions-and-a-specific-blob-version)
                    [Example: Read current blob versions and any blob snapshots](storage-auth-abac-examples.md#example-read-current-blob-versions-and-any-blob-snapshots) | > | **Learn more** | [Azure Data Lake Storage Gen2 hierarchical namespace](../blobs/data-lake-storage-namespace.md) | ## Azure Queue storage attributes diff --git a/articles/storage/common/storage-auth-abac-examples.md b/articles/storage/common/storage-auth-abac-examples.md index bae08c8a6a95a..c60fcce953a72 100644 --- a/articles/storage/common/storage-auth-abac-examples.md +++ b/articles/storage/common/storage-auth-abac-examples.md @@ -9,7 +9,7 @@ ms.topic: conceptual ms.author: rolyon ms.reviewer: ms.subservice: common -ms.date: 05/16/2022 +ms.date: 05/24/2022 #Customer intent: As a dev, devops, or it admin, I want to learn about the conditions so that I write more complex conditions. --- @@ -812,12 +812,66 @@ $content = Get-AzStorageBlobContent -Container $grantedContainer -Blob "logs/Alp ## Blob versions or blob snapshots -### Example: Read current blob versions and a specific blob version +### Example: Read only current blob versions -This condition allows a user to read current blob versions as well as read blobs with a version ID of 2022-06-01T23:38:32.8883645Z. The user cannot read other blob versions. +This condition allows a user to only read current blob versions. The user cannot read other blob versions. -> [!NOTE] -> The condition includes a `NOT Exists` expression for the version ID attribute. This expression is included so that the Azure portal can list list the current version of the blob. +You must add this condition to any role assignments that include the following actions. + +> [!div class="mx-tableFixed"] +> | Action | Notes | +> | --- | --- | +> | `Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read` | | +> | `Microsoft.Storage/storageAccounts/blobServices/containers/blobs/runAsSuperUser/action` | Add if role definition includes this action, such as Storage Blob Data Owner. | + +![Diagram of condition showing read access to current blob version only.](./media/storage-auth-abac-examples/current-version-read-only.png) + +Storage Blob Data Owner + +``` +( + ( + !(ActionMatches{'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read'} AND NOT SubOperationMatches{'Blob.List'}) + AND + !(ActionMatches{'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/runAsSuperUser/action'}) + ) + OR + ( + @Resource[Microsoft.Storage/storageAccounts/blobServices/containers/blobs:isCurrentVersion] BoolEquals true + ) +) +``` + +Storage Blob Data Reader, Storage Blob Data Contributor + +``` +( + ( + !(ActionMatches{'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read'} AND NOT SubOperationMatches{'Blob.List'}) + ) + OR + ( + @Resource[Microsoft.Storage/storageAccounts/blobServices/containers/blobs:isCurrentVersion] BoolEquals true + ) +) +``` + +#### Azure portal + +Here are the settings to add this condition using the Azure portal. + +> [!div class="mx-tableFixed"] +> | Condition #1 | Setting | +> | --- | --- | +> | Actions | [Read a blob](storage-auth-abac-attributes.md#read-a-blob)
                    [All data operations for accounts with hierarchical namespace enabled](storage-auth-abac-attributes.md#all-data-operations-for-accounts-with-hierarchical-namespace-enabled) (if applicable) | +> | Attribute source | Resource | +> | Attribute | [Is Current Version](storage-auth-abac-attributes.md#is-current-version) | +> | Operator | [BoolEquals](../../role-based-access-control/conditions-format.md#boolean-comparison-operators) | +> | Value | True | + +### Example: Read current blob versions and a specific blob version + +This condition allows a user to read current blob versions as well as read blobs with a version ID of 2022-06-01T23:38:32.8883645Z. The user cannot read other blob versions. The [Version ID](storage-auth-abac-attributes.md#version-id) attribute is available only for storage accounts where hierarchical namespace is not enabled. You must add this condition to any role assignments that include the following action. @@ -837,7 +891,7 @@ You must add this condition to any role assignments that include the following a ( @Request[Microsoft.Storage/storageAccounts/blobServices/containers/blobs:versionId] DateTimeEquals '2022-06-01T23:38:32.8883645Z' OR - NOT Exists @Request[Microsoft.Storage/storageAccounts/blobServices/containers/blobs:versionId] + @Resource[Microsoft.Storage/storageAccounts/blobServices/containers/blobs:isCurrentVersion] BoolEquals true ) ) ``` @@ -856,14 +910,14 @@ Here are the settings to add this condition using the Azure portal. > | Value | <blobVersionId> | > | **Expression 2** | | > | Operator | Or | -> | Attribute source | Request | -> | Attribute | [Version ID](storage-auth-abac-attributes.md#version-id) | -> | Exists | [Checked](../../role-based-access-control/conditions-format.md#exists) | -> | Negate this expression | Checked | +> | Attribute source | Resource | +> | Attribute | [Is Current Version](storage-auth-abac-attributes.md#is-current-version) | +> | Operator | [BoolEquals](../../role-based-access-control/conditions-format.md#boolean-comparison-operators) | +> | Value | True | ### Example: Delete old blob versions -This condition allows a user to delete versions of a blob that are older than 06/01/2022 to perform clean up. +This condition allows a user to delete versions of a blob that are older than 06/01/2022 to perform clean up. The [Version ID](storage-auth-abac-attributes.md#version-id) attribute is available only for storage accounts where hierarchical namespace is not enabled. You must add this condition to any role assignments that include the following actions. @@ -904,7 +958,7 @@ Here are the settings to add this condition using the Azure portal. ### Example: Read current blob versions and any blob snapshots -This condition allows a user to read current blob versions and any blob snapshots. +This condition allows a user to read current blob versions and any blob snapshots. The [Version ID](storage-auth-abac-attributes.md#version-id) attribute is available only for storage accounts where hierarchical namespace is not enabled. The [Snapshot](storage-auth-abac-attributes.md#snapshot) attribute is available for storage accounts where hierarchical namespace is not enabled and currently in preview for storage accounts where hierarchical namespace is enabled. You must add this condition to any role assignments that include the following action. 
@@ -912,21 +966,40 @@ You must add this condition to any role assignments that include the following a > | Action | Notes | > | --- | --- | > | `Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read` | | +> | `Microsoft.Storage/storageAccounts/blobServices/containers/blobs/runAsSuperUser/action` | Add if role definition includes this action, such as Storage Blob Data Owner. | ![Diagram of condition showing read access to current blob versions and any blob snapshots.](./media/storage-auth-abac-examples/version-id-snapshot-blob-read.png) +Storage Blob Data Owner + ``` ( ( !(ActionMatches{'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read'} AND NOT SubOperationMatches{'Blob.List'}) + AND + !(ActionMatches{'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/runAsSuperUser/action'}) ) OR ( Exists @Request[Microsoft.Storage/storageAccounts/blobServices/containers/blobs:snapshot] OR - NOT Exists @Request[Microsoft.Storage/storageAccounts/blobServices/containers/blobs:versionId] + @Resource[Microsoft.Storage/storageAccounts/blobServices/containers/blobs:isCurrentVersion] BoolEquals true + ) +) +``` + +Storage Blob Data Reader, Storage Blob Data Contributor + +``` +( + ( + !(ActionMatches{'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read'} AND NOT SubOperationMatches{'Blob.List'}) + ) + OR + ( + Exists @Request[Microsoft.Storage/storageAccounts/blobServices/containers/blobs:snapshot] OR - @Resource[Microsoft.Storage/storageAccounts:isHnsEnabled] BoolEquals true + @Resource[Microsoft.Storage/storageAccounts/blobServices/containers/blobs:isCurrentVersion] BoolEquals true ) ) ``` @@ -938,20 +1011,14 @@ Here are the settings to add this condition using the Azure portal. > [!div class="mx-tableFixed"] > | Condition #1 | Setting | > | --- | --- | -> | Actions | [Read a blob](storage-auth-abac-attributes.md#read-a-blob) | +> | Actions | [Read a blob](storage-auth-abac-attributes.md#read-a-blob)
                    [All data operations for accounts with hierarchical namespace enabled](storage-auth-abac-attributes.md#all-data-operations-for-accounts-with-hierarchical-namespace-enabled) (if applicable) | > | Attribute source | Request | > | Attribute | [Snapshot](storage-auth-abac-attributes.md#snapshot) | > | Exists | [Checked](../../role-based-access-control/conditions-format.md#exists) | > | **Expression 2** | | > | Operator | Or | -> | Attribute source | Request | -> | Attribute | [Version ID](storage-auth-abac-attributes.md#version-id) | -> | Exists | [Checked](../../role-based-access-control/conditions-format.md#exists) | -> | Negate this expression | Checked | -> | **Expression 3** | | -> | Operator | Or | > | Attribute source | Resource | -> | Attribute | [Is hierarchical namespace enabled](storage-auth-abac-attributes.md#is-hierarchical-namespace-enabled) | +> | Attribute | [Is Current Version](storage-auth-abac-attributes.md#is-current-version) | > | Operator | [BoolEquals](../../role-based-access-control/conditions-format.md#boolean-comparison-operators) | > | Value | True | diff --git a/articles/storage/common/storage-configure-connection-string.md b/articles/storage/common/storage-configure-connection-string.md index 6a992551f6d47..debf0f993331c 100644 --- a/articles/storage/common/storage-configure-connection-string.md +++ b/articles/storage/common/storage-configure-connection-string.md @@ -7,7 +7,7 @@ author: tamram ms.service: storage ms.topic: how-to -ms.date: 04/14/2022 +ms.date: 05/26/2022 ms.author: tamram ms.reviewer: nachakra ms.subservice: common diff --git a/articles/storage/common/storage-network-security.md b/articles/storage/common/storage-network-security.md index e9e95f3fbcb56..f5b668184dab4 100644 --- a/articles/storage/common/storage-network-security.md +++ b/articles/storage/common/storage-network-security.md @@ -458,15 +458,12 @@ You can manage IP network rules for storage accounts through the Azure portal, P -## Grant access from Azure resource instances (preview) +## Grant access from Azure resource instances In some cases, an application might depend on Azure resources that cannot be isolated through a virtual network or an IP address rule. However, you'd still like to secure and restrict storage account access to only your application's Azure resources. You can configure storage accounts to allow access to specific resource instances of some Azure services by creating a resource instance rule. The types of operations that a resource instance can perform on storage account data is determined by the Azure role assignments of the resource instance. Resource instances must be from the same tenant as your storage account, but they can belong to any subscription in the tenant. -> [!NOTE] -> This feature is in public preview and is available in all public cloud regions. - ### [Portal](#tab/azure-portal) You can add or remove resource network rules in the Azure portal. @@ -494,22 +491,6 @@ You can use PowerShell commands to add or remove resource network rules. > [!IMPORTANT] > Be sure to [set the default rule](#change-the-default-network-access-rule) to **deny**, or network rules have no effect. -#### Install the preview module - -Install the latest version of the PowershellGet module. Then, close and reopen the PowerShell console. - -```powershell -install-Module PowerShellGet –Repository PSGallery –Force -``` - -Install **Az. Storage** preview module. 
- -```powershell -Install-Module Az.Storage -Repository PsGallery -RequiredVersion 3.0.1-preview -AllowClobber -AllowPrerelease -Force -``` - -For more information about how to install PowerShell modules, see [Install the Azure PowerShell module](/powershell/azure/install-az-ps) - #### Grant access Add a network rule that grants access from a resource instance. @@ -574,24 +555,6 @@ $rule.ResourceAccessRules You can use Azure CLI commands to add or remove resource network rules. -#### Install the preview extension - -1. Open the [Azure Cloud Shell](../../cloud-shell/overview.md), or if you've [installed](/cli/azure/install-azure-cli) the Azure CLI locally, open a command console application such as Windows PowerShell. - -2. Then, verify that the version of Azure CLI that you have installed is `2.13.0` or higher by using the following command. - - ```azurecli - az --version - ``` - - If your version of Azure CLI is lower than `2.13.0`, then install a later version. See [Install the Azure CLI](/cli/azure/install-azure-cli). - -3. Type the following command to install the preview extension. - - ```azurecli - az extension add -n storage-preview - ``` - #### Grant access Add a network rule that grants access from a resource instance. @@ -674,7 +637,7 @@ If your account does not have the hierarchical namespace feature enabled on it, You can use the same technique for an account that has the hierarchical namespace feature enable on it. However, you don't have to assign an Azure role if you add the managed identity to the access control list (ACL) of any directory or blob contained in the storage account. In that case, the scope of access for the instance corresponds to the directory or file to which the managed identity has been granted access. You can also combine Azure roles and ACLs together. To learn more about how to combine them together to grant access, see [Access control model in Azure Data Lake Storage Gen2](../blobs/data-lake-storage-access-control-model.md). > [!TIP] -> The recommended way to grant access to specific resources is to use resource instance rules. To grant access to specific resource instances, see the [Grant access from Azure resource instances (preview)](#grant-access-specific-instances) section of this article. +> The recommended way to grant access to specific resources is to use resource instance rules. To grant access to specific resource instances, see the [Grant access from Azure resource instances](#grant-access-specific-instances) section of this article. | Service | Resource Provider Name | Purpose | | :----------------------------- | :------------------------------------- | :----------------- | diff --git a/articles/storage/common/storage-redundancy.md b/articles/storage/common/storage-redundancy.md index 950681ce60211..b795feab9d23d 100644 --- a/articles/storage/common/storage-redundancy.md +++ b/articles/storage/common/storage-redundancy.md @@ -7,7 +7,7 @@ author: tamram ms.service: storage ms.topic: conceptual -ms.date: 05/12/2022 +ms.date: 05/24/2022 ms.author: tamram ms.subservice: common ms.custom: references_regions @@ -108,6 +108,7 @@ Premium block blobs are available in a subset of Azure regions: - (North America) East US - (North America) East US 2 - (North America) West US 2 +- (North America) South Central US - (South America) Brazil South #### Premium file share accounts @@ -168,7 +169,12 @@ Only standard general-purpose v2 storage accounts support GZRS. 
GZRS is supporte ## Read access to data in the secondary region -Geo-redundant storage (with GRS or GZRS) replicates your data to another physical location in the secondary region to protect against regional outages. However, that data is available to be read only if the customer or Microsoft initiates a failover from the primary to secondary region. When you enable read access to the secondary region, your data is always available to be read, including in a situation where the primary region becomes unavailable. For read access to the secondary region, enable read-access geo-redundant storage (RA-GRS) or read-access geo-zone-redundant storage (RA-GZRS). +Geo-redundant storage (with GRS or GZRS) replicates your data to another physical location in the secondary region to protect against regional outages. With an account configured for GRS or GZRS, data in the secondary region is not directly accessible to users or applications, unless a failover occurs. The failover process updates the DNS entry provided by Azure Storage so that the secondary endpoint becomes the new primary endpoint for your storage account. During the failover process, your data is inaccessible. After the failover is complete, you can read and write data to the new primary region. For more information about failover and disaster recovery, see [How an account failover works](storage-disaster-recovery-guidance.md#how-an-account-failover-works). + +If your applications require high availability, then you can configure your storage account for read access to the secondary region. When you enable read access to the secondary region, then your data is always available to be read from the secondary, including in a situation where the primary region becomes unavailable. Read-access geo-redundant storage (RA-GRS) or read-access geo-zone-redundant storage (RA-GZRS) configurations permit read access to the secondary region. + +> [!CAUTION] +> Because data is replicated asynchronously from the primary to the secondary region, the secondary region is typically behind the primary region in terms of write operations. If a disaster were to strike the primary region, it's likely that some data would be lost. For more information about how to plan for potential data loss, see [Anticipate data loss](storage-disaster-recovery-guidance.md#anticipate-data-loss). > [!NOTE] > Azure Files does not support read-access geo-redundant storage (RA-GRS) or read-access geo-zone-redundant storage (RA-GZRS). diff --git a/articles/storage/common/storage-ref-azcopy-bench.md b/articles/storage/common/storage-ref-azcopy-bench.md index d82f453a3f1ca..be13c21b455a5 100644 --- a/articles/storage/common/storage-ref-azcopy-bench.md +++ b/articles/storage/common/storage-ref-azcopy-bench.md @@ -4,120 +4,99 @@ description: This article provides reference information for the azcopy bench co author: normesta ms.service: storage ms.topic: reference -ms.date: 07/24/2020 +ms.date: 05/26/2022 ms.author: normesta ms.subservice: common ms.reviewer: zezha-msft --- -# azcopy benchmark +# azcopy bench -Runs a performance benchmark by uploading or downloading test data to or from a specified destination. -For uploads, the test data is automatically generated. +Runs a performance benchmark by uploading or downloading test data to or from a specified destination. For uploads, the test data is automatically generated. The benchmark command runs the same process as 'copy', except that: - - Instead of requiring both source and destination parameters, benchmark takes just one. 
This is the - blob container, Azure Files Share, or Azure Data Lake Storage Gen2 file system that you want to upload to or download from. +- Instead of requiring both source and destination parameters, benchmark takes just one. This is the blob container, Azure Files Share, or Azure Data Lake Storage Gen2 file system that you want to upload to or download from. - - The 'mode' parameter describes whether AzCopy should test uploads to or downloads from given target. Valid values are 'Upload' +- The 'mode' parameter describes whether AzCopy should test uploads to or downloads from given target. Valid values are 'Upload' and 'Download'. Default value is 'Upload'. - - For upload benchmarks, the payload is described by command-line parameters, which control how many files are autogenerated and - how significant the files are. The generation process takes place entirely in memory. Disk is not used. +- For upload benchmarks, the payload is described by command line parameters, which control how many files are auto-generated and + how big they are. The generation process takes place entirely in memory. Disk isn't used. - - For downloads, the payload consists of whichever files already exist at the source. (See example below about how to generate +- For downloads, the payload consists of whichever files already exist at the source. (See example below about how to generate test files if needed). + +- Only a few of the optional parameters that are available to the copy command are supported. + +- Additional diagnostics are measured and reported. + +- For uploads, the default behavior is to delete the transferred data at the end of the test run. For downloads, the data is never actually saved locally. - - Only a few of the optional parameters that are available to the copy command are supported. - - - Additional diagnostics are measured and reported. - - - For uploads, the default behavior is to delete the transferred data at the end of the test run. For downloads, the data - is never saved locally. - -Benchmark mode will automatically tune itself to the number of parallel TCP connections that gives -the maximum throughput. It will display that number at the end. To prevent autotuning, set the -AZCOPY_CONCURRENCY_VALUE environment variable to a specific number of connections. +Benchmark mode will automatically tune itself to the number of parallel TCP connections that gives the maximum throughput. It will display that number at the end. To prevent auto-tuning, set the AZCOPY_CONCURRENCY_VALUE environment variable to a specific number of connections. All the usual authentication types are supported. However, the most convenient approach for benchmarking upload is typically to create an empty container with a SAS token and use SAS authentication. (Download mode requires a set of test data to be present in the target container.) - -## Related conceptual articles - -- [Get started with AzCopy](storage-use-azcopy-v10.md) -- [Optimize the performance of AzCopy v10 with Azure Storage](storage-use-azcopy-optimize.md) - -## Examples - + ```azcopy -azcopy benchmark [destination] [flags] +azcopy bench [destination] [flags] ``` -Run a benchmark test with default parameters (suitable for benchmarking networks up to 1 Gbps):' +## Examples -```azcopy -azcopy bench "https://[account].blob.core.windows.net/[container]?"
-``` +Run an upload benchmark with default parameters (suitable for benchmarking networks up to 1 Gbps): -Run a benchmark test that uploads 100 files, each 2 GiB in size: (suitable for benchmarking on a fast network, for example, 10 Gbps):' +`azcopy bench "https://[account].blob.core.windows.net/[container]?"` -```azcopy -azcopy bench "https://[account].blob.core.windows.net/[container]?"--file-count 100 --size-per-file 2G -``` +Run a benchmark test that uploads 100 files, each 2 GiB in size: (suitable for benchmarking on a fast network, e.g. 10 Gbps):' -Run a benchmark test but use 50,000 files, each 8 MiB in size and compute their MD5 hashes (in the same way that the `--put-md5` flag does this -in the copy command). The purpose of `--put-md5` when benchmarking is to test whether MD5 computation affects throughput for the -selected file count and size: +`azcopy bench "https://[account].blob.core.windows.net/[container]?" --file-count 100 --size-per-file 2G` -```azcopy -azcopy bench --mode='Upload' "https://[account].blob.core.windows.net/[container]?" --file-count 50000 --size-per-file 8M --put-md5 -``` +Same as above, but use 50,000 files, each 8 MiB in size and compute their MD5 hashes (in the same way that the --put-md5 flag does this +in the copy command). The purpose of --put-md5 when benchmarking is to test whether MD5 computation affects throughput for the selected file count and size: + +`azcopy bench --mode='Upload' "https://[account].blob.core.windows.net/[container]?" --file-count 50000 --size-per-file 8M --put-md5` Run a benchmark test that downloads existing files from a target -```azcopy -azcopy bench --mode='Download' "https://[account].blob.core.windows.net/[container]?" --file-count 100 --delete-test-data=false -``` +`azcopy bench "https://[account].blob.core.windows.net/[container]?" --file-count 100 --delete-test-data=false` ## Options -**--blob-type** string Defines the type of blob at the destination. Used to allow benchmarking different blob types. Identical to the same-named parameter in the copy command (default "Detect"). +`--blob-type string` defines the type of blob at the destination. Used to allow benchmarking different blob types. Identical to the same-named parameter in the copy command (default "Detect") -**--block-size-mb** float Use this block size (specified in MiB). Default is automatically calculated based on file size. Decimal fractions are allowed - for example, 0.25. Identical to the same-named parameter in the copy command. +`--block-size-mb float` Use this block size (specified in MiB). Default is automatically calculated based on file size. Decimal fractions are allowed - for example, 0.25. Identical to the same-named parameter in the copy command -**--check-length** Check the length of a file on the destination after the transfer. If there is a mismatch between source and destination, the transfer is marked as failed. (default true) +`--check-length` Check the length of a file on the destination after the transfer. If there's a mismatch between source and destination, the transfer is marked as failed. (default true) -**--delete-test-data** If true, the benchmark data will be deleted at the end of the benchmark run. Set it to false if you want to keep the data at the destination - for example, to use it for manual tests outside benchmark mode (default true). +`--delete-test-data` If true, the benchmark data will be deleted at the end of the benchmark run. 
Set it to false if you want to keep the data at the destination - for example, to use it for manual tests outside benchmark mode (default true) -**--file-count** uint. The number of autogenerated data files to use (default 100). +`--file-count` (uint) number of auto-generated data files to use (default 100) -**--help** Help for bench +`-h`, `--help` help for bench -**--log-level** string Define the log verbosity for the log file, available levels: INFO(all requests/responses), WARNING(slow responses), ERROR(only failed requests), and NONE(no output logs). (default "INFO") +`--log-level` (string) define the log verbosity for the log file, available levels: INFO(all requests/responses), WARNING(slow responses), ERROR(only failed requests), and NONE(no output logs). (default "INFO") -**--mode** string Defines if Azcopy should test uploads or downloads from this target. Valid values are 'upload' and 'download'. Defaulted option is 'upload'. (default 'upload') +`--mode` (string) Defines if Azcopy should test uploads or downloads from this target. Valid values are 'upload' and 'download'. Defaulted option is 'upload'. (default "upload") -**--number-of-folders** uint If larger than 0, create folders to divide up the data. +`--number-of-folders` (uint) If larger than 0, create folders to divide up the data. -**--put-md5** Create an MD5 hash of each file, and save the hash as the Content-MD5 property of the destination blob/file. (By default the hash is NOT created.) Identical to the same-named parameter in the copy command. +`--put-md5` Create an MD5 hash of each file, and save the hash as the Content-MD5 property of the destination blob/file. (By default the hash is NOT created.) Identical to the same-named parameter in the copy command -**--size-per-file** string Size of each autogenerated data file. Must be a number immediately followed by K, M, or G. E.g. 12k or 200G (default "250M"). +`--size-per-file` (string) Size of each auto-generated data file. Must be a number immediately followed by K, M or G. E.g. 12k or 200G (default "250M") ## Options inherited from parent commands -**--cap-mbps float** Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped. +`--cap-mbps` (float) Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it's omitted, the throughput isn't capped. -**--output-type** string Format of the command's output. The choices include: text, json. The default value is 'text'. (default "text"). +`--output-type` (string) Format of the command's output. The choices include: text, json. The default value is 'text'. (default "text") -**--trusted-microsoft-suffixes** string Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. +`--trusted-microsoft-suffixes` (string) Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net;*.storage.azure.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. 
Separate multiple entries with semi-colons. ## See also diff --git a/articles/storage/common/storage-ref-azcopy-configuration-settings.md b/articles/storage/common/storage-ref-azcopy-configuration-settings.md index 0734b3fd358fe..8cfe3bdabf54b 100644 --- a/articles/storage/common/storage-ref-azcopy-configuration-settings.md +++ b/articles/storage/common/storage-ref-azcopy-configuration-settings.md @@ -4,7 +4,7 @@ description: This article provides reference information for AzCopy V10 configur author: normesta ms.service: storage ms.topic: reference -ms.date: 04/02/2021 +ms.date: 05/26/2022 ms.author: normesta ms.subservice: common ms.reviewer: zezha-msft diff --git a/articles/storage/common/storage-ref-azcopy-copy.md b/articles/storage/common/storage-ref-azcopy-copy.md index daa7f03e5bbda..82cfebae002fc 100644 --- a/articles/storage/common/storage-ref-azcopy-copy.md +++ b/articles/storage/common/storage-ref-azcopy-copy.md @@ -4,7 +4,7 @@ description: This article provides reference information for the azcopy copy com author: normesta ms.service: storage ms.topic: reference -ms.date: 09/01/2021 +ms.date: 05/26/2022 ms.author: normesta ms.subservice: common ms.reviewer: zezha-msft @@ -18,356 +18,311 @@ Copies source data to a destination location. Copies source data to a destination location. The supported directions are: - - local <-> Azure Blob (SAS or OAuth authentication) - - local <-> Azure Files (Share/directory SAS authentication) - - local <-> Azure Data Lake Storage Gen 2 (SAS, OAuth, or shared key authentication) - - Azure Blob (SAS or public) -> Azure Blob (SAS or OAuth authentication) - - Azure Blob (SAS or public) -> Azure Files (SAS) - - Azure Files (SAS) -> Azure Files (SAS) - - Azure Files (SAS) -> Azure Blob (SAS or OAuth authentication) - - Amazon Web Services (AWS) S3 (Access Key) -> Azure Block Blob (SAS or OAuth authentication) - - Google Cloud Storage (Service Account Key) -> Azure Block Blob (SAS or OAuth authentication) [Preview] +- local <-> Azure Blob (SAS or OAuth authentication) +- local <-> Azure Files (Share/directory SAS authentication) +- local <-> Azure Data Lake Storage Gen2 (SAS, OAuth, or SharedKey authentication) +- Azure Blob (SAS or public) -> Azure Blob (SAS or OAuth authentication) +- Azure Blob (SAS or public) -> Azure Files (SAS) +- Azure Files (SAS) -> Azure Files (SAS) +- Azure Files (SAS) -> Azure Blob (SAS or OAuth authentication) +- AWS S3 (Access Key) -> Azure Block Blob (SAS or OAuth authentication) +- Google Cloud Storage (Service Account Key) -> Azure Block Blob (SAS or OAuth authentication) -For more information, see the examples section of this article. +Refer to the examples for more information. -## Related conceptual articles +### Advanced -- [Get started with AzCopy](storage-use-azcopy-v10.md) -- [Tutorial: Migrate on-premises data to cloud storage with AzCopy](storage-use-azcopy-migrate-on-premises-data.md) -- [Transfer data with AzCopy and Blob storage](./storage-use-azcopy-v10.md#transfer-data) -- [Transfer data with AzCopy and file storage](storage-use-azcopy-files.md) +AzCopy automatically detects the content type of the files when uploading from the local disk, based on the file extension or content (if no extension is specified). -## Advanced - -AzCopy automatically detects the content type of the files based on the file extension or content (if no extension is specified) when you upload them from the local disk. 
- -The built-in lookup table is small, but on Unix, it is augmented by the local system's `mime.types` file(s) if they are available under one or more of these names: +The built-in lookup table is small, but on Unix, it's augmented by the local system's mime.types file(s) if available under one or more of these names: - /etc/mime.types - /etc/apache2/mime.types - /etc/apache/mime.types -On Windows, MIME types are extracted from the registry. This feature can be turned off with the help of a flag. For more information, see the flag section of this article. +On Windows, MIME types are extracted from the registry. This feature can be turned off with the help of a flag. Refer to the flag section. -If you set an environment variable by using the command line, that variable will be readable in your command-line history. Consider clearing variables that contain credentials from your command-line history. To keep variables from appearing in your history, you can use a script to prompt the user for their credentials, and to set the environment variable. +If you set an environment variable by using the command line, that variable will be readable in your command line history. Consider clearing variables that contain credentials from your command line history. To keep variables from appearing in your history, you can use a script to prompt the user for their credentials, and to set the environment variable. -``` +```azcopy azcopy copy [source] [destination] [flags] ``` ## Examples -Upload a single file by using OAuth authentication. If you have not yet logged into AzCopy, run the `azcopy login` command before you run the following command. +Upload a single file by using OAuth authentication. If you haven't yet logged into AzCopy, please run the azcopy login command before you run the following command. 
-```azcopy -azcopy cp "/path/to/file.txt" "https://[account].blob.core.windows.net/[container]/[path/to/blob]" -``` +`azcopy cp "/path/to/file.txt" "https://[account].blob.core.windows.net/[container]/[path/to/blob]"` -Same as above, but this time, also compute MD5 hash of the file content and save it as the blob's Content-MD5 property: +Same as above, but this time also compute MD5 hash of the file content and save it as the blob's Content-MD5 property: -```azcopy -azcopy cp "/path/to/file.txt" "https://[account].blob.core.windows.net/[container]/[path/to/blob]" --put-md5 -``` +`azcopy cp "/path/to/file.txt" "https://[account].blob.core.windows.net/[container]/[path/to/blob]" --put-md5` Upload a single file by using a SAS token: -```azcopy -azcopy cp "/path/to/file.txt" "https://[account].blob.core.windows.net/[container]/[path/to/blob]?[SAS]" -``` +`azcopy cp "/path/to/file.txt" "https://[account].blob.core.windows.net/[container]/[path/to/blob]?[SAS]"` Upload a single file by using a SAS token and piping (block blobs only): + +`cat "/path/to/file.txt" | azcopy cp "https://[account].blob.core.windows.net/[container]/[path/to/blob]?[SAS]" --from-to PipeBlob` -```azcopy -cat "/path/to/file.txt" | azcopy cp "https://[account].blob.core.windows.net/[container]/[path/to/blob]?[SAS] -``` +Upload a single file by using OAuth and piping (block blobs only): -Upload an entire directory by using a SAS token: +`cat "/path/to/file.txt" | azcopy cp "https://[account].blob.core.windows.net/[container]/[path/to/blob]" --from-to PipeBlob` -```azcopy -azcopy cp "/path/to/dir" "https://[account].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --recursive -``` +Upload an entire directory by using a SAS token: + +`azcopy cp "/path/to/dir" "https://[account].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --recursive=true` or -```azcopy -azcopy cp "/path/to/dir" "https://[account].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --recursive --put-md5 -``` +`azcopy cp "/path/to/dir" "https://[account].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --recursive=true --put-md5` Upload a set of files by using a SAS token and wildcard (*) characters: -```azcopy -azcopy cp "/path/*foo/*bar/*.pdf" "https://[account].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" -``` +`azcopy cp "/path/*foo/*bar/*.pdf" "https://[account].blob.core.windows.net/[container]/[path/to/directory]?[SAS]"` Upload files and directories by using a SAS token and wildcard (*) characters: -```azcopy -azcopy cp "/path/*foo/*bar*" "https://[account].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --recursive -``` +`azcopy cp "/path/*foo/*bar*" "https://[account].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --recursive=true` Upload files and directories to Azure Storage account and set the query-string encoded tags on the blob. 
-- To set tags {key = "bla bla", val = "foo"} and {key = "bla bla 2", val = "bar"}, use the following syntax : `azcopy cp "/path/*foo/*bar*" "https://[account].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --blob-tags="bla%20bla=foo&bla%20bla%202=bar"` - +- To set tags {key = "bla bla", val = "foo"} and {key = "bla bla 2", val = "bar"}, use the following syntax: +- `azcopy cp "/path/*foo/*bar*" "https://[account].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --blob-tags="bla%20bla=foo&bla%20bla%202=bar"` - Keys and values are URL encoded and the key-value pairs are separated by an ampersand('&') - - While setting tags on the blobs, there are additional permissions('t' for tags) in SAS without which the service will give authorization error back. -Download a single file by using OAuth authentication. If you have not yet logged into AzCopy, run the `azcopy login` command before you run the following command. +Download a single file by using OAuth authentication. If you haven't yet logged into AzCopy, please run the azcopy login command before you run the following command. -```azcopy -azcopy cp "https://[account].blob.core.windows.net/[container]/[path/to/blob]" "/path/to/file.txt" -``` +`azcopy cp "https://[account].blob.core.windows.net/[container]/[path/to/blob]" "/path/to/file.txt"` Download a single file by using a SAS token: -```azcopy -azcopy cp "https://[account].blob.core.windows.net/[container]/[path/to/blob]?[SAS]" "/path/to/file.txt" -``` +`azcopy cp "https://[account].blob.core.windows.net/[container]/[path/to/blob]?[SAS]" "/path/to/file.txt"` Download a single file by using a SAS token and then piping the output to a file (block blobs only): + +`azcopy cp "https://[account].blob.core.windows.net/[container]/[path/to/blob]?[SAS]" --from-to BlobPipe > "/path/to/file.txt"` -```azcopy -azcopy cp "https://[account].blob.core.windows.net/[container]/[path/to/blob]?[SAS]" > "/path/to/file.txt" -``` +Download a single file by using OAuth and then piping the output to a file (block blobs only): + +`azcopy cp "https://[account].blob.core.windows.net/[container]/[path/to/blob]" --from-to BlobPipe > "/path/to/file.txt"` Download an entire directory by using a SAS token: - -```azcopy -azcopy cp "https://[account].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" "/path/to/dir" --recursive -``` + +`azcopy cp "https://[account].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" "/path/to/dir" --recursive=true` A note about using a wildcard character (*) in URLs: There's only two supported ways to use a wildcard character in a URL. -- You can use one just after the final forward slash (/) of a URL. This use of the wildcard character copies all of the files in a directory directly to the destination without placing them into a subdirectory. +- You can use one just after the final forward slash (/) of a URL. This copies all of the files in a directory directly to the destination without placing them into a subdirectory. -- You can also use a wildcard character in the name of a container as long as the URL refers only to a container and not to a blob. You can use this approach to obtain files from a subset of containers. +- You can also use one in the name of a container as long as the URL refers only to a container and not to a blob. You can use this approach to obtain files from a subset of containers. Download the contents of a directory without copying the containing directory itself. 
-```azcopy
-azcopy cp "https://[srcaccount].blob.core.windows.net/[container]/[path/to/folder]/*?[SAS]" "/path/to/dir"
-```
+`azcopy cp "https://[srcaccount].blob.core.windows.net/[container]/[path/to/folder]/*?[SAS]" "/path/to/dir"`

Download an entire storage account.

-```azcopy
-azcopy cp "https://[srcaccount].blob.core.windows.net/" "/path/to/dir" --recursive
-```
+`azcopy cp "https://[srcaccount].blob.core.windows.net/" "/path/to/dir" --recursive`

Download a subset of containers within a storage account by using a wildcard symbol (*) in the container name.

-```azcopy
-azcopy cp "https://[srcaccount].blob.core.windows.net/[container*name]" "/path/to/dir" --recursive
-```
+`azcopy cp "https://[srcaccount].blob.core.windows.net/[container*name]" "/path/to/dir" --recursive`
+
+Download all the versions of a blob from Azure Storage to a local directory. Ensure that the source is a valid blob, that the destination is a local folder, and that `versionidsFile` is the path to a file in which each version ID is written on a separate line. All the specified versions are downloaded to the destination folder that you specify.
+
+`azcopy cp "https://[srcaccount].blob.core.windows.net/[containername]/[blobname]" "/path/to/dir" --list-of-versions="/another/path/to/dir/[versionidsFile]"`

Copy a single blob to another blob by using a SAS token.

-```azcopy
-azcopy cp "https://[srcaccount].blob.core.windows.net/[container]/[path/to/blob]?[SAS]" "https://[destaccount].blob.core.windows.net/[container]/[path/to/blob]?[SAS]"
-```
+`azcopy cp "https://[srcaccount].blob.core.windows.net/[container]/[path/to/blob]?[SAS]" "https://[destaccount].blob.core.windows.net/[container]/[path/to/blob]?[SAS]"`

-Copy a single blob to another blob by using a SAS token and an Auth token. You have to use a SAS token at the end of the source account URL, but the destination account doesn't need one if you log into AzCopy by using the `azcopy login` command.
+Copy a single blob to another blob by using a SAS token and an OAuth token. You have to use a SAS token at the end of the source account URL, but the destination account doesn't need one if you log into AzCopy by using the `azcopy login` command.

-```azcopy
-azcopy cp "https://[srcaccount].blob.core.windows.net/[container]/[path/to/blob]?[SAS]" "https://[destaccount].blob.core.windows.net/[container]/[path/to/blob]"
-```
+`azcopy cp "https://[srcaccount].blob.core.windows.net/[container]/[path/to/blob]?[SAS]" "https://[destaccount].blob.core.windows.net/[container]/[path/to/blob]"`

Copy one blob virtual directory to another by using a SAS token:

-```azcopy
-azcopy cp "https://[srcaccount].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" "https://[destaccount].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --recursive=true
-```
+`azcopy cp "https://[srcaccount].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" "https://[destaccount].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --recursive=true`

Copy all blob containers, directories, and blobs from storage account to another by using a SAS token:

-```azcopy
-azcopy cp "https://[srcaccount].blob.core.windows.net?[SAS]" "https://[destaccount].blob.core.windows.net?[SAS]" --recursive
-```
+`azcopy cp "https://[srcaccount].blob.core.windows.net?[SAS]" "https://[destaccount].blob.core.windows.net?[SAS]" --recursive=true`

-Copy a single object to Blob Storage from Amazon Web Services (AWS) S3 by using an access key and a SAS token.
First, set the environment variable `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` for AWS S3 source. +Copy a single object to Blob Storage from Amazon Web Services (AWS) S3 by using an access key and a SAS token. First, set the environment variable AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY for AWS S3 source. + +`azcopy cp "https://s3.amazonaws.com/[bucket]/[object]" "https://[destaccount].blob.core.windows.net/[container]/[path/to/blob]?[SAS]"` -```azcopy -azcopy cp "https://s3.amazonaws.com/[bucket]/[object]" "https://[destaccount].blob.core.windows.net/[container]/[path/to/blob]?[SAS]" -``` +Copy an entire directory to Blob Storage from AWS S3 by using an access key and a SAS token. First, set the environment variable AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY for AWS S3 source. -Copy an entire directory to Blob Storage from AWS S3 by using an access key and a SAS token. First, set the environment variable `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` for AWS S3 source. +`azcopy cp "https://s3.amazonaws.com/[bucket]/[folder]" "https://[destaccount].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --recursive=true` -```azcopy -azcopy cp "https://s3.amazonaws.com/[bucket]/[folder]" "https://[destaccount].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --recursive -``` +Refer to https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-folders.html to better understand the [folder] placeholder. - Refer to https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-folders.html to better understand the [folder] placeholder. +Copy all buckets to Blob Storage from Amazon Web Services (AWS) by using an access key and a SAS token. First, set the environment variable AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY for AWS S3 source. -Copy all buckets to Blob Storage from Amazon Web Services (AWS) by using an access key and a SAS token. First, set the environment variable `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` for AWS S3 source. +`azcopy cp "https://s3.amazonaws.com/" "https://[destaccount].blob.core.windows.net?[SAS]" --recursive=true` -```azcopy -azcopy cp "https://s3.amazonaws.com/" "https://[destaccount].blob.core.windows.net?[SAS]" --recursive -``` +Copy all buckets to Blob Storage from an Amazon Web Services (AWS) region by using an access key and a SAS token. First, set the environment variable AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY for AWS S3 source. -Copy all buckets to Blob Storage from an Amazon Web Services (AWS) region by using an access key and a SAS token. First, set the environment variable `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` for AWS S3 source. +`azcopy cp "https://s3-[region].amazonaws.com/" "https://[destaccount].blob.core.windows.net?[SAS]" --recursive=true` -```azcopy -- azcopy cp "https://s3-[region].amazonaws.com/" "https://[destaccount].blob.core.windows.net?[SAS]" --recursive -``` +Copy a subset of buckets by using a wildcard symbol (*) in the bucket name. Like the previous examples, you'll need an access key and a SAS token. Make sure to set the environment variable AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY for AWS S3 source. -Copy a subset of buckets by using a wildcard symbol (*) in the bucket name. Like the previous examples, you'll need an access key and a SAS token. Make sure to set the environment variable `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` for AWS S3 source. 
+`azcopy cp "https://s3.amazonaws.com/[bucket*name]/" "https://[destaccount].blob.core.windows.net?[SAS]" --recursive=true` -```azcopy -- azcopy cp "https://s3.amazonaws.com/[bucket*name]/" "https://[destaccount].blob.core.windows.net?[SAS]" --recursive -``` +Copy blobs from one blob storage to another and preserve the tags from source. To preserve tags, use the following syntax: + +`azcopy cp "https://[account].blob.core.windows.net/[source_container]/[path/to/directory]?[SAS]" "https://[account].blob.core.windows.net/[destination_container]/[path/to/directory]?[SAS]" --s2s-preserve-blob-tags=true` Transfer files and directories to Azure Storage account and set the given query-string encoded tags on the blob. -- To set tags {key = "bla bla", val = "foo"} and {key = "bla bla 2", val = "bar"}, use the following syntax : `azcopy cp "https://[account].blob.core.windows.net/[source_container]/[path/to/directory]?[SAS]" "https://[account].blob.core.windows.net/[destination_container]/[path/to/directory]?[SAS]" --blob-tags="bla%20bla=foo&bla%20bla%202=bar"` +- To set tags {key = "bla bla", val = "foo"} and {key = "bla bla 2", val = "bar"}, use the following syntax: + + `azcopy cp "https://[account].blob.core.windows.net/[source_container]/[path/to/directory]?[SAS]" "https://[account].blob.core.windows.net/[destination_container]/[path/to/directory]?[SAS]" --blob-tags="bla%20bla=foo&bla%20bla%202=bar"` - Keys and values are URL encoded and the key-value pairs are separated by an ampersand('&') - While setting tags on the blobs, there are additional permissions('t' for tags) in SAS without which the service will give authorization error back. -Copy a single object to Blob Storage from Google Cloud Storage by using a service account key and a SAS token. First, set the environment variable GOOGLE_APPLICATION_CREDENTIALS for Google Cloud Storage source. - -```azcopy -azcopy cp "https://storage.cloud.google.com/[bucket]/[object]" "https://[destaccount].blob.core.windows.net/[container]/[path/to/blob]?[SAS]" -``` +Copy a single object to Blob Storage from Google Cloud Storage (GCS) by using a service account key and a SAS token. First, set the environment variable GOOGLE_APPLICATION_CREDENTIALS for GCS source. + +`azcopy cp "https://storage.cloud.google.com/[bucket]/[object]" "https://[destaccount].blob.core.windows.net/[container]/[path/to/blob]?[SAS]"` -Copy an entire directory to Blob Storage from Google Cloud Storage by using a service account key and a SAS token. First, set the environment variable GOOGLE_APPLICATION_CREDENTIALS for Google Cloud Storage source. +Copy an entire directory to Blob Storage from Google Cloud Storage (GCS) by using a service account key and a SAS token. First, set the environment variable GOOGLE_APPLICATION_CREDENTIALS for GCS source. -```azcopy - - azcopy cp "https://storage.cloud.google.com/[bucket]/[folder]" "https://[destaccount].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --recursive=true -``` +`azcopy cp "https://storage.cloud.google.com/[bucket]/[folder]" "https://[destaccount].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --recursive=true` -Copy an entire bucket to Blob Storage from Google Cloud Storage by using a service account key and a SAS token. First, set the environment variable GOOGLE_APPLICATION_CREDENTIALS for Google Cloud Storage source. +Copy an entire bucket to Blob Storage from Google Cloud Storage (GCS) by using a service account key and a SAS token. 
First, set the environment variable GOOGLE_APPLICATION_CREDENTIALS for GCS source.

-```azcopy
-azcopy cp "https://storage.cloud.google.com/[bucket]" "https://[destaccount].blob.core.windows.net/?[SAS]" --recursive=true
-```
+`azcopy cp "https://storage.cloud.google.com/[bucket]" "https://[destaccount].blob.core.windows.net/?[SAS]" --recursive=true`

-Copy all buckets to Blob Storage from Google Cloud Storage by using a service account key and a SAS token. First, set the environment variables GOOGLE_APPLICATION_CREDENTIALS and GOOGLE_CLOUD_PROJECT=<`project-id`> for GCS source
+Copy all buckets to Blob Storage from Google Cloud Storage (GCS) by using a service account key and a SAS token. First, set the environment variables GOOGLE_APPLICATION_CREDENTIALS and `GOOGLE_CLOUD_PROJECT=<project-id>` for the GCS source.

-```azcopy
- - azcopy cp "https://storage.cloud.google.com/" "https://[destaccount].blob.core.windows.net/?[SAS]" --recursive=true
-```
+`azcopy cp "https://storage.cloud.google.com/" "https://[destaccount].blob.core.windows.net/?[SAS]" --recursive=true`

-Copy a subset of buckets by using a wildcard symbol (*) in the bucket name from Google Cloud Storage by using a service account key and a SAS token for destination. First, set the environment variables GOOGLE_APPLICATION_CREDENTIALS and GOOGLE_CLOUD_PROJECT=<`project-id`> for the Google Cloud Storage source.
+Copy a subset of buckets by using a wildcard symbol (*) in the bucket name from Google Cloud Storage (GCS) by using a service account key and a SAS token for destination. First, set the environment variables GOOGLE_APPLICATION_CREDENTIALS and `GOOGLE_CLOUD_PROJECT=<project-id>` for the GCS source.

-```azcopy
-azcopy cp "https://storage.cloud.google.com/[bucket*name]/" "https://[destaccount].blob.core.windows.net/?[SAS]" --recursive=true
-```
+`azcopy cp "https://storage.cloud.google.com/[bucket*name]/" "https://[destaccount].blob.core.windows.net/?[SAS]" --recursive=true`

## Options

-**--backup** Activates Windows' SeBackupPrivilege for uploads, or SeRestorePrivilege for downloads, to allow AzCopy to see and read all files, regardless of their file system permissions, and to restore all permissions. Requires that the account running AzCopy already has these permissions (for example, has Administrator rights or is a member of the `Backup Operators` group). This flag activates privileges that the account already has.
+`--as-subdir` True by default. Places folder sources as subdirectories under the destination. (default true)
+
+`--backup` Activates Windows' SeBackupPrivilege for uploads, or SeRestorePrivilege for downloads, to allow AzCopy to see and read all files, regardless of their file system permissions, and to restore all permissions. Requires that the account running AzCopy already has these permissions (for example, has Administrator rights or is a member of the 'Backup Operators' group). This flag activates privileges that the account already has.
+
+`--blob-tags` (string) Set tags on blobs to categorize data in your storage account.

-**--blob-tags** string Set tags on blobs to categorize data in your storage account.

+`--blob-type` (string) Defines the type of blob at the destination. This is used for uploading blobs and when copying between accounts (default 'Detect'). Valid values include 'Detect', 'BlockBlob', 'PageBlob', and 'AppendBlob'. When copying between accounts, a value of 'Detect' causes AzCopy to use the type of source blob to determine the type of the destination blob.
When uploading a file, 'Detect' determines if the file is a VHD or a VHDX file based on the file extension. If the file is either a VHD or VHDX file, AzCopy treats the file as a page blob. (default "Detect") -**--blob-type** string Defines the type of blob at the destination. This is used for uploading blobs and when copying between accounts (default `Detect`). Valid values include `Detect`, `BlockBlob`, `PageBlob`, and `AppendBlob`. When copying between accounts, a value of `Detect` causes AzCopy to use the type of source blob to determine the type of the destination blob. When uploading a file, `Detect` determines if the file is a VHD or a VHDX file based on the file extension. If the file is ether a VHD or VHDX file, AzCopy treats the file as a page blob. (default "Detect") +`--block-blob-tier` (string) upload block blob to Azure Storage using this blob tier. (default "None") -**--block-blob-tier** string Upload block blob to Azure Storage using this blob tier. (default "None") +`--block-size-mb` (float) Use this block size (specified in MiB) when uploading to Azure Storage, and downloading from Azure Storage. The default value is automatically calculated based on file size. Decimal fractions are allowed (For example: 0.25). -**--block-size-mb** float Use this block size (specified in MiB) when uploading to Azure Storage, and downloading from Azure Storage. The default value is automatically calculated based on file size. Decimal fractions are allowed (For example: 0.25). +`--cache-control` (string) Set the cache-control header. Returned on download. -**--cache-control** string Set the cache-control header. Returned on download. +`--check-length` Check the length of a file on the destination after the transfer. If there's a mismatch between source and destination, the transfer is marked as failed. (default true) -**--check-length** Check the length of a file on the destination after the transfer. If there is a mismatch between source and destination, the transfer is marked as failed. (default value is `true`) +`--check-md5` (string) Specifies how strictly MD5 hashes should be validated when downloading. Only available when downloading. Available options: NoCheck, LogOnly, FailIfDifferent, FailIfDifferentOrMissing. (default 'FailIfDifferent') (default "FailIfDifferent") -**--check-md5** string Specifies how strictly MD5 hashes should be validated when downloading. Only available when downloading. Available options: `NoCheck`, `LogOnly`, `FailIfDifferent`, `FailIfDifferentOrMissing`. (default `FailIfDifferent`) (default "FailIfDifferent") +`--content-disposition` (string) Set the content-disposition header. Returned on download. -**--content-disposition** string Set the content-disposition header. Returned on download. +`--content-encoding` (string) Set the content-encoding header. Returned on download. -**--content-encoding** string Set the content-encoding header. Returned on download. +`--content-language` (string) Set the content-language header. Returned on download. -**--content-language** string Set the content-language header. Returned on download. +`--content-type` (string) Specifies the content type of the file. Implies no-guess-mime-type. Returned on download. -**--content-type** string Specifies the content type of the file. Implies no-guess-mime-type. Returned on download. +`--cpk-by-name` (string) Client provided key by name that lets clients making requests against Azure Blob storage an option to provide an encryption key on a per-request basis. 
Provided key name will be fetched from Azure Key Vault and will be used to encrypt the data -**--cpk-by-name** string Client provided key by name let clients making requests against Azure Blob Storage an option to provide an encryption key on a per-request basis. Provided key name will be fetched from Azure Key Vault and will be used to encrypt the data. +`--cpk-by-value` Client provided key by name that let clients making requests against Azure Blob storage an option to provide an encryption key on a per-request basis. Provided key and its hash will be fetched from environment variables -**--cpk-by-value** Client provided key by name let clients making requests against Azure Blob Storage an option to provide an encryption key on a per-request basis. Provided key and its hash will be fetched from environment variables. +`--decompress` Automatically decompress files when downloading, if their content-encoding indicates that they're compressed. The supported content-encoding values are 'gzip' and 'deflate'. File extensions of '.gz'/'.gzip' or '.zz' aren't necessary, but will be removed if present. -**--decompress** Automatically decompress files when downloading, if their content-encoding indicates that they are compressed. The supported content-encoding values are `gzip` and `deflate`. File extensions of `.gz`/`.gzip` or `.zz` aren't necessary, but will be removed if present. +`--disable-auto-decoding` False by default to enable automatic decoding of illegal chars on Windows. Can be set to true to disable automatic decoding. -**--dry-run** Prints the file paths that would be copied by this command. This flag does not copy the actual files. +`--dry-run` Prints the file paths that would be copied by this command. This flag doesn't copy the actual files. -**--disable-auto-decoding** False by default to enable automatic decoding of illegal chars on Windows. Can be set to `true` to disable automatic decoding. +`--exclude-attributes` (string) (Windows only) Exclude files whose attributes match the attribute list. For example: A;S;R -**--exclude-attributes** string (Windows only) Excludes files whose attributes match the attribute list. For example: A;S;R +`--exclude-blob-type` (string) Optionally specifies the type of blob (BlockBlob/ PageBlob/ AppendBlob) to exclude when copying blobs from the container or the account. Use of this flag isn't applicable for copying data from non azure-service to service. More than one blob should be separated by ';'. -**--exclude-blob-type** string Optionally specifies the type of blob (`BlockBlob`/ `PageBlob`/ `AppendBlob`) to exclude when copying blobs from the container or the account. Use of this flag is not applicable for copying data from non-Azure service to service. More than one blob should be separated by `;`. +`--exclude-path` (string) Exclude these paths when copying. This option doesn't support wildcard characters (*). Checks relative path prefix(For example: myFolder;myFolder/subDirName/file.pdf). When used in combination with account traversal, paths don't include the container name. -**--exclude-path** string Exclude these paths when copying. This option does not support wildcard characters (*). Checks relative path prefix(For example: `myFolder;myFolder/subDirName/file.pdf`). When used in combination with account traversal, paths do not include the container name. +`--exclude-pattern` (string) Exclude these files when copying. This option supports wildcard characters (*) -**--exclude-pattern** string Exclude these files when copying. 
This option supports wildcard characters (*). +`--exclude-regex` (string) Exclude all the relative path of the files that align with regular expressions. Separate regular expressions with ';'. -**--exclude-regex** string Exclude all the relative path of the files that align with regular expressions. Separate regular expressions with ';'. +`--follow-symlinks` Follow symbolic links when uploading from local file system. -**--follow-symlinks** Follow symbolic links when uploading from local file system. +`--force-if-read-only` When overwriting an existing file on Windows or Azure Files, force the overwrite to work even if the existing file has its read-only attribute set -**--force-if-read-only** When overwriting an existing file on Windows or Azure Files, force the overwrite to work even if the existing file has -its read-only attribute set. +`--from-to` (string) Optionally specifies the source destination combination. For Example: LocalBlob, BlobLocal, LocalBlobFS. Piping: BlobPipe, PipeBlob -**--from-to** string Optionally specifies the source destination combination. For Example: `LocalBlob`, `BlobLocal`, `LocalBlobFS`. +`-h`, `--help` help for copy -**--help** help for copy. +`--include-after` (string) Include only those files modified on or after the given date/time. The value should be in ISO8601 format. If no timezone is specified, the value is assumed to be in the local timezone of the machine running AzCopy. E.g., `2020-08-19T15:04:00Z` for a UTC time, or `2020-08-19` for midnight (00:00) in the local timezone. As of AzCopy 10.5, this flag applies only to files, not folders, so folder properties won't be copied when using this flag with `--preserve-smb-info` or `--preserve-smb-permissions`. -**--include-after** string Include only those files modified on or after the given date/time. The value should be in ISO8601 format. If no timezone -is specified, the value is assumed to be in the local timezone of the machine running AzCopy. for example, `2020-08-19T15:04:00Z` for a UTC time, or `2020-08-19` for midnight (00:00) in the local timezone. As at AzCopy 10.5, this flag applies only to files, not folders, so folder properties won't be copied when using this flag with `--preserve-smb-info` or `--preserve-permissions`. +`--include-attributes` (string) (Windows only) Include files whose attributes match the attribute list. For example: A;S;R - **--include-before** string Include only those files modified before or on the given date/time. The value should be in ISO8601 format. If no timezone is specified, the value is assumed to be in the local timezone of the machine running AzCopy. E.g. `2020-08-19T15:04:00Z` for a UTC time, or `2020-08-19` for midnight (00:00) in the local timezone. As of AzCopy 10.7, this flag applies only to files, not folders, so folder properties won't be copied when using this flag with `--preserve-smb-info` or `--preserve-permissions`. +`--include-before` (string) Include only those files modified before or on the given date/time. The value should be in ISO8601 format. If no timezone is specified, the value is assumed to be in the local timezone of the machine running AzCopy. for example, `2020-08-19T15:04:00Z` for a UTC time, or `2020-08-19` for midnight (00:00) in the local timezone. As of AzCopy 10.7, this flag applies only to files, not folders, so folder properties won't be copied when using this flag with `--preserve-smb-info` or `--preserve-smb-permissions`. 
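As an illustrative sketch of how the two time-window flags above combine on an upload (the account, container, SAS, and date values are placeholders, not taken from this article), a copy limited to files modified within a specific window might look like:

```azcopy
azcopy cp "/path/to/dir" "https://[account].blob.core.windows.net/[container]?[SAS]" --recursive=true --include-after="2022-05-01T00:00:00Z" --include-before="2022-05-20T00:00:00Z"
```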
-**--include-attributes** string (Windows only) Includes files whose attributes match the attribute list. For example: A;S;R
+`--include-directory-stub` False by default to ignore directory stubs. Directory stubs are blobs with metadata `hdi_isfolder:true`. Setting this value to true will preserve directory stubs during transfers.

-**--include-path** string Include only these paths when copying. This option does not support wildcard characters (*). Checks relative path prefix (For example: `myFolder;myFolder/subDirName/file.pdf`).
+`--include-path` (string) Include only these paths when copying. This option doesn't support wildcard characters (*). Checks relative path prefix (For example: myFolder;myFolder/subDirName/file.pdf).

-**--include-directory-stub** False by default to ignore directory stubs. Directory stubs are blobs with metadata 'hdi_isfolder:true'. Setting value to true will preserve directory stubs during transfers.
+`--include-pattern` (string) Include only these files when copying. This option supports wildcard characters (*). Separate files by using a ';'.

-**--include-pattern** string Include only these files when copying. This option supports wildcard characters (*). Separate files by using a `;`.
+`--include-regex` (string) Include only the relative path of the files that align with regular expressions. Separate regular expressions with ';'.

-**--include-regex** string Include only the relative path of the files that align with regular expressions. Separate regular expressions with ';'.
+`--list-of-versions` (string) Specifies a file where each version ID is listed on a separate line. Ensure that the source points to a single blob and that all the version IDs specified in the file by using this flag belong to the source blob only. AzCopy will download the specified versions in the destination folder provided.

-**--list-of-versions** string Specifies a file where each version ID is listed on a separate line. Ensure that the source must point to a single blob and all the version IDs specified in the file using this flag must belong to the source blob only. AzCopy will download the specified versions in the destination folder provided. For more information, see [Download previous versions of a blob](./storage-use-azcopy-v10.md#transfer-data).
+`--log-level` (string) Define the log verbosity for the log file, available levels: INFO(all requests/responses), WARNING(slow responses), ERROR(only failed requests), and NONE(no output logs). (default 'INFO'). (default "INFO")

-**--log-level** string Define the log verbosity for the log file, available levels: INFO(all requests/responses), WARNING(slow responses), ERROR(only failed requests), and NONE(no output logs). (default `INFO`).
+`--metadata` (string) Upload to Azure Storage with these key-value pairs as metadata.

-**--metadata** string Upload to Azure Storage with these key-value pairs as metadata.
+`--no-guess-mime-type` Prevents AzCopy from detecting the content-type based on the extension or content of the file.

-**--no-guess-mime-type** Prevents AzCopy from detecting the content-type based on the extension or content of the file.
+`--overwrite` (string) Overwrite the conflicting files and blobs at the destination if this flag is set to true. (default 'true') Possible values include 'true', 'false', 'prompt', and 'ifSourceNewer'. For destinations that support folders, conflicting folder-level properties will be overwritten if this flag is 'true' or if a positive response is provided to the prompt.
(default "true") -**--overwrite** string Overwrite the conflicting files and blobs at the destination if this flag is set to true. (default `true`) Possible values include `true`, `false`, `prompt`, and `ifSourceNewer`. For destinations that support folders, conflicting folder-level properties will be overwritten this flag is `true` or if a positive response is provided to the prompt. (default "true") +`--page-blob-tier` (string) Upload page blob to Azure Storage using this blob tier. (default 'None'). (default "None") -**--page-blob-tier** string Upload page blob to Azure Storage using this blob tier. (default `None`). (default "None") +`--preserve-last-modified-time` Only available when destination is file system. -**--preserve-last-modified-time** Only available when destination is file system. +`--preserve-owner` Only has an effect in downloads, and only when `--preserve-smb-permissions` is used. If true (the default), the file Owner and Group are preserved in downloads. If set to false, -**--preserve-owner** Only has an effect in downloads, and only when `--preserve-permissions` is used. If true (the default), the file Owner and Group are preserved in downloads. If set to false,`--preserve-permissions` will still preserve ACLs but Owner and Group will be based on the user running AzCopy (default true) +`--preserve-smb-permissions` will still preserve ACLs but Owner and Group will be based on the user running AzCopy (default true) -**--preserve-smb-info** True by default. Preserves SMB property info (last write time, creation time, attribute bits) between SMB-aware resources (Windows and Azure Files). Only the attribute bits supported by Azure Files will be transferred; any others will be ignored. This flag applies to both files and folders, unless a file-only filter is specified (for example, include-pattern). The info transferred for folders is the same as that for files, except for Last Write Time that is never preserved for folders. +`--preserve-permissions` False by default. Preserves ACLs between aware resources (Windows and Azure Files, or Azure Data Lake Storage Gen2 to Azure Data Lake Storage Gen2). For Hierarchical Namespace accounts, you'll need a container SAS or OAuth token with Modify Ownership and Modify Permissions permissions. For downloads, you'll also need the `--backup` flag to restore permissions where the new Owner won't be the user running AzCopy. This flag applies to both files and folders, unless a file-only filter is specified (for example, include-pattern). -**--preserve-permissions** False by default. Preserves ACLs between aware resources (Windows and Azure Files, or Data Lake Storage Gen 2 to Data Lake Storage Gen 2). For accounts that have a hierarchical namespace, you will need a container SAS or OAuth token with Modify Ownership and Modify Permissions permissions. For downloads, you will also need the --backup flag to restore permissions where the new Owner will not be the user running AzCopy. This flag applies to both files and folders, unless a file-only filter is specified (e.g. include-pattern). +`--preserve-smb-info` For SMB-aware locations, flag will be set to true by default. Preserves SMB property info (last write time, creation time, attribute bits) between SMB-aware resources (Windows and Azure Files). Only the attribute bits supported by Azure Files will be transferred; any others will be ignored. This flag applies to both files and folders, unless a file-only filter is specified (for example, include-pattern). 
The info transferred for folders is the same as that for files, except for `Last Write Time` which is never preserved for folders. (default true) -**--put-md5** Create an MD5 hash of each file, and save the hash as the Content-MD5 property of the destination blob or file. (By default the hash is NOT created.) Only available when uploading. +`--put-md5` Create an MD5 hash of each file, and save the hash as the Content-MD5 property of the destination blob or file. (By default the hash is NOT created.) Only available when uploading. -**--recursive** Look into subdirectories recursively when uploading from local file system. +`--recursive` Look into subdirectories recursively when uploading from local file system. -**--s2s-detect-source-changed** Detect if the source file/blob changes while it is being read. (This parameter only applies to service-to-service copies, because the corresponding check is permanently enabled for uploads and downloads.) +`--s2s-detect-source-changed` Detect if the source file/blob changes while it is being read. (This parameter only applies to service-to-service copies, because the corresponding check is permanently enabled for uploads and downloads.) -**--s2s-handle-invalid-metadata** string Specifies how invalid metadata keys are handled. Available options: ExcludeIfInvalid, FailIfInvalid, RenameIfInvalid. (default `ExcludeIfInvalid`). +`--s2s-handle-invalid-metadata` (string) Specifies how invalid metadata keys are handled. Available options: ExcludeIfInvalid, FailIfInvalid, RenameIfInvalid. (default 'ExcludeIfInvalid'). (default "ExcludeIfInvalid") -**--s2s-preserve-access-tier** Preserve access tier during service to service copy. Refer to [Hot, Cool, and Archive access tiers for blob data](../blobs/access-tiers-overview.md) to ensure destination storage account supports setting access tier. In the cases that setting access tier is not supported, use s2sPreserveAccessTier=false to bypass copying access tier. (default `true`). +`--s2s-preserve-access-tier` Preserve access tier during service to service copy. Refer to [Azure Blob storage: hot, cool, and archive access tiers](/azure/storage/blobs/storage-blob-storage-tiers) to ensure destination storage account supports setting access tier. In the cases that setting access tier isn't supported, make sure to use s2sPreserveAccessTier=false to bypass copying access tier. (default true). (default true) -**--s2s-preserve-blob-tags** Preserve index tags during service to service transfer from one blob storage to another. +`--s2s-preserve-blob-tags` Preserve index tags during service to service transfer from one blob storage to another -**--s2s-preserve-properties** Preserve full properties during service to service copy. For AWS S3 and Azure File non-single file source, the list operation doesn't return full properties of objects and files. To preserve full properties, AzCopy needs to send one additional request per object or file. (default true) +`--s2s-preserve-properties` Preserve full properties during service to service copy. For AWS S3 and Azure File non-single file source, the list operation doesn't return full properties of objects and files. To +preserve full properties, AzCopy needs to send one more request per object or file. (default true) ## Options inherited from parent commands -**--cap-mbps float** Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped. 
+`--cap-mbps` (float) Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it's omitted, the throughput isn't capped. -**--output-type** string Format of the command's output. The choices include: text, json. The default value is `text`. (default "text") +`--output-type` (string) Format of the command's output. The choices include: text, json. The default value is 'text'. (default "text") -**--trusted-microsoft-suffixes** string Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is `*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net`. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. +`--trusted-microsoft-suffixes` (string) Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net;*.storage.azure.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. ## See also diff --git a/articles/storage/common/storage-ref-azcopy-doc.md b/articles/storage/common/storage-ref-azcopy-doc.md index 1085170d084e4..4a52ff6c89323 100644 --- a/articles/storage/common/storage-ref-azcopy-doc.md +++ b/articles/storage/common/storage-ref-azcopy-doc.md @@ -4,7 +4,7 @@ description: This article provides reference information for the azcopy doc comm author: normesta ms.service: storage ms.topic: reference -ms.date: 07/24/2020 +ms.date: 05/26/2022 ms.author: normesta ms.subservice: common ms.reviewer: zezha-msft @@ -24,25 +24,18 @@ By default, the files are stored in a folder named 'doc' inside the current dire azcopy doc [flags] ``` -## Related conceptual articles - -- [Get started with AzCopy](storage-use-azcopy-v10.md) -- [Transfer data with AzCopy and Blob storage](./storage-use-azcopy-v10.md#transfer-data) -- [Transfer data with AzCopy and file storage](storage-use-azcopy-files.md) - ## Options -|Option|Description| -|--|--| -|-h, --help|Shows help content for the doc command.| +`-h`, `--help` help for doc +`--output-location` (string) where to put the generated markdown files (default "./doc") ## Options inherited from parent commands -|Option|Description| -|---|---| -|--cap-mbps float|Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped.| -|--output-type string|Format of the command's output. The choices include: text, json. The default value is "text".| -|--trusted-microsoft-suffixes string | Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons.| +`--cap-mbps` (float) Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it's omitted, the throughput isn't capped. + +`--output-type` (string) Format of the command's output. The choices include: text, json. The default value is 'text'. 
(default "text").
+
+`--trusted-microsoft-suffixes` (string) Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net;*.storage.azure.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons.

## See also

diff --git a/articles/storage/common/storage-ref-azcopy-env.md b/articles/storage/common/storage-ref-azcopy-env.md
index 887fa23528715..4bbd78f7831a4 100644
--- a/articles/storage/common/storage-ref-azcopy-env.md
+++ b/articles/storage/common/storage-ref-azcopy-env.md
@@ -4,7 +4,7 @@ description: This article provides reference information for the azcopy env comm
author: normesta
ms.service: storage
ms.topic: reference
-ms.date: 07/24/2020
+ms.date: 05/26/2022
ms.author: normesta
ms.subservice: common
ms.reviewer: zezha-msft
@@ -16,33 +16,27 @@ Shows the environment variables that can configure AzCopy's behavior. For a comp

## Synopsis

+Shows the environment variables that you can use to configure the behavior of AzCopy.
+
+If you set an environment variable by using the command line, that variable will be readable in your command line history. Consider clearing variables that contain credentials from your command line history. To keep variables from appearing in your history, you can use a script to prompt the user for their credentials, and to set the environment variable.
+
```azcopy
azcopy env [flags]
```

-> [!IMPORTANT]
-> If you set an environment variable by using the command line, that variable will be readable in your command line history. Consider clearing variables that contain credentials from your command line history. To keep variables from appearing in your history, you can use a script to prompt the user for their credentials, and to set the environment variable.
-
-## Related conceptual articles
-
-- [Get started with AzCopy](storage-use-azcopy-v10.md)
-- [Transfer data with AzCopy and Blob storage](./storage-use-azcopy-v10.md#transfer-data)
-- [Transfer data with AzCopy and file storage](storage-use-azcopy-files.md)
-
## Options

-|Option|Description|
-|--|--|
-|-h, --help|Shows help content for the env command. |
-|--show-sensitive|Shows sensitive/secret environment variables.|
+`-h`, `--help` help for env
+`--show-sensitive` Shows sensitive/secret environment variables.

## Options inherited from parent commands

-|Option|Description|
-|---|---|
-|--cap-mbps float|Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped.|
-|--output-type string|Format of the command's output. The choices include: text, json. The default value is "text".|
-|--trusted-microsoft-suffixes string | Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons.|
+`--cap-mbps float` Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it's omitted, the throughput isn't capped.
+
+`--output-type` (string) Format of the command's output. The choices include: text, json. The default value is 'text'. (default "text")
+
+`--trusted-microsoft-suffixes` (string) Specifies additional domain suffixes where Azure Active Directory login tokens may be sent.
The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de; +*.core.usgovcloudapi.net;*.storage.azure.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. ## See also diff --git a/articles/storage/common/storage-ref-azcopy-jobs-clean.md b/articles/storage/common/storage-ref-azcopy-jobs-clean.md index 52b4d2d75c72f..b7ad763ea1355 100644 --- a/articles/storage/common/storage-ref-azcopy-jobs-clean.md +++ b/articles/storage/common/storage-ref-azcopy-jobs-clean.md @@ -4,7 +4,7 @@ description: This article provides reference information for the azcopy jobs cle author: normesta ms.service: storage ms.topic: reference -ms.date: 07/24/2020 +ms.date: 05/26/2022 ms.author: normesta ms.subservice: common ms.reviewer: zezha-msft @@ -14,7 +14,7 @@ ms.reviewer: zezha-msft Remove all log and plan files for all jobs -``` +```azcopy azcopy jobs clean [flags] ``` @@ -26,23 +26,22 @@ azcopy jobs clean [flags] ## Examples -``` - azcopy jobs clean --with-status=completed +```azcopy +azcopy jobs clean --with-status=completed ``` ## Options -**--help** Help for clean. - -**--with-status** string Only remove the jobs with this status, available values: `Canceled`, `Completed`, `Failed`, `InProgress`, `All` (default `All`) +`-h`, `--help` help for clean +`--with-status` (string) only remove the jobs with this status, available values: All, Canceled, Failed, Completed CompletedWithErrors, CompletedWithSkipped, CompletedWithErrorsAndSkipped (default "All") ## Options inherited from parent commands -**--cap-mbps float** Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped. +`--cap-mbps` (float) Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped. -**--output-type** string Format of the command's output. The choices include: text, json. The default value is 'text'. (default "text") +`--output-type` (string) Format of the command's output. The choices include: text, json. The default value is 'text'. (default "text") -**--trusted-microsoft-suffixes** string Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. +`--trusted-microsoft-suffixes` (string) Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net;*.storage.azure.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. 
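As a quick illustration of the `--with-status` filter described above (the status value is taken from the list of available values; adjust it to your scenario), removing only the jobs that completed with errors might look like:

```azcopy
azcopy jobs clean --with-status=CompletedWithErrors
```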
## See also diff --git a/articles/storage/common/storage-ref-azcopy-jobs-list.md b/articles/storage/common/storage-ref-azcopy-jobs-list.md index 0f7215b7f48ac..cbe25c2fab905 100644 --- a/articles/storage/common/storage-ref-azcopy-jobs-list.md +++ b/articles/storage/common/storage-ref-azcopy-jobs-list.md @@ -4,7 +4,7 @@ description: This article provides reference information for the azcopy jobs lis author: normesta ms.service: storage ms.topic: reference -ms.date: 07/24/2020 +ms.date: 05/26/2022 ms.author: normesta ms.subservice: common ms.reviewer: zezha-msft @@ -28,17 +28,16 @@ azcopy jobs list [flags] ## Options -|Option|Description| -|--|--| -|-h, --help|Show help content for the list command.| +`-h`, `--help` help for list +`--with-status` (string) List the jobs with given status, available values: All, Canceled, Failed, InProgress, Completed, CompletedWithErrors, CompletedWithFailures, CompletedWithErrorsAndSkipped (default "All") ## Options inherited from parent commands -|Option|Description| -|---|---| -|--cap-mbps float|Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped.| -|--output-type string|Format of the command's output. The choices include: text, json. The default value is "text".| -|--trusted-microsoft-suffixes string | Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons.| +`--cap-mbps` (float) Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped. + +`--output-type` (string) Format of the command's output. The choices include: text, json. The default value is 'text'. (default "text") + +`--trusted-microsoft-suffixes` (string) Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net;*.storage.azure.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. ## See also diff --git a/articles/storage/common/storage-ref-azcopy-jobs-remove.md b/articles/storage/common/storage-ref-azcopy-jobs-remove.md index 8b465f062ca22..5678ede639497 100644 --- a/articles/storage/common/storage-ref-azcopy-jobs-remove.md +++ b/articles/storage/common/storage-ref-azcopy-jobs-remove.md @@ -4,7 +4,7 @@ description: This article provides reference information for the azcopy jobs rem author: normesta ms.service: storage ms.topic: reference -ms.date: 07/24/2020 +ms.date: 05/26/2022 ms.author: normesta ms.subservice: common ms.reviewer: zezha-msft @@ -17,7 +17,7 @@ Remove all files associated with the given job ID. > [!NOTE] > You can customize the location where log and plan files are saved. See the [azcopy env](storage-ref-azcopy-env.md) command to learn more. -``` +```azcopy azcopy jobs remove [jobID] [flags] ``` @@ -29,21 +29,21 @@ azcopy jobs remove [jobID] [flags] ## Examples -``` +```azcopy azcopy jobs rm e52247de-0323-b14d-4cc8-76e0be2e2d44 ``` ## Options -**--help** Help for remove. +`--help` Help for remove. 
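As a sketch of how this command combines with the inherited `--output-type` flag described in the next section (the job ID shown is the same sample value used in the example above), requesting machine-readable output might look like:

```azcopy
azcopy jobs rm e52247de-0323-b14d-4cc8-76e0be2e2d44 --output-type json
```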
## Options inherited from parent commands -**--cap-mbps float** Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped. +`--cap-mbps` (float) Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it's omitted, the throughput isn't capped. -**--output-type** string Format of the command's output. The choices include: text, json. The default value is `text`. (default `text`) +`--output-type` (string) Format of the command's output. The choices include: text, json. The default value is 'text'. (default "text") -**--trusted-microsoft-suffixes** string Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. +`--trusted-microsoft-suffixes` (string) Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net;*.storage.azure.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. ## See also diff --git a/articles/storage/common/storage-ref-azcopy-jobs-resume.md b/articles/storage/common/storage-ref-azcopy-jobs-resume.md index e1188ea38101b..bae2965f2375d 100644 --- a/articles/storage/common/storage-ref-azcopy-jobs-resume.md +++ b/articles/storage/common/storage-ref-azcopy-jobs-resume.md @@ -4,7 +4,7 @@ description: This article provides reference information for the azcopy jobs res author: normesta ms.service: storage ms.topic: reference -ms.date: 07/24/2020 +ms.date: 05/26/2022 ms.author: normesta ms.subservice: common ms.reviewer: zezha-msft @@ -26,23 +26,25 @@ azcopy jobs resume [jobID] [flags] - [Transfer data with AzCopy and Blob storage](./storage-use-azcopy-v10.md#transfer-data) - [Transfer data with AzCopy and file storage](storage-use-azcopy-files.md) -## Options +### Options -|Option|Description| -|--|--| -|--destination-sas string|Destination SAS of the destination for given Job ID.| -|--exclude string|Filter: Exclude these failed transfer(s) when resuming the job. Files should be separated by ';'.| -|-h, --help|Show help content for the resume command.| -|--include string|Filter: only include these failed transfer(s) when resuming the job. Files should be separated by ';'.| -|--source-sas string |source SAS of the source for given Job ID.| +`--destination-sas` (string) destination SAS token of the destination for a given Job ID. -## Options inherited from parent commands +`--exclude` (string) Filter: exclude these failed transfer(s) when resuming the job. Files should be separated by ';'. -|Option|Description| -|---|---| -|--cap-mbps float|Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped.| -|--output-type string|Format of the command's output. The choices include: text, json. 
The default value is "text".| -|--trusted-microsoft-suffixes string |Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons.| +`-h`, `--help` help for resume + +`--include` (string) Filter: only include these failed transfer(s) when resuming the job. Files should be separated by ';'. + +`--source-sas` (string) Source SAS token of the source for a given Job ID. + +### Options inherited from parent commands + +`--cap-mbps` (float) Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped. + +`--output-type` (string) Format of the command's output. The choices include: text, json. The default value is 'text'. (default "text") + +`--trusted-microsoft-suffixes` (string) Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net;*.storage.azure.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. ## See also diff --git a/articles/storage/common/storage-ref-azcopy-jobs-show.md b/articles/storage/common/storage-ref-azcopy-jobs-show.md index 4ca3e4c76a6c7..4a7503caf6889 100644 --- a/articles/storage/common/storage-ref-azcopy-jobs-show.md +++ b/articles/storage/common/storage-ref-azcopy-jobs-show.md @@ -4,7 +4,7 @@ description: This article provides reference information for the azcopy jobs sho author: normesta ms.service: storage ms.topic: reference -ms.date: 07/24/2020 +ms.date: 05/26/2022 ms.author: normesta ms.subservice: common ms.reviewer: zezha-msft @@ -34,18 +34,16 @@ azcopy jobs show [jobID] [flags] ## Options -|Option|Description| -|--|--| -|-h, --help|Shows help content for the show command.| -|--with-status string|Only list the transfers of job with this status, available values: Started, Success, Failed| +`-h`, `--help` Help for show +`--with-status` (string) Only list the transfers of job with this status, available values: Started, Success, Failed. ## Options inherited from parent commands -|Option|Description| -|---|---| -|--cap-mbps float|Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped.| -|--output-type string|Format of the command's output. The choices include: text, json. The default value is "text".| -|--trusted-microsoft-suffixes string |Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons.| +`--cap-mbps` (float) Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped. + +`--output-type` (string) Format of the command's output. The choices include: text, json. 
The default value is 'text'. (default "text") + +`--trusted-microsoft-suffixes` (string) Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net;*.storage.azure.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. ## See also diff --git a/articles/storage/common/storage-ref-azcopy-jobs.md b/articles/storage/common/storage-ref-azcopy-jobs.md index 6c50fc6b704d9..5323170f59ba3 100644 --- a/articles/storage/common/storage-ref-azcopy-jobs.md +++ b/articles/storage/common/storage-ref-azcopy-jobs.md @@ -4,7 +4,7 @@ description: This article provides reference information for the azcopy jobs com author: normesta ms.service: storage ms.topic: reference -ms.date: 07/24/2020 +ms.date: 05/26/2022 ms.author: normesta ms.subservice: common ms.reviewer: zezha-msft @@ -28,17 +28,15 @@ azcopy jobs show [jobID] ## Options -|Option|Description| -|--|--| -|-h, --help|Show help content for the jobs command.| +`-h`, `--help` Help for jobs ## Options inherited from parent commands -|Option|Description| -|---|---| -|--cap-mbps float|Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped.| -|--output-type string|Format of the command's output. The choices include: text, json. The default value is "text".| -|--trusted-microsoft-suffixes string |Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons.| +`--cap-mbps` (float) Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped. + +`--output-type` (string) Format of the command's output. The choices include: text, json. The default value is 'text'. (default "text") + +`--trusted-microsoft-suffixes` (string) Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net;*.storage.azure.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. 
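As a hedged sketch of a typical workflow with this command group (the job ID is a placeholder), you might list recent jobs and then inspect only the failed transfers of one of them:

```azcopy
azcopy jobs list
azcopy jobs show e52247de-0323-b14d-4cc8-76e0be2e2d44 --with-status=Failed
```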
## See also diff --git a/articles/storage/common/storage-ref-azcopy-list.md b/articles/storage/common/storage-ref-azcopy-list.md index dd0ce14366a5e..e3ac239a47a87 100644 --- a/articles/storage/common/storage-ref-azcopy-list.md +++ b/articles/storage/common/storage-ref-azcopy-list.md @@ -4,7 +4,7 @@ description: This article provides reference information for the azcopy list com author: normesta ms.service: storage ms.topic: reference -ms.date: 09/21/2021 +ms.date: 05/26/2022 ms.author: normesta ms.subservice: common ms.reviewer: zezha-msft @@ -31,26 +31,28 @@ azcopy list [containerURL] [flags] ## Examples ```azcopy -azcopy list [containerURL] +azcopy list [containerURL] --properties [semicolon(;) separated list of attributes (LastModifiedTime, VersionId, BlobType, BlobAccessTier, ContentType, ContentEncoding, LeaseState, LeaseDuration, LeaseStatus) enclosed in double quotes (")] ``` ## Options -|Option|Description| -|--|--| -|-h, --help|Show help content for the list command.| -|--machine-readable|Lists file sizes in bytes.| -|--mega-units|Displays units in orders of 1000, not 1024.| -| --properties | delimiter (;) separated values of properties required in list output. | -|--running-tally|Counts the total number of files and their sizes.| +`-h`, `--help` Help for list + +`--machine-readable` Lists file sizes in bytes. + +`--mega-units` Displays units in orders of 1000, not 1024. + +`--properties` (string) delimiter (;) separated values of properties required in list output. + +`--running-tally` Counts the total number of files and their sizes. ## Options inherited from parent commands -|Option|Description| -|---|---| -|--cap-mbps float|Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped.| -|--output-type string|Format of the command's output. The choices include: text, json. The default value is "text".| -|--trusted-microsoft-suffixes string |Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons.| +`--cap-mbps` (float) Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it's omitted, the throughput isn't capped. + +`--output-type` (string) Format of the command's output. The choices include: text, json. The default value is 'text'. (default "text") + +`--trusted-microsoft-suffixes` (string) Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net;*.storage.azure.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. 
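+As an illustrative sketch (the account, container, and SAS values are placeholders), the flags described above can be combined to request specific properties and keep a running total of files and sizes:
+
+```azcopy
+azcopy list "https://[account].blob.core.windows.net/[container]?[SAS]" --properties "LastModifiedTime;BlobType" --running-tally
+```
+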
## See also diff --git a/articles/storage/common/storage-ref-azcopy-login-status.md b/articles/storage/common/storage-ref-azcopy-login-status.md new file mode 100644 index 0000000000000..4f89e612eeab2 --- /dev/null +++ b/articles/storage/common/storage-ref-azcopy-login-status.md @@ -0,0 +1,68 @@ +--- +title: azcopy login status | Microsoft Docs +description: This article provides reference information for the azcopy login status command. +author: normesta +ms.service: storage +ms.topic: reference +ms.date: 05/26/2022 +ms.author: normesta +ms.subservice: common +ms.reviewer: zezha-msft +--- + +# azcopy login status + +Prints the sign-in status of the current AzCopy session. + +## Synopsis + +Prints whether you're currently logged in to your Azure Storage account. + +```azcopy +azcopy login status [flags] +``` + +## Related conceptual articles + +- [Get started with AzCopy](storage-use-azcopy-v10.md) + +- [Transfer data with AzCopy and Blob storage](./storage-use-azcopy-v10.md#transfer-data) +- [Transfer data with AzCopy and file storage](storage-use-azcopy-files.md) + +### Options + +`--endpoint` Prints the Azure Active Directory endpoint that is being used in the current session. + +`-h`, `--help` Help for status + +`--tenant` Prints the Azure Active Directory tenant ID that is currently being used in the session. + +### Options inherited from parent commands + +`--aad-endpoint` (string) The Azure Active Directory endpoint to use. The default (https://login.microsoftonline.com) is correct for the global Azure cloud. Set this parameter when authenticating in a national cloud. Not needed for Managed Service Identity. + +`--application-id` (string) Application ID of user-assigned identity. Required for service principal auth. + +`--cap-mbps` (float) Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped. + +`--certificate-path` (string) Path to certificate for SPN authentication. Required for certificate-based service principal auth. + +`--identity` Log in using virtual machine's identity, also known as managed service identity (MSI). + +`--identity-client-id` (string) Client ID of user-assigned identity. + +`--identity-object-id` (string) Object ID of user-assigned identity. + +`--identity-resource-id` (string) Resource ID of user-assigned identity. + +`--output-type` (string) Format of the command's output. The choices include: text, json. The default value is 'text'. (default "text") + +`--service-principal` Log in via Service Principal Name (SPN) by using a certificate or a secret. The client secret or certificate password must be placed in the appropriate environment variable. +Type AzCopy env to see names and descriptions of environment variables. + +`--tenant-id` (string) The Azure Active Directory tenant ID to use for OAuth device interactive login. + +`--trusted-microsoft-suffixes` (string) Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net;*.storage.azure.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons.
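+For example (a sketch; the exact output text can vary between AzCopy versions), you can check the current session and also print the tenant ID that the session is using:
+
+```azcopy
+azcopy login status
+azcopy login status --tenant
+```
+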
+ +## See also + +- [azcopy](storage-ref-azcopy.md) diff --git a/articles/storage/common/storage-ref-azcopy-login.md b/articles/storage/common/storage-ref-azcopy-login.md index e439fc554bb15..8eecb49b07632 100644 --- a/articles/storage/common/storage-ref-azcopy-login.md +++ b/articles/storage/common/storage-ref-azcopy-login.md @@ -4,7 +4,7 @@ description: This article provides reference information for the azcopy login co author: normesta ms.service: storage ms.topic: reference -ms.date: 07/24/2020 +ms.date: 05/26/2022 ms.author: normesta ms.subservice: common ms.reviewer: zezha-msft @@ -40,88 +40,76 @@ azcopy login [flags] Log in interactively with default AAD tenant ID set to common: -```azcopy -azcopy login -``` +`azcopy login` Log in interactively with a specified tenant ID: -```azcopy -azcopy login --tenant-id "[TenantID]" -``` +`azcopy login --tenant-id "[TenantID]"` Log in by using the system-assigned identity of a Virtual Machine (VM): -```azcopy -azcopy login --identity -``` +`azcopy login --identity` Log in by using the user-assigned identity of a VM and a Client ID of the service identity: -```azcopy -azcopy login --identity --identity-client-id "[ServiceIdentityClientID]" -``` +`azcopy login --identity --identity-client-id "[ServiceIdentityClientID]"` Log in by using the user-assigned identity of a VM and an Object ID of the service identity: -```azcopy -azcopy login --identity --identity-object-id "[ServiceIdentityObjectID]" -``` +`azcopy login --identity --identity-object-id "[ServiceIdentityObjectID]"` Log in by using the user-assigned identity of a VM and a Resource ID of the service identity: -```azcopy -azcopy login --identity --identity-resource-id "/subscriptions//resourcegroups/myRG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myID" -``` +`azcopy login --identity --identity-resource-id "/subscriptions//resourcegroups/myRG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myID"` Log in as a service principal by using a client secret: + Set the environment variable AZCOPY_SPA_CLIENT_SECRET to the client secret for secret-based service principal auth. -```azcopy -azcopy login --service-principal --application-id -``` +`azcopy login --service-principal --application-id ` Log in as a service principal by using a certificate and its password: -Set the environment variable AZCOPY_SPA_CERT_PASSWORD to the certificate's password for cert-based service principal auth: +Set the environment variable `AZCOPY_SPA_CERT_PASSWORD` to the certificate's password for cert-based service principal auth. -```azcopy -azcopy login --service-principal --certificate-path /path/to/my/cert --application-id -``` +`azcopy login --service-principal --certificate-path /path/to/my/cert --application-id ` + +Treat /path/to/my/cert as a path to a PEM or PKCS12 file. AzCopy doesn't reach into the system cert store to obtain your certificate. `--certificate-path` is mandatory when doing cert-based service principal auth. -Treat `/path/to/my/cert` as a path to a PEM or PKCS12 file. AzCopy does not reach into the system cert store to obtain your certificate. +Use the `azcopy login status` subcommand to check the sign-in status of your current session: -`--certificate-path` is mandatory when doing cert-based service principal auth. +`azcopy login status` ## Options -**--aad-endpoint** string The Azure Active Directory endpoint to use. The default (https://login.microsoftonline.com) is correct for the global Azure cloud. Set this parameter when authenticating in a national cloud.
Not needed for Managed Service Identity. +`--aad-endpoint` (string) The Azure Active Directory endpoint to use. The default (https://login.microsoftonline.com) is correct for the global Azure cloud. Set this parameter when authenticating in a national cloud. Not needed for Managed Service Identity -**--application-id** string Application ID of user-assigned identity. Required for service principal auth. +`--application-id` (string) Application ID of user-assigned identity. Required for service principal auth. -**--certificate-path** string Path to certificate for SPN authentication. Required for certificate-based service principal auth. +`--certificate-path` (string) Path to certificate for SPN authentication. Required for certificate-based service principal auth. -**--help** help for the `azcopy login` command. +`-h`, `--help` Help for login -**--identity** Login using virtual machine's identity, also known as managed service identity (MSI). +`--identity` Log in using virtual machine's identity, also known as managed service identity (MSI). -**--identity-client-id** string Client ID of user-assigned identity. +`--identity-client-id` (string) Client ID of user-assigned identity. -**--identity-object-id** string Object ID of user-assigned identity. +`--identity-object-id` (string) Object ID of user-assigned identity. -**--identity-resource-id** string Resource ID of user-assigned identity. +`--identity-resource-id` (string) Resource ID of user-assigned identity. -**--service-principal** Log in via Service Principal Name (SPN) by using a certificate or a secret. The client secret or certificate password must be placed in the appropriate environment variable. Type AzCopy env to see names and descriptions of environment variables. +`--service-principal` Log in via Service Principal Name (SPN) by using a certificate or a secret. The client secret or certificate password must be placed in the appropriate environment variable. Type +AzCopy env to see names and descriptions of environment variables. -**--tenant-id** string The Azure Active Directory tenant ID to use for OAuth device interactive login. +`--tenant-id` (string) The Azure Active Directory tenant ID to use for OAuth device interactive login. ## Options inherited from parent commands -|Option|Description| -|---|---| -|--cap-mbps float|Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped.| -|--output-type string|Format of the command's output. The choices include: text, json. The default value is "text".| -|--trusted-microsoft-suffixes string |Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons.| +`--cap-mbps` (float) Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it's omitted, the throughput isn't capped. + +`--output-type` (string) Format of the command's output. The choices include: text, json. The default value is 'text'. (default "text") + +`--trusted-microsoft-suffixes` (string) Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. 
The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net;*.storage.azure.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. ## See also diff --git a/articles/storage/common/storage-ref-azcopy-logout.md b/articles/storage/common/storage-ref-azcopy-logout.md index 4cb9ee8978416..914118c97eff3 100644 --- a/articles/storage/common/storage-ref-azcopy-logout.md +++ b/articles/storage/common/storage-ref-azcopy-logout.md @@ -4,7 +4,7 @@ description: This article provides reference information for the azcopy logout c author: normesta ms.service: storage ms.topic: reference -ms.date: 07/24/2020 +ms.date: 05/26/2022 ms.author: normesta ms.subservice: common ms.reviewer: zezha-msft @@ -28,19 +28,17 @@ azcopy logout [flags] - [Transfer data with AzCopy and Blob storage](./storage-use-azcopy-v10.md#transfer-data) - [Transfer data with AzCopy and file storage](storage-use-azcopy-files.md) -## Options +### Options -|Option|Description| -|--|--| -|-h, --help|Show help content for the logout command.| +`-h`, `--help` help for logout -## Options inherited from parent commands +### Options inherited from parent commands -|Option|Description| -|---|---| -|--cap-mbps float|Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped.| -|--output-type string|Format of the command's output. The choices include: text, json. The default value is "text".| -|--trusted-microsoft-suffixes string |Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons.| +`--cap-mbps` (float) Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped. + +`--output-type` (string) Format of the command's output. The choices include: text, json. The default value is 'text'. (default "text") + +`--trusted-microsoft-suffixes` (string) Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net;*.storage.azure.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. ## See also diff --git a/articles/storage/common/storage-ref-azcopy-make.md b/articles/storage/common/storage-ref-azcopy-make.md index 8bdb52b17e7a8..6f8823dd54ee4 100644 --- a/articles/storage/common/storage-ref-azcopy-make.md +++ b/articles/storage/common/storage-ref-azcopy-make.md @@ -4,7 +4,7 @@ description: This article provides reference information for the azcopy make com author: normesta ms.service: storage ms.topic: reference -ms.date: 07/24/2020 +ms.date: 05/26/2022 ms.author: normesta ms.subservice: common ms.reviewer: zezha-msft @@ -36,18 +36,16 @@ azcopy make "https://[account-name].[blob,file,dfs].core.windows.net/[top-level- ## Options -|Option|Description| -|--|--| -|-h, --help|Show help content for the make command. 
| -|--quota-gb uint32|Specifies the maximum size of the share in gigabytes (GB), zero means you accept the file service's default quota.| +`-h`, `--help` help for make +`--quota-gb` (uint32) Specifies the maximum size of the share in gigabytes (GiB), 0 means you accept the file service's default quota. ## Options inherited from parent commands -|Option|Description| -|---|---| -|--cap-mbps float|Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped.| -|--output-type string|Format of the command's output. The choices include: text, json. The default value is "text".| -|--trusted-microsoft-suffixes string |Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons.| +`--cap-mbps` (float) Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped. + +`--output-type` (string) Format of the command's output. The choices include: text, json. The default value is 'text'. (default "text") + +`--trusted-microsoft-suffixes` (string) Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net;*.storage.azure.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. 
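+For instance (a sketch; the account and share names are placeholders, and you must either be signed in with `azcopy login` or append a SAS token to the URL), you could create an Azure file share with a 100-GiB quota:
+
+```azcopy
+azcopy make "https://[account-name].file.core.windows.net/[share-name]" --quota-gb 100
+```
+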
## See also diff --git a/articles/storage/common/storage-ref-azcopy-remove.md b/articles/storage/common/storage-ref-azcopy-remove.md index b80d223cc71c9..1cf8a622f2ba1 100644 --- a/articles/storage/common/storage-ref-azcopy-remove.md +++ b/articles/storage/common/storage-ref-azcopy-remove.md @@ -4,7 +4,7 @@ description: This article provides reference information for the azcopy remove c author: normesta ms.service: storage ms.topic: reference -ms.date: 09/21/2021 +ms.date: 05/26/2022 ms.author: normesta ms.subservice: common ms.reviewer: zezha-msft @@ -30,92 +30,77 @@ azcopy remove [resourceURL] [flags] Remove a single blob by using a SAS token: -```azcopy -azcopy rm "https://[account].blob.core.windows.net/[container]/[path/to/blob]?[SAS]" -``` +`azcopy rm "https://[account].blob.core.windows.net/[container]/[path/to/blob]?[SAS]"` Remove an entire virtual directory by using a SAS token: -```azcopy -azcopy rm "https://[account].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --recursive=true -``` +`azcopy rm "https://[account].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --recursive=true` Remove only the blobs inside of a virtual directory, but don't remove any subdirectories or blobs within those subdirectories: -```azcopy -azcopy rm "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]" --recursive=false -``` +`azcopy rm "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]" --recursive=false` -Remove a subset of blobs in a virtual directory (For example: remove only jpg and pdf files, or if the blob name is `exactName`): +Remove a subset of blobs in a virtual directory (For example: remove only jpg and pdf files, or if the blob name is "exactName"): -```azcopy -azcopy rm "https://[account].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --recursive=true --include-pattern="*.jpg;*.pdf;exactName" -``` +`azcopy rm "https://[account].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --recursive=true --include-pattern="*.jpg;*.pdf;exactName"` Remove an entire virtual directory but exclude certain blobs from the scope (For example: every blob that starts with foo or ends with bar): -```azcopy -azcopy rm "https://[account].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --recursive=true --exclude-pattern="foo*;*bar" -``` +`azcopy rm "https://[account].blob.core.windows.net/[container]/[path/to/directory]?[SAS]" --recursive=true --exclude-pattern="foo*;*bar"` + +Remove specified version IDs of a blob from Azure Storage. Ensure that the source is a valid blob and that `versionidsfile` is the path to a file where each version ID is written on a separate line. All the specified versions will be removed from Azure Storage.
+ +`azcopy rm "https://[srcaccount].blob.core.windows.net/[containername]/[blobname]" "/path/to/dir" --list-of-versions="/path/to/dir/[versionidsfile]"` Remove specific blobs and virtual directories by putting their relative paths (NOT URL-encoded) in a file: -```azcopy -azcopy rm "https://[account].blob.core.windows.net/[container]/[path/to/parent/dir]" --recursive=true --list-of-files=/usr/bar/list.txt -- file content: - dir1/dir2 - blob1 - blob2 -``` +`azcopy rm "https://[account].blob.core.windows.net/[container]/[path/to/parent/dir]" --recursive=true --list-of-files=/usr/bar/list.txt` Remove a single file from a Blob Storage account that has a hierarchical namespace (include/exclude not supported): -```azcopy -azcopy rm "https://[account].dfs.core.windows.net/[container]/[path/to/file]?[SAS]" -``` +`azcopy rm "https://[account].dfs.core.windows.net/[container]/[path/to/file]?[SAS]"` Remove a single directory from a Blob Storage account that has a hierarchical namespace (include/exclude not supported): -```azcopy -azcopy rm "https://[account].dfs.core.windows.net/[container]/[path/to/directory]?[SAS]" -``` +`azcopy rm "https://[account].dfs.core.windows.net/[container]/[path/to/directory]?[SAS]"` ## Options -**--delete-snapshots** string By default, the delete operation fails if a blob has snapshots. Specify `include` to remove the root blob and all its snapshots; alternatively specify `only` to remove only the snapshots but keep the root blob. +`--delete-snapshots` (string) By default, the delete operation fails if a blob has snapshots. Specify 'include' to remove the root blob and all its snapshots; alternatively specify 'only' to remove only the snapshots but keep the root blob. + +`--dry-run` Prints the path files that would be removed by the command. This flag doesn't trigger the removal of the files. -**--dry-run** Prints the path files that would be removed by the command. This flag does not trigger the removal of the files. +`--exclude-path` (string) Exclude these paths when removing. This option doesn't support wildcard characters (*). Checks relative path prefix. For example: myFolder;myFolder/subDirName/file.pdf -**--exclude-path** string Exclude these paths when removing. This option does not support wildcard characters (*). Checks relative path prefix. For example: `myFolder;myFolder/subDirName/file.pdf` +`--exclude-pattern` (string) Exclude files where the name matches the pattern list. For example: *.jpg;*.pdf;exactName -**--exclude-pattern** string Exclude files where the name matches the pattern list. For example: `*.jpg`;`*.pdf`;`exactName` +`--force-if-read-only` When deleting an Azure Files file or folder, force the deletion to work even if the existing object has its read-only attribute set -**--force-if-read-only** When deleting an Azure Files file or folder, force the deletion to work even if the existing object is has its read-only attribute set. +`--from-to` (string) Optionally specifies the source destination combination. For Example: BlobTrash, FileTrash, BlobFSTrash -**--from-to** string Optionally specifies the source destination combination. For Example: BlobTrash, FileTrash, BlobFSTrash +`-h`, `--help` help for remove -**--help** help for remove. +`--include-path` (string) Include only these paths when removing. This option doesn't support wildcard characters (*). Checks relative path prefix. For example: myFolder;myFolder/subDirName/file.pdf -**--include-path** string Include only these paths when removing. 
This option does not support wildcard characters (*). Checks relative path prefix. For example: `myFolder;myFolder/subDirName/file.pdf` +`--include-pattern` (string) Include only files where the name matches the pattern list. For example: *.jpg;*.pdf;exactName -**--include-pattern** string Include only files where the name matches the pattern list. For example: *`.jpg`;*`.pdf`;`exactName` +`--list-of-files` (string) Defines the location of a file which contains the list of files and directories to be deleted. The relative paths should be delimited by line breaks, and the paths should NOT be URL-encoded. -**--list-of-files** string Defines the location of a file, which contains the list of files and directories to be deleted. The relative paths should be -delimited by line breaks, and the paths should NOT be URL-encoded. +`--list-of-versions` (string) Specifies a file where each version ID is listed on a separate line. Ensure that the source must point to a single blob and all the version IDs specified in the file using this flag must belong to the source blob only. Specified version IDs of the given blob will get deleted from Azure Storage. -**--list-of-versions** string Specifies a file where each version ID is listed on a separate line. Ensure that the source must point to a single blob and all the version IDs specified in the file using this flag must belong to the source blob only. Specified version IDs of the given blob will get deleted from Azure Storage. +`--log-level` (string) Define the log verbosity for the log file. Available levels include: INFO(all requests/responses), WARNING(slow responses), ERROR(only failed requests), and NONE(no output logs). (default 'INFO') (default "INFO") -**--log-level** string Define the log verbosity for the log file. Available levels include: `INFO`(all requests/responses), `WARNING`(slow responses), `ERROR`(only failed requests), and `NONE`(no output logs). (default `INFO`) (default `INFO`) +`--permanent-delete` (string) This is a preview feature that PERMANENTLY deletes soft-deleted snapshots/versions. Possible values include 'snapshots', 'versions', 'snapshotsandversions', 'none'. (default "none") -**--recursive** Look into subdirectories recursively when syncing between directories. +`--recursive` Look into subdirectories recursively when syncing between directories. ## Options inherited from parent commands -|Option|Description| -|---|---| -|--cap-mbps float|Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped.| -|--output-type string|Format of the command's output. The choices include: text, json. The default value is "text".| -|--trusted-microsoft-suffixes string |Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons.| +`--cap-mbps float` Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it's omitted, the throughput isn't capped. + +`--output-type` (string) Format of the command's output. The choices include: text, json. The default value is 'text'. 
(default "text") + +`--trusted-microsoft-suffixes` (string) Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net;*.storage.azure.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. ## See also diff --git a/articles/storage/common/storage-ref-azcopy-sync.md b/articles/storage/common/storage-ref-azcopy-sync.md index d99ac6fae79a1..a212b1e465945 100644 --- a/articles/storage/common/storage-ref-azcopy-sync.md +++ b/articles/storage/common/storage-ref-azcopy-sync.md @@ -4,7 +4,7 @@ description: This article provides reference information for the azcopy sync com author: normesta ms.service: storage ms.topic: reference -ms.date: 09/01/2021 +ms.date: 05/26/2022 ms.author: normesta ms.subservice: common ms.reviewer: zezha-msft @@ -16,169 +16,139 @@ Replicates the source location to the destination location. This article provide ## Synopsis -The last modified times are used for comparison. The file is skipped if the last modified time in the destination is more recent. - -The supported pairs are: - -- Local <-> Azure Blob (either SAS or OAuth authentication can be used) +The last modified times are used for comparison. The file is skipped if the last modified time in the destination is more recent. The supported pairs are: + +- Local <-> Azure Blob / Azure File (either SAS or OAuth authentication can be used) - Azure Blob <-> Azure Blob (Source must include a SAS or is publicly accessible; either SAS or OAuth authentication can be used for destination) - Azure File <-> Azure File (Source must include a SAS or is publicly accessible; SAS authentication should be used for destination) -- Local <-> Azure File - Azure Blob <-> Azure File The sync command differs from the copy command in several ways: -1. By default, the recursive flag is true and sync copies all subdirectories. Sync only copies the top-level files inside a directory if the recursive flag is false. -2. When syncing between virtual directories, add a trailing slash to the path (refer to examples) if there's a blob with the same name as one of the virtual directories. -3. If the `--delete-destination` flag is set to true or prompt, then sync will delete files and blobs at the destination that are not present at the source. - -## Related conceptual articles - -- [Get started with AzCopy](storage-use-azcopy-v10.md) -- [Tutorial: Migrate on-premises data to cloud storage with AzCopy](storage-use-azcopy-migrate-on-premises-data.md) -- [Transfer data with AzCopy and Blob storage](./storage-use-azcopy-v10.md#transfer-data) -- [Transfer data with AzCopy and file storage](storage-use-azcopy-files.md) - -### Advanced + 1. By default, the recursive flag is true and sync copies all subdirectories. Sync only copies the top-level files inside a directory if the recursive flag is false. + 2. When syncing between virtual directories, add a trailing slash to the path (refer to examples) if there's a blob with the same name as one of the virtual directories. + 3. If the 'delete-destination' flag is set to true or prompt, then sync will delete files and blobs at the destination that aren't present at the source. -If you don't specify a file extension, AzCopy automatically detects the content type of the files when uploading from the local disk, based on the file extension or content (if no extension is specified). 
+Advanced: -The built-in lookup table is small, but on Unix, it's augmented by the local system's mime.types file(s) if available under one or more of these names: +Note that if you don't specify a file extension, AzCopy automatically detects the content type of the files when uploading from the local disk, based on the file extension or content. +The built-in lookup table is small but on Unix it's augmented by the local system's mime.types file(s) if available under one or more of these names: + - /etc/mime.types - /etc/apache2/mime.types - /etc/apache/mime.types On Windows, MIME types are extracted from the registry. +Also note that sync works off of the last modified times exclusively. So in the case of Azure File <-> Azure File, +the header field Last-Modified is used instead of x-ms-file-change-time, which means that metadata changes at the source can also trigger a full copy. + ```azcopy -azcopy sync [flags] +azcopy sync [flags] ``` ## Examples Sync a single file: -```azcopy -azcopy sync "/path/to/file.txt" "https://[account].blob.core.windows.net/[container]/[path/to/blob]" -``` +`azcopy sync "/path/to/file.txt" "https://[account].blob.core.windows.net/[container]/[path/to/blob]"` Same as above, but also compute an MD5 hash of the file content, and then save that MD5 hash as the blob's Content-MD5 property. -```azcopy -azcopy sync "/path/to/file.txt" "https://[account].blob.core.windows.net/[container]/[path/to/blob]" --put-md5 -``` +`azcopy sync "/path/to/file.txt" "https://[account].blob.core.windows.net/[container]/[path/to/blob]" --put-md5` Sync an entire directory including its subdirectories (note that recursive is by default on): -```azcopy -azcopy sync "/path/to/dir" "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]" -``` - +`azcopy sync "/path/to/dir" "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]"` or - -```azcopy -azcopy sync "/path/to/dir" "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]" --put-md5 -``` +`azcopy sync "/path/to/dir" "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]" --put-md5` Sync only the files inside of a directory but not subdirectories or the files inside of subdirectories: -```azcopy -azcopy sync "/path/to/dir" "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]" --recursive=false -``` +`azcopy sync "/path/to/dir" "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]" --recursive=false` -Sync a subset of files in a directory (For example: only jpg and pdf files, or if the file name is `exactName`): +Sync a subset of files in a directory (For example: only jpg and pdf files, or if the file name is "exactName"): -```azcopy -azcopy sync "/path/to/dir" "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]" --include-pattern="*.jpg;*.pdf;exactName" -``` +`azcopy sync "/path/to/dir" "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]" --include-pattern="*.jpg;*.pdf;exactName"` Sync an entire directory but exclude certain files from the scope (For example: every file that starts with foo or ends with bar): -```azcopy -azcopy sync "/path/to/dir" "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]" --exclude-pattern="foo*;*bar" -``` +`azcopy sync "/path/to/dir" "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]" --exclude-pattern="foo*;*bar"` Sync a single blob: -```azcopy -azcopy sync 
"https://[account].blob.core.windows.net/[container]/[path/to/blob]?[SAS]" "https://[account].blob.core.windows.net/[container]/[path/to/blob]" -``` +`azcopy sync "https://[account].blob.core.windows.net/[container]/[path/to/blob]?[SAS]" "https://[account].blob.core.windows.net/[container]/[path/to/blob]"` Sync a virtual directory: -```azcopy -azcopy sync "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]?[SAS]" "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]" --recursive=true -``` +`azcopy sync "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]?[SAS]" "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]" --recursive=true` Sync a virtual directory that has the same name as a blob (add a trailing slash to the path in order to disambiguate): -```azcopy -azcopy sync "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]/?[SAS]" "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]/" --recursive=true -``` +`azcopy sync "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]/?[SAS]" "https://[account].blob.core.windows.net/[container]/[path/to/virtual/dir]/" --recursive=true` -Sync an Azure File directory: +Sync an Azure File directory (same syntax as Blob): -```azcopy -azcopy sync "https://[account].file.core.windows.net/[share]/[path/to/dir]?[SAS]" "https://[account].file.core.windows.net/[share]/[path/to/dir]?[SAS]" --recursive=true -``` +`azcopy sync "https://[account].file.core.windows.net/[share]/[path/to/dir]?[SAS]" "https://[account].file.core.windows.net/[share]/[path/to/dir]" --recursive=true` -> [!NOTE] -> If include/exclude flags are used together, only files matching the include patterns would be looked at, but those matching the exclude patterns would be always be ignored. +Note: if include and exclude flags are used together, only files matching the include patterns are used, but those matching the exclude patterns are ignored. ## Options -**--block-size-mb** float Use this block size (specified in MiB) when uploading to Azure Storage or downloading from Azure Storage. Default is automatically calculated based on file size. Decimal fractions are allowed (For example: `0.25`). +`--block-size-mb` (float) Use this block size (specified in MiB) when uploading to Azure Storage or downloading from Azure Storage. Default is automatically calculated based on file size. Decimal fractions are allowed (For example: 0.25). + +`--check-md5` (string) Specifies how strictly MD5 hashes should be validated when downloading. This option is only available when downloading. Available values include: NoCheck, LogOnly, FailIfDifferent, FailIfDifferentOrMissing. (default 'FailIfDifferent'). (default "FailIfDifferent") -**--check-md5** string Specifies how strictly MD5 hashes should be validated when downloading. This option is only available when downloading. Available values include: `NoCheck`, `LogOnly`, `FailIfDifferent`, `FailIfDifferentOrMissing`. (default `FailIfDifferent`). (default `FailIfDifferent`) +`--cpk-by-name` (string) Client provided key by name let clients that make requests against Azure Blob storage an option to provide an encryption key on a per-request basis. Provided key name will be fetched from Azure Key Vault and will be used to encrypt the data -**--cpk-by-name** string Client provided key by name let clients making requests against Azure Blob Storage an option to provide an encryption key on a per-request basis. 
Provided key name will be fetched from Azure Key Vault and will be used to encrypt the data +`--cpk-by-value` Client provided key by name let clients that make requests against Azure Blob storage an option to provide an encryption key on a per-request basis. Provided key and its hash will be fetched from environment variables -**--cpk-by-value** Client provided key by name let clients making requests against Azure Blob Storage an option to provide an encryption key on a per-request basis. Provided key and its hash will be fetched from environment variables +`--delete-destination` (string) Defines whether to delete extra files from the destination that aren't present at the source. Could be set to true, false, or prompt. If set to prompt, the user will be asked a question before scheduling files and blobs for deletion. (default 'false'). (default "false") -**--delete-destination** string Defines whether to delete extra files from the destination that are not present at the source. Could be set to `true`, `false`, or `prompt`. If set to `prompt`, the user will be asked a question before scheduling files and blobs for deletion. (default `false`). (default `false`) +`--dry-run` Prints the path of files that would be copied or removed by the sync command. This flag doesn't copy or remove the actual files. -**--dry-run** Prints the path of files that would be copied or removed by the sync command. This flag does not copy or remove the actual files. +`--exclude-attributes` (string) (Windows only) Exclude files whose attributes match the attribute list. For example: A;S;R -**--exclude-attributes** string (Windows only) Excludes files whose attributes match the attribute list. For example: `A;S;R` +`--exclude-path` (string) Exclude these paths when comparing the source against the destination. This option doesn't support wildcard characters (*). Checks relative path prefix(For example: myFolder;myFolder/subDirName/file.pdf). -**--exclude-path** string Exclude these paths when comparing the source against the destination. This option does not support wildcard characters (*). Checks relative path prefix(For example: `myFolder;myFolder/subDirName/file.pdf`). +`--exclude-pattern` (string) Exclude files where the name matches the pattern list. For example: *.jpg;*.pdf;exactName -**--exclude-pattern** string Exclude files where the name matches the pattern list. For example: `*.jpg;*.pdf;exactName` +`--exclude-regex` (string) Exclude the relative path of the files that match with the regular expressions. Separate regular expressions with ';'. -**--exclude-regex** string Exclude the relative path of the files that match with the regular expressions. Separate regular expressions with ';'. +`--from-to` (string) Optionally specifies the source destination combination. For Example: LocalBlob, BlobLocal, LocalFile, FileLocal, BlobFile, FileBlob, etc. -**--help** help for sync. +`-h`, `--help` help for sync -**--include-attributes** string (Windows only) Includes only files whose attributes match the attribute list. For example: `A;S;R` +`--include-attributes` (string) (Windows only) Include only files whose attributes match the attribute list. For example: A;S;R -**--include-pattern** string Include only files where the name matches the pattern list. For example: `*.jpg;*.pdf;exactName` +`--include-pattern` (string) Include only files where the name matches the pattern list. 
For example: *.jpg;*.pdf;exactName -**--include-regex** string Include only the relative path of the files that align with regular expressions. Separate regular expressions with ';'. +`--include-regex` (string) Include the relative path of the files that match with the regular expressions. Separate regular expressions with ';'. -**--log-level** string Define the log verbosity for the log file, available levels: `INFO`(all requests and responses), `WARNING`(slow responses), `ERROR`(only failed requests), and `NONE`(no output logs). (default `INFO`). +`--log-level` (string) Define the log verbosity for the log file, available levels: INFO(all requests and responses), WARNING(slow responses), ERROR(only failed requests), and NONE(no output logs). (default INFO). (default "INFO") -**--mirror-mode** Disable last-modified-time based comparison and overwrites the conflicting files and blobs at the destination if this flag is set to `true`. Default is `false`. +`--mirror-mode` Disable last-modified-time based comparison and overwrites the conflicting files and blobs at the destination if this flag is set to true. Default is false -**--preserve-smb-info** True by default. Preserves SMB property info (last write time, creation time, attribute bits) between SMB-aware resources (Windows and Azure Files). This flag applies to both files and folders, unless a file-only filter is specified (for example, include-pattern). The info transferred for folders is the same as that for files, except for Last Write Time that is not preserved for folders. +`--preserve-permissions` False by default. Preserves ACLs between aware resources (Windows and Azure Files, or ADLS Gen 2 to ADLS Gen 2). For Hierarchical Namespace accounts, you'll need a container SAS or OAuth token with Modify Ownership and Modify Permissions permissions. For downloads, you'll also need the `--backup` flag to restore permissions where the new Owner won't be the user running AzCopy. This flag applies to both files and folders, unless a file-only filter is specified (for example, include-pattern). -**--preserve-permissions** False by default. Preserves ACLs between aware resources (Windows and Azure Files, or Data Lake Storage Gen 2 to Data Lake Storage Gen 2). For accounts that have a hierarchical namespace, you will need a container SAS or OAuth token with Modify Ownership and Modify Permissions permissions. For downloads, you will also need the --backup flag to restore permissions where the new Owner will not be the user running AzCopy. This flag applies to both files and folders, unless a file-only filter is specified (e.g. include-pattern). +`--preserve-smb-info` For SMB-aware locations, flag will be set to true by default. Preserves SMB property info (last write time, creation time, attribute bits) between SMB-aware resources (Azure Files). This flag applies to both files and folders, unless a file-only filter is specified (for example, include-pattern). The info transferred for folders is the same as that for files, except for Last Write Time that isn't preserved for folders. (default true) -**--put-md5** Create an MD5 hash of each file, and save the hash as the Content-MD5 property of the destination blob or file. (By default the hash is NOT created.) Only available when uploading. +`--put-md5` Create an MD5 hash of each file, and save the hash as the Content-MD5 property of the destination blob or file. (By default the hash is NOT created.) Only available when uploading. 
-**--recursive** `True` by default, look into subdirectories recursively when syncing between directories. (default `True`). +`--recursive` True by default, look into subdirectories recursively when syncing between directories. (default true). (default true) -**--s2s-preserve-access-tier** Preserve access tier during service to service copy. Refer to [Hot, Cool, and Archive access tiers for blob data](../blobs/access-tiers-overview.md) to ensure destination storage account supports setting access tier. In the cases that setting access tier is not supported, please use `--s2s-preserve-access-tier=false` to bypass copying access tier. (default `true`). +`--s2s-preserve-access-tier` Preserve access tier during service to service copy. Refer to [Azure Blob storage: hot, cool, and archive access tiers](../blobs/storage-blob-storage-tiers.md) to ensure destination storage account supports setting access tier. In the cases that setting access tier isn't supported, please use `s2sPreserveAccessTier=false` to bypass copying access tier. (default true). (default true) -**--s2s-preserve-blob-tags** Preserve index tags during service to service sync from one blob storage to another. +`--s2s-preserve-blob-tags` Preserve index tags during service to service sync from one blob storage to another ## Options inherited from parent commands -|Option|Description| -|---|---| -|--cap-mbps uint32|Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped.| -|--output-type string|Format of the command's output. The choices include: text, json. The default value is "text".| -|--trusted-microsoft-suffixes string |Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons.| +`--cap-mbps` (float) Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it's omitted, the throughput isn't capped. + +`--output-type` (string) Format of the command's output. The choices include: text, json. The default value is 'text'. (default "text") + +`--trusted-microsoft-suffixes` (string) Specifies other domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net;*.storage.azure.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. ## See also diff --git a/articles/storage/common/storage-ref-azcopy.md b/articles/storage/common/storage-ref-azcopy.md index 02452a38d6ea0..b60b4d42fa3ff 100644 --- a/articles/storage/common/storage-ref-azcopy.md +++ b/articles/storage/common/storage-ref-azcopy.md @@ -4,7 +4,7 @@ description: This article provides reference information for the azcopy command. author: normesta ms.service: storage ms.topic: reference -ms.date: 07/24/2020 +ms.date: 06/09/2022 ms.author: normesta ms.subservice: common ms.reviewer: zezha-msft @@ -12,6 +12,8 @@ ms.reviewer: zezha-msft # azcopy +Current version: 10.15.0 + AzCopy is a command-line tool that moves data into and out of Azure Storage. 
See the [Get started with AzCopy](storage-use-azcopy-v10.md) article to download AzCopy and learn about the ways that you can provide authorization credentials to the storage service. ## Synopsis @@ -29,13 +31,13 @@ To report issues or to learn more about the tool, see [https://github.com/Azure/ ## Options -**--cap-mbps** (float) Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped. +`--cap-mbps` (float) Caps the transfer rate, in megabits per second. Moment-by-moment throughput might vary slightly from the cap. If this option is set to zero, or it is omitted, the throughput isn't capped. -**--help** Help for azcopy +`-h`, `--help` help for azcopy -**--output-type** (string) Format of the command's output. The choices include: text, json. The default value is `text`. (default `text`) +`--output-type` (string) Format of the command's output. The choices include: text, json. The default value is 'text'. (default "text") -**--trusted-microsoft-suffixes** (string) Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. +`--trusted-microsoft-suffixes` (string) Specifies additional domain suffixes where Azure Active Directory login tokens may be sent. The default is '*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net;*.storage.azure.net'. Any listed here are added to the default. For security, you should only put Microsoft Azure domains here. Separate multiple entries with semi-colons. 
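+These global options can be combined with any subcommand. As an illustrative sketch (the local path, URL, and SAS are placeholders), the following upload caps throughput at 100 Mbps and emits JSON instead of text:
+
+```azcopy
+azcopy copy "/path/to/file.txt" "https://[account].blob.core.windows.net/[container]/[path/to/blob]?[SAS]" --cap-mbps 100 --output-type json
+```
+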
## See also @@ -52,6 +54,7 @@ To report issues or to learn more about the tool, see [https://github.com/Azure/ - [azcopy jobs show](storage-ref-azcopy-jobs-show.md) - [azcopy list](storage-ref-azcopy-list.md) - [azcopy login](storage-ref-azcopy-login.md) +- [azcopy login status](storage-ref-azcopy-login-status.md) - [azcopy logout](storage-ref-azcopy-logout.md) - [azcopy make](storage-ref-azcopy-make.md) - [azcopy remove](storage-ref-azcopy-remove.md) diff --git a/articles/storage/common/storage-use-azcopy-blobs-copy.md b/articles/storage/common/storage-use-azcopy-blobs-copy.md index 907d39ecec597..212e5ad3018fd 100644 --- a/articles/storage/common/storage-use-azcopy-blobs-copy.md +++ b/articles/storage/common/storage-use-azcopy-blobs-copy.md @@ -193,4 +193,5 @@ See these articles to configure settings, optimize performance, and troubleshoot - [AzCopy configuration settings](storage-ref-azcopy-configuration-settings.md) - [Optimize the performance of AzCopy](storage-use-azcopy-optimize.md) -- [Troubleshoot AzCopy V10 issues in Azure Storage by using log files](storage-use-azcopy-configure.md) +- [Find errors and resume jobs by using log and plan files in AzCopy](storage-use-azcopy-configure.md) +- [Troubleshoot problems with AzCopy v10](storage-use-azcopy-troubleshoot.md) diff --git a/articles/storage/common/storage-use-azcopy-blobs-download.md b/articles/storage/common/storage-use-azcopy-blobs-download.md index 7227a18e5ef55..fc9a44037f5c0 100644 --- a/articles/storage/common/storage-use-azcopy-blobs-download.md +++ b/articles/storage/common/storage-use-azcopy-blobs-download.md @@ -242,4 +242,5 @@ See these articles to configure settings, optimize performance, and troubleshoot - [AzCopy configuration settings](storage-ref-azcopy-configuration-settings.md) - [Optimize the performance of AzCopy](storage-use-azcopy-optimize.md) -- [Troubleshoot AzCopy V10 issues in Azure Storage by using log files](storage-use-azcopy-configure.md) +- [Find errors and resume jobs by using log and plan files in AzCopy](storage-use-azcopy-configure.md) +- [Troubleshoot problems with AzCopy v10](storage-use-azcopy-troubleshoot.md) diff --git a/articles/storage/common/storage-use-azcopy-blobs-synchronize.md b/articles/storage/common/storage-use-azcopy-blobs-synchronize.md index 592026e8d2bd4..4a55ef3bc056a 100644 --- a/articles/storage/common/storage-use-azcopy-blobs-synchronize.md +++ b/articles/storage/common/storage-use-azcopy-blobs-synchronize.md @@ -146,4 +146,5 @@ See these articles to configure settings, optimize performance, and troubleshoot - [AzCopy configuration settings](storage-ref-azcopy-configuration-settings.md) - [Optimize the performance of AzCopy](storage-use-azcopy-optimize.md) -- [Troubleshoot AzCopy V10 issues in Azure Storage by using log files](storage-use-azcopy-configure.md) +- [Find errors and resume jobs by using log and plan files in AzCopy](storage-use-azcopy-configure.md) +- [Troubleshoot problems with AzCopy v10](storage-use-azcopy-troubleshoot.md) diff --git a/articles/storage/common/storage-use-azcopy-blobs-upload.md b/articles/storage/common/storage-use-azcopy-blobs-upload.md index 38d5c4f4bfb2a..fffbe0b69a97a 100644 --- a/articles/storage/common/storage-use-azcopy-blobs-upload.md +++ b/articles/storage/common/storage-use-azcopy-blobs-upload.md @@ -286,4 +286,5 @@ See these articles to configure settings, optimize performance, and troubleshoot - [AzCopy configuration settings](storage-ref-azcopy-configuration-settings.md) - [Optimize the performance of 
AzCopy](storage-use-azcopy-optimize.md) -- [Troubleshoot AzCopy V10 issues in Azure Storage by using log files](storage-use-azcopy-configure.md) +- [Find errors and resume jobs by using log and plan files in AzCopy](storage-use-azcopy-configure.md) +- [Troubleshoot problems with AzCopy v10](storage-use-azcopy-troubleshoot.md) diff --git a/articles/storage/common/storage-use-azcopy-files.md b/articles/storage/common/storage-use-azcopy-files.md index 410491d4bcac4..63dcc6200f83b 100644 --- a/articles/storage/common/storage-use-azcopy-files.md +++ b/articles/storage/common/storage-use-azcopy-files.md @@ -535,4 +535,5 @@ See these articles to configure settings, optimize performance, and troubleshoot - [AzCopy configuration settings](storage-ref-azcopy-configuration-settings.md) - [Optimize the performance of AzCopy](storage-use-azcopy-optimize.md) -- [Troubleshoot AzCopy V10 issues in Azure Storage by using log files](storage-use-azcopy-configure.md) +- [Find errors and resume jobs by using log and plan files in AzCopy](storage-use-azcopy-configure.md) +- [Troubleshoot problems with AzCopy v10](storage-use-azcopy-troubleshoot.md) diff --git a/articles/storage/common/storage-use-azcopy-google-cloud.md b/articles/storage/common/storage-use-azcopy-google-cloud.md index 6d3e60239602e..c1901bd4be8e6 100644 --- a/articles/storage/common/storage-use-azcopy-google-cloud.md +++ b/articles/storage/common/storage-use-azcopy-google-cloud.md @@ -197,4 +197,5 @@ See these articles to configure settings, optimize performance, and troubleshoot - [AzCopy configuration settings](storage-ref-azcopy-configuration-settings.md) - [Optimize the performance of AzCopy](storage-use-azcopy-optimize.md) -- [Troubleshoot AzCopy V10 issues in Azure Storage by using log files](storage-use-azcopy-configure.md) +- [Find errors and resume jobs by using log and plan files in AzCopy](storage-use-azcopy-configure.md) +- [Troubleshoot problems with AzCopy v10](storage-use-azcopy-troubleshoot.md) diff --git a/articles/storage/common/storage-use-azcopy-migrate-on-premises-data.md b/articles/storage/common/storage-use-azcopy-migrate-on-premises-data.md index c0b3817e69326..148db68cf4f83 100644 --- a/articles/storage/common/storage-use-azcopy-migrate-on-premises-data.md +++ b/articles/storage/common/storage-use-azcopy-migrate-on-premises-data.md @@ -185,4 +185,10 @@ For more information about AzCopy, see any of these articles: - [Transfer data with AzCopy and Amazon S3 buckets](storage-use-azcopy-s3.md) -- [Configure, optimize, and troubleshoot AzCopy](storage-use-azcopy-configure.md) +- [AzCopy configuration settings](storage-ref-azcopy-configuration-settings.md) + +- [Optimize the performance of AzCopy](storage-use-azcopy-optimize.md) + +- [Find errors and resume jobs by using log and plan files in AzCopy](storage-use-azcopy-configure.md) + +- [Troubleshoot problems with AzCopy v10](storage-use-azcopy-troubleshoot.md) diff --git a/articles/storage/common/storage-use-azcopy-troubleshoot.md b/articles/storage/common/storage-use-azcopy-troubleshoot.md new file mode 100644 index 0000000000000..26ddcdb29a309 --- /dev/null +++ b/articles/storage/common/storage-use-azcopy-troubleshoot.md @@ -0,0 +1,162 @@ +--- +title: Troubleshoot problems with AzCopy (Azure Storage) | Microsoft Docs +description: Find workarounds to common issues with AzCopy v10. 
+author: normesta ms.service: storage ms.topic: how-to ms.date: 06/09/2022 ms.author: normesta ms.subservice: common + +--- + +# Troubleshoot problems with AzCopy v10 + +This article describes common issues that you might encounter while using AzCopy, helps you identify the causes of those issues, and then suggests ways to resolve them. + +## Identifying problems + +You can determine whether a job succeeds by looking at the exit code. + +If the exit code is `0-success`, then the job completed successfully. + +If the exit code is `error`, then examine the log file. Once you understand the exact error message, it becomes much easier to search for the right keywords and figure out the solution. To learn more, see [Find errors and resume jobs by using log and plan files in AzCopy](storage-use-azcopy-configure.md). + +If the exit code is `panic`, then check whether the log file exists. If the file doesn't exist, file a bug or reach out to support. + +## 403 errors + +It's common to encounter 403 errors. Sometimes they're benign and don't result in a failed transfer. For example, in AzCopy logs, you might see that a HEAD request received 403 errors. Those errors appear when AzCopy checks whether a resource is public. In most cases, you can ignore those instances. + +In some cases, 403 errors can result in a failed transfer. If this happens, other attempts to transfer files will likely fail until you resolve the issue. 403 errors can occur as a result of authentication and authorization issues. They can also occur when requests are blocked due to the storage account firewall configuration. + +### Authentication / Authorization issues + +403 errors that prevent data transfer occur because of issues with SAS tokens, role-based access control (Azure RBAC) roles, and access control list (ACL) configurations. + +##### SAS tokens + +If you're using a shared access signature (SAS) token, verify the following: + +- The expiration and start times of the SAS token are appropriate. + +- You selected all the necessary permissions for the token. + +- You generated the token by using an official SDK or tool. Try Storage Explorer if you haven't already. + +##### Azure RBAC + +If you're using role-based access control (Azure RBAC) roles via the `azcopy login` command, verify that you have the appropriate Azure roles assigned to your identity (for example, the Storage Blob Data Contributor role). + +To learn more about Azure roles, see [Assign an Azure role for access to blob data](../blobs/assign-azure-role-data-access.md). + +##### ACLs + +If you're using access control lists (ACLs), verify that your identity appears in an ACL entry for each file or directory that you intend to access. Also, make sure that each ACL entry reflects the appropriate permission level. + +To learn more about ACLs and ACL entries, see [Access control lists (ACLs) in Azure Data Lake Storage Gen2](../blobs/data-lake-storage-access-control.md). + +To learn how to use Azure roles together with ACLs, and how the system evaluates them to make authorization decisions, see [Access control model in Azure Data Lake Storage Gen2](../blobs/data-lake-storage-access-control-model.md). + +### Firewall and private endpoint issues + +If the storage account firewall isn't configured to allow access from the machine where AzCopy is running, AzCopy operations will return an HTTP 403 error.
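One option is to add the public IP address of the AzCopy client machine to the storage account's IP network rules, as described for each scenario below. The following PowerShell sketch shows the general idea; it assumes the Az.Storage module is installed and that you're signed in, the resource group and storage account names are placeholders, and the ipify.org lookup is just one way to find your public IP address.

```powershell
# Look up the public IP address of the machine that runs AzCopy (any "what is my IP" service works).
$clientIp = Invoke-RestMethod -Uri "https://api.ipify.org"

# Add that address to the storage account's IP network rules.
# The resource group and storage account names below are placeholders.
Add-AzStorageAccountNetworkRule `
    -ResourceGroupName "<resource-group>" `
    -Name "<storage-account>" `
    -IPAddressOrRange $clientIp
```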
+ +##### Transferring data from or to a local machine + +If you're uploading or downloading data between a storage account and an on-premises machine, make sure that the machine that runs AzCopy is able to access either the source or destination storage account. You might have to use IP network rules in the firewall settings of either the source **or** destination accounts to allow access from the public IP address of the machine. + +##### Transferring data between storage accounts + +403 authorization errors can prevent you from transferring data between accounts by using the client machine where AzCopy is running. + +If you're copying data between storage accounts, make sure that the machine that runs AzCopy is able to access both the source **and** the destination account. You might have to use IP network rules in the firewall settings of both the source and destination accounts to allow access from the public IP address of the machine. The service will use the IP address of the AzCopy client machine to authorize the source-to-destination traffic. To learn how to add a public IP address to the firewall settings of a storage account, see [Grant access from an internet IP range](storage-network-security.md#grant-access-from-an-internet-ip-range). + +If your VM doesn't have or can't have a public IP address, consider using a private endpoint. See [Use private endpoints for Azure Storage](storage-private-endpoints.md). + +##### Using a Private Link + +A [Private Link](../../private-link/private-link-overview.md) is at the virtual network (VNet) / subnet level. If you want AzCopy requests to go through a Private Link, then AzCopy must make those requests from a VM running in that VNet / subnet. For example, if you configure a Private Link in VNet1 / Subnet1 but the VM on which AzCopy runs is in VNet1 / Subnet2, then AzCopy requests won't use the Private Link and they're expected to fail. + +## Proxy-related errors + +If you encounter TCP errors such as `dial tcp: lookup proxy.x.x: no such host`, it means that your environment isn't configured to use the correct proxy, or you're using an advanced proxy that AzCopy doesn't recognize. + +You need to update the proxy settings to reflect the correct configurations. See [Configure proxy settings](storage-ref-azcopy-configuration-settings.md?toc=/azure/storage/blobs/toc.json#configure-proxy-settings). + +You can also bypass the proxy by setting the environment variable `NO_PROXY="*"`. + +Here are the endpoints that AzCopy needs to use: + +| Log in endpoints | Azure Storage endpoints | +|---|---| +| `login.microsoftonline.com (global Azure)` | `(blob \| file \| dfs).core.windows.net (global Azure)` | +| `login.chinacloudapi.cn (Azure China)` | `(blob \| file \| dfs).core.chinacloudapi.cn (Azure China)` | +| `login.microsoftonline.de (Azure Germany)` | `(blob \| file \| dfs).core.cloudapi.de (Azure Germany)` | +| `login.microsoftonline.us (Azure US Government)` | `(blob \| file \| dfs).core.usgovcloudapi.net (Azure US Government)` | + +## x509: certificate signed by unknown authority + +This error is often related to the use of a proxy that uses a Secure Sockets Layer (SSL) certificate that isn't trusted by the operating system. Verify your settings and make sure that the certificate is trusted at the operating system level. + +We recommend adding the certificate to your machine's root certificate store as that's where the trusted authorities are kept.
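For example, on Windows you can import the proxy's root certificate into the local machine's Trusted Root Certification Authorities store. This is a minimal sketch that assumes you've already exported that certificate to a local `.cer` file (the file path below is a placeholder) and that you're running an elevated PowerShell session.

```powershell
# Import the proxy's root CA certificate into the machine-wide trusted root store.
# The certificate file path is a placeholder; point it to your exported certificate.
Import-Certificate -FilePath "C:\temp\proxy-root-ca.cer" -CertStoreLocation Cert:\LocalMachine\Root
```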
+ +## Unrecognized parameters + +If you receive an error message stating that your parameters aren't recognized, make sure that you're using the correct version of AzCopy. AzCopy V8 and earlier versions are deprecated. [AzCopy V10](storage-use-azcopy-v10.md?toc=/azure/storage/blobs/toc.json) is the current version, and it's a complete rewrite that doesn't share any syntax with the previous versions. To move from an earlier version, see the [V8-to-V10 migration guide](https://github.com/Azure/azure-storage-azcopy/blob/main/MigrationGuideV8toV10.md). + +Also, make sure to utilize the built-in help messages by using the `-h` switch with any command (for example, `azcopy copy -h`). See [Get command help](storage-use-azcopy-v10.md?toc=/azure/storage/blobs/toc.json#get-command-help). To view the same information online, see [azcopy copy](storage-ref-azcopy-copy.md?toc=/azure/storage/blobs/toc.json). + +To help you understand commands, we also provide an [education tool](https://azcopyvnextrelease.z22.web.core.windows.net/). This tool demonstrates the most popular AzCopy commands along with the most popular command flags. For examples, see [Transfer data](storage-use-azcopy-v10.md?toc=/azure/storage/blobs/toc.json#transfer-data). If you have any questions, search through the existing [GitHub issues](https://github.com/Azure/azure-storage-azcopy/issues) first to see if your question has already been answered. + +## Conditional access policy error + +You can receive the following error when you invoke the `azcopy login` command. + +"Failed to perform login command: +failed to login with tenantID "common", Azure directory endpoint "https://login.microsoftonline.com", autorest/adal/devicetoken: -REDACTED- AADSTS50005: User tried to log in to a device from a platform (Unknown) that's currently not supported through Conditional Access policy. Supported device platforms are: iOS, Android, Mac, and Windows flavors. +Trace ID: -REDACTED- +Correlation ID: -REDACTED- +Timestamp: 2021-01-05 01:58:28Z" + +This error means that your administrator has configured a conditional access policy that specifies what type of device you can log in from. AzCopy uses the device code flow, which can't guarantee that the machine where you're using the AzCopy tool is also where you're logging in. + +If your device is among the list of supported platforms, then you might be able to use Storage Explorer, which integrates AzCopy for all data transfers (it passes tokens to AzCopy via the secret store) but provides a login workflow that supports passing device information. AzCopy itself also supports managed identities and service principals, which could be used as an alternative. + +If your device isn't among the list of supported platforms, contact your administrator for help. + +## Server busy, network errors, timeouts + +If you see a large number of failed requests with the `503 Server Busy` status, then your requests are being throttled by the storage service. If you're seeing network errors or timeouts, you might be attempting to push through too much data across your infrastructure and that infrastructure is having difficulty handling it. In all cases, the workaround is similar. + +If you see a large file failing over and over again because certain chunks fail each time, then try limiting the number of concurrent network connections or the throughput, depending on your specific case. We suggest that you lower performance drastically at first, observe whether that solves the initial problem, and then ramp performance back up until an overall balance is achieved.
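For example, the following PowerShell sketch lowers the concurrency that AzCopy uses and caps throughput for a single transfer. The storage account, container, and SAS token values are placeholders, and the specific numbers are only a starting point to tune from.

```powershell
# Reduce the number of concurrent requests that AzCopy makes (by default this is based on CPU count).
$env:AZCOPY_CONCURRENCY_VALUE = "16"

# Cap throughput, in megabits per second, for this transfer. Adjust the value for your environment.
azcopy copy "C:\local\data" "https://<storage-account>.blob.core.windows.net/<container>?<SAS-token>" --recursive --cap-mbps 100
```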
+ +For more information, see [Optimize the performance of AzCopy with Azure Storage](storage-use-azcopy-optimize.md). + +If you're copying data between accounts by using AzCopy, the quality and reliability of the network from where you run AzCopy might impact the overall performance. Even though the data transfers from server to server, AzCopy does initiate calls for each file to copy between service endpoints. + +## Known constraints with AzCopy + +- Copying data from government clouds to commercial clouds isn't supported. However, copying data from commercial clouds to government clouds is supported. + +- Asynchronous service-side copy isn't supported. AzCopy performs synchronous copy only. In other words, by the time the job finishes, the data has been moved. + +- If you're copying to an Azure file share and you forgot to specify the `--preserve-smb-permissions` flag, and you don't want to transfer the data again, consider using Robocopy to bring over only the permissions. + +- Azure Functions has a different endpoint for MSI authentication, which AzCopy doesn't yet support. + +## Known temporary issues + +There's a service issue impacting AzCopy versions 10.11 and later, which use the [PutBlobFromURL API](/rest/api/storageservices/put-blob-from-url) to copy blobs smaller than the given block size (whose default is 8 MiB). If the source account has any firewall restriction (VNet / IP / PL / SE Policy), the `PutBlobFromURL` API might mistakenly return the message `409 Copy source blob has been modified`. The workaround is to use AzCopy 10.10. + +- https://azcopyvnext.azureedge.net/release20210415/azcopy_darwin_amd64_10.10.0.zip +- https://azcopyvnext.azureedge.net/release20210415/azcopy_linux_amd64_10.10.0.tar.gz +- https://azcopyvnext.azureedge.net/release20210415/azcopy_windows_386_10.10.0.zip +- https://azcopyvnext.azureedge.net/release20210415/azcopy_windows_amd64_10.10.0.zip + +## See also + +- [Get started with AzCopy](storage-use-azcopy-v10.md) +- [Find errors and resume jobs by using log and plan files in AzCopy](storage-use-azcopy-configure.md) \ No newline at end of file diff --git a/articles/storage/common/storage-use-azcopy-v10.md b/articles/storage/common/storage-use-azcopy-v10.md index eeb7a7c3bfba2..09d9947835201 100644 --- a/articles/storage/common/storage-use-azcopy-v10.md +++ b/articles/storage/common/storage-use-azcopy-v10.md @@ -4,7 +4,7 @@ description: AzCopy is a command-line utility that you can use to copy data to, author: normesta ms.service: storage ms.topic: how-to -ms.date: 05/11/2022 +ms.date: 06/09/2022 ms.author: normesta ms.subservice: common ms.custom: contperf-fy21q2 @@ -128,8 +128,10 @@ The following table lists all AzCopy v10 commands.
Each command links to a refer |[azcopy jobs remove](storage-ref-azcopy-jobs-remove.md?toc=/azure/storage/blobs/toc.json)|Remove all files associated with the given job ID.| |[azcopy jobs resume](storage-ref-azcopy-jobs-resume.md?toc=/azure/storage/blobs/toc.json)|Resumes the existing job with the given job ID.| |[azcopy jobs show](storage-ref-azcopy-jobs-show.md?toc=/azure/storage/blobs/toc.json)|Shows detailed information for the given job ID.| +|[azcopy jobs](storage-ref-azcopy-jobs.md?toc=/azure/storage/blobs/toc.json)|Subcommands related to managing jobs.| |[azcopy list](storage-ref-azcopy-list.md?toc=/azure/storage/blobs/toc.json)|Lists the entities in a given resource.| |[azcopy login](storage-ref-azcopy-login.md?toc=/azure/storage/blobs/toc.json)|Logs in to Azure Active Directory to access Azure Storage resources.| +|[azcopy login status](storage-ref-azcopy-login-status.md?toc=/azure/storage/blobs/toc.json)|Shows the login status of the current user.| |[azcopy logout](storage-ref-azcopy-logout.md?toc=/azure/storage/blobs/toc.json)|Logs the user out and terminates access to Azure Storage resources.| |[azcopy make](storage-ref-azcopy-make.md?toc=/azure/storage/blobs/toc.json)|Creates a container or file share.| |[azcopy remove](storage-ref-azcopy-remove.md?toc=/azure/storage/blobs/toc.json)|Delete blobs or files from an Azure storage account.| @@ -191,7 +193,9 @@ See any of the following resources: - [Optimize the performance of AzCopy](storage-use-azcopy-optimize.md) -- [Troubleshoot AzCopy V10 issues in Azure Storage by using log files](storage-use-azcopy-configure.md) +- [Find errors and resume jobs by using log and plan files in AzCopy](storage-use-azcopy-configure.md) + +- [Troubleshoot problems with AzCopy v10](storage-use-azcopy-troubleshoot.md) ## Use a previous version diff --git a/articles/storage/common/storage-use-azurite.md b/articles/storage/common/storage-use-azurite.md index 46aaf41a8f4f8..78f08f49526fc 100644 --- a/articles/storage/common/storage-use-azurite.md +++ b/articles/storage/common/storage-use-azurite.md @@ -4,7 +4,7 @@ description: The Azurite open-source emulator provides a free local environment author: normesta ms.author: normesta -ms.date: 12/03/2021 +ms.date: 06/03/2022 ms.service: storage ms.subservice: common ms.topic: how-to @@ -490,7 +490,7 @@ You can pass the following connection strings to the [Azure SDKs](https://aka.ms The full connection string is: -`DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;QueueEndpoint=http://127.0.0.1:10001/devstoreaccount1;` +`DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;QueueEndpoint=http://127.0.0.1:10001/devstoreaccount1;TableEndpoint=http://127.0.0.1:10002/devstoreaccount1;` To connect to the blob service only, the connection string is: @@ -500,11 +500,15 @@ To connect to the queue service only, the connection string is: `DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;QueueEndpoint=http://127.0.0.1:10001/devstoreaccount1;` +To connect to the table service only, the connection string is: +
+`DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;TableEndpoint=http://127.0.0.1:10002/devstoreaccount1;` + #### HTTPS connection strings The full HTTPS connection string is: -`DefaultEndpointsProtocol=https;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=https://127.0.0.1:10000/devstoreaccount1;QueueEndpoint=https://127.0.0.1:10001/devstoreaccount1;` +`DefaultEndpointsProtocol=https;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=https://127.0.0.1:10000/devstoreaccount1;QueueEndpoint=https://127.0.0.1:10001/devstoreaccount1;TableEndpoint=https://127.0.0.1:10002/devstoreaccount1;` To use the blob service only, the HTTPS connection string is: @@ -514,9 +518,13 @@ To use the queue service only, the HTTPS connection string is: `DefaultEndpointsProtocol=https;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;QueueEndpoint=https://127.0.0.1:10001/devstoreaccount1;` +To use the table service only, the HTTPS connection string is: + +`DefaultEndpointsProtocol=https;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;TableEndpoint=https://127.0.0.1:10002/devstoreaccount1;` + If you used `dotnet dev-certs` to generate your self-signed certificate, use the following connection string. -`DefaultEndpointsProtocol=https;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=https://localhost:10000/devstoreaccount1;QueueEndpoint=https://localhost:10001/devstoreaccount1;` +`DefaultEndpointsProtocol=https;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=https://localhost:10000/devstoreaccount1;QueueEndpoint=https://localhost:10001/devstoreaccount1;TableEndpoint=https://localhost:10002/devstoreaccount1;` Update the connection string when using [custom storage accounts and keys](#custom-storage-accounts-and-keys).
@@ -542,7 +550,7 @@ var client = new BlobContainerClient( // With connection string var client = new BlobContainerClient( - "DefaultEndpointsProtocol=https;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=https://127.0.0.1:10000/devstoreaccount1;QueueEndpoint=https://127.0.0.1:10001/devstoreaccount1;", "container-name" + "DefaultEndpointsProtocol=https;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=https://127.0.0.1:10000/devstoreaccount1;", "container-name" ); // With account name and key @@ -564,7 +572,7 @@ var client = new QueueClient( // With connection string var client = new QueueClient( - "DefaultEndpointsProtocol=https;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=https://127.0.0.1:10000/devstoreaccount1;QueueEndpoint=https://127.0.0.1:10001/devstoreaccount1;", "queue-name" + "DefaultEndpointsProtocol=https;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;QueueEndpoint=https://127.0.0.1:10001/devstoreaccount1;", "queue-name" ); // With account name and key @@ -574,6 +582,28 @@ var client = new QueueClient( ); ``` +#### Azure Table Storage + +You can also instantiate a TableClient or TableServiceClient. + +```csharp +// With table endpoint, table name, and DefaultAzureCredential +var client = new TableClient( + new Uri("https://127.0.0.1:10002/devstoreaccount1"), "table-name", new DefaultAzureCredential() + ); + +// With connection string +var client = new TableClient( + "DefaultEndpointsProtocol=https;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;TableEndpoint=https://127.0.0.1:10002/devstoreaccount1;", "table-name" + ); + +// With account name and key +var client = new TableClient( + new Uri("https://127.0.0.1:10002/devstoreaccount1"), "table-name", + new TableSharedKeyCredential("devstoreaccount1", "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==") + ); +``` + ### Microsoft Azure Storage Explorer You can use Storage Explorer to view the data stored in Azurite. @@ -623,10 +653,13 @@ The following files and folders may be created in the workspace location when in - `__blobstorage__` - Directory containing Azurite blob service persisted binary data - `__queuestorage__` - Directory containing Azurite queue service persisted binary data +- `__tablestorage__` - Directory containing Azurite table service persisted binary data - `__azurite_db_blob__.json` - Azurite blob service metadata file - `__azurite_db_blob_extent__.json` - Azurite blob service extent metadata file - `__azurite_db_queue__.json` - Azurite queue service metadata file - `__azurite_db_queue_extent__.json` - Azurite queue service extent metadata file +- `__azurite_db_table__.json` - Azurite table service metadata file +- `__azurite_db_table_extent__.json` - Azurite table service extent metadata file To clean up Azurite, delete above files and folders and restart the emulator.
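A minimal PowerShell sketch of that cleanup, assuming the Azurite workspace is the current directory (adjust the path if you started Azurite with a different `--location`):

```powershell
# Stop Azurite first, then remove the persisted data directories and metadata files.
Remove-Item -Recurse -Force -ErrorAction Ignore `
    __blobstorage__, __queuestorage__, __tablestorage__, __azurite_db_*.json
```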
diff --git a/articles/storage/file-sync/file-sync-cloud-tiering-overview.md b/articles/storage/file-sync/file-sync-cloud-tiering-overview.md index d52b8030d6a96..062fefb3156e9 100644 --- a/articles/storage/file-sync/file-sync-cloud-tiering-overview.md +++ b/articles/storage/file-sync/file-sync-cloud-tiering-overview.md @@ -64,7 +64,7 @@ Enabling proactive recalling may also result in increased bandwidth usage on the :::image type="content" source="media/storage-sync-files-deployment-guide/proactive-download.png" alt-text="An image showing the Azure file share download behavior for a server endpoint currently in effect and a button to open a menu that allows to change it."::: -For more information on proactive recall, see [Deploy Azure File Sync](file-sync-deployment-guide.md#proactively-recall-new-and-changed-files-from-an-azure-file-share). +For more information on proactive recall, see [Deploy Azure File Sync](file-sync-deployment-guide.md#optional-proactively-recall-new-and-changed-files-from-an-azure-file-share). ## Tiered vs. locally cached file behavior diff --git a/articles/storage/file-sync/file-sync-cloud-tiering-policy.md b/articles/storage/file-sync/file-sync-cloud-tiering-policy.md index 5f06f8f457f40..26bea783856d1 100644 --- a/articles/storage/file-sync/file-sync-cloud-tiering-policy.md +++ b/articles/storage/file-sync/file-sync-cloud-tiering-policy.md @@ -4,7 +4,7 @@ description: Details on how the date and volume free space policies work togethe author: khdownie ms.service: storage ms.topic: conceptual -ms.date: 04/13/2021 +ms.date: 06/07/2022 ms.author: kendownie ms.subservice: files --- @@ -15,25 +15,25 @@ Cloud tiering has two policies that determine which files are tiered to the clou The **volume free space policy** ensures that a specified percentage of the local volume the server endpoint is located on is always kept free. -The **date policy** tiers files last accessed x days ago or later. The volume free space policy will always take precedence; when there isn't enough free space on the volume to store as many days worth of files as described by the date policy, Azure File Sync will override the date policy and continue tiering the coldest files until the volume free space percentage is met. +The **date policy** tiers files last accessed x days ago or later. The volume free space policy will always take precedence. When there isn't enough free space on the volume to store as many days worth of files as described by the date policy, Azure File Sync will override the date policy and continue tiering the coldest files until the volume free space percentage is met. ## How both policies work together -We'll use an example to illustrate how these policies work: Let's say you configured Azure File Sync on a 500 GiB local volume, and cloud tiering was never enabled. These are the files in your file share: +We'll use an example to illustrate how these policies work: Let's say you configured Azure File Sync on a 500-GiB local volume, and cloud tiering was never enabled. 
These are the files in your file share: |File Name |Last Access Time |File Size |Stored In | |----------|------------------|-----------|----------| |File 1 | 2 days ago | 10 GiB | Server and Azure file share |File 2 | 10 days ago | 30 GiB | Server and Azure file share |File 3 | 1 year ago | 200 GiB | Server and Azure file share -|File 4 | 1 year, 2 days ago | 130 GiB | Server and Azure file share +|File 4 | 1 year, 2 days ago | 120 GiB | Server and Azure file share |File 5 | 2 years, 1 day ago | 140 GiB | Server and Azure file share **Change 1:** You enabled cloud tiering, set a volume free space policy of 20%, and kept the date policy disabled. With that configuration, cloud tiering ensures 20% (in this case 100 GiB) of space is kept free and available on the local machine. As a result, the total capacity of the local cache is 400 GiB. That 400 GiB will store the most recently and frequently accessed files on the local volume. -With this configuration, only files 1 through 4 would be stored in the local cache, and file 5 would be tiered. This is only 370 GiB out of the 400 GiB that could be used. File 5 is 140 GiB and would exceed the 400 GiB limit if it was locally cached. +With this configuration, only files 1 through 4 would be stored in the local cache, and file 5 would be tiered. This only accounts for 360 GiB out of the 400 GiB that could be used. File 5 is 140 GiB and would exceed the 400-GiB limit if it was locally cached. -**Change 2:** Say a user accesses file 5. This makes file 5 the most recently accessed file in the share. As a result, File 5 would be stored in the local cache and to fit under the 400 GiB limit, file 4 would be tiered. The following table shows where the files are stored, with these updates: +**Change 2:** Say a user accesses file 5. This makes file 5 the most recently accessed file in the share. As a result, File 5 would be stored in the local cache and to fit under the 400-GiB limit, file 4 would be tiered. The following table shows where the files are stored, with these updates: |File Name |Last Access Time |File Size |Stored In | |----------|------------------|-----------|----------| @@ -41,9 +41,9 @@ With this configuration, only files 1 through 4 would be stored in the local cac |File 1 | 2 days ago | 10 GiB | Server and Azure file share |File 2 | 10 days ago | 30 GiB | Server and Azure file share |File 3 | 1 year ago | 200 GiB | Server and Azure file share -|File 4 | 1 year, 2 days ago | 130 GiB | Azure file share, tiered locally +|File 4 | 1 year, 2 days ago | 120 GiB | Azure file share, tiered locally -**Change 3:** Let's say you updated the policies so that the date-based tiering policy is 60 days and the volume free space policy is 70%. Now, only up to 150 GiB can be stored in the local cache. Although File 2 has been accessed less than 60 days ago, the volume free space policy will override the date policy, and file 2 is tiered to maintain the 70% local free space. +**Change 3:** Imagine you updated the policies so that the date-based tiering policy is 60 days and the volume free space policy is 70%. Now, only up to 150 GiB can be stored in the local cache. Although File 2 has been accessed less than 60 days ago, the volume free space policy will override the date policy, and file 2 is tiered to maintain the 70% local free space. 
**Change 4:** If you changed the volume free space policy to 20% and then used `Invoke-StorageSyncFileRecall` to recall all the files that fit on the local drive while adhering to the cloud tiering policies, the table would look like this: @@ -53,7 +53,7 @@ With this configuration, only files 1 through 4 would be stored in the local cac |File 1 | 2 days ago | 10 GiB | Server and Azure file share |File 2 | 10 days ago | 30 GiB | Server and Azure file share |File 3 | 1 year ago | 200 GiB | Azure file share, tiered locally -|File 4 | 1 year, 2 days ago | 130 GiB | Azure file share, tiered locally +|File 4 | 1 year, 2 days ago | 120 GiB | Azure file share, tiered locally In this case, files 1, 2 and 5 would be locally cached and files 3 and 4 would be tiered. Because the date policy is 60 days, files 3 and 4 are tiered, even though the volume free space policy allows for up to 400 GiB locally. @@ -62,7 +62,7 @@ In this case, files 1, 2 and 5 would be locally cached and files 3 and 4 would b ## Multiple server endpoints on a local volume -Cloud tiering can be enabled for multiple server endpoints on a single local volume. For this configuration, you should set the volume free space to the same amount for all the server endpoints on the same volume. If you set different volume free space policies for several server endpoints on the same volume, the largest volume free space percentage will take precedence. This is called the **effective volume free space policy**. For example, if you have three server endpoints on the same local volume, one set to 15%, another set to 20%, and a third set to 30%, they will all begin to tier the coldest files when they have less than 30% free space available. +Cloud tiering can be enabled for multiple server endpoints on a single local volume. For this configuration, you should set the volume free space to the same amount for all the server endpoints on the same volume. If you set different volume free space policies for several server endpoints on the same volume, the largest volume free space percentage will take precedence. This is called the **effective volume free space policy**. For example, if you have three server endpoints on the same local volume, one set to 15%, another set to 20%, and a third set to 30%, they'll all begin to tier the coldest files when they have less than 30% free space available. ## Next steps diff --git a/articles/storage/file-sync/file-sync-deployment-guide.md b/articles/storage/file-sync/file-sync-deployment-guide.md index e3bd9b3b60503..463777cf2ff87 100644 --- a/articles/storage/file-sync/file-sync-deployment-guide.md +++ b/articles/storage/file-sync/file-sync-deployment-guide.md @@ -4,7 +4,7 @@ description: Learn how to deploy Azure File Sync, from start to finish, using th author: khdownie ms.service: storage ms.topic: how-to -ms.date: 04/12/2022 +ms.date: 06/03/2022 ms.author: kendownie ms.subservice: files ms.custom: devx-track-azurepowershell, devx-track-azurecli @@ -21,19 +21,34 @@ We strongly recommend that you read [Planning for an Azure Files deployment](../ # [Portal](#tab/azure-portal) -1. An Azure file share in the same region that you want to deploy Azure File Sync. For more information, see: +1. An **Azure file share** in the same region that you want to deploy Azure File Sync. For more information, see: - [Region availability](file-sync-planning.md#azure-file-sync-region-availability) for Azure File Sync. 
- [Create a file share](../files/storage-how-to-create-file-share.md?toc=%2fazure%2fstorage%2ffilesync%2ftoc.json) for a step-by-step description of how to create a file share. -1. At least one supported instance of Windows Server or Windows Server cluster to sync with Azure File Sync. For more information about supported versions of Windows Server and recommended system resources, see [Windows file server considerations](file-sync-planning.md#windows-file-server-considerations). +2. The following **storage account** settings must be enabled to allow Azure File Sync access to the storage account: + - **SMB security settings** must allow **SMB 3.1.1** protocol version, **NTLM v2** authentication and **AES-128-GCM** encryption. To check the SMB security settings on the storage account, see [SMB security settings](../files/files-smb-protocol.md#smb-security-settings). + - **Allow storage account key access** must be **Enabled**. To check this setting, navigate to your storage account and select Configuration under the Settings section. +3. At least one supported instance of **Windows Server** to sync with Azure File Sync. For more information about supported versions of Windows Server and recommended system resources, see [Windows file server considerations](file-sync-planning.md#windows-file-server-considerations). +4. **Optional**: If you intend to use Azure File Sync with a Windows Server Failover Cluster, the **File Server for general use** role must be configured prior to installing the Azure File Sync agent on each node in the cluster. For more information on how to configure the **File Server for general use** role on a Failover Cluster, see [Deploying a two-node clustered file server](/windows-server/failover-clustering/deploy-two-node-clustered-file-server). + + > [!NOTE] + > The only scenario supported by Azure File Sync is Windows Server Failover Cluster with Clustered Disks. See [Failover Clustering](file-sync-planning.md#failover-clustering) for Azure File Sync. # [PowerShell](#tab/azure-powershell) -1. An Azure file share in the same region that you want to deploy Azure File Sync. For more information, see: +1. An **Azure file share** in the same region that you want to deploy Azure File Sync. For more information, see: - [Region availability](file-sync-planning.md#azure-file-sync-region-availability) for Azure File Sync. - [Create a file share](../files/storage-how-to-create-file-share.md?toc=%2fazure%2fstorage%2ffilesync%2ftoc.json) for a step-by-step description of how to create a file share. -1. At least one supported instance of Windows Server or Windows Server cluster to sync with Azure File Sync. For more information about supported versions of Windows Server and recommended system resources, see [Windows file server considerations](file-sync-planning.md#windows-file-server-considerations). +2. The following **storage account** settings must be enabled to allow Azure File Sync access to the storage account: + - **SMB security settings** must allow **SMB 3.1.1** protocol version, **NTLM v2** authentication and **AES-128-GCM** encryption. To check the SMB security settings on the storage account, see [SMB security settings](../files/files-smb-protocol.md#smb-security-settings). + - **Allow storage account key access** must be **Enabled**. To check this setting, navigate to your storage account and select Configuration under the Settings section. +3. At least one supported instance of **Windows Server** to sync with Azure File Sync. 
For more information about supported versions of Windows Server and recommended system resources, see [Windows file server considerations](file-sync-planning.md#windows-file-server-considerations). + +4. **Optional**: If you intend to use Azure File Sync with a Windows Server Failover Cluster, the **File Server for general use** role must be configured prior to installing the Azure File Sync agent on each node in the cluster. For more information on how to configure the **File Server for general use** role on a Failover Cluster, see [Deploying a two-node clustered file server](/windows-server/failover-clustering/deploy-two-node-clustered-file-server). -1. The Az PowerShell module may be used with either PowerShell 5.1 or PowerShell 6+. You may use the Az PowerShell module for Azure File Sync on any supported system, including non-Windows systems, however the server registration cmdlet must always be run on the Windows Server instance you are registering (this can be done directly or via PowerShell remoting). On Windows Server 2012 R2, you can verify that you are running at least PowerShell 5.1.\* by looking at the value of the **PSVersion** property of the **$PSVersionTable** object: + > [!NOTE] + > The only scenario supported by Azure File Sync is Windows Server Failover Cluster with Clustered Disks. See [Failover Clustering](file-sync-planning.md#failover-clustering) for Azure File Sync. + +5. The Az PowerShell module may be used with either PowerShell 5.1 or PowerShell 6+. You may use the Az PowerShell module for Azure File Sync on any supported system, including non-Windows systems, however the server registration cmdlet must always be run on the Windows Server instance you are registering (this can be done directly or via PowerShell remoting). On Windows Server 2012 R2, you can verify that you are running at least PowerShell 5.1.\* by looking at the value of the **PSVersion** property of the **$PSVersionTable** object: ```powershell $PSVersionTable.PSVersion @@ -46,7 +61,7 @@ We strongly recommend that you read [Planning for an Azure Files deployment](../ > [!IMPORTANT] > If you plan to use the Server Registration UI, rather than registering directly from PowerShell, you must use PowerShell 5.1. -1. If you have opted to use PowerShell 5.1, ensure that at least .NET 4.7.2 is installed. Learn more about [.NET Framework versions and dependencies](/dotnet/framework/migration-guide/versions-and-dependencies) on your system. +6. If you have opted to use PowerShell 5.1, ensure that at least .NET 4.7.2 is installed. Learn more about [.NET Framework versions and dependencies](/dotnet/framework/migration-guide/versions-and-dependencies) on your system. > [!IMPORTANT] > If you are installing .NET 4.7.2+ on Windows Server Core, you must install with the `quiet` and `norestart` flags or the installation will fail. For example, if installing .NET 4.8, the command would look like the following: @@ -54,19 +69,27 @@ We strongly recommend that you read [Planning for an Azure Files deployment](../ > Start-Process -FilePath "ndp48-x86-x64-allos-enu.exe" -ArgumentList "/q /norestart" -Wait > ``` -1. The Az PowerShell module, which can be installed by following the instructions here: [Install and configure Azure PowerShell](/powershell/azure/install-Az-ps). +7. The Az PowerShell module, which can be installed by following the instructions here: [Install and configure Azure PowerShell](/powershell/azure/install-Az-ps). 
> [!NOTE] > The Az.StorageSync module is now installed automatically when you install the Az PowerShell module. # [Azure CLI](#tab/azure-cli) -1. An Azure file share in the same region that you want to deploy Azure File Sync. For more information, see: +1. An **Azure file share** in the same region that you want to deploy Azure File Sync. For more information, see: - [Region availability](file-sync-planning.md#azure-file-sync-region-availability) for Azure File Sync. - [Create a file share](../files/storage-how-to-create-file-share.md?toc=%2fazure%2fstorage%2ffilesync%2ftoc.json) for a step-by-step description of how to create a file share. -1. At least one supported instance of Windows Server or Windows Server cluster to sync with Azure File Sync. For more information about supported versions of Windows Server and recommended system resources, see [Windows file server considerations](file-sync-planning.md#windows-file-server-considerations). +2. The following **storage account** settings must be enabled to allow Azure File Sync access to the storage account: + - **SMB security settings** must allow **SMB 3.1.1** protocol version, **NTLM v2** authentication and **AES-128-GCM** encryption. To check the SMB security settings on the storage account, see [SMB security settings](../files/files-smb-protocol.md#smb-security-settings). + - **Allow storage account key access** must be **Enabled**. To check this setting, navigate to your storage account and select Configuration under the Settings section. +3. At least one supported instance of **Windows Server** to sync with Azure File Sync. For more information about supported versions of Windows Server and recommended system resources, see [Windows file server considerations](file-sync-planning.md#windows-file-server-considerations). -1. [Install the Azure CLI](/cli/azure/install-azure-cli) +4. **Optional**: If you intend to use Azure File Sync with a Windows Server Failover Cluster, the **File Server for general use** role must be configured prior to installing the Azure File Sync agent on each node in the cluster. For more information on how to configure the **File Server for general use** role on a Failover Cluster, see [Deploying a two-node clustered file server](/windows-server/failover-clustering/deploy-two-node-clustered-file-server). + + > [!NOTE] + > The only scenario supported by Azure File Sync is Windows Server Failover Cluster with Clustered Disks. See [Failover Clustering](file-sync-planning.md#failover-clustering) for Azure File Sync. + +5. [Install the Azure CLI](/cli/azure/install-azure-cli) If you prefer, you can also use Azure Cloud Shell to complete the steps in this tutorial. Azure Cloud Shell is an interactive shell environment that you use through your browser. Start Cloud Shell by using one of these methods: @@ -76,7 +99,7 @@ We strongly recommend that you read [Planning for an Azure Files deployment](../ - Select the **Cloud Shell** button on the menu bar at the upper right corner in the [Azure portal](https://portal.azure.com) -1. Sign in. +6. Sign in. Sign in using the [az login](/cli/azure/reference-index#az-login) command if you're using a local install of the CLI. @@ -86,7 +109,7 @@ We strongly recommend that you read [Planning for an Azure Files deployment](../ Follow the steps displayed in your terminal to complete the authentication process. -1. Install the [az filesync](/cli/azure/storagesync) Azure CLI extension. +7. Install the [az filesync](/cli/azure/storagesync) Azure CLI extension. 
```azurecli az extension add --name storagesync @@ -230,7 +253,7 @@ The Azure File Sync agent is a downloadable package that enables Windows Server You can download the agent from the [Microsoft Download Center](https://go.microsoft.com/fwlink/?linkid=858257). When the download is finished, double-click the MSI package to start the Azure File Sync agent installation. > [!IMPORTANT] -> If you intend to use Azure File Sync with a Failover Cluster, the Azure File Sync agent must be installed on every node in the cluster. Each node in the cluster must be registered to work with Azure File Sync. The only scenario supported by Azure File Sync is Windows Server Failover Cluster with Clustered Disks. See [Failover Clustering](file-sync-planning.md#failover-clustering) for Azure File Sync. +> If you are using Azure File Sync with a Failover Cluster, the Azure File Sync agent must be installed on every node in the cluster. Each node in the cluster must be registered to work with Azure File Sync. We recommend that you do the following: - Leave the default installation path (C:\Program Files\Azure\StorageSyncAgent), to simplify troubleshooting and server maintenance. @@ -250,7 +273,11 @@ Execute the following PowerShell code to download the appropriate version of the $osver = [System.Environment]::OSVersion.Version # Download the appropriate version of the Azure File Sync agent for your OS. -if ($osver.Equals([System.Version]::new(10, 0, 17763, 0))) { +if ($osver.Equals([System.Version]::new(10, 0, 20348, 0))) { + Invoke-WebRequest ` + -Uri https://aka.ms/afs/agent/Server2022 ` + -OutFile "StorageSyncAgent.msi" +} elseif ($osver.Equals([System.Version]::new(10, 0, 17763, 0))) { Invoke-WebRequest ` -Uri https://aka.ms/afs/agent/Server2019 ` -OutFile "StorageSyncAgent.msi" @@ -430,11 +457,11 @@ A server endpoint represents a specific location on a registered server, such as [!INCLUDE [storage-files-sync-create-server-endpoint](../../../includes/storage-files-sync-create-server-endpoint.md)] -## Configure firewall and virtual network settings +## Optional: Configure firewall and virtual network settings ### Portal -If you'd like to configure your Azure File sync to work with firewall and virtual network settings, do the following: +If you'd like to configure Azure File Sync to work with firewall and virtual network settings, do the following: 1. From the Azure portal, navigate to the storage account you want to secure. 1. Select **Networking** on the left menu. @@ -445,46 +472,7 @@ If you'd like to configure your Azure File sync to work with firewall and virtua ![Configuring firewall and virtual network settings to work with Azure File sync](media/storage-sync-files-deployment-guide/firewall-and-vnet.png) -## SMB over QUIC on a server endpoint -Although the Azure file share (cloud endpoint) is a full SMB endpoint capable of direct access from the cloud or on-premises, customers that desire accessing the file share data cloud-side often deploy an Azure File Sync server endpoint on a Windows Server instance hosted on an Azure VM. The most common reason to have an additional server endpoint rather than accessing the Azure file share directly is that changes made directly on the Azure file share may take up to 24 hours or longer to be discovered by Azure File Sync, while changes made on a server endpoint are discovered nearly immediately and synced to all other server and cloud-endpoints. 
- -This configuration is extremely common in environments where a substantial portion of users are not on-premises, such as when users are working from home or from the road. Traditionally, accessing any file share with SMB over the public internet, including both file shares hosted on Windows File Server or on Azure Files directly, is very difficult since most organizations and ISPs block port 445. You can work around this limitation with [private endpoints and VPNs](file-sync-networking-overview.md#private-endpoints), however Windows Server 2022 Azure Edition provides an additional access strategy: SMB over the QUIC transport protocol. - -SMB over QUIC communicates over port 443, which most organizations and ISPs have open to support HTTPS traffic. Using SMB over QUIC greatly simplifies the networking required to access a file share hosted on an Azure File Sync server endpoint for clients using Windows 11 or greater. To learn more about how to setup and configure SMB over QUIC on Windows Server Azure Edition, see [SMB over QUIC for Windows File Server](/windows-server/storage/file-server/smb-over-quic). - -## Onboarding with Azure File Sync - -The recommended steps to onboard on Azure File Sync for the first time with zero downtime while preserving full file fidelity and access control list (ACL) are as follows: - -1. Deploy a Storage Sync Service. -1. Create a sync group. -1. Install Azure File Sync agent on the server with the full data set. -1. Register that server and create a server endpoint on the share. -1. Let sync do the full upload to the Azure file share (cloud endpoint). -1. After the initial upload is complete, install Azure File Sync agent on each of the remaining servers. -1. Create new file shares on each of the remaining servers. -1. Create server endpoints on new file shares with cloud tiering policy, if desired. (This step requires additional storage to be available for the initial setup.) -1. Let Azure File Sync agent do a rapid restore of the full namespace without the actual data transfer. After the full namespace sync, sync engine will fill the local disk space based on the cloud tiering policy for the server endpoint. -1. Ensure sync completes and test your topology as desired. -1. Redirect users and applications to this new share. -1. You can optionally delete any duplicate shares on the servers. - -If you don't have extra storage for initial onboarding and would like to attach to the existing shares, you can pre-seed the data in the Azure files shares. This approach is suggested, if and only if you can accept downtime and absolutely guarantee no data changes on the server shares during the initial onboarding process. - -1. Ensure that data on any of the servers can't change during the onboarding process. -1. Pre-seed Azure file shares with the server data using any data transfer tool over the SMB. Robocopy, for example. You can also use AzCopy over REST. Be sure to use AzCopy with the appropriate switches to preserve ACLs timestamps and attributes. -1. Create Azure File Sync topology with the desired server endpoints pointing to the existing shares. -1. Let sync finish reconciliation process on all endpoints. -1. Once reconciliation is complete, you can open shares for changes. - -Currently, pre-seeding approach has a few limitations - -- Data changes on the server before the sync topology is fully up and running can cause conflicts on the server endpoints. 
-- After the cloud endpoint is created, Azure File Sync runs a process to detect the files in the cloud before starting the initial sync. The time taken to complete this process varies depending on the various factors like network speed, available bandwidth, and number of files and folders. For the rough estimation in the preview release, detection process runs approximately at 10 files/sec. Hence, even if pre-seeding runs fast, the overall time to get a fully running system may be significantly longer when data is pre-seeded in the cloud. - -## Self-service restore through Previous Versions and VSS (Volume Shadow Copy Service) - -> [!IMPORTANT] -> The following information can only be used with version 9 (or above) of the storage sync agent. Versions lower than 9 will not have the StorageSyncSelfService cmdlets. +## Optional: Self-service restore through Previous Versions and VSS (Volume Shadow Copy Service) Previous Versions is a Windows feature that allows you to utilize server-side VSS snapshots of a volume to present restorable versions of a file to an SMB client. This enables a powerful scenario, commonly referred to as self-service restore, directly for information workers instead of depending on the restore from an IT admin. @@ -498,7 +486,7 @@ Enable-StorageSyncSelfServiceRestore [-DriveLetter] [[-Force]] VSS snapshots are taken of an entire volume. By default, up to 64 snapshots can exist for a given volume, granted there is enough space to store the snapshots. VSS handles this automatically. The default snapshot schedule takes two snapshots per day, Monday through Friday. That schedule is configurable via a Windows Scheduled Task. The above PowerShell cmdlet does two things: -1. It configures Azure File Syncs cloud tiering on the specified volume to be compatible with previous versions and guarantees that a file can be restored from a previous version, even if it was tiered to the cloud on the server. +1. It configures Azure File Sync's cloud tiering on the specified volume to be compatible with previous versions and guarantees that a file can be restored from a previous version, even if it was tiered to the cloud on the server. 1. It enables the default VSS schedule. You can then decide to modify it later. > [!NOTE] @@ -525,11 +513,13 @@ The default maximum number of VSS snapshots per volume (64) as well as the defau If a maximum of 64 VSS snapshots per volume is not the correct setting for you, then [change that value via a registry key](/windows/win32/backup/registry-keys-for-backup-and-restore#maxshadowcopies). For the new limit to take effect, you need to re-run the cmdlet to enable previous version compatibility on every volume it was previously enabled, with the -Force flag to take the new maximum number of VSS snapshots per volume into account. This will result in a newly calculated number of compatible days. Please note that this change will only take effect on newly tiered files and overwrite any customizations on the VSS schedule you might have made. +VSS snapshots by default can consume up to 10% of the volume space. To adjust the amount of storage that can be used for VSS snapshots, use the [vssadmin resize shadowstorage](/previous-versions/windows/it-pro/windows-server-2012-R2-and-2012/cc788050(v=ws.11)) command. + -## Proactively recall new and changed files from an Azure file share +## Optional: Proactively recall new and changed files from an Azure file share -With agent version 11, a new mode becomes available on a server endpoint. 
This mode allows globally distributed companies to have the server cache in a remote region pre-populated even before local users are accessing any files. When enabled on a server endpoint, this mode will cause this server to recall files that have been created or changed in the Azure file share. +Azure File Sync has a mode that allows globally distributed companies to have the server cache in a remote region pre-populated even before local users are accessing any files. When enabled on a server endpoint, this mode will cause this server to recall files that have been created or changed in the Azure file share. ### Scenario @@ -560,6 +550,42 @@ Set-AzStorageSyncServerEndpoint -InputObject -LocalCacheMode --- +## Optional: SMB over QUIC on a server endpoint +Although the Azure file share (cloud endpoint) is a full SMB endpoint capable of direct access from the cloud or on-premises, customers that want to access the file share data cloud-side often deploy an Azure File Sync server endpoint on a Windows Server instance hosted on an Azure VM. The most common reason to have an additional server endpoint rather than accessing the Azure file share directly is that changes made directly on the Azure file share may take up to 24 hours or longer to be discovered by Azure File Sync, while changes made on a server endpoint are discovered nearly immediately and synced to all other server and cloud endpoints. + +This configuration is extremely common in environments where a substantial portion of users are not on-premises, such as when users are working from home or from the road. Traditionally, accessing any file share with SMB over the public internet, including both file shares hosted on Windows File Server or on Azure Files directly, is very difficult since most organizations and ISPs block port 445. You can work around this limitation with [private endpoints and VPNs](file-sync-networking-overview.md#private-endpoints); however, Windows Server 2022 Azure Edition provides an additional access strategy: SMB over the QUIC transport protocol. + +SMB over QUIC communicates over port 443, which most organizations and ISPs have open to support HTTPS traffic. Using SMB over QUIC greatly simplifies the networking required to access a file share hosted on an Azure File Sync server endpoint for clients using Windows 11 or greater. To learn more about how to set up and configure SMB over QUIC on Windows Server Azure Edition, see [SMB over QUIC for Windows File Server](/windows-server/storage/file-server/smb-over-quic). + +## Onboarding with Azure File Sync + +The recommended steps to onboard to Azure File Sync for the first time with zero downtime while preserving full file fidelity and access control lists (ACLs) are as follows: + +1. Deploy a Storage Sync Service. +1. Create a sync group. +1. Install the Azure File Sync agent on the server with the full data set. +1. Register that server and create a server endpoint on the share. +1. Let sync do the full upload to the Azure file share (cloud endpoint). +1. After the initial upload is complete, install the Azure File Sync agent on each of the remaining servers. +1. Create new file shares on each of the remaining servers. +1. Create server endpoints on new file shares with cloud tiering policy, if desired. (This step requires additional storage to be available for the initial setup.) +1. Let the Azure File Sync agent do a rapid restore of the full namespace without the actual data transfer.
After the full namespace sync, the sync engine will fill the local disk space based on the cloud tiering policy for the server endpoint. +1. Ensure sync completes and test your topology as desired. +1. Redirect users and applications to this new share. +1. You can optionally delete any duplicate shares on the servers. + +If you don't have extra storage for initial onboarding and would like to attach to the existing shares, you can pre-seed the data in the Azure file shares. This approach is suggested only if you can accept downtime and absolutely guarantee no data changes on the server shares during the initial onboarding process. + +1. Ensure that data on any of the servers can't change during the onboarding process. +1. Pre-seed the Azure file shares with the server data using any data transfer tool over SMB, such as Robocopy. You can also use AzCopy over REST. Be sure to use AzCopy with the appropriate switches to preserve ACLs, timestamps, and attributes. +1. Create the Azure File Sync topology with the desired server endpoints pointing to the existing shares. +1. Let sync finish the reconciliation process on all endpoints. +1. Once reconciliation is complete, you can open shares for changes. + +Currently, the pre-seeding approach has a few limitations: +- Data changes on the server before the sync topology is fully up and running can cause conflicts on the server endpoints. +- After the cloud endpoint is created, Azure File Sync runs a process to detect the files in the cloud before starting the initial sync. The time taken to complete this process varies depending on factors like network speed, available bandwidth, and the number of files and folders. As a rough estimate, the detection process runs at approximately 10 files/sec. Hence, even if pre-seeding runs fast, the overall time to get a fully running system may be significantly longer when data is pre-seeded in the cloud. + ## Migrate a DFS Replication (DFS-R) deployment to Azure File Sync To migrate a DFS-R deployment to Azure File Sync: diff --git a/articles/storage/file-sync/file-sync-disaster-recovery-best-practices.md b/articles/storage/file-sync/file-sync-disaster-recovery-best-practices.md index 726007b71232c..c8397fff91aea 100644 --- a/articles/storage/file-sync/file-sync-disaster-recovery-best-practices.md +++ b/articles/storage/file-sync/file-sync-disaster-recovery-best-practices.md @@ -4,7 +4,7 @@ description: Learn about best practices for disaster recovery with Azure File Sy author: khdownie ms.service: storage ms.topic: how-to -ms.date: 08/18/2021 +ms.date: 05/24/2022 ms.author: kendownie ms.subservice: files --- @@ -54,7 +54,7 @@ If you enable cloud tiering, don't implement an on-premises backup solution. Wit If you decide to use an on-premises backup solution, backups should be performed on a server in the sync group with cloud tiering disabled. When performing a restore, use the volume-level or file-level restore options. Files restored using the file-level restore option will sync to all endpoints in the sync group and existing files will be replaced with the version restored from backup. Volume-level restores won't replace newer file versions in the cloud endpoint or other server endpoints.
-In Azure File Sync agent version 9 and above, [Volume Shadow Copy Service (VSS) snapshots](file-sync-deployment-guide.md#self-service-restore-through-previous-versions-and-vss-volume-shadow-copy-service) (including the **Previous Versions** tab) are supported on volumes with cloud tiering enabled. This allows you to perform self-service restores instead of relying on an admin to perform restores for you. However, you must enable previous version compatibility through PowerShell, which will increase your snapshot storage costs. VSS snapshots don't protect against disasters on the server endpoint itself, so they should only be used alongside cloud-side backups. For details, see [Self Service restore through Previous Versions and VSS](file-sync-deployment-guide.md#self-service-restore-through-previous-versions-and-vss-volume-shadow-copy-service). +[Volume Shadow Copy Service (VSS) snapshots](file-sync-deployment-guide.md#optional-self-service-restore-through-previous-versions-and-vss-volume-shadow-copy-service) (including the **Previous Versions** tab) are supported on volumes with cloud tiering enabled. This allows you to perform self-service restores instead of relying on an admin to perform restores for you. However, you must enable previous version compatibility through PowerShell, which will increase your snapshot storage costs. VSS snapshots don't protect against disasters on the server endpoint itself, so they should only be used alongside cloud-side backups. For details, see [Self Service restore through Previous Versions and VSS](file-sync-deployment-guide.md#optional-self-service-restore-through-previous-versions-and-vss-volume-shadow-copy-service). ## Data redundancy diff --git a/articles/storage/file-sync/file-sync-how-to-manage-tiered-files.md index bbfb575078045..547c3f5203e13 100644 --- a/articles/storage/file-sync/file-sync-how-to-manage-tiered-files.md +++ b/articles/storage/file-sync/file-sync-how-to-manage-tiered-files.md @@ -4,7 +4,7 @@ description: Tips and PowerShell commandlets to help you manage tiered files author: khdownie ms.service: storage ms.topic: how-to -ms.date: 04/13/2021 +ms.date: 06/06/2022 ms.author: kendownie ms.subservice: files --- @@ -57,17 +57,52 @@ There are several ways to check whether a file has been tiered to your Azure fil > [!WARNING] > The `fsutil reparsepoint` utility command also has the ability to delete a reparse point. Do not execute this command unless the Azure File Sync engineering team asks you to. Running this command might result in data loss. +## How to exclude files or folders from being tiered + +If you want to exclude files or folders from being tiered so that they remain local on the Windows Server, you can configure the **GhostingExclusionList** registry setting under HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Azure\StorageSync. You can exclude files by file name, file extension, or path. + +To exclude files or folders from cloud tiering, perform the following steps: +1. Open an elevated command prompt. +2.
Run one of the following commands to configure exclusions: + + To exclude certain file extensions from tiering (for example, .one, .lnk, .log), run the following command: + **reg ADD "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Azure\StorageSync" /v GhostingExclusionList /t REG_SZ /d .one|.lnk|.log /f** + + To exclude a specific file name from tiering (for example, FileName.vhd), run the following command: + **reg ADD "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Azure\StorageSync" /v GhostingExclusionList /t REG_SZ /d FileName.vhd /f** + + To exclude all files under a folder from tiering (for example, D:\ShareRoot\Folder\SubFolder), run the following command: + **reg ADD "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Azure\StorageSync" /v GhostingExclusionList /t REG_SZ /d D:\\\\ShareRoot\\\\Folder\\\\SubFolder /f** + + To exclude a combination of file names, file extensions and folders from tiering (for example, D:\ShareRoot\Folder1\SubFolder1,FileName.log,.txt), run the following command: + **reg ADD "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Azure\StorageSync" /v GhostingExclusionList /t REG_SZ /d D:\\\\ShareRoot\\\\Folder1\\\\SubFolder1|FileName.log|.txt /f** + +3. For the cloud tiering exclusions to take effect, you must restart the Storage Sync Agent service (FileSyncSvc) by running the following commands: + **net stop filesyncsvc** + **net start filesyncsvc** + +### More information +- If the Azure File Sync agent is installed on a Failover Cluster, the **GhostingExclusionList** registry setting must be created under HKEY_LOCAL_MACHINE\Cluster\StorageSync\SOFTWARE\Microsoft\Azure\StorageSync. + - Example: **reg ADD "HKEY_LOCAL_MACHINE\Cluster\StorageSync\SOFTWARE\Microsoft\Azure\StorageSync" /v GhostingExclusionList /t REG_SZ /d .one|.lnk|.log /f** +- Each exclusion in the registry should be separated by a pipe (|) character. +- Use double backslash (\\\\) when specifying a path to exclude. + - Example: **reg ADD "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Azure\StorageSync" /v GhostingExclusionList /t REG_SZ /d D:\\\\ShareRoot\\\\Folder\\\\SubFolder /f** +- File name or file type exclusions apply to all server endpoints on the server. +- You cannot exclude file types from a particular folder only. +- Exclusions do not apply to files already tiered. Use the [Invoke-StorageSyncFileRecall](#how-to-recall-a-tiered-file-to-disk) cmdlet to recall files already tiered. +- Use Event ID 9001 in the Telemetry event log on the server to check the cloud tiering exclusions that are configured. The Telemetry event log is located in Event Viewer under Applications and Services\Microsoft\FileSync\Agent. + ## How to exclude applications from cloud tiering last access time tracking When an application accesses a file, the last access time for the file is updated in the cloud tiering database. Applications that scan the file system like anti-virus cause all files to have the same last access time, which impacts when files are tiered. -To exclude applications from last access time tracking, add the process name to the appropriate registry setting that is located under HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Azure\StorageSync. +To exclude applications from last access time tracking, add the process exclusions to the **HeatTrackingProcessNamesExclusionList** registry setting under HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Azure\StorageSync. 
+ +Example: **reg ADD "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Azure\StorageSync" /v HeatTrackingProcessNamesExclusionList /t REG_SZ /d "SampleApp.exe|AnotherApp.exe" /f** -For v11 and v12 release, add the process exclusions to the HeatTrackingProcessNameExclusionList registry setting. -Example: reg ADD "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Azure\StorageSync" /v HeatTrackingProcessNameExclusionList /t REG_MULTI_SZ /d "SampleApp.exe\0AnotherApp.exe" /f +If the Azure File Sync agent is installed on a Failover Cluster, the **HeatTrackingProcessNamesExclusionList** registry setting must be created under HKEY_LOCAL_MACHINE\Cluster\StorageSync\SOFTWARE\Microsoft\Azure\StorageSync. -For v13 release and newer, add the process exclusions to the HeatTrackingProcessNamesExclusionList registry setting. -Example: reg ADD "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Azure\StorageSync" /v HeatTrackingProcessNamesExclusionList /t REG_SZ /d "SampleApp.exe|AnotherApp.exe" /f +Example: **reg ADD "HKEY_LOCAL_MACHINE\Cluster\StorageSync\SOFTWARE\Microsoft\Azure\StorageSync" /v HeatTrackingProcessNamesExclusionList /t REG_SZ /d "SampleApp.exe|AnotherApp.exe" /f** > [!NOTE] > Data Deduplication and File Server Resource Manager (FSRM) processes are excluded by default. Changes to the process exclusion list are honored by the system every 5 minutes. @@ -130,7 +165,7 @@ Optional parameters: - `-Order CloudTieringPolicy` will recall the most recently modified or accessed files first and is allowed by the current tiering policy. * If volume free space policy is configured, files will be recalled until the volume free space policy setting is reached. For example if the volume free policy setting is 20%, recall will stop once the volume free space reaches 20%. * If volume free space and date policy is configured, files will be recalled until the volume free space or date policy setting is reached. For example, if the volume free policy setting is 20% and the date policy is 7 days, recall will stop once the volume free space reaches 20% or all files accessed or modified within 7 days are local. -- `-ThreadCount` determines how many files can be recalled in parallel. +- `-ThreadCount` determines how many files can be recalled in parallel (thread count limit is 32). - `-PerFileRetryCount`determines how often a recall will be attempted of a file that is currently blocked. - `-PerFileRetryDelaySeconds`determines the time in seconds between retry to recall attempts and should always be used in combination with the previous parameter. diff --git a/articles/storage/file-sync/file-sync-monitoring.md b/articles/storage/file-sync/file-sync-monitoring.md index 68a6072fc2916..314cc9dd7a95f 100644 --- a/articles/storage/file-sync/file-sync-monitoring.md +++ b/articles/storage/file-sync/file-sync-monitoring.md @@ -67,6 +67,9 @@ Alerts proactively notify you when important conditions are found in your monito 5. Fill in the **Alert details** like **Alert rule name**, **Description** and **Severity**. 6. Click **Create alert rule** to create the alert. + > [!Note] + > If you configure an alert using the Server Name dimension and the server is renamed, the alert will need to be updated to monitor the new server name. 
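If you prefer to create the alert rule from PowerShell instead of the portal, the outline below shows the general shape using the Az.Monitor cmdlets. This is only a sketch: the metric name (`ServerSyncSessionResult`), the `ServerName` dimension, the threshold, and the resource ID format are assumptions to replace with the metric and dimension you pick from the table that follows.

```powershell
# Sketch: create a metric alert on a Storage Sync Service with Az.Monitor.
# Metric name, dimension, threshold, and resource ID below are placeholders - adjust to your scenario.
$dimension = New-AzMetricAlertRuleV2DimensionSelection -DimensionName "ServerName" -ValuesToInclude "myserver.contoso.com"

$criteria = New-AzMetricAlertRuleV2Criteria `
    -MetricName "ServerSyncSessionResult" `
    -DimensionSelection $dimension `
    -TimeAggregation Total `
    -Operator LessThan `
    -Threshold 1

Add-AzMetricAlertRuleV2 `
    -Name "SyncSessionFailing" `
    -ResourceGroupName "myResourceGroup" `
    -TargetResourceId "/subscriptions/<subscription-id>/resourceGroups/myResourceGroup/providers/Microsoft.StorageSync/storageSyncServices/myStorageSyncService" `
    -WindowSize 01:00:00 `
    -Frequency 00:30:00 `
    -Condition $criteria `
    -Severity 2
```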
+ The following table lists some example scenarios to monitor and the proper metric to use for the alert: | Scenario | Metric to use for alert | diff --git a/articles/storage/file-sync/file-sync-networking-endpoints.md b/articles/storage/file-sync/file-sync-networking-endpoints.md index de5dc273011ab..a049674dad52e 100644 --- a/articles/storage/file-sync/file-sync-networking-endpoints.md +++ b/articles/storage/file-sync/file-sync-networking-endpoints.md @@ -4,7 +4,7 @@ description: Learn how to configure Azure File Sync network endpoints. author: khdownie ms.service: storage ms.topic: how-to -ms.date: 04/13/2021 +ms.date: 05/24/2021 ms.author: kendownie ms.subservice: files ms.custom: devx-track-azurepowershell, devx-track-azurecli @@ -121,9 +121,6 @@ Address: 192.168.0.5 --- ### Create the Storage Sync Service private endpoint -> [!Important] -> In order to use private endpoints on the Storage Sync Service resource, you must use Azure File Sync agent version 10.1 or greater. Agent versions prior to 10.1 do not support private endpoints on the Storage Sync Service. All prior agent versions support private endpoints on the storage account resource. - # [Portal](#tab/azure-portal) Navigate to the **Private Link Center** by typing *Private Link* into the search bar at the top of the Azure portal. In the table of contents for the Private Link Center, select **Private endpoints**, and then **+ Add** to create a new private endpoint. diff --git a/articles/storage/file-sync/file-sync-planning.md b/articles/storage/file-sync/file-sync-planning.md index 5d89b3c8780fb..903d01521fb47 100644 --- a/articles/storage/file-sync/file-sync-planning.md +++ b/articles/storage/file-sync/file-sync-planning.md @@ -4,7 +4,7 @@ description: Plan for a deployment with Azure File Sync, a service that allows y author: khdownie ms.service: storage ms.topic: conceptual -ms.date: 04/05/2022 +ms.date: 06/01/2022 ms.author: kendownie ms.subservice: files ms.custom: references_regions, devx-track-azurepowershell @@ -203,7 +203,7 @@ We'll use an example to illustrate how to estimate the amount of free space woul In this case, Azure File Sync would need about 209,500,000 KiB (209.5 GiB) of space for this namespace. Add this amount to any additional free space that is desired in order to figure out how much free space is required for this disk. ### Failover Clustering -1. Windows Server Failover Clustering is supported by Azure File Sync for the "File Server for general use" deployment option. +1. Windows Server Failover Clustering is supported by Azure File Sync for the "File Server for general use" deployment option. For more information on how to configure the "File Server for general use" role on a Failover Cluster, see [Deploying a two-node clustered file server](/windows-server/failover-clustering/deploy-two-node-clustered-file-server). 2. The only scenario supported by Azure File Sync is Windows Server Failover Cluster with Clustered Disks 3. Failover Clustering is not supported on "Scale-Out File Server for application data" (SOFS) or on Clustered Shared Volumes (CSVs) or local disks. @@ -211,15 +211,15 @@ In this case, Azure File Sync would need about 209,500,000 KiB (209.5 GiB) of sp > The Azure File Sync agent must be installed on every node in a Failover Cluster for sync to work correctly. 
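A quick way to confirm that the agent is present on every node is to query each cluster node for the Storage Sync Agent service. The sketch below is an illustration only; it assumes the FailoverClusters PowerShell module is available, that PowerShell remoting is enabled on the nodes, and that the agent's service name is `FileSyncSvc` (the name used elsewhere in this documentation).

```powershell
# Sketch: verify the Azure File Sync agent service (FileSyncSvc) exists on every cluster node.
# Assumes the FailoverClusters module and PowerShell remoting to each node.
Import-Module FailoverClusters

foreach ($node in Get-ClusterNode) {
    $service = Invoke-Command -ComputerName $node.Name -ScriptBlock {
        Get-Service -Name FileSyncSvc -ErrorAction SilentlyContinue
    }
    if ($null -eq $service) {
        Write-Warning "Azure File Sync agent not found on node $($node.Name)."
    }
    else {
        Write-Output "Node $($node.Name): FileSyncSvc is $($service.Status)."
    }
}
```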
### Data Deduplication -**Windows Server 2016 and Windows Server 2019** -Data Deduplication is supported irrespective of whether cloud tiering is enabled or disabled on one or more server endpoints on the volume for Windows Server 2016 and Windows Server 2019. Enabling Data Deduplication on a volume with cloud tiering enabled lets you cache more files on-premises without provisioning more storage. +**Windows Server 2022, Windows Server 2019, and Windows Server 2016** +Data Deduplication is supported irrespective of whether cloud tiering is enabled or disabled on one or more server endpoints on the volume for Windows Server 2016, Windows Server 2019, and Windows Server 2022. Enabling Data Deduplication on a volume with cloud tiering enabled lets you cache more files on-premises without provisioning more storage. When Data Deduplication is enabled on a volume with cloud tiering enabled, Dedup optimized files within the server endpoint location will be tiered similar to a normal file based on the cloud tiering policy settings. Once the Dedup optimized files have been tiered, the Data Deduplication garbage collection job will run automatically to reclaim disk space by removing unnecessary chunks that are no longer referenced by other files on the volume. Note the volume savings only apply to the server; your data in the Azure file share will not be deduped. > [!Note] -> To support Data Deduplication on volumes with cloud tiering enabled on Windows Server 2019, Windows update [KB4520062 - October 2019](https://support.microsoft.com/help/4520062) or a later monthly rollup update must be installed and Azure File Sync agent version 12.0.0.0 or newer is required. +> To support Data Deduplication on volumes with cloud tiering enabled on Windows Server 2019, Windows update [KB4520062 - October 2019](https://support.microsoft.com/help/4520062) or a later monthly rollup update must be installed. **Windows Server 2012 R2** Azure File Sync does not support Data Deduplication and cloud tiering on the same volume on Windows Server 2012 R2. If Data Deduplication is enabled on a volume, cloud tiering must be disabled. @@ -232,9 +232,9 @@ Azure File Sync does not support Data Deduplication and cloud tiering on the sam - For ongoing Deduplication optimization jobs, cloud tiering with date policy will get delayed by the Data Deduplication [MinimumFileAgeDays](/powershell/module/deduplication/set-dedupvolume) setting, if the file is not already tiered. - Example: If the MinimumFileAgeDays setting is seven days and cloud tiering date policy is 30 days, the date policy will tier files after 37 days. - Note: Once a file is tiered by Azure File Sync, the Deduplication optimization job will skip the file. -- If a server running Windows Server 2012 R2 with the Azure File Sync agent installed is upgraded to Windows Server 2016 or Windows Server 2019, the following steps must be performed to support Data Deduplication and cloud tiering on the same volume: +- If a server running Windows Server 2012 R2 with the Azure File Sync agent installed is upgraded to Windows Server 2016, Windows Server 2019 or Windows Server 2022, the following steps must be performed to support Data Deduplication and cloud tiering on the same volume: - Uninstall the Azure File Sync agent for Windows Server 2012 R2 and restart the server. - - Download the Azure File Sync agent for the new server operating system version (Windows Server 2016 or Windows Server 2019). 
+ - Download the Azure File Sync agent for the new server operating system version (Windows Server 2016, Windows Server 2019, or Windows Server 2022). - Install the Azure File Sync agent and restart the server. Note: The Azure File Sync configuration settings on the server are retained when the agent is uninstalled and reinstalled. @@ -365,7 +365,7 @@ If you have an existing Windows file server 2012R2 or newer, Azure File Sync can Check out the [Azure File Sync and Azure file share migration overview](../files/storage-files-migration-overview.md?toc=%2fazure%2fstorage%2ffilesync%2ftoc.json) article where you can find detailed guidance for your scenario. ## Antivirus -Because antivirus works by scanning files for known malicious code, an antivirus product might cause the recall of tiered files, resulting in high egress charges. In versions 4.0 and above of the Azure File Sync agent, tiered files have the secure Windows attribute FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS set. We recommend consulting with your software vendor to learn how to configure their solution to skip reading files with this attribute set (many do it automatically). +Because antivirus works by scanning files for known malicious code, an antivirus product might cause the recall of tiered files, resulting in high egress charges. Tiered files have the secure Windows attribute FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS set and we recommend consulting with your software vendor to learn how to configure their solution to skip reading files with this attribute set (many do it automatically). Microsoft's in-house antivirus solutions, Windows Defender and System Center Endpoint Protection (SCEP), both automatically skip reading files that have this attribute set. We have tested them and identified one minor issue: when you add a server to an existing sync group, files smaller than 800 bytes are recalled (downloaded) on the new server. These files will remain on the new server and will not be tiered since they do not meet the tiering size requirement (>64kb). @@ -377,14 +377,11 @@ If cloud tiering is enabled, solutions that directly back up the server endpoint If you prefer to use an on-premises backup solution, backups should be performed on a server in the sync group that has cloud tiering disabled. When performing a restore, use the volume-level or file-level restore options. Files restored using the file-level restore option will be synced to all endpoints in the sync group and existing files will be replaced with the version restored from backup. Volume-level restores will not replace newer file versions in the Azure file share or other server endpoints. -> [!WARNING] -> If you need to use Robocopy /B with an Azure File Sync agent running on either source or target server, please upgrade to Azure File Sync agent version v12.0 or above. Using Robocopy /B with agent versions less than v12.0 will lead to the corruption of tiered files during the copy. - > [!Note] > Bare-metal (BMR) restore can cause unexpected results and is not currently supported. > [!Note] -> With Version 9 of the Azure File Sync agent, VSS snapshots (including Previous Versions tab) are now supported on volumes which have cloud tiering enabled. However, you must enable previous version compatibility through PowerShell. [Learn how](file-sync-deployment-guide.md#self-service-restore-through-previous-versions-and-vss-volume-shadow-copy-service). +> VSS snapshots (including Previous Versions tab) are supported on volumes which have cloud tiering enabled. 
However, you must enable previous version compatibility through PowerShell. [Learn how](file-sync-deployment-guide.md#optional-self-service-restore-through-previous-versions-and-vss-volume-shadow-copy-service). ## Data Classification If you have data classification software installed, enabling cloud tiering may result in increased cost for two reasons: diff --git a/articles/storage/file-sync/file-sync-release-notes.md b/articles/storage/file-sync/file-sync-release-notes.md index 1b0377e0880f9..ee1f97e906ac0 100644 --- a/articles/storage/file-sync/file-sync-release-notes.md +++ b/articles/storage/file-sync/file-sync-release-notes.md @@ -5,7 +5,7 @@ services: storage author: wmgries ms.service: storage ms.topic: conceptual -ms.date: 3/30/2022 +ms.date: 6/06/2022 ms.author: wgries ms.subservice: files --- @@ -24,14 +24,13 @@ The following Azure File Sync agent versions are supported: | V14.1 Release - [KB5001873](https://support.microsoft.com/topic/d06b8723-c4cf-4c64-b7ec-3f6635e044c5)| 14.1.0.0 | December 1, 2021 | Supported | | V14 Release - [KB5001872](https://support.microsoft.com/topic/92290aa1-75de-400f-9442-499c44c92a81)| 14.0.0.0 | October 29, 2021 | Supported | | V13 Release - [KB4588753](https://support.microsoft.com/topic/632fb833-42ed-4e4d-8abd-746bd01c1064)| 13.0.0.0 | July 12, 2021 | Supported - Agent version expires on August 8, 2022 | -| V12.1 Release - [KB4588751](https://support.microsoft.com/topic/497dc33c-d38b-42ca-8015-01c906b96132)| 12.1.0.0 | May 20, 2021 | Supported - Agent version expires on May 23, 2022 | -| V12 Release - [KB4568585](https://support.microsoft.com/topic/b9605f04-b4af-4ad8-86b0-2c490c535cfd)| 12.0.0.0 | March 26, 2021 | Supported - Agent version expires on May 23, 2022 | ## Unsupported versions The following Azure File Sync agent versions have expired and are no longer supported: | Milestone | Agent version number | Release date | Status | |----|----------------------|--------------|------------------| +| V12 Release | 12.0.0.0 - 12.1.0.0 | N/A | Not Supported - Agent versions expired on May 23, 2022 | | V11 Release | 11.1.0.0 - 11.3.0.0 | N/A | Not Supported - Agent versions expired on March 28, 2022 | | V10 Release | 10.0.0.0 - 10.1.0.0 | N/A | Not Supported - Agent versions expired on June 28, 2021 | | V9 Release | 9.0.0.0 - 9.1.0.0 | N/A | Not Supported - Agent versions expired on February 16, 2021 | @@ -83,18 +82,7 @@ The following release notes are for version 15.0.0.0 of the Azure File Sync agen ```powershell Import-Module "C:\Program Files\Azure\StorageSyncAgent\StorageSync.Management.ServerCmdlets.dll" Debug-StorageSyncServer -AFSDiag -OutputDirectory C:\output -KernelModeTraceLevel Verbose -UserModeTraceLevel Verbose - ``` -- Immediately run server change enumeration to detect files changes that were missed by USN journal - - Azure File Sync uses the Windows USN journal feature on Windows Server to immediately detect files that were changed and upload them to the Azure file share. If files changed are missed due to journal wrap or other issues, the files will not sync to the Azure file share until the changes are detected. Azure File Sync has a server change enumeration job that runs every 24 hours on the server endpoint path to detect changes that were missed by the USN journal. If you don't want to wait until the next server change enumeration job runs, you can now use the Invoke-StorageSyncServerChangeDetection PowerShell cmdlet to immediately run server change enumeration on a server endpoint path. 
- - To immediately run server change enumeration on a server endpoint path, run the following PowerShell commands: - ```powershell - Import-Module "C:\Program Files\Azure\StorageSyncAgent\StorageSync.Management.ServerCmdlets.dll" - Invoke-StorageSyncServerChangeDetection -ServerEndpointPath - ``` - > [!Note] - >By default, the server change enumeration scan will only check the modified timestamp. To perform a deeper check, use the -DeepScan parameter. - + ``` - Miscellaneous improvements - Reliability and telemetry improvements for cloud tiering and sync. @@ -313,95 +301,3 @@ The following items don't sync, but the rest of the system continues to operate ### Cloud tiering - If a tiered file is copied to another location by using Robocopy, the resulting file isn't tiered. The offline attribute might be set because Robocopy incorrectly includes that attribute in copy operations. - When copying files using robocopy, use the /MIR option to preserve file timestamps. This will ensure older files are tiered sooner than recently accessed files. - -## Agent version 12.1.0.0 -The following release notes are for version 12.1.0.0 of the Azure File Sync agent released May 20, 2021. These notes are in addition to the release notes listed for version 12.0.0.0. - -### Improvements and issues that are fixed -The v12.0 agent release had two bugs which are fixed in this release: -- Agent auto-update fails to update the agent to a later version. -- FileSyncErrorsReport.ps1 script does not provide the list of per-item errors. - -## Agent version 12.0.0.0 -The following release notes are for version 12.0.0.0 of the Azure File Sync agent (released March 26, 2021). - -### Improvements and issues that are fixed -- New portal experience to configure network access policy and private endpoint connections - - You can now use the portal to disable access to the Storage Sync Service public endpoint and to approve, reject and remove private endpoint connections. To configure the network access policy and private endpoint connections, open the Storage Sync Service portal, go to the Settings section and click Network. - -- Cloud Tiering support for volume cluster sizes larger than 64KiB - - Cloud Tiering now supports volume cluster sizes up to 2MiB on Server 2019. To learn more, see [What is the minimum file size for a file to tier?](./file-sync-choose-cloud-tiering-policies.md#minimum-file-size-for-a-file-to-tier). - -- Measure bandwidth and latency to Azure File Sync service and storage account - - The Test-StorageSyncNetworkConnectivity cmdlet can now be used to measure latency and bandwidth to the Azure File Sync service and storage account. Latency to the Azure File Sync service and storage account is measured by default when running the cmdlet. Upload and download bandwidth to the storage account is measured when using the "-MeasureBandwidth" parameter. - - For example, to measure bandwidth and latency to the Azure File Sync service and storage account, run the following PowerShell commands: - - ```powershell - Import-Module "C:\Program Files\Azure\StorageSyncAgent\StorageSync.Management.ServerCmdlets.dll" - Test-StorageSyncNetworkConnectivity -MeasureBandwidth - ``` - -- Improved error messages in the portal when server endpoint creation fails - - We heard your feedback and have improved the error messages and guidance when server endpoint creation fails. - -- Miscellaneous performance and reliability improvements - - Improved change detection performance to detect files that have changed in the Azure file share. 
- - Performance improvements for reconciliation sync sessions. - - Sync improvements to reduce ECS_E_SYNC_METADATA_KNOWLEDGE_SOFT_LIMIT_REACHED and ECS_E_SYNC_METADATA_KNOWLEDGE_LIMIT_REACHED errors. - - Fixed a bug that causes data corruption if cloud tiering is enabled and tiered files are copied using Robocopy with the /B parameter. - - Fixed a bug that can cause files to fail to tier on Server 2019 if Data Deduplication is enabled on the volume. - - Fixed a bug that can cause AFSDiag to fail to compress files if a file is larger than 2GiB. - -### Evaluation Tool -Before deploying Azure File Sync, you should evaluate whether it is compatible with your system using the Azure File Sync evaluation tool. This tool is an Azure PowerShell cmdlet that checks for potential issues with your file system and dataset, such as unsupported characters or an unsupported OS version. For installation and usage instructions, see [Evaluation Tool](file-sync-planning.md#evaluation-cmdlet) section in the planning guide. - -### Agent installation and server configuration -For more information on how to install and configure the Azure File Sync agent with Windows Server, see [Planning for an Azure File Sync deployment](file-sync-planning.md) and [How to deploy Azure File Sync](file-sync-deployment-guide.md). - -- A restart is required for servers that have an existing Azure File Sync agent installation. -- The agent installation package must be installed with elevated (admin) permissions. -- The agent is not supported on Nano Server deployment option. -- The agent is supported only on Windows Server 2019, Windows Server 2016, and Windows Server 2012 R2. -- The agent requires at least 2 GiB of memory. If the server is running in a virtual machine with dynamic memory enabled, the VM should be configured with a minimum 2048 MiB of memory. See [Recommended system resources](file-sync-planning.md#recommended-system-resources) for more information. -- The Storage Sync Agent (FileSyncSvc) service does not support server endpoints located on a volume that has the system volume information (SVI) directory compressed. This configuration will lead to unexpected results. - -### Interoperability -- Antivirus, backup, and other applications that access tiered files can cause undesirable recall unless they respect the offline attribute and skip reading the content of those files. For more information, see [Troubleshoot Azure File Sync](file-sync-troubleshoot.md). -- File Server Resource Manager (FSRM) file screens can cause endless sync failures when files are blocked because of the file screen. -- Running sysprep on a server that has the Azure File Sync agent installed is not supported and can lead to unexpected results. The Azure File Sync agent should be installed after deploying the server image and completing sysprep mini-setup. - -### Sync limitations -The following items don't sync, but the rest of the system continues to operate normally: -- Files with unsupported characters. See [Troubleshooting guide](file-sync-troubleshoot.md#handling-unsupported-characters) for list of unsupported characters. -- Files or directories that end with a period. -- Paths that are longer than 2,048 characters. -- The system access control list (SACL) portion of a security descriptor that's used for auditing. -- Extended attributes. -- Alternate data streams. -- Reparse points. -- Hard links. -- Compression (if it's set on a server file) isn't preserved when changes sync to that file from other endpoints. 
-- Any file that's encrypted with EFS (or other user mode encryption) that prevents the service from reading the data. - - > [!Note] - > Azure File Sync always encrypts data in transit. Data is always encrypted at rest in Azure. - -### Server endpoint -- A server endpoint can be created only on an NTFS volume. ReFS, FAT, FAT32, and other file systems aren't currently supported by Azure File Sync. -- Cloud tiering is not supported on the system volume. To create a server endpoint on the system volume, disable cloud tiering when creating the server endpoint. -- Failover Clustering is supported only with clustered disks, but not with Cluster Shared Volumes (CSVs). -- A server endpoint can't be nested. It can coexist on the same volume in parallel with another endpoint. -- Do not store an OS or application paging file within a server endpoint location. -- The server name in the portal is not updated if the server is renamed. - -### Cloud endpoint -- Azure File Sync supports making changes to the Azure file share directly. However, any changes made on the Azure file share first need to be discovered by an Azure File Sync change detection job. A change detection job is initiated for a cloud endpoint once every 24 hours. To immediately sync files that are changed in the Azure file share, the [Invoke-AzStorageSyncChangeDetection](/powershell/module/az.storagesync/invoke-azstoragesyncchangedetection) PowerShell cmdlet can be used to manually initiate the detection of changes in the Azure file share. In addition, changes made to an Azure file share over the REST protocol will not update the SMB last modified time and will not be seen as a change by sync. -- The storage sync service and/or storage account can be moved to a different resource group, subscription, or Azure AD tenant. After the storage sync service or storage account is moved, you need to give the Microsoft.StorageSync application access to the storage account (see [Ensure Azure File Sync has access to the storage account](file-sync-troubleshoot.md?tabs=portal1%252cportal#troubleshoot-rbac)). - - > [!Note] - > When creating the cloud endpoint, the storage sync service and storage account must be in the same Azure AD tenant. Once the cloud endpoint is created, the storage sync service and storage account can be moved to different Azure AD tenants. - -### Cloud tiering -- If a tiered file is copied to another location by using Robocopy, the resulting file isn't tiered. The offline attribute might be set because Robocopy incorrectly includes that attribute in copy operations. -- When copying files using robocopy, use the /MIR option to preserve file timestamps. This will ensure older files are tiered sooner than recently accessed files. diff --git a/articles/storage/file-sync/file-sync-server-endpoint-create.md b/articles/storage/file-sync/file-sync-server-endpoint-create.md index 9b6c78dce34bc..b7ef372618085 100644 --- a/articles/storage/file-sync/file-sync-server-endpoint-create.md +++ b/articles/storage/file-sync/file-sync-server-endpoint-create.md @@ -91,7 +91,14 @@ As part of this section, a choice can be made for how content from the Azure fil :::column-end::: :::row-end::: -Once you selected an initial download option, you cannot change it after you confirm to create the server endpoint. How files appear on the server after initial download finishes, depends on your use of the cloud tiering feature and whether or not you opted to [proactively recall changes in the cloud](file-sync-cloud-tiering-overview.md#proactive-recalling). 
The latter is a feature useful for sync groups with multiple server endpoints in different geographic locations. +Once you select an initial download option, you cannot change it after you confirm the creation of the server endpoint. + +> [!NOTE] +> To improve the file download performance when adding a server endpoint to a sync group, use the [Invoke-StorageSyncFileRecall](file-sync-how-to-manage-tiered-files.md#how-to-recall-a-tiered-file-to-disk) cmdlet. + +### File download behavior once initial download completes + +How files appear on the server after the initial download finishes depends on your use of the cloud tiering feature and whether or not you opted to [proactively recall changes in the cloud](file-sync-cloud-tiering-overview.md#proactive-recalling). The latter feature is useful for sync groups with multiple server endpoints in different geographic locations. * **Cloud tiering is enabled**
                    New and changed files from other server endpoints will appear as tiered files on this server endpoint. These changes will only come down as full files if you opted for [proactive recall](file-sync-cloud-tiering-overview.md#proactive-recalling) of changes in the Azure file share by other server endpoints. * **Cloud tiering is disabled**
                    New and changed files from other server endpoints will appear as full files on this server endpoint. They will not appear as tiered files first and then recalled. Tiered files with cloud tiering off are a fast disaster recovery feature and appear only during initial provisioning. diff --git a/articles/storage/file-sync/file-sync-troubleshoot.md b/articles/storage/file-sync/file-sync-troubleshoot.md index 58df31ac64a2a..cb8a382ef82ca 100644 --- a/articles/storage/file-sync/file-sync-troubleshoot.md +++ b/articles/storage/file-sync/file-sync-troubleshoot.md @@ -4,7 +4,7 @@ description: Troubleshoot common issues in a deployment on Azure File Sync, whic author: khdownie ms.service: storage ms.topic: troubleshooting -ms.date: 11/2/2021 +ms.date: 6/2/2022 ms.author: kendownie ms.subservice: files ms.custom: devx-track-azurepowershell @@ -116,17 +116,8 @@ If a server is not listed under **Registered servers** for a Storage Sync Servic ### Cloud endpoint creation errors -**Cloud endpoint creation fails, with this error: "The specified Azure FileShare is already in use by a different CloudEndpoint"** -This error occurs if the Azure file share is already in use by another cloud endpoint. - -If you see this message and the Azure file share currently is not in use by a cloud endpoint, complete the following steps to clear the Azure File Sync metadata on the Azure file share: - -> [!Warning] -> Deleting the metadata on an Azure file share that is currently in use by a cloud endpoint causes Azure File Sync operations to fail. If you then use this file share for sync in a different sync group, data loss for files in the old sync group is almost certain. - -1. In the Azure portal, go to your Azure file share.   -2. Right-click the Azure file share, and then select **Edit metadata**. -3. Right-click **SyncService**, and then select **Delete**. +**Cloud endpoint creation fails, with this error: "MgmtInternalError"** +This error can occur if the Azure File Sync service cannot access the storage account due to SMB security settings. To enable Azure File Sync to access the storage account, the SMB security settings on the storage account must allow **SMB 3.1.1** protocol version, **NTLM v2** authentication and **AES-128-GCM** encryption. To check the SMB security settings on the storage account, see [SMB security settings](../files/files-smb-protocol.md#smb-security-settings). **Cloud endpoint creation fails, with this error: "AuthorizationFailed"** This error occurs if your user account doesn't have sufficient rights to create a cloud endpoint. @@ -150,6 +141,18 @@ To determine whether your user account role has the required permissions: * **Role assignment** should have **Read** and **Write** permissions. * **Role definition** should have **Read** and **Write** permissions. +**Cloud endpoint creation fails, with this error: "The specified Azure FileShare is already in use by a different CloudEndpoint"** +This error occurs if the Azure file share is already in use by another cloud endpoint. + +If you see this message and the Azure file share currently is not in use by a cloud endpoint, complete the following steps to clear the Azure File Sync metadata on the Azure file share: + +> [!Warning] +> Deleting the metadata on an Azure file share that is currently in use by a cloud endpoint causes Azure File Sync operations to fail. If you then use this file share for sync in a different sync group, data loss for files in the old sync group is almost certain. + +1. 
In the Azure portal, go to your Azure file share.   +2. Right-click the Azure file share, and then select **Edit metadata**. +3. Right-click **SyncService**, and then select **Delete**. + ### Server endpoint creation and deletion errors **Server endpoint creation fails, with this error: "MgmtServerJobFailed" (Error code: -2134375898 or 0x80c80226)** @@ -358,7 +361,7 @@ To see these errors, run the **FileSyncErrorsReport.ps1** PowerShell script (loc | 0x80c8027C | -2134375812 | ECS_E_ACCESS_DENIED_EFS | The file is encrypted by an unsupported solution (like NTFS EFS). | Decrypt the file and use a supported encryption solution. For a list of support solutions, see the [Encryption](file-sync-planning.md#encryption) section of the planning guide. | | 0x80c80283 | -2160591491 | ECS_E_ACCESS_DENIED_DFSRRO | The file is located on a DFS-R read-only replication folder. | File is located on a DFS-R read-only replication folder. Azure Files Sync does not support server endpoints on DFS-R read-only replication folders. See [planning guide](file-sync-planning.md#distributed-file-system-dfs) for more information. | | 0x80070005 | -2147024891 | ERROR_ACCESS_DENIED | The file has a delete pending state. | No action required. File will be deleted once all open file handles are closed. | -| 0x80c86044 | -2134351804 | ECS_E_AZURE_AUTHORIZATION_FAILED | The file cannot be synced because the firewall and virtual network settings on the storage account are enabled and the server does not have access to the storage account. | Add the Server IP address or virtual network by following the steps documented in the [Configure firewall and virtual network settings](file-sync-deployment-guide.md?tabs=azure-portal#configure-firewall-and-virtual-network-settings) section in the deployment guide. | +| 0x80c86044 | -2134351804 | ECS_E_AZURE_AUTHORIZATION_FAILED | The file cannot be synced because the firewall and virtual network settings on the storage account are enabled and the server does not have access to the storage account. | Add the Server IP address or virtual network by following the steps documented in the [Configure firewall and virtual network settings](file-sync-deployment-guide.md?tabs=azure-portal#optional-configure-firewall-and-virtual-network-settings) section in the deployment guide. | | 0x80c80243 | -2134375869 | ECS_E_SECURITY_DESCRIPTOR_SIZE_TOO_LARGE | The file cannot be synced because the security descriptor size exceeds the 64 KiB limit. | To resolve this issue, remove access control entries (ACE) on the file to reduce the security descriptor size. | | 0x8000ffff | -2147418113 | E_UNEXPECTED | The file cannot be synced due to an unexpected error. | If the error persists for several days, please open a support case. | | 0x80070020 | -2147024864 | ERROR_SHARING_VIOLATION | The file cannot be synced because it's in use. The file will be synced when it's no longer in use. | No action required. | @@ -469,7 +472,7 @@ This error occurs because the Azure File Sync agent cannot access the Azure file 1. [Verify the storage account exists.](#troubleshoot-storage-account) 2. [Ensure the Azure file share exists.](#troubleshoot-azure-file-share) 3. [Ensure Azure File Sync has access to the storage account.](#troubleshoot-rbac) -4. [Verify the firewall and virtual network settings on the storage account are configured properly (if enabled)](file-sync-deployment-guide.md?tabs=azure-portal#configure-firewall-and-virtual-network-settings) +4. 
[Verify the firewall and virtual network settings on the storage account are configured properly (if enabled)](file-sync-deployment-guide.md?tabs=azure-portal#optional-configure-firewall-and-virtual-network-settings) **Sync failed because the request is not authorized to perform this operation.** @@ -484,7 +487,7 @@ This error occurs because the Azure File Sync agent is not authorized to access 1. [Verify the storage account exists.](#troubleshoot-storage-account) 2. [Ensure the Azure file share exists.](#troubleshoot-azure-file-share) -3. [Verify the firewall and virtual network settings on the storage account are configured properly (if enabled)](file-sync-deployment-guide.md?tabs=azure-portal#configure-firewall-and-virtual-network-settings) +3. [Verify the firewall and virtual network settings on the storage account are configured properly (if enabled)](file-sync-deployment-guide.md?tabs=azure-portal#optional-configure-firewall-and-virtual-network-settings) 4. [Ensure Azure File Sync has access to the storage account.](#troubleshoot-rbac) **The storage account name used could not be resolved.** @@ -502,7 +505,7 @@ This error occurs because the Azure File Sync agent is not authorized to access Test-NetConnection -ComputerName .file.core.windows.net -Port 443 ``` 2. [Verify the storage account exists.](#troubleshoot-storage-account) -3. [Verify the firewall and virtual network settings on the storage account are configured properly (if enabled)](file-sync-deployment-guide.md?tabs=azure-portal#configure-firewall-and-virtual-network-settings) +3. [Verify the firewall and virtual network settings on the storage account are configured properly (if enabled)](file-sync-deployment-guide.md?tabs=azure-portal#optional-configure-firewall-and-virtual-network-settings) > [!Note] > Once network connectivity to the Azure File Sync service is restored, sync may not resume immediately. By default, Azure File Sync will initiate a sync session every 30 minutes if no changes are detected within the server endpoint location. To force a sync session, restart the Storage Sync Agent (FileSyncSvc) service or make a change to a file or directory within the server endpoint location. @@ -517,7 +520,7 @@ This error occurs because the Azure File Sync agent is not authorized to access | **Remediation required** | Yes | 1. [Verify the storage account exists.](#troubleshoot-storage-account) -2. [Verify the firewall and virtual network settings on the storage account are configured properly (if enabled)](file-sync-deployment-guide.md?tabs=azure-portal#configure-firewall-and-virtual-network-settings) +2. [Verify the firewall and virtual network settings on the storage account are configured properly (if enabled)](file-sync-deployment-guide.md?tabs=azure-portal#optional-configure-firewall-and-virtual-network-settings) **Sync failed due to storage account locked.** @@ -621,7 +624,7 @@ This error occurs when the Azure subscription is suspended. Sync will be reenabl | **Error string** | ECS_E_SERVER_BLOCKED_BY_NETWORK_ACL | | **Remediation required** | Yes | -This error occurs when the Azure file share is inaccessible because of a storage account firewall or because the storage account belongs to a virtual network. Verify the firewall and virtual network settings on the storage account are configured properly. For more information, see [Configure firewall and virtual network settings](file-sync-deployment-guide.md?tabs=azure-portal#configure-firewall-and-virtual-network-settings). 
+This error occurs when the Azure file share is inaccessible because of a storage account firewall or because the storage account belongs to a virtual network. Verify the firewall and virtual network settings on the storage account are configured properly. For more information, see [Configure firewall and virtual network settings](file-sync-deployment-guide.md?tabs=azure-portal#optional-configure-firewall-and-virtual-network-settings). **Sync failed due to a problem with the sync database.** @@ -893,9 +896,9 @@ Verify you have the latest Azure File Sync agent version installed and give the | **Error string** | ECS_E_MGMT_STORAGEACLSBYPASSNOTSET | | **Remediation required** | Yes | -This error occurs if the firewall and virtual network settings are enabled on the storage account and the "Allow trusted Microsoft services to access this storage account" exception is not checked. To resolve this issue, follow the steps documented in the [Configure firewall and virtual network settings](file-sync-deployment-guide.md?tabs=azure-portal#configure-firewall-and-virtual-network-settings) section in the deployment guide. +This error occurs if the firewall and virtual network settings are enabled on the storage account and the "Allow trusted Microsoft services to access this storage account" exception is not checked. To resolve this issue, follow the steps documented in the [Configure firewall and virtual network settings](file-sync-deployment-guide.md?tabs=azure-portal#optional-configure-firewall-and-virtual-network-settings) section in the deployment guide. -**Sync failed because permissions on the System Volume Information folder are incorrect.** +**Sync failed with access denied due to security settings on the storage account or NTFS permissions on the server.** | Error | Code | |-|-| @@ -904,15 +907,17 @@ This error occurs if the firewall and virtual network settings are enabled on th | **Error string** | ERROR_ACCESS_DENIED | | **Remediation required** | Yes | -This error can occur if the NT AUTHORITY\SYSTEM account does not have permissions to the System Volume Information folder on the volume where the server endpoint is located. Note, if individual files are failing to sync with ERROR_ACCESS_DENIED, perform the steps documented in the [Troubleshooting per file/directory sync errors](?tabs=portal1%252cazure-portal#troubleshooting-per-filedirectory-sync-errors) section. +This error can occur if Azure File Sync cannot access the storage account due to security settings or if the NT AUTHORITY\SYSTEM account does not have permissions to the System Volume Information folder on the volume where the server endpoint is located. Note, if individual files are failing to sync with ERROR_ACCESS_DENIED, perform the steps documented in the [Troubleshooting per file/directory sync errors](?tabs=portal1%252cazure-portal#troubleshooting-per-filedirectory-sync-errors) section. -To resolve this issue, perform the following steps: +1. Verify the **SMB security settings** on the storage account are allowing **SMB 3.1.1** protocol version, **NTLM v2** authentication and **AES-128-GCM** encryption. To check the SMB security settings on the storage account, see [SMB security settings](../files/files-smb-protocol.md#smb-security-settings). +2. [Verify the firewall and virtual network settings on the storage account are configured properly (if enabled)](file-sync-deployment-guide.md?tabs=azure-portal#optional-configure-firewall-and-virtual-network-settings) +3. 
Verify the **NT AUTHORITY\SYSTEM** account has permissions to the System Volume Information folder on the volume where the server endpoint is located by performing the following steps: -1. Download [Psexec](/sysinternals/downloads/psexec) tool. -2. Run the following command from an elevated command prompt to launch a command prompt using the system account: `PsExec.exe -i -s -d cmd` -3. From the command prompt running under the system account, run the following command to confirm the NT AUTHORITY\SYSTEM account does not have access to the System Volume Information folder: `cacls "drive letter:\system volume information" /T /C` -4. If the NT AUTHORITY\SYSTEM account does not have access to the System Volume Information folder, run the following command: `cacls "drive letter:\system volume information" /T /E /G "NT AUTHORITY\SYSTEM:F"` - - If step #4 fails with access denied, run the following command to take ownership of the System Volume Information folder and then repeat step #4: `takeown /A /R /F "drive letter:\System Volume Information"` + a. Download [Psexec](/sysinternals/downloads/psexec) tool. + b. Run the following command from an elevated command prompt to launch a command prompt using the system account: `PsExec.exe -i -s -d cmd` + c. From the command prompt running under the system account, run the following command to confirm the NT AUTHORITY\SYSTEM account does not have access to the System Volume Information folder: `cacls "drive letter:\system volume information" /T /C` + d. If the NT AUTHORITY\SYSTEM account does not have access to the System Volume Information folder, run the following command: `cacls "drive letter:\system volume information" /T /E /G "NT AUTHORITY\SYSTEM:F"` + - If step #d fails with access denied, run the following command to take ownership of the System Volume Information folder and then repeat step #d: `takeown /A /R /F "drive letter:\System Volume Information"` **Sync failed because the Azure file share was deleted and recreated.** @@ -1235,7 +1240,7 @@ If files fail to be recalled: | 0x80070079 | -2147942521 | ERROR_SEM_TIMEOUT | The file failed to recall due to an I/O timeout. This issue can occur for several reasons: server resource constraints, poor network connectivity or an Azure storage issue (for example, throttling). | No action required. If the error persists for several hours, please open a support case. | | 0x80070036 | -2147024842 | ERROR_NETWORK_BUSY | The file failed to recall due to a network issue. | If the error persists, check network connectivity to the Azure file share. | | 0x80c80037 | -2134376393 | ECS_E_SYNC_SHARE_NOT_FOUND | The file failed to recall because the server endpoint was deleted. | To resolve this issue, see [Tiered files are not accessible on the server after deleting a server endpoint](?tabs=portal1%252cazure-portal#tiered-files-are-not-accessible-on-the-server-after-deleting-a-server-endpoint). | -| 0x80070005 | -2147024891 | ERROR_ACCESS_DENIED | The file failed to recall due to an access denied error. This issue can occur if the firewall and virtual network settings on the storage account are enabled and the server does not have access to the storage account. | To resolve this issue, add the Server IP address or virtual network by following the steps documented in the [Configure firewall and virtual network settings](file-sync-deployment-guide.md?tabs=azure-portal#configure-firewall-and-virtual-network-settings) section in the deployment guide. 
| +| 0x80070005 | -2147024891 | ERROR_ACCESS_DENIED | The file failed to recall due to an access denied error. This issue can occur if the firewall and virtual network settings on the storage account are enabled and the server does not have access to the storage account. | To resolve this issue, add the Server IP address or virtual network by following the steps documented in the [Configure firewall and virtual network settings](file-sync-deployment-guide.md?tabs=azure-portal#optional-configure-firewall-and-virtual-network-settings) section in the deployment guide. | | 0x80c86002 | -2134351870 | ECS_E_AZURE_RESOURCE_NOT_FOUND | The file failed to recall because it's not accessible in the Azure file share. | To resolve this issue, verify the file exists in the Azure file share. If the file exists in the Azure file share, upgrade to the latest Azure File Sync [agent version](file-sync-release-notes.md#supported-versions). | | 0x80c8305f | -2134364065 | ECS_E_EXTERNAL_STORAGE_ACCOUNT_AUTHORIZATION_FAILED | The file failed to recall due to authorization failure to the storage account. | To resolve this issue, verify [Azure File Sync has access to the storage account](?tabs=portal1%252cazure-portal#troubleshoot-rbac). | | 0x80c86030 | -2134351824 | ECS_E_AZURE_FILE_SHARE_NOT_FOUND | The file failed to recall because the Azure file share is not accessible. | Verify the file share exists and is accessible. If the file share was deleted and recreated, perform the steps documented in the [Sync failed because the Azure file share was deleted and recreated](?tabs=portal1%252cazure-portal#-2134375810) section to delete and recreate the sync group. | @@ -1330,7 +1335,7 @@ Antivirus, backup, and other applications that read large numbers of files cause Consult with your software vendor to learn how to configure their solution to skip reading offline files. -Unintended recalls also might occur in other scenarios, like when you are browsing files in File Explorer. Opening a folder that has cloud-tiered files in File Explorer on the server might result in unintended recalls. This is even more likely if an antivirus solution is enabled on the server. +Unintended recalls also might occur in other scenarios, like when you are browsing cloud-tiered files in File Explorer. This is likely to occur on Windows Server 2016 if the folder contains executable files. File Explorer was improved for Windows Server 2019 and later to better handle offline files. > [!NOTE] >Use Event ID 9059 in the Telemetry event log to determine which application(s) is causing recalls. This event provides application recall distribution for a server endpoint and is logged once an hour. diff --git a/articles/storage/files/files-nfs-protocol.md b/articles/storage/files/files-nfs-protocol.md index 671666cd7e509..bb2f001130e88 100644 --- a/articles/storage/files/files-nfs-protocol.md +++ b/articles/storage/files/files-nfs-protocol.md @@ -4,7 +4,7 @@ description: Learn about file shares hosted in Azure Files using the Network Fil author: khdownie ms.service: storage ms.topic: conceptual -ms.date: 04/19/2022 +ms.date: 05/25/2022 ms.author: kendownie ms.subservice: files ms.custom: references_regions @@ -16,7 +16,7 @@ Azure Files offers two industry-standard file system protocols for mounting Azur This article covers NFS Azure file shares. For information about SMB Azure file shares, see [SMB file shares in Azure Files](files-smb-protocol.md). 
> [!IMPORTANT] -> Before using NFS file shares for production, see the [Troubleshoot Azure NFS file shares](storage-troubleshooting-files-nfs.md) article for a list of known issues. +> NFS Azure file shares are not supported for Windows clients. Before using NFS Azure file shares for production, see the [Troubleshoot NFS Azure file shares](storage-troubleshooting-files-nfs.md) article for a list of known issues. ## Common scenarios NFS file shares are often used in the following scenarios: @@ -88,11 +88,11 @@ NFS Azure file shares are only offered on premium file shares, which store data ## Workloads > [!IMPORTANT] -> Before using NFS file shares for production, see the [Troubleshoot Azure NFS file shares](storage-troubleshooting-files-nfs.md) article for a list of known issues. +> Before using NFS Azure file shares for production, see [Troubleshoot NFS Azure file shares](storage-troubleshooting-files-nfs.md) for a list of known issues. NFS has been validated to work well with workloads such as SAP application layer, database backups, database replication, messaging queues, home directories for general purpose file servers, and content repositories for application workloads. -The following workloads have known issues. See the [Troubleshoot Azure NFS file shares](storage-troubleshooting-files-nfs.md) article for list of known issues: +The following workloads have known issues: - Oracle Database will experience incompatibility with its dNFS feature. diff --git a/articles/storage/files/media/storage-files-networking-overview/smb-over-quic.png b/articles/storage/files/media/storage-files-networking-overview/smb-over-quic.png new file mode 100644 index 0000000000000..a202fd43e0a50 Binary files /dev/null and b/articles/storage/files/media/storage-files-networking-overview/smb-over-quic.png differ diff --git a/articles/storage/files/media/storage-how-to-use-files-windows/3_mountonwindows10.png b/articles/storage/files/media/storage-how-to-use-files-windows/3_mountonwindows10.png deleted file mode 100644 index 6e0cd05f845b8..0000000000000 Binary files a/articles/storage/files/media/storage-how-to-use-files-windows/3_mountonwindows10.png and /dev/null differ diff --git a/articles/storage/files/media/storage-how-to-use-files-windows/credentials-use-a-different-account.png b/articles/storage/files/media/storage-how-to-use-files-windows/credentials-use-a-different-account.png new file mode 100644 index 0000000000000..93a2a7395581f Binary files /dev/null and b/articles/storage/files/media/storage-how-to-use-files-windows/credentials-use-a-different-account.png differ diff --git a/articles/storage/files/media/storage-how-to-use-files-windows/files-portal-mounting-cmdlet-resize.png b/articles/storage/files/media/storage-how-to-use-files-windows/files-portal-mounting-cmdlet-resize.png index 1a198fd2c8eae..c39bac158ffb9 100644 Binary files a/articles/storage/files/media/storage-how-to-use-files-windows/files-portal-mounting-cmdlet-resize.png and b/articles/storage/files/media/storage-how-to-use-files-windows/files-portal-mounting-cmdlet-resize.png differ diff --git a/articles/storage/files/media/storage-how-to-use-files-windows/select-file-shares.png b/articles/storage/files/media/storage-how-to-use-files-windows/select-file-shares.png index b46de4b9e311b..b2288ff7b1050 100644 Binary files a/articles/storage/files/media/storage-how-to-use-files-windows/select-file-shares.png and b/articles/storage/files/media/storage-how-to-use-files-windows/select-file-shares.png differ diff --git 
a/articles/storage/files/storage-files-active-directory-overview.md b/articles/storage/files/storage-files-active-directory-overview.md index d12f860873831..59516fcf51bbb 100644 --- a/articles/storage/files/storage-files-active-directory-overview.md +++ b/articles/storage/files/storage-files-active-directory-overview.md @@ -91,7 +91,7 @@ Identity-based authentication for Azure Files offers several benefits over using - **Enforce granular access control on Azure file shares** You can grant permissions to a specific identity at the share, directory, or file level. For example, suppose that you have several teams using a single Azure file share for project collaboration. You can grant all teams access to non-sensitive directories, while limiting access to directories containing sensitive financial data to your Finance team only. -- **Back up Windows ACLs (also known as NTFS) along with your data** +- **Back up Windows ACLs (also known as NTFS permissions) along with your data** You can use Azure file shares to back up your existing on-premises file shares. Azure Files preserves your ACLs along with your data when you back up a file share to Azure file shares over SMB. ## How it works diff --git a/articles/storage/files/storage-files-configure-p2s-vpn-windows.md b/articles/storage/files/storage-files-configure-p2s-vpn-windows.md index 242bdafeffeee..5daee00a8efc4 100644 --- a/articles/storage/files/storage-files-configure-p2s-vpn-windows.md +++ b/articles/storage/files/storage-files-configure-p2s-vpn-windows.md @@ -4,7 +4,7 @@ description: How to configure a Point-to-Site (P2S) VPN on Windows for use with author: khdownie ms.service: storage ms.topic: how-to -ms.date: 10/19/2019 +ms.date: 05/27/2022 ms.author: kendownie ms.subservice: files ms.custom: devx-track-azurepowershell @@ -31,8 +31,10 @@ The article details the steps to configure a Point-to-Site VPN on Windows (Windo - A virtual network with a private endpoint for the storage account containing the Azure file share you want to mount on-premises. To learn more about how to create a private endpoint, see [Configuring Azure Files network endpoints](storage-files-networking-endpoints.md?tabs=azure-powershell). +- A [gateway subnet](/azure/vpn-gateway/vpn-gateway-about-vpn-gateway-settings#gwsub) must be created on the virtual network. + ## Collect environment information -In order to set up the point-to-site VPN, we first need to collect some information about your environment for use throughout the guide. See the [prerequisites](#prerequisites) section if you have not already created a storage account, virtual network, and/or private endpoints. +In order to set up the point-to-site VPN, we first need to collect some information about your environment for use throughout the guide. See the [prerequisites](#prerequisites) section if you have not already created a storage account, virtual network, gateway subnet, and/or private endpoints. Remember to replace ``, ``, ``, and `` with the appropriate values for your environment. @@ -118,9 +120,14 @@ foreach($line in $rawRootCertificate) { ``` ## Deploy virtual network gateway -The Azure virtual network gateway is the service that your on-premises Windows machines will connect to. Deploying this service requires two basic components: a public IP that will identify the gateway to your clients wherever they are in the world and a root certificate you created earlier which will be used to authenticate your clients. 
+The Azure virtual network gateway is the service that your on-premises Windows machines will connect to. Before deploying the virtual network gateway, a [gateway subnet](/azure/vpn-gateway/vpn-gateway-about-vpn-gateway-settings#gwsub) must be created on the virtual network. + +Deploying this service requires two basic components: + +1. A public IP address that will identify the gateway to your clients wherever they are in the world +2. The root certificate you created earlier, which will be used to authenticate your clients -Remember to replace `` with the name you would like for these resources. +Remember to replace `` and `` in the below script with the proper values for these variables. > [!Note] > Deploying the Azure virtual network gateway can take up to 45 minutes. While this resource is being deployed, this PowerShell script will block for the deployment to be completed. This is expected. @@ -128,6 +135,7 @@ Remember to replace `` with the name you would like for t ```PowerShell $vpnName = "" $publicIpAddressName = "$vpnName-PublicIP" +$region = "" $publicIPAddress = New-AzPublicIpAddress ` -ResourceGroupName $resourceGroupName ` diff --git a/articles/storage/files/storage-files-faq.md b/articles/storage/files/storage-files-faq.md index 3818c3444077f..8c8e3bf848bb7 100644 --- a/articles/storage/files/storage-files-faq.md +++ b/articles/storage/files/storage-files-faq.md @@ -3,7 +3,7 @@ title: Frequently asked questions (FAQ) for Azure Files | Microsoft Docs description: Get answers to Azure Files frequently asked questions. You can mount Azure file shares concurrently on cloud or on-premises Windows, Linux, or macOS deployments. author: khdownie ms.service: storage -ms.date: 02/09/2022 +ms.date: 06/06/2022 ms.author: kendownie ms.subservice: files ms.topic: conceptual @@ -16,7 +16,7 @@ ms.topic: conceptual * **Can I have domain-joined and non-domain-joined servers in the same sync group?** - Yes. A sync group can contain server endpoints that have different Active Directory memberships, even if they are not domain-joined. Although this configuration technically works, we do not recommend this as a typical configuration because access control lists (ACLs) that are defined for files and folders on one server might not be able to be enforced by other servers in the sync group. For best results, we recommend syncing between servers that are in the same Active Directory forest, between servers that are in different Active Directory forests but which have established trust relationships, or between servers that are not in a domain. We recommend that you avoid using a mix of these configurations. + Yes. A sync group can contain server endpoints that have different Active Directory memberships, even if they are not domain-joined. Although this configuration technically works, we do not recommend this as a typical configuration because access control lists (ACLs) that are defined for files and folders on one server might not be able to be enforced by other servers in the sync group. For best results, we recommend syncing between servers that are in the same Active Directory forest, between servers that are in different Active Directory forests but have established trust relationships, or between servers that aren't in a domain. We recommend that you avoid using a mix of these configurations. * **I created a file directly in my Azure file share by using SMB or in the portal. 
How long does it take for the file to sync to the servers in the sync group?** @@ -25,7 +25,7 @@ ms.topic: conceptual * **If the same file is changed on two servers at approximately the same time, what happens?** - Azure File Sync uses a simple conflict-resolution strategy: we keep both changes to files that are changed in two endpoints at the same time. The most recently written change keeps the original file name. The older file (determined by LastWriteTime) has the endpoint name and the conflict number appended to the filename. For server endpoints, the endpoint name is the name of the server. For cloud endpoints, the endpoint name is **Cloud**. The name follows this taxonomy: + Azure File Sync uses a simple conflict-resolution strategy: we keep both changes to files that are changed in two endpoints at the same time. The most recently written change keeps the original file name. The older file (determined by LastWriteTime) has the endpoint name and the conflict number appended to the filename. For server endpoints, the endpoint name is the name of the server. For cloud endpoints, the endpoint name is **Cloud**. The name follows this taxonomy: \-\\[-#\].\ @@ -43,13 +43,13 @@ ms.topic: conceptual **Why are my tiered files not showing thumbnails or previews in Windows Explorer?** For tiered files, thumbnails and previews won't be visible at your server endpoint. This behavior is expected since the thumbnail cache feature in Windows intentionally skips reading files with the offline attribute. With Cloud Tiering enabled, reading through tiered files would cause them to be downloaded (recalled). - This behavior is not specific to Azure File Sync, Windows Explorer displays a "grey X" for any files that have the offline attribute set. You will see the X icon when accessing files over SMB. For a detailed explanation of this behavior, refer to [Why don’t I get thumbnails for files that are marked offline?](https://devblogs.microsoft.com/oldnewthing/20170503-00/?p=96105) + This behavior isn't specific to Azure File Sync. Windows Explorer displays a "grey X" for any files that have the offline attribute set. You'll see the X icon when accessing files over SMB. For a detailed explanation of this behavior, refer to [Why don’t I get thumbnails for files that are marked offline?](https://devblogs.microsoft.com/oldnewthing/20170503-00/?p=96105) - For questions on how to manage tiered files, please see [How to manage tiered files](../file-sync/file-sync-how-to-manage-tiered-files.md). + For questions on how to manage tiered files, see [How to manage tiered files](../file-sync/file-sync-how-to-manage-tiered-files.md). * **Why do tiered files exist outside of the server endpoint namespace?** - Prior to Azure File Sync agent version 3, Azure File Sync blocked the move of tiered files outside the server endpoint but on the same volume as the server endpoint. Copy operations, moves of non-tiered files, and moves of tiered to other volumes were unaffected. The reason for this behavior was the implicit assumption that File Explorer and other Windows APIs have that move operations on the same volume are (nearly) instantaneous rename operations. This means moves will make File Explorer or other move methods (such as command line or PowerShell) appear unresponsive while Azure File Sync recalls the data from the cloud. 
Starting with [Azure File Sync agent version 3.0.12.0](../file-sync/file-sync-release-notes.md#supported-versions), Azure File Sync will allow you to move a tiered file outside of the server endpoint. We avoid the negative effects previously mentioned by allowing the tiered file to exist as a tiered file outside of the server endpoint and then recalling the file in the background. This means that moves on the same volume are instantaneous, and we do all the work to recall the file to disk after the move has completed. + Prior to Azure File Sync agent version 3, Azure File Sync blocked the move of tiered files outside the server endpoint but on the same volume as the server endpoint. Copy operations, moves of non-tiered files, and moves of tiered files to other volumes were unaffected. The reason for this behavior was the implicit assumption that File Explorer and other Windows APIs have that move operations on the same volume are (nearly) instantaneous rename operations. This means moves will make File Explorer or other move methods (such as command line or PowerShell) appear unresponsive while Azure File Sync recalls the data from the cloud. Starting with [Azure File Sync agent version 3.0.12.0](../file-sync/file-sync-release-notes.md#supported-versions), Azure File Sync will allow you to move a tiered file outside of the server endpoint. We avoid the negative effects previously mentioned by allowing the tiered file to exist as a tiered file outside of the server endpoint and then recalling the file in the background. This means that moves on the same volume are instantaneous, and we do all the work to recall the file to disk after the move has completed. * **I'm having an issue with Azure File Sync on my server (sync, cloud tiering, etc.). Should I remove and recreate my server endpoint?** @@ -65,15 +65,15 @@ ms.topic: conceptual * **Does Azure File Sync preserve directory/file level NTFS ACLs along with data stored in Azure Files?** - As of February 24th, 2020, new and existing ACLs tiered by Azure file sync will be persisted in NTFS format, and ACL modifications made directly to the Azure file share will sync to all servers in the sync group. Any changes on ACLs made to Azure Files will sync down via Azure file sync. When copying data to Azure Files, make sure you use a copy tool that supports the necessary "fidelity" to copy attributes, timestamps and ACLs into an Azure file share - either via SMB or REST. When using Azure copy tools, such as AzCopy, it is important to use the latest version. Check the [file copy tools table](storage-files-migration-overview.md#file-copy-tools) to get an overview of Azure copy tools to ensure you can copy all of the important metadata of a file. + As of February 24, 2020, new and existing ACLs tiered by Azure file sync will be persisted in NTFS format, and ACL modifications made directly to the Azure file share will sync to all servers in the sync group. Any changes on ACLs made to Azure Files will sync down via Azure file sync. When copying data to Azure Files, make sure you use a copy tool that supports the necessary "fidelity" to copy attributes, timestamps and ACLs into an Azure file share - either via SMB or REST. When using Azure copy tools, such as AzCopy, it's important to use the latest version. Check the [file copy tools table](storage-files-migration-overview.md#file-copy-tools) to get an overview of Azure copy tools to ensure you can copy all of the important metadata of a file.
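As a purely illustrative sketch (the account, share, and SAS token below are placeholders, not values from this article), a recent AzCopy release can copy data into an Azure file share while keeping timestamps and NTFS ACLs by using the `--preserve-smb-info` and `--preserve-smb-permissions` flags:

```powershell
# Hypothetical example: copy a local folder into an Azure file share while preserving
# file timestamps and NTFS ACLs. Replace <account>, <share>, and <SAS-token> with your own values.
azcopy copy "C:\LocalData" "https://<account>.file.core.windows.net/<share>?<SAS-token>" `
    --recursive `
    --preserve-smb-info=true `
    --preserve-smb-permissions=true
```

The flags shown assume a current AzCopy version; older releases may not support them.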
If you have enabled Azure Backup on your file sync managed file shares, file ACLs can continue to be restored as part of the backup restore workflow. This works either for the entire share or individual files/directories. - If you are using snapshots as part of the self-managed backup solution for file shares managed by file sync, your ACLs may not be restored properly to NTFS ACLs if the snapshots were taken prior to February 24th, 2020. If this occurs, consider contacting Azure Support. + If you're using snapshots as part of the self-managed backup solution for file shares managed by file sync, your ACLs may not be restored properly to NTFS ACLs if the snapshots were taken before February 24, 2020. If this occurs, consider contacting Azure Support. * **Does Azure File Sync sync the LastWriteTime for directories?** - No, Azure File Sync does not sync the LastWriteTime for directories. + No, Azure File Sync doesn't sync the LastWriteTime for directories. ## Security, authentication, and access control @@ -82,13 +82,19 @@ ms.topic: conceptual There are two options that provide auditing functionality for Azure Files: - If users are accessing the Azure file share directly, [Azure Storage logs](../blobs/monitor-blob-storage.md?tabs=azure-powershell#analyzing-logs) can be used to track file changes and user access. These logs can be used for troubleshooting purposes and the requests are logged on a best-effort basis. - - If users are accessing the Azure file share via a Windows Server that has the Azure File Sync agent installed, use an [audit policy](/windows/security/threat-protection/auditing/apply-a-basic-audit-policy-on-a-file-or-folder) or 3rd party product to track file changes and user access on the Windows Server. + - If users are accessing the Azure file share via a Windows Server that has the Azure File Sync agent installed, use an [audit policy](/windows/security/threat-protection/auditing/apply-a-basic-audit-policy-on-a-file-or-folder) or third-party product to track file changes and user access on the Windows Server. + +* +**Does Azure Files support using Access-Based Enumeration (ABE) to control the visibility of the files and folders in SMB Azure file shares?** + + No, this scenario isn't supported. + ### AD DS & Azure AD DS Authentication * **Does Azure Active Directory Domain Services (Azure AD DS) support SMB access using Azure AD credentials from devices joined to or registered with Azure AD?** - No, this scenario is not supported. + No, this scenario isn't supported. * **Can I access Azure file shares with Azure AD credentials from a VM under a different subscription?** @@ -101,37 +107,41 @@ ms.topic: conceptual No, Azure Files only supports Azure AD DS or on-premises AD DS integration with an Azure AD tenant that resides in the same subscription as the file share. Only one subscription can be associated with an Azure AD tenant. This limitation applies to both Azure AD DS and on-premises AD DS authentication methods. When using on-premises AD DS for authentication, [the AD DS credential must be synced to the Azure AD](../../active-directory/hybrid/how-to-connect-install-roadmap.md) that the storage account is associated with. 
* -**Does on-premises AD DS authentication for Azure file shares support integration with an AD DS environment using multiple forests?** +**Does on-premises AD DS authentication for Azure file shares support integration with an AD DS environment using multiple forests?** Azure Files on-premises AD DS authentication only integrates with the forest of the domain service that the storage account is registered to. To support authentication from another forest, your environment must have a forest trust configured correctly. The way Azure Files register in AD DS almost the same as a regular file server, where it creates an identity (computer or service logon account) in AD DS for authentication. The only difference is that the registered SPN of the storage account ends with "file.core.windows.net" which does not match with the domain suffix. Consult your domain administrator to see if any update to your suffix routing policy is required to enable multiple forest authentication due to the different domain suffix. We provide an example below to configure suffix routing policy. - Example: When users in forest A domain want to reach an file share with the storage account registered against a domain in forest B, this will not automatically work because the service principal of the storage account does not have a suffix matching the suffix of any domain in forest A. We can address this issue by manually configuring a suffix routing rule from forest A to forest B for a custom suffix of "file.core.windows.net". - First, you must add a new custom suffix on forest B. Make sure you have the appropriate administrative permissions to change the configuration, then follow these steps: - 1. Logon to a machine domain joined to forest B - 2. Open up "Active Directory Domains and Trusts" console - 3. Right click on "Active Directory Domains and Trusts" - 4. Click on "Properties" - 5. Click on "Add" - 6. Add "file.core.windows.net" as the UPN Suffixes - 7. Click on "Apply", then "OK" to close the wizard + Example: When users in forest A domain want to reach a file share with the storage account registered against a domain in forest B, this won't automatically work because the service principal of the storage account doesn't have a suffix matching the suffix of any domain in forest A. We can address this issue by manually configuring a suffix routing rule from forest A to forest B for a custom suffix of "file.core.windows.net". + + First, you must add a new custom suffix on forest B. Make sure you have the appropriate administrative permissions to change the configuration, then follow these steps: + + 1. Logon to a machine domain joined to forest B. + 2. Open up the **Active Directory Domains and Trusts** console. + 3. Right-click on **Active Directory Domains and Trusts**. + 4. Select **Properties**. + 5. Select **Add**. + 6. Add "file.core.windows.net" as the UPN Suffixes. + 7. Select **Apply**, then **OK** to close the wizard. Next, add the suffix routing rule on forest A, so that it redirects to forest B. - 1. Logon to a machine domain joined to forest A - 2. Open up "Active Directory Domains and Trusts" console - 3. Right-click on the domain that you want to access the file share, then click on the "Trusts" tab and select forest B domain from outgoing trusts. If you haven't configure trust between the two forests, you need to setup the trust first - 4. Click on "Properties…" then "Name Suffix Routing" - 5. Check if the "*.file.core.windows.net" suffix shows up. If not, click on 'Refresh' - 6. 
Select "*.file.core.windows.net", then click on "Enable" and "Apply" + + 1. Logon to a machine domain joined to forest A. + 2. Open up "Active Directory Domains and Trusts" console. + 3. Right-click on the domain that you want to access the file share, then select the **Trusts** tab and select forest B domain from outgoing trusts. If you haven't configured trust between the two forests, you need to set up the trust first. + 4. Select **Properties** and then **Name Suffix Routing** + 5. Check if the "*.file.core.windows.net" suffix shows up. If not, click **Refresh**. + 6. Select "*.file.core.windows.net", then select **Enable** and **Apply**. * **Is there any difference in creating a computer account or service logon account to represent my storage account in AD?** - Creating either a [computer account](/windows/security/identity-protection/access-control/active-directory-accounts#manage-default-local-accounts-in-active-directory) (default) or a [service logon account](/windows/win32/ad/about-service-logon-accounts) has no difference on how the authentication would work with Azure Files. You can make your own choice on how to represent a storage account as an identity in your AD environment. The default DomainAccountType set in Join-AzStorageAccountForAuth cmdlet is computer account. However, the password expiration age configured in your AD environment can be different for computer or service logon account and you need to take that into consideration for [Update the password of your storage account identity in AD](./storage-files-identity-ad-ds-update-password.md). + Creating either a [computer account](/windows/security/identity-protection/access-control/active-directory-accounts#manage-default-local-accounts-in-active-directory) (default) or a [service logon account](/windows/win32/ad/about-service-logon-accounts) has no difference on how the authentication would work with Azure Files. You can make your own choice on how to represent a storage account as an identity in your AD environment. The default DomainAccountType set in `Join-AzStorageAccountForAuth` cmdlet is computer account. However, the password expiration age configured in your AD environment can be different for computer or service logon account and you need to take that into consideration for [Update the password of your storage account identity in AD](./storage-files-identity-ad-ds-update-password.md). * **How to remove cached credentials with storage account key and delete existing SMB connections before initializing new connection with Azure AD or AD credentials?** - You can follow the two step process below to remove the saved credential associated with the storage account key and remove the SMB connection: + You can follow the two step process below to remove the saved credential associated with the storage account key and remove the SMB connection: + 1. Run the cmdlet below in Windows Cmd.exe to remove the credential. If you cannot find one, it means that you have not persisted the credential and can skip this step. cmdkey /delete:Domain:target=storage-account-name.file.core.windows.net @@ -155,16 +165,16 @@ ms.topic: conceptual * **Can I migrate existing data to an NFS share?** - Within a region, you can use standard tools like scp, rsync, or SSHFS to move data. Because Azure Files NFS can be accessed from multiple compute instances concurrently, you can improve copying speeds with parallel uploads. 
If you want to bring data from outside of a region, use a VPN or a Expressroute to mount to your file system from your on-premises data center. + Within a region, you can use standard tools like scp, rsync, or SSHFS to move data. Because Azure Files NFS can be accessed from multiple compute instances concurrently, you can improve copying speeds with parallel uploads. If you want to bring data from outside of a region, use a VPN or an ExpressRoute to mount to your file system from your on-premises data center. * **Can you run IBM MQ (including multi-instance) on Azure Files NFS?** - * Azure Files NFS v4.1 file shares meets the three requirements set by IBM MQ + * Azure Files NFS v4.1 file shares meet the three requirements set by IBM MQ: - https://www.ibm.com/docs/en/ibm-mq/9.2?topic=multiplatforms-requirements-shared-file-systems + Data write integrity + Guaranteed exclusive access to files + Release locks on failure - * The following test cases run successfully + * The following test cases run successfully: 1. https://www.ibm.com/docs/en/ibm-mq/9.2?topic=multiplatforms-verifying-shared-file-system-behavior 2. https://www.ibm.com/docs/en/ibm-mq/9.2?topic=multiplatforms-running-amqsfhac-test-message-integrity @@ -175,18 +185,18 @@ ms.topic: conceptual * **Are my share snapshots geo-redundant?** - Share snapshots have the same redundancy as the Azure file share for which they were taken. If you have selected geo-redundant storage for your account, your share snapshot also is stored redundantly in the paired region. + Share snapshots have the same redundancy as the Azure file share for which they were taken. If you've selected geo-redundant storage for your account, your share snapshot also is stored redundantly in the paired region. ### Clean up share snapshots * **Can I delete my share but not delete my share snapshots?** - If you have active share snapshots on your share, you cannot delete your share. You can use an API to delete share snapshots, along with the share. You also can delete both the share snapshots and the share in the Azure portal. + If you have active share snapshots on your share, you can't delete your share. You can use an API to delete share snapshots, along with the share. You also can delete both the share snapshots and the share in the Azure portal. ## Billing and pricing * **How much do share snapshots cost?** - Share snapshots are incremental in nature. The base share snapshot is the share itself. All subsequent share snapshots are incremental and store only the difference from the preceding share snapshot. You are billed only for the changed content. If you have a share with 100 GiB of data but only 5 GiB has changed since your last share snapshot, the share snapshot consumes only 5 additional GiB, and you are billed for 105 GiB. For more information about transaction and standard egress charges, see the [Pricing page](https://azure.microsoft.com/pricing/details/storage/files/). + Share snapshots are incremental in nature. The base share snapshot is the share itself. All subsequent share snapshots are incremental and store only the difference from the preceding share snapshot. You're billed only for the changed content. If you have a share with 100 GiB of data but only 5 GiB has changed since your last share snapshot, the share snapshot consumes only 5 additional GiB, and you're billed for 105 GiB. For more information about transaction and standard egress charges, see the [Pricing page](https://azure.microsoft.com/pricing/details/storage/files/).
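To make the incremental billing described above concrete, here is a small illustrative calculation (the sizes are hypothetical examples, not a quote of actual pricing):

```powershell
# Illustrative arithmetic only: capacity billed for a share plus one incremental snapshot.
$shareGiB         = 100  # data currently in the share
$changedSinceSnap = 5    # GiB changed since the last share snapshot was taken
$billedGiB        = $shareGiB + $changedSinceSnap  # the snapshot only adds the 5 GiB delta
"Billed capacity: $billedGiB GiB"                  # 105 GiB
```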
## See also * [Troubleshoot Azure Files in Windows](storage-troubleshoot-windows-file-connection-problems.md) diff --git a/articles/storage/files/storage-files-identity-ad-ds-enable.md b/articles/storage/files/storage-files-identity-ad-ds-enable.md index 5f81201226317..4946be0a36320 100644 --- a/articles/storage/files/storage-files-identity-ad-ds-enable.md +++ b/articles/storage/files/storage-files-identity-ad-ds-enable.md @@ -5,7 +5,7 @@ author: khdownie ms.service: storage ms.subservice: files ms.topic: how-to -ms.date: 05/06/2022 +ms.date: 05/24/2022 ms.author: kendownie ms.custom: devx-track-azurepowershell --- @@ -67,9 +67,8 @@ Connect-AzAccount # Define parameters # $StorageAccountName is the name of an existing storage account that you want to join to AD -# $SamAccountName is an AD object, see https://docs.microsoft.com/en-us/windows/win32/adschema/a-samaccountname +# $SamAccountName is the name of the to-be-created AD object, which is used by AD as the logon name for the object. See https://docs.microsoft.com/en-us/windows/win32/adschema/a-samaccountname # for more information. -# If you want to use AES256 encryption (recommended), except for the trailing '$', the storage account name must be the same as the computer object's SamAccountName. $SubscriptionId = "" $ResourceGroupName = "" $StorageAccountName = "" @@ -148,27 +147,25 @@ Set-AzStorageAccount ` -ActiveDirectoryForestName "" ` -ActiveDirectoryDomainGuid "" ` -ActiveDirectoryDomainsid "" ` - -ActiveDirectoryAzureStorageSid "" + -ActiveDirectoryAzureStorageSid "" ` + -ActiveDirectorySamAccountName "" ` + -ActiveDirectoryAccountType "" ``` #### Enable AES-256 encryption (recommended) To enable AES-256 encryption, follow the steps in this section. If you plan to use RC4, skip this section. -The domain object that represents your storage account must meet the following requirements: - -- The domain object must be created as a computer object in the on-premises AD domain. -- Except for the trailing '$', the storage account name must be the same as the computer object's SamAccountName. - -If your domain object doesn't meet those requirements, delete it and create a new domain object that does. +> [!IMPORTANT] +> The domain object that represents your storage account must be created as a computer object in the on-premises AD domain. If your domain object doesn't meet this requirement, delete it and create a new domain object that does. -Replace `` and `` with your values, then run the following cmdlet to configure AES-256 support: +Replace `` and `` with your values, then run the following cmdlet to configure AES-256 support: ```powershell Set-ADComputer -Identity -Server -KerberosEncryptionType "AES256" ``` -After you've run that cmdlet, replace `` in the following script with your value, then run the script to refresh your domain object password: +After you've run the above cmdlet, replace `` in the following script with your value, then run the script to refresh your domain object password: ```powershell $KeyName = "kerb1" # Could be either the first or second kerberos key, this script assumes we're refreshing the first @@ -181,7 +178,7 @@ Set-ADAccountPassword -Identity -Reset -NewPassword $Ne ### Debugging -You can run the Debug-AzStorageAccountAuth cmdlet to conduct a set of basic checks on your AD configuration with the logged on AD user. This cmdlet is supported on AzFilesHybrid v0.1.2+ version. 
For more information on the checks performed in this cmdlet, see [Unable to mount Azure Files with AD credentials](storage-troubleshoot-windows-file-connection-problems.md#unable-to-mount-azure-files-with-ad-credentials) in the troubleshooting guide for Windows. +You can run the `Debug-AzStorageAccountAuth` cmdlet to conduct a set of basic checks on your AD configuration with the logged on AD user. This cmdlet is supported on AzFilesHybrid v0.1.2+ version. For more information on the checks performed in this cmdlet, see [Unable to mount Azure Files with AD credentials](storage-troubleshoot-windows-file-connection-problems.md#unable-to-mount-azure-files-with-ad-credentials) in the troubleshooting guide for Windows. ```PowerShell Debug-AzStorageAccountAuth -StorageAccountName $StorageAccountName -ResourceGroupName $ResourceGroupName -Verbose diff --git a/articles/storage/files/storage-files-networking-overview.md b/articles/storage/files/storage-files-networking-overview.md index 6ef1e27b8cce7..127d8bd0e0c0e 100644 --- a/articles/storage/files/storage-files-networking-overview.md +++ b/articles/storage/files/storage-files-networking-overview.md @@ -4,7 +4,7 @@ description: An overview of networking options for Azure Files. author: khdownie ms.service: storage ms.topic: overview -ms.date: 04/19/2022 +ms.date: 05/23/2022 ms.author: kendownie ms.subservice: files --- @@ -160,9 +160,11 @@ This reflects the fact that the storage account can expose both the public endpo - Forward the `core.windows.net` zone from your on-premises DNS servers to your Azure private DNS zone. The Azure private DNS host can be reached through a special IP address (`168.63.129.16`) that is only accessible inside virtual networks that are linked to the Azure private DNS zone. To work around this limitation, you can run additional DNS servers within your virtual network that will forward `core.windows.net` on to the Azure private DNS zone. To simplify this set up, we have provided PowerShell cmdlets that will auto-deploy DNS servers in your Azure virtual network and configure them as desired. To learn how to set up DNS forwarding, see [Configuring DNS with Azure Files](storage-files-networking-dns.md). ## SMB over QUIC -Windows Server 2022 Azure Edition supports a new transport protocol called QUIC for the SMB server provided by the File Server role. QUIC is a replacement for TCP that is built on top of UDP, providing numerous advantages over TCP while still providing a reliable transport mechanism. Although there are multiple advantages to QUIC as a transport protocol, one key advantage for the SMB protocol is that all transport is done over port 443, which is widely open outbound to support HTTPS. This effectively means that SMB over QUIC offers a "SMB VPN" for file sharing over the public internet. Windows 11 ships with a SMB over QUIC capable client. +Windows Server 2022 Azure Edition supports a new transport protocol called QUIC for the SMB server provided by the File Server role. QUIC is a replacement for TCP that is built on top of UDP, providing numerous advantages over TCP while still providing a reliable transport mechanism. One key advantage for the SMB protocol is that instead of using port 445, all transport is done over port 443, which is widely open outbound to support HTTPS. This effectively means that SMB over QUIC offers an "SMB VPN" for file sharing over the public internet. Windows 11 ships with an SMB over QUIC capable client. -At this time, Azure Files does not directly support SMB over QUIC. 
However, you can create a lightweight cache of your Azure file shares on a Windows Server 2022 Azure Edition VM using Azure File Sync. To learn more about this option, see [Deploy Azure File Sync](../file-sync/file-sync-deployment-guide.md) and [SMB over QUIC](/windows-server/storage/file-server/smb-over-quic). +At this time, Azure Files doesn't directly support SMB over QUIC. However, you can get access to Azure file shares via Azure File Sync running on Windows Server as in the diagram below. This also gives you the option to have Azure File Sync caches both on-premises or in different Azure datacenters to provide local caches for a distributed workforce. To learn more about this option, see [Deploy Azure File Sync](../file-sync/file-sync-deployment-guide.md) and [SMB over QUIC](/windows-server/storage/file-server/smb-over-quic). + +:::image type="content" source="media/storage-files-networking-overview/smb-over-quic.png" alt-text="Diagram for creating a lightweight cache of your Azure file shares on a Windows Server 2022 Azure Edition V M using Azure File Sync." border="false"::: ## See also - [Azure Files overview](storage-files-introduction.md) diff --git a/articles/storage/files/storage-files-quick-create-use-linux.md b/articles/storage/files/storage-files-quick-create-use-linux.md index 3663577c1d9c2..324831335a094 100644 --- a/articles/storage/files/storage-files-quick-create-use-linux.md +++ b/articles/storage/files/storage-files-quick-create-use-linux.md @@ -4,7 +4,7 @@ description: This tutorial covers how to use the Azure portal to deploy a Linux author: khdownie ms.service: storage ms.topic: tutorial -ms.date: 03/22/2022 +ms.date: 05/24/2022 ms.author: kendownie ms.subservice: files #Customer intent: As an IT admin new to Azure Files, I want to try out Azure file share using NFS and Linux so I can determine whether I want to subscribe to the service. @@ -191,6 +191,9 @@ Now that you've created an NFS share, to use it you have to mount it on your Lin 1. You should see **Connect to this NFS share from Linux** along with sample commands to use NFS on your Linux distribution and a provided mounting script. + > [!IMPORTANT] + > The provided mounting script will mount the NFS share only until the Linux machine is rebooted. To automatically mount the share every time the machine reboots, [add an entry in /etc/fstab](storage-how-to-use-files-linux.md#static-mount-with-etcfstab). For more information, enter the command `man fstab` from the Linux command line. + :::image type="content" source="media/storage-files-quick-create-use-linux/mount-nfs-share.png" alt-text="Screenshot showing how to connect to an N F S file share from Linux using a provided mounting script." lightbox="media/storage-files-quick-create-use-linux/mount-nfs-share.png" border="true"::: 1. Select your Linux distribution (Ubuntu). diff --git a/articles/storage/files/storage-files-scale-targets.md b/articles/storage/files/storage-files-scale-targets.md index 6732a82e71fa0..d25fc7ec5635d 100644 --- a/articles/storage/files/storage-files-scale-targets.md +++ b/articles/storage/files/storage-files-scale-targets.md @@ -56,7 +56,7 @@ There are two main types of storage accounts for Azure Files: | Maximum size of a file share |
• 100 TiB, with large file share feature enabled<sup>2</sup><br/>• 5 TiB, default | 100 TiB | | Maximum number of files in a file share | No limit | No limit | | Maximum request rate (Max IOPS) | • 20,000, with large file share feature enabled<sup>2</sup><br/>• 1,000 or 100 requests per 100 ms, default | • Baseline IOPS: 3000 + 1 IOPS per GiB, up to 100,000<br/>• IOPS bursting: Max (10000, 3x IOPS per GiB), up to 100,000 | -| Throughput (ingress + egress) for a single file share (MiB/sec) | • Up to 300 MiB/sec, with large file share feature enabled<sup>2</sup><br/>• Up to 60 MiB/sec, default | 100 + CEILING(0.04 * ProvisionedGiB) + CEILING(0.06 * ProvisionedGiB) | +| Throughput (ingress + egress) for a single file share (MiB/sec) | • Up to 300 MiB/sec, with large file share feature enabled<sup>2</sup><br/>• Up to 60 MiB/sec, default
                    | 100 + CEILING(0.04 * ProvisionedStorageGiB) + CEILING(0.06 * ProvisionedStorageGiB) | | Maximum number of share snapshots | 200 snapshots | 200 snapshots | | Maximum object name length (total pathname including all directories and filename) | 2,048 characters | 2,048 characters | | Maximum individual pathname component length (in the path \A\B\C\D, each letter represents a directory or file that is an individual component) | 255 characters | 255 characters | diff --git a/articles/storage/files/storage-how-to-use-files-linux.md b/articles/storage/files/storage-how-to-use-files-linux.md index cce92e21095f4..0b6b682c3c533 100644 --- a/articles/storage/files/storage-how-to-use-files-linux.md +++ b/articles/storage/files/storage-how-to-use-files-linux.md @@ -92,7 +92,7 @@ uname -r If the connection was successful, you should see something similar to the following output: - ```ouput + ```output Connection to 445 port [tcp/microsoft-ds] succeeded! ``` diff --git a/articles/storage/files/storage-how-to-use-files-mac.md b/articles/storage/files/storage-how-to-use-files-mac.md index eea7b2d67c64e..9b804949c2137 100644 --- a/articles/storage/files/storage-how-to-use-files-mac.md +++ b/articles/storage/files/storage-how-to-use-files-mac.md @@ -4,7 +4,7 @@ description: Learn how to mount an Azure file share over SMB with macOS using Fi author: khdownie ms.service: storage ms.topic: how-to -ms.date: 09/23/2020 +ms.date: 05/26/2022 ms.author: kendownie ms.subservice: files --- @@ -13,11 +13,11 @@ ms.subservice: files [Azure Files](storage-files-introduction.md) is Microsoft's easy-to-use cloud file system. Azure file shares can be mounted with the industry standard SMB 3 protocol by macOS High Sierra 10.13+. This article shows two different ways to mount an Azure file share on macOS: with the Finder UI and using the Terminal. ## Prerequisites for mounting an Azure file share on macOS -* **Storage account name**: To mount an Azure file share, you will need the name of the storage account. +* **Storage account name**: To mount an Azure file share, you'll need the name of the storage account. -* **Storage account key**: To mount an Azure file share, you will need the primary (or secondary) storage key. SAS keys are not currently supported for mounting. +* **Storage account key**: To mount an Azure file share, you'll need the primary (or secondary) storage key. SAS keys are not currently supported for mounting. -* **Ensure port 445 is open**: SMB communicates over TCP port 445. On your client machine (the Mac), check to make sure your firewall is not blocking TCP port 445. +* **Ensure port 445 is open**: SMB communicates over TCP port 445. On your client machine (the Mac), check to make sure your firewall isn't blocking TCP port 445. If your organization or ISP is blocking port 445, you may need to set up a VPN from on-premises to your Azure storage account with Azure Files exposed on your internal network using private endpoints, so that the traffic will go through a secure tunnel as opposed to over the internet. For more information, see [Networking considerations for direct Azure file share access](storage-files-networking-overview.md). To see the summary of ISPs that allow or disallow access from port 445, go to [TechNet](https://social.technet.microsoft.com/wiki/contents/articles/32346.azure-summary-of-isps-that-allow-disallow-access-from-port-445.aspx). 
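If you don't have the storage account key from the prerequisites above at hand, one way to retrieve it is with the Az PowerShell module. This is only a sketch: it assumes you're signed in with `Connect-AzAccount` and have permission to list keys, and the resource group and account names are placeholders.

```powershell
# Sketch: retrieve the storage account keys with Azure PowerShell.
# Replace <resource-group-name> and <storage-account-name> with your own values.
$keys = Get-AzStorageAccountKey -ResourceGroupName "<resource-group-name>" -Name "<storage-account-name>"
$keys[0].Value   # primary key; $keys[1].Value is the secondary key
```

You can also copy the key from the storage account's **Access keys** blade in the Azure portal.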
## Applies to | File share type | SMB | NFS | @@ -27,7 +27,7 @@ ms.subservice: files | Premium file shares (FileStorage), LRS/ZRS | ![Yes](../media/icons/yes-icon.png) | ![No](../media/icons/no-icon.png) | ## Mount an Azure file share via Finder -1. **Open Finder**: Finder is open on macOS by default, but you can ensure it is the currently selected application by clicking the "macOS face icon" on the dock: +1. **Open Finder**: Finder is open on macOS by default, but you can ensure that it's the currently selected application by clicking the "macOS face icon" on the dock: ![The macOS face icon](./media/storage-how-to-use-files-mac/mount-via-finder-1.png) 2. **Select "Connect to Server" from the "Go" Menu**: Using the UNC path from the prerequisites, convert the beginning double backslash (`\\`) to `smb://` and all other backslashes (`\`) to forwards slashes (`/`). Your link should look like the following: diff --git a/articles/storage/files/storage-how-to-use-files-windows.md b/articles/storage/files/storage-how-to-use-files-windows.md index d5a3eed1051d8..48ceb13e7d870 100644 --- a/articles/storage/files/storage-how-to-use-files-windows.md +++ b/articles/storage/files/storage-how-to-use-files-windows.md @@ -4,7 +4,7 @@ description: Learn to use Azure file shares with Windows and Windows Server. Use author: khdownie ms.service: storage ms.topic: how-to -ms.date: 09/10/2021 +ms.date: 05/31/2022 ms.author: kendownie ms.subservice: files ms.custom: devx-track-azurepowershell @@ -68,7 +68,7 @@ To get this script: 1. Select **File shares**. 1. Select the file share you'd like to mount. - :::image type="content" source="media/storage-how-to-use-files-windows/select-file-shares.png" alt-text="Screenshot of file shares blade, file share is highlighted."::: + :::image type="content" source="media/storage-how-to-use-files-windows/select-file-shares.png" alt-text="Screenshot of file shares blade, file share is highlighted." lightbox="media/storage-how-to-use-files-windows/select-file-shares.png"::: 1. Select **Connect**. @@ -85,30 +85,30 @@ You have now mounted your Azure file share. ### Mount the Azure file share with File Explorer > [!Note] -> Note that the following instructions are shown on Windows 10 and may differ slightly on older releases. +> Note that the following instructions are shown on Windows 10 and may differ slightly on older releases. -1. Open File Explorer. This can be done by opening from the Start Menu, or by pressing Win+E shortcut. +1. Open File Explorer by opening it from the Start Menu, or by pressing the Win+E shortcut. 1. Navigate to **This PC** on the left-hand side of the window. This will change the menus available in the ribbon. Under the Computer menu, select **Map network drive**. - - ![A screenshot of the "Map network drive" drop-down menu](./media/storage-how-to-use-files-windows/1_MountOnWindows10.png) -1. Select the drive letter and enter the UNC path, the UNC path format is `\\.file.core.windows.net\`. For example: `\\anexampleaccountname.file.core.windows.net\example-share-name`. - - ![A screenshot of the "Map Network Drive" dialog](./media/storage-how-to-use-files-windows/2_MountOnWindows10.png) + :::image type="content" source="media/storage-how-to-use-files-windows/1_MountOnWindows10.png" alt-text="Screenshot of the Map network drive drop-down menu."::: -1. Use the storage account name prepended with `AZURE\` as the username and a storage account key as the password. 
- - ![A screenshot of the network credential dialog](./media/storage-how-to-use-files-windows/3_MountOnWindows10.png) +1. Select the drive letter and enter the UNC path to your Azure file share. The UNC path format is `\\.file.core.windows.net\`. For example: `\\anexampleaccountname.file.core.windows.net\file-share-name`. Check the **Connect using different credentials** checkbox. Select **Finish**. + + :::image type="content" source="media/storage-how-to-use-files-windows/2_MountOnWindows10.png" alt-text="Screenshot of the Map Network Drive dialog."::: + +1. Select **More choices** > **Use a different account**. Under **Email address**, use the storage account name, and use a storage account key as the password. Select **OK**. + + :::image type="content" source="media/storage-how-to-use-files-windows/credentials-use-a-different-account.png" alt-text="Screenshot of the network credential dialog selecting use a different account."::: 1. Use Azure file share as desired. - - ![Azure file share is now mounted](./media/storage-how-to-use-files-windows/4_MountOnWindows10.png) -1. When you are ready to dismount the Azure file share, you can do so by right-clicking on the entry for the share under the **Network locations** in File Explorer and selecting **Disconnect**. + :::image type="content" source="media/storage-how-to-use-files-windows/4_MountOnWindows10.png" alt-text="Screenshot showing that the Azure file share is now mounted."::: + +1. When you're ready to dismount the Azure file share, right-click on the entry for the share under the **Network locations** in File Explorer and select **Disconnect**. ### Accessing share snapshots from Windows -If you have taken a share snapshot, either manually or automatically through a script or service like Azure Backup, you can view previous versions of a share, a directory, or a particular file from file share on Windows. You can take a share snapshot using [Azure PowerShell](./storage-how-to-use-files-portal.md), [Azure CLI](./storage-how-to-use-files-portal.md), or the [Azure portal](storage-how-to-use-files-portal.md). +If you've taken a share snapshot, either manually or automatically through a script or service like Azure Backup, you can view previous versions of a share, a directory, or a particular file from file share on Windows. You can take a share snapshot using [Azure PowerShell](./storage-how-to-use-files-portal.md), [Azure CLI](./storage-how-to-use-files-portal.md), or the [Azure portal](storage-how-to-use-files-portal.md). #### List previous versions Browse to the item or parent item that needs to be restored. Double-click to go to the desired directory. Right-click and select **Properties** from the menu. diff --git a/articles/storage/files/storage-troubleshoot-windows-file-connection-problems.md b/articles/storage/files/storage-troubleshoot-windows-file-connection-problems.md index 8c00f42741cdf..cbd3bf5818887 100644 --- a/articles/storage/files/storage-troubleshoot-windows-file-connection-problems.md +++ b/articles/storage/files/storage-troubleshoot-windows-file-connection-problems.md @@ -4,7 +4,7 @@ description: Troubleshooting Azure Files problems in Windows. 
See common issues author: khdownie ms.service: storage ms.topic: troubleshooting -ms.date: 01/31/2022 +ms.date: 05/26/2022 ms.author: kendownie ms.subservice: files ms.custom: devx-track-azurepowershell @@ -113,11 +113,11 @@ TcpTestSucceeded : True ### Solution for cause 1 -#### Solution 1 — Use Azure File Sync -Azure File Sync can transform your on-premises Windows Server into a quick cache of your Azure file share. You can use any protocol that's available on Windows Server to access your data locally, including SMB, NFS, and FTPS. Azure File Sync works over port 443 and can thus be used as a workaround to access Azure Files from clients that have port 445 blocked. [Learn how to setup Azure File Sync](../file-sync/file-sync-extend-servers.md). +#### Solution 1 — Use Azure File Sync as a QUIC endpoint +Azure File Sync can be used as a workaround to access Azure Files from clients that have port 445 blocked. Although Azure Files doesn't directly support SMB over QUIC, Windows Server 2022 Azure Edition does support the QUIC protocol. You can create a lightweight cache of your Azure file shares on a Windows Server 2022 Azure Edition VM using Azure File Sync. This uses port 443, which is widely open outbound to support HTTPS, instead of port 445. To learn more about this option, see [SMB over QUIC with Azure File Sync](storage-files-networking-overview.md#smb-over-quic). -#### Solution 2 — Use VPN -By Setting up a VPN to your specific Storage Account, the traffic will go through a secure tunnel as opposed to over the internet. Follow the [instructions to setup VPN](storage-files-configure-p2s-vpn-windows.md) to access Azure Files from Windows. +#### Solution 2 — Use VPN or ExpressRoute +By setting up a VPN or ExpressRoute from on-premises to your Azure storage account, with Azure Files exposed on your internal network using private endpoints, the traffic will go through a secure tunnel as opposed to over the internet. Follow the [instructions to setup VPN](storage-files-configure-p2s-vpn-windows.md) to access Azure Files from Windows. #### Solution 3 — Unblock port 445 with help of your ISP/IT Admin Work with your IT department or ISP to open port 445 outbound to [Azure IP ranges](https://www.microsoft.com/download/details.aspx?id=41653). diff --git a/articles/storage/files/storage-troubleshooting-files-nfs.md b/articles/storage/files/storage-troubleshooting-files-nfs.md index 483b7bf3710d1..6ce50986b03d5 100644 --- a/articles/storage/files/storage-troubleshooting-files-nfs.md +++ b/articles/storage/files/storage-troubleshooting-files-nfs.md @@ -1,18 +1,21 @@ --- -title: Troubleshoot Azure NFS file share problems - Azure Files -description: Troubleshoot Azure NFS file share problems. +title: Troubleshoot NFS file share problems - Azure Files +description: Troubleshoot NFS Azure file share problems. author: khdownie ms.service: storage ms.topic: troubleshooting -ms.date: 09/15/2020 +ms.date: 05/25/2022 ms.author: kendownie ms.subservice: files ms.custom: references_regions, devx-track-azurepowershell --- -# Troubleshoot Azure NFS file share problems +# Troubleshoot NFS Azure file share problems -This article lists some common problems and known issues related to Azure NFS file shares. It provides potential causes and workarounds when these problems are encountered. +This article lists some common problems and known issues related to NFS Azure file shares. It provides potential causes and workarounds when these problems are encountered. 
+ +> [!IMPORTANT] +> NFS Azure file shares are not supported for Windows clients. ## Applies to | File share type | SMB | NFS | @@ -51,7 +54,7 @@ NFS is only available on storage accounts with the following configuration: Follow the instructions in our article: [How to create an NFS share](storage-files-how-to-create-nfs-shares.md). -## Cannot connect to or mount an Azure NFS file share +## Cannot connect to or mount an NFS Azure file share ### Cause 1: Request originates from a client in an untrusted network/untrusted IP diff --git a/articles/storage/files/understanding-billing.md b/articles/storage/files/understanding-billing.md index 72978f57fbe36..d5335550c7071 100644 --- a/articles/storage/files/understanding-billing.md +++ b/articles/storage/files/understanding-billing.md @@ -4,7 +4,7 @@ description: Learn how to interpret the provisioned and pay-as-you-go billing mo author: khdownie ms.service: storage ms.topic: conceptual -ms.date: 4/19/2022 +ms.date: 06/02/2022 ms.author: kendownie ms.subservice: files --- @@ -17,7 +17,7 @@ Azure Files provides two distinct billing models: provisioned and pay-as-you-go. :::column-end::: :::column::: - This video is an interview that discusses the basics of the Azure Files billing model, including how to optimize Azure file shares to achieve the lowest costs possible and how to compare Azure Files to other file storage offerings on-premises and in the cloud. + This video is an interview that discusses the basics of the Azure Files billing model. It covers how to optimize Azure file shares to achieve the lowest costs possible and how to compare Azure Files to other file storage offerings on-premises and in the cloud. :::column-end::: :::row-end::: @@ -50,16 +50,16 @@ The following table shows how common operating systems measure and label storage | Linux distributions | Commonly base-2, some software may use base-10 | Inconsistent labeling, alignment between measurement and labeling depends on the software package. | | macOS, iOS, and iPad OS | Base-10 | [Consistently labels as base-10](https://support.apple.com/HT201402). | -Check with your operating system vendor if your operating system is not listed. +Check with your operating system vendor if your operating system isn't listed. ## File share total cost of ownership checklist -If you are migrating to Azure Files from on-premises or comparing Azure Files to other cloud storage solutions, you should consider the following factors to ensure a fair, apples-to-apples comparison: +If you're migrating to Azure Files from on-premises or comparing Azure Files to other cloud storage solutions, you should consider the following factors to ensure a fair, apples-to-apples comparison: -- **How do you pay for storage, IOPS, and bandwidth?** With Azure Files, the billing model you use depends on whether you are deploying [premium](#provisioned-model) or [standard](#pay-as-you-go-model) file shares. Most cloud solutions have models that align with the principles of either provisioned storage, such as price determinism and simplicity, or pay-as-you-go storage, which can optimize costs by only charging you for what you actually use. Of particular interest for provisioned models are minimum provisioned share size, the provisioning unit, and the ability to increase and decrease provisioning. +- **How do you pay for storage, IOPS, and bandwidth?** With Azure Files, the billing model you use depends on whether you're deploying [premium](#provisioned-model) or [standard](#pay-as-you-go-model) file shares. 
Most cloud solutions have models that align with the principles of either provisioned storage, such as price determinism and simplicity, or pay-as-you-go storage, which can optimize costs by only charging you for what you actually use. Of particular interest for provisioned models are minimum provisioned share size, the provisioning unit, and the ability to increase and decrease provisioning. -- **Are there any methods to optimize storage costs?** With Azure Files, you can use [capacity reservations](#reserve-capacity) to achieve an up to 36% discount on storage. Other solutions may employ storage efficiency strategies like deduplication or compression to optionally optimize storage, but remember, these storage optimization strategies often have non-monetary costs, such as reducing performance. Azure Files capacity reservations have no side effects on performance. +- **Are there any methods to optimize storage costs?** With Azure Files, you can use [capacity reservations](#reserve-capacity) to achieve an up to 36% discount on storage. Other solutions may employ strategies like deduplication or compression to optionally optimize storage efficiency. However, these storage optimization strategies often have non-monetary costs, such as reducing performance. Azure Files capacity reservations have no side effects on performance. -- **How do you achieve storage resiliency and redundancy?** With Azure Files, storage resiliency and redundancy are baked into the product offering. All tiers and redundancy levels ensure that data is highly available and at least three copies of your data are accessible. When considering other file storage options, consider whether storage resiliency and redundancy is built-in or something you must assemble yourself. +- **How do you achieve storage resiliency and redundancy?** With Azure Files, storage resiliency and redundancy are baked into the product offering. All tiers and redundancy levels ensure that data is highly available and at least three copies of your data are accessible. When considering other file storage options, consider whether storage resiliency and redundancy is built in or something you must assemble yourself. - **What do you need to manage?** With Azure Files, the basic unit of management is a storage account. Other solutions may require additional management, such as operating system updates or virtual resource management (VMs, disks, network IP addresses, etc.). @@ -69,12 +69,12 @@ If you are migrating to Azure Files from on-premises or comparing Azure Files to Azure Files supports storage capacity reservations, which enable you to achieve a discount on storage by pre-committing to storage utilization. You should consider purchasing reserved instances for any production workload, or dev/test workloads with consistent footprints. When you purchase reserved capacity, your reservation must specify the following dimensions: - **Capacity size**: Capacity reservations can be for either 10 TiB or 100 TiB, with more significant discounts for purchasing a higher capacity reservation. You can purchase multiple reservations, including reservations of different capacity sizes to meet your workload requirements. For example, if your production deployment has 120 TiB of file shares, you could purchase one 100 TiB reservation and two 10 TiB reservations to meet the total capacity requirements. -- **Term**: Reservations can be purchased for either a one-year or three-year term, with more significant discounts for purchasing a longer reservation term. 
+- **Term**: Reservations can be purchased for either a one-year or three-year term, with more significant discounts for purchasing a longer reservation term. - **Tier**: The tier of Azure Files for the capacity reservation. Reservations for Azure Files currently are available for the premium, hot, and cool tiers. - **Location**: The Azure region for the capacity reservation. Capacity reservations are available in a subset of Azure regions. - **Redundancy**: The storage redundancy for the capacity reservation. Reservations are supported for all redundancies Azure Files supports, including LRS, ZRS, GRS, and GZRS. -Once you purchase a capacity reservation, it will automatically be consumed by your existing storage utilization. If you use more storage than you have reserved, you will pay list price for the balance not covered by the capacity reservation. Transaction, bandwidth, data transfer, and metadata storage charges are not included in the reservation. +Once you purchase a capacity reservation, it will automatically be consumed by your existing storage utilization. If you use more storage than you have reserved, you'll pay list price for the balance not covered by the capacity reservation. Transaction, bandwidth, data transfer, and metadata storage charges aren't included in the reservation. For more information on how to purchase storage reservations, see [Optimize costs for Azure Files with reserved capacity](files-reserve-capacity.md). @@ -83,19 +83,19 @@ Azure Files uses a provisioned model for premium file shares. In a provisioned b The provisioned size of the file share can be increased at any time but can be decreased only after 24 hours since the last increase. After waiting for 24 hours without a quota increase, you can decrease the share quota as many times as you like, until you increase it again. IOPS/throughput scale changes will be effective within a few minutes after the provisioned size change. -It is possible to decrease the size of your provisioned share below your used GiB. If you do this, you will not lose data, but you will still be billed for the size used and receive the performance of the provisioned share, not the size used. +It's possible to decrease the size of your provisioned share below your used GiB. If you do, you won't lose data, but you'll still be billed for the size used and receive the performance of the provisioned share, not the size used. ### Provisioning method -When you provision a premium file share, you specify how many GiBs your workload requires. Each GiB that you provision entitles you to additional IOPS and throughput on a fixed ratio. In addition to the baseline IOPS for which you are guaranteed, each premium file share supports bursting on a best effort basis. The formulas for IOPS and throughput are as follows: +When you provision a premium file share, you specify how many GiBs your workload requires. Each GiB that you provision entitles you to more IOPS and throughput at a fixed ratio. In addition to the baseline IOPS you're guaranteed, each premium file share supports bursting on a best effort basis.
The formulas for IOPS and throughput are as follows: | Item | Value | |-|-| | Minimum size of a file share | 100 GiB | | Provisioning unit | 1 GiB | -| Baseline IOPS formula | `MIN(3000 + 1 * ProvisionedGiB, 100000)` | -| Burst limit | `MIN(MAX(10000, 3 * ProvisionedGiB), 100000)` | +| Baseline IOPS formula | `MIN(3000 + 1 * ProvisionedStorageGiB, 100000)` | +| Burst limit | `MIN(MAX(10000, 3 * ProvisionedStorageGiB), 100000)` | | Burst credits | `(BurstLimit - BaselineIOPS) * 3600` | -| Throughput rate (ingress + egress) (MiB/sec) | `100 + CEILING(0.04 * ProvisionedGiB) + CEILING(0.06 * ProvisionedGiB)` | +| Throughput rate (ingress + egress) (MiB/sec) | `100 + CEILING(0.04 * ProvisionedStorageGiB) + CEILING(0.06 * ProvisionedStorageGiB)` | The following table illustrates a few examples of these formulae for the provisioned share sizes: @@ -113,9 +113,9 @@ The following table illustrates a few examples of these formulae for the provisi Effective file share performance is subject to machine network limits, available network bandwidth, IO sizes, and parallelism, among many other factors. For example, based on internal testing with 8 KiB read/write IO sizes, a single Windows virtual machine without SMB Multichannel enabled, *Standard F16s_v2*, connected to premium file share over SMB could achieve 20K read IOPS and 15K write IOPS. With 512 MiB read/write IO sizes, the same VM could achieve 1.1 GiB/s egress and 370 MiB/s ingress throughput. The same client can achieve up to \~3x performance if SMB Multichannel is enabled on the premium shares. To achieve maximum performance scale, [enable SMB Multichannel](files-smb-protocol.md#smb-multichannel) and spread the load across multiple VMs. Refer to [SMB Multichannel performance](storage-files-smb-multichannel-performance.md) and [troubleshooting guide](storage-troubleshooting-files-performance.md) for some common performance issues and workarounds. ### Bursting -If your workload needs the extra performance to meet peak demand, your share can use burst credits to go above the share's baseline IOPS limit to give the share the performance it needs to meet the demand. Premium file shares can burst their IOPS up to 4,000 or up to a factor of three, whichever is a higher value. Bursting is automated and operates based on a credit system. Bursting works on a best effort basis, and the burst limit is not a guarantee. +If your workload needs the extra performance to meet peak demand, your share can use burst credits to go above the share's baseline IOPS limit to give the share the performance it needs to meet the demand. Bursting is automated and operates based on a credit system. Bursting works on a best effort basis, and the burst limit isn't a guarantee. -Credits accumulate in a burst bucket whenever traffic for your file share is below baseline IOPS. For example, a 100 GiB share has 500 baseline IOPS. If actual traffic on the share was 100 IOPS for a specific 1-second interval, then the 400 unused IOPS are credited to a burst bucket. Similarly, an idle 1 TiB share accrues burst credit at 1,424 IOPS. These credits will then be used later when operations would exceed the baseline IOPS. +Credits accumulate in a burst bucket whenever traffic for your file share is below baseline IOPS. Earned credits are used later to enable burst when operations would exceed the baseline IOPS. Whenever a share exceeds the baseline IOPS and has credits in a burst bucket, it will burst up to the maximum allowed peak burst rate. 
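To make the provisioning and bursting math above easier to check, the following sketch evaluates the published formulas for a given provisioned size. It's an illustrative, reader-side calculation in Python — not an Azure SDK or CLI call — and the function name is made up for this example.

```python
import math

def premium_share_performance(provisioned_gib: int) -> dict:
    """Evaluate the baseline IOPS, burst limit, burst credit, and throughput formulas above."""
    baseline_iops = min(3000 + 1 * provisioned_gib, 100_000)
    burst_limit_iops = min(max(10_000, 3 * provisioned_gib), 100_000)
    burst_credits = (burst_limit_iops - baseline_iops) * 3600
    throughput_mib_per_sec = 100 + math.ceil(0.04 * provisioned_gib) + math.ceil(0.06 * provisioned_gib)
    return {
        "baseline_iops": baseline_iops,
        "burst_limit_iops": burst_limit_iops,
        "burst_credits": burst_credits,
        "throughput_mib_per_sec": throughput_mib_per_sec,
    }

# A 1,024 GiB share works out to 4,024 baseline IOPS, a 10,000 IOPS burst limit,
# 21,513,600 burst credits, and 203 MiB/sec of combined ingress and egress.
print(premium_share_performance(1024))
```

For the 100 GiB minimum share size, the same calculation gives 3,100 baseline IOPS, a 10,000 IOPS burst limit, and 110 MiB/sec of combined throughput.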
Shares can continue to burst as long as credits are remaining, but this is based on the number of burst credits accrued. Each IO beyond baseline IOPS consumes one credit, and once all credits are consumed, the share would return to the baseline IOPS. @@ -125,26 +125,26 @@ Share credits have three states: - Declining, when the file share is using more than the baseline IOPS and in the bursting mode. - Constant, when the file share is using exactly the baseline IOPS, there are either no credits accrued or used. -New file shares start with the full number of credits in its burst bucket. Burst credits will not be accrued if the share IOPS fall below baseline IOPS due to throttling by the server. +New file shares start with the full number of credits in their burst buckets. Burst credits won't be accrued if the share IOPS fall below baseline IOPS due to throttling by the server. ## Pay-as-you-go model -Azure Files uses a pay-as-you-go business model for standard file shares. In a pay-as-you-go business model, the amount you pay is determined by how much you actually use, rather than based on a provisioned amount. At a high level, you pay a cost for the amount of logical data stored, and then an additional set of transactions based on your usage of that data. A pay-as-you-go model can be cost-efficient, because you don't need to overprovision to account for future growth or performance requirements, or deprovision if your workload and data footprint vary over time. On the other hand, a pay-as-you-go model can also be difficult to plan as part of a budgeting process, because the pay-as-you-go billing model is driven by end-user consumption. +Azure Files uses a pay-as-you-go business model for standard file shares. In a pay-as-you-go business model, the amount you pay is determined by how much you actually use, rather than based on a provisioned amount. At a high level, you pay a cost for the amount of logical data stored, and then for an additional set of transactions based on your usage of that data. A pay-as-you-go model can be cost-efficient, because you don't need to overprovision to account for future growth or performance requirements. You also don't need to deprovision if your workload and data footprint vary over time. On the other hand, a pay-as-you-go model can also be difficult to plan as part of a budgeting process, because the pay-as-you-go billing model is driven by end-user consumption. ### Differences in standard tiers When you create a standard file share, you pick between the following tiers: transaction optimized, hot, and cool. All three tiers are stored on the exact same standard storage hardware. The main difference for these three tiers is their data at-rest storage prices, which are lower in cooler tiers, and the transaction prices, which are higher in the cooler tiers. This means: - Transaction optimized, as the name implies, optimizes the price for high transaction workloads. Transaction optimized has the highest data at-rest storage price, but the lowest transaction prices. -- Hot is for active workloads that do not involve a large number of transactions, and has a slightly lower data at-rest storage price, but slightly higher transaction prices as compared to transaction optimized. Think of it as the middle ground between the transaction optimized and cool tiers. -- Cool optimizes the price for workloads that do not have much activity, offering the lowest data at-rest storage price, but the highest transaction prices.
+- Hot is for active workloads that don't involve a large number of transactions, and has a slightly lower data at-rest storage price, but slightly higher transaction prices as compared to transaction optimized. Think of it as the middle ground between the transaction optimized and cool tiers. +- Cool optimizes the price for workloads that don't have much activity, offering the lowest data at-rest storage price, but the highest transaction prices. -If you put an infrequently accessed workload in the transaction optimized tier, you will pay almost nothing for the few times in a month that you make transactions against your share, but you will pay a high amount for the data storage costs. If you were to move this same share to the cool tier, you would still pay almost nothing for the transaction costs, simply because you are infrequently making transactions for this workload, but the cool tier has a much cheaper data storage price. Selecting the appropriate tier for your use case allows you to considerably reduce your costs. +If you put an infrequently accessed workload in the transaction optimized tier, you'll pay almost nothing for the few times in a month that you make transactions against your share. However, you'll pay a high amount for the data storage costs. If you moved this same share to the cool tier, you'd still pay almost nothing for the transaction costs, simply because you're infrequently making transactions for this workload. However, the cool tier has a much cheaper data storage price. Selecting the appropriate tier for your use case allows you to considerably reduce your costs. -Similarly, if you put a highly accessed workload in the cool tier, you will pay a lot more in transaction costs, but less for data storage costs. This can lead to a situation where the increased costs from the transaction prices increase outweigh the savings from the decreased data storage price, leading you to pay more money on cool than you would have on transaction optimized. For some usage levels, it's possible that the hot tier will be the most cost efficient, and the cool tier will be more expensive than transaction optimized. +Similarly, if you put a highly accessed workload in the cool tier, you'll pay a lot more in transaction costs, but less for data storage costs. This can lead to a situation where the increased costs from the transaction prices increase outweigh the savings from the decreased data storage price, leading you to pay more money on cool than you would have on transaction optimized. For some usage levels, it's possible that the hot tier will be the most cost efficient, and the cool tier will be more expensive than transaction optimized. Your workload and activity level will determine the most cost efficient tier for your standard file share. In practice, the best way to pick the most cost efficient tier involves looking at the actual resource consumption of the share (data stored, write transactions, etc.). ### Choosing a tier -Regardless of how you migrate existing data into Azure Files, we recommend initially creating the file share in transaction optimized tier due to the large number of transactions incurred during migration. After your migration is complete and you've operated for a few days/weeks with regular usage, you can plug your transaction counts into the [pricing calculator](https://azure.microsoft.com/pricing/calculator/) to figure out which tier is best suited for your workload. 
+Regardless of how you migrate existing data into Azure Files, we recommend initially creating the file share in transaction optimized tier due to the large number of transactions incurred during migration. After your migration is complete and you've operated for a few days or weeks with regular usage, you can plug your transaction counts into the [pricing calculator](https://azure.microsoft.com/pricing/calculator/) to figure out which tier is best suited for your workload. Because standard file shares only show transaction information at the storage account level, using the storage metrics to estimate which tier is cheaper at the file share level is an imperfect science. If possible, we recommend deploying only one file share in each storage account to ensure full visibility into billing. @@ -162,7 +162,7 @@ To see previous transactions: ### What are transactions? Transactions are operations or requests against Azure Files to upload, download, or otherwise manipulate the contents of the file share. Every action taken on a file share translates to one or more transactions, and on standard shares that use the pay-as-you-go billing model, that translates to transaction costs. -There are five basic transaction categories: write, list, read, other, and delete. All operations done via the REST API or SMB are bucketed into one of these 4 categories as follows: +There are five basic transaction categories: write, list, read, other, and delete. All operations done via the REST API or SMB are bucketed into one of these categories: | Transaction bucket | Management operations | Data operations | |-|-|-| @@ -173,40 +173,40 @@ There are five basic transaction categories: write, list, read, other, and delet | Delete transactions |
                    • `DeleteShare`
                    |
                    • `ClearRange`
                    • `DeleteDirectory`
                    • `DeleteFile`
                    | > [!Note] -> NFS 4.1 is only available for premium file shares, which use the provisioned billing model. Transactions do not affect billing for premium file shares. +> NFS 4.1 is only available for premium file shares, which use the provisioned billing model. Transactions don't affect billing for premium file shares. ## Provisioned/quota, logical size, and physical size Azure Files tracks three distinct quantities with respect to share capacity: -- **Provisioned size or quota**: With both premium and standard file shares, you specify the maximum size that the file share is allowed to grow to. In premium file shares, this value is called the provisioned size, and whatever amount you provision is what you pay for, regardless of how much you actually use. In standard file shares, this value is called quota and does not directly affect your bill. Provisioned size is a required field for premium file shares, while standard file shares will default if not directly specified to the maximum value supported by the storage account, either 5 TiB or 100 TiB, depending on the storage account type and settings. +- **Provisioned size or quota**: With both premium and standard file shares, you specify the maximum size that the file share is allowed to grow to. In premium file shares, this value is called the provisioned size, and whatever amount you provision is what you pay for, regardless of how much you actually use. In standard file shares, this value is called quota and does not directly affect your bill. Provisioned size is a required field for premium file shares. For standard file shares, if provisioned size isn't directly specified, the share will default to the maximum value supported by the storage account. This is either 5 TiB or 100 TiB, depending on the storage account type and settings. -- **Logical size**: The logical size of a file share or file relates to how big it is without considering how it is actually stored, where additional optimizations may be applied. One way to think about this is that the logical size of the file is how many KiB/MiB/GiB will be transferred over the wire if you copy it to a different location. In both premium and standard file shares, the total logical size of the file share is what is used for enforcement against provisioned size/quota. In standard file shares, the logical size is the quantity used for the data at-rest usage billing. Logical size is referred to as "size" in the Windows properties dialog for a file/folder and as "content length" by Azure Files metrics. +- **Logical size**: The logical size of a file share or file relates to how big it is without considering how it's actually stored, where additional optimizations may be applied. One way to think about this is that the logical size of the file is how many KiB/MiB/GiB will be transferred over the wire if you copy it to a different location. In both premium and standard file shares, the total logical size of the file share is what is used for enforcement against provisioned size/quota. In standard file shares, the logical size is the quantity used for the data at-rest usage billing. Logical size is referred to as "size" in the Windows properties dialog for a file/folder and as "content length" by Azure Files metrics. -- **Physical size**: The physical size of the file relates to the size of the file as encoded on disk. This may align with the file's logical size, or it may be smaller, depending on how the file has been written to by the operating system. 
A common reason for the logical size and physical size to be different is through the use of [sparse files](/windows/win32/fileio/sparse-files). The physical size of the files in the share is used for snapshot billing, although allocated ranges are shared between snapshots if they are unchanged (differential storage). To learn more about how snapshots are billed in Azure Files, see [Snapshots](#snapshots). +- **Physical size**: The physical size of the file relates to the size of the file as encoded on disk. This may align with the file's logical size, or it may be smaller, depending on how the file has been written to by the operating system. A common reason for the logical size and physical size to be different is by using [sparse files](/windows/win32/fileio/sparse-files). The physical size of the files in the share is used for snapshot billing, although allocated ranges are shared between snapshots if they are unchanged (differential storage). To learn more about how snapshots are billed in Azure Files, see [Snapshots](#snapshots). ## Snapshots -Azure Files supports snapshots, which are similar to volume shadow copies (VSS) on Windows File Server. Snapshots are always differential from the live share and from each other, meaning that you are always paying only for what's different in each snapshot. For more information on share snapshots, see [Overview of snapshots for Azure Files](storage-snapshots-files.md). +Azure Files supports snapshots, which are similar to volume shadow copies (VSS) on Windows File Server. Snapshots are always differential from the live share and from each other, meaning that you're always paying only for what's different in each snapshot. For more information on share snapshots, see [Overview of snapshots for Azure Files](storage-snapshots-files.md). -Snapshots do not count against file share size limits, although you are limited to a specific number of snapshots. To see the current snapshot limits, see [Azure file share scale targets](storage-files-scale-targets.md#azure-file-share-scale-targets). +Snapshots do not count against file share size limits, although you're limited to a specific number of snapshots. To see the current snapshot limits, see [Azure file share scale targets](storage-files-scale-targets.md#azure-file-share-scale-targets). Snapshots are always billed based on the differential storage utilization of each snapshot, however this looks slightly different between premium file shares and standard file shares: -- In premium file shares, snapshots are billed against their own snapshot meter, which has a reduced price over the provisioned storage price. This means that you will see a separate line item on your bill representing snapshots for premium file shares for each FileStorage storage account on your bill. +- In premium file shares, snapshots are billed against their own snapshot meter, which has a reduced price over the provisioned storage price. This means that you'll see a separate line item on your bill representing snapshots for premium file shares for each FileStorage storage account on your bill. -- In standard file shares, snapshots are billed as part of the normal used storage meter, although you are still only billed for the differential cost of the snapshot. This means that you will not see a separate line item on your bill representing snapshots for each standard storage account containing Azure file shares. 
This also means that differential snapshot usage counts against capacity reservations that are purchased for standard file shares. +- In standard file shares, snapshots are billed as part of the normal used storage meter, although you're still only billed for the differential cost of the snapshot. This means that you won't see a separate line item on your bill representing snapshots for each standard storage account containing Azure file shares. This also means that differential snapshot usage counts against capacity reservations that are purchased for standard file shares. Value-added services for Azure Files may use snapshots as part of their value proposition. See [value-added services for Azure Files](#value-added-services) for more information on how snapshots are used. ## Value-added services -Like on-premises storage solutions which offer first- and third-party features/product integrations to bring additional value to the hosted file shares, Azure Files provides integration points for first- and third-party products to integrate with customer-owned file shares. Although these solutions may provide considerable extra value to Azure Files, you should consider the additional costs that these services add to the total cost of an Azure Files solution. +Like on-premises storage solutions that offer first- and third-party features and product integrations to add value to the hosted file shares, Azure Files provides integration points for first- and third-party products to integrate with customer-owned file shares. Although these solutions may provide considerable extra value to Azure Files, you should consider the extra costs that these services add to the total cost of an Azure Files solution. -Costs are generally broken down into three buckets: +Costs are broken down into three buckets: -- **Licensing costs for the value-added service.** These may come in the form of a fixed cost per customer, end user (sometimes referred to as a "head cost"), Azure file share or storage account, or in units of storage utilization, such as a fixed cost for every 500 GiB chunk of data in the file share. +- **Licensing costs for the value-added service.** These may come in the form of a fixed cost per customer, end user (sometimes called a "head cost"), Azure file share or storage account. They may also be based on units of storage utilization, such as a fixed cost for every 500 GiB chunk of data in the file share. - **Transaction costs for the value-added service.** Some value-added services have their own concept of transactions distinct from what Azure Files views as a transaction. These transactions will show up on your bill under the value-added service's charges; however, they relate directly to how you use the value-added service with your file share. -- **Azure Files costs for using a value-added service.** Azure Files does not directly charge customers costs for adding value-added services, but as part of adding value to the Azure file share, the value-added service might increase the costs that you see on your Azure file share. This is easy to see with standard file shares, because standard file shares have a pay-as-you-go model with transaction charges. If the value-added service does transactions against the file share on your behalf, they will show up in your Azure Files transaction bill even though you didn't directly do those transactions yourself. This applies to premium file shares as well, although it may be less noticeable. 
Additional transactions against premium file shares from value-added services count against your provisioned IOPS numbers, meaning that value-added services may require provisioning additional storage to have enough IOPS or throughput available for your workload. +- **Azure Files costs for using a value-added service.** Azure Files does not directly charge customers costs for adding value-added services, but as part of adding value to the Azure file share, the value-added service might increase the costs that you see on your Azure file share. This is easy to see with standard file shares, because standard file shares have a pay-as-you-go model with transaction charges. If the value-added service does transactions against the file share on your behalf, they will show up in your Azure Files transaction bill even though you didn't directly do those transactions yourself. This applies to premium file shares as well, although it may be less noticeable. Additional transactions against premium file shares from value-added services count against your provisioned IOPS numbers, meaning that value-added services may require provisioning more storage to have enough IOPS or throughput available for your workload. When computing the total cost of ownership for your file share, you should consider the costs of Azure Files and of all value-added services that you would like to use with Azure Files. @@ -221,17 +221,17 @@ When considering the total cost of ownership for a solution deployed using Azure To optimize costs for Azure Files with Azure File Sync, you should consider the tier of your file share. For more information on how to pick the tier for each file share, see [choosing a file share tier](#choosing-a-tier). -If you are migrating to Azure File Sync from StorSimple, see [Comparing the costs of StorSimple to Azure File Sync](../file-sync/file-sync-storsimple-cost-comparison.md). +If you're migrating to Azure File Sync from StorSimple, see [Comparing the costs of StorSimple to Azure File Sync](../file-sync/file-sync-storsimple-cost-comparison.md). ### Azure Backup -Azure Backup provides a serverless backup solution for Azure Files that seamlessly integrates with your file shares, as well as other value-added services such as Azure File Sync. Azure Backup for Azure Files is a snapshot-based backup solution, meaning that Azure Backup provides a scheduling mechanism for automatically taking snapshots on an administrator-defined schedule and a user-friendly interface for restoring deleted files/folders or the entire share to a particular point in time. To learn more about Azure Backup for Azure Files, see [About Azure file share backup](../../backup/azure-file-share-backup-overview.md?toc=/azure/storage/files/toc.json). +Azure Backup provides a serverless backup solution for Azure Files that seamlessly integrates with your file shares, and with other value-added services such as Azure File Sync. Azure Backup for Azure Files is a snapshot-based backup solution that provides a scheduling mechanism for automatically taking snapshots on an administrator-defined schedule. It also provides a user-friendly interface for restoring deleted files/folders or the entire share to a particular point in time. To learn more about Azure Backup for Azure Files, see [About Azure file share backup](../../backup/azure-file-share-backup-overview.md?toc=/azure/storage/files/toc.json). 
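Before drilling into Azure Backup's specific charges, it can help to see how the three value-added cost buckets described earlier combine for a hypothetical backup-style service. The sketch below is purely illustrative: the flat monthly license and all prices are hypothetical placeholders, not actual Azure Files, Azure Backup, or partner rates.

```python
def value_added_service_monthly_cost(
    license_fee: float,             # hypothetical flat monthly licensing cost for the service
    service_transactions: int,      # transactions billed under the service's own meter
    service_txn_price: float,       # hypothetical per-transaction price for that meter
    extra_share_transactions: int,  # extra transactions the service drives against a standard share
    share_txn_price: float,         # hypothetical Azure Files per-transaction price
) -> float:
    """Roll up the three cost buckets: licensing, service transactions, and Azure Files transactions."""
    licensing_cost = license_fee
    service_cost = service_transactions * service_txn_price
    azure_files_cost = extra_share_transactions * share_txn_price
    return licensing_cost + service_cost + azure_files_cost

# Made-up numbers only: a $10 license, 50,000 service-metered transactions,
# and 200,000 additional Azure Files transactions triggered on your behalf.
print(value_added_service_monthly_cost(10.0, 50_000, 0.0001, 200_000, 0.00005))
```

On a premium share, the last bucket shows up as consumed provisioned IOPS rather than a per-transaction charge, which is why it's easy to overlook when estimating total cost.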
-When considering the costs of using Azure Backup to back up your Azure file shares, you should consider the following: +When considering the costs of using Azure Backup to back up your Azure file shares, consider the following: -- **Protected instance licensing cost for Azure file share data.** Azure Backup charges a protected instance licensing cost per storage account containing backed up Azure file shares. A protected instance is defined as 250 GiB of Azure file share storage. Storage accounts containing less than 250 GiB of Azure file share storage are subject to a fractional protected instance cost. See [Azure Backup pricing](https://azure.microsoft.com/pricing/details/backup/) for more information (note that you must select *Azure Files* from the list of services Azure Backup can protect). +- **Protected instance licensing cost for Azure file share data.** Azure Backup charges a protected instance licensing cost per storage account containing backed up Azure file shares. A protected instance is defined as 250 GiB of Azure file share storage. Storage accounts containing less than 250 GiB of Azure file share storage are subject to a fractional protected instance cost. For more information, see [Azure Backup pricing](https://azure.microsoft.com/pricing/details/backup/). Note that you must select *Azure Files* from the list of services Azure Backup can protect. - **Azure Files costs.** Azure Backup increases the costs of Azure Files in the following ways: - - **Differential costs from Azure file share snapshots.** Azure Backup automates taking Azure file share snapshots on an administrator-defined schedule. Snapshots are always differential; however, the additional cost added to the total bill depends on the length of time snapshots are kept and the amount of churn on the file share during that time, because that dictates how different the snapshot is from the live file share and therefore how much additional data is stored by Azure Files. + - **Differential costs from Azure file share snapshots.** Azure Backup automates taking Azure file share snapshots on an administrator-defined schedule. Snapshots are always differential; however, the additional cost added to the total bill depends on the length of time snapshots are kept and the amount of churn on the file share during that time. This dictates how different the snapshot is from the live file share and therefore how much additional data is stored by Azure Files. - **Transaction costs from restore operations.** Restore operations from the snapshot to the live share will cause transactions. For standard file shares, this means that reads from snapshots/writes from restores will be billed as normal file share transactions. For premium file shares, these operations are counted against the provisioned IOPS for the file share. @@ -240,7 +240,7 @@ Microsoft Defender provides support for Azure Files as part of its Microsoft Def Microsoft Defender for Storage does not support antivirus capabilities for Azure file shares. -The main cost from Microsoft Defender for Storage is an additional set of transaction costs that the product levies on top of the transactions that are done against the Azure file share. Although these costs are based on the transactions incurred in Azure Files, they are not part of the billing for Azure Files, but rather are part of the Microsoft Defender pricing. Microsoft Defender for Storage charges a transaction rate even on premium file shares, where Azure Files includes transactions as part of IOPS provisioning. 
The current transaction rate can be found on [Microsoft Defender for Cloud pricing page](https://azure.microsoft.com/pricing/details/defender-for-cloud/) under the *Microsoft Defender for Storage* table row. +The main cost from Microsoft Defender for Storage is an additional set of transaction costs that the product levies on top of the transactions that are done against the Azure file share. Although these costs are based on the transactions incurred in Azure Files, they aren't part of the billing for Azure Files, but rather are part of the Microsoft Defender pricing. Microsoft Defender for Storage charges a transaction rate even on premium file shares, where Azure Files includes transactions as part of IOPS provisioning. The current transaction rate can be found on [Microsoft Defender for Cloud pricing page](https://azure.microsoft.com/pricing/details/defender-for-cloud/) under the *Microsoft Defender for Storage* table row. Transaction heavy file shares will incur significant costs using Microsoft Defender for Storage. Based on these costs, you may wish to opt-out of Microsoft Defender for Storage for specific storage accounts. For more information, see [Exclude a storage account from Microsoft Defender for Storage protections](../../defender-for-cloud/defender-for-storage-exclude.md). diff --git a/articles/storage/queues/TOC.yml b/articles/storage/queues/TOC.yml index 43652d560e459..15a012a880b16 100644 --- a/articles/storage/queues/TOC.yml +++ b/articles/storage/queues/TOC.yml @@ -315,7 +315,7 @@ - name: Azure Storage client library version 12.x href: https://github.com/Azure/azure-sdk-for-java/tree/master/sdk/storage - name: Blobs, Queues, and Files (version 8) - href: /java/api/overview/azure/storage?view=azure-java-legacy&preserve-view=true + href: /java/api/overview/azure/storage - name: Azure Storage client library version 8.x and earlier href: https://github.com/Azure/azure-storage-java - name: Node.js diff --git a/articles/storage/queues/storage-ruby-how-to-use-queue-storage.md b/articles/storage/queues/storage-ruby-how-to-use-queue-storage.md index bc7214c8dbb33..fd5d052315b82 100644 --- a/articles/storage/queues/storage-ruby-how-to-use-queue-storage.md +++ b/articles/storage/queues/storage-ruby-how-to-use-queue-storage.md @@ -167,4 +167,4 @@ Now that you've learned the basics of Queue Storage, follow these links to learn - Visit the [Azure Storage team blog](/archive/blogs/windowsazurestorage/) - Visit the [Azure SDK for Ruby](https://github.com/WindowsAzure/azure-sdk-for-ruby) repository on GitHub -For a comparison between Azure Queue Storage discussed in this article and Azure Service Bus queues discussed in [How to use Service Bus queues](/azure/service-bus-messaging/service-bus-quickstart-portal), see [Azure Queue Storage and Service Bus queues - compared and contrasted](../../service-bus-messaging/service-bus-azure-and-service-bus-queues-compared-contrasted.md) +For a comparison between Azure Queue Storage discussed in this article and Azure Service Bus queues discussed in [How to use Service Bus queues](../../service-bus-messaging/service-bus-quickstart-portal.md), see [Azure Queue Storage and Service Bus queues - compared and contrasted](../../service-bus-messaging/service-bus-azure-and-service-bus-queues-compared-contrasted.md) \ No newline at end of file diff --git a/articles/storage/solution-integration/TOC.yml b/articles/storage/solution-integration/TOC.yml index 4254f73605c41..6a2326a03451b 100644 --- a/articles/storage/solution-integration/TOC.yml +++ 
b/articles/storage/solution-integration/TOC.yml @@ -1,4 +1,4 @@ -- name: Azure Storage validated partner solutions +- name: Azure Storage partner solutions href: ./index.yml - name: Analytics and big data partners items: diff --git a/articles/storage/solution-integration/index.yml b/articles/storage/solution-integration/index.yml index 39975a729b450..0f0af26c81b2c 100644 --- a/articles/storage/solution-integration/index.yml +++ b/articles/storage/solution-integration/index.yml @@ -1,11 +1,10 @@ ### YamlMime:Landing -title: Azure Storage validated partner solutions documentation # < 60 chars -summary: Azure Storage validated partner solutions enable advanced scenarios on top of Azure Storage. # < 160 chars - +title: Azure Storage solutions from Microsoft partners # < 60 chars +summary: Learn about the many available Microsoft partner solutions that use Azure Storage to enable advanced scenarios. # < 160 chars metadata: - title: Azure Storage validated partner solutions # Required; page title displayed in search results. Include the brand. < 60 chars. - description: Azure Storage validated partner solutions enable advanced scenarios on top of Azure Storage. # Required; article description that is displayed in search results. < 160 chars. + title: Azure Storage solutions from Microsoft partners # Required; page title displayed in search results. Include the brand. < 60 chars. + description: Learn about the many available Microsoft partner solutions that use Azure Storage to enable advanced scenarios. # Required; article description that is displayed in search results. < 160 chars. services: storage ms.service: storage #Required; service per approved list. Service slug assigned to your service by ACOM. ms.subservice: partner @@ -13,7 +12,8 @@ metadata: ms.collection: collection author: beber-msft ms.author: beber - ms.date: 03/15/2021 #Required; mm/dd/yyyy format. + ms.date: 06/09/2022 #Required; mm/dd/yyyy format. 
+ ms.custom: kr2b-contr-experiment # linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | whats-new @@ -21,18 +21,18 @@ landingContent: # Cards and links should be based on top customer tasks or top subjects # Start card title with a verb # Card - - title: Analytics and big data partners + - title: Analytics and big data partner solutions linkLists: - linkListType: overview links: - - text: Validated analytics partners + - text: Overview of analytics partner solutions url: .\validated-partners\analytics\partner-overview.md # Card - - title: Archive, backup and BCDR partners + - title: Archive, backup, and BCDR partner solutions linkLists: - linkListType: overview links: - - text: Validated archive and BCDR partners + - text: Overview of archive and BCDR partner solutions url: .\validated-partners\backup-archive-disaster-recovery\partner-overview.md - linkListType: get-started links: @@ -45,18 +45,18 @@ landingContent: - text: Tiger Bridge archiving with CDP and DR url: .\validated-partners\backup-archive-disaster-recovery\tiger-bridge-cdp-guide.md # Card - - title: Container solution partners + - title: Partner container solutions linkLists: - linkListType: overview links: - - text: Validated container solution partners + - text: Overview of partner container solutions url: .\validated-partners\container-solutions\partner-overview.md # Card - - title: Data management and migration partners + - title: Data management and migration partner solutions linkLists: - linkListType: overview links: - - text: Validated data management and migration partners + - text: Overview of data management and migration partner solutions url: .\validated-partners\data-management\partner-overview.md - text: Migration tools comparison url: ./validated-partners/data-management/migration-tools-comparison.md @@ -73,11 +73,11 @@ landingContent: - text: Cirrus Data getting started guide url: ./validated-partners/data-management/cirrus-data-migration-guide.md # Card - - title: Primary and secondary storage partners + - title: Primary and secondary storage partner solutions linkLists: - linkListType: overview links: - - text: Validated partners for primary and secondary storage + - text: Overview of primary and secondary storage partner solutions url: .\validated-partners\primary-secondary-storage\partner-overview.md - text: ISV file services url: .\validated-partners\primary-secondary-storage\isv-file-services.md diff --git a/articles/storage/solution-integration/validated-partners/backup-archive-disaster-recovery/tiger-bridge-cdp-guide.md b/articles/storage/solution-integration/validated-partners/backup-archive-disaster-recovery/tiger-bridge-cdp-guide.md index 3b1b69c352a6a..c3c1d049edae0 100644 --- a/articles/storage/solution-integration/validated-partners/backup-archive-disaster-recovery/tiger-bridge-cdp-guide.md +++ b/articles/storage/solution-integration/validated-partners/backup-archive-disaster-recovery/tiger-bridge-cdp-guide.md @@ -12,7 +12,7 @@ ms.subservice: partner # Tiger Bridge archiving with continuous data protection and disaster recovery -This article will guide you to set up Tiger Bridge data management system with Azure Blob Storage. Tiger Bridge Continuous data protection (CDP) integrates with [Soft Delete](/azure/storage/blobs/soft-delete-blob-overview) and [Versioning](/azure/storage/blobs/versioning-overview) to achieve a complete Continuous Data Protection solution. 
It applies policies to move data between [Azure Blob tiers](/azure/storage/blobs/access-tiers-overview) for optimal cost. Continuous data protection allows customers to have a real-time file-based backup with snapshots to achieve near zero RPO. CDP enables customers to protect their assets with minimum resources. Optionally, it can be used in WORM scenario using [immutable storage](/azure/storage/blobs/immutable-storage-overview). +This article guides you through setting up the Tiger Bridge data management system with Azure Blob Storage. Tiger Bridge Continuous data protection (CDP) integrates with [Soft Delete](../../../blobs/soft-delete-blob-overview.md) and [Versioning](../../../blobs/versioning-overview.md) to achieve a complete Continuous Data Protection solution. It applies policies to move data between [Azure Blob tiers](../../../blobs/access-tiers-overview.md) for optimal cost. Continuous data protection allows customers to have a real-time file-based backup with snapshots to achieve near zero RPO. CDP enables customers to protect their assets with minimum resources. Optionally, it can be used in a WORM scenario using [immutable storage](../../../blobs/immutable-storage-overview.md). In addition, Tiger Bridge provides easy and efficient Disaster Recovery. It can be combined with [Microsoft DFSR](/windows-server/storage/dfs-replication/dfsr-overview), but it isn't mandatory. It allows mirrored DR sites, or can be used with minimum storage DR sites (keeping only the most recent data on-prem plus). All the replicated files in Azure Blob Storage are stored as native objects, allowing the organization to access them without using Tiger Bridge. This approach prevents vendor locking. @@ -20,15 +20,15 @@ All the replicated files in Azure Blob Storage are stored as native objects, all :::image type="content" source="./media/tiger-bridge-cdp-guide/tiger-bridge-reference-architecture.png" alt-text="Tiger Bridge reference architecture."::: -More information on Tiger Bridge solution, and common use case can be read in [Tiger Bridge deployment guide](/azure/storage/solution-integration/validated-partners/primary-secondary-storage/tiger-bridge-deployment-guide). +For more information on the Tiger Bridge solution and common use cases, see the [Tiger Bridge deployment guide](../primary-secondary-storage/tiger-bridge-deployment-guide.md). ## Before you begin -- **Refer to [Tiger Bridge deployment guide](/azure/storage/solution-integration/validated-partners/primary-secondary-storage/tiger-bridge-deployment-guide)**, it describes initial steps needed for setting up CDP. +- **Refer to the [Tiger Bridge deployment guide](../primary-secondary-storage/tiger-bridge-deployment-guide.md)**, which describes the initial steps needed for setting up CDP. - **Choose the right storage options**. When you use Azure as a backup target, you'll make use of [Azure Blob storage](https://azure.microsoft.com/services/storage/blobs/). Blob storage is optimized for storing massive amounts of unstructured data, which is data that doesn't adhere to any data model, or definition. It's durable, highly available, secure, and scalable.
You can select the right storage for your workload by looking at two aspects: - - [Storage redundancy](/azure/storage/common/storage-redundancy) - - [Storage tier](/azure/storage/blobs/access-tiers-overview) + - [Storage redundancy](../../../common/storage-redundancy.md) + - [Storage tier](../../../blobs/access-tiers-overview.md) ### Sample backup to Azure cost model Subscription based model can be daunting to customers who are new to the cloud. While you pay for only the capacity used, you do also pay for transactions (read and write), and egress for data read back to your on-premises environment (depending on the network connection used). We recommend using the [Azure Pricing Calculator](https://azure.microsoft.com/pricing/calculator/) to perform what-if analysis. You can base the analysis on list pricing or on Azure Storage Reserved Capacity pricing, which can deliver up to 38% savings. Below is an example pricing exercise to model the monthly cost of backing up to Azure. @@ -44,13 +44,13 @@ Subscription based model can be daunting to customers who are new to the cloud. > This is only an example. Your pricing may vary due to activities not captured here. Estimate was generated with Azure Pricing Calculator using East US Pay-as-you-go pricing. It is based on a 32 MB block size which generates 65,536 PUT Requests (write transactions), per day. This example may not reflect current Azure pricing, or not be applicable towards your requirements. ## Prepare Azure Blob Storage -Refer to [Tiger Bridge deployment guide](/azure/storage/solution-integration/validated-partners/primary-secondary-storage/tiger-bridge-deployment-guide) +Refer to [Tiger Bridge deployment guide](../primary-secondary-storage/tiger-bridge-deployment-guide.md) ## Deploy Tiger Bridge Before you can install Tiger Bridge, you need to have a Windows file server installed, and fully functional. Windows server must have access to the storage account prepare in [previous step](#prepare-azure-blob-storage). ## Configure continuous data protection -1. Deploy Tiger Bridge solution as described in [standalone hybrid configuration](/azure/storage/solution-integration/validated-partners/primary-secondary-storage/tiger-bridge-deployment-guide#deploy-standalone-hybrid-configuration) (steps 1 to 4). +1. Deploy Tiger Bridge solution as described in [standalone hybrid configuration](../primary-secondary-storage/tiger-bridge-deployment-guide.md#deploy-standalone-hybrid-configuration) (steps 1 to 4). 1. Under Tiger Bridge settings, enable **Delete replica when source file is removed** and **Keep replica versions** :::image type="content" source="./media/tiger-bridge-cdp-guide/tiger-bridge-settings.png" alt-text="Screenshot that shows how to enable settings for CDP."::: 1. Set versioning policy either **By Age** or **By Count** @@ -71,7 +71,7 @@ Tiger Bridge can move a replicated file between Azure Blob Storage tiers to opti :::image type="content" source="./media/tiger-bridge-cdp-guide/tiger-bridge-pair-account.png" alt-text="Screenshot that shows how to pair a storage account with local source."::: - Change **Default access tier** to **Archive**. You can also select a default **[Rehydration priority](/azure/storage/blobs/archive-rehydrate-to-online-tier)**. + Change **Default access tier** to **Archive**. You can also select a default **[Rehydration priority](../../../blobs/archive-rehydrate-to-online-tier.md)**. 
:::image type="content" source="./media/tiger-bridge-cdp-guide/tiger-bridge-change-access-tier.png" alt-text="Screenshot that shows how to change a default access tier in Tiger Bridge Configuration."::: @@ -93,7 +93,7 @@ Tiger Bridge can be configured in Disaster Recovery mode. Typical configuration :::image type="content" source="./media/tiger-bridge-cdp-guide/tiger-bridge-dr-active-passive.png" alt-text="Architecture for Tiger Bridge in active - passive DR configuration."::: -1. Deploy and setup Tiger Bridge server on the primary and secondary site as instructed in [Tiger Bridge deployment guide](/azure/storage/solution-integration/validated-partners/primary-secondary-storage/tiger-bridge-deployment-guide#deploy-standalone-hybrid-configuration) for standalone hybrid configuration +1. Deploy and setup Tiger Bridge server on the primary and secondary site as instructed in [Tiger Bridge deployment guide](../primary-secondary-storage/tiger-bridge-deployment-guide.md#deploy-standalone-hybrid-configuration) for standalone hybrid configuration > [!NOTE] > Both Tiger Bridge servers on primary and secondary site must be connected to the same container and storage account. diff --git a/articles/storsimple/storsimple-8000-aad-registration-key.md b/articles/storsimple/storsimple-8000-aad-registration-key.md index 56d02c7510a5c..cd5777717ff17 100644 --- a/articles/storsimple/storsimple-8000-aad-registration-key.md +++ b/articles/storsimple/storsimple-8000-aad-registration-key.md @@ -49,7 +49,7 @@ If using a StorSimple 8000 series device, use the following table to determine w | If your device is running| Take the following action | |--------------------------|------------------------| -| Update 5.0 or earlier and the device is offline. | Transport Layer Security (TLS) 1.2 is being enforced by the StorSimple Device Manager service.
                    Install Update 5.1 (or higher):
                    1. [Connect to Windows PowerShell on the StorSimple 8000 series device](storsimple-8000-deployment-walkthrough-u2.md#use-putty-to-connect-to-the-device-serial-console), or connect directly to the appliance via serial cable.
                    2. Use [Start-HcsUpdate](/powershell/module/hcs/start-hcsupdate?view=winserver2012r2-ps) to update the device. For steps, see [Install regular updates via Windows PowerShell](storsimple-update-device.md#to-install-regular-updates-via-windows-powershell-for-storsimple). This update is non-disruptive.
                    3. If `Start-HcsUpdate` doesn’t work because of firewall issues, [install Update 5.1 (or higher) via the hotfix method](storsimple-8000-install-update-51.md#install-update-51-as-a-hotfix).
                    | +| Update 5.0 or earlier and the device is offline. | Transport Layer Security (TLS) 1.2 is being enforced by the StorSimple Device Manager service.
                    Install Update 5.1 (or higher):
                    1. [Connect to Windows PowerShell on the StorSimple 8000 series device](storsimple-8000-deployment-walkthrough-u2.md#use-putty-to-connect-to-the-device-serial-console), or connect directly to the appliance via serial cable.
                    2. Use [Start-HcsUpdate](/powershell/module/hcs/start-hcsupdate?view=winserver2012r2-ps&preserve-view=true) to update the device. For steps, see [Install regular updates via Windows PowerShell](storsimple-update-device.md#to-install-regular-updates-via-windows-powershell-for-storsimple). This update is non-disruptive.
                    3. If `Start-HcsUpdate` doesn’t work because of firewall issues, [install Update 5.1 (or higher) via the hotfix method](storsimple-8000-install-update-51.md#install-update-51-as-a-hotfix).
                    | | Update 5 or later and the device is offline.
                    You see an alert that the URL is not approved.|
                    1. Modify the firewall rules to include the authentication URL. See [authentication URLs](#url-changes-for-azure-ad-authentication).
                    2. [Get the Azure AD registration key from the service](#azure-ad-based-registration-keys).
                    3. [Connect to the Windows PowerShell interface of the StorSimple 8000 series device](storsimple-8000-deployment-walkthrough-u2.md#use-putty-to-connect-to-the-device-serial-console).
                    4. Use `Redo-DeviceRegistration` cmdlet to register the device through the Windows PowerShell. Supply the key you got in the previous step.
                    | | Update 4 or earlier and the device is offline. |
                    1. Modify the firewall rules to include the authentication URL.
                    2. [Download Update 5 through catalog server](storsimple-8000-install-update-5.md#download-updates-for-your-device).
                    3. [Apply Update 5 through the hotfix method](storsimple-8000-install-update-5.md#install-update-5-as-a-hotfix).
                    4. [Get the Azure AD registration key from the service](#azure-ad-based-registration-keys).
                    5. [Connect to the Windows PowerShell interface of the StorSimple 8000 series device](storsimple-8000-deployment-walkthrough-u2.md#use-putty-to-connect-to-the-device-serial-console).
                    6. Use `Redo-DeviceRegistration` cmdlet to register the device through the Windows PowerShell. Supply the key you got in the previous step.
                    | | Update 4 or earlier and the device is online. |Modify the firewall rules to include the authentication URL.
                    Install Update 5 through the Azure portal. | diff --git a/articles/storsimple/storsimple-8000-choose-storage-solution.md b/articles/storsimple/storsimple-8000-choose-storage-solution.md index 864e1c1219a95..68d1dc75d1bd3 100644 --- a/articles/storsimple/storsimple-8000-choose-storage-solution.md +++ b/articles/storsimple/storsimple-8000-choose-storage-solution.md @@ -1,6 +1,6 @@ --- title: Options for data transfer to Azure using an appliance | Microsoft Docs -description: Learn how to choose the right appliance for on-premises data transfer to Azure between Data Box Edge, Azure File Sync, and StorSimple 8000 series. +description: Learn how to choose the right appliance for on-premises data transfer to Azure between Azure Stack Edge, Azure File Sync, and StorSimple 8000 series. services: storsimple author: alkohli @@ -10,19 +10,19 @@ ms.date: 04/01/2019 ms.author: alkohli --- -# Compare StorSimple with Azure File Sync and Data Box Edge data transfer options +# Compare StorSimple with Azure File Sync and Azure Stack Edge data transfer options [!INCLUDE [storsimple-8000-eol-banner](../../includes/storsimple-8000-eol-banner.md)] -This document provides an overview of options for on-premises data transfer to Azure, comparing: Data Box Edge vs. Azure File Sync vs. StorSimple 8000 series. +This document provides an overview of options for on-premises data transfer to Azure, comparing: Azure Stack Edge vs. Azure File Sync vs. StorSimple 8000 series. -- **[Data Box Edge](../databox-online/azure-stack-edge-overview.md)** – Data Box Edge is an on-premises network device that moves data into and out of Azure and has AI-enabled Edge compute to pre-process data during upload. Data Box Gateway is a virtual version of the device with the same data transfer capabilities. +- **[Azure Stack Edge](../databox-online/azure-stack-edge-overview.md)** – Azure Stack Edge is an on-premises network device that moves data into and out of Azure and has AI-enabled Edge compute to pre-process data during upload. Data Box Gateway is a virtual version of the device with the same data transfer capabilities. - **[Azure File Sync](../storage/file-sync/file-sync-deployment-guide.md)** – Azure File Sync can be used to centralize your organization's file shares in Azure Files, while keeping the flexibility, performance, and compatibility of an on-premises file server. Azure File Sync transforms Windows Server into a quick cache of your Azure file share. General availability of Azure File Sync was announced earlier in 2018. - **[StorSimple](./storsimple-overview.md)** – StorSimple is a hybrid device that helps enterprises consolidate their storage infrastructure for primary storage, data protection, archiving, and disaster recovery on a single solution by tightly integrating with Azure storage. The product lifecycle for StorSimple can be found [here](https://support.microsoft.com/lifecycle/search?alpha=Azure%20StorSimple%208000%20Series). ## Comparison summary -| |StorSimple 8000 |Azure File Sync |Data Box Edge | +| |StorSimple 8000 |Azure File Sync |Azure Stack Edge | |---------------------------|----------------------------------------|-------------------------------|-----------------------------------------| |**Overview** |Tiered hybrid storage and archival|General file server storage with cloud tiering and multi-site sync. |Storage solution to pre-process data and send it over network to Azure. 
| |**Scenarios** |File server, archival, backup target |File server, archival (multi-site) |Data transfer, data pre-processing including ML inferencing, IoT, archival | @@ -36,4 +36,4 @@ This document provides an overview of options for on-premises data transfer to A ## Next steps - Learn about [Azure Data Box Edge](../databox-online/azure-stack-edge-overview.md) and [Azure Data Box Gateway](../databox-gateway/data-box-gateway-overview.md) -- Learn about [Azure File Sync](../storage/file-sync/file-sync-deployment-guide.md) \ No newline at end of file +- Learn about [Azure File Sync](../storage/file-sync/file-sync-deployment-guide.md) diff --git a/articles/stream-analytics/TOC.yml b/articles/stream-analytics/TOC.yml index f988d3d5fc712..21690c9b2845e 100644 --- a/articles/stream-analytics/TOC.yml +++ b/articles/stream-analytics/TOC.yml @@ -15,6 +15,9 @@ href: stream-analytics-quick-create-portal.md - name: Azure CLI href: quick-create-azure-cli.md + - name: Bicep + displayName: ARM, Resource Manager, Template + href: quick-create-bicep.md - name: ARM template displayName: Resource Manager href: quick-create-azure-resource-manager.md diff --git a/articles/stream-analytics/blob-storage-azure-data-lake-gen2-output.md b/articles/stream-analytics/blob-storage-azure-data-lake-gen2-output.md index 11ffccd58c265..a936e1554f48b 100644 --- a/articles/stream-analytics/blob-storage-azure-data-lake-gen2-output.md +++ b/articles/stream-analytics/blob-storage-azure-data-lake-gen2-output.md @@ -5,7 +5,7 @@ author: enkrumah ms.author: ebnkruma ms.service: stream-analytics ms.topic: conceptual -ms.date: 12/15/2021 +ms.date: 06/06/2022 --- # Blob storage and Azure Data Lake Gen2 output from Azure Stream Analytics @@ -14,6 +14,9 @@ Data Lake Storage Gen2 makes Azure Storage the foundation for building enterpris Azure Blob storage offers a cost-effective and scalable solution for storing large amounts of unstructured data in the cloud. For an introduction on Blob storage and its usage, see [Upload, download, and list blobs with the Azure portal](../storage/blobs/storage-quickstart-blobs-portal.md). +>[!NOTE] +> For details on the behaviors specific to the AVRO and Parquet formats, see the related sections in the [overview](stream-analytics-define-outputs.md). + ## Output configuration The following table lists the property names and their descriptions for creating a blob or ADLS Gen2 output. diff --git a/articles/stream-analytics/capture-event-hub-data-parquet.md b/articles/stream-analytics/capture-event-hub-data-parquet.md index 982b79db89c18..ca00316221c9c 100644 --- a/articles/stream-analytics/capture-event-hub-data-parquet.md +++ b/articles/stream-analytics/capture-event-hub-data-parquet.md @@ -6,7 +6,7 @@ ms.author: sidram ms.service: stream-analytics ms.topic: how-to ms.custom: mvc, event-tier1-build-2022 -ms.date: 05/08/2022 +ms.date: 05/24/2022 --- # Capture data from Event Hubs in Parquet format @@ -21,10 +21,10 @@ This article explains how to use the no code editor to automatically capture str Use the following steps to configure a Stream Analytics job to capture data in Azure Data Lake Storage Gen2. -1. In the Azure portal, locate and select your Azure Event Hubs instance. -1. Select **Features** > **Process Data**, and select **Start** on the **Capture data to Azure Data Lake Storage Gen2 in Parquet format** card. +1. In the Azure portal, navigate to your event hub. +1. Select **Features** > **Process Data**, and select **Start** on the **Capture data to ADLS Gen2 in Parquet format** card. 
:::image type="content" source="./media/capture-event-hub-data-parquet/process-event-hub-data-cards.png" alt-text="Screenshot showing the Process Event Hubs data start cards." lightbox="./media/capture-event-hub-data-parquet/process-event-hub-data-cards.png" ::: -1. Enter a name to identify your Stream Analytics job. Select **Create**. +1. Enter a **name** to identify your Stream Analytics job. Select **Create**. :::image type="content" source="./media/capture-event-hub-data-parquet/new-stream-analytics-job-name.png" alt-text="Screenshot showing the New Stream Analytics job window where you enter the job name." lightbox="./media/capture-event-hub-data-parquet/new-stream-analytics-job-name.png" ::: 1. Specify the **Serialization** type of your data in the Event Hubs and the **Authentication method** that the job will use to connect to Event Hubs. Then select **Connect**. :::image type="content" source="./media/capture-event-hub-data-parquet/event-hub-configuration.png" alt-text="Screenshot showing the Event Hubs connection configuration." lightbox="./media/capture-event-hub-data-parquet/event-hub-configuration.png" ::: @@ -32,23 +32,34 @@ Use the following steps to configure a Stream Analytics job to capture data in A - Fields that are present in the input data. You can choose **Add field** or you can select the three dot symbol next to a field to optionally remove, rename, or change its name. - A live sample of incoming data in the **Data preview** table under the diagram view. It refreshes periodically. You can select **Pause streaming preview** to view a static view of the sample input. :::image type="content" source="./media/capture-event-hub-data-parquet/edit-fields.png" alt-text="Screenshot showing sample data under Data Preview." lightbox="./media/capture-event-hub-data-parquet/edit-fields.png" ::: -1. Select the **Streaming blob** tile to edit the configuration. +1. Select the **Azure Data Lake Storage Gen2** tile to edit the configuration. +1. On the **Azure Data Lake Storage Gen2** configuration page, follow these steps: 1. Select the subscription, storage account name and container from the drop-down menu. 1. Once the subscription is selected, the authentication method and storage account key should be automatically filled in. 1. For streaming blobs, the directory path pattern is expected to be a dynamic value. It's required for the date to be a part of the file path for the blob – referenced as `{date}`. To learn about custom path patterns, see to [Azure Stream Analytics custom blob output partitioning](stream-analytics-custom-path-patterns-blob-storage-output.md). :::image type="content" source="./media/capture-event-hub-data-parquet/blob-configuration.png" alt-text="First screenshot showing the Blob window where you edit a blob's connection configuration." lightbox="./media/capture-event-hub-data-parquet/blob-configuration.png" ::: 1. Select **Connect** -1. When the connection is established, you will see fields that are present in the output data. -1. Select **Save** to save your configuration. -1. Select **Start** to start the streaming flow to capture data. Then in the Start Stream Analytics job window: +1. When the connection is established, you'll see fields that are present in the output data. +1. Select **Save** on the command bar to save your configuration. +1. Select **Start** on the command bar to start the streaming flow to capture data. Then in the Start Stream Analytics job window: 1. Choose the output start time. 1. 
Select the number of Streaming Units (SU) that the job runs with. SU represents the computing resources that are allocated to execute a Stream Analytics job. For more information, see [Streaming Units in Azure Stream Analytics](stream-analytics-streaming-unit-consumption.md). 1. In the **Choose Output data error handling** list, select the behavior you want when the output of the job fails due to data error. Select **Retry** to have the job retry until it writes successfully or select another option. :::image type="content" source="./media/capture-event-hub-data-parquet/start-job.png" alt-text="Screenshot showing the Start Stream Analytics job window where you set the output start time, streaming units, and error handling." lightbox="./media/capture-event-hub-data-parquet/start-job.png" ::: +## Verify output +Verify that the Parquet files are generated in the Azure Data Lake Storage container. + +:::image type="content" source="./media/capture-event-hub-data-parquet/verify-captured-data.png" alt-text="Screenshot showing the generated Parquet files in the ADLS container." lightbox="./media/capture-event-hub-data-parquet/verify-captured-data.png" ::: + + The new job is shown on the **Stream Analytics jobs** tab. Select **Open metrics** to monitor it. -:::image type="content" source="./media/capture-event-hub-data-parquet/stream-analytics-jobs-list.png" alt-text="Screenshot showing the Stream Analytics jobs list where you monitor job status." lightbox="./media/capture-event-hub-data-parquet/stream-analytics-jobs-list.png" ::: +:::image type="content" source="./media/capture-event-hub-data-parquet/open-metrics-link.png" alt-text="Screenshot showing Open Metrics link selected." lightbox="./media/capture-event-hub-data-parquet/open-metrics-link.png" ::: + +Here's an example screenshot of metrics showing input and output events. + +:::image type="content" source="./media/capture-event-hub-data-parquet/job-metrics.png" alt-text="Screenshot showing metrics of the Stream Analytics job." lightbox="./media/capture-event-hub-data-parquet/job-metrics.png" ::: ## Next steps diff --git a/articles/stream-analytics/data-error-codes.md b/articles/stream-analytics/data-error-codes.md index 53d4b041daf82..807d467a4d029 100644 --- a/articles/stream-analytics/data-error-codes.md +++ b/articles/stream-analytics/data-error-codes.md @@ -1,16 +1,17 @@ --- title: Data error codes - Azure Stream Analytics -description: Troubleshoot Azure Stream Analytics issues with data error codes. +description: Troubleshoot Azure Stream Analytics issues with data error codes, which occur when there's bad data in the stream. ms.author: sidram author: sidramadoss ms.topic: troubleshooting -ms.date: 05/07/2020 +ms.date: 05/25/2022 ms.service: stream-analytics +ms.custom: kr2b-contr-experiment --- # Azure Stream Analytics data error codes -You can use activity logs and resource logs to help debug unexpected behaviors from your Azure Stream Analytics job. This article lists the description for every data error error code. Data errors occur when there is bad data in the stream, such as an unexpected record schema. +You can use activity logs and resource logs to help debug unexpected behaviors from your Azure Stream Analytics job. This article lists the description for every data error code. Data errors occur when there's bad data in the stream, such as an unexpected record schema. 
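As an example of how these data errors arise, consider a job that timestamps events by a payload field with `TIMESTAMP BY`: an event that arrives without a usable value in that field is logged as a data error (for example, one of the timestamp errors described in this article) rather than failing the whole job. The following query is only a hypothetical sketch; the input, output, and field names are assumed for illustration.

```SQL
-- Hypothetical job query: events whose 'callrecTime' value is missing or
-- can't be interpreted as a timestamp surface as data errors in the
-- resource logs instead of stopping the job.
SELECT
    CallingNum,
    SwitchNum,
    System.Timestamp() AS EventTime
INTO
    [blob-output]
FROM
    [eventhub-input] TIMESTAMP BY callrecTime
```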
## InputDeserializationError @@ -18,11 +19,11 @@ You can use activity logs and resource logs to help debug unexpected behaviors f ## InputEventTimestampNotFound -* **Cause**: Stream Analytics is unable to get a timestamp for resource. +* **Cause**: Stream Analytics is unable to get a timestamp for a resource. ## InputEventTimestampByOverValueNotFound -* **Cause**: Stream Analytics is unable to get value of `TIMESTAMP BY OVER COLUMN`. +* **Cause**: Stream Analytics is unable to get the value of `TIMESTAMP BY OVER COLUMN`. ## InputEventLateBeyondThreshold @@ -38,7 +39,7 @@ You can use activity logs and resource logs to help debug unexpected behaviors f ## EventHubOutputRecordExceedsSizeLimit -* **Cause**: An output record exceeds the maximum size limit when writing to Event Hub. +* **Cause**: An output record exceeds the maximum size limit when writing to Azure Event Hubs. ## CosmosDBOutputInvalidId @@ -51,24 +52,24 @@ You can use activity logs and resource logs to help debug unexpected behaviors f ## CosmosDBOutputMissingId -* **Cause**: The output record doesn't contain the column \[id] to use as the primary key property. +* **Cause**: The output record doesn't contain the column `[id]` to use as the primary key property. ## CosmosDBOutputMissingIdColumn * **Cause**: The output record doesn't contain the Document ID property. -* **Recommendation**: Ensure the query output contains the column with a unique non-empty string less than '255' characters. +* **Recommendation**: Ensure the query output contains the column with a unique non-empty string of no more than 255 characters. ## CosmosDBOutputMissingPartitionKey -* **Cause**: The output record is missing the a column to use as the partition key property. +* **Cause**: The output record is missing a column to use as the partition key property. ## CosmosDBOutputSingleRecordTooLarge -* **Cause**: A single record write to Cosmos DB is too large. +* **Cause**: A single record write to Azure Cosmos DB is too large. ## SQLDatabaseOutputDataError -* **Cause**: Stream Analytics can't write event(s) to SQL Database due to issues in the data. +* **Cause**: Stream Analytics can't write event(s) to Azure SQL Database due to issues in the data. ## Next steps diff --git a/articles/stream-analytics/event-hubs-parquet-capture-tutorial.md b/articles/stream-analytics/event-hubs-parquet-capture-tutorial.md index 97c44c6b0953a..79fdc98cb9875 100644 --- a/articles/stream-analytics/event-hubs-parquet-capture-tutorial.md +++ b/articles/stream-analytics/event-hubs-parquet-capture-tutorial.md @@ -5,7 +5,7 @@ author: sidramadoss ms.author: sidram ms.service: stream-analytics ms.topic: how-to -ms.date: 05/23/2022 +ms.date: 05/25/2022 ms.custom: seodec18 --- @@ -27,40 +27,54 @@ In this tutorial, you learn how to: Before you start, make sure you've completed the following steps: * If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/). -* Deploy the TollApp event generator to Azure, use this link to [Deploy TollApp Azure Template](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-stream-analytics%2Fmaster%2FSamples%2FTollApp%2FVSProjects%2FTollAppDeployment%2Fazuredeploy.json). Set the 'interval' parameter to 1. And use a new resource group for this. 
+* Deploy the TollApp event generator to Azure, use this link to [Deploy TollApp Azure Template](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-stream-analytics%2Fmaster%2FSamples%2FTollApp%2FVSProjects%2FTollAppDeployment%2Fazuredeploy.json). Set the 'interval' parameter to 1. And use a new resource group for this step. * Create an [Azure Synapse Analytics workspace](../synapse-analytics/get-started-create-workspace.md) with a Data Lake Storage Gen2 account. ## Use no code editor to create a Stream Analytics job 1. Locate the Resource Group in which the TollApp event generator was deployed. -2. Select the Azure Event Hubs namespace. And then under the Event Hubs section, select **entrystream** instance. -3. Go to **Process data** under Features section and then click **start** on the Capture in parquet format template. -[ ![Screenshot of start capture experience from process data blade.](./media/stream-analytics-no-code/parquet-capture-start.png) ](./media/stream-analytics-no-code/parquet-capture-start.png#lightbox) -4. Name your job **parquetcapture** and select **Create**. -5. Configure your event hub input by specifying - * Consumer Group: Default - * Serialization type of your input data: JSON - * Authentication mode that the job will use to connect to your event hub: Connection String defaults - * Click **Connect** -6. Within few seconds, you'll see sample input data and the schema. You can choose to drop fields, rename fields or change data type. -[![Screenshot of event hub data and schema in no code editor.](./media/stream-analytics-no-code/event-hub-data-preview.png)](./media/stream-analytics-no-code/event-hub-data-preview.png#lightbox) -7. Click the Azure Data Lake Storage Gen2 tile on your canvas and configure it by specifying +2. Select the Azure Event Hubs **namespace**. +1. On the **Event Hubs Namespace** page, select **Event Hubs** under **Entities** on the left menu. +1. Select **entrystream** instance. + + :::image type="content" source="./media/stream-analytics-no-code/select-event-hub.png" alt-text="Screenshot showing the selection of the event hub." lightbox="./media/stream-analytics-no-code/select-event-hub.png"::: +3. On the **Event Hubs instance** page, select **Process data** in the **Features** section on the left menu. +1. Select **Start** on the **Capture data to ADLS Gen2 in Parquet format** tile. + + :::image type="content" source="./media/stream-analytics-no-code/parquet-capture-start.png" alt-text="Screenshot showing the selection of the **Capture data to ADLS Gen2 in Parquet format** tile." lightbox="./media/stream-analytics-no-code/parquet-capture-start.png"::: +1. Name your job **parquetcapture** and select **Create**. + + :::image type="content" source="./media/stream-analytics-no-code/new-stream-analytics-job.png" alt-text="Screenshot of the New Stream Analytics job page." lightbox="./media/stream-analytics-no-code/new-stream-analytics-job.png"::: +1. On the **event hub** configuration page, confirm the following settings, and then select **Connect**. + - *Consumer Group*: Default + - *Serialization type* of your input data: JSON + - *Authentication mode* that the job will use to connect to your event hub: Connection string. + + :::image type="content" source="./media/event-hubs-parquet-capture-tutorial/event-hub-configuration.png" alt-text="Screenshot of the configuration page for your event hub." lightbox="./media/event-hubs-parquet-capture-tutorial/event-hub-configuration.png"::: +1. 
Within a few seconds, you'll see sample input data and the schema. You can choose to drop fields, rename fields, or change the data type. + + :::image type="content" source="./media/event-hubs-parquet-capture-tutorial/data-preview.png" alt-text="Screenshot showing the fields and preview of data." lightbox="./media/event-hubs-parquet-capture-tutorial/data-preview.png"::: +1. Select the **Azure Data Lake Storage Gen2** tile on your canvas and configure it by specifying * Subscription where your Azure Data Lake Gen2 account is located in - * Storage account name which should be the same ADLS Gen2 account used with your Azure Synapse Analytics workspace done in the Prerequisites section. + * Storage account name, which should be the same ADLS Gen2 account used with the Azure Synapse Analytics workspace that you created in the Prerequisites section. * Container inside which the Parquet files will be created. * Path pattern set to *{date}/{time}* * Date and time pattern as the default *yyyy-mm-dd* and *HH*. - * Click **Connect** -8. Select **Save** in the top ribbon to save your job and then select **Start**. Set Streaming Unit count to 3 and then Select **Start** to run your job. -[![Screenshot of start job in no code editor.](./media/stream-analytics-no-code/no-code-start-job.png)](./media/stream-analytics-no-code/no-code-start-job.png#lightbox) -9. You'll then see a list of all Stream Analytics jobs created using the no code editor. And within two minutes, your job will go to a **Running** state. -[![Screenshot of job in running state after job creation.](./media/stream-analytics-no-code/no-code-job-running-state.png)](./media/stream-analytics-no-code/no-code-job-running-state.png#lightbox) + * Select **Connect** + + :::image type="content" source="./media/event-hubs-parquet-capture-tutorial/data-lake-storage-settings.png" alt-text="Screenshot showing the configuration settings for the Data Lake Storage." lightbox="./media/event-hubs-parquet-capture-tutorial/data-lake-storage-settings.png"::: +1. Select **Save** in the top ribbon to save your job and then select **Start**. Set the Streaming Unit count to 3, and then select **Start** to run your job. + + :::image type="content" source="./media/event-hubs-parquet-capture-tutorial/start-job.png" alt-text="Screenshot showing the Start Stream Analytics Job page." lightbox="./media/event-hubs-parquet-capture-tutorial/start-job.png"::: +1. You'll then see a list of all Stream Analytics jobs created using the no code editor. Within two minutes, your job will go to a **Running** state. Select the **Refresh** button on the page to see the status change from Created -> Starting -> Running. + + :::image type="content" source="./media/event-hubs-parquet-capture-tutorial/job-list.png" alt-text="Screenshot showing the list of Stream Analytics jobs." lightbox="./media/event-hubs-parquet-capture-tutorial/job-list.png"::: ## View output in your Azure Data Lake Storage Gen 2 account 1. Locate the Azure Data Lake Storage Gen2 account you had used in the previous step. 2. Select the container you had used in the previous step. You'll see parquet files created based on the *{date}/{time}* path pattern used in the previous step.
[![Screenshot of parquet files in Azure Data Lake Storage Gen 2.](./media/stream-analytics-no-code/capture-parquet-files.png)](./media/stream-analytics-no-code/capture-parquet-files.png#lightbox) -## Query event hub Capture files in Parquet format with Azure Synapse Analytics +## Query captured data in Parquet format with Azure Synapse Analytics ### Query using Azure Synapse Spark 1. Locate your Azure Synapse Analytics workspace and open Synapse Studio. 2. [Create a serverless Apache Spark pool](../synapse-analytics/get-started-analyze-spark.md#create-a-serverless-apache-spark-pool) in your workspace if one doesn't already exist. @@ -74,12 +88,13 @@ Before you start, make sure you've completed the following steps: df.printSchema() ``` 5. Select **Run All** to see the results -[![Screenshot of spark run results in Azure Synapse Analytics.](./media/stream-analytics-no-code/spark-run-all.png)](./media/stream-analytics-no-code/spark-run-all.png#lightbox) + + :::image type="content" source="./media/event-hubs-parquet-capture-tutorial/spark-run-all.png" alt-text="Screenshot of spark run results in Azure Synapse Analytics." lightbox="./media/event-hubs-parquet-capture-tutorial/spark-run-all.png"::: ### Query using Azure Synapse Serverless SQL 1. In the **Develop** hub, create a new **SQL script**. -2. Paste the following script and **Run** it using the **Built-in** serverless SQL endpoint. Replace *container* and *adlsname* with the name of the container and ADLS Gen2 account used in the previous step. - ``SQL +2. Paste the following script and **Run** it using the **Built-in** serverless SQL endpoint. Replace *container* and *adlsname* with the name of the container and ADLS Gen2 account used in the previous step. + ```SQL SELECT TOP 100 * FROM @@ -88,7 +103,8 @@ Before you start, make sure you've completed the following steps: FORMAT='PARQUET' ) AS [result] ``` -[![Screenshot of SQL query results using Azure Synapse Analytics.](./media/stream-analytics-no-code/sql-results.png)](./media/stream-analytics-no-code/sql-results.png#lightbox) + + :::image type="content" source="./media/event-hubs-parquet-capture-tutorial/sql-results.png" alt-text="Screenshot of SQL script results in Azure Synapse Analytics." lightbox="./media/event-hubs-parquet-capture-tutorial/sql-results.png"::: ## Clean up resources 1. Locate your Event Hubs instance and see the list of Stream Analytics jobs under **Process Data** section. Stop any jobs that are running. diff --git a/articles/stream-analytics/filter-ingest-data-lake-storage-gen2.md b/articles/stream-analytics/filter-ingest-data-lake-storage-gen2.md index 90a910f03620f..3776e18a888a4 100644 --- a/articles/stream-analytics/filter-ingest-data-lake-storage-gen2.md +++ b/articles/stream-analytics/filter-ingest-data-lake-storage-gen2.md @@ -6,7 +6,7 @@ ms.author: sidram ms.service: stream-analytics ms.topic: how-to ms.custom: mvc, event-tier1-build-2022 -ms.date: 05/08/2022 +ms.date: 05/24/2022 --- # Filter and ingest to Azure Data Lake Storage Gen2 using the Stream Analytics no code editor @@ -32,9 +32,9 @@ This article describes how you can use the no code editor to easily create a Str :::image type="content" source="./media/filter-ingest-data-lake-storage-gen2/add-field.png" alt-text="Screenshot showing where you can add a field or remove, rename, or change a field type." lightbox="./media/filter-ingest-data-lake-storage-gen2/add-field.png" ::: 1. A live sample of incoming data in **Data preview** table under the diagram view. It automatically refreshes periodically. 
You can select **Pause streaming preview** to see a static view of sample input data. :::image type="content" source="./media/filter-ingest-data-lake-storage-gen2/sample-input.png" alt-text="Screenshot showing sample data on the Data preview tab." lightbox="./media/filter-ingest-data-lake-storage-gen2/sample-input.png" ::: -1. In the **Filter** area, select a field to filter the incoming data with a condition. +1. Select the **Filter** tile. In the **Filter** area, select a field to filter the incoming data with a condition. :::image type="content" source="./media/filter-ingest-data-lake-storage-gen2/filter-data.png" alt-text="Screenshot showing the Filter area where you can add a conditional filter." lightbox="./media/filter-ingest-data-lake-storage-gen2/filter-data.png" ::: -1. Select the Azure Data Lake Gen2 table to send your filtered data: +1. Select the **Azure Data Lake Storage Gen2** tile. Select the **Azure Data Lake Gen2** account to send your filtered data: 1. Select the **subscription**, **storage account name**, and **container** from the drop-down menu. 1. After the **subscription** is selected, the **authentication method** and **storage account key** should be automatically filled in. Select **Connect**. For more information about the fields and to see examples of path pattern, see [Blob storage and Azure Data Lake Gen2 output from Azure Stream Analytics](blob-storage-azure-data-lake-gen2-output.md). @@ -47,10 +47,29 @@ This article describes how you can use the no code editor to easily create a Str 1. After your select **Start**, the job starts running within two minutes. :::image type="content" source="./media/filter-ingest-data-lake-storage-gen2/no-code-start-job.png" alt-text="Screenshot showing the Start Stream Analytics job window." lightbox="./media/filter-ingest-data-lake-storage-gen2/no-code-start-job.png" ::: -You can see the job under the Process Data section in the **Stream Analytics jobs** tab. Select **Open metrics** to monitor it or stop and restart it, as needed. +You can see the job under the Process Data section in the **Stream Analytics jobs** tab. Select **Refresh** until you see the job status as **Running**. Select **Open metrics** to monitor it or stop and restart it, as needed. :::image type="content" source="./media/filter-ingest-data-lake-storage-gen2/no-code-list-jobs.png" alt-text="Screenshot showing the Stream Analytics jobs tab." lightbox="./media/filter-ingest-data-lake-storage-gen2/no-code-list-jobs.png" ::: +Here's a sample **Metrics** page: + +:::image type="content" source="./media/filter-ingest-data-lake-storage-gen2/metrics-page.png" alt-text="Screenshot showing the Metrics page." lightbox="./media/filter-ingest-data-lake-storage-gen2/metrics-page.png" ::: + + +## Verify data in Data Lake Storage + +1. You should see files created in the container you specified. + + :::image type="content" source="./media/filter-ingest-data-lake-storage-gen2/filtered-data-file.png" alt-text="Screenshot showing the generated file with filtered data in the Azure Data Lake Storage." lightbox="./media/filter-ingest-data-lake-storage-gen2/filtered-data-file.png" ::: +1. Download and open the file to confirm that you see only the filtered data. In the following example, you see data with **SwitchNum** set to **US**. 
+ + ```json + {"RecordType":"MO","SystemIdentity":"d0","FileNum":"548","SwitchNum":"US","CallingNum":"345697969","CallingIMSI":"466921402416657","CalledNum":"012332886","CalledIMSI":"466923101048691","DateS":"20220524","TimeType":0,"CallPeriod":0,"ServiceType":"S","Transfer":0,"OutgoingTrunk":"419","MSRN":"1416960750071","callrecTime":"2022-05-25T02:07:10Z","EventProcessedUtcTime":"2022-05-25T02:07:50.5478116Z","PartitionId":0,"EventEnqueuedUtcTime":"2022-05-25T02:07:09.5140000Z", "TimeS":null,"CallingCellID":null,"CalledCellID":null,"IncomingTrunk":null,"CalledNum2":null,"FCIFlag":null} + {"RecordType":"MO","SystemIdentity":"d0","FileNum":"552","SwitchNum":"US","CallingNum":"012351287","CallingIMSI":"262021390056324","CalledNum":"012301973","CalledIMSI":"466922202613463","DateS":"20220524","TimeType":3,"CallPeriod":0,"ServiceType":"V","Transfer":0,"OutgoingTrunk":"442","MSRN":"886932428242","callrecTime":"2022-05-25T02:07:13Z","EventProcessedUtcTime":"2022-05-25T02:07:50.5478116Z","PartitionId":0,"EventEnqueuedUtcTime":"2022-05-25T02:07:12.7350000Z", "TimeS":null,"CallingCellID":null,"CalledCellID":null,"IncomingTrunk":null,"CalledNum2":null,"FCIFlag":null} + {"RecordType":"MO","SystemIdentity":"d0","FileNum":"559","SwitchNum":"US","CallingNum":"456757102","CallingIMSI":"466920401237309","CalledNum":"345617823","CalledIMSI":"466923000886460","DateS":"20220524","TimeType":1,"CallPeriod":696,"ServiceType":"V","Transfer":1,"OutgoingTrunk":"419","MSRN":"886932429155","callrecTime":"2022-05-25T02:07:22Z","EventProcessedUtcTime":"2022-05-25T02:07:50.5478116Z","PartitionId":0,"EventEnqueuedUtcTime":"2022-05-25T02:07:21.9190000Z", "TimeS":null,"CallingCellID":null,"CalledCellID":null,"IncomingTrunk":null,"CalledNum2":null,"FCIFlag":null} + ``` + + ## Next steps Learn more about Azure Stream Analytics and how to monitor the job you've created. 
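For reference, the filter configured in the no code editor above behaves like the following Stream Analytics query. This is only a conceptual sketch: the input and output names are placeholders, not the names the editor generates for the job.

```SQL
-- Pass through only the events whose SwitchNum value is 'US' and write
-- them to the Azure Data Lake Storage Gen2 output.
SELECT
    *
INTO
    [datalake-output]
FROM
    [eventhub-input]
WHERE
    SwitchNum = 'US'
```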
diff --git a/articles/stream-analytics/media/capture-event-hub-data-parquet/edit-fields.png b/articles/stream-analytics/media/capture-event-hub-data-parquet/edit-fields.png index dd3d6aac6b7a0..f2076a405899e 100644 Binary files a/articles/stream-analytics/media/capture-event-hub-data-parquet/edit-fields.png and b/articles/stream-analytics/media/capture-event-hub-data-parquet/edit-fields.png differ diff --git a/articles/stream-analytics/media/capture-event-hub-data-parquet/event-hub-configuration.png b/articles/stream-analytics/media/capture-event-hub-data-parquet/event-hub-configuration.png index 030111a0dcebe..b3e74e3a2857e 100644 Binary files a/articles/stream-analytics/media/capture-event-hub-data-parquet/event-hub-configuration.png and b/articles/stream-analytics/media/capture-event-hub-data-parquet/event-hub-configuration.png differ diff --git a/articles/stream-analytics/media/capture-event-hub-data-parquet/job-metrics.png b/articles/stream-analytics/media/capture-event-hub-data-parquet/job-metrics.png new file mode 100644 index 0000000000000..a06e594111500 Binary files /dev/null and b/articles/stream-analytics/media/capture-event-hub-data-parquet/job-metrics.png differ diff --git a/articles/stream-analytics/media/capture-event-hub-data-parquet/new-stream-analytics-job-name.png b/articles/stream-analytics/media/capture-event-hub-data-parquet/new-stream-analytics-job-name.png index 881d5a850f3cd..429a5b0f536db 100644 Binary files a/articles/stream-analytics/media/capture-event-hub-data-parquet/new-stream-analytics-job-name.png and b/articles/stream-analytics/media/capture-event-hub-data-parquet/new-stream-analytics-job-name.png differ diff --git a/articles/stream-analytics/media/capture-event-hub-data-parquet/open-metrics-link.png b/articles/stream-analytics/media/capture-event-hub-data-parquet/open-metrics-link.png new file mode 100644 index 0000000000000..375760d955d55 Binary files /dev/null and b/articles/stream-analytics/media/capture-event-hub-data-parquet/open-metrics-link.png differ diff --git a/articles/stream-analytics/media/capture-event-hub-data-parquet/process-event-hub-data-cards.png b/articles/stream-analytics/media/capture-event-hub-data-parquet/process-event-hub-data-cards.png index 3593b0c53a78a..3ba8a3e83247c 100644 Binary files a/articles/stream-analytics/media/capture-event-hub-data-parquet/process-event-hub-data-cards.png and b/articles/stream-analytics/media/capture-event-hub-data-parquet/process-event-hub-data-cards.png differ diff --git a/articles/stream-analytics/media/capture-event-hub-data-parquet/start-job.png b/articles/stream-analytics/media/capture-event-hub-data-parquet/start-job.png index 2ae10fc00bd6c..f1c254ea02076 100644 Binary files a/articles/stream-analytics/media/capture-event-hub-data-parquet/start-job.png and b/articles/stream-analytics/media/capture-event-hub-data-parquet/start-job.png differ diff --git a/articles/stream-analytics/media/capture-event-hub-data-parquet/verify-captured-data.png b/articles/stream-analytics/media/capture-event-hub-data-parquet/verify-captured-data.png new file mode 100644 index 0000000000000..a792f7fa5c64c Binary files /dev/null and b/articles/stream-analytics/media/capture-event-hub-data-parquet/verify-captured-data.png differ diff --git a/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/data-lake-storage-settings.png b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/data-lake-storage-settings.png new file mode 100644 index 0000000000000..980c1df0f919b Binary files /dev/null 
and b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/data-lake-storage-settings.png differ diff --git a/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/data-preview.png b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/data-preview.png new file mode 100644 index 0000000000000..4f1f8328bd150 Binary files /dev/null and b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/data-preview.png differ diff --git a/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/event-hub-configuration.png b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/event-hub-configuration.png new file mode 100644 index 0000000000000..32dcff68832d7 Binary files /dev/null and b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/event-hub-configuration.png differ diff --git a/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/job-list.png b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/job-list.png new file mode 100644 index 0000000000000..82bcf91ff8a72 Binary files /dev/null and b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/job-list.png differ diff --git a/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/spark-run-all.png b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/spark-run-all.png new file mode 100644 index 0000000000000..88460c872e151 Binary files /dev/null and b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/spark-run-all.png differ diff --git a/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/sql-results.png b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/sql-results.png new file mode 100644 index 0000000000000..5e599a43b5985 Binary files /dev/null and b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/sql-results.png differ diff --git a/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/start-job.png b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/start-job.png new file mode 100644 index 0000000000000..3c691fd3ecc16 Binary files /dev/null and b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/start-job.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/add-field.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/add-field.png index 3ce3e26077ad4..0fd8ad03cc7d8 100644 Binary files a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/add-field.png and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/add-field.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/create-job.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/create-job.png index ea0d0f603985a..b19b8e82936e8 100644 Binary files a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/create-job.png and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/create-job.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/data-lake-configuration.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/data-lake-configuration.png index bdef08cfe4db5..2dfc0ec838827 100644 Binary files a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/data-lake-configuration.png and 
b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/data-lake-configuration.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/event-hub-review-connect.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/event-hub-review-connect.png index ff11425a56b2e..ee2f61ea5b03e 100644 Binary files a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/event-hub-review-connect.png and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/event-hub-review-connect.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filter-data-lake-gen2-card-start.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filter-data-lake-gen2-card-start.png index 85ad98232961a..9a31ccb699185 100644 Binary files a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filter-data-lake-gen2-card-start.png and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filter-data-lake-gen2-card-start.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filter-data.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filter-data.png index 8f4295df04f1c..5d6d2443581b6 100644 Binary files a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filter-data.png and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filter-data.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filtered-data-file.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filtered-data-file.png new file mode 100644 index 0000000000000..2a7cc46e9d83f Binary files /dev/null and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filtered-data-file.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/metrics-page.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/metrics-page.png new file mode 100644 index 0000000000000..e8aac3b958326 Binary files /dev/null and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/metrics-page.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-list-jobs.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-list-jobs.png index 69c0424f984b9..f4175d854169b 100644 Binary files a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-list-jobs.png and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-list-jobs.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-save-start.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-save-start.png index b7aa61f658136..7050f7e8fdd0b 100644 Binary files a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-save-start.png and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-save-start.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-start-job.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-start-job.png index f897fc399a6a0..58c0fd59bae6d 100644 Binary files a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-start-job.png and 
b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-start-job.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/sample-input.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/sample-input.png index e841001f6a7bb..08ae35445d90c 100644 Binary files a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/sample-input.png and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/sample-input.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/add-make-field.png b/articles/stream-analytics/media/stream-analytics-no-code/add-make-field.png new file mode 100644 index 0000000000000..884ad7009b8ac Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/add-make-field.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/connect-group.png b/articles/stream-analytics/media/stream-analytics-no-code/connect-group.png new file mode 100644 index 0000000000000..d8dbee6b76112 Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/connect-group.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/data-preview-fields.png b/articles/stream-analytics/media/stream-analytics-no-code/data-preview-fields.png new file mode 100644 index 0000000000000..eedd7397c0858 Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/data-preview-fields.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/event-hub-configuration.png b/articles/stream-analytics/media/stream-analytics-no-code/event-hub-configuration.png new file mode 100644 index 0000000000000..f142d8fb78f96 Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/event-hub-configuration.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/group-settings.png b/articles/stream-analytics/media/stream-analytics-no-code/group-settings.png new file mode 100644 index 0000000000000..c0b0311135cd7 Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/group-settings.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/job-list.png b/articles/stream-analytics/media/stream-analytics-no-code/job-list.png new file mode 100644 index 0000000000000..4c5e1a3ae5c98 Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/job-list.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/job-name.png b/articles/stream-analytics/media/stream-analytics-no-code/job-name.png new file mode 100644 index 0000000000000..655b0eb94dbdc Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/job-name.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/manage-fields-page.png b/articles/stream-analytics/media/stream-analytics-no-code/manage-fields-page.png new file mode 100644 index 0000000000000..81f8cb3488217 Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/manage-fields-page.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/manage-fields.png b/articles/stream-analytics/media/stream-analytics-no-code/manage-fields.png new file mode 100644 index 0000000000000..beab5eca313cb Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/manage-fields.png 
differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/new-stream-analytics-job.png b/articles/stream-analytics/media/stream-analytics-no-code/new-stream-analytics-job.png new file mode 100644 index 0000000000000..ad6729f42f67b Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/new-stream-analytics-job.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/parquet-capture-start.png b/articles/stream-analytics/media/stream-analytics-no-code/parquet-capture-start.png index 2796451ee272b..11a8b3532c908 100644 Binary files a/articles/stream-analytics/media/stream-analytics-no-code/parquet-capture-start.png and b/articles/stream-analytics/media/stream-analytics-no-code/parquet-capture-start.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/real-time-dashboard-power-bi.png b/articles/stream-analytics/media/stream-analytics-no-code/real-time-dashboard-power-bi.png index 8694696f688f4..734f700a32eb2 100644 Binary files a/articles/stream-analytics/media/stream-analytics-no-code/real-time-dashboard-power-bi.png and b/articles/stream-analytics/media/stream-analytics-no-code/real-time-dashboard-power-bi.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/rename-fields.png b/articles/stream-analytics/media/stream-analytics-no-code/rename-fields.png new file mode 100644 index 0000000000000..cbaf60d548948 Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/rename-fields.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/select-event-hub.png b/articles/stream-analytics/media/stream-analytics-no-code/select-event-hub.png new file mode 100644 index 0000000000000..1b9563c89f4c3 Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/select-event-hub.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/start-analytics-job.png b/articles/stream-analytics/media/stream-analytics-no-code/start-analytics-job.png new file mode 100644 index 0000000000000..339711b3430dd Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/start-analytics-job.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/start-blank-canvas.png b/articles/stream-analytics/media/stream-analytics-no-code/start-blank-canvas.png new file mode 100644 index 0000000000000..91006cd3aec6f Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/start-blank-canvas.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/synapse-settings.png b/articles/stream-analytics/media/stream-analytics-no-code/synapse-settings.png new file mode 100644 index 0000000000000..4b6a623811010 Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/synapse-settings.png differ diff --git a/articles/stream-analytics/no-code-power-bi-tutorial.md b/articles/stream-analytics/no-code-power-bi-tutorial.md index c52030a336fa7..372f02d054f91 100644 --- a/articles/stream-analytics/no-code-power-bi-tutorial.md +++ b/articles/stream-analytics/no-code-power-bi-tutorial.md @@ -5,7 +5,7 @@ author: sidramadoss ms.author: sidram ms.service: stream-analytics ms.topic: how-to -ms.date: 05/23/2022 +ms.date: 05/25/2022 ms.custom: seodec18 --- @@ -28,9 +28,9 @@ In this tutorial, you learn how to: Before you start, make sure you've completed the following steps: * If you don't have an Azure 
subscription, create a [free account](https://azure.microsoft.com/free/). -* Deploy the TollApp event generator to Azure, use this link to [Deploy TollApp Azure Template](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-stream-analytics%2Fmaster%2FSamples%2FTollApp%2FVSProjects%2FTollAppDeployment%2Fazuredeploy.json). Set the 'interval' parameter to 1. And use a new resource group for this. +* Deploy the TollApp event generator to Azure, use this link to [Deploy TollApp Azure Template](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-stream-analytics%2Fmaster%2FSamples%2FTollApp%2FVSProjects%2FTollAppDeployment%2Fazuredeploy.json). Set the 'interval' parameter to 1. And use a new resource group for this step. * Create an [Azure Synapse Analytics workspace](../synapse-analytics/get-started-create-workspace.md) with a [Dedicated SQL pool](../synapse-analytics/get-started-analyze-sql-pool.md#create-a-dedicated-sql-pool). -* Create a table named **carsummary** using your Dedicated SQL pool. You can do this by running the following SQL script: +* Create a table named **carsummary** using your Dedicated SQL pool. You can do it by running the following SQL script: ```SQL CREATE TABLE carsummary ( @@ -42,36 +42,65 @@ Before you start, make sure you've completed the following steps: ``` ## Use no code editor to create a Stream Analytics job 1. Locate the Resource Group in which the TollApp event generator was deployed. -2. Select the Azure Event Hubs namespace. And then under the Event Hubs section, select **entrystream** instance. -3. Go to **Process data** under Features section and then click **start** on the **Start with blank canvas** template. -[![Screenshot of real time dashboard template in no code editor.](./media/stream-analytics-no-code/real-time-dashboard-power-bi.png)](./media/stream-analytics-no-code/real-time-dashboard-power-bi.png#lightbox) -4. Name your job **carsummary** and select **Create**. -5. Configure your event hub input by specifying - * Consumer Group: Default - * Serialization type of your input data: JSON - * Authentication mode which the job will use to connect to your event hub: Connection String defaults - * Click **Connect** -6. Within few seconds, you'll see sample input data and the schema. You can choose to drop fields, rename fields or change data type if you want. -7. Click the **Group by** tile on the canvas and connect it to the event hub tile. Configure the Group By tile by specifying: - * Aggregation as **Count** - * Field as **Make** which is a nested field inside **CarModel** - * Click **Save** - * In the **Group by** settings, select **Make** and **Tumbling window** of **3 minutes** -8. Click the **Manage Fields** tile and connect it to the Group by tile on canvas. Configure the **Manage Fields** tile by specifying: - * Clicking on **Add all fields** - * Rename the fields by clicking on the fields and changing the names from: - * COUNT_make to CarCount - * Window_End_Time to times -9. Click the **Azure Synapse Analytics** tile and connect it to Manage Fields tile on your canvas. Configure Azure Synapse Analytics by specifying: +2. Select the Azure Event Hubs **namespace**. +1. On the **Event Hubs Namespace** page, select **Event Hubs** under **Entities** on the left menu. +1. Select **entrystream** instance. 
+ + :::image type="content" source="./media/stream-analytics-no-code/select-event-hub.png" alt-text="Screenshot showing the selection of the event hub." lightbox="./media/stream-analytics-no-code/select-event-hub.png"::: +1. Go to **Process data** under Features section and then select **start** on the **Start with blank canvas** template. + + :::image type="content" source="./media/stream-analytics-no-code/start-blank-canvas.png" alt-text="Screenshot showing the selection of the Start button on the Start with a blank canvas tile." lightbox="./media/stream-analytics-no-code/start-blank-canvas.png"::: +1. Name your job **carsummary** and select **Create**. + + :::image type="content" source="./media/stream-analytics-no-code/job-name.png" alt-text="Screenshot of the New Stream Analytics job page." lightbox="./media/stream-analytics-no-code/job-name.png"::: +1. On the **event hub** configuration page, confirm the following settings, and then select **Connect**. + - *Consumer Group*: Default + - *Serialization type* of your input data: JSON + - *Authentication mode* that the job will use to connect to your event hub: Connection string. + + :::image type="content" source="./media/stream-analytics-no-code/event-hub-configuration.png" alt-text="Screenshot of the configuration page for your event hub." lightbox="./media/stream-analytics-no-code/event-hub-configuration.png"::: +1. Within few seconds, you'll see sample input data and the schema. You can choose to drop fields, rename fields or change data type if you want. + + :::image type="content" source="./media/stream-analytics-no-code/data-preview-fields.png" alt-text="Screenshot showing the preview of data in the event hub and the fields." lightbox="./media/stream-analytics-no-code/data-preview-fields.png"::: +1. Select the **Group by** tile on the canvas and connect it to the event hub tile. + + :::image type="content" source="./media/stream-analytics-no-code/connect-group.png" alt-text="Screenshot showing the Group tile connected to the Event Hubs tile." lightbox="./media/stream-analytics-no-code/connect-group.png"::: +1. Configure the **Group by** tile by specifying: + 1. Aggregation as **Count**. + 1. Field as **Make** which is a nested field inside **CarModel**. + 1. Select **Save**. + 1. In the **Group by** settings, select **Make** and **Tumbling window** of **3 minutes** + + :::image type="content" source="./media/stream-analytics-no-code/group-settings.png" alt-text="Screenshot of the Group by configuration page." lightbox="./media/stream-analytics-no-code/group-settings.png"::: +1. Select **Add field** on the **Manage fields** page, and add the **Make** field as shown in the following image, and then select **Save**. + + :::image type="content" source="./media/stream-analytics-no-code/add-make-field.png" alt-text="Screenshot showing the addition of the Make field." lightbox="./media/stream-analytics-no-code/add-make-field.png"::: +1. Select **Manage fields** on the command bar. Connect the **Manage Fields** tile to the **Group by tile** on canvas. Select **Add all fields** on the **Manage fields** configuration page. + + :::image type="content" source="./media/stream-analytics-no-code/manage-fields.png" alt-text="Screenshot of the Manage fields page." lightbox="./media/stream-analytics-no-code/manage-fields.png"::: +1. Select **...** next to the fields, and select **Edit** to rename them. 
+ - **COUNT_make** to **CarCount** + - **Window_End_Time** to **times** + + :::image type="content" source="./media/stream-analytics-no-code/rename-fields.png" alt-text="Screenshot of the Manage fields page with the fields renamed." lightbox="./media/stream-analytics-no-code/rename-fields.png"::: +1. The **Manage fields** page should look as shown in the following image. + + :::image type="content" source="./media/stream-analytics-no-code/manage-fields-page.png" alt-text="Screenshot of the Manage fields page with three fields." lightbox="./media/stream-analytics-no-code/manage-fields-page.png"::: +1. Select **Synapse** on the command bar. Connect the **Synapse** tile to the **Manage fields** tile on your canvas. +1. Configure Azure Synapse Analytics by specifying: * Subscription where your Azure Synapse Analytics is located - * Database of the Dedicated SQL pool which you used to create the Table in the previous section. + * Database of the Dedicated SQL pool that you used to create the **carsummary** table in the previous section. * Username and password to authenticate * Table name as **carsummary** - * Click **Connect**. You'll see sample results that will be written to your Synapse SQL table. - [![Screenshot of synapse output in no code editor.](./media/stream-analytics-no-code/synapse-output.png)](./media/stream-analytics-no-code/synapse-output.png#lightbox) -8. Select **Save** in the top ribbon to save your job and then select **Start**. Set Streaming Unit count to 3 and then click **Start** to run your job. Specify the storage account that will be used by Synapse SQL to load data into your data warehouse. -9. You'll then see a list of all Stream Analytics jobs created using the no code editor. And within two minutes, your job will go to a **Running** state. -[![Screenshot of job in running state in no code editor.](./media/stream-analytics-no-code/cosmos-db-running-state.png)](./media/stream-analytics-no-code/cosmos-db-running-state.png#lightbox) + * Select **Connect**. You'll see sample results that will be written to your Synapse SQL table. + + :::image type="content" source="./media/stream-analytics-no-code/synapse-settings.png" alt-text="Screenshot of the Synapse tile settings." lightbox="./media/stream-analytics-no-code/synapse-settings.png"::: +1. Select **Save** in the top ribbon to save your job and then select **Start**. Set Streaming Unit count to 3 and then select **Start** to run your job. Specify the storage account that will be used by Synapse SQL to load data into your data warehouse. + + :::image type="content" source="./media/stream-analytics-no-code/start-analytics-job.png" alt-text="Screenshot of the Start Stream Analytics Job page." lightbox="./media/stream-analytics-no-code/start-analytics-job.png"::: +1. You'll then see a list of all Stream Analytics jobs created using the no code editor. And within two minutes, your job will go to a **Running** state. Select the **Refresh** button on the page to see the status changing from Created -> Starting -> Running. + + :::image type="content" source="./media/stream-analytics-no-code/job-list.png" alt-text="Screenshot showing the list of jobs." lightbox="./media/stream-analytics-no-code/job-list.png"::: ## Create a Power BI visualization 1. Download the latest version of [Power BI desktop](https://powerbi.microsoft.com/desktop). 
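For reference, the **Group by** and **Manage fields** configuration from the preceding steps corresponds conceptually to a Stream Analytics query like the following sketch. The input and output names here are placeholders, and the query the no code editor actually generates may differ.

```SQL
-- Count cars per Make over a 3-minute tumbling window and write Make,
-- CarCount, and times to the carsummary table in the dedicated SQL pool.
SELECT
    CarModel.Make AS Make,
    COUNT(*) AS CarCount,
    System.Timestamp() AS times
INTO
    [synapse-output]
FROM
    [eventhub-input]
GROUP BY
    CarModel.Make,
    TumblingWindow(minute, 3)
```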
diff --git a/articles/stream-analytics/quick-create-bicep.md b/articles/stream-analytics/quick-create-bicep.md
new file mode 100644
index 0000000000000..4691f706fab6a
--- /dev/null
+++ b/articles/stream-analytics/quick-create-bicep.md
@@ -0,0 +1,105 @@
+---
+title: Quickstart - Create an Azure Stream Analytics job using Bicep
+description: This quickstart shows how to use Bicep to create an Azure Stream Analytics job.
+ms.service: stream-analytics
+author: schaffererin
+ms.author: v-eschaffer
+ms.workload: big-data
+ms.topic: quickstart
+ms.custom: mvc, subject-armqs, devx-track-azurepowershell, mode-arm
+ms.date: 05/17/2022
+---
+
+# Quickstart: Create an Azure Stream Analytics job using Bicep
+
+In this quickstart, you use Bicep to create an Azure Stream Analytics job. Once the job is created, you validate the deployment.
+
+[!INCLUDE [About Bicep](../../includes/resource-manager-quickstart-bicep-introduction.md)]
+
+## Prerequisites
+
+To complete this article, you need to have an Azure subscription. [Create one for free](https://azure.microsoft.com/free/).
+
+## Review the Bicep file
+
+The Bicep file used in this quickstart is from [Azure Quickstart Templates](https://azure.microsoft.com/resources/templates/streamanalytics-create/).
+
+:::code language="bicep" source="~/quickstart-templates/quickstarts/microsoft.streamanalytics/streamanalytics-create/main.bicep":::
+
+The Azure resource defined in the Bicep file is [Microsoft.StreamAnalytics/StreamingJobs](/azure/templates/microsoft.streamanalytics/streamingjobs): create an Azure Stream Analytics job.
+
+## Deploy the Bicep file
+
+1. Save the Bicep file as **main.bicep** to your local computer.
+1. Deploy the Bicep file using either Azure CLI or Azure PowerShell.
+
+    # [CLI](#tab/CLI)
+
+    ```azurecli
+    az group create --name exampleRG --location eastus
+    az deployment group create --resource-group exampleRG --template-file main.bicep --parameters streamAnalyticsJobName=<job-name> numberOfStreamingUnits=<number-of-units>
+    ```
+
+    # [PowerShell](#tab/PowerShell)
+
+    ```azurepowershell
+    New-AzResourceGroup -Name exampleRG -Location eastus
+    New-AzResourceGroupDeployment -ResourceGroupName exampleRG -TemplateFile ./main.bicep -streamAnalyticsJobName "<job-name>" -numberOfStreamingUnits <number-of-units>
+    ```
+
+    ---
+
+    You need to provide values for the following parameters:
+
+    - **streamAnalyticsJobName**: Replace **\<job-name\>** with the Stream Analytics job name. The name can contain alphanumeric characters and hyphens, and it must be between 3 and 63 characters long.
+    - **numberOfStreamingUnits**: Replace **\<number-of-units\>** with the number of Streaming Units. Allowed values include: 1, 3, 6, 12, 18, 24, 30, 36, 42, and 48.
+
+    > [!NOTE]
+    > When the deployment finishes, you should see a message indicating the deployment succeeded.
+
+## Review deployed resources
+
+Use the Azure portal, Azure CLI, or Azure PowerShell to list the deployed resources in the resource group.
+
+# [CLI](#tab/CLI)
+
+```azurecli-interactive
+az resource list --resource-group exampleRG
+```
+
+# [PowerShell](#tab/PowerShell)
+
+```azurepowershell-interactive
+Get-AzResource -ResourceGroupName exampleRG
+```
+
+---
+
+## Clean up resources
+
+If you plan to continue on to subsequent tutorials, you may wish to leave these resources in place. When no longer needed, delete the resource group, which deletes the Azure Stream Analytics job.
+To delete the resource group by using Azure CLI or Azure PowerShell:
+
+# [CLI](#tab/CLI)
+
+```azurecli-interactive
+az group delete --name exampleRG
+```
+
+# [PowerShell](#tab/PowerShell)
+
+```azurepowershell-interactive
+Remove-AzResourceGroup -Name exampleRG
+```
+
+---
+
+## Next steps
+
+In this quickstart, you created an Azure Stream Analytics job using Bicep and validated the deployment. To learn how to create your own Bicep files using Visual Studio Code, continue on to the following article:
+
+> [!div class="nextstepaction"]
+> [Quickstart: Create Bicep files with Visual Studio Code](../azure-resource-manager/bicep/quickstart-create-bicep-use-visual-studio-code.md)
diff --git a/articles/stream-analytics/stream-analytics-define-outputs.md b/articles/stream-analytics/stream-analytics-define-outputs.md
index c9dbefe9d22ba..c543800a4a5cb 100644
--- a/articles/stream-analytics/stream-analytics-define-outputs.md
+++ b/articles/stream-analytics/stream-analytics-define-outputs.md
@@ -6,7 +6,7 @@ ms.author: ebnkruma
 ms.service: stream-analytics
 ms.topic: conceptual
 ms.custom: contperf-fy21q1
-ms.date: 01/14/2022
+ms.date: 06/06/2022
 ---
 
 # Outputs from Azure Stream Analytics
@@ -46,6 +46,20 @@ Additionally, for more advanced tuning of the partitions, the number of output w
 
 All outputs support batching, but only some support batch size explicitly. Azure Stream Analytics uses variable-size batches to process events and write to outputs. Typically the Stream Analytics engine doesn't write one message at a time, and uses batches for efficiency. When the rate of both the incoming and outgoing events is high, Stream Analytics uses larger batches. When the egress rate is low, it uses smaller batches to keep latency low.
+
+## Avro and Parquet file splitting behavior
+
+A Stream Analytics query can generate multiple schemas for a given output. The list of projected columns, and their types, can change on a row-by-row basis.
+By design, the Avro and Parquet formats don't support variable schemas in a single file.
+
+The following behaviors may occur when directing a stream with variable schemas to an output that uses these formats:
+
+- If the schema change can be detected, the current output file is closed and a new one is initialized with the new schema. Splitting files like this severely slows down the output when schema changes happen frequently, and the resulting back pressure will in turn severely impact the overall performance of the job.
+- If the schema change can't be detected, the row is most likely rejected, and the job becomes stuck because the row can't be output. Nested columns and multi-type arrays are situations where the schema change isn't discovered and the rows are rejected.
+
+We highly recommend that you treat outputs that use the Avro or Parquet format as strongly typed (schema-on-write), and that you write the queries targeting them accordingly, with explicit conversions and projections that produce a uniform schema.
+
+If multiple schemas need to be generated, consider creating multiple outputs and splitting records into each destination by using a `WHERE` clause.
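+
+For example, a pair of hypothetical queries along the following lines (the input, output, and column names are illustrative, not taken from a specific sample) keeps each Avro or Parquet destination strongly typed by projecting an explicit, fixed column list and routing rows with a `WHERE` clause:
+
+```sql
+-- Sketch only: input, output, and column names below are placeholders.
+-- Route telemetry rows, with one fixed projection, to a dedicated output.
+SELECT DeviceId, CAST(Temperature AS float) AS Temperature, EventEnqueuedUtcTime
+INTO [telemetry-parquet-output]
+FROM [event-hub-input]
+WHERE EventType = 'telemetry'
+
+-- Route error rows, with a different fixed projection, to a separate output.
+SELECT DeviceId, ErrorCode, ErrorMessage, EventEnqueuedUtcTime
+INTO [error-parquet-output]
+FROM [event-hub-input]
+WHERE EventType = 'error'
+```
+
+Each `SELECT ... INTO` statement writes only the rows that match its `WHERE` condition, so every destination receives a single, uniform schema.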
+
 ## Parquet output batching window properties
 
 When using Azure Resource Manager template deployment or the REST API, the two batching window properties are:
diff --git a/articles/synapse-analytics/backuprestore/sqlpool-create-restore-point.md b/articles/synapse-analytics/backuprestore/sqlpool-create-restore-point.md
index 80b646babcc5f..b13f109c1e7ab 100644
--- a/articles/synapse-analytics/backuprestore/sqlpool-create-restore-point.md
+++ b/articles/synapse-analytics/backuprestore/sqlpool-create-restore-point.md
@@ -2,13 +2,13 @@
 title: Create a user defined restore point for a dedicated SQL pool
 description: Learn how to use the Azure portal to create a user-defined restore point for dedicated SQL pool in Azure Synapse Analytics.
 author: joannapea
-manager: igorstan
+manager:
 ms.service: synapse-analytics
 ms.topic: how-to
 ms.subservice: sql
 ms.date: 10/29/2020
 ms.author: joanpo
-ms.reviewer: igorstan
+ms.reviewer: wiassaf
 ms.custom: seo-lt-2019
 ---
 # User-defined restore points
diff --git a/articles/synapse-analytics/catalog-and-governance/how-to-discover-connect-analyze-azure-purview.md b/articles/synapse-analytics/catalog-and-governance/how-to-discover-connect-analyze-azure-purview.md
index be18c61dfd3e3..c242ee70fa5c9 100644
--- a/articles/synapse-analytics/catalog-and-governance/how-to-discover-connect-analyze-azure-purview.md
+++ b/articles/synapse-analytics/catalog-and-governance/how-to-discover-connect-analyze-azure-purview.md
@@ -7,7 +7,7 @@ ms.subservice: purview
 ms.topic: how-to
 ms.date: 12/16/2020
 ms.author: jejiang
-ms.reviewer: jrasnick
+ms.reviewer: wiassaf
 ---
 # Discover, connect, and explore data in Synapse using Microsoft Purview
diff --git a/articles/synapse-analytics/data-explorer/kusto-query-language/toc.yml b/articles/synapse-analytics/data-explorer/kusto-query-language/toc.yml
index 9cb44464f714c..5b45c66b74662 100644
--- a/articles/synapse-analytics/data-explorer/kusto-query-language/toc.yml
+++ b/articles/synapse-analytics/data-explorer/kusto-query-language/toc.yml
@@ -134,7 +134,7 @@ items:
   href: /azure/data-explorer/kusto/query/pivotplugin?context=/azure/synapse-analytics/context/context
 - name: preview plugin
   href: /azure/data-explorer/kusto/query/previewplugin?context=/azure/synapse-analytics/context/context
-- name: python plugin
+- name: Python plugin
   href: /azure/data-explorer/kusto/query/pythonplugin?context=/azure/synapse-analytics/context/context
 - name: R plugin
   href: /azure/data-explorer/kusto/query/rplugin?context=/azure/synapse-analytics/context/context
diff --git a/articles/synapse-analytics/guidance/implementation-success-assess-environment.md b/articles/synapse-analytics/guidance/implementation-success-assess-environment.md
new file mode 100644
index 0000000000000..d0045b492e296
--- /dev/null
+++ b/articles/synapse-analytics/guidance/implementation-success-assess-environment.md
@@ -0,0 +1,303 @@
+---
+title: "Synapse implementation success methodology: Assess environment"
+description: "Learn how to assess your environment to help evaluate the solution design and make informed technology decisions to implement Azure Synapse Analytics."
+author: peter-myers
+ms.author: v-petermyers
+ms.reviewer: sngun
+ms.service: synapse-analytics
+ms.topic: conceptual
+ms.date: 05/31/2022
+---
+
+# Synapse implementation success methodology: Assess environment
+
+[!INCLUDE [implementation-success-context](includes/implementation-success-context.md)]
+
+The first step when implementing Azure Synapse Analytics is to assess your environment.
An assessment provides you with the opportunity to gather all the available information about your existing environment, environmental requirements, project requirements, constraints, timelines, and pain points. This information will form the basis of later evaluations and checkpoint activities. It will prove invaluable when it comes time to validate and compare against the project solution as it's planned, designed, and developed. We recommend that you dedicate a good amount of time to gather all the information and be sure to have necessary discussions with relevant groups. Relevant groups can include project stakeholders, business users, solution designers, and subject matter experts (SMEs) of the existing solution and environment. + +The assessment will become a guide to help you evaluate the solution design and make informed technology recommendations to implement Azure Synapse. + +## Workload assessment + +The workload assessment is concerned with the environment, analytical workload roles, ETL/ELT, networking and security, the Azure environment, and data consumption. + +### Environment + +For the environment, evaluate the following points. + +- Describe your existing analytical workload: + - What are the workloads (like data warehouse or big data)? + - How is this workload helping the business? What are the use case scenarios? + - What is the business driver for this analytical platform and for potential migration? + - Gather details about the existing architecture, design, and implementation choices. + - Gather details about all existing upstream and downstream dependent components and consumers. +- Are you migrating an existing data warehouse (like Microsoft SQL Server, Microsoft Analytics Platform System (APS), Netezza, Snowflake, or Teradata)? +- Are you migrating a big data platform (like Cloudera or Hortonworks)? +- Gather the architecture and dataflow diagrams for the current analytical environment. +- Where are the data sources for your planned analytical workloads located (Azure, other cloud providers, or on-premises)? +- What is the total size of existing datasets (historical and incremental)? What is the current rate of growth of your dataset(s)? What is the projected rate of growth of your datasets for the next 2-5 years? +- Do you have an existing data lake? Gather as much detail as possible about file types (like Parquet or CSV), file sizes, and security configuration. +- Do you have semi-structured or unstructured data to process and analyze? +- Describe the nature of the data processing (batch or real-time processing). +- Do you need interactive data exploration from relational data, data lake, or other sources? +- Do you need real-time data analysis and exploration from operational data sources? +- What are the pain points and limitations in the current environment? +- What source control and DevOps tools are you using today? +- Do you have a use case to build a hybrid (cloud and on-premises) analytical solution, cloud only, or multi-cloud? +- Gather information on the existing cloud environment. Is it a single-cloud provider or a multi-cloud provider? +- Gather plans about the future cloud environment. Will it be a single-cloud provider or a multi-cloud provider? +- What are the RPO/RTO/HA/SLA requirements in the existing environment? +- What are the RPO/RTO/HA/SLA requirements in the planned environment? + +### Analytical workload roles + +For the analytical workload roles, evaluate the following points. 
+
+- Describe the different roles (data scientist, data engineer, data analyst, and others).
+- Describe the analytical platform access control requirements for these roles.
+- Identify the platform owner who's responsible for provisioning compute resources and granting access.
+- Describe how different data roles currently collaborate.
+- Are there multiple teams collaborating on the same analytical platform? If so, what are the access control and isolation requirements for each of these teams?
+- What are the client tools that end users use to interact with the analytical platform?
+
+### ETL/ELT, transformation, and orchestration
+
+For ETL/ELT, transformation, and orchestration, evaluate the following points.
+
+- What tools are you using today for data ingestion (ETL or ELT)?
+- Where do these tools exist in the existing environment (on-premises or the cloud)?
+- What are your current data load and update requirements (real-time, micro batch, hourly, daily, weekly, or monthly)?
+- Describe the transformation requirements for each layer (big data, data lake, data warehouse).
+- What is the current programming approach for transforming the data (no-code, low-code, or programming like SQL, Python, Scala, C#, or other)?
+- What is the planned or preferred programming approach for transforming the data (no-code, low-code, or programming like SQL, Python, Scala, C#, or other)?
+- What tools are currently in use for data orchestration to automate the data-driven process?
+- Where are the data sources for your existing ETL located (Azure, other cloud provider, or on-premises)?
+- What are the existing data consumption tools (reporting, BI tools, open-source tools) that require integration with the analytical platform?
+- What are the planned data consumption tools (reporting, BI tools, open-source tools) that will require integration with the analytical platform?
+
+### Networking and security
+
+For networking and security, evaluate the following points.
+
+- What regulatory requirements do you have for your data?
+- If your data contains customer content, payment card industry (PCI) data, or Health Insurance Portability and Accountability Act of 1996 (HIPAA) data, has your security group certified Azure for this data? If so, for which Azure services?
+- Describe your user authorization and authentication requirements.
+- Are there security issues that could limit access to data during implementation?
+- Is there test data available to use during development and testing?
+- Describe the organizational network security requirements on the analytical compute and storage (private network, public network, or firewall restrictions).
+- Describe the network security requirements for client tools to access analytical compute and storage (peered network, private endpoint, or other).
+- Describe the current network setup between on-premises and Azure (Azure ExpressRoute, site-to-site, or other).
+
+Use the following checklists of possible requirements to guide your assessment.
+ +- Data protection: + - In-transit encryption + - Encryption at rest (default keys or customer-managed keys) + - Data discovery and classification +- Access control: + - Object-level security + - Row-level security + - Column-level security + - Dynamic data masking +- Authentication: + - SQL login + - Azure Active Directory (Azure AD) + - Multi-factor authentication (MFA) +- Network security: + - Virtual networks + - Firewall + - Azure ExpressRoute +- Threat protection: + - Threat detection + - Auditing + - Vulnerability assessment + +For more information, see the [Azure Synapse Analytics security white paper](security-white-paper-introduction.md). + +### Azure environment + +For the Azure environment, evaluate the following points. + +- Are you currently using Azure? Is it used for production workloads? +- If you're using Azure, which services are you using? Which regions are you using? +- Do you use Azure ExpressRoute? What's its bandwidth? +- Do you have budget approval to provision the required Azure services? +- How do you currently provision and manage resources (Azure Resource Manager (ARM) or Terraform)? +- Is your key team familiar with Synapse Analytics? Is any training required? + +### Data consumption + +For data consumption, evaluate the following points. + +- Describe how and what tools you currently use to perform activities like ingest, explore, prepare, and data visualization. +- Identify what tools you plan to use to perform activities like ingest, explore, prepare, and data visualization. +- What applications are planned to interact with the analytical platform (Microsoft Power BI, Microsoft Excel, Microsoft SQL Server Reporting Services, Tableau, or others)? +- Identify all data consumers. +- Identify data export and data sharing requirements. + +## Azure Synapse services assessment + +The Azure Synapse services assessment is concerned with the services within Azure Synapse. Azure Synapse has the following components for compute and data movement: + +- **Synapse SQL:** A distributed query system for Transact-SQL (T-SQL) that enables data warehousing and data virtualization scenarios. It also extends T-SQL to address streaming and machine learning (ML) scenarios. Synapse SQL offers both *serverless* and *dedicated* resource models. +- **Serverless SQL pool:** A distributed data processing system, built for large-scale data and computational functions. There's no infrastructure to set up or clusters to maintain. This service is suited for unplanned or burst workloads. Recommended scenarios include quick data exploration on files directly on the data lake, logical data warehouse, and data transformation of raw data. +- **Dedicated SQL pool:** Represents a collection of analytic resources that are provisioned when using Synapse SQL. The size of a dedicated SQL pool (formerly SQL DW) is determined by Data Warehousing Units (DWU). This service is suited for a data warehouse with predictable, high performance continuous workloads over data stored in SQL tables.  +- **Apache Spark pool:** Deeply and seamlessly integrates Apache Spark, which is the most popular open source big data engine used for data preparation, data engineering, ETL, and ML. +- **Data integration pipelines:** Azure Synapse contains the same data integration engine and experiences as Azure Data Factory (ADF). They allow you to create rich at-scale ETL pipelines without leaving Azure Synapse. + +To help determine the best SQL pool type (dedicated or serverless), evaluate the following points. 
+
+- Do you want to build a traditional relational data warehouse by reserving processing power for data stored in SQL tables?
+- Do your use cases demand predictable performance?
+- Do you want to build a logical data warehouse on top of a data lake?
+- Do you want to query data directly from a data lake?
+- Do you want to explore data from a data lake?
+
+The following table compares the two Synapse SQL pool types.
+
+| **Comparison** | **Dedicated SQL pool** | **Serverless SQL pool** |
+|:-|:-|:-|
+| Value propositions | Fully managed capabilities of a data warehouse. Predictable and high performance for continuous workloads. Optimized for managed (loaded) data. | Easy to get started and explore data lake data. Better total cost of ownership (TCO) for ad hoc and intermittent workloads. Optimized for querying data in a data lake. |
+| Workloads | *Ideal for continuous workloads.* Loading boosts performance, with more complexity. Charging per DWU (when sized well) will be cost-beneficial. | *Ideal for ad hoc or intermittent workloads.* There's no need to load data, so it's easier to start and run. Charging per usage will be cost-beneficial. |
+| Query performance | *Delivers high concurrency and low latency.* Supports rich caching options, including materialized views. There's the ability to choose trade-offs with workload management (WLM). | *Not suited for dashboarding queries.* Millisecond response times aren't expected. It works only on external data. |
+
+### Dedicated SQL pool assessment
+
+For the dedicated SQL pool assessment, evaluate the following platform points.
+
+- What is the current data warehouse platform (Microsoft SQL Server, Netezza, Teradata, Greenplum, or other)?
+- For a migration workload, determine the make and model of your appliance for each environment. Include details of CPUs, GPUs, and memory.
+- For an appliance migration, when was the hardware purchased? Has the appliance been fully depreciated? If not, when will depreciation end? And how much capital expenditure is left?
+- Are there any hardware and network architecture diagrams?
+- Where are the data sources for your planned data warehouse located (Azure, other cloud provider, or on-premises)?
+- What are the data hosting platforms of the data sources for your data warehouse (Microsoft SQL Server, Azure SQL Database, DB2, Oracle, Azure Blob Storage, AWS, Hadoop, or other)?
+- Are any of the data sources data warehouses? If so, which ones?
+- Identify all ETL, ELT, and data loading scenarios (batch windows, streaming, near real-time). Identify existing service level agreements (SLAs) for each scenario and document the expected SLAs in the new environment.
+- What is the current data warehouse size?
+- What rate of dataset growth is being targeted for the dedicated SQL pool?
+- Describe the environments you're using today (development, test, or production).
+- Which tools are currently in place for data movement (ADF, Microsoft SQL Server Integration Services (SSIS), robocopy, Informatica, SFTP, or others)?
+- Are you planning to load real-time or near real-time data?
+
+Evaluate the following database points.
+
+- What is the number of objects in each data warehouse (schemas, tables, views, stored procedures, functions)?
+- Is it a star schema, snowflake schema, or other design?
+- What are the largest tables in terms of size and number of records?
+- What are the widest tables in terms of the number of columns?
+- Is there already a data model designed for your data warehouse?
Is it a Kimball, Inmon, or star schema design? +- Are Slowly Changing Dimensions (SCDs) in use? If so, which types? +- Will a semantic layer be implemented by using relational data marts or Analysis Services (tabular or multidimensional), or another product? +- What are the HA/RPO/RTO/data archiving requirements? +- What are the region replication requirements? + +Evaluate the following workload characteristics. + +- What is the estimated number of concurrent users or jobs accessing the data warehouse during *peak hours*? +- What is the estimated number of concurrent users or jobs accessing the data warehouse during *off peak hours*? +- Is there a period of time when there will be no users or jobs? +- What are your query execution performance expectations for interactive queries? +- What are your data load performance expectations for daily/weekly/monthly data loads or updates? +- What are your query execution expectations for reporting and analytical queries? +- How complex will the most commonly executed queries be? +- What percentage of your total dataset size is your active dataset? +- Approximately what percentage of the workload is anticipated for loading or updating, batch processing or reporting, interactive query, and analytical processing? +- Identify the data consuming patterns and platforms: + - Current and planned reporting method and tools. + - Which application or analytical tools will access the data warehouse? + - Number of concurrent queries? + - Average number of active queries at any point in time? + - What is the nature of data access (interactive, ad hoc, export, or others)? + - Data roles and complete description of their data requirements. + - Maximum number of concurrent connections. +- Query performance SLA pattern by: + - Dashboard users. + - Batch reporting. + - ML users. + - ETL process. +- What are the security requirements for the existing environment and for the new environment (row-level security, column-level security, access control, encryption, and others)? +- Do you have requirements to integrate ML model scoring with T-SQL? + +### Serverless SQL pool assessment + +Synapse Serverless SQL pool supports three major use cases. + +- **Basic discovery and exploration:** Quickly reason about the data in various formats (Parquet, CSV, JSON) in your data lake, so you can plan how to extract insights from it. +- **Logical data warehouse:** Provide a relational abstraction on top of raw or disparate data without relocating and transforming data, allowing an always-current view of your data. +- **Data transformation:** Simple, scalable, and performant way to transform data in the lake by using T-SQL, so it can be fed to BI and other tools or loaded into a relational data store (Synapse SQL databases, Azure SQL Database, or others). + +Different data roles can benefit from serverless SQL pool: + +- **Data engineers** can explore the data lake, transform and prepare data by using this service, and simplify their data transformation pipelines. +- **Data scientists** can quickly reason about the contents and structure of the data in the data lake, thanks to features such as OPENROWSET and automatic schema inference. +- **Data analysts** can [explore data and Spark external tables](../sql/develop-storage-files-spark-tables.md) created by data scientists or data engineers by using familiar T-SQL statements or their favorite query tools. 
+- **BI professionals** can quickly [create Power BI reports on top of data in the data lake](../sql/tutorial-connect-power-bi-desktop.md) and Spark tables. + +> [!NOTE] +> The T-SQL language is used in both dedicated SQL pool and the serverless SQL pool, however there are some differences in the set of supported features. For more information about T-SQL features supported in Synapse SQL (dedicated and serverless), see [Transact-SQL features supported in Azure Synapse SQL](../sql/overview-features.md). + +For the serverless SQL pool assessment, evaluate the following points. + +- Do you have use cases to discover and explore data from a data lake by using relational queries (T-SQL)? +- Do you have use cases to build a logical data warehouse on top of a data lake? +- Identify whether there are use cases to transform data in the data lake without first moving data from the data lake. +- Is your data already in Azure Data Lake Storage (ADLS) or Azure Blob Storage? +- If your data is already in ADLS, do you have a good partition strategy in the data lake? +- Do you have operational data in Azure Cosmos DB? Do you have use cases for real-time analytics on Azure Cosmos DB without impacting transactions? +- Identify the file types in the data lake. +- Identify the query performance SLA. Does your use case demand predictable performance and cost? +- Do you have unplanned or bursty SQL analytical workloads? +- Identify the data consuming pattern and platforms: + - Current and planned reporting method and tools. + - Which application or analytical tools will access the serverless SQL pool? + - Average number of active queries at any point in time. + - What is the nature of data access (interactive, ad hoc, export, or others)? + - Data roles and complete description of their data requirements. + - Maximum number of concurrent connections. + - Query complexity? +- What are the security requirements (access control, encryption, and others)? +- What is the required T-SQL functionality (stored procedures or functions)? +- Identify the number of queries that will be sent to the serverless SQL pool and the result set size of each query. + +> [!TIP] +> If you're new to serverless SQL pools, we recommend you work through the [Build data analytics solutions using Azure Synapse serverless SQL pools](/learn/paths/build-data-analytics-solutions-using-azure-synapse-serverless-sql-pools/) learning path. + +### Spark pool assessment + +Spark pools in Azure Synapse enable the following key scenarios. + +- **Data engineering/Data preparation:** Apache Spark includes many language features to support preparation and processing of large volumes of data. Preparation and processing can make the data more valuable and allow it to be consumed by other Azure Synapse services. It's enabled through multiple languages (C#, Scala, PySpark, Spark SQL) and by using supplied libraries for processing and connectivity. +- **Machine learning:** Apache Spark comes with [MLlib](https://spark.apache.org/mllib/), which is an ML library built on top of Spark that you can use from a Spark pool. Spark pools also include Anaconda, which is a Python distribution that comprises various packages for data science including ML. In addition, Apache Spark on Synapse provides pre-installed libraries for [Microsoft Machine Learning](https://mmlspark.blob.core.windows.net/website/index.html), which is a fault-tolerant, elastic, and RESTful ML framework. 
When combined with built-in support for notebooks, you have a rich environment for creating ML applications. + +> [!NOTE] +> For more information, see [Apache Spark in Azure Synapse Analytics](../spark/apache-spark-overview.md). +> +> Also, Azure Synapse is compatible with Linux Foundation Delta Lake. Delta Lake is an open-source storage layer that brings ACID (atomicity, consistency, isolation, and durability) transactions to Apache Spark and big data workloads. For more information, see [What is Delta Lake](../spark/apache-spark-what-is-delta-lake.md). + +For the Spark pool assessment, evaluate the following points. + +- Identify the workloads that require data engineering or data preparation. +- Clearly define the types of transformations. +- Identify whether you have unstructured data to process. +- When you're migrating from an existing Spark/Hadoop workload: + - What is the existing big data platform (Cloudera, Hortonworks, cloud services, or other)? + - If it's a migration from on-premises, has hardware depreciated or licenses expired? If not, when will depreciation or expiry happen? + - What is the existing cluster type? + - What are the required libraries and Spark versions? + - Is it a Hadoop migration to Spark? + - What are the current or preferred programming languages? + - What is the type of workload (big data, ML, or other)? + - What are the existing and planned client tools and reporting platforms? + - What are the security requirements? + - Are there any current pain points and limitations? +- Do you plan to use, or are currently using, Delta Lake? +- How do you manage packages today? +- Identify the required compute cluster types. +- Identify whether cluster customization is required. + +> [!TIP] +> If you're new to Spark pools, we recommend you work through the [Perform data engineering with Azure Synapse Apache Spark Pools](/learn/paths/perform-data-engineering-with-azure-synapse-apache-spark-pools/) learning path. + +## Next steps + +In the [next article](implementation-success-evaluate-workspace-design.md) in the *Azure Synapse success by design* series, learn how to evaluate the Synapse workspace design and validate that it meets guidelines and requirements. diff --git a/articles/synapse-analytics/guidance/implementation-success-evaluate-data-integration-design.md b/articles/synapse-analytics/guidance/implementation-success-evaluate-data-integration-design.md new file mode 100644 index 0000000000000..3dd8a8ad3bc63 --- /dev/null +++ b/articles/synapse-analytics/guidance/implementation-success-evaluate-data-integration-design.md @@ -0,0 +1,87 @@ +--- +title: "Synapse implementation success methodology: Evaluate data integration design" +description: "Learn how to evaluate the data integration design and validate that it meets guidelines and requirements." +author: peter-myers +ms.author: v-petermyers +ms.reviewer: sngun +ms.service: synapse-analytics +ms.topic: conceptual +ms.date: 05/31/2022 +--- + +# Synapse implementation success methodology: Evaluate data integration design + +[!INCLUDE [implementation-success-context](includes/implementation-success-context.md)] + +Azure Synapse Analytics contains the same data integration engine and experiences as Azure Data Factory (ADF), allowing you to create rich at-scale ETL pipelines without leaving Azure Synapse Analytics. 
+
+:::image type="content" source="media/implementation-success-evaluate-data-integration-design/azure-synapse-analytics-architecture-data-integration.png" alt-text="Image shows the components of Azure Synapse, with the Data Integration component highlighted.":::
+
+This article describes how to evaluate the design of the data integration components for your project. Specifically, it helps you to determine whether Azure Synapse pipelines are the best fit for your data integration requirements. Time invested in evaluating the design prior to solution development can help to eliminate unexpected design changes that may affect your project timeline or cost.
+
+## Fit gap analysis
+
+You should perform a thorough fit gap analysis of your data integration strategy. If you choose Azure Synapse pipelines as the data integration tool, review the following points to ensure they're the best fit for your data integration requirements and orchestration. Even if you choose different data integration tools, you should still review the following points to validate that all key design points have been considered and that your chosen tool will support your solution needs. This information should have been captured during your assessment performed earlier in this methodology.
+
+- Review your data sources and destinations (targets):
+  - Validate that source and destination stores are [supported data stores](/azure/data-factory/connector-overview).
+  - If they're not supported, check whether you can use the [extensible options](/azure/data-factory/connector-overview#integrate-with-more-data-stores).
+- Review the triggering points of your data integration and the frequency:
+  - Azure Synapse pipelines support schedule, tumbling window, and storage event triggers.
+  - Validate the minimum recurrence interval and supported storage events against your requirements.
+- Review the required modes of data integration:
+  - Scheduled, periodic, and triggered batch processing can be effectively designed in Azure Synapse pipelines.
+  - To implement Change Data Capture (CDC) functionality, use third-party products or create a custom solution.
+  - To support real-time streaming, use [Azure Event Hubs](/azure/event-hubs/event-hubs-about), [Azure Event Hubs from Apache Kafka](/azure/event-hubs/event-hubs-for-kafka-ecosystem-overview), or [Azure IoT Hub](/azure/iot-hub/iot-concepts-and-iot-hub).
+  - To run Microsoft SQL Server Integration Services (SSIS) packages, you can [lift and shift SSIS workloads to the cloud](/sql/integration-services/lift-shift/ssis-azure-lift-shift-ssis-packages-overview?view=sql-server-ver15&preserve-view=true).
+- Review the compute design:
+  - Does the compute required for the pipelines need to be serverless or provisioned?
+  - Azure Synapse pipelines support both modes of integration runtime (IR): serverless or self-hosted on a Windows machine.
+  - Validate [ports and firewalls](/azure/data-factory/create-self-hosted-integration-runtime?tabs=data-factory#ports-and-firewalls) and [proxy settings](/azure/data-factory/create-self-hosted-integration-runtime?tabs=data-factory#proxy-server-considerations) when using the self-hosted IR (provisioned).
+- Review the security requirements and the networking and firewall configuration of the environment, and compare them to the security, networking, and firewall configuration of the design:
+  - Review how the data sources are secured and networked.
+  - Review how the target data stores are secured and networked.
Azure Synapse pipelines have different [data access strategies](/azure/data-factory/data-access-strategies) that provide a secure way to connect data stores via private endpoints or virtual networks.
+  - Use [Azure Key Vault](/azure/key-vault/general/basic-concepts) to store credentials whenever applicable.
+  - Use ADF for customer-managed key (CMK) encryption of credentials and store them in the self-hosted IR.
+- Review the design for ongoing monitoring of all data integration components.
+
+## Architecture considerations
+
+As you review the data integration design, consider the following recommendations and guidelines to ensure that the data integration components of your solution will provide ongoing operational excellence, performance efficiency, reliability, and security.
+
+### Operational excellence
+
+For operational excellence, evaluate the following points.
+
+- **Environment:** When planning your environments, segregate them by development/test, user acceptance testing (UAT), and production. Use the folder organizational options to organize your pipelines and datasets by business/ETL jobs to support better maintainability. Use [annotations](https://azure.microsoft.com/resources/videos/azure-friday-enhanced-monitoring-capabilities-and-tagsannotations-in-azure-data-factory/) to tag your pipelines so you can easily monitor them. Create reusable pipelines by using parameters, iteration, and conditional activities.
+- **Monitoring and alerting:** Synapse workspaces include the [Monitor Hub](../get-started-monitor.md), which provides rich monitoring information for every pipeline run. It also integrates with [Log Analytics](/azure/azure-monitor/logs/log-analytics-overview) for further log analysis and alerting. You should implement these features to provide proactive error notifications. Also, use *Upon Failure* paths to implement customized [error handling](https://techcommunity.microsoft.com/t5/azure-data-factory/understanding-pipeline-failures-and-error-handling/ba-p/1630459).
+- **Automated deployment and testing:** Azure Synapse pipelines are built into the Synapse workspace, so you can take advantage of workspace automation and deployment. Use [ARM templates](../quickstart-deployment-template-workspaces.md) to minimize manual activities when creating Synapse workspaces. Also, [integrate Synapse workspaces with Azure DevOps](../cicd/continuous-integration-delivery.md#set-up-a-release-pipeline-in-azure-devops) to build code versioning and automate publication.
+
+### Performance efficiency
+
+For performance efficiency, evaluate the following points.
+
+- Follow [performance guidance](/azure/data-factory/copy-activity-performance) and [optimization features](/azure/data-factory/copy-activity-performance-features) when working with the copy activity.
+- Choose optimized connectors for data transfer instead of generic connectors. For example, use PolyBase instead of bulk insert when moving data from Azure Data Lake Storage Gen2 (ADLS Gen2) to a dedicated SQL pool.
+- When creating a new Azure IR, set the region location as [auto-resolve](/azure/data-factory/concepts-integration-runtime#azure-ir-location) or select the same region as the data stores.
+- For self-hosted IR, choose the [Azure virtual machine (VM) size](/azure/data-factory/copy-activity-performance-features#self-hosted-integration-runtime-scalability) based on the integration requirements.
+- Choose a stable network connection, like [Azure ExpressRoute](/azure/expressroute/expressroute-introduction), for fast and consistent bandwidth. + +### Reliability + +When you execute a pipeline by using Azure IR, it's serverless in nature and so it provides resiliency out of the box. There's little for customers to manage. However, when a pipeline runs in a self-hosted IR, we recommend that you run it by using a [high availability configuration](/azure/data-factory/create-self-hosted-integration-runtime?tabs=data-factory#high-availability-and-scalability) in Azure VMs. This configuration ensures integration pipelines aren't broken even when a VM goes offline. Also, we recommend that you use Azure ExpressRoute for a fast and reliable network connection between on-premises and Azure. + +### Security + +A secured data platform is one of the key requirements of every organization. You should thoroughly plan security for the entire platform rather than individual components. Here are some security guidelines for Azure Synapse pipeline solutions. + +- Secure data movement to the cloud by using [Azure Synapse private endpoints](https://techcommunity.microsoft.com/t5/azure-architecture-blog/understanding-azure-synapse-private-endpoints/ba-p/2281463). +- Use Azure Active Directory (Azure AD) [managed identities](/azure/active-directory/managed-identities-azure-resources/overview) for authentication. +- Use Azure role-based access control (RBAC) and [Synapse RBAC](../security/synapse-workspace-synapse-rbac.md) for authorization. +- Store credentials, secrets, and keys in Azure Key Vault rather than in the pipeline. For more information, see [Use Azure Key Vault secrets in pipeline activities](/azure/data-factory/how-to-use-azure-key-vault-secrets-pipeline-activities). +- Connect to on-premises resources via Azure ExpressRoute or VPN over private endpoints. +- Enable the **Secure output** and **Secure input** options in pipeline activities when parameters store secrets or passwords. + +## Next steps + +In the [next article](implementation-success-evaluate-dedicated-sql-pool-design.md) in the *Azure Synapse success by design* series, learn how to evaluate your dedicated SQL pool design to identify issues and validate that it meets guidelines and requirements. diff --git a/articles/synapse-analytics/guidance/implementation-success-evaluate-dedicated-sql-pool-design.md b/articles/synapse-analytics/guidance/implementation-success-evaluate-dedicated-sql-pool-design.md new file mode 100644 index 0000000000000..3fd21ecd99fe4 --- /dev/null +++ b/articles/synapse-analytics/guidance/implementation-success-evaluate-dedicated-sql-pool-design.md @@ -0,0 +1,57 @@ +--- +title: "Synapse implementation success methodology: Evaluate dedicated SQL pool design" +description: "Learn how to evaluate your dedicated SQL pool design to identify issues and validate that it meets guidelines and requirements." +author: peter-myers +ms.author: v-petermyers +ms.reviewer: sngun +ms.service: synapse-analytics +ms.topic: conceptual +ms.date: 05/31/2022 +--- + +# Synapse implementation success methodology: Evaluate dedicated SQL pool design + +[!INCLUDE [implementation-success-context](includes/implementation-success-context.md)] + +You should evaluate your [dedicated SQL pool](../sql-data-warehouse/sql-data-warehouse-overview-what-is.md) design to identify issues and validate that it meets guidelines and requirements. 
By evaluating the design *before solution development begins*, you can avoid blockers and unexpected design changes. That way, you protect the project's timeline and budget.
+
+Synapse SQL has a scale-out architecture that distributes computational data processing across multiple nodes. Compute is separate from storage, which enables you to scale compute independently of the data in your system. For more information, see [Dedicated SQL pool (formerly SQL DW) architecture in Azure Synapse Analytics](../sql-data-warehouse/massively-parallel-processing-mpp-architecture.md).
+
+## Assessment analysis
+
+During the [assessment stage](implementation-success-assess-environment.md), you collected information about how the original system was deployed and details of the structures that were implemented. That information can now help you to identify gaps between what's implemented and what needs to be developed. For example, now's the time to consider the impact of designing round-robin tables instead of hash-distributed tables, or the performance benefits of correctly using replicated tables.
+
+## Review the target architecture
+
+To successfully deploy a dedicated SQL pool, it's important to adopt an architecture that's aligned with business requirements. For more information, see [Data warehousing in Microsoft Azure](/azure/architecture/data-guide/relational-data/data-warehousing.md).
+
+## Migration path
+
+A migration project for Azure Synapse is similar to any other database migration. You should consider that there might be differences between the original system and Azure Synapse.
+
+Ensure that you have a clear migration path established for:
+
+- Database objects, scripts, and queries
+- Data transfer (export from source and transit to the cloud)
+- Initial data load into Azure Synapse
+- Logins and users
+- Data access control (row-level security)
+
+For more information, see [Migrate a data warehouse to a dedicated SQL pool in Azure Synapse Analytics](../migration-guides/migrate-to-synapse-analytics-guide.md).
+
+## Feature gaps
+
+Determine whether the original system depends on features that aren't supported by Azure Synapse. Unsupported features in dedicated SQL pools include certain data types, like XML and spatial data types, and cursors.
+
+For more information, see:
+
+- [Table data types for dedicated SQL pool (formerly SQL DW) in Azure Synapse Analytics](../sql-data-warehouse/sql-data-warehouse-tables-data-types.md#identify-unsupported-data-types)
+- [Transact-SQL features supported in Azure Synapse SQL](../sql/overview-features.md)
+
+## Dedicated SQL pool testing
+
+As with any other project, you should conduct tests to ensure that your dedicated SQL pool meets the required business needs. It's critical to test data quality, data integration, security, and performance.
+
+## Next steps
+
+In the [next article](implementation-success-evaluate-serverless-sql-pool-design.md) in the *Azure Synapse success by design* series, learn how to evaluate your serverless SQL pool design to identify issues and validate that it meets guidelines and requirements.
diff --git a/articles/synapse-analytics/guidance/implementation-success-evaluate-project-plan.md b/articles/synapse-analytics/guidance/implementation-success-evaluate-project-plan.md new file mode 100644 index 0000000000000..6655576299de6 --- /dev/null +++ b/articles/synapse-analytics/guidance/implementation-success-evaluate-project-plan.md @@ -0,0 +1,54 @@ +--- +title: "Synapse implementation success methodology: Evaluate project plan" +description: "Learn how to evaluate your modern data warehouse project plan before the project starts." +author: peter-myers +ms.author: v-petermyers +ms.reviewer: sngun +ms.service: synapse-analytics +ms.topic: conceptual +ms.date: 05/31/2022 +--- + +# Synapse implementation success methodology: Evaluate project plan + +[!INCLUDE [implementation-success-context](includes/implementation-success-context.md)] + +In the lifecycle of the project, the most important and extensive planning is done *before implementation*. This article describes how to conduct a high-level review of your project plan. The aim is to ensure it contains critical artifacts and information to deliver a successful solution. It includes checklists of items that you should complete and approve before the project starts. + +A detailed review should follow the high-level project plan review. The detailed review should focus on the specific Azure Synapse components identified during the [assessment stage](implementation-success-assess-environment.md). + +## Evaluate the project plan + +Work through the following two high-level checklists, taking care to verify that each task aligns with the information gathered during the [assessment stage](implementation-success-assess-environment.md). + +First, ensure that your project plan defines the following points. + +> [!div class="checklist"] +> - **The core resource team:** Assemble a group of key people that have expertise crucial to the project. +> - **Scope:** Document how the project scope will be defined, verified, measured, and how the work breakdown will be defined and assigned. +> - **Schedule:** Define the time duration required to complete the project. +> - **Cost:** Estimate costs for internal and external resources, including infrastructure, hardware, and software. + +Second, having defined and assigned the work breakdown, prepare the following artifacts. + +> [!div class="checklist"] +> - **Migration plan:** Document the plan to migrate from your current system to Azure Synapse. Incorporate tasks for executing the migration within the project plan scope and schedule. +> - **Success criteria:** Define the critical success criteria for stakeholders (or the project sponsor), including go and no-go criteria. +> - **Quality assurance:** Define how to conduct code reviews, and the development, staging, and production promotion approval processes. +> - **Test plan:** Define test cases, success criteria for unit, integration, user testing, and metrics to validate all deliverables. Incorporate tasks for developing and executing the test plans within the project plan scope and schedule. + +## Evaluate project plan detailed tasks + +Once the high-level project plan review is complete and approved, the next step is to drill down into each component of the project plan. + +Identify the project plan components that address each aspect of Azure Synapse as it's intended for use in your solution. 
Also, validate that the project plan accounts for all the effort and resources required to develop, test, deploy, and operate your solution by evaluating:
+
+- The workspace project plan.
+- The data integration project plan.
+- The dedicated SQL pool project plan.
+- The serverless SQL pool project plan.
+- The Spark pool project plan.
+
+## Next steps
+
+In the [next article](implementation-success-evaluate-solution-development-environment-design.md) in the *Azure Synapse success by design* series, learn how to evaluate the environments for your modern data warehouse project to support development, testing, and production.
diff --git a/articles/synapse-analytics/guidance/implementation-success-evaluate-serverless-sql-pool-design.md b/articles/synapse-analytics/guidance/implementation-success-evaluate-serverless-sql-pool-design.md
new file mode 100644
index 0000000000000..d2be0cf5685cc
--- /dev/null
+++ b/articles/synapse-analytics/guidance/implementation-success-evaluate-serverless-sql-pool-design.md
@@ -0,0 +1,66 @@
+---
+title: "Synapse implementation success methodology: Evaluate serverless SQL pool design"
+description: "Learn how to evaluate your serverless SQL pool design to identify issues and validate that it meets guidelines and requirements."
+author: peter-myers
+ms.author: v-petermyers
+ms.reviewer: sngun
+ms.service: synapse-analytics
+ms.topic: conceptual
+ms.date: 05/31/2022
+---
+
+# Synapse implementation success methodology: Evaluate serverless SQL pool design
+
+[!INCLUDE [implementation-success-context](includes/implementation-success-context.md)]
+
+You should evaluate your [serverless SQL pool](../sql/on-demand-workspace-overview.md) design to identify issues and validate that it meets guidelines and requirements. By evaluating the design *before solution development begins*, you can avoid blockers and unexpected design changes. That way, you protect the project's timeline and budget.
+
+The architectural separation of storage and compute is an established and frequently used pattern for modern data and analytics platforms and services. It provides cost savings and more flexibility, allowing independent, on-demand scaling of your storage and compute. Synapse SQL serverless extends this pattern by adding the capability to query your data lake data directly. There's no need to worry about compute management for self-service types of workloads.
+
+## Fit gap analysis
+
+When planning to implement SQL serverless pools within Azure Synapse, you first need to ensure serverless pools are the right fit for your workloads. You should consider operational excellence, performance efficiency, reliability, and security.
+
+### Operational excellence
+
+For operational excellence, evaluate the following points.
+
+- **Solution development environment:** Within this methodology, there's an evaluation of the [solution development environment](implementation-success-evaluate-solution-development-environment-design.md). Identify how the environments (development, test, and production) are designed to support solution development. Commonly, you'll find a production environment and non-production environments (for development and test). You should find Synapse workspaces in all of the environments. In most cases, you'll be obliged to segregate your production and development/test users and workloads.
+- **Synapse workspace design:** Within this methodology, there's an evaluation of the [Synapse workspace design](implementation-success-evaluate-workspace-design.md).
Identify how the workspaces have been designed for your solution. Become familiar with the design and know whether the solution will use a single workspace or whether multiple workspaces form part of the solution. Know why a single or multiple workspace design was chosen. A multi-workspace design is often chosen to enforce strict security boundaries. +- **Deployment:** SQL serverless is available on-demand with every Synapse workspace, so it doesn't require any special deployment actions. Check regional proximity of the service and that of the Azure Data Lake Storage Gen2 (ADLS Gen2) account that it's connected to. +- **Monitoring:** Check whether built-in monitoring is sufficient and whether any external services need to be put in place to store historical log data. Log data allows analyzing changes in performance and allows you to define alerting or triggered actions for specific circumstances. + +### Performance efficiency + +Unlike traditional database engines, SQL serverless doesn't rely on its own optimized storage layer. For that reason, its performance is heavily dependent on how data is organized in ADLS Gen2. For performance efficiency, evaluate the following points. + +- **Data ingestion:** Review how data is stored in the data lake. File sizes, the number of files, and folder structure all have an impact on performance. Keep in mind that while some file sizes might work for SQL serverless, they may impose issues for efficient processing or consumption by other engines or applications. You'll need to evaluate the data storage design and validate it against all of the data consumers, including SQL serverless and any other data tools that form part of your solution. +- **Data placement:** Evaluate whether your design has unified and defined common patterns for data placement. Ensure that directory branching can support your security requirements. There are a few common patterns that can help you keep your time series data organized. Whatever your choice, ensure that it also works with other engines and workloads. Also, validate whether it can help partition auto-discovery for Spark applications and external tables. +- **Data formats:** In most cases, SQL serverless will offer the best performance and better compatibility feature-wise by using a Parquet format. Verify your performance and compatibility requirements, because while Parquet improves performance - thanks to better compression and reduction of IO (by reading only required columns needed for analysis) - it requires more compute resources. Also, because some source systems don't natively support Parquet as an export format, it could lead to more transformation steps in your pipelines and/or dependencies in your overall architecture. +- **Exploration:** Every industry is different. In many cases, however, there are common data access patterns found in the most frequent-run queries. Patterns typically involve filtering, and aggregations by dates, categories, or geographic regions. Identify your most common filtering criteria, and relate them to how much data is read/discarded by the most frequent-run queries. Validate whether the information on the data lake is organized to favor your exploration requirements and expectations. For the queries identified in your design and in your assessment, see whether you can eliminate unnecessary partitions in your OPENROWSET path parameter, or - if there are external tables - whether creating more indexes can help. + +### Reliability + +For reliability, evaluate the following points. 
+ +- **Availability:** Validate any availability requirements that were identified during the [assessment stage](implementation-success-assess-environment.md). While there aren't any specific SLAs for SQL serverless, there's a 30-minute timeout for query execution. Identify the longest running queries from your assessment and validate them against your serverless SQL design. A 30-minute timeout could break the expectations for your workload and appear as a service problem. +- **Consistency:** SQL serverless is designed primarily for read workloads. So, validate whether all consistency checks have been performed during the data lake data provisioning and formation process. Keep abreast of new capabilities, like [Delta Lake](/spark/apache-spark-what-is-delta-lake.md) open-source storage layer, which provides support for ACID (atomicity, consistency, isolation, and durability) guarantees for transactions. This capability allows you to implement effective [lambda or kappa architectures](/azure/architecture/data-guide/big-data/) to support both streaming and batch use cases. Be sure to evaluate your design for opportunities to apply new capabilities but not at the expense of your project's timeline or cost. +- **Backup:** Review any disaster recovery requirements that were identified during the assessment. Validate them against your SQL serverless design for recovery. SQL serverless itself doesn't have its own storage layer and that would require handling snapshots and backup copies of your data. The data store accessed by serverless SQL is external (ADLS Gen2). Review the recovery design in your project for these datasets. + +### Security + +Organization of your data is important for building flexible security foundations. In most cases, different processes and users will require different permissions and access to specific sub areas of your data lake or logical data warehouse. + +For security, evaluate the following points. + +- **Data storage:** Using the information gathered during the [assessment stage](implementation-success-assess-environment.md), identify whether typical *Raw*, *Stage*, and *Curated* data lake areas need to be placed on the same storage account instead of independent storage accounts. The latter might result in more flexibility in terms of roles and permissions. It can also add more input/output operations per second (IOPS) capacity that might be needed if your architecture must support heavy and simultaneous read/write workloads (like real-time or IoT scenarios). Validate whether you need to segregate further by keeping your sandboxed and master data areas on separate storage accounts. Most users won't need to update or delete data, so they don't need write permissions to the data lake, except for sandboxed and private areas. +- From your assessment information, identify whether any requirements rely on security features like [Always Encrypted](/sql/relational-databases/security/encryption/always-encrypted-database-engine?view=sql-server-ver15&viewFallbackFrom=azure-sqldw-latest&preserve-view=true), [Dynamic data masking](/azure/azure-sql/database/dynamic-data-masking-overview?view=azuresql&preserve-view=true) or [Row-level security](/sql/relational-databases/security/row-level-security?view=azure-sqldw-latest&preserve-view=true). Validate the availability of these features in specific scenarios, like when used with the OPENROWSET function. Anticipate potential workarounds that may be required. 
+- From your assessment information, identify the best authentication methods. Consider Azure Active Directory (Azure AD) service principals, shared access signature (SAS), and when and how authentication pass-through can be used and integrated with the customer's exploration tool of choice. Evaluate the design and validate that the best authentication method forms part of the design.
+
+### Other considerations
+
+Review your design and check whether you have put in place [best practices and recommendations](../sql/best-practices-serverless-sql-pool.md). Give special attention to filter optimization and collation to ensure that predicate pushdown works properly.
+
+## Next steps
+
+In the [next article](implementation-success-evaluate-spark-pool-design.md) in the *Azure Synapse success by design* series, learn how to evaluate your Spark pool design to identify issues and validate that it meets guidelines and requirements.
diff --git a/articles/synapse-analytics/guidance/implementation-success-evaluate-solution-development-environment-design.md b/articles/synapse-analytics/guidance/implementation-success-evaluate-solution-development-environment-design.md
new file mode 100644
index 0000000000000..2f50c43f79f6e
--- /dev/null
+++ b/articles/synapse-analytics/guidance/implementation-success-evaluate-solution-development-environment-design.md
@@ -0,0 +1,48 @@
+---
+title: "Synapse implementation success methodology: Evaluate solution development environment design"
+description: "Learn how to set up multiple environments for your modern data warehouse project to support development, testing, and production."
+author: peter-myers
+ms.author: v-petermyers
+ms.reviewer: sngun
+ms.service: synapse-analytics
+ms.topic: conceptual
+ms.date: 05/31/2022
+---
+
+# Synapse implementation success methodology: Evaluate solution development environment design
+
+[!INCLUDE [implementation-success-context](includes/implementation-success-context.md)]
+
+Solution development and the environment within which it's performed are key to the success of your project. Regardless of your selected project methodology (like waterfall, Agile, or Scrum), you should set up multiple environments to support development, testing, and production. You should also define clear processes for promoting changes between environments.
+
+Setting up a modern data warehouse environment for both production and pre-production use can be complex. Keep in mind that one of the key design decisions is automation. Automation helps increase productivity while minimizing the risk of errors. Further, your environments should support future agile development, including the addition of new workloads, like data science or real-time. During the design review, produce a solution development environment design that will support your solution not only for the current project but also for ongoing support and development of your solution.
+
+## Solution development environment design
+
+The environment design should include the production environment, which hosts the production solution, and at least one non-production environment. Most environment designs include two non-production environments: one for development and another for testing, Quality Assurance (QA), and User Acceptance Testing (UAT). Typically, environments are hosted in separate Azure subscriptions. Consider creating a production subscription and a non-production subscription.
This separation will provide a clear security boundary and delineation between production and non-production. + +Ideally, you should establish three environments. + +- **Development:** The environment within which your data and analytics solutions are built. Determine whether to provide sandboxes for developers. Sandboxes can allow developers to make and test their changes in isolation, while a shared development environment will host integrated changes from the entire development team. +- **Test/QA/UAT:** The production-like environment for testing deployments prior to their release to production. +- **Production:** The final production environment. + +### Synapse workspaces + +For each Synapse workspace in your solution, the environment should include a production workspace and at least one non-production workspace for development and test/QA/UAT. Use the same name for all pools and artifacts across environments. Consistent naming will ease the promotion of workspaces to other environments. + +Promoting a workspace to another workspace is a two-part process: + +1. Use an [Azure Resource Manager template (ARM template)](../../azure-resource-manager/templates/overview.md) to create or update workspace resources. +1. Migrate artifacts like SQL scripts, notebooks, Spark job definitions, pipelines, datasets, and data flows by using [Azure Synapse continuous integration and delivery (CI/CD) tools in Azure DevOps or on GitHub](../cicd/continuous-integration-delivery.md). + +### Azure DevOps or GitHub + +Ensure that integration with Azure DevOps or GitHub is properly set up. Design a repeatable process that releases changes across development, Test/QA/UAT, and production environments.  + +>[!IMPORTANT] +> We recommend that sensitive configuration data always be stored securely in [Azure Key Vault](/azure/key-vault/general/basic-concepts.md). Use Azure Key Vault to maintain a central, secure location for sensitive configuration data, like database connection strings. That way, appropriate services can access configuration data from within each environment. + +## Next steps + +In the [next article](implementation-success-evaluate-team-skill-sets.md) in the *Azure Synapse success by design* series, learn how to evaluate your team of skilled resources that will implement your Azure Synapse solution. diff --git a/articles/synapse-analytics/guidance/implementation-success-evaluate-spark-pool-design.md b/articles/synapse-analytics/guidance/implementation-success-evaluate-spark-pool-design.md new file mode 100644 index 0000000000000..ec91332664109 --- /dev/null +++ b/articles/synapse-analytics/guidance/implementation-success-evaluate-spark-pool-design.md @@ -0,0 +1,81 @@ +--- +title: "Synapse implementation success methodology: Evaluate Spark pool design" +description: "Learn how to evaluate your Spark pool design to identify issues and validate that it meets guidelines and requirements." +author: peter-myers +ms.author: v-petermyers +ms.reviewer: sngun +ms.service: synapse-analytics +ms.topic: conceptual +ms.date: 05/31/2022 +--- + +# Synapse implementation success methodology: Evaluate Spark pool design + +[!INCLUDE [implementation-success-context](includes/implementation-success-context.md)] + +You should evaluate your [Apache Spark pool](../spark/apache-spark-overview.md) design to identify issues and validate that it meets guidelines and requirements. By evaluating the design *before solution development begins*, you can avoid blockers and unexpected design changes. 
That way, you protect the project's timeline and budget.
+
+Apache Spark in Synapse brings Apache Spark parallel data processing to Azure Synapse Analytics. This evaluation provides guidance on when Apache Spark in Azure Synapse is - or isn't - the right fit for your workload. It describes points to consider when you're evaluating your solution design elements that incorporate Spark pools.
+
+## Fit gap analysis
+
+When planning to implement Spark pools with Azure Synapse, first ensure they're the best fit for your workload.
+
+Consider the following points.
+
+- Does your workload require data engineering/data preparation?
+  - Apache Spark works best for workloads that require:
+    - Data cleaning.
+    - Transforming semi-structured data, like XML, into a relational format.
+    - Complex free-text transformation, like fuzzy matching or natural language processing (NLP).
+    - Data preparation for machine learning (ML).
+- Does your workload for data engineering/data preparation involve complex or simple transformations? And, are you looking for a low-code/no-code approach?
+  - For simple transformations, like removing columns, changing column data types, or joining datasets, consider creating an Azure Synapse pipeline by using a data flow activity.
+  - Data flow activities provide a low-code/no-code approach to prepare your data.
+- Does your workload require ML on big data?
+  - Apache Spark works well for large datasets that will be used for ML. If you're using small datasets, consider using [Azure Machine Learning](../../machine-learning/overview-what-is-azure-ml.md) as the compute service.
+- Do you plan to perform data exploration or ad hoc query analysis on big data?
+  - Apache Spark in Azure Synapse provides Python/Scala/SQL/.NET-based data exploration. However, if you need a full Transact-SQL (T-SQL) experience, consider using a [serverless SQL pool](../sql/on-demand-workspace-overview.md).
+- Do you have a current Spark/Hadoop workload and do you need a unified big data platform?
+  - Azure Synapse provides a unified analytical platform for working with big data. There are Spark and SQL serverless pools for ad hoc queries, and the dedicated SQL pool for reporting and serving data.
+  - Moving a Spark/Hadoop workload from on-premises (or another cloud environment) may involve some refactoring that you should take into consideration.
+  - If you're looking for a lift-and-shift approach for your Apache big data environment from on-premises to the cloud, and you need to meet a strict data engineering service level agreement (SLA), consider using [Azure HDInsight](../../hdinsight/hdinsight-overview.md).
+
+## Architecture considerations
+
+To ensure that your Apache Spark pool meets your requirements for operational excellence, performance, reliability, and security, there are key areas to validate in your architecture.
+
+### Operational excellence
+
+For operational excellence, evaluate the following points.
+
+- **Environment:** When configuring your environment, design your Spark pool to take advantage of features such as [autoscale and dynamic allocation](../spark/apache-spark-autoscale.md). Also, to reduce costs, consider enabling the [automatic pause](../spark/apache-spark-pool-configurations.md#automatic-pause) feature.
+- **Package management:** Determine whether required Apache Spark libraries will be used at a workspace, pool, or session level. For more information, see [Manage libraries for Apache Spark in Azure Synapse Analytics](../spark/apache-spark-azure-portal-add-libraries.md).
+- **Monitoring:** Apache Spark in Azure Synapse provides built-in monitoring of [Spark pools](../monitoring/how-to-monitor-spark-pools.md) and [applications](../monitoring/apache-spark-applications.md) with the creation of each spark session. Also consider implementing application monitoring with [Azure Log Analytics](../spark/apache-spark-azure-log-analytics.md) or [Prometheus and Grafana](../spark/use-prometheus-grafana-to-monitor-apache-spark-application-level-metrics.md), which you can use to visualize metrics and logs. + +### Performance efficiency + +For performance efficiency, evaluate the following points. + +- **File size and file type:** File size and the number of files have an impact on performance. Design the architecture to ensure that the file types are conducive to native ingestion with Apache Spark. Also, lean toward fewer large files instead of many small files. +- **Partitioning:** Identify whether partitioning at the folder and/or file level will be implemented for your workload. *Folder partitions* limit the amount of data to search and read. *File partitions* reduce the amount of data to be searched inside the file - but only apply to specific file formats that should be considered in the initial architecture. + +### Reliability + +For reliability, evaluate the following points. + +- **Availability:** Spark pools have a start time of three to four minutes. It could take longer if there are many libraries to install. When designing batch vs. streaming workloads, identify the SLA for executing the job from your assessment information and determine which architecture best meets your needs. Also, take into consideration that each job execution creates a new Spark pool cluster. +- **Checkpointing:** Apache Spark streaming has a built-in checkpointing mechanism. Checkpointing allows your stream to recover from the last processed entry should there be a failure on a node in your pool. + +### Security + +For security, evaluate the following points. + +- **Data access:** Data access must be considered for the Azure Data Lake Storage (ADLS) account that's attached to the Synapse workspace. In addition, determine the security levels required to access any data that isn't within the Azure Synapse environment. Refer to the information you collected during the [assessment stage](implementation-success-assess-environment.md). +- **Networking:** Review the networking information and requirements gathered during your assessment. If the design involves a managed virtual network with Azure Synapse, consider the implications this requirement will have on Apache Spark in Azure Synapse. One implication is the inability to use Spark SQL while accessing data. + +## Next steps + +In the [next article](implementation-success-evaluate-project-plan.md) in the *Azure Synapse success by design* series, learn how to evaluate your modern data warehouse project plan before the project starts. + +For more information on best practices, see [Apache Spark for Azure Synapse Guidance](https://azuresynapsestorage.blob.core.windows.net/customersuccess/Guidance%20Video%20Series/EGUI_Synapse_Spark_Guidance.pdf). 
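+
+As a concrete illustration of the partitioning and checkpointing considerations described in the performance efficiency and reliability sections above, the following PySpark sketch shows folder-partitioned Parquet writes and a streaming checkpoint location. It's a minimal sketch only: the storage account (`contosodatalake`), container (`curated`), folder names, and the `Year`/`Month` columns are placeholder assumptions, not part of any specific solution design.
+
+```python
+# Minimal PySpark sketch (placeholder storage account, container, and columns).
+from pyspark.sql import SparkSession
+
+spark = SparkSession.builder.getOrCreate()
+lake = "abfss://curated@contosodatalake.dfs.core.windows.net"
+
+# Batch write: fewer, larger Parquet files, folder-partitioned by columns that
+# are commonly used as filters, so engines can skip whole folders at read time.
+sales = spark.read.parquet(f"{lake}/raw/sales")
+(sales
+    .repartition("Year", "Month")
+    .write
+    .mode("overwrite")
+    .partitionBy("Year", "Month")
+    .parquet(f"{lake}/sales"))
+
+# Streaming write: a checkpoint location lets the stream recover from the last
+# processed entry if a node in the Spark pool fails.
+events = spark.readStream.format("rate").load()  # placeholder source
+query = (events.writeStream
+    .format("parquet")
+    .option("path", f"{lake}/events")
+    .option("checkpointLocation", f"{lake}/_checkpoints/events")
+    .trigger(processingTime="1 minute")
+    .start())
+```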
diff --git a/articles/synapse-analytics/guidance/implementation-success-evaluate-team-skill-sets.md b/articles/synapse-analytics/guidance/implementation-success-evaluate-team-skill-sets.md new file mode 100644 index 0000000000000..24ba66866a179 --- /dev/null +++ b/articles/synapse-analytics/guidance/implementation-success-evaluate-team-skill-sets.md @@ -0,0 +1,141 @@ +--- +title: "Synapse implementation success methodology: Evaluate team skill sets" +description: "Learn how to evaluate your team of skilled resources that will implement your Azure Synapse solution." +author: peter-myers +ms.author: v-petermyers +ms.reviewer: sngun +ms.service: synapse-analytics +ms.topic: conceptual +ms.date: 05/31/2022 +--- + +# Synapse implementation success methodology: Evaluate team skill sets + +[!INCLUDE [implementation-success-context](includes/implementation-success-context.md)] + +Solution development requires a team comprising individuals with many different skills. It's important for the success of your solution that your team has the necessary skills to successfully complete their assigned tasks. This evaluation takes an honest and critical look at the skill level of your project resources, and it provides you with a list of roles that are often required during the implementation of an Azure Synapse solution. Your team needs to possess relevant experience and skills to complete their assigned project tasks within the expected time frame. + +## Microsoft learning level definitions + +This article uses the Microsoft standard level definitions for describing learning levels. + +| Level | Description | +|:-|:-| +| 100 | Assumes little or no expertise with the topic, and covers topic concepts, functions, features, and benefits. | +| 200 | Assumes 100-level knowledge and provides specific details about the topic. | +| 300 | *Advanced material.* Assumes 200-level knowledge, in-depth understanding of features in a real-world environment, and strong coding skills. Provides a detailed technical overview of a subset of product/technology features, covering architecture, performance, migration, deployment, and development. | +| 400 | *Expert material.* Assumes a deep level of technical knowledge and experience, and a detailed, thorough understanding of the topic. Provides expert-to-expert interaction and coverage of specialized topics. | + +## Roles, resources, and readiness + +Successfully delivering an Azure Synapse solution involves many different roles and skill sets. This topic describes roles commonly required to implement a successful project. Not all of these roles will be required for all projects, and not all of these roles will be required for the entire duration of the project. However, these roles will be required to complete some critical project tasks. You should evaluate the skill level of the individuals executing tasks to ensure their success in completing their job. + +Refer to your [project plan](implementation-success-evaluate-project-plan.md) and verify that these resources and roles were identified. Also, check to see if your project plan identifies other resources and roles. In many cases, you may find that individuals belong to more than one role. For example, the Azure administrator could also be your Azure network administrator. It's also possible that a role in your organization is split between multiple individuals. For example, the Synapse administrator doesn't get involved in Synapse SQL security. In this case, adjust your evaluation accordingly. 
+
+Evaluate the following points.
+
+- Identify the roles that will be required by your solution implementation.
+- Identify the specific individuals in your project who will fulfill each role.
+- Identify the specific project tasks that will be performed by each individual.
+- Assign a [learning level](#microsoft-learning-level-definitions) to each individual for their tasks and roles.
+
+Typically, a successful implementation requires that each individual has at least a level-300 proficiency for the tasks they'll perform. It's highly recommended that individuals at level-200 (or below) be provided with guidance and instruction to raise their level of understanding prior to beginning their project tasks. In this case, involve a level-300 (or above) individual to mentor and review. It's recommended that you adjust the project plan timeline and effort estimates to factor in learning new skills.
+
+> [!NOTE]
+> We recommend you align your roles with the built-in roles. There are two sets of built-in roles: [RBAC roles for Azure Synapse](../security/synapse-workspace-synapse-rbac-roles.md) and [RBAC roles built into Azure](../../role-based-access-control/built-in-roles.md). These two sets of built-in roles and permissions are independent.
+
+### Azure administrator
+
+The *Azure administrator* manages administrative aspects of Azure. They're responsible for subscriptions, region identification, resource groups, monitoring, and portal access. They also provision resources, like resource groups, storage accounts, Azure Data Factory (ADF), Microsoft Purview, and more.
+
+### Security administrator
+
+The *security administrator* must have local knowledge of the existing security landscape and requirements. This role collaborates with the [Synapse administrator](#synapse-administrator), [Synapse database administrator](#synapse-database-administrator), [Synapse Spark administrator](#synapse-spark-administrator), and other roles to set up security requirements. The security administrator could also be an Azure Active Directory (Azure AD) administrator.
+
+### Network administrator
+
+The *network administrator* must have local knowledge of the existing networking landscape and requirements. This role requires Azure networking skills and Synapse networking skills.
+
+### Synapse administrator
+
+The *Synapse administrator* is responsible for the administration of the overall Azure Synapse environment. This role is responsible for the availability and scale of workspace resources, data lake administration, analytics runtimes, and workspace administration and monitoring. This role works closely with all other roles to ensure access to Azure Synapse, the availability of analytics services, and sufficient scale. Other responsibilities include:
+
+- Provision Synapse workspaces.
+- Set up Azure Synapse networking and security requirements.
+- Monitor Synapse workspace activity.
+
+### Synapse database administrator
+
+The *Synapse database administrator* is responsible for the design, implementation, maintenance, and operational aspects of the SQL pools (serverless and dedicated). This role is responsible for the overall availability, consistent performance, and optimizations of the SQL pools. This role is also responsible for managing the security of the data in the databases, granting privileges over the data, and granting or denying user access.
Other responsibilities include:
+
+- Perform various dedicated SQL pool administration functions, like provisioning, scale, pauses, resumes, restores, workload management, monitoring, and others.
+- Perform various serverless SQL pool administration functions, like securing, monitoring, and others.
+- Set up SQL pool database security.
+- Performance tuning and troubleshooting.
+
+### Synapse Spark administrator
+
+The *Synapse Spark administrator* is responsible for the design, implementation, maintenance, and operational aspects of the Spark pools. This role is responsible for the overall availability, consistent performance, and optimizations of the Spark pools. This role is also responsible for managing the security of the data, granting privileges over the data, and granting or denying user access. Other responsibilities include:
+
+- Perform various Spark pool administration functions, like provisioning, monitoring, and others.
+- Set up Spark pool data security.
+- Notebook troubleshooting and performance.
+- Pipeline Spark execution troubleshooting and performance.
+
+### Synapse SQL pool database developer
+
+The *Synapse SQL pool database developer* is responsible for database design and development. For dedicated SQL pools, responsibilities include table structure and indexing, developing database objects, and schema design. For serverless SQL pools, responsibilities include external tables, views, and schema design. Other responsibilities include:
+
+- Logical and physical database design.
+- Table design, including distribution, indexing, and partitioning.
+- Programming object design and development, including stored procedures and functions.
+- Design and development of other performance optimizations, including materialized views, workload management, and more.
+- Design and implementation of [data protection](security-white-paper-data-protection.md), including data encryption.
+- Design and implementation of [access control](security-white-paper-access-control.md), including object-level security, row-level security, column-level security, dynamic data masking, and Synapse role-based access control.
+- Monitoring, auditing, performance tuning, and troubleshooting.
+
+### Spark developer
+
+The *Spark developer* is responsible for creating notebooks and executing Spark processing by using Spark pools.
+
+### Data integration administrator
+
+The *Data integration administrator* is responsible for setting up and securing data integration by using Synapse pipelines, ADF, or third-party integration tools, and for performing all configuration and security functions to support the data integration tools.
+
+For Synapse pipelines and ADF, other responsibilities include setting up the integration runtime (IR), self-hosted integration runtime (SHIR), and/or SSIS integration runtime (SSIS-IR). Knowledge of virtual machine provisioning - on-premises or in Azure - may be required.
+
+### Data integration developer
+
+The *Data integration developer* is responsible for developing ETL/ELT and other data integration processes by using the solution's selected data integration tools.
+
+### Data consumption tools administrator
+
+The *Data consumption tools administrator* is responsible for the data consumption tools. Tools can include [Microsoft Power BI](https://powerbi.microsoft.com/), Microsoft Excel, Tableau, and others. The administrator of each tool will need to set up permissions to grant access to data in Azure Synapse.
+ +### Data engineer + +The *Data engineer* role is responsible for implementing data-related artifacts, including data ingestion pipelines, cleansing and transformation activities, and data stores for analytical workloads. It involves using a wide range of data platform technologies, including relational and non-relational databases, file stores, and data streams. + +Data engineers are responsible for ensuring that the privacy of data is maintained within the cloud, and spanning from on-premises to the cloud data stores. They also own the management and monitoring of data stores and data pipelines to ensure that data loads perform as expected. + +### Data scientist + +The *Data scientist* derives value and insights from data. Data scientists find innovative ways to work with data and help teams achieve a rapid return on investment (ROI) on analytics efforts. They work with data curation and advanced search, matching, and recommendation algorithms. Data scientists need access to the highest quality data and substantial amounts of computing resources to extract deep insights. + +### Data analyst + +The *Data analyst* enables businesses to maximize the value of their data assets. They transform raw data into relevant insights based on identified business requirements. Data analysts are responsible for designing and building scalable data models, cleaning, and transforming data, and presenting advanced analytics in reports and visualizations. + +### Azure DevOps engineer + +The *Azure DevOps engineer* is responsible for designing and implementing strategies for collaboration, code, infrastructure, source control, security, compliance, continuous integration, testing, delivery, and monitoring of an Azure Synapse project. + +## Learning resources and certifications + +If you're interested to learn about Microsoft Certifications that may help assess your team's readiness, browse the available certifications for [Azure Synapse Analytics](/learn/certifications/browse/?expanded=azure&products=azure-synapse-analytics). + +To complete online, self-paced training, browse the available learning paths and modules for [Azure Synapse Analytics](/learn/browse/?filter-products=synapse&products=azure-synapse-analytics). + +## Next steps + +In the [next article](implementation-success-perform-operational-readiness-review.md) in the *Azure Synapse success by design* series, learn how to perform an operational readiness review to evaluate your solution for its preparedness to provide optimal services to users. diff --git a/articles/synapse-analytics/guidance/implementation-success-evaluate-workspace-design.md b/articles/synapse-analytics/guidance/implementation-success-evaluate-workspace-design.md new file mode 100644 index 0000000000000..634c7979fcf42 --- /dev/null +++ b/articles/synapse-analytics/guidance/implementation-success-evaluate-workspace-design.md @@ -0,0 +1,100 @@ +--- +title: "Synapse implementation success methodology: Evaluate workspace design" +description: "Learn how to evaluate the Synapse workspace design and validate that it meets guidelines and requirements." 
+author: peter-myers
+ms.author: v-petermyers
+ms.reviewer: sngun
+ms.service: synapse-analytics
+ms.topic: conceptual
+ms.date: 05/31/2022
+---
+
+# Synapse implementation success methodology: Evaluate workspace design
+
+[!INCLUDE [implementation-success-context](includes/implementation-success-context.md)]
+
+A Synapse workspace is a unified graphical user experience that stitches together your analytical and data processing engines, data lakes, databases, tables, datasets, and reporting artifacts along with code and process orchestration. Considering the number of technologies and services that are integrated into the Synapse workspace, ensure that the key components are included in your design.
+
+## Synapse workspace design review
+
+Identify whether your solution design involves one Synapse workspace or multiple workspaces. Determine the drivers of this design. While there might be different reasons, in most cases the reason for multiple workspaces is either security segregation or billing segregation. When determining the number of workspaces and database boundaries, keep in mind that there's a limit of 20 workspaces per subscription.
+
+Identify which elements or services within each workspace need to be shared and with which resources. Resources can include data lakes, integration runtimes (IRs), metadata or configurations, and code. Determine why this particular design was chosen in terms of potential synergies. Ask yourself whether these synergies justify the extra cost and management overhead.
+
+## Data lake design review
+
+We recommend that the data lake (if part of your solution) be properly tiered. You should divide your data lake into three major areas that relate to *Bronze*, *Silver*, and *Gold* datasets. Bronze - or the raw layer - might reside on its own separate storage account because it has stricter access controls due to unmasked sensitive data that it might store.
+
+## Security design review
+
+Review the security design for the workspace and compare it with the information you gathered during the assessment. Ensure all of the requirements are met, and all of the constraints have been taken into account. For ease of management, we recommend that users be organized into groups with appropriate permission profiles: you can simplify access control by using security groups that align with roles. That way, network administrators can add or remove users from appropriate security groups to manage access.
+
+Serverless SQL pools and Apache Spark tables store their data in an Azure Data Lake Storage Gen2 (ADLS Gen2) container that's associated with the workspace. User-installed Apache Spark libraries are also managed in this same storage account. To enable these use cases, both users and the workspace managed service identity (MSI) must be added to the **Storage Blob Data Contributor** role of the ADLS Gen2 storage container. Verify this requirement against your security requirements.
+
+Dedicated SQL pools provide a rich set of security features to encrypt and mask sensitive data. Both dedicated and serverless SQL pools enable the full surface area of SQL Server permissions, including built-in roles, user-defined roles, SQL authentication, and Azure Active Directory (Azure AD) authentication. Review the security design for your solution's dedicated SQL pool and serverless SQL pool access and data.
+
+Review the security plan for your data lake and all the ADLS Gen2 storage accounts (and others) that will form part of your Azure Synapse Analytics solution.
ADLS Gen2 storage isn't itself a compute engine and so it doesn't have a built-in ability to selectively mask data attributes. You can apply ADLS Gen2 permissions at the storage account or container level by using role-based access control (RBAC) and/or at the folder or file level by using access control lists (ACLs). Review the design carefully and strive to avoid unnecessary complexity. + +Here are some points to consider for the security design. + +- Make sure Azure AD set up requirements are included in the design. +- Check for cross-tenant scenarios. Such issues may arise because some data is in another Azure tenant, or it needs to move to another tenant, or it needs to be accessed by users from another tenant. Ensure these scenarios are considered in your design. +- What are the roles for each workspace? How will they use the workspace? +- How is the security designed within the workspace? + - Who can view all scripts, notebooks, and pipelines? + - Who can execute scripts and pipelines? + - Who can create/pause/resume SQL and Spark pools? + - Who can publish changes to the workspace? + - Who can commit changes to source control? +- Will pipelines access data by using stored credentials or the workspace managed identity? +- Do users have the appropriate access to the data lake to browse the data in Synapse Studio? +- Is the data lake properly secured by using the appropriate combination of RBAC and ACLs? +- Have the SQL pool user permissions been correctly set for each role (data scientist, developer, administrator, business user, and others)? + +## Networking design review + +Here are some points to consider for the network design. + +- Is connectivity designed between all the resources? +- What is the networking mechanism to be used (Azure ExpressRoute, public Internet, or private endpoints)? +- Do you need to be able to securely connect to Synapse Studio? +- Has data exfiltration been taken into consideration? +- Do you need to connect to on-premises data sources? +- Do you need to connect to other cloud data sources or compute engines, such as Azure Machine Learning? +- Have Azure networking components, like network security groups (NSGs), been reviewed for proper connectivity and data movement? +- Has integration with the private DNS zones been taken into consideration? +- Do you need to be able to browse the data lake from within Synapse Studio or simply query data in the data lake with serverless SQL or PolyBase? + +Finally, identify all of your data consumers and verify that their connectivity is accounted for in the design. Check that network and security outposts allow your service to access required on-premises sources and that its authentication protocols and mechanisms are supported. In some scenarios, you might need to have more than one self-hosted IR or data gateway for SaaS solutions, like Microsoft Power BI. + +## Monitoring design review + +Review the design of the monitoring of the Azure Synapse components to ensure they meet the requirements and expectations identified during the assessment. Verify that monitoring of resources and data access has been designed, and that it identifies each monitoring requirement. A robust monitoring solution should be put in place as part of the first deployment to production. That way, failures can be identified, diagnosed, and addressed in a timely manner. Aside from the base infrastructure and pipeline runs, data should also be monitored. 
Depending on the Azure Synapse components in use, identify the monitoring requirements for each component. For example, if Spark pools form part of the solution, monitor the malformed record store.  + +Here are some points to consider for the monitoring design. + +- Who can monitor each resource type (pipelines, pools, and others)? +- How long do database activity logs need to be retained? +- Will workspace and database log retention use Log Analytics or Azure Storage? +- Will alerts be triggered in the event of a pipeline error? If so, who should be notified? +- What threshold level of a SQL pool should trigger an alert? Who should be notified? + +## Source control design review + +By default, a Synapse workspace applies changes directly to the Synapse service by using the built-in publish functionality. You can enable source control integration, which provides many advantages. Advantages include better collaboration, versioning, approvals, and release pipelines to promote changes through to development, test, and production environments. Azure Synapse allows a single source control repository per workspace, which can be either Azure DevOps Git or GitHub. + +Here are some points to consider for the source control design. + +- If using Azure DevOps Git, is the Synapse workspace and its repository in the same tenant? +- Who will be able to access source control? +- What permissions will each user be granted in source control? +- Has a branching and merging strategy been developed? +- Will release pipelines be developed for deployment to different environments? +- Will an approval process be used for merging and for release pipelines? + +> [!NOTE] +> The design of the development environment is of critical importance to the success of your project. If a development environment has been designed, it will be evaluated in a [separate stage of this methodology](implementation-success-evaluate-solution-development-environment-design.md). + +## Next steps + +In the [next article](implementation-success-evaluate-data-integration-design.md) in the *Azure Synapse success by design* series, learn how to evaluate the data integration design and validate that it meets guidelines and requirements. diff --git a/articles/synapse-analytics/guidance/implementation-success-overview.md b/articles/synapse-analytics/guidance/implementation-success-overview.md new file mode 100644 index 0000000000000..8e9207cea7940 --- /dev/null +++ b/articles/synapse-analytics/guidance/implementation-success-overview.md @@ -0,0 +1,123 @@ +--- +title: Azure Synapse implementation success by design +description: "Learn about the Azure Synapse success series of articles that's designed to help you deliver a successful implementation of Azure Synapse Analytics." +author: peter-myers +ms.author: v-petermyers +ms.reviewer: sngun +ms.service: synapse-analytics +ms.topic: conceptual +ms.date: 05/31/2022 +--- + +# Azure Synapse implementation success by design + +The *Azure Synapse implementation success by design* series of articles is designed to help you deliver a successful implementation of Azure Synapse Analytics. It describes a methodology to complement your solution implementation project. It includes suggested checks at strategic points during your project that can help assure a successful implementation. It's important to understand that the methodology shouldn't replace or change your chosen project management methodology (Scrum, Agile, or waterfall). 
Rather, it suggests validations that can improve the success of your project deployment to a production environment. + +[Azure Synapse](../overview-what-is.md) is an enterprise analytics service that accelerates time to insight across data warehouses and big data systems. It brings together the best of SQL technologies used in enterprise data warehousing, Spark technologies used for big data, pipelines for data integration and ETL/ELT, and deep integration with other Azure services, such as Power BI, Azure Cosmos DB, and Azure Machine Learning. + +:::image type="content" source="media/implementation-success-overview/azure-synapse-analytics-architecture.png" alt-text="Image shows the Azure Synapse Analytics in terms of data lake, analytics runtimes, and Synapse Studio."::: + +The methodology uses a strategic checkpoint approach to assess and monitor the progress of your project. The goals of these checkpoints are: + +- Proactive identification of possible issues and blockers. +- Continuous validation of the solution's fit to the use cases. +- Successful deployment to production. +- Smooth operation and monitoring once in production. + +The checkpoints are invoked at four milestones during the project: + +1. [Project planning](#project-planning-checkpoint) +1. [Solution development](#solution-development-checkpoint) +1. [Pre go-live](#pre-go-live-checkpoint) +1. [Post go-live](#post-go-live-checkpoint) + +## Project planning checkpoint + +The project planning checkpoint includes the solution evaluation, project plan evaluation, the solution development environment design evaluation, and the team skill sets evaluation. + +#### Solution evaluation + +Evaluate your entire solution with a focus on how it intends to use Azure Synapse. An assessment involves gathering data that will identify the required components of Azure Synapse, the interfaces each will have with other products, a review of the data sources, the data consumers, the roles, and use cases. This assessment will also gather data about the existing environment including detailed specifications from existing data warehouses, big data environments, and integration and data consumption tooling. The assessment will identify which Azure Synapse components will be implemented and therefore which evaluations and checkpoints should be made throughout the implementation effort. This assessment will also provide additional information to validate the design and implementation against requirements, constraints, and assumptions. + +Here's a list of tasks you should complete. + +1. [Assess](implementation-success-assess-environment.md) your environment to help evaluate the solution design. +1. Make informed technology decisions to implement Azure Synapse and identify the solution components to implement. +1. [Evaluate the workspace design](implementation-success-evaluate-workspace-design.md). +1. [Evaluate the data integration design](implementation-success-evaluate-data-integration-design.md). +1. [Evaluate the dedicated SQL pool design](implementation-success-evaluate-dedicated-sql-pool-design.md). +1. [Evaluate the serverless SQL pool design](implementation-success-evaluate-serverless-sql-pool-design.md). +1. [Evaluate the Spark pool design](implementation-success-evaluate-spark-pool-design.md). +1. Review the results of each evaluation and respond accordingly. + +#### Project plan evaluation + +Evaluate the project plan as it relates to the Azure Synapse requirements that need to be developed. 
This evaluation isn't about producing a project plan. Rather, the evaluation is about identifying any steps that could lead to blockers or that could impact on the project timeline. Once evaluated, you may need to make adjustments to the project plan. + +Here's a list of tasks you should complete. + +1. [Evaluate the project plan](implementation-success-evaluate-project-plan.md). +1. Evaluate project planning specific to the Azure Synapse components you plan to implement. +1. Review the results of each evaluation and respond accordingly. + +#### Solution development environment design evaluation + +Evaluate the environment that's to be used to develop the solution. Establish separate development, test, and production environments. Also, it's important to understand that setting up automated deployment and source code control is essential to a successful and smooth development effort. + +Here's a list of tasks you should complete. + +1. [Evaluate the solution development environment design](implementation-success-evaluate-solution-development-environment-design.md). +1. Review the results of each evaluation and respond accordingly. + +#### Team skill sets evaluation + +Evaluate the project team with a focus on their skill level and readiness to implement the Azure Synapse solution. The success of the project depends on having the correct skill sets and experience. Many different skill sets are required to implement an Azure Synapse solution, so ensure you identify gaps and secure suitable resources that have the required skill sets (or arrange for them to complete training). This evaluation is critical at this stage of your project because a lack of the proper skills can impact on both the timeline and the overall success of the project. + +Here's a list of tasks you should complete. + +1. [Evaluate the team skill sets](implementation-success-evaluate-team-skill-sets.md). +1. Secure skilled resources, or upskill resources to expand their capabilities. +1. Review the results of each evaluation and respond accordingly. + +### Solution development checkpoint + +The solution development checkpoint includes periodic quality checks and additional skill building. + +#### Periodic quality checks + +During solution development, you should make periodic checks to validate that the solution is being developed according to recommended practices. Check that the project use cases will be satisfied and that enterprise requirements are being met. For the purposes of this methodology, these checks are called *periodic quality checks*. + +Implement the following quality checks: + +- Quality checks for workspaces. +- Quality checks for data integration. +- Quality checks for dedicated SQL pools. +- Quality checks for serverless SQL pools. +- Quality checks for Spark pools. + +#### Additional skill building + +As the project progresses, identify whether more skill sets are needed. Take the time to determine whether more skill sets could improve the quality of the solution. Supplementing the team with more skill sets can help to avoid project delays and project timeline impacts. + +### Pre go-live checkpoint + +Before deploying your solution to production, we recommend that you perform reviews to assess the preparedness of the solution. + +The *pre go-live* checklist provides a final readiness check to successfully deploy to production. + +1. [Perform the operational readiness review](implementation-success-perform-operational-readiness-review.md). +1. 
[Perform the user readiness and onboarding plan review](implementation-success-perform-user-readiness-and-onboarding-plan-review.md).
+1. Review the results of each review and respond accordingly.
+
+### Post go-live checkpoint
+
+After deploying to production, we recommend that you validate that the solution operates as expected.
+
+The *post go-live* checklist provides a final readiness check for monitoring your Azure Synapse solution.
+
+1. [Perform the monitoring review](implementation-success-perform-monitoring-review.md).
+1. Continually monitor your Azure Synapse solution.
+
+## Next steps
+
+In the [next article](implementation-success-assess-environment.md) in the *Azure Synapse implementation success by design* series, learn how to assess your environment to help evaluate the solution design and make informed technology decisions to implement Azure Synapse.
diff --git a/articles/synapse-analytics/guidance/implementation-success-perform-monitoring-review.md b/articles/synapse-analytics/guidance/implementation-success-perform-monitoring-review.md
new file mode 100644
index 0000000000000..3d6bf82a7f8b5
--- /dev/null
+++ b/articles/synapse-analytics/guidance/implementation-success-perform-monitoring-review.md
@@ -0,0 +1,51 @@
+---
+title: "Synapse implementation success methodology: Perform monitoring review"
+description: "Learn how to perform monitoring of your Azure Synapse solution."
+author: peter-myers
+ms.author: v-petermyers
+ms.reviewer: sngun
+ms.service: synapse-analytics
+ms.topic: conceptual
+ms.date: 05/31/2022
+---
+
+# Synapse implementation success methodology: Perform monitoring review
+
+[!INCLUDE [implementation-success-context](includes/implementation-success-context.md)]
+
+Monitoring is a key part of the operationalization of any Azure solution. This article provides guidance on reviewing and configuring the monitoring of your Azure Synapse Analytics environment. Key to this activity is the identification of what needs to be monitored and who needs to review the monitoring results.
+
+Using your solution requirements and other data collected during the [assessment stage](implementation-success-assess-environment.md) and [solution development](implementation-success-evaluate-solution-development-environment-design.md), build a list of important behaviors and activities that need to be monitored in your production environment. As you build this list, identify the groups of users that will need access to monitoring information and build the procedures they can follow to respond to monitoring results.
+
+You can use [Azure Monitor](/azure/azure-monitor/overview) to provide base-level infrastructure metrics, alerts, and logs for most Azure services. Azure diagnostic logs are emitted by a resource to provide rich, frequent data about the operation of that resource. Azure Synapse can write diagnostic logs in Azure Monitor.
+
+For more information, see [Use Azure Monitor with your Azure Synapse Analytics workspace](../monitoring/how-to-monitor-using-azure-monitor.md).
+
+## Monitor dedicated SQL pools
+
+You can monitor a dedicated SQL pool by using Azure Monitor, alerting, dynamic management views (DMVs), and Log Analytics.
+
+- **Alerts:** You can set up alerts that send you an email or call a webhook when a certain metric reaches a predefined threshold. For example, you can receive an alert email when the database size grows too large.
For more information, see [Create alerts for Azure SQL Database and Azure Synapse Analytics using the Azure portal](/azure/azure-sql/database/alerts-insights-configure-portal). +- **DMVs:** You can use [DMVs](../sql-data-warehouse/sql-data-warehouse-manage-monitor.md) to monitor workloads to help investigate query executions in SQL pools. +- **Log Analytics:** [Log Analytics](/azure/azure-monitor/logs/log-analytics-tutorial) is a tool in the Azure portal that you can use to edit and run log queries from data collected by Azure Monitor. For more information, see [Monitor workload - Azure portal](../sql-data-warehouse/sql-data-warehouse-monitor-workload-portal.md). + +## Monitor serverless SQL pools + +You can monitor a serverless SQL pool by [monitoring your SQL requests](../monitoring/how-to-monitor-sql-requests.md) in Synapse Studio. That way, you can keep an eye on the status of running requests and review details of historical requests. + +## Monitor Spark pools + +You can [monitor your Apache Spark applications](../monitoring/apache-spark-applications.md) in Synapse Studio. That way, you can keep an eye on the latest status, issues, and progress. + +You can enable the Synapse Studio connector that's built in to Log Analytics. You can then collect and send Apache Spark application metrics and logs to your Log Analytics workspace. You can also use an Azure Monitor workbook to visualize the metrics and logs. For more information, see [Monitor Apache Spark applications with Azure Log Analytics](../spark/apache-spark-azure-log-analytics.md). + +## Monitor pipelines + + Azure Synapse allows you to create complex pipelines that automate and integrate your data movement, data transformation, and compute activities. You can author and monitor pipelines by using Synapse Studio to keep an eye on the latest status, issues, and progress of your pipelines. For more information, see [Use Synapse Studio to monitor your workspace pipeline runs](../monitoring/how-to-monitor-pipeline-runs.md). + +## Next steps + +For more information about this article, check out the following resources: + +- [Synapse implementation success methodology](implementation-success-overview.md) +- [Use Azure Monitor with your Azure Synapse Analytics workspace](../monitoring/how-to-monitor-using-azure-monitor.md) diff --git a/articles/synapse-analytics/guidance/implementation-success-perform-operational-readiness-review.md b/articles/synapse-analytics/guidance/implementation-success-perform-operational-readiness-review.md new file mode 100644 index 0000000000000..353fe0d420003 --- /dev/null +++ b/articles/synapse-analytics/guidance/implementation-success-perform-operational-readiness-review.md @@ -0,0 +1,87 @@ +--- +title: "Synapse implementation success methodology: Perform operational readiness review" +description: "Learn how to perform an operational readiness review to evaluate your solution for its preparedness to provide optimal services to users." +author: peter-myers +ms.author: v-petermyers +ms.reviewer: sngun +ms.service: synapse-analytics +ms.topic: conceptual +ms.date: 05/31/2022 +--- + +# Synapse implementation success methodology: Perform operational readiness review + +[!INCLUDE [implementation-success-context](includes/implementation-success-context.md)] + +Once you build an Azure Synapse Analytics solution and it's ready to deploy, it's important to ensure the operational readiness of that solution. 
Performing an operational readiness review evaluates the solution for its preparedness to provide optimal services to users. Organizations that invest time and resources in assessing operational readiness before launch have a much higher rate of success. It's also important to conduct an operational readiness review periodically post deployment - perhaps annually - to ensure there isn't any drift from operational expectations. + +## Process and focus areas + +Process and focus areas include service operational goals, solution readiness, security, monitoring, high availability (HA) and disaster recovery (DR). + +### Service operational goals + + Document service expectations from the customer's point of view, and get buy-in from the business on these service expectations. Make any necessary modifications to meet business goals and objectives of the service. + +The service level agreement (SLA) of each Azure service varies based on the service. For example, Microsoft guarantees a specific monthly uptime percentage. For more information, see [SLA for Azure Synapse Analytics](https://azure.microsoft.com/support/legal/sla/synapse-analytics/). Ensure these SLAs align with your own business SLAs and document any gaps. It's also important to define any operational level agreements (OLAs) between different teams and ensure that they align with the SLAs. + +### Solution readiness + +It's important to review solution readiness by using the following points. + +- Describe the entire solution architecture calling out critical functionalities of different components and how they interact with each other. +- Document scalability aspects of your solution. Include specific details about the effort involved in scaling and the impact of it on business. Consider whether it can respond to sudden surges of user activity. Bear in mind that Azure Synapse provides functionality for scaling with minimal downtime. +- Document any single points of failure in your solution, along with how to recover should such failures occur. Include the impact of such failures on dependent services in order to minimize the impact. +- Document all dependent services on the solution and their impact. + +### Security + +Data security and privacy are non-negotiable. Azure Synapse implements a multi-layered security architecture for end-to-end protection of your data. Review security readiness by using the following points. + +- **Authentication:** Ensure Azure Active Directory (Azure AD) authentication is used whenever possible. If non-Azure AD authentication is used, ensure strong password mechanisms are in place and that passwords are rotated on a regular basis. For more information, see [Password Guidance](https://www.microsoft.com/research/publication/password-guidance/). Ensure monitoring is in place to detect suspicious actions related to user authentication. Consider using [Azure Identity Protection](/azure/active-directory/identity-protection/overview-identity-protection) to automate the detection and remediation of identity-based risks. +- **Access control:** Ensure proper access controls are in place following the [principle of least privilege](/azure/active-directory/develop/secure-least-privileged-access). Use security features available with Azure services to strengthen the security of your solution. For example, Azure Synapse provides granular security features, including row-level security (RLS), column-level security, and dynamic data masking. 
For more information, see [Azure Synapse Analytics security white paper: Access control](security-white-paper-access-control.md).
+- **Threat protection:** Ensure proper threat detection mechanisms are in place to prevent, detect, and respond to threats. Azure Synapse provides SQL Auditing, SQL Threat Detection, and Vulnerability Assessment to audit, protect, and monitor databases. For more information, see [Azure Synapse Analytics security white paper: Threat detection](security-white-paper-threat-protection.md).
+
+For more information, see the [Azure Synapse Analytics security white paper](security-white-paper-introduction.md).
+
+### Monitoring
+
+Set and document expectations for monitoring readiness with your business. These expectations should describe:
+
+- How to monitor the entire user experience, and whether it includes monitoring of a single-user experience.
+- The specific metrics of each service to monitor.
+- Who to notify about a poor user experience, and how.
+- Details of proactive health checks.
+- Any mechanisms in place that automate actions in response to incidents, such as automatically raising tickets.
+
+Consider using [Azure Monitor](/azure/azure-monitor/overview) to collect, analyze, and act on telemetry data from your Azure and on-premises environments. Azure Monitor helps you maximize the performance and availability of your applications by proactively identifying problems in seconds.
+
+List all the important metrics to monitor for each service in your solution along with their acceptable thresholds. For example, the following list includes important metrics to monitor for a dedicated SQL pool:
+
+- `DWULimit`
+- `DWUUsed`
+- `AdaptiveCacheHitPercent`
+- `AdaptiveCacheUsedPercent`
+- `LocalTempDBUsedPercent`
+- `ActiveQueries`
+- `QueuedQueries`
+
+Consider using [Azure Service Health](https://azure.microsoft.com/features/service-health/) to notify you about Azure service incidents and planned maintenance. That way, you can take action to mitigate downtime. You can set up customizable cloud alerts and use a personalized dashboard to analyze health issues, monitor the impact on your cloud resources, get guidance and support, and share details and updates.
+
+Lastly, ensure notifications are set up to alert the appropriate people when incidents occur. Incidents could be proactive, such as when a certain metric exceeds a threshold, or reactive, such as a failure of a component or service. For more information, see [Overview of alerts in Microsoft Azure](/azure/azure-monitor/alerts/alerts-overview).
+
+### High availability
+
+Define and document the *recovery time objective (RTO)* and *recovery point objective (RPO)* for your solution. RTO is how soon the service will be available to users, and RPO is how much data loss would occur in the event of a failover.
+
+Each Azure service publishes a set of guidelines and metrics on its expected high availability (HA). Ensure these HA metrics align with your business expectations. When they don't align, customizations may be necessary to meet your HA requirements. For example, Azure Synapse dedicated SQL pool supports an eight-hour RPO with automatic restore points. If that RPO isn't sufficient, you can set up user-defined restore points with an appropriate frequency to meet your RPO needs. For more information, see [Backup and restore in Azure Synapse dedicated SQL pool](../sql-data-warehouse/backup-and-restore.md).
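+
+For example, a quick way to check whether you're operating within your RPO is to look at when the most recent automatic restore point was taken. The following query is a sketch only; it assumes the `sys.pdw_loader_backup_runs` view (documented for dedicated SQL pool) is available in your environment and surfaces snapshot runs there.
+
+```sql
+-- Return the most recent backup (snapshot) run recorded for this dedicated SQL pool.
+-- Compare its completion time against your RPO target.
+SELECT TOP 1 *
+FROM sys.pdw_loader_backup_runs
+ORDER BY run_id DESC;
+```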
+
+### Disaster recovery
+
+Define and document a detailed process for disaster recovery (DR) scenarios. DR scenarios can include a failover process, communication mechanisms, an escalation process, war room setup, and more. Also document the process for identifying the causes of outages and the steps to take to recover from disasters.
+
+Use the built-in DR mechanisms available with Azure services for building your DR process. For example, Azure Synapse performs a standard geo-backup of dedicated SQL pools once every day to a paired data center. You can use a geo-backup to recover from a disaster at the primary location. You can also set up Azure Data Lake Storage (ADLS) to copy data to another Azure region that's hundreds of miles away. If there's a disaster at the primary location, a failover can be initiated to make the secondary storage location the new primary storage location. For more information, see [Disaster recovery and storage account failover](/azure/storage/common/storage-disaster-recovery-guidance).
+
+## Next steps
+
+In the [next article](implementation-success-perform-user-readiness-and-onboarding-plan-review.md) in the *Azure Synapse success by design* series, learn how to perform a user readiness and onboarding plan review for your Azure Synapse solution.
diff --git a/articles/synapse-analytics/guidance/implementation-success-perform-user-readiness-and-onboarding-plan-review.md b/articles/synapse-analytics/guidance/implementation-success-perform-user-readiness-and-onboarding-plan-review.md
new file mode 100644
index 0000000000000..c3871148cd03e
--- /dev/null
+++ b/articles/synapse-analytics/guidance/implementation-success-perform-user-readiness-and-onboarding-plan-review.md
@@ -0,0 +1,46 @@
+---
+title: "Synapse implementation success methodology: Perform user readiness and onboarding plan review"
+description: "Learn how to perform user readiness and onboarding of new users to ensure successful adoption of your data warehouse."
+author: peter-myers
+ms.author: v-petermyers
+ms.reviewer: sngun
+ms.service: synapse-analytics
+ms.topic: conceptual
+ms.date: 05/31/2022
+---
+
+# Synapse implementation success methodology: Perform user readiness and onboarding plan review
+
+[!INCLUDE [implementation-success-context](includes/implementation-success-context.md)]
+
+Training technical people, like administrators and developers, is important for delivering success. Don't overlook that you must also extend training to end users. Review the use cases and roles identified during the [assessment stage](implementation-success-assess-environment.md), [project planning](implementation-success-evaluate-project-plan.md), and [solution development](implementation-success-evaluate-solution-development-environment-design.md) to ensure that *everyone* is set up for success.
+
+Evaluate your project plan and prepare an onboarding plan for each group of users, including:
+
+- Big data analytics users.
+- Structured data analytics users.
+- Users of each of your identified data consumption tools.
+- Operations support.
+- Help desk and user support.
+
+## Onboarding and readiness
+
+It's unrealistic to expect that users will figure out how to use Azure Synapse on their own, even when they have experience with similar technologies. So, plan to reach out to your users to ensure a smooth transition to the new environment. Specifically, ensure that:
+
+- Users understand what Azure Synapse does and how it does it.
+- Users understand how to use the Azure Synapse service or the platform that uses it.
+- Onboarding of users is a consistent and continuous process.
+- Users see and understand the value of the new environment.
+
+The onboarding of users starts with explanatory sessions or technical workshops. It also involves giving them access to the platform. The onboarding process can span several months, depending on the complexity of the solution, and it should set the right tone for future interactions with the Azure Synapse platform and services.
+
+Track progress to make sure users are capable of completing a set of core tasks that will form part of their daily operations. These tasks will be specific to different user groups, roles, and use cases. Be sure to identify:
+
+- The core actions users need to be able to perform.
+- The steps users must take to perform each action.
+
+Focus your efforts on the tasks that users struggle with the most. Be sure to provide straightforward instructions and processes, and be mindful of how long it takes for users to complete specific tasks. It's always a good idea to request qualitative feedback to better track user readiness and improve the onboarding experience.
+
+## Next steps
+
+In the [next article](implementation-success-perform-monitoring-review.md) in the *Azure Synapse success by design* series, learn how to monitor your Azure Synapse solution.
diff --git a/articles/synapse-analytics/guidance/includes/implementation-success-context.md b/articles/synapse-analytics/guidance/includes/implementation-success-context.md
new file mode 100644
index 0000000000000..7b84fb8202502
--- /dev/null
+++ b/articles/synapse-analytics/guidance/includes/implementation-success-context.md
@@ -0,0 +1,11 @@
+---
+author: peter-myers
+ms.author: v-petermyers
+ms.reviewer: sngun
+ms.service: synapse-analytics
+ms.topic: include
+ms.date: 02/28/2022
+---
+
+> [!NOTE]
+> This article forms part of the *Azure Synapse implementation success by design* series of articles. For an overview of the series, see [Azure Synapse implementation success by design](../implementation-success-overview.md).
diff --git a/articles/synapse-analytics/guidance/includes/proof-of-concept-playbook-context.md b/articles/synapse-analytics/guidance/includes/proof-of-concept-playbook-context.md
new file mode 100644
index 0000000000000..f88cea3a6c2de
--- /dev/null
+++ b/articles/synapse-analytics/guidance/includes/proof-of-concept-playbook-context.md
@@ -0,0 +1,11 @@
+---
+author: peter-myers
+ms.author: v-petermyers
+ms.reviewer: sngun
+ms.service: synapse-analytics
+ms.topic: include
+ms.date: 04/30/2022
+---
+
+> [!NOTE]
+> This article forms part of the *Azure Synapse proof of concept playbook* series of articles. For an overview of the series, see [Azure Synapse proof of concept playbook](../proof-of-concept-playbook-overview.md).
diff --git a/articles/synapse-analytics/guidance/includes/security-white-paper-context.md b/articles/synapse-analytics/guidance/includes/security-white-paper-context.md
index 5330be126b9d4..c5808f945cc89 100644
--- a/articles/synapse-analytics/guidance/includes/security-white-paper-context.md
+++ b/articles/synapse-analytics/guidance/includes/security-white-paper-context.md
@@ -8,4 +8,4 @@
 ms.date: 01/14/2022
 ---
 > [!NOTE]
-> This article forms part of the Azure Synapse Analytics security white paper series of articles. For an overview of the series, see [Azure Synapse Analytics security white paper](../security-white-paper-introduction.md).
\ No newline at end of file +> This article forms part of the *Azure Synapse Analytics security white paper* series of articles. For an overview of the series, see [Azure Synapse Analytics security white paper](../security-white-paper-introduction.md). diff --git a/articles/synapse-analytics/guidance/media/implementation-success-evaluate-data-integration-design/azure-synapse-analytics-architecture-data-integration.png b/articles/synapse-analytics/guidance/media/implementation-success-evaluate-data-integration-design/azure-synapse-analytics-architecture-data-integration.png new file mode 100644 index 0000000000000..459d11fbc15fd Binary files /dev/null and b/articles/synapse-analytics/guidance/media/implementation-success-evaluate-data-integration-design/azure-synapse-analytics-architecture-data-integration.png differ diff --git a/articles/synapse-analytics/guidance/media/implementation-success-overview/azure-synapse-analytics-architecture.png b/articles/synapse-analytics/guidance/media/implementation-success-overview/azure-synapse-analytics-architecture.png new file mode 100644 index 0000000000000..5a3dcffb91b27 Binary files /dev/null and b/articles/synapse-analytics/guidance/media/implementation-success-overview/azure-synapse-analytics-architecture.png differ diff --git a/articles/synapse-analytics/guidance/media/proof-of-concept-playbook-apache-spark-pool/apache-spark-history-server-diagnostic-tab.png b/articles/synapse-analytics/guidance/media/proof-of-concept-playbook-apache-spark-pool/apache-spark-history-server-diagnostic-tab.png new file mode 100644 index 0000000000000..23263124fd33f Binary files /dev/null and b/articles/synapse-analytics/guidance/media/proof-of-concept-playbook-apache-spark-pool/apache-spark-history-server-diagnostic-tab.png differ diff --git a/articles/synapse-analytics/guidance/media/proof-of-concept-playbook-dedicated-sql-pool/proof-of-concept-playbook-dedicated-sql-pool-setup.png b/articles/synapse-analytics/guidance/media/proof-of-concept-playbook-dedicated-sql-pool/proof-of-concept-playbook-dedicated-sql-pool-setup.png new file mode 100644 index 0000000000000..f5e965a5a80a5 Binary files /dev/null and b/articles/synapse-analytics/guidance/media/proof-of-concept-playbook-dedicated-sql-pool/proof-of-concept-playbook-dedicated-sql-pool-setup.png differ diff --git a/articles/synapse-analytics/guidance/proof-of-concept-playbook-dedicated-sql-pool.md b/articles/synapse-analytics/guidance/proof-of-concept-playbook-dedicated-sql-pool.md new file mode 100644 index 0000000000000..29e2f0eb54335 --- /dev/null +++ b/articles/synapse-analytics/guidance/proof-of-concept-playbook-dedicated-sql-pool.md @@ -0,0 +1,310 @@ +--- +title: "Synapse POC playbook: Data warehousing with dedicated SQL pool in Azure Synapse Analytics" +description: "A high-level methodology for preparing and running an effective Azure Synapse Analytics proof of concept (POC) project for dedicated SQL pool." +author: peter-myers +ms.author: v-petermyers +ms.reviewer: sngun +ms.service: synapse-analytics +ms.topic: conceptual +ms.date: 05/23/2022 +--- + +# Synapse POC playbook: Data warehousing with dedicated SQL pool in Azure Synapse Analytics + +This article presents a high-level methodology for preparing and running an effective Azure Synapse Analytics proof of concept (POC) project for dedicated SQL pool. 
+ +[!INCLUDE [proof-of-concept-playbook-context](includes/proof-of-concept-playbook-context.md)] + +> [!TIP] +> If you're new to dedicated SQL pools, we recommend you work through the [Work with Data Warehouses using Azure Synapse Analytics](/learn/paths/work-with-data-warehouses-using-azure-synapse-analytics/) learning path. + +## Prepare for the POC + +Before deciding on your Azure Synapse POC goals, we recommend that you first read the [Azure Synapse SQL architecture](../sql/overview-architecture.md) article to familiarize yourself with how a dedicated SQL pool separates compute and storage to provide industry-leading performance. + +### Identify sponsors and potential blockers + +Once you're familiar with Azure Synapse, it's time to make sure that your POC has the necessary support and won't hit any roadblocks. You should: + +- Identify any restrictions or policies that your organization has about moving data to, and storing data in, the cloud. +- Identify executive and business sponsorship for a cloud-based data warehouse project. +- Verify that your workload is appropriate for Azure Synapse. For more information, see [Dedicated SQL pool architecture in Azure Synapse Analytics](../sql-data-warehouse/massively-parallel-processing-mpp-architecture.md). + +### Set the timeline + +A POC is a scoped, time-bounded exercise with specific, measurable goals and metrics that define success. Ideally, it should have some basis in business reality so that the results are meaningful. + +POCs have the best outcome when they're *timeboxed*. Timeboxing allocates a fixed and maximum unit of time to an activity. In our experience, two weeks provides enough time to complete the work without the burden of too many use cases or complex test matrices. Working within this fixed time period, we suggest that you follow this timeline: + +1. **Data loading:** Three days or less +1. **Querying:** Five days or less +1. **Value added tests:** Two days or less + +Here are some tips: + +> [!div class="checklist"] +> - Make realistic estimates of the time that you will require to complete the tasks in your plan. +> - Recognize that the time to complete your POC will be related to the size of your dataset, the number of database objects (tables, views, and stored procedures), the complexity of the database objects, and the number of interfaces you will test. +> - If you estimate that your POC will run longer than four weeks, consider reducing the scope to focus only on the most important goals. +> - Get support from all the lead resources and sponsors for the timeline before commencing the POC. + +Once you've determined that there aren't any immediate obstacles and you've set the timeline, you can scope a high-level architecture. + +### Create a high-level scoped architecture + +A high-level future architecture likely contains many data sources and data consumers, big data components, and possibly machine learning and AI data consumers. To keep your POC goals achievable (and within the bounds of your set timeline), decide which of these components will form part of the POC and which will be excluded. + +Additionally, if you're already using Azure, identify the following: + +- Any existing Azure resources that you can use during the POC. For example, resources can include Azure Active Directory (Azure AD), or Azure ExpressRoute. +- What Azure region(s) your organization prefers. +- A subscription you can use for non-production POC work. +- The throughput of your network connection to Azure. 
+ > [!IMPORTANT] + > Be sure to check that your POC can consume some of that throughput without having an adverse effect on production solutions. + +### Apply migration options + +If you're migrating from a legacy data warehouse system to Azure Synapse, here are some questions to consider: + +- Are you migrating and want to make as few changes to existing Extract, Transform, and Load (ETL) processes and data warehouse consumption as possible? +- Are you migrating but want to do some extensive improvements along the way? +- Are you building an entirely new data analytics environment (sometimes called a *greenfield project*)? + +Next, you need to consider your pain points. + +### Identify current pain points + +Your POC should contain use cases to prove potential solutions to address your current pain points. Here are some questions to consider: + +- What gaps in your current implementation do you expect Azure Synapse to fill? +- What new business needs are you required to support? +- What service level agreements (SLAs) are you required to meet? +- What will be the workloads (for example, ETL, batch queries, analytics, reporting queries, or interactive queries)? + +Next, you need to set your POC success criteria. + +### Set POC success criteria + +Identify why you're doing a POC and be sure to define clear goals. It's also important to know what outputs you want from your POC and what you plan to do with them. + +Keep in mind that a POC should be a short and focused effort to quickly prove or test a limited set of concepts. If you have a long list of items to prove, you may want to dive them into multiple POCs. POCs can have gates between them so you can determine whether to proceed to the next POC. + +Here are some example POC goals: + +- We need to know that the query performance for our big complex reporting queries will meet our new SLAs. +- We need to know the query performance for our interactive users. +- We need to know whether our existing ETL processes are a good fit and where improvements need to be made. +- We need to know whether we can shorten our ETL runtimes and by how much. +- We need to know that Synapse Analytics has sufficient security capabilities to adequately secure our data. + +Next, you need to create a test plan. + +### Create a test plan + +Using your goals, identify specific tests to run in order to support those goals and provide your identified outputs. It's important to make sure that you have at least one test for each goal and the expected output. Identify specific queries, reports, ETL and other processes that you will run to provide quantifiable results. + +Refine your tests by adding multiple testing scenarios to clarify any table structure questions that arise. + +Good planning usually defines an effective POC execution. Make sure all stakeholders agree to a written test plan that ties each POC goal to a set of clearly stated test cases and measurements of success. + +Most test plans revolve around performance and the expected user experience. What follows is an example of a test plan. It's important to customize your test plan to meet your business requirements. Clearly defining what you are testing will pay dividends later in this process. + +|Goal|Test|Expected outcomes| +|---------|---------|---------| +|We need to know that the query performance for our big complex reporting queries will meet our new SLAs|- Sequential test of complex queries
                    - Concurrency test of complex queries against stated SLAs|- Queries A, B, and C completed in 10, 13, and 21 seconds, respectively
                    - With 10 concurrent users, queries A, B, and C completed in 11, 15, and 23 seconds, on average| +|We need to know the query performance for our interactive users|- Concurrency test of selected queries at an expected concurrency level of 50 users.
                     - Run the preceding query with result set caching|- At 50 concurrent users, average execution time is expected to be under 10 seconds without result set caching&#10;
                    - At 50 concurrent users, average execution time is expected to be under five seconds with result set caching| +|We need to know whether our existing ETL processes can run within the SLA|- Run one or two ETL processes to mimic production loads|- Loading incrementally into a core fact table must complete in less than 20 minutes (including staging and data cleansing)
                    - Dimension processing needs to take less than five minutes| +|We need to know that the data warehouse has sufficient security capabilities to secure our data|- Review and enable [network security](security-white-paper-network-security.md) (VNet and private endpoints), [access control](security-white-paper-access-control.md) (row-level security, dynamic data masking)|- Prove that data never leaves our tenant.
                    - Ensure that customer content is easily secured| + +Next, you need to identify and validate the POC dataset. + +### Identify and validate the POC dataset + +Using the scoped tests, you can now identify the dataset required to execute those tests in Azure Synapse. Review your dataset by considering the following: + +- Verify that the dataset adequately represents your production dataset in terms of content, complexity, and scale. +- Don't use a dataset that's too small (less than 1TB), as you might not achieve representative performance. +- Don't use a dataset that's too large, as the POC isn't intended to complete a full data migration. +- Identify the [distribution pattern](../sql-data-warehouse/sql-data-warehouse-tables-distribute.md), [indexing option](../sql-data-warehouse/sql-data-warehouse-tables-index.md), and [partitioning](../sql-data-warehouse/sql-data-warehouse-tables-partition.md) for each table. If there are any questions regarding distribution, indexing, or partitioning, add tests to your POC to answer them. Bear in mind that you may want to test more than one distribution option or indexing option for some tables. +- Check with the business owners for any blockers for moving the POC dataset to the cloud. +- Identify any security or privacy concerns. + +> [!IMPORTANT] +> Make sure you check with business owners for any blockers before moving any data to the cloud. Identify any security or privacy concerns or any data obfuscation needs that should be done before moving data to the cloud. + +Next, you need to assemble the team of experts. + +### Assemble the team + +Identify the team members and their commitment to support your POC. Team members should include: + +- A project manager to run the POC project. +- A business representative to oversee requirements and results. +- An application data expert to source the data for the POC dataset. +- An Azure Synapse specialist. +- An expert advisor to optimize the POC tests. +- Any person who will be required for specific POC project tasks but who aren't required for its entire duration. These supporting resources could include network administrators, Azure administrators, or Azure AD administrators. + +> [!TIP] +> We recommend engaging an expert advisor to assist with your POC. [Microsoft's partner community](https://appsource.microsoft.com/marketplace/partner-dir) has global availability of expert consultants who can help you assess, evaluate, or implement Azure Synapse. + +Now that you are fully prepared, it's time to put your POC into practice. + +## Put the POC into practice + +It's important to keep the following in mind: + +- Implement your POC project with the discipline and rigor of any production project. +- Run the POC according to plan. +- Have a change request process in place to prevent your POC scope from growing or changing. + +Before tests can start, you need to set up the test environment. It involves four stages: + +1. Setup +1. Data loading +1. Querying +1. Value added tests + +:::image type="content" source="media/proof-of-concept-playbook-dedicated-sql-pool/proof-of-concept-playbook-dedicated-sql-pool-setup.png" alt-text="Image shows the four test environment stages: Setup, Data loading, Querying, and Value added tests."::: + +### Setup + +You can set up a POC on Azure Synapse by following these steps: + +1. Use [this quickstart](../sql-data-warehouse/create-data-warehouse-portal.md) to provision a Synapse workspace and set up storage and permissions according to the POC test plan. +1. 
Use [this quickstart](../quickstart-create-sql-pool-portal.md) to add a dedicated SQL pool to the Synapse workspace.
+1. Set up [networking and security](security-white-paper-introduction.md) according to your requirements.
+1. Grant appropriate access to POC team members. See [this article](/azure/azure-sql/database/logins-create-manage) about authentication and authorization for accessing dedicated SQL pools.
+
+> [!TIP]
+> We recommend that you *develop code and unit testing* by using the DW500c service level (or below). We recommend that you *run load and performance tests* by using the DW1000c service level (or above). You can [pause compute of the dedicated SQL pool](../sql-data-warehouse/pause-and-resume-compute-portal.md) at any time to cease compute billing, which will save on costs.
+
+### Data loading
+
+Once you've set up the dedicated SQL pool, you can follow these steps to load data:
+
+1. Load the data into [Azure Blob Storage](../../storage/blobs/storage-blobs-overview.md). For a POC, we recommend that you use a [general-purpose V2 storage account](../../storage/common/storage-account-overview.md) with [locally-redundant storage (LRS)](../../storage/common/storage-redundancy.md#locally-redundant-storage). While there are several tools for migrating data to Azure Blob Storage, the easiest way is to use [Azure Storage Explorer](https://azure.microsoft.com/features/storage-explorer/), which can copy files into a storage container.
+2. Load the data into the dedicated SQL pool. Azure Synapse supports two T-SQL loading methods: [PolyBase](../sql-data-warehouse/design-elt-data-loading.md) and the [COPY](/sql/t-sql/statements/copy-into-transact-sql?view=azure-sqldw-latest&preserve-view=true) statement. You can use SSMS to connect to the dedicated SQL pool to use either method.
+
+When you load data into the dedicated SQL pool for the first time, you need to consider which [distribution pattern](../sql-data-warehouse/sql-data-warehouse-tables-distribute.md) and [index option](../sql-data-warehouse/sql-data-warehouse-tables-index.md) to use. While a dedicated SQL pool supports a variety of both, it's a best practice to rely on default settings. Default settings use round-robin distribution and a clustered columnstore index. You can adjust these settings later if necessary, as described later in this article.
+
+The following example shows the COPY load method:
+
+```sql
+--Note when specifying the column list, input field numbers start from 1
+COPY INTO
+    test_1 (Col_1 default 'myStringDefault' 1, Col_2 default 1 3)
+FROM
+    'https://myaccount.blob.core.windows.net/myblobcontainer/folder1/'
+WITH (
+    FILE_TYPE = 'CSV',
+    CREDENTIAL = (IDENTITY = 'Storage Account Key' SECRET = ''),
+    FIELDQUOTE = '"',
+    FIELDTERMINATOR = ',',
+    ROWTERMINATOR = '0x0A',
+    ENCODING = 'UTF8',
+    FIRSTROW = 2
+);
+```
+
+### Querying
+
+The primary purpose of a data warehouse is to perform analytics, which requires querying the data warehouse. Most POCs start by running a small number of representative queries against the data warehouse, at first sequentially and then concurrently. You should define both approaches in your test plan.
+
+#### Sequential query tests
+
+It's easy to run sequential query tests in SSMS. It's important to run these tests by using a user with a sufficiently large [resource class](../sql-data-warehouse/resource-classes-for-workload-management.md).
A resource class is a pre-determined resource limit in dedicated SQL pool that governs compute resources and concurrency for query execution. For simple queries, we recommend using the pre-defined **staticrc20** resource class. For more complex queries, we recommend using the pre-defined **staticrc40** resource class.
+
+Notice that the following first query uses a [query label](../sql/develop-label.md) to provide a mechanism to keep track of the query. The second query uses the `sys.dm_pdw_exec_requests` dynamic management view to search by the label.
+
+```sql
+/* Use the OPTION(LABEL = '') syntax to add a query label to track the query in DMVs */
+SELECT TOP (1000)
+    *
+FROM
+    [dbo].[Date]
+OPTION (LABEL = 'Test1');
+
+/* Use sys.dm_pdw_exec_requests to determine query execution duration (ms) */
+SELECT
+    Total_elapsed_time AS [Elapsed_Time_ms],
+    [label]
+FROM
+    sys.dm_pdw_exec_requests
+WHERE
+    [label] = 'Test1';
+```
+
+#### Concurrent query tests
+
+After recording sequential query performance, you can then run multiple queries concurrently. That way, you can simulate a business intelligence workload running against the dedicated SQL pool. The easiest way to run this test is to download a stress testing tool. The most popular tool is [Apache JMeter](https://jmeter.apache.org/download_jmeter.cgi), which is a third-party open source tool.
+
+The tool reports on minimum, maximum, and median query durations for a given concurrency level. For example, suppose that you want to simulate a business intelligence workload that generates 100 concurrent queries. You can set up JMeter to run those 100 concurrent queries in a loop and then review the steady state execution. You can run the test with [result set caching](../sql-data-warehouse/performance-tuning-result-set-caching.md) on or off to evaluate the suitability of that feature.
+
+Be sure to document your results. Here's an example of some results:
+
+|Concurrency|# Queries run|DWU|Min duration (s)|Max duration (s)|Median duration (s)|
+|---------|---------|---------|---------|---------|---------|
+|100|1,000|5,000|3|10|5|
+|50|5,000|5,000|3|6|4|
+
+#### Mixed workload tests
+
+Mixed workload testing is an extension of the [concurrent query tests](#concurrent-query-tests). By adding a data loading process into the workload mix, the workload will better simulate a real production workload.
+
+#### Optimize the data
+
+Depending on the query workload running on Azure Synapse, you may need to optimize your data warehouse's distributions and indexes and rerun the tests. For more information, see [Best practices for dedicated SQL pools in Azure Synapse Analytics](../sql-data-warehouse/sql-data-warehouse-best-practices.md).
+
+The most common mistakes seen during setup are:
+
+- Large queries run with a resource class that's too low.
+- The dedicated SQL pool service level DWUs are too low for the workload.
+- Large tables that require hash distribution aren't hash distributed.
+
+To improve query performance, you can take the following actions, which are illustrated in the sketch after this list:
+
+- Create [materialized views](../sql-data-warehouse/performance-tuning-materialized-views.md), which can accelerate queries involving common aggregations.
+- [Replicate tables](../sql-data-warehouse/design-guidance-for-replicated-tables.md), especially for small dimension tables.
+- [Hash distribute](../sql-data-warehouse/sql-data-warehouse-tables-distribute.md) large fact tables that are joined or aggregated.
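+
+As an illustration, the following T-SQL sketch shows what these optimizations can look like in a dedicated SQL pool. The table, column, and view names are hypothetical placeholders; adapt them to your own schema and to the join and aggregation patterns you observe during testing.
+
+```sql
+-- Hash distribute a large fact table on a column that's frequently joined or aggregated (CTAS).
+CREATE TABLE dbo.FactSale_Hash
+WITH
+(
+    DISTRIBUTION = HASH(CustomerKey),
+    CLUSTERED COLUMNSTORE INDEX
+)
+AS
+SELECT * FROM dbo.FactSale;
+
+-- Replicate a small dimension table so that joins to it don't require data movement.
+CREATE TABLE dbo.DimDate_Replicate
+WITH
+(
+    DISTRIBUTION = REPLICATE,
+    CLUSTERED COLUMNSTORE INDEX
+)
+AS
+SELECT * FROM dbo.DimDate;
+
+-- Create a materialized view to accelerate a common aggregation query.
+CREATE MATERIALIZED VIEW dbo.mvSalesByDate
+WITH (DISTRIBUTION = HASH(DateKey))
+AS
+SELECT
+    DateKey,
+    SUM(SalesAmount) AS TotalSales,
+    COUNT_BIG(*) AS SaleCount
+FROM dbo.FactSale_Hash
+GROUP BY DateKey;
+```
+
+After you change a distribution or index option, rerun the same test queries so you can compare the results against your earlier baseline.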
+ +### Value added tests + +Once query performance testing is complete, it's a good time to test specific features to verify that they satisfy your intended use cases. These features include: + +- [Row-level security](/sql/relational-databases/security/row-level-security?view=azure-sqldw-latest&preserve-view=true) +- [Column-level security](../sql-data-warehouse/column-level-security.md) +- [Dynamic data masking](/azure/azure-sql/database/dynamic-data-masking-overview) +- Intra-cluster scaling via [workload isolation](../sql-data-warehouse/sql-data-warehouse-workload-isolation.md) + +Finally, you need to interpret your POC results. + +## Interpret the POC results + +Once you have test results for your data warehouse, it's important to interpret that data. A common approach you can take is to compare the runs in terms of *price/performance*. Simply put, price/performance removes the differences in price per DWU or service hardware and provides a single comparable number for each performance test. + +Here's an example: + +|Test|Test duration|DWU|$/hr for DWU|Cost of test| +|---------|---------|---------|---------|---------| +|Test 1|10 min|1000|$12/hr|$2| +|Test 2|30 min|500|$6/hr|$3| + +This example makes it easy to see that **Test 1** at DWU1000 is more cost effective at $2 per test run compared with $3 per test run. + +> [!NOTE] +> You can also use this methodology to compare results *across vendors* in a POC. + +In summary, once you complete all the POC tests, you're ready to evaluate the results. Begin by evaluating whether the POC goals have been met and the desired outputs collected. Make note of where additional testing is warranted and additional questions that were raised. + +## Next steps + +> [!div class="nextstepaction"] +> [Data lake exploration with serverless SQL pool in Azure Synapse Analytics](proof-of-concept-playbook-serverless-sql-pool.md) + +> [!div class="nextstepaction"] +> [Big data analytics with Apache Spark pool in Azure Synapse Analytics](proof-of-concept-playbook-spark-pool.md) + +> [!div class="nextstepaction"] +> [Azure Synapse Analytics frequently asked questions](../overview-faq.yml) diff --git a/articles/synapse-analytics/guidance/proof-of-concept-playbook-overview.md b/articles/synapse-analytics/guidance/proof-of-concept-playbook-overview.md new file mode 100644 index 0000000000000..0b3c72a028c81 --- /dev/null +++ b/articles/synapse-analytics/guidance/proof-of-concept-playbook-overview.md @@ -0,0 +1,58 @@ +--- +title: Azure Synapse proof of concept playbook +description: "Introduction to a series of articles that provide a high-level methodology for planning, preparing, and running an effective Azure Synapse Analytics proof of concept project." +author: peter-myers +ms.author: v-petermyers +ms.reviewer: sngun +ms.service: synapse-analytics +ms.topic: conceptual +ms.date: 05/23/2022 +--- + +# Azure Synapse proof of concept playbook + +Whether it's an enterprise data warehouse migration, a big data re-platforming, or a greenfield implementation, each project traditionally starts with a proof of concept (POC). + +The *Synapse proof of concept playbook* is a series of related articles that provide a high-level methodology for planning, preparing, and running an effective Azure Synapse Analytics POC project. The overall objective of a POC is to validate potential solutions to technical problems, such as how to integrate systems or how to achieve certain results through a specific configuration. 
As emphasized by this series, an effective POC validates that certain concepts have the potential for real-world production application. + +> [!TIP] +> If you're new to Azure Synapse, we recommend you work through the [Introduction to Azure Synapse Analytics](/learn/modules/introduction-azure-synapse-analytics/) module. + +## Playbook audiences + +The playbook helps you to evaluate the use of Azure Synapse when migrating from an existing workload. We designed it for the following audiences: + +- Technical experts who are planning their own in-house Azure Synapse POC project. +- Business owners who will be part of the execution or evaluation of an Azure Synapse POC project. +- Anyone looking to learn more about data warehousing POC projects. + +## Playbook content + +The playbook delivers the following content: + +- Guidance on what makes an effective POC. +- Guidance on how to make valid comparisons between systems. +- Guidance on the technical aspects of running an Azure Synapse POC. +- A road map to relevant technical content from Azure Synapse. +- Guidance on how to evaluate POC results to support business decisions. +- Guidance on how and where to find additional help. + +The playbook includes three subjects: + +- [Data warehousing with dedicated SQL pool in Azure Synapse Analytics](proof-of-concept-playbook-dedicated-sql-pool.md) +- [Data lake exploration with serverless SQL pool in Azure Synapse Analytics](proof-of-concept-playbook-serverless-sql-pool.md) +- [Big data analytics with Apache Spark pool in Azure Synapse Analytics](proof-of-concept-playbook-spark-pool.md) + +## Next steps + +> [!div class="nextstepaction"] +> [Data warehousing with dedicated SQL pool in Azure Synapse Analytics](proof-of-concept-playbook-dedicated-sql-pool.md) + +> [!div class="nextstepaction"] +> [Data lake exploration with serverless SQL pool in Azure Synapse Analytics](proof-of-concept-playbook-serverless-sql-pool.md) + +> [!div class="nextstepaction"] +> [Big data analytics with Apache Spark pool in Azure Synapse Analytics](proof-of-concept-playbook-spark-pool.md) + +> [!div class="nextstepaction"] +> [Azure Synapse Analytics frequently asked questions](../overview-faq.yml) diff --git a/articles/synapse-analytics/guidance/proof-of-concept-playbook-serverless-sql-pool.md b/articles/synapse-analytics/guidance/proof-of-concept-playbook-serverless-sql-pool.md new file mode 100644 index 0000000000000..2bfa04b89fa02 --- /dev/null +++ b/articles/synapse-analytics/guidance/proof-of-concept-playbook-serverless-sql-pool.md @@ -0,0 +1,167 @@ +--- +title: "Synapse POC playbook: Data lake exploration with serverless SQL pool in Azure Synapse Analytics" +description: "A high-level methodology for preparing and running an effective Azure Synapse Analytics proof of concept (POC) project for serverless SQL pool." +author: peter-myers +ms.author: v-petermyers +ms.reviewer: sngun +ms.service: synapse-analytics +ms.topic: conceptual +ms.date: 05/23/2022 +--- + +# Synapse POC playbook: Data lake exploration with serverless SQL pool in Azure Synapse Analytics + +This article presents a high-level methodology for preparing and running an effective Azure Synapse Analytics proof of concept (POC) project for serverless SQL pool. 
+ +[!INCLUDE [proof-of-concept-playbook-context](includes/proof-of-concept-playbook-context.md)] + +## Prepare for the POC + +A POC project can help you make an informed business decision about implementing a big data and advanced analytics environment on a cloud-based platform that leverages serverless SQL pool in Azure Synapse. If you need to explore or gain insights from data in the data lake, or optimize your existing data transformation pipeline, you can benefit from using the serverless SQL pool. It's suitable for the following scenarios: + +- **Basic discovery and exploration:** Quickly reason about data stored in various formats (Parquet, CSV, JSON) in your data lake, so you can plan how to unlock insights from it. +- **Logical data warehouse:** Produce a relational abstraction on top of raw or disparate data without relocating or transforming it, providing an always up-to-date view of your data. +- **Data transformation:** Run simple, scalable, and highly performant data lake queries by using T-SQL. You can feed query results to business intelligence (BI) tools, or load them into a relational database. Target systems can include Azure Synapse dedicated SQL pools or Azure SQL Database. + +Different professional roles can benefit from serverless SQL pool: + +- **Data engineers** can explore the data lake, transform and prepare data by using serverless SQL pool, and simplify their data transformation pipelines. +- **Data scientists** can quickly reason about the contents and structure of the data stored in the data lake by using the [OPENROWSET](/sql/t-sql/functions/openrowset-transact-sql?view=sql-server-ver15&viewFallbackFrom=azure-sqldw-latest&preserve-view=true) T-SQL function and its automatic schema inference. +- **Data analysts** can write T-SQL queries in their preferred query tools, which can connect to serverless SQL pool. They can explore data in Spark external tables that were created by data scientists or data engineers. +- **BI professionals** can quickly create Power BI reports that connect to data lake or Spark tables. + +A serverless SQL pool POC project will identify your key goals and business drivers that serverless SQL pool is designed to support. It will also test key features and gather metrics to support your implementation decisions. A POC isn't designed to be deployed to a production environment. Rather, it's a short-term project that focuses on key questions, and its result can be discarded. + +Before you begin planning your serverless SQL Pool POC project: + +> [!div class="checklist"] +> - Identify any restrictions or guidelines your organization has about moving data to the cloud. +> - Identify executive or business sponsors for a big data and advanced analytics platform project. Secure their support for migration to the cloud. +> - Identify availability of technical experts and business users to support you during the POC execution. + +Before you start preparing for the POC project, we recommend you first read the [serverless SQL pool documentation](../sql/on-demand-workspace-overview.md). + +> [!TIP] +> If you're new to serverless SQL pools, we recommend you work through the [Build data analytics solutions using Azure Synapse serverless SQL pools](/learn/paths/build-data-analytics-solutions-using-azure-synapse-serverless-sql-pools/) learning path. + +### Set the goals + +A successful POC project requires planning. Start by identify why you're doing a POC to fully understand the real motivations. 
Motivations could include modernization, cost saving, performance improvement, or integrated experience. Be sure to document clear goals for your POC and the criteria that will define its success. Ask yourself: + +> [!div class="checklist"] +> - What do you want as the outputs of your POC? +> - What will you do with those outputs? +> - Who will use the outputs? +> - What will define a successful POC? + +Keep in mind that a POC should be a short and focused effort to quickly prove a limited set of concepts and capabilities. These concepts and capabilities should be representative of the overall workload. If you have a long list of items to prove, you may want to plan more than one POC. In that case, define gates between the POCs to determine whether you need to continue with the next one. Given the different professional roles that can use a serverless SQL pool (and the different scenarios that serverless SQL pool supports), you may choose to execute multiple POCs. For example, one POC could focus on requirements for the data scientist role, such as discovery and exploration of data in different formats. Another could focus on requirements for the data engineering role, such as data transformation and the creation of a logical data warehouse. + +As you consider your POC goals, ask yourself the following questions to help you shape the goals: + +> [!div class="checklist"] +> - Are you migrating from an existing big data and advanced analytics platform (on-premises or cloud)? +> - Are you migrating but want to make as few changes as possible to existing ingestion and data processing? +> - Are you migrating but want to do some extensive improvements along the way? +> - Are you building an entirely new big data and advanced analytics platform (greenfield project)? +> - What are your current pain points? For example, scalability, performance, or flexibility. +> - What new business requirements do you need to support? +> - What are the SLAs that you're required to meet? +> - What will be the workloads? For example, data exploration over different data formats, basic exploration, a logical data warehouse, data preparation and/or transformation, T-SQL interactive analysis, T-SQL querying of Spark tables, or reporting queries over the data lake. +> - What are the skills of the users who will own the project (should the POC be implemented)? + +Here are some examples of POC goal setting: + +- Why are we doing a POC? + - We need to know if we can explore all of the raw file formats we store by using serverless SQL pool. + - We need to know if our data engineers can quickly evaluate new data feeds. + - We need to know if data lake query performance by using serverless SQL pool will meet our data exploration requirements. + - We need to know if serverless SQL pool is a good choice for some of our visualizations and reporting requirements. + - We need to know if serverless SQL pool is a good choice for some of our data ingestion and processing requirements. + - We need to know if our move to Azure Synapse will meet our budget. +- At the conclusion of this PoC: + - We will have the data to identify the data transformations that are well suited to serverless SQL pool. + - We will have the data to identify when serverless SQL pool can be best used during data visualization. + - We will have the data to know the ease with which our data engineers and data scientists can adopt the new platform. 
+ - We will have gained insight to better estimate the effort required to complete the implementation or migration project. + - We will have a list of items that may need more testing. + - Our POC will be successful if we have the data needed and have completed the testing identified to determine how serverless SQL pool will support our cloud-based big data and advance analytics platform. + - We will have determined whether we can move to the next phase or whether more POC testing is needed to finalize our decision. + - We will be able to make a sound business decision supported by specific data points. + +### Plan the project + +Use your goals to identify specific tests and to provide the outputs you identified. It's important to make sure that you have at least one test to support each goal and expected output. Also, identify specific data exploration and analysis tasks, specific transformations, and specific existing processing you want to test. Identify a specific dataset and codebase that you can use. + +Here's an example of the needed level of specificity in planning: + +- **Goal:** We need to know whether data engineers can achieve the equivalent processing of the existing ETL process named "Daily Batch Raw File Validation" within the required SLA. +- **Output:** We will have the data to determine whether we can use T-SQL queries to execute the "Daily Batch Raw File Validation" ETL process within the required SLA. +- **Test:** Validation queries A, B, and C are identified by data engineering, and they represent overall data processing needs. Compare the performance of these queries with the benchmark obtained from the existing system. + +### Evaluate the POC dataset + +Using the specific tests you identified, select a dataset to support the tests. Take time to review this dataset. You should verify that the dataset will adequately represent your future processing in terms of content, complexity, and scale. Don't use a dataset that's too small because it won't deliver representative performance. Conversely, don't use a dataset that's too large because the POC shouldn't become a full data migration. Be sure to obtain the appropriate benchmarks from existing systems so you can use them for performance comparisons. + +> [!IMPORTANT] +> Make sure you check with business owners for any blockers before moving any data to the cloud. Identify any security or privacy concerns or any data obfuscation needs that should be done before moving data to the cloud. + +### Create a high-level architecture + +Based upon the high-level architecture of your proposed future state architecture, identify the components that will form part of your POC. Your high-level future state architecture likely contains many data sources, numerous data consumers, big data components, and possibly machine learning and artificial intelligence (AI) data consumers. Your POC architecture should specifically identify components that will be part of the POC. Importantly, it should identify any components that won't form part of the POC testing. + +If you're already using Azure, identify any resources you already have in place (Azure Active Directory, ExpressRoute, and others) that you can use during the POC. Also identify the Azure regions your organization uses. Now is a great time to identify the throughput of your ExpressRoute connection and to check with other business users that your POC can consume some of that throughput without adverse impact on production systems. 
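+
+For example, when you evaluate the POC dataset, a quick way to gauge the scale and layout of files that already sit in the data lake is to profile them in place with serverless SQL pool. The following query is a sketch only; the storage URL, folder path, and file format are hypothetical placeholders for your own data.
+
+```sql
+-- Count rows per file for a candidate POC dataset stored as Parquet in the data lake.
+-- Replace the BULK path with the location of your own files.
+SELECT
+    r.filepath() AS source_file,
+    COUNT_BIG(*) AS row_count
+FROM
+    OPENROWSET(
+        BULK 'https://mydatalake.dfs.core.windows.net/raw/sales/year=2022/*.parquet',
+        FORMAT = 'PARQUET'
+    ) AS r
+GROUP BY
+    r.filepath();
+```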
+ +### Identify POC resources + +Specifically identify the technical resources and time commitments required to support your POC. Your POC will need: + +- A business representative to oversee requirements and results. +- An application data expert, to source the data for the POC and provide knowledge of the existing processes and logic. +- A serverless SQL pool expert. +- An expert advisor, to optimize the POC tests. +- Resources that will be required for specific components of your POC project, but not necessarily required for the duration of the POC. These resources could include network admins, Azure admins, Active Directory admins, Azure portal admins, and others. +- Ensure all the required Azure services resources are provisioned and the required level of access is granted, including access to storage accounts. +- Ensure you have an account that has required data access permissions to retrieve data from all data sources in the POC scope. + +> [!TIP] +> We recommend engaging an expert advisor to assist with your POC. [Microsoft's partner community](https://appsource.microsoft.com/marketplace/partner-dir) has global availability of expert consultants who can help you assess, evaluate, or implement Azure Synapse. + +### Set the timeline + +Review your POC planning details and business needs to identify a time frame for your POC. Make realistic estimates of the time that will be required to complete the POC goals. The time to complete your POC will be influenced by the size of your POC dataset, the number and complexity of tests, and the number of interfaces to test. If you estimate that your POC will run longer than four weeks, consider reducing the POC scope to focus on the highest priority goals. Be sure to obtain approval and commitment from all the lead resources and sponsors before continuing. + +## Put the POC into practice + +We recommend you execute your POC project with the discipline and rigor of any production project. Run the project according to plan and manage a change request process to prevent uncontrolled growth of the POC's scope. + +Here are some examples of high-level tasks: + +1. [Create a Synapse workspace](../quickstart-create-workspace.md), storage accounts, and the Azure resources identified in the POC plan. +1. Set up [networking and security](security-white-paper-introduction.md) according to your requirements. +1. Grant appropriate access to POC team members. See [this article](../sql/develop-storage-files-storage-access-control.md) about permissions for accessing files directly from Azure Storage. +1. Load the POC dataset. +1. Implement and configure the tests and/or migrate existing code to serverless SQL pool scripts and views. +1. Execute the tests: + - Many tests can be executed in parallel. + - Record your results in a consumable and readily understandable format. +1. Monitor for troubleshooting and performance. +1. Evaluate your results and present findings. +1. Work with technical stakeholders and the business to plan for the next stage of the project. The next stage could be a follow-up POC or a production implementation. + +## Interpret the POC results + +When you complete all the POC tests, you evaluate the results. Begin by evaluating whether the POC goals were met and the desired outputs were collected. Determine whether more testing is necessary or any questions need addressing. 
+
+## Next steps
+
+> [!div class="nextstepaction"]
+> [Data warehousing with dedicated SQL pool in Azure Synapse Analytics](proof-of-concept-playbook-dedicated-sql-pool.md)
+
+> [!div class="nextstepaction"]
+> [Big data analytics with Apache Spark pool in Azure Synapse Analytics](proof-of-concept-playbook-spark-pool.md)
+
+> [!div class="nextstepaction"]
+> [Build data analytics solutions using Azure Synapse serverless SQL pools](/learn/paths/build-data-analytics-solutions-using-azure-synapse-serverless-sql-pools/)
+
+> [!div class="nextstepaction"]
+> [Azure Synapse Analytics frequently asked questions](../overview-faq.yml)
diff --git a/articles/synapse-analytics/guidance/proof-of-concept-playbook-spark-pool.md b/articles/synapse-analytics/guidance/proof-of-concept-playbook-spark-pool.md
new file mode 100644
index 0000000000000..ae431946d0e06
--- /dev/null
+++ b/articles/synapse-analytics/guidance/proof-of-concept-playbook-spark-pool.md
@@ -0,0 +1,211 @@
+---
+title: "Synapse POC playbook: Big data analytics with Apache Spark pool in Azure Synapse Analytics"
+description: "A high-level methodology for preparing and running an effective Azure Synapse Analytics proof of concept (POC) project for Apache Spark pool."
+author: peter-myers
+ms.author: v-petermyers
+ms.reviewer: sngun
+ms.service: synapse-analytics
+ms.topic: conceptual
+ms.date: 05/23/2022
+---
+
+# Synapse POC playbook: Big data analytics with Apache Spark pool in Azure Synapse Analytics
+
+This article presents a high-level methodology for preparing and running an effective Azure Synapse Analytics proof of concept (POC) project for Apache Spark pool.
+
+[!INCLUDE [proof-of-concept-playbook-context](includes/proof-of-concept-playbook-context.md)]
+
+## Prepare for the POC
+
+A POC project can help you make an informed business decision about implementing a big data and advanced analytics environment on a cloud-based platform that leverages Apache Spark pool in Azure Synapse.
+
+A POC project will identify your key goals and business drivers that a cloud-based big data and advanced analytics platform must support. It will test key metrics and prove key behaviors that are critical to the success of your data engineering, machine learning model building, and training requirements. A POC isn't designed to be deployed to a production environment. Rather, it's a short-term project that focuses on key questions, and its result can be discarded.
+
+Before you begin planning your Spark POC project:
+
+> [!div class="checklist"]
+> - Identify any restrictions or guidelines your organization has about moving data to the cloud.
+> - Identify executive or business sponsors for a big data and advanced analytics platform project. Secure their support for migration to the cloud.
+> - Identify availability of technical experts and business users to support you during the POC execution.
+
+Before you start preparing for the POC project, we recommend you first read the [Apache Spark documentation](/azure/hdinsight/spark/apache-spark-overview).
+
+> [!TIP]
+> If you're new to Spark pools, we recommend you work through the [Perform data engineering with Azure Synapse Apache Spark Pools](/learn/paths/perform-data-engineering-with-azure-synapse-apache-spark-pools/) learning path.
+
+By now you should have determined that there are no immediate blockers, and you can start preparing for your POC.
If you are new to Apache Spark Pools in Azure Synapse Analytics you can refer to [this documentation](../spark/apache-spark-overview.md) where you can get an overview of the Spark architecture and learn how it works in Azure Synapse. + +Develop an understanding of these key concepts: + +- Apache Spark and its distributed architecture. +- Spark concepts like Resilient Distributed Datasets (RDD) and partitions (in-memory and physical). +- Azure Synapse workspace, the different compute engines, pipeline, and monitoring. +- Separation of compute and storage in Spark pool. +- Authentication and authorization in Azure Synapse. +- Native connectors that integrate with Azure Synapse dedicated SQL pool, Azure Cosmos DB, and others. + +Azure Synapse decouples compute resources from storage so that you can better manage your data processing needs and control costs. The serverless architecture of Spark pool allows you to spin up and down as well as grow and shrink your Spark cluster, independent of your storage. You can pause (or setup auto-pause) a Spark cluster entirely. That way, you pay for compute only when it's in use. When it's not in use, you only pay for storage. You can scale up your Spark cluster for heavy data processing needs or large loads and then scale it back down during less intense processing times (or shut it down completely). You can effectively scale and pause a cluster to reduce costs. Your Spark POC tests should include data ingestion and data processing at different scales (small, medium, and large) to compare price and performance at different scale. For more information, see [Automatically scale Azure Synapse Analytics Apache Spark pools](../spark/apache-spark-autoscale.md). + +It's important to understand the difference between the different sets of Spark APIs so you can decide what works best for your scenario. You can choose the one that provides better performance or ease of use, taking advantage of your team's existing skill sets. For more information, see [A Tale of Three Apache Spark APIs: RDDs, DataFrames, and Datasets](https://databricks.com/session/a-tale-of-three-apache-spark-apis-rdds-dataframes-and-datasets). + +Data and file partitioning work slightly differently in Spark. Understanding the differences will help you to optimize for performance. For more information, see Apache Spark documentation: [Partition Discovery](https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#partition-discovery) and [Partition Configuration Options](https://spark.apache.org/docs/latest/sql-performance-tuning.html#other-configuration-options). + +### Set the goals + +A successful POC project requires planning. Start by identify why you're doing a POC to fully understand the real motivations. Motivations could include modernization, cost saving, performance improvement, or integrated experience. Be sure to document clear goals for your POC and the criteria that will define its success. Ask yourself: + +> [!div class="checklist"] +> - What do you want as the outputs of your POC? +> - What will you do with those outputs? +> - Who will use the outputs? +> - What will define a successful POC? + +Keep in mind that a POC should be a short and focused effort to quickly prove a limited set of concepts and capabilities. These concepts and capabilities should be representative of the overall workload. If you have a long list of items to prove, you may want to plan more than one POC. 
+
+### Set the goals
+
+A successful POC project requires planning. Start by identifying why you're doing a POC to fully understand the real motivations. Motivations could include modernization, cost savings, performance improvement, or an integrated experience. Be sure to document clear goals for your POC and the criteria that will define its success. Ask yourself:
+
+> [!div class="checklist"]
+> - What do you want as the outputs of your POC?
+> - What will you do with those outputs?
+> - Who will use the outputs?
+> - What will define a successful POC?
+
+Keep in mind that a POC should be a short and focused effort to quickly prove a limited set of concepts and capabilities. These concepts and capabilities should be representative of the overall workload. If you have a long list of items to prove, you may want to plan more than one POC. In that case, define gates between the POCs to determine whether you need to continue with the next one. Given the different professional roles that may use Spark pools and notebooks in Azure Synapse, you may choose to execute multiple POCs. For example, one POC could focus on requirements for the data engineering role, such as ingestion and processing. Another POC could focus on machine learning (ML) model development.
+
+As you consider your POC goals, ask yourself the following questions to help you shape the goals:
+
+> [!div class="checklist"]
+> - Are you migrating from an existing big data and advanced analytics platform (on-premises or cloud)?
+> - Are you migrating but want to make as few changes as possible to existing ingestion and data processing? For example, a Spark to Spark migration, or a Hadoop/Hive to Spark migration.
+> - Are you migrating but want to do some extensive improvements along the way? For example, re-writing MapReduce jobs as Spark jobs, or converting legacy RDD-based code to DataFrame/Dataset-based code.
+> - Are you building an entirely new big data and advanced analytics platform (greenfield project)?
+> - What are your current pain points? For example, scalability, performance, or flexibility.
+> - What new business requirements do you need to support?
+> - What are the SLAs that you're required to meet?
+> - What will be the workloads? For example, ETL, batch processing, stream processing, machine learning model training, analytics, reporting queries, or interactive queries?
+> - What are the skills of the users who will own the project (should the POC be implemented)? For example, PySpark vs Scala skills, or notebook vs IDE experience.
+
+Here are some examples of POC goal setting:
+
+- Why are we doing a POC?
+  - We need to know that the data ingestion and processing performance for our big data workload will meet our new SLAs.
+  - We need to know whether near real-time stream processing is possible and how much throughput it can support. (Will it support our business requirements?)
+  - We need to know if our existing data ingestion and transformation processes are a good fit and where improvements will need to be made.
+  - We need to know if we can shorten our data integration run times and by how much.
+  - We need to know if our data scientists can build and train machine learning models and leverage AI/ML libraries as needed in a Spark pool.
+  - Will the move to cloud-based Synapse Analytics meet our cost goals?
+- At the conclusion of this POC:
+  - We will have the data to determine if our data processing performance requirements can be met for both batch and real-time streaming.
+  - We will have tested ingestion and processing of all our different data types (structured, semi-structured, and unstructured) that support our use cases.
+  - We will have tested some of our existing complex data processing and can identify the work that will need to be completed to migrate our portfolio of data integration to the new environment.
+  - We will have tested data ingestion and processing and will have the data points to estimate the effort required for the initial migration and load of historical data, as well as estimate the effort required to migrate our data ingestion (Azure Data Factory (ADF), Distcp, Databox, or others).
+  - We will have tested data ingestion and processing and can determine if our ETL/ELT processing requirements can be met.
+  - We will have gained insight to better estimate the effort required to complete the implementation project.
+  - We will have tested scale and scaling options and will have the data points to configure our platform for better price-performance settings.
+  - We will have a list of items that may need more testing.
+
+### Plan the project
+
+Use your goals to identify specific tests and to provide the outputs you identified. It's important to make sure that you have at least one test to support each goal and expected output. Also, identify specific data ingestion, batch or stream processing, and all other processes that will be executed so you can identify a very specific dataset and codebase. This specific dataset and codebase will define the scope of the POC.
+
+Here's an example of the needed level of specificity in planning:
+
+- **Goal A:** We need to know whether our requirement for data ingestion and processing of batch data can be met under our defined SLA.
+- **Output A:** We will have the data to determine whether our batch data ingestion and processing can meet the data processing requirement and SLA.
+  - **Test A1:** Processing queries A, B, and C are identified as good performance tests as they are commonly executed by the data engineering team. Also, they represent overall data processing needs.
+  - **Test A2:** Processing queries X, Y, and Z are identified as good performance tests as they contain near real-time stream processing requirements. Also, they represent overall event-based stream processing needs.
+  - **Test A3:** Compare the performance of these queries at different scales of the Spark cluster (varying the number of worker nodes, the size of the worker nodes - like small, medium, and large - and the number and size of executors) with the benchmark obtained from the existing system. Keep the *law of diminishing returns* in mind; adding more resources (either by scaling up or scaling out) can help to achieve parallelism, but there's a certain limit, unique to each scenario, beyond which more resources don't help. Discover the optimal configuration for each identified use case in your testing.
+- **Goal B:** We need to know if our data scientists can build and train machine learning models on this platform.
+- **Output B:** We will have tested some of our machine learning models by training them on data in a Spark pool or a SQL pool, leveraging different machine learning libraries. These tests will help to determine which machine learning models can be migrated to the new environment.
+  - **Test B1:** Specific machine learning models will be tested.
+  - **Test B2:** Test the base machine learning libraries that come with Spark (Spark MLlib) along with an additional library that can be installed on Spark (like scikit-learn) to meet the requirement.
+- **Goal C:** We will have tested data ingestion and will have the data points to:
+  - Estimate the effort for our initial historical data migration to the data lake and/or the Spark pool.
+  - Plan an approach to migrate historical data.
+- **Output C:** We will have tested and determined the data ingestion rate achievable in our environment and can determine whether our data ingestion rate is sufficient to migrate historical data during the available time window.
+  - **Test C1:** Test different approaches of historical data migration. For more information, see [Transfer data to and from Azure](/azure/architecture/data-guide/scenarios/data-transfer).
+  - **Test C2:** Identify the allocated bandwidth of ExpressRoute and whether there is any throttling set up by the infrastructure team. For more information, see [What is Azure ExpressRoute? (Bandwidth options)](/azure/expressroute/expressroute-introduction#bandwidth-options).
+  - **Test C3:** Test the data transfer rate for both online and offline data migration. For more information, see [Copy activity performance and scalability guide](/azure/data-factory/copy-activity-performance#copy-performance-and-scalability-achievable-using-azure-data-factory-and-synapse-pipelines).
+  - **Test C4:** Test data transfer from the data lake to the SQL pool by using either ADF, Polybase, or the COPY command. For more information, see [Data loading strategies for dedicated SQL pool in Azure Synapse Analytics](../sql-data-warehouse/design-elt-data-loading.md).
+- **Goal D:** We will have tested the data ingestion rate of incremental data loading and will have the data points to estimate the data ingestion and processing time window to the data lake and/or the dedicated SQL pool.
+- **Output D:** We will have tested the data ingestion rate and can determine whether our data ingestion and processing requirements can be met with the identified approach.
+  - **Test D1:** Test the daily update data ingestion and processing.
+  - **Test D2:** Test the processed data load to the dedicated SQL pool table from the Spark pool. For more information, see [Azure Synapse Dedicated SQL Pool Connector for Apache Spark](../spark/synapse-spark-sql-pool-import-export.md).
+  - **Test D3:** Execute the daily update load process concurrently while running end user queries.
+
+Be sure to refine your tests by adding multiple testing scenarios. Azure Synapse makes it easy to test at different scales (varying the number of worker nodes and the size of the worker nodes, like small, medium, and large) to compare performance and behavior.
+
+Here are some testing scenarios:
+
+- **Spark pool test A:** We will execute data processing across multiple node types (small, medium, and large) as well as different numbers of worker nodes.
+- **Spark pool test B:** We will load/retrieve processed data from the Spark pool to the dedicated SQL pool by using [the connector](../spark/synapse-spark-sql-pool-import-export.md).
+- **Spark pool test C:** We will load/retrieve processed data from the Spark pool to Cosmos DB by using Azure Synapse Link.
+
+### Evaluate the POC dataset
+
+Using the specific tests you identified, select a dataset to support the tests. Take time to review this dataset. You should verify that the dataset will adequately represent your future processing in terms of content, complexity, and scale. Don't use a dataset that's too small (less than 1 TB) because it won't deliver representative performance. Conversely, don't use a dataset that's too large because the POC shouldn't become a full data migration. Be sure to obtain the appropriate benchmarks from existing systems so you can use them for performance comparisons. The sketch after the following note shows one quick way to profile a candidate dataset.
+
+> [!IMPORTANT]
+> Make sure you check with business owners for any blockers before moving any data to the cloud. Identify any security or privacy concerns or any data obfuscation needs that should be done before moving data to the cloud.
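+
+Here's a minimal PySpark sketch (the storage path is illustrative) for quickly profiling a candidate POC dataset so you can confirm its row count, width, and initial parallelism before you baseline any tests against it:
+
+```python
+# In a Synapse notebook, the `spark` session is already available.
+candidate = spark.read.parquet("abfss://data@contosoadls.dfs.core.windows.net/poc/candidate-dataset/")
+
+print("Rows:", candidate.count())                       # overall volume
+print("Columns:", len(candidate.columns))               # schema width
+print("Partitions:", candidate.rdd.getNumPartitions())  # parallelism of the initial read
+
+candidate.printSchema()  # verify data types match the source system
+```
+
+Compare these numbers against the source system to confirm that the dataset is representative before you record any benchmark results.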
+
+### Create a high-level architecture
+
+Based on your proposed future state architecture, identify the components that will form part of your POC. Your high-level future state architecture likely contains many data sources, numerous data consumers, big data components, and possibly machine learning and artificial intelligence (AI) data consumers. Your POC architecture should specifically identify the components that will be part of the POC. Importantly, it should identify any components that won't form part of the POC testing.
+
+If you're already using Azure, identify any resources you already have in place (Azure Active Directory, ExpressRoute, and others) that you can use during the POC. Also identify the Azure regions your organization uses. Now is a great time to identify the throughput of your ExpressRoute connection and to check with other business users that your POC can consume some of that throughput without adverse impact on production systems.
+
+For more information, see [Big data architectures](/azure/architecture/data-guide/big-data).
+
+### Identify POC resources
+
+Specifically identify the technical resources and time commitments required to support your POC. Your POC will need:
+
+- A business representative to oversee requirements and results.
+- An application data expert, to source the data for the POC and provide knowledge of the existing processes and logic.
+- An Apache Spark and Spark pool expert.
+- An expert advisor, to optimize the POC tests.
+- Resources that will be required for specific components of your POC project, but not necessarily required for the duration of the POC. These resources could include network admins, Azure admins, Active Directory admins, Azure portal admins, and others.
+- Ensure all the required Azure service resources are provisioned and the required level of access is granted, including access to storage accounts.
+- Ensure you have an account that has the required data access permissions to retrieve data from all data sources in the POC scope.
+
+> [!TIP]
+> We recommend engaging an expert advisor to assist with your POC. [Microsoft's partner community](https://appsource.microsoft.com/marketplace/partner-dir) has global availability of expert consultants who can help you assess, evaluate, or implement Azure Synapse.
+
+### Set the timeline
+
+Review your POC planning details and business needs to identify a time frame for your POC. Make realistic estimates of the time that will be required to complete the POC goals. The time to complete your POC will be influenced by the size of your POC dataset, the number and complexity of tests, and the number of interfaces to test. If you estimate that your POC will run longer than four weeks, consider reducing the POC scope to focus on the highest priority goals. Be sure to obtain approval and commitment from all the lead resources and sponsors before continuing.
+
+## Put the POC into practice
+
+We recommend you execute your POC project with the discipline and rigor of any production project. Run the project according to plan and manage a change request process to prevent uncontrolled growth of the POC's scope.
+
+Here are some examples of high-level tasks:
+
+1. [Create a Synapse workspace](../quickstart-create-workspace.md), Spark pools and dedicated SQL pools, storage accounts, and all Azure
+resources identified in the POC plan.
+1. Load POC dataset:
+   - Make data available in Azure by extracting from the source or by creating sample data in Azure.
     For more information, see:
+     - [Transferring data to and from Azure](/azure/architecture/data-guide/scenarios/data-transfer)
+     - [Azure Data Box](https://azure.microsoft.com/services/databox/)
+     - [Copy activity performance and scalability guide](/azure/data-factory/copy-activity-performance#copy-performance-and-scalability-achievable-using-azure-data-factory-and-synapse-pipelines)
+     - [Data loading strategies for dedicated SQL pool in Azure Synapse Analytics](../sql-data-warehouse/design-elt-data-loading.md)
+     - [Bulk load data using the COPY statement](../sql-data-warehouse/quickstart-bulk-load-copy-tsql.md?view=azure-sqldw-latest&preserve-view=true)
+   - Test the dedicated connector for the Spark pool and the dedicated SQL pool.
+1. Migrate existing code to the Spark pool:
+   - If you're migrating from Spark, your migration effort is likely to be straightforward given that the Spark pool leverages the open-source Spark distribution. However, if you're using vendor-specific features on top of core Spark features, you'll need to correctly map these features to the Spark pool features.
+   - If you're migrating from a non-Spark system, your migration effort will vary based on the complexity involved.
+1. Execute the tests:
+   - Many tests can be executed in parallel across multiple Spark pool clusters.
+   - Record your results in a consumable and readily understandable format.
+1. Monitor for troubleshooting and performance. For more information, see:
+   - [Monitor Apache Spark activities](../get-started-monitor.md#apache-spark-activities)
+   - [Monitor with web user interfaces - Spark's history server](https://spark.apache.org/docs/3.0.0-preview/web-ui.html)
+   - [Monitoring resource utilization and query activity in Azure Synapse Analytics](../../sql-data-warehouse/sql-data-warehouse-concept-resource-utilization-query-activity.md)
+1. Monitor data skew, time skew, and executor usage percentage by opening the **Diagnostic** tab of Spark's history server.
+
+   :::image type="content" source="media/proof-of-concept-playbook-apache-spark-pool/apache-spark-history-server-diagnostic-tab.png" alt-text="Image shows the Diagnostic tab of Spark's history server.":::
+
+## Interpret the POC results
+
+When you complete all the POC tests, evaluate the results. Begin by evaluating whether the POC goals were met and the desired outputs were collected. Determine whether more testing is necessary or whether any questions need addressing.
+
+## Next steps
+
+> [!div class="nextstepaction"]
+> [Data lake exploration with dedicated SQL pool in Azure Synapse Analytics](proof-of-concept-playbook-dedicated-sql-pool.md)
+
+> [!div class="nextstepaction"]
+> [Data lake exploration with serverless SQL pool in Azure Synapse Analytics](proof-of-concept-playbook-serverless-sql-pool.md)
+
+> [!div class="nextstepaction"]
+> [Azure Synapse Analytics frequently asked questions](../overview-faq.yml)
diff --git a/articles/synapse-analytics/guidance/security-white-paper-introduction.md b/articles/synapse-analytics/guidance/security-white-paper-introduction.md
index 909ba27cc6f6a..6460961df8252 100644
--- a/articles/synapse-analytics/guidance/security-white-paper-introduction.md
+++ b/articles/synapse-analytics/guidance/security-white-paper-introduction.md
@@ -65,7 +65,7 @@ Azure Synapse is a Platform-as-a-service (PaaS) analytics service that brings to
 [Pipelines](../../data-factory/concepts-pipelines-activities.md) are a logical grouping of activities that perform data movement and data transformation at scale.
[Data flow](../../data-factory/concepts-data-flow-overview.md) is a transformation activity in a pipeline that's developed by using a low-code user interface. It can execute data transformations at scale. Behind the scenes, data flows use Apache Spark clusters of Azure Synapse to execute automatically generated code. Pipelines and data flows are compute-only services, and they don't have any managed storage associated with them. -Pipelines use the Integration Runtime (IR) as the scalable compute infrastructure for performing data movement and dispatch activities. Data movement activities run on the IR whereas the dispatch activities run on variety of other compute engines, including Azure SQL Database, Azure HDInsight, Azure Databricks, Apache Spark clusters of Azure Synapse, and others. Azure Synapse supports two types of IR: Azure Integration Runtime and Self-hosted Integration Runtime. The [Azure IR](/azure/data-factory/concepts-integration-runtime.md#azure-integration-runtime) provides a fully managed, scalable, and on-demand compute infrastructure. The [Self-hosted IR](/azure/data-factory/concepts-integration-runtime.md#self-hosted-integration-runtime) is installed and configured by the customer in their own network, either in on-premises machines or in Azure cloud virtual machines. +Pipelines use the Integration Runtime (IR) as the scalable compute infrastructure for performing data movement and dispatch activities. Data movement activities run on the IR whereas the dispatch activities run on variety of other compute engines, including Azure SQL Database, Azure HDInsight, Azure Databricks, Apache Spark clusters of Azure Synapse, and others. Azure Synapse supports two types of IR: Azure Integration Runtime and Self-hosted Integration Runtime. The [Azure IR](/azure/data-factory/concepts-integration-runtime#azure-integration-runtime) provides a fully managed, scalable, and on-demand compute infrastructure. The [Self-hosted IR](/azure/data-factory/concepts-integration-runtime#self-hosted-integration-runtime) is installed and configured by the customer in their own network, either in on-premises machines or in Azure cloud virtual machines. Customers can choose to associate their Synapse workspace with a [managed workspace virtual network](../security/synapse-workspace-managed-vnet.md). When associated with a managed workspace virtual network, Azure IRs and Apache Spark clusters that are used by pipelines, data flows, and the Apache Spark pools are deployed inside the managed workspace virtual network. This setup ensures network isolation between the workspaces for pipelines and Apache Spark workloads. diff --git a/articles/synapse-analytics/guidance/success-by-design-introduction.md b/articles/synapse-analytics/guidance/success-by-design-introduction.md new file mode 100644 index 0000000000000..e9f49c3c02fb5 --- /dev/null +++ b/articles/synapse-analytics/guidance/success-by-design-introduction.md @@ -0,0 +1,36 @@ +--- +title: Success by design +description: "TODO: Success by design" +author: peter-myers +ms.author: v-petermyers +ms.reviewer: sngun +ms.service: synapse-analytics +ms.topic: conceptual +ms.date: 05/23/2022 +--- + +# Success by design + +Welcome to the Azure Synapse Customer Success Engineering Success by Design repository. + +As part of the Synapse Engineering Group at Microsoft, the Azure Synapse Customer Success Engineering (CSE) team is relied upon to help with complex projects that involve Azure Synapse. 
As a team of experienced professionals, we provide guidance for all facets of Azure Synapse, including working with customers on architecture reviews, configuration guidance, and performance analysis and recommendations. It's from this experience that the CSE team created this repository of guidance articles to help you produce successful solutions.
+
+This guidance is intended to supplement the official Azure Synapse documentation with additional specialized content that:
+
+- Helps you evaluate Azure Synapse by using a Proof of Concept (POC) project.
+- Helps you successfully implement a solution that incorporates Azure Synapse by using our [implementation success method](implementation-success-overview.md). This method can guide you through the key phases of your implementation, including planning, solution development, deployment, and post go-live evaluation.
+- Provides detailed guidance on complex topics, including security, networking, troubleshooting, performance tuning, and migration.
+
+> [!NOTE]
+> This guidance undergoes continuous improvement by the Synapse CSE team. In time, guidance articles will cover additional topics, including enterprise development support using continuous integration and continuous deployment (CI/CD) methods, business continuity and disaster recovery (BCDR), high availability (HA), and Azure Synapse monitoring.
+
+Next steps:
+
+> [!div class="nextstepaction"]
+> [Synapse proof of concept playbook](proof-of-concept-playbook-overview.md)
+
+> [!div class="nextstepaction"]
+> [Synapse implementation success method](implementation-success-overview.md)
+
+> [!div class="nextstepaction"]
+> [Security white paper](security-white-paper-introduction.md)
diff --git a/articles/synapse-analytics/machine-learning/concept-deep-learning.md b/articles/synapse-analytics/machine-learning/concept-deep-learning.md
new file mode 100644
index 0000000000000..67747a8391a88
--- /dev/null
+++ b/articles/synapse-analytics/machine-learning/concept-deep-learning.md
@@ -0,0 +1,63 @@
+---
+title: 'Deep learning'
+description: This article provides a conceptual overview of the deep learning and data science capabilities available through Apache Spark on Azure Synapse Analytics.
+author: midesa
+ms.service: synapse-analytics
+ms.topic: conceptual
+ms.subservice: machine-learning
+ms.date: 04/19/2022
+ms.author: midesa
+---
+
+# Deep learning (Preview)
+
+Apache Spark in Azure Synapse Analytics enables machine learning with big data, providing the ability to obtain valuable insight from large amounts of structured, unstructured, and fast-moving data. There are several options when training machine learning models using Apache Spark in Azure Synapse Analytics: Apache Spark MLlib, Azure Machine Learning, and various other open-source libraries.
+
+## GPU-enabled Apache Spark pools
+
+To simplify the process for creating and managing pools, Azure Synapse takes care of pre-installing low-level libraries and setting up all the complex networking requirements between compute nodes. This integration allows users to get started with GPU-accelerated pools within just a few minutes. To learn more about how to create a GPU-accelerated pool, you can visit the quickstart to [create a GPU-accelerated pool](../quickstart-create-apache-gpu-pool-portal.md).
+
+> [!NOTE]
+> - GPU-accelerated pools can be created in workspaces located in East US, Australia East, and North Europe.
+> - GPU-accelerated pools are only available with the Apache Spark 3.1 and 3.2 runtimes.
+> - You might need to request a [limit increase](../spark/apache-spark-rapids-gpu.md#quotas-and-resource-constraints-in-azure-synapse-gpu-enabled-pools) in order to create GPU-enabled clusters.
+
+## GPU ML Environment
+
+Azure Synapse Analytics provides built-in support for deep learning infrastructure. The Azure Synapse Analytics runtimes for Apache Spark 3 include support for the most common deep learning libraries, like TensorFlow and PyTorch. The Azure Synapse runtime also includes supporting libraries like Petastorm and Horovod, which are commonly used for distributed training.
+
+### TensorFlow
+
+TensorFlow is an open source machine learning framework for all developers. It is used for implementing machine learning and deep learning applications.
+
+For more information about TensorFlow, you can visit the [TensorFlow API documentation](https://www.tensorflow.org/api_docs/python/tf).
+
+### PyTorch
+
+PyTorch is an optimized tensor library for deep learning using GPUs and CPUs.
+
+For more information about PyTorch, you can visit the [PyTorch documentation](https://pytorch.org/docs/stable/index.html).
+
+### Horovod
+
+Horovod is a distributed deep learning training framework for TensorFlow, Keras, and PyTorch. Horovod was developed to make distributed deep learning fast and easy to use. With this framework, an existing training script can be scaled up to run on hundreds of GPUs in just a few lines of code. In addition, Horovod can run on top of Apache Spark, making it possible to unify data processing and model training into a single pipeline.
+
+To learn more about how to run distributed training jobs in Azure Synapse Analytics, you can visit the following tutorials:
+ - [Tutorial: Distributed training with Horovod and PyTorch](./tutorial-horovod-pytorch.md)
+ - [Tutorial: Distributed training with Horovod and Tensorflow](./tutorial-horovod-tensorflow.md)
+
+For more information about Horovod, you can visit the [Horovod documentation](https://horovod.readthedocs.io/stable/).
+
+### Petastorm
+
+Petastorm is an open source data access library which enables single-node or distributed training of deep learning models. This library enables training directly from datasets in Apache Parquet format and from datasets that have already been loaded as an Apache Spark DataFrame. Petastorm supports popular training frameworks such as TensorFlow and PyTorch.
+
+For more information about Petastorm, you can visit the [Petastorm GitHub page](https://github.com/uber/petastorm) or the [Petastorm API documentation](https://petastorm.readthedocs.io/latest).
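+
+Before you move on to the tutorials, you can run a quick check from a notebook cell on a GPU-enabled pool to confirm that the frameworks described above are installed and can see the GPU. This is a minimal sketch; the exact versions reported depend on the Azure Synapse runtime your pool uses.
+
+```python
+import tensorflow as tf
+import torch
+import horovod    # confirm the distributed training library imports cleanly
+import petastorm  # confirm the data access library imports cleanly
+
+# Framework versions shipped with the Apache Spark 3 runtime (these vary by runtime release).
+print("TensorFlow:", tf.__version__)
+print("PyTorch:", torch.__version__)
+
+# GPU visibility: both checks should report a GPU on a GPU-accelerated pool.
+print("TensorFlow GPUs:", tf.config.list_physical_devices('GPU'))
+print("PyTorch CUDA available:", torch.cuda.is_available())
+```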
+
+## Next steps
+
+This article provides an overview of the various options to train machine learning models within Apache Spark pools in Azure Synapse Analytics. You can learn more about model training by following the tutorials below:
+
+- Run SparkML experiments: [Apache SparkML Tutorial](../spark/apache-spark-machine-learning-mllib-notebook.md)
+- View libraries within the Apache Spark 3 runtime: [Apache Spark 3 Runtime](../spark/apache-spark-3-runtime.md)
+- Accelerate ETL workloads with RAPIDS: [Apache Spark Rapids](../spark/apache-spark-rapids-gpu.md)
\ No newline at end of file
diff --git a/articles/synapse-analytics/machine-learning/tutorial-horovod-pytorch.md b/articles/synapse-analytics/machine-learning/tutorial-horovod-pytorch.md
new file mode 100644
index 0000000000000..4098ad4330d54
--- /dev/null
+++ b/articles/synapse-analytics/machine-learning/tutorial-horovod-pytorch.md
@@ -0,0 +1,254 @@
+---
+title: 'Tutorial: Distributed training with Horovod and PyTorch'
+description: Tutorial on how to run distributed training with the Horovod Estimator and PyTorch
+ms.service: synapse-analytics
+ms.subservice: machine-learning
+ms.topic: tutorial
+ms.date: 04/19/2022
+author: midesa
+ms.author: midesa
+---
+
+# Tutorial: Distributed Training with Horovod Estimator and PyTorch (Preview)
+
+[Horovod](https://github.com/horovod/horovod) is a distributed training framework for libraries like TensorFlow and PyTorch. With Horovod, users can scale up an existing training script to run on hundreds of GPUs in just a few lines of code.
+
+Within Azure Synapse Analytics, users can quickly get started with Horovod using the default Apache Spark 3 runtime. For Spark ML pipeline applications using PyTorch, users can use the horovod.spark estimator API. This notebook uses an Apache Spark dataframe to perform distributed training of a deep neural network (DNN) model on the MNIST dataset. This tutorial leverages PyTorch and the Horovod Estimator to run the training process.
+
+## Prerequisites
+
+- [Azure Synapse Analytics workspace](../get-started-create-workspace.md) with an Azure Data Lake Storage Gen2 storage account configured as the default storage. You need to be the *Storage Blob Data Contributor* of the Data Lake Storage Gen2 file system that you work with.
+- Create a GPU-enabled Apache Spark pool in your Azure Synapse Analytics workspace. For details, see [Create a GPU-enabled Apache Spark pool in Azure Synapse](../spark/apache-spark-gpu-concept.md). For this tutorial, we suggest using the GPU-Large cluster size with 3 nodes.
+
+## Configure the Apache Spark session
+
+At the start of the session, we will need to configure a few Apache Spark settings. In most cases, we only need to set ```numExecutors``` and ```spark.rapids.memory.gpu.reserve```. For very large models, users may also need to configure the ```spark.kryoserializer.buffer.max``` setting. For TensorFlow models, users will need to set ```spark.executorEnv.TF_FORCE_GPU_ALLOW_GROWTH``` to true.
+
+In the example below, you can see how the Spark configurations can be passed with the ```%%configure``` command. The detailed meaning of each parameter is explained in the [Apache Spark configuration documentation](https://spark.apache.org/docs/latest/configuration.html). The values provided below are the suggested, best practice values for Azure Synapse GPU-large pools.
+ +```spark + +%%configure -f +{ + "driverMemory": "30g", + "driverCores": 4, + "executorMemory": "60g", + "executorCores": 12, + "numExecutors": 3, + "conf":{ + "spark.rapids.memory.gpu.reserve": "10g", + "spark.executorEnv.TF_FORCE_GPU_ALLOW_GROWTH": "true", + "spark.kryoserializer.buffer.max": "2000m" + } +} +``` + +For this tutorial, we will use the following configurations: + +```python + +%%configure -f +{ + "numExecutors": 3, + "conf":{ + "spark.rapids.memory.gpu.reserve": "10g" + } +} +``` + +> [!NOTE] +> When training with Horovod, users should set the Spark configuration for ```numExecutors``` to be less or equal to the number of nodes. + +## Import dependencies + +In this tutorial, we will leverage PySpark to read and process the dataset. We will then use PyTorch and Horovod to build the distributed neural network (DNN) model and run the training process. To get started, we will need to import the following dependencies: + +```python +# base libs +import sys +import uuid + +# numpy +import numpy as np + +# pyspark related +import pyspark +import pyspark.sql.types as T +from pyspark.ml.evaluation import MulticlassClassificationEvaluator +from pyspark.sql import SparkSession +from pyspark.sql.functions import udf + +# pytorch related +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim + +# horovod related +import horovod.spark.torch as hvd +from horovod.spark.common.backend import SparkBackend +from horovod.spark.common.store import Store + +# azure related +from azure.synapse.ml.horovodutils import AdlsStore +``` + +## Connect to alternative storage account + +We will need the Azure Data Lake Storage (ADLS) account for storing intermediate and model data. If you are using an alternative storage account, be sure to set up the [linked service](../../data-factory/concepts-linked-services.md) to automatically authenticate and read from the account. In addition, you will need to modify the following properties below: ```remote_url```, ```account_name```, and ```linked_service_name```. + +```python +num_proc = 3 # equal to numExecutors +batch_size = 128 +epochs = 3 +lr_single_node = 0.01 # learning rate for single node code + +uuid_str = str(uuid.uuid4()) # with uuid, each run will use a new directory +work_dir = '/tmp/' + uuid_str + +# create adls store for model training, use your own adls account info +remote_url = "<>" +account_name = "<>" +linked_service_name = "<>" +sas_token = TokenLibrary.getConnectionString(linked_service_name) +adls_store_path = remote_url + work_dir + +store = AdlsStore.create(adls_store_path, + storage_options={ + 'account_name': account_name, + 'sas_token': sas_token + }, + save_runs=True) + +print(adls_store_path) +``` + +## Prepare dataset + +Next, we will prepare the dataset for training. In this tutorial, we will use the MNIST dataset from [Azure Open Datasets](https://docs.microsoft.com/azure/open-datasets/dataset-mnist?tabs=azureml-opendatasets). + +```python +# Initialize SparkSession +spark = SparkSession.builder.getOrCreate() + +# Download MNIST dataset from Azure Open Datasets +from azureml.opendatasets import MNIST + +mnist = MNIST.get_tabular_dataset() +mnist_df = mnist.to_pandas_dataframe() +mnist_df.info() + +# Preprocess dataset +mnist_df['features'] = mnist_df.iloc[:, :784].values.tolist() +mnist_df.drop(mnist_df.iloc[:, :784], inplace=True, axis=1) +mnist_df.head() +``` + +## Process data with Apache Spark + +Now, we will create an Apache Spark dataframe. 
This dataframe will be used with the ```HorovodEstimator``` for training. + +```python +# Create Spark DataFrame for training +df = spark.createDataFrame(mnist_df) + +# repartition DataFrame for training +train_df = df.repartition(num_proc) + +# Train/test split +train_df, test_df = train_df.randomSplit([0.9, 0.1]) + +# show the dataset +train_df.show() +train_df.count() +``` + +## Define DNN model + +Once we have finished processing our dataset, we can now define our PyTorch model. The same code could also be used to train a single-node PyTorch model. + +```python +# Define the PyTorch model without any Horovod-specific parameters +class Net(nn.Module): + + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(1, 10, kernel_size=5) + self.conv2 = nn.Conv2d(10, 20, kernel_size=5) + self.conv2_drop = nn.Dropout2d() + self.fc1 = nn.Linear(320, 50) + self.fc2 = nn.Linear(50, 10) + + def forward(self, x): + x = x.float() + x = F.relu(F.max_pool2d(self.conv1(x), 2)) + x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) + x = x.view(-1, 320) + x = F.relu(self.fc1(x)) + x = F.dropout(x, training=self.training) + x = self.fc2(x) + return F.log_softmax(x) + + +model = Net() +optimizer = optim.SGD(model.parameters(), + lr=lr_single_node * num_proc, + momentum=0.5) # notice the lr is scaled up +loss = nn.NLLLoss() + +``` + +## Train model + +Now, we can train a Horovod Spark estimator on top of our Apache Spark dataframe. +```python +# Train a Horovod Spark Estimator on the DataFrame +backend = SparkBackend(num_proc=num_proc, + stdout=sys.stdout, + stderr=sys.stderr, + prefix_output_with_timestamp=True) + +torch_estimator = hvd.TorchEstimator( + backend=backend, + store=store, + partitions_per_process=1, # important for GPU training + model=model, + optimizer=optimizer, + loss=lambda input, target: loss(input, target.long()), + input_shapes=[[-1, 1, 28, 28]], + feature_cols=['features'], + label_cols=['label'], + batch_size=batch_size, + epochs=epochs, + validation=0.1, + verbose=2) + +torch_model = torch_estimator.fit(train_df).setOutputCols(['label_prob']) +``` + +## Evaluate trained model + +Once the training process has finished, we can then evaluate the model on the test dataset. + +```python +# Evaluate the model on the held-out test DataFrame +pred_df = torch_model.transform(test_df) + +argmax = udf(lambda v: float(np.argmax(v)), returnType=T.DoubleType()) +pred_df = pred_df.withColumn('label_pred', argmax(pred_df.label_prob)) +evaluator = MulticlassClassificationEvaluator(predictionCol='label_pred', + labelCol='label', + metricName='accuracy') + +print('Test accuracy:', evaluator.evaluate(pred_df)) +``` + +## Clean up resources + +To ensure the Spark instance is shut down, end any connected sessions(notebooks). The pool shuts down when the **idle time** specified in the Apache Spark pool is reached. You can also select **stop session** from the status bar at the upper right of the notebook. 
+
+![Screenshot showing the Stop session button on the status bar.](./media/tutorial-build-applications-use-mmlspark/stop-session.png)
+
+## Next steps
+
+* [Check out Synapse sample notebooks](https://github.com/Azure-Samples/Synapse/tree/main/MachineLearning)
+* [Learn more about GPU-enabled Apache Spark pools](../spark/apache-spark-gpu-concept.md)
\ No newline at end of file
diff --git a/articles/synapse-analytics/machine-learning/tutorial-horovod-tensorflow.md b/articles/synapse-analytics/machine-learning/tutorial-horovod-tensorflow.md
new file mode 100644
index 0000000000000..dbcff6bff823c
--- /dev/null
+++ b/articles/synapse-analytics/machine-learning/tutorial-horovod-tensorflow.md
@@ -0,0 +1,349 @@
+---
+title: 'Tutorial: Distributed training with Horovod and Tensorflow'
+description: Tutorial on how to run distributed training with the Horovod Runner and Tensorflow
+ms.service: synapse-analytics
+ms.subservice: machine-learning
+ms.topic: tutorial
+ms.date: 04/19/2022
+author: midesa
+ms.author: midesa
+---
+
+# Tutorial: Distributed Training with Horovod Runner and Tensorflow (Preview)
+
+[Horovod](https://github.com/horovod/horovod) is a distributed training framework for libraries like TensorFlow and PyTorch. With Horovod, users can scale up an existing training script to run on hundreds of GPUs in just a few lines of code.
+
+Within Azure Synapse Analytics, users can quickly get started with Horovod using the default Apache Spark 3 runtime. For Spark ML pipeline applications using TensorFlow, users can use ```HorovodRunner```. This notebook uses an Apache Spark dataframe to perform distributed training of a deep neural network (DNN) model on the MNIST dataset. This tutorial leverages TensorFlow and the ```HorovodRunner``` to run the training process.
+
+## Prerequisites
+
+- [Azure Synapse Analytics workspace](../get-started-create-workspace.md) with an Azure Data Lake Storage Gen2 storage account configured as the default storage. You need to be the *Storage Blob Data Contributor* of the Data Lake Storage Gen2 file system that you work with.
+- Create a GPU-enabled Apache Spark pool in your Azure Synapse Analytics workspace. For details, see [Create a GPU-enabled Apache Spark pool in Azure Synapse](../spark/apache-spark-gpu-concept.md). For this tutorial, we suggest using the GPU-Large cluster size with 3 nodes.
+
+## Configure the Apache Spark session
+
+At the start of the session, we will need to configure a few Apache Spark settings. In most cases, we only need to set ```numExecutors``` and ```spark.rapids.memory.gpu.reserve```. For very large models, users may also need to configure the ```spark.kryoserializer.buffer.max``` setting. For TensorFlow models, users will need to set ```spark.executorEnv.TF_FORCE_GPU_ALLOW_GROWTH``` to true.
+
+In the example below, you can see how the Spark configurations can be passed with the ```%%configure``` command. The detailed meaning of each parameter is explained in the [Apache Spark configuration documentation](https://spark.apache.org/docs/latest/configuration.html). The values provided below are the suggested, best practice values for Azure Synapse GPU-large pools.
+
+```spark
+
+%%configure -f
+{
+    "driverMemory": "30g",
+    "driverCores": 4,
+    "executorMemory": "60g",
+    "executorCores": 12,
+    "numExecutors": 3,
+    "conf":{
+        "spark.rapids.memory.gpu.reserve": "10g",
+        "spark.executorEnv.TF_FORCE_GPU_ALLOW_GROWTH": "true",
+        "spark.kryoserializer.buffer.max": "2000m"
+    }
+}
+```
+
+For this tutorial, we will use the following configurations:
+
+```python
+
+%%configure -f
+{
+    "numExecutors": 3,
+    "conf":{
+        "spark.rapids.memory.gpu.reserve": "10g",
+        "spark.executorEnv.TF_FORCE_GPU_ALLOW_GROWTH": "true"
+    }
+}
+```
+
+> [!NOTE]
+> When training with Horovod, users should set the Spark configuration for ```numExecutors``` to be less than or equal to the number of nodes.
+
+## Set up the primary storage account
+
+We will need the Azure Data Lake Storage (ADLS) account for storing intermediate and model data. If you are using an alternative storage account, be sure to set up the [linked service](../../data-factory/concepts-linked-services.md) to automatically authenticate and read from the account.
+
+In this example, we will read from the primary Azure Synapse Analytics storage account. To do this, you will need to modify the following property: ```remote_url```.
+
+```python
+# Specify training parameters
+num_proc = 3  # equal to numExecutors
+batch_size = 128
+epochs = 3
+lr_single_node = 0.1  # learning rate for single node code
+
+# configure adls store remote url
+remote_url = "<>"
+```
+
+## Prepare dataset
+
+Next, we will prepare the dataset for training. In this tutorial, we will use the MNIST dataset from [Azure Open Datasets](https://docs.microsoft.com/azure/open-datasets/dataset-mnist?tabs=azureml-opendatasets).
+
+```python
+def get_dataset(rank=0, size=1):
+    # import dependency libs
+    from azureml.opendatasets import MNIST
+    from sklearn.preprocessing import OneHotEncoder
+    import numpy as np
+
+    # Download MNIST dataset from Azure Open Datasets
+    mnist = MNIST.get_tabular_dataset()
+    mnist_df = mnist.to_pandas_dataframe()
+
+    # Preprocess dataset
+    mnist_df['features'] = mnist_df.iloc[:, :784].values.tolist()
+    mnist_df.drop(mnist_df.iloc[:, :784], inplace=True, axis=1)
+
+    x = np.array(mnist_df['features'].values.tolist())
+    y = np.array(mnist_df['label'].values.tolist()).reshape(-1, 1)
+
+    enc = OneHotEncoder()
+    enc.fit(y)
+    y = enc.transform(y).toarray()
+
+    (x_train, y_train), (x_test, y_test) = (x[:60000], y[:60000]), (x[60000:], y[60000:])
+
+    # Prepare dataset for distributed training
+    x_train = x_train[rank::size]
+    y_train = y_train[rank::size]
+    x_test = x_test[rank::size]
+    y_test = y_test[rank::size]
+
+    # Reshape and Normalize data for model input
+    x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
+    x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
+    x_train = x_train.astype('float32')
+    x_test = x_test.astype('float32')
+    x_train /= 255.0
+    x_test /= 255.0
+
+    return (x_train, y_train), (x_test, y_test)
+
+```
+
+## Define DNN model
+
+Once we have finished processing our dataset, we can now define our TensorFlow model. The same code could also be used to train a single-node TensorFlow model.
+ +```python +# Define the TensorFlow model without any Horovod-specific parameters +def get_model(): + from tensorflow.keras import models + from tensorflow.keras import layers + + model = models.Sequential() + model.add( + layers.Conv2D(32, + kernel_size=(3, 3), + activation='relu', + input_shape=(28, 28, 1))) + model.add(layers.Conv2D(64, (3, 3), activation='relu')) + model.add(layers.MaxPooling2D(pool_size=(2, 2))) + model.add(layers.Dropout(0.25)) + model.add(layers.Flatten()) + model.add(layers.Dense(128, activation='relu')) + model.add(layers.Dropout(0.5)) + model.add(layers.Dense(10, activation='softmax')) + return model + +``` + +## Define a training function for a single node + +First, we will train our Tensorflow model on the driver node of the Apache Spark pool. Once we have finished the training process, we will evaluate the model and print the loss and accuracy scores. + +```python + +def train(learning_rate=0.1): + import tensorflow as tf + from tensorflow import keras + + gpus = tf.config.experimental.list_physical_devices('GPU') + for gpu in gpus: + tf.config.experimental.set_memory_growth(gpu, True) + + # Prepare dataset + (x_train, y_train), (x_test, y_test) = get_dataset() + + # Initialize model + model = get_model() + + # Specify the optimizer (Adadelta in this example) + optimizer = keras.optimizers.Adadelta(learning_rate=learning_rate) + + model.compile(optimizer=optimizer, + loss='categorical_crossentropy', + metrics=['accuracy']) + + model.fit(x_train, + y_train, + batch_size=batch_size, + epochs=epochs, + verbose=2, + validation_data=(x_test, y_test)) + return model + +# Run the training process on the driver +model = train(learning_rate=lr_single_node) + +# Evaluate the single node, trained model +_, (x_test, y_test) = get_dataset() +loss, accuracy = model.evaluate(x_test, y_test, batch_size=128) +print("loss:", loss) +print("accuracy:", accuracy) + +``` + +## Migrate to HorovodRunner for distributed training + +Next, we will take a look at how the same code could be re-run using ```HorovodRunner``` for distributed training. + +### Define training function + +To do this, we will first define a training function for ```HorovodRunner```. 
+ +```python +# Define training function for Horovod runner +def train_hvd(learning_rate=0.1): + # Import base libs + import tempfile + import os + import shutil + import atexit + + # Import tensorflow modules to each worker + import tensorflow as tf + from tensorflow import keras + import horovod.tensorflow.keras as hvd + + # Initialize Horovod + hvd.init() + + # Pin GPU to be used to process local rank (one GPU per process) + # These steps are skipped on a CPU cluster + gpus = tf.config.experimental.list_physical_devices('GPU') + for gpu in gpus: + tf.config.experimental.set_memory_growth(gpu, True) + if gpus: + tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], + 'GPU') + + # Call the get_dataset function you created, this time with the Horovod rank and size + (x_train, y_train), (x_test, y_test) = get_dataset(hvd.rank(), hvd.size()) + + # Initialize model with random weights + model = get_model() + + # Adjust learning rate based on number of GPUs + optimizer = keras.optimizers.Adadelta(learning_rate=learning_rate * + hvd.size()) + + # Use the Horovod Distributed Optimizer + optimizer = hvd.DistributedOptimizer(optimizer) + + model.compile(optimizer=optimizer, + loss='categorical_crossentropy', + metrics=['accuracy']) + + # Create a callback to broadcast the initial variable states from rank 0 to all other processes. + # This is required to ensure consistent initialization of all workers when training is started with random weights or restored from a checkpoint. + callbacks = [ + hvd.callbacks.BroadcastGlobalVariablesCallback(0), + ] + + # Model checkpoint location. + ckpt_dir = tempfile.mkdtemp() + ckpt_file = os.path.join(ckpt_dir, 'checkpoint.h5') + atexit.register(lambda: shutil.rmtree(ckpt_dir)) + + # Save checkpoints only on worker 0 to prevent conflicts between workers + if hvd.rank() == 0: + callbacks.append( + keras.callbacks.ModelCheckpoint(ckpt_file, + monitor='val_loss', + mode='min', + save_best_only=True)) + + model.fit(x_train, + y_train, + batch_size=batch_size, + callbacks=callbacks, + epochs=epochs, + verbose=2, + validation_data=(x_test, y_test)) + + # Return model bytes only on worker 0 + if hvd.rank() == 0: + with open(ckpt_file, 'rb') as f: + return f.read() + +``` + +### Run training + +Once we have defined the model, we will run the training process. + +```python +# Run training +import os +import sys +import horovod.spark + + +best_model_bytes = \ + horovod.spark.run(train_hvd, args=(lr_single_node, ), num_proc=num_proc, + env=os.environ.copy(), + stdout=sys.stdout, stderr=sys.stderr, verbose=2, + prefix_output_with_timestamp=True)[0] +``` + +### Save checkpoints to ADLS storage + +The code below shows how to save the checkpoints to the Azure Data Lake Storage (ADLS) account. + +```python +import tempfile +import fsspec +import os + +local_ckpt_dir = tempfile.mkdtemp() +local_ckpt_file = os.path.join(local_ckpt_dir, 'mnist-ckpt.h5') +adls_ckpt_file = remote_url + local_ckpt_file + +with open(local_ckpt_file, 'wb') as f: + f.write(best_model_bytes) + +## Upload local file to ADLS +fs = fsspec.filesystem('abfss') +fs.upload(local_ckpt_file, adls_ckpt_file) + +print(adls_ckpt_file) +``` + +### Evaluate Horovod trained model + +Once we have finished training our model, we can then take a look at the loss and accuracy for the final model. 
+
+```python
+import tensorflow as tf
+
+hvd_model = tf.keras.models.load_model(local_ckpt_file)
+
+_, (x_test, y_test) = get_dataset()
+loss, accuracy = hvd_model.evaluate(x_test, y_test, batch_size=128)
+print("loaded model loss and accuracy:", loss, accuracy)
+```
+
+## Clean up resources
+
+To ensure the Spark instance is shut down, end any connected sessions (notebooks). The pool shuts down when the **idle time** specified in the Apache Spark pool is reached. You can also select **stop session** from the status bar at the upper right of the notebook.
+
+![Screenshot showing the Stop session button on the status bar.](./media/tutorial-build-applications-use-mmlspark/stop-session.png)
+
+## Next steps
+
+* [Check out Synapse sample notebooks](https://github.com/Azure-Samples/Synapse/tree/main/MachineLearning)
+* [Learn more about GPU-enabled Apache Spark pools](../spark/apache-spark-gpu-concept.md)
\ No newline at end of file
diff --git a/articles/synapse-analytics/machine-learning/tutorial-load-data-petastorm.md b/articles/synapse-analytics/machine-learning/tutorial-load-data-petastorm.md
new file mode 100644
index 0000000000000..5d96a153b7e9e
--- /dev/null
+++ b/articles/synapse-analytics/machine-learning/tutorial-load-data-petastorm.md
@@ -0,0 +1,263 @@
+---
+title: 'Load data with Petastorm'
+description: This article provides a conceptual overview of how to load data with Petastorm.
+author: midesa
+ms.service: synapse-analytics
+ms.topic: conceptual
+ms.subservice: machine-learning
+ms.date: 04/19/2022
+ms.author: midesa
+---
+
+# Load data with Petastorm (Preview)
+
+Petastorm is an open source data access library which enables single-node or distributed training of deep learning models. This library enables training directly from datasets in Apache Parquet format and from datasets that have already been loaded as an Apache Spark DataFrame. Petastorm supports popular training frameworks such as TensorFlow and PyTorch.
+
+For more information about Petastorm, you can visit the [Petastorm GitHub page](https://github.com/uber/petastorm) or the [Petastorm API documentation](https://petastorm.readthedocs.io/latest).
+
+## Prerequisites
+
+- [Azure Synapse Analytics workspace](../get-started-create-workspace.md) with an Azure Data Lake Storage Gen2 storage account configured as the default storage. You need to be the *Storage Blob Data Contributor* of the Data Lake Storage Gen2 file system that you work with.
+- Create a GPU-enabled Apache Spark pool in your Azure Synapse Analytics workspace. For details, see [Create a GPU-enabled Apache Spark pool in Azure Synapse](../spark/apache-spark-gpu-concept.md). For this tutorial, we suggest using the GPU-Large cluster size with 3 nodes.
+
+## Configure the Apache Spark session
+
+At the start of the session, we will need to configure a few Apache Spark settings. In most cases, we only need to set ```numExecutors``` and ```spark.rapids.memory.gpu.reserve```. In the example below, you can see how the Spark configurations can be passed with the ```%%configure``` command. The detailed meaning of each parameter is explained in the [Apache Spark configuration documentation](https://spark.apache.org/docs/latest/configuration.html).
+
+```python
+%%configure -f
+{
+    "numExecutors": 3,
+    "conf":{
+        "spark.rapids.memory.gpu.reserve": "10g"
+    }
+}
+```
+
+## Petastorm write APIs
+
+A dataset created using Petastorm is stored in an Apache Parquet format.
On top of a Parquet schema, Petastorm also stores higher-level schema information that makes multidimensional arrays into a native part of a Petastorm dataset. + +In the sample below, we create a dataset using PySpark. We write the dataset to an Azure Data Lake Storage Gen2 account. + +```python +import numpy as np +from pyspark.sql import SparkSession +from pyspark.sql.types import IntegerType + +from petastorm.codecs import ScalarCodec, CompressedImageCodec, NdarrayCodec +from petastorm.etl.dataset_metadata import materialize_dataset +from petastorm.unischema import dict_to_spark_row, Unischema, UnischemaField + +# The schema defines how the dataset schema looks like +HelloWorldSchema = Unischema('HelloWorldSchema', [ + UnischemaField('id', np.int32, (), ScalarCodec(IntegerType()), False), + UnischemaField('image1', np.uint8, (128, 256, 3), CompressedImageCodec('png'), False), + UnischemaField('array_4d', np.uint8, (None, 128, 30, None), NdarrayCodec(), False), +]) + + +def row_generator(x): + """Returns a single entry in the generated dataset. Return a bunch of random values as an example.""" + return {'id': x, + 'image1': np.random.randint(0, 255, dtype=np.uint8, size=(128, 256, 3)), + 'array_4d': np.random.randint(0, 255, dtype=np.uint8, size=(4, 128, 30, 3))} + + +def generate_petastorm_dataset(output_url): + rowgroup_size_mb = 256 + + spark = SparkSession.builder.config('spark.driver.memory', '2g').master('local[2]').getOrCreate() + sc = spark.sparkContext + + # Wrap dataset materialization portion. Will take care of setting up spark environment variables as + # well as save petastorm specific metadata + rows_count = 10 + with materialize_dataset(spark, output_url, HelloWorldSchema, rowgroup_size_mb): + + rows_rdd = sc.parallelize(range(rows_count))\ + .map(row_generator)\ + .map(lambda x: dict_to_spark_row(HelloWorldSchema, x)) + + spark.createDataFrame(rows_rdd, HelloWorldSchema.as_spark_schema()) \ + .coalesce(10) \ + .write \ + .mode('overwrite') \ + .parquet(output_url) + + +output_url = 'abfs://container_name@storage_account_url/data_dir' #use your own adls account info +generate_petastorm_dataset(output_url) +``` + +## Petastorm read APIs + +### Read dataset from a primary storage account + +The ```petastorm.reader.Reader``` class is the main entry point for user code that accesses the data from an ML framework such as Tensorflow or Pytorch. You can read a dataset using the ```petastorm.reader.Reader``` class and the ```petastorm.make_reader``` factory method. + +In the example below, you can see how you can pass an ```abfs``` URL protocol. + +```python +from petastorm import make_reader + +#on primary storage associated with the workspace, url can be abbreviated with container path for data directory +with make_reader('abfs:////') as reader: + for row in reader: + print(row) +``` + +### Read dataset from secondary storage account + +If you are using an alternative storage account, be sure to set up the [linked service](../../data-factory/concepts-linked-services.md) to automatically authenticate and read from the account. In addition, you will need to modify the following properties below: ```remote_url```, ```account_name```, and ```linked_service_name```. 
+ +```python +from petastorm import make_reader + +# create sas token for storage account access, use your own adls account info +remote_url = "abfs://container_name@storage_account_url" +account_name = "<>" +linked_service_name = '<>' +TokenLibrary = spark._jvm.com.microsoft.azure.synapse.tokenlibrary.TokenLibrary +sas_token = TokenLibrary.getConnectionString(linked_service_name) + +with make_reader('{}/data_directory'.format(remote_url), storage_options = {'sas_token' : sas_token}) as reader: + for row in reader: + print(row) +``` + +### Read dataset in batches + +In the example below, you can see how you can pass an ```abfs``` URL protocol to read data in batches. This example uses the ```make_batch_reader``` class. + +```python +from petastorm import make_batch_reader + +with make_batch_reader('abfs:////', schema_fields=["value1", "value2"]) as reader: + for schema_view in reader: + print("Batched read:\nvalue1: {0} value2: {1}".format(schema_view.value1, schema_view.value2)) +``` + +## PyTorch API + +To read a Petastorm dataset from PyTorch, you can use the adapter ```petastorm.pytorch.DataLoader``` class. This allows for custom PyTorch collating functions and transforms to be supplied. + +In this example, we will show how Petastorm DataLoader can be used to load a Petastorm dataset with the help of make_reader API. This first section creates the definition of a ```Net``` class and ```train``` and ```test``` function. + +```python +from __future__ import division, print_function + +import argparse +import pyarrow +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torchvision import transforms + +from petastorm import make_reader, TransformSpec +from petastorm.pytorch import DataLoader +from pyspark.sql.functions import col + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(1, 10, kernel_size=5) + self.conv2 = nn.Conv2d(10, 20, kernel_size=5) + self.conv2_drop = nn.Dropout2d() + self.fc1 = nn.Linear(320, 50) + self.fc2 = nn.Linear(50, 10) + + def forward(self, x): + x = F.relu(F.max_pool2d(self.conv1(x), 2)) + x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) + x = x.view(-1, 320) + x = F.relu(self.fc1(x)) + x = F.dropout(x, training=self.training) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + +def train(model, device, train_loader, log_interval, optimizer, epoch): + model.train() + for batch_idx, row in enumerate(train_loader): + data, target = row['image'].to(device), row['digit'].to(device) + optimizer.zero_grad() + output = model(data) + loss = F.nll_loss(output, target) + loss.backward() + optimizer.step() + if batch_idx % log_interval == 0: + print('Train Epoch: {} [{}]\tLoss: {:.6f}'.format( + epoch, batch_idx * len(data), loss.item())) + +def test(model, device, test_loader): + model.eval() + test_loss = 0 + correct = 0 + count = 0 + with torch.no_grad(): + for row in test_loader: + data, target = row['image'].to(device), row['digit'].to(device) + output = model(data) + test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss + pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability + correct += pred.eq(target.view_as(pred)).sum().item() + count += data.shape[0] + test_loss /= count + print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( + test_loss, correct, count, 100. 
* correct / count)) + +def _transform_row(mnist_row): + # For this example, the images are stored as simpler ndarray (28,28), but the + # training network expects 3-dim images, hence the additional lambda transform. + transform = transforms.Compose([ + transforms.Lambda(lambda nd: nd.reshape(28, 28, 1)), + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)) + ]) + # In addition, the petastorm pytorch DataLoader does not distinguish the notion of + # data or target transform, but that actually gives the user more flexibility + # to make the desired partial transform, as shown here. + result_row = { + 'image': transform(mnist_row['image']), + 'digit': mnist_row['digit'] + } + + return result_row +``` + +In this example, an Azure Data Lake Storage account is used to store intermediate data. To store this data, you must set up a Linked Service to the storage account and retrieve the following pieces of information: ```remote_url```, ```account_name```, and ```linked_service_name```. + +```python +from petastorm import make_reader + +# create sas token for storage account access, use your own adls account info +remote_url = "abfs://container_name@storage_account_url" +account_name = "" +linked_service_name = '' +TokenLibrary = spark._jvm.com.microsoft.azure.synapse.tokenlibrary.TokenLibrary +sas_token = TokenLibrary.getConnectionString(linked_service_name) + +# Read Petastorm dataset and apply custom PyTorch transformation functions + +device = torch.device('cpu') #For GPU, it will be torch.device('cuda'). More details: https://pytorch.org/docs/stable/tensor_attributes.html#torch-device + +model = Net().to(device) +optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5) + +loop_epochs = 1 +reader_epochs = 1 + +transform = TransformSpec(_transform_row, removed_fields=['idx']) + +for epoch in range(1, loop_epochs + 1): + with DataLoader(make_reader('{}/train'.format(remote_url), num_epochs=reader_epochs, transform_spec=transform),batch_size=5) as train_loader: + train(model, device, train_loader, 10, optimizer, epoch) + with DataLoader(make_reader('{}/test'.format(remote_url), num_epochs=reader_epochs, transform_spec=transform), batch_size=5) as test_loader: + test(model, device, test_loader) +``` + +## Next steps + +* [Check out Synapse sample notebooks](https://github.com/Azure-Samples/Synapse/tree/main/MachineLearning) +* [Learn more about GPU-enabled Apache Spark pools](../spark/apache-spark-gpu-concept.md) \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/index.yml b/articles/synapse-analytics/migration-guides/index.yml new file mode 100644 index 0000000000000..bfda95764d156 --- /dev/null +++ b/articles/synapse-analytics/migration-guides/index.yml @@ -0,0 +1,89 @@ +### YamlMime:Landing + +title: Azure Synapse Analytics migration guides # < 60 chars +summary: Azure Synapse migration guides tell the story of bringing existing enterprise analytics solutions to the limitless analytics in Azure Synapse. # < 160 chars +metadata: + title: Azure Synapse Analytics Migration Guides # Required; page title displayed in search results. Include the brand. < 60 chars. + description: Azure Synapse migration guides tell the story of bringing existing enterprise analytics solutions to the limitless analytics in Azure Synapse. # Required; article description that is displayed in search results. < 160 chars. + ms.service: synapse-analytics #Required; service per approved list. service slug assigned to your service by ACOM. 
+ ms.subservice: overview + ms.topic: landing-page # Required + ms.collection: collection + author: WilliamDAssafMSFT #Required; your GitHub user alias, with correct capitalization. + ms.author: wiassaf #Required; microsoft alias of author; optional team alias. + ms.date: 05/24/2022 #Required; mm/dd/yyyy format. + +# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new + +landingContent: +# Cards and links should be based on top customer tasks or top subjects +# Start card title with a verb + # Card (optional) + - title: Overview + linkLists: + - linkListType: overview + links: + - text: What is Azure Synapse Analytics? + url: ../overview-what-is.md + - text: Get Started with Azure Synapse Analytics + url: ../get-started.md + - text: What is dedicated SQL pool (formerly SQL DW)? + url: ../sql-data-warehouse/sql-data-warehouse-overview-what-is.md + - text: Benefits of cloud migration + url: https://azure.microsoft.com/overview/cloud-migration-benefits-challenges + - text: Migrate a data warehouse to a dedicated SQL pool + url: migrate-to-synapse-analytics-guide.md + - text: Enable Synapse workspace features for a dedicated SQL pool (formerly SQL DW) + url: ../sql-data-warehouse/workspace-connected-create.md + - title: Migration resources + linkLists: + - linkListType: architecture + links: + - text: Azure Synapse Resources + url: ../index.yml + - text: Azure Synapse SQL architecture + url: ../sql/overview-architecture.md + - linkListType: reference + links: + - text: "Customer story: Co-op" + url: https://customers.microsoft.com/story/845578-co-op-retailers-azure + - linkListType: deploy + links: + - text: "Preferred migration accelerator: Next Pathway" + url: https://www.nextpathway.com/ + - title: From IBM Netezza + linkLists: + - linkListType: overview + links: + - text: 1. Design and performance for IBM Netezza migrations + url: netezza/1-design-performance-migration.md + - text: 2. ETL and load migration considerations + url: netezza/2-etl-load-migration-considerations.md + - text: 3. Security access operations + url: netezza/3-security-access-operations.md + - text: 4. Visualization and reporting + url: netezza/4-visualization-reporting.md + - text: 5. Minimize SQL issues + url: netezza/5-minimize-sql-issues.md + - text: 6. Microsoft and third-party migration tools + url: netezza/6-microsoft-third-party-migration-tools.md + - text: 7. Beyond migration implementation + url: netezza/7-beyond-data-warehouse-migration.md + - title: From Teradata + linkLists: + - linkListType: overview + links: + - text: 1. Design and performance for Teradata migrations + url: teradata/1-design-performance-migration.md + - text: 2. ETL and load migration considerations + url: teradata/2-etl-load-migration-considerations.md + - text: 3. Security access operations + url: teradata/3-security-access-operations.md + - text: 4. Visualization and reporting + url: teradata/4-visualization-reporting.md + - text: 5. Minimize SQL issues + url: teradata/5-minimize-sql-issues.md + - text: 6. Microsoft and third-party migration tools + url: teradata/6-microsoft-third-party-migration-tools.md + - text: 7. 
Beyond migration implementation + url: teradata/7-beyond-data-warehouse-migration.md \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/media/1-design-performance-migration/azure-synapse-ecosystem.png b/articles/synapse-analytics/migration-guides/media/1-design-performance-migration/azure-synapse-ecosystem.png new file mode 100644 index 0000000000000..1d8006150e423 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/1-design-performance-migration/azure-synapse-ecosystem.png differ diff --git a/articles/synapse-analytics/migration-guides/media/1-design-performance-migration/migration-steps.png b/articles/synapse-analytics/migration-guides/media/1-design-performance-migration/migration-steps.png new file mode 100644 index 0000000000000..1314a31feec94 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/1-design-performance-migration/migration-steps.png differ diff --git a/articles/synapse-analytics/migration-guides/media/2-etl-load-migration-considerations/migration-options-flowchart.png b/articles/synapse-analytics/migration-guides/media/2-etl-load-migration-considerations/migration-options-flowchart.png new file mode 100644 index 0000000000000..d2757f735b3f0 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/2-etl-load-migration-considerations/migration-options-flowchart.png differ diff --git a/articles/synapse-analytics/migration-guides/media/3-security-access-operations/automating-migration-privileges.png b/articles/synapse-analytics/migration-guides/media/3-security-access-operations/automating-migration-privileges.png new file mode 100644 index 0000000000000..88bf2d7ff1c53 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/3-security-access-operations/automating-migration-privileges.png differ diff --git a/articles/synapse-analytics/migration-guides/media/4-visualization-reporting/data-virtualization-semantics.png b/articles/synapse-analytics/migration-guides/media/4-visualization-reporting/data-virtualization-semantics.png new file mode 100644 index 0000000000000..c5db25678a17a Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/4-visualization-reporting/data-virtualization-semantics.png differ diff --git a/articles/synapse-analytics/migration-guides/media/4-visualization-reporting/migration-data-virtualization.png b/articles/synapse-analytics/migration-guides/media/4-visualization-reporting/migration-data-virtualization.png new file mode 100644 index 0000000000000..a488cbde9745c Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/4-visualization-reporting/migration-data-virtualization.png differ diff --git a/articles/synapse-analytics/migration-guides/media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows-lrg.png b/articles/synapse-analytics/migration-guides/media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows-lrg.png new file mode 100644 index 0000000000000..4b6454590692e Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows-lrg.png differ diff --git a/articles/synapse-analytics/migration-guides/media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows.png b/articles/synapse-analytics/migration-guides/media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows.png new file mode 100644 index 
0000000000000..a2ee2a1faccc0 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows.png differ diff --git a/articles/synapse-analytics/migration-guides/media/6-microsoft-3rd-party-migration-tools/azure-data-factory-wrangling-dataflows.png b/articles/synapse-analytics/migration-guides/media/6-microsoft-3rd-party-migration-tools/azure-data-factory-wrangling-dataflows.png new file mode 100644 index 0000000000000..dcc0309b25393 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/6-microsoft-3rd-party-migration-tools/azure-data-factory-wrangling-dataflows.png differ diff --git a/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/analytical-workload-platforms.png b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/analytical-workload-platforms.png new file mode 100644 index 0000000000000..ee28b28424b38 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/analytical-workload-platforms.png differ diff --git a/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-data-factory-pipeline.png b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-data-factory-pipeline.png new file mode 100644 index 0000000000000..737de76310305 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-data-factory-pipeline.png differ diff --git a/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-datalake-streaming-data.png b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-datalake-streaming-data.png new file mode 100644 index 0000000000000..da408160f6f4f Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-datalake-streaming-data.png differ diff --git a/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-ml-studio-ui.png b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-ml-studio-ui.png new file mode 100644 index 0000000000000..fa57bed387d61 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-ml-studio-ui.png differ diff --git a/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-synapse-analytics-lake-database.png b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-synapse-analytics-lake-database.png new file mode 100644 index 0000000000000..9e4964526ad1b Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-synapse-analytics-lake-database.png differ diff --git a/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-synapse-train-predict.png b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-synapse-train-predict.png new file mode 100644 index 0000000000000..1df60a79042ee Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-synapse-train-predict.png differ diff --git 
a/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/complex-data-warehouse-structure.png b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/complex-data-warehouse-structure.png new file mode 100644 index 0000000000000..ec0bae316012a Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/complex-data-warehouse-structure.png differ diff --git a/articles/synapse-analytics/migration-guides/migrate-to-synapse-analytics-guide.md b/articles/synapse-analytics/migration-guides/migrate-to-synapse-analytics-guide.md index 7569031f8b37f..0cd2c0db6ca5a 100644 --- a/articles/synapse-analytics/migration-guides/migrate-to-synapse-analytics-guide.md +++ b/articles/synapse-analytics/migration-guides/migrate-to-synapse-analytics-guide.md @@ -8,8 +8,8 @@ ms.devlang: ms.topic: conceptual author: WilliamDAssafMSFT ms.author: wiassaf -ms.reviewer: sngun -ms.date: 03/10/2021 +ms.reviewer: +ms.date: 05/24/2022 --- # Migrate a data warehouse to a dedicated SQL pool in Azure Synapse Analytics @@ -26,7 +26,7 @@ Consider using Azure Synapse Analytics when you: - Need the ability to scale compute and storage. - Want to save on costs by pausing compute resources when you don't need them. -Rather than Azure Synapse Analytics, consider other options for operational (OLTP) workloads that have: +Rather than Azure Synapse Analytics, consider other options for operational online transaction processing (OLTP) workloads that have: - High frequency reads and writes. - Large numbers of singleton selects. @@ -54,6 +54,11 @@ Performing a successful migration requires you to migrate your table schemas, co The Customer Advisory Team has some great Azure Synapse Analytics (formerly Azure SQL Data Warehouse) guidance published as blog posts. For more information on migration, see [Migrating data to Azure SQL Data Warehouse in practice](/archive/blogs/sqlcat/migrating-data-to-azure-sql-data-warehouse-in-practice). +For more information specifically about migrations from Netezza or Teradata to Azure Synapse Analytics, start at the first step of a seven-article sequence on migrations: + +- [Netezza to Azure Synapse Analytics migrations](netezza/1-design-performance-migration.md) +- [Teradata to Azure Synapse Analytics migrations](teradata/1-design-performance-migration.md) + ## Migration assets from real-world engagements For more assistance with completing this migration scenario, see the following resources. They were developed in support of a real-world migration project engagement. @@ -64,8 +69,12 @@ For more assistance with completing this migration scenario, see the following r | [Handling data encoding issues while loading data to Azure Synapse Analytics](https://azure.microsoft.com/blog/handling-data-encoding-issues-while-loading-data-to-sql-data-warehouse/) | This blog post provides insight on some of the data encoding issues you might encounter while using PolyBase to load data to SQL Data Warehouse. This article also provides some options that you can use to overcome such issues and load the data successfully. | | [Getting table sizes in Azure Synapse Analytics dedicated SQL pool](https://github.com/Microsoft/DataMigrationTeam/blob/master/Whitepapers/Getting%20table%20sizes%20in%20SQL%20DW.pdf) | One of the key tasks that an architect must perform is to get metrics about a new environment post-migration. 
Examples include collecting load times from on-premises to the cloud and collecting PolyBase load times. One of the most important tasks is to determine the storage size in SQL Data Warehouse compared to the customer's current platform. | + The Data SQL Engineering team developed these resources. This team's core charter is to unblock and accelerate complex modernization for data platform migration projects to Microsoft's Azure data platform. ## Videos Watch how [Walgreens migrated its retail inventory system](https://www.youtube.com/watch?v=86dhd8N1lH4) with about 100 TB of data from Netezza to Azure Synapse Analytics in record time. + +> [!TIP] +> For more information on Synapse migrations, see [Azure Synapse Analytics migration guides](index.yml). \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/netezza/1-design-performance-migration.md b/articles/synapse-analytics/migration-guides/netezza/1-design-performance-migration.md new file mode 100644 index 0000000000000..d651dce4142a9 --- /dev/null +++ b/articles/synapse-analytics/migration-guides/netezza/1-design-performance-migration.md @@ -0,0 +1,334 @@ +--- +title: "Design and performance for Netezza migrations" +description: Learn how Netezza and Azure Synapse SQL databases differ in their approach to high query performance on exceptionally large data volumes. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/31/2022 +--- + +# Design and performance for Netezza migrations + +This article is part one of a seven part series that provides guidance on how to migrate from Netezza to Azure Synapse Analytics. This article provides best practices for design and performance. + +## Overview + +Due to end of support from IBM, many existing users of Netezza data warehouse systems want to take advantage of the innovations provided by newer environments such as cloud, IaaS, and PaaS, and to delegate tasks like infrastructure maintenance and platform development to the cloud provider. + +> [!TIP] +> More than just a database—the Azure environment includes a comprehensive set of capabilities and tools. + +Although Netezza and Azure Synapse Analytics are both SQL databases designed to use massively parallel processing (MPP) techniques to achieve high query performance on exceptionally large data volumes, there are some basic differences in approach: + +- Legacy Netezza systems are often installed on-premises and use proprietary hardware, while Azure Synapse is cloud-based and uses Azure storage and compute resources. + +- Upgrading a Netezza configuration is a major task involving additional physical hardware and potentially lengthy database reconfiguration, or dump and reload. Since storage and compute resources are separate in the Azure environment, these resources can be scaled upwards or downwards independently, leveraging the elastic scaling capability. + +- Azure Synapse can be paused or resized as required to reduce resource utilization and cost. + +Microsoft Azure is a globally available, highly secure, scalable cloud environment that includes Azure Synapse and an ecosystem of supporting tools and capabilities. The next diagram summarizes the Azure Synapse ecosystem. 
+ +:::image type="content" source="../media/1-design-performance-migration/azure-synapse-ecosystem.png" border="true" alt-text="Chart showing the Azure Synapse ecosystem of supporting tools and capabilities."::: + +> [!TIP] +> Azure Synapse gives best-of-breed performance and price-performance in independent benchmarks. + +Azure Synapse provides best-of-breed relational database performance by using techniques such as massively parallel processing (MPP) and multiple levels of automated caching for frequently used data. See the results of this approach in independent benchmarks such as the one run recently by [GigaOm](https://research.gigaom.com/report/data-warehouse-cloud-benchmark/), which compares Azure Synapse to other popular cloud data warehouse offerings. Customers who have migrated to this environment have seen many benefits including: + +- Improved performance and price/performance. + +- Increased agility and shorter time to value. + +- Faster server deployment and application development. + +- Elastic scalability—only pay for actual usage. + +- Improved security/compliance. + +- Reduced storage and disaster recovery costs. + +- Lower overall TCO, better cost control, and streamlined operational expenditure (OPEX). + +To maximize these benefits, migrate new or existing data and applications to the Azure Synapse platform. In many organizations, this will include migrating an existing data warehouse from legacy on-premises platforms such as Netezza. At a high level, the basic process includes these steps: + +:::image type="content" source="../media/1-design-performance-migration/migration-steps.png" border="true" alt-text="Diagram showing the steps for preparing to migrate, migration, and post-migration."::: + +This paper looks at schema migration with a goal of equivalent or better performance of your migrated Netezza data warehouse and data marts on Azure Synapse. This paper applies specifically to migrations from an existing Netezza environment. + +## Design considerations + +### Migration scope + +> [!TIP] +> Create an inventory of objects to be migrated and document the migration process. + +#### Preparation for migration + +When migrating from a Netezza environment, there are some specific topics to consider in addition to the more general subjects described in this article. + +#### Choose the workload for the initial migration + +Legacy Netezza environments have typically evolved over time to encompass multiple subject areas and mixed workloads. When deciding where to start on an initial migration project, choose an area that can: + +- Prove the viability of migrating to Azure Synapse by quickly delivering the benefits of the new environment. + +- Allow the in-house technical staff to gain relevant experience of the processes and tools involved, which can be used in migrations to other areas. + +- Create a template for further migrations specific to the source Netezza environment and the current tools and processes that are already in place. + +A good candidate for an initial migration from the Netezza environment that would enable the preceding items is typically one that implements a BI/Analytics workload, rather than an online transaction processing (OLTP) workload, with a data model that can be migrated with minimal modification, normally a star or snowflake schema. 
+ +The migration data volume for the initial exercise should be large enough to demonstrate the capabilities and benefits of the Azure Synapse environment while quickly demonstrating the value—typically in the 1-10 TB range. + +To minimize the risk and reduce implementation time for the initial migration project, confine the scope of the migration to just the data marts. However, this won't address the broader topics such as ETL migration and historical data migration as part of the initial migration project. Address these topics in later phases of the project, once the migrated data mart layer is backfilled with the data and processes required to build them. + +#### Lift and shift as-is versus a phased approach incorporating changes + +> [!TIP] +> "Lift and shift" is a good starting point, even if subsequent phases will implement changes to the data model. + +Whatever the drive and scope of the intended migration, there are—broadly speaking—two types of migration: + +##### Lift and shift + +In this case, the existing data model—such as a star schema—is migrated unchanged to the new Azure Synapse platform. The emphasis is on minimizing risk and the migration time required by reducing the work needed to realize the benefits of moving to the Azure cloud environment. + +This is a good fit for existing Netezza environments where a single data mart is being migrated, or where the data is already in a well-designed star or snowflake schema—or there are other pressures to move to a more modern cloud environment. + +##### Phased approach incorporating modifications + +In cases where a legacy warehouse has evolved over a long time, you might need to re-engineer to maintain the required performance levels or to support new data, such as Internet of Things (IoT) streams. Migrate to Azure Synapse to get the benefits of a scalable cloud environment as part of the re-engineering process. Migration could include a change in the underlying data model, such as a move from an Inmon model to a data vault. + +Microsoft recommends moving the existing data model as-is to Azure and using the performance and flexibility of the Azure environment to apply the re-engineering changes, leveraging Azure's capabilities to make the changes without impacting the existing source system. + +#### Use Azure Data Factory to implement a metadata-driven migration + +Automate and orchestrate the migration process by using the capabilities of the Azure environment. This approach minimizes the impact on the existing Netezza environment, which may already be running close to full capacity. + +Azure Data Factory is a cloud-based data integration service that allows creation of data-driven workflows in the cloud for orchestrating and automating data movement and data transformation. Using Data Factory, you can create and schedule data-driven workflows—called pipelines—to ingest data from disparate data stores. Data Factory can process and transform data by using compute services such as Azure HDInsight Hadoop, Spark, Azure Data Lake Analytics, and Azure Machine Learning. + +By creating metadata to list the data tables to be migrated and their location, you can use the Data Factory facilities to manage the migration process. + +### Design differences between Netezza and Azure Synapse + +#### Multiple databases versus a single database and schemas + +> [!TIP] +> Combine multiple databases into a single database in Azure Synapse and use schemas to logically separate the tables. 
+ +In a Netezza environment, there are often multiple separate databases for individual parts of the overall environment. For example, there may be a separate database for data ingestion and staging tables, a database for the core warehouse tables, and another database for data marts, sometimes called a semantic layer. Processing these as ETL/ELT pipelines may implement cross-database joins and will move data between these separate databases. + +> [!TIP] +> Replace Netezza-specific features with Azure Synapse features. + +Querying within the Azure Synapse environment is limited to a single database. Schemas are used to separate the tables into logically separate groups. Therefore, we recommend using a series of schemas within the target Azure Synapse database to mimic any separate databases migrated from the Netezza environment. If the Netezza environment already uses schemas, you may need to use a new naming convention to move the existing Netezza tables and views to the new environment—for example, concatenate the existing Netezza schema and table names into the new Azure Synapse table name and use schema names in the new environment to maintain the original separate database names. Schema consolidation naming can have dots—however, Azure Synapse Spark may have issues. You can use SQL views over the underlying tables to maintain the logical structures, but there are some potential downsides to this approach: + +- Views in Azure Synapse are read-only, so any updates to the data must take place on the underlying base tables. + +- There may already be one or more layers of views in existence, and adding an extra layer of views might impact performance and supportability as nested views are difficult to troubleshoot. + +#### Table considerations + +> [!TIP] +> Use existing indexes to indicate candidates for indexing in the migrated warehouse. + +When migrating tables between different technologies, only the raw data and the metadata that describes it gets physically moved between the two environments. Other database elements from the source system—such as indexes—aren't migrated as these may not be needed or may be implemented differently within the new target environment. + +However, it's important to understand where performance optimizations such as indexes have been used in the source environment, as this can indicate where to add performance optimization in the new target environment. For example, if queries in the source Netezza environment frequently use zone maps, it may indicate that a non-clustered index should be created within the migrated Azure Synapse. Other native performance optimization techniques, such as table replication, may be more applicable than a straight "like-for-like" index creation. + +#### Unsupported Netezza database object types + +> [!TIP] +> Assess the impact of unsupported data types as part of the preparation phase + +Netezza implements some database objects that aren't directly supported in Azure Synapse, but there are methods to achieve the same functionality within the new environment: + +- Zone maps: in Netezza, zone maps are automatically created and maintained for some column types and are used at query time to restrict the amount of data to be scanned. Zone maps are created on the following column types: + - `INTEGER` columns of length 8 bytes or less. + - Temporal columns. For instance, `DATE`, `TIME`, and `TIMESTAMP`. + - `CHAR` columns, if these are part of a materialized view and mentioned in the `ORDER BY` clause. 
+ + You can find out which columns have zone maps by using the `nz_zonemap` utility, which is part of the NZ Toolkit. Azure Synapse doesn't include zone maps, but you can achieve similar results by using other user-defined index types and/or partitioning. + +- Clustered base tables (CBT): in Netezza, CBTs are commonly used for fact tables, which can have billions of records. Scanning such a huge table requires a lot of processing time, since a full table scan might be needed to get relevant records. Organizing records on restrictive CBT allows Netezza to group records in same or nearby extents. This process also creates zone maps that improve the performance by reducing the amount of data to be scanned. + + In Azure Synapse, you can achieve a similar effect by use of partitioning and/or use of other indexes. + +- Materialized views: Netezza supports materialized views and recommends creating one or more of these over large tables having many columns where only a few of those columns are regularly used in queries. The system automatically maintains materialized views when data in the base table is updated. + + Azure Synapse supports materialized views, with the same functionality as Netezza. + +#### Netezza data type mapping + +Most Netezza data types have a direct equivalent in Azure Synapse. This table shows these data types together with the recommended approach for handling them. + +| Netezza Data Type | Azure Synapse Data Type | +|--------------------------------|-------------------------------------| +| BIGINT | BIGINT | +| BINARY VARYING(n) | VARBINARY(n) | +| BOOLEAN | BIT | +| BYTEINT | TINYINT | +| CHARACTER VARYING(n) | VARCHAR(n) | +| CHARACTER(n) | CHAR(n) | +| DATE | DATE(date) | +| DECIMAL(p,s) | DECIMAL(p,s) | +| DOUBLE PRECISION | FLOAT | +| FLOAT(n) | FLOAT(n) | +| INTEGER | INT | +| INTERVAL | INTERVAL data types aren't currently directly supported in Azure Synapse, but can be calculated using temporal functions such as DATEDIFF. | +| MONEY | MONEY | +| NATIONAL CHARACTER VARYING(n) | NVARCHAR(n) | +| NATIONAL CHARACTER(n) | NCHAR(n) | +| NUMERIC(p,s) | NUMERIC(p,s) | +| REAL | REAL | +| SMALLINT | SMALLINT | +| ST_GEOMETRY(n) | Spatial data types such as ST_GEOMETRY aren't currently supported in Azure Synapse, but the data could be stored as VARCHAR or VARBINARY. | +| TIME | TIME | +| TIME WITH TIME ZONE | DATETIMEOFFSET | +| TIMESTAMP | DATETIME | + +> [!TIP] +> Assess the number and type of non-data objects to be migrated as part of the preparation phase. + +There are third-party vendors who offer tools and services to automate migration, including the mapping of data types. If a third-party ETL tool such as Informatica or Talend is already in use in the Netezza environment, those tools can implement any required data transformations. + +#### SQL DML syntax differences + +There are a few differences in SQL Data Manipulation Language (DML) syntax between Netezza SQL and Azure Synapse (T-SQL) that you should be aware of during migration: + +- `STRPOS`: in Netezza, the `STRPOS` function returns the position of a substring within a string. The equivalent function in Azure Synapse is `CHARINDEX`, with the order of the arguments reversed. For example, `SELECT STRPOS('abcdef','def')...` in Netezza is equivalent to `SELECT CHARINDEX('def','abcdef')...` in Azure Synapse. + +- `AGE`: Netezza supports the `AGE` operator to give the interval between two temporal values, such as timestamps or dates. For example, `SELECT AGE('23-03-1956','01-01-2019') FROM...`. 
In Azure Synapse, `DATEDIFF` gives the interval. For example, `SELECT DATEDIFF(day, '1956-03-26','2019-01-01') FROM...`. Note the date representation sequence. + +- `NOW()`: Netezza uses `NOW()` to represent `CURRENT_TIMESTAMP` in Azure Synapse. + +#### Functions, stored procedures, and sequences + +> [!TIP] +> Assess the number and type of non-data objects to be migrated as part of the preparation phase. + +When migrating from a mature legacy data warehouse environment such as Netezza, you must often migrate elements other than simple tables and views to the new target environment. Examples include functions, stored procedures, and sequences. + +As part of the preparation phase, create an inventory of these objects to be migrated, and define the method of handling them. Assign an appropriate allocation of resources in the project plan. + +There may be facilities in the Azure environment that replace the functionality implemented as functions or stored procedures in the Netezza environment. In this case, it's more efficient to use the built-in Azure facilities rather than recoding the Netezza functions. + +[Data integration partners](../../partner/data-integration.md) offer tools and services that can automate the migration. + +##### Functions + +As with most database products, Netezza supports system functions and user-defined functions within an SQL implementation. When migrating to another database platform such as Azure Synapse, common system functions are available and can be migrated without change. Some system functions may have slightly different syntax, but the required changes can be automated if so. + +For system functions where there's no equivalent, or for arbitrary user-defined functions, recode these using the language(s) available in the target environment. Netezza user-defined functions are coded in nzlua or C++ languages while Azure Synapse uses the popular Transact-SQL language to implement user-defined functions. + +##### Stored procedures + +Most modern database products allow for procedures to be stored within the database. Netezza provides the NZPLSQL language for this purpose. NZPLSQL is based on Postgres PL/pgSQL. + +A stored procedure typically contains SQL statements and some procedural logic, and may return data or a status. + +Azure Synapse Analytics also supports stored procedures using T-SQL. If you must migrate stored procedures, recode these procedures for their new environment. + +##### Sequences + +In Netezza, a sequence is a named database object created via `CREATE SEQUENCE` that can provide the unique value via the `NEXT VALUE FOR` method. Use these to generate unique numbers for use as surrogate key values for primary key values. + +Within Azure Synapse, there's no `CREATE SEQUENCE`. Sequences are handled via use of [IDENTITY](/sql/t-sql/statements/create-table-transact-sql-identity-property?msclkid=8ab663accfd311ec87a587f5923eaa7b) columns or SQL code to create the next sequence number in a series. + +### Extract metadata and data from a Netezza environment + +#### Data Definition Language (DDL) generation + +> [!TIP] +> Use Netezza external tables for most efficient data extract. + +You can edit existing Netezza CREATE TABLE and CREATE VIEW scripts to create the equivalent definitions with modified data types, if necessary, as described in the previous section. Typically, this involves removing or modifying any extra Netezza-specific clauses such as `ORGANIZE ON`. 
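+ +For example, the following sketch (using a hypothetical staging table rather than DDL taken from a real system) shows a Netezza table definition edited for Azure Synapse by removing the `ORGANIZE ON` clause and applying the data type mappings described earlier: + +```sql +-- Hypothetical Netezza definition +CREATE TABLE stg_orders +( +    order_id    BIGINT, +    status_code BYTEINT, +    order_ts    TIMESTAMP +) +ORGANIZE ON (order_ts); + +-- Edited Azure Synapse equivalent: ORGANIZE ON removed, BYTEINT and TIMESTAMP mapped +CREATE TABLE stg_orders +( +    order_id    BIGINT, +    status_code TINYINT, +    order_ts    DATETIME +); +``` + +Distribution and indexing options for the migrated table are covered in the performance recommendations later in this article. 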
+ +However, all the information that specifies the current definitions of tables and views within the existing Netezza environment is maintained within system catalog tables. These tables are the best source of this information, as it's guaranteed to be up to date and complete. User-maintained documentation may not be in sync with the current table definitions. + +Access the information in these tables via utilities such as `nz_ddl_table` and generate the `CREATE TABLE` DDL statements for the equivalent tables in Azure Synapse. + +Third-party migration and ETL tools also use the catalog information to achieve the same result. + +#### Data extraction from Netezza + +Migrate the raw data from existing Netezza tables into flat delimited files using standard Netezza utilities, such as nzsql, nzunload, and via external tables. Compress these files using gzip and upload them to Azure Blob Storage via AzCopy or by using Azure data transport facilities such as Azure Data Box. + +During a migration exercise, extract the data as efficiently as possible. Use the external tables approach as this is the fastest method. Perform multiple extracts in parallel to maximize the throughput for data extraction. + +This is a simple example of an external table extract: + +```sql +CREATE EXTERNAL TABLE '/tmp/export_tab1.csv' USING (DELIM ',') AS SELECT * from ; +``` + +If sufficient network bandwidth is available, extract data directly from an on-premises Netezza system into Azure Synapse tables or Azure Blob Data Storage by using Azure Data Factory processes or third-party data migration or ETL products. + +Recommended data formats for the extracted data include delimited text files (also called Comma Separated Values or CSV), Optimized Row Columnar (ORC), or Parquet files. + +For more information about the process of migrating data and ETL from a Netezza environment, see [Data migration, ETL, and load for Netezza migrations](2-etl-load-migration-considerations.md). + +## Performance recommendations for Netezza migrations + +This article provides general information and guidelines about use of performance optimization techniques for Azure Synapse and adds specific recommendations for use when migrating from a Netezza environment. + +### Similarities in performance tuning approach concepts + +> [!TIP] +> Many Netezza tuning concepts hold true for Azure Synapse. + +When moving from a Netezza environment, many of the performance tuning concepts for Azure Data Warehouse will be remarkably familiar. For example: + +- Using data distribution to collocate data to be joined onto the same processing node. + +- Using the smallest data type for a given column will save storage space and accelerate query processing. + +- Ensuring data types of columns to be joined are identical will optimize join processing by reducing the need to transform data for matching. + +- Ensuring statistics are up to date will help the optimizer produce the best execution plan. + +### Differences in performance tuning approach + +> [!TIP] +> Prioritize early familiarity with Azure Synapse tuning options in a migration exercise. + +This section highlights lower-level implementation differences between Netezza and Azure Synapse for performance tuning. + +#### Data distribution options + +`CREATE TABLE` statements in both Netezza and Azure Synapse allow for specification of a distribution definition—via `DISTRIBUTE ON` in Netezza, and `DISTRIBUTION =` in Azure Synapse. 
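+ +As a sketch, using a hypothetical fact table rather than syntax taken from any particular system, the two forms look like this: + +```sql +-- Netezza: hash distribution declared with DISTRIBUTE ON +CREATE TABLE fact_sales +( +    customer_id INTEGER, +    sale_amount NUMERIC(10,2) +) +DISTRIBUTE ON (customer_id); + +-- Azure Synapse: the equivalent hash distribution declared in the WITH clause +CREATE TABLE fact_sales +( +    customer_id INT, +    sale_amount NUMERIC(10,2) +) +WITH (DISTRIBUTION = HASH(customer_id)); +``` + +Azure Synapse also accepts `DISTRIBUTION = ROUND_ROBIN` and `DISTRIBUTION = REPLICATE`; the replication option is described next. 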
+ +Compared to Netezza, Azure Synapse provides an additional way to achieve local joins for small table-large table joins (typically dimension table to fact table in a star schema model), which is to replicate the smaller dimension table across all nodes. This ensures that any value of the join key of the larger table will have a matching dimension row locally available. The overhead of replicating the dimension tables is relatively low, provided the tables aren't very large (see [Design guidance for replicated tables](../../sql-data-warehouse/design-guidance-for-replicated-tables.md))—in which case, the hash distribution approach as described previously is more appropriate. For more information, see [Distributed tables design](../../sql-data-warehouse/sql-data-warehouse-tables-distribute.md). + +#### Data indexing + +Azure Synapse provides several user-definable indexing options, but these are different from the system-managed zone maps in Netezza. For more information about the different indexing options, see [table indexes](/azure/sql-data-warehouse/sql-data-warehouse-tables-index). + +The existing system-managed zone maps within the source Netezza environment can indicate how the data is currently used. They can identify candidate columns for indexing within the Azure Synapse environment. + +#### Data partitioning + +In an enterprise data warehouse, fact tables can contain many billions of rows. Partitioning optimizes the maintenance and querying of these tables by splitting them into separate parts to reduce the amount of data processed. The `CREATE TABLE` statement defines the partitioning specification for a table. + +Only one field per table can be used for partitioning. That field is frequently a date field since many queries are filtered by date or a date range. It's possible to change the partitioning of a table after initial load by recreating the table with the new distribution using the `CREATE TABLE AS` (or CTAS) statement. See [table partitions](/azure/sql-data-warehouse/sql-data-warehouse-tables-partition) for a detailed discussion of partitioning in Azure Synapse. + +#### Data table statistics + +Ensure that statistics on data tables are up to date by building in a [statistics](../../sql/develop-tables-statistics.md) step to ETL/ELT jobs. + +#### PolyBase for data loading + +PolyBase is the most efficient method for loading large amounts of data into the warehouse since it can leverage parallel loading streams. For more information, see [PolyBase data loading strategy](../../sql/load-data-overview.md). + +#### Use workload management + +Use [workload management](../../sql-data-warehouse/sql-data-warehouse-workload-management.md?context=%2fazure%2fsynapse-analytics%2fcontext%2fcontext) instead of resource classes. ETL would be in its own workgroup and should be configured to have more resources per query (less concurrency by more resources). For more information, see [What is dedicated SQL pool in Azure Synapse Analytics](../../sql-data-warehouse/sql-data-warehouse-overview-what-is.md). + +## Next steps + +To learn more about ETL and load for Netezza migration, see the next article in this series: [Data migration, ETL, and load for Netezza migrations](2-etl-load-migration-considerations.md). 
\ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/netezza/2-etl-load-migration-considerations.md b/articles/synapse-analytics/migration-guides/netezza/2-etl-load-migration-considerations.md new file mode 100644 index 0000000000000..d22343bb2c81c --- /dev/null +++ b/articles/synapse-analytics/migration-guides/netezza/2-etl-load-migration-considerations.md @@ -0,0 +1,313 @@ +--- +title: "Data migration, ETL, and load for Netezza migrations" +description: Learn how to plan your data migration from Netezza to Azure Synapse Analytics to minimize the risk and impact on users. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/31/2022 +--- + +# Data migration, ETL, and load for Netezza migrations + +This article is part two of a seven part series that provides guidance on how to migrate from Netezza to Azure Synapse Analytics. This article provides best practices for ETL and load migration. + +## Data migration considerations + +### Initial decisions for data migration from Netezza + +When migrating a Netezza data warehouse, you need to ask some basic data-related questions. For example: + +- Should unused table structures be migrated? + +- What's the best migration approach to minimize risk and user impact? + +- When migrating data marts: stay physical or go virtual? + +The next sections discuss these points within the context of migration from Netezza. + +#### Migrate unused tables? + +> [!TIP] +> In legacy systems, it's not unusual for tables to become redundant over time—these don't need to be migrated in most cases. + +It makes sense to only migrate tables that are in use in the existing system. Tables that aren't active can be archived rather than migrated, so that the data is available if necessary in future. It's best to use system metadata and log files rather than documentation to determine which tables are in use, because documentation can be out of date. + +If enabled, Netezza query history tables contain information that can determine when a given table was last accessed—which can in turn be used to decide whether a table is a candidate for migration. + +Here's an example query that looks for the usage of a specific table within a given time window: + +```sql +SELECT FORMAT_TABLE_ACCESS (usage), + hq.submittime +FROM "$v_hist_queries" hq + INNER JOIN "$hist_table_access_3" hta USING +(NPSID, NPSINSTANCEID, OPID, SESSIONID) +WHERE hq.dbname = 'PROD' +AND hta.schemaname = 'ADMIN' +AND hta.tablename = 'TEST_1' +AND hq.SUBMITTIME > '01-01-2015' +AND hq.SUBMITTIME <= '08-06-2015' +AND +( + instr(FORMAT_TABLE_ACCESS(usage),'ins') > 0 + OR instr(FORMAT_TABLE_ACCESS(usage),'upd') > 0 + OR instr(FORMAT_TABLE_ACCESS(usage),'del') > 0 +) +AND status=0; +``` + +```output +| FORMAT_TABLE_ACCESS | SUBMITTIME +----------------------+--------------------------- +ins | 2015-06-16 18:32:25.728042 +ins | 2015-06-16 17:46:14.337105 +ins | 2015-06-16 17:47:14.430995 +(3 rows) +``` + +This query uses the helper function `FORMAT_TABLE_ACCESS` and the digit at the end of the `$v_hist_table_access_3` view to match the installed query history version. + +#### What is the best migration approach to minimize risk and impact on users? + +> [!TIP] +> Migrate the existing model as-is initially, even if a change to the data model is planned in the future. 
+ +This question comes up often since companies often want to lower the impact of changes on the data warehouse data model to improve agility. Companies see an opportunity to do so during a migration to modernize their data model. This approach carries a higher risk because it could impact ETL jobs populating the data warehouse from a data warehouse to feed dependent data marts. Because of that risk, it's usually better to redesign on this scale after the data warehouse migration. + +Even if a data model change is an intended part of the overall migration, it's good practice to migrate the existing model as-is to the new environment (Azure Synapse Analytics in this case), rather than do any re-engineering on the new platform during migration. This approach has the advantage of minimizing the impact on existing production systems, while also leveraging the performance and elastic scalability of the Azure platform for one-off re-engineering tasks. + +When migrating from Netezza, often the existing data model is already suitable for as-is migration to Azure Synapse. + +#### Migrate data marts: stay physical or go virtual? + +> [!TIP] +> Virtualizing data marts can save on storage and processing resources. + +In legacy Netezza data warehouse environments, it's common practice to create several data marts that are structured to provide good performance for ad hoc self-service queries and reports for a given department or business function within an organization. As such, a data mart typically consists of a subset of the data warehouse and contains aggregated versions of the data in a form that enables users to easily query that data with fast response times via user-friendly query tools such as Microsoft Power BI, Tableau, or MicroStrategy. This form is typically a dimensional data model. One use of data marts is to expose the data in a usable form, even if the underlying warehouse data model is something different, such as a data vault. + +You can use separate data marts for individual business units within an organization to implement robust data security regimes, by only allowing users to access specific data marts that are relevant to them, and eliminating, obfuscating, or anonymizing sensitive data. + +If these data marts are implemented as physical tables, they'll require additional storage resources to store them, and additional processing to build and refresh them regularly. Also, the data in the mart will only be as up to date as the last refresh operation, and so may be unsuitable for highly volatile data dashboards. + +> [!TIP] +> The performance and scalability of Azure Synapse enables virtualization without sacrificing performance. + +With the advent of relatively low-cost scalable MPP architectures, such as Azure Synapse, and the inherent performance characteristics of such architectures, it may be that you can provide data mart functionality without having to instantiate the mart as a set of physical tables. This is achieved by effectively virtualizing the data marts via SQL views onto the main data warehouse, or via a virtualization layer using features such as views in Azure or the [visualization products of Microsoft partners](../../partner/data-integration.md). This approach simplifies or eliminates the need for additional storage and aggregation processing and reduces the overall number of database objects to be migrated. + +There's another potential benefit to this approach. 
By implementing the aggregation and join logic within a virtualization layer, and presenting external reporting tools via a virtualized view, the processing required to create these views is "pushed down" into the data warehouse, which is generally the best place to run joins, aggregations, and other related operations on large data volumes. + +The primary drivers for choosing a virtual data mart implementation over a physical data mart are: + +- More agility: a virtual data mart is easier to change than physical tables and the associated ETL processes. + +- Lower total cost of ownership: a virtualized implementation requires fewer data stores and copies of data. + +- Elimination of ETL jobs to migrate and simplify data warehouse architecture in a virtualized environment. + +- Performance: although physical data marts have historically been more performant, virtualization products now implement intelligent caching techniques to mitigate. + +### Data migration from Netezza + +#### Understand your data + +Part of migration planning is understanding in detail the volume of data that needs to be migrated since that can impact decisions about the migration approach. Use system metadata to determine the physical space taken up by the "raw data" within the tables to be migrated. In this context, "raw data" means the amount of space used by the data rows within a table, excluding overheads such as indexes and compression. This is especially true for the largest fact tables since these will typically comprise more than 95% of the data. + +You can get an accurate number for the volume of data to be migrated for a given table by extracting a representative sample of the data—for example, one million rows—to an uncompressed delimited flat ASCII data file. Then, use the size of that file to get an average raw data size per row of that table. Finally, multiply that average size by the total number of rows in the full table to give a raw data size for the table. Use that raw data size in your planning. + +#### Netezza data type mapping + +> [!TIP] +> Assess the impact of unsupported data types as part of the preparation phase. + +Most Netezza data types have a direct equivalent in Azure Synapse. The following table shows these data types, together with the recommended approach for mapping them. + + +| Netezza data type | Azure Synapse data type | +|-----------------------------------|----------------------------------| +| BIGINT | BIGINT | +| BINARY VARYING(n) | VARBINARY(n) | +| BOOLEAN | BIT | +| BYTEINT | TINYINT | +| CHARACTER VARYING(n) | VARCHAR(n) | +| CHARACTER(n) | CHAR(n) | +| DATE | DATE(date) | +| DECIMAL(p,s) | DECIMAL(p,s) | +| DOUBLE PRECISION | FLOAT | +| FLOAT(n) | FLOAT(n) | +| INTEGER | INT | +| INTERVAL | INTERVAL data types aren't currently directly supported in Azure Synapse Analytics, but can be calculated using temporal functions, such as DATEDIFF. | +| MONEY | MONEY | +| NATIONAL CHARACTER VARYING(n) | NVARCHAR(n) | +| NATIONAL CHARACTER(n) | NCHAR(n) | +| NUMERIC(p,s) | NUMERIC(p,s) | +| REAL | REAL | +| SMALLINT | SMALLINT | +| ST_GEOMETRY(n) | Spatial data types such as ST_GEOMETRY aren't currently supported in Azure Synapse Analytics, but the data could be stored as VARCHAR or VARBINARY. | +| TIME | TIME | +| TIME WITH TIME ZONE | DATETIMEOFFSET | +| TIMESTAMP | DATETIME | + +Use the metadata from the Netezza catalog tables to determine whether any of these data types need to be migrated, and allow for this in your migration plan. 
The important metadata views in Netezza for this type of query are: + +- `_V_USER`: the user view gives information about the users in the Netezza system. + +- `_V_TABLE`: the table view contains the list of tables created in the Netezza performance system. + +- `_V_RELATION_COLUMN`: the relation column system catalog view contains the columns available in a table. + +- `_V_OBJECTS`: the objects view lists the different objects like tables, view, functions, and so on, that are available in Netezza. + +For example, this Netezza SQL query shows columns and column types: + +```sql +SELECT +tablename, + attname AS COL_NAME, + b.FORMAT_TYPE AS COL_TYPE, + attnum AS COL_NUM +FROM _v_table a + JOIN _v_relation_column b + ON a.objid = b.objid +WHERE a.tablename = 'ATT_TEST' +AND a.schema = 'ADMIN' +ORDER BY attnum; +``` + +```output +TABLENAME | COL_NAME | COL_TYPE | COL_NUM +----------+-------------+----------------------+-------- +ATT_TEST | COL_INT | INTEGER | 1 +ATT_TEST | COL_NUMERIC | NUMERIC(10,2) | 2 +ATT_TEST | COL_VARCHAR | CHARACTER VARYING(5) | 3 +ATT_TEST | COL_DATE | DATE | 4 +(4 rows) +``` + +The query can be modified to search all tables for any occurrences of unsupported data types. + +Azure Data Factory can be used to move data from a legacy Netezza environment. For more information, see [IBM Netezza connector](../../../data-factory/connector-netezza.md). + +[Third-party vendors](/azure/sql-data-warehouse/sql-data-warehouse-partner-data-integration) offer tools and services to automate migration, including the mapping of data types as previously described. Also, third-party ETL tools, like Informatica or Talend, already in use in the Netezza environment can implement all required data transformations. The next section explores the migration of existing third-party ETL processes. + +## ETL migration considerations + +### Initial decisions regarding Netezza ETL migration + +> [!TIP] +> Plan the approach to ETL migration ahead of time and leverage Azure facilities where appropriate. + +For ETL/ELT processing, legacy Netezza data warehouses may use custom-built scripts using Netezza utilities such as nzsql and nzload, or third-party ETL tools such as Informatica or Ab Initio. Sometimes, Netezza data warehouses use a combination of ETL and ELT approaches that's evolved over time. When planning a migration to Azure Synapse, you need to determine the best way to implement the required ETL/ELT processing in the new environment, while minimizing the cost and risk involved. To learn more about ETL and ELT processing, see [ELT vs ETL design approach](../../sql-data-warehouse/design-elt-data-loading.md). + +The following sections discuss migration options and make recommendations for various use cases. This flowchart summarizes one approach: + +:::image type="content" source="../media/2-etl-load-migration-considerations/migration-options-flowchart.png" border="true" alt-text="Flowchart of migration options and recommendations."::: + +The first step is always to build an inventory of ETL/ELT processes that need to be migrated. As with other steps, it's possible that the standard "built-in" Azure features make it unnecessary to migrate some existing processes. For planning purposes, it's important to understand the scale of the migration to be performed. + +In the preceding flowchart, decision 1 relates to a high-level decision about whether to migrate to a totally Azure-native environment. 
If you're moving to a totally Azure-native environment, we recommend that you re-engineer the ETL processing using [Pipelines and activities in Azure Data Factory](../../../data-factory/concepts-pipelines-activities.md?msclkid=b6ea2be4cfda11ec929ac33e6e00db98&tabs=data-factory) or [Azure Synapse Pipelines](../../get-started-pipelines.md?msclkid=b6e99db9cfda11ecbaba18ca59d5c95c). If you're not moving to a totally Azure-native environment, then decision 2 is whether an existing third-party ETL tool is already in use.
+
+> [!TIP]
+> Leverage investment in existing third-party tools to reduce cost and risk.
+
+If a third-party ETL tool is already in use, and especially if there's a large investment in skills or several existing workflows and schedules use that tool, then decision 3 is whether the tool can efficiently support Azure Synapse as a target environment. Ideally, the tool will include "native" connectors that can leverage Azure facilities like PolyBase or [COPY INTO](/sql/t-sql/statements/copy-into-transact-sql) for the most efficient data loading. If native connectors aren't available, there's generally a way to call an external process, such as PolyBase or `COPY INTO`, and pass in the appropriate parameters. Either way, you can leverage existing skills and workflows, with Azure Synapse as the new target environment.
+
+If you decide to retain an existing third-party ETL tool, there may be benefits to running that tool within the Azure environment (rather than on an existing on-premises ETL server) and having Azure Data Factory handle the overall orchestration of the existing workflows. One particular benefit is that less data needs to be downloaded from Azure, processed, and then uploaded back into Azure. So, decision 4 is whether to leave the existing tool running as-is or move it into the Azure environment to achieve cost, performance, and scalability benefits.
+
+### Re-engineer existing Netezza-specific scripts
+
+If some or all of the existing Netezza warehouse ETL/ELT processing is handled by custom scripts that utilize Netezza-specific utilities, such as nzsql or nzload, then these scripts need to be recoded for the new Azure Synapse environment. Similarly, if ETL processes were implemented using stored procedures in Netezza, then these will also have to be recoded.
+
+> [!TIP]
+> The inventory of ETL tasks to be migrated should include scripts and stored procedures.
+
+Some elements of the ETL process are easy to migrate, for example, a simple bulk data load into a staging table from an external file. It may even be possible to automate those parts of the process, for example, by using PolyBase instead of nzload. Other parts of the process that contain arbitrarily complex SQL and/or stored procedures will take more time to re-engineer.
+
+One way of testing Netezza SQL for compatibility with Azure Synapse is to capture some representative SQL statements from Netezza query history, then prefix those queries with `EXPLAIN`, and then—assuming a like-for-like migrated data model in Azure Synapse—run those `EXPLAIN` statements in Azure Synapse. Any incompatible SQL will generate an error, and the error information can determine the scale of the recoding task.
+
+[Microsoft partners](/azure/sql-data-warehouse/sql-data-warehouse-partner-data-integration) offer tools and services to migrate Netezza SQL and stored procedures to Azure Synapse.
+
+### Use third-party ETL tools
+
+As described in the previous section, in many cases the existing legacy data warehouse system will already be populated and maintained by third-party ETL products.
For a list of Microsoft data integration partners for Azure Synapse, see [Data integration partners](/azure/sql-data-warehouse/sql-data-warehouse-partner-data-integration).
+
+## Data loading from Netezza
+
+### Choices available when loading data from Netezza
+
+> [!TIP]
+> Third-party tools can simplify and automate the migration process and therefore reduce risk.
+
+When it comes to migrating data from a Netezza data warehouse, there are some basic questions associated with data loading that need to be resolved. You'll need to decide how the data will be physically moved from the existing on-premises Netezza environment into Azure Synapse in the cloud, and which tools will be used to perform the transfer and load. Consider the following questions, which are discussed in the next sections.
+
+- Will you extract the data to files, or move it directly via a network connection?
+
+- Will you orchestrate the process from the source system, or from the Azure target environment?
+
+- Which tools will you use to automate and manage the process?
+
+#### Transfer data via files or network connection?
+
+> [!TIP]
+> Understand the data volumes to be migrated and the available network bandwidth since these factors influence the migration approach decision.
+
+Once the database tables to be migrated have been created in Azure Synapse, you can move the data to populate those tables out of the legacy Netezza system and into the new environment. There are two basic approaches:
+
+- **File extract**: extract the data from the Netezza tables to flat files, normally in CSV format, via nzsql with the `-o` option or via the `CREATE EXTERNAL TABLE` statement. Use an external table whenever possible since it's the most efficient in terms of data throughput. The following SQL example creates a CSV file via an external table:
+
+  ```sql
+  CREATE EXTERNAL TABLE '/data/export.csv' USING (delimiter ',')
+  AS SELECT col1, col2, expr1, expr2, col3, col1 || col2 FROM your_table;
+  ```
+
+  Use an external table if you're exporting data to a mounted file system on a local Netezza host. If you're exporting data to a remote machine that has JDBC, ODBC, or OLEDB installed, then specify the "remotesource odbc" option in the `USING` clause.
+
+  This approach requires space to land the extracted data files. The space could be local to the Netezza source database (if sufficient storage is available), or remote in Azure Blob Storage. The best performance is achieved when a file is written locally, since that avoids network overhead.
+
+  To minimize the storage and network transfer requirements, it's good practice to compress the extracted data files using a utility like gzip.
+
+  Once extracted, the flat files can either be moved into Azure Blob Storage (collocated with the target Azure Synapse instance), or loaded directly into Azure Synapse using PolyBase or [COPY INTO](/sql/t-sql/statements/copy-into-transact-sql). The method for physically moving data from local on-premises storage to the Azure cloud environment depends on the amount of data and the available network bandwidth.
+
+  Microsoft provides various options to move large volumes of data, including AzCopy for moving files across the network into Azure Storage, Azure ExpressRoute for moving bulk data over a private network connection, and Azure Data Box for moving files to a physical storage device that's then shipped to an Azure data center for loading. For more information, see [data transfer](/azure/architecture/data-guide/scenarios/data-transfer).
+ +- **Direct extract and load across network**: the target Azure environment sends a data extract request, normally via a SQL command, to the legacy Netezza system to extract the data. The results are sent across the network and loaded directly into Azure Synapse, with no need to land the data into intermediate files. The limiting factor in this scenario is normally the bandwidth of the network connection between the Netezza database and the Azure environment. For very large data volumes, this approach may not be practical. + +There's also a hybrid approach that uses both methods. For example, you can use the direct network extract approach for smaller dimension tables and samples of the larger fact tables to quickly provide a test environment in Azure Synapse. For large volume historical fact tables, you can use the file extract and transfer approach using Azure Data Box. + +#### Orchestrate from Netezza or Azure? + +The recommended approach when moving to Azure Synapse is to orchestrate the data extract and loading from the Azure environment using [Azure Synapse Pipelines](../../get-started-pipelines.md?msclkid=b6e99db9cfda11ecbaba18ca59d5c95c) or [Azure Data Factory](../../../data-factory/introduction.md?msclkid=2ccc66eccfde11ecaa58877e9d228779), as well as associated utilities, such as PolyBase or [COPY INTO](/sql/t-sql/statements/copy-into-transact-sql), for the most efficient data loading. This approach leverages Azure capabilities and provides an easy method to build reusable data loading pipelines. + +Other benefits of this approach include reduced impact on the Netezza system during the data load process since the management and loading process is running in Azure, and the ability to automate the process by using metadata-driven data load pipelines. + +#### Which tools can be used? + +The task of data transformation and movement is the basic function of all ETL products. If one of these products is already in use in the existing Netezza environment, then using the existing ETL tool may simplify data migration from Netezza to Azure Synapse. This approach assumes that the ETL tool supports Azure Synapse as a target environment. For more information on tools that support Azure Synapse, see [Data integration partners](/azure/sql-data-warehouse/sql-data-warehouse-partner-data-integration). + +If you're using an ETL tool, consider running that tool within the Azure environment to benefit from Azure cloud performance, scalability, and cost, and free up resources in the Netezza data center. Another benefit is reduced data movement between the cloud and on-premises environments. + +## Summary + +To summarize, our recommendations for migrating data and associated ETL processes from Netezza to Azure Synapse are: + +- Plan ahead to ensure a successful migration exercise. + +- Build a detailed inventory of data and processes to be migrated as soon as possible. + +- Use system metadata and log files to get an accurate understanding of data and process usage. Don't rely on documentation since it may be out of date. + +- Understand the data volumes to be migrated, and the network bandwidth between the on-premises data center and Azure cloud environments. + +- Leverage standard "built-in" Azure features to minimize the migration workload. + +- Identify and understand the most efficient tools for data extraction and loading in both Netezza and Azure environments. Use the appropriate tools in each phase of the process. 
+ +- Use Azure facilities, such as [Azure Synapse Pipelines](../../get-started-pipelines.md?msclkid=b6e99db9cfda11ecbaba18ca59d5c95c) or [Azure Data Factory](../../../data-factory/introduction.md?msclkid=2ccc66eccfde11ecaa58877e9d228779), to orchestrate and automate the migration process while minimizing impact on the Netezza system. + +## Next steps + +To learn more about security access operations, see the next article in this series: [Security, access, and operations for Netezza migrations](3-security-access-operations.md). \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/netezza/3-security-access-operations.md b/articles/synapse-analytics/migration-guides/netezza/3-security-access-operations.md new file mode 100644 index 0000000000000..7ca7ed4093858 --- /dev/null +++ b/articles/synapse-analytics/migration-guides/netezza/3-security-access-operations.md @@ -0,0 +1,317 @@ +--- +title: "Security, access, and operations for Netezza migrations" +description: Learn about authentication, users, roles, permissions, monitoring, and auditing, and workload management in Azure Synapse Analytics and Netezza. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/31/2022 +--- + +# Security, access, and operations for Netezza migrations + +This article is part three of a seven part series that provides guidance on how to migrate from Netezza to Azure Synapse Analytics. This article provides best practices for security access operations. + +## Security considerations + +This article discusses connection methods for existing legacy Netezza environments and how they can be migrated to Azure Synapse Analytics with minimal risk and user impact. + +This article assumes that there's a requirement to migrate the existing methods of connection and user/role/permission structure as-is. If not, use the Azure portal to create and manage a new security regime. + +For more information on the [Azure Synapse security](../../sql-data-warehouse/sql-data-warehouse-overview-manage-security.md#authorization) options, see [Security whitepaper](../../guidance/security-white-paper-introduction.md). + +### Connection and authentication + +> [!TIP] +> Authentication in both Netezza and Azure Synapse can be "in database" or through external methods. + +#### Netezza authorization options + +The IBM Netezza system offers several authentication methods for Netezza database users: + +- **Local authentication**: Netezza administrators define database users and their passwords by using the `CREATE USER` command or through Netezza administrative interfaces. In local authentication, use the Netezza system to manage database accounts and passwords, and to add and remove database users from the system. This method is the default authentication method. + +- **LDAP authentication**: use an LDAP name server to authenticate database users, and manage passwords, database account activations, and deactivations. The Netezza system uses a Pluggable Authentication Module (PAM) to authenticate users on the LDAP name server. Microsoft Active Directory conforms to the LDAP protocol, so it can be treated like an LDAP server for the purposes of LDAP authentication. + +- **Kerberos authentication**: use a Kerberos distribution server to authenticate database users, and manage passwords, database account activations, and deactivations. + +Authentication is a system-wide setting. 
Users must be either locally authenticated or authenticated by using the LDAP or Kerberos method. If you choose LDAP or Kerberos authentication, you can still create users with local authentication on a per-user basis. LDAP and Kerberos can't be used at the same time to authenticate users. The Netezza host supports LDAP or Kerberos authentication for database user logins only, not for operating system logins on the host.
+
+#### Azure Synapse authorization options
+
+Azure Synapse supports two basic options for connection and authorization:
+
+- **SQL authentication**: SQL authentication uses a database connection that includes a database identifier, user ID, and password, plus other optional parameters. This is functionally equivalent to Netezza local authentication.
+
+- **Azure Active Directory (Azure AD) authentication**: with Azure AD authentication, you can centrally manage the identities of database users and other Microsoft services. Central ID management provides a single place to manage Azure Synapse users and simplifies permission management. Azure AD can also support connections to LDAP and Kerberos services—for example, Azure AD can be used to connect to existing LDAP directories if these are to remain in place after migration of the database.
+
+### Users, roles, and permissions
+
+#### Overview
+
+> [!TIP]
+> High-level planning is essential for a successful migration project.
+
+Both Netezza and Azure Synapse implement database access control via a combination of users, roles (groups in Netezza), and permissions. Both use standard SQL `CREATE USER` and `CREATE ROLE/GROUP` statements to define users and roles, and `GRANT` and `REVOKE` statements to assign or remove permissions to those users and/or roles.
+
+> [!TIP]
+> Automation of migration processes is recommended to reduce elapsed time and scope for errors.
+
+Conceptually the two databases are similar, and it might be possible to automate the migration of existing user IDs, groups, and permissions to some degree. Migrate such data by extracting the existing legacy user and group information from the Netezza system catalog tables and generating matching equivalent `CREATE USER` and `CREATE ROLE` statements to be run in Azure Synapse to recreate the same user/role hierarchy.
+
+After data extraction, use Netezza system catalog tables to generate equivalent `GRANT` statements to assign permissions (where an equivalent one exists). The following diagram shows how to use existing metadata to generate the necessary SQL.
+
+:::image type="content" source="../media/3-security-access-operations/automating-migration-privileges.png" border="true" alt-text="Chart showing how to automate the migration of privileges from an existing system.":::
+
+See the following sections for more details.
+
+#### Users and roles
+
+> [!TIP]
+> Migration of a data warehouse requires more than just tables, views, and SQL statements.
+
+The information about current users and groups in a Netezza system is held in system catalog views `_v_users` and `_v_groupusers`. Use the nzsql utility or tools such as the Netezza Performance Portal, the NzAdmin tool, or the Netezza utility scripts to list user privileges. For example, use the `\dpu` and `\dpgu` commands in nzsql to display users or groups with their permissions.
+
+Use or edit the utility scripts `nz_get_users` and `nz_get_user_groups` to retrieve the same information in the required format.
+
+Query system catalog views directly (if the user has `SELECT` access to those views) to obtain current lists of users and roles defined within the system. See examples to list users, groups, or users and their associated groups:
+
+```sql
+-- List of users
+SELECT USERNAME FROM _V_USER;
+
+-- List of groups
+SELECT DISTINCT(GROUPNAME) FROM _V_USERGROUPS;
+
+-- List of users and their associated groups
+SELECT USERNAME, GROUPNAME FROM _V_GROUPUSERS;
+```
+
+Modify the example `SELECT` statement to produce a result set that is a series of `CREATE USER` and `CREATE GROUP` statements by including the appropriate text as a literal within the `SELECT` statement.
+
+There's no way to retrieve existing passwords, so you need to implement a scheme for allocating new initial passwords on Azure Synapse.
+
+#### Permissions
+
+> [!TIP]
+> There are equivalent Azure Synapse permissions for basic database operations such as DML and DDL.
+
+In a Netezza system, the system table `_t_usrobj_priv` holds the access rights for users and roles. Query this table (if the user has `SELECT` access to it) to obtain current lists of access rights defined within the system.
+
+In Netezza, the individual permissions are represented as individual bits within the `privileges` or `g_privileges` fields. For an example SQL statement, see [user group permissions](http://nz2nz.blogspot.com/2016/03/netezza-user-group-permissions-view_3.html).
+
+The simplest way to obtain a DDL script that contains the `GRANT` commands to replicate the current privileges for users and groups is to use the appropriate Netezza utility scripts:
+
+```sql
+-- List of group privileges
+nz_ddl_grant_group -usrobj dbname > output_file_dbname;
+
+-- List of user privileges
+nz_ddl_grant_user -usrobj dbname > output_file_dbname;
+```
+
+The output file can be modified to produce a script that is a series of `GRANT` statements for Azure Synapse.
+
+Netezza supports two classes of access rights, Admin and Object. See the following tables for a list of Netezza access rights and their equivalent in Azure Synapse.
+
+| Admin Privilege | Description | Azure Synapse Equivalent |
+|----------------------------|-------------|-----------------|
+| Backup | Allows the user to create backups. The user can run the `nzbackup` command. | 1 |
+| [Create] Aggregate | Allows the user to create user-defined aggregates (UDAs). Permission to operate on existing UDAs is controlled by object privileges. | CREATE FUNCTION 3 |
+| [Create] Database | Allows the user to create databases. Permission to operate on existing databases is controlled by object privileges. | CREATE DATABASE |
+| [Create] External Table | Allows the user to create external tables. Permission to operate on existing tables is controlled by object privileges. | CREATE TABLE |
+| [Create] Function | Allows the user to create user-defined functions (UDFs). Permission to operate on existing UDFs is controlled by object privileges. | CREATE FUNCTION |
+| [Create] Group | Allows the user to create groups. Permission to operate on existing groups is controlled by object privileges. | CREATE ROLE |
+| [Create] Index | For system use only. Users can't create indexes. | CREATE INDEX |
+| [Create] Library | Allows the user to create shared libraries. Permission to operate on existing shared libraries is controlled by object privileges. | 1 |
+| [Create] Materialized View | Allows the user to create materialized views. 
| CREATE VIEW |
+| [Create] Procedure | Allows the user to create stored procedures. Permission to operate on existing stored procedures is controlled by object privileges. | CREATE PROCEDURE |
+| [Create] Schema | Allows the user to create schemas. Permission to operate on existing schemas is controlled by object privileges. | CREATE SCHEMA |
+| [Create] Sequence | Allows the user to create database sequences. | 1 |
+| [Create] Synonym | Allows the user to create synonyms. | CREATE SYNONYM |
+| [Create] Table | Allows the user to create tables. Permission to operate on existing tables is controlled by object privileges. | CREATE TABLE |
+| [Create] Temp Table | Allows the user to create temporary tables. Permission to operate on existing tables is controlled by object privileges. | CREATE TABLE |
+| [Create] User | Allows the user to create users. Permission to operate on existing users is controlled by object privileges. | CREATE USER |
+| [Create] View | Allows the user to create views. Permission to operate on existing views is controlled by object privileges. | CREATE VIEW |
+| [Manage] Hardware | Allows the user to do the following hardware-related operations: view hardware status, manage SPUs, manage topology and mirroring, and run diagnostic tests. The user can run these commands: `nzhw` and `nzds`. | 4 |
+| [Manage] Security | Allows the user to run commands and operations that relate to advanced security options, such as managing and configuring history databases, managing multi-level security objects, specifying security for users and groups, and managing database key stores and keys, including key stores for the digital signing of audit data. | 4 |
+| [Manage] System | Allows the user to do the following management operations: start/stop/pause/resume the system, abort sessions, view the distribution map, system statistics, and logs. The user can use these commands: `nzsystem`, `nzstate`, `nzstats`, and `nzsession`. | 4 |
+| Restore | Allows the user to restore the system. The user can run the `nzrestore` command. | 2 |
+| Unfence | Allows the user to create or alter a user-defined function or aggregate to run in unfenced mode. | 1 |
+
+| Object Privilege | Description | Azure Synapse Equivalent |
+|----------------------------|-------------|-----------------|
+| Abort | Allows the user to abort sessions. Applies to groups and users. | KILL DATABASE CONNECTION |
+| Alter | Allows the user to modify object attributes. Applies to all objects. | ALTER |
+| Delete | Allows the user to delete table rows. Applies only to tables. | DELETE |
+| Drop | Allows the user to drop objects. Applies to all object types. | DROP |
+| Execute | Allows the user to run user-defined functions, user-defined aggregates, or stored procedures. | EXECUTE |
+| GenStats | Allows the user to generate statistics on tables or databases. The user can run the `GENERATE STATISTICS` command. | 2 |
+| Groom | Allows the user to reclaim disk space for deleted or outdated rows, reorganize a table by its organizing keys, or migrate data for tables that have multiple stored versions. | 2 |
+| Insert | Allows the user to insert rows into a table. Applies only to tables. | INSERT |
+| List | Allows the user to display an object name, either in a list or in another manner. Applies to all objects. | LIST |
+| Select | Allows the user to select (or query) rows within a table. Applies to tables and views. | SELECT |
+| Truncate | Allows the user to delete all rows from a table. Applies only to tables. 
| TRUNCATE | +| Update | Allows the user to modify table rows. Applies to tables only. | UPDATE | + +Table notes: + +1. There's no direct equivalent to this function in Azure Synapse. + +1. These Netezza functions are handled automatically in Azure Synapse. + +1. The Azure Synapse `CREATE FUNCTION` feature incorporates Netezza aggregate functionality. + +1. These features are managed automatically by the system or via the Azure portal in Azure Synapse. See the next section on Operational considerations. + +Refer to [Azure Synapse Analytics security permissions](../../guidance/security-white-paper-introduction.md). + +## Operational considerations + +> [!TIP] +> Operational tasks are necessary to keep any data warehouse operating efficiently. + +This section discusses how to implement typical Netezza operational tasks in Azure Synapse with minimal risk and impact to users. + +As with all data warehouse products, once in production there are ongoing management tasks that are necessary to keep the system running efficiently and to provide data for monitoring and auditing. Resource utilization and capacity planning for future growth also falls into this category, as does backup/restore of data. + +Netezza administration tasks typically fall into two categories: + +- System administration, which is managing the hardware, configuration settings, system status, access, disk space, usage, upgrades, and other tasks. + +- Database administration, which is managing user databases and their content, loading data, backing up data, restoring data, and controlling access to data and permissions. + +IBM Netezza offers several ways or interfaces that you can use to perform the various system and database management tasks: + +- Netezza commands (`nz*` commands) are installed in the `/nz/kit/bin` directory on the Netezza host. For many of the `nz*` commands, you must be able to sign into the Netezza system to access and run those commands. In most cases, users sign in as the default `nz` user account, but you can create other Linux user accounts on your system. Some commands require you to specify a database user account, password, and database to ensure that you have permission to do the task. + +- The Netezza CLI client kits package a subset of the `nz*` commands that can be run from Windows and UNIX client systems. The client commands might also require you to specify a database user account, password, and database to ensure that you have database administrative and object permissions to perform the task. + +- The SQL commands support administration tasks and queries within a SQL database session. You can run the SQL commands from the Netezza nzsql command interpreter or through SQL APIs such as ODBC, JDBC, and the OLE DB Provider. You must have a database user account to run the SQL commands with appropriate permissions for the queries and tasks that you perform. + +- The NzAdmin tool is a Netezza interface that runs on Windows client workstations to manage Netezza systems. + +While conceptually the management and operations tasks for different data warehouses are similar, the individual implementations may differ. In general, modern cloud-based products such as Azure Synapse tend to incorporate a more automated and "system managed" approach (as opposed to a more "manual" approach in legacy data warehouses such as Netezza). + +The following sections compare Netezza and Azure Synapse options for various operational tasks. 
+
+### Housekeeping tasks
+
+> [!TIP]
+> Housekeeping tasks keep a production warehouse operating efficiently and optimize use of resources such as storage.
+
+In most legacy data warehouse environments, regular "housekeeping" tasks are time-consuming. These tasks reclaim disk storage space by removing old versions of updated or deleted rows, and reorganize data, log files, or index blocks for efficiency (`GROOM` and `VACUUM` in Netezza). Collecting statistics is also a potentially time-consuming task, required after a bulk data ingest to provide the query optimizer with up-to-date data on which to base query execution plans.
+
+Netezza recommends collecting statistics as follows:
+
+- Collect statistics on unpopulated tables to set up the interval histogram used in internal processing. This initial collection makes subsequent statistics collections faster. Make sure to recollect statistics after data is added.
+
+- Collect prototype phase statistics for newly populated tables.
+
+- Collect production phase statistics after a significant percentage of change to the table or partition (~10% of rows). For high volumes of nonunique values, such as dates or timestamps, it may be advantageous to recollect at 7%.
+
+- Collect production phase statistics after you've created users and applied real-world query loads to the database (up to about three months of querying).
+
+- Collect statistics in the first few weeks after an upgrade or migration during periods of low CPU utilization.
+
+The Netezza database contains many log tables in the data dictionary that accumulate data, either automatically or after certain features are enabled. Because log data grows over time, purge older information to avoid using up permanent space. Options are available to automate the maintenance of these logs.
+
+> [!TIP]
+> Automate and monitor housekeeping tasks in Azure.
+
+Azure Synapse has an option to automatically create statistics so that they can be used as needed. Perform defragmentation of indexes and data blocks manually, on a scheduled basis, or automatically. Leveraging native built-in Azure capabilities can reduce the effort required in a migration exercise.
+
+### Monitoring and auditing
+
+> [!TIP]
+> Netezza Performance Portal is the recommended method of monitoring and logging for Netezza systems.
+
+Netezza provides the Netezza Performance Portal to monitor various aspects of one or more Netezza systems including activity, performance, queuing, and resource utilization. Netezza Performance Portal is an interactive GUI that allows users to drill down into low-level details for any chart.
+
+> [!TIP]
+> The Azure portal provides a UI to manage monitoring and auditing tasks for all Azure data and processes.
+
+Similarly, Azure Synapse provides a rich monitoring experience within the Azure portal to provide insights into your data warehouse workload. The Azure portal is the recommended tool when monitoring your data warehouse as it provides configurable retention periods, alerts, recommendations, and customizable charts and dashboards for metrics and logs.
+
+The portal also enables integration with other Azure monitoring services, such as Operations Management Suite (OMS) and Azure Monitor (logs), to provide a holistic monitoring experience for not only the data warehouse but also the entire Azure analytics platform.
+
+> [!TIP]
+> Low-level and system-wide metrics are automatically logged in Azure Synapse. 
+ +Resource utilization statistics for Azure Synapse are automatically logged within the system. The metrics for each query include usage statistics for CPU, memory, cache, I/O, and temporary workspace, as well as connectivity information like failed connection attempts. + +Azure Synapse provides a set of [Dynamic Management Views](../../sql-data-warehouse/sql-data-warehouse-manage-monitor.md?msclkid=3e6eefbccfe211ec82d019ada29b1834) (DMVs). These views are useful when actively troubleshooting and identifying performance bottlenecks with your workload. + +For more information, see [Azure Synapse operations and management options](/azure/sql-data-warehouse/sql-data-warehouse-how-to-manage-and-monitor-workload-importance). + +### High Availability (HA) and Disaster Recovery (DR) + +Netezza appliances are redundant, fault-tolerant systems, and there are diverse options in a Netezza system to enable high availability and disaster recovery. + +Adding IBM Netezza Replication Services for disaster recovery improves fault tolerance by extending redundancy across local and wide area networks. + +IBM Netezza Replication Services protects against data loss by synchronizing data on a primary system (the primary node) with data on one or more target nodes (subordinates). These nodes make up a replication set. + +High-Availability Linux (also called *Linux-HA*) provides the failover capabilities from a primary or active Netezza host to a secondary or standby Netezza host. The main cluster management daemon in the Linux-HA solution is called *Heartbeat*. Heartbeat watches the hosts and manages the communication and status checks of services. + +Each service is a resource. + +Netezza groups the Netezza-specific services into the nps resource group. When Heartbeat detects problems that imply a host failure condition or loss of service to the Netezza users, Heartbeat can initiate a failover to the standby host. For details about Linux-HA and its terms and operations, see the documentation at [http://www.linux-ha.org](http://www.linux-ha.org/). + +Distributed Replicated Block Device (DRBD) is a block device driver that mirrors the content of block devices (hard disks, partitions, and logical volumes) between the hosts. Netezza uses the DRBD replication only on the **/nz** and **/export/home** partitions. As new data is written to the **/nz** partition and the **/export/home** partition on the primary host, the DRBD software automatically makes the same changes to the **/nz** and **/export/home** partition of the standby host. + +> [!TIP] +> Azure Synapse creates snapshots automatically to ensure fast recovery times. + +Azure Synapse uses database snapshots to provide high availability of the warehouse. A data warehouse snapshot creates a restore point that can be used to recover or copy a data warehouse to a previous state. Since Azure Synapse is a distributed system, a data warehouse snapshot consists of many files that are in Azure Storage. Snapshots capture incremental changes from the data stored in your data warehouse. + +> [!TIP] +> Use user-defined snapshots to define a recovery point before key updates. + +> [!TIP] +> Microsoft Azure provides automatic backups to a separate geographical location to enable DR. + +Azure Synapse automatically takes snapshots throughout the day, creating restore points that are available for seven days. You can't change this retention period. Azure Synapse supports an eight-hour recovery point objective (RPO). 
A data warehouse can be restored in the primary region from any one of the snapshots taken in the past seven days.
+
+User-defined restore points are also supported, allowing manual triggering of snapshots to create restore points of a data warehouse before and after large modifications. This capability ensures that restore points are logically consistent, which provides additional data protection in case of any workload interruptions or user errors for a desired RPO less than 8 hours.
+
+As well as the snapshots described previously, Azure Synapse also performs a standard geo-backup once per day to a [paired data center](/azure/best-practices-availability-paired-regions). The RPO for a geo-restore is 24 hours. You can restore the geo-backup to a server in any other region where Azure Synapse is supported. A geo-backup ensures that a data warehouse can be restored in case the restore points in the primary region aren't available.
+
+### Workload management
+
+> [!TIP]
+> In a production data warehouse, there are typically mixed workloads with different resource usage characteristics running concurrently.
+
+Netezza incorporates various features for managing workloads:
+
+| Technique | Description |
+|-----------|-------------|
+| **Scheduler rules** | Scheduler rules influence the scheduling of plans. Each scheduler rule specifies a condition or set of conditions. Each time the scheduler receives a plan, it evaluates all modifying scheduler rules and carries out the appropriate actions. Each time the scheduler selects a plan for execution, it evaluates all limiting scheduler rules. The plan is executed only if doing so wouldn't exceed a limit imposed by a limiting scheduler rule. Otherwise, the plan waits. This provides you with a way to classify and manipulate plans in a way that influences the other WLM techniques (SQB, GRA, and PQE). |
+| **Guaranteed resource allocation (GRA)** | You can assign a minimum share and a maximum percentage of total system resources to entities called *resource groups*. The scheduler ensures that each resource group receives system resources in proportion to its minimum share. A resource group receives a larger share of resources when other resource groups are idle, but never receives more than its configured maximum percentage. Each plan is associated with a resource group, and the settings of that resource group determine what fraction of available system resources are to be made available to process the plan. |
+| **Short query bias (SQB)** | Resources (that is, scheduling slots, memory, and preferential queuing) are reserved for short queries. A short query is a query for which the cost estimate is less than a specified maximum value (the default is two seconds). With SQB, short queries can run even when the system is busy processing other, longer queries. |
+| **Prioritized query execution (PQE)** | Based on settings that you configure, the system assigns a priority—critical, high, normal, or low—to each query. The priority depends on factors such as the user, group, or session associated with the query. The system can then use the priority as a basis for allocating resources. |
+
+In Azure Synapse, resource classes are pre-determined resource limits that govern compute resources and concurrency for query execution. Resource classes can help you manage your workload by setting limits on the number of queries that run concurrently and on the compute resources assigned to each query. There's a trade-off between memory and concurrency.
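+
+For example, a resource class is assigned by adding a user to one of the built-in resource class database roles. This is a minimal sketch, assuming a hypothetical `loaduser` login that runs large data loads:
+
+```sql
+-- Create a database user for the hypothetical load login and assign a larger static
+-- resource class, so its queries get more memory at the cost of lower concurrency.
+CREATE USER loaduser FOR LOGIN loaduser;
+EXEC sp_addrolemember 'staticrc60', 'loaduser';
+```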
+ +See [Resource classes for workload management](/azure/sql-data-warehouse/resource-classes-for-workload-management) for detailed information. + +This information can also be used for capacity planning, determining the resources required for additional users or application workload. This also applies to planning scale up/scale downs of compute resources for cost-effective support of "spiky" workloads, such as workloads with temporary, intense bursts of activity surrounded by periods of infrequent activity. + +### Scale compute resources + +> [!TIP] +> A major benefit of Azure is the ability to independently scale up and down compute resources on demand to handle peaky workloads cost-effectively. + +The architecture of Azure Synapse separates storage and compute, allowing each to scale independently. As a result, [compute resources can be scaled](../../sql-data-warehouse/quickstart-scale-compute-portal.md) to meet performance demands independent of data storage. You can also pause and resume compute resources. A natural benefit of this architecture is that billing for compute and storage is separate. If a data warehouse isn't in use, you can save on compute costs by pausing compute. + +Compute resources can be scaled up or scaled back by adjusting the data warehouse units setting for the data warehouse. Loading and query performance will increase linearly as you add more data warehouse units. + +Adding more compute nodes adds more compute power and ability to leverage more parallel processing. As the number of compute nodes increases, the number of distributions per compute node decreases, providing more compute power and parallel processing for queries. Similarly, decreasing data warehouse units reduces the number of compute nodes, which reduces the compute resources for queries. + +## Next steps + +To learn more about visualization and reporting, see the next article in this series: [Visualization and reporting for Netezza migrations](4-visualization-reporting.md). \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/netezza/4-visualization-reporting.md b/articles/synapse-analytics/migration-guides/netezza/4-visualization-reporting.md new file mode 100644 index 0000000000000..64b4b1f823012 --- /dev/null +++ b/articles/synapse-analytics/migration-guides/netezza/4-visualization-reporting.md @@ -0,0 +1,318 @@ +--- +title: "Visualization and reporting for Netezza migrations" +description: Learn about Microsoft and third-party BI tools for reports and visualizations in Azure Synapse Analytics compared to Netezza. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/31/2022 +--- + +# Visualization and reporting for Netezza migrations + +This article is part four of a seven part series that provides guidance on how to migrate from Netezza to Azure Synapse Analytics. This article provides best practices for visualization and reporting. + +## Access Azure Synapse Analytics using Microsoft and third-party BI tools + +Almost every organization accesses data warehouses and data marts using a range of BI tools and applications, such as: + +- Microsoft BI tools, like Power BI. + +- Office applications, like Microsoft Excel spreadsheets. + +- Third-party BI tools from various vendors. + +- Custom analytic applications that have embedded BI tool functionality inside the application. 
+ +- Operational applications that request BI on demand, invoke queries and reports as-a-service on a BI platform, which in turn queries data in the data warehouse or data marts that are being migrated. + +- Interactive data science development tools, such as Azure Synapse Spark Notebooks, Azure Machine Learning, RStudio, and Jupyter Notebooks. + +The migration of visualization and reporting as part of a data warehouse migration program means that all the existing queries, reports, and dashboards generated and issued by these tools and applications need to run on Azure Synapse and yield the same results as they did in the original data warehouse prior to migration. + +> [!TIP] +> Existing users, user groups, roles and assignments of access security privileges need to be migrated first for migration of reports and visualizations to succeed. + +To make that happen, everything that BI tools and applications depend on still needs to work once you migrate your data warehouse schema and data to Azure Synapse. That includes the obvious and the not so obvious—such as access and security. Access and security are important considerations for data access in the migrated system, and are specifically discussed in [another guide](3-security-access-operations.md) in this series. When you address access and security, ensure that: + +- Authentication is migrated to let users sign in to the data warehouse and data mart databases on Azure Synapse. + +- All users are migrated to Azure Synapse. + +- All user groups are migrated to Azure Synapse. + +- All roles are migrated to Azure Synapse. + +- All authorization privileges governing access control are migrated to Azure Synapse. + +- User, role, and privilege assignments are migrated to mirror what you had on your existing data warehouse before migration. For example: + - Database object privileges assigned to roles + - Roles assigned to user groups + - Users assigned to user groups and/or roles + +> [!TIP] +> Communication and business user involvement is critical to success. + +In addition, all the required data needs to be migrated to ensure the same results appear in the same reports and dashboards that now query data on Azure Synapse. User expectation will undoubtedly be that migration is seamless and there will be no surprises that destroy their confidence in the migrated system on Azure Synapse. So, this is an area where you must take extreme care and communicate as much as possible to allay any fears in your user base. Their expectations are that: + +- Table structure will be the same if directly referred to in queries. + +- Table and column names remain the same if directly referred to in queries; for instance, so that calculated fields defined on columns in BI tools don't fail when aggregate reports are produced. + +- Historical analysis remains the same. + +- Data types should, if possible, remain the same. + +- Query behavior remains the same. + +- ODBC/JDBC drivers are tested to make sure nothing has changed in terms of query behavior. + +> [!TIP] +> Views and SQL queries using proprietary SQL query extensions are likely to result in incompatibilities that impact BI reports and dashboards. + +If BI tools are querying views in the underlying data warehouse or data mart database, then will these views still work? You might think yes, but if there are proprietary SQL extensions specific to your legacy data warehouse DBMS in these views that have no equivalent in Azure Synapse, you'll need to know about them and find a way to resolve them. 
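+
+For example, this sketch shows the kind of rewrite that might be needed. The view, table, and column names are hypothetical and used only for illustration:
+
+```sql
+-- Netezza view (hypothetical) using proprietary syntax: the || concatenation operator and the AGE() function.
+-- CREATE VIEW v_customer AS
+--   SELECT first_name || ' ' || last_name AS full_name, AGE(birth_date) AS age FROM customer;
+
+-- A possible Azure Synapse rewrite using CONCAT and DATEDIFF:
+CREATE VIEW v_customer AS
+SELECT CONCAT(first_name, ' ', last_name) AS full_name,
+       DATEDIFF(year, birth_date, GETDATE()) AS age   -- approximate age in years
+FROM dbo.customer;
+```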
+ +Other issues like the behavior of nulls or data type variations across DBMS platforms need to be tested, in case they cause slightly different calculation results. Obviously, you want to minimize these issues and take all necessary steps to shield business users from any kind of impact. Depending on your legacy data warehouse system (such as Netezza), there are [tools](../../partner/data-integration.md) that can help hide these differences so that BI tools and applications are kept unaware of them and can run unchanged. + +> [!TIP] +> Use repeatable tests to ensure reports, dashboards, and other visualizations migrate successfully. + +Testing is critical to visualization and report migration. You need a test suite and agreed-on test data to run and rerun tests in both environments. A test harness is also useful, and a few are mentioned later in this guide. In addition, it's also important to have significant business involvement in this area of migration to keep confidence high and to keep them engaged and part of the project. + +Finally, you may also be thinking about switching BI tools. For example, you might want to [migrate to Power BI](/power-bi/guidance/powerbi-migration-overview). The temptation is to do all of this at the same time, while migrating your schema, data, ETL processing, and more. However, to minimize risk, it's better to migrate to Azure Synapse first and get everything working before undertaking further modernization. + +If your existing BI tools run on premises, ensure that they're able to connect to Azure Synapse through your firewall to run comparisons against both environments. Alternatively, if the vendor of your existing BI tools offers their product on Azure, you can try it there. The same applies for applications running on premises that embed BI or that call your BI server on-demand, requesting a "headless report" with data returned in XML or JSON, for example. + +There's a lot to think about here, so let's look at all this in more detail. + +> [!TIP] +> A lift and shift data warehouse migration is likely to minimize any disruption to reports, dashboards, and other visualizations. + +## Minimize the impact of data warehouse migration on BI tools and reports by using data virtualization + +> [!TIP] +> Data virtualization allows you to shield business users from structural changes during migration so that they remain unaware of changes. + +The temptation during data warehouse migration to the cloud is to take the opportunity to make changes during the migration to fulfill long-term requirements, such as opening business requests, missing data, new features, and more. However, these changes can affect the BI tools accessing your data warehouse, especially if it involves structural changes in your data model. If you want to adopt an agile data modeling technique or implement structural changes, do so *after* migration. + +One way in which you can minimize the impact of things like schema changes on BI tools is to introduce data virtualization between BI tools and your data warehouse and data marts. The following diagram shows how data virtualization can hide the migration from users. 
+ +:::image type="content" source="../media/4-visualization-reporting/migration-data-virtualization.png" border="true" alt-text="Diagram showing how to hide the migration from users through data virtualization."::: + +This breaks the dependency between business users utilizing self-service BI tools and the physical schema of the underlying data warehouse and data marts that are being migrated. + +> [!TIP] +> Schema alterations to tune your data model for Azure Synapse can be hidden from users. + +By introducing data virtualization, any schema alterations made during data warehouse and data mart migration to Azure Synapse (to optimize performance, for example) can be hidden from business users because they only access virtual tables in the data virtualization layer. If structural changes are needed, only the mappings between the data warehouse or data marts, and any virtual tables would need to be changed so that users remain unaware of those changes and unaware of the migration. [Microsoft partners](../../partner/data-integration.md) provide useful data virtualization software. + +## Identify high priority reports to migrate first + +A key question when migrating your existing reports and dashboards to Azure Synapse is which ones to migrate first. Several factors can drive the decision. For example: + +- Business value + +- Usage + +- Ease of migration + +- Data migration strategy + +These factors are discussed in more detail later in this article. + +Whatever the decision is, it must involve the business, since they produce the reports and dashboards, and consume the insights these artifacts provide in support of the decisions that are made around your business. That said, if most reports and dashboards can be migrated seamlessly, with minimal effort, and offer up like-for-like results, simply by pointing your BI tool(s) at Azure Synapse, instead of your legacy data warehouse system, then everyone benefits. + +### Migrate reports based on usage + +Usage is interesting, since it's an indicator of business value. Reports and dashboards that are never used clearly aren't contributing to supporting any decisions and don't currently offer any value. So, do you have any mechanism for finding out which reports and dashboards are currently not used? Several BI tools provide statistics on usage, which would be an obvious place to start. + +If your legacy data warehouse has been up and running for many years, there's a high chance you could have hundreds, if not thousands, of reports in existence. In these situations, usage is an important indicator of the business value of a specific report or dashboard. In that sense, it's worth compiling an inventory of the reports and dashboards you have and defining their business purpose and usage statistics. + +For those that aren't used at all, it's an appropriate time to seek a business decision, to determine if it's necessary to decommission those reports to optimize your migration efforts. A key question worth asking when deciding to decommission unused reports is: are they unused because people don't know they exist, or is it because they offer no business value, or have they been superseded by others? + +### Migrate reports based on business value + +Usage on its own isn't a clear indicator of business value. There needs to be a deeper business context to determine the value to the business. In an ideal world, we would like to know the contribution of the insights produced in a report to the bottom line of the business. 
That's exceedingly difficult to determine, since every decision made, and its dependency on the insights in a specific report, would need to be recorded along with the contribution that each decision makes to the bottom line of the business. You would also need to do this over time. + +This level of detail is unlikely to be available in most organizations. One way in which you can get deeper on business value to drive migration order is to look at alignment with business strategy. A business strategy set by your executive typically lays out strategic business objectives, key performance indicators (KPIs), KPI targets that need to be achieved, and who is accountable for achieving them. In that sense, classifying your reports and dashboards by strategic business objectives—for example, reduce fraud, improve customer engagement, and optimize business operations—will help understand business purpose and show what objective(s), specific reports, and dashboards these are contributing to. Reports and dashboards associated with high priority objectives in the business strategy can then be highlighted so that migration is focused on delivering business value in a strategic high priority area. + +It's also worthwhile to classify reports and dashboards as operational, tactical, or strategic, to understand the level in the business where they're used. Delivering strategic business objectives requires contribution at all these levels. Knowing which reports and dashboards are used, at what level, and what objectives they're associated with helps to focus migration on high priority business value that will drive the company forward. Business contribution of reports and dashboards is needed to understand this, perhaps like what is shown in the following **business strategy objective** table. + +| **Level** | **Report / dashboard name** | **Business purpose** | **Department used** | **Usage frequency** | **Business priority** | +|-|-|-|-|-|-| +| **Strategic** | | | | | | +| **Tactical** | | | | | | +| **Operational** | | | | | | + +While this may seem too time consuming, you need a mechanism to understand the contribution of reports and dashboards to business value, whether you're migrating or not. Catalogs like Azure Data Catalog are becoming very important because they give you the ability to catalog reports and dashboards, automatically capture the metadata associated with them, and let business users tag and rate them to help you understand business value. + +### Migrate reports based on data migration strategy + +> [!TIP] +> Data migration strategy could also dictate which reports and visualizations get migrated first. + +If your migration strategy is based on migrating data marts first, the order of data mart migration will have a bearing on which reports and dashboards can be migrated first to run on Azure Synapse. Again, this is likely to be a business-value-related decision. Prioritizing which data marts are migrated first reflects business priorities. Metadata discovery tools can help you here by showing you which reports rely on data in which data mart tables. + +## Migration incompatibility issues that can impact reports and visualizations + +When it comes to migrating to Azure Synapse, there are several things that can impact the ease of migration for reports, dashboards, and other visualizations. The ease of migration is affected by: + +- Incompatibilities that occur during schema migration between your legacy data warehouse and Azure Synapse. 
+ +- Incompatibilities in SQL between your legacy data warehouse and Azure Synapse. + +### The impact of schema incompatibilities + +> [!TIP] +> Schema incompatibilities include legacy warehouse DBMS table types and data types that are unsupported on Azure Synapse. + +BI tool reports and dashboards, and other visualizations, are produced by issuing SQL queries that access physical tables and/or views in your data warehouse or data mart. When it comes to migrating your data warehouse or data mart schema to Azure Synapse, there may be incompatibilities that can impact reports and dashboards, such as: + +- Non-standard table types supported in your legacy data warehouse DBMS that don't have an equivalent in Azure Synapse. + +- Data types supported in your legacy data warehouse DBMS that don't have an equivalent in Azure Synapse. + +In many cases, where there are incompatibilities, there may be ways around them. For example, the data in unsupported table types can be migrated into a standard table with appropriate data types and indexed or partitioned on a date/time column. Similarly, it may be possible to represent unsupported data types in another type of column and perform calculations in Azure Synapse to achieve the same. Either way, it will need refactoring. + +> [!TIP] +> Querying the system catalog of your legacy warehouse DBMS is a quick and straightforward way to identify schema incompatibilities with Azure Synapse. + +To identify reports and visualizations impacted by schema incompatibilities, run queries against the system catalog of your legacy data warehouse to identify tables with unsupported data types. Then use metadata from your BI tool or tools to identify reports that access these structures, to see what could be impacted. Obviously, this will depend on the legacy data warehouse DBMS you're migrating from. Find details of how to identify these incompatibilities in [Design and performance for Netezza migrations](1-design-performance-migration.md). + +The impact may be less than you think, because many BI tools don't support such data types. As a result, views may already exist in your legacy data warehouse that `CAST` unsupported data types to more generic types. + +### The impact of SQL incompatibilities and differences + +Additionally, any report, dashboard, or other visualization in an application or tool that makes use of proprietary SQL extensions associated with your legacy data warehouse DBMS is likely to be impacted when migrating to Azure Synapse. This could happen because the BI tool or application: + +- Accesses legacy data warehouse DBMS views that include proprietary SQL functions that have no equivalent in Azure Synapse. + +- Issues SQL queries, which include proprietary SQL functions peculiar to the SQL dialect of your legacy data warehouse DBMS, that have no equivalent in Azure Synapse. + +### Gauge the impact of SQL incompatibilities on your reporting portfolio + +You can't rely on documentation associated with reports, dashboards, and other visualizations to gauge how big of an impact SQL incompatibility may have on the portfolio of embedded query services, reports, dashboards, and other visualizations you're intending to migrate to Azure Synapse. There must be a more precise way of doing that. + +#### Use EXPLAIN statements to find SQL incompatibilities + +> [!TIP] +> Gauge the impact of SQL incompatibilities by harvesting your DBMS log files and running `EXPLAIN` statements. 
+
+One way is to view the recent SQL activity of your legacy Netezza data warehouse. Query the `_v_qryhist` system table to view recent history data and extract a representative set of SQL statements into a file. For more information, see [Query history table](https://www.ibm.com/docs/en/psfa/7.2.1?topic=tables-query-history-table). Then, prefix each SQL statement with an `EXPLAIN` statement, and run all the `EXPLAIN` statements in Azure Synapse. Any SQL statements containing proprietary SQL extensions from your legacy data warehouse that are unsupported will be rejected by Azure Synapse when the `EXPLAIN` statements are executed. This approach will at least give you an idea of how significant the use of incompatible SQL is.
+
+Metadata from your legacy data warehouse DBMS will also help you when it comes to views. Again, you can capture and view SQL statements, and `EXPLAIN` them as described previously to identify incompatible SQL in views.
+
+## Test report and dashboard migration to Azure Synapse Analytics
+
+> [!TIP]
+> Test performance and tune to minimize compute costs.
+
+A key element in data warehouse migration is the testing of reports and dashboards against Azure Synapse to verify that the migration has worked. To do this, you need to define a series of tests and a set of required outcomes for each test that needs to be run to verify success. It's important to ensure that reports and dashboards are tested and compared across your existing and migrated data warehouse systems to:
+
+- Identify whether schema changes made during migration, such as converted data types, have affected the ability of reports to run and to produce correct results and visualizations.
+
+- Verify all users are migrated.
+
+- Verify all roles are migrated and users are assigned to those roles.
+
+- Verify all data access security privileges are migrated to ensure access control list (ACL) migration.
+
+- Ensure consistent results of all known queries, reports, and dashboards.
+
+- Ensure that data and ETL migration is complete and error-free.
+
+- Ensure data privacy is upheld.
+
+- Test performance and scalability.
+
+- Test analytical functionality.
+
+For information about how to migrate users, user groups, roles, and privileges, see [Security, access, and operations for Netezza migrations](3-security-access-operations.md), which is part of this series.
+
+> [!TIP]
+> Build an automated test suite to make tests repeatable.
+
+It's also best practice to automate testing as much as possible, to make each test repeatable and to allow a consistent approach to evaluating results. This works well for known, regular reports, and could be managed via [Azure Synapse Pipelines](../../get-started-pipelines.md?msclkid=8f3e7e96cfed11eca432022bc07c18de) or [Azure Data Factory](../../../data-factory/introduction.md?msclkid=2ccc66eccfde11ecaa58877e9d228779) orchestration. If you already have a suite of test queries in place for regression testing, you could use the testing tools to automate the post-migration testing.
+
+> [!TIP]
+> Leverage tools that can compare metadata lineage to verify results.
+
+Ad-hoc analysis and reporting are more challenging and require a set of tests to be compiled to verify that results are consistent across your legacy data warehouse DBMS and Azure Synapse.
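+
+Whether the reports are known in advance or ad hoc, the core of each test is usually a comparison of results produced by both systems. The following is a minimal sketch of such a consistency check, assuming the legacy report's output has been exported into a hypothetical `dbo.Legacy_SalesByRegion` table alongside a migrated `dbo.FactSales` table in Azure Synapse; any rows returned indicate a mismatch to investigate.
+
+```sql
+-- Compare a report-level aggregate exported from the legacy system with the
+-- same aggregate computed on the migrated Azure Synapse tables.
+SELECT
+    COALESCE(l.Region, s.Region) AS Region,
+    l.TotalSales                 AS LegacyTotal,
+    s.TotalSales                 AS SynapseTotal
+FROM dbo.Legacy_SalesByRegion AS l
+FULL OUTER JOIN
+(
+    SELECT Region, SUM(SalesAmount) AS TotalSales
+    FROM dbo.FactSales
+    GROUP BY Region
+) AS s
+    ON l.Region = s.Region
+WHERE l.TotalSales IS NULL
+   OR s.TotalSales IS NULL
+   OR l.TotalSales <> s.TotalSales;
+```
+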
If reports and dashboards are inconsistent, then having the ability to compare metadata lineage across original and migrated systems is extremely valuable during migration testing, as it can highlight differences and pinpoint where they occurred when these aren't easy to detect. This is discussed in more detail later in this article. + +In terms of security, the best way to do this is to create roles, assign access privileges to roles, and then attach users to roles. To access your newly migrated data warehouse, set up an automated process to create new users, and to do role assignment. To detach users from roles, you can follow the same steps. + +It's also important to communicate the cutover to all users, so they know what's changing and what to expect. + +## Analyze lineage to understand dependencies between reports, dashboards, and data + +> [!TIP] +> Having access to metadata and data lineage from reports all the way back to data source is critical for verifying that migrated reports are working correctly. + +A critical success factor in migrating reports and dashboards is understanding lineage. Lineage is metadata that shows the journey that data has taken, so you can see the path from the report/dashboard all the way back to where the data originates. It shows how data has gone from point to point, its location in the data warehouse and/or data mart, and where it's used—for example, in what reports. It helps you understand what happens to data as it travels through different data stores—files and database—different ETL pipelines, and into reports. If business users have access to data lineage, it improves trust, breeds confidence, and enables more informed business decisions. + +> [!TIP] +> Tools that automate metadata collection and show end-to-end lineage in a multi-vendor environment are valuable when it comes to migration. + +In multi-vendor data warehouse environments, business analysts in BI teams may map out data lineage. For example, if you have Informatica for your ETL, Oracle for your data warehouse, and Tableau for reporting, each of which have their own metadata repository, figuring out where a specific data element in a report came from can be challenging and time consuming. + +To migrate seamlessly from a legacy data warehouse to Azure Synapse, end-to-end data lineage helps prove like-for-like migration when comparing reports and dashboards against your legacy environment. That means that metadata from several tools needs to be captured and integrated to show the end-to-end journey. Having access to tools that support automated metadata discovery and data lineage will let you see duplicate reports and ETL processes and reports that rely on data sources that are obsolete, questionable, or even non-existent. With this information, you can reduce the number of reports and ETL processes that you migrate. + +You can also compare end-to-end lineage of a report in Azure Synapse against the end-to-end lineage for the same report in your legacy data warehouse environment, to see if there are any differences that have occurred inadvertently during migration. This helps enormously with testing and verifying migration success. + +Data lineage visualization not only reduces time, effort, and error in the migration process, but also enables faster execution of the migration project. 
+ +By leveraging automated metadata discovery and data lineage tools that can compare lineage, you can verify if a report is produced using data migrated to Azure Synapse and if it's produced in the same way as in your legacy environment. This kind of capability also helps you determine: + +- What data needs to be migrated to ensure successful report and dashboard execution on Azure Synapse. + +- What transformations have been and should be performed to ensure successful execution on Azure Synapse. + +- How to reduce report duplication. + +This substantially simplifies the data migration process, because the business will have a better idea of the data assets it has and what needs to be migrated to enable a solid reporting environment on Azure Synapse. + +> [!TIP] +> Azure Data Factory and several third-party ETL tools support lineage. + +Several ETL tools provide end-to-end lineage capability, and you may be able to make use of this via your existing ETL tool if you're continuing to use it with Azure Synapse. [Azure Synapse Pipelines](../../get-started-pipelines.md?msclkid=8f3e7e96cfed11eca432022bc07c18de) or [Azure Data Factory](../../../data-factory/introduction.md?msclkid=2ccc66eccfde11ecaa58877e9d228779) lets you view lineage in mapping flows. Also, [Microsoft partners](../../partner/data-integration.md) provide automated metadata discovery, data lineage, and lineage comparison tools. + +## Migrate BI tool semantic layers to Azure Synapse Analytics + +> [!TIP] +> Some BI tools have semantic layers that simplify business user access to physical data structures in your data warehouse or data mart, like SAP Business Objects and IBM Cognos. + +Some BI tools have what is known as a semantic metadata layer. The role of this metadata layer is to simplify business user access to physical data structures in an underlying data warehouse or data mart database. It does this by providing high-level objects like dimensions, measures, hierarchies, calculated metrics, and joins. These objects use business terms familiar to business analysts and are mapped to the physical data structures in the data warehouse or data mart database. + +When it comes to data warehouse migration, changes to column names or table names may be forced upon you. For example, in Oracle, table names can have a "#". In Azure Synapse, the "#" is only allowed as a prefix to a table name to indicate a temporary table. Therefore, you may need to change a table name if migrating from Oracle. You may need to do rework to change mappings in such cases. + +A good way to get everything consistent across multiple BI tools is to create a universal semantic layer, using common data names for high-level objects like dimensions, measures, hierarchies, and joins, in a data virtualization server (as shown in the next diagram) that sits between applications, BI tools, and Azure Synapse. This allows you to set up everything once (instead of in every tool), including calculated fields, joins and mappings, and then point all BI tools at the data virtualization server. + +> [!TIP] +> Use data virtualization to create a common semantic layer to guarantee consistency across all BI tools in an Azure Synapse environment. + +In this way, you get consistency across all BI tools, while at the same time breaking the dependency between BI tools and applications and the underlying physical data structures in Azure Synapse. Use [Microsoft partners](../../partner/data-integration.md) on Azure to implement this. 
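+
+Although the article recommends a data virtualization server for a tool-independent semantic layer, the underlying idea is simply a consistent mapping from business-friendly names to physical structures. The following view is an illustrative sketch of that mapping in Azure Synapse; the table and column names are hypothetical.
+
+```sql
+-- Illustrative mapping of physical warehouse structures to business-friendly names,
+-- the kind of mapping a data virtualization server centralizes for all BI tools.
+CREATE VIEW dbo.CustomerSalesSemantic
+AS
+SELECT
+    c.cust_name     AS [Customer Name],
+    p.prod_category AS [Product Category],
+    f.sale_date     AS [Sale Date],
+    f.sale_amount   AS [Sales Amount]
+FROM dbo.fact_sales AS f
+JOIN dbo.dim_customer AS c ON f.customer_key = c.customer_key
+JOIN dbo.dim_product  AS p ON f.product_key  = p.product_key;
+```
+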
The following diagram shows how a common vocabulary in the data virtualization server lets multiple BI tools see a common semantic layer.
+
+:::image type="content" source="../media/4-visualization-reporting/data-virtualization-semantics.png" border="true" alt-text="Diagram with common data names and definitions that relate to the data virtualization server.":::
+
+## Conclusions
+
+> [!TIP]
+> Identify incompatibilities early to gauge the extent of the migration effort. Migrate your users, user groups, roles, and privilege assignments. Only migrate the reports and visualizations that are used and that contribute to business value.
+
+In a lift-and-shift data warehouse migration to Azure Synapse, most reports and dashboards should migrate easily.
+
+However, if data structures have changed, if data is stored in unsupported data types, or if access to data in the data warehouse or data mart is via a view that includes proprietary SQL that's unsupported in your Azure Synapse environment, you'll need to deal with those issues as they arise.
+
+You can't rely on documentation to find out where the issues are likely to be. Making use of `EXPLAIN` statements is a pragmatic and quick way to identify incompatibilities in SQL. Rework the incompatible SQL to achieve similar results in Azure Synapse. In addition, it's recommended that you make use of automated metadata discovery and lineage tools to help you identify duplicate reports and reports that are no longer valid because they use obsolete data sources, and to understand dependencies. Some of these tools help compare lineage to verify that reports running in your legacy data warehouse environment are produced identically in Azure Synapse.
+
+Don't migrate reports that you no longer use. BI tool usage data can help determine which ones aren't in use. For the visualizations and reports that you do want to migrate, migrate all users, user groups, roles, and privileges, and associate these reports with strategic business objectives and priorities to help you identify how report insights contribute to specific objectives. This is useful if you're using business value to drive your report migration strategy. If you're migrating by data store, data mart by data mart, then metadata will also help you identify which reports are dependent on which tables and views, so that you can focus on migrating those first.
+
+Finally, consider data virtualization to shield BI tools and applications from structural changes to the data warehouse and/or the data mart data model that may occur during migration. You can also use a common vocabulary with data virtualization to define a common semantic layer that guarantees consistent common data names, definitions, metrics, hierarchies, joins, and more across all BI tools and applications in a migrated Azure Synapse environment.
+
+## Next steps
+
+To learn more about minimizing SQL issues, see the next article in this series: [Minimizing SQL issues for Netezza migrations](5-minimize-sql-issues.md).
\ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/netezza/5-minimize-sql-issues.md b/articles/synapse-analytics/migration-guides/netezza/5-minimize-sql-issues.md new file mode 100644 index 0000000000000..88ba703d887b6 --- /dev/null +++ b/articles/synapse-analytics/migration-guides/netezza/5-minimize-sql-issues.md @@ -0,0 +1,268 @@ +--- +title: "Minimize SQL issues for Netezza migrations" +description: Learn how to minimize the risk of SQL issues when migrating from Netezza to Azure Synapse Analytics. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/31/2022 +--- + +# Minimize SQL issues for Netezza migrations + +This article is part five of a seven part series that provides guidance on how to migrate from Netezza to Azure Synapse Analytics. This article provides best practices for minimizing SQL issues. + +## Overview + +### Characteristics of Netezza environments + +> [!TIP] +> Netezza pioneered the "data warehouse appliance" concept in the early 2000s. + +In 2003, Netezza initially released their data warehouse appliance product. It reduced the cost of entry and improved the ease-of-use of massively parallel processing (MPP) techniques to enable data processing at scale more efficiently than the existing mainframe or other MPP technologies available at the time. Since then, the product has evolved and has many installations among large financial institutions, telecommunications, and retail companies. The original implementation used proprietary hardware including field programmable gate arrays—or FPGAs—and was accessible via ODBC or JDBC network connection over TCP/IP. + +Most existing Netezza installations are on-premises, so many users are considering migrating some or all their Netezza data to Azure Synapse Analytics to gain the benefits of a move to a modern cloud environment. + +> [!TIP] +> Many existing Netezza installations are data warehouses using a dimensional data model. + +Netezza technology is often used to implement a data warehouse, supporting complex analytic queries on large data volumes using SQL. Dimensional data models—star or snowflake schemas—are common, as is the implementation of data marts for individual departments. + +This combination of SQL and dimensional data models simplifies migration to Azure Synapse, since the basic concepts and SQL skills are transferable. The recommended approach is to migrate the existing data model as-is to reduce risk and time taken. Even if the eventual intention is to make changes to the data model (for example, moving to a data vault model), perform an initial as-is migration and then make changes within the Azure cloud environment, leveraging the performance, elastic scalability, and cost advantages there. + +While the SQL language has been standardized, individual vendors have in some cases implemented proprietary extensions. This document highlights potential SQL differences you may encounter while migrating from a legacy Netezza environment, and provides workarounds. + +### Use Azure Data Factory to implement a metadata-driven migration + +> [!TIP] +> Automate the migration process by using Azure Data Factory capabilities. + +Automate and orchestrate the migration process by making use of the capabilities in the Azure environment. 
This approach also minimizes the migration's impact on the existing Netezza environment, which may already be running close to full capacity. + +Azure Data Factory is a cloud-based data integration service that allows creation of data-driven workflows in the cloud for orchestrating and automating data movement and data transformation. Using Data Factory, you can create and schedule data-driven workflows—called pipelines—that can ingest data from disparate data stores. It can process and transform data by using compute services such as Azure HDInsight Hadoop, Spark, Azure Data Lake Analytics, and Azure Machine Learning. + +By creating metadata to list the data tables to be migrated and their location, you can use the Data Factory facilities to manage and automate parts of the migration process. You can also use [Azure Synapse Pipelines](../../get-started-pipelines.md?msclkid=8f3e7e96cfed11eca432022bc07c18de). + +## SQL DDL differences between Netezza and Azure Synapse + +### SQL Data Definition Language (DDL) + +> [!TIP] +> SQL DDL commands `CREATE TABLE` and `CREATE VIEW` have standard core elements but are also used to define implementation-specific options. + +The ANSI SQL standard defines the basic syntax for DDL commands such as `CREATE TABLE` and `CREATE VIEW`. These commands are used within both Netezza and Azure Synapse, but they've also been extended to allow definition of implementation-specific features such as indexing, table distribution, and partitioning options. + +The following sections discuss Netezza-specific options to consider during a migration to Azure Synapse. + +### Table considerations + +> [!TIP] +> Use existing indexes to give an indication of candidates for indexing in the migrated warehouse. + +When migrating tables between different technologies, only the raw data and its descriptive metadata get physically moved between the two environments. Other database elements from the source system, such as indexes and log files, aren't directly migrated as these may not be needed or may be implemented differently within the new target environment. For example, the `TEMPORARY` option within Netezza's `CREATE TABLE` syntax is equivalent to prefixing the table name with a "#" character in Azure Synapse. + +It's important to understand where performance optimizations—such as indexes—were used in the source environment. This indicates where performance optimization can be added in the new target environment. For example, if zone maps were created in the source Netezza environment, this might indicate that a non-clustered index should be created in the migrated Azure Synapse database. Other native performance optimization techniques, such as table replication, may be more applicable than a straight "like-for-like" index creation. + +### Unsupported Netezza database object types + +> [!TIP] +> Netezza-specific features can be replaced by Azure Synapse features. + +Netezza implements some database objects that aren't directly supported in Azure Synapse, but there are methods to achieve the same functionality within the new environment: + +- Zone maps: in Netezza, zone maps are automatically created and maintained for some column types and are used at query time to restrict the amount of data to be scanned. Zone maps are created on the following column types: + - `INTEGER` columns of length 8 bytes or less. + - Temporal columns. For instance, `DATE`, `TIME`, and `TIMESTAMP`. + - `CHAR` columns, if these are part of a materialized view and mentioned in the `ORDER BY` clause. 
+ + You can find out which columns have zone maps by using the `nz_zonemap` utility, which is part of the NZ Toolkit. Azure Synapse doesn't include zone maps, but you can achieve similar results by using other user-defined index types and/or partitioning. + +- Clustered base tables (CBT): in Netezza, CBTs are commonly used for fact tables, which can have billions of records. Scanning such a huge table requires a lot of processing time, since a full table scan might be needed to get relevant records. Organizing records on restrictive CBT allows Netezza to group records in same or nearby extents. This process also creates zone maps that improve the performance by reducing the amount of data to be scanned. + + In Azure Synapse, you can achieve a similar effect by use of partitioning and/or use of other indexes. + +- Materialized views: Netezza supports materialized views and recommends creating one or more of these over large tables having many columns where only a few of those columns are regularly used in queries. The system automatically maintains materialized views when data in the base table is updated. + + Azure Synapse supports materialized views, with the same functionality as Netezza. + +### Netezza data type mapping + +> [!TIP] +> Assess the impact of unsupported data types as part of the preparation phase. + +Most Netezza data types have a direct equivalent in Azure Synapse. The following table shows these data types along with the recommended approach for mapping them. + +| Netezza Data Type | Azure Synapse Data Type | +|--------------------------------|-------------------------------------| +| BIGINT | BIGINT | +| BINARY VARYING(n) | VARBINARY(n) | +| BOOLEAN | BIT | +| BYTEINT | TINYINT | +| CHARACTER VARYING(n) | VARCHAR(n) | +| CHARACTER(n) | CHAR(n) | +| DATE | DATE(date) | +| DECIMAL(p,s) | DECIMAL(p,s) | +| DOUBLE PRECISION | FLOAT | +| FLOAT(n) | FLOAT(n) | +| INTEGER | INT | +| INTERVAL | INTERVAL data types aren't currently directly supported in Azure Synapse but can be calculated using temporal functions such as DATEDIFF. | +| MONEY | MONEY | +| NATIONAL CHARACTER VARYING(n) | NVARCHAR(n) | +| NATIONAL CHARACTER(n) | NCHAR(n) | +| NUMERIC(p,s) | NUMERIC(p,s) | +| REAL | REAL | +| SMALLINT | SMALLINT | +| ST_GEOMETRY(n) | Spatial data types such as ST_GEOMETRY aren't currently supported in Azure Synapse, but the data could be stored as VARCHAR or VARBINARY. | +| TIME | TIME | +| TIME WITH TIME ZONE | DATETIMEOFFSET | +| TIMESTAMP | DATETIME | + +### Data Definition Language (DDL) generation + +> [!TIP] +> Use existing Netezza metadata to automate the generation of `CREATE TABLE` and `CREATE VIEW` DDL for Azure Synapse. + +Edit existing Netezza `CREATE TABLE` and `CREATE VIEW` scripts to create the equivalent definitions with modified data types as described previously if necessary. Typically, this involves removing or modifying any extra Netezza-specific clauses such as `ORGANIZE ON`. + +However, all the information that specifies the current definitions of tables and views within the existing Netezza environment is maintained within system catalog tables. This is the best source of this information as it's guaranteed to be up to date and complete. Be aware that user-maintained documentation may not be in sync with the current table definitions. + +Access this information by using utilities such as `nz_ddl_table` and generate the `CREATE TABLE` DDL statements. Edit these statements for the equivalent tables in Azure Synapse. 
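+
+As a simple sketch of that edit, the comments below show the kind of DDL that `nz_ddl_table` might generate for a hypothetical Netezza table, followed by one possible Azure Synapse equivalent in which the Netezza-specific `DISTRIBUTE ON` and `ORGANIZE ON` clauses are replaced by Azure Synapse table options. The table, column names, and chosen distribution are illustrative only.
+
+```sql
+-- Hypothetical DDL generated from the Netezza system catalog:
+--   CREATE TABLE SALES_FACT
+--   (
+--       SALE_ID   BIGINT,
+--       SALE_DATE DATE,
+--       STORE_ID  INTEGER,
+--       AMOUNT    NUMERIC(18,2)
+--   )
+--   DISTRIBUTE ON (SALE_ID)
+--   ORGANIZE ON (SALE_DATE);
+
+-- One possible Azure Synapse equivalent, expressing distribution and storage
+-- through table options instead of Netezza-specific clauses:
+CREATE TABLE dbo.SALES_FACT
+(
+    SALE_ID   BIGINT,
+    SALE_DATE DATE,
+    STORE_ID  INT,
+    AMOUNT    DECIMAL(18,2)
+)
+WITH
+(
+    DISTRIBUTION = HASH(SALE_ID),
+    CLUSTERED COLUMNSTORE INDEX
+);
+```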
+
+> [!TIP]
+> Third-party tools and services can automate data mapping tasks.
+
+There are [Microsoft partners](../../partner/data-integration.md) that offer tools and services to automate migration, including data-type mapping. Also, if a third-party ETL tool such as Informatica or Talend is already in use in the Netezza environment, that tool can implement any required data transformations.
+
+## SQL DML differences between Netezza and Azure Synapse
+
+### SQL Data Manipulation Language (DML)
+
+> [!TIP]
+> SQL DML commands `SELECT`, `INSERT`, and `UPDATE` have standard core elements but may also implement different syntax options.
+
+The ANSI SQL standard defines the basic syntax for DML commands such as `SELECT`, `INSERT`, `UPDATE`, and `DELETE`. Both Netezza and Azure Synapse use these commands, but in some cases there are implementation differences.
+
+The following sections discuss the Netezza-specific DML commands that you should consider during a migration to Azure Synapse.
+
+### SQL DML syntax differences
+
+Be aware of these differences in SQL Data Manipulation Language (DML) syntax between Netezza SQL and Azure Synapse when migrating:
+
+- `STRPOS`: in Netezza, the `STRPOS` function returns the position of a substring within a string. The equivalent function in Azure Synapse is `CHARINDEX`, with the order of the arguments reversed. For example, `SELECT STRPOS('abcdef','def')...` in Netezza is equivalent to `SELECT CHARINDEX('def','abcdef')...` in Azure Synapse.
+
+- `AGE`: Netezza supports the `AGE` operator to give the interval between two temporal values, such as timestamps or dates. For example, `SELECT AGE('23-03-1956','01-01-2019') FROM...`. In Azure Synapse, `DATEDIFF` gives the interval. For example, `SELECT DATEDIFF(day, '1956-03-23','2019-01-01') FROM...`. Note the difference in date format between the two systems.
+
+- `NOW()`: Netezza uses `NOW()` to return the current date and time. The equivalent in Azure Synapse is `CURRENT_TIMESTAMP`.
+
+### Functions, stored procedures, and sequences
+
+> [!TIP]
+> As part of the preparation phase, assess the number and type of non-data objects being migrated.
+
+When migrating from a mature legacy data warehouse environment such as Netezza, there are often elements other than simple tables and views that need to be migrated to the new target environment. Examples include functions, stored procedures, and sequences.
+
+As part of the preparation phase, create an inventory of the objects that need to be migrated and define the methods for handling them. Then assign an appropriate allocation of resources in the project plan.
+
+There may be facilities in the Azure environment that replace the functionality implemented as either functions or stored procedures in the Netezza environment. In this case, it's often more efficient to use the built-in Azure facilities rather than recoding the Netezza functions.
+
+> [!TIP]
+> Third-party products and services can automate migration of non-data elements.
+
+[Microsoft partners](../../partner/data-integration.md) offer tools and services that can automate the migration, including the mapping of data types. Also, third-party ETL tools, such as Informatica or Talend, that are already in use in the IBM Netezza environment can implement any required data transformations.
+
+See the following sections for more information on each of these elements.
+
+#### Functions
+
+As with most database products, Netezza supports system functions and user-defined functions within the SQL implementation.
When migrating to another database platform such as Azure Synapse, common system functions are available and can be migrated without change. Some system functions may have slightly different syntax, but the required changes can be automated. System functions where there's no equivalent, such as arbitrary user-defined functions, may need to be recoded using the languages available in the target environment. Azure Synapse uses the popular Transact-SQL language to implement user-defined functions. Netezza user-defined functions are coded in nzlua or C++ languages. + +#### Stored procedures + +Most modern database products allow for procedures to be stored within the database. Netezza provides the NZPLSQL language, which is based on Postgres PL/pgSQL. A stored procedure typically contains SQL statements and some procedural logic, and may return data or a status. + +Azure Synapse Analytics also supports stored procedures using T-SQL, so if you must migrate stored procedures, recode them accordingly. + +#### Sequences + +In Netezza, a sequence is a named database object created via `CREATE SEQUENCE` that can provide the unique value via the `NEXT VALUE FOR` method. Use these to generate unique numbers for use as surrogate key values for primary key values. + +In Azure Synapse, there's no `CREATE SEQUENCE`. Sequences are handled using [IDENTITY to create surrogate keys](../../sql-data-warehouse/sql-data-warehouse-tables-identity.md) or [managed identity](../../../data-factory/data-factory-service-identity.md?tabs=data-factory) using SQL code to create the next sequence number in a series. + +### Use [EXPLAIN](/sql/t-sql/queries/explain-transact-sql?msclkid=91233fc1cff011ec9dff597671b7ae97) to validate legacy SQL + +> [!TIP] +> Find potential migration issues by using real queries from the existing system query logs. + +Capture some representative SQL statements from the legacy query history logs to evaluate legacy Netezza SQL for compatibility with Azure Synapse. Then prefix those queries with `EXPLAIN` and—assuming a "like-for-like" migrated data model in Azure Synapse with the same table and column names—run those `EXPLAIN` statements in Azure Synapse. Any incompatible SQL will return an error. Use this information to determine the scale of the recoding task. This approach doesn't require data to be loaded into the Azure environment, only that the relevant tables and views have been created. 
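+
+As a minimal sketch of this check, assume a hypothetical `dbo.DIM_CUSTOMER` table exists in the migrated schema. The first `EXPLAIN` below fails because `STRPOS` is Netezza-specific, flagging the captured query for recoding; the second, recoded to `CHARINDEX`, succeeds and returns a query plan.
+
+```sql
+-- Captured legacy query, prefixed with EXPLAIN and run in Azure Synapse.
+-- This is rejected because STRPOS isn't supported, so the query needs rework.
+EXPLAIN
+SELECT STRPOS(CUSTOMER_NAME, 'Ltd') FROM dbo.DIM_CUSTOMER;
+
+-- Recoded T-SQL equivalent that EXPLAIN accepts.
+EXPLAIN
+SELECT CHARINDEX('Ltd', CUSTOMER_NAME) FROM dbo.DIM_CUSTOMER;
+```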
+ +#### IBM Netezza to T-SQL mapping + +The IBM Netezza to T-SQL compliant with Azure Synapse SQL data type mapping is in this table: + +| IBM Netezza Data Type | Azure Synapse SQL Data Type | +|------------------------------------------------------|-----------------------------| +| array    | *Not supported* | +| bigint  | bigint | +| binary large object \[(n\[K\|M\|G\])\] | nvarchar \[(n\|max)\] | +| blob \[(n\[K\|M\|G\])\]  | nvarchar \[(n\|max)\] | +| byte \[(n)\] | binary \[(n)\]\|varbinary(max) | +| byteint    | smallint | +| char varying \[(n)\] | varchar \[(n\|max)\] | +| character varying \[(n)\] | varchar \[(n\|max)\] | +| char \[(n)\] | char \[(n)\]\|varchar(max) | +| character \[(n)\] | char \[(n)\]\|varchar(max) | +| character large object \[(n\[K\|M\|G\])\] | varchar \[(n\|max) | +| clob \[(n\[K\|M\|G\])\] | varchar \[(n\|max) | +| dataset    | *Not supported* | +| date  | date | +| dec \[(p\[,s\])\]    | decimal \[(p\[,s\])\] | +| decimal \[(p\[,s\])\]    | decimal \[(p\[,s\])\] | +| double precision    | float(53) | +| float \[(n)\]    | float \[(n)\] | +| graphic \[(n)\] | nchar \[(n)\]\| varchar(max) | +| interval  | *Not supported* | +| json \[(n)\]  | nvarchar \[(n\|max)\] | +| long varchar  | nvarchar(max) | +| long vargraphic  | nvarchar(max) | +| mbb  | *Not supported* | +| mbr  | *Not supported* | +| number \[((p\|\*)\[,s\])\]  | numeric \[(p\[,s\])\]  | +| numeric \[(p \[,s\])\]  | numeric \[(p\[,s\])\]  | +| period  | *Not supported* | +| real  | real | +| smallint  | smallint | +| st_geometry    | *Not supported* | +| time  | time | +| time with time zone  | datetimeoffset | +| timestamp  | datetime2  | +| timestamp with time zone  | datetimeoffset | +| varbyte  | varbinary \[(n\|max)\] | +| varchar \[(n)\] | varchar \[(n)\] | +| vargraphic \[(n)\] | nvarchar \[(n\|max)\] | +| varray  | *Not supported* | +| xml  | *Not supported* | +| xmltype  | *Not supported* | + +## Summary + +Typical existing legacy Netezza installations are implemented in a way that makes migration to Azure Synapse easy. They use SQL for analytical queries on large data volumes, and are in some form of dimensional data model. These factors make them good candidates for migration to Azure Synapse. + +To minimize the task of migrating the actual SQL code, follow these recommendations: + +- Initial migration of the data warehouse should be as-is to minimize risk and time taken, even if the eventual final environment will incorporate a different data model such as data vault. + +- Understand the differences between Netezza SQL implementation and Azure Synapse. + +- Use metadata and query logs from the existing Netezza implementation to assess the impact of the differences and plan an approach to mitigate. + +- Automate the process wherever possible to minimize errors, risk, and time for the migration. + +- Consider using specialist [Microsoft partners](../../partner/data-integration.md) and services to streamline the migration. + +## Next steps + +To learn more about Microsoft and third-party tools, see the next article in this series: [Tools for Netezza data warehouse migration to Azure Synapse Analytics](6-microsoft-third-party-migration-tools.md). 
\ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/netezza/6-microsoft-third-party-migration-tools.md b/articles/synapse-analytics/migration-guides/netezza/6-microsoft-third-party-migration-tools.md new file mode 100644 index 0000000000000..dcc49910241e1 --- /dev/null +++ b/articles/synapse-analytics/migration-guides/netezza/6-microsoft-third-party-migration-tools.md @@ -0,0 +1,132 @@ +--- +title: "Tools for Netezza data warehouse migration to Azure Synapse Analytics" +description: Learn about Microsoft and third-party data and database migration tools that can help you migrate from Netezza to Azure Synapse Analytics. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/31/2022 +--- + +# Tools for Netezza data warehouse migration to Azure Synapse Analytics + +This article is part six of a seven part series that provides guidance on how to migrate from Netezza to Azure Synapse Analytics. This article provides best practices for Microsoft and third-party tools. + +## Data warehouse migration tools + +By migrating your existing data warehouse to Azure Synapse Analytics, you benefit from: + +- A globally secure, scalable, low-cost, cloud-native, pay-as-you-use analytical database. + +- The rich Microsoft analytical ecosystem that exists on Azure. This ecosystem consists of technologies to help modernize your data warehouse once it's migrated, and extends your analytical capabilities to drive new value. + +Several tools from Microsoft and third-party partner vendors can help you migrate your existing data warehouse to Azure Synapse. These tools include: + +- Microsoft data and database migration tools. + +- Third-party data warehouse automation tools to automate and document the migration to Azure Synapse. + +- Third-party data warehouse migration tools to migrate schema and data to Azure Synapse. + +- Third-party tools to minimize the impact on SQL differences between your existing data warehouse DBMS and Azure Synapse. + +The following sections discuss these tools in more detail. + +## Microsoft data migration tools + +> [!TIP] +> Data Factory includes tools to help migrate your data and your entire data warehouse to Azure. + +Microsoft offers several tools to help you migrate your existing data warehouse to Azure Synapse, such as: + +- Microsoft Azure Data Factory. + +- Microsoft services for physical data transfer. + +- Microsoft services for data ingestion. + +### Microsoft Azure Data Factory + +Microsoft Azure Data Factory is a fully managed, pay-as-you-use, hybrid data integration service for highly scalable ETL and ELT processing. It uses Spark to process and analyze data in parallel and in memory to maximize throughput. + +> [!TIP] +> Data Factory allows you to build scalable data integration pipelines code-free. + +[Azure Data Factory connectors](../../../data-factory/connector-overview.md?msclkid=00086e4acff211ec9263dee5c7eb6e69) connect to external data sources and databases and have templates for common data integration tasks. A visual front-end, browser-based UI enables non-programmers to create and run process pipelines to ingest, transform, and load data. More experienced programmers have the option to incorporate custom code, such as Python programs. + +> [!TIP] +> Data Factory enables collaborative development between business and IT professionals. + +Data Factory is also an orchestration tool. 
It's the best Microsoft tool to automate the end-to-end migration process to reduce risk and make the migration process easily repeatable. The following diagram shows a Data Factory mapping data flow. + +:::image type="content" source="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows.png" border="true" alt-text="Screenshot showing an example of an Azure Data Factory mapping dataflow." lightbox="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows-lrg.png"::: + +The next screenshot shows a Data Factory wrangling data flow. + +:::image type="content" source="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-wrangling-dataflows.png" border="true" alt-text="Screenshot showing an example of Azure Data Factory wrangling dataflows."::: + +You can develop simple or comprehensive ETL and ELT processes without coding or maintenance with a few clicks. These processes ingest, move, prepare, transform, and process your data. You can design and manage scheduling and triggers in Azure Data Factory to build an automated data integration and loading environment. In Data Factory, you can define, manage, and schedule PolyBase bulk data load processes. + +> [!TIP] +> Data Factory includes tools to help migrate your data and your entire data warehouse to Azure. + +You can use Data Factory to implement and manage a hybrid environment that includes on-premises, cloud, streaming and SaaS data—for example, from applications like Salesforce—in a secure and consistent way. + +A new capability in Data Factory is wrangling data flows. This opens up Data Factory to business users who want to visually discover, explore, and prepare data at scale without writing code. This capability, similar to Microsoft Excel Power Query or Microsoft Power BI dataflows, offers self-service data preparation. Business users can prepare and integrate data through a spreadsheet-style user interface with drop-down transform options. + +Azure Data Factory is the recommended approach for implementing data integration and ETL/ELT processes for an Azure Synapse environment, especially if existing legacy processes need to be refactored. + +### Microsoft services for physical data transfer + +> [!TIP] +> Microsoft offers a range of products and services to assist with data transfer. + +#### Azure ExpressRoute + +Azure ExpressRoute creates private connections between Azure data centers and infrastructure on your premises or in a collocation environment. ExpressRoute connections don't go over the public internet, and they offer more reliability, faster speeds, and lower latencies than typical internet connections. In some cases, by using ExpressRoute connections to transfer data between on-premises systems and Azure, you gain significant cost benefits. + +#### AzCopy + +[AzCopy](../../../storage/common/storage-use-azcopy-v10.md) is a command line utility that copies files to Azure Blob Storage via a standard internet connection. In a warehouse migration project, you can use AzCopy to upload extracted, compressed, and delimited text files before loading through PolyBase, or a native Parquet reader if the exported files are Parquet format. AzCopy can upload individual files, file selections, or file directories. + +#### Azure Data Box + +Microsoft offers a service called Azure Data Box. This service writes data to be migrated to a physical storage device. This device is then shipped to an Azure data center and loaded into cloud storage. 
The service can be cost-effective for large volumes of data—for example, tens or hundreds of terabytes—or where network bandwidth isn't readily available. Azure Data Box is typically used for one-off historical data load when migrating a large amount of data to Azure Synapse. + +Another service is Data Box Gateway, a virtualized cloud storage gateway device that resides on your premises and sends your images, media, and other data to Azure. Use Data Box Gateway for one-off migration tasks or ongoing incremental data uploads. + +### Microsoft services for data ingestion + +#### COPY INTO + +The [COPY](/sql/t-sql/statements/copy-into-transact-sql) statement provides the most flexibility for high-throughput data ingestion into Azure Synapse Analytics. Refer to the list of capabilities that `COPY` offers for data ingestion. + +#### PolyBase + +> [!TIP] +> PolyBase can load data in parallel from Azure Blob Storage into Azure Synapse. + +PolyBase provides the fastest and most scalable method of loading bulk data into Azure Synapse. PolyBase leverages the MPP architecture to use parallel loading, to give the fastest throughput, and can read data from flat files in Azure Blob Storage or directly from external data sources and other relational databases via connectors. + +PolyBase can also directly read from files compressed with gzip—this reduces the physical volume of data moved during the load process. PolyBase supports popular data formats such as delimited text, ORC, and Parquet. + +> [!TIP] +> Invoke PolyBase from Azure Data Factory as part of a migration pipeline. + +PolyBase is tightly integrated with Azure Data Factory to enable data load ETL/ELT processes to be rapidly developed and scheduled through a visual GUI, leading to higher productivity and fewer errors than hand-written code. + +PolyBase is the recommended data load method for Azure Synapse, especially for high-volume data. PolyBase loads data using the `CREATE TABLE AS` or `INSERT...SELECT` statements—CTAS achieves the highest possible throughput as it minimizes the amount of logging required. Compressed delimited text files are the most efficient input format. For maximum throughput, split very large input files into multiple smaller files and load these in parallel. For fastest loading to a staging table, define the target table as type `HEAP` and use round-robin distribution. + +However, PolyBase has some limitations. Rows to be loaded must be less than 1 MB in length. Fixed-width format or nested data, such as JSON and XML, aren't directly readable. + +## Microsoft partners can help you migrate your data warehouse to Azure Synapse Analytics + +In addition to tools that can help you with various aspects of data warehouse migration, there are several practiced [Microsoft partners](../../partner/data-integration.md) that can bring their expertise to help you move your legacy on-premises data warehouse platform to Azure Synapse. + +## Next steps + +To learn more about implementing modern data warehouses, see the next article in this series: [Beyond Netezza migration, implementing a modern data warehouse in Microsoft Azure](7-beyond-data-warehouse-migration.md). 
\ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/netezza/7-beyond-data-warehouse-migration.md b/articles/synapse-analytics/migration-guides/netezza/7-beyond-data-warehouse-migration.md new file mode 100644 index 0000000000000..10938992257ac --- /dev/null +++ b/articles/synapse-analytics/migration-guides/netezza/7-beyond-data-warehouse-migration.md @@ -0,0 +1,375 @@ +--- +title: "Beyond Netezza migration, implementing a modern data warehouse in Microsoft Azure" +description: Learn how a Netezza migration to Azure Synapse Analytics lets you integrate your data warehouse with the Microsoft Azure analytical ecosystem. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/31/2022 +--- + +# Beyond Netezza migration, implementing a modern data warehouse in Microsoft Azure + +This article is part seven of a seven part series that provides guidance on how to migrate from Netezza to Azure Synapse Analytics. This article provides best practices for implementing modern data warehouses. + +## Beyond data warehouse migration to Azure + +One of the key reasons to migrate your existing data warehouse to Azure Synapse Analytics is to utilize a globally secure, scalable, low-cost, cloud-native, pay-as-you-use analytical database. Azure Synapse also lets you integrate your migrated data warehouse with the complete Microsoft Azure analytical ecosystem to take advantage of, and integrate with, other Microsoft technologies that help you modernize your migrated data warehouse. This includes integrating with technologies like: + +- Azure Data Lake Storage for cost effective data ingestion, staging, cleansing, and transformation, to free up data warehouse capacity occupied by fast growing staging tables. + +- Azure Data Factory for collaborative IT and self-service data integration [with connectors](../../../data-factory/connector-overview.md) to cloud and on-premises data sources and streaming data. + +- [The Open Data Model Common Data Initiative](/common-data-model/) to share consistent trusted data across multiple technologies, including: + - Azure Synapse + - Azure Synapse Spark + - Azure HDInsight + - Power BI + - SAP + - Adobe Customer Experience Platform + - Azure IoT + - Microsoft ISV Partners + +- [Microsoft's data science technologies](/azure/architecture/data-science-process/platforms-and-tools), including: + - Azure Machine Learning Studio + - Azure Machine Learning + - Azure Synapse Spark (Spark as a service) + - Jupyter Notebooks + - RStudio + - ML.NET + - .NET for Apache Spark to enable data scientists to use Azure Synapse data to train machine learning models at scale. + +- [Azure HDInsight](../../../hdinsight/index.yml) to leverage big data analytical processing and join big data with Azure Synapse data by creating a logical data warehouse using PolyBase. + +- [Azure Event Hubs](../../../event-hubs/event-hubs-about.md), [Azure Stream Analytics](../../../stream-analytics/stream-analytics-introduction.md), and [Apache Kafka](/azure/databricks/spark/latest/structured-streaming/kafka) to integrate with live streaming data from Azure Synapse. + +There's often acute demand to integrate with [machine learning](../../machine-learning/what-is-machine-learning.md) to enable custom-built, trained machine learning models for use in Azure Synapse. 
This would enable in-database analytics to run at scale in-batch, on an event-driven basis and on-demand. The ability to exploit in-database analytics in Azure Synapse from multiple BI tools and applications also guarantees that all get the same predictions and recommendations. + +In addition, there's an opportunity to integrate Azure Synapse with Microsoft partner tools on Azure to shorten time to value. + +Let's look at these in more detail to understand how you can take advantage of the technologies in Microsoft's analytical ecosystem to modernize your data warehouse once you've migrated to Azure Synapse. + +## Offload data staging and ETL processing to Azure Data Lake and Azure Data Factory + +Enterprises today have a key problem resulting from digital transformation. So much new data is being generated and captured for analysis, and much of this data is finding its way into data warehouses. A good example is transaction data created by opening OLTP systems to self-service access from mobile devices. These OLTP systems are the main sources of data to a data warehouse, and with customers now driving the transaction rate rather than employees, data in data warehouse staging tables has been growing rapidly in volume. + +The rapid influx of data into the enterprise, along with new sources of data like Internet of Things (IoT) streams, means that companies need to find a way to deal with unprecedented data growth and scale data integration ETL processing beyond current levels. One way to do this is to offload ingestion, data cleansing, transformation, and integration to a data lake and process it at scale there, as part of a data warehouse modernization program. + +Once you've migrated your data warehouse to Azure Synapse, Microsoft provides the ability to modernize your ETL processing by ingesting data into, and staging data in, Azure Data Lake Storage. You can then clean, transform and integrate your data at scale using Data Factory before loading it into Azure Synapse in parallel using PolyBase. + +For ELT strategies, consider offloading ELT processing to Azure Data Lake to easily scale as your data volume or frequency grows. + +### Microsoft Azure Data Factory + +> [!TIP] +> Data Factory allows you to build scalable data integration pipelines code-free. + +[Data Factory](https://azure.microsoft.com/services/data-factory/) is a pay-as-you-use, hybrid data integration service for highly scalable ETL and ELT processing. Data Factory provides a simple web-based user interface to build data integration pipelines in a code-free manner that can: + +- Build scalable data integration pipelines code-free. Easily acquire data at scale. Pay only for what you use, and connect to on-premises, cloud, and SaaS-based data sources. + +- Ingest, move, clean, transform, integrate, and analyze cloud and on-premises data at scale. Take automatic action, such as a recommendation or alert. + +- Seamlessly author, monitor, and manage pipelines that span data stores both on-premises and in the cloud. + +- Enable pay-as-you-go scale-out in alignment with customer growth. + +> [!TIP] +> Data Factory can connect to on-premises, cloud, and SaaS data. + +All of this can be done without writing any code. However, adding custom code to Data Factory pipelines is also supported. The next screenshot shows an example Data Factory pipeline. 
+ +:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-data-factory-pipeline.png" border="true" alt-text="Screenshot showing an example of an Azure Data Factory pipeline."::: + +> [!TIP] +> Pipelines called data factories control the integration and analysis of data. Data Factory is enterprise-class data integration software aimed at IT professionals with a data wrangling facility for business users. + +Implement Data Factory pipeline development from any of several places including: + +- Microsoft Azure portal + +- Microsoft Azure PowerShell + +- Programmatically from .NET and Python using a multi-language SDK + +- Azure Resource Manager (ARM) templates + +- REST APIs + +Developers and data scientists who prefer to write code can easily author Data Factory pipelines in Java, Python, and .NET using the software development kits (SDKs) available for those programming languages. Data Factory pipelines can also be hybrid since they can connect, ingest, clean, transform, and analyze data in on-premises data centers, Microsoft Azure, other clouds, and SaaS offerings. + +Once you develop Data Factory pipelines to integrate and analyze data, deploy those pipelines globally and schedule them to run in batch, invoke them on demand as a service, or run them in real-time on an event-driven basis. A Data Factory pipeline can also run on one or more execution engines and monitor pipeline execution to ensure performance and track errors. + +#### Use cases + +> [!TIP] +> Build data warehouses on Microsoft Azure. + +Data Factory can support multiple use cases, including: + +- Preparing, integrating, and enriching data from cloud and on-premises data sources to populate your migrated data warehouse and data marts on Microsoft Azure Synapse. + +- Preparing, integrating, and enriching data from cloud and on-premises data sources to produce training data for use in machine learning model development and in retraining analytical models. + +- Orchestrating data preparation and analytics to create predictive and prescriptive analytical pipelines for processing and analyzing data in batch, such as sentiment analytics, and either acting on the results of the analysis or populating your data warehouse with the results. + +- Preparing, integrating, and enriching data for data-driven business applications running on the Azure cloud on top of operational data stores like Azure Cosmos DB. + +> [!TIP] +> Build training data sets in data science to develop machine learning models. + +#### Data sources + +Data Factory lets you use [connectors](../../../data-factory/connector-overview.md) from both cloud and on-premises data sources. Agent software, known as a *self-hosted integration runtime*, securely accesses on-premises data sources and supports secure, scalable data transfer. + +#### Transform data using Azure Data Factory + +> [!TIP] +> Professional ETL developers can use Azure Data Factory mapping data flows to clean, transform, and integrate data without the need to write code. + +Within a Data Factory pipeline, ingest, clean, transform, integrate, and, if necessary, analyze any type of data from these sources. This includes structured, semi-structured such as JSON or Avro, and unstructured data. + +Professional ETL developers can use Data Factory mapping data flows to filter, split, join (many types), lookup, pivot, unpivot, sort, union, and aggregate data without writing any code. 
In addition, Data Factory supports surrogate keys, multiple write processing options such as insert, upsert, update, table recreation, and table truncation, and several types of target data stores—also known as sinks. ETL developers can also create aggregations, including time-series aggregations that require a window to be placed on data columns. + +> [!TIP] +> Data Factory supports the ability to automatically detect and manage schema changes in inbound data, such as in streaming data. + +Run mapping data flows that transform data as activities in a Data Factory pipeline. Include multiple mapping data flows in a single pipeline, if necessary. Break up challenging data transformation and integration tasks into smaller mapping dataflows that can be combined to handle the complexity and custom code added if necessary. In addition to this functionality, Data Factory mapping data flows include these abilities: + +- Define expressions to clean and transform data, compute aggregations, and enrich data. For example, these expressions can perform feature engineering on a date field to break it into multiple fields to create training data during machine learning model development. Construct expressions from a rich set of functions that include mathematical, temporal, split, merge, string concatenation, conditions, pattern match, replace, and many other functions. + +- Automatically handle schema drift so that data transformation pipelines can avoid being impacted by schema changes in data sources. This is especially important for streaming IoT data, where schema changes can happen without notice when devices are upgraded or when readings are missed by gateway devices collecting IoT data. + +- Partition data to enable transformations to run in parallel at scale. + +- Inspect data to view the metadata of a stream you're transforming. + +> [!TIP] +> Data Factory can also partition data to enable ETL processing to run at scale. + +The next screenshot shows an example Data Factory mapping data flow. + +:::image type="content" source="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows.png" border="true" alt-text="Screenshot showing an example of an Azure Data Factory mapping dataflow." lightbox="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows-lrg.png"::: + +Data engineers can profile data quality and view the results of individual data transforms by switching on a debug capability during development. + +> [!TIP] +> Data Factory pipelines are also extensible since Data Factory allows you to write your own code and run it as part of a pipeline. + +Extend Data Factory transformational and analytical functionality by adding a linked service containing your own code into a pipeline. For example, an Azure Synapse Spark pool notebook containing Python code could use a trained model to score the data integrated by a mapping data flow. + +Store integrated data and any results from analytics included in a Data Factory pipeline in one or more data stores such as Azure Data Lake Storage, Azure Synapse, or Azure HDInsight (Hive tables). Invoke other activities to act on insights produced by a Data Factory analytical pipeline. + +#### Utilize Spark to scale data integration + +Internally, Data Factory utilizes Azure Synapse Spark Pools—Microsoft's Spark-as-a-service offering—at run time to clean and integrate data on the Microsoft Azure cloud. 
This enables it to clean, integrate, and analyze high-volume and very high-velocity data (such as click stream data) at scale. Microsoft intends to execute Data Factory pipelines on other Spark distributions. In addition to executing ETL jobs on Spark, Data Factory can also invoke Pig scripts and Hive queries to access and transform data stored in Azure HDInsight. + +#### Link self-service data prep and Data Factory ETL processing using wrangling data flows + +> [!TIP] +> Data Factory support for wrangling data flows in addition to mapping data flows means that business and IT can work together on a common platform to integrate data. + +Another new capability in Data Factory is wrangling data flows. This lets business users (also known as citizen data integrators and data engineers) make use of the platform to visually discover, explore, and prepare data at scale without writing code. This easy-to-use Data Factory capability is similar to Microsoft Excel Power Query or Microsoft Power BI dataflows, where self-service data preparation business users use a spreadsheet-style UI with drop-down transforms to prepare and integrate data. The following screenshot shows an example Data Factory wrangling data flow. + +:::image type="content" source="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-wrangling-dataflows.png" border="true" alt-text="Screenshot showing an example of Azure Data Factory wrangling dataflows."::: + +This differs from Excel and Power BI, as Data Factory [wrangling data flows](/azure/data-factory/wrangling-tutorial) use Power Query to generate M code and translate it into a massively parallel in-memory Spark job for cloud-scale execution. The combination of mapping data flows and wrangling data flows in Data Factory lets IT professional ETL developers and business users collaborate to prepare, integrate, and analyze data for a common business purpose. The preceding Data Factory mapping data flow diagram shows how both Data Factory and Azure Synapse Spark pool notebooks can be combined in the same Data Factory pipeline. This allows IT and business to be aware of what each has created. Mapping data flows and wrangling data flows can then be available for reuse to maximize productivity and consistency and minimize reinvention. + +#### Link data and analytics in analytical pipelines + +In addition to cleaning and transforming data, Data Factory can combine data integration and analytics in the same pipeline. Use Data Factory to create both data integration and analytical pipelines—the latter being an extension of the former. Drop an analytical model into a pipeline so that clean, integrated data can be stored to provide predictions or recommendations. Act on this information immediately or store it in your data warehouse to provide you with new insights and recommendations that can be viewed in BI tools. + +Models developed code-free with Azure Machine Learning Studio, or with the Azure Machine Learning SDK using Azure Synapse Spark pool notebooks or using R in RStudio, can be invoked as a service from within a Data Factory pipeline to batch score your data. Analysis happens at scale by executing Spark machine learning pipelines on Azure Synapse Spark pool notebooks. + +Store integrated data and any results from analytics included in a Data Factory pipeline in one or more data stores, such as Azure Data Lake Storage, Azure Synapse, or Azure HDInsight (Hive tables). Invoke other activities to act on insights produced by a Data Factory analytical pipeline. 
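+
+Where a trained model has been exported to ONNX format and loaded into a dedicated SQL pool table, the scoring step itself can also run in-database with the T-SQL `PREDICT` function, which is one way to realize the in-database analytics mentioned earlier. The following is a minimal sketch, assuming a hypothetical `dbo.Models` table that holds the serialized ONNX model and a hypothetical `dbo.CustomerFeatures` input table.
+
+```sql
+-- Batch-score rows in an Azure Synapse dedicated SQL pool with a stored ONNX model.
+SELECT d.*, p.Score
+FROM PREDICT(
+         MODEL = (SELECT Model FROM dbo.Models WHERE ModelName = 'ChurnModel'),
+         DATA = dbo.CustomerFeatures AS d,
+         RUNTIME = ONNX)
+WITH (Score FLOAT) AS p;
+```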
+ +## A lake database to share consistent trusted data + +> [!TIP] +> Microsoft has created a lake database to describe core data entities to be shared across the enterprise. + +A key objective in any data integration setup is the ability to integrate data once and reuse it everywhere, not just in a data warehouse—for example, in data science. Reuse avoids reinvention and ensures consistent, commonly understood data that everyone can trust. + +> [!TIP] +> Azure Data Lake Storage is shared storage that underpins Microsoft Azure Synapse, Azure Machine Learning, Azure Synapse Spark, and Azure HDInsight. + +To achieve this goal, establish a set of common data names and definitions describing logical data entities that need to be shared across the enterprise—such as customer, account, product, supplier, orders, payments, returns, and so forth. Once this is done, IT and business professionals can use data integration software to create these common data assets and store them to maximize their reuse to drive consistency everywhere. + +> [!TIP] +> Integrating data to create lake database logical entities in shared storage enables maximum reuse of common data assets. + +Microsoft has done this by creating a [lake database](../../database-designer/concepts-lake-database.md). The lake database is a common language for business entities that represents commonly used concepts and activities across a business. Azure Synapse Analytics provides industry specific database templates to help standardize data in the lake. [Lake database templates](../../database-designer/concepts-database-templates.md) provide schemas for predefined business areas, enabling data to be loaded into a lake database in a structured way. The power comes when data integration software is used to create lake database common data assets. This results in self-describing trusted data that can be consumed by applications and analytical systems. Create a lake database in Azure Data Lake Storage by using Azure Data Factory, and consume it with Power BI, Azure Synapse Spark, Azure Synapse, and Azure Machine Learning. The following diagram shows a lake database used in Azure Synapse Analytics. + +:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-synapse-analytics-lake-database.png" border="true" alt-text="Screenshot showing how a lake database can be used in Azure Synapse Analytics."::: + +## Integration with Microsoft data science technologies on Azure + +Another key requirement in modernizing your migrated data warehouse is to integrate it with Microsoft and third-party data science technologies on Azure to produce insights for competitive advantage. Let's look at what Microsoft offers in terms of machine learning and data science technologies and see how these can be used with Azure Synapse in a modern data warehouse environment. + +### Microsoft technologies for data science on Azure + +> [!TIP] +> Develop machine learning models using a no/low-code approach or from a range of programming languages like Python, R, and .NET. + +Microsoft offers a range of technologies to build predictive analytical models using machine learning, analyze unstructured data using deep learning, and perform other kinds of advanced analytics. 
These include: + +- Azure Machine Learning Studio + +- Azure Machine Learning + +- Azure Synapse Spark pool notebooks + +- ML.NET (API, CLI, or ML.NET Model Builder for Visual Studio) + +- .NET for Apache Spark + +Data scientists can use RStudio (R) and Jupyter Notebooks (Python) to develop analytical models, or they can use other frameworks such as Keras or TensorFlow. + +#### Azure Machine Learning Studio + +Azure Machine Learning Studio is a fully managed cloud service that lets you easily build, deploy, and share predictive analytics via a drag-and-drop web-based user interface. The next screenshot shows the Azure Machine Learning Studio user interface. + +:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-ml-studio-ui.png" border="true" alt-text="Screenshot showing predictive analysis in the Azure Machine Learning Studio user interface."::: + +#### Azure Machine Learning + +> [!TIP] +> Azure Machine Learning provides an SDK for developing machine learning models using several open-source frameworks. + +Azure Machine Learning provides a software development kit (SDK) and services for Python to quickly prepare data, as well as train and deploy machine learning models. Use Azure Machine Learning from Azure notebooks (a Jupyter Notebook service) and utilize open-source frameworks, such as PyTorch, TensorFlow, Spark MLlib (Azure Synapse Spark pool notebooks), or scikit-learn. Azure Machine Learning provides an AutoML capability that automatically identifies the most accurate algorithms to expedite model development. You can also use it to build machine learning pipelines that manage end-to-end workflow, programmatically scale on the cloud, and deploy models both to the cloud and the edge. Azure Machine Learning uses logical containers called workspaces, which can be created manually from the Azure portal or programmatically. These workspaces keep compute targets, experiments, data stores, trained machine learning models, Docker images, and deployed services all in one place to enable teams to work together. Use Azure Machine Learning from Visual Studio with the Visual Studio for AI extension. + +> [!TIP] +> Organize and manage related data stores, experiments, trained models, Docker images, and deployed services in workspaces. + +#### Azure Synapse Spark pool notebooks + +> [!TIP] +> Azure Synapse Spark is Microsoft's dynamically scalable Spark-as-a-service offering, providing scalable execution of data preparation, model development, and deployed model execution. + +[Azure Synapse Spark pool notebooks](../../spark/apache-spark-development-using-notebooks.md?msclkid=cbe4b8ebcff511eca068920ea4bf16b9) provide an Apache Spark service optimized to run on Azure, which: + +- Allows data engineers to build and execute scalable data preparation jobs using Azure Data Factory. + +- Allows data scientists to build and execute machine learning models at scale using notebooks written in languages such as Scala, R, Python, Java, and SQL; and to visualize results. + +> [!TIP] +> Azure Synapse Spark can access data in a range of Microsoft analytical ecosystem data stores on Azure. + +Jobs running in Azure Synapse Spark pool notebooks can retrieve, process, and analyze data at scale from Azure Blob Storage, Azure Data Lake Storage, Azure Synapse, Azure HDInsight, and streaming data services such as Kafka. + +Autoscaling and auto-termination are also supported to reduce total cost of ownership (TCO).
Data scientists can use the MLflow open-source framework to manage the machine learning lifecycle. + +#### ML.NET + +> [!TIP] +> Microsoft has extended its machine learning capability to .NET developers. + +ML.NET is an open-source and cross-platform machine learning framework (Windows, Linux, macOS), created by Microsoft for .NET developers so that they can use existing tools—like ML.NET Model Builder for Visual Studio—to develop custom machine learning models and integrate them into .NET applications. + +#### .NET for Apache Spark + +.NET for Apache Spark aims to make Spark accessible to .NET developers across all Spark APIs. It takes Spark support beyond R, Scala, Python, and Java to .NET. While initially only available on Apache Spark on HDInsight, Microsoft intends to make this available on Azure Synapse Spark pool notebooks. + +### Use Azure Synapse Analytics with your data warehouse + +> [!TIP] +> Train, test, evaluate, and execute machine learning models at scale on Azure Synapse Spark pool notebooks by using data in Azure Synapse. + +Combine machine learning models with Azure Synapse by: + +- Using machine learning models in batch mode or in real-time to produce new insights, and adding them to what you already know in Azure Synapse. + +- Using the data in Azure Synapse to develop and train new predictive models for deployment elsewhere, such as in other applications. + +- Deploying machine learning models, including those trained elsewhere, in Azure Synapse to analyze data in the data warehouse and drive new business value. + +> [!TIP] +> Produce new insights using machine learning on Azure in batch or in real-time and add them to what you know in your data warehouse. + +In terms of machine learning model development, data scientists can use RStudio, Jupyter Notebooks, and Azure Synapse Spark pool notebooks together with Azure Machine Learning to develop machine learning models that run at scale on Azure Synapse Spark pool notebooks using data in Azure Synapse. For example, they could create an unsupervised model to segment customers for use in driving different marketing campaigns. Use supervised machine learning to train a model to predict a specific outcome, such as predicting a customer's propensity to churn, or recommending the next best offer for a customer to try to increase their value. The next diagram shows how Azure Synapse Analytics can be used with Azure Machine Learning. + +:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-synapse-train-predict.png" border="true" alt-text="Screenshot of an Azure Synapse Analytics train and predict model."::: + +In addition, you can ingest big data—such as social network data or review website data—into Azure Data Lake, then prepare and analyze it at scale on Azure Synapse Spark pool notebooks, using natural language processing to score sentiment about your products or your brand. Add these scores to your data warehouse to understand the impact of—for example—negative sentiment on product sales, and to leverage big data analytics to add to what you already know in your data warehouse. + +## Integrate live streaming data into Azure Synapse Analytics + +When analyzing data in a modern data warehouse, you must be able to analyze streaming data in real-time and join it with historical data in your data warehouse. An example of this would be combining IoT data with product or asset data. + +> [!TIP] +> Integrate your data warehouse with streaming data from IoT devices or clickstreams.
+ +Once you've successfully migrated your data warehouse to Azure Synapse, you can introduce this capability as part of a data warehouse modernization exercise. Do this by taking advantage of additional functionality in Azure Synapse. + +> [!TIP] +> Ingest streaming data into Azure Data Lake Storage from Azure Event Hubs or Kafka, and access it from Azure Synapse using PolyBase external tables. + +To do this, ingest streaming data via Azure Event Hubs or other technologies, such as Kafka, using Azure Data Factory (or using an existing ETL tool if it supports the streaming data sources). Store the data in Azure Data Lake Storage (ADLS). Next, create an external table in Azure Synapse using PolyBase and point it at the data being streamed into Azure Data Lake. Your migrated data warehouse will now contain new tables that provide access to real-time streaming data. Query this external table as if the data were in the data warehouse, using standard T-SQL from any BI tool that has access to Azure Synapse. You can also join this data to other tables containing historical data and create views that join live streaming data to historical data to make it easier for business users to access. In the following diagram, a real-time data warehouse on Azure Synapse Analytics is integrated with streaming data in ADLS. + +:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-datalake-streaming-data.png" border="true" alt-text="Screenshot of Azure Synapse Analytics with streaming data in an Azure Data Lake."::: + +## Create a logical data warehouse using PolyBase + +> [!TIP] +> PolyBase simplifies business user access to data by presenting multiple underlying analytical data stores on Azure as a single source. + +PolyBase offers the capability to create a logical data warehouse to simplify user access to multiple analytical data stores. + +This is attractive because many companies have adopted "workload optimized" analytical data stores over the last several years in addition to their data warehouses. Examples of these platforms on Azure include: + +- ADLS with Azure Synapse Spark pool notebooks (Spark-as-a-service), for big data analytics. + +- Azure HDInsight (Hadoop-as-a-service), also for big data analytics. + +- NoSQL graph databases for graph analysis, which could be done in Azure Cosmos DB. + +- Azure Event Hubs and Azure Stream Analytics, for real-time analysis of data in motion. + +You may have non-Microsoft equivalents of some of these. You may also have a master data management (MDM) system that needs to be accessed for consistent trusted data on customers, suppliers, products, assets, and more. + +These additional analytical platforms have emerged because of the explosion of new data sources—both inside and outside the enterprise—that business users want to capture and analyze. Examples include: + +- Machine-generated data, such as IoT sensor data and clickstream data. + +- Human-generated data, such as social network data, review website data, customer inbound email, images, and video. + +- Other external data, such as open government data and weather data. + +This data is over and above the structured transaction data and master data sources that typically feed data warehouses. These new data sources include semi-structured data (like JSON, XML, or Avro) or unstructured data (like text, voice, image, or video), which is more complex to process and analyze. This data could be very high volume, high velocity, or both.
+ +As a result, the need for new kinds of more complex analysis has emerged, such as natural language processing, graph analysis, deep learning, streaming analytics, or complex analysis of large volumes of structured data. All of this is typically not happening in a data warehouse, so it's not surprising to see different analytical platforms for different types of analytical workloads, as shown in the following diagram. + +:::image type="content" source="../media/7-beyond-data-warehouse-migration/analytical-workload-platforms.png" border="true" alt-text="Screenshot of different analytical platforms for different types of analytical workloads in Azure Synapse Analytics."::: + +Since these platforms are producing new insights, it's normal to see a requirement to combine these insights with what you already know in Azure Synapse. That's what PolyBase makes possible. + +> [!TIP] +> The ability to make data in multiple analytical data stores look like it's all in one system and join it to Azure Synapse is known as a logical data warehouse architecture. + +By leveraging PolyBase data virtualization inside Azure Synapse, you can implement a logical data warehouse. Join data in Azure Synapse to data in other Azure and on-premises analytical data stores—like Azure HDInsight or Azure Cosmos DB—or to streaming data flowing into ADLS from Azure Stream Analytics and Event Hubs. Users access external tables in Azure Synapse, unaware that the data they're accessing is stored in multiple underlying analytical systems. The next diagram shows the complex data warehouse structure accessed through comparatively simpler but still powerful user interface methods. + +:::image type="content" source="../media/7-beyond-data-warehouse-migration/complex-data-warehouse-structure.png" alt-text="Screenshot showing an example of a complex data warehouse structure accessed through user interface methods."::: + +The previous diagram shows how other technologies of the Microsoft analytical ecosystem can be combined with the capability of Azure Synapse logical data warehouse architecture. For example, data can be ingested into ADLS and curated using Azure Data Factory to create trusted data products that represent Microsoft [lake database](../../database-designer/concepts-lake-database.md) logical data entities. This trusted, commonly understood data can then be consumed and reused in different analytical environments such as Azure Synapse, Azure Synapse Spark pool notebooks, or Azure Cosmos DB. All insights produced in these environments are accessible via a logical data warehouse data virtualization layer made possible by PolyBase. + +> [!TIP] +> A logical data warehouse architecture simplifies business user access to data and adds new value to what you already know in your data warehouse. + +## Conclusions + +> [!TIP] +> Migrating your data warehouse to Azure Synapse lets you make use of a rich Microsoft analytical ecosystem running on Azure. + +Once you migrate your data warehouse to Azure Synapse, you can leverage other technologies in the Microsoft analytical ecosystem. You don't only modernize your data warehouse, but combine insights produced in other Azure analytical data stores into an integrated analytical architecture. + +Broaden your ETL processing to ingest data of any type into ADLS. Prepare and integrate it at scale using Azure Data Factory to produce trusted, commonly understood data assets that can be consumed by your data warehouse and accessed by data scientists and other applications. 
Build real-time and batch-oriented analytical pipelines and create machine learning models to run in batch, in real-time on streaming data, and on-demand as a service. + +Leverage PolyBase and `COPY INTO` to go beyond your data warehouse. Simplify access to insights from multiple underlying analytical platforms on Azure by creating holistic integrated views in a logical data warehouse. Easily access streaming, big data, and traditional data warehouse insights from BI tools and applications to drive new value in your business. + +## Next steps + +To learn more about migrating to a dedicated SQL pool, see [Migrate a data warehouse to a dedicated SQL pool in Azure Synapse Analytics](../migrate-to-synapse-analytics-guide.md). \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/teradata/1-design-performance-migration.md b/articles/synapse-analytics/migration-guides/teradata/1-design-performance-migration.md new file mode 100644 index 0000000000000..f7ce297f9fe47 --- /dev/null +++ b/articles/synapse-analytics/migration-guides/teradata/1-design-performance-migration.md @@ -0,0 +1,336 @@ +--- +title: "Design and performance for Teradata migrations" +description: Learn how Teradata and Azure Synapse SQL databases differ in their approach to high query performance on exceptionally large data volumes. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/31/2022 +--- + +# Design and performance for Teradata migrations + +This article is part one of a seven part series that provides guidance on how to migrate from Teradata to Azure Synapse Analytics. This article provides best practices for design and performance. + +## Overview + +Many existing users of Teradata data warehouse systems want to take advantage of the innovations provided by newer environments such as cloud, IaaS, and PaaS, and to delegate tasks like infrastructure maintenance and platform development to the cloud provider. + +> [!TIP] +> More than just a database—the Azure environment includes a comprehensive set of capabilities and tools. + +Although Teradata and Azure Synapse Analytics are both SQL databases designed to use massively parallel processing (MPP) techniques to achieve high query performance on exceptionally large data volumes, there are some basic differences in approach: + +- Legacy Teradata systems are often installed on-premises and use proprietary hardware, while Azure Synapse is cloud-based and uses Azure Storage and compute resources. + +- Since storage and compute resources are separate in the Azure environment, these resources can be scaled upwards or downwards independently, leveraging the elastic scaling capability. + +- Azure Synapse can be paused or resized as required to reduce resource utilization and cost. + +- Upgrading a Teradata configuration is a major task involving additional physical hardware and potentially lengthy database reconfiguration or reload. + +Microsoft Azure is a globally available, highly secure, scalable cloud environment that includes Azure Synapse and an ecosystem of supporting tools and capabilities. The next diagram summarizes the Azure Synapse ecosystem. 
+ +:::image type="content" source="../media/1-design-performance-migration/azure-synapse-ecosystem.png" border="true" alt-text="Chart showing the Azure Synapse ecosystem of supporting tools and capabilities."::: + +> [!TIP] +> Azure Synapse gives best-of-breed performance and price-performance in independent benchmarks. + +Azure Synapse provides best-of-breed relational database performance by using techniques such as massively parallel processing (MPP) and multiple levels of automated caching for frequently used data. See the results of this approach in independent benchmarks such as the one run recently by [GigaOm](https://research.gigaom.com/report/data-warehouse-cloud-benchmark/), which compares Azure Synapse to other popular cloud data warehouse offerings. Customers who have migrated to this environment have seen many benefits including: + +- Improved performance and price/performance. + +- Increased agility and shorter time to value. + +- Faster server deployment and application development. + +- Elastic scalability—only pay for actual usage. + +- Improved security/compliance. + +- Reduced storage and disaster recovery costs. + +- Lower overall TCO, better cost control, and streamlined operational expenditure (OPEX). + +To maximize these benefits, migrate new or existing data and applications to the Azure Synapse platform. In many organizations, this will include migrating an existing data warehouse from legacy on-premises platforms such as Teradata. At a high level, the basic process includes these steps: + +:::image type="content" source="../media/1-design-performance-migration/migration-steps.png" border="true" alt-text="Diagram showing the steps for preparing to migrate, migration, and post-migration."::: + +This paper looks at schema migration with a goal of equivalent or better performance of your migrated Teradata data warehouse and data marts on Azure Synapse. This paper applies specifically to migrations from an existing Teradata environment. + +## Design considerations + +### Migration scope + +> [!TIP] +> Create an inventory of objects to be migrated and document the migration process. + +#### Preparation for migration + +When migrating from a Teradata environment, there are some specific topics to consider in addition to the more general subjects described in this article. + +#### Choose the workload for the initial migration + +Legacy Teradata environments have typically evolved over time to encompass multiple subject areas and mixed workloads. When deciding where to start on an initial migration project, choose an area that can: + +- Prove the viability of migrating to Azure Synapse by quickly delivering the benefits of the new environment. + +- Allow the in-house technical staff to gain relevant experience of the processes and tools involved, which can be used in migrations to other areas. + +- Create a template for further migrations specific to the source Teradata environment and the current tools and processes that are already in place. + +A good candidate for an initial migration from the Teradata environment that would enable the preceding items is typically one that implements a BI/Analytics workload, rather than an online transaction processing (OLTP) workload, with a data model that can be migrated with minimal modification, normally a star or snowflake schema. 
+ +The migration data volume for the initial exercise should be large enough to demonstrate the capabilities and benefits of the Azure Synapse environment while quickly demonstrating the value—typically in the 1-10 TB range. + +To minimize the risk and reduce implementation time for the initial migration project, confine the scope of the migration to just the data marts, such as the OLAP DB part of a Teradata warehouse. However, this won't address the broader topics such as ETL migration and historical data migration. Address these topics in later phases of the project, once the migrated data mart layer is backfilled with the data and processes required to build them. + +#### Lift and shift as-is versus a phased approach incorporating changes + +> [!TIP] +> "Lift and shift" is a good starting point, even if subsequent phases will implement changes to the data model. + +Whatever the drive and scope of the intended migration, there are—broadly speaking—two types of migration: + +##### Lift and shift + +In this case, the existing data model—such as a star schema—is migrated unchanged to the new Azure Synapse platform. The emphasis is on minimizing risk and the migration time required by reducing the work needed to realize the benefits of moving to the Azure cloud environment. + +This is a good fit for existing Teradata environments where a single data mart is being migrated, or where the data is already in a well-designed star or snowflake schema—or there are other pressures to move to a more modern cloud environment. + +##### Phased approach incorporating modifications + +In cases where a legacy warehouse has evolved over a long time, you might need to re-engineer to maintain the required performance levels or to support new data, such as Internet of Things (IoT) streams. Migrate to Azure Synapse to get the benefits of a scalable cloud environment as part of the re-engineering process. Migration could include a change in the underlying data model, such as a move from an Inmon model to a data vault. + +Microsoft recommends moving the existing data model as-is to Azure (optionally using a VM Teradata instance in Azure) and using the performance and flexibility of the Azure environment to apply the re-engineering changes, leveraging Azure's capabilities to make the changes without impacting the existing source system. + +#### Use an Azure VM Teradata instance as part of a migration + +> [!TIP] +> Use Azure VMs to create a temporary Teradata instance to speed up migration and minimize impact on the source system. + +When migrating from an on-premises Teradata environment, you can leverage the Azure environment. Azure provides cheap cloud storage and elastic scalability to create a Teradata instance within a VM in Azure, collocating with the target Azure Synapse environment. + +With this approach, standard Teradata utilities such as Teradata Parallel Data Transporter can efficiently move the subset of Teradata tables being migrated onto the VM instance. Then, all migration tasks can take place within the Azure environment. This approach has several benefits: + +- After the initial replication of data, the source system isn't impacted by the migration tasks. + +- The familiar Teradata interfaces, tools, and utilities are available within the Azure environment. + +- Once in the Azure environment, there are no potential issues with network bandwidth availability between the on-premises source system and the cloud target system. 
+ +- Tools like Azure Data Factory can efficiently call utilities like Teradata Parallel Transporter to migrate data quickly and easily. + +- The migration process is orchestrated and controlled entirely within the Azure environment, keeping everything in a single place. + +#### Use Azure Data Factory to implement a metadata-driven migration + +Automate and orchestrate the migration process by using the capabilities of the Azure environment. This approach minimizes the impact on the existing Teradata environment, which may already be running close to full capacity. + +Azure Data Factory is a cloud-based data integration service that allows creation of data-driven workflows in the cloud for orchestrating and automating data movement and data transformation. Using Data Factory, you can create and schedule data-driven workflows—called pipelines—to ingest data from disparate data stores. Data Factory can process and transform data by using compute services such as Azure HDInsight Hadoop, Spark, Azure Data Lake Analytics, and Azure Machine Learning. + +By creating metadata to list the data tables to be migrated and their location, you can use the Data Factory facilities to manage the migration process. + +### Design differences between Teradata and Azure Synapse + +#### Multiple databases versus a single database and schemas + +> [!TIP] +> Combine multiple databases into a single database in Azure Synapse and use schemas to logically separate the tables. + +In a Teradata environment, there are often multiple separate databases for individual parts of the overall environment. For example, there may be a separate database for data ingestion and staging tables, a database for the core warehouse tables, and another database for data marts, sometimes called a semantic layer. Processing these as ETL/ELT pipelines may implement cross-database joins and will move data between these separate databases. + +Querying within the Azure Synapse environment is limited to a single database. Schemas are used to separate the tables into logically separate groups. Therefore, we recommend using a series of schemas within the target Azure Synapse database to mimic any separate databases migrated from the Teradata environment. If the Teradata environment already uses schemas, you may need to use a new naming convention to move the existing Teradata tables and views to the new environment—for example, concatenate the existing Teradata schema and table names into the new Azure Synapse table name and use schema names in the new environment to maintain the original separate database names. Schema consolidation naming can have dots—however, Azure Synapse Spark may have issues. You can use SQL views over the underlying tables to maintain the logical structures, but there are some potential downsides to this approach: + +- Views in Azure Synapse are read-only, so any updates to the data must take place on the underlying base tables. + +- There may already be one or more layers of views in existence, and adding an extra layer of views might impact performance and supportability as nested views are difficult to troubleshoot. + +#### Table considerations + +> [!TIP] +> Use existing indexes to indicate candidates for indexing in the migrated warehouse. + +When migrating tables between different technologies, only the raw data and the metadata that describes it gets physically moved between the two environments. 
Other database elements from the source system—such as indexes—aren't migrated as these may not be needed or may be implemented differently within the new target environment. + +However, it's important to understand where performance optimizations such as indexes have been used in the source environment, as this can indicate where to add performance optimization in the new target environment. For example, if a non-unique secondary index (NUSI) has been created within the source Teradata environment, it may indicate that a non-clustered index should be created within the migrated Azure Synapse database. Other native performance optimization techniques, such as table replication, may be more applicable than a straight "like-for-like" index creation. + +#### High availability for the database + +Teradata supports data replication across nodes via the `FALLBACK` option, where table rows that reside physically on a given node are replicated to another node within the system. This approach guarantees that data won't be lost if there's a node failure and provides the basis for failover scenarios. + +The goal of the high availability architecture in Azure SQL Database is to guarantee that your database is up and running 99.9% of the time, without worrying about the impact of maintenance operations and outages. Azure automatically handles critical servicing tasks such as patching, backups, and Windows and SQL upgrades, as well as unplanned events such as underlying hardware, software, or network failures. + +Data storage in Azure Synapse is automatically [backed up](../../sql-data-warehouse/backup-and-restore.md) with snapshots. These snapshots are a built-in feature of the service that creates restore points. You don't have to enable this capability. Users can't currently delete automatic restore points, because the service uses them to maintain SLAs for recovery. + +Azure Synapse Dedicated SQL pool takes snapshots of the data warehouse throughout the day, creating restore points that are available for seven days. This retention period can't be changed. Azure Synapse supports an eight-hour recovery point objective (RPO). You can restore your data warehouse in the primary region from any one of the snapshots taken in the past seven days. If you require more granular backups, other user-defined options are available. + +#### Unsupported Teradata table types + +> [!TIP] +> Standard tables in Azure Synapse can support migrated Teradata time-series and temporal data. + +Teradata supports special table types for time-series and temporal data. The syntax and some of the functions for these table types aren't directly supported in Azure Synapse, but the data can be migrated into a standard table with appropriate data types and indexing or partitioning on the date/time column, as shown in the sketch at the end of this section. + +Teradata implements the temporal query functionality via query rewriting to add additional filters within a temporal query to limit the applicable date range. If this functionality is currently used in the source Teradata environment and is to be migrated, add this additional filtering into the relevant temporal queries. + +The Azure environment also includes specific features for complex analytics on time-series data at scale, called [Azure Time Series Insights](https://azure.microsoft.com/services/time-series-insights/). This is aimed at IoT data analysis applications and may be more appropriate for this use case.
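As a sketch of this standard-table approach, a migrated Teradata time-series table might be defined in a dedicated SQL pool with partitioning on the date column, and temporal-style queries rewritten as explicit date-range filters. The table, column names, and partition boundary values in the following example are hypothetical: + +```sql +-- Sketch only: a standard Azure Synapse table for migrated Teradata time-series data. +-- Table, column names, and partition boundaries are hypothetical. +CREATE TABLE dbo.SensorReadings +( +    SensorId     INT           NOT NULL, +    ReadingDate  DATE          NOT NULL, +    ReadingValue DECIMAL(18,4) NOT NULL +) +WITH +( +    DISTRIBUTION = HASH (SensorId), +    CLUSTERED COLUMNSTORE INDEX, +    PARTITION ( ReadingDate RANGE RIGHT FOR VALUES ('20220101', '20220401', '20220701', '20221001') ) +); + +-- A temporal-style query becomes an explicit date-range filter. +SELECT SensorId, AVG(ReadingValue) AS AvgReading +FROM dbo.SensorReadings +WHERE ReadingDate >= '2022-01-01' AND ReadingDate < '2022-04-01' +GROUP BY SensorId; +```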
+ +#### SQL DML syntax differences + +There are a few differences in SQL Data Manipulation Language (DML) syntax between Teradata SQL and Azure Synapse (T-SQL) that you should be aware of during migration: + +- `QUALIFY`: Teradata supports the `QUALIFY` operator. For example: + + ```sql + SELECT col1 + FROM tab1 + WHERE col1='XYZ' + QUALIFY ROW_NUMBER () OVER (PARTITION BY + col1 ORDER BY col1) = 1; + ``` + + The equivalent Azure Synapse syntax is: + + ```sql + SELECT * FROM ( + SELECT col1, ROW_NUMBER () OVER (PARTITION BY col1 ORDER BY col1) rn + FROM tab1 WHERE col1='XYZ' + ) AS t WHERE rn = 1; + ``` + +- Date arithmetic: Azure Synapse has operators such as `DATEADD` and `DATEDIFF` which can be used on `DATE` or `DATETIME` fields. Teradata supports direct subtraction on dates, such as `SELECT DATE1 - DATE2 FROM...`. + +- `GROUP BY` ordinals: Teradata allows ordinal positions in a `GROUP BY` clause; in T-SQL, explicitly provide the column name instead. + +- `LIKE ANY`: Teradata supports `LIKE ANY` syntax such as: + + ```sql + SELECT * FROM CUSTOMER + WHERE POSTCODE LIKE ANY + ('CV1%', 'CV2%', 'CV3%'); + ``` + + The equivalent Azure Synapse syntax is: + + ```sql + SELECT * FROM CUSTOMER + WHERE + (POSTCODE LIKE 'CV1%') OR (POSTCODE LIKE 'CV2%') OR (POSTCODE LIKE 'CV3%'); + ``` + +- Depending on system settings, character comparisons in Teradata may be case insensitive by default. In Azure Synapse, the case sensitivity of character comparisons depends on the database collation. + +#### Functions, stored procedures, triggers, and sequences + +> [!TIP] +> Assess the number and type of non-data objects to be migrated as part of the preparation phase. + +When migrating from a mature legacy data warehouse environment such as Teradata, you must often migrate elements other than simple tables and views to the new target environment. Examples include functions, stored procedures, triggers, and sequences. + +As part of the preparation phase, create an inventory of these objects to be migrated, and define the method of handling them. Assign an appropriate allocation of resources in the project plan. + +There may be facilities in the Azure environment that replace the functionality implemented as functions or stored procedures in the Teradata environment. In this case, it's more efficient to use the built-in Azure facilities rather than recoding the Teradata functions. + +[Data integration partners](../../partner/data-integration.md) offer tools and services that can automate the migration. + +##### Functions + +As with most database products, Teradata supports system functions and user-defined functions within its SQL implementation. When migrating to another database platform such as Azure Synapse, common system functions are available and can be migrated without change. Some system functions may have slightly different syntax, but in that case the required changes can be automated. + +For system functions where there's no equivalent, or for arbitrary user-defined functions, recode these using the language(s) available in the target environment. Azure Synapse uses the popular Transact-SQL language to implement user-defined functions. + +##### Stored procedures + +Most modern database products allow for procedures to be stored within the database. Teradata provides the SPL language for this purpose. + +A stored procedure typically contains SQL statements and some procedural logic, and may return data or a status. + +Azure Synapse Analytics also supports stored procedures using T-SQL. If you must migrate stored procedures, recode these procedures for their new environment.
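As a minimal illustration, a simple Teradata SPL load procedure might be recoded as a T-SQL stored procedure like the following sketch. The procedure, schema, and table names are hypothetical: + +```sql +-- Sketch only: a Teradata SPL load procedure recoded as a T-SQL stored procedure. +-- Procedure, schema, and table names are hypothetical. +CREATE PROCEDURE dbo.usp_LoadDailySales +    @LoadDate DATE +AS +BEGIN +    INSERT INTO dbo.FactSales (SaleDate, ProductKey, SalesAmount) +    SELECT SaleDate, ProductKey, SalesAmount +    FROM stg.DailySales +    WHERE SaleDate = @LoadDate; +END; +``` + +The procedure can then be invoked from a pipeline activity or SQL client with `EXEC dbo.usp_LoadDailySales @LoadDate = '2022-05-31';`.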
+ +##### Triggers + +Azure Synapse doesn't support trigger creation, but equivalent functionality can be implemented with Azure Data Factory. + +##### Sequences + +With Azure Synapse, sequences are handled in a similar way to Teradata. Use [IDENTITY](/sql/t-sql/statements/create-table-transact-sql-identity-property?msclkid=8ab663accfd311ec87a587f5923eaa7b) columns or SQL code to create the next sequence number in a series. + +### Extract metadata and data from a Teradata environment + +#### Data Definition Language (DDL) generation + +> [!TIP] +> Use existing Teradata metadata to automate the generation of CREATE TABLE and CREATE VIEW DDL for Azure Synapse Analytics. + +You can edit existing Teradata `CREATE TABLE` and `CREATE VIEW` scripts to create the equivalent definitions with modified data types, if necessary, as described in the previous section. Typically, this involves removing extra Teradata-specific clauses such as `FALLBACK`. + +However, all the information that specifies the current definitions of tables and views within the existing Teradata environment is maintained within system catalog tables. These tables are the best source of this information, as it's guaranteed to be up to date and complete. User-maintained documentation may not be in sync with the current table definitions. + +Access the information in these tables via views into the catalog such as `DBC.ColumnsV`, and generate the equivalent `CREATE TABLE` DDL statements for the corresponding tables in Azure Synapse. + +Third-party migration and ETL tools also use the catalog information to achieve the same result. + +#### Data extraction from Teradata + +> [!TIP] +> Use Teradata Parallel Transporter for the most efficient data extract. + +Migrate the raw data from existing Teradata tables using standard Teradata utilities, such as BTEQ and FASTEXPORT. During a migration exercise, extract the data as efficiently as possible. Use Teradata Parallel Transporter, which uses multiple parallel FASTEXPORT streams to achieve the best throughput. + +Call Teradata Parallel Transporter directly from Azure Data Factory. This is the recommended approach for managing the data migration process, whether the Teradata instance is on-premises or copied to a VM in the Azure environment, as described in the previous section. + +Recommended data formats for the extracted data include delimited text files (also called Comma Separated Values or CSV), Optimized Row Columnar (ORC), or Parquet files. + +For more information about the process of migrating data and ETL from a Teradata environment, see [Data migration, ETL, and load for Teradata migrations](2-etl-load-migration-considerations.md). + +## Performance recommendations for Teradata migrations + +This article provides general information and guidelines about the use of performance optimization techniques for Azure Synapse and adds specific recommendations for use when migrating from a Teradata environment. + +### Differences in performance tuning approach + +> [!TIP] +> Prioritize early familiarity with Azure Synapse tuning options in a migration exercise. + +This section highlights lower-level implementation differences between Teradata and Azure Synapse for performance tuning. + +#### Data distribution options + +Azure Synapse enables the specification of data distribution methods for individual tables. The aim is to reduce the amount of data that must be moved between processing nodes when executing a query.
+ +For large table-large table joins, hash distribute one or, ideally, both tables on one of the join columns that has a wide range of values to help ensure an even distribution. Join processing then happens locally, because the data rows to be joined are already collocated on the same processing node. + +Another way to achieve local joins for small table-large table joins—typically dimension table to fact table in a star schema model—is to replicate the smaller dimension table across all nodes. This ensures that any value of the join key of the larger table will have a matching dimension row locally available. The overhead of replicating the dimension tables is relatively low, provided the tables aren't very large (see [Design guidance for replicated tables](../../sql-data-warehouse/design-guidance-for-replicated-tables.md))—in which case, the hash distribution approach as previously described is more appropriate. For more information, see [Distributed tables design](../../sql-data-warehouse/sql-data-warehouse-tables-distribute.md). + +#### Data indexing + +Azure Synapse provides several indexing options, but these are different from the indexing options implemented in Teradata. For more information about the different indexing options, see [table indexes](/azure/sql-data-warehouse/sql-data-warehouse-tables-index). + +Existing indexes within the source Teradata environment can, however, provide a useful indication of how the data is currently used. They can identify candidates for indexing within the Azure Synapse environment. + +#### Data partitioning + +In an enterprise data warehouse, fact tables can contain many billions of rows. Partitioning optimizes the maintenance and querying of these tables by splitting them into separate parts to reduce the amount of data processed. The `CREATE TABLE` statement defines the partitioning specification for a table. Partitioning should only be done on very large tables where each partition will contain at least 60 million rows. + +Only one field per table can be used for partitioning. That field is frequently a date field since many queries are filtered by date or a date range. It's possible to change the partitioning of a table after initial load by recreating the table with the new partitioning specification using the `CREATE TABLE AS` (or CTAS) statement. See [table partitions](/azure/sql-data-warehouse/sql-data-warehouse-tables-partition) for a detailed discussion of partitioning in Azure Synapse. + +#### Data table statistics + +Ensure that statistics on data tables are up to date by building in a [statistics](../../sql/develop-tables-statistics.md) step to ETL/ELT jobs. + +#### PolyBase for data loading + +PolyBase is the most efficient method for loading large amounts of data into the warehouse since it can leverage parallel loading streams. For more information, see [PolyBase data loading strategy](../../sql/load-data-overview.md). + +#### Use workload management + +Use [workload management](../../sql-data-warehouse/sql-data-warehouse-workload-management.md?context=%2fazure%2fsynapse-analytics%2fcontext%2fcontext) instead of resource classes. ETL should run in its own workload group, configured to have more resources per query (lower concurrency with more resources per request). For more information, see [What is dedicated SQL pool in Azure Synapse Analytics](../../sql-data-warehouse/sql-data-warehouse-overview-what-is.md).
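For example, a minimal workload management sketch for an ETL workload might look like the following. The group name, login name, and resource percentages are illustrative assumptions rather than recommendations: + +```sql +-- Sketch only: a workload group that reserves resources for ETL loads, plus a classifier +-- that routes requests from a hypothetical load login into that group. +CREATE WORKLOAD GROUP wgDataLoads +WITH +( +    MIN_PERCENTAGE_RESOURCE = 30, +    CAP_PERCENTAGE_RESOURCE = 60, +    REQUEST_MIN_RESOURCE_GRANT_PERCENT = 10 +); + +CREATE WORKLOAD CLASSIFIER wcDataLoads +WITH +( +    WORKLOAD_GROUP = 'wgDataLoads', +    MEMBERNAME = 'etl_load_user' +); +```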
+ +## Next steps + +To learn more about ETL and load for Teradata migration, see the next article in this series: [Data migration, ETL, and load for Teradata migrations](2-etl-load-migration-considerations.md). \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/teradata/2-etl-load-migration-considerations.md b/articles/synapse-analytics/migration-guides/teradata/2-etl-load-migration-considerations.md new file mode 100644 index 0000000000000..9a7d986aa5f30 --- /dev/null +++ b/articles/synapse-analytics/migration-guides/teradata/2-etl-load-migration-considerations.md @@ -0,0 +1,229 @@ +--- +title: "Data migration, ETL, and load for Teradata migrations" +description: Learn how to plan your data migration from Teradata to Azure Synapse Analytics to minimize the risk and impact on users. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/31/2022 +--- + +# Data migration, ETL, and load for Teradata migrations + +This article is part two of a seven part series that provides guidance on how to migrate from Teradata to Azure Synapse Analytics. This article provides best practices for ETL and load migration. + +## Data migration considerations + +### Initial decisions for data migration from Teradata + +When migrating a Teradata data warehouse, you need to ask some basic data-related questions. For example: + +- Should unused table structures be migrated? + +- What's the best migration approach to minimize risk and user impact? + +- When migrating data marts: stay physical or go virtual? + +The next sections discuss these points within the context of migration from Teradata. + +#### Migrate unused tables? + +> [!TIP] +> In legacy systems, it's not unusual for tables to become redundant over time—these don't need to be migrated in most cases. + +It makes sense to only migrate tables that are in use in the existing system. Tables that aren't active can be archived rather than migrated, so that the data is available if necessary in future. It's best to use system metadata and log files rather than documentation to determine which tables are in use, because documentation can be out of date. + +If enabled, Teradata system catalog tables and logs contain information that can determine when a given table was last accessed—which can in turn be used to decide whether a table is a candidate for migration. + +Here's an example query on `DBC.Tables` that provides the date of last access and last modification: + +```sql +SELECT TableName, CreatorName, CreateTimeStamp, LastAlterName, +LastAlterTimeStamp, AccessCount, LastAccessTimeStamp +FROM DBC.Tables t +WHERE DataBaseName = 'databasename' +``` + +If logging is enabled and the log history is accessible, other information, such as SQL query text, is available in table DBQLogTbl and associated logging tables. For more information, see [Teradata log history](https://docs.teradata.com/reader/wada1XMYPkZVTqPKz2CNaw/PuQUxpyeCx4jvP8XCiEeGA). + +#### What is the best migration approach to minimize risk and impact on users? + +> [!TIP] +> Migrate the existing model as-is initially, even if a change to the data model is planned in the future. + +This question comes up often since companies often want to lower the impact of changes on the data warehouse data model to improve agility. Companies see an opportunity to do so during a migration to modernize their data model. 
This approach carries a higher risk because it could impact the ETL jobs that populate the data warehouse, as well as the jobs that feed dependent data marts from the data warehouse. Because of that risk, it's usually better to redesign on this scale after the data warehouse migration. + +Even if a data model change is an intended part of the overall migration, it's good practice to migrate the existing model as-is to the new environment (Azure Synapse Analytics in this case), rather than do any re-engineering on the new platform during migration. This approach has the advantage of minimizing the impact on existing production systems, while also leveraging the performance and elastic scalability of the Azure platform for one-off re-engineering tasks. + +When migrating from Teradata, consider creating a Teradata environment in a VM within Azure as a stepping stone in the migration process. + +#### Use a VM Teradata instance as part of a migration + +One optional approach for migrating from an on-premises Teradata environment is to leverage the Azure environment to create a Teradata instance in a VM within Azure, collocated with the target Azure Synapse environment. This is possible because Azure provides cheap cloud storage and elastic scalability. + +With this approach, standard Teradata utilities, such as Teradata Parallel Data Transporter—or third-party data replication tools, such as Attunity Replicate—can be used to efficiently move the subset of Teradata tables that need to be migrated to the VM instance. Then, all migration tasks can take place within the Azure environment. This approach has several benefits: + +- After the initial replication of data, migration tasks don't impact the source system. + +- The Azure environment has familiar Teradata interfaces, tools, and utilities. + +- Once the data is in the Azure environment, network bandwidth between the on-premises source system and the cloud target system is no longer a constraint on migration tasks. + +- Tools like Azure Data Factory can efficiently call utilities like Teradata Parallel Transporter to migrate data quickly and easily. + +- The migration process is orchestrated and controlled entirely within the Azure environment. + +#### When migrating data marts: stay physical or go virtual? + +> [!TIP] +> Virtualizing data marts can save on storage and processing resources. + +In legacy Teradata data warehouse environments, it's common practice to create several data marts that are structured to provide good performance for ad hoc self-service queries and reports for a given department or business function within an organization. As such, a data mart typically consists of a subset of the data warehouse and contains aggregated versions of the data in a form that enables users to easily query that data with fast response times via user-friendly query tools such as Microsoft Power BI, Tableau, or MicroStrategy. This form is typically a dimensional data model. One use of data marts is to expose the data in a usable form, even if the underlying warehouse data model is something different, such as a data vault. + +You can use separate data marts for individual business units within an organization to implement robust data security regimes, by only allowing users to access specific data marts that are relevant to them, and eliminating, obfuscating, or anonymizing sensitive data. + +If these data marts are implemented as physical tables, they'll require additional storage resources to store them, and additional processing to build and refresh them regularly.
Also, the data in the mart will only be as up to date as the last refresh operation, and so may be unsuitable for highly volatile data dashboards. + +> [!TIP] +> The performance and scalability of Azure Synapse make it possible to virtualize data marts without sacrificing query response times. + +With the advent of relatively low-cost scalable MPP architectures, such as Azure Synapse, and the inherent performance characteristics of such architectures, it may be that you can provide data mart functionality without having to instantiate the mart as a set of physical tables. This is achieved by effectively virtualizing the data marts via SQL views onto the main data warehouse, or via a virtualization layer using features such as views in Azure or the [virtualization products of Microsoft partners](../../partner/data-integration.md). This approach simplifies or eliminates the need for additional storage and aggregation processing and reduces the overall number of database objects to be migrated. + +There's another potential benefit to this approach. By implementing the aggregation and join logic within a virtualization layer, and presenting external reporting tools via a virtualized view, the processing required to create these views is "pushed down" into the data warehouse, which is generally the best place to run joins, aggregations, and other related operations on large data volumes. + +The primary drivers for choosing a virtual data mart implementation over a physical data mart are: + +- More agility: a virtual data mart is easier to change than physical tables and the associated ETL processes. + +- Lower total cost of ownership: a virtualized implementation requires fewer data stores and copies of data. + +- Elimination of ETL jobs to migrate, and a simpler data warehouse architecture in a virtualized environment. + +- Performance: although physical data marts have historically been more performant, virtualization products now implement intelligent caching techniques to mitigate this difference. + +### Data migration from Teradata + +#### Understand your data + +Part of migration planning is understanding in detail the volume of data that needs to be migrated since that can impact decisions about the migration approach. Use system metadata to determine the physical space taken up by the "raw data" within the tables to be migrated. In this context, "raw data" means the amount of space used by the data rows within a table, excluding overheads such as indexes and compression. This is especially true for the largest fact tables since these will typically comprise more than 95% of the data. + +You can get an accurate number for the volume of data to be migrated for a given table by extracting a representative sample of the data—for example, one million rows—to an uncompressed delimited flat ASCII data file. Then, use the size of that file to get an average raw data size per row of that table. Finally, multiply that average size by the total number of rows in the full table to give a raw data size for the table. Use that raw data size in your planning. + +## ETL migration considerations + +### Initial decisions regarding Teradata ETL migration + +> [!TIP] +> Plan the approach to ETL migration ahead of time and leverage Azure facilities where appropriate. + +For ETL/ELT processing, legacy Teradata data warehouses may use custom-built scripts using Teradata utilities such as BTEQ and Teradata Parallel Transporter (TPT), or third-party ETL tools such as Informatica or Ab Initio.
Sometimes, Teradata data warehouses use a combination of ETL and ELT approaches that's evolved over time. When planning a migration to Azure Synapse, you need to determine the best way to implement the required ETL/ELT processing in the new environment while minimizing the cost and risk involved. To learn more about ETL and ELT processing, see [ELT vs ETL design approach](../../sql-data-warehouse/design-elt-data-loading.md). + +The following sections discuss migration options and make recommendations for various use cases. This flowchart summarizes one approach: + +:::image type="content" source="../media/2-etl-load-migration-considerations/migration-options-flowchart.png" border="true" alt-text="Flowchart of migration options and recommendations."::: + +The first step is always to build an inventory of ETL/ELT processes that need to be migrated. As with other steps, it's possible that the standard "built-in" Azure features make it unnecessary to migrate some existing processes. For planning purposes, it's important to understand the scale of the migration to be performed. + +In the preceding flowchart, decision 1 relates to a high-level decision about whether to migrate to a totally Azure-native environment. If you're moving to a totally Azure-native environment, we recommend that you re-engineer the ETL processing using [Pipelines and activities in Azure Data Factory](../../../data-factory/concepts-pipelines-activities.md?msclkid=b6ea2be4cfda11ec929ac33e6e00db98&tabs=data-factory) or [Azure Synapse Pipelines](../../get-started-pipelines.md?msclkid=b6e99db9cfda11ecbaba18ca59d5c95c). If you're not moving to a totally Azure-native environment, then decision 2 is whether an existing third-party ETL tool is already in use. + +In the Teradata environment, some or all ETL processing may be performed by custom scripts using Teradata-specific utilities like BTEQ and TPT. In this case, your approach should be to re-engineer using Data Factory. + +> [!TIP] +> Leverage investment in existing third-party tools to reduce cost and risk. + +If a third-party ETL tool is already in use, and especially if there's a large investment in skills or several existing workflows and schedules use that tool, then decision 3 is whether the tool can efficiently support Azure Synapse as a target environment. Ideally, the tool will include "native" connectors that can leverage Azure facilities like PolyBase or [COPY INTO](/sql/t-sql/statements/copy-into-transact-sql), for the most efficient data loading. There's a way to call an external process, such as PolyBase or `COPY INTO`, and pass in the appropriate parameters. In this case, leverage existing skills and workflows, with Azure Synapse as the new target environment. + +If you decide to retain an existing third-party ETL tool, there may be benefits to running that tool within the Azure environment (rather than on an existing on-premises ETL server) and having Azure Data Factory handle the overall orchestration of the existing workflows. One particular benefit is that less data needs to be downloaded from Azure, processed, and then uploaded back into Azure. So, decision 4 is whether to leave the existing tool running as-is or move it into the Azure environment to achieve cost, performance, and scalability benefits. 
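Whichever tool performs the orchestration, the most efficient load step into Azure Synapse typically resolves to a PolyBase or `COPY INTO` operation. As a sketch, a `COPY INTO` load of extracted, gzip-compressed CSV files might look like the following; the target table, storage path, and authentication method are hypothetical: + +```sql +-- Sketch only: load extracted, gzip-compressed CSV files from Azure Blob Storage +-- into a dedicated SQL pool table. Table, path, and credential are hypothetical. +COPY INTO dbo.FactSales +FROM 'https://mymigrationstore.blob.core.windows.net/extracts/fact_sales/*.csv.gz' +WITH +( +    FILE_TYPE = 'CSV', +    COMPRESSION = 'GZIP', +    FIELDTERMINATOR = ',', +    FIRSTROW = 2,  -- assumes a header row in each extract file +    CREDENTIAL = (IDENTITY = 'Managed Identity'), +    MAXERRORS = 0 +); +```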
+
+### Re-engineer existing Teradata-specific scripts
+
+If some or all of the existing Teradata warehouse ETL/ELT processing is handled by custom scripts that utilize Teradata-specific utilities, such as BTEQ, MLOAD, or TPT, these scripts need to be recoded for the new Azure Synapse environment. Similarly, if ETL processes were implemented using stored procedures in Teradata, then these will also have to be recoded.
+
+> [!TIP]
+> The inventory of ETL tasks to be migrated should include scripts and stored procedures.
+
+Some elements of the ETL process are easy to migrate, such as a simple bulk data load into a staging table from an external file. It may even be possible to automate those parts of the process, for example, by using PolyBase instead of FastLoad or MLOAD. If the exported files are Parquet, you can use a native Parquet reader, which is a faster option than PolyBase. Other parts of the process that contain arbitrarily complex SQL and/or stored procedures will take more time to re-engineer.
+
+One way of testing Teradata SQL for compatibility with Azure Synapse is to capture some representative SQL statements from Teradata logs, then prefix those queries with `EXPLAIN`, and then—assuming a like-for-like migrated data model in Azure Synapse—run those `EXPLAIN` statements in Azure Synapse. Any incompatible SQL will generate an error, and the error information can help you determine the scale of the recoding task.
+
+[Microsoft partners](/azure/sql-data-warehouse/sql-data-warehouse-partner-data-integration) offer tools and services to migrate Teradata SQL and stored procedures to Azure Synapse.
+
+### Use third-party ETL tools
+
+As described in the previous section, in many cases the existing legacy data warehouse system will already be populated and maintained by third-party ETL products. For a list of Microsoft data integration partners for Azure Synapse, see [Data integration partners](/azure/sql-data-warehouse/sql-data-warehouse-partner-data-integration).
+
+## Data loading from Teradata
+
+### Choices available when loading data from Teradata
+
+> [!TIP]
+> Third-party tools can simplify and automate the migration process and therefore reduce risk.
+
+When it comes to migrating data from a Teradata data warehouse, there are some basic questions associated with data loading that need to be resolved. You'll need to decide how the data will be physically moved from the existing on-premises Teradata environment into Azure Synapse in the cloud, and which tools will be used to perform the transfer and load. Consider the following questions, which are discussed in the next sections.
+
+- Will you extract the data to files, or move it directly via a network connection?
+
+- Will you orchestrate the process from the source system, or from the Azure target environment?
+
+- Which tools will you use to automate and manage the process?
+
+#### Transfer data via files or network connection?
+
+> [!TIP]
+> Understand the data volumes to be migrated and the available network bandwidth since these factors influence the migration approach decision.
+
+Once the database tables to be migrated have been created in Azure Synapse, you can move the data that populates those tables out of the legacy Teradata system and into the new environment. There are two basic approaches:
+
+- **File extract**: extract the data from the Teradata tables to flat files, normally in CSV format, via BTEQ, FastExport, or Teradata Parallel Transporter (TPT).
Use TPT whenever possible since it's the most efficient in terms of data throughput. + + This approach requires space to land the extracted data files. The space could be local to the Teradata source database (if sufficient storage is available), or remote in Azure Blob Storage. The best performance is achieved when a file is written locally, since that avoids network overhead. + + To minimize the storage and network transfer requirements, it's good practice to compress the extracted data files using a utility like gzip. + + Once extracted, the flat files can either be moved into Azure Blob Storage (collocated with the target Azure Synapse instance) or loaded directly into Azure Synapse using PolyBase or [COPY INTO](/sql/t-sql/statements/copy-into-transact-sql). The method for physically moving data from local on-premises storage to the Azure cloud environment depends on the amount of data and the available network bandwidth. + + Microsoft provides different options to move large volumes of data, including AZCopy for moving files across the network into Azure Storage, Azure ExpressRoute for moving bulk data over a private network connection, and Azure Data Box where the files are moved to a physical storage device that's then shipped to an Azure data center for loading. For more information, see [data transfer](/azure/architecture/data-guide/scenarios/data-transfer). + +- **Direct extract and load across network**: the target Azure environment sends a data extract request, normally via a SQL command, to the legacy Teradata system to extract the data. The results are sent across the network and loaded directly into Azure Synapse, with no need to land the data into intermediate files. The limiting factor in this scenario is normally the bandwidth of the network connection between the Teradata database and the Azure environment. For very large data volumes this approach may not be practical. + +There's also a hybrid approach that uses both methods. For example, you can use the direct network extract approach for smaller dimension tables and samples of the larger fact tables to quickly provide a test environment in Azure Synapse. For the large volume historical fact tables, you can use the file extract and transfer approach using Azure Data Box. + +#### Orchestrate from Teradata or Azure? + +The recommended approach when moving to Azure Synapse is to orchestrate the data extract and loading from the Azure environment using [Azure Synapse Pipelines](../../get-started-pipelines.md?msclkid=b6e99db9cfda11ecbaba18ca59d5c95c) or [Azure Data Factory](../../../data-factory/introduction.md?msclkid=2ccc66eccfde11ecaa58877e9d228779), as well as associated utilities, such as PolyBase or [COPY INTO](/sql/t-sql/statements/copy-into-transact-sql), for most efficient data loading. This approach leverages the Azure capabilities and provides an easy method to build reusable data loading pipelines. + +Other benefits of this approach include reduced impact on the Teradata system during the data load process since the management and loading process is running in Azure, and the ability to automate the process by using metadata-driven data load pipelines. + +#### Which tools can be used? + +The task of data transformation and movement is the basic function of all ETL products. If one of these products is already in use in the existing Teradata environment, then using the existing ETL tool may simplify data migration from Teradata to Azure Synapse. 
This approach assumes that the ETL tool supports Azure Synapse as a target environment. For more information on tools that support Azure Synapse, see [Data integration partners](/azure/sql-data-warehouse/sql-data-warehouse-partner-data-integration). + +If you're using an ETL tool, consider running that tool within the Azure environment to benefit from Azure cloud performance, scalability, and cost, and free up resources in the Teradata data center. Another benefit is reduced data movement between the cloud and on-premises environments. + +## Summary + +To summarize, our recommendations for migrating data and associated ETL processes from Teradata to Azure Synapse are: + +- Plan ahead to ensure a successful migration exercise. + +- Build a detailed inventory of data and processes to be migrated as soon as possible. + +- Use system metadata and log files to get an accurate understanding of data and process usage. Don't rely on documentation since it may be out of date. + +- Understand the data volumes to be migrated, and the network bandwidth between the on-premises data center and Azure cloud environments. + +- Consider using a Teradata instance in an Azure VM as a stepping stone to offload migration from the legacy Teradata environment. + +- Leverage standard "built-in" Azure features to minimize the migration workload. + +- Identify and understand the most efficient tools for data extraction and loading in both Teradata and Azure environments. Use the appropriate tools in each phase of the process. + +- Use Azure facilities, such as [Azure Synapse Pipelines](../../get-started-pipelines.md?msclkid=b6e99db9cfda11ecbaba18ca59d5c95c) or [Azure Data Factory](../../../data-factory/introduction.md?msclkid=2ccc66eccfde11ecaa58877e9d228779), to orchestrate and automate the migration process while minimizing impact on the Teradata system. + +## Next steps + +To learn more about security access operations, see the next article in this series: [Security, access, and operations for Teradata migrations](3-security-access-operations.md). \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/teradata/3-security-access-operations.md b/articles/synapse-analytics/migration-guides/teradata/3-security-access-operations.md new file mode 100644 index 0000000000000..a130937e0feaf --- /dev/null +++ b/articles/synapse-analytics/migration-guides/teradata/3-security-access-operations.md @@ -0,0 +1,378 @@ +--- +title: "Security, access, and operations for Teradata migrations" +description: Learn about authentication, users, roles, permissions, monitoring, and auditing, and workload management in Azure Synapse Analytics and Teradata. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/31/2022 +--- + +# Security, access, and operations for Teradata migrations + +This article is part three of a seven part series that provides guidance on how to migrate from Teradata to Azure Synapse Analytics. This article provides best practices for security access operations. + +## Security considerations + +This article discusses connection methods for existing legacy Teradata environments and how they can be migrated to Azure Synapse Analytics with minimal risk and user impact. + +This article assumes that there's a requirement to migrate the existing methods of connection and user/role/permission structure as-is. 
If not, use the Azure portal to create and manage a new security regime. + +For more information on the [Azure Synapse security](../../sql-data-warehouse/sql-data-warehouse-overview-manage-security.md#authorization) options, see [Security whitepaper](../../guidance/security-white-paper-introduction.md). + +### Connection and authentication + +#### Teradata authorization options + +> [!TIP] +> Authentication in both Teradata and Azure Synapse can be "in database" or through external methods. + +Teradata supports several mechanisms for connection and authorization. Valid mechanism values are: + +- **TD1**, which selects Teradata 1 as the authentication mechanism. Username and password are required. + +- **TD2**, which selects Teradata 2 as the authentication mechanism. Username and password are required. + +- **TDNEGO**, which selects one of the authentication mechanisms automatically based on the policy, without user involvement. + +- **LDAP**, which selects Lightweight Directory Access Protocol (LDAP) as the authentication mechanism. The application provides the username and password. + +- **KRB5**, which selects Kerberos (KRB5) on Windows clients working with Windows servers. To log on using KRB5, the user needs to supply a domain, username, and password. The domain is specified by setting the username to `MyUserName@MyDomain`. + +- **NTLM**, which selects NTLM on Windows clients working with Windows servers. The application provides the username and password. + +Kerberos (KRB5), Kerberos Compatibility (KRB5C), NT LAN Manager (NTLM), and NT LAN Manager Compatibility (NTLMC) are for Windows only. + +#### Azure Synapse authorization options + +Azure Synapse supports two basic options for connection and authorization: + +- **SQL authentication**: SQL authentication is via a database connection that includes a database identifier, user ID, and password plus other optional parameters. This is functionally equivalent to Teradata TD1, TD2 and default connections. + +- **Azure Active Directory (Azure AD) authentication**: with Azure AD authentication, you can centrally manage the identities of database users and other Microsoft services in one central location. Central ID management provides a single place to manage SQL Data Warehouse users and simplifies permission management. Azure AD can also support connections to LDAP and Kerberos services—for example, Azure AD can be used to connect to existing LDAP directories if these are to remain in place after migration of the database. + +### Users, roles, and permissions + +#### Overview + +> [!TIP] +> High-level planning is essential for a successful migration project. + +Both Teradata and Azure Synapse implement database access control via a combination of users, roles, and permissions. Both use standard SQL `CREATE USER` and `CREATE ROLE` statements to define users and roles, and `GRANT` and `REVOKE` statements to assign or remove permissions to those users and/or roles. + +> [!TIP] +> Automation of migration processes is recommended to reduce elapsed time and scope for errors. + +Conceptually the two databases are similar, and it might be possible to automate the migration of existing user IDs, roles, and permissions to some degree. Migrate such data by extracting the existing legacy user and role information from the Teradata system catalog tables and generating matching equivalent `CREATE USER` and `CREATE ROLE` statements to be run in Azure Synapse to recreate the same user/role hierarchy. 
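+
+As a simple illustration of this technique, the following sketch runs on Teradata and emits Azure Synapse `CREATE LOGIN` and `CREATE USER` statements, one pair per user. It's an assumption-laden example: the `<initial-password>` placeholder must be replaced by your own scheme for allocating new passwords, because existing passwords can't be extracted from Teradata.
+
+```sql
+-- Illustrative sketch only: run on Teradata to generate DDL for Azure Synapse.
+-- Each user database (DBKind = 'U') becomes a login plus a database user.
+SELECT 'CREATE LOGIN ' || TRIM(DatabaseName) ||
+       ' WITH PASSWORD = ''<initial-password>'';' AS GeneratedDDL
+FROM   DBC.Databases
+WHERE  DBKind = 'U'
+UNION ALL
+SELECT 'CREATE USER ' || TRIM(DatabaseName) ||
+       ' FOR LOGIN ' || TRIM(DatabaseName) || ';'
+FROM   DBC.Databases
+WHERE  DBKind = 'U';
+```
+
+The generated statements can then be reviewed and executed against Azure Synapse, with logins created in the `master` database and users created in the target database.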
+
+After data extraction, use Teradata system catalog tables to generate equivalent `GRANT` statements to assign permissions (where an equivalent exists). The following diagram shows how to use existing metadata to generate the necessary SQL.
+
+:::image type="content" source="../media/3-security-access-operations/automating-migration-privileges.png" border="true" alt-text="Chart showing how to automate the migration of privileges from an existing system.":::
+
+#### Users and roles
+
+> [!TIP]
+> Migration of a data warehouse requires more than just tables, views, and SQL statements.
+
+The information about current users and roles in a Teradata system is found in the system catalog tables `DBC.USERS` (or `DBC.DATABASES`) and `DBC.ROLEMEMBERS`. Query these tables (if the user has `SELECT` access to those tables) to obtain current lists of users and roles defined within the system. The following example queries list all users and all roles:
+
+```sql
+/***SQL to find all users***/
+SELECT DatabaseName AS UserName
+FROM DBC.Databases
+WHERE DBKind = 'U';
+
+/***SQL to find all roles***/
+SELECT A.RoleName,
+       A.Grantee,
+       A.Grantor,
+       A.DefaultRole,
+       A.WithAdmin,
+       B.DatabaseName,
+       B.TableName,
+       B.ColumnName,
+       B.GrantorName,
+       B.AccessRight
+FROM DBC.RoleMembers A
+JOIN DBC.AllRoleRights B
+  ON A.RoleName = B.RoleName
+GROUP BY 1,2,3,4,5,6,7,8,9,10   -- include every selected column to de-duplicate rows
+ORDER BY 2,1,6;
+```
+
+You can modify these example `SELECT` statements to produce a result set that is a series of `CREATE USER` and `CREATE ROLE` statements, by including the appropriate text as a literal within the `SELECT` statement.
+
+There's no way to retrieve existing passwords, so you need to implement a scheme for allocating new initial passwords on Azure Synapse.
+
+#### Permissions
+
+> [!TIP]
+> There are equivalent Azure Synapse permissions for basic database operations such as DML and DDL.
+
+In a Teradata system, the system tables `DBC.ALLRIGHTS` and `DBC.ALLROLERIGHTS` hold the access rights for users and roles. Query these tables (if the user has `SELECT` access to those tables) to obtain current lists of access rights defined within the system. The following example queries obtain the access rights held by a specific user and by a specific role:
+
+```sql
+/***SQL for AccessRights held by a USER***/
+SELECT UserName,
+       DatabaseName,
+       TableName,
+       ColumnName,
+       CASE WHEN Abbv.AccessRight IS NOT NULL
+            THEN Abbv.Description
+            ELSE ALRTS.AccessRight
+       END AS AccessRight,
+       GrantAuthority,
+       GrantorName,
+       AllnessFlag,
+       CreatorName,
+       CreateTimeStamp
+FROM DBC.AllRights ALRTS
+LEFT OUTER JOIN AccessRightsAbbv Abbv
+  ON ALRTS.AccessRight = Abbv.AccessRight
+WHERE UserName = 'UserXYZ'
+ORDER BY 2,3,4,5;
+
+/***SQL for AccessRights held by a ROLE***/
+SELECT RoleName,
+       DatabaseName,
+       TableName,
+       ColumnName,
+       CASE WHEN Abbv.AccessRight IS NOT NULL
+            THEN Abbv.Description
+            ELSE ALRTS.AccessRight
+       END AS AccessRight,
+       GrantorName,
+       CreateTimeStamp
+FROM DBC.AllRoleRights ALRTS
+LEFT OUTER JOIN AccessRightsAbbv Abbv
+  ON ALRTS.AccessRight = Abbv.AccessRight
+WHERE RoleName = 'BI_DEVELOPER'
+ORDER BY 2,3,4,5;
+```
+
+Modify these example `SELECT` statements to produce a result set that's a series of `GRANT` statements by including the appropriate text as a literal within the `SELECT` statement.
+
+Use the table `AccessRightsAbbv` to look up the full text of the access right, as the join key is an abbreviated 'type' field. See the following table for a list of Teradata access rights and their equivalent in Azure Synapse.
+ +| Teradata permission name | Teradata type | Azure Synapse equivalent | +|------------------------------|---------------|-----------------| +| **ABORT SESSION** | AS | KILL DATABASE CONNECTION | +| **ALTER EXTERNAL PROCEDURE** | AE | 4 | +| **ALTER FUNCTION** | AF | ALTER FUNCTION | +| **ALTER PROCEDURE** | AP | ALTER PROCEDURE | +| **CHECKPOINT** | CP | CHECKPOINT | +| **CREATE AUTHORIZATION** | CA | CREATE LOGIN | +| **CREATE DATABASE** | CD | CREATE DATABASE | +| **CREATE EXTERNAL** **PROCEDURE** | CE | 4 | +| **CREATE FUNCTION** | CF | CREATE FUNCTION | +| **CREATE GLOP** | GC | 3 | +| **CREATE MACRO** | CM | CREATE PROCEDURE 2 | +| **CREATE OWNER PROCEDURE** | OP | CREATE PROCEDURE | +| **CREATE PROCEDURE** | PC | CREATE PROCEDURE | +| **CREATE PROFILE** | CO | CREATE LOGIN 1 | +| **CREATE ROLE** | CR | CREATE ROLE | +| **DROP DATABASE** | DD | DROP DATABASE| +| **DROP FUNCTION** | DF | DROP FUNCTION | +| **DROP GLOP** | GD | 3 | +| **DROP MACRO** | DM | DROP PROCEDURE 2 | +| **DROP PROCEDURE** | PD | DELETE PROCEDURE | +| **DROP PROFILE** | DO | DROP LOGIN 1 | +| **DROP ROLE** | DR | DELETE ROLE | +| **DROP TABLE** | DT | DROP TABLE | +| **DROP TRIGGER** | DG | 3 | +| **DROP USER** | DU | DROP USER | +| **DROP VIEW** | DV | DROP VIEW | +| **DUMP** | DP | 4 | +| **EXECUTE** | E | EXECUTE | +| **EXECUTE FUNCTION** | EF | EXECUTE | +| **EXECUTE PROCEDURE** | PE | EXECUTE | +| **GLOP MEMBER** | GM | 3 | +| **INDEX** | IX | CREATE INDEX | +| **INSERT** | I | INSERT | +| **MONRESOURCE** | MR | 5 | +| **MONSESSION** | MS | 5 | +| **OVERRIDE DUMP CONSTRAINT** | OA | 4 | +| **OVERRIDE RESTORE CONSTRAINT** | OR | 4 | +| **REFERENCES** | RF | REFERENCES | +| **REPLCONTROL** | RO | 5 | +| **RESTORE** | RS | 4 | +| **SELECT** | R | SELECT | +| **SETRESRATE** | SR | 5 | +| **SETSESSRATE** | SS | 5 | +| **SHOW** | SH | 3 | +| **UPDATE** | U | UPDATE | + +`AccessRightsAbbv` table notes: + +1. Teradata `PROFILE` is functionally equivalent to `LOGIN` in Azure Synapse. + +1. The following table summarizes the differences between macros and stored procedures in Teradata. In Azure Synapse, procedures provide the functionality described in the table. + + | Macro | Stored procedure | + |-|-| + | Contains SQL | Contains SQL | + | May contain BTEQ dot commands | Contains comprehensive SPL | + | May receive parameter values passed to it | May receive parameter values passed to it | + | May retrieve one or more rows | Must use a cursor to retrieve more than one row | + | Stored in DBC PERM space | Stored in DATABASE or USER PERM | + | Returns rows to the client | May return one or more values to client as parameters | + +1. `SHOW`, `GLOP`, and `TRIGGER` have no direct equivalent in Azure Synapse. + +1. These features are managed automatically by the system in Azure Synapse. See [Operational considerations](#operational-considerations). + +1. In Azure Synapse, these features are handled outside of the database. + +For more information about access rights in Azure Synapse, see to [Azure Synapse Analytics security permissions](../../guidance/security-white-paper-introduction.md). + +## Operational considerations + +> [!TIP] +> Operational tasks are necessary to keep any data warehouse operating efficiently. + +This section discusses how to implement typical Teradata operational tasks in Azure Synapse with minimal risk and impact to users. 
+
+As with all data warehouse products, once in production there are ongoing management tasks that are necessary to keep the system running efficiently and to provide data for monitoring and auditing. Resource utilization and capacity planning for future growth also fall into this category, as does backup/restore of data.
+
+While conceptually the management and operations tasks for different data warehouses are similar, the individual implementations may differ. In general, modern cloud-based products such as Azure Synapse tend to incorporate a more automated and "system managed" approach (as opposed to a more "manual" approach in legacy data warehouses such as Teradata).
+
+The following sections compare Teradata and Azure Synapse options for various operational tasks.
+
+### Housekeeping tasks
+
+> [!TIP]
+> Housekeeping tasks keep a production warehouse operating efficiently and optimize use of resources such as storage.
+
+In most legacy data warehouse environments, there's a requirement to perform regular "housekeeping" tasks such as reclaiming disk storage space that can be freed up by removing old versions of updated or deleted rows, or reorganizing data log files or index blocks for efficiency. Collecting statistics is another potentially time-consuming task. It's required after a bulk data ingest to provide the query optimizer with up-to-date data on which to base query execution plans.
+
+Teradata recommends collecting statistics as follows:
+
+- Collect statistics on unpopulated tables to set up the interval histogram used in internal processing. This initial collection makes subsequent statistics collections faster. Make sure to recollect statistics after data is added.
+
+- Collect prototype phase statistics for newly populated tables.
+
+- Collect production phase statistics after a significant percentage of change to the table or partition (~10% of rows). For high volumes of nonunique values, such as dates or timestamps, it may be advantageous to recollect at 7%.
+
+- Collect production phase statistics after you've created users and applied real-world query loads to the database (up to about three months of querying).
+
+- Collect statistics in the first few weeks after an upgrade or migration during periods of low CPU utilization.
+
+Statistics collection can be managed manually using Automated Statistics Management open APIs or automatically using the Teradata Viewpoint Stats Manager portlet.
+
+> [!TIP]
+> Automate and monitor housekeeping tasks in Azure.
+
+Teradata Database contains many log tables in the Data Dictionary that accumulate data, either automatically or after certain features are enabled. Because log data grows over time, purge older information to avoid using up permanent space. Options are available to automate the maintenance of these logs. The Teradata dictionary tables that require maintenance are discussed next.
+
+#### Dictionary tables to maintain
+
+Reset accumulators and peak values using the `DBC.AMPUsage` view and the `ClearPeakDisk` macro provided with the software:
+
+- `DBC.Acctg`: resource usage by account/user
+
+- `DBC.DataBaseSpace`: database and table space accounting
+
+Teradata automatically maintains these tables, but good practices can reduce their size:
+
+- `DBC.AccessRights`: user rights on objects
+
+- `DBC.RoleGrants`: role rights on objects
+
+- `DBC.Roles`: defined roles
+
+- `DBC.Accounts`: account codes by user
+
+Archive these logging tables (if desired) and purge information 60-90 days old.
Retention depends on customer requirements: + +- `DBC.SW_Event_Log`: database console log + +- `DBC.ResUsage`: resource monitoring tables + +- `DBC.EventLog`: session logon/logoff history + +- `DBC.AccLogTbl`: logged user/object events + +- `DBC.DBQL tables`: logged user/SQL activity + +- `.NETSecPolicyLogTbl`: logs dynamic security policy audit trails + +- `.NETSecPolicyLogRuleTbl`: controls when and how dynamic security policy is logged + +Purge these tables when the associated removable media is expired and overwritten: + +- `DBC.RCEvent`: archive/recovery events + +- `DBC.RCConfiguration`: archive/recovery config + +- `DBC.RCMedia`: VolSerial for archive/recovery + +Azure Synapse has an option to automatically create statistics so that they can be used as needed. Perform defragmentation of indexes and data blocks manually, on a scheduled basis, or automatically. Leveraging native built-in Azure capabilities can reduce the effort required in a migration exercise. + +### Monitoring and auditing + +> [!TIP] +> Over time, several different tools have been implemented to allow monitoring and logging of Teradata systems. + +Teradata provides several tools to monitor the operation including Teradata Viewpoint and Ecosystem Manager. For logging query history, the Database Query Log (DBQL) is a Teradata database feature that provides a series of predefined tables that can store historical records of queries and their duration, performance, and target activity based on user-defined rules. + +Database administrators can use Teradata Viewpoint to determine system status, trends, and individual query status. By observing trends in system usage, system administrators are better able to plan project implementations, batch jobs, and maintenance to avoid peak periods of use. Business users can use Teradata Viewpoint to quickly access the status of reports and queries and drill down into details. + +> [!TIP] +> The Azure portal provides a UI to manage monitoring and auditing tasks for all Azure data and processes. + +Similarly, Azure Synapse provides a rich monitoring experience within the Azure portal to provide insights into your data warehouse workload. The Azure portal is the recommended tool when monitoring your data warehouse as it provides configurable retention periods, alerts, recommendations, and customizable charts and dashboards for metrics and logs. + +The portal also enables integration with other Azure monitoring services such as Operations Management Suite (OMS) and [Azure Monitor](../../monitoring/how-to-monitor-using-azure-monitor.md?msclkid=d5e9e46ecfe111ec8ba8ee5360e77c4c) (logs) to provide a holistic monitoring experience for not only the data warehouse but also the entire Azure analytics platform for an integrated monitoring experience. + +> [!TIP] +> Low-level and system-wide metrics are automatically logged in Azure Synapse. + +Resource utilization statistics for Azure Synapse are automatically logged within the system. The metrics for each query include usage statistics for CPU, memory, cache, I/O, and temporary workspace, as well as connectivity information like failed connection attempts. + +Azure Synapse provides a set of [Dynamic Management Views](../../sql-data-warehouse/sql-data-warehouse-manage-monitor.md?msclkid=3e6eefbccfe211ec82d019ada29b1834) (DMVs). These views are useful when actively troubleshooting and identifying performance bottlenecks with your workload. 
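+
+For example, the following minimal sketch uses the `sys.dm_pdw_exec_requests` DMV in a dedicated SQL pool to surface the longest-running recent requests, which is a typical first step when investigating a performance bottleneck:
+
+```sql
+-- List the ten longest-running recent requests, most expensive first.
+SELECT TOP 10
+       request_id,
+       session_id,
+       status,
+       submit_time,
+       total_elapsed_time,   -- elapsed time in milliseconds
+       resource_class,
+       command
+FROM sys.dm_pdw_exec_requests
+ORDER BY total_elapsed_time DESC;
+```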
+ +For more information, see [Azure Synapse operations and management options](/azure/sql-data-warehouse/sql-data-warehouse-how-to-manage-and-monitor-workload-importance). + +### High Availability (HA) and Disaster Recovery (DR) + +Teradata implements features such as `FALLBACK`, Archive Restore Copy utility (ARC), and Data Stream Architecture (DSA) to provide protection against data loss and high availability (HA) via replication and archive of data. Disaster Recovery (DR) options include Dual Active Solution, DR as a service, or a replacement system depending on the recovery time requirement. + +> [!TIP] +> Azure Synapse creates snapshots automatically to ensure fast recovery times. + +Azure Synapse uses database snapshots to provide high availability of the warehouse. A data warehouse snapshot creates a restore point that can be used to recover or copy a data warehouse to a previous state. Since Azure Synapse is a distributed system, a data warehouse snapshot consists of many files that are in Azure Storage. Snapshots capture incremental changes from the data stored in your data warehouse. + +Azure Synapse automatically takes snapshots throughout the day creating restore points that are available for seven days. This retention period can't be changed. Azure Synapse supports an eight-hour recovery point objective (RPO). A data warehouse can be restored in the primary region from any one of the snapshots taken in the past seven days. + +> [!TIP] +> Use user-defined snapshots to define a recovery point before key updates. + +User-defined restore points are also supported, allowing manual triggering of snapshots to create restore points of a data warehouse before and after large modifications. This capability ensures that restore points are logically consistent, which provides additional data protection in case of any workload interruptions or user errors for a desired RPO of less than 8 hours. + +> [!TIP] +> Microsoft Azure provides automatic backups to a separate geographical location to enable DR. + +As well as the snapshots described previously, Azure Synapse also performs as standard a geo-backup once per day to a [paired data center](/azure/best-practices-availability-paired-regions). The RPO for a geo-restore is 24 hours. You can restore the geo-backup to a server in any other region where Azure Synapse is supported. A geo-backup ensures that a data warehouse can be restored in case the restore points in the primary region aren't available. + +### Workload management + +> [!TIP] +> In a production data warehouse, there are typically mixed workloads with different resource usage characteristics running concurrently. + +A workload is a class of database requests with common traits whose access to the database can be managed with a set of rules. Workloads are useful for: + +- Setting different access priorities for different types of requests. + +- Monitoring resource usage patterns, performance tuning, and capacity planning. + +- Limiting the number of requests or sessions that can run at the same time. + +In a Teradata system, workload management is the act of managing workload performance by monitoring system activity and acting when pre-defined limits are reached. Workload management uses rules, and each rule applies only to some database requests. However, the collection of all rules applies to all active work on the platform. Teradata Active System Management (TASM) performs full workload management in a Teradata Database. 
+ +In Azure Synapse, resource classes are pre-determined resource limits that govern compute resources and concurrency for query execution. Resource classes can help you manage your workload by setting limits on the number of queries that run concurrently and on the compute resources assigned to each query. There's a trade-off between memory and concurrency. + +See [Resource classes for workload management](/azure/sql-data-warehouse/resource-classes-for-workload-management) for detailed information. + +This information can also be used for capacity planning, determining the resources required for additional users or application workload. This also applies to planning scale up/scale downs of compute resources for cost-effective support of "peaky" workloads. + +### Scale compute resources + +> [!TIP] +> A major benefit of Azure is the ability to independently scale up and down compute resources on demand to handle peaky workloads cost-effectively. + +The architecture of Azure Synapse separates storage and compute, allowing each to scale independently. As a result, [compute resources can be scaled](../../sql-data-warehouse/quickstart-scale-compute-portal.md) to meet performance demands independent of data storage. You can also pause and resume compute resources. A natural benefit of this architecture is that billing for compute and storage is separate. If a data warehouse isn't in use, you can save on compute costs by pausing compute. + +Compute resources can be scaled up or scaled back by adjusting the data warehouse units setting for the data warehouse. Loading and query performance will increase linearly as you add more data warehouse units. + +Adding more compute nodes adds more compute power and ability to leverage more parallel processing. As the number of compute nodes increases, the number of distributions per compute node decreases, providing more compute power and parallel processing for queries. Similarly, decreasing data warehouse units reduces the number of compute nodes, which reduces the compute resources for queries. + +## Next steps + +To learn more about visualization and reporting, see the next article in this series: [Visualization and reporting for Teradata migrations](4-visualization-reporting.md). \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/teradata/4-visualization-reporting.md b/articles/synapse-analytics/migration-guides/teradata/4-visualization-reporting.md new file mode 100644 index 0000000000000..acf4eb40a9d65 --- /dev/null +++ b/articles/synapse-analytics/migration-guides/teradata/4-visualization-reporting.md @@ -0,0 +1,318 @@ +--- +title: "Visualization and reporting for Teradata migrations" +description: Learn about Microsoft and third-party BI tools for reports and visualizations in Azure Synapse Analytics compared to Teradata. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/31/2022 +--- + +# Visualization and reporting for Teradata migrations + +This article is part four of a seven part series that provides guidance on how to migrate from Teradata to Azure Synapse Analytics. This article provides best practices for visualization and reporting. + +## Access Azure Synapse Analytics using Microsoft and third-party BI tools + +Almost every organization accesses data warehouses and data marts using a range of BI tools and applications, such as: + +- Microsoft BI tools, like Power BI. 
+ +- Office applications, like Microsoft Excel spreadsheets. + +- Third-party BI tools from various vendors. + +- Custom analytic applications that have embedded BI tool functionality inside the application. + +- Operational applications that request BI on demand, invoke queries and reports as-a-service on a BI platform, which in turn queries data in the data warehouse or data marts that are being migrated. + +- Interactive data science development tools, such as Azure Synapse Spark Notebooks, Azure Machine Learning, RStudio, and Jupyter Notebooks. + +The migration of visualization and reporting as part of a data warehouse migration program means that all the existing queries, reports, and dashboards generated and issued by these tools and applications need to run on Azure Synapse and yield the same results as they did in the original data warehouse prior to migration. + +> [!TIP] +> Existing users, user groups, roles and assignments of access security privileges need to be migrated first for migration of reports and visualizations to succeed. + +To make that happen, everything that BI tools and applications depend on still needs to work once you migrate your data warehouse schema and data to Azure Synapse. That includes the obvious and the not so obvious—such as access and security. Access and security are important considerations for data access in the migrated system, and are specifically discussed in [another guide](3-security-access-operations.md) in this series. When you address access and security, ensure that: + +- Authentication is migrated to let users sign in to the data warehouse and data mart databases on Azure Synapse. + +- All users are migrated to Azure Synapse. + +- All user groups are migrated to Azure Synapse. + +- All roles are migrated to Azure Synapse. + +- All authorization privileges governing access control are migrated to Azure Synapse. + +- User, role, and privilege assignments are migrated to mirror what you had on your existing data warehouse before migration. For example: + - Database object privileges assigned to roles + - Roles assigned to user groups + - Users assigned to user groups and/or roles + +> [!TIP] +> Communication and business user involvement is critical to success. + +In addition, all the required data needs to be migrated to ensure the same results appear in the same reports and dashboards that now query data on Azure Synapse. User expectation will undoubtedly be that migration is seamless and there will be no surprises that destroy their confidence in the migrated system on Azure Synapse. So, this is an area where you must take extreme care and communicate as much as possible to allay any fears in your user base. Their expectations are that: + +- Table structure will be the same if directly referred to in queries. + +- Table and column names remain the same if directly referred to in queries; for instance, so that calculated fields defined on columns in BI tools don't fail when aggregate reports are produced. + +- Historical analysis remains the same. + +- Data types should, if possible, remain the same. + +- Query behavior remains the same. + +- ODBC/JDBC drivers are tested to make sure nothing has changed in terms of query behavior. + +> [!TIP] +> Views and SQL queries using proprietary SQL query extensions are likely to result in incompatibilities that impact BI reports and dashboards. + +If BI tools are querying views in the underlying data warehouse or data mart database, then will these views still work? 
You might think yes, but if there are proprietary SQL extensions specific to your legacy data warehouse DBMS in these views that have no equivalent in Azure Synapse, you'll need to know about them and find a way to resolve them. + +Other issues like the behavior of nulls or data type variations across DBMS platforms need to be tested, in case they cause slightly different calculation results. Obviously, you want to minimize these issues and take all necessary steps to shield business users from any kind of impact. Depending on your legacy data warehouse system (such as Teradata), there are [tools](../../partner/data-integration.md) that can help hide these differences so that BI tools and applications are kept unaware of them and can run unchanged. + +> [!TIP] +> Use repeatable tests to ensure reports, dashboards, and other visualizations migrate successfully. + +Testing is critical to visualization and report migration. You need a test suite and agreed-on test data to run and rerun tests in both environments. A test harness is also useful, and a few are mentioned later in this guide. In addition, it's also important to have significant business involvement in this area of migration to keep confidence high and to keep them engaged and part of the project. + +Finally, you may also be thinking about switching BI tools. For example, you might want to [migrate to Power BI](/power-bi/guidance/powerbi-migration-overview). The temptation is to do all of this at the same time, while migrating your schema, data, ETL processing, and more. However, to minimize risk, it's better to migrate to Azure Synapse first and get everything working before undertaking further modernization. + +If your existing BI tools run on premises, ensure that they're able to connect to Azure Synapse through your firewall to run comparisons against both environments. Alternatively, if the vendor of your existing BI tools offers their product on Azure, you can try it there. The same applies for applications running on premises that embed BI or that call your BI server on-demand, requesting a "headless report" with data returned in XML or JSON, for example. + +There's a lot to think about here, so let's look at all this in more detail. + +> [!TIP] +> A lift and shift data warehouse migration is likely to minimize any disruption to reports, dashboards, and other visualizations. + +## Minimize the impact of data warehouse migration on BI tools and reports by using data virtualization + +> [!TIP] +> Data virtualization allows you to shield business users from structural changes during migration so that they remain unaware of changes. + +The temptation during data warehouse migration to the cloud is to take the opportunity to make changes during the migration to fulfill long-term requirements, such as opening business requests, missing data, new features, and more. However, these changes can affect the BI tools accessing your data warehouse, especially if it involves structural changes in your data model. If you want to adopt an agile data modeling technique or implement structural changes, do so *after* migration. + +One way in which you can minimize the impact of things like schema changes on BI tools is to introduce data virtualization between BI tools and your data warehouse and data marts. The following diagram shows how data virtualization can hide the migration from users. 
+ +:::image type="content" source="../media/4-visualization-reporting/migration-data-virtualization.png" border="true" alt-text="Diagram showing how to hide the migration from users through data virtualization."::: + +This breaks the dependency between business users utilizing self-service BI tools and the physical schema of the underlying data warehouse and data marts that are being migrated. + +> [!TIP] +> Schema alterations to tune your data model for Azure Synapse can be hidden from users. + +By introducing data virtualization, any schema alterations made during data warehouse and data mart migration to Azure Synapse (to optimize performance, for example) can be hidden from business users because they only access virtual tables in the data virtualization layer. If structural changes are needed, only the mappings between the data warehouse or data marts, and any virtual tables would need to be changed so that users remain unaware of those changes and unaware of the migration. [Microsoft partners](../../partner/data-integration.md) provide useful data virtualization software. + +## Identify high priority reports to migrate first + +A key question when migrating your existing reports and dashboards to Azure Synapse is which ones to migrate first. Several factors can drive the decision. For example: + +- Business value + +- Usage + +- Ease of migration + +- Data migration strategy + +These factors are discussed in more detail later in this article. + +Whatever the decision is, it must involve the business, since they produce the reports and dashboards, and consume the insights these artifacts provide in support of the decisions that are made around your business. That said, if most reports and dashboards can be migrated seamlessly, with minimal effort, and offer up like-for-like results, simply by pointing your BI tool(s) at Azure Synapse, instead of your legacy data warehouse system, then everyone benefits. + +### Migrate reports based on usage + +Usage is interesting, since it's an indicator of business value. Reports and dashboards that are never used clearly aren't contributing to supporting any decisions and don't currently offer any value. So, do you have any mechanism for finding out which reports and dashboards are currently not used? Several BI tools provide statistics on usage, which would be an obvious place to start. + +If your legacy data warehouse has been up and running for many years, there's a high chance you could have hundreds, if not thousands, of reports in existence. In these situations, usage is an important indicator of the business value of a specific report or dashboard. In that sense, it's worth compiling an inventory of the reports and dashboards you have and defining their business purpose and usage statistics. + +For those that aren't used at all, it's an appropriate time to seek a business decision, to determine if it's necessary to decommission those reports to optimize your migration efforts. A key question worth asking when deciding to decommission unused reports is: are they unused because people don't know they exist, or is it because they offer no business value, or have they been superseded by others? + +### Migrate reports based on business value + +Usage on its own isn't a clear indicator of business value. There needs to be a deeper business context to determine the value to the business. In an ideal world, we would like to know the contribution of the insights produced in a report to the bottom line of the business. 
That's exceedingly difficult to determine, since every decision made, and its dependency on the insights in a specific report, would need to be recorded along with the contribution that each decision makes to the bottom line of the business. You would also need to do this over time. + +This level of detail is unlikely to be available in most organizations. One way in which you can get deeper on business value to drive migration order is to look at alignment with business strategy. A business strategy set by your executive typically lays out strategic business objectives, key performance indicators (KPIs), KPI targets that need to be achieved, and who is accountable for achieving them. In that sense, classifying your reports and dashboards by strategic business objectives—for example, reduce fraud, improve customer engagement, and optimize business operations—will help understand business purpose and show what objective(s), specific reports, and dashboards these are contributing to. Reports and dashboards associated with high priority objectives in the business strategy can then be highlighted so that migration is focused on delivering business value in a strategic high priority area. + +It's also worthwhile to classify reports and dashboards as operational, tactical, or strategic, to understand the level in the business where they're used. Delivering strategic business objectives requires contribution at all these levels. Knowing which reports and dashboards are used, at what level, and what objectives they're associated with helps to focus migration on high priority business value that will drive the company forward. Business contribution of reports and dashboards is needed to understand this, perhaps like what is shown in the following **business strategy objective** table. + +| **Level** | **Report / dashboard name** | **Business purpose** | **Department used** | **Usage frequency** | **Business priority** | +|-|-|-|-|-|-| +| **Strategic** | | | | | | +| **Tactical** | | | | | | +| **Operational** | | | | | | + +While this may seem too time consuming, you need a mechanism to understand the contribution of reports and dashboards to business value, whether you're migrating or not. Catalogs like Azure Data Catalog are becoming very important because they give you the ability to catalog reports and dashboards, automatically capture the metadata associated with them, and let business users tag and rate them to help you understand business value. + +### Migrate reports based on data migration strategy + +> [!TIP] +> Data migration strategy could also dictate which reports and visualizations get migrated first. + +If your migration strategy is based on migrating data marts first, the order of data mart migration will have a bearing on which reports and dashboards can be migrated first to run on Azure Synapse. Again, this is likely to be a business-value-related decision. Prioritizing which data marts are migrated first reflects business priorities. Metadata discovery tools can help you here by showing you which reports rely on data in which data mart tables. + +## Migration incompatibility issues that can impact reports and visualizations + +When it comes to migrating to Azure Synapse, there are several things that can impact the ease of migration for reports, dashboards, and other visualizations. The ease of migration is affected by: + +- Incompatibilities that occur during schema migration between your legacy data warehouse and Azure Synapse. 
+ +- Incompatibilities in SQL between your legacy data warehouse and Azure Synapse. + +### The impact of schema incompatibilities + +> [!TIP] +> Schema incompatibilities include legacy warehouse DBMS table types and data types that are unsupported on Azure Synapse. + +BI tool reports and dashboards, and other visualizations, are produced by issuing SQL queries that access physical tables and/or views in your data warehouse or data mart. When it comes to migrating your data warehouse or data mart schema to Azure Synapse, there may be incompatibilities that can impact reports and dashboards, such as: + +- Non-standard table types supported in your legacy data warehouse DBMS that don't have an equivalent in Azure Synapse, for example Teradata Time-Series tables. + +- Data types supported in your legacy data warehouse DBMS that don't have an equivalent in Azure Synapse, for example Teradata Geospatial or Interval data types. + +In many cases, where there are incompatibilities, there may be ways around them. For example, the data in unsupported table types can be migrated into a standard table with appropriate data types and indexed or partitioned on a date/time column. Similarly, it may be possible to represent unsupported data types in another type of column and perform calculations in Azure Synapse to achieve the same. Either way, it will need refactoring. + +> [!TIP] +> Querying the system catalog of your legacy warehouse DBMS is a quick and straightforward way to identify schema incompatibilities with Azure Synapse. + +To identify reports and visualizations impacted by schema incompatibilities, run queries against the system catalog of your legacy data warehouse to identify tables with unsupported data types. Then use metadata from your BI tool or tools to identify reports that access these structures, to see what could be impacted. Obviously, this will depend on the legacy data warehouse DBMS you're migrating from. Find details of how to identify these incompatibilities in [Design and performance for Teradata migrations](1-design-performance-migration.md). + +The impact may be less than you think, because many BI tools don't support such data types. As a result, views may already exist in your legacy data warehouse that `CAST` unsupported data types to more generic types. + +### The impact of SQL incompatibilities and differences + +Additionally, any report, dashboard, or other visualization in an application or tool that makes use of proprietary SQL extensions associated with your legacy data warehouse DBMS is likely to be impacted when migrating to Azure Synapse. This could happen because the BI tool or application: + +- Accesses legacy data warehouse DBMS views that include proprietary SQL functions that have no equivalent in Azure Synapse. + +- Issues SQL queries, which include proprietary SQL functions peculiar to the SQL dialect of your legacy data warehouse DBMS, that have no equivalent in Azure Synapse. + +### Gauge the impact of SQL incompatibilities on your reporting portfolio + +You can't rely on documentation associated with reports, dashboards, and other visualizations to gauge how big of an impact SQL incompatibility may have on the portfolio of embedded query services, reports, dashboards, and other visualizations you're intending to migrate to Azure Synapse. There must be a more precise way of doing that. 
+
+#### Use EXPLAIN statements to find SQL incompatibilities
+
+> [!TIP]
+> Gauge the impact of SQL incompatibilities by harvesting your DBMS log files and running `EXPLAIN` statements.
+
+One way is to get hold of the SQL log files of your legacy data warehouse. Use a script to pull out a representative set of SQL statements into a file, prefix each SQL statement with an `EXPLAIN` statement, and then run all the `EXPLAIN` statements in Azure Synapse. Any SQL statements containing proprietary SQL extensions from your legacy data warehouse that are unsupported will be rejected by Azure Synapse when the `EXPLAIN` statements are executed. This approach at least gives you an idea of how significant the use of incompatible SQL is.
+
+Metadata from your legacy data warehouse DBMS will also help you when it comes to views. Again, you can capture and view SQL statements, and `EXPLAIN` them as described previously to identify incompatible SQL in views.
+
+## Test report and dashboard migration to Azure Synapse Analytics
+
+> [!TIP]
+> Test performance and tune to minimize compute costs.
+
+A key element in data warehouse migration is the testing of reports and dashboards against Azure Synapse to verify that the migration has worked. To do this, you need to define a series of tests and a set of required outcomes for each test that needs to be run to verify success. It's important to ensure that reports and dashboards are tested and compared across your existing and migrated data warehouse systems to:
+
+- Identify whether schema changes made during migration, such as data types to be converted, have impacted the ability of reports to run, their results, and the corresponding visualizations.
+
+- Verify all users are migrated.
+
+- Verify all roles are migrated and users assigned to those roles.
+
+- Verify all data access security privileges are migrated to ensure access control list (ACL) migration.
+
+- Ensure consistent results of all known queries, reports, and dashboards.
+
+- Ensure that data and ETL migration is complete and error-free.
+
+- Ensure data privacy is upheld.
+
+- Test performance and scalability.
+
+- Test analytical functionality.
+
+For information about how to migrate users, user groups, roles, and privileges, see [Security, access, and operations for Teradata migrations](3-security-access-operations.md), which is part of this series.
+
+> [!TIP]
+> Build an automated test suite to make tests repeatable.
+
+It's also best practice to automate testing as much as possible, to make each test repeatable and to allow a consistent approach to evaluating results. This works well for known regular reports, and could be managed via [Azure Synapse Pipelines](../../get-started-pipelines.md?msclkid=8f3e7e96cfed11eca432022bc07c18de) or [Azure Data Factory](../../../data-factory/introduction.md?msclkid=2ccc66eccfde11ecaa58877e9d228779) orchestration. If you already have a suite of test queries in place for regression testing, you could use the testing tools to automate the post-migration testing.
+
+> [!TIP]
+> Leverage tools that can compare metadata lineage to verify results.
+
+Ad-hoc analysis and reporting are more challenging and require a set of tests to be compiled to verify that results are consistent across your legacy data warehouse DBMS and Azure Synapse.
If reports and dashboards are inconsistent, then having the ability to compare metadata lineage across original and migrated systems is extremely valuable during migration testing, as it can highlight differences and pinpoint where they occurred when these aren't easy to detect. This is discussed in more detail later in this article. + +In terms of security, the best way to do this is to create roles, assign access privileges to roles, and then attach users to roles. To access your newly migrated data warehouse, set up an automated process to create new users, and to do role assignment. To detach users from roles, you can follow the same steps. + +It's also important to communicate the cutover to all users, so they know what's changing and what to expect. + +## Analyze lineage to understand dependencies between reports, dashboards, and data + +> [!TIP] +> Having access to metadata and data lineage from reports all the way back to data source is critical for verifying that migrated reports are working correctly. + +A critical success factor in migrating reports and dashboards is understanding lineage. Lineage is metadata that shows the journey that data has taken, so you can see the path from the report/dashboard all the way back to where the data originates. It shows how data has gone from point to point, its location in the data warehouse and/or data mart, and where it's used—for example, in what reports. It helps you understand what happens to data as it travels through different data stores—files and database—different ETL pipelines, and into reports. If business users have access to data lineage, it improves trust, breeds confidence, and enables more informed business decisions. + +> [!TIP] +> Tools that automate metadata collection and show end-to-end lineage in a multi-vendor environment are valuable when it comes to migration. + +In multi-vendor data warehouse environments, business analysts in BI teams may map out data lineage. For example, if you have Informatica for your ETL, Oracle for your data warehouse, and Tableau for reporting, each of which have their own metadata repository, figuring out where a specific data element in a report came from can be challenging and time consuming. + +To migrate seamlessly from a legacy data warehouse to Azure Synapse, end-to-end data lineage helps prove like-for-like migration when comparing reports and dashboards against your legacy environment. That means that metadata from several tools needs to be captured and integrated to show the end-to-end journey. Having access to tools that support automated metadata discovery and data lineage will let you see duplicate reports and ETL processes and reports that rely on data sources that are obsolete, questionable, or even non-existent. With this information, you can reduce the number of reports and ETL processes that you migrate. + +You can also compare end-to-end lineage of a report in Azure Synapse against the end-to-end lineage for the same report in your legacy data warehouse environment, to see if there are any differences that have occurred inadvertently during migration. This helps enormously with testing and verifying migration success. + +Data lineage visualization not only reduces time, effort, and error in the migration process, but also enables faster execution of the migration project. 
+ +By leveraging automated metadata discovery and data lineage tools that can compare lineage, you can verify if a report is produced using data migrated to Azure Synapse and if it's produced in the same way as in your legacy environment. This kind of capability also helps you determine: + +- What data needs to be migrated to ensure successful report and dashboard execution on Azure Synapse. + +- What transformations have been and should be performed to ensure successful execution on Azure Synapse. + +- How to reduce report duplication. + +This substantially simplifies the data migration process, because the business will have a better idea of the data assets it has and what needs to be migrated to enable a solid reporting environment on Azure Synapse. + +> [!TIP] +> Azure Data Factory and several third-party ETL tools support lineage. + +Several ETL tools provide end-to-end lineage capability, and you may be able to make use of this via your existing ETL tool if you're continuing to use it with Azure Synapse. [Azure Synapse Pipelines](../../get-started-pipelines.md?msclkid=8f3e7e96cfed11eca432022bc07c18de) or [Azure Data Factory](../../../data-factory/introduction.md?msclkid=2ccc66eccfde11ecaa58877e9d228779) lets you view lineage in mapping flows. Also, [Microsoft partners](../../partner/data-integration.md) provide automated metadata discovery, data lineage, and lineage comparison tools. + +## Migrate BI tool semantic layers to Azure Synapse Analytics + +> [!TIP] +> Some BI tools have semantic layers that simplify business user access to physical data structures in your data warehouse or data mart, like SAP Business Objects and IBM Cognos. + +Some BI tools have what is known as a semantic metadata layer. The role of this metadata layer is to simplify business user access to physical data structures in an underlying data warehouse or data mart database. It does this by providing high-level objects like dimensions, measures, hierarchies, calculated metrics, and joins. These objects use business terms familiar to business analysts and are mapped to the physical data structures in the data warehouse or data mart database. + +When it comes to data warehouse migration, changes to column names or table names may be forced upon you. For example, in Oracle, table names can have a "#". In Azure Synapse, the "#" is only allowed as a prefix to a table name to indicate a temporary table. Therefore, you may need to change a table name if migrating from Oracle. You may need to do rework to change mappings in such cases. + +A good way to get everything consistent across multiple BI tools is to create a universal semantic layer, using common data names for high-level objects like dimensions, measures, hierarchies, and joins, in a data virtualization server (as shown in the next diagram) that sits between applications, BI tools, and Azure Synapse. This allows you to set up everything once (instead of in every tool), including calculated fields, joins and mappings, and then point all BI tools at the data virtualization server. + +> [!TIP] +> Use data virtualization to create a common semantic layer to guarantee consistency across all BI tools in an Azure Synapse environment. + +In this way, you get consistency across all BI tools, while at the same time breaking the dependency between BI tools and applications and the underlying physical data structures in Azure Synapse. Use [Microsoft partners](../../partner/data-integration.md) on Azure to implement this. 
The following diagram shows how a common vocabulary in the data virtualization server lets multiple BI tools see a common semantic layer.

:::image type="content" source="../media/4-visualization-reporting/data-virtualization-semantics.png" border="true" alt-text="Diagram with common data names and definitions that relate to the data virtualization server.":::

## Conclusions

> [!TIP]
> Identify incompatibilities early to gauge the extent of the migration effort. Migrate your users, group roles, and privilege assignments. Only migrate the reports and visualizations that are used and are contributing to business value.

In a lift and shift data warehouse migration to Azure Synapse, most reports and dashboards should migrate easily.

However, issues can arise if data structures change, if data is stored in unsupported data types, or if access to data in the data warehouse or data mart is via a view that includes proprietary SQL that's unsupported in your Azure Synapse environment. You'll need to deal with those issues if they arise.

You can't rely on documentation to find out where the issues are likely to be. Making use of `EXPLAIN` statements is a pragmatic and quick way to identify incompatibilities in SQL, which you can then rework to achieve similar results in Azure Synapse. In addition, it's recommended that you use automated metadata discovery and lineage tools to identify duplicate reports and reports that are no longer valid because they rely on obsolete data sources, and to understand dependencies. Some of these tools help compare lineage to verify that reports running in your legacy data warehouse environment are produced identically in Azure Synapse.

Don't migrate reports that you no longer use. BI tool usage data can help determine which ones aren't in use. For the visualizations and reports that you do want to migrate, migrate all users, user groups, roles, and privileges, and associate these reports with strategic business objectives and priorities to help you identify the insight each report contributes to specific objectives. This is useful if you're using business value to drive your report migration strategy. If you're migrating by data store, data mart by data mart, metadata will also help you identify which reports depend on which tables and views, so that you can focus on migrating those first.

Finally, consider data virtualization to shield BI tools and applications from structural changes to the data warehouse and/or data mart data model that may occur during migration. You can also use a common vocabulary with data virtualization to define a common semantic layer that guarantees consistent common data names, definitions, metrics, hierarchies, joins, and more across all BI tools and applications in a migrated Azure Synapse environment.

## Next steps

To learn more about minimizing SQL issues, see the next article in this series: [Minimizing SQL issues for Teradata migrations](5-minimize-sql-issues.md).
\ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/teradata/5-minimize-sql-issues.md b/articles/synapse-analytics/migration-guides/teradata/5-minimize-sql-issues.md new file mode 100644 index 0000000000000..b65a7ea1d55c8 --- /dev/null +++ b/articles/synapse-analytics/migration-guides/teradata/5-minimize-sql-issues.md @@ -0,0 +1,378 @@ +--- +title: "Minimize SQL issues for Teradata migrations" +description: Learn how to minimize the risk of SQL issues when migrating from Teradata to Azure Synapse Analytics. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/31/2022 +--- + +# Minimize SQL issues for Teradata migrations + +This article is part five of a seven part series that provides guidance on how to migrate from Teradata to Azure Synapse Analytics. This article provides best practices for minimizing SQL issues. + +## Overview + +### Characteristics of Teradata environments + +> [!TIP] +> Teradata pioneered large scale SQL databases using MPP in the 1980s. + +In 1984, Teradata initially released their database product. It introduced massively parallel processing (MPP) techniques to enable data processing at a scale more efficiently than the existing mainframe technologies available at the time. Since then, the product has evolved and has many installations among large financial institutions, telecommunications, and retail companies. The original implementation used proprietary hardware and was channel attached to mainframes—typically IBM or IBM-compatible processors. + +While more recent announcements have included network connectivity and the availability of the Teradata technology stack in the cloud (including Azure), most existing installations are on premises, so many users are considering migrating some or all their Teradata data to Azure Synapse Analytics to gain the benefits of a move to a modern cloud environment. + +> [!TIP] +> Many existing Teradata installations are data warehouses using a dimensional data model. + +Teradata technology is often used to implement a data warehouse, supporting complex analytic queries on large data volumes using SQL. Dimensional data models—star or snowflake schemas—are common, as is the implementation of data marts for individual departments. + +This combination of SQL and dimensional data models simplifies migration to Azure Synapse, since the basic concepts and SQL skills are transferable. The recommended approach is to migrate the existing data model as-is to reduce risk and time taken. Even if the eventual intention is to make changes to the data model (for example, moving to a data vault model), perform an initial as-is migration and then make changes within the Azure cloud environment, leveraging the performance, elastic scalability, and cost advantages there. + +While the SQL language has been standardized, individual vendors have in some cases implemented proprietary extensions. This document highlights potential SQL differences you may encounter while migrating from a legacy Teradata environment, and provides workarounds. + +### Use an Azure VM Teradata instance as part of a migration + +> [!TIP] +> Use an Azure VM to create a temporary Teradata instance to speed up migration and minimize impact on the source system. + +Leverage the Azure environment when running a migration from an on-premises Teradata environment. 
Azure provides affordable cloud storage and elastic scalability to create a Teradata instance within a VM in Azure, collocated with the target Azure Synapse environment. + +With this approach, standard Teradata utilities such as Teradata Parallel Data Transporter (or third-party data replication tools such as Attunity Replicate) can be used to efficiently move the subset of Teradata tables that are to be migrated onto the VM instance, and then all migration tasks can take place within the Azure environment. This approach has several benefits: + +- After the initial replication of data, the source system isn't impacted by the migration tasks. + +- The familiar Teradata interfaces, tools, and utilities are available within the Azure environment. + +- Once in the Azure environment there are no potential issues with network bandwidth availability between the on-premises source system and the cloud target system. + +- Tools such as Azure Data Factory can efficiently call utilities such as Teradata Parallel Transporter to migrate data quickly and easily. + +- The migration process is orchestrated and controlled entirely within the Azure environment. + +### Use Azure Data Factory to implement a metadata-driven migration + +> [!TIP] +> Automate the migration process by using Azure Data Factory capabilities. + +Automate and orchestrate the migration process by making use of the capabilities in the Azure environment. This approach also minimizes the migration's impact on the existing Teradata environment, which may already be running close to full capacity. + +Azure Data Factory is a cloud-based data integration service that allows creation of data-driven workflows in the cloud for orchestrating and automating data movement and data transformation. Using Data Factory, you can create and schedule data-driven workflows—called pipelines—that can ingest data from disparate data stores. It can process and transform data by using compute services such as Azure HDInsight Hadoop, Spark, Azure Data Lake Analytics, and Azure Machine Learning. + +By creating metadata to list the data tables to be migrated and their location, you can use the Data Factory facilities to manage and automate parts of the migration process. You can also use [Azure Synapse Pipelines](../../get-started-pipelines.md?msclkid=8f3e7e96cfed11eca432022bc07c18de). + +## SQL DDL differences between Teradata and Azure Synapse + +### SQL Data Definition Language (DDL) + +> [!TIP] +> SQL DDL commands `CREATE TABLE` and `CREATE VIEW` have standard core elements but are also used to define implementation-specific options. + +The ANSI SQL standard defines the basic syntax for DDL commands such as `CREATE TABLE` and `CREATE VIEW`. These commands are used within both Teradata and Azure Synapse, but they've also been extended to allow definition of implementation-specific features such as indexing, table distribution, and partitioning options. + +The following sections discuss Teradata-specific options to consider during a migration to Azure Synapse. + +### Table considerations + +> [!TIP] +> Use existing indexes to give an indication of candidates for indexing in the migrated warehouse. + +When migrating tables between different technologies, only the raw data and its descriptive metadata get physically moved between the two environments. Other database elements from the source system, such as indexes and log files, aren't directly migrated as these may not be needed or may be implemented differently within the new target environment. 
For example, there's no equivalent of the `MULTISET` option within Teradata's `CREATE TABLE` syntax. + +It's important to understand where performance optimizations—such as indexes—were used in the source environment. This indicates where performance optimization can be added in the new target environment. For example, if a non-unique secondary index (NUSI) has been created in the source Teradata environment, this might indicate that a non-clustered index should be created in the migrated Azure Synapse database. Other native performance optimization techniques, such as table replication, may be more applicable than a straight "like-for-like" index creation. + +### Unsupported Teradata table types + +> [!TIP] +> Standard tables within Azure Synapse can support migrated Teradata time-series and temporal tables. + +Teradata includes support for special table types for time-series and temporal data. The syntax and some of the functions for these table types aren't directly supported within Azure Synapse, but the data can be migrated into a standard table with appropriate data types and indexing or partitioning on the date/time column. + +Teradata implements the temporal query functionality via query rewriting to add additional filters within a temporal query to limit the applicable date range. If this functionality is currently in use within the source Teradata environment and is to be migrated, then this additional filtering will need to be added into the relevant temporal queries. + +The Azure environment also includes specific features for complex analytics on time-series data at scale called [time series insights](https://azure.microsoft.com/services/time-series-insights/)—this is aimed at IoT data analysis applications and may be more appropriate for this use-case. + +### Teradata data type mapping + +> [!TIP] +> Assess the impact of unsupported data types as part of the preparation phase. + +Most Teradata data types have a direct equivalent in Azure Synapse. This table shows these data types together with the recommended approach for handling them. In the table, Teradata column type is the type that's stored within the system catalog—for example, in `DBC.ColumnsV`. + +| Teradata column type | Teradata data type | Azure Synapse data type | +|----------------------|--------------------|----------------| +| ++ | TD_ANYTYPE | Not supported in Azure Synapse | +| A1 | ARRAY | Not supported in Azure Synapse | +| AN | ARRAY | Not supported in Azure Synapse | +| AT | TIME | TIME | +| BF | BYTE | BINARY | +| BO | BLOB | BLOB data type isn't directly supported but can be replaced with BINARY. | +| BV | VARBYTE | BINARY | +| CF | VARCHAR | CHAR | +| CO | CLOB | CLOB data type isn't directly supported but can be replaced with VARCHAR. | +| CV | VARCHAR | VARCHAR | +| D | DECIMAL | DECIMAL | +| DA | DATE | DATE | +| DH | INTERVAL DAY TO HOUR | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD). | +| DM | INTERVAL DAY TO MINUTE | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD). | +| DS | INTERVAL DAY TO SECOND | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD). | +| DT | DATASET | DATASET data type is supported in Azure Synapse. 
| +| DY | INTERVAL DAY | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD). | +| F | FLOAT | FLOAT | +| HM | INTERVAL HOUR TO MINUTE | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD). | +| HR | INTERVAL HOUR | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD). | +| HS | INTERVAL HOUR TO SECOND | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD). | +| I1 | BYTEINT | TINYINT | +| I2 | SMALLINT | SMALLINT | +| I8 | BIGINT | BIGINT | +| I | INTEGER | INT | +| JN | JSON | JSON data type isn't currently directly supported within Azure Synapse, but JSON data can be stored in a VARCHAR field. | +| MI | INTERVAL MINUTE | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD). | +| MO | INTERVAL MONTH | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD). | +| MS | INTERVAL MINUTE TO SECOND | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD). | +| N | NUMBER | NUMERIC | +| PD | PERIOD(DATE) | Can be converted to VARCHAR or split into two separate dates | +| PM | PERIOD (TIMESTAMP WITH TIME ZONE) | Can be converted to VARCHAR or split into two separate timestamps (DATETIMEOFFSET) | +| PS | PERIOD(TIMESTAMP) | Can be converted to VARCHAR or split into two separate timestamps (DATETIMEOFFSET) | +| PT | PERIOD(TIME) | Can be converted to VARCHAR or split into two separate times | +| PZ | PERIOD (TIME WITH TIME ZONE) | Can be converted to VARCHAR or split into two separate times but WITH TIME ZONE isn't supported for TIME | +| SC | INTERVAL SECOND | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD). | +| SZ | TIMESTAMP WITH TIME ZONE | DATETIMEOFFSET | +| TS | TIMESTAMP | DATETIME or DATETIME2 | +| TZ | TIME WITH TIME ZONE | TIME WITH TIME ZONE isn't supported because TIME is stored using \"wall clock\" time only without a time zone offset. | +| XM | XML | XML data type isn't currently directly supported within Azure Synapse, but XML data can be stored in a VARCHAR field. | +| YM | INTERVAL YEAR TO MONTH | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD). | +| YR | INTERVAL YEAR | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD). | + +Use the metadata from the Teradata catalog tables to determine whether any of these data types are to be migrated and allow for this in the migration plan. For example, use a SQL query like this one to find any occurrences of unsupported data types that need attention. 
```sql
SELECT
    ColumnType,
    CASE
        WHEN ColumnType = '++' THEN 'TD_ANYTYPE'
        WHEN ColumnType = 'A1' THEN 'ARRAY'
        WHEN ColumnType = 'AN' THEN 'ARRAY'
        WHEN ColumnType = 'BO' THEN 'BLOB'
        WHEN ColumnType = 'CO' THEN 'CLOB'
        WHEN ColumnType = 'DH' THEN 'INTERVAL DAY TO HOUR'
        WHEN ColumnType = 'DM' THEN 'INTERVAL DAY TO MINUTE'
        WHEN ColumnType = 'DS' THEN 'INTERVAL DAY TO SECOND'
        WHEN ColumnType = 'DT' THEN 'DATASET'
        WHEN ColumnType = 'DY' THEN 'INTERVAL DAY'
        WHEN ColumnType = 'HM' THEN 'INTERVAL HOUR TO MINUTE'
        WHEN ColumnType = 'HR' THEN 'INTERVAL HOUR'
        WHEN ColumnType = 'HS' THEN 'INTERVAL HOUR TO SECOND'
        WHEN ColumnType = 'JN' THEN 'JSON'
        WHEN ColumnType = 'MI' THEN 'INTERVAL MINUTE'
        WHEN ColumnType = 'MO' THEN 'INTERVAL MONTH'
        WHEN ColumnType = 'MS' THEN 'INTERVAL MINUTE TO SECOND'
        WHEN ColumnType = 'PD' THEN 'PERIOD(DATE)'
        WHEN ColumnType = 'PM' THEN 'PERIOD (TIMESTAMP WITH TIME ZONE)'
        WHEN ColumnType = 'PS' THEN 'PERIOD(TIMESTAMP)'
        WHEN ColumnType = 'PT' THEN 'PERIOD(TIME)'
        WHEN ColumnType = 'PZ' THEN 'PERIOD (TIME WITH TIME ZONE)'
        WHEN ColumnType = 'SC' THEN 'INTERVAL SECOND'
        WHEN ColumnType = 'SZ' THEN 'TIMESTAMP WITH TIME ZONE'
        WHEN ColumnType = 'XM' THEN 'XML'
        WHEN ColumnType = 'YM' THEN 'INTERVAL YEAR TO MONTH'
        WHEN ColumnType = 'YR' THEN 'INTERVAL YEAR'
    END AS Data_Type,
    COUNT(*) AS Data_Type_Count
FROM DBC.ColumnsV
WHERE DatabaseName IN ('UserDB1', 'UserDB2', 'UserDB3') -- select databases to be migrated
GROUP BY 1, 2
ORDER BY 1;
```

> [!TIP]
> Third-party tools and services can automate data mapping tasks.

There are third-party vendors who offer tools and services to automate migration, including the mapping of data types. If a third-party ETL tool such as Informatica or Talend is already in use in the Teradata environment, those tools can implement any required data transformations.

### Data Definition Language (DDL) generation

> [!TIP]
> Use existing Teradata metadata to automate the generation of `CREATE TABLE` and `CREATE VIEW` DDL for Azure Synapse.

Edit existing Teradata `CREATE TABLE` and `CREATE VIEW` scripts to create the equivalent definitions with modified data types as described previously if necessary. Typically, this involves removing extra Teradata-specific clauses such as `FALLBACK` or `MULTISET`.

However, all the information that specifies the current definitions of tables and views within the existing Teradata environment is maintained within system catalog tables. This is the best source of this information as it's guaranteed to be up to date and complete. Be aware that user-maintained documentation may not be in sync with the current table definitions.

Access this information via views onto the catalog such as `DBC.ColumnsV` and generate the equivalent `CREATE TABLE` DDL statements for the equivalent tables in Azure Synapse.

> [!TIP]
> Third-party tools and services can automate data mapping tasks.

There are [Microsoft partners](../../partner/data-integration.md) who offer tools and services to automate migration, including data-type mapping. Also, if a third-party ETL tool such as Informatica or Talend is already in use in the Teradata environment, that tool can implement any required data transformations.

## SQL DML differences between Teradata and Azure Synapse

### SQL Data Manipulation Language (DML)

> [!TIP]
> SQL DML commands `SELECT`, `INSERT`, and `UPDATE` have standard core elements but may also implement different syntax options.
+ +The ANSI SQL standard defines the basic syntax for DML commands such as `SELECT`, `INSERT`, `UPDATE`, and `DELETE`. Both Teradata and Azure Synapse use these commands, but in some cases there are implementation differences. + +The following sections discuss the Teradata-specific DML commands that you should consider during a migration to Azure Synapse. + +### SQL DML syntax differences + +Be aware of these differences in SQL Data Manipulation Language (DML) syntax between Teradata SQL and Azure Synapse (T-SQL) when migrating: + +- `QUALIFY`: Teradata supports the `QUALIFY` operator. For example: + + ```sql + SELECT col1 + FROM tab1 + WHERE col1='XYZ' + QUALIFY ROW_NUMBER () OVER (PARTITION by + col1 ORDER BY col1) = 1; + ``` + + The equivalent Azure Synapse syntax is: + + ```sql + SELECT * FROM ( + SELECT col1, ROW_NUMBER () OVER (PARTITION by col1 ORDER BY col1) rn + FROM tab1 WHERE col1='XYZ' + ) WHERE rn = 1; + ``` + +- Date arithmetic: Azure Synapse has operators such as `DATEADD` and `DATEDIFF` which can be used on `DATE` or `DATETIME` fields. Teradata supports direct subtraction on dates such as `SELECT DATE1 - DATE2 FROM...` + +- In `GROUP BY` ordinal, explicitly provide the T-SQL column name. + +- `LIKE ANY`: Teradata supports `LIKE ANY` syntax such as: + + ```sql + SELECT * FROM CUSTOMER + WHERE POSTCODE LIKE ANY + ('CV1%', 'CV2%', 'CV3%'); + ``` + + The equivalent in Azure Synapse syntax is: + + ```sql + SELECT * FROM CUSTOMER + WHERE + (POSTCODE LIKE 'CV1%') OR (POSTCODE LIKE 'CV2%') OR (POSTCODE LIKE 'CV3%'); + ``` + +- Depending on system settings, character comparisons in Teradata may be case insensitive by default. In Azure Synapse, character comparisons are always case sensitive. + +### Use EXPLAIN to validate legacy SQL + +> [!TIP] +> Use real queries from the existing system query logs to find potential migration issues. + +One way of testing legacy Teradata SQL for compatibility with Azure Synapse is to capture some representative SQL statements from the legacy system query logs, prefix those queries with [EXPLAIN](/sql/t-sql/queries/explain-transact-sql?msclkid=91233fc1cff011ec9dff597671b7ae97), and (assuming a "like-for-like" migrated data model in Azure Synapse with the same table and column names) run those `EXPLAIN` statements in Azure Synapse. Any incompatible SQL will throw an error—use this information to determine the scale of the recoding task. This approach doesn't require that data is loaded into the Azure environment, only that the relevant tables and views have been created. + +### Functions, stored procedures, triggers, and sequences + +> [!TIP] +> As part of the preparation phase, assess the number and type of non-data objects being migrated. + +When migrating from a mature legacy data warehouse environment such as Teradata, there are often elements other than simple tables and views that need to be migrated to the new target environment. Examples of this include functions, stored procedures, triggers, and sequences. + +As part of the preparation phase, create an inventory of the objects that need to be migrated and define the methods for handling them. Then assign an appropriate allocation of resources in the project plan. + +There may be facilities in the Azure environment that replace the functionality implemented as either functions or stored procedures in the Teradata environment. In this case, it's often more efficient to use the built-in Azure facilities rather than recoding the Teradata functions. 
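To size the inventory of non-data objects described above, the source Teradata catalog can be queried directly. The following sketch is illustrative only: the database names are hypothetical, and the `TableKind` codes ('P' for stored procedures, 'F' for standard UDFs, 'M' for macros, 'G' for triggers) are assumptions you should verify against your Teradata release.

```sql
-- Count non-data objects per database in the source Teradata system.
-- TableKind codes ('P', 'F', 'M', 'G') are assumptions to verify for your release.
SELECT DatabaseName,
       TableKind,
       COUNT(*) AS Object_Count
FROM DBC.TablesV
WHERE DatabaseName IN ('UserDB1', 'UserDB2', 'UserDB3') -- databases in scope for migration
  AND TableKind IN ('P', 'F', 'M', 'G')
GROUP BY DatabaseName, TableKind
ORDER BY DatabaseName, TableKind;
```

Feeding these counts into the project plan makes the resource allocation for recoding work more realistic.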
+ +> [!TIP] +> Third-party products and services can automate migration of non-data elements. + +[Microsoft partners](../../partner/data-integration.md) offer tools and services that can automate the migration. + +See the following sections for more information on each of these elements. + +#### Functions + +As with most database products, Teradata supports system functions and user-defined functions within the SQL implementation. When migrating to another database platform such as Azure Synapse, common system functions are available and can be migrated without change. Some system functions may have slightly different syntax, but the required changes can be automated. System functions where there's no equivalent, such as arbitrary user-defined functions, may need to be recoded using the languages available in the target environment. Azure Synapse uses the popular Transact-SQL language to implement user-defined functions. + +#### Stored procedures + +Most modern database products allow for procedures to be stored within the database. Teradata provides the SPL language for this purpose. A stored procedure typically contains SQL statements and some procedural logic, and may return data or a status. + +The dedicated SQL pools of Azure Synapse Analytics also support stored procedures using T-SQL, so if you must migrate stored procedures, recode them accordingly. + +#### Triggers + +Azure Synapse doesn't support the creation of triggers, but you can implement them within Azure Data Factory. + +#### Sequences + +Azure Synapse sequences are handled in a similar way to Teradata, using [IDENTITY to create surrogate keys](../../sql-data-warehouse/sql-data-warehouse-tables-identity.md) or [managed identity](../../../data-factory/data-factory-service-identity.md?tabs=data-factory). + +#### Teradata to T-SQL mapping + +This table shows the Teradata to T-SQL compliant with Azure Synapse SQL data type mapping: + +| Teradata Data Type | Azure Synapse SQL Data Type | +|----------------------------------------|-----------------------------| +| bigint  | bigint | +| bool  | bit | +| boolean  | bit | +| byteint  | tinyint | +| char \[(*p*)\]  | char \[(*p*)\] | +| char varying \[(*p*)\]  | varchar \[(*p*)\] | +| character \[(*p*)\]  | char \[(*p*)\] | +| character varying \[(*p*)\]  | varchar \[(*p*)\] | +| date  | date | +| datetime  | datetime | +| dec \[(*p*\[,*s*\])\]  | decimal \[(*p*\[,*s*\])\]  | +| decimal \[(*p*\[,*s*\])\]  | decimal \[(*p*\[,*s*\])\] | +| double  | float(53) | +| double precision  | float(53) | +| float \[(*p*)\]  | float \[(*p*)\] | +| float4  | float(53) | +| float8  | float(53) | +| int  | int | +| int1  | tinyint  | +| int2  | smallint | +| int4  | int  | +| int8  | bigint  | +| integer  | integer | +| interval  | *Not supported* | +| national char varying \[(*p*)\]  | nvarchar \[(*p*)\]  | +| national character \[(*p*)\]  | nchar \[(*p*)\] | +| national character varying \[(*p*)\]  | nvarchar \[(*p*)\] | +| nchar \[(*p*)\]  | nchar \[(*p*)\] | +| numeric \[(*p*\[,*s*\])\]  | numeric \[(*p*\[,*s*\]) | +| nvarchar \[(*p*)\]  | nvarchar \[(*p*)\] | +| real  | real | +| smallint  | smallint | +| time  | time | +| time with time zone  | datetimeoffset | +| time without time zone  | time | +| timespan  | *Not supported* | +| timestamp  | datetime2 | +| timetz  | datetimeoffset | +| varchar \[(*p*)\]  | varchar \[(*p*)\] | + +## Summary + +Typical existing legacy Teradata installations are implemented in a way that makes migration to Azure Synapse easy. 
They use SQL for analytical queries on large data volumes, and are in some form of dimensional data model. These factors make them good candidates for migration to Azure Synapse. + +To minimize the task of migrating the actual SQL code, follow these recommendations: + +- Initial migration of the data warehouse should be as-is to minimize risk and time taken, even if the eventual final environment will incorporate a different data model such as data vault. + +- Consider using a Teradata instance in an Azure VM as a stepping stone as part of the migration process. + +- Understand the differences between Teradata SQL implementation and Azure Synapse. + +- Use metadata and query logs from the existing Teradata implementation to assess the impact of the differences and plan an approach to mitigate. + +- Automate the process wherever possible to minimize errors, risk, and time for the migration. + +- Consider using specialist [Microsoft partners](../../partner/data-integration.md) and services to streamline the migration. + +## Next steps + +To learn more about Microsoft and third-party tools, see the next article in this series: [Tools for Teradata data warehouse migration to Azure Synapse Analytics](6-microsoft-third-party-migration-tools.md). \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/teradata/6-microsoft-third-party-migration-tools.md b/articles/synapse-analytics/migration-guides/teradata/6-microsoft-third-party-migration-tools.md new file mode 100644 index 0000000000000..e4b9cab9ce5bc --- /dev/null +++ b/articles/synapse-analytics/migration-guides/teradata/6-microsoft-third-party-migration-tools.md @@ -0,0 +1,132 @@ +--- +title: "Tools for Teradata data warehouse migration to Azure Synapse Analytics" +description: Learn about Microsoft and third-party data and database migration tools that can help you migrate from Teradata to Azure Synapse Analytics. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/31/2022 +--- + +# Tools for Teradata data warehouse migration to Azure Synapse Analytics + +This article is part six of a seven part series that provides guidance on how to migrate from Teradata to Azure Synapse Analytics. This article provides best practices for Microsoft and third-party tools. + +## Data warehouse migration tools + +By migrating your existing data warehouse to Azure Synapse Analytics, you benefit from: + +- A globally secure, scalable, low-cost, cloud-native, pay-as-you-use analytical database. + +- The rich Microsoft analytical ecosystem that exists on Azure. This ecosystem consists of technologies to help modernize your data warehouse once it's migrated, and extends your analytical capabilities to drive new value. + +Several tools from Microsoft and third-party partner vendors can help you migrate your existing data warehouse to Azure Synapse. These tools include: + +- Microsoft data and database migration tools. + +- Third-party data warehouse automation tools to automate and document the migration to Azure Synapse. + +- Third-party data warehouse migration tools to migrate schema and data to Azure Synapse. + +- Third-party tools to minimize the impact on SQL differences between your existing data warehouse DBMS and Azure Synapse. + +The following sections discuss these tools in more detail. 
+ +## Microsoft data migration tools + +> [!TIP] +> Data Factory includes tools to help migrate your data and your entire data warehouse to Azure. + +Microsoft offers several tools to help you migrate your existing data warehouse to Azure Synapse, such as: + +- Microsoft Azure Data Factory. + +- Microsoft services for physical data transfer. + +- Microsoft services for data ingestion. + +### Microsoft Azure Data Factory + +Microsoft Azure Data Factory is a fully managed, pay-as-you-use, hybrid data integration service for highly scalable ETL and ELT processing. It uses Spark to process and analyze data in parallel and in memory to maximize throughput. + +> [!TIP] +> Data Factory allows you to build scalable data integration pipelines code-free. + +[Azure Data Factory connectors](../../../data-factory/connector-overview.md?msclkid=00086e4acff211ec9263dee5c7eb6e69) connect to external data sources and databases and have templates for common data integration tasks. A visual front-end, browser-based UI enables non-programmers to create and run process pipelines to ingest, transform, and load data. More experienced programmers have the option to incorporate custom code, such as Python programs. + +> [!TIP] +> Data Factory enables collaborative development between business and IT professionals. + +Data Factory is also an orchestration tool. It's the best Microsoft tool to automate the end-to-end migration process to reduce risk and make the migration process easily repeatable. The following diagram shows a Data Factory mapping data flow. + +:::image type="content" source="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows.png" border="true" alt-text="Screenshot showing an example of an Azure Data Factory mapping dataflow." lightbox="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows-lrg.png"::: + +The next screenshot shows a Data Factory wrangling data flow. + +:::image type="content" source="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-wrangling-dataflows.png" border="true" alt-text="Screenshot showing an example of Azure Data Factory wrangling dataflows."::: + +You can develop simple or comprehensive ETL and ELT processes without coding or maintenance with a few clicks. These processes ingest, move, prepare, transform, and process your data. You can design and manage scheduling and triggers in Azure Data Factory to build an automated data integration and loading environment. In Data Factory, you can define, manage, and schedule PolyBase bulk data load processes. + +> [!TIP] +> Data Factory includes tools to help migrate your data and your entire data warehouse to Azure. + +You can use Data Factory to implement and manage a hybrid environment that includes on-premises, cloud, streaming and SaaS data—for example, from applications like Salesforce—in a secure and consistent way. + +A new capability in Data Factory is wrangling data flows. This opens up Data Factory to business users who want to visually discover, explore, and prepare data at scale without writing code. This capability, similar to Microsoft Excel Power Query or Microsoft Power BI dataflows, offers self-service data preparation. Business users can prepare and integrate data through a spreadsheet-style user interface with drop-down transform options. 
+ +Azure Data Factory is the recommended approach for implementing data integration and ETL/ELT processes for an Azure Synapse environment, especially if existing legacy processes need to be refactored. + +### Microsoft services for physical data transfer + +> [!TIP] +> Microsoft offers a range of products and services to assist with data transfer. + +#### Azure ExpressRoute + +Azure ExpressRoute creates private connections between Azure data centers and infrastructure on your premises or in a collocation environment. ExpressRoute connections don't go over the public internet, and they offer more reliability, faster speeds, and lower latencies than typical internet connections. In some cases, by using ExpressRoute connections to transfer data between on-premises systems and Azure, you gain significant cost benefits. + +#### AzCopy + +[AzCopy](../../../storage/common/storage-use-azcopy-v10.md) is a command line utility that copies files to Azure Blob Storage via a standard internet connection. In a warehouse migration project, you can use AzCopy to upload extracted, compressed, and delimited text files before loading through PolyBase, or a native Parquet reader if the exported files are Parquet format. AzCopy can upload individual files, file selections, or file directories. + +#### Azure Data Box + +Microsoft offers a service called Azure Data Box. This service writes data to be migrated to a physical storage device. This device is then shipped to an Azure data center and loaded into cloud storage. The service can be cost-effective for large volumes of data—for example, tens or hundreds of terabytes—or where network bandwidth isn't readily available. Azure Data Box is typically used for one-off historical data load when migrating a large amount of data to Azure Synapse. + +Another service is Data Box Gateway, a virtualized cloud storage gateway device that resides on your premises and sends your images, media, and other data to Azure. Use Data Box Gateway for one-off migration tasks or ongoing incremental data uploads. + +### Microsoft services for data ingestion + +#### COPY INTO + +The [COPY](/sql/t-sql/statements/copy-into-transact-sql) statement provides the most flexibility for high-throughput data ingestion into Azure Synapse Analytics. Refer to the list of capabilities that `COPY` offers for data ingestion. + +#### PolyBase + +> [!TIP] +> PolyBase can load data in parallel from Azure Blob Storage into Azure Synapse. + +PolyBase provides the fastest and most scalable method of loading bulk data into Azure Synapse. PolyBase leverages the MPP architecture to use parallel loading, to give the fastest throughput, and can read data from flat files in Azure Blob Storage or directly from external data sources and other relational databases via connectors. + +PolyBase can also directly read from files compressed with gzip—this reduces the physical volume of data moved during the load process. PolyBase supports popular data formats such as delimited text, ORC, and Parquet. + +> [!TIP] +> Invoke PolyBase from Azure Data Factory as part of a migration pipeline. + +PolyBase is tightly integrated with Azure Data Factory to enable data load ETL/ELT processes to be rapidly developed and scheduled through a visual GUI, leading to higher productivity and fewer errors than hand-written code. + +PolyBase is the recommended data load method for Azure Synapse, especially for high-volume data. 
PolyBase loads data using the `CREATE TABLE AS` or `INSERT...SELECT` statements—CTAS achieves the highest possible throughput as it minimizes the amount of logging required. Compressed delimited text files are the most efficient input format. For maximum throughput, split very large input files into multiple smaller files and load these in parallel. For fastest loading to a staging table, define the target table as type `HEAP` and use round-robin distribution. + +However, PolyBase has some limitations. Rows to be loaded must be less than 1 MB in length. Fixed-width format or nested data, such as JSON and XML, aren't directly readable. + +## Microsoft partners can help you migrate your data warehouse to Azure Synapse Analytics + +In addition to tools that can help you with various aspects of data warehouse migration, there are several practiced [Microsoft partners](../../partner/data-integration.md) that can bring their expertise to help you move your legacy on-premises data warehouse platform to Azure Synapse. + +## Next steps + +To learn more about implementing modern data warehouses, see the next article in this series: [Beyond Teradata migration, implementing a modern data warehouse in Microsoft Azure](7-beyond-data-warehouse-migration.md). \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/teradata/7-beyond-data-warehouse-migration.md b/articles/synapse-analytics/migration-guides/teradata/7-beyond-data-warehouse-migration.md new file mode 100644 index 0000000000000..3043dd20b095f --- /dev/null +++ b/articles/synapse-analytics/migration-guides/teradata/7-beyond-data-warehouse-migration.md @@ -0,0 +1,375 @@ +--- +title: "Beyond Teradata migration, implementing a modern data warehouse in Microsoft Azure" +description: Learn how a Teradata migration to Azure Synapse Analytics lets you integrate your data warehouse with the Microsoft Azure analytical ecosystem. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/31/2022 +--- + +# Beyond Teradata migration, implementing a modern data warehouse in Microsoft Azure + +This article is part seven of a seven part series that provides guidance on how to migrate from Teradata to Azure Synapse Analytics. This article provides best practices for implementing modern data warehouses. + +## Beyond data warehouse migration to Azure + +One of the key reasons to migrate your existing data warehouse to Azure Synapse Analytics is to utilize a globally secure, scalable, low-cost, cloud-native, pay-as-you-use analytical database. Azure Synapse also lets you integrate your migrated data warehouse with the complete Microsoft Azure analytical ecosystem to take advantage of, and integrate with, other Microsoft technologies that help you modernize your migrated data warehouse. This includes integrating with technologies like: + +- Azure Data Lake Storage for cost effective data ingestion, staging, cleansing, and transformation, to free up data warehouse capacity occupied by fast growing staging tables. + +- Azure Data Factory for collaborative IT and self-service data integration [with connectors](../../../data-factory/connector-overview.md) to cloud and on-premises data sources and streaming data. 
+ +- [The Open Data Model Common Data Initiative](/common-data-model/) to share consistent trusted data across multiple technologies, including: + - Azure Synapse + - Azure Synapse Spark + - Azure HDInsight + - Power BI + - SAP + - Adobe Customer Experience Platform + - Azure IoT + - Microsoft ISV Partners + +- [Microsoft's data science technologies](/azure/architecture/data-science-process/platforms-and-tools), including: + - Azure Machine Learning Studio + - Azure Machine Learning + - Azure Synapse Spark (Spark as a service) + - Jupyter Notebooks + - RStudio + - ML.NET + - .NET for Apache Spark to enable data scientists to use Azure Synapse data to train machine learning models at scale. + +- [Azure HDInsight](../../../hdinsight/index.yml) to leverage big data analytical processing and join big data with Azure Synapse data by creating a logical data warehouse using PolyBase. + +- [Azure Event Hubs](../../../event-hubs/event-hubs-about.md), [Azure Stream Analytics](../../../stream-analytics/stream-analytics-introduction.md), and [Apache Kafka](/azure/databricks/spark/latest/structured-streaming/kafka) to integrate with live streaming data from Azure Synapse. + +There's often acute demand to integrate with [machine learning](../../machine-learning/what-is-machine-learning.md) to enable custom-built, trained machine learning models for use in Azure Synapse. This would enable in-database analytics to run at scale in-batch, on an event-driven basis and on-demand. The ability to exploit in-database analytics in Azure Synapse from multiple BI tools and applications also guarantees that all get the same predictions and recommendations. + +In addition, there's an opportunity to integrate Azure Synapse with Microsoft partner tools on Azure to shorten time to value. + +Let's look at these in more detail to understand how you can take advantage of the technologies in Microsoft's analytical ecosystem to modernize your data warehouse once you've migrated to Azure Synapse. + +## Offload data staging and ETL processing to Azure Data Lake and Azure Data Factory + +Enterprises today have a key problem resulting from digital transformation. So much new data is being generated and captured for analysis, and much of this data is finding its way into data warehouses. A good example is transaction data created by opening OLTP systems to self-service access from mobile devices. These OLTP systems are the main sources of data to a data warehouse, and with customers now driving the transaction rate rather than employees, data in data warehouse staging tables has been growing rapidly in volume. + +The rapid influx of data into the enterprise, along with new sources of data like Internet of Things (IoT) streams, means that companies need to find a way to deal with unprecedented data growth and scale data integration ETL processing beyond current levels. One way to do this is to offload ingestion, data cleansing, transformation, and integration to a data lake and process it at scale there, as part of a data warehouse modernization program. + +Once you've migrated your data warehouse to Azure Synapse, Microsoft provides the ability to modernize your ETL processing by ingesting data into, and staging data in, Azure Data Lake Storage. You can then clean, transform and integrate your data at scale using Data Factory before loading it into Azure Synapse in parallel using PolyBase. + +For ELT strategies, consider offloading ELT processing to Azure Data Lake to easily scale as your data volume or frequency grows. 
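As a minimal sketch of the final load step, cleansed files staged in the lake can be loaded into a dedicated SQL pool in parallel with a single `COPY INTO` statement (or through PolyBase external tables). The storage account, container, and staging table names below are hypothetical, and the example assumes the workspace's managed identity has been granted access to the storage account.

```sql
-- Load cleansed, delimited files staged in Azure Data Lake Storage into a staging table.
-- Storage account, container, and table names are placeholders for illustration.
COPY INTO dbo.Stage_Sales
FROM 'https://yourstorageaccount.dfs.core.windows.net/staging/sales/*.csv'
WITH (
    FILE_TYPE = 'CSV',
    FIELDTERMINATOR = ',',
    ROWTERMINATOR = '0x0A',
    FIRSTROW = 2,                                 -- skip the header row
    CREDENTIAL = (IDENTITY = 'Managed Identity')
);
```

From there, CTAS or `INSERT...SELECT` statements can transform the staged data into the target dimensional tables inside Azure Synapse.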
+ +### Microsoft Azure Data Factory + +> [!TIP] +> Data Factory allows you to build scalable data integration pipelines code-free. + +[Data Factory](https://azure.microsoft.com/services/data-factory/) is a pay-as-you-use, hybrid data integration service for highly scalable ETL and ELT processing. Data Factory provides a simple web-based user interface to build data integration pipelines in a code-free manner that can: + +- Build scalable data integration pipelines code-free. Easily acquire data at scale. Pay only for what you use, and connect to on-premises, cloud, and SaaS-based data sources. + +- Ingest, move, clean, transform, integrate, and analyze cloud and on-premises data at scale. Take automatic action, such as a recommendation or alert. + +- Seamlessly author, monitor, and manage pipelines that span data stores both on-premises and in the cloud. + +- Enable pay-as-you-go scale-out in alignment with customer growth. + +> [!TIP] +> Data Factory can connect to on-premises, cloud, and SaaS data. + +All of this can be done without writing any code. However, adding custom code to Data Factory pipelines is also supported. The next screenshot shows an example Data Factory pipeline. + +:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-data-factory-pipeline.png" border="true" alt-text="Screenshot showing an example of an Azure Data Factory pipeline."::: + +> [!TIP] +> Pipelines called data factories control the integration and analysis of data. Data Factory is enterprise-class data integration software aimed at IT professionals with a data wrangling facility for business users. + +Implement Data Factory pipeline development from any of several places including: + +- Microsoft Azure portal + +- Microsoft Azure PowerShell + +- Programmatically from .NET and Python using a multi-language SDK + +- Azure Resource Manager (ARM) templates + +- REST APIs + +Developers and data scientists who prefer to write code can easily author Data Factory pipelines in Java, Python, and .NET using the software development kits (SDKs) available for those programming languages. Data Factory pipelines can also be hybrid since they can connect, ingest, clean, transform, and analyze data in on-premises data centers, Microsoft Azure, other clouds, and SaaS offerings. + +Once you develop Data Factory pipelines to integrate and analyze data, deploy those pipelines globally and schedule them to run in batch, invoke them on demand as a service, or run them in real-time on an event-driven basis. A Data Factory pipeline can also run on one or more execution engines and monitor pipeline execution to ensure performance and track errors. + +#### Use cases + +> [!TIP] +> Build data warehouses on Microsoft Azure. + +Data Factory can support multiple use cases, including: + +- Preparing, integrating, and enriching data from cloud and on-premises data sources to populate your migrated data warehouse and data marts on Microsoft Azure Synapse. + +- Preparing, integrating, and enriching data from cloud and on-premises data sources to produce training data for use in machine learning model development and in retraining analytical models. + +- Orchestrating data preparation and analytics to create predictive and prescriptive analytical pipelines for processing and analyzing data in batch, such as sentiment analytics, and either acting on the results of the analysis or populating your data warehouse with the results. 
+ +- Preparing, integrating, and enriching data for data-driven business applications running on the Azure cloud on top of operational data stores like Azure Cosmos DB. + +> [!TIP] +> Build training data sets in data science to develop machine learning models. + +#### Data sources + +Data Factory lets you use [connectors](../../../data-factory/connector-overview.md) from both cloud and on-premises data sources. Agent software, known as a *self-hosted integration runtime*, securely accesses on-premises data sources and supports secure, scalable data transfer. + +#### Transform data using Azure Data Factory + +> [!TIP] +> Professional ETL developers can use Azure Data Factory mapping data flows to clean, transform, and integrate data without the need to write code. + +Within a Data Factory pipeline, ingest, clean, transform, integrate, and, if necessary, analyze any type of data from these sources. This includes structured, semi-structured such as JSON or Avro, and unstructured data. + +Professional ETL developers can use Data Factory mapping data flows to filter, split, join (many types), lookup, pivot, unpivot, sort, union, and aggregate data without writing any code. In addition, Data Factory supports surrogate keys, multiple write processing options such as insert, upsert, update, table recreation, and table truncation, and several types of target data stores—also known as sinks. ETL developers can also create aggregations, including time-series aggregations that require a window to be placed on data columns. + +> [!TIP] +> Data Factory supports the ability to automatically detect and manage schema changes in inbound data, such as in streaming data. + +Run mapping data flows that transform data as activities in a Data Factory pipeline. Include multiple mapping data flows in a single pipeline, if necessary. Break up challenging data transformation and integration tasks into smaller mapping dataflows that can be combined to handle the complexity and custom code added if necessary. In addition to this functionality, Data Factory mapping data flows include these abilities: + +- Define expressions to clean and transform data, compute aggregations, and enrich data. For example, these expressions can perform feature engineering on a date field to break it into multiple fields to create training data during machine learning model development. Construct expressions from a rich set of functions that include mathematical, temporal, split, merge, string concatenation, conditions, pattern match, replace, and many other functions. + +- Automatically handle schema drift so that data transformation pipelines can avoid being impacted by schema changes in data sources. This is especially important for streaming IoT data, where schema changes can happen without notice when devices are upgraded or when readings are missed by gateway devices collecting IoT data. + +- Partition data to enable transformations to run in parallel at scale. + +- Inspect data to view the metadata of a stream you're transforming. + +> [!TIP] +> Data Factory can also partition data to enable ETL processing to run at scale. + +The next screenshot shows an example Data Factory mapping data flow. + +:::image type="content" source="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows.png" border="true" alt-text="Screenshot showing an example of an Azure Data Factory mapping dataflow." 
lightbox="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows-lrg.png"::: + +Data engineers can profile data quality and view the results of individual data transforms by switching on a debug capability during development. + +> [!TIP] +> Data Factory pipelines are also extensible since Data Factory allows you to write your own code and run it as part of a pipeline. + +Extend Data Factory transformational and analytical functionality by adding a linked service containing your own code into a pipeline. For example, an Azure Synapse Spark pool notebook containing Python code could use a trained model to score the data integrated by a mapping data flow. + +Store integrated data and any results from analytics included in a Data Factory pipeline in one or more data stores such as Azure Data Lake Storage, Azure Synapse, or Azure HDInsight (Hive tables). Invoke other activities to act on insights produced by a Data Factory analytical pipeline. + +#### Utilize Spark to scale data integration + +Internally, Data Factory utilizes Azure Synapse Spark Pools—Microsoft's Spark-as-a-service offering—at run time to clean and integrate data on the Microsoft Azure cloud. This enables it to clean, integrate, and analyze high-volume and very high-velocity data (such as click stream data) at scale. Microsoft intends to execute Data Factory pipelines on other Spark distributions. In addition to executing ETL jobs on Spark, Data Factory can also invoke Pig scripts and Hive queries to access and transform data stored in Azure HDInsight. + +#### Link self-service data prep and Data Factory ETL processing using wrangling data flows + +> [!TIP] +> Data Factory support for wrangling data flows in addition to mapping data flows means that business and IT can work together on a common platform to integrate data. + +Another new capability in Data Factory is wrangling data flows. This lets business users (also known as citizen data integrators and data engineers) make use of the platform to visually discover, explore, and prepare data at scale without writing code. This easy-to-use Data Factory capability is similar to Microsoft Excel Power Query or Microsoft Power BI dataflows, where self-service data preparation business users use a spreadsheet-style UI with drop-down transforms to prepare and integrate data. The following screenshot shows an example Data Factory wrangling data flow. + +:::image type="content" source="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-wrangling-dataflows.png" border="true" alt-text="Screenshot showing an example of Azure Data Factory wrangling dataflows."::: + +This differs from Excel and Power BI, as Data Factory [wrangling data flows](/azure/data-factory/wrangling-tutorial) use Power Query to generate M code and translate it into a massively parallel in-memory Spark job for cloud-scale execution. The combination of mapping data flows and wrangling data flows in Data Factory lets IT professional ETL developers and business users collaborate to prepare, integrate, and analyze data for a common business purpose. The preceding Data Factory mapping data flow diagram shows how both Data Factory and Azure Synapse Spark pool notebooks can be combined in the same Data Factory pipeline. This allows IT and business to be aware of what each has created. Mapping data flows and wrangling data flows can then be available for reuse to maximize productivity and consistency and minimize reinvention. 
+ +#### Link data and analytics in analytical pipelines + +In addition to cleaning and transforming data, Data Factory can combine data integration and analytics in the same pipeline. Use Data Factory to create both data integration and analytical pipelines—the latter being an extension of the former. Drop an analytical model into a pipeline so that clean, integrated data can be stored to provide predictions or recommendations. Act on this information immediately or store it in your data warehouse to provide you with new insights and recommendations that can be viewed in BI tools. + +Models developed code-free with Azure Machine Learning Studio, or with the Azure Machine Learning SDK using Azure Synapse Spark pool notebooks or using R in RStudio, can be invoked as a service from within a Data Factory pipeline to batch score your data. Analysis happens at scale by executing Spark machine learning pipelines on Azure Synapse Spark pool notebooks. + +Store integrated data and any results from analytics included in a Data Factory pipeline in one or more data stores, such as Azure Data Lake Storage, Azure Synapse, or Azure HDInsight (Hive tables). Invoke other activities to act on insights produced by a Data Factory analytical pipeline. + +## A lake database to share consistent trusted data + +> [!TIP] +> Microsoft has created a lake database to describe core data entities to be shared across the enterprise. + +A key objective in any data integration setup is the ability to integrate data once and reuse it everywhere, not just in a data warehouse—for example, in data science. Reuse avoids reinvention and ensures consistent, commonly understood data that everyone can trust. + +> [!TIP] +> Azure Data Lake Storage is shared storage that underpins Microsoft Azure Synapse, Azure Machine Learning, Azure Synapse Spark, and Azure HDInsight. + +To achieve this goal, establish a set of common data names and definitions describing logical data entities that need to be shared across the enterprise—such as customer, account, product, supplier, orders, payments, returns, and so forth. Once this is done, IT and business professionals can use data integration software to create these common data assets and store them to maximize their reuse to drive consistency everywhere. + +> [!TIP] +> Integrating data to create lake database logical entities in shared storage enables maximum reuse of common data assets. + +Microsoft has done this by creating a [lake database](../../database-designer/concepts-lake-database.md). The lake database is a common language for business entities that represents commonly used concepts and activities across a business. Azure Synapse Analytics provides industry specific database templates to help standardize data in the lake. [Lake database templates](../../database-designer/concepts-database-templates.md) provide schemas for predefined business areas, enabling data to be loaded into a lake database in a structured way. The power comes when data integration software is used to create lake database common data assets. This results in self-describing trusted data that can be consumed by applications and analytical systems. Create a lake database in Azure Data Lake Storage by using Azure Data Factory, and consume it with Power BI, Azure Synapse Spark, Azure Synapse, and Azure Machine Learning. The following diagram shows a lake database used in Azure Synapse Analytics. 
+ +:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-synapse-analytics-lake-database.png" border="true" alt-text="Screenshot showing how a lake database can be used in Azure Synapse Analytics."::: + +## Integration with Microsoft data science technologies on Azure + +Another key requirement in modernizing your migrated data warehouse is to integrate it with Microsoft and third-party data science technologies on Azure to produce insights for competitive advantage. Let's look at what Microsoft offers in terms of machine learning and data science technologies and see how these can be used with Azure Synapse in a modern data warehouse environment. + +### Microsoft technologies for data science on Azure + +> [!TIP] +> Develop machine learning models using a no/low-code approach or from a range of programming languages like Python, R, and .NET. + +Microsoft offers a range of technologies to build predictive analytical models using machine learning, analyze unstructured data using deep learning, and perform other kinds of advanced analytics. This includes: + +- Azure Machine Learning Studio + +- Azure Machine Learning + +- Azure Synapse Spark pool notebooks + +- ML.NET (API, CLI, or ML.NET Model Builder for Visual Studio) + +- .NET for Apache Spark + +Data scientists can use RStudio (R) and Jupyter Notebooks (Python) to develop analytical models, or they can use other frameworks such as Keras or TensorFlow. + +#### Azure Machine Learning Studio + +Azure Machine Learning Studio is a fully managed cloud service that lets you easily build, deploy, and share predictive analytics via a drag-and-drop web-based user interface. The next screenshot shows an Azure Machine Learning Studio user interface. + +:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-ml-studio-ui.png" border="true" alt-text="Screenshot showing predictive analysis in the Azure Machine Learning Studio user interface."::: + +#### Azure Machine Learning + +> [!TIP] +> Azure Machine Learning provides an SDK for developing machine learning models using several open-source frameworks. + +Azure Machine Learning provides a software development kit (SDK) and services for Python to quickly prepare data, as well as train and deploy machine learning models. Use Azure Machine Learning from Azure notebooks (a Jupyter Notebook service) and utilize open-source frameworks, such as PyTorch, TensorFlow, Spark MLlib (Azure Synapse Spark pool notebooks), or scikit-learn. Azure Machine Learning provides an AutoML capability that automatically identifies the most accurate algorithms to expedite model development. You can also use it to build machine learning pipelines that manage end-to-end workflow, programmatically scale on the cloud, and deploy models both to the cloud and the edge. Azure Machine Learning uses logical containers called workspaces, which can be either created manually from the Azure portal or created programmatically. These workspaces keep compute targets, experiments, data stores, trained machine learning models, Docker images, and deployed services all in one place to enable teams to work together. Use Azure Machine Learning from Visual Studio with a Visual Studio for AI extension. + +> [!TIP] +> Organize and manage related data stores, experiments, trained models, Docker images, and deployed services in workspaces. 
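+
+To make the workspace concept concrete, the following minimal sketch uses the Azure Machine Learning Python SDK (v1, `azureml-core`) to get a workspace and submit a training script as an experiment run. The workspace name, subscription ID, resource group, compute target, and `train.py` script are placeholders for illustration only.
+
+```python
+# Minimal sketch: retrieve a workspace and submit a training run with the v1 SDK.
+from azureml.core import Workspace, Experiment, ScriptRunConfig
+
+# Placeholder identifiers - replace with values from your own subscription.
+ws = Workspace.get(name="my-workspace",
+                   subscription_id="<subscription-id>",
+                   resource_group="<resource-group>")
+
+# The workspace keeps experiments, data stores, models, and compute targets together.
+experiment = Experiment(workspace=ws, name="churn-training")
+
+config = ScriptRunConfig(source_directory="./train",    # folder containing a hypothetical train.py
+                         script="train.py",
+                         compute_target="cpu-cluster")   # an existing compute target in the workspace
+
+run = experiment.submit(config)
+run.wait_for_completion(show_output=True)
+```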
+ +#### Azure Synapse Spark pool notebooks + +> [!TIP] +> Azure Synapse Spark is Microsoft's dynamically scalable Spark-as-a-service, offering scalable execution of data preparation, model development, and deployed model execution. + +[Azure Synapse Spark pool notebooks](../../spark/apache-spark-development-using-notebooks.md?msclkid=cbe4b8ebcff511eca068920ea4bf16b9) is an Apache Spark service optimized to run on Azure, which: + +- Allows data engineers to build and execute scalable data preparation jobs using Azure Data Factory. + +- Allows data scientists to build and execute machine learning models at scale using notebooks written in languages such as Scala, R, Python, Java, and SQL; and to visualize results. + +> [!TIP] +> Azure Synapse Spark can access data in a range of Microsoft analytical ecosystem data stores on Azure. + +Jobs running in Azure Synapse Spark pool notebook can retrieve, process, and analyze data at scale from Azure Blob Storage, Azure Data Lake Storage, Azure Synapse, Azure HDInsight, and streaming data services such as Kafka. + +Autoscaling and auto-termination are also supported to reduce total cost of ownership (TCO). Data scientists can use the MLflow open-source framework to manage the machine learning lifecycle. + +#### ML.NET + +> [!TIP] +> Microsoft has extended its machine learning capability to .NET developers. + +ML.NET is an open-source and cross-platform machine learning framework (Windows, Linux, macOS), created by Microsoft for .NET developers so that they can use existing tools—like ML.NET Model Builder for Visual Studio—to develop custom machine learning models and integrate them into .NET applications. + +#### .NET for Apache Spark + +.NET for Apache Spark aims to make Spark accessible to .NET developers across all Spark APIs. It takes Spark support beyond R, Scala, Python, and Java to .NET. While initially only available on Apache Spark on HDInsight, Microsoft intends to make this available on Azure Synapse Spark pool notebook. + +### Use Azure Synapse Analytics with your data warehouse + +> [!TIP] +> Train, test, evaluate, and execute machine learning models at scale on Azure Synapse Spark pool notebook by using data in Azure Synapse. + +Combine machine learning models with Azure Synapse by: + +- Using machine learning models in batch mode or in real-time to produce new insights, and add them to what you already know in Azure Synapse. + +- Using the data in Azure Synapse to develop and train new predictive models for deployment elsewhere, such as in other applications. + +- Deploying machine learning models, including those trained elsewhere, in Azure Synapse to analyze data in the data warehouse and drive new business value. + +> [!TIP] +> Produce new insights using machine learning on Azure in batch or in real-time and add to what you know in your data warehouse. + +In terms of machine learning model development, data scientists can use RStudio, Jupyter Notebooks, and Azure Synapse Spark pool notebooks together with Azure Machine Learning to develop machine learning models that run at scale on Azure Synapse Spark pool notebooks using data in Azure Synapse. For example, they could create an unsupervised model to segment customers for use in driving different marketing campaigns. Use supervised machine learning to train a model to predict a specific outcome, such as predicting a customer's propensity to churn, or recommending the next best offer for a customer to try to increase their value. 
The next diagram shows how Azure Synapse Analytics can be leveraged for Azure Machine Learning. + +:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-synapse-train-predict.png" border="true" alt-text="Screenshot of an Azure Synapse Analytics train and predict model."::: + +In addition, you can ingest big data—such as social network data or review website data—into Azure Data Lake, then prepare and analyze it at scale on Azure Synapse Spark pool notebook, using natural language processing to score sentiment about your products or your brand. Add these scores to your data warehouse to understand the impact of—for example—negative sentiment on product sales, and to leverage big data analytics to add to what you already know in your data warehouse. + +## Integrate live streaming data into Azure Synapse Analytics + +When analyzing data in a modern data warehouse, you must be able to analyze streaming data in real-time and join it with historical data in your data warehouse. An example of this would be combining IoT data with product or asset data. + +> [!TIP] +> Integrate your data warehouse with streaming data from IoT devices or clickstream. + +Once you've successfully migrated your data warehouse to Azure Synapse, you can introduce this capability as part of a data warehouse modernization exercise. Do this by taking advantage of additional functionality in Azure Synapse. + +> [!TIP] +> Ingest streaming data into Azure Data Lake Storage from Azure Event Hubs or Kafka, and access it from Azure Synapse using PolyBase external tables. + +To do this, ingest streaming data via Azure Event Hubs or other technologies, such as Kafka, using Azure Data Factory (or using an existing ETL tool if it supports the streaming data sources). Store the data in Azure Data Lake Storage (ADLS). Next, create an external table in Azure Synapse using PolyBase and point it at the data being streamed into Azure Data Lake. Your migrated data warehouse will now contain new tables that provide access to real-time streaming data. Query this external table as if the data was in the data warehouse via standard T-SQL from any BI tool that has access to Azure Synapse. You can also join this data to other tables containing historical data and create views that join live streaming data to historical data to make it easier for business users to access. In the following diagram, a real-time data warehouse on Azure Synapse Analytics is integrated with streaming data in ADLS. + +:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-datalake-streaming-data.png" border="true" alt-text="Screenshot of Azure Synapse Analytics with streaming data in an Azure Data Lake."::: + +## Create a logical data warehouse using PolyBase + +> [!TIP] +> PolyBase simplifies access to multiple underlying analytical data stores on Azure to simplify access for business users. + +PolyBase offers the capability to create a logical data warehouse to simplify user access to multiple analytical data stores. + +This is attractive because many companies have adopted "workload optimized" analytical data stores over the last several years in addition to their data warehouses. Examples of these platforms on Azure include: + +- ADLS with Azure Synapse Spark pool notebook (Spark-as-a-service), for big data analytics. + +- Azure HDInsight (Hadoop as-a-service), also for big data analytics. + +- NoSQL Graph databases for graph analysis, which could be done in Azure Cosmos DB. 
+ +- Azure Event Hubs and Azure Stream Analytics, for real-time analysis of data in motion. + +You may have non-Microsoft equivalents of some of these. You may also have a master data management (MDM) system that needs to be accessed for consistent trusted data on customers, suppliers, products, assets, and more. + +These additional analytical platforms have emerged because of the explosion of new data sources—both inside and outside the enterprises—that business users want to capture and analyze. Examples include: + +- Machine generated data, such as IoT sensor data and clickstream data. + +- Human generated data, such as social network data, review web site data, customer inbound email, images, and video. + +- Other external data, such as open government data and weather data. + +This data is over and above the structured transaction data and master data sources that typically feed data warehouses. These new data sources include semi-structured data (like JSON, XML, or Avro) or unstructured data (like text, voice, image, or video), which is more complex to process and analyze. This data could be very high volume, high velocity, or both. + +As a result, the need for new kinds of more complex analysis has emerged, such as natural language processing, graph analysis, deep learning, streaming analytics, or complex analysis of large volumes of structured data. All of this is typically not happening in a data warehouse, so it's not surprising to see different analytical platforms for different types of analytical workloads, as shown in the following diagram. + +:::image type="content" source="../media/7-beyond-data-warehouse-migration/analytical-workload-platforms.png" border="true" alt-text="Screenshot of different analytical platforms for different types of analytical workloads in Azure Synapse Analytics."::: + +Since these platforms are producing new insights, it's normal to see a requirement to combine these insights with what you already know in Azure Synapse. That's what PolyBase makes possible. + +> [!TIP] +> The ability to make data in multiple analytical data stores look like it's all in one system and join it to Azure Synapse is known as a logical data warehouse architecture. + +By leveraging PolyBase data virtualization inside Azure Synapse, you can implement a logical data warehouse. Join data in Azure Synapse to data in other Azure and on-premises analytical data stores—like Azure HDInsight or Azure Cosmos DB—or to streaming data flowing into ADLS from Azure Stream Analytics and Event Hubs. Users access external tables in Azure Synapse, unaware that the data they're accessing is stored in multiple underlying analytical systems. The next diagram shows the complex data warehouse structure accessed through comparatively simpler but still powerful user interface methods. + +:::image type="content" source="../media/7-beyond-data-warehouse-migration/complex-data-warehouse-structure.png" alt-text="Screenshot showing an example of a complex data warehouse structure accessed through user interface methods."::: + +The previous diagram shows how other technologies of the Microsoft analytical ecosystem can be combined with the capability of Azure Synapse logical data warehouse architecture. For example, data can be ingested into ADLS and curated using Azure Data Factory to create trusted data products that represent Microsoft [lake database](../../database-designer/concepts-lake-database.md) logical data entities. 
This trusted, commonly understood data can then be consumed and reused in different analytical environments such as Azure Synapse, Azure Synapse Spark pool notebooks, or Azure Cosmos DB. All insights produced in these environments are accessible via a logical data warehouse data virtualization layer made possible by PolyBase. + +> [!TIP] +> A logical data warehouse architecture simplifies business user access to data and adds new value to what you already know in your data warehouse. + +## Conclusions + +> [!TIP] +> Migrating your data warehouse to Azure Synapse lets you make use of a rich Microsoft analytical ecosystem running on Azure. + +Once you migrate your data warehouse to Azure Synapse, you can leverage other technologies in the Microsoft analytical ecosystem. You don't only modernize your data warehouse, but combine insights produced in other Azure analytical data stores into an integrated analytical architecture. + +Broaden your ETL processing to ingest data of any type into ADLS. Prepare and integrate it at scale using Azure Data Factory to produce trusted, commonly understood data assets that can be consumed by your data warehouse and accessed by data scientists and other applications. Build real-time and batch-oriented analytical pipelines and create machine learning models to run in batch, in real-time on streaming data, and on-demand as a service. + +Leverage PolyBase and `COPY INTO` to go beyond your data warehouse. Simplify access to insights from multiple underlying analytical platforms on Azure by creating holistic integrated views in a logical data warehouse. Easily access streaming, big data, and traditional data warehouse insights from BI tools and applications to drive new value in your business. + +## Next steps + +To learn more about migrating to a dedicated SQL pool, see [Migrate a data warehouse to a dedicated SQL pool in Azure Synapse Analytics](../migrate-to-synapse-analytics-guide.md). diff --git a/articles/synapse-analytics/monitoring/apache-spark-applications.md b/articles/synapse-analytics/monitoring/apache-spark-applications.md index 198479ae6b9c6..cbd3179a09337 100644 --- a/articles/synapse-analytics/monitoring/apache-spark-applications.md +++ b/articles/synapse-analytics/monitoring/apache-spark-applications.md @@ -6,10 +6,10 @@ author: matt1883 ms.service: synapse-analytics ms.topic: how-to ms.subservice: monitoring -ms.date: 04/15/2020 +ms.date: 06/01/2022 ms.author: mahi ms.reviewer: mahi -ms.custom: contperf-fy21q4 +ms.custom: kr2b-contr-experiment --- # Use Synapse Studio to monitor your Apache Spark applications @@ -19,14 +19,16 @@ With Azure Synapse Analytics, you can use Apache Spark to run notebooks, jobs, a This article explains how to monitor your Apache Spark applications, allowing you to keep an eye on the latest status, issues, and progress. ## View Apache Spark applications + You can view all Apache Spark applications from **Monitor** -> **Apache Spark applications**. - ![apache spark applications](./media/how-to-monitor-spark-applications/apache-spark-applications.png) -## View completed Apache Spark application + ![Screenshot of Apache Spark applications.](./media/how-to-monitor-spark-applications/apache-spark-applications.png) + +## View completed Apache Spark applications -Open **Monitor**, then select **Apache Spark applications**. To view the details about the completed Apache Spark applications, select the Apache Spark application and view the details. +Open **Monitor**, then select **Apache Spark applications**. 
To view the details about the completed Apache Spark applications, select the Apache Spark application. - ![select completed job](./media/how-to-monitor-spark-applications/select-completed-job.png) + ![Screenshot of completed job details.](./media/how-to-monitor-spark-applications/select-completed-job.png) 1. Check the **Completed tasks**, **Status**, and **Total duration**. @@ -50,8 +52,7 @@ Open **Monitor**, then select **Apache Spark applications**. To view the details 11. Use scroll bar to zoom in and zoom out the job graph, you can also select **Zoom to Fit** to make it fit the screen. - [![view completed job](./media/how-to-monitor-spark-applications/view-completed-job.png)](./media/how-to-monitor-spark-applications/view-completed-job.png#lightbox) - + [![Screenshot of completed job.](./media/how-to-monitor-spark-applications/view-completed-job.png)](./media/how-to-monitor-spark-applications/view-completed-job.png#lightbox) 12. The job graph node displays the following information of each stage: @@ -63,12 +64,12 @@ Open **Monitor**, then select **Apache Spark applications**. To view the details - Data written: the sum of output size and shuffle writes size - Stage number - ![job graph node](./media/how-to-monitor-spark-applications/job-graph-node.png) + ![Screenshot of job graph node.](./media/how-to-monitor-spark-applications/job-graph-node.png) 13. Hover the mouse over a job, and the job details will be displayed in the tooltip: - - Icon of job status: If the job status is successful, it will be displayed as a green "√"; if the job detects a problem, it will display a yellow "!". - - Job ID. + - Icon of job status: If the job status is successful, it will be displayed as a green "√"; if the job detects a problem, it will display a yellow "!" + - Job ID - General part: - Progress - Duration time @@ -82,19 +83,19 @@ Open **Monitor**, then select **Apache Spark applications**. To view the details - Time skew - Stage number - ![hover a job](./media/how-to-monitor-spark-applications/hover-a-job.png) + ![Screenshot of tooltip hovering over a job.](./media/how-to-monitor-spark-applications/hover-a-job.png) 14. Click **Stage number** to expand all the stages contained in the job. Click **Collapse** next to the Job ID to collapse all the stages in the job. -15. Click on **View details** in a stage graph,then the details for stage will show out. +15. Click on **View details** in a stage graph, then the details for a stage will appear. - [![expand all the stages](./media/how-to-monitor-spark-applications/expand-all-the-stages.png)](./media/how-to-monitor-spark-applications/expand-all-the-stages.png#lightbox) + [![Screenshot of stages expanded.](./media/how-to-monitor-spark-applications/expand-all-the-stages.png)](./media/how-to-monitor-spark-applications/expand-all-the-stages.png#lightbox) -## Monitor running Apache Spark application +## Monitor Apache Spark application progress -Open **Monitor**, then select **Apache Spark applications**. To view the details about the Apache Spark applications that are running, select the submitting Apache Spark application and view the details. If the Apache Spark application is still running, you can monitor the progress. +Open **Monitor**, then select **Apache Spark applications**. To view the details about the Apache Spark applications that are running, select the submitted Apache Spark application. If the Apache Spark application is still running, you can monitor the progress. 
- ![select running job](./media/how-to-monitor-spark-applications/select-running-job.png) + ![Screenshot of selected running job](./media/how-to-monitor-spark-applications/select-running-job.png) 1. Check the **Completed tasks**, **Status**, and **Total duration**. @@ -104,15 +105,15 @@ Open **Monitor**, then select **Apache Spark applications**. To view the details 4. Click on **Spark UI** button to go to Spark Job page. -5. For **Job graph**, **Summary**, **Diagnostics**, **Logs**. You can see an overview of your job in the generated job graph. Refer to step 5 - 15 of [View completed Apache Spark application](#view-completed-apache-spark-application). +5. For **Job graph**, **Summary**, **Diagnostics**, **Logs**. You can see an overview of your job in the generated job graph. Refer to steps 5 - 15 of [View completed Apache Spark applications](#view-completed-apache-spark-applications). - [![view running job](./media/how-to-monitor-spark-applications/view-running-job.png)](./media/how-to-monitor-spark-applications/view-running-job.png#lightbox) + [![Screenshot of running job.](./media/how-to-monitor-spark-applications/view-running-job.png)](./media/how-to-monitor-spark-applications/view-running-job.png#lightbox) -## View canceled Apache Spark application +## View canceled Apache Spark applications -Open **Monitor**, then select **Apache Spark applications**. To view the details about the canceled Apache Spark applications, select the Apache Spark application and view the details. +Open **Monitor**, then select **Apache Spark applications**. To view the details about the canceled Apache Spark applications, select the Apache Spark application. - ![select cancelled job](./media/how-to-monitor-spark-applications/select-cancelled-job.png) + ![Screenshot of canceled job.](./media/how-to-monitor-spark-applications/select-cancelled-job.png) 1. Check the **Completed tasks**, **Status**, and **Total duration**. @@ -122,15 +123,15 @@ Open **Monitor**, then select **Apache Spark applications**. To view the details 4. Open Apache history server link by clicking **Spark history server**. -5. View the graph. You can see an overview of your job in the generated job graph. Refer to step 5 - 15 of [View completed Apache Spark application](#view-completed-apache-spark-application). +5. View the graph. You can see an overview of your job in the generated job graph. Refer to steps 5 - 15 of [View completed Apache Spark applications](#view-completed-apache-spark-applications). - [![view cancelled job](./media/how-to-monitor-spark-applications/view-cancelled-job.png)](./media/how-to-monitor-spark-applications/view-cancelled-job.png#lightbox) + [![Screenshot of canceled job details.](./media/how-to-monitor-spark-applications/view-cancelled-job.png)](./media/how-to-monitor-spark-applications/view-cancelled-job.png#lightbox) ## Debug failed Apache Spark application -Open **Monitor**, then select **Apache Spark applications**. To view the details about the failed Apache Spark applications, select the Apache Spark application and view the details. +Open **Monitor**, then select **Apache Spark applications**. To view the details about the failed Apache Spark applications, select the Apache Spark application. -![select failed job](./media/how-to-monitor-spark-applications/select-failed-job.png) + ![Screenshot of failed job.](./media/how-to-monitor-spark-applications/select-failed-job.png) 1. Check the **Completed tasks**, **Status**, and **Total duration**. 
@@ -140,14 +141,14 @@ Open **Monitor**, then select **Apache Spark applications**. To view the details 4. Open Apache history server link by clicking **Spark history server**. -5. View the graph. You can see an overview of your job in the generated job graph. Refer to step 5 - 15 of [View completed Apache Spark application](#view-completed-apache-spark-application). +5. View the graph. You can see an overview of your job in the generated job graph. Refer to steps 5 - 15 of [View completed Apache Spark applications](#view-completed-apache-spark-applications). - [![failed job info](./media/how-to-monitor-spark-applications/failed-job-info.png)](./media/how-to-monitor-spark-applications/failed-job-info.png#lightbox) + [![Screenshot of failed job details.](./media/how-to-monitor-spark-applications/failed-job-info.png)](./media/how-to-monitor-spark-applications/failed-job-info.png#lightbox) -## View input data/output data for Apache Spark Application +## View input data/output data -Select an Apache Spark application, and click on **Input data/Output data tab** to view dates of the input and output for Apache Spark application. This function can better help you debug the Spark job. And the data source supports three storage methods: gen1, gen2, and blob. +Select an Apache Spark application, and click on **Input data/Output data tab** to view dates of the input and output for Apache Spark application. This function can help you debug the Spark job. And the data source supports three storage methods: gen1, gen2, and blob. **Input data tab** @@ -159,39 +160,39 @@ Select an Apache Spark application, and click on **Input data/Output data tab** 4. You can sort the input files by clicking **Name**, **Read format**, and **path**. -5. Use the mouse hover on an input file, the icon of the **Download/Copy path/More** button will show out. +5. Use the mouse to hover over an input file, the icon of the **Download/Copy path/More** button will appear. - ![input tab](./media/how-to-monitor-spark-applications/input-tab.png) + ![Screenshot of input tab.](./media/how-to-monitor-spark-applications/input-tab.png) -6. Click on **More** button, the **Copy path/Show in explorer/Properties** show the context menu. +6. Click on **More** button. The **Copy path/Show in explorer/Properties** will appear in the context menu. - ![input more](./media/how-to-monitor-spark-applications/input-more.png) + ![Screenshot of more input menu.](./media/how-to-monitor-spark-applications/input-more.png) * Copy path: can copy **Full path** and **Relative path**. * Show in explorer: can jump to the linked storage account (Data->Linked). * Properties: show the basic properties of the file (File name/File path/Read format/Size/Modified). - ![properties image](./media/how-to-monitor-spark-applications/properties.png) + ![Screenshot of properties.](./media/how-to-monitor-spark-applications/properties.png) **Output data tab** - Have the same features as the input. + Displays the same features as the input tab. - ![output-image](./media/how-to-monitor-spark-applications/output.png) + ![Screenshot of output data.](./media/how-to-monitor-spark-applications/output.png) ## Compare Apache Spark Applications -There are two ways to compare applications. You can compare by choose a **Compare Application**, or click the **Compare in notebook** button to view it in the notebook. +There are two ways to compare applications. You can compare by choosing **Compare Application**, or click the **Compare in notebook** button to view it in the notebook. 
-### Compare by choosing an application +### Compare by application -Click on **Compare applications** button and choose an application to compare performance, you can intuitively see the difference between the two applications. +Click on **Compare applications** button and choose an application to compare performance. You can see the difference between the two applications. -![compare applications](./media/how-to-monitor-spark-applications/compare-applications.png) +![Screenshot of compare applications.](./media/how-to-monitor-spark-applications/compare-applications.png) -![details compare applications](./media/how-to-monitor-spark-applications/details-compare-applications.png) +![Screenshot of details to compare applications.](./media/how-to-monitor-spark-applications/details-compare-applications.png) -1. Use the mouse to hover on an application, and then the **Compare applications** icon is displayed. +1. Use the mouse to hover over an application, and then the **Compare applications** icon is displayed. 2. Click on the **Compare applications** icon, and the Compare applications page will pop up. @@ -199,21 +200,21 @@ Click on **Compare applications** button and choose an application to compare pe 4. When choosing the comparison application, you need to either enter the application URL, or choose from the recurring list. Then, click **OK** button. - ![choose comparison application](./media/how-to-monitor-spark-applications/choose-comparison-application.png) + ![Screenshot of choose comparison application.](./media/how-to-monitor-spark-applications/choose-comparison-application.png) 5. The comparison result will be displayed on the compare applications page. - ![comparison result](./media/how-to-monitor-spark-applications/comparison-result.png) + ![Screenshot of comparison result.](./media/how-to-monitor-spark-applications/comparison-result.png) -### Compare by Compare in notebook +### Compare in notebook Click the **Compare in Notebook** button on the **Compare applications** page to open the notebook. The default name of the *.ipynb* file is **Recurrent Application Analytics**. -![compare in notebook](./media/how-to-monitor-spark-applications/compare-in-notebook.png) +![Screenshot of compare in notebook.](./media/how-to-monitor-spark-applications/compare-in-notebook.png) In the Notebook: Recurrent Application Analytics file, you can run it directly after setting the Spark pool and Language. -![recurrent application analytics](./media/how-to-monitor-spark-applications/recurrent-application-analytics.png) +![Screenshot of recurrent application analytics.](./media/how-to-monitor-spark-applications/recurrent-application-analytics.png) ## Next steps diff --git a/articles/synapse-analytics/partner/business-intelligence.md b/articles/synapse-analytics/partner/business-intelligence.md index 2cd3cd6b0e970..7a4d4ea2db477 100644 --- a/articles/synapse-analytics/partner/business-intelligence.md +++ b/articles/synapse-analytics/partner/business-intelligence.md @@ -8,7 +8,7 @@ ms.subservice: sql-dw ms.date: 07/09/2021 author: gillharmeet ms.author: harmeetgill -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- @@ -23,7 +23,7 @@ To create your data warehouse solution, you can choose from different kinds of i | ![Birst](./media/business-intelligence/birst_logo.png) |**Birst**
                    Birst connects the entire organization through a network of interwoven virtualized BI instances on-top of a shared common analytical fabric|[Product page](https://www.birst.com/)
                    | | ![Count](./media/business-intelligence/count-logo.png) |**Count**
                    Count is the next generation SQL editor, giving you the fastest way to explore and share your data with your team. At Count's core is a data notebook built for SQL, allowing you to structure your code, iterate quickly and stay in flow. Visualize your results instantly or customize them to build beautifully detailed charts in just a few clicks. Instantly share anything from one-off queries to full interactive data stories built off any of your Azure Synapse data sources. |[Product page](https://count.co/)
                    | | ![Dremio](./media/business-intelligence/dremio-logo.png) |**Dremio**
                    Analysts and data scientists can discover, explore and curate data using Dremio's intuitive UI, while IT maintains governance and security. Dremio makes it easy to join ADLS with Blob Storage, Azure SQL Database, Azure Synapse SQL, HDInsight, and more. With Dremio, Power BI analysts can search for new datasets stored on ADLS, immediately access that data in Power BI with no preparation by IT, create visualizations, and iteratively refine reports in real-time. And analysts can create new reports that combine data between ADLS and other databases. |[Product page](https://www.dremio.com/azure/)
                    [Azure Marketplace](https://azuremarketplace.microsoft.com/marketplace/apps/dremiocorporation.dremio_ce)
                    | -| ![Dundas](./media/business-intelligence/dundas_software_logo.png) |**Dundas BI**
                    Dundas Data Visualization is a leading, global provider of Business Intelligence and Data Visualization software. Dundas dashboards, reporting, and visual data analytics provide seamless integration into business applications, enabling better decisions and faster insights.|[Product page](https://www.dundas.com/dundas-bi)
                    [Azure Marketplace](https://azuremarketplace.microsoft.com/marketplace/apps/dundas.dundas-bi)
                    | +| ![Dundas](./media/business-intelligence/dundas_software_logo.png) |**Dundas BI**
                    Dundas Data Visualization is a leading, global provider of Business Intelligence and Data Visualization software. Dundas dashboards, reporting, and visual data analytics provide seamless integration into business applications, enabling better decisions and faster insights.|[Product page](https://www.dundas.com/dundas-bi)
                    | | ![IBM Cognos](./media/business-intelligence/cognos_analytics_logo.png) |**IBM Cognos Analytics**
                    Cognos Analytics includes self-service capabilities that make it simple, clear, and easy to use, whether you're an experienced business analyst examining a vast supply chain, or a marketer optimizing a campaign. Cognos Analytics uses AI and other capabilities to guide data exploration. It makes it easier for users to get the answers they need|[Product page](https://www.ibm.com/products/cognos-analytics)
                    | | ![Information Builders](./media/business-intelligence/informationbuilders_logo.png) |**Information Builders (WebFOCUS)**
                    WebFOCUS business intelligence helps companies use data more strategically across and beyond the enterprise. It allows users and administrators to rapidly create dashboards that combine content from multiple data sources and formats. It also provides robust security and comprehensive governance that enables seamless and secure sharing of any BI and analytics content|[Product page](https://www.informationbuilders.com/products/bi-and-analytics-platform)
                    | | ![Jinfonet](./media/business-intelligence/jinfonet_logo.png) |**Jinfonet JReport**
                    JReport is an embeddable BI solution for the enterprise. The solution offers capabilities such as report creation, dashboards, and data analysis on cloud, big data, and transactional data sources. By visualizing data, you can conduct your own reporting and data discovery for agile, on-the-fly decision making. |[Product page](https://www.logianalytics.com/jreport/)
                    | diff --git a/articles/synapse-analytics/partner/data-integration.md b/articles/synapse-analytics/partner/data-integration.md index 94fe24ba60eab..20ec38d892b8f 100644 --- a/articles/synapse-analytics/partner/data-integration.md +++ b/articles/synapse-analytics/partner/data-integration.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 03/27/2019 ms.author: harmeetgill -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/partner/data-management.md b/articles/synapse-analytics/partner/data-management.md index e0970413db9b9..e7dd4c268b338 100644 --- a/articles/synapse-analytics/partner/data-management.md +++ b/articles/synapse-analytics/partner/data-management.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: harmeetgill -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/security/synapse-workspace-ip-firewall.md b/articles/synapse-analytics/security/synapse-workspace-ip-firewall.md index 1f7ae40c3525b..575f2a28e2bfa 100644 --- a/articles/synapse-analytics/security/synapse-workspace-ip-firewall.md +++ b/articles/synapse-analytics/security/synapse-workspace-ip-firewall.md @@ -38,8 +38,7 @@ You can also add IP firewall rules to a Synapse workspace after the workspace is You can connect to your Synapse workspace using Synapse Studio. You can also use SQL Server Management Studio (SSMS) to connect to the SQL resources (dedicated SQL pools and serverless SQL pool) in your workspace. -Make sure that the firewall on your network and local computer allows outgoing communication on TCP ports 80, 443 and 1433 for Synapse Studio. -For private endpoints of your workspace target resources (Sql, SqlOnDemand, Dev), allow outgoing communication on TCP port 443 and 1433, unless you have configured other custom ports. +Make sure that the firewall on your network and local computer allows outgoing communication on TCP ports 80, 443 and 1443 for Synapse Studio. Also, you need to allow outgoing communication on UDP port 53 for Synapse Studio. To connect using tools such as SSMS and Power BI, you must allow outgoing communication on TCP port 1433. diff --git a/articles/synapse-analytics/security/synapse-workspace-synapse-rbac-roles.md b/articles/synapse-analytics/security/synapse-workspace-synapse-rbac-roles.md index 654a8f3114cca..47775bde704f3 100644 --- a/articles/synapse-analytics/security/synapse-workspace-synapse-rbac-roles.md +++ b/articles/synapse-analytics/security/synapse-workspace-synapse-rbac-roles.md @@ -60,13 +60,13 @@ The following table lists the built-in roles and the actions/permissions that ea Role|Actions --|-- -Synapse Administrator|workspaces/read
                    workspaces/roleAssignments/write, delete
                    workspaces/managedPrivateEndpoint/write, delete
                    workspaces/bigDataPools/useCompute/action
                    workspaces/bigDataPools/viewLogs/action
                    workspaces/integrationRuntimes/useCompute/action
                    workspaces/integrationRuntimes/viewLogs/action
                    workspaces/artifacts/read
                    workspaces/notebooks/write, delete
                    workspaces/sparkJobDefinitions/write, delete
                    workspaces/sqlScripts/write, delete
                    workspaces/kqlScripts/write, delete
                    workspaces/dataFlows/write, delete
                    workspaces/pipelines/write, delete
                    workspaces/triggers/write, delete
                    workspaces/datasets/write, delete
                    workspaces/libraries/write, delete
                    workspaces/linkedServices/write, delete
                    workspaces/credentials/write, delete
                    workspaces/notebooks/viewOutputs/action
                    workspaces/pipelines/viewOutputs/action
                    workspaces/linkedServices/useSecret/action
                    workspaces/credentials/useSecret/action| +Synapse Administrator|workspaces/read
                    workspaces/roleAssignments/write, delete
                    workspaces/managedPrivateEndpoint/write, delete
                    workspaces/bigDataPools/useCompute/action
                    workspaces/bigDataPools/viewLogs/action
                    workspaces/integrationRuntimes/useCompute/action
                    workspaces/integrationRuntimes/viewLogs/action
                    workspaces/artifacts/read
                    workspaces/notebooks/write, delete
                    workspaces/sparkJobDefinitions/write, delete
                    workspaces/sqlScripts/write, delete
                    workspaces/kqlScripts/write, delete
                    workspaces/dataFlows/write, delete
                    workspaces/pipelines/write, delete
                    workspaces/triggers/write, delete
                    workspaces/datasets/write, delete
                    workspaces/libraries/write, delete
                    workspaces/linkedServices/write, delete
                    workspaces/credentials/write, delete
                    workspaces/notebooks/viewOutputs/action
                    workspaces/pipelines/viewOutputs/action
                    workspaces/linkedServices/useSecret/action
                    workspaces/credentials/useSecret/action
                    workspaces/linkConnections/read
                    workspaces/linkConnections/write
                    workspaces/linkConnections/delete
                    workspaces/linkConnections/useCompute/action| |Synapse Apache Spark Administrator|workspaces/read
                    workspaces/bigDataPools/useCompute/action
                    workspaces/bigDataPools/viewLogs/action
                    workspaces/notebooks/viewOutputs/action
                    workspaces/artifacts/read
                    workspaces/notebooks/write, delete
                    workspaces/sparkJobDefinitions/write, delete
                    workspaces/libraries/write, delete
                    workspaces/linkedServices/write, delete
                    workspaces/credentials/write, delete| |Synapse SQL Administrator|workspaces/read
                    workspaces/artifacts/read
                    workspaces/sqlScripts/write, delete
                    workspaces/linkedServices/write, delete
                    workspaces/credentials/write, delete| -|Synapse Contributor|workspaces/read
                    workspaces/bigDataPools/useCompute/action
                    workspaces/bigDataPools/viewLogs/action
                    workspaces/integrationRuntimes/useCompute/action
                    workspaces/integrationRuntimes/viewLogs/action
                    workspaces/artifacts/read
                    workspaces/notebooks/write, delete
                    workspaces/sparkJobDefinitions/write, delete
                    workspaces/sqlScripts/write, delete
                    workspaces/kqlScripts/write, delete
                    workspaces/dataFlows/write, delete
                    workspaces/pipelines/write, delete
                    workspaces/triggers/write, delete
                    workspaces/datasets/write, delete
                    workspaces/libraries/write, delete
                    workspaces/linkedServices/write, delete
                    workspaces/credentials/write, delete
                    workspaces/notebooks/viewOutputs/action
                    workspaces/pipelines/viewOutputs/action| +|Synapse Contributor|workspaces/read
                    workspaces/bigDataPools/useCompute/action
                    workspaces/bigDataPools/viewLogs/action
                    workspaces/integrationRuntimes/useCompute/action
                    workspaces/integrationRuntimes/viewLogs/action
                    workspaces/artifacts/read
                    workspaces/notebooks/write, delete
                    workspaces/sparkJobDefinitions/write, delete
                    workspaces/sqlScripts/write, delete
                    workspaces/kqlScripts/write, delete
                    workspaces/dataFlows/write, delete
                    workspaces/pipelines/write, delete
                    workspaces/triggers/write, delete
                    workspaces/datasets/write, delete
                    workspaces/libraries/write, delete
                    workspaces/linkedServices/write, delete
                    workspaces/credentials/write, delete
                    workspaces/notebooks/viewOutputs/action
                    workspaces/pipelines/viewOutputs/action
                    workspaces/linkConnections/read
                    workspaces/linkConnections/write
                    workspaces/linkConnections/delete
                    workspaces/linkConnections/useCompute/action| |Synapse Artifact Publisher|workspaces/read
                    workspaces/artifacts/read
                    workspaces/notebooks/write, delete
                    workspaces/sparkJobDefinitions/write, delete
                    workspaces/sqlScripts/write, delete
                    workspaces/kqlScripts/write, delete
                    workspaces/dataFlows/write, delete
                    workspaces/pipelines/write, delete
                    workspaces/triggers/write, delete
                    workspaces/datasets/write, delete
                    workspaces/libraries/write, delete
                    workspaces/linkedServices/write, delete
                    workspaces/credentials/write, delete
                    workspaces/notebooks/viewOutputs/action
                    workspaces/pipelines/viewOutputs/action| |Synapse Artifact User|workspaces/read
                    workspaces/artifacts/read
                    workspaces/notebooks/viewOutputs/action
                    workspaces/pipelines/viewOutputs/action| -|Synapse Compute Operator |workspaces/read
                    workspaces/bigDataPools/useCompute/action
                    workspaces/bigDataPools/viewLogs/action
                    workspaces/integrationRuntimes/useCompute/action
                    workspaces/integrationRuntimes/viewLogs/action| +|Synapse Compute Operator |workspaces/read
                    workspaces/bigDataPools/useCompute/action
                    workspaces/bigDataPools/viewLogs/action
                    workspaces/integrationRuntimes/useCompute/action
                    workspaces/integrationRuntimes/viewLogs/action
                    workspaces/linkConnections/read
                    workspaces/linkConnections/useCompute/action| |Synapse Monitoring Operator |workspaces/read
                    workspaces/artifacts/read
                    workspaces/notebooks/viewOutputs/action
                    workspaces/pipelines/viewOutputs/action
                    workspaces/integrationRuntimes/viewLogs/action
                    workspaces/bigDataPools/viewLogs/action| |Synapse Credential User|workspaces/read
                    workspaces/linkedServices/useSecret/action
                    workspaces/credentials/useSecret/action| |Synapse Linked Data Manager|workspaces/read
                    workspaces/managedPrivateEndpoint/write, delete
                    workspaces/linkedServices/write, delete
                    workspaces/credentials/write, delete| @@ -85,6 +85,8 @@ workspaces/bigDataPools/useCompute/action|Synapse Administrator
                    Synapse Apac workspaces/bigDataPools/viewLogs/action|Synapse Administrator
                    Synapse Apache Spark Administrator
                    Synapse Contributor
                    Synapse Compute Operator workspaces/integrationRuntimes/useCompute/action|Synapse Administrator
                    Synapse Contributor
                    Synapse Compute Operator
                    Synapse Monitoring Operator workspaces/integrationRuntimes/viewLogs/action|Synapse Administrator
                    Synapse Contributor
                    Synapse Compute Operator
                    Synapse Monitoring Operator +workspaces/linkConnections/read|Synapse Administrator
                    Synapse Contributor
                    Synapse Compute Operator +workspaces/linkConnections/useCompute/action|Synapse Administrator
                    Synapse Contributor
                    Synapse Compute Operator workspaces/artifacts/read|Synapse Administrator
                    Synapse Apache Spark Administrator
                    Synapse SQL Administrator
                    Synapse Contributor
                    Synapse Artifact Publisher
                    Synapse Artifact User workspaces/notebooks/write, delete|Synapse Administrator
                    Synapse Apache Spark Administrator
                    Synapse Contributor
                    Synapse Artifact Publisher workspaces/sparkJobDefinitions/write, delete|Synapse Administrator
                    Synapse Apache Spark Administrator
                    Synapse Contributor
                    Synapse Artifact Publisher @@ -92,6 +94,7 @@ workspaces/sqlScripts/write, delete|Synapse Administrator
                    Synapse SQL Admini workspaces/kqlScripts/write, delete|Synapse Administrator
                    Synapse Contributor
                    Synapse Artifact Publisher workspaces/dataFlows/write, delete|Synapse Administrator
                    Synapse Contributor
                    Synapse Artifact Publisher workspaces/pipelines/write, delete|Synapse Administrator
                    Synapse Contributor
                    Synapse Artifact Publisher +workspaces/linkConnections/write, delete|Synapse Administrator
                    Synapse Contributor workspaces/triggers/write, delete|Synapse Administrator
                    Synapse Contributor
                    Synapse Artifact Publisher workspaces/datasets/write, delete|Synapse Administrator
                    Synapse Contributor
                    Synapse Artifact Publisher workspaces/libraries/write, delete|Synapse Administrator
                    Synapse Apache Spark Administrator
                    Synapse Contributor
                    Synapse Artifact Publisher diff --git a/articles/synapse-analytics/spark/apache-spark-gpu-concept.md b/articles/synapse-analytics/spark/apache-spark-gpu-concept.md index af884469a76bc..6ed9b91f25aab 100644 --- a/articles/synapse-analytics/spark/apache-spark-gpu-concept.md +++ b/articles/synapse-analytics/spark/apache-spark-gpu-concept.md @@ -5,11 +5,12 @@ author: midesa ms.service: synapse-analytics ms.topic: overview ms.subservice: spark -ms.date: 11/10/2021 +ms.date: 4/10/2022 ms.author: midesa --- -# GPU-accelerated Apache Spark pools in Azure Synapse Analytics +# GPU-accelerated Apache Spark pools in Azure Synapse Analytics (Preview) + Azure Synapse Analytics now supports Apache Spark pools accelerated with graphics processing units (GPUs). By using NVIDIA GPUs, data scientists and engineers can reduce the time necessary to run data integration pipelines, score machine learning models, and more. This article describes how GPU-accelerated pools can be created and used with Azure Synapse Analytics. This article also details the GPU drivers and libraries that are pre-installed as part of the GPU-accelerated runtime. @@ -18,6 +19,7 @@ By using NVIDIA GPUs, data scientists and engineers can reduce the time necessar > Azure Synapse GPU-enabled pools are currently in Public Preview. ## Create a GPU-accelerated pool + To simplify the process for creating and managing pools, Azure Synapse takes care of pre-installing low-level libraries and setting up all the complex networking requirements between compute nodes. This integration allows users to get started with GPU- accelerated pools within just a few minutes. To learn more about how to create a GPU-accelerated pool, you can visit the quickstart on how to [create a GPU-accelerated pool](../quickstart-create-apache-gpu-pool-portal.md). > [!NOTE] @@ -28,6 +30,7 @@ To simplify the process for creating and managing pools, Azure Synapse takes car ## GPU-accelerated runtime ### NVIDIA GPU driver, CUDA, and cuDNN + Azure Synapse Analytics now offers GPU-accelerated Apache Spark pools, which include various NVIDIA libraries and configurations. By default, Azure Synapse Analytics installs the NVIDIA driver and libraries required to use GPUs on Spark driver and worker instances: - CUDA 11.2 - libnccl2=2.8.4 @@ -39,6 +42,7 @@ Azure Synapse Analytics now offers GPU-accelerated Apache Spark pools, which inc > This software contains source code provided by NVIDIA Corporation. Specifically, to support the GPU-accelerated pools, Azure Synapse Apache Spark pools include code from [CUDA Samples](https://docs.nvidia.com/cuda/eula/#nvidia-cuda-samples-preface). 
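+
+If you want to confirm from a notebook that the preinstalled GPU stack is visible on a GPU-accelerated pool, a quick, illustrative check is sketched below. Exact package availability and versions depend on the runtime, so treat this as a sanity check rather than a definitive test.
+
+```python
+# Illustrative check of the GPU stack on a GPU-accelerated Spark pool.
+import subprocess
+
+# nvidia-smi reports the installed NVIDIA driver and CUDA version on GPU-enabled nodes.
+print(subprocess.run(["nvidia-smi"], capture_output=True, text=True).stdout)
+
+try:
+    import torch  # deep learning frameworks may be preinstalled on GPU runtimes
+    print("CUDA available:", torch.cuda.is_available())
+except ImportError:
+    print("PyTorch is not installed on this pool.")
+```
+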
### NVIDIA End User License Agreement (EULA) + When you select a GPU-accelerated Hardware option in Synapse Spark, you implicitly agree to the terms and conditions outlined in the NVIDIA EULA with respect to: - CUDA 11.2: [EULA :: CUDA Toolkit Documentation (nvidia.com)](https://docs.nvidia.com/cuda/eula/index.html) - libnccl2=2.8.4: [nccl/LICENSE.txt at master · NVIDIA/nccl (github.com)](https://github.com/NVIDIA/nccl/blob/master/LICENSE.txt) @@ -48,12 +52,23 @@ When you select a GPU-accelerated Hardware option in Synapse Spark, you implicit - The CUDA, NCCL, and cuDNN libraries, and the [NVIDIA End User License Agreement (with NCCL Supplement)](https://docs.nvidia.com/deeplearning/nccl/sla/index.html#overview) for the NCCL library ## Accelerate ETL workloads + With built-in support for NVIDIA’s [RAPIDS Accelerator for Apache Spark](https://nvidia.github.io/spark-rapids/), GPU-accelerated Spark pools in Azure Synapse can provide significant performance improvements compared to standard analytical benchmarks without requiring any code changes. Built on top of NVIDIA CUDA and UCX, NVIDIA RAPIDS enables GPU-accelerated SQL, DataFrame operations, and Spark shuffles. Since there are no code changes required to leverage these accelerations, users can also accelerate their data pipelines that rely on Linux Foundation’s Delta Lake or Microsoft’s Hyperspace indexing. To learn more about how you can use the NVIDIA RAPIDS Accelerator with your GPU-accelerated pool in Azure Synapse Analytics, visit this guide on how to [improve performance with RAPIDS](apache-spark-rapids-gpu.md). +## Train deep learning models + +Deep learning models are often data and computation intensive. Because of this, organizations often accelerate their training process with GPU-enabled clusters. In Azure Synapse Analytics, organizations can build models using frameworks like Tensorflow and PyTorch. Then, users can scale up their deep learning models with Horovod and Petastorm. + +To learn more about how you can train distributed deep learning models, visit the following guides: + - [Tutorial: Distributed training with Horovod and Tensorflow](../machine-learning/tutorial-horovod-tensorflow.md) + - [Tutorial: Distributed training with Horovod and PyTorch](../machine-learning/tutorial-horovod-pytorch.md) + ## Improve machine learning scoring workloads + Many organizations rely on large batch scoring jobs to frequently execute during narrow windows of time. To achieve improved batch scoring jobs, you can also use GPU-accelerated Spark pools with Microsoft’s [Hummingbird library](https://github.com/Microsoft/hummingbird). With Hummingbird, users can take their traditional, tree-based ML models and compile them into tensor computations. Hummingbird allows users to then seamlessly leverage native hardware acceleration and neural network frameworks to accelerate their ML model scoring without needing to rewrite their models. 
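+
+The Hummingbird workflow described above can be sketched as follows; the model and data are synthetic placeholders, and the `hummingbird-ml` package is assumed to be installed on the pool (for example, as a workspace or pool library).
+
+```python
+# Sketch: compile a tree-based scikit-learn model into tensor computations with Hummingbird.
+import numpy as np
+from sklearn.ensemble import RandomForestClassifier
+from hummingbird.ml import convert
+
+# Synthetic training data used only for illustration.
+X = np.random.rand(1000, 20).astype(np.float32)
+y = np.random.randint(2, size=1000)
+
+skl_model = RandomForestClassifier(n_estimators=50).fit(X, y)
+
+# Convert the traditional model into tensor operations on the PyTorch backend.
+hb_model = convert(skl_model, "pytorch")
+hb_model.to("cuda")               # move scoring to the GPU when one is available
+predictions = hb_model.predict(X) # same predict interface, now hardware accelerated
+```
+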
## Next steps + - [Azure Synapse Analytics](../overview-what-is.md) diff --git a/articles/synapse-analytics/spark/apache-spark-overview.md b/articles/synapse-analytics/spark/apache-spark-overview.md index 0b86b40c89ccb..ea2602eca2d3c 100644 --- a/articles/synapse-analytics/spark/apache-spark-overview.md +++ b/articles/synapse-analytics/spark/apache-spark-overview.md @@ -1,5 +1,5 @@ --- -title: What is Apache Spark +title: Apache Spark in Azure Synapse Analytics overview description: This article provides an introduction to Apache Spark in Azure Synapse Analytics and the different scenarios in which you can use Spark. services: synapse-analytics author: juluczni @@ -7,65 +7,64 @@ ms.author: juluczni ms.service: synapse-analytics ms.topic: overview ms.subservice: spark -ms.date: 02/15/2022 +ms.date: 05/23/2022 ms.reviewer: euang +ms.custom: kr2b-contr-experiment --- # Apache Spark in Azure Synapse Analytics -Apache Spark is a parallel processing framework that supports in-memory processing to boost the performance of big-data analytic applications. Apache Spark in Azure Synapse Analytics is one of Microsoft's implementations of Apache Spark in the cloud. Azure Synapse makes it easy to create and configure a serverless Apache Spark pool in Azure. Spark pools in Azure Synapse are compatible with Azure Storage and Azure Data Lake Generation 2 Storage. So you can use Spark pools to process your data stored in Azure. +Apache Spark is a parallel processing framework that supports in-memory processing to boost the performance of big data analytic applications. Apache Spark in Azure Synapse Analytics is one of Microsoft's implementations of Apache Spark in the cloud. Azure Synapse makes it easy to create and configure a serverless Apache Spark pool in Azure. Spark pools in Azure Synapse are compatible with Azure Storage and Azure Data Lake Generation 2 Storage. So you can use Spark pools to process your data stored in Azure. -![Spark: a unified framework](./media/apache-spark-overview/spark-overview.png) +![Diagram shows Spark SQL, Spark MLib, and GraphX linked to the Spark core engine, above a YARN layer over storage services.](./media/apache-spark-overview/spark-overview.png) ## What is Apache Spark Apache Spark provides primitives for in-memory cluster computing. A Spark job can load and cache data into memory and query it repeatedly. In-memory computing is much faster than disk-based applications. Spark also integrates with multiple programming languages to let you manipulate distributed data sets like local collections. There's no need to structure everything as map and reduce operations. -![Traditional MapReduce vs. Spark](./media/apache-spark-overview/map-reduce-vs-spark.png) +![Diagram shows Traditional MapReduce, with disk-based apps and Spark, with cache-based operations.](./media/apache-spark-overview/map-reduce-vs-spark.png) Spark pools in Azure Synapse offer a fully managed Spark service. The benefits of creating a Spark pool in Azure Synapse Analytics are listed here. | Feature | Description | | --- | --- | -| Speed and efficiency |Spark instances start in approximately 2 minutes for fewer than 60 nodes and approximately 5 minutes for more than 60 nodes. The instance shuts down, by default, 5 minutes after the last job executed unless it is kept alive by a notebook connection. | +| Speed and efficiency |Spark instances start in approximately 2 minutes for fewer than 60 nodes and approximately 5 minutes for more than 60 nodes. 
The instance shuts down, by default, 5 minutes after the last job runs unless it's kept alive by a notebook connection. | | Ease of creation |You can create a new Spark pool in Azure Synapse in minutes using the Azure portal, Azure PowerShell, or the Synapse Analytics .NET SDK. See [Get started with Spark pools in Azure Synapse Analytics](../quickstart-create-apache-spark-pool-studio.md). | -| Ease of use |Synapse Analytics includes a custom notebook derived from [Nteract](https://nteract.io/). You can use these notebooks for interactive data processing and visualization.| +| Ease of use |Synapse Analytics includes a custom notebook derived from [nteract](https://nteract.io/). You can use these notebooks for interactive data processing and visualization.| | REST APIs |Spark in Azure Synapse Analytics includes [Apache Livy](https://github.com/cloudera/hue/tree/master/apps/spark/java#welcome-to-livy-the-rest-spark-server), a REST API-based Spark job server to remotely submit and monitor jobs. | -| Support for Azure Data Lake Storage Generation 2| Spark pools in Azure Synapse can use Azure Data Lake Storage Generation 2 as well as BLOB storage. For more information on Data Lake Storage, see [Overview of Azure Data Lake Storage](../../data-lake-store/data-lake-store-overview.md). | +| Support for Azure Data Lake Storage Generation 2| Spark pools in Azure Synapse can use Azure Data Lake Storage Generation 2 and BLOB storage. For more information on Data Lake Storage, see [Overview of Azure Data Lake Storage](../../data-lake-store/data-lake-store-overview.md). | | Integration with third-party IDEs | Azure Synapse provides an IDE plugin for [JetBrains' IntelliJ IDEA](https://www.jetbrains.com/idea/) that is useful to create and submit applications to a Spark pool. | -| Pre-loaded Anaconda libraries |Spark pools in Azure Synapse come with Anaconda libraries pre-installed. [Anaconda](https://docs.continuum.io/anaconda/) provides close to 200 libraries for machine learning, data analysis, visualization, etc. | +| Preloaded Anaconda libraries |Spark pools in Azure Synapse come with Anaconda libraries preinstalled. [Anaconda](https://docs.continuum.io/anaconda/) provides close to 200 libraries for machine learning, data analysis, visualization, and other technologies. | | Scalability | Apache Spark in Azure Synapse pools can have Auto-Scale enabled, so that pools scale by adding or removing nodes as needed. Also, Spark pools can be shut down with no loss of data since all the data is stored in Azure Storage or Data Lake Storage. | -Spark pools in Azure Synapse include the following components that are available on the pools by default. +Spark pools in Azure Synapse include the following components that are available on the pools by default: - [Spark Core](https://spark.apache.org/docs/2.4.5/). Includes Spark Core, Spark SQL, GraphX, and MLlib. - [Anaconda](https://docs.continuum.io/anaconda/) - [Apache Livy](https://github.com/cloudera/hue/tree/master/apps/spark/java#welcome-to-livy-the-rest-spark-server) -- [Nteract notebook](https://nteract.io/) +- [nteract notebook](https://nteract.io/) ## Spark pool architecture -It is easy to understand the components of Spark by understanding how Spark runs on Azure Synapse Analytics. +Spark applications run as independent sets of processes on a pool, coordinated by the `SparkContext` object in your main program, called the *driver program*. 
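The following minimal PySpark sketch (illustrative only, not from the article) shows the driver/executor split described here. In a Synapse notebook the `spark` session and its `SparkContext` are created for you; the sketch builds them explicitly only so that it is self-contained.

```python
# Illustrative only: a tiny job showing the driver/executor split described above.
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("architecture-sketch").getOrCreate()
sc = spark.sparkContext  # the driver-side entry point discussed in this section

# The driver defines the computation; executors on the pool's nodes run the tasks.
rdd = sc.parallelize(range(1_000_000), numSlices=8)
total = rdd.map(lambda x: x * x).sum()  # tasks run on executors, the result returns to the driver
print(total)
```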
-Spark applications run as independent sets of processes on a pool, coordinated by the SparkContext object in your main program (called the driver program). +The `SparkContext` can connect to the cluster manager, which allocates resources across applications. The cluster manager is [Apache Hadoop YARN](https://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/YARN.html). Once connected, Spark acquires executors on nodes in the pool, which are processes that run computations and store data for your application. Next, it sends your application code, defined by JAR or Python files passed to `SparkContext`, to the executors. Finally, `SparkContext` sends tasks to the executors to run. -The SparkContext can connect to the cluster manager, which allocates resources across applications. The cluster manager is [Apache Hadoop YARN](https://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/YARN.html). Once connected, Spark acquires executors on nodes in the pool, which are processes that run computations and store data for your application. Next, it sends your application code (defined by JAR or Python files passed to SparkContext) to the executors. Finally, SparkContext sends tasks to the executors to run. +The `SparkContext` runs the user's main function and executes the various parallel operations on the nodes. Then, the `SparkContext` collects the results of the operations. The nodes read and write data from and to the file system. The nodes also cache transformed data in-memory as Resilient Distributed Datasets (RDDs). -The SparkContext runs the user's main function and executes the various parallel operations on the nodes. Then, the SparkContext collects the results of the operations. The nodes read and write data from and to the file system. The nodes also cache transformed data in-memory as Resilient Distributed Datasets (RDDs). - -The SparkContext connects to the Spark pool and is responsible for converting an application to a directed acyclic graph (DAG). The graph consists of individual tasks that get executed within an executor process on the nodes. Each application gets its own executor processes, which stay up for the duration of the whole application and run tasks in multiple threads. +The `SparkContext` connects to the Spark pool and is responsible for converting an application to a directed acyclic graph (DAG). The graph consists of individual tasks that run within an executor process on the nodes. Each application gets its own executor processes, which stay up during the whole application and run tasks in multiple threads. ## Apache Spark in Azure Synapse Analytics use cases Spark pools in Azure Synapse Analytics enable the following key scenarios: -### Data Engineering/Data Preparation +- Data Engineering/Data Preparation -Apache Spark includes many language features to support preparation and processing of large volumes of data so that it can be made more valuable and then consumed by other services within Azure Synapse Analytics. This is enabled through multiple languages (C#, Scala, PySpark, Spark SQL) and supplied libraries for processing and connectivity. + Apache Spark includes language features to support preparation and processing of large volumes of data so that it can be made more valuable and then consumed by other services within Azure Synapse Analytics. This approach is enabled through multiple languages, including C#, Scala, PySpark, and Spark SQL, and supplied libraries for processing and connectivity. 
-### Machine Learning +- Machine Learning -Apache Spark comes with [MLlib](https://spark.apache.org/mllib/), a machine learning library built on top of Spark that you can use from a Spark pool in Azure Synapse Analytics. Spark pools in Azure Synapse Analytics also include Anaconda, a Python distribution with a variety of packages for data science including machine learning. When combined with built-in support for notebooks, you have an environment for creating machine learning applications. + Apache Spark comes with [MLlib](https://spark.apache.org/mllib/), a machine learning library built on top of Spark that you can use from a Spark pool in Azure Synapse Analytics. Spark pools in Azure Synapse Analytics also include Anaconda, a Python distribution with various packages for data science including machine learning. When combined with built-in support for notebooks, you have an environment for creating machine learning applications. ## Where do I start @@ -77,10 +76,10 @@ Use the following articles to learn more about Apache Spark in Azure Synapse Ana - [Apache Spark official documentation](https://spark.apache.org/docs/2.4.5/) > [!NOTE] -> Some of the official Apache Spark documentation relies on using the spark console, this is not available on Azure Synapse Spark, use the notebook or IntelliJ experiences instead +> Some of the official Apache Spark documentation relies on using the Spark console, which is not available on Azure Synapse Spark. Use the notebook or IntelliJ experiences instead. ## Next steps -In this overview, you get a basic understanding of Apache Spark in Azure Synapse Analytics. Advance to the next article to learn how to create a Spark pool in Azure Synapse Analytics: +This overview provided a basic understanding of Apache Spark in Azure Synapse Analytics. Advance to the next article to learn how to create a Spark pool in Azure Synapse Analytics: - [Create a Spark pool in Azure Synapse](../quickstart-create-apache-spark-pool-portal.md) diff --git a/articles/synapse-analytics/spark/apache-spark-rapids-gpu.md b/articles/synapse-analytics/spark/apache-spark-rapids-gpu.md index 2e3de4c679882..460e81af7d90b 100644 --- a/articles/synapse-analytics/spark/apache-spark-rapids-gpu.md +++ b/articles/synapse-analytics/spark/apache-spark-rapids-gpu.md @@ -171,7 +171,7 @@ Most Spark jobs can see improved performance through tuning configuration settin ### Workspace level -Every Azure Synapse workspace comes with a default quota of 50 GPU vCores. In order to increase your quota of GPU cores, send an email to AzureSynapseGPU@microsoft.com with your workspace name, the region, and the total GPU quota required for your workload. +Every Azure Synapse workspace comes with a default quota of 50 GPU vCores. In order to increase your quota of GPU cores, please [submit a support request through the Azure portal](../../synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-create-support-ticket.md). 
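As a hedged illustration of the configuration tuning this section refers to, the snippet below adjusts two commonly tuned RAPIDS Accelerator settings from a notebook cell. The configuration keys come from the open-source RAPIDS Accelerator for Apache Spark project rather than from this article, the values are placeholders, not recommendations, and the predefined `spark` session of a Synapse notebook is assumed.

```python
# Illustrative only: commonly tuned RAPIDS Accelerator settings.
# Keys are from the open-source RAPIDS Accelerator project; values are placeholders.
# Assumes the `spark` session that Synapse notebooks provide by default.
spark.conf.set("spark.rapids.sql.enabled", "true")          # turn SQL/DataFrame acceleration on or off
spark.conf.set("spark.rapids.sql.concurrentGpuTasks", "2")  # tasks allowed to share a GPU concurrently

# Read back the current value to verify the setting took effect.
print(spark.conf.get("spark.rapids.sql.enabled"))
```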
## Next steps - [Azure Synapse Analytics](../overview-what-is.md) diff --git a/articles/synapse-analytics/spark/synapse-spark-sql-pool-import-export.md b/articles/synapse-analytics/spark/synapse-spark-sql-pool-import-export.md index 557466b445e8b..13f0587f07087 100644 --- a/articles/synapse-analytics/spark/synapse-spark-sql-pool-import-export.md +++ b/articles/synapse-analytics/spark/synapse-spark-sql-pool-import-export.md @@ -195,17 +195,22 @@ This section presents reference code templates to describe how to use and invoke #### Read Request - `synapsesql` method signature +##### [Scala](#tab/scala) + ```Scala synapsesql(tableName:String) => org.apache.spark.sql.DataFrame ``` +##### [Python](#tab/python) + ```python synapsesql(table_name: str) -> org.apache.spark.sql.DataFrame ``` +--- #### Read using Azure AD based authentication -##### [Scala](#tab/scala) +##### [Scala](#tab/scala1) ```Scala //Use case is to read data from an internal table in Synapse Dedicated SQL Pool DB @@ -234,7 +239,7 @@ val dfToReadFromTable:DataFrame = spark.read. dfToReadFromTable.show() ``` -##### [Python](#tab/python) +##### [Python](#tab/python1) ```python # Add required imports @@ -261,10 +266,11 @@ dfToReadFromTable = (spark.read # Show contents of the dataframe dfToReadFromTable.show() ``` +--- #### Read using basic authentication -##### [Scala](#tab/scala1) +##### [Scala](#tab/scala2) ```Scala //Use case is to read data from an internal table in Synapse Dedicated SQL Pool DB @@ -298,7 +304,7 @@ val dfToReadFromTable:DataFrame = spark.read. dfToReadFromTable.show() ``` -##### [Python](#tab/python1) +##### [Python](#tab/python2) ```python # Add required imports @@ -332,6 +338,7 @@ dfToReadFromTable = (spark.read dfToReadFromTable.show() ``` +--- ### Write to Azure Synapse Dedicated SQL Pool @@ -349,6 +356,8 @@ synapsesql(tableName:String, * Spark Pool Version 3.1.2 +##### [Scala](#tab/scala3) + ```Scala synapsesql(tableName:String, tableType:String = Constants.INTERNAL, @@ -356,15 +365,18 @@ synapsesql(tableName:String, callBackHandle=Option[(Map[String, Any], Option[Throwable])=>Unit]):Unit ``` +##### [Python](#tab/python3) + ```python synapsesql(table_name: str, table_type: str = Constants.INTERNAL, location: str = None) -> None ``` +--- #### Write using Azure AD based authentication Following is a comprehensive code template that describes how to use the Connector for write scenarios: -##### [Scala](#tab/scala2) +##### [Scala](#tab/scala4) ```Scala //Add required imports @@ -423,7 +435,7 @@ readDF. if(errorDuringWrite.isDefined) throw errorDuringWrite.get ``` -##### [Python](#tab/python2) +##### [Python](#tab/python4) ```python @@ -475,12 +487,13 @@ from com.microsoft.spark.sqlanalytics.Constants import Constants "/path/to/external/table")) ``` +--- #### Write using basic authentication Following code snippet replaces the write definition described in the [Write using Azure AD based authentication](#write-using-azure-ad-based-authentication) section, to submit write request using SQL basic authentication approach: -##### [Scala](#tab/scala3) +##### [Scala](#tab/scala5) ```Scala //Define write options to use SQL basic authentication @@ -509,7 +522,7 @@ readDF. 
callBackHandle = Some(callBackFunctionToReceivePostWriteMetrics)) ``` -##### [Python](#tab/python3) +##### [Python](#tab/python5) ```python # Write using Basic Auth to Internal table @@ -570,6 +583,7 @@ from com.microsoft.spark.sqlanalytics.Constants import Constants "/path/to/external/table")) ``` +--- In a basic authentication approach, in order to read data from a source storage path other configuration options are required. Following code snippet provides an example to read from an Azure Data Lake Storage Gen2 data source using Service Principal credentials: @@ -700,9 +714,9 @@ Spark DataFrame's `createOrReplaceTempView` can be used to access data fetched i * Now, change the language preference on the Notebook to `PySpark (Python)` and fetch data from the registered view `` - ```Python +```Python spark.sql("select * from ").show() - ``` +``` ### Response handling diff --git a/articles/synapse-analytics/sql-data-warehouse/cheat-sheet.md b/articles/synapse-analytics/sql-data-warehouse/cheat-sheet.md index f2d4afe47e544..c588de4f5bad4 100644 --- a/articles/synapse-analytics/sql-data-warehouse/cheat-sheet.md +++ b/articles/synapse-analytics/sql-data-warehouse/cheat-sheet.md @@ -8,7 +8,7 @@ ms.topic: overview ms.subservice: sql-dw ms.date: 11/04/2019 ms.author: martinle -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Cheat sheet for dedicated SQL pool (formerly SQL DW) in Azure Synapse Analytic diff --git a/articles/synapse-analytics/sql-data-warehouse/column-level-security.md b/articles/synapse-analytics/sql-data-warehouse/column-level-security.md index 4636f7fc73104..2796817263057 100644 --- a/articles/synapse-analytics/sql-data-warehouse/column-level-security.md +++ b/articles/synapse-analytics/sql-data-warehouse/column-level-security.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/19/2020 ms.author: nanditav -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 tags: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-azure-cli.md b/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-azure-cli.md index 617d51f52679c..d9b5cf8247ffc 100644 --- a/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-azure-cli.md +++ b/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-azure-cli.md @@ -7,7 +7,8 @@ ms.topic: quickstart ms.subservice: sql-dw ms.date: 11/20/2020 ms.author: wiassaf -ms.custom: azure-synapse, devx-track-azurecli, mode-api +ms.tool: azure-cli +ms.custom: azure-synapse, mode-api --- # Quickstart: Create a Synapse SQL pool with Azure CLI diff --git a/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-portal.md b/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-portal.md index a1850c73e8f71..c0d20727f623d 100644 --- a/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-portal.md +++ b/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-portal.md @@ -4,7 +4,7 @@ description: Create and query a dedicated SQL pool (formerly SQL DW) using the A author: pimorano ms.author: pimorano manager: craigg -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.date: 05/28/2019 ms.topic: quickstart ms.service: synapse-analytics diff --git a/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-powershell.md b/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-powershell.md index cd13bc83bacb9..2f6d0b6df42f9 100644 --- 
a/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-powershell.md +++ b/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-powershell.md @@ -4,7 +4,7 @@ description: Quickly create a dedicated SQL pool (formerly SQL DW) with a server author: joannapea ms.author: joanpo manager: craigg -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.date: 4/11/2019 ms.topic: quickstart ms.service: synapse-analytics diff --git a/articles/synapse-analytics/sql-data-warehouse/design-elt-data-loading.md b/articles/synapse-analytics/sql-data-warehouse/design-elt-data-loading.md index d6d0d9e193097..11926fe1d6794 100644 --- a/articles/synapse-analytics/sql-data-warehouse/design-elt-data-loading.md +++ b/articles/synapse-analytics/sql-data-warehouse/design-elt-data-loading.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 11/20/2020 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/disable-geo-backup.md b/articles/synapse-analytics/sql-data-warehouse/disable-geo-backup.md index 3bdb35a7ea664..ed2facf17a75e 100644 --- a/articles/synapse-analytics/sql-data-warehouse/disable-geo-backup.md +++ b/articles/synapse-analytics/sql-data-warehouse/disable-geo-backup.md @@ -2,13 +2,13 @@ title: Disable geo-backups description: How-to guide for disabling geo-backups for a dedicated SQL pool (formerly SQL DW) in Azure Synapse Analytics author: joannapea -manager: igorstan +manager: ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 01/06/2021 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/fivetran-quickstart.md b/articles/synapse-analytics/sql-data-warehouse/fivetran-quickstart.md index 434b5d28e5b43..f0053d4be2c9b 100644 --- a/articles/synapse-analytics/sql-data-warehouse/fivetran-quickstart.md +++ b/articles/synapse-analytics/sql-data-warehouse/fivetran-quickstart.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 10/12/2018 ms.author: martinle -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/load-data-from-azure-blob-storage-using-copy.md b/articles/synapse-analytics/sql-data-warehouse/load-data-from-azure-blob-storage-using-copy.md index 3bdd62346b252..bc5398c242ac1 100644 --- a/articles/synapse-analytics/sql-data-warehouse/load-data-from-azure-blob-storage-using-copy.md +++ b/articles/synapse-analytics/sql-data-warehouse/load-data-from-azure-blob-storage-using-copy.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 11/23/2020 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/load-data-wideworldimportersdw.md b/articles/synapse-analytics/sql-data-warehouse/load-data-wideworldimportersdw.md index 6ba9bab6aa92d..6bc180870a707 100644 --- a/articles/synapse-analytics/sql-data-warehouse/load-data-wideworldimportersdw.md +++ b/articles/synapse-analytics/sql-data-warehouse/load-data-wideworldimportersdw.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 01/12/2021 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, synapse-analytics --- diff --git a/articles/synapse-analytics/sql-data-warehouse/manage-compute-with-azure-functions.md 
b/articles/synapse-analytics/sql-data-warehouse/manage-compute-with-azure-functions.md index 9d238bfb650fa..987c0a137760f 100644 --- a/articles/synapse-analytics/sql-data-warehouse/manage-compute-with-azure-functions.md +++ b/articles/synapse-analytics/sql-data-warehouse/manage-compute-with-azure-functions.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/27/2018 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/massively-parallel-processing-mpp-architecture.md b/articles/synapse-analytics/sql-data-warehouse/massively-parallel-processing-mpp-architecture.md index ee11a0dece350..f33b1f7e0550d 100644 --- a/articles/synapse-analytics/sql-data-warehouse/massively-parallel-processing-mpp-architecture.md +++ b/articles/synapse-analytics/sql-data-warehouse/massively-parallel-processing-mpp-architecture.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 11/04/2019 ms.author: martinle -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Dedicated SQL pool (formerly SQL DW) architecture in Azure Synapse Analytics diff --git a/articles/synapse-analytics/sql-data-warehouse/pause-and-resume-compute-portal.md b/articles/synapse-analytics/sql-data-warehouse/pause-and-resume-compute-portal.md index 91c8e1045aed6..d9c9abfd1fbc4 100644 --- a/articles/synapse-analytics/sql-data-warehouse/pause-and-resume-compute-portal.md +++ b/articles/synapse-analytics/sql-data-warehouse/pause-and-resume-compute-portal.md @@ -4,7 +4,7 @@ description: Use the Azure portal to pause compute for dedicated SQL pool to sav author: WilliamDAssafMSFT ms.author: wiassaf manager: craigg -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.date: 11/23/2020 ms.topic: quickstart ms.service: synapse-analytics diff --git a/articles/synapse-analytics/sql-data-warehouse/pause-and-resume-compute-powershell.md b/articles/synapse-analytics/sql-data-warehouse/pause-and-resume-compute-powershell.md index e9e6938dea126..4cbec486a8efc 100644 --- a/articles/synapse-analytics/sql-data-warehouse/pause-and-resume-compute-powershell.md +++ b/articles/synapse-analytics/sql-data-warehouse/pause-and-resume-compute-powershell.md @@ -4,7 +4,7 @@ description: You can use Azure PowerShell to pause and resume dedicated SQL pool author: WilliamDAssafMSFT ms.author: wiassaf manager: craigg -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.date: 03/20/2019 ms.topic: quickstart ms.service: synapse-analytics diff --git a/articles/synapse-analytics/sql-data-warehouse/performance-tuning-materialized-views.md b/articles/synapse-analytics/sql-data-warehouse/performance-tuning-materialized-views.md index a6e7880e42292..4c3271eee7702 100644 --- a/articles/synapse-analytics/sql-data-warehouse/performance-tuning-materialized-views.md +++ b/articles/synapse-analytics/sql-data-warehouse/performance-tuning-materialized-views.md @@ -1,14 +1,13 @@ --- title: Performance tune with materialized views description: Learn about recommendations and considerations you should know as you use materialized views to improve your query performance. 
-author: XiaoyuMSFT -manager: craigg ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 08/17/2021 +author: XiaoyuMSFT ms.author: xiaoyul -ms.reviewer: nibruno; jrasnick; azure-synapse +ms.reviewer: wiassaf --- # Performance tune with materialized views diff --git a/articles/synapse-analytics/sql-data-warehouse/performance-tuning-ordered-cci.md b/articles/synapse-analytics/sql-data-warehouse/performance-tuning-ordered-cci.md index eb7890c6a97ea..4dfebcafa350f 100644 --- a/articles/synapse-analytics/sql-data-warehouse/performance-tuning-ordered-cci.md +++ b/articles/synapse-analytics/sql-data-warehouse/performance-tuning-ordered-cci.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/13/2021 ms.author: xiaoyul -ms.reviewer: nibruno; jrasnick +ms.reviewer: nibruno; wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/quickstart-bicep.md b/articles/synapse-analytics/sql-data-warehouse/quickstart-bicep.md new file mode 100644 index 0000000000000..ce7c74e10455a --- /dev/null +++ b/articles/synapse-analytics/sql-data-warehouse/quickstart-bicep.md @@ -0,0 +1,101 @@ +--- +title: Create an Azure Synapse Analytics dedicated SQL pool (formerly SQL DW) using Bicep +description: Learn how to create an Azure Synapse Analytics SQL pool using Bicep. +services: azure-resource-manager +author: schaffererin +ms.service: azure-resource-manager +ms.topic: quickstart +ms.author: v-eschaffer +ms.date: 05/20/2022 +ms.custom: devx-track-azurepowershell, subject-armqs, mode-arm +--- + +# Quickstart: Create an Azure Synapse Analytics dedicated SQL pool (formerly SQL DW) using Bicep + +This Bicep file will create a dedicated SQL pool (formerly SQL DW) with Transparent Data Encryption enabled. Dedicated SQL pool (formerly SQL DW) refers to the enterprise data warehousing features that are generally available in Azure Synapse. + +[!INCLUDE [About Bicep](../../../includes/resource-manager-quickstart-bicep-introduction.md)] + +## Prerequisites + +If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. + +## Review the Bicep file + +The Bicep file used in this quickstart is from [Azure Quickstart Templates](https://azure.microsoft.com/resources/templates/sql-data-warehouse-transparent-encryption-create/). + +:::code language="bicep" source="~/quickstart-templates/quickstarts/microsoft.sql/sql-data-warehouse-transparent-encryption-create/main.bicep"::: + +The Bicep file defines one resource: + +- [Microsoft.Sql/servers](/azure/templates/microsoft.sql/servers) + +## Deploy the Bicep file + +1. Save the Bicep file as `main.bicep` to your local computer. +1. Deploy the Bicep file using either Azure CLI or Azure PowerShell. + + # [CLI](#tab/CLI) + + ```azurecli + az group create --name exampleRG --location eastus + az deployment group create --resource-group exampleRG --template-file main.bicep --parameters sqlAdministratorLogin= databasesName= capacity= + ``` + + # [PowerShell](#tab/PowerShell) + + ```azurepowershell + New-AzResourceGroup -Name exampleRG -Location eastus + New-AzResourceGroupDeployment -ResourceGroupName exampleRG -TemplateFile ./main.bicep -sqlAdministratorLogin "" -databasesName "" -capacity + ``` + + --- + + > [!NOTE] + > Replace **\** with the administrator login username for the SQL server. Replace **\** with the name of the database. Replace **\** with the DW performance level. 
The minimum value is 900 and the maximum value is 54000. You'll also be prompted to enter **sqlAdministratorPassword**. + + When the deployment finishes, you should see a message indicating the deployment succeeded. + +## Review deployed resources + +Use the Azure portal, Azure CLI, or Azure PowerShell to list the deployed resources in the resource group. + +# [CLI](#tab/CLI) + +```azurecli-interactive +az resource list --resource-group exampleRG +``` + +# [PowerShell](#tab/PowerShell) + +```azurepowershell-interactive +Get-AzResource -ResourceGroupName exampleRG +``` + +--- + +## Clean up resources + +When no longer needed, use the Azure portal, Azure CLI, or Azure PowerShell to delete the resource group and its resources. + +# [CLI](#tab/CLI) + +```azurecli-interactive +az group delete --name exampleRG +``` + +# [PowerShell](#tab/PowerShell) + +```azurepowershell-interactive +Remove-AzResourceGroup -Name exampleRG +``` + +--- + +## Next steps + +In this quickstart, you created a dedicated SQL pool (formerly SQL DW) using Bicep and validated the deployment. To learn more about Azure Synapse Analytics and Bicep, see the articles below. + +- Read an [Overview of Azure Synapse Analytics](sql-data-warehouse-overview-what-is.md) +- Learn more about [Bicep](../../azure-resource-manager/bicep/overview.md) +- [Quickstart: Create Bicep files with Visual Studio Code](../../azure-resource-manager/bicep/quickstart-create-bicep-use-visual-studio-code.md) diff --git a/articles/synapse-analytics/sql-data-warehouse/quickstart-scale-compute-powershell.md b/articles/synapse-analytics/sql-data-warehouse/quickstart-scale-compute-powershell.md index 0d98b4acd3a23..258b5a786d5fc 100644 --- a/articles/synapse-analytics/sql-data-warehouse/quickstart-scale-compute-powershell.md +++ b/articles/synapse-analytics/sql-data-warehouse/quickstart-scale-compute-powershell.md @@ -4,7 +4,7 @@ description: You can scale compute for dedicated SQL pool (formerly SQL DW) usin author: kedodd ms.author: kedodd manager: craigg -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.date: 03/09/2022 ms.topic: quickstart ms.service: synapse-analytics diff --git a/articles/synapse-analytics/sql-data-warehouse/quickstart-scale-compute-tsql.md b/articles/synapse-analytics/sql-data-warehouse/quickstart-scale-compute-tsql.md index 82a23617ef623..7cf88124e09a3 100644 --- a/articles/synapse-analytics/sql-data-warehouse/quickstart-scale-compute-tsql.md +++ b/articles/synapse-analytics/sql-data-warehouse/quickstart-scale-compute-tsql.md @@ -8,7 +8,7 @@ ms.service: synapse-analytics ms.topic: quickstart ms.subservice: sql-dw ms.date: 03/09/2022 -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse, mode-other --- diff --git a/articles/synapse-analytics/sql-data-warehouse/single-region-residency.md b/articles/synapse-analytics/sql-data-warehouse/single-region-residency.md index b330b4c16dd69..89d774e166989 100644 --- a/articles/synapse-analytics/sql-data-warehouse/single-region-residency.md +++ b/articles/synapse-analytics/sql-data-warehouse/single-region-residency.md @@ -2,13 +2,13 @@ title: Single region residency description: How-to guide for configuring single region residency for a dedicated SQL pool (formerly SQL DW) in Azure Synapse Analytics author: joannapea -manager: igorstan +manager: ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 05/15/2021 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git 
a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-authentication.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-authentication.md index 1fba6d6085e63..0c1e0e14b7633 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-authentication.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-authentication.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/02/2019 ms.author: nanditav -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 tag: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-concept-recommendations.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-concept-recommendations.md index e533581ffd526..88ebf34b48d56 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-concept-recommendations.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-concept-recommendations.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 06/26/2020 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-connect-overview.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-connect-overview.md index 451f3b411f068..feeeed5f0a51e 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-connect-overview.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-connect-overview.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse, seo-lt-2019, devx-track-csharp --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-connection-strings.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-connection-strings.md index 8dd395f7642cd..38de137294cf3 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-connection-strings.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-connection-strings.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse, seo-lt-2019, devx-track-csharp --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-continuous-integration-and-deployment.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-continuous-integration-and-deployment.md index 522cf6f7b0ce8..8d35658d664c0 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-continuous-integration-and-deployment.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-continuous-integration-and-deployment.md @@ -8,7 +8,7 @@ ms.topic: how-to ms.subservice: sql-dw ms.date: 02/04/2020 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- # Continuous integration and deployment for dedicated SQL pool in Azure Synapse Analytics diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-best-practices-transactions.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-best-practices-transactions.md index 39c0e76513a51..22fb728e66368 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-best-practices-transactions.md +++ 
b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-best-practices-transactions.md @@ -1,14 +1,13 @@ --- title: Optimizing transactions -description: Learn how to optimize the performance of your transactional code in dedicated SQL pool while minimizing risk for long rollbacks. -author: XiaoyuMSFT -manager: craigg +description: Learn how to optimize the performance of your transactional code in an Azure Synapse Analytics dedicated SQL pool while minimizing risk for long rollbacks. ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/19/2018 -ms.author: xiaoyul -ms.reviewer: igorstan +author: KevinConanMSFT +ms.author: kecona +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-ctas.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-ctas.md index 07bdf2ff25fc1..5077463e4d4d2 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-ctas.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-ctas.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 03/26/2019 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seoapril2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-dynamic-sql.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-dynamic-sql.md index 48177b18fc626..542dab6a25e8e 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-dynamic-sql.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-dynamic-sql.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-group-by-options.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-group-by-options.md index 27ff082c61db1..f41365def64b5 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-group-by-options.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-group-by-options.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-label.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-label.md index 49ace451670af..4f70b8f8912fb 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-label.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-label.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-loops.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-loops.md index 7007c973e4729..91af02eb4bd9e 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-loops.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-loops.md @@ -8,7 
+8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-stored-procedures.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-stored-procedures.md index 300138cd6fb3e..710239317c375 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-stored-procedures.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-stored-procedures.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/02/2019 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-transactions.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-transactions.md index 159ec98124622..0a95f380a24aa 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-transactions.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-transactions.md @@ -1,15 +1,14 @@ --- title: Use transactions in Azure Synapse Analytics SQL pool description: This article includes tips for implementing transactions and developing solutions in Synapse SQL pool. -author: XiaoyuMSFT -manager: craigg ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 03/22/2019 -ms.author: xiaoyul +author: KevinConanMSFT +ms.author: kecona ms.custom: azure-synapse -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Use transactions in a SQL pool in Azure Synapse diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-user-defined-schemas.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-user-defined-schemas.md index 17847af3c4299..020f29af4ca44 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-user-defined-schemas.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-user-defined-schemas.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: nanditav -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-variable-assignment.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-variable-assignment.md index 87e5a0bb4cfbb..d1b964a870002 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-variable-assignment.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-variable-assignment.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-analyze-with-azure-machine-learning.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-analyze-with-azure-machine-learning.md index ad201b586b7cb..2e3a11e20c419 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-analyze-with-azure-machine-learning.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-analyze-with-azure-machine-learning.md @@ 
-8,7 +8,7 @@ ms.topic: conceptual ms.subservice: machine-learning ms.date: 07/15/2020 ms.author: martinle -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 tag: azure-Synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-connect-sqlcmd.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-connect-sqlcmd.md index b8857ca00ab66..55f86140342c6 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-connect-sqlcmd.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-connect-sqlcmd.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-create-support-ticket.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-create-support-ticket.md index 739b9b6b79421..7d76f21bdc83d 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-create-support-ticket.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-create-support-ticket.md @@ -7,7 +7,7 @@ ms.subservice: sql-dw ms.date: 03/10/2020 author: WilliamDAssafMSFT ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-how-to-convert-resource-classes-workload-groups.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-how-to-convert-resource-classes-workload-groups.md index da84e23db3300..e1b1d84312dc4 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-how-to-convert-resource-classes-workload-groups.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-how-to-convert-resource-classes-workload-groups.md @@ -8,7 +8,7 @@ ms.subservice: sql-dw ms.topic: conceptual ms.date: 08/13/2020 ms.author: rortloff -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-how-to-monitor-cache.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-how-to-monitor-cache.md index cff78a90b2606..bb56ff4f15b9e 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-how-to-monitor-cache.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-how-to-monitor-cache.md @@ -8,7 +8,7 @@ ms.subservice: sql-dw ms.topic: conceptual ms.date: 11/20/2020 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-install-visual-studio.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-install-visual-studio.md index 6dbffff60718f..835ef4cfb3576 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-install-visual-studio.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-install-visual-studio.md @@ -10,7 +10,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 05/11/2020 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Getting started with Visual Studio 2019 diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-integrate-azure-stream-analytics.md 
b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-integrate-azure-stream-analytics.md index d0ec3f524907d..1b7d59d65fd4f 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-integrate-azure-stream-analytics.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-integrate-azure-stream-analytics.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 9/25/2020 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-load-from-azure-blob-storage-with-polybase.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-load-from-azure-blob-storage-with-polybase.md index 06106bb438574..0dd6fb6f87558 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-load-from-azure-blob-storage-with-polybase.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-load-from-azure-blob-storage-with-polybase.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 11/20/2020 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-load-from-azure-data-lake-store.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-load-from-azure-data-lake-store.md index a758172076a2e..3cf7bbf6ed7f7 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-load-from-azure-data-lake-store.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-load-from-azure-data-lake-store.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 11/20/2020 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-compute-overview.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-compute-overview.md index 09a8c5d7b3509..9b6f166bf1e6b 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-compute-overview.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-compute-overview.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 11/12/2019 ms.author: rortloff -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-compute-rest-api.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-compute-rest-api.md index 872da8ce051bf..368fcaff7c46f 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-compute-rest-api.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-compute-rest-api.md @@ -8,7 +8,7 @@ ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 03/09/2022 -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-monitor.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-monitor.md index e9e0b2a7ce86d..5fd9c27db1d0d 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-monitor.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-monitor.md @@ -8,7 +8,7 @@ ms.topic: conceptual 
ms.subservice: sql-dw ms.date: 11/15/2021 ms.author: rortloff -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: synapse-analytics --- @@ -61,7 +61,7 @@ ORDER BY total_elapsed_time DESC; From the preceding query results, **note the Request ID** of the query that you would like to investigate. -Queries in the **Suspended** state can be queued due to a large number of active running queries. These queries also appear in the [sys.dm_pdw_waits](/sql/relational-databases/system-dynamic-management-views/sys-dm-pdw-waits-transact-sql?toc=/azure/synapse-analytics/sql-data-warehouse/toc.json&bc=/azure/synapse-analytics/sql-data-warehouse/breadcrumb/toc.json&view=azure-sqldw-latest&preserve-view=true) waits query with a type of UserConcurrencyResourceType. For information on concurrency limits, see [Memory and concurrency limits](memory-concurrency-limits.md) or [Resource classes for workload management](resource-classes-for-workload-management.md). Queries can also wait for other reasons such as for object locks. If your query is waiting for a resource, see [Investigating queries waiting for resources](#monitor-waiting-queries) further down in this article. +Queries in the **Suspended** state can be queued due to a large number of active running queries. These queries also appear in the [sys.dm_pdw_waits](/sql/relational-databases/system-dynamic-management-views/sys-dm-pdw-waits-transact-sql?toc=/azure/synapse-analytics/sql-data-warehouse/toc.json&bc=/azure/synapse-analytics/sql-data-warehouse/breadcrumb/toc.json&view=azure-sqldw-latest&preserve-view=true). In that case, look for waits such as UserConcurrencyResourceType. For information on concurrency limits, see [Memory and concurrency limits](memory-concurrency-limits.md) or [Resource classes for workload management](resource-classes-for-workload-management.md). Queries can also wait for other reasons such as for object locks. If your query is waiting for a resource, see [Investigating queries waiting for resources](#monitor-waiting-queries) further down in this article. To simplify the lookup of a query in the [sys.dm_pdw_exec_requests](/sql/relational-databases/system-dynamic-management-views/sys-dm-pdw-exec-requests-transact-sql?toc=/azure/synapse-analytics/sql-data-warehouse/toc.json&bc=/azure/synapse-analytics/sql-data-warehouse/breadcrumb/toc.json&view=azure-sqldw-latest&preserve-view=true) table, use [LABEL](/sql/t-sql/queries/option-clause-transact-sql?toc=/azure/synapse-analytics/sql-data-warehouse/toc.json&bc=/azure/synapse-analytics/sql-data-warehouse/breadcrumb/toc.json&view=azure-sqldw-latest&preserve-view=true) to assign a comment to your query, which can be looked up in the sys.dm_pdw_exec_requests view. @@ -212,7 +212,7 @@ WHERE DB_NAME(ssu.database_id) = 'tempdb' ORDER BY sr.request_id; ``` -If you have a query that is consuming a large amount of memory or have received an error message related to allocation of tempdb, it could be due to a very large [CREATE TABLE AS SELECT (CTAS)](/sql/t-sql/statements/create-table-as-select-azure-sql-data-warehouse) or [INSERT SELECT](/sql/t-sql/statements/insert-transact-sql?toc=/azure/synapse-analytics/sql-data-warehouse/toc.json&bc=/azure/synapse-analytics/sql-data-warehouse/breadcrumb/toc.json&view=azure-sqldw-latest&preserve-view=true) statement running that is failing in the final data movement operation. This can usually be identified as a ShuffleMove operation in the distributed query plan right before the final INSERT SELECT. 
Use [sys.dm_pdw_request_steps](/sql/relational-databases/system-dynamic-management-views/sys-dm-pdw-request-steps-transact-sql?toc=/azure/synapse-analytics/sql-data-warehouse/toc.json&bc=/azure/synapse-analytics/sql-data-warehouse/breadcrumb/toc.json&view=azure-sqldw-latest&preserve-view=true) to monitor ShuffleMove operations. +If you have a query that is consuming a large amount of memory or have received an error message related to the allocation of tempdb, it could be due to a very large [CREATE TABLE AS SELECT (CTAS)](/sql/t-sql/statements/create-table-as-select-azure-sql-data-warehouse) or [INSERT SELECT](/sql/t-sql/statements/insert-transact-sql?toc=/azure/synapse-analytics/sql-data-warehouse/toc.json&bc=/azure/synapse-analytics/sql-data-warehouse/breadcrumb/toc.json&view=azure-sqldw-latest&preserve-view=true) statement running that is failing in the final data movement operation. This can usually be identified as a ShuffleMove operation in the distributed query plan right before the final INSERT SELECT. Use [sys.dm_pdw_request_steps](/sql/relational-databases/system-dynamic-management-views/sys-dm-pdw-request-steps-transact-sql?toc=/azure/synapse-analytics/sql-data-warehouse/toc.json&bc=/azure/synapse-analytics/sql-data-warehouse/breadcrumb/toc.json&view=azure-sqldw-latest&preserve-view=true) to monitor ShuffleMove operations. The most common mitigation is to break your CTAS or INSERT SELECT statement into multiple load statements so the data volume will not exceed the 2TB per node tempdb limit (when at or above DW500c). You can also scale your cluster to a larger size which will spread the tempdb size across more nodes reducing the tempdb on each individual node. @@ -303,6 +303,65 @@ ORDER BY gb_processed desc; ``` +## Monitor query blockings + +The following query provides the top 500 blocked queries in the environment. + +```sql + +--Collect the top blocking +SELECT + TOP 500 waiting.request_id AS WaitingRequestId, + waiting.object_type AS LockRequestType, + waiting.object_name AS ObjectLockRequestName, + waiting.request_time AS ObjectLockRequestTime, + blocking.session_id AS BlockingSessionId, + blocking.request_id AS BlockingRequestId +FROM + sys.dm_pdw_waits waiting + INNER JOIN sys.dm_pdw_waits blocking + ON waiting.object_type = blocking.object_type + AND waiting.object_name = blocking.object_name +WHERE + waiting.state = 'Queued' + AND blocking.state = 'Granted' +ORDER BY + ObjectLockRequestTime ASC; + +``` + +## Retrieve query text from waiting and blocking queries + +The following query provides the query text and identifier for the waiting and blocking queries to easily troubleshoot. 
+ +```sql + +-- To retrieve query text from waiting and blocking queries + +SELECT waiting.session_id AS WaitingSessionId, + waiting.request_id AS WaitingRequestId, + COALESCE(waiting_exec_request.command,waiting_exec_request.command2) AS WaitingExecRequestText, + blocking.session_id AS BlockingSessionId, + blocking.request_id AS BlockingRequestId, + COALESCE(blocking_exec_request.command,blocking_exec_request.command2) AS BlockingExecRequestText, + waiting.object_name AS Blocking_Object_Name, + waiting.object_type AS Blocking_Object_Type, + waiting.type AS Lock_Type, + waiting.request_time AS Lock_Request_Time, + datediff(ms, waiting.request_time, getdate())/1000.0 AS Blocking_Time_sec +FROM sys.dm_pdw_waits waiting + INNER JOIN sys.dm_pdw_waits blocking + ON waiting.object_type = blocking.object_type + AND waiting.object_name = blocking.object_name + INNER JOIN sys.dm_pdw_exec_requests blocking_exec_request + ON blocking.request_id = blocking_exec_request.request_id + INNER JOIN sys.dm_pdw_exec_requests waiting_exec_request + ON waiting.request_id = waiting_exec_request.request_id +WHERE waiting.state = 'Queued' + AND blocking.state = 'Granted' +ORDER BY Lock_Request_Time DESC; +``` + ## Next steps For more information about DMVs, see [System views](../sql/reference-tsql-system-views.md). diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-develop.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-develop.md index 8ba4c91340f1b..20da0477a397c 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-develop.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-develop.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 08/29/2018 ms.author: xiaoyul -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Design decisions and coding techniques for a dedicated SQL pool (formerly SQL DW) in Azure Synapse Analytics diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-faq.yml b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-faq.yml index 8a877363d0b10..48b0b0da32c94 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-faq.yml +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-faq.yml @@ -8,7 +8,7 @@ metadata: ms.subservice: sql-dw ms.date: 11/04/2019 ms.author: martinle - ms.reviewer: igorstan + ms.reviewer: wiassaf title: Dedicated SQL pool (formerly SQL DW) in Azure Synapse Analytics frequently asked questions summary: | diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-integrate.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-integrate.md index bc31af25b877e..ff005451b6eee 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-integrate.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-integrate.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: martinle -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-manage-security.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-manage-security.md index 9c26df9058ba8..d8983bb966010 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-manage-security.md +++ 
b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-manage-security.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: nanditav -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 tags: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-manageability-monitoring.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-manageability-monitoring.md index 2ebcd896e4383..41d16c2ec904e 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-manageability-monitoring.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-manageability-monitoring.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 08/27/2018 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-query-ssms.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-query-ssms.md index 5e642771d2829..7ecf39f031381 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-query-ssms.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-query-ssms.md @@ -1,14 +1,13 @@ --- title: Connect to dedicated SQL pool (formerly SQL DW) with SSMS description: Use SQL Server Management Studio (SSMS) to connect to and query a dedicated SQL pool (formerly SQL DW) in Azure Synapse Analytics. -author: XiaoyuMSFT -manager: craigg ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 -ms.author: xiaoyul -ms.reviewer: igorstan +author: nanditavalsan +ms.author: nanditav +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-query-visual-studio.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-query-visual-studio.md index 3de06db12c450..3203cff3642f4 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-query-visual-studio.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-query-visual-studio.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 08/15/2019 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-collation-types.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-collation-types.md index 519398e1f957f..9d780389af41d 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-collation-types.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-collation-types.md @@ -2,7 +2,7 @@ title: Data warehouse collation types description: Collation types supported for dedicated SQL pool (formerly SQL DW) in Azure Synapse Analytics. 
author: jasonwhowell -manager: igorstan +manager: ms.service: synapse-analytics ms.subservice: sql ms.topic: conceptual diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-powershell-cmdlets.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-powershell-cmdlets.md index 39a72873b7c60..11777573f723a 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-powershell-cmdlets.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-powershell-cmdlets.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, devx-track-azurepowershell --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-language-elements.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-language-elements.md index 648525eb00857..f187823bfba4e 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-language-elements.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-language-elements.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 06/13/2018 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-statements.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-statements.md index 633939cd620b1..e873fea9a980b 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-statements.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-statements.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 05/01/2019 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-system-views.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-system-views.md index b09117dfa1165..504eb690e395f 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-system-views.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-system-views.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 01/06/2020 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-deleted-dw.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-deleted-dw.md index cff3030f388fb..f842557dc635f 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-deleted-dw.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-deleted-dw.md @@ -1,14 +1,14 @@ --- title: Restore a deleted dedicated SQL pool (formerly SQL DW) description: How to guide for restoring a deleted dedicated SQL pool in Azure Synapse Analytics. 
-author: anumjs +author: joannapea manager: craigg ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 08/29/2018 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, devx-track-azurepowershell --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-from-geo-backup.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-from-geo-backup.md index 1b86a99d7377f..a5e911c940647 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-from-geo-backup.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-from-geo-backup.md @@ -1,14 +1,14 @@ --- title: Restore a dedicated SQL pool from a geo-backup description: How-to guide for geo-restoring a dedicated SQL pool in Azure Synapse Analytics -author: anumjs +author: joannapea manager: craigg ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 11/13/2020 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, devx-track-azurepowershell --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-points.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-points.md index d11d1973233c8..7f91aaf2f3ce4 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-points.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-points.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 07/03/2019 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, devx-track-azurepowershell --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-service-capacity-limits.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-service-capacity-limits.md index 97e46fb008b3b..ee2f0da2eb354 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-service-capacity-limits.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-service-capacity-limits.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 2/19/2020 ms.author: martinle -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-source-control-integration.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-source-control-integration.md index cd3154806c1b5..4dcaac2606d33 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-source-control-integration.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-source-control-integration.md @@ -8,7 +8,7 @@ ms.topic: overview ms.subservice: sql-dw ms.date: 08/23/2019 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Source Control Integration for dedicated SQL pool in Azure Synapse Analytics diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-table-constraints.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-table-constraints.md index 53749d007e26d..6257e7cc54d24 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-table-constraints.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-table-constraints.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 09/05/2019 ms.author: emtehran -ms.reviewer: nibruno; jrasnick 
+ms.reviewer: nibruno; wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-identity.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-identity.md index 04745a4203c5c..ffcdf2b9256a3 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-identity.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-identity.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 07/20/2020 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-partition.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-partition.md index 0830a5535ec95..e9d9b0fcc8c46 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-partition.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-partition.md @@ -76,8 +76,7 @@ WITH ,20030101,20040101,20050101 ) ) -) -; +); ``` ## Migrate partitions from SQL Server @@ -87,7 +86,7 @@ To migrate SQL Server partition definitions to dedicated SQL pool simply: - Eliminate the SQL Server [partition scheme](/sql/t-sql/statements/create-partition-scheme-transact-sql?toc=/azure/synapse-analytics/sql-data-warehouse/toc.json&bc=/azure/synapse-analytics/sql-data-warehouse/breadcrumb/toc.json&view=azure-sqldw-latest&preserve-view=true). - Add the [partition function](/sql/t-sql/statements/create-partition-function-transact-sql?toc=/azure/synapse-analytics/sql-data-warehouse/toc.json&bc=/azure/synapse-analytics/sql-data-warehouse/breadcrumb/toc.json&view=azure-sqldw-latest&preserve-view=true) definition to your CREATE TABLE. -If you are migrating a partitioned table from a SQL Server instance, the following SQL can help you to figure out the number of rows that in each partition. Keep in mind that if the same partitioning granularity is used in dedicated SQL pool, the number of rows per partition decreases by a factor of 60. +If you are migrating a partitioned table from a SQL Server instance, the following SQL can help you to figure out the number of rows in each partition. Keep in mind that if the same partitioning granularity is used in dedicated SQL pool, the number of rows per partition decreases by a factor of 60. ```sql -- Partition information for a SQL Server Database @@ -119,15 +118,14 @@ GROUP BY s.[name] , p.[partition_number] , p.[rows] , rv.[value] -, p.[data_compression_desc] -; +, p.[data_compression_desc]; ``` ## Partition switching Dedicated SQL pool supports partition splitting, merging, and switching. Each of these functions is executed using the [ALTER TABLE](/sql/t-sql/statements/alter-table-transact-sql?toc=/azure/synapse-analytics/sql-data-warehouse/toc.json&bc=/azure/synapse-analytics/sql-data-warehouse/breadcrumb/toc.json&view=azure-sqldw-latest&preserve-view=true) statement. -To switch partitions between two tables, you must ensure that the partitions align on their respective boundaries and that the table definitions match. As check constraints are not available to enforce the range of values in a table, the source table must contain the same partition boundaries as the target table. If the partition boundaries are not then same, then the partition switch will fail as the partition metadata will not be synchronized. 
+To switch partitions between two tables, you must ensure that the partitions align on their respective boundaries and that the table definitions match. As check constraints are not available to enforce the range of values in a table, the source table must contain the same partition boundaries as the target table. If the partition boundaries are not the same, then the partition switch will fail as the partition metadata will not be synchronized. A partition split requires the respective partition (not necessarily the whole table) to be empty if the table has a clustered columnstore index (CCI). Other partitions in the same table can contain data. A partition that contains data cannot be split, it will result in error: `ALTER PARTITION statement failed because the partition is not empty. Only empty partitions can be split in when a columnstore index exists on the table. Consider disabling the columnstore index before issuing the ALTER PARTITION statement, then rebuilding the columnstore index after ALTER PARTITION is complete.` As a workaround to split a partition containing data, see [How to split a partition that contains data](#how-to-split-a-partition-that-contains-data). @@ -156,11 +154,11 @@ WITH (20000101 ) ) -) -; +); INSERT INTO dbo.FactInternetSales VALUES (1,19990101,1,1,1,1,1,1); + INSERT INTO dbo.FactInternetSales VALUES (1,20000101,1,1,1,1,1,1); ``` @@ -178,8 +176,7 @@ JOIN sys.tables t ON p.[object_id] = t.[object_id] JOIN sys.schemas s ON t.[schema_id] = s.[schema_id] JOIN sys.indexes i ON p.[object_id] = i.[object_Id] AND p.[index_Id] = i.[index_Id] -WHERE t.[name] = 'FactInternetSales' -; +WHERE t.[name] = 'FactInternetSales'; ``` The following split command receives an error message: @@ -198,23 +195,22 @@ However, you can use `CTAS` to create a new table to hold the data. ```sql CREATE TABLE dbo.FactInternetSales_20000101 WITH ( DISTRIBUTION = HASH(ProductKey) - , CLUSTERED COLUMNSTORE INDEX + , CLUSTERED COLUMNSTORE INDEX , PARTITION ( [OrderDateKey] RANGE RIGHT FOR VALUES (20000101 ) ) - ) +) AS SELECT * FROM FactInternetSales -WHERE 1=2 -; +WHERE 1=2; ``` As the partition boundaries are aligned, a switch is permitted. This will leave the source table with an empty partition that you can subsequently split. ```sql -ALTER TABLE FactInternetSales SWITCH PARTITION 2 TO FactInternetSales_20000101 PARTITION 2; +ALTER TABLE FactInternetSales SWITCH PARTITION 2 TO FactInternetSales_20000101 PARTITION 2; ALTER TABLE FactInternetSales SPLIT RANGE (20010101); ``` @@ -234,8 +230,7 @@ AS SELECT * FROM [dbo].[FactInternetSales_20000101] WHERE [OrderDateKey] >= 20000101 -AND [OrderDateKey] < 20010101 -; +AND [OrderDateKey] < 20010101; ALTER TABLE dbo.FactInternetSales_20000101_20010101 SWITCH PARTITION 2 TO dbo.FactInternetSales PARTITION 2; ``` @@ -245,6 +240,7 @@ Once you have completed the movement of the data, it is a good idea to refresh t ```sql UPDATE STATISTICS [dbo].[FactInternetSales]; ``` +Finally, in the case of a one-time partition switch to move data, you could drop the tables created for the partition switch, `FactInternetSales_20000101_20010101` and `FactInternetSales_20000101`. Alternatively, you may want to keep empty tables for regular, automated partition switches. 
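For the one-time case, the cleanup could be as simple as the following sketch (assuming nothing else still references these helper tables):

```sql
-- One-time cleanup: drop the helper tables that were created only for the partition switch.
-- Keep them instead if you plan to reuse the empty tables for recurring partition switches.
DROP TABLE dbo.FactInternetSales_20000101_20010101;
DROP TABLE dbo.FactInternetSales_20000101;
```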
### Load new data into partitions that contain data in one step @@ -303,8 +299,7 @@ To avoid your table definition from **rusting** in your source control system, y ( CLUSTERED COLUMNSTORE INDEX , DISTRIBUTION = HASH([ProductKey]) , PARTITION ( [OrderDateKey] RANGE RIGHT FOR VALUES () ) - ) - ; + ); ``` 1. `SPLIT` the table as part of the deployment process: @@ -331,8 +326,7 @@ To avoid your table definition from **rusting** in your source control system, y SELECT CAST(20030101 AS INT) UNION ALL SELECT CAST(20040101 AS INT) - ) a - ; + ) a; -- Iterate over the partition boundaries and split the table @@ -341,8 +335,7 @@ To avoid your table definition from **rusting** in your source control system, y , @q NVARCHAR(4000) --query , @p NVARCHAR(20) = N'' --partition_number , @s NVARCHAR(128) = N'dbo' --schema - , @t NVARCHAR(128) = N'FactInternetSales' --table - ; + , @t NVARCHAR(128) = N'FactInternetSales' --table; WHILE @i <= @c BEGIN diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-statistics.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-statistics.md index b152f95df3505..cba2424f38048 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-statistics.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-statistics.md @@ -1,14 +1,13 @@ --- title: Create and update statistics on tables description: Recommendations and examples for creating and updating query-optimization statistics on tables in dedicated SQL pool. -author: XiaoyuMSFT -manager: craigg ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 05/09/2018 -ms.author: xiaoyul -ms.reviewer: igorstan +author: mstehrani +ms.author: emtehran +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- @@ -105,39 +104,39 @@ actualRowCounts.logical_table_name, statsRowCounts.stats_row_count, actualRowCounts.actual_row_count, row_count_difference = CASE - WHEN actualRowCounts.actual_row_count >= statsRowCounts.stats_row_count THEN actualRowCounts.actual_row_count - statsRowCounts.stats_row_count - ELSE statsRowCounts.stats_row_count - actualRowCounts.actual_row_count + WHEN actualRowCounts.actual_row_count >= statsRowCounts.stats_row_count THEN actualRowCounts.actual_row_count - statsRowCounts.stats_row_count + ELSE statsRowCounts.stats_row_count - actualRowCounts.actual_row_count END, percent_deviation_from_actual = CASE - WHEN actualRowCounts.actual_row_count = 0 THEN statsRowCounts.stats_row_count - WHEN statsRowCounts.stats_row_count = 0 THEN actualRowCounts.actual_row_count - WHEN actualRowCounts.actual_row_count >= statsRowCounts.stats_row_count THEN CONVERT(NUMERIC(18, 0), CONVERT(NUMERIC(18, 2), (actualRowCounts.actual_row_count - statsRowCounts.stats_row_count)) / CONVERT(NUMERIC(18, 2), actualRowCounts.actual_row_count) * 100) - ELSE CONVERT(NUMERIC(18, 0), CONVERT(NUMERIC(18, 2), (statsRowCounts.stats_row_count - actualRowCounts.actual_row_count)) / CONVERT(NUMERIC(18, 2), actualRowCounts.actual_row_count) * 100) + WHEN actualRowCounts.actual_row_count = 0 THEN statsRowCounts.stats_row_count + WHEN statsRowCounts.stats_row_count = 0 THEN actualRowCounts.actual_row_count + WHEN actualRowCounts.actual_row_count >= statsRowCounts.stats_row_count THEN CONVERT(NUMERIC(18, 0), CONVERT(NUMERIC(18, 2), (actualRowCounts.actual_row_count - statsRowCounts.stats_row_count)) / CONVERT(NUMERIC(18, 2), actualRowCounts.actual_row_count) * 100) + ELSE CONVERT(NUMERIC(18, 0), CONVERT(NUMERIC(18, 
2), (statsRowCounts.stats_row_count - actualRowCounts.actual_row_count)) / CONVERT(NUMERIC(18, 2), actualRowCounts.actual_row_count) * 100) END from ( - select distinct object_id from sys.stats where stats_id > 1 + select distinct object_id from sys.stats where stats_id > 1 ) objIdsWithStats left join ( - select object_id, sum(rows) as stats_row_count from sys.partitions group by object_id + select object_id, sum(rows) as stats_row_count from sys.partitions group by object_id ) statsRowCounts on objIdsWithStats.object_id = statsRowCounts.object_id left join ( - SELECT sm.name [schema] , - tb.name logical_table_name , - tb.object_id object_id , - SUM(rg.row_count) actual_row_count - FROM sys.schemas sm - INNER JOIN sys.tables tb ON sm.schema_id = tb.schema_id - INNER JOIN sys.pdw_table_mappings mp ON tb.object_id = mp.object_id - INNER JOIN sys.pdw_nodes_tables nt ON nt.name = mp.physical_name - INNER JOIN sys.dm_pdw_nodes_db_partition_stats rg ON rg.object_id = nt.object_id - AND rg.pdw_node_id = nt.pdw_node_id - AND rg.distribution_id = nt.distribution_id - WHERE rg.index_id = 1 - GROUP BY sm.name, tb.name, tb.object_id + SELECT sm.name [schema] , + tb.name logical_table_name , + tb.object_id object_id , + SUM(rg.row_count) actual_row_count + FROM sys.schemas sm + INNER JOIN sys.tables tb ON sm.schema_id = tb.schema_id + INNER JOIN sys.pdw_table_mappings mp ON tb.object_id = mp.object_id + INNER JOIN sys.pdw_nodes_tables nt ON nt.name = mp.physical_name + INNER JOIN sys.dm_pdw_nodes_db_partition_stats rg ON rg.object_id = nt.object_id + AND rg.pdw_node_id = nt.pdw_node_id + AND rg.distribution_id = nt.distribution_id + WHERE rg.index_id = 1 + GROUP BY sm.name, tb.name, tb.object_id ) actualRowCounts on objIdsWithStats.object_id = actualRowCounts.object_id diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-troubleshoot-connectivity.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-troubleshoot-connectivity.md index 96e126d000935..2cbeee8f46c1a 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-troubleshoot-connectivity.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-troubleshoot-connectivity.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 03/27/2019 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: "seo-lt-2019, azure-synapse, devx-track-csharp" --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-videos.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-videos.md index 40a2f02ee3603..6df0b0c8f4455 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-videos.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-videos.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 02/15/2019 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/striim-quickstart.md b/articles/synapse-analytics/sql-data-warehouse/striim-quickstart.md index 2f90d5ec6e0bc..644f7db1c5620 100644 --- a/articles/synapse-analytics/sql-data-warehouse/striim-quickstart.md +++ b/articles/synapse-analytics/sql-data-warehouse/striim-quickstart.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 10/12/2018 ms.author: martinle -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/toc.yml 
b/articles/synapse-analytics/sql-data-warehouse/toc.yml index ace4cf3bf45a4..cfb4bd4f0a787 100644 --- a/articles/synapse-analytics/sql-data-warehouse/toc.yml +++ b/articles/synapse-analytics/sql-data-warehouse/toc.yml @@ -34,6 +34,9 @@ items: href: create-data-warehouse-portal.md - name: PowerShell href: create-data-warehouse-powershell.md + - name: Bicep + displayName: ARM, Resource Manager, Template + href: quickstart-bicep.md - name: ARM template displayName: Resource Manager href: quickstart-arm-template.md diff --git a/articles/synapse-analytics/sql-data-warehouse/what-is-a-data-warehouse-unit-dwu-cdwu.md b/articles/synapse-analytics/sql-data-warehouse/what-is-a-data-warehouse-unit-dwu-cdwu.md index 7d60b618456fb..bebc9c4d5a9e1 100644 --- a/articles/synapse-analytics/sql-data-warehouse/what-is-a-data-warehouse-unit-dwu-cdwu.md +++ b/articles/synapse-analytics/sql-data-warehouse/what-is-a-data-warehouse-unit-dwu-cdwu.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 11/22/2019 ms.author: martinle -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, devx-track-azurepowershell --- diff --git a/articles/synapse-analytics/sql/data-loading-best-practices.md b/articles/synapse-analytics/sql/data-loading-best-practices.md index 63c7279a74c40..2cdc2eaea7558 100644 --- a/articles/synapse-analytics/sql/data-loading-best-practices.md +++ b/articles/synapse-analytics/sql/data-loading-best-practices.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql ms.date: 08/26/2021 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- diff --git a/articles/synapse-analytics/sql/develop-materialized-view-performance-tuning.md b/articles/synapse-analytics/sql/develop-materialized-view-performance-tuning.md index 44a91f9db68d4..21620b975e016 100644 --- a/articles/synapse-analytics/sql/develop-materialized-view-performance-tuning.md +++ b/articles/synapse-analytics/sql/develop-materialized-view-performance-tuning.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql ms.date: 04/15/2020 ms.author: xiaoyul -ms.reviewer: nibruno; jrasnick +ms.reviewer: nibruno; wiassaf --- # Performance tuning with materialized views using dedicated SQL pool in Azure Synapse Analytics diff --git a/articles/synapse-analytics/sql/develop-storage-files-storage-access-control.md b/articles/synapse-analytics/sql/develop-storage-files-storage-access-control.md index 192510c1cfeaa..99add2eab33d8 100644 --- a/articles/synapse-analytics/sql/develop-storage-files-storage-access-control.md +++ b/articles/synapse-analytics/sql/develop-storage-files-storage-access-control.md @@ -114,7 +114,7 @@ You can use the following combinations of authorization and Azure Storage types: ## Firewall protected storage -You can configure storage accounts to allow access to specific serverless SQL pool by creating a [resource instance rule](../../storage/common/storage-network-security.md?tabs=azure-portal#grant-access-from-azure-resource-instances-preview). +You can configure storage accounts to allow access to specific serverless SQL pool by creating a [resource instance rule](../../storage/common/storage-network-security.md?tabs=azure-portal#grant-access-from-azure-resource-instances). When accessing storage that is protected with the firewall, you can use **User Identity** or **Managed Identity**. 
> [!NOTE] diff --git a/articles/synapse-analytics/sql/develop-stored-procedures.md b/articles/synapse-analytics/sql/develop-stored-procedures.md index 9ea6f6172ee63..eaac9607138b1 100644 --- a/articles/synapse-analytics/sql/develop-stored-procedures.md +++ b/articles/synapse-analytics/sql/develop-stored-procedures.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql ms.date: 11/03/2020 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Stored procedures using Synapse SQL in Azure Synapse Analytics diff --git a/articles/synapse-analytics/sql/develop-tables-external-tables.md b/articles/synapse-analytics/sql/develop-tables-external-tables.md index 5e0d8b0c921b6..cd304dfda201a 100644 --- a/articles/synapse-analytics/sql/develop-tables-external-tables.md +++ b/articles/synapse-analytics/sql/develop-tables-external-tables.md @@ -181,6 +181,8 @@ CREATE EXTERNAL DATA SOURCE SqlOnDemandDemo WITH ( CREDENTIAL = sqlondemand ); ``` +> [!NOTE] +> SQL users need the proper permissions on database scoped credentials to access the data source in an Azure Synapse Analytics serverless SQL pool. For more information, see [Access external storage using serverless SQL pool in Azure Synapse Analytics](./develop-storage-files-overview.md?tabs=impersonation#permissions). The following example creates an external data source for Azure Data Lake Gen2 pointing to the publicly available New York data set: @@ -483,4 +485,4 @@ The external table is now created, for future exploration of the content of this ## Next steps -See the [CETAS](develop-tables-cetas.md) article for how to save query results to an external table in Azure Storage. Or you can start querying [Apache Spark for Azure Synapse external tables](develop-storage-files-spark-tables.md). +See the [CETAS](develop-tables-cetas.md) article for how to save query results to an external table in Azure Storage. Or you can start querying [Apache Spark for Azure Synapse external tables](develop-storage-files-spark-tables.md). \ No newline at end of file diff --git a/articles/synapse-analytics/sql/develop-transaction-best-practices.md b/articles/synapse-analytics/sql/develop-transaction-best-practices.md index 142b973f90daa..d952205888582 100644 --- a/articles/synapse-analytics/sql/develop-transaction-best-practices.md +++ b/articles/synapse-analytics/sql/develop-transaction-best-practices.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql ms.date: 04/15/2020 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Optimize transactions with dedicated SQL pool in Azure Synapse Analytics diff --git a/articles/synapse-analytics/sql/develop-transactions.md b/articles/synapse-analytics/sql/develop-transactions.md index 28a208df16d8c..b7ec4aa5779c2 100644 --- a/articles/synapse-analytics/sql/develop-transactions.md +++ b/articles/synapse-analytics/sql/develop-transactions.md @@ -1,14 +1,13 @@ --- -title: Use transactions +title: Use transactions with dedicated SQL pool in Azure Synapse Analytics description: Tips for implementing transactions with dedicated SQL pool in Azure Synapse Analytics for developing solutions.
-author: XiaoyuMSFT -manager: craigg ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql ms.date: 04/15/2020 -ms.author: xiaoyul -ms.reviewer: igorstan +author: KevinConanMSFT +ms.author: kecona +ms.reviewer: wiassaf --- # Use transactions with dedicated SQL pool in Azure Synapse Analytics diff --git a/articles/synapse-analytics/sql/how-to-pause-resume-pipelines.md b/articles/synapse-analytics/sql/how-to-pause-resume-pipelines.md index aca48b71cf296..589eb5d488eba 100644 --- a/articles/synapse-analytics/sql/how-to-pause-resume-pipelines.md +++ b/articles/synapse-analytics/sql/how-to-pause-resume-pipelines.md @@ -151,7 +151,7 @@ Evaluate the desired state, Pause or Resume, and the current status, Online, or 1. On the **Activities** tab, copy the code below into the **Expression**. ```HTTP - @concat(activity('CheckState').output.properties.status,'-',pipeline().parameters.PauseOrResume) + @concat(activity('CheckState').output.value[0].properties.status,'-',pipeline().parameters.PauseOrResume) ``` Where Check State is the name of the preceding Web activity with output.properties.status defining the current status and pipeline().parameters.PauseOrResume indicates the desired state. diff --git a/articles/synapse-analytics/sql/load-data-overview.md b/articles/synapse-analytics/sql/load-data-overview.md index a50209b410047..153791bd944dd 100644 --- a/articles/synapse-analytics/sql/load-data-overview.md +++ b/articles/synapse-analytics/sql/load-data-overview.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql ms.date: 04/15/2020 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Design a PolyBase data loading strategy for dedicated SQL pool in Azure Synapse Analytics diff --git a/articles/synapse-analytics/sql/overview-architecture.md b/articles/synapse-analytics/sql/overview-architecture.md index fe2e03d817818..6238bd617674c 100644 --- a/articles/synapse-analytics/sql/overview-architecture.md +++ b/articles/synapse-analytics/sql/overview-architecture.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql ms.date: 04/15/2020 ms.author: martinle -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Azure Synapse SQL architecture diff --git a/articles/synapse-analytics/sql/query-cosmos-db-analytical-store.md b/articles/synapse-analytics/sql/query-cosmos-db-analytical-store.md index df9e8ac07a097..8fa9298b278a6 100644 --- a/articles/synapse-analytics/sql/query-cosmos-db-analytical-store.md +++ b/articles/synapse-analytics/sql/query-cosmos-db-analytical-store.md @@ -283,7 +283,7 @@ The result of this query might look like the following table: | bb1206963e831f1… | The Use of Convalescent Sera in Immune-E… | `{"title":"The Use of Convalescent…` | `[{"first":"Antonio","last":"Lavazza","suffix":"", …` | | bb378eca9aac649… | Tylosema esculentum (Marama) Tuber and B… | `{"title":"Tylosema esculentum (Ma…` | `[{"first":"Walter","last":"Chingwaru","suffix":"",…` | -Learn more about analyzing [complex data types in Azure Synapse Link](../how-to-analyze-complex-schema.md) and [nested structures in a serverless SQL pool](query-parquet-nested-types.md). +Learn more about analyzing [complex data types like Parquet files and containers in Azure Synapse Link for Azure Cosmos DB](../how-to-analyze-complex-schema.md) or [nested structures in a serverless SQL pool](query-parquet-nested-types.md). 
> [!IMPORTANT] > If you see unexpected characters in your text like `Mélade` instead of `Mélade`, then your database collation isn't set to [UTF-8](/sql/relational-databases/collations/collation-and-unicode-support#utf8) collation. diff --git a/articles/synapse-analytics/sql/query-folders-multiple-csv-files.md b/articles/synapse-analytics/sql/query-folders-multiple-csv-files.md index 950b1c5b130fa..73d37980a4d7c 100644 --- a/articles/synapse-analytics/sql/query-folders-multiple-csv-files.md +++ b/articles/synapse-analytics/sql/query-folders-multiple-csv-files.md @@ -207,7 +207,7 @@ Since you have only one folder that matches the criteria, the query result is th ## Traverse folders recursively -Serverless SQL pool can recursively traverse folders if you specify /** at the end of path. The following query will read all files from all folders and subfolders located in the *csv* folder. +Serverless SQL pool can recursively traverse folders if you specify /** at the end of path. The following query will read all files from all folders and subfolders located in the *csv/taxi* folder. ```sql SELECT diff --git a/articles/synapse-analytics/sql/resources-self-help-sql-on-demand.md b/articles/synapse-analytics/sql/resources-self-help-sql-on-demand.md index e847bf4232153..d8820c6a2c767 100644 --- a/articles/synapse-analytics/sql/resources-self-help-sql-on-demand.md +++ b/articles/synapse-analytics/sql/resources-self-help-sql-on-demand.md @@ -643,7 +643,7 @@ Confirm the storage account accessed is using the Archive access tier. The Archive access tier is an offline tier. While a blob is in the Archive access tier, it can't be read or modified. -To read or download a blob in the Archive tier, rehydrate it to an online tier. See [Archive access tier](/azure/storage/blobs/access-tiers-overview#archive-access-tier). +To read or download a blob in the Archive tier, rehydrate it to an online tier. See [Archive access tier](../../storage/blobs/access-tiers-overview.md#archive-access-tier). ### [0x80070057](#tab/x80070057) @@ -1028,7 +1028,7 @@ Some general system constraints might affect your workload: | Maximum number of databases objects per database | The sum of the number of all objects in a database can't exceed 2,147,483,647. See [Limitations in SQL Server database engine](/sql/sql-server/maximum-capacity-specifications-for-sql-server#objects). | | Maximum identifier length in characters | 128. See [Limitations in SQL Server database engine](/sql/sql-server/maximum-capacity-specifications-for-sql-server#objects).| | Maximum query duration | 30 minutes. | -| Maximum size of the result set | Up to 200 GB shared between concurrent queries. | +| Maximum size of the result set | Up to 400 GB shared between concurrent queries. | | Maximum concurrency | Not limited and depends on the query complexity and amount of data scanned. One serverless SQL pool can concurrently handle 1,000 active sessions that are executing lightweight queries. The numbers will drop if the queries are more complex or scan a larger amount of data. 
| ### Can't create a database in serverless SQL pool diff --git a/articles/synapse-analytics/sql/tutorial-data-analyst.md b/articles/synapse-analytics/sql/tutorial-data-analyst.md index 5cd728fe617e9..a718d7db6f91b 100644 --- a/articles/synapse-analytics/sql/tutorial-data-analyst.md +++ b/articles/synapse-analytics/sql/tutorial-data-analyst.md @@ -1,33 +1,44 @@ --- -title: 'Tutorial: Use serverless SQL pool to analyze Azure Open Datasets in Synapse Studio' -description: This tutorial shows you how to easily perform exploratory data analysis combining different Azure Open Datasets using serverless SQL pool and visualize the results in Synapse Studio. +title: 'Tutorial: Analyze Azure Open Datasets in Synapse Studio' +description: This tutorial shows you how to perform data analysis combining different Azure Open Datasets using serverless SQL pool and visualize results in Synapse Studio. author: azaricstefan ms.service: synapse-analytics ms.topic: tutorial ms.subservice: sql -ms.date: 11/20/2020 +ms.date: 05/25/2022 +ms.custom: kr2b-contr-experiment ms.author: stefanazaric ms.reviewer: sngun --- # Tutorial: Explore and Analyze data lakes with serverless SQL pool -In this tutorial, you learn how to perform exploratory data analysis. You'll combine different Azure Open Datasets using serverless SQL pool. You'll then visualize the results in Synapse Studio for Azure Synapse Analytics. +In this tutorial, you learn how to perform exploratory data analysis. You combine different Azure Open Datasets using serverless SQL pool. You then visualize the results in Synapse Studio for Azure Synapse Analytics. -The OPENROWSET(BULK...) function allows you to access files in Azure Storage. [OPENROWSET](develop-openrowset.md) function reads content of a remote data source (for example file) and returns the content as a set of rows. +The `OPENROWSET(BULK...)` function allows you to access files in Azure Storage. [OPENROWSET](develop-openrowset.md) reads the content of a remote data source, such as a file, and returns the content as a set of rows. ## Automatic schema inference -Since data is stored in the Parquet file format, automatic schema inference is available. You can easily query the data without listing the data types of all columns in the files. You also can use the virtual column mechanism and the filepath function to filter out a certain subset of files. +Since data is stored in the Parquet file format, automatic schema inference is available. You can query the data without listing the data types of all columns in the files. You also can use the virtual column mechanism and the `filepath` function to filter out a certain subset of files. > [!NOTE] -> If you are using database with non-default collation (this is default collation SQL_Latin1_General_CP1_CI_AS), you should take into account case sensitivity. -> -> If you create a database with case sensitive collation then when you specify columns make sure to use correct name of the column. -> -> Example for a column name 'tpepPickupDateTime' would be correct while 'tpeppickupdatetime' wouldn't work in non-default collation. +> The default collation is `SQL_Latin1_General_CP1_CI_AS`. For a non-default collation, take into account case sensitivity. +> +> If you create a database with a case-sensitive collation, make sure to use the correct column names when you specify columns. +> +> A column name `tpepPickupDateTime` would be correct while `tpeppickupdatetime` wouldn't work in a non-default collation.
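For example, in a database with a case-sensitive collation, column references must match the inferred names exactly. The following sketch uses a placeholder storage path rather than the tutorial's dataset URL:

```sql
-- Sketch only: replace the placeholder path with a real Parquet location.
-- With a case-sensitive collation, tpepPickupDateTime resolves but tpeppickupdatetime does not.
SELECT TOP 10 nyc.tpepPickupDateTime
FROM
    OPENROWSET(
        BULK 'https://<storage-account>.blob.core.windows.net/<container>/<path>/*.parquet',
        FORMAT = 'PARQUET'
    ) AS [nyc];
```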
-Let's first get familiar with the NYC Taxi data by running the following query: +This tutorial uses a dataset about [New York City (NYC) Taxi](https://azure.microsoft.com/services/open-datasets/catalog/nyc-taxi-limousine-commission-yellow-taxi-trip-records/): + +- Pick-up and drop-off dates and times +- Pick-up and drop-off locations +- Trip distances +- Itemized fares +- Rate types +- Payment types +- Driver-reported passenger counts + +To get familiar with the NYC Taxi data, run the following query: ```sql SELECT TOP 100 * FROM @@ -37,16 +48,6 @@ SELECT TOP 100 * FROM ) AS [nyc] ``` -[New York City (NYC) Taxi dataset](https://azure.microsoft.com/services/open-datasets/catalog/nyc-taxi-limousine-commission-yellow-taxi-trip-records/) includes: - -- Pick-up and drop-off dates and times. -- Pick-up and drop-off locations. -- Trip distances. -- Itemized fares. -- Rate types. -- Payment types. -- Driver-reported passenger counts. - Similarly, you can query the Public Holidays dataset by using the following query: ```sql @@ -57,7 +58,7 @@ SELECT TOP 100 * FROM ) AS [holidays] ``` -Lastly, you can also query the Weather Data dataset by using the following query: +You can also query the Weather Data dataset by using the following query: ```sql SELECT @@ -69,14 +70,15 @@ FROM ) AS [weather] ``` -You can learn more about the meaning of the individual columns in the descriptions of the data sets: +You can learn more about the meaning of the individual columns in the descriptions of the data sets: + - [NYC Taxi](https://azure.microsoft.com/services/open-datasets/catalog/nyc-taxi-limousine-commission-yellow-taxi-trip-records/) - [Public Holidays](https://azure.microsoft.com/services/open-datasets/catalog/public-holidays/) - [Weather Data](https://azure.microsoft.com/services/open-datasets/catalog/noaa-integrated-surface-data/) ## Time series, seasonality, and outlier analysis -You can easily summarize the yearly number of taxi rides by using the following query: +You can summarize the yearly number of taxi rides by using the following query: ```sql SELECT @@ -94,18 +96,18 @@ ORDER BY 1 ASC The following snippet shows the result for the yearly number of taxi rides: -![Yearly number of taxi rides result snippet](./media/tutorial-data-analyst/yearly-taxi-rides.png) +![Screenshot shows a table of yearly number of taxi rides.](./media/tutorial-data-analyst/yearly-taxi-rides.png) The data can be visualized in Synapse Studio by switching from the **Table** to the **Chart** view. You can choose among different chart types, such as **Area**, **Bar**, **Column**, **Line**, **Pie**, and **Scatter**. In this case, plot the **Column** chart with the **Category** column set to **current_year**: -![Column chart showing rides per year](./media/tutorial-data-analyst/column-chart-rides-year.png) +![Screenshot shows a column chart that displays rides per year.](./media/tutorial-data-analyst/column-chart-rides-year.png) -From this visualization, you can see a trend of decreasing ride numbers over the years. Presumably, this decrease is due to the recent increased popularity of ride-sharing companies. +From this visualization, you can see a trend of decreasing ride numbers over the years. Presumably, this decrease is due to the recent increased popularity of ride-sharing companies. > [!NOTE] > At the time of writing this tutorial, data for 2019 is incomplete. As a result, there's a huge drop in the number of rides for that year. -Next, let's focus the analysis on a single year, for example, 2016. 
The following query returns the daily number of rides during that year: +You can focus the analysis on a single year, for example, 2016. The following query returns the daily number of rides during that year: ```sql SELECT @@ -123,15 +125,15 @@ ORDER BY 1 ASC The following snippet shows the result for this query: -![Daily number of rides for 2016 result snippet](./media/tutorial-data-analyst/daily-rides.png) +![Screenshot shows a table of the daily number of rides for 2016 result.](./media/tutorial-data-analyst/daily-rides.png) -Again, you can easily visualize data by plotting the **Column** chart with the **Category** column set to **current_day** and the **Legend (series)** column set to **rides_per_day**. +Again, you can visualize data by plotting the **Column** chart with the **Category** column set to **current_day** and the **Legend (series)** column set to **rides_per_day**. -![Column chart showing daily number of rides for 2016](./media/tutorial-data-analyst/column-chart-daily-rides.png) +![Screenshot shows a column chart that displays the daily number of rides for 2016.](./media/tutorial-data-analyst/column-chart-daily-rides.png) -From the plot chart, you can see there's a weekly pattern, with Saturdays as the peak day. During summer months, there are fewer taxi rides because of vacations. Also, notice some significant drops in the number of taxi rides without a clear pattern of when and why they occur. +From the plot chart, you can see there's a weekly pattern, with Saturdays as the peak day. During Summer months, there are fewer taxi rides because of vacations. Also, notice some significant drops in the number of taxi rides without a clear pattern of when and why they occur. -Next, let's see if the drop in rides correlates with public holidays. We can see if there is a correlation by joining the NYC Taxi rides dataset with the Public Holidays dataset: +Next, see if the drop in rides correlates with public holidays. Check if there's a correlation by joining the NYC Taxi rides dataset with the Public Holidays dataset: ```sql WITH taxi_rides AS ( @@ -175,11 +177,11 @@ FROM joined_data ORDER BY current_day ASC ``` -![NYC Taxi rides and Public Holidays datasets result visualization](./media/tutorial-data-analyst/rides-public-holidays.png) +![Screenshot shows a table of N Y C Taxi rides and Public Holidays datasets result.](./media/tutorial-data-analyst/rides-public-holidays.png) -This time, we want to highlight the number of taxi rides during public holidays. For that purpose, we choose **current_day** for the **Category** column and **rides_per_day** and **holiday_rides** as the **Legend (series)** columns. +Highlight the number of taxi rides during public holidays. For that purpose, choose **current_day** for the **Category** column and **rides_per_day** and **holiday_rides** as the **Legend (series)** columns. -![Number of taxi rides during public holidays plot chart](./media/tutorial-data-analyst/plot-chart-public-holidays.png) +![Screenshot shows the number of taxi rides during public holidays as a plot chart.](./media/tutorial-data-analyst/plot-chart-public-holidays.png) From the plot chart, you can see that during public holidays the number of taxi rides is lower. There's still one unexplained large drop on January 23. 
Let's check the weather in NYC on that day by querying the Weather Data dataset: @@ -208,7 +210,7 @@ FROM WHERE countryorregion = 'US' AND CAST([datetime] AS DATE) = '2016-01-23' AND stationname = 'JOHN F KENNEDY INTERNATIONAL AIRPORT' ``` -![Weather Data dataset result visualization](./media/tutorial-data-analyst/weather-data-set-visualization.png) +![Screenshot shows a Weather Data dataset result visualization.](./media/tutorial-data-analyst/weather-data-set-visualization.png) The results of the query indicate that the drop in the number of taxi rides occurred because: @@ -216,11 +218,10 @@ The results of the query indicate that the drop in the number of taxi rides occu - It was cold (temperature was below zero degrees Celsius). - It was windy (~10 m/s). -This tutorial has shown how a data analyst can quickly perform exploratory data analysis, easily combine different datasets by using serverless SQL pool, and visualize the results by using Azure Synapse Studio. +This tutorial has shown how a data analyst can quickly perform exploratory data analysis. You can combine different datasets by using serverless SQL pool and visualize the results by using Azure Synapse Studio. ## Next steps To learn how to connect serverless SQL pool to Power BI Desktop and create reports, see [Connect serverless SQL pool to Power BI Desktop and create reports](tutorial-connect-power-bi-desktop.md). To learn how to use External tables in serverless SQL pool see [Use external tables with Synapse SQL](develop-tables-external-tables.md?tabs=sql-pool) - diff --git a/articles/synapse-analytics/synapse-link/connect-synapse-link-sql-database.md b/articles/synapse-analytics/synapse-link/connect-synapse-link-sql-database.md index d883d9d61c0f5..8511ae66fe325 100644 --- a/articles/synapse-analytics/synapse-link/connect-synapse-link-sql-database.md +++ b/articles/synapse-analytics/synapse-link/connect-synapse-link-sql-database.md @@ -91,7 +91,7 @@ This article provides a step-by-step guide for getting started with Azure Synaps 1. Select **Create**. > [!NOTE] - > The linked service that you create here is not dedicated to Azure Synapse Link for SQL - it can be used by any workspace user that has the appropriate permissions. Please take time to understand the scope of users who may have access to this linked service and its credentials. For more information on permissions in Azure Synapse workspaces, see [Azure Synapse workspace access control overview - Azure Synapse Analytics](/synapse-analytics/security/synapse-workspace-access-control-overview). + > The linked service that you create here is not dedicated to Azure Synapse Link for SQL - it can be used by any workspace user that has the appropriate permissions. Please take time to understand the scope of users who may have access to this linked service and its credentials. For more information on permissions in Azure Synapse workspaces, see [Azure Synapse workspace access control overview - Azure Synapse Analytics](/azure/synapse-analytics/security/synapse-workspace-access-control-overview). 1. Select one or more source tables to replicate to your Synapse workspace and select **Continue**. 
diff --git a/articles/synapse-analytics/synapse-link/connect-synapse-link-sql-server-2022.md b/articles/synapse-analytics/synapse-link/connect-synapse-link-sql-server-2022.md index 50ff8baf0dec9..61b3e0c5a33db 100644 --- a/articles/synapse-analytics/synapse-link/connect-synapse-link-sql-server-2022.md +++ b/articles/synapse-analytics/synapse-link/connect-synapse-link-sql-server-2022.md @@ -100,7 +100,7 @@ This article provides a step-by-step guide for getting started with Azure Synaps :::image type="content" source="../media/connect-synapse-link-sql-server-2022/view-linked-service-connection.png" alt-text="View the linked service connection."::: > [!NOTE] - > The linked service that you create here is not dedicated to Azure Synapse Link for SQL - it can be used by any workspace user that has the appropriate permissions. Please take time to understand the scope of users who may have access to this linked service and its credentials. For more information on permissions in Azure Synapse workspaces, see [Azure Synapse workspace access control overview - Azure Synapse Analytics](/synapse-analytics/security/synapse-workspace-access-control-overview). + > The linked service that you create here is not dedicated to Azure Synapse Link for SQL - it can be used by any workspace user that has the appropriate permissions. Please take time to understand the scope of users who may have access to this linked service and its credentials. For more information on permissions in Azure Synapse workspaces, see [Azure Synapse workspace access control overview - Azure Synapse Analytics](/azure/synapse-analytics/security/synapse-workspace-access-control-overview). ## Create linked service to connect to your landing zone on Azure Data Lake Storage Gen2 @@ -136,7 +136,7 @@ This article provides a step-by-step guide for getting started with Azure Synaps :::image type="content" source="../media/connect-synapse-link-sql-server-2022/storage-gen2-linked-service-created.png" alt-text="New linked service to Azure Data Lake Storage Gen2."::: > [!NOTE] - > The linked service that you create here is not dedicated to Azure Synapse Link for SQL - it can be used by any workspace user that has the appropriate permissions. Please take time to understand the scope of users who may have access to this linked service and its credentials. For more information on permissions in Azure Synapse workspaces, see [Azure Synapse workspace access control overview - Azure Synapse Analytics](/synapse-analytics/security/synapse-workspace-access-control-overview). + > The linked service that you create here is not dedicated to Azure Synapse Link for SQL - it can be used by any workspace user that has the appropriate permissions. Please take time to understand the scope of users who may have access to this linked service and its credentials. For more information on permissions in Azure Synapse workspaces, see [Azure Synapse workspace access control overview - Azure Synapse Analytics](/azure/synapse-analytics/security/synapse-workspace-access-control-overview). ## Create the Azure Synapse Link connection diff --git a/articles/synapse-analytics/synapse-link/faq.yml b/articles/synapse-analytics/synapse-link/faq.yml index f09829daf609c..aaaf3d2fa9f6c 100644 --- a/articles/synapse-analytics/synapse-link/faq.yml +++ b/articles/synapse-analytics/synapse-link/faq.yml @@ -59,5 +59,5 @@ sections: - question: | How should I select the structure type of my destination table in the Synapse dedicated SQL pool? 
answer: | - You can refer to (Indexing tables - Azure Synapse Analytics | Microsoft Docs)[../sql-data-warehouse/sql-data-warehouse-tables-index.md] to understand the three options for table structure type. When clustered columnstore index is chosen, data type with max length (eg. VARCHAR(MAX)) is not supported. + You can refer to [Indexing tables - Azure Synapse Analytics | Microsoft Docs](../sql-data-warehouse/sql-data-warehouse-tables-index.md) to understand the three options for table structure type. When clustered columnstore index is chosen, data types with max length (for example, VARCHAR(MAX)) aren't supported. diff --git a/articles/synapse-analytics/synapse-link/synapse-link-for-sql-known-issues.md index 1f51ce7d1ce7d..298f1c1db9132 100644 --- a/articles/synapse-analytics/synapse-link/synapse-link-for-sql-known-issues.md +++ b/articles/synapse-analytics/synapse-link/synapse-link-for-sql-known-issues.md @@ -79,7 +79,7 @@ This is the list of known limitations for Azure Synapse Link for SQL. * When enabling Azure Synapse Link for SQL on your Azure SQL Database, you should ensure that aggressive log truncation is disabled. ### SQL Server 2022 only -* When creating SQL Server linked service, choose SQL Authentication, Windows Authentication or Azure AD Authentication. +* When creating a SQL Server linked service, choose SQL Authentication. * Azure Synapse Link for SQL works with SQL Server on Linux, but HA scenarios with Linux Pacemaker aren't supported. Shelf hosted IR cannot be installed on Linux environment. * Azure Synapse Link for SQL can't be enabled on databases that are transactional replication publishers or distributors. * If the SAS key of landing zone expires and gets rotated during the snapshot process, the new key won't get picked up. The snapshot will fail and restart automatically with the new key.
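Because the last known issue notes that a landing-zone SAS key rotated mid-snapshot isn't picked up, one mitigation is to issue the SAS with a comfortably long expiry up front. A rough sketch with the Az.Storage module (the storage account, key, and container names are placeholders, not values from this article):

```powershell
# Sketch only: create a long-lived container SAS for the Synapse Link landing zone
# so it's unlikely to expire in the middle of a long-running snapshot.
$context = New-AzStorageContext -StorageAccountName "contosolanding" `
    -StorageAccountKey "<storage-account-key>"

New-AzStorageContainerSASToken -Context $context -Name "synapse-link-landing" `
    -Permission rwdl -ExpiryTime (Get-Date).ToUniversalTime().AddDays(90)
```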
diff --git a/articles/synapse-analytics/toc.yml b/articles/synapse-analytics/toc.yml index f08adfe7e51f3..d66010bedcf9d 100644 --- a/articles/synapse-analytics/toc.yml +++ b/articles/synapse-analytics/toc.yml @@ -566,6 +566,8 @@ items: href: sql/create-use-external-tables.md - name: Store query results to storage href: sql/create-external-table-as-select.md + - name: Query Parquet files and containers in Azure Synapse Link for Azure Cosmos DB + href: how-to-analyze-complex-schema.md - name: Integrate items: - name: Build data pipelines @@ -660,10 +662,8 @@ items: href: data-explorer/functions-library/toc.yml - name: Apache Spark items: - - name: Overview - items: - - name: Apache Spark overview - href: ./spark/apache-spark-overview.md + - name: Apache Spark overview + href: ./spark/apache-spark-overview.md - name: Quickstarts items: - name: Create a notebook @@ -796,8 +796,6 @@ items: href: ./spark/synapse-spark-sql-pool-import-export.md - name: Azure SQL and SQL Server href: ./spark/data-sources/apache-spark-sql-connector.md - - name: JSON - href: how-to-analyze-complex-schema.md - name: External Hive Metastore href: ./spark/apache-spark-external-metastore.md - name: Synapse Link @@ -973,6 +971,8 @@ items: href: ../data-factory/connector-amazon-simple-storage-service.md?context=/azure/synapse-analytics/context/context&tabs=synapse-analytics - name: Amazon S3 Compatible Storage href: ../data-factory/connector-amazon-s3-compatible-storage.md?context=/azure/synapse-analytics/context/context&tabs=synapse-analytics + - name: Asana + href: ../data-factory/connector-asana.md?context=/azure/synapse-analytics/context/context&tabs=synapse-analytics - name: Avro format href: ../data-factory/format-avro.md?context=/azure/synapse-analytics/context/context&tabs=synapse-analytics - name: Azure Blob Storage @@ -1435,6 +1435,8 @@ items: items: - name: Train machine learning models href: ./spark/apache-spark-machine-learning-training.md + - name: Deep learning + href: ./machine-learning/concept-deep-learning.md - name: Tutorials items: - name: Data access and preparation @@ -1455,6 +1457,14 @@ items: - name: In Python displayName: train model, automated machine learning, automated ml, automl href: ./spark/apache-spark-azure-machine-learning-tutorial.md + - name: With Horovod and Petastorm + items: + - name: Train a model with Horovod and PyTorch + href: ./machine-learning/tutorial-horovod-pytorch.md + - name: Train a model with Horovod and TensorFlow + href: ./machine-learning/tutorial-horovod-tensorflow.md + - name: Write and load data with Petastorm + href: ./machine-learning/tutorial-load-data-petastorm.md - name: Score a model items: - name: Use the scoring wizard for dedicated SQL pools @@ -1510,10 +1520,86 @@ items: href: how-to-access-container-with-access-control-lists.md - name: Guidance items: + - name: Azure Synapse Success by Design + items: + - name: Introduction + href: ./guidance/success-by-design-introduction.md + - name: Synapse proof of concept playbook + items: + - name: Overview + href: ./guidance/proof-of-concept-playbook-overview.md + - name: Data warehousing with dedicated SQL pool + href: ./guidance/proof-of-concept-playbook-dedicated-sql-pool.md + - name: Data lake exploration with serverless SQL pool + href: ./guidance/proof-of-concept-playbook-serverless-sql-pool.md + - name: Big data analytics with Apache Spark pool + href: ./guidance/proof-of-concept-playbook-spark-pool.md + - name: Synapse implementation success methodology + items: + - name: Overview + href: 
./guidance/implementation-success-overview.md + - name: Assess environment + href: ./guidance/implementation-success-assess-environment.md + - name: Evaluate workspace design + href: ./guidance/implementation-success-evaluate-workspace-design.md + - name: Evaluate data integration design + href: ./guidance/implementation-success-evaluate-data-integration-design.md + - name: Evaluate dedicated SQL pool design + href: ./guidance/implementation-success-evaluate-dedicated-sql-pool-design.md + - name: Evaluate serverless SQL pool design + href: ./guidance/implementation-success-evaluate-serverless-sql-pool-design.md + - name: Evaluate Spark pool design + href: ./guidance/implementation-success-evaluate-spark-pool-design.md + - name: Evaluate project plan + href: ./guidance/implementation-success-evaluate-project-plan.md + - name: Evaluate solution development environment design + href: ./guidance/implementation-success-evaluate-solution-development-environment-design.md + - name: Evaluate team skill sets + href: ./guidance/implementation-success-evaluate-team-skill-sets.md + - name: Perform operational readiness review + href: ./guidance/implementation-success-perform-operational-readiness-review.md + - name: Perform user readiness and onboarding plan review + href: ./guidance/implementation-success-perform-user-readiness-and-onboarding-plan-review.md + - name: Perform monitoring review + href: ./guidance/implementation-success-perform-monitoring-review.md - name: Migration guides items: + - name: All migration guides + href: migration-guides/index.yml - name: Migrate to a dedicated SQL pool href: migration-guides/migrate-to-synapse-analytics-guide.md + - name: From Teradata + items: + - name: 1 Design performance for Teradata migration + href: migration-guides/teradata/1-design-performance-migration.md + - name: 2 ETL and load migration considerations + href: migration-guides/teradata/2-etl-load-migration-considerations.md + - name: 3 Security access operations + href: migration-guides/teradata/3-security-access-operations.md + - name: 4 Visualization and reporting + href: migration-guides/teradata/4-visualization-reporting.md + - name: 5 Minimizing SQL issues + href: migration-guides/teradata/5-minimize-sql-issues.md + - name: 6 Microsoft and third-party tools + href: migration-guides/teradata/6-microsoft-third-party-migration-tools.md + - name: 7 Implementing modern data warehouses + href: migration-guides/teradata/7-beyond-data-warehouse-migration.md + - name: From Netezza + items: + - name: 1 Design performance for Netezza migration + href: migration-guides/netezza/1-design-performance-migration.md + - name: 2 ETL and load migration considerations + href: migration-guides/netezza/2-etl-load-migration-considerations.md + - name: 3 Security access operations + href: migration-guides/netezza/3-security-access-operations.md + - name: 4 Visualization and reporting + href: migration-guides/netezza/4-visualization-reporting.md + - name: 5 Minimizing SQL issues + href: migration-guides/netezza/5-minimize-sql-issues.md + - name: 6 Microsoft and third-party tools + href: migration-guides/netezza/6-microsoft-third-party-migration-tools.md + - name: 7 Implementing modern data warehouses + href: migration-guides/netezza/7-beyond-data-warehouse-migration.md - name: Security white paper items: - name: Introduction diff --git a/articles/synapse-analytics/whats-new.md b/articles/synapse-analytics/whats-new.md index 1d3f43f5fe445..56e35ffd98315 100644 --- a/articles/synapse-analytics/whats-new.md +++ 
b/articles/synapse-analytics/whats-new.md @@ -11,7 +11,7 @@ ms.date: 04/15/2022 # What's new in Azure Synapse Analytics? -This article lists updates to Azure Synapse Analytics that are published in Mar 2022. Each update links to the Azure Synapse Analytics blog and an article that provides more information. For previous months releases, check out [Azure Synapse Analytics - updates archive](whats-new-archive.md). +This article lists updates to Azure Synapse Analytics that are published in April 2022. Each update links to the Azure Synapse Analytics blog and an article that provides more information. For previous months releases, check out [Azure Synapse Analytics - updates archive](whats-new-archive.md). The following updates are new to Azure Synapse Analytics this month. @@ -35,7 +35,7 @@ The following updates are new to Azure Synapse Analytics this month. * Assigning parameters dynamically based on variables, metadata, or specifying Pipeline specific parameters has been one of your top feature requests. Now, with the release of parameterization for the Spark job definition activity, you can do just that. For more details, read [Transform data using Apache Spark job definition](quickstart-transform-data-using-spark-job-definition.md#settings-tab). -* We often receive customer requests to access the snapshot of the Notebook when there is a Pipeline Notebook run failure or there is a long-running Notebook job. With the release of the Synapse Notebook snapshot feature, you can now view the snapshot of the Notebook activity run with the original Notebook code, the cell output, and the input parameters. You can also access the snapshot of the referenced Notebook from the referencing Notebook cell output if you refer to other Notebooks through Spark utils. To learn more, read [Transform data by running a Synapse notebook](synapse-notebook-activity.md?tabs=classical#see-notebook-activity-run-history) and [Introduction to Microsoft Spark utilities](/spark/microsoft-spark-utilities.md?pivots=programming-language-scala#reference-a-notebook-1). +* We often receive customer requests to access the snapshot of the Notebook when there is a Pipeline Notebook run failure or there is a long-running Notebook job. With the release of the Synapse Notebook snapshot feature, you can now view the snapshot of the Notebook activity run with the original Notebook code, the cell output, and the input parameters. You can also access the snapshot of the referenced Notebook from the referencing Notebook cell output if you refer to other Notebooks through Spark utils. To learn more, read [Transform data by running a Synapse notebook](synapse-notebook-activity.md?tabs=classical#see-notebook-activity-run-history) and [Introduction to Microsoft Spark utilities](/azure/synapse-analytics/spark/microsoft-spark-utilities?pivots=programming-language-scala#reference-a-notebook-1). 
## Security diff --git a/articles/virtual-desktop/TOC.yml b/articles/virtual-desktop/TOC.yml index 363bc96ad4565..627e9fbfa1644 100644 --- a/articles/virtual-desktop/TOC.yml +++ b/articles/virtual-desktop/TOC.yml @@ -187,7 +187,7 @@ items: - name: Create a host pool using PowerShell or the Azure CLI href: create-host-pools-powershell.md - - name: Deploy an Azure AD joined session host + - name: Deploy an Azure AD-joined session host href: deploy-azure-ad-joined-vm.md - name: Deploy a Windows 7 virtual machine href: deploy-windows-7-virtual-machine.md @@ -285,7 +285,7 @@ href: app-attach-msixmgr.md - name: Use Microsoft Teams href: teams-on-avd.md - - name: Set up Azure AD multifactor authentication + - name: Enforce Azure AD MFA href: set-up-mfa.md - name: Configure AD FS single sign-on href: configure-adfs-sso.md @@ -380,6 +380,6 @@ - name: Azure Virtual Desktop roadmap href: https://aka.ms/avdroadmap - name: Azure Resource Manager templates - href: https://github.com/Azure/RDS-Templates/tree/master/wvd-templates + href: https://github.com/Azure/RDS-Templates/tree/master/ARM-wvd-templates - name: Azure compliance offerings - href: https://azure.microsoft.com/resources/microsoft-azure-compliance-offerings/ \ No newline at end of file + href: https://azure.microsoft.com/resources/microsoft-azure-compliance-offerings/ diff --git a/articles/virtual-desktop/authentication.md b/articles/virtual-desktop/authentication.md index b9ed466925b4c..a27c867ac717d 100644 --- a/articles/virtual-desktop/authentication.md +++ b/articles/virtual-desktop/authentication.md @@ -39,9 +39,9 @@ Azure Virtual Desktop currently doesn't support [external identities](../active- To access Azure Virtual Desktop resources, you must first authenticate to the service by signing in to an Azure AD account. Authentication happens when subscribing to a workspace to retrieve your resources or every time you connect to apps or desktops. You can use [third-party identity providers](../active-directory/devices/azureadjoin-plan.md#federated-environment) as long as they federate with Azure AD. -### Multifactor authentication +### Multi-factor authentication -Follow the instructions in [Set up multifactor authentication in Azure Virtual Desktop](set-up-mfa.md) to learn how to enable multifactor authentication (MFA) for your deployment. That article will also tell you how to configure how often your users are prompted to enter their credentials. When deploying Azure AD-joined VMs, follow the configuration guide in [Enabling MFA for Azure AD-joined VMs](deploy-azure-ad-joined-vm.md#enabling-mfa-for-azure-ad-joined-vms). +Follow the instructions in [Enforce Azure Active Directory Multi-Factor Authentication for Azure Virtual Desktop using Conditional Access](set-up-mfa.md) to learn how to enforce Azure AD Multi-Factor Authentication for your deployment. That article will also tell you how to configure how often your users are prompted to enter their credentials. When deploying Azure AD-joined VMs, note the extra steps for [Azure AD-joined session host VMs](set-up-mfa.md#azure-ad-joined-session-host-vms). 
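Whether a given Windows client actually holds a Primary Refresh Token is easy to confirm from the device itself. A small sketch using the built-in `dsregcmd` tool (no extra modules assumed):

```powershell
# Run on the Windows client device. The AzureAdJoined and AzureAdPrt lines show
# whether the device is Azure AD joined or registered and whether it currently
# holds a Primary Refresh Token (PRT).
dsregcmd /status | Select-String -Pattern 'AzureAdJoined', 'AzureAdPrt'
```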
### Smart card authentication diff --git a/articles/virtual-desktop/configure-vm-gpu.md b/articles/virtual-desktop/configure-vm-gpu.md index 1f569aa0e87d8..ce043d0a879da 100644 --- a/articles/virtual-desktop/configure-vm-gpu.md +++ b/articles/virtual-desktop/configure-vm-gpu.md @@ -1,10 +1,10 @@ --- title: Configure GPU for Azure Virtual Desktop - Azure description: How to enable GPU-accelerated rendering and encoding in Azure Virtual Desktop. -author: gundarev +author: femila ms.topic: how-to ms.date: 05/06/2019 -ms.author: denisgun +ms.author: femila --- # Configure graphics processing unit (GPU) acceleration for Azure Virtual Desktop diff --git a/articles/virtual-desktop/create-profile-container-azure-ad.md b/articles/virtual-desktop/create-profile-container-azure-ad.md index be12b3ecc9562..256e5d33b6634 100644 --- a/articles/virtual-desktop/create-profile-container-azure-ad.md +++ b/articles/virtual-desktop/create-profile-container-azure-ad.md @@ -7,7 +7,7 @@ manager: femila ms.service: virtual-desktop ms.topic: how-to -ms.date: 04/05/2022 +ms.date: 06/03/2022 ms.author: helohr --- # Create a profile container with Azure Files and Azure Active Directory (preview) @@ -160,7 +160,7 @@ To enable Azure AD authentication on a storage account, you need to create an Az } '@ $now = [DateTime]::UtcNow - $json = $json -replace "", $now.AddDays(-1).ToString("s") + $json = $json -replace "", $now.AddHours(-12).ToString("s") $json = $json -replace "", $now.AddMonths(6).ToString("s") $json = $json -replace "", $password $Headers = @{'authorization' = "Bearer $($Token)"} @@ -350,7 +350,9 @@ This section will show you how to configure a VM with FSLogix. You'll need to fo To configure FSLogix: -1. [Update or install FSLogix](/fslogix/install-ht) on your session host, if needed. +1. [Update or install FSLogix](/fslogix/install-ht) on your session host, if needed. + > [!NOTE] + > If the session host is created using the Azure Virtual Desktop service, FSLogix should already be pre-installed. 2. Follow the instructions in [Configure profile container registry settings](/fslogix/configure-profile-container-tutorial#configure-profile-container-registry-settings) to create the **Enabled** and **VHDLocations** registry values. Set the value of **VHDLocations** to `\\.file.core.windows.net\`. @@ -437,7 +439,7 @@ The service principal's password will expire every six months. To update the pas '@ $now = [DateTime]::UtcNow - $json = $json -replace "", $now.AddDays(-1).ToString("s") + $json = $json -replace "", $now.AddHours(-12).ToString("s") $json = $json -replace "", $now.AddMonths(6).ToString("s") $json = $json -replace "", $password diff --git a/articles/virtual-desktop/data-locations.md b/articles/virtual-desktop/data-locations.md index 36864f0846486..4f624ece91dbf 100644 --- a/articles/virtual-desktop/data-locations.md +++ b/articles/virtual-desktop/data-locations.md @@ -4,51 +4,52 @@ description: A brief overview of which locations Azure Virtual Desktop's data an author: Heidilohr ms.topic: conceptual ms.custom: references_regions -ms.date: 06/30/2021 +ms.date: 06/07/2022 ms.author: helohr manager: femila --- # Data locations for Azure Virtual Desktop -Azure Virtual Desktop is currently available for all geographical locations. Administrators can choose the location to store user data when they create the host pool virtual machines and associated services, such as file servers. 
Learn more about Azure geographies at [Data residency in Azure](https://azure.microsoft.com/global-infrastructure/data-residency/#overview). +Azure Virtual Desktop is available in many Azure regions, which are grouped by geography. When Azure Virtual Desktop resources are deployed, you have to specify the Azure region they'll be created in. The location of the resource determines where its information will be stored and the geography where related information will be stored. Azure Virtual Desktop itself is a non-regional service where there's no dependency on a specific Azure region. Learn more about [Data residency in Azure](https://azure.microsoft.com/global-infrastructure/data-residency/#overview) and [Azure geographies](https://azure.microsoft.com/global-infrastructure/geographies/). ->[!NOTE] ->Microsoft doesn't control or limit the regions where you or your users can access your user and app-specific data. +Azure Virtual Desktop stores various information for service objects, such as host pool names, application group names, workspace names, and user principal names. Data is categorized into different types, such as customer input, customer data, diagnostic data, and service-generated data. For more information about data category definitions, see [How Microsoft categorizes data for online services](https://www.microsoft.com/trust-center/privacy/customer-data-definitions). ->[!IMPORTANT] ->Azure Virtual Desktop stores various types of information like host pool names, app group names, workspace names, and user principal names in a datacenter. While creating any of the service objects, the customer has to enter the location where the object needs to be created. The location of this object determines where the information for the object will be stored. The customer will choose an Azure region and the related information will be stored in the associated geography. Customers also choose a region for the Session host Virtual Machines in an additional step in the deployment process. This region can be any Azure region, hence it can be the same region as the service objects or a separate region. For a list of all Azure regions and related geographies, visit [https://azure.microsoft.com/global-infrastructure/geographies/](https://azure.microsoft.com/global-infrastructure/geographies/). - -This article describes which information the Azure Virtual Desktop service stores. To learn more about the customer data definitions, see [How Microsoft categorizes data for online services](https://www.microsoft.com/trust-center/privacy/customer-data-definitions). +> [!NOTE] +> Microsoft doesn't control or limit the regions where you or your users can access your user and app-specific data. ## Customer input -To set up the Azure Virtual Desktop service, the customer must create host pools and other service objects. During configuration, the customer must give information like the host pool name, application group name, and so on. This information is considered customer input. Customer input is stored in the geography associated with the region the object is created in. Azure Resource Manager paths to the objects are considered organizational information, so data residency doesn't apply to them. Data about Azure Resource Manager paths will be stored outside of the chosen geography. +To set up Azure Virtual Desktop, you must create host pools and other service objects. During configuration, you must enter information such as the host pool name, application group name, and so on. 
This information is considered "customer input." Customer input is stored in the geography associated with the Azure region the resource is created in. The stored data includes all data that you input into the host pool deployment process and any data you add after deployment while making configuration changes to Azure Virtual Desktop objects. Basically, stored data is the same data you can access using the Azure Virtual Desktop portal, PowerShell, or Azure command-line interface (CLI). For an example of this data in PowerShell, review the commands you can use to retrieve the "customer input" data that the [Azure Virtual Desktop service stores](https://docs.microsoft.com/powershell/module/az.desktopvirtualization/?view=azps-8.0.0). + +Azure Resource Manager paths to service objects are considered organizational information, so data residency doesn't apply to them. Data about Azure Resource Manager paths is stored outside of the chosen geography. ## Customer data -The service doesn't directly store any user created or app-related information, but it does store customer data like application names and user principal names because they're part of the object setup process. This information is stored in the geography associated with the region the customer created the object in. +The Azure Virtual Desktop service doesn't directly store any user-created or app-related information, but it does store customer data, such as application names and user principal names, because they're part of the resource deployment process. This information is stored in the geography associated with the region you created the resource in. ## Diagnostic data -Azure Virtual Desktop gathers service-generated diagnostic data whenever the customer or user interacts with the service. This data is only used for troubleshooting, support, and checking the health of the service in aggregate form. For example, from the session host side, when a VM registers to the service, we generate information that includes the virtual machine (VM) name, which host pool the VM belongs to, and so on. This information is stored in the geography associated with the region the host pool is created in. Also, when a user connects to the service and launches a remote desktop, we generate diagnostic information that includes the user principal name, client location, client IP address, which host pool the user is connecting to, and so on. This information is sent to two different locations: +Diagnostic data is generated by the Azure Virtual Desktop service and is gathered whenever administrators or users interact with the service. This data is only used for troubleshooting, support, and checking the health of the service in aggregate form. For example, when a session host VM is registered to a host pool, information is generated that includes the virtual machine (VM) name, which host pool the VM belongs to, and so on. This information is stored in the geography associated with the Azure region the host pool is created in. Also, when a user connects to the service and launches a session, diagnostic information is generated that includes the user principal name, client location, client IP address, which host pool the user is connecting to, and so on. This information is sent to two different locations: -- The location closest to the user where the service infrastructure (client traces, user traces, diagnostic data) is present.
+- The location closest to the user where the service infrastructure (client traces, user traces, and diagnostic data) is present. - The location where the host pool is located. ## Service-generated data -To keep Azure Virtual Desktop reliable and scalable, we aggregate traffic patterns and usage to check the health and performance of the infrastructure control plane. For example, to understand how to ramp up regional infrastructure capacity as service usage increases, we process service usage log data. We then review the logs for peak times and decide which data centers to add to meet this capacity. +To keep Azure Virtual Desktop reliable and scalable, traffic patterns and usage are aggregated to check the health and performance of the infrastructure control plane. For example, to help us understand how to ramp up regional infrastructure capacity as service usage increases, we process service usage log data. We then review the logs for peak times and decide where to increase capacity. -We currently support storing the aforementioned data in the following locations: +Storing service-generated data is currently supported in the following geographies: - United States (US) - Europe (EU) - United Kingdom (UK) - Canada (CA) -- Japan (JP) (Public Preview) +- Japan (JP) *(in Public Preview)* + +In addition, service-generated data is aggregated from all locations where the service infrastructure is, and sent to the US geography. The data sent to the US includes scrubbed data, but not customer data. -In addition we aggregate service-generated from all locations where the service infrastructure is, then send it to the US geography. The data sent to the US region includes scrubbed data, but not customer data. +## Data storage -More geographies will be added as the service grows. The stored information is encrypted at rest, and geo-redundant mirrors are maintained within the geography. Customer data, such as app settings and user data, resides in the location the customer chooses and isn't managed by the service. +Stored information is encrypted at rest, and geo-redundant mirrors are maintained within the geography. Data generated by the Azure Virtual Desktop service is replicated within the Azure geography for disaster recovery purposes. -The outlined data is replicated within the Azure geography for disaster recovery purposes. +User-created or app-related information, such as app settings and user data, resides in the Azure region you choose and isn't managed by the Azure Virtual Desktop service. diff --git a/articles/virtual-desktop/deploy-azure-ad-joined-vm.md index 42fc121a1b07d..0e659eeaec417 100644 --- a/articles/virtual-desktop/deploy-azure-ad-joined-vm.md +++ b/articles/virtual-desktop/deploy-azure-ad-joined-vm.md @@ -73,9 +73,9 @@ To enable access from Windows devices not joined to Azure AD, add **targetisaadj To access Azure AD-joined VMs using the web, Android, macOS and iOS clients, you must add **targetisaadjoined:i:1** as a [custom RDP property](customize-rdp-properties.md) to the host pool. These connections are restricted to entering user name and password credentials when signing in to the session host. -### Enabling MFA for Azure AD joined VMs +### Enforcing Azure AD Multi-Factor Authentication for Azure AD-joined session VMs -You can enable [multifactor authentication](set-up-mfa.md) for Azure AD-joined VMs by setting a Conditional Access policy on the Azure Virtual Desktop app.
For connections to succeed, you must [disable the legacy per-user multifactor authentication](../active-directory/devices/howto-vm-sign-in-azure-ad-windows.md#mfa-sign-in-method-required). If you don't want to restrict signing in to strong authentication methods like Windows Hello for Business, you'll also need to [exclude the Azure Windows VM Sign-In app](../active-directory/devices/howto-vm-sign-in-azure-ad-windows.md#mfa-sign-in-method-required) from your Conditional Access policy. +You can use Azure AD Multi-Factor Authentication with Azure AD-joined VMs. Follow the steps to [Enforce Azure Active Directory Multi-Factor Authentication for Azure Virtual Desktop using Conditional Access](set-up-mfa.md) and note the extra steps for [Azure AD-joined session host VMs](set-up-mfa.md#azure-ad-joined-session-host-vms). ## User profiles diff --git a/articles/virtual-desktop/faq.yml index 2f30e18009e78..bbec22666a40e 100644 --- a/articles/virtual-desktop/faq.yml +++ b/articles/virtual-desktop/faq.yml @@ -100,7 +100,7 @@ sections: - question: | How does Azure Virtual Desktop handle backups? - answer: There are multiple options in Azure Virtual Desktop for handling backup. At the Compute level, backup is recommended only for Personal Host Pools through [Azure Backup](https://docs.microsoft.com/azure/backup/backup-azure-vms-introduction). At the Storage level, recomemmended backup solution varies based on the backend storage used to store user profiles. If Azure Files Share is used, [Azure Backup for File Share](https://docs.microsoft.com/azure/backup/azure-file-share-backup-overview) is recommended. If Azure NetApp Files is used, [Snaphots/Policies](https://docs.microsoft.com/azure/azure-netapp-files/snapshots-manage-policy) or [Azure NetApp Files Backup](https://docs.microsoft.com/en-us/azure/azure-netapp-files/backup-introductionhttps://docs.microsoft.com/azure/azure-netapp-files/backup-introduction) are tools available. + answer: There are multiple options in Azure Virtual Desktop for handling backup. At the Compute level, backup is recommended only for Personal Host Pools through [Azure Backup](../backup/backup-azure-vms-introduction.md). At the Storage level, the recommended backup solution varies based on the backend storage used to store user profiles. If Azure Files Share is used, [Azure Backup for File Share](../backup/azure-file-share-backup-overview.md) is recommended. If Azure NetApp Files is used, [Snapshots/Policies](../azure-netapp-files/snapshots-manage-policy.md) or [Azure NetApp Files Backup](/azure/azure-netapp-files/backup-introduction) are the available tools. - question: | Does Azure Virtual Desktop support third-party collaboration apps? diff --git a/articles/virtual-desktop/language-packs.md index c4e82965f0749..b3dcd7cae892f 100644 --- a/articles/virtual-desktop/language-packs.md +++ b/articles/virtual-desktop/language-packs.md @@ -3,7 +3,7 @@ title: Install language packs on Windows 10 VMs in Azure Virtual Desktop - Azure description: How to install language packs for Windows 10 multi-session VMs in Azure Virtual Desktop.
author: Heidilohr ms.topic: how-to -ms.date: 04/01/2022 +ms.date: 06/01/2022 ms.author: helohr manager: femila --- @@ -40,7 +40,7 @@ You need the following things to customize your Windows 10 Enterprise multi-sess - [Windows 10, version 20H2 Inbox Apps ISO](https://software-download.microsoft.com/download/pr/19041.508.200905-1327.vb_release_svc_prod1_amd64fre_InboxApps.iso) - [Windows 10, version 21H1 or 21H2 Inbox Apps ISO](https://software-download.microsoft.com/download/sg/19041.928.210407-2138.vb_release_svc_prod1_amd64fre_InboxApps.iso) - - If you use Local Experience Pack (LXP) ISO files to localize your images, you will also need to download the appropriate LXP ISO for the best language experience + - If you use Local Experience Pack (LXP) ISO files to localize your images, you'll also need to download the appropriate LXP ISO for the best language experience - If you're using Windows 10, version 1903 or 1909: - [Windows 10, version 1903 or 1909 LXP ISO](https://software-download.microsoft.com/download/pr/Win_10_1903_32_64_ARM64_MultiLng_LngPkAll_LXP_ONLY.iso) - If you're using Windows 10, version 2004, 20H2, or 21H1, use the information in [Adding languages in Windows 10: Known issues](/windows-hardware/manufacture/desktop/language-packs-known-issue) to figure out which of the following LXP ISOs is right for you: @@ -90,9 +90,13 @@ To create the content repository for language packages and FODs and a repository To create a custom Windows 10 Enterprise multi-session image manually: 1. Deploy an Azure VM, then go to the Azure Gallery and select the current version of Windows 10 Enterprise multi-session you're using. -2. After you've deployed the VM, connect to it using RDP as a local admin. -3. Make sure your VM has all the latest Windows Updates. Download the updates and restart the VM, if necessary. -4. Connect to the language package, FOD, and Inbox Apps file share repository and mount it to a letter drive (for example, drive E). +1. After you've deployed the VM, connect to it using RDP as a local admin. +1. Make sure your VM has all the latest Windows Updates. Download the updates and restart the VM, if necessary. + + > [!IMPORTANT] + > After you install a language pack, you have to reinstall the latest cumulative update that is installed on your image. If you do not reinstall the latest cumulative update, you may encounter errors. If the latest cumulative update is already installed, Windows Update does not offer it again; you have to manually reinstall it. For more information, see [Languages overview](/windows-hardware/manufacture/desktop/languages-overview.md?view=windows-10&preserve-view=true#considerations). + +1. Connect to the language package, FOD, and Inbox Apps file share repository and mount it to a letter drive (for example, drive E). ## Create a custom Windows 10 Enterprise multi-session image automatically @@ -100,7 +104,7 @@ If you'd rather install languages through an automated process, you can set up a ```powershell ######################################################## -## Add Languages to running Windows Image for Capture## +## Add Languages to running Windows Image for Capture ## ######################################################## ##Disable Language Pack Cleanup## @@ -175,7 +179,7 @@ The script might take a while depending on the number of languages you need to i Once the script is finished running, check to make sure the language packs installed correctly by going to **Start** > **Settings** > **Time & Language** > **Language**. 
If the language files are there, you're all set. -After adding additional languages to the Windows image, the inbox apps are also required to be updated to support the added languages. This can be done by refreshing the pre-installed apps with the content from the inbox apps ISO. +After you've added additional languages to the Windows image, the inbox apps are also required to be updated to support the added languages. This can be done by refreshing the pre-installed apps with the content from the inbox apps ISO. To perform this refresh in an environment where the VM doesn't have internet access, you can use the following PowerShell script template to automate the process and update only installed versions of inbox apps. ```powershell @@ -230,7 +234,7 @@ To run sysprep: 2. Stop the VM, then capture it in a managed image by following the instructions in [Create a managed image of a generalized VM in Azure](../virtual-machines/windows/capture-image-resource.md). -3. You can now use the customized image to deploy a Azure Virtual Desktop host pool. To learn how to deploy a host pool, see [Tutorial: Create a host pool with the Azure portal](create-host-pools-azure-marketplace.md). +3. You can now use the customized image to deploy an Azure Virtual Desktop host pool. To learn how to deploy a host pool, see [Tutorial: Create a host pool with the Azure portal](create-host-pools-azure-marketplace.md). ## Enable languages in Windows settings app diff --git a/articles/virtual-desktop/media/conditional-access-client-apps.png b/articles/virtual-desktop/media/conditional-access-client-apps.png new file mode 100644 index 0000000000000..39852e28d1e20 Binary files /dev/null and b/articles/virtual-desktop/media/conditional-access-client-apps.png differ diff --git a/articles/virtual-desktop/media/uninstall-remote-desktop-services-sxs-network-stack.png b/articles/virtual-desktop/media/uninstall-remote-desktop-services-sxs-network-stack.png new file mode 100644 index 0000000000000..08cad2a0fb2c7 Binary files /dev/null and b/articles/virtual-desktop/media/uninstall-remote-desktop-services-sxs-network-stack.png differ diff --git a/articles/virtual-desktop/network-connectivity.md b/articles/virtual-desktop/network-connectivity.md index b6dd2f63d2c10..97af3e3d7f5b9 100644 --- a/articles/virtual-desktop/network-connectivity.md +++ b/articles/virtual-desktop/network-connectivity.md @@ -2,10 +2,10 @@ title: Understanding Azure Virtual Desktop network connectivity titleSuffix: Azure description: Learn about Azure Virtual Desktop network connectivity -author: gundarev +author: femila ms.topic: conceptual ms.date: 11/16/2020 -ms.author: denisgun +ms.author: femila --- # Understanding Azure Virtual Desktop network connectivity diff --git a/articles/virtual-desktop/rdp-bandwidth.md b/articles/virtual-desktop/rdp-bandwidth.md index add738b6ebab4..35e0433a2bfdb 100644 --- a/articles/virtual-desktop/rdp-bandwidth.md +++ b/articles/virtual-desktop/rdp-bandwidth.md @@ -2,10 +2,10 @@ title: Remote Desktop Protocol bandwidth requirements Azure Virtual Desktop - Azure titleSuffix: Azure description: Understanding RDP bandwidth requirements for Azure Virtual Desktop. 
-author: gundarev +author: femila ms.topic: conceptual ms.date: 11/16/2020 -ms.author: denisgun +ms.author: femila --- # Remote Desktop Protocol (RDP) bandwidth requirements diff --git a/articles/virtual-desktop/rdp-quality-of-service-qos.md b/articles/virtual-desktop/rdp-quality-of-service-qos.md index 02a1502d1bfbf..b5d7bd91bb4dc 100644 --- a/articles/virtual-desktop/rdp-quality-of-service-qos.md +++ b/articles/virtual-desktop/rdp-quality-of-service-qos.md @@ -2,10 +2,10 @@ title: Implement Quality of Service (QoS) for Azure Virtual Desktop titleSuffix: Azure description: How to set up QoS for Azure Virtual Desktop. -author: gundarev +author: femila ms.topic: conceptual ms.date: 10/18/2021 -ms.author: denisgun +ms.author: femila --- # Implement Quality of Service (QoS) for Azure Virtual Desktop diff --git a/articles/virtual-desktop/safe-url-list.md b/articles/virtual-desktop/safe-url-list.md index e7624eed728ca..a4b1cc9c6e1de 100644 --- a/articles/virtual-desktop/safe-url-list.md +++ b/articles/virtual-desktop/safe-url-list.md @@ -3,7 +3,7 @@ title: Azure Virtual Desktop required URL list - Azure description: A list of URLs you must unblock to ensure your Azure Virtual Desktop deployment works as intended. author: Heidilohr ms.topic: conceptual -ms.date: 05/12/2022 +ms.date: 05/26/2022 ms.author: helohr manager: femila --- @@ -79,6 +79,8 @@ The Azure virtual machines you create for Azure Virtual Desktop must have access |wvdportalstorageblob.blob.core.windows.net|443|Azure portal support|AzureCloud| | 169.254.169.254 | 80 | [Azure Instance Metadata service endpoint](../virtual-machines/windows/instance-metadata-service.md) | N/A | | 168.63.129.16 | 80 | [Session host health monitoring](../virtual-network/network-security-groups-overview.md#azure-platform-considerations) | N/A | +| oneocsp.microsoft.com | 443 | Certificates | N/A | +| microsoft.com | 443 | Certificates | N/A | A [Service Tag](../virtual-network/service-tags-overview.md) represents a group of IP address prefixes from a given Azure service. Microsoft manages the address prefixes encompassed by the service tag and automatically updates the service tag as addresses change, minimizing the complexity of frequent updates to network security rules. Service Tags can be used in both Network Security Group ([NSG](../virtual-network/network-security-groups-overview.md)) and [Azure Firewall](../firewall/service-tags.md) rules to restrict outbound network access. Service Tags can be also used in User Defined Route ([UDR](../virtual-network/virtual-networks-udr-overview.md#user-defined)) to customize traffic routing behavior. @@ -112,6 +114,7 @@ The Azure virtual machines you create for Azure Virtual Desktop must have access |wvdportalstorageblob.blob.core.usgovcloudapi.net|443|Azure portal support|AzureCloud| | 169.254.169.254 | 80 | [Azure Instance Metadata service endpoint](../virtual-machines/windows/instance-metadata-service.md) | N/A | | 168.63.129.16 | 80 | [Session host health monitoring](../virtual-network/network-security-groups-overview.md#azure-platform-considerations) | N/A | +| ocsp.msocsp.com | 443 | Certificates | N/A | > [!IMPORTANT] > We are currently transitioning the URLs we use for Agent traffic. We still support the URLs below, however we encourage you to switch to ***.prod.warm.ingest.monitor.core.usgovcloudapi.net** as soon as possible. 
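With the certificate endpoints added to the required URL lists above, it can be worth spot-checking that session hosts can actually reach them. A hedged sketch using the built-in `Test-NetConnection` cmdlet (the endpoints below only repeat entries from the tables above):

```powershell
# Quick outbound connectivity check from a session host for the newly listed endpoints.
$endpoints = @(
    @{ Name = 'oneocsp.microsoft.com'; Port = 443 },
    @{ Name = 'microsoft.com';         Port = 443 }
)

foreach ($endpoint in $endpoints) {
    # TcpTestSucceeded is True when the port is reachable.
    Test-NetConnection -ComputerName $endpoint.Name -Port $endpoint.Port |
        Select-Object ComputerName, RemotePort, TcpTestSucceeded
}
```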
diff --git a/articles/virtual-desktop/screen-capture-protection.md b/articles/virtual-desktop/screen-capture-protection.md index 5e7e2710ad89a..bce6d5cc8af4c 100644 --- a/articles/virtual-desktop/screen-capture-protection.md +++ b/articles/virtual-desktop/screen-capture-protection.md @@ -2,10 +2,10 @@ title: Azure Virtual Desktop screen capture protection titleSuffix: Azure description: How to set up screen capture protection for Azure Virtual Desktop. -author: gundarev +author: femila ms.topic: conceptual ms.date: 08/30/2021 -ms.author: denisgun +ms.author: femila ms.service: virtual-desktop --- diff --git a/articles/virtual-desktop/set-up-customize-master-image.md b/articles/virtual-desktop/set-up-customize-master-image.md index dbe79e481ea97..da34c94354abc 100644 --- a/articles/virtual-desktop/set-up-customize-master-image.md +++ b/articles/virtual-desktop/set-up-customize-master-image.md @@ -3,7 +3,7 @@ title: Prepare and customize a VHD image of Azure Virtual Desktop - Azure description: How to prepare, customize and upload a Azure Virtual Desktop image to Azure. author: Heidilohr ms.topic: how-to -ms.date: 01/19/2021 +ms.date: 06/01/2022 ms.author: helohr manager: femila --- @@ -24,7 +24,7 @@ The second option is to create the image locally by downloading the image, provi ### Local image creation -Once you've downloaded the image to a local location, open **Hyper-V Manager** to create a VM with the VHD you copied. The following instructions are a simple version, but you can find more detailed instructions in [Create a virtual machine in Hyper-V](/windows-server/virtualization/hyper-v/get-started/create-a-virtual-machine-in-hyper-v/). +You can download an image following the instructions in [Export an image version to a managed disk](../virtual-machines/managed-disk-from-image-version.md) and then [Download a Windows VHD from Azure](../virtual-machines/windows/download-vhd.md). Once you've downloaded the image to a local location, open **Hyper-V Manager** to create a VM with the VHD you copied. The following instructions are a simple version, but you can find more detailed instructions in [Create a virtual machine in Hyper-V](/windows-server/virtualization/hyper-v/get-started/create-a-virtual-machine-in-hyper-v/). To create a VM with the copied VHD: @@ -53,7 +53,7 @@ If you create a VM from an existing VHD, it creates a dynamic disk by default. I > [!div class="mx-imgBorder"] > ![A screenshot of the Edit Disk option.](media/35772414b5a0f81f06f54065561d1414.png) -You can also run the following PowerShell cmdlet to change the disk to a fixed disk. +You can also run the following PowerShell command to change the disk to a fixed disk. ```powershell Convert-VHD –Path c:\test\MY-VM.vhdx –DestinationPath c:\test\MY-NEW-VM.vhd -VHDType Fixed @@ -88,18 +88,18 @@ To disable Automatic Updates via local Group Policy: 1. Open **Local Group Policy Editor\\Administrative Templates\\Windows Components\\Windows Update**. 2. Right-click **Configure Automatic Update** and set it to **Disabled**. -You can also run the following command on a command prompt to disable Automatic Updates. +You can also run the following command from an elevated PowerShell prompt to disable Automatic Updates. 
-```cmd -reg add "HKLM\SOFTWARE\Policies\Microsoft\Windows\WindowsUpdate\AU" /v NoAutoUpdate /t REG_DWORD /d 1 /f +```powershell +New-ItemProperty -Path "HKLM:\SOFTWARE\Policies\Microsoft\Windows\WindowsUpdate\AU" -Name NoAutoUpdate -PropertyType DWORD -Value 1 -Force ``` ### Specify Start layout for Windows 10 PCs (optional) -Run this command to specify a Start layout for Windows 10 PCs. +Run the following command from an elevated PowerShell prompt to specify a Start layout for Windows 10 PCs. -```cmd -reg add "HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer" /v SpecialRoamingOverrideAllowed /t REG_DWORD /d 1 /f +```powershell +New-ItemProperty -Path "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer" -Name SpecialRoamingOverrideAllowed -PropertyType DWORD -Value 1 -Force ``` ### Set up time zone redirection @@ -114,10 +114,10 @@ To redirect time zones: 4. In the **Group Policy Management Editor**, navigate to **Computer Configuration** > **Policies** > **Administrative Templates** > **Windows Components** > **Remote Desktop Services** > **Remote Desktop Session Host** > **Device and Resource Redirection**. 5. Enable the **Allow time zone redirection** setting. -You can also run this command on the master image to redirect time zones: +You can also run the following command from an elevated PowerShell prompt to redirect time zones: -```cmd -reg add "HKLM\SOFTWARE\Policies\Microsoft\Windows NT\Terminal Services" /v fEnableTimeZoneRedirection /t REG_DWORD /d 1 /f +```powershell +New-ItemProperty -Path "HKLM:\SOFTWARE\Policies\Microsoft\Windows NT\Terminal Services" -Name fEnableTimeZoneRedirection -PropertyType DWORD -Value 1 -Force ``` ### Disable Storage Sense @@ -127,10 +127,10 @@ For Azure Virtual Desktop session hosts that use Windows 10 Enterprise or Window > [!div class="mx-imgBorder"] > ![A screenshot of the Storage menu under Settings. The "Storage sense" option is turned off.](media/storagesense.png) -You can also change the setting with the registry by running the following command: +You can also run the following command from an elevated PowerShell prompt to disable Storage Sense: -```cmd -reg add "HKCU\Software\Microsoft\Windows\CurrentVersion\StorageSense\Parameters\StoragePolicy" /v 01 /t REG_DWORD /d 0 /f +```powershell +New-ItemProperty -Path "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\StorageSense\Parameters\StoragePolicy" -Name 01 -PropertyType DWORD -Value 0 -Force ``` ### Include additional language support @@ -143,33 +143,29 @@ This article doesn't cover how to configure language and regional support. For m ### Other applications and registry configuration -This section covers application and operating system configuration. All configuration in this section is done through registry entries that can be executed by command-line and regedit tools. - ->[!NOTE] ->You can implement best practices in configuration with either Group Policy Objects (GPOs) or registry imports. The administrator can choose either option based on their organization's requirements. +This section covers application and operating system configuration. All configuration in this section is done through adding, changing, or removing registry entries. 
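One practical note on the `New-ItemProperty` commands used in the sections that follow: they assume the target registry key already exists, and they fail if it doesn't. A minimal, hedged helper (the function name is illustrative and isn't part of the original article) that creates the key first:

```powershell
# Sketch only: create the registry key if needed, then set the value.
function Set-ImageRegistryValue {
    param (
        [Parameter(Mandatory)] [string] $Path,
        [Parameter(Mandatory)] [string] $Name,
        [Parameter(Mandatory)] $Value,
        [string] $PropertyType = 'DWORD'
    )

    if (-not (Test-Path -Path $Path)) {
        # Create the key (and any missing parent keys) before writing the value.
        New-Item -Path $Path -Force | Out-Null
    }

    New-ItemProperty -Path $Path -Name $Name -PropertyType $PropertyType -Value $Value -Force
}

# Example: the same telemetry value shown later in this section.
Set-ImageRegistryValue -Path "HKLM:\SOFTWARE\Policies\Microsoft\Windows\DataCollection" -Name AllowTelemetry -Value 3
```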
-For feedback hub collection of telemetry data on Windows 10 Enterprise multi-session, run this command: +For feedback hub collection of telemetry data on Windows 10 Enterprise multi-session, run the following command from an elevated PowerShell prompt: -```cmd -reg add "HKLM\SOFTWARE\Policies\Microsoft\Windows\DataCollection" /v AllowTelemetry /t REG_DWORD /d 3 /f +```powershell +New-ItemProperty -Path "HKLM:\SOFTWARE\Policies\Microsoft\Windows\DataCollection" -Name AllowTelemetry -PropertyType DWORD -Value 3 -Force ``` -Run the following command to fix Watson crashes: +To prevent Watson crashes, run the following command from an elevated PowerShell prompt: -```cmd -remove CorporateWerServer* from Computer\HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\Windows Error Reporting +```powershell +Remove-ItemProperty -Path "HKLM:\SOFTWARE\Microsoft\Windows\Windows Error Reporting" -Name Corporate* -Force -Verbose ``` -Enter the following commands into the registry editor to fix 5k resolution support. You must run the commands before you can enable the side-by-side stack. +To enable 5k resolution support, run the following commands from an elevated PowerShell prompt. You must run the commands before you can enable the side-by-side stack. -```cmd -reg add "HKLM\SYSTEM\CurrentControlSet\Control\Terminal Server\WinStations\RDP-Tcp" /v MaxMonitors /t REG_DWORD /d 4 /f -reg add "HKLM\SYSTEM\CurrentControlSet\Control\Terminal Server\WinStations\RDP-Tcp" /v MaxXResolution /t REG_DWORD /d 5120 /f -reg add "HKLM\SYSTEM\CurrentControlSet\Control\Terminal Server\WinStations\RDP-Tcp" /v MaxYResolution /t REG_DWORD /d 2880 /f - -reg add "HKLM\SYSTEM\CurrentControlSet\Control\Terminal Server\WinStations\rdp-sxs" /v MaxMonitors /t REG_DWORD /d 4 /f -reg add "HKLM\SYSTEM\CurrentControlSet\Control\Terminal Server\WinStations\rdp-sxs" /v MaxXResolution /t REG_DWORD /d 5120 /f -reg add "HKLM\SYSTEM\CurrentControlSet\Control\Terminal Server\WinStations\rdp-sxs" /v MaxYResolution /t REG_DWORD /d 2880 /f +```powershell +New-ItemProperty -Path "HKLM:\SYSTEM\CurrentControlSet\Control\Terminal Server\WinStations\RDP-Tcp" -Name MaxMonitors -PropertyType DWORD -Value 4 -Force +New-ItemProperty -Path "HKLM:\SYSTEM\CurrentControlSet\Control\Terminal Server\WinStations\RDP-Tcp" -Name MaxXResolution -PropertyType DWORD -Value 5120 -Force +New-ItemProperty -Path "HKLM:\SYSTEM\CurrentControlSet\Control\Terminal Server\WinStations\RDP-Tcp" -Name MaxYResolution -PropertyType DWORD -Value 2880 -Force +New-ItemProperty -Path "HKLM:\SYSTEM\CurrentControlSet\Control\Terminal Server\WinStations\rdp-sxs" -Name MaxMonitors -PropertyType DWORD -Value 4 -Force +New-ItemProperty -Path "HKLM:\SYSTEM\CurrentControlSet\Control\Terminal Server\WinStations\rdp-sxs" -Name MaxXResolution -PropertyType DWORD -Value 5120 -Force +New-ItemProperty -Path "HKLM:\SYSTEM\CurrentControlSet\Control\Terminal Server\WinStations\rdp-sxs" -Name MaxYResolution -PropertyType DWORD -Value 2880 -Force ``` ## Prepare the image for upload to Azure diff --git a/articles/virtual-desktop/set-up-mfa.md b/articles/virtual-desktop/set-up-mfa.md index ca414115adf77..31e4a459b75d2 100644 --- a/articles/virtual-desktop/set-up-mfa.md +++ b/articles/virtual-desktop/set-up-mfa.md @@ -1,90 +1,107 @@ --- -title: Azure multifactor authentication for Azure Virtual Desktop - Azure -description: How to set up Azure multifactor authentication to make Azure Virtual Desktop more secure. 
+title: Enforce Azure Active Directory Multi-Factor Authentication for Azure Virtual Desktop using Conditional Access - Azure +description: How to enforce Azure Active Directory Multi-Factor Authentication for Azure Virtual Desktop using Conditional Access to help make it more secure. author: Heidilohr ms.topic: how-to -ms.date: 12/10/2020 +ms.date: 05/27/2022 ms.author: helohr manager: femila --- -# Enable Azure multifactor authentication for Azure Virtual Desktop +# Enforce Azure Active Directory Multi-Factor Authentication for Azure Virtual Desktop using Conditional Access ->[!IMPORTANT] +> [!IMPORTANT] > If you're visiting this page from the Azure Virtual Desktop (classic) documentation, make sure to [return to the Azure Virtual Desktop (classic) documentation](./virtual-desktop-fall-2019/tenant-setup-azure-active-directory.md) once you're finished. -The Windows client for Azure Virtual Desktop is an excellent option for integrating Azure Virtual Desktop with your local machine. However, when you configure your Azure Virtual Desktop account into the Windows Client, there are certain measures you'll need to take to keep yourself and your users safe. +Users can sign in to Azure Virtual Desktop from anywhere using different devices and clients. However, there are certain measures you should take to help keep yourself and your users safe. Using Azure Active Directory (AD) Multi-Factor Authentication with Azure Virtual Desktop prompts users during the sign-in process for an additional form of identification, in addition to their username and password. You can use Conditional Access to enforce Azure Active Directory Multi-Factor Authentication for Azure Virtual Desktop, and to choose whether it applies to the web client, mobile apps and desktop clients, or both. -When you first sign in, the client asks for your username, password, and Azure multifactor authentication. After that, the next time you sign in, the client will remember your token from your Azure Active Directory (AD) Enterprise Application. When you select **Remember me** on the prompt for credentials for the session host, your users can sign in after restarting the client without needing to reenter their credentials. +How often a user is prompted to reauthenticate depends on [Azure AD session lifetime configuration settings](../active-directory/authentication/concepts-azure-multi-factor-authentication-prompts-session-lifetime.md#azure-ad-session-lifetime-configuration-settings). For example, if their Windows client device is registered with Azure AD, it will receive a [Primary Refresh Token](../active-directory/devices/concept-primary-refresh-token.md) (PRT) to use single sign-on (SSO) across applications. Once issued, a PRT is valid for 14 days and is continuously renewed as long as the user actively uses the device. -While remembering credentials is convenient, it can also make deployments on Enterprise scenarios or personal devices less secure. To protect your users, you can make sure the client keeps asking for Azure multifactor authentication credentials more frequently. This article will show you how to configure the Conditional Access policy for Azure Virtual Desktop to enable this setting. +While remembering credentials is convenient, it can also make deployments for Enterprise scenarios using personal devices less secure. To protect your users, you can make sure the client keeps asking for Azure AD Multi-Factor Authentication credentials more frequently. You can use Conditional Access to configure this behavior.
+ +Learn how to enforce Azure AD Multi-Factor Authentication for Azure Virtual Desktop and optionally configure sign-in frequency below. ## Prerequisites Here's what you'll need to get started: -- Assign users a license that includes Azure Active Directory Premium P1 or P2. -- An Azure Active Directory group with your users assigned as group members. -- Enable Azure multifactor authentication for all your users. For more information about how to do that, see [How to require two-step verification for a user](../active-directory/authentication/howto-mfa-userstates.md#view-the-status-for-a-user). - -> [!NOTE] -> The following setting also applies to the [Azure Virtual Desktop web client](https://rdweb.wvd.microsoft.com/arm/webclient/index.html). +- Assign users a license that includes [Azure Active Directory Premium P1 or P2](../active-directory/authentication/concept-mfa-licensing.md). +- An [Azure Active Directory group](../active-directory/fundamentals/active-directory-groups-create-azure-portal.md) with your Azure Virtual Desktop users assigned as group members. +- Enable Azure AD Multi-Factor Authentication for your users. For more information about how to do that, see [Enable Azure AD Multi-Factor Authentication](../active-directory/authentication/tutorial-enable-azure-mfa.md). ## Create a Conditional Access policy -Here's how to create a Conditional Access policy that requires multifactor authentication when connecting to Azure Virtual Desktop: - -1. Sign in to the **Azure portal** as a global administrator, security administrator, or Conditional Access administrator. -2. Browse to **Azure Active Directory** > **Security** > **Conditional Access**. -3. Select **New policy**. -4. Give your policy a name. We recommend that organizations create a meaningful standard for the names of their policies. -5. Under **Assignments**, select **Users and groups**. -6. Under **Include**, select **Select users and groups** > **Users and groups** > Choose the group you created in the [prerequisites](#prerequisites) stage. -7. Select **Done**. -8. Under **Cloud apps or actions** > **Include**, select **Select apps**. -9. Select one of the following apps based on which version of Azure Virtual Desktop you're using. +Here's how to create a Conditional Access policy that requires multi-factor authentication when connecting to Azure Virtual Desktop: + +1. Sign in to the [Azure portal](https://portal.azure.com) as a global administrator, security administrator, or Conditional Access administrator. +1. In the search bar, type *Azure Active Directory* and select the matching service entry. +1. Browse to **Security** > **Conditional Access**. +1. Select **New policy** > **Create new policy**. +1. Give your policy a name. We recommend that organizations create a meaningful standard for the names of their policies. +1. Under **Assignments**, select **Users or workload entities**. +1. Under the **Include** tab, select **Select users and groups** and tick **Users and groups**. On the right, search for and choose the group that contains your Azure Virtual Desktop users as group members. +1. Select **Select**. +1. Under **Assignments**, select **Cloud apps or actions**. +1. Under the **Include** tab, select **Select apps**. +1. On the right, select one of the following apps based on which version of Azure Virtual Desktop you're using. 
- - If you're using Azure Virtual Desktop (classic), choose these apps: - - - **Windows Virtual Desktop** (App ID 5a0aa725-4958-4b0c-80a9-34562e23f3b7) - - **Windows Virtual Desktop Client** (App ID fa4345a4-a730-4230-84a8-7d9651b86739), which will let you set policies on the web client + - If you're using Azure Virtual Desktop (based on Azure Resource Manager), choose this app: - After that, skip ahead to step 11. + - **Azure Virtual Desktop** (app ID 9cdead84-a844-4324-93f2-b2e6bb768d07) - - If you're using Azure Virtual Desktop, choose this app instead: + > [!TIP] + > The app name was previously *Windows Virtual Desktop*. If you registered the *Microsoft.DesktopVirtualization* resource provider before the display name changed, the application will be named **Windows Virtual Desktop** with the same app ID as above. + + After that, go to step 10. + + - If you're using Azure Virtual Desktop (classic), choose these apps: - - **Azure Virtual Desktop** (App ID 9cdead84-a844-4324-93f2-b2e6bb768d07) + - **Windows Virtual Desktop** (app ID 5a0aa725-4958-4b0c-80a9-34562e23f3b7) + - **Windows Virtual Desktop Client** (app ID fa4345a4-a730-4230-84a8-7d9651b86739), which will let you set policies on the web client - After that, go to step 10. + > [!TIP] + > If you're using Azure Virtual Desktop (classic) and if the Conditional Access policy blocks all access excluding Azure Virtual Desktop app IDs, you can fix this by also adding the **Azure Virtual Desktop** (app ID 9cdead84-a844-4324-93f2-b2e6bb768d07) to the policy. Not adding this app ID will block feed discovery of Azure Virtual Desktop (classic) resources. - >[!IMPORTANT] - > Don't select the app called Azure Virtual Desktop Azure Resource Manager Provider (50e95039-b200-4007-bc97-8d5790743a63). This app is only used for retrieving the user feed and shouldn't have multifactor authentication. - > - > If you're using Azure Virtual Desktop (classic), if the Conditional Access policy blocks all access and only excludes Azure Virtual Desktop app IDs, you can fix this by adding the app ID 9cdead84-a844-4324-93f2-b2e6bb768d07 to the policy. Not adding this app ID will block feed discovery of Azure Virtual Desktop (classic) resources. + After that, skip ahead to step 11. -10. Once you've selected your app, choose **Select**, and then select **Done**. + > [!IMPORTANT] + > Don't select the app called Azure Virtual Desktop Azure Resource Manager Provider (app ID 50e95039-b200-4007-bc97-8d5790743a63). This app is only used for retrieving the user feed and shouldn't have multi-factor authentication. - > [!div class="mx-imgBorder"] - > ![A screenshot of the Cloud apps or actions page. The Azure Virtual Desktop and Azure Virtual Desktop Client apps are highlighted in red.](media/cloud-apps-enterprise.png) +1. Once you've selected your app, select **Select**. - >[!NOTE] - >To find the App ID of the app you want to select, go to **Enterprise Applications** and select **Microsoft Applications** from the application type drop-down menu. - -11. Go to **Conditions** > **Client apps**. In **Configure**, select **Yes**, and then select where to apply the policy: + > [!div class="mx-imgBorder"] + > ![A screenshot of the Conditional Access Cloud apps or actions page. The Azure Virtual Desktop app is shown.](media/cloud-apps-enterprise.png) +1. Under **Assignments**, select **Conditions** > **Client apps**. 
On the right, for **Configure**, select **Yes**, and then select the client apps this policy will apply to:
+
+   - Select both check boxes if you want to apply the policy to all clients.
    - Select **Browser** if you want the policy to apply to the web client.
    - Select **Mobile apps and desktop clients** if you want to apply the policy to other clients.
-   - Select both check boxes if you want to apply the policy to all clients.
+   - Deselect values for legacy authentication clients.
 
    > [!div class="mx-imgBorder"]
-   > ![A screenshot of the Client apps page. The user has selected the mobile apps and desktop clients check box.](media/select-apply.png)
+   > ![A screenshot of the Conditional Access Client apps page. The user has selected the mobile apps and desktop clients, and browser check boxes.](media/conditional-access-client-apps.png)
+
+1. Once you've selected the client apps this policy will apply to, select **Done**.
+1. Under **Access controls** > **Grant**, select **Grant access**, **Require multi-factor authentication**, and then select **Select**.
+1. At the bottom of the page, set **Enable policy** to **On** and select **Create**.
+
+> [!NOTE]
+> When you use the web client to sign in to Azure Virtual Desktop through your browser, the log will list the client app ID as a85cf173-4192-42f8-81fa-777a763e6e2c (Azure Virtual Desktop client). This is because the client app is internally linked to the server app ID where the conditional access policy was set.
+
+> [!TIP]
+> Some users may see a prompt titled *Stay signed in to all your apps* if the Windows device they're using is not already registered with Azure AD. If they deselect **Allow my organization to manage my device** and select **No, sign in to this app only**, this prompt may reappear frequently.
+
+## Configure sign-in frequency
+
+To optionally configure the time period before a user is asked to sign in again:
+
+1. Open the policy you created previously.
+1. Under **Access controls**, select **Session**. On the right, select **Sign-in frequency**. Set the value for the time period before a user is asked to sign in again, and then select **Select**. For example, setting the value to **1** and the unit to **Hours** will require multi-factor authentication if a connection is launched over an hour after the last one.
+1. At the bottom of the page, under **Enable policy**, select **Save**.
 
-12. Under **Access controls** > **Grant**, select **Grant access**, **Require multi-factor authentication**, and then **Select**.
-13. Under **Access controls** > **Session**, select **Sign-in frequency**, set the value to the time you want between prompts, and then select **Select**. For example, setting the value to **1** and the unit to **Hours**, will require multifactor authentication if a connection is launched an hour after the last one.
-14. Confirm your settings and set **Enable policy** to **On**.
-15. Select **Create** to enable your policy.
+## Azure AD joined session host VMs
 
->[!NOTE]
->When you use the web client to sign in to Azure Virtual Desktop through your browser, the log will list the client app ID as a85cf173-4192-42f8-81fa-777a763e6e2c (Azure Virtual Desktop client). This is because the client app is internally linked to the server app ID where the conditional access policy was set.
+For connections to succeed, you must [disable the legacy per-user multi-factor authentication sign-in method](../active-directory/devices/howto-vm-sign-in-azure-ad-windows.md#mfa-sign-in-method-required).
If you don't want to restrict signing in to strong authentication methods like Windows Hello for Business, you'll also need to [exclude the Azure Windows VM Sign-In app](../active-directory/devices/howto-vm-sign-in-azure-ad-windows.md#mfa-sign-in-method-required) from your Conditional Access policy. ## Next steps diff --git a/articles/virtual-desktop/shortpath-public.md b/articles/virtual-desktop/shortpath-public.md index b06fb5f856311..0e05df80a8bfd 100644 --- a/articles/virtual-desktop/shortpath-public.md +++ b/articles/virtual-desktop/shortpath-public.md @@ -2,10 +2,10 @@ title: Azure Virtual Desktop RDP Shortpath for public networks (preview) - Azure titleSuffix: Azure description: How to set up RDP Shortpath for public networks for Azure Virtual Desktop (preview). -author: gundarev +author: femila ms.topic: conceptual ms.date: 04/13/2022 -ms.author: denisgun +ms.author: femila --- # Azure Virtual Desktop RDP Shortpath for public networks (preview) diff --git a/articles/virtual-desktop/shortpath.md b/articles/virtual-desktop/shortpath.md index d9030aff46207..29ec79d657438 100644 --- a/articles/virtual-desktop/shortpath.md +++ b/articles/virtual-desktop/shortpath.md @@ -2,10 +2,10 @@ title: Azure Virtual Desktop RDP Shortpath for managed networks titleSuffix: Azure description: How to set up RDP Shortpath for managed networks for Azure Virtual Desktop. -author: gundarev +author: femila ms.topic: conceptual ms.date: 03/08/2022 -ms.author: denisgun +ms.author: femila --- # Azure Virtual Desktop RDP Shortpath for managed networks diff --git a/articles/virtual-desktop/teams-on-avd.md b/articles/virtual-desktop/teams-on-avd.md index 6e26fc34a799e..ff8752222ba30 100644 --- a/articles/virtual-desktop/teams-on-avd.md +++ b/articles/virtual-desktop/teams-on-avd.md @@ -3,7 +3,7 @@ title: Microsoft Teams on Azure Virtual Desktop - Azure description: How to use Microsoft Teams on Azure Virtual Desktop. author: Heidilohr ms.topic: how-to -ms.date: 04/25/2022 +ms.date: 05/24/2022 ms.author: helohr manager: femila --- @@ -40,7 +40,7 @@ This section will show you how to install the Teams desktop app on your Windows ### Prepare your image for Teams -To enable media optimization for Teams, set the following registry key on the host: +To enable media optimization for Teams, set the following registry key on the host VM: 1. From the start menu, run **RegEdit** as an administrator. Navigate to **HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Teams**. Create the Teams key if it doesn't already exist. @@ -168,16 +168,17 @@ Using Teams in a virtualized environment is different from using Teams in a non- - The Teams desktop client in Azure Virtual Desktop environments doesn't support creating live events, but you can join live events. For now, we recommend you create live events from the [Teams web client](https://teams.microsoft.com) in your remote session instead. When watching a live event in the browser, [enable multimedia redirection (MMR) for Teams live events](multimedia-redirection.md#how-to-use-mmr-for-teams-live-events) for smoother playback. - Calls or meetings don't currently support application sharing. Desktop sessions support desktop sharing. - Give control and take control aren't currently supported. -- Teams on Azure Virtual Desktop only supports one incoming video input at a time. This means that whenever someone tries to share their screen, their screen will appear instead of the meeting leader's screen. - Due to WebRTC limitations, incoming and outgoing video stream resolution is limited to 720p. 
- The Teams app doesn't support HID buttons or LED controls with other devices. - New Meeting Experience (NME) is not currently supported in VDI environments. +- Teams for Azure Virtual Desktop doesn't currently support uploading custom background images. For Teams known issues that aren't related to virtualized environments, see [Support Teams in your organization](/microsoftteams/known-issues). ### Known issues for Teams for macOS (preview) -You can't configure audio devices from the Teams app, and the client will automatically use the default client audio device. To switch audio devices, you'll need to configure your settings from the client audio preferences instead. +- You can't configure audio devices from the Teams app, and the client will automatically use the default client audio device. To switch audio devices, you'll need to configure your settings from the client audio preferences instead. +- Teams for Azure Virtual Desktop on macOS doesn't currently support background effects such as background blur and background images. ## Collect Teams logs diff --git a/articles/virtual-desktop/troubleshoot-agent.md b/articles/virtual-desktop/troubleshoot-agent.md index 6bf7ae06b3c81..880ebe392e04a 100644 --- a/articles/virtual-desktop/troubleshoot-agent.md +++ b/articles/virtual-desktop/troubleshoot-agent.md @@ -1,23 +1,24 @@ --- title: Troubleshoot Azure Virtual Desktop Agent Issues - Azure -description: How to resolve common agent and connectivity issues. -author: Sefriend +description: How to resolve common Azure Virtual Desktop Agent and connectivity issues. +author: sefriend ms.topic: troubleshooting -ms.date: 12/16/2020 +ms.date: 05/26/2022 ms.author: sefriend manager: clarkn --- # Troubleshoot common Azure Virtual Desktop Agent issues The Azure Virtual Desktop Agent can cause connection issues because of multiple factors: - - An error on the broker that makes the agent stop the service. - - Problems with updates. - - Issues with installing during the agent installation, which disrupts connection to the session host. + + - An error on the broker that makes the agent stop the service. + - Problems with updates. + - Issues with installing during the agent installation, which disrupts connection to the session host. This article will guide you through solutions to these common scenarios and how to address connection issues. ->[!NOTE] ->For troubleshooting issues related to session connectivity and the Azure Virtual Desktop agent, we recommend you review the event logs in **Event Viewer** > **Windows Logs** > **Application**. Look for events that have one of the following sources to identify your issue: +> [!NOTE] +> For troubleshooting issues related to session connectivity and the Azure Virtual Desktop agent, we recommend you review the event logs on your session host virtual machines (VMs) by going to **Event Viewer** > **Windows Logs** > **Application**. 
Look for events that have one of the following sources to identify your issue: > >- WVD-Agent >- WVD-Agent-Updater @@ -26,63 +27,65 @@ This article will guide you through solutions to these common scenarios and how ## Error: The RDAgentBootLoader and/or Remote Desktop Agent Loader has stopped running -If you're seeing any of the following issues, this means that the boot loader, which loads the agent, was unable to install the agent properly and the agent service isn't running: +If you're seeing any of the following issues, this means that the boot loader, which loads the agent, was unable to install the agent properly and the agent service isn't running on your session host VM: + - **RDAgentBootLoader** is either stopped or not running. - There's no status for **Remote Desktop Agent Loader**. To resolve this issue, start the RDAgent boot loader: 1. In the Services window, right-click **Remote Desktop Agent Loader**. -2. Select **Start**. If this option is greyed out for you, you don't have administrator permissions and will need to get them to start the service. -3. Wait 10 seconds, then right-click **Remote Desktop Agent Loader**. -4. Select **Refresh**. -5. If the service stops after you started and refreshed it, you may have a registration failure. For more information, see [INVALID_REGISTRATION_TOKEN](#error-invalid_registration_token). +1. Select **Start**. If this option is greyed out for you, you don't have administrator permissions and will need to get them to start the service. +1. Wait 10 seconds, then right-click **Remote Desktop Agent Loader**. +1. Select **Refresh**. +1. If the service stops after you started and refreshed it, you may have a registration failure. For more information, see [INVALID_REGISTRATION_TOKEN](#error-invalid_registration_token). ## Error: INVALID_REGISTRATION_TOKEN -Go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3277, that says **INVALID_REGISTRATION_TOKEN** in the description, the registration token that you have isn't recognized as valid. +On your session host VM, go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3277 with **INVALID_REGISTRATION_TOKEN** in the description, the registration token that has been used isn't recognized as valid. To resolve this issue, create a valid registration token: 1. To create a new registration token, follow the steps in the [Generate a new registration key for the VM](#step-3-generate-a-new-registration-key-for-the-vm) section. -2. Open the Registry Editor. -3. Go to **HKEY_LOCAL_MACHINE** > **SOFTWARE** > **Microsoft** > **RDInfraAgent**. -4. Select **IsRegistered**. -5. In the **Value data:** entry box, type **0** and select **Ok**. -6. Select **RegistrationToken**. -7. In the **Value data:** entry box, paste the registration token from step 1. +1. Open Registry Editor. +1. Go to **HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\RDInfraAgent**. +1. Select **IsRegistered**. +1. In the **Value data:** entry box, type **0** and select **Ok**. +1. Select **RegistrationToken**. +1. In the **Value data:** entry box, paste the registration token from step 1. > [!div class="mx-imgBorder"] > ![Screenshot of IsRegistered 0](media/isregistered-token.png) -8. Open a command prompt as an administrator. -9. Enter **net stop RDAgentBootLoader**. -10. Enter **net start RDAgentBootLoader**. -11. Open the Registry Editor. -12. Go to **HKEY_LOCAL_MACHINE** > **SOFTWARE** > **Microsoft** > **RDInfraAgent**. -13. 
Verify that **IsRegistered** is set to 1 and there is nothing in the data column for **RegistrationToken**. +1. Open a PowerShell prompt as an administrator and run the following command to restart the RDAgentBootLoader service: - > [!div class="mx-imgBorder"] - > ![Screenshot of IsRegistered 1](media/isregistered-registry.png) + ```powershell + Restart-Service RDAgentBootLoader + ``` + +1. Go back to Registry Editor. +1. Go to **HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\RDInfraAgent**. +1. Verify that **IsRegistered** is set to 1 and there is nothing in the data column for **RegistrationToken**. + + > [!div class="mx-imgBorder"] + > ![Screenshot of IsRegistered 1](media/isregistered-registry.png) ## Error: Agent cannot connect to broker with INVALID_FORM -Go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3277 that says "INVALID_FORM" in the description, something went wrong with the communication between the agent and the broker. The agent can't connect to the broker or reach a particular URL because of certain firewall or DNS settings. +On your session host VM, go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3277 with **INVALID_FORM** in the description, the agent can't connect to the broker or reach a particular endpoint. This may be because of certain firewall or DNS settings. + +To resolve this issue, check that you can reach the two endpoints referred to as *BrokerURI* and *BrokerURIGlobal*: -To resolve this issue, check that you can reach BrokerURI and BrokerURIGlobal: -1. Open the Registry Editor. -2. Go to **HKEY_LOCAL_MACHINE** > **SOFTWARE** > **Microsoft** > **RDInfraAgent**. -3. Make note of the values for **BrokerURI** and **BrokerURIGlobal**. +1. Open Registry Editor. +1. Go to **HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\RDInfraAgent**. +1. Make note of the values for **BrokerURI** and **BrokerURIGlobal**. > [!div class="mx-imgBorder"] > ![Screenshot of broker uri and broker uri global](media/broker-uri.png) - -4. Open a browser and go to *\api/health*. - - Make sure you use the value from step 3 in the **BrokerURI**. In this section's example, it would be . -5. Open another tab in the browser and go to *\api/health*. - - Make sure you use the value from step 3 in the **BrokerURIGlobal** link. In this section's example, it would be . -6. If the network isn't blocking broker connection, both pages will load successfully and will show a message that says **"RD Broker is Healthy"** as shown in the following screenshots. +1. Open a web browser and enter your value for *BrokerURI* in the address bar and add */api/health* to the end, for example `https://rdbroker-g-us-r0.wvd.microsoft.com/api/health`. +1. Open another tab in the browser and enter your value for *BrokerURIGlobal* in the address bar and add */api/health* to the end, for example `https://rdbroker.wvd.microsoft.com/api/health`. +1. If your network isn't blocking the connection to the broker, both pages will load successfully and will show a message stating **RD Broker is Healthy**, as shown in the following screenshots: > [!div class="mx-imgBorder"] > ![Screenshot of successfully loaded broker uri access](media/broker-uri-web.png) @@ -90,8 +93,7 @@ To resolve this issue, check that you can reach BrokerURI and BrokerURIGlobal: > [!div class="mx-imgBorder"] > ![Screenshot of successfully loaded broker global uri access](media/broker-global.png) - -7. 
If the network is blocking broker connection, the pages will not load, as shown in the following screenshot. +1. If the network is blocking broker connection, the pages will not load, as shown in the following screenshot. > [!div class="mx-imgBorder"] > ![Screenshot of unsuccessful loaded broker access](media/unsuccessful-broker-uri.png) @@ -99,211 +101,212 @@ To resolve this issue, check that you can reach BrokerURI and BrokerURIGlobal: > [!div class="mx-imgBorder"] > ![Screenshot of unsuccessful loaded broker global access](media/unsuccessful-broker-global.png) -8. If the network is blocking these URLs, you will need to unblock the required URLs. For more information, see [Required URL List](safe-url-list.md). -9. If this does not resolve your issue, make sure that you do not have any group policies with ciphers that block the agent to broker connection. Azure Virtual Desktop uses the same TLS 1.2 ciphers as [Azure Front Door](../frontdoor/concept-end-to-end-tls.md#supported-cipher-suites). For more information, see [Connection Security](network-connectivity.md#connection-security). + You will need to unblock the required endpoints and then repeat steps 4 to 7. For more information, see [Required URL List](safe-url-list.md). + +1. If this does not resolve your issue, make sure that you do not have any group policies with ciphers that block the agent to broker connection. Azure Virtual Desktop uses the same TLS 1.2 ciphers as [Azure Front Door](../frontdoor/concept-end-to-end-tls.md#supported-cipher-suites). For more information, see [Connection Security](network-connectivity.md#connection-security). ## Error: 3703 -Go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3703 that says "RD Gateway Url: is not accessible" in the description, the agent is unable to reach the gateway URLs. To successfully connect to your session host and allow network traffic to these endpoints to bypass restrictions, you must unblock the URLs from the [Required URL List](safe-url-list.md). Also, make sure your firewall or proxy settings don't block these URLs. Unblocking these URLs is required to use Azure Virtual Desktop. +On your session host VM, go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3703 with **RD Gateway Url: is not accessible** in the description, the agent is unable to reach the gateway URLs. To successfully connect to your session host, you must allow network traffic to the URLs from the [Required URL List](safe-url-list.md). Also, make sure your firewall or proxy settings don't block these URLs. Unblocking these URLs is required to use Azure Virtual Desktop. To resolve this issue, verify that your firewall and/or DNS settings are not blocking these URLs: 1. [Use Azure Firewall to protect Azure Virtual Desktop deployments.](../firewall/protect-azure-virtual-desktop.md). -2. Configure your [Azure Firewall DNS settings](../firewall/dns-settings.md). +1. Configure your [Azure Firewall DNS settings](../firewall/dns-settings.md). ## Error: 3019 -Go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3019, this means the agent can't reach the web socket transport URLs. To successfully connect to your session host and allow network traffic to bypass these restrictions, you must unblock the URLs listed in the the [Required URL list](safe-url-list.md). Work with the Azure Networking team to make sure your firewall, proxy, and DNS settings aren't blocking these URLs. 
You can also check your network trace logs to identify where the Azure Virtual Desktop service is being blocked. If you open a support request for this particular issue, make sure to attach your network trace logs to the request.
+On your session host VM, go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3019, this means the agent can't reach the web socket transport URLs. To successfully connect to your session host and allow network traffic to bypass these restrictions, you must unblock the URLs listed in the [Required URL list](safe-url-list.md). Work with your networking team to make sure your firewall, proxy, and DNS settings aren't blocking these URLs. You can also check your network trace logs to identify where the Azure Virtual Desktop service is being blocked. If you open a Microsoft Support case for this particular issue, make sure to attach your network trace logs to the request.
 
 ## Error: InstallationHealthCheckFailedException
 
-Go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3277 that says "InstallationHealthCheckFailedException" in the description, that means the stack listener isn't working because the terminal server has toggled the registry key for the stack listener.
+On your session host VM, go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3277 with **InstallationHealthCheckFailedException** in the description, this means the stack listener isn't working because the terminal server has toggled the registry key for the stack listener.
 
 To resolve this issue:
 
-1. Check to see if [the stack listener is working](#error-stack-listener-isnt-working-on-windows-10-2004-vm).
-2. If the stack listener isn't working, [manually uninstall and reinstall the stack component](#error-vms-are-stuck-in-unavailable-or-upgrading-state).
+1. Check to see if [the stack listener is working](#error-stack-listener-isnt-working-on-a-windows-10-2004-session-host-vm).
+1. If the stack listener isn't working, [manually uninstall and reinstall the stack component](#error-session-host-vms-are-stuck-in-unavailable-or-upgrading-state).
 
 ## Error: ENDPOINT_NOT_FOUND
 
-Go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3277 that says "ENDPOINT_NOT_FOUND" in the description that means the broker couldn't find an endpoint to establish a connection with. This connection issue can happen for one of the following reasons:
+On your session host VM, go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3277 with **ENDPOINT_NOT_FOUND** in the description, this means the broker couldn't find an endpoint to establish a connection with. This connection issue can happen for one of the following reasons:
 
-- There aren't VMs in your host pool
-- The VMs in your host pool aren't active
-- All VMs in your host pool have exceeded the max session limit
-- None of the VMs in your host pool have the agent service running on them
+- There aren't any session host VMs in your host pool.
+- The session host VMs in your host pool aren't active.
+- All session host VMs in your host pool have exceeded the max session limit.
+- None of the VMs in your host pool have the agent service running on them.
 
 To resolve this issue:
 
 1. Make sure the VM is powered on and hasn't been removed from the host pool.
-2. Make sure that the VM hasn't exceeded the max session limit.
-3. 
Make sure the [agent service is running](#error-the-rdagentbootloader-andor-remote-desktop-agent-loader-has-stopped-running) and the [stack listener is working](#error-stack-listener-isnt-working-on-windows-10-2004-vm). -4. Make sure [the agent can connect to the broker](#error-agent-cannot-connect-to-broker-with-invalid_form). -5. Make sure [your VM has a valid registration token](#error-invalid_registration_token). -6. Make sure [the VM registration token hasn't expired](./faq.yml). +1. Make sure that the VM hasn't exceeded the max session limit. +1. Make sure the [agent service is running](#error-the-rdagentbootloader-andor-remote-desktop-agent-loader-has-stopped-running) and the [stack listener is working](#error-stack-listener-isnt-working-on-a-windows-10-2004-session-host-vm). +1. Make sure [the agent can connect to the broker](#error-agent-cannot-connect-to-broker-with-invalid_form). +1. Make sure [your VM has a valid registration token](#error-invalid_registration_token). +1. Make sure [the VM registration token hasn't expired](./faq.yml). ## Error: InstallMsiException -Go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3277, that says **InstallMsiException** in the description, the installer is already running for another application while you're trying to install the agent, or a policy is blocking the msiexec.exe program from running. +On your session host VM, go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3277 with **InstallMsiException** in the description, the installer is already running for another application while you're trying to install the agent, or group policy is blocking `msiexec.exe` from running. -To resolve this issue, disable the following policy: - - Turn off Windows Installer - - Category Path: Computer Configuration\Administrative Templates\Windows Components\Windows Installer - ->[!NOTE] ->This isn't a comprehensive list of policies, just the ones we're currently aware of. +To check whether group policy is blocking `msiexec.exe` from running: -To disable a policy: -1. Open a command prompt as an administrator. -2. Enter and run **rsop.msc**. -3. In the **Resultant Set of Policy** window that pops up, go to the category path. -4. Select the policy. -5. Select **Disabled**. -6. Select **Apply**. +1. Open Resultant Set of Policy by running **rsop.msc** from an elevated command prompt. +1. In the **Resultant Set of Policy** window that pops up, go to **Computer Configuration > Administrative Templates > Windows Components > Windows Installer > Turn off Windows Installer**. If the state is **Enabled**, work with your Active Directory team to allow `msiexec.exe` to run. > [!div class="mx-imgBorder"] > ![Screenshot of Windows Installer policy in Resultant Set of Policy](media/gpo-policy.png) -## Error: Win32Exception + > [!NOTE] + > This isn't a comprehensive list of policies, just the one we're currently aware of. -Go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3277, that says **InstallMsiException** in the description, a policy is blocking cmd.exe from launching. Blocking this program prevents you from running the console window, which is what you need to use to restart the service whenever the agent updates. 
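+
+If you'd rather check this from PowerShell than from the Resultant Set of Policy console, the sketch below reads the registry value that the **Turn off Windows Installer** policy sets. It's a sketch only; it assumes the policy is delivered through the standard **DisableMSI** value and that nothing else is blocking `msiexec.exe`.
+
+```powershell
+# Sketch: check whether the "Turn off Windows Installer" policy restricts this session host.
+# A missing value or 0 means Windows Installer is allowed; 1 or 2 means installations are restricted.
+$key = "HKLM:\SOFTWARE\Policies\Microsoft\Windows\Installer"
+$disableMsi = (Get-ItemProperty -Path $key -Name DisableMSI -ErrorAction SilentlyContinue).DisableMSI
+
+if ($null -eq $disableMsi -or $disableMsi -eq 0) {
+    Write-Output "Windows Installer isn't blocked by policy on this session host."
+} else {
+    Write-Output "DisableMSI is set to $disableMsi - work with your Active Directory team to allow msiexec.exe to run."
+}
+```
+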
+## Error: Win32Exception -To resolve this issue, disable the following policy: - - Prevent access to the command prompt - - Category Path: User Configuration\Administrative Templates\System - ->[!NOTE] ->This isn't a comprehensive list of policies, just the ones we're currently aware of. +On your session host VM, go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3277 with **InstallMsiException** in the description, a policy is blocking `cmd.exe` from launching. Blocking this program prevents you from running the console window, which is what you need to use to restart the service whenever the agent updates. -To disable a policy: -1. Open a command prompt as an administrator. -2. Enter and run **rsop.msc**. -3. In the **Resultant Set of Policy** window that pops up, go to the category path. -4. Select the policy. -5. Select **Disabled**. -6. Select **Apply**. +1. Open Resultant Set of Policy by running **rsop.msc** from an elevated command prompt. +1. In the **Resultant Set of Policy** window that pops up, go to **User Configuration > Administrative Templates > System > Prevent access to the command prompt**. If the state is **Enabled**, work with your Active Directory team to allow `cmd.exe` to run. -## Error: Stack listener isn't working on Windows 10 2004 VM +## Error: Stack listener isn't working on a Windows 10 2004 session host VM -Run **qwinsta** in your command prompt and make note of the version number that appears next to **rdp-sxs**. If you're not seeing the **rdp-tcp** and **rdp-sxs** components say **Listen** next to them or they aren't showing up at all after running **qwinsta**, it means that there's a stack issue. Stack updates get installed along with agent updates, and when this installation goes awry, the Azure Virtual Desktop Listener won't work. +On your session host VM, from a command prompt run `qwinsta.exe` and make note of the version number that appears next to **rdp-sxs** in the *SESSIONNAME* column. If the *STATE* column for **rdp-tcp** and **rdp-sxs** entries isn't **Listen**, or if **rdp-tcp** and **rdp-sxs** entries aren't listed at all, it means that there's a stack issue. Stack updates get installed along with agent updates, but if this hasn't been successful, the Azure Virtual Desktop Listener won't work. To resolve this issue: + 1. Open the Registry Editor. -2. Go to **HKEY_LOCAL_MACHINE** > **SYSTEM** > **CurrentControlSet** > **Control** > **Terminal Server** > **WinStations**. -3. Under **WinStations** you may see several folders for different stack versions, select the folder that matches the version information you saw when running **qwinsta** in your Command Prompt. -4. Find **fReverseConnectMode** and make sure its data value is **1**. Also make sure that **fEnableWinStation** is set to **1**. +1. Go to **HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Terminal Server\WinStations**. +1. Under **WinStations** you may see several folders for different stack versions, select a folder that matches the version information you saw when running `qwinsta.exe` in a command prompt. + 1. Find **fReverseConnectMode** and make sure its data value is **1**. Also make sure that **fEnableWinStation** is set to **1**. - > [!div class="mx-imgBorder"] - > ![Screenshot of fReverseConnectMode](media/fenable-2.png) + > [!div class="mx-imgBorder"] + > ![Screenshot of fReverseConnectMode](media/fenable-2.png) -5. If **fReverseConnectMode** isn't set to **1**, select **fReverseConnectMode** and enter **1** in its value field. -6. 
If **fEnableWinStation** isn't set to **1**, select **fEnableWinStation** and enter **1** into its value field.
-7. Restart your VM.
+   1. If **fReverseConnectMode** isn't set to **1**, select **fReverseConnectMode** and enter **1** in its value field.
+   1. If **fEnableWinStation** isn't set to **1**, select **fEnableWinStation** and enter **1** into its value field.
+1. Repeat the previous steps for each folder that matches the version information you saw when running `qwinsta.exe` in a command prompt.
 
->[!NOTE]
->To change the **fReverseConnectMode** or **fEnableWinStation** mode for multiple VMs at a time, you can do one of the following two things:
->
->- Export the registry key from the machine that you already have working and import it into all other machines that need this change.
->- Create a group policy object (GPO) that sets the registry key value for the machines that need the change.
+   > [!TIP]
+   > To change the **fReverseConnectMode** or **fEnableWinStation** mode for multiple VMs at a time, you can do one of the following two things:
+   >
+   > - Export the registry key from the machine that you already have working and import it into all other machines that need this change.
+   > - Create a group policy object (GPO) that sets the registry key value for the machines that need the change.
 
-7. Go to **HKEY_LOCAL_MACHINE** > **SYSTEM** > **CurrentControlSet** > **Control** > **Terminal Server** > **ClusterSettings**.
-8. Under **ClusterSettings**, find **SessionDirectoryListener** and make sure its data value is **rdp-sxs...**.
-9. If **SessionDirectoryListener** isn't set to **rdp-sxs...**, you'll need to follow the steps in the [Uninstall the agent and boot loader](#step-1-uninstall-all-agent-boot-loader-and-stack-component-programs) section to first uninstall the agent, boot loader, and stack components, and then [Reinstall the agent and boot loader](#step-4-reinstall-the-agent-and-boot-loader). This will reinstall the side-by-side stack.
+1. Restart your session host VM.
+1. Open the Registry Editor.
+1. Go to **HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Terminal Server\ClusterSettings**.
+1. Under **ClusterSettings**, find **SessionDirectoryListener** and make sure its data value is `rdp-sxs<version number>`, where the version number matches the version information you saw when running `qwinsta.exe` in a command prompt. If it doesn't, you'll need to [re-register your VM and reinstall the agent](#your-issue-isnt-listed-here-or-wasnt-resolved) component.
 
 ## Error: DownloadMsiException
 
-Go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3277, that says **DownloadMsiException** in the description, there isn't enough space on the disk for the RDAgent.
+On your session host VM, go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3277 with **DownloadMsiException** in the description, there isn't enough space on the disk for the RDAgent.
 
 To resolve this issue, make space on your disk by:
-  - Deleting files that are no longer in user
-  - Increasing the storage capacity of your VM
+  - Deleting files that are no longer in use.
+  - Increasing the storage capacity of your session host VM.
 
 ## Error: Agent fails to update with MissingMethodException
 
-Go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3389 that says "MissingMethodException: Method not found" in the description, that means the Azure Virtual Desktop agent didn't update successfully and reverted to an earlier version. This may be because the version number of the .NET framework currently installed on your VMs is lower than 4.7.2.
To resolve this issue, you need to upgrade the .NET to version 4.7.2 or later by following the installation instructions in the [.NET Framework documentation](https://support.microsoft.com/topic/microsoft-net-framework-4-7-2-offline-installer-for-windows-05a72734-2127-a15d-50cf-daf56d5faec2). +On your session host VM, go to **Event Viewer** > **Windows Logs** > **Application**. If you see an event with ID 3389 with **MissingMethodException: Method not found** in the description, this means the Azure Virtual Desktop agent didn't update successfully and reverted to an earlier version. This may be because the version number of the .NET framework currently installed on your VMs is lower than 4.7.2. To resolve this issue, you need to upgrade the .NET to version 4.7.2 or later by following the installation instructions in the [.NET Framework documentation](https://support.microsoft.com/topic/microsoft-net-framework-4-7-2-offline-installer-for-windows-05a72734-2127-a15d-50cf-daf56d5faec2). + +## Error: Session host VMs are stuck in Unavailable or Upgrading state + +If the status listed for session hosts in your host pool always says **Unavailable** or **Upgrading**, the agent or stack didn't install successfully. + +To resolve this issue, first reinstall the side-by-side stack: +1. Sign in to your session host VM as an administrator. +1. From an elevated PowerShell prompt run `qwinsta.exe` and make note of the version number that appears next to **rdp-sxs** in the *SESSIONNAME* column. If the *STATE* column for **rdp-tcp** and **rdp-sxs** entries isn't **Listen**, or if **rdp-tcp** and **rdp-sxs** entries aren't listed at all, it means that there's a stack issue. -## Error: VMs are stuck in Unavailable or Upgrading state +1. Run the following command to stop the RDAgentBootLoader service: -Open a PowerShell window as an administrator and run the following cmdlet: + ```powershell + Stop-Service RDAgentBootLoader + ``` -```powershell -Get-AzWvdSessionHost -ResourceGroupName -HostPoolName | Select-Object * -``` +1. Go to **Control Panel** > **Programs** > **Programs and Features**, or on Windows 11 go to the **Settings App > Apps**. +1. Uninstall the latest version of the **Remote Desktop Services SxS Network Stack** or the version listed in Registry Editor in **HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Terminal Server\WinStations** under the value for **ReverseConnectionListener**. +1. Back at the PowerShell prompt, run the following commands to add the file path of the latest installer available on your session host VM for the side-by-side stack to a variable and list its name: -If the status listed for the session host or hosts in your host pool always says "Unavailable" or "Upgrading," the agent or stack didn't install successfully. + ```powershell + $sxsMsi = (Get-ChildItem "$env:SystemDrive\Program Files\Microsoft RDInfra\" | ? Name -like SxSStack*.msi | Sort-Object CreationTime -Descending | Select-Object -First 1).FullName + $sxsMsi + ``` -To resolve this issue, reinstall the side-by-side stack: -1. Open a command prompt as an administrator. -2. Enter **net stop RDAgentBootLoader**. -3. Go to **Control Panel** > **Programs** > **Programs and Features**. -4. Uninstall the latest version of the **Remote Desktop Services SxS Network Stack** or the version listed in **HKEY_LOCAL_MACHINE** > **SYSTEM** > **CurrentControlSet** > **Control** > **Terminal Server** > **WinStations** under **ReverseConnectListener**. -5. 
Open a console window as an administrator and go to **Program Files** > **Microsoft RDInfra**. -6. Select the **SxSStack** component or run the **`msiexec /i SxsStack-.msi`** command to install the MSI. -8. Restart your VM. -9. Go back to the command prompt and run the **qwinsta** command. -10. Verify that the stack component installed in step 6 says **Listen** next to it. - - If so, enter **net start RDAgentBootLoader** in the command prompt and restart your VM. - - If not, you will need to [re-register your VM and reinstall the agent](#your-issue-isnt-listed-here-or-wasnt-resolved) component. +1. Install the latest installer available on your session host VM for the side-by-side stack by running the following command: + + ```powershell + msiexec /i $sxsMsi + ``` + +1. Restart your session host VM. +1. From a command prompt run `qwinsta.exe` again and verify the *STATE* column for **rdp-tcp** and **rdp-sxs** entries is **Listen**. If not, you will need to [re-register your VM and reinstall the agent](#your-issue-isnt-listed-here-or-wasnt-resolved) component. ## Error: Connection not found: RDAgent does not have an active connection to the broker -Your VMs may be at their connection limit, so the VM can't accept new connections. +Your session host VMs may be at their connection limit and can't accept new connections. -To resolve this issue: - - Decrease the max session limit. This ensures that resources are more evenly distributed across session hosts and will prevent resource depletion. - - Increase the resource capacity of the VMs. +To resolve this issue, either: +- Decrease the max session limit. This ensures that resources are more evenly distributed across session hosts and will prevent resource depletion. +- Increase the resource capacity of the session host VMs. ## Error: Operating a Pro VM or other unsupported OS The side-by-side stack is only supported by Windows Enterprise or Windows Server SKUs, which means that operating systems like Pro VM aren't. If you don't have an Enterprise or Server SKU, the stack will be installed on your VM but won't be activated, so you won't see it show up when you run **qwinsta** in your command line. -To resolve this issue, create a VM that is Windows Enterprise or Windows Server. -1. Go to [Virtual machine details](create-host-pools-azure-marketplace.md#virtual-machine-details) and follow steps 1-12 to set up one of the following recommended images: - - Windows 10 Enterprise multi-session, version 1909 - - Windows 10 Enterprise multi-session, version 1909 + Microsoft 365 Apps - - Windows Server 2019 Datacenter - - Windows 10 Enterprise multi-session, version 2004 - - Windows 10 Enterprise multi-session, version 2004 + Microsoft 365 Apps -2. Select **Review and Create**. +To resolve this issue, [create session host VMs](expand-existing-host-pool.md) using a [supported operating system](prerequisites.md#operating-systems-and-licenses). ## Error: NAME_ALREADY_REGISTERED -The name of your VM has already been registered and is probably a duplicate. +The name of your session host VM has already been registered and is probably a duplicate. To resolve this issue: 1. Follow the steps in the [Remove the session host from the host pool](#step-2-remove-the-session-host-from-the-host-pool) section. -2. [Create another VM](expand-existing-host-pool.md#add-virtual-machines-with-the-azure-portal). Make sure to choose a unique name for this VM. -3. Go to the [Azure portal](https://portal.azure.com) and open the **Overview** page for the host pool your VM was in. 
-4. Open the **Session Hosts** tab and check to make sure all session hosts are in that host pool. -5. Wait for 5-10 minutes for the session host status to say **Available**. +1. [Create another VM](expand-existing-host-pool.md#add-virtual-machines-with-the-azure-portal). Make sure to choose a unique name for this VM. +1. Go to the [Azure portal](https://portal.azure.com) and open the **Overview** page for the host pool your VM was in. +1. Open the **Session Hosts** tab and check to make sure all session hosts are in that host pool. +1. Wait for 5-10 minutes for the session host status to say **Available**. > [!div class="mx-imgBorder"] > ![Screenshot of available session host](media/hostpool-portal.png) ## Your issue isn't listed here or wasn't resolved -If you can't find your issue in this article or the instructions didn't help you, we recommend you uninstall, reinstall, and re-register Azure Virtual Desktop Agent. The instructions in this section will show you how to reregister your VM to the Azure Virtual Desktop service by uninstalling all agent, boot loader, and stack components, removing the session host from the host pool, generating a new registration key for the VM, and reinstalling the agent and boot loader. If one or more of the following scenarios apply to you, follow these instructions: -- Your VM is stuck in **Upgrading** or **Unavailable** -- Your stack listener isn't working and you're running on Windows 10 1809, 1903, or 1909 -- You're receiving an **EXPIRED_REGISTRATION_TOKEN** error -- You're not seeing your VMs show up in the session hosts list -- You don't see the **Remote Desktop Agent Loader** in the Services window -- You don't see the **RdAgentBootLoader** component in the Task Manager -- You're receiving a **Connection Broker couldn't validate the settings** error on custom image VMs -- The instructions in this article didn't resolve your issue +If you can't find your issue in this article or the instructions didn't help you, we recommend you uninstall, reinstall, and re-register the Azure Virtual Desktop Agent. The instructions in this section will show you how to reregister your session host VM to the Azure Virtual Desktop service by: +1. Uninstalling all agent, boot loader, and stack components +1. Removing the session host from the host pool +1. Generating a new registration key for the VM +1. Reinstalling the Azure Virtual Desktop Agent and boot loader. + +Follow these instructions in this section if one or more of the following scenarios apply to you: + +- The state of your session host VM is stuck as **Upgrading** or **Unavailable**. +- Your stack listener isn't working and you're running on Windows 10 version 1809, 1903, or 1909. +- You're receiving an **EXPIRED_REGISTRATION_TOKEN** error. +- You're not seeing your session host VMs show up in the session hosts list. +- You don't see the **Remote Desktop Agent Loader** service in the Services console. +- You don't see the **RdAgentBootLoader** component as a running process in Task Manager. +- You're receiving a **Connection Broker couldn't validate the settings** error on custom image VMs. +- Previous sections in this article didn't resolve your issue. ### Step 1: Uninstall all agent, boot loader, and stack component programs -Before reinstalling the agent, boot loader, and stack, you must uninstall any existing component programs from your VM. To uninstall all agent, boot loader, and stack component programs: -1. Sign in to your VM as an administrator. -2. 
Go to **Control Panel** > **Programs** > **Programs and Features**. -3. Remove the following programs: +Before reinstalling the agent, boot loader, and stack, you must uninstall any existing components from your VM. To uninstall all agent, boot loader, and stack component programs: +1. Sign in to your session host VM as an administrator. +2. Go to **Control Panel** > **Programs** > **Programs and Features**, or on Windows 11 go to the **Settings App > Apps**. +3. Uninstall the following programs, then restart your session host VM: + + > [!CAUTION] + > When uninstalling **Remote Desktop Services SxS Network Stack**, you'll be prompted that *Remote Desktop Services* and *Remote Desktop Services UserMode Port Redirector* should be closed. If you're connected to the session host VM using RDP, select **Do not close applications** then select **OK**, otherwise your RDP connection will be closed. + > + > [!div class="mx-imgBorder"] + > ![Screenshot showing prompt that Remote Desktop Services and Remote Desktop Services UserMode Port Redirector should be closed](media/uninstall-remote-desktop-services-sxs-network-stack.png) + - Remote Desktop Agent Boot Loader - Remote Desktop Services Infrastructure Agent - Remote Desktop Services Infrastructure Geneva Agent - Remote Desktop Services SxS Network Stack ->[!NOTE] ->You may see multiple instances of these programs. Make sure to remove all of them. + > [!NOTE] + > You may see multiple instances of these programs. Make sure to remove all of them. > [!div class="mx-imgBorder"] > ![Screenshot of uninstalling programs](media/uninstall-program.png) @@ -311,54 +314,64 @@ Before reinstalling the agent, boot loader, and stack, you must uninstall any ex ### Step 2: Remove the session host from the host pool When you remove the session host from the host pool, the session host is no longer registered to that host pool. This acts as a reset for the session host registration. To remove the session host from the host pool: -1. Go to the **Overview** page for the host pool that your VM is in, in the [Azure portal](https://portal.azure.com). -2. Go to the **Session Hosts** tab to see the list of all session hosts in that host pool. -3. Look at the list of session hosts and select the VM that you want to remove. -4. Select **Remove**. + +1. Sign in to the [Azure portal](https://portal.azure.com). +1. In the search bar, type *Azure Virtual Desktop* and select the matching service entry. +1. Select **Host pools** and select the name of the host pool that your session host VM is in. +1. Select **Session Hosts** to see the list of all session hosts in that host pool. +1. Look at the list of session hosts and tick the box next to the session host that you want to remove. +1. Select **Remove**. > [!div class="mx-imgBorder"] > ![Screenshot of removing VM from host pool](media/remove-sh.png) ### Step 3: Generate a new registration key for the VM -You must generate a new registration key that is used to re-register your VM to the host pool and to the service. To generate a new registration key for the VM: -1. Open the [Azure portal](https://portal.azure.com) and go to the **Overview** page for the host pool of the VM you want to edit. -2. Select **Registration key**. +You must generate a new registration key that is used to re-register your session VM to the host pool and to the service. To generate a new registration key for the VM: +1. Sign in to the [Azure portal](https://portal.azure.com). +1. 
In the search bar, type *Azure Virtual Desktop* and select the matching service entry.
+1. Select **Host pools** and select the name of the host pool that your session host VM is in.
+1. On the **Overview** blade, select **Registration key**.
 
    > [!div class="mx-imgBorder"]
    > ![Screenshot of registration key in portal](media/reg-key.png)
 
-3. Open the **Registration key** tab and select **Generate new key**.
-4. Enter the expiration date and then select **Ok**.
+1. Open the **Registration key** tab and select **Generate new key**.
+1. Enter the expiration date and then select **Ok**.
 
->[!NOTE]
->The expiration date can be no less than an hour and no longer than 27 days from its generation time and date. We highly recommend you set the expiration date to the 27 day maximum.
+   > [!NOTE]
+   > The expiration date can be no less than an hour and no longer than 27 days from its generation time and date. Generate a registration key only for as long as you need.
 
-5. Copy the newly generated key to your clipboard. You'll need this key later.
+1. Copy the newly generated key to your clipboard or download the file. You'll need this key later.
 
 ### Step 4: Reinstall the agent and boot loader
 
 By reinstalling the most updated version of the agent and boot loader, the side-by-side stack and Geneva monitoring agent automatically get installed as well. To reinstall the agent and boot loader:
 
-1. Sign in to your VM as an administrator and use the correct version of the agent installer for your deployment depending on which version of Windows your VM is running. If you have a Windows 10 VM, follow the instructions in [Register virtual machines](create-host-pools-powershell.md#register-the-virtual-machines-to-the-azure-virtual-desktop-host-pool) to download the **Azure Virtual Desktop Agent** and the **Azure Virtual Desktop Agent Bootloader**. If you have a Windows 7 VM, follow steps 13-14 in [Register virtual machines](deploy-windows-7-virtual-machine.md#configure-a-windows-7-virtual-machine) to download the **Azure Virtual Desktop Agent** and the **Azure Virtual Desktop Agent Manager**.
-   > [!div class="mx-imgBorder"]
-   > ![Screenshot of agent and bootloader download page](media/download-agent.png)
+1. Sign in to your session host VM as an administrator and use the correct version of the agent installer for the operating system of your session host VM:
+   1. For Windows 10 and Windows 11:
+      1. [Azure Virtual Desktop Agent](https://query.prod.cms.rt.microsoft.com/cms/api/am/binary/RWrmXv)
+      1. [Azure Virtual Desktop Agent Bootloader](https://query.prod.cms.rt.microsoft.com/cms/api/am/binary/RWrxrH)
+   1. For Windows 7:
+      1. [Azure Virtual Desktop Agent](https://query.prod.cms.rt.microsoft.com/cms/api/am/binary/RE3JZCm)
+      1. [Azure Virtual Desktop Agent Bootloader](https://query.prod.cms.rt.microsoft.com/cms/api/am/binary/RE3K2e3)
+
+   > [!TIP]
+   > For each of the agent and boot loader installers you downloaded, you may need to unblock them. Right-click each file and select **Properties**, then select **Unblock**, and finally select **OK**.
 
-2. Right-click the agent and boot loader installers you downloaded.
-3. Select **Properties**.
-4. Select **Unblock**.
-5. Select **Ok**.
-6. Run the agent installer.
-7. When the installer asks you for the registration token, paste the registration key from your clipboard.
+1. Run the agent installer.
+1. When the installer asks you for the registration token, paste the registration key from your clipboard.
> [!div class="mx-imgBorder"] > ![Screenshot of pasted registration token](media/pasted-agent-token.png) -8. Run the boot loader installer. -9. Restart your VM. -10. Go to the [Azure portal](https://portal.azure.com) and open the **Overview** page for the host pool your VM belongs to. -11. Go to the **Session Hosts** tab to see the list of all session hosts in that host pool. -12. You should now see the session host registered in the host pool with the status **Available**. +1. Run the boot loader installer. +1. Restart your session VM. +1. Sign in to the [Azure portal](https://portal.azure.com). +1. In the search bar, type *Azure Virtual Desktop* and select the matching service entry. +1. Select **Host pools** and select the name of the host pool that your session host VM is in. +1. Select **Session Hosts** to see the list of all session hosts in that host pool. +1. You should now see the session host registered in the host pool with the status **Available**. > [!div class="mx-imgBorder"] > ![Screenshot of available session host](media/hostpool-portal.png) diff --git a/articles/virtual-desktop/troubleshoot-azure-ad-connections.md b/articles/virtual-desktop/troubleshoot-azure-ad-connections.md index a8d5b3553809d..e24325cd7f9e9 100644 --- a/articles/virtual-desktop/troubleshoot-azure-ad-connections.md +++ b/articles/virtual-desktop/troubleshoot-azure-ad-connections.md @@ -31,15 +31,15 @@ If you come across an error saying **Your account is configured to prevent you f If you can't sign in and keep receiving an error message that says your credentials are incorrect, first make sure you're using the right credentials. If you keep seeing error messages, ask yourself the following questions: -- Does your Conditional Access policy exclude multifactor authentication requirements for the Azure Windows VM sign-in cloud application? +- Does your Conditional Access policy exclude multi-factor authentication requirements for the Azure Windows VM sign-in cloud application? - Have you assigned the **Virtual Machine User Login** role-based access control (RBAC) permission to the VM or resource group for each user? -If you answered "no" to either of these questions, follow the instructions in [Enable multifactor authentication](deploy-azure-ad-joined-vm.md#enabling-mfa-for-azure-ad-joined-vms) to reconfigure your multifactor authentication. +If you answered "no" to either of these questions, follow the instructions in [Enforce Azure Active Directory Multi-Factor Authentication for Azure Virtual Desktop using Conditional Access](set-up-mfa.md#azure-ad-joined-session-host-vms) to reconfigure your multi-factor authentication. > [!WARNING] -> VM sign-ins don't support per-user enabled or enforced Azure AD multifactor authentication. If you try to sign in with multifactor authentication on a VM, you won't be able to sign in and will receive an error message. +> VM sign-ins don't support per-user enabled or enforced Azure AD Multi-Factor Authentication. If you try to sign in with multi-factor authentication on a VM, you won't be able to sign in and will receive an error message. -If you can access your Azure AD sign-in logs through Log Analytics, you can see if you've enabled multifactor authentication and which Conditional Access policy is triggering the event. The events shown are non-interactive user login events for the VM, which means the IP address will appear to come from the external IP address that your VM accesses Azure AD from. 
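+
+If you want to check the **Virtual Machine User Login** assignment mentioned above from a shell rather than the portal, the sketch below uses Az PowerShell. The resource group name and user principal name are placeholders, and it assumes the role is assigned at resource group scope.
+
+```powershell
+# Sketch: check, and if necessary grant, the Virtual Machine User Login role at resource group scope.
+$rg   = "rg-avd-hosts"         # placeholder: resource group that contains the session host VMs
+$user = "user@contoso.com"     # placeholder: user principal name
+
+$assignment = Get-AzRoleAssignment -SignInName $user -ResourceGroupName $rg |
+    Where-Object RoleDefinitionName -eq "Virtual Machine User Login"
+
+if (-not $assignment) {
+    New-AzRoleAssignment -SignInName $user -ResourceGroupName $rg -RoleDefinitionName "Virtual Machine User Login"
+}
+```
+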
+If you can access your Azure AD sign-in logs through Log Analytics, you can see if you've enabled multi-factor authentication and which Conditional Access policy is triggering the event. The events shown are non-interactive user login events for the VM, which means the IP address will appear to come from the external IP address that your VM accesses Azure AD from. You can access your sign-in logs by running the following Kusto query: @@ -61,11 +61,11 @@ If you come across an error saying **The logon attempt failed** on the Windows S - You are on a device that is Azure AD-joined or hybrid Azure AD-joined to the same Azure AD tenant as the session host OR - You are on a device running Windows 10 2004 or later that is Azure AD registered to the same Azure AD tenant as the session host - The [PKU2U protocol is enabled](/windows/security/threat-protection/security-policy-settings/network-security-allow-pku2u-authentication-requests-to-this-computer-to-use-online-identities) on both the local PC and the session host -- [Per-user multifactor authentication is disabled](deploy-azure-ad-joined-vm.md#enabling-mfa-for-azure-ad-joined-vms) for the user account as it's not supported for Azure AD-joined VMs. +- [Per-user multi-factor authentication is disabled](set-up-mfa.md#azure-ad-joined-session-host-vms) for the user account as it's not supported for Azure AD-joined VMs. ### The sign-in method you're trying to use isn't allowed -If you come across an error saying **The sign-in method you're trying to use isn't allowed. Try a different sign-in method or contact your system administrator**, you have Conditional Access policies restricting access. Follow the instructions in [Enable multifactor authentication](deploy-azure-ad-joined-vm.md#enabling-mfa-for-azure-ad-joined-vms) to enable multifactor authentication for your Azure AD-joined VMs. +If you come across an error saying **The sign-in method you're trying to use isn't allowed. Try a different sign-in method or contact your system administrator**, you have Conditional Access policies restricting access. Follow the instructions in [Enforce Azure Active Directory Multi-Factor Authentication for Azure Virtual Desktop using Conditional Access](set-up-mfa.md#azure-ad-joined-session-host-vms) to enforce Azure Active Directory Multi-Factor Authentication for your Azure AD-joined VMs. ## Web client @@ -75,7 +75,7 @@ If you come across an error saying **Oops, we couldn't connect to NAME. Sign in ### We couldn't connect to the remote PC because of a security error -If you come across an error saying **Oops, we couldn't connect to NAME. We couldn't connect to the remote PC because of a security error. If this keeps happening, ask your admin or tech support for help.**, you have Conditional Access policies restricting access. Follow the instructions in [Enable multifactor authentication](deploy-azure-ad-joined-vm.md#enabling-mfa-for-azure-ad-joined-vms) to enable multifactor authentication for your Azure AD-joined VMs. +If you come across an error saying **Oops, we couldn't connect to NAME. We couldn't connect to the remote PC because of a security error. If this keeps happening, ask your admin or tech support for help.**, you have Conditional Access policies restricting access. Follow the instructions in [Enforce Azure Active Directory Multi-Factor Authentication for Azure Virtual Desktop using Conditional Access](set-up-mfa.md#azure-ad-joined-session-host-vms) to enforce Azure Active Directory Multi-Factor Authentication for your Azure AD-joined VMs. 
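The Kusto query referenced in the sign-in log guidance above isn't reproduced in this excerpt. As a hedged sketch of how those non-interactive sign-in events could be pulled from the command line instead, the following assumes your Azure AD sign-in logs are already exported to a Log Analytics workspace, that the `AADNonInteractiveUserSignInLogs` table and column names below match your workspace schema, and that the `log-analytics` CLI extension is installed; adjust names to your environment.

```azurecli
# Sketch only: assumes Azure AD sign-in logs are exported to a Log Analytics
# workspace and that the AADNonInteractiveUserSignInLogs table and these column
# names exist in your workspace schema. May require: az extension add -n log-analytics
WORKSPACE_ID="<log-analytics-workspace-guid>"

az monitor log-analytics query \
  --workspace "$WORKSPACE_ID" \
  --analytics-query 'AADNonInteractiveUserSignInLogs
    | where TimeGenerated > ago(1d)
    | where UserPrincipalName == "user@contoso.com"
    | project TimeGenerated, AppDisplayName, IPAddress, ConditionalAccessStatus' \
  --timespan P1D
```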
## Android client diff --git a/articles/virtual-desktop/user-documentation/connect-windows-7-10.md b/articles/virtual-desktop/user-documentation/connect-windows-7-10.md index 8ad2bef13cc37..8e430713aa500 100644 --- a/articles/virtual-desktop/user-documentation/connect-windows-7-10.md +++ b/articles/virtual-desktop/user-documentation/connect-windows-7-10.md @@ -7,6 +7,7 @@ ms.date: 01/27/2022 ms.author: helohr manager: femila ms.custom: template-how-to +adobe-target: true --- # Connect with the Windows Desktop client diff --git a/articles/virtual-desktop/virtual-desktop-fall-2019/configure-vm-gpu-2019.md b/articles/virtual-desktop/virtual-desktop-fall-2019/configure-vm-gpu-2019.md index 0fc5a9016e9df..bec6d2c1765de 100644 --- a/articles/virtual-desktop/virtual-desktop-fall-2019/configure-vm-gpu-2019.md +++ b/articles/virtual-desktop/virtual-desktop-fall-2019/configure-vm-gpu-2019.md @@ -1,10 +1,10 @@ --- title: Configure GPU for Azure Virtual Desktop (classic) - Azure description: How to enable GPU-accelerated rendering and encoding in Azure Virtual Desktop (classic). -author: gundarev +author: femila ms.topic: how-to ms.date: 03/30/2020 -ms.author: denisgun +ms.author: femila --- # Configure graphics processing unit (GPU) acceleration for Azure Virtual Desktop (classic) diff --git a/articles/virtual-desktop/virtual-desktop-fall-2019/manage-resources-using-ui.md b/articles/virtual-desktop/virtual-desktop-fall-2019/manage-resources-using-ui.md index d52f0a3c07304..f026a475748b4 100644 --- a/articles/virtual-desktop/virtual-desktop-fall-2019/manage-resources-using-ui.md +++ b/articles/virtual-desktop/virtual-desktop-fall-2019/manage-resources-using-ui.md @@ -18,7 +18,9 @@ The instructions in this article will tell you how to deploy the UI by using an Since the app requires consent to interact with Azure Virtual Desktop, this tool doesn't support Business-to-Business (B2B) scenarios. Each Azure Active Directory (AAD) tenant's subscription will need its own separate deployment of the management tool. -This management tool is a sample. Microsoft will provide important security and quality updates. The [source code is available in GitHub](https://github.com/Azure/RDS-Templates/tree/master/wvd-templates/wvd-management-ux/deploy). Customers and partners are encouraged to customize the tool to fit their business needs. +This management tool is a sample. Microsoft will provide important security and quality updates. The [source code is available in GitHub](https://github.com/Azure/RDS-Templates/tree/master/wvd-templates/wvd-management-ux/deploy). Microsoft Support is not handling issues for the management tool. If you come across any issues, follow the directions in Azure Resource Manager templates for Remote Desktop Services to report them on [GitHub](https://github.com/Azure/RDS-Templates/tree/master/wvd-templates/wvd-management-ux/deploy). + +Customers and partners are encouraged to customize the tool to fit their business needs. To following browsers are compatible with the management tool: - Google Chrome 68 or later diff --git a/articles/virtual-desktop/whats-new.md b/articles/virtual-desktop/whats-new.md index 0f99b5d678b5c..17ec9bc5a3661 100644 --- a/articles/virtual-desktop/whats-new.md +++ b/articles/virtual-desktop/whats-new.md @@ -3,7 +3,7 @@ title: What's new in Azure Virtual Desktop? - Azure description: New features and product updates for Azure Virtual Desktop. 
author: Heidilohr ms.topic: overview -ms.date: 05/04/2022 +ms.date: 06/02/2022 ms.author: helohr ms.reviewer: thhickli; darank manager: femila @@ -20,6 +20,26 @@ Azure Virtual Desktop updates regularly. This article is where you'll find out a Make sure to check back here often to keep up with new updates. +## May 2022 + +Here's what changed in May 2022: + +### Background effects with Teams on Azure Virtual Desktop now generally available + +Users can now make meetings more personalized and avoid unexpected distractions by applying background effects. Meeting participants can select an available image in Teams to change their background or choose to blur their background. For more information, see [our blog post](https://techcommunity.microsoft.com/t5/azure-virtual-desktop-blog/microsoft-teams-background-effects-is-now-generally-available-on/ba-p/3401961). + +### Multi-window and "Call me with Teams" features now generally available + +The multi-window feature gives users the option to pop out chats, meetings, calls, or documents into separate windows to streamline their workflow. The "Call me with Teams" feature lets users transfer a Teams call to their phone. Both features are now generally available in Teams on Azure Virtual Desktop. For more information, see [our blog post](https://techcommunity.microsoft.com/t5/azure-virtual-desktop-blog/microsoft-teams-multi-window-support-and-call-me-are-now-in-ga/ba-p/3401830). + +### Japan metadata service in public preview + +The Azure Virtual Desktop metadata database located in Japan is now in public preview. This allows customers to store their Azure Virtual Desktop objects and metadata within a database located within our Japan geography, ensuring that the data will only reside within Japan. For more information, see [our blog post](https://techcommunity.microsoft.com/t5/azure-virtual-desktop/announcing-the-public-preview-of-the-azure-virtual-desktop/m-p/3417497). + +### FSLogix 2201 hotfix + +The latest update for FSLogix 2201 includes fixes to Cloud Cache and container redirection processes. No new features are included with this update. Learn more at [What’s new in FSLogix](/fslogix/whats-new?context=%2Fazure%2Fvirtual-desktop%2Fcontext%2Fcontext) and [our blog post](https://techcommunity.microsoft.com/t5/azure-virtual-desktop/announcing-fslogix-2201-hotfix-1-2-9-8171-14983-has-been/m-p/3435445). + ## April 2022 Here's what changed in April 2022: diff --git a/articles/virtual-machine-scale-sets/instance-generalized-image-version.md b/articles/virtual-machine-scale-sets/instance-generalized-image-version.md index 49fc037af4d3b..3683aaea90a05 100644 --- a/articles/virtual-machine-scale-sets/instance-generalized-image-version.md +++ b/articles/virtual-machine-scale-sets/instance-generalized-image-version.md @@ -15,6 +15,9 @@ ms.reviewer: cynthn **Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Windows VMs :heavy_check_mark: Uniform scale sets +> [!IMPORTANT] +> You can't currently create a Flexible virtual machine scale set from an image shared by another tenant. + Create a scale set from a generalized image version stored in an [Azure Compute Gallery](../virtual-machines/shared-image-galleries.md). If you want to create a scale set using a specialized image version, see [Create scale set instances from a specialized image](instance-specialized-image-version-cli.md). 
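As a hedged illustration of what the article goes on to describe, a minimal CLI sketch for creating a Uniform scale set from a generalized image version in your own gallery might look like the following; the subscription ID, resource groups, gallery, image definition, and version are placeholders.

```azurecli
# Hypothetical names throughout; replace the subscription ID, resource groups,
# gallery, image definition, and version with your own values.
az vmss create \
  --resource-group myResourceGroup \
  --name myScaleSet \
  --orchestration-mode Uniform \
  --image "/subscriptions/<subscription-id>/resourceGroups/myGalleryRG/providers/Microsoft.Compute/galleries/myGallery/images/myImageDefinition/versions/1.0.0" \
  --admin-username azureuser \
  --generate-ssh-keys
```

Keeping the gallery and the scale set in the same tenant also sidesteps the cross-tenant limitation called out in the note above.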
## Create a scale set from an image in your gallery diff --git a/articles/virtual-machine-scale-sets/instance-specialized-image-version.md b/articles/virtual-machine-scale-sets/instance-specialized-image-version.md index 357792755ef9f..f1bbe18effd66 100644 --- a/articles/virtual-machine-scale-sets/instance-specialized-image-version.md +++ b/articles/virtual-machine-scale-sets/instance-specialized-image-version.md @@ -16,6 +16,9 @@ ms.custom: devx-track-azurecli **Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Windows VMs :heavy_check_mark: Uniform scale sets +> [!IMPORTANT] +> You can't currently create a Flexible virtual machine scale set from an image shared by another tenant. + Create a scale set from a [specialized image version](../virtual-machines/shared-image-galleries.md#generalized-and-specialized-images) stored in an Azure Compute Gallery. If you want to create a scale set using a generalized image version, see [Create a scale set from a generalized image](instance-generalized-image-version-cli.md). > [!IMPORTANT] diff --git a/articles/virtual-machine-scale-sets/media/virtual-machine-scale-sets-maintenance-notifications/monitor-create-service-health-alert.png b/articles/virtual-machine-scale-sets/media/virtual-machine-scale-sets-maintenance-notifications/monitor-create-service-health-alert.png new file mode 100644 index 0000000000000..f7791ab3a80e1 Binary files /dev/null and b/articles/virtual-machine-scale-sets/media/virtual-machine-scale-sets-maintenance-notifications/monitor-create-service-health-alert.png differ diff --git a/articles/virtual-machine-scale-sets/media/virtual-machine-scale-sets-maintenance-notifications/monitor-service-health.png b/articles/virtual-machine-scale-sets/media/virtual-machine-scale-sets-maintenance-notifications/monitor-service-health.png new file mode 100644 index 0000000000000..07ea98e430996 Binary files /dev/null and b/articles/virtual-machine-scale-sets/media/virtual-machine-scale-sets-maintenance-notifications/monitor-service-health.png differ diff --git a/articles/virtual-machine-scale-sets/quick-create-portal.md b/articles/virtual-machine-scale-sets/quick-create-portal.md index 3a660e689d1b1..2a89fbef43404 100644 --- a/articles/virtual-machine-scale-sets/quick-create-portal.md +++ b/articles/virtual-machine-scale-sets/quick-create-portal.md @@ -65,8 +65,8 @@ You can deploy a scale set with a Windows Server image or Linux image such as RH :::image type="content" source="./media/virtual-machine-scale-sets-create-portal/quick-create-scale-set.png" alt-text="Image shows create options for scale sets in the Azure portal."::: 1. Select **Next** to move the the other pages. -1. Leave the defaults for the **Instance** and **Disks** pages. -1. On the **Networking** page, under **Load balancing**, select **Yes** to put the scale set instances behind a load balancer. +1. Leave the defaults for the **Disks** page. +1. On the **Networking** page, under **Load balancing**, select the **Use a load balancer** option to put the scale set instances behind a load balancer. 1. In **Load balancing options**, select **Azure load balancer**. 1. In **Select a load balancer**, select *myLoadBalancer* that you created earlier. 1. For **Select a backend pool**, select **Create new**, type *myBackendPool*, then select **Create**. 
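The quickstart steps above use the portal. As a rough CLI equivalent, offered only as a sketch, the following assumes *myLoadBalancer* and the backend pool *myBackendPool* already exist in the same resource group and virtual network, and the image alias can vary by CLI version.

```azurecli
# Sketch only: assumes myLoadBalancer and myBackendPool already exist in
# myVMSSResourceGroup and share a virtual network with the scale set.
# The image alias (Ubuntu2204 here) can vary by CLI version.
az vmss create \
  --resource-group myVMSSResourceGroup \
  --name myScaleSet \
  --image Ubuntu2204 \
  --admin-username azureuser \
  --generate-ssh-keys \
  --lb myLoadBalancer \
  --backend-pool-name myBackendPool
```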
diff --git a/articles/virtual-machine-scale-sets/scripts/cli-sample-enable-autoscale.md b/articles/virtual-machine-scale-sets/scripts/cli-sample-enable-autoscale.md
index d1b0a6d22bea0..593d57c0281f3 100644
--- a/articles/virtual-machine-scale-sets/scripts/cli-sample-enable-autoscale.md
+++ b/articles/virtual-machine-scale-sets/scripts/cli-sample-enable-autoscale.md
@@ -45,7 +45,7 @@ This script uses the commands outlined in the following table:
|---|---|
| [az group create](/cli/azure/ad/group) | Creates a resource group in which all resources are stored. |
| [az vmss create](/cli/azure/vmss) | Creates the virtual machine scale set and connects it to the virtual network, subnet, and network security group. A load balancer is also created to distribute traffic to multiple VM instances. This command also specifies the VM image to be used and administrative credentials. |
-| [az monitor autoscale-settings create](/cli/azure/monitor/autoscale-settings) | Creates and applies autoscale rules to a virtual machine scale set. |
+| [az monitor autoscale-settings create](/cli/azure/monitor/autoscale) | Creates and applies autoscale rules to a virtual machine scale set. |
| [az group delete](/cli/azure/ad/group) | Deletes a resource group including all nested resources. |
## Next steps
diff --git a/articles/virtual-machine-scale-sets/share-images-across-tenants.md b/articles/virtual-machine-scale-sets/share-images-across-tenants.md
index e2b38d6e22a69..dedc2e37ee610 100644
--- a/articles/virtual-machine-scale-sets/share-images-across-tenants.md
+++ b/articles/virtual-machine-scale-sets/share-images-across-tenants.md
@@ -20,6 +20,9 @@ ms.custom: devx-track-azurecli
## Create a scale set using Azure CLI
+> [!IMPORTANT]
+> You can't currently create a Flexible virtual machine scale set from an image shared by another tenant.
+
Sign in the service principal for tenant 1 using the appID, the app key, and the ID of tenant 1. You can use `az account show --query "tenantId"` to get the tenant IDs if needed.
```azurecli-interactive
diff --git a/articles/virtual-machine-scale-sets/virtual-machine-scale-sets-faq.yml b/articles/virtual-machine-scale-sets/virtual-machine-scale-sets-faq.yml
index bbb35f7f270a4..6c78a81e022b6 100644
--- a/articles/virtual-machine-scale-sets/virtual-machine-scale-sets-faq.yml
+++ b/articles/virtual-machine-scale-sets/virtual-machine-scale-sets-faq.yml
@@ -473,7 +473,10 @@ sections:
- Reset the password by using the VM access extensions. Make sure to follow the password requirements, as described in [the FAQ](../virtual-machines/windows/faq.yml#what-are-the-password-requirements-when-creating-a-vm-).
- Using a VM access extension doesn't require reimaging, because the extension doesn't update the password in the model. The extension runs a script to append the password to the password or to the SSH key file. The extension doesn't remove the original SSH key.
+ Using a VM access extension doesn't require reimaging, because the extension doesn't update the password in the model. The extension runs a script to append the password to the password or to the SSH key file. The extension doesn't remove the original SSH key. Once the extension is updated, upgrade the instances to apply the updates to the username and password on all the VM instances.
+
+ > [!NOTE]
+ > If the auto upgrade policy is set to `manual`, manually select the instance and perform an upgrade operation on each individual VM instance. If the auto upgrade policy is set to `Auto`, the extension will upgrade automatically.
For more information, see [Automatic Extension Upgrades](../virtual-machines/automatic-extension-upgrade.md) Use the following PowerShell example for a Windows virtual machine scale set: diff --git a/articles/virtual-machine-scale-sets/virtual-machine-scale-sets-maintenance-notifications.md b/articles/virtual-machine-scale-sets/virtual-machine-scale-sets-maintenance-notifications.md index 4977005ce21b0..d9dacb2e34f4c 100644 --- a/articles/virtual-machine-scale-sets/virtual-machine-scale-sets-maintenance-notifications.md +++ b/articles/virtual-machine-scale-sets/virtual-machine-scale-sets-maintenance-notifications.md @@ -89,12 +89,24 @@ The **Self-service maintenance** column now appears in the list of virtual machi Azure communicates a schedule for planned maintenance by sending an email to the subscription owner and co-owners group. You can add recipients and channels to this communication by creating Activity Log alerts. For more information, see [Monitor subscription activity with the Azure Activity Log](../azure-monitor/essentials/platform-logs-overview.md). 1. Sign in to the [Azure portal](https://portal.azure.com). -2. In the left menu, select **Monitor**. -3. In the **Monitor - Alerts (classic)** pane, select **+Add activity log alert**. -4. On the **Add activity log alert** page, select or enter the requested information. In **Criteria**, make sure that you set the following values: - - **Event category**: Select **Service Health**. - - **Services**: Select **Virtual Machine Scale Sets and Virtual Machines**. - - **Type**: Select **Planned maintenance**. +1. In the left menu, select **Monitor**. +1. In the Monitor menu, select **Service Health**. + + :::image type="content" source="./media/virtual-machine-scale-sets-maintenance-notifications/monitor-service-health.png" alt-text="Select Service Health in the Monitor menu."::: + +1. In Service Health, select **+ Create service health alert**. + + :::image type="content" source="./media/virtual-machine-scale-sets-maintenance-notifications/monitor-create-service-health-alert.png" alt-text="Select Create service health alert button."::: + +1. On the **Create an alert rule** page: + 1. Select the relevant **Subscription** and **Region** containing the resources to monitor for planned maintenance events. + 1. Specify the following: + - **Services**: *Virtual Machine Scale Sets* and *Virtual Machines* + - **Event type**: *Planned maintenance* +1. Under **Actions**, add action groups to the alert rule in order to send notifications or invoke actions when a planned maintenance event is received. +1. Fill out the details under **Alert rule details**. +1. Select **Create alert rule**. 
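As a hedged, approximate CLI version of the portal steps above, the following creates an action group and then an activity log alert scoped to Service Health events; the `properties.incidentType` condition field is an assumption drawn from the Service Health event payload and may need adjusting, and all names are placeholders.

```azurecli
# Sketch only: names are placeholders, and the properties.incidentType filter is
# an assumption about the Service Health event payload; adjust it if needed.
az monitor action-group create \
  --resource-group myResourceGroup \
  --name myMaintenanceActionGroup \
  --short-name maint \
  --action email admin admin@contoso.com

az monitor activity-log alert create \
  --resource-group myResourceGroup \
  --name PlannedMaintenanceAlert \
  --scope "/subscriptions/<subscription-id>" \
  --condition "category=ServiceHealth and properties.incidentType=Maintenance" \
  --action-group myMaintenanceActionGroup
```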
+ To learn more about how to configure Activity Log alerts, see [Create Activity Log alerts](../azure-monitor/alerts/activity-log-alerts.md)
diff --git a/articles/virtual-machine-scale-sets/virtual-machine-scale-sets-orchestration-modes.md b/articles/virtual-machine-scale-sets/virtual-machine-scale-sets-orchestration-modes.md
index 90265b6afe41d..07bc2b44fbebe 100644
--- a/articles/virtual-machine-scale-sets/virtual-machine-scale-sets-orchestration-modes.md
+++ b/articles/virtual-machine-scale-sets/virtual-machine-scale-sets-orchestration-modes.md
@@ -100,6 +100,7 @@ The following table compares the Flexible orchestration mode, Uniform orchestrat
| SKUs supported | D series, E series, F series, A series, B series, Intel, AMD; Specialty SKUs (G, H, L, M, N) are not supported | All SKUs | All SKUs |
| Full control over VM, NICs, Disks | Yes | Limited control with virtual machine scale sets VM API | Yes |
| RBAC Permissions Required | Compute VMSS Write, Compute VM Write, Network | Compute VMSS Write | N/A |
+| Cross tenant shared image gallery | No | Yes | Yes |
| Accelerated networking | Yes | Yes | Yes |
| Spot instances and pricing  | Yes, you can have both Spot and Regular priority instances | Yes, instances must either be all Spot or all Regular | No, Regular priority instances only |
| Mix operating systems | Yes, Linux and Windows can reside in the same Flexible scale set | No, instances are the same operating system | Yes, Linux and Windows can reside in the same availability set |
@@ -108,12 +109,13 @@ The following table compares the Flexible orchestration mode, Uniform orchestrat
| Write Accelerator  | No | Yes | Yes |
| Proximity Placement Groups  | Yes, read [Proximity Placement Groups documentation](../virtual-machine-scale-sets/proximity-placement-groups.md) | Yes, read [Proximity Placement Groups documentation](../virtual-machine-scale-sets/proximity-placement-groups.md) | Yes |
| Azure Dedicated Hosts  | No | Yes | Yes |
-| Managed Identity | User Assigned Identity Only | System Assigned or User Assigned | N/A (can specify Managed Identity on individual instances) |
+| Managed Identity | [User Assigned Identity](../active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vmss.md#user-assigned-managed-identity) only1 | System Assigned or User Assigned | N/A (can specify Managed Identity on individual instances) |
| Add/remove existing VM to the group | No | No | No |
| Service Fabric | No | Yes | No |
| Azure Kubernetes Service (AKS) / AKE | No | Yes | No |
| UserData | Yes | Yes | UserData can be specified for individual VMs |
+1 For Uniform scale sets, the `GET VMSS` response will have a reference to the *identity*, *clientID*, and *principalID*. For Flexible scale sets, the response will only get a reference to the *identity*. You can make a call to `Identity` to get the *clientID* and *PrincipalID*.
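To illustrate the footnote above, here is a hedged sketch of reading the identity block from a Flexible scale set and then resolving the *clientID* and *principalID* from the user-assigned identity itself; the resource and identity names are placeholders.

```azurecli
# Sketch only: resource and identity names are placeholders.
az vmss show \
  --resource-group myResourceGroup \
  --name myFlexScaleSet \
  --query identity

# For the user-assigned identity itself, read the clientId and principalId directly.
az identity show \
  --resource-group myResourceGroup \
  --name myUserAssignedIdentity \
  --query "{clientId:clientId, principalId:principalId}"
```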
### Autoscaling and instance orchestration | Feature | Supported by Flexible orchestration for scale sets | Supported by Uniform orchestration for scale sets | Supported by Availability Sets | diff --git a/articles/virtual-machines/TOC.yml b/articles/virtual-machines/TOC.yml index 4d905ea175f31..b51da9f1ea703 100644 --- a/articles/virtual-machines/TOC.yml +++ b/articles/virtual-machines/TOC.yml @@ -240,6 +240,10 @@ href: sizes-storage.md - name: Lsv2-series href: lsv2-series.md + - name: Lsv3-series + href: lsv3-series.md + - name: Lasv3-series + href: lasv3-series.md - name: Optimize performance items: - name: Linux @@ -1009,6 +1013,9 @@ - name: Portal displayName: availability and scale, availability, scale, autoscale, availability zones, AV zones href: ./create-portal-availability-zone.md + - name: Migrate to Availability Zones + displayName: availability and scale, availability, scale, autoscale, availability zones, AV zones + href: ../availability-zones/migrate-vm.md - name: Proximity Placement Groups items: - name: Overview @@ -1290,6 +1297,8 @@ href: ephemeral-os-disks.md - name: Create a VM using ephemeral OS disks href: ephemeral-os-disks-deploy.md + - name: FAQ on ephemeral OS disks + href: ephemeral-os-disks-faq.md - name: Securely import/export a disk items: - name: Configure private links for disks - CLI @@ -1847,9 +1856,6 @@ - name: Overview displayName: VM insights href: ../azure-monitor/vm/vminsights-overview.md?context=%2fazure%2fvirtual-machines%2fcontext%2fcontext - - name: General Availability FAQ - displayName: VM insights - href: ../azure-monitor/vm/vminsights-ga-release-faq.yml?context=%2fazure%2fvirtual-machines%2fcontext%2fcontext - name: FAQ displayName: VM insights href: ../azure-monitor/faq.yml?context=%2fazure%2fvirtual-machines%2fcontext%2fcontext diff --git a/articles/virtual-machines/av1-series-retirement.md b/articles/virtual-machines/av1-series-retirement.md index fb0dc410e6897..ef00faa6f0aaf 100644 --- a/articles/virtual-machines/av1-series-retirement.md +++ b/articles/virtual-machines/av1-series-retirement.md @@ -3,7 +3,7 @@ title: Av1-series retirement description: Retirement information for the Av1 series VM sizes. author: mimckitt ms.service: virtual-machines -ms.subservice: vm-sizes-general +ms.subservice: sizes ms.topic: conceptual ms.date: 07/26/2021 ms.author: mimckitt diff --git a/articles/virtual-machines/av2-series.md b/articles/virtual-machines/av2-series.md index 3ead75a32a21a..998ef9804781c 100644 --- a/articles/virtual-machines/av2-series.md +++ b/articles/virtual-machines/av2-series.md @@ -1,12 +1,12 @@ --- title: Av2-series description: Specifications for the Av2-series VMs. -author: migerdes +author: rishabv90 ms.service: virtual-machines -ms.subservice: vm-sizes-general +ms.subservice: sizes ms.topic: conceptual ms.date: 02/03/2020 -ms.author: jushiman +ms.author: risverma --- # Av2-series diff --git a/articles/virtual-machines/azure-compute-gallery.md b/articles/virtual-machines/azure-compute-gallery.md index 164bf2f7ad98e..6dded7735af42 100644 --- a/articles/virtual-machines/azure-compute-gallery.md +++ b/articles/virtual-machines/azure-compute-gallery.md @@ -180,6 +180,21 @@ As the Azure Compute Gallery, definition, and version are all resources, they ca We recommend sharing at the Gallery level for the best experience. We do not recommend sharing individual image versions. For more information about Azure RBAC, see [Assign Azure roles](../role-based-access-control/role-assignments-portal.md). 
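As a hedged example of the gallery-level sharing recommended above, a role assignment scoped to the gallery resource might look like the following; the assignee, subscription ID, resource group, and gallery name are placeholders.

```azurecli
# Sketch only: the assignee, subscription ID, resource group, and gallery name are placeholders.
az role assignment create \
  --role "Reader" \
  --assignee "user@contoso.com" \
  --scope "/subscriptions/<subscription-id>/resourceGroups/myGalleryRG/providers/Microsoft.Compute/galleries/myGallery"
```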
+## Activity Log
+The [Activity log](../azure-monitor/essentials/activity-log.md) displays recent activity on the gallery, image, or version including any configuration changes and when it was created and deleted. View the activity log in the Azure portal, or create a [diagnostic setting to send it to a Log Analytics workspace](../azure-monitor/essentials/activity-log.md#send-to-log-analytics-workspace), where you can view events over time or analyze them with other collected data.
+
+The following table lists a few example operations that relate to gallery operations in the activity log. For a complete list of possible log entries, see [Microsoft.Compute Resource Provider options](../role-based-access-control/resource-provider-operations.md#compute).
+
+| Operation | Description |
+|:---|:---|
+| Microsoft.Compute/galleries/write | Creates a new Gallery or updates an existing one |
+| Microsoft.Compute/galleries/delete | Deletes the Gallery |
+| Microsoft.Compute/galleries/share/action | Shares a Gallery to different scopes |
+| Microsoft.Compute/galleries/images/read | Gets the properties of Gallery Image |
+| Microsoft.Compute/galleries/images/write | Creates a new Gallery Image or updates an existing one |
+| Microsoft.Compute/galleries/images/versions/read | Gets the properties of Gallery Image Version |
+| | |
+
## Billing
There is no extra charge for using the Azure Compute Gallery service. You will be charged for the following resources:
diff --git a/articles/virtual-machines/co-location.md b/articles/virtual-machines/co-location.md
index 1cad6c526ea32..a2b23d495dff1 100644
--- a/articles/virtual-machines/co-location.md
+++ b/articles/virtual-machines/co-location.md
@@ -43,7 +43,8 @@ Proximity placement groups offer colocation in the same data center. However, be
- When you ask for the first virtual machine in the proximity placement group, the data center is automatically selected. In some cases, a second request for a different virtual machine SKU, may fail if it doesn't exist in that data center. In this case, an **OverconstrainedAllocationRequest** error is returned. To avoid this, try changing the order in which you deploy your SKUs or have both resources deployed using a single ARM template.
- In the case of elastic workloads, where you add and remove VM instances, having a proximity placement group constraint on your deployment may result in a failure to satisfy the request resulting in **AllocationFailure** error.
-- Stopping (deallocate) and starting your VMs as needed is another way to achieve elasticity. Since the capacity is not kept once you stop (deallocate) a VM, starting it again may result in an **AllocationFailure** error.
+- Stopping (deallocate) and starting your VMs as needed is another way to achieve elasticity. Since the capacity is not kept once you stop (deallocate) a VM, starting it again may result in an **AllocationFailure** error.
+- VM start and redeploy operations will continue to respect the Proximity Placement Group once successfully configured.
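A hedged sketch of the pattern described in these bullets: create the proximity placement group first, deploy a VM into it, and use stop (deallocate)/start for elasticity while accepting that a later start can fail allocation. All names are placeholders, and the image alias can vary by CLI version.

```azurecli
# Sketch only: all names are placeholders; the image alias can vary by CLI version.
az ppg create \
  --resource-group myResourceGroup \
  --name myPPG \
  --location eastus \
  --type Standard

az vm create \
  --resource-group myResourceGroup \
  --name myVM \
  --image Ubuntu2204 \
  --ppg myPPG \
  --admin-username azureuser \
  --generate-ssh-keys

# Deallocating releases the capacity, so a later start can return AllocationFailure
# if space near the rest of the placement group is no longer available.
az vm deallocate --resource-group myResourceGroup --name myVM
az vm start --resource-group myResourceGroup --name myVM
```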
## Planned maintenance and Proximity Placement Groups diff --git a/articles/virtual-machines/dasv5-dadsv5-series.md b/articles/virtual-machines/dasv5-dadsv5-series.md index 9cfe685781b1e..751765d2e6e8d 100644 --- a/articles/virtual-machines/dasv5-dadsv5-series.md +++ b/articles/virtual-machines/dasv5-dadsv5-series.md @@ -5,7 +5,7 @@ author: mamccrea ms.author: mamccrea ms.reviewer: mimckitt ms.service: virtual-machines -ms.subservice: vm-sizes-general +ms.subservice: sizes ms.topic: conceptual ms.date: 10/8/2021 diff --git a/articles/virtual-machines/dav4-dasv4-series.md b/articles/virtual-machines/dav4-dasv4-series.md index f3b06afdeee30..d983cf0a52dc5 100644 --- a/articles/virtual-machines/dav4-dasv4-series.md +++ b/articles/virtual-machines/dav4-dasv4-series.md @@ -4,7 +4,7 @@ description: Specifications for the Dav4 and Dasv4-series VMs. author: mamccrea ms.author: mamccrea ms.service: virtual-machines -ms.subservice: vm-sizes-general +ms.subservice: sizes ms.topic: conceptual ms.date: 02/03/2020 ms.reviewer: jushiman @@ -56,7 +56,7 @@ Dasv4-series sizes are based on the 2.35Ghz AMD EPYCTM 7452 processor [VM Generation Support](generation-2.md): Generation 1 and 2
                    [Accelerated Networking](../virtual-network/create-vm-accelerated-networking-cli.md): Supported
                    [Ephemeral OS Disks](ephemeral-os-disks.md): Supported
                    -[Nested Virtualization](/virtualization/hyper-v-on-windows/user-guide/nested-virtualization): Not Supported
                    +[Nested Virtualization](/virtualization/hyper-v-on-windows/user-guide/nested-virtualization): Not supported

                    | Size | vCPU | Memory: GiB | Temp storage (SSD) GiB | Max data disks | Max cached and temp storage throughput: IOPS / MBps (cache size in GiB) | Max burst cached and temp storage throughput: IOPS / MBps1 | Max uncached disk throughput: IOPS / MBps | Max burst uncached disk throughput: IOPS/MBps1 | Max NICs | Expected network bandwidth (Mbps) | diff --git a/articles/virtual-machines/dcasv5-dcadsv5-series.md b/articles/virtual-machines/dcasv5-dcadsv5-series.md index 47ab49d84ae30..b7b835ddf8dd6 100644 --- a/articles/virtual-machines/dcasv5-dcadsv5-series.md +++ b/articles/virtual-machines/dcasv5-dcadsv5-series.md @@ -5,7 +5,7 @@ author: runcai ms.author: runcai ms.reviewer: mimckitt ms.service: virtual-machines -ms.subservice: vm-sizes-general +ms.subservice: sizes ms.topic: conceptual ms.date: 11/15/2021 diff --git a/articles/virtual-machines/dcv2-series.md b/articles/virtual-machines/dcv2-series.md index 56e47f946082b..0426d600a8b22 100644 --- a/articles/virtual-machines/dcv2-series.md +++ b/articles/virtual-machines/dcv2-series.md @@ -1,12 +1,12 @@ --- title: DCsv2-series - Azure Virtual Machines description: Specifications for the DCsv2-series VMs. -author: mmcrey +author: linuxelf001 ms.service: virtual-machines -ms.subservice: vm-sizes-general +ms.subservice: sizes ms.topic: conceptual ms.date: 02/20/2020 -ms.author: jushiman +ms.author: raginjup ms.custom: ignite-fall-2021 --- diff --git a/articles/virtual-machines/dcv3-series.md b/articles/virtual-machines/dcv3-series.md index 15efe5d7ac6c6..d04862c3941ea 100644 --- a/articles/virtual-machines/dcv3-series.md +++ b/articles/virtual-machines/dcv3-series.md @@ -1,12 +1,12 @@ --- -title: DCsv3 and DCdsv3-series - Azure Virtual Machines -description: Specifications for the DCsv3 and DCdsv3-series VMs. -author: mmcrey +title: DCsv3 and DCdsv3-series +description: Specifications for the DCsv3 and DCdsv3-series Azure Virtual Machines. +author: linuxelf001 ms.service: virtual-machines -ms.subservice: vm-sizes-general +ms.subservice: sizes ms.topic: conceptual -ms.date: 11/01/2021 -ms.author: mmcrey +ms.date: 05/24/2022 +ms.author: raginjup ms.custom: ignite-fall-2021 --- @@ -14,40 +14,34 @@ ms.custom: ignite-fall-2021 **Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Windows VMs :heavy_check_mark: Flexible scale sets :heavy_check_mark: Uniform scale sets -> [!IMPORTANT] -> DCsv3 and DCdsv3 are in public preview as of November 1st, 2021. +The DCsv3 and DCdsv3-series Azure Virtual Machines help protect the confidentiality and integrity of your code and data while they're being processed in the public cloud. By using Intel® Software Guard Extensions and Intel® [Total Memory Encryption - Multi Key](https://itpeernetwork.intel.com/memory-encryption/), customers can ensure their data is always encrypted and protected in use. -The DCsv3 and DCdsv3-series virtual machines help protect the confidentiality and integrity of your code and data whilst it’s processed in the public cloud. By leveraging Intel® Software Guard Extensions and Intel® Total Memory Encryption - Multi Key, customers can ensure their data is always encrypted and protected in use. +These machines are powered by the latest 3rd Generation Intel® Xeon Scalable processors, and use Intel® Turbo Boost Max Technology 3.0 to reach 3.5 GHz. -These machines are powered by the latest 3rd Generation Intel® Xeon Scalable processors, and leverage Intel® Turbo Boost Max Technology 3.0 to reach 3.5 GHz. 
-
-With this generation, CPU Cores have increased 6x (up to a maximum of 48 physical cores), Encrypted Memory (EPC) has increased 1500x to 256GB, Regular Memory has increased 12x to 384GB. All these changes substantially improve the performance gen-on-gen and unlock new entirely new scenarios.
+With this generation, CPU Cores have increased 6x (up to a maximum of 48 physical cores). Encrypted Memory (EPC) has increased 1500x to 256 GB. Regular Memory has increased 12x to 384 GB. All these changes substantially improve the performance and unlock entirely new scenarios.
> [!NOTE]
> Hyperthreading is disabled for added security posture. Pricing is the same as Dv5 and Dsv5-series per physical core.
-We are offering two variants dependent on whether the workload benefits from a local disk or not. Whether you choose a VM with a local disk or not, you can attach remote persistent disk storage to all VMs. Remote disk options (such as for the VM boot disk) are billed separately from the VMs in any case, as always.
-
-## Configuration
-
-CPU: 3rd Generation Intel® Xeon Scalable Processor 8370C
                    -Base All-Core Frequency: 2.8 GHz
                    -[Turbo Boost Max 3.0](https://www.intel.com/content/www/us/en/gaming/resources/turbo-boost.html): Enabled, Max Frequency 3.5 GHz
                    -[Hyper-Threading](https://www.intel.com/content/www/us/en/gaming/resources/hyper-threading.html): Not Supported
                    -[Total Memory Encryption - Multi Key](https://itpeernetwork.intel.com/memory-encryption/): Enabled
                    -[Premium Storage](premium-storage-performance.md): Supported
                    -[Ultra-Disk Storage](disks-enable-ultra-ssd.md): Supported
                    -[Accelerated Networking](../virtual-network/create-vm-accelerated-networking-cli.md): Supported
                    -[Azure Kubernetes Service](../aks/intro-kubernetes.md): Supported (CLI provisioning only initially)
                    -[Live Migration](maintenance-and-updates.md): Not Supported
                    -[Memory Preserving Updates](maintenance-and-updates.md): Not Supported
                    -[VM Generation Support](generation-2.md): Generation 2
                    -[Trusted Launch](trusted-launch.md): Coming Soon
                    -[Ephemeral OS Disks](ephemeral-os-disks.md): Supported for DCdsv3-series
                    -[Dedicated Host](dedicated-hosts.md): Coming Soon
                    -[Nested Virtualization](/virtualization/hyper-v-on-windows/user-guide/nested-virtualization): Not Supported
                    - -## DCsv3-series Technical specifications +There are two variants for each series, depending on whether the workload benefits from a local disk or not. You can attach remote persistent disk storage to all VMs, whether or not the VM has a local disk. As always, remote disk options (such as for the VM boot disk) are billed separately from the VMs in any case. + +Dcsv3-series instances run on a 3rd Generation Intel® Xeon Scalable Processor 8370C. The base All-Core frequency is 2.8 GHz. [Turbo Boost Max 3.0](https://www.intel.com/content/www/us/en/gaming/resources/turbo-boost.html) is enabled with a max frequency of 3.5 GHz. + +- [Premium Storage](premium-storage-performance.md): Supported +- [Live Migration](maintenance-and-updates.md): Not supported +- [Memory Preserving Updates](maintenance-and-updates.md): Not supported +- [VM Generation Support](generation-2.md): Generation 2 +- [Accelerated Networking](../virtual-network/create-vm-accelerated-networking-cli.md): Supported +- [Ephemeral OS Disks](ephemeral-os-disks.md): Supported +- [Ultra-Disk Storage](disks-enable-ultra-ssd.md): Supported +- [Azure Kubernetes Service](../aks/intro-kubernetes.md): Supported (CLI provisioning only) +- [Nested Virtualization](/virtualization/hyper-v-on-windows/user-guide/nested-virtualization): Not Supported +- [Hyper-Threading](https://www.intel.com/content/www/us/en/gaming/resources/hyper-threading.html): Not supported +- [Trusted Launch](trusted-launch.md): Not supported +- [Dedicated Host](dedicated-hosts.md): Not supported + + +## DCsv3-series | Size | Physical Cores | Memory GB | Temp storage (SSD) GiB | Max data disks | Max NICs | EPC Memory GB | |------------------|----------------|-------------|------------------------|----------------|---------|---------------------| @@ -60,7 +54,7 @@ Base All-Core Frequency: 2.8 GHz
                    | Standard_DC32s_v3 | 32 | 256 | Remote Storage Only | 32 | 8 | 192 | | Standard_DC48s_v3 | 48 | 384 | Remote Storage Only | 32 | 8 | 256 | -## DCdsv3-series Technical specifications +## DCdsv3-series | Size | Physical Cores | Memory GB | Temp storage (SSD) GiB | Max data disks | Max NICs | EPC Memory GB | |------------------|----------------|-------------|------------------------|----------------|---------|---------------------| @@ -73,11 +67,7 @@ Base All-Core Frequency: 2.8 GHz
                    | Standard_DC32ds_v3 | 32 | 256 | 2400 | 32 | 8 | 192 | | Standard_DC48ds_v3 | 48 | 384 | 2400 | 32 | 8 | 256 | -## Get started - -- Create DCsv3 and DCdsv3 VMs using the [Azure portal](./linux/quick-create-portal.md) -- DCsv3 and DCdsv3 VMs are [Generation 2 VMs](./generation-2.md#creating-a-generation-2-vm) and only support `Gen2` images. -- Currently available in the regions listed in [Azure Products by Region](https://azure.microsoft.com/global-infrastructure/services/?products=virtual-machines®ions=all). +[!INCLUDE [virtual-machines-common-sizes-table-defs](../../includes/virtual-machines-common-sizes-table-defs.md)] ## More sizes and information @@ -89,6 +79,12 @@ Base All-Core Frequency: 2.8 GHz
                    - [Previous generations](sizes-previous-gen.md) - [Pricing Calculator](https://azure.microsoft.com/pricing/calculator/) -Pricing Calculator : [Pricing Calculator](https://azure.microsoft.com/pricing/calculator/) +## Next steps + +- Create DCsv3 and DCdsv3 VMs using the [Azure portal](./linux/quick-create-portal.md) +- DCsv3 and DCdsv3 VMs are [Generation 2 VMs](./generation-2.md#creating-a-generation-2-vm) and only support `Gen2` images. +- Currently available in the regions listed in [Azure Products by Region](https://azure.microsoft.com/global-infrastructure/services/?products=virtual-machines®ions=all). + +Pricing Calculator: [Pricing Calculator](https://azure.microsoft.com/pricing/calculator/) Learn more about how [Azure compute units (ACU)](acu.md) can help you compare compute performance across Azure SKUs. diff --git a/articles/virtual-machines/ddv4-ddsv4-series.md b/articles/virtual-machines/ddv4-ddsv4-series.md index fed323b7c3477..4352ace0eaa98 100644 --- a/articles/virtual-machines/ddv4-ddsv4-series.md +++ b/articles/virtual-machines/ddv4-ddsv4-series.md @@ -6,7 +6,7 @@ ms.author: shuji ms.reviewer: mimckitt ms.custom: mimckitt ms.service: virtual-machines -ms.subservice: vm-sizes-general +ms.subservice: sizes ms.topic: conceptual ms.date: 06/01/2020 --- diff --git a/articles/virtual-machines/ddv5-ddsv5-series.md b/articles/virtual-machines/ddv5-ddsv5-series.md index 9c8c674262258..129d0b2dccd5e 100644 --- a/articles/virtual-machines/ddv5-ddsv5-series.md +++ b/articles/virtual-machines/ddv5-ddsv5-series.md @@ -5,7 +5,7 @@ author: andysports8 ms.author: shuji ms.custom: mimckitt ms.service: virtual-machines -ms.subservice: vm-sizes-general +ms.subservice: sizes ms.topic: conceptual ms.date: 10/20/2021 --- diff --git a/articles/virtual-machines/dedicated-host-compute-optimized-skus.md b/articles/virtual-machines/dedicated-host-compute-optimized-skus.md index f3fae4b53d5cb..ec8759d934828 100644 --- a/articles/virtual-machines/dedicated-host-compute-optimized-skus.md +++ b/articles/virtual-machines/dedicated-host-compute-optimized-skus.md @@ -39,13 +39,13 @@ The following packing configuration outlines the max packing of uniform VMs you ### Fsv2-Type3 -The Fsv2-Type3 is a Dedicated Host SKU utilizing the Intel® Cascade Lake (Xeon® Platinum 8272CL) processor. It offers 52 physical cores, 84 vCPUs, and 504 GiB of RAM. The Fsv2-Type3 runs [Fsv2-series](fsv2-series.md) VMs. +The Fsv2-Type3 is a Dedicated Host SKU utilizing the Intel® Cascade Lake (Xeon® Platinum 8272CL) processor. It offers 52 physical cores, 80 vCPUs, and 504 GiB of RAM. The Fsv2-Type3 runs [Fsv2-series](fsv2-series.md) VMs. The following packing configuration outlines the max packing of uniform VMs you can put onto a Fsv2-Type3 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|---------|-------| -| 52 | 84 | 504 GiB | F2s v2 | 32 | +| 52 | 80 | 504 GiB | F2s v2 | 32 | | | | | F4s v2 | 21 | | | | | F8s v2 | 10 | | | | | F16s v2 | 5 | @@ -56,13 +56,13 @@ The following packing configuration outlines the max packing of uniform VMs you ### Fsv2-Type4 -The Fsv2-Type4 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 96 vCPUs, and 768 GiB of RAM. The Fsv2-Type4 runs [Fsv2-series](fsv2-series.md) VMs. +The Fsv2-Type4 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 119 vCPUs, and 768 GiB of RAM. 
The Fsv2-Type4 runs [Fsv2-series](fsv2-series.md) VMs. The following packing configuration outlines the max packing of uniform VMs you can put onto a Fsv2-Type4 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|---------|-------| -| 64 | 96 | 768 GiB | F2s v2 | 32 | +| 64 | 119 | 768 GiB | F2s v2 | 32 | | | | | F4s v2 | 24 | | | | | F8s v2 | 12 | | | | | F16s v2 | 6 | diff --git a/articles/virtual-machines/dedicated-host-general-purpose-skus.md b/articles/virtual-machines/dedicated-host-general-purpose-skus.md index 369162a19087a..64b77a728fe18 100644 --- a/articles/virtual-machines/dedicated-host-general-purpose-skus.md +++ b/articles/virtual-machines/dedicated-host-general-purpose-skus.md @@ -59,13 +59,13 @@ The following packing configuration outlines the max packing of uniform VMs you ## Ddsv5 ### Ddsv5-Type1 -The Ddsv5-Type1 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 96 vCPUs, and 768 GiB of RAM. The Ddsv5-Type1 runs [Ddsv5-series](ddv5-ddsv5-series.md#ddsv5-series) VMs. Please refer to the VM size documentation to better understand specific VM performance information. +The Ddsv5-Type1 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 119 vCPUs, and 768 GiB of RAM. The Ddsv5-Type1 runs [Ddsv5-series](ddv5-ddsv5-series.md#ddsv5-series) VMs. Please refer to the VM size documentation to better understand specific VM performance information. The following packing configuration outlines the max packing of uniform VMs you can put onto a Ddsv5-Type1 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|----------|-------| -| 64 | 96 | 768 GiB | D2ds v5 | 32 | +| 64 | 119 | 768 GiB | D2ds v5 | 32 | | | | | D4ds v5 | 22 | | | | | D8ds v5 | 11 | | | | | D16ds v5 | 5 | @@ -77,13 +77,13 @@ The following packing configuration outlines the max packing of uniform VMs you ## Dsv5 ### Dsv5-Type1 -The Dsv5-Type1 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 100 vCPUs, and 768 GiB of RAM. The Dsv5-Type1 runs [Dsv5-series](dv5-dsv5-series.md#dsv5-series) VMs. Please refer to the VM size documentation to better understand specific VM performance information. +The Dsv5-Type1 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 119 vCPUs, and 768 GiB of RAM. The Dsv5-Type1 runs [Dsv5-series](dv5-dsv5-series.md#dsv5-series) VMs. Please refer to the VM size documentation to better understand specific VM performance information. The following packing configuration outlines the max packing of uniform VMs you can put onto a Dsv5-Type1 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|---------|-------| -| 64 | 100 | 768 GiB | D2s v5 | 32 | +| 64 | 119 | 768 GiB | D2s v5 | 32 | | | | | D4s v5 | 25 | | | | | D8s v5 | 12 | | | | | D16s v5 | 6 | @@ -115,13 +115,13 @@ You can also mix multiple VM sizes on the Dasv4-Type1. The following are sample - 20 D4asv4 + 8 D2asv4 ### Dasv4-Type2 -The Dasv4-Type2 is a Dedicated Host SKU utilizing AMD's EPYC™ 7763v processor. It offers 64 physical cores, 110 vCPUs, and 768 GiB of RAM. The Dasv4-Type2 runs [Dasv4-series](dav4-dasv4-series.md#dasv4-series) VMs. 
Please refer to the VM size documentation to better understand specific VM performance information. +The Dasv4-Type2 is a Dedicated Host SKU utilizing AMD's EPYC™ 7763v processor. It offers 64 physical cores, 112 vCPUs, and 768 GiB of RAM. The Dasv4-Type2 runs [Dasv4-series](dav4-dasv4-series.md#dasv4-series) VMs. Please refer to the VM size documentation to better understand specific VM performance information. The following packing configuration outlines the max packing of uniform VMs you can put onto a Dasv4-Type2 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|----------|-------| -| 64 | 110 | 768 GiB | D2as v4 | 32 | +| 64 | 112 | 768 GiB | D2as v4 | 32 | | | | | D4as v4 | 25 | | | | | D8as v4 | 12 | | | | | D16as v4 | 6 | @@ -132,13 +132,13 @@ The following packing configuration outlines the max packing of uniform VMs you ## Ddsv4 ### Ddsv4-Type1 -The Ddsv4-Type1 is a Dedicated Host SKU utilizing the Intel® Cascade Lake (Xeon® Platinum 8272CL) processor. It offers 52 physical cores, 68 vCPUs, and 504 GiB of RAM. The Ddsv4-Type1 runs [Ddsv4-series](ddv4-ddsv4-series.md#ddsv4-series) VMs. +The Ddsv4-Type1 is a Dedicated Host SKU utilizing the Intel® Cascade Lake (Xeon® Platinum 8272CL) processor. It offers 52 physical cores, 80 vCPUs, and 504 GiB of RAM. The Ddsv4-Type1 runs [Ddsv4-series](ddv4-ddsv4-series.md#ddsv4-series) VMs. The following packing configuration outlines the max packing of uniform VMs you can put onto a Ddsv4-Type1 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|----------|-------| -| 52 | 68 | 504 GiB | D2ds v4 | 32 | +| 52 | 80 | 504 GiB | D2ds v4 | 32 | | | | | D4ds v4 | 17 | | | | | D8ds v4 | 8 | | | | | D16ds v4 | 4 | @@ -152,13 +152,13 @@ You can also mix multiple VM sizes on the Ddsv4-Type1. The following are sample - 10 D4dsv4 + 14 D2dsv4 ### Ddsv4-Type2 -The Ddsv4-Type2 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 76 vCPUs, and 768 GiB of RAM. The Ddsv4-Type2 runs [Ddsv4-series](ddv4-ddsv4-series.md#ddsv4-series) VMs. +The Ddsv4-Type2 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 119 vCPUs, and 768 GiB of RAM. The Ddsv4-Type2 runs [Ddsv4-series](ddv4-ddsv4-series.md#ddsv4-series) VMs. The following packing configuration outlines the max packing of uniform VMs you can put onto a Ddsv4-Type2 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|----------|-------| -| 64 | 76 | 768 GiB | D2ds v4 | 32 | +| 64 | 119 | 768 GiB | D2ds v4 | 32 | | | | | D4ds v4 | 19 | | | | | D8ds v4 | 9 | | | | | D16ds v4 | 4 | @@ -190,13 +190,13 @@ You can also mix multiple VM sizes on the Dsv4-Type1. The following are sample c ### Dsv4-Type2 -The Dsv4-Type2 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 96 vCPUs, and 768 GiB of RAM. The Dsv4-Type2 runs [Dsv4-series](dv4-dsv4-series.md#dsv4-series) VMs. +The Dsv4-Type2 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 119 vCPUs, and 768 GiB of RAM. The Dsv4-Type2 runs [Dsv4-series](dv4-dsv4-series.md#dsv4-series) VMs. 
The following packing configuration outlines the max packing of uniform VMs you can put onto a Dsv4-Type2 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|---------|-------| -| 64 | 96 | 768 GiB | D2s v4 | 32 | +| 64 | 119 | 768 GiB | D2s v4 | 32 | | | | | D4s v4 | 25 | | | | | D8s v4 | 12 | | | | | D16s v4 | 6 | @@ -207,13 +207,13 @@ The following packing configuration outlines the max packing of uniform VMs you ## Dsv3 ### Dsv3-Type1 -The Dsv3-Type1 is a Dedicated Host SKU utilizing the Intel® Broadwell (2.3 GHz Xeon® E5-2673 v4) processor. It offers 40 physical cores, 68 vCPUs, and 256 GiB of RAM. The Dsv3-Type1 runs [Dsv3-series](dv3-dsv3-series.md#dsv3-series) VMs. +The Dsv3-Type1 is a Dedicated Host SKU utilizing the Intel® Broadwell (2.3 GHz Xeon® E5-2673 v4) processor. It offers 40 physical cores, 64 vCPUs, and 256 GiB of RAM. The Dsv3-Type1 runs [Dsv3-series](dv3-dsv3-series.md#dsv3-series) VMs. The following packing configuration outlines the max packing of uniform VMs you can put onto a Dsv3-Type1 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|---------|-------| -| 40 | 68 | 256 GiB | D2s v3 | 32 | +| 40 | 64 | 256 GiB | D2s v3 | 32 | | | | | D4s v3 | 17 | | | | | D8s v3 | 8 | | | | | D16s v3 | 4 | @@ -228,13 +228,13 @@ You can also mix multiple VM sizes on the Dsv3-Type1. The following are sample c ### Dsv3-Type2 -The Dsv3-Type2 is a Dedicated Host SKU utilizing the Intel® Skylake (2.1 GHz Xeon® Platinum 8171M) processor. It offers 48 physical cores, 80 vCPUs, and 504 GiB of RAM. The Dsv3-Type2 runs [Dsv3-series](dv3-dsv3-series.md#dsv3-series) VMs. +The Dsv3-Type2 is a Dedicated Host SKU utilizing the Intel® Skylake (2.1 GHz Xeon® Platinum 8171M) processor. It offers 48 physical cores, 76 vCPUs, and 504 GiB of RAM. The Dsv3-Type2 runs [Dsv3-series](dv3-dsv3-series.md#dsv3-series) VMs. The following packing configuration outlines the max packing of uniform VMs you can put onto a Dsv3-Type2 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|---------|-------| -| 48 | 80 | 504 GiB | D2s v3 | 32 | +| 48 | 76 | 504 GiB | D2s v3 | 32 | | | | | D4s v3 | 20 | | | | | D8s v3 | 10 | | | | | D16s v3 | 5 | @@ -249,13 +249,13 @@ You can also mix multiple VM sizes on the Dsv3-Type2. The following are sample c ### Dsv3-Type3 -The Dsv3-Type3 is a Dedicated Host SKU utilizing the Intel® Cascade Lake (Xeon® Platinum 8272CL) processor. It offers 52 physical cores, 84 vCPUs, and 504 GiB of RAM. The Dsv3-Type3 runs [Dsv3-series](dv3-dsv3-series.md#dsv3-series) VMs. +The Dsv3-Type3 is a Dedicated Host SKU utilizing the Intel® Cascade Lake (Xeon® Platinum 8272CL) processor. It offers 52 physical cores, 80 vCPUs, and 504 GiB of RAM. The Dsv3-Type3 runs [Dsv3-series](dv3-dsv3-series.md#dsv3-series) VMs. The following packing configuration outlines the max packing of uniform VMs you can put onto a Dsv3-Type3 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|---------|-------| -| 52 | 84 | 504 GiB | D2s v3 | 32 | +| 52 | 80 | 504 GiB | D2s v3 | 32 | | | | | D4s v3 | 21 | | | | | D8s v3 | 10 | | | | | D16s v3 | 5 | @@ -270,13 +270,13 @@ You can also mix multiple VM sizes on the Dsv3-Type3. 
The following are sample c ### Dsv3-Type4 -The Dsv3-Type4 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 96 vCPUs, and 768 GiB of RAM. The Dsv3-Type4 runs [Dsv3-series](dv3-dsv3-series.md#dsv3-series) VMs. +The Dsv3-Type4 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 119 vCPUs, and 768 GiB of RAM. The Dsv3-Type4 runs [Dsv3-series](dv3-dsv3-series.md#dsv3-series) VMs. The following packing configuration outlines the max packing of uniform VMs you can put onto a Dsv3-Type4 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|---------|-------| -| 64 | 96 | 768 GiB | D2s v3 | 32 | +| 64 | 119 | 768 GiB | D2s v3 | 32 | | | | | D4s v3 | 24 | | | | | D8s v3 | 12 | | | | | D16s v3 | 6 | diff --git a/articles/virtual-machines/dedicated-host-gpu-optimized-skus.md b/articles/virtual-machines/dedicated-host-gpu-optimized-skus.md index a84e1fedf1002..c345742a8eee1 100644 --- a/articles/virtual-machines/dedicated-host-gpu-optimized-skus.md +++ b/articles/virtual-machines/dedicated-host-gpu-optimized-skus.md @@ -37,7 +37,7 @@ The following packing configuration outlines the max packing of uniform VMs you ## NVsv3 ### NVsv3-Type1 -The NVsv3-Type1 is a Dedicated Host SKU utilizing the Intel® Broadwell (E5-2690 v4) processor with NVDIDIA Tesla M60 GPUs and NVIDIA GRID technology. It offers 28 physical cores, 48 vCPUs, and 448 GiB of RAM. The NVsv3-Type1 runs [NVv3-series](nvv3-series.md) VMs. +The NVsv3-Type1 is a Dedicated Host SKU utilizing the Intel® Broadwell (E5-2690 v4) processor with NVIDIA Tesla M60 GPUs and NVIDIA GRID technology. It offers 28 physical cores, 48 vCPUs, and 448 GiB of RAM. The NVsv3-Type1 runs [NVv3-series](nvv3-series.md) VMs. The following packing configuration outlines the max packing of uniform VMs you can put onto a NVsv3-Type1 host. diff --git a/articles/virtual-machines/dedicated-host-memory-optimized-skus.md b/articles/virtual-machines/dedicated-host-memory-optimized-skus.md index acaeeaec8f291..c9c515f5c06f9 100644 --- a/articles/virtual-machines/dedicated-host-memory-optimized-skus.md +++ b/articles/virtual-machines/dedicated-host-memory-optimized-skus.md @@ -22,13 +22,13 @@ The sizes and hardware types available for dedicated hosts vary by region. Refer ## Eadsv5 ### Eadsv5-Type1 -The Eadsv5-Type1 is a Dedicated Host SKU utilizing AMD's EPYC™ 7763v processor. It offers 64 physical cores, 96 vCPUs, and 768 GiB of RAM. The Eadsv5-Type1 runs [Eadsv5-series](easv5-eadsv5-series.md#eadsv5-series) VMs. +The Eadsv5-Type1 is a Dedicated Host SKU utilizing AMD's EPYC™ 7763v processor. It offers 64 physical cores, 112 vCPUs, and 768 GiB of RAM. The Eadsv5-Type1 runs [Eadsv5-series](easv5-eadsv5-series.md#eadsv5-series) VMs. -The following packing configuration outlines the max packing of uniform VMs you can put onto a Eadsv5-Type1 host. +The following packing configuration outlines the max packing of uniform VMs you can put onto an Eadsv5-Type1 host. 
| Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|-----------|-------| -| 64 | 96 | 768 GiB | E2ads v5 | 32 | +| 64 | 112 | 768 GiB | E2ads v5 | 32 | | | | | E4ads v5 | 21 | | | | | E8ads v5 | 10 | | | | | E16ads v5 | 5 | @@ -41,13 +41,13 @@ The following packing configuration outlines the max packing of uniform VMs you ## Easv5 ### Easv5-Type1 -The Easv5-Type1 is a Dedicated Host SKU utilizing AMD's EPYC™ 7763v processor. It offers 64 physical cores, 96 vCPUs, and 768 GiB of RAM. The Easv5-Type1 runs [Easv5-series](easv5-eadsv5-series.md#easv5-series) VMs. +The Easv5-Type1 is a Dedicated Host SKU utilizing AMD's EPYC™ 7763v processor. It offers 64 physical cores, 112 vCPUs, and 768 GiB of RAM. The Easv5-Type1 runs [Easv5-series](easv5-eadsv5-series.md#easv5-series) VMs. -The following packing configuration outlines the max packing of uniform VMs you can put onto a Easv5-Type1 host. +The following packing configuration outlines the max packing of uniform VMs you can put onto an Easv5-Type1 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|----------|-------| -| 64 | 96 | 768 GiB | E2as v5 | 32 | +| 64 | 112 | 768 GiB | E2as v5 | 32 | | | | | E4as v5 | 21 | | | | | E8as v5 | 10 | | | | | E16as v5 | 5 | @@ -60,13 +60,13 @@ The following packing configuration outlines the max packing of uniform VMs you ## Edsv5 ### Edsv5-Type1 -The Edsv5-Type1 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 96 vCPUs, and 768 GiB of RAM. The Edsv5-Type1 runs [Edsv5-series](edv5-edsv5-series.md#edsv5-series) VMs. +The Edsv5-Type1 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 119 vCPUs, and 768 GiB of RAM. The Edsv5-Type1 runs [Edsv5-series](edv5-edsv5-series.md#edsv5-series) VMs. -The following packing configuration outlines the max packing of uniform VMs you can put onto a Edsv5-Type1 host. +The following packing configuration outlines the max packing of uniform VMs you can put onto an Edsv5-Type1 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|----------|-------| -| 64 | 96 | 768 GiB | E2ds v5 | 32 | +| 64 | 119 | 768 GiB | E2ds v5 | 32 | | | | | E4ds v5 | 21 | | | | | E8ds v5 | 10 | | | | | E16ds v5 | 5 | @@ -78,13 +78,13 @@ The following packing configuration outlines the max packing of uniform VMs you ## Esv5 ### Esv5-Type1 -The Esv5-Type1 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 84 vCPUs, and 768 GiB of RAM. The Esv5-Type1 runs [Esv5-series](ev5-esv5-series.md#esv5-series) VMs. +The Esv5-Type1 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 119 vCPUs, and 768 GiB of RAM. The Esv5-Type1 runs [Esv5-series](ev5-esv5-series.md#esv5-series) VMs. -The following packing configuration outlines the max packing of uniform VMs you can put onto a Esv5-Type1 host. +The following packing configuration outlines the max packing of uniform VMs you can put onto an Esv5-Type1 host. 
| Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|---------|-------| -| 64 | 84 | 768 GiB | E2s v5 | 32 | +| 64 | 119 | 768 GiB | E2s v5 | 32 | | | | | E4s v5 | 21 | | | | | E8s v5 | 10 | | | | | E16s v5 | 5 | @@ -98,7 +98,7 @@ The following packing configuration outlines the max packing of uniform VMs you The Easv4-Type1 is a Dedicated Host SKU utilizing AMD's 2.35 GHz EPYC™ 7452 processor. It offers 64 physical cores, 96 vCPUs, and 672 GiB of RAM. The Easv4-Type1 runs [Easv4-series](eav4-easv4-series.md#easv4-series) VMs. -The following packing configuration outlines the max packing of uniform VMs you can put onto a Easv4-Type1 host. +The following packing configuration outlines the max packing of uniform VMs you can put onto an Easv4-Type1 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|----------|-------| @@ -114,13 +114,13 @@ The following packing configuration outlines the max packing of uniform VMs you ### Easv4-Type2 -The Easv4-Type2 is a Dedicated Host SKU utilizing AMD's EPYC™ 7763v processor. It offers 64 physical cores, 96 vCPUs, and 768 GiB of RAM. The Easv4-Type2 runs [Easv4-series](eav4-easv4-series.md#easv4-series) VMs. +The Easv4-Type2 is a Dedicated Host SKU utilizing AMD's EPYC™ 7763v processor. It offers 64 physical cores, 112 vCPUs, and 768 GiB of RAM. The Easv4-Type2 runs [Easv4-series](eav4-easv4-series.md#easv4-series) VMs. -The following packing configuration outlines the max packing of uniform VMs you can put onto a Easv4-Type2 host. +The following packing configuration outlines the max packing of uniform VMs you can put onto an Easv4-Type2 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|----------|-------| -| 64 | 96 | 768 GiB | E2as v4 | 32 | +| 64 | 112 | 768 GiB | E2as v4 | 32 | | | | | E4as v4 | 21 | | | | | E8as v4 | 10 | | | | | E16as v4 | 5 | @@ -134,13 +134,13 @@ The following packing configuration outlines the max packing of uniform VMs you ## Edsv4 ### Edsv4-Type1 -The Edsv4-Type1 is a Dedicated Host SKU utilizing the Intel® Cascade Lake (Xeon® Platinum 8272CL) processor. It offers 52 physical cores, 64 vCPUs, and 504 GiB of RAM. The Edsv4-Type1 runs [Edsv4-series](edv4-edsv4-series.md#edsv4-series) VMs. +The Edsv4-Type1 is a Dedicated Host SKU utilizing the Intel® Cascade Lake (Xeon® Platinum 8272CL) processor. It offers 52 physical cores, 80 vCPUs, and 504 GiB of RAM. The Edsv4-Type1 runs [Edsv4-series](edv4-edsv4-series.md#edsv4-series) VMs. -The following packing configuration outlines the max packing of uniform VMs you can put onto a Edsv4-Type1 host. +The following packing configuration outlines the max packing of uniform VMs you can put onto an Edsv4-Type1 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|----------|-------| -| 52 | 64 | 504 GiB | E2ds v4 | 31 | +| 52 | 80 | 504 GiB | E2ds v4 | 31 | | | | | E4ds v4 | 15 | | | | | E8ds v4 | 7 | | | | | E16ds v4 | 3 | @@ -151,13 +151,13 @@ The following packing configuration outlines the max packing of uniform VMs you ### Edsv4-Type2 -The Edsv4-Type2 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 76 vCPUs, and 768 GiB of RAM. The Edsv4-Type2 runs [Edsv4-series](edv4-edsv4-series.md#edsv4-series) VMs. 
+The Edsv4-Type2 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 119 vCPUs, and 768 GiB of RAM. The Edsv4-Type2 runs [Edsv4-series](edv4-edsv4-series.md#edsv4-series) VMs. -The following packing configuration outlines the max packing of uniform VMs you can put onto a Edsv4-Type2 host. +The following packing configuration outlines the max packing of uniform VMs you can put onto an Edsv4-Type2 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|----------|-------| -| 64 | 76 | 768 GiB | E2ds v4 | 32 | +| 64 | 119 | 768 GiB | E2ds v4 | 32 | | | | | E4ds v4 | 19 | | | | | E8ds v4 | 9 | | | | | E16ds v4 | 4 | @@ -169,13 +169,13 @@ The following packing configuration outlines the max packing of uniform VMs you ## Esv4 ### Esv4-Type1 -The Esv4-Type1 is a Dedicated Host SKU utilizing the Intel® Cascade Lake (Xeon® Platinum 8272CL) processor. It offers 52 physical cores, 64 vCPUs, and 504 GiB of RAM. The Esv4-Type1 runs [Esv4-series](ev4-esv4-series.md#esv4-series) VMs. +The Esv4-Type1 is a Dedicated Host SKU utilizing the Intel® Cascade Lake (Xeon® Platinum 8272CL) processor. It offers 52 physical cores, 80 vCPUs, and 504 GiB of RAM. The Esv4-Type1 runs [Esv4-series](ev4-esv4-series.md#esv4-series) VMs. -The following packing configuration outlines the max packing of uniform VMs you can put onto a Esv4-Type1 host. +The following packing configuration outlines the max packing of uniform VMs you can put onto an Esv4-Type1 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|---------|-------| -| 52 | 64 | 504 GiB | E2s v4 | 31 | +| 52 | 80 | 504 GiB | E2s v4 | 31 | | | | | E4s v4 | 15 | | | | | E8s v4 | 7 | | | | | E16s v4 | 3 | @@ -186,13 +186,13 @@ The following packing configuration outlines the max packing of uniform VMs you ### Esv4-Type2 -The Esv4-Type2 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 84 vCPUs, and 768 GiB of RAM. The Esv4-Type2 runs [Esv4-series](ev4-esv4-series.md#esv4-series) VMs. +The Esv4-Type2 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 119 vCPUs, and 768 GiB of RAM. The Esv4-Type2 runs [Esv4-series](ev4-esv4-series.md#esv4-series) VMs. -The following packing configuration outlines the max packing of uniform VMs you can put onto a Esv4-Type2 host. +The following packing configuration outlines the max packing of uniform VMs you can put onto an Esv4-Type2 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|---------|-------| -| 64 | 84 | 768 GiB | E2s v4 | 32 | +| 64 | 119 | 768 GiB | E2s v4 | 32 | | | | | E4s v4 | 21 | | | | | E8s v4 | 10 | | | | | E16s v4 | 5 | @@ -206,7 +206,7 @@ The following packing configuration outlines the max packing of uniform VMs you The Esv3-Type1 is a Dedicated Host SKU utilizing the Intel® Broadwell (2.3 GHz Xeon® E5-2673 v4) processor. It offers 40 physical cores, 64 vCPUs, and 448 GiB of RAM. The Esv3-Type1 runs [Esv3-series](ev3-esv3-series.md#ev3-series) VMs. -The following packing configuration outlines the max packing of uniform VMs you can put onto a Esv3-Type1 host. +The following packing configuration outlines the max packing of uniform VMs you can put onto an Esv3-Type1 host. 
| Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|---------|-------| @@ -221,13 +221,13 @@ The following packing configuration outlines the max packing of uniform VMs you ### Esv3-Type2 -The Esv3-Type2 is a Dedicated Host SKU utilizing the Intel® Skylake (Xeon® 8171M) processor. It offers 48 physical cores, 64 vCPUs, and 504 GiB of RAM. The Esv3-Type2 runs [Esv3-series](ev3-esv3-series.md#ev3-series) VMs. +The Esv3-Type2 is a Dedicated Host SKU utilizing the Intel® Skylake (Xeon® 8171M) processor. It offers 48 physical cores, 76 vCPUs, and 504 GiB of RAM. The Esv3-Type2 runs [Esv3-series](ev3-esv3-series.md#ev3-series) VMs. -The following packing configuration outlines the max packing of uniform VMs you can put onto a Esv3-Type2 host. +The following packing configuration outlines the max packing of uniform VMs you can put onto an Esv3-Type2 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|---------|-------| -| 48 | 64 | 504 GiB | E2s v3 | 31 | +| 48 | 76 | 504 GiB | E2s v3 | 31 | | | | | E4s v3 | 15 | | | | | E8s v3 | 7 | | | | | E16s v3 | 3 | @@ -238,13 +238,13 @@ The following packing configuration outlines the max packing of uniform VMs you ### Esv3-Type3 -The Esv3-Type3 is a Dedicated Host SKU utilizing the Intel® Cascade Lake (Xeon® Platinum 8272CL) processor. It offers 52 physical cores, 64 vCPUs, and 504 GiB of RAM. The Esv3-Type3 runs [Esv3-series](ev3-esv3-series.md#ev3-series) VMs. +The Esv3-Type3 is a Dedicated Host SKU utilizing the Intel® Cascade Lake (Xeon® Platinum 8272CL) processor. It offers 52 physical cores, 80 vCPUs, and 504 GiB of RAM. The Esv3-Type3 runs [Esv3-series](ev3-esv3-series.md#ev3-series) VMs. -The following packing configuration outlines the max packing of uniform VMs you can put onto a Esv3-Type3 host. +The following packing configuration outlines the max packing of uniform VMs you can put onto an Esv3-Type3 host. | Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|---------|-------| -| 52 | 64 | 504 GiB | E2s v3 | 31 | +| 52 | 80 | 504 GiB | E2s v3 | 31 | | | | | E4s v3 | 15 | | | | | E8s v3 | 7 | | | | | E16s v3 | 3 | @@ -255,13 +255,13 @@ The following packing configuration outlines the max packing of uniform VMs you ### Esv3-Type4 -The Esv3-Type4 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 84 vCPUs, and 768 GiB of RAM. The Esv3-Type4 runs [Esv3-series](ev3-esv3-series.md#ev3-series) VMs. +The Esv3-Type4 is a Dedicated Host SKU utilizing the Intel® Ice Lake (Xeon® Platinum 8370C) processor. It offers 64 physical cores, 119 vCPUs, and 768 GiB of RAM. The Esv3-Type4 runs [Esv3-series](ev3-esv3-series.md#ev3-series) VMs. -The following packing configuration outlines the max packing of uniform VMs you can put onto a Esv3-Type4 host. +The following packing configuration outlines the max packing of uniform VMs you can put onto an Esv3-Type4 host. 
| Physical cores | Available vCPUs | Available RAM | VM Size | # VMs | |----------------|-----------------|---------------|---------|-------| -| 64 | 84 | 768 GiB | E2s v3 | 32 | +| 64 | 119 | 768 GiB | E2s v3 | 32 | | | | | E4s v3 | 21 | | | | | E8s v3 | 10 | | | | | E16s v3 | 5 | @@ -397,4 +397,4 @@ The Mdsv2MedMem-Type1 is a Dedicated Host SKU utilizing the Intel® Cascade Lake - For more information, see the [Dedicated hosts](dedicated-hosts.md) overview. -- There is sample template, available at [Azure quickstart templates](https://github.com/Azure/azure-quickstart-templates/blob/master/quickstarts/microsoft.compute/vm-dedicated-hosts/README.md), that uses both zones and fault domains for maximum resiliency in a region. +- There's a sample template, available at [Azure Quickstart Templates](https://github.com/Azure/azure-quickstart-templates/blob/master/quickstarts/microsoft.compute/vm-dedicated-hosts/README.md), which uses both zones and fault domains for maximum resiliency in a region. diff --git a/articles/virtual-machines/dedicated-host-migration-guide.md b/articles/virtual-machines/dedicated-host-migration-guide.md index 910d90a44e27d..3e6ff7716125d 100644 --- a/articles/virtual-machines/dedicated-host-migration-guide.md +++ b/articles/virtual-machines/dedicated-host-migration-guide.md @@ -142,7 +142,7 @@ On Azure portal, go through the following steps: #### Delete the old Dedicated Host -Once all VMs have been migrated from your old Dedicated Host to the target Dedicated Host, [delete the old Dedicated Host](dedicated-hosts-how-to.md#deleting-hosts). +Once all VMs have been migrated from your old Dedicated Host to the target Dedicated Host, [delete the old Dedicated Host](dedicated-hosts-how-to.md#deleting-a-host). ## Help and support diff --git a/articles/virtual-machines/dedicated-hosts-how-to.md b/articles/virtual-machines/dedicated-hosts-how-to.md index 834ac30fd7e58..79a5cb2ed3281 100644 --- a/articles/virtual-machines/dedicated-hosts-how-to.md +++ b/articles/virtual-machines/dedicated-hosts-how-to.md @@ -38,7 +38,7 @@ You can also decide to use both availability zones and fault domains. ### [Portal](#tab/portal) -In this example, we will create a host group using one availability zone and two fault domains. +In this example, we'll create a host group using one availability zone and two fault domains. 1. Open the Azure [portal](https://portal.azure.com). 1. Select **Create a resource** in the upper left corner. @@ -65,7 +65,7 @@ Not all host SKUs are available in all regions, and availability zones. You can az vm list-skus -l eastus2 -r hostGroups/hosts -o table ``` -In this example, we will use [az vm host group create](/cli/azure/vm/host/group#az-vm-host-group-create) to create a host group using both availability zones and fault domains. +In this example, we'll use [az vm host group create](/cli/azure/vm/host/group#az-vm-host-group-create) to create a host group using both availability zones and fault domains. ```azurecli-interactive az vm host group create \ @@ -145,7 +145,7 @@ If you set a fault domain count for your host group, you'll need to specify the ### [CLI](#tab/cli) -Use [az vm host create](/cli/azure/vm/host#az-vm-host-create) to create a host. If you set a fault domain count for your host group, you will be asked to specify the fault domain for your host. +Use [az vm host create](/cli/azure/vm/host#az-vm-host-create) to create a host. If you set a fault domain count for your host group, you'll be asked to specify the fault domain for your host.
```azurecli-interactive az vm host create \ @@ -305,7 +305,7 @@ You can add an existing VM to a dedicated host, but the VM must first be Stop\De - The VM can't be in an availability set. - If the VM is in an availability zone, it must be the same availability zone as the host group. The availability zone settings for the VM and the host group must match. -### [Portal](#tab/portal2) +### [Portal](#tab/portal) Move the VM to a dedicated host using the [portal](https://portal.azure.com). @@ -318,7 +318,29 @@ Move the VM to a dedicated host using the [portal](https://portal.azure.com). 1. At the top of the page, select **Start** to restart the VM. -### [PowerShell](#tab/powershell2) +### [CLI](#tab/cli) + +Move the existing VM to a dedicated host using the CLI. The VM must be Stop/Deallocated using [az vm deallocate](/cli/azure/vm#az_vm_stop) in order to assign it to a dedicated host. + +Replace the values with your own information. + +```azurecli-interactive +az vm deallocate -n myVM -g myResourceGroup +az vm update -n myVM -g myResourceGroup --host myHost +az vm start -n myVM -g myResourceGroup +``` + +For automatically placed VMs, only update the host group. For more information, see [Manual vs. automatic placement](dedicated-hosts.md#manual-vs-automatic-placement). + +Replace the values with your own information. + +```azurecli-interactive +az vm deallocate -n myVM -g myResourceGroup +az vm update -n myVM -g myResourceGroup --host-group myHostGroup +az vm start -n myVM -g myResourceGroup +``` + +### [PowerShell](#tab/powershell) Replace the values of the variables with your own information. @@ -554,10 +576,10 @@ Tags : {} --- -## Deleting hosts +## Deleting a host -being charged for your dedicated hosts even when no virtual machines are deployed. You should delete any hosts you're currently not using to save costs. +You're being charged for your dedicated host even when no virtual machines are deployed on the host. You should delete any hosts you're currently not using to save costs. You can only delete a host when there are no longer any virtual machines using it. @@ -628,5 +650,5 @@ Remove-AzResourceGroup -Name $rgName - For more information, see the [Dedicated hosts](dedicated-hosts.md) overview. -- There's sample template, available at [Azure quickstart templates](https://github.com/Azure/azure-quickstart-templates/blob/master/quickstarts/microsoft.compute/vm-dedicated-hosts/README.md), that uses both zones and fault domains for maximum resiliency in a region. +- There's a sample template, available at [Azure Quickstart Templates](https://github.com/Azure/azure-quickstart-templates/blob/master/quickstarts/microsoft.compute/vm-dedicated-hosts/README.md), which uses both zones and fault domains for maximum resiliency in a region. diff --git a/articles/virtual-machines/delete.md b/articles/virtual-machines/delete.md index 03e11a326239b..1e05871679617 100644 --- a/articles/virtual-machines/delete.md +++ b/articles/virtual-machines/delete.md @@ -267,7 +267,7 @@ PATCH https://management.azure.com/subscriptions/subID/resourceGroups/resourcegr ## Force Delete for VMs -Force delete allows you to forcefully delete your virtual machine, reducing delete latency and immediately freeing up attached resources. Force delete should only be used when you are not intending to re-use virtual hard disks. You can use force delete through Portal, CLI, PowerShell, and Rest API.
+Force delete allows you to forcefully delete your virtual machine, reducing delete latency and immediately freeing up attached resources. Force delete should only be used when you are not intending to re-use virtual hard disks. You can use force delete through Portal, CLI, PowerShell, and REST API. ### [Portal](#tab/portal3) @@ -309,7 +309,7 @@ You can use the Azure REST API to apply force delete to your virtual machines. U ## Force Delete for virtual machine scale sets -Force delete allows you to forcefully delete your **Uniform** virtual machine scale sets, reducing delete latency and immediately freeing up attached resources. Force delete should only be used when you are not intending to re-use virtual hard disks. You can use force delete through Portal, CLI, PowerShell, and Rest API. +Force delete allows you to forcefully delete your **Uniform** virtual machine scale sets, reducing delete latency and immediately freeing up attached resources. Force delete should only be used when you are not intending to re-use virtual hard disks. You can use force delete through Portal, CLI, PowerShell, and REST API. ### [Portal](#tab/portal4) diff --git a/articles/virtual-machines/disks-enable-ultra-ssd.md b/articles/virtual-machines/disks-enable-ultra-ssd.md index 54e713decbf91..70887eb359725 100644 --- a/articles/virtual-machines/disks-enable-ultra-ssd.md +++ b/articles/virtual-machines/disks-enable-ultra-ssd.md @@ -4,7 +4,7 @@ description: Learn about ultra disks for Azure VMs author: roygara ms.service: storage ms.topic: how-to -ms.date: 12/07/2021 +ms.date: 06/06/2022 ms.author: rogarana ms.subservice: disks ms.custom: references_regions, devx-track-azurecli, devx-track-azurepowershell diff --git a/articles/virtual-machines/disks-shared-enable.md b/articles/virtual-machines/disks-shared-enable.md index 1e32c8d33aa1a..a1fa7ffa3a348 100644 --- a/articles/virtual-machines/disks-shared-enable.md +++ b/articles/virtual-machines/disks-shared-enable.md @@ -4,7 +4,7 @@ description: Configure an Azure managed disk with shared disks so that you can s author: roygara ms.service: storage ms.topic: how-to -ms.date: 01/13/2022 +ms.date: 06/09/2022 ms.author: rogarana ms.subservice: disks ms.custom: devx-track-azurecli, devx-track-azurepowershell diff --git a/articles/virtual-machines/disks-shared.md b/articles/virtual-machines/disks-shared.md index e703b9e2176ce..0623f9e378ef9 100644 --- a/articles/virtual-machines/disks-shared.md +++ b/articles/virtual-machines/disks-shared.md @@ -4,7 +4,7 @@ description: Learn about sharing Azure managed disks across multiple Linux VMs. author: roygara ms.service: storage ms.topic: conceptual -ms.date: 01/13/2022 +ms.date: 06/09/2022 ms.author: rogarana ms.subservice: disks --- @@ -21,7 +21,7 @@ VMs in the cluster can read or write to their attached disk based on the reserva Shared managed disks offer shared block storage that can be accessed from multiple VMs, these are exposed as logical unit numbers (LUNs). LUNs are then presented to an initiator (VM) from a target (disk). These LUNs look like direct-attached-storage (DAS) or a local drive to the VM. -Shared managed disks do not natively offer a fully managed file system that can be accessed using SMB/NFS. You need to use a cluster manager, like Windows Server Failover Cluster (WSFC) or Pacemaker, that handles cluster node communication and write locking. +Shared managed disks don't natively offer a fully managed file system that can be accessed using SMB/NFS. 
You need to use a cluster manager, like Windows Server Failover Cluster (WSFC) or Pacemaker, that handles cluster node communication and write locking. ## Limitations @@ -31,6 +31,18 @@ Shared managed disks do not natively offer a fully managed file system that can Shared disks support several operating systems. See the [Windows](#windows) or [Linux](#linux) sections for the supported operating systems. +## Billing implications + +When you share a disk, your billing could be impacted in two different ways, depending on the type of disk. + +For shared premium SSDs, in addition to the cost of the disk's tier, there's an extra charge that increases with each VM the SSD is mounted to. See [managed disks pricing](https://azure.microsoft.com/pricing/details/managed-disks/) for details. + +Ultra disks don't have an extra charge for each VM that they're mounted to. They're billed on the total IOPS and MBps that the disk is configured for. Normally, an ultra disk has two performance throttles that determine its total IOPS/MBps. However, when configured as a shared ultra disk, two more performance throttles are exposed, for a total of four. These two additional throttles allow for increased performance at an extra expense and each meter has a default value, which raises the performance and cost of the disk. + +The four performance throttles a shared ultra disk has are diskIOPSReadWrite, diskMBpsReadWrite, diskIOPSReadOnly, and diskMBpsReadOnly. Each performance throttle can be configured to change the performance of your disk. The performance of a shared ultra disk is calculated as follows: total provisioned IOPS (diskIOPSReadWrite + diskIOPSReadOnly) and total provisioned throughput MBps (diskMBpsReadWrite + diskMBpsReadOnly). + +Once you've determined your total provisioned IOPS and total provisioned throughput, you can use them in the [pricing calculator](https://azure.microsoft.com/pricing/calculator/?service=managed-disks) to determine the cost of an ultra shared disk.
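For example, a minimal Azure CLI sketch of provisioning a shared ultra disk with all four throttles set might look like the following; the resource group, disk name, region, zone, and values are placeholders, and the read-only throttle parameters are assumed to be available in your CLI version.

```azurecli-interactive
# Minimal sketch: create a 1,024 GiB shared ultra disk with all four throttles configured.
# Total provisioned IOPS = 10000 + 100 = 10100 (diskIOPSReadWrite + diskIOPSReadOnly)
# Total provisioned MBps = 600 + 1 = 601 (diskMBpsReadWrite + diskMBpsReadOnly)
az disk create \
    --resource-group myResourceGroup \
    --name mySharedUltraDisk \
    --location eastus \
    --zone 1 \
    --sku UltraSSD_LRS \
    --size-gb 1024 \
    --max-shares 5 \
    --disk-iops-read-write 10000 \
    --disk-mbps-read-write 600 \
    --disk-iops-read-only 100 \
    --disk-mbps-read-only 1
```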
+ ## Disk sizes [!INCLUDE [virtual-machines-disks-shared-sizes](../../includes/virtual-machines-disks-shared-sizes.md)] @@ -55,8 +67,9 @@ Some popular applications running on WSFC include: Azure shared disks are supported on: - [SUSE SLE HA 15 SP1 and above](https://www.suse.com/c/azure-shared-disks-excercise-w-sles-for-sap-or-sle-ha/) - [Ubuntu 18.04 and above](https://discourse.ubuntu.com/t/ubuntu-high-availability-corosync-pacemaker-shared-disk-environments/14874) -- [RHEL 8.3 and above](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/deploying_red_hat_enterprise_linux_8_on_public_cloud_platforms/index?lb_target=production#azure-configuring-shared-block-storage-configuring-rhel-high-availability-on-azure) - - It may be possible to use RHEL 7 or an older version of RHEL 8 with shared disks, contact SharedDiskFeedback @microsoft.com +- Red Hat Enterprise Linux (RHEL) ([support policy](https://access.redhat.com/articles/3444601)) + - [RHEL 7.9](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/deploying_red_hat_enterprise_linux_7_on_public_cloud_platforms/configuring-rhel-high-availability-on-azure_cloud-content) + - [RHEL 8.3 and above](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/deploying_red_hat_enterprise_linux_8_on_public_cloud_platforms/configuring-rhel-high-availability-on-azure_cloud-content) - [Oracle Enterprise Linux](https://docs.oracle.com/en/operating-systems/oracle-linux/8/availability/) Linux clusters can use cluster managers such as [Pacemaker](https://wiki.clusterlabs.org/wiki/Pacemaker). Pacemaker builds on [Corosync](http://corosync.github.io/corosync/), enabling cluster communications for applications deployed in highly available environments. Some common clustered filesystems include [ocfs2](https://oss.oracle.com/projects/ocfs2/) and [gfs2](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/global_file_system_2/ch-overview-gfs2). You can use SCSI Persistent Reservation (SCSI PR) and/or STONITH Block Device (SBD) based clustering models for arbitrating access to the disk. When using SCSI PR, you can manipulate reservations and registrations using utilities such as [fence_scsi](http://manpages.ubuntu.com/manpages/eoan/man8/fence_scsi.8.html) and [sg_persist](https://linux.die.net/man/8/sg_persist). @@ -65,13 +78,13 @@ Linux clusters can use cluster managers such as [Pacemaker](https://wiki.cluster The following diagram illustrates a sample 2-node clustered database application that uses SCSI PR to enable failover from one node to the other. -![Two node cluster. An application running on the cluster is handling access to the disk](media/virtual-machines-disks-shared-disks/shared-disk-updated-two-node-cluster-diagram.png) +![Two node cluster consisting of Azure VM1, VM2, and a disk shared between them. An application running on the cluster handles access to the disk.](media/virtual-machines-disks-shared-disks/shared-disk-updated-two-node-cluster-diagram.png) The flow is as follows: 1. The clustered application running on both Azure VM1 and VM2 registers its intent to read or write to the disk. 1. The application instance on VM1 then takes exclusive reservation to write to the disk. -1. This reservation is enforced on your Azure disk and the database can now exclusively write to the disk. Any writes from the application instance on VM2 will not succeed. +1. 
This reservation is enforced on your Azure disk and the database can now exclusively write to the disk. Any writes from the application instance on VM2 won't succeed. 1. If the application instance on VM1 goes down, the instance on VM2 can now initiate a database failover and take-over of the disk. 1. This reservation is now enforced on the Azure disk and the disk will no longer accept writes from VM1. It will only accept writes from VM2. 1. The clustered application can complete the database failover and serve requests from VM2. @@ -89,7 +102,7 @@ The flow is as follows: ### Ultra disks reservation flow -Ultra disks offer an additional throttle, for a total of two throttles. Due to this, ultra disks reservation flow can work as described in the earlier section, or it can throttle and distribute performance more granularly. +Ultra disks offer two extra throttles, for a total of four throttles. Due to this, ultra disks reservation flow can work as described in the earlier section, or it can throttle and distribute performance more granularly. :::image type="content" source="media/virtual-machines-disks-shared-disks/ultra-reservation-table.png" alt-text="An image of a table that depicts the `ReadOnly` or `Read/Write` access for Reservation Holder, Registered, and Others."::: @@ -101,19 +114,19 @@ With premium SSD, the disk IOPS and throughput is fixed, for example, IOPS of a ### Ultra disk performance throttles -Ultra disks have the unique capability of allowing you to set your performance by exposing modifiable attributes and allowing you to modify them. By default, there are only two modifiable attributes but, shared ultra disks have two additional attributes. +Ultra disks have the unique capability of allowing you to set your performance by exposing modifiable attributes and allowing you to modify them. By default, there are only two modifiable attributes but, shared ultra disks have two more attributes. |Attribute |Description | |---------|---------| -|DiskIOPSReadWrite |The total number of IOPS allowed across all VMs mounting the share disk with write access. | +|DiskIOPSReadWrite |The total number of IOPS allowed across all VMs mounting the shared disk with write access. | |DiskMBpsReadWrite |The total throughput (MB/s) allowed across all VMs mounting the shared disk with write access. | |DiskIOPSReadOnly* |The total number of IOPS allowed across all VMs mounting the shared disk as `ReadOnly`. | |DiskMBpsReadOnly* |The total throughput (MB/s) allowed across all VMs mounting the shared disk as `ReadOnly`. | \* Applies to shared ultra disks only -The following formulas explain how the performance attributes can be set, since they are user modifiable: +The following formulas explain how the performance attributes can be set, since they're user modifiable: - DiskIOPSReadWrite/DiskIOPSReadOnly: - IOPS limits of 300 IOPS/GiB, up to a maximum of 160 K IOPS per disk @@ -145,9 +158,9 @@ The following is an example of a 4-node Linux cluster with a single writer and t :::image type="content" source="media/virtual-machines-disks-shared-disks/ultra-four-node-example.png" alt-text="Four node ultra throttling example"::: -#### Ultra pricing +##### Ultra pricing -Ultra shared disks are priced based on provisioned capacity, total provisioned IOPS (diskIOPSReadWrite + diskIOPSReadOnly) and total provisioned Throughput MBps (diskMBpsReadWrite + diskMBpsReadOnly). There is no extra charge for each additional VM mount. 
For example, an ultra shared disk with the following configuration (diskSizeGB: 1024, DiskIOPSReadWrite: 10000, DiskMBpsReadWrite: 600, DiskIOPSReadOnly: 100, DiskMBpsReadOnly: 1) is charged with 1024 GiB, 10100 IOPS, and 601 MBps regardless of whether it is mounted to two VMs or five VMs. +Ultra shared disks are priced based on provisioned capacity, total provisioned IOPS (diskIOPSReadWrite + diskIOPSReadOnly) and total provisioned Throughput MBps (diskMBpsReadWrite + diskMBpsReadOnly). There's no extra charge for each additional VM mount. For example, an ultra shared disk with the following configuration (diskSizeGB: 1024, DiskIOPSReadWrite: 10000, DiskMBpsReadWrite: 600, DiskIOPSReadOnly: 100, DiskMBpsReadOnly: 1) is charged with 1024 GiB, 10100 IOPS, and 601 MBps regardless of whether it is mounted to two VMs or five VMs. ## Next steps diff --git a/articles/virtual-machines/dv2-dsv2-series-memory.md b/articles/virtual-machines/dv2-dsv2-series-memory.md index 9dbd7067b4a75..2689bb1c6b6f6 100644 --- a/articles/virtual-machines/dv2-dsv2-series-memory.md +++ b/articles/virtual-machines/dv2-dsv2-series-memory.md @@ -4,7 +4,7 @@ description: Specifications for the Dv2 and DSv2-series VMs. author: andysports8 ms.author: shuji ms.service: virtual-machines -ms.subservice: vm-sizes-general +ms.subservice: sizes ms.topic: conceptual ms.date: 02/03/2020 --- diff --git a/articles/virtual-machines/dv2-dsv2-series.md b/articles/virtual-machines/dv2-dsv2-series.md index bd67c9620fefc..4138887cd2e07 100644 --- a/articles/virtual-machines/dv2-dsv2-series.md +++ b/articles/virtual-machines/dv2-dsv2-series.md @@ -4,7 +4,7 @@ description: Specifications for the Dv2 and Dsv2-series VMs. author: andysports8 ms.author: shuji ms.service: virtual-machines -ms.subservice: vm-sizes-general +ms.subservice: sizes ms.topic: conceptual ms.date: 02/03/2020 --- diff --git a/articles/virtual-machines/dv3-dsv3-series.md b/articles/virtual-machines/dv3-dsv3-series.md index c317f0c715be5..e889cdd65f3b8 100644 --- a/articles/virtual-machines/dv3-dsv3-series.md +++ b/articles/virtual-machines/dv3-dsv3-series.md @@ -4,7 +4,7 @@ description: Specifications for the Dv3 and Dsv3-series VMs. author: andysports8 ms.author: shuji ms.service: virtual-machines -ms.subservice: vm-sizes-general +ms.subservice: sizes ms.topic: conceptual ms.date: 09/22/2020 --- diff --git a/articles/virtual-machines/dv4-dsv4-series.md b/articles/virtual-machines/dv4-dsv4-series.md index 5760f21ab3d21..9e853a0792b2f 100644 --- a/articles/virtual-machines/dv4-dsv4-series.md +++ b/articles/virtual-machines/dv4-dsv4-series.md @@ -4,7 +4,7 @@ description: Specifications for the Dv4 and Dsv4-series VMs. author: andysports8 ms.author: shuji ms.service: virtual-machines -ms.subservice: vm-sizes-general +ms.subservice: sizes ms.topic: conceptual ms.date: 06/08/2020 --- diff --git a/articles/virtual-machines/dv5-dsv5-series.md b/articles/virtual-machines/dv5-dsv5-series.md index 8994c43589333..591d7e8aef17a 100644 --- a/articles/virtual-machines/dv5-dsv5-series.md +++ b/articles/virtual-machines/dv5-dsv5-series.md @@ -4,7 +4,7 @@ description: Specifications for the Dv5 and Dsv5-series VMs. 
author: andysports8 ms.author: shuji ms.service: virtual-machines -ms.subservice: vm-sizes-general +ms.subservice: sizes ms.topic: conceptual ms.date: 10/20/2021 --- diff --git a/articles/virtual-machines/easv5-eadsv5-series.md b/articles/virtual-machines/easv5-eadsv5-series.md index cfffe37bf3f1d..80f9baa1199ae 100644 --- a/articles/virtual-machines/easv5-eadsv5-series.md +++ b/articles/virtual-machines/easv5-eadsv5-series.md @@ -5,7 +5,7 @@ author: mamccrea ms.author: mamccrea ms.reviewer: mimckitt ms.service: virtual-machines -ms.subservice: vm-sizes-memory +ms.subservice: sizes ms.topic: conceptual ms.date: 10/8/2021 diff --git a/articles/virtual-machines/eav4-easv4-series.md b/articles/virtual-machines/eav4-easv4-series.md index 65b9a34685e6e..7ce0468a6c07e 100644 --- a/articles/virtual-machines/eav4-easv4-series.md +++ b/articles/virtual-machines/eav4-easv4-series.md @@ -3,7 +3,7 @@ title: Eav4-series and Easv4-series description: Specifications for the Eav4 and Easv4-series VMs. author: ayshakeen ms.service: virtual-machines -ms.subservice: vm-sizes-memory +ms.subservice: sizes ms.topic: conceptual ms.date: 07/13/2021 ms.author: ayshak @@ -56,7 +56,7 @@ Eav4-series sizes are based on the 2.35Ghz AMD EPYCTM 7452 processor [VM Generation Support](generation-2.md): Generations 1 and 2
 [Accelerated Networking](../virtual-network/create-vm-accelerated-networking-cli.md): Supported<br>
 [Ephemeral OS Disks](ephemeral-os-disks.md): Supported<br>
-[Nested Virtualization](/virtualization/hyper-v-on-windows/user-guide/nested-virtualization): Supported<br>
+[Nested Virtualization](/virtualization/hyper-v-on-windows/user-guide/nested-virtualization): Not Supported<br>
 <br>
                    Easv4-series sizes are based on the 2.35Ghz AMD EPYCTM 7452 processor that can achieve a boosted maximum frequency of 3.35GHz and use premium SSD. The Easv4-series sizes are ideal for memory-intensive enterprise applications. diff --git a/articles/virtual-machines/ecasv5-ecadsv5-series.md b/articles/virtual-machines/ecasv5-ecadsv5-series.md index 8a8d751006d6a..fed42f6c71300 100644 --- a/articles/virtual-machines/ecasv5-ecadsv5-series.md +++ b/articles/virtual-machines/ecasv5-ecadsv5-series.md @@ -5,7 +5,7 @@ author: runcai ms.author: runcai ms.reviewer: mimckitt ms.service: virtual-machines -ms.subservice: vm-sizes-memory +ms.subservice: sizes ms.topic: conceptual ms.date: 11/15/2021 diff --git a/articles/virtual-machines/edv4-edsv4-series.md b/articles/virtual-machines/edv4-edsv4-series.md index e848c223c00c0..d8c88bd50b23d 100644 --- a/articles/virtual-machines/edv4-edsv4-series.md +++ b/articles/virtual-machines/edv4-edsv4-series.md @@ -4,7 +4,7 @@ description: Specifications for the Ev4, Edv4, Esv4 and Edsv4-series VMs. author: andysports8 ms.author: shuji ms.service: virtual-machines -ms.subservice: vm-sizes-memory +ms.subservice: sizes ms.topic: conceptual ms.date: 10/20/2021 --- diff --git a/articles/virtual-machines/edv5-edsv5-series.md b/articles/virtual-machines/edv5-edsv5-series.md index 4b1de28437547..9ef4f896e8438 100644 --- a/articles/virtual-machines/edv5-edsv5-series.md +++ b/articles/virtual-machines/edv5-edsv5-series.md @@ -4,7 +4,7 @@ description: Specifications for the Edv5 and Edsv5-series VMs. author: andysports8 ms.author: shuji ms.service: virtual-machines -ms.subservice: vm-sizes-memory +ms.subservice: sizes ms.topic: conceptual ms.date: 10/20/2021 --- diff --git a/articles/virtual-machines/ephemeral-os-disks-faq.md b/articles/virtual-machines/ephemeral-os-disks-faq.md new file mode 100644 index 0000000000000..e53e7885aa21f --- /dev/null +++ b/articles/virtual-machines/ephemeral-os-disks-faq.md @@ -0,0 +1,71 @@ +--- +title: FAQ Ephemeral OS disks +description: Frequently asked questions on ephemeral OS disks for Azure VMs. +author: Aarthi-Vijayaraghavan +ms.service: virtual-machines +ms.workload: infrastructure-services +ms.topic: how-to +ms.date: 05/26/2022 +ms.author: aarthiv +ms.subservice: disks +ms.custom: devx-track-azurepowershell, devx-track-azurecli +--- + +# Frequently asked questions about Ephemeral OS disks + +**Q: What is the size of the local OS Disks?** + +A: We support platform, Shared Image Gallery, and custom images, up to the VM cache size with OS cache placement and up to Temp disk size with Temp disk placement, where all read/writes to the OS disk will be local on the same node as the Virtual Machine. + +**Q: Can the ephemeral OS disk be resized?** + +A: No, once the ephemeral OS disk is provisioned, the OS disk cannot be resized. + +**Q: Can the ephemeral OS disk placement be modified after creation of VM?** + +A: No, once the ephemeral OS disk is provisioned, the OS disk placement cannot be changed. But the VM can be recreated via ARM template deployment/PowerShell/CLI by updating the OS disk placement of choosing. This would result in the recreation of the VM with Data on the OS disk deleted and OS is reprovisioned. + +**Q: Is there any Temp disk created if image size equals to Temp disk size of VM size selected?** + +A: No, in that case, there won't be any Temp disk drive created. + +**Q: Are Ephemeral OS disks supported on low-priority VMs and Spot VMs?** + +A: Yes. 
There is no option of Stop-Deallocate for Ephemeral VMs, rather users need to Delete instead of deallocating them. + +**Q: Can I attach a Managed Disks to an Ephemeral VM?** + +A: Yes, you can attach a managed data disk to a VM that uses an ephemeral OS disk. + +**Q: Will all VM sizes be supported for ephemeral OS disks?** + +A: No, most Premium Storage VM sizes are supported (DS, ES, FS, GS, M, etc.). To know whether a particular VM size supports ephemeral OS disks, you can: + +Call `Get-AzComputeResourceSku` PowerShell cmdlet +```azurepowershell-interactive + +$vmSizes=Get-AzComputeResourceSku | where{$_.ResourceType -eq 'virtualMachines' -and $_.Locations.Contains('CentralUSEUAP')} + +foreach($vmSize in $vmSizes) +{ + foreach($capability in $vmSize.capabilities) + { + if($capability.Name -eq 'EphemeralOSDiskSupported' -and $capability.Value -eq 'true') + { + $vmSize + } + } +} +``` + +**Q: Can the ephemeral OS disk be applied to existing VMs and scale sets?** + +A: No, ephemeral OS disk can only be used during VM and scale set creation. + +**Q: Can you mix ephemeral and normal OS disks in a scale set?** + +A: No, you can't have a mix of ephemeral and persistent OS disk instances within the same scale set. + +**Q: Can the ephemeral OS disk be created using PowerShell or CLI?** + +A: Yes, you can create VMs with Ephemeral OS Disk using REST, Templates, PowerShell, and CLI. \ No newline at end of file diff --git a/articles/virtual-machines/ephemeral-os-disks.md b/articles/virtual-machines/ephemeral-os-disks.md index 027acd5c622b4..3a892f4dc2e2b 100644 --- a/articles/virtual-machines/ephemeral-os-disks.md +++ b/articles/virtual-machines/ephemeral-os-disks.md @@ -42,7 +42,12 @@ Key differences between persistent and ephemeral OS disks: | **Redeploy** | OS disk data is preserved | Data on the OS disk is deleted, OS is reprovisioned | | **Stop/ Start of VM** | OS disk data is preserved | Not Supported | | **Page file placement**| For Windows, page file is stored on the resource disk| For Windows, page file is stored on the OS disk (for both OS cache placement and Temp disk placement).| +| **Maintenance of VM/VMSS using [healing](understand-vm-reboots.md#unexpected-downtime)** | OS disk data is preserved | OS disk data is not preserved | +| **Maintenance of VM/VMSS using [Live Migration](maintenance-and-updates.md#live-migration)** | OS disk data is preserved | OS disk data is preserved | +## Placement options for Ephemeral OS disks +Ephemeral OS disk can be stored either on VM's OS cache disk or VM's temp/resource disk. +[DiffDiskPlacement](/rest/api/compute/virtualmachines/list#diffdiskplacement) is the new property that can be used to specify where you want to place the Ephemeral OS disk. With this feature, when a Windows VM is provisioned, we configure the pagefile to be located on the OS Disk. ## Size requirements @@ -55,13 +60,11 @@ If you want to opt for **Temp disk placement**: Standard Ubuntu server image fro > [!Important] > If opting for temp disk placement the Final Temp disk size = (Initial temp disk size - OS image size). +In the case of **Temp disk placement** as Ephemeral OS disk is placed on temp disk it will share the IOPS with temp disk as per the VM size chosen by you. + Basic Linux and Windows Server images in the Marketplace that are denoted by `[smallsize]` tend to be around 30 GiB and can use most of the available VM sizes. Ephemeral disks also require that the VM size supports **Premium storage**. 
The sizes usually (but not always) have an `s` in the name, like DSv2 and EsV3. For more information, see [Azure VM sizes](sizes.md) for details around which sizes support Premium storage. -## Placement options for Ephemeral OS disks -Ephemeral OS disk can be stored either on VM's OS cache disk or VM's temp/resource disk. -[DiffDiskPlacement](/rest/api/compute/virtualmachines/list#diffdiskplacement) is the new property that can be used to specify where you want to place the Ephemeral OS disk. -With this feature, when a Windows VM is provisioned, we configure the pagefile to be located on the OS Disk. ## Unsupported features - Capturing VM images @@ -71,7 +74,7 @@ With this feature, when a Windows VM is provisioned, we configure the pagefile t - Azure Site Recovery - OS Disk Swap - ## Trusted Launch for Ephemeral OS disks (Preview) + ## Trusted Launch for Ephemeral OS disks Ephemeral OS disks can be created with Trusted launch. Not all VM sizes and regions are supported for trusted launch. Please check [limitations of trusted launch](trusted-launch.md#limitations) for supported sizes and regions. VM guest state (VMGS) is specific to trusted launch VMs. It is a blob that is managed by Azure and contains the unified extensible firmware interface (UEFI) secure boot signature databases and other security information. While using trusted launch by default **1 GiB** from the **OS cache** or **temp storage** based on the chosen placement option is reserved for VMGS.The lifecycle of the VMGS blob is tied to that of the OS Disk. @@ -80,71 +83,12 @@ For example, If you try to create a Trusted launch Ephemeral OS disk VM using OS This is because the temp storage for [Standard_DS4_v2](dv2-dsv2-series.md) is 56 GiB, and 1 GiB is reserved for VMGS when using trusted launch. For the same example above if you create a standard Ephemeral OS disk VM you would not get any errors and it would be a successful operation. -> [!NOTE] +> [!Important] > > While using ephemeral disks for Trusted Launch VMs, keys and secrets generated or sealed by the vTPM after VM creation may not be persisted for operations like reimaging and platform events like service healing. > For more information on [how to deploy a trusted launch VM](trusted-launch-portal.md) -## Frequently asked questions - -**Q: What is the size of the local OS Disks?** - -A: We support platform, Shared Image Gallery, and custom images, up to the VM cache size with OS cache placement and up to Temp disk size with Temp disk placement, where all read/writes to the OS disk will be local on the same node as the Virtual Machine. - -**Q: Can the ephemeral OS disk be resized?** - -A: No, once the ephemeral OS disk is provisioned, the OS disk cannot be resized. - -**Q: Can the ephemeral OS disk placement be modified after creation of VM?** - -A: No, once the ephemeral OS disk is provisioned, the OS disk placement cannot be changed. But the VM can be recreated via ARM template deployment/PowerShell/CLI by updating the OS disk placement of choosing. This would result in the recreation of the VM with Data on the OS disk deleted and OS is reprovisioned. - -**Q: Is there any Temp disk created if image size equals to Temp disk size of VM size selected?** - -A: No, in that case, there won't be any Temp disk drive created. - -**Q: Are Ephemeral OS disks supported on low-priority VMs and Spot VMs?** - -A: Yes. There is no option of Stop-Deallocate for Ephemeral VMs, rather users need to Delete instead of deallocating them. 
- -**Q: Can I attach a Managed Disks to an Ephemeral VM?** - -A: Yes, you can attach a managed data disk to a VM that uses an ephemeral OS disk. - -**Q: Will all VM sizes be supported for ephemeral OS disks?** - -A: No, most Premium Storage VM sizes are supported (DS, ES, FS, GS, M, etc.). To know whether a particular VM size supports ephemeral OS disks, you can: - -Call `Get-AzComputeResourceSku` PowerShell cmdlet -```azurepowershell-interactive - -$vmSizes=Get-AzComputeResourceSku | where{$_.ResourceType -eq 'virtualMachines' -and $_.Locations.Contains('CentralUSEUAP')} - -foreach($vmSize in $vmSizes) -{ - foreach($capability in $vmSize.capabilities) - { - if($capability.Name -eq 'EphemeralOSDiskSupported' -and $capability.Value -eq 'true') - { - $vmSize - } - } -} -``` - -**Q: Can the ephemeral OS disk be applied to existing VMs and scale sets?** - -A: No, ephemeral OS disk can only be used during VM and scale set creation. - -**Q: Can you mix ephemeral and normal OS disks in a scale set?** - -A: No, you can't have a mix of ephemeral and persistent OS disk instances within the same scale set. - -**Q: Can the ephemeral OS disk be created using PowerShell or CLI?** - -A: Yes, you can create VMs with Ephemeral OS Disk using REST, Templates, PowerShell, and CLI. - > [!NOTE] > > Ephemeral disk will not be accessible through the portal. You will receive a "Resource not Found" or "404" error when accessing the ephemeral disk which is expected. @@ -152,3 +96,4 @@ A: Yes, you can create VMs with Ephemeral OS Disk using REST, Templates, PowerSh ## Next steps Create a VM with ephemeral OS disk using [Azure Portal/CLI/Powershell/ARM template](ephemeral-os-disks-deploy.md). +Check out the [frequently asked questions on ephemeral os disk](ephemeral-os-disks-faq.md). diff --git a/articles/virtual-machines/ev3-esv3-series.md b/articles/virtual-machines/ev3-esv3-series.md index a16b84c2484a4..929e358384c20 100644 --- a/articles/virtual-machines/ev3-esv3-series.md +++ b/articles/virtual-machines/ev3-esv3-series.md @@ -2,7 +2,7 @@ title: Ev3-series and Esv3-series description: Specifications for the Ev3 and Esv3-series VMs. ms.service: virtual-machines -ms.subservice: vm-sizes-memory +ms.subservice: sizes ms.topic: conceptual ms.date: 09/22/2020 author: andysports8 diff --git a/articles/virtual-machines/ev4-esv4-series.md b/articles/virtual-machines/ev4-esv4-series.md index 9434c9b434c9a..755e9390c3324 100644 --- a/articles/virtual-machines/ev4-esv4-series.md +++ b/articles/virtual-machines/ev4-esv4-series.md @@ -4,7 +4,7 @@ description: Specifications for the Ev4, and Esv4-series VMs. author: andysports8 ms.author: shuji ms.service: virtual-machines -ms.subservice: vm-sizes-memory +ms.subservice: sizes ms.topic: conceptual ms.date: 6/8/2020 diff --git a/articles/virtual-machines/ev5-esv5-series.md b/articles/virtual-machines/ev5-esv5-series.md index 8934110e3ca19..3e1a8ea2c730a 100644 --- a/articles/virtual-machines/ev5-esv5-series.md +++ b/articles/virtual-machines/ev5-esv5-series.md @@ -4,7 +4,7 @@ description: Specifications for the Ev5 and Esv5-series VMs. 
author: andysports8 ms.author: shuji ms.service: virtual-machines -ms.subservice: vm-sizes-memory +ms.subservice: sizes ms.topic: conceptual ms.date: 10/20/2021 --- diff --git a/articles/virtual-machines/extensions/agent-dependency-linux.md b/articles/virtual-machines/extensions/agent-dependency-linux.md index 3a5fa68e68042..d23c3233e450b 100644 --- a/articles/virtual-machines/extensions/agent-dependency-linux.md +++ b/articles/virtual-machines/extensions/agent-dependency-linux.md @@ -157,7 +157,7 @@ az vm extension list --resource-group myResourceGroup --vm-name myVM -o table Extension execution output is logged to the following file: ``` -/opt/microsoft/dependency-agent/log/install.log +/var/opt/microsoft/dependency-agent/log/install.log ``` ### Support diff --git a/articles/virtual-machines/extensions/custom-script-linux.md b/articles/virtual-machines/extensions/custom-script-linux.md index 1e6d5df54957f..2b7aa553600c2 100644 --- a/articles/virtual-machines/extensions/custom-script-linux.md +++ b/articles/virtual-machines/extensions/custom-script-linux.md @@ -379,7 +379,7 @@ az vm extension set \ If you deploy the Custom Script Extension from the Azure portal, you don't have control over the expiration of the SAS token for accessing the script in your storage account. The result is that the initial deployment works, but when the storage account's SAS token expires, any subsequent scaling operation fails because the Custom Script Extension can no longer access the storage account. -We recommend that you use [PowerShell](/powershell/module/az.Compute/Add-azVmssExtension?view=azps-7.0.0), the [Azure CLI](/cli/azure/vmss/extension), or an [Azure Resource Manager template](/azure/templates/microsoft.compute/virtualmachinescalesets/extensions) when you deploy the Custom Script Extension on a virtual machine scale set. This way, you can choose to use a managed identity or have direct control of the expiration of the SAS token for accessing the script in your storage account for as long as you need. +We recommend that you use [PowerShell](/powershell/module/az.Compute/Add-azVmssExtension?view=azps-7.0.0&preserve-view=true), the [Azure CLI](/cli/azure/vmss/extension), or an [Azure Resource Manager template](/azure/templates/microsoft.compute/virtualmachinescalesets/extensions) when you deploy the Custom Script Extension on a virtual machine scale set. This way, you can choose to use a managed identity or have direct control of the expiration of the SAS token for accessing the script in your storage account for as long as you need. ## Troubleshooting When the Custom Script Extension runs, the script is created or downloaded into a directory that's similar to the following example. The command output is also saved into this directory in `stdout` and `stderr` files. diff --git a/articles/virtual-machines/extensions/custom-script-windows.md b/articles/virtual-machines/extensions/custom-script-windows.md index 74773b1467a78..49ae48d17ec9e 100644 --- a/articles/virtual-machines/extensions/custom-script-windows.md +++ b/articles/virtual-machines/extensions/custom-script-windows.md @@ -291,7 +291,7 @@ The response content cannot be parsed because the Internet Explorer engine is no If you deploy the Custom Script Extension from the Azure portal, you don't have control over the expiration of the SAS token for accessing the script in your storage account. 
The result is that the initial deployment works, but when the storage account's SAS token expires, any subsequent scaling operation fails because the Custom Script Extension can no longer access the storage account. -We recommend that you use [PowerShell](/powershell/module/az.Compute/Add-azVmssExtension?view=azps-7.0.0), the [Azure CLI](/cli/azure/vmss/extension), or an Azure Resource Manager template when you deploy the Custom Script Extension on a virtual machine scale set. This way, you can choose to use a managed identity or have direct control of the expiration of the SAS token for accessing the script in your storage account for as long as you need. +We recommend that you use [PowerShell](/powershell/module/az.Compute/Add-azVmssExtension?view=azps-7.0.0&preserve-view=true), the [Azure CLI](/cli/azure/vmss/extension), or an Azure Resource Manager template when you deploy the Custom Script Extension on a virtual machine scale set. This way, you can choose to use a managed identity or have direct control of the expiration of the SAS token for accessing the script in your storage account for as long as you need. ## Classic VMs diff --git a/articles/virtual-machines/extensions/features-linux.md index ff66b9f55c53c..f6498f9601538 100644 --- a/articles/virtual-machines/extensions/features-linux.md +++ b/articles/virtual-machines/extensions/features-linux.md @@ -1,5 +1,5 @@ --- -title: Azure VM extensions and features for Linux +title: Azure VM extensions and features for Linux description: Learn what extensions are available for Azure virtual machines on Linux, grouped by what they provide or improve. ms.topic: article ms.service: virtual-machines @@ -13,11 +13,11 @@ ms.date: 03/30/2018 # Virtual machine extensions and features for Linux -Azure virtual machine (VM) extensions are small applications that provide post-deployment configuration and automation tasks on Azure VMs. For example, if a virtual machine requires software installation, antivirus protection, or the ability to run a script inside it, you can use a VM extension. +Azure virtual machine (VM) extensions are small applications that provide post-deployment configuration and automation tasks on Azure VMs. For example, if a virtual machine requires software installation, antivirus protection, or the ability to run a script inside it, you can use a VM extension. You can run Azure VM extensions by using the Azure CLI, PowerShell, Azure Resource Manager templates (ARM templates), and the Azure portal. You can bundle extensions with a new VM deployment or run them against any existing system. -This article provides an overview of Azure VM extensions, prerequisites for using them, and guidance on how to detect, manage, and remove them. This article provides generalized information because many VM extensions are available. Each has a potentially unique configuration and its own documentation. +This article provides an overview of Azure VM extensions, prerequisites for using them, and guidance on how to detect, manage, and remove them. This article provides generalized information because many VM extensions are available. Each has a potentially unique configuration and its own documentation. ## Use cases and samples @@ -25,9 +25,9 @@ Each Azure VM extension has a specific use case. Examples include: - Apply PowerShell desired state configurations (DSCs) to a VM by using the [DSC extension for Linux](https://github.com/Azure/azure-linux-extensions/tree/master/DSC).
- Configure monitoring of a VM by using the [Microsoft Monitoring Agent VM extension](/previous-versions/azure/virtual-machines/linux/tutorial-monitor). -- Configure monitoring of your Azure infrastructure by using the [Chef](https://docs.chef.io/) or [Datadog](https://www.datadoghq.com/blog/introducing-azure-monitoring-with-one-click-datadog-deployment/) extension. +- Configure monitoring of your Azure infrastructure by using the [Chef](https://docs.chef.io/) or [Datadog](https://www.datadoghq.com/blog/introducing-azure-monitoring-with-one-click-datadog-deployment/) extension. -In addition to process-specific extensions, a Custom Script extension is available for both Windows and Linux virtual machines. The [Custom Script extension for Linux](custom-script-linux.md) allows any Bash script to be run on a VM. Custom scripts are useful for designing Azure deployments that require configuration beyond what native Azure tooling can provide. +In addition to process-specific extensions, a Custom Script extension is available for both Windows and Linux virtual machines. The [Custom Script extension for Linux](custom-script-linux.md) allows any Bash script to be run on a VM. Custom scripts are useful for designing Azure deployments that require configuration beyond what native Azure tooling can provide. ## Prerequisites @@ -35,7 +35,7 @@ In addition to process-specific extensions, a Custom Script extension is availab To handle the extension on the VM, you need the [Azure Linux Agent](agent-linux.md) installed. Some individual extensions have prerequisites, such as access to resources or dependencies. -The Azure Linux Agent manages interactions between an Azure VM and the Azure fabric controller. The agent is responsible for many functional aspects of deploying and managing Azure VMs, including running VM extensions. +The Azure Linux Agent manages interactions between an Azure VM and the Azure fabric controller. The agent is responsible for many functional aspects of deploying and managing Azure VMs, including running VM extensions. The Azure Linux Agent is preinstalled on Azure Marketplace images. It can also be installed manually on supported operating systems. @@ -43,7 +43,7 @@ The agent runs on multiple operating systems. However, the extensions framework ### Network access -Extension packages are downloaded from the Azure Storage extension repository. Extension status uploads are posted to Azure Storage. +Extension packages are downloaded from the Azure Storage extension repository. Extension status uploads are posted to Azure Storage. If you use a [supported version of the Azure Linux Agent](https://support.microsoft.com/en-us/help/4049215/extensions-and-virtual-machine-agent-minimum-version-support), you don't need to allow access to Azure Storage in the VM region. You can use the agent to redirect the communication to the Azure fabric controller for agent communications. If you're on an unsupported version of the agent, you need to allow outbound access to Azure Storage in that region from the VM. @@ -56,12 +56,26 @@ To redirect agent traffic requests, the Azure Linux Agent has proxy server suppo ## Discover VM extensions +### [Azure CLI](#tab/azure-cli) + Many VM extensions are available for use with Azure VMs. To see a complete list, use [az vm extension image list](/cli/azure/vm/extension/image#az-vm-extension-image-list). 
The following example lists all available extensions in the *westus* location: ```azurecli az vm extension image list --location westus --output table ``` +### [Azure PowerShell](#tab/azure-powershell) + +Many VM extensions are available for use with Azure VMs. To see a complete list, use [Get-AzVMExtensionImage](/powershell/module/az.compute/get-azvmextensionimage). The following example lists all available extensions in the *westus* location: + +```azurepowershell +Get-AzVmImagePublisher -Location "westus" | +Get-AzVMExtensionImageType | +Get-AzVMExtensionImage | Select-Object Type, PublisherName, Version +``` + +--- + ## Run VM extensions Azure VM extensions run on existing VMs. That's useful when you need to make configuration changes or recover connectivity on an already deployed VM. VM extensions can also be bundled with ARM template deployments. By using extensions with ARM templates, you can deploy and configure Azure VMs without post-deployment intervention. @@ -70,7 +84,7 @@ You can use the following methods to run an extension against an existing VM. ### Azure CLI -You can run Azure VM extensions against an existing VM by using the [az vm extension set](/cli/azure/vm/extension#az-vm-extension-set) command. The following example runs the Custom Script extension against a VM named *myVM* in a resource group named *myResourceGroup*. Replace the example resource group name, VM name, and script to run (https:\//raw.githubusercontent.com/me/project/hello.sh) with your own information. +You can run Azure VM extensions against an existing VM by using the [az vm extension set](/cli/azure/vm/extension#az-vm-extension-set) command. The following example runs the Custom Script extension against a VM named *myVM* in a resource group named *myResourceGroup*. Replace the example resource group name, VM name, and script to run (https:\//raw.githubusercontent.com/me/project/hello.sh) with your own information. ```azurecli az vm extension set \ @@ -90,6 +104,30 @@ info: Executing command vm extension set info: vm extension set command OK ``` +### Azure PowerShell + +You can run Azure VM extensions against an existing VM by using the [Set-AzVMExtension](/powershell/module/az.compute/set-azvmextension) command. The following example runs the Custom Script extension against a VM named *myVM* in a resource group named *myResourceGroup*. Replace the example resource group name, VM name, and script to run (https:\//raw.githubusercontent.com/me/project/hello.sh) with your own information. + +```azurepowershell +$Params = @{ + ResourceGroupName = 'myResourceGroup' + VMName = 'myVM' + Name = 'CustomScript' + Publisher = 'Microsoft.Azure.Extensions' + ExtensionType = 'CustomScript' + TypeHandlerVersion = '2.1' + Settings = @{fileUris = @('https://raw.githubusercontent.com/me/project/hello.sh'); commandToExecute = './hello.sh'} +} +Set-AzVMExtension @Params +``` +When the extension runs correctly, the output is similar to the following example: + +```Output +RequestId IsSuccessStatusCode StatusCode ReasonPhrase +--------- ------------------- ---------- ------------ + True OK OK +``` + ### Azure portal You can apply VM extensions to an existing VM through the Azure portal. Select the VM in the portal, select **Extensions**, and then select **Add**. Choose the extension that you want from the list of available extensions, and follow the instructions in the wizard. 
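Whichever method you use to run an extension, you can confirm that it was applied by listing the extensions installed on the VM. The following is a minimal check with the Azure CLI; the resource group and VM names are placeholders:

```azurecli
az vm extension list \
  --resource-group myResourceGroup \
  --vm-name myVM \
  --output table
```

The table output includes the provisioning state of each extension, which is useful to review before you rerun or remove one.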
@@ -100,7 +138,7 @@ The following image shows the installation of the Custom Script extension for Li ### Azure Resource Manager templates -You can add VM extensions to an ARM template and run them with the deployment of the template. When you deploy an extension with a template, you can create fully configured Azure deployments. +You can add VM extensions to an ARM template and run them with the deployment of the template. When you deploy an extension with a template, you can create fully configured Azure deployments. For example, the following JSON is taken from a [full ARM template](https://github.com/Microsoft/dotnet-core-sample-templates/tree/master/dotnet-core-music-linux) that deploys a set of load-balanced VMs and an Azure SQL database, and then installs a .NET Core application on each VM. The VM extension takes care of the software installation. @@ -119,7 +157,7 @@ For example, the following JSON is taken from a [full ARM template](https://gith "properties": { "publisher": "Microsoft.Azure.Extensions", "type": "CustomScript", - "typeHandlerVersion": "2.0", + "typeHandlerVersion": "2.1", "autoUpgradeMinorVersion": true, "settings": { "fileUris": [ @@ -156,7 +194,7 @@ The following example shows an instance of the Custom Script extension for Linux "properties": { "publisher": "Microsoft.Azure.Extensions", "type": "CustomScript", - "typeHandlerVersion": "2.0", + "typeHandlerVersion": "2.1", "autoUpgradeMinorVersion": true, "settings": { "fileUris": [ @@ -185,7 +223,7 @@ Moving the `commandToExecute` property to the `protected` configuration helps se "properties": { "publisher": "Microsoft.Azure.Extensions", "type": "CustomScript", - "typeHandlerVersion": "2.0", + "typeHandlerVersion": "2.1", "autoUpgradeMinorVersion": true, "settings": { "fileUris": [ @@ -220,7 +258,7 @@ Publishers make updates available to regions at various times, so it's possible #### Agent updates -The Linux VM Agent contains *Provisioning Agent code* and *extension-handling code* in one package. They can't be separated. +The Linux VM Agent contains *Provisioning Agent code* and *extension-handling code* in one package. They can't be separated. You can disable the Provisioning Agent when you want to [provision on Azure by using cloud-init](../linux/using-cloud-init.md). @@ -244,12 +282,12 @@ waagent --version The output is similar to the following example: ```bash -WALinuxAgent-2.2.17 running on ubuntu 16.04 -Python: 3.6.0 -Goal state agent: 2.2.18 +WALinuxAgent-2.2.45 running on ubuntu 18.04 +Python: 3.6.9 +Goal state agent: 2.7.1.0 ``` -In the preceding example output, the parent (or package deployed version) is `WALinuxAgent-2.2.17`. The `Goal state agent` value is the auto-update version. +In the preceding example output, the parent (or package deployed version) is `WALinuxAgent-2.2.45`. The `Goal state agent` value is the auto-update version. We highly recommend that you always enable automatic update for the agent: [AutoUpdate.Enabled=y](./update-linux-agent.md). If you don't enable automatic update, you'll need to keep manually updating the agent, and you won't get bug and security fixes. @@ -262,7 +300,7 @@ Automatic extension updates are either *minor* or *hotfix*. You can opt in or op ```json "publisher": "Microsoft.Azure.Extensions", "type": "CustomScript", - "typeHandlerVersion": "2.0", + "typeHandlerVersion": "2.1", "autoUpgradeMinorVersion": true, "settings": { "fileUris": [ @@ -273,12 +311,14 @@ Automatic extension updates are either *minor* or *hotfix*. 
You can opt in or op To get the latest minor-release bug fixes, we highly recommend that you always select automatic update in your extension deployments. You can't opt out of hotfix updates that carry security or key bug fixes. -If you disable automatic updates or you need to upgrade a major version, use [az vm extension set](/cli/azure/vm/extension#az-vm-extension-set) and specify the target version. +If you disable automatic updates or you need to upgrade a major version, use [az vm extension set](/cli/azure/vm/extension#az-vm-extension-set) or [Set-AzVMExtension](/powershell/module/az.compute/set-azvmextension) and specify the target version. ### How to identify extension updates #### Identify if the extension is set with autoUpgradeMinorVersion on a VM +### [Azure CLI](#tab/azure-cli) + You can see from the VM model if the extension was provisioned with `autoUpgradeMinorVersion`. To check, use [az vm show](/cli/azure/vm#az-vm-show) and provide the resource group and VM name as follows: ```azurecli @@ -292,9 +332,31 @@ The following example output shows that `autoUpgradeMinorVersion` is set to `tru { "autoUpgradeMinorVersion": true, "forceUpdateTag": null, - "id": "/subscriptions/guid/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM/extensions/CustomScriptExtension", + "id": "/subscriptions/guid/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM/extensions/customScript", +``` + +### [Azure PowerShell](#tab/azure-powershell) + +You can see from the VM model if the extension was provisioned with `AutoUpgradeMinorVersion`. To check, use [Get-AzVM](/powershell/module/az.compute/get-azvm) and provide the resource group and VM name as follows: + +```azurepowershell +Get-AzVM -ResourceGroupName myResourceGroup -Name myVM | Select-Object -ExpandProperty Extensions +``` + +The following example output shows that `AutoUpgradeMinorVersion` is set to `True`: + +```Output +ForceUpdateTag : +Publisher : Microsoft.Azure.Extensions +VirtualMachineExtensionType : CustomScript +TypeHandlerVersion : 2.1 +AutoUpgradeMinorVersion : True +EnableAutomaticUpgrade : +... ``` +--- + #### Identify when an autoUpgradeMinorVersion event occurred To see when an update to the extension occurred, review the agent logs on the VM at */var/log/waagent.log*. @@ -326,7 +388,7 @@ To perform its tasks, the agent needs to run as *root*. ## Troubleshoot VM extensions -Each VM extension might have specific troubleshooting steps. For example, when you use the Custom Script extension, you can find script execution details locally on the VM where the extension was run. +Each VM extension might have specific troubleshooting steps. For example, when you use the Custom Script extension, you can find script execution details locally on the VM where the extension was run. 
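For the Custom Script extension in particular, the quickest way to see what actually ran is to inspect the handler log and the download directory on the VM itself. This is a minimal sketch; the exact paths and sequence numbers vary with the handler version and with how many times the extension has run:

```bash
# Azure Linux Agent log: records when the extension was downloaded and started
sudo tail -n 50 /var/log/waagent.log

# Custom Script handler log (the path can differ between handler versions)
sudo cat /var/log/azure/custom-script/handler.log

# Downloaded script plus the stdout and stderr captured from the last run
# (the numeric directory typically increments with each new execution)
sudo ls -l /var/lib/waagent/custom-script/download/0/
```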
The following troubleshooting actions apply to all VM extensions: @@ -348,11 +410,13 @@ The following troubleshooting actions apply to all VM extensions: ### View extension status +### [Azure CLI](#tab/azure-cli) + After a VM extension has been run against a VM, use [az vm get-instance-view](/cli/azure/vm#az-vm-get-instance-view) to return extension status as follows: ```azurecli az vm get-instance-view \ - --resource-group rgName \ + --resource-group myResourceGroup \ --name myVM \ --query "instanceView.extensions" ``` @@ -372,16 +436,47 @@ The output is similar to the following example: } ], "substatuses": null, - "type": "Microsoft.Azure.Extensions.customScript", - "typeHandlerVersion": "2.0.6" + "type": "Microsoft.Azure.Extensions.CustomScript", + "typeHandlerVersion": "2.1.6" } ``` +### [Azure PowerShell](#tab/azure-powershell) + +After a VM extension has been run against a VM, use [Get-AzVM](/powershell/module/az.compute/get-azvm) and specify the `-Status` switch parameter to return extension status as follows: + +```azurepowershell +Get-AzVM -ResourceGroupName myResourceGroup -Name myVM -Status | +Select-Object -ExpandProperty Extensions | +Select-Object -ExpandProperty Statuses +``` + +The output is similar to the following example: + +```Output +Code : ProvisioningState/failed/0 +Level : Error +DisplayStatus : Provisioning failed +Message : Enable failed: failed to execute command: command terminated with exit status=127 + [stdout] + + [stderr] + /bin/sh: 1: ./hello.sh: not found + +Time : +``` + +--- + You can also find extension execution status in the Azure portal. Select the VM, select **Extensions**, and then select the desired extension. ### Rerun a VM extension -There might be cases in which a VM extension needs to be rerun. You can rerun an extension by removing it, and then rerunning the extension with an execution method of your choice. To remove an extension, use [az vm extension delete](/cli/azure/vm/extension#az-vm-extension-delete) as follows: +There might be cases in which a VM extension needs to be rerun. You can rerun an extension by removing it, and then rerunning the extension with an execution method of your choice. + +### [Azure CLI](#tab/azure-cli) + +To remove an extension, use [az vm extension delete](/cli/azure/vm/extension#az-vm-extension-delete) as follows: ```azurecli az vm extension delete \ @@ -390,6 +485,18 @@ az vm extension delete \ --name customScript ``` +### [Azure PowerShell](#tab/azure-powershell) + +To remove an extension, use [Remove-AzVMExtension](/powershell/module/az.compute/remove-azvmextension) as follows: + +```azurepowershell +Remove-AzVMExtension -ResourceGroupName myResourceGroup -VMName myVM -Name customScript +``` + +To force the command to run without asking for user confirmation specify the `-Force` switch parameter. + +--- + You can also remove an extension in the Azure portal: 1. Select a VM. diff --git a/articles/virtual-machines/extensions/key-vault-windows.md b/articles/virtual-machines/extensions/key-vault-windows.md index b962e327262fc..5fee5652ea6a4 100644 --- a/articles/virtual-machines/extensions/key-vault-windows.md +++ b/articles/virtual-machines/extensions/key-vault-windows.md @@ -294,9 +294,9 @@ The Key Vault VM extension logs only exist locally on the VM and are most inform |Location|Description| |--|--| | C:\WindowsAzure\Logs\WaAppAgent.log | Shows when an update to the extension occurred. | -| C:\WindowsAzure\Logs\Plugins\Microsoft.Azure.KeyVault.KeyVaultForWindows\\ | Shows the status of certificate download. 
The download location will always be the Windows computer's MY store (certlm.msc). | -| C:\Packages\Plugins\Microsoft.Azure.KeyVault.KeyVaultForWindows\\RuntimeSettings\ | The Key Vault VM Extension service logs show the status of the akvvm_service service. | -| C:\Packages\Plugins\Microsoft.Azure.KeyVault.KeyVaultForWindows\\Status\ | The configuration and binaries for Key Vault VM Extension service. | +| C:\WindowsAzure\Logs\Plugins\Microsoft.Azure.KeyVault.KeyVaultForWindows\\\\ | Shows the status of certificate download. The download location will always be the Windows computer's MY store (certlm.msc). | +| C:\Packages\Plugins\Microsoft.Azure.KeyVault.KeyVaultForWindows\\\\RuntimeSettings\ | The Key Vault VM Extension service logs show the status of the akvvm_service service. | +| C:\Packages\Plugins\Microsoft.Azure.KeyVault.KeyVaultForWindows\\\\Status\ | The configuration and binaries for Key Vault VM Extension service. | ||| diff --git a/articles/virtual-machines/extensions/update-linux-agent.md b/articles/virtual-machines/extensions/update-linux-agent.md index 008b8c6744976..147b698c103af 100644 --- a/articles/virtual-machines/extensions/update-linux-agent.md +++ b/articles/virtual-machines/extensions/update-linux-agent.md @@ -379,15 +379,15 @@ Open [the release of Azure Linux Agent in GitHub](https://github.com/Azure/WALin For version 2.2.x or later, type: ```bash -wget https://github.com/Azure/WALinuxAgent/archive/v2.2.x.zip +wget https://github.com/Azure/WALinuxAgent/archive/refs/tags/v2.2.x.zip unzip v2.2.x.zip cd WALinuxAgent-2.2.x ``` -The following line uses version 2.2.0 as an example: +The following line uses version 2.2.14 as an example: ```bash -wget https://github.com/Azure/WALinuxAgent/archive/v2.2.14.zip +wget https://github.com/Azure/WALinuxAgent/archive/refs/tags/v2.2.14.zip unzip v2.2.14.zip cd WALinuxAgent-2.2.14 ``` diff --git a/articles/virtual-machines/field-programmable-gate-arrays-attestation.md b/articles/virtual-machines/field-programmable-gate-arrays-attestation.md index 7c4fffd29f627..51eab055a4e09 100644 --- a/articles/virtual-machines/field-programmable-gate-arrays-attestation.md +++ b/articles/virtual-machines/field-programmable-gate-arrays-attestation.md @@ -2,7 +2,7 @@ title: Azure FPGA Attestation Service description: Attestation service for the NP-series VMs. 
ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.date: 04/01/2021 --- diff --git a/articles/virtual-machines/flexible-virtual-machine-scale-sets.md b/articles/virtual-machines/flexible-virtual-machine-scale-sets.md index b499553c12f10..8d445098f6b2c 100644 --- a/articles/virtual-machines/flexible-virtual-machine-scale-sets.md +++ b/articles/virtual-machines/flexible-virtual-machine-scale-sets.md @@ -109,6 +109,7 @@ The following tables list the Flexible orchestration mode features and links to | SKUs supported | D series, E series, F series, A series, B series, Intel, AMD; Specialty SKUs (G, H, L, M, N) are not supported | | Full control over VM, NICs, Disks | Yes | | RBAC Permissions Required | Compute VMSS Write, Compute VM Write, Network | +| Cross tenant shared image gallery | No | | Accelerated networking | Yes | | Spot instances and pricing  | Yes, you can have both Spot and Regular priority instances | | Mix operating systems | Yes, Linux and Windows can reside in the same Flexible scale set | @@ -117,12 +118,13 @@ The following tables list the Flexible orchestration mode features and links to | Write Accelerator  | No | | Proximity Placement Groups  | Yes, read [Proximity Placement Groups documentation](../virtual-machine-scale-sets/proximity-placement-groups.md) | | Azure Dedicated Hosts  | No | -| Managed Identity | User Assigned Identity Only | +| Managed Identity | [User Assigned Identity](../active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vmss.md#user-assigned-managed-identity) only1 | | Add/remove existing VM to the group | No | | Service Fabric | No | | Azure Kubernetes Service (AKS) / AKE | No | | UserData | Yes | +1 For Uniform scale sets, the `GET VMSS` response will have a reference to the *identity*, *clientID*, and *principalID*. For Flexible scale sets, the response will only get a reference the *identity*. You can make a call to `Identity` to get the *clientID* and *PrincipalID*. ### Autoscaling and instance orchestration diff --git a/articles/virtual-machines/fsv2-series.md b/articles/virtual-machines/fsv2-series.md index 1151a2837c463..6c820913fcf35 100644 --- a/articles/virtual-machines/fsv2-series.md +++ b/articles/virtual-machines/fsv2-series.md @@ -1,12 +1,12 @@ --- title: Fsv2-series description: Specifications for the Fsv2-series VMs. -author: brbell +author: priyashan-19 ms.service: virtual-machines -ms.subservice: vm-sizes-compute +ms.subservice: sizes ms.topic: conceptual ms.date: 02/03/2020 -ms.author: jushiman +ms.author: priyashan --- # Fsv2-series diff --git a/articles/virtual-machines/fx-series.md b/articles/virtual-machines/fx-series.md index b08d30528000e..c554012e8f5ef 100644 --- a/articles/virtual-machines/fx-series.md +++ b/articles/virtual-machines/fx-series.md @@ -1,12 +1,12 @@ --- title: FX-series description: Specifications for the FX-series VMs. -author: brbell +author: priyashan-19 ms.service: virtual-machines -ms.subservice: vm-sizes-compute +ms.subservice: sizes ms.topic: conceptual ms.date: 06/10/2021 -ms.author: jushiman +ms.author: priyashan --- # FX-series diff --git a/articles/virtual-machines/generalize.md b/articles/virtual-machines/generalize.md index b6fb370cfed39..830e207cc2956 100644 --- a/articles/virtual-machines/generalize.md +++ b/articles/virtual-machines/generalize.md @@ -87,17 +87,15 @@ To generalize your Windows VM, follow these steps: 2. Open a Command Prompt window as an administrator. -3. 
Delete the panther directory (C:\Windows\Panther). Then change the directory to %windir%\system32\sysprep, and then run `sysprep.exe`. - -4. In the **System Preparation Tool** dialog box, select **Enter System Out-of-Box Experience (OOBE)** and select the **Generalize** check box. - -5. For **Shutdown Options**, select **Shutdown**. - -6. Select **OK**. - - :::image type="content" source="windows/media/upload-generalized-managed/sysprepgeneral.png" alt-text="![Start Sysprep](./media/upload-generalized-managed/sysprepgeneral.png)"::: +3. Delete the panther directory (C:\Windows\Panther). + +5. Then change the directory to %windir%\system32\sysprep, and then run: + ``` + sysprep /generalize /shutdown /mode:vm + ``` +6. The VM will shut down when Sysprep is finished generalizing the VM. Do not restart the VM. + -6. When Sysprep completes, it shuts down the VM. Do not restart the VM. > [!TIP] > **Optional** Use [DISM](/windows-hardware/manufacture/desktop/dism-optimize-image-command-line-options) to optimize your image and reduce your VM's first boot time. diff --git a/articles/virtual-machines/generation-2.md b/articles/virtual-machines/generation-2.md index 3503374573c5c..de13db8713b4f 100644 --- a/articles/virtual-machines/generation-2.md +++ b/articles/virtual-machines/generation-2.md @@ -1,13 +1,13 @@ --- title: Azure support for generation 2 VMs description: Overview of Azure support for generation 2 VMs -author: ju-shim +author: lauradolan ms.service: virtual-machines ms.subservice: sizes ms.workload: infrastructure-services ms.topic: how-to ms.date: 02/26/2021 -ms.author: jushiman +ms.author: ladolan --- # Support for generation 2 VMs on Azure diff --git a/articles/virtual-machines/h-series-retirement.md b/articles/virtual-machines/h-series-retirement.md index 5e04494dd679d..41e2d828e0fe6 100644 --- a/articles/virtual-machines/h-series-retirement.md +++ b/articles/virtual-machines/h-series-retirement.md @@ -2,7 +2,7 @@ title: H-series retirement description: H-series retirement started September 1, 2021. ms.service: virtual-machines -ms.subservice: vm-sizes-hpc +ms.subservice: sizes ms.topic: conceptual ms.date: 08/02/2021 --- diff --git a/articles/virtual-machines/h-series.md b/articles/virtual-machines/h-series.md index 9faf367c78d24..d2f2e13f335f2 100644 --- a/articles/virtual-machines/h-series.md +++ b/articles/virtual-machines/h-series.md @@ -2,7 +2,7 @@ title: H-series - Azure Virtual Machines description: Specifications for the H-series VMs. ms.service: virtual-machines -ms.subservice: vm-sizes-hpc +ms.subservice: sizes ms.topic: conceptual ms.date: 09/11/2021 ms.reviewer: jushiman diff --git a/articles/virtual-machines/hb-series-retirement.md b/articles/virtual-machines/hb-series-retirement.md index 1ec025cbbe062..be9df3b8b3564 100644 --- a/articles/virtual-machines/hb-series-retirement.md +++ b/articles/virtual-machines/hb-series-retirement.md @@ -2,7 +2,7 @@ title: HB-series retirement description: HB-series retirement started September 1, 2021. ms.service: virtual-machines -ms.subservice: vm-sizes-hpc +ms.subservice: sizes ms.topic: conceptual ms.date: 08/02/2021 --- diff --git a/articles/virtual-machines/hb-series.md b/articles/virtual-machines/hb-series.md index 8bacd2cade149..e30185f76b341 100644 --- a/articles/virtual-machines/hb-series.md +++ b/articles/virtual-machines/hb-series.md @@ -2,7 +2,7 @@ title: HB-series description: Specifications for the HB-series VMs. 
ms.service: virtual-machines -ms.subservice: vm-sizes-hpc +ms.subservice: sizes ms.topic: conceptual ms.date: 03/22/2021 ms.reviewer: jushiman diff --git a/articles/virtual-machines/hbv2-series.md b/articles/virtual-machines/hbv2-series.md index 82b6703cbb078..049ac426bc0c1 100644 --- a/articles/virtual-machines/hbv2-series.md +++ b/articles/virtual-machines/hbv2-series.md @@ -2,7 +2,7 @@ title: HBv2-series - Azure Virtual Machines description: Specifications for the HBv2-series VMs. ms.service: virtual-machines -ms.subservice: vm-sizes-hpc +ms.subservice: sizes ms.topic: conceptual ms.date: 03/08/2021 ms.reviewer: jushiman diff --git a/articles/virtual-machines/hbv3-series.md b/articles/virtual-machines/hbv3-series.md index 6b7ec71b761d3..fb7f7cdef303c 100644 --- a/articles/virtual-machines/hbv3-series.md +++ b/articles/virtual-machines/hbv3-series.md @@ -2,7 +2,7 @@ title: HBv3-series - Azure Virtual Machines description: Specifications for the HBv3-series VMs. ms.service: virtual-machines -ms.subservice: vm-sizes-hpc +ms.subservice: sizes ms.topic: conceptual ms.date: 01/10/2022 ms.reviewer: cynthn diff --git a/articles/virtual-machines/hc-series.md b/articles/virtual-machines/hc-series.md index 9fed4816fab3e..30fbe4aca3c48 100644 --- a/articles/virtual-machines/hc-series.md +++ b/articles/virtual-machines/hc-series.md @@ -2,7 +2,7 @@ title: HC-series - Azure Virtual Machines description: Specifications for the HC-series VMs. ms.service: virtual-machines -ms.subservice: vm-sizes-hpc +ms.subservice: sizes ms.topic: conceptual ms.date: 03/05/2021 ms.reviewer: jushiman diff --git a/articles/virtual-machines/image-builder-api-update-release-notes.md b/articles/virtual-machines/image-builder-api-update-release-notes.md index e21f9e1e12071..8d2e37c3ee9be 100644 --- a/articles/virtual-machines/image-builder-api-update-release-notes.md +++ b/articles/virtual-machines/image-builder-api-update-release-notes.md @@ -22,7 +22,14 @@ This document contains all major API changes and feature updates for the Azure I ## API Releases +### 2022-02-14 +**Improvements**: +- [Validation Support](https://docs.microsoft.com/azure/virtual-machines/linux/image-builder-json#properties-validate) + - Shell (Linux) - Script or Inline + - PowerShell (Windows) - Script or Inline, run elevated, run as system + - Source-Validation-Only mode +- [Customized staging resource group support](https://docs.microsoft.com/azure/virtual-machines/linux/image-builder-json#properties-stagingresourcegroup) ### 2021-10-01 @@ -80,7 +87,7 @@ For API versions 2021-10-01 and newer, the error output will look like the follo - Added support for customers to use their own VNet. - Added support for customers to customize the build VM (VM size, OS disk size). - Added support for user assigned MSI (for customize/distribute steps). - - Added support for [Gen2 images.](image-builder-overview.md#hyper-v-generation). 
+ - Added support for [Gen2 images.](image-builder-overview.md#hyper-v-generation) ### Preview APIs diff --git a/articles/virtual-machines/image-builder-overview.md b/articles/virtual-machines/image-builder-overview.md index 690e6aa32a26c..99f7db1fe49fe 100644 --- a/articles/virtual-machines/image-builder-overview.md +++ b/articles/virtual-machines/image-builder-overview.md @@ -35,7 +35,7 @@ While it is possible to create custom VM images by hand or by other tools, the p ### Infrastructure As Code - There is no need to manage long-term infrastructure (*like Storage Accounts to hold customization data*) or transient infrastructure (*like temporary Virtual Machine to build the image*). -- Image Builder stores your VM image build specification and customization artifacts as Azure resources removing the need of maintaining offline definitions and the risk of environment drifts caused by accidental deletions or updates. +- Image Builder stores your VM image build artifacts as Azure resources which removes the need to maintain offline definitions and the risk of environment drifts caused by accidental deletions or updates. ### Security @@ -43,7 +43,7 @@ While it is possible to create custom VM images by hand or by other tools, the p - You do not have to make your customization artifacts publicly accessible for Image Builder to be able to fetch them. Image Builder can use your [Azure Managed Identity](../active-directory/managed-identities-azure-resources/overview.md) to fetch these resources and you can restrict the privileges of this identity as tightly as required using Azure-RBAC. This not only means you can keep your artifacts secret, but they also cannot be tampered with by unauthorized actors. - Copies of customization artifacts, transient compute & storage resources, and resulting images are all stored securely within your subscription with access controlled by Azure-RBAC. This includes the build VM used to create the customized image and ensuring your customization scripts and files are not being copied to an unknown VM in an unknown subscription. Furthermore, you can achieve a high degree of isolation from other customers’ workloads using [Isolated VM offerings](./isolation.md) for the build VM. - You can connect Image Builder to your existing virtual networks so you can communicate with existing configuration servers (DSC, Chef, Puppet, etc.), file shares, or any other routable servers & services. -- You can configure Image Builder to assign your User Assigned Identities to the Image Builder Build VM (*that is created by the Image Builder service in your subscription and is used to build and customize the image*). You can then use these identities at customization time to access Azure resources, including secrets, in your subscription. There is no need to assign Image Builder direct access to those resources. +- You can configure Image Builder to assign your User Assigned Identities to the Image Builder Build VM. The Image Builder Build VM is created by the Image Builder service in your subscription and is used to build and customize the image. You can then use these identities at customization time to access Azure resources, including secrets, in your subscription. There is no need to assign Image Builder direct access to those resources. ## Regions @@ -81,6 +81,16 @@ The Azure Image Builder Service is available in the following regions: regions. 
- East Asia - Korea Central - South Africa North +- USGov Arizona (Public Preview) +- USGov Virginia (Public Preview) + +> [!IMPORTANT] +> Register the feature "Microsoft.VirtualMachineImages/FairfaxPublicPreview" to access the Azure Image Builder public preview in Fairfax regions (USGov Arizona and USGov Virginia). + +Use the following command to register the feature for Azure Image Builder in Fairfax regions (USGov Arizona and USGov Virginia). +```azurecli-interactive +az feature register --namespace Microsoft.VirtualMachineImages --name FairfaxPublicPreview +``` ## OS support @@ -94,6 +104,7 @@ Azure Image Builder will support Azure Marketplace base OS images: - Windows 10 RS5 Enterprise/Enterprise multi-session/Professional - Windows 2016 - Windows 2019 +- CBL-Mariner >[!IMPORTANT] > Listed operating systems have been tested and now work with Azure Image Builder. However, Azure Image Builder should work with any Linux or Windows image in the marketplace. diff --git a/articles/virtual-machines/image-version.md b/articles/virtual-machines/image-version.md index 264777b0f8499..a4c83ecdf0cae 100644 --- a/articles/virtual-machines/image-version.md +++ b/articles/virtual-machines/image-version.md @@ -17,7 +17,7 @@ ms.custom: # Create an image definition and an image version -A [Azure Compute Gallery](shared-image-galleries.md) (formerly known as Shared Image Gallery)simplifies custom image sharing across your organization. Custom images are like marketplace images, but you create them yourself. Images can be created from a VM, VHD, snapshot, managed image, or another image version. +A [Azure Compute Gallery](shared-image-galleries.md) (formerly known as Shared Image Gallery) simplifies custom image sharing across your organization. Custom images are like marketplace images, but you create them yourself. Images can be created from a VM, VHD, snapshot, managed image, or another image version. The Azure Compute Gallery lets you share your custom VM images with others in your organization, within or across regions, within an Azure AD tenant, or publicly using a [community gallery (preview)](azure-compute-gallery.md#community). Choose which images you want to share, which regions you want to make them available in, and who you want to share them with. You can create multiple galleries so that you can logically group images. diff --git a/articles/virtual-machines/lasv3-series.md b/articles/virtual-machines/lasv3-series.md new file mode 100644 index 0000000000000..f26b44d992697 --- /dev/null +++ b/articles/virtual-machines/lasv3-series.md @@ -0,0 +1,68 @@ +--- +title: Lasv3-series - Azure Virtual Machines +description: Specifications for the Lasv3-series of Azure Virtual Machines (Azure VMs). +author: sasha-melamed +ms.service: virtual-machines +ms.subservice: sizes +ms.topic: conceptual +ms.date: 06/01/2022 +ms.author: sasham +--- + +# Lasv3-series + +**Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Windows VMs :heavy_check_mark: Flexible scale sets :heavy_check_mark: Uniform scale sets + +The Lasv3-series of Azure Virtual Machines (Azure VMs) features high-throughput, low latency, directly mapped local NVMe storage. These VMs run on an AMD 3rd Generation EPYC™ 7763v processor in a multi-threaded configuration with an L3 cache of up to 256 MB that can achieve a boosted maximum frequency of 3.5 GHz. The Lasv3-series VMs are available in sizes from 8 to 80 vCPUs in a simultaneous multi-threading configuration. 
There are 8 GiB of memory per vCPU, and one 1.92 TB NVMe SSD device per 8 vCPUs, with up to 19.2 TB (10x1.92TB) available on the L80as_v3 size. + +> [!NOTE] +> The Lasv3-series VMs are optimized to use the local disk on the node attached directly to the VM rather than using [durable data disks](disks-types.md). This method allows for greater IOPS and throughput for your workloads. The Lsv3, Lasv3, Lsv2, and Ls-series don't support the creation of a local cache to increase the IOPS achievable by durable data disks. +> +> The high throughput and IOPS of the local disk makes the Lasv3-series VMs ideal for NoSQL stores such as Apache Cassandra and MongoDB. These stores replicate data across multiple VMs to achieve persistence in the event of the failure of a single VM. +> +> To learn more, see how optimize performance on Lasv3-series [Windows-based VMs](../virtual-machines/windows/storage-performance.md) or [Linux-based VMs](../virtual-machines/linux/storage-performance.md). + +- [Premium Storage](premium-storage-performance.md): Supported +- [Premium Storage caching](premium-storage-performance.md): Not Supported +- [Live Migration](maintenance-and-updates.md): Not Supported +- [Memory Preserving Updates](maintenance-and-updates.md): Supported +- [VM Generation Support](generation-2.md): Generation 1 and 2 +- [Accelerated Networking](../virtual-network/create-vm-accelerated-networking-cli.md): Supported +- [Ephemeral OS Disks](ephemeral-os-disks.md): Supported +- [Nested Virtualization](/virtualization/hyper-v-on-windows/user-guide/nested-virtualization): Supported + +| Size | vCPU | Memory (GiB) | Temp disk (GiB) | NVMe Disks | NVMe Disk throughput (Read IOPS/MBps) | Uncached data disk throughput (IOPS/MBps) | Max burst uncached data disk throughput (IOPS/MBps)| Max Data Disks | Max NICs | Expected network bandwidth (Mbps) | +|---|---|---|---|---|---|---|---|---|---|---| +| Standard_L8as_v3 | 8 | 64 | 80 | 1x1.92 TB | 400000/2000 | 12800/200 | 20000/1280 | 16 | 4 | 12500 | +| Standard_L16as_v3 | 16 | 128 | 160 | 2x1.92 TB | 800000/4000 | 25600/384 | 40000/1280 | 32 | 8 | 12500 | +| Standard_L32as_v3 | 32 | 256 | 320 | 4x1.92 TB | 1.5M/8000 | 51200/768 | 80000/1600 | 32 | 8 | 16000 | +| Standard_L48as_v3 | 48 | 384 | 480 | 6x1.92 TB | 2.2M/14000 | 76800/1152 | 80000/2000 | 32 | 8 | 24000 | +| Standard_L64as_v3 | 64 | 512 | 640 | 8x1.92 TB | 2.9M/16000 | 80000/1280 | 80000/2000 | 32 | 8 | 32000 | +| Standard_L80as_v3 | 80 | 640 | 800 | 10x1.92TB | 3.8M/20000 | 80000/1400 | 80000/2000 | 32 | 8 | 32000 | + +1. **Temp disk**: Lasv3-series VMs have a standard SCSI-based temp resource disk for use by the OS paging or swap file (`D:` on Windows, `/dev/sdb` on Linux). This disk provides 80 GiB of storage, 4000 IOPS, and 80 MBps transfer rate for every 8 vCPUs. For example, Standard_L80as_v3 provides 800 GiB at 40000 IOPS and 800 MBPS. This configuration ensures that the NVMe drives can be fully dedicated to application use. This disk is ephemeral, and all data is lost on stop or deallocation. +1. **NVMe Disks**: NVMe disk throughput can go higher than the specified numbers. However, higher performance isn't guaranteed. Local NVMe disks are ephemeral. Data is lost on these disks if you stop or deallocate your VM. Local NVMe disks aren't encrypted by [Azure Storage encryption](disk-encryption.md), even if you enable [encryption at host](disk-encryption.md#supported-vm-sizes). +1. 
**NVMe Disk throughput**: Hyper-V NVMe Direct technology provides unthrottled access to local NVMe drives mapped securely into the guest VM space. Lasv3 NVMe disk throughput can go higher than the specified numbers, but higher performance isn't guaranteed. To achieve maximum performance, see how to optimize performance on Lasv3-series [Windows-based VMs](../virtual-machines/windows/storage-performance.md) or [Linux-based VMs](../virtual-machines/linux/storage-performance.md). Read/write performance varies based on IO size, drive load, and capacity utilization. +1. **Max burst uncached data disk throughput**: Lasv3-series VMs can [burst their disk performance](./disk-bursting.md) for up to 30 minutes at a time. + +> [!NOTE] +> Lasv3-series VMs don't provide a host cache for the data disk because this configuration doesn't benefit the Lasv3 workloads. + +[!INCLUDE [virtual-machines-common-sizes-table-defs](../../includes/virtual-machines-common-sizes-table-defs.md)] + +## Other sizes and information + +- [General purpose](sizes-general.md) +- [Memory optimized](sizes-memory.md) +- [Storage optimized](sizes-storage.md) +- [GPU optimized](sizes-gpu.md) +- [High performance compute](sizes-hpc.md) +- [Previous generations](sizes-previous-gen.md) + +Pricing Calculator: [Pricing Calculator](https://azure.microsoft.com/pricing/calculator/) + +More information on Disks Types: [Disk Types](./disks-types.md#ultra-disks) + +## Next steps + +Learn more about how [Azure compute units (ACU)](acu.md) can help you compare compute performance across Azure SKUs. diff --git a/articles/virtual-machines/linux-vm-connect.md b/articles/virtual-machines/linux-vm-connect.md index 6473f092ab114..4b1d578e71fee 100644 --- a/articles/virtual-machines/linux-vm-connect.md +++ b/articles/virtual-machines/linux-vm-connect.md @@ -14,13 +14,13 @@ ms.author: cynthn In Azure there are multiple ways to connect to a Linux virtual machine. The most common practice for connecting to a Linux VM is using the Secure Shell Protocol (SSH). This is done via any standard SSH aware client commonly found in Linux; on Windows you can use [Windows Sub System for Linux](/windows/wsl/about) or any local terminal. You can also use [Azure Cloud Shell](../cloud-shell/overview.md) from any browser. -This document describes how to connect, via SSH, to a VM that has a public IP. If you need to connect to a VM without a public IP see [Azure Bastion Service](../bastion/bastion-overview.md) +This document describes how to connect, via SSH, to a VM that has a public IP. If you need to connect to a VM without a public IP, see [Azure Bastion Service](../bastion/bastion-overview.md). ## Prerequisites -- You need an SSH key pair. If you don't already have one Azure will create a key pair during the deployment process. If you need help with creating one manually, see [Create and use an SSH public-private key pair for Linux VMs in Azure](./linux/mac-create-ssh-keys.md). - -- In order to connect to a Linux Virtual Machine you need the appropriate port open: normally this will be port 22. The following instructions assume port 22 but the process is the same for other port numbers. You can validate an appropriate port is open for SSH using the troubleshooter or by checking manually in your VM settings. To check if port 22 is open: +- You need an SSH key pair. If you don't already have one, Azure will create a key pair during the deployment process. 
If you need help with creating one manually, see [Create and use an SSH public-private key pair for Linux VMs in Azure](./linux/mac-create-ssh-keys.md). +- You need an existing Network Security Group (NSG). Most VMs will have an NSG by default, but if you don't already have one you can create one and attach it manually. For more information, see [Create, change, or delete a network security group](../virtual-network/manage-network-security-group.md). +- To connect to a Linux VM, you need the appropriate port open. Typically this will be port 22. The following instructions assume port 22 but the process is the same for other port numbers. You can validate an appropriate port is open for SSH using the troubleshooter or by checking manually in your VM settings. To check if port 22 is open: 1. On the page for the VM, select **Networking** from the left menu. 1. On the **Networking** page, check to see if there is a rule which allows TCP on port 22 from the IP address of the computer you are using to connect to the VM. If the rule exists, you can move to the next section. @@ -28,13 +28,13 @@ This document describes how to connect, via SSH, to a VM that has a public IP. I :::image type="content" source="media/linux-vm-connect/check-rule.png" alt-text="Screenshot showing how to check to see if there is already a rule allowing S S H connections."::: 1. If there isn't a rule, add one by selecting **Add inbound port rule**. - 1. From the **Service** dropdown select **SSH**. + 1. For **Service**, select **SSH** from the dropdown. :::image type="content" source="media/linux-vm-connect/create-rule.png" alt-text="Screenshot showing where to choose S S H when creating a new N S G rule."::: 1. Edit **Priority** and **Source** if necessary 1. For **Name**, type *SSH*. - 1. When you are done, select **Add**. + 1. When you're done, select **Add**. 1. You should now have an SSH rule in the table of inbound port rules. - Your VM must have a public IP address. To check if your VM has a public IP address, select **Overview** from the left menu and look at the **Networking** section. If you see an IP address next to **Public IP address**, then your VM has a public IP @@ -52,10 +52,10 @@ This document describes how to connect, via SSH, to a VM that has a public IP. I ## Connect to the VM -Once the above prerequisites are met, you are ready to connect to your VM. Open your SSH client of choice. +Once the above prerequisites are met, you're ready to connect to your VM. Open your SSH client of choice. -- If you are using Linux or macOS this is most commonly terminal or shell. +- If you're using Linux or macOS, the SSH client is usually terminal or shell. - For a Windows machine this might be [WSL](/windows/wsl/about), or any local terminal like [PowerShell](/powershell/scripting/overview). If you do not have an SSH client you can [install WSL](/windows/wsl/install), or consider using [Azure Cloud Shell](../cloud-shell/overview.md). > [!NOTE] @@ -64,15 +64,18 @@ Once the above prerequisites are met, you are ready to connect to your VM. Open ## [WSL, macOS, or native Linux client](#tab/Linux) ### SSH with a new key pair -1. Ensure your public and private keys are in the correct directory. This is usually the ~/.ssh directory. +1. Ensure your public and private keys are in the correct directory. The directory is usually `~/.ssh`. If you generated keys manually or generated them with the CLI, then the keys are probably already there. 
However, if you downloaded them in pem format from the Azure portal, you may need to move them to the right location. This can be done with the following syntax: `mv PRIVATE_KEY_SOURCE PRIVATE_KEY_DESTINATION` For example, if the key is in the `Downloads` folder, and `myKey.pem` is the name of your SSH key, type: ```bash mv /Downloads/myKey.pem ~/.ssh - ``` -2. Ensure you have read-only access to the private Key by running + ``` + > [!NOTE] + > If you're using WSL, local files are found in the `mnt/c/` directory. Accordingly, the path to the downloads folder and SSH key would be `/mnt/c/Users/{USERNAME}/Downloads/myKey.pem` + +2. Ensure you have read-only access to the private key by running ```bash chmod 400 ~/.ssh/myKey.pem ``` @@ -84,12 +87,12 @@ Once the above prerequisites are met, you are ready to connect to your VM. Open ``` 4. Validate the returned fingerprint. - If you have never connected to this VM before you will be asked to verify the hosts fingerprint. It is tempting to simply accept the fingerprint presented, however, this exposes you to a potential person in the middle attack. You should always validate the hosts fingerprint. You only need to do this on the first time you connect from a client. To obtain the host fingerprint via the portal, use the Run Command feature to execute the command: + If you have never connected to this VM before, you'll be asked to verify the hosts fingerprint. It's tempting to simply accept the fingerprint presented, but that exposes you to a potential person in the middle attack. You should always validate the hosts fingerprint. You only need to do this the first time you connect from a client. To get the host fingerprint via the portal, use the Run Command feature to execute the command: ```bash ssh-keygen -lf /etc/ssh/ssh_host_ecdsa_key.pub | awk '{print $2}' ``` -5. Success! You should now be connected to your VM. If you are unable to connect, see [Troubleshoot SSH connections](/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection). +5. Success! You should now be connected to your VM. If you're unable to connect, see [Troubleshoot SSH connections](/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection). ### SSH With existing public key 1. Run the following command in your SSH client. In this example, *20.51.230.13* is the public IP Address of your VM and *azureuser* is the username you created when you created the VM. @@ -104,7 +107,7 @@ Once the above prerequisites are met, you are ready to connect to your VM. Open ssh-keygen -lf /etc/ssh/ssh_host_ecdsa_key.pub | awk '{print $2}' ``` -3. Success! You should now be connected to your VM. If you are unable to connect, see our troubleshooting guide [Troubleshoot SSH connections](/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection). +3. Success! You should now be connected to your VM. If you're unable to connect, see our troubleshooting guide [Troubleshoot SSH connections](/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection). ### Password authentication @@ -126,7 +129,7 @@ Once the above prerequisites are met, you are ready to connect to your VM. Open ssh-keygen -lf /etc/ssh/ssh_host_ecdsa_key.pub | awk '{print $2}' ``` -3. Success! You should now be connected to your VM. If you are unable to connect using the correct method above, see [Troubleshoot SSH connections](/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection). +3. Success! You should now be connected to your VM. 
If you're unable to connect using the correct method above, see [Troubleshoot SSH connections](/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection). ## [Windows 10 Command Line (cmd.exe, PowerShell etc.)](#tab/Windows) @@ -146,7 +149,7 @@ Once the above prerequisites are met, you are ready to connect to your VM. Open ```bash ssh-keygen -lf /etc/ssh/ssh_host_ecdsa_key.pub | awk '{print $2}' ``` -4. Success! You should now be connected to your VM. If you are unable to connect, see [Troubleshoot SSH connections](/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection). +4. Success! You should now be connected to your VM. If you're unable to connect, see [Troubleshoot SSH connections](/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection). ### Password authentication @@ -169,7 +172,7 @@ Once the above prerequisites are met, you are ready to connect to your VM. Open ssh-keygen -lf /etc/ssh/ssh_host_ecdsa_key.pub | awk '{print $2}' ``` -3. Success! You should now be connected to your VM. If you are unable to connect using the methods above, see [Troubleshoot SSH connections](/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection). +3. Success! You should now be connected to your VM. If you're unable to connect using the methods above, see [Troubleshoot SSH connections](/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection). --- diff --git a/articles/virtual-machines/linux/add-disk.md b/articles/virtual-machines/linux/add-disk.md index 11de59262ebd8..ed2b66f7676cb 100644 --- a/articles/virtual-machines/linux/add-disk.md +++ b/articles/virtual-machines/linux/add-disk.md @@ -1,22 +1,21 @@ --- title: Add a data disk to Linux VM using the Azure CLI description: Learn to add a persistent data disk to your Linux VM with the Azure CLI -author: cynthn +author: roygara ms.service: virtual-machines ms.subservice: disks ms.collection: linux ms.topic: how-to -ms.date: 05/12/2021 -ms.author: cynthn - +ms.date: 06/08/2022 +ms.author: rogarana --- + # Add a disk to a Linux VM **Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Flexible scale sets This article shows you how to attach a persistent disk to your VM so that you can preserve your data - even if your VM is reprovisioned due to maintenance or resizing. - ## Attach a new disk to a VM If you want to add a new, empty data disk on your VM, use the [az vm disk attach](/cli/azure/vm/disk) command with the `--new` parameter. If your VM is in an Availability Zone, the disk is automatically created in the same zone as the VM. For more information, see [Overview of Availability Zones](../../availability-zones/az-overview.md). The following example creates a disk named *myDataDisk* that is 50 Gb in size: @@ -30,6 +29,12 @@ az vm disk attach \ --size-gb 50 ``` +### Lower latency + +In select regions, the disk attach latency has been reduced, so you'll see an improvement of up to 15%. This is useful if you have planned/unplanned failovers between VMs, you're scaling your workload, or are running a high scale stateful workload such as Azure Kubernetes Service. However, this improvement is limited to the explicit disk attach command, `az vm disk attach`. You won't see the performance improvement if you call a command that may implicitly perform an attach, like `az vm update`. You don't need to take any action other than calling the explicit attach command to see this improvement. 
+ +[!INCLUDE [virtual-machines-disks-fast-attach-detach-regions](../../../includes/virtual-machines-disks-fast-attach-detach-regions.md)] + ## Attach an existing disk To attach an existing disk, find the disk ID and pass the ID to the [az vm disk attach](/cli/azure/vm/disk) command. The following example queries for a disk named *myDataDisk* in *myResourceGroup*, then attaches it to the VM named *myVM*: diff --git a/articles/virtual-machines/linux/azure-hybrid-benefit-byos-linux.md b/articles/virtual-machines/linux/azure-hybrid-benefit-byos-linux.md index 64b95bc7101b9..3f8dd99e71794 100644 --- a/articles/virtual-machines/linux/azure-hybrid-benefit-byos-linux.md +++ b/articles/virtual-machines/linux/azure-hybrid-benefit-byos-linux.md @@ -17,28 +17,28 @@ ms.author: mathapli # How Azure Hybrid Benefit for BYOS VMs (AHB BYOS) applies for Linux virtual machines >[!IMPORTANT] ->The below article is scoped to Azure Hybrid Benefit for BYOS VMs (AHB BYOS) which caters to conversion of custom on-prem image VMs and RHEL or SLES BYOS VMs. For conversion of RHEL PAYG or SLES PAYG VMs, refer to [Azure Hybrid Benefit for PAYG VMs here](./azure-hybrid-benefit-linux.md). +>The below article is scoped to Azure Hybrid Benefit for BYOS VMs (AHB BYOS) which caters to conversion of custom image VMs and RHEL or SLES BYOS VMs. For conversion of RHEL PAYG or SLES PAYG VMs, refer to [Azure Hybrid Benefit for PAYG VMs here](./azure-hybrid-benefit-linux.md). >[!NOTE] ->Azure Hybrid Benefit for BYOS VMs is in Preview now. You can [sign up for the preview here.](https://aka.ms/ahb-linux-form) You will receive a mail from Microsoft once your subscriptions are enabled for Preview. +>Azure Hybrid Benefit for BYOS VMs is in Public Preview now. You can start using the capability on Azure by following steps provided in the [section below](#get-started). -Azure Hybrid Benefit for BYOS VMs is a licensing benefit that helps you to get software updates and integrated support for Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES) virtual machines (VMs) directly from Azure infrastructure. This benefit is available to RHEL and SLES custom on-prem image VMs (VMs generated from on-prem images), and to RHEL and SLES Marketplace bring-your-own-subscription (BYOS) VMs. +Azure Hybrid Benefit for BYOS VMs is a licensing benefit that helps you to get software updates and integrated support for Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES) virtual machines (VMs) directly from Azure infrastructure. This benefit is available to RHEL and SLES custom image VMs (VMs generated from on-premises images), and to RHEL and SLES Marketplace bring-your-own-subscription (BYOS) VMs. ## Benefit description -Before AHB BYOS, RHEL and SLES customers who migrated their on-prem machines to Azure by creating images of on-prem systems and migrating them as VMs on Azure did not have the flexibility to get software updates directly from Azure similar to Marketplace PAYG VMs. Hence, you needed to still buy cloud access licenses from the Enterprise Linux distributors to get security support as well as software updates. With Azure Hybrid Benefit for BYOS VMs, we will allow you to get software updates and support for on-prem custom image VMs as well as RHEL and SLES BYOS VMs similar to PAYG VMs by paying the same software fees as charged to PAYG VMs. In addition, these conversions can happen without any redeployment, so you can avoid any downtime risk. 
+ Azure Hybrid Benefit for BYOS VMs allows you to get software updates and integrated support for Marketplace BYOS or on-premises migrated RHEL and SLES BYOS VMs without reboot. This benefit converts bring-your-own-subscription BYOS) billing model to pay-as-you-go (PAYG) billing model and you pay the same software fees as charged to PAYG VMs. :::image type="content" source="./media/ahb-linux/azure-hybrid-benefit-byos-cost.png" alt-text="Azure Hybrid Benefit cost visualization on Linux VMs."::: -After you enable the AHB for BYOS VMs benefit on RHEL or SLES VM, you will be charged for the additional software fee typically incurred on a PAYG VM and you will also start getting software updates typically provided to a PAYG VM. +After you enable the AHB for BYOS VMs benefit on RHEL or SLES VM,you'll be charged for the software fee typically incurred on a PAYG VM and you'll also start getting software updates typically provided to a PAYG VM. -You can also choose to convert a VM that has had the benefit enabled on it back to a BYOS billing model which will stop software billing and software updates from Azure infrastructure. +You can also choose to convert a VM that has had the benefit enabled on it back to a BYOS billing model, which will stop software billing and software updates from Azure infrastructure. ## Scope of Azure Hybrid Benefit for BYOS VMs eligibility for Linux VMs -**Azure Hybrid Benefit for BYOS VMs** is available for all RHEL and SLES custom on-prem image VMs as well as RHEL and SLES Marketplace BYOS VMs. For RHEL and SLES PAYG Marketplace VMs, [refer to AHB for PAYG VMs here](./azure-hybrid-benefit-linux.md) +**Azure Hybrid Benefit for BYOS VMs** is available for all RHEL and SLES custom image VMs as well as RHEL and SLES Marketplace BYOS VMs. For RHEL and SLES PAYG Marketplace VMs, [refer to AHB for PAYG VMs here](./azure-hybrid-benefit-linux.md) -Azure Dedicated Host instances, and SQL hybrid benefits are not eligible for Azure Hybrid Benefit for BYOS VMs if you're already using the benefit with Linux VMs. Virtual Machine Scale Sets (VMSS) are Reserved Instances (RIs) are not in scope for AHB BYOS. +Azure Dedicated Host instances, and SQL hybrid benefits aren't eligible for Azure Hybrid Benefit for BYOS VMs if you're already using the benefit with Linux VMs. Virtual Machine Scale Sets are Reserved Instances (RIs) aren't in scope for AHB BYOS. ## Get started @@ -46,57 +46,57 @@ Azure Dedicated Host instances, and SQL hybrid benefits are not eligible for Azu To start using the benefit for Red Hat: -1. Install the 'AHBForRHEL' extension on the virtual machine on which you wish to apply the AHB BYOS benefit. This is a prerequisite before moving to next step. You can do this via the portal or use Azure CLI. +1. Install the 'AHBForRHEL' extension on the virtual machine on which you wish to apply the AHB BYOS benefit. You can do this installation via Azure CLI or Powershell. 1. Depending on the software updates you want, change the license type to relevant value. Here are the available license type values and the software updates associated with them: | License Type | Software Updates | Allowed VMs| |---|---|---| - | RHEL_BASE | Installs Red Hat regular/base repositories into your virtual machine. | RHEL BYOS VMs, RHEL custom on-prem image VMs| - | RHEL_EUS | Installs Red Hat Extended Update Support (EUS) repositories into your virtual machine. | RHEL BYOS VMs, RHEL custom on-prem image VMs| - | RHEL_SAPAPPS | Installs RHEL for SAP Business Apps repositories into your virtual machine. 
| RHEL BYOS VMs, RHEL custom on-prem image VMs| - | RHEL_SAPHA | Installs RHEL for SAP with HA repositories into your virtual machine. | RHEL BYOS VMs, RHEL custom on-prem image VMs| - | RHEL_BASESAPAPPS | Installs RHEL regular/base SAP Business Apps repositories into your virtual machine. | RHEL BYOS VMs, RHEL custom on-prem image VMs| - | RHEL_BASESAPHA | Installs regular/base RHEL for SAP with HA repositories into your virtual machine.| RHEL BYOS VMs, RHEL custom on-prem image VMs| + | RHEL_BASE | Installs Red Hat regular/base repositories into your virtual machine. | RHEL BYOS VMs, RHEL custom image VMs| + | RHEL_EUS | Installs Red Hat Extended Update Support (EUS) repositories into your virtual machine. | RHEL BYOS VMs, RHEL custom image VMs| + | RHEL_SAPAPPS | Installs RHEL for SAP Business Apps repositories into your virtual machine. | RHEL BYOS VMs, RHEL custom image VMs| + | RHEL_SAPHA | Installs RHEL for SAP with HA repositories into your virtual machine. | RHEL BYOS VMs, RHEL custom image VMs| + | RHEL_BASESAPAPPS | Installs RHEL regular/base SAP Business Apps repositories into your virtual machine. | RHEL BYOS VMs, RHEL custom image VMs| + | RHEL_BASESAPHA | Installs regular/base RHEL for SAP with HA repositories into your virtual machine.| RHEL BYOS VMs, RHEL custom image VMs| 1. Wait for one hour for the extension to read the license type value and install the repositories. 1. You should now be connected to Azure Red Hat Update Infrastructure and the relevant repositories will be installed in your machine. -1. In case the extension is not running by itself, you can run it on demand as well. +1. In case the extension isn't running by itself, you can run it on demand as well. -1. In case you want to switch back to the bring-your-own-subscription model, just change the license type to 'None' and run the extension. This will remove all RHUI repositories from your virtual machine and stop the billing. +1. In case you want to switch back to the bring-your-own-subscription model, just change the license type to 'None' and run the extension. This action will remove all RHUI repositories from your virtual machine and stop the billing. >[!Note] -> In the unlikely event that extension is not able to install repositories or there are any issues, please change the license type back to empty and reach out to support for help. This will ensure you are not getting billed for software updates. +> In the unlikely event that extension isn't able to install repositories or there are any issues, please change the license type back to empty and reach out to support for help. This will ensure you aren't getting billed for software updates. ### SUSE customers -To start using the benefit for SUSE: +To start using the benefit for SLES VMs: -1. Install the Azure Hybrid Benefit for BYOS VMs extension on the virtual machine on which you wish to apply the AHB BYOS benefit. This is a prerequisite before moving to next step. +1. Install the Azure Hybrid Benefit for BYOS VMs extension on the virtual machine on which you wish to apply the AHB BYOS benefit. 1. Depending on the software updates you want, change the license type to relevant value. Here are the available license type values and the software updates associated with them: | License Type | Software Updates | Allowed VMs| |---|---|---| - | SLES | Installs SLES standard repositories into your virtual machine. | SLES BYOS VMs, SLES custom on-prem image VMs| - | SLES_SAP | Installs SLES SAP repositories into your virtual machine. 
| SLES SAP BYOS VMs, SLES custom on-prem image VMs| - | SLES_HPC | Installs SLES High Performance Compute related repositories into your virtual machine. | SLES HPC BYOS VMs, SLES custom on-prem image VMs| + | SLES | Installs SLES standard repositories into your virtual machine. | SLES BYOS VMs, SLES custom image VMs| + | SLES_SAP | Installs SLES SAP repositories into your virtual machine. | SLES SAP BYOS VMs, SLES custom image VMs| + | SLES_HPC | Installs SLES High Performance Compute related repositories into your virtual machine. | SLES HPC BYOS VMs, SLES custom image VMs| 1. Wait for 5 minutes for the extension to read the license type value and install the repositories. 1. You should now be connected to the SUSE Public Cloud Update Infrastructure on Azure and the relevant repositories will be installed in your machine. -1. In case the extension is not running by itself, you can run it on demand as well. +1. In case the extension isn't running by itself, you can run it on demand as well. -1. In case you want to switch back to the bring-your-own-subscription model, just change the license type to 'None' and run the extension. This will remove all repositories from your virtual machine and stop the billing. +1. In case you want to switch back to the bring-your-own-subscription model, just change the license type to 'None' and run the extension. This action will remove all repositories from your virtual machine and stop the billing. ## Enable and disable the benefit for RHEL You can install the `AHBForRHEL` extension to install the extension. After successfully installing the extension, -you can use the `az vm update` command to update existing license type on running VMs. For SLES VMs, run the command and set `--license-type` parameter to one of the following: `RHEL_BASE`, `RHEL_EUS`, `RHEL_SAPHA`, `RHEL_SAPAPPS`, `RHEL_BASESAPAPPS` or `RHEL_BASESAPHA`. +you can use the `az vm update` command to update existing license type on running VMs. For SLES VMs, run the command and set `--license-type` parameter to one of the following license types: `RHEL_BASE`, `RHEL_EUS`, `RHEL_SAPHA`, `RHEL_SAPAPPS`, `RHEL_BASESAPAPPS` or `RHEL_BASESAPHA`. ### CLI example to enable the benefit for RHEL @@ -128,19 +128,23 @@ you can use the `az vm update` command to update existing license type on runnin ``` 1. Wait for 5 minutes for the extension to read the license type value and install the repositories. -1. You should now be connected to Azure Red Hat Update Infrastructure and the relevant repositories will be installed in your machine. You can check the same by performing the command below on your VM which outputs installed repository packages on your VM: +1. You should now be connected to Azure Red Hat Update Infrastructure and the relevant repositories will be installed in your machine. You can validate the same by performing the command below on your VM: ```bash yum repolist ``` - 1. In case the extension is not running by itself, you can try the below command on the VM: + 1. In case the extension isn't running by itself, you can try the below command on the VM: ```bash - systemctl start azure-hybrid-benefit.service + systemctl start azure-hybrid-benefit.service + ``` + 1. You can use the below command in your RHEL VM to get the current status of the service: + ```bash + ahb-service -status ``` ## Enable and disable the benefit for SLES You can install the `AHBForSLES` extension to install the extension. 
After successfully installing the extension, -you can use the `az vm update` command to update existing license type on running VMs. For SLES VMs, run the command and set `--license-type` parameter to one of the following: `SLES`, `SLES_SAP` or `SLES_HPC`. +you can use the `az vm update` command to update existing license type on running VMs. For SLES VMs, run the command and set `--license-type` parameter to one of the following license types: `SLES_STANDARD`, `SLES_SAP` or `SLES_HPC`. ### CLI example to enable the benefit for SLES 1. Install the Azure Hybrid Benefit extension on running VM using the portal or via Azure CLI using the command below: @@ -162,7 +166,7 @@ you can use the `az vm update` command to update existing license type on runnin ``` 1. Wait for 5 minutes for the extension to read the license type value and install the repositories. -1. You should now be connected to the SUSE Public Cloud Update Infrastructure on Azure and the relevant repositories will be installed in your machine. You can verify this by performing the command below on your VM which list SUSE repositories on your VM: +1. You should now be connected to the SUSE Public Cloud Update Infrastructure on Azure and the relevant repositories will be installed in your machine. You can verify this change by performing the command below on your VM, which lists SUSE repositories on your VM: ```bash zypper repos ``` @@ -196,12 +200,12 @@ Customers who use Azure Hybrid Benefit for BYOS VMs for RHEL agree to the standa ### SUSE -To use Azure Hybrid Benefit for BYOS VMs for your SLES VMs, and for information about moving from SLES PAYG to BYOS or moving from SLES BYOS to PAYG, see [SUSE Linux Enterprise and Azure Hybrid Benefit](https://aka.ms/suse-ahb). +Customers who use Azure Hybrid Benefit for BYOS VMs for SLES and want more for information about moving from SLES PAYG to BYOS or moving from SLES BYOS to PAYG, see [SUSE Linux Enterprise and Azure Hybrid Benefit](https://aka.ms/suse-ahb). ## Frequently asked questions -*Q: What are the additional licensing cost I pay with AHB for BYOS VMs?* +*Q: What is the licensing cost I pay with AHB for BYOS VMs?* -A: On using AHB for BYOS VMs, you will essentially convert your bring your own subscription (BYOS) billing model to pay as you go (PAYG) billing model. Hence, you will be paying similar to PAYG VMs for software subscription cost. The table below maps the PAYG flavors available on Azure and links to pricing page to help you understand the cost associated with AHB for BYOS VMs. +A: On using AHB for BYOS VMs, you'll essentially convert bring-your-own-subscription (BYOS) billing model to pay-as-you-go (PAYG) billing model. Hence, you'll be paying similar to PAYG VMs for software subscription cost. The table below maps the PAYG flavors available on Azure and links to pricing page to help you understand the cost associated with AHB for BYOS VMs. | License type | Relevant PAYG VM image & Pricing Link (Keep the AHB for PAYG filter off) | |---|---|---| @@ -225,7 +229,7 @@ A: RHEL versions greater than 7.4 are supported with AHB for BYOS VMs. *Q: I've uploaded my own RHEL or SLES image from on-premises (via Azure Migrate, Azure Site Recovery, or otherwise) to Azure. Can I convert the billing on these images from BYOS to PAYG?* -A: Yes, this is the capability AHB for BYOS VMs supports. Please [follow steps shared here](#get-started). +A: Yes, this capability supports image from on-premises to Azure. Please [follow steps shared here](#get-started). 
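As an illustration of that conversion, once the relevant AHB extension ('AHBForRHEL' or 'AHBForSLES') is installed on the migrated VM, a single license-type change switches its billing to PAYG. The resource group, VM name, and chosen license type below are placeholders:

```azurecli-interactive
# Sketch only: convert a migrated BYOS RHEL VM to PAYG billing.
# Assumes the AHBForRHEL extension is already installed on the VM.
az vm update \
    --resource-group myResourceGroup \
    --name myMigratedVm \
    --license-type RHEL_BASE
```

Setting `--license-type` back to `None` later returns the VM to BYOS billing, as described in the steps above.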
*Q: Can I use Azure Hybrid Benefit for BYOS VMs on RHEL and SLES PAYG Marketplace VMs?* @@ -233,15 +237,15 @@ A: No, as these VMs are already pay-as-you-go (PAYG). However, with AHB v1 and v *Q: Can I use Azure Hybrid Benefit for BYOS VMs on virtual machine scale sets for RHEL and SLES?* -A: No, Azure Hybrid Benefit for BYOS VMs is not available for virtual machine scale sets currently. +A: No, Azure Hybrid Benefit for BYOS VMs isn't available for virtual machine scale sets currently. *Q: Can I use Azure Hybrid Benefit for BYOS VMs on a virtual machine deployed for SQL Server on RHEL images?* -A: No, you can't. There is no plan for supporting these virtual machines. +A: No, you can't. There's no plan for supporting these virtual machines. *Q: Can I use Azure Hybrid Benefit for BYOS VMs on my RHEL Virtual Data Center subscription?* -A: No, you cannot. VDC is not supported on Azure at all, including AHB. +A: No, you can't. VDC isn't supported on Azure at all, including AHB. ## Next steps diff --git a/articles/virtual-machines/linux/create-upload-generic.md b/articles/virtual-machines/linux/create-upload-generic.md index 21401ec3f746d..7be8609ddbe59 100644 --- a/articles/virtual-machines/linux/create-upload-generic.md +++ b/articles/virtual-machines/linux/create-upload-generic.md @@ -274,7 +274,8 @@ The [Azure Linux Agent](../extensions/agent-linux.md) `waagent` provisions a Lin If you want to mount, format and create swap you can either: 1. Pass this in as a cloud-init config every time you create a VM through `customdata`. This is the recommended method. 2. Use a cloud-init directive baked into the image that will do this every time the VM is created. - ``` + + ``` echo 'DefaultEnvironment="CLOUD_CFG=/etc/cloud/cloud.cfg.d/00-azure-swap.cfg"' >> /etc/systemd/system.conf cat > /etc/cloud/cloud.cfg.d/00-azure-swap.cfg << EOF #cloud-config @@ -293,7 +294,8 @@ The [Azure Linux Agent](../extensions/agent-linux.md) `waagent` provisions a Lin - ["ephemeral0.1", "/mnt"] - ["ephemeral0.2", "none", "swap", "sw,nofail,x-systemd.requires=cloud-init.service,x-systemd.device-timeout=2", "0", "0"] EOF - ``` + + ``` 1. Deprovision. > [!CAUTION] diff --git a/articles/virtual-machines/linux/detach-disk.md b/articles/virtual-machines/linux/detach-disk.md index b74ee9788d6d8..f4bd948fee17c 100644 --- a/articles/virtual-machines/linux/detach-disk.md +++ b/articles/virtual-machines/linux/detach-disk.md @@ -5,7 +5,7 @@ author: roygara ms.service: virtual-machines ms.collection: linux ms.topic: how-to -ms.date: 07/18/2018 +ms.date: 06/08/2022 ms.author: rogarana ms.subservice: disks ms.custom: devx-track-azurecli @@ -101,6 +101,12 @@ az vm disk detach \ The disk stays in storage but is no longer attached to a virtual machine. +### Lower latency + +In select regions, the disk detach latency has been reduced, so you'll see an improvement of up to 15%. This is useful if you have planned/unplanned failovers between VMs, you're scaling your workload, or are running a high scale stateful workload such as Azure Kubernetes Service. However, this improvement is limited to the explicit disk detach command, `az vm disk detach`. You won't see the performance improvement if you call a command that may implicitly perform a detach, like `az vm update`. You don't need to take any action other than calling the explicit detach command to see this improvement. 
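As a sketch, the two paths can look like the commands below; the resource group, VM, and disk names are placeholders, and only the first command is eligible for the reduced latency. The generic-update form is shown only as one example of a command that detaches a disk implicitly.

```azurecli-interactive
# Explicit detach: eligible for the lower detach latency in supported regions.
az vm disk detach \
    --resource-group myResourceGroup \
    --vm-name myVM \
    --name myDataDisk

# Implicit detach by editing the VM's data-disk list during an update:
# achieves the same result but doesn't get the latency improvement.
az vm update \
    --resource-group myResourceGroup \
    --name myVM \
    --remove storageProfile.dataDisks 0
```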
+ +[!INCLUDE [virtual-machines-disks-fast-attach-detach-regions](../../../includes/virtual-machines-disks-fast-attach-detach-regions.md)] + ## Detach a data disk using the portal diff --git a/articles/virtual-machines/linux/flatcar-create-upload-vhd.md b/articles/virtual-machines/linux/flatcar-create-upload-vhd.md index b37a430433643..cbcd0549bb9fa 100644 --- a/articles/virtual-machines/linux/flatcar-create-upload-vhd.md +++ b/articles/virtual-machines/linux/flatcar-create-upload-vhd.md @@ -32,7 +32,7 @@ Alternatively, you can choose to build your own Flatcar Container Linux image. On any linux based machine, follow the instructions detailed in the -[Flatcar Container Linux developer SDK guide](https://docs.flatcar-linux.org/os/sdk-modifying-flatcar/). When +[Flatcar Container Linux developer SDK guide](https://www.flatcar.org/docs/latest/reference/developer-guides/). When running the `image_to_vm.sh` script, make sure you pass `--format=azure` to create an Azure virtual hard disk. diff --git a/articles/virtual-machines/linux/image-builder-json.md b/articles/virtual-machines/linux/image-builder-json.md index b18bfac0e2996..1004ab0ba8148 100644 --- a/articles/virtual-machines/linux/image-builder-json.md +++ b/articles/virtual-machines/linux/image-builder-json.md @@ -30,6 +30,7 @@ This is the basic template format: "identity": {}, "properties": { "buildTimeoutInMinutes": , + "stagingResourceGroup": "/subscriptions//resourceGroups/", "vmProfile": { "vmSize": "", "proxyVmSize": "", @@ -45,8 +46,9 @@ This is the basic template format: ] }, "source": {}, - "customize": {}, - "distribute": {} + "customize": [], + "validate": {}, + "distribute": [] } } ``` @@ -93,7 +95,16 @@ The location is the region where the custom image will be created. The following - East Asia - Korea Central - South Africa North +- USGov Arizona (Public Preview) +- USGov Virginia (Public Preview) +> [!IMPORTANT] +> Register the feature "Microsoft.VirtualMachineImages/FairfaxPublicPreview" to access the Azure Image Builder public preview in Fairfax regions (USGov Arizona and USGov Virginia). + +Use the following command to register the feature for Azure Image Builder in Fairfax regions (USGov Arizona and USGov Virginia). +```azurecli-interactive +az feature register --namespace Microsoft.VirtualMachineImages --name FairfaxPublicPreview +``` ```json "location": "", @@ -145,7 +156,7 @@ There are two ways to add user assigned identities explained below. ### User Assigned Identity for Azure Image Builder image template resource -Required - For Image Builder to have permissions to read/write images, read in scripts from Azure Storage you must create an Azure User-Assigned Identity, that has permissions to the individual resources. For details on how Image Builder permissions work, and relevant steps, please review the [documentation](image-builder-user-assigned-identity.md). +Required - For Image Builder to have permissions to read/write images, read in scripts from Azure Storage you must create an Azure User-Assigned Identity, that has permissions to the individual resources. For details on how Image Builder permissions work, and relevant steps, review the [documentation](image-builder-user-assigned-identity.md). ```json @@ -169,10 +180,10 @@ For more information on deploying this feature, see [Configure managed identitie This field is only available in API versions 2021-10-01 and newer. 
-Optional - The Image Builder Build VM, that is created by the Image Builder service in your subscription, is used to build and customize the image. For the Image Builder Build VM to have permissions to authenticate with other services like Azure Key Vault in your subscription, you must create one or more Azure User Assigned Identities that have permissions to the individual resources. Azure Image Builder can then associate these User Assigned Identities with the Build VM. Customizer scripts running inside the Build VM can then fetch tokens for these identities and interact with other Azure resources as needed. Please be aware, the user assigned identity for Azure Image Builder must have the "Managed Identity Operator" role assignment on all the user assigned identities for Azure Image Builder to be able to associate them to the build VM. +Optional - The Image Builder Build VM, that is created by the Image Builder service in your subscription, is used to build and customize the image. For the Image Builder Build VM to have permissions to authenticate with other services like Azure Key Vault in your subscription, you must create one or more Azure User Assigned Identities that have permissions to the individual resources. Azure Image Builder can then associate these User Assigned Identities with the Build VM. Customizer scripts running inside the Build VM can then fetch tokens for these identities and interact with other Azure resources as needed. Be aware, the user assigned identity for Azure Image Builder must have the "Managed Identity Operator" role assignment on all the user assigned identities for Azure Image Builder to be able to associate them to the build VM. > [!NOTE] -> Please be aware that multiple identities can be specified for the Image Builder Build VM, including the identity you created for the [image template resource](#user-assigned-identity-for-azure-image-builder-image-template-resource). By default, the identity you created for the image template resource will not automatically be added to the build VM. +> Be aware that multiple identities can be specified for the Image Builder Build VM, including the identity you created for the [image template resource](#user-assigned-identity-for-azure-image-builder-image-template-resource). By default, the identity you created for the image template resource will not automatically be added to the build VM. ```json "properties": { @@ -191,6 +202,32 @@ The Image Builder Build VM User Assigned Identity: To learn more, see [How to use managed identities for Azure resources on an Azure VM to acquire an access token](../../active-directory/managed-identities-azure-resources/how-to-use-vm-token.md) and [How to use managed identities for Azure resources on an Azure VM for sign-in](../../active-directory/managed-identities-azure-resources/how-to-use-vm-sign-in.md). +## Properties: stagingResourceGroup +The `stagingResourceGroup` field contains information about the staging resource group that the Image Builder service will create for use during the image build process. The `stagingResourceGroup` is an optional field for anyone who wants more control over the resource group created by Image Builder during the image build process. You can create your own resource group and specify it in the `stagingResourceGroup` section or have Image Builder create one on your behalf. 
+ +```json + "properties": { + "stagingResourceGroup": "/subscriptions//resourceGroups/" + } +``` + +### Template Creation Scenarios + +#### The stagingResourceGroup field is left empty +If the `stagingResourceGroup` field is not specified or specified with an empty string, the Image Builder service will create a staging resource group with the default name convention "IT_***". The staging resource group will have the default tags applied to it: `createdBy`, `imageTemplateName`, `imageTemplateResourceGroupName`. Also, the default RBAC will be applied to the identity assigned to the Azure Image Builder template resource, which is "Contributor". + +#### The stagingResourceGroup field is specified with a resource group that exists +If the `stagingResourceGroup` field is specified with a resource group that does exist, then the Image Builder service will check to make sure the resource group is empty (no resources inside), in the same region as the image template, and has either "Contributor" or "Owner" RBAC applied to the identity assigned to the Azure Image Builder image template resource. If any of the aforementioned requirements are not met an error will be thrown. The staging resource group will have the following tags added to it: `usedBy`, `imageTemplateName`, `imageTemplateResourceGroupName`. Preexisting tags are not deleted. + +#### The stagingResourceGroup field is specified with a resource group that DOES NOT exist +If the `stagingResourceGroup` field is specified with a resource group that does not exist, then the Image Builder service will create a staging resource group with the name provided in the `stagingResourceGroup` field. There will be an error if the given name does not meet Azure naming requirements for resource groups. The staging resource group will have the default tags applied to it: `createdBy`, `imageTemplateName`, `imageTemplateResourceGroupName`. By default the identity assigned to the Azure Image Builder image template resource will have the "Contributor" RBAC applied to it in the resource group. + +### Template Deletion +Any staging resource group created by the Image Builder service will be deleted after the image template is deleted. This includes staging resource groups that were specified in the `stagingResourceGroup` field, but did not exist prior to the image build. + +If Image Builder did not create the staging resource group, but it did create resources inside of it, those resources will be deleted after the image template is deleted as long as the Image Builder service has the appropriate permissions or role required to delete resources. + + ## Properties: source The `source` section contains information about the source image that will be used by Image Builder. Image Builder currently only natively supports creating Hyper-V generation (Gen1) 1 images to the Azure Compute Gallery (SIG) or managed image. If you want to create Gen2 images, then you need to use a source Gen2 image, and distribute to VHD. After, you will then need to create a managed image from the VHD, and inject it into the SIG as a Gen2 image. @@ -261,7 +298,7 @@ The `imageId` should be the ResourceId of the managed image. Use `az image list` Sets the source image as an existing image version in an Azure Compute Gallery. > [!NOTE] -> The source shared image version must be of a supported OS and the image version must reside in the same region as your Azure Image Builder template, if not, please replicate the image version to the Image Builder Template region. 
+> The source shared image version must be of a supported OS and the image version must reside in the same region as your Azure Image Builder template, if not, replicate the image version to the Image Builder Template region. ```json @@ -295,10 +332,10 @@ If you find you need more time for customizations to complete, set this to what Image Builder supports multiple `customizers`. Customizers are functions that are used to customize your image, such as running scripts, or rebooting servers. When using `customize`: -- You can use multiple customizers, but they must have a unique `name`. +- You can use multiple customizers - Customizers execute in the order specified in the template. - If one customizer fails, then the whole customization component will fail and report back an error. -- It is strongly advised you test the script thoroughly before using it in a template. Debugging the script on your own VM will be easier. +- It is advised you test the script thoroughly before using it in a template. Debugging the script on your own VM will be easier. - don't put sensitive data in the scripts. - The script locations need to be publicly accessible, unless you're using [MSI](./image-builder-user-assigned-identity.md). @@ -361,7 +398,7 @@ Customize properties: * To generate the sha256Checksum, using a terminal on Mac/Linux run: `sha256sum ` > [!NOTE] -> Inline commands are stored as part of the image template definition, you can see these when you dump out the image definition. If you have sensitive commands or values (including passwords, SAS token, authentication tokens etc), it is strongly recommended these are moved into scripts, and use a user identity to authenticate to Azure Storage. +> Inline commands are stored as part of the image template definition, you can see these when you dump out the image definition. If you have sensitive commands or values (including passwords, SAS token, authentication tokens etc), it is recommended these are moved into scripts, and use a user identity to authenticate to Azure Storage. #### Super user privileges For commands to run with super user privileges, they must be prefixed with `sudo`, you can add these into scripts or use it inline commands, for example: @@ -468,7 +505,7 @@ File customizer properties: - **sourceUri** - an accessible storage endpoint, this can be GitHub or Azure storage. You can only download one file, not an entire directory. If you need to download a directory, use a compressed file, then uncompress it using the Shell or PowerShell customizers. > [!NOTE] -> If the sourceUri is an Azure Storage Account, irrespective if the blob is marked public, you will to grant the Managed User Identity permissions to read access on the blob. Please see this [example](./image-builder-user-assigned-identity.md#create-a-resource-group) to set the storage permissions. +> If the sourceUri is an Azure Storage Account, irrespective if the blob is marked public, you will to grant the Managed User Identity permissions to read access on the blob. See this [example](./image-builder-user-assigned-identity.md#create-a-resource-group) to set the storage permissions. - **destination** – this is the full destination path and file name. Any referenced path and subdirectories must exist, use the Shell or PowerShell customizers to set these up beforehand. You can use the script customizers to create the path. 
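Taken together, a File customizer entry might look like the following sketch; the customizer name, storage URL, and destination path are placeholders:

```json
"customize": [
    {
        "type": "File",
        "name": "copyAppConfig",
        "sourceUri": "https://mystorageaccount.blob.core.windows.net/config/app.conf",
        "destination": "/tmp/app.conf"
    }
]
```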
@@ -483,7 +520,7 @@ If there is an error trying to download the file, or put it in a specified direc > The file customizer is only suitable for small file downloads, < 20MB. For larger file downloads, use a script or inline command, then use code to download files, such as, Linux `wget` or `curl`, Windows, `Invoke-WebRequest`. ### Windows Update Customizer -This customizer is built on the [community Windows Update Provisioner](https://packer.io/docs/provisioners/community-supported.html) for Packer, which is an open source project maintained by the Packer community. Microsoft tests and validate the provisioner with the Image Builder service, and will support investigating issues with it, and work to resolve issues, however the open source project is not officially supported by Microsoft. For detailed documentation on and help with the Windows Update Provisioner, please see the project repository. +This customizer is built on the [community Windows Update Provisioner](https://packer.io/docs/provisioners/community-supported.html) for Packer, which is an open source project maintained by the Packer community. Microsoft tests and validate the provisioner with the Image Builder service, and will support investigating issues with it, and work to resolve issues, however the open source project is not officially supported by Microsoft. For detailed documentation on and help with the Windows Update Provisioner, see the project repository. ```json "customize": [ @@ -544,7 +581,12 @@ Write-Output '>>> Sysprep complete ...' #### Default Linux deprovision command ```bash -/usr/sbin/waagent -force -deprovision+user && export HISTSIZE=0 && sync +WAAGENT=/usr/sbin/waagent +waagent -version 1> /dev/null 2>&1 +if [ $? -eq 0 ]; then + WAAGENT=waagent +fi +$WAAGENT -force -deprovision+user && export HISTSIZE=0 && sync ``` #### Overriding the Commands @@ -554,6 +596,103 @@ To override the commands, use the PowerShell or Shell script provisioners to cre * Linux: /tmp/DeprovisioningScript.sh Image Builder will read these commands, these are written out to the AIB logs, `customization.log`. See [troubleshooting](image-builder-troubleshoot.md#customization-log) on how to collect logs. + +## Properties: validate +You can use the `validate` property to validate platform images and any customized images you create regardless of if you used Azure Image Builder to create them. + +Azure Image Builder supports a 'Source-Validation-Only' mode that can be set using the `sourceValidationOnly` field. If the `sourceValidationOnly` field is set to true, the image specified in the `source` section will directly be validated. No separate build will be run to generate and then validate a customized image. + +The `inVMValidations` field takes a list of validators that will be performed on the image. Azure Image Builder supports both PowerShell and Shell validators. + +The `continueDistributeOnFailure` field is responsible for whether the output image(s) will be distributed if validation fails. If validation fails and this field is set to false, the output image(s) will not be distributed (this is the default behavior). If validation fails and this field is set to true, the output image(s) will still be distributed. Use this option with caution as it may result in failed images being distributed for use. In either case (true or false), the end to end image run will be reported as a failed in the case of a validation failure. This field has no effect on whether validation succeeds or not. 
+ +When using `validate`: +- You can use multiple validators +- Validators execute in the order specified in the template. +- If one validator fails, then the whole validation component will fail and report back an error. +- It is advised you test the script thoroughly before using it in a template. Debugging the script on your own VM will be easier. +- Don't put sensitive data in the scripts. +- The script locations need to be publicly accessible, unless you're using [MSI](./image-builder-user-assigned-identity.md). + +How to use the `validate` property to validate Windows images + +```json +{ + "properties": { + "validate": { + "continueDistributeOnFailure": false, + "sourceValidationOnly": false, + "inVMValidations": [ + { + "type": "PowerShell", + "name": "test PowerShell validator inline", + "inline": [ + "" + ], + "validExitCodes": "", + "runElevated": , + "runAsSystem": + }, + { + "type": "PowerShell", + "name": "", + "scriptUri": "", + "runElevated": , + "sha256Checksum": "" + } + ] + }, + } +} +``` + +`inVMValidations` properties: + +- **type** – PowerShell. +- **name** - name of the validator +- **scriptUri** - URI of the PowerShell script file. +- **inline** – array of commands to be run, separated by commas. +- **validExitCodes** – Optional, valid codes that can be returned from the script/inline command, this will avoid reported failure of the script/inline command. +- **runElevated** – Optional, boolean, support for running commands and scripts with elevated permissions. +- **sha256Checksum** - Value of sha256 checksum of the file, you generate this locally, and then Image Builder will checksum and validate. + * To generate the sha256Checksum, using a PowerShell on Windows [Get-Hash](/powershell/module/microsoft.powershell.utility/get-filehash) + +How to use the `validate` property to validate Linux images + +```json +{ + "properties": { + "validate": { + "continueDistributeOnFailure": false, + "sourceValidationOnly": false, + "inVMValidations": [ + { + "type": "Shell", + "name": "", + "inline": [ + "" + ] + }, + { + "type": "Shell", + "name": "", + "scriptUri": "", + "sha256Checksum": "" + } + ] + }, + } + } +``` + +`inVMValidations` properties: + +- **type** – Shell +- **name** - name of the validator +- **scriptUri** - URI of the script file +- **inline** - array of commands to be run, separated by commas. +- **sha256Checksum** - Value of sha256 checksum of the file, you generate this locally, and then Image Builder will checksum and validate. + * To generate the sha256Checksum, using a terminal on Mac/Linux run: `sha256sum ` ## Properties: distribute @@ -731,7 +870,7 @@ az resource invoke-action \ ### Cancelling an Image Build If you're running an image build that you believe is incorrect, waiting for user input, or you feel will never complete successfully, then you can cancel the build. -The build can be canceled any time. If the distribution phase has started you can still cancel, but you will need to clean up any images that may not be completed. The cancel command doesn't wait for cancel to complete, please monitor `lastrunstatus.runstate` for canceling progress, using these status [commands](image-builder-troubleshoot.md#customization-log). +The build can be canceled any time. If the distribution phase has started you can still cancel, but you will need to clean up any images that may not be completed. 
The cancel command doesn't wait for cancel to complete, monitor `lastrunstatus.runstate` for canceling progress, using these status [commands](image-builder-troubleshoot.md#customization-log). Examples of `cancel` commands: diff --git a/articles/virtual-machines/linux/media/quick-create-cli/nginix-welcome-page-debian.png b/articles/virtual-machines/linux/media/quick-create-cli/nginix-welcome-page-debian.png new file mode 100644 index 0000000000000..d9fc3b9c72e44 Binary files /dev/null and b/articles/virtual-machines/linux/media/quick-create-cli/nginix-welcome-page-debian.png differ diff --git a/articles/virtual-machines/linux/media/quick-create-cli/nginix-welcome-page.png b/articles/virtual-machines/linux/media/quick-create-cli/nginix-welcome-page.png deleted file mode 100644 index eba4da317eb8b..0000000000000 Binary files a/articles/virtual-machines/linux/media/quick-create-cli/nginix-welcome-page.png and /dev/null differ diff --git a/articles/virtual-machines/linux/media/quick-create-cli/nginx-welcome-page.png b/articles/virtual-machines/linux/media/quick-create-cli/nginx-welcome-page.png new file mode 100644 index 0000000000000..7d27b0d3b9a95 Binary files /dev/null and b/articles/virtual-machines/linux/media/quick-create-cli/nginx-welcome-page.png differ diff --git a/articles/virtual-machines/linux/media/quick-create-cli/view-the-nginx-welcome-page.png b/articles/virtual-machines/linux/media/quick-create-cli/view-the-nginx-welcome-page.png deleted file mode 100644 index eba4da317eb8b..0000000000000 Binary files a/articles/virtual-machines/linux/media/quick-create-cli/view-the-nginx-welcome-page.png and /dev/null differ diff --git a/articles/virtual-machines/linux/n-series-driver-setup.md b/articles/virtual-machines/linux/n-series-driver-setup.md index 680c73d06548e..5238bca122d15 100644 --- a/articles/virtual-machines/linux/n-series-driver-setup.md +++ b/articles/virtual-machines/linux/n-series-driver-setup.md @@ -4,7 +4,7 @@ description: How to set up NVIDIA GPU drivers for N-series VMs running Linux in services: virtual-machines author: vikancha-MSFT ms.service: virtual-machines -ms.subervice: vm-sizes-gpu +ms.subservice: vm-sizes-gpu ms.collection: linux ms.topic: how-to ms.workload: infrastructure-services @@ -48,7 +48,7 @@ Then run installation commands specific for your distribution. > [!NOTE] > The example below shows the CUDA package path for Ubuntu 16.04. Replace the path specific to the version you plan to use. > - > Visit the [Nvidia Download Center] (https://developer.download.nvidia.com/compute/cuda/repos/) for the full path specific to each version. + > Visit the [Nvidia Download Center](https://developer.download.nvidia.com/compute/cuda/repos/) for the full path specific to each version. > ```bash CUDA_REPO_PKG=cuda-repo-ubuntu1604_10.0.130-1_amd64.deb @@ -97,7 +97,7 @@ sudo reboot 2. Install the latest [Linux Integration Services for Hyper-V and Azure](https://www.microsoft.com/download/details.aspx?id=55106). Check if LIS is required by verifying the results of lspci. If all GPU devices are listed as expected (and documented above), installing LIS is not required. - Please note that LIS is applicable to Red Hat Enterprise Linux, CentOS, and the Oracle Linux Red Hat Compatible Kernel 5.2-5.11, 6.0-6.10, and 7.0-7.7. Please refer to the [Linux Integration Services documentation] (https://www.microsoft.com/en-us/download/details.aspx?id=55106) for more details. 
+ Please note that LIS is applicable to Red Hat Enterprise Linux, CentOS, and the Oracle Linux Red Hat Compatible Kernel 5.2-5.11, 6.0-6.10, and 7.0-7.7. Please refer to the [Linux Integration Services documentation](https://www.microsoft.com/en-us/download/details.aspx?id=55106) for more details. Skip this step if you plan to use CentOS/RHEL 7.8 (or higher versions) as LIS is no longer required for these versions. ```bash @@ -115,7 +115,7 @@ sudo reboot sudo rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm sudo yum install dkms - wget https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/cuda-rhel7.repo /etc/yum.repos.d/cuda-rhel7.repo + sudo wget https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/cuda-rhel7.repo -O /etc/yum.repos.d/cuda-rhel7.repo sudo yum install cuda-drivers ``` @@ -132,7 +132,7 @@ For example, CentOS 8 and RHEL 8 will need the following steps. sudo rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm sudo yum install dkms - wget https://developer.download.nvidia.com/compute/cuda/repos/rhel8/x86_64/cuda-rhel8.repo /etc/yum.repos.d/cuda-rhel8.repo + sudo wget https://developer.download.nvidia.com/compute/cuda/repos/rhel8/x86_64/cuda-rhel8.repo -O /etc/yum.repos.d/cuda-rhel8.repo sudo yum install cuda-drivers ``` diff --git a/articles/virtual-machines/linux/quick-create-cli.md b/articles/virtual-machines/linux/quick-create-cli.md index 2b3eb2c6a66a6..b9072583c71b5 100644 --- a/articles/virtual-machines/linux/quick-create-cli.md +++ b/articles/virtual-machines/linux/quick-create-cli.md @@ -6,7 +6,7 @@ ms.service: virtual-machines ms.collection: linux ms.topic: quickstart ms.workload: infrastructure -ms.date: 03/30/2021 +ms.date: 06/01/2022 ms.author: cynthn ms.custom: mvc, seo-javascript-september2019, seo-javascript-october2019, seo-python-october2019, devx-track-azurecli, mode-api --- @@ -47,7 +47,7 @@ The following example creates a VM named *myVM* and adds a user account named *a az vm create \ --resource-group myResourceGroup \ --name myVM \ - --image UbuntuLTS \ + --image Debian \ --admin-username azureuser \ --generate-ssh-keys ``` @@ -67,42 +67,33 @@ It takes a few minutes to create the VM and supporting resources. The following } ``` -Note your own `publicIpAddress` in the output from your VM. This address is used to access the VM in the next steps. +Make a note of the `publicIpAddress` to use later. -[!INCLUDE [ephemeral-ip-note.md](../../../includes/ephemeral-ip-note.md)] - -## Open port 80 for web traffic +## Install web server -By default, only SSH connections are opened when you create a Linux VM in Azure. Use [az vm open-port](/cli/azure/vm) to open TCP port 80 for use with the NGINX web server: +To see your VM in action, install the NGINX web server. Update your package sources and then install the latest NGINX package. ```azurecli-interactive -az vm open-port --port 80 --resource-group myResourceGroup --name myVM -``` - -## Connect to virtual machine - -SSH to your VM as normal. Replace the IP address in the example with the public IP address of your VM as noted in the previous output: - -```bash -ssh azureuser@40.68.254.142 +az vm run-command invoke \ + -g myResourceGroup \ + -n myVM \ + --command-id RunShellScript \ + --scripts "sudo apt-get update && sudo apt-get install -y nginx" ``` -## Install web server +## Open port 80 for web traffic -To see your VM in action, install the NGINX web server. 
Update your package sources and then install the latest NGINX package. +By default, only SSH connections are opened when you create a Linux VM in Azure. Use [az vm open-port](/cli/azure/vm) to open TCP port 80 for use with the NGINX web server: -```bash -sudo apt-get -y update -sudo apt-get -y install nginx +```azurecli-interactive +az vm open-port --port 80 --resource-group myResourceGroup --name myVM ``` -When done, type `exit` to leave the SSH session. - ## View the web server in action Use a web browser of your choice to view the default NGINX welcome page. Use the public IP address of your VM as the web address. The following example shows the default NGINX web site: -![View the NGINX welcome page](./media/quick-create-cli/view-the-nginx-welcome-page.png) +![Screenshot showing the N G I N X default web page.](./media/quick-create-cli/nginix-welcome-page-debian.png) ## Clean up resources @@ -114,8 +105,10 @@ az group delete --name myResourceGroup ## Next steps -In this quickstart, you deployed a simple virtual machine, open a network port for web traffic, and installed a basic web server. To learn more about Azure virtual machines, continue to the tutorial for Linux VMs. +In this quickstart, you deployed a simple virtual machine, opened a network port for web traffic, and installed a basic web server. To learn more about Azure virtual machines, continue to the tutorial for Linux VMs. > [!div class="nextstepaction"] > [Azure Linux virtual machine tutorials](./tutorial-manage-vm.md) + + diff --git a/articles/virtual-machines/linux/quick-create-powershell.md b/articles/virtual-machines/linux/quick-create-powershell.md index 747a63c8bd250..3ecaf6f02cc1e 100644 --- a/articles/virtual-machines/linux/quick-create-powershell.md +++ b/articles/virtual-machines/linux/quick-create-powershell.md @@ -6,7 +6,7 @@ ms.service: virtual-machines ms.collection: linux ms.topic: quickstart ms.workload: infrastructure -ms.date: 01/14/2022 +ms.date: 06/01/2022 ms.author: cynthn ms.custom: mvc, devx-track-azurepowershell, --- @@ -31,13 +31,12 @@ To open the Cloud Shell, just select **Try it** from the upper right corner of a Create an Azure resource group with [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup). A resource group is a logical container into which Azure resources are deployed and managed: ```azurepowershell-interactive -New-AzResourceGroup -Name "myResourceGroup" -Location "EastUS" +New-AzResourceGroup -Name 'myResourceGroup' -Location 'EastUS' ``` - ## Create a virtual machine -We will be automatically generating an SSH key pair to use for connecting to the VM. The public key that is created using `-GenerateSshKey` will be stored in Azure as a resource, using the name you provide as `SshKeyName`. The SSH key resource can be reused for creating additional VMs. Both the public and private keys will also downloaded for you. When you create your SSH key pair using the Cloud Shell, the keys are stored in a [storage account that is automatically created by Cloud Shell](../../cloud-shell/persisting-shell-storage.md). Don't delete the storage account, or the file share in it, until after you have retrieved your keys or you will lose access to the VM. +We will be automatically generating an SSH key pair to use for connecting to the VM. The public key that is created using `-GenerateSshKey` will be stored in Azure as a resource, using the name you provide as `SshKeyName`. The SSH key resource can be reused for creating additional VMs. 
Both the public and private keys will also be downloaded for you. When you create your SSH key pair using the Cloud Shell, the keys are stored in a [storage account that is automatically created by Cloud Shell](../../cloud-shell/persisting-shell-storage.md). Don't delete the storage account, or the file share in it, until after you have retrieved your keys or you will lose access to the VM. You will be prompted for a user name that will be used when you connect to the VM. You will also be asked for a password, which you can leave blank. Password login for the VM is disabled when using an SSH key. @@ -45,13 +44,13 @@ In this example, you create a VM named *myVM*, in *East US*, using the *Standard ```azurepowershell-interactive New-AzVm ` - -ResourceGroupName "myResourceGroup" ` - -Name "myVM" ` - -Location "East US" ` - -Image UbuntuLTS ` + -ResourceGroupName 'myResourceGroup' ` + -Name 'myVM' ` + -Location 'East US' ` + -Image Debian ` -size Standard_B2s ` -PublicIpAddressName myPubIP ` - -OpenPorts 80,22 ` + -OpenPorts 80 ` -GenerateSshKey ` -SshKeyName mySSHKey ``` @@ -63,55 +62,41 @@ Private key is saved to /home/user/.ssh/1234567891 Public key is saved to /home/user/.ssh/1234567891.pub ``` -Make a note of the path to your private key to use later. - It will take a few minutes for your VM to be deployed. When the deployment is finished, move on to the next section. +## Install NGINX -## Connect to the VM - -You need to change the permission on the SSH key using `chmod`. Replace *~/.ssh/1234567891* in the following example with the private key name and path from the earlier output. - -```azurepowershell-interactive -chmod 600 ~/.ssh/1234567891 -``` - -Create an SSH connection with the VM using the public IP address. To see the public IP address of the VM, use the [Get-AzPublicIpAddress](/powershell/module/az.network/get-azpublicipaddress) cmdlet: +To see your VM in action, install the NGINX web server. ```azurepowershell-interactive -Get-AzPublicIpAddress -ResourceGroupName "myResourceGroup" | Select "IpAddress" +Invoke-AzVMRunCommand ` + -ResourceGroupName 'myResourceGroup' ` + -Name 'myVM' ` + -CommandId 'RunShellScript' ` + -ScriptString 'sudo apt-get update && sudo apt-get install -y nginx' ``` -Using the same shell you used to create your SSH key pair, paste the the following command into the shell to create an SSH session. Replace *~/.ssh/1234567891* in the following example with the private key name and path from the earlier output. Replace *10.111.12.123* with the IP address of your VM and *azureuser* with the name you provided when you created the VM. +The `-ScriptString' parameter requires version `4.27.0` or later of the 'Az.Compute` module. -```bash -ssh -i ~/.ssh/1234567891 azureuser@10.111.12.123 -``` -## Install NGINX +## View the web server in action -To see your VM in action, install the NGINX web server. From your SSH session, update your package sources and then install the latest NGINX package. +Get the public IP address of your VM: -```bash -sudo apt-get -y update -sudo apt-get -y install nginx +```azurepowershell-interactive +Get-AzPublicIpAddress -Name myPubIP -ResourceGroupName myResourceGroup | select "IpAddress" ``` -When done, type `exit` to leave the SSH session. - - -## View the web server in action - -Use a web browser of your choice to view the default NGINX welcome page. Enter the public IP address of the VM as the web address. The public IP address can be found on the VM overview page or as part of the SSH connection string you used earlier. 
+Use a web browser of your choice to view the default NGINX welcome page. Enter the public IP address of the VM as the web address. -![NGINX default Welcome page](./media/quick-create-cli/nginix-welcome-page.png) +![Screenshot showing the N G I N X default web page.](./media/quick-create-cli/nginix-welcome-page-debian.png) ## Clean up resources When no longer needed, you can use the [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) cmdlet to remove the resource group, VM, and all related resources: ```azurepowershell-interactive -Remove-AzResourceGroup -Name "myResourceGroup" +Remove-AzResourceGroup -Name 'myResourceGroup' ``` ## Next steps diff --git a/articles/virtual-machines/linux/run-command-managed.md b/articles/virtual-machines/linux/run-command-managed.md index 8ce6bb1f7f6ca..78fce25fa09e0 100644 --- a/articles/virtual-machines/linux/run-command-managed.md +++ b/articles/virtual-machines/linux/run-command-managed.md @@ -20,9 +20,9 @@ ms.custom: devx-track-azurepowershell, devx-track-azurecli > This preview version is provided without a service-level agreement, and we don't recommend it for production workloads. Certain features might not be supported or might have constrained capabilities. > For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). -The Run Command feature uses the virtual machine (VM) agent to scripts within an Azure Linux VM. You can use these scripts for general machine or application management. They can help you quickly diagnose and remediate VM access and network issues and get the VM back to a good state. +The Run Command feature uses the virtual machine (VM) agent to run scripts within an Azure Linux VM. You can use these scripts for general machine or application management. They can help you quickly diagnose and remediate VM access and network issues and get the VM back to a good state. -The *updated* managed Run Command uses the same VM agent channel to execute scripts and provides the following enhancements over the [original action orientated Run Command](run-command.md): +The *updated* managed Run Command uses the same VM agent channel to execute scripts and provides the following enhancements over the [original action oriented Run Command](run-command.md): - Support for updated Run Command through ARM deployment template - Parallel execution of multiple scripts - Sequential execution of scripts @@ -58,7 +58,7 @@ az vm run-command list --vm-name "myVM" --resource-group "myRG" This command will retrieve current execution progress, including latest output, start/end time, exit code, and terminal state of the execution. ```azurecli-interactive -az vm run-command show --name "myRunCommand" --vm-name "myVM" --resource-group "myRG" –expand +az vm run-command show --name "myRunCommand" --vm-name "myVM" --resource-group "myRG" --expand instanceView ``` ### Delete RunCommand resource from the VM @@ -74,33 +74,31 @@ az vm run-command delete --name "myRunCommand" --vm-name "myVM" --resource-group ### Execute a script with the VM This command will deliver the script to the VM, execute it, and return the captured output. -```powershell-interactive -Set-AzVMRunCommand -ResourceGroupName "myRG" -VMName "myVM" -Name "RunCommandName" – Script "echo Hello World!" +```azurepowershell-interactive +Set-AzVMRunCommand -ResourceGroupName "myRG" -VMName "myVM" -Location "EastUS" -RunCommandName "RunCommandName" –SourceScript "echo Hello World!" 
``` ### List all deployed RunCommand resources on a VM This command will return a full list of previously deployed Run Commands along with their properties. -```powershell-interactive -Get-AzVMRunCommand AzVMRunCommand -ResourceGroupName "myRG" -VMName "myVM" +```azurepowershell-interactive +Get-AzVMRunCommand -ResourceGroupName "myRG" -VMName "myVM" ``` ### Get execution status and results This command will retrieve current execution progress, including latest output, start/end time, exit code, and terminal state of the execution. -```powershell-interactive -Get-AzVMRunCommand AzVMRunCommand -ResourceGroupName "myRG" -VMName "myVM" -Name "RunCommandName" -Status +```azurepowershell-interactive +Get-AzVMRunCommand -ResourceGroupName "myRG" -VMName "myVM" -RunCommandName "RunCommandName" -Expand instanceView ``` ### Delete RunCommand resource from the VM Remove the RunCommand resource previously deployed on the VM. If the script execution is still in progress, execution will be terminated. -```powershell-interactive -Remove-AzVMRunCommand AzVMRunCommand -ResourceGroupName "myRG" -VMName "myVM" -Name "RunCommandName" +```azurepowershell-interactive +Remove-AzVMRunCommand -ResourceGroupName "myRG" -VMName "myVM" -RunCommandName "RunCommandName" ``` - - ## REST API To deploy a new Run Command, execute a PUT on the VM directly and specify a unique name for the Run Command instance. diff --git a/articles/virtual-machines/linux/run-command.md b/articles/virtual-machines/linux/run-command.md index 2f277f4ffed52..0bbcb3ec45b58 100644 --- a/articles/virtual-machines/linux/run-command.md +++ b/articles/virtual-machines/linux/run-command.md @@ -20,7 +20,7 @@ The Run Command feature uses the virtual machine (VM) agent to run shell scripts ## Benefits -You can access your virtual machines in multiple ways. Run Command can run scripts on your virtual machines remotely by using the VM agent. You use Run Command through the Azure portal, [REST API](/rest/api/compute/virtual-machines-run-commands/run-command), or [Azure CLI](/cli/azure/vm/run-command#az-vm-run-command-invoke) for Linux VMs. +You can access your virtual machines in multiple ways. Run Command can run scripts on your virtual machines remotely by using the VM agent. You use Run Command through the Azure portal, [REST API](/rest/api/compute/virtual-machine-run-commands), or [Azure CLI](/cli/azure/vm/run-command#az-vm-run-command-invoke) for Linux VMs. This capability is useful in all scenarios where you want to run a script within a virtual machine. It's one of the only ways to troubleshoot and remediate a virtual machine that doesn't have the RDP or SSH port open because of network or administrative user configuration. diff --git a/articles/virtual-machines/linux/run-scripts-in-vm.md b/articles/virtual-machines/linux/run-scripts-in-vm.md index 8df48bd524e70..a73b60e8b564c 100644 --- a/articles/virtual-machines/linux/run-scripts-in-vm.md +++ b/articles/virtual-machines/linux/run-scripts-in-vm.md @@ -31,7 +31,7 @@ The [Custom Script Extension](../extensions/custom-script-linux.md) is primarily The [Run Command](run-command.md) feature enables virtual machine and application management and troubleshooting using scripts, and is available even when the machine is not reachable, for example if the guest firewall doesn't have the RDP or SSH port open. * Run scripts in Azure virtual machines. 
-* Can be run using [Azure portal](run-command.md), [REST API](/rest/api/compute/virtual-machines-run-commands/run-command), [Azure CLI](/cli/azure/vm/run-command#az-vm-run-command-invoke), or [PowerShell](/powershell/module/az.compute/invoke-azvmruncommand) +* Can be run using [Azure portal](run-command.md), [REST API](/azure/virtual-machines/windows/run-command), [Azure CLI](/cli/azure/vm/run-command#az-vm-run-command-invoke), or [PowerShell](/powershell/module/az.compute/invoke-azvmruncommand) * Quickly run a script and view output and repeat as needed in the Azure portal. * Script can be typed directly or you can run one of the built-in scripts. * Run PowerShell script in Windows machines and Bash script in Linux machines. diff --git a/articles/virtual-machines/linux/ssh-from-windows.md b/articles/virtual-machines/linux/ssh-from-windows.md index 7a955e7325700..1952d6b1c6a53 100644 --- a/articles/virtual-machines/linux/ssh-from-windows.md +++ b/articles/virtual-machines/linux/ssh-from-windows.md @@ -56,7 +56,7 @@ You can also create key pairs with the [Azure CLI](/cli/azure) with the [az sshk To create an SSH key pair on your local computer using the `ssh-keygen` command from PowerShell or a command prompt, type the following: ```powershell -ssh-keygen -m PEM -t rsa -b 4096 +ssh-keygen -m PEM -t rsa -b 2048 ``` Enter a filename, or use the default shown in parenthesis (for example `C:\Users\username/.ssh/id_rsa`). Enter a passphrase for the file, or leave the passphrase blank if you do not want to use a passphrase. diff --git a/articles/virtual-machines/linux/storage-performance.md b/articles/virtual-machines/linux/storage-performance.md index ccbeb8d3b0474..8d506bdd6c034 100644 --- a/articles/virtual-machines/linux/storage-performance.md +++ b/articles/virtual-machines/linux/storage-performance.md @@ -1,108 +1,98 @@ --- -title: Optimize performance on Azure Lsv2-series virtual machines - Storage -description: Learn how to optimize performance for your solution on the Lsv2-series virtual machines using a Linux example. -author: laurenhughes -ms.service: virtual-machines -ms-subservice: vm-sizes-storage -ms.collection: linux -ms.topic: conceptual -ms.tgt_pltfrm: vm-linux -ms.workload: infrastructure-services -ms.date: 08/05/2019 -ms.author: joelpell ---- - -# Optimize performance on the Lsv2-series Linux virtual machines - -**Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Flexible scale sets - -Lsv2-series virtual machines support a variety of workloads that need high I/O and throughput on local storage across a wide range of applications and industries. The Lsv2-series is ideal for Big Data, SQL, NoSQL databases, data warehousing and large transactional databases, including Cassandra, MongoDB, Cloudera, and Redis. +title: Optimize performance on Lsv3, Lasv3, and Lsv2-series Linux VMs +description: Learn how to optimize performance for your solution on the Lsv3, Lasv3, and Lsv2-series Linux virtual machines (VMs) on Azure. +author: sasha-melamed +ms.service: virtual-machines +ms.subservice: sizes +ms.collection: linux +ms.topic: conceptual +ms.tgt_pltfrm: vm-linux +ms.workload: infrastructure-services +ms.date: 06/01/2022 +ms.author: sasham +--- -The design of the Lsv2-series Virtual Machines (VMs) maximizes the AMD EPYC™ 7551 processor to provide the best performance between the processor, memory, NVMe devices, and the VMs. 
Working with partners in Linux, several builds are available Azure Marketplace that are optimized for Lsv2-series performance and currently include: +# Optimize performance on Lsv3, Lasv3, and Lsv2-series Linux VMs -- Ubuntu 18.04 -- Ubuntu 16.04 -- RHEL 8.0 -- Debian 9 -- Debian 10 +**Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Uniform scale sets -This article provides tips and suggestions to ensure your workloads and applications achieve the maximum performance designed into the VMs. The information on this page will be continuously updated as more Lsv2 optimized images are added to the Azure Marketplace. +Lsv3, Lasv3, and Lsv2-series Azure Virtual Machines (Azure VMs) support various workloads that need high I/O and throughput on local storage across a wide range of applications and industries. The L-series is ideal for Big Data, SQL, NoSQL databases, data warehousing and large transactional databases, including Cassandra, MongoDB, Cloudera, and Redis. -## AMD EPYC™ chipset architecture +Several builds are available in the Azure Marketplace through work with partners in Linux. These builds are optimized for Lsv3, Lasv3, and Lsv2-series performance. Available builds include the following and later versions of: -Lsv2-series VMs use AMD EYPC™ server processors based on the Zen microarchitecture. AMD developed Infinity Fabric (IF) for EYPC™ as scalable interconnect for its NUMA model that could be used for on-die, on-package, and multi-package communications. Compared with QPI (Quick-Path Interconnect) and UPI (Ultra-Path Interconnect) used on Intel modern monolithic-die processors, AMD’s many-NUMA small-die architecture may bring both performance benefits as well as challenges. The actual impact of memory bandwidth and latency constraints could vary depending on the type of workloads running. +- Ubuntu 16.04 +- RHEL 8.0 and clones, including CentOS, Rocky Linux, and Alma Linux +- Debian 9 +- SUSE Linux 15 +- Oracle Linux 8.0 -## Tips to maximize performance +This article provides tips and suggestions to ensure your workloads and applications achieve the maximum performance designed into the VMs. -* If you are uploading a custom Linux GuestOS for your workload, note that Accelerated Networking will be **OFF** by default. If you intend to enable Accelerated Networking, enable it at the time of VM creation for best performance. +## AMD EPYC™ chipset architecture -* The hardware that powers the Lsv2-series VMs utilizes NVMe devices with eight I/O Queue Pairs (QP)s. Every NVMe device I/O queue is actually a pair: a submission queue and a completion queue. The NVMe driver is set up to optimize the utilization of these eight I/O QPs by distributing I/O’s in a round robin schedule. To gain max performance, run eight jobs per device to match. +Lasv3 and Lsv2-series VMs use AMD EPYC™ server processors based on the Zen micro-architecture. AMD developed Infinity Fabric (IF) for EPYC™ as scalable interconnect for its NUMA model that can be used for on-die, on-package, and multi-package communications. Compared with QPI (Quick-Path Interconnect) and UPI (Ultra-Path Interconnect) used on Intel modern monolithic-die processors, AMD's many-NUMA small-die architecture can bring both performance benefits and challenges. The actual effects of memory bandwidth and latency constraints might vary depending on the type of workloads running. -* Avoid mixing NVMe admin commands (for example, NVMe SMART info query, etc.) with NVMe I/O commands during active workloads.
Lsv2 NVMe devices are backed by Hyper-V NVMe Direct technology, which switches into “slow mode” whenever any NVMe admin commands are pending. Lsv2 users could see a dramatic performance drop in NVMe I/O performance if that happens. +## Tips to maximize performance -* Lsv2 users should not rely on device NUMA information (all 0) reported from within the VM for data drives to decide the NUMA affinity for their apps. The recommended way for better performance is to spread workloads across CPUs if possible. +* If you're uploading a custom Linux GuestOS for your workload, Accelerated Networking is turned off by default. If you intend to enable Accelerated Networking, enable it at the time of VM creation for best performance. +* To gain max performance, run multiple jobs with deep queue depth per device. +* Avoid mixing NVMe admin commands (for example, NVMe SMART info query, etc.) with NVMe I/O commands during active workloads. Lsv3, Lasv3, and Lsv2 NVMe devices are backed by Hyper-V NVMe Direct technology, which switches into “slow mode” whenever any NVMe admin commands are pending. Lsv3, Lasv3, and Lsv2 users might see a dramatic performance drop in NVMe I/O performance if that happens. +* Lsv2 users shouldn't rely on device NUMA information (all 0) reported from within the VM for data drives to decide the NUMA affinity for their apps. The recommended way for better performance is to spread workloads across CPUs if possible. +* The maximum supported queue depth per I/O queue pair for Lsv3, Lasv3, and Lsv2 VM NVMe device is 1024. Lsv3, Lasv3, and Lsv2 users are recommended to limit their (synthetic) benchmarking workloads to queue depth 1024 or lower to avoid triggering queue full conditions, which can reduce performance. +* The best performance is obtained when I/O is done directly to each of the raw NVMe devices with no partitioning, no file systems, no RAID config, etc. Before starting a testing session, ensure the configuration is in a known fresh/clean state by running `blkdiscard` on each of the NVMe devices. -* The maximum supported queue depth per I/O queue pair for Lsv2 VM NVMe device is 1024 (vs. Amazon i3 QD 32 limit). Lsv2 users should limit their (synthetic) benchmarking workloads to queue depth 1024 or lower to avoid triggering queue full conditions, which can reduce performance. ## Utilizing local NVMe storage -## Utilizing local NVMe storage +Local storage on the 1.92 TB NVMe disk on all Lsv3, Lasv3, and Lsv2 VMs is ephemeral. During a successful standard reboot of the VM, the data on the local NVMe disk persists. The data doesn't persist on the NVMe if the VM is redeployed, de-allocated, or deleted. Data doesn't persist if another issue causes the VM, or the hardware it's running on, to become unhealthy. When this scenario happens, any data on the old host is securely erased. -Local storage on the 1.92 TB NVMe disk on all Lsv2 VMs is ephemeral. During a successful standard reboot of the VM, the data on the local NVMe disk will persist. The data will not persist on the NVMe if the VM is redeployed, de-allocated, or deleted. Data will not persist if another issue causes the VM, or the hardware it is running on, to become unhealthy. When this happens, any data on the old host is securely erased. +There are also cases when the VM needs to be moved to a different host machine, for example, during a planned maintenance operation. Planned maintenance operations and some hardware failures can be anticipated with [Scheduled Events](scheduled-events.md).
Use Scheduled Events to stay updated on any predicted maintenance and recovery operations. -There will also be cases when the VM needs to be moved to a different host machine, for example, during a planned maintenance operation. Planned maintenance operations and some hardware failures can be anticipated with [Scheduled Events](scheduled-events.md). Scheduled Events should be used to stay updated on any predicted maintenance and recovery operations. +In the case that a planned maintenance event requires the VM to be recreated on a new host with empty local disks, the data needs to be resynchronized (again, with any data on the old host being securely erased). This scenario occurs because Lsv3, Lasv3, and Lsv2-series VMs don't currently support live migration on the local NVMe disk. -In the case that a planned maintenance event requires the VM to be recreated on a new host with empty local disks, the data will need to be resynchronized (again, with any data on the old host being securely erased). This occurs because Lsv2-series VMs do not currently support live migration on the local NVMe disk. +There are two modes for planned maintenance. -There are two modes for planned maintenance. +### Standard VM customer-controlled maintenance -### Standard VM customer-controlled maintenance +- The VM is moved to an updated host during a 30-day window. +- Lsv3, Lasv3, and Lsv2 local storage data could be lost, so backing-up data prior to the event is recommended. -- The VM is moved to an updated host during a 30-day window. -- Lsv2 local storage data could be lost, so backing-up data prior to the event is recommended. ### Automatic maintenance -### Automatic maintenance +- Occurs if the customer doesn't execute customer-controlled maintenance, or because of emergency procedures, such as a security zero-day event. +- Intended to preserve customer data, but there's a small risk of a VM freeze or reboot. +- Lsv3, Lasv3, and Lsv2 local storage data could be lost, so backing-up data prior to the event is recommended. -- Occurs if the customer does not execute customer-controlled maintenance, or in the event of emergency procedures such as a security zero-day event. -- Intended to preserve customer data, but there is a small risk of a VM freeze or reboot. -- Lsv2 local storage data could be lost, so backing-up data prior to the event is recommended. +For any upcoming service events, use the controlled maintenance process to select a time most convenient to you for the update. Prior to the event, back up your data in premium storage. After the maintenance event completes, you can return your data to the refreshed Lsv3, Lasv3, and Lsv2 VMs local NVMe storage. -For any upcoming service events, use the controlled maintenance process to select a time most convenient to you for the update. Prior to the event, you may back up your data in premium storage. After the maintenance event completes, you can return your data to the refreshed Lsv2 VMs local NVMe storage. +Scenarios that maintain data on local NVMe disks include: -Scenarios that maintain data on local NVMe disks include: +- The VM is running and healthy. +- The VM is rebooted in place (by you or Azure). +- The VM is paused (stopped without de-allocation). +- Most of the planned maintenance servicing operations. -- The VM is running and healthy. -- The VM is rebooted in place (by you or Azure). -- The VM is paused (stopped without de-allocation). -- The majority of the planned maintenance servicing operations.
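To make the Scheduled Events guidance above actionable, the sketch below polls the Scheduled Events endpoint from inside the VM so you can back up local NVMe data before a maintenance event starts. This is an illustrative sketch only: it assumes the instance metadata Scheduled Events endpoint with the `2020-07-01` API version (confirm the current version in the Scheduled Events documentation), and it uses PowerShell, which is also available on Linux as `pwsh`.

```powershell
# Poll Scheduled Events from inside the VM (sketch; endpoint and api-version are assumptions to verify).
$uri = "http://169.254.169.254/metadata/scheduledevents?api-version=2020-07-01"
$response = Invoke-RestMethod -Uri $uri -Headers @{ Metadata = "true" } -Method Get

# Each entry describes an upcoming event (for example Freeze, Reboot, or Redeploy),
# giving you a window to copy local NVMe data to durable storage first.
$response.Events | Format-Table EventId, EventType, ResourceType, NotBefore
```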
+Scenarios that securely erase data to protect the customer include: -Scenarios that securely erase data to protect the customer include: +- The VM is redeployed, stopped (de-allocated), or deleted (by you). +- The VM becomes unhealthy and has to service heal to another node due to a hardware issue. +- A few of the planned maintenance servicing operations that require the VM to be reallocated to another host for servicing. -- The VM is redeployed, stopped (de-allocated), or deleted (by you). -- The VM becomes unhealthy and has to service heal to another node due to a hardware issue. -- A small number of the planned maintenance servicing operations that requires the VM to be reallocated to another host for servicing. +## Frequently asked questions -## Frequently asked questions +The following are frequently asked questions about these series. -* **How do I start deploying Lsv2-series VMs?** - Much like any other VM, use the [Portal](quick-create-portal.md), [Azure CLI](quick-create-cli.md), or [PowerShell](quick-create-powershell.md) to create a VM. +### How do I start deploying L-series VMs? -* **Will a single NVMe disk failure cause all VMs on the host to fail?** - If a disk failure is detected on the hardware node, the hardware is in a failed state. When this occurs, all VMs on the node are automatically de-allocated and moved to a healthy node. For Lsv2-series VMs, this means that the customer’s data on the failing node is also securely erased and will need to be recreated by the customer on the new node. As noted, before live migration becomes available on Lsv2, the data on the failing node will be proactively moved with the VMs as they are transferred to another node. +Much like any other VM, use the [Portal](quick-create-portal.md), [Azure CLI](quick-create-cli.md), or [PowerShell](quick-create-powershell.md) to create a VM. -* **Do I need to make any adjustments to rq_affinity for performance?** - The rq_affinity setting is a minor adjustment when using the absolute maximum input/output operations per second (IOPS). Once everything else is working well, then try to set rq_affinity to 0 to see if it makes a difference. +### Does a single NVMe disk failure cause all VMs on the host to fail? -* **Do I need to change the blk_mq settings?** - RHEL/CentOS 7.x automatically uses blk-mq for the NVMe devices. No configuration changes or settings are necessary. The scsi_mod.use_blk_mq setting is for SCSI only and was used during Lsv2 Preview because the NVMe devices were visible in the guest VMs as SCSI devices. Currently, the NVMe devices are visible as NVMe devices, so the SCSI blk-mq setting is irrelevant. +If a disk failure is detected on the hardware node, the hardware is in a failed state. When this problem occurs, all VMs on the node are automatically de-allocated and moved to a healthy node. For Lsv3, Lasv3, and Lsv2-series VMs, this problem means that the customer's data on the failing node is also securely erased. The customer needs to recreate the data on the new node. -* **Do I need to change “fio”?** - To get maximum IOPS with a performance measuring tool like ‘fio’ in the L64v2 and L80v2 VM sizes, set “rq_affinity” to 0 on each NVMe device. For example, this command line will set “rq_affinity” to zero for all 10 NVMe devices in an L80v2 VM: +### Do I need to change the blk_mq settings? - ```console - for i in `seq 0 9`; do echo 0 >/sys/block/nvme${i}n1/queue/rq_affinity; done - ``` +RHEL/CentOS 7.x automatically uses blk-mq for the NVMe devices. 
No configuration changes or settings are necessary. - Also note that the best performance is obtained when I/O is done directly to each of the raw NVMe devices with no partitioning, no file systems, no RAID 0 config, etc. Before starting a testing session, ensure the configuration is in a known fresh/clean state by running `blkdiscard` on each of the NVMe devices. - -## Next steps +## Next steps -* See specifications for all [VMs optimized for storage performance](../sizes-storage.md) on Azure +See specifications for all [VMs optimized for storage performance](../sizes-storage.md) on Azure diff --git a/articles/virtual-machines/linux/upload-vhd.md b/articles/virtual-machines/linux/upload-vhd.md index efa6bd22485b8..ebf0e2d2623f9 100644 --- a/articles/virtual-machines/linux/upload-vhd.md +++ b/articles/virtual-machines/linux/upload-vhd.md @@ -74,7 +74,7 @@ You can now upload VHD straight into a managed disk. For instructions, see [Uplo You can also create a customized VM in Azure and then copy the OS disk and attach it to a new VM to create another copy. This is fine for testing, but if you want to use an existing Azure VM as the model for multiple new VMs, create an *image* instead. For more information about creating an image from an existing Azure VM, see [Create a custom image of an Azure VM by using the CLI](tutorial-custom-images.md). -If you want to copy an existing VM to another region, you might want to use azcopy to [creat a copy of a disk in another region](disks-upload-vhd-to-managed-disk-cli.md#copy-a-managed-disk). +If you want to copy an existing VM to another region, you might want to use azcopy to [create a copy of a disk in another region](disks-upload-vhd-to-managed-disk-cli.md#copy-a-managed-disk). Otherwise, you should take a snapshot of the VM and then create a new OS VHD from the snapshot. diff --git a/articles/virtual-machines/linux/use-remote-desktop.md b/articles/virtual-machines/linux/use-remote-desktop.md index 756060badde3d..0593022c49bb9 100644 --- a/articles/virtual-machines/linux/use-remote-desktop.md +++ b/articles/virtual-machines/linux/use-remote-desktop.md @@ -13,11 +13,11 @@ ms.author: cynthn --- # Install and configure xrdp to use Remote Desktop with Ubuntu -**Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Flexible scale sets +**Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Flexible scale sets Linux virtual machines (VMs) in Azure are usually managed from the command line using a secure shell (SSH) connection. When new to Linux, or for quick troubleshooting scenarios, the use of remote desktop may be easier. This article details how to install and configure a desktop environment ([xfce](https://www.xfce.org)) and remote desktop ([xrdp](http://xrdp.org)) for your Linux VM running Ubuntu. -The article was writen and tested using an Ubuntu 18.04 VM. +The article was written and tested using an Ubuntu 18.04 VM. ## Prerequisites @@ -84,16 +84,47 @@ sudo passwd azureuser ## Create a Network Security Group rule for Remote Desktop traffic To allow Remote Desktop traffic to reach your Linux VM, a network security group rule needs to be created that allows TCP on port 3389 to reach your VM. For more information about network security group rules, see [What is a network security group?](../../virtual-network/network-security-groups-overview.md) You can also [use the Azure portal to create a network security group rule](../windows/nsg-quickstart-portal.md). 
+### [Azure CLI](#tab/azure-cli) + The following example creates a network security group rule with [az vm open-port](/cli/azure/vm#az-vm-open-port) on port *3389*. From the Azure CLI, not the SSH session to your VM, open the following network security group rule: ```azurecli az vm open-port --resource-group myResourceGroup --name myVM --port 3389 ``` +### [Azure PowerShell](#tab/azure-powershell) + +The following example adds a network security group rule with [Add-AzNetworkSecurityRuleConfig](/powershell/module/az.network/add-aznetworksecurityruleconfig) on port *3389* to the existing network security group. From the Azure PowerShell, not the SSH session to your VM, get the existing network security group named *myVMnsg*: + +```azurepowershell +$nsg = Get-AzNetworkSecurityGroup -ResourceGroupName myResourceGroup -Name myVMnsg +``` + +Add an RDP network security rule named *open-port-3389* to your `$nsg` network security group and update the network security group with [Set-AzNetworkSecurityGroup](/powershell/module/az.network/set-aznetworksecuritygroup) in order for your changes to take effect: + +```azurepowershell +$params = @{ + Name = 'open-port-3389' + Description = 'Allow RDP' + NetworkSecurityGroup = $nsg + Access = 'Allow' + Protocol = 'TCP' + Direction = 'Inbound' + Priority = 100 + SourceAddressPrefix = 'Internet' + SourcePortRange = '*' + DestinationAddressPrefix = '*' + DestinationPortRange = '3389' +} + +Add-AzNetworkSecurityRuleConfig @params | Set-AzNetworkSecurityGroup +``` + +--- ## Connect your Linux VM with a Remote Desktop client -Open your local remote desktop client and connect to the IP address or DNS name of your Linux VM. +Open your local remote desktop client and connect to the IP address or DNS name of your Linux VM. :::image type="content" source="media/use-remote-desktop/remote-desktop.png" alt-text="Screenshot of the remote desktop client."::: diff --git a/articles/virtual-machines/lsv2-series.md b/articles/virtual-machines/lsv2-series.md index e357c1bc7a402..4beeb1e39fb6c 100644 --- a/articles/virtual-machines/lsv2-series.md +++ b/articles/virtual-machines/lsv2-series.md @@ -3,15 +3,15 @@ title: Lsv2-series - Azure Virtual Machines description: Specifications for the Lsv2-series VMs. author: sasha-melamed ms.service: virtual-machines -ms.subservice: vm-sizes-storage +ms.subservice: sizes ms.topic: conceptual -ms.date: 02/03/2020 -ms.author: jushiman +ms.date: 06/01/2022 +ms.author: sasham --- # Lsv2-series -**Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Windows VMs :heavy_check_mark: Flexible scale sets :heavy_check_mark: Uniform scale sets +**Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Windows VMs :heavy_check_mark: Uniform scale sets The Lsv2-series features high throughput, low latency, directly mapped local NVMe storage running on the [AMD EPYCTM 7551 processor](https://www.amd.com/en/products/epyc-7000-series) with an all core boost of 2.55GHz and a max boost of 3.0GHz. The Lsv2-series VMs come in sizes from 8 to 80 vCPU in a simultaneous multi-threading configuration. There is 8 GiB of memory per vCPU, and one 1.92TB NVMe SSD M.2 device per 8 vCPUs, with up to 19.2TB (10x1.92TB) available on the L80s v2. 
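As a small illustrative aside, availability of these storage optimized sizes varies by region and subscription. A quick Azure PowerShell sketch (the region name is a placeholder) to list which L-series sizes you can deploy in a given location:

```azurepowershell
# List the L-series sizes offered in a region (sketch; substitute your own location).
Get-AzVMSize -Location "eastus" |
    Where-Object { $_.Name -like "Standard_L*" } |
    Select-Object Name, NumberOfCores, MemoryInMB, MaxDataDiskCount
```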
diff --git a/articles/virtual-machines/lsv3-series.md b/articles/virtual-machines/lsv3-series.md new file mode 100644 index 0000000000000..8dd5d8664acc9 --- /dev/null +++ b/articles/virtual-machines/lsv3-series.md @@ -0,0 +1,70 @@ +--- +title: Lsv3-series - Azure Virtual Machines +description: Specifications for the Lsv3-series of Azure Virtual Machines (Azure VMs). +author: sasha-melamed +ms.service: virtual-machines +ms.subservice: sizes +ms.topic: conceptual +ms.date: 06/01/2022 +ms.author: sasham +--- + +# Lsv3-series + +**Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Windows VMs :heavy_check_mark: Flexible scale sets :heavy_check_mark: Uniform scale sets + +The Lsv3-series of Azure Virtual Machines (Azure VMs) features high-throughput, low latency, directly mapped local NVMe storage. These VMs run on the 3rd Generation Intel® Xeon® Platinum 8370C (Ice Lake) processor in a [hyper-threaded configuration](https://www.intel.com/content/www/us/en/architecture-and-technology/hyper-threading/hyper-threading-technology.html). This new processor features an all-core turbo clock speed of 3.5 GHz with [Intel® Turbo Boost Technology](https://www.intel.com/content/www/us/en/architecture-and-technology/turbo-boost/turbo-boost-technology.html), [Intel® Advanced-Vector Extensions 512 (Intel® AVX-512)](https://www.intel.com/content/www/us/en/architecture-and-technology/avx-512-overview.html) and [Intel® Deep Learning Boost](https://software.intel.com/content/www/us/en/develop/topics/ai/deep-learning-boost.html). + +The Lsv3-series VMs are available in sizes from 8 to 80 vCPUs. There are 8 GiB of memory allocated per vCPU, and one 1.92TB NVMe SSD device allocated per 8 vCPUs, with up to 19.2TB (10x1.92TB) available on the L80s_v3 size. + +> [!NOTE] +> The Lsv3-series VMs are optimized to use the local disk on the node attached directly to the VM rather than using [durable data disks](disks-types.md). This method allows for greater IOPS and throughput for your workloads. The Lsv3, Lasv3, Lsv2, and Ls-series VMs don't support the creation of a host cache to increase the IOPS achievable by durable data disks. +> +> The high throughput and IOPS of the local disk makes the Lsv3-series VMs ideal for NoSQL stores such as Apache Cassandra and MongoDB. These stores replicate data across multiple VMs to achieve persistence in the event of the failure of a single VM. +> +> To learn more, see how to optimize performance on the Lsv3-series [Windows-based VMs](../virtual-machines/windows/storage-performance.md) or [Linux-based VMs](../virtual-machines/linux/storage-performance.md). 
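To make the note about directly attached local disks concrete, the following sketch lists the NVMe devices a guest can see. It assumes a Windows guest with the built-in Storage module; on Linux guests, tools such as `lsblk` or `nvme list` serve the same purpose.

```powershell
# Inside a Windows guest: show the locally attached NVMe devices (illustrative sketch only).
Get-PhysicalDisk |
    Where-Object { $_.BusType -eq 'NVMe' } |
    Select-Object FriendlyName, BusType, @{ Name = 'SizeGB'; Expression = { [math]::Round($_.Size / 1GB) } }
```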
+ +- [Premium Storage](premium-storage-performance.md): Supported +- [Premium Storage caching](premium-storage-performance.md): Not Supported +- [Live Migration](maintenance-and-updates.md): Not Supported +- [Memory Preserving Updates](maintenance-and-updates.md): Supported +- [VM Generation Support](generation-2.md): Generation 1 and 2 +- [Accelerated Networking](../virtual-network/create-vm-accelerated-networking-cli.md): Supported +- [Ephemeral OS Disks](ephemeral-os-disks.md): Supported +- [Nested Virtualization](/virtualization/hyper-v-on-windows/user-guide/nested-virtualization): Supported + +| Size | vCPU | Memory (GiB) | Temp disk (GiB) | NVMe Disks | NVMe Disk throughput (Read IOPS/MBps) | Uncached data disk throughput (IOPS/MBps) | Max burst uncached data disk throughput (IOPS/MBps) | Max Data Disks | Max NICs | Expected network bandwidth (Mbps) | +|---|---|---|---|---|---|---|---|---|---|---| +| Standard_L8s_v3 | 8 | 64 | 80 | 1x1.92 TB | 400000/2000 | 12800/290 | 20000/1200 | 16 | 4 | 12500 | +| Standard_L16s_v3 | 16 | 128 | 160 | 2x1.92 TB | 800000/4000 | 25600/600 | 40000/1600 | 32 | 8 | 12500 | +| Standard_L32s_v3 | 32 | 256 | 320 | 4x1.92 TB | 1.5M/8000 | 51200/865 | 80000/2000 | 32 | 8 | 16000 | +| Standard_L48s_v3 | 48 | 384 | 480 | 6x1.92 TB | 2.2M/14000 | 76800/1315 | 80000/3000 | 32 | 8 | 24000 | +| Standard_L64s_v3 | 64 | 512 | 640 | 8x1.92 TB | 2.9M/16000 | 80000/1735 | 80000/3000 | 32 | 8 | 30000 | +| Standard_L80s_v3 | 80 | 640 | 800 | 10x1.92TB | 3.8M/20000 | 80000/2160 | 80000/3000 | 32 | 8 | 32000 | + +1. **Temp disk**: Lsv3-series VMs have a standard SCSI-based temp resource disk for use by the OS paging or swap file (`D:` on Windows, `/dev/sdb` on Linux). This disk provides 80 GiB of storage, 4,000 IOPS, and 80 MBps transfer rate for every 8 vCPUs. For example, Standard_L80s_v3 provides 800 GiB at 40000 IOPS and 800 MBPS. This configuration ensures the NVMe drives can be fully dedicated to application use. This disk is ephemeral, and all data is lost on stop or deallocation. +1. **NVMe Disks**: NVMe disk throughput can go higher than the specified numbers. However, higher performance isn't guaranteed. Local NVMe disks are ephemeral. Data is lost on these disks if you stop or deallocate your VM. Local NVMe disks aren't encrypted by [Azure Storage encryption](disk-encryption.md), even if you enable [encryption at host](disk-encryption.md#supported-vm-sizes). +1. **NVMe Disk throughput**: Hyper-V NVMe Direct technology provides unthrottled access to local NVMe drives mapped securely into the guest VM space. Lsv3 NVMe disk throughput can go higher than the specified numbers, but higher performance isn't guaranteed. To achieve maximum performance, see how to optimize performance on the Lsv3-series [Windows-based VMs](../virtual-machines/windows/storage-performance.md) or [Linux-based VMs](../virtual-machines/linux/storage-performance.md). Read/write performance varies based on IO size, drive load, and capacity utilization. +1. **Max burst uncached data disk throughput**: Lsv3-series VMs can [burst their disk performance](./disk-bursting.md) for up to 30 minutes at a time. + +> [!NOTE] +> Lsv3-series VMs don't provide host cache for data disk as it doesn't benefit the Lsv3 workloads. 
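For reference, a minimal Azure PowerShell sketch of deploying one of these sizes. The resource names, region, and image alias are placeholders, and you should confirm that the chosen size and image are available to your subscription in the target region.

```azurepowershell
# Sketch: create a Standard_L8s_v3 VM (all names and the image alias are placeholders).
$cred = Get-Credential

New-AzVM `
    -ResourceGroupName "myResourceGroup" `
    -Name "myLsv3VM" `
    -Location "eastus" `
    -Image "Ubuntu2204" `
    -Size "Standard_L8s_v3" `
    -Credential $cred
```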
+ +[!INCLUDE [virtual-machines-common-sizes-table-defs](../../includes/virtual-machines-common-sizes-table-defs.md)] + +## Other sizes and information + +- [General purpose](sizes-general.md) +- [Memory optimized](sizes-memory.md) +- [Storage optimized](sizes-storage.md) +- [GPU optimized](sizes-gpu.md) +- [High performance compute](sizes-hpc.md) +- [Previous generations](sizes-previous-gen.md) + +Pricing Calculator: [Pricing Calculator](https://azure.microsoft.com/pricing/calculator/) + +More information on Disks Types: [Disk Types](./disks-types.md#ultra-disks) + +## Next steps + +Learn more about how [Azure compute units (ACU)](acu.md) can help you compare compute performance across Azure SKUs. diff --git a/articles/virtual-machines/m-series.md index 9c480f5eafe8e..e6f02394dcb20 100644 --- a/articles/virtual-machines/m-series.md +++ b/articles/virtual-machines/m-series.md @@ -1,12 +1,12 @@ --- title: M-series - Azure Virtual Machines description: Specifications for the M-series VMs. -author: ayshakeen +author: lauradolan ms.service: virtual-machines -ms.subservice: vm-sizes-memory +ms.subservice: sizes ms.topic: conceptual ms.date: 03/31/2020 -ms.author: jushiman +ms.author: ayshak --- # M-series diff --git a/articles/virtual-machines/maintenance-and-updates.md index 28ba0885ff1b9..87a824be16f8a 100644 --- a/articles/virtual-machines/maintenance-and-updates.md +++ b/articles/virtual-machines/maintenance-and-updates.md @@ -32,7 +32,7 @@ Most nonzero-impact maintenance pauses the VM for less than 10 seconds. In certa Memory-preserving maintenance works for more than 90 percent of Azure VMs. It doesn't work for G, L, M, N, and H series. Azure increasingly uses live-migration technologies and improves memory-preserving maintenance mechanisms to reduce the pause durations. -These maintenance operations that don't require a reboot are applied one fault domain at a time. They stop if they receive any warning health signals from platform monitoring tools. +These maintenance operations that don't require a reboot are applied one fault domain at a time. They stop if they receive any warning health signals from platform monitoring tools. Maintenance operations that do not require a reboot may occur simultaneously in paired regions or Availability Zones. For a given change, deployments are mostly sequenced across Availability Zones and across Region pairs, but there can be overlap at the tail. These types of updates can affect some applications. When the VM is live-migrated to a different host, some sensitive workloads might show a slight performance degradation in the few minutes leading up to the VM pause. To prepare for VM maintenance and reduce impact during Azure maintenance, try [using Scheduled Events for Windows](./windows/scheduled-events.md) or [Linux](./linux/scheduled-events.md) for such applications. @@ -63,7 +63,7 @@ In the rare case where VMs need to be rebooted for planned maintenance, you'll b During the *self-service phase*, which typically lasts four weeks, you start the maintenance on your VMs. As part of the self-service, you can query each VM to see its status and the result of your last maintenance request. -When you start self-service maintenance, your VM is redeployed to an already updated node. Because the VM reboots, the temporary disk is lost and dynamic IP addresses associated with the virtual network interface are updated.
+When you start self-service maintenance, your VM is redeployed to an already updated node. Because the VM is redeployed, the temporary disk is lost and dynamic IP addresses associated with the virtual network interface are updated. If an error arises during self-service maintenance, the operation stops, the VM isn't updated, and you get the option to retry the self-service maintenance. @@ -85,7 +85,7 @@ Availability zones are unique physical locations within an Azure region. Each zo An availability zone is a combination of a fault domain and an update domain. If you create three or more VMs across three zones in an Azure region, your VMs are effectively distributed across three fault domains and three update domains. The Azure platform recognizes this distribution across update domains to make sure that VMs in different zones are not updated at the same time. -Each infrastructure update rolls out zone by zone, within a single region. But, you can have deployment going on in Zone 1, and different deployment going in Zone 2, at the same time. Deployments are not all serialized. But, a single deployment only rolls out one zone at a time to reduce risk. +Each infrastructure update rolls out zone by zone, within a single region. But, you can have deployment going on in Zone 1, and different deployment going in Zone 2, at the same time. Deployments are not all serialized. But, a single deployment that requires a reboot only rolls out one zone at a time to reduce risk. In general, updates that require a reboot are avoided when possible, and Azure attempts to use Live Migration or provide customers control. #### Virtual machine scale sets diff --git a/articles/virtual-machines/media/vmapps/select-extension-app.png b/articles/virtual-machines/media/vmapps/select-extension-app.png new file mode 100644 index 0000000000000..19f28a19ac2cf Binary files /dev/null and b/articles/virtual-machines/media/vmapps/select-extension-app.png differ diff --git a/articles/virtual-machines/msv2-mdsv2-series.md b/articles/virtual-machines/msv2-mdsv2-series.md index 31b9b68f35393..6dfabff0cdf42 100644 --- a/articles/virtual-machines/msv2-mdsv2-series.md +++ b/articles/virtual-machines/msv2-mdsv2-series.md @@ -1,12 +1,12 @@ --- title: Msv2/Mdsv2 Medium Memory Series - Azure Virtual Machines description: Specifications for the Msv2-series VMs. -author: ayshakeen +author: lauradolan ms.service: virtual-machines -ms.subservice: vm-sizes-memory +ms.subservice: sizes ms.topic: conceptual ms.date: 04/07/2020 -ms.author: jushiman +ms.author: ayshak --- # Msv2 and Mdsv2-series Medium Memory diff --git a/articles/virtual-machines/mv2-series.md b/articles/virtual-machines/mv2-series.md index 35013cdb1f2b7..324452982cdcd 100644 --- a/articles/virtual-machines/mv2-series.md +++ b/articles/virtual-machines/mv2-series.md @@ -1,12 +1,12 @@ --- title: Mv2-series - Azure Virtual Machines description: Specifications for the Mv2-series VMs. -author: ayshakeen +author: lauradolan ms.service: virtual-machines -ms.subservice: vm-sizes-memory +ms.subservice: sizes ms.topic: conceptual ms.date: 04/07/2020 -ms.author: jushiman +ms.author: ayshak --- # Mv2-series diff --git a/articles/virtual-machines/n-series-migration.md b/articles/virtual-machines/n-series-migration.md index f3a487c62a1bb..da737ab15fce8 100644 --- a/articles/virtual-machines/n-series-migration.md +++ b/articles/virtual-machines/n-series-migration.md @@ -2,7 +2,7 @@ title: Migration Guide for GPU Compute Workloads in Azure description: NC, ND, NCv2-series migration guide. 
ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.date: 08/15/2020 --- diff --git a/articles/virtual-machines/nc-a100-v4-series.md b/articles/virtual-machines/nc-a100-v4-series.md index 31995d87018f8..737d984eb2d5d 100644 --- a/articles/virtual-machines/nc-a100-v4-series.md +++ b/articles/virtual-machines/nc-a100-v4-series.md @@ -1,21 +1,16 @@ --- -title: NC A100 v4-series (preview) +title: NC A100 v4-series description: Specifications for the NC A100 v4-series Azure VMs. These VMs include Linux, Windows, Flexible scale sets, and uniform scale sets.``` author: sherrywangms ms.author: sherrywang ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual -ms.date: 03/01/2022 +ms.date: 06/01/2022 --- -# NC A100 v4-series (Preview) - -> [!IMPORTANT] -> The NC A100 v4-series of Azure virtual machines (VMs) is currently in PREVIEW. -> See the [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/) for legal terms that apply to Azure features that are in beta, preview, or otherwise not yet released into general availability. -> To use this preview feature, [sign up for the NC A100 v4 series preview](https://aka.ms/AzureNCA100v4Signup). +# NC A100 v4-series **Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Windows VMs :heavy_check_mark: Flexible scale sets :heavy_check_mark: Uniform scale sets @@ -38,26 +33,27 @@ These VMs are ideal for real-world Applied AI workloads, such as: To get started with NC A100 v4 VMs, refer to [HPC Workload Configuration and Optimization](./workloads/hpc/configure.md) for steps including driver and network configuration. -Due to increased GPU memory I/O footprint, the NC A100 v4 requires the use of [Generation 2 VMs](./generation-2.md) and marketplace images. The [Azure HPC images](./workloads/hpc/configure.md) are strongly recommended. Azure HPC Ubuntu 18.04, 20.04 and Azure HPC CentOS 7.9 images are supported. Windows Service 2019 and Windows Service 2022 images are supported. +Due to increased GPU memory I/O footprint, the NC A100 v4 requires the use of [Generation 2 VMs](./generation-2.md) and marketplace images. The [Azure HPC images](./workloads/hpc/configure.md) are strongly recommended. Azure HPC Ubuntu 18.04, 20.04 and Azure HPC CentOS 7.9, CentOS 8.4, RHEL 7.9 and RHEL 8.5 images are supported. Windows Service 2019 and Windows Service 2022 images are supported. -Key Features: -- [Premium Storage](premium-storage-performance.md) -- [Premium Storage caching](premium-storage-performance.md) -- [VM Generation 2](generation-2.md) -- [Ephemeral OS Disks](ephemeral-os-disks.md) -- NVIDIA NVLink Interconnect -These features are not supported:[Live Migration](maintenance-and-updates.md), [Memory Preserving Updates](maintenance-and-updates.md) and [Nested Virtualization](/virtualization/hyper-v-on-windows/user-guide/nested-virtualization) . 
+- [Premium Storage](premium-storage-performance.md): Supported +- [Premium Storage caching](premium-storage-performance.md): Supported +- [Ultra Disks](disks-types.md#ultra-disks): Not Supported +- [Live Migration](maintenance-and-updates.md): Not Supported +- [Memory Preserving Updates](maintenance-and-updates.md): Not Supported +- [VM Generation Support](generation-2.md): Generation 2 +- [Accelerated Networking](../virtual-network/create-vm-accelerated-networking-cli.md): Supported +- [Ephemeral OS Disks](ephemeral-os-disks.md): Supported +- InfiniBand: Not Supported +- Nvidia NVLink Interconnect: Supported +- [Nested Virtualization](/virtualization/hyper-v-on-windows/user-guide/nested-virtualization): Not Supported -> [!IMPORTANT] -> This VM series is currently in preview. These specifications are subject to change. -> -| Size | vCPU | Memory: GiB | Temp Storage (with NVMe): GiB | GPU | GPU Memory: GiB | Max data disks | Max uncached disk throughput: IOPS / MBps | Max NICs/network bandwidth (Mbps) | +| Size | vCPU | Memory: GiB | Temp Storage (with NVMe) : GiB | GPU | GPU Memory: GiB | Max data disks | Max uncached disk throughput: IOPS / MBps | Max NICs/network bandwidth (Mbps) | |---|---|---|---|---|---|---|---|---| -| Standard_NC24ads_A100_v4 | 24 | 220 | 1123 | 1 | 80 | 12 | 30000/1000 | 2/20,000 | -| Standard_NC48ads_A100_v4 | 48 | 440 | 2246 | 2 | 160 | 24 | 60000/2000 | 4/40,000 | +| Standard_NC24ads_A100_v4 | 24 | 220 | 1123 | 1 | 80 | 12 | 30000/1000 | 2/20,000 | +| Standard_NC48ads_A100_v4 | 48 | 440 | 2246 | 2 | 160 | 24 | 60000/2000 | 4/40,000 | | Standard_NC96ads_A100_v4 | 96 | 880 | 4492 | 4 | 320 | 32 | 120000/4000 | 8/80,000 | 1 GPU = one A100 card diff --git a/articles/virtual-machines/nc-series-retirement.md b/articles/virtual-machines/nc-series-retirement.md index 1c89d0eccd357..8bcc422367ebf 100644 --- a/articles/virtual-machines/nc-series-retirement.md +++ b/articles/virtual-machines/nc-series-retirement.md @@ -3,7 +3,7 @@ title: NC-series retirement description: NC-series retirement by August 31, 2023 author: sherrywangms ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.date: 09/01/2021 ms.author: sherrywang diff --git a/articles/virtual-machines/nc-series.md b/articles/virtual-machines/nc-series.md index ecb368f9fb61e..8ab9fb3966ef8 100644 --- a/articles/virtual-machines/nc-series.md +++ b/articles/virtual-machines/nc-series.md @@ -3,10 +3,10 @@ title: NC-series - Azure Virtual Machines description: Specifications for the NC-series VMs. author: vikancha-MSFT ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.date: 02/03/2020 -ms.author: jushiman +ms.author: vikancha --- # NC-series diff --git a/articles/virtual-machines/nct4-v3-series.md b/articles/virtual-machines/nct4-v3-series.md index c05b96b2dc35e..a7c893a055d54 100644 --- a/articles/virtual-machines/nct4-v3-series.md +++ b/articles/virtual-machines/nct4-v3-series.md @@ -2,7 +2,7 @@ title: NCas T4 v3-series description: Specifications for the NCas T4 v3-series VMs. 
ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes author: vikancha-MSFT ms.topic: conceptual ms.date: 01/12/2021 diff --git a/articles/virtual-machines/ncv2-series-retirement.md b/articles/virtual-machines/ncv2-series-retirement.md index ac99be91f6772..bfe924e5244de 100644 --- a/articles/virtual-machines/ncv2-series-retirement.md +++ b/articles/virtual-machines/ncv2-series-retirement.md @@ -3,7 +3,7 @@ title: NCv2-series retirement description: NCv2-series retirement by August 31, 2023 author: sherrywangms ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.date: 09/01/2021 ms.author: sherrywang diff --git a/articles/virtual-machines/ncv2-series.md b/articles/virtual-machines/ncv2-series.md index 1e86b24c27996..b79e6450653a1 100644 --- a/articles/virtual-machines/ncv2-series.md +++ b/articles/virtual-machines/ncv2-series.md @@ -3,10 +3,10 @@ title: NCv2-series - Azure Virtual Machines description: Specifications for the NCv2-series VMs. author: vikancha-MSFT ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.date: 02/03/2020 -ms.author: jushiman +ms.author: vikancha --- # NCv2-series diff --git a/articles/virtual-machines/ncv3-series.md b/articles/virtual-machines/ncv3-series.md index 6d411e1a63b61..5983474c44020 100644 --- a/articles/virtual-machines/ncv3-series.md +++ b/articles/virtual-machines/ncv3-series.md @@ -3,10 +3,10 @@ title: NCv3-series - Azure Virtual Machines description: Specifications for the NCv3-series VMs. author: vikancha-MSFT ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.date: 02/03/2020 -ms.author: jushiman +ms.author: vikancha --- # NCv3-series diff --git a/articles/virtual-machines/nd-series-retirement.md b/articles/virtual-machines/nd-series-retirement.md index 536ef2235adac..cd7d5c43d70d0 100644 --- a/articles/virtual-machines/nd-series-retirement.md +++ b/articles/virtual-machines/nd-series-retirement.md @@ -3,7 +3,7 @@ title: ND-series retirement description: ND-series retirement by August 31, 2023 author: sherrywangms ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.date: 09/01/2021 ms.author: sherrywang diff --git a/articles/virtual-machines/nd-series.md b/articles/virtual-machines/nd-series.md index 5a54c86ca0a59..072392dacd94e 100644 --- a/articles/virtual-machines/nd-series.md +++ b/articles/virtual-machines/nd-series.md @@ -3,10 +3,10 @@ title: ND-series - Azure Virtual Machines description: Specifications for the ND-series VMs. author: vikancha-MSFT ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.date: 02/03/2020 -ms.author: jushiman +ms.author: vikancha --- # ND-series diff --git a/articles/virtual-machines/nda100-v4-series.md b/articles/virtual-machines/nda100-v4-series.md index d85fcbdc832d7..cbe213085a298 100644 --- a/articles/virtual-machines/nda100-v4-series.md +++ b/articles/virtual-machines/nda100-v4-series.md @@ -2,7 +2,7 @@ title: ND A100 v4-series description: Specifications for the ND A100 v4-series VMs. 
ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes author: sherrywangms ms.author: sherrywang ms.topic: conceptual diff --git a/articles/virtual-machines/ndm-a100-v4-series.md b/articles/virtual-machines/ndm-a100-v4-series.md index 9479c49fb7066..0bcac56039b9a 100644 --- a/articles/virtual-machines/ndm-a100-v4-series.md +++ b/articles/virtual-machines/ndm-a100-v4-series.md @@ -4,7 +4,7 @@ description: Specifications for the NDm A100 v4-series VMs. author: sherrywangms ms.author: sherrywang ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.date: 10/26/2021 diff --git a/articles/virtual-machines/ndv2-series.md b/articles/virtual-machines/ndv2-series.md index 28fb9df2aaf8b..2ae664fcfe5b5 100644 --- a/articles/virtual-machines/ndv2-series.md +++ b/articles/virtual-machines/ndv2-series.md @@ -3,10 +3,10 @@ title: NDv2-series description: Specifications for the NDv2-series VMs. author: vikancha-MSFT ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.date: 02/03/2020 -ms.author: jushiman +ms.author: vikancha --- # Updated NDv2-series diff --git a/articles/virtual-machines/np-series.md b/articles/virtual-machines/np-series.md index 6fc3f4a4aafab..eb284eaa84fff 100644 --- a/articles/virtual-machines/np-series.md +++ b/articles/virtual-machines/np-series.md @@ -3,7 +3,7 @@ title: NP-series - Azure Virtual Machines description: Specifications for the NP-series VMs. author: luismcMSFT ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.date: 02/09/2021 ms.author: luismc diff --git a/articles/virtual-machines/nv-series-migration-guide.md b/articles/virtual-machines/nv-series-migration-guide.md index c77d993f9a830..b221a6b0e85a4 100644 --- a/articles/virtual-machines/nv-series-migration-guide.md +++ b/articles/virtual-machines/nv-series-migration-guide.md @@ -3,7 +3,7 @@ title: NV series migration guide description: NV series migration guide author: vikancha-MSFT ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.date: 01/12/2020 ms.author: vikancha diff --git a/articles/virtual-machines/nv-series-retirement.md b/articles/virtual-machines/nv-series-retirement.md index 4a335f5ad502f..da495ddd9c73e 100644 --- a/articles/virtual-machines/nv-series-retirement.md +++ b/articles/virtual-machines/nv-series-retirement.md @@ -3,7 +3,7 @@ title: NV series retirement description: NV series retirement starting September 1, 2021 author: vikancha-MSFT ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.date: 01/12/2020 ms.author: vikancha diff --git a/articles/virtual-machines/nv-series.md b/articles/virtual-machines/nv-series.md index 0af9ce5e6ea15..74920885e5137 100644 --- a/articles/virtual-machines/nv-series.md +++ b/articles/virtual-machines/nv-series.md @@ -3,10 +3,10 @@ title: NV-series - Azure Virtual Machines description: Specifications for the NV-series VMs. 
author: vikancha-MSFT ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.date: 03/29/2022 -ms.author: jushiman +ms.author: vikancha --- # NV-series diff --git a/articles/virtual-machines/nva10v5-series.md b/articles/virtual-machines/nva10v5-series.md index 2bf1e2c10a36c..f34dca1440aa9 100644 --- a/articles/virtual-machines/nva10v5-series.md +++ b/articles/virtual-machines/nva10v5-series.md @@ -3,7 +3,7 @@ title: NV A10 v5-series description: Specifications for the NV A10 v5-series VMs. author: vikancha-MSFT ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.date: 02/01/2022 ms.author: vikancha diff --git a/articles/virtual-machines/nvv3-series.md b/articles/virtual-machines/nvv3-series.md index 367d870a55b8a..39ff6598fcb6c 100644 --- a/articles/virtual-machines/nvv3-series.md +++ b/articles/virtual-machines/nvv3-series.md @@ -4,10 +4,10 @@ description: Specifications for the NVv3-series VMs. services: virtual-machines author: vikancha-MSFT ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.date: 02/03/2020 -ms.author: jushiman +ms.author: vikancha --- # NVv3-series diff --git a/articles/virtual-machines/nvv4-series.md b/articles/virtual-machines/nvv4-series.md index 611d5c96a937b..57f9d83e0eae7 100644 --- a/articles/virtual-machines/nvv4-series.md +++ b/articles/virtual-machines/nvv4-series.md @@ -3,7 +3,7 @@ title: NVv4-series description: Specifications for the NVv4-series VMs. author: vikancha-MSFT ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.date: 01/12/2020 ms.author: vikancha diff --git a/articles/virtual-machines/scripts/create-managed-disk-from-snapshot.md b/articles/virtual-machines/scripts/create-managed-disk-from-snapshot.md index ea1151a6c4f43..f74b57505128b 100644 --- a/articles/virtual-machines/scripts/create-managed-disk-from-snapshot.md +++ b/articles/virtual-machines/scripts/create-managed-disk-from-snapshot.md @@ -9,7 +9,7 @@ manager: kavithag tags: azure-service-management ms.assetid: -ms.service: virtual-machines-linux +ms.service: virtual-machines ms.devlang: azurecli ms.topic: sample ms.tgt_pltfrm: vm-linux diff --git a/articles/virtual-machines/scripts/create-vm-from-managed-os-disks.md b/articles/virtual-machines/scripts/create-vm-from-managed-os-disks.md index 4bbaab1c1aa95..b0903c696b0e4 100644 --- a/articles/virtual-machines/scripts/create-vm-from-managed-os-disks.md +++ b/articles/virtual-machines/scripts/create-vm-from-managed-os-disks.md @@ -9,7 +9,7 @@ editor: ramankum tags: azure-service-management ms.assetid: -ms.service: virtual-machines-linux +ms.service: virtual-machines ms.devlang: azurecli ms.topic: sample ms.tgt_pltfrm: vm-linux diff --git a/articles/virtual-machines/scripts/create-vm-from-snapshot.md b/articles/virtual-machines/scripts/create-vm-from-snapshot.md index f136573590ff0..09a8f6aa8754d 100644 --- a/articles/virtual-machines/scripts/create-vm-from-snapshot.md +++ b/articles/virtual-machines/scripts/create-vm-from-snapshot.md @@ -9,7 +9,7 @@ editor: ramankum tags: azure-service-management ms.assetid: -ms.service: virtual-machines-linux +ms.service: virtual-machines ms.devlang: azurecli ms.topic: sample ms.tgt_pltfrm: vm-linux diff --git a/articles/virtual-machines/share-gallery.md b/articles/virtual-machines/share-gallery.md index 617a52e73284c..c4ece7726b8ed 100644 --- a/articles/virtual-machines/share-gallery.md +++ 
b/articles/virtual-machines/share-gallery.md @@ -21,6 +21,9 @@ There are two main ways to share images in an Azure Compute Gallery: - Role-based access control (RBAC) lets you share resources to specific people, groups, or service principals on a granular level. - Community gallery lets you share your entire gallery publicly, to all Azure users. +> [!IMPORTANT] +> You can't currently create a Flexible virtual machine scale set from an image shared by another tenant. + ## RBAC The Azure Compute Gallery, definitions, and versions are all resources, they can be shared using the built-in native Azure RBAC controls. Using Azure RBAC you can share these resources to other users, service principals, and groups. You can even share access to individuals outside of the tenant they were created within. Once a user has access to the image or application version, they can deploy a VM or a Virtual Machine Scale Set. diff --git a/articles/virtual-machines/shared-image-galleries.md b/articles/virtual-machines/shared-image-galleries.md index 9e29814da4bc7..f02a7a7880d96 100644 --- a/articles/virtual-machines/shared-image-galleries.md +++ b/articles/virtual-machines/shared-image-galleries.md @@ -111,6 +111,9 @@ Image version: ## Sharing +> [!IMPORTANT] +> You can't currently create a Flexible virtual machine scale set from an image shared by another tenant. + You can [share images](share-gallery.md) to users and groups using the standard role-based access control (RBAC) or you can share an entire gallery of images to the public, using a [community gallery (preview)](azure-compute-gallery.md#community). > [!IMPORTANT] @@ -173,12 +176,30 @@ To list all the Azure Compute Gallery resources across subscriptions that you ha 1. Select all the subscriptions under which you'd like to list all the resources. 1. Look for resources of the **Azure Compute Gallery** type. +### [Azure CLI](#tab/azure-cli) + To list all the Azure Compute Gallery resources, across subscriptions that you have permissions to, use the following command in the Azure CLI: ```azurecli az account list -otsv --query "[].id" | xargs -n 1 az sig list --subscription ``` +### [Azure PowerShell](#tab/azure-powershell) + +To list all the Azure Compute Gallery resources, across subscriptions that you have permissions to, use the following command in the Azure PowerShell: + +```azurepowershell +$params = @{ + Begin = { $currentContext = Get-AzContext } + Process = { $null = Set-AzContext -SubscriptionObject $_; Get-AzGallery } + End = { $null = Set-AzContext -Context $currentContext } +} + +Get-AzSubscription | ForEach-Object @params +``` + +--- + For more information, see [List, update, and delete image resources](update-image-resources.md). ### Can I move my existing image to an Azure Compute Gallery? @@ -225,7 +246,7 @@ Source region is the region in which your image version will be created, and tar ### How do I specify the source region while creating the image version? -While creating an image version, you can use the **--location** tag in CLI and the **-Location** tag in PowerShell to specify the source region. Please ensure the managed image that you are using as the base image to create the image version is in the same location as the location in which you intend to create the image version. Also, make sure that you pass the source region location as one of the target regions when you create an image version. 
+While creating an image version, you can use the **--location** argument in CLI and the **-Location** parameter in PowerShell to specify the source region. Please ensure the managed image that you are using as the base image to create the image version is in the same location as the location in which you intend to create the image version. Also, make sure that you pass the source region location as one of the target regions when you create an image version. ### How do I specify the number of image version replicas to be created in each region? @@ -234,11 +255,23 @@ There are two ways you can specify the number of image version replicas to be cr 1. The regional replica count which specifies the number of replicas you want to create per region. 2. The common replica count which is the default per region count in case regional replica count is not specified. -To specify the regional replica count, pass the location along with the number of replicas you want to create in that region: "South Central US=2". +### [Azure CLI](#tab/azure-cli) + +To specify the regional replica count, pass the location along with the number of replicas you want to create in that region: "South Central US=2". + +If regional replica count is not specified with each location, then the default number of replicas will be the common replica count that you specified. + +To specify the common replica count in Azure CLI, use the **--replica-count** argument in the `az sig image-version create` command. -If regional replica count is not specified with each location, then the default number of replicas will be the common replica count that you specified. +### [Azure PowerShell](#tab/azure-powershell) -To specify the common replica count in CLI, use the **--replica-count** argument in the `az sig image-version create` command. +To specify the regional replica count, pass the location along with the number of replicas you want to create in that region, `@{Name = 'South Central US';ReplicaCount = 2}`, to the **-TargetRegion** parameter in the `New-AzGalleryImageVersion` command. + +If regional replica count is not specified with each location, then the default number of replicas will be the common replica count that you specified. + +To specify the common replica count in Azure PowerShell, use the **-ReplicaCount** parameter in the `New-AzGalleryImageVersion` command. + +--- ### Can I create the gallery in a different location than the one for the image definition and image version? diff --git a/articles/virtual-machines/sizes-b-series-burstable.md b/articles/virtual-machines/sizes-b-series-burstable.md index 0db2e5de2338d..8fc9810ee190e 100644 --- a/articles/virtual-machines/sizes-b-series-burstable.md +++ b/articles/virtual-machines/sizes-b-series-burstable.md @@ -2,7 +2,7 @@ title: B-series burstable - Azure Virtual Machines description: Describes the B-series of burstable Azure VM sizes. services: virtual-machines -ms.subservice: vm-sizes-general +ms.subservice: sizes author: rishabv90 ms.service: virtual-machines ms.topic: conceptual diff --git a/articles/virtual-machines/sizes-compute.md b/articles/virtual-machines/sizes-compute.md index ed9a43aef724d..8224165b72c6b 100644 --- a/articles/virtual-machines/sizes-compute.md +++ b/articles/virtual-machines/sizes-compute.md @@ -3,7 +3,7 @@ title: Azure VM sizes - Compute optimized | Microsoft Docs description: Lists the different compute optimized sizes available for virtual machines in Azure. 
Lists information about the number of vCPUs, data disks, and NICs as well as storage throughput and network bandwidth for sizes in this series. author: mimckitt ms.service: virtual-machines -ms.subservice: vm-sizes-compute +ms.subservice: sizes ms.topic: conceptual ms.workload: infrastructure-services ms.date: 02/03/2020 diff --git a/articles/virtual-machines/sizes-field-programmable-gate-arrays.md b/articles/virtual-machines/sizes-field-programmable-gate-arrays.md index fe7d20ab654f0..48e5c74b743f7 100644 --- a/articles/virtual-machines/sizes-field-programmable-gate-arrays.md +++ b/articles/virtual-machines/sizes-field-programmable-gate-arrays.md @@ -3,7 +3,7 @@ title: Azure virtual machine sizes for field-programmable gate arrays (FPGA) description: Lists the different FPGA optimized sizes available for virtual machines in Azure. Lists information about the number of vCPUs, data disks and NICs as well as storage throughput and network bandwidth for sizes in this series. author: vikancha-MSFT ms.service: virtual-machines -ms.subservice: vm-sizes-fpga +ms.subservice: sizes ms.topic: conceptual ms.workload: infrastructure-services ms.date: 02/03/2020 diff --git a/articles/virtual-machines/sizes-general.md b/articles/virtual-machines/sizes-general.md index d49072aebf335..085a75b38167a 100644 --- a/articles/virtual-machines/sizes-general.md +++ b/articles/virtual-machines/sizes-general.md @@ -3,7 +3,7 @@ title: Azure VM sizes - General purpose | Microsoft Docs description: Lists the different general purpose sizes available for virtual machines in Azure. Lists information about the number of vCPUs, data disks, and NICs as well as storage throughput and network bandwidth for sizes in this series. author: mamccrea ms.service: virtual-machines -ms.subservice: vm-sizes-general +ms.subservice: sizes ms.topic: conceptual ms.workload: infrastructure-services ms.date: 10/20/2021 diff --git a/articles/virtual-machines/sizes-gpu.md b/articles/virtual-machines/sizes-gpu.md index ba5aac7ce406b..050ddd25218ac 100644 --- a/articles/virtual-machines/sizes-gpu.md +++ b/articles/virtual-machines/sizes-gpu.md @@ -3,11 +3,11 @@ title: Azure VM sizes - GPU | Microsoft Docs description: Lists the different GPU optimized sizes available for virtual machines in Azure. Lists information about the number of vCPUs, data disks and NICs as well as storage throughput and network bandwidth for sizes in this series. author: vikancha-MSFT ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.topic: conceptual ms.workload: infrastructure-services ms.date: 02/03/2020 -ms.author: jushiman +ms.author: vikancha --- # GPU optimized virtual machine sizes diff --git a/articles/virtual-machines/sizes-hpc.md b/articles/virtual-machines/sizes-hpc.md index 0b0f9875ffe97..bf75ff421193e 100644 --- a/articles/virtual-machines/sizes-hpc.md +++ b/articles/virtual-machines/sizes-hpc.md @@ -2,7 +2,7 @@ title: Azure VM sizes - HPC | Microsoft Docs description: Lists the different sizes available for high performance computing virtual machines in Azure. Lists information about the number of vCPUs, data disks and NICs as well as storage throughput and network bandwidth for sizes in this series. 
ms.service: virtual-machines -ms.subservice: vm-sizes-hpc +ms.subservice: sizes ms.topic: conceptual ms.workload: infrastructure-services ms.date: 03/19/2021 @@ -18,7 +18,7 @@ ms.reviewer: jushiman Azure H-series virtual machines (VMs) are designed to deliver leadership-class performance, scalability, and cost efficiency for various real-world HPC workloads. -[HBv3-series](hbv3-series.md) VMs are optimized for HPC applications such as fluid dynamics, explicit and implicit finite element analysis, weather modeling, seismic processing, reservoir simulation, and RTL simulation. HBv3 VMs feature up to 120 AMD EPYC™ 7003-series (Milan) CPU cores, 448 GB of RAM, and no hyperthreading. HBv3-series VMs also provide 350 GB/sec of memory bandwidth, up to 32 MB of L3 cache per core, up to 7 GB/s of block device SSD performance, and clock frequencies up to 3.675 GHz. +[HBv3-series](hbv3-series.md) VMs are optimized for HPC applications such as fluid dynamics, explicit and implicit finite element analysis, weather modeling, seismic processing, reservoir simulation, and RTL simulation. HBv3 VMs feature up to 120 AMD EPYC™ 7003-series (Milan) CPU cores, 448 GB of RAM, and no hyperthreading. HBv3-series VMs also provide 350 GB/sec of memory bandwidth, up to 32 MB of L3 cache per core, up to 7 GB/s of block device SSD performance, and clock frequencies up to 3.5 GHz. All HBv3-series VMs feature 200 Gb/sec HDR InfiniBand from NVIDIA Networking to enable supercomputer-scale MPI workloads. These VMs are connected in a non-blocking fat tree for optimized and consistent RDMA performance. The HDR InfiniBand fabric also supports Adaptive Routing and the Dynamic Connected Transport (DCT, in addition to standard RC and UD transports). These features enhance application performance, scalability, and consistency, and their usage is strongly recommended. diff --git a/articles/virtual-machines/sizes-memory.md b/articles/virtual-machines/sizes-memory.md index 1436797277f32..0c311c0645a1c 100644 --- a/articles/virtual-machines/sizes-memory.md +++ b/articles/virtual-machines/sizes-memory.md @@ -9,7 +9,7 @@ tags: azure-resource-manager,azure-service-management keywords: VM isolation,isolated VM,isolation,isolated ms.assetid: ms.service: virtual-machines -ms.subservice: vm-sizes-memory +ms.subservice: sizes ms.topic: conceptual ms.workload: infrastructure-services ms.date: 04/04/2022 diff --git a/articles/virtual-machines/sizes-storage.md b/articles/virtual-machines/sizes-storage.md index 969bf917b382b..16aff752282c7 100644 --- a/articles/virtual-machines/sizes-storage.md +++ b/articles/virtual-machines/sizes-storage.md @@ -1,41 +1,41 @@ ---- -title: Azure VM sizes - Storage | Microsoft Docs -description: Lists the different storage optimized sizes available for virtual machines in Azure. Lists information about the number of vCPUs, data disks, and NICs as well as storage throughput and network bandwidth for sizes in this series. -ms.subservice: vm-sizes-storage -documentationcenter: '' +--- +title: Storage optimized virtual machine sizes +description: Learn about the different storage optimized sizes available for Azure Virtual Machines (Azure VMs). Find information about the number of vCPUs, data disks, NICs, storage throughput, and network bandwidth for sizes in this series. 
author: sasha-melamed ms.service: virtual-machines -ms.topic: conceptual -ms.workload: infrastructure-services -ms.date: 02/03/2020 -ms.author: jushiman - ---- +ms.subservice: sizes +ms.topic: conceptual +ms.workload: infrastructure-services +ms.date: 06/01/2022 +ms.author: sasham +--- # Storage optimized virtual machine sizes -**Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Windows VMs :heavy_check_mark: Flexible scale sets :heavy_check_mark: Uniform scale sets - -> [!TIP] -> Try the **[Virtual machines selector tool](https://aka.ms/vm-selector)** to find other sizes that best fit your workload. - -Storage optimized VM sizes offer high disk throughput and IO, and are ideal for Big Data, SQL, NoSQL databases, data warehousing, and large transactional databases. Examples include Cassandra, MongoDB, Cloudera, and Redis. This article provides information about the number of vCPUs, data disks, and NICs as well as local storage throughput and network bandwidth for each optimized size. +**Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Windows VMs :heavy_check_mark: Flexible scale sets :heavy_check_mark: Uniform scale sets + +Storage optimized virtual machine (VM) sizes offer high disk throughput and IO, and are ideal for Big Data, SQL, NoSQL databases, data warehousing, and large transactional databases. Examples include Cassandra, MongoDB, Cloudera, and Redis. This article provides information about the number of vCPUs, data disks, NICs, local storage throughput, and network bandwidth for each optimized size. -The [Lsv2-series](lsv2-series.md) features high throughput, low latency, directly mapped local NVMe storage running on the [AMD EPYCTM 7551 processor](https://www.amd.com/en/products/epyc-7000-series) with an all core boost of 2.55GHz and a max boost of 3.0GHz. The Lsv2-series VMs come in sizes from 8 to 80 vCPU in a simultaneous multi-threading configuration. There is 8 GiB of memory per vCPU, and one 1.92TB NVMe SSD M.2 device per 8 vCPUs, with up to 19.2TB (10x1.92TB) available on the L80s v2. +> [!TIP] +> Try the [virtual machines selector tool](https://aka.ms/vm-selector) to find other sizes that best fit your workload. -## Other sizes +The Lsv3, Lasv3, and Lsv2-series feature high-throughput, low latency, directly mapped local NVMe storage. These VM series come in sizes from 8 to 80 vCPU. There are 8 GiB of memory per vCPU, and one 1.92TB NVMe SSD device per 8 vCPUs, with up to 19.2TB (10x1.92TB) available on the largest VM sizes. -- [General purpose](sizes-general.md) -- [Compute optimized](sizes-compute.md) -- [Memory optimized](sizes-memory.md) -- [GPU optimized](sizes-gpu.md) -- [High performance compute](sizes-hpc.md) -- [Previous generations](sizes-previous-gen.md) +- The [Lsv3-series](lsv3-series.md) runs on the third Generation Intel® Xeon® Platinum 8370C (Ice Lake) processor in a [hyper-threaded configuration](https://www.intel.com/content/www/us/en/architecture-and-technology/hyper-threading/hyper-threading-technology.html). This new processor features an all-core turbo clock speed of 3.5 GHz with [Intel® Turbo Boost Technology](https://www.intel.com/content/www/us/en/architecture-and-technology/turbo-boost/turbo-boost-technology.html), [Intel® Advanced-Vector Extensions 512 (Intel® AVX-512)](https://www.intel.com/content/www/us/en/architecture-and-technology/avx-512-overview.html) and [Intel® Deep Learning Boost](https://software.intel.com/content/www/us/en/develop/topics/ai/deep-learning-boost.html). 
+- The [Lasv3-series](lasv3-series.md) runs on the AMD 3rd Generation EPYC™ 7763v processor. This series runs in a multi-threaded configuration with up to 256 MB L3 cache, which can achieve a boosted maximum frequency of 3.5 GHz. +- The [Lsv2-series](lsv2-series.md) runs on the [AMD EPYC™ 7551 processor](https://www.amd.com/en/products/epyc-7000-series) with an all-core boost of 2.55 GHz and a max boost of 3.0 GHz. -## Next steps +## Other sizes -Learn more about how [Azure compute units (ACU)](acu.md) can help you compare compute performance across Azure SKUs. +- [General purpose](sizes-general.md) +- [Compute optimized](sizes-compute.md) +- [Memory optimized](sizes-memory.md) +- [GPU optimized](sizes-gpu.md) +- [High performance compute](sizes-hpc.md) +- [Previous generations](sizes-previous-gen.md) -Learn how to optimize performance on the Lsv2-series virtual machines for [Windows](windows/storage-performance.md) or [Linux](linux/storage-performance.md). +## Next steps -For more information on how Azure names its VMs, see [Azure virtual machine sizes naming conventions](./vm-naming-conventions.md). \ No newline at end of file +- Learn more about how [Azure compute units (ACU)](acu.md) can help you compare compute performance across Azure SKUs. +- Learn how to optimize performance on the Lsv2-series [Windows VMs](windows/storage-performance.md) and [Linux VMs](linux/storage-performance.md). +- For more information on how Azure names its VMs, see [Azure virtual machine sizes naming conventions](./vm-naming-conventions.md). diff --git a/articles/virtual-machines/sizes.md b/articles/virtual-machines/sizes.md index 89035cf3c6df9..df69d9c17278a 100644 --- a/articles/virtual-machines/sizes.md +++ b/articles/virtual-machines/sizes.md @@ -1,13 +1,13 @@ --- title: VM sizes description: Lists the different sizes available for virtual machines in Azure. -author: ju-shim +author: lauradolan ms.service: virtual-machines ms.subservice: sizes ms.topic: conceptual ms.workload: infrastructure-services -ms.date: 04/04/2022 -ms.author: jushiman +ms.date: 06/01/2022 +ms.author: ladolan --- # Sizes for virtual machines in Azure @@ -26,7 +26,7 @@ This article describes the available sizes and options for the Azure virtual mac | [General purpose](sizes-general.md) | B, Dsv3, Dv3, Dasv4, Dav4, DSv2, Dv2, Av2, DC, DCv2, Dv4, Dsv4, Ddv4, Ddsv4, Dv5, Dsv5, Ddv5, Ddsv5, Dasv5, Dadsv5 | Balanced CPU-to-memory ratio. Ideal for testing and development, small to medium databases, and low to medium traffic web servers. | | [Compute optimized](sizes-compute.md) | F, Fs, Fsv2, FX | High CPU-to-memory ratio. Good for medium traffic web servers, network appliances, batch processes, and application servers. | | [Memory optimized](sizes-memory.md) | Esv3, Ev3, Easv4, Eav4, Ebdsv5, Ebsv5, Ev4, Esv4, Edv4, Edsv4, Ev5, Esv5, Edv5, Edsv5, Easv5, Eadsv5, Mv2, M, DSv2, Dv2 | High memory-to-CPU ratio. Great for relational database servers, medium to large caches, and in-memory analytics. | -| [Storage optimized](sizes-storage.md) | Lsv2 | High disk throughput and IO ideal for Big Data, SQL, NoSQL databases, data warehousing and large transactional databases. | +| [Storage optimized](sizes-storage.md) | Lsv2, Lsv3, Lasv3 | High disk throughput and IO ideal for Big Data, SQL, NoSQL databases, data warehousing and large transactional databases. 
| | [GPU](sizes-gpu.md) | NC, NCv2, NCv3, NCasT4_v3, ND, NDv2, NV, NVv3, NVv4, NDasrA100_v4, NDm_A100_v4 | Specialized virtual machines targeted for heavy graphic rendering and video editing, as well as model training and inferencing (ND) with deep learning. Available with single or multiple GPUs. | | [High performance compute](sizes-hpc.md) | HB, HBv2, HBv3, HC, H | Our fastest and most powerful CPU virtual machines with optional high-throughput network interfaces (RDMA). | diff --git a/articles/virtual-machines/states-billing.md b/articles/virtual-machines/states-billing.md index 6afa831a18e12..abdde1590abd6 100644 --- a/articles/virtual-machines/states-billing.md +++ b/articles/virtual-machines/states-billing.md @@ -1,54 +1,56 @@ --- -title: States and billing status of Azure Virtual Machines -description: Overview of various states a VM can enter and when a user is billed. +title: States and billing status +description: Learn about the provisioning and power states that a virtual machine can enter. Provisioning and power states affect billing. services: virtual-machines author: mimckitt ms.service: virtual-machines ms.subservice: billing ms.topic: conceptual -ms.date: 03/8/2021 +ms.date: 06/08/2022 ms.author: mimckitt ms.reviewer: cynthn +ms.custom: kr2b-contr-experiment --- # States and billing status of Azure Virtual Machines **Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Windows VMs :heavy_check_mark: Flexible scale sets :heavy_check_mark: Uniform scale sets -Azure Virtual Machines (VMs) go through different states that can be categorized into *provisioning* and *power* states. The purpose of this article is to describe these states and specifically highlight when customers are billed for instance usage. +Azure Virtual Machines (VM) instances go through different states. There are *provisioning* and *power* states. This article describes these states and highlights when customers are billed for instance usage. ## Get states using Instance View -The instance view API provides VM running-state information. For more information, see the [Virtual Machines - Instance View](/rest/api/compute/virtualmachines/instanceview) API documentation. +The instance view API provides VM running-state information. For more information, see [Virtual Machines - Instance View](/rest/api/compute/virtualmachines/instanceview). Azure Resources Explorer provides a simple UI for viewing the VM running state: [Resource Explorer](https://resources.azure.com/). -The VM provisioning state is available (in slightly different forms) from within the VM properties `provisioningState` and the InstanceView. In the VM InstanceView there will be an element within the `status` array in the form of `ProvisioningState/[/]`. +The VM provisioning state is available, in slightly different forms, from within the VM properties `provisioningState` and the InstanceView. In the VM InstanceView, there's an element within the `status` array in the form of `ProvisioningState/[/]`. -To retrieve the power state of all the VMs in your subscription, use the [Virtual Machines - List All API](/rest/api/compute/virtualmachines/listall) with parameter **statusOnly** set to *true*. +To retrieve the power state of all the VMs in your subscription, use the [Virtual Machines - List All API](/rest/api/compute/virtualmachines/listall) with parameter `statusOnly` set to `true`. 
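+
+For example, the Azure CLI `az vm list` command with the `--show-details` flag also surfaces the power state of each VM. The following is only an illustrative way to spot-check power states from the command line, not a replacement for the REST API call above:
+
+```azurecli-interactive
+# List every VM in the current subscription together with its power state.
+az vm list --show-details --output table
+```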
> [!NOTE] -> [Virtual Machines - List All API](/rest/api/compute/virtualmachines/listall) with parameter **statusOnly** set to true will retrieve the power states of all VMs in a subscription. However, in some rare situations, the power state may not available due to intermittent issues in the retrieval process. In such situations, we recommend retrying using the same API or using [Azure Resource Health](../service-health/resource-health-overview.md) to check the power state of your VMs. - +> [Virtual Machines - List All API](/rest/api/compute/virtualmachines/listall) with parameter `statusOnly` set to `true` retrieves the power states of all VMs in a subscription. However, in some rare situations, the power state may not be available due to intermittent issues in the retrieval process. In such situations, we recommend retrying with the same API or using [Azure Resource Health](../service-health/resource-health-overview.md) to check the power state of your VMs. + ## Power states and billing The power state represents the last known state of the VM. -:::image type="content" source="./media/virtual-machines-common-states-lifecycle/vm-power-states.png" alt-text="Image shows diagram of the power states a VM can go through. "::: +:::image type="content" source="./media/virtual-machines-common-states-lifecycle/vm-power-states.png" alt-text="Diagram shows the power states a V M can go through, as described below."::: -The following table provides a description of each instance state and indicates whether it is billed for instance usage or not. +The following table provides a description of each instance state and indicates whether that state is billed for instance usage. | Power state | Description | Billing | |---|---|---| -| Starting| Virtual Machine is powering up. | Billed | -| Running | Virtual Machine is fully up. This is the standard working state. | Billed | -| Stopping | This is a transitional state between running and stopped. | Billed| -|Stopped | The Virtual Machine is allocated on a host but not running. Also called PoweredOff state or *Stopped (Allocated)*. This can be result of invoking the PowerOff API operation or invoking shutdown from within the guest OS. The Stopped state may also be observed briefly during VM creation or while starting a VM from Deallocated state. | Billed | -| Deallocating | This is the transitional state between running and deallocated. | Not billed* | -| Deallocated | The Virtual Machine has released the lease on the underlying hardware and is completely powered off. This state is also referred to as *Stopped (Deallocated)*. | Not billed* | +| Starting | Virtual machine is powering up. | Billed | +| Running | Virtual machine is fully up. This state is the standard working state. | Billed | +| Stopping | This state is transitional between *Running* and *Stopped*. | Billed | +| Stopped | The virtual machine is allocated on a host but not running. Also called *PoweredOff* state or *Stopped (Allocated)*. This state can be the result of invoking the `PowerOff` API operation or invoking shutdown from within the guest OS. The *Stopped* state may also be observed briefly during VM creation or while starting a VM from *Deallocated* state. | Billed | +| Deallocating | This state is transitional between *Running* and *Deallocated*. | Not billed* | +| Deallocated | The virtual machine has released the lease on the underlying hardware and is powered off. This state is also referred to as *Stopped (Deallocated)*.
| Not billed* | +\* Some Azure resources, such as [Disks](https://azure.microsoft.com/pricing/details/managed-disks) and [Networking](https://azure.microsoft.com/pricing/details/bandwidth/) continue to incur charges. -**Example of PowerState in JSON** +Example of PowerState in JSON: ```json { @@ -58,9 +60,6 @@ The following table provides a description of each instance state and indicates } ``` -* Some Azure resources, such as [Disks](https://azure.microsoft.com/pricing/details/managed-disks) and [Networking](https://azure.microsoft.com/pricing/details/bandwidth/) will continue to incur charges. - - ## Provisioning states The provisioning state is the status of a user-initiated, control-plane operation on the VM. These states are separate from the power state of a VM. @@ -69,22 +68,23 @@ The provisioning state is the status of a user-initiated, control-plane operatio |---|---| | Creating | Virtual machine is being created. | | Updating | Virtual machine is updating to the latest model. Some non-model changes to a virtual machine such as start and restart fall under the updating state. | -| Failed | Last operation on the virtual machine resource was not successful. | -| Succeeded | Last operation on the virtual machine resource was successful. | -| Deleting | Virtual machine is being deleted. | -| Migrating | Seen when migrating from Azure Service Manager to Azure Resource Manager. | +| Failed | Last operation on the virtual machine resource was unsuccessful. | +| Succeeded | Last operation on the virtual machine resource was successful. | +| Deleting | Virtual machine is being deleted. | +| Migrating | Seen when migrating from Azure Service Manager to Azure Resource Manager. | ## OS Provisioning states -OS Provisioning states only apply to virtual machines created with a [generalized](./linux/imaging.md#generalized-images) OS image. [Specialized](./linux/imaging.md#specialized-images) images and disks attached as OS disk will not display these states. The OS provisioning state is not shown separately. It is a sub-state of the Provisioning State in the VM instanceView. For example, `ProvisioningState/creating/osProvisioningComplete`. -:::image type="content" source="./media/virtual-machines-common-states-lifecycle/os-provisioning-states.png" alt-text="Image shows the OS provisioning states a VM can go through."::: +OS Provisioning states only apply to virtual machines created with a [generalized](./linux/imaging.md#generalized-images) OS image. [Specialized](./linux/imaging.md#specialized-images) images and disks attached as OS disk don't display these states. The OS provisioning state isn't shown separately. It's a substate of the Provisioning State in the VM InstanceView. For example, `ProvisioningState/creating/osProvisioningComplete`. -| OS Provisioning state | Description | +:::image type="content" source="./media/virtual-machines-common-states-lifecycle/os-provisioning-states.png" alt-text="Diagram shows the O S provisioning states a V M can go through, as described below."::: + +| OS Provisioning state | Description | |---|---| | OSProvisioningInProgress | The VM is running and the initialization (setup) of the Guest OS is in progress. | -| OSProvisioningComplete | This is a short-lived state. The virtual machine quickly transitions from this state to **Success**. If extensions are still being installed you will continue to see this state until they are complete. | -| Succeeded | The user-initiated actions have completed. | -| Failed | Represents a failed operation. 
Refer to the error code for more information and possible solutions. | +| OSProvisioningComplete | This state is a short-lived state. The virtual machine quickly transitions from this state to *Success*. If extensions are still being installed, you continue to see this state until installation is complete. | +| Succeeded | The user-initiated actions have completed. | +| Failed | Represents a failed operation. For more information and possible solutions, see the error code. | ## Troubleshooting VM states @@ -92,8 +92,8 @@ To troubleshoot specific VM state issues, see [Troubleshoot Windows VM deploymen For other troubleshooting help visit [Azure Virtual Machines troubleshooting documentation](/troubleshoot/azure/virtual-machines/welcome-virtual-machines). - ## Next steps + - Review the [Azure Cost Management and Billing documentation](../cost-management-billing/index.yml) - Use the [Azure Pricing calculator](https://azure.microsoft.com/pricing/calculator/) to plan your deployments. -- Learn more about monitoring your VM, see [Monitor virtual machines in Azure](../azure-monitor/vm/monitor-vm-azure.md). \ No newline at end of file +- Learn more about monitoring your VM, see [Monitor virtual machines in Azure](../azure-monitor/vm/monitor-vm-azure.md). diff --git a/articles/virtual-machines/trusted-launch-portal.md b/articles/virtual-machines/trusted-launch-portal.md index 89bcd768417cf..13de95960806f 100644 --- a/articles/virtual-machines/trusted-launch-portal.md +++ b/articles/virtual-machines/trusted-launch-portal.md @@ -92,7 +92,7 @@ az vm update \ ### [PowerShell](#tab/powershell) -In order to provision a VM with Trusted Launch, it first needs to be enabled with the `TrustedLaunch` using the `Set-AzVmSecurityType` cmdlet. Then you can use the Set-AzVmUefi cmdlet to set the vTPM and SecureBoot configuration. Use the below snippet as a quick start, remember to replace the values in this example with your own. +In order to provision a VM with Trusted Launch, it first needs to be enabled with the `TrustedLaunch` using the `Set-AzVmSecurityProfile` cmdlet. Then you can use the Set-AzVmUefi cmdlet to set the vTPM and SecureBoot configuration. Use the below snippet as a quick start, remember to replace the values in this example with your own. ```azurepowershell-interactive $resourceGroup = "myResourceGroup" @@ -128,7 +128,7 @@ $vm = Set-AzVMOSDisk -VM $vm ` -StorageAccountType "StandardSSD_LRS" ` -CreateOption "FromImage" -$vm = Set-AzVmSecurityType -VM $vm ` +$vm = Set-AzVmSecurityProfile -VM $vm ` -SecurityType "TrustedLaunch" $vm = Set-AzVmUefi -VM $vm ` diff --git a/articles/virtual-machines/trusted-launch.md b/articles/virtual-machines/trusted-launch.md index a3b1f3dfa857f..4986bc9281adb 100644 --- a/articles/virtual-machines/trusted-launch.md +++ b/articles/virtual-machines/trusted-launch.md @@ -6,7 +6,7 @@ ms.author: dejv ms.service: virtual-machines ms.subservice: trusted-launch ms.topic: conceptual -ms.date: 05/02/2022 +ms.date: 05/31/2022 ms.reviewer: cynthn ms.custom: template-concept; references_regions --- @@ -33,9 +33,9 @@ Azure offers trusted launch as a seamless way to improve the security of [genera **VM size support**: - B-series -- Dav4-series, Dasv4-series - DCsv2-series - Dv4-series, Dsv4-series, Dsv3-series, Dsv2-series +- Dav4-series, Dasv4-series - Ddv4-series, Ddsv4-series - Dv5-series, Dsv5-series - Ddv5-series, Ddsv5-series @@ -168,11 +168,69 @@ Trusted launch now allows images to be created and shared through the Azure Comp ### Does trusted launch support Azure Backup? 
-Trusted launch now supports Azure Backup in preview. For more information, see [Support matrix for Azure VM backup](../backup/backup-support-matrix-iaas.md#vm-compute-support). +Trusted launch now supports Azure Backup. For more information, see [Support matrix for Azure VM backup](../backup/backup-support-matrix-iaas.md#vm-compute-support). ### Does trusted launch support ephemeral OS disks? -Trusted launch now supports ephemeral OS disks in preview. Note that, while using ephemeral disks for Trusted Launch VMs, keys and secrets generated or sealed by the vTPM after the creation of the VM may not be persisted across operations like reimaging and platform events like service healing. For more information, see [Trusted Launch for Ephemeral OS disks (Preview)](https://aka.ms/ephemeral-os-disks-support-trusted-launch). +Trusted launch supports ephemeral OS disks. Note that, while using ephemeral disks for Trusted Launch VMs, keys and secrets generated or sealed by the vTPM after the creation of the VM may not be persisted across operations like reimaging and platform events like service healing. For more information, see [Trusted Launch for Ephemeral OS disks (Preview)](https://aka.ms/ephemeral-os-disks-support-trusted-launch). + +### How can I find VM sizes that support Trusted launch? + +See the list of [Generation 2 VM sizes supporting Trusted launch](trusted-launch.md#limitations). + +The following commands can be used to check if a [Generation 2 VM Size](../virtual-machines/generation-2.md#generation-2-vm-sizes) does not support Trusted launch. + +#### CLI + +```azurecli +subscription="" +region="westus" +vmSize="Standard_NC12s_v3" + +az vm list-skus --resource-type virtualMachines --location $region --query "[?name=='$vmSize'].capabilities" --subscription $subscription +``` +#### PowerShell + +```azurepowershell +$region = "southeastasia" +$vmSize = "Standard_M64" +(Get-AzComputeResourceSku | where {$_.Locations.Contains($region) -and ($_.Name -eq $vmSize) })[0].Capabilities +``` + +The response will be similar to the following form. `TrustedLaunchDisabled True` in the output indicates that the Generation 2 VM size does not support Trusted launch. If it's a Generation 2 VM size and `TrustedLaunchDisabled` is not part of the output, it implies that Trusted launch is supported for that VM size. + +``` +Name Value +---- ----- +MaxResourceVolumeMB 8192000 +OSVhdSizeMB 1047552 +vCPUs 64 +MemoryPreservingMaintenanceSupported False +HyperVGenerations V1,V2 +MemoryGB 1000 +MaxDataDiskCount 64 +CpuArchitectureType x64 +MaxWriteAcceleratorDisksAllowed 8 +LowPriorityCapable True +PremiumIO True +VMDeploymentTypes IaaS +vCPUsAvailable 64 +ACUs 160 +vCPUsPerCore 2 +CombinedTempDiskAndCachedIOPS 80000 +CombinedTempDiskAndCachedReadBytesPerSecond 838860800 +CombinedTempDiskAndCachedWriteBytesPerSecond 838860800 +CachedDiskBytes 1318554959872 +UncachedDiskIOPS 40000 +UncachedDiskBytesPerSecond 1048576000 +EphemeralOSDiskSupported True +EncryptionAtHostSupported True +CapacityReservationSupported False +TrustedLaunchDisabled True +AcceleratedNetworkingEnabled True +RdmaEnabled False +MaxNetworkInterfaces 8 +``` ### What is VM Guest State (VMGS)? 
diff --git a/articles/virtual-machines/update-image-resources.md b/articles/virtual-machines/update-image-resources.md index a3503ac273e39..1a174f3782de1 100644 --- a/articles/virtual-machines/update-image-resources.md +++ b/articles/virtual-machines/update-image-resources.md @@ -44,7 +44,7 @@ az sig image-definition list --resource-group myGalleryRG --gallery-name myGalle **List image versions** -List image versions in your gallery using [az sig image-version list](/cli/azure/sig/image-version#az_sig_image_version_list): +List image versions in your gallery using [az sig image-version list](/cli/azure/sig/image-version#az-sig-image-version-list): ```azurecli-interactive @@ -55,7 +55,7 @@ az sig image-version list --resource-group myGalleryRG --gallery-name myGallery **Get a specific image version** -Get the ID of a specific image version in your gallery using [az sig image-version show](/cli/azure/sig/image-version#az_sig_image_version_show). +Get the ID of a specific image version in your gallery using [az sig image-version show](/cli/azure/sig/image-version#az-sig-image-version-show). ```azurecli-interactive az sig image-version show \ @@ -328,7 +328,7 @@ az sig list --query [*]."{Name:name,PublicName:sharingProfile.communityGalleryIn > As an end user, to get the public name of a community gallery, you currently need to use the portal. Go to **Virtual machines** > **Create** > **Azure virtual machine** > **Image** > **See all images** > **Community Images** > **Public gallery name**. -List all of the image definitions that are available in a community gallery using [az sig image-definition list-community](/cli/azure/sig/image-definition#az_sig_image_definition_list_community). +List all of the image definitions that are available in a community gallery using [az sig image-definition list-community](/cli/azure/sig/image-definition#az-sig-image-definition-list-community). In this example, we list all of the images in the *ContosoImage* gallery in *West US* and by name, the unique ID that is needed to create a VM, OS and OS state. @@ -339,7 +339,7 @@ In this example, we list all of the images in the *ContosoImage* gallery in *Wes --query [*]."{Name:name,ID:uniqueId,OS:osType,State:osState}" -o table ``` -List image versions shared in a community gallery using [az sig image-version list-community](/cli/azure/sig/image-version#az_sig_image_version_list_community): +List image versions shared in a community gallery using [az sig image-version list-community](/cli/azure/sig/image-version#az-sig-image-version-list-community): ```azurecli-interactive az sig image-version list-community \ diff --git a/articles/virtual-machines/vm-applications-how-to.md b/articles/virtual-machines/vm-applications-how-to.md index 91e3f3ce9e577..8227c1bc192e3 100644 --- a/articles/virtual-machines/vm-applications-how-to.md +++ b/articles/virtual-machines/vm-applications-how-to.md @@ -29,7 +29,7 @@ Before you get started, make sure you have the following: This article assumes you already have an Azure Compute Gallery. If you don't already have a gallery, create one first. To learn more, see [Create a gallery for storing and sharing resources](create-gallery.md).. -You should have uploaded your application to a container in an Azure storage account. Your application can be stored in a block or page blob. If you choose to use a page blob, you need to byte align the files before you upload them. 
Here is a sample that will byte align your file: +You should have uploaded your application to a container in an [Azure storage account](../storage/common/storage-account-create.md). Your application can be stored in a block or page blob. If you choose to use a page blob, you need to byte align the files before you upload them. Here's a sample that will byte align your file: ```azurepowershell-interactive $inputFile = @@ -48,9 +48,9 @@ if ($remainder -ne 0){ } ``` -You need to make sure the files are publicly available, or you will need the SAS URI for the files in your storage account. You can use [Storage Explorer](../vs-azure-tools-storage-explorer-blobs.md) to quickly create a SAS URI if you don't already have one. +You need to make sure the files are publicly available, or you'll need the SAS URI for the files in your storage account. You can use [Storage Explorer](../vs-azure-tools-storage-explorer-blobs.md) to quickly create a SAS URI if you don't already have one. -If you are using PowerShell, you need to be using version 3.11.0 of the Az.Storage module. +If you're using PowerShell, you need to be using version 3.11.0 of the Az.Storage module. ## Create the VM application @@ -69,19 +69,19 @@ Choose an option below for creating your VM application definition and version: - Link to a Eula - URI of a privacy statement - URI for release notes -1. When you are done, select **Review + create**. +1. When you're done, select **Review + create**. 1. When validation completes, select **Create** to have the definition deployed. 1. Once the deployment is complete, select **Go to resource**. 1. On the page for the application, select **Create a VM application version**. The **Create a VM Application Version** page will open. 1. Enter a version number like 1.0.0. -1. Select the region where you have uploaded your application package. -1. Under **Source application package**, select **Browse**. Select the storage account, then the container where your package is located. Select the package from the list and then click **Select** when you are done. +1. Select the region where you've uploaded your application package. +1. Under **Source application package**, select **Browse**. Select the storage account, then the container where your package is located. Select the package from the list and then click **Select** when you're done. 1. Type in the **Install script**. You can also provide the **Uninstall script** and **Update script**. See the [Overview](vm-applications.md#command-interpreter) for information on how to create the scripts. 1. If you have a default configuration file uploaded to a storage account, you can select it in **Default configuration**. -1. Select **Exclude from latest** if you do not want this version to appear as the latest version when you create a VM. -1. For **End of life date**, choose a date in the future to track when this version should be retired. It is not deleted or removed automatically, it is only for your own tracking. -1. To replicate this version to other regions, select the **Replication** tab and add more regions and make changes to the number of replicas per region. The original region where your version was created must be in the list and cannot be removed. -1. When you are done making changes, select **Review + create** at the bottom of the page. +1. Select **Exclude from latest** if you don't want this version to appear as the latest version when you create a VM. +1. 
For **End of life date**, choose a date in the future to track when this version should be retired. It isn't deleted or removed automatically, it's only for your own tracking. +1. To replicate this version to other regions, select the **Replication** tab and add more regions and make changes to the number of replicas per region. The original region where your version was created must be in the list and can't be removed. +1. When you're done making changes, select **Review + create** at the bottom of the page. 1. When validation shows as passed, select **Create** to deploy your VM application version. @@ -93,13 +93,24 @@ Select the VM application from the list, and then select **Save** at the bottom :::image type="content" source="media/vmapps/select-app.png" alt-text="Screenshot showing selecting a VM application to install on the VM."::: -If you have more than one VM application to install, you can set the install order for each VM application back on the **Advanced tab**. +If you've more than one VM application to install, you can set the install order for each VM application back on the **Advanced tab**. + +You can also deploy the VM application to currently running VMs. Select the **Extensions + applications** option under **Settings** in the left menu when viewing the VM details in the portal. + +Choose **VM applications** and then select **Add application** to add your VM application. + +:::image type="content" source="media/vmapps/select-extension-app.png" alt-text="Screenshot showing selecting a VM application to install on a currently running VM."::: + +Select the VM application from the list, and then select **Save** at the bottom of the page. + +:::image type="content" source="media/vmapps/select-app.png" alt-text="Screenshot showing selecting a VM application to install on the VM."::: ### [CLI](#tab/cli) VM applications require [Azure CLI](/cli/azure/install-azure-cli) version 2.30.0 or later. -Crate the VM application definition using [az sig gallery-application create](/cli/azure/sig/gallery-application#az-sig-gallery-application-create). In this example we are creating a VM application definition named *myApp* for Linux-based VMs. +Create the VM application definition using [az sig gallery-application create](/cli/azure/sig/gallery-application#az_sig_gallery_application_create). In this example we're creating a VM application definition named *myApp* for Linux-based VMs. + ```azurecli-interactive az sig gallery-application create \ @@ -124,19 +135,26 @@ az sig gallery-application version create \ --package-file-link "https://.blob.core.windows.net//" \ --install-command "mv myApp .\myApp\myApp" \ --remove-command "rm .\myApp\myApp" \ - --update-command "mv myApp .\myApp\myApp \ + --update-command "mv myApp .\myApp\myApp" \ --default-configuration-file-link "https://.blob.core.windows.net//"\ ``` +Set a VM application to an existing VM using [az vm application set](/cli/azure/vm/application#az-vm-application-set) and replace the values of the parameters with your own. +```azurecli-interactive +az vm application set \ + --resource-group myResourceGroup \ + --name myVM \ +--app-version-ids /subscriptions/{subID}/resourceGroups/MyResourceGroup/providers/Microsoft.Compute/galleries/myGallery/applications/myApp/versions/1.0.0 \ +``` ### [PowerShell](#tab/powershell) -Create the VM application definition using `New-AzGalleryApplication`. 
In this example, we are creating a Linux app named *myApp* in the *myGallery* Azure Compute Gallery, in the *myGallery* resource group and I've given a short description of the VM application for my own use. Replace the values as needed. +Create the VM application definition using `New-AzGalleryApplication`. In this example, we're creating a Linux app named *myApp* in the *myGallery* Azure Compute Gallery, in the *myGallery* resource group and I've given a short description of the VM application for my own use. Replace the values as needed. ```azurepowershell-interactive -$galleryName = myGallery -$rgName = myResourceGroup -$applicationName = myApp +$galleryName = "myGallery" +$rgName = "myResourceGroup" +$applicationName = "myApp" New-AzGalleryApplication ` -ResourceGroupName $rgName ` -GalleryName $galleryName ` @@ -148,10 +166,13 @@ New-AzGalleryApplication ` Create a version of your application using `New-AzGalleryApplicationVersion`. Allowed characters for version are numbers and periods. Numbers must be within the range of a 32-bit integer. Format: *MajorVersion*.*MinorVersion*.*Patch*. -In this example, we are creating version number *1.0.0*. Replace the values of the variables as needed. +In this example, we're creating version number *1.0.0*. Replace the values of the variables as needed. ```azurepowershell-interactive -$version = 1.0.0 +$galleryName = "myGallery" +$rgName = "myResourceGroup" +$applicationName = "myApp" +$version = "1.0.0" New-AzGalleryApplicationVersion ` -ResourceGroupName $rgName ` -GalleryName $galleryName ` @@ -159,16 +180,19 @@ New-AzGalleryApplicationVersion ` -Name $version ` -PackageFileLink "https://.blob.core.windows.net//" ` -Location "East US" ` - -Install myApp.exe /silent ` - -Remove myApp.exe /uninstall ` + -Install "mv myApp .\myApp\myApp" ` + -Remove "rm .\myApp\myApp" ` ``` To add the application to an existing VM, get the application version and use that to get the VM application version ID. Use the ID to add the application to the VM configuration. ```azurepowershell-interactive -$vmname = "myVM" -$vm = Get-AzVM -ResourceGroupName $rgname -Name $vmname +$galleryName = "myGallery" +$rgName = "myResourceGroup" +$applicationName = "myApp" +$vmName = "myVM" +$vm = Get-AzVM -ResourceGroupName $rgname -Name $vmName $appversion = Get-AzGalleryApplicationVersion ` -GalleryApplicationName $applicationname ` -GalleryName $galleryname ` @@ -176,7 +200,7 @@ $appversion = Get-AzGalleryApplicationVersion ` -ResourceGroupName $rgname $packageid = $appversion.Id $app = New-AzVmGalleryApplication -PackageReferenceId $packageid -Add-AzVmGalleryApplication -VM $vmname -GalleryApplication $app +Add-AzVmGalleryApplication -VM $vm -GalleryApplication $app Update-AzVM -ResourceGroupName $rgname -VM $vm ``` @@ -210,7 +234,7 @@ PUT |--|--|--| | name | A unique name for the VM Application within the gallery | Max length of 117 characters. Allowed characters are uppercase or lowercase letters, digits, hyphen(-), period (.), underscore (_). Names not allowed to end with period(.). | | supportedOSType | Whether this is a Windows or Linux application | “Windows” or “Linux” | -| endOfLifeDate | A future end of life date for the application. Note that this is for reference only, and is not enforced. | Valid future date | +| endOfLifeDate | A future end of life date for the application. Note this is for reference only, and isn't enforced. | Valid future date | Create a VM application version. @@ -256,8 +280,8 @@ PUT | Update | Optional. The command to update the application. 
If not specified and an update is required, the old version will be removed and the new one installed. | Valid command for the given OS | | targetRegions/name | The name of a region to which to replicate | Validate Azure region | | targetRegions/regionalReplicaCount | Optional. The number of replicas in the region to create. Defaults to 1. | Integer between 1 and 3 inclusive | -| endOfLifeDate | A future end of life date for the application version. Note that this is for customer reference only, and is not enforced. | Valid future date | -| excludeFromLatest | If specified, this version will not be considered for latest. | True or false | +| endOfLifeDate | A future end of life date for the application version. Note this is for customer reference only, and isn't enforced. | Valid future date | +| excludeFromLatest | If specified, this version won't be considered for latest. | True or false | @@ -318,16 +342,16 @@ virtualMachineScaleSets/\<**VMSSName**\>?api-version=2019-03-01 | Field Name | Description | Limitations | |--|--|--| | order | Optional. The order in which the applications should be deployed. See below. | Validate integer | -| packageReferenceId | A reference the the gallery application version | Valid application version reference | +| packageReferenceId | A reference to the gallery application version | Valid application version reference | | configurationReference | Optional. The full url of a storage blob containing the configuration for this deployment. This will override any value provided for defaultConfiguration earlier. | Valid storage blob reference | The order field may be used to specify dependencies between applications. The rules for order are the following: | Case | Install Meaning | Failure Meaning | |--|--|--| -| No order specified | Unordered applications are installed after ordered applications. There is no guarantee of installation order amongst the unordered applications. | Installation failures of other applications, be it ordered or unordered doesn’t affect the installation of unordered applications. | +| No order specified | Unordered applications are installed after ordered applications. There's no guarantee of installation order amongst the unordered applications. | Installation failures of other applications, whether ordered or unordered, don't affect the installation of unordered applications. | | Duplicate order values | Application will be installed in any order compared to other applications with the same order. All applications of the same order will be installed after those with lower orders and before those with higher orders. | If a previous application with a lower order failed to install, no applications with this order will install. If any application with this order fails to install, no applications with a higher order will install. | -| Increasing orders | Application will be installed after those with lower orders and before those with higher orders. | If a previous application with a lower order failed to install, this application will not install. If this application fails to install, no application with a higher order will install. | +| Increasing orders | Application will be installed after those with lower orders and before those with higher orders. | If a previous application with a lower order failed to install, this application won't install. If this application fails to install, no application with a higher order will install. | The response will include the full VM model. The following are the
@@ -357,7 +381,7 @@ relevant parts. ``` -If the VM applications have not yet been installed on the VM, the value will be empty. +If the VM applications haven't yet been installed on the VM, the value will be empty. --- diff --git a/articles/virtual-machines/vm-applications.md b/articles/virtual-machines/vm-applications.md index a054401e7acf1..e48578f975a44 100644 --- a/articles/virtual-machines/vm-applications.md +++ b/articles/virtual-machines/vm-applications.md @@ -35,10 +35,12 @@ Application packages provide benefits over other deployment and packaging method - Support for virtual machines, and both flexible and uniform scale sets + - If you have Network Security Group (NSG) rules applied on your VM or scale set, downloading the packages from an internet repository might not be possible. And with storage accounts, downloading packages onto locked-down VMs would require setting up private links. + ## What are VM app packages? The VM application packages use multiple resource types: diff --git a/articles/virtual-machines/vm-specialized-image-version.md b/articles/virtual-machines/vm-specialized-image-version.md index c525c25100cff..0ca6fd9044c38 100644 --- a/articles/virtual-machines/vm-specialized-image-version.md +++ b/articles/virtual-machines/vm-specialized-image-version.md @@ -177,7 +177,7 @@ To create a VM using an image shared to a community gallery, use the unique ID o As an end user, to get the public name of a community gallery, you need to use the portal. Go to **Virtual machines** > **Create** > **Azure virtual machine** > **Image** > **See all images** > **Community Images** > **Public gallery name**. -List all of the image definitions that are available in a community gallery using [az sig image-definition list-community](/cli/azure/sig/image-definition#az_sig_image_definition_list_community). In this example, we list all of the images in the *ContosoImage* gallery in *West US* and by name, the unique ID that is needed to create a VM, OS and OS state. +List all of the image definitions that are available in a community gallery using [az sig image-definition list-community](/cli/azure/sig/image-definition#az-sig-image-definition-list-community). In this example, we list all of the images in the *ContosoImage* gallery in *West US* and by name, the unique ID that is needed to create a VM, OS and OS state. ```azurecli-interactive az sig image-definition list-community \ diff --git a/articles/virtual-machines/windows/attach-disk-ps.md b/articles/virtual-machines/windows/attach-disk-ps.md index 554dfa07c715b..bab63a39d332d 100644 --- a/articles/virtual-machines/windows/attach-disk-ps.md +++ b/articles/virtual-machines/windows/attach-disk-ps.md @@ -5,7 +5,7 @@ author: roygara ms.service: virtual-machines ms.collection: windows ms.topic: how-to -ms.date: 10/16/2018 +ms.date: 06/08/2022 ms.author: rogarana ms.subservice: disks ms.custom: devx-track-azurepowershell @@ -24,6 +24,12 @@ First, review these tips: This article uses PowerShell within the [Azure Cloud Shell](../../cloud-shell/overview.md), which is constantly updated to the latest version. To open the Cloud Shell, select **Try it** from the top of any code block. +## Lower latency + +In select regions, the disk attach latency has been reduced, so you'll see an improvement of up to 15%. This is useful if you have planned/unplanned failovers between VMs, you're scaling your workload, or are running a high scale stateful workload such as Azure Kubernetes Service. 
However, this improvement is limited to the explicit disk attach command, `Add-AzVMDataDisk`. You won't see the performance improvement if you call a command that may implicitly perform an attach, like `Update-AzVM`. You don't need to take any action other than calling the explicit attach command to see this improvement. + +[!INCLUDE [virtual-machines-disks-fast-attach-detach-regions](../../../includes/virtual-machines-disks-fast-attach-detach-regions.md)] + +## Add an empty data disk to a virtual machine This example shows how to add an empty data disk to an existing virtual machine. diff --git a/articles/virtual-machines/windows/compute-benchmark-scores.md b/articles/virtual-machines/windows/compute-benchmark-scores.md index 27bf89b827c6a..e6aac8d6f6eba 100644 --- a/articles/virtual-machines/windows/compute-benchmark-scores.md +++ b/articles/virtual-machines/windows/compute-benchmark-scores.md @@ -1,14 +1,14 @@ --- -title: Compute benchmark scores for Azure Windows VMs -description: Compare SPECint compute benchmark scores for Azure VMs running Windows Server. -author: cynthn +title: Compute benchmark scores for Azure Windows VMs +description: Compare CoreMark compute benchmark scores for Azure VMs running Windows Server. +author: DavidBerg-MSFT ms.service: virtual-machines ms.subservice: sizes ms.topic: conceptual ms.workload: infrastructure-services -ms.date: 04/26/2022 -ms.author: cynthn -ms.reviewer: davberg +ms.date: 05/31/2022 +ms.author: davberg +ms.reviewer: ladolan --- # Compute benchmark scores for Windows VMs @@ -32,98 +32,101 @@ The following CoreMark benchmark scores show compute performance for select Azur | VM Size | CPU | vCPUs | NUMA Nodes | Memory(GiB) | Avg Score | StdDev | StdDev% | #Runs | | --- | --- | ---: | ---: | ---: | ---: | ---: | ---: | ---: | -| Standard_F2s_v2 | Intel(R) Xeon(R) Platinum 8168 CPU @ 2.70GHz | 2 | 1 | 4.0 | 34,903 | 1,101 | 3.15% | 112 | -| Standard_F2s_v2 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 2 | 1 | 4.0 | 34,738 | 1,331 | 3.83% | 224 | -| Standard_F4s_v2 | Intel(R) Xeon(R) Platinum 8168 CPU @ 2.70GHz | 4 | 1 | 8.0 | 66,828 | 1,524 | 2.28% | 168 | -| Standard_F4s_v2 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 4 | 1 | 8.0 | 66,903 | 1,047 | 1.57% | 182 | -| Standard_F8s_v2 | Intel(R) Xeon(R) Platinum 8168 CPU @ 2.70GHz | 8 | 1 | 16.0 | 131,477 | 2,180 | 1.66% | 140 | -| Standard_F8s_v2 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 8 | 1 | 16.0 | 132,533 | 1,732 | 1.31% | 210 | -| Standard_F16s_v2 | Intel(R) Xeon(R) Platinum 8168 CPU @ 2.70GHz | 16 | 1 | 32.0 | 260,760 | 3,629 | 1.39% | 112 | -| Standard_F16s_v2 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 16 | 1 | 32.0 | 265,158 | 2,185 | 0.82% | 182 | -| Standard_F32s_v2 | Intel(R) Xeon(R) Platinum 8168 CPU @ 2.70GHz | 32 | 1 | 64.0 | 525,608 | 6,270 | 1.19% | 98 | -| Standard_F32s_v2 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 32 | 1 | 64.0 | 530,137 | 6,085 | 1.15% | 140 | -| Standard_F48s_v2 | Intel(R) Xeon(R) Platinum 8168 CPU @ 2.70GHz | 48 | 2 | 96.0 | 769,768 | 7,567 | 0.98% | 112 | -| Standard_F48s_v2 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 48 | 1 | 96.0 | 742,828 | 17,316 | 2.33% | 112 | -| Standard_F64s_v2 | Intel(R) Xeon(R) Platinum 8168 CPU @ 2.70GHz | 64 | 2 | 128.0 | 1,030,552 | 8,106 | 0.79% | 70 | -| Standard_F64s_v2 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 64 | 2 | 128.0 | 1,028,052 | 9,373 | 0.91% | 168 | +| Standard_F2s_v2 | Intel(R) Xeon(R) Platinum 8168 CPU @ 2.70 GHz | 2 | 1 | 4.0 | 34,903 | 1,101 | 3.15% | 112
| +| Standard_F2s_v2 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 2 | 1 | 4.0 | 34,738 | 1,331 | 3.83% | 224 | +| Standard_F4s_v2 | Intel(R) Xeon(R) Platinum 8168 CPU @ 2.70 GHz | 4 | 1 | 8.0 | 66,828 | 1,524 | 2.28% | 168 | +| Standard_F4s_v2 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 4 | 1 | 8.0 | 66,903 | 1,047 | 1.57% | 182 | +| Standard_F8s_v2 | Intel(R) Xeon(R) Platinum 8168 CPU @ 2.70 GHz | 8 | 1 | 16.0 | 131,477 | 2,180 | 1.66% | 140 | +| Standard_F8s_v2 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 8 | 1 | 16.0 | 132,533 | 1,732 | 1.31% | 210 | +| Standard_F16s_v2 | Intel(R) Xeon(R) Platinum 8168 CPU @ 2.70 GHz | 16 | 1 | 32.0 | 260,760 | 3,629 | 1.39% | 112 | +| Standard_F16s_v2 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 16 | 1 | 32.0 | 265,158 | 2,185 | 0.82% | 182 | +| Standard_F32s_v2 | Intel(R) Xeon(R) Platinum 8168 CPU @ 2.70 GHz | 32 | 1 | 64.0 | 525,608 | 6,270 | 1.19% | 98 | +| Standard_F32s_v2 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 32 | 1 | 64.0 | 530,137 | 6,085 | 1.15% | 140 | +| Standard_F48s_v2 | Intel(R) Xeon(R) Platinum 8168 CPU @ 2.70 GHz | 48 | 2 | 96.0 | 769,768 | 7,567 | 0.98% | 112 | +| Standard_F48s_v2 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 48 | 1 | 96.0 | 742,828 | 17,316 | 2.33% | 112 | +| Standard_F64s_v2 | Intel(R) Xeon(R) Platinum 8168 CPU @ 2.70 GHz | 64 | 2 | 128.0 | 1,030,552 | 8,106 | 0.79% | 70 | +| Standard_F64s_v2 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 64 | 2 | 128.0 | 1,028,052 | 9,373 | 0.91% | 168 | +| Standard_F72s_v2 | Intel(R) Xeon(R) Platinum 8168 CPU @ 2.70 GHz | 72 | 2 | 144.0 | N/A | - | - | - | +| Standard_F72s_v2 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 72 | 2 | 144.0 | N/A | - | - | - | + ### Fs - Compute Optimized + Premium Storage (04/28/2021 PBIID:9198755) | VM Size | CPU | vCPUs | NUMA Nodes | Memory(GiB) | Avg Score | StdDev | StdDev% | #Runs | | --- | --- | ---: | ---: | ---: | ---: | ---: | ---: | ---: | -| Standard_F1s | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 1 | 1 | 2.0 | 16,445 | 825 | 5.02% | 42 | -| Standard_F1s | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 1 | 1 | 2.0 | 17,614 | 2,873 | 16.31% | 210 | -| Standard_F1s | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 1 | 1 | 2.0 | 16,053 | 1,802 | 11.22% | 70 | -| Standard_F1s | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 1 | 1 | 2.0 | 20,007 | 1,684 | 8.42% | 28 | -| Standard_F2s | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 2 | 1 | 4.0 | 33,451 | 3,424 | 10.24% | 70 | -| Standard_F2s | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 2 | 1 | 4.0 | 33,626 | 2,990 | 8.89% | 154 | -| Standard_F2s | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 2 | 1 | 4.0 | 34,386 | 3,851 | 11.20% | 98 | -| Standard_F2s | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 2 | 1 | 4.0 | 36,826 | 344 | 0.94% | 28 | -| Standard_F4s | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 4 | 1 | 8.0 | 67,351 | 4,407 | 6.54% | 42 | -| Standard_F4s | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 4 | 1 | 8.0 | 67,009 | 4,637 | 6.92% | 196 | -| Standard_F4s | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 4 | 1 | 8.0 | 63,668 | 3,375 | 5.30% | 84 | -| Standard_F4s | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 4 | 1 | 8.0 | 79,153 | 15,034 | 18.99% | 28 | -| Standard_F8s | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 8 | 1 | 16.0 | 128,232 | 1,272 | 0.99% | 42 | -| Standard_F8s | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 8 | 1 | 16.0 | 127,871 | 5,109 | 4.00% | 154 | -| Standard_F8s | Intel(R) Xeon(R) Platinum 8171M CPU 
@ 2.60GHz | 8 | 1 | 16.0 | 122,811 | 5,481 | 4.46% | 126 | -| Standard_F8s | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 8 | 1 | 16.0 | 154,842 | 10,354 | 6.69% | 28 | -| Standard_F16s | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 16 | 2 | 32.0 | 260,883 | 15,853 | 6.08% | 42 | -| Standard_F16s | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 16 | 1 | 32.0 | 255,762 | 4,966 | 1.94% | 182 | -| Standard_F16s | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 16 | 1 | 32.0 | 248,884 | 11,035 | 4.43% | 70 | -| Standard_F16s | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 16 | 1 | 32.0 | 310,303 | 21,942 | 7.07% | 28 | +| Standard_F1s | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 1 | 1 | 2.0 | 16,445 | 825 | 5.02% | 42 | +| Standard_F1s | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 1 | 1 | 2.0 | 17,614 | 2,873 | 16.31% | 210 | +| Standard_F1s | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 1 | 1 | 2.0 | 16,053 | 1,802 | 11.22% | 70 | +| Standard_F1s | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 1 | 1 | 2.0 | 20,007 | 1,684 | 8.42% | 28 | +| Standard_F2s | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 2 | 1 | 4.0 | 33,451 | 3,424 | 10.24% | 70 | +| Standard_F2s | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 2 | 1 | 4.0 | 33,626 | 2,990 | 8.89% | 154 | +| Standard_F2s | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 2 | 1 | 4.0 | 34,386 | 3,851 | 11.20% | 98 | +| Standard_F2s | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 2 | 1 | 4.0 | 36,826 | 344 | 0.94% | 28 | +| Standard_F4s | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 4 | 1 | 8.0 | 67,351 | 4,407 | 6.54% | 42 | +| Standard_F4s | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 4 | 1 | 8.0 | 67,009 | 4,637 | 6.92% | 196 | +| Standard_F4s | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 4 | 1 | 8.0 | 63,668 | 3,375 | 5.30% | 84 | +| Standard_F4s | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 4 | 1 | 8.0 | 79,153 | 15,034 | 18.99% | 28 | +| Standard_F8s | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 8 | 1 | 16.0 | 128,232 | 1,272 | 0.99% | 42 | +| Standard_F8s | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 8 | 1 | 16.0 | 127,871 | 5,109 | 4.00% | 154 | +| Standard_F8s | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 8 | 1 | 16.0 | 122,811 | 5,481 | 4.46% | 126 | +| Standard_F8s | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 8 | 1 | 16.0 | 154,842 | 10,354 | 6.69% | 28 | +| Standard_F16s | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 16 | 2 | 32.0 | 260,883 | 15,853 | 6.08% | 42 | +| Standard_F16s | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 16 | 1 | 32.0 | 255,762 | 4,966 | 1.94% | 182 | +| Standard_F16s | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 16 | 1 | 32.0 | 248,884 | 11,035 | 4.43% | 70 | +| Standard_F16s | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 16 | 1 | 32.0 | 310,303 | 21,942 | 7.07% | 28 | ### F - Compute Optimized (04/28/2021 PBIID:9198755) | VM Size | CPU | vCPUs | NUMA Nodes | Memory(GiB) | Avg Score | StdDev | StdDev% | #Runs | | --- | --- | ---: | ---: | ---: | ---: | ---: | ---: | ---: | -| Standard_F1 | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 1 | 1 | 2.0 | 17,356 | 1,151 | 6.63% | 112 | -| Standard_F1 | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 1 | 1 | 2.0 | 16,508 | 1,740 | 10.54% | 154 | -| Standard_F1 | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 1 | 1 | 2.0 | 16,076 | 2,065 | 12.84% | 70 | -| Standard_F1 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 1 | 1 | 2.0 | 20,074 | 1,612 | 8.03% | 14 | -| Standard_F2 | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 2 | 1 
| 4.0 | 32,770 | 1,915 | 5.84% | 126 | -| Standard_F2 | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 2 | 1 | 4.0 | 33,081 | 2,242 | 6.78% | 126 | -| Standard_F2 | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 2 | 1 | 4.0 | 33,310 | 2,532 | 7.60% | 84 | -| Standard_F2 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 2 | 1 | 4.0 | 40,746 | 2,027 | 4.98% | 14 | -| Standard_F4 | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 4 | 1 | 8.0 | 65,694 | 3,512 | 5.35% | 126 | -| Standard_F4 | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 4 | 1 | 8.0 | 65,054 | 3,457 | 5.31% | 154 | -| Standard_F4 | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 4 | 1 | 8.0 | 61,607 | 3,662 | 5.94% | 56 | -| Standard_F4 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 4 | 1 | 8.0 | 76,884 | 1,763 | 2.29% | 14 | -| Standard_F8 | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 8 | 1 | 16.0 | 130,415 | 5,353 | 4.10% | 98 | -| Standard_F8 | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 8 | 1 | 16.0 | 126,139 | 2,917 | 2.31% | 126 | -| Standard_F8 | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 8 | 1 | 16.0 | 122,443 | 4,391 | 3.59% | 98 | -| Standard_F8 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 8 | 1 | 16.0 | 144,696 | 2,172 | 1.50% | 14 | -| Standard_F16 | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 16 | 2 | 32.0 | 253,473 | 8,597 | 3.39% | 140 | -| Standard_F16 | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 16 | 1 | 32.0 | 257,457 | 7,596 | 2.95% | 126 | -| Standard_F16 | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 16 | 1 | 32.0 | 244,559 | 8,036 | 3.29% | 70 | -| Standard_F16 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 16 | 1 | 32.0 | 283,565 | 8,683 | 3.06% | 14 | +| Standard_F1 | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 1 | 1 | 2.0 | 17,356 | 1,151 | 6.63% | 112 | +| Standard_F1 | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 1 | 1 | 2.0 | 16,508 | 1,740 | 10.54% | 154 | +| Standard_F1 | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 1 | 1 | 2.0 | 16,076 | 2,065 | 12.84% | 70 | +| Standard_F1 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 1 | 1 | 2.0 | 20,074 | 1,612 | 8.03% | 14 | +| Standard_F2 | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 2 | 1 | 4.0 | 32,770 | 1,915 | 5.84% | 126 | +| Standard_F2 | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 2 | 1 | 4.0 | 33,081 | 2,242 | 6.78% | 126 | +| Standard_F2 | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 2 | 1 | 4.0 | 33,310 | 2,532 | 7.60% | 84 | +| Standard_F2 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 2 | 1 | 4.0 | 40,746 | 2,027 | 4.98% | 14 | +| Standard_F4 | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 4 | 1 | 8.0 | 65,694 | 3,512 | 5.35% | 126 | +| Standard_F4 | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 4 | 1 | 8.0 | 65,054 | 3,457 | 5.31% | 154 | +| Standard_F4 | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 4 | 1 | 8.0 | 61,607 | 3,662 | 5.94% | 56 | +| Standard_F4 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 4 | 1 | 8.0 | 76,884 | 1,763 | 2.29% | 14 | +| Standard_F8 | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 8 | 1 | 16.0 | 130,415 | 5,353 | 4.10% | 98 | +| Standard_F8 | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 8 | 1 | 16.0 | 126,139 | 2,917 | 2.31% | 126 | +| Standard_F8 | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 8 | 1 | 16.0 | 122,443 | 4,391 | 3.59% | 98 | +| Standard_F8 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 8 | 1 | 16.0 | 144,696 | 2,172 | 1.50% | 14 | +| Standard_F16 | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 16 | 2 | 32.0 | 253,473 | 8,597 | 3.39% | 140 | +| 
Standard_F16 | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 16 | 1 | 32.0 | 257,457 | 7,596 | 2.95% | 126 |
+| Standard_F16 | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 16 | 1 | 32.0 | 244,559 | 8,036 | 3.29% | 70 |
+| Standard_F16 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 16 | 1 | 32.0 | 283,565 | 8,683 | 3.06% | 14 |
### GS - Compute Optimized + Premium Storage (05/27/2021 PBIID:9198755)
| VM Size | CPU | vCPUs | NUMA Nodes | Memory(GiB) | Avg Score | StdDev | StdDev% | #Runs |
| --- | --- | ---: | ---: | ---: | ---: | ---: | ---: | ---: |
-| Standard_GS1 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00GHz | 2 | 1 | 28.0 | 35,593 | 2,888 | 8.11% | 252 |
-| Standard_GS2 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00GHz | 4 | 1 | 56.0 | 72,188 | 5,949 | 8.24% | 252 |
-| Standard_GS3 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00GHz | 8 | 1 | 112.0 | 132,665 | 6,910 | 5.21% | 238 |
-| Standard_GS4 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00GHz | 16 | 1 | 224.0 | 261,542 | 3,722 | 1.42% | 252 |
-| Standard_GS4-4 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00GHz | 4 | 1 | 224.0 | 79,352 | 4,935 | 6.22% | 224 |
-| Standard_GS4-8 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00GHz | 8 | 1 | 224.0 | 137,774 | 6,887 | 5.00% | 238 |
-| Standard_GS5 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00GHz | 32 | 2 | 448.0 | 507,026 | 6,895 | 1.36% | 252 |
-| Standard_GS5-8 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00GHz | 8 | 2 | 448.0 | 157,541 | 3,151 | 2.00% | 238 |
-| Standard_GS5-16 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00GHz | 16 | 2 | 448.0 | 278,656 | 5,235 | 1.88% | 224 |
+| Standard_GS1 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00 GHz | 2 | 1 | 28.0 | 35,593 | 2,888 | 8.11% | 252 |
+| Standard_GS2 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00 GHz | 4 | 1 | 56.0 | 72,188 | 5,949 | 8.24% | 252 |
+| Standard_GS3 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00 GHz | 8 | 1 | 112.0 | 132,665 | 6,910 | 5.21% | 238 |
+| Standard_GS4 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00 GHz | 16 | 1 | 224.0 | 261,542 | 3,722 | 1.42% | 252 |
+| Standard_GS4-4 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00 GHz | 4 | 1 | 224.0 | 79,352 | 4,935 | 6.22% | 224 |
+| Standard_GS4-8 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00 GHz | 8 | 1 | 224.0 | 137,774 | 6,887 | 5.00% | 238 |
+| Standard_GS5 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00 GHz | 32 | 2 | 448.0 | 507,026 | 6,895 | 1.36% | 252 |
+| Standard_GS5-8 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00 GHz | 8 | 2 | 448.0 | 157,541 | 3,151 | 2.00% | 238 |
+| Standard_GS5-16 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00 GHz | 16 | 2 | 448.0 | 278,656 | 5,235 | 1.88% | 224 |
### G - Compute Optimized (05/27/2021 PBIID:9198755)
| VM Size | CPU | vCPUs | NUMA Nodes | Memory(GiB) | Avg Score | StdDev | StdDev% | #Runs |
| --- | --- | ---: | ---: | ---: | ---: | ---: | ---: | ---: |
-| Standard_G1 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00GHz | 2 | 1 | 28.0 | 36,386 | 4,100 | 11.27% | 252 |
-| Standard_G2 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00GHz | 4 | 1 | 56.0 | 72,484 | 5,563 | 7.67% | 252 |
-| Standard_G3 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00GHz | 8 | 1 | 112.0 | 136,618 | 5,714 | 4.18% | 252 |
-| Standard_G4 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00GHz | 16 | 1 | 224.0 | 261,708 | 3,426 | 1.31% | 238 |
-| Standard_G5 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00GHz | 32 | 2 | 448.0 | 507,423 | 7,261 | 1.43% | 252 |
+| Standard_G1 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00 GHz | 2 | 1 | 28.0 | 36,386 | 4,100 | 11.27% | 252 |
+| Standard_G2 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00 GHz | 4 | 1 | 56.0 | 72,484 | 5,563 | 7.67% | 
252 | +| Standard_G3 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00 GHz | 8 | 1 | 112.0 | 136,618 | 5,714 | 4.18% | 252 | +| Standard_G4 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00 GHz | 16 | 1 | 224.0 | 261,708 | 3,426 | 1.31% | 238 | +| Standard_G5 | Intel(R) Xeon(R) CPU E5-2698B v3 @ 2.00 GHz | 32 | 2 | 448.0 | 507,423 | 7,261 | 1.43% | 252 | ## General purpose @@ -132,44 +135,44 @@ The following CoreMark benchmark scores show compute performance for select Azur | VM Size | CPU | vCPUs | NUMA Nodes | Memory(GiB) | Avg Score | StdDev | StdDev% | #Runs | | --- | --- | ---: | ---: | ---: | ---: | ---: | ---: | ---: | -| Standard_B1ms | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 1 | 1 | 2.0 | 18,093 | 679 | 3.75% | 42 | -| Standard_B1ms | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 1 | 1 | 2.0 | 18,197 | 1,341 | 7.37% | 168 | -| Standard_B1ms | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 1 | 1 | 2.0 | 17,975 | 920 | 5.12% | 112 | -| Standard_B1ms | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 1 | 1 | 2.0 | 20,176 | 1,568 | 7.77% | 28 | -| Standard_B2s | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 2 | 1 | 4.0 | 35,546 | 660 | 1.86% | 42 | -| Standard_B2s | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 2 | 1 | 4.0 | 36,569 | 2,172 | 5.94% | 154 | -| Standard_B2s | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 2 | 1 | 4.0 | 36,136 | 924 | 2.56% | 140 | -| Standard_B2s | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 2 | 1 | 4.0 | 42,546 | 834 | 1.96% | 14 | -| Standard_B2hms | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 2 | 1 | 8.0 | 36,949 | 1,494 | 4.04% | 28 | -| Standard_B2hms | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 2 | 1 | 8.0 | 36,512 | 2,537 | 6.95% | 70 | -| Standard_B2hms | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 2 | 1 | 8.0 | 36,389 | 990 | 2.72% | 56 | -| Standard_B2ms | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 2 | 1 | 8.0 | 35,758 | 1,028 | 2.88% | 42 | -| Standard_B2ms | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 2 | 1 | 8.0 | 36,028 | 1,605 | 4.45% | 182 | -| Standard_B2ms | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 2 | 1 | 8.0 | 36,122 | 2,128 | 5.89% | 112 | -| Standard_B2ms | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 2 | 1 | 8.0 | 42,525 | 672 | 1.58% | 14 | -| Standard_B4hms | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 4 | 1 | 16.0 | 71,028 | 879 | 1.24% | 28 | -| Standard_B4hms | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 4 | 1 | 16.0 | 73,126 | 2,954 | 4.04% | 56 | -| Standard_B4hms | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 4 | 1 | 16.0 | 68,451 | 1,571 | 2.29% | 56 | -| Standard_B4hms | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 4 | 1 | 16.0 | 83,525 | 563 | 0.67% | 14 | -| Standard_B4ms | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 4 | 1 | 16.0 | 70,831 | 1,135 | 1.60% | 28 | -| Standard_B4ms | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 4 | 1 | 16.0 | 70,987 | 2,287 | 3.22% | 168 | -| Standard_B4ms | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 4 | 1 | 16.0 | 68,796 | 1,897 | 2.76% | 84 | -| Standard_B4ms | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 4 | 1 | 16.0 | 81,712 | 4,042 | 4.95% | 70 | -| Standard_B8ms | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 8 | 1 | 32.0 | 141,620 | 2,256 | 1.59% | 42 | -| Standard_B8ms | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 8 | 1 | 32.0 | 139,090 | 3,229 | 2.32% | 182 | -| Standard_B8ms | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 8 | 1 | 32.0 | 135,510 | 2,653 | 1.96% | 112 | -| Standard_B8ms | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 8 | 1 | 32.0 | 164,510 | 
2,254 | 1.37% | 14 | -| Standard_B12ms | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 12 | 1 | 48.0 | 206,957 | 5,240 | 2.53% | 56 | -| Standard_B12ms | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 12 | 1 | 48.0 | 211,461 | 4,115 | 1.95% | 154 | -| Standard_B12ms | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 12 | 1 | 48.0 | 200,729 | 3,475 | 1.73% | 140 | -| Standard_B16ms | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 16 | 2 | 64.0 | 273,257 | 3,862 | 1.41% | 42 | -| Standard_B16ms | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 16 | 1 | 64.0 | 282,187 | 5,030 | 1.78% | 154 | -| Standard_B16ms | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 16 | 1 | 64.0 | 265,834 | 5,545 | 2.09% | 112 | -| Standard_B16ms | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 16 | 1 | 64.0 | 331,694 | 3,537 | 1.07% | 28 | -| Standard_B20ms | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz | 20 | 2 | 80.0 | 334,369 | 8,555 | 2.56% | 42 | -| Standard_B20ms | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz | 20 | 1 | 80.0 | 345,686 | 6,702 | 1.94% | 154 | -| Standard_B20ms | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz | 20 | 1 | 80.0 | 328,900 | 7,625 | 2.32% | 126 | -| Standard_B20ms | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 20 | 1 | 80.0 | 409,515 | 4,792 | 1.17% | 14 | +| Standard_B1ms | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 1 | 1 | 2.0 | 18,093 | 679 | 3.75% | 42 | +| Standard_B1ms | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 1 | 1 | 2.0 | 18,197 | 1,341 | 7.37% | 168 | +| Standard_B1ms | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 1 | 1 | 2.0 | 17,975 | 920 | 5.12% | 112 | +| Standard_B1ms | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 1 | 1 | 2.0 | 20,176 | 1,568 | 7.77% | 28 | +| Standard_B2s | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 2 | 1 | 4.0 | 35,546 | 660 | 1.86% | 42 | +| Standard_B2s | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 2 | 1 | 4.0 | 36,569 | 2,172 | 5.94% | 154 | +| Standard_B2s | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 2 | 1 | 4.0 | 36,136 | 924 | 2.56% | 140 | +| Standard_B2s | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 2 | 1 | 4.0 | 42,546 | 834 | 1.96% | 14 | +| Standard_B2hms | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 2 | 1 | 8.0 | 36,949 | 1,494 | 4.04% | 28 | +| Standard_B2hms | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 2 | 1 | 8.0 | 36,512 | 2,537 | 6.95% | 70 | +| Standard_B2hms | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 2 | 1 | 8.0 | 36,389 | 990 | 2.72% | 56 | +| Standard_B2ms | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 2 | 1 | 8.0 | 35,758 | 1,028 | 2.88% | 42 | +| Standard_B2ms | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 2 | 1 | 8.0 | 36,028 | 1,605 | 4.45% | 182 | +| Standard_B2ms | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 2 | 1 | 8.0 | 36,122 | 2,128 | 5.89% | 112 | +| Standard_B2ms | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 2 | 1 | 8.0 | 42,525 | 672 | 1.58% | 14 | +| Standard_B4hms | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 4 | 1 | 16.0 | 71,028 | 879 | 1.24% | 28 | +| Standard_B4hms | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 4 | 1 | 16.0 | 73,126 | 2,954 | 4.04% | 56 | +| Standard_B4hms | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 4 | 1 | 16.0 | 68,451 | 1,571 | 2.29% | 56 | +| Standard_B4hms | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 4 | 1 | 16.0 | 83,525 | 563 | 0.67% | 14 | +| Standard_B4ms | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 4 | 1 | 16.0 | 70,831 | 1,135 | 1.60% | 28 | +| Standard_B4ms | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 4 | 1 | 16.0 | 70,987 | 
2,287 | 3.22% | 168 | +| Standard_B4ms | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 4 | 1 | 16.0 | 68,796 | 1,897 | 2.76% | 84 | +| Standard_B4ms | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 4 | 1 | 16.0 | 81,712 | 4,042 | 4.95% | 70 | +| Standard_B8ms | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 8 | 1 | 32.0 | 141,620 | 2,256 | 1.59% | 42 | +| Standard_B8ms | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 8 | 1 | 32.0 | 139,090 | 3,229 | 2.32% | 182 | +| Standard_B8ms | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 8 | 1 | 32.0 | 135,510 | 2,653 | 1.96% | 112 | +| Standard_B8ms | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 8 | 1 | 32.0 | 164,510 | 2,254 | 1.37% | 14 | +| Standard_B12ms | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 12 | 1 | 48.0 | 206,957 | 5,240 | 2.53% | 56 | +| Standard_B12ms | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 12 | 1 | 48.0 | 211,461 | 4,115 | 1.95% | 154 | +| Standard_B12ms | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 12 | 1 | 48.0 | 200,729 | 3,475 | 1.73% | 140 | +| Standard_B16ms | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 16 | 2 | 64.0 | 273,257 | 3,862 | 1.41% | 42 | +| Standard_B16ms | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 16 | 1 | 64.0 | 282,187 | 5,030 | 1.78% | 154 | +| Standard_B16ms | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 16 | 1 | 64.0 | 265,834 | 5,545 | 2.09% | 112 | +| Standard_B16ms | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 16 | 1 | 64.0 | 331,694 | 3,537 | 1.07% | 28 | +| Standard_B20ms | Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40 GHz | 20 | 2 | 80.0 | 334,369 | 8,555 | 2.56% | 42 | +| Standard_B20ms | Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30 GHz | 20 | 1 | 80.0 | 345,686 | 6,702 | 1.94% | 154 | +| Standard_B20ms | Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60 GHz | 20 | 1 | 80.0 | 328,900 | 7,625 | 2.32% | 126 | +| Standard_B20ms | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60 GHz | 20 | 1 | 80.0 | 409,515 | 4,792 | 1.17% | 14 | ### Dasv4 (03/25/2021 PBIID:9198755) @@ -183,7 +186,7 @@ The following CoreMark benchmark scores show compute performance for select Azur | Standard_D32as_v4 | AMD EPYC 7452 32-Core Processor | 32 | 4 | 128.0 | 566,270 | 8,484 | 1.50% | 140 | | Standard_D48as_v4 | AMD EPYC 7452 32-Core Processor | 48 | 6 | 192.0 | 829,547 | 15,679 | 1.89% | 126 | | Standard_D64as_v4 | AMD EPYC 7452 32-Core Processor | 64 | 8 | 256.0 | 1,088,030 | 16,708 | 1.54% | 28 | - +| Standard_D96as_v4 | AMD EPYC 7452 32-Core Processor | 96 | 12 | 384.0 | N/A | - | - | - | ### Dav4 (03/25/2021 PBIID:9198755) @@ -197,7 +200,7 @@ The following CoreMark benchmark scores show compute performance for select Azur | Standard_D32a_v4 | AMD EPYC 7452 32-Core Processor | 32 | 4 | 128.0 | 567,019 | 11,019 | 1.94% | 210 | | Standard_D48a_v4 | AMD EPYC 7452 32-Core Processor | 48 | 6 | 192.0 | 835,617 | 13,097 | 1.57% | 140 | | Standard_D64a_v4 | AMD EPYC 7452 32-Core Processor | 64 | 8 | 256.0 | 1,099,165 | 21,962 | 2.00% | 252 | - +| Standard_D96a_v4 | AMD EPYC 7452 32-Core Processor | 96 | 12 | 384.0 | N/A | - | - | - | ### DDSv4 (03/26/2021 PBIID:9198755) @@ -430,6 +433,7 @@ The following CoreMark benchmark scores show compute performance for select Azur | Standard_E64as_v4 | AMD EPYC 7452 32-Core Processor | 64 | 8 | 512.0 | 1,097,588 | 26,100 | 2.38% | 280 | | Standard_E64-16as_v4 | AMD EPYC 7452 32-Core Processor | 16 | 8 | 512.0 | 284,934 | 5,065 | 1.78% | 154 | | Standard_E64-32as_v4 | AMD EPYC 7452 32-Core Processor | 32 | 8 | 512.0 | 561,951 | 9,691 | 1.72% | 140 | +| Standard_E96as_v4 | AMD EPYC 
7452 32-Core Processor | 96 | 12 | 672.0 | N/A | - | - | - | | Standard_E96-24as_v4 | AMD EPYC 7452 32-Core Processor | 24 | 11 | 672.0 | 423,442 | 8,504 | 2.01% | 182 | | Standard_E96-48as_v4 | AMD EPYC 7452 32-Core Processor | 48 | 11 | 672.0 | 839,993 | 14,218 | 1.69% | 70 | @@ -446,6 +450,7 @@ The following CoreMark benchmark scores show compute performance for select Azur | Standard_E32a_v4 | AMD EPYC 7452 32-Core Processor | 32 | 4 | 256.0 | 565,363 | 10,941 | 1.94% | 126 | | Standard_E48a_v4 | AMD EPYC 7452 32-Core Processor | 48 | 6 | 384.0 | 837,493 | 15,803 | 1.89% | 126 | | Standard_E64a_v4 | AMD EPYC 7452 32-Core Processor | 64 | 8 | 512.0 | 1,097,111 | 30,290 | 2.76% | 336 | +| Standard_E96a_v4 | AMD EPYC 7452 32-Core Processor | 96 | 12 | 672.0 | N/A | - | - | - | ### EDSv4 (03/27/2021 PBIID:9198755) @@ -471,6 +476,13 @@ The following CoreMark benchmark scores show compute performance for select Azur | Standard_E64-16ds_v4 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 16 | 2 | 504.0 | 260,677 | 3,340 | 1.28% | 154 | | Standard_E64-32ds_v4 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 32 | 2 | 504.0 | 514,504 | 4,082 | 0.79% | 98 | +### Edsv4 Isolated Extended +(04/05/2021 PBIID:9198755) + +| VM Size | CPU | vCPUs | NUMA Nodes | Memory(GiB) | Avg Score | StdDev | StdDev% | #Runs | +| --- | --- | ---: | ---: | ---: | ---: | ---: | ---: | ---: | +| Standard_E80ids_v4 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 80 | 2 | 504.0 |N/A | - | - | - | + ### EDv4 (03/26/2021 PBIID:9198755) @@ -485,6 +497,13 @@ The following CoreMark benchmark scores show compute performance for select Azur | Standard_E48d_v4 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 48 | 2 | 384.0 | 761,410 | 21,640 | 2.84% | 336 | | Standard_E64d_v4 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 64 | 2 | 504.0 | 1,030,708 | 9,500 | 0.92% | 322 | +### EIASv4 +(04/05/2021 PBIID:9198755) + +| VM Size | CPU | vCPUs | NUMA Nodes | Memory(GiB) | Avg Score | StdDev | StdDev% | #Runs | +| --- | --- | ---: | ---: | ---: | ---: | ---: | ---: | ---: | +| Standard_E96ias_v4 | AMD EPYC 7452 32-Core Processor | 96 | 12 | 672.0 | N/A | - | - | - | + ### Esv4 (03/25/2021 PBIID:9198755) @@ -509,9 +528,13 @@ The following CoreMark benchmark scores show compute performance for select Azur | Standard_E64-16s_v4 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 16 | 2 | 504.0 | 224,499 | 3,955 | 1.76% | 168 | | Standard_E64-32s_v4 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 32 | 2 | 504.0 | 441,521 | 30,939 | 7.01% | 168 | +### Esv4 Isolated Extended +| VM Size | CPU | vCPUs | NUMA Nodes | Memory(GiB) | Avg Score | StdDev | StdDev% | #Runs | +| --- | --- | ---: | ---: | ---: | ---: | ---: | ---: | ---: | +| Standard_E80is_v4 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 80 | 2 | 504.0 | N/A | - | - | - | + ### Ev4 (03/25/2021 PBIID:9198755) - | VM Size | CPU | vCPUs | NUMA Nodes | Memory(GiB) | Avg Score | StdDev | StdDev% | #Runs | | --- | --- | ---: | ---: | ---: | ---: | ---: | ---: | ---: | | Standard_E2_v4 | Intel(R) Xeon(R) Platinum 8272CL CPU @ 2.60GHz | 2 | 1 | 16.0 | 30,825 | 2,765 | 8.97% | 406 | @@ -675,10 +698,11 @@ The following CoreMark benchmark scores show compute performance for select Azur ## About CoreMark -[CoreMark](https://www.eembc.org/coremark/faq.php) is a benchmark that tests the functionality of a microctronoller (MCU) or central processing unit (CPU). CoreMark is not system dependent, so it functions the same regardless of the platform (e.g. 
big or little endian, high-end or low-end processor).
+[CoreMark](https://www.eembc.org/coremark/faq.php) is a benchmark that tests the functionality of a microcontroller (MCU) or central processing unit (CPU). CoreMark isn't system dependent, so it functions the same regardless of the platform (for example, big or little endian, high-end or low-end processor).
-Windows numbers were computed by running CoreMark on Windows Server 2019. CoreMark was configured with the number of threads set to the number of virtual CPUs, and concurrency set to `PThreads`. The target number of iterations was adjusted based on expected performance to provide a runtime of at least 20 seconds (typically much longer). The final score represents the number of iterations completed divided by the number of seconds it took to run the test. Each test was run at least seven times on each VM. Test run dates shown above. Tests run on multiple VMs across Azure public regions the VM was supported in on the date run.
+Windows numbers were computed by running CoreMark on Windows Server 2019. CoreMark was configured with the number of threads set to the number of virtual CPUs, and concurrency set to `PThreads`. The target number of iterations was adjusted based on expected performance to provide a runtime of at least 20 seconds (typically much longer). The final score represents the number of iterations completed divided by the number of seconds it took to run the test. Each test was run at least seven times on each VM. Test run dates are shown above. Tests were run on multiple VMs across the Azure public regions that supported the VM on the date of the run. (CoreMark doesn't properly support more than 64 vCPUs on Windows, so SKUs with more than 64 vCPUs are marked as N/A.)
### Running Coremark on Azure VMs
@@ -693,7 +717,7 @@ To build and run the benchmark, type: `> make`
Full results are available in the files ```run1.log``` and ```run2.log```.
-```run1.log``` contains CoreMark results. These are the benchmark results with performance parameters.
+```run1.log``` contains CoreMark results with performance parameters.
```run2.log``` contains benchmark results with validation parameters.
**Run Time:**
@@ -717,7 +741,7 @@ The above will compile the benchmark for execution on 4 cores.
- The benchmark needs to run for at least 10 seconds, probably longer on larger systems.
- All source files must be compiled with same flags.
-- Do not change source files other than ```core_portme*``` (use ```make check``` to validate)
+- Don't change source files other than ```core_portme*``` (use ```make check``` to validate)
- Multiple runs are suggested for best results.
## GPU Series
@@ -728,4 +752,4 @@ Performance of GPU based VM series is best understood by using GPU appropriate b
## Next steps
-* For storage capacities, disk details, and additional considerations for choosing among VM sizes, see [Sizes for virtual machines](../sizes.md).
+* For storage capacities, disk details, and other considerations for choosing among VM sizes, see [Sizes for virtual machines](../sizes.md).
diff --git a/articles/virtual-machines/windows/detach-disk.md b/articles/virtual-machines/windows/detach-disk.md
index 53288b7051b00..ab0bfe7ff6f81 100644
--- a/articles/virtual-machines/windows/detach-disk.md
+++ b/articles/virtual-machines/windows/detach-disk.md
@@ -1,14 +1,14 @@
---
title: Detach a data disk from a Windows VM - Azure
description: Detach a data disk from a virtual machine in Azure using the Resource Manager deployment model.
-author: cynthn +author: roygara ms.service: virtual-machines ms.subservice: disks ms.collection: windows ms.workload: infrastructure-services ms.topic: how-to -ms.date: 03/03/2021 -ms.author: cynthn +ms.date: 06/08/2022 +ms.author: rogarana ms.custom: devx-track-azurepowershell --- @@ -23,8 +23,6 @@ When you no longer need a data disk that's attached to a virtual machine, you ca If you want to use the existing data on the disk again, you can reattach it to the same virtual machine, or another one. - - ## Detach a data disk using PowerShell You can *hot* remove a data disk using PowerShell, but make sure nothing is actively using the disk before detaching it from the VM. @@ -45,6 +43,12 @@ Update-AzVM ` The disk stays in storage but is no longer attached to a virtual machine. +### Lower latency + +In select regions, the disk detach latency has been reduced, so you'll see an improvement of up to 15%. This is useful if you have planned/unplanned failovers between VMs, you're scaling your workload, or are running a high scale stateful workload such as Azure Kubernetes Service. However, this improvement is limited to the explicit disk detach command, `Remove-AzVMDataDisk`. You won't see the performance improvement if you call a command that may implicitly perform a detach, like `Update-AzVM`. You don't need to take any action other than calling the explicit detach command to see this improvement. + +[!INCLUDE [virtual-machines-disks-fast-attach-detach-regions](../../../includes/virtual-machines-disks-fast-attach-detach-regions.md)] + ## Detach a data disk using the portal You can *hot* remove a data disk, but make sure nothing is actively using the disk before detaching it from the VM. @@ -55,7 +59,7 @@ You can *hot* remove a data disk, but make sure nothing is actively using the di 1. In the **Disks** pane, to the far right of the data disk that you would like to detach, select the **X** button to detach. 1. Select **Save** on the top of the page to save your changes. -The disk stays in storage but is no longer attached to a virtual machine. The disk is not deleted. +The disk stays in storage but is no longer attached to a virtual machine. The disk isn't deleted. ## Next steps diff --git a/articles/virtual-machines/windows/faq.yml b/articles/virtual-machines/windows/faq.yml index 173b4d7fd6752..e08932962e42c 100644 --- a/articles/virtual-machines/windows/faq.yml +++ b/articles/virtual-machines/windows/faq.yml @@ -169,4 +169,4 @@ sections: - For additional information and restrictions for password creation reference this [password guidance documentation](https://docs.microsoft.com/windows/security/threat-protection/security-policy-settings/password-must-meet-complexity-requirements#reference). + For additional information and restrictions for password creation reference this [password guidance documentation](/windows/security/threat-protection/security-policy-settings/password-must-meet-complexity-requirements#reference). \ No newline at end of file diff --git a/articles/virtual-machines/windows/hybrid-use-benefit-licensing.md b/articles/virtual-machines/windows/hybrid-use-benefit-licensing.md index abbbe1b1b9de1..2dc3685e00dd5 100644 --- a/articles/virtual-machines/windows/hybrid-use-benefit-licensing.md +++ b/articles/virtual-machines/windows/hybrid-use-benefit-licensing.md @@ -1,14 +1,12 @@ --- title: Azure Hybrid Benefit for Windows Server description: Learn how to maximize your Windows Software Assurance benefits to bring on-premises licenses to Azure. 
-author: xujing-ms ms.service: virtual-machines ms.subservice: billing ms.collection: windows ms.topic: how-to ms.workload: infrastructure-services ms.date: 4/22/2018 -ms.author: xujing ms.custom: devx-track-azurepowershell, devx-track-azurecli ms.devlang: azurecli @@ -151,7 +149,7 @@ az vm get-instance-view -g MyResourceGroup -n MyVM --query "[?licenseType=='Wind To see and count all virtual machines and virtual machine scale sets deployed with Azure Hybrid Benefit for Windows Server, you can run the following command from your subscription: ### Portal -From the Virtual Machine or Virtual machine scale sets resource blade, you can view a list of all your VM(s) and licensing type by configuring the table column to include "Azure Hybrid Benefit". The VM setting can either be in "Enabled", "Not enabled" or "Not supported" state. +From the Virtual Machine or Virtual machine scale sets resource blade, you can view a list of all your VM(s) and licensing type by configuring the table column to include "OS licensing benefit". The VM setting can either be in **Azure Hybrid Benefit for Windows**, **Not enabled**, or **Windows client with multi-tenant hosting** state. ### PowerShell For virtual machines: diff --git a/articles/virtual-machines/windows/image-builder-gallery-update-image-version.md b/articles/virtual-machines/windows/image-builder-gallery-update-image-version.md index dc01b7a5dbc30..09f9f8e39b0f8 100644 --- a/articles/virtual-machines/windows/image-builder-gallery-update-image-version.md +++ b/articles/virtual-machines/windows/image-builder-gallery-update-image-version.md @@ -7,7 +7,7 @@ ms.reviewer: cynthn ms.date: 03/02/2021 ms.topic: how-to ms.service: virtual-machines -ms.subervice: image-builder +ms.subservice: image-builder ms.collection: windows --- # Create a new Windows VM image version from an existing image version using Azure Image Builder diff --git a/articles/virtual-machines/windows/image-builder-gallery.md b/articles/virtual-machines/windows/image-builder-gallery.md index eb59f28edac42..993a17643cb72 100644 --- a/articles/virtual-machines/windows/image-builder-gallery.md +++ b/articles/virtual-machines/windows/image-builder-gallery.md @@ -7,8 +7,8 @@ ms.reviewer: cynthn ms.date: 03/02/2021 ms.topic: how-to ms.service: virtual-machines -ms.subervice: image-builder -ms.colletion: windows +ms.subservice: image-builder +ms.collection: windows ms.custom: devx-track-azurepowershell --- # Create a Windows image and distribute it to an Azure Compute Gallery diff --git a/articles/virtual-machines/windows/image-builder-powershell.md b/articles/virtual-machines/windows/image-builder-powershell.md index d97d98202700e..1491853e635a8 100644 --- a/articles/virtual-machines/windows/image-builder-powershell.md +++ b/articles/virtual-machines/windows/image-builder-powershell.md @@ -7,8 +7,8 @@ ms.reviewer: cynthn ms.date: 03/02/2021 ms.topic: how-to ms.service: virtual-machines -ms.subervice: image-builder -ms.colletion: windows +ms.subservice: image-builder +ms.collection: windows ms.custom: devx-track-azurepowershell --- # Create a Windows VM with Azure Image Builder using PowerShell diff --git a/articles/virtual-machines/windows/image-builder-vnet.md b/articles/virtual-machines/windows/image-builder-vnet.md index 7d7b06a293e31..e3fe8dd18f12b 100644 --- a/articles/virtual-machines/windows/image-builder-vnet.md +++ b/articles/virtual-machines/windows/image-builder-vnet.md @@ -7,8 +7,8 @@ ms.reviewer: cynthn ms.date: 03/02/2021 ms.topic: how-to ms.service: virtual-machines -ms.subervice: 
image-builder -ms.colletion: windows +ms.subservice: image-builder +ms.collection: windows ms.custom: devx-track-azurepowershell --- # Use Azure Image Builder for Windows VMs allowing access to an existing Azure VNET diff --git a/articles/virtual-machines/windows/image-builder.md b/articles/virtual-machines/windows/image-builder.md index 8d26b0a4f5f59..5767fa997ffd3 100644 --- a/articles/virtual-machines/windows/image-builder.md +++ b/articles/virtual-machines/windows/image-builder.md @@ -7,8 +7,8 @@ ms.reviewer: cynthn ms.date: 04/23/2021 ms.topic: how-to ms.service: virtual-machines -ms.subervice: image-builder -ms.colletion: windows +ms.subservice: image-builder +ms.collection: windows --- # Create a Windows VM with Azure Image Builder diff --git a/articles/virtual-machines/windows/n-series-amd-driver-setup.md b/articles/virtual-machines/windows/n-series-amd-driver-setup.md index 0346cf667673f..6a68e9310b1e9 100644 --- a/articles/virtual-machines/windows/n-series-amd-driver-setup.md +++ b/articles/virtual-machines/windows/n-series-amd-driver-setup.md @@ -4,7 +4,7 @@ description: How to set up AMD GPU drivers for N-series VMs running Windows Serv author: vikancha-MSFT manager: jkabat ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.collection: windows ms.topic: how-to ms.workload: infrastructure-services diff --git a/articles/virtual-machines/windows/n-series-driver-setup.md b/articles/virtual-machines/windows/n-series-driver-setup.md index b9ca308db960f..da722bcb9486f 100644 --- a/articles/virtual-machines/windows/n-series-driver-setup.md +++ b/articles/virtual-machines/windows/n-series-driver-setup.md @@ -4,7 +4,7 @@ description: How to set up NVIDIA GPU drivers for N-series VMs running Windows S author: vikancha-MSFT manager: jkabat ms.service: virtual-machines -ms.subservice: vm-sizes-gpu +ms.subservice: sizes ms.collection: windows ms.topic: how-to ms.date: 09/24/2018 diff --git a/articles/virtual-machines/windows/nsg-quickstart-powershell.md b/articles/virtual-machines/windows/nsg-quickstart-powershell.md index 6d3ee365dfa98..924fb960ae3e3 100644 --- a/articles/virtual-machines/windows/nsg-quickstart-powershell.md +++ b/articles/virtual-machines/windows/nsg-quickstart-powershell.md @@ -37,11 +37,11 @@ $httprule = New-AzNetworkSecurityRuleConfig ` -Access "Allow" ` -Protocol "Tcp" ` -Direction "Inbound" ` - -Priority "100" ` + -Priority 100 ` -SourceAddressPrefix "Internet" ` -SourcePortRange * ` -DestinationAddressPrefix * ` - -DestinationPortRange 80 + -DestinationPortRange "80" ``` Next, create your Network Security group with [New-AzNetworkSecurityGroup](/powershell/module/az.network/new-aznetworksecuritygroup) and assign the HTTP rule you just created as follows. 
The following example creates a Network Security Group named *myNetworkSecurityGroup*: diff --git a/articles/virtual-machines/windows/on-prem-to-azure.md b/articles/virtual-machines/windows/on-prem-to-azure.md index 49fa349a38bfc..10bb4c0040d4c 100644 --- a/articles/virtual-machines/windows/on-prem-to-azure.md +++ b/articles/virtual-machines/windows/on-prem-to-azure.md @@ -4,7 +4,7 @@ description: Create VMs in Azure using VHDs uploaded from other clouds like AWS author: roygara manager: twooley ms.service: storage -ms.subervice: disks +ms.subservice: disks ms.workload: infrastructure-services ms.tgt_pltfrm: vm-windows ms.topic: conceptual diff --git a/articles/virtual-machines/windows/prepare-for-upload-vhd-image.md b/articles/virtual-machines/windows/prepare-for-upload-vhd-image.md index 5d33e7797bfd2..fbfd083d948f8 100644 --- a/articles/virtual-machines/windows/prepare-for-upload-vhd-image.md +++ b/articles/virtual-machines/windows/prepare-for-upload-vhd-image.md @@ -524,7 +524,7 @@ generalized disk, see ## Convert the virtual disk to a fixed size VHD > [!NOTE] -> If you're going to use Azure PowerShell to [upload your disk to Azure](disks-upload-vhd-to-managed-disk-powershell.md) and you have [Hyper-V](/windows-server/virtualization/hyper-v/hyper-v-technology-overview) enabled, this step is optional. [Add-AzVHD](/powershell/module/az.compute/add-azvhd?view=azps-7.1.0&viewFallbackFrom=azps-5.4.0) will perform it for you. +> If you're going to use Azure PowerShell to [upload your disk to Azure](disks-upload-vhd-to-managed-disk-powershell.md) and you have [Hyper-V](/windows-server/virtualization/hyper-v/hyper-v-technology-overview) enabled, this step is optional. [Add-AzVHD](/powershell/module/az.compute/add-azvhd?view=azps-7.1.0&viewFallbackFrom=azps-5.4.0&preserve-view=true) will perform it for you. Use one of the methods in this section to convert and resize your virtual disk to the required format for Azure: @@ -571,7 +571,7 @@ You can convert a virtual disk using the [Convert-VHD](/powershell/module/hyper- cmdlet in PowerShell. If you need information about installing this cmdlet see [Install the Hyper-V role](/windows-server/virtualization/hyper-v/get-started/install-the-hyper-v-role-on-windows-server). > [!NOTE] -> If you're going to use Azure PowerShell to [upload your disk to Azure](disks-upload-vhd-to-managed-disk-powershell.md) and you have [Hyper-V](/windows-server/virtualization/hyper-v/hyper-v-technology-overview) enabled, this step is optional. [Add-AzVHD](/powershell/module/az.compute/add-azvhd?view=azps-7.1.0&viewFallbackFrom=azps-5.4.0) will perform it for you. +> If you're going to use Azure PowerShell to [upload your disk to Azure](disks-upload-vhd-to-managed-disk-powershell.md) and you have [Hyper-V](/windows-server/virtualization/hyper-v/hyper-v-technology-overview) enabled, this step is optional. [Add-AzVHD](/powershell/module/az.compute/add-azvhd?view=azps-7.1.0&viewFallbackFrom=azps-5.4.0&preserve-view=true) will perform it for you. The following example converts the disk from VHDX to VHD. It also converts the disk from a dynamically expanding disk to a fixed-size disk. @@ -587,7 +587,7 @@ disk. ### Use Hyper-V Manager to resize the disk > [!NOTE] -> If you're going to use Azure PowerShell to [upload your disk to Azure](disks-upload-vhd-to-managed-disk-powershell.md) and you have [Hyper-V](/windows-server/virtualization/hyper-v/hyper-v-technology-overview) enabled, this step is optional. 
[Add-AzVHD](/powershell/module/az.compute/add-azvhd?view=azps-7.1.0&viewFallbackFrom=azps-5.4.0) will perform it for you. +> If you're going to use Azure PowerShell to [upload your disk to Azure](disks-upload-vhd-to-managed-disk-powershell.md) and you have [Hyper-V](/windows-server/virtualization/hyper-v/hyper-v-technology-overview) enabled, this step is optional. [Add-AzVHD](/powershell/module/az.compute/add-azvhd?view=azps-7.1.0&viewFallbackFrom=azps-5.4.0&preserve-view=true) will perform it for you. 1. Open Hyper-V Manager and select your local computer on the left. In the menu above the computer list, select **Action** > **Edit Disk**. @@ -599,7 +599,7 @@ disk. ### Use PowerShell to resize the disk > [!NOTE] -> If you're going to use Azure PowerShell to [upload your disk to Azure](disks-upload-vhd-to-managed-disk-powershell.md) and you have [Hyper-V](/windows-server/virtualization/hyper-v/hyper-v-technology-overview) enabled, this step is optional. [Add-AzVHD](/powershell/module/az.compute/add-azvhd?view=azps-7.1.0&viewFallbackFrom=azps-5.4.0) will perform it for you. +> If you're going to use Azure PowerShell to [upload your disk to Azure](disks-upload-vhd-to-managed-disk-powershell.md) and you have [Hyper-V](/windows-server/virtualization/hyper-v/hyper-v-technology-overview) enabled, this step is optional. [Add-AzVHD](/powershell/module/az.compute/add-azvhd?view=azps-7.1.0&viewFallbackFrom=azps-5.4.0&preserve-view=true) will perform it for you. You can resize a virtual disk using the [Resize-VHD](/powershell/module/hyper-v/resize-vhd) cmdlet in PowerShell. If you need information about installing this cmdlet see [Install the Hyper-V role](/windows-server/virtualization/hyper-v/get-started/install-the-hyper-v-role-on-windows-server). diff --git a/articles/virtual-machines/windows/quick-create-cli.md b/articles/virtual-machines/windows/quick-create-cli.md index 44f2880fceced..cb70f81c65ea5 100644 --- a/articles/virtual-machines/windows/quick-create-cli.md +++ b/articles/virtual-machines/windows/quick-create-cli.md @@ -46,7 +46,7 @@ Using the example below, you will be prompted to enter a password at the command az vm create \ --resource-group myResourceGroup \ --name myVM \ - --image Win2019Datacenter \ + --image Win2022AzureEditionCore \ --public-ip-sku Standard \ --admin-username azureuser ``` @@ -69,32 +69,22 @@ It takes a few minutes to create the VM and supporting resources. The following Note your own `publicIpAddress` in the output from your VM. This address is used to access the VM in the next steps. -## Open port 80 for web traffic +## Install web server -By default, only RDP connections are opened when you create a Windows VM in Azure. Use [az vm open-port](/cli/azure/vm) to open TCP port 80 for use with the IIS web server: +To see your VM in action, install the IIS web server. ```azurecli-interactive -az vm open-port --port 80 --resource-group myResourceGroup --name myVM -``` - -## Connect to virtual machine - -Use the following command to create a remote desktop session from your local computer. Replace the IP address with the public IP address of your VM. When prompted, enter the credentials used when the VM was created: - -```powershell -mstsc /v:publicIpAddress +az vm run-command invoke -g MyResourceGroup -n MyVm --command-id RunPowerShellScript --scripts "Install-WindowsFeature -name Web-Server -IncludeManagementTools" ``` -## Install web server +## Open port 80 for web traffic -To see your VM in action, install the IIS web server. 
Open a PowerShell prompt on the VM and run the following command:
-```powershell
-Install-WindowsFeature -name Web-Server -IncludeManagementTools
+```azurecli-interactive
+az vm open-port --port 80 --resource-group myResourceGroup --name myVM
```
-When done, close the RDP connection to the VM.
-
## View the web server in action
With IIS installed and port 80 now open on your VM from the Internet, use a web browser of your choice to view the default IIS welcome page. Use the public IP address of your VM obtained in a previous step. The following example shows the default IIS web site:
diff --git a/articles/virtual-machines/windows/quick-create-powershell.md b/articles/virtual-machines/windows/quick-create-powershell.md
index dde3962ac33c8..da8bb4e54d3e0 100644
--- a/articles/virtual-machines/windows/quick-create-powershell.md
+++ b/articles/virtual-machines/windows/quick-create-powershell.md
@@ -31,7 +31,7 @@ To open the Cloud Shell, just select **Try it** from the upper right corner of a
Create an Azure resource group with [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup). A resource group is a logical container into which Azure resources are deployed and managed.
```azurepowershell-interactive
-New-AzResourceGroup -Name myResourceGroup -Location 'EastUS'
+New-AzResourceGroup -Name 'myResourceGroup' -Location 'EastUS'
```
## Create virtual machine
@@ -52,35 +52,17 @@ New-AzVm `
-OpenPorts 80,3389
```
-## Connect to virtual machine
-
-After the deployment has completed, RDP to the VM. To see your VM in action, the IIS web server is then installed.
-
-To see the public IP address of the VM, use the [Get-AzPublicIpAddress](/powershell/module/az.network/get-azpublicipaddress) cmdlet:
-
-```azurepowershell-interactive
-Get-AzPublicIpAddress -ResourceGroupName 'myResourceGroup' | Select-Object -Property 'IpAddress'
-```
-
-Use the following command to create a remote desktop session from your local computer. Replace `publicIpAddress` with the public IP address of your VM.
-
-```powershell
-mstsc /v:publicIpAddress
-```
-
-In the **Windows Security** window, select **More choices**, and then select **Use a different account**. Type the username as **localhost**\\*username*, enter password you created for the virtual machine, and then click **OK**.
-
-You may receive a certificate warning during the sign-in process. Click **Yes** or **Continue** to create the connection
## Install web server
-To see your VM in action, install the IIS web server. Open a PowerShell prompt on the VM and run the following command:
+To see your VM in action, install the IIS web server. Run the following command from your Azure PowerShell session; it uses Run Command to install IIS on the VM:
-```powershell
-Install-WindowsFeature -Name Web-Server -IncludeManagementTools
+```azurepowershell-interactive
+Invoke-AzVMRunCommand -ResourceGroupName 'myResourceGroup' -VMName 'myVM' -CommandId 'RunPowerShellScript' -ScriptString 'Install-WindowsFeature -Name Web-Server -IncludeManagementTools'
```
-When done, close the RDP connection to the VM.
+The `-ScriptString` parameter requires version `4.27.0` or later of the `Az.Compute` module.
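+Before you run the command, you can check which version of `Az.Compute` is installed and update it if needed. This is a minimal sketch and assumes the Az modules were installed from the PowerShell Gallery:
+
+```azurepowershell-interactive
+# List the installed Az.Compute version(s); -ScriptString needs 4.27.0 or later.
+Get-Module -ListAvailable -Name Az.Compute | Select-Object -Property Name, Version
+
+# Update the module from the PowerShell Gallery if it's older than 4.27.0.
+Update-Module -Name Az.Compute
+```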
+ ## View the web server in action @@ -93,7 +75,7 @@ With IIS installed and port 80 now open on your VM from the Internet, use a web When no longer needed, you can use the [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) cmdlet to remove the resource group, VM, and all related resources: ```azurepowershell-interactive -Remove-AzResourceGroup -Name myResourceGroup +Remove-AzResourceGroup -Name 'myResourceGroup' ``` ## Next steps diff --git a/articles/virtual-machines/windows/run-scripts-in-vm.md b/articles/virtual-machines/windows/run-scripts-in-vm.md index 1c9d89f95f6f1..db1f0d02dd77a 100644 --- a/articles/virtual-machines/windows/run-scripts-in-vm.md +++ b/articles/virtual-machines/windows/run-scripts-in-vm.md @@ -31,7 +31,7 @@ The [Custom Script Extension](../extensions/custom-script-windows.md) is primari The [Run Command](run-command.md) feature enables virtual machine and application management and troubleshooting using scripts, and is available even when the machine is not reachable, for example if the guest firewall doesn't have the RDP or SSH port open. * Run scripts in Azure virtual machines. -* Can be run using [Azure portal](run-command.md), [REST API](/rest/api/compute/virtual-machines-run-commands/run-command), [Azure CLI](/cli/azure/vm/run-command#az-vm-run-command-invoke), or [PowerShell](/powershell/module/az.compute/invoke-azvmruncommand) +* Can be run using [Azure portal](run-command.md), [REST API](/azure/virtual-machines/windows/run-command), [Azure CLI](/cli/azure/vm/run-command#az-vm-run-command-invoke), or [PowerShell](/powershell/module/az.compute/invoke-azvmruncommand) * Quickly run a script and view output and repeat as needed in the Azure portal. * Script can be typed directly or you can run one of the built-in scripts. * Run PowerShell script in Windows machines and Bash script in Linux machines. diff --git a/articles/virtual-machines/windows/storage-performance.md b/articles/virtual-machines/windows/storage-performance.md index 8296ac1a4bd2d..d431a988f5ac1 100644 --- a/articles/virtual-machines/windows/storage-performance.md +++ b/articles/virtual-machines/windows/storage-performance.md @@ -1,93 +1,104 @@ ---- -title: Optimize performance on Azure Lsv2-series virtual machines -description: Learn how to optimize performance for your solution on the Lsv2-series virtual machines using a Windows example. -author: sasha-melamed -ms.service: virtual-machines -ms.subservice: vm-sizes-storage -ms.topic: how-to -ms.workload: infrastructure-services -ms.date: 04/17/2019 -ms.author: joelpell ---- +--- +title: Optimize performance on Lsv3, Lasv3, and Lsv2-series Windows VMs +description: Learn how to optimize performance for your solution on the Lsv2-series Windows virtual machines (VMs) on Azure. +author: sasha-melamed +ms.service: virtual-machines +ms.subservice: sizes +ms.topic: how-to +ms.workload: infrastructure-services +ms.date: 06/01/2022 +ms.author: sasham +--- +# Optimize performance on Lsv3, Lasv3, and Lsv2-series Windows VMs -# Optimize performance on the Lsv2-series Windows virtual machines -**Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Windows VMs :heavy_check_mark: Flexible scale sets :heavy_check_mark: Uniform scale sets +**Applies to:** :heavy_check_mark: Windows VMs :heavy_check_mark: Uniform scale sets -Lsv2-series virtual machines support a variety of workloads that need high I/O and throughput on local storage across a wide range of applications and industries. 
The Lsv2-series is ideal for Big Data, SQL, NoSQL databases, data warehousing and large transactional databases, including Cassandra, MongoDB, Cloudera, and Redis. +Lsv3, Lasv3, and Lsv2-series Azure Virtual Machines (Azure VMs) support various workloads that need high I/O and throughput on local storage across a wide range of applications and industries. The L-series is ideal for Big Data, SQL, NoSQL databases, data warehousing and large transactional databases, including Cassandra, MongoDB, Cloudera, and Redis. -The design of the Lsv2-series Virtual Machines (VMs) maximizes the AMD EPYC™ 7551 processor to provide the best performance between the processor, memory, NVMe devices, and the VMs. In addition to maximizing the hardware performance, Lsv2-series VMs are designed to work with the needs of Windows and Linux operating systems for better performance with the hardware and the software. +Lsv3, Lasv3, and Lsv2-series VMs are designed to work with the needs of Windows and Linux operating systems for better performance with hardware and the software. -Tuning the software and hardware resulted in the optimized version of [Windows Server 2019 Datacenter](https://www.microsoft.com/cloud-platform/windows-server-pricing), released in early December 2018 to the Azure Marketplace, which supports maximum performance on the NVMe devices in Lsv2-series VMs. +Software and hardware tuning resulted in the optimized version of [Windows Server 2019 Datacenter](https://www.microsoft.com/cloud-platform/windows-server-pricing), released to the Azure Marketplace (and later versions), which support maximum performance on the NVMe devices in L-series VMs. -This article provides tips and suggestions to ensure your workloads and applications achieve the maximum performance designed into the VMs. The information on this page will be continuously updated as more Lsv2 optimized images are added to the Azure Marketplace. +This article provides tips and suggestions to ensure your workloads and applications achieve the maximum performance designed into the VMs. -## AMD EYPC™ chipset architecture +## AMD EPYC™ chipset architecture -Lsv2-series VMs use AMD EYPC™ server processors based on the Zen microarchitecture. AMD developed Infinity Fabric (IF) for EYPC™ as scalable interconnect for its NUMA model that could be used for on-die, on-package, and multi-package communications. Compared with QPI (Quick-Path Interconnect) and UPI (Ultra-Path Interconnect) used on Intel modern monolithic-die processors, AMD’s many-NUMA small-die architecture may bring both performance benefits as well as challenges. The actual impact of memory bandwidth and latency constraints could vary depending on the type of workloads running. +Lasv3 and Lsv2-series VMs use AMD EPYC™ server processors based on the Zen micro-architecture. AMD developed Infinity Fabric (IF) for EPYC™ as a scalable interconnect for its NUMA model that can be used for on-die, on-package, and multi-package communications. Compared with QPI (Quick-Path Interconnect) and UPI (Ultra-Path Interconnect), used on Intel modern monolithic-die processors, AMD's many-NUMA small-die architecture can bring both performance benefits and challenges. The actual effects of memory bandwidth and latency constraints can vary depending on the type of workloads. -## Tips for maximizing performance +## Tips for maximizing performance -* The hardware that powers the Lsv2-series VMs utilizes NVMe devices with eight I/O Queue Pairs (QP)s. 
Every NVMe device I/O queue is actually a pair: a submission queue and a completion queue. The NVMe driver is set up to optimize the utilization of these eight I/O QPs by distributing I/O’s in a round robin schedule. To gain max performance, run eight jobs per device to match. +- To gain max performance, run multiple jobs with deep queue depth per device. -* Avoid mixing NVMe admin commands (for example, NVMe SMART info query, etc.) with NVMe I/O commands during active workloads. Lsv2 NVMe devices are backed by Hyper-V NVMe Direct technology, which switches into “slow mode” whenever any NVMe admin commands are pending. Lsv2 users could see a dramatic performance drop in NVMe I/O performance if that happens. +- Avoid mixing NVMe admin commands (for example, NVMe SMART info query) with NVMe I/O commands during active workloads. Lsv3, Lasv3, and Lsv2 NVMe devices are backed by Hyper-V NVMe Direct technology, which switches into "slow mode" whenever any NVMe admin commands are pending. Lsv3, Lasv3, and Lsv2 users might see a dramatic performance drop in NVMe I/O performance if that scenario happens. -* Lsv2 users should not rely on device NUMA information (all 0) reported from within the VM for data drives to decide the NUMA affinity for their apps. The recommended way for better performance is to spread workloads across CPUs if possible. +- It's not recommended for Lsv2 users to rely on device NUMA information (all 0) reported from within the VM for data drives to decide the NUMA affinity for their apps. For better performance, it's recommended to spread workloads across CPUs if possible. -* The maximum supported queue depth per I/O queue pair for Lsv2 VM NVMe device is 1024 (vs. Amazon i3 QD 32 limit). Lsv2 users should limit their (synthetic) benchmarking workloads to queue depth 1024 or lower to avoid triggering queue full conditions, which can reduce performance. +- The maximum supported queue depth per I/O queue pair for Lsv3, Lasv3, and Lsv2 VM NVMe device is 1024. Lsv3, Lasv3, and Lsv2 users are recommended to limit their (synthetic) benchmarking workloads to queue depth 1024 or lower to avoid triggering queue full conditions, which can reduce performance. -## Utilizing local NVMe storage +- The best performance is obtained when I/O is done directly to each of the raw NVMe devices with no partitioning, no file systems, no RAID config, etc. +## Utilizing local NVMe storage -Local storage on the 1.92 TB NVMe disk on all Lsv2 VMs is ephemeral. During a successful standard reboot of the VM, the data on the local NVMe disk will persist. The data will not persist on the NVMe if the VM is redeployed, de-allocated, or deleted. Data will not persist if another issue causes the VM, or the hardware it is running on, to become unhealthy. When this happens, any data on the old host is securely erased. +Local storage on the 1.92 TB NVMe disk on all Lsv3, Lasv3, and Lsv2 VMs is ephemeral. During a successful standard reboot of the VM, the data on the local NVMe disk persists. The data doesn't persist on the NVMe if the VM is redeployed, deallocated, or deleted. Data doesn't persist if another issue causes the VM, or the hardware on which the VM is running, to become unhealthy. When this scenario happens, any data on the old host is securely erased. -There will also be cases when the VM needs to be moved to a different host machine, for example, during a planned maintenance operation. 
Planned maintenance operations and some hardware failures can be anticipated with [Scheduled Events](scheduled-events.md). Scheduled Events should be used to stay updated on any predicted maintenance and recovery operations.
+There are also cases when the VM needs to be moved to a different host machine; for example, during a planned maintenance operation. Planned maintenance operations and some hardware failures can be anticipated with [Scheduled Events](scheduled-events.md). Use Scheduled Events to stay updated on any predicted maintenance and recovery operations; an example query is sketched after the scenario lists that follow.
-In the case that a planned maintenance event requires the VM to be recreated on a new host with empty local disks, the data will need to be resynchronized (again, with any data on the old host being securely erased). This occurs because Lsv2-series VMs do not currently support live migration on the local NVMe disk.
+In the case that a planned maintenance event requires the VM to be recreated on a new host with empty local disks, the data needs to be resynchronized (again, with any data on the old host being securely erased). This scenario occurs because Lsv3, Lasv3, and Lsv2-series VMs don't currently support live migration on the local NVMe disk.
-There are two modes for planned maintenance.
+There are two modes for planned maintenance: [standard VM customer-controlled maintenance](#standard-vm-customer-controlled-maintenance) and [automatic maintenance](#automatic-maintenance).
-### Standard VM customer-controlled maintenance
+For any upcoming service events, use the controlled maintenance process to select a time most convenient to you for the update. Prior to the event, back up your data in premium storage. After the maintenance event completes, return your data to the refreshed Lsv2 VM's local NVMe storage.
-- The VM is moved to an updated host during a 30-day window.
-- Lsv2 local storage data could be lost, so backing-up data prior to the event is recommended.
+Scenarios that maintain data on local NVMe disks include when:
-### Automatic maintenance
+- The VM is running and healthy.
+- The VM is rebooted in place by you or by Azure.
+- The VM is paused (stopped without deallocation).
+- Most planned maintenance servicing operations.
-- Occurs if the customer does not execute customer-controlled maintenance, or in the event of emergency procedures such as a security zero-day event.
-- Intended to preserve customer data, but there is a small risk of a VM freeze or reboot.
-- Lsv2 local storage data could be lost, so backing-up data prior to the event is recommended.
+Scenarios that securely erase data to protect the customer include when:
-For any upcoming service events, use the controlled maintenance process to select a time most convenient to you for the update. Prior to the event, you may back up your data in premium storage. After the maintenance event completes, you can return your data to the refreshed Lsv2 VMs local NVMe storage.
+- The VM is redeployed, stopped (deallocated), or deleted by you.
+- The VM becomes unhealthy and has to service heal to another node due to a hardware issue.
+- A few of the planned maintenance servicing operations that require the VM to be reallocated to another host for servicing.
-Scenarios that maintain data on local NVMe disks include:
-- The VM is running and healthy.
-- The VM is rebooted in place (by you or Azure).
-- The VM is paused (stopped without de-allocation).
-- The majority of the planned maintenance servicing operations.
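+If your workload needs warning before such an event, it can poll the Azure Instance Metadata Service Scheduled Events endpoint from inside the VM. The following is a minimal sketch; the endpoint and `Metadata` header are the documented defaults, and how you react to an event depends on your workload:
+
+```powershell
+# Query Scheduled Events from inside the VM (the first call registers the VM for the service).
+$uri = 'http://169.254.169.254/metadata/scheduledevents?api-version=2020-07-01'
+$response = Invoke-RestMethod -Headers @{ Metadata = 'true' } -Method Get -Uri $uri
+
+# Each entry describes an upcoming event such as Reboot, Redeploy, or Freeze.
+$response.Events | Select-Object EventId, EventType, ResourceType, Resources, NotBefore
+```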
+In standard VM customer-controlled maintenance, the VM is moved to an updated host during a 30-day window. -Scenarios that securely erase data to protect the customer include: +Lsv3, Lasv3, and Lsv2 local storage data might be lost, so backing-up data prior to the event is recommended. -- The VM is redeployed, stopped (de-allocated), or deleted (by you). -- The VM becomes unhealthy and has to service heal to another node due to a hardware issue. -- A small number of the planned maintenance servicing operations that requires the VM to be reallocated to another host for servicing. +### Automatic maintenance -## Frequently asked questions +Automatic maintenance occurs if the customer doesn't execute customer-controlled maintenance. Automatic maintenance can also occur because of emergency procedures, such as a security zero-day event. -* **How do I start deploying Lsv2-series VMs?** - Much like any other VM, use the [Portal](quick-create-portal.md), [Azure CLI](quick-create-cli.md), or [PowerShell](quick-create-powershell.md) to create a VM. +This type of maintenance is intended to preserve customer data, but there's a small risk of a VM freeze or reboot. -* **Will a single NVMe disk failure cause all VMs on the host to fail?** - If a disk failure is detected on the hardware node, the hardware is in a failed state. When this occurs, all VMs on the node are automatically de-allocated and moved to a healthy node. For Lsv2-series VMs, this means that the customer’s data on the failing node is also securely erased and will need to be recreated by the customer on the new node. As noted, before live migration becomes available on Lsv2, the data on the failing node will be proactively moved with the VMs as they are transferred to another node. +Lsv3, Lasv3, and Lsv2 local storage data might be lost, so backing-up data prior to the event is recommended. -* **Do I need to make polling adjustments in Windows in Windows Server 2012 or Windows Server 2016?** - NVMe polling is only available on Windows Server 2019 on Azure. +## Frequently asked questions -* **Can I switch back to a traditional interrupt service routine (ISR) model?** - Lsv2-series VMs are optimized for NVMe polling. Updates are continuously provided to improve polling performance. +The following are frequently asked questions about these series. -* **Can I adjust the polling settings in Windows Server 2019?** - The polling settings are not user adjustable. - -## Next steps +### How do I start deploying L-series VMs? -* See specifications for all [VMs optimized for storage performance](../sizes-storage.md) on Azure +Much like any other VM, create a VM [using the Azure portal](quick-create-portal.md), [through the Azure Command-Line Interface (Azure CLI)](quick-create-cli.md), or [through PowerShell](quick-create-powershell.md). + +### Does a single NVMe disk failure cause all VMs on the host to fail? + +If a disk failure is detected on the hardware node, the hardware is in a failed state. When this problem occurs, all VMs on the node are automatically deallocated and moved to a healthy node. For Lsv3, Lasv3, and Lsv2-series VMs, this scenario means that the customer's data on the failing node is also securely erased. The customer needs to recreate the data on the new node. + +### Do I need to make polling adjustments in Windows Server 2012 or Windows Server 2016? + +NVMe polling is only available on Windows Server 2019 and later versions on Azure. + +### Can I switch back to a traditional interrupt service routine (ISR) model? 
+ +Lsv3, Lasv3, and Lsv2-series VMs are optimized for NVMe polling. Updates are continuously provided to improve polling performance. + +### Can I adjust the polling settings in Windows Server 2019 or later versions? + +The polling settings aren't user adjustable. + +## Next steps + +See specifications for all [VMs optimized for storage performance](../sizes-storage.md) on Azure. diff --git a/articles/virtual-machines/workloads/oracle/oracle-vm-solutions.md index 6b27dd42402ef..c2021b561c282 100644 --- a/articles/virtual-machines/workloads/oracle/oracle-vm-solutions.md +++ b/articles/virtual-machines/workloads/oracle/oracle-vm-solutions.md @@ -160,13 +160,6 @@ For related information, see KB article **860340.1** at [!NOTE] > All RHEL images are available in Azure public and Azure Government clouds. They are not available in Azure China clouds. ## List of RHEL images -This is a list of RHEL images available in Azure. Unless otherwise stated, all images are LVM-partitioned and attached to regular RHEL repositories (not EUS, not E4S). The following images are currently available for general use: +This section provides a list of RHEL images available in Azure. Unless otherwise stated, all images are LVM-partitioned and attached to regular RHEL repositories (not EUS, not E4S). The following images are currently available for general use: -> [!NOTE] -> RAW images are no longer being produced in favor of LVM-partitioned images. LVM provides several advantages over the older raw (non-LVM) partitioning scheme, including significantly more flexible partition resizing options. +### RHEL x64 architecture images Offer| SKU | Partitioning | Provisioning | Notes :----|:----|:-------------|:-------------|:----- -RHEL | 6.7 | RAW | Linux Agent | Extended Lifecycle Support available from December 1st. [More details here.](redhat-extended-lifecycle-support.md) -| | 6.8 | RAW | Linux Agent | Extended Lifecycle Support available from December 1st. [More details here.](redhat-extended-lifecycle-support.md) -| | 6.9 | RAW | Linux Agent | Extended Lifecycle Support available from December 1st. [More details here.](redhat-extended-lifecycle-support.md) -| | 6.10 | RAW | Linux Agent | Extended Lifecycle Support available from December 1st. [More details here.](redhat-extended-lifecycle-support.md) +RHEL | 6.7 | RAW | Linux Agent | Extended Lifecycle Support available. [More details here.](redhat-extended-lifecycle-support.md) +| | 6.8 | RAW | Linux Agent | Extended Lifecycle Support available. [More details here.](redhat-extended-lifecycle-support.md) +| | 6.9 | RAW | Linux Agent | Extended Lifecycle Support available. [More details here.](redhat-extended-lifecycle-support.md) +| | 6.10 | RAW | Linux Agent | Extended Lifecycle Support available. [More details here.](redhat-extended-lifecycle-support.md) | | 7-RAW | RAW | Linux Agent | RHEL 7.x family of images.
                    Attached to regular repositories by default (not EUS). -| | 7-LVM | LVM | Linux Agent | RHEL 7.x family of images.
                    Attached to regular repositories by default (not EUS). If you are looking for a standard RHEL image to deploy, use this set of images and/or its Generation 2 counterpart. -| | 7lvm-gen2| LVM | Linux Agent | Generation 2, RHEL 7.x family of images.
                    Attached to regular repositories by default (not EUS). If you are looking for a standard RHEL image to deploy, use this set of images and/or its Generation 1 counterpart. +| | 7-LVM | LVM | Linux Agent | RHEL 7.x family of images.
                    Attached to regular repositories by default (not EUS). If you're looking for a standard RHEL image to deploy, use this set of images and/or its Generation 2 counterpart. +| | 7lvm-gen2| LVM | Linux Agent | Generation 2, RHEL 7.x family of images.
                    Attached to regular repositories by default (not EUS). If you're looking for a standard RHEL image to deploy, use this set of images and/or its Generation 1 counterpart. | | 7-RAW-CI | RAW-CI | cloud-init | RHEL 7.x family of images.
                    Attached to regular repositories by default (not EUS). | | 7.2 | RAW | Linux Agent | | | 7.3 | RAW | Linux Agent | @@ -59,59 +58,89 @@ RHEL | 6.7 | RAW | Linux Agent | Extended Lifecycle Support ava | | 81-ci-gen2| LVM | Linux Agent | Hyper-V Generation 2 - Attached to EUS repositories as of November 2020. | | 8.2 | LVM | Linux Agent | Attached to EUS repositories as of November 2020. | | 82gen2 | LVM | Linux Agent | Hyper-V Generation 2 - Attached to EUS repositories as of November 2020. -| | 8.3 | LVM | Linux Agent | Attached to regular repositories (EUS unavailable for RHEL 8.3) -| | 83-gen2 | LVM | Linux Agent |Hyper-V Generation 2 - Attached to regular repositories (EUS unavailable for RHEL 8.3) -RHEL-SAP | 7.4 | LVM | Linux Agent | RHEL 7.4 for SAP HANA and Business Apps. Attached to E4S repositories, will charge a premium for SAP and RHEL as well as the base compute fee. -| | 74sap-gen2| LVM | Linux Agent | RHEL 7.4 for SAP HANA and Business Apps. Generation 2 image. Attached to E4S repositories, will charge a premium for SAP and RHEL as well as the base compute fee. -| | 7.5 | LVM | Linux Agent | RHEL 7.5 for SAP HANA and Business Apps. Attached to E4S repositories, will charge a premium for SAP and RHEL as well as the base compute fee. -| | 75sap-gen2| LVM | Linux Agent | RHEL 7.5 for SAP HANA and Business Apps. Generation 2 image. Attached to E4S repositories, will charge a premium for SAP and RHEL as well as the base compute fee. -| | 7.6 | LVM | Linux Agent | RHEL 7.6 for SAP HANA and Business Apps. Attached to E4S repositories, will charge a premium for SAP and RHEL as well as the base compute fee. -| | 76sap-gen2| LVM | Linux Agent | RHEL 7.6 for SAP HANA and Business Apps. Generation 2 image. Attached to E4S repositories, will charge a premium for SAP and RHEL as well as the base compute fee. -| | 7.7 | LVM | Linux Agent | RHEL 7.7 for SAP HANA and Business Apps. Attached to E4S repositories, will charge a premium for SAP and RHEL as well as the base compute fee. -RHEL-SAP-HANA (To be removed in November 2020) | 6.7 | RAW | Linux Agent | RHEL 6.7 for SAP HANA. Outdated in favor of the RHEL-SAP images. This image will be removed in November 2020. More details about Red Hat's SAP cloud offerings are available at [SAP offerings on certified cloud providers](https://access.redhat.com/articles/3751271). -| | 7.2 | LVM | Linux Agent | RHEL 7.2 for SAP HANA. Outdated in favor of the RHEL-SAP images. This image will be removed in November 2020. More details about Red Hat's SAP cloud offerings are available at [SAP offerings on certified cloud providers](https://access.redhat.com/articles/3751271). -| | 7.3 | LVM | Linux Agent | RHEL 7.3 for SAP HANA. Outdated in favor of the RHEL-SAP images. This image will be removed in November 2020. More details about Red Hat's SAP cloud offerings are available at [SAP offerings on certified cloud providers](https://access.redhat.com/articles/3751271). -RHEL-SAP-APPS | 6.8 | RAW | Linux Agent | RHEL 6.8 for SAP Business Applications. Outdated in favor of the RHEL-SAP images. -| | 7.3 | LVM | Linux Agent | RHEL 7.3 for SAP Business Applications. Outdated in favor of the RHEL-SAP images. -| | 7.4 | LVM | Linux Agent | RHEL 7.4 for SAP Business Applications. -| | 7.6 | LVM | Linux Agent | RHEL 7.6 for SAP Business Applications. -| | 7.7 | LVM | Linux Agent | RHEL 7.7 for SAP Business Applications. -| | 77-gen2 | LVM | Linux Agent | RHEL 7.7 for SAP Business Applications. 
Generation 2 image -| | 8.1 | LVM | Linux Agent | RHEL 8.1 for SAP Business Applications. -| | 81-gen2 | LVM | Linux Agent | RHEL 8.1 for SAP Business Applications. Generation 2 image. -| | 8.2 | LVM | Linux Agent | RHEL 8.2 for SAP Business Applications. -| | 82-gen2 | LVM | Linux Agent | RHEL 8.2 for SAP Business Applications. Generation 2 image. -RHEL-HA | 7.4 | LVM | Linux Agent | RHEL 7.4 with HA Add-On. Will charge a premium for HA and RHEL on top of the base compute fee. Outdated in favor of the RHEL-SAP-HA images. -| | 7.5 | LVM | Linux Agent | RHEL 7.5 with HA Add-On. Will charge a premium for HA and RHEL on top of the base compute fee. Outdated in favor of the RHEL-SAP-HA images. -| | 7.6 | LVM | Linux Agent | RHEL 7.6 with HA Add-On. Will charge a premium for HA and RHEL on top of the base compute fee. Outdated in favor of the RHEL-SAP-HA images. -RHEL-SAP-HA | 7.4 | LVM | Linux Agent | RHEL 7.4 for SAP with HA and Update Services. Attached to E4S repositories. Will charge a premium for SAP and HA repositories as well as RHEL, on top of the base compute fees. -| | 74sapha-gen2 | LVM | Linux Agent | RHEL 7.4 for SAP with HA and Update Services. Generation 2 image. Attached to E4S repositories. Will charge a premium for SAP and HA repositories as well as RHEL, on top of the base compute fees. -| | 7.5 | LVM | Linux Agent | RHEL 7.5 for SAP with HA and Update Services. Attached to E4S repositories. Will charge a premium for SAP and HA repositories as well as RHEL, on top of the base compute fees. -| | 7.6 | LVM | Linux Agent | RHEL 7.6 for SAP with HA and Update Services. Attached to E4S repositories. Will charge a premium for SAP and HA repositories as well as RHEL, on top of the base compute fees. -| | 76sapha-gen2 | LVM | Linux Agent | RHEL 7.6 for SAP with HA and Update Services. Generation 2 image. Attached to E4S repositories. Will charge a premium for SAP and HA repositories as well as RHEL, on top of the base compute fees. -| | 7.7 | LVM | Linux Agent | RHEL 7.7 for SAP with HA and Update Services. Attached to E4S repositories. Will charge a premium for SAP and HA repositories as well as RHEL, on top of the base compute fees. -| | 77sapha-gen2 | LVM | Linux Agent | RHEL 7.7 for SAP with HA and Update Services. Generation 2 image. Attached to E4S repositories. Will charge a premium for SAP and HA repositories as well as RHEL, on top of the base compute fees. -| | 8.1 | LVM | Linux Agent | RHEL 8.1 for SAP with HA and Update Services. Attached to E4S repositories. Will charge a premium for SAP and HA repositories as well as RHEL, on top of the base compute fees. -| | 81sapha-gen2 | LVM | Linux Agent | RHEL 8.1 for SAP with HA and Update Services. Generation 2 images Attached to E4S repositories. Will charge a premium for SAP and HA repositories as well as RHEL, on top of the base compute fees. -| | 8.2 | LVM | Linux Agent | RHEL 8.2 for SAP with HA and Update Services. Will charge a premium for SAP and HA repositories as well as RHEL, on top of the base compute fees. -| | 82sapha-gen2 | LVM | Linux Agent | RHEL 8.2 for SAP with HA and Update Services. Generation 2 images Attached to E4S repositories. Will charge a premium for SAP and HA repositories as well as RHEL, on top of the base compute fees. -rhel-byos |rhel-lvm74| LVM | Linux Agent | RHEL 7.4 BYOS images, not attached to any source of updates, will not charge a RHEL premium. -| |rhel-lvm75| LVM | Linux Agent | RHEL 7.5 BYOS images, not attached to any source of updates, will not charge a RHEL premium. 
-| |rhel-lvm76| LVM | Linux Agent | RHEL 7.6 BYOS images, not attached to any source of updates, will not charge a RHEL premium. -| |rhel-lvm76-gen2| LVM | Linux Agent | RHEL 7.6 Generation 2 BYOS images, not attached to any source of updates, will not charge a RHEL premium. -| |rhel-lvm77| LVM | Linux Agent | RHEL 7.7 BYOS images, not attached to any source of updates, will not charge a RHEL premium. -| |rhel-lvm77-gen2| LVM | Linux Agent | RHEL 7.7 Generation 2 BYOS images, not attached to any source of updates, will not charge a RHEL premium. -| |rhel-lvm78| LVM | Linux Agent | RHEL 7.8 BYOS images, not attached to any source of updates, will not charge a RHEL premium. -| |rhel-lvm78-gen2| LVM | Linux Agent | RHEL 7.8 Generation 2 BYOS images, not attached to any source of updates, will not charge a RHEL premium. -| |rhel-lvm8 | LVM | Linux Agent | RHEL 8.0 BYOS images , not attached to any source of updates, will not charge a RHEL premium. -| |rhel-lvm8-gen2 | LVM | Linux Agent | RHEL 8.0 Generation 2 BYOS images , not attached to any source of updates, will not charge a RHEL premium. -| |rhel-lvm81 | LVM | Linux Agent | RHEL 8.1 BYOS images , not attached to any source of updates, will not charge a RHEL premium. -| |rhel-lvm81-gen2 | LVM | Linux Agent | RHEL 8.1 Generation 2 BYOS images , not attached to any source of updates, will not charge a RHEL premium. -| |rhel-lvm82 | LVM | Linux Agent | RHEL 8.2 BYOS images , not attached to any source of updates, will not charge a RHEL premium. -| |rhel-lvm82-gen2 | LVM | Linux Agent | RHEL 8.2 Generation 2 BYOS images , not attached to any source of updates, will not charge a RHEL premium. +| | 8.3 | LVM | Linux Agent | Attached to regular repositories (EUS unavailable for RHEL 8.3) +| | 83-gen2 | LVM | Linux Agent |Hyper-V Generation 2 - Attached to regular repositories (EUS unavailable for RHEL 8.3) +| | 8.4 | LVM | Linux Agent | Attached to EUS repositories +| | 84-gen2 | LVM | Linux Agent |Hyper-V Generation 2 - Attached to EUS repositories +| | 8.5 | LVM | Linux Agent | Attached to regular repositories (EUS unavailable for RHEL 8.5) +| | 85-gen2 | LVM | Linux Agent |Hyper-V Generation 2 - Attached to regular repositories (EUS unavailable for RHEL 8.5) +| | 8.6 | LVM | Linux Agent | Attached to EUS repositories +| | 86-gen2 | LVM | Linux Agent |Hyper-V Generation 2 - Attached to EUS repositories +| | 9.0 | LVM | Linux Agent | Attached to EUS repositories +| | 90-gen2 | LVM | Linux Agent |Hyper-V Generation 2 - Attached to EUS repositories +RHEL-SAP-APPS | 6.8 | RAW | Linux Agent | RHEL 6.8 for SAP Business Applications. Outdated in favor of the RHEL-SAP images. +| | 7.3 | LVM | Linux Agent | RHEL 7.3 for SAP Business Applications. Outdated in favor of the RHEL-SAP images. +| | 7.4 | LVM | Linux Agent | RHEL 7.4 for SAP Business Applications +| | 7.6 | LVM | Linux Agent | RHEL 7.6 for SAP Business Applications +| | 7.7 | LVM | Linux Agent | RHEL 7.7 for SAP Business Applications +| | 77-gen2 | LVM | Linux Agent | RHEL 7.7 for SAP Business Applications. Generation 2 image +| | 8.1 | LVM | Linux Agent | RHEL 8.1 for SAP Business Applications +| | 81-gen2 | LVM | Linux Agent | RHEL 8.1 for SAP Business Applications. Generation 2 image +| | 8.2 | LVM | Linux Agent | RHEL 8.2 for SAP Business Applications +| | 82-gen2 | LVM | Linux Agent | RHEL 8.2 for SAP Business Applications.
Generation 2 image +| | 8.4 | LVM | Linux Agent | RHEL 8.4 for SAP Business Applications +| | 84-gen2 | LVM | Linux Agent | RHEL 8.4 for SAP Business Applications. Generation 2 image +| | 8.6 | LVM | Linux Agent | RHEL 8.6 for SAP Business Applications +| | 86-gen2 | LVM | Linux Agent | RHEL 8.6 for SAP Business Applications. Generation 2 image +RHEL-SAP-HA | 7.4 | LVM | Linux Agent | RHEL 7.4 for SAP with HA and Update Services. Images are attached to E4S repositories. Will charge a premium for SAP and HA repositories and RHEL, on top of the base compute fees +| | 74sapha-gen2 | LVM | Linux Agent | RHEL 7.4 for SAP with HA and Update Services. Generation 2 image. Attached to E4S repositories. Will charge a premium for SAP and HA repositories and RHEL, on top of the base compute fees +| | 7.5 | LVM | Linux Agent | RHEL 7.5 for SAP with HA and Update Services. Images are attached to E4S repositories. Will charge a premium for SAP and HA repositories and RHEL, on top of the base compute fees +| | 7.6 | LVM | Linux Agent | RHEL 7.6 for SAP with HA and Update Services. Images are attached to E4S repositories. Will charge a premium for SAP and HA repositories and RHEL, on top of the base compute fees +| | 76sapha-gen2 | LVM | Linux Agent | RHEL 7.6 for SAP with HA and Update Services. Generation 2 image. Attached to E4S repositories. Will charge a premium for SAP and HA repositories and RHEL, on top of the base compute fees +| | 7.7 | LVM | Linux Agent | RHEL 7.7 for SAP with HA and Update Services. Images are attached to E4S repositories. Will charge a premium for SAP and HA repositories and RHEL, on top of the base compute fees +| | 77sapha-gen2 | LVM | Linux Agent | RHEL 7.7 for SAP with HA and Update Services. Generation 2 image. Attached to E4S repositories. Will charge a premium for SAP and HA repositories and RHEL, on top of the base compute fees +| | 8.1 | LVM | Linux Agent | RHEL 8.1 for SAP with HA and Update Services. Images are attached to E4S repositories. Will charge a premium for SAP and HA repositories and RHEL, on top of the base compute fees +| | 81sapha-gen2 | LVM | Linux Agent | RHEL 8.1 for SAP with HA and Update Services. Generation 2 image. Attached to E4S repositories. Will charge a premium for SAP and HA repositories and RHEL, on top of the base compute fees +| | 8.2 | LVM | Linux Agent | RHEL 8.2 for SAP with HA and Update Services. Will charge a premium for SAP and HA repositories and RHEL, on top of the base compute fees +| | 82sapha-gen2 | LVM | Linux Agent | RHEL 8.2 for SAP with HA and Update Services. Generation 2 image. Attached to E4S repositories. Will charge a premium for SAP and HA repositories and RHEL, on top of the base compute fees +| | 8.4 | LVM | Linux Agent | RHEL 8.4 for SAP with HA and Update Services. Will charge a premium for SAP and HA repositories and RHEL, on top of the base compute fees +| | 84sapha-gen2 | LVM | Linux Agent | RHEL 8.4 for SAP with HA and Update Services. Generation 2 image. Attached to E4S repositories. Will charge a premium for SAP and HA repositories and RHEL, on top of the base compute fees +| | 8.6 | LVM | Linux Agent | RHEL 8.6 for SAP with HA and Update Services. Will charge a premium for SAP and HA repositories and RHEL, on top of the base compute fees +| | 86sapha-gen2 | LVM | Linux Agent | RHEL 8.6 for SAP with HA and Update Services. Generation 2 image. Attached to E4S repositories.
Will charge a premium for SAP and HA repositories and RHEL, on top of the base compute fees +rhel-byos |rhel-lvm74| LVM | Linux Agent | RHEL 7.4 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm75| LVM | Linux Agent | RHEL 7.5 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm76| LVM | Linux Agent | RHEL 7.6 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm76-gen2| LVM | Linux Agent | RHEL 7.6 Generation 2 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm77| LVM | Linux Agent | RHEL 7.7 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm77-gen2| LVM | Linux Agent | RHEL 7.7 Generation 2 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm78| LVM | Linux Agent | RHEL 7.8 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm78-gen2| LVM | Linux Agent | RHEL 7.8 Generation 2 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm8 | LVM | Linux Agent | RHEL 8.0 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm8-gen2 | LVM | Linux Agent | RHEL 8.0 Generation 2 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm81 | LVM | Linux Agent | RHEL 8.1 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm81-gen2 | LVM | Linux Agent | RHEL 8.1 Generation 2 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm82 | LVM | Linux Agent | RHEL 8.2 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm82-gen2 | LVM | Linux Agent | RHEL 8.2 Generation 2 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm83 | LVM | Linux Agent | RHEL 8.3 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm83-gen2 | LVM | Linux Agent | RHEL 8.3 Generation 2 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm84 | LVM | Linux Agent | RHEL 8.4 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm84-gen2 | LVM | Linux Agent | RHEL 8.4 Generation 2 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm85 | LVM | Linux Agent | RHEL 8.5 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm85-gen2 | LVM | Linux Agent | RHEL 8.5 Generation 2 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm86 | LVM | Linux Agent | RHEL 8.6 BYOS images, not attached to any source of updates, won't charge an RHEL premium +| |rhel-lvm86-gen2 | LVM | Linux Agent | RHEL 8.6 Generation 2 BYOS images, not attached to any source of updates, won't charge an RHEL premium +RHEL-SAP (out of support) | 7.4 | LVM | Linux Agent | RHEL 7.4 for SAP HANA and Business Apps. Images are attached to E4S repositories, will charge a premium for SAP and RHEL and the base compute fee +| | 74sap-gen2| LVM | Linux Agent | RHEL 7.4 for SAP HANA and Business Apps. Generation 2 image. Images are attached to E4S repositories, will charge a premium for SAP and RHEL and the base compute fee +| | 7.5 | LVM | Linux Agent | RHEL 7.5 for SAP HANA and Business Apps.
Images are attached to E4S repositories, will charge a premium for SAP and RHEL and the base compute fee +| | 75sap-gen2| LVM | Linux Agent | RHEL 7.5 for SAP HANA and Business Apps. Generation 2 image. Images are attached to E4S repositories, will charge a premium for SAP and RHEL and the base compute fee +| | 7.6 | LVM | Linux Agent | RHEL 7.6 for SAP HANA and Business Apps. Images are attached to E4S repositories, will charge a premium for SAP and RHEL and the base compute fee +| | 76sap-gen2| LVM | Linux Agent | RHEL 7.6 for SAP HANA and Business Apps. Generation 2 image. Images are attached to E4S repositories, will charge a premium for SAP and RHEL and the base compute fee +| | 7.7 | LVM | Linux Agent | RHEL 7.7 for SAP HANA and Business Apps. Images are attached to E4S repositories, will charge a premium for SAP and RHEL and the base compute fee +RHEL-SAP-HANA (out of support) | 6.7 | RAW | Linux Agent | RHEL 6.7 for SAP HANA. Outdated in favor of the RHEL-SAP images. This image will be removed in November 2020. More details about Red Hat's SAP cloud offerings are available at [SAP offerings on certified cloud providers](https://access.redhat.com/articles/3751271) +| | 7.2 | LVM | Linux Agent | RHEL 7.2 for SAP HANA. Outdated in favor of the RHEL-SAP images. This image will be removed in November 2020. More details about Red Hat's SAP cloud offerings are available at [SAP offerings on certified cloud providers](https://access.redhat.com/articles/3751271) +| | 7.3 | LVM | Linux Agent | RHEL 7.3 for SAP HANA. Outdated in favor of the RHEL-SAP images. This image will be removed in November 2020. More details about Red Hat's SAP cloud offerings are available at [SAP offerings on certified cloud providers](https://access.redhat.com/articles/3751271) +RHEL-HA (out of support) | 7.4 | LVM | Linux Agent | RHEL 7.4 with HA Add-On. Will charge a premium for HA and RHEL on top of the base compute fee. Outdated in favor of the RHEL-SAP-HA images +| | 7.5 | LVM | Linux Agent | RHEL 7.5 with HA Add-On. Will charge a premium for HA and RHEL on top of the base compute fee. Outdated in favor of the RHEL-SAP-HA images +| | 7.6 | LVM | Linux Agent | RHEL 7.6 with HA Add-On. Will charge a premium for HA and RHEL on top of the base compute fee. Outdated in favor of the RHEL-SAP-HA images > [!NOTE] -> The RHEL-SAP-HANA product offering is considered end of life by Red Hat. Existing deployments will continue to work normally, but Red Hat recommends that customers migrate from the RHEL-SAP-HANA images to the RHEL-SAP-HA images which includes the SAP HANA repositories as well as the HA add-on. More details about Red Hat's SAP cloud offerings are available at [SAP offerings on certified cloud providers](https://access.redhat.com/articles/3751271). +> The RHEL-SAP-HANA product offering is considered end of life by Red Hat. Existing deployments will continue to work normally, but Red Hat recommends that customers migrate from the RHEL-SAP-HANA images to the RHEL-SAP-HA images which includes the SAP HANA repositories and the HA add-on. More details about Red Hat's SAP cloud offerings are available at [SAP offerings on certified cloud providers](https://access.redhat.com/articles/3751271). + +### RHEL ARM64 architecture images + +Offer| SKU | Partitioning | Provisioning | Notes +:----|:----|:-------------|:-------------|:----- +RHEL | 8_6-arm64 | LVM | Linux Agent | Hyper-V Generation 2 - Attached to EUS repositories ## Next steps * Learn more about the [Red Hat images in Azure](./redhat-images.md). 
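+To check which of the offers and SKUs listed above are currently published in a particular region, you can query the image catalog with the Azure CLI. The following is a sketch only; the region is an assumption, and the output changes as images are added or retired.
+
+```bash
+# Illustrative only: westeurope is an example region, substitute your own.
+# List the RedHat offers available in the region, then the SKUs of the RHEL offer.
+az vm image list-offers --location westeurope --publisher RedHat --output table
+az vm image list-skus --location westeurope --publisher RedHat --offer RHEL --output table
+```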
diff --git a/articles/virtual-machines/workloads/redhat/redhat-rhui.md b/articles/virtual-machines/workloads/redhat/redhat-rhui.md index d6f8eeae81c22..cb8ab014098f4 100644 --- a/articles/virtual-machines/workloads/redhat/redhat-rhui.md +++ b/articles/virtual-machines/workloads/redhat/redhat-rhui.md @@ -84,122 +84,6 @@ At the time of this writing, EUS support has ended for RHEL <= 7.4. See the "Red * RHEL 7.6 EUS support ends May 31, 2021 * RHEL 7.7 EUS support ends August 30, 2021 -### Switch a RHEL VM 7.x to EUS (version-lock to a specific minor version) -Use the following instructions to lock a RHEL 7.x VM to a particular minor release (run as root): - ->[!NOTE] -> This only applies for RHEL 7.x versions for which EUS is available. At the time of this writing, this includes RHEL 7.2-7.7. More details are available at the [Red Hat Enterprise Linux Life Cycle](https://access.redhat.com/support/policy/updates/errata) page. - -1. Disable non-EUS repos: - ```bash - yum --disablerepo='*' remove 'rhui-azure-rhel7' - ``` - -1. Add EUS repos: - ```bash - yum --config='https://rhelimage.blob.core.windows.net/repositories/rhui-microsoft-azure-rhel7-eus.config' install 'rhui-azure-rhel7-eus' - ``` - -1. Lock the `releasever` variable (run as root): - ```bash - echo $(. /etc/os-release && echo $VERSION_ID) > /etc/yum/vars/releasever - ``` - - >[!NOTE] - > The above instruction will lock the RHEL minor release to the current minor release. Enter a specific minor release if you are looking to upgrade and lock to a later minor release that is not the latest. For example, `echo 7.5 > /etc/yum/vars/releasever` will lock your RHEL version to RHEL 7.5. - -1. Update your RHEL VM - ```bash - sudo yum update - ``` - -### Switch a RHEL VM 8.x to EUS (version-lock to a specific minor version) -Use the following instructions to lock a RHEL 8.x VM to a particular minor release (run as root): - ->[!NOTE] -> This only applies for RHEL 8.x versions for which EUS is available. At the time of this writing, this includes RHEL 8.1-8.2. More details are available at the [Red Hat Enterprise Linux Life Cycle](https://access.redhat.com/support/policy/updates/errata) page. - -1. Disable non-EUS repos: - ```bash - yum --disablerepo='*' remove 'rhui-azure-rhel8' - ``` - -1. Get the EUS repos config file: - ```bash - wget https://rhelimage.blob.core.windows.net/repositories/rhui-microsoft-azure-rhel8-eus.config - ``` - -1. Add EUS repos: - ```bash - yum --config=rhui-microsoft-azure-rhel8-eus.config install rhui-azure-rhel8-eus - ``` - -1. Lock the `releasever` variable (run as root): - ```bash - echo $(. /etc/os-release && echo $VERSION_ID) > /etc/yum/vars/releasever - ``` - - >[!NOTE] - > The above instruction will lock the RHEL minor release to the current minor release. Enter a specific minor release if you are looking to upgrade and lock to a later minor release that is not the latest. For example, `echo 8.1 > /etc/yum/vars/releasever` will lock your RHEL version to RHEL 8.1. - - >[!NOTE] - > If there are permission issues to access the releasever, you can edit the file using 'nano /etc/yum/vars/releaseve' and add the image version details and save ('Ctrl+o' then press enter and then 'Ctrl+x'). - -1. Update your RHEL VM - ```bash - sudo yum update - ``` - - -### Switch a RHEL 7.x VM back to non-EUS (remove a version lock) -Run the following as root: -1. Remove the `releasever` file: - ```bash - rm /etc/yum/vars/releasever - ``` - -1. 
Disable EUS repos: - ```bash - yum --disablerepo='*' remove 'rhui-azure-rhel7-eus' - ``` - -1. Configure RHEL VM - ```bash - yum --config='https://rhelimage.blob.core.windows.net/repositories/rhui-microsoft-azure-rhel7.config' install 'rhui-azure-rhel7' - ``` - -1. Update your RHEL VM - ```bash - sudo yum update - ``` - -### Switch a RHEL 8.x VM back to non-EUS (remove a version lock) -Run the following as root: -1. Remove the `releasever` file: - ```bash - rm /etc/yum/vars/releasever - ``` - -1. Disable EUS repos: - ```bash - yum --disablerepo='*' remove 'rhui-azure-rhel8-eus' - ``` - -1. Get the regular repos config file: - ```bash - wget https://rhelimage.blob.core.windows.net/repositories/rhui-microsoft-azure-rhel8.config - ``` - -1. Add non-EUS repos: - ```bash - yum --config=rhui-microsoft-azure-rhel8.config install rhui-azure-rhel8 - ``` - -1. Update your RHEL VM - ```bash - sudo yum update - ``` - ## The IPs for the RHUI content delivery servers RHUI is available in all regions where RHEL on-demand images are available. It currently includes all public regions listed on the [Azure status dashboard](https://azure.microsoft.com/status/) page, Azure US Government, and Microsoft Azure Germany regions. @@ -220,9 +104,6 @@ If you're using a network configuration to further restrict access from RHEL PAY 13.72.14.155 52.244.249.194 -# Azure Germany -51.5.243.77 -51.4.228.145 ``` >[!NOTE] >The new Azure US Government images,as of January 2020, will be using Public IP mentioned under Azure Global header above. diff --git a/articles/virtual-machines/workloads/sap/automation-configure-control-plane.md b/articles/virtual-machines/workloads/sap/automation-configure-control-plane.md index bfa3ea4e7a555..42ad99848e5ff 100644 --- a/articles/virtual-machines/workloads/sap/automation-configure-control-plane.md +++ b/articles/virtual-machines/workloads/sap/automation-configure-control-plane.md @@ -19,7 +19,7 @@ The control plane for the [SAP deployment automation framework on Azure](automat ## Deployer -The [deployer](automation-deployment-framework.md#deployment-components) is the execution engine of the [SAP automation framework](automation-deployment-framework.md). It is a pre-configured virtual machine (VM) that is used for executing Terraform and Ansible commands. +The [deployer](automation-deployment-framework.md#deployment-components) is the execution engine of the [SAP automation framework](automation-deployment-framework.md). It's a pre-configured virtual machine (VM) that is used for executing Terraform and Ansible commands. The configuration of the deployer is performed in a Terraform tfvars variable file. @@ -28,22 +28,33 @@ The configuration of the deployer is performed in a Terraform tfvars variable fi The table below contains the Terraform parameters, these parameters need to be entered manually if not using the deployment scripts > [!div class="mx-tdCol2BreakAll "] -> | Variable | Description | Type | -> | ----------------------- | ------------------------------------- | ---------- | +> | Variable | Description | Type | +> | ----------------------- | ------------------------------------------------------------------------------------------------------------ | ---------- | > | `tfstate_resource_id` | Azure resource identifier for the storage account in the SAP Library that contains the Terraform state files | Required | -### Generic Parameters +### Environment Parameters -The table below contains the parameters that define the resource group and the resource naming. 
+The table below contains the parameters that define the resource naming. > [!div class="mx-tdCol2BreakAll "] -> | Variable | Description | Type | -> | ----------------------- | ------------------------------------- | ---------- | -> | `environment` | A five-character identifier for the workload zone. For example, `PROD` for a production environment and `NP` for a non-production environment. | Mandatory | -> | `location` | The Azure region in which to deploy. | Required | -> | `resource_group_name` | Name of the resource group to be created | Optional | +> | Variable | Description | Type | Notes | +> | ----------------------- | ------------------------------------------------- | ---------- | ------------------------------------------------------------------------------------------- | +> | `environment` | Identifier for the control plane (max 5 chars) | Mandatory | For example, `PROD` for a production environment and `NP` for a non-production environment. | +> | `location` | The Azure region in which to deploy. | Required | Use lower case | +> | 'name_override_file' | Name override file | Optional | see [Custom naming](automation-naming-module.md) | + +### Resource Group + +The table below contains the parameters that define the resource group. + +> [!div class="mx-tdCol2BreakAll "] +> | Variable | Description | Type | +> | ----------------------- | -------------------------------------------------------- | ---------- | +> | `resource_group_name` | Name of the resource group to be created | Optional | > | `resource_group_arm_id` | Azure resource identifier for an existing resource group | Optional | +> | `resourcegroup_tags` | Tags to be associated with the resource group | Optional | + ### Network Parameters @@ -61,31 +72,40 @@ The table below contains the networking parameters. > [!div class="mx-tdCol2BreakAll "] > | Variable | Description | Type | Notes | > | ------------------------------------------ | ---------------------------------------------------------------- | ---------- | ------ | -> | `management_network_name` | The logical name of the network (DEV-WEEU-MGMT01-INFRASTRUCTURE) | Required | | -> | `management_network_arm_id` | The Azure resource identifier for the virtual network | Optional | For existing environment deployments | -> | `management_network_address_space` | The address range for the virtual network | Mandatory | For new environment deployments | +> | `management_network_name` | The name of the VNet into which the deployer will be deployed | Optional | For green field deployments. | +> | `management_network_logical_name` | The logical name of the network (DEV-WEEU-MGMT01-INFRASTRUCTURE) | Required | | +> | `management_network_arm_id` | The Azure resource identifier for the virtual network | Optional | For brown field deployments. | +> | `management_network_address_space` | The address range for the virtual network | Mandatory | For green field deployments. | +> | | | | | > | `management_subnet_name` | The name of the subnet | Optional | | -> | `management_subnet_address_prefix` | The address range for the subnet | Mandatory | For new environment deployments | -> | `management_subnet_arm_id` | The Azure resource identifier for the subnet | Mandatory | For existing environment deployments | +> | `management_subnet_address_prefix` | The address range for the subnet | Mandatory | For green field deployments. | +> | `management_subnet_arm_id` | The Azure resource identifier for the subnet | Mandatory | For brown field deployments. 
| > | `management_subnet_nsg_name` | The name of the Network Security Group name | Optional | | -> | `management_subnet_nsg_arm_id` | The Azure resource identifier for the Network Security Group | Mandatory | Mandatory for existing environment deployments | +> | `management_subnet_nsg_arm_id` | The Azure resource identifier for the Network Security Group | Mandatory | Mandatory For brown field deployments. | > | `management_subnet_nsg_allowed_ips` | Range of allowed IP addresses to add to Azure Firewall | Optional | | -> | `management_firewall_subnet_arm_id` | The Azure resource identifier for the Network Security Group | Mandatory | For existing environment deployments | -> | `management_firewall_subnet_address_prefix` | The address range for the subnet | Mandatory | For new environment deployments | +> | | | | | +> | `management_firewall_subnet_arm_id` | The Azure resource identifier for the Firewall subnet | Mandatory | For brown field deployments. | +> | `management_firewall_subnet_address_prefix` | The address range for the subnet | Mandatory | For green field deployments. | +> | | | | | +> | `management_bastion_subnet_arm_id` | The Azure resource identifier for the Bastion subnet | Mandatory | For brown field deployments. | +> | `management_bastion_subnet_address_prefix` | The address range for the subnet | Mandatory | For green field deployments. | ### Deployer Virtual Machine Parameters The table below contains the parameters related to the deployer virtual machine. > [!div class="mx-tdCol2BreakAll "] -> | Variable | Description | Type | -> | ------------------------------- | ---------------------------------------------------------------------------- | ---------- | -> | `deployer_size` | Defines the Virtual machine SKU to use, for example Standard_D4s_v3 | Optional | -> | `deployer_image` | Defines the Virtual machine image to use, see below | Optional | -> | `deployer_disk_type` | Defines the disk type, for example Premium_LRS | Optional | -> | `deployer_use_DHCP` | Controls if Azure subnet provided IP addresses should be used (dynamic) true | Optional | -> | `deployer_private_ip_address` | Defines the Private IP address to use | Optional | -> | `deployer_enable_public_ip` | Defined if the deployer has a public IP | Optional | +> | Variable | Description | Type | +> | ------------------------------- | -------------------------------------------------------------------------------------- | ---------- | +> | `deployer_size` | Defines the Virtual machine SKU to use, for example Standard_D4s_v3 | Optional | +> | `deployer_count` | Defines the number of Deployers | Optional | +> | `deployer_image` | Defines the Virtual machine image to use, see below | Optional | +> | `deployer_disk_type` | Defines the disk type, for example Premium_LRS | Optional | +> | `deployer_use_DHCP` | Controls if Azure subnet provided IP addresses should be used (dynamic) true | Optional | +> | `deployer_private_ip_address` | Defines the Private IP address to use | Optional | +> | `deployer_enable_public_ip` | Defines if the deployer has a public IP | Optional | +> | `auto_configure_deployer` | Defines deployer will be configured with the required software (Terraform and Ansible) | Optional | + The Virtual Machine image is defined using the following structure: ```python @@ -120,40 +140,49 @@ The table below defines the parameters used for defining the Key Vault informati > [!div class="mx-tdCol2BreakAll "] > | Variable | Description | Type | > | ------------------------------------ | 
--------------------------------------------------------------------------- | ---------- | -> | `user_keyvault_id` | Azure resource identifier for the user key vault | Optional | -> | `spn_keyvault_id` | Azure resource identifier for the user key vault containing the SPN details | Optional | -> | `deployer_private_key_secret_name` | The Azure Key Vault secret name for the deployer private key | Optional | -> | `deployer_public_key_secret_name` | The Azure Key Vault secret name for the deployer public key | Optional | -> | `deployer_username_secret_name` | The Azure Key Vault secret name for the deployer username | Optional | -> | `deployer_password_secret_name` | The Azure Key Vault secret name for the deployer password | Optional | +> | `user_keyvault_id` | Azure resource identifier for the user key vault | Optional | +> | `spn_keyvault_id` | Azure resource identifier for the user key vault containing the SPN details | Optional | +> | `deployer_private_key_secret_name` | The Azure Key Vault secret name for the deployer private key | Optional | +> | `deployer_public_key_secret_name` | The Azure Key Vault secret name for the deployer public key | Optional | +> | `deployer_username_secret_name` | The Azure Key Vault secret name for the deployer username | Optional | +> | `deployer_password_secret_name` | The Azure Key Vault secret name for the deployer password | Optional | ### Other parameters > [!div class="mx-tdCol2BreakAll "] -> | Variable | Description | Type | -> | ------------------------------------ | ------------------------------------- | ----------- | -> | `firewall_deployment` | Boolean flag controlling if an Azure firewall is to be deployed | Mandatory | -> | `bastion_deployment` | Boolean flag controlling if Azure bastion host is to be deployed | Mandatory | -> | `enable_purge_control_for_keyvaults` | Boolean flag controlling if purge control is enabled on the Key Vault. Use only for test deployments | Optional | -> | `use_private_endpoint` | Boolean flag controlling if private endpoints are used. | Optional | +> | Variable | Description | Type | Notes | +> | ------------------------------------ | ---------------------------------------------------------------------- | ----------- | ----------------------------- | +> | `firewall_deployment` | Boolean flag controlling if an Azure firewall is to be deployed | Optional | | +> | `bastion_deployment` | Boolean flag controlling if Azure bastion host is to be deployed | Optional | | +> | `enable_purge_control_for_keyvaults` | Boolean flag controlling if purge control is enabled on the Key Vault. | Optional | Use only for test deployments | +> | `use_private_endpoint` | Boolean flag controlling if private endpoints are used. 
| Optional | Recommended | ### Example parameters file for deployer (required parameters only) -```bash +```terraform # The environment value is a mandatory field, it is used for partitioning the environments, for example (PROD and NP) environment="MGMT" + # The location/region value is a mandatory field, it is used to control where the resources are deployed location="westeurope" # management_network_address_space is the address space for management virtual network management_network_address_space="10.10.20.0/25" + # management_subnet_address_prefix is the address prefix for the management subnet management_subnet_address_prefix="10.10.20.64/28" + # management_firewall_subnet_address_prefix is the address prefix for the firewall subnet management_firewall_subnet_address_prefix="10.10.20.0/26" -deployer_enable_public_ip=true +# management_bastion_subnet_address_prefix is a mandatory parameter if bastion is deployed and if the subnets are not defined in the workload or if existing subnets are not used +management_bastion_subnet_address_prefix = "10.10.20.128/26" + +deployer_enable_public_ip=false + firewall_deployment=true + +bastion_deployment=true ``` @@ -172,29 +201,39 @@ The table below contains the Terraform parameters, these parameters need to be > | ----------------------- | ------------------------------------- | ---------- | > | `deployer_tfstate_key` | The state file name for the deployer | Required | -### Generic Parameters +### Environment Parameters -The table below contains the parameters that define the resource group and the resource naming. +The table below contains the parameters that define the resource naming. + +> [!div class="mx-tdCol2BreakAll "] +> | Variable | Description | Type | Notes | +> | ----------------------- | ------------------------------------------------- | ---------- | ------------------------------------------------------------------------------------------- | +> | `environment` | Identifier for the control plane (max 5 chars) | Mandatory | For example, `PROD` for a production environment and `NP` for a non-production environment. | +> | `location` | The Azure region in which to deploy. | Required | Use lower case | +> | 'name_override_file' | Name override file | Optional | see [Custom naming](automation-naming-module.md) | +### Resource Group + +The table below contains the parameters that define the resource group. > [!div class="mx-tdCol2BreakAll "] -> | Variable | Description | Type | -> | ----------------------- | ------------------------------------- | ---------- | -> | `environment` | A five-character identifier for the workload zone. For example, `PROD` for a production environment and `NP` for a non-production environment. | Mandatory | -> | `location` | The Azure region in which to deploy. | Required | -> | `resource_group_name` | Name of the resource group to be created | Optional | +> | Variable | Description | Type | +> | ----------------------- | -------------------------------------------------------- | ---------- | +> | `resource_group_name` | Name of the resource group to be created | Optional | > | `resource_group_arm_id` | Azure resource identifier for an existing resource group | Optional | +> | `resourcegroup_tags` | Tags to be associated with the resource group | Optional | + ### Deployer Parameters The table below contains the parameters that define the resource group and the resource naming. 
> [!div class="mx-tdCol2BreakAll "] -> | Variable | Description | Type | -> | --------------------- | ------------------------------------- | ----- | -> | `deployer_environment` | A five-character identifier for the workload zone. For example, `PROD` for a production environment and `NP` for a non-production environment. | Mandatory | -> | `deployer_location` | The Azure region in which to deploy. | Mandatory | -> | `deployer_vnet` | The logical name for the deployer VNet | Mandatory | +> | Variable | Description | Type | Notes | +> | --------------------- | ------------------------------------------------- | --------- | ------------------------------------------------------------------------------------------- | +> | `deployer_environment` | Identifier for the control plane (max 5 chars) | Mandatory | For example, `PROD` for a production environment and `NP` for a non-production environment. | +> | `deployer_location` | The Azure region in which to deploy. | Mandatory | | +> | `deployer_vnet` | The logical name for the deployer VNet | Mandatory | | ### SAP Installation media storage account @@ -222,22 +261,26 @@ The table below contains the parameters that define the resource group and the r ### Example parameters file for sap library (required parameters only) -```bash +```terraform # The environment value is a mandatory field, it is used for partitioning the environments, for example (PROD and NP) environment="MGMT" + # The location/region value is a mandatory field, it is used to control where the resources are deployed location="westeurope" + # The deployer_environment value is a mandatory field, it is used for identifying the deployer deployer_environment="MGMT" + # The deployer_location value is a mandatory field, it is used for identifying the deployer deployer_location="westeurope" + # The deployer_vnet value is a mandatory field, it is used for identifying the deployer deployer_vnet="DEP00" ``` -## Next step +## Next steps > [!div class="nextstepaction"] > [Configure SAP system](automation-configure-system.md) diff --git a/articles/virtual-machines/workloads/sap/automation-configure-devops.md b/articles/virtual-machines/workloads/sap/automation-configure-devops.md index ccfa2e84002c7..6791251e910ea 100644 --- a/articles/virtual-machines/workloads/sap/automation-configure-devops.md +++ b/articles/virtual-machines/workloads/sap/automation-configure-devops.md @@ -11,7 +11,9 @@ ms.service: virtual-machines-sap # Use SAP Deployment Automation Framework from Azure DevOps Services +Using Azure DevOps will streamline the deployment process by providing pipelines that can be executed to perform both the infrastructure deployment and the configuration and SAP installation activities. You can use Azure Repos to store your configuration files and Azure Pipelines to deploy and configure the infrastructure and the SAP application. + ## Sign up for Azure DevOps Services To use Azure DevOps Services, you'll need an Azure DevOps organization. An organization is used to connect groups of related projects. Use your work or school account to automatically connect your organization to your Azure Active Directory (Azure AD). To create an account, open [Azure DevOps](https://azure.microsoft.com/services/devops/) and either _sign-in_ or create a new account. 
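+If you prefer working from a terminal, the Azure DevOps CLI extension can create the project and set your defaults once the organization exists. The following is a sketch only; the organization URL and the project name `SAP-Deployment` are placeholders, not values required by the framework.
+
+```bash
+# Illustrative only: <your-organization> and SAP-Deployment are placeholder names.
+az extension add --name azure-devops
+az devops configure --defaults organization=https://dev.azure.com/<your-organization>
+az devops project create --name SAP-Deployment --visibility private
+```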
@@ -32,7 +34,7 @@ Start by importing the SAP Deployment Automation Framework GitHub repository int Navigate to the Repositories section and choose Import a repository, import the 'https://github.com/Azure/sap-automation.git' repository into Azure DevOps. For more info, see [Import a repository](/azure/devops/repos/git/import-git-repository?view=azure-devops&preserve-view=true) -If you are unable to import a repository, you can create the 'sap-automation' repository and manually import the content from the SAP Deployment Automation Framework GitHub repository to it. +If you're unable to import a repository, you can create the 'sap-automation' repository and manually import the content from the SAP Deployment Automation Framework GitHub repository to it. ### Create the repository for manual import @@ -51,7 +53,7 @@ Clone the repository to a local folder by clicking the _Clone_ button in the Fi ### Manually importing the repository content using a local clone -In case you were not able to import the content from the SAP Deployment Automation Framework GitHub repository you can download the content manually and add it to the folder of your local clone of the Azure DevOps repository. +You can also download the content from the SAP Deployment Automation Framework repository manually and add it to your local clone of the Azure DevOps repository. Navigate to 'https://github.com/Azure/SAP-automation' repository and download the repository content as a ZIP file by clicking the _Code_ button and choosing _Download ZIP_. @@ -62,13 +64,18 @@ Open the local folder in Visual Studio code, you should see that there are chang :::image type="content" source="./media/automation-devops/automation-vscode-changes.png" alt-text="Picture showing that source code has changed"::: Select the source control icon and provide a message about the change, for example: "Import from GitHub" and press Cntr-Enter to commit the changes. Next select the _Sync Changes_ button to synchronize the changes back to the repository. + ### Create configuration root folder -Create a top level folder called 'WORKSPACES', this folder will be the root folder for all the SAP deployment configuration files. Create the following folders in the 'WORKSPACES' folder: 'DEPLOYER', 'LIBRARY', 'LANDSCAPE' and 'SYSTEM'. +> [!IMPORTANT] + > In order to ensure that your configuration files are not overwritten by changes in the SAP Deployment Automation Framework, store them in a separate folder hierarchy. + -Optionally you may copy the sample configuration files from the 'samples/WORKSPACES' folders to the WORKSPACES folder you just created, this will allow you to experiment with sample deployments. +Create a top level folder called 'WORKSPACES', this folder will be the root folder for all the SAP deployment configuration files. Create the following folders in the 'WORKSPACES' folder: 'DEPLOYER', 'LIBRARY', 'LANDSCAPE' and 'SYSTEM'. These will contain the configuration files for the different components of the SAP Deployment Automation Framework. -Push the changes to Azure DevOps repos by selecting the source control icon and providing a message about the change, for example: "Import of sample configurations" and press Cntr-Enter to commit the changes. Next select the _Sync Changes_ button to synchronize the changes back to the repository. +Optionally you may copy the sample configuration files from the 'samples/WORKSPACES' folders to the WORKSPACES folder you created, this will allow you to experiment with sample deployments. 
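+As a sketch of the folder layout described above, the following commands create the configuration root folder and its subfolders, and optionally seed them with the sample configurations. They assume you run them from the root of your local clone of the 'sap-automation' repository.
+
+```bash
+# Illustrative only: create the configuration root folder and its subfolders,
+# then (optionally) copy in the sample configurations to experiment with.
+mkdir -p WORKSPACES/DEPLOYER WORKSPACES/LIBRARY WORKSPACES/LANDSCAPE WORKSPACES/SYSTEM
+cp -R samples/WORKSPACES/. WORKSPACES/
+```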
+ +Push the changes back to the repository by selecting the source control icon and providing a message about the change, for example: "Import of sample configurations" and press Cntr-Enter to commit the changes. Next select the _Sync Changes_ button to synchronize the changes back to the repository. ## Create Azure Pipelines @@ -182,26 +189,45 @@ The pipelines use a custom task to run Ansible. The custom task can be installed The pipelines use a custom task to perform cleanup activities post deployment. The custom task can be installed from [Post Build Cleanup](https://marketplace.visualstudio.com/items?itemName=mspremier.PostBuildCleanup). Install it to your Azure DevOps organization before running the pipelines. + +## Preparations for self-hosted agent + + +1. Create an Agent Pool by navigating to the Organizational Settings and selecting _Agent Pools_ from the Pipelines section. Click the _Add Pool_ button and choose Self-hosted as the pool type. Name the pool to align with the workload zone environment, for example `DEV-WEEU-POOL`. Ensure _Grant access permission to all pipelines_ is selected and create the pool using the _Create_ button. + +1. Sign in with the user account you plan to use in your Azure DevOps organization (https://dev.azure.com). + +1. From your home page, open your user settings, and then select _Personal access tokens_. + + :::image type="content" source="./media/automation-devops/automation-select-personal-access-tokens.jpg" alt-text="Diagram showing the creation of the Personal Access Token (PAT)."::: + +1. Create a personal access token. Ensure that _Read & manage_ is selected for _Agent Pools_ and _Read & write_ is selected for _Code_. Write down the created token value. + + :::image type="content" source="./media/automation-devops/automation-new-pat.png" alt-text="Diagram showing the attributes of the Personal Access Token (PAT)."::: + ## Variable definitions The deployment pipelines are configured to use a set of predefined parameter values. In Azure DevOps the variables are defined using variable groups. + ### Common variables There's a set of common variables that are used by all the deployment pipelines. These variables are stored in a variable group called 'SDAF-General'. Create a new variable group 'SDAF-General' using the Library page in the Pipelines section. Add the following variables: -| Variable | Value | Notes | -| ---------------------------------- | --------------------------------------- | ---------------------------------------------------------------- | -| `ANSIBLE_HOST_KEY_CHECKING` | false | | -| Deployment_Configuration_Path | WORKSPACES | For testing the sample configuration use 'samples/WORKSPACES' instead of WORKSPACES. | -| Branch | main | | -| S-Username | `` | | -| S-Password | `` | Change variable type to secret by clicking the lock icon | -| `advice.detachedHead` | false | | -| `skipComponentGovernanceDetection` | true | | -| `tf_version` | 1.1.7 | The Terraform version to use, see [Terraform download](https://www.terraform.io/downloads) | +| Variable | Value | Notes | +| ---------------------------------- | --------------------------------------- | ------------------------------------------------------------------------------------------- | +| `ANSIBLE_HOST_KEY_CHECKING` | false | | +| Deployment_Configuration_Path | WORKSPACES | For testing the sample configuration use 'samples/WORKSPACES' instead of WORKSPACES. 
| +| Branch | main | | +| S-Username | `` | | +| S-Password | `` | Change variable type to secret by clicking the lock icon. | +| `PAT` | `` | Use the Personal Token defined in the previous step. | +| `POOL` | `` | Use the Agent pool defined in the previous step. | +| `advice.detachedHead` | false | | +| `skipComponentGovernanceDetection` | true | | +| `tf_version` | 1.1.7 | The Terraform version to use, see [Terraform download](https://www.terraform.io/downloads) | Save the variables. @@ -214,18 +240,18 @@ As each environment may have different deployment credentials you'll need to cre Create a new variable group 'SDAF-MGMT' for the control plane environment using the Library page in the Pipelines section. Add the following variables: -| Variable | Value | Notes | -| --------------------- | ----------------------------------------------- | -------------------------------------------------------- | -| Agent | 'Azure Pipelines' or the name of the agent pool | Note, this pool will be created in a later step. | -| ARM_CLIENT_ID | Enter the Service principal application id. | | -| ARM_CLIENT_SECRET | Enter the Service principal password. | Change variable type to secret by clicking the lock icon | -| ARM_SUBSCRIPTION_ID | Enter the target subscription id. | | -| ARM_TENANT_ID | Enter the Tenant id for the service principal. | | -| AZURE_CONNECTION_NAME | Previously created connection name | | -| sap_fqdn | SAP Fully Qualified Domain Name, for example sap.contoso.net | Only needed if Private DNS isn't used. | -| FENCING_SPN_ID | Enter the service principal application id for the fencing agent. | Required for highly available deployments | -| FENCING_SPN_PWD | Enter the service principal password for the fencing agent. | Required for highly available deployments | -| FENCING_SPN_TENANT | Enter the service principal tenant id for the fencing agent. | Required for highly available deployments | +| Variable | Value | Notes | +| --------------------- | ------------------------------------------------------------------ | -------------------------------------------------------- | +| Agent | 'Azure Pipelines' or the name of the agent pool | Note, this pool will be created in a later step. | +| ARM_CLIENT_ID | Enter the Service principal application ID. | | +| ARM_CLIENT_SECRET | Enter the Service principal password. | Change variable type to secret by clicking the lock icon | +| ARM_SUBSCRIPTION_ID | Enter the target subscription ID. | | +| ARM_TENANT_ID | Enter the Tenant ID for the service principal. | | +| AZURE_CONNECTION_NAME | Previously created connection name. | | +| sap_fqdn | SAP Fully Qualified Domain Name, for example 'sap.contoso.net'. | Only needed if Private DNS isn't used. | +| FENCING_SPN_ID | Enter the service principal application ID for the fencing agent. | Required for highly available deployments. | +| FENCING_SPN_PWD | Enter the service principal password for the fencing agent. | Required for highly available deployments. | +| FENCING_SPN_TENANT | Enter the service principal tenant ID for the fencing agent. | Required for highly available deployments. | Save the variables. @@ -257,21 +283,6 @@ Enter a Service connection name, for instance 'Connection to MGMT subscription' You must use the Deployer as a [self-hosted agent for Azure DevOps](/azure/devops/pipelines/agents/v2-linux) to perform the Ansible configuration activities. As a one-time step, you must register the Deployer as a self-hosted agent. -### Prerequisites - -1. 
Connect to your Azure DevOps instance Sign-in to [Azure DevOps](https://dev.azure.com). Navigate to the Project you want to connect to and note the URL to the Azure DevOps project. - -1. Create an Agent Pool by navigating to the Organizational Settings and selecting _Agent Pools_ from the Pipelines section. Click the _Add Pool_ button and choose Self-hosted as the pool type. Name the pool to align with the workload zone environment, for example `DEV-WEEU-POOL`. Ensure _Grant access permission to all pipelines_ is selected and create the pool using the _Create_ button. - -1. Sign in with the user account you plan to use in your Azure DevOps organization (https://dev.azure.com). - -1. From your home page, open your user settings, and then select _Personal access tokens_. - - :::image type="content" source="./media/automation-devops/automation-select-personal-access-tokens.jpg" alt-text="Diagram showing the creation of the Personal Access Token (PAT)."::: - -1. Create a personal access token. Ensure that _Read & manage_ is selected for _Agent Pools_ and _Read & write_ is selected for _Code_. Write down the created token value. - - :::image type="content" source="./media/automation-devops/automation-new-pat.png" alt-text="Diagram showing the attributes of the Personal Access Token (PAT)."::: ## Deploy the Control Plane @@ -333,4 +344,4 @@ The agent will now be configured and started. ## Next step > [!div class="nextstepaction"] -> [DevOps Hands on Lab](automation-devops-tutorial.md) +> [DevOps hands on lab](automation-devops-tutorial.md) diff --git a/articles/virtual-machines/workloads/sap/automation-configure-extra-disks.md b/articles/virtual-machines/workloads/sap/automation-configure-extra-disks.md index ca2b8820b87c2..3ce2636eafe87 100644 --- a/articles/virtual-machines/workloads/sap/automation-configure-extra-disks.md +++ b/articles/virtual-machines/workloads/sap/automation-configure-extra-disks.md @@ -1,10 +1,10 @@ --- -title: Add more disks to SAP deployment automation configuration -description: Configure more disks for your system in the SAP deployment automation framework on Azure. Add extra disks to a new system, or an existing system. +title: Custom disk configurations +description: Provide custom disk configurations for your system in the SAP deployment automation framework on Azure. Add extra disks to a new system, or an existing system. author: kimforss ms.author: kimforss ms.reviewer: kimforss -ms.date: 11/17/2021 +ms.date: 06/09/2022 ms.topic: conceptual ms.service: virtual-machines-sap --- @@ -36,6 +36,16 @@ The table below shows the default disk configuration for HANA systems. 
| M208ms_v2 | Standard_M208ms_v2 | P10 (128 GB) | 4 P40 (2048 GB) | 3 P15 (256 GB) | P30 (1024 GB) | P6 (64 GB) | 3 P40 (2048 GB) |
| M416s_v2 | Standard_M416s_v2 | P10 (128 GB) | 4 P40 (2048 GB) | 3 P15 (256 GB) | P30 (1024 GB) | P6 (64 GB) | 3 P40 (2048 GB) |
| M416ms_v2 | Standard_M416ms_v2 | P10 (128 GB) | 4 P50 (4096 GB) | 3 P15 (256 GB) | P30 (1024 GB) | P6 (64 GB) | 4 P50 (4096 GB) |
+| E20ds_v4 | Standard_E20ds_v4 | P6 (64 GB) | 3 P10 (128 GB) | 1 Ultra (80 GB) | P15 (256 GB) | P6 (64 GB) | 1 P15 (256 GB) |
+| E20ds_v5 | Standard_E20ds_v5 | P6 (64 GB) | 3 P10 (128 GB) | 1 Ultra (80 GB) | P15 (256 GB) | P6 (64 GB) | 1 P15 (256 GB) |
+| E32ds_v4 | Standard_E32ds_v4 | P6 (64 GB) | 3 P10 (128 GB) | 1 Ultra (128 GB) | P15 (256 GB) | P6 (64 GB) | 1 P15 (256 GB) |
+| E32ds_v5 | Standard_E32ds_v5 | P6 (64 GB) | 3 P10 (128 GB) | 1 Ultra (128 GB) | P15 (256 GB) | P6 (64 GB) | 1 P15 (256 GB) |
+| E48ds_v4 | Standard_E48ds_v4 | P6 (64 GB) | 3 P15 (256 GB) | 1 Ultra (192 GB) | P20 (512 GB) | P6 (64 GB) | 1 P15 (256 GB) |
+| E48ds_v5 | Standard_E48ds_v5 | P6 (64 GB) | 3 P15 (256 GB) | 1 Ultra (192 GB) | P20 (512 GB) | P6 (64 GB) | 1 P15 (256 GB) |
+| E64ds_v3 | Standard_E64ds_v3 | P6 (64 GB) | 3 P15 (256 GB) | 1 Ultra (220 GB) | P20 (512 GB) | P6 (64 GB) | 1 P15 (256 GB) |
+| E64ds_v4 | Standard_E64ds_v4 | P6 (64 GB) | 3 P15 (256 GB) | 1 Ultra (256 GB) | P20 (512 GB) | P6 (64 GB) | 1 P15 (256 GB) |
+| E64ds_v5 | Standard_E64ds_v5 | P6 (64 GB) | 3 P15 (256 GB) | 1 Ultra (256 GB) | P20 (512 GB) | P6 (64 GB) | 1 P15 (256 GB) |
+| E96ds_v5 | Standard_E96ds_v5 | P6 (64 GB) | 3 P15 (256 GB) | 1 Ultra (256 GB) | P20 (512 GB) | P6 (64 GB) | 1 P15 (256 GB) |

### AnyDB databases

@@ -59,11 +69,17 @@ The table below shows the default disk configuration for AnyDB systems.

## Custom sizing file

-The disk sizing for an SAP system can be defined using a custom sizing file.
+The disk sizing for an SAP system can be defined using a custom sizing JSON file.

The file is grouped in four sections: "db", "app", "scs", and "web" and each section contains a list of disk configuration names, for example for the database tier "M32ts", "M64s", etc.

-Create a file using the structure shown below and save the file in the same folder as the parameter file for the system, for instance 'XO1_db_sizes.json'. Then, define the parameter `db_disk_sizes_filename` in the parameter file for the database tier. For example, `db_disk_sizes_filename = "XO1_db_sizes.json"`.
+Each section defines the default virtual machine size and the list of disks to be deployed for that tier.

-The following sample code is an example configuration for the database tier. It defines three data disks (LUNs 0, 1, and 2), a log disk (LUN 9, using the Ultra SKU) and a backup disk (LUN 13, using the standard SSDN SKU).
+Create a file using the structure shown below and save the file in the same folder as the parameter file for the system, for instance 'XO1_sizes.json'. Then, define the parameter `custom_disk_sizes_filename` in the parameter file. For example, `custom_disk_sizes_filename = "XO1_sizes.json"`.
+
+> [!TIP]
+> The path to the disk configuration file needs to be relative to the folder containing the tfvars file.
+
+
+The following sample code is an example configuration file. It defines three data disks (LUNs 0, 1, and 2), a log disk (LUN 9, using the Ultra SKU) and a backup disk (LUN 13, using the Standard SSD SKU).
The application tier servers (Application, Central Services amd Web Dispatchers) will be deployed with jus a single 'sap' data disk. ```json { @@ -111,6 +127,84 @@ The following sample code is an example configuration for the database tier. It "lun_start" : 13 } + ] + } + }, + "app" : { + "Default": { + "compute": { + "vm_size" : "Standard_D4s_v3" + }, + "storage": [ + { + "name" : "os", + "count" : 1, + "disk_type" : "Premium_LRS", + "size_gb" : 128, + "caching" : "ReadWrite" + }, + { + "name" : "sap", + "count" : 1, + "disk_type" : "Premium_LRS", + "size_gb" : 128, + "caching" : "ReadWrite", + "write_accelerator" : false, + "lun_start" : 0 + } + + ] + } + }, + "scs" : { + "Default": { + "compute": { + "vm_size" : "Standard_D4s_v3" + }, + "storage": [ + { + "name" : "os", + "count" : 1, + "disk_type" : "Premium_LRS", + "size_gb" : 128, + "caching" : "ReadWrite" + }, + { + "name" : "sap", + "count" : 1, + "disk_type" : "Premium_LRS", + "size_gb" : 128, + "caching" : "ReadWrite", + "write_accelerator" : false, + "lun_start" : 0 + } + + ] + } + }, + "web" : { + "Default": { + "compute": { + "vm_size" : "Standard_D4s_v3" + }, + "storage": [ + { + "name" : "os", + "count" : 1, + "disk_type" : "Premium_LRS", + "size_gb" : 128, + "caching" : "ReadWrite" + }, + { + "name" : "sap", + "count" : 1, + "disk_type" : "Premium_LRS", + "size_gb" : 128, + "caching" : "ReadWrite", + "write_accelerator" : false, + "lun_start" : 0 + } + ] } } diff --git a/articles/virtual-machines/workloads/sap/automation-configure-system.md b/articles/virtual-machines/workloads/sap/automation-configure-system.md index 947bd138d51f2..65590ecd1cd04 100644 --- a/articles/virtual-machines/workloads/sap/automation-configure-system.md +++ b/articles/virtual-machines/workloads/sap/automation-configure-system.md @@ -11,11 +11,11 @@ ms.service: virtual-machines-sap # Configure SAP system parameters -Configuration for the [SAP deployment automation framework on Azure](automation-deployment-framework.md)] happens through parameters files. You provide information about your SAP system properties in a tfvars file, which the automation framework uses for deployment. +Configuration for the [SAP deployment automation framework on Azure](automation-deployment-framework.md)] happens through parameters files. You provide information about your SAP system properties in a tfvars file, which the automation framework uses for deployment. You can find examples of the variable file in the 'samples/WORKSPACES/SYSTEM' folder. -The automation supports both creating resources (greenfield deployment) or using existing resources (brownfield deployment). +The automation supports both creating resources (green field deployment) or using existing resources (brownfield deployment). -For the greenfield scenario, the automation defines default names for resources, however some resource names may be defined in the tfvars file. +For the green field scenario, the automation defines default names for resources, however some resource names may be defined in the tfvars file. For the brownfield scenario, the Azure resource identifiers for the resources must be specified. @@ -29,7 +29,8 @@ The automation framework can be used to deploy the following SAP architectures: ### Standalone -In the Standalone architecture all the SAP roles are installed on a single server. +In the Standalone architecture, all the SAP roles are installed on a single server. + To configure this topology, define the database tier values and set `enable_app_tier_deployment` to false. 
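For example, a standalone system configuration might contain little more than the environment and database tier values. The following is a minimal, hypothetical sketch; the SID, platform, and sizing key are placeholders and depend on your landscape.

```terraform
# Hypothetical standalone system tfvars (illustrative values only)
environment          = "DEV"
location             = "westeurope"
network_logical_name = "SAP01"

# Database tier - in a standalone deployment all SAP roles run on this server
database_platform        = "HANA"
database_sid             = "HDB"
db_sizing_dictionary_key = "M32ts"   # placeholder sizing key

# Skip the separate application tier
enable_app_tier_deployment = false
```

The distributed and highly available topologies described next extend the same file with values such as `scs_server_count`, `application_server_count`, and the high-availability flags.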
@@ -40,25 +41,24 @@ To configure this topology, define the database tier values and define `scs_serv ### High Availability -The Distributed (Highly Available) deployment is similar to the Distributed architecture but either the database or SAP Central Services are both highly available using two virtual machines each with Pacemaker clusters. +The Distributed (Highly Available) deployment is similar to the Distributed architecture. In this deployment, the database and/or SAP Central Services can both be configured using a highly available configuration using two virtual machines each with Pacemaker clusters. To configure this topology, define the database tier values and set `database_high_availability` to true. Set `scs_server_count = 1` and `scs_high_availability` = true and `application_server_count` >= 1 ## Environment parameters -The table below contains the parameters that define the environment settings and the resource naming. +The table below contains the parameters that define the environment settings. > [!div class="mx-tdCol2BreakAll "] -> | Variable | Description | Type | -> | ----------------------- | -------------------------------------------------------- | ---------- | -> | `environment` | A five-character identifier for the workload zone. For example, `PROD` for a production environment and `NP` for a non-production environment. | Mandatory | -> | `location` | The Azure region in which to deploy. | Required | -> | `custom_prefix` | Specifies the custom prefix used in the resource naming | Optional | -> | `use_prefix` | Controls if the resource naming includes the prefix, DEV-WEEU-SAP01-X00_xxxx | Optional | -> | 'name_override_file' | Name override file | Optional | - +> | Variable | Description | Type | Notes | +> | ----------------------- | -------------------------------------------------------- | ---------- | ------------------------------------------------------------------------------------------- | +> | `environment` | Identifier for the workload zone (max 5 chars) | Mandatory | For example, `PROD` for a production environment and `NP` for a non-production environment. | +> | `location` | The Azure region in which to deploy. | Required | | +> | `custom_prefix` | Specifies the custom prefix used in the resource naming | Optional | | +> | `use_prefix` | Controls if the resource naming includes the prefix | Optional | DEV-WEEU-SAP01-X00_xxxx | +> | 'name_override_file' | Name override file | Optional | see [Custom naming](automation-naming-module.md) | ## Resource group parameters @@ -70,7 +70,18 @@ The table below contains the parameters that define the resource group. > | ----------------------- | -------------------------------------------------------- | ---------- | > | `resource_group_name` | Name of the resource group to be created | Optional | > | `resource_group_arm_id` | Azure resource identifier for an existing resource group | Optional | +> | `resource_group_tags` | Tags to be associated to the resource group | Optional | + + +## SAP Virtual Hostname parameters +In the SAP deployment automation framework, the SAP virtual hostname is defined by specifying the `use_secondary_ips` parameter. 
+ + +> [!div class="mx-tdCol2BreakAll "] +> | Variable | Description | Type | +> | ----------------------- | -------------------------------------------------------- | ---------- | +> | `use_secondary_ips` | Boolean flag indicating if SAP should be installed using Virtual hostnames | Optional | ### Database tier parameters @@ -86,24 +97,25 @@ The database tier defines the infrastructure for the database tier, supported da > [!div class="mx-tdCol2BreakAll "] -> | Variable | Description | Type | Notes | -> | -------------------------------- | -----------------------------------------------------------------------------------------| ----------- | ------------------ | -> | `database_sid` | Defines the database SID | Required | | -> | `database_platform` | Defines the database backend | Required | | -> | `database_high_availability` | Defines if the database tier is deployed highly available | Optional | See [High availability configuration](automation-configure-system.md#high-availability-configuration) | -> | `database_server_count` | Defines the number of database servers | Optional | Default value is 1 | -> | `database_vm_zones` | Defines the Availability Zones | Optional | | -> | `database_size` | Defines the database sizing information | Required | See [Custom Sizing](automation-configure-extra-disks.md) | -> | `db_disk_sizes_filename` | Defines the custom database sizing | Optional | See [Custom Sizing](automation-configure-extra-disks.md) | -> | `database_vm_use_DHCP` | Controls if Azure subnet provided IP addresses should be used (dynamic) true | Optional | | -> | `database_vm_db_nic_ips` | Defines the static IP addresses for the database servers (database subnet) | Optional | | -> | `database_vm_admin_nic_ips` | Defines the static IP addresses for the database servers (admin subnet) | Optional | | -> | `database_vm_image` | Defines the Virtual machine image to use, see below | Optional | | -> | `database_vm_authentication_type` | Defines the authentication type for the database virtual machines (key/password) | Optional | | -> | `database_no_avset` | Controls if the database virtual machines are deployed without availability sets | Optional | default is false | -> | `database_no_ppg` | Controls if the database servers will not be placed in a proximity placement group | Optional | default is false | -> | `database_vm_avset_arm_ids` | Defines the existing availability sets Azure resource IDs | Optional | Primarily used together with ANF pinning| -> | `hana_dual_nics` | Controls if the HANA database servers will have dual network interfaces | Optional | default is true | +> | Variable | Description | Type | Notes | +> | ---------------------------------- | ----------------------------------------------------------------------------------- | ----------- | ------------------ | +> | `database_sid` | Defines the database SID. | Required | | +> | `database_platform` | Defines the database backend. | Supported values are `HANA`, `DB2`, `ORACLE`, `ASE`, `SQLSERVER`, `NONE` | +> | `database_high_availability` | Defines if the database tier is deployed highly available. | Optional | See [High availability configuration](automation-configure-system.md#high-availability-configuration) | +> | `database_server_count` | Defines the number of database servers. | Optional | Default value is 1 | +> | `database_vm_zones` | Defines the Availability Zones for the database servers. | Optional | | +> | `db_sizing_dictionary_key` | Defines the database sizing information. 
| Required | See [Custom Sizing](automation-configure-extra-disks.md) | +> | `db_disk_sizes_filename` | Defines the custom database sizing file name. | Optional | See [Custom Sizing](automation-configure-extra-disks.md) | +> | `database_vm_use_DHCP` | Controls if Azure subnet provided IP addresses should be used. | Optional | | +> | `database_vm_db_nic_ips` | Defines the IP addresses for the database servers (database subnet). | Optional | | +> | `database_vm_db_nic_secondary_ips` | Defines the secondary IP addresses for the database servers (database subnet). | Optional | | +> | `database_vm_admin_nic_ips` | Defines the IP addresses for the database servers (admin subnet). | Optional | | +> | `database_vm_image` | Defines the Virtual machine image to use, see below. | Optional | | +> | `database_vm_authentication_type` | Defines the authentication type (key/password). | Optional | | +> | `database_no_avset` | Controls if the database virtual machines are deployed without availability sets. | Optional | default is false | +> | `database_no_ppg` | Controls if the database servers will not be placed in a proximity placement group. | Optional | default is false | +> | `database_vm_avset_arm_ids` | Defines the existing availability sets Azure resource IDs. | Optional | Primarily used together with ANF pinning| +> | `hana_dual_nics` | Controls if the HANA database servers will have dual network interfaces. | Optional | default is true | The Virtual Machine and the operating system image is defined using the following structure: @@ -114,7 +126,7 @@ The Virtual Machine and the operating system image is defined using the followin publisher="SUSE" offer="sles-sap-15-sp3" sku="gen2" - version="8.2.2021040902" + version="latest" } ``` @@ -128,7 +140,7 @@ The application tier defines the infrastructure for the application tier, which > | ---------------------------------- | --------------------------------------------------------------------------- | -----------| ------ | > | `enable_app_tier_deployment` | Defines if the application tier is deployed | Optional | | > | `sid` | Defines the SAP application SID | Required | | -> | `app_tier_vm_sizing` | Lookup value defining the VM SKU and the disk layout for tha application tier servers | Optional | +> | `app_tier_sizing_dictionary_key` | Lookup value defining the VM SKU and the disk layout for tha application tier servers | Optional | > | `app_disk_sizes_filename` | Defines the custom disk size file for the application tier servers | Optional | See [Custom Sizing](automation-configure-extra-disks.md) | > | `app_tier_authentication_type` | Defines the authentication type for the application tier virtual machine(s) | Optional | | > | `app_tier_use_DHCP` | Controls if Azure subnet provided IP addresses should be used (dynamic) | Optional | | @@ -140,58 +152,61 @@ The application tier defines the infrastructure for the application tier, which > [!div class="mx-tdCol2BreakAll "] > | Variable | Description | Type | Notes | > | -------------------------------------- | -------------------------------------------------------------------- | ----------| ------ | -> | `scs_server_count` | Defines the number of scs servers | Required | | -> | `scs_high_availability` | Defines if the Central Services is highly available | Optional | See [High availability configuration](automation-configure-system.md#high-availability-configuration) | -> | `scs_instance_number` | The instance number of SCS | Optional | | -> | `ers_instance_number` | The instance number of ERS | 
Optional | | -> | `scs_server_sku` | Defines the Virtual machine SKU to use | Optional | | -> | `scs_server_image` | Defines the Virtual machine image to use | Required | | -> | `scs_server_zones` | Defines the availability zones to which the scs servers are deployed | Optional | | -> | `scs_server_app_nic_ips` | List of IP addresses for the scs server (app subnet) | Optional | Ignored if `app_tier_use_DHCP` is used | -> | `scs_server_app_admin_nic_ips` | List of IP addresses for the scs server (admin subnet) | Optional | Ignored if `app_tier_use_DHCP` is used | -> | `scs_server_loadbalancer_ips` | List of IP addresses for the scs load balancer (app subnet) | Optional | Ignored if `app_tier_use_DHCP` is used | -> | `scs_server_no_ppg` | Controls scs server proximity placement group | Optional | | -> | `scs_server_no_avset` | Controls scs server availability set placement | Optional | | -> | `scs_server_tags` | Defines a list of tags to be applied to the scs servers | Optional | | +> | `scs_server_count` | Defines the number of SCS servers. | Required | | +> | `scs_high_availability` | Defines if the Central Services is highly available. | Optional | See [High availability configuration](automation-configure-system.md#high-availability-configuration) | +> | `scs_instance_number` | The instance number of SCS. | Optional | | +> | `ers_instance_number` | The instance number of ERS. | Optional | | +> | `scs_server_sku` | Defines the Virtual machine SKU to use. | Optional | | +> | `scs_server_image` | Defines the Virtual machine image to use. | Required | | +> | `scs_server_zones` | Defines the availability zones of the SCS servers. | Optional | | +> | `scs_server_app_nic_ips` | List of IP addresses for the SCS servers (app subnet). | Optional | | +> | `scs_server_app_nic_secondary_ips[]` | List of secondary IP addresses for the SCS servers (app subnet). | Optional | | +> | `scs_server_app_admin_nic_ips` | List of IP addresses for the SCS servers (admin subnet). | Optional | | +> | `scs_server_loadbalancer_ips` | List of IP addresses for the scs load balancer (app subnet). | Optional | | +> | `scs_server_no_ppg` | Controls SCS server proximity placement group. | Optional | | +> | `scs_server_no_avset` | Controls SCS server availability set placement. | Optional | | +> | `scs_server_tags` | Defines a list of tags to be applied to the SCS servers. 
| Optional | | ### Application server parameters > [!div class="mx-tdCol2BreakAll "] -> | Variable | Description | Type | Notes | -> | -------------------------------------- | ---------------------------------------------------------------------------- | -----------| ------ | -> | `application_server_count` | Defines the number of application servers | Required | | -> | `application_server_sku` | Defines the Virtual machine SKU to use | Optional | | -> | `application_server_image` | Defines the Virtual machine image to use | Required | | -> | `application_server_zones` | Defines the availability zones to which the application servers are deployed | Optional | | -> | `application_server_app_nic_ips[]` | List of IP addresses for the application server (app subnet) | Optional | Ignored if `app_tier_use_DHCP` is used | -> | `application_server_app_admin_nic_ips` | List of IP addresses for the application server (admin subnet) | Optional | Ignored if `app_tier_use_DHCP` is used | -> | `application_server_no_ppg` | Controls application server proximity placement group | Optional | | -> | `application_server_no_avset` | Controls application server availability set placement | Optional | | -> | `application_server_tags` | Defines a list of tags to be applied to the application servers | Optional | | +> | Variable | Description | Type | Notes | +> | ----------------------------------------- | ---------------------------------------------------------------------------- | -----------| ------ | +> | `application_server_count` | Defines the number of application servers. | Required | | +> | `application_server_sku` | Defines the Virtual machine SKU to use. | Optional | | +> | `application_server_image` | Defines the Virtual machine image to use. | Required | | +> | `application_server_zones` | Defines the availability zones to which the application servers are deployed. | Optional | | +> | `application_server_app_nic_ips[]` | List of IP addresses for the application servers (app subnet). | Optional | | +> | `application_server_nic_secondary_ips[]` | List of secondary IP addresses for the application servers (app subnet). | Optional | | +> | `application_server_app_admin_nic_ips` | List of IP addresses for the application server (admin subnet). | Optional | | +> | `application_server_no_ppg` | Controls application server proximity placement group. | Optional | | +> | `application_server_no_avset` | Controls application server availability set placement. | Optional | | +> | `application_server_tags` | Defines a list of tags to be applied to the application servers. 
| Optional | | ### Web dispatcher parameters > [!div class="mx-tdCol2BreakAll "] -> | Variable | Description | Type | Notes | -> | --------------------------------------- | ------------------------------------------------------------------------ | --------- | ------ | -> | `webdispatcher_server_count` | Defines the number of web dispatcher servers | Required | | -> | `webdispatcher_server_sku` | Defines the Virtual machine SKU to use | Optional | | -> | `webdispatcher_server_image` | Defines the Virtual machine image to use | Optional | | -> | `webdispatcher_server_zones` | Defines the availability zones to which the web dispatchers are deployed | Optional | | -> | `webdispatcher_server_app_nic_ips[]` | List of IP addresses for the web dispatcher server (app subnet) | Optional | Ignored if `app_tier_use_DHCP` is used | -> | `webdispatcher_server_app_admin_nic_ips`| List of IP addresses for the web dispatcher server (admin subnet) | Optional | Ignored if `app_tier_use_DHCP` is used | -> | `webdispatcher_server_no_ppg` | Controls web proximity placement group placement | Optional | | -> | `webdispatcher_server_no_avset` | Defines web dispatcher availability set placement | Optional | | -> | `webdispatcher_server_tags` | Defines a list of tags to be applied to the web dispatcher servers | Optional | | +> | Variable | Description | Type | Notes | +> | ------------------------------------------ | ------------------------------------------------------------------------------ | --------- | ------ | +> | `webdispatcher_server_count` | Defines the number of web dispatcher servers. | Required | | +> | `webdispatcher_server_sku` | Defines the Virtual machine SKU to use. | Optional | | +> | `webdispatcher_server_image` | Defines the Virtual machine image to use. | Optional | | +> | `webdispatcher_server_zones` | Defines the availability zones to which the web dispatchers are deployed. | Optional | | +> | `webdispatcher_server_app_nic_ips[]` | List of IP addresses for the web dispatcher server (app/web subnet). | Optional | | +> | `webdispatcher_server_nic_secondary_ips[]` | List of secondary IP addresses for the web dispatcher server (app/web subnet). | Optional | | +> | `webdispatcher_server_app_admin_nic_ips` | List of IP addresses for the web dispatcher server (admin subnet). | Optional | | +> | `webdispatcher_server_no_ppg` | Controls web proximity placement group placement. | Optional | | +> | `webdispatcher_server_no_avset` | Defines web dispatcher availability set placement. | Optional | | +> | `webdispatcher_server_tags` | Defines a list of tags to be applied to the web dispatcher servers. | Optional | | ## Network parameters -If the subnets are not deployed using the workload zone deployment, they can be added in the system's tfvars file. +If the subnets aren't deployed using the workload zone deployment, they can be added in the system's tfvars file. -The automation framework can either deploy the virtual network and the subnets for new environment deployments (greenfield) or using an existing virtual network and existing subnets for existing environment deployments (brownfield). - - For the greenfield scenario, the virtual network address space and the subnet address prefixes must be specified +The automation framework can either deploy the virtual network and the subnets (green field deployment) or using an existing virtual network and existing subnets (brown field deployments). 
+ - For the green field scenario, the virtual network address space and the subnet address prefixes must be specified - For the brownfield scenario, the Azure resource identifier for the virtual network and the subnets must be specified Ensure that the virtual network address space is large enough to host all the resources. @@ -200,32 +215,49 @@ The table below contains the networking parameters. > [!div class="mx-tdCol2BreakAll "] -> | Variable | Description | Type | Notes | -> | -------------------------------- | -------------------------------------------------------------------- | --------- | ------ | -> | `network_logical_name` | The logical name of the network. | Required | | -> | `network_address_space` | The address range for the virtual network. | Mandatory | For new environment deployments | -> | `admin_subnet_name` | The name of the 'admin' subnet. | Optional | | -> | `admin_subnet_address_prefix` | The address range for the 'admin' subnet. | Mandatory | For new environment deployments | -> | `admin_subnet_arm_id` | The Azure resource identifier for the 'admin' subnet. | Mandatory | For existing environment deployments | -> | `admin_subnet_nsg_name` | The name of the 'admin' Network Security Group name. | Optional | | -> | `admin_subnet_nsg_arm_id` | The Azure resource identifier for the 'admin' Network Security Group | Mandatory | For existing environment deployments | -> | `db_subnet_name` | The name of the 'db' subnet. | Optional | | -> | `db_subnet_address_prefix` | The address range for the 'db' subnet. | Mandatory | For new environment deployments | -> | `db_subnet_arm_id` | The Azure resource identifier for the 'db' subnet. | Mandatory | For existing environment deployments | -> | `db_subnet_nsg_name` | The name of the 'db' Network Security Group name. | Optional | | -> | `db_subnet_nsg_arm_id` | The Azure resource identifier for the 'db' Network Security Group. | Mandatory | For existing environment deployments | -> | `app_subnet_name` | The name of the 'app' subnet. | Optional | | -> | `app_subnet_address_prefix` | The address range for the 'app' subnet. | Mandatory | For new environment deployments | -> | `app_subnet_arm_id` | The Azure resource identifier for the 'app' subnet. | Mandatory | For existing environment deployments | -> | `app_subnet_nsg_name` | The name of the 'app' Network Security Group name. | Optional | | -> | `app_subnet_nsg_arm_id` | The Azure resource identifier for the 'app' Network Security Group. | Mandatory | For existing environment deployments | -> | `web_subnet_name` | The name of the 'web' subnet. | Optional | | -> | `web_subnet_address_prefix` | The address range for the 'web' subnet. | Mandatory | For new environment deployments | -> | `web_subnet_arm_id` | The Azure resource identifier for the 'web' subnet. | Mandatory | For existing environment deployments | -> | `web_subnet_nsg_name` | The name of the 'web' Network Security Group name. | Optional | | -> | `web_subnet_nsg_arm_id` | The Azure resource identifier for the 'web' Network Security Group. | Mandatory | For existing environment deployments | - -\* = Required for existing environment deployments +> | Variable | Description | Type | Notes | +> | -------------------------------- | -------------------------------------------------------------------- | --------- | ---------------------------- | +> | `network_logical_name` | The logical name of the network. | Required | | +> | | | Optional | | +> | `admin_subnet_name` | The name of the 'admin' subnet. 
| Optional | | +> | `admin_subnet_address_prefix` | The address range for the 'admin' subnet. | Mandatory | For green field deployments. | +> | `admin_subnet_arm_id` * | The Azure resource identifier for the 'admin' subnet. | Mandatory | For brown field deployments. | +> | `admin_subnet_nsg_name` | The name of the 'admin' Network Security Group name. | Optional | | +> | `admin_subnet_nsg_arm_id` * | The Azure resource identifier for the 'admin' Network Security Group | Mandatory | For brown field deployments. | +> | | | Optional | | +> | `db_subnet_name` | The name of the 'db' subnet. | Optional | | +> | `db_subnet_address_prefix` | The address range for the 'db' subnet. | Mandatory | For green field deployments. | +> | `db_subnet_arm_id` * | The Azure resource identifier for the 'db' subnet. | Mandatory | For brown field deployments. | +> | `db_subnet_nsg_name` | The name of the 'db' Network Security Group name. | Optional | | +> | `db_subnet_nsg_arm_id` * | The Azure resource identifier for the 'db' Network Security Group. | Mandatory | For brown field deployments. | +> | | | Optional | | +> | `app_subnet_name` | The name of the 'app' subnet. | Optional | | +> | `app_subnet_address_prefix` | The address range for the 'app' subnet. | Mandatory | For green field deployments. | +> | `app_subnet_arm_id` * | The Azure resource identifier for the 'app' subnet. | Mandatory | For brown field deployments. | +> | `app_subnet_nsg_name` | The name of the 'app' Network Security Group name. | Optional | | +> | `app_subnet_nsg_arm_id` * | The Azure resource identifier for the 'app' Network Security Group. | Mandatory | For brown field deployments. | +> | | | Optional | | +> | `web_subnet_name` | The name of the 'web' subnet. | Optional | | +> | `web_subnet_address_prefix` | The address range for the 'web' subnet. | Mandatory | For green field deployments. | +> | `web_subnet_arm_id` * | The Azure resource identifier for the 'web' subnet. | Mandatory | For brown field deployments. | +> | `web_subnet_nsg_name` | The name of the 'web' Network Security Group name. | Optional | | +> | `web_subnet_nsg_arm_id` * | The Azure resource identifier for the 'web' Network Security Group. | Mandatory | For brown field deployments. | + +\* = Required For brown field deployments. + +## Key Vault Parameters + +If you don't want to use the workload zone key vault but another one, this can be added in the system's tfvars file. + +The table below defines the parameters used for defining the Key Vault information. + +> [!div class="mx-tdCol2BreakAll "] +> | Variable | Description | Type | Notes | +> | ----------------------------------- | ------------------------------------------------------------------------------ | ------------ | ----------------------------------- | +> | `user_keyvault_id` | Azure resource identifier for existing system credentials key vault | Optional | | +> | `spn_keyvault_id` | Azure resource identifier for existing deployment credentials (SPNs) key vault | Optional | | +> | `enable_purge_control_for_keyvaults | Disables the purge protection for Azure key vaults. | Optional | Only use this for test environments | + ### Anchor virtual machine parameters @@ -247,12 +279,12 @@ The table below contains the parameters related to the anchor virtual machine. 
The Virtual Machine and the operating system image is defined using the following structure: ```python { -os_type="" -source_image_id="" -publisher="Canonical" -offer="0001-com-ubuntu-server-focal" -sku="20_04-lts" -version="latest" + os_type="linux" + source_image_id="" + publisher="SUSE" + offer="sles-sap-15-sp3" + sku="gen2" + version="latest" } ``` @@ -263,7 +295,7 @@ By default the SAP System deployment uses the credentials from the SAP Workload > [!div class="mx-tdCol2BreakAll "] > | Variable | Description | Type | > | ---------------------------------- | -------------------------------------| ----------- | -> | `automation_username` | Administrator account name | Optional | +> | `automation_username` | Administrator account name | Optional | > | `automation_password` | Administrator password | Optional | > | `automation_path_to_public_key` | Path to existing public key | Optional | > | `automation_path_to_private_key` | Path to existing private key | Optional | @@ -273,13 +305,15 @@ By default the SAP System deployment uses the credentials from the SAP Workload > [!div class="mx-tdCol2BreakAll "] -> | Variable | Description | Type | -> | ---------------------------------------------- | ------------------------------------- | ----------- | +> | Variable | Description | Type | +> | ---------------------------------------------- | ------------------------------------------------------------------------------- | ----------- | > | `resource_offset` | Provides and offset for resource naming. The offset number for resource naming when creating multiple resources. The default value is 0, which creates a naming pattern of disk0, disk1, and so on. An offset of 1 creates a naming pattern of disk1, disk2, and so on. | Optional | > | `disk_encryption_set_id` | The disk encryption key to use for encrypting managed disks using customer provided keys | Optional | > | `use_loadbalancers_for_standalone_deployments` | Controls if load balancers are deployed for standalone installations | Optional | > | `license_type` | Specifies the license type for the virtual machines. | Possible values are `RHEL_BYOS` and `SLES_BYOS`. For Windows the possible values are `None`, `Windows_Client` and `Windows_Server`. | > | `use_zonal_markers` | Specifies if zonal Virtual Machines will include a zonal identifier. 'xooscs_z1_00l###' vs 'xooscs00l###'| Default value is true. 
| +> | `proximityplacementgroup_names` | Specifies the names of the proximity placement groups | | +> | `proximityplacementgroup_arm_ids` | Specifies the Azure resource identifiers of existing proximity placement groups| | ## NFS support @@ -301,21 +335,39 @@ By default the SAP System deployment uses the credentials from the SAP Workload ### Azure NetApp Files Support > [!div class="mx-tdCol2BreakAll "] -> | Variable | Description | Type | Notes | -> | ---------------------------------- | -----------------------------------------------------------------------| ----------- | ------ | -> | `ANF_use_for_HANA_data` | Create Azure NetApp Files volume for HANA data | Optional | | -> | `ANF_use_existing_data_volume` | Use existing Azure NetApp Files volume for HANA data | Optional | Use for pre-created volumes | -> | `ANF_data_volume_name` | Azure NetApp Files volume name for HANA data | Optional | | -> | `ANF_HANA_data_volume_size` | Azure NetApp Files volume size in GB for HANA data | Optional | default size 256 | -> | `ANF_use_for_HANA_log` | Create Azure NetApp Files volume for HANA data | Optional | | -> | `ANF_use_existing_log_volume` | Use existing Azure NetApp Files volume for HANA data | Optional | Use for pre-created volumes | -> | `ANF_log_volume_name` | Azure NetApp Files volume name for HANA data | Optional | | -> | `ANF_HANA_log_volume_size` | Azure NetApp Files volume size in GB for HANA data | Optional | default size 128 | +> | Variable | Description | Type | Notes | +> | ---------------------------------- | -----------------------------------------------------------------------| ----------- | --------------------------- | +> | `ANF_use_for_HANA_data` | Create Azure NetApp Files volume for HANA data. | Optional | | +> | `ANF_use_existing_data_volume` | Use existing Azure NetApp Files volume for HANA data. | Optional | Use for pre-created volumes | +> | `ANF_data_volume_name` | Azure NetApp Files volume name for HANA data. | Optional | | +> | `ANF_HANA_data_volume_size` | Azure NetApp Files volume size in GB for HANA data. | Optional | default size 256 | +> | | | | | +> | `ANF_use_for_HANA_log` | Create Azure NetApp Files volume for HANA log. | Optional | | +> | `ANF_use_existing_log_volume` | Use existing Azure NetApp Files volume for HANA log. | Optional | Use for pre-created volumes | +> | `ANF_log_volume_name` | Azure NetApp Files volume name for HANA log. | Optional | | +> | `ANF_HANA_log_volume_size` | Azure NetApp Files volume size in GB for HANA log. | Optional | default size 128 | +> | | | | | +> | `ANF_use_for_HANA_shared` | Create Azure NetApp Files volume for HANA shared. | Optional | | +> | `ANF_use_existing_shared_volume` | Use existing Azure NetApp Files volume for HANA shared. | Optional | Use for pre-created volumes | +> | `ANF_shared_volume_name` | Azure NetApp Files volume name for HANA shared. | Optional | | +> | `ANF_HANA_shared_volume_size` | Azure NetApp Files volume size in GB for HANA shared. | Optional | default size 128 | +> | | | | | +> | `ANF_use_for_sapmnt` | Create Azure NetApp Files volume for sapmnt. | Optional | | +> | `ANF_use_existing_sapmnt_volume` | Use existing Azure NetApp Files volume for sapmnt. | Optional | Use for pre-created volumes | +> | `ANF_sapmnt_volume_name` | Azure NetApp Files volume name for sapmnt. | Optional | | +> | `ANF_sapmnt_volume_size` | Azure NetApp Files volume size in GB for sapmnt. | Optional | default size 128 | +> | | | | | +> | `ANF_use_for_usrsap` | Create Azure NetApp Files volume for usrsap. 
| Optional | | +> | `ANF_use_existing_usrsap_volume` | Use existing Azure NetApp Files volume for usrsap. | Optional | Use for pre-created volumes | +> | `ANF_usrsap_volume_name` | Azure NetApp Files volume name for usrsap. | Optional | | +> | `ANF_usrsap_volume_size` | Azure NetApp Files volume size in GB for usrsap. | Optional | default size 128 | ## Oracle parameters -When deploying Oracle based systems these parameters need to be updated in the sap-parameters.yaml file. +> [!NOTE] +> These parameters need to be updated in the sap-parameters.yaml file when deploying Oracle based systems. + > [!div class="mx-tdCol2BreakAll "] > | Variable | Description | Type | Notes | @@ -370,5 +422,5 @@ az keyvault secret set --name "-fencing-spn-tenant" --vault-name " [!div class="nextstepaction"] -> [Deploy SAP System](automation-deploy-system.md) +> [Deploy SAP system](automation-deploy-system.md) diff --git a/articles/virtual-machines/workloads/sap/automation-configure-workload-zone.md b/articles/virtual-machines/workloads/sap/automation-configure-workload-zone.md index afacd338cfffe..7f7da32b77b90 100644 --- a/articles/virtual-machines/workloads/sap/automation-configure-workload-zone.md +++ b/articles/virtual-machines/workloads/sap/automation-configure-workload-zone.md @@ -15,35 +15,38 @@ An [SAP application](automation-deployment-framework.md#sap-concepts) typically ## Workload zone deployment configuration -The configuration of the SAP workload zone is done via a Terraform tfvars variable file. +The configuration of the SAP workload zone is done via a Terraform tfvars variable file. You can find examples of the variable file in the 'samples/WORKSPACES/LANDSCAPE' folder. -## Terraform Parameters +The sections below show the different sections of the variable file. -The table below contains the Terraform parameters, these parameters need to be entered manually if not using the deployment scripts. +## Environment parameters +The table below contains the parameters that define the environment settings. + + +> [!div class="mx-tdCol2BreakAll "] +> | Variable | Description | Type | Notes | +> | ----------------------- | -------------------------------------------------------- | ---------- | ------------------------------------------------------------------------------------------- | +> | `environment` | Identifier for the workload zone (max 5 chars) | Mandatory | For example, `PROD` for a production environment and `NP` for a non-production environment. | +> | `location` | The Azure region in which to deploy. | Required | | +> | 'name_override_file' | Name override file | Optional | see [Custom naming](automation-naming-module.md) | -| Variable | Type | Description | -| ----------------------- | ---------- | ------------------------------------- | -| `tfstate_resource_id` | Required * | Azure resource identifier for the Storage account in the SAP Library that will contain the Terraform state files | -| `deployer_tfstate_key` | Required * | The name of the state file for the Deployer | -## Generic Parameters +## Resource group parameters -The table below contains the parameters that define the resource group and the resource naming. +The table below contains the parameters that define the resource group. > [!div class="mx-tdCol2BreakAll "] -> | Variable | Description | Type | -> | ----------------------- | ------------------------------------- | ----- | -> | `environment` | A five-character identifier for the workload zone. 
For example, `PROD` for a production environment and `NP` for a non-production environment.| Required | -> | `location` | The Azure region in which to deploy. | Required | -> | `resource_group_name` | Name of the resource group to be created | Optional | -> | `resource_group_arm_id` | Azure resource identifier for an existing resource group | Optional | +> | Variable | Description | Type | +> | ----------------------- | -------------------------------------------------------- | ---------- | +> | `resource_group_name` | Name of the resource group to be created | Optional | +> | `resource_group_arm_id` | Azure resource identifier for an existing resource group | Optional | ## Network Parameters -The automation framework supports both creating the virtual network and the subnets for new environment deployments (Green field) or using an existing virtual network and existing subnets for existing environment deployments (Brown field) or a combination of for new environment deployments and for existing environment deployments. +The automation framework supports both creating the virtual network and the subnets For green field deployments. (Green field) or using an existing virtual network and existing subnets For brown field deployments. (Brown field) or a combination of For green field deployments. and For brown field deployments. - For the green field scenario, the virtual network address space and the subnet address prefixes must be specified - For the brown field scenario, the Azure resource identifier for the virtual network and the subnets must be specified @@ -52,90 +55,96 @@ Ensure that the virtual network address space is large enough to host all the re The table below contains the networking parameters. > [!div class="mx-tdCol2BreakAll "] -> | Variable | Description | Type | Notes | -> | -------------------------------- | -------------------------------------------------------------------- | --------- | ------ | -> | `network_name` | The logical name of the network | Required | | -> | `network_arm_id` | The Azure resource identifier for the virtual network | Optional | For existing environment deployments | -> | `network_address_space` | The address range for the virtual network | Mandatory | For new environment deployments | -> | `admin_subnet_name` | The name of the `admin` subnet | Optional | | -> | `admin_subnet_address_prefix` | The address range for the `admin` subnet | Mandatory | For new environment deployments | -> | `admin_subnet_arm_id` | The Azure resource identifier for the `admin` subnet | Mandatory | For existing environment deployments | -> | `admin_subnet_nsg_name` | The name of the `admin`Network Security Group name | Optional | | -> | `admin_subnet_nsg_arm_id` | The Azure resource identifier for the `admin` Network Security Group | Mandatory | For existing environment deployments | -> | `db_subnet_name` | The name of the `db` subnet | Optional | | -> | `db_subnet_address_prefix` | The address range for the `db` subnet | Mandatory | For new environment deployments | -> | `db_subnet_arm_id` | The Azure resource identifier for the `db` subnet | Mandatory | For existing environment deployments | -> | `db_subnet_nsg_name` | The name of the `db` Network Security Group name | Optional | | -> | `db_subnet_nsg_arm_id` | The Azure resource identifier for the `db` Network Security Group | Mandatory | For existing environment deployments | -> | `app_subnet_name` | The name of the `app` subnet | Optional | | -> | `app_subnet_address_prefix` | The address range for the `app` subnet 
| Mandatory | For new environment deployments | -> | `app_subnet_arm_id` | The Azure resource identifier for the `app` subnet | Mandatory | For existing environment deployments | -> | `app_subnet_nsg_name` | The name of the `app` Network Security Group name | Optional | | -> | `app_subnet_nsg_arm_id` | The Azure resource identifier for the `app` Network Security Group | Mandatory | For existing environment deployments | -> | `web_subnet_name` | The name of the `web` subnet | Optional | | -> | `web_subnet_address_prefix` | The address range for the `web` subnet | Mandatory | For new environment deployments | -> | `web_subnet_arm_id` | The Azure resource identifier for the `web` subnet | Mandatory | For existing environment deployments | -> | `web_subnet_nsg_name` | The name of the `web` Network Security Group name | Optional | | -> | `web_subnet_nsg_arm_id` | The Azure resource identifier for the `web` Network Security Group | Mandatory | For existing environment deployments | - -## ISCSI Parameters +> | Variable | Description | Type | Notes | +> | -------------------------------- | --------------------------------------------------------------------- | --------- | ---------------------------- | +> | `network_name` | The name of the network. | Optional | | +> | `network_logical_name` | The logical name of the network, for eaxmple 'SAP01' | Required | Used for resource naming. | +> | `network_arm_id` | The Azure resource identifier for the virtual network. | Optional | For brown field deployments. | +> | `network_address_space` | The address range for the virtual network. | Mandatory | For green field deployments. | +> | | | | | +> | `admin_subnet_name` | The name of the `admin` subnet. | Optional | | +> | `admin_subnet_address_prefix` | The address range for the `admin` subnet. | Mandatory | For green field deployments. | +> | `admin_subnet_arm_id` | The Azure resource identifier for the `admin` subnet. | Mandatory | For brown field deployments. | +> | | | | | +> | `admin_subnet_nsg_name` | The name of the `admin`Network Security Group name. | Optional | | +> | `admin_subnet_nsg_arm_id` | The Azure resource identifier for the `admin` Network Security Group. | Mandatory | For brown field deployments. | +> | | | | | +> | `db_subnet_name` | The name of the `db` subnet. | Optional | | +> | `db_subnet_address_prefix` | The address range for the `db` subnet. | Mandatory | For green field deployments. | +> | `db_subnet_arm_id` | The Azure resource identifier for the `db` subnet. | Mandatory | For brown field deployments. | +> | | | | | +> | `db_subnet_nsg_name` | The name of the `db` Network Security Group name. | Optional | | +> | `db_subnet_nsg_arm_id` | The Azure resource identifier for the `db` Network Security Group | Mandatory | For brown field deployments. | +> | | | | | +> | `app_subnet_name` | The name of the `app` subnet. | Optional | | +> | `app_subnet_address_prefix` | The address range for the `app` subnet. | Mandatory | For green field deployments. | +> | `app_subnet_arm_id` | The Azure resource identifier for the `app` subnet. | Mandatory | For brown field deployments. | +> | | | | | +> | `app_subnet_nsg_name` | The name of the `app` Network Security Group name. | Optional | | +> | `app_subnet_nsg_arm_id` | The Azure resource identifier for the `app` Network Security Group. | Mandatory | For brown field deployments. | +> | | | | | +> | `web_subnet_name` | The name of the `web` subnet. | Optional | | +> | `web_subnet_address_prefix` | The address range for the `web` subnet. 
| Mandatory | For green field deployments. | +> | `web_subnet_arm_id` | The Azure resource identifier for the `web` subnet. | Mandatory | For brown field deployments. | +> | | | | | +> | `web_subnet_nsg_name` | The name of the `web` Network Security Group name. | Optional | | +> | `web_subnet_nsg_arm_id` | The Azure resource identifier for the `web` Network Security Group | Mandatory | For brown field deployments. | + + +The table below contains the networking parameters if Azure NetApp Files are used. > [!div class="mx-tdCol2BreakAll "] -> | Variable | Description | Type | Notes | -> | -------------------------------- | ------------------------------------------------------------------------- | --------- | -------------------------------------- | -> | `iscsi_subnet_name` | The name of the `iscsi` subnet | Optional | | -> | `iscsi_subnet_address_prefix` | The address range for the `iscsi` subnet | Mandatory | For new environment deployments | -> | `iscsi_subnet_arm_id` | The Azure resource identifier for the `iscsi` subnet | Mandatory | For existing environment deployments | -> | `iscsi_subnet_nsg_name` | The name of the `iscsi` Network Security Group name | Optional | | -> | `iscsi_subnet_nsg_arm_id` | The Azure resource identifier for the `iscsi` Network Security Group | Mandatory | For existing environment deployments | -> | `iscsi_count` | The number of iSCSI Virtual Machines | Optional | | -> | `iscsi_use_DHCP` | Controls whether to use dynamic IP addresses provided by the Azure subnet | Optional | | -> | `iscsi_image` | Defines the Virtual machine image to use, see below | Optional | | -> | `iscsi_authentication_type` | Defines the default authentication for the iSCSI Virtual Machines | Optional | | -> | `iscsi__authentication_username` | Administrator account name | Optional | | -> | `iscsi_nic_ips` | IP addresses for the iSCSI Virtual Machines | Optional | ignored if `iscsi_use_DHCP` is defined | - +> | Variable | Description | Type | Notes | +> | -------------------------------- | -------------------------------------------------------------------- | --------- | ---------------------------------- | +> | `anf_subnet_name` | The name of the ANF subnet. | Optional | | +> | `anf_subnet_arm_id` | The Azure resource identifier for the `ANF` subnet. | Required | When using existing subnets | +> | `anf_subnet_address_prefix` | The address range for the `ANF` subnet. 
-```python
-{
-os_type=""
-source_image_id=""
-publisher="SUSE"
-offer="sles-sap-12-sp5"
-sku="gen1"
-version="latest"
-}
+**Minimum required network definition**
+
+```terraform
+network_logical_name = "SAP01"
+network_address_space = "10.110.0.0/16"
+
+db_subnet_address_prefix = "10.110.96.0/19"
+app_subnet_address_prefix = "10.110.32.0/19"
+
 ```

 ### Authentication Parameters
-
 The table below defines the credentials used for defining the Virtual Machine authentication

 > [!div class="mx-tdCol2BreakAll "]
-> | Variable | Description | Type |
-> | ---------------------------------- | -------------------------------------| ----------- |
-> | `automation_username` | Administrator account name | Optional |
-> | `automation_password` | Administrator password | Optional |
-> | `automation_path_to_public_key` | Path to existing public key | Optional |
-> | `automation_path_to_private_key` | Path to existing private key | Optional |
+> | Variable | Description | Type | Notes |
+> | ---------------------------------- | -------------------------------------| ----------- | ------------------- |
+> | `automation_username` | Administrator account name | Optional | Default: 'azureadm' |
+> | `automation_password` | Administrator password | Optional | |
+> | `automation_path_to_public_key` | Path to existing public key | Optional | |
+> | `automation_path_to_private_key` | Path to existing private key | Optional | |
+
+**Minimum required authentication definition**
+
+```terraform
+automation_username = "azureadm"
+
+```

 ## Key Vault Parameters

 The table below defines the parameters used for defining the Key Vault information
-
 > [!div class="mx-tdCol2BreakAll "]
-> | Variable | Description | Type |
-> | ---------------------------------- | ------------------------------------------------------------------------- | ------------- |
-> | `user_keyvault_id` | Azure resource identifier for the system credentials key vault | Optional |
-> | `spn_keyvault_id` | Azure resource identifier for the deployment credentials (SPNs) key vault | Optional |
-
+> | Variable | Description | Type | Notes |
+> | ----------------------------------- | ------------------------------------------------------------------------------ | ------------ | ----------------------------------- |
+> | `user_keyvault_id` | Azure resource identifier for existing system credentials key vault | Optional | |
+> | `spn_keyvault_id` | Azure resource identifier for existing deployment credentials (SPNs) key vault | Optional | |
+> | `enable_purge_control_for_keyvaults` | Disables the purge protection for Azure key vaults. | Optional | Only use this for test environments |

-## DNS
+## Private DNS

 > [!div class="mx-tdCol2BreakAll "]
@@ -147,31 +156,62 @@ The table below defines the parameters used for defining the Key Vault informati

 ## NFS Support

 > [!div class="mx-tdCol2BreakAll "]
-> | Variable | Description | Type |
-> | ---------------------------------- | ----------------------------------------------------------------------- | ----------- |
-> | `NFS_provider` | Defines what NFS backend to use, the options are 'AFS' for Azure Files NFS or 'ANF' for Azure NetApp files, 'NONE' for NFS from the SCS server or 'NFS' for an external NFS solution. | Optional |
-> | `transport_volume_size` | Defines the size (in GB) for the 'transport' volume | Optional |
+> | Variable | Description | Type | Notes |
+> | ---------------------------------- | ----------------------------------------------------------------------- | ----------- | ------ |
+> | `NFS_provider` | Defines what NFS backend to use. The options are 'AFS' for Azure Files NFS, 'ANF' for Azure NetApp Files, 'NONE' for NFS from the SCS server, or 'NFS' for an external NFS solution. | Optional | |
+> | `install_volume_size` | Defines the size (in GB) for the 'install' volume | Optional | |
+> | `install_private_endpoint_id` | Azure resource ID for the 'install' private endpoint | Optional | For existing endpoints |
+> | `transport_volume_size` | Defines the size (in GB) for the 'transport' volume | Optional | |
+> | `transport_private_endpoint_id` | Azure resource ID for the 'transport' private endpoint | Optional | For existing endpoints |

 ### Azure Files NFS Support

 > [!div class="mx-tdCol2BreakAll "]
 > | Variable | Description | Type | Notes |
 > | ---------------------------------- | -----------------------------------------------------------------------| ----------- | ------ |
-> | `azure_files_transport_storage_account_id` | Azure resource identifier for the 'transport' storage account. | Optional | For existing environment deployments |
+> | `install_storage_account_id` | Azure resource identifier for the 'install' storage account. | Optional | For brown field deployments. |
+> | `transport_storage_account_id` | Azure resource identifier for the 'transport' storage account. | Optional | For brown field deployments. |
+
+**Minimum required Azure Files NFS definition**
+
+```terraform
+NFS_provider = "AFS"
+use_private_endpoint = true
+
+```
+
 ### Azure NetApp Files Support

 > [!div class="mx-tdCol2BreakAll "]
-> | Variable | Description | Type | Notes |
-> | ---------------------------------- | -----------------------------------------------------------------------| ----------- | ------ |
-> | `ANF_account_arm_id` | Azure resource identifier for the Azure NetApp Files Account | Optional | For existing environment deployments |
-> | `ANF_account_name` | Name for the Azure NetApp Files Account | Optional | |
-> | `ANF_service_level` | Service level for the Azure NetApp Files Capacity Pool | Optional | |
-> | `ANF_pool_size` | The size (in GB) of the Azure NetApp Files Capacity Pool | Optional | |
-> | `anf_subnet_name` | The name of the ANF subnet | Optional | |
-> | `anf_subnet_arm_id` | The Azure resource identifier for the `ANF` subnet | Required | For existing environment deployments |
-> | `anf_subnet_address_prefix` | The address range for the `ANF` subnet | Required | For new environment deployments |
+> | Variable | Description | Type | Notes |
+> | ------------------------------------ | -----------------------------------------------------------------------| ----------- | ------ |
+> | `ANF_account_arm_id` | Azure resource identifier for the Azure NetApp Files Account. | Optional | For brown field deployments. |
+> | `ANF_account_name` | Name for the Azure NetApp Files Account. | Optional | |
+> | `ANF_service_level` | Service level for the Azure NetApp Files Capacity Pool. | Optional | |
+> | `ANF_use_existing_pool` | Use an existing Azure NetApp Files Capacity Pool. | Optional | |
+> | `ANF_pool_size` | The size (in GB) of the Azure NetApp Files Capacity Pool. | Optional | |
+> | `ANF_pool_name` | The name of the Azure NetApp Files Capacity Pool.
| Optional | | +> | | | | | +> | `ANF_use_existing_transport_volume` | Defines if an existing transport volume is used. | Optional | | +> | `ANF_transport_volume_name` | Defines the transport volume name. | Optional | | +> | `ANF_transport_volume_size` | Defines the size of the transport volume in GB. | Optional | | +> | `ANF_transport_volume_throughput` | Defines the throughput of the transport volume. | Optional | | +> | | | | | +> | `ANF_use_existing_install_volume` | Defines if an existing install volume is used. | Optional | | +> | `ANF_install_volume_name` | Defines the install volume name. | Optional | | +> | `ANF_install_volume_size` | Defines the size of the install volume in GB. | Optional | | +> | `ANF_install_volume_throughput` | Defines the throughput of the install volume. | Optional | | + + +**Minimum required ANF definition** + +```terraform +NFS_provider = "ANF" +anf_subnet_address_prefix = "10.110.64.0/27" +ANF_service_level = "Ultra" +``` ## Other Parameters @@ -180,8 +220,38 @@ The table below defines the parameters used for defining the Key Vault informati > | ------------------------------------ | ---------------------------------------------------------------------- | -------- | ------------------------------------- | > | `enable_purge_control_for_keyvaults` | Boolean flag controlling if purge control is enabled on the Key Vault. | Optional | Use only for test deployments | > | `use_private_endpoint` | Boolean flag controlling if private endpoints are used for storage accounts and key vaults. | Optional | | -> | `diagnostics_storage_account_arm_id` | The Azure resource identifier for the diagnostics storage account | Required | For existing environment deployments | -> | `witness_storage_account_arm_id` | The Azure resource identifier for the witness storage account | Required | For existing environment deployments | +> | `diagnostics_storage_account_arm_id` | The Azure resource identifier for the diagnostics storage account | Required | For brown field deployments. | +> | `witness_storage_account_arm_id` | The Azure resource identifier for the witness storage account | Required | For brown field deployments. | + + +## ISCSI Parameters + + +> [!div class="mx-tdCol2BreakAll "] +> | Variable | Description | Type | Notes | +> | -------------------------------- | ------------------------------------------------------------------------- | --------- | -------------------------------------- | +> | `iscsi_subnet_name` | The name of the `iscsi` subnet. | Optional | | +> | `iscsi_subnet_address_prefix` | The address range for the `iscsi` subnet. | Mandatory | For green field deployments. | +> | `iscsi_subnet_arm_id` | The Azure resource identifier for the `iscsi` subnet. | Mandatory | For brown field deployments. | +> | `iscsi_subnet_nsg_name` | The name of the `iscsi` Network Security Group name | Optional | | +> | `iscsi_subnet_nsg_arm_id` | The Azure resource identifier for the `iscsi` Network Security Group | Mandatory | For brown field deployments. 
| +> | `iscsi_count` | The number of iSCSI Virtual Machines | Optional | | +> | `iscsi_use_DHCP` | Controls whether to use dynamic IP addresses provided by the Azure subnet | Optional | | +> | `iscsi_image` | Defines the Virtual machine image to use, see below | Optional | | +> | `iscsi_authentication_type` | Defines the default authentication for the iSCSI Virtual Machines | Optional | | +> | `iscsi__authentication_username` | Administrator account name | Optional | | +> | `iscsi_nic_ips` | IP addresses for the iSCSI Virtual Machines | Optional | ignored if `iscsi_use_DHCP` is defined | + + +## Terraform Parameters + +The table below contains the Terraform parameters. These parameters need to be entered manually if not using the deployment scripts. + + +| Variable | Type | Description | +| ----------------------- | ---------- | ------------------------------------- | +| `tfstate_resource_id` | Required * | Azure resource identifier for the Storage account in the SAP Library that will contain the Terraform state files | +| `deployer_tfstate_key` | Required * | The name of the state file for the Deployer | ## Next Step diff --git a/articles/virtual-machines/workloads/sap/automation-deploy-control-plane.md b/articles/virtual-machines/workloads/sap/automation-deploy-control-plane.md index 02be55a993571..56da0f10bf2d2 100644 --- a/articles/virtual-machines/workloads/sap/automation-deploy-control-plane.md +++ b/articles/virtual-machines/workloads/sap/automation-deploy-control-plane.md @@ -21,7 +21,6 @@ The control plane deployment for the [SAP deployment automation framework on Azu The SAP Deployment Frameworks uses Service Principals when doing the deployments. You can create the Service Principal for the Control Plane deployment using the following steps using an account with permissions to create Service Principals: - ```azurecli az ad sp create-for-rbac --role="Contributor" --scopes="/subscriptions/" --name="-Deployment-Account" @@ -60,27 +59,33 @@ cp -Rp sap-automation/samples/WORKSPACES WORKSPACES ``` +Run the following command to deploy the control plane: + ```bash -cd ~/Azure_SAP_Automated_Deployment/WORKSPACES az logout az login -export DEPLOYMENT_REPO_PATH=~/Azure_SAP_Automated_Deployment/sap-automation -export ARM_SUBSCRIPTION_ID= -export subscriptionID= -export spn_id= -export spn_secret= -export tenant_id= -export region_code=WEEU - -${DEPLOYMENT_REPO_PATH}/deploy/scripts/prepare_region.sh \ - --deployer_parameter_file DEPLOYER/MGMT-${region_code}-DEP00-INFRASTRUCTURE/MGMT-${region_code}-DEP00-INFRASTRUCTURE.tfvars \ - --library_parameter_file LIBRARY/MGMT-${region_code}-SAP_LIBRARY/MGMT-${region_code}-SAP_LIBRARY.tfvars \ - --subscription $subscriptionID \ - --spn_id "${spn_id}" \ - --spn_secret "${spn_secret}" \ - --tenant_id "${tenant_id}" -``` +cd ~/Azure_SAP_Automated_Deployment/WORKSPACES + + export subscriptionId="" + export spn_id="" + export spn_secret="" + export tenant_id="" + export env_code="MGMT" + export region_code="" + + export DEPLOYMENT_REPO_PATH="${HOME}/Azure_SAP_Automated_Deployment/sap-automation" + export ARM_SUBSCRIPTION_ID="${subscriptionId}" + + ${DEPLOYMENT_REPO_PATH}/deploy/scripts/prepare_region.sh \ + --deployer_parameter_file DEPLOYER/${env_code}-${region_code}-DEP00-INFRASTRUCTURE/${env_code}-${region_code}-DEP00-INFRASTRUCTURE.tfvars \ + --library_parameter_file LIBRARY/${env_code}-${region_code}-SAP_LIBRARY/${env_code}-${region_code}-SAP_LIBRARY.tfvars \ + --subscription "${subscriptionId}" \ + --spn_id "${spn_id}" \ + --spn_secret "${spn_secret}" 
\
+   --tenant_id "${tenant_id}" \
+   --auto-approve
+ ```

 # [Windows](#tab/windows)

@@ -108,13 +113,31 @@ cd C:\Azure_SAP_Automated_Deployment\WORKSPACES

 New-SAPAutomationRegion -DeployerParameterfile .\DEPLOYER\MGMT-WEEU-DEP00-INFRASTRUCTURE\MGMT-WEEU-DEP00-INFRASTRUCTURE.tfvars -LibraryParameterfile .\LIBRARY\MGMT-WEEU-SAP_LIBRARY\MGMT-WEEU-SAP_LIBRARY.tfvars -Subscription $subscription -SPN_id $appId -SPN_password $spn_secret -Tenant_id $tenant_id

 ```
----
+
 > [!NOTE]
 > Be sure to replace the sample value `` with your subscription ID.
 > Replace the ``, ``, `` values with the output values of the SPN creation

+# [Azure DevOps](#tab/devops)
+
+Open (https://dev.azure.com) and go to your Azure DevOps project.
+
+> [!NOTE]
+> Ensure that the 'Deployment_Configuration_Path' variable in the 'SDAF-General' variable group is set to the folder that contains your configuration files, for this example you can use 'samples/WORKSPACES'.
+
+The deployment will use the configuration defined in the Terraform variable files located in the 'samples/WORKSPACES/DEPLOYER/MGMT-WEEU-DEP00-INFRASTRUCTURE' and 'samples/WORKSPACES/LIBRARY/MGMT-WEEU-SAP_LIBRARY' folders.
+
+Run the pipeline by selecting the _Deploy control plane_ pipeline from the Pipelines section. Enter the configuration names for the deployer and the SAP library. Use 'MGMT-WEEU-DEP00-INFRASTRUCTURE' as the Deployer configuration name and 'MGMT-WEEU-SAP_LIBRARY' as the SAP Library configuration name.
+
+:::image type="content" source="media/automation-devops/automation-run-pipeline.png" alt-text="Screenshot of Azure DevOps run pipeline dialog.":::
+
+You can track the progress in the Azure DevOps portal. Once the deployment is complete, you can see the Control Plane details in the _Extensions_ tab.
+
+ :::image type="content" source="media/automation-devops/automation-run-pipeline-control-plane.png" alt-text="Screenshot of the Azure DevOps pipeline run results.":::
+
+---

 ### Manually configure the deployer using Azure Bastion

@@ -154,9 +177,7 @@ cd sap-automation/deploy/scripts

 The script will install Terraform and Ansible and configure the deployer.

-### Manually configure the deployer (deployments without public IP)
-
-If you deploy the deployer without a public IP Terraform isn't able to configure the deployer Virtual Machine as it will not be able to connect to it.
+### Manually configure the deployer

 > [!NOTE]
 >You need to connect to the deployer virtual Machine from a computer that is able to reach the Azure Virtual Network

diff --git a/articles/virtual-machines/workloads/sap/automation-deploy-system.md b/articles/virtual-machines/workloads/sap/automation-deploy-system.md
index ef3bb0631ff2b..ed908030399a5 100644
--- a/articles/virtual-machines/workloads/sap/automation-deploy-system.md
+++ b/articles/virtual-machines/workloads/sap/automation-deploy-system.md
@@ -161,11 +161,24 @@ New-SAPSystem -Parameterfile DEV-WEEU-SAP01-X01.tfvars -Type sap_system

 ```

+# [Azure DevOps](#tab/devops)
+
+Open (https://dev.azure.com) and go to your Azure DevOps Services project.
+
+> [!NOTE]
+> Ensure that the 'Deployment_Configuration_Path' variable in the 'SDAF-General' variable group is set to the folder that contains your configuration files, for this example you can use 'samples/WORKSPACES'.
+
+The deployment will use the configuration defined in the Terraform variable file located in the 'samples/WORKSPACES/SYSTEM/DEV-WEEU-SAP01-X00' folder.
+
+Run the pipeline by selecting the _SAP system deployment_ pipeline from the Pipelines section.
Enter 'DEV-WEEU-SAP01-X00' as the SAP System configuration name. + +You can track the progress in the Azure DevOps Services portal. Once the deployment is complete, you can see the SAP System details in the _Extensions_ tab. + --- ### Output files -The deployment will create a Ansible hosts file (`SID_hosts.yaml`) and an Ansible parameter file (`sap-parameters.yaml`) that are required input for the Ansible playbooks. +The deployment will create an Ansible hosts file (`SID_hosts.yaml`) and an Ansible parameter file (`sap-parameters.yaml`) that are required input for the Ansible playbooks. ## Next steps > [!div class="nextstepaction"] diff --git a/articles/virtual-machines/workloads/sap/automation-deploy-workload-zone.md b/articles/virtual-machines/workloads/sap/automation-deploy-workload-zone.md index fe8f64979197a..a8c698eee5d9f 100644 --- a/articles/virtual-machines/workloads/sap/automation-deploy-workload-zone.md +++ b/articles/virtual-machines/workloads/sap/automation-deploy-workload-zone.md @@ -40,7 +40,7 @@ environment="DEV" # The location value is a mandatory field, it is used to control where the resources are deployed location="westeurope" -# The network logical name is mandatory - it is used in the naming convention and should map to the workload virtual network logical name +# The network logical name is mandatory - it is used in the naming convention and should map to the workload virtual network logical name network_name="SAP01" # network_address_space is a mandatory parameter when an existing Virtual network is not used @@ -67,7 +67,7 @@ The SAP Deployment Frameworks uses Service Principals when doing the deployment. ```azurecli-interactive az ad sp create-for-rbac --role="Contributor" --scopes="/subscriptions/" --name="-Deployment-Account" - + ``` > [!IMPORTANT] @@ -78,7 +78,7 @@ az ad sp create-for-rbac --role="Contributor" --scopes="/subscriptions/ - password > - tenant -Assign the correct permissions to the Service Principal: +Assign the correct permissions to the Service Principal: ```azurecli az role assignment create --assignee \ @@ -87,7 +87,7 @@ az role assignment create --assignee \ ``` ## Deploying the SAP Workload zone - + The sample Workload Zone configuration file `DEV-WEEU-SAP01-INFRASTRUCTURE.tfvars` is located in the `~/Azure_SAP_Automated_Deployment/samples/WORKSPACES/LANDSCAPE/DEV-WEEU-SAP01-INFRASTRUCTURE` folder. Running the command below will deploy the SAP Workload Zone. 
@@ -109,29 +109,24 @@ cp -R sap-automation/samples/WORKSPACES WORKSPACES
 ```

 ```bash
-export subscriptionID=""
-export spn_id=""
-export spn_secret=""
-export tenant_id=""
-export region_code="WEEU"
-export storageaccount=""
-export keyvault=""
+export subscriptionId=""
+export spn_id=""
+export spn_secret=""
+export tenant_id=""
+export env_code="MGMT"
+export region_code=""

 export DEPLOYMENT_REPO_PATH="${HOME}/Azure_SAP_Automated_Deployment/sap-automation"
-export ARM_SUBSCRIPTION_ID="${subscriptionID}"
-
-cd ~/Azure_SAP_Automated_Deployment/WORKSPACES/LANDSCAPE/DEV-${region_code}-SAP01-INFRASTRUCTURE
-
-${DEPLOYMENT_REPO_PATH}/deploy/scripts/install_workloadzone.sh \
-  --parameterfile ./DEV-${region_code}-SAP01-INFRASTRUCTURE.tfvars \
-  --deployer_environment MGMT \
-  --deployer_tfstate_key MGMT-${region_code}-DEP00-INFRASTRUCTURE.terraform.tfstate \
-  --subscription "${subscriptionID}" \
-  --spn_id "${spn_id}" \
-  --spn_secret "${spn_secret}" \
-  --tenant_id "${tenant_id}" \
-  --keyvault "${keyvault}" \
-  --storageaccountname "${storageaccount}"
+export ARM_SUBSCRIPTION_ID="${subscriptionId}"
+
+${DEPLOYMENT_REPO_PATH}/deploy/scripts/prepare_region.sh \
+  --deployer_parameter_file DEPLOYER/${env_code}-${region_code}-DEP00-INFRASTRUCTURE/${env_code}-${region_code}-DEP00-INFRASTRUCTURE.tfvars \
+  --library_parameter_file LIBRARY/${env_code}-${region_code}-SAP_LIBRARY/${env_code}-${region_code}-SAP_LIBRARY.tfvars \
+  --subscription "${subscriptionId}" \
+  --spn_id "${spn_id}" \
+  --spn_secret "${spn_secret}" \
+  --tenant_id "${tenant_id}" \
+  --auto-approve
 ```

 # [Windows](#tab/windows)

@@ -158,7 +153,7 @@ $region_code="WEEU"

 cd C:\Azure_SAP_Automated_Deployment\WORKSPACES\LANDSCAPE\DEV-$region_code-SAP01-INFRASTRUCTURE

-New-SAPWorkloadZone -Parameterfile DEV-$region_code-SAP01-INFRASTRUCTURE.tfvars
+New-SAPWorkloadZone -Parameterfile DEV-$region_code-SAP01-INFRASTRUCTURE.tfvars -Subscription $subscription -SPN_id $spn_id -SPN_password $spn_secret -Tenant_id $tenant_id -State_subscription $statefile_subscription -Vault $keyvault -StorageAccountName $storageaccount

 ```

@@ -172,12 +167,26 @@ New-SAPWorkloadZone -Parameterfile DEV-$region_code-SAP01-INFRASTRUCTURE.tfvars
 > Replace `` with the name of the storage account containing the Terraform state files
 > Replace `` with the subscription ID for the storage account containing the Terraform state files

+# [Azure DevOps](#tab/devops)
+
+Open (https://dev.azure.com) and go to your Azure DevOps Services project.
+
+> [!NOTE]
+> Ensure that the 'Deployment_Configuration_Path' variable in the 'SDAF-General' variable group is set to the folder that contains your configuration files, for this example you can use 'samples/WORKSPACES'.
+
+The deployment will use the configuration defined in the Terraform variable file located in the 'samples/WORKSPACES/LANDSCAPE/DEV-WEEU-SAP01-INFRASTRUCTURE' folder.
+
+Run the pipeline by selecting the _Deploy workload zone_ pipeline from the Pipelines section. Enter the workload zone configuration name and the deployer environment name. Use 'DEV-WEEU-SAP01-INFRASTRUCTURE' as the Workload zone configuration name and 'MGMT' as the Deployer Environment Name.
+
+You can track the progress in the Azure DevOps Services portal. Once the deployment is complete, you can see the Workload Zone details in the _Extensions_ tab.
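+
+As an optional check, you can list the key workload zone resources with the Azure CLI once the pipeline finishes. This is only a sketch; it assumes that the deployment used the default naming convention, so the resource group is named 'DEV-WEEU-SAP01-INFRASTRUCTURE' as in this example.
+
+```bash
+# Hypothetical verification commands; the resource group name below is assumed from the example naming convention.
+az network vnet list --resource-group DEV-WEEU-SAP01-INFRASTRUCTURE --output table
+az keyvault list     --resource-group DEV-WEEU-SAP01-INFRASTRUCTURE --output table
+```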
+ --- + > [!TIP] > If the scripts fail to run, it can sometimes help to clear the local cache files by removing `~/.sap_deployment_automation/` and `~/.terraform.d/` directories before running the scripts again. -## Next step +## Next steps > [!div class="nextstepaction"] > [About SAP system deployment in automation framework](automation-configure-system.md) diff --git a/articles/virtual-machines/workloads/sap/automation-deployment-framework.md b/articles/virtual-machines/workloads/sap/automation-deployment-framework.md index c758b78d6ad8f..9b95451bb0055 100644 --- a/articles/virtual-machines/workloads/sap/automation-deployment-framework.md +++ b/articles/virtual-machines/workloads/sap/automation-deployment-framework.md @@ -4,13 +4,13 @@ description: Overview of the framework and tooling for the SAP deployment automa author: kimforss ms.author: kimforss ms.reviewer: kimforss -ms.date: 11/17/2021 +ms.date: 05/29/2022 ms.service: virtual-machines-sap ms.topic: conceptual --- # SAP deployment automation framework on Azure -The [SAP deployment automation framework on Azure](https://github.com/Azure/sap-automation) is an open-source orchestration tool for deploying, installing and maintaining SAP environments. You can create infrastructure for SAP landscapes based on SAP HANA and NetWeaver with AnyDB on any of the SAP-supported operating system versions and deploy them into any Azure region. The framework uses [Terraform](https://www.terraform.io/) for infrastructure deployment, and [Ansible](https://www.ansible.com/) for the operating system and application configuration. +The [SAP deployment automation framework on Azure](https://github.com/Azure/sap-automation) is an open-source orchestration tool for deploying, installing and maintaining SAP environments. You can create infrastructure for SAP landscapes based on SAP HANA and NetWeaver with AnyDB. The framework uses [Terraform](https://www.terraform.io/) for infrastructure deployment, and [Ansible](https://www.ansible.com/) for the operating system and application configuration. The systems can be deployed on any of the SAP-supported operating system versions and deployed into any Azure region. Hashicorp [Terraform](https://www.terraform.io/) is an open-source tool for provisioning and managing cloud infrastructure. @@ -20,7 +20,7 @@ The [automation framework](https://github.com/Azure/sap-automation) has two main - Deployment infrastructure (control plane) - SAP Infrastructure (SAP Workload) -You will use the control plane of the SAP deployment automation framework to deploy the SAP Infrastructure and the SAP application infrastructure. The deployment uses Terraform templates to create the [infrastructure as a service (IaaS)](https://azure.microsoft.com/overview/what-is-iaas) defined infrastructure to host the SAP Applications. +You'll use the control plane of the SAP deployment automation framework to deploy the SAP Infrastructure and the SAP application infrastructure. The deployment uses Terraform templates to create the [infrastructure as a service (IaaS)](https://azure.microsoft.com/overview/what-is-iaas) defined infrastructure to host the SAP Applications. > [!NOTE] > This automation framework is based on Microsoft best practices and principles for SAP on Azure. Review the [get-started guide for SAP on Azure virtual machines (Azure VMs)](get-started.md) to understand how to use certified virtual machines and storage solutions for stability, reliability, and performance. 
@@ -33,32 +33,17 @@ The automation framework can be used to deploy the following SAP architectures:

 - Distributed
 - Distributed (Highly Available)

-In the Standalone architecture all the SAP roles are installed on a single server. In the distributed architecture you can separate the database server and the application tier. The application tier can further be separated in two by having SAP Central Services on a virtual machine and one or more application servers.
+In the Standalone architecture, all the SAP roles are installed on a single server. In the distributed architecture, you can separate the database server and the application tier. The application tier can further be separated in two by having SAP Central Services on a virtual machine and one or more application servers.

-The Distributed (Highly Available) deployment is similar to the Distributed architecture but either the datebase or SAP Central Services are both highly available using two virtual machines each with Pacemaker clusters.
+The Distributed (Highly Available) deployment is similar to the Distributed architecture. In this deployment, the database and/or SAP Central Services can each be made highly available by using two virtual machines in a Pacemaker cluster.

-The dependency between the control plane and the application plane is illustrated in the diagram below. In a typical deployment a single control plane is used to manage multiple SAP deployments.
+The dependency between the control plane and the application plane is illustrated in the diagram below. In a typical deployment, a single control plane is used to manage multiple SAP deployments.

 :::image type="content" source="./media/automation-deployment-framework/control-plane-sap-infrastructure.png" alt-text="Diagram showing the SAP deployment automation framework's dependency between the control plane and application plane.":::

-The following diagram shows the key components of the control plane and workload zone.
-
-:::image type="content" source="./media/automation-deployment-framework/automation-diagram-full.png" alt-text="Diagram showing the SAP deployment automation framework environment.":::
-
-The application configuration will be performed from the Ansible Controller in the Control plane using a set of pre-defined playbooks. These playbooks will:
-
-- Configure base operating system settings
-- Configure SAP-specific operating system settings
-- Make the installation media available in the system
-- Install the SAP system
-- Install the SAP database (SAP HANA, AnyDB)
-- Configure high availability (HA) using Pacemaker
-- Configure high availability (HA) for your SAP database
-
-
 ## About the control plane

-The control plane houses the deployment infrastructure from which other environments will be deployed. Once the control plane is deployed, it rarely needs to be redeployed, if ever.
+The control plane houses the deployment infrastructure from which other environments will be deployed. Once the control plane is deployed, it rarely needs to be redeployed, if ever.

 The control plane provides the following services
 - Terraform Deployment Infrastructure

@@ -76,9 +61,26 @@ The key components of the control plane are:
 - Storage account for SAP installation media
 - Azure Key Vault for deployment credentials

+The following diagram shows the key components of the control plane and workload zone.
+
+:::image type="content" source="./media/automation-deployment-framework/automation-diagram-full.png" alt-text="Diagram showing the SAP deployment automation framework environment.":::
+
+The application configuration will be performed from the Ansible Controller in the Control plane using a set of pre-defined playbooks. These playbooks will:
+
+- Configure base operating system settings
+- Configure SAP-specific operating system settings
+- Make the installation media available in the system
+- Install the SAP system
+- Install the SAP database (SAP HANA, AnyDB)
+- Configure high availability (HA) using Pacemaker
+- Configure high availability (HA) for your SAP database
+
+
+For more information about how to configure and deploy the control plane, see [Configuring the control plane](automation-configure-control-plane.md) and [Deploying the control plane](automation-deploy-control-plane.md).
+
 ### Deployer Virtual Machine

-This virtual machine is used to run the orchestration scripts that will deploy the Azure resources using Terraform. It is also the Ansible Controller and is used to execute the Ansible playbooks on all the managed nodes, i.e the virtual machines of an SAP deployment.
+This virtual machine is used to run the orchestration scripts that will deploy the Azure resources using Terraform. It's also the Ansible Controller and is used to execute the Ansible playbooks on all the managed nodes, that is, the virtual machines of an SAP deployment.

 ## About the SAP Workload

@@ -89,13 +91,14 @@ The SAP Workload has two main components:

 ## About the SAP Workload Zone

-The workload zone allows for partitioning of the deployments into different environments (Development,
-Test, Production)
+The workload zone allows for partitioning of the deployments into different environments (Development, Test, Production). The Workload zone will provide the shared services (networking, credentials management) to the SAP systems.
+
 The SAP Workload Zone provides the following services to the SAP Systems
 - Virtual Networking infrastructure
-- Secure storage for system credentials (Virtual Machines and SAP)
+- Azure Key Vault for system credentials (Virtual Machines and SAP)
 - Shared Storage (optional)
+For more information about how to configure and deploy the SAP Workload zone, see [Configuring the workload zone](automation-configure-workload-zone.md) and [Deploying the SAP workload zone](automation-deploy-workload-zone.md).

 ## About the SAP System

@@ -104,17 +107,20 @@ The system deployment consists of the virtual machines that will be running the

 The SAP System provides the following services
 - Virtual machine, storage, and supporting infrastructure to host the SAP applications.

+For more information about how to configure and deploy the SAP System, see [Configuring the SAP System](automation-configure-system.md) and [Deploying the SAP system](automation-deploy-system.md).
+
 ## Glossary

 The following terms are important concepts for understanding the automation framework.

 ### SAP concepts

-| Term | Description |
-| ---- | ----------- |
-| System | An instance of an SAP application that contains the resources the application needs to run. Defined by a unique three-letter identifier, the **SID**.
-| Landscape | A collection of systems in different environments within an SAP application. For example, SAP ERP Central Component (ECC), SAP customer relationship management (CRM), and SAP Business Warehouse (BW).
| -| Workload zone | Partitions the SAP applications to environments, such as non-production and production environments or development, quality assurance, and production environments. Provides shared resources, such as virtual networks and key vault, to all systems within. | +> [!div class="mx-tdCol2BreakAll "] +> | Term | Description | +> | ---------------------------------- | ----------------------------------------------------------------------------------- | +> | System | An instance of an SAP application that contains the resources the application needs to run. Defined by a unique three-letter identifier, the **SID**. +> | Landscape | A collection of systems in different environments within an SAP application. For example, SAP ERP Central Component (ECC), SAP customer relationship management (CRM), and SAP Business Warehouse (BW). | +> | Workload zone | Partitions the SAP applications to environments, such as non-production and production environments or development, quality assurance, and production environments. Provides shared resources, such as virtual networks and key vault, to all systems within. | The following diagram shows the relationships between SAP systems, workload zones (environments), and landscapes. In this example setup, the customer has three SAP landscapes: ECC, CRM, and BW. Each landscape contains three workload zones: production, quality assurance, and development. Each workload zone contains one or more systems. @@ -122,15 +128,21 @@ The following diagram shows the relationships between SAP systems, workload zone ### Deployment components -| Term | Description | Scope | -| ---- | ----------- | ----- | -| Deployer | A virtual machine that can execute Terraform and Ansible commands. Deployed to a virtual network, either new or existing, that is peered to the SAP virtual network. | Region | -| Library | Provides storage for the Terraform state files and SAP installation media. | Region | -| Workload zone | Contains the virtual network into which you deploy the SAP system or systems. Also contains a key vault that holds the credentials for the systems in the environment. | Workload zone | -| System | The deployment unit for the SAP application (SID). Contains virtual machines and supporting infrastructure artifacts, such as load balancers and availability sets. | Workload zone | +> [!div class="mx-tdCol2BreakAll "] +> | Term | Description | Scope | +> | ---------------------------------- | -------------------------------------------------------------------------------------------------- | ----------------------- | +> | Deployer | A virtual machine that can execute Terraform and Ansible commands. | Region | +> | Library | Provides storage for the Terraform state files and the SAP installation media. | Region | +> | Workload zone | Contains the virtual network for the SAP systems and a key vault that holds the system credentials | Workload zone | +> | System | The deployment unit for the SAP application (SID). 
Contains all infrastructure assets | Workload zone |

 ## Next steps

 > [!div class="nextstepaction"]
 > [Get started with the deployment automation framework](automation-get-started.md)
+> [Configuring Azure DevOps for the automation framework](automation-configure-devops.md)
+> [Configuring the control plane](automation-configure-control-plane.md)
+> [Configuring the workload zone](automation-configure-workload-zone.md)
+> [Configuring the SAP System](automation-configure-system.md)
+

diff --git a/articles/virtual-machines/workloads/sap/automation-devops-tutorial.md b/articles/virtual-machines/workloads/sap/automation-devops-tutorial.md
index 622e79f6d69a8..2b12c8d27fa7d 100644
--- a/articles/virtual-machines/workloads/sap/automation-devops-tutorial.md
+++ b/articles/virtual-machines/workloads/sap/automation-devops-tutorial.md
@@ -45,8 +45,8 @@ You'll perform the following tasks during this lab:

 ## Overview
 These steps reference and use the [default naming convention](automation-naming.md) for the automation framework. Example values are also used for naming throughout the configurations. In this tutorial, the following names are used:

-- Azure DevOps project name is `SAP-Deployment`
-- Azure DevOps repository name is `sap-automation`
+- Azure DevOps Services project name is `SAP-Deployment`
+- Azure DevOps Services repository name is `sap-automation`
 - The control plane environment is named `MGMT`, in the region West Europe (`WEEU`) and installed in the virtual network `DEP00`, giving a deployer configuration name: `MGMT-WEEU-DEP00-INFRASTRUCTURE`
 - The SAP workload zone has the environment name `DEV` and is in the same region as the control plane using the virtual network `SAP01`, giving the SAP workload zone configuration name: `DEV-WEEU-SAP01-INFRASTRUCTURE`

@@ -78,11 +78,12 @@ Ensure that the 'Deployment_Configuration_Path' variable in the 'SDAF-General' v

 Run the pipeline by selecting the _Deploy control plane_ pipeline from the Pipelines section. Enter 'MGMT-WEEU-DEP00-INFRASTRUCTURE' as the Deployer configuration name and 'MGMT-WEEU-SAP_LIBRARY' as the SAP Library configuration name.

-:::image type="content" source="media/automation-devops/automation-run-pipeline.png" alt-text="Picture showing the DevOps tutorial run pipeline dialog":::
+:::image type="content" source="media/automation-devops/automation-run-pipeline.png" alt-text="Screenshot of the DevOps tutorial run pipeline dialog.":::

-You can track the progress in the Azure DevOps portal. Once the deployment is complete, you can see the Control Plane details in the _Extensions_ tab.
+You can track the progress in the Azure DevOps Services portal. Once the deployment is complete, you can see the Control Plane details in the _Extensions_ tab.
+
+ :::image type="content" source="media/automation-devops/automation-run-pipeline-control-plane.png" alt-text="Screenshot of the DevOps run pipeline results.":::

- :::image type="content" source="media/automation-devops/automation-run-pipeline-control-plane.png" alt-text="Picture showing the DevOps tutorial run pipeline results":::

 ## Deploy the Workload zone

@@ -90,7 +91,7 @@ The deployment will use the configuration defined in the Terraform variable file
 Run the pipeline by selecting the _Deploy workload zone_ pipeline from the Pipelines section. Enter 'DEV-WEEU-SAP01-INFRASTRUCTURE' as the Workload zone configuration name and 'MGMT' as the Deployer Environment Name.

-You can track the progress in the Azure DevOps portal.
Once the deployment is complete, you can see the Workload Zone details in the _Extensions_ tab. +You can track the progress in the Azure DevOps Services portal. Once the deployment is complete, you can see the Workload Zone details in the _Extensions_ tab. ## Deploy the SAP System @@ -98,11 +99,11 @@ The deployment will use the configuration defined in the Terraform variable file Run the pipeline by selecting the _SAP system deployment_ pipeline from the Pipelines section. Enter 'DEV-WEEU-SAP01-X00' as the SAP System configuration name. -You can track the progress in the Azure DevOps portal. Once the deployment is complete, you can see the SAP System details in the _Extensions_ tab. +You can track the progress in the Azure DevOps Services portal. Once the deployment is complete, you can see the SAP System details in the _Extensions_ tab. ## Download the SAP Software -Run the pipeline by selecting the _SAP software acquisition_ pipeline from the Pipelines section. Enter 'S41909SPS03_v0010ms' as the Name of Bill of Materials (BoM), 'MGMT' as the Control Plane Environment name: MGMT and 'WEEU' as the +Run the pipeline by selecting the _SAP software acquisition_ pipeline from the Pipelines section. Enter 'S41909SPS03_v0011ms' as the Name of Bill of Materials (BoM), 'MGMT' as the Control Plane Environment name: MGMT and 'WEEU' as the Control Plane (SAP Library) location code. You can track the progress in the Azure DevOps portal. @@ -113,9 +114,9 @@ Run the pipeline by selecting the _Configuration and SAP installation_ pipeline Choose the playbooks to execute. -:::image type="content" source="media/automation-devops/automation-os-sap.png" alt-text="Picture showing the DevOps tutorial, OS and SAP configuration"::: +:::image type="content" source="media/automation-devops/automation-os-sap.png" alt-text="Screenshot showing the DevOps tutorial, OS and SAP configuration."::: -You can track the progress in the Azure DevOps portal. +You can track the progress in the Azure DevOps Services portal. ## Run the Repository update pipeline @@ -140,7 +141,7 @@ Enter 'DEV-WEEU-SAP01-INFRASTRUCTURE' as the SAP workload zone configuration nam Enter 'MGMT-WEEU-DEP00-INFRASTRUCTURE' as the Deployer configuration name and 'MGMT-WEEU-SAP_LIBRARY' as the SAP Library configuration name. -## Next step +## Next steps > [!div class="nextstepaction"] > [Configure Control Plane](automation-configure-control-plane.md) diff --git a/articles/virtual-machines/workloads/sap/automation-faq.yml b/articles/virtual-machines/workloads/sap/automation-faq.yml index cfb2157d8abf4..2b5dc4504fb9f 100644 --- a/articles/virtual-machines/workloads/sap/automation-faq.yml +++ b/articles/virtual-machines/workloads/sap/automation-faq.yml @@ -31,14 +31,14 @@ sections: answer: | The naming convention for the SAP automation framework carefully matches Azure naming standards. If you want to use your own naming conventions, [follow the custom naming module instructions](automation-naming-module.md) - question: | - What does the error " does not have secrets get permission on key vault" mean? + What does the error " doesn't have secrets get permission on key vault" mean? answer: | - You might get the error `Error: checking for existing Secret….User, group, or application…does not have secrets get permission on key vault`. + You might get the error `Error: checking for existing Secret… User, group, or application…doesn't have secrets get permission on key vault`. 
Make sure that the deployment credentials have sufficient permissions to read the secrets from the deployment key vault. Also make sure that the `SPN AppID` and `SPN Secret` in the user's key vault are correct. - question: | - What does the error "the client does not have authorization to perform action Microsoft.Authorization/roleAssignments/write" mean? + What does the error "the client doesn't have authorization to perform action Microsoft.Authorization/roleAssignments/write" mean? answer: | - You might get the error `authorization.RoleAssignment.Client#Create: The client… with object ID… does not have authorization to perform action 'Microsoft.Authorization/roleAssignments/write' over scope…or scope is invalid`. + You might get the error `authorization.RoleAssignment.Client#Create: The client… with object ID… doesn't have authorization to perform action 'Microsoft.Authorization/roleAssignments/write' over scope…or scope is invalid`. Make sure that you pass the correct application ID and application secret for the deployment. Validate the credential details by looking at the secrets in the deployment credentials key vault. - question: | What does the error "A resource with ID already exists... this resource needs to be imported into the State" mean? @@ -53,7 +53,7 @@ sections: What does the error "error executing "/tmp/terraform_273760270.sh" mean? answer: | You might see the error `error executing "/tmp/terraform_273760270.sh": Process exited with status 127`. - This error means there's an issue with the line endings in `sap-hana/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl`. + This error means there's an issue with the line endings in `sap-automation/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl`. Make sure to save this file with LF line endings, not CRLF. If you open the file in a Windows environment, the line endings might change from the original LF to CRLF. - question: | What does the error "building account: getting authenticated object ID: Error parsing json result from the Azure CLI" mean? @@ -103,6 +103,10 @@ sections: answer: | You might see the error `'netAppAccounts' has been restricted in this region`. This error means the subscription isn't registered for the `netAppAccounts` resource provider. [Register the provider](../../../azure-netapp-files/azure-netapp-files-register.md) to fix the issue. + - question: | + What does the error "AADSTS70043: The refresh token has expired or is invalid due to sign-in frequency checks by conditional access" mean? + answer: | + This error means that the credentials for the service principal have expired. Update the secret for the Service Principal in Azure AD. Also update the secret in the deployer key vault. - name: Ansible configuration questions: - question: | @@ -111,6 +115,14 @@ sections: When you deploy the SAP HANA software from `configuration_menu.sh`, you might get the fatal error `Retrieve SSH Key Secret Details`. Make sure that the name of the secrets in the user credentials' key vault match the environment naming prefix. For example, you might need to change `DEV--SAP01` to `DEV--SAP`. + - name: DevOps + questions: + - question: | + What does the error "ERROR: AADSTS7000215: Invalid client secret provided. Ensure the secret being sent in the request is the client secret value, not the client secret ID, for a secret added to app" mean? 
+ answer: | + This error means that the secret for the service principal stored in the variable group is incorrect. + + additionalContent: | ## Next steps diff --git a/articles/virtual-machines/workloads/sap/cal-s4h.md b/articles/virtual-machines/workloads/sap/cal-s4h.md index 713e30b400f58..aa47d1d5ea6ee 100644 --- a/articles/virtual-machines/workloads/sap/cal-s4h.md +++ b/articles/virtual-machines/workloads/sap/cal-s4h.md @@ -10,7 +10,7 @@ tags: azure-resource-manager keywords: '' ms.assetid: 44bbd2b6-a376-4b5c-b824-e76917117fa9 -ms.service: virtual-machines-linux +ms.service: virtual-machines ms.topic: article ms.tgt_pltfrm: vm-linux ms.workload: infrastructure-services diff --git a/articles/virtual-machines/workloads/sap/deployment-guide.md b/articles/virtual-machines/workloads/sap/deployment-guide.md index e127a076d34ae..53061d738dd62 100644 --- a/articles/virtual-machines/workloads/sap/deployment-guide.md +++ b/articles/virtual-machines/workloads/sap/deployment-guide.md @@ -10,7 +10,7 @@ ms.service: virtual-machines-sap ms.topic: article ms.tgt_pltfrm: vm-linux ms.workload: infrastructure-services -ms.date: 07/16/2020 +ms.date: 06/02/2022 ms.author: sedusch --- # Azure Virtual Machines deployment for SAP NetWeaver @@ -861,3 +861,6 @@ For more information about user-defined routes, see [User-defined routes and IP When you've prepared the VM as described in [Deployment scenarios of VMs for SAP on Azure][deployment-guide-3], the Azure VM Agent is installed on the virtual machine. The next step is to deploy the Azure Extension for SAP, which is available in the Azure Extension Repository in the global Azure datacenters. For more information, see [Configure the Azure Extension for SAP][deployment-guide-4.5]. +## Next steps + +Learn about [RHEL for SAP in-place upgrade](../redhat/redhat-in-place-upgrade.md#upgrade-sap-environments-from-rhel-7-vms-to-rhel-8-vms) diff --git a/articles/virtual-machines/workloads/sap/get-started.md b/articles/virtual-machines/workloads/sap/get-started.md index 476b15895d452..f0b161d1801c0 100644 --- a/articles/virtual-machines/workloads/sap/get-started.md +++ b/articles/virtual-machines/workloads/sap/get-started.md @@ -13,7 +13,7 @@ ms.assetid: ad8e5c75-0cf6-4564-ae62-ea1246b4e5f2 ms.topic: article ms.tgt_pltfrm: vm-linux ms.workload: infrastructure-services -ms.date: 05/11/2022 +ms.date: 06/08/2022 ms.author: juergent ms.custom: H1Hack27Feb2017 @@ -78,6 +78,9 @@ In this section, you find documents about Microsoft Power BI integration into SA ## Change Log +- June 08, 2022: Change in [HA for SAP NW on Azure VMs on SLES with ANF](./high-availability-guide-suse-netapp-files.md) and [HA for SAP NW on Azure VMs on RHEL with ANF](./high-availability-guide-rhel-netapp-files.md) to adjust timeouts when using NFSv4.1 (related to NFSv4.1 lease renewal) for more resilient Pacemaker configuration +- June 02, 2022: Change in the [SAP Deployment Guide](deployment-guide.md) to add a link to RHEL in-place upgrade documentation +- June 02, 2022: Change in [HA for SAP NetWeaver on Azure VMs on Windows with Azure NetApp Files(SMB)](./high-availability-guide-windows-netapp-files-smb.md), [HA for SAP NW on Azure VMs on SLES with ANF](./high-availability-guide-suse-netapp-files.md) and [HA for SAP NW on Azure VMs on RHEL with ANF](./high-availability-guide-rhel-netapp-files.md) to add sizing considerations - May 11, 2022: Change in [Cluster an SAP ASCS/SCS instance on a Windows failover cluster by using a cluster shared disk in 
Azure](./sap-high-availability-guide-wsfc-shared-disk.md), [Prepare the Azure infrastructure for SAP HA by using a Windows failover cluster and shared disk for SAP ASCS/SCS](./sap-high-availability-infrastructure-wsfc-shared-disk.md) and [SAP ASCS/SCS instance multi-SID high availability with Windows server failover clustering and Azure shared disk](./sap-ascs-ha-multi-sid-wsfc-azure-shared-disk.md) to update instruction about the usage of Azure shared disk for SAP deployment with PPG. - May 10, 2022: Changes in Change in [HA for SAP HANA scale-up with ANF on RHEL](./sap-hana-high-availability-netapp-files-red-hat.md), [SAP HANA scale-out HSR with Pacemaker on Azure VMs on RHEL](./sap-hana-high-availability-scale-out-hsr-rhel.md), [HA for SAP HANA Scale-up with Azure NetApp Files on SLES](./sap-hana-high-availability-netapp-files-suse.md), [SAP HANA scale-out with standby node on Azure VMs with ANF on SLES](./sap-hana-scale-out-standby-netapp-files-suse.md), [SAP HANA scale-out HSR with Pacemaker on Azure VMs on SLES](./sap-hana-high-availability-scale-out-hsr-suse.md) and [SAP HANA scale-out with standby node on Azure VMs with ANF on RHEL](./sap-hana-scale-out-standby-netapp-files-rhel.md) to adjust parameters per SAP note 3024346 - April 26, 2022: Changes in [Setting up Pacemaker on SUSE Linux Enterprise Server in Azure](high-availability-guide-suse-pacemaker.md) to add Azure Identity python module to installation instructions for Azure Fence Agent diff --git a/articles/virtual-machines/workloads/sap/hana-vm-operations.md b/articles/virtual-machines/workloads/sap/hana-vm-operations.md index cf302623a37dc..bb7ab0f902570 100644 --- a/articles/virtual-machines/workloads/sap/hana-vm-operations.md +++ b/articles/virtual-machines/workloads/sap/hana-vm-operations.md @@ -12,14 +12,14 @@ ms.service: virtual-machines-sap ms.topic: article ms.tgt_pltfrm: vm-linux ms.workload: infrastructure -ms.date: 02/11/2022 +ms.date: 06/06/2022 ms.author: juergent ms.custom: H1Hack27Feb2017 --- # SAP HANA infrastructure configurations and operations on Azure -This document provides guidance for configuring Azure infrastructure and operating SAP HANA systems that are deployed on Azure native virtual machines (VMs). The document also includes configuration information for SAP HANA scale-out for the M128s VM SKU. This document is not intended to replace the standard SAP documentation, which includes the following content: +This document provides guidance for configuring Azure infrastructure and operating SAP HANA systems that are deployed on Azure native virtual machines (VMs). The document also includes configuration information for SAP HANA scale-out for the M128s VM SKU. This document isn't intended to replace the standard SAP documentation, which includes the following content: - [SAP administration guide](https://help.sap.com/viewer/6b94445c94ae495c83a19646e7c3fd56/2.0.02/330e5550b09d4f0f8b6cceb14a64cd22.html) - [SAP installation guides](https://service.sap.com/instguides) @@ -63,7 +63,7 @@ Deploy the VMs in Azure by using: You also can deploy a complete installed SAP HANA platform on the Azure VM services through the [SAP Cloud platform](https://cal.sap.com/). The installation process is described in [Deploy SAP S/4HANA or BW/4HANA on Azure](./cal-s4h.md) or with the automation released on [GitHub](https://github.com/AzureCAT-GSI/SAP-HANA-ARM). >[!IMPORTANT] -> In order to use M208xx_v2 VMs, you need to be careful selecting your Linux image. 
For more details, see [Memory optimized virtual machine sizes](../../mv2-series.md). +> In order to use M208xx_v2 VMs, you need to be careful selecting your Linux image. For more information, see [Memory optimized virtual machine sizes](../../mv2-series.md). > @@ -72,7 +72,7 @@ For storage configurations and storage types to be used with SAP HANA in Azure, ### Set up Azure virtual networks -When you have site-to-site connectivity into Azure via VPN or ExpressRoute, you must have at least one Azure virtual network that is connected through a Virtual Gateway to the VPN or ExpressRoute circuit. In simple deployments, the Virtual Gateway can be deployed in a subnet of the Azure virtual network (VNet) that hosts the SAP HANA instances as well. To install SAP HANA, you create two additional subnets within the Azure virtual network. One subnet hosts the VMs to run the SAP HANA instances. The other subnet runs Jumpbox or Management VMs to host SAP HANA Studio, other management software, or your application software. +When you have site-to-site connectivity into Azure via VPN or ExpressRoute, you must have at least one Azure virtual network that is connected through a Virtual Gateway to the VPN or ExpressRoute circuit. In simple deployments, the Virtual Gateway can be deployed in a subnet of the Azure virtual network (VNet) that hosts the SAP HANA instances as well. To install SAP HANA, you create two more subnets within the Azure virtual network. One subnet hosts the VMs to run the SAP HANA instances. The other subnet runs Jumpbox or Management VMs to host SAP HANA Studio, other management software, or your application software. > [!IMPORTANT] > Out of functionality, but more important out of performance reasons, it is not supported to configure [Azure Network Virtual Appliances](https://azure.microsoft.com/solutions/network-appliances/) in the communication path between the SAP application and the DBMS layer of a SAP NetWeaver, Hybris or S/4HANA based SAP system. The communication between the SAP application layer and the DBMS layer needs to be a direct one. The restriction does not include [Azure ASG and NSG rules](../../../virtual-network/network-security-groups-overview.md) as long as those ASG and NSG rules allow a direct communication. Further scenarios where NVAs are not supported are in communication paths between Azure VMs that represent Linux Pacemaker cluster nodes and SBD devices as described in [High availability for SAP NetWeaver on Azure VMs on SUSE Linux Enterprise Server for SAP applications](./high-availability-guide-suse.md). Or in communication paths between Azure VMs and Windows Server SOFS set up as described in [Cluster an SAP ASCS/SCS instance on a Windows failover cluster by using a file share in Azure](./sap-high-availability-guide-wsfc-file-share.md). NVAs in communication paths can easily double the network latency between two communication partners, can restrict throughput in critical paths between the SAP application layer and the DBMS layer. In some scenarios observed with customers, NVAs can cause Pacemaker Linux clusters to fail in cases where communications between the Linux Pacemaker cluster nodes need to communicate to their SBD device through an NVA. @@ -91,7 +91,7 @@ When you install the VMs to run SAP HANA, the VMs need: > > -However, for deployments that are enduring, you need to create a virtual datacenter network architecture in Azure. 
This architecture recommends the separation of the Azure VNet Gateway that connects to on-premises into a separate Azure VNet. This separate VNet should host all the traffic that leaves either to on-premises or to the internet. This approach allows you to deploy software for auditing and logging traffic that enters the virtual datacenter in Azure in this separate hub VNet. So you have one VNet that hosts all the software and configurations that relates to in- and outgoing traffic to your Azure deployment. +However, for deployments that are enduring, you need to create a virtual datacenter network architecture in Azure. This architecture recommends the separation of the Azure VNet Gateway that connects to on-premises into a separate Azure VNet. This separate VNet should host all the traffic that leaves either to on-premises or to the internet. This approach allows you to deploy software for auditing and logging traffic that enters the virtual datacenter in Azure in this separate hub VNet. So you have one VNet that hosts all the software and configurations that relate to in- and outgoing traffic to your Azure deployment. The articles [Azure Virtual Datacenter: A Network Perspective](/azure/architecture/vdc/networking-virtual-datacenter) and [Azure Virtual Datacenter and the Enterprise Control Plane](/azure/architecture/vdc/) give more information on the virtual datacenter approach and related Azure VNet design. @@ -106,10 +106,6 @@ For VMs running SAP HANA, you should work with static IP addresses assigned. Rea [Azure Network Security Groups (NSGs)](../../../virtual-network/virtual-network-vnet-plan-design-arm.md) are used to direct traffic that's routed to the SAP HANA instance or the jumpbox. The NSGs and eventually [Application Security Groups](../../../virtual-network/network-security-groups-overview.md#application-security-groups) are associated to the SAP HANA subnet and the Management subnet. -The following image shows an overview of a rough deployment schema for SAP HANA following a hub and spoke VNet architecture: - -![Rough deployment schema for SAP HANA](media/hana-vm-operations/hana-simple-networking-dmz.png) - To deploy SAP HANA in Azure without a site-to-site connection, you still want to shield the SAP HANA instance from the public internet and hide it behind a forward proxy. In this basic scenario, the deployment relies on Azure built-in DNS services to resolve hostnames. In a more complex deployment where public-facing IP addresses are used, Azure built-in DNS services are especially important. Use Azure NSGs and [Azure NVAs](https://azure.microsoft.com/solutions/network-appliances/) to control, monitor the routing from the internet into your Azure VNet architecture in Azure. The following image shows a rough schema for deploying SAP HANA without a site-to-site connection in a hub and spoke VNet architecture: ![Rough deployment schema for SAP HANA without a site-to-site connection](media/hana-vm-operations/hana-simple-networking-dmz.png) @@ -119,9 +115,10 @@ Another description on how to use Azure NVAs to control and monitor access from ## Configuring Azure infrastructure for SAP HANA scale-out -In order to find out the Azure VM types that are certified for either OLAP scale-out or S/4HANA scale-out, check the [SAP HANA hardware directory](https://www.sap.com/dmc/exp/2014-09-02-hana-hardware/enEN/iaas.html#categories=Microsoft%20Azure). A checkmark in the column 'Clustering' indicates scale-out support. 
Application type indicates whether OLAP scale-out or S/4HANA scale-out is supported. For details on nodes certified in scale-out for each of the VMs, check the details of the entries in the particular VM SKU listed in the SAP HANA hardware directory. -The minimum OS releases for deploying scale-out configurations in Azure VMs, check the details of the entries in the particular VM SKU listed in the SAP HANA hardware directory. Of a n-node OLAP scale-out configuration, one node functions as master node. The other nodes up to the limit of the certification act as worker node. Additional standby nodes don't count into the number of certified nodes +In order to find out the Azure VM types that are certified for either OLAP scale-out or S/4HANA scale-out, check the [SAP HANA hardware directory](https://www.sap.com/dmc/exp/2014-09-02-hana-hardware/enEN/iaas.html#categories=Microsoft%20Azure). A checkmark in the column 'Clustering' indicates scale-out support. Application type indicates whether OLAP scale-out or S/4HANA scale-out is supported. For details on nodes certified in scale-out, review the entry for a specific VM SKU listed in the SAP HANA hardware directory. + +The minimum OS releases for deploying scale-out configurations in Azure VMs, check the details of the entries in the particular VM SKU listed in the SAP HANA hardware directory. Of a n-node OLAP scale-out configuration, one node functions as the main node. The other nodes up to the limit of the certification act as worker node. More standby nodes don't count into the number of certified nodes >[!NOTE] > Azure VM scale-out deployments of SAP HANA with standby node are only possible using the [Azure NetApp Files](https://azure.microsoft.com/services/netapp/) storage. No other SAP HANA certified Azure storage allows the configuration of SAP HANA standby nodes @@ -136,14 +133,14 @@ A typical basic design for a single node in a scale-out configuration is going t The basic configuration of a VM node for SAP HANA scale-out looks like: - For **/hana/shared**, you use the native NFS service provided through Azure NetApp Files. -- All other disk volumes are not shared among the different nodes and are not based on NFS. Installation configurations and steps for scale-out HANA installations with non-shared **/hana/data** and **/hana/log** is provided further later in this document. For HANA certified storage that can be used, check the article [SAP HANA Azure virtual machine storage configurations](./hana-vm-operations-storage.md). +- All other disk volumes aren't shared among the different nodes and aren't based on NFS. Installation configurations and steps for scale-out HANA installations with non-shared **/hana/data** and **/hana/log** is provided further later in this document. For HANA certified storage that can be used, check the article [SAP HANA Azure virtual machine storage configurations](./hana-vm-operations-storage.md). Sizing the volumes or disks, you need to check the document [SAP HANA TDI Storage Requirements](https://archive.sap.com/kmuuid2/70c8e423-c8aa-3210-3fae-e043f5c1ca92/SAP%20HANA%20TDI%20-%20Storage%20Requirements.pdf), for the size required dependent on the number of worker nodes. The document releases a formula you need to apply to get the required capacity of the volume The other design criteria that is displayed in the graphics of the single node configuration for a scale-out SAP HANA VM is the VNet, or better the subnet configuration. 
SAP highly recommends a separation of the client/application facing traffic from the communications between the HANA nodes. As shown in the graphics, this goal is achieved by having two different vNICs attached to the VM. Both vNICs are in different subnets, have two different IP addresses. You then control the flow of traffic with routing rules using NSGs or user-defined routes. -Particularly in Azure, there are no means and methods to enforce quality of service and quotas on specific vNICs. As a result, the separation of client/application facing and intra-node communication does not open any opportunities to prioritize one traffic stream over the other. Instead the separation remains a measure of security in shielding the intra-node communications of the scale-out configurations. +Particularly in Azure, there are no means and methods to enforce quality of service and quotas on specific vNICs. As a result, the separation of client/application facing and intra-node communication doesn't open any opportunities to prioritize one traffic stream over the other. Instead the separation remains a measure of security in shielding the intra-node communications of the scale-out configurations. >[!NOTE] >SAP recommends separating network traffic to the client/application side and intra-node traffic as described in this document. Therefore putting an architecture in place as shown in the last graphics is recommended. Also consult your security and compliance team for requirements that deviate from the recommendation @@ -160,34 +157,32 @@ Installing a scale-out SAP configuration, you need to perform rough steps of: - Deploying new or adapting an existing Azure VNet infrastructure - Deploying the new VMs using Azure Managed Premium Storage, Ultra disk volumes, and/or NFS volumes based on ANF -- - Adapt network routing to make sure that, for example, intra-node communication between VMs is not routed through an [NVA](https://azure.microsoft.com/solutions/network-appliances/). -- Install the SAP HANA master node. -- Adapt configuration parameters of the SAP HANA master node +- - Adapt network routing to make sure that, for example, intra-node communication between VMs isn't routed through an [NVA](https://azure.microsoft.com/solutions/network-appliances/). +- Install the SAP HANA main node. +- Adapt configuration parameters of the SAP HANA main node - Continue with the installation of the SAP HANA worker nodes #### Installation of SAP HANA in scale-out configuration As your Azure VM infrastructure is deployed, and all other preparations are done, you need to install the SAP HANA scale-out configurations in these steps: -- Install the SAP HANA master node according to SAP's documentation -- In case of using Azure Premium Storage or Ultra disk storage with non-shared disks of /hana/data and /hana/log, you need to change the global.ini file and add the parameter 'basepath_shared = no' to the global.ini file. This parameter enables SAP HANA to run in scale-out without 'shared' **/hana/data** and **/hana/log** volumes between the nodes. Details are documented in [SAP Note #2080991](https://launchpad.support.sap.com/#/notes/2080991). If you are using NFS volumes based on ANF for /hana/data and /hana/log, you don't need to make this change +- Install the SAP HANA main node according to SAP's documentation +- When using Azure Premium Storage or Ultra disk storage with non-shared disks of `/hana/data` and `/hana/log`, add the parameter `basepath_shared = no` to the `global.ini` file. 
This parameter enables SAP HANA to run in scale-out without shared `/hana/data` and `/hana/log` volumes between the nodes. Details are documented in [SAP Note #2080991](https://launchpad.support.sap.com/#/notes/2080991). If you're using NFS volumes based on ANF for /hana/data and /hana/log, you don't need to make this change - After the eventual change in the global.ini parameter, restart the SAP HANA instance -- Add additional worker nodes. See also . Specify the internal network for SAP HANA inter-node communication during the installation or afterwards using, for example, the local hdblcm. For more detailed documentation, see also [SAP Note #2183363](https://launchpad.support.sap.com/#/notes/2183363). +- Add more worker nodes. See also . Specify the internal network for SAP HANA inter-node communication during the installation or afterwards using, for example, the local hdblcm. For more detailed documentation, see also [SAP Note #2183363](https://launchpad.support.sap.com/#/notes/2183363). -Details to set up an SAP HANA scale-out system with standby node on SUSE Linux is described in detail in [Deploy a SAP HANA scale-out system with standby node on Azure VMs by using Azure NetApp Files on SUSE Linux Enterprise Server](./sap-hana-scale-out-standby-netapp-files-suse.md). Equivalent documentation for Red Hat can be found in the article [Deploy a SAP HANA scale-out system with standby node on Azure VMs by using Azure NetApp Files on Red Hat Enterprise Linux](./sap-hana-scale-out-standby-netapp-files-rhel.md). +To set up an SAP HANA scale-out system with a standby node, see the [SUSE Linux deployment instructions](./sap-hana-scale-out-standby-netapp-files-suse.md) or the [Red Hat deployment instructions](./sap-hana-scale-out-standby-netapp-files-rhel.md). ## SAP HANA Dynamic Tiering 2.0 for Azure virtual machines -In addition to the SAP HANA certifications on Azure M-series VMs, SAP HANA Dynamic Tiering 2.0 is also supported on Microsoft Azure -(see SAP HANA Dynamic Tiering documentation links further down). While there is no difference in installing the product or -operating it, for example, via SAP HANA Cockpit inside an Azure Virtual Machine, there are a few important items, which are mandatory for official support on Azure. These key points are described below. Throughout the article, the abbreviation "DT 2.0" is going to be used instead of the full name Dynamic Tiering 2.0. +In addition to the SAP HANA certifications on Azure M-series VMs, SAP HANA Dynamic Tiering 2.0 is also supported on Microsoft Azure. For more information, see [Links to DT 2.0 documentation](#links-to-dt-20-documentation). There's no difference in installing or operating the product. For example, you can install SAP HANA Cockpit inside an Azure VM. However, there are some mandatory requirements, as described in the following section, for official support on Azure. Throughout the article, the abbreviation "DT 2.0" is going to be used instead of the full name Dynamic Tiering 2.0. SAP HANA Dynamic Tiering 2.0 isn't supported by SAP BW or S4HANA. Main use cases right now are native HANA applications. ### Overview -The picture below gives an overview regarding DT 2.0 support on Microsoft Azure. There is a set of mandatory requirements, which +The picture below gives an overview regarding DT 2.0 support on Microsoft Azure. There's a set of mandatory requirements, which has to be followed to comply with the official certification: - DT 2.0 must be installed on a dedicated Azure VM. 
It may not run on the same VM where SAP HANA runs @@ -205,7 +200,7 @@ More details are going to be explained in the following sections. ### Dedicated Azure VM for SAP HANA DT 2.0 -On Azure IaaS, DT 2.0 is only supported on a dedicated VM. It is not allowed to run DT 2.0 on the same Azure VM where the HANA +On Azure IaaS, DT 2.0 is only supported on a dedicated VM. It isn't allowed to run DT 2.0 on the same Azure VM where the HANA instance is running. Initially two VM types can be used to run SAP HANA DT 2.0: - M64-32ms @@ -214,7 +209,7 @@ instance is running. Initially two VM types can be used to run SAP HANA DT 2.0: For more information on the VM type description, see [Azure VM sizes - Memory](../../sizes-memory.md) Given the basic idea of DT 2.0, which is about offloading "warm" data in order to save costs it makes sense to use corresponding -VM sizes. There is no strict rule though regarding the possible combinations. It depends on the specific customer workload. +VM sizes. There's no strict rule though regarding the possible combinations. It depends on the specific customer workload. Recommended configurations would be: @@ -238,14 +233,15 @@ See additional information about Azure accelerated networking [Create an Azure V ### VM Storage for SAP HANA DT 2.0 -According to DT 2.0 best practice guidance, the disk IO throughput should be minimum 50 MB/sec per physical core. Looking at the spec for the two -Azure VM types, which are supported for DT 2.0 the maximum disk IO throughput limit for the VM look like: +According to DT 2.0 best practice guidance, the disk IO throughput should be minimum 50 MB/sec per physical core. -- E32sv3 : 768 MB/sec (uncached) which means a ratio of 48 MB/sec per physical core -- M64-32ms : 1000 MB/sec (uncached) which means a ratio of 62.5 MB/sec per physical core +According to the specifications for the two Azure VM types, which are supported for DT 2.0, the maximum disk IO throughput limit for the VM looks like: -It is required to attach multiple Azure disks to the DT 2.0 VM and create a software raid (striping) on OS level to achieve the max limit of disk throughput -per VM. A single Azure disk cannot provide the throughput to reach the max VM limit in this regard. Azure Premium storage is mandatory to run DT 2.0. +- E32sv3: 768 MB/sec (uncached) which means a ratio of 48 MB/sec per physical core +- M64-32ms: 1000 MB/sec (uncached) which means a ratio of 62.5 MB/sec per physical core + +It's required to attach multiple Azure disks to the DT 2.0 VM and create a software raid (striping) on OS level to achieve the max limit of disk throughput +per VM. A single Azure disk can't provide the throughput to reach the max VM limit in this regard. Azure Premium storage is mandatory to run DT 2.0. - Details about available Azure disk types can be found on the [Select a disk type for Azure IaaS VMs - managed disks](../../disks-types.md) page - Details about creating software raid via mdadm can be found on the [Configure software RAID on a Linux VM](/previous-versions/azure/virtual-machines/linux/configure-raid) page @@ -269,8 +265,9 @@ Especially in case the workload is read-intense it could boost IO performance to data volumes of database software. Whereas for the transaction log Azure host disk cache must be "none". Regarding the size of the log volume a recommended starting point is a heuristic of 15% of the data size. The creation of the log volume can be accomplished by using different -Azure disk types depending on cost and throughput requirements. 
For the log volume, high I/O throughput is required. In case of using the VM type M64-32ms it is -mandatory to enable [Write Accelerator](../../how-to-enable-write-accelerator.md). Azure Write Accelerator provides optimal disk write latency for the transaction +Azure disk types depending on cost and throughput requirements. For the log volume, high I/O throughput is required. + +When using the VM type M64-32ms, it's mandatory to enable [Write Accelerator](../../how-to-enable-write-accelerator.md). Azure Write Accelerator provides optimal disk write latency for the transaction log (only available for M-series). There are some items to consider though like the maximum number of disks per VM type. Details about Write Accelerator can be found on the [Azure Write Accelerator](../../how-to-enable-write-accelerator.md) page @@ -285,7 +282,7 @@ Here are a few examples about sizing the log volume: Like for SAP HANA scale-out, the /hana/shared directory has to be shared between the SAP HANA VM and the DT 2.0 VM. The same architecture as for SAP HANA scale-out using dedicated VMs, which act as a highly available NFS server is recommended. In order to provide a shared backup volume, -the identical design can be used. But it is up to the customer if HA would be necessary or if it is sufficient to just use a dedicated VM with +the identical design can be used. But it's up to the customer if HA would be necessary or if it's sufficient to just use a dedicated VM with enough storage capacity to act as a backup server. diff --git a/articles/virtual-machines/workloads/sap/high-availability-guide-rhel-netapp-files.md b/articles/virtual-machines/workloads/sap/high-availability-guide-rhel-netapp-files.md index 3d68f7f89edea..566b1076211a8 100644 --- a/articles/virtual-machines/workloads/sap/high-availability-guide-rhel-netapp-files.md +++ b/articles/virtual-machines/workloads/sap/high-availability-guide-rhel-netapp-files.md @@ -12,7 +12,7 @@ ms.service: virtual-machines-sap ms.topic: article ms.tgt_pltfrm: vm-windows ms.workload: infrastructure-services -ms.date: 03/25/2022 +ms.date: 06/08/2022 ms.author: radeltch --- @@ -81,6 +81,7 @@ Read the following SAP Notes and papers first: * [Support Policies for RHEL High Availability Clusters - Microsoft Azure Virtual Machines as Cluster Members](https://access.redhat.com/articles/3131341) * [Installing and Configuring a Red Hat Enterprise Linux 7.4 (and later) High-Availability Cluster on Microsoft Azure](https://access.redhat.com/articles/3252491) * [NetApp SAP Applications on Microsoft Azure using Azure NetApp Files][anf-sap-applications-azure] +* [NetApp NFS Best Practices](https://www.netapp.com/media/10720-tr-4067.pdf) ## Overview @@ -134,6 +135,7 @@ When considering Azure NetApp Files for the SAP Netweaver on RHEL High Availabil - The minimum volume is 100 GiB - Azure NetApp Files and all virtual machines, where Azure NetApp Files volumes will be mounted, must be in the same Azure Virtual Network or in [peered virtual networks](../../../virtual-network/virtual-network-peering-overview.md) in the same region. Azure NetApp Files access over VNET peering in the same region is supported now. Azure NetApp access over global peering is not yet supported. - The selected virtual network must have a subnet, delegated to Azure NetApp Files. 
+- The throughput and performance characteristics of an Azure NetApp Files volume is a function of the volume quota and service level, as documented in [Service level for Azure NetApp Files](../../../azure-netapp-files/azure-netapp-files-service-levels.md). While sizing the SAP Azure NetApp volumes, make sure that the resulting throughput meets the application requirements. - Azure NetApp Files offers [export policy](../../../azure-netapp-files/azure-netapp-files-configure-export-policy.md): you can control the allowed clients, the access type (Read&Write, Read Only, etc.). - Azure NetApp Files feature isn't zone aware yet. Currently Azure NetApp Files feature isn't deployed in all Availability zones in an Azure region. Be aware of the potential latency implications in some Azure regions. - Azure NetApp Files volumes can be deployed as NFSv3 or NFSv4.1 volumes. Both protocols are supported for the SAP application layer (ASCS/ERS, SAP application servers). @@ -440,7 +442,7 @@ The following items are prefixed with either **[A]** - applicable to all nodes, # If using NFSv4.1 sudo pcs resource create fs_QAS_ASCS Filesystem device='192.168.24.5:/sapQAS/usrsapQASascs' \ directory='/usr/sap/QAS/ASCS00' fstype='nfs' force_unmount=safe options='sec=sys,vers=4.1' \ - op start interval=0 timeout=60 op stop interval=0 timeout=120 op monitor interval=200 timeout=40 \ + op start interval=0 timeout=60 op stop interval=0 timeout=120 op monitor interval=200 timeout=105 \ --group g-QAS_ASCS sudo pcs resource create vip_QAS_ASCS IPaddr2 \ @@ -503,7 +505,7 @@ The following items are prefixed with either **[A]** - applicable to all nodes, # If using NFSv4.1 sudo pcs resource create fs_QAS_AERS Filesystem device='192.168.24.5:/sapQAS/usrsapQASers' \ directory='/usr/sap/QAS/ERS01' fstype='nfs' force_unmount=safe options='sec=sys,vers=4.1' \ - op start interval=0 timeout=60 op stop interval=0 timeout=120 op monitor interval=200 timeout=40 \ + op start interval=0 timeout=60 op stop interval=0 timeout=120 op monitor interval=200 timeout=105 \ --group g-QAS_AERS sudo pcs resource create vip_QAS_AERS IPaddr2 \ @@ -614,8 +616,9 @@ The following items are prefixed with either **[A]** - applicable to all nodes, If using enqueue server 1 architecture (ENSA1), define the resources as follows: ``` - sudo pcs property set maintenance-mode=true + sudo pcs property set maintenance-mode=true + # If using NFSv3 sudo pcs resource create rsc_sap_QAS_ASCS00 SAPInstance \ InstanceName=QAS_ASCS00_anftstsapvh START_PROFILE="/sapmnt/QAS/profile/QAS_ASCS00_anftstsapvh" \ AUTOMATIC_RECOVER=false \ @@ -624,14 +627,31 @@ The following items are prefixed with either **[A]** - applicable to all nodes, op start interval=0 timeout=600 op stop interval=0 timeout=600 \ --group g-QAS_ASCS + # If using NFSv4.1 + sudo pcs resource create rsc_sap_QAS_ASCS00 SAPInstance \ + InstanceName=QAS_ASCS00_anftstsapvh START_PROFILE="/sapmnt/QAS/profile/QAS_ASCS00_anftstsapvh" \ + AUTOMATIC_RECOVER=false \ + meta resource-stickiness=5000 migration-threshold=1 failure-timeout=60 \ + op monitor interval=20 on-fail=restart timeout=105 \ + op start interval=0 timeout=600 op stop interval=0 timeout=600 \ + --group g-QAS_ASCS + sudo pcs resource meta g-QAS_ASCS resource-stickiness=3000 + # If using NFSv3 sudo pcs resource create rsc_sap_QAS_ERS01 SAPInstance \ InstanceName=QAS_ERS01_anftstsapers START_PROFILE="/sapmnt/QAS/profile/QAS_ERS01_anftstsapers" \ AUTOMATIC_RECOVER=false IS_ERS=true \ op monitor interval=20 on-fail=restart timeout=60 op start interval=0 
timeout=600 op stop interval=0 timeout=600 \ --group g-QAS_AERS - + + # If using NFSv4.1 + sudo pcs resource create rsc_sap_QAS_ERS01 SAPInstance \ + InstanceName=QAS_ERS01_anftstsapers START_PROFILE="/sapmnt/QAS/profile/QAS_ERS01_anftstsapers" \ + AUTOMATIC_RECOVER=false IS_ERS=true \ + op monitor interval=20 on-fail=restart timeout=105 op start interval=0 timeout=600 op stop interval=0 timeout=600 \ + --group g-QAS_AERS + sudo pcs constraint colocation add g-QAS_AERS with g-QAS_ASCS -5000 sudo pcs constraint location rsc_sap_QAS_ASCS00 rule score=2000 runs_ers_QAS eq 1 sudo pcs constraint order start g-QAS_ASCS then stop g-QAS_AERS kind=Optional symmetrical=false @@ -646,6 +666,7 @@ The following items are prefixed with either **[A]** - applicable to all nodes, ``` sudo pcs property set maintenance-mode=true + # If using NFSv3 sudo pcs resource create rsc_sap_QAS_ASCS00 SAPInstance \ InstanceName=QAS_ASCS00_anftstsapvh START_PROFILE="/sapmnt/QAS/profile/QAS_ASCS00_anftstsapvh" \ AUTOMATIC_RECOVER=false \ @@ -654,14 +675,31 @@ The following items are prefixed with either **[A]** - applicable to all nodes, op start interval=0 timeout=600 op stop interval=0 timeout=600 \ --group g-QAS_ASCS + # If using NFSv4.1 + sudo pcs resource create rsc_sap_QAS_ASCS00 SAPInstance \ + InstanceName=QAS_ASCS00_anftstsapvh START_PROFILE="/sapmnt/QAS/profile/QAS_ASCS00_anftstsapvh" \ + AUTOMATIC_RECOVER=false \ + meta resource-stickiness=5000 \ + op monitor interval=20 on-fail=restart timeout=105 \ + op start interval=0 timeout=600 op stop interval=0 timeout=600 \ + --group g-QAS_ASCS + sudo pcs resource meta g-QAS_ASCS resource-stickiness=3000 + # If using NFSv3 sudo pcs resource create rsc_sap_QAS_ERS01 SAPInstance \ InstanceName=QAS_ERS01_anftstsapers START_PROFILE="/sapmnt/QAS/profile/QAS_ERS01_anftstsapers" \ AUTOMATIC_RECOVER=false IS_ERS=true \ op monitor interval=20 on-fail=restart timeout=60 op start interval=0 timeout=600 op stop interval=0 timeout=600 \ --group g-QAS_AERS - + + # If using NFSv4.1 + sudo pcs resource create rsc_sap_QAS_ERS01 SAPInstance \ + InstanceName=QAS_ERS01_anftstsapers START_PROFILE="/sapmnt/QAS/profile/QAS_ERS01_anftstsapers" \ + AUTOMATIC_RECOVER=false IS_ERS=true \ + op monitor interval=20 on-fail=restart timeout=105 op start interval=0 timeout=600 op stop interval=0 timeout=600 \ + --group g-QAS_AERS + sudo pcs resource meta rsc_sap_QAS_ERS01 resource-stickiness=3000 sudo pcs constraint colocation add g-QAS_AERS with g-QAS_ASCS -5000 @@ -675,6 +713,8 @@ The following items are prefixed with either **[A]** - applicable to all nodes, If you are upgrading from an older version and switching to enqueue server 2, see SAP note [2641322](https://launchpad.support.sap.com/#/notes/2641322). > [!NOTE] + > The higher timeouts, suggested when using NFSv4.1 are necessary due to protocol-specific pause, related to NFSv4.1 lease renewals. + > For more information see [NFS in NetApp Best practice](https://www.netapp.com/media/10720-tr-4067.pdf). > The timeouts in the above configuration are just examples and may need to be adapted to the specific SAP setup. Make sure that the cluster status is ok and that all resources are started. It is not important on which node the resources are running. 
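One way to do that check, and to confirm that the NFSv4.1-specific monitor timeouts were applied, is sketched below; it assumes the example resource names rsc_sap_QAS_ASCS00 and rsc_sap_QAS_ERS01 used above.

```bash
# Show overall cluster state; all resources should report as Started.
sudo pcs status

# Review the effective operation timeouts on the ASCS and ERS resources.
# On older pcs releases, use 'pcs resource show <resource>' instead of 'pcs resource config'.
sudo pcs resource config rsc_sap_QAS_ASCS00
sudo pcs resource config rsc_sap_QAS_ERS01
```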
diff --git a/articles/virtual-machines/workloads/sap/high-availability-guide-rhel-pacemaker.md b/articles/virtual-machines/workloads/sap/high-availability-guide-rhel-pacemaker.md index 5fb1958a9b83b..835826e0a2985 100644 --- a/articles/virtual-machines/workloads/sap/high-availability-guide-rhel-pacemaker.md +++ b/articles/virtual-machines/workloads/sap/high-availability-guide-rhel-pacemaker.md @@ -13,7 +13,7 @@ ms.topic: article ms.tgt_pltfrm: vm-windows ms.workload: infrastructure-services ms.custom: subject-rbac-steps -ms.date: 12/07/2021 +ms.date: 05/26/2022 ms.author: radeltch --- @@ -239,17 +239,17 @@ The STONITH device uses a Service Principal to authorize against Microsoft Azure ### **[1]** Create a custom role for the fence agent -The Service Principal does not have permissions to access your Azure resources by default. You need to give the Service Principal permissions to start and stop (power-off) all virtual machines of the cluster. If you did not already create the custom role, you can create it using [PowerShell](../../../role-based-access-control/role-assignments-powershell.md) or [Azure CLI](../../../role-based-access-control/role-assignments-cli.md) +The Service Principal does not have permissions to access your Azure resources by default. You need to give the Service Principal permissions to start and stop (power-off) all virtual machines of the cluster. If you did not already create the custom role, you can create it using [PowerShell](../../../role-based-access-control/custom-roles-powershell.md) or [Azure CLI](../../../role-based-access-control/custom-roles-cli.md) -Use the following content for the input file. You need to adapt the content to your subscriptions that is, replace c276fc76-9cd4-44c9-99a7-4fd71546436e and e91d47c4-76f3-4271-a796-21b4ecfe3624 with the Ids of your subscription. If you only have one subscription, remove the second entry in AssignableScopes. +Use the following content for the input file. You need to adapt the content to your subscriptions that is, replace *xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx* and *yyyyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy* with the Ids of your subscription. If you only have one subscription, remove the second entry in AssignableScopes. ```json { "Name": "Linux Fence Agent Role", "description": "Allows to power-off and start virtual machines", "assignableScopes": [ - "/subscriptions/e663cc2d-722b-4be1-b636-bbd9e4c60fd9", - "/subscriptions/e91d47c4-76f3-4271-a796-21b4ecfe3624" + "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "/subscriptions/yyyyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy" ], "actions": [ "Microsoft.Compute/*/read", diff --git a/articles/virtual-machines/workloads/sap/high-availability-guide-suse-netapp-files.md b/articles/virtual-machines/workloads/sap/high-availability-guide-suse-netapp-files.md index 41c3a9e9b81e7..466e11caa29c2 100644 --- a/articles/virtual-machines/workloads/sap/high-availability-guide-suse-netapp-files.md +++ b/articles/virtual-machines/workloads/sap/high-availability-guide-suse-netapp-files.md @@ -13,7 +13,7 @@ ms.service: virtual-machines-sap ms.topic: article ms.tgt_pltfrm: vm-windows ms.workload: infrastructure-services -ms.date: 03/25/2022 +ms.date: 06/08/2022 ms.author: radeltch --- @@ -81,6 +81,7 @@ Read the following SAP Notes and papers first: The guides contain all required information to set up Netweaver HA and SAP HANA System Replication on-premises. Use these guides as a general baseline. They provide much more detailed information. 
* [SUSE High Availability Extension 12 SP3 Release Notes][suse-ha-12sp3-relnotes] * [NetApp SAP Applications on Microsoft Azure using Azure NetApp Files][anf-sap-applications-azure] +* [NetApp NFS Best Practices](https://www.netapp.com/media/10720-tr-4067.pdf) ## Overview @@ -133,10 +134,11 @@ In this example, we used Azure NetApp Files for all SAP Netweaver file systems t When considering Azure NetApp Files for the SAP Netweaver on SUSE High Availability architecture, be aware of the following important considerations: -- The minimum capacity pool is 4 TiB. The capacity pool size can be increased be in 1 TiB increments. +- The minimum capacity pool is 4 TiB. The capacity pool size can be increased in 1 TiB increments. - The minimum volume is 100 GiB - Azure NetApp Files and all virtual machines, where Azure NetApp Files volumes will be mounted, must be in the same Azure Virtual Network or in [peered virtual networks](../../../virtual-network/virtual-network-peering-overview.md) in the same region. Azure NetApp Files access over VNET peering in the same region is supported now. Azure NetApp access over global peering is not yet supported. - The selected virtual network must have a subnet, delegated to Azure NetApp Files. +- The throughput and performance characteristics of an Azure NetApp Files volume is a function of the volume quota and service level, as documented in [Service level for Azure NetApp Files](../../../azure-netapp-files/azure-netapp-files-service-levels.md). While sizing the SAP Azure NetApp volumes, make sure that the resulting throughput meets the application requirements. - Azure NetApp Files offers [export policy](../../../azure-netapp-files/azure-netapp-files-configure-export-policy.md): you can control the allowed clients, the access type (Read&Write, Read Only, etc.). - Azure NetApp Files feature isn't zone aware yet. Currently Azure NetApp Files feature isn't deployed in all Availability zones in an Azure region. Be aware of the potential latency implications in some Azure regions. - Azure NetApp Files volumes can be deployed as NFSv3 or NFSv4.1 volumes. Both protocols are supported for the SAP application layer (ASCS/ERS, SAP application servers). @@ -489,7 +491,7 @@ The following items are prefixed with either **[A]** - applicable to all nodes, sudo crm configure primitive fs_QAS_ASCS Filesystem device='10.1.0.4:/usrsapqas/usrsapQASascs' directory='/usr/sap/QAS/ASCS00' fstype='nfs' options='sec=sys,vers=4.1' \ op start timeout=60s interval=0 \ op stop timeout=60s interval=0 \ - op monitor interval=20s timeout=40s + op monitor interval=20s timeout=105s sudo crm configure primitive vip_QAS_ASCS IPaddr2 \ params ip=10.1.1.20 \ @@ -548,7 +550,7 @@ The following items are prefixed with either **[A]** - applicable to all nodes, sudo crm configure primitive fs_QAS_ERS Filesystem device='10.1.0.4:/usrsapqas/usrsapQASers' directory='/usr/sap/QAS/ERS01' fstype='nfs' options='sec=sys,vers=4.1' \ op start timeout=60s interval=0 \ op stop timeout=60s interval=0 \ - op monitor interval=20s timeout=40s + op monitor interval=20s timeout=105s sudo crm configure primitive vip_QAS_ERS IPaddr2 \ params ip=10.1.1.21 \ @@ -667,20 +669,36 @@ The following items are prefixed with either **[A]** - applicable to all nodes, If using enqueue server 1 architecture (ENSA1), define the resources as follows:
                    sudo crm configure property maintenance-mode="true"
                    -   
                    +   # If using NFSv3
                        sudo crm configure primitive rsc_sap_QAS_ASCS00 SAPInstance \
                         operations \$id=rsc_sap_QAS_ASCS00-operations \
                         op monitor interval=11 timeout=60 on-fail=restart \
                         params InstanceName=QAS_ASCS00_anftstsapvh START_PROFILE="/sapmnt/QAS/profile/QAS_ASCS00_anftstsapvh" \
                         AUTOMATIC_RECOVER=false \
                         meta resource-stickiness=5000 failure-timeout=60 migration-threshold=1 priority=10
                    -   
                    +
                    +   # If using NFSv4.1
                    +   sudo crm configure primitive rsc_sap_QAS_ASCS00 SAPInstance \
                    +    operations \$id=rsc_sap_QAS_ASCS00-operations \
                    +    op monitor interval=11 timeout=105 on-fail=restart \
                    +    params InstanceName=QAS_ASCS00_anftstsapvh START_PROFILE="/sapmnt/QAS/profile/QAS_ASCS00_anftstsapvh" \
                    +    AUTOMATIC_RECOVER=false \
                    +    meta resource-stickiness=5000 failure-timeout=105 migration-threshold=1 priority=10
                    +
                    +   # If using NFSv3   
                        sudo crm configure primitive rsc_sap_QAS_ERS01 SAPInstance \
                         operations \$id=rsc_sap_QAS_ERS01-operations \
                         op monitor interval=11 timeout=60 on-fail=restart \
                         params InstanceName=QAS_ERS01_anftstsapers START_PROFILE="/sapmnt/QAS/profile/QAS_ERS01_anftstsapers" AUTOMATIC_RECOVER=false IS_ERS=true \
                         meta priority=1000
                    -   
                    +
                    +   # If using NFSv4.1
                    +   sudo crm configure primitive rsc_sap_QAS_ERS01 SAPInstance \
                    +    operations \$id=rsc_sap_QAS_ERS01-operations \
                    +    op monitor interval=11 timeout=105 on-fail=restart \
                    +    params InstanceName=QAS_ERS01_anftstsapers START_PROFILE="/sapmnt/QAS/profile/QAS_ERS01_anftstsapers" AUTOMATIC_RECOVER=false IS_ERS=true \
                    +    meta priority=1000
                    +
                        sudo crm configure modgroup g-QAS_ASCS add rsc_sap_QAS_ASCS00
                        sudo crm configure modgroup g-QAS_ERS add rsc_sap_QAS_ERS01
                        
                    @@ -697,6 +715,7 @@ If using enqueue server 1 architecture (ENSA1), define the resources as follows:
                     
                        
                    sudo crm configure property maintenance-mode="true"
                        
                    +   # If using NFSv3
                        sudo crm configure primitive rsc_sap_QAS_ASCS00 SAPInstance \
                         operations \$id=rsc_sap_QAS_ASCS00-operations \
                         op monitor interval=11 timeout=60 on-fail=restart \
                    @@ -704,11 +723,26 @@ If using enqueue server 1 architecture (ENSA1), define the resources as follows:
                         AUTOMATIC_RECOVER=false \
                         meta resource-stickiness=5000
                        
                    +   # If using NFSv4.1
                    +   sudo crm configure primitive rsc_sap_QAS_ASCS00 SAPInstance \
                    +    operations \$id=rsc_sap_QAS_ASCS00-operations \
                    +    op monitor interval=11 timeout=105 on-fail=restart \
                    +    params InstanceName=QAS_ASCS00_anftstsapvh START_PROFILE="/sapmnt/QAS/profile/QAS_ASCS00_anftstsapvh" \
                    +    AUTOMATIC_RECOVER=false \
                    +    meta resource-stickiness=5000
                    +   
                    +   # If using NFSv3
                        sudo crm configure primitive rsc_sap_QAS_ERS01 SAPInstance \
                         operations \$id=rsc_sap_QAS_ERS01-operations \
                         op monitor interval=11 timeout=60 on-fail=restart \
                         params InstanceName=QAS_ERS01_anftstsapers START_PROFILE="/sapmnt/QAS/profile/QAS_ERS01_anftstsapers" AUTOMATIC_RECOVER=false IS_ERS=true
                        
                    +   # If using NFSv4.1
                    +   sudo crm configure primitive rsc_sap_QAS_ERS01 SAPInstance \
                    +    operations \$id=rsc_sap_QAS_ERS01-operations \
                    +    op monitor interval=11 timeout=105 on-fail=restart \
                    +    params InstanceName=QAS_ERS01_anftstsapers START_PROFILE="/sapmnt/QAS/profile/QAS_ERS01_anftstsapers" AUTOMATIC_RECOVER=false IS_ERS=true
                    +   
                        sudo crm configure modgroup g-QAS_ASCS add rsc_sap_QAS_ASCS00
                        sudo crm configure modgroup g-QAS_ERS add rsc_sap_QAS_ERS01
                        
                    @@ -721,6 +755,11 @@ If using enqueue server 1 architecture (ENSA1), define the resources as follows:
                     
                        If you are upgrading from an older version and switching to enqueue server 2, see SAP note [2641019](https://launchpad.support.sap.com/#/notes/2641019). 
                     
                    +   > [!NOTE]
                     +   > The higher timeouts suggested for NFSv4.1 are necessary because of a protocol-specific pause related to NFSv4.1 lease renewals.
                     +   > For more information, see [NetApp NFS Best Practices](https://www.netapp.com/media/10720-tr-4067.pdf).
                     +   > The timeouts in the above configuration are just examples and may need to be adapted to the specific SAP setup.
                    +
                        Make sure that the cluster status is ok and that all resources are started. It is not important on which node the resources are running.
                     
                        
                    sudo crm_mon -r
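For the SLES configuration above, a similar sketch (again assuming the example resource names used in this article) shows how to review, and if needed adjust, the configured monitor timeouts with crmsh:

```bash
# Display the resource definitions, including the op monitor timeout values.
sudo crm configure show rsc_sap_QAS_ASCS00
sudo crm configure show rsc_sap_QAS_ERS01

# To change a timeout later, edit the resource definition in place (opens an editor).
sudo crm configure edit rsc_sap_QAS_ASCS00
```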
                    diff --git a/articles/virtual-machines/workloads/sap/high-availability-guide-suse-pacemaker.md b/articles/virtual-machines/workloads/sap/high-availability-guide-suse-pacemaker.md
                    index 965fcfd576307..57df553db76be 100644
                    --- a/articles/virtual-machines/workloads/sap/high-availability-guide-suse-pacemaker.md
                    +++ b/articles/virtual-machines/workloads/sap/high-availability-guide-suse-pacemaker.md
                    @@ -13,7 +13,7 @@ ms.topic: article
                     ms.tgt_pltfrm: vm-windows
                     ms.workload: infrastructure-services
                     ms.custom: subject-rbac-steps
                    -ms.date: 04/26/2022
                    +ms.date: 05/26/2022
                     ms.author: radeltch
                     
                     ---
                    @@ -485,15 +485,15 @@ This section applies only if you're using a STONITH device that's based on an Az
                     
                     By default, the service principal doesn't have permissions to access your Azure resources. You need to give the service principal permissions to start and stop (deallocate) all virtual machines in the cluster. If you didn't already create the custom role, you can do so by using [PowerShell](../../../role-based-access-control/custom-roles-powershell.md#create-a-custom-role) or the [Azure CLI](../../../role-based-access-control/custom-roles-cli.md).
                     
                    -Use the following content for the input file. You need to adapt the content to your subscriptions. That is, replace *c276fc76-9cd4-44c9-99a7-4fd71546436e* and *e91d47c4-76f3-4271-a796-21b4ecfe3624* with your own subscription IDs. If you have only one subscription, remove the second entry under AssignableScopes.
                    +Use the following content for the input file. You need to adapt the content to your subscriptions. That is, replace *xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx* and *yyyyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy* with your own subscription IDs. If you have only one subscription, remove the second entry under AssignableScopes.
                     
                     ```json
                     {
                           "Name": "Linux fence agent Role",
                           "description": "Allows to power-off and start virtual machines",
                           "assignableScopes": [
                    -              "/subscriptions/e663cc2d-722b-4be1-b636-bbd9e4c60fd9",
                    -              "/subscriptions/e91d47c4-76f3-4271-a796-21b4ecfe3624"
                    +              "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
                    +              "/subscriptions/yyyyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy"
                           ],
                           "actions": [
                                   "Microsoft.Compute/*/read",
                    diff --git a/articles/virtual-machines/workloads/sap/high-availability-guide-windows-netapp-files-smb.md b/articles/virtual-machines/workloads/sap/high-availability-guide-windows-netapp-files-smb.md
                    index 696b25049f4e2..9dc19791310d3 100644
                    --- a/articles/virtual-machines/workloads/sap/high-availability-guide-windows-netapp-files-smb.md
                    +++ b/articles/virtual-machines/workloads/sap/high-availability-guide-windows-netapp-files-smb.md
                    @@ -13,7 +13,7 @@ ms.service: virtual-machines-sap
                     ms.topic: article
                     ms.tgt_pltfrm: vm-windows
                     ms.workload: infrastructure-services
                    -ms.date: 02/13/2022
                    +ms.date: 06/02/2022
                     ms.author: radeltch
                     
                     ---
                    @@ -121,6 +121,16 @@ Perform the following steps, as preparation for using Azure NetApp Files.
                     > [!TIP]
                     > You can find the instructions on how to mount the Azure NetApp Files volume, if you navigate in [Azure Portal](https://portal.azure.com/#home) to the Azure NetApp Files object, click on the **Volumes** blade, then **Mount Instructions**.  
                     
                    +### Important considerations
                    +
                    +When considering Azure NetApp Files for the SAP Netweaver architecture, be aware of the following important considerations:
                    +
                    +- The minimum capacity pool is 4 TiB. The capacity pool size can be increased in 1 TiB increments.
                     +- The minimum volume is 100 GiB.
                     +- The selected virtual network must have a subnet that is delegated to Azure NetApp Files.
                     +- The throughput and performance characteristics of an Azure NetApp Files volume are a function of the volume quota and service level, as documented in [Service levels for Azure NetApp Files](../../../azure-netapp-files/azure-netapp-files-service-levels.md). When sizing the SAP Azure NetApp Files volumes, make sure that the resulting throughput meets the application requirements.
                    + 
                    +
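As a rough illustration of the throughput consideration above, the sketch below uses the generally documented per-TiB figures for the Azure NetApp Files service levels; verify them against the service levels article, and treat the resource names as placeholders:

```bash
# Back-of-the-envelope sizing (confirm the per-TiB figures in the ANF service levels article):
#   Standard ~16 MiB/s, Premium ~64 MiB/s, Ultra ~128 MiB/s per TiB of volume quota.
#   Example: a 4 TiB volume quota at the Premium service level gives roughly 4 * 64 = 256 MiB/s.

# Inspect the quota (usageThreshold, in bytes) and service level of an existing volume.
az netappfiles volume show \
  --resource-group MyResourceGroup \
  --account-name mynetappaccount \
  --pool-name sappool \
  --name sapsmb
```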
                     ## Prepare the infrastructure for SAP HA by using a Windows failover cluster 
                     
                     1. [Set the ASCS/SCS load balancing rules for the Azure internal load balancer](./sap-high-availability-infrastructure-wsfc-shared-disk.md#fe0bd8b5-2b43-45e3-8295-80bee5415716).
                    diff --git a/articles/virtual-machines/workloads/sap/high-availability-guide.md b/articles/virtual-machines/workloads/sap/high-availability-guide.md
                    index 05de41bca34a1..1e6aefd7fb732 100644
                    --- a/articles/virtual-machines/workloads/sap/high-availability-guide.md
                    +++ b/articles/virtual-machines/workloads/sap/high-availability-guide.md
                    @@ -401,7 +401,6 @@ ms.custom: H1Hack27Feb2017, ignite-fall-2021
                     [virtual-machines-manage-availability]:../../windows/manage-availability.md
                     [virtual-machines-ps-create-preconfigure-windows-resource-manager-vms]:../../virtual-machines-windows-ps-create.md
                     [virtual-machines-sizes]:../../virtual-machines-windows-sizes.md
                    -[virtual-machines-windows-portal-sql-alwayson-availability-groups-manual]:../../windows/sql/virtual-machines-windows-portal-sql-alwayson-availability-groups-manual.md
                     [virtual-machines-windows-portal-sql-alwayson-int-listener]:/azure/azure-sql/virtual-machines/windows/availability-group-load-balancer-portal-configure
                     [virtual-machines-upload-image-windows-resource-manager]:../../virtual-machines-windows-upload-image.md
                     [virtual-machines-windows-tutorial]:../../virtual-machines-windows-hero-tutorial.md
                    @@ -618,7 +617,7 @@ _**Figure 7:** Example of a high-availability SAP DBMS, with SQL Server Always O
                     
                     For more information about clustering SQL Server in Azure by using the Azure Resource Manager deployment model, see these articles:
                     
                    -* [Configure Always On availability group in Azure Virtual Machines manually by using Resource Manager][virtual-machines-windows-portal-sql-alwayson-availability-groups-manual]
                    +* [Configure Always On availability group in Azure Virtual Machines manually by using Resource Manager](/azure/azure-sql/virtual-machines/windows/availability-group-overview)
                     * [Configure an Azure internal load balancer for an Always On availability group in Azure][virtual-machines-windows-portal-sql-alwayson-int-listener]
                     
                     ##  End-to-end high-availability deployment scenarios
                    diff --git a/articles/virtual-machines/workloads/sap/planning-guide.md b/articles/virtual-machines/workloads/sap/planning-guide.md
                    index 51bd1c01aa8c1..767be4ae34bd8 100644
                    --- a/articles/virtual-machines/workloads/sap/planning-guide.md
                    +++ b/articles/virtual-machines/workloads/sap/planning-guide.md
                    @@ -634,7 +634,7 @@ For more documentation, see [this article][vpn-gateway-create-site-to-site-rm-po
                     #### VNet to VNet Connection
                     
                     Using Multi-Site VPN, you need to configure a separate Azure Virtual Network in each of the regions. However often you have the requirement that the software components in the different regions should communicate with each other. Ideally this communication should not be routed from one Azure Region to on-premises and from there to the other Azure Region. To shortcut, Azure offers the possibility to configure a connection from one Azure Virtual Network in one region to another Azure Virtual Network hosted in another region. This functionality is called VNet-to-VNet connection. More details on this functionality can be found here:
                    -[Configure a VNet-to-VNet VPN gateway connection by using the Azure portal](/azure/vpn-gateway/vpn-gateway-howto-vnet-vnet-resource-manager-portal).
                    +[Configure a VNet-to-VNet VPN gateway connection by using the Azure portal](../../../vpn-gateway/vpn-gateway-howto-vnet-vnet-resource-manager-portal.md).
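A sketch of what such a VNet-to-VNet connection looks like with the Azure CLI follows; the gateway names, resource group, and shared key are placeholders, and both virtual network gateways must already exist:

```bash
# Connect two VPN gateways in different regions with a VNet-to-VNet connection.
# If the second gateway is in another resource group or subscription,
# pass its full resource ID to --vnet-gateway2.
az network vpn-connection create \
  --name vnet1-to-vnet2 \
  --resource-group MyResourceGroup \
  --vnet-gateway1 VNet1Gateway \
  --vnet-gateway2 VNet2Gateway \
  --shared-key "<shared-key>"
```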
                     
                     #### Private Connection to Azure ExpressRoute
                     
                    @@ -644,18 +644,18 @@ Find more details on Azure ExpressRoute and offerings here:
                     
                     * [ExpressRoute documentation](https://azure.microsoft.com/documentation/services/expressroute/)
                     * [Azure ExpressRoute pricing](https://azure.microsoft.com/pricing/details/expressroute/)
                    -* [ExpressRoute FAQ](/azure/expressroute/expressroute-faqs)
                    +* [ExpressRoute FAQ](../../../expressroute/expressroute-faqs.md)
                     
                     Express Route enables multiple Azure subscriptions through one ExpressRoute circuit as documented here
                     
                    -* [Tutorial: Connect a virtual network to an ExpressRoute circuit](/azure/expressroute/expressroute-howto-linkvnet-arm)
                    -* [Quickstart: Create and modify an ExpressRoute circuit using Azure PowerShell](/azure/expressroute/expressroute-howto-circuit-arm)
                    +* [Tutorial: Connect a virtual network to an ExpressRoute circuit](../../../expressroute/expressroute-howto-linkvnet-arm.md)
                    +* [Quickstart: Create and modify an ExpressRoute circuit using Azure PowerShell](../../../expressroute/expressroute-howto-circuit-arm.md)
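As a sketch of how the circuit owner can let a virtual network in another subscription link to the circuit, an ExpressRoute circuit authorization can be created with the Azure CLI; the names below are placeholders:

```bash
# In the subscription that owns the circuit: create an authorization key.
az network express-route auth create \
  --resource-group CircuitResourceGroup \
  --circuit-name MyExpressRouteCircuit \
  --name PeerSubscriptionAuth

# The generated authorization key is then used from the other subscription
# when connecting its virtual network gateway to this circuit.
```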
                     
                     #### Forced tunneling in case of cross-premises
                     For VMs joining on-premises domains through site-to-site, point-to-site, or ExpressRoute, you need to make sure that the Internet proxy settings are getting deployed for all the users in those VMs as well. By default, software running in those VMs or users using a browser to access the internet would not go through the company proxy, but would connect straight through Azure to the internet. But even the proxy setting is not a 100% solution to direct the traffic through the company proxy since it is responsibility of software and services to check for the proxy. If software running in the VM is not doing that or an administrator manipulates the settings, traffic to the Internet can be detoured again directly through Azure to the Internet.
                     
                     In order to avoid such a direct internet connectivity, you can configure Forced Tunneling with site-to-site connectivity between on-premises and Azure. The detailed description of the Forced Tunneling feature is published here: 
                    -[Configure forced tunneling using the classic deployment model](/azure/vpn-gateway/vpn-gateway-about-forced-tunneling)
                    +[Configure forced tunneling using the classic deployment model](../../../vpn-gateway/vpn-gateway-about-forced-tunneling.md)
                     
                     Forced Tunneling with ExpressRoute is enabled by customers advertising a default route via the ExpressRoute BGP peering sessions.
                     
                    @@ -1908,7 +1908,7 @@ High Availability and Disaster recovery functionality for DBMS in general as wel
                     
                     Here are two examples of a complete SAP NetWeaver HA architecture in Azure - one for Windows and one for Linux.
                     
                    -Unmanaged disks only: The concepts as explained below may need to be compromised a bit when you deploy many SAP systems and the number of VMs deployed are exceeding the maximum limit of Storage Accounts per subscription. In such cases, VHDs of VMs need to be combined within one Storage Account. Usually you would do so by combining VHDs of SAP application layer VMs of different SAP systems.  We also combined different VHDs of different DBMS VMs of different SAP systems in one Azure Storage Account. Thereby keeping the IOPS limits of Azure Storage Accounts in mind [Scalability and performance targets for standard storage accounts](/azure/storage/common/scalability-targets-standard-account)
                    +Unmanaged disks only: The concepts as explained below may need to be compromised a bit when you deploy many SAP systems and the number of VMs deployed are exceeding the maximum limit of Storage Accounts per subscription. In such cases, VHDs of VMs need to be combined within one Storage Account. Usually you would do so by combining VHDs of SAP application layer VMs of different SAP systems.  We also combined different VHDs of different DBMS VMs of different SAP systems in one Azure Storage Account. Thereby keeping the IOPS limits of Azure Storage Accounts in mind [Scalability and performance targets for standard storage accounts](../../../storage/common/scalability-targets-standard-account.md)
                     
                     
                     ##### ![Windows logo.][Logo_Windows] HA on Windows
                    @@ -2037,4 +2037,4 @@ Read the articles:
                     
                     - [Azure Virtual Machines deployment for SAP NetWeaver](./deployment-guide.md)
                     - [Considerations for Azure Virtual Machines DBMS deployment for SAP workload](./dbms_guide_general.md)
                    -- [SAP HANA infrastructure configurations and operations on Azure](./hana-vm-operations.md)
                    +- [SAP HANA infrastructure configurations and operations on Azure](./hana-vm-operations.md)
                    \ No newline at end of file
                    diff --git a/articles/virtual-machines/workloads/sap/sap-hana-scale-out-standby-netapp-files-suse.md b/articles/virtual-machines/workloads/sap/sap-hana-scale-out-standby-netapp-files-suse.md
                    index de32c425312e0..e13942f9758cd 100644
                    --- a/articles/virtual-machines/workloads/sap/sap-hana-scale-out-standby-netapp-files-suse.md
                    +++ b/articles/virtual-machines/workloads/sap/sap-hana-scale-out-standby-netapp-files-suse.md
                    @@ -351,7 +351,7 @@ Configure and prepare your OS by doing the following steps:
                     > [!TIP]
                     > Avoid setting net.ipv4.ip_local_port_range and net.ipv4.ip_local_reserved_ports explicitly in the sysctl configuration files to allow SAP Host Agent to manage the port ranges. For more details see SAP note [2382421](https://launchpad.support.sap.com/#/notes/2382421).  
                     
                    -4. **[A]** Adjust the sunrpc settings, as recommended in SAP note [3024346 - Linux Kernel Settings for NetApp NFS](https://launchpad.support.sap.com/#/notes/3024346).  
                    +4. **[A]** Adjust the sunrpc settings for NFSv3 volumes, as recommended in SAP note [3024346 - Linux Kernel Settings for NetApp NFS](https://launchpad.support.sap.com/#/notes/3024346).  
                     
                         
                    
                         vi /etc/modprobe.d/sunrpc.conf
                    diff --git a/articles/virtual-machines/workloads/sap/sap-high-availability-architecture-scenarios.md b/articles/virtual-machines/workloads/sap/sap-high-availability-architecture-scenarios.md
                    index 66871e59175e0..f8e7665260aff 100644
                    --- a/articles/virtual-machines/workloads/sap/sap-high-availability-architecture-scenarios.md
                    +++ b/articles/virtual-machines/workloads/sap/sap-high-availability-architecture-scenarios.md
                    @@ -81,7 +81,6 @@ ms.custom: H1Hack27Feb2017
                     [planning-guide-9.1]:planning-guide.md#6f0a47f3-a289-4090-a053-2521618a28c3
                     [planning-guide-azure-premium-storage]:planning-guide.md#ff5ad0f9-f7f4-4022-9102-af07aef3bc92
                     
                    -[virtual-machines-windows-portal-sql-alwayson-availability-groups-manual]:../../windows/sql/virtual-machines-windows-portal-sql-alwayson-availability-groups-manual.md
                     [virtual-machines-windows-portal-sql-alwayson-int-listener]:/azure/azure-sql/virtual-machines/windows/availability-group-load-balancer-portal-configure
                     
                     [sap-ha-bc-virtual-env-hyperv-vmware-white-paper]:https://scn.sap.com/docs/DOC-44415
                    @@ -424,7 +423,7 @@ _**Figure 3:** Example of a high-availability SAP DBMS, with SQL Server AlwaysOn
                     
                     For more information about clustering SQL Server DBMS in Azure by using the Azure Resource Manager deployment model, see these articles:
                     
                    -* [Configure an AlwaysOn availability group in Azure virtual machines manually by using Resource Manager][virtual-machines-windows-portal-sql-alwayson-availability-groups-manual]
                    +* [Configure an AlwaysOn availability group in Azure virtual machines manually by using Resource Manager](/azure/azure-sql/virtual-machines/windows/availability-group-overview)
                     
                     * [Configure an Azure internal load balancer for an AlwaysOn availability group in Azure][virtual-machines-windows-portal-sql-alwayson-int-listener]
                     
                    diff --git a/articles/virtual-machines/workloads/sap/sap-rise-integration.md b/articles/virtual-machines/workloads/sap/sap-rise-integration.md
                    index ac6c725f7944f..c04317c3739ff 100644
                    --- a/articles/virtual-machines/workloads/sap/sap-rise-integration.md
                    +++ b/articles/virtual-machines/workloads/sap/sap-rise-integration.md
                    @@ -139,7 +139,7 @@ With the information about available interfaces to the SAP RISE/ECS landscape, s
                     
                     Integrating your SAP system with Azure cloud native services such as Azure Data Factory or Azure Synapse would use these communication channels to the SAP RISE/ECS managed environment. 
                     
                    -The following high-level architecture shows possible integration scenario with Azure data services such as [Data Factory](/azure/data-factory) or [Synapse Analytics](/azure/synapse-analytics). For these Azure services either a self-hosted integration runtime (self-hosted IR or IR) or Azure integration runtime (Azure IR) can be used. The use of either integration runtime depends on the [chosen data connector](/azure/data-factory/copy-activity-overview#supported-data-stores-and-formats), most SAP connectors are only available for the self-hosted IR. [SAP ECC connector](/azure/data-factory/connector-sap-ecc?tabs=data-factory) is capable of being using through both Azure IR and self-hosted IR. The choice of IR governs the network path taken. SAP .NET connector is used for [SAP table connector](/azure/data-factory/connector-sap-ecc?tabs=data-factory), [SAP BW](/azure/data-factory/connector-sap-business-warehouse?tabs=data-factory) and [SAP OpenHub](/azure/data-factory/connector-sap-business-warehouse-open-hub) connectors alike. All these connectors use SAP function modules (FM) on the SAP system, executed through RFC connections. Last if direct database access has been agreed with SAP, along with users and connection path opened, ODBC/JDBC connector for [SAP HANA](/azure/data-factory/connector-sap-hana?tabs=data-factory) can be used from the self-hosted IR as well.
+The following high-level architecture shows a possible integration scenario with Azure data services such as [Data Factory](../../../data-factory/index.yml) or [Synapse Analytics](../../../synapse-analytics/index.yml). For these Azure services, either a self-hosted integration runtime (self-hosted IR or IR) or an Azure integration runtime (Azure IR) can be used. The use of either integration runtime depends on the [chosen data connector](../../../data-factory/copy-activity-overview.md#supported-data-stores-and-formats); most SAP connectors are only available for the self-hosted IR. The [SAP ECC connector](../../../data-factory/connector-sap-ecc.md?tabs=data-factory) can be used through both the Azure IR and the self-hosted IR. The choice of IR governs the network path taken. The SAP .NET connector is used by the [SAP table connector](../../../data-factory/connector-sap-ecc.md?tabs=data-factory), [SAP BW](../../../data-factory/connector-sap-business-warehouse.md?tabs=data-factory) and [SAP OpenHub](../../../data-factory/connector-sap-business-warehouse-open-hub.md) connectors alike. All these connectors use SAP function modules (FM) on the SAP system, executed through RFC connections. Lastly, if direct database access has been agreed on with SAP, along with users and a connection path opened, the ODBC/JDBC connector for [SAP HANA](../../../data-factory/connector-sap-hana.md?tabs=data-factory) can be used from the self-hosted IR as well.
                     
                     [![SAP RISE/ECS accessed by Azure ADF or Synapse.](./media/sap-rise-integration/sap-rise-adf-synapse.png)](./media/sap-rise-integration/sap-rise-adf-synapse.png#lightbox)
                     
                    @@ -155,13 +155,13 @@ The customer is responsible for deployment and operation of the self-hosted inte
                     To learn the overall support on SAP data integration scenario, see [SAP data integration using Azure Data Factory whitepaper](https://github.com/Azure/Azure-DataFactory/blob/master/whitepaper/SAP%20Data%20Integration%20using%20Azure%20Data%20Factory.pdf) with detailed introduction on each SAP connector, comparison and guidance.
                     
                     ## On-premise data gateway
                    -Further Azure Services such as [Logic Apps](/azure/logic-apps/logic-apps-using-sap-connector), [Power Apps](/connectors/saperp/) or [Power BI](/power-bi/connect-data/desktop-sap-bw-connector) communicate and exchange data with SAP systems through an on-premise data gateway. The on-premise data gateway is a virtual machine, running in Azure or on-premise. It provides secure data transfer between these Azure Services and your SAP systems. 
+Other Azure services such as [Logic Apps](../../../logic-apps/logic-apps-using-sap-connector.md), [Power Apps](/connectors/saperp/) or [Power BI](/power-bi/connect-data/desktop-sap-bw-connector) communicate and exchange data with SAP systems through an on-premise data gateway. The on-premise data gateway is a virtual machine running in Azure or on-premise. It provides secure data transfer between these Azure services and your SAP systems. 
                     
                     With SAP RISE, the on-premise data gateway can connect to Azure Services running in customer’s Azure subscription. This VM running the data gateway is deployed and operated by the customer. With below high-level architecture as overview, similar method can be used for either service. 
                     
                     [![SAP RISE/ECS accessed from Azure on-premise data gateway and connected Azure services.](./media/sap-rise-integration/sap-rise-on-premises-data-gateway.png)](./media/sap-rise-integration/sap-rise-on-premises-data-gateway.png#lightbox)
                     
                    -The SAP RISE environment here provides access to the SAP ports for RFC and https described earlier. The communication ports are accessed by the private network address through the vnet peering or VPN site-to-site connection. The on-premise data gateway VM running in customer’s Azure subscription uses the [SAP .NET connector](https://support.sap.com/en/product/connectors/msnet.html) to run RFC, BAPI or IDoc calls through the RFC connection. Additionally, depending on service and way the communication is setup, a way to connect to public IP of the SAP systems REST API through https might be required. The https connection to a public IP can be exposed through SAP RISE/ECS managed application gateway. This high level architecture shows the possible integration scenario. Alternatives to it such as using Logic Apps single tenant and [private endpoints](/azure/logic-apps/secure-single-tenant-workflow-virtual-network-private-endpoint) to secure the communication and other can be seen as extension and are not described here in. 
+The SAP RISE environment here provides access to the SAP ports for RFC and https described earlier. The communication ports are accessed by the private network address through the vnet peering or VPN site-to-site connection. The on-premise data gateway VM running in the customer’s Azure subscription uses the [SAP .NET connector](https://support.sap.com/en/product/connectors/msnet.html) to run RFC, BAPI or IDoc calls through the RFC connection. Additionally, depending on the service and the way the communication is set up, a connection to the public IP of the SAP system's REST API through https might be required. The https connection to a public IP can be exposed through the SAP RISE/ECS managed application gateway. This high-level architecture shows the possible integration scenario. Alternatives, such as using single-tenant Logic Apps with [private endpoints](../../../logic-apps/secure-single-tenant-workflow-virtual-network-private-endpoint.md) to secure the communication, can be seen as extensions and aren't described here. 
                     
                     SAP RISE/ECS exposes the communication ports for these applications to use but has no knowledge about any details of the connected application or service running in a customer’s subscription. 
                     
                    @@ -170,7 +170,7 @@ SAP RISE/ECS exposes the communication ports for these applications to use but h
                     
                     ## Azure Monitoring for SAP with SAP RISE
                     
                    -[Azure Monitoring for SAP](/azure/virtual-machines/workloads/sap/monitor-sap-on-azure) is an Azure-native solution for monitoring your SAP system. It extends the Azure monitor platform monitoring capability with support to gather data about SAP NetWeaver, database, and operating system details. 
                    +[Azure Monitoring for SAP](./monitor-sap-on-azure.md) is an Azure-native solution for monitoring your SAP system. It extends the Azure monitor platform monitoring capability with support to gather data about SAP NetWeaver, database, and operating system details. 
                     
                     > [!Note]
                     > SAP RISE/ECS is a fully managed service for your SAP landscape and thus Azure Monitoring for SAP is not intended to be utilized for such managed environment.
                    diff --git a/articles/virtual-network-manager/TOC.yml b/articles/virtual-network-manager/TOC.yml
                    index 94b1fc1ca4fbc..c3b2ef886c172 100644
                    --- a/articles/virtual-network-manager/TOC.yml
                    +++ b/articles/virtual-network-manager/TOC.yml
                    @@ -68,7 +68,7 @@
                       - name: REST
                         href: /rest/api/networkmanager/
                       - name: .NET SDK
                    -    href: /dotnet/api/microsoft.azure.management.network?view=azure-dotnet
                    +    href: /dotnet/api/microsoft.azure.management.network
                     - name: Resources
                       items:
                       - name: Azure roadmap
                    diff --git a/articles/virtual-network-manager/create-virtual-network-manager-powershell.md b/articles/virtual-network-manager/create-virtual-network-manager-powershell.md
                    index e929dedcdf67a..cc4eb76185a1a 100644
                    --- a/articles/virtual-network-manager/create-virtual-network-manager-powershell.md
                    +++ b/articles/virtual-network-manager/create-virtual-network-manager-powershell.md
                    @@ -1,8 +1,8 @@
                     ---
                     title: 'Quickstart: Create a mesh network with Azure Virtual Network Manager using Azure PowerShell'
                     description: Use this quickstart to learn how to create a mesh network with Virtual Network Manager using Azure PowerShell.
                    -author: duongau
                    -ms.author: duau
                    +author: mbender-ms
                    +ms.author: mbender
                     ms.service: virtual-network-manager
                     ms.topic: quickstart
                     ms.date: 11/02/2021
                    @@ -23,9 +23,12 @@ In this quickstart, you'll deploy three virtual networks and use Azure Virtual N
                     ## Prerequisites
                     
                     * An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F).
                    -* Make sure you have the latest PowerShell modules, or you can use Azure Cloud Shell in the portal.
                    +* During preview, the `4.15.1-preview` version of `Az.Network` is required to access the required cmdlets.
                     * If you're running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure.
                     
                    +> [!IMPORTANT]
+> Perform this quickstart using PowerShell locally, not through Azure Cloud Shell. The version of `Az.Network` in Azure Cloud Shell does not currently support the Azure Virtual Network Manager cmdlets.
                    +
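Editor's aside (not part of the quickstart): before continuing, you could confirm that the preview build of the module is installed locally. A minimal sketch, assuming PowerShellGet is available:

```azurepowershell-interactive
# Editorial sketch: list installed Az.Network versions, including prerelease builds,
# to confirm 4.15.1-preview is present before running the preview cmdlets.
Get-InstalledModule -Name Az.Network -AllVersions -AllowPrerelease |
    Select-Object Name, Version
```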
                     ## Register subscription for public preview
                     
                     Use the following command to register your Azure subscription for Public Preview of Azure Virtual Network Manager:
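Editor's aside (not from the article): feature registration is asynchronous, so after running the registration command that follows in the article, the registration state could be checked. A minimal sketch using `Get-AzProviderFeature`:

```azurepowershell-interactive
# Editorial sketch: check whether the AllowAzureNetworkManager preview feature has
# finished registering on the subscription (RegistrationState should read 'Registered').
Get-AzProviderFeature -FeatureName AllowAzureNetworkManager -ProviderNamespace Microsoft.Network
```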
                    @@ -39,7 +42,7 @@ Register-AzProviderFeature -FeatureName AllowAzureNetworkManager -ProviderNamesp
                     Install the latest *Az.Network* Azure PowerShell module using this command:
                     
                     ```azurepowershell-interactive
                    -Install-Module -Name Az.Network -AllowPrerelease
+Install-Module -Name Az.Network -RequiredVersion 4.15.1-preview -AllowPrerelease
                     ```
                     
                     ## Create a resource group
                    @@ -59,7 +62,7 @@ New-AzResourceGroup @rg
                     1. Define the scope and access type this Azure Virtual Network Manager instance will have. You can choose to create the scope with subscriptions group or management group or a combination of both. Create the scope by using New-AzNetworkManagerScope.
                     
                         ```azurepowershell-interactive
                    -    Import-Module -Name Az.Network -RequiredVersion "4.12.1"
                    +    Import-Module -Name Az.Network -RequiredVersion "4.15.1"
                         
                         [System.Collections.Generic.List[string]]$subGroup = @()  
                         $subGroup.Add("/subscriptions/abcdef12-3456-7890-abcd-ef1234567890")
                    diff --git a/articles/virtual-network-manager/faq.md b/articles/virtual-network-manager/faq.md
                    index 4be3d41fe6c02..c067fe570c8c1 100644
                    --- a/articles/virtual-network-manager/faq.md
                    +++ b/articles/virtual-network-manager/faq.md
                    @@ -2,11 +2,11 @@
                     title: Frequently asked questions about Azure Virtual Network Manager
                     description: Find answers to frequently asked questions about Azure Virtual Network Manager.
                     services: virtual-network-manager
                    -author: duongau
                    +author: mbender-ms
                     ms.service: virtual-network-manager
                     ms.topic: article
                     ms.date: 4/18/2022
                    -ms.author: duau
                    +ms.author: mbender
                     ms.custom: references_regions, ignite-fall-2021
                     ---
                     
                    @@ -48,7 +48,7 @@ ms.custom: references_regions, ignite-fall-2021
                     
                     * Central India
                     
                    -* All regions that have [Availability Zones](../availability-zones/az-overview.md#azure-regions-with-availability-zones), except France Central.
+* All regions that have [Availability Zones](../availability-zones/az-overview.md#azure-regions-with-availability-zones), except France Central.
                     
                     > [!NOTE]
                     > Even if an Azure Virtual Network Manager instance isn't available because all zones are down, configurations applied to resources will still persist.
                    @@ -127,6 +127,17 @@ Azure SQL Managed Instance has some network requirements. If your security admin
                     | 443, 12000 | TCP	| **VirtualNetwork** | AzureCloud | Allow |
                     | Any | Any | **VirtualNetwork** | **VirtualNetwork** | Allow |
                     
                    +
                    +## Can an Azure Virtual WAN hub be part of a network group? 
                    +
                    +No, an Azure Virtual WAN hub can't be in a network group at this time.
                    +
                    +
                    +## Can an Azure Virtual WAN be used as the hub in AVNM's hub and spoke topology configuration? 
                    +
                    +No, an Azure Virtual WAN hub isn't supported as the hub in a hub and spoke topology at this time.
                    +
                    +
                     ## Limits
                     
                     ### What are the service limitation of Azure Virtual Network Manager?
                    diff --git a/articles/virtual-network-manager/how-to-create-mesh-network-powershell.md b/articles/virtual-network-manager/how-to-create-mesh-network-powershell.md
                    index 94f9e3ccd6895..eace54a467565 100644
                    --- a/articles/virtual-network-manager/how-to-create-mesh-network-powershell.md
                    +++ b/articles/virtual-network-manager/how-to-create-mesh-network-powershell.md
                    @@ -1,8 +1,8 @@
                     ---
                     title: 'Create a mesh network topology with Azure Virtual Network Manager (Preview) - Azure PowerShell'
                     description: Learn how to create a mesh network topology with Azure Virtual Network Manager using Azure PowerShell.
                    -author: duongau
                    -ms.author: duau
                    +author: mbender-ms    
                    +ms.author: mbender
                     ms.service: virtual-network-manager
                     ms.topic: how-to
                     ms.date: 11/02/2021
                    @@ -54,7 +54,7 @@ This section will help you create a network group containing the virtual network
                         }' 
                         ```
                     
                    -1. Create the network group using either the static membership group (GroupMember) or the dynamic membership group (ConditionalMembership) define previously using New-AzNetworkManagerGroup.
                    +1. Create the network group using either the static membership group (GroupMember) or the dynamic membership group (ConditionalMembership) defined previously using New-AzNetworkManagerGroup.
                     
                         ```azurepowershell-interactive
                         $ng = @{
                    diff --git a/articles/virtual-network-manager/overview.md b/articles/virtual-network-manager/overview.md
                    index 81a6937c167d0..b239a9928a509 100644
                    --- a/articles/virtual-network-manager/overview.md
                    +++ b/articles/virtual-network-manager/overview.md
                    @@ -2,11 +2,11 @@
                     title: 'What is Azure Virtual Network Manager (Preview)?'
                     description: Learn how Azure Virtual Network Manager can simplify management and scalability of your virtual networks.
                     services: virtual-network-manager
                    -author: duongau
                    +author: mbender-ms
                     ms.service: virtual-network-manager
                     ms.topic: overview
                     ms.date: 11/02/2021
                    -ms.author: duau
                    +ms.author: mbender
                     ms.custom: references_regions, ignite-fall-2021
                     #Customer intent: As an IT administrator, I want to learn about Azure Virtual Network Manager and what I can use it for.
                     ---
                    @@ -48,6 +48,8 @@ A connectivity configuration enables you to create a mesh or a hub-and-spoke net
                     
                     * North Central US
                     
                    +* South Central US
                    +
                     * West US
                     
                     * West US 2
                    @@ -56,11 +58,25 @@ A connectivity configuration enables you to create a mesh or a hub-and-spoke net
                     
                     * East US 2
                     
                    +* Canada Central
                    +
                     * North Europe
                     
                     * West Europe
                     
                    -* France Central
                    +* UK South
                    +
                    +* Switzerland North
                    +
                    +* Southeast Asia
                    +
                    +* Japan East
                    +
                    +* Japan West
                    +
                    +* Australia East
                    +
                    +* Central India
                     
                     ## Next steps
                     
                    diff --git a/articles/virtual-network/ip-services/public-ip-addresses.md b/articles/virtual-network/ip-services/public-ip-addresses.md
                    index 26dff517522eb..eb337c6d474e0 100644
                    --- a/articles/virtual-network/ip-services/public-ip-addresses.md
                    +++ b/articles/virtual-network/ip-services/public-ip-addresses.md
                    @@ -108,6 +108,15 @@ The fully qualified domain name (FQDN) **contoso.westus.cloudapp.azure.com** res
                     
                     If a custom domain is desired for services that use a Public IP, you can use [Azure DNS](../../dns/dns-custom-domain.md?toc=%2fazure%2fvirtual-network%2ftoc.json#public-ip-address) or an external DNS provider for your DNS Record.
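Editor's aside (not part of this article): as one hedged illustration, a custom domain could be mapped to an existing public IP with an A record in an Azure DNS zone. The zone, resource group, and resource names below are assumptions:

```azurepowershell-interactive
# Editorial sketch: resolve a custom name to an existing public IP with an Azure DNS A record.
$publicIp = Get-AzPublicIpAddress -Name 'myPublicIP' -ResourceGroupName 'myResourceGroup'

New-AzDnsRecordSet -Name 'www' -RecordType A -ZoneName 'contoso.com' `
    -ResourceGroupName 'myResourceGroup' -Ttl 3600 `
    -DnsRecords (New-AzDnsRecordConfig -Ipv4Address $publicIp.IpAddress)
```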
                     
                    +## Availability Zone
                    +
+Public IP addresses with a Standard SKU can be created as non-zonal, zonal, or zone-redundant in [regions that support availability zones](../../availability-zones/az-region.md). A zone-redundant IP is created in all zones for a region and can survive any single zone failure. A zonal IP is tied to a specific availability zone and shares fate with the health of that zone. A non-zonal public IP address is placed into a zone for you by Azure and doesn't provide a guarantee of redundancy.
                    +
                    +In regions without availability zones, all public IP addresses are created as non-zonal. Public IP addresses created in a region that is later upgraded to have availability zones remain non-zonal.
                    +
                    +> [!NOTE]
+> All Basic SKU public IP addresses are created as non-zonal. Any IP that is upgraded from a Basic SKU to a Standard SKU remains non-zonal.
                    +
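Editor's aside (not part of this article): a hedged sketch of creating zone-redundant and zonal Standard SKU public IPs with Azure PowerShell; the resource group and region names are assumptions:

```azurepowershell-interactive
# Editorial sketch: a zone-redundant Standard public IP spans all three zones in the region.
New-AzPublicIpAddress -Name 'myZoneRedundantIP' -ResourceGroupName 'myResourceGroup' `
    -Location 'eastus2' -Sku 'Standard' -AllocationMethod 'Static' -Zone 1, 2, 3

# A zonal Standard public IP is pinned to a single zone and shares fate with that zone.
New-AzPublicIpAddress -Name 'myZonalIP' -ResourceGroupName 'myResourceGroup' `
    -Location 'eastus2' -Sku 'Standard' -AllocationMethod 'Static' -Zone 2
```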
                     ## Other public IP address features
                     
                     There are other attributes that can be used for a public IP address.  
                    diff --git a/articles/virtual-network/ip-services/routing-preference-azure-kubernetes-service-cli.md b/articles/virtual-network/ip-services/routing-preference-azure-kubernetes-service-cli.md
                    index 31b8eb765627c..f271414511bcf 100644
                    --- a/articles/virtual-network/ip-services/routing-preference-azure-kubernetes-service-cli.md
                    +++ b/articles/virtual-network/ip-services/routing-preference-azure-kubernetes-service-cli.md
                    @@ -1,7 +1,7 @@
                     ---
                    -title: 'Tutorial: Configure routing preference for an Azure Kubernetes service - Azure CLI'
                    +title: 'Tutorial: Configure routing preference for an Azure Kubernetes Service - Azure CLI'
                     titlesuffix: Azure virtual network
                    -description: Use this tutorial to learn how to configure routing preference for an Azure Kubernetes service.
                    +description: Use this tutorial to learn how to configure routing preference for an Azure Kubernetes Service.
                     author: asudbring
                     ms.author: allensu
                     ms.service: virtual-network
                    @@ -12,7 +12,7 @@ ms.custom: template-tutorial #Required; leave this attribute/value as-is., devx-
                     ms.devlang: azurecli
                     ---
                     
                    -# Tutorial: Configure routing preference for an Azure Kubernetes service using the Azure CLI
                    +# Tutorial: Configure routing preference for an Azure Kubernetes Service using the Azure CLI
                     
                     This article shows you how to configure routing preference via ISP network (**Internet** option) for a Kubernetes cluster using Azure CLI. Routing preference is set by creating a public IP address of routing preference type **Internet** and then using it while creating the AKS cluster.
                     
                    diff --git a/articles/virtual-network/ip-services/virtual-network-public-ip-address.md b/articles/virtual-network/ip-services/virtual-network-public-ip-address.md
                    index 1db81d0e0f167..056c5922b5660 100644
                    --- a/articles/virtual-network/ip-services/virtual-network-public-ip-address.md
                    +++ b/articles/virtual-network/ip-services/virtual-network-public-ip-address.md
                    @@ -89,13 +89,13 @@ Learn how to assign a public IP address to the following resources:
                     
                     - A [Windows](../../virtual-machines/windows/quick-create-portal.md?toc=%2fazure%2fvirtual-network%2ftoc.json) or [Linux](../../virtual-machines/linux/quick-create-portal.md?toc=%2fazure%2fvirtual-network%2ftoc.json) Virtual Machine on creation. Add IP to an [existing virtual machine](./virtual-network-network-interface-addresses.md#add-ip-addresses).
                     - [Virtual Machine Scale Set](../../virtual-machine-scale-sets/quick-create-portal.md?toc=%2fazure%2fvirtual-network%2ftoc.json)
                    -- [Public load balancer](/configure-public-ip-load-balancer.md)
                    +- [Public load balancer](/azure/virtual-network/ip-services/configure-public-ip-load-balancer)
                     - [Cross-region load balancer](../../load-balancer/tutorial-cross-region-portal.md?toc=%2fazure%2fvirtual-network%2ftoc.json)
                    -- [Application Gateway](/configure-public-ip-application-gateway.md)
                    +- [Application Gateway](/azure/virtual-network/ip-services/configure-public-ip-application-gateway)
                     - [Site-to-site connection using a VPN gateway](configure-public-ip-vpn-gateway.md)
                    -- [NAT gateway](/configure-public-ip-nat-gateway.md)
                    -- [Azure Bastion](/configure-public-ip-bastion.md)
                    -- [Azure Firewall](/configure-public-ip-firewall.md)
                    +- [NAT gateway](/azure/virtual-network/ip-services/configure-public-ip-nat-gateway)
                    +- [Azure Bastion](/azure/virtual-network/ip-services/configure-public-ip-bastion)
                    +- [Azure Firewall](/azure/virtual-network/ip-services/configure-public-ip-firewall)
                     
                     ## Region availability
                     
                    diff --git a/articles/virtual-network/kubernetes-network-policies.md b/articles/virtual-network/kubernetes-network-policies.md
                    index de93256c34de0..87a0febcbee77 100644
                    --- a/articles/virtual-network/kubernetes-network-policies.md
                    +++ b/articles/virtual-network/kubernetes-network-policies.md
                    @@ -123,16 +123,16 @@ The different quantile levels in "exec_time" metrics help you differentiate betw
                     
                     There's also an "exec_time_count" and "exec_time_sum" metric for each "exec_time" Summary metric.
                     
                    -The metrics can be scraped through Azure Monitor for Containers or through Prometheus.
                    +The metrics can be scraped through Container insights or through Prometheus.
                     
                     ### Setup for Azure Monitor
                    -The first step is to enable Azure Monitor for containers for your Kubernetes cluster. Steps can be found in [Azure Monitor for containers Overview](../azure-monitor/containers/container-insights-overview.md). Once you have Azure Monitor for containers enabled, configure the [Azure Monitor for containers ConfigMap](https://aka.ms/container-azm-ms-agentconfig) to enable NPM integration and collection of Prometheus NPM metrics. Azure monitor for containers ConfigMap has an ```integrations``` section with settings to collect NPM metrics. These settings are disabled by default in the ConfigMap. Enabling the basic setting ```collect_basic_metrics = true```, will collect basic NPM metrics. Enabling advanced setting ```collect_advanced_metrics = true``` will collect advanced metrics in addition to basic metrics. 
+The first step is to enable Container insights for your Kubernetes cluster. Steps can be found in the [Container insights overview](../azure-monitor/containers/container-insights-overview.md). Once you have Container insights enabled, configure the [Container insights ConfigMap](https://aka.ms/container-azm-ms-agentconfig) to enable NPM integration and collection of Prometheus NPM metrics. The Container insights ConfigMap has an ```integrations``` section with settings to collect NPM metrics. These settings are disabled by default in the ConfigMap. Enabling the basic setting ```collect_basic_metrics = true``` will collect basic NPM metrics. Enabling the advanced setting ```collect_advanced_metrics = true``` will collect advanced metrics in addition to basic metrics. 
                     
                     After editing the ConfigMap, save it locally and apply the ConfigMap to your cluster as follows.
                     
                     `kubectl apply -f container-azm-ms-agentconfig.yaml`
                     
                    -Below is a snippet from the [Azure monitor for containers ConfigMap](https://aka.ms/container-azm-ms-agentconfig), which shows the NPM integration enabled with advanced metrics collection.
                    +Below is a snippet from the [Container insights ConfigMap](https://aka.ms/container-azm-ms-agentconfig), which shows the NPM integration enabled with advanced metrics collection.
                     ```
                     integrations: |-
                         [integrations.azure_network_policy_manager]
                    @@ -141,7 +141,7 @@ integrations: |-
                     ```
                     Advanced metrics are optional, and turning them on will automatically turn on basic metrics collection. Advanced metrics currently include only `npm_ipset_counts`
                     
                    -Learn more about [Azure monitor for containers collection settings in config map](../azure-monitor/containers/container-insights-agent-config.md)
                    +Learn more about [Container insights collection settings in config map](../azure-monitor/containers/container-insights-agent-config.md)
                     
                     ### Visualization Options for Azure Monitor
                     Once NPM metrics collection is enabled, you can view the metrics in the Azure portal using Container Insights or in Grafana.
                    @@ -161,7 +161,7 @@ Set up your Grafana Server and configure a Log Analytics Data Source as describe
                     The dashboard has visuals similar to the Azure Workbook. You can add panels to chart & visualize NPM metrics from InsightsMetrics table.
                     
                     ### Setup for Prometheus Server
                    -Some users may choose to collect metrics with a Prometheus Server instead of Azure Monitor for containers. You merely need to add two jobs to your scrape config to collect NPM metrics.
                    +Some users may choose to collect metrics with a Prometheus Server instead of Container insights. You merely need to add two jobs to your scrape config to collect NPM metrics.
                     
                     To install a simple Prometheus Server, add this helm repo on your cluster
                     ```
                    diff --git a/articles/virtual-network/monitor-virtual-network-reference.md b/articles/virtual-network/monitor-virtual-network-reference.md
                    index 8d7012236dda9..6bdbf9c41e60d 100644
                    --- a/articles/virtual-network/monitor-virtual-network-reference.md
                    +++ b/articles/virtual-network/monitor-virtual-network-reference.md
                    @@ -64,7 +64,7 @@ This section refers to all of the Azure Monitor Logs Kusto tables relevant to Az
                     
                     **Virtual network**
                     
                    -Azure virtual network does not have diagnostic logs.
                    +Azure virtual network doesn't have diagnostic logs.
                     
                     ## Activity log
                     
                    @@ -80,5 +80,5 @@ For more information on the schema of Activity Log entries, see [Activity Log sc
                     
                     ## See also
                     
                    -- See [Monitoring Azure Azure virtual network](monitor-virtual-network.md) for a description of monitoring Azure Azure virtual network.
                    +- See [Monitoring Azure virtual network](monitor-virtual-network.md) for a description of monitoring Azure virtual network.
                     - See [Monitoring Azure resources with Azure Monitor](../azure-monitor/essentials/monitor-azure-resource.md) for details on monitoring Azure resources.
                    diff --git a/articles/virtual-network/monitor-virtual-network.md b/articles/virtual-network/monitor-virtual-network.md
                    index 3a76726a2ae13..40834017ab52d 100644
                    --- a/articles/virtual-network/monitor-virtual-network.md
                    +++ b/articles/virtual-network/monitor-virtual-network.md
                    @@ -2,8 +2,8 @@
                     title: Monitoring Azure virtual networks
                     description: Start here to learn how to monitor Azure virtual networks
                     services: virtual-network
                    -author: duongau
                    -ms.author: duau
                    +author: mbender-ms
                    +ms.author: mbender
                     ms.service: virtual-network
                     ms.topic: how-to
                     ms.custom: subject-monitoring
                    @@ -14,7 +14,7 @@ ms.date: 06/29/2021
                     
                     When you have critical applications and business processes relying on Azure resources, you want to monitor those resources for their availability, performance, and operation. 
                     
                    -This article describes the monitoring data generated by Azure virtual network. Azure virtual network uses [Azure Monitor](../azure-monitor/overview.md). If you are unfamiliar with the features of Azure Monitor common to all Azure services that use it, read [Monitoring Azure resources with Azure Monitor](../azure-monitor/essentials/monitor-azure-resource.md).
                    +This article describes the monitoring data generated by Azure virtual network. Azure virtual network uses [Azure Monitor](../azure-monitor/overview.md). If you're unfamiliar with the features of Azure Monitor common to all Azure services that use it, read [Monitoring Azure resources with Azure Monitor](../azure-monitor/essentials/monitor-azure-resource.md).
                     
                     ## Monitoring data 
                     
                    @@ -26,7 +26,7 @@ See [Monitoring Azure virtual network data reference](monitor-virtual-network-re
                     
                     Platform metrics and the Activity log are collected and stored automatically, but can be routed to other locations by using a diagnostic setting.  
                     
                    -Resource Logs are not collected and stored until you create a diagnostic setting and route them to one or more locations.
                    +Resource Logs aren't collected and stored until you create a diagnostic setting and route them to one or more locations.
                     
                     See [Create diagnostic setting to collect platform logs and metrics in Azure](../azure-monitor/essentials/diagnostic-settings.md) for the detailed process for creating a diagnostic setting using the Azure portal, CLI, or PowerShell. When you create a diagnostic setting, you specify which categories of logs to collect. The categories for *Azure virtual network* are listed in [Azure virtual network monitoring data reference](monitor-virtual-network-reference.md#resource-logs).
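Editor's aside (not part of this article): as a hedged sketch, a diagnostic setting that routes a virtual network's platform metrics to a Log Analytics workspace could be created with the classic `Set-AzDiagnosticSetting` cmdlet from Az.Monitor; the resource names and workspace ID below are placeholders:

```azurepowershell-interactive
# Editorial sketch: send platform metrics for a virtual network to a Log Analytics workspace.
$vnet = Get-AzVirtualNetwork -Name 'myVNet' -ResourceGroupName 'myResourceGroup'

Set-AzDiagnosticSetting -Name 'vnet-diagnostics' -ResourceId $vnet.Id -Enabled $true `
    -WorkspaceId '/subscriptions/<subscription-id>/resourceGroups/<rg>/providers/Microsoft.OperationalInsights/workspaces/<workspace>'
```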
                     
                    @@ -47,11 +47,11 @@ For reference, you can see a list of [all resource metrics supported in Azure Mo
                     
                     ## Analyzing logs
                     
                    -Azure virtual network does not support resource logs.
                    +Azure virtual network doesn't support resource logs.
                     
                     For a list of the types of resource logs collected for resources in a virtual network, see [Monitoring virtual network data reference](monitor-virtual-network-reference.md#resource-logs)   
                     
                    -The [Activity log](../azure-monitor/essentials/activity-log.md) is a type of platform log in Azure that provides insight into subscription-level events. You can view it independently or route it to Azure Monitor Logs, where you can do much more complex queries using Log Analytics.  
+The [Activity log](../azure-monitor/essentials/activity-log.md) is a type of platform log in Azure that provides insight into subscription-level events. You can view it independently or route it to Azure Monitor Logs, where you can do much more complex queries using Log Analytics.  
                     
                     ## Alerts
                     
                    @@ -61,8 +61,8 @@ The following table lists common and recommended activity alert rules for Azure
                     
                     | Alert type | Condition | Description  |
                     |:---|:---|:---|
                    -| Create or Update Virtual Network | Event Level: All selected, Status: All selected, Event initiated by: All services and users | When a user creates or make configuration changes to the virtual network. |
                    -| Delete Virtual Network | Event Level: All selected, Status: Started | When a user delete a virtual network. |
                    +| Create or Update Virtual Network | Event Level: All selected, Status: All selected, Event initiated by: All services and users | When a user creates or makes configuration changes to the virtual network. |
                    +| Delete Virtual Network | Event Level: All selected, Status: Started | When a user deletes a virtual network. |
                     
                     ## Next steps
                     
                    diff --git a/articles/virtual-network/nat-gateway/faq.yml b/articles/virtual-network/nat-gateway/faq.yml
                    index b81d660955504..b2be167f4165d 100644
                    --- a/articles/virtual-network/nat-gateway/faq.yml
                    +++ b/articles/virtual-network/nat-gateway/faq.yml
                    @@ -24,7 +24,7 @@ sections:
                     
                           - question: How can I use custom IP prefixes (BYOIP) with Virtual Network NAT gateway?
                             answer: |
                    -          You can use public IP prefixes and addresses derived from custom IP prefixes (BYOIP) with your NAT gateway resource. See [Custom IP address prefix (BYOIP)](/azure/virtual-network/ip-services/custom-ip-address-prefix) to learn more. 
                    +          You can use public IP prefixes and addresses derived from custom IP prefixes (BYOIP) with your NAT gateway resource. See [Custom IP address prefix (BYOIP)](../ip-services/custom-ip-address-prefix.md) to learn more. 
                           
                           - question: Can a zone-redundant public IP address be attached to a NAT gateway? 
                             answer: |
                    @@ -119,4 +119,4 @@ sections:
                     additionalContent: |
                     
                       ## Next steps
                    -     If your question is not listed above, please send feedback about this page with your question. This will create a GitHub issue for the product team to ensure all of our valued customer questions are answered.
                    +     If your question is not listed above, please send feedback about this page with your question. This will create a GitHub issue for the product team to ensure all of our valued customer questions are answered.
                    \ No newline at end of file
                    diff --git a/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-internal-portal/find-public-ip.png b/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-internal-portal/find-public-ip.png
                    index 515725d4bf49b..83d25904169ee 100644
                    Binary files a/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-internal-portal/find-public-ip.png and b/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-internal-portal/find-public-ip.png differ
                    diff --git a/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-internal-portal/my-ip.png b/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-internal-portal/my-ip.png
                    index b7070296fffbe..c6549285c3a3d 100644
                    Binary files a/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-internal-portal/my-ip.png and b/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-internal-portal/my-ip.png differ
                    diff --git a/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-public-portal/find-public-ip.png b/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-public-portal/find-public-ip.png
                    index 5a899e68afcd4..37c1ae6423298 100644
                    Binary files a/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-public-portal/find-public-ip.png and b/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-public-portal/find-public-ip.png differ
                    diff --git a/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-public-portal/my-ip.png b/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-public-portal/my-ip.png
                    index bf00ee57864d1..33096a52e481e 100644
                    Binary files a/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-public-portal/my-ip.png and b/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-public-portal/my-ip.png differ
                    diff --git a/articles/virtual-network/nat-gateway/nat-overview.md b/articles/virtual-network/nat-gateway/nat-overview.md
                    index 582980f90fa6f..235f890f78c0f 100644
                    --- a/articles/virtual-network/nat-gateway/nat-overview.md
                    +++ b/articles/virtual-network/nat-gateway/nat-overview.md
                    @@ -56,7 +56,7 @@ Virtual Network NAT is a software defined networking service. A NAT gateway won'
                     
                       * Public IP prefixes
                     
                    -  * Public IP addresses and prefixes derived from custom IP prefixes (BYOIP), to learn more, see [Custom IP address prefix (BYOIP)](/azure/virtual-network/ip-services/custom-ip-address-prefix)
                    +  * Public IP addresses and prefixes derived from custom IP prefixes (BYOIP), to learn more, see [Custom IP address prefix (BYOIP)](../ip-services/custom-ip-address-prefix.md)
                     
                     * Virtual Network NAT is compatible with standard SKU public IP addresses or public IP prefix resources or a combination of both. You can use a public IP prefix directly or distribute the public IP addresses of the prefix across multiple NAT gateway resources. The NAT gateway will groom all traffic to the range of IP addresses of the prefix. 
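Editor's aside (not from the article): a hedged sketch of pairing a Standard SKU public IP prefix with a NAT gateway; the resource names, region, and prefix length are assumptions:

```azurepowershell-interactive
# Editorial sketch: create a /31 Standard public IP prefix and attach it to a NAT gateway.
$prefix = New-AzPublicIpPrefix -Name 'myPublicIPPrefix' -ResourceGroupName 'myResourceGroup' `
    -Location 'eastus2' -Sku 'Standard' -PrefixLength 31

New-AzNatGateway -Name 'myNATgateway' -ResourceGroupName 'myResourceGroup' `
    -Location 'eastus2' -Sku 'Standard' -PublicIpPrefix $prefix -IdleTimeoutInMinutes 4
```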
                     
                    @@ -98,4 +98,4 @@ For information on the SLA, see [SLA for Virtual Network NAT](https://azure.micr
                     
                     * Learn about the [NAT gateway resource](./nat-gateway-resource.md).
                     
                    -* [Learn module: Introduction to Azure Virtual Network NAT](/learn/modules/intro-to-azure-virtual-network-nat).
                    +* [Learn module: Introduction to Azure Virtual Network NAT](/learn/modules/intro-to-azure-virtual-network-nat).
                    \ No newline at end of file
                    diff --git a/articles/virtual-network/nat-gateway/resource-health.md b/articles/virtual-network/nat-gateway/resource-health.md
                    index a4bacf54a36ec..221fae2dad2aa 100644
                    --- a/articles/virtual-network/nat-gateway/resource-health.md
                    +++ b/articles/virtual-network/nat-gateway/resource-health.md
                    @@ -16,7 +16,7 @@ This article provides guidance on how to use Azure Resource Health to monitor an
                     
                     ## Resource health status
                     
                    -[Azure Resource Health](/azure/service-health/overview) provides information about the health of your NAT gateway resource. You can use resource health and Azure monitor notifications to keep you informed on the availability and health status of your NAT gateway resource. Resource health can help you quickly assess whether an issue is due to a problem in your Azure infrastructure or because of an Azure platform event. The resource health of your NAT gateway is evaluated by measuring the data-path availability of your NAT gateway endpoint.
                    +[Azure Resource Health](../../service-health/overview.md) provides information about the health of your NAT gateway resource. You can use resource health and Azure monitor notifications to keep you informed on the availability and health status of your NAT gateway resource. Resource health can help you quickly assess whether an issue is due to a problem in your Azure infrastructure or because of an Azure platform event. The resource health of your NAT gateway is evaluated by measuring the data-path availability of your NAT gateway endpoint.
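Editor's aside (not from the article): as a hedged sketch, the underlying data-path availability metric could also be queried directly with `Get-AzMetric`; the metric name `DatapathAvailability` and the resource names here are assumptions:

```azurepowershell-interactive
# Editorial sketch: read the NAT gateway's data-path availability (metric name assumed)
# over the last hour.
$nat = Get-AzNatGateway -Name 'myNATgateway' -ResourceGroupName 'myResourceGroup'

Get-AzMetric -ResourceId $nat.Id -MetricName 'DatapathAvailability' `
    -StartTime (Get-Date).AddHours(-1) -EndTime (Get-Date) -AggregationType Average
```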
                     
                     You can view the status of your NAT gateway’s health status on the **Resource Health** page, found under **Support + troubleshooting** for your NAT gateway resource.  
                     
                    @@ -29,7 +29,7 @@ The health of your NAT gateway resource is displayed as one of the following sta
                     | Unavailable | Your NAT gateway resource is not healthy. The metric for the data-path availability has reported less than 25% for the past 15 minutes. You may experience unavailability of your NAT gateway resource for outbound connectivity. |
                     | Unknown | Health status for your NAT gateway resource hasn’t been updated or hasn’t received information for data-path availability for more than 5 minutes. This state should be transient and will reflect the correct status as soon as data is received. |
                     
                    -For more information about Azure Resource Health, see [Resource Health overview](/azure/service-health/resource-health-overview).
                    +For more information about Azure Resource Health, see [Resource Health overview](../../service-health/resource-health-overview.md).
                     
                     To view the health of your NAT gateway resource:
                     
                    @@ -41,7 +41,7 @@ To view the health of your NAT gateway resource:
                     
                     ## Next steps
                     
                    -- Learn about [Virtual Network NAT](/azure/virtual-network/nat-gateway/nat-overview)
                    -- Learn about [metrics and alerts for NAT gateway](/azure/virtual-network/nat-gateway/nat-metrics)
                    -- Learn about [troubleshooting NAT gateway resources](/azure/virtual-network/nat-gateway/troubleshoot-nat)
                    -- Learn about [Azure resource health](/azure/service-health/resource-health-overview)
                    +- Learn about [Virtual Network NAT](./nat-overview.md)
                    +- Learn about [metrics and alerts for NAT gateway](./nat-metrics.md)
                    +- Learn about [troubleshooting NAT gateway resources](./troubleshoot-nat.md)
                    +- Learn about [Azure resource health](../../service-health/resource-health-overview.md)
                    \ No newline at end of file
                    diff --git a/articles/virtual-network/nat-gateway/tutorial-migrate-ilip-nat.md b/articles/virtual-network/nat-gateway/tutorial-migrate-ilip-nat.md
                    index f419a50c2a96f..3a26d1ed7f366 100644
                    --- a/articles/virtual-network/nat-gateway/tutorial-migrate-ilip-nat.md
                    +++ b/articles/virtual-network/nat-gateway/tutorial-migrate-ilip-nat.md
                    @@ -7,7 +7,7 @@ ms.author: allensu
                     ms.service: virtual-network
                     ms.subservice: nat
                     ms.topic: tutorial
                    -ms.date: 2/07/2022
                    +ms.date: 5/25/2022
                     ms.custom: template-tutorial 
                     ---
                     
                    @@ -83,7 +83,7 @@ In this section, you’ll create a NAT gateway with the IP address you previousl
                     
                     2. In **NAT gateways**, select **+ Create**.
                     
                    -3. In **Create network address translation (NAT) gateway**, enter or select the following information.
                    +3. In **Create network address translation (NAT) gateway**, enter or select the following information in the **Basics** tab.
                     
                         | Setting | Value |
                         | ------- | ----- |
                    diff --git a/articles/virtual-network/nat-gateway/tutorial-migrate-outbound-nat.md b/articles/virtual-network/nat-gateway/tutorial-migrate-outbound-nat.md
                    index a4788d5938a60..bb29a2efbbe06 100644
                    --- a/articles/virtual-network/nat-gateway/tutorial-migrate-outbound-nat.md
                    +++ b/articles/virtual-network/nat-gateway/tutorial-migrate-outbound-nat.md
                    @@ -7,7 +7,7 @@ ms.author: allensu
                     ms.service: virtual-network
                     ms.subservice: nat
                     ms.topic: tutorial
                    -ms.date: 1/11/2022
                    +ms.date: 5/25/2022
                     ms.custom: template-tutorial 
                     ---
                     
                    @@ -33,7 +33,7 @@ In this tutorial, you learn how to:
                         * The load balancer name used in the examples is **myLoadBalancer**.
                     
                     > [!NOTE]
                    -> Virtual Network NAT provides outbound connectivity for standard internal load balancers. To configure create a NAT gateway resource and associate it to your subnet. For more information on integrating a NAT gateway with your internal load balancers, see [Tutorial: Integrate NAT gateway with an internal load balancer - Azure portal - Virtual Network NAT](tutorial-nat-gateway-load-balancer-internal-portal.md).
                    +> Virtual Network NAT provides outbound connectivity for standard internal load balancers. For more information on integrating a NAT gateway with your internal load balancers, see [Tutorial: Integrate a NAT gateway with an internal load balancer using Azure portal](tutorial-nat-gateway-load-balancer-internal-portal.md).
                     
                     ## Migrate default outbound access
                     
                    @@ -45,7 +45,7 @@ In this section, you’ll learn how to change your outbound connectivity method
                     
                     3. In **NAT gateways**, select **+ Create**.
                     
                    -4. In **Create network address translation (NAT) gateway**, enter or select the following information.
                    +4. In **Create network address translation (NAT) gateway**, enter or select the following information in the **Basics** tab.
                     
                         | Setting | Value |
                         | ------- | ----- |
                    @@ -113,7 +113,7 @@ In this section, you’ll create a NAT gateway with the IP address previously us
                     
                     2. In **NAT gateways**, select **+ Create**.
                     
                    -3. In **Create network address translation (NAT) gateway**, enter or select the following information.
                    +3. In **Create network address translation (NAT) gateway**, enter or select the following information in the **Basics** tab.
                     
                         | Setting | Value |
                         | ------- | ----- |
                    diff --git a/articles/virtual-network/nat-gateway/tutorial-nat-gateway-load-balancer-internal-portal.md b/articles/virtual-network/nat-gateway/tutorial-nat-gateway-load-balancer-internal-portal.md
                    index 3d0a4f118039c..254371eed0495 100644
                    --- a/articles/virtual-network/nat-gateway/tutorial-nat-gateway-load-balancer-internal-portal.md
                    +++ b/articles/virtual-network/nat-gateway/tutorial-nat-gateway-load-balancer-internal-portal.md
                    @@ -7,7 +7,7 @@ ms.author: allensu
                     ms.service: virtual-network
                     ms.subservice: nat
                     ms.topic: tutorial
                    -ms.date: 08/04/2021
                    +ms.date: 05/24/2022
                     ms.custom: template-tutorial
                     ---
                     
                    @@ -73,7 +73,7 @@ In this section, you'll create a virtual network and subnet.
                     
                     8. Select **Save**.
                     
                    -9. Select the **Security** tab.
                    +9. Select the **Security** tab or select the **Next: Security** button at the bottom of the page.
                     
                     10. Under **BastionHost**, select **Enable**. Enter this information:
                     
                    @@ -112,64 +112,66 @@ During the creation of the load balancer, you'll configure:
                         | **Instance details** |   |
                         | Name                   | Enter **myLoadBalancer**                                   |
                         | Region         | Select **(US) East US**.                                        |
                    -    | Type          | Select **Internal**.                                        |
                         | SKU           | Leave the default **Standard**. |
                    +    | Type          | Select **Internal**.                                        |
                     
                     4. Select **Next: Frontend IP configuration** at the bottom of the page.
                     
                    -5. In **Frontend IP configuration**, select **+ Add a frontend IP**.
                    +5. In **Frontend IP configuration**, select **+ Add a frontend IP configuration**.
                     
                     6. Enter **LoadBalancerFrontend** in **Name**.
                     
                    -7. Select **myBackendSubnet** in **Subnet**.
                    +7. Select **myVNet** in **Virtual network**.
                    +
                    +8. Select **myBackendSubnet** in **Subnet**.
                     
                    -8. Select **Dynamic** for **Assignment**.
                    +9. Select **Dynamic** for **Assignment**.
                     
                    -9. Select **Zone-redundant** in **Availability zone**.
                    +10. Select **Zone-redundant** in **Availability zone**.
                     
                         > [!NOTE]
                         > In regions with [Availability Zones](../../availability-zones/az-overview.md?toc=%2fazure%2fvirtual-network%2ftoc.json#availability-zones), you have the option to select no-zone (default option), a specific zone, or zone-redundant. The choice will depend on your specific domain failure requirements. In regions without Availability Zones, this field won't appear. 
                    For more information on availability zones, see [Availability zones overview](../../availability-zones/az-overview.md). -10. Select **Add**. +11. Select **Add**. -11. Select **Next: Backend pools** at the bottom of the page. +12. Select **Next: Backend pools** at the bottom of the page. -12. In the **Backend pools** tab, select **+ Add a backend pool**. +13. In the **Backend pools** tab, select **+ Add a backend pool**. -13. Enter **myBackendPool** for **Name** in **Add backend pool**. +14. Enter **myBackendPool** for **Name** in **Add backend pool**. -14. Select **NIC** or **IP Address** for **Backend Pool Configuration**. +15. Select **NIC** or **IP Address** for **Backend Pool Configuration**. -15. Select **IPv4** or **IPv6** for **IP version**. +16. Select **IPv4** or **IPv6** for **IP version**. -16. Select **Add**. +17. Select **Add**. -17. Select the **Next: Inbound rules** button at the bottom of the page. +18. Select the **Next: Inbound rules** button at the bottom of the page. -18. In **Load balancing rule** in the **Inbound rules** tab, select **+ Add a load balancing rule**. +19. In **Load balancing rule** in the **Inbound rules** tab, select **+ Add a load balancing rule**. -19. In **Add load balancing rule**, enter or select the following information: +20. In **Add load balancing rule**, enter or select the following information: | Setting | Value | | ------- | ----- | | Name | Enter **myHTTPRule** | | IP Version | Select **IPv4** or **IPv6** depending on your requirements. | | Frontend IP address | Select **LoadBalancerFrontend**. | + | Backend pool | Select **myBackendPool**. | | Protocol | Select **TCP**. | | Port | Enter **80**. | | Backend port | Enter **80**. | - | Backend pool | Select **myBackendPool**. | | Health probe | Select **Create new**.
                    In **Name**, enter **myHealthProbe**.
                    Select **HTTP** in **Protocol**.
                    Leave the rest of the defaults, and select **OK**. | | Session persistence | Select **None**. | | Idle timeout (minutes) | Enter or select **15**. | | TCP reset | Select **Enabled**. | | Floating IP | Select **Disabled**. | -20. Select **Add**. +21. Select **Add**. -21. Select the blue **Review + create** button at the bottom of the page. +22. Select the blue **Review + create** button at the bottom of the page. -22. Select **Create**. +23. Select **Create**. ## Create virtual machines @@ -275,25 +277,23 @@ In this section, you'll create a NAT gateway and assign it to the subnet in the In this section, we'll test the NAT gateway. We'll first discover the public IP of the NAT gateway. We'll then connect to the test virtual machine and verify the outbound connection through the NAT gateway. -1. Find the public IP address for the NAT gateway on the **Overview** screen. Select **All services** in the left-hand menu, select **All resources**, and then select **myPublicIP**. +1. Select **Resource groups** in the left-hand menu, select the **TutorIntLBNAT-rg** resource group, and then from the resources list, select **myNATgatewayIP**. 2. Make note of the public IP address: :::image type="content" source="./media/tutorial-nat-gateway-load-balancer-internal-portal/find-public-ip.png" alt-text="Screenshot of discover public IP address of NAT gateway." border="true"::: -3. Select **All services** in the left-hand menu, select **All resources**, and then from the resources list, select **myVM1** that is located in the **TutorIntLBNAT-rg** resource group. +3. Select **Resource groups** in the left-hand menu, select the **TutorIntLBNAT-rg** resource group, and then from the resources list, select **myVM1**. 4. On the **Overview** page, select **Connect**, then **Bastion**. -5. Select the blue **Use Bastion** button. - -6. Enter the username and password entered during VM creation. +5. Enter the username and password entered during VM creation. -7. Open **Internet Explorer** on **myVM1**. +6. Open **Internet Explorer** on **myVM1**. -8. Enter **https://whatsmyip.com** in the address bar. +7. Enter **https://whatsmyip.com** in the address bar. -9. Verify the IP address displayed matches the NAT gateway address you noted in the previous step: +8. Verify the IP address displayed matches the NAT gateway address you noted in the previous step: :::image type="content" source="./media/tutorial-nat-gateway-load-balancer-internal-portal/my-ip.png" alt-text="Screenshot of Internet Explorer showing external outbound IP." border="true"::: diff --git a/articles/virtual-network/nat-gateway/tutorial-nat-gateway-load-balancer-public-portal.md b/articles/virtual-network/nat-gateway/tutorial-nat-gateway-load-balancer-public-portal.md index 4cb9294dd9ce2..4c2627b40a2fd 100644 --- a/articles/virtual-network/nat-gateway/tutorial-nat-gateway-load-balancer-public-portal.md +++ b/articles/virtual-network/nat-gateway/tutorial-nat-gateway-load-balancer-public-portal.md @@ -7,7 +7,7 @@ ms.author: allensu ms.service: virtual-network ms.subservice: nat ms.topic: tutorial -ms.date: 03/19/2021 +ms.date: 05/24/2022 ms.custom: template-tutorial --- @@ -35,7 +35,7 @@ An Azure account with an active subscription. [Create an account for free](https In this section, you'll create a virtual network and subnet. -1. In the search box at the top of the portal, enter **Virtual network**. Select **Virtual Networks** in the search results. +1. In the search box at the top of the portal, enter **Virtual network**. 
Select **Virtual networks** in the search results. 2. In **Virtual networks**, select **+ Create**. @@ -69,7 +69,7 @@ In this section, you'll create a virtual network and subnet. 8. Select **Save**. -9. Select the **Security** tab. +9. Select the **Security** tab or select the **Next: Security** button at the bottom of the page. 10. Under **BastionHost**, select **Enable**. Enter this information: @@ -108,14 +108,14 @@ During the creation of the load balancer, you'll configure: | **Instance details** | | | Name | Enter **myLoadBalancer** | | Region | Select **(US) East US**. | - | Type | Select **Public**. | | SKU | Leave the default **Standard**. | + | Type | Select **Public**. | | Tier | Leave the default **Regional**. | 4. Select **Next: Frontend IP configuration** at the bottom of the page. -5. In **Frontend IP configuration**, select **+ Add a frontend IP**. +5. In **Frontend IP configuration**, select **+ Add a frontend IP configuration**. 6. Enter **LoadBalancerFrontend** in **Name**. @@ -169,10 +169,10 @@ During the creation of the load balancer, you'll configure: | Name | Enter **myHTTPRule** | | IP Version | Select **IPv4** or **IPv6** depending on your requirements. | | Frontend IP address | Select **LoadBalancerFrontend**. | + | Backend pool | Select **myBackendPool**. | | Protocol | Select **TCP**. | | Port | Enter **80**. | | Backend port | Enter **80**. | - | Backend pool | Select **myBackendPool**. | | Health probe | Select **Create new**.
                    In **Name**, enter **myHealthProbe**.
                    Select **HTTP** in **Protocol**.
                    Leave the rest of the defaults, and select **OK**. | | Session persistence | Select **None**. | | Idle timeout (minutes) | Enter or select **15**. | @@ -290,25 +290,23 @@ In this section, you'll create a NAT gateway and assign it to the subnet in the In this section, we'll test the NAT gateway. We'll first discover the public IP of the NAT gateway. We'll then connect to the test virtual machine and verify the outbound connection through the NAT gateway. -1. Find the public IP address for the NAT gateway on the **Overview** screen. Select **All services** in the left-hand menu, select **All resources**, and then select **myPublicIP**. +1. Select **Resource groups** in the left-hand menu, select the **TutorPubLBNAT-rg** resource group, and then from the resources list, select **myNATgatewayIP**. 2. Make note of the public IP address: :::image type="content" source="./media/tutorial-nat-gateway-load-balancer-public-portal/find-public-ip.png" alt-text="Screenshot discover public IP address of NAT gateway." border="true"::: -3. Select **All services** in the left-hand menu, select **All resources**, and then from the resources list, select **myVM1** that is located in the **TutorPubLBNAT-rg** resource group. +3. Select **Resource groups** in the left-hand menu, select the **TutorPubLBNAT-rg** resource group, and then from the resources list, select **myVM1**. 4. On the **Overview** page, select **Connect**, then **Bastion**. -5. Select the blue **Use Bastion** button. - -6. Enter the username and password entered during VM creation. +5. Enter the username and password entered during VM creation. -7. Open **Internet Explorer** on **myVM1**. +6. Open **Internet Explorer** on **myVM1**. -8. Enter **https://whatsmyip.com** in the address bar. +7. Enter **https://whatsmyip.com** in the address bar. -9. Verify the IP address displayed matches the NAT gateway address you noted in the previous step: +8. Verify the IP address displayed matches the NAT gateway address you noted in the previous step: :::image type="content" source="./media/tutorial-nat-gateway-load-balancer-public-portal/my-ip.png" alt-text="Screenshot Internet Explorer showing external outbound IP." border="true"::: diff --git a/articles/virtual-network/service-tags-overview.md b/articles/virtual-network/service-tags-overview.md index a6226878e27e1..11fa23e1a50f5 100644 --- a/articles/virtual-network/service-tags-overview.md +++ b/articles/virtual-network/service-tags-overview.md @@ -36,14 +36,14 @@ The columns indicate whether the tag: - Is suitable for rules that cover inbound or outbound traffic. - Supports [regional](https://azure.microsoft.com/regions) scope. -- Is usable in [Azure Firewall](../firewall/service-tags.md) rules. +- Is usable in [Azure Firewall](../firewall/service-tags.md) rules as a *destination* rule only for inbound or outbound traffic. -By default, service tags reflect the ranges for the entire cloud. Some service tags also allow more granular control by restricting the corresponding IP ranges to a specified region. For example, the service tag **Storage** represents Azure Storage for the entire cloud, but **Storage.WestUS** narrows the range to only the storage IP address ranges from the WestUS region. The following table indicates whether each service tag supports such regional scope. Note that the direction listed for each tag is a recommendation. For example, the AzureCloud tag may be used to allow inbound traffic. 
However, we don't recommend this in most scenarios since this means allowing traffic from all Azure IP's, including those used by other Azure customers. +By default, service tags reflect the ranges for the entire cloud. Some service tags also allow more granular control by restricting the corresponding IP ranges to a specified region. For example, the service tag **Storage** represents Azure Storage for the entire cloud, but **Storage.WestUS** narrows the range to only the storage IP address ranges from the WestUS region. The following table indicates whether each service tag supports such regional scope, and the direction listed for each tag is a recommendation. For example, the AzureCloud tag may be used to allow inbound traffic. In most scenarios, we don't recommend allowing traffic from all Azure IPs since IPs used by other Azure customers are included as part of the service tag. | Tag | Purpose | Can use inbound or outbound? | Can be regional? | Can use with Azure Firewall? | | --- | -------- |:---:|:---:|:---:| | **ActionGroup** | Action Group. | Inbound | No | No | -| **ApiManagement** | Management traffic for Azure API Management-dedicated deployments.

                    **Note**: This tag represents the Azure API Management service endpoint for control plane per region. This enables customers to perform management operations on the APIs, Operations, Policies, NamedValues configured on the API Management service. | Inbound | Yes | Yes | +| **ApiManagement** | Management traffic for Azure API Management-dedicated deployments.

                    **Note**: This tag represents the Azure API Management service endpoint for control plane per region. The tag enables customers to perform management operations on the APIs, Operations, Policies, NamedValues configured on the API Management service. | Inbound | Yes | Yes | | **ApplicationInsightsAvailability** | Application Insights Availability. | Inbound | No | No | | **AppConfiguration** | App Configuration. | Outbound | No | No | | **AppService** | Azure App Service. This tag is recommended for outbound security rules to web apps and Function apps. | Outbound | Yes | Yes | @@ -56,7 +56,7 @@ By default, service tags reflect the ranges for the entire cloud. Some service t | **AzureBackup** |Azure Backup.

                    **Note**: This tag has a dependency on the **Storage** and **AzureActiveDirectory** tags. | Outbound | No | Yes | | **AzureBotService** | Azure Bot Service. | Outbound | No | No | | **AzureCloud** | All [datacenter public IP addresses](https://www.microsoft.com/download/details.aspx?id=56519). | Outbound | Yes | Yes | -| **AzureCognitiveSearch** | Azure Cognitive Search.

                    This tag or the IP addresses covered by this tag can be used to grant indexers secure access to data sources. Refer to the [indexer connection documentation](../search/search-indexer-troubleshooting.md#connection-errors) for more details.

                    **Note**: The IP of the search service is not included in the list of IP ranges for this service tag and **also needs to be added** to the IP firewall of data sources. | Inbound | No | No | +| **AzureCognitiveSearch** | Azure Cognitive Search.

This tag or the IP addresses covered by this tag can be used to grant indexers secure access to data sources. For more information about indexer connections, see the [indexer connection documentation](../search/search-indexer-troubleshooting.md#connection-errors).

                    **Note**: The IP of the search service isn't included in the list of IP ranges for this service tag and **also needs to be added** to the IP firewall of data sources. | Inbound | No | No | | **AzureConnectors** | This tag represents the IP addresses used for managed connectors that make inbound webhook callbacks to the Azure Logic Apps service and outbound calls to their respective services, for example, Azure Storage or Azure Event Hubs. | Inbound / Outbound | Yes | Yes | | **AzureContainerRegistry** | Azure Container Registry. | Outbound | Yes | Yes | | **AzureCosmosDB** | Azure Cosmos DB. | Outbound | Yes | Yes | @@ -86,7 +86,7 @@ By default, service tags reflect the ranges for the entire cloud. Some service t | **AzureSphere** | This tag or the IP addresses covered by this tag can be used to restrict access to Azure Sphere Security Services. | Both | No | Yes | | **AzureStack** | Azure Stack Bridge services.
                    This tag represents the Azure Stack Bridge service endpoint per region. | Outbound | No | Yes | | **AzureTrafficManager** | Azure Traffic Manager probe IP addresses.

                    For more information on Traffic Manager probe IP addresses, see [Azure Traffic Manager FAQ](../traffic-manager/traffic-manager-faqs.md). | Inbound | No | Yes | -| **AzureUpdateDelivery** | For accessing Windows Updates.

                    **Note**: This tag provides access to Windows Update metadata services. To successfully download updates you must also enable the **AzureFrontDoor.FirstParty** service tag and configure outbound security rules with the protocol and port defined as follows:
                    • AzureUpdateDelivery: TCP, port 443
                    • AzureFrontDoor.FirstParty: TCP, port 80
                    | Outbound | No | No | +| **AzureUpdateDelivery** | For accessing Windows Updates.

                    **Note**: This tag provides access to Windows Update metadata services. To successfully download updates, you must also enable the **AzureFrontDoor.FirstParty** service tag and configure outbound security rules with the protocol and port defined as follows:
                    • AzureUpdateDelivery: TCP, port 443
                    • AzureFrontDoor.FirstParty: TCP, port 80
                    | Outbound | No | No | | **BatchNodeManagement** | Management traffic for deployments dedicated to Azure Batch. | Both | No | Yes | | **CognitiveServicesManagement** | The address ranges for traffic for Azure Cognitive Services. | Both | No | No | | **DataFactory** | Azure Data Factory | Both | No | No | @@ -110,7 +110,7 @@ By default, service tags reflect the ranges for the entire cloud. Some service t | **PowerQueryOnline** | Power Query Online. | Both | No | No | | **ServiceBus** | Azure Service Bus traffic that uses the Premium service tier. | Outbound | Yes | Yes | | **ServiceFabric** | Azure Service Fabric.

                    **Note**: This tag represents the Service Fabric service endpoint for control plane per region. This enables customers to perform management operations for their Service Fabric clusters from their VNET (endpoint eg. https:// westus.servicefabric.azure.com). | Both | No | No | -| **Sql** | Azure SQL Database, Azure Database for MySQL, Azure Database for PostgreSQL, Azure Database for MariaDB, and Azure Synapse Analytics.

                    **Note**: This tag represents the service, but not specific instances of the service. For example, the tag represents the Azure SQL Database service, but not a specific SQL database or server. This tag does not apply to SQL managed instance. | Outbound | Yes | Yes | +| **Sql** | Azure SQL Database, Azure Database for MySQL, Azure Database for PostgreSQL, Azure Database for MariaDB, and Azure Synapse Analytics.

                    **Note**: This tag represents the service, but not specific instances of the service. For example, the tag represents the Azure SQL Database service, but not a specific SQL database or server. This tag doesn't apply to SQL managed instance. | Outbound | Yes | Yes | | **SqlManagement** | Management traffic for SQL-dedicated deployments. | Both | No | Yes | | **Storage** | Azure Storage.

                    **Note**: This tag represents the service, but not specific instances of the service. For example, the tag represents the Azure Storage service, but not a specific Azure Storage account. | Outbound | Yes | Yes | | **StorageSyncService** | Storage Sync Service. | Both | No | No | @@ -119,6 +119,7 @@ By default, service tags reflect the ranges for the entire cloud. Some service t | **VirtualNetwork** | The virtual network address space (all IP address ranges defined for the virtual network), all connected on-premises address spaces, [peered](virtual-network-peering-overview.md) virtual networks, virtual networks connected to a [virtual network gateway](../vpn-gateway/vpn-gateway-about-vpngateways.md?toc=%2fazure%2fvirtual-network%3ftoc.json), the [virtual IP address of the host](./network-security-groups-overview.md#azure-platform-considerations), and address prefixes used on [user-defined routes](virtual-networks-udr-overview.md). This tag might also contain default routes. | Both | No | No | > [!NOTE] +> - When using service tags with Azure Firewall, you can only create destination rules on inbound and outbound traffic. Source rules are not supported. For more information, see the [Azure Firewall Service Tags](../firewall/service-tags.md) doc. > > - Service tags of Azure services denote the address prefixes from the specific cloud being used. For example, the underlying IP ranges that correspond to the **Sql** tag value on the Azure Public cloud will be different from the underlying ranges on the Azure China cloud. > @@ -165,12 +166,12 @@ You can download JSON files that contain the current list of service tags togeth - [Azure Public](https://www.microsoft.com/download/details.aspx?id=56519) - [Azure US Government](https://www.microsoft.com/download/details.aspx?id=57063) -- [Azure China](https://www.microsoft.com/download/details.aspx?id=57062) +- [Azure China 21Vianet](https://www.microsoft.com/download/details.aspx?id=57062) - [Azure Germany](https://www.microsoft.com/download/details.aspx?id=57064) The IP address ranges in these files are in CIDR notation. -The following AzureCloud tags do not have regional names formatted according to the normal schema: +The following AzureCloud tags don't have regional names formatted according to the normal schema: - AzureCloud.centralfrance (FranceCentral) - AzureCloud.southfrance (FranceSouth) - AzureCloud.germanywc (GermanyWestCentral) diff --git a/articles/virtual-network/virtual-networks-udr-overview.md b/articles/virtual-network/virtual-networks-udr-overview.md index a7fa580402966..54cc100ed83f7 100644 --- a/articles/virtual-network/virtual-networks-udr-overview.md +++ b/articles/virtual-network/virtual-networks-udr-overview.md @@ -38,7 +38,7 @@ Each route contains an address prefix and next hop type. When traffic leaving a The next hop types listed in the previous table represent how Azure routes traffic destined for the address prefix listed. Explanations for the next hop types follow: -* **Virtual network**: Routes traffic between address ranges within the [address space](manage-virtual-network.md#add-or-remove-an-address-range) of a virtual network. Azure creates a route with an address prefix that corresponds to each address range defined within the address space of a virtual network. If the virtual network address space has multiple address ranges defined, Azure creates an individual route for each address range. Azure automatically routes traffic between subnets using the routes created for each address range. 
You don't need to define gateways for Azure to route traffic between subnets. Though a virtual network contains subnets, and each subnet has a defined address range, Azure doesn't* create default routes for subnet address ranges, because each subnet address range is within an address range of the address space of a virtual network. +* **Virtual network**: Routes traffic between address ranges within the [address space](manage-virtual-network.md#add-or-remove-an-address-range) of a virtual network. Azure creates a route with an address prefix that corresponds to each address range defined within the address space of a virtual network. If the virtual network address space has multiple address ranges defined, Azure creates an individual route for each address range. Azure automatically routes traffic between subnets using the routes created for each address range. You don't need to define gateways for Azure to route traffic between subnets. Though a virtual network contains subnets, and each subnet has a defined address range, Azure doesn't create default routes for subnet address ranges. This is because each subnet address range is within an address range of the address space of a virtual network. * **Internet**: Routes traffic specified by the address prefix to the Internet. The system default route specifies the 0.0.0.0/0 address prefix. If you don't override Azure's default routes, Azure routes traffic for any address not specified by an address range within a virtual network, to the Internet, with one exception. If the destination address is for one of Azure's services, Azure routes the traffic directly to the service over Azure's backbone network, rather than routing the traffic to the Internet. Traffic between Azure services doesn't traverse the Internet, regardless of which Azure region the virtual network exists in, or which Azure region an instance of the Azure service is deployed in. You can override Azure's default system route for the 0.0.0.0/0 address prefix with a [custom route](#custom-routes). * **None**: Traffic routed to the **None** next hop type is dropped, rather than routed outside the subnet. Azure automatically creates default routes for the following address prefixes: @@ -118,11 +118,6 @@ The same command for CLI will be: az network route-table route create -g MyResourceGroup --route-table-name MyRouteTable -n StorageRoute --address-prefix Storage --next-hop-type VirtualAppliance --next-hop-ip-address 10.0.100.4 ``` - -#### Known Issues (April 2021) - -When BGP routes are present or a Service Endpoint is configured on your subnet, routes may not be evaluated with the correct priority. This feature doesn't currently work for dual stack (IPv4+IPv6) virtual networks. A fix for these scenarios is currently in progress
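
The route example above uses the **Storage** service tag as an address prefix. The same tags can be used in network security group rules (as described in the service tags changes earlier in this diff), and the current tag-to-IP-range mapping can be retrieved with the Service Tag Discovery API. The Azure CLI sketch below is illustrative only; the resource group and NSG names (**MyResourceGroup**, **myNSG**) are placeholders, not resources from these articles.

```azurecli
# Sketch only: resource names are placeholders.
# Allow outbound traffic from the virtual network to Azure Storage in West US
# by using the regional Storage.WestUS service tag as the destination.
az network nsg rule create \
    --resource-group MyResourceGroup \
    --nsg-name myNSG \
    --name Allow-Storage-WestUS \
    --priority 100 \
    --direction Outbound \
    --access Allow \
    --protocol '*' \
    --source-address-prefixes VirtualNetwork \
    --destination-address-prefixes Storage.WestUS \
    --destination-port-ranges '*'

# Retrieve the current service tag list and IP ranges via the Service Tag Discovery API.
az network list-service-tags --location westus --output json
```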
                    - ## Next hop types across Azure tools The name displayed and referenced for next hop types is different between the Azure portal and command-line tools, and the Azure Resource Manager and classic deployment models. The following table lists the names used to refer to each next hop type with the different tools and [deployment models](../azure-resource-manager/management/deployment-models.md?toc=%2fazure%2fvirtual-network%2ftoc.json): diff --git a/articles/virtual-wan/TOC.yml b/articles/virtual-wan/TOC.yml index f88daa0c8c5fb..53a6c6fbd6619 100644 --- a/articles/virtual-wan/TOC.yml +++ b/articles/virtual-wan/TOC.yml @@ -59,6 +59,8 @@ href: about-virtual-hub-routing.md - name: Routing scenarios items: + - name: Virtual hub routing preference + href: about-virtual-hub-routing-preference.md - name: Any-to-any href: scenario-any-to-any.md - name: Isolating VNets @@ -191,6 +193,8 @@ items: - name: Configure virtual hub routing href: how-to-virtual-hub-routing.md + - name: Configure virtual hub routing preference + href: howto-virtual-hub-routing-preference.md - name: View virtual hub effective routes href: effective-routes-virtual-hub.md - name: How to configure routing intent and policies @@ -219,6 +223,8 @@ href: monitor-virtual-wan.md - name: Azure Monitor Insights href: azure-monitor-insights.md + - name: BGP dashboard + href: monitor-bgp-dashboard.md - name: Configure S2S VPN packet captures items: - name: Azure portal diff --git a/articles/virtual-wan/about-virtual-hub-routing-preference.md b/articles/virtual-wan/about-virtual-hub-routing-preference.md new file mode 100644 index 0000000000000..0a1bcec8e4169 --- /dev/null +++ b/articles/virtual-wan/about-virtual-hub-routing-preference.md @@ -0,0 +1,114 @@ +--- +title: 'Virtual WAN virtual hub routing preference - Preview' +titleSuffix: Azure Virtual WAN +description: Learn about Virtual WAN Virtual virtual hub routing preference. +author: cherylmc +ms.service: virtual-wan +ms.topic: conceptual +ms.date: 05/31/2022 +ms.author: cherylmc +--- +# Virtual hub routing preference (Preview) + +A Virtual WAN virtual hub connects to virtual networks (VNets) and on-premises using connectivity gateways, such as site-to-site (S2S) VPN gateway, ExpressRoute (ER) gateway, point-to-site (P2S) gateway, and SD-WAN Network Virtual Appliance (NVA). The virtual hub router provides central route management and enables advanced routing scenarios using route propagation, route association, and custom route tables. + +The virtual hub router takes routing decisions using built-in route selection algorithm. To influence routing decisions in virtual hub router towards on-premises, we now have a new Virtual WAN hub feature called **Hub routing preference** (HRP). When a virtual hub router learns multiple routes across S2S VPN, ER and SD-WAN NVA connections for a destination route-prefix in on-premises, the virtual hub router’s route selection algorithm will adapt based on the hub routing preference configuration and selects the best routes. You can now configure **Hub routing preference** using the [Azure Preview portal](https://portal.azure.com/?feature.customRouterAsn=true&feature.virtualWanRoutingPreference=true#home). + +> [!IMPORTANT] +> The Virtual WAN feature **Hub routing preference** is currently in public preview. If you are interested in trying this feature, please follow the documentation below. +This public preview is provided without a service-level agreement and shouldn't be used for production workloads. 
Certain features might not be supported, might have constrained capabilities, or might not be available in all Azure locations. For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). +> + +## Route selection algorithm for virtual hub + +This section explains the route selection algorithm in a virtual hub along with the control provided by HRP. When a virtual hub has multiple routes to a destination prefix for on-premises, the best route or routes are selected in the order of preference as follows: + +1. Select routes with Longest Prefix Match (LPM). + +1. Prefer static routes over BGP routes. + +1. Select best path based on the HRP configuration. There are three possible configurations for HRP and the route preference changes accordingly. + + * **ExpressRoute** (This is the default setting.) + + 1. Prefer routes from connections local to a virtual hub over routes learned from remote hub. + 1. If there are still routes from both ER and S2S VPN connections, then see below. Else proceed to the next rule. + * If all the routes are local to the hub, then choose routes learned from ER connections because HRP is set to ER. + * If all the routes are through remote hubs, then choose route from S2S VPN connection over ER connections because any transit between ER to ER is supported only if the circuits have ER Global Reach enabled and an Azure Firewall or NVA is provisioned inside the virtual hub. + 1. Then, prefer the routes with the shortest BGP AS-Path length. + + * **VPN** + + 1. Prefer routes from connections local to a virtual hub over routes learned from remote hub. + 1. If there are routes from both ER and S2S VPN connections, then choose S2S VPN routes. + 1. Then, prefer the routes with the shortest BGP AS-Path length. + + * **AS Path** + + 1. Prefer routes with the shortest BGP AS-Path length irrespective of the source of the route advertisements. For example, whether the routes are learned from on-premises connected via S2S VPN or ER. + 1. Prefer routes from connections local to the virtual hub over routes learned from remote hub. + 1. If there are routes from both ER and S2S VPN connections, then see below. Else proceed to the next rule. + * If all the routes are local to the virtual hub, then choose routes from ER connections. + * If all the routes are through remote virtual hubs, then choose routes from S2S VPN connections. + +1. If there are still multiple routes, load-balance across all paths using equal-cost multi-path (ECMP) routing. + +**Things to note:** + +* When there are multiple virtual hubs in a Virtual WAN scenario, a virtual hub selects the best routes using the route selection algorithm described above, and then advertises them to the other virtual hubs in the virtual WAN. + +* **Limitation:** If a route-prefix is reachable via ER or VPN connections, and via virtual hub SD-WAN NVA, then the latter route is ignored by the route-selection algorithm. Therefore, the flows to prefixes reachable only via virtual hub SD-WAN NVA will ever take the route through the NVA. This is a limitation during the Preview phase of the **Hub routing preference** feature. + +## Routing scenarios + +Virtual WAN hub routing preference is beneficial when multiple on-premises are advertising routes to same destination prefixes, which can happen in customer Virtual WAN scenarios that use any of the following setups. + +* Virtual WAN hub using ER connections as primary and VPN connections as back-up. 
+* Virtual WAN with connections to multiple on-premises and customer is using one on-premises site as active, and another as standby for a service deployed using the same IP address ranges in both the sites. +* Virtual WAN has both VPN and ER connections simultaneously and the customer is distributing services across connections by controlling route advertisements from on-premises. + +The example below is a hypothetical Virtual WAN deployment that encompasses multiple scenarios described above. We'll use it to demonstrate the route selection by a virtual hub. + +A brief overview of the setup: + +* Each on-premises site is connected to one or more of the virtual hubs Hub_1 or Hub_2 using S2S VPN, or ER circuit, or SD-WAN NVA connections. +* For each on-premises site, the ASN it uses and the route-prefixes it advertises are listed in the diagram. Notice that there are multiple routes for several route-prefixes. + + :::image type="content" source="./media/about-virtual-hub-routing-preference/diagram.png" alt-text="Example diagram for hub-route-preference scenario." lightbox="./media/about-virtual-hub-routing-preference/diagram.png"::: + +Let’s say there are flows from a virtual network VNET1 connected to Hub_1 to various destination route-prefixes advertised by the on-premises. The path that each of those flows takes for different configurations of Virtual WAN **hub routing preference** on Hub_1 and Hub_2 is described in the tables below. The paths have been labeled in the diagram and referred to in the tables below for ease of understanding. + +**When only local routes are available:** + +| Flow destination route-prefix | HRP of Hub_1 | HRP of Hub_2 | Path used by flow | All possible paths | Explanation | +| --- | --- | --- | --- | --- |---| +| 10.61.1.5 | AS Path | N/A | 4 | 1,2,3,4 | Paths 1, 4 and 5 have the shortest AS Path but ER takes precedence over VPN, so path 4 is chosen. | +| 10.61.1.5 | VPN | N/A | 1 | 1,2,3,4 | VPN route is preferred over ER, so paths 1 and 2 are preferred, but path 1 has the shorter AS Path. | +| 10.61.1.5 | ER | N/A | 4 | 1,2,3,4 | ER routes 3 and 4 are selected, but path 4 has the shorter AS Path. | + +**When only remote routes are available:** + +| Flow destination route-prefix | HRP of Hub_1 | HRP of Hub_2 | Path used by flow | All possible paths | Explanation | +| --- | --- | --- | --- | --- |---| +| 10.62.1.5 | Any setting | AS Path or ER | ECMP across 9 & 10 | 7,8,9,10,11 | All available paths are remote and have equal AS Path, so ER paths 9 and 10 are chosen and advertised by Hub_2. Hub_1’s HRP setting has no impact. | +| 10.62.1.5 | Any setting | VPN | ECMP across 7 & 8 | 7,8,9,10,11 | The Hub_2 will only advertise best routes 7 & 8 and they're only choices for Hub_1, so Hub_1’s HRP setting has no impact. | + +**When local and remote routes are available:** + +| Flow destination route-prefix | HRP of Hub_1 | HRP of Hub_2 | Path used by flow | All possible paths | Explanation | +| --- | --- | --- | --- | --- |---| +| 10.50.2.5  | Any setting | Any setting | 1 | 1,2,3,4,7,8,9,10,11 | Hub_2 will advertise only 7 due to LPM. Hub_1 selects 1 due to LPM and being local route. | +| 10.50.1.5 | AS Path or ER | Any setting | 4 | 1,2,3,4,7,8,9,10,11 | Hub_2 will advertise different routes based on its HRP setting, but Hub_1 will select 4 due to being local, ER route with the shortest AS Path. 
| +| 10.50.1.5 | VPN | Any setting | 1 | 1,2,3,4,7,8,9,10,11 | Hub_2 will advertise different routes based on its HRP setting, but Hub_1 will select 1 due to being local, VPN route with the shortest AS Path. | +| 10.55.2.5 | AS Path | AS Path or ER | 9 | 2,3,8,9 | Hub_2 will only advertise 9, because 8 and 9 have same AS Path but 9 is ER route. On Hub_1, among 2, 3 and 9 routes, it selects 9 due to having the shortest AS Path. | +| 10.55.2.5 | AS Path | VPN | 8 | 2,3,8,9 | Hub_2 will only advertise 8, because 8 and 9 have same AS Path but 8 is VPN route. On Hub_1, among 2, 3 and 8 routes, it selects 8 due to having the shortest AS Path. | +| 10.55.2.5 | ER | Any setting | 3 | 2,3,8,9 | Hub_2 will advertise different routes based on its HRP setting, but Hub_1 will select 3 due to being local and ER. | +| 10.55.2.5 | VPN | Any setting | 2 | 2,3,8,9 | Hub_2 will advertise different routes based on its HRP setting, but Hub_1 will select 2 due to being local and VPN. | + +**Key takeaways:** + +* To prefer remote routes over local routes on a virtual hub, set its hub routing preference to AS Path and increase the AS Path length of the local routes. + +## Next steps + +* To use virtual hub routing preference, see [How to configure virtual hub routing preference](howto-virtual-hub-routing-preference.md). diff --git a/articles/virtual-wan/how-to-routing-policies.md b/articles/virtual-wan/how-to-routing-policies.md index 8663178729066..5d5476ecb8047 100644 --- a/articles/virtual-wan/how-to-routing-policies.md +++ b/articles/virtual-wan/how-to-routing-policies.md @@ -16,10 +16,10 @@ ms.author: wellee >[!NOTE] > Hub Routing Intent is currently in gated public preview. > -> This preview is provided without a service-level agreement and isn't recommended for production workloads. Some features might be unsupported or have constrained capabilities. For more information, see [Supplemental terms of use for Microsoft Azure previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). +> The preview for Hub Routing Intent impacts routing and route advertisements for **all** connections to the Virtual Hub (Point-to-site VPN, Site-to-site VPN, ExpressRoute, NVA, Virtual Network). + +This preview is provided without a service-level agreement and isn't recommended for production workloads. Some features might be unsupported or have constrained capabilities. For more information, see [Supplemental terms of use for Microsoft Azure previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). > -> Inspecting inter-hub traffic via Azure Firewall or NVA between Virtual Hubs deployed in **different** Azure regions is available in select Azure Regions. Please reach out to previewinterhub@microsoft.com for more details. -> > To obtain access to the preview, please deploy any Virtual WAN hubs and gateways (Site-to-site VPN Gateways, Point-to-site Gateways and ExpressRouteGateways) and then reach out to previewinterhub@microsoft.com with the Virtual WAN ID, Subscription ID and Azure Region you wish to configure Routing Intent in. Expect a response within 48 business hours (Monday-Friday) with confirmation of feature enablement. Please note that any gateways created after feature enablement will need to be upgraded by the Virtual WAN team. 
## Background
@@ -31,10 +31,12 @@ Routing Intent and Routing policies allow you to specify how the Virtual WAN hub
 
 While Private Traffic includes both branch and Virtual Network address prefixes, Routing Policies considers them as one entity within the Routing Intent Concepts.
 
 >[!NOTE]
-> Inter-region traffic can be inspected by Azure Firewall or NVA for Virtual Hubs deployed in select Azure regions. For available regions, please contact previewinterhub@microsoft.com.
+> Inter-region traffic **cannot** be inspected by Azure Firewall or NVA.
+
+* **Internet Traffic Routing Policy**: When an Internet Traffic Routing Policy is configured on a Virtual WAN hub, all branch (User VPN (Point-to-site VPN), Site-to-site VPN, and ExpressRoute) and Virtual Network connections to that Virtual WAN Hub will forward Internet-bound traffic to the Azure Firewall resource, Third-Party Security provider or **Network Virtual Appliance** specified as part of the Routing Policy.
-* **Internet Traffic Routing Policy**: When an Internet Traffic Routing Policy is configured on a Virtual WAN hub, all branch (User VPN (Point-to-site VPN), Site-to-site VPN, and ExpressRoute) and Virtual Network connections to that Virtual WAN Hub will forward Internet-bound traffic to the Azure Firewall resource, Third-Party Security provider or Network Virtual Appliance specified as part of the Routing Policy.
+  In other words, when an Internet Traffic Routing Policy is configured on a Virtual WAN hub, the Virtual WAN will propagate a **default** route to all spokes and Gateways. In the case of a **Network Virtual Appliance**, this default route is propagated through BGP via the vWAN Route Service and learned by the BGP speakers inside the **Network Virtual Appliance**.
 
 * **Private Traffic Routing Policy**: When a Private Traffic Routing Policy is configured on a Virtual WAN hub, **all** branch and Virtual Network traffic in and out of the Virtual WAN Hub including inter-hub traffic will be forwarded to the Next Hop Azure Firewall resource or Network Virtual Appliance resource that was specified in the Private Traffic Routing Policy.
 
@@ -44,9 +46,10 @@ While Private Traffic includes both branch and Virtual Network address prefixes
 
 ## Key considerations
 
 * You will **not** be able to enable routing policies on your deployments with existing Custom Route tables configured or if there are static routes configured in your Default Route Table.
-* Currently, Private Traffic Routing Policies are not supported in Hubs with Encrypted ExpressRoute connections (Site-to-site VPN Tunnel running over ExpressRoute Private connectivity).
-* In the gated public preview of Virtual WAN Hub routing policies, inter-regional traffic is only inspected by Azure Firewall or Network Virtual Appliances deployed in the Virtual WAN Hub for traffic between select Azure regions. For more information, reach out to previewinterhub@microsoft.com.
-* Routing Intent and Routing Policies currently must be configured via the custom portal link provided in Step 3 of **Prerequisites**. Routing Intents and Policies are not supported via Terraform, PowerShell, and CLI.
+* Currently, Private Traffic Routing Policies are not supported in Hubs with Encrypted ExpressRoute connections (Site-to-site VPN Tunnel running over ExpressRoute Private connectivity).
+* In the gated public preview of Virtual WAN Hub routing policies, inter-hub traffic between hubs in different Azure regions is dropped.
+* Routing Intent and Routing Policies currently must be configured via the custom portal link provided in Step 3 of **Prerequisites**. Routing Intents and Policies are not supported via Terraform, PowerShell, and CLI.
+
 ## Prerequisites
@@ -81,7 +84,7 @@ While Private Traffic includes both branch and Virtual Network address prefixes
 
 10. Repeat steps 2-8 for other Secured Virtual WAN hubs that you want to configure Routing policies for.
 
 11. At this point, you are ready to send test traffic. Please make sure your Firewall Policies are configured appropriately to allow/deny traffic based on your desired security configurations.
 
-## Configure routing policies (through Virtual WAN portal)
+## Configure routing policies for network virtual appliances (through Virtual WAN portal)
 
 >[!NOTE]
 > The only Network Virtual Appliance deployed in the Virtual WAN hub compatible with routing intent and routing policies are listed in the [Partners section](about-nva-hub.md) as dual-role connectivity and Next-Generation Firewall solution providers.
@@ -97,6 +100,9 @@ While Private Traffic includes both branch and Virtual Network address prefixes
 
 4. If you want to configure a Private Traffic Routing Policy and have branches or virtual networks using non-IANA RFC1918 Prefixes, select **Additional Prefixes** and specify the non-IANA RFC1918 prefix ranges in the text box that comes up. Select **Done**.
 
+    > [!NOTE]
+    > At this point in time, Routing Policies for **Network Virtual Appliances** don't allow you to edit the RFC1918 prefixes. Azure vWAN will propagate the RFC 1918 aggregate prefixes to all spokes and Gateways, as well as to the BGP speakers inside the **Network Virtual Appliances**. Be mindful of the implications of propagating these prefixes into your environment, and create the appropriate policies inside your **Network Virtual Appliance** to control routing behavior. If you want to propagate more specific RFC 1918 prefixes (for example, spoke address spaces), those prefixes must also be explicitly added in the box below.
+
    :::image type="content" source="./media/routing-policies/private-prefixes-nva.png"alt-text="Screenshot showing how to configure additional private prefixes for NVA routing policies."lightbox="./media/routing-policies/private-prefixes-nva.png":::
 
 5. If you want to configure a Internet Traffic Routing Policy, under **Internet traffic** select **Network Virtual Appliance** and under **Next Hop Resource** select the Network Virtual Appliance you want to send internet-bound traffic to.
 
@@ -176,6 +182,7 @@ Consider the following configuration where Hub 1 (Normal) and Hub 2 (Secured) ar
 
 The following section describes common issues encountered when you configure Routing Policies on your Virtual WAN Hub. Read the below sections and if your issue is still unresolved, reach out to previewinterhub@microsoft.com for support. Expect a response within 48 business hours (Monday through Friday).
 
 ### Troubleshooting configuration issues
+
 * Make sure that you have gotten confirmation from previewinterhub@microsoft.com that access to the gated public preview has been granted to your subscription and chosen region. You will **not** be able to configure routing policies without being granted access to the preview.
 * After enabling the Routing Policy feature on your deployment, ensure you **only** use the custom portal link provided as part of your confirmation email. Do not use PowerShell, CLI, or REST API calls to manage your Virtual WAN deployments.
This includes creating new Branch (Site-to-site VPN, Point-to-site VPN or ExpressRoute) connections. @@ -218,7 +225,7 @@ This scenario is not supported in the gated public preview. However, reach out No. Currently, branches and Virtual Networks will egress to the internet using an Azure Firewall deployed inside of the Virtual WAN hub the branches and Virtual Networks are connected to. You cannot configure a connection to access the Internet via the Firewall in a remote hub. -### Why do I see RFC1918 prefixes advertised to my on-premises devices? +### Why do I see RFC1918 aggregate prefixes advertised to my on-premises devices? When Private Traffic Routing Policies are configured, Virtual WAN Gateways will automatically advertise static routes that are in the default route table (RFC1918 prefixes: 10.0.0.0/8,172.16.0.0/12,192.168.0.0/16) in addition to the explicit branch and Virtual Network prefixes. diff --git a/articles/virtual-wan/howto-virtual-hub-routing-preference.md b/articles/virtual-wan/howto-virtual-hub-routing-preference.md new file mode 100644 index 0000000000000..dd3925c457654 --- /dev/null +++ b/articles/virtual-wan/howto-virtual-hub-routing-preference.md @@ -0,0 +1,48 @@ +--- +title: 'Configure virtual hub routing preference - Preview' +titleSuffix: Azure Virtual WAN +description: Learn how to configure Virtual WAN virtual hub routing preference. +author: cherylmc +ms.service: virtual-wan +ms.topic: conceptual +ms.date: 05/30/2022 +ms.author: cherylmc +--- +# Configure virtual hub routing preference (Preview) + +The following steps help you configure virtual hub routing preference settings. For information about this feature, see [Virtual hub routing preference](about-virtual-hub-routing-preference.md). + +> [!IMPORTANT] +> The Virtual WAN feature **Hub routing preference** is currently in public preview. If you are interested in trying this feature, please follow the documentation below. +This public preview is provided without a service-level agreement and shouldn't be used for production workloads. Certain features might not be supported, might have constrained capabilities, or might not be available in all Azure locations. For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). +> + +## Configure + +You can configure a new virtual hub to include the virtual hub routing preference setting by using the [Azure Preview portal](https://portal.azure.com/?feature.customRouterAsn=true&feature.virtualWanRoutingPreference=true#home). Follow the steps in the [Tutorial: Create a site-to-site connection](virtual-wan-site-to-site-portal.md) article. + +To configure virtual hub routing preference for an existing virtual hub, use the following steps. + +1. Open the [Azure Preview portal](https://portal.azure.com/?feature.customRouterAsn=true&feature.virtualWanRoutingPreference=true#home). You can't use the regular Azure portal yet for this feature. + +1. Go to your virtual WAN. In the left pane, under the **Connectivity** section, click **Hubs** to view the list of hubs. Select **… > Edit virtual hub** to open the **Edit virtual hub** dialog box. + + :::image type="content" source="./media/howto-virtual-hub-routing-preference/edit-virtual-hub.png" alt-text="Screenshot shows select Edit virtual hub." 
lightbox="./media/howto-virtual-hub-routing-preference/edit-virtual-hub-expand.png"::: + + You can also click on the hub to open the virtual hub, and then under virtual hub resource, click the **Edit virtual hub** button. + + :::image type="content" source="./media/howto-virtual-hub-routing-preference/hub-edit.png" alt-text="Screenshot shows Edit virtual hub." lightbox="./media/howto-virtual-hub-routing-preference/hub-edit.png"::: + +1. On the **Edit virtual hub** page, select from the dropdown to configure the field **Hub routing preference**. To determine the setting to use, see [About virtual hub routing preference](about-virtual-hub-routing-preference.md). + + Click **Confirm** to save the settings. + + :::image type="content" source="./media/howto-virtual-hub-routing-preference/select-preference.png" alt-text="Screenshot shows the dropdown showing ExpressRoute, VPN, and AS PATH." lightbox="./media/howto-virtual-hub-routing-preference/select-preference.png"::: + +1. After the settings have saved, you can verify the configuration on the **Overview** page for the virtual hub. + + :::image type="content" source="./media/howto-virtual-hub-routing-preference/view-preference.png" alt-text="Screenshot shows virtual hub Overview page with routing preference." lightbox="./media/howto-virtual-hub-routing-preference/view-preference-expand.png"::: + +## Next steps + +To learn more about virtual hub routing preference, see [About virtual hub routing preference](about-virtual-hub-routing-preference.md). \ No newline at end of file diff --git a/articles/virtual-wan/hub-settings.md b/articles/virtual-wan/hub-settings.md index d4d4cfdb9b345..f0b9ceb0c1839 100644 --- a/articles/virtual-wan/hub-settings.md +++ b/articles/virtual-wan/hub-settings.md @@ -5,7 +5,7 @@ description: This article answers common questions about virtual hub settings an author: cherylmc ms.service: virtual-wan ms.topic: conceptual -ms.date: 05/20/2022 +ms.date: 05/30/2022 ms.author: cherylmc --- @@ -23,7 +23,7 @@ You can create an empty virtual hub (a virtual hub that doesn't contain any gate By default, the virtual hub router is automatically configured to deploy with a virtual hub capacity of 2 routing infrastructure units. This supports a minimum of 3 Gbps aggregate throughput, and 2000 connected VMs deployed in all virtual networks connected to that virtual hub. -When you deploy a new virtual hub, you can specify additional routing infrastructure units to increase the default virtual hub capacity in increments of 1 Gbps and 1000 VMs. This feature gives you the ability to secure upfront capacity without having to wait for the virtual hub to scale out when more throughput is needed. The scale unit on which the virtual hub is created becomes the minimum capacity. You can view routing infrastructure units, router Gbps, and number of VMs supported, in the Azure portal **Virtual hub** pages for **Create virtual hub** and **Edit virtual hub**. +When you deploy a new virtual hub, you can specify additional routing infrastructure units to increase the default virtual hub capacity in increments of 1 Gbps and 1000 VMs. This feature gives you the ability to secure upfront capacity without having to wait for the virtual hub to scale out when more throughput is needed. The scale unit on which the virtual hub is created becomes the minimum capacity. Creating a virtual hub without a gateway takes about 5 - 7 minutes while creating a virtual hub and a gateway can take about 30 minutes to complete. 
You can view routing infrastructure units, router Gbps, and number of VMs supported, in the Azure portal **Virtual hub** pages for **Create virtual hub** and **Edit virtual hub**. ### Configure virtual hub capacity @@ -37,6 +37,10 @@ Adjust the virtual hub capacity when you need to support additional virtual mach To add additional virtual hub capacity, go to the virtual hub in the Azure portal. On the **Overview** page, click **Edit virtual hub**. Adjust the **Virtual hub capacity** using the dropdown, then **Confirm**. +> [!NOTE] +> When you edit virtual hub capacity, there will be data path disruption if the change in scale units has resulted in an underlying VPN GW SKU change. +> + ### Routing infrastructure unit table For pricing information, see [Azure Virtual WAN pricing](https://azure.microsoft.com/pricing/details/virtual-wan/). @@ -93,6 +97,14 @@ For pricing information, see [Azure Virtual WAN pricing](https://azure.microsoft | 49 | 49 | 49000 | | 50 | 50 | 50000 | +## Virtual hub routing preference (Preview) + +A Virtual WAN virtual hub connects to virtual networks (VNets) and on-premises sites using connectivity gateways, such as site-to-site (S2S) VPN gateway, ExpressRoute (ER) gateway, point-to-site (P2S) gateway, and SD-WAN Network Virtual Appliance (NVA). The virtual hub router provides central route management and enables advanced routing scenarios using route propagation, route association, and custom route tables. When a virtual hub router makes routing decisions, it considers the configuration of such capabilities. + +Previously, there wasn't a configuration option for you to use to influence routing decisions within virtual hub router for prefixes in on-premises sites. These decisions relied on the virtual hub router's built-in route selection algorithm and the options available within gateways to manage routes before they reach the virtual hub router. To influence routing decisions in virtual hub router for prefixes in on-premises sites, you can now adjust the **Hub routing preference** using the [Azure Preview portal](https://portal.azure.com/?feature.customRouterAsn=true&feature.virtualWanRoutingPreference=true#home). + +For more information, see [About virtual hub routing preference](about-virtual-hub-routing-preference.md). + ## Gateway settings Each virtual hub can contain multiple gateways (site-to-site, point-to-site User VPN, and ExpressRoute). When you create your virtual hub, you can configure gateways at the same time, or create an empty virtual hub and add the gateway settings later. When you edit a virtual hub, you'll see settings that pertain to gateways. For example, gateway scale units. @@ -115,4 +127,4 @@ The following table shows the configurations available for each virtual WAN type ## Next steps -For virtual hub routing, see [About virtual hub routing](about-virtual-hub-routing.md). \ No newline at end of file +For virtual hub routing, see [About virtual hub routing](about-virtual-hub-routing.md). 
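
The hub-settings changes above focus on portal configuration of virtual hubs and their capacity. For comparison, an empty (gateway-less) virtual hub can also be created from the Azure CLI, typically using the `virtual-wan` CLI extension that provides the `az network vhub` commands. The resource group, virtual WAN, hub name, region, and address prefix below are placeholders, and the sketch deliberately doesn't set the preview-only hub routing preference, which the articles above configure through the Azure Preview portal.

```azurecli
# Sketch only: names, region, and address prefix are placeholders.
# The az network vhub commands are typically provided by the virtual-wan extension.
az extension add --name virtual-wan

# Create an empty Standard virtual hub in an existing virtual WAN.
az network vhub create \
    --resource-group myResourceGroupVWAN \
    --name myHub \
    --vwan myVirtualWAN \
    --address-prefix 10.100.0.0/24 \
    --sku Standard \
    --location eastus
```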
diff --git a/articles/virtual-wan/media/about-virtual-hub-routing-preference/diagram.png b/articles/virtual-wan/media/about-virtual-hub-routing-preference/diagram.png new file mode 100644 index 0000000000000..a3d88b968b586 Binary files /dev/null and b/articles/virtual-wan/media/about-virtual-hub-routing-preference/diagram.png differ diff --git a/articles/virtual-wan/media/howto-virtual-hub-routing-preference/edit-virtual-hub-expand.png b/articles/virtual-wan/media/howto-virtual-hub-routing-preference/edit-virtual-hub-expand.png new file mode 100644 index 0000000000000..7a55d69bece61 Binary files /dev/null and b/articles/virtual-wan/media/howto-virtual-hub-routing-preference/edit-virtual-hub-expand.png differ diff --git a/articles/virtual-wan/media/howto-virtual-hub-routing-preference/edit-virtual-hub.png b/articles/virtual-wan/media/howto-virtual-hub-routing-preference/edit-virtual-hub.png new file mode 100644 index 0000000000000..9ad8eb5c84c31 Binary files /dev/null and b/articles/virtual-wan/media/howto-virtual-hub-routing-preference/edit-virtual-hub.png differ diff --git a/articles/virtual-wan/media/howto-virtual-hub-routing-preference/hub-edit.png b/articles/virtual-wan/media/howto-virtual-hub-routing-preference/hub-edit.png new file mode 100644 index 0000000000000..091f7e3f2c66b Binary files /dev/null and b/articles/virtual-wan/media/howto-virtual-hub-routing-preference/hub-edit.png differ diff --git a/articles/virtual-wan/media/howto-virtual-hub-routing-preference/select-preference.png b/articles/virtual-wan/media/howto-virtual-hub-routing-preference/select-preference.png new file mode 100644 index 0000000000000..c7fc5d2511dbe Binary files /dev/null and b/articles/virtual-wan/media/howto-virtual-hub-routing-preference/select-preference.png differ diff --git a/articles/virtual-wan/media/howto-virtual-hub-routing-preference/view-preference-expand.png b/articles/virtual-wan/media/howto-virtual-hub-routing-preference/view-preference-expand.png new file mode 100644 index 0000000000000..3b71ac3586960 Binary files /dev/null and b/articles/virtual-wan/media/howto-virtual-hub-routing-preference/view-preference-expand.png differ diff --git a/articles/virtual-wan/media/howto-virtual-hub-routing-preference/view-preference.png b/articles/virtual-wan/media/howto-virtual-hub-routing-preference/view-preference.png new file mode 100644 index 0000000000000..2def139453cfd Binary files /dev/null and b/articles/virtual-wan/media/howto-virtual-hub-routing-preference/view-preference.png differ diff --git a/articles/virtual-wan/media/monitor-bgp-dashboard/advertised-routes.png b/articles/virtual-wan/media/monitor-bgp-dashboard/advertised-routes.png new file mode 100644 index 0000000000000..40c437261a4bc Binary files /dev/null and b/articles/virtual-wan/media/monitor-bgp-dashboard/advertised-routes.png differ diff --git a/articles/virtual-wan/media/monitor-bgp-dashboard/bgp-dashboard.png b/articles/virtual-wan/media/monitor-bgp-dashboard/bgp-dashboard.png new file mode 100644 index 0000000000000..b14d3c1ca8683 Binary files /dev/null and b/articles/virtual-wan/media/monitor-bgp-dashboard/bgp-dashboard.png differ diff --git a/articles/virtual-wan/media/monitor-bgp-dashboard/bgp-peers.png b/articles/virtual-wan/media/monitor-bgp-dashboard/bgp-peers.png new file mode 100644 index 0000000000000..8e300415d77e1 Binary files /dev/null and b/articles/virtual-wan/media/monitor-bgp-dashboard/bgp-peers.png differ diff --git a/articles/virtual-wan/media/monitor-bgp-dashboard/learned-routes.png 
b/articles/virtual-wan/media/monitor-bgp-dashboard/learned-routes.png new file mode 100644 index 0000000000000..03d37e1698e17 Binary files /dev/null and b/articles/virtual-wan/media/monitor-bgp-dashboard/learned-routes.png differ diff --git a/articles/virtual-wan/media/monitor-bgp-dashboard/routes-advertising.png b/articles/virtual-wan/media/monitor-bgp-dashboard/routes-advertising.png new file mode 100644 index 0000000000000..b2cb5a2e75578 Binary files /dev/null and b/articles/virtual-wan/media/monitor-bgp-dashboard/routes-advertising.png differ diff --git a/articles/virtual-wan/media/monitor-bgp-dashboard/routes-learning.png b/articles/virtual-wan/media/monitor-bgp-dashboard/routes-learning.png new file mode 100644 index 0000000000000..15cc46f7cecec Binary files /dev/null and b/articles/virtual-wan/media/monitor-bgp-dashboard/routes-learning.png differ diff --git a/articles/virtual-wan/media/virtual-wan-expressroute-portal/edithub.png b/articles/virtual-wan/media/virtual-wan-expressroute-portal/edithub.png deleted file mode 100644 index fb347ee14d3d7..0000000000000 Binary files a/articles/virtual-wan/media/virtual-wan-expressroute-portal/edithub.png and /dev/null differ diff --git a/articles/virtual-wan/media/virtual-wan-site-to-site-portal/edit-gateway.png b/articles/virtual-wan/media/virtual-wan-site-to-site-portal/edit-gateway.png index fb143628f5523..d60e72cf88cca 100644 Binary files a/articles/virtual-wan/media/virtual-wan-site-to-site-portal/edit-gateway.png and b/articles/virtual-wan/media/virtual-wan-site-to-site-portal/edit-gateway.png differ diff --git a/articles/virtual-wan/monitor-bgp-dashboard.md b/articles/virtual-wan/monitor-bgp-dashboard.md new file mode 100644 index 0000000000000..e5774b5023a5b --- /dev/null +++ b/articles/virtual-wan/monitor-bgp-dashboard.md @@ -0,0 +1,94 @@ +--- +title: 'Monitor S2S VPN BGP routes - BGP dashboard' +titleSuffix: Azure Virtual WAN +description: Learn how to monitor BGP peers for site-to-site VPNs using the BGP dashboard. +author: cherylmc +ms.service: virtual-wan +ms.topic: conceptual +ms.date: 06/06/2022 +ms.author: cherylmc +--- +# Monitor site-to-site VPN BGP routes using the BGP dashboard + +This article helps you monitor Virtual WAN site-to-site VPN BGP information using the **BGP Dashboard**. Using the BGP dashboard, you can monitor BGP peers, advertised routes, and learned routes. The BGP dashboard is available for site-to-site VPNs that are configured to use BGP. The BGP dashboard can be accessed on the page for the site that you want to monitor. + +## BGP dashboard + +The following steps walk you through one way to navigate to your site and open the BGP dashboard. + +1. Go to the **Azure portal -> your virtual WAN**. +1. On your virtual WAN, in the left pane, under Connectivity, click **VPN sites**. On the VPN sites page, you can see the sites that are connected to your virtual WAN. +1. Click the site that you want to view. +1. On the page for the site, click **BGP Dashboard**. + + :::image type="content" source="./media/monitor-bgp-dashboard/bgp-dashboard.png" alt-text="Screenshot shows the overview page for the site with the B G P dashboard highlighted." lightbox="./media/monitor-bgp-dashboard/bgp-dashboard.png"::: + +## BGP peers + +1. To open the BGP Peers page, go to the **BGP Dashboard**. + +1. The **BGP Peers** page is the main view that you see when you open the BGP dashboard. + + :::image type="content" source="./media/monitor-bgp-dashboard/bgp-peers.png" alt-text="Screenshot shows the B G P Peers page." 
lightbox="./media/monitor-bgp-dashboard/bgp-peers.png"::: + +1. On the **BGP Peers** page, the following values are available: + + |Value | Description| + |---|---| + |Peer address| The BGP address of the remote connection. | + |Local address | The BGP address of the virtual wan hub. | + | Gateway instance| The instance of the virtual wan hub. | + |ASN| The Autonomous System Number. | + |Status | The status the peer is currently in.
                    Available statuses are: Connecting, Connected | + |Connected duration |The length of time the peer has been connected. HH:MM:SS | + |Routes received |The number of routes received by the remote site. | + |Messages sent |The number of messages sent to the remote site. | + |Messages received | The number of messages received from the remote site. | + +## Advertised routes + +The **Advertised Routes** page contains the routes that are being advertised to remote sites. + +1. On the **BGP Peers** page, click **Routes the site-to-site gateway is advertising** to show the **Advertised Routes** page. + + :::image type="content" source="./media/monitor-bgp-dashboard/routes-advertising.png" alt-text="Screenshot shows B G P peers page with routes the site-to-site gateway is advertising highlighted." lightbox="./media/monitor-bgp-dashboard/routes-advertising.png"::: + +1. On the **Advertised Routes** page, you can view the top 50 BGP routes. To view all routes, click **Download advertised routes**. + + :::image type="content" source="./media/monitor-bgp-dashboard/advertised-routes.png" alt-text="Screenshot shows the Advertised Routes page with Download advertised routes highlighted." lightbox="./media/monitor-bgp-dashboard/advertised-routes.png"::: + +1. On the **Advertised Routes** page, the following values are available: + + |Value | Description| + |---|---| + | Network |The address prefix that is being advertised. | + | Link Name | The name of the link. | + | Local address | A BGP address of the virtual wan hub.| + | Next hop | The next hop address for the prefix. | + |AS Path | The BGP AS path attribute. | + +## Learned routes + +The **Learned Routes** page shows the routes that are learned. + +1. On the **BGP Peers** page, click **Routes the site-to-site gateway is learning** to show the **Learned Routes** page. + + :::image type="content" source="./media/monitor-bgp-dashboard/routes-learning.png" alt-text="Screenshot shows B G P peers page with routes the site-to-site gateway is learning highlighted." lightbox="./media/monitor-bgp-dashboard/routes-learning.png"::: + +1. On the **Learned Routes** page, you can view the top 50 BGP routes. To view all routes, click **Download learned routes**. + + :::image type="content" source="./media/monitor-bgp-dashboard/learned-routes.png" alt-text="Screenshot shows the Learned Routes page with Download advertised routes highlighted." lightbox="./media/monitor-bgp-dashboard/learned-routes.png"::: + +1. On the **Learned Routes** page, the following values are available: + + |Value | Description| + |---|---| + | Network | The address prefix that is being advertised. | + | Link Name |The name of the link. | + |Local address |A BGP address of the virtual wan hub. | + |Source Peer |The address the routes is being learned from. | + | AS Path | The BGP AS path attribute. | + +## Next steps + +For more monitoring information, see [Monitoring Azure Virtual WAN](monitor-virtual-wan.md). diff --git a/articles/virtual-wan/monitor-virtual-wan.md b/articles/virtual-wan/monitor-virtual-wan.md index 06f16defb2086..f05d35e2b6705 100644 --- a/articles/virtual-wan/monitor-virtual-wan.md +++ b/articles/virtual-wan/monitor-virtual-wan.md @@ -1,19 +1,17 @@ --- title: 'Monitoring Azure Virtual WAN' description: Learn about Azure Virtual WAN logs and metrics using Azure Monitor. 
-services: virtual-wan author: cherylmc - ms.service: virtual-wan ms.topic: how-to -ms.date: 06/30/2021 +ms.date: 05/25/2022 ms.author: cherylmc --- # Monitoring Virtual WAN -You can monitor Azure Virtual WAN using Azure Monitor. Virtual WAN is a networking service that brings together many networking, security, and routing functionalities to provide a single operational interface. Virtual WAN VPN gateways, ExpressRoute gateways, and Azure Firewall have logging and metrics available through Azure Monitor. +You can monitor Azure Virtual WAN using Azure Monitor. Virtual WAN is a networking service that brings together many networking, security, and routing functionalities to provide a single operational interface. Virtual WAN VPN gateways, ExpressRoute gateways, and Azure Firewall have logging and metrics available through Azure Monitor. This article discusses metrics and diagnostics that are available through the portal. Metrics are lightweight and can support near real-time scenarios, making them useful for alerting and fast issue detection. @@ -29,7 +27,6 @@ Diagnostics and logging configuration must be done from there accessing the **Di :::image type="content" source="./media/monitor-virtual-wan/firewall-diagnostic-settings.png" alt-text="Screenshot shows Firewall diagnostic settings."::: - ## Metrics Metrics in Azure Monitor are numerical values that describe some aspect of a system at a particular time. Metrics are collected every minute, and are useful for alerting because they can be sampled frequently. An alert can be fired quickly with relatively simple logic. @@ -71,7 +68,7 @@ $MetricInformation.Data * Minimum – Minimum bytes that were sent during the selected time grain period. * Maximum – Maximum bytes that were sent during the selected time grain period * Total – Total bytes/sec that were sent during the selected time grain period. - + ### Site-to-site VPN gateways The following metrics are available for Azure site-to-site VPN gateways: @@ -129,14 +126,15 @@ The following metrics are available for Azure ExpressRoute gateways: | Metric | Description| | --- | --- | -| **BitsInPerSecond** | Bits per second ingressing Azure through the ExpressRoute Gateway. | -| **BitsOutPerSecond** | Bits per second egressing Azure through the ExpressRoute Gateway | -| **CPU Utilization** | CPU Utilization of the ExpressRoute Gateway.| -| **Packets per second** | Total Packets received on ExpressRoute Gateway per second.| -| **Count of routes advertised to peer**| Count of Routes Advertised to Peer by ExpressRoute Gateway. | -| **Count of routes learned from peer**| Count of Routes Learned from Peer by ExpressRoute Gateway.| -| **Frequency of routes changed** | Frequency of Route changes in ExpressRoute Gateway.| -| **Number of VMs in Virtual Network**| Number of VM's that use this ExpressRoute Gateway.| +| **BitsInPerSecond** | Bits per second ingressing Azure via the ExpressRoute gateway, which can be further split for specific connections. | +| **BitsOutPerSecond** | Bits per second egressing Azure via the ExpressRoute gateway, which can be further split for specific connections. | +| **Bits Received Per Second** | Total Bits received on ExpressRoute gateway per second. | +| **CPU Utilization** | CPU Utilization of the ExpressRoute gateway.| +| **Packets per second** | Total Packets received on ExpressRoute gateway per second.| +| **Count of routes advertised to peer**| Count of Routes Advertised to Peer by ExpressRoute gateway.
| +| **Count of routes learned from peer**| Count of Routes Learned from Peer by ExpressRoute gateway.| +| **Frequency of routes changed** | Frequency of Route changes in ExpressRoute gateway.| +| **Number of VMs in Virtual Network**| Number of VMs that use this ExpressRoute gateway.| ### View gateway metrics @@ -221,8 +219,8 @@ In order to execute the query, you have to open the Log Analytics resource you c :::image type="content" source="./media/monitor-virtual-wan/log-analytics-query-samples.png" alt-text="Log Analytics Query Samples."::: -For additional Log Analytics query samples for Azure VPN Gateway, both Site-to-Site and Point-to-Site, you can visit the page [Troubleshoot Azure VPN Gateway using diagnostic logs](../vpn-gateway/troubleshoot-vpn-with-azure-diagnostics.md). -For Azure Firewall, a [workbook](../firewall/firewall-workbook.md) is provided to make log analysis easier. Using its graphical interface, it will be possible to investigate into the diagnostic data without manually writing any Log Analytics query. +For additional Log Analytics query samples for Azure VPN Gateway, both Site-to-Site and Point-to-Site, you can visit the page [Troubleshoot Azure VPN Gateway using diagnostic logs](../vpn-gateway/troubleshoot-vpn-with-azure-diagnostics.md). +For Azure Firewall, a [workbook](../firewall/firewall-workbook.md) is provided to make log analysis easier. Using its graphical interface, you can investigate the diagnostic data without manually writing any Log Analytics query. ## Activity logs diff --git a/articles/virtual-wan/virtual-wan-about.md b/articles/virtual-wan/virtual-wan-about.md index bf0aa788c51b9..369c44826b4b3 100644 --- a/articles/virtual-wan/virtual-wan-about.md +++ b/articles/virtual-wan/virtual-wan-about.md @@ -5,7 +5,7 @@ author: cherylmc ms.service: virtual-wan ms.topic: overview -ms.date: 05/20/2022 +ms.date: 06/07/2022 ms.author: cherylmc # Customer intent: As someone with a networking background, I want to understand what Virtual WAN is and if it is the right choice for my Azure network. --- @@ -117,7 +117,7 @@ Connectivity between the virtual network connections assumes, by default, a maxi Virtual WAN allows transit connectivity between VPN and ExpressRoute. This implies that VPN-connected sites or remote users can communicate with ExpressRoute-connected sites. There is also an implicit assumption that the **Branch-to-branch flag** is enabled and BGP is supported in VPN and ExpressRoute connections. This flag can be located in the Azure Virtual WAN settings in Azure portal. All route management is provided by the virtual hub router, which also enables transit connectivity between virtual networks. -### Custom Routing +### Custom routing Virtual WAN provides advanced routing enhancements. Ability to set up custom route tables, optimize virtual network routing with route association and propagation, logically group route tables with labels and simplify numerous network virtual appliances (NVAs) or shared services routing scenarios. @@ -137,13 +137,14 @@ If you have pre-existing routes in Routing section for the hub in the Azure port ## Gated public preview -The following features are currently in gated public preview. +The following features are currently in gated public preview. If, after working with the listed articles, you have questions or require support, please reach out to the contact alias that corresponds to the feature.
-| Feature | Description | -| ---------- | --------- | -| Routing intent and policies enabling Inter-hub security | This feature allows customers to configure internet-bound, private, or inter-hub traffic flow through the Azure Firewall. For more information, see [Routing intent and policies](../virtual-wan/how-to-routing-policies.md).| -| Hub-to-hub over ER preview link | This feature allows traffic between 2 hubs traverse through the Azure Virtual WAN router in each hub and uses a hub-to-hub path instead of the ExpressRoute path (which traverses through the Microsoft edge routers/MSEE). For more information, see [Hub-to-hub over ER preview link](virtual-wan-faq.md#expressroute-bow-tie).| -| BGP peering with a virtual hub | This feature provides the ability for the virtual hub to pair with and directly exchange routing information through Border Gateway Protocol (BGP) routing protocol. For more information, see [BGP peering with a virtual hub](create-bgp-peering-hub-portal.md) and [How to peer BGP with a virtual hub](scenario-bgp-peering-hub.md).| +| Feature | Description | Contact alias | +| ---------- | --------- | ---| +| Routing intent and policies enabling Inter-hub security | This feature allows you to configure internet-bound, private, or inter-hub traffic flow through the Azure Firewall. For more information, see [Routing intent and policies](../virtual-wan/how-to-routing-policies.md).| previewinterhub@microsoft.com | +| Hub-to-hub over ER preview link | This feature allows traffic between 2 hubs to traverse through the Azure Virtual WAN router in each hub and uses a hub-to-hub path instead of the ExpressRoute path (which traverses through the Microsoft edge routers/MSEE). For more information, see [Hub-to-hub over ER preview link](virtual-wan-faq.md#expressroute-bow-tie).| previewpreferh2h@microsoft.com | +| BGP peering with a virtual hub | This feature provides the ability for the virtual hub to pair with and directly exchange routing information through the Border Gateway Protocol (BGP). For more information, see [BGP peering with a virtual hub](create-bgp-peering-hub-portal.md) and [How to peer BGP with a virtual hub](scenario-bgp-peering-hub.md).| previewbgpwithvhub@microsoft.com | +| Virtual hub routing preference | This feature allows you to influence routing decisions for the virtual hub router. For more information, see [Virtual hub routing preference](about-virtual-hub-routing-preference.md). | Coming soon | ## FAQ diff --git a/articles/virtual-wan/virtual-wan-expressroute-portal.md b/articles/virtual-wan/virtual-wan-expressroute-portal.md index b66ec1074eb27..b19857d1434f8 100644 --- a/articles/virtual-wan/virtual-wan-expressroute-portal.md +++ b/articles/virtual-wan/virtual-wan-expressroute-portal.md @@ -1,12 +1,10 @@ --- title: 'Tutorial: Create ExpressRoute connections using Azure Virtual WAN' description: In this tutorial, learn how to use Azure Virtual WAN to create ExpressRoute connections to Azure and on-premises environments. -services: virtual-wan author: cherylmc - ms.service: virtual-wan ms.topic: tutorial -ms.date: 04/27/2021 +ms.date: 05/25/2022 ms.author: cherylmc # Customer intent: As someone with a networking background, I want to connect my corporate on-premises network(s) to my VNets using Virtual WAN and ExpressRoute.
--- @@ -41,25 +39,11 @@ Verify that you have met the following criteria before beginning your configurat ## Create a virtual WAN -From a browser, navigate to the [Azure portal](https://portal.azure.com) and sign in with your Azure account. - -1. Navigate to the Virtual WAN page. In the portal, click **+Create a resource**. Type **Virtual WAN** into the search box and select Enter. -2. Select **Virtual WAN** from the results. On the Virtual WAN page, click **Create** to open the Create WAN page. -3. On the **Create WAN** page, on the **Basics** tab, fill in the following fields: - - :::image type="content" source="./media/virtual-wan-expressroute-portal/createwan.png" alt-text="Screenshot shows Create WAN page." border="false"::: - - * **Subscription** - Select the subscription that you want to use. - * **Resource Group** - Create new or use existing. - * **Resource group location** - Choose a resource location from the dropdown. A WAN is a global resource and does not live in a particular region. However, you must select a region in order to more easily manage and locate the WAN resource that you create. - * **Name** - Type the name that you want to call your WAN. - * **Type** - Select **Standard**. You can't create an ExpressRoute gateway using the Basic SKU. -4. After you finish filling out the fields, select **Review +Create**. -5. Once validation passes, select **Create** to create the virtual WAN. +[!INCLUDE [Create a virtual WAN](../../includes/virtual-wan-create-vwan-include.md)] ## Create a virtual hub and gateway -A virtual hub is a virtual network that is created and used by Virtual WAN. It can contain various gateways, such as VPN and ExpressRoute. In this section, you will create an ExpressRoute gateway for your virtual hub. You can either create the gateway when you [create a new virtual hub](#newhub), or you can create the gateway in an [existing hub](#existinghub) by editing it. +A virtual hub is a virtual network that is created and used by Virtual WAN. It can contain various gateways, such as VPN and ExpressRoute. In this section, you will create an ExpressRoute gateway for your virtual hub. You can either create the gateway when you [create a new virtual hub](#newhub), or you can create the gateway in an [existing hub](#existinghub) by editing it. ExpressRoute gateways are provisioned in units of 2 Gbps. 1 scale unit = 2 Gbps with support up to 10 scale units = 20 Gbps. It takes about 30 minutes for a virtual hub and gateway to fully create. @@ -71,13 +55,14 @@ Create a new virtual hub. Once a hub is created, you'll be charged for the hub, ### To create a gateway in an existing hub -You can also create a gateway in an existing hub by editing it. - -1. Navigate to the virtual hub that you want to edit and select it. -2. On the **Edit virtual hub** page, select the checkbox **Include ExpressRoute gateway**. -3. Select **Confirm** to confirm your changes. It takes about 30 minutes for the hub and hub resources to fully create. +You can also create a gateway in an existing hub by editing the hub. - :::image type="content" source="./media/virtual-wan-expressroute-portal/edithub.png" alt-text="Screenshot shows editing an existing hub." border="false"::: +1. Go to the virtual WAN. +1. In the left pane, select **Hubs**. +1. On the **Virtual WAN | Hubs** page, click the hub that you want to edit. +1. On the **Virtual HUB** page, at the top of the page, click **Edit virtual hub**. +1. 
On the **Edit virtual hub** page, select the checkbox **Include ExpressRoute gateway** and adjust any other settings that you require. +1. Select **Confirm** to confirm your changes. It takes about 30 minutes for the hub and hub resources to fully create. ### To view a gateway diff --git a/articles/virtual-wan/virtual-wan-site-to-site-portal.md b/articles/virtual-wan/virtual-wan-site-to-site-portal.md index 7ce9c0793f8f4..2d916f47de186 100644 --- a/articles/virtual-wan/virtual-wan-site-to-site-portal.md +++ b/articles/virtual-wan/virtual-wan-site-to-site-portal.md @@ -1,7 +1,7 @@ --- title: 'Tutorial: Create site-to-site connections using Virtual WAN' +titleSuffix: Azure Virtual WAN description: Learn how to use Azure Virtual WAN to create a site-to-site VPN connection to Azure. -services: virtual-wan author: cherylmc ms.service: virtual-wan ms.topic: tutorial @@ -17,12 +17,12 @@ In this tutorial you learn how to: > [!div class="checklist"] > * Create a virtual WAN -> * Configure hub Basic settings +> * Configure virtual hub Basic settings > * Configure site-to-site VPN gateway settings > * Create a site -> * Connect a site to a hub -> * Connect a VPN site to a hub -> * Connect a VNet to a hub +> * Connect a site to a virtual hub +> * Connect a VPN site to a virtual hub +> * Connect a VNet to a virtual hub > * Download a configuration file > * View or edit your VPN gateway @@ -42,35 +42,35 @@ Verify that you've met the following criteria before beginning your configuratio [!INCLUDE [Create a virtual WAN](../../includes/virtual-wan-create-vwan-include.md)] -## Configure hub settings +## Configure virtual hub settings -A hub is a virtual network that can contain gateways for site-to-site, ExpressRoute, or point-to-site functionality. For this tutorial, you begin by filling out the **Basics** tab for the virtual hub and then continue on to fill out the site-to-site tab in the next section. It's also possible to create an empty hub (a hub that doesn't contain any gateways) and then add gateways (S2S, P2S, ExpressRoute, etc.) later. Once a hub is created, you'll be charged for the hub, even if you don't attach any sites or create any gateways within the hub. +A virtual hub is a virtual network that can contain gateways for site-to-site, ExpressRoute, or point-to-site functionality. For this tutorial, you begin by filling out the **Basics** tab for the virtual hub and then continue on to fill out the site-to-site tab in the next section. It's also possible to create an empty virtual hub (a virtual hub that doesn't contain any gateways) and then add gateways (S2S, P2S, ExpressRoute, etc.) later. Once a virtual hub is created, you'll be charged for the virtual hub, even if you don't attach any sites or create any gateways within the virtual hub. -[!INCLUDE [Create a hub](../../includes/virtual-wan-tutorial-s2s-hub-include.md)] +[!INCLUDE [Create a virtual hub](../../includes/virtual-wan-tutorial-s2s-hub-include.md)] -Don't create the hub yet. Continue on to the next section to configure additional settings. +Don't create the virtual hub yet. Continue on to the next section to configure additional settings. ## Configure a site-to-site gateway -In this section, you configure site-to-site connectivity settings, and then proceed to create the hub and site-to-site VPN gateway. A hub and gateway can take about 30 minutes to create. +In this section, you configure site-to-site connectivity settings, and then proceed to create the virtual hub and site-to-site VPN gateway. 
A virtual hub and gateway can take about 30 minutes to create. [!INCLUDE [Create a gateway](../../includes/virtual-wan-tutorial-s2s-gateway-include.md)] ## Create a site -In this section, you create site. Sites correspond to your physical locations. Create as many sites as you need. For example, if you have a branch office in NY, a branch office in London, and a branch office and LA, you'd create three separate sites. These sites contain your on-premises VPN device endpoints. You can create up to 1000 sites per virtual hub in a virtual WAN. If you had multiple hubs, you can create 1000 per each of those hubs. If you have Virtual WAN partner CPE device, check with them to learn about their automation to Azure. Typically, automation implies a simple click experience to export large-scale branch information into Azure, and setting up connectivity from the CPE to Azure Virtual WAN VPN gateway. For more information, see [Automation guidance from Azure to CPE partners](virtual-wan-configure-automation-providers.md). +In this section, you create a site. Sites correspond to your physical locations. Create as many sites as you need. For example, if you have a branch office in NY, a branch office in London, and a branch office in LA, you'd create three separate sites. These sites contain your on-premises VPN device endpoints. You can create up to 1000 sites per virtual hub in a virtual WAN. If you have multiple virtual hubs, you can create 1000 sites for each of those virtual hubs. If you have a Virtual WAN partner CPE device, check with them to learn about their automation to Azure. Typically, automation implies a simple click experience to export large-scale branch information into Azure, and setting up connectivity from the CPE to Azure Virtual WAN VPN gateway. For more information, see [Automation guidance from Azure to CPE partners](virtual-wan-configure-automation-providers.md). [!INCLUDE [Create a site](../../includes/virtual-wan-tutorial-s2s-site-include.md)] -## Connect the VPN site to a hub +## Connect the VPN site to a virtual hub -In this section, you connect your VPN site to the hub. +In this section, you connect your VPN site to the virtual hub. [!INCLUDE [Connect VPN sites](../../includes/virtual-wan-tutorial-s2s-connect-vpn-site-include.md)] -## Connect a VNet to the hub +## Connect a VNet to the virtual hub -In this section, you create a connection between the hub and your VNet. +In this section, you create a connection between the virtual hub and your VNet. [!INCLUDE [Connect](../../includes/virtual-wan-connect-vnet-hub-include.md)] @@ -92,7 +92,7 @@ Use the VPN device configuration file to configure your on-premises VPN device. The device configuration file contains the settings to use when configuring your on-premises VPN device. When you view this file, notice the following information: -* **vpnSiteConfiguration -** This section denotes the device details set up as a site connecting to the virtual WAN. It includes the name and public ip address of the branch device. +* **vpnSiteConfiguration -** This section denotes the device details set up as a site connecting to the virtual WAN. It includes the name and public IP address of the branch device. * **vpnSiteConnections -** This section provides information about the following settings: * **Address space** of the virtual hub(s) VNet.
                    Example: @@ -100,7 +100,7 @@ The device configuration file contains the settings to use when configuring your ``` "AddressSpace":"10.1.0.0/24" ``` - * **Address space** of the VNets that are connected to the hub.
                    Example: + * **Address space** of the VNets that are connected to the virtual hub.
                    Example: ``` "ConnectedSubnets":["10.2.0.0/16","10.3.0.0/16"] @@ -245,7 +245,7 @@ On the **Edit VPN Gateway** page, you can see the following settings: * **Public IP Address**: Assigned by Azure. * **Private IP Address**: Assigned by Azure. * **Default BGP IP Address**: Assigned by Azure. -* **Custom BGP IP Address**: This field is reserved for APIPA (Automatic Private IP Addressing). Azure supports BGP IP in the ranges 169.254.21.* and 169.254.22.*. Azure accepts BGP connections in these ranges but will dial connection with the default BGP IP. +* **Custom BGP IP Address**: This field is reserved for APIPA (Automatic Private IP Addressing). Azure supports BGP IP in the ranges 169.254.21.* and 169.254.22.*. Azure accepts BGP connections in these ranges but will dial connection with the default BGP IP. Users can specify multiple custom BGP IP addresses for each instance. The same custom BGP IP address shouldn't be used for both instances. :::image type="content" source="media/virtual-wan-site-to-site-portal/edit-gateway.png" alt-text="Screenshot shows the Edit VPN Gateway page with the Edit button highlighted." lightbox="media/virtual-wan-site-to-site-portal/edit-gateway.png"::: diff --git a/articles/vpn-gateway/index.yml b/articles/vpn-gateway/index.yml index cf4b5c88ebc08..13f3a891d3b0e 100644 --- a/articles/vpn-gateway/index.yml +++ b/articles/vpn-gateway/index.yml @@ -66,10 +66,8 @@ landingContent: url: openvpn-azure-ad-tenant.md - text: Multiple authentication types url: howto-point-to-site-multi-auth.md - - text: OpenVPN tunnel type - url: ikev2-openvpn-from-sstp.md - # Card +# Card - title: Site-to-site VPNs linkLists: - linkListType: tutorial @@ -139,4 +137,4 @@ landingContent: - text: Azure PowerShell url: /powershell/module/az.network#vpn - text: REST - url: /rest/api/network-gateway \ No newline at end of file + url: /rest/api/network-gateway diff --git a/articles/vpn-gateway/media/reset-gateway/reset-connection-expand.png b/articles/vpn-gateway/media/reset-gateway/reset-connection-expand.png new file mode 100644 index 0000000000000..d598b7118f129 Binary files /dev/null and b/articles/vpn-gateway/media/reset-gateway/reset-connection-expand.png differ diff --git a/articles/vpn-gateway/media/reset-gateway/reset-connection.png b/articles/vpn-gateway/media/reset-gateway/reset-connection.png index d598b7118f129..729be78ac0657 100644 Binary files a/articles/vpn-gateway/media/reset-gateway/reset-connection.png and b/articles/vpn-gateway/media/reset-gateway/reset-connection.png differ diff --git a/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64-expand.png b/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64-expand.png new file mode 100644 index 0000000000000..8b2d26de23129 Binary files /dev/null and b/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64-expand.png differ diff --git a/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64.png b/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64.png index 8b2d26de23129..a0283a52d5da9 100644 Binary files a/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64.png and b/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64.png differ diff --git 
a/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert-expand.png b/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert-expand.png new file mode 100644 index 0000000000000..be2bc9e9fba15 Binary files /dev/null and b/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert-expand.png differ diff --git a/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert.png b/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert.png index be2bc9e9fba15..f4d35ef277a9e 100644 Binary files a/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert.png and b/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert.png differ diff --git a/articles/vpn-gateway/point-to-site-about.md b/articles/vpn-gateway/point-to-site-about.md index f2132b2d89829..74e7df6ea2d76 100644 --- a/articles/vpn-gateway/point-to-site-about.md +++ b/articles/vpn-gateway/point-to-site-about.md @@ -42,7 +42,7 @@ The validation of the client certificate is performed by the VPN gateway and hap ### Authenticate using native Azure Active Directory authentication -Azure AD authentication allows users to connect to Azure using their Azure Active Directory credentials. Native Azure AD authentication is only supported for OpenVPN protocol and Windows 10 and 11 and also requires the use of the [Azure VPN Client](https://go.microsoft.com/fwlink/?linkid=2117554). +Azure AD authentication allows users to connect to Azure using their Azure Active Directory credentials. Native Azure AD authentication is only supported for OpenVPN protocol and Windows 10 and later and also requires the use of the [Azure VPN Client](https://go.microsoft.com/fwlink/?linkid=2117554). With native Azure AD authentication, you can leverage Azure AD's conditional access as well as Multi-Factor Authentication (MFA) features for VPN. diff --git a/articles/vpn-gateway/point-to-site-how-to-radius-ps.md b/articles/vpn-gateway/point-to-site-how-to-radius-ps.md index 735b9e5907224..6b1199a76c86b 100644 --- a/articles/vpn-gateway/point-to-site-how-to-radius-ps.md +++ b/articles/vpn-gateway/point-to-site-how-to-radius-ps.md @@ -22,7 +22,7 @@ A P2S VPN connection is started from Windows and Mac devices. Connecting clients * RADIUS server * VPN Gateway native certificate authentication -* Native Azure Active Directory authentication (Windows 10 only) +* Native Azure Active Directory authentication (Windows 10 and later only) This article helps you configure a P2S configuration with authentication using RADIUS server. If you want to authenticate using generated certificates and VPN gateway native certificate authentication instead, see [Configure a Point-to-Site connection to a VNet using VPN gateway native certificate authentication](vpn-gateway-howto-point-to-site-rm-ps.md) or [Create an Azure Active Directory tenant for P2S OpenVPN protocol connections](openvpn-azure-ad-tenant.md) for Azure Active Directory authentication. 
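As a rough illustration of what the RADIUS-based configuration looks like in Azure PowerShell, the sketch below points an existing gateway's point-to-site configuration at an on-premises RADIUS server. The gateway name, address pool, RADIUS server IP, and shared secret are placeholder values, and the exact parameter names should be confirmed against your installed Az.Network version.

```azurepowershell-interactive
# Sketch: attach a point-to-site client address pool and a RADIUS server
# to an existing VPN gateway. All values below are placeholders.
$gw     = Get-AzVirtualNetworkGateway -ResourceGroupName "TestRG" -Name "VNet1GW"
$secret = ConvertTo-SecureString -String "RadiusSharedSecret" -AsPlainText -Force
Set-AzVirtualNetworkGateway -VirtualNetworkGateway $gw `
  -VpnClientAddressPool "172.16.201.0/24" `
  -RadiusServerAddress "10.51.0.15" `
  -RadiusServerSecret $secret
```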
diff --git a/articles/vpn-gateway/point-to-site-vpn-client-configuration-radius-other.md b/articles/vpn-gateway/point-to-site-vpn-client-configuration-radius-other.md index f460cf38c8b11..f2e6689edd713 100644 --- a/articles/vpn-gateway/point-to-site-vpn-client-configuration-radius-other.md +++ b/articles/vpn-gateway/point-to-site-vpn-client-configuration-radius-other.md @@ -51,7 +51,7 @@ You can generate the VPN client configuration files by using the Azure portal, o ### Azure PowerShell -Use the [Get-AzVpnClientConfiguration](/powershell/module/az.network/get-azvpnclientconfiguration.md) cmdlet to generate the VPN client configuration for EapMSChapv2. +Use the [Get-AzVpnClientConfiguration](/powershell/module/az.network/get-azvpnclientconfiguration) cmdlet to generate the VPN client configuration for EapMSChapv2. ## View the files and configure the VPN client diff --git a/articles/vpn-gateway/reset-gateway.md b/articles/vpn-gateway/reset-gateway.md index bb86bb63444e8..eb789769bf3f9 100644 --- a/articles/vpn-gateway/reset-gateway.md +++ b/articles/vpn-gateway/reset-gateway.md @@ -3,17 +3,16 @@ title: 'Reset a VPN gateway or connection to reestablish IPsec tunnels' titleSuffix: Azure VPN Gateway description: Learn how to reset a gateway or a gateway connection to reestablish IPsec tunnels. author: cherylmc - ms.service: vpn-gateway ms.topic: how-to -ms.date: 02/22/2021 +ms.date: 05/26/2022 ms.author: cherylmc ms.custom: devx-track-azurepowershell --- # Reset a VPN gateway or a connection -Resetting an Azure VPN gateway or gateway connection is helpful if you lose cross-premises VPN connectivity on one or more Site-to-Site VPN tunnels. In this situation, your on-premises VPN devices are all working correctly, but are not able to establish IPsec tunnels with the Azure VPN gateways. This article helps you reset a VPN gateway or gateway connection. +Resetting an Azure VPN gateway or gateway connection is helpful if you lose cross-premises VPN connectivity on one or more site-to-site VPN tunnels. In this situation, your on-premises VPN devices are all working correctly, but aren't able to establish IPsec tunnels with the Azure VPN gateways. This article helps you reset a VPN gateway or gateway connection. ## What happens during a reset @@ -23,13 +22,13 @@ A VPN gateway is composed of two VM instances running in an active-standby confi When you issue the command to reset the gateway, the current active instance of the Azure VPN gateway is rebooted immediately. There will be a brief gap during the failover from the active instance (being rebooted), to the standby instance. The gap should be less than one minute. -If the connection is not restored after the first reboot, issue the same command again to reboot the second VM instance (the new active gateway). If the two reboots are requested back to back, there will be a slightly longer period where both VM instances (active and standby) are being rebooted. This will cause a longer gap on the VPN connectivity, up to 30 to 45 minutes for VMs to complete the reboots. +If the connection isn't restored after the first reboot, issue the same command again to reboot the second VM instance (the new active gateway). If the two reboots are requested back to back, there will be a slightly longer period where both VM instances (active and standby) are being rebooted. This will cause a longer gap on the VPN connectivity, up to 30 to 45 minutes for VMs to complete the reboots. 
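One way to check whether the tunnels have come back up after a reset is to poll the connection status from Azure PowerShell. The sketch below is illustrative, with placeholder resource and connection names; the same information is available on the connection's **Overview** page in the portal.

```azurepowershell-interactive
# Sketch: check the status of a site-to-site connection after a gateway reset.
# Resource group and connection names are placeholders.
Get-AzVirtualNetworkGatewayConnection -ResourceGroupName "TestRG1" -Name "VNet1toSite1" |
    Select-Object Name, ConnectionStatus
```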
-After two reboots, if you are still experiencing cross-premises connectivity problems, please open a support request from the Azure portal. +After two reboots, if you're still experiencing cross-premises connectivity problems, please open a support request from the Azure portal. ### Connection reset -When you select to reset a connection, the gateway does not reboot. Only the selected connection is reset and restored. +When you select to reset a connection, the gateway doesn't reboot. Only the selected connection is reset and restored. ## Reset a connection @@ -39,11 +38,11 @@ You can reset a connection easily using the Azure portal. 1. On the **Connection** page, select **Reset** from the left menu. 1. On the **Reset** page, click **Reset** to reset the connection. - :::image type="content" source="./media/reset-gateway/reset-connection.png" alt-text="Screenshot showing Reset."::: + :::image type="content" source="./media/reset-gateway/reset-connection.png" alt-text="Screenshot showing the Reset button selected." lightbox="./media/reset-gateway/reset-connection-expand.png"::: ## Reset a VPN gateway -Before you reset your gateway, verify the key items listed below for each IPsec Site-to-Site (S2S) VPN tunnel. Any mismatch in the items will result in the disconnect of S2S VPN tunnels. Verifying and correcting the configurations for your on-premises and Azure VPN gateways saves you from unnecessary reboots and disruptions for the other working connections on the gateways. +Before you reset your gateway, verify the key items listed below for each IPsec site-to-site (S2S) VPN tunnel. Any mismatch in the items will result in the disconnect of S2S VPN tunnels. Verifying and correcting the configurations for your on-premises and Azure VPN gateways saves you from unnecessary reboots and disruptions for the other working connections on the gateways. Verify the following items before resetting your gateway: @@ -72,11 +71,11 @@ Reset-AzVirtualNetworkGateway -VirtualNetworkGateway $gw Result: -When you receive a return result, you can assume the gateway reset was successful. However, there is nothing in the return result that indicates explicitly that the reset was successful. If you want to look closely at the history to see exactly when the gateway reset occurred, you can view that information in the [Azure portal](https://portal.azure.com). In the portal, navigate to **'GatewayName' -> Resource Health**. +When you receive a return result, you can assume the gateway reset was successful. However, there's nothing in the return result that indicates explicitly that the reset was successful. If you want to look closely at the history to see exactly when the gateway reset occurred, you can view that information in the [Azure portal](https://portal.azure.com). In the portal, navigate to **'GatewayName' -> Resource Health**. #### Classic deployment model -The cmdlet for resetting a gateway is **Reset-AzureVNetGateway**. The Azure PowerShell cmdlets for Service Management must be installed locally on your desktop. You can't use Azure Cloud Shell. Before performing a reset, make sure you have the latest version of the [Service Management (SM) PowerShell cmdlets](/powershell/azure/servicemanagement/install-azure-ps#azure-service-management-cmdlets). When using this command, make sure you are using the full name of the virtual network. Classic VNets that were created using the portal have a long name that is required for PowerShell. 
You can view the long name by using 'Get-AzureVNetConfig -ExportToFile C:\Myfoldername\NetworkConfig.xml'. +The cmdlet for resetting a gateway is **Reset-AzureVNetGateway**. The Azure PowerShell cmdlets for Service Management must be installed locally on your desktop. You can't use Azure Cloud Shell. Before performing a reset, make sure you have the latest version of the [Service Management (SM) PowerShell cmdlets](/powershell/azure/servicemanagement/install-azure-ps#azure-service-management-cmdlets). When using this command, make sure you're using the full name of the virtual network. Classic VNets that were created using the portal have a long name that is required for PowerShell. You can view the long name by using 'Get-AzureVNetConfig -ExportToFile C:\Myfoldername\NetworkConfig.xml'. The following example resets the gateway for a virtual network named "Group TestRG1 TestVNet1" (which shows as simply "TestVNet1" in the portal): @@ -105,4 +104,4 @@ az network vnet-gateway reset -n VNet5GW -g TestRG5 Result: -When you receive a return result, you can assume the gateway reset was successful. However, there is nothing in the return result that indicates explicitly that the reset was successful. If you want to look closely at the history to see exactly when the gateway reset occurred, you can view that information in the [Azure portal](https://portal.azure.com). In the portal, navigate to **'GatewayName' -> Resource Health**. +When you receive a return result, you can assume the gateway reset was successful. However, there's nothing in the return result that indicates explicitly that the reset was successful. If you want to look closely at the history to see exactly when the gateway reset occurred, you can view that information in the [Azure portal](https://portal.azure.com). In the portal, navigate to **'GatewayName' -> Resource Health**. diff --git a/articles/vpn-gateway/site-to-site-vpn-private-peering.md b/articles/vpn-gateway/site-to-site-vpn-private-peering.md index b03601abad3d5..cf68ffbd57916 100644 --- a/articles/vpn-gateway/site-to-site-vpn-private-peering.md +++ b/articles/vpn-gateway/site-to-site-vpn-private-peering.md @@ -7,7 +7,7 @@ author: cherylmc ms.service: vpn-gateway ms.topic: how-to -ms.date: 04/28/2021 +ms.date: 05/26/2022 ms.author: cherylmc --- @@ -22,7 +22,7 @@ You can configure a Site-to-Site VPN to a virtual network gateway over an Expres * It is possible to deploy Site-to-Site VPN connections over ExpressRoute private peering at the same time as Site-to-Site VPN connections via the Internet on the same VPN gateway. >[!NOTE] ->This feature is only supported on zone-redundant gateways. For example, VpnGw1AZ, VpnGw2AZ, etc. +>This feature is supported on gateways with a Standard Public IP only. > To complete this configuration, verify that you meet the following prerequisites: @@ -71,9 +71,7 @@ In both of these examples, Azure will send traffic to 10.0.1.0/24 over the VPN c ## Portal steps -1. Configure a Site-to-Site connection. For steps, see the [Site-to-site configuration](./tutorial-site-to-site-portal.md) article. Be sure to pick a zone-redundant gateway SKU for the gateway. - - Zone-redundant SKUs have “AZ” at the end of the SKU. For example, **VpnGw1AZ**. Zone-redundant gateways are only available in regions where the availability zone service is available. For information about the regions in which we support availability zones, see [Regions that support availability zones](../availability-zones/az-region.md). +1. Configure a Site-to-Site connection. 
For steps, see the [Site-to-site configuration](./tutorial-site-to-site-portal.md) article. Be sure to pick a gateway with a Standard Public IP. :::image type="content" source="media/site-to-site-vpn-private-peering/gateway.png" alt-text="Gateway Private IPs"::: 1. Enable Private IPs on the gateway. Select **Configuration**, then set **Gateway Private IPs** to **Enabled**. Select **Save** to save your changes. @@ -87,7 +85,7 @@ In both of these examples, Azure will send traffic to 10.0.1.0/24 over the VPN c ## PowerShell steps -1. Configure a Site-to-Site connection. For steps, see the [Configure a Site-to-Site VPN](./tutorial-site-to-site-portal.md) article. Be sure to pick a zone-redundant gateway SKU for the gateway. Zone-redundant SKUs have “AZ” at the end of the SKU. For example, VpnGw1AZ. +1. Configure a Site-to-Site connection. For steps, see the [Configure a Site-to-Site VPN](./tutorial-site-to-site-portal.md) article. Be sure to pick a gateway with a Standard Public IP. 1. Set the flag to use the private IP on the gateway using the following PowerShell commands: ```azurepowershell-interactive diff --git a/articles/vpn-gateway/tutorial-site-to-site-portal.md b/articles/vpn-gateway/tutorial-site-to-site-portal.md index 07f4dd2cd629f..bf0f409d431fa 100644 --- a/articles/vpn-gateway/tutorial-site-to-site-portal.md +++ b/articles/vpn-gateway/tutorial-site-to-site-portal.md @@ -6,7 +6,7 @@ author: cherylmc ms.author: cherylmc ms.service: vpn-gateway ms.topic: tutorial -ms.date: 04/29/2022 +ms.date: 05/26/2022 --- @@ -106,7 +106,7 @@ Create a local network gateway using the following values: Site-to-site connections to an on-premises network require a VPN device. In this step, you configure your VPN device. When configuring your VPN device, you need the following values: * A shared key. This is the same shared key that you specify when creating your site-to-site VPN connection. In our examples, we use a basic shared key. We recommend that you generate a more complex key to use. -* The Public IP address of your virtual network gateway. You can view the public IP address by using the Azure portal, PowerShell, or CLI. To find the Public IP address of your VPN gateway using the Azure portal, navigate to **Virtual network gateways**, then select the name of your gateway. +* The Public IP address of your virtual network gateway. You can view the public IP address by using the Azure portal, PowerShell, or CLI. To find the Public IP address of your VPN gateway using the Azure portal, go to **Virtual network gateways**, then select the name of your gateway. [!INCLUDE [Configure a VPN device](../../includes/vpn-gateway-configure-vpn-device-include.md)] @@ -122,14 +122,11 @@ Create a connection using the following values: [!INCLUDE [Add a site-to-site connection](../../includes/vpn-gateway-add-site-to-site-connection-portal-include.md)] -### To add another connection +### To configure additional connection settings (optional) -You can connect to multiple on-premises sites from the same VPN gateway. If you want to configure multiple connections, the address spaces can’t overlap between any of the connections. +You can configure additional settings for your connection, if necessary. Otherwise, skip this section and leave the defaults in place. -1. To add an additional connection, navigate to the VPN gateway, then select **Connections** to open the Connections page. -1. Select **+Add** to add your connection. 
Adjust the connection type to reflect either VNet-to-VNet (if connecting to another VNet gateway), or Site-to-site. -1. If you're connecting using Site-to-site and you haven't already created a local network gateway for the site you want to connect to, you can create a new one. -1. Specify the shared key that you want to use, then select **OK** to create the connection. +[!INCLUDE [Configure additional connection settings with screenshot](../../includes/vpn-gateway-connection-settings-portal-include.md)] ## Verify the VPN connection @@ -153,6 +150,15 @@ Resetting an Azure VPN gateway is helpful if you lose cross-premises VPN connect [!INCLUDE [reset a gateway](../../includes/vpn-gateway-reset-gw-portal-include.md)] +### Add another connection + +You can create a connection to multiple on-premises sites from the same VPN gateway. If you want to configure multiple connections, the address spaces can’t overlap between any of the connections. + +1. To add an additional connection, go to the VPN gateway, then select **Connections** to open the Connections page. +1. Select **+Add** to add your connection. Adjust the connection type to reflect either VNet-to-VNet (if connecting to another VNet gateway), or Site-to-site. +1. If you're connecting using Site-to-site and you haven't already created a local network gateway for the site you want to connect to, you can create a new one. +1. Specify the shared key that you want to use, then select **OK** to create the connection. + ### Additional configuration considerations S2S configurations can be customized in a variety of ways. For more information, see the following articles: diff --git a/articles/vpn-gateway/vpn-gateway-bgp-overview.md b/articles/vpn-gateway/vpn-gateway-bgp-overview.md index 92be4bc40b47f..22a5ae55ebbc1 100644 --- a/articles/vpn-gateway/vpn-gateway-bgp-overview.md +++ b/articles/vpn-gateway/vpn-gateway-bgp-overview.md @@ -2,32 +2,32 @@ title: 'About BGP with VPN Gateway' titleSuffix: Azure VPN Gateway description: Learn about Border Gateway Protocol (BGP) in Azure VPN, the standard internet protocol to exchange routing and reachability information between networks. -services: vpn-gateway author: cherylmc - - ms.service: vpn-gateway ms.topic: article -ms.date: 09/02/2020 +ms.date: 05/18/2022 ms.author: cherylmc - --- # About BGP with Azure VPN Gateway + This article provides an overview of BGP (Border Gateway Protocol) support in Azure VPN Gateway. -BGP is the standard routing protocol commonly used in the Internet to exchange routing and reachability information between two or more networks. When used in the context of Azure Virtual Networks, BGP enables the Azure VPN Gateways and your on-premises VPN devices, called BGP peers or neighbors, to exchange "routes" that will inform both gateways on the availability and reachability for those prefixes to go through the gateways or routers involved. BGP can also enable transit routing among multiple networks by propagating routes a BGP gateway learns from one BGP peer to all other BGP peers. +BGP is the standard routing protocol commonly used in the Internet to exchange routing and reachability information between two or more networks. When used in the context of Azure Virtual Networks, BGP enables the Azure VPN Gateways and your on-premises VPN devices, called BGP peers or neighbors, to exchange "routes" that will inform both gateways on the availability and reachability for those prefixes to go through the gateways or routers involved. 
BGP can also enable transit routing among multiple networks by propagating routes a BGP gateway learns from one BGP peer to all other BGP peers. ## Why use BGP? + BGP is an optional feature you can use with Azure Route-Based VPN gateways. You should also make sure your on-premises VPN devices support BGP before you enable the feature. You can continue to use Azure VPN gateways and your on-premises VPN devices without BGP. It is the equivalent of using static routes (without BGP) *vs.* using dynamic routing with BGP between your networks and Azure. There are several advantages and new capabilities with BGP: ### Support automatic and flexible prefix updates + With BGP, you only need to declare a minimum prefix to a specific BGP peer over the IPsec S2S VPN tunnel. It can be as small as a host prefix (/32) of the BGP peer IP address of your on-premises VPN device. You can control which on-premises network prefixes you want to advertise to Azure to allow your Azure Virtual Network to access. You can also advertise larger prefixes that may include some of your VNet address prefixes, such as a large private IP address space (for example, 10.0.0.0/8). Note though the prefixes cannot be identical with any one of your VNet prefixes. Those routes identical to your VNet prefixes will be rejected. ### Support multiple tunnels between a VNet and an on-premises site with automatic failover based on BGP + You can establish multiple connections between your Azure VNet and your on-premises VPN devices in the same location. This capability provides multiple tunnels (paths) between the two networks in an active-active configuration. If one of the tunnels is disconnected, the corresponding routes will be withdrawn via BGP and the traffic automatically shifts to the remaining tunnels. The following diagram shows a simple example of this highly available setup: @@ -35,6 +35,7 @@ The following diagram shows a simple example of this highly available setup: ![Multiple active paths](./media/vpn-gateway-bgp-overview/multiple-active-tunnels.png) ### Support transit routing between your on-premises networks and multiple Azure VNets + BGP enables multiple gateways to learn and propagate prefixes from different networks, whether they are directly or indirectly connected. This can enable transit routing with Azure VPN gateways between your on-premises sites or across multiple Azure Virtual Networks. The following diagram shows an example of a multi-hop topology with multiple paths that can transit traffic between the two on-premises networks through Azure VPN gateways within the Microsoft Networks: @@ -42,8 +43,9 @@ The following diagram shows an example of a multi-hop topology with multiple pat ![Multi-hop transit](./media/vpn-gateway-bgp-overview/full-mesh-transit.png) ## BGP FAQ + [!INCLUDE [vpn-gateway-faq-bgp-include](../../includes/vpn-gateway-faq-bgp-include.md)] ## Next steps -See [Getting started with BGP on Azure VPN gateways](vpn-gateway-bgp-resource-manager-ps.md) for steps to configure BGP for your cross-premises and VNet-to-VNet connections. +See [Getting started with BGP on Azure VPN gateways](vpn-gateway-bgp-resource-manager-ps.md) for steps to configure BGP for your cross-premises and VNet-to-VNet connections. 
\ No newline at end of file diff --git a/articles/vpn-gateway/vpn-gateway-certificates-point-to-site-makecert.md b/articles/vpn-gateway/vpn-gateway-certificates-point-to-site-makecert.md index e1c720bb42f51..4c7bdecb0ef59 100644 --- a/articles/vpn-gateway/vpn-gateway-certificates-point-to-site-makecert.md +++ b/articles/vpn-gateway/vpn-gateway-certificates-point-to-site-makecert.md @@ -15,7 +15,7 @@ ms.author: cherylmc Point-to-Site connections use certificates to authenticate. This article shows you how to create a self-signed root certificate and generate client certificates using MakeCert. If you are looking for different certificate instructions, see [Certificates - PowerShell](vpn-gateway-certificates-point-to-site.md) or [Certificates - Linux](vpn-gateway-certificates-point-to-site-linux.md). -While we recommend using the [Windows 10 PowerShell steps](vpn-gateway-certificates-point-to-site.md) to create your certificates, we provide these MakeCert instructions as an optional method. The certificates that you generate using either method can be installed on [any supported client operating system](vpn-gateway-howto-point-to-site-resource-manager-portal.md#faq). However, MakeCert has the following limitation: +While we recommend using the [Windows 10 or later PowerShell steps](vpn-gateway-certificates-point-to-site.md) to create your certificates, we provide these MakeCert instructions as an optional method. The certificates that you generate using either method can be installed on [any supported client operating system](vpn-gateway-howto-point-to-site-resource-manager-portal.md#faq). However, MakeCert has the following limitation: * MakeCert is deprecated. This means that this tool could be removed at any point. Any certificates that you already generated using MakeCert won't be affected when MakeCert is no longer available. MakeCert is only used to generate the certificates, not as a validating mechanism. diff --git a/articles/vpn-gateway/vpn-gateway-certificates-point-to-site.md b/articles/vpn-gateway/vpn-gateway-certificates-point-to-site.md index f9dc1004e9157..311975444f144 100644 --- a/articles/vpn-gateway/vpn-gateway-certificates-point-to-site.md +++ b/articles/vpn-gateway/vpn-gateway-certificates-point-to-site.md @@ -13,11 +13,11 @@ ms.author: cherylmc --- # Generate and export certificates for Point-to-Site using PowerShell -Point-to-Site connections use certificates to authenticate. This article shows you how to create a self-signed root certificate and generate client certificates using PowerShell on Windows 10 or Windows Server 2016. If you are looking for different certificate instructions, see [Certificates - Linux](vpn-gateway-certificates-point-to-site-linux.md) or [Certificates - MakeCert](vpn-gateway-certificates-point-to-site-makecert.md). +Point-to-Site connections use certificates to authenticate. This article shows you how to create a self-signed root certificate and generate client certificates using PowerShell on Windows 10 or later, or Windows Server 2016. If you are looking for different certificate instructions, see [Certificates - Linux](vpn-gateway-certificates-point-to-site-linux.md) or [Certificates - MakeCert](vpn-gateway-certificates-point-to-site-makecert.md). -The steps in this article apply to Windows 10 or Windows Server 2016. The PowerShell cmdlets that you use to generate certificates are part of the operating system and do not work on other versions of Windows. 
The Windows 10 or Windows Server 2016 computer is only needed to generate the certificates. Once the certificates are generated, you can upload them, or install them on any supported client operating system. +The steps in this article apply to Windows 10 or later, or Windows Server 2016. The PowerShell cmdlets that you use to generate certificates are part of the operating system and do not work on other versions of Windows. The Windows 10 or later, or Windows Server 2016 computer is only needed to generate the certificates. Once the certificates are generated, you can upload them, or install them on any supported client operating system. -If you do not have access to a Windows 10 or Windows Server 2016 computer, you can use [MakeCert](vpn-gateway-certificates-point-to-site-makecert.md) to generate certificates. The certificates that you generate using either method can be installed on any [supported](vpn-gateway-howto-point-to-site-resource-manager-portal.md#faq) client operating system. +If you do not have access to a Windows 10 or later, or Windows Server 2016 computer, you can use [MakeCert](vpn-gateway-certificates-point-to-site-makecert.md) to generate certificates. The certificates that you generate using either method can be installed on any [supported](vpn-gateway-howto-point-to-site-resource-manager-portal.md#faq) client operating system. [!INCLUDE [generate and export certificates](../../includes/vpn-gateway-generate-export-certificates-include.md)] diff --git a/articles/vpn-gateway/vpn-gateway-howto-always-on-device-tunnel.md b/articles/vpn-gateway/vpn-gateway-howto-always-on-device-tunnel.md index f3341d2e37c34..4b607d1747d36 100644 --- a/articles/vpn-gateway/vpn-gateway-howto-always-on-device-tunnel.md +++ b/articles/vpn-gateway/vpn-gateway-howto-always-on-device-tunnel.md @@ -1,7 +1,7 @@ --- title: 'Configure an Always-On VPN tunnel' titleSuffix: Azure VPN Gateway -description: Learn how to use gateways with Windows 10 Always On to establish and configure persistent device tunnels to Azure. +description: Learn how to use gateways with Windows 10 or later Always On to establish and configure persistent device tunnels to Azure. services: vpn-gateway author: cherylmc diff --git a/articles/vpn-gateway/vpn-gateway-howto-aws-bgp.md b/articles/vpn-gateway/vpn-gateway-howto-aws-bgp.md index 93d82230484c2..13714e84a5b90 100644 --- a/articles/vpn-gateway/vpn-gateway-howto-aws-bgp.md +++ b/articles/vpn-gateway/vpn-gateway-howto-aws-bgp.md @@ -2,8 +2,8 @@ title: 'Tutorial - Configure a BGP-enabled connection between Azure and Amazon Web Services (AWS) using the portal' description: In this tutorial, learn how to connect Azure and AWS using an active-active VPN Gateway and two site-to-site connections on AWS. titleSuffix: Azure VPN Gateway -author: jaidharosenblatt -ms.author: jrosenblatt +author: cherylmc +ms.author: cherylmc ms.service: vpn-gateway ms.topic: tutorial ms.date: 12/2/2021 diff --git a/articles/vpn-gateway/vpn-gateway-howto-point-to-site-classic-azure-portal.md b/articles/vpn-gateway/vpn-gateway-howto-point-to-site-classic-azure-portal.md index 7301852679240..e14f55707d18d 100644 --- a/articles/vpn-gateway/vpn-gateway-howto-point-to-site-classic-azure-portal.md +++ b/articles/vpn-gateway/vpn-gateway-howto-point-to-site-classic-azure-portal.md @@ -96,7 +96,7 @@ If you already have a VNet, verify that the settings are compatible with your VP Azure uses certificates to authenticate VPN clients for Point-to-Site VPNs. 
You upload the public key information of the root certificate to Azure. The public key is then considered *trusted*. Client certificates must be generated from the trusted root certificate, and then installed on each client computer in the Certificates-Current User\Personal\Certificates certificate store. The certificate is used to authenticate the client when it connects to the VNet. -If you use self-signed certificates, they must be created by using specific parameters. You can create a self-signed certificate by using the instructions for [PowerShell and Windows 10](vpn-gateway-certificates-point-to-site.md), or [MakeCert](vpn-gateway-certificates-point-to-site-makecert.md). It's important to follow the steps in these instructions when you use self-signed root certificates and generate client certificates from the self-signed root certificate. Otherwise, the certificates you create won't be compatible with P2S connections and you'll receive a connection error. +If you use self-signed certificates, they must be created by using specific parameters. You can create a self-signed certificate by using the instructions for [PowerShell and Windows 10 or later](vpn-gateway-certificates-point-to-site.md), or [MakeCert](vpn-gateway-certificates-point-to-site-makecert.md). It's important to follow the steps in these instructions when you use self-signed root certificates and generate client certificates from the self-signed root certificate. Otherwise, the certificates you create won't be compatible with P2S connections and you'll receive a connection error. ### Acquire the public key (.cer) for the root certificate diff --git a/articles/vpn-gateway/vpn-gateway-howto-point-to-site-resource-manager-portal.md b/articles/vpn-gateway/vpn-gateway-howto-point-to-site-resource-manager-portal.md index 3daafc90d6c88..27dff45a6f6e7 100644 --- a/articles/vpn-gateway/vpn-gateway-howto-point-to-site-resource-manager-portal.md +++ b/articles/vpn-gateway/vpn-gateway-howto-point-to-site-resource-manager-portal.md @@ -1,19 +1,17 @@ --- title: 'Connect to a VNet using P2S VPN & certificate authentication: portal' titleSuffix: Azure VPN Gateway -description: Learn how to connect Windows, macOS, and Linux clients securely to a VNet using VPN Gateway Point-to-Site connections and self-signed or CA issued certificates. -services: vpn-gateway +description: Learn how to connect Windows, macOS, and Linux clients securely to a VNet using VPN Gateway point-to-site connections and self-signed or CA issued certificates. author: cherylmc - ms.service: vpn-gateway ms.topic: how-to -ms.date: 04/20/2022 +ms.date: 05/26/2022 ms.author: cherylmc --- -# Configure a Point-to-Site VPN connection using Azure certificate authentication: Azure portal +# Configure a point-to-site VPN connection using Azure certificate authentication: Azure portal -This article helps you securely connect individual clients running Windows, Linux, or macOS to an Azure VNet. Point-to-Site VPN connections are useful when you want to connect to your VNet from a remote location, such when you're telecommuting from home or a conference. You can also use P2S instead of a Site-to-Site VPN when you have only a few clients that need to connect to a VNet. Point-to-Site connections don't require a VPN device or a public-facing IP address. P2S creates the VPN connection over either SSTP (Secure Socket Tunneling Protocol), or IKEv2. For more information about Point-to-Site VPN, see [About Point-to-Site VPN](point-to-site-about.md). 
+This article helps you securely connect individual clients running Windows, Linux, or macOS to an Azure VNet. Point-to-site VPN connections are useful when you want to connect to your VNet from a remote location, such as when you're telecommuting from home or a conference. You can also use P2S instead of a Site-to-Site VPN when you have only a few clients that need to connect to a VNet. Point-to-site connections don't require a VPN device or a public-facing IP address. P2S creates the VPN connection over either SSTP (Secure Socket Tunneling Protocol), or IKEv2. For more information about point-to-site VPN, see [About point-to-site VPN](point-to-site-about.md).

:::image type="content" source="./media/vpn-gateway-howto-point-to-site-resource-manager-portal/point-to-site-diagram.png" alt-text="Connect from a computer to an Azure VNet - point-to-site connection diagram.":::

@@ -52,7 +50,7 @@ You can use the following values to create a test environment, or refer to these

**Connection type and client address pool**

* **Connection type:** Point-to-site
-* **Client address pool:** 172.16.201.0/24
                    VPN clients that connect to the VNet using this Point-to-Site connection receive an IP address from the client address pool. +* **Client address pool:** 172.16.201.0/24
                    VPN clients that connect to the VNet using this point-to-site connection receive an IP address from the client address pool. ## Create a VNet @@ -60,7 +58,7 @@ In this section, you create a virtual network. [!INCLUDE [About cross-premises addresses](../../includes/vpn-gateway-cross-premises.md)] -[!INCLUDE [Basic Point-to-Site VNet](../../includes/vpn-gateway-basic-vnet-rm-portal-include.md)] +[!INCLUDE [Basic point-to-site VNet](../../includes/vpn-gateway-basic-vnet-rm-portal-include.md)] ## Create the VPN gateway @@ -81,7 +79,7 @@ You can see the deployment status on the Overview page for your gateway. After t ## Generate certificates -Certificates are used by Azure to authenticate clients connecting to a VNet over a Point-to-Site VPN connection. Once you obtain a root certificate, you [upload](#uploadfile) the public key information to Azure. The root certificate is then considered 'trusted' by Azure for connection over P2S to the virtual network. You also generate client certificates from the trusted root certificate, and then install them on each client computer. The client certificate is used to authenticate the client when it initiates a connection to the VNet. +Certificates are used by Azure to authenticate clients connecting to a VNet over a point-to-site VPN connection. Once you obtain a root certificate, you [upload](#uploadfile) the public key information to Azure. The root certificate is then considered 'trusted' by Azure for connection over P2S to the virtual network. You also generate client certificates from the trusted root certificate, and then install them on each client computer. The client certificate is used to authenticate the client when it initiates a connection to the VNet. ### Generate a root certificate @@ -93,7 +91,7 @@ Certificates are used by Azure to authenticate clients connecting to a VNet over ## Add the VPN client address pool -The client address pool is a range of private IP addresses that you specify. The clients that connect over a Point-to-Site VPN dynamically receive an IP address from this range. Use a private IP address range that doesn't overlap with the on-premises location that you connect from, or the VNet that you want to connect to. If you configure multiple protocols and SSTP is one of the protocols, then the configured address pool is split between the configured protocols equally. +The client address pool is a range of private IP addresses that you specify. The clients that connect over a point-to-site VPN dynamically receive an IP address from this range. Use a private IP address range that doesn't overlap with the on-premises location that you connect from, or the VNet that you want to connect to. If you configure multiple protocols and SSTP is one of the protocols, then the configured address pool is split between the configured protocols equally. 1. Once the virtual network gateway has been created, navigate to the **Settings** section of the virtual network gateway page. In **Settings**, select **Point-to-site configuration**. Select **Configure now** to open the configuration page. @@ -126,10 +124,10 @@ In this section, you upload public root certificate data to Azure. Once the publ 1. Navigate to your **Virtual network gateway -> Point-to-site configuration** page in the **Root certificate** section. This section is only visible if you have selected **Azure certificate** for the authentication type. 1. 
Make sure that you exported the root certificate as a **Base-64 encoded X.509 (.CER)** file in the previous steps. You need to export the certificate in this format so you can open the certificate with text editor. You don't need to export the private key. - :::image type="content" source="./media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64.png" alt-text="Screenshot showing export as Base-64 encoded X.509." lightbox="./media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64.png" ::: + :::image type="content" source="./media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64.png" alt-text="Screenshot showing export as Base-64 encoded X.509." lightbox="./media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64-expand.png" ::: 1. Open the certificate with a text editor, such as Notepad. When copying the certificate data, make sure that you copy the text as one continuous line without carriage returns or line feeds. You may need to modify your view in the text editor to 'Show Symbol/Show all characters' to see the carriage returns and line feeds. Copy only the following section as one continuous line: - :::image type="content" source="./media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert.png" alt-text="Screenshot showing root certificate information in Notepad." border="false" lightbox="./media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert.png"::: + :::image type="content" source="./media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert.png" alt-text="Screenshot showing root certificate information in Notepad." border="false" lightbox="./media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert-expand.png"::: 1. In the **Root certificate** section, you can add up to 20 trusted root certificates. * Paste the certificate data into the **Public certificate data** field. @@ -175,7 +173,7 @@ If you're having trouble connecting, verify that the virtual network gateway isn These instructions apply to Windows clients. 1. To verify that your VPN connection is active, open an elevated command prompt, and run *ipconfig/all*. -2. View the results. Notice that the IP address you received is one of the addresses within the Point-to-Site VPN Client Address Pool that you specified in your configuration. The results are similar to this example: +2. View the results. Notice that the IP address you received is one of the addresses within the point-to-site VPN Client Address Pool that you specified in your configuration. The results are similar to this example: ``` PPP adapter VNet1: @@ -214,7 +212,7 @@ To remove a trusted root certificate: ## To revoke a client certificate -You can revoke client certificates. The certificate revocation list allows you to selectively deny Point-to-Site connectivity based on individual client certificates. This is different than removing a trusted root certificate. If you remove a trusted root certificate .cer from Azure, it revokes the access for all client certificates generated/signed by the revoked root certificate. Revoking a client certificate, rather than the root certificate, allows the other certificates that were generated from the root certificate to continue to be used for authentication. +You can revoke client certificates. The certificate revocation list allows you to selectively deny point-to-site connectivity based on individual client certificates. 
This is different than removing a trusted root certificate. If you remove a trusted root certificate .cer from Azure, it revokes the access for all client certificates generated/signed by the revoked root certificate. Revoking a client certificate, rather than the root certificate, allows the other certificates that were generated from the root certificate to continue to be used for authentication. The common practice is to use the root certificate to manage access at team or organization levels, while using revoked client certificates for fine-grained access control on individual users. @@ -228,7 +226,7 @@ You can revoke a client certificate by adding the thumbprint to the revocation l 1. The thumbprint validates and is automatically added to the revocation list. A message appears on the screen that the list is updating. 1. After updating has completed, the certificate can no longer be used to connect. Clients that try to connect using this certificate receive a message saying that the certificate is no longer valid. -## Point-to-Site FAQ +## Point-to-site FAQ For frequently asked questions, see the [FAQ](vpn-gateway-vpn-faq.md#P2S). diff --git a/articles/vpn-gateway/vpn-gateway-howto-point-to-site-rm-ps.md b/articles/vpn-gateway/vpn-gateway-howto-point-to-site-rm-ps.md index f4aa093d1cc86..4b817f2451957 100644 --- a/articles/vpn-gateway/vpn-gateway-howto-point-to-site-rm-ps.md +++ b/articles/vpn-gateway/vpn-gateway-howto-point-to-site-rm-ps.md @@ -145,7 +145,7 @@ Set-AzVirtualNetworkGateway -VirtualNetworkGateway $Gateway -VpnClientAddressPoo Certificates are used by Azure to authenticate VPN clients for point-to-site VPNs. You upload the public key information of the root certificate to Azure. The public key is then considered 'trusted'. Client certificates must be generated from the trusted root certificate, and then installed on each client computer in the Certificates-Current User/Personal certificate store. The certificate is used to authenticate the client when it initiates a connection to the VNet. -If you use self-signed certificates, they must be created using specific parameters. You can create a self-signed certificate using the instructions for [PowerShell and Windows 10](vpn-gateway-certificates-point-to-site.md), or, if you don't have Windows 10, you can use [MakeCert](vpn-gateway-certificates-point-to-site-makecert.md). It's important that you follow the steps in the instructions when generating self-signed root certificates and client certificates. Otherwise, the certificates you generate will not be compatible with P2S connections and you receive a connection error. +If you use self-signed certificates, they must be created using specific parameters. You can create a self-signed certificate using the instructions for [PowerShell and Windows 10 or later](vpn-gateway-certificates-point-to-site.md), or, if you don't have Windows 10 or later, you can use [MakeCert](vpn-gateway-certificates-point-to-site-makecert.md). It's important that you follow the steps in the instructions when generating self-signed root certificates and client certificates. Otherwise, the certificates you generate will not be compatible with P2S connections and you receive a connection error. ### Root certificate @@ -161,7 +161,7 @@ If you use self-signed certificates, they must be created using specific paramet ## Upload root certificate public key information -Verify that your VPN gateway has finished creating. 
Once it has completed, you can upload the .cer file (which contains the public key information) for a trusted root certificate to Azure. Once a .cer file is uploaded, Azure can use it to authenticate clients that have installed a client certificate generated from the trusted root certificate. You can upload additional trusted root certificate files - up to a total of 20 - later, if needed.

>[!NOTE]
> You can't upload the .cer file using Azure Cloud Shell. You can either use PowerShell locally on your computer, or you can use the [Azure portal steps](vpn-gateway-howto-point-to-site-resource-manager-portal.md#uploadfile).

@@ -384,4 +384,4 @@ For additional point-to-site information, see the [VPN Gateway point-to-site FAQ

Once your connection is complete, you can add virtual machines to your virtual networks. For more information, see [Virtual Machines](../index.yml). To understand more about networking and virtual machines, see [Azure and Linux VM network overview](../virtual-network/network-overview.md).

-For P2S troubleshooting information, [Troubleshooting: Azure point-to-site connection problems](vpn-gateway-troubleshoot-vpn-point-to-site-connection-problems.md). \ No newline at end of file
+For P2S troubleshooting information, [Troubleshooting: Azure point-to-site connection problems](vpn-gateway-troubleshoot-vpn-point-to-site-connection-problems.md).
diff --git a/articles/vpn-gateway/vpn-gateway-troubleshoot-vpn-point-to-site-connection-problems.md b/articles/vpn-gateway/vpn-gateway-troubleshoot-vpn-point-to-site-connection-problems.md
index 947c7eddc9fba..7c007ab2cf545 100644
--- a/articles/vpn-gateway/vpn-gateway-troubleshoot-vpn-point-to-site-connection-problems.md
+++ b/articles/vpn-gateway/vpn-gateway-troubleshoot-vpn-point-to-site-connection-problems.md
@@ -61,7 +61,7 @@ When you try and connect to an Azure virtual network gateway using IKEv2 on Wind

IKEv2 is supported on Windows 10 and Server 2016. However, in order to use IKEv2, you must install updates and set a registry key value locally. OS versions prior to Windows 10 are not supported and can only use SSTP.

-To prepare Windows 10 or Server 2016 for IKEv2:
+To prepare Windows 10 or later, or Server 2016 for IKEv2:

1. Install the update.

diff --git a/articles/vpn-gateway/vpn-gateway-vpn-faq.md b/articles/vpn-gateway/vpn-gateway-vpn-faq.md
index 03ea13ffcceb4..31c7cbf8b83f4 100644
--- a/articles/vpn-gateway/vpn-gateway-vpn-faq.md
+++ b/articles/vpn-gateway/vpn-gateway-vpn-faq.md
@@ -6,7 +6,7 @@ author: cherylmc

ms.service: vpn-gateway
ms.topic: conceptual
-ms.date: 12/16/2021
+ms.date: 05/25/2022
ms.author: cherylmc
---
# VPN Gateway FAQ
@@ -15,7 +15,7 @@ ms.author: cherylmc

### Can I connect virtual networks in different Azure regions?

-Yes. There is no region constraint. One virtual network can connect to another virtual network in the same region, or in a different Azure region.
+Yes. There's no region constraint. One virtual network can connect to another virtual network in the same region, or in a different Azure region.

### Can I connect virtual networks in different subscriptions?
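To illustrate the preceding answer (one VNet can connect to a VNet in another region over a VNet-to-VNet connection), here's a minimal Azure PowerShell sketch. The gateway and resource group names are hypothetical; cross-subscription connections additionally require retrieving each gateway object from its own subscription context.

```powershell
# Hypothetical gateways, potentially in different regions (or subscriptions).
$gw1 = Get-AzVirtualNetworkGateway -Name VNet1GW -ResourceGroupName TestRG1
$gw2 = Get-AzVirtualNetworkGateway -Name VNet2GW -ResourceGroupName TestRG2

# A VNet-to-VNet connection is created on each side using the same shared key.
New-AzVirtualNetworkGatewayConnection -Name VNet1toVNet2 -ResourceGroupName TestRG1 `
  -VirtualNetworkGateway1 $gw1 -VirtualNetworkGateway2 $gw2 `
  -Location "East US" -ConnectionType Vnet2Vnet -SharedKey "AzureA1b2C3"
```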
@@ -37,21 +37,21 @@ No. The following cross-premises virtual network gateway connections are supported: -* **Site-to-Site:** VPN connection over IPsec (IKE v1 and IKE v2). This type of connection requires a VPN device or RRAS. For more information, see [Site-to-Site](./tutorial-site-to-site-portal.md). -* **Point-to-Site:** VPN connection over SSTP (Secure Socket Tunneling Protocol) or IKE v2. This connection does not require a VPN device. For more information, see [Point-to-Site](vpn-gateway-howto-point-to-site-resource-manager-portal.md). -* **VNet-to-VNet:** This type of connection is the same as a Site-to-Site configuration. VNet to VNet is a VPN connection over IPsec (IKE v1 and IKE v2). It does not require a VPN device. For more information, see [VNet-to-VNet](vpn-gateway-howto-vnet-vnet-resource-manager-portal.md). -* **Multi-Site:** This is a variation of a Site-to-Site configuration that allows you to connect multiple on-premises sites to a virtual network. For more information, see [Multi-Site](vpn-gateway-howto-multi-site-to-site-resource-manager-portal.md). +* **Site-to-site:** VPN connection over IPsec (IKE v1 and IKE v2). This type of connection requires a VPN device or RRAS. For more information, see [Site-to-site](./tutorial-site-to-site-portal.md). +* **Point-to-site:** VPN connection over SSTP (Secure Socket Tunneling Protocol) or IKE v2. This connection doesn't require a VPN device. For more information, see [Point-to-site](vpn-gateway-howto-point-to-site-resource-manager-portal.md). +* **VNet-to-VNet:** This type of connection is the same as a site-to-site configuration. VNet to VNet is a VPN connection over IPsec (IKE v1 and IKE v2). It doesn't require a VPN device. For more information, see [VNet-to-VNet](vpn-gateway-howto-vnet-vnet-resource-manager-portal.md). +* **Multi-Site:** This is a variation of a site-to-site configuration that allows you to connect multiple on-premises sites to a virtual network. For more information, see [Multi-Site](vpn-gateway-howto-multi-site-to-site-resource-manager-portal.md). * **ExpressRoute:** ExpressRoute is a private connection to Azure from your WAN, not a VPN connection over the public Internet. For more information, see the [ExpressRoute Technical Overview](../expressroute/expressroute-introduction.md) and the [ExpressRoute FAQ](../expressroute/expressroute-faqs.md). For more information about VPN Gateway connections, see [About VPN Gateway](vpn-gateway-about-vpngateways.md). -### What is the difference between a Site-to-Site connection and Point-to-Site? +### What is the difference between a site-to-site connection and point-to-site? -**Site-to-Site** (IPsec/IKE VPN tunnel) configurations are between your on-premises location and Azure. This means that you can connect from any of your computers located on your premises to any virtual machine or role instance within your virtual network, depending on how you choose to configure routing and permissions. It's a great option for an always-available cross-premises connection and is well suited for hybrid configurations. This type of connection relies on an IPsec VPN appliance (hardware device or soft appliance), which must be deployed at the edge of your network. To create this type of connection, you must have an externally facing IPv4 address. +**Site-to-site** (IPsec/IKE VPN tunnel) configurations are between your on-premises location and Azure. 
This means that you can connect from any of your computers located on your premises to any virtual machine or role instance within your virtual network, depending on how you choose to configure routing and permissions. It's a great option for an always-available cross-premises connection and is well suited for hybrid configurations. This type of connection relies on an IPsec VPN appliance (hardware device or soft appliance), which must be deployed at the edge of your network. To create this type of connection, you must have an externally facing IPv4 address. -**Point-to-Site** (VPN over SSTP) configurations let you connect from a single computer from anywhere to anything located in your virtual network. It uses the Windows in-box VPN client. As part of the Point-to-Site configuration, you install a certificate and a VPN client configuration package, which contains the settings that allow your computer to connect to any virtual machine or role instance within the virtual network. It's great when you want to connect to a virtual network, but aren't located on-premises. It's also a good option when you don't have access to VPN hardware or an externally facing IPv4 address, both of which are required for a Site-to-Site connection. +**Point-to-site** (VPN over SSTP) configurations let you connect from a single computer from anywhere to anything located in your virtual network. It uses the Windows in-box VPN client. As part of the point-to-site configuration, you install a certificate and a VPN client configuration package, which contains the settings that allow your computer to connect to any virtual machine or role instance within the virtual network. It's great when you want to connect to a virtual network, but aren't located on-premises. It's also a good option when you don't have access to VPN hardware or an externally facing IPv4 address, both of which are required for a site-to-site connection. -You can configure your virtual network to use both Site-to-Site and Point-to-Site concurrently, as long as you create your Site-to-Site connection using a route-based VPN type for your gateway. Route-based VPN types are called dynamic gateways in the classic deployment model. +You can configure your virtual network to use both site-to-site and point-to-site concurrently, as long as you create your site-to-site connection using a route-based VPN type for your gateway. Route-based VPN types are called dynamic gateways in the classic deployment model. ## Privacy @@ -81,7 +81,7 @@ The custom configured traffic selectors will be proposed only when an Azure VPN ### Can I update my policy-based VPN gateway to route-based? -No. A gateway type cannot be changed from policy-based to route-based, or from route-based to policy-based. To change a gateway type, the gateway must be deleted and recreated. This process takes about 60 minutes. When you create the new gateway, you cannot retain the IP address of the original gateway. +No. A gateway type can't be changed from policy-based to route-based, or from route-based to policy-based. To change a gateway type, the gateway must be deleted and recreated. This process takes about 60 minutes. When you create the new gateway, you can't retain the IP address of the original gateway. 1. Delete any connections associated with the gateway. @@ -90,7 +90,7 @@ No. 
A gateway type cannot be changed from policy-based to route-based, or from r * [Azure portal](vpn-gateway-delete-vnet-gateway-portal.md) * [Azure PowerShell](vpn-gateway-delete-vnet-gateway-powershell.md) * [Azure PowerShell - classic](vpn-gateway-delete-vnet-gateway-classic-powershell.md) -1. Create a new gateway using the gateway type that you want, and then complete the VPN setup. For steps, see the [Site-to-Site tutorial](./tutorial-site-to-site-portal.md#VNetGateway). +1. Create a new gateway using the gateway type that you want, and then complete the VPN setup. For steps, see the [Site-to-site tutorial](./tutorial-site-to-site-portal.md#VNetGateway). ### Do I need a 'GatewaySubnet'? @@ -104,15 +104,15 @@ No. ### Can I get my VPN gateway IP address before I create it? -Zone-redundant and zonal gateways (gateway SKUs that have _AZ_ in the name) both rely on a _Standard SKU_ Azure public IP resource. Azure Standard SKU public IP resources must use a static allocation method. Therefore, you will have the public IP address for your VPN gateway as soon as you create the Standard SKU public IP resource you intend to use for it. +Zone-redundant and zonal gateways (gateway SKUs that have _AZ_ in the name) both rely on a _Standard SKU_ Azure public IP resource. Azure Standard SKU public IP resources must use a static allocation method. Therefore, you'll have the public IP address for your VPN gateway as soon as you create the Standard SKU public IP resource you intend to use for it. -For non-zone-redundant and non-zonal gateways (gateway SKUs that do _not_ have _AZ_ in the name), you cannot obtain the VPN gateway IP address before it is created. The IP address changes only if you delete and re-create your VPN gateway. +For non-zone-redundant and non-zonal gateways (gateway SKUs that do *not* have *AZ* in the name), you can't obtain the VPN gateway IP address before it's created. The IP address changes only if you delete and re-create your VPN gateway. ### Can I request a Static Public IP address for my VPN gateway? -Zone-redundant and zonal gateways (gateway SKUs that have _AZ_ in the name) both rely on a _Standard SKU_ Azure public IP resource. Azure Standard SKU public IP resources must use a static allocation method. +Zone-redundant and zonal gateways (gateway SKUs that have *AZ* in the name) both rely on a *Standard SKU* Azure public IP resource. Azure Standard SKU public IP resources must use a static allocation method. -For non-zone-redundant and non-zonal gateways (gateway SKUs that do _not_ have _AZ_ in the name), only dynamic IP address assignment is supported. However, this doesn't mean that the IP address changes after it has been assigned to your VPN gateway. The only time the VPN gateway IP address changes is when the gateway is deleted and then re-created. The VPN gateway public IP address doesn't change when you resize, reset, or complete other internal maintenance and upgrades of your VPN gateway. +For non-zone-redundant and non-zonal gateways (gateway SKUs that do *not* have *AZ* in the name), only dynamic IP address assignment is supported. However, this doesn't mean that the IP address changes after it has been assigned to your VPN gateway. The only time the VPN gateway IP address changes is when the gateway is deleted and then re-created. The VPN gateway public IP address doesn't change when you resize, reset, or complete other internal maintenance and upgrades of your VPN gateway. ### How does my VPN tunnel get authenticated? 
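The answer that follows points to the pre-shared key (PSK) APIs. As a minimal, hypothetical Azure PowerShell sketch (the connection and resource group names are placeholders), the key on an existing connection can be read and overridden like this:

```powershell
# Hypothetical resource names for illustration only.
# Read the pre-shared key currently set on a connection.
Get-AzVirtualNetworkGatewayConnectionSharedKey -Name VNet1toSite1 -ResourceGroupName TestRG1

# Replace it with your own value (printable ASCII; no space, hyphen, or tilde).
Set-AzVirtualNetworkGatewayConnectionSharedKey -Name VNet1toSite1 -ResourceGroupName TestRG1 `
  -Value "MyCustomKey123"
```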
@@ -124,7 +124,7 @@ Yes, the Set Pre-Shared Key API and PowerShell cmdlet can be used to configure b ### Can I use other authentication options? -We are limited to using pre-shared keys (PSK) for authentication. +We're limited to using pre-shared keys (PSK) for authentication. ### How do I specify which traffic goes through the VPN gateway? @@ -147,19 +147,19 @@ Yes, you can deploy your own VPN gateways or servers in Azure either from the Az ### Why are certain ports opened on my virtual network gateway? -They are required for Azure infrastructure communication. They are protected (locked down) by Azure certificates. Without proper certificates, external entities, including the customers of those gateways, will not be able to cause any effect on those endpoints. +They're required for Azure infrastructure communication. They're protected (locked down) by Azure certificates. Without proper certificates, external entities, including the customers of those gateways, won't be able to cause any effect on those endpoints. -A virtual network gateway is fundamentally a multi-homed device with one NIC tapping into the customer private network, and one NIC facing the public network. Azure infrastructure entities cannot tap into customer private networks for compliance reasons, so they need to utilize public endpoints for infrastructure communication. The public endpoints are periodically scanned by Azure security audit. +A virtual network gateway is fundamentally a multi-homed device with one NIC tapping into the customer private network, and one NIC facing the public network. Azure infrastructure entities can't tap into customer private networks for compliance reasons, so they need to utilize public endpoints for infrastructure communication. The public endpoints are periodically scanned by Azure security audit. ### More information about gateway types, requirements, and throughput For more information, see [About VPN Gateway configuration settings](vpn-gateway-about-vpn-gateway-settings.md). -## Site-to-Site connections and VPN devices +## Site-to-site connections and VPN devices ### What should I consider when selecting a VPN device? -We have validated a set of standard Site-to-Site VPN devices in partnership with device vendors. A list of known compatible VPN devices, their corresponding configuration instructions or samples, and device specs can be found in the [About VPN devices](vpn-gateway-about-vpn-devices.md) article. All devices in the device families listed as known compatible should work with Virtual Network. To help configure your VPN device, refer to the device configuration sample or link that corresponds to appropriate device family. +We've validated a set of standard site-to-site VPN devices in partnership with device vendors. A list of known compatible VPN devices, their corresponding configuration instructions or samples, and device specs can be found in the [About VPN devices](vpn-gateway-about-vpn-devices.md) article. All devices in the device families listed as known compatible should work with Virtual Network. To help configure your VPN device, refer to the device configuration sample or link that corresponds to appropriate device family. ### Where can I find VPN device configuration settings? @@ -179,21 +179,21 @@ This is expected behavior for policy-based (also known as static routing) VPN ga ### Can I use software VPNs to connect to Azure? -We support Windows Server 2012 Routing and Remote Access (RRAS) servers for Site-to-Site cross-premises configuration. 
+We support Windows Server 2012 Routing and Remote Access (RRAS) servers for site-to-site cross-premises configuration.

Other software VPN solutions should work with our gateway as long as they conform to industry standard IPsec implementations. Contact the vendor of the software for configuration and support instructions.

-### Can I connect to a VPN gateway via Point-to-Site when located at a Site that has an active Site-to-Site connection?
+### Can I connect to a VPN gateway via point-to-site when located at a site that has an active site-to-site connection?

-Yes, but the Public IP address(es) of the Point-to-Site client need to be different than the Public IP address(es) used by the Site-to-Site VPN device, or else the Point-to-Site connection will not work. Point-to-Site connections with IKEv2 cannot be initiated from the same Public IP address(es) where a Site-to-Site VPN connection is configured on the same Azure VPN gateway.
+Yes, but the Public IP address(es) of the point-to-site client need to be different than the Public IP address(es) used by the site-to-site VPN device, or else the point-to-site connection won't work. Point-to-site connections with IKEv2 can't be initiated from the same Public IP address(es) where a site-to-site VPN connection is configured on the same Azure VPN gateway.

-## Point-to-Site - Certificate authentication
+## Point-to-site - Certificate authentication

This section applies to the Resource Manager deployment model.

[!INCLUDE [P2S Azure cert](../../includes/vpn-gateway-faq-p2s-azurecert-include.md)]

-## Point-to-Site - RADIUS authentication
+## Point-to-site - RADIUS authentication

This section applies to the Resource Manager deployment model.

@@ -213,15 +213,15 @@ If you want to enable routing between your branch connected to ExpressRoute and

Yes. See the [BGP](#bgp) section for more information.

**Classic deployment model**
                    -Transit traffic via Azure VPN gateway is possible using the classic deployment model, but relies on statically defined address spaces in the network configuration file. BGP is not yet supported with Azure Virtual Networks and VPN gateways using the classic deployment model. Without BGP, manually defining transit address spaces is very error prone, and not recommended. +Transit traffic via Azure VPN gateway is possible using the classic deployment model, but relies on statically defined address spaces in the network configuration file. BGP isn't yet supported with Azure Virtual Networks and VPN gateways using the classic deployment model. Without BGP, manually defining transit address spaces is very error prone, and not recommended. ### Does Azure generate the same IPsec/IKE pre-shared key for all my VPN connections for the same virtual network? -No, Azure by default generates different pre-shared keys for different VPN connections. However, you can use the Set VPN Gateway Key REST API or PowerShell cmdlet to set the key value you prefer. The key MUST only contain printable ASCII characters except space, hyphen (-) or tilde (~). +No, Azure by default generates different pre-shared keys for different VPN connections. However, you can use the `Set VPN Gateway Key` REST API or PowerShell cmdlet to set the key value you prefer. The key MUST only contain printable ASCII characters except space, hyphen (-) or tilde (~). -### Do I get more bandwidth with more Site-to-Site VPNs than for a single virtual network? +### Do I get more bandwidth with more site-to-site VPNs than for a single virtual network? -No, all VPN tunnels, including Point-to-Site VPNs, share the same Azure VPN gateway and the available bandwidth. +No, all VPN tunnels, including point-to-site VPNs, share the same Azure VPN gateway and the available bandwidth. ### Can I configure multiple tunnels between my virtual network and my on-premises site using multi-site VPN? @@ -233,15 +233,15 @@ Yes, Azure VPN gateway will honor AS Path prepending to help make routing decisi ### Can I use the RoutingWeight property when creating a new VPN VirtualNetworkGateway connection? -No, such setting is reserved for ExpressRoute gateway connections. If you want to influence routing decisions between multiple connections you need to use AS Path prepending. +No, such setting is reserved for ExpressRoute gateway connections. If you want to influence routing decisions between multiple connections, you need to use AS Path prepending. -### Can I use Point-to-Site VPNs with my virtual network with multiple VPN tunnels? +### Can I use point-to-site VPNs with my virtual network with multiple VPN tunnels? -Yes, Point-to-Site (P2S) VPNs can be used with the VPN gateways connecting to multiple on-premises sites and other virtual networks. +Yes, point-to-site (P2S) VPNs can be used with the VPN gateways connecting to multiple on-premises sites and other virtual networks. ### Can I connect a virtual network with IPsec VPNs to my ExpressRoute circuit? -Yes, this is supported. For more information, see [Configure ExpressRoute and Site-to-Site VPN connections that coexist](../expressroute/expressroute-howto-coexist-classic.md). +Yes, this is supported. For more information, see [Configure ExpressRoute and site-to-site VPN connections that coexist](../expressroute/expressroute-howto-coexist-classic.md). ## IPsec/IKE policy @@ -265,7 +265,7 @@ Yes. See [Configure forced tunneling](vpn-gateway-about-forced-tunneling.md). You have a few options. 
If you have RDP enabled for your VM, you can connect to your virtual machine by using the private IP address. In that case, you would specify the private IP address and the port that you want to connect to (typically 3389). You'll need to configure the port on your virtual machine for the traffic. -You can also connect to your virtual machine by private IP address from another virtual machine that's located on the same virtual network. You can't RDP to your virtual machine by using the private IP address if you are connecting from a location outside of your virtual network. For example, if you have a Point-to-Site virtual network configured and you don't establish a connection from your computer, you can't connect to the virtual machine by private IP address. +You can also connect to your virtual machine by private IP address from another virtual machine that's located on the same virtual network. You can't RDP to your virtual machine by using the private IP address if you're connecting from a location outside of your virtual network. For example, if you have a point-to-site virtual network configured and you don't establish a connection from your computer, you can't connect to the virtual machine by private IP address. ### If my virtual machine is in a virtual network with cross-premises connectivity, does all the traffic from my VM go through that connection? diff --git a/articles/web-application-firewall/afds/afds-overview.md b/articles/web-application-firewall/afds/afds-overview.md index 55c604347c1ca..0967b6104d9c8 100644 --- a/articles/web-application-firewall/afds/afds-overview.md +++ b/articles/web-application-firewall/afds/afds-overview.md @@ -5,7 +5,7 @@ services: web-application-firewall author: vhorne ms.service: web-application-firewall ms.topic: conceptual -ms.date: 03/30/2022 +ms.date: 05/06/2022 ms.author: victorh --- @@ -108,7 +108,7 @@ If bot protection is enabled, incoming requests that match bot rules are logged ## Configuration -You can configure and deploy all WAF rule types using the Azure portal, REST APIs, Azure Resource Manager templates, and Azure PowerShell. +You can configure and deploy all WAF policies using the Azure portal, REST APIs, Azure Resource Manager templates, and Azure PowerShell. You can also configure and manage Azure WAF policies at scale using Firewall Manager integration (preview). For more information, see [Use Azure Firewall Manager to manage Web Application Firewall policies (preview)](../shared/manage-policies.md). ## Monitoring diff --git a/articles/web-application-firewall/afds/waf-front-door-monitor.md b/articles/web-application-firewall/afds/waf-front-door-monitor.md index 34cd149c1fb15..a43a4182f88e0 100644 --- a/articles/web-application-firewall/afds/waf-front-door-monitor.md +++ b/articles/web-application-firewall/afds/waf-front-door-monitor.md @@ -16,7 +16,7 @@ Azure Web Application Firewall (WAF) monitoring and logging are provided through ## Azure Monitor -WAF with FrontDoor log is integrated with [Azure Monitor](../../azure-monitor/overview.md). Azure Monitor allows you to track diagnostic information including WAF alerts and logs. You can configure WAF monitoring within the Front Door resource in the portal under the **Diagnostics** tab or through the Azure Monitor service directly. +Front Door's WAF log is integrated with [Azure Monitor](../../azure-monitor/overview.md). Azure Monitor enables you to track diagnostic information including WAF alerts and logs. 
You can configure WAF monitoring within the Front Door resource in the portal under the **Diagnostics** tab, through infrastructure as code approaches, or by using the Azure Monitor service directly. From Azure portal, go to Front Door resource type. From **Monitoring**/**Metrics** tab on the left, you can add **WebApplicationFirewallRequestCount** to track number of requests that match WAF rules. Custom filters can be created based on action types and rule names. @@ -24,130 +24,157 @@ From Azure portal, go to Front Door resource type. From **Monitoring**/**Metrics ## Logs and diagnostics -WAF with Front Door provides detailed reporting on each threat it detects. Logging is integrated with Azure Diagnostics logs and alerts are recorded in a json format. These logs can be integrated with [Azure Monitor logs](../../azure-monitor/insights/azure-networking-analytics.md). +WAF with Front Door provides detailed reporting on each request, and each threat that it detects. Logging is integrated with Azure's diagnostics logs and alerts. These logs can be integrated with [Azure Monitor logs](../../azure-monitor/insights/azure-networking-analytics.md). ![WAFDiag](../media/waf-frontdoor-monitor/waf-frontdoor-diagnostics.png) -[FrontDoorAccessLog](../../frontdoor/standard-premium/how-to-logs.md#access-log) logs all requests. `FrontDoorWebApplicationFirewalllog` logs any request that matches a WAF rule and each log entry has the following schema. +Front Door provides two types of logs: access logs and WAF logs. -For logging on the classic tier, use [FrontdoorAccessLog](../../frontdoor/front-door-diagnostics.md) logs for Front Door requests and `FrontdoorWebApplicationFirewallLog` logs for matched WAF rules using the following schema: +### Access logs -| Property | Description | -| ------------- | ------------- | -|Action|Action taken on the request. WAF log shows all action values. WAF metrics show all action values, except *Log*.| -| ClientIp | The IP address of the client that made the request. If there was an X-Forwarded-For header in the request, then the Client IP is picked from the header field. | -| ClientPort | The IP port of the client that made the request. | -| Details|Additional details on the matched request | -|| matchVariableName: http parameter name of the request matched, for example, header names (max chars 100)| -|| matchVariableValue: values that triggered the match (max chars 100)| -| Host | The host header of the matched request | -| Policy | The name of the WAF policy that the request matched. | -| PolicyMode | Operations mode of the WAF policy. Possible values are "Prevention" and "Detection" | -| RequestUri | Full URI of the matched request. | -| RuleName | The name of the WAF rule that the request matched. | -| SocketIp | The source IP address seen by WAF. This IP address is based on TCP session, independent of any request headers.| -| TrackingReference | The unique reference string that identifies a request served by Front Door, also sent as X-Azure-Ref header to the client. Required for searching details in the access logs for a specific request. | +::: zone pivot="front-door-standard-premium" + +The **FrontDoorAccessLog** includes all requests that go through Front Door. For more information on the Front Door access log, including the log schema, see [Azure Front Door logs](../../frontdoor/standard-premium/how-to-logs.md#access-log). 
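As noted above, WAF monitoring doesn't have to be configured in the portal. The following is a hypothetical Azure PowerShell sketch for sending a Front Door (classic) profile's access and WAF logs to a Log Analytics workspace; the resource names are placeholders, and the exact diagnostic-settings cmdlets differ between Az.Monitor versions.

```powershell
# Hypothetical resource names for illustration only.
$frontDoor = Get-AzResource -ResourceGroupName "WafDemoRG" -Name "wafdemofrontdoor" `
  -ResourceType "Microsoft.Network/frontdoors"
$workspace = Get-AzOperationalInsightsWorkspace -ResourceGroupName "WafDemoRG" -Name "WafDemoLogs"

# Route both log categories to the Log Analytics workspace.
Set-AzDiagnosticSetting -ResourceId $frontDoor.ResourceId -WorkspaceId $workspace.ResourceId `
  -Category "FrontdoorAccessLog","FrontdoorWebApplicationFirewallLog" -Enabled $true
```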
-The following query example returns WAF logs on blocked requests: +::: zone-end ::: zone pivot="front-door-classic" -``` WAFlogQuery + +The **FrontdoorAccessLog** includes all requests that go through Front Door. For more information on the Front Door access log, including the log schema, see [Monitoring metrics and logs in Azure Front Door (classic)](../../frontdoor/front-door-diagnostics.md). + +::: zone-end + +The following example query returns the access log entries: + +::: zone pivot="front-door-standard-premium" + +```kusto AzureDiagnostics -| where ResourceType == "FRONTDOORS" and Category == "FrontdoorWebApplicationFirewallLog" -| where action_s == "Block" +| where ResourceProvider == "MICROSOFT.CDN" and Category == "FrontDoorAccessLog" ``` ::: zone-end -::: zone pivot="front-door-standard-premium" -``` WAFlogQuery -AzureDiagnostics -| where ResourceProvider == "MICROSOFT.CDN" and Category == "FrontDoorWebApplicationFirewallLog" -| where action_s == "Block" +::: zone pivot="front-door-classic" +```kusto +AzureDiagnostics +| where ResourceType == "FRONTDOORS" and Category == "FrontdoorAccessLog" ``` + ::: zone-end -Here is an example of a logged request in WAF log: +The following shows an example log entry: -``` WAFlogQuerySample +```json { - "time": "2020-06-09T22:32:17.8376810Z", - "category": "FrontdoorWebApplicationFirewallLog", - "operationName": "Microsoft.Network/FrontDoorWebApplicationFirewallLog/Write", - "properties": - { - "clientIP":"xxx.xxx.xxx.xxx", - "clientPort":"52097", - "socketIP":"xxx.xxx.xxx.xxx", - "requestUri":"https://wafdemofrontdoorwebapp.azurefd.net:443/?q=%27%20or%201=1", - "ruleName":"Microsoft_DefaultRuleSet-1.1-SQLI-942100", - "policy":"WafDemoCustomPolicy", - "action":"Block", - "host":"wafdemofrontdoorwebapp.azurefd.net", - "trackingReference":"08Q3gXgAAAAAe0s71BET/QYwmqtpHO7uAU0pDRURHRTA1MDgANjMxNTAwZDAtOTRiNS00YzIwLTljY2YtNjFhNzMyOWQyYTgy", - "policyMode":"prevention", - "details": - { - "matches": - [{ - "matchVariableName":"QueryParamValue:q", - "matchVariableValue":"' or 1=1" - }] - } - } + "time": "2020-06-09T22:32:17.8383427Z", + "category": "FrontdoorAccessLog", + "operationName": "Microsoft.Network/FrontDoor/AccessLog/Write", + "properties": { + "trackingReference": "08Q3gXgAAAAAe0s71BET/QYwmqtpHO7uAU0pDRURHRTA1MDgANjMxNTAwZDAtOTRiNS00YzIwLTljY2YtNjFhNzMyOWQyYTgy", + "httpMethod": "GET", + "httpVersion": "2.0", + "requestUri": "https://wafdemofrontdoorwebapp.azurefd.net:443/?q=%27%20or%201=1", + "requestBytes": "715", + "responseBytes": "380", + "userAgent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4157.0 Safari/537.36 Edg/85.0.531.1", + "clientIp": "xxx.xxx.xxx.xxx", + "socketIp": "xxx.xxx.xxx.xxx", + "clientPort": "52097", + "timeTaken": "0.003", + "securityProtocol": "TLS 1.2", + "routingRuleName": "WAFdemoWebAppRouting", + "rulesEngineMatchNames": [], + "backendHostname": "wafdemowebappuscentral.azurewebsites.net:443", + "sentToOriginShield": false, + "httpStatusCode": "403", + "httpStatusDetails": "403", + "pop": "SJC", + "cacheStatus": "CONFIG_NOCACHE" + } } ``` -The following example query returns AccessLogs entries: +### WAF logs + +::: zone pivot="front-door-standard-premium" + +The **FrontDoorWebApplicationFirewallLog** includes requests that match a WAF rule. 
+ +::: zone-end ::: zone pivot="front-door-classic" -``` AccessLogQuery -AzureDiagnostics -| where ResourceType == "FRONTDOORS" and Category == "FrontdoorAccessLog" +The **FrontdoorWebApplicationFirewallLog** includes any request that matches a WAF rule. + +::: zone-end + +The following table shows the values logged for each request: + +| Property | Description | +| ------------- | ------------- | +| Action |Action taken on the request. Logs include requests with all actions. Metrics include requests with all actions except *Log*.| +| ClientIp | The IP address of the client that made the request. If there was an `X-Forwarded-For` header in the request, the client IP address is taken from that header field instead. | +| ClientPort | The IP port of the client that made the request. | +| Details | Additional details on the request, including any threats that were detected.
                    matchVariableName: HTTP parameter name of the request matched, for example, header names (up to 100 characters maximum).
                    matchVariableValue: Values that triggered the match (up to 100 characters maximum). | +| Host | The `Host` header of the request. | +| Policy | The name of the WAF policy that processed the request. | +| PolicyMode | Operations mode of the WAF policy. Possible values are `Prevention` and `Detection`. | +| RequestUri | Full URI of the request. | +| RuleName | The name of the WAF rule that the request matched. | +| SocketIp | The source IP address seen by WAF. This IP address is based on the TCP session, and does not consider any request headers. | +| TrackingReference | The unique reference string that identifies a request served by Front Door. This value is sent to the client in the `X-Azure-Ref` response header. Use this field when searching for a specific request in the log. | + +The following example query shows the requests that were blocked by the Front Door WAF: + +::: zone pivot="front-door-classic" + +```kusto +AzureDiagnostics +| where ResourceType == "FRONTDOORS" and Category == "FrontdoorWebApplicationFirewallLog" +| where action_s == "Block" ``` + ::: zone-end ::: zone pivot="front-door-standard-premium" -``` AccessLogQuery -AzureDiagnostics -| where ResourceProvider == "MICROSOFT.CDN" and Category == "FrontDoorAccessLog" +```kusto +AzureDiagnostics +| where ResourceProvider == "MICROSOFT.CDN" and Category == "FrontDoorWebApplicationFirewallLog" +| where action_s == "Block" ``` + ::: zone-end -Here is an example of a logged request in Access log: +The following shows an example log entry, including the reason that the request was blocked: -``` AccessLogSample +```json { -"time": "2020-06-09T22:32:17.8383427Z", -"category": "FrontdoorAccessLog", -"operationName": "Microsoft.Network/FrontDoor/AccessLog/Write", - "properties": - { - "trackingReference":"08Q3gXgAAAAAe0s71BET/QYwmqtpHO7uAU0pDRURHRTA1MDgANjMxNTAwZDAtOTRiNS00YzIwLTljY2YtNjFhNzMyOWQyYTgy", - "httpMethod":"GET", - "httpVersion":"2.0", - "requestUri":"https://wafdemofrontdoorwebapp.azurefd.net:443/?q=%27%20or%201=1", - "requestBytes":"715", - "responseBytes":"380", - "userAgent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4157.0 Safari/537.36 Edg/85.0.531.1", - "clientIp":"xxx.xxx.xxx.xxx", - "socketIp":"xxx.xxx.xxx.xxx", - "clientPort":"52097", - "timeTaken":"0.003", - "securityProtocol":"TLS 1.2", - "routingRuleName":"WAFdemoWebAppRouting", - "rulesEngineMatchNames":[], - "backendHostname":"wafdemowebappuscentral.azurewebsites.net:443", - "sentToOriginShield":false, - "httpStatusCode":"403", - "httpStatusDetails":"403", - "pop":"SJC", - "cacheStatus":"CONFIG_NOCACHE" + "time": "2020-06-09T22:32:17.8376810Z", + "category": "FrontdoorWebApplicationFirewallLog", + "operationName": "Microsoft.Network/FrontDoorWebApplicationFirewallLog/Write", + "properties": { + "clientIP": "xxx.xxx.xxx.xxx", + "clientPort": "52097", + "socketIP": "xxx.xxx.xxx.xxx", + "requestUri": "https://wafdemofrontdoorwebapp.azurefd.net:443/?q=%27%20or%201=1", + "ruleName": "Microsoft_DefaultRuleSet-1.1-SQLI-942100", + "policy": "WafDemoCustomPolicy", + "action": "Block", + "host": "wafdemofrontdoorwebapp.azurefd.net", + "trackingReference": "08Q3gXgAAAAAe0s71BET/QYwmqtpHO7uAU0pDRURHRTA1MDgANjMxNTAwZDAtOTRiNS00YzIwLTljY2YtNjFhNzMyOWQyYTgy", + "policyMode": "prevention", + "details": { + "matches": [ + { + "matchVariableName": "QueryParamValue:q", + "matchVariableValue": "' or 1=1" + } + ] } + } } - ``` ## Next steps diff --git a/articles/web-application-firewall/ag/ag-overview.md 
b/articles/web-application-firewall/ag/ag-overview.md index 8a99486acf568..8d19dc6cab94c 100644 --- a/articles/web-application-firewall/ag/ag-overview.md +++ b/articles/web-application-firewall/ag/ag-overview.md @@ -5,7 +5,7 @@ description: This article provides an overview of Web Application Firewall (WAF) services: web-application-firewall author: vhorne ms.service: web-application-firewall -ms.date: 04/21/2022 +ms.date: 05/06/2022 ms.author: victorh ms.topic: conceptual --- @@ -141,6 +141,10 @@ There's a threshold of 5 for the Anomaly Score to block traffic. So, a single *C > [!NOTE] > The message that's logged when a WAF rule matches traffic includes the action value "Blocked." But the traffic is actually only blocked for an Anomaly Score of 5 or higher. For more information, see [Troubleshoot Web Application Firewall (WAF) for Azure Application Gateway](web-application-firewall-troubleshoot.md#understanding-waf-logs). +### Configuration + +You can configure and deploy all WAF policies using the Azure portal, REST APIs, Azure Resource Manager templates, and Azure PowerShell. You can also configure and manage Azure WAF policies at scale using Firewall Manager integration (preview). For more information, see [Use Azure Firewall Manager to manage Web Application Firewall policies (preview)](../shared/manage-policies.md). + ### WAF monitoring Monitoring the health of your application gateway is important. Monitoring the health of your WAF and the applications that it protects are supported by integration with Microsoft Defender for Cloud, Azure Monitor, and Azure Monitor logs. diff --git a/articles/web-application-firewall/cdn/cdn-overview.md b/articles/web-application-firewall/cdn/cdn-overview.md index 942f34afd6fa5..f1c76c5a8c7dc 100644 --- a/articles/web-application-firewall/cdn/cdn-overview.md +++ b/articles/web-application-firewall/cdn/cdn-overview.md @@ -5,7 +5,7 @@ services: web-application-firewall author: vhorne ms.service: web-application-firewall ms.topic: conceptual -ms.date: 08/31/2020 +ms.date: 05/26/2022 ms.author: victorh --- @@ -28,7 +28,7 @@ You can configure a WAF policy and associate that policy to one or more CDN endp - custom rules that you can create. -- managed rule sets that are a collection of Azure managed pre-configured rules. +- managed rule sets that are a collection of Azure-managed pre-configured rules. When both are present, custom rules are processed before processing the rules in a managed rule set. A rule is made of a match condition, a priority, and an action. Action types supported are: *ALLOW*, *BLOCK*, *LOG*, and *REDIRECT*. You can create a fully customized policy that meets your specific application protection requirements by combining managed and custom rules. @@ -51,7 +51,7 @@ You can choose one of the following actions when a request matches a rule's cond - *Allow*: The request passes through the WAF and is forwarded to back-end. No further lower priority rules can block this request. - *Block*: The request is blocked and WAF sends a response to the client without forwarding the request to the back-end. - *Log*: The request is logged in the WAF logs and WAF continues evaluating lower priority rules. -- *Redirect*: WAF redirects the request to the specified URI. The URI specified is a policy level setting. Once configured, all requests that match the *Redirect* action is sent to that URI. +- *Redirect*: WAF redirects the request to the specified URI. The URI specified is a policy level setting. 
Once configured, all requests that match the *Redirect* action are sent to that URI. ## WAF rules @@ -66,7 +66,7 @@ Custom rules can have match rules and rate control rules. You can configure the following custom match rules: -- *IP allow list and block list*: You can control access to your web applications based on a list of client IP addresses or IP address ranges. Both IPv4 and IPv6 address types are supported. This list can be configured to either block or allow those requests where the source IP matches an IP in the list. +- *IP allowlist and blocklist*: You can control access to your web applications based on a list of client IP addresses or IP address ranges. Both IPv4 and IPv6 address types are supported. This list can be configured to either block or allow those requests where the source IP matches an IP in the list. - *Geographic based access control*: You can control access to your web applications based on the country code that's associated with a client's IP address. @@ -78,7 +78,7 @@ You can configure the following custom match rules: A rate control rule limits abnormally high traffic from any client IP address. -- *Rate limiting rules*: You can configure a threshold on the number of web requests allowed from a client IP address during a one-minute duration. This rule is distinct from an IP list-based allow/block custom rule that either allows all or blocks all request from a client IP address. Rate limits can be combined with additional match conditions such as HTTP(S) parameter matches for granular rate control. +- *Rate limiting rules*: You can configure a threshold on the number of web requests allowed from a client IP address during a one-minute duration. This rule is distinct from an IP list-based allow/block custom rule that either allows all or blocks all requests from a client IP address. Rate limits can be combined with more match conditions such as HTTP(S) parameter matches for granular rate control. ### Azure-managed rule sets @@ -109,4 +109,4 @@ Monitoring for WAF with CDN is integrated with Azure Monitor to track alerts and ## Next steps -- [Tutorial: Create a WAF policy with Azure CDN using the Azure portal](waf-cdn-create-portal.md) +- [Azure CLI for CDN WAF](/cli/azure/cdn/waf) diff --git a/articles/web-application-firewall/cdn/waf-cdn-create-portal.md b/articles/web-application-firewall/cdn/waf-cdn-create-portal.md deleted file mode 100644 index 0857f5a1f545b..0000000000000 --- a/articles/web-application-firewall/cdn/waf-cdn-create-portal.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: 'Tutorial: Create WAF policy for Azure CDN - Azure portal' -description: In this tutorial, you learn how to create a Web Application Firewall (WAF) policy on Azure CDN using the Azure portal. -author: vhorne -ms.service: web-application-firewall -services: web-application-firewall -ms.topic: tutorial -ms.date: 09/16/2020 -ms.author: victorh ---- - -# Tutorial: Create a WAF policy on Azure CDN using the Azure portal - -This tutorial shows you how to create a basic Azure Web Application Firewall (WAF) policy and apply it to an endpoint on Azure Content Delivery Network (CDN). - -In this tutorial, you learn how to: - -> [!div class="checklist"] -> * Create a WAF policy -> * Associate it with a CDN endpoint. You can associate a WAF policy only with endpoints that are hosted on the **Azure CDN Standard from Microsoft** SKU.
-> * Configure WAF rules - -## Prerequisites - -Create an Azure CDN profile and endpoint by following the instructions in [Quickstart: Create an Azure CDN profile and endpoint](../../cdn/cdn-create-new-endpoint.md). - -## Create a Web Application Firewall policy - -First, create a basic WAF policy with a managed Default Rule Set (DRS) using the portal. - -1. On the top left-hand side of the screen, select **Create a resource**>search for **WAF**>select **Web application firewall** > select **Create**. -2. In the **Basics** tab of the **Create a WAF policy** page, enter or select the following information, accept the defaults for the remaining settings, and then select **Review + create**: - - | Setting | Value | - | --- | --- | - | Policy For |Select Azure CDN (Preview).| - | Subscription |Select your CDN Profile subscription name.| - | Resource group |Select your CDN Profile resource group name.| - | Policy name |Enter a unique name for your WAF policy.| - - :::image type="content" source="../media/waf-cdn-create-portal/basic.png" alt-text="Screenshot of the Create a W A F policy page, with a Review + create button and values entered for various settings." border="false"::: - -3. In the **Association** tab of the **Create a WAF policy** page, select **Add CDN Endpoint**, enter the following settings, and then select **Add**: - - | Setting | Value | - | --- | --- | - | CDN Profile | Select your CDN profile name.| - | Endpoint | Select the name of your endpoint, then select **Add**.| - - > [!NOTE] - > If the endpoint is associated with a WAF policy, it is shown grayed out. You must first remove the Endpoint from the associated policy, and then re-associate the endpoint to a new WAF policy. -1. Select **Review + create**, then select **Create**. - -## Configure Web Application Firewall policy (optional) - -### Change mode - -By default WAF policy is in *Detection* mode when you create a WAF policy. In *Detection* mode, WAF doesn't block any requests. Instead, requests matching the WAF rules are logged at WAF logs. - -To see WAF in action, you can change the mode settings from *Detection* to *Prevention*. In *Prevention* mode, requests that match rules that are defined in Default Rule Set (DRS) are blocked and logged at WAF logs. - - :::image type="content" source="../media/waf-cdn-create-portal/policy.png" alt-text="Screenshot of the Policy settings section. The Mode toggle is set to Prevention." border="false"::: - -### Custom rules - -To create a custom rule, select **Add custom rule** under the **Custom rules** section. This opens the custom rule configuration page. There are two types of custom rules: **match rule** and **rate limit** rule. - -The following screenshot shows a custom match rule to block a request if the query string contains the value **blockme**. - -:::image type="content" source="../media/waf-cdn-create-portal/custommatch.png" alt-text="Screenshot of the custom rule configuration page showing settings for a rule that checks whether the QueryString variable contains the value blockme." border="false"::: - -Rate limit rules require two additional fields: **Rate limit duration** and **Rate limit threshold (requests)** as shown in the following example: - -:::image type="content" source="../media/waf-cdn-create-portal/customrate.png" alt-text="Screenshot of the rate limit rule configuration page. A Rate limit duration list box and a Rate limit threshold (requests) box are visible." 
border="false"::: - -### Default Rule Set (DRS) - -The Azure managed Default Rule Set is enabled by default. To disable an individual rule within a rule group, expand the rules within that rule group, select the check box in front of the rule number, and select **Disable** on the tab above. To change actions types for individual rules within the rule set, select the check box in front of the rule number, and then select the **Change action** tab above. - - :::image type="content" source="../media/waf-cdn-create-portal/managed2.png" alt-text="Screenshot of the Managed rules page showing a rule set, rule groups, rules, and Enable, Disable, and Change Action buttons. One rule is checked." border="false"::: - -## Clean up resources - -When no longer needed, remove the resource group and all related resources. - - -## Next steps - -> [!div class="nextstepaction"] -> [Learn about Azure Web Application Firewall](../overview.md) diff --git a/articles/web-application-firewall/index.yml b/articles/web-application-firewall/index.yml index 873060806cb96..a7500979dac22 100644 --- a/articles/web-application-firewall/index.yml +++ b/articles/web-application-firewall/index.yml @@ -5,13 +5,13 @@ summary: Web Application Firewall (WAF) provides centralized protection of your metadata: title: Web Application Firewall documentation - description: Learn about Web Application Firewall that can deployed with Application Gateway and Front Door + description: Learn about Web Application Firewall that can be deployed with Application Gateway and Front Door services: web-application-firewall ms.service: web-application-firewall ms.topic: landing-page author: vhorne ms.author: victorh - ms.date: 06/30/2021 + ms.date: 05/26/2022 # linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | whats-new @@ -54,8 +54,6 @@ landingContent: url: ./ag/application-gateway-web-application-firewall-portal.md - text: Web Application Firewall on Front Door url: ./afds/waf-front-door-create-portal.md - - text: WAF policy on Azure CDN - url: ./cdn/waf-cdn-create-portal.md # Card - title: Manage WAF rules on Application Gateway diff --git a/articles/web-application-firewall/media/manage-policies/manage-security.png b/articles/web-application-firewall/media/manage-policies/manage-security.png new file mode 100644 index 0000000000000..dd55ccc31dff8 Binary files /dev/null and b/articles/web-application-firewall/media/manage-policies/manage-security.png differ diff --git a/articles/web-application-firewall/media/manage-policies/policies.png b/articles/web-application-firewall/media/manage-policies/policies.png new file mode 100644 index 0000000000000..0c92c43aeecdd Binary files /dev/null and b/articles/web-application-firewall/media/manage-policies/policies.png differ diff --git a/articles/web-application-firewall/media/manage-policies/upgrade-policy.png b/articles/web-application-firewall/media/manage-policies/upgrade-policy.png new file mode 100644 index 0000000000000..869e95cf2d7c9 Binary files /dev/null and b/articles/web-application-firewall/media/manage-policies/upgrade-policy.png differ diff --git a/articles/web-application-firewall/shared/manage-policies.md b/articles/web-application-firewall/shared/manage-policies.md new file mode 100644 index 0000000000000..43298fc2a5a27 --- /dev/null +++ b/articles/web-application-firewall/shared/manage-policies.md @@ -0,0 +1,45 @@ +--- +title: Use Azure Firewall Manager to manage Web Application Firewall 
policies (preview) +description: Learn about managing Azure Web Application Firewall policies using Azure Firewall Manager +author: vhorne +ms.author: victorh +ms.service: web-application-firewall +ms.topic: conceptual +ms.date: 06/02/2022 +--- + +# Configure WAF policies using Azure Firewall Manager (preview) + +> [!IMPORTANT] +> Configuring Web Application Firewall policies using Azure Firewall Manager is currently in PREVIEW. +> See the [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/) for legal terms that apply to Azure features that are in beta, preview, or otherwise not yet released into general availability. + +Azure Firewall Manager is a platform to manage and protect your network security resources at scale. You can associate your WAF policies with an Application Gateway or Azure Front Door within Azure Firewall Manager, all in a single place. + +## View and manage WAF policies + +In Azure Firewall Manager, you can create and view all WAF policies in one central place across subscriptions and regions. + +To navigate to WAF policies, select the **Web Application Firewall Policies** tab on the left, under **Security**. + +:::image type="content" source="../media/manage-policies/policies.png" alt-text="Screenshot showing Web Application Firewall policies in Firewall Manager." lightbox="../media/manage-policies/policies.png"::: + +## Associate or dissociate WAF policies + +In Azure Firewall Manager, you can create and view all WAF policies in your subscriptions. These policies can be associated with or dissociated from an application delivery platform. Select the service and then select **Manage Security**. + +:::image type="content" source="../media/manage-policies/manage-security.png" alt-text="Screenshot showing Manage Security in Firewall Manager."::: + +## Upgrade Application Gateway WAF configuration to WAF policy + +For an Application Gateway with a WAF configuration, you can upgrade the WAF configuration to a WAF policy associated with the Application Gateway. + +The WAF policy can be shared across multiple application gateways. Also, a WAF policy allows you to take advantage of advanced and new features like bot protection, newer rule sets, and reduced false positives. New features are only released on WAF policies. + +To upgrade a WAF configuration to a WAF policy, select **Upgrade from WAF configuration** from the desired application gateway.
+ +:::image type="content" source="../media/manage-policies/upgrade-policy.png" alt-text="Screenshot showing upgrade from WAF configuration."::: + +## Next steps + +- [Manage Azure Web Application Firewall policies (preview)](../../firewall-manager/manage-web-application-firewall-policies.md) diff --git a/articles/web-application-firewall/toc.yml b/articles/web-application-firewall/toc.yml index d70e0915e1616..26c7721cbc404 100644 --- a/articles/web-application-firewall/toc.yml +++ b/articles/web-application-firewall/toc.yml @@ -37,10 +37,6 @@ items: - name: Configure WAF policy - portal href: ./afds/waf-front-door-create-portal.md - - name: Content Delivery Network - items: - - name: Configure WAF policy - portal - href: ./cdn/waf-cdn-create-portal.md - name: Samples items: - name: Azure PowerShell @@ -145,6 +141,8 @@ href: ./afds/waf-front-door-tutorial-geo-filtering.md - name: Monitoring and logging href: ./afds/waf-front-door-monitor.md + - name: Configure policies using Firewall Manager + href: ./shared/manage-policies.md - name: Using Microsoft Sentinel with Web Application Firewall href: waf-sentinel.md - name: Troubleshoot diff --git a/articles/zone-pivot-groups.yml b/articles/zone-pivot-groups.yml index 491fcd17deb79..94b3cb271cbae 100644 --- a/articles/zone-pivot-groups.yml +++ b/articles/zone-pivot-groups.yml @@ -885,6 +885,17 @@ groups: title: CLI - id: programming-language-rest title: REST +- id: speech-studio-cli-rest +# Owner: eur + title: Programming languages + prompt: Choose a tool or API + pivots: + - id: speech-studio + title: Speech Studio + - id: speech-cli + title: CLI + - id: rest-api + title: REST - id: programming-languages-set-nineteen title: Programming languages prompt: Choose a programming language @@ -1650,8 +1661,8 @@ groups: title: Data Box shipping prompt: Choose shipping location pivots: - - id: north-america - title: North America + - id: americas + title: Americas - id: europe title: Europe - id: asia diff --git a/bread/toc.yml b/bread/toc.yml index 0942dd8459848..62de5fdb78d71 100644 --- a/bread/toc.yml +++ b/bread/toc.yml @@ -314,6 +314,9 @@ - name: Logz.io tocHref: /azure/partner-solutions/logzio/ topicHref: /azure/partner-solutions/logzio/overview + - name: Dynatrace + tocHref: /azure/partner-solutions/dynatrace/ + topicHref: /azure/partner-solutions/dynatrace/dynatrace-overview - name: NGINX tocHref: /azure/partner-solutions/nginx/ topicHref: /azure/partner-solutions/nginx/nginx-overview diff --git a/docfx.json b/docfx.json index 13e30d5a3a2a1..75506726d3aa4 100644 --- a/docfx.json +++ b/docfx.json @@ -221,7 +221,7 @@ "articles/app-service/environment/*.md": "madsd", "articles/app-service/scripts/*.md": "cephalin", "articles/app-service-mobile/**/*.md": "elamalani", - "articles/automanage/*md": "daberry", + "articles/automanage/*md": "orspod", "articles/automation/**/*.md": "sgsneha", "articles/azure-arc/**/*.md": "JnHs", "articles/azure-arc/servers/**/*.md": "johnmarco", @@ -242,6 +242,7 @@ "articles/azure-monitor/*.md": "bwren", "articles/azure-monitor/insights/*.md": "bwren", "articles/azure-monitor/logs/*.md": "guywi-ms", + "articles/azure-monitor/profiler/*.md": "hhunter-ms", "articles/azure-monitor/visualize/**/*.md": "rboucher", "articles/azure-monitor/vm/*.md": "bwren", "articles/azure-portal/**/*.md": "JnHs", @@ -254,7 +255,6 @@ "articles/azure-resource-manager/templates/**/*.md": "mumian", "articles/azure-resource-manager/bicep/**/*.md": "mumian", "articles/azure-resource-manager/troubleshooting/**/*.md": "davidsmatlak", - 
"articles/partner-solutions/**/*.md": "davidsmatlak", "articles/azure-vmware/**/*.md": "suzizuber", "articles/backup/**/*.md": "v-amallick", "articles/backup/**/*.yml": "v-amallick", @@ -284,6 +284,7 @@ "articles/applied-ai-services/metrics-advisor/**/*.md": "mrbullwinkle", "articles/cognitive-services/personalizer/**/*.md": "edjez", "articles/cognitive-services/QnAMaker/**/*.md": "jboback", + "articles/cognitive-services/openai/**/*.md": "mrbullwinkle", "articles/cognitive-services/Speech-Service/**/*.md": "eric-urban", "articles/cognitive-services/language-service/**/*.md": "aahill", "articles/cognitive-services/Translator/**/*.md": "laujan", @@ -586,7 +587,7 @@ "articles/app-service/environment/*.md": "madsd", "articles/app-service/scripts/*.md": "cephalin", "articles/app-service-mobile/**/*.md": "emalani", - "articles/automanage/*md": "daberry", + "articles/automanage/*md": "orspodek", "articles/automation/**/*.md": "sudhirsneha", "articles/azure-arc/**/*.md": "jenhayes", "articles/azure-arc/servers/**/*.md": "johnmarc", @@ -598,11 +599,13 @@ "articles/azure-monitor/agents/*.md": "bwren", "articles/azure-monitor/alerts/*.md": "abbyweisberg", "articles/azure-monitor/autoscale/*.md": "robb", + "articles/azure-monitor/change/*.md": "hannahhunter", "articles/azure-monitor/containers/*.md": "bwren", "articles/azure-monitor/insights/container-insights*.md": "bwren", "articles/azure-monitor/insights/*.md": "robb", "articles/azure-monitor/essentials/*.md": "robb", "articles/azure-monitor/logs/*.md": "guywild", + "articles/azure-monitor/profiler/*.md": "hannahhunter", "articles/azure-monitor/visualize/**/*.md": "abbyweisberg", "articles/azure-monitor/vm/*.md": "bwren", "articles/azure-percept/*.md": "ngt", @@ -617,7 +620,6 @@ "articles/azure-resource-manager/templates/**/*.md": "jgao", "articles/azure-resource-manager/bicep/**/*.md": "jgao", "articles/azure-resource-manager/troubleshooting/**/*.md": "davidsmatlak", - "articles/partner-solutions/**/*.md": "davidsmatlak", "articles/azure-vmware/**/*.md": "v-szuber", "articles/backup/**/*.md": "v-amallick", "articles/backup/**/*.yml": "v-amallick", @@ -646,7 +648,8 @@ "articles/cognitive-services/LUIS/**/*.md": "aahi", "articles/applied-ai-services/metrics-advisor/**/*.md": "mbullwin", "articles/cognitive-services/personalizer/**/*.md": "edjez", - "articles/cognitive-services/QnAMaker/**/*.md": "mbullwin", + "articles/cognitive-services/QnAMaker/**/*.md": "jboback", + "articles/cognitive-services/openai/**/*.md": "mbullwin", "articles/cognitive-services/Speech-Service/**/*.md": "eur", "articles/cognitive-services/language-service/**/*.md": "aahi", "articles/cognitive-services/Translator/**/*.md": "lajanuar", @@ -821,6 +824,7 @@ "articles/azure-monitor/agents/**/*.md": "agents", "articles/azure-monitor/alerts/**/*.md": "alerts", "articles/azure-monitor/autoscale/**/*.md": "autoscale", + "articles/azure-monitor/change/*.md": "change-analysis", "articles/azure-monitor/containers/**/*.md": "containers", "articles/azure-monitor/essentials/**/*.md": "essentials", "articles/azure-monitor/*.md": "general", @@ -836,6 +840,8 @@ "articles/partner-solutions/datadog/**/*.md": "datadog", "articles/partner-solutions/elastic/**/*.md": "elastic", "articles/partner-solutions/logzio/**/*.md": "logzio", + "articles/partner-solutions/dynatrace/**/*.md": "dynatrace", + "articles/partner-solutions/nginx/**/*.md": "nginx", "articles/cognitive-services/Anomaly-Detector/**/*.md": "anomaly-detector", "articles/cognitive-services/Bing-Autosuggest/**/*.md": 
"bing-autosuggest", "articles/cognitive-services/Bing-Custom-Search/**/*.md": "bing-custom-search", @@ -994,8 +1000,8 @@ "articles/app-service/scripts/*.yml": "Azure App Service", "articles/asc-for-iot/**/*.md": "Azure Security Center for IoT", "articles/asc-for-iot/**/*.yml": "Azure Security Center for IoT", - "articles/active-directory/develop/**/*.md": "Microsoft identity platform", - "articles/active-directory/develop/**/*.yml": "Microsoft identity platform", + "articles/active-directory/**/*.md": "Microsoft Entra", + "articles/active-directory/**/*.yml": "Microsoft Entra", "articles/azure-arc/**/*.md": "Azure Arc", "articles/azure-fluid-relay/**/*.md": "Azure Fluid Relay", "articles/azure-fluid-relay/**/*.yml": "Azure Fluid Relay", diff --git a/includes/active-directory-app-provisioning-ldap.md b/includes/active-directory-app-provisioning-ldap.md index ced97879f6fe9..4f76aa727167d 100644 --- a/includes/active-directory-app-provisioning-ldap.md +++ b/includes/active-directory-app-provisioning-ldap.md @@ -7,7 +7,7 @@ For important details on what this service does, how it works, and frequently as ### On-premises prerequisites - A target directory, such as Active Directory Lightweight Services (AD LDS), in which users can be created, updated, and deleted. This directory instance should not be a directory that is also used to provision users into Azure AD, because having both scenarios may create a loop with Azure AD Connect. - - A computer with at least 3 GB of RAM, to host a provisioning agent. The computer should have Windows Server 2016 or a later version of Windows Server, with connectivity to the target directory, and with outbound connectivity to login.microsoftonline.com, [other Microsoft Online Services](/microsoft-365/enterprise/urls-and-ip-address-ranges?view=o365-worldwide) and [Azure](/azure/azure-portal/azure-portal-safelist-urls?tabs=public-cloud) domains. An example is a Windows Server 2016 virtual machine hosted in Azure IaaS or behind a proxy. + - A computer with at least 3 GB of RAM, to host a provisioning agent. The computer should have Windows Server 2016 or a later version of Windows Server, with connectivity to the target directory, and with outbound connectivity to login.microsoftonline.com, [other Microsoft Online Services](/microsoft-365/enterprise/urls-and-ip-address-ranges?view=o365-worldwide) and [Azure](../articles/azure-portal/azure-portal-safelist-urls.md?tabs=public-cloud) domains. An example is a Windows Server 2016 virtual machine hosted in Azure IaaS or behind a proxy. - The .NET Framework 4.7.2 needs to be installed. - Optional: Although it is not required, it is recommended to download [Microsoft Edge for Windows Server](https://www.microsoft.com/en-us/edge?r=1) and use it in-place of Internet Explorer. @@ -37,7 +37,7 @@ For more information, see the [Generic LDAP Connector reference](/microsoft-iden - An Azure AD tenant with Azure AD Premium P1 or Premium P2 (or EMS E3 or E5). [!INCLUDE [active-directory-p1-license.md](active-directory-p1-license.md)] - - The Hybrid Identity Administrator role for configuring the provisioning agent and the Application Administrator or Cloud Administrator roles for configuring provisioning in the Azure portal. + - The Hybrid Identity Administrator role for configuring the provisioning agent and the Application Administrator or Cloud Application Administrator roles for configuring provisioning in the Azure portal. 
### More recommendations and limitations The following bullet points are more recommendations and limitations. @@ -137,7 +137,7 @@ Currently, the LDAP connector provisions users with a blank password. This prov >Please use different provisioning agents for on-premises application provisioning and Azure AD Connect Cloud Sync / HR-driven provisioning. All three scenarios should not be managed on the same agent. 1. Open the provisioning agent installer, agree to the terms of service, and select **next**. 1. Open the provisioning agent wizard, and select **On-premises provisioning** when prompted for the extension you want to enable. - 1. Provide credentials for an Azure AD administrator when you're prompted to authorize. The Hybrid Identity Administrator or global administrator role is required. + 1. Provide credentials for an Azure AD administrator when you're prompted to authorize. The Hybrid Identity Administrator or Global Administrator role is required. 1. Select **Confirm** to confirm the installation was successful. 1. Sign in to the Azure portal. 1. Go to **Enterprise applications** > **Add a new application**. @@ -268,7 +268,8 @@ Currently, the LDAP connector provisions users with a blank password. This prov Now that you have the Azure AD ECMA Connector Host talking with Azure AD, and the attribute mapping configured, you can move on to configuring who's in scope for provisioning. >[!IMPORTANT] ->If you were signed in using a Hybrid identity administrator role, you need to sign-out and sign-in with an account that has the app administrator or global administrator role, for this section. The Hybrid identity administrator role does not have permissions to assign users to applications. +>If you were signed in using a Hybrid Identity Administrator role, you need to sign-out and sign-in with an account that has the Application Administrator, Cloud Application Administrator or Global Administrator role, for this section. The Hybrid Identity Administrator role does not have permissions to assign users to applications. + 1. In the Azure portal, select **Enterprise applications**. 2. Select the **On-premises ECMA app** application. diff --git a/includes/active-directory-app-provisioning-sql.md b/includes/active-directory-app-provisioning-sql.md index 5878b0f3c24f9..6b34ca61f79af 100644 --- a/includes/active-directory-app-provisioning-sql.md +++ b/includes/active-directory-app-provisioning-sql.md @@ -10,7 +10,7 @@ For important details on what this service does, how it works, and frequently as ### On-premises prerequisites - The application relies upon a SQL database, in which records for users can be created, updated, and deleted. - - A computer with at least 3 GB of RAM, to host a provisioning agent. The computer should have Windows Server 2016 or a later version of Windows Server, with connectivity to the target database system, and with outbound connectivity to login.microsoftonline.com, [other Microsoft Online Services](/microsoft-365/enterprise/urls-and-ip-address-ranges?view=o365-worldwide) and [Azure](/azure/azure-portal/azure-portal-safelist-urls?tabs=public-cloud) domains. An example is a Windows Server 2016 virtual machine hosted in Azure IaaS or behind a proxy. + - A computer with at least 3 GB of RAM, to host a provisioning agent. 
The computer should have Windows Server 2016 or a later version of Windows Server, with connectivity to the target database system, and with outbound connectivity to login.microsoftonline.com, [other Microsoft Online Services](/microsoft-365/enterprise/urls-and-ip-address-ranges?view=o365-worldwide) and [Azure](../articles/azure-portal/azure-portal-safelist-urls.md?tabs=public-cloud) domains. An example is a Windows Server 2016 virtual machine hosted in Azure IaaS or behind a proxy. - The computer should have .NET Framework 4.7.2 and an ODBC driver for the SQL database. Configuration of the connection to the application's database is done via a wizard. Depending on the options you select, some of the wizard screens might not be available and the information might be slightly different. Use the following information to guide you in your configuration. @@ -30,7 +30,7 @@ Note: The table-based method of the generic SQL connector requires that column n - An Azure AD tenant with Azure AD Premium P1 or Premium P2 (or EMS E3 or E5). [!INCLUDE [active-directory-p1-license.md](active-directory-p1-license.md)] - - The Hybrid identity administrator role for configuring the provisioning agent and the Application Administrator or Cloud Administrator roles for configuring provisioning in the Azure portal. + - The Hybrid Identity Administrator role for configuring the provisioning agent and the Application Administrator or Cloud Application Administrator roles for configuring provisioning in the Azure portal. ## Prepare the sample database @@ -122,7 +122,7 @@ The generic SQL connector requires a Data Source Name (DSN) file to connect to t >Please use different provisioning agents for on-premises application provisioning and Azure AD Connect Cloud Sync / HR-driven provisioning. All three scenarios should not be managed on the same agent. 1. Open the provisioning agent installer, agree to the terms of service, and select **next**. 1. Open the provisioning agent wizard, and select **On-premises provisioning** when prompted for the extension you want to enable. - 1. Provide credentials for an Azure AD administrator when you're prompted to authorize. Hybrid identity administrator or global administrator is required. + 1. Provide credentials for an Azure AD administrator when you're prompted to authorize. The Hybrid Identity Administrator or Global Administrator role is required. 1. Select **Confirm** to confirm the installation was successful. 1. Sign in to the Azure portal. 1. Go to **Enterprise applications** > **Add a new application**. @@ -311,7 +311,7 @@ Now you need to map attributes between the representation of the user in Azure A Now that you have the Azure AD ECMA Connector Host talking with Azure AD, and the attribute mapping configured, you can move on to configuring who's in scope for provisioning. >[!IMPORTANT] ->If you were signed in using a Hybrid identity administrator role, you need to sign-out and sign-in with an account that has the app administrator or global administrator role, for this section. The Hybrid identity administrator role does not have permissions to assign users to applications. +>If you were signed in using a Hybrid Identity Administrator role, you need to sign-out and sign-in with an account that has the Application Administrator, Cloud Application Administrator or Global Administrator role, for this section. The Hybrid Identity Administrator role does not have permissions to assign users to applications. 1. In the Azure portal, select **Enterprise applications**. 2. 
Select the **On-premises ECMA app** application. @@ -388,4 +388,4 @@ GO ALTER TABLE [dbo].[Employees] ADD CONSTRAINT [DF_Employees_InternalGUID] DEFAULT (newid()) FOR [InternalGUID] GO -``` +``` \ No newline at end of file diff --git a/includes/aml-compute-target-deploy.md b/includes/aml-compute-target-deploy.md index a485cb232c4d3..271f37a97d58f 100644 --- a/includes/aml-compute-target-deploy.md +++ b/includes/aml-compute-target-deploy.md @@ -17,18 +17,16 @@ The compute target you use to host your model will affect the cost and availabil | Compute target | Used for | GPU support | FPGA support | Description | | ----- | ----- | ----- | ----- | ----- | | [Local web service](../articles/machine-learning/how-to-deploy-local-container-notebook-vm.md) | Testing/debugging |   |   | Use for limited testing and troubleshooting. Hardware acceleration depends on use of libraries in the local system. -| [Azure Kubernetes Service (AKS)](../articles/machine-learning/v1/how-to-deploy-azure-kubernetes-service.md) | Real-time inference

                    Recommended for production workloads. | [Yes](../articles/machine-learning/how-to-deploy-with-triton.md) (web service deployment) | [Yes](../articles/machine-learning/how-to-deploy-fpga-web-service.md) |Use for high-scale production deployments. Provides fast response time and autoscaling of the deployed service. Cluster autoscaling isn't supported through the Azure Machine Learning SDK. To change the nodes in the AKS cluster, use the UI for your AKS cluster in the Azure portal.

                    Supported in the designer. | +| [Azure Machine Learning Kubernetes](../articles/machine-learning/how-to-attach-kubernetes-anywhere.md) | Real-time inference

                     Batch inference | Yes | N/A | Run inferencing workloads on on-premises, cloud, and edge Kubernetes clusters. | | [Azure Container Instances](../articles/machine-learning/v1/how-to-deploy-azure-container-instance.md) | Real-time inference

                    Recommended for dev/test purposes only.|   |   | Use for low-scale CPU-based workloads that require less than 48 GB of RAM. Doesn't require you to manage a cluster.

                    Supported in the designer. | | [Azure Machine Learning compute clusters](../articles/machine-learning/tutorial-pipeline-batch-scoring-classification.md) | Batch inference | [Yes](../articles/machine-learning/tutorial-pipeline-batch-scoring-classification.md) (machine learning pipeline) |   | Run batch scoring on serverless compute. Supports normal and low-priority VMs. No support for real-time inference.| -| [Azure Arc enabled Kubernetes](../articles/machine-learning/how-to-attach-compute-targets.md) | Real-time inference

                    Batch inference | Yes | N/A | Run inferencing workloads on on-premise, cloud, and edge Kubernetes clusters managed in Azure Arc | > [!NOTE] -> Although compute targets like local, and Azure Machine Learning compute clusters support GPU for training and experimentation, using GPU for inference _when deployed as a web service_ is supported only on AKS. +> Although compute targets like local, and Azure Machine Learning compute clusters support GPU for training and experimentation, using GPU for inference _when deployed as a web service_ is supported only on Azure Machine Learning Kubernetes. > > Using a GPU for inference _when scoring with a machine learning pipeline_ is supported only on Azure Machine Learning compute. > > When choosing a cluster SKU, first scale up and then scale out. Start with a machine that has 150% of the RAM your model requires, profile the result and find a machine that has the performance you need. Once you've learned that, increase the number of machines to fit your need for concurrent inference. > [!NOTE] -> * Container instances are suitable only for small models less than 1 GB in size. -> * Use single-node AKS clusters for dev/test of larger models. \ No newline at end of file +> * Container instances are suitable only for small models less than 1 GB in size. \ No newline at end of file diff --git a/includes/aml-compute-target-train.md b/includes/aml-compute-target-train.md index 12db2552fe82b..876379ab4121a 100644 --- a/includes/aml-compute-target-train.md +++ b/includes/aml-compute-target-train.md @@ -20,14 +20,13 @@ You can use any of the following resources for a training compute target for mos |[Local computer](../articles/machine-learning/v1/how-to-attach-compute-targets.md#local-computer)| Yes |   |   | |[Azure Machine Learning compute cluster](../articles/machine-learning/how-to-create-attach-compute-cluster.md)| Yes | Yes | Yes | |[Azure Machine Learning compute instance](../articles/machine-learning/how-to-create-manage-compute-instance.md) | Yes (through SDK) | Yes | Yes | +|[Azure Machine Learning Kubernetes](../articles/machine-learning/how-to-attach-kubernetes-anywhere.md) | Yes | Yes | Yes | |[Remote VM](../articles/machine-learning/v1/how-to-attach-compute-targets.md#remote-virtual-machines) | Yes | Yes |   | |[Apache Spark pools (preview)](../articles/machine-learning/how-to-attach-compute-targets.md#apache-spark-pools)| Yes (SDK local mode only) | Yes |   | |[Azure Databricks](../articles/machine-learning/v1/how-to-attach-compute-targets.md#azure-databricks)| Yes (SDK local mode only) | Yes |   | |[Azure Data Lake Analytics](../articles/machine-learning/v1/how-to-attach-compute-targets.md#azure-data-lake-analytics) |   | Yes |   | |[Azure HDInsight](../articles/machine-learning/v1/how-to-attach-compute-targets.md#azure-hdinsight ) |   | Yes |   | |[Azure Batch](../articles/machine-learning/v1/how-to-attach-compute-targets.md#azbatch) |   | Yes |   | -|[Azure Kubernetes Service](../articles/machine-learning/v1/how-to-attach-compute-targets.md#kubernetes) (preview) | Yes | Yes | Yes | -|[Azure Arc-enabled Kubernetes](../articles/machine-learning/v1/how-to-attach-compute-targets.md#kubernetes) (preview) | Yes | Yes | Yes | > [!TIP] > The compute instance has 120GB OS disk. If you run out of disk space, [use the terminal](../articles/machine-learning/how-to-access-terminal.md) to clear at least 1-2 GB before you [stop or restart](../articles/machine-learning/how-to-create-manage-compute-instance.md#manage) the compute instance. 
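The compute-target tables above explain when an Azure Machine Learning compute cluster is the right target for training or batch inference. As an editorial illustration only (this snippet is not part of any file changed in this diff), the following sketch shows one common way to create or reuse such a cluster with the Azure Machine Learning Python SDK v1 (`azureml-core`); the workspace configuration file, the `cpu-cluster` name, and the `STANDARD_DS3_V2` VM size are assumptions chosen for the example.

```python
# Hedged sketch, assuming the Azure ML Python SDK v1 (azureml-core) and a workspace
# config.json in the working directory. Cluster name and VM size are placeholders.
from azureml.core import Workspace
from azureml.core.compute import AmlCompute, ComputeTarget
from azureml.core.compute_target import ComputeTargetException

ws = Workspace.from_config()          # loads the workspace from config.json
cluster_name = "cpu-cluster"          # placeholder compute target name

try:
    # Reuse the compute target if it already exists in the workspace.
    compute_target = ComputeTarget(workspace=ws, name=cluster_name)
    print(f"Found existing compute target: {cluster_name}")
except ComputeTargetException:
    # Scale up first: pick a VM size with enough RAM for a single run,
    # then scale out by raising max_nodes for concurrent runs.
    config = AmlCompute.provisioning_configuration(
        vm_size="STANDARD_DS3_V2",
        min_nodes=0,                          # scale to zero when idle
        max_nodes=4,                          # upper bound for scale-out
        idle_seconds_before_scaledown=1800,
    )
    compute_target = ComputeTarget.create(ws, cluster_name, config)
    compute_target.wait_for_completion(show_output=True)
```

This follows the sizing note above: choose the VM size first (scale up), confirm a single node meets the memory and performance requirements, and only then raise `max_nodes` (scale out) to cover concurrent runs.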
diff --git a/includes/api-management-portal-legacy.md b/includes/api-management-portal-legacy.md index 9cce2ff9af2c2..461e77a02cdfd 100644 --- a/includes/api-management-portal-legacy.md +++ b/includes/api-management-portal-legacy.md @@ -7,7 +7,7 @@ ms.author: danlep --- > [!NOTE] -> This documentation content is about the deprecated developer portal. You can continue to use it, as per usual, until its retirement in October 2023, when it will be removed from all API Management services. The deprecated portal will only receive critical security updates. Refer to the following articles for more details: +> The following documentation content is about the deprecated developer portal. You can continue to use it, as per usual, until its retirement in October 2023, when it will be removed from all API Management services. The deprecated portal will only receive critical security updates. Refer to the following articles for more details: > > - [Learn how to migrate to the new developer portal](../articles/api-management/developer-portal-deprecated-migration.md) > - [Azure API Management new developer portal overview](../articles/api-management/api-management-howto-developer-portal.md) diff --git a/includes/api-management-service-limits.md b/includes/api-management-service-limits.md index ff0e33eca0a52..78031db38128e 100644 --- a/includes/api-management-service-limits.md +++ b/includes/api-management-service-limits.md @@ -37,7 +37,7 @@ ms.custom: Include file | Maximum size of request or response body in [validate-content policy](../articles/api-management/validation-policies.md#validate-content)10 | 100 KB | | Maximum number of self-hosted gateways11 | 25 | | Maximum number of active WebSocket connections per unit | 200 | -| Maximum number of tags supported by an API Management resource|15|| +| Maximum number of tags supported by an API Management resource|15| 1 Scaling limits depend on the pricing tier. For details on the pricing tiers and their scaling limits, see [API Management pricing](https://azure.microsoft.com/pricing/details/api-management/).
                    2 Per unit cache size depends on the pricing tier. To see the pricing tiers and their scaling limits, see [API Management pricing](https://azure.microsoft.com/pricing/details/api-management/).
                    diff --git a/includes/azure-batch-limits.md b/includes/azure-batch-limits.md index 3bf8e1a2932c0..1f92e740d109b 100644 --- a/includes/azure-batch-limits.md +++ b/includes/azure-batch-limits.md @@ -17,6 +17,7 @@ | Low-priority cores per Batch account | 10-100 | Contact support | | **[Active](/rest/api/batchservice/job/get#jobstate)** jobs and job schedules per Batch account (**completed** jobs have no limit) | 100-300 | 1,0001 | | Pools per Batch account | 20-100 | 5001 | +| Private endpoint connections per Batch account | 100 | 100 | 1To request an increase beyond this limit, contact Azure Support. diff --git a/includes/azure-monitor-limits-app-insights.md b/includes/azure-monitor-limits-app-insights.md deleted file mode 100644 index c11d18eef540b..0000000000000 --- a/includes/azure-monitor-limits-app-insights.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "include file" -description: "include file" -services: azure-monitor -author: rboucher -tags: azure-service-management -ms.topic: "include" -ms.date: 02/07/2019 -ms.author: robb -ms.custom: "include file" ---- - -There are some limits on the number of metrics and events per application, that is, per instrumentation key. Limits depend on the [pricing plan](https://azure.microsoft.com/pricing/details/application-insights/) that you choose. - -| Resource | Limit | Note -| --- | --- | --- | -| Total data per day | 100 GB | You can reduce data by setting a cap. If you need more data, you can increase the limit in the portal, up to 1,000 GB. For capacities greater than 1,000 GB, send email to AIDataCap@microsoft.com. -| Throttling | 32,000 events/second | The limit is measured over a minute. -| Data retention | [30-730 days](../articles/azure-monitor/logs/data-retention-archive.md#set-retention-and-archive-policy-by-table) | This resource is for [Search](../articles/azure-monitor/app/diagnostic-search.md), [Analytics](../articles/azure-monitor/logs/log-query-overview.md), and [Metrics Explorer](../articles/azure-monitor/essentials/metrics-charts.md). -| [Availability multi-step test](../articles/azure-monitor/app/availability-multistep.md) detailed results retention | 90 days | This resource provides detailed results of each step. -| Maximum event size | 64,000 bytes | -| Property and metric name length | 150 | See [type schemas](https://github.com/MohanGsk/ApplicationInsights-Home/tree/master/EndpointSpecs/Schemas/Bond/). -| Property value string length | 8,192 | See [type schemas](https://github.com/MohanGsk/ApplicationInsights-Home/tree/master/EndpointSpecs/Schemas/Bond/). -| Trace and exception message length | 32,768 | See [type schemas](https://github.com/MohanGsk/ApplicationInsights-Home/tree/master/EndpointSpecs/Schemas/Bond/). -| [Availability tests](../articles/azure-monitor/app/monitor-web-app-availability.md) count per app | 100 | -| [Profiler](../articles/azure-monitor/app/profiler.md) data retention | 5 days | -| [Profiler](../articles/azure-monitor/app/profiler.md) data sent per day | 10 GB | - -For more information, see ](../articles/azure-monitor/logs/cost-logs.md#application-insights-billing). 
\ No newline at end of file diff --git a/includes/azure-monitor-limits-custom-logs.md b/includes/azure-monitor-limits-custom-logs.md index d95919b10fa5b..6c20b605163d9 100644 --- a/includes/azure-monitor-limits-custom-logs.md +++ b/includes/azure-monitor-limits-custom-logs.md @@ -13,6 +13,6 @@ ms.custom: "include file" | Limit | Value | |:---|:---| | Maximum size of API call | 1MB for both compressed and uncompressed data | -| Maximum data/minute per DCR | 1 GB for both compressed and uncompressed data. Retry after the duration listed in the `Retry-After` header in the response. | -| Maximum requests/minute per DCR | 6,000. Retry after the duration listed in the `Retry-After` header in the response. | +| Maximum data/minute per DCE | 2 GB for both compressed and uncompressed data. Retry after the duration listed in the `Retry-After` header in the response. | +| Maximum requests/minute per DCE | 6,000. Retry after the duration listed in the `Retry-After` header in the response. | diff --git a/includes/azure-monitor-limits-data-collection-rules.md b/includes/azure-monitor-limits-data-collection-rules.md index 94dc70235f686..54073b3ae8d98 100644 --- a/includes/azure-monitor-limits-data-collection-rules.md +++ b/includes/azure-monitor-limits-data-collection-rules.md @@ -21,3 +21,4 @@ ms.custom: "include file" | Maximum number of extensions | 10 | | Maximum size of extension settings | 32 Kb | | Maximum number of Log Analytics workspaces | 10 | +| Maximum number of characters in a transformation | 15,360 | diff --git a/includes/azure-storage-account-limits-standard.md b/includes/azure-storage-account-limits-standard.md index aaa2a80fd281c..bbec919721e7a 100644 --- a/includes/azure-storage-account-limits-standard.md +++ b/includes/azure-storage-account-limits-standard.md @@ -5,7 +5,7 @@ author: tamram ms.service: storage ms.topic: include - ms.date: 05/09/2022 + ms.date: 05/26/2022 ms.author: tamram ms.custom: include file, references_regions --- @@ -19,12 +19,13 @@ Microsoft recommends that you use a GPv2 storage account for most scenarios. You | Resource | Limit | |--|--| -| Number of storage accounts per region per subscription, including standard, and premium storage accounts. | 250 | +| Maximum number of storage accounts with standard endpoints per region per subscription, including standard and premium storage accounts. | 250 | +| Maximum number of storage accounts with Azure DNS zone endpoints (preview) per region per subscription, including standard and premium storage accounts. | 5000 (preview) | | Default maximum storage account capacity | 5 PiB 1 | | Maximum number of blob containers, blobs, file shares, tables, queues, entities, or messages per storage account. | No limit | | Default maximum request rate per storage account | 20,000 requests per second1 | | Default maximum ingress per general-purpose v2 and Blob storage account in the following regions (LRS/GRS):
                    • Australia East
                    • Central US
                    • East Asia
                    • East US 2
                    • Japan East
                    • Korea Central
                    • North Europe
                    • South Central US
                    • Southeast Asia
                    • UK South
                    • West Europe
                    • West US
                    | 60 Gbps1 | -| Default maximum ingress per general-purpose v2 and Blob storage account in the following regions (ZRS):
                    • Australia East
                    • Central US
                    • East US
                    • East US 2
                    • Japan East
                    • North Europe
                    • South Central US
                    • Southeast Asia
                    • UK South
                    • West Europe
                    • West US 2
                    | 60 Gbps1 | +| Default maximum ingress per general-purpose v2 and Blob storage account in the following regions (ZRS):
                    • Australia East
                    • Central US
                    • East US
                    • East US 2
                    • Japan East
                    • North Europe
                    • South Central US
                    • Southeast Asia
                    • UK South
                    • West Europe
                    • West US 2
                     | 60 Gbps1 | | Default maximum ingress per general-purpose v2 and Blob storage account in regions that aren't listed in the previous row. | 25 Gbps1 | | Default maximum ingress for general-purpose v1 storage accounts (all regions) | 10 Gbps1 | | Default maximum egress for general-purpose v2 and Blob storage accounts in the following regions (LRS/GRS):
                    • Australia East
                    • Central US
                    • East Asia
                    • East US 2
                    • Japan East
                    • Korea Central
                    • North Europe
                    • South Central US
                    • Southeast Asia
                    • UK South
                    • West Europe
                    • West US
                    | 120 Gbps1 | diff --git a/includes/bastion-vm-rdp.md b/includes/bastion-vm-rdp.md index 8958eb8395218..c7efc30c11228 100644 --- a/includes/bastion-vm-rdp.md +++ b/includes/bastion-vm-rdp.md @@ -5,7 +5,7 @@ author: cherylmc ms.service: bastion ms.topic: include - ms.date: 03/02/2022 + ms.date: 06/02/2022 ms.author: cherylmc ms.custom: include file --- @@ -18,7 +18,7 @@ :::image type="content" source="./media/bastion-vm-rdp/select-use-bastion.png" alt-text="Screenshot of Select Use Bastion."::: -1. On the **Connect using Azure Bastion** page, enter the username and password for your virtual machine, then select **Connect**. +1. On the **Connect using Azure Bastion** page, enter the username and password for your virtual machine. If your VM is domain-joined, you must use the following format: **username@domain.com**. Then, select **Connect**. :::image type="content" source="./media/bastion-vm-rdp/connect-vm-host.png" alt-text="Screenshot of Connect button."::: diff --git a/includes/connectors-create-connection-general-intro.md b/includes/connectors-create-connection-general-intro.md index d8fb9201d46a1..c608d225f3d63 100644 --- a/includes/connectors-create-connection-general-intro.md +++ b/includes/connectors-create-connection-general-intro.md @@ -3,15 +3,14 @@ ms.service: logic-apps ms.topic: include author: ecfan ms.author: estfan -ms.date: 10/11/2021 +ms.date: 06/02/2022 --- -When you add a trigger or action that connects to a service or system for the first time, the workflow designer prompts you to create a *connection* by providing the necessary information, which varies based on the connection, for example: +When you add a trigger or action that connects to a service or system, and you don't have an existing or active connection, +Azure Logic Apps prompts you to provide the connection information, which varies based on the connection type, for example: -* The name that you want to use for the new connection - -* The name for the system or server - -* Your user or account credentials - -* The authentication type to use \ No newline at end of file +* A name to use for the connection +* Your account credentials +* The server or system name +* A connection string +* The authentication type to use diff --git a/includes/container-apps-code-to-cloud-setup.md b/includes/container-apps-code-to-cloud-setup.md index 05675ba2d6bd4..c6ade737b1a89 100644 --- a/includes/container-apps-code-to-cloud-setup.md +++ b/includes/container-apps-code-to-cloud-setup.md @@ -15,6 +15,7 @@ RESOURCE_GROUP="album-containerapps" LOCATION="canadacentral" ENVIRONMENT="env-album-containerapps" API_NAME="album-api" +FRONTEND_NAME="album-ui" GITHUB_USERNAME="" ``` @@ -35,6 +36,7 @@ $RESOURCE_GROUP="album-containerapps" $LOCATION="canadacentral" $ENVIRONMENT="env-album-containerapps" $API_NAME="album-api" +$FRONTEND_NAME="album-ui" $GITHUB_USERNAME="" ``` diff --git a/includes/digital-twins-3d-embed.md b/includes/digital-twins-3d-embed.md new file mode 100644 index 0000000000000..7355f18dc5b47 --- /dev/null +++ b/includes/digital-twins-3d-embed.md @@ -0,0 +1,16 @@ +--- +author: baanders +ms.service: digital-twins +description: include for the embeddable viewer component in 3D Scenes Studio +ms.topic: include +ms.date: 06/02/2022 +ms.author: baanders +--- + +Here's an example of what the embedded viewer might look like in an independent application: + +:::image type="content" source="../articles/digital-twins/media/concepts-3d-scenes-studio/embedded-view.png" alt-text="Screenshot of 3D Scenes Studio in 
embedded view." lightbox="../articles/digital-twins/media/concepts-3d-scenes-studio/embedded-view.png"::: + +The 3D visualization component library is available in GitHub, in the [iot-cardboard-js](https://github.com/microsoft/iot-cardboard-js) repository. This is an open source React component library for creating internet of things (IoT) web experiences. + +For instructions on how to use the library to embed 3D experiences into custom applications, see the repository's wiki, [Embedding 3D Scenes](https://github.com/microsoft/iot-cardboard-js/wiki/Embedding-3D-Scenes). You can also use the [CRA IoT Cardboard](https://github.com/Azure-Samples/cra-iot-cardboard-js) sample, which deploys a Create React App template for the library, to get started building a custom app with an embedded viewer. diff --git a/includes/digital-twins-create-app-registration-selector.md b/includes/digital-twins-create-app-registration-selector.md deleted file mode 100644 index 32998472c917a..0000000000000 --- a/includes/digital-twins-create-app-registration-selector.md +++ /dev/null @@ -1,15 +0,0 @@ ---- - title: include file - description: include file for selecting between versions of Azure Digital Twins app registration article - services: digital-twins - author: baanders - ms.service: digital-twins - ms.topic: include - ms.date: 05/13/2021 - ms.author: baanders - ms.custom: include file ---- - -> [!div class="op_single_selector"] -> * [Portal](../articles/digital-twins/how-to-create-app-registration-portal.md) -> * [CLI](../articles/digital-twins/how-to-create-app-registration-cli.md) \ No newline at end of file diff --git a/includes/digital-twins-limits.md b/includes/digital-twins-limits.md index f3930bec4f211..6937b5528679d 100644 --- a/includes/digital-twins-limits.md +++ b/includes/digital-twins-limits.md @@ -47,6 +47,7 @@ The following table reflects the rate limits of different APIs. | Digital Twins API | Number of patch requests per second | 1,000 | Yes | | Digital Twins API | Number of create/delete operations per second across all twins and relationships | 50 | Yes | | Digital Twins API | Number of create/update/delete operations per second on a single twin or its incoming/outgoing relationships | 10 | No | +| Digital Twins API | Number of outstanding operations on a single twin or its incoming/outgoing relationships | 500 | No | | Query API | Number of requests per second | 500 | Yes | | Query API | [Query Units](../articles/digital-twins/concepts-query-units.md) per second | 4,000 | Yes | | Event Routes API | Number of requests per second | 100 | Yes | diff --git a/includes/digital-twins-prereq-registration.md b/includes/digital-twins-prereq-registration.md index c94480a61bcf9..c09d048298dc5 100644 --- a/includes/digital-twins-prereq-registration.md +++ b/includes/digital-twins-prereq-registration.md @@ -7,6 +7,6 @@ ms.date: 10/29/2020 ms.author: baanders --- -To authenticate all the resources used in this article, you'll need to set up an [Azure Active Directory (Azure AD)](../articles/active-directory/fundamentals/active-directory-whatis.md) app registration. Follow the instructions in [Create an app registration with Azure Digital Twins access](../articles/digital-twins/how-to-create-app-registration-portal.md) to set this up. +To authenticate all the resources used in this article, you'll need to set up an [Azure Active Directory (Azure AD)](../articles/active-directory/fundamentals/active-directory-whatis.md) app registration. 
Follow the instructions in [Create an app registration with Azure Digital Twins access](../articles/digital-twins/how-to-create-app-registration.md) to set this up. -Once you have an app registration, you'll need the registration's **Application (client) ID**, **Directory (tenant) ID**, and **client secret value** ([find in the Azure portal](../articles/digital-twins/how-to-create-app-registration-portal.md#collect-important-values)). Take note of these values to use them later to grant access to the Azure Digital Twins APIs. \ No newline at end of file +Once you have an app registration, you'll need the registration's **Application (client) ID**, **Directory (tenant) ID**, and **client secret value** ([find in the Azure portal](../articles/digital-twins/how-to-create-app-registration.md?tabs=portal#collect-important-values)). Take note of these values to use them later to grant access to the Azure Digital Twins APIs. \ No newline at end of file diff --git a/includes/firewall-limits.md b/includes/firewall-limits.md index 55affc12cf94b..cc2973d992caa 100644 --- a/includes/firewall-limits.md +++ b/includes/firewall-limits.md @@ -5,7 +5,7 @@ author: vhorne ms.service: firewall ms.topic: include - ms.date: 05/23/2022 + ms.date: 06/02/2022 ms.author: victorh ms.custom: include file --- @@ -15,6 +15,7 @@ | Data throughput |30 Gbps| |Rule limits|10,000 unique source/destinations in network and application rules| |Total size of rules within a single Rule Collection Group| 2 MB| +|Number of Rule Collection Groups in a firewall policy|50| |Maximum DNAT rules|298 (for firewalls configured with a single Public IP address)

                    The DNAT limitation is due to the underlying platform. The maximum number of DNAT rules is 298. However, any additional public IP addresses reduce the number of the available DNAT rules. For example, two public IP addresses allow for 297 DNAT rules. If a rule's protocol is configured for both TCP and UDP, it counts as two rules.| |Minimum AzureFirewallSubnet size |/26| |Port range in network and application rules|1 - 65535| diff --git a/includes/front-door-edge-locations.md b/includes/front-door-edge-locations.md index ae4d56523d11c..0c9815be354dc 100644 --- a/includes/front-door-edge-locations.md +++ b/includes/front-door-edge-locations.md @@ -5,7 +5,7 @@ author: duongau ms.service: frontdoor ms.topic: include - ms.date: 05/13/2021 + ms.date: 06/01/2022 ms.author: duau ms.custom: include file --- @@ -14,9 +14,15 @@ |--|--| | North America | Montreal, Canada
                    Toronto, Canada
                    Vancouver, Canada
                    Querétaro, Mexico
                    San Juan, Puerto Rico
                    Ashburn, VA, USA
                    Atlanta, GA, USA
                    Boston, MA, USA
                    Boydton, VA, USA
                    Cheyenne, WY, USA
                    Chicago, IL, USA
                    Dallas, TX, USA
                    Denver, CO, USA
                    Honolulu, HI, USA
                    Houston, TX, USA
                    Jacksonville, FL, USA
                    Las Vegas, NV, USA
                    Los Angeles, CA, USA
                    Miami, FL, USA
                    Minneapolis, MN, USA
                    Nashville, TN, USA
                    New York, NY, USA
                    Newark, NJ, USA
                    Philadelphia, PA, USA
                    Phoenix, AZ, USA
                    Portland, OR, USA
                    San Antonio, TX, USA
                    San Diego, CA, USA
                    San Jose, CA, USA
                    Seattle, WA, USA | | South America | Buenos Aires, Argentina
                    Campinas, Brazil
                    Rio de Janeiro, Brazil
                    Sao Paulo, Brazil
                    Santiago, Chile
                    Bogota, Colombia | -| Europe | Vienna, Austria
                    Brussels, Belgium
                    Sofia, Bulgaria
                    Zagreb, Croatia
                    Prague, Czech Republic
                    Copenhagen, Denmark
                    Helsinki, Finland
                    Marseille, France
                    Paris, France
                    Saint Denis, France
                    Berlin, Germany
                    Duesseldorf, Germany
                    Frankfurt, Germany
                    Munich, Germany
                    Athens, Greece
                    Budapest, Hungary
                    Dublin, Ireland
                    Milan, Italy
                    Rome, Italy
                    Oslo, Norway
                    Warsaw, Poland
                    Lisbon, Portugal
                    Bucharest, Romania
                    Moscow, Russia
                    Barcelona, Spain
                    Madrid, Spain
                    Stockholm, Sweden
                    Geneva, Switzerland
                    Zurich, Switzerland
                    London, United Kingdom
                    Manchester, United Kingdom
                    Kyiv, Ukraine | +| Europe | Vienna, Austria
                    Brussels, Belgium
                    Sofia, Bulgaria
                    Zagreb, Croatia
                    Prague, Czech Republic
                    Copenhagen, Denmark
                    Helsinki, Finland
                    Marseille, France
                    Paris, France
                    Saint Denis, France
                    Berlin, Germany
                    Duesseldorf, Germany
                    Frankfurt, Germany
                    Munich, Germany
                    Athens, Greece
                    Budapest, Hungary
                    Dublin, Ireland
                    Milan, Italy
                    Rome, Italy
                    Oslo, Norway
                    Warsaw, Poland
                    Lisbon, Portugal
                    Bucharest, Romania
                    Barcelona, Spain
                    Madrid, Spain
                    Stockholm, Sweden
                    Geneva, Switzerland
                    Zurich, Switzerland
                    London, United Kingdom
                    Manchester, United Kingdom
                    Kyiv, Ukraine | | Africa | Cairo, Egypt
                    Nairobi, Kenya
                    Rabat, Morocco
                    Lagos, Nigeria
                    Cape Town, South Africa
                    Johannesburg, South Africa | | Middle East | Tel Aviv, Israel
                    Istanbul, Turkey
                    Dubai, United Arab Emirates | | India | Chennai, India
                    Hyderabad, India
                    Mumbai, India
                    New Delhi, India | | Asia | Chai Wan, Hong Kong SAR
                    Hong Kong
                    Jakarta, Indonesia
                    Osaka, Japan
                    Tokyo, Japan
                    Kuala Lumpur, Malaysia
                    Manila, Philippines
                    Singapore
                    Busan, South Korea
                    Seoul, South Korea
                    Taipei, Taiwan
                    Taipei City, Taiwan
                    Bangkok, Thailand
                    Ho Chi Minh City, Vietnam | | Australia and New Zealand | Brisbane, Australia
                    Melbourne, Australia
                    Perth, Australia
                    Sydney, Australia
                    Auckland, New Zealand | + +## Azure Government edge locations + +| Region | Location | +| --- | --- | +| North America (US Gov) | Arizona, AZ, USA
                    Iowa, IA, USA
                    Texas, TX, USA
                    Virginia, VA, USA | diff --git a/includes/functions-add-output-binding-view-queue-cli.md b/includes/functions-add-output-binding-view-queue-cli.md index 1ecceca372db5..0aa600817cc5a 100644 --- a/includes/functions-add-output-binding-view-queue-cli.md +++ b/includes/functions-add-output-binding-view-queue-cli.md @@ -35,7 +35,7 @@ You can view the queue in the [Azure portal](../articles/storage/queues/storage- az storage queue list --output tsv ``` -1. Use the [`az storage message get`](/cli/azure/storage/message#az-storage-message-get) command to read the message from this queue, which should be the first name you used when testing the function earlier. The command reads and removes the first message from the queue. +1. Use the [`az storage message get`](/cli/azure/storage/message#az-storage-message-get) command to read the message from this queue, which should be the value you supplied when testing the function earlier. The command reads and removes the first message from the queue. # [bash](#tab/bash) diff --git a/includes/functions-bindings-event-hubs-trigger.md b/includes/functions-bindings-event-hubs-trigger.md index 7378d89642692..c71c9d4bce8fe 100644 --- a/includes/functions-bindings-event-hubs-trigger.md +++ b/includes/functions-bindings-event-hubs-trigger.md @@ -18,7 +18,7 @@ The following example shows a [C# function](../articles/azure-functions/function ```csharp [FunctionName("EventHubTriggerCSharp")] -public static void Run([EventHubTrigger("samples-workitems", Connection = "EventHubConnectionAppSetting")] string myEventHubMessage, ILogger log) +public void Run([EventHubTrigger("samples-workitems", Connection = "EventHubConnectionAppSetting")] string myEventHubMessage, ILogger log) { log.LogInformation($"C# function triggered to process a message: {myEventHubMessage}"); } @@ -28,7 +28,7 @@ To get access to [event metadata](#event-metadata) in function code, bind to an ```csharp [FunctionName("EventHubTriggerCSharp")] -public static void Run( +public void Run( [EventHubTrigger("samples-workitems", Connection = "EventHubConnectionAppSetting")] EventData myEventHubMessage, DateTime enqueuedTimeUtc, Int64 sequenceNumber, @@ -54,7 +54,7 @@ To receive events in a batch, make `string` or `EventData` an array. ```cs [FunctionName("EventHubTriggerCSharp")] -public static void Run([EventHubTrigger("samples-workitems", Connection = "EventHubConnectionAppSetting")] EventData[] eventHubMessages, ILogger log) +public void Run([EventHubTrigger("samples-workitems", Connection = "EventHubConnectionAppSetting")] EventData[] eventHubMessages, ILogger log) { foreach (var message in eventHubMessages) { @@ -106,7 +106,7 @@ using System; using Microsoft.ServiceBus.Messaging; using Microsoft.Azure.EventHubs; -public static void Run(EventData myEventHubMessage, +public void Run(EventData myEventHubMessage, DateTime enqueuedTimeUtc, Int64 sequenceNumber, string offset, diff --git a/includes/functions-portal-language-support.md b/includes/functions-portal-language-support.md index abe75faa38e06..42fbb0c8a43d6 100644 --- a/includes/functions-portal-language-support.md +++ b/includes/functions-portal-language-support.md @@ -10,9 +10,9 @@ ms.author: glenga The following table shows which languages supported by Functions can run on Linux or Windows. It also indicates whether your language supports editing in the Azure portal. 
The language is based on the **Runtime stack** option you choose when [creating your function app in the Azure portal](../articles/azure-functions/functions-create-function-app-portal.md#create-a-function-app). This is the same as the `--worker-runtime` option when using the `func init` command in Azure Functions Core Tools. -| Language | Runtime stack | Linux | Windows | In-portal editing1 | +| Language | Runtime stack | Linux | Windows | In-portal editing | |:--- |:-- |:--|:--- |:--- | -| [C# class library](../articles/azure-functions/functions-dotnet-class-library.md)2 |.NET|✓ |✓ | | +| [C# class library](../articles/azure-functions/functions-dotnet-class-library.md)1 |.NET|✓ |✓ | | | [C# script](../articles/azure-functions/functions-reference-csharp.md) | .NET | ✓ |✓ |✓ | | [JavaScript](../articles/azure-functions/functions-reference-node.md) | Node.js |✓ |✓ | ✓ | | [Python](../articles/azure-functions/functions-reference-python.md) | Python |✓ | |✓ | @@ -20,9 +20,8 @@ The following table shows which languages supported by Functions can run on Linu | [PowerShell](../articles/azure-functions/functions-reference-powershell.md) |PowerShell Core |✓ |✓ |✓ | | [TypeScript](../articles/azure-functions/functions-reference-node.md) | Node.js |✓ |✓ | | | [Go/Rust/other](../articles/azure-functions/functions-custom-handlers.md) | Custom Handlers |✓ |✓ | | - -1 When running on Linux, in-portal editing is only supported in a [Dedicated (App Service) plan](../articles/azure-functions/dedicated-plan.md). -2 In the portal, you can't currently create function apps that run on .NET 5.0. To learn more, see [Develop and publish .NET 5 functions using Azure Functions](../articles/azure-functions/dotnet-isolated-process-guide.md). + +1 In the portal, you can't currently create function apps that run on .NET 5.0. To learn more, see [Develop and publish .NET 5 functions using Azure Functions](../articles/azure-functions/dotnet-isolated-process-guide.md). For more details, see [Operating system/runtime support](../articles/azure-functions/functions-scale.md#operating-systemruntime). diff --git a/includes/functions-premium-plan-note.md b/includes/functions-premium-plan-note.md new file mode 100644 index 0000000000000..04385fc968212 --- /dev/null +++ b/includes/functions-premium-plan-note.md @@ -0,0 +1,10 @@ +--- +author: ggailey777 +ms.service: azure-functions +ms.topic: include +ms.date: 03/24/2022 +ms.author: glenga +--- + +>[!IMPORTANT] +>Azure Functions runs on the Azure App Service platform. In the App Service platform, plans that host Premium plan function apps are referred to as *Elastic* Premium plans, with SKU names like `EP1`. If you choose to run your function app on a Premium plan, make sure to create a plan with an SKU name that starts with "E", such as `EP1`. App Service plan SKU names that start with "P", such as `P1V2` (Premium V2 Small plan), are actually [Dedicated hosting plans](../articles/azure-functions/dedicated-plan.md). Because they are Dedicated and not Elastic Premium, plans with SKU names starting with "P" won't scale dynamically and may increase your costs. \ No newline at end of file diff --git a/includes/functions-vstools-cleanup.md b/includes/functions-vstools-cleanup.md index b097a70eff0e8..49f4833b9e1a0 100644 --- a/includes/functions-vstools-cleanup.md +++ b/includes/functions-vstools-cleanup.md @@ -8,7 +8,7 @@ ms.author: glenga Use the following steps to delete the function app and its related resources to avoid incurring any further costs. -1. 
In the Cloud Explorer, expand your subscription > **App Services**, right-click your function app, and choose **Open in Portal**. +1. In the Visual Studio Publish dialogue, in the Hosting section, select **Open in Azure portal**. 1. In the function app page, select the **Overview** tab and then select the link under **Resource group**. diff --git a/includes/functions-vstools-create.md b/includes/functions-vstools-create.md index 79dfbc25209db..f72fa071054f4 100644 --- a/includes/functions-vstools-create.md +++ b/includes/functions-vstools-create.md @@ -20,7 +20,7 @@ The Azure Functions project template in Visual Studio creates a C# class library | Setting | Value | Description | | ------------ | ------- |----------------------------------------- | - | **.NET version** | **.NET Core 3 (LTS)** | This value creates a function project that runs in-process with version 3.x of the Azure Functions runtime. Azure Functions 1.x supports the .NET Framework. For more information, see [Azure Functions runtime versions overview](../articles/azure-functions/functions-versions.md). | + | **.NET version** | **.NET 6** | This value creates a function project that runs in-process with version 4.x of the Azure Functions runtime. Azure Functions 1.x supports the .NET Framework. For more information, see [Azure Functions runtime versions overview](../articles/azure-functions/functions-versions.md). | | **Function template** | **HTTP trigger** | This value creates a function triggered by an HTTP request. | | **Storage account (AzureWebJobsStorage)** | **Storage emulator** | Because a function app in Azure requires a storage account, one is assigned or created when you publish your project to Azure. An HTTP trigger doesn't use an Azure Storage account connection string; all other trigger types require a valid Azure Storage account connection string. | | **Authorization level** | **Anonymous** | The created function can be triggered by any client without providing a key. This authorization setting makes it easy to test your new function. For more information about keys and authorization, see [Authorization keys](../articles/azure-functions/functions-bindings-http-webhook-trigger.md#authorization-keys) and [HTTP and webhook bindings](../articles/azure-functions/functions-bindings-http-webhook.md). | @@ -32,7 +32,7 @@ The Azure Functions project template in Visual Studio creates a C# class library | Setting | Value | Description | | ------------ | ------- |----------------------------------------- | - | **.NET version** | **.NET 5 (Isolated)** | This value creates a function project that runs in an isolated process, which supports .NET 5.0. For more information, see [Azure Functions runtime versions overview](../articles/azure-functions/functions-versions.md). | + | **.NET version** | **.NET 6 Isolated** | This value creates a function project that runs in an isolated process, which supports .NET 6.0. For more information, see [Azure Functions runtime versions overview](../articles/azure-functions/functions-versions.md). | | **Function template** | **HTTP trigger** | This value creates a function triggered by an HTTP request. | | **Storage account (AzureWebJobsStorage)** | **Storage emulator** | Because a function app in Azure requires a storage account, one is assigned or created when you publish your project to Azure. An HTTP trigger doesn't use an Azure Storage account connection string; all other trigger types require a valid Azure Storage account connection string. 
| | **Authorization level** | **Anonymous** | The created function can be triggered by any client without providing a key. This authorization setting makes it easy to test your new function. For more information about keys and authorization, see [Authorization keys](../articles/azure-functions/functions-bindings-http-webhook-trigger.md#authorization-keys) and [HTTP and webhook bindings](../articles/azure-functions/functions-bindings-http-webhook.md). | diff --git a/includes/functions-vstools-publish.md b/includes/functions-vstools-publish.md index 0b7e6f3095015..ac7037eb617aa 100644 --- a/includes/functions-vstools-publish.md +++ b/includes/functions-vstools-publish.md @@ -2,11 +2,11 @@ author: ggailey777 ms.service: azure-functions ms.topic: include -ms.date: 09/30/2020 +ms.date: 05/19/2022 ms.author: glenga --- -1. In **Solution Explorer**, right-click the project and select **Publish** and in **Target**, select **Azure** then **Next**. +1. In **Solution Explorer**, right-click the project and select **Publish**. In **Target**, select **Azure** then **Next**. 1. For the **Specific target**, choose **Azure Function App (Windows)**, which creates a function app that runs on Windows. @@ -35,10 +35,8 @@ ms.author: glenga 1. Select **Finish**, and on the Publish page, select **Publish** to deploy the package containing your project files to your new function app in Azure. - After the deployment completes the root URL of the function app in Azure is shown in the **Publish** tab. + After the deployment completes, the root URL of the function app in Azure is shown in the **Publish** tab. -1. In the Publish tab, choose **Manage in Cloud Explorer**. This opens the new function app Azure resource in Cloud Explorer. +1. In the Publish tab, in the Hosting section, choose **Open in Azure portal**. This opens the new function app Azure resource in the Azure portal. - :::image type="content" source="media/functions-vstools-publish/functions-visual-studio-publish-complete.png" alt-text="Publish success message"::: - - Cloud Explorer lets you use Visual Studio to view the contents of the site, start and stop the function app, and browse directly to function app resources on Azure and in the Azure portal. + :::image type="content" source="media/functions-vstools-publish/functions-visual-studio-publish-complete.png" alt-text="Publish success message"::: diff --git a/includes/iot-central-connection-configuration.md b/includes/iot-central-connection-configuration.md index 9f36d6f2edd19..713084998d28a 100644 --- a/includes/iot-central-connection-configuration.md +++ b/includes/iot-central-connection-configuration.md @@ -15,7 +15,7 @@ When you run the sample device application later in this tutorial, you need the * ID scope: In your IoT Central application, navigate to **Permissions > Device connection groups**. Make a note of the **ID scope** value. * Group primary key: In your IoT Central application, navigate to **Permissions > Device connection groups > SAS-IoT-Devices**. Make a note of the shared access signature **Primary key** value. -Use the Cloud Shell to generate a device key from the group primary key you retrieved: +Use the Azure Cloud Shell to generate a device key from the group primary key you retrieved: ```azurecli-interactive az extension add --name azure-iot @@ -23,3 +23,6 @@ az iot central device compute-device-key --device-id sample-device-01 --pk [!NOTE] +> To run this sample, you don't need to register the device in advance in your IoT Central application. 
The sample uses the IoT Central capability to [automatically register devices](../articles/iot-central/core/concepts-device-authentication.md#automatically-register-devices) when they connect for the first time. diff --git a/includes/iot-central-prerequisites-basic.md b/includes/iot-central-prerequisites-basic.md index a1cca224cb3e6..6e4a419a77f95 100644 --- a/includes/iot-central-prerequisites-basic.md +++ b/includes/iot-central-prerequisites-basic.md @@ -8,4 +8,4 @@ ms.date: 05/11/2021 - An active Azure subscription. If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. -- A V3 IoT Central application created from the **Custom application** template. To learn more, see [Create an IoT Central application](../articles/iot-central/core/howto-create-iot-central-application.md) and [About your application](../articles/iot-central/core/howto-get-app-info.md). +- An IoT Central application created from the **Custom application** template. To learn more, see [Create an IoT Central application](../articles/iot-central/core/howto-create-iot-central-application.md) and [About your application](../articles/iot-central/core/howto-get-app-info.md). diff --git a/includes/iot-edge-clean-up-cloud-resources.md b/includes/iot-edge-clean-up-cloud-resources.md index 4a1a08655017e..38384d96ac2b5 100644 --- a/includes/iot-edge-clean-up-cloud-resources.md +++ b/includes/iot-edge-clean-up-cloud-resources.md @@ -2,11 +2,11 @@ title: include file description: include file services: iot-edge -author: kgremban +author: PatAltimore ms.service: iot-edge ms.topic: include ms.date: 08/10/2018 -ms.author: kgremban +ms.author: patricka ms.custom: include file --- diff --git a/includes/iot-edge-create-dps-enrollment-symmetric.md b/includes/iot-edge-create-dps-enrollment-symmetric.md index b5beb7a5f13bc..ecb0a3ebac505 100644 --- a/includes/iot-edge-create-dps-enrollment-symmetric.md +++ b/includes/iot-edge-create-dps-enrollment-symmetric.md @@ -1,8 +1,8 @@ --- ms.topic: include ms.date: 10/29/2021 -author: kgremban -ms.author: kgremban +author: PatAltimore +ms.author: patricka ms.service: iot-edge services: iot-edge --- diff --git a/includes/iot-edge-create-dps-enrollment-x509.md b/includes/iot-edge-create-dps-enrollment-x509.md index be3fe3471830e..9acfd9544a146 100644 --- a/includes/iot-edge-create-dps-enrollment-x509.md +++ b/includes/iot-edge-create-dps-enrollment-x509.md @@ -1,8 +1,8 @@ --- ms.topic: include ms.date: 10/29/2021 -author: kgremban -ms.author: kgremban +author: PatAltimore +ms.author: patricka ms.service: iot-edge services: iot-edge --- diff --git a/includes/iot-edge-deploy-module.md b/includes/iot-edge-deploy-module.md index ac2a1b27f720a..a899fce83e22b 100644 --- a/includes/iot-edge-deploy-module.md +++ b/includes/iot-edge-deploy-module.md @@ -2,11 +2,11 @@ title: include file description: include file services: iot-edge - author: kgremban + author: PatAltimore ms.service: iot-edge ms.topic: include ms.date: 06/30/2020 - ms.author: kgremban + ms.author: patricka ms.custom: include file --- @@ -18,7 +18,7 @@ Follow these steps to deploy your first module from Azure Marketplace. 1. Sign in to the [Azure portal](https://portal.azure.com) and go to your IoT hub. -1. From the menu on the left, under **Automatic Device Management**, select **IoT Edge**. +1. From the menu on the left, under **Device Management**, select **IoT Edge**. 1. Select the device ID of the target device from the list of devices. 
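The portal steps above can also be scripted. As a minimal sketch only, assuming the Azure CLI with the `azure-iot` extension is installed and a deployment manifest file already exists, applying a deployment to a single IoT Edge device might look like the following; the hub name, device ID, and manifest path are placeholder values, not values from this article.

```azurecli
# Install the IoT extension if it isn't already present.
az extension add --name azure-iot

# Apply a deployment manifest to one IoT Edge device.
# "my-iot-hub", "my-edge-device", and "deployment.json" are hypothetical placeholders.
az iot edge set-modules \
  --hub-name my-iot-hub \
  --device-id my-edge-device \
  --content ./deployment.json
```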
diff --git a/includes/iot-edge-generate-device-identity-certs.md b/includes/iot-edge-generate-device-identity-certs.md index d1d5e2f0677d8..a966a9324bcd7 100644 --- a/includes/iot-edge-generate-device-identity-certs.md +++ b/includes/iot-edge-generate-device-identity-certs.md @@ -1,8 +1,8 @@ --- ms.topic: include ms.date: 10/29/2021 -author: kgremban -ms.author: kgremban +author: PatAltimore +ms.author: patricka ms.service: iot-edge services: iot-edge --- diff --git a/includes/iot-edge-install-linux-on-windows.md b/includes/iot-edge-install-linux-on-windows.md index 8d6cbfbba1707..752466568ebb2 100644 --- a/includes/iot-edge-install-linux-on-windows.md +++ b/includes/iot-edge-install-linux-on-windows.md @@ -1,8 +1,8 @@ --- ms.topic: include ms.date: 01/25/2022 -author: kgremban -ms.author: kgremban +author: PatAltimore +ms.author: patricka ms.service: iot-edge services: iot-edge --- diff --git a/includes/iot-edge-install-windows.md b/includes/iot-edge-install-windows.md index d1e3f57617e4c..913939f42c822 100644 --- a/includes/iot-edge-install-windows.md +++ b/includes/iot-edge-install-windows.md @@ -1,8 +1,8 @@ --- ms.topic: include ms.date: 10/29/2021 -author: kgremban -ms.author: kgremban +author: PatAltimore +ms.author: patricka ms.service: iot-edge services: iot-edge --- diff --git a/includes/iot-edge-invoke-webrequest.md b/includes/iot-edge-invoke-webrequest.md index 2bb724aa4c780..0c8744d0940ac 100644 --- a/includes/iot-edge-invoke-webrequest.md +++ b/includes/iot-edge-invoke-webrequest.md @@ -2,11 +2,11 @@ title: include file for Invoke-WebReqest procedure description: include file services: iot-edge - author: kgremban + author: PatAltimore ms.service: iot-edge ms.topic: include ms.date: 11/05/2019 - ms.author: kgremban + ms.author: patricka ms.custom: include file --- diff --git a/includes/iot-edge-prerequisites-at-scale-cloud-resources.md b/includes/iot-edge-prerequisites-at-scale-cloud-resources.md index 48c79aba9f49a..fdf6911f594bf 100644 --- a/includes/iot-edge-prerequisites-at-scale-cloud-resources.md +++ b/includes/iot-edge-prerequisites-at-scale-cloud-resources.md @@ -1,8 +1,8 @@ --- ms.topic: include ms.date: 10/29/2021 -author: kgremban -ms.author: kgremban +author: PatAltimore +ms.author: patricka ms.service: iot-edge services: iot-edge --- diff --git a/includes/iot-edge-prerequisites-linux-on-windows.md b/includes/iot-edge-prerequisites-linux-on-windows.md index 7f664e493e5b4..7b24acfd91a68 100644 --- a/includes/iot-edge-prerequisites-linux-on-windows.md +++ b/includes/iot-edge-prerequisites-linux-on-windows.md @@ -1,8 +1,8 @@ --- ms.topic: include ms.date: 10/29/2021 -author: kgremban -ms.author: kgremban +author: PatAltimore +ms.author: patricka ms.service: iot-edge services: iot-edge --- diff --git a/includes/iot-edge-prerequisites-register-device.md b/includes/iot-edge-prerequisites-register-device.md index 4fad08e527950..2ce91c38e4ec0 100644 --- a/includes/iot-edge-prerequisites-register-device.md +++ b/includes/iot-edge-prerequisites-register-device.md @@ -1,8 +1,8 @@ --- ms.topic: include ms.date: 10/29/2021 -author: kgremban -ms.author: kgremban +author: PatAltimore +ms.author: patricka ms.service: iot-edge services: iot-edge --- diff --git a/includes/iot-edge-register-device-symmetric.md b/includes/iot-edge-register-device-symmetric.md index 6c47dd732ca54..1759eb08ad610 100644 --- a/includes/iot-edge-register-device-symmetric.md +++ b/includes/iot-edge-register-device-symmetric.md @@ -1,8 +1,8 @@ --- ms.topic: include ms.date: 10/29/2021 -author: kgremban 
-ms.author: kgremban +author: PatAltimore +ms.author: patricka ms.service: iot-edge services: iot-edge --- @@ -19,7 +19,7 @@ In your IoT hub in the Azure portal, IoT Edge devices are created and managed se 1. In the left pane, select **IoT Edge** from the menu, then select **Add an IoT Edge device**. - ![Add an IoT Edge device from the Azure portal](./media/iot-edge-register-device-symmetric/portal-add-iot-edge-device.png) + :::image type="content" source="./media/iot-edge-register-device/portal-add-iot-edge-device.png" alt-text="Screenshot of how to add an I o T Edge device from the Azure portal."::: 1. On the **Create a device** page, provide the following information: @@ -83,7 +83,7 @@ Devices that use symmetric key authentication need their connection strings to c All the edge-enabled devices that connect to your IoT hub are listed on the **IoT Edge** page. -![Use the Azure portal to view all IoT Edge devices in your IoT hub](./media/iot-edge-register-device-symmetric/portal-view-devices.png) +:::image type="content" source="./media/iot-edge-register-device/portal-view-devices.png" alt-text="Screenshot of how to view your devices in the Azure portal, I o T Hub."::: When you're ready to set up your device, you need the connection string that links your physical device with its identity in the IoT hub. diff --git a/includes/iot-edge-register-device-x509.md b/includes/iot-edge-register-device-x509.md index 6b425b1cce45b..0f715c06f9a75 100644 --- a/includes/iot-edge-register-device-x509.md +++ b/includes/iot-edge-register-device-x509.md @@ -1,8 +1,8 @@ --- ms.topic: include ms.date: 10/29/2021 -author: kgremban -ms.author: kgremban +author: PatAltimore +ms.author: patricka ms.service: iot-edge services: iot-edge --- @@ -19,7 +19,7 @@ In your IoT hub in the Azure portal, IoT Edge devices are created and managed se 1. In the left pane, select **IoT Edge** from the menu, then select **Add an IoT Edge device**. - ![Add an IoT Edge device from the Azure portal](./media/iot-edge-register-device-x509/portal-add-iot-edge-device.png) + :::image type="content" source="./media/iot-edge-register-device/portal-add-iot-edge-device.png" alt-text="Screenshot of how to add an I o T Edge device from the Azure portal."::: 1. On the **Create a device** page, provide the following information: @@ -62,7 +62,7 @@ Devices that use X.509 certificate authentication need their IoT hub name, their All the edge-enabled devices that connect to your IoT hub are listed on the **IoT Edge** page. 
-![Use the Azure portal to view all IoT Edge devices in your IoT hub](./media/iot-edge-register-device-x509/portal-view-devices.png) +:::image type="content" source="./media/iot-edge-register-device/portal-view-devices.png" alt-text="Screenshot of how to view all I o T Edge devices in your IoT hub."::: # [Visual Studio Code](#tab/visual-studio-code) diff --git a/includes/iot-edge-verify-troubleshoot-install.md b/includes/iot-edge-verify-troubleshoot-install.md index f1bbd56c9336e..f6ea82fbf5a90 100644 --- a/includes/iot-edge-verify-troubleshoot-install.md +++ b/includes/iot-edge-verify-troubleshoot-install.md @@ -2,11 +2,11 @@ title: After installation and provisioning, verify success and troubleshoot description: include file services: iot-edge - author: kgremban + author: PatAltimore ms.service: iot-edge ms.topic: include ms.date: 10/06/2020 - ms.author: kgremban + ms.author: patricka ms.custom: include file --- diff --git a/includes/iot-edge-version-201806-or-202011.md b/includes/iot-edge-version-201806-or-202011.md index 3b8242308a0f1..b7b2a1b9a45a2 100644 --- a/includes/iot-edge-version-201806-or-202011.md +++ b/includes/iot-edge-version-201806-or-202011.md @@ -2,11 +2,11 @@ title: IoT Edge two different article versions - 1.1 (LTS) and 1.2 (Preview) description: include file services: iot-edge - author: kgremban + author: PatAltimore ms.service: iot-edge ms.topic: include ms.date: 04/09/2021 - ms.author: kgremban + ms.author: patricka ms.custom: include file --- diff --git a/includes/iot-edge-version-201806.md b/includes/iot-edge-version-201806.md index 4b5875dba71ac..7544322dd0f62 100644 --- a/includes/iot-edge-version-201806.md +++ b/includes/iot-edge-version-201806.md @@ -2,11 +2,11 @@ title: IoT Edge one supported version - 1.1 (LTS) description: include file services: iot-edge - author: kgremban + author: PatAltimore ms.service: iot-edge ms.topic: include ms.date: 03/05/2021 - ms.author: kgremban + ms.author: patricka ms.custom: include file --- diff --git a/includes/iot-edge-version-202011.md b/includes/iot-edge-version-202011.md index 1454d9df51565..35519b19e67a5 100644 --- a/includes/iot-edge-version-202011.md +++ b/includes/iot-edge-version-202011.md @@ -2,11 +2,11 @@ title: IoT Edge one supported versions - 1.2 (Preview) description: include file services: iot-edge - author: kgremban + author: PatAltimore ms.service: iot-edge ms.topic: include ms.date: 04/09/2021 - ms.author: kgremban + ms.author: patricka ms.custom: include file --- diff --git a/includes/iot-edge-version-all-supported.md b/includes/iot-edge-version-all-supported.md index 335253bfbb5fd..7d750d71dc2fa 100644 --- a/includes/iot-edge-version-all-supported.md +++ b/includes/iot-edge-version-all-supported.md @@ -2,11 +2,11 @@ title: IoT Edge all versions supported - 1.1 (LTS) and 1.2 (Preview) description: include file services: iot-edge - author: kgremban + author: PatAltimore ms.service: iot-edge ms.topic: include ms.date: 04/09/2021 - ms.author: kgremban + ms.author: patricka ms.custom: include file --- diff --git a/includes/logic-apps-sku-azure-arc-enabled.md b/includes/logic-apps-sku-azure-arc-enabled.md new file mode 100644 index 0000000000000..06715823a5075 --- /dev/null +++ b/includes/logic-apps-sku-azure-arc-enabled.md @@ -0,0 +1,9 @@ +--- +ms.service: logic-apps +ms.topic: include +author: ecfan +ms.author: estfan +ms.date: 06/08/2022 +--- + +Applies to: **Azure Arc enabled Logic Apps** \ No newline at end of file diff --git a/includes/logic-apps-sku-consumption-standard.md 
b/includes/logic-apps-sku-consumption-standard.md new file mode 100644 index 0000000000000..4644d4aa4ecbc --- /dev/null +++ b/includes/logic-apps-sku-consumption-standard.md @@ -0,0 +1,9 @@ +--- +ms.service: logic-apps +ms.topic: include +author: ecfan +ms.author: estfan +ms.date: 06/08/2022 +--- + +Applies to: **Azure Logic Apps (Consumption + Standard)** diff --git a/includes/logic-apps-sku-consumption.md b/includes/logic-apps-sku-consumption.md new file mode 100644 index 0000000000000..f31323edeeea3 --- /dev/null +++ b/includes/logic-apps-sku-consumption.md @@ -0,0 +1,9 @@ +--- +ms.service: logic-apps +ms.topic: include +author: ecfan +ms.author: estfan +ms.date: 06/08/2022 +--- + +Applies to: **Azure Logic Apps (Consumption)** \ No newline at end of file diff --git a/includes/logic-apps-sku-standard.md b/includes/logic-apps-sku-standard.md new file mode 100644 index 0000000000000..c80f5ec0af803 --- /dev/null +++ b/includes/logic-apps-sku-standard.md @@ -0,0 +1,9 @@ +--- +ms.service: logic-apps +ms.topic: include +author: ecfan +ms.author: estfan +ms.date: 06/08/2022 +--- + +Applies to: **Azure Logic Apps (Standard)** \ No newline at end of file diff --git a/includes/machine-learning-dev-v1.md b/includes/machine-learning-dev-v1.md index 6970f60ff97f4..fa2a92c43d96b 100644 --- a/includes/machine-learning-dev-v1.md +++ b/includes/machine-learning-dev-v1.md @@ -8,4 +8,4 @@ ms.author: sgilley **APPLIES TO:** :::image type="icon" source="media/applies-to/yes.png" border="false":::[Azure CLI ml extension **v1**](../articles/machine-learning/how-to-configure-cli.md) -:::image type="icon" source="media/applies-to/yes.png" border="false"::: [Python SDK azureml **v1**](/python/api/overview/azure/ml/?view=azure-ml-py) \ No newline at end of file +:::image type="icon" source="media/applies-to/yes.png" border="false"::: [Python SDK azureml **v1**](/python/api/overview/azure/ml/?view=azure-ml-py&preserve-view=true) diff --git a/includes/machine-learning-online-endpoint-troubleshooting.md b/includes/machine-learning-online-endpoint-troubleshooting.md index b6fd62fcfda8c..53f5f02d1745e 100644 --- a/includes/machine-learning-online-endpoint-troubleshooting.md +++ b/includes/machine-learning-online-endpoint-troubleshooting.md @@ -6,6 +6,15 @@ ms.date: 05/10/2022 ms.author: larryfr --- +### Online endpoint creation fails with a V1LegacyMode == true message + +The Azure Machine Learning workspace can be configured for `v1_legacy_mode`, which disables v2 APIs. Managed online endpoints are a feature of the v2 API platform, and won't work if `v1_legacy_mode` is enabled for the workspace. + +> [!IMPORTANT] +> Check with your network security team before disabling `v1_legacy_mode`. It may have been enabled by your network security team for a reason. + +For information on how to disable `v1_legacy_mode`, see [Network isolation with v2](/azure/machine-learning/how-to-configure-network-isolation-with-v2). + ### Online endpoint creation with key-based authentication fails Use the following command to list the network rules of the Azure Key Vault for your workspace. Replace `` with the name of your key vault: @@ -25,7 +34,7 @@ The response for this command is similar to the following JSON document: } ``` -If the value of `bypass` isn't `AzureServices`, use the guidance in the [Configure key vault network settings](/azure/key-vault/general/how-to-azure-key-vault-network-security?tabs=azure-cli) to set it to `AzureServices`. 
+If the value of `bypass` isn't `AzureServices`, use the guidance in the [Configure key vault network settings](../articles/key-vault/general/how-to-azure-key-vault-network-security.md?tabs=azure-cli) to set it to `AzureServices`. ### Online deployments fail with an image download error @@ -59,7 +68,7 @@ If the value of `bypass` isn't `AzureServices`, use the guidance in the [Configu ``` The results should contain an entry that is similar to `*..inference.`. -1. If no inference value is returned, delete the private endpoint for the workspace and then recreate it. For more information, see [How to configure a private endpoint](/machine-learning/how-to-configure-private-link). +1. If no inference value is returned, delete the private endpoint for the workspace and then recreate it. For more information, see [How to configure a private endpoint](/azure/container-registry/container-registry-private-link). ### Online deployments can't be scored @@ -86,4 +95,4 @@ If the value of `bypass` isn't `AzureServices`, use the guidance in the [Configu az ml online-deployment get-logs -e -n ``` - Look through the logs to see if there's a problem running the scoring code when you submit a request to the deployment. \ No newline at end of file + Look through the logs to see if there's a problem running the scoring code when you submit a request to the deployment. diff --git a/includes/machine-learning-public-internet-access.md b/includes/machine-learning-public-internet-access.md index 91bea99469905..f185a1c794c4f 100644 --- a/includes/machine-learning-public-internet-access.md +++ b/includes/machine-learning-public-internet-access.md @@ -18,12 +18,12 @@ Azure Machine Learning requires both inbound and outbound access to the public i | Inbound | 44224 | AzureMachineLearning | Create, update, and delete of Azure Machine Learning compute instance. | | Outbound | 443 | AzureMonitor | Used to log monitoring and metrics to App Insights and Azure Monitor. | | Outbound | 80, 443 | AzureActiveDirectory | Authentication using Azure AD. | -| Outbound | 443 | AzureMachineLearning | Using Azure Machine Learning services. | +| Outbound | 443, 8787, 18881 | AzureMachineLearning | Using Azure Machine Learning services. | | Outbound | 443 | AzureResourceManager | Creation of Azure resources with Azure Machine Learning. | | Outbound | 443 | Storage.region | Access data stored in the Azure Storage Account for the Azure Batch service. | | Outbound | 443 | AzureFrontDoor.FrontEnd
                    * Not needed in Azure China. | Global entry point for [Azure Machine Learning studio](https://ml.azure.com). | | Outbound | 443 | ContainerRegistry.region | Access docker images provided by Microsoft. | -| Outbound | 443 | MicrosoftContainerRegistry.region | Access docker images provided by Microsoft. Setup of the Azure Machine Learning router for Azure Kubernetes Service. | +| Outbound | 443 | MicrosoftContainerRegistry.region
                    **Note** that this tag has a dependency on the **AzureFrontDoor.FirstParty** tag | Access docker images provided by Microsoft. Setup of the Azure Machine Learning router for Azure Kubernetes Service. | | Outbound | 443 | Keyvault.region | Access the key vault for the Azure Batch service. Only needed if your workspace was created with the [hbi_workspace](/python/api/azureml-core/azureml.core.workspace%28class%29#create-name--auth-none--subscription-id-none--resource-group-none--location-none--create-resource-group-true--sku--basic---friendly-name-none--storage-account-none--key-vault-none--app-insights-none--container-registry-none--cmk-keyvault-none--resource-cmk-uri-none--hbi-workspace-false--default-cpu-compute-target-none--default-gpu-compute-target-none--exist-ok-false--show-output-true-) flag enabled. | > [!TIP] diff --git a/includes/machine-learning-sdk-v1.md b/includes/machine-learning-sdk-v1.md index 351c410f3a630..d4ad9d1dc712c 100644 --- a/includes/machine-learning-sdk-v1.md +++ b/includes/machine-learning-sdk-v1.md @@ -6,4 +6,4 @@ ms.date: 04/22/2022 ms.author: sgilley --- -**APPLIES TO:** :::image type="icon" source="media/applies-to/yes.png" border="false"::: [Python SDK azureml **v1**](/python/api/overview/azure/ml/?view=azure-ml-py) \ No newline at end of file +**APPLIES TO:** :::image type="icon" source="media/applies-to/yes.png" border="false"::: [Python SDK azureml **v1**](/python/api/overview/azure/ml/?view=azure-ml-py&preserve-view=true) diff --git a/includes/managed-disks-ultra-disks-GA-scope-and-limitations.md b/includes/managed-disks-ultra-disks-GA-scope-and-limitations.md index bb2e9ead494a2..98b30abb66c85 100644 --- a/includes/managed-disks-ultra-disks-GA-scope-and-limitations.md +++ b/includes/managed-disks-ultra-disks-GA-scope-and-limitations.md @@ -5,7 +5,7 @@ services: virtual-machines author: roygara ms.service: virtual-machines ms.topic: include -ms.date: 03/09/2022 +ms.date: 06/06/2022 ms.author: rogarana ms.custom: include file --- @@ -31,9 +31,9 @@ Not every VM size is available in every supported region with ultra disks. The f |VM Type |Sizes |Description | |------------|---------|-------------| -| General purpose|[DSv3-series](../articles/virtual-machines/dv3-dsv3-series.md#dsv3-series), [Ddsv4-series](../articles/virtual-machines/ddv4-ddsv4-series.md#ddsv4-series), [Dsv4-series](../articles/virtual-machines/dv4-dsv4-series.md#dsv4-series), [Dasv4-series](../articles/virtual-machines/dav4-dasv4-series.md#dasv4-series)| Balanced CPU-to-memory ratio. Ideal for testing and development, small to medium databases, and low to medium traffic web servers.| +| General purpose|[DSv3-series](../articles/virtual-machines/dv3-dsv3-series.md#dsv3-series), [Ddsv4-series](../articles/virtual-machines/ddv4-ddsv4-series.md#ddsv4-series), [Dsv4-series](../articles/virtual-machines/dv4-dsv4-series.md#dsv4-series), [Dasv4-series](../articles/virtual-machines/dav4-dasv4-series.md#dasv4-series), [Dsv5-series](../articles/virtual-machines/dv5-dsv5-series.md#dsv5-series), [Ddsv5-series](../articles/virtual-machines/ddv5-ddsv5-series.md#ddsv5-series), [Dasv5-series](../articles/virtual-machines/dasv5-dadsv5-series.md#dasv5-series)| Balanced CPU-to-memory ratio. Ideal for testing and development, small to medium databases, and low to medium traffic web servers.| | Compute optimized|[FSv2-series](../articles/virtual-machines/fsv2-series.md)| High CPU-to-memory ratio. 
Good for medium traffic web servers, network appliances, batch processes, and application servers.| -| Memory optimized|[ESv3-series](../articles/virtual-machines/ev3-esv3-series.md#esv3-series), [Easv4-series](../articles/virtual-machines/eav4-easv4-series.md#easv4-series), [Edsv4-series](../articles/virtual-machines/edv4-edsv4-series.md#edsv4-series), [Esv4-series](../articles/virtual-machines/ev4-esv4-series.md#esv4-series), [M-series](../articles/virtual-machines/m-series.md), [Mv2-series](../articles/virtual-machines/mv2-series.md), [Msv2/Mdsv2-series](../articles/virtual-machines/msv2-mdsv2-series.md)|High memory-to-CPU ratio. Great for relational database servers, medium to large caches, and in-memory analytics. +| Memory optimized|[ESv3-series](../articles/virtual-machines/ev3-esv3-series.md#esv3-series), [Easv4-series](../articles/virtual-machines/eav4-easv4-series.md#easv4-series), [Edsv4-series](../articles/virtual-machines/edv4-edsv4-series.md#edsv4-series), [Esv4-series](../articles/virtual-machines/ev4-esv4-series.md#esv4-series), [Esv5-series](../articles/virtual-machines/ev5-esv5-series.md#esv5-series), [Edsv5-series](../articles/virtual-machines/edv5-edsv5-series.md#edsv5-series), [Easv5-series](../articles/virtual-machines/easv5-eadsv5-series.md#easv5-series), [Ebsv5 series](../articles/virtual-machines/ebdsv5-ebsv5-series.md#ebsv5-series), [Ebdsv5 series](../articles/virtual-machines/ebdsv5-ebsv5-series.md#ebdsv5-series), [M-series](../articles/virtual-machines/m-series.md), [Mv2-series](../articles/virtual-machines/mv2-series.md), [Msv2/Mdsv2-series](../articles/virtual-machines/msv2-mdsv2-series.md)|High memory-to-CPU ratio. Great for relational database servers, medium to large caches, and in-memory analytics. | Storage optimized|[LSv2-series](../articles/virtual-machines/lsv2-series.md)|High disk throughput and IO ideal for Big Data, SQL, NoSQL databases, data warehousing and large transactional databases.| | GPU optimized|[NCv2-series](../articles/virtual-machines/ncv2-series.md), [NCv3-series](../articles/virtual-machines/ncv3-series.md), [NCasT4_v3-series](../articles/virtual-machines/nct4-v3-series.md), [ND-series](../articles/virtual-machines/nd-series.md), [NDv2-series](../articles/virtual-machines/ndv2-series.md), [NVv3-series](../articles/virtual-machines/nvv3-series.md), [NVv4-series](../articles/virtual-machines/nvv4-series.md)| Specialized virtual machines targeted for heavy graphic rendering and video editing, as well as model training and inferencing (ND) with deep learning. 
Available with single or multiple GPUs.| | Performance optimized |[HB-series](../articles/virtual-machines/hb-series.md), [HC-series](../articles/virtual-machines/hc-series.md), [HBv2-series](../articles/virtual-machines/hbv2-series.md)|The fastest and most powerful CPU virtual machines with optional high-throughput network interfaces (RDMA).| diff --git a/includes/media/functions-vs-tools-create/functions-project-settings-isolated.png b/includes/media/functions-vs-tools-create/functions-project-settings-isolated.png index 988c2857475d1..962ed7f8dfc55 100644 Binary files a/includes/media/functions-vs-tools-create/functions-project-settings-isolated.png and b/includes/media/functions-vs-tools-create/functions-project-settings-isolated.png differ diff --git a/includes/media/functions-vs-tools-create/functions-project-settings-v4.png b/includes/media/functions-vs-tools-create/functions-project-settings-v4.png index 9e7f8ed5026ff..c9b01526849d6 100644 Binary files a/includes/media/functions-vs-tools-create/functions-project-settings-v4.png and b/includes/media/functions-vs-tools-create/functions-project-settings-v4.png differ diff --git a/includes/media/functions-vs-tools-create/functions-project-settings.png b/includes/media/functions-vs-tools-create/functions-project-settings.png index b8fa0b69db2be..7840a703f6538 100644 Binary files a/includes/media/functions-vs-tools-create/functions-project-settings.png and b/includes/media/functions-vs-tools-create/functions-project-settings.png differ diff --git a/includes/media/functions-vstools-publish/functions-visual-studio-publish-complete.png b/includes/media/functions-vstools-publish/functions-visual-studio-publish-complete.png index 65bc418bfcc93..b846fb6e2f6d1 100644 Binary files a/includes/media/functions-vstools-publish/functions-visual-studio-publish-complete.png and b/includes/media/functions-vstools-publish/functions-visual-studio-publish-complete.png differ diff --git a/includes/media/iot-edge-register-device-symmetric/portal-add-iot-edge-device.png b/includes/media/iot-edge-register-device-symmetric/portal-add-iot-edge-device.png deleted file mode 100644 index fe411ccc82727..0000000000000 Binary files a/includes/media/iot-edge-register-device-symmetric/portal-add-iot-edge-device.png and /dev/null differ diff --git a/includes/media/iot-edge-register-device-symmetric/portal-view-devices.png b/includes/media/iot-edge-register-device-symmetric/portal-view-devices.png deleted file mode 100644 index 66a3f01f68bdf..0000000000000 Binary files a/includes/media/iot-edge-register-device-symmetric/portal-view-devices.png and /dev/null differ diff --git a/includes/media/iot-edge-register-device-x509/portal-add-iot-edge-device.png b/includes/media/iot-edge-register-device-x509/portal-add-iot-edge-device.png deleted file mode 100644 index fe411ccc82727..0000000000000 Binary files a/includes/media/iot-edge-register-device-x509/portal-add-iot-edge-device.png and /dev/null differ diff --git a/includes/media/iot-edge-register-device-x509/portal-view-devices.png b/includes/media/iot-edge-register-device-x509/portal-view-devices.png deleted file mode 100644 index 66a3f01f68bdf..0000000000000 Binary files a/includes/media/iot-edge-register-device-x509/portal-view-devices.png and /dev/null differ diff --git a/includes/media/iot-edge-register-device/portal-add-iot-edge-device.png b/includes/media/iot-edge-register-device/portal-add-iot-edge-device.png new file mode 100644 index 0000000000000..efa8792c25b45 Binary files /dev/null and 
b/includes/media/iot-edge-register-device/portal-add-iot-edge-device.png differ diff --git a/includes/media/iot-edge-register-device/portal-view-devices.png b/includes/media/iot-edge-register-device/portal-view-devices.png new file mode 100644 index 0000000000000..b8c60c1cc0ef6 Binary files /dev/null and b/includes/media/iot-edge-register-device/portal-view-devices.png differ diff --git a/includes/media/virtual-wan-empty-hub/basics.png b/includes/media/virtual-wan-empty-hub/basics.png new file mode 100644 index 0000000000000..77dd9502a5373 Binary files /dev/null and b/includes/media/virtual-wan-empty-hub/basics.png differ diff --git a/includes/media/virtual-wan-p2s-hub/create-hub.png b/includes/media/virtual-wan-p2s-hub/create-hub.png index 363adba902ae9..77dd9502a5373 100644 Binary files a/includes/media/virtual-wan-p2s-hub/create-hub.png and b/includes/media/virtual-wan-p2s-hub/create-hub.png differ diff --git a/includes/media/virtual-wan-tutorial-er-hub/create-hub.png b/includes/media/virtual-wan-tutorial-er-hub/create-hub.png index 363adba902ae9..77dd9502a5373 100644 Binary files a/includes/media/virtual-wan-tutorial-er-hub/create-hub.png and b/includes/media/virtual-wan-tutorial-er-hub/create-hub.png differ diff --git a/includes/media/virtual-wan-tutorial-hub-include/create-hub.png b/includes/media/virtual-wan-tutorial-hub-include/create-hub.png index 363adba902ae9..77dd9502a5373 100644 Binary files a/includes/media/virtual-wan-tutorial-hub-include/create-hub.png and b/includes/media/virtual-wan-tutorial-hub-include/create-hub.png differ diff --git a/includes/media/vpn-gateway-connection-settings-portal-include/configuration-expand.png b/includes/media/vpn-gateway-connection-settings-portal-include/configuration-expand.png new file mode 100644 index 0000000000000..75ecb360d7c1a Binary files /dev/null and b/includes/media/vpn-gateway-connection-settings-portal-include/configuration-expand.png differ diff --git a/includes/media/vpn-gateway-connection-settings-portal-include/configuration-portal.png b/includes/media/vpn-gateway-connection-settings-portal-include/configuration-portal.png new file mode 100644 index 0000000000000..6895e4126cad4 Binary files /dev/null and b/includes/media/vpn-gateway-connection-settings-portal-include/configuration-portal.png differ diff --git a/includes/media/vpn-gateway-reset-gw-portal/menu.png b/includes/media/vpn-gateway-reset-gw-portal/menu.png index a1de30337cb39..d45b3ff7471e3 100644 Binary files a/includes/media/vpn-gateway-reset-gw-portal/menu.png and b/includes/media/vpn-gateway-reset-gw-portal/menu.png differ diff --git a/includes/media/vpn-gateway-reset-gw-portal/reset-expand.png b/includes/media/vpn-gateway-reset-gw-portal/reset-expand.png new file mode 100644 index 0000000000000..6f4d72c5334b3 Binary files /dev/null and b/includes/media/vpn-gateway-reset-gw-portal/reset-expand.png differ diff --git a/includes/media/vpn-gateway-reset-gw-portal/reset.png b/includes/media/vpn-gateway-reset-gw-portal/reset.png index 6f4d72c5334b3..ca428d2fe990e 100644 Binary files a/includes/media/vpn-gateway-reset-gw-portal/reset.png and b/includes/media/vpn-gateway-reset-gw-portal/reset.png differ diff --git a/includes/quickstarts-free-trial-note.md b/includes/quickstarts-free-trial-note.md index e8fdd0f95dd69..e98fe8f2dfa08 100644 --- a/includes/quickstarts-free-trial-note.md +++ b/includes/quickstarts-free-trial-note.md @@ -5,4 +5,4 @@ ms.topic: include ms.date: 01/18/2022 ms.author: cfowler --- -If you don't have an [Azure 
subscription](/azure/guides/developer/azure-developer-guide#understanding-accounts-subscriptions-and-billing), create an [Azure free account](https://azure.microsoft.com/free/?ref=microsoft.com&utm_source=microsoft.com&utm_medium=docs&utm_campaign=visualstudio) before you begin. +If you don't have an [Azure subscription](../articles/guides/developer/azure-developer-guide.md#understanding-accounts-subscriptions-and-billing), create an [Azure free account](https://azure.microsoft.com/free/?ref=microsoft.com&utm_source=microsoft.com&utm_medium=docs&utm_campaign=visualstudio) before you begin. \ No newline at end of file diff --git a/includes/resource-graph/samples/bycat/azure-monitor.md b/includes/resource-graph/samples/bycat/azure-monitor.md index dc78a888699e8..7f22aa2031cc7 100644 --- a/includes/resource-graph/samples/bycat/azure-monitor.md +++ b/includes/resource-graph/samples/bycat/azure-monitor.md @@ -81,3 +81,13 @@ Search-AzGraph -Query "Resources | where type =~ 'Microsoft.Kubernetes/connected --- +### Returns all Azure Monitor alerts in a subscription in the last day + +```json +{ + "subscriptions": [ + + ], + "query": "alertsmanagementresources | where properties.essentials.lastModifiedDateTime > ago(1d) | project alertInstanceId = id, parentRuleId = tolower(tostring(properties['essentials']['alertRule'])), sourceId = properties['essentials']['sourceCreatedId'], alertName = name, severity = properties.essentials.severity, status = properties.essentials.monitorCondition, state = properties.essentials.alertState, affectedResource = properties.essentials.targetResourceName, monitorService = properties.essentials.monitorService, signalType = properties.essentials.signalType, firedTime = properties['essentials']['startDateTime'], lastModifiedDate = properties.essentials.lastModifiedDateTime, lastModifiedBy = properties.essentials.lastModifiedUserName" +} +``` diff --git a/includes/resource-graph/samples/bycat/microsoft-defender.md b/includes/resource-graph/samples/bycat/microsoft-defender.md index a8a7a7886105b..0f87be3e527cd 100644 --- a/includes/resource-graph/samples/bycat/microsoft-defender.md +++ b/includes/resource-graph/samples/bycat/microsoft-defender.md @@ -195,7 +195,7 @@ Search-AzGraph -Query "SecurityResources | where type =~ 'microsoft.security/iot ### List Container Registry vulnerability assessment results -Returns all the all the vulnerabilities found on container images. Microsoft Defender for Containers has to be enabled in order to view these security findings. +Returns all the vulnerabilities found on container images. Microsoft Defender for Containers has to be enabled in order to view these security findings. 
```kusto SecurityResources diff --git a/includes/storage-files-redundancy-premium-zrs.md b/includes/storage-files-redundancy-premium-zrs.md index 4c955fb65e0a7..c7aa8cd22a908 100644 --- a/includes/storage-files-redundancy-premium-zrs.md +++ b/includes/storage-files-redundancy-premium-zrs.md @@ -18,4 +18,5 @@ ZRS for premium file shares is available for a subset of Azure regions: - (North America) East US - (North America) East US 2 - (North America) West US 2 +- (North America) South Central US - (South America) Brazil South diff --git a/includes/storage-files-sync-create-server-endpoint.md b/includes/storage-files-sync-create-server-endpoint.md index 289efcbb8dd15..1bf2d606c16ea 100644 --- a/includes/storage-files-sync-create-server-endpoint.md +++ b/includes/storage-files-sync-create-server-endpoint.md @@ -5,7 +5,7 @@ services: storage author: khdownie ms.service: storage ms.topic: include -ms.date: 6/01/2021 +ms.date: 6/02/2022 ms.author: kendownie ms.custom: include file, devx-track-azurecli ms.devlang: azurecli @@ -20,11 +20,24 @@ The **Add server endpoint** blade opens, enter the following information to crea - **Registered server**: The name of the server or cluster where you want to create the server endpoint. - **Path**: The Windows Server path to be synced as part of the sync group. -- **Cloud Tiering**: A switch to enable or disable cloud tiering. With cloud tiering, infrequently used or accessed files can be tiered to Azure Files. -- **Volume Free Space**: The amount of free space to reserve on the volume on which the server endpoint is located. For example, if volume free space is set to 50% on a volume that has only one server endpoint, roughly half the amount of data is tiered to Azure Files. Regardless of whether cloud tiering is enabled, your Azure file share always has a complete copy of the data in the sync group. -- **Initial download mode**: An optional selection, that can be helpful when there are files in the Azure file share but not on the server. Such a situation can exist, for instance, if you create a server endpoint to add another branch office server to a sync group or when you disaster-recover a failed server. If cloud tiering is enabled, the default is to only recall the namespace, no file content initially. That is useful if you believe that rather user access requests should decide what file content is recalled to the server. If cloud tiering is disabled, the default is that the namespace will download first and then files will be recalled based on last-modified timestamp until the local capacity has been reached. You can however change the initial download mode to namespace only. A third mode can only be used if cloud tiering is disabled for this server endpoint. This mode avoids recalling the namespace first. Files will only appear on the local server if they had a chance to fully download. This mode is useful if for instance an application requires full files to be present and cannot tolerate tiered files in its namespace. +- **Cloud Tiering**: A switch to enable or disable cloud tiering. With cloud tiering, infrequently used or accessed files can be tiered to Azure Files. When you enable cloud tiering, there are two policies that you can set to inform Azure File Sync when to tier cool files: the **Volume Free Space Policy** and the **Date Policy**. + - **Volume Free Space**: The amount of free space to reserve on the volume on which the server endpoint is located. 
For example, if volume free space is set to 50% on a volume that has only one server endpoint, roughly half the amount of data is tiered to Azure Files. Regardless of whether cloud tiering is enabled, your Azure file share always has a complete copy of the data in the sync group. + - **Date Policy**: Files are tiered to the cloud if they haven't been accessed (that is, read or written to) for the specified number of days. For example, if you noticed that files that have gone more than 15 days without being accessed are typically archival files, you should set your date policy to 15 days. +- **Initial Sync**: The Initial Sync section is available only for the first server endpoint in a sync group (section changes to Initial Download when creating more than one server endpoint in a sync group). Within the Initial Sync section, you can select the **Initial Upload** and **Initial Download** behavior. + - **Initial Upload**: You can select how the server initially uploads the data to the Azure file share: + - Option #1: Merge the content of this server path with the content in the Azure file share. Files with the same name and path will lead to conflicts if their content is different. Both versions of those files will be stored next to each other. If your server path or Azure file share is empty, always choose this option. + - Option #2: Authoritatively overwrite files and folders in the Azure file share with content in this server’s path. This option avoids file conflicts. + + To learn more, see [Initial sync](../articles/storage/file-sync/file-sync-server-endpoint-create.md#initial-sync-section). -To add the server endpoint, select **Create**. Your files are now kept in sync across your Azure file share and Windows Server. + - **Initial Download**: You can select how the server initially downloads the Azure file share data: + - Option #1: Download the namespace first and then recall the file content, as much as will fit on the local disk. + - Option #2: Download the namespace only. The file content will be recalled when accessed. + - Option #3: Avoid tiered files. Files will only appear on the server once they are fully downloaded. + + To learn more, see [Initial download](../articles/storage/file-sync/file-sync-server-endpoint-create.md#initial-download-section). + +To add the server endpoint, select **Create**. Your files are now kept in sync across your Azure file share and Windows Server. # [PowerShell](#tab/azure-powershell) Execute the following PowerShell commands to create the server endpoint, and be sure to replace ``, `` with the desired values and check the settings for the optional [initial download](../articles/storage/file-sync/file-sync-server-endpoint-create.md#initial-download-section) and [initial upload](../articles/storage/file-sync/file-sync-server-endpoint-create.md#initial-sync-section) policies. diff --git a/includes/storage-files-tiers-overview.md b/includes/storage-files-tiers-overview.md index 8ca4e7413e376..4456bae06c258 100644 --- a/includes/storage-files-tiers-overview.md +++ b/includes/storage-files-tiers-overview.md @@ -5,7 +5,7 @@ author: khdownie ms.service: storage ms.topic: include - ms.date: 08/28/2020 + ms.date: 06/02/2022 ms.author: kendownie ms.custom: include file --- @@ -22,6 +22,6 @@ When selecting a storage tier for your workload, consider your performance and u Once you've created a file share in a storage account, you cannot move it to tiers exclusive to different storage account kinds.
For example, to move a transaction optimized file share to the premium tier, you must create a new file share in a FileStorage storage account and copy the data from your original share to a new file share in the FileStorage account. We recommend using AzCopy to copy data between Azure file shares, but you may also use tools like `robocopy` on Windows or `rsync` for macOS and Linux. -File shares deployed within GPv2 storage accounts can be moved between the standard tiers (transaction optimized, hot, and cool) without creating a new storage account and migrating data, but you will incur transaction costs when you change your tier. When you move a share from a hotter tier to a cooler tier, you will incur the cooler tier's write transaction charge for each file in the share. Moving a file share from a cooler tier to a hotter tier will incur the cool tier's read transaction charge for each file in the share. +File shares deployed within GPv2 storage accounts can be moved between the standard tiers (transaction optimized, hot, and cool) without creating a new storage account and migrating data, but you will incur transaction costs when you change your tier. When you move a share from a hotter tier to a cooler tier, you will incur the cooler tier's write transaction charge for each file in the share. Moving a file share from a cooler tier to a hotter tier will incur the cool tier's read transaction charge for each file in the share. In addition to the per file transaction charge, when you move from cool to hot or transaction optimized, you will incur a data retrieval charge based on the size of data moved. Only the cool tier has a data retrieval charge. See [Understanding Azure Files billing](../articles/storage/files/understanding-billing.md) for more information. \ No newline at end of file diff --git a/includes/stream-analytics-limits-table.md b/includes/stream-analytics-limits-table.md index 2a4b34d4e1cd8..2a677419f93f2 100644 --- a/includes/stream-analytics-limits-table.md +++ b/includes/stream-analytics-limits-table.md @@ -5,7 +5,7 @@ | Maximum number of inputs per job |60 |There's a hard limit of 60 inputs per Azure Stream Analytics job. | | Maximum number of outputs per job |60 |There's a hard limit of 60 outputs per Stream Analytics job. | | Maximum number of functions per job |60 |There's a hard limit of 60 functions per Stream Analytics job. | -| Maximum number of streaming units per job |192 |There's a hard limit of 192 streaming units per Stream Analytics job. | +| Maximum number of streaming units per job |396 |There's a hard limit of 396 streaming units per Stream Analytics job. | | Maximum number of jobs per region |1,500 |Each subscription can have up to 1,500 jobs per geographical region. | | Reference data blob MB | 5 GB | Up to 5 GB when using 6 SUs or more. 
| | Maximum number of characters in a query | 512000 | There's a hard limit of 512k characters in an Azure Stream Analytics job query.| diff --git a/includes/traffic-manager-limits.md b/includes/traffic-manager-limits.md index 62f852f5b188d..33131df53996c 100644 --- a/includes/traffic-manager-limits.md +++ b/includes/traffic-manager-limits.md @@ -2,15 +2,17 @@ title: include file description: include file services: -author: KumudD +author: greg-lindsay ms.service: ms.topic: include -ms.date: 05/09/2019 -ms.author: +ms.date: 06/07/2021 +ms.author: greglin ms.custom: include file --- | Resource | Limit | | --- | --- | -| Profiles per subscription |200 | +| Profiles per subscription |200 <sup>1</sup> | | Endpoints per profile |200 | + +<sup>1</sup>If you need to increase these limits, contact Azure Support. \ No newline at end of file diff --git a/includes/verifiable-credentials-brand.md b/includes/verifiable-credentials-brand.md new file mode 100644 index 0000000000000..ae650f0033b59 --- /dev/null +++ b/includes/verifiable-credentials-brand.md @@ -0,0 +1,14 @@ +--- + title: include file + description: include file + author: barclayn + ms.service: decentralized-identity + ms.subservice: verifiable-credentials + ms.topic: include + ms.date: 05/19/2022 + ms.author: barclayn + ms.custom: include file +--- + +>[!NOTE] +> Azure Active Directory Verifiable Credentials is now Microsoft Entra Verified ID and part of the Microsoft Entra family of products. We'll be updating our documentation in the next few months as we move toward general availability. Learn more about the [Microsoft Entra family](https://aka.ms/EntraAnnouncement) of identity solutions and get started in the [unified Microsoft Entra admin center](https://entra.microsoft.com). diff --git a/includes/virtual-machines-common-network-overview.md b/includes/virtual-machines-common-network-overview.md index 02ca215f8fde8..4c44962666170 100644 --- a/includes/virtual-machines-common-network-overview.md +++ b/includes/virtual-machines-common-network-overview.md @@ -3,7 +3,7 @@ title: include file description: include file services: virtual-machines-windows author: cynthn -ms.service: virtual-machines-windows +ms.service: virtual-machines ms.topic: include ms.date: 11/01/2018 ms.author: cynthn diff --git a/includes/virtual-machines-common-nsg-quickstart.md b/includes/virtual-machines-common-nsg-quickstart.md index 7c4969ee1941d..488b34203c184 100644 --- a/includes/virtual-machines-common-nsg-quickstart.md +++ b/includes/virtual-machines-common-nsg-quickstart.md @@ -3,7 +3,7 @@ description: include file services: virtual-machines-windows author: cynthn - ms.service: virtual-machines-windows + ms.service: virtual-machines ms.topic: include ms.date: 09/12/2018 ms.author: cynthn diff --git a/includes/virtual-machines-common-ssh-overview.md b/includes/virtual-machines-common-ssh-overview.md index c5d19a9307a9f..b505748037a42 100644 --- a/includes/virtual-machines-common-ssh-overview.md +++ b/includes/virtual-machines-common-ssh-overview.md @@ -3,7 +3,7 @@ description: include file services: virtual-machines-linux author: cynthn - ms.service: virtual-machines-linux + ms.service: virtual-machines ms.topic: include ms.date: 07/09/2020 ms.author: cynthn diff --git a/includes/virtual-machines-common-ssh-support.md b/includes/virtual-machines-common-ssh-support.md index 8a3b33a5073f7..e11a6e74104ce 100644 --- a/includes/virtual-machines-common-ssh-support.md +++ b/includes/virtual-machines-common-ssh-support.md @@ -3,7 +3,7 @@ description: include file services:
virtual-machines-linux author: cynthn - ms.service: virtual-machines-linux + ms.service: virtual-machines ms.topic: include ms.date: 04/17/2018 ms.author: cynthn diff --git a/includes/virtual-machines-disks-fast-attach-detach-regions.md b/includes/virtual-machines-disks-fast-attach-detach-regions.md new file mode 100644 index 0000000000000..2372962742ce4 --- /dev/null +++ b/includes/virtual-machines-disks-fast-attach-detach-regions.md @@ -0,0 +1,24 @@ +--- + title: include file + description: include file + services: virtual-machines + author: roygara + ms.service: virtual-machines + ms.topic: include + ms.date: 06/02/2022 + ms.author: rogarana + ms.custom: include file, references_regions +--- + +Lower latency is currently available in every public region except for: + +- Canada Central +- Central US +- East US +- East US 2 +- South Central US +- West US 2 +- Germany North +- Jio India West +- North Europe +- West Europe \ No newline at end of file diff --git a/includes/virtual-machines-disks-shared-limitations.md b/includes/virtual-machines-disks-shared-limitations.md index f406ae67df6ce..3f30a75a48bd3 100644 --- a/includes/virtual-machines-disks-shared-limitations.md +++ b/includes/virtual-machines-disks-shared-limitations.md @@ -21,8 +21,6 @@ When sharing ultra disks, they have the following additional limitations: - Only basic disks can be used with some versions of Windows Server Failover Cluster, for details see [Failover clustering hardware requirements and storage options](/windows-server/failover-clustering/clustering-requirements). - Only [server-side encryption](../articles/virtual-machines/disk-encryption.md) is supported, [Azure Disk Encryption](../articles/virtual-machines/windows/disk-encryption-overview.md) isn't currently supported. -Shared ultra disks are available in all regions that support ultra disks by default, and don't require you to sign up for access to use them. - ### Premium SSDs - Can only be enabled on data disks, not OS disks. @@ -35,10 +33,6 @@ Shared ultra disks are available in all regions that support ultra disks by defa - Azure Backup is available through [Azure Disk Backup](../articles/backup/disk-backup-overview.md). - Only [server-side encryption](../articles/virtual-machines/disk-encryption.md) is supported, [Azure Disk Encryption](../articles/virtual-machines/windows/disk-encryption-overview.md) isn't currently supported. -#### Premium SSD regional availability - -Shared disks on all premium SSD sizes are available in all regions that managed disks are available. - ### Standard SSDs @@ -49,8 +43,4 @@ Shared disks on all premium SSD sizes are available in all regions that managed - Only basic disks can be used with some versions of Windows Server Failover Cluster, for details see [Failover clustering hardware requirements and storage options](/windows-server/failover-clustering/clustering-requirements). - Azure Site Recovery support isn't yet available. - Azure Backup is available through [Azure Disk Backup](../articles/backup/disk-backup-overview.md). -- Only [server-side encryption](../articles/virtual-machines/disk-encryption.md) is supported, [Azure Disk Encryption](../articles/virtual-machines/windows/disk-encryption-overview.md) isn't currently supported. - -#### Standard SSD regional availability - -Shared disks on all standard SSD sizes are available in all regions that managed disks are available. 
+- Only [server-side encryption](../articles/virtual-machines/disk-encryption.md) is supported, [Azure Disk Encryption](../articles/virtual-machines/windows/disk-encryption-overview.md) isn't currently supported. \ No newline at end of file diff --git a/includes/virtual-machines-extensions-deprecation-statement.md b/includes/virtual-machines-extensions-deprecation-statement.md index 7f9b562e6ae09..af2d5f4ea8ec9 100644 --- a/includes/virtual-machines-extensions-deprecation-statement.md +++ b/includes/virtual-machines-extensions-deprecation-statement.md @@ -3,7 +3,7 @@ description: include file services: virtual-machines-linux author: cynthn - ms.service: virtual-machines-linux + ms.service: virtual-machines ms.topic: include file ms.date: 04/20/2018 ms.author: cynthn diff --git a/includes/virtual-machines-linux-lunzero.md b/includes/virtual-machines-linux-lunzero.md index d65a422f808fb..8044898c64cf6 100644 --- a/includes/virtual-machines-linux-lunzero.md +++ b/includes/virtual-machines-linux-lunzero.md @@ -1,6 +1,6 @@ --- author: cynthn -ms.service: virtual-machines-linux +ms.service: virtual-machines ms.topic: include ms.date: 10/26/2018 ms.author: cynthn diff --git a/includes/virtual-machines-linux-troubleshoot-deploy-vm-top.md b/includes/virtual-machines-linux-troubleshoot-deploy-vm-top.md index 0799de5ded747..0de885a3bbaf0 100644 --- a/includes/virtual-machines-linux-troubleshoot-deploy-vm-top.md +++ b/includes/virtual-machines-linux-troubleshoot-deploy-vm-top.md @@ -1,6 +1,6 @@ --- author: cynthn -ms.service: virtual-machines-linux +ms.service: virtual-machines ms.topic: include ms.date: 10/26/2018 ms.author: cynthn diff --git a/includes/virtual-machines-linux-troubleshoot-deployment-new-vm-table.md b/includes/virtual-machines-linux-troubleshoot-deployment-new-vm-table.md index 28bf6bba17b07..db1d26f9e81fc 100644 --- a/includes/virtual-machines-linux-troubleshoot-deployment-new-vm-table.md +++ b/includes/virtual-machines-linux-troubleshoot-deployment-new-vm-table.md @@ -1,6 +1,6 @@ --- author: cynthn -ms.service: virtual-machines-linux +ms.service: virtual-machines ms.topic: include ms.date: 10/26/2018 ms.author: cynthn diff --git a/includes/virtual-machines-linux-tutorial-wordpress.md b/includes/virtual-machines-linux-tutorial-wordpress.md index 25e3451528105..5341c08454e72 100644 --- a/includes/virtual-machines-linux-tutorial-wordpress.md +++ b/includes/virtual-machines-linux-tutorial-wordpress.md @@ -1,6 +1,6 @@ --- author: cynthn -ms.service: virtual-machines-linux +ms.service: virtual-machines ms.topic: include ms.date: 10/26/2018 ms.author: cynthn diff --git a/includes/virtual-machines-n-series-considerations.md b/includes/virtual-machines-n-series-considerations.md index ecc30218e4863..e035415168b85 100644 --- a/includes/virtual-machines-n-series-considerations.md +++ b/includes/virtual-machines-n-series-considerations.md @@ -3,7 +3,7 @@ description: include file services: virtual-machines-linux author: cynthn - ms.service: virtual-machines-linux + ms.service: virtual-machines ms.topic: include ms.date: 06/19/2018 ms.author: cynthn diff --git a/includes/virtual-machines-n-series-linux-support.md b/includes/virtual-machines-n-series-linux-support.md index ad9c3bb0b7ca4..3f1894a7d14c1 100644 --- a/includes/virtual-machines-n-series-linux-support.md +++ b/includes/virtual-machines-n-series-linux-support.md @@ -3,7 +3,7 @@ description: include file services: virtual-machines-linux author: cynthn - ms.service: virtual-machines-linux + ms.service: virtual-machines ms.topic: include 
ms.date: 11/15/2021 ms.author: cynthn diff --git a/includes/virtual-machines-n-series-windows-support.md b/includes/virtual-machines-n-series-windows-support.md index 091a5bb2118f9..80dc509e09bf2 100644 --- a/includes/virtual-machines-n-series-windows-support.md +++ b/includes/virtual-machines-n-series-windows-support.md @@ -3,7 +3,7 @@ description: include services: virtual-machines-windows author: cynthn - ms.service: virtual-machines-windows + ms.service: virtual-machines ms.topic: include ms.date: 02/11/2019 ms.author: cynthn @@ -22,8 +22,8 @@ NVIDIA Tesla (CUDA) drivers for NC, NCv2, NCv3, NCasT4_v3, ND, and NDv2-series V | OS | Driver | | -------- |------------- | -| Windows Server 2019 | [451.82](http://us.download.nvidia.com/tesla/451.82/451.82-tesla-desktop-winserver-2019-2016-international.exe) (.exe) | -| Windows Server 2016 | [451.82](http://us.download.nvidia.com/tesla/451.82/451.82-tesla-desktop-winserver-2019-2016-international.exe) (.exe) | +| Windows Server 2019 | [451.82](https://us.download.nvidia.com/tesla/451.82/451.82-tesla-desktop-winserver-2019-2016-international.exe) (.exe) | +| Windows Server 2016 | [451.82](https://us.download.nvidia.com/tesla/451.82/451.82-tesla-desktop-winserver-2019-2016-international.exe) (.exe) | ### NVIDIA GRID drivers diff --git a/includes/virtual-machines-oracle-applications-bastion.md b/includes/virtual-machines-oracle-applications-bastion.md index ce6062838ff47..1aed565649a30 100644 --- a/includes/virtual-machines-oracle-applications-bastion.md +++ b/includes/virtual-machines-oracle-applications-bastion.md @@ -1,6 +1,6 @@ --- author: dlepow -ms.service: virtual-machines-linux +ms.service: virtual-machines ms.topic: include ms.date: 07/10/2019 ms.author: danlep diff --git a/includes/virtual-machines-oracle-applications-database.md b/includes/virtual-machines-oracle-applications-database.md index 90106e0d32019..9b540671ee034 100644 --- a/includes/virtual-machines-oracle-applications-database.md +++ b/includes/virtual-machines-oracle-applications-database.md @@ -1,6 +1,6 @@ --- author: dlepow -ms.service: virtual-machines-linux +ms.service: virtual-machines ms.topic: include ms.date: 06/01/2020 ms.author: danlep diff --git a/includes/virtual-machines-oracle-applications-identity.md b/includes/virtual-machines-oracle-applications-identity.md index 8c78d386fb269..e2f553880a894 100644 --- a/includes/virtual-machines-oracle-applications-identity.md +++ b/includes/virtual-machines-oracle-applications-identity.md @@ -1,6 +1,6 @@ --- author: dlepow -ms.service: virtual-machines-linux +ms.service: virtual-machines ms.topic: include ms.date: 07/10/2019 ms.author: danlep diff --git a/includes/virtual-machines-share-images-across-tenants.md b/includes/virtual-machines-share-images-across-tenants.md index c84541cf6c2f9..3785a11d0fd78 100644 --- a/includes/virtual-machines-share-images-across-tenants.md +++ b/includes/virtual-machines-share-images-across-tenants.md @@ -24,11 +24,11 @@ Create an application registration that will be used by both tenants to share th 1. Select **New registration** from the menu at the top of the page. 1. In **Name**, type *myGalleryApp*. 1. In **Supported account types**, select **Accounts in any organizational directory and personal Microsoft accounts**. -1. In **Redirect URI**, type *https://www.microsoft.com* and then select **Register**. After the app registration has been created, the overview page will open. +1. 
In **Redirect URI**, select *Web* from the **Select a platform** dropdown and type *https://www.microsoft.com*, then select **Register**. After the app registration has been created, the overview page will open. 1. On the overview page, copy the **Application (client) ID** and save for use later. 1. Select **Certificates & secrets**, and then select **New client secret**. 1. In **Description**, type *Shared image gallery cross-tenant app secret*. -1. In **Expires**, leave the default of **In 1 year** and then select **Add**. +1. In **Expires**, change from the default of **6 months (recommended)** to **12 months** and then select **Add**. 1. Copy the value of the secret and save it to a safe place. You cannot retrieve it after you leave the page. @@ -37,7 +37,7 @@ Give the app registration permission to use the shared image gallery. 1. Select **select Access control (IAM)**, and under **Add role assignment** select *Add*. 1. Under **Role**, select **Reader**. 1. Under **Assign access to:**, leave this as **Azure AD user, group, or service principal**. -1. Under **Select**, type *myGalleryApp* and select it when it shows up in the list. When you are done, select **Save**. +1. Under **Select members**, type *myGalleryApp* and select it when it shows up in the list. When you are done, select **Review + assign**. ## Give Tenant 2 access @@ -53,7 +53,7 @@ In the [Azure portal](https://portal.azure.com) sign in as Tenant 2 and give the 1. Select the resource group and then select **Access control (IAM)**. Under **Add role assignment** select **Add**. 1. Under **Role**, type **Contributor**. 1. Under **Assign access to:**, leave this as **Azure AD user, group, or service principal**. -1. Under **Select** type *myGalleryApp* then select it when it shows up in the list. When you are done, select **Save**. +1. Under **Select members** type *myGalleryApp* then select it when it shows up in the list. When you are done, select **Review + assign**. > [!NOTE] > You need to wait for the image version to completely finish being built and replicated before you can use the same managed image to create another image version. 
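For reference, the portal steps above (creating the multi-tenant app registration, adding a client secret, and granting the app Reader access to the gallery) map roughly to the following Azure PowerShell sketch. It's illustrative only: cmdlet and parameter names vary across Az module versions, and the application name, secret lifetime, and scope shown here are placeholders rather than values taken from this article.

```azurepowershell
# Sketch only - assumes a recent, Microsoft Graph-based Az.Resources module; verify
# parameter names with Get-Help before use. All names and the scope are placeholders.
$app = New-AzADApplication -DisplayName "myGalleryApp" -SignInAudience "AzureADandPersonalMicrosoftAccount"

# Add a 12-month client secret; copy the returned value, it can't be retrieved later.
New-AzADAppCredential -ApplicationId $app.AppId -EndDate (Get-Date).AddMonths(12)

# Create the service principal and grant it Reader on the gallery in Tenant 1.
New-AzADServicePrincipal -ApplicationId $app.AppId
New-AzRoleAssignment -ApplicationId $app.AppId -RoleDefinitionName "Reader" -Scope "<gallery-resource-id>"
```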
diff --git a/includes/virtual-machines-windows-portal-sql-alwayson-ag-template.md b/includes/virtual-machines-windows-portal-sql-alwayson-ag-template.md index 64f957aa1eeae..ded1acf655abe 100644 --- a/includes/virtual-machines-windows-portal-sql-alwayson-ag-template.md +++ b/includes/virtual-machines-windows-portal-sql-alwayson-ag-template.md @@ -1,6 +1,6 @@ --- author: cynthn -ms.service: virtual-machines-windows +ms.service: virtual-machines ms.topic: include ms.date: 10/26/2018 ms.author: cynthn diff --git a/includes/virtual-machines-windows-troubleshoot-deploy-vm-top.md b/includes/virtual-machines-windows-troubleshoot-deploy-vm-top.md index 173e2ba9edd4f..d55dacac789fe 100644 --- a/includes/virtual-machines-windows-troubleshoot-deploy-vm-top.md +++ b/includes/virtual-machines-windows-troubleshoot-deploy-vm-top.md @@ -1,6 +1,6 @@ --- author: cynthn -ms.service: virtual-machines-windows +ms.service: virtual-machines ms.topic: include ms.date: 10/26/2018 ms.author: cynthn diff --git a/includes/virtual-machines-windows-troubleshoot-deployment-new-vm-table.md b/includes/virtual-machines-windows-troubleshoot-deployment-new-vm-table.md index 58402cb99ceb2..67f8bd2a87cd7 100644 --- a/includes/virtual-machines-windows-troubleshoot-deployment-new-vm-table.md +++ b/includes/virtual-machines-windows-troubleshoot-deployment-new-vm-table.md @@ -1,6 +1,6 @@ --- author: cynthn -ms.service: virtual-machines-windows +ms.service: virtual-machines ms.topic: include ms.date: 10/26/2018 ms.author: cynthn diff --git a/includes/virtual-wan-create-vwan-include.md b/includes/virtual-wan-create-vwan-include.md index 4c2ac48344ad1..88e28070aa2df 100644 --- a/includes/virtual-wan-create-vwan-include.md +++ b/includes/virtual-wan-create-vwan-include.md @@ -1,7 +1,7 @@ --- ms.author: cherylmc author: cherylmc -ms.date: 04/12/2022 +ms.date: 05/25/2022 ms.service: virtual-wan ms.topic: include diff --git a/includes/virtual-wan-empty-hub-include.md b/includes/virtual-wan-empty-hub-include.md index 2e38f5edd58a8..8e015c705bcd6 100644 --- a/includes/virtual-wan-empty-hub-include.md +++ b/includes/virtual-wan-empty-hub-include.md @@ -1,27 +1,29 @@ --- author: cherylmc ms.author: cherylmc -ms.date: 05/20/2022 +ms.date: 05/25/2022 ms.service: virtual-wan ms.topic: include #This include is used in multiple articles. Before modifying, verify that any changes apply to all articles that use this include. --- -1. Locate the Virtual WAN that you created. On the Virtual WAN page, under the **Connectivity** section, select **Hubs**. +1. Go to the virtual WAN that you created. On the virtual WAN page left pane, under the **Connectivity**, select **Hubs**. -1. On the **Hubs** page, click **+ New Hub** to open the **Create virtual hub** page. +1. On the **Hubs** page, select **+New Hub** to open the **Create virtual hub** page. :::image type="content" source="media/virtual-wan-empty-hub/new-hub.jpg" alt-text="Screenshot shows the Hubs configuration dialog box with New Hub selected."::: 1. On the **Basics** tab, fill in the values. - :::image type="content" source="media/virtual-wan-empty-hub/basics-hub.png" alt-text="Screenshot shows the Create virtual hub pane where you can enter values." lightbox= "media/virtual-wan-empty-hub/basics-hub.png"::: + :::image type="content" source="media/virtual-wan-empty-hub/basics.png" alt-text="Screenshot shows the Create virtual hub pane where you can enter values."::: - * **Region**: Select the region in which you want to deploy the virtual hub. 
+ * **Region**: This setting was previously referred to as location. It's the region in which you want to create your virtual hub. * **Name**: The name by which you want the virtual hub to be known. - * **Hub private address space**: The hub's address range in CIDR notation. + * **Hub private address space**: The hub's address range in CIDR notation. The minimum address space is /24 to create a hub. * **Virtual hub capacity**: Select from the dropdown. For more information, see [Virtual hub settings](../articles/virtual-wan/hub-settings.md). + * **Hub routing preference**: This field is only available as part of the virtual hub routing preference preview and can only be viewed in the [preview portal](https://portal.azure.com/?feature.customRouterAsn=true&feature.virtualWanRoutingPreference=true#home). See [Virtual hub routing preference](../articles/virtual-wan/about-virtual-hub-routing-preference.md) for more information. + * **Router ASN**: Set the Autonomous System Number for the virtual hub router. You can use any ASN number except numbers that are reserved by [Azure or IANA](../articles/vpn-gateway/vpn-gateway-bgp-overview.md#what-asns-autonomous-system-numbers-can-i-use). 1. Click **Review + create**. diff --git a/includes/virtual-wan-p2s-hub-include.md b/includes/virtual-wan-p2s-hub-include.md index 4ccbf2ab15b4f..1c958bd955d51 100644 --- a/includes/virtual-wan-p2s-hub-include.md +++ b/includes/virtual-wan-p2s-hub-include.md @@ -1,12 +1,14 @@ --- ms.author: cherylmc author: cherylmc -ms.date: 05/20/2022 +ms.date: 05/25/2022 ms.service: virtual-wan ms.topic: include --- -1. On the page for your **virtual WAN**, on the left pane, select **Hubs**. On the **Hubs** page, select **+New Hub**. +1. Go to the virtual WAN that you created. On the virtual WAN page left pane, under the **Connectivity**, select **Hubs**. + +1. On the **Hubs** page, select **+New Hub** to open the **Create virtual hub** page. :::image type="content" source="media/virtual-wan-p2s-hub/new-hub.png" alt-text="Screenshot of new hub."::: @@ -16,10 +18,12 @@ ms.topic: include 1. On the **Basics** tab, configure the following settings: - * **Region**: Select the region in which you want to deploy the virtual hub. + * **Region**: This setting was previously referred to as location. It's the region in which you want to create your virtual hub. * **Name**: The name by which you want the virtual hub to be known. - * **Hub private address space**: The hub's address range in CIDR notation. + * **Hub private address space**: The hub's address range in CIDR notation. The minimum address space is /24 to create a hub. * **Virtual hub capacity**: Select from the dropdown. For more information, see [Virtual hub settings](../articles/virtual-wan/hub-settings.md). + * **Hub routing preference**: This field is only available as part of the virtual hub routing preference preview and can only be viewed in the [preview portal](https://portal.azure.com/?feature.customRouterAsn=true&feature.virtualWanRoutingPreference=true#home). See [Virtual hub routing preference](../articles/virtual-wan/about-virtual-hub-routing-preference.md) for more information. + * **Router ASN**: Set the Autonomous System Number for the virtual hub router. You can use any ASN number except numbers that are reserved by [Azure or IANA](../articles/vpn-gateway/vpn-gateway-bgp-overview.md#what-asns-autonomous-system-numbers-can-i-use). 1. Click the **Point to site** tab to open the configuration page for point-to-site. To view the point to site settings, click **Yes**. 
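The **Basics** values described in these steps map roughly to the following Azure PowerShell sketch. It's illustrative only: it assumes the Az.Network module, and the resource group, virtual WAN name, hub name, region, and address prefix are placeholders.

```azurepowershell
# Sketch only - placeholders throughout; /24 is the minimum hub address space noted above.
$vwan = Get-AzVirtualWan -ResourceGroupName "TestRG" -Name "myVirtualWAN"

New-AzVirtualHub -ResourceGroupName "TestRG" `
    -Name "hub1" `
    -VirtualWan $vwan `
    -Location "westus" `
    -AddressPrefix "10.1.0.0/24"
```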
@@ -40,11 +44,11 @@ ms.topic: include After you create the User VPN gateway, go to gateway and note the RADIUS proxy IPs field. The RADIUS proxy IPs are the source IPs of the RADIUS packets the User VPN gateway sends to your RADIUS server. Therefore, your RADIUS server needs to be configured to accept authentication requests from the RADIUS proxy IPs. If the RADIUS proxy IPs field is blank or none, configure the RADIUS server to accept authentication requests from the hub's address space. - :::image type="content" source="media/virtual-wan-p2s-hub/radius-proxy-ips.png" alt-text="Screenshot of User V P N Config with RADIUS Proxy I P's." lightbox="media/virtual-wan-p2s-hub/radius-proxy-ips.png"::: + :::image type="content" source="media/virtual-wan-p2s-hub/radius-proxy-ips.png" alt-text="Screenshot of User V P N Config with RADIUS Proxy I Ps." lightbox="media/virtual-wan-p2s-hub/radius-proxy-ips.png"::: * **Client address pool** - The address pool from which IP addresses will be automatically assigned to VPN clients. For more information, see [About client address pools](../articles/virtual-wan/about-client-address-pools.md). * **Custom DNS Servers** - The IP address of the DNS server(s) the clients will use. You can specify up to 5. 1. Select **Review + create** to validate your settings. -1. When validation passes, select **Create**. Creating a hub can take 30 minutes or more to complete. +1. When validation passes, select **Create**. Creating a hub can take 30 minutes or more to complete. \ No newline at end of file diff --git a/includes/virtual-wan-tutorial-er-hub-include.md b/includes/virtual-wan-tutorial-er-hub-include.md index cd8c6bbf589a5..fc64c83ce13c1 100644 --- a/includes/virtual-wan-tutorial-er-hub-include.md +++ b/includes/virtual-wan-tutorial-er-hub-include.md @@ -1,11 +1,11 @@ --- ms.author: cherylmc author: cherylmc -ms.date: 05/20/2022 +ms.date: 05/25/2022 ms.service: virtual-wan ms.topic: include --- -1. Locate the virtual WAN that you created. On the virtual WAN page left pane, under the **Connectivity**, select **Hubs**. +1. Go to the virtual WAN that you created. On the virtual WAN page left pane, under the **Connectivity**, select **Hubs**. 1. On the **Hubs** page, select **+New Hub** to open the **Create virtual hub** page. @@ -15,8 +15,10 @@ ms.topic: include * **Region**: This setting was previously referred to as location. It's the region in which you want to create your virtual hub. * **Name**: The name by which you want the virtual hub to be known. - * **Hub private address space**: The minimum address space is /24 to create a hub. + * **Hub private address space**: The hub's address range in CIDR notation. The minimum address space is /24 to create a hub. * **Virtual hub capacity**: Select from the dropdown. For more information, see [Virtual hub settings](../articles/virtual-wan/hub-settings.md). + * **Hub routing preference**: This field is only available as part of the virtual hub routing preference preview and can only be viewed in the [preview portal](https://portal.azure.com/?feature.customRouterAsn=true&feature.virtualWanRoutingPreference=true#home). See [Virtual hub routing preference](../articles/virtual-wan/about-virtual-hub-routing-preference.md) for more information. + * **Router ASN**: Set the Autonomous System Number for the virtual hub router. You can use any ASN number except numbers that are reserved by [Azure or IANA](../articles/vpn-gateway/vpn-gateway-bgp-overview.md#what-asns-autonomous-system-numbers-can-i-use). 1. 
Select the **ExpressRoute tab**. Click **Yes** to reveal settings and fill out the field. For information about gateway scale units, see the [FAQ](../articles/virtual-wan/virtual-wan-faq.md#what-are-virtual-wan-gateway-scale-units). diff --git a/includes/virtual-wan-tutorial-s2s-hub-include.md b/includes/virtual-wan-tutorial-s2s-hub-include.md index b0a832b7ba9f4..42a91cd22874a 100644 --- a/includes/virtual-wan-tutorial-s2s-hub-include.md +++ b/includes/virtual-wan-tutorial-s2s-hub-include.md @@ -1,12 +1,12 @@ --- ms.author: cherylmc author: cherylmc -ms.date: 05/20/2022 +ms.date: 05/25/2022 ms.service: virtual-wan ms.topic: include --- -1. Locate the virtual WAN that you created. On the virtual WAN page left pane, under the **Connectivity**, select **Hubs**. +1. Go to the virtual WAN that you created. On the virtual WAN page left pane, under the **Connectivity**, select **Hubs**. 1. On the **Hubs** page, select **+New Hub** to open the **Create virtual hub** page. @@ -14,7 +14,9 @@ ms.topic: include 1. On the **Create virtual hub** page **Basics** tab, complete the following fields: - * **Region**: This setting was previously referred to as location. It's the region in which you want to create your virtual hub. + * **Region**: Select the region in which you want to deploy the virtual hub. * **Name**: The name by which you want the virtual hub to be known. - * **Hub private address space**: The minimum address space is /24 to create a hub. + * **Hub private address space**: The hub's address range in CIDR notation. The minimum address space is /24 to create a hub. * **Virtual hub capacity**: Select from the dropdown. For more information, see [Virtual hub settings](../articles/virtual-wan/hub-settings.md). + * **Hub routing preference**: This field is only available as part of the virtual hub routing preference preview and can only be viewed in the [preview portal](https://portal.azure.com/?feature.customRouterAsn=true&feature.virtualWanRoutingPreference=true#home). See [Virtual hub routing preference](../articles/virtual-wan/about-virtual-hub-routing-preference.md) for more information. + * **Router ASN**: Set the Autonomous System Number for the virtual hub router. You can use any ASN number except numbers that are reserved by [Azure or IANA](../articles/vpn-gateway/vpn-gateway-bgp-overview.md#what-asns-autonomous-system-numbers-can-i-use). diff --git a/includes/vpn-gateway-add-site-to-site-connection-portal-include.md b/includes/vpn-gateway-add-site-to-site-connection-portal-include.md index 1ba05fa20a2d0..7628daf4e0e2c 100644 --- a/includes/vpn-gateway-add-site-to-site-connection-portal-include.md +++ b/includes/vpn-gateway-add-site-to-site-connection-portal-include.md @@ -1,15 +1,11 @@ --- - title: include file - description: include file - services: vpn-gateway author: cherylmc ms.service: vpn-gateway ms.topic: include - ms.date: 04/29/2022 + ms.date: 05/26/2022 ms.author: cherylmc - ms.custom: include file --- -1. Go to your virtual network. On your VNet page, click **Connected devices** on the left. Locate your VPN gateway and click to open it. +1. Go to your virtual network. On your VNet page, select **Connected devices** on the left. Locate your VPN gateway and click to open it. 1. On the page for the gateway, select **Connections**. At the top of the Connections page, select **+Add** to open the **Add connection** page. :::image type="content" source="./media/vpn-gateway-add-site-to-site-connection-portal-include/connection.png" alt-text="Screenshot of Add Connection page." 
lightbox="./media/vpn-gateway-add-site-to-site-connection-portal-include/connection-expand.png" ::: @@ -27,4 +23,4 @@ * The remaining values for **Subscription**, **Resource Group**, and **Location** are fixed. 1. Select **OK** to create your connection. You'll see *Creating Connection* flash on the screen. -1. You can view the connection in the **Connections** page of the virtual network gateway. The Status will go from *Unknown* to *Connecting*, and then to *Succeeded*. +1. You can view the connection in the **Connections** page of the virtual network gateway. The Status will go from *Unknown* to *Connecting*, and then to *Succeeded*. \ No newline at end of file diff --git a/includes/vpn-gateway-connection-settings-portal-include.md b/includes/vpn-gateway-connection-settings-portal-include.md new file mode 100644 index 0000000000000..6dc953d2bcacb --- /dev/null +++ b/includes/vpn-gateway-connection-settings-portal-include.md @@ -0,0 +1,14 @@ +--- + author: cherylmc + ms.service: vpn-gateway + ms.topic: include + ms.date: 05/26/2022 + ms.author: cherylmc +--- +1. Go to your virtual network gateway and select **Connections** to open the Connections page. +1. Select the name of the connection you want to configure to open the **Connection** page. +1. On the Connection page left side, select **Configuration** to open the Configuration page. Make any necessary changes, then **Save**. + + In the following screenshot, for demonstration purposes, we've enabled all the settings to show you the configuration settings available in the portal. When you configure your connections, only configure the settings that you require. Otherwise, leave the default settings in place. + + :::image type="content" source="./media/vpn-gateway-connection-settings-portal-include/configuration-portal.png" alt-text="Screenshot of the connection page showing additional connection settings." lightbox="./media/vpn-gateway-connection-settings-portal-include/configuration-expand.png"::: diff --git a/includes/vpn-gateway-cross-premises-include.md b/includes/vpn-gateway-cross-premises-include.md index 483c712d78d7d..be49333a31e2e 100644 --- a/includes/vpn-gateway-cross-premises-include.md +++ b/includes/vpn-gateway-cross-premises-include.md @@ -2,13 +2,13 @@ services: vpn-gateway author: cherylmc ms.topic: include - ms.date: 04/22/2022 + ms.date: 06/08/2022 ms.author: cherylmc --- | | **Point-to-Site** | **Site-to-Site** | **ExpressRoute** | | --- | --- | --- | --- | | **Azure Supported Services** |Cloud Services and Virtual Machines |Cloud Services and Virtual Machines |[Services list](../articles/expressroute/expressroute-faqs.md#supported-services) | -| **Typical Bandwidths** |Based on the gateway SKU |Typically < 1 Gbps aggregate |50 Mbps, 100 Mbps, 200 Mbps, 500 Mbps, 1 Gbps, 2 Gbps, 5 Gbps, 10 Gbps | +| **Typical Bandwidths** |Based on the gateway SKU |Typically < 10 Gbps aggregate |50 Mbps, 100 Mbps, 200 Mbps, 500 Mbps, 1 Gbps, 2 Gbps, 5 Gbps, 10 Gbps, 100 Gbps | | **Protocols Supported** |Secure Sockets Tunneling Protocol (SSTP), OpenVPN and IPsec |IPsec |Direct connection over VLANs, NSP's VPN technologies (MPLS, VPLS,...) 
| | **Routing** |RouteBased (dynamic) |We support PolicyBased (static routing) and RouteBased (dynamic routing VPN) |BGP | | **Connection resiliency** |active-passive |active-passive or active-active |active-active | diff --git a/includes/vpn-gateway-faq-bgp-include.md b/includes/vpn-gateway-faq-bgp-include.md index 5a3a439f9d65e..4eea63a1ca713 100644 --- a/includes/vpn-gateway-faq-bgp-include.md +++ b/includes/vpn-gateway-faq-bgp-include.md @@ -1,13 +1,9 @@ --- title: include file - description: include file - services: vpn-gateway author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 03/22/2021 + ms.date: 05/25/2022 ms.author: cherylmc - ms.custom: include file --- ### Is BGP supported on all Azure VPN Gateway SKUs? @@ -63,7 +59,7 @@ Your on-premises BGP peer address must not be the same as the public IP address ### Can I use the same ASN for both on-premises VPN networks and Azure virtual networks? -No, you must assign different ASNs between your on-premises networks and your Azure virtual networks if you're connecting them together with BGP. Azure VPN gateways have a default ASN of 65515 assigned, whether BGP is enabled or not for your cross-premises connectivity. You can override this default by assigning a different ASN when you're creating the VPN gateway, or you can change the ASN after the gateway is created. You will need to assign your on-premises ASNs to the corresponding Azure local network gateways. +No, you must assign different ASNs between your on-premises networks and your Azure virtual networks if you're connecting them together with BGP. Azure VPN gateways have a default ASN of 65515 assigned, whether BGP is enabled or not for your cross-premises connectivity. You can override this default by assigning a different ASN when you're creating the VPN gateway, or you can change the ASN after the gateway is created. You'll need to assign your on-premises ASNs to the corresponding Azure local network gateways. ### What address prefixes will Azure VPN gateways advertise to me? diff --git a/includes/vpn-gateway-faq-ipsecikepolicy-include.md b/includes/vpn-gateway-faq-ipsecikepolicy-include.md index 02c3d747fe5f1..64625d7f1f121 100644 --- a/includes/vpn-gateway-faq-ipsecikepolicy-include.md +++ b/includes/vpn-gateway-faq-ipsecikepolicy-include.md @@ -1,13 +1,9 @@ --- title: include file - description: include file - services: vpn-gateway author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 03/23/2021 + ms.date: 05/25/2022 ms.author: cherylmc - ms.custom: include file --- ### Is Custom IPsec/IKE policy supported on all Azure VPN Gateway SKUs? @@ -19,7 +15,7 @@ You can only specify ***one*** policy combination for a given connection. ### Can I specify a partial policy on a connection? (for example, only IKE algorithms, but not IPsec) -No, you must specify all algorithms and parameters for both IKE (Main Mode) and IPsec (Quick Mode). Partial policy specification is not allowed. +No, you must specify all algorithms and parameters for both IKE (Main Mode) and IPsec (Quick Mode). Partial policy specification isn't allowed. ### What are the algorithms and key strengths supported in the custom policy? @@ -38,7 +34,7 @@ The following table lists the supported cryptographic algorithms and key strengt | | | > [!IMPORTANT] -> * DHGroup2048 & PFS2048 are the same as Diffie-Hellman Group **14** in IKE and IPsec PFS. See [Diffie-Hellman Groups](#DH) for the complete mappings. 
+> * DHGroup2048 & PFS2048 are the same as Diffie-Hellman Group **14** in IKE and IPsec PFS. See [Diffie-Hellman Groups](#DH) for the complete mappings. > * For GCMAES algorithms, you must specify the same GCMAES algorithm and key length for both IPsec Encryption and Integrity. > * IKEv2 Main Mode SA lifetime is fixed at 28,800 seconds on the Azure VPN gateways. > * QM SA Lifetimes are optional parameters. If none was specified, default values of 27,000 seconds (7.5 hrs) and 102400000 KBytes (102GB) are used. @@ -56,7 +52,7 @@ Your on-premises VPN device configuration must match or contain the following al * PFS Group * Traffic Selector (*) -The SA lifetimes are local specifications only, do not need to match. +The SA lifetimes are local specifications only, don't need to match. If you enable **UsePolicyBasedTrafficSelectors**, you need to ensure your VPN device has the matching traffic selectors defined with all combinations of your on-premises network (local network gateway) prefixes to/from the Azure virtual network prefixes, instead of any-to-any. For example, if your on-premises network prefixes are 10.1.0.0/16 and 10.2.0.0/16, and your virtual network prefixes are 192.168.0.0/16 and 172.16.0.0/16, you need to specify the following traffic selectors: * 10.1.0.0/16 <====> 192.168.0.0/16 @@ -116,24 +112,24 @@ No. IPsec/IKE policy only works on S2S VPN and VNet-to-VNet connections via the ### How do I create connections with IKEv1 or IKEv2 protocol type? -IKEv1 connections can be created on all RouteBased VPN type SKUs, except the Basic SKU, Standard SKU, and other [legacy SKUs](../articles/vpn-gateway/vpn-gateway-about-skus-legacy.md#gwsku). You can specify a connection protocol type of IKEv1 or IKEv2 while creating connections. If you do not specify a connection protocol type, IKEv2 is used as default option where applicable. For more information, see the [PowerShell cmdlet](/powershell/module/az.network/new-azvirtualnetworkgatewayconnection) documentation. For SKU types and IKEv1/IKEv2 support, see [Connect gateways to policy-based VPN devices](../articles/vpn-gateway/vpn-gateway-connect-multiple-policybased-rm-ps.md). +IKEv1 connections can be created on all RouteBased VPN type SKUs, except the Basic SKU, Standard SKU, and other [legacy SKUs](../articles/vpn-gateway/vpn-gateway-about-skus-legacy.md#gwsku). You can specify a connection protocol type of IKEv1 or IKEv2 while creating connections. If you don't specify a connection protocol type, IKEv2 is used as default option where applicable. For more information, see the [PowerShell cmdlet](/powershell/module/az.network/new-azvirtualnetworkgatewayconnection) documentation. For SKU types and IKEv1/IKEv2 support, see [Connect gateways to policy-based VPN devices](../articles/vpn-gateway/vpn-gateway-connect-multiple-policybased-rm-ps.md). -### Is transit between between IKEv1 and IKEv2 connections allowed? +### Is transit between IKEv1 and IKEv2 connections allowed? Yes. Transit between IKEv1 and IKEv2 connections is supported. ### Can I have IKEv1 site-to-site connections on Basic SKUs of RouteBased VPN type? -No. The Basic SKU does not support this. +No. The Basic SKU doesn't support this. ### Can I change the connection protocol type after the connection is created (IKEv1 to IKEv2 and vice versa)? -No. Once the connection is created, IKEv1/IKEv2 protocols cannot be changed. You must delete and recreate a new connection with the desired protocol type. +No. Once the connection is created, IKEv1/IKEv2 protocols can't be changed. 
You must delete and recreate a new connection with the desired protocol type. ### Why is my IKEv1 connection frequently reconnecting? -If your static routing or route based IKEv1 connection is disconnecting at routine intervals, it is likely due to VPN gateways not supporting in-place rekeys. When Main mode is getting rekeyed, your IKEv1 tunnels will disconnect and take up to 5 seconds to reconnect. Your Main mode negotiation time out value will determine the frequency of rekeys. To prevent these reconnects, you can switch to using IKEv2, which supports in-place rekeys. +If your static routing or route based IKEv1 connection is disconnecting at routine intervals, it's likely due to VPN gateways not supporting in-place rekeys. When Main mode is getting rekeyed, your IKEv1 tunnels will disconnect and take up to 5 seconds to reconnect. Your Main mode negotiation time out value will determine the frequency of rekeys. To prevent these reconnects, you can switch to using IKEv2, which supports in-place rekeys. -If your connection is reconnecting at random times, follow our [troubleshooting guide](../articles/vpn-gateway/vpn-gateway-troubleshoot-site-to-site-disconnected-intermittently.md). +If your connection is reconnecting at random times, follow our [troubleshooting guide](../articles/vpn-gateway/vpn-gateway-troubleshoot-site-to-site-disconnected-intermittently.md). ### Where can I find more configuration information for IPsec? diff --git a/includes/vpn-gateway-faq-nat-include.md b/includes/vpn-gateway-faq-nat-include.md index 1394d62271462..0313276d88f3f 100644 --- a/includes/vpn-gateway-faq-nat-include.md +++ b/includes/vpn-gateway-faq-nat-include.md @@ -19,7 +19,7 @@ You can create up to 100 NAT rules (Ingress and Egress rules combined) on a VPN ### Is NAT applied to all connections on a VPN gateway? -NAT is applied to the connections with NAT rules. If a connection does not have a NAT rule, NAT will not take effect on that connection. On the same VPN gateway, you can have some connections with NAT, and other connections without NAT working together. +NAT is applied to the connections with NAT rules. If a connection doesn't have a NAT rule, NAT won't take effect on that connection. On the same VPN gateway, you can have some connections with NAT, and other connections without NAT working together. ### What types of NAT is supported on Azure VPN gateways? @@ -69,8 +69,8 @@ Yes, you can create multiple EgressSNAT rules for the same VNet address space, a ### Can I use the same IngressSNAT rule on different connections? -Yes, this is typically used when the connections are for the same on-premises network to provide redundancy. You cannot use the same Ingress rule if the connections are for different on-premises networks. +Yes, this is typically used when the connections are for the same on-premises network to provide redundancy. You can't use the same Ingress rule if the connections are for different on-premises networks. ### Do I need both Ingress and Egress rules on a NAT connection? -You need both Ingress and Egress rules on the same connection when the on-premise network address space overlaps with the VNet address space. If the VNet address space is unique among all connected networks, you do not need the EgressSNAT rule on those connections. You can use the Ingress rules to avoid address overlap among the on-premises networks. +You need both Ingress and Egress rules on the same connection when the on-premise network address space overlaps with the VNet address space. 
If the VNet address space is unique among all connected networks, you don't need the EgressSNAT rule on those connections. You can use the Ingress rules to avoid address overlap among the on-premises networks. diff --git a/includes/vpn-gateway-faq-p2s-all-include.md b/includes/vpn-gateway-faq-p2s-all-include.md index 0887f711e4bc1..d4a7f56630632 100644 --- a/includes/vpn-gateway-faq-p2s-all-include.md +++ b/includes/vpn-gateway-faq-p2s-all-include.md @@ -1,19 +1,16 @@ --- - title: include file - description: include file - services: vpn-gateway author: cherylmc ms.service: vpn-gateway ms.topic: include - ms.date: 2/10/2022 + ms.date: 05/25/2022 ms.author: cherylmc - ms.custom: include file, devx-track-azurepowershell + ms.custom: devx-track-azurepowershell --- -### How many VPN client endpoints can I have in my Point-to-Site configuration? +### How many VPN client endpoints can I have in my point-to-site configuration? It depends on the gateway SKU. For more information on the number of connections supported, see [Gateway SKUs](../articles/vpn-gateway/vpn-gateway-about-vpngateways.md#gwsku). -### What client operating systems can I use with Point-to-Site? +### What client operating systems can I use with point-to-site? The following client operating systems are supported: @@ -25,14 +22,14 @@ The following client operating systems are supported: * Windows Server 2019 (64-bit only) * Windows Server 2022 (64-bit only) * Windows 10 -* Windows 11 +* Windows 11 * macOS version 10.11 or above * Linux (StrongSwan) * iOS [!INCLUDE [TLS](vpn-gateway-tls-updates.md)] -### Can I traverse proxies and firewalls using Point-to-Site capability? +### Can I traverse proxies and firewalls using point-to-site capability? Azure supports three types of Point-to-site VPN options: @@ -40,45 +37,46 @@ Azure supports three types of Point-to-site VPN options: * OpenVPN. OpenVPN is a SSL-based solution that can penetrate firewalls since most firewalls open the outbound TCP port that 443 SSL uses. -* IKEv2 VPN. IKEv2 VPN is a standards-based IPsec VPN solution that uses outbound UDP ports 500 and 4500 and IP protocol no. 50. Firewalls do not always open these ports, so there is a possibility of IKEv2 VPN not being able to traverse proxies and firewalls. +* IKEv2 VPN. IKEv2 VPN is a standards-based IPsec VPN solution that uses outbound UDP ports 500 and 4500 and IP protocol no. 50. Firewalls don't always open these ports, so there's a possibility of IKEv2 VPN not being able to traverse proxies and firewalls. -### If I restart a client computer configured for Point-to-Site, will the VPN automatically reconnect? +### If I restart a client computer configured for point-to-site, will the VPN automatically reconnect? Auto-reconnect is a function of the client being used. Windows supports auto-reconnect by configuring the **Always On VPN** client feature. -### Does Point-to-Site support DDNS on the VPN clients? +### Does point-to-site support DDNS on the VPN clients? -DDNS is currently not supported in Point-to-Site VPNs. +DDNS is currently not supported in point-to-site VPNs. -### Can I have Site-to-Site and Point-to-Site configurations coexist for the same virtual network? +### Can I have Site-to-Site and point-to-site configurations coexist for the same virtual network? -Yes. For the Resource Manager deployment model, you must have a RouteBased VPN type for your gateway. For the classic deployment model, you need a dynamic gateway. 
We do not support Point-to-Site for static routing VPN gateways or PolicyBased VPN gateways. +Yes. For the Resource Manager deployment model, you must have a RouteBased VPN type for your gateway. For the classic deployment model, you need a dynamic gateway. We don't support point-to-site for static routing VPN gateways or PolicyBased VPN gateways. -### Can I configure a Point-to-Site client to connect to multiple virtual network gateways at the same time? +### Can I configure a point-to-site client to connect to multiple virtual network gateways at the same time? -Depending on the VPN Client software used, you may be able to connect to multiple Virtual Network Gateways provided the virtual networks being connected to do not have conflicting address spaces between them or the network from with the client is connecting from. While the Azure VPN Client supports many VPN connections, only one connection can be Connected at any given time. +Depending on the VPN Client software used, you may be able to connect to multiple Virtual Network Gateways provided the virtual networks being connected to don't have conflicting address spaces between them or with the network from which the client is connecting. While the Azure VPN Client supports many VPN connections, only one connection can be Connected at any given time. -### Can I configure a Point-to-Site client to connect to multiple virtual networks at the same time? +### Can I configure a point-to-site client to connect to multiple virtual networks at the same time? -Yes, Point-to-Site client connections to a virtual network gateway that is deployed in a VNet which is peered with other VNets may have access to other peered VNets. Point-to-Site clients will be able to connect to peered VNets as long as the peered VNets are using the UseRemoteGateway / AllowGatewayTransit features. For more information, see [About Point-to-Site routing](../articles/vpn-gateway/vpn-gateway-about-point-to-site-routing.md). +Yes, point-to-site client connections to a virtual network gateway that is deployed in a VNet that is peered with other VNets may have access to other peered VNets. Point-to-site clients will be able to connect to peered VNets as long as the peered VNets are using the UseRemoteGateway / AllowGatewayTransit features. For more information, see [About point-to-site routing](../articles/vpn-gateway/vpn-gateway-about-point-to-site-routing.md). -### How much throughput can I expect through Site-to-Site or Point-to-Site connections? +### How much throughput can I expect through Site-to-Site or point-to-site connections? -It's difficult to maintain the exact throughput of the VPN tunnels. IPsec and SSTP are crypto-heavy VPN protocols. Throughput is also limited by the latency and bandwidth between your premises and the Internet. For a VPN Gateway with only IKEv2 Point-to-Site VPN connections, the total throughput that you can expect depends on the Gateway SKU. For more information on throughput, see [Gateway SKUs](../articles/vpn-gateway/vpn-gateway-about-vpngateways.md#gwsku). +It's difficult to maintain the exact throughput of the VPN tunnels. IPsec and SSTP are crypto-heavy VPN protocols. Throughput is also limited by the latency and bandwidth between your premises and the Internet. For a VPN Gateway with only IKEv2 point-to-site VPN connections, the total throughput that you can expect depends on the Gateway SKU. For more information on throughput, see [Gateway SKUs](../articles/vpn-gateway/vpn-gateway-about-vpngateways.md#gwsku).
-### Can I use any software VPN client for Point-to-Site that supports SSTP and/or IKEv2? +### Can I use any software VPN client for point-to-site that supports SSTP and/or IKEv2? No. You can only use the native VPN client on Windows for SSTP, and the native VPN client on Mac for IKEv2. However, you can use the OpenVPN client on all platforms to connect over OpenVPN protocol. Refer to the list of [supported client operating systems](#supportedclientos). -### Can I change the authentication type for a Point-to-Site connection? +### Can I change the authentication type for a point-to-site connection? -Yes. In the portal, navigate to the **VPN gateway -> Point-to-site configuration** page. For **Authentication type**, select the authentication types that you want to use . Please note that after you make a change to an authentication type, current clients may not be able to connect until a new VPN client configuration profile has been generated, downloaded, and applied to each VPN client. +Yes. In the portal, navigate to the **VPN gateway -> Point-to-site configuration** page. For **Authentication type**, select the authentication types that you want to use. Note that after you make a change to an authentication type, current clients may not be able to connect until a new VPN client configuration profile has been generated, downloaded, and applied to each VPN client. ### Does Azure support IKEv2 VPN with Windows? -IKEv2 is supported on Windows 10 and Server 2016. However, in order to use IKEv2 in certain OS versions, you must install updates and set a registry key value locally. Note that OS versions prior to Windows 10 are not supported and can only use SSTP or **OpenVPN® Protocol**. +IKEv2 is supported on Windows 10 and Server 2016. However, in order to use IKEv2 in certain OS versions, you must install updates and set a registry key value locally. OS versions prior to Windows 10 aren't supported and can only use SSTP or **OpenVPN® Protocol**. -> NOTE: Windows OS builds newer than Windows 10 Version 1709 and Windows Server 2016 Version 1607 do not require these steps. +> [!NOTE] +> Windows OS builds newer than Windows 10 Version 1709 and Windows Server 2016 Version 1607 do not require these steps. To prepare Windows 10 or Server 2016 for IKEv2: @@ -100,7 +98,7 @@ The traffic selectors limit in Windows determines the maximum number of address ### What happens when I configure both SSTP and IKEv2 for P2S VPN connections? -When you configure both SSTP and IKEv2 in a mixed environment (consisting of Windows and Mac devices), the Windows VPN client will always try IKEv2 tunnel first, but will fall back to SSTP if the IKEv2 connection is not successful. MacOSX will only connect via IKEv2. +When you configure both SSTP and IKEv2 in a mixed environment (consisting of Windows and Mac devices), the Windows VPN client will always try IKEv2 tunnel first, but will fall back to SSTP if the IKEv2 connection isn't successful. MacOSX will only connect via IKEv2. ### Other than Windows and Mac, which other platforms does Azure support for P2S VPN? @@ -108,7 +106,7 @@ Azure supports Windows, Mac, and Linux for P2S VPN. ### I already have an Azure VPN Gateway deployed. Can I enable RADIUS and/or IKEv2 VPN on it? -Yes, if the gateway SKU that you are using supports RADIUS and/or IKEv2, you can enable these features on gateways that you've already deployed by using PowerShell or the Azure portal. Note that the Basic SKU does not support RADIUS or IKEv2. 
+Yes, if the gateway SKU that you're using supports RADIUS and/or IKEv2, you can enable these features on gateways that you've already deployed by using PowerShell or the Azure portal. The Basic SKU doesn't support RADIUS or IKEv2. ### How do I remove the configuration of a P2S connection? diff --git a/includes/vpn-gateway-faq-p2s-azurecert-include.md b/includes/vpn-gateway-faq-p2s-azurecert-include.md index 2e410896a679b..cd8944931ccf2 100644 --- a/includes/vpn-gateway-faq-p2s-azurecert-include.md +++ b/includes/vpn-gateway-faq-p2s-azurecert-include.md @@ -1,13 +1,9 @@ --- title: include file - description: include file - services: vpn-gateway author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 05/28/2021 + ms.date: 05/25/2022 ms.author: cherylmc - ms.custom: include file --- [!INCLUDE [P2S FAQ All](vpn-gateway-faq-p2s-all-include.md)] @@ -15,7 +11,7 @@ Uncheck **"Verify the server's identity by validating the certificate"** or **add the server FQDN along with the certificate** when creating a profile manually. You can do this by running **rasphone** from a command prompt and picking the profile from the drop-down list. -Bypassing server identity validation is not recommended in general, but with Azure certificate authentication, the same certificate is being used for server validation in the VPN tunneling protocol (IKEv2/SSTP) and the EAP protocol. Since the server certificate and FQDN is already validated by the VPN tunneling protocol, it is redundant to validate the same again in EAP. +Bypassing server identity validation isn't recommended in general, but with Azure certificate authentication, the same certificate is being used for server validation in the VPN tunneling protocol (IKEv2/SSTP) and the EAP protocol. Since the server certificate and FQDN is already validated by the VPN tunneling protocol, it's redundant to validate the same again in EAP. ![point-to-site auth](./media/vpn-gateway-faq-p2s-all-include/servercert.png "Server Certificate") @@ -39,11 +35,11 @@ You can use your Enterprise PKI solution (your internal PKI), Azure PowerShell, * **MakeCert:** See the [MakeCert](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site-makecert.md) article for steps. -* **OpenSSL:** +* **OpenSSL:** - * When exporting certificates, be sure to convert the root certificate to Base64. + * When exporting certificates, be sure to convert the root certificate to Base64. - * For the client certificate: + * For the client certificate: - * When creating the private key, specify the length as 4096. - * When creating the certificate, for the *-extensions* parameter, specify *usr_cert*. + * When creating the private key, specify the length as 4096. + * When creating the certificate, for the *-extensions* parameter, specify *usr_cert*. diff --git a/includes/vpn-gateway-faq-p2s-radius-include.md b/includes/vpn-gateway-faq-p2s-radius-include.md index 158f726ed311a..629ba236ae7e4 100644 --- a/includes/vpn-gateway-faq-p2s-radius-include.md +++ b/includes/vpn-gateway-faq-p2s-radius-include.md @@ -1,53 +1,52 @@ --- title: include file - description: include file - services: vpn-gateway author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 05/28/2021 + ms.date: 05/24/2022 ms.author: cherylmc - ms.custom: include file --- [!INCLUDE [P2S FAQ All](vpn-gateway-faq-p2s-all-include.md)] ### Is RADIUS authentication supported on all Azure VPN Gateway SKUs? -RADIUS authentication is supported for VpnGw1, VpnGw2, and VpnGw3 SKUs. 
If you are using legacy SKUs, RADIUS authentication is supported on Standard and High Performance SKUs. It is not supported on the Basic Gateway SKU.  - +RADIUS authentication is supported for VpnGw1, VpnGw2, and VpnGw3 SKUs. + +For legacy SKUs, RADIUS authentication is supported on Standard and High Performance SKUs. It isn't supported on the Basic Gateway SKU. + ### Is RADIUS authentication supported for the classic deployment model? - -No. RADIUS authentication is not supported for the classic deployment model. + +No. RADIUS authentication isn't supported for the classic deployment model. ### What is the timeout period for RADIUS requests sent to the RADIUS server? -RADIUS requests are set to timeout after 30 seconds. User defined timeout values are not supported today. - + +RADIUS requests are set to time out after 30 seconds. User-defined timeout values aren't supported today. + ### Are 3rd-party RADIUS servers supported? Yes, 3rd-party RADIUS servers are supported. - + ### What are the connectivity requirements to ensure that the Azure gateway is able to reach an on-premises RADIUS server? -A VPN Site-to-Site connection to the on-premises site, with the proper routes configured, is required.   - +A site-to-site VPN connection to the on-premises site, with the proper routes configured, is required. + ### Can traffic to an on-premises RADIUS server (from the Azure VPN gateway) be routed over an ExpressRoute connection? -No. It can only be routed over a Site-to-Site connection. - +No. It can only be routed over a site-to-site connection. + ### Is there a change in the number of SSTP connections supported with RADIUS authentication? What is the maximum number of SSTP and IKEv2 connections supported? -There is no change in the maximum number of SSTP connections supported on a gateway with RADIUS authentication. It remains 128 for SSTP, but depends on the gateway SKU for IKEv2. For more information on the number of connections supported, see [Gateway SKUs](../articles/vpn-gateway/vpn-gateway-about-vpngateways.md#gwsku). - -### What is the difference between doing certificate authentication using a RADIUS server vs. using Azure native certificate authentication (by uploading a trusted certificate to Azure). +There is no change in the maximum number of SSTP connections supported on a gateway with RADIUS authentication. It remains 128 for SSTP, but depends on the gateway SKU for IKEv2. For more information on the number of connections supported, see [Gateway SKUs](../articles/vpn-gateway/vpn-gateway-about-vpngateways.md#gwsku). + +### What is the difference between doing certificate authentication using a RADIUS server vs. using Azure native certificate authentication (by uploading a trusted certificate to Azure)? In RADIUS certificate authentication, the authentication request is forwarded to a RADIUS server that handles the actual certificate validation. This option is useful if you want to integrate with a certificate authentication infrastructure that you already have through RADIUS. - + When using Azure for certificate authentication, the Azure VPN gateway performs the validation of the certificate. You need to upload your certificate public key to the gateway. You can also specify list of revoked certificates that shouldn’t be allowed to connect. ### Does RADIUS authentication work with both IKEv2, and SSTP VPN? -Yes, RADIUS authentication is supported for both IKEv2, and SSTP VPN.  +Yes, RADIUS authentication is supported for both IKEv2 and SSTP VPN.
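To complement the RADIUS answers above, here's a minimal Azure PowerShell sketch of pointing an existing gateway at an on-premises RADIUS server. The gateway name, address pool, RADIUS server IP, and shared secret are illustrative assumptions, not values from this article.

```powershell
# Placeholder values for illustration only.
$gw = Get-AzVirtualNetworkGateway -Name "VNet1GW" -ResourceGroupName "TestRG1"

# The RADIUS shared secret is passed as a SecureString.
$secret = ConvertTo-SecureString -String "YourRadiusSharedSecret" -AsPlainText -Force

# Point the gateway's point-to-site configuration at the on-premises RADIUS server,
# which must be reachable over the site-to-site connection described above.
Set-AzVirtualNetworkGateway -VirtualNetworkGateway $gw `
    -VpnClientAddressPool "172.16.201.0/24" `
    -RadiusServerAddress "10.51.0.15" `
    -RadiusServerSecret $secret
```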
### Does RADIUS authentication work with the OpenVPN client? -RADIUS authentication is supported for the OpenVPN protocol only through PowerShell. +RADIUS authentication is supported for the OpenVPN protocol only through PowerShell. \ No newline at end of file diff --git a/includes/vpn-gateway-faq-point-to-site-classic-include.md b/includes/vpn-gateway-faq-point-to-site-classic-include.md index d00005f614399..36bb6c036ba4b 100644 --- a/includes/vpn-gateway-faq-point-to-site-classic-include.md +++ b/includes/vpn-gateway-faq-point-to-site-classic-include.md @@ -1,17 +1,13 @@ --- title: include file - description: include file - services: vpn-gateway author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 05/28/2021 + ms.date: 05/25/2022 ms.author: cherylmc - ms.custom: include file --- This FAQ applies to P2S connections that use the classic deployment model. -### What client operating systems can I use with Point-to-Site? +### What client operating systems can I use with point-to-site? The following client operating systems are supported: @@ -22,40 +18,41 @@ The following client operating systems are supported: * Windows Server 2012 (64-bit only) * Windows Server 2012 R2 (64-bit only) * Windows 10 +* Windows 11 -### Can I use any software VPN client that supports SSTP for Point-to-Site? +### Can I use any software VPN client that supports SSTP for point-to-site? No. Support is limited only to the listed Windows operating system versions. -### How many VPN client endpoints can exist in my Point-to-Site configuration? +### How many VPN client endpoints can exist in my point-to-site configuration? The number of VPN client endpoints depends on your gateway sku and protocol. [!INCLUDE [Aggregated throughput by SKU](./vpn-gateway-table-gwtype-aggtput-include.md)] -### Can I use my own internal PKI root CA for Point-to-Site connectivity? +### Can I use my own internal PKI root CA for point-to-site connectivity? Yes. Previously, only self-signed root certificates could be used. You can still upload up to 20 root certificates. -### Can I traverse proxies and firewalls by using Point-to-Site? +### Can I traverse proxies and firewalls by using point-to-site? Yes. We use Secure Socket Tunneling Protocol (SSTP) to tunnel through firewalls. This tunnel appears as an HTTPS connection. -### If I restart a client computer configured for Point-to-Site, will the VPN automatically reconnect? +### If I restart a client computer configured for point-to-site, will the VPN automatically reconnect? By default, the client computer won't reestablish the VPN connection automatically. -### Does Point-to-Site support auto reconnect and DDNS on the VPN clients? +### Does point-to-site support auto reconnect and DDNS on the VPN clients? -No. Auto reconnect and DDNS are currently not supported in Point-to-Site VPNs. +No. Auto reconnect and DDNS are currently not supported in point-to-site VPNs. -### Can I have Site-to-Site and Point-to-Site configurations for the same virtual network? +### Can I have Site-to-Site and point-to-site configurations for the same virtual network? -Yes. Both solutions will work if you have a RouteBased VPN type for your gateway. For the classic deployment model, you need a dynamic gateway. We don't support Point-to-Site for static routing VPN gateways or gateways that use the **-VpnType PolicyBased** cmdlet. +Yes. Both solutions will work if you have a RouteBased VPN type for your gateway. For the classic deployment model, you need a dynamic gateway. 
We don't support point-to-site for static routing VPN gateways or gateways that use the **-VpnType PolicyBased** cmdlet. -### Can I configure a Point-to-Site client to connect to multiple virtual networks at the same time? +### Can I configure a point-to-site client to connect to multiple virtual networks at the same time? -Yes. However, the virtual networks can't have overlapping IP prefixes and the Point-to-Site address spaces must not overlap between the virtual networks. +Yes. However, the virtual networks can't have overlapping IP prefixes and the point-to-site address spaces must not overlap between the virtual networks. -### How much throughput can I expect through Site-to-Site or Point-to-Site connections? +### How much throughput can I expect through Site-to-Site or point-to-site connections? It's difficult to maintain the exact throughput of the VPN tunnels. IPsec and SSTP are crypto-heavy VPN protocols. Throughput is also limited by the latency and bandwidth between your premises and the internet. diff --git a/includes/vpn-gateway-faq-vnet-vnet-include.md b/includes/vpn-gateway-faq-vnet-vnet-include.md index 471ac0c9d9d92..aca50593481e8 100644 --- a/includes/vpn-gateway-faq-vnet-vnet-include.md +++ b/includes/vpn-gateway-faq-vnet-vnet-include.md @@ -1,5 +1,4 @@ --- - description: include file author: cherylmc ms.service: vpn-gateway ms.topic: include @@ -79,7 +78,4 @@ No. You can't have overlapping IP address ranges. ### Can there be overlapping address spaces among connected virtual networks and on-premises local sites? -No. You can't have overlapping IP address ranges. - - - +No. You can't have overlapping IP address ranges. \ No newline at end of file diff --git a/includes/vpn-gateway-generate-export-certificates-include.md b/includes/vpn-gateway-generate-export-certificates-include.md index 1b10a96a42994..fc98d4545b431 100644 --- a/includes/vpn-gateway-generate-export-certificates-include.md +++ b/includes/vpn-gateway-generate-export-certificates-include.md @@ -1,13 +1,9 @@ --- - title: include file - description: include file - services: vpn-gateway + ms.topic: include author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 05/26/2021 + ms.date: 05/23/2022 ms.author: cherylmc - ms.custom: include file # This include is used for both Virtual WAN and VPN Gateway articles. Any changes you make must apply address both services. --- @@ -16,7 +12,7 @@ Use the New-SelfSignedCertificate cmdlet to create a self-signed root certificate. For additional parameter information, see [New-SelfSignedCertificate](/powershell/module/pki/new-selfsignedcertificate). -1. From a computer running Windows 10 or Windows Server 2016, open a Windows PowerShell console with elevated privileges. These examples do not work in the Azure Cloud Shell "Try It". You must run these examples locally. +1. From a computer running Windows 10 or later, or Windows Server 2016, open a Windows PowerShell console with elevated privileges. These examples don't work in the Azure Cloud Shell "Try It". You must run these examples locally. 1. Use the following example to create the self-signed root certificate. The following example creates a self-signed root certificate named 'P2SRootCert' that is automatically installed in 'Certificates-Current User\Personal\Certificates'. You can view the certificate by opening *certmgr.msc*, or *Manage User Certificates*. Run the following example with any necessary modifications. 
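The root-certificate code block that this step refers to isn't included in the diff. For orientation only, a minimal New-SelfSignedCertificate sketch of that kind of command follows; treat the subject name and parameter choices as illustrative assumptions rather than the article's exact snippet.

```powershell
# Illustrative sketch: create a self-signed root certificate in the current user store.
$cert = New-SelfSignedCertificate -Type Custom -KeySpec Signature `
    -Subject "CN=P2SRootCert" -KeyExportPolicy Exportable `
    -HashAlgorithm sha256 -KeyLength 2048 `
    -CertStoreLocation "Cert:\CurrentUser\My" `
    -KeyUsageProperty Sign -KeyUsage CertSign
```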
@@ -32,7 +28,7 @@ Use the New-SelfSignedCertificate cmdlet to create a self-signed root certificat ## Generate a client certificate -Each client computer that connects to a VNet using Point-to-Site must have a client certificate installed. You generate a client certificate from the self-signed root certificate, and then export and install the client certificate. If the client certificate is not installed, authentication fails. +Each client computer that connects to a VNet using Point-to-Site must have a client certificate installed. You generate a client certificate from the self-signed root certificate, and then export and install the client certificate. If the client certificate isn't installed, authentication fails. The following steps walk you through generating a client certificate from a self-signed root certificate. You may generate multiple client certificates from the same root certificate. When you generate client certificates using the steps below, the client certificate is automatically installed on the computer that you used to generate the certificate. If you want to install a client certificate on another client computer, you can export the certificate. @@ -40,9 +36,9 @@ The examples use the New-SelfSignedCertificate cmdlet to generate a client certi ### Example 1 - PowerShell console session still open -Use this example if you have not closed your PowerShell console after creating the self-signed root certificate. This example continues from the previous section and uses the declared '$cert' variable. If you closed the PowerShell console after creating the self-signed root certificate, or are creating additional client certificates in a new PowerShell console session, use the steps in [Example 2](#ex2). +Use this example if you haven't closed your PowerShell console after creating the self-signed root certificate. This example continues from the previous section and uses the declared '$cert' variable. If you closed the PowerShell console after creating the self-signed root certificate, or are creating additional client certificates in a new PowerShell console session, use the steps in [Example 2](#ex2). -Modify and run the example to generate a client certificate. If you run the following example without modifying it, the result is a client certificate named 'P2SChildCert'. If you want to name the child certificate something else, modify the CN value. Do not change the TextExtension when running this example. The client certificate that you generate is automatically installed in 'Certificates - Current User\Personal\Certificates' on your computer. +Modify and run the example to generate a client certificate. If you run the following example without modifying it, the result is a client certificate named 'P2SChildCert'. If you want to name the child certificate something else, modify the CN value. Don't change the TextExtension when running this example. The client certificate that you generate is automatically installed in 'Certificates - Current User\Personal\Certificates' on your computer. 
```powershell New-SelfSignedCertificate -Type Custom -DnsName P2SChildCert -KeySpec Signature ` @@ -54,7 +50,7 @@ New-SelfSignedCertificate -Type Custom -DnsName P2SChildCert -KeySpec Signature ### Example 2 - New PowerShell console session -If you are creating additional client certificates, or are not using the same PowerShell session that you used to create your self-signed root certificate, use the following steps: +If you're creating additional client certificates, or aren't using the same PowerShell session that you used to create your self-signed root certificate, use the following steps: 1. Identify the self-signed root certificate that is installed on the computer. This cmdlet returns a list of certificates that are installed on your computer. @@ -83,7 +79,7 @@ If you are creating additional client certificates, or are not using the same Po $cert = Get-ChildItem -Path "Cert:\CurrentUser\My\7181AA8C1B4D34EEDB2F3D3BEC5839F3FE52D655" ``` -1. Modify and run the example to generate a client certificate. If you run the following example without modifying it, the result is a client certificate named 'P2SChildCert'. If you want to name the child certificate something else, modify the CN value. Do not change the TextExtension when running this example. The client certificate that you generate is automatically installed in 'Certificates - Current User\Personal\Certificates' on your computer. +1. Modify and run the example to generate a client certificate. If you run the following example without modifying it, the result is a client certificate named 'P2SChildCert'. If you want to name the child certificate something else, modify the CN value. Don't change the TextExtension when running this example. The client certificate that you generate is automatically installed in 'Certificates - Current User\Personal\Certificates' on your computer. ```powershell New-SelfSignedCertificate -Type Custom -DnsName P2SChildCert -KeySpec Signature ` diff --git a/includes/vpn-gateway-p2s-clientcert-include.md b/includes/vpn-gateway-p2s-clientcert-include.md index 100f61f411d96..be3fe7dfed16c 100644 --- a/includes/vpn-gateway-p2s-clientcert-include.md +++ b/includes/vpn-gateway-p2s-clientcert-include.md @@ -1,11 +1,8 @@ --- - title: include file - description: include file - services: vpn-gateway + ms.topic: include author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 10/28/2020 + ms.date: 05/23/2022 ms.author: cherylmc --- @@ -27,8 +24,8 @@ You can generate client certificates by using the following methods: The steps in these articles generate a compatible client certificate, which you can then export and distribute. - * [Windows 10 PowerShell instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site.md#clientcert): These instructions require Windows 10 and PowerShell to generate certificates. The generated certificates can be installed on any supported P2S client. + * [Windows 10 or later PowerShell instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site.md#clientcert): These instructions require Windows 10 or later, and PowerShell to generate certificates. The generated certificates can be installed on any supported P2S client. - * [MakeCert instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site-makecert.md): Use MakeCert if you don't have access to a Windows 10 computer for generating certificates. Although MakeCert is deprecated, you can still use it to generate certificates. 
You can install the generated certificates on any supported P2S client. + * [MakeCert instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site-makecert.md): Use MakeCert if you don't have access to a Windows 10 or later computer for generating certificates. Although MakeCert is deprecated, you can still use it to generate certificates. You can install the generated certificates on any supported P2S client. * [Linux instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site-linux.md). \ No newline at end of file diff --git a/includes/vpn-gateway-p2s-rootcert-include.md b/includes/vpn-gateway-p2s-rootcert-include.md index 3cc2ce293ca1d..97234d9706fd2 100644 --- a/includes/vpn-gateway-p2s-rootcert-include.md +++ b/includes/vpn-gateway-p2s-rootcert-include.md @@ -1,19 +1,15 @@ --- - title: include file - description: include file - services: vpn-gateway + ms.topic: include author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 10/28/2020 + ms.date: 05/23/2022 ms.author: cherylmc - ms.custom: include file --- Obtain the .cer file for the root certificate. You can use either a root certificate that was generated with an enterprise solution (recommended), or generate a self-signed certificate. After you create the root certificate, export the public certificate data (not the private key) as a Base64 encoded X.509 .cer file. You upload this file later to Azure. * **Enterprise certificate:** If you're using an enterprise solution, you can use your existing certificate chain. Acquire the .cer file for the root certificate that you want to use. * **Self-signed root certificate:** If you aren't using an enterprise certificate solution, create a self-signed root certificate. Otherwise, the certificates you create won't be compatible with your P2S connections and clients will receive a connection error when they try to connect. You can use Azure PowerShell, MakeCert, or OpenSSL. The steps in the following articles describe how to generate a compatible self-signed root certificate: - * [Windows 10 PowerShell instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site.md): These instructions require Windows 10 and PowerShell to generate certificates. Client certificates that are generated from the root certificate can be installed on any supported P2S client. - * [MakeCert instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site-makecert.md): Use MakeCert if you don't have access to a Windows 10 computer to use to generate certificates. Although MakeCert is deprecated, you can still use it to generate certificates. Client certificates that you generate from the root certificate can be installed on any supported P2S client. + * [Windows 10 or later PowerShell instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site.md): These instructions require Windows 10 or later and PowerShell to generate certificates. Client certificates that are generated from the root certificate can be installed on any supported P2S client. + * [MakeCert instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site-makecert.md): Use MakeCert if you don't have access to a Windows 10 or later computer to use to generate certificates. Although MakeCert is deprecated, you can still use it to generate certificates. Client certificates that you generate from the root certificate can be installed on any supported P2S client. 
* [Linux instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site-linux.md). \ No newline at end of file diff --git a/includes/vpn-gateway-reset-gw-portal-include.md b/includes/vpn-gateway-reset-gw-portal-include.md index 85b2958fb422e..d61e635b0622e 100644 --- a/includes/vpn-gateway-reset-gw-portal-include.md +++ b/includes/vpn-gateway-reset-gw-portal-include.md @@ -1,13 +1,9 @@ --- - title: include file - description: include file - services: vpn-gateway author: cherylmc ms.service: vpn-gateway ms.topic: include - ms.date: 04/29/2022 + ms.date: 05/26/2022 ms.author: cherylmc - ms.custom: include file --- 1. In the portal, navigate to the virtual network gateway that you want to reset. @@ -16,4 +12,4 @@ :::image type="content" source="./media/vpn-gateway-reset-gw-portal/menu.png" alt-text="Screenshot shows menu with Reset." lightbox="./media/vpn-gateway-reset-gw-portal/menu-expand.png"::: 1. On the **Reset** page, select **Reset**. Once the command is issued, the current active instance of the Azure VPN gateway is rebooted immediately. Resetting the gateway will cause a gap in VPN connectivity, and may limit future root cause analysis of the issue. - :::image type="content" source="./media/vpn-gateway-reset-gw-portal/reset.png" alt-text="Screenshot shows page with Reset." lightbox="./media/vpn-gateway-reset-gw-portal/reset.png"::: + :::image type="content" source="./media/vpn-gateway-reset-gw-portal/reset.png" alt-text="Screenshot shows page with Reset." lightbox="./media/vpn-gateway-reset-gw-portal/reset-expand.png"::: diff --git a/includes/vpn-gateway-table-gwtype-aggtput-include.md b/includes/vpn-gateway-table-gwtype-aggtput-include.md index 295e5338c9d0e..2c1a4e60ec39d 100644 --- a/includes/vpn-gateway-table-gwtype-aggtput-include.md +++ b/includes/vpn-gateway-table-gwtype-aggtput-include.md @@ -2,7 +2,7 @@ author: cherylmc ms.service: vpn-gateway ms.topic: include - ms.date: 05/17/2022 + ms.date: 06/04/2022 ms.author: cherylmc --- @@ -35,18 +35,29 @@ * SLA (Service Level Agreement) information can be found on the [SLA](https://azure.microsoft.com/support/legal/sla/vpn-gateway/) page. -* IPsec limits the throughput of a single VPN tunnel (both S2S and P2S connections) to 1.25Gbps. **If you have a lot of P2S connections, it can negatively impact your S2S connections.** The Aggregate Throughput Benchmarks were tested by maximizing a combination of S2S and P2S connections. A single P2S or S2S connection can have a much lower throughput than the 1.25Gbps limit. +* If you have a lot of P2S connections, it can negatively impact your S2S connections. The Aggregate Throughput Benchmarks were tested by maximizing a combination of S2S and P2S connections. A single P2S or S2S connection can have a much lower throughput. * Note that all benchmarks aren't guaranteed due to Internet traffic conditions and your application behaviors -To help our customers understand the relative performance of SKUs using different algorithms, we used publicly available iPerf and CTSTraffic tools to measure performances for site-to-site connections. The table below lists the results of performance tests for Generation 1, VpnGw SKUs. As you can see, the best performance is obtained when we used GCMAES256 algorithm for both IPsec Encryption and Integrity. We got average performance when using AES256 for IPsec Encryption and SHA256 for Integrity. When we used DES3 for IPsec Encryption and SHA256 for Integrity we got lowest performance. 
+To help our customers understand the relative performance of SKUs using different algorithms, we used publicly available iPerf and CTSTraffic tools to measure performances for site-to-site connections. The table below lists the results of performance tests for VpnGw SKUs. As you can see, the best performance is obtained when we used GCMAES256 algorithm for both IPsec Encryption and Integrity. We got average performance when using AES256 for IPsec Encryption and SHA256 for Integrity. When we used DES3 for IPsec Encryption and SHA256 for Integrity we got lowest performance. A VPN tunnel connects to a VPN gateway instance. Each instance throughput is mentioned in the above throughput table and is available aggregated across all tunnels connecting to that instance.
+The table below shows the observed bandwidth and packets per second throughput per tunnel for the different gateway SKUs. All testing was performed between gateways (endpoints) within Azure across different regions with 100 connections and under standard load conditions.
+
 |**Generation**|**SKU** | **Algorithms<br>used** | **Throughput<br>observed per tunnel** | **Packets per second per tunnel<br>observed** |
|--- |--- | --- | --- | --- |
-|**Generation1**|**VpnGw1**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 650 Mbps<br>500 Mbps<br>120 Mbps | 58,000<br>50,000<br>50,000|
-|**Generation1**|**VpnGw2**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 1 Gbps<br>500 Mbps<br>120 Mbps | 90,000<br>80,000<br>55,000|
-|**Generation1**|**VpnGw3**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 1.25 Gbps<br>550 Mbps<br>120 Mbps | 105,000<br>90,000<br>60,000|
-|**Generation1**|**VpnGw1AZ**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 650 Mbps<br>500 Mbps<br>120 Mbps | 58,000<br>50,000<br>50,000|
-|**Generation1**|**VpnGw2AZ**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 1 Gbps<br>500 Mbps<br>120 Mbps | 90,000<br>80,000<br>55,000|
-|**Generation1**|**VpnGw3AZ**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 1.25 Gbps<br>550 Mbps<br>120 Mbps | 105,000<br>90,000<br>60,000|
+|**Generation1**|**VpnGw1**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 650 Mbps<br>500 Mbps<br>130 Mbps | 62,000<br>47,000<br>12,000|
+|**Generation1**|**VpnGw2**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 1.2 Gbps<br>650 Mbps<br>140 Mbps | 100,000<br>61,000<br>13,000|
+|**Generation1**|**VpnGw3**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 1.25 Gbps<br>700 Mbps<br>140 Mbps | 120,000<br>66,000<br>13,000|
+|**Generation1**|**VpnGw1AZ**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 650 Mbps<br>500 Mbps<br>130 Mbps | 62,000<br>47,000<br>12,000|
+|**Generation1**|**VpnGw2AZ**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 1.2 Gbps<br>650 Mbps<br>140 Mbps | 110,000<br>61,000<br>13,000|
+|**Generation1**|**VpnGw3AZ**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 1.25 Gbps<br>700 Mbps<br>140 Mbps | 120,000<br>66,000<br>13,000|
+| | |
+|**Generation2**|**VpnGw2**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 1.25 Gbps<br>550 Mbps<br>130 Mbps | 120,000<br>52,000<br>12,000|
+|**Generation2**|**VpnGw3**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 1.5 Gbps<br>700 Mbps<br>140 Mbps | 140,000<br>66,000<br>13,000|
+|**Generation2**|**VpnGw4**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 2.3 Gbps<br>700 Mbps<br>140 Mbps | 220,000<br>66,000<br>13,000|
+|**Generation2**|**VpnGw5**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 2.3 Gbps<br>700 Mbps<br>140 Mbps | 220,000<br>66,000<br>13,000|
+|**Generation2**|**VpnGw2AZ**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 1.25 Gbps<br>550 Mbps<br>130 Mbps | 120,000<br>52,000<br>12,000|
+|**Generation2**|**VpnGw3AZ**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 1.5 Gbps<br>700 Mbps<br>140 Mbps | 140,000<br>66,000<br>13,000|
+|**Generation2**|**VpnGw4AZ**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 2.3 Gbps<br>700 Mbps<br>140 Mbps | 220,000<br>66,000<br>13,000|
+|**Generation2**|**VpnGw5AZ**| GCMAES256<br>AES256 & SHA256<br>DES3 & SHA256| 2.3 Gbps<br>700 Mbps<br>140 Mbps | 220,000<br>66,000<br>
                    13,000| diff --git a/includes/vpn-gateway-tls-change.md b/includes/vpn-gateway-tls-change.md index fa357ba72f710..d0bb56550e2c8 100644 --- a/includes/vpn-gateway-tls-change.md +++ b/includes/vpn-gateway-tls-change.md @@ -1,12 +1,8 @@ --- - title: include file - description: include file - services: vpn-gateway + ms.topic: include author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 06/05/2018 + ms.date: 05/23/2022 ms.author: cherylmc - ms.custom: include file --- -Starting July 1, 2018, support is being removed for TLS 1.0 and 1.1 from Azure VPN Gateway. VPN Gateway will support only TLS 1.2. Only point-to-site connections are impacted; site-to-site connections will not be affected. If you’re using TLS for point-to-site VPNs on Windows 10 clients, you don’t need to take any action. If you are using TLS for point-to-site connections on Windows 7 and Windows 8 clients, see the [VPN Gateway FAQ](../articles/vpn-gateway/vpn-gateway-vpn-faq.md#P2S) for update instructions. \ No newline at end of file +Starting July 1, 2018, support is being removed for TLS 1.0 and 1.1 from Azure VPN Gateway. VPN Gateway will support only TLS 1.2. Only point-to-site connections are impacted; site-to-site connections won't be affected. If you’re using TLS for point-to-site VPNs on Windows 10 or later clients, you don’t need to take any action. If you're using TLS for point-to-site connections on Windows 7 and Windows 8 clients, see the [VPN Gateway FAQ](../articles/vpn-gateway/vpn-gateway-vpn-faq.md#P2S) for update instructions. \ No newline at end of file diff --git a/includes/vpn-gateway-tls-include.md b/includes/vpn-gateway-tls-include.md index f63087515e324..5f1a4a6bd1d2f 100644 --- a/includes/vpn-gateway-tls-include.md +++ b/includes/vpn-gateway-tls-include.md @@ -1,13 +1,10 @@ --- title: include file - description: include file - services: vpn-gateway author: cherylmc ms.service: vpn-gateway ms.topic: include - ms.date: 07/27/2018 + ms.date: 05/23/2022 ms.author: cherylmc - ms.custom: include file --- diff --git a/includes/vpn-gateway-virtual-wan-vpn-profile-intune.md b/includes/vpn-gateway-virtual-wan-vpn-profile-intune.md index d14b7cf66c9ac..de680536c2f5c 100644 --- a/includes/vpn-gateway-virtual-wan-vpn-profile-intune.md +++ b/includes/vpn-gateway-virtual-wan-vpn-profile-intune.md @@ -1,18 +1,14 @@ --- - title: include file - description: include file - services: vpn-gateway + ms.topic: include author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 04/26/2021 + ms.date: 05/23/2022 ms.author: cherylmc - ms.custom: include file # This include is used for both Virtual WAN and VPN Gateway articles. Any changes you make must apply address both services. --- -You can deploy profiles for Azure VPN clients (Windows 10) by using Microsoft Intune. This article helps you create an Intune profile using custom settings. +You can deploy profiles for Azure VPN clients (Windows 10 or later) by using Microsoft Intune. This article helps you create an Intune profile using custom settings. > [!NOTE] >* This article applies to deploying profiles that use Azure Active Directory for authentication only. @@ -21,7 +17,7 @@ You can deploy profiles for Azure VPN clients (Windows 10) by using Microsoft In ## Prerequisites * Devices are already enrolled with Intune MDM. -* The Azure VPN Client for Windows 10 is already deployed on the client machine. +* The Azure VPN Client for Windows 10 or later is already deployed on the client machine. 
* Only Windows version 19H2 or higher is supported. ## Modify XML diff --git a/includes/vpn-gateway-vwan-always-on-device.md b/includes/vpn-gateway-vwan-always-on-device.md index 0eab77642e4ff..060b7f5312670 100644 --- a/includes/vpn-gateway-vwan-always-on-device.md +++ b/includes/vpn-gateway-vwan-always-on-device.md @@ -1,13 +1,9 @@ --- - title: include file - description: include file - services: vpn-gateway + ms.topic: include author: cherylmc ms.service: vpn-gateway - ms.topic: include ms.date: 05/26/2021 ms.author: cherylmc - ms.custom: include file # this file is used for both virtual wan and vpn gateway. When modifying, make sure that your changes work for both environments. --- @@ -18,12 +14,12 @@ The following requirements must be met in order to successfully establish a devi * The tunnel is only configurable for the Windows built-in VPN solution and is established using IKEv2 with computer certificate authentication. * Only one device tunnel can be configured per device. -1. Install client certificates on the Windows 10 client using the [point-to-site VPN client](../articles/vpn-gateway/point-to-site-how-to-vpn-client-install-azure-cert.md) article. The certificate needs to be in the Local Machine store. +1. Install client certificates on the Windows 10 or later client using the [point-to-site VPN client](../articles/vpn-gateway/point-to-site-how-to-vpn-client-install-azure-cert.md) article. The certificate needs to be in the Local Machine store. 1. Create a VPN Profile and configure device tunnel in the context of the LOCAL SYSTEM account using [these instructions](/windows-server/remote/remote-access/vpn/vpn-device-tunnel-config#vpn-device-tunnel-configuration). ### Configuration example for device tunnel -After you have configured the virtual network gateway and installed the client certificate in the Local Machine store on the Windows 10 client, use the following examples to configure a client device tunnel: +After you have configured the virtual network gateway and installed the client certificate in the Local Machine store on the Windows 10 or later client, use the following examples to configure a client device tunnel: 1. Copy the following text and save it as ***devicecert.ps1***. diff --git a/includes/vpn-gateway-vwan-always-on-intro.md b/includes/vpn-gateway-vwan-always-on-intro.md index 89614ade57fde..47234e97680cd 100644 --- a/includes/vpn-gateway-vwan-always-on-intro.md +++ b/includes/vpn-gateway-vwan-always-on-intro.md @@ -1,20 +1,17 @@ --- title: include file - description: include file - services: vpn-gateway author: cherylmc ms.service: vpn-gateway ms.topic: include - ms.date: 05/26/2021 + ms.date: 05/23/2022 ms.author: cherylmc - ms.custom: include file # this file is used for both virtual wan and vpn gateway. When modifying, make sure that your changes work for both environments. --- -A new feature of the Windows 10 VPN client, Always On, is the ability to maintain a VPN connection. With Always On, the active VPN profile can connect automatically and remain connected based on triggers, such as user sign-in, network state change, or device screen active. +A new feature of the Windows 10 or later VPN client, Always On, is the ability to maintain a VPN connection. With Always On, the active VPN profile can connect automatically and remain connected based on triggers, such as user sign-in, network state change, or device screen active. -You can use gateways with Windows 10 Always On to establish persistent user tunnels and device tunnels to Azure. 
+You can use gateways with Always On to establish persistent user tunnels and device tunnels to Azure. Always On VPN connections include either of two types of tunnels: diff --git a/includes/vpn-gateway-vwan-always-on-user.md index 9d418f3a571cc..3fd3d5a9455aa 100644 --- a/includes/vpn-gateway-vwan-always-on-user.md +++ b/includes/vpn-gateway-vwan-always-on-user.md @@ -1,25 +1,22 @@ --- - title: include file - description: include file - services: vpn-gateway + ms.topic: include author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 05/26/2021 + ms.date: 05/23/2022 ms.author: cherylmc - ms.custom: include file + # this file is used for both virtual wan and vpn gateway. When modifying, make sure that your changes work for both environments. --- -1. Install client certificates on the Windows 10 client, as shown in this [point-to-site VPN client](../articles/vpn-gateway/point-to-site-how-to-vpn-client-install-azure-cert.md) article. The certificate must be in the current user store. +1. Install client certificates on the Windows 10 or later client, as shown in this [point-to-site VPN client](../articles/vpn-gateway/point-to-site-how-to-vpn-client-install-azure-cert.md) article. The certificate must be in the current user store. -1. Configure the Always On VPN client through PowerShell, Configuration Manager, or Intune by following the instructions in [Configure Windows 10 client Always On VPN connections](/windows-server/remote/remote-access/vpn/always-on-vpn/deploy/vpn-deploy-client-vpn-connections). +1. Configure the Always On VPN client through PowerShell, Configuration Manager, or Intune by following the instructions in [Configure Windows 10 or later client Always On VPN connections](/windows-server/remote/remote-access/vpn/always-on-vpn/deploy/vpn-deploy-client-vpn-connections). ### Example configuration for the user tunnel -After you've configured the virtual network gateway and installed the client certificate in the local machine store on the Windows 10 client, configure a client device tunnel by using the following examples: +After you've configured the virtual network gateway and installed the client certificate in the current user store on the Windows 10 or later client, configure a user tunnel by using the following examples: 1. Copy the following text, and save it as *usercert.ps1*: